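"""Extract timestamped events from EPU controller, Torque, and AMQP worker logs.

Each *Events class below locates the log files for a given run under the
configured log directories and parses them into event-name -> datetime
mappings (or, for controller kill events, lists of datetimes).
"""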
import datetime
import json
from epumgmt.api.exceptions import InvalidConfig
import os
# Torque times are reported using the local timezone, e.g. pacific if using
# us-west EC2, however, EPU components are logging in UTC
UTC_OFFSET = 7
# Events:
# EPU_CONTROLLER_START
# EPU_CONTROLLER_TERMINATE
class ControllerEvents:
def __init__(self, p, c, m, run_name):
self.p = p
self.c = c
self.m = m
self.run_name = run_name
def _set_controllerlog_filenames(self):
filenames = []
runlogdir = self.p.get_conf_or_none("events", "runlogdir")
if not runlogdir:
raise InvalidConfig("There is no runlogdir configuration")
if not os.path.isabs(runlogdir):
runlogdir = self.c.resolve_var_dir(runlogdir)
tld = os.path.join(runlogdir, self.run_name)
controllerlogdir = os.path.join(tld, "epucontrollerkill_logs")
logs = os.listdir(controllerlogdir)
for log in logs:
filenames.append(os.path.join(controllerlogdir, log))
self.c.log.debug("Setting controller log filenames: %s" % filenames)
self.controllerlog_filenames = filenames
def _update_log_filenames(self):
self.c.log.debug('Gathering controller kill log filenames')
self.controllerlog_filenames = None
self._set_controllerlog_filenames()
    def get_event_count(self, event):
        # ControllerEvents only provides a list of event times (no job ids),
        # so count the list rather than a dict as the other classes do.
        events = self.get_event_datetimes_list(event)
        return len(events)
def _create_datetime(self, date_str, time_str):
splitdate = date_str.split('-')
splittime = time_str.split(':')
month = int(splitdate[1].strip())
day = int(splitdate[2].strip())
year = int(splitdate[0].strip())
hour = int(splittime[0].strip())
minute = int(splittime[1].strip())
second = int(splittime[2].strip().split('.')[0].strip())
microsecond = int(splittime[2].strip().split('.')[1].strip())
        return datetime.datetime(year, month, day, hour,
                                 minute, second, microsecond)
def get_event_datetimes_list(self, orig_event):
self._update_log_filenames()
# all of these events will be in the server_log files
filenames = self.controllerlog_filenames
event = orig_event
event_times = []
if filenames:
for filename in filenames:
try:
event_file = open(filename, 'r')
try:
for line in event_file:
if event in line:
splitline = line.split()
lineevent = splitline[0]
date_str = splitline[1].strip()
time_str = splitline[2].strip()
event_time = self._create_datetime(date_str, time_str)
event_times.append(event_time)
finally:
event_file.close()
except IOError:
self.c.log.error('Failed to open and read from file: ' + \
'%s' % filename)
return event_times
# Events:
# job_sent: Job Queued
# job_begin: Job Run
# job_end: Exit_status
class TorqueEvents:
def __init__(self, p, c, m, run_name):
self.p = p
self.c = c
self.m = m
self.run_name = run_name
def _set_serverlog_filenames(self):
filenames = []
runlogdir = self.p.get_conf_or_none("events", "runlogdir")
if not runlogdir:
raise InvalidConfig("There is no runlogdir configuration")
if not os.path.isabs(runlogdir):
runlogdir = self.c.resolve_var_dir(runlogdir)
tld = os.path.join(runlogdir, self.run_name)
torquelogdir = os.path.join(tld, "torque-server_logs")
logs = os.listdir(torquelogdir)
for log in logs:
filenames.append(os.path.join(torquelogdir, log))
self.c.log.debug("Setting server log filenames: %s" % filenames)
self.serverlog_filenames = filenames
def _update_log_filenames(self):
self.c.log.debug('Gathering torque log filenames')
self.serverlog_filenames = None
self._set_serverlog_filenames()
def get_event_count(self, event):
events = self.get_event_datetimes_dict(event)
return len(events.keys())
    def _create_datetime(self, date_str, time_str):
        splitdate = date_str.split('/')
        splittime = time_str.split(':')
        month = int(splitdate[0].strip())
        day = int(splitdate[1].strip())
        year = int(splitdate[2].strip())
        hour = int(splittime[0].strip())
        minute = int(splittime[1].strip())
        second = int(splittime[2].strip())
        local_time = datetime.datetime(year, month, day, hour, minute, second)
        # Shift from the Torque server's local timezone to UTC. Adding a
        # timedelta (rather than adding directly to the hour field) avoids
        # ValueErrors when the shifted hour rolls past midnight.
        return local_time + datetime.timedelta(hours=UTC_OFFSET)
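    # The parser below assumes torque server_log lines roughly of the form
    #   MM/DD/YYYY HH:MM:SS;<code>;PBS_Server;Job;<jobid>.<host>;<event text>
    # This is inferred from the split indices used; the exact field layout on
    # a given Torque installation may differ.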
def get_event_datetimes_dict(self, orig_event):
self._update_log_filenames()
# all of these events will be in the server_log files
filenames = self.serverlog_filenames
event = orig_event
if orig_event == 'job_sent':
event = 'Job Queued'
elif orig_event == 'job_begin':
event = 'Job Run'
elif orig_event == 'job_end':
event = 'Exit_status'
else:
self.c.log.error("Unrecognized event: %s" % event)
return {}
event_times = {}
if filenames:
for filename in filenames:
try:
event_file = open(filename, 'r')
try:
for line in event_file:
if event in line:
splitline = line.split()
splitinfo = splitline[1].split(';')
date_str = splitline[0].strip()
time_str = splitinfo[0].strip()
event_time = self._create_datetime(date_str, time_str)
job_id = int(splitinfo[4].strip().split('.')[0].strip())
                                self.c.log.debug("Parsed torque event fields: %s" % splitinfo)
event_times[job_id] = event_time
finally:
event_file.close()
except IOError:
self.c.log.error('Failed to open and read from file: ' + \
'%s' % filename)
self.c.log.debug("Event %s times: %s" % (orig_event, event_times))
return event_times
# Events:
# fetch_killed: time VM killed
# new_node: node launch time (earlier event)
# node_started: node boot time (later event)
class NodeEvents:
def __init__(self, p, c, m, run_name):
self.p = p
self.c = c
self.m = m
self.run_name = run_name
    def _create_datetime(self, timestamp):
        return datetime.datetime(timestamp['year'],
                                 timestamp['month'],
                                 timestamp['day'],
                                 timestamp['hour'],
                                 timestamp['minute'],
                                 timestamp['second'],
                                 timestamp['microsecond'])
# node boot times and node launch times
def _set_provisionerlog_filenames(self):
logName = 'ioncontainer.log'
filenames = []
baseDir = self.p.get_conf_or_none("events", "runlogdir")
if not os.path.isabs(baseDir):
baseDir = self.c.resolve_var_dir(baseDir)
baseDir = os.path.join(baseDir, self.run_name)
for root, dirs, files in os.walk(baseDir):
if 'provisioner' in os.path.basename(root):
if logName in files:
filenames.append(os.path.join(root, logName))
self.provisionerlog_filenames = filenames
# vm fetch killed times
def _set_vmkilllog_filenames(self):
logName = '--' + self.run_name + '-fetchkill-'
filenames = []
baseDir = self.p.get_conf_or_none("logging", "logfiledir")
if not os.path.isabs(baseDir):
baseDir = self.c.resolve_var_dir(baseDir)
for root, dirs, files in os.walk(baseDir):
            self.c.log.debug("Files in %s: %s" % (root, files))
for fileName in files:
if logName in fileName:
filenames.append(os.path.join(root, fileName))
self.vmkilllog_filenames = filenames
def _update_log_filenames(self):
self.c.log.debug('Gathering node log filenames')
self.provisionerlog_filenames = None
self.vmkilllog_filenames = None
self._set_provisionerlog_filenames()
self._set_vmkilllog_filenames()
def get_event_count(self, event):
events = self.get_event_datetimes_dict(event)
return len(events.keys())
def get_event_datetimes_dict(self, event):
# first update the filenames, logs from new instances
# may have arrived since we last ran this
self._update_log_filenames()
filenames = []
if 'launch_ctx_done' == event:
jsonid = 'node_ids'
else:
jsonid = ''
if 'fetch_killed' == event:
filenames = self.vmkilllog_filenames
elif 'new_node' == event:
filenames = self.provisionerlog_filenames
elif 'terminated_node' == event:
filenames = self.provisionerlog_filenames
elif 'node_started' == event:
filenames = self.provisionerlog_filenames
elif 'launch_ctx_done' == event:
filenames = self.provisionerlog_filenames
else:
self.c.log.error("Unrecognized event: %s" % event)
return {}
event_times = {}
if filenames:
for filename in filenames:
try:
event_file = open(filename, 'r')
try:
for line in event_file:
if event in line:
if not jsonid:
if 'iaas_id' in line:
jsonid = 'iaas_id'
else:
jsonid = 'node_id'
                                splitline = line.rpartition('JSON:')[2].strip()
                                try:
                                    jsonEvent = json.loads(splitline)
                                except ValueError:
                                    emsg = "Problem parsing JSON: '%s'"
                                    self.c.log.exception(emsg % splitline)
                                    continue
timestamp = jsonEvent['timestamp']
event_time = self._create_datetime(timestamp)
if event == 'launch_ctx_done':
k = jsonEvent['extra'][jsonid][0]
else:
k = jsonEvent['extra'][jsonid]
event_times[k] = event_time
finally:
event_file.close()
except IOError:
self.c.log.error('Failed to open and read from file: ' + \
'%s' % filename)
return event_times
# Events:
# job_sent: time job sent from amqp server to worker
# job_begin: time job starts on worker
# job_end: time job ends on worker
class AmqpEvents:
def __init__(self, p, c, m, run_name):
self.p = p
self.c = c
self.m = m
self.run_name = run_name
    def _create_datetime(self, timestamp):
        return datetime.datetime(timestamp['year'],
                                 timestamp['month'],
                                 timestamp['day'],
                                 timestamp['hour'],
                                 timestamp['minute'],
                                 timestamp['second'],
                                 timestamp['microsecond'])
# job events: job_sent
def _set_workproducerlog_filenames(self):
logName = 'ioncontainer.log'
filenames = []
baseDir = self.p.get_conf_or_none("events", "runlogdir")
if not os.path.isabs(baseDir):
baseDir = self.c.resolve_var_dir(baseDir)
baseDir = os.path.join(baseDir, self.run_name)
for root, dirs, files in os.walk(baseDir):
if 'producer1-container' in os.path.basename(root):
if logName in files:
filenames.append(os.path.join(root, logName))
self.workproducerlog_filenames = filenames
# job events: job_begin, job_end
def _set_workconsumerlog_filenames(self):
logName = 'ioncontainer.log'
filenames = []
baseDir = self.p.get_conf_or_none("events", "runlogdir")
if not os.path.isabs(baseDir):
baseDir = self.c.resolve_var_dir(baseDir)
baseDir = os.path.join(baseDir, self.run_name)
for root, dirs, files in os.walk(baseDir):
if 'epuworker_container' in os.path.basename(root):
if logName in files:
filenames.append(os.path.join(root, logName))
self.workconsumerlog_filenames = filenames
def _update_log_filenames(self):
self.c.log.debug('Gathering amqp log filenames')
self.workproducerlog_filenames = None
self.workconsumerlog_filenames = None
self._set_workproducerlog_filenames()
self._set_workconsumerlog_filenames()
def get_event_count(self, event):
events = self.get_event_datetimes_dict(event)
return len(events.keys())
def get_event_datetimes_dict(self, event):
# first update the filenames, logs from new instances
# may have arrived since we last ran this
self._update_log_filenames()
filenames = []
jsonid = ''
if ('job_begin' == event) or ('job_end' == event):
filenames = self.workconsumerlog_filenames
jsonid = 'jobid'
elif 'job_sent' == event:
filenames = self.workproducerlog_filenames
jsonid = 'jobid'
else:
self.c.log.error("Unrecognized event: %s" % event)
return {}
event_times = {}
if filenames:
for filename in filenames:
try:
event_file = open(filename, 'r')
try:
for line in event_file:
if event in line:
                                splitline = line.rpartition('JSON:')[2].strip()
jsonEvent = json.loads(splitline)
timestamp = jsonEvent['timestamp']
event_time = self._create_datetime(timestamp)
k = jsonEvent['extra'][jsonid]
event_times[k] = event_time
finally:
event_file.close()
except IOError:
self.c.log.error('Failed to open and read from file: ' + \
'%s' % filename)
return event_times
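# A minimal usage sketch (assumptions flagged): the p, c, and m arguments are
# the epumgmt parameters, common, and modules objects supplied by the calling
# framework; their exact types are not defined in this module.
#
#     events = TorqueEvents(p, c, m, "run-001")
#     queued = events.get_event_datetimes_dict("job_sent")
#     for job_id, queued_at in queued.items():
#         c.log.info("job %s queued at %s" % (job_id, queued_at))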
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations for generating and loading vocab remappings."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_checkpoint_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
ops.NotDifferentiable("GenerateVocabRemapping")
ops.NotDifferentiable("LoadAndRemapMatrix")
def _load_and_remap_matrix(ckpt_path,
old_tensor_name,
new_row_vocab_offset,
num_rows_to_load,
new_col_vocab_size,
initializer,
old_row_vocab_file=None,
new_row_vocab_file=None,
old_col_vocab_file=None,
new_col_vocab_file=None,
num_row_oov_buckets=0,
num_col_oov_buckets=0,
max_rows_in_memory=-1):
"""Loads a 2-D (matrix) `Tensor` from checkpoint.
Generates 1D-remappings for rows and columns using the
`GenerateVocabRemapping` op, and initializes any anticipated values with the
provided initializer. Then, uses the `LoadAndRemapMatrix` op to create a
matrix that loads existing values from the checkpoint, while filling out
"missing" values with the newly initialized values. See
contrib/framework/ops/checkpoint_ops.cc for more information on the wrapped
functionality (LoadAndRemapMatrix). This wrapper can be used to perform only
row remapping or only col remapping. If only row remapping is desired,
{new,old}_col_vocab_file should be `None`, and vice versa for column
remapping.
NOTE: This only supports div-partitioning the vocabulary on the 1st dimension
(row axis) via `new_row_vocab_offset`.
Args:
ckpt_path: Path to the TensorFlow checkpoint (version 2, `TensorBundle`)
from which the old matrix `Tensor` will be loaded.
old_tensor_name: Name of the 2-D `Tensor` to load from checkpoint.
new_row_vocab_offset: A 0-indexed integer representing what line to
start reading at in the new row vocabulary. Used for partitioned
variables.
num_rows_to_load: Number of rows to load for the new vocabulary (note: to
support variable partitioning and partial loading, this does not need to
be the same as the number of entries in `new_row_vocab_file`).
new_col_vocab_size: Number of columns to load - should be the same as the
number of entries in `new_col_vocab_file`, since we don't support
partitioning along the column axis.
initializer: Callable initializer function that accepts a 1-D tensor as the
arg to specify the shape of the returned tensor. Used to initialize
missing values.
old_row_vocab_file: A scalar `Tensor` of type `string` containing the
path to the old row vocabulary file. Can be None, which represents no
remapping on the row axis.
new_row_vocab_file: A scalar `Tensor` of type `string` containing the path
to the new row vocabulary file. Can be None, which represents no remapping
on the row axis - in which case, `new_row_vocab_offset` and
`num_rows_to_load` work under the assumption that the new row vocab is the
same as the old row vocab.
old_col_vocab_file: A scalar `Tensor` of type `string` containing the
path to the old column vocabulary file. Can be None, which represents no
remapping on the column axis.
new_col_vocab_file: A scalar `Tensor` of type `string` containing the path
to the new column vocabulary file. Can be None, which represents no
remapping on the column axis - in which case, `new_col_vocab_size` works
under the assumption that the new col vocab is the same as the old col
vocab.
num_row_oov_buckets: `int` specifying the number of out-of-vocabulary rows
to append. Must be >= 0.
num_col_oov_buckets: `int` specifying the number of out-of-vocabulary
columns to append. Must be >= 0.
max_rows_in_memory: `int` specifying the maximum number of rows to load from
the checkpoint at once. If less than or equal to 0, the entire matrix will
be loaded into memory. Setting this arg trades increased disk reads for
lower memory usage.
Returns:
A Tensor of shape `[num_rows_to_load + num_row_oov_buckets,
new_col_vocab_size + num_col_oov_buckets]`, with values loaded from the
specified tensor in the checkpoint, and any missing or OOV values
initialized with the given `initializer`.
Raises:
ValueError: If `num_row_oov_buckets` or `num_col_oov_buckets` < 0.
ValueError: If either `old_row_vocab_file` or `new_row_vocab_file` is
provided, while the other is not. Same for `old_col_vocab_file` and
`new_col_vocab_file`.
    ValueError: If neither row vocabs nor col vocabs are provided.
"""
if num_row_oov_buckets < 0:
raise ValueError("num_row_oov_buckets must be >= 0, but received %d" %
num_row_oov_buckets)
if num_col_oov_buckets < 0:
raise ValueError("num_col_oov_buckets must be >= 0, but received %d" %
num_col_oov_buckets)
if bool(old_row_vocab_file) != bool(new_row_vocab_file):
raise ValueError(
"old_row_vocab_file and new_row_vocab_file must both be specified or "
"left unspecified. old_row_vocab_file='{}', new_row_vocab_file='{}'".
format(old_row_vocab_file, new_row_vocab_file))
if bool(old_col_vocab_file) != bool(new_col_vocab_file):
raise ValueError(
"old_col_vocab_file and new_col_vocab_file must both be specified or "
"left unspecified. old_col_vocab_file='{}', new_col_vocab_file='{}'".
format(old_col_vocab_file, new_col_vocab_file))
remap_rows = new_row_vocab_file and old_row_vocab_file
remap_cols = new_col_vocab_file and old_col_vocab_file
if not (remap_rows or remap_cols):
raise ValueError(
"Must provide either row or column vocab files. If no remapping is "
"necessary, consider using `tf.contrib.framework.init_from_checkpoint` "
"instead.")
num_rows_present = num_rows_to_load
if remap_rows:
row_remapping, num_rows_present = (
gen_checkpoint_ops._generate_vocab_remapping( # pylint: disable=protected-access
new_vocab_file=new_row_vocab_file,
old_vocab_file=old_row_vocab_file,
new_vocab_offset=new_row_vocab_offset,
num_new_vocab=num_rows_to_load))
else:
# Even when the rows are not being reordered, we still need to generate a
# remapping to account for initializing partitioned Variables (when
# new_row_vocab_offset is non-zero).
row_remapping = math_ops.range(
new_row_vocab_offset,
new_row_vocab_offset + num_rows_to_load,
dtype=dtypes.int64)
col_remapping = []
num_cols_present = new_col_vocab_size
if remap_cols:
col_remapping, num_cols_present = (
gen_checkpoint_ops._generate_vocab_remapping( # pylint: disable=protected-access
new_vocab_file=new_col_vocab_file,
old_vocab_file=old_col_vocab_file,
new_vocab_offset=0, # Offset is unused for cols (no partitioning).
num_new_vocab=new_col_vocab_size))
init_vals = initializer([
num_rows_to_load * new_col_vocab_size -
num_rows_present * num_cols_present, 1
])
return_tensor = gen_checkpoint_ops._load_and_remap_matrix( # pylint: disable=protected-access
ckpt_path=ckpt_path,
old_tensor_name=old_tensor_name,
row_remapping=row_remapping,
col_remapping=col_remapping,
initializing_values=init_vals,
num_rows=num_rows_to_load,
num_cols=new_col_vocab_size,
max_rows_in_memory=max_rows_in_memory)
# Add OOV row(s) and column(s).
if num_row_oov_buckets > 0:
init_row_oov_val = initializer([num_row_oov_buckets, new_col_vocab_size])
init_row_oov_val = ops.convert_to_tensor(init_row_oov_val)
return_tensor = array_ops.concat([return_tensor, init_row_oov_val], 0)
if num_col_oov_buckets > 0:
# We need to add any row OOV to the new column shape.
init_col_oov_val = initializer(
[num_rows_to_load + num_row_oov_buckets, num_col_oov_buckets])
init_col_oov_val = ops.convert_to_tensor(init_col_oov_val)
return_tensor = array_ops.concat([return_tensor, init_col_oov_val], 1)
return return_tensor
def load_and_remap_matrix_initializer(ckpt_path,
old_tensor_name,
new_row_vocab_size,
new_col_vocab_size,
old_row_vocab_file=None,
new_row_vocab_file=None,
old_col_vocab_file=None,
new_col_vocab_file=None,
num_row_oov_buckets=0,
num_col_oov_buckets=0,
initializer=None,
max_rows_in_memory=-1):
r"""Returns a var initializer for loading and remapping a 2-D (matrix) tensor.
The returned initializer loads a 2-D (matrix) `Tensor` with name
`old_tensor_name` from the checkpoint at `ckpt_path`. It will reorder the
rows/columns according to the specified vocab files and append additional
out-of-vocabulary rows/columns according to the number of OOV buckets.
The format of the file at the `{old,new}_{row,col}_vocab_file` path should be
a text file, with each line containing a single entity within the vocabulary.
Let the function `line_of(f, "x")` return the 0-indexed line number of the
entity "x" in file f, and the function `entity_at(f, i)` return the entity at
line i of file f. Then, row i of the new output matrix will be taken from row
`line_of(old_row_vocab_file, entity_at(new_row_vocab_file, i))` of the old
matrix. If any entity in `new_row_vocab_file` is not found in
`old_row_vocab_file`, that row is considered a "missing" row, and its values
will be initialized using the `initializer` arg. The same logic also applies
for the columns.
For example, assuming that:
* `old_row_vocab_file` contains "mercury\nvenus\nmars"
* `new_row_vocab_file` contains "venus\njupiter\nmercury"
* `old_col_vocab_file` contains "good\nbetter\nbest"
* `new_col_vocab_file` contains "good\nbest\nfantastic"
* `initializer` returns the natural numbers `[1, 2, 3, 4, ...]`
* `w(i, j)` represents the value from row i, column j of the old matrix
Then the new output matrix will look like:
`[[w(1, 0), w(1, 2), 1],
[2, 3, 4],
[w(0, 0), w(0, 2), 5]]`
If we further specify that:
* `num_row_oov_buckets` == 2
* `num_col_oov_buckets` == 1
Then the new output matrix will look like:
`[[w(1, 0), w(1, 2), 1, 12],
[2, 3, 4, 13],
[w(0, 0), w(0, 2), 5, 14],
[6, 7, 8, 15],
[9, 10, 11, 16]]`
If `{old,new}_row_vocab_file` are None, we assume that the old and new row
vocab files are the same, and no row remapping is done. If
`{old,new}_col_vocab_file` are None, we assume that the old and new column
vocab files are the same, and no column remapping is done.
The returned initializer only supports div-partitioning along the row axis. It
does not support partitioning along the column axis or mod-partitioning.
NOTE: When this is used to warm-start variables, client code should use
`tf.lookup.index_table_from_tensor()` like
contrib/layers/python/layers/feature_column.py does, as opposed to
`tf.feature_to_id()` - in order to ensure the underlying lookup tables are the
same.
Args:
ckpt_path: Path to the TensorFlow checkpoint (version 2, `TensorBundle`)
from which the old matrix `Tensor` will be loaded.
old_tensor_name: Name of the 2-D `Tensor` to load from checkpoint.
new_row_vocab_size: `int` specifying the number of entries in
`new_row_vocab_file`. If no row remapping is needed (no row vocab
provided), this should be equal to the number of rows to load from the old
matrix (which can theoretically be smaller than the number of rows in the
old matrix).
new_col_vocab_size: `int` specifying the number of entries in
`new_col_vocab_file`. If no column remapping is needed (no column vocab
provided), this should be equal to the number of columns in the old
matrix.
old_row_vocab_file: A scalar `Tensor` of type `string` containing the
path to the old row vocabulary file. Can be None, which represents no
remapping on the row axis.
new_row_vocab_file: A scalar `Tensor` of type `string` containing the path
to the new row vocabulary file. Can be None, which represents no remapping
on the row axis.
old_col_vocab_file: A scalar `Tensor` of type `string` containing the
path to the old column vocabulary file. Can be None, which represents no
remapping on the column axis.
new_col_vocab_file: A scalar `Tensor` of type `string` containing the path
to the new column vocabulary file. Can be None, which represents no
remapping on the column axis.
num_row_oov_buckets: `int` specifying the number of out-of-vocabulary rows
to append. Must be >= 0.
num_col_oov_buckets: `int` specifying the number of out-of-vocabulary
columns to append. Must be >= 0.
initializer: Initializer function to initialize missing values. Accepts a
1-D tensor as the arg to specify the shape of the returned tensor. If
`None`, defaults to using `zeros_initializer()`.
max_rows_in_memory: `int` specifying the maximum number of rows to load from
the checkpoint at once. If less than or equal to 0, the entire matrix will
be loaded into memory. Setting this arg trades increased disk reads for
lower memory usage.
Returns:
A variable initializer function that should be used to initialize a
(potentially partitioned) `Variable` whose complete shape is
`[new_row_vocab_size + num_row_oov_buckets, new_col_vocab_size +
num_col_oov_buckets]`.
Raises:
TypeError: If `initializer` is specified but not callable.
"""
if initializer is None:
# TODO(b/25671353): Consider using sqrt(6/(fan_in + fan_out)) instead, from
# Glorot and Bengio, 2010.
initializer = init_ops.zeros_initializer()
if not callable(initializer):
raise TypeError(
"initializer must be callable, instead of being {} of type {}.".format(
initializer, type(initializer)))
def _initializer(shape, dtype=dtypes.float32, partition_info=None):
"""Variable initializer.
Args:
shape: Shape of `Tensor` to return. Should include OOV on both axes.
dtype: Must be float32.
partition_info: variable_scope._PartitionInfo.
Returns:
`Tensor` of shape `shape`.
Raises:
TypeError: If `dtype` is anything other than float32.
ValueError: For shape mismatch upon invocation.
"""
# Sanity checks.
if dtype != dtypes.float32:
raise TypeError(
"Currently, only float32 is supported. Received dtype: {}".format(
dtype))
if len(shape) != 2:
raise ValueError("Expected 2-dim shape, but received: {}".format(shape))
if shape[0] <= 0:
raise ValueError(
"Expected 1st dim of shape to be > 0, but received shape: {}".format(
shape))
if shape[1] != (new_col_vocab_size + num_col_oov_buckets):
raise ValueError(
"Expected 2nd dim of shape to be new_col_vocab_size ({}) + "
"num_col_oov_buckets ({}) = {}, but received shape: {}".format(
new_col_vocab_size, num_col_oov_buckets,
new_col_vocab_size + num_col_oov_buckets, shape))
offset = 0
if partition_info is not None:
offset = partition_info.single_offset(shape)
if offset + shape[0] > new_row_vocab_size + num_row_oov_buckets:
raise ValueError(
"Trying to initialize {} additional rows after {} rows have already "
"been initialized, which would exceed expected total row count of "
"new_row_vocab_size ({}) + num_row_oov_buckets ({}) = {}.".format(
shape[0], offset, new_row_vocab_size, num_row_oov_buckets,
new_row_vocab_size + num_row_oov_buckets))
row_oov_buckets_to_use = min(shape[0],
max(0, offset + shape[0] - new_row_vocab_size))
num_rows_to_load = shape[0] - row_oov_buckets_to_use
return _load_and_remap_matrix(
ckpt_path=ckpt_path,
old_tensor_name=old_tensor_name,
new_row_vocab_offset=offset,
num_rows_to_load=num_rows_to_load,
new_col_vocab_size=new_col_vocab_size,
initializer=initializer,
old_row_vocab_file=old_row_vocab_file,
new_row_vocab_file=new_row_vocab_file,
old_col_vocab_file=old_col_vocab_file,
new_col_vocab_file=new_col_vocab_file,
num_row_oov_buckets=row_oov_buckets_to_use,
num_col_oov_buckets=num_col_oov_buckets,
max_rows_in_memory=max_rows_in_memory)
return _initializer
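# A hedged usage sketch (the checkpoint path, tensor name, vocab files, and
# sizes below are illustrative placeholders, and `import tensorflow as tf` is
# assumed at the call site): the returned initializer is meant to be passed to
# a variable creation call such as tf.get_variable, whose full shape must
# include the OOV buckets on both axes.
#
#     init = load_and_remap_matrix_initializer(
#         ckpt_path='/tmp/model.ckpt',
#         old_tensor_name='linear/weights',
#         new_row_vocab_size=100,
#         new_col_vocab_size=10,
#         old_row_vocab_file='/tmp/old_row_vocab.txt',
#         new_row_vocab_file='/tmp/new_row_vocab.txt',
#         num_row_oov_buckets=2)
#     weights = tf.get_variable(
#         'weights', shape=[100 + 2, 10], initializer=init)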
def load_embedding_initializer(ckpt_path,
embedding_tensor_name,
new_vocab_size,
embedding_dim,
old_vocab_file,
new_vocab_file,
num_oov_buckets=0,
initializer=None,
max_rows_in_memory=-1):
"""Returns a variable initializer for loading pre-trained embeddings.
Wrapper around `load_and_remap_matrix_initializer()` specialized for loading
embedding weights and remapping according to the provided vocab files. See
docs for `load_and_remap_matrix_initializer()` for more details.
NOTE: Only for use with div-partitioned variables / vocabularies.
Args:
ckpt_path: Path to the TensorFlow checkpoint (version 2, `TensorBundle`)
from which the old matrix `Tensor` will be loaded.
embedding_tensor_name: Name of the 2-D `Tensor` to load from checkpoint.
new_vocab_size: Number of entries in the new vocab.
embedding_dim: `int` specifying the dimension of the embedding vectors from
the checkpoint. Must match the number of columns in the old embedding
matrix.
old_vocab_file: A scalar `Tensor` of type `string` containing the
path to the old vocabulary file.
new_vocab_file: A scalar `Tensor` of type `string` containing the
path to the new vocabulary file.
num_oov_buckets: `int` specifying the number of out-of-vocabulary
buckets to use. Must be >= 0.
initializer: Initializer function that accepts a 1-D tensor as the arg to
specify the shape of the returned tensor. If `None`, defaults to using
`truncated_normal_initializer()`.
max_rows_in_memory: `int` specifying the maximum number of rows to load from
the checkpoint at once. If less than or equal to 0, the entire matrix will
be loaded into memory. Setting this arg trades increased disk reads for
lower memory usage.
Returns:
A variable initializer function.
"""
if initializer is None:
# TODO(b/25671353): This should be kept in sync with the stddev used by
# feature_column.py's _EmbeddingColumn.
initializer = init_ops.truncated_normal_initializer(
stddev=1.0 / math.sqrt(embedding_dim))
return load_and_remap_matrix_initializer(
ckpt_path=ckpt_path,
old_tensor_name=embedding_tensor_name,
new_row_vocab_size=new_vocab_size,
new_col_vocab_size=embedding_dim,
old_row_vocab_file=old_vocab_file,
new_row_vocab_file=new_vocab_file,
old_col_vocab_file=None,
new_col_vocab_file=None,
num_row_oov_buckets=num_oov_buckets,
num_col_oov_buckets=0,
initializer=initializer,
max_rows_in_memory=max_rows_in_memory)
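# A hedged usage sketch (checkpoint path, tensor name, vocab files, and sizes
# are illustrative placeholders; `import tensorflow as tf` is assumed at the
# call site): load pre-trained embeddings into a variable whose shape accounts
# for the OOV buckets.
#
#     embedding_init = load_embedding_initializer(
#         ckpt_path='/tmp/model.ckpt',
#         embedding_tensor_name='input_layer/embedding/weights',
#         new_vocab_size=1000,
#         embedding_dim=16,
#         old_vocab_file='/tmp/old_vocab.txt',
#         new_vocab_file='/tmp/new_vocab.txt',
#         num_oov_buckets=1)
#     embeddings = tf.get_variable(
#         'embeddings', shape=[1000 + 1, 16], initializer=embedding_init)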
def load_linear_multiclass_bias_initializer(ckpt_path,
bias_tensor_name,
new_class_vocab_size,
old_class_vocab_file,
new_class_vocab_file,
num_class_oov_buckets=0,
initializer=None,
max_rows_in_memory=-1):
"""Loads pre-trained multi-class biases for linear models from checkpoint.
Wrapper around `load_and_remap_matrix_initializer()` specialized for loading
multi-class bias and remapping according to the provided vocab files. See docs
for `load_and_remap_matrix_initializer()` for more details. In this case, the
provided row_vocab is the class vocabulary, and the expected shape is
`[new_class_vocab_size, 1]`.
Args:
ckpt_path: Path to the TensorFlow checkpoint (version 2, `TensorBundle`)
from which the old matrix `Tensor` will be loaded.
bias_tensor_name: Tensor name to load from in the checkpoints.
new_class_vocab_size: Number of entries in the new class vocab.
old_class_vocab_file: A scalar `Tensor` of type `string` containing the
path to the old class vocabulary file.
new_class_vocab_file: A scalar `Tensor` of type `string` containing the
path to the new class vocabulary file.
num_class_oov_buckets: `int` specifying the number of out-of-vocabulary
buckets to use for the classes. Must be >= 0.
initializer: Initializer function that accepts a 1-D tensor as the arg to
specify the shape of the returned tensor. If `None`, defaults to using
`zeros_initializer()`.
max_rows_in_memory: `int` specifying the maximum number of rows to load from
the checkpoint at once. If less than or equal to 0, the entire matrix will
be loaded into memory. Setting this arg trades increased disk reads for
lower memory usage.
Returns:
A variable initializer function.
"""
# Linear multi-class biases should be zero-initialized.
if initializer is None:
initializer = init_ops.zeros_initializer()
return load_and_remap_matrix_initializer(
ckpt_path=ckpt_path,
old_tensor_name=bias_tensor_name,
new_row_vocab_size=new_class_vocab_size,
new_col_vocab_size=1,
old_row_vocab_file=old_class_vocab_file,
new_row_vocab_file=new_class_vocab_file,
old_col_vocab_file=None,
new_col_vocab_file=None,
num_row_oov_buckets=num_class_oov_buckets,
num_col_oov_buckets=0,
initializer=initializer,
max_rows_in_memory=max_rows_in_memory)
def load_variable_slot_initializer(ckpt_path,
old_tensor_name,
primary_partition_info,
new_row_vocab_size,
new_col_vocab_size,
old_row_vocab_file=None,
new_row_vocab_file=None,
old_col_vocab_file=None,
new_col_vocab_file=None,
num_row_oov_buckets=0,
num_col_oov_buckets=0,
initializer=None,
max_rows_in_memory=-1):
"""Loads pre-trained multi-class slots for linear models from checkpoint.
Wrapper around `load_and_remap_matrix_initializer()` specialized for loading
multi-class slots (such as optimizer accumulators) and remapping them
according to the provided vocab files. See docs for
`load_and_remap_matrix_initializer()` for more details. Takes in a
`variable_scope._PartitionInfo` representing the slot's primary `Variable`'s
partitioning. This is necessary since accumulator `Variable` creation ignores
primary scoping and partitioning information.
Args:
ckpt_path: Path to the TensorFlow checkpoint (version 2, `TensorBundle`)
from which the old matrix `Tensor` will be loaded.
old_tensor_name: Name of the 2-D `Tensor` to load from checkpoint.
primary_partition_info: A `variable_scope._PartitionInfo` containing this
slot's primary `Variable`'s partitioning information. This is used to
calculate the offset and override the partition_info passed to the call to
_initialize.
new_row_vocab_size: `int` specifying the number of entries in
`new_row_vocab_file`. If no row remapping is needed (no row vocab
provided), this should be equal to the number of rows to load from the old
matrix (which can theoretically be smaller than the number of rows in the
old matrix).
new_col_vocab_size: `int` specifying the number of entries in
`new_col_vocab_file`. If no column remapping is needed (no column vocab
provided), this should be equal to the number of columns in the old
matrix.
old_row_vocab_file: A scalar `Tensor` of type `string` containing the
path to the old row vocabulary file. Can be None, which represents no
remapping on the row axis.
new_row_vocab_file: A scalar `Tensor` of type `string` containing the path
to the new row vocabulary file. Can be None, which represents no remapping
on the row axis.
old_col_vocab_file: A scalar `Tensor` of type `string` containing the
path to the old column vocabulary file. Can be None, which represents no
remapping on the column axis.
new_col_vocab_file: A scalar `Tensor` of type `string` containing the path
to the new column vocabulary file. Can be None, which represents no
remapping on the column axis.
num_row_oov_buckets: `int` specifying the number of out-of-vocabulary rows
to append. Must be >= 0.
num_col_oov_buckets: `int` specifying the number of out-of-vocabulary
columns to append. Must be >= 0.
initializer: Initializer function to initialize missing values. Accepts a
1-D tensor as the arg to specify the shape of the returned tensor. If
`None`, defaults to using `zeros_initializer()`.
max_rows_in_memory: `int` specifying the maximum number of rows to load from
the checkpoint at once. If less than or equal to 0, the entire matrix will
be loaded into memory. Setting this arg trades increased disk reads for
lower memory usage.
Returns:
A variable initializer function that should be used to initialize a
(potentially partitioned) `Variable` whose complete shape is
`[new_row_vocab_size + num_row_oov_buckets, new_col_vocab_size +
num_col_oov_buckets]`.
Raises:
TypeError: If `initializer` is specified but not callable.
"""
initializer_fn = load_and_remap_matrix_initializer(
ckpt_path=ckpt_path,
old_tensor_name=old_tensor_name,
new_row_vocab_size=new_row_vocab_size,
new_col_vocab_size=new_col_vocab_size,
old_row_vocab_file=old_row_vocab_file,
new_row_vocab_file=new_row_vocab_file,
old_col_vocab_file=old_col_vocab_file,
new_col_vocab_file=new_col_vocab_file,
num_row_oov_buckets=num_row_oov_buckets,
num_col_oov_buckets=num_col_oov_buckets,
initializer=initializer,
max_rows_in_memory=max_rows_in_memory)
def _initializer(shape, dtype=dtypes.float32, partition_info=None):
del partition_info # Unused by this override.
return initializer_fn(shape, dtype, partition_info=primary_partition_info)
return _initializer
#!/usr/bin/env python
"""This module contains the unit tests for the pytodoist.todoist module."""
import time
import unittest
from pytodoist import todoist
from pytodoist.test.util import create_user
# Todoist occasionally changes these defaults, which will cause tests to fail.
N_DEFAULT_TASKS = 13
N_DEFAULT_PROJECTS = 3
N_DEFAULT_FILTERS = 2
_INBOX_PROJECT_NAME = 'Inbox'
_PROJECT_NAME = 'Test Project'
_TASK = 'Test Task'
_LABEL = 'test_label'
_NOTE = 'Test Note'
_FILTER = 'Test Filter'
class UserTest(unittest.TestCase):
def setUp(self):
self.user = create_user()
time.sleep(30) # Rate limit ourselves to avoid a server rate limit.
def tearDown(self):
self.user.delete()
def test_login_success(self):
todoist.login(self.user.email, self.user.password)
def test_login_failure(self):
with self.assertRaises(todoist.RequestError):
todoist.login(self.user.email, '')
def test_login_with_google_success(self):
pass # TODO
def test_login_with_google_failure(self):
with self.assertRaises(todoist.RequestError):
todoist.login_with_google(self.user.email, '')
def test_login_with_api_token_success(self):
todoist.login_with_api_token(self.user.token)
def test_login_with_api_token_failure(self):
with self.assertRaises(todoist.RequestError):
todoist.login_with_api_token('')
def test_register_success(self):
try:
user = create_user()
user.delete()
except todoist.RequestError:
self.fail("register(...) raised an exception")
def test_register_failure(self):
with self.assertRaises(todoist.RequestError):
todoist.register('', '', '')
def test_register_already_registered(self):
with self.assertRaises(todoist.RequestError):
todoist.register(self.user.full_name, self.user.email,
self.user.password)
def test_register_with_google_success(self):
pass # TODO
def test_register_with_google_failure(self):
with self.assertRaises(todoist.RequestError):
todoist.register_with_google('', '', '')
def test_get_redirect_link(self):
link = self.user.get_redirect_link()
self.assertIsNotNone(link)
def test_update(self):
new_name = self.user.full_name + ' Jnr'
self.user.full_name = new_name
self.user.update()
self.user = todoist.login(self.user.email, self.user.password)
self.assertEqual(self.user.full_name, new_name)
def test_quick_add(self):
text = 'Buy milk #Inbox'
task = self.user.quick_add(text)
self.assertEqual(task.content, 'Buy milk')
def test_add_project(self):
self.user.add_project(_PROJECT_NAME)
projects = self.user.get_projects()
self.assertEqual(len(projects), N_DEFAULT_PROJECTS + 1)
project = self.user.get_project(_PROJECT_NAME)
self.assertIsNotNone(project)
self.assertEqual(project.name, _PROJECT_NAME)
def test_get_projects(self):
for i in range(5):
self.user.add_project(_PROJECT_NAME + str(i))
projects = self.user.get_projects()
self.assertEqual(len(projects), N_DEFAULT_PROJECTS + 5)
for project in projects:
self.assertIsNotNone(project)
def test_get_project_success(self):
inbox = self.user.get_project(_INBOX_PROJECT_NAME)
self.assertIsNotNone(inbox)
self.assertEqual(inbox.name, _INBOX_PROJECT_NAME)
def test_get_project_failure(self):
project = self.user.get_project('')
self.assertIsNone(project)
def test_get_archived_projects(self):
n_arch_projects = len(self.user.get_archived_projects())
self.assertEqual(n_arch_projects, 0)
project = self.user.add_project(_PROJECT_NAME)
project.archive()
n_arch_projects = len(self.user.get_archived_projects())
self.assertEqual(n_arch_projects, 1)
def test_get_uncompleted_tasks(self):
inbox = self.user.get_project(_INBOX_PROJECT_NAME)
inbox.add_task(_TASK)
tasks = self.user.get_uncompleted_tasks()
self.assertGreater(len(tasks), 0)
def test_get_completed_tasks(self):
inbox = self.user.get_project(_INBOX_PROJECT_NAME)
task = inbox.add_task(_TASK)
task.complete()
completed_tasks = self.user.get_completed_tasks()
self.assertEqual(len(completed_tasks), 1)
def test_get_tasks(self):
inbox = self.user.get_project(_INBOX_PROJECT_NAME)
inbox.add_task(_TASK)
inbox.add_task(_TASK + '2')
tasks = self.user.get_tasks()
for task in tasks:
self.assertIsNotNone(task)
def test_add_label(self):
self.user.add_label(_LABEL)
labels = self.user.get_labels()
self.assertEqual(len(labels), 1)
label = labels[0]
self.assertEqual(label.name, _LABEL)
def test_get_label(self):
self.user.add_label(_LABEL)
label = self.user.get_label(_LABEL)
self.assertIsNotNone(label)
self.assertEqual(label.name, _LABEL)
def test_get_labels(self):
for i in range(5):
self.user.add_label(_LABEL + str(i))
labels = self.user.get_labels()
self.assertEqual(len(labels), 5)
for label in labels:
self.assertIsNotNone(label)
def test_add_filter(self):
self.user.add_filter(_FILTER, 'today')
flters = self.user.get_filters()
self.assertEqual(len(flters), N_DEFAULT_FILTERS + 1)
def test_get_filter(self):
self.user.add_filter(_FILTER, 'today')
flter = self.user.get_filter(_FILTER)
self.assertIsNotNone(flter)
self.assertEqual(flter.name, _FILTER)
def test_search_tasks_overdue(self):
inbox = self.user.get_project(_INBOX_PROJECT_NAME)
inbox.add_task(_TASK, date='today')
inbox.add_task(_TASK + '2', date='1 Jan 2000')
tasks = self.user.search_tasks(todoist.Query.OVERDUE)
self.assertEqual(len(tasks), 1)
def test_get_productivity_stats(self):
stats = self.user.get_productivity_stats()
self.assertIsNotNone(stats)
self.assertIn('karma', stats)
def test_enable_disable_karma(self):
# Just make sure we don't throw any exceptions.
self.user.disable_karma()
self.user.enable_karma()
def test_enable_disable_vacation(self):
# Just make sure we don't throw any exceptions.
self.user.disable_vacation()
self.user.enable_vacation()
def test_update_goals(self):
# Just make sure we don't throw any exceptions.
self.user.update_daily_karma_goal(10)
self.user.update_weekly_karma_goal(100)
def test_clear_reminder_locations(self):
# Just make sure we don't throw any exceptions.
self.user.clear_reminder_locations()
class ProjectTest(unittest.TestCase):
def setUp(self):
self.user = create_user()
time.sleep(10) # Rate limit ourselves to avoid a server rate limit.
self.user.add_project(_PROJECT_NAME)
self.project = self.user.get_project(_PROJECT_NAME)
def tearDown(self):
self.user.delete()
def test_delete(self):
self.project.delete()
projects = [p for p in self.user.get_projects() if not p.is_deleted]
self.assertEqual(len(projects), N_DEFAULT_PROJECTS)
def test_update(self):
new_name = _PROJECT_NAME + '2'
self.project.name = new_name
self.project.update()
project = self.user.get_project(new_name)
self.assertEqual(project.name, new_name)
def test_archive(self):
self.project.archive()
for project in self.user.get_archived_projects():
if project.id == self.project.id:
self.assertTrue(project.is_archived)
def test_unarchive(self):
self.project.archive()
for project in self.user.get_archived_projects():
if project.id == self.project.id:
self.assertTrue(project.is_archived)
self.project.unarchive()
self.project = self.user.get_project(self.project.name)
self.assertFalse(self.project.is_archived)
def test_collapse(self):
self.assertFalse(self.project.collapsed)
self.project.collapse()
self.project = self.user.get_project(self.project.name)
self.assertTrue(self.project.collapsed)
def test_add_task(self):
self.project.add_task(_TASK)
tasks = self.project.get_tasks()
self.assertEqual(len(tasks), 1)
def test_get_tasks(self):
for i in range(5):
self.project.add_task(_TASK + str(i))
inbox = self.user.get_project('Inbox')
inbox.add_task(_TASK)
tasks = self.project.get_tasks()
self.assertEqual(len(tasks), 5)
def test_get_uncompleted_tasks(self):
for i in range(5):
self.project.add_task(_TASK + str(i))
tasks = self.project.get_uncompleted_tasks()
self.assertEqual(len(tasks), 5)
def test_share(self):
self.project.share('[email protected]')
def test_delete_collaborator(self):
self.project.share('[email protected]')
self.project.delete_collaborator('[email protected]')
class TaskTest(unittest.TestCase):
def setUp(self):
self.user = create_user()
time.sleep(10) # Rate limit ourselves to avoid a server rate limit.
self.project = self.user.add_project(_PROJECT_NAME)
self.task = self.project.add_task(_TASK, date='every day')
def tearDown(self):
self.user.delete()
def test_update(self):
new_content = _TASK + '2'
self.task.content = new_content
self.task.update()
tasks = self.project.get_tasks()
for task in tasks:
if task.id == self.task.id:
self.assertEqual(task.content, new_content)
def test_delete(self):
tasks = self.project.get_tasks()
self.assertEqual(len(tasks), 1)
self.task.delete()
tasks = [t for t in self.project.get_tasks() if not t.is_deleted]
self.assertEqual(len(tasks), 0)
def test_complete(self):
self.task.complete()
tasks = self.project.get_completed_tasks()
self.assertEqual(len(tasks), 1)
def test_uncomplete(self):
self.task.complete()
self.task.uncomplete()
tasks = self.project.get_uncompleted_tasks()
self.assertEqual(len(tasks), 1)
def test_add_note(self):
self.task.add_note(_NOTE)
notes = self.task.get_notes()
self.assertEqual(len(notes), 1)
self.assertEqual(notes[0].content, _NOTE)
def test_get_notes(self):
for i in range(5):
self.task.add_note(_NOTE + str(i))
notes = self.task.get_notes()
self.assertEqual(len(notes), 5)
def test_move(self):
inbox = self.user.get_project(_INBOX_PROJECT_NAME)
self.task.move(inbox)
tasks = inbox.get_tasks()
self.assertEqual(len(tasks), 1)
def test_add_date_reminder(self):
self.task.add_date_reminder('email', '2050-3-24T23:59')
def test_add_location_reminder(self):
self.task.add_location_reminder('email', 'Leave Glasgow',
'55.8580', '4.2590', 'on_leave',
100)
if __name__ == '__main__':
unittest.main()
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
JSONType = Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_create_or_update_request_initial(
subscription_id: str,
resource_group_name: str,
disk_encryption_set_name: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2019-07-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"diskEncryptionSetName": _SERIALIZER.url("disk_encryption_set_name", disk_encryption_set_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_update_request_initial(
subscription_id: str,
resource_group_name: str,
disk_encryption_set_name: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2019-07-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"diskEncryptionSetName": _SERIALIZER.url("disk_encryption_set_name", disk_encryption_set_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PATCH",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_get_request(
subscription_id: str,
resource_group_name: str,
disk_encryption_set_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2019-07-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"diskEncryptionSetName": _SERIALIZER.url("disk_encryption_set_name", disk_encryption_set_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_delete_request_initial(
subscription_id: str,
resource_group_name: str,
disk_encryption_set_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2019-07-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"diskEncryptionSetName": _SERIALIZER.url("disk_encryption_set_name", disk_encryption_set_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="DELETE",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_list_by_resource_group_request(
subscription_id: str,
resource_group_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2019-07-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_list_request(
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2019-07-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/diskEncryptionSets')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
class DiskEncryptionSetsOperations(object):
"""DiskEncryptionSetsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2019_07_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _create_or_update_initial(
self,
resource_group_name: str,
disk_encryption_set_name: str,
disk_encryption_set: "_models.DiskEncryptionSet",
**kwargs: Any
) -> "_models.DiskEncryptionSet":
cls = kwargs.pop('cls', None) # type: ClsType["_models.DiskEncryptionSet"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(disk_encryption_set, 'DiskEncryptionSet')
request = build_create_or_update_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
disk_encryption_set_name=disk_encryption_set_name,
content_type=content_type,
json=_json,
template_url=self._create_or_update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('DiskEncryptionSet', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('DiskEncryptionSet', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}'} # type: ignore
@distributed_trace
def begin_create_or_update(
self,
resource_group_name: str,
disk_encryption_set_name: str,
disk_encryption_set: "_models.DiskEncryptionSet",
**kwargs: Any
) -> LROPoller["_models.DiskEncryptionSet"]:
"""Creates or updates a disk encryption set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param disk_encryption_set_name: The name of the disk encryption set that is being created. The
name can't be changed after the disk encryption set is created. Supported characters for the
name are a-z, A-Z, 0-9 and _. The maximum name length is 80 characters.
:type disk_encryption_set_name: str
:param disk_encryption_set: disk encryption set object supplied in the body of the Put disk
encryption set operation.
:type disk_encryption_set: ~azure.mgmt.compute.v2019_07_01.models.DiskEncryptionSet
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either DiskEncryptionSet or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2019_07_01.models.DiskEncryptionSet]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.DiskEncryptionSet"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
disk_encryption_set_name=disk_encryption_set_name,
disk_encryption_set=disk_encryption_set,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('DiskEncryptionSet', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}'} # type: ignore
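    # Illustrative usage only (not part of the generated client): a minimal
    # sketch of driving this LRO through ComputeManagementClient. The resource
    # names, key vault id and key URL below are placeholders, and the model
    # classes are assumed from azure-mgmt-compute / azure-identity.
    #
    #   from azure.identity import DefaultAzureCredential
    #   from azure.mgmt.compute import ComputeManagementClient
    #   from azure.mgmt.compute.v2019_07_01.models import (
    #       DiskEncryptionSet, EncryptionSetIdentity, KeyVaultAndKeyReference, SourceVault)
    #
    #   client = ComputeManagementClient(DefaultAzureCredential(), "<subscription-id>")
    #   poller = client.disk_encryption_sets.begin_create_or_update(
    #       "<resource-group>", "<des-name>",
    #       DiskEncryptionSet(
    #           location="westus",
    #           identity=EncryptionSetIdentity(type="SystemAssigned"),
    #           active_key=KeyVaultAndKeyReference(
    #               source_vault=SourceVault(id="<key-vault-resource-id>"),
    #               key_url="<key-identifier-url>")))
    #   disk_encryption_set = poller.result()  # blocks until the LRO completes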
def _update_initial(
self,
resource_group_name: str,
disk_encryption_set_name: str,
disk_encryption_set: "_models.DiskEncryptionSetUpdate",
**kwargs: Any
) -> "_models.DiskEncryptionSet":
cls = kwargs.pop('cls', None) # type: ClsType["_models.DiskEncryptionSet"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(disk_encryption_set, 'DiskEncryptionSetUpdate')
request = build_update_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
disk_encryption_set_name=disk_encryption_set_name,
content_type=content_type,
json=_json,
template_url=self._update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('DiskEncryptionSet', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('DiskEncryptionSet', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}'} # type: ignore
@distributed_trace
def begin_update(
self,
resource_group_name: str,
disk_encryption_set_name: str,
disk_encryption_set: "_models.DiskEncryptionSetUpdate",
**kwargs: Any
) -> LROPoller["_models.DiskEncryptionSet"]:
"""Updates (patches) a disk encryption set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param disk_encryption_set_name: The name of the disk encryption set that is being created. The
name can't be changed after the disk encryption set is created. Supported characters for the
name are a-z, A-Z, 0-9 and _. The maximum name length is 80 characters.
:type disk_encryption_set_name: str
:param disk_encryption_set: disk encryption set object supplied in the body of the Patch disk
encryption set operation.
:type disk_encryption_set: ~azure.mgmt.compute.v2019_07_01.models.DiskEncryptionSetUpdate
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either DiskEncryptionSet or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2019_07_01.models.DiskEncryptionSet]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.DiskEncryptionSet"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_initial(
resource_group_name=resource_group_name,
disk_encryption_set_name=disk_encryption_set_name,
disk_encryption_set=disk_encryption_set,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('DiskEncryptionSet', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}'} # type: ignore
@distributed_trace
def get(
self,
resource_group_name: str,
disk_encryption_set_name: str,
**kwargs: Any
) -> "_models.DiskEncryptionSet":
"""Gets information about a disk encryption set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param disk_encryption_set_name: The name of the disk encryption set that is being created. The
name can't be changed after the disk encryption set is created. Supported characters for the
name are a-z, A-Z, 0-9 and _. The maximum name length is 80 characters.
:type disk_encryption_set_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DiskEncryptionSet, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2019_07_01.models.DiskEncryptionSet
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DiskEncryptionSet"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
disk_encryption_set_name=disk_encryption_set_name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DiskEncryptionSet', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}'} # type: ignore
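    # Illustrative usage only (names are placeholders):
    #
    #   des = client.disk_encryption_sets.get("<resource-group>", "<des-name>")
    #   print(des.id, des.provisioning_state)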
def _delete_initial(
self,
resource_group_name: str,
disk_encryption_set_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
disk_encryption_set_name=disk_encryption_set_name,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}'} # type: ignore
@distributed_trace
def begin_delete(
self,
resource_group_name: str,
disk_encryption_set_name: str,
**kwargs: Any
) -> LROPoller[None]:
"""Deletes a disk encryption set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param disk_encryption_set_name: The name of the disk encryption set that is being created. The
name can't be changed after the disk encryption set is created. Supported characters for the
name are a-z, A-Z, 0-9 and _. The maximum name length is 80 characters.
:type disk_encryption_set_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
disk_encryption_set_name=disk_encryption_set_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}'} # type: ignore
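    # Illustrative usage only: deletion follows the same LRO pattern; the
    # names below are placeholders.
    #
    #   poller = client.disk_encryption_sets.begin_delete("<resource-group>", "<des-name>")
    #   poller.wait()  # poller.result() likewise returns None on completion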
@distributed_trace
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs: Any
) -> Iterable["_models.DiskEncryptionSetList"]:
"""Lists all the disk encryption sets under a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DiskEncryptionSetList or the result of
cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2019_07_01.models.DiskEncryptionSetList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DiskEncryptionSetList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_resource_group_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
template_url=self.list_by_resource_group.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_resource_group_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("DiskEncryptionSetList", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets'} # type: ignore
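    # Illustrative usage only: the returned ItemPaged iterates individual
    # DiskEncryptionSet objects across pages, so callers rarely touch
    # DiskEncryptionSetList directly.
    #
    #   for des in client.disk_encryption_sets.list_by_resource_group("<resource-group>"):
    #       print(des.name, des.provisioning_state)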
@distributed_trace
def list(
self,
**kwargs: Any
) -> Iterable["_models.DiskEncryptionSetList"]:
"""Lists all the disk encryption sets under a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DiskEncryptionSetList or the result of
cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2019_07_01.models.DiskEncryptionSetList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DiskEncryptionSetList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
subscription_id=self._config.subscription_id,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("DiskEncryptionSetList", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/diskEncryptionSets'} # type: ignore
|
|
# Copyright 2018 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
"""API for interacting with the LUCI Scheduler service.
Depends on 'prpc' binary available in $PATH:
https://godoc.org/go.chromium.org/luci/grpc/cmd/prpc
Documentation for scheduler API is in
https://chromium.googlesource.com/infra/luci/luci-go/+/main/scheduler/api/scheduler/v1/scheduler.proto
RPCExplorer available at
https://luci-scheduler.appspot.com/rpcexplorer/services/scheduler.Scheduler
"""
from future.utils import iteritems
import copy
import uuid
from google.protobuf import json_format
from recipe_engine import recipe_api
from PB.go.chromium.org.luci.scheduler.api.scheduler.v1 import (
triggers as triggers_pb2)
class SchedulerApi(recipe_api.RecipeApi):
"""A module for interacting with LUCI Scheduler service."""
def __init__(self, init_state, **kwargs):
super(SchedulerApi, self).__init__(**kwargs)
self._host = init_state.get('hostname') or 'luci-scheduler.appspot.com'
self._fake_uuid_count = 0
self._triggers = []
for t_dict in init_state.get('triggers') or []:
self._triggers.append(
json_format.ParseDict(t_dict, triggers_pb2.Trigger()))
@property
def triggers(self):
"""Returns a list of triggers that triggered the current build.
A trigger is an instance of triggers_pb2.Trigger.
"""
return copy.copy(self._triggers)
@property
def host(self):
"""Returns the backend hostname used by this module."""
return self._host
def set_host(self, host):
"""Changes the backend hostname used by this module.
Args:
host (str): server host (e.g. 'luci-scheduler.appspot.com').
"""
self._host = host
class Trigger(object):
"""Generic Trigger accepted by LUCI Scheduler API.
Don't instantiate Trigger itself. Use either BuildbucketTrigger or
GitilesTrigger instead.
All supported triggers are documented here:
https://chromium.googlesource.com/infra/luci/luci-go/+/main/scheduler/api/scheduler/v1/triggers.proto
"""
def __init__(
self, id=None, title=None, url=None,
properties=None, tags=None, inherit_tags=True):
self._id = id
self._title = title
self._url = url
self._properties = properties
self._tags = tags
self._inherit_tags = inherit_tags
def _serialize(self, api_self):
t = {}
t['id'] = self._id or api_self._next_uuid()
t['title'] = self._title or ('%s/%s' % (
api_self.m.buildbucket.builder_name,
api_self.m.buildbucket.build.number))
# TODO(tandrii): find a way to get URL of current build.
if self._url:
t['url'] = self._url
tags = {}
if self._inherit_tags:
tags = self._tags_for_child_build(api_self.m.buildbucket.build).copy()
if self._tags:
tags.update(self._tags)
self._cleanup_tags(tags)
tags = list(
map(':'.join,
sorted((k, v) for k, v in iteritems(tags) if v is not None)))
base = {}
if self._properties:
base['properties'] = self._properties
if tags:
base['tags'] = tags
t.update(self._serialize_payload(base))
return t
def _tags_for_child_build(self, build): # pragma: no cover
"""A dict of tags (key -> value) derived from current (parent) build for a
child build."""
original_tags = {
t.key: t.value for t in build.tags
}
new_tags = {'user_agent': 'recipe'}
commit = build.input.gitiles_commit
if build.input.gerrit_changes:
cl = build.input.gerrit_changes[0]
new_tags['buildset'] = 'patch/gerrit/%s/%d/%d' % (
cl.host, cl.change, cl.patchset)
# Note: an input gitiles commit with ref without id is valid
# but such commit cannot be used to construct a valid commit buildset.
elif commit.host and commit.project and commit.id:
new_tags['buildset'] = (
'commit/gitiles/%s/%s/+/%s' % (
commit.host, commit.project, commit.id))
if commit.ref:
new_tags['gitiles_ref'] = commit.ref
else:
buildset = original_tags.get('buildset')
if buildset:
new_tags['buildset'] = buildset
if build.number:
new_tags['parent_buildnumber'] = str(build.number)
if build.builder.builder:
new_tags['parent_buildername'] = str(build.builder.builder)
return new_tags
def _cleanup_tags(self, tags):
pass
def _serialize_payload(self, base):
raise NotImplementedError() # pragma: no cover
class BuildbucketTrigger(Trigger):
"""Trigger with buildbucket payload for buildbucket jobs.
Args:
properties (dict, optional): key -> value properties.
tags (dict, optional): custom tags to add. See also `inherit_tags`.
If tag's value is None, this tag will be removed from resulting tags,
however if you rely on this, consider using `inherit_tags=False`
instead.
inherit_tags (bool): if true (default), auto-adds tags using
`api.buildbucket.tags_for_child_build` api.
"""
def _serialize_payload(self, base):
return {'buildbucket': base}
class GitilesTrigger(Trigger):
"""Trigger with new Gitiles commit payload, typically for buildbucket jobs.
Args:
repo (str): URL of a repo that changed.
ref (str): a ref that changed, in full, e.g. "refs/heads/main".
revision (str): a revision (SHA1 in hex) pointed to by the ref.
properties (dict, optional): extra key -> value properties.
tags (dict, optional): extra custom tags to add. See also `inherit_tags`.
If tag's value is None, this tag will be removed from resulting tags,
however if you rely on this, consider using `inherit_tags=False`
instead.
inherit_tags (bool): if true (default), auto-adds extra tags using
`api.buildbucket.tags_for_child_build` api.
"""
def __init__(self, repo, ref, revision, **kwargs):
super(SchedulerApi.GitilesTrigger, self).__init__(**kwargs)
self._repo = repo
self._ref = ref
self._revision = revision
def _cleanup_tags(self, tags):
# These tags are populated based on the triggered commit by Buildbucket.
# They could have been inherited (with wrong values) from
# _tags_for_child_build. Drop them.
tags.pop('buildset', None)
tags.pop('gitiles_ref', None)
def _serialize_payload(self, base):
base.update({
'repo': self._repo,
'ref': self._ref,
'revision': self._revision,
})
return {'gitiles': base}
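  # Illustrative only (not part of the module): constructing a GitilesTrigger;
  # the repo URL, ref and revision below are placeholders.
  #
  #   trigger = api.scheduler.GitilesTrigger(
  #       repo='https://chromium.googlesource.com/chromium/src',
  #       ref='refs/heads/main',
  #       revision='deadbeefdeadbeefdeadbeefdeadbeefdeadbeef')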
def emit_trigger(self, trigger, project, jobs, step_name=None):
"""Emits trigger to one or more jobs of a given project.
Args:
trigger (Trigger): defines payload to trigger jobs with.
project (str): name of the project in LUCI Config service, which is used
by LUCI Scheduler instance. See https://luci-config.appspot.com/.
jobs (iterable of str): job names per LUCI Scheduler config for the given
project. These typically are the same as builder names.
"""
return self.emit_triggers([(trigger, project, jobs)], step_name=step_name)
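  # Illustrative only: emitting a single buildbucket-payload trigger from a
  # recipe; the project and job names are placeholders.
  #
  #   api.scheduler.emit_trigger(
  #       api.scheduler.BuildbucketTrigger(properties={'revision': 'abc123'}),
  #       project='my-project', jobs=['my-builder'])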
def emit_triggers(
self, trigger_project_jobs, timestamp_usec=None, step_name=None):
"""Emits a batch of triggers spanning one or more projects.
Up to date documentation is at
https://chromium.googlesource.com/infra/luci/luci-go/+/main/scheduler/api/scheduler/v1/scheduler.proto
Args:
trigger_project_jobs (iterable of tuples(trigger, project, jobs)):
each tuple corresponds to parameters of `emit_trigger` API above.
timestamp_usec (int): unix timestamp in microseconds.
Useful for idempotency of calls if your recipe is doing its own retries.
https://chromium.googlesource.com/infra/luci/luci-go/+/main/scheduler/api/scheduler/v1/triggers.proto
"""
req = {
'batches': [
{
'trigger': trigger._serialize(self),
'jobs': [{'project': project, 'job': job} for job in jobs],
}
for trigger, project, jobs in trigger_project_jobs
],
}
if timestamp_usec:
assert isinstance(timestamp_usec, int), timestamp_usec
else:
timestamp_usec = int(self.m.time.time() * 1e6)
req['timestamp'] = timestamp_usec
# There is no output from EmitTriggers API.
self._run(
'EmitTriggers', req, step_name=step_name,
step_test_data=lambda: self.m.json.test_api.output_stream({}))
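  # Illustrative only: one batched call spanning two projects; pinning
  # timestamp_usec makes recipe-level retries idempotent. The names are
  # placeholders and `trigger` is any Trigger subclass instance.
  #
  #   api.scheduler.emit_triggers(
  #       [(trigger, 'project-a', ['job-1', 'job-2']),
  #        (trigger, 'project-b', ['job-3'])],
  #       timestamp_usec=int(api.time.time() * 1e6))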
def _run(self, method, input_data, step_test_data=None, step_name=None):
# TODO(tandrii): encapsulate running prpc command in a standalone module.
step_name = step_name or ('luci-scheduler.' + method)
args = ['prpc', 'call', '-format=json', self._host,
'scheduler.Scheduler.' + method]
step_result = None
try:
step_result = self.m.step(
step_name,
args,
stdin=self.m.json.input(input_data),
stdout=self.m.json.output(add_json_log='on_failure'),
infra_step=True,
step_test_data=step_test_data)
# TODO(tandrii): add hostname to step presentation's links.
# TODO(tandrii): handle errors nicely.
finally:
self.m.step.active_result.presentation.logs['input'] = self.m.json.dumps(
input_data, indent=4).splitlines()
return step_result.stdout
def _next_uuid(self):
if self._test_data.enabled:
self._fake_uuid_count += 1
return '6a0a73b0-070b-492b-9135-9f26a2a' + '%05d' % (
self._fake_uuid_count,)
else: # pragma: no cover
return str(uuid.uuid4())
|
|
import re
import json
import time
import types
import urllib
import pymongo
import decimal
import functools
import unicodecsv
import datetime
import urlparse
from bson import ObjectId
from operator import itemgetter
from itertools import chain, imap
from collections import defaultdict, OrderedDict
from django.core import urlresolvers
from django.shortcuts import render, redirect
from django.http import Http404, HttpResponse
from django.template.loader import render_to_string
from django.views.decorators.cache import never_cache
from django.views.decorators.http import require_http_methods
from billy.core import db, mdb, settings
from billy.utils import metadata, JSONEncoderPlus
from billy.importers.utils import merge_legislators
from billy.importers.legislators import deactivate_legislators
from billy.reports.utils import get_quality_exceptions, QUALITY_EXCEPTIONS
from billy.web.admin.decorators import is_superuser
def _meta_and_report(abbr):
meta = metadata(abbr)
if not meta:
raise Http404('No metadata found for abbreviation %r.' % abbr)
report = db.reports.find_one({'_id': abbr})
if not report:
raise Http404('No reports found for abbreviation %r.' % abbr)
return meta, report
def keyfunc(obj):
try:
return int(obj['district'])
except ValueError:
return obj['district']
@is_superuser
def _csv_response(request, csv_name, columns, data, abbr):
if 'csv' in request.REQUEST:
resp = HttpResponse(content_type="text/csv")
resp['Content-Disposition'] = 'attachment; filename=%s_%s.csv' % (
abbr, csv_name)
out = unicodecsv.writer(resp)
for item in data:
out.writerow(item)
return resp
else:
return render(request, 'billy/generic_table.html',
{'columns': columns,
'data': data, 'metadata': metadata(abbr)})
@is_superuser
def browse_index(request, template='billy/index.html'):
rows = []
spec = {}
only = request.GET.get('only', [])
if only:
spec = {'_id': {'$in': only.split(',')}}
for report in db.reports.find(spec, fields=('legislators', 'committees',
'votes',
'bills.actions_per_type',
'bills.versionless_count',
'bills.actionless_count',
'bills.sponsorless_count',
'bills.sponsors_with_id',
'bills.duplicate_versions',
'bills.have_subjects',
)):
report['id'] = report['_id']
meta = db.metadata.find_one({'_id': report['_id']})
report['name'] = meta['name']
report['chambers'] = meta['chambers'].keys()
report['influence_explorer'] = ('influenceexplorer' in
meta['feature_flags'])
report['bills']['typed_actions'] = (
100 - report['bills']['actions_per_type'].get('other', 100))
rows.append(report)
rows.sort(key=lambda x: x['name'])
return render(request, template, {'rows': rows})
@is_superuser
def overview(request, abbr):
meta, report = _meta_and_report(abbr)
context = {}
context['metadata'] = meta
context['report'] = report
context['sessions'] = db.bills.find({settings.LEVEL_FIELD: abbr}
).distinct('session')
def _add_time_delta(runlog):
time_delta = runlog['scraped']['ended'] - runlog['scraped']['started']
runlog['scraped']['time_delta'] = datetime.timedelta(time_delta.days,
time_delta.seconds
)
try:
runlog = db.billy_runs.find({"abbr": abbr}).sort(
"scraped.started", direction=pymongo.DESCENDING)[0]
_add_time_delta(runlog)
context['runlog'] = runlog
if runlog.get('failure'):
last_success = db.billy_runs.find({"abbr": abbr, "failure": None}
).sort("scraped.started", direction=pymongo.DESCENDING)[0]
_add_time_delta(last_success)
context['last_success'] = last_success
except IndexError:
runlog = False
return render(request, 'billy/overview.html', context)
@is_superuser
def run_detail_graph_data(request, abbr):
def rolling_average(oldAverage, newItem, oldAverageCount):
"""
        Simple, unweighted rolling average. oldAverageCount has to be factored
        back in because the running average stands in for that many items;
        without it, each new value would weigh as much as all previous values
        combined.
"""
return float((newItem + (oldAverageCount * (oldAverage))) /
(oldAverageCount + 1))
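    # Worked example for rolling_average above: starting from an average of 0
    # over 0 items and feeding in 10, 20, 30 gives
    #   (10 + 0 * 0)  / 1 = 10.0
    #   (20 + 1 * 10) / 2 = 15.0
    #   (30 + 2 * 15) / 3 = 20.0
    # i.e. the plain mean of everything seen so far.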
def _do_pie(runs):
excs = {}
for run in runs:
if "failure" in run:
for r in run['scraped']['run_record']:
if "exception" in r:
ex = r['exception']
try:
excs[ex['type']] += 1
except KeyError:
excs[ex['type']] = 1
ret = []
for l in excs:
ret.append([l, excs[l]])
return ret
def _do_stacked(runs):
fields = ["legislators", "bills", "votes", "committees"]
ret = {}
for field in fields:
ret[field] = []
for run in runs:
guy = run['scraped']['run_record']
for field in fields:
try:
g = None
for x in guy:
if x['type'] == field:
g = x
if not g:
raise KeyError("Missing kruft")
delt = (g['end_time'] - g['start_time']).total_seconds()
ret[field].append(delt)
except KeyError: # XXX: THIS MESSES STUFF UP. REVISE.
ret[field].append(0)
l = []
for line in fields:
l.append(ret[line])
return l
def _do_digest(runs):
oldAverage = 0
oldAverageCount = 0
data = {"runs": [], "avgs": [], "stat": []}
for run in runs:
timeDelta = (
run['scraped']['ended'] - run['scraped']['started']
).total_seconds()
oldAverage = rolling_average(oldAverage, timeDelta,
oldAverageCount)
oldAverageCount += 1
stat = "Failure" if "failure" in run else ""
s = time.mktime(run['scraped']['started'].timetuple())
data['runs'].append([s, timeDelta, stat])
data['avgs'].append([s, oldAverage, ''])
data['stat'].append(stat)
return data
history_count = 50
default_spec = {"abbr": abbr}
data = {"lines": {}, "pies": {}, "stacked": {}, "title": {}}
speck = {
"default-stacked": {"run": _do_stacked,
"title": "Last %s runs" % (history_count),
"type": "stacked",
"spec": {}
},
#"default": {"run": _do_digest,
# "title": "Last %s runs" % (history_count),
# "type": "lines",
# "spec": {}
#},
#"clean": {"run": _do_digest,
# "title": "Last %s non-failed runs" % (history_count),
# "type": "lines",
# "spec": {
# "failure": {"$exists": False }
# }
#},
#"failure": {"run": _do_digest,
# "title": "Last %s failed runs" % (history_count),
# "type": "lines",
# "spec": {
# "failure": {"$exists": True }
# }
#},
"falure-pie": {"run": _do_pie,
"title": "Digest of what exceptions have been thrown",
"type": "pies",
"spec": {"failure": {"$exists": True}}
},
}
for line in speck:
query = speck[line]["spec"].copy()
query.update(default_spec)
runs = db.billy_runs.find(query).sort(
"scrape.start", direction=pymongo.ASCENDING)[:history_count]
data[speck[line]['type']][line] = speck[line]["run"](runs)
data['title'][line] = speck[line]['title']
return HttpResponse(
json.dumps(data, cls=JSONEncoderPlus),
#content_type="text/json"
content_type="text/plain")
@is_superuser
def run_detail(request, obj=None):
try:
run = db.billy_runs.find({"_id": ObjectId(obj)})[0]
except IndexError as e:
return render(request, 'billy/run_detail.html', {
"warning": "No records exist. Fetch returned a(n) %s" % (
e.__class__.__name__)})
return render(request, 'billy/run_detail.html', {
"run": run,
"metadata": {"abbreviation": run['abbr'], "name": run['abbr']}
})
@is_superuser
def run_detail_list(request, abbr):
try:
allruns = db.billy_runs.find({"abbr": abbr}
).sort("scraped.started", direction=pymongo.DESCENDING)[:25]
runlog = allruns[0]
except IndexError as e:
return render(request, 'billy/run_detail.html', {
"warning": "No records exist. Fetch returned a(n) %s" % (
e.__class__.__name__)})
# pre-process goodies for the template
runlog['scraped']['t_delta'] = (
runlog['scraped']['ended'] - runlog['scraped']['started'])
for entry in runlog['scraped']['run_record']:
if not "exception" in entry:
entry['t_delta'] = (
entry['end_time'] - entry['start_time'])
context = {"runlog": runlog, "allruns": allruns, "abbr": abbr,
"metadata": metadata(abbr)}
if "failure" in runlog:
context["alert"] = dict(type='error',
title="Exception during Execution",
message="""
                        This build had an exception during its execution. Please check below
                        for the exception and error message.
""")
return render(request, 'billy/run_detail_list.html', context)
@never_cache
@is_superuser
def bills(request, abbr):
meta, report = _meta_and_report(abbr)
terms = list(chain.from_iterable(map(itemgetter('sessions'),
meta['terms'])))
def sorter(item, index=terms.index, len_=len(terms)):
'''Sort session strings in order described in metadata.'''
session, data = item
return index(session)
# Convert sessions into an ordered dict.
sessions = report['bills']['sessions']
sessions = sorted(sessions.items(), key=sorter)
sessions = OrderedDict(sessions)
def decimal_format(value, TWOPLACES=decimal.Decimal(100) ** -1):
'''Format a float like 2.2345123 as a decimal like 2.23'''
n = decimal.Decimal(str(value))
n = n.quantize(TWOPLACES)
return unicode(n)
# Define data for the tables for counts, types, etc.
tablespecs = [
('Bill Counts', {'rownames': ['upper_count', 'lower_count',
'version_count']}),
('Bill Types', {
'keypath': ['bill_types'], 'summary': {
'object_type': 'bills', 'key': 'type',
},
}),
('Actions by Type', {
'keypath': ['actions_per_type'], 'summary': {
'object_type': 'actions',
'key': 'type',
},
}),
('Actions by Actor', {
'keypath': ['actions_per_actor'], 'summary': {
'object_type': 'actions',
'key': 'actor',
},
}),
('Quality Issues', {'rownames': [
'sponsorless_count', 'actionless_count', 'actions_unsorted',
'bad_vote_counts', 'version_count', 'versionless_count',
'sponsors_with_id', 'rollcalls_with_leg_id', 'have_subjects',
'updated_this_year', 'updated_this_month', 'updated_today',
'vote_passed']}),
]
format_as_percent = [
'sponsors_with_id',
'rollcalls_with_leg_id',
'have_subjects',
'updated_this_year',
'updated_this_month',
'updated_today',
'actions_per_actor',
'actions_per_type']
# Create the data for each table.
tables = []
for name, spec in tablespecs:
column_names = []
rows = defaultdict(list)
href_params = {}
tabledata = {'abbr': abbr,
'title': name,
'column_names': column_names,
'rows': rows}
contexts = []
for session, context in sessions.items():
column_names.append(session)
if 'keypath' in spec:
for k in spec['keypath']:
context = context[k]
contexts.append(context)
try:
rownames = spec['rownames']
except KeyError:
rownames = reduce(lambda x, y: set(x) | set(y), contexts)
for context in contexts:
for r in rownames:
val = context.get(r, 0)
if not isinstance(val, (int, float, decimal.Decimal)):
val = len(val)
use_percent = any([
r in format_as_percent,
name in ['Actions by Actor', 'Actions by Type'],
])
if use_percent and (val != 0):
val = decimal_format(val)
val += ' %'
rows[r].append(val)
# Link to summary/distinct views.
if 'summary' in spec:
try:
spec_val = spec['spec'](r)
except KeyError:
spec_val = r
else:
spec_val = json.dumps(spec_val, cls=JSONEncoderPlus)
params = dict(spec['summary'], session=session,
val=spec_val)
params = urllib.urlencode(params)
href_params[r] = params
# Add the final "total" column.
tabledata['column_names'].append('Total')
for k, v in rows.items():
try:
sum_ = sum(v)
except TypeError:
sum_ = 'n/a'
v.append(sum_)
rowdata = [((r, href_params.get(r)), cells)
for (r, cells) in rows.items()]
tabledata['rowdata'] = rowdata
tables.append(tabledata)
# ------------------------------------------------------------------------
# Render the tables.
_render = functools.partial(render_to_string, 'billy/bills_table.html')
tables = map(_render, tables)
return render(request, "billy/bills.html",
dict(tables=tables, metadata=meta, sessions=sessions,
tablespecs=tablespecs))
@is_superuser
def summary_index(request, abbr, session):
object_types = 'actions versions sponsors documents sources'.split()
def build(context_set):
summary = defaultdict(int)
for c in context_set:
for k, v in c.items():
summary[k] += 1
return dict(summary)
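    # For example, build([{'actor': 'lower'}, {'actor': 'upper', 'date': '...'}])
    # returns {'actor': 2, 'date': 1}: it counts how many objects carry each
    # key and ignores the values themselves.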
def build_summary(abbr):
bills = list(db.bills.find({settings.LEVEL_FIELD: abbr,
'session': session}))
res = {}
for k in object_types:
res[k] = build(chain.from_iterable(map(itemgetter(k), bills)))
res.update(bills=build(bills))
return res
summary = build_summary(abbr)
context = {'summary': summary, 'object_types': object_types,
'abbr': abbr, 'session': session}
return render(request, 'billy/summary_index.html', context)
@is_superuser
def summary_object_key(request, abbr, urlencode=urllib.urlencode,
collections=("bills", "legislators", "committees"),
dumps=json.dumps, Decimal=decimal.Decimal):
session = request.GET['session']
object_type = request.GET['object_type']
key = request.GET['key']
spec = {settings.LEVEL_FIELD: abbr, 'session': session}
if object_type in collections:
collection = getattr(db, object_type)
fields_key = key
objs = collection.find(spec, {fields_key: 1})
objs = imap(itemgetter(key), objs)
else:
collection = db.bills
fields_key = '%s.%s' % (object_type, key)
objs = collection.find(spec, {fields_key: 1})
objs = imap(itemgetter(object_type), objs)
def get_objects(objs):
for _list in objs:
for _obj in _list:
try:
yield _obj[key]
except KeyError:
pass
objs = get_objects(objs)
objs = (dumps(obj, cls=JSONEncoderPlus) for obj in objs)
counter = defaultdict(Decimal)
for obj in objs:
counter[obj] += 1
params = lambda val: urlencode(dict(object_type=object_type,
key=key, val=val, session=session))
total = len(counter)
objs = sorted(counter, key=counter.get, reverse=True)
objs = ((obj, counter[obj], counter[obj] / total, params(obj))
for obj in objs)
return render(request, 'billy/summary_object_key.html', locals())
@is_superuser
def summary_object_key_vals(
request, abbr, urlencode=urllib.urlencode,
collections=("bills", "legislators", "committees")):
meta = metadata(abbr)
session = request.GET['session']
object_type = request.GET['object_type']
key = request.GET['key']
val = request.GET['val']
try:
val = json.loads(val)
except ValueError:
pass
spec = {settings.LEVEL_FIELD: abbr, 'session': session}
fields = {'_id': 1}
if object_type in collections:
spec.update({key: val})
objects = getattr(db, object_type).find(spec, fields)
objects = ((object_type, obj['_id']) for obj in objects)
else:
spec.update({'.'.join([object_type, key]): val})
objects = db.bills.find(spec, fields)
objects = (('bills', obj['_id']) for obj in objects)
spec = json.dumps(spec, cls=JSONEncoderPlus, indent=4)
return render(request, 'billy/summary_object_keyvals.html', dict(
object_type=object_type,
objects=objects,
spec=spec,
meta=meta
))
@is_superuser
def object_json(request, collection, _id):
re_attr = re.compile(r'^ "(.{1,100})":', re.M)
model_obj = getattr(mdb, collection).find_one(_id)
if model_obj is None:
msg = 'No object found with id %r in collection %r'
raise Http404(msg % (_id, collection))
obj = OrderedDict(sorted(model_obj.items()))
obj_id = obj['_id']
obj_json = json.dumps(obj, cls=JSONEncoderPlus, indent=4)
def subfunc(m, tmpl=' <a name="%s">%s:</a>'):
val = m.group(1)
return tmpl % (val, val)
    # A single pass suffices: re.sub with re.M already rewrites every matching
    # attribute line in obj_json, and a second pass would match nothing new.
    obj_json = re_attr.sub(subfunc, obj_json)
tmpl = '<a href="{0}">{0}</a>'
obj_json = re.sub('"(http://.+?)"',
lambda m: tmpl.format(*m.groups()), obj_json)
if obj['_type'] != 'metadata':
mdata = metadata(obj[settings.LEVEL_FIELD])
else:
mdata = obj
return render(request, 'billy/object_json.html', dict(
obj=obj, obj_id=obj_id, obj_json=obj_json, collection=collection,
metadata=mdata, model_obj=model_obj
))
@is_superuser
def other_actions(request, abbr):
report = db.reports.find_one({'_id': abbr})
if not report:
raise Http404('No reports found for abbreviation %r.' % abbr)
return _csv_response(request, 'other_actions', ('action', '#'),
sorted(report['bills']['other_actions']), abbr)
@is_superuser
def duplicate_versions(request, abbr):
meta, report = _meta_and_report(abbr)
return render(request, "billy/duplicate_versions.html",
{'metadata': meta, 'report': report})
def _bill_spec(meta, limit):
abbr = meta['abbreviation']
# basic spec
latest_session = meta['terms'][-1]['sessions'][-1]
spec = {settings.LEVEL_FIELD: abbr.lower(), 'session': latest_session}
basic_specs = {
"no_versions": {'versions': []},
"no_sponsors": {'sponsors': []},
"no_actions": {'actions': []}
}
if limit in basic_specs:
spec.update(basic_specs[limit])
spec.pop('session') # all sessions
elif limit == 'current_term':
curTerms = meta['terms'][-1]['sessions']
spec['session'] = {"$in": curTerms}
elif limit == '':
pass
else:
raise ValueError('invalid limit: {0}'.format(limit))
return spec
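# Illustrative only: for metadata whose latest term lists sessions
# ['2011', '2012'], _bill_spec(meta, 'no_versions') drops the session filter
# and yields {settings.LEVEL_FIELD: '<abbr>', 'versions': []}, while
# _bill_spec(meta, 'current_term') keeps {'session': {'$in': ['2011', '2012']}}.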
@is_superuser
def bill_list(request, abbr):
meta = metadata(abbr)
if not meta:
raise Http404('No metadata found for abbreviation %r' % abbr)
if 'version_url' in request.GET:
version_url = request.GET.get('version_url')
spec = {'versions.url': version_url}
exceptions = []
else:
limit = request.GET.get('limit', '')
exceptions = get_quality_exceptions(abbr)['bills:' + limit]
spec = _bill_spec(meta, limit)
query_text = repr(spec)
if exceptions:
spec['_id'] = {'$nin': list(exceptions)}
query_text += ' (excluding {0} exceptions)'.format(len(exceptions))
bills = list(mdb.bills.find(spec))
bill_ids = [b['_id'] for b in bills if b['_id'] not in exceptions]
context = {'metadata': meta, 'query_text': query_text, 'bills': bills,
'bill_ids': bill_ids}
return render(request, 'billy/bill_list.html', context)
@is_superuser
def bad_vote_list(request, abbr):
meta = metadata(abbr)
if not meta:
raise Http404('No metadata found for abbreviation %r' % abbr)
report = mdb.reports.find_one({'_id': abbr})
bad_vote_ids = report['votes']['bad_vote_counts']
votes = mdb.votes.find({'_id': {'$in': bad_vote_ids}})
context = {'metadata': meta, 'vote_ids': bad_vote_ids,
'votes': votes}
return render(request, 'billy/vote_list.html', context)
@is_superuser
def legislators(request, abbr):
meta = metadata(abbr)
report = db.reports.find_one({'_id': abbr})
if not report:
raise Http404('No report was found for abbr %r.' % abbr)
else:
report = report['legislators']
chambers = meta['chambers'].copy()
for chamber_type, chamber in chambers.iteritems():
chamber['legislators'] = sorted(db.legislators.find(
{settings.LEVEL_FIELD: abbr.lower(), 'active': True,
'chamber': chamber_type}), key=keyfunc)
inactive_legs = db.legislators.find({settings.LEVEL_FIELD: abbr.lower(),
'active': {'$ne': True}})
inactive_legs = sorted(inactive_legs, key=lambda x: x['last_name'])
return render(request, 'billy/legislators.html', {
'chambers': chambers.values(),
'inactive_legs': inactive_legs,
'metadata': meta,
'overfilled': report['overfilled_seats'],
'vacant': report['vacant_seats'],
})
@is_superuser
def subjects(request, abbr):
meta = metadata(abbr)
subjects = db.subjects.find({
'abbr': abbr.lower()
})
report = db.reports.find_one({'_id': abbr})
uc_s = report['bills']['uncategorized_subjects']
uc_subjects = []
c_subjects = {}
for sub in subjects:
c_subjects[sub['remote']] = sub
subjects.rewind()
uniqid = 1
for sub in uc_s:
        if sub[0] not in c_subjects:
sub.append(uniqid)
uniqid += 1
uc_subjects.append(sub)
normalized_subjects = settings.BILLY_SUBJECTS[:]
normalized_subjects.append("IGNORED")
return render(request, 'billy/subjects.html', {
'metadata': meta,
'subjects': subjects,
'normalized_subjects': normalized_subjects,
'uncat_subjects': uc_subjects
})
@is_superuser
def subjects_remove(request, abbr=None, id=None):
db.subjects.remove({"_id": id}, safe=True)
return redirect('admin_subjects', abbr)
@is_superuser
@require_http_methods(["POST"])
def subjects_commit(request, abbr):
def _gen_id(abbr, subject):
return "%s-%s" % (abbr, subject)
payload = dict(request.POST)
if 'sub' in payload:
del(payload['sub'])
catd_subjects = defaultdict(dict)
for idex in payload:
if idex == 'csrfmiddlewaretoken':
continue
key, val = idex.split("-", 1)
        if val == 'remote' and 'normal' not in catd_subjects[key]:
catd_subjects[key]['normal'] = []
catd_subjects[key][val] = payload[idex]
for idex in catd_subjects:
sub = catd_subjects[idex]
remote = sub['remote'][0]
normal = [x.strip() for x in sub['normal']]
if normal == []:
continue
if "IGNORED" in normal:
normal = []
eyedee = _gen_id(abbr, remote)
obj = {
"_id": eyedee,
"abbr": abbr,
"remote": remote,
"normal": normal
}
db.subjects.update({"_id": eyedee},
obj,
True, # Upsert
safe=True)
return redirect('admin_subjects', abbr)
@is_superuser
def quality_exceptions(request, abbr):
meta = metadata(abbr)
exceptions = db.quality_exceptions.find({
'abbr': abbr.lower()
}) # Natural sort is fine
extypes = QUALITY_EXCEPTIONS
return render(request, 'billy/quality_exceptions.html', {
'metadata': meta,
'exceptions': exceptions,
"extypes": extypes
})
@is_superuser
def quality_exception_remove(request, abbr, obj):
meta = metadata(abbr)
errors = []
db.quality_exceptions.remove({"_id": ObjectId(obj)})
if errors != []:
return render(request, 'billy/quality_exception_error.html', {
'metadata': meta,
'errors': errors
})
return redirect('quality_exceptions', abbr)
@is_superuser
def quality_exception_commit(request, abbr):
def classify_object(oid):
oid = oid.upper()
try:
return {
"L": "legislators",
"B": "bills",
"E": "events",
"V": "votes"
}[oid[2]]
except KeyError:
return None
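    # Illustrative only, assuming billy-style ids such as 'NCL000001' where the
    # third character encodes the collection: classify_object('NCL000001')
    # returns 'legislators'; an unrecognized code yields None.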
meta = metadata(abbr)
error = []
get = request.POST
objects = get['affected'].split()
if "" in objects:
objects.remove("")
if len(objects) == 0:
error.append("No objects.")
for obj in objects:
classy = classify_object(obj)
o = getattr(db, classy, None).find({
"_id": obj
})
if o.count() == 0:
error.append("Unknown %s object - %s" % (classy, obj))
elif o.count() != 1:
error.append("Somehow %s matched more then one ID..." % (obj))
else:
o = o[0]
if o[settings.LEVEL_FIELD] != abbr:
error.append("Object %s is not from '%s'." % (obj, abbr))
type = get['extype'].strip()
if type not in QUALITY_EXCEPTIONS:
error.append("Type %s is not a real type" % type)
notes = get['notes'].strip()
if type == "":
error.append("Empty type")
if notes == "":
error.append("Empty notes")
if error != []:
return render(request, 'billy/quality_exception_error.html', {
'metadata': meta,
'errors': error
})
db.quality_exceptions.insert({
"abbr": abbr,
"notes": notes,
"ids": objects,
"type": type
})
return redirect('quality_exceptions', abbr)
@is_superuser
def events(request, abbr):
meta = metadata(abbr)
events = db.events.find({settings.LEVEL_FIELD: abbr.lower()},
sort=[('when', pymongo.DESCENDING)]).limit(20)
# sort and get rid of old events.
return render(request, 'billy/events.html', {
'events': ((e, e['_id']) for e in events),
'metadata': meta,
})
@is_superuser
def event(request, abbr, event_id):
meta = metadata(abbr)
event = db.events.find_one(event_id)
return render(request, 'billy/events.html', {
'event': event,
'metadata': meta,
})
@is_superuser
def legislator_edit(request, id):
leg = db.legislators.find_one({'_all_ids': id})
if not leg:
raise Http404('No legislators found for id %r.' % id)
meta = metadata(leg[settings.LEVEL_FIELD])
return render(request, 'billy/legislator_edit.html', {
'leg': leg,
'metadata': meta,
'locked': leg.get('_locked_fields', []),
'fields': [
"last_name",
"full_name",
"first_name",
"middle_name",
"nickname",
"suffixes",
"email",
"transparencydata_id",
"photo_url",
"url",
]
})
@is_superuser
@require_http_methods(["POST"])
def legislator_edit_commit(request):
payload = dict(request.POST)
sources = payload.pop('change_source')
leg_id = payload['leg_id'][0]
legislator = db.legislators.find_one({'_all_ids': leg_id})
if not legislator:
raise Http404('No legislators found for id %r.' % leg_id)
cur_sources = [x['url'] for x in legislator['sources']]
for source in sources:
if source and source.strip() != "":
source = source.strip()
if source in cur_sources:
continue
legislator['sources'].append({
"url": source
})
for key in ["leg_id", "csrfmiddlewaretoken"]:
del(payload[key])
update = {}
locked = []
for key in payload:
if "locked" in key:
locked.append(payload[key][0].split("-", 1)[0])
continue
update[key] = payload[key][0]
legislator.update(update)
legislator['_locked_fields'] = locked
db.legislators.update({"_id": legislator['_id']},
legislator,
upsert=False, safe=True)
return redirect('admin_legislator_edit', legislator['leg_id'])
@is_superuser
def retire_legislator(request, id):
legislator = db.legislators.find_one({'_all_ids': id})
if not legislator:
raise Http404('No legislators found for id %r.' % id)
# retire a legislator
abbr = legislator[settings.LEVEL_FIELD]
meta = metadata(abbr)
term = meta['terms'][-1]['name']
cur_role = legislator['roles'][0]
if cur_role['type'] != 'member' or cur_role['term'] != term:
raise ValueError('member missing role for %s' % term)
end_date = request.POST.get('end_date')
if not end_date:
alert = dict(type='warning', title='Warning!',
message='missing end_date for retirement')
else:
cur_role['end_date'] = datetime.datetime.strptime(end_date, '%Y-%m-%d')
db.legislators.save(legislator, safe=True)
deactivate_legislators(term, abbr)
alert = dict(type='success', title='Retired Legislator',
message='{0} was successfully retired.'.format(
legislator['full_name']))
return render(request, 'billy/legislator_edit.html', {'leg': legislator,
'metadata': meta,
'alert': alert})
@is_superuser
def committees(request, abbr):
meta = metadata(abbr)
chambers = meta['chambers'].copy()
chambers['joint'] = {'name': 'Joint'}
for chamber_type, chamber in chambers.iteritems():
chamber['committees'] = sorted(db.committees.find(
{settings.LEVEL_FIELD: abbr.lower(), 'chamber': chamber_type}))
return render(request, 'billy/committees.html', {
'chambers': chambers.values(),
'metadata': meta,
})
@is_superuser
def delete_committees(request):
ids = request.POST.getlist('committees')
committees = db.committees.find({'_id': {'$in': ids}})
abbr = committees[0][settings.LEVEL_FIELD]
if not request.POST.get('confirm'):
return render(request, 'billy/delete_committees.html',
{'abbr': abbr, 'committees': committees})
else:
db.committees.remove({'_id': {'$in': ids}}, safe=True)
return redirect('admin_committees', abbr)
@is_superuser
def mom_index(request, abbr):
legislators = list(db.legislators.find({settings.LEVEL_FIELD: abbr}))
return render(request, 'billy/mom_index.html', {
"abbr": abbr,
"legs": legislators
})
@is_superuser
def mom_commit(request, abbr):
actions = []
leg1 = request.POST['leg1']
leg2 = request.POST['leg2']
leg1 = db.legislators.find_one({'_id': leg1})
actions.append("Loaded Legislator '%s as `leg1''" % leg1['leg_id'])
leg2 = db.legislators.find_one({'_id': leg2})
actions.append("Loaded Legislator '%s as `leg2''" % leg2['leg_id'])
# XXX: Re-direct on None
merged, remove = merge_legislators(leg1, leg2)
actions.append("Merged Legislators as '%s'" % merged['leg_id'])
db.legislators.remove({'_id': remove}, safe=True)
actions.append("Deleted Legislator (which had the ID of %s)" % remove)
db.legislators.save(merged, safe=True)
actions.append("Saved Legislator %s with merged data" % merged['leg_id'])
for attr in merged:
merged[attr] = _mom_mangle(merged[attr])
return render(request, 'billy/mom_commit.html', {
"merged": merged,
"actions": actions,
"abbr": abbr
})
def _mom_attr_diff(merge, leg1, leg2):
mv_info = {
"1": "Root Legislator",
"2": "Duplicate Legislator",
"U": "Unchanged",
"N": "New Information"
}
mv = {}
for key in merge:
if key in leg1 and key in leg2:
if leg1[key] == leg2[key]:
mv[key] = "U"
elif key == leg1[key]:
mv[key] = "1"
else:
mv[key] = "2"
elif key in leg1:
mv[key] = "1"
elif key in leg2:
mv[key] = "2"
else:
mv[key] = "N"
return (mv, mv_info)
def _mom_mangle(attr):
args = {"sort_keys": True, "indent": 4, "cls": JSONEncoderPlus}
if isinstance(attr, types.ListType):
return json.dumps(attr, **args)
if isinstance(attr, types.DictType):
return json.dumps(attr, **args)
return attr
@is_superuser
def mom_merge(request, abbr):
leg1 = "leg1"
leg2 = "leg2"
leg1 = request.GET[leg1]
leg2 = request.GET[leg2]
leg1_db = db.legislators.find_one({'_id': leg1})
leg2_db = db.legislators.find_one({'_id': leg2})
# XXX: break this out into its own error page
if leg1_db is None or leg2_db is None:
        # Keep whichever legislator record actually loaded, if either did.
        nonNull = leg1_db if leg1_db is not None else leg2_db
if nonNull is not None:
nonID = leg1 if nonNull['_id'] == leg1 else leg2
else:
nonID = None
return render(request, 'billy/mom_error.html', {"leg1": leg1,
"leg2": leg2,
"leg1_db": leg1_db,
"leg2_db": leg2_db,
"same": nonNull,
"sameid": nonID,
"abbr": abbr})
leg1, leg2 = leg1_db, leg2_db
merge, toRemove = merge_legislators(leg1, leg2)
mv, mv_info = _mom_attr_diff(merge, leg1, leg2)
for foo in [leg1, leg2, merge]:
for attr in foo:
foo[attr] = _mom_mangle(foo[attr])
return render(request, 'billy/mom_merge.html', {
'leg1': leg1, 'leg2': leg2, 'merge': merge, 'merge_view': mv,
'remove': toRemove, 'merge_view_info': mv_info, "abbr": abbr})
@is_superuser
def newsblogs(request):
'''
Demo view for news/blog aggregation.
'''
# Pagination insanity.
total_count = db.feed_entries.count()
limit = int(request.GET.get('limit', 6))
page = int(request.GET.get('page', 1))
if page < 1:
page = 1
skip = limit * (page - 1)
# Whether display is limited to entries tagged with legislator
# committee or bill object.
entities = request.GET.get('entities', True)
tab_range = range(1, int(float(total_count) / limit) + 1)
tab = skip / limit + 1
try:
tab_index = tab_range.index(tab)
except ValueError:
tab_index = 1
tab_range_len = len(tab_range)
pagination_truncated = False
if tab_range_len > 8:
i = tab_index - 4
if i < 0:
i = 1
j = tab_index
k = j + 5
previous = tab_range[i: j]
next_ = tab_range[j + 1: k]
pagination_truncated = True
elif tab_range_len == 8:
previous = tab_range[:4]
next_ = tab_range[4:]
else:
div, mod = divmod(tab_range_len, 2)
        if mod == 0:
i = tab_range_len / 2
else:
i = (tab_range_len - 1) / 2
previous = tab_range[:i]
next_ = tab_range[i:]
# Get the data.
abbr = request.GET.get('abbr')
if entities is True:
spec = {'entity_ids': {'$ne': None}}
else:
spec = {}
if abbr:
spec.update(abbr=abbr)
entries = db.feed_entries.find(spec, skip=skip, limit=limit,
sort=[('published_parsed',
pymongo.DESCENDING)])
_entries = []
entity_types = {'L': 'legislators',
'C': 'committees',
'B': 'bills'}
for entry in entries:
summary = entry['summary']
entity_strings = entry['entity_strings']
entity_ids = entry['entity_ids']
_entity_strings = []
_entity_ids = []
_entity_urls = []
_done = []
if entity_strings:
for entity_string, _id in zip(entity_strings, entity_ids):
if entity_string in _done:
continue
else:
_done.append(entity_string)
_entity_strings.append(entity_string)
_entity_ids.append(_id)
entity_type = entity_types[_id[2]]
url = urlresolvers.reverse('object_json',
args=[entity_type, _id])
_entity_urls.append(url)
summary = summary.replace(entity_string,
'<b><a href="%s">%s</a></b>' % (
url, entity_string))
entity_data = zip(_entity_strings, _entity_ids, _entity_urls)
entry['summary'] = summary
entry['entity_data'] = entity_data
entry['id'] = entry['_id']
entry['host'] = urlparse.urlparse(entry['link']).netloc
# Now hyperlink the inbox data.
# if '_inbox_data' in entry:
# inbox_data = entry['_inbox_data']
# for entity in inbox_data['entities']:
# entity_data = entity['entity_data']
# if entity_data['type'] == 'organization':
# ie_url = 'http://influenceexplorer.com/organization/%s/%s'
# ie_url = ie_url % (entity_data['slug'], entity_data['id'])
# else:
# continue
# summary = entry['summary']
# tmpl = '<a href="%s">%s</a>'
# for string in entity['matched_text']:
# summary = summary.replace(string, tmpl % (ie_url, string))
# entry['summary'] = summary
_entries.append(entry)
return render(request, 'billy/newsblogs.html', {
'entries': _entries,
'entry_count': entries.count(),
'abbrs': db.feed_entries.distinct('abbr'),
'abbr': abbr,
'tab_range': tab_range,
'previous': previous,
'next_': next_,
'pagination_truncated': pagination_truncated,
'page': page,
})
@is_superuser
def progress_meter_gaps(request, abbr):
'''List all bills that have been signed but haven't passed
their house of origin. See billy.importers.bills for the
actual conditions applied. There are a few.
'''
meta = metadata(abbr)
if not meta:
raise Http404('No metadata found for abbreviation %r' % abbr)
report = mdb.reports.find_one({'_id': abbr})
ids = report['bills']['progress_meter_gaps']
bills = db.bills.find({'_id': {'$in': ids}})
context = {'metadata': meta, 'bill_ids': ids,
'bills': bills, 'query_text': 'progress meter gaps exist'}
return render(request, 'billy/bill_list.html', context)
|
|
from __future__ import absolute_import
import copy
from django.test import TestCase
from django import forms
from django.contrib.contenttypes.models import ContentType
from django.template import Context
from django.utils import unittest
import mock
from widgy.forms import WidgyFormMixin, WidgyFormField
from widgy.models import Node, VersionTracker
from ..widgy_config import widgy_site
from ..models import (
HasAWidgy, HasAWidgyNonNull, Layout, HasAWidgyOnlyAnotherLayout, AnotherLayout,
VersionedPage, RawTextWidget)
class TestWidgyField(TestCase):
def test_it_acts_like_a_foreignkey(self):
x = HasAWidgy()
x.widgy = Layout.add_root(widgy_site).node
x.save()
x = HasAWidgy.objects.get(pk=x.pk)
self.assertIsInstance(x.widgy.content, Layout)
def test_formfield(self):
class TheForm(forms.ModelForm):
class Meta:
model = HasAWidgy
fields = '__all__'
the_layout_contenttype = ContentType.objects.get_for_model(Layout)
x = TheForm({'widgy': the_layout_contenttype.id})
layout_contenttypes = x.fields['widgy'].queryset.all()
self.assertEqual(len(layout_contenttypes), 2)
self.assertIn(the_layout_contenttype, layout_contenttypes)
self.assertIn(ContentType.objects.get_for_model(AnotherLayout),
layout_contenttypes)
self.assertTrue(x.is_valid())
obj = x.save()
self.assertIsInstance(obj.widgy.content, Layout)
def test_sublayout(self):
class TheForm(forms.ModelForm):
class Meta:
model = HasAWidgyOnlyAnotherLayout
fields = '__all__'
the_layout_contenttype = ContentType.objects.get_for_model(AnotherLayout)
x = TheForm({'widgy': the_layout_contenttype.id})
layout_contenttypes = x.fields['widgy'].queryset.all()
self.assertEqual(len(layout_contenttypes), 1)
self.assertIn(the_layout_contenttype, layout_contenttypes)
def test_add_root(self):
instance = HasAWidgy()
instance.widgy = ContentType.objects.get_for_model(Layout)
root_node = HasAWidgy._meta.get_field('widgy').add_root(instance, {
'pk': 1337,
})
self.assertEqual(root_node.content.pk, 1337)
def test_override_add_root(self):
"""
If we put a widgy content before save()ing, the root_node shouldn't be overridden.
"""
instance = HasAWidgy()
field = HasAWidgy._meta.get_field('widgy')
instance.widgy = ContentType.objects.get_for_model(Layout)
instance.widgy = field.add_root(instance, {'pk': 1337})
instance.save()
self.assertEqual(instance.widgy.content.pk, 1337)
@unittest.skip("We want WidgyFields to work with non-modelforms, but we haven't designed an API yet.")
class TestPlainForm(TestCase):
def setUp(self):
# WidgyForms cannot be at the root level of a test because they make
# database calls and the database isn't setup yet.
class WidgiedForm(WidgyFormMixin, forms.Form):
text_field = forms.CharField()
widgy_field = WidgyFormField(
queryset=ContentType.objects.filter(model__in=['layout', 'anotherlayout']),
site=widgy_site,
)
self.form = WidgiedForm
def test_render_initial(self):
x = self.form()
rendered = x.as_p()
self.assertIn('value="%s"' % ContentType.objects.get_for_model(Layout).id,
rendered)
self.assertIn('value="%s"' % ContentType.objects.get_for_model(AnotherLayout).id,
rendered)
self.assertIn('name="widgy_field"',
rendered)
self.assertIn('name="text_field"',
rendered)
# class names
self.assertIn('core_tests',
rendered)
self.assertIn('anotherlayout',
rendered)
def test_initial_save(self):
x = self.form({
'text_field': 'foo',
'widgy_field': str(ContentType.objects.get_for_model(AnotherLayout).id),
})
self.assertTrue(x.is_valid())
x = self.form({
'text_field': 'foo',
'widgy_field': str(ContentType.objects.get_for_model(HasAWidgy).id),
})
self.assertFalse(x.is_valid())
def test_second_save(self):
        # TODO: the API for a non-modelform widgy field hasn't been designed yet.
root_node = Layout.add_root(widgy_site)
x = self.form(initial={'widgy_field': root_node})
class TestModelForm(TestCase):
def setUp(self):
class WidgiedModelForm(WidgyFormMixin, forms.ModelForm):
text_field = forms.CharField()
class Meta:
model = HasAWidgy
fields = '__all__'
self.form = WidgiedModelForm
def test_render_initial(self):
x = self.form()
rendered = x.as_p()
self.assertIn('value="%s"' % ContentType.objects.get_for_model(Layout).id,
rendered)
self.assertIn('value="%s"' % ContentType.objects.get_for_model(AnotherLayout).id,
rendered)
self.assertIn('name="widgy"',
rendered)
self.assertIn('name="text_field"',
rendered)
# class names
self.assertIn('core_tests',
rendered)
self.assertIn('anotherlayout',
rendered)
def test_initial_save(self):
x = self.form({
'text_field': 'asdf',
'widgy': ContentType.objects.get_for_model(AnotherLayout).id,
})
self.assertTrue(x.is_valid())
instance = x.save()
self.assertIsInstance(instance.widgy, Node)
self.assertIsInstance(instance.widgy.content, AnotherLayout)
def test_initial_save_invalid(self):
x = self.form({
'text_field': 'asdf',
'widgy': ContentType.objects.get_for_model(HasAWidgy).id,
})
self.assertFalse(x.is_valid())
def test_second_render(self):
from argonauts.templatetags.argonauts import json as json_tag
instance = HasAWidgy.objects.create(
widgy=Layout.add_root(widgy_site).node
)
x = self.form(instance=instance)
rendered = x.as_p()
self.assertIn('input type="hidden" name="widgy" value="%s"' % instance.widgy.pk,
rendered)
self.assertIn('new Widgy',
rendered)
self.assertIn(json_tag(instance.widgy.to_json(widgy_site)),
rendered)
def test_second_save(self):
instance = HasAWidgy.objects.create(
widgy=Layout.add_root(widgy_site).node
)
x = self.form(instance=instance, data={
'widgy': '1',
'text_field': 'asdf',
})
        # TODO: unclear what to assert here; for now just check save() doesn't raise.
x.save()
def test_single_content_type(self):
class Form(WidgyFormMixin, forms.ModelForm):
class Meta:
model = HasAWidgyOnlyAnotherLayout
fields = '__all__'
x = Form()
self.assertIn(AnotherLayout._meta.verbose_name.lower(),
x.as_p().lower())
x = Form({})
self.assertTrue(x.is_valid())
instance = x.save()
self.assertIsInstance(instance.widgy.content, AnotherLayout)
def test_formfield_non_null(self):
class TheForm(WidgyFormMixin, forms.ModelForm):
class Meta:
model = HasAWidgyNonNull
fields = '__all__'
x = TheForm({})
self.assertTrue(x.is_valid())
obj = x.save()
self.assertTrue(obj.widgy)
class TestVersionedModelForm(TestCase):
def setUp(self):
class VersionedWidgiedForm(WidgyFormMixin, forms.ModelForm):
class Meta:
model = VersionedPage
fields = '__all__'
self.form = VersionedWidgiedForm
def test_render(self):
x = self.form()
rendered = x.as_p()
self.assertIn('value="%s"' % ContentType.objects.get_for_model(Layout).id,
rendered)
self.assertIn('value="%s"' % ContentType.objects.get_for_model(AnotherLayout).id,
rendered)
self.assertIn('name="version_tracker"',
rendered)
self.assertIn('core_tests',
rendered)
self.assertIn('anotherlayout',
rendered)
def test_first_save_noroot(self):
x = self.form({})
self.assertTrue(x.is_valid())
instance = x.save()
self.assertEqual(instance.version_tracker, None)
def test_first_save(self):
x = self.form({
'version_tracker': ContentType.objects.get_for_model(Layout).id,
})
self.assertTrue(x.is_valid())
instance = x.save()
self.assertIsInstance(instance.version_tracker, VersionTracker)
self.assertIsInstance(instance.version_tracker.working_copy.content, Layout)
def test_second_render(self):
x = self.form({
'version_tracker': ContentType.objects.get_for_model(Layout).id,
})
instance = x.save()
x = self.form(instance=instance)
url = widgy_site.reverse(widgy_site.commit_view, kwargs={'pk': instance.version_tracker.pk})
self.assertIn(url, x.as_p())
def copy_call_args(mock):
"""
`copy.copy`s a mock's call_args to handle mutable arguments.
Like template Context
"""
new_mock = mock.Mock()
def side_effect(*args, **kwargs):
new_args = tuple(copy.copy(i) for i in args)
new_kwargs = dict((k, copy.copy(v)) for k, v in kwargs.items())
new_mock(*new_args, **new_kwargs)
return mock.DEFAULT
mock.side_effect = side_effect
return new_mock
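# Illustrative usage sketch of copy_call_args, added for clarity; it is not
# part of the original test suite. It mirrors the pattern used in TestRender
# below: the Context passed to Layout.render may be mutated afterwards, so the
# recorder captures a copy taken at call time. `widgied` is assumed to be a
# saved HasAWidgy instance like the one built in TestRender.setUp.
def _example_copy_call_args_usage(widgied):
    widgy_field = HasAWidgy._meta.get_field_by_name('widgy')[0]
    with mock.patch.object(Layout, 'render') as patched_render:
        recorder = copy_call_args(patched_render)
        widgy_field.render(widgied)
    args, kwargs = recorder.call_args
    # args[0] is the Context as it looked when Layout.render was called
    return args[0]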
class TestRender(TestCase):
def setUp(self):
self.widgied = HasAWidgy()
self.widgied.widgy = Layout.add_root(widgy_site).node
self.widgied.save()
self.widgied.widgy.get_children()[1].content.add_child(widgy_site, RawTextWidget, text='asdf')
self.widgy_field = HasAWidgy._meta.get_field_by_name('widgy')[0]
def test_simple(self):
rendered = self.widgy_field.render(self.widgied)
self.assertIn('asdf', rendered)
def test_widgy_env(self):
with mock.patch.object(Layout, 'render') as patched_render:
patched_render = copy_call_args(patched_render)
self.widgy_field.render(self.widgied)
args, kwargs = patched_render.call_args
context = args[0]
widgy = context['widgy']
self.assertEqual(widgy['site'], widgy_site)
self.assertEqual(widgy['owner'], self.widgied)
def test_parent(self):
parent_widgy = object()
context = Context({'widgy': parent_widgy})
with mock.patch.object(Layout, 'render') as patched_render:
patched_render = copy_call_args(patched_render)
self.widgy_field.render(self.widgied, context)
args, kwargs = patched_render.call_args
context = args[0]
widgy = context['widgy']
self.assertIs(widgy['parent'], parent_widgy)
def test_null(self):
"""
Rendering a NULL WidgyField
"""
self.widgied.widgy = None
self.widgied.save()
# doesn't matter what happens as long as it doesn't throw an exception
self.widgy_field.render(self.widgied)
def test_null_versioned(self):
"""
Rendering a NULL VersionedWidgyField
"""
page = VersionedPage.objects.create()
field = VersionedPage._meta.get_field_by_name('version_tracker')[0]
# doesn't matter what happens as long as it doesn't throw an exception
field.render(page)
def test_no_commits(self):
"""
Rendering a VersionedWidgyField without any commits
"""
page = VersionedPage.objects.create(
version_tracker=VersionTracker.objects.create(
working_copy=Layout.add_root(widgy_site).node,
)
)
field = VersionedPage._meta.get_field_by_name('version_tracker')[0]
# doesn't matter what happens as long as it doesn't throw an exception
field.render(page)
|
|
##########################################################################
#
# Copyright (c) 2015, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import IECore
import Gaffer
import GafferTest
class AnimationTest( GafferTest.TestCase ) :
def testKey( self ) :
k = Gaffer.Animation.Key()
self.assertEqual( k.type, Gaffer.Animation.Type.Invalid )
self.assertFalse( k )
k = Gaffer.Animation.Key( 0, 1, Gaffer.Animation.Type.Step )
self.assertTrue( k )
self.assertEqual( k.time, 0 )
self.assertEqual( k.value, 1 )
self.assertEqual( k.type, Gaffer.Animation.Type.Step )
k.time = 1
k.value = 0
k.type = Gaffer.Animation.Type.Linear
self.assertEqual( k.time, 1 )
self.assertEqual( k.value, 0 )
self.assertEqual( k.type, Gaffer.Animation.Type.Linear )
k2 = Gaffer.Animation.Key( k )
self.assertEqual( k, k2 )
k2.time = 10
self.assertNotEqual( k, k2 )
def testKeyRepr( self ) :
k = Gaffer.Animation.Key()
self.assertEqual( k, eval( repr( k ) ) )
k = Gaffer.Animation.Key( 0, 1, Gaffer.Animation.Type.Step )
self.assertEqual( k, eval( repr( k ) ) )
def testCanAnimate( self ) :
s = Gaffer.ScriptNode()
s["n"] = Gaffer.Node()
s["n"]["user"]["f"] = Gaffer.FloatPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
s["n"]["user"]["i"] = Gaffer.IntPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
s["n"]["user"]["b"] = Gaffer.BoolPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
s["n"]["user"]["s"] = Gaffer.StringPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
self.assertTrue( Gaffer.Animation.canAnimate( s["n"]["user"]["f"] ) )
self.assertTrue( Gaffer.Animation.canAnimate( s["n"]["user"]["i"] ) )
self.assertTrue( Gaffer.Animation.canAnimate( s["n"]["user"]["b"] ) )
self.assertFalse( Gaffer.Animation.canAnimate( s["n"]["user"]["s"] ) )
# Can't key because it has an input.
s["n"]["user"]["f"].setInput( s["n"]["user"]["i"] )
self.assertFalse( Gaffer.Animation.canAnimate( s["n"]["user"]["f"] ) )
# Can't key because there's no parent where we can
# put the Animation node.
n = Gaffer.Node()
n["user"]["f"] = Gaffer.FloatPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
self.assertFalse( Gaffer.Animation.canAnimate( n["user"]["f"] ) )
def testAcquire( self ) :
s = Gaffer.ScriptNode()
s["n"] = Gaffer.Node()
s["n"]["user"]["f"] = Gaffer.FloatPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
self.assertFalse( Gaffer.Animation.isAnimated( s["n"]["user"]["f"] ) )
self.assertTrue( Gaffer.Animation.canAnimate( s["n"]["user"]["f"] ) )
curve = Gaffer.Animation.acquire( s["n"]["user"]["f"] )
self.assertTrue( isinstance( curve, Gaffer.Animation.CurvePlug ) )
self.assertTrue( curve.isSame( Gaffer.Animation.acquire( s["n"]["user"]["f"] ) ) )
self.assertTrue( curve.node().parent().isSame( s ) )
def testAcquireSharesAnimationNodes( self ) :
s = Gaffer.ScriptNode()
s["n1"] = Gaffer.Node()
s["n1"]["user"]["p1"] = Gaffer.FloatPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
s["n1"]["user"]["p2"] = Gaffer.FloatPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
s["n2"] = Gaffer.Node()
s["n2"]["user"]["p1"] = Gaffer.FloatPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
s["n2"]["user"]["p2"] = Gaffer.FloatPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
n1p1 = Gaffer.Animation.acquire( s["n1"]["user"]["p1"] )
n1p2 = Gaffer.Animation.acquire( s["n1"]["user"]["p2"] )
n2p1 = Gaffer.Animation.acquire( s["n2"]["user"]["p1"] )
n2p2 = Gaffer.Animation.acquire( s["n2"]["user"]["p2"] )
self.assertTrue( n1p1.node().isSame( n1p2.node() ) )
self.assertTrue( n2p1.node().isSame( n2p2.node() ) )
self.assertFalse( n1p1.node().isSame( n2p1.node() ) )
def testAddKey( self ) :
s = Gaffer.ScriptNode()
s["n"] = Gaffer.Node()
s["n"]["user"]["f"] = Gaffer.FloatPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
curve = Gaffer.Animation.acquire( s["n"]["user"]["f"] )
key = Gaffer.Animation.Key( time = 10, value = 10 )
self.assertFalse( curve.hasKey( key.time ) )
curve.addKey( key )
self.assertTrue( curve.hasKey( key.time ) )
self.assertEqual( curve.getKey( key.time ), key )
def testClosestKey( self ) :
s = Gaffer.ScriptNode()
s["n"] = Gaffer.Node()
s["n"]["user"]["f"] = Gaffer.FloatPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
curve = Gaffer.Animation.acquire( s["n"]["user"]["f"] )
key0 = Gaffer.Animation.Key( 0, 0 )
key1 = Gaffer.Animation.Key( 1, 1 )
curve.addKey( key0 )
curve.addKey( key1 )
self.assertEqual( curve.closestKey( -1 ), key0 )
self.assertEqual( curve.closestKey( -0.1 ), key0 )
self.assertEqual( curve.closestKey( 0 ), key0 )
self.assertEqual( curve.closestKey( 0.1 ), key0 )
self.assertEqual( curve.closestKey( 0.49 ), key0 )
self.assertEqual( curve.closestKey( 0.51 ), key1 )
self.assertEqual( curve.closestKey( 0.75 ), key1 )
self.assertEqual( curve.closestKey( 1 ), key1 )
self.assertEqual( curve.closestKey( 1.1 ), key1 )
def testRemoveKey( self ) :
s = Gaffer.ScriptNode()
s["n"] = Gaffer.Node()
s["n"]["user"]["f"] = Gaffer.FloatPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
curve = Gaffer.Animation.acquire( s["n"]["user"]["f"] )
key = Gaffer.Animation.Key( time = 10, value = 10 )
curve.addKey( key )
self.assertEqual( curve.getKey( key.time ), key )
self.assertEqual( curve.closestKey( 0 ), key )
curve.removeKey( key.time )
self.assertFalse( curve.getKey( key.time ) )
self.assertFalse( curve.closestKey( 0 ) )
def testSingleKey( self ) :
s = Gaffer.ScriptNode()
s["n"] = Gaffer.Node()
s["n"]["user"]["f"] = Gaffer.FloatPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
curve = Gaffer.Animation.acquire( s["n"]["user"]["f"] )
curve.addKey( Gaffer.Animation.Key( 0, 1, Gaffer.Animation.Type.Linear ) )
with Gaffer.Context() as c :
for t in range( -10, 10 ) :
c.setTime( t )
self.assertEqual( s["n"]["user"]["f"].getValue(), 1 )
def testLinear( self ) :
s = Gaffer.ScriptNode()
s["n"] = Gaffer.Node()
s["n"]["user"]["f"] = Gaffer.FloatPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
curve = Gaffer.Animation.acquire( s["n"]["user"]["f"] )
curve.addKey( Gaffer.Animation.Key( 0, 1, Gaffer.Animation.Type.Linear ) )
curve.addKey( Gaffer.Animation.Key( 1, 3, Gaffer.Animation.Type.Linear ) )
with Gaffer.Context() as c :
for i in range( 0, 10 ) :
c.setTime( i / 9.0 )
self.assertAlmostEqual( s["n"]["user"]["f"].getValue(), 1 + c.getTime() * 2, 6 )
def testStep( self ) :
s = Gaffer.ScriptNode()
s["n"] = Gaffer.Node()
s["n"]["user"]["f"] = Gaffer.FloatPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
curve = Gaffer.Animation.acquire( s["n"]["user"]["f"] )
curve.addKey( Gaffer.Animation.Key( 0, 0 ) )
curve.addKey( Gaffer.Animation.Key( 1, 1, Gaffer.Animation.Type.Linear ) )
curve.addKey( Gaffer.Animation.Key( 2, 2, Gaffer.Animation.Type.Step ) )
with Gaffer.Context() as c :
# Linear interpolation from 0 to 1.
for i in range( 0, 10 ) :
c.setTime( i / 9.0 )
self.assertAlmostEqual( s["n"]["user"]["f"].getValue(), c.getTime() )
# Step interpolation from 1 to 2
for i in range( 0, 10 ) :
c.setTime( 1 + i / 10.0 )
self.assertEqual( s["n"]["user"]["f"].getValue(), 1 )
c.setTime( 2 )
self.assertEqual( s["n"]["user"]["f"].getValue(), 2 )
def testAffects( self ) :
s = Gaffer.ScriptNode()
s["n"] = Gaffer.Node()
s["n"]["user"]["f"] = Gaffer.FloatPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
cs = GafferTest.CapturingSlot( s["n"].plugDirtiedSignal() )
curve = Gaffer.Animation.acquire( s["n"]["user"]["f"] )
self.assertTrue( s["n"]["user"]["f"] in set( [ c[0] for c in cs ] ) )
del cs[:]
curve.addKey( Gaffer.Animation.Key( 1, 1 ) )
self.assertTrue( s["n"]["user"]["f"] in set( [ c[0] for c in cs ] ) )
def testSerialisation( self ) :
s = Gaffer.ScriptNode()
s["n"] = Gaffer.Node()
s["n"]["user"]["f"] = Gaffer.FloatPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
curve = Gaffer.Animation.acquire( s["n"]["user"]["f"] )
curve.addKey( Gaffer.Animation.Key( 0, 0, Gaffer.Animation.Type.Linear ) )
curve.addKey( Gaffer.Animation.Key( 1, 1, Gaffer.Animation.Type.Linear ) )
def assertAnimation( script ) :
curve = Gaffer.Animation.acquire( script["n"]["user"]["f"] )
self.assertEqual( curve.getKey( 0 ), Gaffer.Animation.Key( 0, 0, Gaffer.Animation.Type.Linear ) )
self.assertEqual( curve.getKey( 1 ), Gaffer.Animation.Key( 1, 1, Gaffer.Animation.Type.Linear ) )
with Gaffer.Context() as c :
for i in range( 0, 10 ) :
c.setTime( i / 9.0 )
self.assertAlmostEqual( script["n"]["user"]["f"].getValue(), c.getTime() )
assertAnimation( s )
s2 = Gaffer.ScriptNode()
s2.execute( s.serialise() )
assertAnimation( s2 )
def testUndoAcquire( self ) :
s = Gaffer.ScriptNode()
s["n"] = Gaffer.Node()
s["n"]["user"]["f"] = Gaffer.FloatPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
self.assertEqual( len( s.children( Gaffer.Node ) ), 1 )
with Gaffer.UndoContext( s ) :
curve = Gaffer.Animation.acquire( s["n"]["user"]["f"] )
self.assertEqual( len( s.children( Gaffer.Node ) ), 2 )
s.undo()
self.assertEqual( len( s.children( Gaffer.Node ) ), 1 )
s.redo()
self.assertEqual( len( s.children( Gaffer.Node ) ), 2 )
def testUndoAddKey( self ) :
s = Gaffer.ScriptNode()
s["n"] = Gaffer.Node()
s["n"]["user"]["f"] = Gaffer.FloatPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
curve = Gaffer.Animation.acquire( s["n"]["user"]["f"] )
self.assertFalse( curve.getKey( 0 ) )
key1 = Gaffer.Animation.Key( 1, 0 )
key2 = Gaffer.Animation.Key( 1, 1 )
with Gaffer.UndoContext( s ) :
curve.addKey( key1 )
self.assertEqual( curve.getKey( 1 ), key1 )
with Gaffer.UndoContext( s ) :
curve.addKey( key2 )
self.assertEqual( curve.getKey( 1 ), key2 )
s.undo()
self.assertEqual( curve.getKey( 1 ), key1 )
s.undo()
self.assertFalse( curve.getKey( 1 ) )
s.redo()
self.assertEqual( curve.getKey( 1 ), key1 )
s.redo()
self.assertEqual( curve.getKey( 1 ), key2 )
def testUndoRemoveKey( self ) :
s = Gaffer.ScriptNode()
s["n"] = Gaffer.Node()
s["n"]["user"]["f"] = Gaffer.FloatPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
curve = Gaffer.Animation.acquire( s["n"]["user"]["f"] )
key = Gaffer.Animation.Key( 0, 0 )
curve.addKey( key )
self.assertEqual( curve.getKey( key.time ), key )
with Gaffer.UndoContext( s ) :
curve.removeKey( key.time )
self.assertFalse( curve.hasKey( key.time ) )
s.undo()
self.assertEqual( curve.getKey( key.time ), key )
s.redo()
self.assertFalse( curve.hasKey( key.time ) )
def testNextAndPreviousKeys( self ) :
s = Gaffer.ScriptNode()
s["n"] = Gaffer.Node()
s["n"]["user"]["f"] = Gaffer.FloatPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
key1 = Gaffer.Animation.Key( 0, 0 )
key2 = Gaffer.Animation.Key( 1, 1 )
key3 = Gaffer.Animation.Key( 2, 2 )
curve = Gaffer.Animation.acquire( s["n"]["user"]["f"] )
curve.addKey( key1 )
curve.addKey( key2 )
curve.addKey( key3 )
self.assertEqual( curve.nextKey( -1 ), key1 )
self.assertEqual( curve.nextKey( 0 ), key2 )
self.assertEqual( curve.nextKey( 0.5 ), key2 )
self.assertEqual( curve.nextKey( 1 ), key3 )
self.assertEqual( curve.nextKey( 1.5 ), key3 )
self.assertFalse( curve.nextKey( 2 ) )
self.assertFalse( curve.previousKey( -1 ) )
self.assertFalse( curve.previousKey( 0 ) )
self.assertEqual( curve.previousKey( 0.5 ), key1 )
self.assertEqual( curve.previousKey( 1 ), key1 )
self.assertEqual( curve.previousKey( 1.5 ), key2 )
self.assertEqual( curve.previousKey( 2 ), key2 )
self.assertEqual( curve.previousKey( 2.5 ), key3 )
def testAnimationWithinAReference( self ) :
s = Gaffer.ScriptNode()
s["b"] = Gaffer.Box()
s["b"]["n"] = Gaffer.Node()
s["b"]["n"] = GafferTest.AddNode()
s["b"].promotePlug( s["b"]["n"]["op1"] )
s["b"].promotePlug( s["b"]["n"]["sum"] )
self.assertTrue( s["b"].canPromotePlug( s["b"]["n"]["op2"] ) )
op2Curve = Gaffer.Animation.acquire( s["b"]["n"]["op2"] )
# Cannot promote an animated plug, because it has an input.
self.assertFalse( s["b"].canPromotePlug( s["b"]["n"]["op2"] ) )
op2Curve.addKey( Gaffer.Animation.Key( 0, 0, Gaffer.Animation.Type.Step ) )
op2Curve.addKey( Gaffer.Animation.Key( 1, 1, Gaffer.Animation.Type.Step ) )
with Gaffer.Context() as c :
self.assertEqual( s["b"]["sum"].getValue(), 0 )
c.setTime( 1 )
self.assertEqual( s["b"]["sum"].getValue(), 1 )
fileName = self.temporaryDirectory() + "/reference.grf"
s["b"].exportForReference( fileName )
s["r"] = Gaffer.Reference()
s["r"].load( fileName )
with Gaffer.Context() as c :
self.assertEqual( s["r"]["sum"].getValue(), 0 )
c.setTime( 1 )
self.assertEqual( s["r"]["sum"].getValue(), 1 )
s["r"]["op1"].setValue( 2 )
with Gaffer.Context() as c :
self.assertEqual( s["r"]["sum"].getValue(), 2 )
c.setTime( 1 )
self.assertEqual( s["r"]["sum"].getValue(), 3 )
if __name__ == "__main__":
unittest.main()
|
|
import unittest
import pickle
import cPickle
import pickletools
import copy_reg
from test.test_support import TestFailed, have_unicode, TESTFN, \
run_with_locale
# Tests that try a number of pickle protocols should have a
# for proto in protocols:
# kind of outer loop.
assert pickle.HIGHEST_PROTOCOL == cPickle.HIGHEST_PROTOCOL == 2
protocols = range(pickle.HIGHEST_PROTOCOL + 1)
# Return True if opcode code appears in the pickle, else False.
def opcode_in_pickle(code, pickle):
for op, dummy, dummy in pickletools.genops(pickle):
if op.code == code:
return True
return False
# Return the number of times opcode code appears in pickle.
def count_opcode(code, pickle):
n = 0
for op, dummy, dummy in pickletools.genops(pickle):
if op.code == code:
n += 1
return n
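# Hedged usage sketch, added for illustration (not in the original test file):
# the two helpers above inspect the opcode stream of a pickle byte string.
def _example_opcode_helpers():
    p = pickle.dumps([1, 2, 3], 2)
    assert opcode_in_pickle(pickle.PROTO, p)  # protocol 2 pickles start with PROTO
    return count_opcode(pickle.BININT1, p)    # 3: one BININT1 per small int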
# We can't very well test the extension registry without putting known stuff
# in it, but we have to be careful to restore its original state. Code
# should do this:
#
# e = ExtensionSaver(extension_code)
# try:
# fiddle w/ the extension registry's stuff for extension_code
# finally:
# e.restore()
class ExtensionSaver:
# Remember current registration for code (if any), and remove it (if
# there is one).
def __init__(self, code):
self.code = code
if code in copy_reg._inverted_registry:
self.pair = copy_reg._inverted_registry[code]
copy_reg.remove_extension(self.pair[0], self.pair[1], code)
else:
self.pair = None
# Restore previous registration for code.
def restore(self):
code = self.code
curpair = copy_reg._inverted_registry.get(code)
if curpair is not None:
copy_reg.remove_extension(curpair[0], curpair[1], code)
pair = self.pair
if pair is not None:
copy_reg.add_extension(pair[0], pair[1], code)
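# Hedged example of the save/restore pattern described in the comment above;
# the extension code 0x123456 and the registered name are arbitrary. This is
# the same pattern AbstractPickleTests.produce_global_ext uses further down.
def _example_extension_saver():
    e = ExtensionSaver(0x123456)
    try:
        copy_reg.add_extension(__name__, "C", 0x123456)
        # ... pickle/unpickle objects that rely on the extension code ...
    finally:
        e.restore()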
class C:
def __cmp__(self, other):
return cmp(self.__dict__, other.__dict__)
import __main__
__main__.C = C
C.__module__ = "__main__"
class myint(int):
def __init__(self, x):
self.str = str(x)
class initarg(C):
def __init__(self, a, b):
self.a = a
self.b = b
def __getinitargs__(self):
return self.a, self.b
class metaclass(type):
pass
class use_metaclass(object):
__metaclass__ = metaclass
# DATA0 .. DATA2 are the pickles we expect under the various protocols, for
# the object returned by create_data().
# break into multiple strings to avoid confusing font-lock-mode
DATA0 = """(lp1
I0
aL1L
aF2
ac__builtin__
complex
p2
""" + \
"""(F3
F0
tRp3
aI1
aI-1
aI255
aI-255
aI-256
aI65535
aI-65535
aI-65536
aI2147483647
aI-2147483647
aI-2147483648
a""" + \
"""(S'abc'
p4
g4
""" + \
"""(i__main__
C
p5
""" + \
"""(dp6
S'foo'
p7
I1
sS'bar'
p8
I2
sbg5
tp9
ag9
aI5
a.
"""
# Disassembly of DATA0.
DATA0_DIS = """\
0: ( MARK
1: l LIST (MARK at 0)
2: p PUT 1
5: I INT 0
8: a APPEND
9: L LONG 1L
13: a APPEND
14: F FLOAT 2.0
17: a APPEND
18: c GLOBAL '__builtin__ complex'
39: p PUT 2
42: ( MARK
43: F FLOAT 3.0
46: F FLOAT 0.0
49: t TUPLE (MARK at 42)
50: R REDUCE
51: p PUT 3
54: a APPEND
55: I INT 1
58: a APPEND
59: I INT -1
63: a APPEND
64: I INT 255
69: a APPEND
70: I INT -255
76: a APPEND
77: I INT -256
83: a APPEND
84: I INT 65535
91: a APPEND
92: I INT -65535
100: a APPEND
101: I INT -65536
109: a APPEND
110: I INT 2147483647
122: a APPEND
123: I INT -2147483647
136: a APPEND
137: I INT -2147483648
150: a APPEND
151: ( MARK
152: S STRING 'abc'
159: p PUT 4
162: g GET 4
165: ( MARK
166: i INST '__main__ C' (MARK at 165)
178: p PUT 5
181: ( MARK
182: d DICT (MARK at 181)
183: p PUT 6
186: S STRING 'foo'
193: p PUT 7
196: I INT 1
199: s SETITEM
200: S STRING 'bar'
207: p PUT 8
210: I INT 2
213: s SETITEM
214: b BUILD
215: g GET 5
218: t TUPLE (MARK at 151)
219: p PUT 9
222: a APPEND
223: g GET 9
226: a APPEND
227: I INT 5
230: a APPEND
231: . STOP
highest protocol among opcodes = 0
"""
DATA1 = (']q\x01(K\x00L1L\nG@\x00\x00\x00\x00\x00\x00\x00'
'c__builtin__\ncomplex\nq\x02(G@\x08\x00\x00\x00\x00\x00'
'\x00G\x00\x00\x00\x00\x00\x00\x00\x00tRq\x03K\x01J\xff\xff'
'\xff\xffK\xffJ\x01\xff\xff\xffJ\x00\xff\xff\xffM\xff\xff'
'J\x01\x00\xff\xffJ\x00\x00\xff\xffJ\xff\xff\xff\x7fJ\x01\x00'
'\x00\x80J\x00\x00\x00\x80(U\x03abcq\x04h\x04(c__main__\n'
'C\nq\x05oq\x06}q\x07(U\x03fooq\x08K\x01U\x03barq\tK\x02ubh'
'\x06tq\nh\nK\x05e.'
)
# Disassembly of DATA1.
DATA1_DIS = """\
0: ] EMPTY_LIST
1: q BINPUT 1
3: ( MARK
4: K BININT1 0
6: L LONG 1L
10: G BINFLOAT 2.0
19: c GLOBAL '__builtin__ complex'
40: q BINPUT 2
42: ( MARK
43: G BINFLOAT 3.0
52: G BINFLOAT 0.0
61: t TUPLE (MARK at 42)
62: R REDUCE
63: q BINPUT 3
65: K BININT1 1
67: J BININT -1
72: K BININT1 255
74: J BININT -255
79: J BININT -256
84: M BININT2 65535
87: J BININT -65535
92: J BININT -65536
97: J BININT 2147483647
102: J BININT -2147483647
107: J BININT -2147483648
112: ( MARK
113: U SHORT_BINSTRING 'abc'
118: q BINPUT 4
120: h BINGET 4
122: ( MARK
123: c GLOBAL '__main__ C'
135: q BINPUT 5
137: o OBJ (MARK at 122)
138: q BINPUT 6
140: } EMPTY_DICT
141: q BINPUT 7
143: ( MARK
144: U SHORT_BINSTRING 'foo'
149: q BINPUT 8
151: K BININT1 1
153: U SHORT_BINSTRING 'bar'
158: q BINPUT 9
160: K BININT1 2
162: u SETITEMS (MARK at 143)
163: b BUILD
164: h BINGET 6
166: t TUPLE (MARK at 112)
167: q BINPUT 10
169: h BINGET 10
171: K BININT1 5
173: e APPENDS (MARK at 3)
174: . STOP
highest protocol among opcodes = 1
"""
DATA2 = ('\x80\x02]q\x01(K\x00\x8a\x01\x01G@\x00\x00\x00\x00\x00\x00\x00'
'c__builtin__\ncomplex\nq\x02G@\x08\x00\x00\x00\x00\x00\x00G\x00'
'\x00\x00\x00\x00\x00\x00\x00\x86Rq\x03K\x01J\xff\xff\xff\xffK'
'\xffJ\x01\xff\xff\xffJ\x00\xff\xff\xffM\xff\xffJ\x01\x00\xff\xff'
'J\x00\x00\xff\xffJ\xff\xff\xff\x7fJ\x01\x00\x00\x80J\x00\x00\x00'
'\x80(U\x03abcq\x04h\x04(c__main__\nC\nq\x05oq\x06}q\x07(U\x03foo'
'q\x08K\x01U\x03barq\tK\x02ubh\x06tq\nh\nK\x05e.')
# Disassembly of DATA2.
DATA2_DIS = """\
0: \x80 PROTO 2
2: ] EMPTY_LIST
3: q BINPUT 1
5: ( MARK
6: K BININT1 0
8: \x8a LONG1 1L
11: G BINFLOAT 2.0
20: c GLOBAL '__builtin__ complex'
41: q BINPUT 2
43: G BINFLOAT 3.0
52: G BINFLOAT 0.0
61: \x86 TUPLE2
62: R REDUCE
63: q BINPUT 3
65: K BININT1 1
67: J BININT -1
72: K BININT1 255
74: J BININT -255
79: J BININT -256
84: M BININT2 65535
87: J BININT -65535
92: J BININT -65536
97: J BININT 2147483647
102: J BININT -2147483647
107: J BININT -2147483648
112: ( MARK
113: U SHORT_BINSTRING 'abc'
118: q BINPUT 4
120: h BINGET 4
122: ( MARK
123: c GLOBAL '__main__ C'
135: q BINPUT 5
137: o OBJ (MARK at 122)
138: q BINPUT 6
140: } EMPTY_DICT
141: q BINPUT 7
143: ( MARK
144: U SHORT_BINSTRING 'foo'
149: q BINPUT 8
151: K BININT1 1
153: U SHORT_BINSTRING 'bar'
158: q BINPUT 9
160: K BININT1 2
162: u SETITEMS (MARK at 143)
163: b BUILD
164: h BINGET 6
166: t TUPLE (MARK at 112)
167: q BINPUT 10
169: h BINGET 10
171: K BININT1 5
173: e APPENDS (MARK at 5)
174: . STOP
highest protocol among opcodes = 2
"""
def create_data():
c = C()
c.foo = 1
c.bar = 2
x = [0, 1L, 2.0, 3.0+0j]
# Append some integer test cases at cPickle.c's internal size
# cutoffs.
uint1max = 0xff
uint2max = 0xffff
int4max = 0x7fffffff
x.extend([1, -1,
uint1max, -uint1max, -uint1max-1,
uint2max, -uint2max, -uint2max-1,
int4max, -int4max, -int4max-1])
y = ('abc', 'abc', c, c)
x.append(y)
x.append(y)
x.append(5)
return x
class AbstractPickleTests(unittest.TestCase):
# Subclass must define self.dumps, self.loads, self.error.
_testdata = create_data()
def setUp(self):
pass
def test_misc(self):
# test various datatypes not tested by testdata
for proto in protocols:
x = myint(4)
s = self.dumps(x, proto)
y = self.loads(s)
self.assertEqual(x, y)
x = (1, ())
s = self.dumps(x, proto)
y = self.loads(s)
self.assertEqual(x, y)
x = initarg(1, x)
s = self.dumps(x, proto)
y = self.loads(s)
self.assertEqual(x, y)
# XXX test __reduce__ protocol?
def test_roundtrip_equality(self):
expected = self._testdata
for proto in protocols:
s = self.dumps(expected, proto)
got = self.loads(s)
self.assertEqual(expected, got)
def test_load_from_canned_string(self):
expected = self._testdata
for canned in DATA0, DATA1, DATA2:
got = self.loads(canned)
self.assertEqual(expected, got)
# There are gratuitous differences between pickles produced by
# pickle and cPickle, largely because cPickle starts PUT indices at
# 1 and pickle starts them at 0. See XXX comment in cPickle's put2() --
# there's a comment with an exclamation point there whose meaning
# is a mystery. cPickle also suppresses PUT for objects with a refcount
# of 1.
def dont_test_disassembly(self):
from cStringIO import StringIO
from pickletools import dis
for proto, expected in (0, DATA0_DIS), (1, DATA1_DIS):
s = self.dumps(self._testdata, proto)
filelike = StringIO()
dis(s, out=filelike)
got = filelike.getvalue()
self.assertEqual(expected, got)
def test_recursive_list(self):
l = []
l.append(l)
for proto in protocols:
s = self.dumps(l, proto)
x = self.loads(s)
self.assertEqual(len(x), 1)
self.assert_(x is x[0])
def test_recursive_dict(self):
d = {}
d[1] = d
for proto in protocols:
s = self.dumps(d, proto)
x = self.loads(s)
self.assertEqual(x.keys(), [1])
self.assert_(x[1] is x)
def test_recursive_inst(self):
i = C()
i.attr = i
for proto in protocols:
            s = self.dumps(i, proto)
x = self.loads(s)
self.assertEqual(dir(x), dir(i))
self.assert_(x.attr is x)
def test_recursive_multi(self):
l = []
d = {1:l}
i = C()
i.attr = d
l.append(i)
for proto in protocols:
s = self.dumps(l, proto)
x = self.loads(s)
self.assertEqual(len(x), 1)
self.assertEqual(dir(x[0]), dir(i))
self.assertEqual(x[0].attr.keys(), [1])
self.assert_(x[0].attr[1] is x)
def test_garyp(self):
self.assertRaises(self.error, self.loads, 'garyp')
def test_insecure_strings(self):
insecure = ["abc", "2 + 2", # not quoted
#"'abc' + 'def'", # not a single quoted string
"'abc", # quote is not closed
"'abc\"", # open quote and close quote don't match
"'abc' ?", # junk after close quote
"'\\'", # trailing backslash
# some tests of the quoting rules
#"'abc\"\''",
#"'\\\\a\'\'\'\\\'\\\\\''",
]
for s in insecure:
buf = "S" + s + "\012p0\012."
self.assertRaises(ValueError, self.loads, buf)
if have_unicode:
def test_unicode(self):
endcases = [u'', u'<\\u>', u'<\\\u1234>', u'<\n>',
u'<\\>', u'<\\\U00012345>']
for proto in protocols:
for u in endcases:
p = self.dumps(u, proto)
u2 = self.loads(p)
self.assertEqual(u2, u)
def test_unicode_high_plane(self):
t = u'\U00012345'
for proto in protocols:
p = self.dumps(t, proto)
t2 = self.loads(p)
self.assertEqual(t2, t)
def test_ints(self):
import sys
for proto in protocols:
n = sys.maxint
while n:
for expected in (-n, n):
s = self.dumps(expected, proto)
n2 = self.loads(s)
self.assertEqual(expected, n2)
n = n >> 1
def test_maxint64(self):
maxint64 = (1L << 63) - 1
data = 'I' + str(maxint64) + '\n.'
got = self.loads(data)
self.assertEqual(got, maxint64)
        # Also try with a bogus literal.
data = 'I' + str(maxint64) + 'JUNK\n.'
self.assertRaises(ValueError, self.loads, data)
def test_long(self):
for proto in protocols:
# 256 bytes is where LONG4 begins.
for nbits in 1, 8, 8*254, 8*255, 8*256, 8*257:
nbase = 1L << nbits
for npos in nbase-1, nbase, nbase+1:
for n in npos, -npos:
pickle = self.dumps(n, proto)
got = self.loads(pickle)
self.assertEqual(n, got)
# Try a monster. This is quadratic-time in protos 0 & 1, so don't
# bother with those.
nbase = long("deadbeeffeedface", 16)
nbase += nbase << 1000000
for n in nbase, -nbase:
p = self.dumps(n, 2)
got = self.loads(p)
self.assertEqual(n, got)
def test_float(self):
test_values = [0.0, 4.94e-324, 1e-310, 7e-308, 6.626e-34, 0.1, 0.5,
3.14, 263.44582062374053, 6.022e23, 1e30]
test_values = test_values + [-x for x in test_values]
for proto in protocols:
for value in test_values:
pickle = self.dumps(value, proto)
got = self.loads(pickle)
self.assertEqual(value, got)
@run_with_locale('LC_ALL', 'de_DE', 'fr_FR')
def test_float_format(self):
# make sure that floats are formatted locale independent
self.assertEqual(self.dumps(1.2)[0:3], 'F1.')
def test_reduce(self):
pass
def test_getinitargs(self):
pass
def test_metaclass(self):
a = use_metaclass()
for proto in protocols:
s = self.dumps(a, proto)
b = self.loads(s)
self.assertEqual(a.__class__, b.__class__)
def test_structseq(self):
import time
import os
t = time.localtime()
for proto in protocols:
s = self.dumps(t, proto)
u = self.loads(s)
self.assertEqual(t, u)
if hasattr(os, "stat"):
t = os.stat(os.curdir)
s = self.dumps(t, proto)
u = self.loads(s)
self.assertEqual(t, u)
if hasattr(os, "statvfs"):
t = os.statvfs(os.curdir)
s = self.dumps(t, proto)
u = self.loads(s)
self.assertEqual(t, u)
# Tests for protocol 2
def test_proto(self):
build_none = pickle.NONE + pickle.STOP
for proto in protocols:
expected = build_none
if proto >= 2:
expected = pickle.PROTO + chr(proto) + expected
p = self.dumps(None, proto)
self.assertEqual(p, expected)
oob = protocols[-1] + 1 # a future protocol
badpickle = pickle.PROTO + chr(oob) + build_none
try:
self.loads(badpickle)
except ValueError, detail:
self.failUnless(str(detail).startswith(
"unsupported pickle protocol"))
else:
self.fail("expected bad protocol number to raise ValueError")
def test_long1(self):
x = 12345678910111213141516178920L
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assertEqual(x, y)
self.assertEqual(opcode_in_pickle(pickle.LONG1, s), proto >= 2)
def test_long4(self):
x = 12345678910111213141516178920L << (256*8)
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assertEqual(x, y)
self.assertEqual(opcode_in_pickle(pickle.LONG4, s), proto >= 2)
def test_short_tuples(self):
# Map (proto, len(tuple)) to expected opcode.
expected_opcode = {(0, 0): pickle.TUPLE,
(0, 1): pickle.TUPLE,
(0, 2): pickle.TUPLE,
(0, 3): pickle.TUPLE,
(0, 4): pickle.TUPLE,
(1, 0): pickle.EMPTY_TUPLE,
(1, 1): pickle.TUPLE,
(1, 2): pickle.TUPLE,
(1, 3): pickle.TUPLE,
(1, 4): pickle.TUPLE,
(2, 0): pickle.EMPTY_TUPLE,
(2, 1): pickle.TUPLE1,
(2, 2): pickle.TUPLE2,
(2, 3): pickle.TUPLE3,
(2, 4): pickle.TUPLE,
}
a = ()
b = (1,)
c = (1, 2)
d = (1, 2, 3)
e = (1, 2, 3, 4)
for proto in protocols:
for x in a, b, c, d, e:
s = self.dumps(x, proto)
y = self.loads(s)
self.assertEqual(x, y, (proto, x, s, y))
expected = expected_opcode[proto, len(x)]
self.assertEqual(opcode_in_pickle(expected, s), True)
def test_singletons(self):
# Map (proto, singleton) to expected opcode.
expected_opcode = {(0, None): pickle.NONE,
(1, None): pickle.NONE,
(2, None): pickle.NONE,
(0, True): pickle.INT,
(1, True): pickle.INT,
(2, True): pickle.NEWTRUE,
(0, False): pickle.INT,
(1, False): pickle.INT,
(2, False): pickle.NEWFALSE,
}
for proto in protocols:
for x in None, False, True:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_(x is y, (proto, x, s, y))
expected = expected_opcode[proto, x]
self.assertEqual(opcode_in_pickle(expected, s), True)
def test_newobj_tuple(self):
x = MyTuple([1, 2, 3])
x.foo = 42
x.bar = "hello"
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assertEqual(tuple(x), tuple(y))
self.assertEqual(x.__dict__, y.__dict__)
def test_newobj_list(self):
x = MyList([1, 2, 3])
x.foo = 42
x.bar = "hello"
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assertEqual(list(x), list(y))
self.assertEqual(x.__dict__, y.__dict__)
def test_newobj_generic(self):
for proto in protocols:
for C in myclasses:
B = C.__base__
x = C(C.sample)
x.foo = 42
s = self.dumps(x, proto)
y = self.loads(s)
detail = (proto, C, B, x, y, type(y))
self.assertEqual(B(x), B(y), detail)
self.assertEqual(x.__dict__, y.__dict__, detail)
# Register a type with copy_reg, with extension code extcode. Pickle
# an object of that type. Check that the resulting pickle uses opcode
# (EXT[124]) under proto 2, and not in proto 1.
def produce_global_ext(self, extcode, opcode):
e = ExtensionSaver(extcode)
try:
copy_reg.add_extension(__name__, "MyList", extcode)
x = MyList([1, 2, 3])
x.foo = 42
x.bar = "hello"
# Dump using protocol 1 for comparison.
s1 = self.dumps(x, 1)
self.assert_(__name__ in s1)
self.assert_("MyList" in s1)
self.assertEqual(opcode_in_pickle(opcode, s1), False)
y = self.loads(s1)
self.assertEqual(list(x), list(y))
self.assertEqual(x.__dict__, y.__dict__)
# Dump using protocol 2 for test.
s2 = self.dumps(x, 2)
self.assert_(__name__ not in s2)
self.assert_("MyList" not in s2)
self.assertEqual(opcode_in_pickle(opcode, s2), True)
y = self.loads(s2)
self.assertEqual(list(x), list(y))
self.assertEqual(x.__dict__, y.__dict__)
finally:
e.restore()
def test_global_ext1(self):
self.produce_global_ext(0x00000001, pickle.EXT1) # smallest EXT1 code
self.produce_global_ext(0x000000ff, pickle.EXT1) # largest EXT1 code
def test_global_ext2(self):
self.produce_global_ext(0x00000100, pickle.EXT2) # smallest EXT2 code
self.produce_global_ext(0x0000ffff, pickle.EXT2) # largest EXT2 code
self.produce_global_ext(0x0000abcd, pickle.EXT2) # check endianness
def test_global_ext4(self):
self.produce_global_ext(0x00010000, pickle.EXT4) # smallest EXT4 code
self.produce_global_ext(0x7fffffff, pickle.EXT4) # largest EXT4 code
self.produce_global_ext(0x12abcdef, pickle.EXT4) # check endianness
def test_list_chunking(self):
n = 10 # too small to chunk
x = range(n)
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assertEqual(x, y)
num_appends = count_opcode(pickle.APPENDS, s)
self.assertEqual(num_appends, proto > 0)
n = 2500 # expect at least two chunks when proto > 0
x = range(n)
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assertEqual(x, y)
num_appends = count_opcode(pickle.APPENDS, s)
if proto == 0:
self.assertEqual(num_appends, 0)
else:
self.failUnless(num_appends >= 2)
def test_dict_chunking(self):
n = 10 # too small to chunk
x = dict.fromkeys(range(n))
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assertEqual(x, y)
num_setitems = count_opcode(pickle.SETITEMS, s)
self.assertEqual(num_setitems, proto > 0)
n = 2500 # expect at least two chunks when proto > 0
x = dict.fromkeys(range(n))
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assertEqual(x, y)
num_setitems = count_opcode(pickle.SETITEMS, s)
if proto == 0:
self.assertEqual(num_setitems, 0)
else:
self.failUnless(num_setitems >= 2)
def test_simple_newobj(self):
x = object.__new__(SimpleNewObj) # avoid __init__
x.abc = 666
for proto in protocols:
s = self.dumps(x, proto)
self.assertEqual(opcode_in_pickle(pickle.NEWOBJ, s), proto >= 2)
y = self.loads(s) # will raise TypeError if __init__ called
self.assertEqual(y.abc, 666)
self.assertEqual(x.__dict__, y.__dict__)
def test_newobj_list_slots(self):
x = SlotList([1, 2, 3])
x.foo = 42
x.bar = "hello"
s = self.dumps(x, 2)
y = self.loads(s)
self.assertEqual(list(x), list(y))
self.assertEqual(x.__dict__, y.__dict__)
self.assertEqual(x.foo, y.foo)
self.assertEqual(x.bar, y.bar)
def test_reduce_overrides_default_reduce_ex(self):
for proto in 0, 1, 2:
x = REX_one()
self.assertEqual(x._reduce_called, 0)
s = self.dumps(x, proto)
self.assertEqual(x._reduce_called, 1)
y = self.loads(s)
self.assertEqual(y._reduce_called, 0)
def test_reduce_ex_called(self):
for proto in 0, 1, 2:
x = REX_two()
self.assertEqual(x._proto, None)
s = self.dumps(x, proto)
self.assertEqual(x._proto, proto)
y = self.loads(s)
self.assertEqual(y._proto, None)
def test_reduce_ex_overrides_reduce(self):
for proto in 0, 1, 2:
x = REX_three()
self.assertEqual(x._proto, None)
s = self.dumps(x, proto)
self.assertEqual(x._proto, proto)
y = self.loads(s)
self.assertEqual(y._proto, None)
def test_reduce_ex_calls_base(self):
for proto in 0, 1, 2:
x = REX_four()
self.assertEqual(x._proto, None)
s = self.dumps(x, proto)
self.assertEqual(x._proto, proto)
y = self.loads(s)
self.assertEqual(y._proto, proto)
def test_reduce_calls_base(self):
for proto in 0, 1, 2:
x = REX_five()
self.assertEqual(x._reduce_called, 0)
s = self.dumps(x, proto)
self.assertEqual(x._reduce_called, 1)
y = self.loads(s)
self.assertEqual(y._reduce_called, 1)
def test_reduce_bad_iterator(self):
# Issue4176: crash when 4th and 5th items of __reduce__()
# are not iterators
class C(object):
def __reduce__(self):
# 4th item is not an iterator
return list, (), None, [], None
class D(object):
def __reduce__(self):
# 5th item is not an iterator
return dict, (), None, None, []
        # Protocol 0 is less strict and also accepts iterables.
for proto in 0, 1, 2:
try:
self.dumps(C(), proto)
except (AttributeError, pickle.PickleError, cPickle.PickleError):
pass
try:
self.dumps(D(), proto)
except (AttributeError, pickle.PickleError, cPickle.PickleError):
pass
# Test classes for reduce_ex
class REX_one(object):
_reduce_called = 0
def __reduce__(self):
self._reduce_called = 1
return REX_one, ()
# No __reduce_ex__ here, but inheriting it from object
class REX_two(object):
_proto = None
def __reduce_ex__(self, proto):
self._proto = proto
return REX_two, ()
# No __reduce__ here, but inheriting it from object
class REX_three(object):
_proto = None
def __reduce_ex__(self, proto):
self._proto = proto
return REX_two, ()
def __reduce__(self):
raise TestFailed, "This __reduce__ shouldn't be called"
class REX_four(object):
_proto = None
def __reduce_ex__(self, proto):
self._proto = proto
return object.__reduce_ex__(self, proto)
# Calling base class method should succeed
class REX_five(object):
_reduce_called = 0
def __reduce__(self):
self._reduce_called = 1
return object.__reduce__(self)
# This one used to fail with infinite recursion
# Test classes for newobj
class MyInt(int):
sample = 1
class MyLong(long):
sample = 1L
class MyFloat(float):
sample = 1.0
class MyComplex(complex):
sample = 1.0 + 0.0j
class MyStr(str):
sample = "hello"
class MyUnicode(unicode):
sample = u"hello \u1234"
class MyTuple(tuple):
sample = (1, 2, 3)
class MyList(list):
sample = [1, 2, 3]
class MyDict(dict):
sample = {"a": 1, "b": 2}
myclasses = [MyInt, MyLong, MyFloat,
MyComplex,
MyStr, MyUnicode,
MyTuple, MyList, MyDict]
class SlotList(MyList):
__slots__ = ["foo"]
class SimpleNewObj(object):
def __init__(self, a, b, c):
# raise an error, to make sure this isn't called
raise TypeError("SimpleNewObj.__init__() didn't expect to get called")
class AbstractPickleModuleTests(unittest.TestCase):
def test_dump_closed_file(self):
import os
f = open(TESTFN, "w")
try:
f.close()
self.assertRaises(ValueError, self.module.dump, 123, f)
finally:
os.remove(TESTFN)
def test_load_closed_file(self):
import os
f = open(TESTFN, "w")
try:
f.close()
            self.assertRaises(ValueError, self.module.load, f)
finally:
os.remove(TESTFN)
def test_highest_protocol(self):
# Of course this needs to be changed when HIGHEST_PROTOCOL changes.
self.assertEqual(self.module.HIGHEST_PROTOCOL, 2)
def test_callapi(self):
from cStringIO import StringIO
f = StringIO()
# With and without keyword arguments
self.module.dump(123, f, -1)
self.module.dump(123, file=f, protocol=-1)
self.module.dumps(123, -1)
self.module.dumps(123, protocol=-1)
self.module.Pickler(f, -1)
self.module.Pickler(f, protocol=-1)
class AbstractPersistentPicklerTests(unittest.TestCase):
# This class defines persistent_id() and persistent_load()
# functions that should be used by the pickler. All even integers
# are pickled using persistent ids.
def persistent_id(self, object):
if isinstance(object, int) and object % 2 == 0:
self.id_count += 1
return str(object)
else:
return None
def persistent_load(self, oid):
self.load_count += 1
object = int(oid)
assert object % 2 == 0
return object
def test_persistence(self):
self.id_count = 0
self.load_count = 0
L = range(10)
self.assertEqual(self.loads(self.dumps(L)), L)
self.assertEqual(self.id_count, 5)
self.assertEqual(self.load_count, 5)
def test_bin_persistence(self):
self.id_count = 0
self.load_count = 0
L = range(10)
self.assertEqual(self.loads(self.dumps(L, 1)), L)
self.assertEqual(self.id_count, 5)
self.assertEqual(self.load_count, 5)
|
|
import glob
import os
import datetime
from collections import OrderedDict
import pandas as pd
import numpy as np
from sklearn.manifold import TSNE
import scipy.stats as stats
import scipy.sparse as sparse
from sparse_dataframe import SparseDataFrame
def combine_sdf_files(run_folder, folders, verbose=False, **kwargs):
"""function for concatenating SparseDataFrames together"""
combined = SparseDataFrame()
combined.rows = []
columns = set()
for folder in folders:
filename = os.path.join(run_folder, folder, f'{folder}.mus.cell-gene.npz')
if verbose:
print(f'Reading {filename} ...')
sdf = SparseDataFrame(filename)
columns.add(tuple(sdf.columns))
combined.rows.extend(sdf.rows)
if combined.matrix is None:
combined.matrix = sdf.matrix
else:
combined.matrix = sparse.vstack((combined.matrix, sdf.matrix),
format='csr')
assert len(columns) == 1
combined.columns = columns.pop()
return combined
def combine_csv_files(folder, globber, verbose=False, **kwargs):
"""generic function for concatentating a bunch of csv files into a single
pandas Dataframe"""
dfs = []
for filename in glob.iglob(os.path.join(folder, globber)):
if verbose:
print(f'Reading {filename} ...')
df = pd.read_csv(filename, **kwargs)
dfs.append(df)
combined = pd.concat(dfs)
return combined
def maybe_to_numeric(series):
try:
return pd.to_numeric(series)
except ValueError:
return series
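# Tiny hedged illustration (added, not part of the original module):
# maybe_to_numeric coerces a Series when possible and otherwise returns it
# unchanged instead of raising.
def _example_maybe_to_numeric():
    assert maybe_to_numeric(pd.Series(['1', '2'])).dtype.kind in 'iuf'
    assert maybe_to_numeric(pd.Series(['a', 'b'])).dtype == object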
def clean_mapping_stats(mapping_stats_original, convert_to_percentage=None):
"""Remove whitespace from all values and convert to numbers"""
if convert_to_percentage is None:
convert_to_percentage = set()
mapping_stats_original = mapping_stats_original.applymap(
lambda x: (x.replace(',', '').strip().strip('%')
if isinstance(x, str) else x))
numeric = mapping_stats_original.apply(maybe_to_numeric)
numeric.columns = numeric.columns.map(str.strip)
# for 10X mapping stats
numeric.columns = numeric.columns.map(
lambda x: ('Percent {}'.format(x.replace('Fraction ', ''))
if x in convert_to_percentage else x)
)
return numeric
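# Hedged illustration (column names and values here are made up): commas and
# '%' signs are stripped, values become numeric, and any column listed in
# convert_to_percentage is renamed from 'Fraction ...' to 'Percent ...'.
def _example_clean_mapping_stats():
    raw = pd.DataFrame({
        'Number of input reads ': ['1,000', '2,500'],
        'Fraction Reads in Cells': ['85.0%', '90.2%'],
    })
    cleaned = clean_mapping_stats(
        raw, convert_to_percentage={'Fraction Reads in Cells'})
    # resulting columns: 'Number of input reads', 'Percent Reads in Cells'
    return cleaned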
def diff_exp(matrix, group1, group2, index):
"""Computes differential expression between group 1 and group 2
for each column in the dataframe counts.
Returns a dataframe of Z-scores and p-values."""
g1 = matrix[group1, :]
g2 = matrix[group2, :]
g1mu = g1.mean(0)
g2mu = g2.mean(0)
mean_diff = np.asarray(g1mu - g2mu).flatten()
# E[X^2] - (E[X])^2
pooled_sd = np.sqrt(
((g1.power(2)).mean(0) - np.power(g1mu, 2)) / len(group1)
+ ((g2.power(2)).mean(0) - np.power(g2mu, 2)) / len(group2))
pooled_sd = np.asarray(pooled_sd).flatten()
z_scores = np.zeros_like(pooled_sd)
nz = pooled_sd > 0
z_scores[nz] = np.nan_to_num(mean_diff[nz] / pooled_sd[nz])
    # two-sided p-value from the standard normal (z-test, not a t-test)
p_vals = (1 - stats.norm.cdf(np.abs(z_scores))) * 2
df = pd.DataFrame(OrderedDict([('z', z_scores), ('p', p_vals)]),
index=index)
return df
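# Minimal hedged usage sketch of diff_exp (added for illustration; the toy
# counts and gene names are made up). The matrix is expected to support
# .power(), e.g. a scipy.sparse csr_matrix, and the two groups are row indices.
def _example_diff_exp():
    toy = sparse.csr_matrix(np.array([[1., 0., 3.],
                                      [2., 1., 4.],
                                      [0., 5., 0.],
                                      [1., 6., 1.]]))
    genes = ['geneA', 'geneB', 'geneC']
    # z-scores and p-values for group {rows 0, 1} vs group {rows 2, 3}
    return diff_exp(toy, [0, 1], [2, 3], index=genes)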
class Plates(object):
# Names of commonly accessed columns
MEAN_READS_PER_CELL = 'Mean reads per well'
MEDIAN_GENES_PER_CELL = 'Median genes per well'
PERCENT_ERCC = 'Percent ERCC'
PERCENT_MAPPED_READS = 'Percent mapped to genome'
# maybe we should change this to the right thing
SAMPLE_MAPPING = 'WELL_MAPPING'
def __init__(self, data_folder, metadata, genes_to_drop='Rn45s',
verbose=False, nrows=None):
plates_folder = os.path.join(data_folder, 'plates')
counts = combine_csv_files(
plates_folder, '*.htseq-count-by-cell.csv',
index_col=[0, 1, 2, 3], verbose=verbose, nrows=nrows)
mapping_stats = combine_csv_files(
plates_folder, '*.log-by-cell.csv',
index_col=[0, 1, 2, 3], verbose=verbose)
self.genes, self.cell_metadata, self.mapping_stats = \
self.clean_and_reformat(counts, mapping_stats)
self.plate_summaries = self.calculate_plate_summaries()
original_metadata = pd.read_csv(metadata, index_col=0)
self.plate_metadata = self.clean_plate_metadata(original_metadata)
self.plate_metadata = self.plate_metadata.loc[
self.plate_summaries.index]
if not os.path.exists(os.path.join(data_folder, 'coords')):
os.mkdir(os.path.join(data_folder, 'coords'))
self.bulk_smushed_cache_file = os.path.join(data_folder, 'coords',
'bulk_smushed.csv')
self.cell_smushed_cache_file = os.path.join(data_folder, 'coords',
'cell_smushed.pickle')
self.bulk_smushed = self.compute_bulk_smushing()
self.cell_smushed = self.compute_cell_smushing()
self.gene_names = sorted(self.genes.columns)
self.plate_metadata_features = sorted(self.plate_metadata.columns)
# Remove pesky genes
self.genes = self.genes.drop(genes_to_drop, axis=1)
# Get a counts per million rescaling of the genes
self.counts_per_million = self.genes.divide(self.genes.sum(axis=1),
axis=0) * 1e6
self.top_genes = self.compute_top_genes_per_cell()
self.data = {'genes': self.genes,
'mapping_stats': self.mapping_stats,
'cell_metadata': self.cell_metadata,
'plate_metadata': self.plate_metadata,
'plate_summaries': self.plate_summaries}
def __repr__(self):
n_plates = self.plate_summaries.shape[0]
n_barcodes = self.genes.shape[0]
s = f'This is an object holding data for {n_plates} plates and ' \
f'{n_barcodes} barcodes.\nHere are the accessible dataframes:\n'
for name, df in self.data.items():
s += f'\t"{name}" table dimensions: ' + str(df.shape) + '\n'
return s
@staticmethod
def clean_and_reformat(counts, mapping_stats):
"""Move metadata information into separate dataframe and simplify ids
Parameters
----------
counts : pandas.DataFrame
A (samples, genes) dataframe of integer number of reads that mapped
to a gene in a cell, but also has extra columns of ERCC mapping and
htseq-count output that we want to remove
mapping_stats : pandas.DataFrame
A (samples, mapping_statistics) dataframe of the time the alignment
began, number of input reads, number of mapped reads, and other
information output by STAR, but everything is a string instead of
numbers which makes us sad
Returns
-------
genes : pandas.DataFrame
A (samples, genes) dataframe of integer number of reads that mapped
to a gene in a cell
cell_metadata : pandas.DataFrame
A (samples, sample_features) dataframe of number of detected genes,
total reads, ercc counts, and "WELL_MAPPING" (really,
plate mapping)
mapping_stats : pandas.DataFrame
A (samples, mapping_statistics) dataframe of the time the alignment
began, number of input reads, number of mapped reads, and other
information output by STAR, with numbers properly formatted
"""
mapping_stats = clean_mapping_stats(mapping_stats)
cell_metadata = counts.index.to_frame()
sample_ids = cell_metadata.index.droplevel([1, 2, 3])
cell_metadata.index = sample_ids
mapping_stats.index = sample_ids
counts.index = sample_ids
# Extract htseq-count outputs and save as separate files
cols = [x for x in counts if x.startswith('__')]
count_stats = counts[cols]
count_stats.columns = [x.strip('_') for x in count_stats]
# Separate spike-ins (ERCCs) and genes
ercc_names = [col for col in counts.columns[3:] if 'ERCC-' in col]
gene_names = [col for col in counts.columns[3:] if
'ERCC-' not in col and col[0] != '_']
cell_metadata['total_reads'] = counts.sum(axis=1)
# Separate counts of everything from genes-only
genes = counts[gene_names]
# Add mapping and ERCC counts to cell metadata
cell_metadata['n_genes'] = (genes > 0).sum(axis=1)
cell_metadata['mapped_reads'] = genes.sum(axis=1)
cell_metadata['ercc'] = counts[ercc_names].sum(axis=1)
cell_metadata = pd.concat([cell_metadata, count_stats], axis=1)
# Remove not useful columns
cell_metadata.drop(['too_low_aQual', 'not_aligned'], inplace=True,
axis=1)
return genes, cell_metadata, mapping_stats
def calculate_plate_summaries(self):
"""Get mean reads, percent mapping, etc summaries for each plate"""
well_map = self.cell_metadata.groupby(Plates.SAMPLE_MAPPING)
# these stats are from STAR mapping
star_cols = ['Number of input reads', 'Uniquely mapped reads number']
star_stats = self.mapping_stats[star_cols].groupby(
self.cell_metadata[Plates.SAMPLE_MAPPING]).sum()
total_reads = star_stats['Number of input reads']
unique_reads = star_stats['Uniquely mapped reads number']
percent_ercc = well_map.sum()['ercc'].divide(total_reads, axis=0)
percent_mapped_reads = unique_reads / total_reads - percent_ercc
plate_summaries = pd.DataFrame(OrderedDict([
(Plates.MEAN_READS_PER_CELL, total_reads / well_map.size()),
(Plates.MEDIAN_GENES_PER_CELL, well_map.median()['n_genes']),
('Percent not uniquely aligned', 100 * well_map.sum()['alignment_not_unique'].divide(total_reads, axis=0)),
(Plates.PERCENT_MAPPED_READS, 100 * percent_mapped_reads),
('Percent no feature', 100 * well_map.sum()['no_feature'].divide(total_reads, axis=0)),
('Percent Rn45s', 100 * self.genes['Rn45s'].groupby(
self.cell_metadata[Plates.SAMPLE_MAPPING]).sum() / total_reads),
(Plates.PERCENT_ERCC, 100 * percent_ercc),
('n_wells', well_map.size())
]))
return plate_summaries
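# Illustrative access pattern for the summary table built above (a sketch
# only; the plate barcode is hypothetical, but the column constants are the
# ones defined on this class):
#
#     summaries = plates.calculate_plate_summaries()
#     summaries.loc['MAA000574', Plates.MEAN_READS_PER_CELL]
#     summaries[Plates.PERCENT_ERCC].describe()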
@staticmethod
def clean_plate_metadata(plate_metadata):
# Remove whitespace from "tissue" column
plate_metadata.tissue = plate_metadata.tissue.map(
lambda x: x.strip() if isinstance(x, str) else x)
# Add a column with both tissue and subtissue
cleaned_subtissue = plate_metadata['subtissue'].map(
lambda x: ': ' + x.strip() if isinstance(x, str) else '')
plate_metadata['tissue_subtissue'] = plate_metadata['tissue'] \
+ cleaned_subtissue
# Hard-coded column name of 21_55_F is actually the sample id column
plate_metadata = plate_metadata.rename(
columns={'mouse.id': 'Sample ID'})
plate_metadata['Age (months)'] = plate_metadata['Sample ID'].map(
lambda x: x.split('_')[0] if isinstance(x, str) else '')
def parse_date(x):
if isinstance(x, str):
x = x.strip()
if not x:
return np.nan
if x.endswith('/2017'):
return datetime.datetime.strptime(x, '%m/%d/%Y')
elif x.endswith('/17'):
return datetime.datetime.strptime(x, '%m/%d/%y')
else:
return datetime.datetime.strptime(x, '%y%m%d')
elif isinstance(x, float):
return datetime.datetime.strptime(str(int(x)), '%y%m%d')
else:
raise TypeError
for col in plate_metadata.columns:
if 'date' in col.lower():
plate_metadata[col] = plate_metadata[col].map(
parse_date,
na_action='ignore'
)
# Use only the metadata for the plates that have been sequenced
plate_metadata = plate_metadata.dropna(how='all', axis=1)
return plate_metadata
def compute_bulk_smushing(self):
"""Get average signal from each plate ('bulk') and find 2d embedding"""
grouped = self.genes.groupby(self.cell_metadata[self.SAMPLE_MAPPING])
if os.path.exists(self.bulk_smushed_cache_file):
smushed = pd.read_csv(self.bulk_smushed_cache_file, names=[0, 1],
header=0, index_col=0)
# if the set of plates hasn't changed, return the cached version
if set(grouped.groups) == set(smushed.index):
return smushed
# if the cache was missing or invalid, compute a new projection
medians = grouped.median()
smusher = TSNE(random_state=0, perplexity=10, metric='cosine')
smushed = pd.DataFrame(smusher.fit_transform(medians),
index=medians.index)
smushed.to_csv(self.bulk_smushed_cache_file)
return smushed
def compute_cell_smushing(self):
"""Within each plate, find a 2d embedding of all cells"""
grouped = self.genes.groupby(self.cell_metadata[self.SAMPLE_MAPPING])
if os.path.exists(self.cell_smushed_cache_file):
smusheds = pd.read_pickle(self.cell_smushed_cache_file)
# if nothing is missing, return the cached version
if not set(grouped.groups) - set(smusheds):
return smusheds
else:
smusheds = {}
for plate_name, genes_subset in grouped:
if plate_name not in smusheds:
cell_smusher = TSNE(metric='cosine', random_state=0)
cell_smushed = pd.DataFrame(
cell_smusher.fit_transform(genes_subset),
index=genes_subset.index)
smusheds[plate_name] = cell_smushed
pd.to_pickle(smusheds, self.cell_smushed_cache_file)
return smusheds
def compute_top_genes_per_cell(self):
"""Get the most highly expressed genes in every cell
Returns
-------
top_genes : pandas.Series
A mapping of the cell barcode to a ranked list of the top 10 genes,
where the first item is the most highly expressed (e.g. Rn45s)
"""
ranks = self.genes.rank(axis=1, ascending=False)
in_top10 = ranks[ranks <= 10]
top_genes = in_top10.apply(
lambda x: x.sort_values().dropna().index.tolist(), axis=1)
return top_genes
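# Usage sketch (assumes a constructed Plates object named `plates`; the gene
# names in the comment are only examples of what the ranked lists look like):
#
#     top = plates.compute_top_genes_per_cell()
#     top.iloc[0]   # e.g. ['Rn45s', 'Malat1', ...] -- up to ten genes per cell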
class TenX_Runs(Plates):
# Names of commonly accessed columns
MEAN_READS_PER_CELL = 'Mean Reads per Cell'
MEDIAN_GENES_PER_CELL = 'Median Genes per Cell'
PERCENT_MAPPED_READS = 'Percent Reads Mapped Confidently to Transcriptome'
SAMPLE_MAPPING = 'CHANNEL_MAPPING'
COLUMNS_TO_CONVERT = {'Valid Barcodes',
'Reads Mapped Confidently to Transcriptome',
'Reads Mapped Confidently to Exonic Regions',
'Reads Mapped Confidently to Intronic Regions',
'Reads Mapped Confidently to Intergenic Regions',
'Reads Mapped Antisense to Gene',
'Sequencing Saturation',
'Q30 Bases in Barcode', 'Q30 Bases in RNA Read',
'Q30 Bases in Sample Index', 'Q30 Bases in UMI',
'Fraction Reads in Cells'}
def __init__(self, data_folder, genes_to_drop='Rn45s',
verbose=False, nrows=None, tissue=None,
channels_to_use=None, tissue_folder='tissues'):
run_folder = os.path.join(data_folder, '10x_data')
self.plate_metadata = combine_csv_files(run_folder,
'MACA_10X_P*.csv',
index_col=0)
if tissue is not None:
tissues = tissue.split(',')
folders = self.plate_metadata.index[self.plate_metadata['Tissue'].isin(tissues)]
else:
folders = self.plate_metadata.index
folders = [f for f in folders if os.path.exists(os.path.join(run_folder, f))]
if channels_to_use is not None:
folders = [f for f in folders if f in channels_to_use]
counts = combine_sdf_files(run_folder, folders,
verbose=verbose)
mapping_stats = self.combine_metrics_files(
run_folder, folders)
self.genes, self.cell_metadata, self.mapping_stats = \
self.clean_and_reformat(counts, mapping_stats)
self.plate_summaries = self.calculate_plate_summaries()
self.plate_metadata = self.plate_metadata.loc[
self.plate_summaries.index]
self.cell_metadata = self.cell_metadata.join(self.plate_metadata,
on=self.SAMPLE_MAPPING)
smushed_folder = os.path.join(run_folder, tissue_folder)
if not os.path.exists(smushed_folder):
os.mkdir(smushed_folder)
self.cell_smushed = self.read_tissue_smushed(smushed_folder, verbose,
tissue)
self.gene_names = sorted(self.genes.columns)
self.plate_metadata_features = sorted(self.plate_metadata.columns)
# Remove pesky genes
self.genes = self.genes.drop(genes_to_drop)
# Get a counts per million rescaling of the genes
# self.counts_per_million = self.genes.divide(self.genes.sum(axis=1),
# axis=0) * 1e6
# self.top_genes = self.compute_top_genes_per_cell()
self.data = {'genes': self.genes,
'mapping_stats': self.mapping_stats,
'cell_metadata': self.cell_metadata,
'plate_metadata': self.plate_metadata,
'plate_summaries': self.plate_summaries}
def __repr__(self):
n_channels = self.plate_summaries.shape[0]
n_barcodes = len(self.genes.rows)
s = f'This is an object holding data for {n_channels} 10X channels and ' \
f'{n_barcodes} barcodes.\nHere are the accessible dataframes:\n'
for name, df in self.data.items():
s += f'\t"{name}" table dimensions: ' + str(df.shape) + '\n'
return s
@staticmethod
def combine_cell_files(folder, globber, verbose=False):
dfs = []
for filename in glob.iglob(os.path.join(folder, globber)):
if verbose:
print(f'Reading {filename} ...')
channel = os.path.basename(os.path.dirname(filename))
df = pd.read_csv(filename, index_col=0)
df.index = pd.MultiIndex.from_product(([channel], df.index),
names=['channel', 'cell_id'])
dfs.append(df)
combined = pd.concat(dfs)
return combined
@staticmethod
def combine_metrics_files(run_folder, folders):
dfs = []
for folder in folders:
filename = os.path.join(run_folder, folder, 'metrics_summary.csv')
p_name = os.path.basename(os.path.dirname(filename))
df = pd.read_csv(filename)
df[TenX_Runs.SAMPLE_MAPPING] = p_name
dfs.append(df)
combined = pd.concat(dfs)
combined.set_index(TenX_Runs.SAMPLE_MAPPING, inplace=True)
return combined
@staticmethod
def clean_and_reformat(counts, mapping_stats):
"""Move metadata information into separate dataframe and simplify ids
Parameters
----------
counts : pandas.DataFrame
A (samples, genes) dataframe of integer number of reads that mapped
to a gene in a cell, but also has extra columns of ERCC mapping and
htseq-count output that we want to remove
mapping_stats : pandas.DataFrame
A (samples, mapping_statistics) dataframe of the time the alignment
began, number of input reads, number of mapped reads, and other
information output by STAR, but everything is a string instead of
numbers which makes us sad
Returns
-------
genes : SparseDataFrame
A (samples, genes) dataframe of integer number of reads that mapped
to a gene in a cell
cell_metadata : pandas.DataFrame
A (samples, sample_features) dataframe of number of detected genes,
total reads, ercc counts, and "WELL_MAPPING" (really,
plate mapping)
mapping_stats : pandas.DataFrame
A (samples, mapping_statistics) dataframe of the time the alignment
began, number of input reads, number of mapped reads, and other
information output by CellRanger, with numbers properly formatted
"""
# counts.sort_index(inplace=True)
# channel_ids = counts.index.get_level_values(0)
channel_ids = [c.rsplit('_', 1)[0] for c in counts.rows]
mapping_stats = clean_mapping_stats(
mapping_stats,
convert_to_percentage=TenX_Runs.COLUMNS_TO_CONVERT
)
sample_ids = pd.Series(counts.rows)
# '{}_{}'.format(channel, index) for channel, index in
# counts.index
# )
cell_metadata = pd.DataFrame(
index=sample_ids,
data={TenX_Runs.SAMPLE_MAPPING: channel_ids}
)
counts.index = sample_ids
# Separate spike-ins (ERCCs) and genes
ercc_names = [col for col in counts.columns if col.startswith('ERCC-')]
gene_names = [col for col in counts.columns if
not (col.startswith('ERCC-')
or col.endswith('_transgene'))]
# Separate counts of everything from genes-only
genes = SparseDataFrame()
genes.matrix = counts[gene_names]
genes.columns = gene_names[:]
genes.rows = counts.rows[:]
# Add mapping and ERCC counts to cell metadata
cell_metadata['total_reads'] = counts.matrix.sum(axis=1)
cell_metadata['n_genes'] = (genes.matrix > 0).sum(axis=1)
cell_metadata['mapped_reads'] = genes.matrix.sum(axis=1)
cell_metadata['ercc'] = counts[ercc_names].sum(axis=1)
return genes, cell_metadata, mapping_stats
def calculate_plate_summaries(self):
"""Get mean reads, percent mapping, etc summaries for each plate"""
channel_map = self.cell_metadata.groupby(TenX_Runs.SAMPLE_MAPPING)
total_reads = self.mapping_stats['Number of Reads']
# percent_rn45s = pd.Series(self.genes['Rn45s'].todense()).groupby(
# self.cell_metadata[TenX_Runs.SAMPLE_MAPPING]
# ).sum() / total_reads
percent_ercc = channel_map['ercc'].sum().divide(total_reads, axis=0)
plate_summaries = pd.concat(
[self.mapping_stats,
pd.DataFrame(OrderedDict([
# ('Percent Rn45s', percent_rn45s),
(TenX_Runs.PERCENT_ERCC, percent_ercc),
('n_barcodes', channel_map.size())
]))], axis=1
)
return plate_summaries
def read_tissue_smushed(self, folder, verbose=False, tissue=None):
smusheds = {}
if tissue is None:
globber = glob.iglob(os.path.join(folder, 'smushed-*'))
else:
globber = glob.iglob(os.path.join(folder, f'smushed-{tissue}*'))
for filename in globber:
if verbose:
print(f'Reading {filename} ...')
tissue = filename.split('smushed-')[-1].split('.')[0]
tissue = tissue.split('-')[0]
df = pd.read_csv(filename, index_col=0)
df.rename(columns={'0': 0, '1': 1}, inplace=True)
smusheds[tissue] = df
assert len(df.columns.difference([0, 1, 'cluster'])) == 0
return smusheds
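# Usage sketch (hypothetical path and tissue name; assumes a data folder
# containing a '10x_data' subfolder with per-channel outputs and the
# MACA_10X_P*.csv metadata files described in __init__ above):
#
#     runs = TenX_Runs('/path/to/data', tissue='Liver', verbose=True)
#     print(runs)                       # channel and barcode counts per table
#     runs.plate_summaries.head()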
|
|
import numpy as np
from copy import deepcopy
from matplotlib.pyplot import text
from pyKinectTools.algs.SkeletonBeliefPropagation import *
from pyKinectTools.algs.GraphAlgs import *
from pyKinectTools.algs.OrientationEstimate import *
import pyKinectTools.algs.NeighborSuperpixels.NeighborSuperpixels as nsp
import sys
import time
import cv2
# Superpixels
sys.path.append('/Users/colin/libs/visionTools/slic-python/')
import slic
# class PictorialStructures:
# # Learn Prior (use negative images)
# # Sample from posterior
# # SVM
# #
# # Steps:
# # 1) Calculate HOG Features OR use regions
# # 2)
# # 3) Inference: Belief Propogation
# def __init__():
# pass
# def setPotentialPoses(self, poses):
# pass
# def setLabels(self, labels):
# pass
# def Score(Im, z):
# # z=configuration
# sum = 0
# for each part:
# sum += w_part * appearanceFcn
# for each set of edges
# sum += w_ij * deformation term
# E = sum(how well does this part fit + how well does it fit it's neighbors)
def pictorialScores(regionXYZ, regionPos, xyz, edgeDict, geoExtrema=[], geoExtremaPos=[], sampleThresh=.9, gaborResponse = [], regions=[]):
regionCount = len(regionXYZ)-1
# forwardVec, frame = roughOrientationEstimate(xyz)
# regionMean = np.mean(regionXYZ[1:],0)
if 0:
forwardRegionXYZ = np.asarray(np.asmatrix(frame)*np.asmatrix(regionXYZ-regionMean).T).T
forwardRegionXYZ += regionMean
else:
forwardRegionXYZ = regionXYZ
relDists = np.ones([regionCount+1,regionCount+1,3])*np.inf
for i in range(1,regionCount+1):
relDists[i,1:] = (forwardRegionXYZ[1:] - forwardRegionXYZ[i])
relDists[np.abs(relDists)<5] = np.inf
for i in range(3):
np.fill_diagonal(relDists[:,:,i], np.inf)
# distMat = UnstructuredDijkstras(forwardRegionXYZ, edgeDict)
# print regionXYZ
distMat = UnstructuredDijkstras(regionXYZ, edgeDict)
geoVariance = 50.0# geodesic weight parameter
if geoExtrema != []:
geoDists = np.ones([regionCount+1])*np.inf
for i in range(1,regionCount+1):
# geoDists[i] = np.min(np.sqrt(np.sum((geoExtrema-regionXYZ[i])**2,1)))
geoDists[i] = np.min(distMat[i,regions[geoExtremaPos[:,0],geoExtremaPos[:,1]]])
geoDists[regions[geoExtremaPos[:,0],geoExtremaPos[:,1]]] = 0
# Normalize
geoDists = np.exp(-.5*geoDists**2/(geoVariance**2))
gaborFeatures =[0]
if gaborResponse != []:
# gaborFeatures = gaborResponse[regionPos[:,0], regionPos[:,1]]
for i in range(1, regionCount+1):
gaborFeatures.append(gaborResponse[regions==i].mean())
# gaborResponse2[regions==i] = gaborResponse[regions==i].mean()
gaborFeatures = np.array(gaborFeatures)
gaborFeatures -= gaborFeatures.min()
gaborFeatures /= gaborFeatures.max()
''' --- Structure params ---- '''
HEAD=0;
L_SHOUL=1; L_ELBOW=2; L_HAND=3;
R_SHOUL=4; R_ELBOW=5; R_HAND=6;
jointType = ['xy','r','r',
'xy','r','r']
# jointPos = np.array([[-0, 180], 155, 210,
# [0, -180], 155, 210])
jointPos = np.array([[25, 165], 160, 220,
[-25, -165], 160, 220])
jointVariance = np.array([95, 90, 100,
95, 90, 100])
jointGeodesicParam = np.array([.1, .0, .3,
.1, .0, .3])*2.0
gaborParam = np.array([0, .2, .3,
0, .2, .3])*0.0
jointStart = [HEAD,L_SHOUL,L_ELBOW, HEAD,R_SHOUL,R_ELBOW]
jointEnd = [L_SHOUL,L_ELBOW,L_HAND, R_SHOUL,R_ELBOW,R_HAND]
''' --- /Structure params ---- '''
cumScores = []
indivScores = {}
Configs = []
for currPart in range(0, regionCount+1): # For each potential starting location
# for currPart in range(0, 5): # For each potential starting location
Configs.append([currPart])
cumScores.append(0)
indivScores[currPart] = {}
''' While not all of the configurations are full '''
config_ind = 0
while not np.all(np.equal([len(x) for x in Configs], len(jointPos)+1)):
config_i = Configs[config_ind]
score_i = cumScores[config_ind]
# indivScore_i = indivScores[config_ind]
if len(config_i) < len(jointPos)+1:
l = len(config_i)-1
if l < 2:
pass
# print l, config_i
''' Calculate probability '''
if jointType[l] == 'xy':
partCostsRaw = np.sqrt(np.sum((relDists[config_i[jointStart[l]],:,0:2] - jointPos[l])**2, 1))
partCosts = np.exp(-partCostsRaw**2 / (2*jointVariance[l]**2))
if gaborResponse != []:
# partCosts = np.minimum(1, partCosts + (gaborParam[l] * gaborFeatures ))
partCosts += gaborParam[l] * gaborFeatures
if geoExtrema != []:
# partCosts = np.minimum(1, partCosts + (np.abs(np.sqrt(np.sum((relDists[config_i[jointStart[l]],:,1:3])**2,1)) - jointPos[l]) < 1.5*partCostsRaw))
partCosts += jointGeodesicParam[l] * geoDists
largeGeodesic = np.abs(distMat[config_i[jointStart[l]],:]) > 4.0*np.sqrt(np.sum(np.array(jointPos[l])**2))
partCosts[largeGeodesic] = 0
# inRange = np.abs(distMat[config_i[jointStart[l]],:] - jointPos[l]) > 2.0*partCostsRaw
# partCosts[-inRange] = 0
elif jointType[l] == 'r':
partCostsRaw = np.abs(distMat[config_i[jointStart[l]],:] - jointPos[l])
partCosts = np.exp(-partCostsRaw**2 / (2*jointVariance[l]**2))
largeGeodesic = (np.abs(np.sqrt(np.sum((relDists[config_i[jointStart[l]],:,1:3])**2,1)) - jointPos[l]) > 4.0*partCostsRaw)
if geoExtrema != []:
# partCosts = np.minimum(1, partCosts + (np.abs(np.sqrt(np.sum((relDists[config_i[jointStart[l]],:,1:3])**2,1)) - jointPos[l]) < 1.5*partCostsRaw))
partCosts += jointGeodesicParam[l] * geoDists
if gaborResponse != []:
# partCosts = np.minimum(1, partCosts + (gaborParam[l] * gaborFeatures ))
partCosts += gaborParam[l] * gaborFeatures
partCosts[largeGeodesic] = 0
''' Prevent the same region from being used multiple times '''
# print partCosts
partCosts[config_i] = 0
if partCosts.max() > 1.0:
partCosts /= partCosts.max()
sortNext = np.argsort(partCosts)
MAX_NEW = 3
for n in range(1, np.min(np.sum(partCosts >= sampleThresh*partCosts.max())+1), MAX_NEW):
nextPart = sortNext[-n]
nextScore = partCosts[sortNext[-n]]
Configs.append([x for x in config_i] + [nextPart])
cumScores.append(score_i + nextScore)
# import pdb
# pdb.set_trace()
# print config_i[jointStart[l]], config_i
if jointEnd[l] not in indivScores[config_i[jointStart[l]]]:
indivScores[config_i[jointStart[l]]][jointEnd[l]] = []
indivScores[config_i[jointStart[l]]][jointEnd[l]].append(partCosts)
Configs.pop(config_ind)
cumScores.pop(config_ind)
# Increment
if config_ind == len(Configs)-1:
config_ind = 0
else:
config_ind += 1
argSortScores = np.argsort(cumScores)
ConfigsOut = []
for i in argSortScores:
ConfigsOut.append(Configs[i])
return ConfigsOut, indivScores
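# Usage sketch (hypothetical wiring; the inputs would normally come from
# regionGraph() below and from the depth camera's point cloud `xyz`; note
# regionGraph does not return regionPos, so it is rebuilt from regionLabels):
#
#   regions, regionXYZ, regionLabels, edgeDict = regionGraph(posMat)
#   regionPos = [x[2] if len(x) > 2 else [0, 0] for x in regionLabels]
#   configs, scores = pictorialScores(np.array(regionXYZ), regionPos, xyz,
#                                     edgeDict, regions=regions)
#   bestConfig = configs[-1]  # configs appear sorted by ascending cumulative score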
# fig = figure(1)
# ax = Axes3D(fig)
# xlabel('X'); ylabel('Y'); axis('equal')
# ax.plot(-forwardRegionXYZ[:,0],forwardRegionXYZ[:,1],forwardRegionXYZ[:,2],'g.')
# figure(1)
# plot(regionXYZ[:,1],regionXYZ[:,0], 'b.')
# plot(-forwardRegionXYZ[:,0],forwardRegionXYZ[:,1], 'g.')
# for b2 in range(1, regionCount):
def regionGraph(posMat, pixelSize=750):
im8bit = deepcopy(posMat)
mask_erode = posMat[:,:,2]>0
if 0:
for i in range(3):
im8bit[:,:,i][im8bit[:,:,i]!=0] -= im8bit[:,:,i][im8bit[:,:,i]!=0].min()
im8bit[:,:,i] /= im8bit[:,:,i].max()/256
im8bit = np.array(im8bit, dtype=np.uint8)
im4d = np.dstack([mask_erode, im8bit])
else:
im8bit = im8bit[:,:,2]
im8bit /= im8bit.max()/256
im8bit = np.array(im8bit, dtype=np.uint8)
im4d = np.dstack([mask_erode*0+1, im8bit, im8bit, im8bit])
# im4d = np.dstack([mask_erode, im8bit, mask_erode, mask_erode])
# slicRegionCount = int(posMat.shape[0]*posMat.shape[1]/1020)
# regions = slic.slic_n(np.array(im4d, dtype=np.uint8), slicRegionCount,10)#50
posMean = posMat[posMat[:,:,2]>0,:].mean(0)[2]*1.2
# print posMean
# regions = slic.slic_s(np.array(np.ascontiguousarray(im4d), dtype=np.uint8), int(1000 * (1000.0/posMean)**2),5)+1
# regions = slic.slic_s(np.array(np.ascontiguousarray(im4d), dtype=np.uint8), 1500,5)+1
regions = slic.slic_s(np.array(np.ascontiguousarray(im4d), dtype=np.uint8), pixelSize,5)+1
regions *= mask_erode
avgColor = np.zeros([regions.shape[0],regions.shape[1],3])
regionCount = regions.max()
regionLabels = [[0]]
goodRegions = 0
bodyMean = np.array([posMat[mask_erode,0].mean(),posMat[mask_erode,1].mean(),posMat[mask_erode,2].mean()])
for i in range(1, regionCount+2):
if np.sum(np.equal(regions,i)) < 100:
regions[regions==i] = 0
else:
if 1: #if using x/y/z
meanPos = posMat[regions==i,:].mean(0)
if 0: # If using distance map
meanPos = np.array([posMat[regions==i,:].mean(0)[0],
posMat[regions==i,:].mean(0)[1],
# posMat[regions==i,:].mean(0)[2],
(dists2Tot[regions==i].mean())])
if 0: # If using depth only
meanPos = np.array([(np.nonzero(regions==i)[0].mean()),
(np.nonzero(regions==i)[1].mean()),
(im8bit[regions==i].mean())])
avgColor[regions==i,:] = meanPos - bodyMean
if not np.isnan(meanPos[0]) and meanPos[0] != 0.0:
tmp = np.nonzero(regions==i)
argPos = [int(tmp[0].mean()),int(tmp[1].mean())]
regionLabels.append([i, meanPos-bodyMean, argPos])
goodRegions += 1
regions[regions==i] = goodRegions
# print np.sum(np.equal(regions,i))
else:
regions[regions==i] = 0
regionCount = regions.max()
allEdges = nsp.getNeighborEdges(np.ascontiguousarray(regions, dtype=np.uint8))
edges = []
for i in allEdges:
if i[0] != 0 and i[1] != 0:
if i not in edges:
edges.append(i)
if [i[1],i[0]] not in edges:
edges.append([i[1],i[0]])
edgeDict = edgeList2Dict(edges)
regionXYZ = ([x[1] for x in regionLabels if x[0] != 0])
regionXYZ.insert(0,[0,0,0])
regionPos = ([x[2] for x in regionLabels if x[0] != 0])
regionPos.insert(0,[0,0])
# distMat = UnstructuredDijkstras(regionXYZ, edgeDict)
# distMat, bendMat = UnstructuredDijkstrasAndBend(regionXYZ, edgeDict)
# mstEdges, edgeDict2 = MinimumSpanningTree(distMat[1:,1:])
return regions, regionXYZ, regionLabels, edgeDict
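# Usage sketch (posMat is assumed to be an (H, W, 3) array of per-pixel
# x/y/z coordinates with z == 0 marking background pixels):
#
#   regions, regionXYZ, regionLabels, edgeDict = regionGraph(posMat, pixelSize=750)
#   labelGraphImage(regionLabels)   # annotate region indices on the current figure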
def labelGraphImage(regionLabels):
regionCount = len(regionLabels)-1
for i in range(1,regionCount+1):
pt1 = (regionLabels[i][2][1],regionLabels[i][2][0])
# cv2.circle(imLines, pt1, radius=0, color=50, thickness=3)
text(pt1[0]+2, pt1[1], str(i))
def labelSkeleton(skeleton, regionLabels, regionIm):
import cv2
im = deepcopy(regionIm)
regionCount = len(regionLabels)-1
#Head
ptHead = (regionLabels[skeleton[0]][2][1],regionLabels[skeleton[0]][2][0])
cv2.circle(im, ptHead, radius=10, color=regionCount, thickness=4)
cv2.circle(im, ptHead, radius=5, color=regionCount/2, thickness=4)
#Left arm
for i in range(1,4):
pt = (regionLabels[skeleton[i]][2][1],regionLabels[skeleton[i]][2][0])
cv2.circle(im, pt, radius=(10-i)/2, color=5, thickness=2)
cv2.circle(im, pt, radius=(10-i), color=30, thickness=2)
for i in range(2):
pt1 = (regionLabels[skeleton[1]][2][1],regionLabels[skeleton[1]][2][0])
cv2.line(im, ptHead, pt1, color=5*i, thickness=i+1)
pt2 = (regionLabels[skeleton[2]][2][1],regionLabels[skeleton[2]][2][0])
cv2.line(im, pt1, pt2, color=5*i, thickness=i+1)
pt3 = (regionLabels[skeleton[3]][2][1],regionLabels[skeleton[3]][2][0])
cv2.line(im, pt2, pt3, color=5*i, thickness=i+1)
#Right arm
for i in range(4,7):
pt = (regionLabels[skeleton[i]][2][1],regionLabels[skeleton[i]][2][0])
cv2.circle(im, pt, radius=(10+3-i), color=35, thickness=2)
cv2.circle(im, pt, radius=(10+3-i)/2, color=2, thickness=2)
pt1 = (regionLabels[skeleton[4]][2][1],regionLabels[skeleton[4]][2][0])
cv2.line(im, ptHead, pt1, color=30, thickness=2)
pt2 = (regionLabels[skeleton[5]][2][1],regionLabels[skeleton[5]][2][0])
cv2.line(im, pt1, pt2, color=30, thickness=2)
pt3 = (regionLabels[skeleton[6]][2][1],regionLabels[skeleton[6]][2][0])
cv2.line(im, pt2, pt3, color=30, thickness=2)
for i in range(1,regionCount+1):
pt1 = (regionLabels[i][2][1],regionLabels[i][2][0])
text(pt1[0]+2, pt1[1], str(i))
return im
if 0:
# maxDist = distMat[distMat<np.inf].max()
maxDist = distMat[1,2:].max()
minDist = distMat[1,1:].min()
# Draw lines between nodes
imLines = deepcopy(regions)
removeEdges = []
for i, ind in zip(edges, range(len(edges))):
i1 = i[0]
i2 = i[1]
for i, ind in zip(mstEdges, range(len(edges))):
i1 = i[0]+1
i2 = i[1]+1
pt1 = (regionLabels[i1][2][1],regionLabels[i1][2][0])
pt2 = (regionLabels[i2][2][1],regionLabels[i2][2][0])
cv2.line(imLines, pt1, pt2, 40)
# cv2.line(imLines, pt1, pt2, 255.0/maxDist*distMat[18, edges[i[1]]])
# cv2.line(imLines, pt1, pt2, 255.0/maxDist*distMat[edges[i[0]], edges[i[1]]])
for i in range(1,regionCount+1):
pt1 = (regionLabels[i][2][1],regionLabels[i][2][0])
# cv2.circle(imLines, pt1, radius=0, color=50, thickness=3)
# cv2.circle(imLines, pt1, radius=0, color=distMat[1, i]*255.0/maxDist, thickness=3)
text(pt1[0]+2, pt1[1], str(i))
imshow(imLines)
# # Test on ICU data
# image_argb = dstack([d1c, d1c, d1c, d1c])
# image_argb = dstack([m1, m1, m1, m1])
# # region_labels = slic.slic_s(image_argb, 10000, 1)
# image_argb = dstack([diffDraw1,diffDraw1,diffDraw1,diffDraw1])
# region_labels = slic.slic_n(image_argb, 100, 0)
# slic.contours(image_argb, region_labels, 1)
# plt.imshow(image_argb[:, :, 0])
# regions = slic.slic_n(np.array(np.dstack([im,im[:,:,2]]), dtype=uint8), 50,10)
# i=3
# # im8bit = np.array(1.0*imgs2[i]/imgs2[i].max()*256.0)
# im8bit = im*(im<150)*(im>50)
# # im8bit = im
# im4d = np.dstack([im8bit>0, im8bit, im8bit, im8bit])
# regions = slic.slic_n(np.array(im4d, dtype=uint8), 100,5)
# regions *= (im8bit>0)
# regions2 = slic.slic_n(np.array(im4d, dtype=uint8), 50,5)
# regions2 *= (im8bit>0)
# regions3 = slic.slic_n(np.array(im4d, dtype=uint8), 20,1)
# regions3 *= (im8bit>0)
# # regions = slic.slic_n(np.array(im4d, dtype=uint8), 30,5)
# imshow(regions)
# -----
# dists2Tot[dists2Tot>1000] = 1000
# im8bit = (d[objects[ind]]*mask_erode)
# im8bit = im8bit / np.ceil(im8bit.max()/256.0)
im8bit = deepcopy(posMat)
for i in range(3):
im8bit[:,:,i][im8bit[:,:,i]!=0] -= im8bit[:,:,i][im8bit[:,:,i]!=0].min()
im8bit[:,:,i] /= im8bit[:,:,i].max()/256
im8bit = np.array(im8bit, dtype=np.uint8)
# im8bit = im8bit[:,:,2]
im4d = np.dstack([mask_erode, im8bit])
# im4d = np.dstack([mask_erode, im8bit, im8bit, im8bit])
# im4d = np.dstack([mask_erode, dists2Tot, dists2Tot, dists2Tot])
# im4d = np.dstack([mask_erode, im8bit, dists2Tot, mask_erode])
regions = slic.slic_n(np.array(im4d, dtype=np.uint8), 50,10)#2
# regions = slic.slic_s(np.array(im4d, dtype=uint8), 550,3)
regions *= mask_erode
imshow(regions)
avgColor = np.zeros([regions.shape[0],regions.shape[1],3])
# avgColor = np.zeros([regions.shape[0],regions.shape[1],4])
regionCount = regions.max()
regionLabels = [[0]]
goodRegions = 0
bodyMean = np.array([posMat[mask_erode,0].mean(),posMat[mask_erode,1].mean(),posMat[mask_erode,2].mean()])
for i in range(1, regionCount+2):
if np.sum(np.equal(regions,i)) < 50:
regions[regions==i] = 0
else:
if 1: #if using x/y/z
meanPos = posMat[regions==i,:].mean(0)
if 0: # If using distance map
meanPos = np.array([posMat[regions==i,:].mean(0)[0],
posMat[regions==i,:].mean(0)[1],
# posMat[regions==i,:].mean(0)[2],
(dists2Tot[regions==i].mean())])
if 0: # If using depth only
meanPos = np.array([(np.nonzero(regions==i)[0].mean()),
(np.nonzero(regions==i)[1].mean()),
(im8bit[regions==i].mean())])
avgColor[regions==i,:] = meanPos - bodyMean
if not np.isnan(meanPos[0]) and meanPos[0] != 0.0:
tmp = np.nonzero(regions==i)
argPos = [int(tmp[0].mean()),int(tmp[1].mean())]
regionLabels.append([i, meanPos-bodyMean, argPos])
goodRegions += 1
regions[regions==i] = goodRegions
# print np.sum(np.equal(regions,i))
else:
regions[regions==i] = 0
regionCount = regions.max()
#Reindex
regionCount = len(regionLabels)
for lab, j in zip(regionLabels, range(regionCount)):
lab.append(j)
# mapRegionToIndex.append(lab[0])
# (Euclidan) Distance matrix
distMatrix = np.zeros([regionCount, regionCount])
for i_data,i_lab in zip(regionLabels, range(regionCount)):
for j_data,j_lab in zip(regionLabels, range(regionCount)):
if i_lab <= j_lab:
# distMatrix[i_lab,j_lab] = np.sqrt(((i_data[1][0]-j_data[1][0])**2)+((i_data[1][1]-j_data[1][1])**2)+.5*((i_data[1][2]-j_data[1][2])**2))
distMatrix[i_lab,j_lab] = np.sqrt(np.sum((i_data[1]-j_data[1])**2))
distMatrix = np.maximum(distMatrix, distMatrix.T)
distMatrix += 1000*np.eye(regionCount)
# distMatrix[distMatrix > 400] = 1000
edges = distMatrix.argmin(0)
if 0:
''' Draw edges based on closest node '''
imLines = deepcopy(regions)
for i in range(regionCount):
pt1 = (regionLabels[i][2][1],regionLabels[i][2][0])
cv2.circle(imLines, pt1, radius=0, color=125, thickness=3)
for i in range(regionCount):
pt1 = (regionLabels[i][2][1],regionLabels[i][2][0])
pt2 = (regionLabels[edges[i]][2][1],regionLabels[edges[i]][2][0])
cv2.line(imLines, pt1, pt2, 100)
mstEdges, edgeDict = MinimumSpanningTree(distMatrix)
# ''' Refine MST '''
# edgeDict, deletedInds = PruneEdges(edgeDict, maxLength=2)
# for i in deletedInds[-1::-1]:
# del regionLabels[i]
# #Reindex
# regionCount = len(regionLabels)
# for lab, j in zip(regionLabels, range(regionCount)):
# lab.append(j)
# # mapRegionToIndex.append(lab[0])
# # (Euclidan) Distance matrix
# distMatrix = np.zeros([regionCount, regionCount])
# for i_data,i_lab in zip(regionLabels, range(regionCount)):
# for j_data,j_lab in zip(regionLabels, range(regionCount)):
# if i_lab <= j_lab:
# # distMatrix[i_lab,j_lab] = np.sqrt(((i_data[1][0]-j_data[1][0])**2)+((i_data[1][1]-j_data[1][1])**2)+.5*((i_data[1][2]-j_data[1][2])**2))
# distMatrix[i_lab,j_lab] = np.sqrt(np.sum((i_data[1]-j_data[1])**2))
# distMatrix = np.maximum(distMatrix, distMatrix.T)
# distMatrix += 1000*eye(regionCount)
# edges = distMatrix.argmin(0)
# mstEdges, edgeDict = MinimumSpanningTree(distMatrix)
# figure(1); imshow(objTmp[:,:,2])
''' Draw edges based on minimum spanning tree '''
imLines = deepcopy(regions)
for i in range(1,regionCount):
pt1 = (regionLabels[i][2][1],regionLabels[i][2][0])
cv2.circle(imLines, pt1, radius=0, color=125, thickness=3)
# mstEdges = np.array(mstEdges) + 1
# Draw line for all edges
if 1:
for i in range(len(mstEdges)):
try:
pt1 = (regionLabels[mstEdges[i][0]][2][1],regionLabels[mstEdges[i][0]][2][0])
pt2 = (regionLabels[mstEdges[i][1]][2][1],regionLabels[mstEdges[i][1]][2][0])
cv2.line(imLines, pt1, pt2, 100)
except:
pass
figure(2); imshow(imLines)
''' Draw line between all core nodes '''
# Draw circles
imLines = deepcopy(regions)
for i in range(1,regionCount):
pt1 = (regionLabels[i][2][1],regionLabels[i][2][0])
cv2.circle(imLines, pt1, radius=0, color=125, thickness=3)
leafPaths = GetLeafLengths(edgeDict)
leafLengths = [len(x) for x in leafPaths]
core = [x for x in edgeDict.keys() if len(edgeDict[x]) > 2]
branchesSet = set()
for i in leafPaths:
for j in i:
branchesSet.add(j)
core = np.sort(list(set(range(regionCount)).difference(branchesSet)))
# core = [x for x in edgeDict.keys() if len(edgeDict[x]) > 2]
for i in range(len(core)-1):
pt1 = (regionLabels[core[i]][2][1], regionLabels[core[i]][2][0])
pt2 = (regionLabels[core[i+1]][2][1],regionLabels[core[i+1]][2][0])
cv2.line(imLines, pt1, pt2, 150)
# Draw line for all leafs
for i in range(len(leafPaths)):
if len(leafPaths[i]) > 3:
color = 125
else:
color = 100
for j in range(len(leafPaths[i])-1):
pt1 = (regionLabels[leafPaths[i][j]][2][1],regionLabels[leafPaths[i][j]][2][0])
pt2 = (regionLabels[leafPaths[i][j+1]][2][1],regionLabels[leafPaths[i][j+1]][2][0])
cv2.line(imLines, pt1, pt2, color)
#Draw head and hands
pt1 = (regionLabels[core[0]][2][1],regionLabels[core[0]][2][0])
cv2.circle(imLines, pt1, radius=10, color=150, thickness=1)
for i in xrange(len(leafLengths)):
if leafLengths[i] >= 4:
pt1 = (regionLabels[leafPaths[i][0]][2][1],regionLabels[leafPaths[i][0]][2][0])
cv2.circle(imLines, pt1, radius=10, color=125, thickness=1)
figure(3); imshow(imLines)
if 1:
imLines = deepcopy(regions)
imLines[imLines>0] = 20
for i in range(len(mstEdges)):
pt1 = (regionLabels[mstEdges[i][0]][2][1],regionLabels[mstEdges[i][0]][2][0])
pt2 = (regionLabels[mstEdges[i][1]][2][1],regionLabels[mstEdges[i][1]][2][0])
cv2.line(imLines, pt1, pt2, 3)
# head, body, arm, legs
# potentialPoses = [np.array([[500, 30, 0], [50, 44, -27], [-18, -150, 25]]),
# np.array([[500, 30, 0], [107, 44, 0], [-18, 150, 25]]),
# np.array([[0, 30, 0], [107, 44, 0], [-18, -150, 25]]),
# np.array([[200, 30, 0], [107, 144, 0], [-18, -150, 25]])]
# np.array([[500, 0, -25], [-107, 144, 100], [-18, -150, 25]])]
potentialPoses = [np.array([regionLabels[3][1], regionLabels[27][1], regionLabels[24][1],regionLabels[55][1]]),
np.array([regionLabels[7][1], regionLabels[30][1], regionLabels[22][1],regionLabels[53][1]]),
np.array([regionLabels[5][1], regionLabels[22][1], regionLabels[29][1],regionLabels[54][1]]),
np.array([regionLabels[0][1], regionLabels[23][1], regionLabels[24][1],regionLabels[55][1]])]
potentialLabels = [np.array([regionLabels[3][2], regionLabels[27][2], regionLabels[24][2],regionLabels[55][2]]),
np.array([regionLabels[7][2], regionLabels[30][2], regionLabels[22][2],regionLabels[53][2]]),
np.array([regionLabels[5][2], regionLabels[22][2], regionLabels[29][2],regionLabels[54][2]]),
np.array([regionLabels[0][2], regionLabels[23][2], regionLabels[24][2],regionLabels[55][2]])]
# transitionMatrix = np.matrix([[.1,.45, .45],[.45,.1, .45],[.45,.45,.1]])
# transitionMatrix = np.matrix([[.5,.25, .25],[.25,.5, .25],[.25,.25,.5]])
# transitionMatrix = np.matrix([[.9,.05, .05],[.05,.9, .05],[.05,.05,.9]])
transitionMatrix = np.matrix([[.55,.15, .15, .15],[.15,.55, .15, .15],[.15,.15,.55, .15],[.15,.15,.15,.55]])
# transitionMatrix = np.matrix([[.7,.1, .1, .1],[.1,.7, .1, .1],[.1,.1,.7, .1],[.1,.1,.1,.7]])
# transitionMatrix = np.matrix([[1,.0, .0, .0],[.0,1, .0, .0],[.0,.0,1, .0],[.0,.0,.0,1]])
# transitionMatrix = np.matrix([[0,1.0,1.0,1.0],[1.0,.0,1.0,1.0],[1.0,1.0,.0,1.0],[1.0,1.0,1.0,.0]])
# transitionMatrix = np.matrix([[.0,.0, .0, .0],[.0,0, .0, .0],[.0,.0,0, .0],[.0,.0,.0,0]])
rootNodeInd = core[int(len(core)/2)]
rootNode = Node(index_=rootNodeInd, children_=edgeDict[rootNodeInd], pos_=regionLabels[rootNodeInd][1])
beliefs = []
ims = []
for guessPose,i in zip(potentialPoses, range(len(potentialPoses))):
print "-----"
# print guessPose
t1 = time.time()
rootNode.calcAll(guessPose)
print "Time:", time.time() - t1
beliefs.append(rootNode.calcTotalBelief())
print beliefs[-1]
rootNode.drawAll()
ims.append(deepcopy(imLines))
pts = potentialLabels[i]
for j,j_i in zip(pts, range(len(pts))):
print j
cv2.circle(ims[-1], (j[1], j[0]), radius=15, color=20*j_i+10, thickness=2)
subplot(1,4,i+1)
imshow(ims[-1])
print "Best pose:", np.argmax(beliefs)
subplot(1,4,np.argmax(beliefs)+1)
xlabel("**Best**")
# imshow(imLines)
|
|
from .operators import RelativeOperand
class What(object):
"""Specify 'what' a Query retrieves."""
Out = 0
In = 1
Both = 2
OutE = 3
InE = 4
BothE = 5
OutV = 6
InV = 7
Eval = 8
Coalesce = 9
If = 10
IfNull = 11
Expand = 12
First = 13
Last = 14
Count = 15
Min = 16
Max = 17
Avg = 18
Mode = 19
Median = 20
Percentile = 21
Variance = 22
StdDev = 23
Sum = 24
Date = 25
SysDate = 26
Format = 27
Dijkstra = 28
ShortestPath = 29
Distance = 30
Distinct = 31
UnionAll = 32
Intersect = 33
Difference = 34
SymmetricDifference = 35
Set = 36
List = 37
Map = 38
TraversedElement = 39
TraversedEdge = 40
TraversedVertex = 41
Any = 42
All = 43
class FunctionWhat(What, RelativeOperand):
def __init__(self, func, args):
self.func = func
self.args = args
self.name_override = None
def as_(self, name_override):
self.name_override = name_override
return self
def eval_(exp):
return FunctionWhat(What.Eval, (exp,))
def coalesce(*params):
return FunctionWhat(What.Coalesce, params)
def if_(cond, con, alt):
return FunctionWhat(What.If, (cond, con, alt))
def ifnull(field, value):
return FunctionWhat(What.IfNull, (field, value))
def expand(field):
return FunctionWhat(What.Expand, (field,))
def first(field):
return FunctionWhat(What.First, (field,))
def last(field):
return FunctionWhat(What.Last, (field,))
def count(field):
return FunctionWhat(What.Count, (field,))
def min(field, *more):
return FunctionWhat(What.Min, [field] + list(more))
def max(field, *more):
return FunctionWhat(What.Max, [field] + list(more))
def avg(field):
return FunctionWhat(What.Avg, (field,))
def mode(field):
return FunctionWhat(What.Mode, (field,))
def median(field):
return FunctionWhat(What.Median, (field,))
def percentile(field, *quantiles):
return FunctionWhat(What.Percentile, (field, quantiles))
def variance(field):
return FunctionWhat(What.Variance, (field,))
def stddev(field):
return FunctionWhat(What.StdDev, (field,))
def sum(field):
return FunctionWhat(What.Sum, (field,))
def date(date_str, fmt=None, tz=None):
return FunctionWhat(What.Date, (date_str, fmt, tz))
def sysdate(fmt, tz=None):
return FunctionWhat(What.SysDate, (fmt, tz))
def format(fmt_str, *args):
return FunctionWhat(What.Format, (fmt_str, args))
class EdgeDirection(object):
OUT = 0
IN = 1
BOTH = 2
def dijkstra(src, dst, weight_field, direction=EdgeDirection.OUT):
return FunctionWhat(What.Dijkstra, (src, dst, weight_field, direction))
def shortest_path(src, dst, direction=EdgeDirection.BOTH, edge_class=None):
return FunctionWhat(What.ShortestPath, (src, dst, direction, edge_class))
def distance(x_field, y_field, x_value, y_value):
return FunctionWhat(What.Distance, (x_field, y_field, x_value, y_value))
def distinct(field):
return FunctionWhat(What.Distinct, (field,))
def unionall(field, *more):
return FunctionWhat(What.UnionAll, (field, more))
def intersect(field, *more):
return FunctionWhat(What.Intersect, (field, more))
def difference(field, *more):
return FunctionWhat(What.Difference, (field, more))
def symmetric_difference(field, *more):
return FunctionWhat(What.SymmetricDifference, (field, more))
def set(field):
return FunctionWhat(What.Set, (field,))
def list(field):
return FunctionWhat(What.List, (field,))
def map(key, value):
return FunctionWhat(What.Map, (key, value))
def traversed_element(index, items=1):
return FunctionWhat(What.TraversedElement, (index, items))
def traversed_edge(index, items=1):
return FunctionWhat(What.TraversedEdge, (index, items))
def traversed_vertex(index, items=1):
return FunctionWhat(What.TraversedVertex, (index, items))
def any():
return FunctionWhat(What.Any, None)
def all():
return FunctionWhat(What.All, None)
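# Usage sketch (hypothetical field names; these helpers only build What
# descriptors -- a Query elsewhere in this package serializes them to SQL):
#
#     count('name').as_('n')
#     ifnull('nick', 'anonymous')
#     unionall('friends', 'followers')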
class ChainableWhat(What):
def __init__(self, chain, props):
self.chain = chain
self.props = props
self.name_override = None
def as_(self, name_override):
if type(self) is not ChainableWhat:
# Prevent further chaining
self = ChainableWhat(self.chain, self.props)
self.name_override = name_override
return self
class ElementWhat(ChainableWhat):
def __getattr__(self, attr):
if type(self) is not ElementWhat:
# Prevent further chaining
self = ElementWhat(self.chain, self.props)
self.props.append(attr)
return self
def __call__(self):
raise TypeError(
'{} is not callable here.'.format(
repr(self.props[-1]) if self.props else 'Query function'))
class VertexWhat(ElementWhat):
def __init__(self, chain):
super(VertexWhat, self).__init__(chain, [])
def out(self, *labels):
self.chain.append((What.Out, labels))
return self
def in_(self, *labels):
self.chain.append((What.In, labels))
return self
def both(self, *labels):
self.chain.append((What.Both, labels))
return self
def outE(self, *labels):
chain = self.chain
chain.append((What.OutE, labels))
return EdgeWhat(chain)
def inE(self, *labels):
chain = self.chain
chain.append((What.InE, labels))
return EdgeWhat(chain)
def bothE(self, *labels):
chain = self.chain
chain.append((What.BothE, labels))
return EdgeWhat(chain)
class VertexWhatBegin(object):
def __init__(self, func):
self.func = func
def __call__(self, *labels):
return VertexWhat([(self.func, labels)])
out = VertexWhatBegin(What.Out)
in_ = VertexWhatBegin(What.In)
both = VertexWhatBegin(What.Both)
outE = VertexWhatBegin(What.OutE)
inE = VertexWhatBegin(What.InE)
bothE = VertexWhatBegin(What.BothE)
class EdgeWhat(ElementWhat):
def __init__(self, chain):
super(EdgeWhat, self).__init__(chain, [])
def outV(self):
chain = self.chain
chain.append((What.OutV,))
return VertexWhat(chain)
def inV(self):
chain = self.chain
chain.append((What.InV,))
return VertexWhat(chain)
class EdgeWhatBegin(object):
def __init__(self, func):
self.func = func
def __call__(self):
return EdgeWhat([(self.func,)])
outV = EdgeWhatBegin(What.OutV)
inV = EdgeWhatBegin(What.InV)
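# Usage sketch (hypothetical edge labels; the chains are consumed by a Query
# elsewhere in this package):
#
#     out('FriendOf').in_('Likes')              # vertex-to-vertex traversal chain
#     out('FriendOf').outE('Rated').inV()       # hop onto edges and back to vertices
#     out('FriendOf').name.as_('friend_name')   # finish with a property and an alias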
|
|
# -*- coding: utf-8 -*-
import json
try:
from django.contrib.admin.options import (RenameBaseModelAdminMethods as
ModelAdminMetaClass)
except ImportError:
from django.forms.widgets import (MediaDefiningClass as ModelAdminMetaClass)
import re
from cms.constants import PLUGIN_MOVE_ACTION, PLUGIN_COPY_ACTION
from cms.utils import get_cms_setting
from cms.utils.compat import DJANGO_1_4
from cms.utils.compat.metaclasses import with_metaclass
from cms.utils.placeholder import get_placeholder_conf
from cms.utils.compat.dj import force_unicode, python_2_unicode_compatible
from cms.exceptions import SubClassNeededError, Deprecated
from cms.models import CMSPlugin
from django.core.urlresolvers import reverse
from django.contrib import admin
from django.core.exceptions import ImproperlyConfigured
from django.forms.models import ModelForm
from django.utils.encoding import smart_str
from django.utils.translation import ugettext_lazy as _
class CMSPluginBaseMetaclass(ModelAdminMetaClass):
"""
Ensure the CMSPlugin subclasses have sane values and set some defaults if
they're not given.
"""
def __new__(cls, name, bases, attrs):
super_new = super(CMSPluginBaseMetaclass, cls).__new__
parents = [base for base in bases if isinstance(base, CMSPluginBaseMetaclass)]
if not parents:
# If this is CMSPluginBase itself, and not a subclass, don't do anything
return super_new(cls, name, bases, attrs)
new_plugin = super_new(cls, name, bases, attrs)
# validate model is actually a CMSPlugin subclass.
if not issubclass(new_plugin.model, CMSPlugin):
raise SubClassNeededError(
"The 'model' attribute on CMSPluginBase subclasses must be "
"either CMSPlugin or a subclass of CMSPlugin. %r on %r is not."
% (new_plugin.model, new_plugin)
)
# validate the template:
if not hasattr(new_plugin, 'render_template'):
raise ImproperlyConfigured(
"CMSPluginBase subclasses must have a render_template attribute"
)
# Set the default form
if not new_plugin.form:
form_meta_attrs = {
'model': new_plugin.model,
'exclude': ('position', 'placeholder', 'language', 'plugin_type')
}
form_attrs = {
'Meta': type('Meta', (object,), form_meta_attrs)
}
new_plugin.form = type('%sForm' % name, (ModelForm,), form_attrs)
# Set the default fieldsets
if not new_plugin.fieldsets:
basic_fields = []
advanced_fields = []
for f in new_plugin.model._meta.fields:
if not f.auto_created and f.editable:
if hasattr(f, 'advanced'):
advanced_fields.append(f.name)
else: basic_fields.append(f.name)
if advanced_fields:
new_plugin.fieldsets = [
(
None,
{
'fields': basic_fields
}
),
(
_('Advanced options'),
{
'fields' : advanced_fields,
'classes' : ('collapse',)
}
)
]
# Set default name
if not new_plugin.name:
new_plugin.name = re.sub(r"([a-z])([A-Z])", r"\g<1> \g<2>", name)
return new_plugin
@python_2_unicode_compatible
class CMSPluginBase(with_metaclass(CMSPluginBaseMetaclass, admin.ModelAdmin)):
name = ""
module = _("Generic") # To be overridden in child classes
form = None
change_form_template = "admin/cms/page/plugin/change_form.html"
frontend_edit_template = 'cms/toolbar/plugin.html'
# Should the plugin be rendered in the admin?
admin_preview = False
render_template = None
# Should the plugin be rendered at all, or doesn't it have any output?
render_plugin = True
model = CMSPlugin
text_enabled = False
page_only = False
allow_children = False
child_classes = None
require_parent = False
parent_classes = None
disable_child_plugin = False
cache = get_cms_setting('PLUGIN_CACHE')
opts = {}
action_options = {
PLUGIN_MOVE_ACTION: {
'requires_reload': False
},
PLUGIN_COPY_ACTION: {
'requires_reload': True
},
}
def __init__(self, model=None, admin_site=None):
if admin_site:
super(CMSPluginBase, self).__init__(self.model, admin_site)
self.object_successfully_changed = False
# variables will be overwritten in edit_view, so we got required
self.cms_plugin_instance = None
self.placeholder = None
self.page = None
def render(self, context, instance, placeholder):
context['instance'] = instance
context['placeholder'] = placeholder
return context
@property
def parent(self):
return self.cms_plugin_instance.parent
def render_change_form(self, request, context, add=False, change=False, form_url='', obj=None):
"""
We just need the popup interface here
"""
context.update({
'preview': not "no_preview" in request.GET,
'is_popup': True,
'plugin': self.cms_plugin_instance,
'CMS_MEDIA_URL': get_cms_setting('MEDIA_URL'),
})
return super(CMSPluginBase, self).render_change_form(request, context, add, change, form_url, obj)
def has_add_permission(self, request, *args, **kwargs):
"""Permission handling change - if user is allowed to change the page
he must be also allowed to add/change/delete plugins..
Not sure if there will be plugin permission requirement in future, but
if, then this must be changed.
"""
return self.cms_plugin_instance.has_change_permission(request)
has_delete_permission = has_change_permission = has_add_permission
def save_model(self, request, obj, form, change):
"""
Override the original method and add some attributes to obj.
This has to be done because a newly created object must know
where it lives.
Attributes from cms_plugin_instance have to be assigned to the object
if the cms_plugin_instance attribute is available.
"""
if getattr(self, "cms_plugin_instance"):
# assign stuff to object
fields = self.cms_plugin_instance._meta.fields
for field in fields:
# assign all the fields - we can do this, because object is
# subclassing cms_plugin_instance (one to one relation)
value = getattr(self.cms_plugin_instance, field.name)
setattr(obj, field.name, value)
# remember the saved object
self.saved_object = obj
return super(CMSPluginBase, self).save_model(request, obj, form, change)
def response_change(self, request, obj):
"""
Just set a flag so we know something was changed and a new version can
be made if reversion is installed.
The new version will be created in admin.views.edit_plugin.
"""
self.object_successfully_changed = True
return super(CMSPluginBase, self).response_change(request, obj)
def response_add(self, request, obj, **kwargs):
"""
Just set a flag so we know something was changed and a new version can
be made if reversion is installed.
The new version will be created in admin.views.edit_plugin.
"""
self.object_successfully_changed = True
if not DJANGO_1_4:
post_url_continue = reverse('admin:cms_page_edit_plugin',
args=(obj._get_pk_val(),),
current_app=self.admin_site.name)
kwargs.setdefault('post_url_continue', post_url_continue)
return super(CMSPluginBase, self).response_add(request, obj, **kwargs)
def log_addition(self, request, object):
pass
def log_change(self, request, object, message):
pass
def log_deletion(self, request, object, object_repr):
pass
def icon_src(self, instance):
"""
Overwrite this if text_enabled = True
Return the URL for an image to be used for an icon for this
plugin instance in a text editor.
"""
return ""
def icon_alt(self, instance):
"""
Overwrite this if necessary if text_enabled = True
Return the 'alt' text to be used for an icon representing
the plugin object in a text editor.
"""
return "%s - %s" % (force_unicode(self.name), force_unicode(instance))
def get_fieldsets(self, request, obj=None):
"""
Same as from base class except if there are no fields, show an info message.
"""
fieldsets = super(CMSPluginBase, self).get_fieldsets(request, obj)
for name, data in fieldsets:
if data.get('fields'): # if fieldset with non-empty fields is found, return fieldsets
return fieldsets
if self.inlines:
return [] # if plugin has inlines but no own fields return empty fieldsets to remove empty white fieldset
try: # if all fieldsets are empty (assuming there is only one fieldset then) add description
fieldsets[0][1]['description'] = _('There are no further settings for this plugin. Please press save.')
except KeyError:
pass
return fieldsets
def get_child_classes(self, slot, page):
template = None
if page:
template = page.template
## config overrides..
ph_conf = get_placeholder_conf('child_classes', slot, template, default={})
child_classes = ph_conf.get(self.__class__.__name__, None)
if child_classes:
return child_classes
if self.child_classes:
return self.child_classes
else:
from cms.plugin_pool import plugin_pool
installed_plugins = plugin_pool.get_all_plugins(slot, page)
return [cls.__name__ for cls in installed_plugins]
def get_parent_classes(self, slot, page):
template = None
if page:
template = page.template
## config overrides..
ph_conf = get_placeholder_conf('parent_classes', slot, template, default={})
parent_classes = ph_conf.get(self.__class__.__name__, None)
if parent_classes:
return parent_classes
elif self.parent_classes:
return self.parent_classes
else:
return None
def get_action_options(self):
return self.action_options
def requires_reload(self, action):
actions = self.get_action_options()
reload_required = False
if action in actions:
options = actions[action]
reload_required = options.get('requires_reload', False)
return reload_required
def get_plugin_urls(self):
"""
Return URL patterns for which the plugin wants to register
views for.
"""
return []
def plugin_urls(self):
return self.get_plugin_urls()
plugin_urls = property(plugin_urls)
def get_extra_placeholder_menu_items(self, request, placeholder):
pass
def get_extra_global_plugin_menu_items(self, request, plugin):
pass
def get_extra_local_plugin_menu_items(self, request, plugin):
pass
def __repr__(self):
return smart_str(self.name)
def __str__(self):
return self.name
#===========================================================================
# Deprecated APIs
#===========================================================================
@property
def pluginmedia(self):
raise Deprecated(
"CMSPluginBase.pluginmedia is deprecated in favor of django-sekizai"
)
def get_plugin_media(self, request, context, plugin):
raise Deprecated(
"CMSPluginBase.get_plugin_media is deprecated in favor of django-sekizai"
)
class PluginMenuItem(object):
def __init__(self, name, url, data, question=None):
self.name = name
self.url = url
self.data = json.dumps(data)
self.question = question
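# Example plugin sketch (illustrative names only; a real plugin would live in
# an app's cms_plugins.py, be registered with the plugin pool and usually use
# its own CMSPlugin subclass as `model`):
#
#     class HelloPlugin(CMSPluginBase):
#         model = CMSPlugin
#         name = _("Hello Plugin")
#         render_template = "hello_plugin.html"
#
#         def render(self, context, instance, placeholder):
#             context = super(HelloPlugin, self).render(context, instance,
#                                                       placeholder)
#             context['greeting'] = 'Hello'
#             return context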
|
|
# -*- coding: utf-8 -*-
"""The parser and parser plugin presets."""
import yaml
from plaso.containers import artifacts
from plaso.lib import errors
class ParserPreset(object):
"""Parser and parser plugin preset.
Attributes:
name (str): name of the preset.
operating_systems (list[OperatingSystemArtifact]): operating system
artifact attribute containers, that specify to which operating
systems the preset applies.
parsers (list[str]): names of parser and parser plugins.
"""
def __init__(self, name, parsers):
"""Initializes a parser and parser plugin preset.
Args:
name (str): name of the preset.
parsers (list[str]): names of parser and parser plugins.
"""
super(ParserPreset, self).__init__()
self.name = name
self.operating_systems = []
self.parsers = parsers
class ParserPresetsManager(object):
"""The parsers and plugin presets manager."""
def __init__(self):
"""Initializes a parser and parser plugin presets manager."""
super(ParserPresetsManager, self).__init__()
self._definitions = {}
def _ReadOperatingSystemArtifactValues(self, operating_system_values):
"""Reads an operating system artifact from a dictionary.
Args:
operating_system_values (dict[str, object]): operating system values.
Returns:
OperatingSystemArtifact: an operating system artifact attribute container.
Raises:
MalformedPresetError: if the operating system values are not set or
their format is incorrect.
"""
if not operating_system_values:
raise errors.MalformedPresetError('Missing operating system values.')
family = operating_system_values.get('family', None)
product = operating_system_values.get('product', None)
version = operating_system_values.get('version', None)
if not family and not product:
raise errors.MalformedPresetError(
'Invalid operating system missing family and product.')
return artifacts.OperatingSystemArtifact(
family=family, product=product, version=version)
def _ReadParserPresetValues(self, preset_definition_values):
"""Reads a parser preset from a dictionary.
Args:
preset_definition_values (dict[str, object]): preset definition values.
Returns:
ParserPreset: a parser preset.
Raises:
MalformedPresetError: if the format of the preset definition is not set
or incorrect, or the preset of a specific operating system has already
been set.
"""
if not preset_definition_values:
raise errors.MalformedPresetError('Missing preset definition values.')
name = preset_definition_values.get('name', None)
if not name:
raise errors.MalformedPresetError(
'Invalid preset definition missing name.')
parsers = preset_definition_values.get('parsers', None)
if not parsers:
raise errors.MalformedPresetError(
'Invalid preset definition missing parsers.')
parser_preset = ParserPreset(name, parsers)
for operating_system_values in preset_definition_values.get(
'operating_systems', []):
operating_system = self._ReadOperatingSystemArtifactValues(
operating_system_values)
parser_preset.operating_systems.append(operating_system)
return parser_preset
def _ReadPresetsFromFileObject(self, file_object):
"""Reads parser and parser plugin presets from a file-like object.
Args:
file_object (file): file-like object containing the parser and parser
plugin presets definitions.
Yields:
ParserPreset: a parser preset.
Raises:
MalformedPresetError: if one or more plugin preset definitions are
malformed.
"""
yaml_generator = yaml.safe_load_all(file_object)
last_preset_definition = None
for yaml_definition in yaml_generator:
try:
preset_definition = self._ReadParserPresetValues(yaml_definition)
except errors.MalformedPresetError as exception:
error_location = 'At start'
if last_preset_definition:
error_location = 'After: {0:s}'.format(last_preset_definition.name)
raise errors.MalformedPresetError(
'{0:s} {1!s}'.format(error_location, exception))
yield preset_definition
last_preset_definition = preset_definition
def GetNames(self):
"""Retrieves the preset names.
Returns:
list[str]: preset names in alphabetical order.
"""
return sorted(self._definitions.keys())
def GetParsersByPreset(self, preset_name):
"""Retrieves the parser and plugin names of a specific preset.
Args:
preset_name (str): name of the preset.
Returns:
list[str]: parser and plugin names in alphabetical order.
Raises:
KeyError: if the preset does not exist.
"""
lookup_name = preset_name.lower()
preset_definition = self._definitions.get(lookup_name, None)
if preset_definition is None:
raise KeyError('Preset: {0:s} is not defined'.format(preset_name))
return sorted(preset_definition.parsers)
def GetPresetByName(self, name):
"""Retrieves a specific preset definition by name.
Args:
name (str): name of the preset.
Returns:
ParserPreset: a parser preset or None if not available.
"""
lookup_name = name.lower()
return self._definitions.get(lookup_name, None)
def GetPresetsByOperatingSystem(self, operating_system):
"""Retrieves preset definitions for a specific operating system.
Args:
operating_system (OperatingSystemArtifact): an operating system artifact
attribute container.
Returns:
list[PresetDefinition]: preset definition that correspond with the
operating system.
"""
preset_definitions = []
for preset_definition in self._definitions.values():
for preset_operating_system in preset_definition.operating_systems:
if preset_operating_system.IsEquivalent(operating_system):
preset_definitions.append(preset_definition)
return preset_definitions
def GetPresetsInformation(self):
"""Retrieves the presets information.
Returns:
list[tuple]: containing:
str: preset name.
str: comma separated parser and plugin names that are defined by
the preset.
"""
presets_information = []
for name, parser_preset in sorted(self._definitions.items()):
preset_information_tuple = (name, ', '.join(parser_preset.parsers))
# TODO: refactor to pass PresetDefinition.
presets_information.append(preset_information_tuple)
return presets_information
def ReadFromFile(self, path):
"""Reads parser and parser plugin presets from a file.
Args:
path (str): path of the file that contains the parser and parser plugin
presets configuration.
Raises:
MalformedPresetError: if one or more plugin preset definitions are
malformed.
"""
self._definitions = {}
with open(path, 'r') as file_object:
for preset_definition in self._ReadPresetsFromFileObject(file_object):
self._definitions[preset_definition.name] = preset_definition
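# Example presets file (YAML documents as read by _ReadPresetsFromFileObject;
# the preset and parser names below are illustrative only):
#
#     ---
#     name: win7
#     parsers: [filestat, prefetch, winreg]
#     operating_systems:
#     - {family: Windows NT, product: Windows 7}
#     ---
#     name: linux
#     parsers: [filestat, syslog]
#
# Usage sketch:
#     manager = ParserPresetsManager()
#     manager.ReadFromFile('presets.yaml')
#     manager.GetParsersByPreset('win7')   # -> ['filestat', 'prefetch', 'winreg']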
|
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This script tests the installer with test cases specified in the config file.
For each test case, it checks that the machine states after the execution of
each command match the expected machine states. For more details, take a look at
the design documentation at http://goo.gl/Q0rGM6
"""
import argparse
import datetime
import inspect
import json
import os
import subprocess
import sys
import time
import unittest
import _winreg
from variable_expander import VariableExpander
import verifier_runner
def LogMessage(message):
"""Logs a message to stderr.
Args:
message: The message string to be logged.
"""
now = datetime.datetime.now()
frameinfo = inspect.getframeinfo(inspect.currentframe().f_back)
filename = os.path.basename(frameinfo.filename)
line = frameinfo.lineno
sys.stderr.write('[%s:%s(%s)] %s\n' % (now.strftime('%m%d/%H%M%S'),
filename, line, message))
class Config:
"""Describes the machine states, actions, and test cases.
Attributes:
states: A dictionary where each key is a state name and the associated value
is a property dictionary describing that state.
actions: A dictionary where each key is an action name and the associated
value is the action's command.
tests: An array of test cases.
"""
def __init__(self):
self.states = {}
self.actions = {}
self.tests = []
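# Illustrative shape of the parsed config (a sketch only; the real config is
# JSON and its exact keys are defined by the config file format, not by this
# class):
#
#   states:  {'clean': [...verifier property dicts...],
#             'chrome_installed': [...]}
#   actions: {'install_chrome': '"$MINI_INSTALLER" --do-not-launch-chrome'}
#   tests:   test cases whose traversal alternates state and action names,
#            e.g. clean -> install_chrome -> chrome_installed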
class InstallerTest(unittest.TestCase):
"""Tests a test case in the config file."""
def __init__(self, name, test, config, variable_expander, quiet):
"""Constructor.
Args:
name: The name of this test.
test: An array of alternating state names and action names, starting and
ending with state names.
config: The Config object.
variable_expander: A VariableExpander object.
quiet: If True, suppress per-action and per-state log messages.
"""
super(InstallerTest, self).__init__()
self._name = name
self._test = test
self._config = config
self._variable_expander = variable_expander
self._quiet = quiet
self._verifier_runner = verifier_runner.VerifierRunner()
self._clean_on_teardown = True
def __str__(self):
"""Returns a string representing the test case.
Returns:
A string created by joining state names and action names together with
' -> ', for example, 'Test: clean -> install chrome -> chrome_installed'.
"""
return '%s: %s\n' % (self._name, ' -> '.join(self._test))
def id(self):
"""Returns the name of the test."""
# Overridden from unittest.TestCase so that id() contains the name of the
# test case from the config file in place of the name of this class's test
# function.
return unittest.TestCase.id(self).replace(self._testMethodName, self._name)
def runTest(self):
"""Run the test case."""
# |test| is an array of alternating state names and action names, starting
# and ending with state names. Therefore, its length must be odd.
self.assertEqual(1, len(self._test) % 2,
'The length of test array must be odd')
state = self._test[0]
self._VerifyState(state)
# Starting at index 1, we loop through pairs of (action, state).
for i in range(1, len(self._test), 2):
action = self._test[i]
if not self._quiet:
LogMessage('Beginning action %s' % action)
RunCommand(self._config.actions[action], self._variable_expander)
if not self._quiet:
LogMessage('Finished action %s' % action)
state = self._test[i + 1]
self._VerifyState(state)
# If the test makes it here, it means it was successful, because RunCommand
# and _VerifyState throw an exception on failure.
self._clean_on_teardown = False
def tearDown(self):
"""Cleans up the machine if the test case fails."""
if self._clean_on_teardown:
RunCleanCommand(True, self._variable_expander)
def shortDescription(self):
"""Overridden from unittest.TestCase.
We return None as the short description to suppress its printing.
The default implementation of this method returns the docstring of the
runTest method, which is not useful since it's the same for every test case.
The description from the __str__ method is informative enough.
"""
return None
def _VerifyState(self, state):
"""Verifies that the current machine state matches a given state.
Args:
state: A state name.
"""
if not self._quiet:
LogMessage('Verifying state %s' % state)
try:
self._verifier_runner.VerifyAll(self._config.states[state],
self._variable_expander)
except AssertionError as e:
# If an AssertionError occurs, we intercept it and add the state name
# to the error message so that we know where the test fails.
raise AssertionError("In state '%s', %s" % (state, e))
def RunCommand(command, variable_expander):
"""Runs the given command from the current file's directory.
This function throws an Exception if the command returns with non-zero exit
status.
Args:
command: A command to run. It is expanded using Expand.
variable_expander: A VariableExpander object.
"""
expanded_command = variable_expander.Expand(command)
script_dir = os.path.dirname(os.path.abspath(__file__))
exit_status = subprocess.call(expanded_command, shell=True, cwd=script_dir)
if exit_status != 0:
raise Exception('Command %s returned non-zero exit status %s' % (
expanded_command, exit_status))
def DeleteGoogleUpdateRegistration(system_level, variable_expander):
"""Deletes Chrome's registration with Google Update.
Args:
system_level: True if system-level Chrome is to be deleted.
variable_expander: A VariableExpander object.
"""
root = (_winreg.HKEY_LOCAL_MACHINE if system_level
else _winreg.HKEY_CURRENT_USER)
key_name = variable_expander.Expand('$CHROME_UPDATE_REGISTRY_SUBKEY')
try:
key_handle = _winreg.OpenKey(root, key_name, 0,
_winreg.KEY_SET_VALUE |
_winreg.KEY_WOW64_32KEY)
_winreg.DeleteValue(key_handle, 'pv')
except WindowsError:
# The key isn't present, so there is no value to delete.
pass
def RunCleanCommand(force_clean, variable_expander):
"""Puts the machine in the clean state (i.e. Chrome not installed).
Args:
force_clean: A boolean indicating whether to force cleaning existing
installations.
variable_expander: A VariableExpander object.
"""
# TODO(sukolsak): Handle Chrome SxS installs.
interactive_option = '--interactive' if not force_clean else ''
for system_level in (False, True):
level_option = '--system-level' if system_level else ''
command = ('python uninstall_chrome.py '
'--chrome-long-name="$CHROME_LONG_NAME" '
'--no-error-if-absent %s %s' %
(level_option, interactive_option))
RunCommand(command, variable_expander)
if force_clean:
DeleteGoogleUpdateRegistration(system_level, variable_expander)
def MergePropertyDictionaries(current_property, new_property):
"""Merges the new property dictionary into the current property dictionary.
This is different from general dictionary merging in that, in case there are
keys with the same name, we merge values together in the first level, and we
override earlier values in the second level. For more details, take a look at
http://goo.gl/uE0RoR
Args:
current_property: The property dictionary to be modified.
new_property: The new property dictionary.
"""
for key, value in new_property.iteritems():
if key not in current_property:
current_property[key] = value
else:
assert(isinstance(current_property[key], dict) and
isinstance(value, dict))
# This merges two dictionaries together. In case there are keys with
# the same name, the latter will override the former.
current_property[key] = dict(
current_property[key].items() + value.items())
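# Hedged illustration (not part of the original script) of the merge rule
# described above; the 'Files' and 'Processes' keys below are hypothetical.
def _ExampleMergePropertyDictionaries():
"""Shows how first-level keys are merged into the current property dict."""
current = {'Files': {'a.txt': {'exists': True}}}
new = {'Files': {'b.txt': {'exists': False}}, 'Processes': {}}
MergePropertyDictionaries(current, new)
assert current == {
'Files': {'a.txt': {'exists': True}, 'b.txt': {'exists': False}},
'Processes': {},
}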
def ParsePropertyFiles(directory, filenames):
"""Parses an array of .prop files.
Args:
directory: The directory where the Config file and all Property files
reside in.
filenames: An array of Property filenames.
Returns:
A property dictionary created by merging all property dictionaries specified
in the array.
"""
current_property = {}
for filename in filenames:
path = os.path.join(directory, filename)
new_property = json.load(open(path))
MergePropertyDictionaries(current_property, new_property)
return current_property
def ParseConfigFile(filename):
"""Parses a .config file.
Args:
filename: A Config filename.
Returns:
A Config object.
"""
with open(filename, 'r') as fp:
config_data = json.load(fp)
directory = os.path.dirname(os.path.abspath(filename))
config = Config()
config.tests = config_data['tests']
for state_name, state_property_filenames in config_data['states']:
config.states[state_name] = ParsePropertyFiles(directory,
state_property_filenames)
for action_name, action_command in config_data['actions']:
config.actions[action_name] = action_command
return config
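# Hedged sketch of the .config file shape ParseConfigFile expects; the test,
# state, action and file names below are hypothetical:
#
# {
# "tests": [{"name": "install", "traversal": ["clean", "install chrome",
# "chrome_installed"]}],
# "states": [["clean", ["clean.prop"]],
# ["chrome_installed", ["chrome_installed.prop"]]],
# "actions": [["install chrome", "python install_chrome.py"]]
# }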
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--build-dir', default='out',
help='Path to main build directory (the parent of the '
'Release or Debug directory)')
parser.add_argument('--target', default='Release',
help='Build target (Release or Debug)')
parser.add_argument('--force-clean', action='store_true', default=False,
help='Force cleaning existing installations')
parser.add_argument('-q', '--quiet', action='store_true', default=False,
help='Reduce test runner output')
parser.add_argument('--write-full-results-to', metavar='FILENAME',
help='Path to write the list of full results to.')
parser.add_argument('--config', metavar='FILENAME',
help='Path to test configuration file')
parser.add_argument('test', nargs='*',
help='Name(s) of tests to run.')
args = parser.parse_args()
if not args.config:
parser.error('missing mandatory --config FILENAME argument')
mini_installer_path = os.path.join(args.build_dir, args.target,
'mini_installer.exe')
assert os.path.exists(mini_installer_path), ('Could not find file %s' %
mini_installer_path)
suite = unittest.TestSuite()
# Set the env var used by mini_installer.exe to decide to not show UI.
os.environ['MINI_INSTALLER_TEST'] = '1'
config = ParseConfigFile(args.config)
variable_expander = VariableExpander(mini_installer_path)
RunCleanCommand(args.force_clean, variable_expander)
for test in config.tests:
# If tests were specified via |tests|, their names are formatted like so:
test_name = '%s/%s/%s' % (InstallerTest.__module__,
InstallerTest.__name__,
test['name'])
if not args.test or test_name in args.test:
suite.addTest(InstallerTest(test['name'], test['traversal'], config,
variable_expander, args.quiet))
verbosity = 2 if not args.quiet else 1
result = unittest.TextTestRunner(verbosity=verbosity).run(suite)
if args.write_full_results_to:
with open(args.write_full_results_to, 'w') as fp:
json.dump(_FullResults(suite, result, {}), fp, indent=2)
fp.write('\n')
return 0 if result.wasSuccessful() else 1
# TODO(dpranke): Find a way for this to be shared with the mojo and other tests.
TEST_SEPARATOR = '.'
def _FullResults(suite, result, metadata):
"""Convert the unittest results to the Chromium JSON test result format.
This matches run-webkit-tests (the layout tests) and the flakiness dashboard.
"""
full_results = {}
full_results['interrupted'] = False
full_results['path_delimiter'] = TEST_SEPARATOR
full_results['version'] = 3
full_results['seconds_since_epoch'] = time.time()
for md in metadata:
key, val = md.split('=', 1)
full_results[key] = val
all_test_names = _AllTestNames(suite)
failed_test_names = _FailedTestNames(result)
full_results['num_failures_by_type'] = {
'FAIL': len(failed_test_names),
'PASS': len(all_test_names) - len(failed_test_names),
}
full_results['tests'] = {}
for test_name in all_test_names:
value = {}
value['expected'] = 'PASS'
if test_name in failed_test_names:
value['actual'] = 'FAIL'
value['is_unexpected'] = True
else:
value['actual'] = 'PASS'
_AddPathToTrie(full_results['tests'], test_name, value)
return full_results
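# Hedged sketch of the dictionary _FullResults returns; the test name and
# counts below are hypothetical:
#
# {
# "interrupted": False,
# "path_delimiter": ".",
# "version": 3,
# "seconds_since_epoch": 1408052661.0,
# "num_failures_by_type": {"FAIL": 1, "PASS": 2},
# "tests": {"test_installer": {"InstallerTest": {"install": {
# "expected": "PASS", "actual": "FAIL", "is_unexpected": True}}}}
# }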
def _AllTestNames(suite):
test_names = []
# _tests is protected pylint: disable=W0212
for test in suite._tests:
if isinstance(test, unittest.suite.TestSuite):
test_names.extend(_AllTestNames(test))
else:
test_names.append(test.id())
return test_names
def _FailedTestNames(result):
return set(test.id() for test, _ in result.failures + result.errors)
def _AddPathToTrie(trie, path, value):
if TEST_SEPARATOR not in path:
trie[path] = value
return
directory, rest = path.split(TEST_SEPARATOR, 1)
if directory not in trie:
trie[directory] = {}
_AddPathToTrie(trie[directory], rest, value)
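# Hedged illustration (not part of the original script) of the trie layout
# produced by _AddPathToTrie; the dotted test name below is hypothetical.
def _ExampleAddPathToTrie():
"""Shows how a dotted test name becomes nested result dictionaries."""
trie = {}
_AddPathToTrie(trie, 'suite.case.test', {'actual': 'PASS'})
assert trie == {'suite': {'case': {'test': {'actual': 'PASS'}}}}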
if __name__ == '__main__':
sys.exit(main())
|
|
#! /usr/bin/env python
import unittest
import operator
import cPickle
import copy
import sys
import numpy
import itertools
import mvn
sqrt = mvn.sqrt
Mvn = mvn.Mvn
Matrix = mvn.Matrix
import mvn.helpers as helpers
import mvn.test.fixture as fixture
fix = fixture.MvnFixture(fixture.lookup['last'])
class myTests(unittest.TestCase):
def setUp(self):
fix.reset()
class commuteTester(myTests):
def testRight(self):
self.assertTrue( fix.A+fix.B == fix.B+fix.A )
self.assertTrue( fix.A*fix.B == fix.B*fix.A )
self.assertTrue( fix.B-fix.A == (-fix.A)+fix.B )
self.assertTrue( fix.A/fix.B == (fix.B**-1)*fix.A )
self.assertTrue( fix.A & fix.B == fix.B & fix.A )
self.assertTrue( fix.A | fix.B == fix.B | fix.A )
self.assertTrue( fix.A ^ fix.B == fix.B ^ fix.A )
class creationTester(myTests):
def testFromData(self):
self.assertTrue( Mvn.fromData(fix.A) == fix.A )
data=[1, 2, 3]
one = Mvn.fromData(data)
self.assertTrue(one.ndim == 1)
data=[[1, 2, 3]]
many=Mvn.fromData(data)
self.assertTrue( many.mean == data )
self.assertTrue( Matrix(many.var) == numpy.zeros )
self.assertTrue( many.vectors == numpy.zeros )
self.assertTrue( many.cov == numpy.zeros )
def testFromCov(self):
self.assertTrue(
Mvn.fromCov(fix.A.cov, mean=fix.A.mean) ==
fix.A
)
self.assertTrue(
Mvn.fromCov(Matrix.zeros([0, 0])) == Mvn()
)
def testZeros(self):
n=abs(fix.N)
Z=Mvn.zeros(n)
self.assertTrue( Z.mean == Matrix.zeros )
self.assertTrue( Z.var.size == 0 )
self.assertTrue( Z.vectors.size == 0 )
self.assertTrue( Z**-1 == Mvn.infs)
def testInfs(self):
n=abs(fix.N)
inf=Mvn.infs(n)
self.assertTrue( inf.mean == Matrix.zeros )
self.assertTrue( inf.var.size == inf.mean.size == n )
self.assertTrue( Matrix(inf.var) == Matrix.infs )
self.assertTrue( inf.vectors == Matrix.eye )
self.assertTrue( inf**-1 == Mvn.zeros )
def testEye(self):
n=abs(fix.N)
eye=Mvn.eye(n)
self.assertTrue(eye.mean == Matrix.zeros)
self.assertTrue(eye.var.size == eye.mean.size == n)
self.assertTrue(Matrix(eye.var) == Matrix.ones)
self.assertTrue(eye.vectors == Matrix.eye)
self.assertTrue(eye**-1 == eye)
def testCopy(self):
A2=fix.A.copy(deep = True)
self.assertTrue( A2 == fix.A )
self.assertTrue( A2 is not fix.A )
self.assertTrue( A2.mean is not fix.A.mean )
fix.A.copy(fix.B,deep= True)
self.assertTrue( fix.B == fix.A )
self.assertTrue( fix.B is not fix.A )
self.assertTrue( fix.A.mean is not fix.B.mean )
A2=fix.A.copy(deep= False)
self.assertTrue( A2 == fix.A )
self.assertTrue( A2 is not fix.A )
self.assertTrue( A2.mean is fix.A.mean )
fix.A.copy(fix.B,deep= False)
self.assertTrue( fix.B == fix.A )
self.assertTrue( fix.B is not fix.A )
self.assertTrue( fix.A.mean is fix.B.mean )
class densityTester(myTests):
def testDensity(self):
"""
Another way to think of the & operation is as taking a
pointwise product of the probability densities and then
normalizing so the total probability is 1.
But it works in both directions: blend and un-blend
are like multiply and divide.
"""
if not (fix.A.flat or fix.B.flat):
#remember you can undo a blend.
self.assertTrue((~fix.B) & fix.A & fix.B == fix.A)
#setup
AB = fix.A & fix.B
A_B = fix.A & ~fix.B
locations = AB.getX()
# A&B == k*A.*B
Da = fix.A.density(locations)
Db = fix.B.density(locations)
Dab = (AB).density(locations)
ratio = Dab/(Da*Db)
self.assertTrue(Matrix(ratio) == ratio[0])
# A&(~B) == k*A./B
Da_b = (A_B).density(locations)
ratio = Da_b/(Da/Db)
self.assertTrue(Matrix(0) == ratio.var())
#log
Ea = fix.A.entropy(locations)
Eb = fix.B.entropy(locations)
Eab = (AB).entropy(locations)
delta = Eab-(Ea+Eb)
self.assertTrue(Matrix(0) == delta.var())
Ea_b = (A_B).entropy(locations)
delta = Ea_b-(Ea-Eb)
self.assertTrue(Matrix(0) == delta.var())
def testDensity2(self):
data = fix.A.sample([5, 5])
self.assertTrue(
Matrix(fix.A.density(data)) ==
numpy.exp(-fix.A.entropy(data))
)
class equalityTester(myTests):
def testEq(self):
# always equal if same object
self.assertTrue( fix.A == fix.A )
# still equal if copy
self.assertTrue( fix.A == fix.A.copy() )
self.assertTrue( fix.A is not fix.A.copy() )
def testCosmetic(self):
self.assertTrue( fix.A == fix.A.square() )
self.assertTrue( fix.A == fix.A.copy() )
self.assertTrue( fix.A == fix.A.inflate() )
self.assertTrue( fix.A == fix.A.inflate().squeeze() )
def testInf(self):
self.assertTrue(
Mvn(mean= [1, 0, 0], vectors= [1, 0, 0], var= numpy.inf) ==
Mvn(mean= [0, 0, 0], vectors= [1, 0, 0], var= numpy.inf)
)
def testNot(self):
self.assertTrue( fix.A != fix.B )
class signTester(myTests):
def testPlus(self):
self.assertTrue( fix.A == +fix.A )
self.assertTrue( fix.A == ++fix.A )
self.assertTrue( fix.A is not +fix.A )
self.assertTrue( fix.A+fix.A == 2*fix.A )
self.assertTrue( fix.A+fix.A+fix.A == 3*fix.A )
def testMinus(self):
self.assertTrue( -fix.A == -1*fix.A )
self.assertTrue( fix.A == --fix.A )
self.assertTrue( fix.A--fix.B == fix.A+fix.B )
self.assertTrue( fix.A-fix.B == fix.A+(-fix.B) )
def testPos(self):
self.assertTrue( fix.A == +fix.A == ++fix.A )
def testNeg(self):
self.assertTrue( -fix.A == -1*fix.A )
self.assertTrue( fix.A == -1*-1*fix.A )
def testAdd(self):
self.assertTrue( (fix.A+fix.A) == (2*fix.A) )
self.assertTrue( (fix.A+fix.A).mean == (2*fix.A).mean )
self.assertTrue( (fix.A+fix.A).mean == 2*fix.A.mean )
n=abs(fix.N)
self.assertTrue(
sum(itertools.repeat(fix.A,n),Mvn.zeros(fix.A.ndim)) ==
fix.A*n
)
self.assertTrue(
sum(itertools.repeat(-fix.A,n),Mvn.zeros(fix.A.ndim)) ==
fix.A*(-n)
)
self.assertTrue( fix.A+fix.B == Mvn(
mean=fix.A.mean+fix.B.mean,
vectors=numpy.vstack([fix.A.vectors,fix.B.vectors]),
var = numpy.concatenate([fix.A.var,fix.B.var]),
))
self.assertTrue( (fix.A+fix.A).mean == (2*fix.A).mean )
self.assertTrue( (fix.A+fix.A).mean == 2*fix.A.mean )
self.assertTrue( (fix.A+fix.B).mean == fix.A.mean+fix.B.mean )
self.assertTrue( (fix.A+fix.B).cov == fix.A.cov+fix.B.cov )
def testSub(self):
self.assertTrue( fix.B+(-fix.A) == fix.B+(-1)*fix.A == fix.B-fix.A )
self.assertTrue( (fix.B-fix.A)+fix.A == fix.B )
self.assertTrue(
fix.A-fix.A ==
Mvn(mean= numpy.zeros_like(fix.A.mean))
)
self.assertTrue( (fix.A-fix.B)+fix.B == fix.A )
self.assertTrue( (fix.A-fix.B).mean == fix.A.mean - fix.B.mean )
self.assertTrue( (fix.A-fix.B).cov== fix.A.cov - fix.B.cov )
self.assertTrue(
(fix.A+fix.B*(-fix.E)).mean ==
fix.A.mean - fix.B.mean
)
self.assertTrue( (fix.A+fix.B*(-fix.E)).cov == fix.A.cov + fix.B.cov )
self.assertTrue( fix.A-fix.B == -(fix.B-fix.A) )
self.assertTrue( fix.A+(fix.B-fix.B) == fix.A )
class productTester(myTests):
def testMulTypes(self):
self.assertTrue( isinstance(fix.A*fix.B,Mvn) )
self.assertTrue( isinstance(fix.A*fix.M,Mvn) )
self.assertTrue( isinstance(fix.M.T*fix.A,Matrix) )
self.assertTrue( isinstance(fix.A*fix.K1,Mvn) )
self.assertTrue( isinstance(fix.K1*fix.A,Mvn) )
def testMul(self):
self.assertTrue( fix.A**2 == fix.A*fix.A )
def testMvnMul(self):
self.assertTrue(
(fix.A*fix.B).cov ==
(fix.A*fix.B.transform()+fix.B*fix.A.transform()).cov/2
)
if not (fix.A.flat or fix.B.flat):
self.assertTrue(
(fix.A*fix.B).mean ==
(fix.A*fix.B.transform()+fix.B*fix.A.transform()).mean/2
)
self.assertTrue( fix.M.T*fix.B == fix.M.T*fix.B.transform() )
if not fix.A.flat:
self.assertTrue( fix.A**2 == fix.A*fix.A.transform() )
self.assertTrue( fix.A*(fix.B**0+fix.B**0) == fix.A*(2*fix.B**0) )
self.assertTrue(
(fix.A*fix.B**0 + fix.A*fix.B**0).cov ==
(2*fix.A*fix.B**0).cov
)
self.assertTrue( fix.A*fix.A == fix.A**2 )
self.assertTrue( fix.A*fix.B == fix.B*fix.A )
if not fix.A.flat:
self.assertTrue( fix.A*fix.A == fix.A*fix.A.transform() )
def testScalarMul(self):
self.assertTrue( fix.A+fix.A == 2*fix.A )
self.assertTrue( (2*fix.A).mean == 2*fix.A.mean )
self.assertTrue( (2*fix.A.cov) == 2*fix.A.cov )
self.assertTrue( fix.A*(fix.K1+fix.K2) == fix.A*fix.K1+fix.A*fix.K2 )
self.assertTrue( (fix.A*(fix.K1*fix.E)).mean == fix.K1*fix.A.mean )
self.assertTrue(
(fix.A*(fix.K1*fix.E)).cov == fix.A.cov*abs(fix.K1)**2 )
self.assertTrue( (fix.K1*fix.A)*fix.K2 == fix.K1*(fix.A*fix.K2) )
self.assertTrue(
(2*fix.B**0).transform() == sqrt(2)*(fix.B**0).transform()
)
self.assertTrue( fix.A*fix.K1 == fix.K1*fix.A )
self.assertTrue( (fix.A*fix.K1).mean == fix.K1*fix.A.mean )
self.assertTrue( (fix.A*fix.K1).cov == (fix.A.cov)*fix.K1 )
self.assertTrue( (fix.A*(fix.E*fix.K1)).mean == fix.A.mean*fix.K1 )
self.assertTrue(
(fix.A*(fix.E*fix.K1)).cov ==
(fix.E*fix.K1).H*fix.A.cov*(fix.E*fix.K1)
)
def testMixedMul(self):
self.assertTrue( fix.K1*fix.A*fix.M == fix.A*fix.K1*fix.M )
self.assertTrue( fix.K1*fix.A*fix.M == fix.A*fix.M*fix.K1 )
def testMatrixMul(self):
self.assertTrue( (fix.A*fix.M)*fix.M2.H == fix.A*(fix.M*fix.M2.H) )
self.assertTrue( (fix.M*fix.M2.H)*fix.A == fix.M*(fix.M2.H*fix.A) )
self.assertTrue( (fix.A*fix.M).cov == fix.M.H*fix.A.cov*fix.M )
self.assertTrue( (fix.A**2).transform() == fix.A.cov )
self.assertTrue( fix.A*fix.E == fix.A )
self.assertTrue( (-fix.A)*fix.E == -fix.A )
self.assertTrue( (fix.A*fix.M).cov == fix.M.H*fix.A.cov*fix.M )
self.assertTrue( (fix.A*fix.M).mean == fix.A.mean*fix.M )
def testDiv(self):
self.assertTrue( fix.A/fix.B == fix.A*fix.B**(-1) )
m=fix.M*fix.M2.T
self.assertTrue( fix.A/m == fix.A*(m**(-1)) )
self.assertTrue( fix.A/fix.K1 == fix.A*(fix.K1**(-1)) )
self.assertTrue( fix.K1/fix.A == fix.K1*(fix.A**(-1)) )
self.assertTrue( fix.M.H/fix.A == fix.M.H*(fix.A**(-1)) )
class propertyTester(myTests):
def testNdim(self):
self.assertTrue( fix.A.ndim == fix.A.mean.size )
def testShape(self):
self.assertTrue( fix.A.vectors.shape == fix.A.shape )
self.assertTrue( (fix.A.var.size,fix.A.mean.size) == fix.A.shape )
self.assertTrue( fix.A.shape[1] == fix.A.ndim )
def testCov(self):
self.assertTrue(
fix.A.vectors.H*numpy.diagflat(fix.A.var)*fix.A.vectors ==
fix.A.cov
)
self.assertTrue( fix.A.transform()**2 == abs(fix.A).cov )
self.assertTrue( fix.A.transform(2) == abs(fix.A).cov )
if not(fix.A.flat and fix.N<0):
self.assertTrue(
fix.A.transform()**fix.N ==
fix.A.transform(fix.N)
)
def testCov2(self):
self.assertTrue( fix.A.cov == (fix.A**2).transform() )
self.assertTrue( fix.A.cov == fix.A.transform()*fix.A.transform() )
self.assertTrue( fix.A.cov == fix.A.transform()**2 )
self.assertTrue( fix.A.cov == fix.A.transform(2) )
self.assertTrue(
(
fix.A*fix.B.transform() +
fix.B*fix.A.transform()
).cov/2 ==
(fix.A*fix.B).cov
)
def testScaled(self):
self.assertTrue( fix.A.scaled.H*fix.A.scaled == abs(fix.A).cov )
self.assertTrue( Matrix(helpers.mag2(fix.A.scaled)) == fix.A.var )
self.assertTrue( fix.A.vectors.H*fix.A.scaled == fix.A.transform() )
def testVectors(self):
if fix.A.shape[0] == fix.A.shape[1]:
self.assertTrue( fix.A.vectors.H*fix.A.vectors == Matrix.eye )
else:
a = fix.A.inflate()
self.assertTrue( a.vectors.H*a.vectors == Matrix.eye )
self.assertTrue(fix.A.vectors*fix.A.vectors.H == Matrix.eye)
self.assertTrue((fix.A*fix.M).cov == fix.M.H*fix.A.cov*fix.M)
self.assertTrue(
(fix.A*fix.M).vectors*(fix.A*fix.M).vectors.H ==
Matrix.eye
)
def testTransform(self):
self.assertTrue( fix.A.transform() == fix.A.transform(1) )
self.assertTrue( fix.A.transform() == fix.A.scaled.H*fix.A.vectors )
class mergeTester(myTests):
def testStack(self):
fix.AB= Mvn.stack(fix.A,fix.B)
self.assertTrue( fix.AB[:,:fix.A.ndim] == fix.A )
self.assertTrue( fix.AB[:,fix.A.ndim:] == fix.B )
self.assertTrue( Mvn.stack(Mvn.infs(2),Mvn.infs(5)) == Mvn.infs(7) )
self.assertTrue( Mvn.stack(Mvn.zeros(2),Mvn.zeros(5)) == Mvn.zeros(7) )
class powerTester(myTests):
def testIntPowers(self):
N = abs(fix.N)
self.assertTrue( fix.A.transform(N) == (fix.A**N).transform() )
self.assertTrue( fix.A.transform(N) == fix.A.transform()**N )
N = -abs(fix.N)
if not fix.A.flat:
self.assertTrue( fix.A.transform(N) == (fix.A**N).transform() )
self.assertTrue( fix.A.transform(N) == fix.A.transform()**N )
def testMorePowers(self):
self.assertTrue(
(fix.A**fix.K1).transform()**2 ==
fix.A.transform(fix.K1)**2
)
self.assertTrue( fix.A**fix.K1*fix.A**fix.K2 == fix.A**(fix.K1+fix.K2))
self.assertTrue( fix.A**fix.K1/fix.A**fix.K2 == fix.A**(fix.K1-fix.K2))
self.assertTrue( fix.A*fix.A**fix.K2 == fix.A**(1+fix.K2))
self.assertTrue( fix.A/fix.A**fix.K2 == fix.A**(1-fix.K2))
self.assertTrue( fix.A**fix.K1*fix.A == fix.A**(fix.K1+1))
self.assertTrue( fix.A**fix.K1/fix.A == fix.A**(fix.K1-1))
def testZeroPow(self):
self.assertTrue( fix.A**0*fix.A == fix.A )
self.assertTrue( fix.A*fix.A**0 == fix.A )
self.assertTrue( Matrix((fix.A**0).var) == numpy.ones )
def testZeroFlat(self):
if not fix.A.flat:
self.assertTrue( fix.A**0 == fix.A**(-1)*fix.A )
self.assertTrue( fix.A**0 == fix.A*fix.A**(-1) )
self.assertTrue( fix.A**0 == fix.A/fix.A )
self.assertTrue(
(fix.A**0).mean ==
fix.A.mean*(fix.A**-1).transform()
)
self.assertTrue(
(fix.A**0).mean ==
fix.A.mean*fix.A.transform(-1)
)
def testOnePow(self):
self.assertTrue( fix.A == fix.A**1 )
self.assertTrue( -fix.A == (-fix.A)**1 )
if not fix.A.flat:
self.assertTrue( fix.A == (fix.A**-1)**-1 )
def testRealPow(self):
self.assertTrue( fix.A*fix.A == fix.A**2 )
self.assertTrue( fix.A/fix.A**-1 == fix.A**2 )
self.assertTrue(
fix.A.mean*fix.A.transform(0) ==
((fix.A**-1)**-1).mean
)
k1 = fix.K1
k2 = fix.K2
self.assertTrue( (fix.A**k1).transform() == fix.A.transform(k1) )
self.assertTrue( (fix.A**k1)*(fix.A**k2) == fix.A**(k1+k2) )
self.assertTrue( fix.A**k1/fix.A**k2 == fix.A**(k1-k2) )
if not fix.A.flat:
self.assertTrue(
fix.A**k1 == (
fix.A*fix.A.transform(k1-1) +
Mvn(mean=fix.A.mean-fix.A.mean*fix.A.transform(0))
))
class widthTester(myTests):
def testWidth(self):
self.assertTrue(
Matrix([fix.A[:,n].var[0] for n in range(fix.A.ndim)]) ==
fix.A.width()**2
)
self.assertTrue(
Matrix(fix.A.corr.diagonal()) ==
Matrix.ones
)
norm = fix.A/fix.A.width()
self.assertTrue(norm.corr == norm.cov)
self.assertTrue(
Matrix([norm[:,n].var[0] for n in range(norm.ndim)]) ==
Matrix.ones
)
self.assertTrue(
Matrix((fix.A**0).var) ==
Matrix.ones
)
data = fix.A.sample(100)
a = Mvn.fromData(data)
self.assertTrue(Matrix(numpy.std (data,0)) == a.width() )
self.assertTrue(Matrix(numpy.var (data,0)) == a.width()**2)
self.assertTrue(Matrix(numpy.mean(data,0)) == a.mean )
class linalgTester(myTests):
def testTrace(self):
self.assertTrue(
Matrix(numpy.trace(fix.A.transform(0))) ==
fix.A.shape[0]
)
self.assertTrue( Matrix(fix.A.trace()) == fix.A.var.sum() )
self.assertTrue( Matrix(fix.A.trace()) == numpy.trace(fix.A.cov) )
def testDet(self):
self.assertTrue( Matrix(fix.A.det()) == numpy.linalg.det(fix.A.cov) )
self.assertTrue( Matrix(fix.A.det()) ==
0 if
fix.A.shape[0] != fix.A.shape[1] else
numpy.prod(fix.A.var)
)
def testDist2(self):
if not fix.A.flat:
self.assertTrue(
Matrix((fix.A**0).dist2(numpy.zeros((1,fix.ndim)))) ==
helpers.mag2((fix.A**0).mean)
)
def testSquare(self):
vectors = Matrix(helpers.ascomplex(numpy.random.randn(
numpy.random.randint(1,10),numpy.random.randint(1,10),2
)))
cov = vectors.H*vectors
Xcov = vectors*vectors.H
(Xval,Xvec) = numpy.linalg.eigh(Xcov)
vec = Xvec.H*vectors
self.assertTrue( vec.H*vec == cov )
class givenTester(myTests):
def testGivenScalar(self):
a = fix.A.given(dims= 0,value= 1)
self.assertTrue( a.mean[:, 0] == 1 )
self.assertTrue( a.vectors[:, 0] == numpy.zeros )
a=fix.A.copy(deep= True)
a[:, 0] = 1
self.assertTrue( a == fix.A.given(dims= 0, value= 1) )
def testGivenLinear(self):
L1 = Mvn(mean= [0, 0], vectors=[[1, 1],[1, -1]], var=[numpy.inf, 0.5])
L2=Mvn(mean=[1, 0], vectors=[0, 1], var=numpy.inf)
self.assertTrue( L1.given(dims=0, value=1) == L1&L2 )
self.assertTrue( (L1&L2).mean == [1, 1] )
self.assertTrue( (L1&L2).cov == [[0, 0], [0, 2]] )
def testGivenMvn(self):
Y=Mvn(mean=[0, 1], vectors=Matrix.eye, var=[numpy.inf, 1])
X=Mvn(mean=[1, 0], vectors=Matrix.eye, var=[1, numpy.inf])
x=Mvn(mean=1, var=1)
self.assertTrue( Y.given(dims=0, value=x) == X&Y )
def testGivenVector(self):
self.assertTrue(
givenVector(fix.A, dims=0, value=1) ==
fix.A.given(dims=0, value=1)
)
class chainTester(myTests):
def testBasic(self):
self.assertTrue( fix.A.chain() == fix.A*numpy.hstack([fix.E, fix.E]) )
self.assertTrue(
fix.A.chain(transform=fix.M) ==
fix.A*numpy.hstack([fix.E, fix.M])
)
def testMoore(self):
self.assertTrue( fix.A.chain(fix.B) == mooreChain(fix.A, fix.B) )
b=fix.B*fix.M
self.assertTrue( fix.A.chain(b, fix.M) == mooreChain(fix.A, b, fix.M) )
def testStacks(self):
dataA=fix.A.sample(100)
a=Mvn.fromData(dataA)
#a and a are correlated
self.assertTrue(
a.chain()==
Mvn.fromData(numpy.hstack([dataA, dataA]))
)
#a and a*M are correlated
self.assertTrue(
a.chain(transform=fix.M) ==
dataA*numpy.hstack([fix.E, fix.M])
)
self.assertTrue(
a.chain(transform= fix.M) ==
Mvn.fromData(numpy.hstack([dataA, dataA*fix.M]))
)
self.assertTrue(
a.chain(fix.B*fix.M,fix.M) ==
a.chain(transform= fix.M)+Mvn.stack(Mvn.zeros(a.ndim), fix.B*fix.M)
)
def testAnd(self):
"""
__and__ is a shortcut across mvn.chain and mvn.given;
this test shows the relationship.
I haven't figured out yet how the 'transform' parameter to chain works
with __and__; it probably involves the pseudo-inverse of the transform.
I think the answer is on the wikipedia kalman-filtering page.
"""
measurment = fix.B.mean
sensor = fix.B.copy()
sensor.mean = Matrix.zeros(sensor.mean.shape)
joint = fix.A.chain(sensor)
measured = joint.copy()
measured[:, fix.ndim:] = measurment
self.assertTrue(measured[:, :fix.ndim] == fix.A&fix.B)
class inversionTester(myTests):
def testAbs(self):
self.assertTrue( (fix.A.var >= 0).all() )
self.assertTrue( abs(fix.A) == abs(~fix.A) )
def testNeg(self):
IA = fix.A.copy()
IA.var = -IA.var
self.assertTrue( IA == ~fix.A )
self.assertTrue( Matrix((~fix.A).var) == (-fix.A).var )
self.assertTrue( Matrix((~fix.A).var) == -(fix.A.var) )
def testInvariant(self):
self.assertTrue( (~fix.A).mean == fix.A.mean )
self.assertTrue( (~fix.A).vectors == fix.A.vectors )
self.assertTrue( (~fix.A).cov == (-fix.A).cov )
self.assertTrue( (~fix.A).cov == -(fix.A.cov) )
def testDoubleNegative(self):
self.assertTrue( ~~fix.A == fix.A )
self.assertTrue( ~(~fix.A&~fix.B) == fix.A & fix.B )
self.assertTrue( (~fix.A & ~fix.B) == ~(fix.A & fix.B) )
def testParadoxes(self):
self.assertTrue(
(fix.A & ~fix.A) ==
Mvn(mean= fix.A.mean, vectors= fix.A.vectors, var= Matrix.infs)
)
self.assertTrue( (fix.A & ~fix.A)*fix.A.vectors.H == Mvn.infs )
self.assertTrue(
fix.A & (fix.B & ~fix.B) ==
fix.A & Mvn(
mean= fix.B.mean,
vectors= fix.B.vectors,
var= Matrix.infs
)
)
if not fix.B.flat:
self.assertTrue( fix.A == fix.A & (fix.B & ~fix.B) )
self.assertTrue( (fix.A&~fix.B) & fix.B == (fix.A&fix.B) & ~fix.B )
self.assertTrue( (fix.A&fix.B) & ~fix.B == fix.A & (fix.B&~fix.B) )
self.assertTrue( not numpy.isfinite((fix.A & ~fix.A).var).any() )
P = fix.A.copy()
P.var = P.var/0.0
self.assertTrue( P == (fix.A & ~fix.A) )
def testPow(self):
self.assertTrue(
( fix.A)**(-1) + (~fix.A)**(-1) ==
Mvn.zeros
)
self.assertTrue(
(( fix.A)**(-1) + (~fix.A)**(-1))**-1 ==
Mvn.zeros(fix.A.ndim)**-1
)
class blendTester(myTests):
def testCommutativity(self):
self.assertTrue( fix.A & fix.B == fix.B & fix.A)
def testSelf(self):
self.assertTrue( (fix.A & fix.A).cov == fix.A.cov/2)
self.assertTrue( (fix.A & fix.A).mean == fix.A.mean)
def testNotFlat(self):
if not (fix.A.flat or fix.B.flat):
self.assertTrue( fix.A & fix.B == 1/(1/fix.A+1/fix.B))
self.assertTrue(
fix.A & -fix.A ==
Mvn(mean= numpy.zeros(fix.ndim))**-1
)
self.assertTrue(
fix.A & ~fix.A ==
Mvn(mean= numpy.zeros(fix.ndim))**-1
)
self.assertTrue( fix.A & fix.B == wiki(fix.A,fix.B))
self.assertTrue( fix.A**-1 == fix.A*fix.A**-2)
self.assertTrue(
fix.A & fix.B ==
(fix.A*fix.A**-2+fix.B*fix.B**-2)**-1
)
D = fix.A*(fix.A.cov)**(-1) + fix.B*(fix.B.cov)**(-1)
self.assertTrue( wiki(fix.A,fix.B) == D*(D.cov)**(-1))
self.assertTrue( fix.A & fix.B == wiki(fix.A,fix.B))
if not (fix.A.flat or fix.B.flat or fix.C.flat):
abc=numpy.random.permutation([fix.A, fix.B, fix.C])
self.assertTrue( fix.A & fix.B & fix.C == helpers.parallel(*abc))
self.assertTrue(
fix.A & fix.B & fix.C ==
reduce(operator.and_, abc)
)
self.assertTrue(
(fix.A & fix.B) & fix.C ==
fix.A & (fix.B & fix.C)
)
def testKnownValues1(self):
L1=Mvn(mean= [1, 0], vectors= [0, 1], var= numpy.inf)
L2=Mvn(mean= [0, 1], vectors= [1, 0], var= numpy.inf)
self.assertTrue( (L1&L2).mean == [1, 1])
self.assertTrue( (L1&L2).var.size == 0)
def testKnownValues2(self):
L1=Mvn(mean= [0, 0], vectors= [1, 1], var= numpy.inf)
L2=Mvn(mean= [0, 1], vectors= [1, 0], var= numpy.inf)
self.assertTrue( (L1&L2).mean == [1, 1])
self.assertTrue( (L1&L2).var.size == 0)
def testKnownValues3(self):
L1=Mvn(mean= [0, 0], vectors= Matrix.eye, var=[1, 1])
L2=Mvn(mean= [0, 1], vectors= [1, 0], var= numpy.inf)
self.assertTrue( (L1&L2).mean == [0, 1] )
self.assertTrue( (L1&L2).var == 1 )
self.assertTrue( (L1&L2).vectors == [1, 0] )
class quadTester(myTests):
def testDerivation(self):
Na = 25
#get some data from A
Da = Matrix(fix.A.sample(Na))
#and remake the multivariates
A = Mvn.fromData(Da)
# take all the dot products
dots = (numpy.array(Da)**2).sum(1)
self.assertTrue( Matrix(dots) == numpy.diag(Da*Da.H) )
Mean = Matrix(dots.mean())
Var = Matrix(dots.var())
self.assertTrue( Mean == numpy.trace(Da*Da.H)/Na )
self.assertTrue( Mean == numpy.trace(Da.H*Da/Na) )
self.assertTrue( Mean == (Da*Da.H).diagonal().mean() )
self.assertTrue( A.cov+A.mean.H*A.mean == (Da.H*Da)/Na )
self.assertTrue( Mean == numpy.trace(A.mean.H*A.mean + A.cov) )
self.assertTrue(
Mean ==
numpy.trace(A.mean.H*A.mean)+numpy.trace(A.cov)
)
self.assertTrue( Mean == A.mean*A.mean.H + A.trace() )
#definition of variance
self.assertTrue( Var == (numpy.array(Mean -dots)**2).mean() )
#expand it
self.assertTrue(
Var == (
Mean**2
-2*numpy.multiply(Mean,dots) + dots**2
).mean()
)
#distribute the calls to mean()
self.assertTrue(
Var == Mean**2 - 2*Mean*dots.mean() + (dots**2).mean()
)
#but Mean == dot.mean(), so
self.assertTrue( Var == (dots**2).mean() - Mean**2 )
self.assertTrue( Var == (dots**2).sum()/Na - Mean**2 )
self.assertTrue( Var == ((Da*Da.H).diagonal()**2).sum()/Na - Mean**2 )
self.assertTrue(
Var ==
Matrix((Da*Da.H).diagonal())*
Matrix((Da*Da.H).diagonal()).H/Na-
Mean**2
)
self.assertTrue(
Mean ==
(Matrix((Da*Da.H).diagonal())*
Matrix.ones((Na,1))/Na)
)
self.assertTrue(
Mean**2 ==
(Matrix((Da*Da.H).diagonal())*
Matrix.ones((Na,1))/Na)**2
)
self.assertTrue(
Mean**2 ==
Matrix((Da*Da.H).diagonal() *
Matrix.ones((Na,1))/Na) *
Matrix((Da*Da.H).diagonal() *
Matrix.ones((Na,1))/Na)
)
self.assertTrue(
Mean**2 ==
Matrix((Da*Da.H).diagonal()) *
Matrix.ones((Na, 1))*Matrix.ones((1, Na))/Na**2 *
Matrix((Da*Da.H).diagonal()).H
)
self.assertTrue(
Var ==
Matrix((Da*Da.H).diagonal())*
Matrix((Da*Da.H).diagonal()).H/Na
-
Matrix((Da*Da.H).diagonal())*
Matrix.ones((Na, 1))*Matrix.ones((1, Na))/Na**2*
Matrix((Da*Da.H).diagonal()).H
)
self.assertTrue(
Var ==
Matrix((Da*Da.H).diagonal())*
Matrix((Da*Da.H).diagonal()).H/Na
-
(Matrix((Da*Da.H).diagonal())*
Matrix((Da*Da.H).diagonal()).H.sum()).sum()/Na/Na
)
self.assertTrue(
Var ==
Matrix((Da*Da.H).diagonal())/Na *
Matrix((Da*Da.H).diagonal()).H
-
Matrix((Da*Da.H).diagonal())/Na *
(numpy.trace(Da*Da.H) *
Matrix.ones((Na,1)))/Na
)
self.assertTrue(
Var ==
Matrix((Da*Da.H).diagonal())/Na *
(
Matrix((Da*Da.H).diagonal()).H
-
(numpy.trace(Da*Da.H) *
Matrix.ones((Na,1)))/Na
)
)
self.assertTrue(
Var ==
Matrix((Da*Da.H).diagonal())/Na *
(
Matrix((Da*Da.H).diagonal()).H
-Mean
)
)
#there's a connection in between here that I don't understand
#wiki: this is the Reference value
wVar = 2*numpy.trace(A.cov*A.cov)+4*A.mean*A.cov*A.mean.H
self.assertTrue(
wVar ==
2*numpy.trace(
A.cov*
A.vectors.H*numpy.diagflat(A.var)*A.vectors
)
+
4*numpy.trace(
A.mean.H*A.mean*
A.vectors.H*numpy.diagflat(A.var)*A.vectors
)
)
self.assertTrue(
wVar ==
2*numpy.trace(
A.cov+
A.vectors.H*numpy.diagflat(A.var)*A.vectors
)
+
numpy.trace(
4*A.mean *
A.vectors.H*numpy.diagflat(A.var)*A.vectors *
A.mean.H
)
)
self.assertTrue(
wVar ==
2*numpy.trace(
A.cov
*A.vectors.H*numpy.diagflat(A.var)*A.vectors
) +
numpy.trace(
4*A.mean *
A.vectors.H*numpy.diagflat(A.var)*A.vectors *
A.mean.H
)
)
self.assertTrue(
wVar ==
2*numpy.trace(A.cov*A.cov)
+
4*A.mean*A.cov*A.mean.H
)
self.assertTrue(
wVar ==
2*(A*A).trace()
+
4*(A*A.mean.H).trace()
)
self.assertTrue(
A.quad() ==
Mvn(
mean= A.mean*A.mean.H + A.trace(),
var= 2*(A*A).trace()+4*(A*A.mean.H).trace()
)
)
class innerTester(myTests):
def testDerivation(self):
A = fix.A
B = fix.B
Na = 20
Nb = 10
N = Na*Nb
#get some data from A and B
Da = Matrix(A.sample(Na))
Db = Matrix(B.sample(Nb))
#and remake the multivariates based on the samples you just took
A = Mvn.fromData(Da)
B = Mvn.fromData(Db)
# take every possible combination of dot products
dot = numpy.array(Da*Db.H)
#the population mean
Mean = Matrix(dot.mean())
#the population variance
Var = Matrix(dot.var())
#should equal the distribution mean
self.assertTrue( Mean == A.mean*B.mean.H )
#definition of variance
self.assertTrue( Var == (numpy.array(Mean -dot)**2).mean() )
#expand it
self.assertTrue(
Var == (
Mean**2
-
2*numpy.multiply(Mean, dot) + dot**2
).mean()
)
#distribute the calls to mean()
self.assertTrue(
Var ==
Mean**2
-2*Mean*dot.mean()
+(dot**2).mean()
)
#but Mean == dot.mean(), so
self.assertTrue(
Var ==
(dot**2).mean() - Mean**2
)
dot = Matrix(dot)
self.assertTrue( Var == numpy.trace(dot*dot.H)/N - Mean**2 )
#factor everything
self.assertTrue(
Var ==
numpy.trace(Da*Db.H*Db*Da.H)/Na/Nb
-
(A.mean*B.mean.H)**2
)
#rotate the trace
self.assertTrue(
Var ==
numpy.trace(Da.H*Da*Db.H*Db)/Na/Nb
-
(A.mean*B.mean.H)**2
)
#group the data matrices
self.assertTrue(
Var ==
numpy.trace((Da.H*Da)*(Db.H*Db))/Na/Nb
-
(A.mean*B.mean.H)**2
)
#distribute the N's
self.assertTrue(
Var ==
numpy.trace((Da.H*Da)/Na*(Db.H*Db)/Nb)
-
(A.mean*B.mean.H)**2
)
#from the definition of mean and cov
self.assertTrue( A.cov+A.mean.H*A.mean == (Da.H*Da)/Na )
self.assertTrue( B.cov+B.mean.H*B.mean == (Db.H*Db)/Nb )
#replace
self.assertTrue(
Var ==
numpy.trace(
(A.cov+A.mean.H*A.mean)*
(B.cov+B.mean.H*B.mean)
)
-
(A.mean*B.mean.H)**2
)
#multiply it out
self.assertTrue( Var ==
numpy.trace(
A.cov*B.cov +
A.mean.H*A.mean*B.cov +
A.cov*B.mean.H*B.mean +
A.mean.H*A.mean*B.mean.H*B.mean
) - (
A.mean*B.mean.H)**2
)
#distribute the calls to trace
self.assertTrue( Var ==
numpy.trace(A.cov*B.cov) +
numpy.trace(A.mean.H*A.mean*B.cov) +
numpy.trace(A.cov*B.mean.H*B.mean) +
numpy.trace(A.mean.H*A.mean*B.mean.H*B.mean) -
(A.mean*B.mean.H)**2
)
#rotate traces
self.assertTrue( Var ==
numpy.trace(A.cov*B.cov) +
numpy.trace(A.mean*B.cov*A.mean.H) +
numpy.trace(B.mean*A.cov*B.mean.H) +
numpy.trace(A.mean*B.mean.H*B.mean*A.mean.H) -
(A.mean*B.mean.H)**2
)
#remove traces for scalars
self.assertTrue( Var ==
numpy.trace(A.cov*B.cov) +
A.mean*B.cov*A.mean.H +
B.mean*A.cov*B.mean.H +
(A.mean*B.mean.H)*(B.mean*A.mean.H) -
(A.mean*B.mean.H)**2
)
#cancel means
self.assertTrue( Var ==
numpy.trace(A.cov*B.cov) +
A.mean*B.cov*A.mean.H +
B.mean*A.cov*B.mean.H
)
#avoid covariance matrixes
self.assertTrue( Var ==
(A*B).trace() +
(B*A.mean.H).trace() +
(A*B.mean.H).trace()
)
self.assertTrue(
A.inner(B) ==
Mvn(
mean= A.mean*B.mean.H,
var= (A*B).trace() + (B*A.mean.H).trace() + (A*B.mean.H).trace()
)
)
self.assertTrue( A.inner(B) == B.inner(A) )
class outerTester(myTests):
def testDerivation(self):
A = fix.A
B = fix.B
Na = 20
Nb = 10
N=Na*Nb
#get some data from A and B
Da = A.sample(Na)
Db = B.sample(Nb)
#and remake the multivariates based on the samples you just took
A = Mvn.fromData(Da)
B = Mvn.fromData(Db)
out = numpy.outer(Da, Db).reshape((Na, A.ndim, Nb, B.ndim))
self.assertTrue(
Matrix(numpy.outer(Da[0, :], Db[0, :])) ==
out[0, :, 0, :]
)
result = out.mean(2).mean(0)
self.assertTrue( numpy.outer(A.mean, B.mean) == Matrix(result))
self.assertTrue( A.outer(B) == Matrix(result))
self.assertTrue( B.outer(A) == Matrix(result).H)
def wiki(P, M):
"""
:param P: the prior distribution (an Mvn)
:param M: the measurement distribution (an Mvn)
Direct implementation of the wikipedia blending algorithm
"""
yk = M.mean-P.mean
Sk = P.cov+M.cov
Kk = P.cov*Sk.I
return Mvn.fromCov(
mean= (P.mean + yk*Kk.H),
cov= (Matrix.eye(P.ndim)-Kk)*P.cov
)
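# Hedged usage sketch, mirroring testWiki below: for non-flat fixtures the
# Kalman-style update implemented by wiki() should agree with the & blend.
#
# if not (fix.A.flat or fix.B.flat):
# assert fix.A & fix.B == wiki(fix.A, fix.B)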
def givenVector(self, dims, value):
"""
:param dims: index (or binary index) of the dimensions to fix
:param value: the value to fix those dimensions to
direct implementation of the "given" algorithm in
Andrew Moore's data-mining/Gaussian slides
>>> assert givenVector(A,dims=0,value=1)==A.given(dims=0,value=1)
"""
fixed = helpers.binindex(dims, self.ndim)
if fixed.all():
return Mvn.fromData(value)
free =~ fixed
Mu = self[:, free]
Mv = self[:, fixed]
#TODO: cleanup
u = self.vectors[:, free]
v = self.vectors[:, fixed]
uv = numpy.multiply(u.H, self.var)*v
result = Mu-(Mv-value)**-1*uv.H
#create the mean, for the new object,and set the values of interest
mean = numpy.zeros([1, self.ndim], dtype=result.mean.dtype)
mean[:, fixed] = value
mean[:, free] = result.mean
#create empty vectors for the new object
vectors=numpy.zeros([
result.shape[0],
self.ndim,
],result.vectors.dtype)
vectors[:, fixed] = 0
vectors[:, free] = result.vectors
return type(self)(
mean= mean,
vectors= vectors,
var= result.var
)
def mooreChain(self, sensor, transform=None):
"""
:param sensor: an Mvn acting as the sensor (measurement noise)
:param transform: optional matrix mapping actual space to sensor space
given a distribution of actual values and an Mvn to act as a sensor
this method returns the joint distribution of real and measured values
the optional transform parameter describes how to transform from actual
space to sensor space
"""
if transform is None:
transform = Matrix.eye(self.ndim)
T = (self*transform+sensor)
vv = self.cov
return type(self).fromCov(
mean = numpy.hstack([self.mean, T.mean]),
cov = numpy.vstack([
numpy.hstack([vv, vv*transform]),
numpy.hstack([(vv*transform).H, T.cov]),
])
)
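# Hedged usage sketch, mirroring testMooreChain below: chaining a
# distribution with a sensor reproduces this reference implementation.
#
# assert fix.A.chain(fix.B) == mooreChain(fix.A, fix.B)
# assert fix.A.chain(fix.B*fix.M, fix.M) == mooreChain(fix.A, fix.B*fix.M, fix.M)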
class refereceTester(myTests):
def testMooreChain(self):
#when including a sensor, noise is added to those new dimensions
self.assertTrue(
fix.A.chain(fix.B) ==
mooreChain(fix.A, fix.B)
)
self.assertTrue(
fix.A.chain(fix.B*fix.M, fix.M) ==
mooreChain(fix.A, fix.B*fix.M, fix.M)
)
def testWiki(self):
if not (fix.A.flat or fix.B.flat):
self.assertTrue( fix.A & fix.B == wiki(fix.A, fix.B) )
#The quickest way to prove it's equivalent is by examining these:
self.assertTrue( fix.A**-1 == fix.A*fix.A**-2 )
self.assertTrue(
fix.A & fix.B ==
(fix.A*fix.A**-2+fix.B*fix.B**-2)**-1
)
D = fix.A*(fix.A.cov)**(-1) + fix.B*(fix.B.cov)**(-1)
self.assertTrue( wiki(fix.A, fix.B) == D*(D.cov)**(-1) )
assert fix.A & fix.B == wiki(fix.A, fix.B)
def getTests(fixture=None):
testCases = [
value for (name, value) in globals().iteritems()
if isinstance(value, type) and issubclass(value, myTests)
]
if fixture is None:
return testCases
jar=cPickle.dumps(fixture)
testCases = [
unittest.makeSuite(
type(tc.__name__, (tc,), {'jar':jar})
) for tc in testCases
]
return testCases
|
|
import re
import sre_constants
from itertools import chain
from PyQt4 import QtCore
from PyQt4 import QtGui
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from Orange.widgets import gui
from Orange.widgets.settings import Setting, ContextSetting
from Orange.widgets.widget import OWWidget
from Orange.data import Table
from orangecontrib.text.corpus import Corpus
class Input:
DATA = 'Data'
class Output:
CORPUS = "Corpus"
class OWCorpusViewer(OWWidget):
name = "Corpus Viewer"
description = "Display corpus contents."
icon = "icons/CorpusViewer.svg"
priority = 30
inputs = [(Input.DATA, Table, 'set_data')]
outputs = [(Output.CORPUS, Corpus)]
# Settings.
selected_document = ContextSetting(0)
search_features = ContextSetting([0]) # features included in search
display_features = ContextSetting([0]) # features for display
autocommit = Setting(True)
def __init__(self):
super().__init__()
self.corpus = None # Corpus
self.corpus_docs = None # Documents generated from Corpus
self.output_mask = None # Output corpus indices
self.document_contents = None # QTextDocument
self.document_holder = None # QTextEdit
self.features = [] # all attributes
# ---- CONTROL AREA ----
# Filtering results.
filter_result_box = gui.widgetBox(self.controlArea, 'Info')
self.info_all = gui.label(filter_result_box, self, 'All documents:')
self.info_fil = gui.label(filter_result_box, self, 'After filtering:')
# Search features
self.search_listbox = gui.listBox(
self.controlArea, self, 'search_features', 'features',
selectionMode=QtGui.QListView.ExtendedSelection,
box='Search features', callback=self.regenerate_documents,)
# Display features
self.display_listbox = gui.listBox(
self.controlArea, self, 'display_features', 'features',
selectionMode=QtGui.QListView.ExtendedSelection,
box='Display features', callback=self.show_document, )
# Auto-commit box.
gui.auto_commit(self.controlArea, self, 'autocommit', 'Send data', 'Auto send is on')
# ---- MAIN AREA ----
# Search
self.filter_input = gui.lineEdit(self.mainArea, self, '',
orientation=Qt.Horizontal,
label='RegExp Filter:')
self.filter_input.textChanged.connect(self.refresh_search)
h_box = gui.widgetBox(self.mainArea, orientation=Qt.Horizontal, addSpace=True)
h_box.layout().setSpacing(0)
# Document list.
self.document_table = QTableView()
self.document_table.setSelectionBehavior(QTableView.SelectRows)
self.document_table.setSelectionMode(QTableView.ExtendedSelection)
self.document_table.setEditTriggers(QtGui.QAbstractItemView.NoEditTriggers)
self.document_table.horizontalHeader().setResizeMode(QtGui.QHeaderView.Stretch)
self.document_table.horizontalHeader().setVisible(False)
h_box.layout().addWidget(self.document_table)
self.document_table_model = QStandardItemModel(self)
self.document_table.setModel(self.document_table_model)
self.document_table.setFixedWidth(200)
self.document_table.selectionModel().selectionChanged.connect(self.show_document)
# Document contents.
self.document_holder = QTextEdit()
self.document_holder.setReadOnly(True)
self.document_holder.setLineWrapMode(QTextEdit.WidgetWidth)
h_box.layout().addWidget(self.document_holder)
# --- DATA LOADING ---
def set_data(self, data=None):
self.reset_widget() # Clear any old data.
if data is not None:
self.corpus = data
if isinstance(data, Table):
self.corpus = Corpus.from_table(data.domain, data)
self.load_features()
self.regenerate_documents()
# Send the corpus to output.
self.send(Output.CORPUS, self.corpus)
def reset_widget(self):
# Corpus.
self.corpus = None
self.corpus_docs = None
self.output_mask = None
# Widgets.
self.search_listbox.clear()
self.display_listbox.clear()
self.document_holder.clear()
self.filter_input.clear()
self.update_info_display()
# Models/vars.
self.features.clear()
self.search_features.clear()
self.display_features.clear()
self.document_table_model.clear()
# Warnings.
self.warning(0)
self.warning(1)
def load_features(self):
self.search_features = []
self.display_features = []
if self.corpus is not None:
domain = self.corpus.domain
self.features = list(filter(
lambda x: not x.attributes.get('bow_feature', False),
chain(domain.variables, domain.metas)))
# FIXME: Select features based on ContextSetting
self.search_features = list(range(len(self.features)))
self.display_features = list(range(len(self.features)))
def load_documents(self):
""" Loads documents into the left scrolling area. """
if not self.corpus or not self.corpus_docs:
return
search_keyword = self.filter_input.text().strip('|')
try:
is_match = re.compile(search_keyword, re.IGNORECASE).search
except sre_constants.error:
return
should_filter = bool(search_keyword)
self.output_mask = []
self.document_table_model.clear()
for i, (document, document_contents) in enumerate(zip(self.corpus, self.corpus_docs)):
has_hit = not should_filter or is_match(document_contents)
if has_hit:
item = QStandardItem()
item.setData('Document {}'.format(i+1), Qt.DisplayRole)
item.setData(document, Qt.UserRole)
self.document_table_model.appendRow(item)
self.output_mask.append(i)
if self.document_table_model.rowCount() > 0:
self.document_table.selectRow(0) # Select the first document.
else:
self.document_contents.clear()
self._invalidate_selection()
def show_document(self):
""" Show the selected document in the right area. """
self.warning(1)
if len(self.display_features) == 0 and self.corpus is not None:
self.warning(1, 'No features selected for display.')
self.clear_text_highlight() # Clear.
self.document_contents = QTextDocument(undoRedoEnabled=False)
self.document_contents.setDefaultStyleSheet('td { padding: 5px 15px 15px 5px; }')
documents = []
for index in self.document_table.selectionModel().selectedRows():
document = ['<table>']
for feat_index in self.display_features:
meta_name = self.features[feat_index].name
document.append(
'<tr><td><strong>{0}:</strong></td><td>{1}</td></tr>'.format(
meta_name, index.data(Qt.UserRole)[meta_name].value))
document.append('</table><hr/>')
documents.append(document)
self.document_contents.setHtml(''.join(chain.from_iterable(documents)))
self.document_holder.setDocument(self.document_contents)
self.highlight_document_hits()
# --- WIDGET SEARCH ---
def regenerate_documents(self):
self.corpus_docs = None
self.warning(0)
if self.corpus is not None:
feats = [self.features[i] for i in self.search_features]
if len(feats) == 0:
self.warning(0, 'No features included in search.')
self.corpus_docs = self.corpus.documents_from_features(feats)
self.refresh_search()
def refresh_search(self):
self.load_documents()
self.highlight_document_hits()
self.update_info_display()
def highlight_document_hits(self):
search_keyword = self.filter_input.text().strip('|')
self.clear_text_highlight()
if not search_keyword:
self.update_info_display()
return
# Format of the highlighting.
text_format = QtGui.QTextCharFormat()
text_format.setBackground(QtGui.QBrush(QtGui.QColor('#b3d8fe')))
# Regular expression to match.
regex = QtCore.QRegExp(search_keyword)
regex.setCaseSensitivity(Qt.CaseInsensitive)
cursor = self.document_contents.find(regex, 0)
prev_position = None
cursor.beginEditBlock()
while cursor.position() not in (-1, prev_position):
cursor.mergeCharFormat(text_format)
prev_position = cursor.position()
cursor = self.document_contents.find(regex, cursor.position())
cursor.endEditBlock()
def update_info_display(self):
if self.corpus is not None:
self.info_all.setText('All documents: {}'.format(len(self.corpus)))
self.info_fil.setText('After filtering: {}'.format(self.document_table_model.rowCount()))
else:
self.info_all.setText('All documents:')
self.info_fil.setText('After filtering:')
def clear_text_highlight(self):
text_format = QtGui.QTextCharFormat()
text_format.setBackground(QtGui.QBrush(QtGui.QColor('#ffffff')))
cursor = self.document_holder.textCursor()
cursor.setPosition(0)
cursor.movePosition(QTextCursor.End, QTextCursor.KeepAnchor, 1)
cursor.mergeCharFormat(text_format)
# --- MISC ---
def commit(self):
if self.output_mask is not None:
output_corpus = Corpus.from_corpus(self.corpus.domain, self.corpus,
row_indices=self.output_mask)
self.send(Output.CORPUS, output_corpus)
def _invalidate_selection(self):
self.commit()
if __name__ == '__main__':
app = QApplication([])
widget = OWCorpusViewer()
widget.show()
corpus = Corpus.from_file('bookexcerpts')
widget.set_data(corpus)
app.exec()
|
|
# -*- coding: utf-8 -*-
# =============================================================================
# Copyright (c) 2011 Tom Kralidis
#
# Authors : Tom Kralidis <[email protected]>
#
# Contact email: [email protected]
# =============================================================================
from io import BytesIO
from urllib.parse import urlencode
from owslib.util import (
testXMLValue,
nspath_eval,
ServiceException,
Authentication,
# openURL,
)
from owslib.etree import etree
from owslib.fgdc import Metadata
from owslib.iso import MD_Metadata
from owslib.ows import (
OwsCommon,
ServiceIdentification,
ServiceProvider,
Constraint,
Parameter,
OperationsMetadata,
BoundingBox
)
from owslib.fes import FilterCapabilities
from owslib.crs import Crs
from owslib.feature import WebFeatureService_
from owslib.feature.common import (
WFSCapabilitiesReader,
AbstractContentMetadata,
)
from owslib.namespaces import Namespaces
from owslib.util import log, openURL
def get_namespaces():
n = Namespaces()
return n.get_namespaces(["gmd", "gml", "gmi", "ogc", "ows", "wfs"])
namespaces = get_namespaces()
class WebFeatureService_1_1_0(WebFeatureService_):
"""Abstraction for OGC Web Feature Service (WFS).
Implements IWebFeatureService.
"""
def __new__(
self,
url,
version,
xml,
parse_remote_metadata=False,
timeout=30,
headers=None,
username=None,
password=None,
auth=None,
):
""" overridden __new__ method
@type url: string
@param url: url of WFS capabilities document
@type xml: string
@param xml: elementtree object
@type parse_remote_metadata: boolean
@param parse_remote_metadata: whether to fully process MetadataURL elements
@param headers: HTTP headers to send with requests
@param timeout: time (in seconds) after which requests should timeout
@param username: service authentication username
@param password: service authentication password
@param auth: instance of owslib.util.Authentication
@return: initialized WebFeatureService_1_1_0 object
"""
obj = object.__new__(self)
obj.__init__(
url,
version,
xml,
parse_remote_metadata,
timeout,
headers=headers,
username=username,
password=password,
auth=auth,
)
return obj
def __getitem__(self, name):
""" check contents dictionary to allow dict like access to service layers"""
if name in list(self.__getattribute__("contents").keys()):
return self.__getattribute__("contents")[name]
else:
raise KeyError("No content named %s" % name)
def __init__(
self,
url,
version,
xml=None,
parse_remote_metadata=False,
timeout=30,
headers=None,
username=None,
password=None,
auth=None,
):
"""Initialize."""
if auth:
if username:
auth.username = username
if password:
auth.password = password
else:
auth = Authentication(username, password)
super(WebFeatureService_1_1_0, self).__init__(auth)
self.url = url
self.version = version
self.headers = headers
self.timeout = timeout
self._capabilities = None
self.owscommon = OwsCommon("1.0.0")
reader = WFSCapabilitiesReader(self.version, headers=self.headers, auth=self.auth)
if xml:
self._capabilities = reader.readString(xml)
else:
self._capabilities = reader.read(self.url, self.timeout)
self._buildMetadata(parse_remote_metadata)
def _buildMetadata(self, parse_remote_metadata=False):
"""set up capabilities metadata objects: """
self.updateSequence = self._capabilities.attrib.get("updateSequence")
# ServiceIdentification
val = self._capabilities.find(
nspath_eval("ows:ServiceIdentification", namespaces)
)
if val is not None:
self.identification = ServiceIdentification(val, self.owscommon.namespace)
# ServiceProvider
val = self._capabilities.find(
nspath_eval("ows:ServiceProvider", namespaces)
)
if val is not None:
self.provider = ServiceProvider(val, self.owscommon.namespace)
# ServiceOperations metadata
self.operations = []
for elem in self._capabilities.findall(
nspath_eval("ows:OperationsMetadata/ows:Operation", namespaces)
):
self.operations.append(OperationsMetadata(elem, self.owscommon.namespace))
self.constraints = {}
for elem in self._capabilities.findall(
nspath_eval("ows:OperationsMetadata/ows:Constraint", namespaces)
):
self.constraints[elem.attrib["name"]] = Constraint(
elem, self.owscommon.namespace
)
self.parameters = {}
for elem in self._capabilities.findall(
nspath_eval("ows:OperationsMetadata/ows:Parameter", namespaces)
):
self.parameters[elem.attrib["name"]] = Parameter(
elem, self.owscommon.namespace
)
# FilterCapabilities
val = self._capabilities.find(
nspath_eval("ogc:Filter_Capabilities", namespaces)
)
self.filters = FilterCapabilities(val)
# serviceContents metadata: our assumption is that services use a top-level
# layer as a metadata organizer, nothing more.
self.contents = {}
features = self._capabilities.findall(
nspath_eval("wfs:FeatureTypeList/wfs:FeatureType", namespaces)
)
if features is not None:
for feature in features:
cm = ContentMetadata(feature, parse_remote_metadata, headers=self.headers, auth=self.auth)
self.contents[cm.id] = cm
# exceptions
self.exceptions = [
f.text for f in self._capabilities.findall("Capability/Exception/Format")
]
def getcapabilities(self):
"""Request and return capabilities document from the WFS as a
file-like object.
NOTE: this is effectively redundant now"""
reader = WFSCapabilitiesReader(self.version, auth=self.auth)
return openURL(
reader.capabilities_url(self.url), timeout=self.timeout,
headers=self.headers, auth=self.auth
)
def items(self):
"""supports dict-like items() access"""
items = []
for item in self.contents:
items.append((item, self.contents[item]))
return items
def getfeature(
self,
typename=None,
filter=None,
bbox=None,
featureid=None,
featureversion=None,
propertyname=None,
maxfeatures=None,
srsname=None,
outputFormat=None,
method="Get",
startindex=None,
sortby=None,
):
"""Request and return feature data as a file-like object.
Parameters
----------
typename : list
List of typenames (string)
filter : string
XML-encoded OGC filter expression.
bbox : tuple
(left, bottom, right, top) in the feature type's coordinates.
featureid : list
List of unique feature ids (string)
featureversion : string
Default is most recent feature version.
propertyname : list
List of feature property names. For Get request, '*' matches all.
For Post request, leave blank (None) to get all properties.
maxfeatures : int
Maximum number of features to be returned.
method : string
Qualified name of the HTTP DCP method to use.
srsname: string
EPSG code to request the data in
outputFormat: string (optional)
Requested response format of the request.
startindex: int (optional)
Start position to return feature set (paging in combination with maxfeatures)
sortby: list (optional)
List of property names whose values should be used to order
(upon presentation) the set of feature instances that
satisfy the query.
There are 3 different modes of use (see the commented sketch after this method):
1) typename and bbox (simple spatial query). It is assumed, that
bbox coordinates are given *always* in the east,north order
2) typename and filter (more expressive)
3) featureid (direct access to known features)
"""
try:
base_url = next(
(
m.get("url")
for m in self.getOperationByName("GetFeature").methods
if m.get("type").lower() == method.lower()
)
)
except StopIteration:
base_url = self.url
request = {"service": "WFS", "version": self.version, "request": "GetFeature"}
if method.lower() == "get":
if not isinstance(typename, list):
typename = [typename]
if srsname is not None:
request["srsname"] = str(srsname)
# Check if the desired SRS is supported by the service for each
# typename. A warning will be thrown if that SRS is not allowed.
for name in typename:
_ = self.getSRS(srsname, name)
# check featureid
if featureid:
request["featureid"] = ",".join(featureid)
# bbox
elif bbox and typename:
request["bbox"] = self.getBBOXKVP(bbox, typename)
# or filter
elif filter and typename:
request["filter"] = str(filter)
assert len(typename) > 0
request["typename"] = ",".join(typename)
if propertyname is None:
propertyname = "*"
if not isinstance(propertyname, list):
propertyname = [propertyname]
request["propertyname"] = ",".join(propertyname)
if sortby is not None:
if not isinstance(sortby, list):
sortby = [sortby]
request["sortby"] = ",".join(sortby)
if featureversion is not None:
request["featureversion"] = str(featureversion)
if maxfeatures is not None:
request["maxfeatures"] = str(maxfeatures)
if startindex is not None:
request["startindex"] = str(startindex)
if outputFormat is not None:
request["outputFormat"] = outputFormat
data = urlencode(request)
log.debug("Making request: %s?%s" % (base_url, data))
elif method.lower() == "post":
base_url, data = self.getPOSTGetFeatureRequest(
typename=typename,
filter=filter,
bbox=bbox,
featureid=featureid,
featureversion=featureversion,
propertyname=propertyname,
maxfeatures=maxfeatures,
outputFormat=outputFormat,
method='Post',
startindex=startindex,
sortby=sortby,
)
u = openURL(base_url, data, method, timeout=self.timeout,
headers=self.headers, auth=self.auth)
# check for service exceptions, rewrap, and return
# We're going to assume that anything with a content-length > 32k
# is data. We'll check anything smaller.
if "Content-Length" in u.info():
length = int(u.info()["Content-Length"])
have_read = False
else:
data = u.read()
have_read = True
length = len(data)
if length < 32000:
if not have_read:
data = u.read()
try:
tree = etree.fromstring(data)
except BaseException:
# Not XML
return BytesIO(data)
else:
if tree.tag == "{%s}ServiceExceptionReport" % namespaces["ogc"]:
se = tree.find(nspath_eval("ServiceException", namespaces["ogc"]))
raise ServiceException(str(se.text).strip())
else:
return BytesIO(data)
else:
if have_read:
return BytesIO(data)
return u
def getOperationByName(self, name):
"""Return a named content item."""
for item in self.operations:
if item.name == name:
return item
raise KeyError("No operation named %s" % name)
class ContentMetadata(AbstractContentMetadata):
"""Abstraction for WFS metadata.
Implements IMetadata.
"""
def __init__(self, elem, parse_remote_metadata=False, timeout=30, headers=None, auth=None):
"""."""
super(ContentMetadata, self).__init__(headers=headers, auth=auth)
self.id = testXMLValue(elem.find(nspath_eval("wfs:Name", namespaces)))
self.title = testXMLValue(elem.find(nspath_eval("wfs:Title", namespaces)))
self.abstract = testXMLValue(elem.find(nspath_eval("wfs:Abstract", namespaces)))
self.keywords = [
f.text
for f in elem.findall(nspath_eval("ows:Keywords/ows:Keyword", namespaces))
]
# bbox
self.boundingBoxWGS84 = None
b = BoundingBox(
elem.find(nspath_eval("ows:WGS84BoundingBox", namespaces)),
namespaces["ows"],
)
if b is not None:
try:
self.boundingBoxWGS84 = (
float(b.minx),
float(b.miny),
float(b.maxx),
float(b.maxy),
)
except TypeError:
self.boundingBoxWGS84 = None
# crs options
self.crsOptions = [
Crs(srs.text)
for srs in elem.findall(nspath_eval("wfs:OtherSRS", namespaces))
]
dsrs = testXMLValue(elem.find(nspath_eval("wfs:DefaultSRS", namespaces)))
if dsrs is not None: # first element is default srs
self.crsOptions.insert(0, Crs(dsrs))
# verbs
self.verbOptions = [
op.text
for op in elem.findall(
nspath_eval("wfs:Operations/wfs:Operation", namespaces)
)
]
# output formats
self.outputFormats = [
op.text
for op in elem.findall(
nspath_eval("wfs:OutputFormats/wfs:Format", namespaces)
)
]
# MetadataURLs
self.metadataUrls = []
for m in elem.findall(nspath_eval("wfs:MetadataURL", namespaces)):
metadataUrl = {
"type": testXMLValue(m.attrib["type"], attrib=True),
"format": testXMLValue(m.attrib["format"], attrib=True),
"url": testXMLValue(m),
}
self.metadataUrls.append(metadataUrl)
if parse_remote_metadata:
self.parse_remote_metadata(timeout)
# others not used but needed for iContentMetadata harmonisation
self.styles = None
self.timepositions = None
self.defaulttimeposition = None
def parse_remote_metadata(self, timeout=30):
"""Parse remote metadata for MetadataURL of format 'text/xml' and add it as metadataUrl['metadata']"""
for metadataUrl in self.metadataUrls:
if (
metadataUrl["url"] is not None and metadataUrl["format"].lower() == "text/xml"
):
try:
content = openURL(metadataUrl["url"], timeout=timeout, headers=self.headers, auth=self.auth)
doc = etree.fromstring(content.read())
if metadataUrl["type"] == "FGDC":
mdelem = doc.find(".//metadata")
if mdelem is not None:
metadataUrl["metadata"] = Metadata(mdelem)
else:
metadataUrl["metadata"] = None
elif metadataUrl["type"] in ["TC211", "19115", "19139"]:
mdelem = doc.find(
".//" + nspath_eval("gmd:MD_Metadata", namespaces)
) or doc.find(
".//" + nspath_eval("gmi:MI_Metadata", namespaces)
)
if mdelem is not None:
metadataUrl["metadata"] = MD_Metadata(mdelem)
else:
metadataUrl["metadata"] = None
except Exception:
metadataUrl["metadata"] = None
|
|
# Copyright 2014: Intel Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.common.i18n import _
from rally.common import log as logging
from rally import exceptions
from rally.plugins.openstack import scenario
from rally.plugins.openstack.wrappers import network as network_wrapper
from rally.task import atomic
LOG = logging.getLogger(__name__)
class NeutronScenario(scenario.OpenStackScenario):
"""Base class for Neutron scenarios with basic atomic actions."""
RESOURCE_NAME_PREFIX = "rally_net_"
RESOURCE_NAME_LENGTH = 16
SUBNET_IP_VERSION = 4
# TODO(rkiran): modify in case LBaaS-v2 requires
LB_METHOD = "ROUND_ROBIN"
LB_PROTOCOL = "HTTP"
LB_PROTOCOL_PORT = 80
HM_TYPE = "PING"
HM_MAX_RETRIES = 3
HM_DELAY = 20
HM_TIMEOUT = 10
def _warn_about_deprecated_name_kwarg(self, resource, kwargs):
"""Warn about use of a deprecated 'name' kwarg and replace it.
Many of the functions in this class previously accepted a
'name' keyword argument so that the end user could explicitly
name their resources. That is no longer permitted, so when a
user includes a 'name' kwarg we warn about it, and replace it
with a random name.
This cannot be a decorator because _update_v1_pool() takes its
arguments in a different way than the other update functions
that this helper is used in.
:param resource: A neutron resource object dict describing the
resource that the name is being set for. In
                         particular, this must have a single key
that is the resource type, and a single value
that is itself a dict including the "id" key.
:param kwargs: The keyword arg dict that the user supplied,
which will be modified in-place.
:returns: None; kwargs is modified in situ.
"""
if "name" in kwargs:
kwargs["name"] = self._generate_random_name()
LOG.warning(_("Cannot set name of %(type)s %(id)s explicitly; "
"setting to random string %(name)s") %
{"type": list(resource.keys())[0],
"id": list(resource.values())[0]["id"],
"name": kwargs["name"]})
def _get_network_id(self, network, **kwargs):
"""Get Neutron network ID for the network name.
param network: str, network name/id
param kwargs: dict, network options
returns: str, Neutron network-id
"""
networks = self._list_networks(atomic_action=False)
for net in networks:
if (net["name"] == network) or (net["id"] == network):
return net["id"]
msg = (_("Network %s not found.") % network)
raise exceptions.NotFoundException(message=msg)
@atomic.action_timer("neutron.create_network")
def _create_network(self, network_create_args):
"""Create neutron network.
:param network_create_args: dict, POST /v2.0/networks request options
:returns: neutron network dict
"""
network_create_args.setdefault("name", self._generate_random_name())
return self.clients("neutron").create_network(
{"network": network_create_args})
@atomic.optional_action_timer("neutron.list_networks")
def _list_networks(self, **kwargs):
"""Return user networks list.
:param atomic_action: True if this is an atomic action. added
and handled by the
optional_action_timer() decorator
:param kwargs: network list options
"""
return self.clients("neutron").list_networks(**kwargs)["networks"]
@atomic.action_timer("neutron.update_network")
def _update_network(self, network, network_update_args):
"""Update the network.
This atomic function updates the network with network_update_args.
:param network: Network object
:param network_update_args: dict, POST /v2.0/networks update options
:returns: updated neutron network dict
"""
self._warn_about_deprecated_name_kwarg(network, network_update_args)
body = {"network": network_update_args}
return self.clients("neutron").update_network(
network["network"]["id"], body)
@atomic.action_timer("neutron.delete_network")
def _delete_network(self, network):
"""Delete neutron network.
:param network: Network object
"""
self.clients("neutron").delete_network(network["id"])
@atomic.action_timer("neutron.create_subnet")
def _create_subnet(self, network, subnet_create_args, start_cidr=None):
"""Create neutron subnet.
:param network: neutron network dict
:param subnet_create_args: POST /v2.0/subnets request options
:returns: neutron subnet dict
"""
network_id = network["network"]["id"]
if not subnet_create_args.get("cidr"):
start_cidr = start_cidr or "10.2.0.0/24"
subnet_create_args["cidr"] = (
network_wrapper.generate_cidr(start_cidr=start_cidr))
subnet_create_args["network_id"] = network_id
subnet_create_args.setdefault(
"name", self._generate_random_name("rally_subnet_"))
subnet_create_args.setdefault("ip_version", self.SUBNET_IP_VERSION)
return self.clients("neutron").create_subnet(
{"subnet": subnet_create_args})
@atomic.action_timer("neutron.list_subnets")
def _list_subnets(self):
"""Returns user subnetworks list."""
return self.clients("neutron").list_subnets()["subnets"]
@atomic.action_timer("neutron.update_subnet")
def _update_subnet(self, subnet, subnet_update_args):
"""Update the neutron subnet.
This atomic function updates the subnet with subnet_update_args.
:param subnet: Subnet object
:param subnet_update_args: dict, PUT /v2.0/subnets update options
:returns: updated neutron subnet dict
"""
self._warn_about_deprecated_name_kwarg(subnet, subnet_update_args)
body = {"subnet": subnet_update_args}
return self.clients("neutron").update_subnet(
subnet["subnet"]["id"], body)
@atomic.action_timer("neutron.delete_subnet")
def _delete_subnet(self, subnet):
"""Delete neutron subnet
:param subnet: Subnet object
"""
self.clients("neutron").delete_subnet(subnet["subnet"]["id"])
@atomic.action_timer("neutron.create_router")
def _create_router(self, router_create_args, external_gw=False):
"""Create neutron router.
:param router_create_args: POST /v2.0/routers request options
:returns: neutron router dict
"""
router_create_args.setdefault(
"name", self._generate_random_name("rally_router_"))
if external_gw:
for network in self._list_networks():
if network.get("router:external"):
external_network = network
gw_info = {"network_id": external_network["id"],
"enable_snat": True}
router_create_args.setdefault("external_gateway_info",
gw_info)
return self.clients("neutron").create_router(
{"router": router_create_args})
@atomic.action_timer("neutron.list_routers")
def _list_routers(self):
"""Returns user routers list."""
return self.clients("neutron").list_routers()["routers"]
@atomic.action_timer("neutron.delete_router")
def _delete_router(self, router):
"""Delete neutron router
:param router: Router object
"""
self.clients("neutron").delete_router(router["router"]["id"])
@atomic.action_timer("neutron.update_router")
def _update_router(self, router, router_update_args):
"""Update the neutron router.
This atomic function updates the router with router_update_args.
:param router: dict, neutron router
:param router_update_args: dict, PUT /v2.0/routers update options
:returns: updated neutron router dict
"""
self._warn_about_deprecated_name_kwarg(router, router_update_args)
body = {"router": router_update_args}
return self.clients("neutron").update_router(
router["router"]["id"], body)
@atomic.action_timer("neutron.create_port")
def _create_port(self, network, port_create_args):
"""Create neutron port.
:param network: neutron network dict
:param port_create_args: POST /v2.0/ports request options
:returns: neutron port dict
"""
port_create_args["network_id"] = network["network"]["id"]
port_create_args.setdefault(
"name", self._generate_random_name("rally_port_"))
return self.clients("neutron").create_port({"port": port_create_args})
@atomic.action_timer("neutron.list_ports")
def _list_ports(self):
"""Return user ports list."""
return self.clients("neutron").list_ports()["ports"]
@atomic.action_timer("neutron.update_port")
def _update_port(self, port, port_update_args):
"""Update the neutron port.
This atomic function updates port with port_update_args.
:param port: dict, neutron port
:param port_update_args: dict, PUT /v2.0/ports update options
:returns: updated neutron port dict
"""
self._warn_about_deprecated_name_kwarg(port, port_update_args)
body = {"port": port_update_args}
return self.clients("neutron").update_port(port["port"]["id"], body)
@atomic.action_timer("neutron.delete_port")
def _delete_port(self, port):
"""Delete neutron port.
:param port: Port object
"""
self.clients("neutron").delete_port(port["port"]["id"])
def _create_network_and_subnets(self,
network_create_args=None,
subnet_create_args=None,
subnets_per_network=1,
subnet_cidr_start="1.0.0.0/24"):
"""Create network and subnets.
:parm network_create_args: dict, POST /v2.0/networks request options
:parm subnet_create_args: dict, POST /v2.0/subnets request options
:parm subnets_per_network: int, number of subnets for one network
:parm subnet_cidr_start: str, start value for subnets CIDR
:returns: tuple of result network and subnets list
"""
subnets = []
network = self._create_network(network_create_args or {})
for i in range(subnets_per_network):
subnet = self._create_subnet(network, subnet_create_args or {},
subnet_cidr_start)
subnets.append(subnet)
return network, subnets
@atomic.action_timer("neutron.add_interface_router")
def _add_interface_router(self, subnet, router):
"""Connect subnet to router.
:param subnet: dict, neutron subnet
:param router: dict, neutron router
"""
self.clients("neutron").add_interface_router(
router["id"], {"subnet_id": subnet["id"]})
@atomic.action_timer("neutron.remove_interface_router")
def _remove_interface_router(self, subnet, router):
"""Remove subnet from router
:param subnet: dict, neutron subnet
:param router: dict, neutron router
"""
self.clients("neutron").remove_interface_router(
router["id"], {"subnet_id": subnet["id"]})
@atomic.optional_action_timer("neutron.create_pool")
def _create_lb_pool(self, subnet_id, **pool_create_args):
"""Create LB pool(v1)
:param subnet_id: str, neutron subnet-id
:param pool_create_args: dict, POST /lb/pools request options
:param atomic_action: True if this is an atomic action. added
and handled by the
optional_action_timer() decorator
:returns: dict, neutron lb pool
"""
args = {"lb_method": self.LB_METHOD,
"protocol": self.LB_PROTOCOL,
"name": self._generate_random_name("rally_pool_"),
"subnet_id": subnet_id}
args.update(pool_create_args)
return self.clients("neutron").create_pool({"pool": args})
def _create_v1_pools(self, networks, **pool_create_args):
"""Create LB pools(v1)
:param networks: list, neutron networks
:param pool_create_args: dict, POST /lb/pools request options
:returns: list, neutron lb pools
"""
subnets = []
pools = []
for net in networks:
subnets.extend(net.get("subnets", []))
with atomic.ActionTimer(self, "neutron.create_%s_pools" %
len(subnets)):
for subnet_id in subnets:
pools.append(self._create_lb_pool(
subnet_id, atomic_action=False, **pool_create_args))
return pools
@atomic.action_timer("neutron.list_pools")
def _list_v1_pools(self, **kwargs):
"""Return user lb pool list(v1)."""
return self.clients("neutron").list_pools(**kwargs)
@atomic.action_timer("neutron.delete_pool")
def _delete_v1_pool(self, pool):
"""Delete neutron pool.
:param pool: Pool object
"""
self.clients("neutron").delete_pool(pool["id"])
@atomic.action_timer("neutron.update_pool")
def _update_v1_pool(self, pool, **pool_update_args):
"""Update pool.
This atomic function updates the pool with pool_update_args.
:param pool: Pool object
:param pool_update_args: dict, POST /lb/pools update options
:returns: updated neutron pool dict
"""
self._warn_about_deprecated_name_kwarg(pool, pool_update_args)
body = {"pool": pool_update_args}
return self.clients("neutron").update_pool(pool["pool"]["id"], body)
def _create_v1_vip(self, pool, **vip_create_args):
"""Create VIP(v1)
:parm pool: dict, neutron lb-pool
:parm vip_create_args: dict, POST /lb/vips request options
:returns: dict, neutron lb vip
"""
args = {"protocol": self.LB_PROTOCOL,
"protocol_port": self.LB_PROTOCOL_PORT,
"name": self._generate_random_name("rally_vip_"),
"pool_id": pool["pool"]["id"],
"subnet_id": pool["pool"]["subnet_id"]}
args.update(vip_create_args)
return self.clients("neutron").create_vip({"vip": args})
@atomic.action_timer("neutron.list_vips")
def _list_v1_vips(self, **kwargs):
"""Return user lb vip list(v1)."""
return self.clients("neutron").list_vips(**kwargs)
@atomic.action_timer("neutron.delete_vip")
def _delete_v1_vip(self, vip):
"""Delete neutron vip.
:param vip: neutron Virtual IP object
"""
self.clients("neutron").delete_vip(vip["id"])
@atomic.action_timer("neutron.update_vip")
def _update_v1_vip(self, vip, **vip_update_args):
"""Updates vip.
This atomic function updates vip name and admin state
:param vip: Vip object
:param vip_update_args: dict, POST /lb/vips update options
:returns: updated neutron vip dict
"""
self._warn_about_deprecated_name_kwarg(vip, vip_update_args)
body = {"vip": vip_update_args}
return self.clients("neutron").update_vip(vip["vip"]["id"], body)
@atomic.action_timer("neutron.create_floating_ip")
def _create_floatingip(self, floating_network, **floating_ip_args):
"""Create floating IP with floating_network.
param: floating_network: str, external network to create floating IP
param: floating_ip_args: dict, POST /floatingips create options
returns: dict, neutron floating IP
"""
floating_network_id = self._get_network_id(
floating_network)
args = {"floating_network_id": floating_network_id}
args.update(floating_ip_args)
return self.clients("neutron").create_floatingip({"floatingip": args})
@atomic.action_timer("neutron.list_floating_ips")
def _list_floating_ips(self, **kwargs):
"""Return floating IPs list."""
return self.clients("neutron").list_floatingips(**kwargs)
@atomic.action_timer("neutron.delete_floating_ip")
def _delete_floating_ip(self, floating_ip):
"""Delete floating IP.
:param: dict, floating IP object
"""
return self.clients("neutron").delete_floatingip(floating_ip["id"])
def _create_v1_healthmonitor(self, atomic_action=True,
**healthmonitor_create_args):
"""Create LB healthmonitor.
This atomic function creates healthmonitor with the provided
healthmonitor_create_args.
:param atomic_action: True if this is an atomic action
:param healthmonitor_create_args: dict, POST /lb/healthmonitors
:returns: neutron healthmonitor dict
"""
args = {"type": self.HM_TYPE,
"delay": self.HM_DELAY,
"max_retries": self.HM_MAX_RETRIES,
"timeout": self.HM_TIMEOUT}
args.update(healthmonitor_create_args)
if atomic_action:
with atomic.ActionTimer(self, "neutron.create_healthmonitor"):
return self.clients("neutron").create_health_monitor(
{"health_monitor": args})
return self.clients("neutron").create_health_monitor(
{"health_monitor": args})
@atomic.action_timer("neutron.list_healthmonitors")
def _list_v1_healthmonitors(self, **kwargs):
"""List LB healthmonitors.
        This atomic function lists all healthmonitors.
        :param kwargs: optional parameters
        :returns: neutron lb healthmonitor list
"""
return self.clients("neutron").list_health_monitors(**kwargs)
@atomic.action_timer("neutron.delete_healthmonitor")
def _delete_v1_healthmonitor(self, healthmonitor):
"""Delete neutron healthmonitor.
:param healthmonitor: neutron healthmonitor dict
"""
self.clients("neutron").delete_health_monitor(healthmonitor["id"])
@atomic.action_timer("neutron.update_healthmonitor")
def _update_v1_healthmonitor(self, healthmonitor,
**healthmonitor_update_args):
"""Update neutron healthmonitor.
:param healthmonitor: neutron lb healthmonitor dict
:param healthmonitor_update_args: POST /lb/healthmonitors
update options
        :returns: updated neutron lb healthmonitor dict
"""
body = {"health_monitor": healthmonitor_update_args}
return self.clients("neutron").update_health_monitor(
healthmonitor["health_monitor"]["id"], body)
|
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple
from google.api_core import grpc_helpers # type: ignore
from google.api_core import gapic_v1 # type: ignore
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.ads.googleads.v8.resources.types import ad_group_extension_setting
from google.ads.googleads.v8.services.types import (
ad_group_extension_setting_service,
)
from .base import AdGroupExtensionSettingServiceTransport, DEFAULT_CLIENT_INFO
class AdGroupExtensionSettingServiceGrpcTransport(
AdGroupExtensionSettingServiceTransport
):
"""gRPC backend transport for AdGroupExtensionSettingService.
Service to manage ad group extension settings.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
def __init__(
self,
*,
host: str = "googleads.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._ssl_channel_credentials = ssl_channel_credentials
if channel:
# Sanity check: Ensure that channel and credentials are not both
# provided.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
elif api_mtls_endpoint:
warnings.warn(
"api_mtls_endpoint and client_cert_source are deprecated",
DeprecationWarning,
)
host = (
api_mtls_endpoint
if ":" in api_mtls_endpoint
else api_mtls_endpoint + ":443"
)
if credentials is None:
credentials, _ = google.auth.default(
scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
)
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
ssl_credentials = SslCredentials().ssl_credentials
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
ssl_credentials=ssl_credentials,
scopes=scopes or self.AUTH_SCOPES,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._ssl_channel_credentials = ssl_credentials
else:
host = host if ":" in host else host + ":443"
if credentials is None:
credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES)
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
ssl_credentials=ssl_channel_credentials,
scopes=self.AUTH_SCOPES,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._stubs = {} # type: Dict[str, Callable]
# Run the base constructor.
super().__init__(
host=host, credentials=credentials, client_info=client_info,
)
@classmethod
def create_channel(
cls,
host: str = "googleads.googleapis.com",
credentials: ga_credentials.Credentials = None,
scopes: Optional[Sequence[str]] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
            host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
scopes=scopes or cls.AUTH_SCOPES,
**kwargs,
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def get_ad_group_extension_setting(
self,
) -> Callable[
[ad_group_extension_setting_service.GetAdGroupExtensionSettingRequest],
ad_group_extension_setting.AdGroupExtensionSetting,
]:
r"""Return a callable for the get ad group extension setting method over gRPC.
Returns the requested ad group extension setting in full detail.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `HeaderError <>`__
`InternalError <>`__ `QuotaError <>`__ `RequestError <>`__
Returns:
Callable[[~.GetAdGroupExtensionSettingRequest],
~.AdGroupExtensionSetting]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_ad_group_extension_setting" not in self._stubs:
self._stubs[
"get_ad_group_extension_setting"
] = self.grpc_channel.unary_unary(
"/google.ads.googleads.v8.services.AdGroupExtensionSettingService/GetAdGroupExtensionSetting",
request_serializer=ad_group_extension_setting_service.GetAdGroupExtensionSettingRequest.serialize,
response_deserializer=ad_group_extension_setting.AdGroupExtensionSetting.deserialize,
)
return self._stubs["get_ad_group_extension_setting"]
@property
def mutate_ad_group_extension_settings(
self,
) -> Callable[
[
ad_group_extension_setting_service.MutateAdGroupExtensionSettingsRequest
],
ad_group_extension_setting_service.MutateAdGroupExtensionSettingsResponse,
]:
r"""Return a callable for the mutate ad group extension
settings method over gRPC.
Creates, updates, or removes ad group extension settings.
Operation statuses are returned.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `CollectionSizeError <>`__
`CriterionError <>`__ `DatabaseError <>`__ `DateError <>`__
`DistinctError <>`__ `ExtensionSettingError <>`__
`FieldError <>`__ `FieldMaskError <>`__ `HeaderError <>`__
`IdError <>`__ `InternalError <>`__ `ListOperationError <>`__
`MutateError <>`__ `NewResourceCreationError <>`__
`NotEmptyError <>`__ `NullError <>`__
`OperationAccessDeniedError <>`__ `OperatorError <>`__
`QuotaError <>`__ `RangeError <>`__ `RequestError <>`__
`ResourceCountLimitExceededError <>`__ `SizeLimitError <>`__
`StringFormatError <>`__ `StringLengthError <>`__
`UrlFieldError <>`__
Returns:
Callable[[~.MutateAdGroupExtensionSettingsRequest],
~.MutateAdGroupExtensionSettingsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "mutate_ad_group_extension_settings" not in self._stubs:
self._stubs[
"mutate_ad_group_extension_settings"
] = self.grpc_channel.unary_unary(
"/google.ads.googleads.v8.services.AdGroupExtensionSettingService/MutateAdGroupExtensionSettings",
request_serializer=ad_group_extension_setting_service.MutateAdGroupExtensionSettingsRequest.serialize,
response_deserializer=ad_group_extension_setting_service.MutateAdGroupExtensionSettingsResponse.deserialize,
)
return self._stubs["mutate_ad_group_extension_settings"]
__all__ = ("AdGroupExtensionSettingServiceGrpcTransport",)
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from .TProtocol import TProtocolBase, TProtocolException
from thrift.Thrift import TApplicationException, TMessageType
from .TBinaryProtocol import TBinaryProtocolAccelerated
from .TCompactProtocol import TCompactProtocolAccelerated
from thrift.transport.THeaderTransport import THeaderTransport
class THeaderProtocol(TProtocolBase):
"""Pass through header protocol (transport can set)"""
T_BINARY_PROTOCOL = 0
T_JSON_PROTOCOL = 1
T_COMPACT_PROTOCOL = 2
__proto = None
__proto_id = None
def get_protocol_id(self):
return self.__proto_id
def reset_protocol(self):
if self.__proto_id == self.trans.get_protocol_id():
return
proto_id = self.trans.get_protocol_id()
if proto_id == self.T_BINARY_PROTOCOL:
self.__proto = TBinaryProtocolAccelerated(self.trans,
self.strictRead, True)
elif proto_id == self.T_COMPACT_PROTOCOL:
self.__proto = TCompactProtocolAccelerated(self.trans)
else:
raise TApplicationException(TProtocolException.INVALID_PROTOCOL,
"Unknown protocol requested")
self.__proto_id = proto_id
def __init__(self, trans, strictRead=False,
client_types=None, client_type=None):
"""Create a THeaderProtocol instance
        @param trans(TTransport) The underlying transport.
@param strictRead(bool) Turn on strictRead if using TBinaryProtocol
@param client_types([THeaderTransport.HEADERS_CLIENT_TYPE, ...])
List of client types to support. Defaults to
HEADERS_CLIENT_TYPE only.
"""
if isinstance(trans, THeaderTransport):
trans._THeaderTransport__supported_client_types = set(
client_types or (THeaderTransport.HEADERS_CLIENT_TYPE,))
if client_type is not None:
trans._THeaderTransport__client_type = client_type
htrans = trans
else:
htrans = THeaderTransport(trans, client_types, client_type)
TProtocolBase.__init__(self, htrans)
self.strictRead = strictRead
self.reset_protocol()
def writeMessageBegin(self, name, type, seqid):
self.__proto.writeMessageBegin(name, type, seqid)
def writeMessageEnd(self):
self.__proto.writeMessageEnd()
def writeStructBegin(self, name):
self.__proto.writeStructBegin(name)
def writeStructEnd(self):
self.__proto.writeStructEnd()
def writeFieldBegin(self, name, type, id):
self.__proto.writeFieldBegin(name, type, id)
def writeFieldEnd(self):
self.__proto.writeFieldEnd()
def writeFieldStop(self):
self.__proto.writeFieldStop()
def writeMapBegin(self, ktype, vtype, size):
self.__proto.writeMapBegin(ktype, vtype, size)
def writeMapEnd(self):
self.__proto.writeMapEnd()
def writeListBegin(self, etype, size):
self.__proto.writeListBegin(etype, size)
def writeListEnd(self):
self.__proto.writeListEnd()
def writeSetBegin(self, etype, size):
self.__proto.writeSetBegin(etype, size)
def writeSetEnd(self):
self.__proto.writeSetEnd()
def writeBool(self, bool):
self.__proto.writeBool(bool)
def writeByte(self, byte):
self.__proto.writeByte(byte)
def writeI16(self, i16):
self.__proto.writeI16(i16)
def writeI32(self, i32):
self.__proto.writeI32(i32)
def writeI64(self, i64):
self.__proto.writeI64(i64)
def writeDouble(self, dub):
self.__proto.writeDouble(dub)
def writeFloat(self, flt):
self.__proto.writeFloat(flt)
def writeString(self, str):
self.__proto.writeString(str)
def readMessageBegin(self):
        # Read the next frame, and change protocols if needed
try:
self.trans._reset_protocol()
self.reset_protocol()
except TApplicationException as ex:
if self.__proto:
self.writeMessageBegin(b"", TMessageType.EXCEPTION, 0)
ex.write(self)
self.writeMessageEnd()
self.trans.flush()
return self.__proto.readMessageBegin()
def readMessageEnd(self):
return self.__proto.readMessageEnd()
def readStructBegin(self):
return self.__proto.readStructBegin()
def readStructEnd(self):
return self.__proto.readStructEnd()
def readFieldBegin(self):
return self.__proto.readFieldBegin()
def readFieldEnd(self):
return self.__proto.readFieldEnd()
def readMapBegin(self):
return self.__proto.readMapBegin()
def readMapEnd(self):
return self.__proto.readMapEnd()
def readListBegin(self):
return self.__proto.readListBegin()
def readListEnd(self):
return self.__proto.readListEnd()
def readSetBegin(self):
return self.__proto.readSetBegin()
def readSetEnd(self):
return self.__proto.readSetEnd()
def readBool(self):
return self.__proto.readBool()
def readByte(self):
return self.__proto.readByte()
def readI16(self):
return self.__proto.readI16()
def readI32(self):
return self.__proto.readI32()
def readI64(self):
return self.__proto.readI64()
def readDouble(self):
return self.__proto.readDouble()
def readFloat(self):
return self.__proto.readFloat()
def readString(self):
return self.__proto.readString()
class THeaderProtocolFactory(object):
def __init__(self, strictRead=False, client_types=None, client_type=None):
self.strictRead = strictRead
self.client_types = client_types
self.client_type = client_type
def getProtocol(self, trans):
prot = THeaderProtocol(trans, self.strictRead, self.client_types,
self.client_type)
return prot
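# --- Illustrative sketch (not part of this module) ---
# Obtaining a THeaderProtocol from the factory for a plain socket transport.
# TSocket comes from the standard thrift.transport package; the host/port are
# placeholders and no connection is opened by this construction alone.
if __name__ == "__main__":
    from thrift.transport import TSocket

    sock = TSocket.TSocket("localhost", 9090)
    factory = THeaderProtocolFactory(strictRead=False)
    proto = factory.getProtocol(sock)
    # The concrete wire protocol (binary or compact) is chosen in
    # reset_protocol() based on what the header transport reports.
    print(proto.get_protocol_id())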
|
|
from unittest import TestCase
from src.Escuderia import Escuderia
from src.Piloto import Piloto
from src.Circuito import Circuito
from src.GranPremio import GranPremio
from mock import patch
__author__ = 'MAMISHO'
class TestGranPremio(TestCase):
def test_extraer_participantes_1(self):
"""
        Test extracting existing participants.
        This test checks that the participants were added correctly
        to the grand prix and exercises the participant-extraction
        functionality used by other modules.
        :param p1: Driver of team e1
        :param p2: Driver of team e1
        :param p3: Driver of team e2
        :param p4: Driver of team e2
        :param p5: Driver of team e3
        :param p6: Driver of team e3
        :param e1: Team added to the circuit
        :param e2: Team added to the circuit
        :param e3: Team added to the circuit
        :param c1: Circuit of the grand prix
        :param es: Dictionary of teams used to create the grand prix
        :param gp: Grand prix under test
        :param participantes: Object used to verify the functionality
"""
p1 = Piloto("AAA", "Piloto A")
p2 = Piloto("BBB", "Piloto B")
p3 = Piloto("CCC", "Piloto C")
p4 = Piloto("DDD", "Piloto D")
p5 = Piloto("EEE", "Piloto E")
p6 = Piloto("FFF", "Piloto F")
e1 = Escuderia("Escuderia 1")
e2 = Escuderia("Escuderia 2")
e3 = Escuderia("Escuderia 3")
c1 = Circuito("Circuito 1")
e1.agregar_piloto(p1)
e1.agregar_piloto(p2)
e2.agregar_piloto(p3)
e2.agregar_piloto(p4)
e3.agregar_piloto(p5)
e3.agregar_piloto(p6)
es = {e1.nombre: e1, e2.nombre: e2, e3.nombre: e3}
gp = GranPremio("Gran Premio 1", c1, es, "USA", "2014")
participantes = {}
participantes = gp.extraer_participantes(es)
        print(participantes)
# self.assertDictContainsSubset(participantes, es, None)
self.assertIn(p1.idPiloto, participantes)
self.assertIn(p2.idPiloto, participantes)
self.assertIn(p3.idPiloto, participantes)
self.assertIn(p4.idPiloto, participantes)
self.assertIn(p5.idPiloto, participantes)
self.assertIn(p6.idPiloto, participantes)
def test_extraer_participantes_2(self):
"""
        Test extracting non-existent participants.
        This test checks that participants who are not part of the
        grand prix are not extracted.
        :param p1: Driver of team e1
        :param p2: Driver of team e1
        :param p3: Driver of team e2
        :param p4: Driver of team e2
        :param p5: Driver of team e3
        :param p6: Driver of team e3
        :param p7: Driver who is not part of the grand prix and must not
                   appear in the resulting dictionary
        :param e1: Team added to the circuit
        :param e2: Team added to the circuit
        :param e3: Team added to the circuit
        :param c1: Circuit of the grand prix
        :param es: Dictionary of teams used to create the grand prix
        :param gp: Grand prix under test
        :param participantes: Dictionary used to verify the functionality
"""
p1 = Piloto("AAA", "Piloto A")
p2 = Piloto("BBB", "Piloto B")
p3 = Piloto("CCC", "Piloto C")
p4 = Piloto("DDD", "Piloto D")
p5 = Piloto("EEE", "Piloto E")
p6 = Piloto("FFF", "Piloto F")
p7 = Piloto("GGG", "Piloto G")
e1 = Escuderia("Escuderia 1")
e2 = Escuderia("Escuderia 2")
e3 = Escuderia("Escuderia 3")
c1 = Circuito("Circuito 1")
e1.agregar_piloto(p1)
e1.agregar_piloto(p2)
e2.agregar_piloto(p3)
e2.agregar_piloto(p4)
e3.agregar_piloto(p5)
e3.agregar_piloto(p6)
es = {e1.nombre: e1, e2.nombre: e2, e3.nombre: e3}
gp = GranPremio("Gran Premio 1", c1, es, "USA", "2014")
participantes = {}
participantes = gp.extraer_participantes(es)
        self.assertNotIn(p7.idPiloto, participantes)
@patch('src.GranPremio.GranPremio.set_clasificacion')
    def test_set_clasificacion(self, mock_set_clasificacion):
"""
        Test set_clasificacion.
        This test checks that parameters can be entered from the keyboard
        when the program prompts the user for them. It uses patch from the
        mock library to stub out the interactive call.
        :param p1: Driver of team e1
        :param p2: Driver of team e1
        :param p3: Driver of team e2
        :param p4: Driver of team e2
        :param p5: Driver of team e3
        :param p6: Driver of team e3
        :param e1: Team added to the circuit
        :param e2: Team added to the circuit
        :param e3: Team added to the circuit
        :param c1: Circuit of the grand prix
        :param es: Dictionary of teams used to create the grand prix
        :param gp: Grand prix used for the test
        :param participantes: Dictionary used to verify the functionality
"""
p1 = Piloto("AAA", "Piloto A")
p2 = Piloto("BBB", "Piloto B")
p3 = Piloto("CCC", "Piloto C")
p4 = Piloto("DDD", "Piloto D")
p5 = Piloto("EEE", "Piloto E")
p6 = Piloto("FFF", "Piloto F")
e1 = Escuderia("Escuderia 1")
e2 = Escuderia("Escuderia 2")
e3 = Escuderia("Escuderia 3")
c1 = Circuito("Circuito 1")
e1.agregar_piloto(p1)
e1.agregar_piloto(p2)
e2.agregar_piloto(p3)
e2.agregar_piloto(p4)
e3.agregar_piloto(p5)
e3.agregar_piloto(p6)
es = {e1.nombre: e1, e2.nombre: e2, e3.nombre: e3}
ng = GranPremio("Gran Premio 1", c1, es, "USA", "2014")
self.assertIsNotNone(ng.set_clasificacion())
def test_set_vuelta_rapida(self):
"""
        Test set fastest lap.
        This test checks that a fastest lap can be recorded.
        The following parameters are used:
        :param p1: Driver of team e1
        :param p2: Driver of team e1
        :param p3: Driver of team e2
        :param p4: Driver of team e2
        :param p5: Driver of team e3
        :param p6: Driver of team e3
        :param e1: Team added to the circuit
        :param e2: Team added to the circuit
        :param e3: Team added to the circuit
        :param c1: Circuit of the grand prix
        :param es: Dictionary of teams used to create the grand prix
        :param gp: Grand prix used for the test
"""
p1 = Piloto("AAA", "Piloto A")
p2 = Piloto("BBB", "Piloto B")
p3 = Piloto("CCC", "Piloto C")
p4 = Piloto("DDD", "Piloto D")
p5 = Piloto("EEE", "Piloto E")
p6 = Piloto("FFF", "Piloto F")
e1 = Escuderia("Escuderia 1")
e2 = Escuderia("Escuderia 2")
e3 = Escuderia("Escuderia 3")
c1 = Circuito("Circuito 1")
e1.agregar_piloto(p1)
e1.agregar_piloto(p2)
e2.agregar_piloto(p3)
e2.agregar_piloto(p4)
e3.agregar_piloto(p5)
e3.agregar_piloto(p6)
es = {e1.nombre: e1, e2.nombre: e2, e3.nombre: e3}
gp = GranPremio("Gran Premio 1", c1, es, "USA", "2014")
self.assertEqual(gp.set_vuelta_rapida("1:14:123", "AAA"), True)
|
|
from south.db import db
from django.db import models
from dinette.models import *
class Migration:
def forwards(self, orm):
# Adding model 'DinetteUserProfile'
db.create_table('dinette_dinetteuserprofile', (
('id', orm['dinette.DinetteUserProfile:id']),
('user', orm['dinette.DinetteUserProfile:user']),
('last_activity', orm['dinette.DinetteUserProfile:last_activity']),
('userrank', orm['dinette.DinetteUserProfile:userrank']),
('last_posttime', orm['dinette.DinetteUserProfile:last_posttime']),
('photo', orm['dinette.DinetteUserProfile:photo']),
('signature', orm['dinette.DinetteUserProfile:signature']),
))
db.send_create_signal('dinette', ['DinetteUserProfile'])
# Adding model 'Ftopics'
db.create_table('dinette_ftopics', (
('id', orm['dinette.Ftopics:id']),
('category', orm['dinette.Ftopics:category']),
('subject', orm['dinette.Ftopics:subject']),
('slug', orm['dinette.Ftopics:slug']),
('message', orm['dinette.Ftopics:message']),
('file', orm['dinette.Ftopics:file']),
('attachment_type', orm['dinette.Ftopics:attachment_type']),
('filename', orm['dinette.Ftopics:filename']),
('viewcount', orm['dinette.Ftopics:viewcount']),
('replies', orm['dinette.Ftopics:replies']),
('created_on', orm['dinette.Ftopics:created_on']),
('updated_on', orm['dinette.Ftopics:updated_on']),
('posted_by', orm['dinette.Ftopics:posted_by']),
('announcement_flag', orm['dinette.Ftopics:announcement_flag']),
('is_closed', orm['dinette.Ftopics:is_closed']),
('is_sticky', orm['dinette.Ftopics:is_sticky']),
('is_hidden', orm['dinette.Ftopics:is_hidden']),
))
db.send_create_signal('dinette', ['Ftopics'])
# Adding model 'SiteConfig'
db.create_table('dinette_siteconfig', (
('id', orm['dinette.SiteConfig:id']),
('name', orm['dinette.SiteConfig:name']),
('tag_line', orm['dinette.SiteConfig:tag_line']),
))
db.send_create_signal('dinette', ['SiteConfig'])
# Adding model 'Category'
db.create_table('dinette_category', (
('id', orm['dinette.Category:id']),
('name', orm['dinette.Category:name']),
('slug', orm['dinette.Category:slug']),
('description', orm['dinette.Category:description']),
('ordering', orm['dinette.Category:ordering']),
('super_category', orm['dinette.Category:super_category']),
('created_on', orm['dinette.Category:created_on']),
('updated_on', orm['dinette.Category:updated_on']),
('posted_by', orm['dinette.Category:posted_by']),
))
db.send_create_signal('dinette', ['Category'])
# Adding model 'Reply'
db.create_table('dinette_reply', (
('id', orm['dinette.Reply:id']),
('topic', orm['dinette.Reply:topic']),
('posted_by', orm['dinette.Reply:posted_by']),
('message', orm['dinette.Reply:message']),
('file', orm['dinette.Reply:file']),
('attachment_type', orm['dinette.Reply:attachment_type']),
('filename', orm['dinette.Reply:filename']),
('created_on', orm['dinette.Reply:created_on']),
('updated_on', orm['dinette.Reply:updated_on']),
))
db.send_create_signal('dinette', ['Reply'])
# Adding model 'SuperCategory'
db.create_table('dinette_supercategory', (
('id', orm['dinette.SuperCategory:id']),
('name', orm['dinette.SuperCategory:name']),
('description', orm['dinette.SuperCategory:description']),
('ordering', orm['dinette.SuperCategory:ordering']),
('created_on', orm['dinette.SuperCategory:created_on']),
('updated_on', orm['dinette.SuperCategory:updated_on']),
('posted_by', orm['dinette.SuperCategory:posted_by']),
))
db.send_create_signal('dinette', ['SuperCategory'])
def backwards(self, orm):
# Deleting model 'DinetteUserProfile'
db.delete_table('dinette_dinetteuserprofile')
# Deleting model 'Ftopics'
db.delete_table('dinette_ftopics')
# Deleting model 'SiteConfig'
db.delete_table('dinette_siteconfig')
# Deleting model 'Category'
db.delete_table('dinette_category')
# Deleting model 'Reply'
db.delete_table('dinette_reply')
# Deleting model 'SuperCategory'
db.delete_table('dinette_supercategory')
models = {
'auth.group': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)"},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'dinette.category': {
'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'default': "''"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moderated_by': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'ordering': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'posted_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'cposted'", 'to': "orm['auth.User']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '110', 'db_index': 'True'}),
'super_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dinette.SuperCategory']"}),
'updated_on': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'dinette.dinetteuserprofile': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_activity': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'last_posttime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'photo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'signature': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'userrank': ('django.db.models.fields.CharField', [], {'default': "'Junior Member'", 'max_length': '30'})
},
'dinette.ftopics': {
'announcement_flag': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'attachment_type': ('django.db.models.fields.CharField', [], {'default': "'nofile'", 'max_length': '20'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dinette.Category']"}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'default': "''", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'filename': ('django.db.models.fields.CharField', [], {'default': "'dummyname.txt'", 'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_closed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_sticky': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'posted_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'replies': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '1034', 'db_index': 'True'}),
'subject': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'updated_on': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'viewcount': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'dinette.reply': {
'attachment_type': ('django.db.models.fields.CharField', [], {'default': "'nofile'", 'max_length': '20'}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'default': "''", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'filename': ('django.db.models.fields.CharField', [], {'default': "'dummyname.txt'", 'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'posted_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'topic': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dinette.Ftopics']"}),
'updated_on': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'dinette.siteconfig': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'tag_line': ('django.db.models.fields.TextField', [], {'max_length': '100'})
},
'dinette.supercategory': {
'accessgroups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']"}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'default': "''"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'ordering': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'posted_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'updated_on': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
}
}
complete_apps = ['dinette']
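# --- Illustrative note (not part of the migration) ---
# With South installed and 'dinette' in INSTALLED_APPS, this schema migration
# would typically be applied with South's management command, e.g.:
#
#     python manage.py migrate dinette
#
# and rolled back by migrating the app back to the previous migration number.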
|
|
# -*- coding: ascii -*-
r"""
:Copyright:
Copyright 2010 - 2015
Andr\xe9 Malo or his licensors, as applicable
:License:
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============
Deprecations
==============
Deprecations.
"""
if __doc__:
# pylint: disable = redefined-builtin
__doc__ = __doc__.encode('ascii').decode('unicode_escape')
__author__ = r"Andr\xe9 Malo".encode('ascii').decode('unicode_escape')
__docformat__ = "restructuredtext en"
import os as _os
import types as _types
from . import _exceptions
from . import _graph
from . import _util
from . import util as _old_util
from . import _version
from .integration import wtf_service as _wtf_service
from .markup.soup import filters as _filters
class Deprecator(object):
"""
Deprecation proxy class
The class basically emits a deprecation warning on access.
:IVariables:
`__todeprecate` : any
Object to deprecate
`__warn` : ``callable``
Warn function
"""
def __new__(cls, todeprecate, message=None):
"""
Construct
:Parameters:
`todeprecate` : any
Object to deprecate
`message` : ``str``
Custom message. If omitted or ``None``, a default message is
generated.
:Return: Deprecator instance
:Rtype: `Deprecator`
"""
# pylint: disable = unidiomatic-typecheck
if type(todeprecate) is _types.MethodType:
call = cls(todeprecate.im_func, message=message)
@_util.decorating(todeprecate.im_func)
def func(*args, **kwargs):
""" Wrapper to build a new method """
# pylint: disable = not-callable
return call(*args, **kwargs)
return _types.MethodType(func, None, todeprecate.im_class)
elif cls == Deprecator and callable(todeprecate):
res = CallableDeprecator(todeprecate, message=message)
if type(todeprecate) is _types.FunctionType:
res = _util.decorating(todeprecate)(res)
return res
return object.__new__(cls)
def __init__(self, todeprecate, message=None):
"""
Initialization
:Parameters:
`todeprecate` : any
Object to deprecate
`message` : ``str``
Custom message. If omitted or ``None``, a default message is
generated.
"""
self.__todeprecate = todeprecate
if message is None:
# pylint: disable = unidiomatic-typecheck
if type(todeprecate) is _types.FunctionType:
name = todeprecate.__name__
else:
name = todeprecate.__class__.__name__
message = "%s.%s is deprecated." % (todeprecate.__module__, name)
if _os.environ.get('EPYDOC_INSPECTOR') == '1':
def warn():
""" Dummy to not clutter epydoc output """
pass
else:
def warn():
""" Emit the message """
_exceptions.DeprecationWarning.emit(message, stacklevel=3)
self.__warn = warn
def __getattr__(self, name):
""" Get attribute with deprecation warning """
self.__warn()
return getattr(self.__todeprecate, name)
def __iter__(self):
""" Get iterator with deprecation warning """
self.__warn()
return iter(self.__todeprecate)
class CallableDeprecator(Deprecator):
""" Callable proxy deprecation class """
def __call__(self, *args, **kwargs):
""" Call with deprecation warning """
self._Deprecator__warn()
return self._Deprecator__todeprecate(*args, **kwargs)
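# Editor's illustrative sketch (hypothetical names, not part of this module): wrapping an
# object emits a DeprecationWarning on attribute access or call, then proxies through.
#
#   legacy_helper = Deprecator(some_helper, "pkg.legacy_helper is deprecated, use pkg.helper.")
#   legacy_helper.run()    # warns on access, then delegates to some_helper.run()
#
# Callable objects are transparently wrapped in CallableDeprecator, so calling the proxy
# also warns before delegating.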
if True:
# pylint: disable = protected-access
_old_util.CallableDeprecator = Deprecator(
CallableDeprecator,
"tdi.util.CallableDeprecator is no longer public. Don't use it."
)
_old_util.Deprecator = Deprecator(
Deprecator,
"tdi.util.Deprecator is no longer public. Don't use it."
)
_old_util.Version = Deprecator(
_version.Version,
"tdi.util.Version is no longer public. Don't use it."
)
_old_util.DependencyGraph = Deprecator(
_graph.DependencyGraph,
"tdi.util.DependencyGraph is no longer public. Don't use it."
)
_old_util.DependencyCycle = _graph.DependencyCycle
_old_util.parse_content_type = Deprecator(
_filters._parse_content_type,
"tdi.util.parse_content_type is no longer public. Don't use it."
)
_old_util.find_public = Deprecator(
_util.find_public,
"tdi.util.find_public is no longer public. Don't use it."
)
_old_util.Property = Deprecator(
_util.Property,
"tdi.util.Property is no longer public. Don't use it."
)
_old_util.decorating = Deprecator(
_util.decorating,
"tdi.util.decorating is no longer public. Don't use it."
)
_old_util.load_dotted = Deprecator(
_wtf_service._load_dotted,
"tdi.util.load_dotted is no longer public. Don't use it."
)
def make_dotted(name):
"""
Generate a dotted module
:Parameters:
`name` : ``str``
Fully qualified module name (like ``tdi.util``)
:Return: The module object of the last part and the information
whether the last part was newly added (``(module, bool)``)
:Rtype: ``tuple``
:Exceptions:
- `ImportError` : The module name was horribly invalid
"""
import imp as _imp
import sys as _sys
sofar, parts = [], name.split('.')
oldmod = None
for part in parts:
if not part:
raise ImportError("Invalid module name %r" % (name,))
partname = ".".join(sofar + [part])
try:
# pylint: disable = not-callable
fresh, mod = False, _old_util.load_dotted(partname)
except ImportError:
mod = _imp.new_module(partname)
mod.__path__ = []
fresh = mod == _sys.modules.setdefault(partname, mod)
if oldmod is not None:
setattr(oldmod, part, mod)
oldmod = mod
sofar.append(part)
return mod, fresh
_old_util.make_dotted = Deprecator(
make_dotted,
"tdi.util.make_dotted is no longer public. Don't use it."
)
|
|
from model_bakery import baker
from django.conf import settings
from django.test import TestCase
from sponsors.forms import (
SponsorshipsBenefitsForm,
SponsorshipApplicationForm,
Sponsor,
SponsorContactForm,
SponsorContactFormSet,
SponsorBenefitAdminInlineForm,
SponsorBenefit,
Sponsorship,
SponsorshipsListForm,
SendSponsorshipNotificationForm, SponsorRequiredAssetsForm, SponsorshipBenefitAdminForm,
)
from sponsors.models import SponsorshipBenefit, SponsorContact, RequiredTextAssetConfiguration, \
RequiredImgAssetConfiguration, ImgAsset, RequiredTextAsset, SponsorshipPackage
from .utils import get_static_image_file_as_upload
from ..models.enums import AssetsRelatedTo
class SponsorshipsBenefitsFormTests(TestCase):
def setUp(self):
self.psf = baker.make("sponsors.SponsorshipProgram", name="PSF")
self.wk = baker.make("sponsors.SponsorshipProgram", name="Working Group")
self.program_1_benefits = baker.make(
SponsorshipBenefit, program=self.psf, _quantity=3
)
self.program_2_benefits = baker.make(
SponsorshipBenefit, program=self.wk, _quantity=5
)
self.package = baker.make("sponsors.SponsorshipPackage", advertise=True)
self.package.benefits.add(*self.program_1_benefits)
self.package.benefits.add(*self.program_2_benefits)
        # add-on benefits: benefits not associated with any package
self.add_ons = baker.make(SponsorshipBenefit, program=self.psf, _quantity=2)
# a la carte benefits
self.a_la_carte = baker.make(SponsorshipBenefit, program=self.psf, a_la_carte=True, _quantity=2)
def test_specific_field_to_select_add_ons(self):
form = SponsorshipsBenefitsForm()
choices = list(form.fields["add_ons_benefits"].choices)
self.assertEqual(len(self.add_ons), len(choices))
for benefit in self.add_ons:
self.assertIn(benefit.id, [c[0] for c in choices])
def test_benefits_organized_by_program(self):
form = SponsorshipsBenefitsForm()
field1, field2 = sorted(form.benefits_programs, key=lambda f: f.name)
self.assertEqual("benefits_psf", field1.name)
self.assertEqual("PSF Benefits", field1.label)
choices = list(field1.field.choices)
self.assertEqual(len(self.program_1_benefits), len(choices))
for benefit in self.program_1_benefits:
self.assertIn(benefit.id, [c[0] for c in choices])
self.assertEqual("benefits_working_group", field2.name)
self.assertEqual("Working Group Benefits", field2.label)
choices = list(field2.field.choices)
self.assertEqual(len(self.program_2_benefits), len(choices))
for benefit in self.program_2_benefits:
self.assertIn(benefit.id, [c[0] for c in choices])
def test_specific_field_to_select_a_la_carte_benefits(self):
form = SponsorshipsBenefitsForm()
choices = list(form.fields["a_la_carte_benefits"].choices)
self.assertEqual(len(self.a_la_carte), len(choices))
for benefit in self.a_la_carte:
self.assertIn(benefit.id, [c[0] for c in choices])
def test_package_list_only_advertisable_ones(self):
ads_pkgs = baker.make('SponsorshipPackage', advertise=True, _quantity=2)
baker.make('SponsorshipPackage', advertise=False)
form = SponsorshipsBenefitsForm()
field = form.fields.get("package")
self.assertEqual(3, field.queryset.count())
def test_invalidate_form_without_benefits(self):
form = SponsorshipsBenefitsForm(data={})
self.assertFalse(form.is_valid())
self.assertIn("__all__", form.errors)
form = SponsorshipsBenefitsForm(
data={"benefits_psf": [self.program_1_benefits[0].id], "package": self.package.id}
)
self.assertTrue(form.is_valid())
def test_validate_form_without_package_but_with_a_la_carte_benefits(self):
benefit = self.a_la_carte[0]
form = SponsorshipsBenefitsForm(
data={"a_la_carte_benefits": [benefit.id]}
)
self.assertTrue(form.is_valid())
self.assertEqual([], form.get_benefits())
self.assertEqual([benefit], form.get_benefits(include_a_la_carte=True))
def test_should_not_validate_form_without_package_with_add_ons_and_a_la_carte_benefits(self):
data = {
"a_la_carte_benefits": [self.a_la_carte[0]],
"add_ons_benefits": [self.add_ons[0]],
}
form = SponsorshipsBenefitsForm(data=data)
self.assertFalse(form.is_valid())
def test_benefits_conflicts_helper_property(self):
benefit_1, benefit_2 = baker.make("sponsors.SponsorshipBenefit", _quantity=2)
benefit_1.conflicts.add(*self.program_1_benefits)
benefit_2.conflicts.add(*self.program_2_benefits)
form = SponsorshipsBenefitsForm()
map = form.benefits_conflicts
# conflicts are symmetrical relationships
self.assertEqual(
2 + len(self.program_1_benefits) + len(self.program_2_benefits), len(map)
)
self.assertEqual(
sorted(map[benefit_1.id]), sorted(b.id for b in self.program_1_benefits)
)
self.assertEqual(
sorted(map[benefit_2.id]), sorted(b.id for b in self.program_2_benefits)
)
for b in self.program_1_benefits:
self.assertEqual(map[b.id], [benefit_1.id])
for b in self.program_2_benefits:
self.assertEqual(map[b.id], [benefit_2.id])
def test_invalid_form_if_any_conflict(self):
benefit_1 = baker.make("sponsors.SponsorshipBenefit", program=self.wk)
benefit_1.conflicts.add(*self.program_1_benefits)
self.package.benefits.add(benefit_1)
data = {"benefits_psf": [b.id for b in self.program_1_benefits], "package": self.package.id}
form = SponsorshipsBenefitsForm(data=data)
self.assertTrue(form.is_valid())
data["benefits_working_group"] = [benefit_1.id]
form = SponsorshipsBenefitsForm(data=data)
self.assertFalse(form.is_valid())
self.assertIn(
"The application has 1 or more benefits that conflicts.",
form.errors["__all__"],
)
def test_get_benefits_from_cleaned_data(self):
benefit = self.program_1_benefits[0]
data = {"benefits_psf": [benefit.id],
"add_ons_benefits": [b.id for b in self.add_ons],
"package": self.package.id}
form = SponsorshipsBenefitsForm(data=data)
self.assertTrue(form.is_valid())
benefits = form.get_benefits()
self.assertEqual(1, len(benefits))
self.assertIn(benefit, benefits)
benefits = form.get_benefits(include_add_ons=True)
self.assertEqual(3, len(benefits))
self.assertIn(benefit, benefits)
for add_on in self.add_ons:
self.assertIn(add_on, benefits)
def test_package_only_benefit_without_package_should_not_validate(self):
SponsorshipBenefit.objects.all().update(package_only=True)
data = {"benefits_psf": [self.program_1_benefits[0]]}
form = SponsorshipsBenefitsForm(data=data)
self.assertFalse(form.is_valid())
self.assertIn(
"You must pick a package to include the selected benefits.",
form.errors["__all__"],
)
def test_package_only_benefit_with_wrong_package_should_not_validate(self):
SponsorshipBenefit.objects.all().update(package_only=True)
package = baker.make("sponsors.SponsorshipPackage", advertise=True)
package.benefits.add(*SponsorshipBenefit.objects.all())
data = {
"benefits_psf": [self.program_1_benefits[0]],
"package": baker.make("sponsors.SponsorshipPackage", advertise=True).id, # other package
}
form = SponsorshipsBenefitsForm(data=data)
self.assertFalse(form.is_valid())
self.assertIn(
"The application has 1 or more package only benefits but wrong sponsor package.",
form.errors["__all__"][0],
)
data = {
"benefits_psf": [self.program_1_benefits[0]],
"package": package.id,
}
form = SponsorshipsBenefitsForm(data=data)
self.assertTrue(form.is_valid())
def test_benefit_with_no_capacity_should_not_validate(self):
SponsorshipBenefit.objects.all().update(capacity=0)
data = {"benefits_psf": [self.program_1_benefits[0]], "package": self.package.id}
form = SponsorshipsBenefitsForm(data=data)
self.assertFalse(form.is_valid())
self.assertIn(
"The application has 1 or more benefits with no capacity.",
form.errors["__all__"],
)
def test_benefit_with_soft_capacity_should_validate(self):
SponsorshipBenefit.objects.all().update(capacity=0, soft_capacity=True)
data = {"benefits_psf": [self.program_1_benefits[0]], "package": self.package.id}
form = SponsorshipsBenefitsForm(data=data)
self.assertTrue(form.is_valid())
def test_get_package_return_selected_package(self):
data = {"benefits_psf": [self.program_1_benefits[0]], "package": self.package.id}
form = SponsorshipsBenefitsForm(data=data)
self.assertTrue(form.is_valid())
self.assertEqual(self.package, form.get_package())
def test_get_package_get_or_create_a_la_carte_only_package(self):
data = {"a_la_carte_benefits": [self.a_la_carte[0].id]}
form = SponsorshipsBenefitsForm(data=data)
self.assertTrue(form.is_valid())
self.assertEqual(1, SponsorshipPackage.objects.count())
# should create package if it doesn't exist yet
package = form.get_package()
self.assertEqual("A La Carte Only", package.name)
self.assertEqual("a-la-carte-only", package.slug)
self.assertEqual(175, package.logo_dimension)
self.assertEqual(0, package.sponsorship_amount)
self.assertFalse(package.advertise)
self.assertEqual(2, SponsorshipPackage.objects.count())
# re-use previously created package for subsequent applications
data = {"a_la_carte_benefits": [self.a_la_carte[0].id]}
form = SponsorshipsBenefitsForm(data=data)
self.assertTrue(form.is_valid())
self.assertEqual(package, form.get_package())
self.assertEqual(2, SponsorshipPackage.objects.count())
class SponsorshipApplicationFormTests(TestCase):
def setUp(self):
self.data = {
"name": "CompanyX",
"primary_phone": "+14141413131",
"mailing_address_line_1": "4th street",
"mailing_address_line_2": "424",
"city": "New York",
"state": "NY",
"postal_code": "10212",
"country": "US",
"contact-0-name": "Bernardo",
"contact-0-email": "[email protected]",
"contact-0-phone": "+1999999999",
"contact-0-primary": True,
"contact-TOTAL_FORMS": 1,
"contact-MAX_NUM_FORMS": 5,
"contact-MIN_NUM_FORMS": 1,
"contact-INITIAL_FORMS": 1,
}
self.files = {
"web_logo": get_static_image_file_as_upload("psf-logo.png", "logo.png")
}
def test_required_fields(self):
required_fields = [
"name",
"web_logo",
"primary_phone",
"mailing_address_line_1",
"city",
"postal_code",
"country",
"__all__",
]
form = SponsorshipApplicationForm(
{
"contact-TOTAL_FORMS": 0,
"contact-MAX_NUM_FORMS": 5,
"contact-MIN_NUM_FORMS": 1,
"contact-INITIAL_FORMS": 1,
}
)
self.assertFalse(form.is_valid())
self.assertEqual(len(required_fields), len(form.errors), msg=form.errors)
for required in required_fields:
self.assertIn(required, form.errors)
def test_create_sponsor_with_valid_data(self):
user = baker.make(settings.AUTH_USER_MODEL)
form = SponsorshipApplicationForm(self.data, self.files, user=user)
self.assertTrue(form.is_valid(), form.errors)
sponsor = form.save()
self.assertTrue(sponsor.pk)
self.assertEqual(sponsor.name, "CompanyX")
self.assertTrue(sponsor.web_logo)
self.assertEqual(sponsor.primary_phone, "+14141413131")
self.assertEqual(sponsor.mailing_address_line_1, "4th street")
self.assertEqual(sponsor.mailing_address_line_2, "424")
self.assertEqual(sponsor.city, "New York")
self.assertEqual(sponsor.state, "NY")
self.assertEqual(sponsor.postal_code, "10212")
self.assertEqual(sponsor.country, "US")
self.assertEqual(sponsor.country.name, "United States of America")
self.assertEqual(sponsor.description, "")
self.assertIsNone(sponsor.print_logo.name)
self.assertEqual(sponsor.landing_page_url, "")
contact = sponsor.contacts.get()
self.assertEqual(contact.name, "Bernardo")
self.assertEqual(contact.email, "[email protected]")
self.assertEqual(contact.phone, "+1999999999")
self.assertIsNone(contact.user)
def test_create_sponsor_with_valid_data_for_non_required_inputs(
self,
):
self.data["description"] = "Important company"
self.data["landing_page_url"] = "https://companyx.com"
self.data["twitter_handle"] = "@companyx"
self.files["print_logo"] = get_static_image_file_as_upload(
"psf-logo_print.png", "logo_print.png"
)
form = SponsorshipApplicationForm(self.data, self.files)
self.assertTrue(form.is_valid(), form.errors)
sponsor = form.save()
self.assertEqual(sponsor.description, "Important company")
self.assertTrue(sponsor.print_logo)
self.assertFalse(form.user_with_previous_sponsors)
self.assertEqual(sponsor.landing_page_url, "https://companyx.com")
self.assertEqual(sponsor.twitter_handle, "@companyx")
def test_use_previous_user_sponsor(self):
contact = baker.make(SponsorContact, user__email="[email protected]")
self.data = {"sponsor": contact.sponsor.id}
form = SponsorshipApplicationForm(self.data, self.files, user=contact.user)
self.assertTrue(form.is_valid(), form.errors)
saved_sponsor = form.save()
self.assertTrue(form.user_with_previous_sponsors)
self.assertEqual(saved_sponsor, contact.sponsor)
self.assertEqual(Sponsor.objects.count(), 1)
self.assertEqual(saved_sponsor.contacts.get(), contact)
    def test_invalidate_form_if_user_selects_sponsor_from_other_user(self):
contact = baker.make(SponsorContact, user__email="[email protected]")
self.data = {"sponsor": contact.sponsor.id}
other_user = baker.make(settings.AUTH_USER_MODEL)
form = SponsorshipApplicationForm(self.data, self.files, user=other_user)
self.assertFalse(form.is_valid())
self.assertFalse(form.user_with_previous_sponsors)
self.assertIn("sponsor", form.errors)
self.assertEqual(1, len(form.errors))
def test_invalidate_form_if_sponsor_with_sponsorships(self):
contact = baker.make(SponsorContact, user__email="[email protected]")
self.data = {"sponsor": contact.sponsor.id}
prev_sponsorship = baker.make("sponsors.Sponsorship", sponsor=contact.sponsor)
form = SponsorshipApplicationForm(self.data, self.files, user=contact.user)
self.assertFalse(form.is_valid())
self.assertIn("sponsor", form.errors)
prev_sponsorship.status = prev_sponsorship.FINALIZED
prev_sponsorship.save()
form = SponsorshipApplicationForm(self.data, self.files, user=contact.user)
self.assertTrue(form.is_valid())
def test_create_multiple_contacts_and_user_contact(self):
user_email = "[email protected]"
self.data.update(
{
"contact-1-name": "Secondary",
"contact-1-email": user_email,
"contact-1-phone": "+1123123123",
"contact-TOTAL_FORMS": 2,
}
)
user = baker.make(settings.AUTH_USER_MODEL, email=user_email.upper())
form = SponsorshipApplicationForm(self.data, self.files, user=user)
self.assertTrue(form.is_valid(), form.errors)
sponsor = form.save()
self.assertEqual(2, sponsor.contacts.count())
c1, c2 = sorted(sponsor.contacts.all(), key=lambda c: c.name)
self.assertEqual(c1.name, "Bernardo")
self.assertTrue(c1.primary) # first contact should be the primary one
self.assertIsNone(c1.user)
self.assertEqual(c2.name, "Secondary")
self.assertFalse(c2.primary)
self.assertEqual(c2.user, user)
def test_invalidate_form_if_no_primary_contact(self):
self.data.pop("contact-0-primary")
user = baker.make(settings.AUTH_USER_MODEL)
form = SponsorshipApplicationForm(self.data, self.files, user=user)
self.assertFalse(form.is_valid())
msg = "You have to mark at least one contact as the primary one."
self.assertIn(msg, form.errors["__all__"])
class SponsorContactFormSetTests(TestCase):
def setUp(self):
self.data = {
"contact-TOTAL_FORMS": 0,
"contact-MAX_NUM_FORMS": 5,
"contact-MIN_NUM_FORMS": 1,
"contact-INITIAL_FORMS": 1,
}
def test_contact_formset(self):
sponsor = baker.make(Sponsor)
self.data.update(
{
"contact-0-name": "Bernardo",
"contact-0-email": "[email protected]",
"contact-0-phone": "+1999999999",
"contact-1-name": "Foo",
"contact-1-email": "[email protected]",
"contact-1-phone": "+1111111111",
"contact-TOTAL_FORMS": 2,
}
)
formset = SponsorContactFormSet(self.data, prefix="contact")
self.assertTrue(formset.is_valid())
for form in formset.forms:
contact = form.save(commit=False)
contact.sponsor = sponsor
contact.save()
self.assertEqual(2, SponsorContact.objects.count())
def test_invalidate_formset_if_no_form(self):
self.data["contact-TOTAL_FORMS"] = 0
formset = SponsorContactFormSet(self.data, prefix="contact")
self.assertFalse(formset.is_valid())
class SponsorBenefitAdminInlineFormTests(TestCase):
def setUp(self):
self.benefit = baker.make(SponsorshipBenefit)
self.sponsorship = baker.make(Sponsorship)
self.data = {
"sponsorship_benefit": self.benefit.pk,
"sponsorship": self.sponsorship.pk,
"benefit_internal_value": 200,
}
def test_required_fields_for_new_sponsor_benefit(self):
required_fields = [
"sponsorship",
]
form = SponsorBenefitAdminInlineForm({})
self.assertFalse(form.is_valid())
for required in required_fields:
self.assertIn(required, form.errors)
self.assertEqual(len(required_fields), len(form.errors))
def test_create_new_sponsor_benefit_for_sponsorship(self):
form = SponsorBenefitAdminInlineForm(data=self.data)
self.assertTrue(form.is_valid(), form.errors)
sponsor_benefit = form.save()
sponsor_benefit.refresh_from_db()
self.assertEqual(sponsor_benefit.sponsorship, self.sponsorship)
self.assertEqual(sponsor_benefit.sponsorship_benefit, self.benefit)
self.assertEqual(sponsor_benefit.name, self.benefit.name)
self.assertEqual(sponsor_benefit.description, self.benefit.description)
self.assertEqual(sponsor_benefit.program, self.benefit.program)
self.assertEqual(sponsor_benefit.benefit_internal_value, 200)
def test_update_existing_sponsor_benefit(self):
sponsor_benefit = baker.make(
SponsorBenefit,
sponsorship=self.sponsorship,
sponsorship_benefit=self.benefit,
)
new_benefit = baker.make(SponsorshipBenefit, a_la_carte=True)
self.data["sponsorship_benefit"] = new_benefit.pk
form = SponsorBenefitAdminInlineForm(data=self.data, instance=sponsor_benefit)
self.assertTrue(form.is_valid(), form.errors)
form.save()
sponsor_benefit.refresh_from_db()
self.assertEqual(1, SponsorBenefit.objects.count())
self.assertEqual(sponsor_benefit.sponsorship, self.sponsorship)
self.assertEqual(sponsor_benefit.sponsorship_benefit, new_benefit)
self.assertEqual(sponsor_benefit.name, new_benefit.name)
self.assertEqual(sponsor_benefit.description, new_benefit.description)
self.assertEqual(sponsor_benefit.program, new_benefit.program)
self.assertEqual(sponsor_benefit.benefit_internal_value, 200)
self.assertTrue(sponsor_benefit.added_by_user)
self.assertTrue(sponsor_benefit.a_la_carte)
    def test_do_not_update_sponsorship_if_it_doesnt_change(self):
sponsor_benefit = baker.make(
SponsorBenefit,
sponsorship=self.sponsorship,
sponsorship_benefit=self.benefit,
)
form = SponsorBenefitAdminInlineForm(data=self.data, instance=sponsor_benefit)
self.assertTrue(form.is_valid(), form.errors)
form.save()
sponsor_benefit.refresh_from_db()
self.benefit.name = "new name"
self.benefit.save()
self.assertEqual(1, SponsorBenefit.objects.count())
self.assertEqual(sponsor_benefit.sponsorship, self.sponsorship)
self.assertEqual(sponsor_benefit.sponsorship_benefit, self.benefit)
self.assertNotEqual(sponsor_benefit.name, "new name")
self.assertEqual(sponsor_benefit.benefit_internal_value, 200)
def test_update_existing_benefit_features(self):
sponsor_benefit = baker.make(
SponsorBenefit,
sponsorship=self.sponsorship,
sponsorship_benefit=self.benefit,
)
# existing benefit depends on logo
baker.make_recipe('sponsors.tests.logo_at_download_feature', sponsor_benefit=sponsor_benefit)
# new benefit requires text instead of logo
new_benefit = baker.make(SponsorshipBenefit)
baker.make(RequiredTextAssetConfiguration, benefit=new_benefit, internal_name='foo',
related_to=AssetsRelatedTo.SPONSORSHIP.value)
self.data["sponsorship_benefit"] = new_benefit.pk
form = SponsorBenefitAdminInlineForm(data=self.data, instance=sponsor_benefit)
self.assertTrue(form.is_valid(), form.errors)
form.save()
sponsor_benefit.refresh_from_db()
self.assertEqual(sponsor_benefit.features.count(), 1)
self.assertIsInstance(sponsor_benefit.features.get(), RequiredTextAsset)
class SponsorshipsFormTestCase(TestCase):
def test_list_all_sponsorships_as_choices_by_default(self):
sponsorships = baker.make(Sponsorship, _quantity=3)
form = SponsorshipsListForm()
qs = form.fields["sponsorships"].queryset
self.assertEqual(3, qs.count())
for sponsorship in sponsorships:
self.assertIn(sponsorship, qs)
def test_init_form_from_sponsorship_benefit(self):
benefit = baker.make(SponsorshipBenefit)
sponsor_benefit = baker.make(SponsorBenefit, sponsorship_benefit=benefit)
other_benefit = baker.make(SponsorshipBenefit)
baker.make(SponsorBenefit, sponsorship_benefit=other_benefit)
form = SponsorshipsListForm.with_benefit(benefit)
with self.assertNumQueries(1):
qs = list(form.fields["sponsorships"].queryset)
self.assertEqual(1, len(qs))
self.assertIn(sponsor_benefit.sponsorship, qs)
self.assertEqual(benefit, form.sponsorship_benefit)
class SponsorContactFormTests(TestCase):
def test_ensure_model_form_configuration(self):
expected_fields = ["name", "email", "phone", "primary", "administrative", "accounting"]
meta = SponsorContactForm._meta
self.assertEqual(set(expected_fields), set(meta.fields))
self.assertEqual(SponsorContact, meta.model)
class SendSponsorshipNotificationFormTests(TestCase):
def setUp(self):
self.notification = baker.make("sponsors.SponsorEmailNotificationTemplate")
self.data = {
"notification": self.notification.pk,
"contact_types": [SponsorContact.MANAGER_CONTACT, SponsorContact.ADMINISTRATIVE_CONTACT],
}
def test_required_fields(self):
required_fields = set(["__all__", "contact_types"])
form = SendSponsorshipNotificationForm({})
self.assertFalse(form.is_valid())
self.assertEqual(required_fields, set(form.errors))
def test_get_contact_types_list(self):
form = SendSponsorshipNotificationForm(self.data)
self.assertTrue(form.is_valid())
self.assertEqual(self.data["contact_types"], form.cleaned_data["contact_types"])
self.assertEqual(self.notification, form.get_notification())
def test_form_error_if_notification_and_email_custom_content(self):
self.data["content"] = "email content"
form = SendSponsorshipNotificationForm(self.data)
self.assertFalse(form.is_valid())
self.assertIn("__all__", form.errors)
def test_form_error_if_not_notification_and_neither_custom_content(self):
self.data.pop("notification")
form = SendSponsorshipNotificationForm(self.data)
self.assertFalse(form.is_valid())
self.assertIn("__all__", form.errors)
def test_validate_form_with_custom_content(self):
self.data.pop("notification")
self.data.update({"content": "content", "subject": "subject"})
form = SendSponsorshipNotificationForm(self.data)
self.assertTrue(form.is_valid())
notification = form.get_notification()
self.assertEqual("content", notification.content)
self.assertEqual("subject", notification.subject)
self.assertIsNone(notification.pk)
class SponsorRequiredAssetsFormTest(TestCase):
def setUp(self):
self.sponsorship = baker.make(Sponsorship, sponsor__name="foo")
self.required_text_cfg = baker.make(
RequiredTextAssetConfiguration,
related_to=AssetsRelatedTo.SPONSORSHIP.value,
internal_name="Text Input",
_fill_optional=True,
)
self.required_img_cfg = baker.make(
RequiredImgAssetConfiguration,
related_to=AssetsRelatedTo.SPONSOR.value,
internal_name="Image Input",
_fill_optional=True,
)
self.benefits = baker.make(
SponsorBenefit, sponsorship=self.sponsorship, _quantity=3
)
def test_build_form_with_no_fields_if_no_required_asset(self):
form = SponsorRequiredAssetsForm(instance=self.sponsorship)
self.assertEqual(len(form.fields), 0)
self.assertFalse(form.has_input)
def test_build_form_fields_from_required_assets(self):
text_asset = self.required_text_cfg.create_benefit_feature(self.benefits[0])
img_asset = self.required_img_cfg.create_benefit_feature(self.benefits[1])
form = SponsorRequiredAssetsForm(instance=self.sponsorship)
fields = dict(form.fields)
self.assertEqual(len(fields), 2)
self.assertEqual(type(text_asset.as_form_field()), type(fields["text_input"]))
self.assertEqual(type(img_asset.as_form_field()), type(fields["image_input"]))
self.assertTrue(form.has_input)
def test_build_form_fields_from_specific_list_of_required_assets(self):
text_asset = self.required_text_cfg.create_benefit_feature(self.benefits[0])
img_asset = self.required_img_cfg.create_benefit_feature(self.benefits[1])
form = SponsorRequiredAssetsForm(instance=self.sponsorship, required_assets_ids=[text_asset.pk])
fields = dict(form.fields)
self.assertEqual(len(fields), 1)
self.assertEqual(type(text_asset.as_form_field()), type(fields["text_input"]))
def test_save_info_for_text_asset(self):
text_asset = self.required_text_cfg.create_benefit_feature(self.benefits[0])
data = {"text_input": "submitted data"}
form = SponsorRequiredAssetsForm(instance=self.sponsorship, data=data)
self.assertTrue(form.is_valid())
form.update_assets()
self.assertEqual("submitted data", text_asset.value)
def test_save_info_for_image_asset(self):
img_asset = self.required_img_cfg.create_benefit_feature(self.benefits[0])
files = {"image_input": get_static_image_file_as_upload("psf-logo.png", "logo.png")}
form = SponsorRequiredAssetsForm(instance=self.sponsorship, data={}, files=files)
self.assertTrue(form.is_valid())
form.update_assets()
asset = ImgAsset.objects.get()
expected_url = f"/media/sponsors-app-assets/{asset.uuid}.png"
self.assertEqual(expected_url, img_asset.value.url)
    def test_load_initial_from_assets_and_force_field_if_previous_data(self):
img_asset = self.required_img_cfg.create_benefit_feature(self.benefits[0])
text_asset = self.required_text_cfg.create_benefit_feature(self.benefits[0])
files = {"image_input": get_static_image_file_as_upload("psf-logo.png", "logo.png")}
form = SponsorRequiredAssetsForm(instance=self.sponsorship, data={"text_input": "data"}, files=files)
self.assertTrue(form.is_valid())
form.update_assets()
form = SponsorRequiredAssetsForm(instance=self.sponsorship, data={}, files=files)
self.assertTrue(form.fields["image_input"].initial)
self.assertTrue(form.fields["text_input"].initial)
self.assertTrue(form.fields["text_input"].required)
self.assertTrue(form.fields["image_input"].required)
def test_raise_error_if_form_initialized_without_instance(self):
self.assertRaises(TypeError, SponsorRequiredAssetsForm)
class SponsorshipBenefitAdminFormTests(TestCase):
def setUp(self):
self.program = baker.make("sponsors.SponsorshipProgram")
def test_required_fields(self):
required = {"name", "program"}
form = SponsorshipBenefitAdminForm(data={})
self.assertFalse(form.is_valid())
self.assertEqual(set(form.errors), required)
def test_a_la_carte_benefit_cannot_have_package(self):
data = {"name": "benefit", "program": self.program.pk, "a_la_carte": True}
form = SponsorshipBenefitAdminForm(data=data)
self.assertTrue(form.is_valid())
package = baker.make("sponsors.SponsorshipPackage")
data["packages"] = [package.pk]
form = SponsorshipBenefitAdminForm(data=data)
self.assertFalse(form.is_valid())
self.assertIn("__all__", form.errors)
|
|
"""
Copyright (c) 2015 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from __future__ import print_function, absolute_import, unicode_literals
from functools import wraps
import contextlib
import copy
import json
import logging
import os
import os.path
import re
import shutil
import string
import subprocess
import sys
import tempfile
import tarfile
import time
import requests
from collections import namedtuple
from datetime import datetime
from io import BytesIO
from hashlib import sha256
from osbs.repo_utils import RepoConfiguration, RepoInfo, AdditionalTagsConfig
from osbs.constants import (OS_CONFLICT_MAX_RETRIES, OS_CONFLICT_WAIT,
GIT_MAX_RETRIES, GIT_BACKOFF_FACTOR, GIT_FETCH_RETRY,
OS_NOT_FOUND_MAX_RETRIES, OS_NOT_FOUND_MAX_WAIT,
USER_WARNING_LEVEL, USER_WARNING_LEVEL_NAME)
# This was moved to a separate file - import here for external API compatibility
from osbs.utils.labels import Labels # noqa: F401
from six.moves import http_client
from six.moves.urllib.parse import urlparse
try:
# py3
if not hasattr(datetime.now(), 'timestamp'):
raise ImportError
import dateutil.parser
except ImportError:
# py2 workaround in get_time_from_rfc3339() below
from time import strptime
from calendar import timegm
from dockerfile_parse import DockerfileParser
from osbs.exceptions import (OsbsException, OsbsResponseException,
OsbsValidationException, OsbsCommitNotFound)
logger = logging.getLogger(__name__)
ClonedRepoData = namedtuple('ClonedRepoData', ['repo_path', 'commit_id', 'commit_depth'])
class RegistryURI(object):
# Group 0: URI without path -- allowing empty value -- including:
# - Group 1: optional 'http://' / 'https://'
# - Group 2: hostname and port
# Group 3: path, including:
# - Group 4: optional API version, 'v' followed by a number
versionre = re.compile(r'((https?://)?([^/]*))(/(v\d+)?)?$')
def __init__(self, uri):
match = self.versionre.match(uri)
if not match:
raise ValueError('Invalid registry URI {}'.format(uri))
groups = match.groups()
self.docker_uri = groups[2]
self.version = groups[4] or 'v2'
self.scheme = groups[1] or ''
if self.version == 'v1':
raise OsbsValidationException('Invalid API version requested in {}'.format(uri))
@property
def uri(self):
return self.scheme + self.docker_uri
def __repr__(self):
return self.uri
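# Illustrative usage of RegistryURI (editor's example, values are hypothetical):
#
#   RegistryURI('https://registry.example.com:5000/v2').uri   # -> 'https://registry.example.com:5000'
#   RegistryURI('registry.example.com').version               # -> 'v2' (default when no version path)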
class TarWriter(object):
def __init__(self, outfile, directory=None):
mode = "w|bz2"
if hasattr(outfile, "write"):
self.tarfile = tarfile.open(fileobj=outfile, mode=mode)
else:
self.tarfile = tarfile.open(name=outfile, mode=mode)
self.directory = directory or ""
def __enter__(self):
return self
def __exit__(self, typ, val, tb):
self.tarfile.close()
def write_file(self, name, content):
buf = BytesIO(content)
arcname = os.path.join(self.directory, name)
ti = tarfile.TarInfo(arcname)
ti.size = len(content)
self.tarfile.addfile(ti, fileobj=buf)
class TarReader(object):
TarFile = namedtuple('TarFile', ['filename', 'fileobj'])
def __init__(self, infile):
mode = "r|bz2"
if hasattr(infile, "read"):
self.tarfile = tarfile.open(fileobj=infile, mode=mode)
else:
self.tarfile = tarfile.open(name=infile, mode=mode)
def __iter__(self):
return self
def __next__(self):
ti = self.tarfile.next() # pylint: disable=next-method-called
if ti is None:
self.close()
raise StopIteration()
return self.TarFile(ti.name, self.tarfile.extractfile(ti))
next = __next__ # py2 compatibility
def close(self):
self.tarfile.close()
def graceful_chain_get(d, *args):
if not d:
return None
t = copy.deepcopy(d)
for arg in args:
try:
t = t[arg]
except (IndexError, KeyError):
return None
return t
def graceful_chain_del(d, *args):
if not d:
return
for arg in args[:-1]:
try:
d = d[arg]
except (IndexError, KeyError):
return
try:
del d[args[-1]]
except (IndexError, KeyError):
pass
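# Illustrative usage of the graceful chain helpers (editor's example, data is hypothetical):
#
#   build = {'spec': {'triggers': [{'type': 'ImageChange'}]}}
#   graceful_chain_get(build, 'spec', 'triggers')   # -> [{'type': 'ImageChange'}]
#   graceful_chain_get(build, 'spec', 'missing')    # -> None instead of raising KeyError
#   graceful_chain_del(build, 'spec', 'triggers')   # deletes the key, silently ignores misses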
def has_triggers(build_config):
return graceful_chain_get(build_config, 'spec', 'triggers') is not None
def clean_triggers(orig, new):
if not has_triggers(new) and has_triggers(orig):
orig['spec']['triggers'] = [t for t in orig['spec']['triggers']
if t.get('type', None) != 'ImageChange']
def buildconfig_update(orig, new, remove_nonexistent_keys=False):
"""Performs update of given `orig` BuildConfig with values from `new` BuildConfig.
Both BuildConfigs have to be represented as `dict`s.
This function:
- adds all key/value pairs to `orig` from `new` that are missing
- replaces values in `orig` for keys that are in both
- removes key/value pairs from `orig` for keys that are not in `new`,
but only in dicts nested inside `strategy` key
(see https://github.com/containerbuildsystem/osbs-client/pull/273#issuecomment-148038314)
"""
if isinstance(orig, dict) and isinstance(new, dict):
clean_triggers(orig, new)
if remove_nonexistent_keys:
missing = set(orig.keys()) - set(new.keys())
for k in missing:
orig.pop(k)
for k, v in new.items():
if k == 'strategy':
remove_nonexistent_keys = True
if isinstance(orig.get(k), dict) and isinstance(v, dict):
buildconfig_update(orig[k], v, remove_nonexistent_keys)
else:
orig[k] = v
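# Illustrative behaviour of buildconfig_update (editor's example, dicts are hypothetical):
#
#   orig = {'spec': {'strategy': {'a': 1, 'b': 2}, 'keep': True}}
#   new = {'spec': {'strategy': {'a': 3}}}
#   buildconfig_update(orig, new)
#   # orig['spec']['strategy'] == {'a': 3}   (keys under 'strategy' missing from `new` are dropped)
#   # orig['spec']['keep'] is still True     (keys outside 'strategy' are preserved)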
@contextlib.contextmanager
def checkout_git_repo(git_url, target_dir=None, commit=None, retry_times=GIT_MAX_RETRIES,
branch=None, depth=None):
"""
clone provided git repo to target_dir, optionally checkout provided commit
yield the ClonedRepoData and delete the repo when finished
:param git_url: str, git repo to clone
:param target_dir: str, filesystem path where the repo should be cloned
:param commit: str, commit to checkout, SHA-1 or ref
:param retry_times: int, number of retries for git clone
:param branch: str, optional branch of the commit, required if depth is provided
:param depth: int, optional expected depth
    :return: ClonedRepoData, namedtuple of (repo_path, commit_id, commit_depth)
"""
tmpdir = tempfile.mkdtemp()
target_dir = target_dir or os.path.join(tmpdir, "repo")
try:
yield clone_git_repo(git_url, target_dir, commit, retry_times, branch, depth)
finally:
shutil.rmtree(tmpdir)
def clone_git_repo(git_url, target_dir=None, commit=None, retry_times=GIT_MAX_RETRIES, branch=None,
depth=None):
"""
clone provided git repo to target_dir, optionally checkout provided commit
:param git_url: str, git repo to clone
:param target_dir: str, filesystem path where the repo should be cloned
:param commit: str, commit to checkout, SHA-1 or ref
:param retry_times: int, number of retries for git clone
:param branch: str, optional branch of the commit, required if depth is provided
:param depth: int, optional expected depth
    :return: ClonedRepoData, namedtuple of (repo_path, commit_id, commit_depth)
"""
retry_delay = GIT_BACKOFF_FACTOR
target_dir = target_dir or os.path.join(tempfile.mkdtemp(), "repo")
commit = commit or "master"
logger.info("cloning git repo '%s'", git_url)
logger.debug("url = '%s', dir = '%s', commit = '%s'",
git_url, target_dir, commit)
cmd = ["git", "clone"]
if branch:
cmd += ["-b", branch, "--single-branch"]
if depth:
cmd += ["--depth", str(depth)]
elif depth:
logger.warning("branch not provided for %s, depth setting ignored", git_url)
depth = None
cmd += [git_url, target_dir]
logger.debug("cloning '%s'", cmd)
repo_commit = ''
repo_depth = None
for counter in range(retry_times + 1):
try:
# we are using check_output, even though we aren't using
# the return value, but we will get 'output' in exception
subprocess.check_output(cmd, stderr=subprocess.STDOUT)
try:
repo_commit, repo_depth = reset_git_repo(target_dir, commit, depth)
except OsbsCommitNotFound as exc:
raise OsbsCommitNotFound("Commit {} is not reachable in branch {}, reason: {}"
.format(commit, branch, exc))
break
except subprocess.CalledProcessError as exc:
if counter != retry_times:
logger.info("retrying command '%s':\n '%s'", cmd, exc.output)
time.sleep(retry_delay * (2 ** counter))
else:
raise OsbsException("Unable to clone git repo '%s' "
"branch '%s'" % (git_url, branch),
cause=exc, traceback=sys.exc_info()[2])
return ClonedRepoData(target_dir, repo_commit, repo_depth)
def reset_git_repo(target_dir, git_reference, retry_depth=None):
"""
hard reset git clone in target_dir to given git_reference
:param target_dir: str, filesystem path where the repo is cloned
:param git_reference: str, any valid git reference
    :param retry_depth: int, if the repo was cloned shallowly (with ``--depth``), this is
                        the expected depth of the commit
:return: str and int, commit ID of HEAD and commit depth of git_reference
"""
deepen = retry_depth or 0
base_commit_depth = 0
for _ in range(GIT_FETCH_RETRY):
try:
if not deepen:
cmd = ['git', 'rev-list', '--count', git_reference]
base_commit_depth = int(subprocess.check_output(cmd, cwd=target_dir)) - 1
cmd = ["git", "reset", "--hard", git_reference]
logger.debug("Resetting current HEAD: '%s'", cmd)
subprocess.check_call(cmd, cwd=target_dir)
break
except subprocess.CalledProcessError:
if not deepen:
raise OsbsCommitNotFound('cannot find commit {} in repo {}'.format(
git_reference, target_dir))
deepen *= 2
cmd = ["git", "fetch", "--depth", str(deepen)]
subprocess.check_call(cmd, cwd=target_dir)
logger.debug("Couldn't find commit %s, increasing depth with '%s'", git_reference,
cmd)
else:
raise OsbsCommitNotFound('cannot find commit {} in repo {}'.format(
git_reference, target_dir))
cmd = ["git", "rev-parse", "HEAD"]
logger.debug("getting SHA-1 of provided ref '%s'", git_reference)
commit_id = subprocess.check_output(cmd, cwd=target_dir, universal_newlines=True)
commit_id = commit_id.strip()
logger.info("commit ID = %s", commit_id)
final_commit_depth = None
if not deepen:
cmd = ['git', 'rev-list', '--count', 'HEAD']
final_commit_depth = int(subprocess.check_output(cmd, cwd=target_dir)) - base_commit_depth
return commit_id, final_commit_depth
@contextlib.contextmanager
def paused_builds(osbs, quota_name=None, ignore_quota_errors=False):
try:
logger.info("pausing builds")
try:
osbs.pause_builds(quota_name=quota_name)
except OsbsResponseException as e:
if ignore_quota_errors and (e.status_code == requests.codes.FORBIDDEN):
logger.warning("Ignoring resourcequota error")
else:
raise
yield osbs
finally:
logger.info("resuming builds")
try:
osbs.resume_builds(quota_name=quota_name)
except OsbsResponseException as e:
if ignore_quota_errors and (e.status_code == requests.codes.FORBIDDEN):
logger.warning("Ignoring resourcequota error")
else:
raise
def looks_like_git_hash(git_ref):
return all(ch in string.hexdigits for ch in git_ref) and len(git_ref) == 40
def get_repo_info(git_uri, git_ref, git_branch=None, depth=None):
with checkout_git_repo(git_uri, commit=git_ref, branch=git_branch,
depth=depth) as code_dir_info:
code_dir = code_dir_info.repo_path
depth = code_dir_info.commit_depth
dfp = DockerfileParser(os.path.join(code_dir), cache_content=True)
config = RepoConfiguration(git_uri=git_uri, git_ref=git_ref, git_branch=git_branch,
dir_path=code_dir, depth=depth)
tags_config = AdditionalTagsConfig(dir_path=code_dir,
tags=config.container.get('tags', set()))
repo_info = RepoInfo(dfp, config, tags_config)
return repo_info
def git_repo_humanish_part_from_uri(git_uri):
git_uri = git_uri.rstrip('/')
if git_uri.endswith("/.git"):
git_uri = git_uri[:-5]
elif git_uri.endswith(".git"):
git_uri = git_uri[:-4]
return os.path.basename(git_uri)
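# Illustrative usage (editor's example, URIs are hypothetical):
#
#   git_repo_humanish_part_from_uri('https://example.com/foo/bar.git')   # -> 'bar'
#   git_repo_humanish_part_from_uri('https://example.com/foo/bar/')      # -> 'bar'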
def get_time_from_rfc3339(rfc3339):
"""
return time tuple from an RFC 3339-formatted time string
:param rfc3339: str, time in RFC 3339 format
:return: float, seconds since the Epoch
"""
try:
# py 3
dt = dateutil.parser.parse(rfc3339, ignoretz=False)
return dt.timestamp()
except NameError:
# py 2
# Decode the RFC 3339 date with no fractional seconds (the
# format Origin provides). Note that this will fail to parse
# valid ISO8601 timestamps not in this exact format.
time_tuple = strptime(rfc3339, '%Y-%m-%dT%H:%M:%SZ')
return timegm(time_tuple)
def utcnow():
"""
Return current time in UTC.
This function is created to make mocking in unit tests easier.
"""
return datetime.utcnow()
VALID_BUILD_CONFIG_NAME_CHARS = re.compile('[-a-z0-9]')
VALID_LABEL_CHARS = re.compile(r'[-a-z0-9\.]')
LABEL_MAX_CHARS = 63
def sanitize_strings_for_openshift(str1, str2='', limit=LABEL_MAX_CHARS, separator='-',
label=True):
"""
    OpenShift requires labels to be no more than 63 characters and forbids any characters other
    than alphanumerics, ., and -. BuildConfig names are similar, but additionally cannot
    contain '.'.
    Sanitize and concatenate one or two strings to meet OpenShift's requirements. Include an
    equal number of characters from both strings if the combined length is more than the limit.
"""
filter_chars = VALID_LABEL_CHARS if label else VALID_BUILD_CONFIG_NAME_CHARS
str1_san = ''.join(filter(filter_chars.match, list(str1)))
str2_san = ''.join(filter(filter_chars.match, list(str2)))
str1_chars = []
str2_chars = []
groups = ((str1_san, str1_chars), (str2_san, str2_chars))
size = len(separator)
limit = min(limit, LABEL_MAX_CHARS)
for i in range(max(len(str1_san), len(str2_san))):
for group, group_chars in groups:
if i < len(group):
group_chars.append(group[i])
size += 1
if size >= limit:
break
else:
continue
break
final_str1 = ''.join(str1_chars).strip(separator)
final_str2 = ''.join(str2_chars).strip(separator)
return separator.join(filter(None, (final_str1, final_str2)))
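# Illustrative usage (editor's example, strings are hypothetical): characters outside the
# allowed set (e.g. uppercase, '_', '/') are dropped, and when the combined length exceeds
# the limit, characters are taken alternately from both strings.
#
#   sanitize_strings_for_openshift('osbs-client', 'master', limit=30)   # -> 'osbs-client-master'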
def make_name_from_git(repo, branch, limit=53, separator='-', hash_size=5):
"""
return name string representing the given git repo and branch
to be used as a build name.
NOTE: Build name will be used to generate pods which have a
limit of 64 characters and is composed as:
<buildname>-<buildnumber>-<podsuffix>
rhel7-1-build
Assuming '-XXXX' (5 chars) and '-build' (6 chars) as default
suffixes, name should be limited to 53 chars (64 - 11).
OpenShift is very peculiar in which BuildConfig names it
allows. For this reason, only certain characters are allowed.
Any disallowed characters will be removed from repo and
branch names.
:param repo: str, the git repository to be used
:param branch: str, the git branch to be used
:param limit: int, max name length
:param separator: str, used to separate the repo and branch in name
:return: str, name representing git repo and branch.
"""
branch = branch or 'unknown'
full = urlparse(repo).path.lstrip('/') + branch
repo = git_repo_humanish_part_from_uri(repo)
shaval = sha256(full.encode('utf-8')).hexdigest()
hash_str = shaval[:hash_size]
limit = limit - len(hash_str) - 1
sanitized = sanitize_strings_for_openshift(repo, branch, limit, separator, False)
return separator.join(filter(None, (sanitized, hash_str)))
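# Illustrative shape of the result (editor's example; the hash shown is made up):
#
#   make_name_from_git('https://github.com/containerbuildsystem/osbs-client.git', 'master')
#   # -> something like 'osbs-client-master-1a2b3', i.e. '<repo>-<branch>-' followed by the
#   #    first 5 hex digits of a SHA-256 over the repo path plus branch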
def wrap_name_from_git(prefix, suffix, *args, **kwargs):
"""
wraps the result of make_name_from_git in a suffix and postfix
adding separators for each.
see docstring for make_name_from_git for a full list of parameters
"""
# 64 is maximum length allowed by OpenShift
# 2 is the number of dashes that will be added
prefix = ''.join(filter(VALID_BUILD_CONFIG_NAME_CHARS.match, list(prefix)))
suffix = ''.join(filter(VALID_BUILD_CONFIG_NAME_CHARS.match, list(suffix)))
kwargs['limit'] = kwargs.get('limit', 64) - len(prefix) - len(suffix) - 2
name_from_git = make_name_from_git(*args, **kwargs)
return '-'.join([prefix, name_from_git, suffix])
def get_instance_token_file_name(instance):
"""Return the token file name for the given instance."""
return '{}/.osbs/{}.token'.format(os.path.expanduser('~'), instance)
def sanitize_version(version):
"""
Take parse_version() output and standardize output from older
setuptools' parse_version() to match current setuptools.
"""
if hasattr(version, 'base_version'):
if version.base_version:
parts = version.base_version.split('.')
else:
parts = []
else:
parts = []
for part in version:
if part.startswith('*'):
break
parts.append(part)
parts = [int(p) for p in parts]
if len(parts) < 3:
parts += [0] * (3 - len(parts))
major, minor, micro = parts[:3]
cleaned_version = '{}.{}.{}'.format(major, minor, micro)
return cleaned_version
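# Illustrative usage (editor's example; assumes a setuptools where parse_version() returns
# objects exposing `base_version`):
#
#   from pkg_resources import parse_version
#   sanitize_version(parse_version('1.2'))          # -> '1.2.0'
#   sanitize_version(parse_version('1.2.3.dev4'))   # -> '1.2.3'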
def retry_on_conflict(func):
@wraps(func)
def retry(*args, **kwargs):
# Only retry when OsbsResponseException was raised due to a conflict
def should_retry_cb(ex):
return ex.status_code == http_client.CONFLICT
retry_func = RetryFunc(OsbsResponseException, should_retry_cb=should_retry_cb)
return retry_func.go(func, *args, **kwargs)
return retry
def retry_on_not_found(func):
@wraps(func)
def retry(*args, **kwargs):
# Only retry when OsbsResponseException was raised due to not found
def should_retry_cb(ex):
return ex.status_code == http_client.NOT_FOUND
retry_func = RetryFunc(OsbsResponseException, should_retry_cb=should_retry_cb,
retry_times=OS_NOT_FOUND_MAX_RETRIES,
retry_delay=OS_NOT_FOUND_MAX_WAIT)
return retry_func.go(func, *args, **kwargs)
return retry
def retry_on_gateway_timeout(func):
@wraps(func)
def retry(*args, **kwargs):
# Only retry when OsbsResponseException was raised due to gateway error
def should_retry_cb(ex):
return ex.status_code == http_client.GATEWAY_TIMEOUT
retry_func = RetryFunc(OsbsResponseException, should_retry_cb=should_retry_cb,
retry_times=OS_NOT_FOUND_MAX_RETRIES,
retry_delay=OS_NOT_FOUND_MAX_WAIT)
return retry_func.go(func, *args, **kwargs)
return retry
def retry_on_exception(exception_type):
def do_retry_on_exception(func):
@wraps(func)
def retry(*args, **kwargs):
return RetryFunc(exception_type).go(func, *args, **kwargs)
return retry
return do_retry_on_exception
def user_warning_log_handler(self, message):
"""
Take arguments to transform them into JSON data
and send them into the logger with USER_WARNING level
"""
assert isinstance(message, str)
content = {
'message': message,
}
msg = json.dumps(content)
self._log(USER_WARNING_LEVEL, msg, None)
class RetryFunc(object):
def __init__(self, exception_type, should_retry_cb=None,
retry_times=OS_CONFLICT_MAX_RETRIES, retry_delay=OS_CONFLICT_WAIT):
self.exception_type = exception_type
self.should_retry_cb = should_retry_cb or (lambda ex: True)
self.retry_times = retry_times
self.retry_delay = retry_delay
def go(self, func, *args, **kwargs):
for counter in range(self.retry_times + 1):
try:
return func(*args, **kwargs)
except self.exception_type as ex:
if self.should_retry_cb(ex) and counter != self.retry_times:
logger.info("retrying on exception: %s", ex.message)
logger.debug("attempt %d to call %s", counter + 1, func.__name__)
time.sleep(self.retry_delay * (2 ** counter))
else:
raise
class ImageName(object):
def __init__(self, registry=None, namespace=None, repo=None, tag=None):
self.registry = registry
self.namespace = namespace
self.repo = repo
self.tag = tag or 'latest'
@classmethod
def parse(cls, image_name):
result = cls()
if isinstance(image_name, cls):
logger.debug("Attempting to parse ImageName %s as an ImageName", image_name)
return image_name
# registry.org/namespace/repo:tag
s = image_name.split('/', 2)
if len(s) == 2:
if '.' in s[0] or ':' in s[0]:
result.registry = s[0]
else:
result.namespace = s[0]
elif len(s) == 3:
result.registry = s[0]
result.namespace = s[1]
result.repo = s[-1]
for sep in '@:':
try:
result.repo, result.tag = result.repo.rsplit(sep, 1)
except ValueError:
continue
break
return result
def to_str(self, registry=True, tag=True, explicit_tag=False,
explicit_namespace=False):
if self.repo is None:
raise RuntimeError('No image repository specified')
result = self.get_repo(explicit_namespace)
if tag and self.tag and ':' in self.tag:
result = '{0}@{1}'.format(result, self.tag)
elif tag and self.tag:
result = '{0}:{1}'.format(result, self.tag)
elif tag and explicit_tag:
result = '{0}:{1}'.format(result, 'latest')
if registry and self.registry:
result = '{0}/{1}'.format(self.registry, result)
return result
def get_repo(self, explicit_namespace=False):
result = self.repo
if self.namespace:
result = '{0}/{1}'.format(self.namespace, result)
elif explicit_namespace:
result = '{0}/{1}'.format('library', result)
return result
def enclose(self, organization):
if self.namespace == organization:
return
repo_parts = [self.repo]
if self.namespace:
repo_parts.insert(0, self.namespace)
self.namespace = organization
self.repo = '-'.join(repo_parts)
def __str__(self):
return self.to_str(registry=True, tag=True)
def __repr__(self):
return (
"ImageName(registry={s.registry!r}, namespace={s.namespace!r},"
" repo={s.repo!r}, tag={s.tag!r})"
).format(s=self)
def __eq__(self, other):
return (type(self) == type(other) and # pylint: disable=unidiomatic-typecheck
self.__dict__ == other.__dict__)
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash(self.to_str())
def copy(self):
return ImageName(
registry=self.registry,
namespace=self.namespace,
repo=self.repo,
tag=self.tag)
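# Illustrative usage of ImageName (editor's example, names are hypothetical):
#
#   image = ImageName.parse('registry.example.com/osbs/buildroot:latest')
#   image.registry, image.namespace, image.repo, image.tag
#   # -> ('registry.example.com', 'osbs', 'buildroot', 'latest')
#   image.to_str()   # -> 'registry.example.com/osbs/buildroot:latest'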
class UserWarningsStore(object):
def __init__(self):
# (asctime (platform:arch)? - name) - levelname - message
self.regex = r' - '.join((r'^.+', USER_WARNING_LEVEL_NAME, r'(\{.*\})$'))
self._user_warnings = set()
def is_user_warning(self, line):
return re.match(self.regex, line)
def store(self, line):
"""
Extract data from given log record with USER_WARNING level
and store an understandable message in set
"""
data_search = re.search(self.regex, line)
if not data_search:
            message = 'Log line does not match the expected user warning format: %s'
logger.error(message, line)
return
try:
data = json.loads(data_search.group(1))
except ValueError:
message = 'Incorrect JSON data input for a user warning: %s'
logger.error(message, data_search.group(1))
return
message = data['message']
self._user_warnings.add(message)
def __iter__(self):
for user_warning in self._user_warnings:
yield user_warning
def __str__(self):
return '\n'.join(self._user_warnings)
def __len__(self):
return len(self._user_warnings)
def __bool__(self):
return bool(self._user_warnings)
def stringify_values(d):
"""All non-string values in dictionary will be json serialized.
Example of usage is for openshift annotations which must be strings only.
:param dict d: dict with values of various types
:return: new dict with values converted to string
"""
assert isinstance(d, dict)
return {
k: val if isinstance(val, str) else json.dumps(val)
for k, val in d.items()
}
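# Illustrative usage (editor's example, data is hypothetical):
#
#   stringify_values({'count': 3, 'name': 'x', 'meta': {'ok': True}})
#   # -> {'count': '3', 'name': 'x', 'meta': '{"ok": true}'}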
|
|
import math
import os.path
from PyQt4 import Qt
from lttngc import utils, model
from lttngc.power2_spinbox import QLttngcPowerTwoSpinBox
class QLttngcAddChannelDialog(utils.QCommonDialog, utils.QtUiLoad,
utils.AcceptDialog):
_UI_NAME = 'add-channel'
_ACCEPT_ICON_NAME = 'add'
def __init__(self, initial_domain, suggested_name):
super().__init__()
self._setup_ui()
self._setup_signals()
self._set_initial_config(initial_domain, suggested_name)
def _set_initial_config(self, domain, name):
default_channel_attr = model.ChannelAttributes.create_default(domain)
self.set_domain_channel_attributes(domain, default_channel_attr)
self._name_edit.setText(name)
self._set_default_focus()
def _setup_ui(self):
self._load_ui()
self._setup_spinbox()
self._setup_icons()
self._advanced_options_box.hide()
        self.layout().setSizeConstraint(Qt.QLayout.SetFixedSize)
def _setup_spinbox(self):
self._subbuf_count_spinbox = QLttngcPowerTwoSpinBox()
self._subbuf_count_spinbox.setMinimum(1)
self._subbuf_count_spinbox.setMaximum(1024 * 1024)
self._subbuf_count_spinbox.setToolTip('Number of sub-buffers')
self._subbuf_layout.insertWidget(0, self._subbuf_count_spinbox)
def _set_default_focus(self):
self._name_edit.setFocus()
def _setup_icons(self):
self._init_icons()
utils.set_widget_icons([
(self._kernel_radio, 'tux'),
(self._user_radio, 'user'),
])
def _set_kernel_visibility(self):
self._kernel_radio.setChecked(True)
self._mmap_radio.setVisible(True)
self._splice_radio.setVisible(True)
self._global_radio.setVisible(True)
self._per_uid_radio.setVisible(False)
self._per_pid_radio.setVisible(False)
def _set_kernel_default_config(self):
domain = model.Domain.KERNEL
default_channel_attr = model.ChannelAttributes.create_default(domain)
self.set_domain_channel_attributes(domain, default_channel_attr)
self._set_default_focus()
def _set_user_visibility(self):
self._user_radio.setChecked(True)
self._mmap_radio.setVisible(True)
self._splice_radio.setVisible(False)
self._global_radio.setVisible(False)
self._per_uid_radio.setVisible(True)
self._per_pid_radio.setVisible(True)
def _set_user_default_config(self):
domain = model.Domain.USER
default_channel_attr = model.ChannelAttributes.create_default(domain)
self.set_domain_channel_attributes(domain, default_channel_attr)
self._set_default_focus()
def _setup_signals(self):
self._kernel_radio.clicked.connect(self._set_kernel_default_config)
self._user_radio.clicked.connect(self._set_user_default_config)
self._tracefile_ucount_usize_radio.clicked.connect(self._tracefile_ucount_usize_clicked)
self._tracefile_ucount_msize_radio.clicked.connect(self._tracefile_ucount_msize_clicked)
self._tracefile_mcount_msize_radio.clicked.connect(self._tracefile_mcount_msize_clicked)
self._read_timer_stopped_check.clicked.connect(self._read_timer_stopped_check_clicked)
self._switch_timer_stopped_check.clicked.connect(self._switch_timer_stopped_check_clicked)
self._show_advanced_options_btn.clicked.connect(self._show_advanced_options)
def _show_advanced_options(self):
self._show_advanced_options_btn.hide()
self._advanced_options_box.show()
def _tracefile_ucount_usize_clicked(self):
self._tracefile_count_edit.setEnabled(False)
self._tracefile_size_edit.setEnabled(False)
self._tracefile_b_lbl.setEnabled(False)
def _tracefile_ucount_msize_clicked(self):
self._tracefile_count_edit.setEnabled(False)
self._tracefile_size_edit.setEnabled(True)
self._tracefile_b_lbl.setEnabled(True)
def _tracefile_mcount_msize_clicked(self):
self._tracefile_count_edit.setEnabled(True)
self._tracefile_size_edit.setEnabled(True)
self._tracefile_b_lbl.setEnabled(True)
def _read_timer_stopped_check_clicked(self, checked):
self._read_timer_period_edit.setEnabled(not checked)
self._read_timer_us_lbl.setEnabled(not checked)
def _switch_timer_stopped_check_clicked(self, checked):
self._switch_timer_period_edit.setEnabled(not checked)
self._switch_timer_us_lbl.setEnabled(not checked)
@property
def domain(self):
if self._kernel_radio.isChecked():
return model.Domain.KERNEL
else:
return model.Domain.USER
@property
def output_type(self):
if self._mmap_radio.isChecked():
return model.ChannelOutputType.MMAP
else:
return model.ChannelOutputType.SPLICE
@property
def mode(self):
if self._discard_radio.isChecked():
return model.ChannelMode.DISCARD
else:
return model.ChannelMode.OVERWRITE
@property
def buffer_scheme(self):
if self._global_radio.isChecked():
return model.ChannelBufferScheme.GLOBAL
elif self._per_uid_radio.isChecked():
return model.ChannelBufferScheme.PER_UID
else:
return model.ChannelBufferScheme.PER_PID
@property
def channel_attributes(self):
attr = model.ChannelAttributes()
attr.mode = self.mode
attr.subbuf_size = self._subbuf_size_edit.text()
attr.subbuf_count = self._subbuf_count_spinbox.value()
attr.output_type = self.output_type
attr.buffer_scheme = self.buffer_scheme
if self._tracefile_ucount_msize_radio.isChecked():
attr.tracefile_size = self._tracefile_size_edit.text()
elif self._tracefile_mcount_msize_radio.isChecked():
attr.tracefile_count = self._tracefile_count_edit.value()
attr.tracefile_size = self._tracefile_size_edit.text()
if not self._read_timer_stopped_check.isChecked():
sec = utils.usec_to_sec(int(self._read_timer_period_edit.text()))
attr.read_timer_interval = sec
if not self._switch_timer_stopped_check.isChecked():
sec = utils.usec_to_sec(int(self._switch_timer_period_edit.text()))
attr.switch_timer_interval = sec
return attr
def set_domain_channel_attributes(self, domain, attr):
# domain
if domain == model.Domain.KERNEL:
self._set_kernel_visibility()
else:
self._set_user_visibility()
# mode
if attr.mode == model.ChannelMode.DISCARD:
self._discard_radio.click()
else:
self._overwrite_radio.click()
# sub-buffer count and size
self._subbuf_count_spinbox.setValue(attr.subbuf_count)
b, prefix = utils.bytes_to_human_prefix(attr.subbuf_size)
if b == math.floor(b):
txt = '{}{}'.format(math.floor(b), prefix)
else:
txt = str(attr.subbuf_size)
self._subbuf_size_edit.setText(txt)
# output type
if attr.output_type == model.ChannelOutputType.MMAP:
self._mmap_radio.setChecked(True)
else:
self._splice_radio.setChecked(True)
# buffer scheme
if attr.buffer_scheme == model.ChannelBufferScheme.GLOBAL:
self._global_radio.setChecked(True)
elif attr.buffer_scheme == model.ChannelBufferScheme.PER_UID:
self._per_uid_radio.setChecked(True)
else:
self._per_pid_radio.setChecked(True)
# read timer interval
if attr.read_timer_interval is not None:
usec = utils.sec_to_usec(attr.read_timer_interval)
self._read_timer_period_edit.setText(str(usec))
self._read_timer_stopped_check.setChecked(False)
self._read_timer_period_edit.setEnabled(True)
else:
self._read_timer_period_edit.setText('')
self._read_timer_stopped_check.setChecked(True)
self._read_timer_period_edit.setEnabled(False)
# switch timer interval
if attr.switch_timer_interval is not None:
usec = utils.sec_to_usec(attr.switch_timer_interval)
self._switch_timer_period_edit.setText(str(usec))
self._switch_timer_stopped_check.setChecked(False)
self._switch_timer_period_edit.setEnabled(True)
else:
self._switch_timer_period_edit.setText('')
self._switch_timer_stopped_check.setChecked(True)
self._switch_timer_period_edit.setEnabled(False)
# trace files
if attr.tracefile_size is not None:
self._tracefile_ucount_msize_radio.click()
self._tracefile_size_edit.setText(str(attr.tracefile_size))
if attr.tracefile_count is not None:
self._tracefile_mcount_msize_radio.click()
self._tracefile_count_edit.setText(str(attr.tracefile_count))
else:
self._tracefile_ucount_usize_radio.click()
|
|
# L-System scene generator
# Cmpt 461 Final Project
# Allen Pike, [email protected]
# This program takes an L-System definition and outputs a .pbrt scene file of that model.
# The .pbrt scene file can be included in a master scene file and rendered with the
# raytracer pbrt.
## Todo:
# Implement command-line arguments.
# Make stochastic tree.
# Make field of stochastic trees.
# Default values.
import random
import sys
n = 4
delta = 22
axiom = 'F'
productions = {}
cylRadius = 0.1 # Initial radius of segments.
leafRadius = 0.05 # Radius of leaf segments.
shrinkFactor = 1.4 # Shrinking effect of branching.
print 'Texture "b" "color" "imagemap" "string filename" "bark.exr"'
print 'Texture "l" "color" "imagemap" "string filename" "leaf.exr"'
bark = 'Material "matte" "texture Kd" "b"'
leaf = 'Material "matte" "texture Kd" "l"'
# L-System definitions.
# Comment out one of these to have it generate that L-System.
#p10-a: Koch curve
n = 4
delta = 90.0
axiom = "F-F-F-F"
productions = {'F': 'FF-F-F-F-F-F+F'}
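# For illustration: one iteration of this rule rewrites every 'F' in the axiom
# "F-F-F-F" to "FF-F-F-F-F-F+F", so after the first pass the pattern becomes
# "FF-F-F-F-F-F+F-FF-F-F-F-F-F+F-FF-F-F-F-F-F+F-FF-F-F-F-F-F+F".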
#p10-4: Koch curve
#n = 4
#delta = 90.0
#axiom = "F-F-F-F"
#productions = {'F': 'F-F+F-F-F'}
#p12-a: Hexagonal Gosper Curve
#n = 4
#delta = 60.0
#axiom = "F"
#productions = {'F': 'F+f++f-F--FF-f+', 'f' : '-F+ff++f+F--F-f'}
#p12-a: Sierpinski gasket
#n = 6
#delta = 60.0
#axiom = "f"
#productions = {'F': 'f+F+f', 'f' : 'F-f-F'}
#p20: 3D Hilbert curve
#n = 3
#delta = 90.0
#axiom = "A"
#productions = {'A': 'B-F+CFC+F-D&F^D-F+&&CFC+F+B//',
# 'B': 'A&F^CFB^F^D^^-F-D^|F^B|FC^F^A//',
# 'C': '|D^|F^B-F+C^F^A&&FA&F^C+F+B^F^D//',
# 'D': '|CFB-F+B|FA&F^A&&FB-F+B|FC//'}
#p25-a
#n = 5
#delta = 25.7
#axiom = "F"
#productions = {'F': 'F[+F]F[-F]F'}
#p25-c
#n = 4
#delta = 22.5
#axiom = "F"
#productions = {'F': 'FF-[-F+F+F]+[+F-F-F]'}
#p25-d
#n = 7
#delta = 20.0
#axiom = "X"
#productions = {'X': 'F[+X]F[-X]+X', 'F': 'FF'}
#p27 - flower
#n = 5
#delta = 18
#bark = leaf
#leafRadius = 0.15 # Radius of leaf segments.
#flowerColors = ['[4.0 0.1 2.0]', '[0.1 2.0 4.0]', '[4.0 1.0 0.1]', '[4.0 5.0 2.0]', '[1.0 1.0 3.0]']
#flower = 'Material "matte" "color Kd" ' + flowerColors[random.randint(0, len(flowerColors)-1)]
#axiom = 'P'
#cylRadius = 0.3 # Initial radius of segments.
#productions = { 'P': 'I + [P + O] - - // [--L] I [++L] - [P O] ++ PO',
# 'I': 'F S [// && L] [// ^^ L] F S',
# 'S': [(33, 'S [// && L] [// ^^ L] F S'), (33, 'S F S'), (34, 'S')],
# 'L': '[`{+f-ff-f+ | +f-ff-f}]',
# 'O': '[&&& D `/ W //// W //// W //// W //// W]',
# 'D': 'FF',
# 'W': '[`^F] [<&&&& -f+f | -f+f>]'
# }
#26: bush/tree
#n = 7
#delta = 22.5
#cylRadius = 1.0
#axiom = "A"
#productions = {'A': '[&FL!A]/////`[&FL!A]///////`[&FL!A]',
# 'F': 'S ///// F',
# 'S': 'F L',
# 'L': '[```^^{-f+f+f-|-f+f+f}]'}
#26: tree with leaves
# n = 7
# delta = 22.5
# cylRadius = 1.0
# axiom = "A"
# productions = {'A': '[&FLA]/////[&FLA]///////`[&FLA]',
# 'F': 'S ///// F',
# 'S': 'F',
# 'L': '[Q^^Q][Q\\\\Q]'}
# print '# Building L-System with %s productions and %s iterations.' % (len(productions) , n)
# print '# Initial axiom is %s.' % (axiom,)
current = axiom # The working pattern
next = ""
for i in range(n): # For each iteration
for sym in range(len(current)): # For each symbol in the current pattern
#print '# %s: ' % (current[sym],),
found = 0
for search, replace in productions.iteritems(): # For each production
if (current[sym] == search): # Found a symbol to replace
if (type(replace) is list): # Select an option based on chance
choice = random.randint(0, 99)
optionsSeeked = 0
for chance, option in replace:
optionsSeeked = optionsSeeked + chance
if optionsSeeked >= choice: # Found your choice
replacement = option
break
else:
replacement = replace # It's a simple string.
next = next + replacement
#print '%s -> %s.' % (search, replace)
found = 1
break
if not found:
#print 'copied.'
next = next + current[sym]
current = next # This iteration is done, pass the pattern along.
next = ""
print "# Iteration %s complete, having arrived at %s.\n" %(i, current)
system = current
# We now have the final L-System. Interpret these rules as a turtle-drawing algorithm.
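# Turtle symbol summary (as interpreted below): F/f draw a cylinder segment and
# move forward; + and - yaw about the y axis; & and ^ pitch about the x axis;
# \ and / roll about the z axis; | turns 180 degrees; [ and ] push/pop a branch
# (shrinking/restoring the radius); Q places a leaf disk; { or < and } or >
# switch to the leaf/flower material and back.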
# Initialize
print bark
drawSize = 1.0
for i in range(len(system)):
if system[i] == "F" or system[i] == "f":
print 'Shape "cylinder" "float radius" [%s] "float zmin" [0.0] "float zmax" [%s]' % (cylRadius, drawSize)
print "Translate 0.0 0.0 %s" % (drawSize)
elif system[i] == "+":
print "Rotate %s 0.0 1.0 0.0" % (delta)
elif system[i] == "-":
print "Rotate -%s 0.0 1.0 0.0" % (delta)
elif system[i] == "&":
print "Rotate %s 1.0 0.0 0.0" % (delta)
elif system[i] == "^":
print "Rotate -%s 1.0 0.0 0.0" % (delta)
elif system[i] == "\\":
print "Rotate %s 0.0 0.0 1.0" % (delta)
elif system[i] == "/":
print "Rotate -%s 0.0 0.0 1.0" % (delta)
elif system[i] == "|":
print "Rotate 180 0.0 1.0 0.0"
elif system[i] == "Q":
print leaf
print "Translate 0.0 0.0 %s" % (1.0 + cylRadius)
rot = random.uniform(0, 80)
rx = random.uniform(0, 0.5)
ry = random.uniform(0, 0.5)
rz = random.uniform(0, 0.5)
print 'Rotate %s %s %s %s' % (rot, rx, ry, rz)
print 'Shape "disk" "float radius" [%s]' % (random.uniform(0.2, 0.6))
print 'Rotate %s -%s -%s -%s' % (rot, rx, ry, rz)
print "Rotate 180 0.0 1.0 0.0"
print "Translate 0.0 0.0 %s" % (1.0 + cylRadius)
print bark
elif system[i] == "[": # Branch.
cylRadius = cylRadius / shrinkFactor # Shrink.
print "AttributeBegin"
print "Translate 0.0 0.0 %s" % (-cylRadius)
elif system[i] == "]": # Unbranch.
cylRadius = cylRadius * shrinkFactor # Grow.
print "AttributeEnd"
elif system[i] == "{" or system[i] == "<":
storedRadius = cylRadius
cylRadius = leafRadius
if system[i] == "{":
drawSize = 0.7
print leaf
else:
            print flower  # NOTE: 'flower' is only defined by the p27 flower L-System block above; enable it before using '<'.
elif system[i] == "}" or system[i] == ">":
cylRadius = storedRadius
drawSize = 1.0
print bark
else:
print "# Not a drawing symbol: %s" % (system[i])
|
|
#!/usr/bin/python
# coding: utf-8
# imports
import socket
import sys
import hashlib
import binascii
import collections
# classes
class SockClient(object):
"""SockClient for handling the connection to the server"""
def __init__(self):
# Creates a TCP/IP socket
try:
self.client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
except socket.error, e:
print >> sys.stderr, e
sys.exit()
def __receiveBytes(self, amount):
try:
received = self.client.recv(amount)
except socket.error, e:
print >> sys.stderr, e
self.client.close()
sys.exit()
# Debug
print "\nReceived: %d" % len(received)
return received
def __getPacketLength(self):
packetlength = self.__receiveBytes(2)
# Debug
print "\n\nPacket Length: %d\n - Bytes: %s\n - Hex: %s" % \
(int(''.join([ x.encode('hex') for x in packetlength ]),16),
[ ord(x) for x in packetlength ],
[ x.encode('hex') for x in packetlength])
return packetlength
def __getMD5Sum(self):
md5sum = self.__receiveBytes(16)
# Debug
print "\n\nMD5 Sum: %s\n - Bytes: %s\n - Hex: %s" % \
(md5sum.encode('hex'),
[ ord(x) for x in md5sum ],
[ x.encode('hex') for x in md5sum])
return md5sum
def __getData(self, amount):
data = self.__receiveBytes(amount)
# Debug
print "\n\nData: %s\n - Bytes: %s\n - Hex: %s" % \
(data.encode('hex'),
[ ord(x) for x in data ],
[ x.encode('hex') for x in data])
return data
def __getParityByte(self):
parity = self.__receiveBytes(1)
# Debug
print "\n\nParity: %s\n - Bytes: %s\n - Hex: %s" % \
(parity.encode('hex'),
[ ord(x) for x in parity ],
[ x.encode('hex') for x in parity])
return parity
def __checkMessageParity(self, bits):
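        # Even-parity check: the whole message (parity byte included) should
        # contain an even number of 1 bits; returns 0 when parity is ok, 1
        # otherwise.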
num_1bits = bits.count('1')
# Check if parity byte exists
if(int(bits[len(bits)-8:]) > 1):
print "Parity byte does not exists!"
else:
if(bits[:len(bits)-8].count('1') % 2 == 0):
print "Message number of 1 bits is Even (%d), checking parity byte..." % bits[:len(bits)-8].count('1')
print "Parity byte is %s" % bits[len(bits)-8:]
else:
print "Message number of 1 bits is ODD (%d), checking parity byte..." % bits[:len(bits)-8].count('1')
print "Parity byte is %s" % bits[len(bits)-8:]
if(num_1bits % 2 == 0):
print "Even number of 1 bits (%d), message parity is ok" % num_1bits
return 0
else:
print "Odd number of 1 bits (%d), message parity is not ok" % num_1bits
return 1
def __checkDataMD5Sum(self, data, message_md5):
newmd5 = hashlib.md5()
newmd5.update(data)
md5sum = newmd5.hexdigest()
if(md5sum == message_md5):
print "Data MD5 sum is OK %s == %s" % (message_md5, md5sum)
else:
print "Data MD5 sum is NOT ok %s != %s" % (message_md5, md5sum)
def __getMostCommonByte(self, data):
counts = collections.Counter([ x.encode('hex') for x in data]).most_common()
self.mostcommonbyte = counts[0][0]
print "Most commom byte in data is hex: %s" % self.mostcommonbyte
def __getCipherKey(self):
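        # Assumes a single-byte XOR cipher and that the most common plaintext
        # byte is an ASCII space (0x20), so XORing the most common ciphertext
        # byte with 0x20 recovers the key.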
self.cipherkey = int(self.mostcommonbyte,16) ^ 0x20
print "Cipherkey: Int: %s - Hex: %s" % (self.cipherkey, hex(self.cipherkey)[2:])
def __decodeData(self, data):
mdata = [ x.encode('hex') for x in data ]
decodedmessage = [ chr(int(x,16) ^ self.cipherkey) for x in mdata ]
print decodedmessage
print "Decoded data hex: %s" % [ x.encode('hex') for x in decodedmessage]
decodedmessage = ''.join(decodedmessage)
print "\nDecoded data str: %s" % decodedmessage
return decodedmessage
def __createDecodedMessagePacket(self, decodedmessage):
nm_length = 2 + 16 + len(decodedmessage) + 1
hexnmlength = hex(nm_length)[2:]
if (len(hexnmlength) == 3):
hexnmlength = '0'+hexnmlength
print "\nNM length: %d - Hex: %s" % (nm_length, hexnmlength)
message_length = [hexnmlength[i:i+2] for i in range(0, len(hexnmlength), 2)]
        # Kludge: zero-pad the length to 2 hex bytes by hand, for lack of a better way known at the time.
if(nm_length <= 0xff):
print 'True'
zb = ['00']
zb.extend(message_length)
nm_length = zb
print nm_length
else:
nm_length = message_length
        # End of kludge
nm_newmd5 = hashlib.md5()
nm_newmd5.update(decodedmessage)
md5sum = nm_newmd5.hexdigest()
print "\nNM decoded data MD5 sum: %s" % md5sum
nm_md5sum = [md5sum[i:i+2] for i in range(0, len(md5sum), 2)]
print nm_md5sum
nm_decodedmessage = [ x.encode('hex') for x in decodedmessage]
nm_parity = 0x0
nm_message = []
nm_message.extend(nm_length)
nm_message.extend(nm_md5sum)
nm_message.extend(nm_decodedmessage)
print "NM message: "
print nm_message
nm_binary = (bin(int(''.join(nm_message), 16))[2:]).zfill(len(''.join(nm_message)) * 4)
print "\nNM binary: %s" % nm_binary
nm_parity = self.__checkMessageParity(nm_binary)
nm_parity = [nm_parity]
nm_parity = [''.join('{:02x}'.format(x) for x in nm_parity)]
nm_message.extend(nm_parity)
# Recheck message parity
nm_binary = (bin(int(''.join(nm_message), 16))[2:]).zfill(len(''.join(nm_message)) * 4)
nm_parity = self.__checkMessageParity(nm_binary)
print "\nNM binary: %s" % nm_binary
print "NM message: "
print nm_message
createdmessage = ''.join(nm_message)
print "NM message str: %s" % createdmessage
return createdmessage
def getEncryptedMessage(self):
print "Client: Receiving new message..."
packetlength = self.__getPacketLength()
md5sum = self.__getMD5Sum()
data = self.__getData(int(''.join([ x.encode('hex') for x in packetlength ]),16) - 16 - 2 - 1)
parity = self.__getParityByte()
message = packetlength + md5sum + data + parity
binarymessage = (bin(int(message.encode('hex'), 16))[2:]).zfill(len(message.encode('hex')) * 4)
print "\n\nMessage: %s\n - Hex: %s\n - Bin: %s" % \
([ ord(x) for x in message ],
message.encode('hex'),
binarymessage)
self.__checkMessageParity(binarymessage)
self.__checkDataMD5Sum(data, md5sum.encode('hex'))
self.__getMostCommonByte(data)
self.__getCipherKey()
return data
def getDecodedMessage(self, encryptedMessagedata):
decodedmessage = self.__decodeData(encryptedMessagedata)
return decodedmessage
def sendDecodedMessage(self, decodedmessage):
print "Client: Creating decoded message..."
createdmessage = self.__createDecodedMessagePacket(decodedmessage)
print "Client: Sending decoded message..."
try:
self.client.send(createdmessage.decode('hex'))
except socket.error, e:
print "Error sending decoded data: %s" % e
sys.exit(1)
print "Client: Decoded message has been successfully sent!"
def getServerResponse(self):
print "Client: Getting server response..."
packetlength = self.__getPacketLength()
md5sum = self.__getMD5Sum()
data = self.__getData(int(''.join([ x.encode('hex') for x in packetlength ]),16) - 16 - 2 - 1)
parity = self.__getParityByte()
message = packetlength + md5sum + data + parity
binarymessage = (bin(int(message.encode('hex'), 16))[2:]).zfill(len(message.encode('hex')) * 4)
print "\n\nMessage: %s\n - Hex: %s\n - Bin: %s" % \
([ ord(x) for x in message ],
message.encode('hex'),
binarymessage)
self.__checkMessageParity(binarymessage)
self.__checkDataMD5Sum(data, md5sum.encode('hex'))
return data
def connect(self, address, port):
try:
self.client.connect((address, port))
except socket.error, e:
print >> sys.stderr, e
self.client.close()
sys.exit()
def disconnect(self):
self.client.close()
|
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A module for encapsulating the robot arm, gripper, and bracelet."""
import abc
from typing import List, Optional, Sequence, Generic, TypeVar
from dm_control import composer
from dm_control import mjcf
from dm_control.mjcf import traversal_utils
from dm_robotics.geometry import geometry
from dm_robotics.moma import effector
from dm_robotics.moma import prop
from dm_robotics.moma import sensor as moma_sensor
from dm_robotics.moma.models.end_effectors.robot_hands import robot_hand
from dm_robotics.moma.models.robots.robot_arms import robot_arm
from dm_robotics.moma.utils import ik_solver
import numpy as np
Arm = TypeVar('Arm', bound=robot_arm.RobotArm)
Gripper = TypeVar('Gripper', bound=robot_hand.AnyRobotHand)
class Robot(abc.ABC, Generic[Arm, Gripper]):
"""Abstract base class for MOMA robotic arms and their attachments."""
@property
@abc.abstractmethod
def name(self):
pass
@property
@abc.abstractmethod
def sensors(self) -> Sequence[moma_sensor.Sensor]:
pass
@property
@abc.abstractmethod
def effectors(self) -> List[effector.Effector]:
pass
@property
@abc.abstractmethod
def arm_effector(self) -> effector.Effector:
pass
@property
@abc.abstractmethod
def gripper_effector(self) -> Optional[effector.Effector]:
pass
@property
@abc.abstractmethod
def arm(self) -> Arm:
pass
@property
@abc.abstractmethod
def gripper(self) -> Gripper:
pass
@property
@abc.abstractmethod
def wrist_ft(self):
pass
@property
@abc.abstractmethod
def wrist_cameras(self):
"""Returns a sequence of wrist cameras (if any)."""
@property
@abc.abstractmethod
def arm_base_site(self):
"""Returns a site at the base of the arm."""
@property
@abc.abstractmethod
def arm_frame(self):
pass
@abc.abstractmethod
def position_gripper(self, physics: mjcf.Physics, position: np.ndarray,
quaternion: np.ndarray):
"""Moves the gripper ik point position to the (pos, quat) pose tuple."""
@abc.abstractmethod
def position_arm_joints(self, physics, joint_angles):
"""Positions the arm joints to the given angles."""
class StandardRobot(Generic[Arm, Gripper], Robot[Arm, Gripper]):
"""A Robot class representing the union of arm, gripper, and bracelet."""
def __init__(self,
arm: Arm,
arm_base_site_name: str,
gripper: Gripper,
robot_sensors: Sequence[moma_sensor.Sensor],
arm_effector: effector.Effector,
gripper_effector: Optional[effector.Effector],
wrist_ft: Optional[composer.Entity] = None,
wrist_cameras: Optional[Sequence[prop.Camera]] = None,
name: str = 'robot'):
"""Robot constructor.
Args:
arm: The robot arm Entity.
arm_base_site_name: The label of the base site of the arm model, so that
we can position the robot base in the world.
gripper: The gripper Entity to attach to the arm.
robot_sensors: List of abstract Sensors that are associated with this
robot.
arm_effector: An effector for the robot arm.
gripper_effector: An effector for the robot gripper.
wrist_ft: Optional wrist force-torque Entity to add between the arm and
gripper in the kinematic chain.
wrist_cameras: Optional list of camera props attached to the robot wrist.
name: A unique name for the robot.
"""
self._arm = arm
self._gripper = gripper
self._robot_sensors = robot_sensors
self._arm_effector = arm_effector
self._gripper_effector = gripper_effector
self._wrist_ft = wrist_ft
self._wrist_cameras = wrist_cameras or []
self._name = name
# Site for the robot "base" for reporting wrist-site pose observations.
self._arm_base_site = self._arm.mjcf_model.find('site', arm_base_site_name)
self._gripper_ik_site = self._gripper.tool_center_point
@property
def name(self) -> str:
return self._name
@property
def sensors(self) -> Sequence[moma_sensor.Sensor]:
return self._robot_sensors
@property
def effectors(self) -> List[effector.Effector]:
effectors = [self._arm_effector]
if self.gripper_effector is not None:
assert self.gripper_effector is not None # This placates pytype.
effectors.append(self.gripper_effector)
return effectors
@property
def arm_effector(self) -> effector.Effector:
return self._arm_effector
@property
def gripper_effector(self) -> Optional[effector.Effector]:
return self._gripper_effector
@property
def arm(self) -> Arm:
return self._arm
@property
def gripper(self) -> Gripper:
return self._gripper
@property
def wrist_ft(self):
return self._wrist_ft
@property
def wrist_cameras(self) -> Sequence[prop.Camera]:
return self._wrist_cameras
@property
def arm_base_site(self):
"""Returns a site at the base of the arm."""
return self._arm_base_site
@property
def arm_frame(self):
return traversal_utils.get_attachment_frame(self._arm.mjcf_model)
def position_gripper(self, physics: mjcf.Physics, position: np.ndarray,
quaternion: np.ndarray):
"""Moves the gripper ik point position to the (pos, quat) pose tuple.
Args:
physics: An MJCF physics.
position: The cartesian position of the desired pose given in the world
frame.
quaternion: The quaternion (wxyz) giving the desired orientation of the
gripper in the world frame.
Raises:
ValueError: If the gripper cannot be placed at the desired pose.
"""
# Initialize the ik solver. We create a new version of the solver at every
# solve because there is no guarantee that the mjcf_model has not been
# modified.
mjcf_model = self._arm.mjcf_model.root_model
solver = ik_solver.IkSolver(
mjcf_model, self._arm.joints, self._gripper_ik_site)
qpos = solver.solve(ref_pose=geometry.Pose(position, quaternion))
if qpos is None:
if self.gripper_effector is not None:
gripper_prefix = self.gripper_effector.prefix # pytype: disable=attribute-error
else:
gripper_prefix = 'gripper'
raise ValueError('IK Failed to converge to the desired target pose'
f'{geometry.Pose(position, quaternion)} '
f'for {gripper_prefix} of {self._arm.name}')
self.position_arm_joints(physics, qpos)
def position_arm_joints(self, physics, joint_angles):
self.arm.set_joint_angles(physics, joint_angles)
def standard_compose(
arm: Arm,
gripper: Gripper,
wrist_ft: Optional[composer.Entity] = None,
wrist_cameras: Sequence[prop.Camera] = ()
) -> None:
"""Creates arm and attaches gripper."""
if wrist_ft:
wrist_ft.attach(gripper)
arm.attach(wrist_ft)
else:
arm.attach(gripper)
for cam in wrist_cameras:
arm.attach(cam, arm.wrist_site)
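# Usage sketch (hypothetical `my_arm`, `my_gripper` and effector objects, not
# part of this module): compose the entities, then wrap them in a StandardRobot.
#
#   standard_compose(arm=my_arm, gripper=my_gripper)
#   robot = StandardRobot(
#       arm=my_arm,
#       arm_base_site_name='base_site',  # hypothetical site name
#       gripper=my_gripper,
#       robot_sensors=[],
#       arm_effector=my_arm_effector,
#       gripper_effector=None,
#   )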
|
|
import gzip
import inspect
import warnings
from cStringIO import StringIO
from scrapy.utils.trackref import object_ref
from twisted.trial import unittest
from scrapy.spider import Spider, BaseSpider
from scrapy.http import Request, Response, TextResponse, XmlResponse, HtmlResponse
from scrapy.contrib.spiders.init import InitSpider
from scrapy.contrib.spiders import CrawlSpider, Rule, XMLFeedSpider, \
CSVFeedSpider, SitemapSpider
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.exceptions import ScrapyDeprecationWarning
class SpiderTest(unittest.TestCase):
spider_class = Spider
def setUp(self):
warnings.simplefilter("always")
def tearDown(self):
warnings.resetwarnings()
def test_base_spider(self):
spider = self.spider_class("example.com")
self.assertEqual(spider.name, 'example.com')
self.assertEqual(spider.start_urls, [])
def test_start_requests(self):
spider = self.spider_class('example.com')
start_requests = spider.start_requests()
self.assertTrue(inspect.isgenerator(start_requests))
self.assertEqual(list(start_requests), [])
def test_spider_args(self):
"""Constructor arguments are assigned to spider attributes"""
spider = self.spider_class('example.com', foo='bar')
self.assertEqual(spider.foo, 'bar')
def test_spider_without_name(self):
"""Constructor arguments are assigned to spider attributes"""
self.assertRaises(ValueError, self.spider_class)
self.assertRaises(ValueError, self.spider_class, somearg='foo')
class InitSpiderTest(SpiderTest):
spider_class = InitSpider
class XMLFeedSpiderTest(SpiderTest):
spider_class = XMLFeedSpider
def test_register_namespace(self):
body = """<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns:x="http://www.google.com/schemas/sitemap/0.84"
xmlns:y="http://www.example.com/schemas/extras/1.0">
<url><x:loc>http://www.example.com/Special-Offers.html</loc><y:updated>2009-08-16</updated><other value="bar" y:custom="fuu"/></url>
<url><loc>http://www.example.com/</loc><y:updated>2009-08-16</updated><other value="foo"/></url>
</urlset>"""
response = XmlResponse(url='http://example.com/sitemap.xml', body=body)
class _XMLSpider(self.spider_class):
itertag = 'url'
namespaces = (
('a', 'http://www.google.com/schemas/sitemap/0.84'),
('b', 'http://www.example.com/schemas/extras/1.0'),
)
def parse_node(self, response, selector):
yield {
'loc': selector.xpath('a:loc/text()').extract(),
'updated': selector.xpath('b:updated/text()').extract(),
'other': selector.xpath('other/@value').extract(),
'custom': selector.xpath('other/@b:custom').extract(),
}
for iterator in ('iternodes', 'xml'):
spider = _XMLSpider('example', iterator=iterator)
output = list(spider.parse(response))
self.assertEqual(len(output), 2, iterator)
self.assertEqual(output, [
{'loc': [u'http://www.example.com/Special-Offers.html'],
'updated': [u'2009-08-16'],
'custom': [u'fuu'],
'other': [u'bar']},
{'loc': [],
'updated': [u'2009-08-16'],
'other': [u'foo'],
'custom': []},
], iterator)
class CSVFeedSpiderTest(SpiderTest):
spider_class = CSVFeedSpider
class CrawlSpiderTest(SpiderTest):
test_body = """<html><head><title>Page title<title>
<body>
<p><a href="item/12.html">Item 12</a></p>
<div class='links'>
<p><a href="/about.html">About us</a></p>
</div>
<div>
<p><a href="/nofollow.html">This shouldn't be followed</a></p>
</div>
</body></html>"""
spider_class = CrawlSpider
def test_process_links(self):
response = HtmlResponse("http://example.org/somepage/index.html",
body=self.test_body)
class _CrawlSpider(self.spider_class):
name="test"
allowed_domains=['example.org']
rules = (
Rule(SgmlLinkExtractor(), process_links="dummy_process_links"),
)
def dummy_process_links(self, links):
return links
spider = _CrawlSpider()
output = list(spider._requests_to_follow(response))
self.assertEqual(len(output), 3)
self.assertTrue(all(map(lambda r: isinstance(r, Request), output)))
self.assertEquals([r.url for r in output],
['http://example.org/somepage/item/12.html',
'http://example.org/about.html',
'http://example.org/nofollow.html'])
def test_process_links_filter(self):
response = HtmlResponse("http://example.org/somepage/index.html",
body=self.test_body)
class _CrawlSpider(self.spider_class):
import re
name="test"
allowed_domains=['example.org']
rules = (
Rule(SgmlLinkExtractor(), process_links="filter_process_links"),
)
_test_regex = re.compile('nofollow')
def filter_process_links(self, links):
return [link for link in links
if not self._test_regex.search(link.url)]
spider = _CrawlSpider()
output = list(spider._requests_to_follow(response))
self.assertEqual(len(output), 2)
self.assertTrue(all(map(lambda r: isinstance(r, Request), output)))
self.assertEquals([r.url for r in output],
['http://example.org/somepage/item/12.html',
'http://example.org/about.html'])
def test_process_links_generator(self):
response = HtmlResponse("http://example.org/somepage/index.html",
body=self.test_body)
class _CrawlSpider(self.spider_class):
name="test"
allowed_domains=['example.org']
rules = (
Rule(SgmlLinkExtractor(), process_links="dummy_process_links"),
)
def dummy_process_links(self, links):
for link in links:
yield link
spider = _CrawlSpider()
output = list(spider._requests_to_follow(response))
self.assertEqual(len(output), 3)
self.assertTrue(all(map(lambda r: isinstance(r, Request), output)))
self.assertEquals([r.url for r in output],
['http://example.org/somepage/item/12.html',
'http://example.org/about.html',
'http://example.org/nofollow.html'])
class SitemapSpiderTest(SpiderTest):
spider_class = SitemapSpider
BODY = "SITEMAP"
f = StringIO()
g = gzip.GzipFile(fileobj=f, mode='w+b')
g.write(BODY)
g.close()
GZBODY = f.getvalue()
def test_get_sitemap_body(self):
spider = self.spider_class("example.com")
r = XmlResponse(url="http://www.example.com/", body=self.BODY)
self.assertEqual(spider._get_sitemap_body(r), self.BODY)
r = HtmlResponse(url="http://www.example.com/", body=self.BODY)
self.assertEqual(spider._get_sitemap_body(r), None)
r = Response(url="http://www.example.com/favicon.ico", body=self.BODY)
self.assertEqual(spider._get_sitemap_body(r), None)
r = Response(url="http://www.example.com/sitemap", body=self.GZBODY, headers={"content-type": "application/gzip"})
self.assertEqual(spider._get_sitemap_body(r), self.BODY)
r = TextResponse(url="http://www.example.com/sitemap.xml", body=self.BODY)
self.assertEqual(spider._get_sitemap_body(r), self.BODY)
r = Response(url="http://www.example.com/sitemap.xml.gz", body=self.GZBODY)
self.assertEqual(spider._get_sitemap_body(r), self.BODY)
class BaseSpiderDeprecationTest(unittest.TestCase):
def test_basespider_is_deprecated(self):
with warnings.catch_warnings(record=True) as w:
class MySpider1(BaseSpider):
pass
self.assertEqual(len(w), 1)
self.assertEqual(w[0].category, ScrapyDeprecationWarning)
self.assertEqual(w[0].lineno, inspect.getsourcelines(MySpider1)[1])
def test_basespider_issubclass(self):
class MySpider2(Spider):
pass
class MySpider2a(MySpider2):
pass
class Foo(object):
pass
class Foo2(object_ref):
pass
assert issubclass(MySpider2, BaseSpider)
assert issubclass(MySpider2a, BaseSpider)
assert not issubclass(Foo, BaseSpider)
assert not issubclass(Foo2, BaseSpider)
def test_basespider_isinstance(self):
class MySpider3(Spider):
name = 'myspider3'
class MySpider3a(MySpider3):
pass
class Foo(object):
pass
class Foo2(object_ref):
pass
assert isinstance(MySpider3(), BaseSpider)
assert isinstance(MySpider3a(), BaseSpider)
assert not isinstance(Foo(), BaseSpider)
assert not isinstance(Foo2(), BaseSpider)
def test_crawl_spider(self):
assert issubclass(CrawlSpider, Spider)
assert issubclass(CrawlSpider, BaseSpider)
assert isinstance(CrawlSpider(name='foo'), Spider)
assert isinstance(CrawlSpider(name='foo'), BaseSpider)
if __name__ == '__main__':
unittest.main()
|
|
# Flask dependencies
from flask import Flask
from flask import render_template
from flask import request
from flask import redirect
from flask import jsonify
from flask import url_for
from flask import flash
from flask import session as session_object
from flask import make_response
from functools import wraps
# ORM: SQLAlchemy
from sqlalchemy import create_engine, asc
from sqlalchemy.orm import sessionmaker
# OAuth2Client
from oauth2client.client import flow_from_clientsecrets
from oauth2client.client import FlowExchangeError
# Utility Libraries
import random
import string
import httplib2
import json
import requests
# Model classes
from database_setup import Base
from database_setup import Country
from database_setup import Missile
from database_setup import User
# Creation of app
app = Flask(__name__)
# Read the credentials from the client_secrets file for Google Authentication
CLIENT_ID = json.loads(
open('client_secrets.json', 'r').read())['web']['client_id']
# Connect to missiles database
engine = create_engine('sqlite:///lots_of_missiles.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
# User Helper Functions
def login_required(f):
'''Decorator function to check if the user is logged in'''
@wraps(f)
def decorated_function(*args, **kwargs):
if 'username' not in session_object:
flash("Authorization fail: Access denied.")
return redirect('/login')
else:
return f(*args, **kwargs)
return decorated_function
def create_user(session_object):
newUser = User(
name=session_object['username'],
email=session_object['email'],
picture=session_object['picture'])
session.add(newUser)
session.commit()
user = session.query(User).filter_by(email=session_object['email']).one()
return user.id
def get_user_by_id(user_id):
user = session.query(User).filter_by(id=user_id).one()
return user
def get_userid_by_email(email):
try:
user = session.query(User).filter_by(email=email).one()
return user.id
except:
return None
# ||------------------------------------------------||
# || Routes for the login, logout and OAuth. ||
# || Can be used for all flask based applications ||
# ||------------------------------------------------||
@app.route('/login')
def login():
# Protection against Session riding
state = ''.join(random.choice(
string.ascii_uppercase + string.digits) for x in xrange(32))
# Store the state in the login session object
session_object['state'] = state
return render_template('login.html', STATE=state)
@app.route('/gconnect', methods=['POST'])
def gconnect():
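    '''Google OAuth sign-in: validates the anti-forgery state token, exchanges
    the one-time code for credentials, verifies the access token and client ID,
    stores the user in the session and creates a local user record if needed.'''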
# Check for valid state
if request.args.get('state') != session_object['state']:
response = make_response(json.dumps(
'Invalid state parameter. This could be due to a session riding \
attack.'), 401)
response.headers['Content-Type'] = 'application/json'
return response
# Authorization code from Google
code = request.data
try:
        # Construction of a credentials object
oauth_flow = flow_from_clientsecrets('client_secrets.json', scope='')
oauth_flow.redirect_uri = 'postmessage'
credentials = oauth_flow.step2_exchange(code)
except FlowExchangeError:
response = make_response(
json.dumps('Failed to upgrade the authorization code.'), 401)
response.headers['Content-Type'] = 'application/json'
return response
# Check for validity of access token
access_token = credentials.access_token
url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?access_token=%s'
% access_token)
h = httplib2.Http()
result = json.loads(h.request(url, 'GET')[1])
# Error handling
    if result.get('error') is not None:
        response = make_response(json.dumps(result.get('error')), 500)
        response.headers['Content-Type'] = 'application/json'
        return response
# User ID verification
gplus_id = credentials.id_token['sub']
if result['user_id'] != gplus_id:
response = make_response(
json.dumps("User ID mismatch."), 401)
response.headers['Content-Type'] = 'application/json'
return response
# Client ID Verification
if result['issued_to'] != CLIENT_ID:
response = make_response(
json.dumps("App Client ID mistach."), 401)
print "Token's client ID does not match app's."
response.headers['Content-Type'] = 'application/json'
return response
stored_credentials = session_object.get('credentials')
stored_gplus_id = session_object.get('gplus_id')
if stored_credentials is not None and gplus_id == stored_gplus_id:
response = make_response(json.dumps(
'User active.'), 200)
response.headers['Content-Type'] = 'application/json'
return response
# Store the access token in the session for later use.
session_object['provider'] = 'google'
session_object['credentials'] = credentials.to_json()
session_object['gplus_id'] = gplus_id
# Get user info
url = "https://www.googleapis.com/oauth2/v1/userinfo"
params = {'access_token': credentials.access_token, 'alt': 'json'}
g_response = requests.get(url, params=params)
data = g_response.json()
session_object['username'] = data['name']
session_object['picture'] = data['picture']
session_object['email'] = data['email']
user_id = get_userid_by_email(session_object['email'])
if not user_id:
user_id = create_user(session_object)
session_object['user_id'] = user_id
output = ''
output += '<h1>Welcome, '
output += session_object['username']
output += '!</h1>'
output += '<img src="'
output += session_object['picture']
output += ' " style = "width: 300px; height: 300px;border-radius: 150px;\
-webkit-border-radius: 150px;-moz-border-radius: 150px;"> '
flash("you are now logged in as %s" % session_object['username'])
print "done!"
return output
@app.route('/gdisconnect')
def gdisconnect():
# Only disconnect a connected user.
credentials = session_object.get('credentials')
if credentials is None:
response = make_response(
json.dumps('User Inactive.'), 401)
response.headers['Content-Type'] = 'application/json'
return response
credentials = json.loads(credentials)
access_token = credentials.get('access_token')
url = 'https://accounts.google.com/o/oauth2/revoke?token=%s' % access_token
h = httplib2.Http()
result = h.request(url, 'GET')[0]
if result['status'] == '200':
response = make_response(json.dumps('Google logged out.'), 200)
response.headers['Content-Type'] = 'application/json'
return response
else:
# Token validity check
        response = make_response(
            json.dumps('Invalid token.'), 400)
response.headers['Content-Type'] = 'application/json'
return response
@app.route('/fbconnect', methods=['POST'])
def fbconnect():
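    '''Facebook OAuth sign-in: validates the state token, exchanges the
    short-lived token for a long-lived one, fetches the user's profile and
    picture, and stores the user in the session.'''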
# Check for valid state
if request.args.get('state') != session_object['state']:
response = make_response(json.dumps(
'Invalid state parameter. This could be due to a session \
riding attack.'), 401)
response.headers['Content-Type'] = 'application/json'
return response
# Facebook Access Token
access_token = request.data
# Read the client secret from the file
app_id = json.loads(open('fb_client_secrets.json', 'r').read())[
'web']['app_id']
app_secret = json.loads(
open('fb_client_secrets.json', 'r').read())['web']['app_secret']
    url = ('https://graph.facebook.com/oauth/access_token?grant_type='
           'fb_exchange_token&client_id=%s&client_secret=%s'
           '&fb_exchange_token=%s' % (app_id, app_secret, access_token))
h = httplib2.Http()
result = h.request(url, 'GET')[1]
# Get user details
url = "https://graph.facebook.com/v2.4/me"
# Remove expiry
token = result.split("&")[0]
url = 'https://graph.facebook.com/v2.4/me?%s&fields=name,id,email' % token
h = httplib2.Http()
result = h.request(url, 'GET')[1]
data = json.loads(result)
# Save the details to the session object
session_object['provider'] = 'facebook'
session_object['username'] = data["name"]
session_object['email'] = data["email"]
session_object['facebook_id'] = data["id"]
stored_token = token.split("=")[1]
session_object['access_token'] = stored_token
# Accessing the picture using facebook oauth
    url = ('https://graph.facebook.com/v2.4/me/picture?%s&redirect=0&'
           'height=200&width=200' % token)
h = httplib2.Http()
result = h.request(url, 'GET')[1]
data = json.loads(result)
session_object['picture'] = data["data"]["url"]
# User ID Check
user_id = get_userid_by_email(session_object['email'])
if not user_id:
user_id = create_user(session_object)
session_object['user_id'] = user_id
output = ''
output += '<h1>Welcome, '
output += session_object['username']
output += '!</h1>'
output += '<img src="'
output += session_object['picture']
output += ' " style = "width: 300px; height: 300px;border-radius: 150px;\
-webkit-border-radius: 150px;-moz-border-radius: 150px;"> '
flash("Now logged in as %s" % session_object['username'])
return output
@app.route('/fbdisconnect')
def fbdisconnect():
facebook_id = session_object['facebook_id']
access_token = session_object['access_token']
url = 'https://graph.facebook.com/%s/permissions?access_token=%s' % (
facebook_id, access_token)
h = httplib2.Http()
result = h.request(url, 'DELETE')[1]
return "Facebook logged out"
@app.route('/logout')
def logout():
if 'provider' in session_object:
if session_object['provider'] == 'google':
gdisconnect()
del session_object['credentials']
if session_object['provider'] == 'facebook':
fbdisconnect()
del session_object['facebook_id']
del session_object['username']
del session_object['email']
del session_object['picture']
del session_object['user_id']
flash("You have successfully been logged out.")
return redirect(url_for('showCountries'))
else:
flash("You were not logged in")
return redirect(url_for('showCountries'))
# ||------------------------------------------------||
# || REST Implementation for the application ||
# ||------------------------------------------------||
@app.route('/country/JSON')
def countriesJSON():
'''Returns the list of countries in JSON format'''
countries = session.query(Country).all()
return jsonify(countries=[i.serialize for i in countries])
@app.route('/country/<int:country_id>/JSON')
def countryMissilesJSON(country_id):
'''Returns the missiles for a particular country in JSON format'''
country = session.query(Country).filter_by(id=country_id).one()
missiles = session.query(Missile).filter_by(country_id=country_id).all()
return jsonify(missiles=[i.serialize for i in missiles])
@app.route('/country/<int:country_id>/<int:missile_id>/JSON')
def missileJSON(country_id, missile_id):
'''Returns the missile details for a particular missile in JSON format'''
missile = session.query(Missile).filter_by(id=missile_id).one()
return jsonify(missile=missile.serialize)
# ||------------------------------------------------||
# || Main routes for the application ||
# ||------------------------------------------------||
@app.route('/')
def showCountries():
'''Method to show all the countries currently added to application'''
missiles = session.query(Missile).order_by(asc(Missile.name))
countries = session.query(Country).order_by(asc(Country.name))
if 'username' not in session_object:
return render_template('public_missiles.html',
missiles=missiles,
countries=countries)
else:
return render_template('private_missiles.html',
missiles=missiles,
countries=countries)
@app.route('/country/new', methods=['GET', 'POST'])
@login_required
def newCountry():
'''Method to add a new country'''
if request.method == 'POST':
newCountry = Country(name=request.form['name'],
user_id=session_object['user_id'])
session.add(newCountry)
        flash('Successfully added new country: %s' % newCountry.name)
session.commit()
return redirect(url_for('showCountries'))
else:
return render_template('new-country.html')
@app.route('/country/<int:country_id>/edit/', methods=['GET', 'POST'])
@login_required
def editCountry(country_id):
'''Edit a country from the application'''
editedCountry = session.query(Country).filter_by(id=country_id).one()
if editedCountry.user_id != session_object['user_id']:
return """<script>(function() {alert("Access denied. Only creator \
can edit."); window.location.href = "/";})();</script>"""
if request.method == 'POST':
if request.form['name']:
editedCountry.name = request.form['name']
flash('Country successfully edited %s' % editedCountry.name)
return redirect(url_for('showCountries'))
else:
return render_template('edit-country.html',
country=editedCountry)
@app.route('/country/<int:country_id>/delete/', methods=['GET', 'POST'])
@login_required
def deleteCountry(country_id):
'''Delete a country from the application'''
countryToDelete = session.query(Country).filter_by(id=country_id).one()
if countryToDelete.user_id != session_object['user_id']:
return """<script>(function() {alert("Access denied. Only creator \
can delete."); window.location.href = "/";})();</script>"""
if request.method == 'POST':
session.delete(countryToDelete)
flash('%s successfully deleted' % countryToDelete.name)
session.commit()
return redirect(url_for('showCountries',
country_id=country_id))
else:
return render_template('delete-country.html',
country=countryToDelete)
@app.route('/country/<int:country_id>/')
@app.route('/country/<int:country_id>/missiles/')
def showMissiles(country_id):
'''Show missiles belonging to a country depending on authorization'''
country = session.query(Country).filter_by(id=country_id).one()
countries = session.query(Country).order_by(asc(Country.name))
creator = get_user_by_id(country.user_id)
missiles = session.query(Missile).filter_by(country_id=country_id).all()
if 'username' not in session_object:
return render_template('public_missiles.html',
missiles=missiles,
country=country,
countries=countries,
creator=creator)
else:
return render_template('private_missiles.html',
missiles=missiles,
country=country,
countries=countries,
creator=creator)
@app.route('/country/<int:country_id>/missiles/new/', methods=['GET', 'POST'])
@login_required
def newMissile(country_id):
'''Method to add a missile to country'''
country = session.query(Country).filter_by(id=country_id).one()
if request.method == 'POST':
newMissile = Missile(name=request.form['name'],
country_id=country_id,
description=request.form['description'],
link=request.form['link'],
user_id=session_object['user_id'])
session.add(newMissile)
session.commit()
flash('New missile %s successfully created' % (newMissile.name))
return redirect(url_for('showMissiles',
country_id=country_id))
else:
return render_template('new-missile.html',
country_id=country_id)
@app.route('/country/<int:country_id>/missile/<int:missile_id>/edit',
methods=['GET', 'POST'])
@login_required
def editMissile(country_id, missile_id):
'''Method to edit a missile'''
editedMissile = session.query(Missile).filter_by(id=missile_id).one()
country = session.query(Country).filter_by(id=country_id).one()
if editedMissile.user_id != session_object['user_id']:
return """<script>(function() {alert("Access denied. Only creator \
can edit."); window.location.href = "/";})();</script>"""
if request.method == 'POST':
if request.form['name']:
editedMissile.name = request.form['name']
if request.form['description']:
editedMissile.description = request.form['description']
if request.form['link']:
editedMissile.link = request.form['link']
session.add(editedMissile)
session.commit()
flash('Missile successfully edited')
return redirect(url_for('showMissiles',
country_id=country_id))
else:
return render_template('edit-missile.html',
country_id=country_id,
missile_id=missile_id,
item=editedMissile)
@app.route('/country/<int:country_id>/missiles/<int:missile_id>/delete',
methods=['GET', 'POST'])
@login_required
def deleteMissile(country_id, missile_id):
'''Method to delete a missile'''
country = session.query(Country).filter_by(id=country_id).one()
missileToDelete = session.query(Missile).filter_by(id=missile_id).one()
if missileToDelete.user_id != session_object['user_id']:
return """<script>(function() {alert("Access denied. Only creator \
can delete."); window.location.href = "/";})();</script>"""
if request.method == 'POST':
session.delete(missileToDelete)
session.commit()
flash('Missile successfully deleted')
return redirect(url_for('showMissiles', country_id=country_id))
else:
return render_template('delete-missile.html', item=missileToDelete)
if __name__ == '__main__':
app.secret_key = "secret key"
app.debug = True
app.run(host='0.0.0.0', port=8080)
|
|
"""
Timezone-related classes and functions.
This module uses pytz when it's available and fallbacks when it isn't.
"""
from datetime import datetime, timedelta, tzinfo
from threading import local
import sys
import time as _time
try:
import pytz
except ImportError:
pytz = None
from django.conf import settings
from django.utils import lru_cache
from django.utils import six
from django.utils.decorators import ContextDecorator
__all__ = [
'utc', 'get_fixed_timezone',
'get_default_timezone', 'get_default_timezone_name',
'get_current_timezone', 'get_current_timezone_name',
'activate', 'deactivate', 'override',
'localtime', 'now',
'is_aware', 'is_naive', 'make_aware', 'make_naive',
]
# UTC and local time zones
ZERO = timedelta(0)
class UTC(tzinfo):
"""
UTC implementation taken from Python's docs.
Used only when pytz isn't available.
"""
def __repr__(self):
return "<UTC>"
def utcoffset(self, dt):
return ZERO
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return ZERO
class FixedOffset(tzinfo):
"""
Fixed offset in minutes east from UTC. Taken from Python's docs.
Kept as close as possible to the reference version. __init__ was changed
to make its arguments optional, according to Python's requirement that
tzinfo subclasses can be instantiated without arguments.
"""
def __init__(self, offset=None, name=None):
if offset is not None:
self.__offset = timedelta(minutes=offset)
if name is not None:
self.__name = name
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return ZERO
class ReferenceLocalTimezone(tzinfo):
"""
Local time. Taken from Python's docs.
Used only when pytz isn't available, and most likely inaccurate. If you're
having trouble with this class, don't waste your time, just install pytz.
Kept as close as possible to the reference version. __init__ was added to
delay the computation of STDOFFSET, DSTOFFSET and DSTDIFF which is
performed at import time in the example.
Subclasses contain further improvements.
"""
def __init__(self):
self.STDOFFSET = timedelta(seconds=-_time.timezone)
if _time.daylight:
self.DSTOFFSET = timedelta(seconds=-_time.altzone)
else:
self.DSTOFFSET = self.STDOFFSET
self.DSTDIFF = self.DSTOFFSET - self.STDOFFSET
tzinfo.__init__(self)
def utcoffset(self, dt):
if self._isdst(dt):
return self.DSTOFFSET
else:
return self.STDOFFSET
def dst(self, dt):
if self._isdst(dt):
return self.DSTDIFF
else:
return ZERO
def tzname(self, dt):
return _time.tzname[self._isdst(dt)]
def _isdst(self, dt):
tt = (dt.year, dt.month, dt.day,
dt.hour, dt.minute, dt.second,
dt.weekday(), 0, 0)
stamp = _time.mktime(tt)
tt = _time.localtime(stamp)
return tt.tm_isdst > 0
class LocalTimezone(ReferenceLocalTimezone):
"""
Slightly improved local time implementation focusing on correctness.
It still crashes on dates before 1970 or after 2038, but at least the
error message is helpful.
"""
def tzname(self, dt):
is_dst = False if dt is None else self._isdst(dt)
return _time.tzname[is_dst]
def _isdst(self, dt):
try:
return super(LocalTimezone, self)._isdst(dt)
except (OverflowError, ValueError) as exc:
exc_type = type(exc)
exc_value = exc_type(
"Unsupported value: %r. You should install pytz." % dt)
exc_value.__cause__ = exc
six.reraise(exc_type, exc_value, sys.exc_info()[2])
utc = pytz.utc if pytz else UTC()
"""UTC time zone as a tzinfo instance."""
def get_fixed_timezone(offset):
"""
Returns a tzinfo instance with a fixed offset from UTC.
"""
if isinstance(offset, timedelta):
offset = offset.seconds // 60
sign = '-' if offset < 0 else '+'
hhmm = '%02d%02d' % divmod(abs(offset), 60)
name = sign + hhmm
return FixedOffset(offset, name)
# In order to avoid accessing settings at compile time,
# wrap the logic in a function and cache the result.
@lru_cache.lru_cache()
def get_default_timezone():
"""
Returns the default time zone as a tzinfo instance.
This is the time zone defined by settings.TIME_ZONE.
"""
if isinstance(settings.TIME_ZONE, six.string_types) and pytz is not None:
return pytz.timezone(settings.TIME_ZONE)
else:
# This relies on os.environ['TZ'] being set to settings.TIME_ZONE.
return LocalTimezone()
# This function exists for consistency with get_current_timezone_name
def get_default_timezone_name():
"""
Returns the name of the default time zone.
"""
return _get_timezone_name(get_default_timezone())
_active = local()
def get_current_timezone():
"""
Returns the currently active time zone as a tzinfo instance.
"""
return getattr(_active, "value", get_default_timezone())
def get_current_timezone_name():
"""
Returns the name of the currently active time zone.
"""
return _get_timezone_name(get_current_timezone())
def _get_timezone_name(timezone):
"""
Returns the name of ``timezone``.
"""
try:
# for pytz timezones
return timezone.zone
except AttributeError:
# for regular tzinfo objects
return timezone.tzname(None)
# Timezone selection functions.
# These functions don't change os.environ['TZ'] and call time.tzset()
# because it isn't thread safe.
def activate(timezone):
"""
Sets the time zone for the current thread.
The ``timezone`` argument must be an instance of a tzinfo subclass or a
time zone name. If it is a time zone name, pytz is required.
"""
if isinstance(timezone, tzinfo):
_active.value = timezone
elif isinstance(timezone, six.string_types) and pytz is not None:
_active.value = pytz.timezone(timezone)
else:
raise ValueError("Invalid timezone: %r" % timezone)
def deactivate():
"""
Unsets the time zone for the current thread.
Django will then use the time zone defined by settings.TIME_ZONE.
"""
if hasattr(_active, "value"):
del _active.value
class override(ContextDecorator):
"""
Temporarily set the time zone for the current thread.
This is a context manager that uses ``~django.utils.timezone.activate()``
to set the timezone on entry, and restores the previously active timezone
on exit.
The ``timezone`` argument must be an instance of a ``tzinfo`` subclass, a
time zone name, or ``None``. If is it a time zone name, pytz is required.
If it is ``None``, Django enables the default time zone.
"""
def __init__(self, timezone):
self.timezone = timezone
def __enter__(self):
self.old_timezone = getattr(_active, 'value', None)
if self.timezone is None:
deactivate()
else:
activate(self.timezone)
def __exit__(self, exc_type, exc_value, traceback):
if self.old_timezone is None:
deactivate()
else:
_active.value = self.old_timezone
# Templates
def template_localtime(value, use_tz=None):
"""
Checks if value is a datetime and converts it to local time if necessary.
If use_tz is provided and is not None, that will force the value to
be converted (or not), overriding the value of settings.USE_TZ.
This function is designed for use by the template engine.
"""
should_convert = (isinstance(value, datetime)
and (settings.USE_TZ if use_tz is None else use_tz)
and not is_naive(value)
and getattr(value, 'convert_to_local_time', True))
return localtime(value) if should_convert else value
# Utilities
def localtime(value, timezone=None):
"""
Converts an aware datetime.datetime to local time.
Local time is defined by the current time zone, unless another time zone
is specified.
"""
if timezone is None:
timezone = get_current_timezone()
# If `value` is naive, astimezone() will raise a ValueError,
# so we don't need to perform a redundant check.
value = value.astimezone(timezone)
if hasattr(timezone, 'normalize'):
# This method is available for pytz time zones.
value = timezone.normalize(value)
return value
def now():
"""
Returns an aware or naive datetime.datetime, depending on settings.USE_TZ.
"""
if settings.USE_TZ:
# timeit shows that datetime.now(tz=utc) is 24% slower
return datetime.utcnow().replace(tzinfo=utc)
else:
return datetime.now()
# By design, these four functions don't perform any checks on their arguments.
# The caller should ensure that they don't receive an invalid value like None.
def is_aware(value):
"""
Determines if a given datetime.datetime is aware.
The logic is described in Python's docs:
http://docs.python.org/library/datetime.html#datetime.tzinfo
"""
return value.tzinfo is not None and value.tzinfo.utcoffset(value) is not None
def is_naive(value):
"""
Determines if a given datetime.datetime is naive.
The logic is described in Python's docs:
http://docs.python.org/library/datetime.html#datetime.tzinfo
"""
return value.tzinfo is None or value.tzinfo.utcoffset(value) is None
def make_aware(value, timezone=None):
"""
Makes a naive datetime.datetime in a given time zone aware.
"""
if timezone is None:
timezone = get_current_timezone()
if hasattr(timezone, 'localize'):
# This method is available for pytz time zones.
return timezone.localize(value, is_dst=None)
else:
# Check that we won't overwrite the timezone of an aware datetime.
if is_aware(value):
raise ValueError(
"make_aware expects a naive datetime, got %s" % value)
# This may be wrong around DST changes!
return value.replace(tzinfo=timezone)
def make_naive(value, timezone=None):
"""
Makes an aware datetime.datetime naive in a given time zone.
"""
if timezone is None:
timezone = get_current_timezone()
# If `value` is naive, astimezone() will raise a ValueError,
# so we don't need to perform a redundant check.
value = value.astimezone(timezone)
if hasattr(timezone, 'normalize'):
# This method is available for pytz time zones.
value = timezone.normalize(value)
return value.replace(tzinfo=None)
|
|
import discord
from discord.ext import commands
import time
import clashroyale as clashroyaleAPI
import itertools
import re
from datetime import datetime
BOTCOMMANDER_ROLES = ["Family Representative", "Clan Manager",
"Clan Deputy", "Co-Leader", "Hub Officer", "admin"]
creditIcon = "https://i.imgur.com/TP8GXZb.png"
credits = "Bot by GR8 | Titan"
class clashroyale:
"""Live statistics for Clash Royale"""
def __init__(self, bot):
self.bot = bot
self.auth = self.bot.get_cog('crtools').auth
self.tags = self.bot.get_cog('crtools').tags
self.clans = self.bot.get_cog('crtools').clans
self.constants = self.bot.get_cog('crtools').constants
self.clash = clashroyaleAPI.OfficialAPI(self.auth.getOfficialToken(), is_async=True)
def grouper(self, iterable, n):
args = [iter(iterable)] * n
return itertools.zip_longest(*args)
def getCards(self, maxPlayers):
"""Converts maxPlayers to Cards"""
cards = {
"50": 25,
"100": 100,
"200": 400,
"1000": 2000
}
return cards[str(maxPlayers)]
def getCoins(self, maxPlayers):
"""Converts maxPlayers to Coins"""
coins = {
"50": 175,
"100": 700,
"200": 2800,
"1000": 14000
}
return coins[str(maxPlayers)]
async def cleanTime(self, time):
"""Converts time to timestamp"""
return int(datetime.strptime(time, '%Y%m%dT%H%M%S.%fZ').timestamp()) + 7200
def camelToString(self, label):
"""Convert from camel case to normal"""
return re.sub(r'((?<=[a-z])[A-Z]|(?<!\A)[A-Z](?=[a-z]))', r' \1', label)
def emoji(self, name):
"""Emoji by name."""
for emoji in self.bot.get_all_emojis():
if emoji.name == name.replace(" ", "").replace("-", "").replace(".", ""):
return '<:{}:{}>'.format(emoji.name, emoji.id)
return ''
async def getClanEmoji(self, tag):
"""Check if emoji exists for the clan"""
clankey = await self.clans.getClanKey(tag.strip("#"))
if clankey is not None:
return await self.clans.getClanData(clankey, 'emoji')
return self.emoji("clan")
def getLeagueEmoji(self, trophies):
"""Get clan war League Emoji"""
mapLeagues = {
"legendleague": [3000, 99999],
"gold3league": [2500, 2999],
"gold2league": [2000, 2499],
"goldleague": [1500, 1999],
"silver3league": [1200, 1499],
"silver2league": [900, 1199],
"silverleague": [600, 899],
"bronze3league": [400, 599],
"bronze2league": [200, 399],
"bronzeleague": [0, 199]
}
for league in mapLeagues.keys():
if mapLeagues[league][0] <= trophies <= mapLeagues[league][1]:
return self.emoji(league)
def getArenaEmoji(self, trophies):
"""Get Arena and League Emoji"""
arenaMap = {
"arena1": [0, 399],
"arena2": [400, 799],
"arena3": [800, 1099],
"arena4": [1100, 1399],
"arena5": [1400, 1699],
"arena6": [1700, 1999],
"arena7": [2000, 2299],
"arena8": [2300, 2599],
"arena9": [2600, 2999],
"arena10": [3000, 3399],
"arena11": [3400, 3799],
"arena12": [3800, 3999],
"league1": [4000, 4299],
"league2": [4300, 4599],
"league3": [4600, 4899],
"league4": [4900, 5199],
"league5": [5200, 5499],
"league6": [5500, 5799],
"league7": [5800, 6099],
"league8": [6100, 6399],
"league9": [6400, 9999]
}
for arena in arenaMap.keys():
if arenaMap[arena][0] <= trophies <= arenaMap[arena][1]:
return self.emoji(arena)
async def getClanLeader(self, members):
"""Return clan leader from a list of members"""
for member in members:
if member.role == "leader":
arenaFormat = member.arena.name.replace(' ', '').lower()
return "{} {}".format(self.emoji(arenaFormat), member.name)
async def getCreaterName(self, tag, members: list):
"""Return clan leader from a list of members"""
for member in members:
if member.tag == tag:
return member.name
return ""
async def sec2tme(self, sec):
"""Converts seconds to readable time"""
m, s = divmod(sec, 60)
h, m = divmod(m, 60)
        if h == 0:
            if m == 0:
return "{} seconds".format(s)
else:
return "{} minutes, {} secs".format(m, s)
else:
return "{} hour, {} mins".format(h, m)
async def clanwarReadiness(self, cards):
"""Calculate clanwar readiness"""
readiness = {}
leagueLevels = {
"legendary": 12,
"gold": 11,
"silver": 10,
"bronze": 9
}
for league in leagueLevels.keys():
readiness[league] = {"name": league.capitalize(),
"percent": 0,
"cards": [],
"levels": str(leagueLevels[league])}
for card in cards:
if await self.constants.get_new_level(card) >= leagueLevels[league]:
readiness[league]["cards"].append(card.name)
readiness[league]["percent"] = int((len(readiness[league]["cards"]) / len(cards)) * 100)
readiness["gold"]["cards"] = list(set(readiness["gold"]["cards"]) -
set(readiness["legendary"]["cards"]))
readiness["silver"]["cards"] = list(set(readiness["silver"]["cards"]) -
set(readiness["gold"]["cards"]) -
set(readiness["legendary"]["cards"]))
readiness["bronze"]["cards"] = list(set(readiness["bronze"]["cards"]) -
set(readiness["silver"]["cards"]) -
set(readiness["gold"]["cards"]) -
set(readiness["legendary"]["cards"]))
return readiness
@commands.command(pass_context=True, aliases=['clashprofile'])
async def clashProfile(self, ctx, member: discord.Member=None):
"""View your Clash Royale Profile Data and Statstics."""
member = member or ctx.message.author
await self.bot.type()
try:
profiletag = await self.tags.getTagCR(member.id)
profiledata = await self.clash.get_player(profiletag)
except clashroyaleAPI.RequestError:
return await self.bot.say("Error: cannot reach Clash Royale Servers. Please try again later.")
except KeyError:
return await self.bot.say("You need to first save your profile using ``{}save #GAMETAG``".format(ctx.prefix))
arenaFormat = profiledata.arena.name.replace(' ', '').lower()
        games1v1 = profiledata.wins + profiledata.losses  # simplification of battle_count - (battle_count - (wins + losses))
embed = discord.Embed(color=0xFAA61A)
embed.set_author(name=profiledata.name + " ("+profiledata.tag+")",
icon_url=await self.constants.get_clan_image(profiledata),
url="https://royaleapi.com/player/"+profiledata.tag.strip("#"))
embed.set_thumbnail(url="https://gr8z.github.io/cr-api-assets/arenas/{}.png".format(arenaFormat))
embed.add_field(name="Trophies", value="{} {:,}".format(self.emoji(arenaFormat), profiledata.trophies), inline=True)
embed.add_field(name="Highest Trophies", value="{} {:,}".format(self.getArenaEmoji(profiledata.best_trophies),
profiledata.best_trophies), inline=True)
embed.add_field(name="Level", value=self.emoji("level{}".format(profiledata.exp_level)), inline=True)
if profiledata.exp_level > 12:
embed.add_field(name="Star Points", value="{} {:,}".format(self.emoji("starLevel"), profiledata.star_points), inline=True)
if profiledata.clan is not None:
embed.add_field(name="Clan {}".format(profiledata.role.capitalize()),
value="{} {}".format(await self.getClanEmoji(profiledata.clan.tag), profiledata.clan.name), inline=True)
embed.add_field(name="Cards Found", value="{} {}/90".format(self.emoji("card"), len(profiledata.cards)), inline=True)
embed.add_field(name="Favourite Card", value="{} {}".format(self.emoji(profiledata.current_favourite_card.name),
profiledata.current_favourite_card.name), inline=True)
embed.add_field(name="Games Played", value="{} {:,}".format(self.emoji("battle"), profiledata.battle_count), inline=True)
embed.add_field(name="Tourney Games Played", value="{} {:,}".format(self.emoji("tourney"), profiledata.tournament_battle_count), inline=True)
embed.add_field(name="Wins", value="{} {:,} ({:.1f}%)".format(self.emoji("blueCrown"), profiledata.wins, (profiledata.wins/games1v1)*100), inline=True)
embed.add_field(name="Losses", value="{} {:,} ({:.1f}%)".format(self.emoji("redCrown"), profiledata.losses, (profiledata.losses/games1v1)*100), inline=True)
embed.add_field(name="Three Crown Wins", value="{} {:,} ({:.1f}%)".format(self.emoji("3crown"), profiledata.three_crown_wins, (profiledata.three_crown_wins/profiledata.battle_count)*100), inline=True)
embed.add_field(name="Friendly Wins", value="{} {:,}".format(self.emoji("members"), profiledata.achievements[9].value), inline=True)
embed.add_field(name="War Day Wins", value="{} {}".format(self.emoji("warwin"), profiledata.war_day_wins), inline=True)
embed.add_field(name="Total Donations", value="{} {:,}".format(self.emoji("card"), profiledata.total_donations), inline=True)
embed.add_field(name="Donations Recieved", value="{} {:,}".format(self.emoji("card"), profiledata.clan_cards_collected), inline=True)
embed.add_field(name="Challenge Max Wins", value="{} {}".format(self.emoji("tourney"), profiledata.challenge_max_wins), inline=True)
embed.add_field(name="Challenge Cards Won", value="{} {:,}".format(self.emoji("cards"), profiledata.challenge_cards_won), inline=True)
embed.add_field(name="Tournament Cards Won", value="{} {:,}".format(self.emoji("cards"), profiledata.tournament_cards_won), inline=True)
embed.add_field(name="Hosted/Joined Tourneys", value="{} {:,}/{:,}".format(self.emoji("tourney"), profiledata.achievements[6].value, profiledata.achievements[7].value), inline=True)
embed.add_field(name="Clans Joined", value="{} {:,}".format(self.emoji("clan"), profiledata.achievements[0].value), inline=True)
embed.set_footer(text=credits, icon_url=creditIcon)
await self.bot.say(embed=embed)
@commands.command(pass_context=True)
async def chests(self, ctx, member: discord.Member=None):
"""View your upcoming chest cycle for Clash Royale."""
member = member or ctx.message.author
await self.bot.type()
try:
profiletag = await self.tags.getTagCR(member.id)
chestdata = (await self.clash.get_player_chests(profiletag)).get("items")
except clashroyaleAPI.RequestError:
return await self.bot.say("Error: cannot reach Clash Royale Servers. Please try again later.")
except KeyError:
return await self.bot.say("You need to first save your profile using ``{}save #GAMETAG``".format(ctx.prefix))
mapEmoji = {
'Silver Chest': 'silver',
'Golden Chest': 'gold',
'Giant Chest': 'giant',
'Epic Chest': 'epic',
'Mega Lightning Chest': 'mlc',
'Magical Chest': 'magic',
'Legendary Chest': 'legendary'
}
valuechestText, specialChestText = "", ""
for chest in chestdata:
if chest.index < 9:
valuechestText += self.emoji(mapEmoji[chest.name]) + " "
else:
emojiChest = self.emoji(mapEmoji[chest.name])
specialChestText += "{} +{} ".format(emojiChest, chest.index + 1)
embed = discord.Embed(title="", color=0xFAA61A, description="Your Upcoming chests.")
embed.set_thumbnail(url="https://cdn.discordapp.com/emojis/380832387195469826.png")
embed.set_author(name="{} (#{})".format(member.name, profiletag))
embed.add_field(name="Upcoming Chests", value=valuechestText, inline=False)
embed.add_field(name="Special Chests", value=specialChestText, inline=False)
embed.set_footer(text=credits, icon_url=creditIcon)
await self.bot.say(embed=embed)
@commands.command(pass_context=True, aliases=['clashdeck'])
async def clashDeck(self, ctx, member: discord.Member=None):
"""View yours or other's clash royale Deck"""
member = member or ctx.message.author
await self.bot.type()
try:
profiletag = await self.tags.getTagCR(member.id)
profiledata = await self.clash.get_player(profiletag)
except clashroyaleAPI.RequestError:
return await self.bot.say("Error: cannot reach Clash Royale Servers. Please try again later.")
except KeyError:
return await self.bot.say("You need to first save your profile using ``{}save #GAMETAG``".format(ctx.prefix))
message = ctx.message
message.content = ctx.prefix + "deck gl " + await self.constants.decklink_url(profiledata.current_deck)
message.author = member
await self.bot.process_commands(message)
@commands.command(pass_context=True, aliases=['cwr'])
async def clanwarreadiness(self, ctx, member: discord.Member=None):
"""View yours or other's clash royale CWR"""
member = member or ctx.message.author
await self.bot.type()
try:
profiletag = await self.tags.getTagCR(member.id)
profiledata = await self.clash.get_player(profiletag)
leagues = await self.clanwarReadiness(profiledata.cards)
except clashroyaleAPI.RequestError:
return await self.bot.say("Error: cannot reach Clash Royale Servers. Please try again later.")
except KeyError:
return await self.bot.say("You need to first save your profile using ``{}save #GAMETAG``".format(ctx.prefix))
embed = discord.Embed(color=0xFAA61A, description="Clan War Readiness")
embed.set_author(name=profiledata.name + " ("+profiledata.tag+")",
icon_url=await self.constants.get_clan_image(profiledata),
url="https://royaleapi.com/player/"+profiledata.tag.strip("#"))
embed.add_field(name="War Day Wins", value="{} {}".format(self.emoji("warwin"), profiledata.war_day_wins), inline=True)
embed.add_field(name="War Cards Collected", value="{} {}".format(self.emoji("card"), profiledata.clan_cards_collected), inline=True)
embed.set_footer(text=credits, icon_url=creditIcon)
for league in leagues.keys():
f_title = "{} League (Lvl {}) - {}%".format(leagues[league]["name"], leagues[league]["levels"], leagues[league]["percent"])
groups = self.grouper(leagues[league]["cards"], 30)
for index, cards in enumerate(groups):
value = ""
for card in cards:
if card is not None:
value += self.emoji(card)
embed.add_field(name=f_title if index == 0 else '\u200b', value=value, inline=False)
await self.bot.say(embed=embed)
@commands.command(pass_context=True)
async def clan(self, ctx, clantag):
"""View Clash Royale Clan statistics and information """
await self.bot.type()
clantag = await self.tags.formatTag(clantag)
if not await self.tags.verifyTag(clantag):
return await self.bot.say("The ID you provided has invalid characters. Please try again.")
try:
clandata = await self.clash.get_clan(clantag)
except clashroyaleAPI.RequestError:
return await self.bot.say("Error: cannot reach Clash Royale Servers. Please try again later.")
embed = discord.Embed(description=clandata.description, color=0xFAA61A)
embed.set_author(name=clandata.name + " ("+clandata.tag+")",
icon_url=await self.constants.get_clan_image(clandata),
url="https://legendclans.com/clanInfo/"+clandata.tag.strip("#"))
embed.set_thumbnail(url=await self.constants.get_clan_image(clandata))
embed.add_field(name="Members", value="{} {}/50".format(self.emoji("members"), clandata.get("members")), inline=True)
embed.add_field(name="Leader", value=await self.getClanLeader(clandata.member_list), inline=True)
embed.add_field(name="Donations", value="{} {:,}".format(self.emoji("cards"), clandata.donations_per_week), inline=True)
embed.add_field(name="Score", value="{} {:,}".format(self.emoji("PB"), clandata.clan_score), inline=True)
embed.add_field(name="War Trophies",
value="{} {:,}".format(self.getLeagueEmoji(clandata.clan_war_trophies), clandata.clan_war_trophies), inline=True)
embed.add_field(name="Required Trophies",
value="{} {:,}".format(self.emoji("crtrophy"), clandata.required_trophies), inline=True)
embed.add_field(name="Status", value=":envelope_with_arrow: {}".format(self.camelToString(clandata.type).capitalize()), inline=True)
if clandata.location.is_country:
embed.add_field(name="Country",
value=":flag_{}: {}".format(await self.constants.get_region_key(clandata.location.id),
clandata.location.name), inline=True)
else:
embed.add_field(name="Location", value=":earth_americas: {}".format(clandata.location.name), inline=True)
embed.set_footer(text=credits, icon_url=creditIcon)
await self.bot.say(embed=embed)
@commands.command(pass_context=True, aliases=['cw'])
async def tournament(self, ctx, tag, password=None):
"""View Clash Royale Tournament Information """
await self.bot.type()
tag = await self.tags.formatTag(tag)
if not await self.tags.verifyTag(tag):
return await self.bot.say("The ID you provided has invalid characters. Please try again.")
try:
tourneydata = await self.clash.get_tournament(tag)
except clashroyaleAPI.NotFoundError:
return await self.bot.say("Error: Tournament not found. Please double check your #TAG")
except clashroyaleAPI.RequestError:
return await self.bot.say("Error: cannot reach Clash Royale Servers. Please try again later.")
maxPlayers = tourneydata.max_capacity
embed = discord.Embed(title="Click this link to join the Tournament in Clash Royale!",
url="https://legendclans.com/tournaments?id={}&pass={}".format(tag, password), color=0xFAA61A)
embed.set_thumbnail(url='https://statsroyale.com/images/tournament.png')
embed.set_author(name="{} ({})".format(tourneydata.name, tourneydata.tag),
url="https://royaleapi.com/tournament/" + tourneydata.tag.strip("#"))
embed.add_field(name="Players", value="{} {}/{}".format(self.emoji("members"),
tourneydata.capacity,
maxPlayers), inline=True)
embed.add_field(name="Status", value=self.camelToString(tourneydata.status).capitalize(), inline=True)
tourneydata.open = True if tourneydata.type == "open" else False
if not tourneydata.open:
if password is not None:
embed.add_field(name="Password", value="```{}```".format(password), inline=True)
else:
return await self.bot.say("Error: Please enter a tournament password.")
await self.bot.delete_message(ctx.message)
if tourneydata.status != "ended":
tourneydata.created_time = await self.cleanTime(tourneydata.created_time)
if tourneydata.status != "inProgress":
startTime = await self.sec2tme((tourneydata.created_time + tourneydata.preparation_duration) - int(time.time()))
embed.add_field(name="Starts In", value=startTime, inline=True)
endTime = await self.sec2tme((tourneydata.created_time + tourneydata.preparation_duration + tourneydata.duration) - int(time.time()))
embed.add_field(name="Ends In", value=endTime, inline=True)
embed.add_field(name="Hosted By", value=await self.getCreaterName(tourneydata.creator_tag, tourneydata.members_list), inline=True)
if tourneydata.first_place_card_prize > 0:
cards = self.getCards(maxPlayers)
coins = self.getCoins(maxPlayers)
embed.add_field(name="Top prize", value="{} {} {} {}".format(self.emoji("tournamentcards"),
cards,
self.emoji("coin"),
coins), inline=True)
embed.set_footer(text=credits, icon_url=creditIcon)
await self.bot.say(embed=embed)
@commands.command(pass_context=True, no_pm=True)
async def save(self, ctx, profiletag: str, member: discord.Member=None):
""" save your Clash Royale Profile Tag
Example:
[p]save #CRRYTPTT @GR8
[p]save #CRRYRPCC
"""
server = ctx.message.server
author = ctx.message.author
profiletag = await self.tags.formatTag(profiletag)
if not await self.tags.verifyTag(profiletag):
return await self.bot.say("The ID you provided has invalid characters. Please try again.")
await self.bot.type()
allowed = False
if member is None:
allowed = True
elif member.id == author.id:
allowed = True
else:
botcommander_roles = [discord.utils.get(server.roles, name=r) for r in BOTCOMMANDER_ROLES]
botcommander_roles = set(botcommander_roles)
author_roles = set(author.roles)
if len(author_roles.intersection(botcommander_roles)):
allowed = True
if not allowed:
return await self.bot.say("You dont have enough permissions to set tags for others.")
member = member or ctx.message.author
try:
profiledata = await self.clash.get_player(profiletag)
checkUser = await self.tags.getUserCR(server.members, profiletag)
if checkUser is not None:
return await self.bot.say("Error, This Player ID is already linked with **" + checkUser.display_name + "**")
await self.tags.linkTagCR(profiletag, member.id)
embed = discord.Embed(color=discord.Color.green())
avatar = member.avatar_url if member.avatar else member.default_avatar_url
embed.set_author(name='{} (#{}) has been successfully saved.'.format(profiledata.name, profiletag),
icon_url=avatar)
await self.bot.say(embed=embed)
except clashroyaleAPI.NotFoundError:
return await self.bot.say("We cannot find your ID in our database, please try again.")
except clashroyaleAPI.RequestError:
return await self.bot.say("Error: cannot reach Clash Royale Servers. Please try again later.")
def setup(bot):
bot.add_cog(clashroyale(bot))
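# --- Illustrative sketch (not part of the cog): what the two pure helpers above
# produce. It mirrors the bodies of grouper() and camelToString(), since calling
# the methods directly would require a cog instance.
if __name__ == "__main__":
    print(list(itertools.zip_longest(*([iter("ABCDE")] * 2))))
    # [('A', 'B'), ('C', 'D'), ('E', None)]  -- same grouping grouper() performs
    print(re.sub(r'((?<=[a-z])[A-Z]|(?<!\A)[A-Z](?=[a-z]))', r' \1', "inProgress"))
    # "in Progress"  -- same split camelToString() performs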
|
|
import salt
import salt.client
import salt.gdc.groups
from openstack_dashboard.settings import SALT_MASTER_CONFIG
from openstack_dashboard.settings import SALT_SLS_DIR
from openstack_dashboard.settings import SALT_SLS_REPO_DIR
import yaml
import operator
from os import listdir
from os.path import isfile,isdir
from openstack_dashboard.dashboards.groups.groupmember import Member,Group
class SlsGoFru_HighLevelKey():
# Example:
# Input:
# hash= {'a': {'B': {1: 2}}, 1: 2, 'R': {'b': {'1': 2, 'T': {'a': 1}}}, 'b': {1: 2, 3: {'1': {'3': {'a': 1, '3': 2}}}}, '4': {'3': {1: 2, 3: 4}}}
# high_level_key = 'b'
# phrase='a'
# key_of_incoming_hash = None (always use None)
# Output:
# {'3': 1, 'T': 1}
def __init__(self, high_level_key = None , key_of_incoming_hash = None , hash=None , phrase=None):
repos_inside_high_level_key=getattr(self,'repos_inside_high_level_key',{})
if high_level_key == None:
for key in hash.keys():
if phrase == key:
if key_of_incoming_hash!= None:
repos_inside_high_level_key[key_of_incoming_hash] = hash[key]
elif (isinstance(hash[key], dict)):
instance = SlsGoFru_HighLevelKey(high_level_key = None, key_of_incoming_hash = key , hash=hash[key],phrase=phrase)
for key in instance.repos_inside_high_level_key.keys():
repos_inside_high_level_key[key] = instance.repos_inside_high_level_key[key]
setattr(self, 'repos_inside_high_level_key', repos_inside_high_level_key)
else:
if (isinstance(hash, dict)):
for key in hash.keys():
if (high_level_key == key):
instance = SlsGoFru_HighLevelKey(high_level_key = None, key_of_incoming_hash = key , hash=hash[key],phrase=phrase)
else:
instance = SlsGoFru_HighLevelKey(high_level_key = high_level_key, key_of_incoming_hash = key , hash=hash[key],phrase=phrase)
for key in instance.repos_inside_high_level_key.keys():
repos_inside_high_level_key[key] = instance.repos_inside_high_level_key[key]
setattr(self, 'repos_inside_high_level_key', repos_inside_high_level_key)
class SlsGoFru():
# Example:
# Input:
# hash= { 'a': {'B': {1: 2}}, 1: 2, 'b': {1: 2, 3: {'1': { '3': {'a': 1, '3': 2} } } } , '4': { '3': {1: 2, 3: 4} } }
# phrase = "3"
# key_of_incoming_hash = None (always use None)
# Output:
# {'1': {'a': 1, '3': 2}, '4': {1: 2, 3: 4}}
def __init__(self, key_of_incoming_hash = None , hash=None , phrase=None):
found_repos=getattr(self,'found_repos',{})
for key in hash.keys():
if phrase == key:
if key_of_incoming_hash!= None:
found_repos[key_of_incoming_hash] = hash[key]
elif (isinstance(hash[key], dict)):
instance = SlsGoFru(key_of_incoming_hash = key , hash=hash[key],phrase=phrase)
for key in instance.found_repos.keys():
found_repos[key] = instance.found_repos[key]
setattr(self, 'found_repos', found_repos)
class DirGoFru():
def __init__(self, dir_path = None):
if dir_path != None:
dir_content=getattr(self,'dir_content',[])
try:
content=listdir(dir_path)
            except OSError:
                self.dir_content = dir_content
                return
for sls_file_name in content:
full_sls_path = (dir_path+"/"+sls_file_name)
if isfile(full_sls_path):
dir_content.append(full_sls_path)
elif isdir(full_sls_path):
new_dir = DirGoFru(dir_path = full_sls_path).dir_content
for file_path in new_dir:
dir_content.append(file_path)
setattr(self, 'dir_content', dir_content)
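# Illustration (hypothetical paths): DirGoFru walks a directory tree and collects
# every file path it finds, e.g.
#     DirGoFru(dir_path="/srv/salt/base").dir_content
#     # -> ['/srv/salt/base/top.sls', '/srv/salt/base/repos/epel.sls', ...]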
def get_repo_matrix():
return {"zypper":["alias",
"autorefresh",
"baseurl",
"cache",
"enabled",
"gpgcheck",
"gpgautoimport",
"keeppackages",
"mirrorlist",
"metadataPath",
"name",
"packagesPath",
"priority",
"refresh",
"type",
"url"],
"yum":["baseurl",
"comments",
"enabled",
"failovermethod",
"file",
"gpgcheck",
"gpgkey",
"metadata_expire",
"mirrorlist",
"metalink",
"name",
"skip_if_unavailable",
"file"],
"deb":["refresh_db",
"dist",
"file",
"ppa_auth",
"keyid",
"keyserver",
"key_url",
"line",
"uri",
"architectures"
"comps",
"disabled",
"type",
"consolidate",
"comments"]}
def sls_is_repofile(sls_file_hash=None):
if sls_file_hash == None:
return False
table = {"zypper":0,"yum":0,"deb":0}
repo_matrix = get_repo_matrix()
for repo_type in repo_matrix:
if repo_type == "zypper":
for repo_key in repo_matrix[repo_type]:
if repo_key in sls_file_hash:
table[repo_type]+=1
if repo_type == "yum":
for repo_key in repo_matrix[repo_type]:
if repo_key in sls_file_hash:
table[repo_key]+=1
if repo_type == "deb":
for repo_key in repo_matrix[repo_type]:
if repo_key in sls_file_hash:
table[repo_key]+=1
sorted_table = sorted(table.items(), key=operator.itemgetter(1))
(repo_name,count)=sorted_table.pop()
if count == 0:
return False
else:
return repo_name
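# Illustration (made-up SLS data): sls_is_repofile() scores a parsed SLS dict
# against the key matrix above and returns the best-matching repo family, or
# False when nothing matches:
#     sls_is_repofile({"autorefresh": True, "gpgautoimport": True,
#                      "baseurl": "http://example.com/repo"})   # -> 'zypper'
#     sls_is_repofile({"unrelated_key": 1})                     # -> False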
def create_repo():
pass
def remove_repo():
pass
def edit_repo():
pass
def get_environment(env_name=None):
try:
master_config_file = open(SALT_MASTER_CONFIG,"r")
master_config = yaml.load('\n'.join(master_config_file.readlines()))
master_config_file.close()
if env_name == None:
return master_config.get("file_roots",None)
else:
environments = master_config.get("file_roots",None)
if environments!=None:
return environments.get(env_name,None)
    except (IOError, OSError):
        print 'No such file or directory: %s'%(SALT_MASTER_CONFIG)
return False
def get_directory_content(dir_path = None):
if dir_path != None:
dir_files = []
try:
dir_files=DirGoFru(dir_path=dir_path).dir_content
return dir_files
except OSError:
return dir_files
def list_something_inside_by_key(env_name=None,key_phrase="pkgrepo.managed"):
"""By default as you can see it returns repolists """
repo_content = []
try:
if env_name == None:
environments = get_environment()
for env in environments:
for directory in environments[env]:
content=get_directory_content(dir_path=directory)
for sls_file_name in content:
sls_file = open(sls_file_name,"r")
try:
sls_file_data = yaml.load('\n'.join(sls_file.readlines()))
sls_file.close()
except:
sls_file_data = None
sls_file.close()
if (isinstance(sls_file_data, dict)):
repo_content.append(SlsGoFru(hash=sls_file_data,phrase=key_phrase).found_repos)
else:
env_dirs = get_environment(env_name)
env_files = []
for env_dir in env_dirs:
content=get_directory_content(dir_path=env_dir)
for env_file in content:
env_files.append(env_file)
for sls_file_name in env_files:
sls_file = open(sls_file_name,"r")
try:
sls_file_data = yaml.load('\n'.join(sls_file.readlines()))
sls_file.close()
except:
sls_file_data = None
sls_file.close()
if (isinstance(sls_file_data, dict)):
repo_content.append(SlsGoFru(hash=sls_file_data,phrase=key_phrase).found_repos)
return repo_content
except OSError:
print 'No such file or directory: %s'%(SALT_SLS_REPO_DIR)
return False
def subscribe_instance_to_repo():
pass
def unsubscribe_instance_from_repo():
pass
def restart_master():
pass
def mod_master_config():
pass
def highstate(instance_name="*"):
info=local.cmd(instance_name,'state.highstate')
return info
def list_instance_repository_subscription(instance_name = None , env_name = None):
data = []
environments = get_environment()
if env_name != None:
if environments.get(env_name,None) == None: return []
environments = {env_name:environments[env_name]}
all_repos_in_all_environments = list_something_inside_by_key(key_phrase="pkgrepo.managed")
repositories = list_something_inside_by_key(key_phrase="pkgrepo.managed")
repository_names = []
for repository in repositories:
for repository_name in repository.keys():
repository_names.append(repository_name)
print '--- repository_names ---'
print repository_names
print '--- ---'
for env in environments:
for directory in environments[env]:
content=get_directory_content(dir_path=directory)
for sls_file_name in content:
sls_file = open(sls_file_name,"r")
try:
sls_file_data = yaml.load('\n'.join(sls_file.readlines()))
sls_file.close()
except:
sls_file_data = None
sls_file.close()
continue
if (isinstance(sls_file_data, dict)):
collected_data = (SlsGoFru(hash=sls_file_data,phrase=instance_name).found_repos)
instance_repository_set = []
if (collected_data not in data) and (collected_data!={}):
if (collected_data.get(env,None)!=None):
for repository_highlevel_name in collected_data[env]:
#
#
# Go inside and find repository hidden under high level key
#
#
print '-- repository_highlevel_name --'
print repository_highlevel_name
print '-- --'
if repository_highlevel_name not in repository_names:
for directory in environments[env]:
content=get_directory_content(dir_path=directory)
for sls_file_name in content:
sls_file = open(sls_file_name,"r")
try:
sls_file = open(sls_file_name,"r")
sls_file_data = yaml.load('\n'.join(sls_file.readlines()))
sls_file.close()
except:
sls_file_data = None
sls_file.close()
continue
if (isinstance(sls_file_data, dict)):
instance = SlsGoFru_HighLevelKey(high_level_key = repository_highlevel_name , hash=sls_file_data , phrase="pkgrepo.managed")
print '- repos_inside_high_level_key -'
print instance.repos_inside_high_level_key
print '- -'
for key in instance.repos_inside_high_level_key.keys():
instance_repository_set.append(key)
else:
instance_repository_set.append(repository_highlevel_name)
data.append({env:instance_repository_set})
return data
def get_groups_sls():
gm = salt.gdc.groups.GdcMatcher()
groups = gm.get_groups()
group_instances = []
for group in groups:
group_instances.append(Group(member_group_name=group,members=gm.get_by_group(group)))
return group_instances
def get_group_members_sls(group_name):
gm = salt.gdc.groups.GdcMatcher()
members = gm.get_by_group(group_name)
member_instances = []
for member in members:
member_instances.append(Member(member_name=member,member_group_names=gm.get_by_host(member)))
return member_instances
def minions_list_sls():
gm = salt.gdc.groups.GdcMatcher()
return gm.get_all_hosts()
def get_members_sls_custom():
gm = salt.gdc.groups.GdcMatcher()
member_list = []
for member_name in minions_list_sls():
member_list.append(Member(member_name = member_name,
member_group_names = gm.get_by_host(member_name)))
return member_list
def get_group_members_simple_sls(group_name=None):
gm = salt.gdc.groups.GdcMatcher()
member_list = []
for member_name in gm.get_by_group(group_name):
member_list.append(member_name)
return member_list
def get_member_sls(member_name=None):
member_groups = []
if member_name!= None:
gm = salt.gdc.groups.GdcMatcher()
member_groups = gm.get_by_host(member_name)
return Member(member_name=member_name,member_group_names=member_groups)
def update_member_sls(member_name=None,member_type='instance',member_group_names=[]):
gm = salt.gdc.groups.GdcMatcher()
current_member_groups = gm.get_by_host(member_name)
add_group = []
remove_group = []
for new_group in member_group_names:
if new_group not in current_member_groups:
add_group.append(new_group)
for old_group in current_member_groups:
if old_group not in member_group_names:
remove_group.append(old_group)
for group in add_group:
gm.join_to_group([member_name.encode('ascii', 'ignore')],group.encode('ascii', 'ignore'))
for group in remove_group:
gm.remove_from_group([member_name.encode('ascii', 'ignore')],group.encode('ascii', 'ignore'))
return gm.get_by_host(member_name)
def del_group(group_name=None):
gm = salt.gdc.groups.GdcMatcher()
gm.del_group([group_name])
def remove_everywhere(member_name=None):
gm = salt.gdc.groups.GdcMatcher()
gm.remove_everywhere([member_name])
def create_group_sls(group_name=None):
gm = salt.gdc.groups.GdcMatcher()
gm.add_group({group_name.encode('ascii', 'ignore'):''})
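# --- Usage sketch (illustration only; the minion and group names are made up). ---
# update_member_sls() diffs the desired group list against the minion's current
# groups and only joins/leaves the difference:
#     create_group_sls(group_name='staging')
#     update_member_sls(member_name='web01.example.com',
#                       member_group_names=['webservers', 'staging'])
#     get_group_members_simple_sls(group_name='webservers')
#     # -> ['web01.example.com', ...]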
|
|
# mako/cache.py
# Copyright 2006-2021 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from mako import util
_cache_plugins = util.PluginLoader("mako.cache")
register_plugin = _cache_plugins.register
register_plugin("beaker", "mako.ext.beaker_cache", "BeakerCacheImpl")
class Cache:
"""Represents a data content cache made available to the module
space of a specific :class:`.Template` object.
.. versionadded:: 0.6
:class:`.Cache` by itself is mostly a
container for a :class:`.CacheImpl` object, which implements
a fixed API to provide caching services; specific subclasses exist to
implement different
caching strategies. Mako includes a backend that works with
the Beaker caching system. Beaker itself then supports
a number of backends (i.e. file, memory, memcached, etc.)
The construction of a :class:`.Cache` is part of the mechanics
of a :class:`.Template`, and programmatic access to this
cache is typically via the :attr:`.Template.cache` attribute.
"""
impl = None
"""Provide the :class:`.CacheImpl` in use by this :class:`.Cache`.
This accessor allows a :class:`.CacheImpl` with additional
methods beyond that of :class:`.Cache` to be used programmatically.
"""
id = None
"""Return the 'id' that identifies this cache.
This is a value that should be globally unique to the
:class:`.Template` associated with this cache, and can
be used by a caching system to name a local container
for data specific to this template.
"""
starttime = None
"""Epochal time value for when the owning :class:`.Template` was
first compiled.
A cache implementation may wish to invalidate data earlier than
this timestamp; this has the effect of the cache for a specific
:class:`.Template` starting clean any time the :class:`.Template`
is recompiled, such as when the original template file changed on
the filesystem.
"""
def __init__(self, template, *args):
# check for a stale template calling the
# constructor
if isinstance(template, str) and args:
return
self.template = template
self.id = template.module.__name__
self.starttime = template.module._modified_time
self._def_regions = {}
self.impl = self._load_impl(self.template.cache_impl)
def _load_impl(self, name):
return _cache_plugins.load(name)(self)
def get_or_create(self, key, creation_function, **kw):
"""Retrieve a value from the cache, using the given creation function
to generate a new value."""
return self._ctx_get_or_create(key, creation_function, None, **kw)
def _ctx_get_or_create(self, key, creation_function, context, **kw):
"""Retrieve a value from the cache, using the given creation function
to generate a new value."""
if not self.template.cache_enabled:
return creation_function()
return self.impl.get_or_create(
key, creation_function, **self._get_cache_kw(kw, context)
)
def set(self, key, value, **kw):
r"""Place a value in the cache.
:param key: the value's key.
:param value: the value.
:param \**kw: cache configuration arguments.
"""
self.impl.set(key, value, **self._get_cache_kw(kw, None))
put = set
"""A synonym for :meth:`.Cache.set`.
This is here for backwards compatibility.
"""
def get(self, key, **kw):
r"""Retrieve a value from the cache.
:param key: the value's key.
:param \**kw: cache configuration arguments. The
backend is configured using these arguments upon first request.
Subsequent requests that use the same series of configuration
values will use that same backend.
"""
return self.impl.get(key, **self._get_cache_kw(kw, None))
def invalidate(self, key, **kw):
r"""Invalidate a value in the cache.
:param key: the value's key.
:param \**kw: cache configuration arguments. The
backend is configured using these arguments upon first request.
Subsequent requests that use the same series of configuration
values will use that same backend.
"""
self.impl.invalidate(key, **self._get_cache_kw(kw, None))
def invalidate_body(self):
"""Invalidate the cached content of the "body" method for this
template.
"""
self.invalidate("render_body", __M_defname="render_body")
def invalidate_def(self, name):
"""Invalidate the cached content of a particular ``<%def>`` within this
template.
"""
self.invalidate("render_%s" % name, __M_defname="render_%s" % name)
def invalidate_closure(self, name):
"""Invalidate a nested ``<%def>`` within this template.
Caching of nested defs is a blunt tool as there is no
management of scope -- nested defs that use cache tags
need to have names unique of all other nested defs in the
template, else their content will be overwritten by
each other.
"""
self.invalidate(name, __M_defname=name)
def _get_cache_kw(self, kw, context):
defname = kw.pop("__M_defname", None)
if not defname:
tmpl_kw = self.template.cache_args.copy()
tmpl_kw.update(kw)
elif defname in self._def_regions:
tmpl_kw = self._def_regions[defname]
else:
tmpl_kw = self.template.cache_args.copy()
tmpl_kw.update(kw)
self._def_regions[defname] = tmpl_kw
if context and self.impl.pass_context:
tmpl_kw = tmpl_kw.copy()
tmpl_kw.setdefault("context", context)
return tmpl_kw
class CacheImpl:
"""Provide a cache implementation for use by :class:`.Cache`."""
def __init__(self, cache):
self.cache = cache
pass_context = False
"""If ``True``, the :class:`.Context` will be passed to
:meth:`get_or_create <.CacheImpl.get_or_create>` as the name ``'context'``.
"""
def get_or_create(self, key, creation_function, **kw):
r"""Retrieve a value from the cache, using the given creation function
to generate a new value.
This function *must* return a value, either from
the cache, or via the given creation function.
If the creation function is called, the newly
created value should be populated into the cache
under the given key before being returned.
:param key: the value's key.
:param creation_function: function that when called generates
a new value.
:param \**kw: cache configuration arguments.
"""
raise NotImplementedError()
def set(self, key, value, **kw):
r"""Place a value in the cache.
:param key: the value's key.
:param value: the value.
:param \**kw: cache configuration arguments.
"""
raise NotImplementedError()
def get(self, key, **kw):
r"""Retrieve a value from the cache.
:param key: the value's key.
:param \**kw: cache configuration arguments.
"""
raise NotImplementedError()
def invalidate(self, key, **kw):
r"""Invalidate a value in the cache.
:param key: the value's key.
:param \**kw: cache configuration arguments.
"""
raise NotImplementedError()
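# --- Usage sketch (not part of mako): a dict-backed CacheImpl registered as a
# plugin. Illustration only: the module name "my_cache" and the alias trick
# below are assumptions made so the plugin loader can resolve the class; the
# per-call cache configuration arguments are simply ignored.
class DictCacheImpl(CacheImpl):
    """Minimal in-memory cache backend."""

    def __init__(self, cache):
        super().__init__(cache)
        self._data = {}

    def get_or_create(self, key, creation_function, **kw):
        if key not in self._data:
            self._data[key] = creation_function()
        return self._data[key]

    def set(self, key, value, **kw):
        self._data[key] = value

    def get(self, key, **kw):
        return self._data.get(key)

    def invalidate(self, key, **kw):
        self._data.pop(key, None)


if __name__ == "__main__":
    import sys
    from mako.template import Template

    # Make this script importable under the module path used below.
    sys.modules["my_cache"] = sys.modules[__name__]
    register_plugin("dict", "my_cache", "DictCacheImpl")
    t = Template("<%page cached='True'/>value: ${x}",
                 cache_impl="dict", cache_enabled=True)
    print(t.render(x=1))  # "value: 1"
    print(t.render(x=2))  # still "value: 1" -- the cached body is returned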
|
|
import os
import re
import stat
import subprocess
import sublime
import sublime_plugin
from Vintageous.ex import ex_error
from Vintageous.ex import shell
from Vintageous.ex.ex_error import Display
from Vintageous.ex.ex_error import ERR_CANT_FIND_DIR_IN_CDPATH
from Vintageous.ex.ex_error import ERR_CANT_MOVE_LINES_ONTO_THEMSELVES
from Vintageous.ex.ex_error import ERR_CANT_WRITE_FILE
from Vintageous.ex.ex_error import ERR_EMPTY_BUFFER
from Vintageous.ex.ex_error import ERR_FILE_EXISTS
from Vintageous.ex.ex_error import ERR_INVALID_ADDRESS
from Vintageous.ex.ex_error import ERR_NO_FILE_NAME
from Vintageous.ex.ex_error import ERR_OTHER_BUFFER_HAS_CHANGES
from Vintageous.ex.ex_error import ERR_READONLY_FILE
from Vintageous.ex.ex_error import ERR_UNSAVED_CHANGES
from Vintageous.ex.ex_error import show_error
from Vintageous.ex.ex_error import show_message
from Vintageous.ex.ex_error import show_status
from Vintageous.ex.ex_error import show_not_implemented
from Vintageous.ex.ex_error import VimError
from Vintageous.ex.parser.parser import parse_command_line
from Vintageous.ex.plat.windows import get_oem_cp
from Vintageous.ex.plat.windows import get_startup_info
from Vintageous.state import State
from Vintageous.vi import abbrev
from Vintageous.vi import utils
from Vintageous.vi.constants import MODE_NORMAL
from Vintageous.vi.constants import MODE_VISUAL
from Vintageous.vi.constants import MODE_VISUAL_LINE
from Vintageous.vi.core import ViWindowCommandBase
from Vintageous.vi.mappings import Mappings
from Vintageous.vi.search import find_all_in_range
from Vintageous.vi.settings import set_global
from Vintageous.vi.settings import set_local
from Vintageous.vi.sublime import has_dirty_buffers
from Vintageous.vi.utils import adding_regions
from Vintageous.vi.utils import first_sel
from Vintageous.vi.utils import modes
from Vintageous.vi.utils import R
from Vintageous.vi.utils import resolve_insertion_point_at_b
from Vintageous.vi.utils import row_at
GLOBAL_RANGES = []
CURRENT_LINE_RANGE = {'left_ref': '.', 'left_offset': 0,
'left_search_offsets': [], 'right_ref': None,
'right_offset': 0, 'right_search_offsets': []}
def changing_cd(f, *args, **kwargs):
def inner(*args, **kwargs):
try:
state = State(args[0].view)
except AttributeError:
state = State(args[0].window.active_view())
old = os.getcwd()
try:
# FIXME: Under some circumstances, like when switching projects to
# a file whose _cmdline_cd has not been set, _cmdline_cd might
# return 'None'. In such cases, change to the actual current
# directory as a last measure. (We should probably fix this anyway).
os.chdir(state.settings.vi['_cmdline_cd'] or old)
f(*args, **kwargs)
finally:
os.chdir(old)
return inner
def get_view_info(v):
"""gathers data to be displayed by :ls or :buffers
"""
path = v.file_name()
if path:
parent, leaf = os.path.split(path)
parent = os.path.basename(parent)
path = os.path.join(parent, leaf)
else:
path = v.name() or str(v.buffer_id())
leaf = v.name() or 'untitled'
status = []
if not v.file_name():
status.append("t")
if v.is_dirty():
status.append("*")
if v.is_read_only():
status.append("r")
if status:
leaf += ' (%s)' % ', '.join(status)
return [leaf, path]
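# Example (illustration only; the path is hypothetical): for a modified view
# backed by /home/user/project/foo.py this returns ['foo.py (*)', 'project/foo.py'].
# The status flags are: t = not backed by a file, * = dirty, r = read-only.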
class ExTextCommandBase(sublime_plugin.TextCommand):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def serialize_sel(self):
sels = [(r.a, r.b) for r in list(self.view.sel())]
self.view.settings().set('ex_data', {'prev_sel': sels})
def deserialize_sel(self, name='next_sel'):
return self.view.settings().get('ex_data')[name] or []
def set_sel(self):
sel = self.deserialize_sel()
self.view.sel().clear()
self.view.sel().add_all([sublime.Region(b) for (a, b) in sel])
def set_next_sel(self, data):
self.view.settings().set('ex_data', {'next_sel': data})
def set_mode(self):
state = State(self.view)
state.enter_normal_mode()
self.view.run_command('vi_enter_normal_mode')
def run(self, edit, *args, **kwargs):
self.serialize_sel()
self.run_ex_command(edit, *args, **kwargs)
self.set_sel()
self.set_mode()
class ExGoto(ViWindowCommandBase):
def run(self, command_line):
if not command_line:
# No-op: user issues ':'.
return
parsed = parse_command_line(command_line)
r = parsed.line_range.resolve(self._view)
line_nr = row_at(self._view, r.a) + 1
# TODO: .enter_normal_mode has access to self.state.mode
self.enter_normal_mode(mode=self.state.mode)
self.state.enter_normal_mode()
self.window.run_command('_vi_add_to_jump_list')
self.window.run_command('_vi_go_to_line', {'line': line_nr, 'mode': self.state.mode})
self.window.run_command('_vi_add_to_jump_list')
self._view.show(self._view.sel()[0])
class ExShellOut(sublime_plugin.TextCommand):
"""
Command: :!{cmd}
:!!
http://vimdoc.sourceforge.net/htmldoc/various.html#:!
"""
_last_command = None
@changing_cd
def run(self, edit, command_line=''):
assert command_line, 'expected non-empty command line'
parsed = parse_command_line(command_line)
shell_cmd = parsed.command.command
if shell_cmd == '!':
            if not ExShellOut._last_command:
return
shell_cmd = ExShellOut._last_command
# TODO: store only successful commands.
ExShellOut._last_command = shell_cmd
try:
if not parsed.line_range.is_empty:
shell.filter_thru_shell(
view=self.view,
edit=edit,
regions=[parsed.line_range.resolve(self.view)],
cmd=shell_cmd)
else:
# TODO: Read output into output panel.
# shell.run_and_wait(self.view, shell_cmd)
out = shell.run_and_read(self.view, shell_cmd)
output_view = self.view.window().create_output_panel('vi_out')
output_view.settings().set("line_numbers", False)
output_view.settings().set("gutter", False)
output_view.settings().set("scroll_past_end", False)
output_view.run_command('append', {'characters': out,
'force': True,
'scroll_to_end': True})
self.view.window().run_command("show_panel", {"panel": "output.vi_out"})
except NotImplementedError:
show_not_implemented()
class ExShell(ViWindowCommandBase):
"""Ex command(s): :shell
Opens a shell at the current view's directory. Sublime Text keeps a virtual
current directory that most of the time will be out of sync with the actual
current directory. The virtual current directory is always set to the
current view's directory, but it isn't accessible through the API.
"""
def open_shell(self, command):
return subprocess.Popen(command, cwd=os.getcwd())
@changing_cd
def run(self, command_line=''):
assert command_line, 'expected non-empty command line'
if sublime.platform() == 'linux':
term = self.view.settings().get('VintageousEx_linux_terminal')
term = term or os.environ.get('COLORTERM') or os.environ.get("TERM")
if not term:
sublime.status_message("Vintageous: Not terminal name found.")
return
try:
self.open_shell([term, '-e', 'bash']).wait()
except Exception as e:
print(e)
sublime.status_message("Vintageous: Error while executing command through shell.")
return
elif sublime.platform() == 'osx':
term = self.view.settings().get('VintageousEx_osx_terminal')
term = term or os.environ.get('COLORTERM') or os.environ.get("TERM")
if not term:
sublime.status_message("Vintageous: Not terminal name found.")
return
try:
self.open_shell([term, '-e', 'bash']).wait()
except Exception as e:
print(e)
sublime.status_message("Vintageous: Error while executing command through shell.")
return
elif sublime.platform() == 'windows':
self.open_shell(['cmd.exe', '/k']).wait()
else:
# XXX OSX (make check explicit)
show_not_implemented()
class ExReadShellOut(sublime_plugin.TextCommand):
'''
Command: :r[ead] [++opt] [name]
:{range}r[ead] [++opt] [name]
:[range]r[ead] !{cmd}
http://vimdoc.sourceforge.net/htmldoc/insert.html#:r
'''
@changing_cd
def run(self, edit, command_line=''):
assert command_line, 'expected non-empty command line'
parsed = parse_command_line(command_line)
r = parsed.line_range.resolve(self.view)
target_point = min(r.end(), self.view.size())
if parsed.command.command:
if sublime.platform() == 'linux':
# TODO: make shell command configurable.
the_shell = self.view.settings().get('linux_shell')
the_shell = the_shell or os.path.expandvars("$SHELL")
if not the_shell:
sublime.status_message("Vintageous: No shell name found.")
return
try:
p = subprocess.Popen([the_shell, '-c', parsed.command.command],
stdout=subprocess.PIPE)
except Exception as e:
print(e)
sublime.status_message("Vintageous: Error while executing command through shell.")
return
self.view.insert(edit, target_point, p.communicate()[0][:-1].decode('utf-8').strip() + '\n')
elif sublime.platform() == 'windows':
p = subprocess.Popen(['cmd.exe', '/C', parsed.command.command],
stdout=subprocess.PIPE,
startupinfo=get_startup_info()
)
cp = 'cp' + get_oem_cp()
rv = p.communicate()[0].decode(cp)[:-2].strip()
self.view.insert(edit, target_point, rv.strip() + '\n')
else:
show_not_implemented()
# Read a file into the current view.
else:
# According to Vim's help, :r should read the current file's content
# if no file name is given, but Vim doesn't do that.
# TODO: implement reading a file into the buffer.
show_not_implemented()
return
class ExPromptSelectOpenFile(ViWindowCommandBase):
'''
Command: :ls[!]
:buffers[!]
:files[!]
http://vimdoc.sourceforge.net/htmldoc/windows.html#:ls
'''
def run(self, command_line=''):
self.file_names = [get_view_info(view) for view in self.window.views()]
self.view_ids = [view.id() for view in self.window.views()]
self.window.show_quick_panel(self.file_names, self.on_done)
def on_done(self, index):
if index == -1:
return
sought_id = self.view_ids[index]
for view in self.window.views():
# TODO: Start looking in current group.
if view.id() == sought_id:
self.window.focus_view(view)
class ExMap(ViWindowCommandBase):
"""
Command: :map {lhs} {rhs}
http://vimdoc.sourceforge.net/htmldoc/map.html#:map
"""
def run(self, command_line=''):
# def run(self, edit, mode=None, count=None, cmd=''):
assert command_line, 'expected non-empty command line'
parsed = parse_command_line(command_line)
if not (parsed.command.keys and parsed.command.command):
            show_not_implemented('Showing mappings not implemented')
return
mappings = Mappings(self.state)
mappings.add(modes.NORMAL, parsed.command.keys, parsed.command.command)
mappings.add(modes.OPERATOR_PENDING, parsed.command.keys, parsed.command.command)
mappings.add(modes.VISUAL, parsed.command.keys, parsed.command.command)
class ExUnmap(ViWindowCommandBase):
'''
Command: :unm[ap] {lhs}
http://vimdoc.sourceforge.net/htmldoc/map.html#:unmap
'''
def run(self, command_line=''):
assert command_line, 'expected non-empty command line'
unmap = parse_command_line(command_line)
mappings = Mappings(self.state)
try:
mappings.remove(modes.NORMAL, unmap.command.keys)
mappings.remove(modes.OPERATOR_PENDING, unmap.command.keys)
mappings.remove(modes.VISUAL, unmap.command.keys)
except KeyError:
sublime.status_message('Vintageous: Mapping not found.')
class ExNmap(ViWindowCommandBase):
"""
Command: :nm[ap] {lhs} {rhs}
http://vimdoc.sourceforge.net/htmldoc/map.html#:nmap
"""
def run(self, command_line=''):
assert command_line, 'expected non-empty command line'
nmap_command = parse_command_line(command_line)
keys, command = (nmap_command.command.keys,
nmap_command.command.command)
mappings = Mappings(self.state)
mappings.add(modes.NORMAL, keys, command)
class ExNunmap(ViWindowCommandBase):
"""
Command: :nun[map] {lhs}
http://vimdoc.sourceforge.net/htmldoc/map.html#:nunmap
"""
def run(self, command_line=''):
assert command_line, 'expected non-empty command line'
nunmap_command = parse_command_line(command_line)
mappings = Mappings(self.state)
try:
mappings.remove(modes.NORMAL, nunmap_command.command.keys)
except KeyError:
sublime.status_message('Vintageous: Mapping not found.')
class ExOmap(ViWindowCommandBase):
"""
Command: :om[ap] {lhs} {rhs}
http://vimdoc.sourceforge.net/htmldoc/map.html#:omap
"""
def run(self, command_line=''):
assert command_line, 'expected non-empty command line'
omap_command = parse_command_line(command_line)
keys, command = (omap_command.command.keys,
omap_command.command.command)
mappings = Mappings(self.state)
mappings.add(modes.OPERATOR_PENDING, keys, command)
class ExOunmap(ViWindowCommandBase):
"""
Command: :ou[nmap] {lhs}
http://vimdoc.sourceforge.net/htmldoc/map.html#:ounmap
"""
def run(self, command_line=''):
assert command_line, 'expected non-empty command line'
ounmap_command = parse_command_line(command_line)
mappings = Mappings(self.state)
try:
mappings.remove(modes.OPERATOR_PENDING, ounmap_command.command.keys)
except KeyError:
sublime.status_message('Vintageous: Mapping not found.')
class ExVmap(ViWindowCommandBase):
"""
Command: :vm[ap] {lhs} {rhs}
http://vimdoc.sourceforge.net/htmldoc/map.html#:vmap
"""
def run(self, command_line=''):
assert command_line, 'expected non-empty command line'
vmap_command = parse_command_line(command_line)
keys, command = (vmap_command.command.keys,
vmap_command.command.command)
mappings = Mappings(self.state)
mappings.add(modes.VISUAL, keys, command)
mappings.add(modes.VISUAL_LINE, keys, command)
mappings.add(modes.VISUAL_BLOCK, keys, command)
class ExVunmap(ViWindowCommandBase):
"""
Command: :vu[nmap] {lhs}
http://vimdoc.sourceforge.net/htmldoc/map.html#:vunmap
"""
def run(self, command_line=''):
assert command_line, 'expected non-empty command line'
vunmap_command = parse_command_line(command_line)
mappings = Mappings(self.state)
try:
mappings.remove(modes.VISUAL, vunmap_command.command.keys)
mappings.remove(modes.VISUAL_LINE, vunmap_command.command.keys)
mappings.remove(modes.VISUAL_BLOCK, vunmap_command.command.keys)
except KeyError:
sublime.status_message('Vintageous: Mapping not found.')
class ExAbbreviate(ViWindowCommandBase):
'''
Command: :ab[breviate]
http://vimdoc.sourceforge.net/htmldoc/map.html#:abbreviate
'''
def run(self, command_line=''):
if not command_line:
self.show_abbreviations()
return
parsed = parse_command_line(command_line)
if not (parsed.command.short and parsed.command.full):
show_not_implemented(':abbreviate not fully implemented')
return
abbrev.Store().set(parsed.command.short, parsed.command.full)
def show_abbreviations(self):
abbrevs = ['{0} --> {1}'.format(item['trigger'], item['contents'])
for item in
abbrev.Store().get_all()]
self.window.show_quick_panel(abbrevs,
None, # Simply show the list.
flags=sublime.MONOSPACE_FONT)
class ExUnabbreviate(ViWindowCommandBase):
'''
Command: :una[bbreviate] {lhs}
http://vimdoc.sourceforge.net/htmldoc/map.html#:unabbreviate
'''
def run(self, command_line=''):
assert command_line, 'expected non-empty command line'
parsed = parse_command_line(command_line)
if not parsed.command.short:
return
abbrev.Store().erase(parsed.command.short)
class ExPrintWorkingDir(ViWindowCommandBase):
'''
Command: :pw[d]
http://vimdoc.sourceforge.net/htmldoc/editing.html#:pwd
'''
@changing_cd
def run(self, command_line=''):
assert command_line, 'expected non-empty command line'
show_status(os.getcwd())
class ExWriteFile(ViWindowCommandBase):
'''
Command :w[rite] [++opt]
:w[rite]! [++opt]
:[range]w[rite][!] [++opt]
:[range]w[rite] [++opt] {file}
:[range]w[rite]! [++opt] {file}
:[range]w[rite][!] [++opt] >>
:[range]w[rite][!] [++opt] >> {file}
:[range]w[rite] [++opt] {!cmd}
http://vimdoc.sourceforge.net/htmldoc/editing.html#:write
'''
def check_is_readonly(self, fname):
'''
Returns `True` if @fname is read-only on the filesystem.
@fname
Path to a file.
'''
if not fname:
return
try:
mode = os.stat(fname)
read_only = (stat.S_IMODE(mode.st_mode) & stat.S_IWUSR != stat.S_IWUSR)
except FileNotFoundError:
return
return read_only
@changing_cd
def run(self, command_line=''):
if not command_line:
raise ValueError('empty command line; that seems to be an error')
parsed = parse_command_line(command_line)
if parsed.command.options:
show_not_implemented("++opt isn't implemented for :write")
return
if parsed.command.command:
            show_not_implemented('!cmd not implemented for :write')
return
if not self._view:
return
if parsed.command.appends:
self.do_append(parsed)
return
if parsed.command.target_file:
self.do_write(parsed)
return
if not self._view.file_name():
show_error(VimError(ERR_NO_FILE_NAME))
return
read_only = (self.check_is_readonly(self._view.file_name())
or self._view.is_read_only())
if read_only and not parsed.command.forced:
utils.blink()
show_error(VimError(ERR_READONLY_FILE))
return
self.window.run_command('save')
def do_append(self, parsed_command):
if parsed_command.command.target_file:
self.do_append_to_file(parsed_command)
return
r = None
if parsed_command.line_range.is_empty:
            # If the user didn't provide any range data, Vim appends the whole buffer.
r = R(0, self._view.size())
else:
r = parsed_command.line_range.resolve(self._view)
text = self._view.substr(r)
text = text if text.startswith('\n') else '\n' + text
location = resolve_insertion_point_at_b(first_sel(self._view))
self._view.run_command('append', {'characters': text})
utils.replace_sel(self._view, R(self._view.line(location).a))
self.enter_normal_mode(mode=self.state.mode)
self.state.enter_normal_mode()
def do_append_to_file(self, parsed_command):
r = None
if parsed_command.line_range.is_empty:
            # If the user didn't provide any range data, Vim writes the whole buffer.
r = R(0, self._view.size())
else:
r = parsed_command.line_range.resolve(self._view)
fname = parsed_command.command.target_file
if not parsed_command.command.forced and not os.path.exists(fname):
show_error(VimError(ERR_CANT_WRITE_FILE))
return
try:
with open(fname, 'at') as f:
text = self._view.substr(r)
f.write(text)
# TODO: make this `show_info` instead.
show_status('Appended to ' + os.path.abspath(fname))
return
except IOError as e:
print('Vintageous: could not write file')
print('Vintageous ============')
print(e)
print('=======================')
return
def do_write(self, ex_command):
fname = ex_command.command.target_file
if not ex_command.command.forced:
if os.path.exists(fname):
utils.blink()
show_error(VimError(ERR_FILE_EXISTS))
return
if self.check_is_readonly(fname):
utils.blink()
show_error(VimError(ERR_READONLY_FILE))
return
region = None
if ex_command.line_range.is_empty:
            # If the user didn't provide any range data, Vim writes the whole buffer.
region = R(0, self._view.size())
else:
region = ex_command.line_range.resolve(self._view)
assert region is not None, "range cannot be None"
try:
expanded_path = os.path.expandvars(os.path.expanduser(fname))
expanded_path = os.path.abspath(expanded_path)
with open(expanded_path, 'wt') as f:
text = self._view.substr(region)
f.write(text)
# FIXME: Does this do what we think it does?
self._view.retarget(expanded_path)
self.window.run_command('save')
except IOError as e:
# TODO: Add logging.
show_error(VimError(ERR_CANT_WRITE_FILE))
print('Vintageous ==============================================')
            print(e)
print('=========================================================')
class ExWriteAll(ViWindowCommandBase):
'''
    Command: :wa[ll][!]
http://vimdoc.sourceforge.net/htmldoc/editing.html#:wa
'''
@changing_cd
def run(self, command_line=''):
assert command_line, 'expected non-empty command line'
parsed = parse_command_line(command_line)
forced = parsed.command.forced
# TODO: read-only views don't get properly saved.
for v in (v for v in self.window.views() if v.file_name()):
if v.is_read_only() and not forced:
continue
v.run_command('save')
class ExFile(ViWindowCommandBase):
'''
    Command: :f[ile][!]
http://vimdoc.sourceforge.net/htmldoc/editing.html#:file
'''
def run(self, command_line=''):
# XXX figure out what the right params are. vim's help seems to be
# wrong
if self._view.file_name():
fname = self._view.file_name()
else:
fname = 'untitled'
attrs = ''
if self._view.is_read_only():
attrs = 'readonly'
if self._view.is_dirty():
attrs = 'modified'
lines = 'no lines in the buffer'
if self._view.rowcol(self._view.size())[0]:
lines = self._view.rowcol(self._view.size())[0] + 1
# fixme: doesn't calculate the buffer's % correctly
if not isinstance(lines, str):
vr = self._view.visible_region()
start_row, end_row = self._view.rowcol(vr.begin())[0], \
self._view.rowcol(vr.end())[0]
mid = (start_row + end_row + 2) / 2
percent = float(mid) / lines * 100.0
msg = fname
if attrs:
msg += " [%s]" % attrs
if isinstance(lines, str):
msg += " -- %s --" % lines
else:
msg += " %d line(s) --%d%%--" % (lines, int(percent))
sublime.status_message('Vintageous: %s' % msg)
class ExMove(ExTextCommandBase):
'''
Command: :[range]m[ove] {address}
http://vimdoc.sourceforge.net/htmldoc/change.html#:move
'''
def run_ex_command(self, edit, command_line=''):
assert command_line, 'expected non-empty command line'
move_command = parse_command_line(command_line)
if move_command.command.address is None:
show_error(VimError(ERR_INVALID_ADDRESS))
return
source = move_command.line_range.resolve(self.view)
if any(s.contains(source) for s in self.view.sel()):
show_error(VimError(ERR_CANT_MOVE_LINES_ONTO_THEMSELVES))
return
destination = move_command.command.address.resolve(self.view)
if destination == source:
return
text = self.view.substr(source)
if destination.end() >= self.view.size():
text = '\n' + text.rstrip()
if destination == R(-1):
destination = R(0)
if destination.end() < source.begin():
self.view.erase(edit, source)
self.view.insert(edit, destination.end(), text)
self.set_next_sel([[destination.a, destination.b]])
return
self.view.insert(edit, destination.end(), text)
self.view.erase(edit, source)
self.set_next_sel([[destination.a, destination.a]])
class ExCopy(ExTextCommandBase):
'''
Command: :[range]co[py] {address}
http://vimdoc.sourceforge.net/htmldoc/change.html#:copy
'''
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def run_ex_command(self, edit, command_line=''):
assert command_line, 'expected non-empty command line'
parsed = parse_command_line(command_line)
unresolved = parsed.command.calculate_address()
if unresolved is None:
show_error(VimError(ERR_INVALID_ADDRESS))
return
# TODO: how do we signal row 0?
target_region = unresolved.resolve(self.view)
address = None
if target_region == R(-1, -1):
address = 0
else:
row = utils.row_at(self.view, target_region.begin()) + 1
address = self.view.text_point(row, 0)
source = parsed.line_range.resolve(self.view)
text = self.view.substr(source)
if address >= self.view.size():
address = self.view.size()
text = '\n' + text[:-1]
self.view.insert(edit, address, text)
cursor_dest = self.view.line(address + len(text) - 1).begin()
self.set_next_sel([(cursor_dest, cursor_dest)])
class ExOnly(ViWindowCommandBase):
"""
Command: :on[ly][!]
http://vimdoc.sourceforge.net/htmldoc/windows.html#:only
"""
def run(self, command_line=''):
if not command_line:
raise ValueError('empty command line; that seems wrong')
parsed = parse_command_line(command_line)
if not parsed.command.forced and has_dirty_buffers(self.window):
show_error(VimError(ERR_OTHER_BUFFER_HAS_CHANGES))
return
current_id = self._view.id()
for view in self.window.views():
if view.id() == current_id:
continue
if view.is_dirty():
view.set_scratch(True)
view.close()
class ExDoubleAmpersand(ViWindowCommandBase):
'''
Command: :[range]&[&][flags] [count]
http://vimdoc.sourceforge.net/htmldoc/change.html#:&
'''
def run(self, command_line=''):
assert command_line, 'expected non-empty command line'
parsed = parse_command_line(command_line)
new_command_line = '{0}substitute///{1} {2}'.format(
str(parsed.line_range),
''.join(parsed.command.params['flags']),
parsed.command.params['count'],
)
self.window.run_command('ex_substitute', {
'command_line': new_command_line.strip()
})
class ExSubstitute(sublime_plugin.TextCommand):
'''
Command :s[ubstitute]
http://vimdoc.sourceforge.net/htmldoc/change.html#:substitute
'''
last_pattern = None
last_flags = []
last_replacement = ''
def run(self, edit, command_line=''):
if not command_line:
raise ValueError('no command line passed; that seems wrong')
# ST commands only accept Json-encoded parameters.
# We parse the command line again because the alternative is to
# serialize the parsed command line before calling this command.
# Parsing twice seems simpler.
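        # Illustrative note (added; not part of the original code): callers pass
        # the raw ex command line as a plain string, exactly as ExDoubleAmpersand
        # does further below, e.g.
        #     window.run_command('ex_substitute', {'command_line': '%s/foo/bar/g'})
        # so the whole range/pattern/flags payload stays JSON-serializable.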
parsed = parse_command_line(command_line)
pattern = parsed.command.pattern
replacement = parsed.command.replacement
count = parsed.command.count
flags = parsed.command.flags
# :s
if not pattern:
pattern = ExSubstitute.last_pattern
replacement = ExSubstitute.last_replacement
# TODO: Don't we have to reuse the previous flags?
flags = []
count = 0
if not pattern:
sublime.status_message("Vintageous: no previous pattern available")
print("Vintageous: no previous pattern available")
return
ExSubstitute.last_pattern = pattern
ExSubstitute.last_replacement = replacement
ExSubstitute.last_flags = flags
computed_flags = 0
computed_flags |= re.IGNORECASE if ('i' in flags) else 0
try:
compiled_rx = re.compile(pattern, flags=computed_flags)
except Exception as e:
            sublime.status_message(
                "Vintageous: bad pattern '%s'" % pattern)
            print("Vintageous [regex error]: %s ... in pattern '%s'"
                  % (str(e), pattern))
return
# TODO: Implement 'count'
replace_count = 0 if (flags and 'g' in flags) else 1
target_region = parsed.line_range.resolve(self.view)
if 'c' in flags:
self.replace_confirming(edit, pattern, compiled_rx, replacement, replace_count, target_region)
return
line_text = self.view.substr(target_region)
new_text = re.sub(compiled_rx, replacement, line_text, count=replace_count)
self.view.replace(edit, target_region, new_text)
def replace_confirming(self, edit, pattern, compiled_rx, replacement,
replace_count, target_region):
last_row = row_at(self.view, target_region.b - 1)
start = target_region.begin()
while True:
match = self.view.find(pattern, start)
# no match or match out of range -- stop
if (match == R(-1)) or (row_at(self.view, match.a) > last_row):
self.view.show(first_sel(self.view).begin())
return
size_before = self.view.size()
with adding_regions(self.view, 's_confirm', [match], 'comment'):
self.view.show(match.a, True)
if sublime.ok_cancel_dialog("Confirm replacement?"):
text = self.view.substr(match)
substituted = re.sub(compiled_rx, replacement, text, count=replace_count)
self.view.replace(edit, match, substituted)
start = match.b + (self.view.size() - size_before)
class ExDelete(ExTextCommandBase):
'''
Command: :[range]d[elete] [x]
:[range]d[elete] [x] {count}
http://vimdoc.sourceforge.net/htmldoc/change.html#:delete
'''
def select(self, regions, register):
self.view.sel().clear()
to_store = []
for r in regions:
self.view.sel().add(r)
if register:
to_store.append(self.view.substr(self.view.full_line(r)))
if register:
text = ''.join(to_store)
if not text.endswith('\n'):
text = text + '\n'
state = State(self.view)
state.registers[register] = [text]
def run_ex_command(self, edit, command_line=''):
assert command_line, 'expected non-empty command line'
parsed = parse_command_line(command_line)
r = parsed.line_range.resolve(self.view)
if r == R(-1, -1):
r = self.view.full_line(0)
self.select([r], parsed.command.params['register'])
self.view.erase(edit, r)
self.set_next_sel([(r.a, r.a)])
class ExGlobal(ViWindowCommandBase):
"""Ex command(s): :global
Command: :[range]g[lobal]/{pattern}/[cmd]
:[range]g[lobal]!/{pattern}/[cmd]
:global filters lines where a pattern matches and then applies the supplied
action to all those lines.
Examples:
:10,20g/FOO/delete
This command deletes all lines between line 10 and line 20 where 'FOO'
matches.
:g:XXX:s!old!NEW!g
This command replaces all instances of 'old' with 'NEW' in every line
where 'XXX' matches.
By default, :global searches all lines in the buffer.
If you want to filter lines where a pattern does NOT match, add an
exclamation point:
:g!/DON'T TOUCH THIS/delete
"""
most_recent_pat = None
def run(self, command_line=''):
assert command_line, 'expected non-empty command_line'
parsed = parse_command_line(command_line)
global_range = None
if parsed.line_range.is_empty:
global_range = R(0, self._view.size())
else:
global_range = parsed.line_range.resolve(self._view)
pattern = parsed.command.pattern
if pattern:
ExGlobal.most_recent_pat = pattern
else:
pattern = ExGlobal.most_recent_pat
# Should default to 'print'
subcmd = parsed.command.subcommand
try:
matches = find_all_in_range(self._view, pattern,
global_range.begin(), global_range.end())
except Exception as e:
msg = "Vintageous (global): %s ... in pattern '%s'" % (str(e), pattern)
sublime.status_message(msg)
print(msg)
return
if not matches or not parsed.command.subcommand.cooperates_with_global:
return
matches = [self._view.full_line(r.begin()) for r in matches]
matches = [[r.a, r.b] for r in matches]
self.window.run_command(subcmd.target_command, {
'command_line': str(subcmd),
# Ex commands cooperating with :global must accept this additional
# parameter.
'global_lines': matches,
})
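# Illustrative sketch (added; not from the original source): for an invocation
# such as ':%g/FOO/print', ExGlobal resolves the range, collects every full line
# matching FOO, and forwards the work to the subcommand roughly like this:
#
#     window.run_command('ex_print', {
#         'command_line': 'print',
#         'global_lines': [[r.a, r.b] for r in matched_full_lines],
#     })
#
# The actual target command name comes from subcmd.target_command; 'ex_print'
# here is an assumption used only for illustration.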
class ExPrint(ViWindowCommandBase):
'''
Command: :[range]p[rint] [flags]
:[range]p[rint] {count} [flags]
http://vimdoc.sourceforge.net/htmldoc/various.html#:print
'''
def run(self, command_line='', global_lines=None):
assert command_line, 'expected non-empty command line'
if self._view.size() == 0:
show_error(VimError(ERR_EMPTY_BUFFER))
return
parsed = parse_command_line(command_line)
r = parsed.line_range.resolve(self._view)
lines = self.get_lines(r, global_lines)
display = self.window.new_file()
display.set_scratch(True)
if 'l' in parsed.command.flags:
display.settings().set('draw_white_space', 'all')
for (text, row) in lines:
characters = ''
if '#' in parsed.command.flags:
characters = "{} {}".format(row, text).lstrip()
else:
characters = text.lstrip()
display.run_command('append', {'characters': characters})
def get_lines(self, parsed_range, global_lines):
# FIXME: this is broken.
# If :global called us, ignore the parsed range.
if global_lines:
return [(self._view.substr(R(a, b)), row_at(self._view, a)) for (a, b) in global_lines]
to_display = []
        for line in self._view.lines(parsed_range):
text = self._view.substr(line)
to_display.append((text, row_at(self._view, line.begin())))
return to_display
class ExQuitCommand(ViWindowCommandBase):
'''
Command: :q[uit][!]
http://vimdoc.sourceforge.net/htmldoc/editing.html#:q
'''
def run(self, command_line=''):
assert command_line, 'expected non-empty command line'
quit_command = parse_command_line(command_line)
view = self._view
if quit_command.command.forced:
view.set_scratch(True)
if view.is_dirty() and not quit_command.command.forced:
show_error(VimError(ERR_UNSAVED_CHANGES))
return
if not view.file_name() and not quit_command.command.forced:
show_error(VimError(ERR_NO_FILE_NAME))
return
self.window.run_command('close')
if len(self.window.views()) == 0:
self.window.run_command('close')
return
# FIXME: Probably doesn't work as expected.
# Close the current group if there aren't any views left in it.
if not self.window.views_in_group(self.window.active_group()):
self.window.run_command('ex_unvsplit')
class ExQuitAllCommand(ViWindowCommandBase):
"""
Command: :qa[ll][!]
http://vimdoc.sourceforge.net/htmldoc/editing.html#:qa
"""
def run(self, command_line=''):
assert command_line, 'expected non-empty command line'
parsed = parse_command_line(command_line)
if parsed.command.forced:
for v in self.window.views():
if v.is_dirty():
v.set_scratch(True)
elif has_dirty_buffers(self.window):
sublime.status_message("There are unsaved changes!")
return
self.window.run_command('close_all')
self.window.run_command('exit')
class ExWriteAndQuitCommand(ViWindowCommandBase):
"""
Command: :wq[!] [++opt] {file}
Write and then close the active buffer.
"""
def run(self, command_line=''):
assert command_line, 'expected non-empty command line'
parsed = parse_command_line(command_line)
# TODO: implement this
if parsed.command.forced:
show_not_implemented()
return
if self._view.is_read_only():
sublime.status_message("Can't write a read-only buffer.")
return
if not self._view.file_name():
sublime.status_message("Can't save a file without name.")
return
self.window.run_command('save')
self.window.run_command('ex_quit', {'command_line': 'quit'})
class ExBrowse(ViWindowCommandBase):
'''
:bro[wse] {command}
http://vimdoc.sourceforge.net/htmldoc/editing.html#:browse
'''
def run(self, command_line):
assert command_line, 'expected a non-empty command line'
self.window.run_command('prompt_open_file', {
'initial_directory': self.state.settings.vi['_cmdline_cd']
})
class ExEdit(ViWindowCommandBase):
"""
Command: :e[dit] [++opt] [+cmd]
:e[dit]! [++opt] [+cmd]
:e[dit] [++opt] [+cmd] {file}
:e[dit]! [++opt] [+cmd] {file}
:e[dit] [++opt] [+cmd] #[count]
http://vimdoc.sourceforge.net/htmldoc/editing.html#:edit
"""
@changing_cd
def run(self, command_line=''):
assert command_line, 'expected non-empty command line'
parsed = parse_command_line(command_line)
if parsed.command.file_name:
file_name = os.path.expanduser(
os.path.expandvars(parsed.command.file_name))
if self._view.is_dirty() and not parsed.command.forced:
show_error(VimError(ERR_UNSAVED_CHANGES))
return
if os.path.isdir(file_name):
# TODO: Open a file-manager in a buffer.
show_message('Cannot open directory', displays=Display.ALL)
# 'prompt_open_file' does not accept initial root parameter
# self.window.run_command('prompt_open_file', {'path': file_name})
return
if not os.path.isabs(file_name):
file_name = os.path.join(
self.state.settings.vi['_cmdline_cd'],
file_name)
if not os.path.exists(file_name):
msg = '"{0}" [New File]'.format(os.path.basename(file_name))
parent = os.path.dirname(file_name)
if parent and not os.path.exists(parent):
msg = '"{0}" [New DIRECTORY]'.format(parsed.command.file_name)
self.window.open_file(file_name)
# Give ST some time to load the new view.
sublime.set_timeout(
lambda: show_message(msg, displays=Display.ALL), 150)
return
show_not_implemented(
'not implemented case for :edit ({0})'.format(command_line))
return
if parsed.command.forced or not self._view.is_dirty():
self._view.run_command('revert')
return
if self._view.is_dirty():
show_error(VimError(ERR_UNSAVED_CHANGES))
return
show_error(VimError(ERR_UNSAVED_CHANGES))
class ExCquit(ViWindowCommandBase):
'''
Command: :cq[uit][!]
http://vimdoc.sourceforge.net/htmldoc/quickfix.html#:cquit
'''
def run(self, command_line=''):
assert command_line, 'expected non-empty command_line'
self.window.run_command('exit')
class ExExit(ViWindowCommandBase):
"""
Command: :[range]exi[t][!] [++opt] [file]
:xit
http://vimdoc.sourceforge.net/htmldoc/editing.html#:exit
"""
def run(self, command_line=''):
assert command_line, 'expected non-empty command line'
if self._view.is_dirty():
self.window.run_command('save')
self.window.run_command('close')
if len(self.window.views()) == 0:
self.window.run_command('exit')
class ExListRegisters(ViWindowCommandBase):
'''
Command :reg[isters] {arg}
Lists registers in quick panel and saves selected to `"` register.
In Vintageous, registers store lists of values (due to multiple selections).
http://vimdoc.sourceforge.net/htmldoc/change.html#:registers
'''
def run(self, command_line):
def show_lines(line_count):
lines_display = '... [+{0}]'.format(line_count - 1)
return lines_display if line_count > 1 else ''
parsed = parse_command_line(command_line)
# TODO: implement arguments.
pairs = [(k, v) for (k, v) in self.state.registers.to_dict().items() if v]
pairs = [(k, repr(v[0]), len(v)) for (k, v) in pairs]
pairs = ['"{0} {1} {2}'.format(k, v, show_lines(lines)) for (k, v, lines) in pairs]
self.window.show_quick_panel(pairs, self.on_done, flags=sublime.MONOSPACE_FONT)
def on_done(self, idx):
"""Save selected value to `"` register."""
if idx == -1:
return
value = list(self.state.registers.to_dict().values())[idx]
self.state.registers['"'] = [value]
class ExNew(ViWindowCommandBase):
"""Ex command(s): :[N]new [++opt] [+cmd]
http://vimdoc.sourceforge.net/htmldoc/windows.html#:new
"""
@changing_cd
def run(self, command_line=''):
assert command_line, 'expected non-empty command line'
self.window.run_command('new_file')
class ExYank(sublime_plugin.TextCommand):
"""
Command: :[range]y[ank] [x] {count}
http://vimdoc.sourceforge.net/htmldoc/windows.html#:yank
"""
def run(self, edit, command_line=''):
assert command_line, 'expected non-empty command line'
parsed = parse_command_line(command_line)
register = parsed.command.register
line_range = parsed.line_range.resolve(self.view)
if not register:
register = '"'
text = self.view.substr(line_range)
state = State(self.view)
state.registers[register] = [text]
# TODO: o_O?
if register == '"':
state.registers['0'] = [text]
class TabControlCommand(ViWindowCommandBase):
def run(self, command, file_name=None, forced=False):
view_count = len(self.window.views())
(group_index, view_index) = self.window.get_view_index(self._view)
if command == 'open':
if not file_name: # TODO: file completion
self.window.run_command('show_overlay', {
'overlay': 'goto',
'show_files': True,
})
else:
cur_dir = os.path.dirname(self._view.file_name())
self.window.open_file(os.path.join(cur_dir, file_name))
elif command == 'next':
self.window.run_command('select_by_index', {
'index': (view_index + 1) % view_count})
elif command == 'prev':
self.window.run_command('select_by_index', {
'index': (view_index + view_count - 1) % view_count})
elif command == "last":
self.window.run_command('select_by_index', {'index': view_count - 1})
elif command == "first":
self.window.run_command('select_by_index', {'index': 0})
elif command == 'only':
            quit_command_line = 'quit' if not forced else 'quit!'
group = self.window.views_in_group(group_index)
if any(view.is_dirty() for view in group):
show_error(VimError(ERR_OTHER_BUFFER_HAS_CHANGES))
return
for view in group:
if view.id() == self._view.id():
continue
self.window.focus_view(view)
self.window.run_command('ex_quit', {
'command_line': quit_command_line})
self.window.focus_view(self._view)
else:
show_message("Unknown TabControl Command", displays=Display.ALL)
class ExTabOpenCommand(sublime_plugin.WindowCommand):
def run(self, file_name=None):
self.window.run_command('tab_control', {
'command': 'open', 'file_name': file_name}, )
class ExTabnextCommand(ViWindowCommandBase):
'''
Command: :tabn[ext]
http://vimdoc.sourceforge.net/htmldoc/tabpage.html#:tabnext
'''
def run(self, command_line=''):
assert command_line, 'expected non-empty command line'
parsed = parse_command_line(command_line)
self.window.run_command("tab_control", {"command": "next"}, )
class ExTabprevCommand(ViWindowCommandBase):
'''
Command: :tabp[revious]
http://vimdoc.sourceforge.net/htmldoc/tabpage.html#:tabprevious
'''
def run(self, command_line=''):
assert command_line, 'expected non-empty command line'
parsed = parse_command_line(command_line)
self.window.run_command("tab_control", {"command": "prev"}, )
class ExTablastCommand(ViWindowCommandBase):
'''
Command: :tabl[ast]
http://vimdoc.sourceforge.net/htmldoc/tabpage.html#:tablast
'''
def run(self, command_line=''):
assert command_line, 'expected non-empty command line'
parsed = parse_command_line(command_line)
self.window.run_command("tab_control", {"command": "last"}, )
class ExTabfirstCommand(ViWindowCommandBase):
'''
Command: :tabf[irst]
http://vimdoc.sourceforge.net/htmldoc/tabpage.html#:tabfirst
'''
def run(self, command_line=''):
assert command_line, 'expected non-empty command line'
parsed = parse_command_line(command_line)
self.window.run_command("tab_control", {"command": "first"}, )
class ExTabonlyCommand(ViWindowCommandBase):
'''
    Command: :tabo[nly]
http://vimdoc.sourceforge.net/htmldoc/tabpage.html#:tabonly
'''
def run(self, command_line=''):
assert command_line, 'expected non-empty command line'
parsed = parse_command_line(command_line)
self.window.run_command("tab_control", {"command": "only", "forced": parsed.command.forced})
class ExCdCommand(ViWindowCommandBase):
'''
Command: :cd[!]
:cd[!] {path}
:cd[!] -
Print or change the current directory.
:cd without an argument behaves as in Unix for all platforms.
http://vimdoc.sourceforge.net/htmldoc/editing.html#:cd
'''
@changing_cd
def run(self, command_line=''):
assert command_line, 'expected non-empty command line'
parsed = parse_command_line(command_line)
if self._view.is_dirty() and not parsed.command.forced:
show_error(VimError(ERR_UNSAVED_CHANGES))
return
if not parsed.command.path:
self.state.settings.vi['_cmdline_cd'] = os.path.expanduser("~")
self._view.run_command('ex_print_working_dir')
return
# TODO: It seems there a few symbols that are always substituted when they represent a
# filename. We should have a global method of substiting them.
if parsed.command.path == '%:h':
fname = self._view.file_name()
if fname:
self.state.settings.vi['_cmdline_cd'] = os.path.dirname(fname)
self._view.run_command('ex_print_working_dir')
return
path = os.path.realpath(os.path.expandvars(os.path.expanduser(parsed.command.path)))
if not os.path.exists(path):
# TODO: Add error number in ex_error.py.
show_error(VimError(ERR_CANT_FIND_DIR_IN_CDPATH))
return
self.state.settings.vi['_cmdline_cd'] = path
self._view.run_command('ex_print_working_dir')
class ExCddCommand(ViWindowCommandBase):
"""
Command (non-standard): :cdd[!]
Non-standard command to change the current directory to the active
view's directory.
In Sublime Text, the current directory doesn't follow the active view, so
it's convenient to be able to align both easily.
XXX: Is the above still true?
(This command may be removed at any time.)
"""
def run(self, command_line=''):
assert command_line, 'expected non-empty command line'
parsed = parse_command_line(command_line)
if self._view.is_dirty() and not parsed.command.forced:
show_error(VimError(ERR_UNSAVED_CHANGES))
return
path = os.path.dirname(self._view.file_name())
try:
self.state.settings.vi['_cmdline_cd'] = path
show_status(path)
except IOError:
show_error(VimError(ERR_CANT_FIND_DIR_IN_CDPATH))
class ExVsplit(ViWindowCommandBase):
'''
Command: :[N]vs[plit] [++opt] [+cmd] [file]
http://vimdoc.sourceforge.net/htmldoc/windows.html#:vsplit
'''
MAX_SPLITS = 4
LAYOUT_DATA = {
1: {"cells": [[0,0, 1, 1]], "rows": [0.0, 1.0], "cols": [0.0, 1.0]},
2: {"cells": [[0,0, 1, 1], [1, 0, 2, 1]], "rows": [0.0, 1.0], "cols": [0.0, 0.5, 1.0]},
3: {"cells": [[0,0, 1, 1], [1, 0, 2, 1], [2, 0, 3, 1]], "rows": [0.0, 1.0], "cols": [0.0, 0.33, 0.66, 1.0]},
4: {"cells": [[0,0, 1, 1], [1, 0, 2, 1], [2, 0, 3, 1], [3,0, 4, 1]], "rows": [0.0, 1.0], "cols": [0.0, 0.25, 0.50, 0.75, 1.0]},
}
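    # Note (added for clarity, based on Sublime Text's set_layout convention
    # rather than on the original comments): each entry in "cells" is
    # [x1, y1, x2, y2], indices into "cols"/"rows". For example, the 2-group
    # layout above splits the window at col 0.5 into two side-by-side cells:
    # [0, 0, 1, 1] spans cols[0]..cols[1] and [1, 0, 2, 1] spans cols[1]..cols[2].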
def run(self, command_line=''):
parsed = parse_command_line(command_line)
file_name = parsed.command.params['file_name']
groups = self.window.num_groups()
if groups >= ExVsplit.MAX_SPLITS:
show_message("Can't create more groups.", displays=Display.ALL)
return
old_view = self._view
pos = ":{0}:{1}".format(*old_view.rowcol(old_view.sel()[0].b))
current_file_name = old_view.file_name() + pos
self.window.run_command('set_layout', ExVsplit.LAYOUT_DATA[groups + 1])
# TODO: rename this param.
if file_name:
existing = self.window.find_open_file(file_name)
pos = ''
if existing:
pos = ":{0}:{1}".format(*existing.rowcol(existing.sel()[0].b))
self.open_file(file_name + pos)
return
# No file name provided; clone current view into new group.
self.open_file(current_file_name)
def open_file(self, file_name):
flags = (sublime.FORCE_GROUP | sublime.ENCODED_POSITION)
self.window.open_file(file_name, group=(self.window.num_groups() - 1),
flags=flags)
class ExUnvsplit(ViWindowCommandBase):
'''
Command: :unvsplit
Non-standard Vim command.
'''
def run(self, command_line=''):
assert command_line, 'expected non-empty command line'
groups = self.window.num_groups()
if groups == 1:
sublime.status_message("Vintageous: Can't delete more groups.")
return
# If we don't do this, cloned views will be moved to the previous group and kept around.
# We want to close them instead.
self.window.run_command('close')
self.window.run_command('set_layout', ExVsplit.LAYOUT_DATA[groups - 1])
class ExSetLocal(ViWindowCommandBase):
def run(self, command_line=''):
assert command_line, 'expected non-empty command line'
parsed = parse_command_line(command_line)
option = parsed.command.option
value = parsed.command.value
if option.endswith('?'):
show_not_implemented()
return
try:
set_local(self._view, option, value)
except KeyError:
sublime.status_message("Vintageuos: No such option.")
except ValueError:
sublime.status_message("Vintageous: Invalid value for option.")
class ExSet(ViWindowCommandBase):
def run(self, command_line=''):
assert command_line, 'expected non-empty command line'
parsed = parse_command_line(command_line)
option = parsed.command.option
value = parsed.command.value
        print(locals())
if option.endswith('?'):
show_not_implemented()
return
try:
set_global(self._view, option, value)
except KeyError:
sublime.status_message("Vintageuos: No such option.")
except ValueError:
sublime.status_message("Vintageous: Invalid value for option.")
class ExLet(ViWindowCommandBase):
'''
Command: :let {var-name} = {expr1}
http://vimdoc.sourceforge.net/htmldoc/eval.html#:let
'''
def run(self, command_line=''):
assert command_line, 'expected non-empty command line'
parsed = parse_command_line(command_line)
self.state.variables.set(parsed.command.variable_name,
parsed.command.variable_value)
class ExWriteAndQuitAll(ViWindowCommandBase):
'''
    Command: :wqa[ll] [++opt]
:xa[ll]
http://vimdoc.sourceforge.net/htmldoc/editing.html#:wqall
'''
def run(self, command_line=''):
assert command_line, 'expected non-empty command line'
if not all(v.file_name() for v in self.window.views()):
show_error(VimError(ERR_NO_FILE_NAME))
utils.blink()
return
if any(v.is_read_only() for v in self.window.views()):
show_error(VimError(ERR_READONLY_FILE))
utils.blink()
return
self.window.run_command('save_all')
assert not any(v.is_dirty() for v in self.window.views())
self.window.run_command('close_all')
self.window.run_command('exit')
|
|
from __future__ import print_function, division
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import Net, RealApplianceSource, BLSTMLayer, DimshuffleLayer
from lasagne.nonlinearities import sigmoid, rectify
from lasagne.objectives import crossentropy, mse
from lasagne.init import Uniform, Normal
from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer, FeaturePoolLayer
from lasagne.updates import nesterov_momentum
from functools import partial
import os
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment
from neuralnilm.net import TrainingError
import __main__
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 250
GRADIENT_STEPS = 100
"""
e103
Discovered that bottom layer is hardly changing. So will try
just a single lstm layer
e104
standard init
lower learning rate
e106
lower learning rate to 0.001
e108
is e107 but with batch size of 5
e109
Normal(1) for BLSTM
e110
* Back to Uniform(5) for BLSTM
* Using nntools eb17bd923ef9ff2cacde2e92d7323b4e51bb5f1f
RESULTS: Seems to run fine again!
e111
* Try with nntools head
* peepholes=False
RESULTS: appears to be working well. Haven't seen a NaN,
even with training rate of 0.1
e112
* n_seq_per_batch = 50
e114
* Trying looking at layer by layer training again.
* Start with single BLSTM layer
e115
* Learning rate = 1
e116
* Standard inits
e117
* Uniform(1) init
e119
* Learning rate 10
# Result: didn't work well!
e120
* init: Normal(1)
* not as good as Uniform(5)
e121
* Uniform(25)
e122
* Just 10 cells
* Uniform(5)
e125
* Pre-train lower layers
e128
* Add back all 5 appliances
* Seq length 1500
* skip_prob = 0.7
e129
* max_input_power = None
* 2nd layer has Uniform(5)
* pre-train bottom layer for 2000 epochs
* add third layer at 4000 epochs
e131
e138
* Trying to replicate e82 and then break it ;)
e140
diff
e141
conv1D layer has Uniform(1), as does 2nd BLSTM layer
e142
diff AND power
e144
diff and power and max power is 5900
e145
Uniform(25) for first layer
e146
gradient clip and use peepholes
e147
* try again with new code
e148
* learning rate 0.1
e150
* Same as e149 but without peepholes and using BLSTM not BBLSTM
e151
* Max pooling
171
lower learning rate
172
even lower learning rate
173
slightly higher learning rate!
175
same as 174 but with skip prob = 0, and LSTM not BLSTM, and only 4000 epochs
176
new cost function
177
another new cost func (this one avoids NaNs)
skip prob 0.7
10x higher learning rate
178
refactored cost func (functionally equiv to 177)
0.1x learning rate
e180
* mse
e181
* back to scaled cost
* different architecture:
- convd1 at input (2x)
- then 3 LSTM layers, each with a 2x conv in between
- no diff input
e189
* divide dominant appliance power
* mse
217
no peepholes
218
don't clip gradient
lag=64
"""
# def scaled_cost(x, t):
# raw_cost = (x - t) ** 2
# energy_per_seq = t.sum(axis=1)
# energy_per_batch = energy_per_seq.sum(axis=1)
# energy_per_batch = energy_per_batch.reshape((-1, 1))
# normaliser = energy_per_seq / energy_per_batch
# cost = raw_cost.mean(axis=1) * (1 - normaliser)
# return cost.mean()
from theano.ifelse import ifelse
import theano.tensor as T
THRESHOLD = 0
def scaled_cost(x, t):
sq_error = (x - t) ** 2
def mask_and_mean_sq_error(mask):
masked_sq_error = sq_error[mask.nonzero()]
mean = masked_sq_error.mean()
mean = ifelse(T.isnan(mean), 0.0, mean)
return mean
above_thresh_mean = mask_and_mean_sq_error(t > THRESHOLD)
below_thresh_mean = mask_and_mean_sq_error(t <= THRESHOLD)
return (above_thresh_mean + below_thresh_mean) / 2.0
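# A minimal NumPy sketch (added for illustration; it is not used by the
# experiment) of what scaled_cost computes for plain arrays: the squared error
# is averaged separately over targets above and below THRESHOLD, and the two
# means are then averaged, so the sparse "on" regions are not drowned out by
# the many near-zero targets. The function name is hypothetical.
def scaled_cost_numpy_sketch(x, t, threshold=THRESHOLD):
    import numpy as np
    x = np.asarray(x, dtype=float)
    t = np.asarray(t, dtype=float)
    sq_error = (x - t) ** 2
    def masked_mean(mask):
        # Mirrors the ifelse(T.isnan(mean), 0.0, mean) guard above: an empty
        # mask contributes 0 instead of NaN.
        return sq_error[mask].mean() if mask.any() else 0.0
    above = masked_mean(t > threshold)
    below = masked_mean(t <= threshold)
    return (above + below) / 2.0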
def exp_a(name):
global source
source = RealApplianceSource(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television'
# 'dish washer',
# ['washer dryer', 'washing machine']
],
max_appliance_powers=None,#[500] * 5,
on_power_thresholds=[5] * 5,
max_input_power=500,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=1500,
output_one_appliance=False,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0.7,
n_seq_per_batch=25,
subsample_target=4,
input_padding=3,
include_diff=False,
clip_appliance_power=False,
lag=64
)
net = Net(
experiment_name=name,
source=source,
save_plot_interval=250,
loss_function=scaled_cost,
updates=partial(nesterov_momentum, learning_rate=0.0001),
layers_config=[
{
'type': LSTMLayer,
'num_units': 50,
'W_in_to_cell': Uniform(5),
'gradient_steps': GRADIENT_STEPS,
'peepholes': False
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 50,
'filter_length': 2,
'stride': 1,
'nonlinearity': sigmoid,
'W': Uniform(1)
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'type': FeaturePoolLayer,
'ds': 2, # number of feature maps to be pooled together
'axis': 1 # pool over the time axis
},
{
'type': LSTMLayer,
'num_units': 50,
'W_in_to_cell': Uniform(5),
'gradient_steps': GRADIENT_STEPS,
'peepholes': False
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 50,
'filter_length': 2,
'stride': 1,
'nonlinearity': sigmoid,
'W': Uniform(1)
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'type': FeaturePoolLayer,
'ds': 2, # number of feature maps to be pooled together
'axis': 1 # pool over the time axis
},
{
'type': LSTMLayer,
'num_units': 50,
'W_in_to_cell': Uniform(1),
'gradient_steps': GRADIENT_STEPS,
'peepholes': False
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None,
'W': Uniform(25)
}
]
)
return net
def init_experiment(experiment):
full_exp_name = NAME + experiment
func_call = 'exp_{:s}(full_exp_name)'.format(experiment)
print("***********************************")
print("Preparing", full_exp_name, "...")
net = eval(func_call)
return net
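# Note (added commentary, not original): init_experiment dispatches to exp_a /
# exp_b / ... by eval-ing a constructed string. A sketch of an eval-free
# alternative, assuming the experiment functions live in this module, would be
#     net = globals()['exp_{:s}'.format(experiment)](full_exp_name)
# The eval form is kept above to match the original experiment scripts.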
def main():
for experiment in list('a'):
full_exp_name = NAME + experiment
path = os.path.join(PATH, full_exp_name)
try:
net = init_experiment(experiment)
run_experiment(net, path, epochs=None)
except KeyboardInterrupt:
break
except TrainingError as exception:
print("EXCEPTION:", exception)
        except Exception as exception:
            print("EXCEPTION:", exception)
            # import ipdb; ipdb.set_trace()
            raise
if __name__ == "__main__":
main()
|
|
"""
Definition of network object.
"""
import numpy as num
import matplotlib.pyplot as plt
import copy
import itertools
import logging
from kreveik.classes import *
import kreveik.probes as probes
from kreveik import network
class TopologicalNetwork(ProbeableObj):
"""
This object is a stripped down network, designated to be a core
object for all network-like objects, like sub-graphs and motifs.
"""
def __init__ (self,adjacency_matrix):
ProbeableObj.__init__(self)
self.adjacency = num.array(adjacency_matrix,dtype=bool)
self.code = str(len(self.adjacency))+"-"+str(reduce(lambda x,y : 2*x+y,
self.adjacency.flatten()*1))
self.n_nodes = len(self.adjacency)
def text_plot(self):
for i in range(len(self.adjacency)):
for j in range(len(self.adjacency)):
if self.adjacency[i][j]==True:
print str(j)+"--->"+str(i)
def indegree(self):
return self.adjacency.sum(axis=0)
def outdegree(self):
return self.adjacency.sum(axis=1)
def plot(self):
"""Opens a window, draws the graph into the window.
Requires Tk, and of course a windowing system.
"""
import Tkinter as tk
import math
window= tk.Tk()
canvas_size = 400
drawing = tk.Canvas(window, height=canvas_size, width=canvas_size, background="white")
n_nodes = self.n_nodes
radius = 150
node_radius = 10
drawing.create_text(200,10,text = "Network:"+str(id(self)))
list_of_coordinates = [(radius*math.sin(2*math.pi*n/n_nodes)+canvas_size/2,radius*math.cos(2*math.pi*n/n_nodes)+canvas_size/2) for n in range(n_nodes)]
for linksto,node in enumerate(self.adjacency):
for linksfrom,link in enumerate(node):
if linksto == linksfrom and link==True:
angle = math.atan2(list_of_coordinates[linksto][1]-200,
list_of_coordinates[linksto][0]-200)
drawing.create_line(list_of_coordinates[linksto][0]+node_radius*math.cos(angle),
list_of_coordinates[linksto][1]+node_radius*math.sin(angle),
list_of_coordinates[linksto][0]+node_radius*2*(math.cos(angle+20)),
list_of_coordinates[linksto][1]+node_radius*2*math.sin(angle+20),
list_of_coordinates[linksto][0]+node_radius*4*(math.cos(angle)),
list_of_coordinates[linksto][1]+node_radius*4*math.sin(angle),
list_of_coordinates[linksto][0]+node_radius*2*math.cos(angle-20),
list_of_coordinates[linksto][1]+node_radius*2*(math.sin(angle-20)),
list_of_coordinates[linksto][0]+node_radius*math.cos(angle),
list_of_coordinates[linksto][1]+node_radius*math.sin(angle),
smooth=True,joinstyle="round",fill="black",width=2,arrow="last"
)
elif link == True:
angle = math.atan2(list_of_coordinates[linksto][1]-list_of_coordinates[linksfrom][1],
list_of_coordinates[linksto][0]-list_of_coordinates[linksfrom][0])
drawing.create_line(list_of_coordinates[linksfrom][0]+node_radius*math.cos(angle),
list_of_coordinates[linksfrom][1]+node_radius*math.sin(angle),
list_of_coordinates[linksto][0]-node_radius*math.cos(angle),
list_of_coordinates[linksto][1]-node_radius*math.sin(angle),
fill="black",width=2,arrow="last")
for node_ctr,(x,y) in enumerate(list_of_coordinates):
if type(self) != Network:
node_color = "white"
text_color = "black"
            elif self.state.size == 0:
node_color = "white"
text_color = "black"
else:
if self.state[-1][node_ctr] == True:
node_color = "black"
text_color = "white"
else:
node_color = "white"
text_color = "black"
drawing.create_oval(x-node_radius,y-node_radius,x+node_radius,y+node_radius,width=2,fill=node_color)
drawing.create_text(x,y,text = str(node_ctr),fill = text_color, font="Arial")
drawing.pack()
window.mainloop()
def save_plot(self,filename):
"""
Saves the image as filename.ps in the working directory
Requires Tk, and of course a windowing system.
"""
import Tkinter as tk
import math
window= tk.Tk()
canvas_size = 400
drawing = tk.Canvas(window, height=canvas_size, width=canvas_size, background="white")
n_nodes = self.n_nodes
radius = 150
node_radius = 10
drawing.create_text(200,10,text = "Network:"+str(id(self)))
drawing.pack()
list_of_coordinates = [(radius*math.sin(2*math.pi*n/n_nodes)+canvas_size/2,radius*math.cos(2*math.pi*n/n_nodes)+canvas_size/2) for n in range(n_nodes)]
for linksto,node in enumerate(self.adjacency):
for linksfrom,link in enumerate(node):
if linksto == linksfrom and link==True:
angle = math.atan2(list_of_coordinates[linksto][1]-200,
list_of_coordinates[linksto][0]-200)
drawing.create_line(list_of_coordinates[linksto][0]+node_radius*math.cos(angle),
list_of_coordinates[linksto][1]+node_radius*math.sin(angle),
list_of_coordinates[linksto][0]+node_radius*2*(math.cos(angle+20)),
list_of_coordinates[linksto][1]+node_radius*2*math.sin(angle+20),
list_of_coordinates[linksto][0]+node_radius*4*(math.cos(angle)),
list_of_coordinates[linksto][1]+node_radius*4*math.sin(angle),
list_of_coordinates[linksto][0]+node_radius*2*math.cos(angle-20),
list_of_coordinates[linksto][1]+node_radius*2*(math.sin(angle-20)),
list_of_coordinates[linksto][0]+node_radius*math.cos(angle),
list_of_coordinates[linksto][1]+node_radius*math.sin(angle),
smooth=True,joinstyle="round",fill="black",width=2,arrow="last"
)
elif link == True:
angle = math.atan2(list_of_coordinates[linksto][1]-list_of_coordinates[linksfrom][1],
list_of_coordinates[linksto][0]-list_of_coordinates[linksfrom][0])
drawing.create_line(list_of_coordinates[linksfrom][0]+node_radius*math.cos(angle),
list_of_coordinates[linksfrom][1]+node_radius*math.sin(angle),
list_of_coordinates[linksto][0]-node_radius*math.cos(angle),
list_of_coordinates[linksto][1]-node_radius*math.sin(angle),
fill="black",width=2,arrow="last")
for node_ctr,(x,y) in enumerate(list_of_coordinates):
if type(self) != Network:
node_color = "white"
text_color = "black"
            elif self.state.size == 0:
node_color = "white"
text_color = "black"
else:
if self.state[-1][node_ctr] == True:
node_color = "black"
text_color = "white"
else:
node_color = "white"
text_color = "black"
drawing.create_oval(x-node_radius,y-node_radius,x+node_radius,y+node_radius,width=2,fill=node_color)
drawing.create_text(x,y,text = str(node_ctr),fill = text_color, font="Arial")
drawing.update()
drawing.pack()
drawing.postscript(file=filename+".ps")
window.destroy() # This destroys the window that is
# generated for the postscript extraction
# We actually need a Tk setting which disables
# rendering
# TODO (mehmet.ali.anil)
#window.mainloop
def laplacian(self):
"""
Returns the graph laplacian of the network
"""
symmetric = self.adjacency+self.adjacency.T-num.diag(self.adjacency.diagonal())
degrees = num.diag(symmetric.sum(axis=0))
laplacian = degrees-symmetric
return laplacian
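    # Worked example (added for clarity; a sketch, not part of the original
    # code). For the 3-node chain 0 -> 1 -> 2 the adjacency matrix is
    #     A = [[0,0,0],[1,0,0],[0,1,0]]   (A[i][j] == True means j ---> i)
    # The symmetrised matrix has degree sequence (1, 2, 1), so
    #     L = D - A_sym = [[ 1, -1,  0],
    #                      [-1,  2, -1],
    #                      [ 0, -1,  1]]
    # which is what laplacian() returns for that network.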
def directed_laplacian(self):
"""
Returns the laplacian of the network. It differs from laplacian function by using
the original adjacency matrix, not the symmetricised version of it.
"""
original = self.adjacency-num.diag(self.adjacency.diagonal())
degrees = num.diag(original.sum(axis=0)+original.sum(axis=1))
laplacian = degrees-original
return laplacian
def indegree_laplacian(self):
"""
Returns the laplacian composed of in-degrees of the nodes
"""
original = self.adjacency-num.diag(self.adjacency.diagonal())
degrees = num.diag(original.sum(axis=1))
laplacian = degrees-original
return laplacian
def outdegree_laplacian(self):
"""
Returns the laplacian composed of out-degrees of the nodes
"""
original = self.adjacency-num.diag(self.adjacency.diagonal())
degrees = num.diag(original.sum(axis=0))
laplacian = degrees-original
return laplacian
def laplacian_eigvals(self):
"""
Returns an ordered array of eigenvalues of the laplacian.
"""
ordered_list = []
values = []
eigenvalues = num.linalg.eigvals(self.laplacian())
for i in range(len(self.adjacency)):
values.append(eigenvalues[i])
for i in range(len(self.adjacency)):
ordered_list.append(min(values))
values.remove(min(values))
return num.array(ordered_list)
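    # Added note: the selection loop above is equivalent to sorting the
    # eigenvalues in ascending order, e.g.
    #     num.sort(num.linalg.eigvals(self.laplacian()))
    # The explicit loop is kept to preserve the original code.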
def spectral_distance(self,other):
"""
Computes spectral distance between networks.
"""
difference = self.laplacian_eigvals()-other.laplacian_eigvals()
distance = difference * difference
spec_distance = distance.sum()
return spec_distance
def diameter(self):
"""
        Computes the diameter of the network: the maximum, over all pairs of
        nodes, of the minimum number of edges between them (i.e. the longest
        shortest path). For a connected network the diameter cannot exceed the
        number of nodes, so a disconnected network is signalled by returning
        the number of nodes plus 1.
"""
symmetric = self.adjacency+self.adjacency.T-num.diag(self.adjacency.diagonal())
adj=symmetric*1
new_adjacency=adj
summed_adjacency=adj
result=0
for j in range(len(self.adjacency)+1):
result=result+1
if num.alltrue(summed_adjacency):
break
else:
new_adjacency=num.dot(new_adjacency, adj)
summed_adjacency=summed_adjacency+new_adjacency
return result
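    # Worked example (added; a sketch, not part of the original code): for the
    # 3-node chain 0 -- 1 -- 2, powers of the symmetrised adjacency matrix are
    # accumulated until every entry of the running sum is non-zero. After A^1
    # node 0 still cannot reach node 2; after adding A^2 every entry is
    # non-zero, so the loop stops with result == 2, the diameter of the chain.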
def is_connected(self):
"""
Returns True if the graph is connected, False if not.
uses the algorithm explained in
http://keithbriggs.info/documents/graph-eigenvalues.pdf
"""
symmetric = self.adjacency+self.adjacency.T-num.diag(
self.adjacency.diagonal())
if (0 in symmetric.sum(axis=0) or 0 in symmetric.sum(axis=1)):
return False
degrees = num.diagflat(symmetric.sum(axis=0))
laplacian = degrees-symmetric
determinant = num.linalg.det(laplacian +num.ones((len(laplacian),len(laplacian) )))
return not(num.allclose(determinant,0.0))
def remove_self_connection(self):
"""
Removes self connections of the nodes in the network.
"""
diagonal = num.diag(num.diag(self.adjacency))
new_adjacency = self.adjacency - diagonal
self.adjacency = new_adjacency
def copy(self):
"""
Returns a copy of the Topological Network object.
"""
return copy.deepcopy(self)
def save(self,filename):
"""
Saves the Network as an object to a file specified.
"""
import pickle
try:
filehandler = open(filename+".net", 'w')
pickle.dump(self,filehandler)
except pickle.PickleError:
logging.error("The object failed to be pickled.")
class Motif(TopologicalNetwork):
"""
    A Motif is a small TopologicalNetwork used as a subgraph pattern; two
    motifs compare equal when one adjacency matrix can be turned into the
    other by permuting node labels (see __eq__).
"""
def __init__(self, adjacency_matrix):
TopologicalNetwork.__init__(self, adjacency_matrix)
self.degree = len(adjacency_matrix)
def __eq__(self,other):
permutation_list = itertools.permutations(range(self.degree),self.degree)
for permutation in permutation_list:
if num.sum(self.indegree()) != num.sum(other.indegree()):
return False
newarray = num.zeros((len(self.adjacency),len(self.adjacency)),dtype=bool)
#newarray[[node_init,node_end]] = newarray[[node_end,node_init]]
#newarray[:,[node_init,node_end]] = newarray[:,[node_end,node_init]]
for rowctr,row in enumerate(self.adjacency):
for colctr,col in enumerate(row):
if col == True:
newarray[permutation[rowctr]][permutation[colctr]]= True
if num.all(newarray == other.adjacency):
return True
return False
class Network(TopologicalNetwork,Element):
'''
Network Class
Input Arguments
adjacency_matrix
mask
state_vec
'''
def __init__ (self,adjacency_matrix,mask,function,state_vec=None):
Element.__init__(self)
TopologicalNetwork.__init__(self,adjacency_matrix)
self.n_nodes= num.size(adjacency_matrix,0)
self.mask=mask
        if state_vec is None:
state_vec= (num.random.random((1,self.n_nodes))< 0.5)
self.state=num.array(state_vec)
self.function = function
def __str__(self):
return str(id(self))
def info(self):
'''
Prints out an identification of the Network.
Prints:
Id
Mothers
Children
Orbits
Score
Adjacency matrix
sTate
masK
'''
print "This network is : "+str(id(self))+"."
print "Nodes: "+str(self.n_nodes)
print "Score: "+str(self.score)
print "Its children are: "
for child in self.children:
print " "+str(child)
print "It has the following adjacency matrix: "
print self.adjacency
print "The following are the masks for each node: "
        for (node_ctr, node) in enumerate(self.mask):
            print str(node_ctr)+" th node : "+str(node)
        print "The following are the states with respect to time "
        for (t, state) in enumerate(self.state):
            print "t= "+str(t)+" : "+str(state)
print "The scorer is : "
print self.scorer
def __getitem__(self, index):
"""
nth item of a network object is the state that it is in, in the nth
iteration
"""
if index > len(self.state):
raise IndexError
return self.state[index]
def __contains__(self, state):
"""
Returns a boolean according to whether a network includes the state
"""
item = num.array(state*True)
return item in self.state
def __call__ (self,state):
"""
When a network is called as a function, it sets the initial condition
as the given vector, finds the equilibrium of that state.
"""
self.set_state(state)
self.search_equilibrium(2**self.n_nodes,state,orbit_extraction=False,def_advance=1)
def advance(self,times,start_from=None,*args):
'''
Advances the state in the phase space a given number of times.
If a starter state is given, the initial condition is taken as the given state.
If not, the last state is used instead.
Input Arguments
times -> the number of iterations to be taken.
starter_state -> the initial state to be used
'''
        if start_from is not None:
self.set_state(start_from)
newstate = self.function(self,self.state[-1],times)
self.state = num.append(self.state,newstate,axis=0)
self.populate_probes(probes.advance)
def set_state(self,state):
"""
Flushes the state of the system, and sets the new state as the given one
"""
if type(state) == int:
state = [int(strings)==True for strings in list(num.binary_repr(
(state),width=self.n_nodes))]
state_bool = [i == True for i in state]
state = [list(state_bool)]
self.state = num.array(state)
def plot_state(self,last=20):
'''
        Plots the last `last` states (20 by default) as black and white strips,
        stacked vertically.
The vertical axis is time, whereas each strip is a single state.
Input Arguments
last -> the number of states that will be plotted
'''
# Take the state vector, convert the list of arrays into a 2d array, then show it as an image
# Black and white.
# plt.imshow(self.state[-last:],cmap=plt.cm.binary,interpolation='nearest')
plt.show()
def plot_equilibria(self):
"""Creates a plot of the equilibria for all possible initial conditions
in the phase space. Every point in the phase space corresponds to the
length of the orbit that initial condition is attracted to.
"""
rowsandcols = 2**(len(self.adjacency)/2)
if self.n_nodes % 2 == 0:
im_matrix = self.equilibria.reshape((rowsandcols,rowsandcols))
if self.n_nodes % 2 == 1:
im_matrix = self.equilibria.reshape((rowsandcols,rowsandcols*2))
# plt.imshow(im_matrix,cmap=plt.cm.gray,interpolation='nearest')
plt.grid()
plt.colorbar()
plt.show()
def search_equilibrium(self,chaos_limit,starter_state,orbit_extraction=False,def_advance=1):
'''
Searches for an equilibrium point, or a limit cycle.
Returns the state vector, or the state vector list, if the equilibrium is a limit cycle.
If no equilibrium is found, returns False.
Input Arguments:
starter_state -> the initial state vector that the state will evolve on.
chaos_limit -> the degree that an orbit will be considered as chaotic.
The calculation will stop when this point is reached.
orbit_extraction -> True when every individual orbit is recorded with its degree.
'''
self.set_state(starter_state)
starter_state = self.state[-1]
for ctr in xrange(chaos_limit):
self.advance(def_advance)
row = num.all(self.state[-1] == self.state, axis=1)
where = num.where(row==True)
if len(where[0])> 1:
frst_where = where[0][0]
scnd_where = where[0][1]
orbit_length = scnd_where-frst_where
orbit = None
location = reduce(lambda x,y : 2*x+y, starter_state)
if orbit_extraction:
orbit = self.state[frst_where:scnd_where]
self.populate_probes(probes.search_equilibrium)
trajectory_length = frst_where+1
return (orbit_length,orbit,trajectory_length)
def populate_equilibria(self,orbit_extraction=False):
'''
        Creates all possible initial conditions by listing all 2**n boolean states,
        then runs search_equilibrium for each of them. search_equilibrium returns
        the length of the orbit (and, optionally, the orbit itself) that each
        initial condition settles into; the lengths are stored in self.equilibria
        and the orbits in self.orbits.
        Input Arguments:
            orbit_extraction -> True when every individual orbit should be
                                recorded along with its length.
        '''
if not(hasattr(self,"equilibria")):
self.equilibria = num.zeros(2**self.n_nodes)
if not(hasattr(self,"orbits")):
if orbit_extraction:
self.orbits = num.array([None]*2**self.n_nodes)
self.equilibria = num.zeros(2**self.n_nodes)
if orbit_extraction:
self.orbits = num.array([None]*2**self.n_nodes)
binspace = range(0,num.power(2,self.n_nodes))
unit_advance = 1
for location,state in enumerate(binspace):
result = self.search_equilibrium(2**self.n_nodes,state,orbit_extraction,def_advance=unit_advance)
(orbit_length,orbit,trajectory_length) = result
if orbit_extraction:
self.orbits[location] = orbit
self.equilibria[location] = orbit_length
unit_advance = trajectory_length
self.populate_probes(probes.populate_equilibria)
def search_all_orbits(self):
"""
Searches orbits for all initial conditions.
Returns the list of orbits for each initial state.
"""
import numpy as num
binspace = range(0,num.power(2,self.n_nodes))
orbits_of_initials=[]
for state in binspace:
            (orbit_length, orbit, trajectory_length) = self.search_equilibrium(2**self.n_nodes, state, True)
orbits_of_initials.append(orbit)
return orbits_of_initials
#def initial_states_of_orbits(self):
# """
# TODO
# """
# orbit_list=[]
# initial_states=num.zeros(num.power(2,self.n_nodes))
# count=0
# binspace = range(0,num.power(2,self.n_nodes))
# all_orbits=self.search_all_orbits()
# for i in range(len(all_orbits)):
# if initial_states[i]==0:
# for j in range(len(all_orbits)):
#
# count=count+1
|
|
"""
The methods for loading Home Assistant components.
This module has quite some complex parts. I have tried to add as much
documentation as possible to keep it understandable.
Components can be accessed via hass.components.switch from your code.
If you want to retrieve a platform that is part of a component, you should
call get_component(hass, 'switch.your_platform'). In both cases the config
directory is checked to see if it contains a user provided version. If not
available it will check the built-in components and platforms.
"""
import functools as ft
import importlib
import logging
import sys
from types import ModuleType
from typing import Optional, Set, TYPE_CHECKING, Callable, Any, TypeVar, List # noqa pylint: disable=unused-import
from homeassistant.const import PLATFORM_FORMAT
# Typing imports that create a circular dependency
# pylint: disable=using-constant-test,unused-import
if TYPE_CHECKING:
from homeassistant.core import HomeAssistant # NOQA
CALLABLE_T = TypeVar('CALLABLE_T', bound=Callable) # noqa pylint: disable=invalid-name
PREPARED = False
DEPENDENCY_BLACKLIST = {'config'}
_LOGGER = logging.getLogger(__name__)
DATA_KEY = 'components'
PACKAGE_CUSTOM_COMPONENTS = 'custom_components'
PACKAGE_BUILTIN = 'homeassistant.components'
LOOKUP_PATHS = [PACKAGE_CUSTOM_COMPONENTS, PACKAGE_BUILTIN]
class LoaderError(Exception):
"""Loader base error."""
class ComponentNotFound(LoaderError):
"""Raised when a component is not found."""
def __init__(self, domain: str) -> None:
"""Initialize a component not found error."""
super().__init__("Component {} not found.".format(domain))
self.domain = domain
class CircularDependency(LoaderError):
"""Raised when a circular dependency is found when resolving components."""
def __init__(self, from_domain: str, to_domain: str) -> None:
"""Initialize circular dependency error."""
super().__init__("Circular dependency detected: {} -> {}.".format(
from_domain, to_domain))
self.from_domain = from_domain
self.to_domain = to_domain
def set_component(hass, # type: HomeAssistant
comp_name: str, component: Optional[ModuleType]) -> None:
"""Set a component in the cache.
Async friendly.
"""
cache = hass.data.setdefault(DATA_KEY, {})
cache[comp_name] = component
def get_platform(hass, # type: HomeAssistant
domain: str, platform_name: str) -> Optional[ModuleType]:
"""Try to load specified platform.
Example invocation: get_platform(hass, 'light', 'hue')
Async friendly.
"""
# If the platform has a component, we will limit the platform loading path
# to be the same source (custom/built-in).
component = _load_file(hass, platform_name, LOOKUP_PATHS)
# Until we have moved all platforms under their component/own folder, it
# can be that the component is None.
if component is not None:
base_paths = [component.__name__.rsplit('.', 1)[0]]
else:
base_paths = LOOKUP_PATHS
platform = _load_file(
hass, PLATFORM_FORMAT.format(domain=domain, platform=platform_name),
base_paths)
if platform is not None:
return platform
# Legacy platform check: light/hue.py
platform = _load_file(
hass, PLATFORM_FORMAT.format(domain=platform_name, platform=domain),
base_paths)
if platform is None:
if component is None:
extra = ""
else:
extra = " Search path was limited to path of component: {}".format(
base_paths[0])
_LOGGER.error("Unable to find platform %s.%s", platform_name, extra)
return None
if platform.__name__.startswith(PACKAGE_CUSTOM_COMPONENTS):
_LOGGER.warning(
"Integrations need to be in their own folder. Change %s/%s.py to "
"%s/%s.py. This will stop working soon.",
domain, platform_name, platform_name, domain)
return platform
def get_component(hass, # type: HomeAssistant
comp_or_platform: str) -> Optional[ModuleType]:
"""Try to load specified component.
Async friendly.
"""
comp = _load_file(hass, comp_or_platform, LOOKUP_PATHS)
if comp is None:
_LOGGER.error("Unable to find component %s", comp_or_platform)
return comp
def _load_file(hass, # type: HomeAssistant
comp_or_platform: str,
base_paths: List[str]) -> Optional[ModuleType]:
"""Try to load specified file.
Looks in config dir first, then built-in components.
Only returns it if also found to be valid.
Async friendly.
"""
try:
return hass.data[DATA_KEY][comp_or_platform] # type: ignore
except KeyError:
pass
cache = hass.data.get(DATA_KEY)
if cache is None:
if hass.config.config_dir is None:
_LOGGER.error("Can't load components - config dir is not set")
return None
# Only insert if it's not there (happens during tests)
if sys.path[0] != hass.config.config_dir:
sys.path.insert(0, hass.config.config_dir)
cache = hass.data[DATA_KEY] = {}
for path in ('{}.{}'.format(base, comp_or_platform)
for base in base_paths):
try:
module = importlib.import_module(path)
# In Python 3 you can import files from directories that do not
# contain the file __init__.py. A directory is a valid module if
# it contains a file with the .py extension. In this case Python
# will succeed in importing the directory as a module and call it
# a namespace. We do not care about namespaces.
            # This check stops the loader from accepting custom_components.switch
            # as a component when only custom_components/switch/some_platform.py
            # exists and the package itself is just a namespace.
# __file__ was unset for namespaces before Python 3.7
if getattr(module, '__file__', None) is None:
continue
_LOGGER.info("Loaded %s from %s", comp_or_platform, path)
cache[comp_or_platform] = module
if module.__name__.startswith(PACKAGE_CUSTOM_COMPONENTS):
_LOGGER.warning(
'You are using a custom component for %s which has not '
'been tested by Home Assistant. This component might '
'cause stability problems, be sure to disable it if you '
'do experience issues with Home Assistant.',
comp_or_platform)
return module
except ImportError as err:
# This error happens if for example custom_components/switch
# exists and we try to load switch.demo.
# Ignore errors for custom_components, custom_components.switch
# and custom_components.switch.demo.
white_listed_errors = []
parts = []
for part in path.split('.'):
parts.append(part)
white_listed_errors.append(
"No module named '{}'".format('.'.join(parts)))
if str(err) not in white_listed_errors:
_LOGGER.exception(
("Error loading %s. Make sure all "
"dependencies are installed"), path)
return None
class ModuleWrapper:
"""Class to wrap a Python module and auto fill in hass argument."""
def __init__(self,
hass, # type: HomeAssistant
module: ModuleType) -> None:
"""Initialize the module wrapper."""
self._hass = hass
self._module = module
def __getattr__(self, attr: str) -> Any:
"""Fetch an attribute."""
value = getattr(self._module, attr)
if hasattr(value, '__bind_hass'):
value = ft.partial(value, self._hass)
setattr(self, attr, value)
return value
class Components:
"""Helper to load components."""
def __init__(
self,
hass # type: HomeAssistant
) -> None:
"""Initialize the Components class."""
self._hass = hass
def __getattr__(self, comp_name: str) -> ModuleWrapper:
"""Fetch a component."""
component = get_component(self._hass, comp_name)
if component is None:
raise ImportError('Unable to load {}'.format(comp_name))
wrapped = ModuleWrapper(self._hass, component)
setattr(self, comp_name, wrapped)
return wrapped
class Helpers:
"""Helper to load helpers."""
def __init__(
self,
hass # type: HomeAssistant
) -> None:
"""Initialize the Helpers class."""
self._hass = hass
def __getattr__(self, helper_name: str) -> ModuleWrapper:
"""Fetch a helper."""
helper = importlib.import_module(
'homeassistant.helpers.{}'.format(helper_name))
wrapped = ModuleWrapper(self._hass, helper)
setattr(self, helper_name, wrapped)
return wrapped
def bind_hass(func: CALLABLE_T) -> CALLABLE_T:
"""Decorate function to indicate that first argument is hass."""
setattr(func, '__bind_hass', True)
return func
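# Illustrative sketch (added; the function name is hypothetical, not from this
# module): a helper decorated with @bind_hass, e.g.
#
#     @bind_hass
#     def turn_on(hass, entity_id):
#         ...
#
# gains the '__bind_hass' marker, so when it is reached through
# hass.components.<component>.turn_on the ModuleWrapper above partially applies
# hass and the caller only passes entity_id.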
def component_dependencies(hass, # type: HomeAssistant
comp_name: str) -> Set[str]:
"""Return all dependencies and subdependencies of components.
Raises CircularDependency if a circular dependency is found.
Async friendly.
"""
return _component_dependencies(hass, comp_name, set(), set())
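# Example (added for illustration; the component names are hypothetical): if a
# component 'light' declares DEPENDENCIES = ['group'] and 'group' declares no
# further dependencies, component_dependencies(hass, 'light') returns
# {'light', 'group'}. A self- or mutual reference raises CircularDependency.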
def _component_dependencies(hass, # type: HomeAssistant
comp_name: str, loaded: Set[str],
loading: Set) -> Set[str]:
"""Recursive function to get component dependencies.
Async friendly.
"""
component = get_component(hass, comp_name)
if component is None:
raise ComponentNotFound(comp_name)
loading.add(comp_name)
for dependency in getattr(component, 'DEPENDENCIES', []):
# Check not already loaded
if dependency in loaded:
continue
# If we are already loading it, we have a circular dependency.
if dependency in loading:
raise CircularDependency(comp_name, dependency)
dep_loaded = _component_dependencies(
hass, dependency, loaded, loading)
loaded.update(dep_loaded)
loaded.add(comp_name)
loading.remove(comp_name)
return loaded
|
|
"""
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2011 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Ying Liu, Cisco Systems, Inc.
#
"""
from webob import exc
from extensions import _pprofiles as pprofiles_view
from quantum.api import api_common as common
from quantum.common import exceptions as qexception
from quantum.common import extensions
from quantum.manager import QuantumManager
from quantum.plugins.cisco.common import cisco_exceptions as exception
from quantum.plugins.cisco.common import cisco_faults as faults
class Portprofile(object):
"""extension class Portprofile"""
def __init__(self):
pass
@classmethod
def get_name(cls):
""" Returns Ext Resource Name """
return "Cisco Port Profile"
@classmethod
def get_alias(cls):
""" Returns Ext Resource alias """
return "Cisco Port Profile"
@classmethod
def get_description(cls):
""" Returns Ext Resource Description """
return "Portprofile include QoS information"
@classmethod
def get_namespace(cls):
""" Returns Ext Resource Namespace """
return "http://docs.ciscocloud.com/api/ext/portprofile/v1.0"
@classmethod
def get_updated(cls):
""" Returns Ext Resource Updated time """
return "2011-07-23T13:25:27-06:00"
@classmethod
def get_resources(cls):
""" Returns all defined resources """
parent_resource = dict(member_name="tenant",
collection_name="extensions/csco/tenants")
member_actions = {'associate_portprofile': "PUT",
'disassociate_portprofile': "PUT"}
controller = PortprofilesController(QuantumManager.get_plugin())
return [extensions.ResourceExtension('portprofiles', controller,
parent=parent_resource,
member_actions=member_actions)]
class PortprofilesController(common.QuantumController):
""" portprofile API controller
based on QuantumController """
def __init__(self, plugin):
self._resource_name = 'portprofile'
self._plugin = plugin
self._portprofile_ops_param_list = [{
'param-name': 'portprofile_name',
'required': True}, {
'param-name': 'qos_name',
'required': True}, {
'param-name': 'assignment',
'required': False}]
self._assignprofile_ops_param_list = [{
'param-name': 'network-id',
'required': True}, {
'param-name': 'port-id',
'required': True}]
self._serialization_metadata = {
"application/xml": {
"attributes": {
"portprofile": ["id", "name"],
},
},
}
def index(self, request, tenant_id):
""" Returns a list of portprofile ids """
return self._items(request, tenant_id, is_detail=False)
def _items(self, request, tenant_id, is_detail):
""" Returns a list of portprofiles. """
portprofiles = self._plugin.get_all_portprofiles(tenant_id)
builder = pprofiles_view.get_view_builder(request)
result = [builder.build(portprofile, is_detail)['portprofile']
for portprofile in portprofiles]
return dict(portprofiles=result)
# pylint: disable-msg=E1101
def show(self, request, tenant_id, id):
""" Returns portprofile details for the given portprofile id """
try:
portprofile = self._plugin.get_portprofile_details(
tenant_id, id)
builder = pprofiles_view.get_view_builder(request)
#build response with details
result = builder.build(portprofile, True)
return dict(portprofiles=result)
except exception.PortProfileNotFound as exp:
return faults.Fault(faults.PortprofileNotFound(exp))
def create(self, request, tenant_id):
""" Creates a new portprofile for a given tenant """
#look for portprofile name in request
try:
req_params = \
self._parse_request_params(request,
self._portprofile_ops_param_list)
except exc.HTTPError as exp:
return faults.Fault(exp)
portprofile = self._plugin.\
create_portprofile(tenant_id,
req_params['portprofile_name'],
req_params['qos_name'])
builder = pprofiles_view.get_view_builder(request)
result = builder.build(portprofile)
return dict(portprofiles=result)
def update(self, request, tenant_id, id):
""" Updates the name for the portprofile with the given id """
try:
req_params = \
self._parse_request_params(request,
self._portprofile_ops_param_list)
except exc.HTTPError as exp:
return faults.Fault(exp)
try:
portprofile = self._plugin.\
rename_portprofile(tenant_id,
id, req_params['portprofile_name'])
builder = pprofiles_view.get_view_builder(request)
result = builder.build(portprofile, True)
return dict(portprofiles=result)
except exception.PortProfileNotFound as exp:
return faults.Fault(faults.PortprofileNotFound(exp))
def delete(self, request, tenant_id, id):
""" Destroys the portprofile with the given id """
try:
self._plugin.delete_portprofile(tenant_id, id)
return exc.HTTPOk()
except exception.PortProfileNotFound as exp:
return faults.Fault(faults.PortprofileNotFound(exp))
def associate_portprofile(self, request, tenant_id, id):
""" associate a portprofile to the port """
content_type = request.best_match_content_type()
try:
req_params = \
self._parse_request_params(request,
self._assignprofile_ops_param_list)
except exc.HTTPError as exp:
return faults.Fault(exp)
net_id = req_params['network-id'].strip()
port_id = req_params['port-id'].strip()
try:
self._plugin.associate_portprofile(tenant_id,
net_id, port_id,
id)
return exc.HTTPOk()
except exception.PortProfileNotFound as exp:
return faults.Fault(faults.PortprofileNotFound(exp))
except qexception.PortNotFound as exp:
return faults.Fault(faults.PortNotFound(exp))
def disassociate_portprofile(self, request, tenant_id, id):
""" Disassociate a portprofile from a port """
content_type = request.best_match_content_type()
try:
req_params = \
self._parse_request_params(request,
self._assignprofile_ops_param_list)
except exc.HTTPError as exp:
return faults.Fault(exp)
net_id = req_params['network-id'].strip()
port_id = req_params['port-id'].strip()
try:
self._plugin. \
disassociate_portprofile(tenant_id,
net_id, port_id, id)
return exc.HTTPOk()
except exception.PortProfileNotFound as exp:
return faults.Fault(faults.PortprofileNotFound(exp))
except qexception.PortNotFound as exp:
return faults.Fault(faults.PortNotFound(exp))
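# Illustration only: the resource extension registered above implies REST
# routes of roughly the following shape (identifiers in braces are
# placeholders, and the exact prefix and verb mapping depend on how the
# extension middleware mounts the collection).
_EXAMPLE_PORTPROFILE_ROUTES = [
    "GET    /extensions/csco/tenants/{tenant_id}/portprofiles",
    "GET    /extensions/csco/tenants/{tenant_id}/portprofiles/{id}",
    "POST   /extensions/csco/tenants/{tenant_id}/portprofiles",
    "PUT    /extensions/csco/tenants/{tenant_id}/portprofiles/{id}",
    "DELETE /extensions/csco/tenants/{tenant_id}/portprofiles/{id}",
    "PUT    /extensions/csco/tenants/{tenant_id}/portprofiles/{id}/associate_portprofile",
    "PUT    /extensions/csco/tenants/{tenant_id}/portprofiles/{id}/disassociate_portprofile",
]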
|
|
import ujson as json
from v20.base_entity import BaseEntity
from v20.base_entity import EntityDict
from v20.request import Request
from v20 import spec_properties
class Candlestick(BaseEntity):
"""
The Candlestick representation
"""
#
# Format string used when generating a summary for this object
#
_summary_format = ""
#
# Format string used when generating a name for this object
#
_name_format = ""
#
# Property metadata for this object
#
_properties = spec_properties.instrument_Candlestick
def __init__(self, **kwargs):
"""
Create a new Candlestick instance
"""
super(Candlestick, self).__init__()
#
# The start time of the candlestick
#
self.time = kwargs.get("time")
#
# The candlestick data based on bids. Only provided if bid-based
# candles were requested.
#
self.bid = kwargs.get("bid")
#
# The candlestick data based on asks. Only provided if ask-based
# candles were requested.
#
self.ask = kwargs.get("ask")
#
# The candlestick data based on midpoints. Only provided if midpoint-
# based candles were requested.
#
self.mid = kwargs.get("mid")
#
# The number of prices created during the time-range represented by the
# candlestick.
#
self.volume = kwargs.get("volume")
#
# A flag indicating if the candlestick is complete. A complete
# candlestick is one whose ending time is not in the future.
#
self.complete = kwargs.get("complete")
@staticmethod
def from_dict(data, ctx):
"""
Instantiate a new Candlestick from a dict (generally from loading a
JSON response). The data used to instantiate the Candlestick is a
shallow copy of the dict passed in, with any complex child types
instantiated appropriately.
"""
data = data.copy()
if data.get('bid') is not None:
data['bid'] = \
ctx.instrument.CandlestickData.from_dict(
data['bid'], ctx
)
if data.get('ask') is not None:
data['ask'] = \
ctx.instrument.CandlestickData.from_dict(
data['ask'], ctx
)
if data.get('mid') is not None:
data['mid'] = \
ctx.instrument.CandlestickData.from_dict(
data['mid'], ctx
)
return Candlestick(**data)
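# Illustration only (not part of the v20 specification): Candlestick accepts
# plain keyword arguments, so a bare instance can be built, for example in unit
# tests, without a Context. The field values below are made up.
def _example_candlestick():
    return Candlestick(
        time="2023-01-02T00:00:00.000000000Z",  # hypothetical timestamp
        volume=42,
        complete=True,
    )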
class CandlestickData(BaseEntity):
"""
The price data (open, high, low, close) for the Candlestick representation.
"""
#
# Format string used when generating a summary for this object
#
_summary_format = ""
#
# Format string used when generating a name for this object
#
_name_format = ""
#
# Property metadata for this object
#
_properties = spec_properties.instrument_CandlestickData
def __init__(self, **kwargs):
"""
Create a new CandlestickData instance
"""
super(CandlestickData, self).__init__()
#
# The first (open) price in the time-range represented by the
# candlestick.
#
self.o = kwargs.get("o")
#
# The highest price in the time-range represented by the candlestick.
#
self.h = kwargs.get("h")
#
# The lowest price in the time-range represented by the candlestick.
#
self.l = kwargs.get("l")
#
# The last (closing) price in the time-range represented by the
# candlestick.
#
self.c = kwargs.get("c")
@staticmethod
def from_dict(data, ctx):
"""
Instantiate a new CandlestickData from a dict (generally from loading a
JSON response). The data used to instantiate the CandlestickData is a
shallow copy of the dict passed in, with any complex child types
instantiated appropriately.
"""
data = data.copy()
if data.get('o') is not None:
data['o'] = ctx.convert_decimal_number(
data.get('o')
)
if data.get('h') is not None:
data['h'] = ctx.convert_decimal_number(
data.get('h')
)
if data.get('l') is not None:
data['l'] = ctx.convert_decimal_number(
data.get('l')
)
if data.get('c') is not None:
data['c'] = ctx.convert_decimal_number(
data.get('c')
)
return CandlestickData(**data)
class OrderBook(BaseEntity):
"""
The representation of an instrument's order book at a point in time
"""
#
# Format string used when generating a summary for this object
#
_summary_format = ""
#
# Format string used when generating a name for this object
#
_name_format = ""
#
# Property metadata for this object
#
_properties = spec_properties.instrument_OrderBook
def __init__(self, **kwargs):
"""
Create a new OrderBook instance
"""
super(OrderBook, self).__init__()
#
# The order book's instrument
#
self.instrument = kwargs.get("instrument")
#
# The time when the order book snapshot was created.
#
self.time = kwargs.get("time")
#
# The price (midpoint) for the order book's instrument at the time of
# the order book snapshot
#
self.price = kwargs.get("price")
#
# The price width for each bucket. Each bucket covers the price range
# from the bucket's price to the bucket's price + bucketWidth.
#
self.bucketWidth = kwargs.get("bucketWidth")
#
# The partitioned order book, divided into buckets using a default
# bucket width. These buckets are only provided for price ranges which
# actually contain order or position data.
#
self.buckets = kwargs.get("buckets")
@staticmethod
def from_dict(data, ctx):
"""
Instantiate a new OrderBook from a dict (generally from loading a JSON
response). The data used to instantiate the OrderBook is a shallow copy
of the dict passed in, with any complex child types instantiated
appropriately.
"""
data = data.copy()
if data.get('price') is not None:
data['price'] = ctx.convert_decimal_number(
data.get('price')
)
if data.get('bucketWidth') is not None:
data['bucketWidth'] = ctx.convert_decimal_number(
data.get('bucketWidth')
)
if data.get('buckets') is not None:
data['buckets'] = [
ctx.instrument.OrderBookBucket.from_dict(d, ctx)
for d in data.get('buckets')
]
return OrderBook(**data)
class OrderBookBucket(BaseEntity):
"""
The order book data for a partition of the instrument's prices.
"""
#
# Format string used when generating a summary for this object
#
_summary_format = ""
#
# Format string used when generating a name for this object
#
_name_format = ""
#
# Property metadata for this object
#
_properties = spec_properties.instrument_OrderBookBucket
def __init__(self, **kwargs):
"""
Create a new OrderBookBucket instance
"""
super(OrderBookBucket, self).__init__()
#
# The lowest price (inclusive) covered by the bucket. The bucket covers
# the price range from the price to price + the order book's
# bucketWidth.
#
self.price = kwargs.get("price")
#
# The percentage of the total number of orders represented by the long
# orders found in this bucket.
#
self.longCountPercent = kwargs.get("longCountPercent")
#
# The percentage of the total number of orders represented by the short
# orders found in this bucket.
#
self.shortCountPercent = kwargs.get("shortCountPercent")
@staticmethod
def from_dict(data, ctx):
"""
Instantiate a new OrderBookBucket from a dict (generally from loading a
JSON response). The data used to instantiate the OrderBookBucket is a
shallow copy of the dict passed in, with any complex child types
instantiated appropriately.
"""
data = data.copy()
if data.get('price') is not None:
data['price'] = ctx.convert_decimal_number(
data.get('price')
)
if data.get('longCountPercent') is not None:
data['longCountPercent'] = ctx.convert_decimal_number(
data.get('longCountPercent')
)
if data.get('shortCountPercent') is not None:
data['shortCountPercent'] = ctx.convert_decimal_number(
data.get('shortCountPercent')
)
return OrderBookBucket(**data)
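# Illustration only: a minimal sketch of deriving the price range covered by a
# single bucket, following the docstrings above; it assumes the price and
# bucketWidth fields have already been converted to numeric values.
def _example_bucket_price_range(order_book, bucket):
    low = bucket.price                              # inclusive lower edge
    high = bucket.price + order_book.bucketWidth    # upper edge of the bucket
    return low, high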
class PositionBook(BaseEntity):
"""
The representation of an instrument's position book at a point in time
"""
#
# Format string used when generating a summary for this object
#
_summary_format = ""
#
# Format string used when generating a name for this object
#
_name_format = ""
#
# Property metadata for this object
#
_properties = spec_properties.instrument_PositionBook
def __init__(self, **kwargs):
"""
Create a new PositionBook instance
"""
super(PositionBook, self).__init__()
#
# The position book's instrument
#
self.instrument = kwargs.get("instrument")
#
# The time when the position book snapshot was created
#
self.time = kwargs.get("time")
#
# The price (midpoint) for the position book's instrument at the time
# of the position book snapshot
#
self.price = kwargs.get("price")
#
# The price width for each bucket. Each bucket covers the price range
# from the bucket's price to the bucket's price + bucketWidth.
#
self.bucketWidth = kwargs.get("bucketWidth")
#
# The partitioned position book, divided into buckets using a default
# bucket width. These buckets are only provided for price ranges which
# actually contain order or position data.
#
self.buckets = kwargs.get("buckets")
@staticmethod
def from_dict(data, ctx):
"""
Instantiate a new PositionBook from a dict (generally from loading a
JSON response). The data used to instantiate the PositionBook is a
shallow copy of the dict passed in, with any complex child types
instantiated appropriately.
"""
data = data.copy()
if data.get('price') is not None:
data['price'] = ctx.convert_decimal_number(
data.get('price')
)
if data.get('bucketWidth') is not None:
data['bucketWidth'] = ctx.convert_decimal_number(
data.get('bucketWidth')
)
if data.get('buckets') is not None:
data['buckets'] = [
ctx.instrument.PositionBookBucket.from_dict(d, ctx)
for d in data.get('buckets')
]
return PositionBook(**data)
class PositionBookBucket(BaseEntity):
"""
The position book data for a partition of the instrument's prices.
"""
#
# Format string used when generating a summary for this object
#
_summary_format = ""
#
# Format string used when generating a name for this object
#
_name_format = ""
#
# Property metadata for this object
#
_properties = spec_properties.instrument_PositionBookBucket
def __init__(self, **kwargs):
"""
Create a new PositionBookBucket instance
"""
super(PositionBookBucket, self).__init__()
#
# The lowest price (inclusive) covered by the bucket. The bucket covers
# the price range from the price to price + the position book's
# bucketWidth.
#
self.price = kwargs.get("price")
#
# The percentage of the total number of positions represented by the
# long positions found in this bucket.
#
self.longCountPercent = kwargs.get("longCountPercent")
#
# The percentage of the total number of positions represented by the
# short positions found in this bucket.
#
self.shortCountPercent = kwargs.get("shortCountPercent")
@staticmethod
def from_dict(data, ctx):
"""
Instantiate a new PositionBookBucket from a dict (generally from
loading a JSON response). The data used to instantiate the
PositionBookBucket is a shallow copy of the dict passed in, with any
complex child types instantiated appropriately.
"""
data = data.copy()
if data.get('price') is not None:
data['price'] = ctx.convert_decimal_number(
data.get('price')
)
if data.get('longCountPercent') is not None:
data['longCountPercent'] = ctx.convert_decimal_number(
data.get('longCountPercent')
)
if data.get('shortCountPercent') is not None:
data['shortCountPercent'] = ctx.convert_decimal_number(
data.get('shortCountPercent')
)
return PositionBookBucket(**data)
class EntitySpec(object):
"""
The instrument.EntitySpec wraps the instrument module's type definitions
and API methods so they can be easily accessed through an instance of a v20
Context.
"""
Candlestick = Candlestick
CandlestickData = CandlestickData
OrderBook = OrderBook
OrderBookBucket = OrderBookBucket
PositionBook = PositionBook
PositionBookBucket = PositionBookBucket
def __init__(self, ctx):
self.ctx = ctx
def candles(
self,
instrument,
**kwargs
):
"""
Fetch candlestick data for an instrument.
Args:
instrument:
Name of the Instrument
price:
The Price component(s) to get candlestick data for. Can contain
any combination of the characters "M" (midpoint candles) "B"
(bid candles) and "A" (ask candles).
granularity:
The granularity of the candlesticks to fetch
count:
                The number of candlesticks to return in the response. Count
                should not be specified if both the start and end parameters
                are provided, as the time range combined with the granularity
will determine the number of candlesticks to return.
fromTime:
The start of the time range to fetch candlesticks for.
toTime:
The end of the time range to fetch candlesticks for.
smooth:
A flag that controls whether the candlestick is "smoothed" or
not. A smoothed candlestick uses the previous candle's close
price as its open price, while an unsmoothed candlestick uses
the first price from its time range as its open price.
includeFirst:
A flag that controls whether the candlestick that is covered by
the from time should be included in the results. This flag
enables clients to use the timestamp of the last completed
candlestick received to poll for future candlesticks but avoid
receiving the previous candlestick repeatedly.
dailyAlignment:
The hour of the day (in the specified timezone) to use for
granularities that have daily alignments.
alignmentTimezone:
The timezone to use for the dailyAlignment parameter.
Candlesticks with daily alignment will be aligned to the
dailyAlignment hour within the alignmentTimezone. Note that
the returned times will still be represented in UTC.
weeklyAlignment:
The day of the week used for granularities that have weekly
alignment.
Returns:
v20.response.Response containing the results from submitting the
request
"""
request = Request(
'GET',
'/v3/instruments/{instrument}/candles'
)
request.set_path_param(
'instrument',
instrument
)
request.set_param(
'price',
kwargs.get('price')
)
request.set_param(
'granularity',
kwargs.get('granularity')
)
request.set_param(
'count',
kwargs.get('count')
)
request.set_param(
'from',
kwargs.get('fromTime')
)
request.set_param(
'to',
kwargs.get('toTime')
)
request.set_param(
'smooth',
kwargs.get('smooth')
)
request.set_param(
'includeFirst',
kwargs.get('includeFirst')
)
request.set_param(
'dailyAlignment',
kwargs.get('dailyAlignment')
)
request.set_param(
'alignmentTimezone',
kwargs.get('alignmentTimezone')
)
request.set_param(
'weeklyAlignment',
kwargs.get('weeklyAlignment')
)
response = self.ctx.request(request)
if response.content_type is None:
return response
if not response.content_type.startswith("application/json"):
return response
jbody = json.loads(response.raw_body)
parsed_body = {}
#
# Parse responses as defined by the API specification
#
if str(response.status) == "200":
if jbody.get('instrument') is not None:
parsed_body['instrument'] = \
jbody.get('instrument')
if jbody.get('granularity') is not None:
parsed_body['granularity'] = \
jbody.get('granularity')
if jbody.get('candles') is not None:
parsed_body['candles'] = [
self.ctx.instrument.Candlestick.from_dict(d, self.ctx)
for d in jbody.get('candles')
]
elif str(response.status) == "400":
if jbody.get('errorCode') is not None:
parsed_body['errorCode'] = \
jbody.get('errorCode')
if jbody.get('errorMessage') is not None:
parsed_body['errorMessage'] = \
jbody.get('errorMessage')
elif str(response.status) == "401":
if jbody.get('errorCode') is not None:
parsed_body['errorCode'] = \
jbody.get('errorCode')
if jbody.get('errorMessage') is not None:
parsed_body['errorMessage'] = \
jbody.get('errorMessage')
elif str(response.status) == "404":
if jbody.get('errorCode') is not None:
parsed_body['errorCode'] = \
jbody.get('errorCode')
if jbody.get('errorMessage') is not None:
parsed_body['errorMessage'] = \
jbody.get('errorMessage')
elif str(response.status) == "405":
if jbody.get('errorCode') is not None:
parsed_body['errorCode'] = \
jbody.get('errorCode')
if jbody.get('errorMessage') is not None:
parsed_body['errorMessage'] = \
jbody.get('errorMessage')
#
# Unexpected response status
#
else:
parsed_body = jbody
response.body = parsed_body
return response
def price(
self,
instrument,
**kwargs
):
"""
Fetch a price for an instrument. Accounts are not associated in any way
with this endpoint.
Args:
instrument:
Name of the Instrument
time:
The time at which the desired price is in effect. The current
price is returned if no time is provided.
Returns:
v20.response.Response containing the results from submitting the
request
"""
request = Request(
'GET',
'/v3/instruments/{instrument}/price'
)
request.set_path_param(
'instrument',
instrument
)
request.set_param(
'time',
kwargs.get('time')
)
response = self.ctx.request(request)
if response.content_type is None:
return response
if not response.content_type.startswith("application/json"):
return response
jbody = json.loads(response.raw_body)
parsed_body = {}
#
# Parse responses as defined by the API specification
#
if str(response.status) == "200":
if jbody.get('price') is not None:
parsed_body['price'] = \
self.ctx.pricing_common.Price.from_dict(
jbody['price'],
self.ctx
)
elif str(response.status) == "400":
if jbody.get('errorCode') is not None:
parsed_body['errorCode'] = \
jbody.get('errorCode')
if jbody.get('errorMessage') is not None:
parsed_body['errorMessage'] = \
jbody.get('errorMessage')
elif str(response.status) == "401":
if jbody.get('errorCode') is not None:
parsed_body['errorCode'] = \
jbody.get('errorCode')
if jbody.get('errorMessage') is not None:
parsed_body['errorMessage'] = \
jbody.get('errorMessage')
elif str(response.status) == "404":
if jbody.get('errorCode') is not None:
parsed_body['errorCode'] = \
jbody.get('errorCode')
if jbody.get('errorMessage') is not None:
parsed_body['errorMessage'] = \
jbody.get('errorMessage')
elif str(response.status) == "405":
if jbody.get('errorCode') is not None:
parsed_body['errorCode'] = \
jbody.get('errorCode')
if jbody.get('errorMessage') is not None:
parsed_body['errorMessage'] = \
jbody.get('errorMessage')
#
# Unexpected response status
#
else:
parsed_body = jbody
response.body = parsed_body
return response
def prices(
self,
instrument,
**kwargs
):
"""
Fetch a range of prices for an instrument. Accounts are not associated
in any way with this endpoint.
Args:
instrument:
Name of the Instrument
fromTime:
The start of the time range to fetch prices for.
toTime:
The end of the time range to fetch prices for. The current time
is used if this parameter is not provided.
Returns:
v20.response.Response containing the results from submitting the
request
"""
request = Request(
'GET',
'/v3/instruments/{instrument}/price/range'
)
request.set_path_param(
'instrument',
instrument
)
request.set_param(
'from',
kwargs.get('fromTime')
)
request.set_param(
'to',
kwargs.get('toTime')
)
response = self.ctx.request(request)
if response.content_type is None:
return response
if not response.content_type.startswith("application/json"):
return response
jbody = json.loads(response.raw_body)
parsed_body = {}
#
# Parse responses as defined by the API specification
#
if str(response.status) == "200":
if jbody.get('prices') is not None:
parsed_body['prices'] = [
self.ctx.pricing_common.Price.from_dict(d, self.ctx)
for d in jbody.get('prices')
]
elif str(response.status) == "400":
if jbody.get('errorCode') is not None:
parsed_body['errorCode'] = \
jbody.get('errorCode')
if jbody.get('errorMessage') is not None:
parsed_body['errorMessage'] = \
jbody.get('errorMessage')
elif str(response.status) == "401":
if jbody.get('errorCode') is not None:
parsed_body['errorCode'] = \
jbody.get('errorCode')
if jbody.get('errorMessage') is not None:
parsed_body['errorMessage'] = \
jbody.get('errorMessage')
elif str(response.status) == "404":
if jbody.get('errorCode') is not None:
parsed_body['errorCode'] = \
jbody.get('errorCode')
if jbody.get('errorMessage') is not None:
parsed_body['errorMessage'] = \
jbody.get('errorMessage')
elif str(response.status) == "405":
if jbody.get('errorCode') is not None:
parsed_body['errorCode'] = \
jbody.get('errorCode')
if jbody.get('errorMessage') is not None:
parsed_body['errorMessage'] = \
jbody.get('errorMessage')
#
# Unexpected response status
#
else:
parsed_body = jbody
response.body = parsed_body
return response
def order_book(
self,
instrument,
**kwargs
):
"""
Fetch an order book for an instrument.
Args:
instrument:
Name of the Instrument
time:
The time of the snapshot to fetch. If not specified, then the
most recent snapshot is fetched.
Returns:
v20.response.Response containing the results from submitting the
request
"""
request = Request(
'GET',
'/v3/instruments/{instrument}/orderBook'
)
request.set_path_param(
'instrument',
instrument
)
request.set_param(
'time',
kwargs.get('time')
)
response = self.ctx.request(request)
if response.content_type is None:
return response
if not response.content_type.startswith("application/json"):
return response
jbody = json.loads(response.raw_body)
parsed_body = {}
#
# Parse responses as defined by the API specification
#
if str(response.status) == "200":
if jbody.get('orderBook') is not None:
parsed_body['orderBook'] = \
self.ctx.instrument.OrderBook.from_dict(
jbody['orderBook'],
self.ctx
)
elif str(response.status) == "400":
if jbody.get('errorCode') is not None:
parsed_body['errorCode'] = \
jbody.get('errorCode')
if jbody.get('errorMessage') is not None:
parsed_body['errorMessage'] = \
jbody.get('errorMessage')
elif str(response.status) == "401":
if jbody.get('errorCode') is not None:
parsed_body['errorCode'] = \
jbody.get('errorCode')
if jbody.get('errorMessage') is not None:
parsed_body['errorMessage'] = \
jbody.get('errorMessage')
elif str(response.status) == "404":
if jbody.get('errorCode') is not None:
parsed_body['errorCode'] = \
jbody.get('errorCode')
if jbody.get('errorMessage') is not None:
parsed_body['errorMessage'] = \
jbody.get('errorMessage')
elif str(response.status) == "405":
if jbody.get('errorCode') is not None:
parsed_body['errorCode'] = \
jbody.get('errorCode')
if jbody.get('errorMessage') is not None:
parsed_body['errorMessage'] = \
jbody.get('errorMessage')
#
# Unexpected response status
#
else:
parsed_body = jbody
response.body = parsed_body
return response
def position_book(
self,
instrument,
**kwargs
):
"""
Fetch a position book for an instrument.
Args:
instrument:
Name of the Instrument
time:
The time of the snapshot to fetch. If not specified, then the
most recent snapshot is fetched.
Returns:
v20.response.Response containing the results from submitting the
request
"""
request = Request(
'GET',
'/v3/instruments/{instrument}/positionBook'
)
request.set_path_param(
'instrument',
instrument
)
request.set_param(
'time',
kwargs.get('time')
)
response = self.ctx.request(request)
if response.content_type is None:
return response
if not response.content_type.startswith("application/json"):
return response
jbody = json.loads(response.raw_body)
parsed_body = {}
#
# Parse responses as defined by the API specification
#
if str(response.status) == "200":
if jbody.get('positionBook') is not None:
parsed_body['positionBook'] = \
self.ctx.instrument.PositionBook.from_dict(
jbody['positionBook'],
self.ctx
)
elif str(response.status) == "400":
if jbody.get('errorCode') is not None:
parsed_body['errorCode'] = \
jbody.get('errorCode')
if jbody.get('errorMessage') is not None:
parsed_body['errorMessage'] = \
jbody.get('errorMessage')
elif str(response.status) == "401":
if jbody.get('errorCode') is not None:
parsed_body['errorCode'] = \
jbody.get('errorCode')
if jbody.get('errorMessage') is not None:
parsed_body['errorMessage'] = \
jbody.get('errorMessage')
elif str(response.status) == "404":
if jbody.get('errorCode') is not None:
parsed_body['errorCode'] = \
jbody.get('errorCode')
if jbody.get('errorMessage') is not None:
parsed_body['errorMessage'] = \
jbody.get('errorMessage')
elif str(response.status) == "405":
if jbody.get('errorCode') is not None:
parsed_body['errorCode'] = \
jbody.get('errorCode')
if jbody.get('errorMessage') is not None:
parsed_body['errorMessage'] = \
jbody.get('errorMessage')
#
# Unexpected response status
#
else:
parsed_body = jbody
response.body = parsed_body
return response
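# Illustration only: a minimal sketch of calling the candles endpoint through
# an already-configured v20 Context; the instrument name, granularity and count
# below are assumptions, not values required by this module.
def _example_fetch_candles(ctx):
    response = ctx.instrument.candles(
        "EUR_USD",          # hypothetical instrument name
        granularity="H1",
        count=10,
    )
    # On a 200 response the parsed body holds Candlestick instances.
    if str(response.status) == "200":
        return response.body.get("candles")
    return None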
|
|
"""
MODULE : translator.py
Purpose : Contains the main translation function.
Import Acronym : TRANS
"""
# List of Imports Begin
import debug as DEBUG
import instr3ac as INSTRUCTION
import registers as REG
import global_objects as G
import translator as TRANS
import library as LIB
import mips_assembly as ASM
# List of Imports End
def Translate(instr):
if instr.instrType.is_DECLARE():
if instr.inp1.is_HASH_VARIABLE():
G.CurrRegAddrTable.DumpDirtyVars()
G.CurrRegAddrTable.Reset()
G.AllocMap = {}
LIB.Translate_initHash(instr.inp1)
elif instr.instrType.is_EXIT():
G.CurrRegAddrTable.DumpDirtyVars()
G.AsmText.AddText(G.INDENT + "li %s, 10"%(REG.v0))
G.AsmText.AddText(G.INDENT + "syscall")
elif instr.instrType.is_GOTO():
G.CurrRegAddrTable.DumpDirtyVars()
G.AsmText.AddText(G.INDENT + "j %s"%(instr.jmpTarget))
elif instr.instrType.is_CALL():
G.CurrRegAddrTable.DumpDirtyVars()
G.AsmText.AddText(G.INDENT + "jal %s"%(instr.jmpLabel))
if instr.dest.is_VARIABLE():
GenCode_CallAssignment(instr)
elif instr.instrType.is_PRINT():
if instr.ContainsHashAccess():
G.CurrRegAddrTable.DumpDirtyVars()
LIB.Translate_Printf(instr.IOArgs)
elif instr.instrType.is_READ():
if instr.ContainsHashAccess():
G.CurrRegAddrTable.DumpDirtyVars()
LIB.Translate_Scanf(instr.IOArgs)
elif instr.instrType.is_ALLOC():
if instr.ContainsHashAccess():
G.CurrRegAddrTable.DumpDirtyVars()
GenCode_Alloc(instr)
elif instr.instrType.is_RETURN():
if not instr.inp1.is_NONE():
reg = SetupRegister(instr.inp1, REG.v0)
if reg != REG.v0:
G.AsmText.AddText(G.INDENT + "move %s, %s\n"%(REG.v0, reg), "Storing return value in $v0")
G.CurrRegAddrTable.DumpDirtyVars()
stackSpaceRequired = G.StackSpaceMap[G.CurrFunction] + 24
G.AsmText.AddText(G.INDENT + "move $sp, $fp", "Restore the stack pointer")
#G.AsmText.AddText(G.INDENT + "lw $fp, %d($sp)"%(stackSpaceRequired-4), "Reload the fp from previous call")
#G.AsmText.AddText(G.INDENT + "lw $ra, %d($sp)"%(stackSpaceRequired-8), "Reload the ra of current call")
G.AsmText.AddText("%s_return:"%G.CurrFunction)
G.AsmText.AddText(G.INDENT + "jr $ra")
elif instr.instrType.is_IFGOTO() or instr.instrType.is_STRIFGOTO():
# We can safely clobber registers here because this is the last
# instruction of the basic block
if (instr.dest.is_HASH_VARIABLE() or
instr.inp1.is_HASH_VARIABLE() or
instr.inp2.is_HASH_VARIABLE()):
G.CurrRegAddrTable.DumpDirtyVars()
G.CurrRegAddrTable.Reset()
G.AllocMap = {}
G.CurrRegAddrTable.DumpDirtyVars()
Translate_IFGOTO(instr)
elif instr.instrType.is_ASSIGN():
if (instr.dest.is_HASH_VARIABLE() or
instr.inp1.is_HASH_VARIABLE() or
instr.inp2.is_HASH_VARIABLE()):
G.CurrRegAddrTable.DumpDirtyVars()
G.CurrRegAddrTable.Reset()
G.AllocMap = {}
Translate_ASSIGN(instr)
def SetupRegister(inp, regComp, tempReg=REG.t9, useImmediate=False):
# Setup the input in a register, using regComp, if required
reg = None
if inp.is_SCALAR_VARIABLE():
# This variable has already been loaded into a register,
# as register allocation has been done for this instruction
try:
reg = G.AllocMap[inp.value]
        except KeyError:
# This can only happen if this variable is an index of an array
# in which case, we directly load it from its register or from
# memory. It can also happen when we're dealing with hashes as it
# requires a function call and everything will be wiped out.
if inp.IsRegisterAllocated():
reg = inp.GetCurrReg()
else:
G.AsmText.AddText(inp.CopyToRegister(regComp)[:-1])
reg = regComp
elif inp.is_NUMBER():
if useImmediate:
reg = str(inp.value)
else:
reg = regComp
G.AsmText.AddText(reg.LoadImmediate(inp.value))
elif inp.is_STRING():
reg = regComp
G.AsmText.AddText(inp.CopyToRegister(reg)[:-1])
elif inp.is_ARRAY_VARIABLE():
# First we need the index
regInp = None
if inp.key.is_NUMBER():
G.AsmText.AddText(tempReg.LoadImmediate(inp.key.value), "Load index for the array access")
else:
regInp = SetupRegister(inp.key, regComp)
G.AsmText.AddText(G.INDENT + "move %s, %s"%(tempReg, regInp), "Load index for the array access")
# Load the array address in regComp
G.AsmText.AddText(G.INDENT + "la %s, %s"%(regComp, ASM.GetArrAddr(inp.value)), "Load array address")
# We move the index value to tempReg to multiply it by 4
G.AsmText.AddText(G.INDENT + "sll %s, %s, 2"%(tempReg, tempReg), "Multiply index by 4")
G.AsmText.AddText(G.INDENT + "add %s, %s, %s"%(regComp, regComp, tempReg), "Add index as an offset to array address")
G.AsmText.AddText(G.INDENT + "lw %s, 0(%s)"%(regComp, regComp), "Extract array value")
reg = regComp
elif inp.is_HASH_VARIABLE():
# First we need the key
regInp = None
if inp.key.is_NUMBER():
G.AsmText.AddText(tempReg.LoadImmediate(inp.key.value), "Load key for the hash access")
else:
regInp = SetupRegister(inp.key, regComp)
G.AsmText.AddText(G.INDENT + "move %s, %s"%(tempReg, regInp), "Load key for the hash access")
LIB.Translate_getValue(inp, tempReg, regComp)
reg = regComp
DEBUG.Assert(reg, "Line %d: Unable to setup register for %s."%(G.CurrInstruction.lineID, str(inp.value)))
return reg
def Translate_IFGOTO(instr):
optype = INSTRUCTION.OperationType
cmp_ops = [optype.LT, optype.GT, optype.LEQ, optype.GEQ, optype.EQ, optype.NE]
DEBUG.Assert(instr.opType.opType in cmp_ops,"Invalid operator for IFGOTO.")
# If operands are strings
if StrTranslate_IFGOTO(instr):
return
# Instead of separately handling the cases in which one or both of
# the operands is a number, load both operands into registers and
# operate only on the registers.
reg1 = SetupRegister(instr.inp1, REG.tmpUsageRegs[0])
reg2 = SetupRegister(instr.inp2, REG.tmpUsageRegs[1], useImmediate=True)
if instr.opType.is_EQ():
G.AsmText.AddText(G.INDENT + "beq %s, %s, %s"%(reg1, reg2, instr.jmpTarget))
elif instr.opType.is_NE():
G.AsmText.AddText(G.INDENT + "bne %s, %s, %s"%(reg1, reg2, instr.jmpTarget))
elif instr.opType.is_LT():
if reg2 == "0":
G.AsmText.AddText(G.INDENT + "bltz %s, %s"%(reg1, instr.jmpTarget))
else:
G.AsmText.AddText(G.INDENT + "slt %s, %s, %s"%(reg1, reg1, reg2))
G.AsmText.AddText(G.INDENT + "bgtz %s, %s"%(reg1, instr.jmpTarget))
elif instr.opType.is_GT():
if reg2 == "0":
G.AsmText.AddText(G.INDENT + "bgtz %s, %s"%(reg1, instr.jmpTarget))
else:
G.AsmText.AddText(G.INDENT + "sgt %s, %s, %s"%(reg1, reg1, reg2))
G.AsmText.AddText(G.INDENT + "bgtz %s, %s"%(reg1, instr.jmpTarget))
elif instr.opType.is_LEQ():
if reg2 == "0":
G.AsmText.AddText(G.INDENT + "blez %s, %s"%(reg1, instr.jmpTarget))
else:
G.AsmText.AddText(G.INDENT + "sle %s, %s, %s"%(reg1, reg1, reg2))
G.AsmText.AddText(G.INDENT + "bgtz %s, %s"%(reg1, instr.jmpTarget))
elif instr.opType.is_GEQ():
if reg2 == "0":
G.AsmText.AddText(G.INDENT + "bgez %s, %s"%(reg1, instr.jmpTarget))
else:
G.AsmText.AddText(G.INDENT + "sge %s, %s, %s"%(reg1, reg1, reg2))
G.AsmText.AddText(G.INDENT + "bgtz %s, %s"%(reg1, instr.jmpTarget))
def StrTranslate_IFGOTO(instr):
if instr.instrType.is_STRIFGOTO():
LIB.Translate_StrCmp(instr.inp1,instr.inp2)
if instr.opType.is_EQ():
G.AsmText.AddText(G.INDENT + "beqz $v0, %s"%(instr.jmpTarget))
        elif instr.opType.is_NE():
G.AsmText.AddText(G.INDENT + "bne $v0, %s, %s"%(REG.zero, instr.jmpTarget))
elif instr.opType.is_GEQ():
G.AsmText.AddText(G.INDENT + "bgez $v0, %s"%(instr.jmpTarget))
elif instr.opType.is_LEQ():
G.AsmText.AddText(G.INDENT + "blez $v0, %s"%(instr.jmpTarget))
elif instr.opType.is_LT():
G.AsmText.AddText(G.INDENT + "bgtz $v0, %s"%(instr.jmpTarget))
elif instr.opType.is_GT():
G.AsmText.AddText(G.INDENT + "bltz $v0, %s"%(instr.jmpTarget))
return True
return False
def Translate_ASSIGN(instr):
if (not instr.opType.is_NONE()) and (not instr.inp2.is_NONE()):
# dest = inp1 OP inp2
reg1 = SetupRegister(instr.inp1,REG.tmpUsageRegs[0])
if (instr.opType.is_DIV() or instr.opType.is_MULT() or instr.opType.is_MOD()):
reg2 = SetupRegister(instr.inp2,REG.tmpUsageRegs[1])
else:
reg2 = SetupRegister(instr.inp2,REG.tmpUsageRegs[1], useImmediate=True)
if instr.dest.is_SCALAR_VARIABLE():
reg3 = SetupDestRegScalar(instr.dest, REG.tmpUsageRegs[-1])
GenCode_3OPASSIGN(instr, reg3, reg1, reg2)
if reg3 == REG.tmpUsageRegs[-1]:
G.AsmText.AddText(G.INDENT + "sw %s, %s"%(reg3, ASM.GetVarAddr(instr.dest)), "Store it back")
elif instr.dest.is_ARRAY_VARIABLE():
tempReg = REG.tmpUsageRegs[-1]
regComp = REG.tmpUsageRegs[2]
SetupDestRegArray(instr.dest, regComp, tempReg)
# We will reuse tempReg as the dest register. We will then write it back to the
# address location in the array
GenCode_3OPASSIGN(instr, tempReg, reg1, reg2)
# Store back the value
G.AsmText.AddText(G.INDENT + "sw %s, 0(%s)"%(tempReg, regComp), "Array is a dest. Storing back the value")
elif instr.dest.is_HASH_VARIABLE():
tempReg = REG.tmpUsageRegs[-1]
regComp = REG.tmpUsageRegs[2]
SetupDestRegHash(instr.dest, regComp, tempReg) # The value of key is stored in tempReg
GenCode_3OPASSIGN(instr, regComp, reg1, reg2)
LIB.Translate_alloc(reg1) # Hack for now. Everything has been dumped anyway
G.AsmText.AddText(G.INDENT + "sw %s, 0(%s)"%(regComp, reg1), "Load value into allocated memory")
LIB.Translate_addElement(instr.dest, tempReg, reg1)
elif instr.opType.is_NONE():
# dest = inp1
if instr.dest.is_SCALAR_VARIABLE():
reg3 = SetupDestRegScalar(instr.dest, REG.tmpUsageRegs[-1])
if instr.inp1.is_NUMBER():
G.AsmText.AddText(G.INDENT + "li %s, %s"%(reg3, str(instr.inp1.value)))
else:
reg1 = SetupRegister(instr.inp1, REG.tmpUsageRegs[1])
if reg1 != reg3:
G.AsmText.AddText(G.INDENT + "move %s, %s"%(reg3, reg1))
if reg3 == REG.tmpUsageRegs[-1]:
G.AsmText.AddText(G.INDENT + "sw %s, %s"%(reg3, ASM.GetVarAddr(instr.dest)), "Store it back")
elif instr.dest.is_ARRAY_VARIABLE():
tempReg = REG.tmpUsageRegs[-1]
regComp = REG.tmpUsageRegs[2]
SetupDestRegArray(instr.dest, regComp, tempReg)
# We will reuse tempReg as the dest register. We will then write it back to the
# address location in the array
if instr.inp1.is_NUMBER():
G.AsmText.AddText(G.INDENT + "li %s, %s"%(tempReg, str(instr.inp1.value)))
else:
reg1 = SetupRegister(instr.inp1, REG.tmpUsageRegs[0])
G.AsmText.AddText(G.INDENT + "move %s, %s"%(tempReg, reg1))
# Store back the value
G.AsmText.AddText(G.INDENT + "sw %s, 0(%s)"%(tempReg, regComp))
elif instr.dest.is_HASH_VARIABLE():
tempReg = REG.tmpUsageRegs[-1]
regComp = REG.tmpUsageRegs[2]
SetupDestRegHash(instr.dest, regComp, tempReg) # The value of key is stored in tempReg
# Store the value in regComp
if instr.inp1.is_NUMBER():
G.AsmText.AddText(G.INDENT + "li %s, %s"%(regComp, str(instr.inp1.value)))
else:
reg1 = SetupRegister(instr.inp1, REG.tmpUsageRegs[0])
G.AsmText.AddText(G.INDENT + "move %s, %s"%(regComp, reg1))
LIB.Translate_alloc(REG.tmpUsageRegs[0])
G.AsmText.AddText(G.INDENT + "sw %s, 0(%s)"%(regComp, REG.tmpUsageRegs[0]), "Load value into allocated memory")
LIB.Translate_addElement(instr.dest, tempReg, REG.tmpUsageRegs[0])
elif instr.inp2.is_NONE():
# dest = OP inp1
reg1 = SetupRegister(instr.inp1,REG.tmpUsageRegs[0])
if instr.dest.is_SCALAR_VARIABLE():
reg3 = SetupDestRegScalar(instr.dest, REG.tmpUsageRegs[-1])
GenCode_2OPASSIGN(instr, reg3, reg1)
if reg3 == REG.tmpUsageRegs[-1]:
G.AsmText.AddText(G.INDENT + "sw %s, %s"%(reg3, ASM.GetVarAddr(instr.dest)), "Store it back")
elif instr.dest.is_ARRAY_VARIABLE():
tempReg = REG.tmpUsageRegs[-1]
regComp = REG.tmpUsageRegs[2]
SetupDestRegArray(instr.dest, regComp, tempReg)
# We will reuse tempReg as the dest register. We will then write it back to the
# address location in the array
GenCode_2OPASSIGN(instr, tempReg, reg1)
# Store back the value
G.AsmText.AddText(G.INDENT + "sw %s, 0(%s)"%(tempReg, regComp))
elif instr.dest.is_HASH_VARIABLE():
tempReg = REG.tmpUsageRegs[-1]
regComp = REG.tmpUsageRegs[2]
SetupDestRegHash(instr.dest, regComp, tempReg) # The value of key is stored in tempReg
GenCode_2OPASSIGN(instr, regComp, reg1)
LIB.Translate_alloc(REG.tmpUsageRegs[1])
G.AsmText.AddText(G.INDENT + "sw %s, 0(%s)"%(regComp, REG.tmpUsageRegs[1]), "Load value into allocated memory")
LIB.Translate_addElement(instr.dest, tempReg, REG.tmpUsageRegs[1])
def GenCode_3OPASSIGN(instr, regDest, regInp1, regInp2):
# Currently ignoring overflows everywhere
if instr.opType.is_PLUS():
G.AsmText.AddText(G.INDENT + "addu %s, %s, %s"%(regDest, regInp1, regInp2),
"%s = %s + %s"%(instr.dest, instr.inp1, instr.inp2))
elif instr.opType.is_MINUS():
G.AsmText.AddText(G.INDENT + "subu %s, %s, %s"%(regDest, regInp1, regInp2),
"%s = %s - %s"%(instr.dest, instr.inp1, instr.inp2))
elif instr.opType.is_MULT():
G.AsmText.AddText(G.INDENT + "multu %s, %s"%(regInp1, regInp2))
G.AsmText.AddText(G.INDENT + "mflo %s"%(regDest),
"%s = %s * %s"%(instr.dest, instr.inp1, instr.inp2))
elif instr.opType.is_DIV():
G.AsmText.AddText(G.INDENT + "div %s, %s"%(regInp1, regInp2))
G.AsmText.AddText(G.INDENT + "mflo %s"%(regDest),
"%s = %s / %s"%(instr.dest, instr.inp1, instr.inp2))
elif instr.opType.is_MOD():
G.AsmText.AddText(G.INDENT + "div %s, %s"%(regInp1, regInp2))
G.AsmText.AddText(G.INDENT + "mfhi %s"%(regDest),
"%s = %s mod %s"%(instr.dest, instr.inp1, instr.inp2))
elif instr.opType.is_LT():
G.AsmText.AddText(G.INDENT + "slt %s, %s, %s"%(regDest, regInp1, regInp2),
"%s = %s < %s"%(instr.dest, instr.inp1, instr.inp2))
elif instr.opType.is_GT():
G.AsmText.AddText(G.INDENT + "sgt %s, %s, %s"%(regDest, regInp1, regInp2),
"%s = %s > %s"%(instr.dest, instr.inp1, instr.inp2))
elif instr.opType.is_GEQ():
G.AsmText.AddText(G.INDENT + "sge %s, %s, %s"%(regDest, regInp1, regInp2),
"%s = %s >= %s"%(instr.dest, instr.inp1, instr.inp2))
elif instr.opType.is_LEQ():
G.AsmText.AddText(G.INDENT + "sle %s, %s, %s"%(regDest, regInp1, regInp2),
"%s = %s <= %s"%(instr.dest, instr.inp1, instr.inp2))
elif instr.opType.is_EQ():
G.AsmText.AddText(G.INDENT + "seq %s, %s, %s"%(regDest, regInp1, regInp2),
"%s = %s == %s"%(instr.dest, instr.inp1, instr.inp2))
elif instr.opType.is_NE():
G.AsmText.AddText(G.INDENT + "sne %s, %s, %s"%(regDest, regInp1, regInp2),
"%s = %s != %s"%(instr.dest, instr.inp1, instr.inp2))
elif instr.opType.is_BOR():
G.AsmText.AddText(G.INDENT + "or %s, %s, %s"%(regDest, regInp1, regInp2),
"%s = %s | %s"%(instr.dest, instr.inp1, instr.inp2))
elif instr.opType.is_BAND():
G.AsmText.AddText(G.INDENT + "and %s, %s, %s"%(regDest, regInp1, regInp2),
"%s = %s & %s"%(instr.dest, instr.inp1, instr.inp2))
elif instr.opType.is_BXOR():
G.AsmText.AddText(G.INDENT + "xor %s, %s, %s"%(regDest, regInp1, regInp2),
"%s = %s ^ %s"%(instr.dest, instr.inp1, instr.inp2))
elif instr.opType.is_LSHIFT():
G.AsmText.AddText(G.INDENT + "sllv %s, %s, %s"%(regDest, regInp1, regInp2),
"%s = %s << %s"%(instr.dest, instr.inp1, instr.inp2))
elif instr.opType.is_RSHIFT():
G.AsmText.AddText(G.INDENT + "srlv %s, %s, %s"%(regDest, regInp1, regInp2),
"%s = %s >> %s"%(instr.dest, instr.inp1, instr.inp2))
else:
raise Exception("%s : Instruction not recognized in 3OPAssign"%(instr))
def GenCode_2OPASSIGN(instr, regDest, regInp):
# Ignoring Overflow in negation operation
if instr.opType.is_BNOT():
G.AsmText.AddText(G.INDENT + "not %s, %s"%(regDest, regInp),
"%s = ~%s"%(instr.dest, instr.inp1))
elif instr.opType.is_MINUS():
G.AsmText.AddText(G.INDENT + "negu %s, %s"%(regDest, regInp),
"%s = -%s"%(instr.dest, instr.inp1))
elif instr.opType.is_PLUS():
G.AsmText.AddText(G.INDENT + "move %s, %s"%(regDest, regInp),
"%s = +%s"%(instr.dest, instr.inp1))
else:
raise Exception("%s : Instruction not recognized in 2OPAssign"%(instr))
def GenCode_CallAssignment(instr):
if instr.dest.is_SCALAR_VARIABLE():
G.AsmText.AddText(G.INDENT + "sw %s, %s"%(REG.v0, ASM.GetVarAddr(instr.dest)), "Store function return directly into the memory address")
elif instr.dest.is_ARRAY_VARIABLE():
tempReg = REG.tmpUsageRegs[-1]
regComp = REG.tmpUsageRegs[2]
SetupDestRegArray(instr.dest, regComp, tempReg)
# Store back the value
G.AsmText.AddText(G.INDENT + "sw %s, 0(%s)"%(REG.v0, regComp), "Store function return directly into the memory address")
elif instr.dest.is_HASH_VARIABLE():
tempReg = REG.tmpUsageRegs[-1]
regComp = REG.tmpUsageRegs[2]
SetupDestRegHash(instr.dest, regComp, tempReg) # The value of key is stored in tempReg
G.AsmText.AddText(G.INDENT + "move %s, %s"%(REG.tmpUsageRegs[1], REG.v0), "Store return value of function call so it is not overwritten by alloc")
LIB.Translate_alloc(REG.tmpUsageRegs[0])
G.AsmText.AddText(G.INDENT + "sw %s, 0(%s)"%(REG.tmpUsageRegs[1], REG.tmpUsageRegs[0]), "Load value into allocated memory")
LIB.Translate_addElement(instr.dest, tempReg, REG.tmpUsageRegs[0])
def GenCode_Alloc(instr):
if instr.dest.is_SCALAR_VARIABLE():
LIB.Translate_alloc(REG.v0, instr.inp1)
G.AsmText.AddText(G.INDENT + "sw %s, %s"%(REG.v0, ASM.GetVarAddr(instr.dest)), "Store function return directly into the memory address")
elif instr.dest.is_ARRAY_VARIABLE():
tempReg = REG.tmpUsageRegs[-1]
regComp = REG.tmpUsageRegs[2]
SetupDestRegArray(instr.dest, regComp, tempReg)
LIB.Translate_alloc(REG.v0, instr.inp1)
# Store back the value
G.AsmText.AddText(G.INDENT + "sw %s, 0(%s)"%(REG.v0, regComp), "Store function return directly into the memory address")
elif instr.dest.is_HASH_VARIABLE():
tempReg = REG.tmpUsageRegs[-1]
regComp = REG.tmpUsageRegs[2]
SetupDestRegHash(instr.dest, regComp, tempReg) # The value of key is stored in tempReg
LIB.Translate_alloc(REG.tmpUsageRegs[0])
LIB.Translate_alloc(REG.v0, instr.inp1)
G.AsmText.AddText(G.INDENT + "sw %s, 0(%s)"%(REG.v0, REG.tmpUsageRegs[0]), "Load value into allocated memory")
LIB.Translate_addElement(instr.dest, tempReg, REG.tmpUsageRegs[0])
def SetupDestRegScalar(dest, tmpReg=REG.tmpUsageRegs[-1]):
return SetupRegister(dest, tmpReg)
def SetupDestRegArray(dest, regComp, tempReg=REG.tmpUsageRegs[-1]):
if dest.key.is_NUMBER():
G.AsmText.AddText(tempReg.LoadImmediate(dest.key.value), "Load index for array access")
else:
regInp = SetupRegister(dest.key, regComp)
G.AsmText.AddText(G.INDENT + "move %s, %s"%(tempReg, regInp), "Load index for array access")
# Load the array address in regComp
G.AsmText.AddText(G.INDENT + "la %s, %s"%(regComp, ASM.GetArrAddr(dest.value)), "Load array address")
# We move the index value to tempReg to multiply it by 4
G.AsmText.AddText(G.INDENT + "sll %s, %s, 2"%(tempReg, tempReg), "Multiply index by 4")
G.AsmText.AddText(G.INDENT + "add %s, %s, %s"%(regComp, regComp, tempReg), "Add index as an offset to array address")
def SetupDestRegHash(dest, regComp, tempReg=REG.tmpUsageRegs[-1]):
if dest.key.is_NUMBER():
G.AsmText.AddText(tempReg.LoadImmediate(dest.key.value), "Load key for the hash access")
else:
regInp = SetupRegister(dest.key, regComp)
G.AsmText.AddText(G.INDENT + "move %s, %s"%(tempReg, regInp), "Load key for the hash access")
|
|
'''
Created on 2013-01-22
@author: levi
'''
import unittest
import time
import sys
from symbolic_state_space import SymbolicStateSpace
from t_core.matcher import Matcher
from t_core.messages import Packet
from himesis_utils import graph_to_dot
# all runs are the same transformation, but with different metamodel elements
# the purpose is to do scalability testing with multiple configurations and multiple sets of rules
####GEHANs IMPORTS for GM2AUTOSAR transformation -START
## transformation -start
from GM2AUTOSAR_MM.transformation.Himesis.HConnectPPortPrototype import HConnectPPortPrototype
from GM2AUTOSAR_MM.transformation.Himesis.HConnectRPortPrototype import HConnectRPortPrototype
from GM2AUTOSAR_MM.transformation.Himesis.HConnECU2VirtualDevice import HConnECU2VirtualDevice
from GM2AUTOSAR_MM.transformation.Himesis.HConnVirtualDeviceToDistributable import HConnVirtualDeviceToDistributable
from GM2AUTOSAR_MM.transformation.Himesis.HMapDistributable import HMapDistributable
from GM2AUTOSAR_MM.transformation.Himesis.HMapECU2FiveElements import HMapECU2FiveElements
from GM2AUTOSAR_MM.transformation.Himesis.HMapVirtualDevice import HMapVirtualDevice
from GM2AUTOSAR_MM.transformation.Himesis.HMapECU2FiveElementsFAULTY import HMapECU2FiveElementsFAULTY
from GM2AUTOSAR_MM.transformation.Himesis.HMapVirtualDeviceFAULTY import HMapVirtualDeviceFAULTY
## transformation -end
##Backward Matchers -start
from GM2AUTOSAR_MM.backward_matchers.Himesis.HConnectPPortPrototype_Back_CompositionType2ECULHS import HConnectPPortPrototype_Back_CompositionType2ECULHS
from GM2AUTOSAR_MM.backward_matchers.Himesis.HConnectRPortPrototype_Back_CompositionType2ECULHS import HConnectRPortPrototype_Back_CompositionType2ECULHS
from GM2AUTOSAR_MM.backward_matchers.Himesis.HConnECU2VirtualDevice_Back_EcuInst2ECULHS import HConnECU2VirtualDevice_Back_EcuInst2ECULHS
from GM2AUTOSAR_MM.backward_matchers.Himesis.HConnECU2VirtualDevice_Back_STEM2VirtualDeviceLHS import HConnECU2VirtualDevice_Back_STEM2VirtualDeviceLHS
from GM2AUTOSAR_MM.backward_matchers.Himesis.HConnECU2VirtualDevice_Back_SystemMapping2ECULHS import HConnECU2VirtualDevice_Back_SystemMapping2ECULHS
from GM2AUTOSAR_MM.backward_matchers.Himesis.HConnVirtualDevice2Distributable_Back_ComponentPrototype2DistributableLHS import HConnVirtualDevice2Distributable_Back_ComponentPrototype2DistributableLHS
from GM2AUTOSAR_MM.backward_matchers.Himesis.HConnVirtualDevice2Distributable_Back_CompositionType2ECULHS import HConnVirtualDevice2Distributable_Back_CompositionType2ECULHS
from GM2AUTOSAR_MM.backward_matchers.Himesis.HConnVirtualDevice2Distributable_Back_SCTEMc2DistributableLHS import HConnVirtualDevice2Distributable_Back_SCTEMc2DistributableLHS
from GM2AUTOSAR_MM.backward_matchers.Himesis.HConnVirtualDevice2Distributable_Back_STEM2VirtualDeviceLHS import HConnVirtualDevice2Distributable_Back_STEM2VirtualDeviceLHS
##Backward Matchers -end
##Backward Matchers Complete -start
from GM2AUTOSAR_MM.backward_matchers.Himesis.HConnectPPortPrototype_Back_CompleteLHS import HConnectPPortPrototype_Back_CompleteLHS
from GM2AUTOSAR_MM.backward_matchers.Himesis.HConnectRPortPrototype_Back_CompleteLHS import HConnectRPortPrototype_Back_CompleteLHS
from GM2AUTOSAR_MM.backward_matchers.Himesis.HConnECU2VirtualDevice_Back_CompleteLHS import HConnECU2VirtualDevice_Back_CompleteLHS
from GM2AUTOSAR_MM.backward_matchers.Himesis.HConnVirtualDevice2Distributable_Back_CompleteLHS import HConnVirtualDevice2Distributable_Back_CompleteLHS
##Backward Matchers Complete-end
##Properties -start
from GM2AUTOSAR_MM.Properties.positive.Himesis.HECUVDDistCompleteLHS import HECUVDDistCompleteLHS
from GM2AUTOSAR_MM.Properties.positive.Himesis.HECUVDDistConnectedLHS import HECUVDDistConnectedLHS
from GM2AUTOSAR_MM.Properties.positive.Himesis.HECUVDDistIsolatedLHS import HECUVDDistIsolatedLHS
from GM2AUTOSAR_MM.Properties.positive.Himesis.HECUSysTrivialTrueIsolatedLHS import HECUSysTrivialTrueIsolatedLHS
from GM2AUTOSAR_MM.Properties.positive.Himesis.HECUSysTrivialTrueConnectedLHS import HECUSysTrivialTrueConnectedLHS
from GM2AUTOSAR_MM.Properties.positive.Himesis.HECUSysTrivialTrueCompleteLHS import HECUSysTrivialTrueCompleteLHS
#Properties from the MODELS paper
from GM2AUTOSAR_MM.Properties.positive.Himesis.HP1IsolatedLHS import HP1IsolatedLHS
from GM2AUTOSAR_MM.Properties.positive.Himesis.HP1ConnectedLHS import HP1ConnectedLHS
from GM2AUTOSAR_MM.Properties.positive.Himesis.HP1CompleteLHS import HP1CompleteLHS
from GM2AUTOSAR_MM.Properties.positive.Himesis.HP2IsolatedLHS import HP2IsolatedLHS
from GM2AUTOSAR_MM.Properties.positive.Himesis.HP2ConnectedLHS import HP2ConnectedLHS
from GM2AUTOSAR_MM.Properties.positive.Himesis.HP2CompleteLHS import HP2CompleteLHS
from GM2AUTOSAR_MM.Properties.positive.Himesis.HS1IfClauseIsolatedConnectedLHS import HS1IfClauseIsolatedConnectedLHS
from GM2AUTOSAR_MM.Properties.positive.Himesis.HS1IfClauseCompleteLHS import HS1IfClauseCompleteLHS
from GM2AUTOSAR_MM.Properties.positive.Himesis.HS1ThenClauseIsolatedConnectedLHS import HS1ThenClauseIsolatedConnectedLHS
from GM2AUTOSAR_MM.Properties.positive.Himesis.HS1ThenClauseCompleteLHS import HS1ThenClauseCompleteLHS
from GM2AUTOSAR_MM.Properties.positive.Himesis.HM1IfClauseIsolatedConnectedLHS import HM1IfClauseIsolatedConnectedLHS
from GM2AUTOSAR_MM.Properties.positive.Himesis.HM1IfClauseCompleteLHS import HM1IfClauseCompleteLHS
from GM2AUTOSAR_MM.Properties.positive.Himesis.HM1ThenClausePart1IsolatedConnectedLHS import HM1ThenClausePart1IsolatedConnectedLHS
from GM2AUTOSAR_MM.Properties.positive.Himesis.HM1ThenClausePart1CompleteLHS import HM1ThenClausePart1CompleteLHS
from GM2AUTOSAR_MM.Properties.positive.Himesis.HM3IfClauseIsolatedConnectedLHS import HM3IfClauseIsolatedConnectedLHS
from GM2AUTOSAR_MM.Properties.positive.Himesis.HM3IfClauseCompleteLHS import HM3IfClauseCompleteLHS
from GM2AUTOSAR_MM.Properties.positive.Himesis.HM3ThenClausePart1IsolatedConnectedLHS import HM3ThenClausePart1IsolatedConnectedLHS
from GM2AUTOSAR_MM.Properties.positive.Himesis.HM3ThenClausePart1CompleteLHS import HM3ThenClausePart1CompleteLHS
from GM2AUTOSAR_MM.Properties.positive.Himesis.HM2IfClauseIsolatedConnectedLHS import HM2IfClauseIsolatedConnectedLHS
from GM2AUTOSAR_MM.Properties.positive.Himesis.HM2IfClauseCompleteLHS import HM2IfClauseCompleteLHS
from GM2AUTOSAR_MM.Properties.positive.Himesis.HM2ThenClausePart1IsolatedConnectedLHS import HM2ThenClausePart1IsolatedConnectedLHS
from GM2AUTOSAR_MM.Properties.positive.Himesis.HM2ThenClausePart1CompleteLHS import HM2ThenClausePart1CompleteLHS
from GM2AUTOSAR_MM.Properties.positive.Himesis.HM2ThenClausePart2IsolatedConnectedLHS import HM2ThenClausePart2IsolatedConnectedLHS
from GM2AUTOSAR_MM.Properties.positive.Himesis.HM2ThenClausePart2CompleteLHS import HM2ThenClausePart2CompleteLHS
from GM2AUTOSAR_MM.Properties.positive.Himesis.HM4IfClauseIsolatedConnectedLHS import HM4IfClauseIsolatedConnectedLHS
from GM2AUTOSAR_MM.Properties.positive.Himesis.HM4IfClauseCompleteLHS import HM4IfClauseCompleteLHS
from GM2AUTOSAR_MM.Properties.positive.Himesis.HM4ThenClausePart1IsolatedConnectedLHS import HM4ThenClausePart1IsolatedConnectedLHS
from GM2AUTOSAR_MM.Properties.positive.Himesis.HM4ThenClausePart1CompleteLHS import HM4ThenClausePart1CompleteLHS
from GM2AUTOSAR_MM.Properties.positive.Himesis.HM4ThenClausePart2IsolatedConnectedLHS import HM4ThenClausePart2IsolatedConnectedLHS
from GM2AUTOSAR_MM.Properties.positive.Himesis.HM4ThenClausePart2CompleteLHS import HM4ThenClausePart2CompleteLHS
from GM2AUTOSAR_MM.Properties.positive.Himesis.HM5IfClauseIsolatedConnectedLHS import HM5IfClauseIsolatedConnectedLHS
from GM2AUTOSAR_MM.Properties.positive.Himesis.HM5IfClauseCompleteLHS import HM5IfClauseCompleteLHS
from GM2AUTOSAR_MM.Properties.positive.Himesis.HM5ThenClausePart1IsolatedConnectedLHS import HM5ThenClausePart1IsolatedConnectedLHS
from GM2AUTOSAR_MM.Properties.positive.Himesis.HM5ThenClausePart1CompleteLHS import HM5ThenClausePart1CompleteLHS
from GM2AUTOSAR_MM.Properties.positive.Himesis.HM5ThenClausePart2IsolatedConnectedLHS import HM5ThenClausePart2IsolatedConnectedLHS
from GM2AUTOSAR_MM.Properties.positive.Himesis.HM5ThenClausePart2CompleteLHS import HM5ThenClausePart2CompleteLHS
from GM2AUTOSAR_MM.Properties.positive.Himesis.HM6IfClauseIsolatedConnectedLHS import HM6IfClauseIsolatedConnectedLHS
from GM2AUTOSAR_MM.Properties.positive.Himesis.HM6IfClauseCompleteLHS import HM6IfClauseCompleteLHS
from GM2AUTOSAR_MM.Properties.positive.Himesis.HM6ThenClausePart1IsolatedConnectedLHS import HM6ThenClausePart1IsolatedConnectedLHS
from GM2AUTOSAR_MM.Properties.positive.Himesis.HM6ThenClausePart1CompleteLHS import HM6ThenClausePart1CompleteLHS
from GM2AUTOSAR_MM.Properties.positive.Himesis.HM6ThenClausePart2IsolatedConnectedLHS import HM6ThenClausePart2IsolatedConnectedLHS
from GM2AUTOSAR_MM.Properties.positive.Himesis.HM6ThenClausePart2CompleteLHS import HM6ThenClausePart2CompleteLHS
#A property that should trivially NOT hold
from GM2AUTOSAR_MM.Properties.negative.Himesis.HTrivialFalseECUplusSystem1IsolatedLHS import HTrivialFalseECUplusSystem1IsolatedLHS
from GM2AUTOSAR_MM.Properties.negative.Himesis.HTrivialFalseECUplusSystem1ConnectedLHS import HTrivialFalseECUplusSystem1ConnectedLHS
from GM2AUTOSAR_MM.Properties.negative.Himesis.HTrivialFalseECUplusSystem1CompleteLHS import HTrivialFalseECUplusSystem1CompleteLHS
#A property with an Isolated pattern that has no matches - added for experimentation, has no significant meaning
from GM2AUTOSAR_MM.Properties.positive.Himesis.HIsolHasNoMatchIsolatedLHS import HIsolHasNoMatchIsolatedLHS
from GM2AUTOSAR_MM.Properties.positive.Himesis.HIsolHasNoMatchConnectedLHS import HIsolHasNoMatchConnectedLHS
from GM2AUTOSAR_MM.Properties.positive.Himesis.HIsolHasNoMatchCompleteLHS import HIsolHasNoMatchCompleteLHS
##Properties -end
####GEHANs IMPORTS for GM2AUTOSAR transformation -END
# rule overlap imports
from GM2AUTOSAR_MM.overlap_rules.Himesis.HConnectPPortPrototype_overlapLHS import HConnectPPortPrototype_overlapLHS
from GM2AUTOSAR_MM.overlap_rules.Himesis.HConnectRPortPrototype_overlapLHS import HConnectRPortPrototype_overlapLHS
from GM2AUTOSAR_MM.overlap_rules.Himesis.HConnECU2VirtualDevice_overlapLHS import HConnECU2VirtualDevice_overlapLHS
from GM2AUTOSAR_MM.overlap_rules.Himesis.HConnVirtualDeviceToDistributable_overlapLHS import HConnVirtualDeviceToDistributable_overlapLHS
from GM2AUTOSAR_MM.overlap_rules.Himesis.HMapDistributable_overlapLHS import HMapDistributable_overlapLHS
from GM2AUTOSAR_MM.overlap_rules.Himesis.HMapECU2FiveElements_overlapLHS import HMapECU2FiveElements_overlapLHS
from GM2AUTOSAR_MM.overlap_rules.Himesis.HMapVirtualDevice_overlapLHS import HMapVirtualDevice_overlapLHS
from GM2AUTOSAR_MM.overlap_rules.Himesis.HMapECU2FiveElementsFAULTY_overlapLHS import HMapECU2FiveElementsFAULTY_overlapLHS
from GM2AUTOSAR_MM.overlap_rules.Himesis.HMapVirtualDeviceFAULTY_overlapLHS import HMapVirtualDeviceFAULTY_overlapLHS
from PropertyVerification.state_property import StateProperty
from PropertyVerification.atomic_state_property import AtomicStateProperty
from PropertyVerification.and_state_property import AndStateProperty
from PropertyVerification.or_state_property import OrStateProperty
from PropertyVerification.not_state_property import NotStateProperty
from PropertyVerification.implication_state_property import ImplicationStateProperty
from PropertyVerification.Not import Not #StateSpace Prop
from PropertyVerification.Implication import Implication #StateSpace Prop
from PropertyVerification.And import And #StateSpace Prop
from PropertyVerification.Or import Or #StateSpace Prop
from PropertyVerification.BACKUP_atomic_state_property import BKUPAtomicStateProperty
class Test(unittest.TestCase):
def setUp(self):
ConnectPPortPrototype_Back_CompositionType2ECU = Matcher(HConnectPPortPrototype_Back_CompositionType2ECULHS())
ConnectRPortPrototype_Back_CompositionType2ECU = Matcher(HConnectRPortPrototype_Back_CompositionType2ECULHS())
ConnECU2VirtualDevice_Back_EcuInst2ECU = Matcher(HConnECU2VirtualDevice_Back_EcuInst2ECULHS())
ConnECU2VirtualDevice_Back_STEM2VirtualDevice = Matcher(HConnECU2VirtualDevice_Back_STEM2VirtualDeviceLHS())
ConnECU2VirtualDevice_Back_SystemMapping2ECU = Matcher(HConnECU2VirtualDevice_Back_SystemMapping2ECULHS())
ConnVirtualDevice2Distributable_Back_ComponentPrototype2Distributable = Matcher(HConnVirtualDevice2Distributable_Back_ComponentPrototype2DistributableLHS())
ConnVirtualDevice2Distributable_Back_CompositionType2ECU = Matcher(HConnVirtualDevice2Distributable_Back_CompositionType2ECULHS())
ConnVirtualDevice2Distributable_Back_SCTEMc2Distributable = Matcher(HConnVirtualDevice2Distributable_Back_SCTEMc2DistributableLHS())
ConnVirtualDevice2Distributable_Back_STEM2VirtualDevice = Matcher(HConnVirtualDevice2Distributable_Back_STEM2VirtualDeviceLHS())
self.rules = { 'HMapECU2FiveElementsFAULTY': HMapECU2FiveElementsFAULTY(),
'HMapDistributable': HMapDistributable(),
'HMapVirtualDeviceFAULTY': HMapVirtualDeviceFAULTY(),
'HConnectPPortPrototype': HConnectPPortPrototype(),
'HConnectRPortPrototype': HConnectRPortPrototype(),
'HConnECU2VirtualDevice': HConnECU2VirtualDevice(),
'HConnVirtualDeviceToDistributable': HConnVirtualDeviceToDistributable()}
self.backwardPatterns = { 'HMapECU2FiveElementsFAULTY': [],
'HMapDistributable': [],
'HMapVirtualDeviceFAULTY': [],
'HConnectPPortPrototype': [ConnectPPortPrototype_Back_CompositionType2ECU],
'HConnectRPortPrototype': [ConnectRPortPrototype_Back_CompositionType2ECU],
'HConnECU2VirtualDevice': [ConnECU2VirtualDevice_Back_EcuInst2ECU, ConnECU2VirtualDevice_Back_STEM2VirtualDevice, ConnECU2VirtualDevice_Back_SystemMapping2ECU],
'HConnVirtualDeviceToDistributable': [ConnVirtualDevice2Distributable_Back_ComponentPrototype2Distributable, ConnVirtualDevice2Distributable_Back_CompositionType2ECU, ConnVirtualDevice2Distributable_Back_SCTEMc2Distributable, ConnVirtualDevice2Distributable_Back_STEM2VirtualDevice]}
self.backwardPatterns2Rules = {
ConnectPPortPrototype_Back_CompositionType2ECU: 'HConnectPPortPrototype',
ConnectRPortPrototype_Back_CompositionType2ECU: 'HConnectRPortPrototype',
ConnECU2VirtualDevice_Back_EcuInst2ECU: 'HConnECU2VirtualDevice',
ConnECU2VirtualDevice_Back_STEM2VirtualDevice: 'HConnECU2VirtualDevice',
ConnECU2VirtualDevice_Back_SystemMapping2ECU: 'HConnECU2VirtualDevice',
ConnVirtualDevice2Distributable_Back_ComponentPrototype2Distributable: 'HConnVirtualDeviceToDistributable',
ConnVirtualDevice2Distributable_Back_CompositionType2ECU: 'HConnVirtualDeviceToDistributable',
ConnVirtualDevice2Distributable_Back_SCTEMc2Distributable: 'HConnVirtualDeviceToDistributable',
ConnVirtualDevice2Distributable_Back_STEM2VirtualDevice: 'HConnVirtualDeviceToDistributable'}
self.backwardPatternsComplete = {
'HMapECU2FiveElementsFAULTY': [],
'HMapDistributable': [],
'HMapVirtualDeviceFAULTY': [],
'HConnectPPortPrototype': [Matcher(HConnectPPortPrototype_Back_CompleteLHS())],
'HConnectRPortPrototype': [Matcher(HConnectRPortPrototype_Back_CompleteLHS())],
'HConnECU2VirtualDevice': [Matcher(HConnECU2VirtualDevice_Back_CompleteLHS())],
'HConnVirtualDeviceToDistributable': [Matcher(HConnVirtualDevice2Distributable_Back_CompleteLHS())]}
self.matchRulePatterns = { 'HMapECU2FiveElementsFAULTY': Matcher(HMapECU2FiveElementsFAULTY_overlapLHS()),
'HMapDistributable': Matcher(HMapDistributable_overlapLHS()),
'HMapVirtualDeviceFAULTY': Matcher(HMapVirtualDeviceFAULTY_overlapLHS()),
'HConnectPPortPrototype': Matcher(HConnectPPortPrototype_overlapLHS()),
'HConnectRPortPrototype': Matcher(HConnectRPortPrototype_overlapLHS()),
'HConnECU2VirtualDevice': Matcher(HConnECU2VirtualDevice_overlapLHS()),
'HConnVirtualDeviceToDistributable': Matcher(HConnVirtualDeviceToDistributable_overlapLHS())}
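# Fixture summary: self.rules maps each rule name to its Himesis rule object (the FAULTY
# variants are the ones exercised by test_faulty_GM_transformation below); self.backwardPatterns
# lists the backward-link matchers per rule, with self.backwardPatterns2Rules as the reverse
# Matcher-to-rule lookup; self.backwardPatternsComplete holds the "complete" backward patterns;
# and self.matchRulePatterns holds the overlap (match-rule) pattern matcher for each rule.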
def test_faulty_GM_transformation(self):
transformation = [[self.rules['HMapDistributable'], self.rules['HMapECU2FiveElementsFAULTY'], self.rules['HMapVirtualDeviceFAULTY']],
[self.rules['HConnECU2VirtualDevice'], self.rules['HConnVirtualDeviceToDistributable'], self.rules['HConnectPPortPrototype'], self.rules['HConnectRPortPrototype']]]
rulesIncludingBackLinks = [[],\
[transformation[1][0], transformation[1][1], transformation[1][2], transformation[1][3]]]
s = SymbolicStateSpace(self.rules, transformation, rulesIncludingBackLinks, self.backwardPatterns, self.backwardPatterns2Rules,\
self.backwardPatternsComplete, self.matchRulePatterns, 1, False)
s.build_symbolic_state_space()
self._print_states(s)
print '\n-------------------------------------------------------------'
# graph_to_dot('symbolic_exec', s.symbStateSpace[4][0], 1)
####REAL EXPERIMENTATION: Proving the 4 types of constraints in our MODELS paper
# The naming convention used for the properties (i.e., P1, P2, etc.) is the
# same convention used in Table 2 of my MODELS paper.
P1atomic=AtomicStateProperty(HP1IsolatedLHS(),HP1ConnectedLHS(), HP1CompleteLHS())
P2atomic=AtomicStateProperty(HP2IsolatedLHS(),HP2ConnectedLHS(), HP2CompleteLHS())
S1IfClause=AtomicStateProperty(HS1IfClauseIsolatedConnectedLHS(), HS1IfClauseIsolatedConnectedLHS(), HS1IfClauseCompleteLHS())
S1ThenClause=AtomicStateProperty(HS1ThenClauseIsolatedConnectedLHS(), HS1ThenClauseIsolatedConnectedLHS(), HS1ThenClauseCompleteLHS())
M1IfClause=AtomicStateProperty(HM1IfClauseIsolatedConnectedLHS(),HM1IfClauseIsolatedConnectedLHS(),HM1IfClauseCompleteLHS())
M1ThenClause=AtomicStateProperty(HM1ThenClausePart1IsolatedConnectedLHS(),HM1ThenClausePart1IsolatedConnectedLHS(),HM1ThenClausePart1CompleteLHS())
M3IfClause=AtomicStateProperty(HM3IfClauseIsolatedConnectedLHS(),HM3IfClauseIsolatedConnectedLHS(), HM3IfClauseCompleteLHS())
M3ThenClause=AtomicStateProperty(HM3ThenClausePart1IsolatedConnectedLHS(), HM3ThenClausePart1IsolatedConnectedLHS(),HM3ThenClausePart1CompleteLHS())
M2IfClause=AtomicStateProperty(HM2IfClauseIsolatedConnectedLHS(),HM2IfClauseIsolatedConnectedLHS(),HM2IfClauseCompleteLHS())
M2ThenClause=AndStateProperty(AtomicStateProperty(HM2ThenClausePart1IsolatedConnectedLHS(),HM2ThenClausePart1IsolatedConnectedLHS(), HM2ThenClausePart1CompleteLHS()),NotStateProperty(AtomicStateProperty(HM2ThenClausePart2IsolatedConnectedLHS(),HM2ThenClausePart2IsolatedConnectedLHS(),HM2ThenClausePart2CompleteLHS())))
M4IfClause=AtomicStateProperty(HM4IfClauseIsolatedConnectedLHS(),HM4IfClauseIsolatedConnectedLHS(),HM4IfClauseCompleteLHS())
M4ThenClause=AndStateProperty(AtomicStateProperty(HM4ThenClausePart1IsolatedConnectedLHS(),HM4ThenClausePart1IsolatedConnectedLHS(), HM4ThenClausePart1CompleteLHS()),NotStateProperty(AtomicStateProperty(HM4ThenClausePart2IsolatedConnectedLHS(),HM4ThenClausePart2IsolatedConnectedLHS(),HM4ThenClausePart2CompleteLHS())))
M5IfClause=AtomicStateProperty(HM5IfClauseIsolatedConnectedLHS(),HM5IfClauseIsolatedConnectedLHS(),HM5IfClauseCompleteLHS())
M5ThenClause=AndStateProperty(AtomicStateProperty(HM5ThenClausePart1IsolatedConnectedLHS(),HM5ThenClausePart1IsolatedConnectedLHS(), HM5ThenClausePart1CompleteLHS()),NotStateProperty(AtomicStateProperty(HM5ThenClausePart2IsolatedConnectedLHS(),HM5ThenClausePart2IsolatedConnectedLHS(),HM5ThenClausePart2CompleteLHS())))
M6IfClause=AtomicStateProperty(HM6IfClauseIsolatedConnectedLHS(),HM6IfClauseIsolatedConnectedLHS(),HM6IfClauseCompleteLHS())
M6ThenClause=AndStateProperty(AtomicStateProperty(HM6ThenClausePart1IsolatedConnectedLHS(),HM6ThenClausePart1IsolatedConnectedLHS(), HM6ThenClausePart1CompleteLHS()),NotStateProperty(AtomicStateProperty(HM6ThenClausePart2IsolatedConnectedLHS(),HM6ThenClausePart2IsolatedConnectedLHS(),HM6ThenClausePart2CompleteLHS())))
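# Each constraint above is encoded as an if-clause pattern and a then-clause pattern and is
# checked below via ImplicationStateProperty(ifClause, thenClause). For M2/M4/M5/M6 the
# then-clause is AndStateProperty(part1, NotStateProperty(part2)), i.e. part1 must hold while
# part2 must not. AtomicStateProperty takes the isolated, connected and complete LHS patterns;
# where the isolated and connected patterns coincide, the same *IsolatedConnectedLHS pattern
# is passed twice.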
#
# #andprop=AndStateProperty(AndStateProperty(atomic1,atomic2),atomic1)
# #P1atomicOldImpl=BKUPAtomicStateProperty(HP1IsolatedLHS(),HP1ConnectedLHS(), HP1CompleteLHS())
# #P2atomicOldImpl=BKUPAtomicStateProperty(HP2IsolatedLHS(),HP2ConnectedLHS(), HP2CompleteLHS())
#
# trivatomicprop=AtomicStateProperty(HECUSysTrivialTrueIsolatedLHS(),HECUSysTrivialTrueConnectedLHS(), HECUSysTrivialTrueCompleteLHS())
#
# #NOTE: If you are verifying an AndStateProperty whose two operands are the same AtomicStateProperty, store two copies of the AtomicStateProperty in two different variables.
# #Why? Variables here are references to objects, so if the two copies of the same AtomicStateProperty need different values for certain attributes, they must be stored in two different variables.
# trivnegativeprop=AtomicStateProperty(HTrivialFalseECUplusSystem1IsolatedLHS(),HTrivialFalseECUplusSystem1ConnectedLHS(),HTrivialFalseECUplusSystem1CompleteLHS())
# trivnegativepropcopy=AtomicStateProperty(HTrivialFalseECUplusSystem1IsolatedLHS(),HTrivialFalseECUplusSystem1ConnectedLHS(),HTrivialFalseECUplusSystem1CompleteLHS())
# trivatomicpropOldImpl=BKUPAtomicStateProperty(HECUSysTrivialTrueIsolatedLHS(),HECUSysTrivialTrueConnectedLHS(), HECUSysTrivialTrueCompleteLHS())
# trivnegativepropOldImpl=BKUPAtomicStateProperty(HTrivialFalseECUplusSystem1IsolatedLHS(),HTrivialFalseECUplusSystem1ConnectedLHS(),HTrivialFalseECUplusSystem1CompleteLHS())
# IsolHasNoMatch=AtomicStateProperty(HIsolHasNoMatchIsolatedLHS(), HIsolHasNoMatchConnectedLHS(), HIsolHasNoMatchCompleteLHS())
# #trivnegativepropOldImpl.verify(s)
# #finalresult=StateProperty.verifyCompositeStateProperty(s, P1atomic)
# #StateProperty.verifyCompositeStateProperty(s, OrStateProperty(P2atomic,trivnegativeprop))
finalresult=StateProperty.verifyCompositeStateProperty(s, ImplicationStateProperty(M4IfClause, M4ThenClause))
# finalresult=StateProperty.verifyCompositeStateProperty(s, ImplicationStateProperty(S1IfClause,S1ThenClause))
print ('finalresult : ')
print (finalresult)
#Experimenting with using framework1 and framework 2 together
#Not(StateProperty.verifyCompositeStateProperty(s, OrStateProperty(trivnegativeprop,trivnegativeprop))).verify()
#Or( StateProperty.verifyCompositeStateProperty(s, OrStateProperty(P1atomic,P2atomic)) , StateProperty.verifyCompositeStateProperty(s, OrStateProperty(trivnegativeprop, trivnegativeprop)) ).verify()
###DUMMY EXPERIMENTATION: Verifying simple atomic formulae and propositional logic formulae
###To verify AtomicProp only use the following two lines:
#AtomicProperty(HECUSysTrivialTrueIsolatedLHS(),HECUSysTrivialTrueConnectedLHS(), HECUSysTrivialTrueCompleteLHS()).verify(s)
#simpleProp=AtomicProperty(HECUVDDistIsolatedLHS(), HECUVDDistConnectedLHS(), HECUVDDistCompleteLHS())
#simpleProp.verify(s)
###To verify NotProp, use the following lines
#atomicProperty=AtomicProperty(HECUVDDistIsolatedLHS(), HECUVDDistConnectedLHS(), HECUVDDistCompleteLHS())
#NotProperty(atomicProperty).verify(s)
###To verify AndProp, use the following lines
#atomicProperty=AtomicProperty(HECUVDDistIsolatedLHS(), HECUVDDistConnectedLHS(), HECUVDDistCompleteLHS())
#AndProperty(atomicProperty,atomicProperty).verify(s)
###To verify OrProp, use the following lines
#atomicProperty=AtomicProperty(HECUVDDistIsolatedLHS(), HECUVDDistConnectedLHS(), HECUVDDistCompleteLHS())
#OrProperty(atomicProperty,atomicProperty).verify(s)
###To verify ImplicationProp, use the following lines
#atomicProperty=AtomicProperty(HECUVDDistIsolatedLHS(), HECUVDDistConnectedLHS(), HECUVDDistCompleteLHS())
#ImplicationProperty(atomicProperty,atomicProperty).verify(s)
###To verify complex propositional logic formulae, use the following lines
#atomicProperty=AtomicProperty(HECUVDDistIsolatedLHS(), HECUVDDistConnectedLHS(), HECUVDDistCompleteLHS())
#OrProperty(NotProperty(atomicProperty),atomicProperty).verify(s)
#atomicProperty=AtomicProperty(HECUVDDistIsolatedLHS(), HECUVDDistConnectedLHS(), HECUVDDistCompleteLHS())
#AndProperty(NotProperty(atomicProperty),atomicProperty).verify(s)
#atomicProperty=AtomicProperty(HECUVDDistIsolatedLHS(), HECUVDDistConnectedLHS(), HECUVDDistCompleteLHS())
#ImplicationProperty(NotProperty(atomicProperty),NotProperty(atomicProperty)).verify(s)
###To verify 2 properties in 1 complex propositional logic formulae, use the following lines
#atomicprop1=AtomicProperty(HECUVDDistIsolatedLHS(), HECUVDDistConnectedLHS(), HECUVDDistCompleteLHS())
#atomicprop2=AtomicProperty(HECUSysTrivialTrueIsolatedLHS(),HECUSysTrivialTrueConnectedLHS(), HECUSysTrivialTrueCompleteLHS())
#OrProperty(NotProperty(atomicprop1),NotProperty(atomicprop2)).verify(s)
#ImplicationProperty(NotProperty(atomicprop1),atomicprop2).verify(s)
#ORIGINAL CODE FROM LEVI
#transformation = [[HMapDistributable(), HMapECU2FiveElements(), HMapVirtualDevice()],
# [HConnECU2VirtualDevice(), HConnVirtualDeviceToDistributable()],
# [HConnectPPortPrototype(), HConnectRPortPrototype()]]
#
#rulesIncludingBackLinks = [[],\
# [transformation[1][0], transformation[1][1]],\
# [transformation[2][0], transformation[2][1]]]
#
#s = SymbolicStateSpace(transformation, rulesIncludingBackLinks, self.backwardPatterns, self.backwardPatterns2Rules,\
#self.overlapRulePatterns, self.multipleSameBackwardLinkRule, 1, False)
#s.build_symbolic_state_space()
#
#self._print_states(s)
#print '\n'
#print 'Built ' + str(len(s.symbStateSpace)) + ' states.'
#
#s.verify_property(HECUVDDistIsolatedLHS(), HECUVDDistConnectedLHS(), HECUVDDistCompleteLHS())
def _print_states(self,s):
for state in s.symbStateSpace:
print "----------"
if state == ():
print 'Empty'
else:
for s in state:
print s
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.test']
unittest.main()
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers
from google.api_core import gapic_v1
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.cloud.orchestration.airflow.service_v1beta1.types import image_versions
from .base import ImageVersionsTransport, DEFAULT_CLIENT_INFO
class ImageVersionsGrpcTransport(ImageVersionsTransport):
"""gRPC backend transport for ImageVersions.
Readonly service to query available ImageVersions.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_stubs: Dict[str, Callable]
def __init__(
self,
*,
host: str = "composer.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional[Sequence[str]]): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
# use the credentials which are saved
credentials=self._credentials,
# Set ``credentials_file`` to ``None`` here as
# the credentials that we saved earlier should be used.
credentials_file=None,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(
cls,
host: str = "composer.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
Raises:
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def list_image_versions(
self,
) -> Callable[
[image_versions.ListImageVersionsRequest],
image_versions.ListImageVersionsResponse,
]:
r"""Return a callable for the list image versions method over gRPC.
List ImageVersions for provided location.
Returns:
Callable[[~.ListImageVersionsRequest],
~.ListImageVersionsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_image_versions" not in self._stubs:
self._stubs["list_image_versions"] = self.grpc_channel.unary_unary(
"/google.cloud.orchestration.airflow.service.v1beta1.ImageVersions/ListImageVersions",
request_serializer=image_versions.ListImageVersionsRequest.serialize,
response_deserializer=image_versions.ListImageVersionsResponse.deserialize,
)
return self._stubs["list_image_versions"]
def close(self):
self.grpc_channel.close()
__all__ = ("ImageVersionsGrpcTransport",)
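# A minimal usage sketch (commented out; the parent string is a placeholder and application
# default credentials are assumed). The transport is normally constructed for you by the
# ImageVersions client, but it can also be driven directly:
#
#   transport = ImageVersionsGrpcTransport()
#   request = image_versions.ListImageVersionsRequest(
#       parent="projects/my-project/locations/us-central1")
#   response = transport.list_image_versions(request)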
|
|
from plotly.basedatatypes import BaseLayoutHierarchyType as _BaseLayoutHierarchyType
import copy as _copy
class Hoverlabel(_BaseLayoutHierarchyType):
# class properties
# --------------------
_parent_path_str = "layout"
_path_str = "layout.hoverlabel"
_valid_props = {
"align",
"bgcolor",
"bordercolor",
"font",
"grouptitlefont",
"namelength",
}
# align
# -----
@property
def align(self):
"""
Sets the horizontal alignment of the text content within hover
label box. Has an effect only if the hover label text spans
two or more lines.
The 'align' property is an enumeration that may be specified as:
- One of the following enumeration values:
['left', 'right', 'auto']
Returns
-------
Any
"""
return self["align"]
@align.setter
def align(self, val):
self["align"] = val
# bgcolor
# -------
@property
def bgcolor(self):
"""
Sets the background color of all hover labels on graph
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
# bordercolor
# -----------
@property
def bordercolor(self):
"""
Sets the border color of all hover labels on graph.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["bordercolor"]
@bordercolor.setter
def bordercolor(self, val):
self["bordercolor"] = val
# font
# ----
@property
def font(self):
"""
Sets the default hover label font used by all traces on the
graph.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.hoverlabel.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Supported dict properties:
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans", "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
Returns
-------
plotly.graph_objs.layout.hoverlabel.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
# grouptitlefont
# --------------
@property
def grouptitlefont(self):
"""
Sets the font for group titles in hover (unified modes).
Defaults to `hoverlabel.font`.
The 'grouptitlefont' property is an instance of Grouptitlefont
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.hoverlabel.Grouptitlefont`
- A dict of string/value properties that will be passed
to the Grouptitlefont constructor
Supported dict properties:
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans", "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
Returns
-------
plotly.graph_objs.layout.hoverlabel.Grouptitlefont
"""
return self["grouptitlefont"]
@grouptitlefont.setter
def grouptitlefont(self, val):
self["grouptitlefont"] = val
# namelength
# ----------
@property
def namelength(self):
"""
Sets the default length (in number of characters) of the trace
name in the hover labels for all traces. -1 shows the whole
name regardless of length. 0-3 shows the first 0-3 characters,
and an integer >3 will show the whole name if it is less than
that many characters, but if it is longer, will truncate to
`namelength - 3` characters and add an ellipsis.
The 'namelength' property is an integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [-1, 9223372036854775807]
Returns
-------
int
"""
return self["namelength"]
@namelength.setter
def namelength(self, val):
self["namelength"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
align
Sets the horizontal alignment of the text content
within hover label box. Has an effect only if the hover
label text spans two or more lines.
bgcolor
Sets the background color of all hover labels on graph
bordercolor
Sets the border color of all hover labels on graph.
font
Sets the default hover label font used by all traces on
the graph.
grouptitlefont
Sets the font for group titles in hover (unified
modes). Defaults to `hoverlabel.font`.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
"""
def __init__(
self,
arg=None,
align=None,
bgcolor=None,
bordercolor=None,
font=None,
grouptitlefont=None,
namelength=None,
**kwargs
):
"""
Construct a new Hoverlabel object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.layout.Hoverlabel`
align
Sets the horizontal alignment of the text content
within hover label box. Has an effect only if the hover
label text spans two or more lines.
bgcolor
Sets the background color of all hover labels on graph
bordercolor
Sets the border color of all hover labels on graph.
font
Sets the default hover label font used by all traces on
the graph.
grouptitlefont
Sets the font for group titles in hover (unified
modes). Defaults to `hoverlabel.font`.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
Returns
-------
Hoverlabel
"""
super(Hoverlabel, self).__init__("hoverlabel")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.layout.Hoverlabel
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.Hoverlabel`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("align", None)
_v = align if align is not None else _v
if _v is not None:
self["align"] = _v
_v = arg.pop("bgcolor", None)
_v = bgcolor if bgcolor is not None else _v
if _v is not None:
self["bgcolor"] = _v
_v = arg.pop("bordercolor", None)
_v = bordercolor if bordercolor is not None else _v
if _v is not None:
self["bordercolor"] = _v
_v = arg.pop("font", None)
_v = font if font is not None else _v
if _v is not None:
self["font"] = _v
_v = arg.pop("grouptitlefont", None)
_v = grouptitlefont if grouptitlefont is not None else _v
if _v is not None:
self["grouptitlefont"] = _v
_v = arg.pop("namelength", None)
_v = namelength if namelength is not None else _v
if _v is not None:
self["namelength"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
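# A minimal usage sketch (commented out; assumes plotly is installed and the trace data is
# purely illustrative):
#
#   import plotly.graph_objects as go
#   fig = go.Figure(data=go.Scatter(x=[1, 2, 3], y=[3, 1, 2]))
#   fig.update_layout(hoverlabel=dict(bgcolor="white", bordercolor="black",
#                                     font=dict(size=12), namelength=-1))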
|
|
import networkx
from operator import itemgetter
from PyGNA import Utility
from PyGNA import NetworkFrames
import pickle
import copy
import random
import math
import pylab
import sys
class GTrieNode:
def __init__(self):
self.depth = 0
self.is_leaf = False
self.label = None
self.graph = networkx.Graph()
self.parent_sym_conditions = []
self.final_sym_condition = []
self.out_links = []
self.in_links = []
self.out_link_states = []
self.in_link_states = []
self.node_states = []
self.children = []
self.parent = None
self.match_count = 0
self.probability = 1.
def setDepth(self, depth):
self.depth = depth
def getDepth(self):
return self.depth
def setLeaf(self, isleaf):
self.is_leaf = isleaf
def isLeaf(self):
return self.is_leaf
def setLabel(self, label):
self.label = label
def getLabel(self):
return self.label
def setGraph(self, graph):
self.graph = graph.copy()
def getGraph(self, copy=False):
if copy:
return self.graph.copy()
else:
return self.graph
def setOutLinks(self, links):
self.out_links = links
def getOutLinks(self):
return self.out_links
def setOutLinkStates(self, links):
for index in range(0,len(links)):
if links[index] == 1.0:
self.out_link_states.append(self.graph.edge[self.graph.nodes()[self.depth]][self.graph.nodes()[index]])
else:
self.out_link_states.append({})
def getOutLinkStates(self):
return self.out_link_states
def setInLinks(self, links):
self.in_links = links
def getInLinks(self):
return self.in_links
def setInLinkStates(self, links):
for index in range(0,len(links)):
if links[index] == 1.0:
self.in_link_states.append(self.graph.edge[self.graph.nodes()[index]][self.graph.nodes()[self.depth]])
else:
self.in_link_states.append({})
def getInLinkStates(self):
return self.in_link_states
def setNodeStates(self, states):
self.node_states = states
def getNodeStates(self):
return self.node_states
def setParentSymConditions(self, conditions):
self.addParentSymConditions(conditions)
parent = self.getParent()
if parent != None:
while parent.getDepth() >= 1:
new_conditions = []
for condition in conditions:
if condition[0] in parent.getGraph().nodes() and condition[1] in parent.getGraph().nodes():
new_conditions.append(condition)
parent.addParentSymConditions(new_conditions)
parent = parent.getParent()
def addParentSymConditions(self, conditions):
self.parent_sym_conditions.append(conditions)
def getParentSymConditions(self):
return self.parent_sym_conditions
def setFinalSymCondition(self, condition):
self.final_sym_condition.append(condition)
def getFinalSymCondition(self):
return self.final_sym_condition
def insertChild(self, child):
child.depth = self.depth + 1
self.children.append(child)
def getChildren(self):
return self.children
def setParent(self, parent):
self.parent = parent
def getParent(self):
return self.parent
def getMatchCount(self):
return self.match_count
def incMatchCount(self):
self.match_count += 1
def clearMatchCount(self):
self.match_count = 0
def getProbability(self):
return self.probability
def setProbability(self, probability):
self.probability = probability
def areNodeStatesEqual(self, graph, k):
if self.getGraph().node[self.getGraph().nodes()[k]] == graph.node[graph.nodes()[k]]:
return True
else:
return False
def areEdgeStatesEqual(self, graph, k):
for first in range(k):
for second in range(k):
if self.getGraph().nodes()[first] in self.getGraph().edge and \
self.getGraph().nodes()[second] in self.getGraph().edge[self.getGraph().nodes()[first]]:
if not (graph.nodes()[first] in graph.edge and
graph.nodes()[second] in graph.edge[graph.nodes()[first]] and
self.getGraph().edge[self.getGraph().nodes()[first]][self.getGraph().nodes()[second]] ==
graph.edge[graph.nodes()[first]][graph.nodes()[second]]):
return False
return True
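# Symmetry-breaking conditions are (less, more) pairs of pattern nodes; an embedding respects
# them when the graph vertex bound to 'less' is not greater than the vertex bound to 'more'.
# The "weak" check below is applied while a partial match is still being extended (only
# conditions whose endpoints are already bound can fail); the "strict" check is applied at a
# leaf before a candidate vertex is accepted as the final position.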
def areConditionsRespectedWeak(self, vertices):
valid_list = []
if len(vertices) > 0:
for conditions in self.parent_sym_conditions:
cond_test = []
for less, more in conditions:
less_index = self.graph.nodes().index(less)
more_index = self.graph.nodes().index(more)
if less_index <= len(vertices)-1 and more_index <= len(vertices)-1 and \
vertices[less_index] > vertices[more_index]:
cond_test.append(False)
break
valid_list.append(False) if False in cond_test else valid_list.append(True)
else:
return True
if len(valid_list) > 0:
valid = False
for test_valid in valid_list:
valid = valid or test_valid
return valid
else:
return True
def areConditionsRespectedStrict(self, vertices, candidate):
test_vertices = copy.deepcopy(vertices)
test_vertices.append(candidate)
valid_list = []
if len(vertices) > 0:
for conditions in self.final_sym_condition:
for less, more in conditions:
less_index = self.graph.nodes().index(less)
more_index = self.graph.nodes().index(more)
if less_index <= len(test_vertices)-1 and more_index <= len(test_vertices)-1 and \
test_vertices[less_index] > test_vertices[more_index]:
return False
return True
def getMinLabelForCurrentPos(self, v_used):
if v_used == []:
return 0
if len(self.parent_sym_conditions) > 0:
min_candidates = []
target_index = len(v_used)
for condition_set in self.parent_sym_conditions:
min_candidate = 0
condition_index = [(self.graph.nodes().index(less), self.graph.nodes().index(more)) for less,more in condition_set]
for condition in condition_index:
if target_index in condition:
if target_index == condition[0]:
print "Didn't Expect this!!"
else:
if min_candidate <= v_used[condition[0]]:
min_candidate = int(v_used[condition[0]])+1
min_candidates.append(min_candidate)
if len(min_candidates) > 0:
return min(min_candidates)
else:
return 0
else:
return 0
########################################################################
class GTrie:
"""
A Python implementation of the modified version of the G-Trie data structure described in
"G-tries: an efficient data structure for discovering network motifs" by
Pedro Ribeiro and Fernando Silva."""
#----------------------------------------------------------------------
def __init__(self, include_null_graph=True):
"""Constructor"""
self.root = GTrieNode()
self.utility = Utility.utility()
self.null_graph = include_null_graph
self.matches = []
self.dict_matches = {}
self.max_matches = sys.maxint
def getMatches(self, labels=False):
if labels:
return self.dict_matches
else:
return self.matches
def clearMatchCount(self, node):
if node.isLeaf():
node.clearMatchCount()
for child in node.getChildren():
self.clearMatchCount(child)
def setMaxMatches(self, maximum):
self.max_matches = maximum
def setProbability(self, node, probability):
if probability == []:
node.setProbability(1)
else:
node.setProbability(probability[node.getDepth()])
for child in node.getChildren():
self.setProbability(child, probability)
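# Probabilities drive sampled matching: GTrieMatch only descends into a child node with
# probability child.getProbability(). Once a node reaches max_matches its probability is set
# to 0 in foundMatch, and updateProbabilities propagates that pruning upwards when every child
# of a parent has hit the cap.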
def updateProbabilities(self, node):
parent = node.getParent()
if parent != None:
updateParent = True
for child in parent.getChildren():
if child.getMatchCount() < self.max_matches:
updateParent = False
break
if updateParent:
parent.setProbability(0.)
self.updateProbabilities(parent)
def get_subgraphs(self, node=None, subgraphs=None):
if subgraphs is None:
# Avoid the shared mutable default-argument pitfall: start with a fresh list per call.
subgraphs = []
if node is None:
self.get_subgraphs(self.root, subgraphs)
return subgraphs
else:
if node.isLeaf():
subgraphs.append(node.getGraph(copy=True))
for child in node.getChildren():
self.get_subgraphs(child, subgraphs)
def read(self, path):
self.root = pickle.load(open(path, "rb"))
def write(self, path):
pickle.dump(self.root, open(path,"wb"))
def GTCannon(self, Graph):
"""Turn graph into canonical form
Note: Relies on NetworkX articulation_points which is restricted to
undirected graphs"""
# Handle case where graph is empty.
if len(Graph.nodes()) == 0:
return Graph
lex_labeling = self.utility.lexicographicallyLargestLabeling(Graph)
Graph = networkx.relabel_nodes(Graph, lex_labeling,copy=True)
retGraph = Graph.copy()
last_degree = Graph.degree()
original_degree = Graph.degree()
canon_label = {}
label = len(Graph.nodes())
while len(Graph.nodes()) > 0:
articulations = list(networkx.articulation_points(Graph.to_undirected())) \
if networkx.is_directed(Graph) else list(networkx.articulation_points(Graph))
current_degrees = temp_degrees = Graph.degree()
#Remove articulation points from consideration
for nodes in articulations:
if nodes in temp_degrees:
temp_degrees.pop(nodes)
#Sort by degree
sorted_degrees = sorted(temp_degrees.iteritems(), key=itemgetter(1))
#Capture min degree
candidates = []
u_min = sorted_degrees.pop(0)
candidates.append(u_min)
#Collect candidates with same degree
while len(sorted_degrees) > 0 and sorted_degrees[0][1] == u_min[1]:
candidates.append(sorted_degrees.pop(0))
#Are there ties?
if len(candidates) > 1:
first_tie_candidates = []
sorted_last_degrees = []
for pair in candidates:
sorted_last_degrees.append((pair[0],last_degree[pair[0]]))
sorted_last_degrees = sorted(sorted_last_degrees, key=itemgetter(1))
u_min = sorted_last_degrees.pop(0)
first_tie_candidates.append(u_min)
while len(sorted_last_degrees) > 0 and sorted_last_degrees[0][1] == u_min[1]:
first_tie_candidates.append(sorted_last_degrees.pop())
#Still ties?
if len(first_tie_candidates) > 1:
sorted_original_degree = []
for pair in first_tie_candidates:
sorted_original_degree.append((pair[0],original_degree[pair[0]]))
sorted_original_degree = sorted(sorted_original_degree, key=itemgetter(1))
u_min = sorted_original_degree.pop(0)
Graph.remove_node(u_min[0])
canon_label[u_min[0]] = label
label -= 1
retGraph = networkx.relabel_nodes(retGraph, canon_label, copy=True)
return retGraph
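# GTrieInsert canonicalizes the graph with GTCannon, computes its symmetry-breaking conditions,
# and inserts it one adjacency-matrix row/column at a time via insertRecursive. Graphs with more
# than one non-trivial connected component are rejected, and the empty (null) graph is inserted
# at the root when include_null_graph is True.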
def GTrieInsert(self, graph, label=None, states=False):
if not self.root.isLeaf() and self.null_graph:
self.insertRecursive(networkx.Graph(), [], networkx.adjacency_matrix(networkx.Graph()).todense(),
self.root, 0, label, states)
components = networkx.connected_components(graph.to_undirected()) \
if networkx.is_directed(graph) else networkx.connected_components(graph)
component_len = [1 for x in components if len(x) > 1]
if len(list(components)) > 1 and sum(component_len) > 1:
print "Illegal Graph Insert: Graph has more than one connected component."
return
cannonGraph = self.GTCannon(graph.copy())
matrix = networkx.adjacency_matrix(cannonGraph).todense()
conditions = self.utility.symmetryConditions(cannonGraph)
self.insertRecursive(cannonGraph, conditions, matrix, self.root, 0, label, states)
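# insertRecursive walks the trie level by level: at depth k it compares row/column k of the
# adjacency matrix (and, when states is True, the node and edge attributes) against the existing
# children, reusing a matching child or creating a new GTrieNode otherwise. When k reaches the
# matrix size, the current node is marked as a leaf and the symmetry conditions are stored on it.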
def insertRecursive(self, graph, conditions, matrix, node, k, label, states):
if k == matrix.shape[0]:
node.is_leaf = True
node.setFinalSymCondition(conditions)
#print "Final - " + str(conditions)
node.setParentSymConditions(conditions)
#print "Leaf"
#pos=networkx.fruchterman_reingold_layout(node.getGraph())
#networkx.draw(node.getGraph(),pos)
#networkx.draw_networkx_edge_labels(node.getGraph(), pos)
#pylab.show()
if label != None:
node.setLabel(label)
else:
row = matrix[k,:k+1].tolist().pop(0)
column = matrix[:k+1,k].ravel().tolist().pop(0)
for child in node.children:
if states:
if child.out_links == row and child.in_links == column and \
child.areNodeStatesEqual(graph, k) and child.areEdgeStatesEqual(graph, k+1):
self.insertRecursive(graph, conditions, matrix, child, k+1, label, states)
return
else:
if child.out_links == row and child.in_links == column:
self.insertRecursive(graph, conditions, matrix, child, k+1, label, states)
return
new_child = GTrieNode()
new_child.setDepth(k)
new_child.setInLinks(column)
new_child.setOutLinks(row)
new_child.setGraph(graph.subgraph(graph.nodes()[:k+1]))
#new_child.setGraph(graph.subgraph(graph.nodes()[:k]))
new_child.setNodeStates([graph.node[x] for x in new_child.getGraph(copy=True).nodes()])
new_child.setInLinkStates(column)
new_child.setOutLinkStates(row)
node.insertChild(new_child)
new_child.setParent(node)
#print "Child."
#pos=networkx.fruchterman_reingold_layout(new_child.getGraph())
#networkx.draw(new_child.getGraph(),pos)
#networkx.draw_networkx_edge_labels(new_child.getGraph(), pos)
#pylab.show()
self.insertRecursive(graph, conditions, matrix, new_child, k+1, label, states)
def GTrieMatch(self, graph, probability=[], labels=False, states=False):
self.clearMatch()
self.setProbability(self.root, probability)
self.add_null_match(graph, self.root, labels)
for child in self.root.getChildren():
nodes_used = []
if random.random() <= child.getProbability():
self.match(graph, child, nodes_used, labels, states)
def add_null_match(self, graph, trie_node, labels):
if trie_node.getMatchCount() < self.max_matches and \
trie_node.isLeaf() and self.null_graph:
self.foundMatch(trie_node, networkx.Graph(), [], labels)
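# match() extends a partial embedding one vertex at a time: candidates come from
# matchingVertices, leaves record completed matches via foundMatch, and each child is explored
# with probability child.getProbability(); candidates are shuffled so the max_matches cap does
# not systematically favour low-numbered vertices.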
def match(self, graph, trie_node, nodes_used, labels, states):
matched_vertices = self.matchingVertices(graph, trie_node, nodes_used, states)
#Since there is potentially a cap on matches for a specific trie node,
#the matched_vertices are now randomized
random.shuffle(matched_vertices)
for node in matched_vertices:
if trie_node.getMatchCount() < self.max_matches and \
trie_node.isLeaf() and trie_node.areConditionsRespectedStrict(nodes_used, node):
match = copy.deepcopy(nodes_used)
match.append(node)
self.foundMatch(trie_node, graph, match, labels)
for child in trie_node.getChildren():
if random.random() <= child.getProbability():
new_used = copy.deepcopy(nodes_used)
new_used.append(node)
self.match(graph, child, new_used, labels, states)
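# matchingVertices proposes the graph vertices that may occupy the next pattern position: for
# the first position any vertex at or above the symmetry-derived minimum label is a candidate;
# afterwards, candidates are neighbours of the already-used vertices (plus degree-0 vertices when
# the corresponding pattern node is isolated), filtered by the required in/out links and,
# optionally, by node and edge states.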
def matchingVertices(self, graph, trie_node, nodes_used, states):
candidates = []
if not trie_node.areConditionsRespectedWeak(nodes_used):
return candidates
min_value = trie_node.getMinLabelForCurrentPos(nodes_used)
if nodes_used == []:
candidates = [x for x in graph.nodes() if x >= min_value]
else:
cand_graph = graph.to_undirected() if networkx.is_directed(graph) else graph
connections = [set(cand_graph.neighbors(x)) for x in nodes_used]
if trie_node.getGraph().degree(trie_node.getGraph().nodes()[len(nodes_used)]) == 0:
connections.append(set([x for x, y in graph.degree_iter() if y == 0]))
connections = list(set.union(*connections))
connections = [x for x in connections if x >= min_value]
candidates = [x for x in connections if x not in nodes_used]
#Testing the space reduction
#candidates.sort(key=lambda x: len(graph.neighbors(x)))
#candidates = [x for x in candidates if len(graph.neighbors(x)) == len(graph.neighbors(candidates[0]))]
#candidates = [x for x in candidates if x not in nodes_used]
#candidates = []
#if len(connections) > 0:
#candidates = [x for x in graph.neighbors(connections[0]) if x not in nodes_used]
vertices = []
for node in candidates:
cand_test = []
test_nodes = copy.deepcopy(nodes_used)
test_nodes.append(node)
if states:
if graph.node[node] == trie_node.getNodeStates()[len(nodes_used)]:
for i in range(0, len(trie_node.getInLinks())):
if ((trie_node.getInLinks()[i] == 1 and node in graph.edge[test_nodes[i]] and
trie_node.getInLinkStates()[i] == graph.edge[test_nodes[i]][node]) or
(trie_node.getInLinks()[i] == 0 and node not in graph.edge[test_nodes[i]])) and \
((trie_node.getOutLinks()[i] == 1 and test_nodes[i] in graph.edge[node] and
trie_node.getOutLinkStates()[i] == graph.edge[node][test_nodes[i]]) or
(trie_node.getOutLinks()[i] == 0 and test_nodes[i] not in graph.edge[node])):
cand_test.append(True)
else:
cand_test.append(False)
if False not in cand_test:
vertices.append(node)
else:
for i in range(0, len(trie_node.getInLinks())):
if ((trie_node.getInLinks()[i] == 1 and node in graph.edge[test_nodes[i]]) or
(trie_node.getInLinks()[i] == 0 and node not in graph.edge[test_nodes[i]])) and \
((trie_node.getOutLinks()[i] == 1 and test_nodes[i] in graph.edge[node]) or
(trie_node.getOutLinks()[i] == 0 and test_nodes[i] not in graph.edge[node])):
cand_test.append(True)
else:
cand_test.append(False)
if False not in cand_test:
vertices.append(node)
return vertices
def foundMatch(self, node, graph, match, labels):
if node.getMatchCount() == self.max_matches:
node.setProbability(0.)
self.updateProbabilities(node)
if node.getMatchCount() < self.max_matches:
node.incMatchCount()
if labels:
if node.getLabel() in self.dict_matches:
self.dict_matches[node.getLabel()].append(graph.subgraph(match).copy())
else:
self.dict_matches[node.getLabel()] = []
self.dict_matches[node.getLabel()].append(graph.subgraph(match).copy())
else:
matchGraph = graph.subgraph(match).copy()
self.matches.append(matchGraph)
#print str(matchGraph.nodes()) + str(matchGraph.edges())
def clearMatch(self):
self.matches = []
self.dict_matches = {}
self.clearMatchCount(self.root)
def createGTrieWithFour(self):
four_1 = networkx.Graph()
four_2 = networkx.Graph()
four_3 = networkx.Graph()
four_4 = networkx.Graph()
four_5 = networkx.Graph()
four_6 = networkx.Graph()
three_1 = networkx.Graph()
three_2 = networkx.Graph()
four_1.add_nodes_from([1,2,3,4])
four_2.add_nodes_from([1,2,3,4])
four_3.add_nodes_from([1,2,3,4])
four_4.add_nodes_from([1,2,3,4])
four_5.add_nodes_from([1,2,3,4])
four_6.add_nodes_from([1,2,3,4])
three_1.add_nodes_from([1,2,3])
three_2.add_nodes_from([1,2,3])
four_1.add_edges_from([(1,4),(2,4),(3,4)])
four_2.add_edges_from([(1,3),(1,4),(2,4)])
four_3.add_edges_from([(1,3),(1,4),(2,4),(3,4)])
four_4.add_edges_from([(1,3),(1,4),(2,3),(2,4)])
four_5.add_edges_from([(1,3),(1,4),(2,3),(2,4),(3,4)])
four_6.add_edges_from([(1,2),(1,3),(1,4),(2,3),(2,4),(3,4),])
three_1.add_edges_from([(1,2), (2,3), (1,3)])
three_2.add_edges_from([(1,2), (2,3)])
self.GTrieInsert(four_1)
self.GTrieInsert(four_2)
self.GTrieInsert(four_3)
self.GTrieInsert(four_4)
self.GTrieInsert(four_5)
self.GTrieInsert(four_6)
self.GTrieInsert(three_1)
self.GTrieInsert(three_2)
def insertEdgeStateTest(self, correct=False):
four_1 = networkx.Graph()
four_2 = networkx.Graph()
four_3 = networkx.Graph()
four_4 = networkx.Graph()
three_1 = networkx.Graph()
four_1.add_nodes_from([1,2,3,4])
four_2.add_nodes_from([1,2,3,4])
four_3.add_nodes_from([1,2,3,4])
four_4.add_nodes_from([1,2,3,4])
three_1.add_nodes_from([1,2,3])
four_1.add_edge(1,2,state=2)
four_1.add_edge(2,3,state=1)
four_1.add_edge(1,3,state=1)
four_1.add_edge(1,4,state=1)
four_2.add_edge(1,2,state=2)
four_2.add_edge(2,3,state=1)
four_2.add_edge(1,3,state=1)
four_2.add_edge(1,4,state=1)
four_2.add_edge(3,4,state=2)
four_3.add_edge(1,2,state=1)
four_3.add_edge(2,3,state=1)
four_3.add_edge(3,4,state=1)
four_3.add_edge(1,4,state=1)
four_4.add_edge(1,2,state=1)
four_4.add_edge(1,3,state=2)
four_4.add_edge(1,4,state=1)
three_1.add_edge(1,2, state=2)
three_1.add_edge(2,3,state=1)
three_1.add_edge(1,3,state=1)
if correct:
self.GTrieInsert(three_1,states=True)
self.GTrieInsert(four_1,states=True)
self.GTrieInsert(four_2,states=True)
self.GTrieInsert(four_3,states=True)
self.GTrieInsert(four_4,states=True)
else:
self.GTrieInsert(four_1,states=True)
self.GTrieInsert(four_2,states=True)
self.GTrieInsert(four_3,states=True)
self.GTrieInsert(four_4,states=True)
self.GTrieInsert(three_1,states=True)
def unconnectedNodeTest(self):
three_1 = networkx.Graph()
three_2 = networkx.Graph()
three_3 = networkx.Graph()
four_1 = networkx.Graph()
three_1.add_nodes_from([1,2,3])
three_2.add_nodes_from([1,2,3])
three_3.add_nodes_from([1,2,3])
four_1.add_nodes_from([1,2,3,4])
three_1.add_edges_from([(1,2)])
three_3.add_edges_from([(1,2),(2,3),(1,3)])
four_1.add_edges_from([(1,2),(2,3),(1,3)])
self.GTrieInsert(three_1)
self.GTrieInsert(three_2)
self.GTrieInsert(three_3)
self.GTrieInsert(four_1)
def realDataTest(self):
frames = NetworkFrames.NetworkFrames()
frames.readGraphML("insert_Graphs.graphML")
count = 0
for graph in frames.inputFrames:
self.GTrieInsert(graph, label=count,states=True)
count += 1
def insert_from_network_frames(self, path):
frames = NetworkFrames.NetworkFrames()
frames.readGraphML(path)
index = 0
for frame in frames.getInputNetworks():
self.GTrieInsert(frame, index)
index += 1
def empty_graph_test(self):
graph = networkx.Graph()
self.GTrieInsert(graph)
empty_test = networkx.Graph()
self.GTrieMatch(empty_test)
num_gtrie_matches = len(self.matches)
print self.matches
if __name__ == "__main__":
empty_tree = GTrie()
empty_tree.empty_graph_test()
'''directed_trie = GTrie()
directed_trie.insert_from_network_frames('ff_lhs.graphML')
directed_network = networkx.readwrite.read_graphml('test_network.graphML')
pos=networkx.fruchterman_reingold_layout(directed_network)
networkx.draw(directed_network,pos)
#networkx.draw_networkx_edge_labels(test_graph, pos)
pylab.show()
directed_trie.GTrieMatch(directed_network, labels=True)
#trie.read("GTrieTest.p")
#import cProfile
#import StringIO
#import pstats
#pr = cProfile.Profile()
#pr.enable()
#import time
#start = time.time()
#print "Num nodes: " + str(len(test_graph.nodes()))
#print "Num edges: " + str(len(test_graph.edges()))
#correct_trie.GTrieMatch(edge_state_test,[1,1,1,1,1], states=True)
#incorrect_trie.GTrieMatch(edge_state_test,[1,1,1,1,1], states=True)
#real_data_test.GTrieMatch(test_graph,[1,1,1,.01,.01],labels=True, states=True)
#elapsed = time.time() - start
#print "GTrie Elapsed Time: (3,5 complete graph)" + str(elapsed)
for key in directed_trie.dict_matches.iterkeys():
print "Length of key: " + str(key) + " is: " + str(len(directed_trie.dict_matches[key]))
print "Isomorphs: ", [(graph.nodes()) for graph in directed_trie.dict_matches[key]]
#print len(correct_trie.matches)
#print len(incorrect_trie.matches)
#num_gtrie_matches = len(trie.matches)
#print trie.matches '''
'''
#pr.disable()
#s = StringIO.StringIO()
#sortby = 'cumulative'
#ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
#ps.print_stats()
#print s.getvalue()
grochow_sum = 0
#start = time.time()
#third_match = trie.utility.findSubgraphInstances(test_graph, four_1)
#elapsed = time.time() - start
#print "Elapsed time (Grochow-Kellis four_1): " + str(elapsed)
#grochow_sum += len(third_match)
#print num_gtrie_matches - grochow_sum
#start = time.time()
#third_match = trie.utility.findSubgraphInstances(test_graph, four_2)
#elapsed = time.time() - start
#print "Elapsed time (Grochow-Kellis four_2): " + str(elapsed)
#grochow_sum += len(third_match)
#print num_gtrie_matches - grochow_sum
start = time.time()
first_match = trie.utility.findSubgraphInstances(test_graph, four_3)
elapsed = time.time() - start
print "Elapsed time (Grochow-Kellis four_3): " + str(elapsed)
grochow_sum += len(first_match)
print grochow_sum
#start = time.time()
#first_match = trie.utility.findSubgraphInstances(test_graph, four_4)
#elapsed = time.time() - start
#print "Elapsed time (Grochow-Kellis four_4): " + str(elapsed)
#grochow_sum += len(first_match)
#print num_gtrie_matches - grochow_sum
start = time.time()
second_match = trie.utility.findSubgraphInstances(test_graph, four_5)
elapsed = time.time() - start
print "Elapsed time (Grochow-Kellis four_5): " + str(elapsed)
grochow_sum += len(second_match)
print grochow_sum
#start = time.time()
#first_match = trie.utility.findSubgraphInstances(test_graph, four_6)
#elapsed = time.time() - start
#print "Elapsed time (Grochow-Kellis four_6): " + str(elapsed)
#grochow_sum += len(first_match)
#print num_gtrie_matches - grochow_sum
start = time.time()
first_match = trie.utility.findSubgraphInstances(test_graph, three_1)
elapsed = time.time() - start
print "Elapsed time (Grochow-Kellis three_1): " + str(elapsed)
grochow_sum += len(first_match)
print grochow_sum
#start = time.time()
#second_match = trie.utility.findSubgraphInstances(test_graph, three_2)
#elapsed = time.time() - start
#print "Elapsed time (Grochow-Kellis three_2): " + str(elapsed)
#grochow_sum += len(second_match)
#print num_gtrie_matches - grochow_sum'''
print "Done."
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# pylint: disable=unused-import,g-bad-import-order
"""Contains layer utilies for input validation and format conversion.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from six.moves import xrange # pylint: disable=redefined-builtin
import numpy as np
from tensorflow.python.ops import variables
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
def convert_data_format(data_format, ndim):
if data_format == 'channels_last':
if ndim == 3:
return 'NWC'
elif ndim == 4:
return 'NHWC'
elif ndim == 5:
return 'NDHWC'
else:
raise ValueError('Input rank not supported:', ndim)
elif data_format == 'channels_first':
if ndim == 3:
return 'NCW'
elif ndim == 4:
return 'NCHW'
elif ndim == 5:
return 'NCDHW'
else:
raise ValueError('Input rank not supported:', ndim)
else:
raise ValueError('Invalid data_format:', data_format)
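# Illustrative sketch (not part of the original module): a minimal check of
# convert_data_format for rank-4 inputs. The helper name
# `_example_convert_data_format` is hypothetical and is never called by the
# module itself.
def _example_convert_data_format():
  # 'channels_last' maps to TensorFlow's 'NHWC' layout for 4-D inputs,
  # while 'channels_first' maps to 'NCHW'.
  assert convert_data_format('channels_last', 4) == 'NHWC'
  assert convert_data_format('channels_first', 4) == 'NCHW'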
def normalize_tuple(value, n, name):
"""Transforms a single integer or iterable of integers into an integer tuple.
Arguments:
    value: The value to validate and convert. Could be an int, or any iterable
of ints.
n: The size of the tuple to be returned.
name: The name of the argument being validated, e.g. "strides" or
"kernel_size". This is only used to format error messages.
Returns:
A tuple of n integers.
Raises:
    ValueError: If something other than an int/long or an iterable thereof was
passed.
"""
if isinstance(value, int):
return (value,) * n
else:
try:
value_tuple = tuple(value)
except TypeError:
raise ValueError('The `' + name + '` argument must be a tuple of ' +
str(n) + ' integers. Received: ' + str(value))
if len(value_tuple) != n:
raise ValueError('The `' + name + '` argument must be a tuple of ' +
str(n) + ' integers. Received: ' + str(value))
for single_value in value_tuple:
try:
int(single_value)
except ValueError:
raise ValueError('The `' + name + '` argument must be a tuple of ' +
str(n) + ' integers. Received: ' + str(value) + ' '
'including element ' + str(single_value) + ' of type' +
' ' + str(type(single_value)))
return value_tuple
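# Illustrative sketch (not part of the original module): normalize_tuple
# broadcasts a bare int and validates an iterable of the requested length.
# The helper name `_example_normalize_tuple` is hypothetical.
def _example_normalize_tuple():
  # A single int is repeated n times; an iterable is validated and returned
  # as a tuple of the same values.
  assert normalize_tuple(2, 2, 'strides') == (2, 2)
  assert normalize_tuple([1, 2], 2, 'kernel_size') == (1, 2)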
def normalize_data_format(value):
data_format = value.lower()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('The `data_format` argument must be one of '
'"channels_first", "channels_last". Received: ' +
str(value))
return data_format
def normalize_padding(value):
padding = value.lower()
if padding not in {'valid', 'same'}:
raise ValueError('The `padding` argument must be one of "valid", "same". '
'Received: ' + str(padding))
return padding
def conv_output_length(input_length, filter_size, padding, stride, dilation=1):
"""Determines output length of a convolution given input length.
Arguments:
input_length: integer.
filter_size: integer.
padding: one of "same", "valid", "full".
stride: integer.
dilation: dilation rate, integer.
Returns:
The output length (integer).
"""
if input_length is None:
return None
assert padding in {'same', 'valid', 'full'}
dilated_filter_size = filter_size + (filter_size - 1) * (dilation - 1)
if padding == 'same':
output_length = input_length
elif padding == 'valid':
output_length = input_length - dilated_filter_size + 1
elif padding == 'full':
output_length = input_length + dilated_filter_size - 1
return (output_length + stride - 1) // stride
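# Illustrative sketch (not part of the original module): worked examples of
# the arithmetic above for a length-10 input, filter size 3 and stride 2.
# The helper name `_example_conv_output_length` is hypothetical.
def _example_conv_output_length():
  # 'valid': (10 - 3 + 1 + 2 - 1) // 2 = 4; 'same': (10 + 2 - 1) // 2 = 5.
  assert conv_output_length(10, 3, 'valid', 2) == 4
  assert conv_output_length(10, 3, 'same', 2) == 5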
def conv_input_length(output_length, filter_size, padding, stride):
"""Determines input length of a convolution given output length.
Arguments:
output_length: integer.
filter_size: integer.
padding: one of "same", "valid", "full".
stride: integer.
Returns:
The input length (integer).
"""
if output_length is None:
return None
assert padding in {'same', 'valid', 'full'}
if padding == 'same':
pad = filter_size // 2
elif padding == 'valid':
pad = 0
elif padding == 'full':
pad = filter_size - 1
return (output_length - 1) * stride - 2 * pad + filter_size
def deconv_output_length(input_length, filter_size, padding, stride):
"""Determines output length of a transposed convolution given input length.
Arguments:
input_length: integer.
filter_size: integer.
padding: one of "same", "valid", "full".
stride: integer.
Returns:
The output length (integer).
"""
if input_length is None:
return None
input_length *= stride
if padding == 'valid':
input_length += max(filter_size - stride, 0)
elif padding == 'full':
input_length -= (stride + filter_size - 2)
return input_length
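# Illustrative sketch (not part of the original module): deconv_output_length
# first scales the input by the stride, so a length-4 input with stride 2 and
# filter size 3 gives 8 for 'same' and 8 + max(3 - 2, 0) = 9 for 'valid'.
# The helper name `_example_deconv_output_length` is hypothetical.
def _example_deconv_output_length():
  assert deconv_output_length(4, 3, 'same', 2) == 8
  assert deconv_output_length(4, 3, 'valid', 2) == 9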
def smart_cond(pred, fn1, fn2, name=None):
"""Return either `fn1()` or `fn2()` based on the boolean predicate `pred`.
If `pred` is a bool or has a constant value, we return either `fn1()`
or `fn2()`, otherwise we use `tf.cond` to dynamically route to both.
Arguments:
pred: A scalar determining whether to return the result of `fn1` or `fn2`.
fn1: The callable to be performed if pred is true.
fn2: The callable to be performed if pred is false.
name: Optional name prefix when using `tf.cond`.
Returns:
Tensors returned by the call to either `fn1` or `fn2`.
Raises:
    TypeError: if `fn1` or `fn2` is not callable.
"""
if not callable(fn1):
raise TypeError('`fn1` must be callable.')
if not callable(fn2):
raise TypeError('`fn2` must be callable.')
pred_value = constant_value(pred)
if pred_value is not None:
if pred_value:
return fn1()
else:
return fn2()
else:
return control_flow_ops.cond(pred, fn1, fn2, name)
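# Illustrative sketch (not part of the original module): when `pred` is a
# plain Python bool, smart_cond short-circuits and calls a single branch
# directly instead of building a `tf.cond` op. The helper name
# `_example_smart_cond` is hypothetical.
def _example_smart_cond():
  assert smart_cond(True, lambda: 'fn1', lambda: 'fn2') == 'fn1'
  assert smart_cond(False, lambda: 'fn1', lambda: 'fn2') == 'fn2'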
def constant_value(pred):
"""Return the bool value for `pred`, or None if `pred` had a dynamic value.
Arguments:
pred: A scalar, either a Python bool or a TensorFlow boolean variable
or tensor.
Returns:
True or False if `pred` has a constant boolean value, None otherwise.
Raises:
    TypeError: if `pred` is not a Variable, Tensor or bool.
"""
if isinstance(pred, bool):
pred_value = pred
elif isinstance(pred, variables.Variable):
pred_value = None
elif isinstance(pred, ops.Tensor):
pred_value = tensor_util.constant_value(pred)
else:
raise TypeError('`pred` must be a Tensor, a Variable, or a Python bool.')
return pred_value
|
|
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_router_aspath_list
short_description: Configure Autonomous System (AS) path lists in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify router feature and aspath_list category.
      Examples include all parameters, and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.9"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
state:
description:
- Indicates whether to create or remove the object.
type: str
required: true
choices:
- present
- absent
router_aspath_list:
description:
- Configure Autonomous System (AS) path lists.
default: null
type: dict
suboptions:
name:
description:
- AS path list name.
required: true
type: str
rule:
description:
- AS path list rule.
type: list
suboptions:
action:
description:
- Permit or deny route-based operations, based on the route's AS_PATH attribute.
type: str
choices:
- deny
- permit
id:
description:
- ID.
required: true
type: int
regexp:
description:
- Regular-expression to match the Border Gateway Protocol (BGP) AS paths.
type: str
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure Autonomous System (AS) path lists.
fortios_router_aspath_list:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
router_aspath_list:
name: "default_name_3"
rule:
-
action: "deny"
id: "6"
regexp: "<your_own_value>"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
def filter_router_aspath_list_data(json):
option_list = ['name', 'rule']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def underscore_to_hyphen(data):
if isinstance(data, list):
for elem in data:
elem = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
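# Illustrative sketch (not part of the original module): underscore_to_hyphen
# rewrites dictionary keys recursively so that Ansible-style option names match
# the hyphenated keys FortiOS expects. The helper name
# `_example_underscore_to_hyphen` is hypothetical and is never called.
def _example_underscore_to_hyphen():
    data = {'router_aspath_list': {'name': 'default_name_3'}}
    converted = underscore_to_hyphen(data)
    # Only dictionary keys change; values are passed through untouched.
    assert 'router-aspath-list' in converted
    assert converted['router-aspath-list']['name'] == 'default_name_3'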
def router_aspath_list(data, fos):
vdom = data['vdom']
state = data['state']
router_aspath_list_data = data['router_aspath_list']
filtered_data = underscore_to_hyphen(filter_router_aspath_list_data(router_aspath_list_data))
if state == "present":
return fos.set('router',
'aspath-list',
data=filtered_data,
vdom=vdom)
elif state == "absent":
return fos.delete('router',
'aspath-list',
mkey=filtered_data['name'],
vdom=vdom)
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_router(data, fos):
if data['router_aspath_list']:
resp = router_aspath_list(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "default": "", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"state": {"required": True, "type": "str",
"choices": ["present", "absent"]},
"router_aspath_list": {
"required": False, "type": "dict", "default": None,
"options": {
"name": {"required": True, "type": "str"},
"rule": {"required": False, "type": "list",
"options": {
"action": {"required": False, "type": "str",
"choices": ["deny", "permit"]},
"id": {"required": True, "type": "int"},
"regexp": {"required": False, "type": "str"}
}}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
# legacy_mode refers to using fortiosapi instead of HTTPAPI
legacy_mode = 'host' in module.params and module.params['host'] is not None and \
'username' in module.params and module.params['username'] is not None and \
'password' in module.params and module.params['password'] is not None
if not legacy_mode:
if module._socket_path:
connection = Connection(module._socket_path)
fos = FortiOSHandler(connection)
is_error, has_changed, result = fortios_router(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
else:
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
login(module.params, fos)
is_error, has_changed, result = fortios_router(module.params, fos)
fos.logout()
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
|
|
from copy import copy
import sqlalchemy as sa
from sqlalchemy_continuum.utils import tx_column_name
from tests import TestCase, create_test_cases
class VersionModelAccessorsTestCase(TestCase):
def test_previous_for_first_version(self):
article = self.Article()
article.name = u'Some article'
article.content = u'Some content'
self.session.add(article)
self.session.commit()
assert not article.versions[0].previous
def test_previous_for_live_parent(self):
article = self.Article()
article.name = u'Some article'
article.content = u'Some content'
self.session.add(article)
self.session.commit()
article.name = u'Updated name'
article.content = u'Updated content'
self.session.commit()
version = article.versions[1]
assert version.previous.name == u'Some article'
assert (
getattr(version.previous, tx_column_name(version)) ==
getattr(version, tx_column_name(version)) - 1
)
def test_previous_for_deleted_parent(self):
article = self.Article()
article.name = u'Some article'
article.content = u'Some content'
self.session.add(article)
self.session.commit()
self.session.delete(article)
self.session.commit()
versions = (
self.session.query(self.ArticleVersion)
.order_by(
getattr(
self.ArticleVersion,
self.options['transaction_column_name']
)
)
).all()
assert versions[1].previous.name == u'Some article'
def test_previous_chaining(self):
article = self.Article()
article.name = u'Some article'
article.content = u'Some content'
self.session.add(article)
self.session.commit()
article.name = u'Updated article'
self.session.commit()
self.session.delete(article)
self.session.commit()
version = (
self.session.query(self.ArticleVersion)
.order_by(
getattr(
self.ArticleVersion,
self.options['transaction_column_name']
)
)
).all()[-1]
assert version.previous.previous
def test_previous_two_versions(self):
article = self.Article()
article.name = u'Some article'
article.content = u'Some content'
self.session.add(article)
self.session.commit()
article2 = self.Article()
article2.name = u'Second article'
article2.content = u'Second article'
self.session.add(article2)
self.session.commit()
article.name = u'Updated article'
self.session.commit()
article.name = u'Updated article 2'
self.session.commit()
assert article.versions[2].previous
assert article.versions[1].previous
assert article.versions[2].previous == article.versions[1]
assert article.versions[1].previous == article.versions[0]
def test_next_two_versions(self):
article = self.Article()
article.name = u'Some article'
article.content = u'Some content'
self.session.add(article)
self.session.commit()
article2 = self.Article()
article2.name = u'Second article'
article2.content = u'Second article'
self.session.add(article2)
self.session.commit()
article.name = u'Updated article'
self.session.commit()
article.name = u'Updated article 2'
self.session.commit()
assert article.versions[0].next
assert article.versions[1].next
assert article.versions[0].next == article.versions[1]
assert article.versions[1].next == article.versions[2]
def test_next_for_last_version(self):
article = self.Article()
article.name = u'Some article'
article.content = u'Some content'
self.session.add(article)
self.session.commit()
assert not article.versions[0].next
def test_next_for_live_parent(self):
article = self.Article()
article.name = u'Some article'
article.content = u'Some content'
self.session.add(article)
self.session.commit()
article.name = u'Updated name'
article.content = u'Updated content'
self.session.commit()
version = article.versions[0]
assert version.next.name == u'Updated name'
def test_next_for_deleted_parent(self):
article = self.Article()
article.name = u'Some article'
article.content = u'Some content'
self.session.add(article)
self.session.commit()
version = article.versions[0]
self.session.delete(article)
self.session.commit()
assert version.next
def test_chaining_next(self):
article = self.Article()
article.name = u'Some article'
article.content = u'Some content'
self.session.add(article)
self.session.commit()
article.name = u'Updated article'
self.session.commit()
article.content = u'Updated content'
self.session.commit()
versions = article.versions.all()
version = versions[0]
assert version.next == versions[1]
assert version.next.next == versions[2]
def test_index_for_deleted_parent(self):
article = self.Article()
article.name = u'Some article'
article.content = u'Some content'
self.session.add(article)
self.session.commit()
self.session.delete(article)
self.session.commit()
versions = (
self.session.query(self.ArticleVersion)
.order_by(
getattr(
self.ArticleVersion,
self.options['transaction_column_name']
)
)
).all()
assert versions[0].index == 0
assert versions[1].index == 1
def test_index_for_live_parent(self):
article = self.Article()
article.name = u'Some article'
article.content = u'Some content'
self.session.add(article)
self.session.commit()
assert article.versions[0].index == 0
class VersionModelAccessorsWithCompositePkTestCase(TestCase):
def create_models(self):
class User(self.Model):
__tablename__ = 'user'
__versioned__ = copy(self.options)
first_name = sa.Column(sa.Unicode(255), primary_key=True)
last_name = sa.Column(sa.Unicode(255), primary_key=True)
email = sa.Column(sa.Unicode(255))
self.User = User
def test_previous_two_versions(self):
user = self.User(
first_name=u'Some user',
last_name=u'Some last_name',
)
self.session.add(user)
self.session.commit()
user2 = self.User(
first_name=u'Second user',
last_name=u'Second user',
)
self.session.add(user2)
self.session.commit()
user.email = u'Updated email'
self.session.commit()
user.email = u'Updated email 2'
self.session.commit()
assert user.versions[2].previous
assert user.versions[1].previous
assert user.versions[2].previous == user.versions[1]
assert user.versions[1].previous == user.versions[0]
def test_next_two_versions(self):
user = self.User()
user.first_name = u'Some user'
user.last_name = u'Some last_name'
self.session.add(user)
self.session.commit()
user2 = self.User()
user2.first_name = u'Second user'
user2.last_name = u'Second user'
self.session.add(user2)
self.session.commit()
user.email = u'Updated user'
self.session.commit()
user.email = u'Updated user 2'
self.session.commit()
assert user.versions[0].next
assert user.versions[1].next
assert user.versions[0].next == user.versions[1]
assert user.versions[1].next == user.versions[2]
create_test_cases(VersionModelAccessorsTestCase)
create_test_cases(VersionModelAccessorsWithCompositePkTestCase)
|
|
#!/usr/bin/env python3
# Copyright (c) 2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the bumpfee RPC.
Verifies that the bumpfee RPC creates replacement transactions successfully when
its preconditions are met, and returns appropriate errors in other cases.
This module consists of around a dozen individual test cases implemented in the
top-level functions named as test_<test_case_description>. The test functions
can be disabled or reordered if needed for debugging. If new test cases are
added in the future, they should try to follow the same convention and not
make assumptions about execution order.
"""
from segwit import send_to_witness
from test_framework.test_framework import BitcoinTestFramework
from test_framework import blocktools
from test_framework.mininode import CTransaction
from test_framework.util import *
import io
# Sequence number that is BIP 125 opt-in and BIP 68-compliant
BIP125_SEQUENCE_NUMBER = 0xfffffffd
WALLET_PASSPHRASE = "test"
WALLET_PASSPHRASE_TIMEOUT = 3600
class BumpFeeTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
self.extra_args = [["-prematurewitness", "-walletprematurewitness", "-walletrbf={}".format(i)]
for i in range(self.num_nodes)]
def run_test(self):
# Encrypt wallet for test_locked_wallet_fails test
self.nodes[1].node_encrypt_wallet(WALLET_PASSPHRASE)
self.start_node(1)
self.nodes[1].walletpassphrase(WALLET_PASSPHRASE, WALLET_PASSPHRASE_TIMEOUT)
connect_nodes_bi(self.nodes, 0, 1)
self.sync_all()
peer_node, rbf_node = self.nodes
rbf_node_address = rbf_node.getnewaddress()
        # fund rbf node with 25 coins of 0.1 via (10,000,000 satoshis each)
self.log.info("Mining blocks...")
peer_node.generate(110)
self.sync_all()
for i in range(25):
peer_node.sendtoaddress(rbf_node_address, 0.1)
self.sync_all()
peer_node.generate(1)
self.sync_all()
assert_equal(rbf_node.getbalance(), Decimal("2.5"))
self.log.info("Running tests")
dest_address = peer_node.getnewaddress()
test_simple_bumpfee_succeeds(rbf_node, peer_node, dest_address)
test_segwit_bumpfee_succeeds(rbf_node, dest_address)
test_nonrbf_bumpfee_fails(peer_node, dest_address)
test_notmine_bumpfee_fails(rbf_node, peer_node, dest_address)
test_bumpfee_with_descendant_fails(rbf_node, rbf_node_address, dest_address)
test_small_output_fails(rbf_node, dest_address)
test_dust_to_fee(rbf_node, dest_address)
test_settxfee(rbf_node, dest_address)
test_rebumping(rbf_node, dest_address)
test_rebumping_not_replaceable(rbf_node, dest_address)
test_unconfirmed_not_spendable(rbf_node, rbf_node_address)
test_bumpfee_metadata(rbf_node, dest_address)
test_locked_wallet_fails(rbf_node, dest_address)
self.log.info("Success")
def test_simple_bumpfee_succeeds(rbf_node, peer_node, dest_address):
rbfid = spend_one_input(rbf_node, dest_address)
rbftx = rbf_node.gettransaction(rbfid)
sync_mempools((rbf_node, peer_node))
assert rbfid in rbf_node.getrawmempool() and rbfid in peer_node.getrawmempool()
bumped_tx = rbf_node.bumpfee(rbfid)
assert_equal(bumped_tx["errors"], [])
assert bumped_tx["fee"] - abs(rbftx["fee"]) > 0
    # check that bumped_tx propagates, original tx was evicted and has a wallet conflict
sync_mempools((rbf_node, peer_node))
assert bumped_tx["txid"] in rbf_node.getrawmempool()
assert bumped_tx["txid"] in peer_node.getrawmempool()
assert rbfid not in rbf_node.getrawmempool()
assert rbfid not in peer_node.getrawmempool()
oldwtx = rbf_node.gettransaction(rbfid)
assert len(oldwtx["walletconflicts"]) > 0
# check wallet transaction replaces and replaced_by values
bumpedwtx = rbf_node.gettransaction(bumped_tx["txid"])
assert_equal(oldwtx["replaced_by_txid"], bumped_tx["txid"])
assert_equal(bumpedwtx["replaces_txid"], rbfid)
def test_segwit_bumpfee_succeeds(rbf_node, dest_address):
# Create a transaction with segwit output, then create an RBF transaction
# which spends it, and make sure bumpfee can be called on it.
segwit_in = next(u for u in rbf_node.listunspent() if u["amount"] == Decimal("0.1"))
segwit_out = rbf_node.validateaddress(rbf_node.getnewaddress())
rbf_node.addwitnessaddress(segwit_out["address"])
segwitid = send_to_witness(
use_p2wsh=False,
node=rbf_node,
utxo=segwit_in,
pubkey=segwit_out["pubkey"],
encode_p2sh=False,
amount=Decimal("0.09"),
sign=True)
rbfraw = rbf_node.createrawtransaction([{
'txid': segwitid,
'vout': 0,
"sequence": BIP125_SEQUENCE_NUMBER
}], {dest_address: Decimal("0.05"),
rbf_node.getrawchangeaddress(): Decimal("0.03")})
rbfsigned = rbf_node.signrawtransaction(rbfraw)
rbfid = rbf_node.sendrawtransaction(rbfsigned["hex"])
assert rbfid in rbf_node.getrawmempool()
bumped_tx = rbf_node.bumpfee(rbfid)
assert bumped_tx["txid"] in rbf_node.getrawmempool()
assert rbfid not in rbf_node.getrawmempool()
def test_nonrbf_bumpfee_fails(peer_node, dest_address):
# cannot replace a non RBF transaction (from node which did not enable RBF)
not_rbfid = peer_node.sendtoaddress(dest_address, Decimal("0.00090000"))
assert_raises_rpc_error(-4, "not BIP 125 replaceable", peer_node.bumpfee, not_rbfid)
def test_notmine_bumpfee_fails(rbf_node, peer_node, dest_address):
# cannot bump fee unless the tx has only inputs that we own.
# here, the rbftx has a peer_node coin and then adds a rbf_node input
# Note that this test depends upon the RPC code checking input ownership prior to change outputs
# (since it can't use fundrawtransaction, it lacks a proper change output)
utxos = [node.listunspent()[-1] for node in (rbf_node, peer_node)]
inputs = [{
"txid": utxo["txid"],
"vout": utxo["vout"],
"address": utxo["address"],
"sequence": BIP125_SEQUENCE_NUMBER
} for utxo in utxos]
output_val = sum(utxo["amount"] for utxo in utxos) - Decimal("0.1")
rawtx = rbf_node.createrawtransaction(inputs, {dest_address: output_val})
signedtx = rbf_node.signrawtransaction(rawtx)
signedtx = peer_node.signrawtransaction(signedtx["hex"])
rbfid = rbf_node.sendrawtransaction(signedtx["hex"])
assert_raises_rpc_error(-4, "Transaction contains inputs that don't belong to this wallet",
rbf_node.bumpfee, rbfid)
def test_bumpfee_with_descendant_fails(rbf_node, rbf_node_address, dest_address):
# cannot bump fee if the transaction has a descendant
# parent is send-to-self, so we don't have to check which output is change when creating the child tx
parent_id = spend_one_input(rbf_node, rbf_node_address)
tx = rbf_node.createrawtransaction([{"txid": parent_id, "vout": 0}], {dest_address: 0.020000})
tx = rbf_node.signrawtransaction(tx)
txid = rbf_node.sendrawtransaction(tx["hex"])
assert_raises_rpc_error(-8, "Transaction has descendants in the wallet", rbf_node.bumpfee, parent_id)
def test_small_output_fails(rbf_node, dest_address):
# cannot bump fee with a too-small output
rbfid = spend_one_input(rbf_node, dest_address)
rbf_node.bumpfee(rbfid, {"totalFee": 5000000})
rbfid = spend_one_input(rbf_node, dest_address)
assert_raises_rpc_error(-4, "Change output is too small", rbf_node.bumpfee, rbfid, {"totalFee": 5000001})
def test_dust_to_fee(rbf_node, dest_address):
# check that if output is reduced to dust, it will be converted to fee
    # the bumped tx sets fee=4,990,000, but it converts to 5,000,000
rbfid = spend_one_input(rbf_node, dest_address)
fulltx = rbf_node.getrawtransaction(rbfid, 1)
bumped_tx = rbf_node.bumpfee(rbfid, {"totalFee": 4990000})
full_bumped_tx = rbf_node.getrawtransaction(bumped_tx["txid"], 1)
assert_equal(bumped_tx["fee"], Decimal("0.050000"))
assert_equal(len(fulltx["vout"]), 2)
assert_equal(len(full_bumped_tx["vout"]), 1) #change output is eliminated
def test_settxfee(rbf_node, dest_address):
# check that bumpfee reacts correctly to the use of settxfee (paytxfee)
rbfid = spend_one_input(rbf_node, dest_address)
requested_feerate = Decimal("0.025000")
rbf_node.settxfee(requested_feerate)
bumped_tx = rbf_node.bumpfee(rbfid)
actual_feerate = bumped_tx["fee"] * 1000 / rbf_node.getrawtransaction(bumped_tx["txid"], True)["size"]
# Assert that the difference between the requested feerate and the actual
# feerate of the bumped transaction is small.
assert_greater_than(Decimal("0.001000"), abs(requested_feerate - actual_feerate))
rbf_node.settxfee(Decimal("0.00000000")) # unset paytxfee
def test_rebumping(rbf_node, dest_address):
# check that re-bumping the original tx fails, but bumping the bumper succeeds
rbfid = spend_one_input(rbf_node, dest_address)
bumped = rbf_node.bumpfee(rbfid, {"totalFee": 200000})
assert_raises_rpc_error(-4, "already bumped", rbf_node.bumpfee, rbfid, {"totalFee": 300000})
rbf_node.bumpfee(bumped["txid"], {"totalFee": 300000})
def test_rebumping_not_replaceable(rbf_node, dest_address):
# check that re-bumping a non-replaceable bump tx fails
rbfid = spend_one_input(rbf_node, dest_address)
bumped = rbf_node.bumpfee(rbfid, {"totalFee": 1000000, "replaceable": False})
assert_raises_rpc_error(-4, "Transaction is not BIP 125 replaceable", rbf_node.bumpfee, bumped["txid"],
{"totalFee": 2000000})
def test_unconfirmed_not_spendable(rbf_node, rbf_node_address):
# check that unconfirmed outputs from bumped transactions are not spendable
rbfid = spend_one_input(rbf_node, rbf_node_address)
rbftx = rbf_node.gettransaction(rbfid)["hex"]
assert rbfid in rbf_node.getrawmempool()
bumpid = rbf_node.bumpfee(rbfid)["txid"]
assert bumpid in rbf_node.getrawmempool()
assert rbfid not in rbf_node.getrawmempool()
# check that outputs from the bump transaction are not spendable
# due to the replaces_txid check in CWallet::AvailableCoins
assert_equal([t for t in rbf_node.listunspent(minconf=0, include_unsafe=False) if t["txid"] == bumpid], [])
# submit a block with the rbf tx to clear the bump tx out of the mempool,
# then call abandon to make sure the wallet doesn't attempt to resubmit the
# bump tx, then invalidate the block so the rbf tx will be put back in the
# mempool. this makes it possible to check whether the rbf tx outputs are
# spendable before the rbf tx is confirmed.
block = submit_block_with_tx(rbf_node, rbftx)
rbf_node.abandontransaction(bumpid)
rbf_node.invalidateblock(block.hash)
assert bumpid not in rbf_node.getrawmempool()
assert rbfid in rbf_node.getrawmempool()
# check that outputs from the rbf tx are not spendable before the
# transaction is confirmed, due to the replaced_by_txid check in
# CWallet::AvailableCoins
assert_equal([t for t in rbf_node.listunspent(minconf=0, include_unsafe=False) if t["txid"] == rbfid], [])
# check that the main output from the rbf tx is spendable after confirmed
rbf_node.generate(1)
assert_equal(
sum(1 for t in rbf_node.listunspent(minconf=0, include_unsafe=False)
if t["txid"] == rbfid and t["address"] == rbf_node_address and t["spendable"]), 1)
def test_bumpfee_metadata(rbf_node, dest_address):
rbfid = rbf_node.sendtoaddress(dest_address, Decimal("0.10000000"), "comment value", "to value")
bumped_tx = rbf_node.bumpfee(rbfid)
bumped_wtx = rbf_node.gettransaction(bumped_tx["txid"])
assert_equal(bumped_wtx["comment"], "comment value")
assert_equal(bumped_wtx["to"], "to value")
def test_locked_wallet_fails(rbf_node, dest_address):
rbfid = spend_one_input(rbf_node, dest_address)
rbf_node.walletlock()
assert_raises_rpc_error(-13, "Please enter the wallet passphrase with walletpassphrase first.",
rbf_node.bumpfee, rbfid)
def spend_one_input(node, dest_address):
tx_input = dict(
sequence=BIP125_SEQUENCE_NUMBER, **next(u for u in node.listunspent() if u["amount"] == Decimal("0.10000000")))
rawtx = node.createrawtransaction(
[tx_input], {dest_address: Decimal("0.05000000"),
node.getrawchangeaddress(): Decimal("0.04900000")})
signedtx = node.signrawtransaction(rawtx)
txid = node.sendrawtransaction(signedtx["hex"])
return txid
def submit_block_with_tx(node, tx):
ctx = CTransaction()
ctx.deserialize(io.BytesIO(hex_str_to_bytes(tx)))
tip = node.getbestblockhash()
height = node.getblockcount() + 1
block_time = node.getblockheader(tip)["mediantime"] + 1
block = blocktools.create_block(int(tip, 16), blocktools.create_coinbase(height), block_time)
block.vtx.append(ctx)
block.rehash()
block.hashMerkleRoot = block.calc_merkle_root()
block.solve()
node.submitblock(bytes_to_hex_str(block.serialize(True)))
return block
if __name__ == "__main__":
BumpFeeTest().main()
|
|
#!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs all the buildbot steps for ChromeDriver except for update/compile."""
import optparse
import os
import platform
import shutil
import subprocess
import sys
import tempfile
import time
import urllib2
import zipfile
_THIS_DIR = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, os.path.join(_THIS_DIR, os.pardir, 'pylib'))
from common import chrome_paths
from common import util
import archive
GS_BUCKET = 'gs://chromedriver-prebuilts'
GS_ZIP_PREFIX = 'chromedriver2_prebuilts'
SLAVE_SCRIPT_DIR = os.path.join(_THIS_DIR, os.pardir, os.pardir, os.pardir,
os.pardir, os.pardir, os.pardir, os.pardir,
'scripts', 'slave')
UPLOAD_SCRIPT = os.path.join(SLAVE_SCRIPT_DIR, 'skia', 'upload_to_bucket.py')
DOWNLOAD_SCRIPT = os.path.join(SLAVE_SCRIPT_DIR, 'gsutil_download.py')
def Archive(revision):
print '@@@BUILD_STEP archive@@@'
prebuilts = ['libchromedriver2.so', 'chromedriver2_server',
'chromedriver2_unittests', 'chromedriver2_tests']
build_dir = chrome_paths.GetBuildDir(prebuilts[0:1])
zip_name = '%s_r%s.zip' % (GS_ZIP_PREFIX, revision)
temp_dir = util.MakeTempDir()
zip_path = os.path.join(temp_dir, zip_name)
print 'Zipping prebuilts %s' % zip_path
f = zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED)
for prebuilt in prebuilts:
f.write(os.path.join(build_dir, prebuilt), prebuilt)
f.close()
cmd = [
sys.executable,
UPLOAD_SCRIPT,
'--source_filepath=%s' % zip_path,
'--dest_gsbase=%s' % GS_BUCKET
]
if util.RunCommand(cmd):
print '@@@STEP_FAILURE@@@'
def Download():
print '@@@BUILD_STEP Download chromedriver prebuilts@@@'
temp_dir = util.MakeTempDir()
zip_path = os.path.join(temp_dir, 'chromedriver2_prebuilts.zip')
cmd = [
sys.executable,
DOWNLOAD_SCRIPT,
'--url=%s' % GS_BUCKET,
'--partial-name=%s' % GS_ZIP_PREFIX,
'--dst=%s' % zip_path
]
if util.RunCommand(cmd):
print '@@@STEP_FAILURE@@@'
build_dir = chrome_paths.GetBuildDir(['host_forwarder'])
print 'Unzipping prebuilts %s to %s' % (zip_path, build_dir)
f = zipfile.ZipFile(zip_path, 'r')
f.extractall(build_dir)
f.close()
# Workaround for Python bug: http://bugs.python.org/issue15795
os.chmod(os.path.join(build_dir, 'chromedriver2_server'), 0700)
def MaybeRelease(revision):
# Version is embedded as: const char kChromeDriverVersion[] = "0.1";
with open(os.path.join(_THIS_DIR, 'chrome', 'version.cc'), 'r') as f:
version_line = filter(lambda x: 'kChromeDriverVersion' in x, f.readlines())
version = version_line[0].split('"')[1]
bitness = '32'
if util.IsLinux() and platform.architecture()[0] == '64bit':
bitness = '64'
zip_name = 'chromedriver2_%s%s_%s.zip' % (
util.GetPlatformName(), bitness, version)
site = 'https://code.google.com/p/chromedriver/downloads/list'
s = urllib2.urlopen(site)
downloads = s.read()
s.close()
if zip_name in downloads:
return 0
print '@@@BUILD_STEP releasing %s@@@' % zip_name
if util.IsWindows():
server_orig_name = 'chromedriver2_server.exe'
server_name = 'chromedriver.exe'
else:
server_orig_name = 'chromedriver2_server'
server_name = 'chromedriver'
server = os.path.join(chrome_paths.GetBuildDir([server_orig_name]),
server_orig_name)
print 'Zipping ChromeDriver server', server
temp_dir = util.MakeTempDir()
zip_path = os.path.join(temp_dir, zip_name)
f = zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED)
f.write(server, server_name)
if util.IsLinux() or util.IsMac():
adb_commands = os.path.join(_THIS_DIR, 'chrome', 'adb_commands.py')
f.write(adb_commands, 'adb_commands.py')
f.close()
cmd = [
sys.executable,
os.path.join(_THIS_DIR, 'third_party', 'googlecode',
'googlecode_upload.py'),
'--summary', 'version of ChromeDriver2 r%s' % revision,
'--project', 'chromedriver',
'--user', '[email protected]',
'--label', 'Release',
zip_path
]
with open(os.devnull, 'wb') as no_output:
if subprocess.Popen(cmd, stdout=no_output, stderr=no_output).wait():
print '@@@STEP_FAILURE@@@'
def KillChromes():
chrome_map = {
'win': 'chrome.exe',
'mac': 'Chromium',
'linux': 'chrome',
}
if util.IsWindows():
cmd = ['taskkill', '/F', '/IM']
else:
cmd = ['killall', '-9']
cmd.append(chrome_map[util.GetPlatformName()])
util.RunCommand(cmd)
def CleanTmpDir():
tmp_dir = tempfile.gettempdir()
print 'cleaning temp directory:', tmp_dir
for file_name in os.listdir(tmp_dir):
if os.path.isdir(os.path.join(tmp_dir, file_name)):
print 'deleting sub-directory', file_name
shutil.rmtree(os.path.join(tmp_dir, file_name), True)
def WaitForLatestSnapshot(revision):
print '@@@BUILD_STEP wait_for_snapshot@@@'
while True:
snapshot_revision = archive.GetLatestRevision(archive.Site.SNAPSHOT)
if snapshot_revision >= revision:
break
print 'Waiting for snapshot >= %s, found %s' % (revision, snapshot_revision)
time.sleep(60)
print 'Got snapshot revision', snapshot_revision
def main():
parser = optparse.OptionParser()
parser.add_option(
'', '--android-package',
help='Application package name, if running tests on Android.')
parser.add_option(
'-r', '--revision', type='string', default=None,
help='Chromium revision')
options, _ = parser.parse_args()
if not options.android_package:
KillChromes()
CleanTmpDir()
if options.android_package:
Download()
else:
if not options.revision:
parser.error('Must supply a --revision')
if util.IsLinux() and platform.architecture()[0] == '64bit':
Archive(options.revision)
WaitForLatestSnapshot(options.revision)
cmd = [
sys.executable,
os.path.join(_THIS_DIR, 'run_all_tests.py'),
]
if options.android_package:
cmd.append('--android-package=' + options.android_package)
passed = (util.RunCommand(cmd) == 0)
if not options.android_package and passed:
MaybeRelease(options.revision)
if __name__ == '__main__':
main()
|
|
"""
LICENCE
-------
Copyright 2013 by Kitware, Inc. All Rights Reserved. Please refer to
KITWARE_LICENSE.TXT for licensing information, or contact General Counsel,
Kitware, Inc., 28 Corporate Drive, Clifton Park, NY 12065.
"""
import os
import numpy as np
import csv
import svm
import svmutil
import easyio
import kernels
import math
__all__ = ['load_libsvm_data', 'write_libsvm_kernel_matrix', 'write_libsvm_input',
'replace_labels_libsvm', 'parse_deva_detection_file', 'get_weight_vector_svm_model_linear']
def load_libsvm_data_help(data_file_name):
"""
    load_libsvm_data_help(data_file_name) -> (y, x)
Read LIBSVM-format data from data_file_name and return labels y
and data instances x.
"""
prob_y = []
prob_x = []
for line in open(data_file_name):
line = line.split(None, 1)
# In case an instance with all zero features
if len(line) == 1: line += ['']
label, features = line
xi = {}
for e in features.split():
ind, val = e.split(":")
xi[int(ind)] = float(val)
prob_y += [float(label)]
prob_x += [xi]
return (prob_y, prob_x)
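# Illustrative sketch (not part of the original module): each LIBSVM-format
# line is "<label> <index>:<value> ...", so a two-line file parses into a list
# of labels and a list of {index: value} dicts. The helper name
# `_example_load_libsvm_data_help` and the temporary file it writes are
# hypothetical and only for demonstration.
def _example_load_libsvm_data_help():
    import tempfile
    tmp = tempfile.NamedTemporaryFile(mode='w', suffix='.libsvm', delete=False)
    tmp.write('1 1:0.5 3:1.25\n')
    tmp.write('-1 2:0.75\n')
    tmp.close()
    labels, data = load_libsvm_data_help(tmp.name)
    assert labels == [1.0, -1.0]
    assert data == [{1: 0.5, 3: 1.25}, {2: 0.75}]
    os.remove(tmp.name)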
def load_libsvm_data(file_libsvm):
"""
    load libsvm basic format (one sample per row) into a numpy matrix, and also parse labels
"""
[labels, data_] = load_libsvm_data_help(file_libsvm)
def convert_datum(datum_in):
dim = max(datum_in.keys())
data_out = []
for d in range(1, int(dim+1)):
data_out.append(datum_in[d])
return data_out
data_converted = map(convert_datum, data_)
data = np.matrix(data_converted)
return (labels, data)
def write_libsvm_kernel_matrix(labels, idxs, matrix_np,
fileout):
"""
Given all the information needed to write a kernel matrix in libsvm,
write it out to a target file
"""
fout = open(fileout, 'w')
[n, dim] = matrix_np.shape
for i in range(n): # every line
fout.write('%d '%labels[i])
fout.write('0:%d '%idxs[i])
for j in range(dim):
fout.write('%d:%g '%(j+1, matrix_np[i,j]))
fout.write('\n')
fout.close()
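# Illustrative sketch (not part of the original module): the precomputed-kernel
# format written above is "<label> 0:<row id> 1:<K(i,1)> 2:<K(i,2)> ...", which
# is what libSVM expects when trained with the precomputed kernel type (-t 4).
# The helper name `_example_write_libsvm_kernel_matrix` is hypothetical, and
# `fileout` is whatever path the caller chooses.
def _example_write_libsvm_kernel_matrix(fileout):
    # A 2x2 kernel block for two examples with labels +1/-1 and row ids 1, 2.
    kernel = np.matrix([[1.0, 0.2], [0.2, 1.0]])
    write_libsvm_kernel_matrix([1, -1], [1, 2], kernel, fileout)
    # The resulting rows read "1 0:1 1:1 2:0.2" and "-1 0:2 1:0.2 2:1".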
def write_libsvm_input(labels, data_rowwise, file_out, f_print_labels, f_print_value):
n_ = len(labels)
[n,dim] = data_rowwise.shape
if n_ != n:
print 'Input error: n_ != n: %d != %d'%(n_,n)
return
fout = open(file_out, 'w')
for i in range(n):
i_label = labels[i]
i_str = ''
i_str += (f_print_labels(i_label) + ' ')
for j in range(1,1+dim):
i_str += (str(j) + ':' + f_print_value(data_rowwise[i,j-1]) + ' ')
fout.write('%s\n'%i_str)
fout.close()
def retrieve_labels(filein):
"""
Return only the labels from SVM files
"""
fin_ = open(filein, 'r')
fin = csv.reader(fin_, delimiter=' ')
labels = []
for line in fin:
labels.append(line[0])
fin_.close()
return labels
def retrieve_scores_libsvm(filein, label):
"""
Return the scores of a particular label from a libsvm output file
"""
fin_ = open(filein, 'r')
fin = csv.reader(fin_, delimiter= ' ')
str_label = str(label)
line_header = fin.next()
id_column = None
for (e, id) in zip(line_header, range(len(line_header))):
if e == str_label:
id_column = id
            break
scores = []
for line in fin:
scores.append(float(line[id_column]))
fin_.close()
return scores
def replace_labels_libsvm(labels_new, filein, fileout):
"""
Only replace the labels of libSVM files, to avoid duplicate computations
"""
fin_ = open(filein, 'r')
fin = csv.reader(fin_, delimiter=' ')
fout = open(fileout, 'w')
count = 0
for line in fin:
line[0] = str(labels_new[count]) # replace the label to a new one
fout.write(' '.join(line))
fout.write('\n')
count += 1
fin_.close()
fout.close()
def write_deva_detection_file(clipids, scores, eid, fileout, write_mode='w'):
"""
Given all score information, write deva detection file
"""
fout = open(fileout, write_mode)
clipids_scores = zip(clipids, scores)
if write_mode == 'w':
fout.write('"TrialID","Score"\n')
for (clipid, score) in clipids_scores:
lineid = "%06d.E%03d"%(clipid, eid)
fout.write('"%s","%f"\n'%(lineid, score))
fout.close()
def parse_deva_detection_file(file_in):
"""
Read a DEVA detection file, and outputs a CLIPSx3 matrix
where
Col 1 : clipid
Col 2 : target event
Col 3 : score
"""
fin_ = open(file_in, 'r')
fin = csv.reader(fin_, delimiter=',')
lines = [line for line in fin]
lines = lines[1::] # get rid of the first line header (perhaps better way to do this?)
mat = np.zeros([len(lines), 3])
count = 0
for line in lines:
mat[count][0] = int(line[0][0:6])
mat[count][1] = int(line[0][-3::])
mat[count][2] = float(line[1])
count += 1
return mat
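# Illustrative sketch (not part of the original module): a DEVA detection row
# such as "001234.E011","0.73" parses into clip id 1234, event id 11 and score
# 0.73, matching the three columns documented above. The helper name
# `_example_parse_deva_detection_file` and the temporary file are hypothetical.
def _example_parse_deva_detection_file():
    import tempfile
    tmp = tempfile.NamedTemporaryFile(mode='w', suffix='.csv', delete=False)
    tmp.write('"TrialID","Score"\n')
    tmp.write('"001234.E011","0.73"\n')
    tmp.close()
    mat = parse_deva_detection_file(tmp.name)
    assert mat.shape == (1, 3)
    assert mat[0][0] == 1234 and mat[0][1] == 11 and mat[0][2] == 0.73
    os.remove(tmp.name)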
def convert_SV_to_nparray(SVdict):
"""
Convert support vector (SV) of libSVM model in dict type, to numpy array
"""
key_max = max(SVdict.keys())
nparray = np.zeros(key_max+1)
for (k,v) in SVdict.iteritems():
if k >= 0:
nparray[k] = v
return nparray
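# Illustrative sketch (not part of the original module): a libSVM support
# vector stored sparsely as {1: 0.5, 3: 1.25} becomes the dense, 0-indexed
# array [0., 0.5, 0., 1.25]. The helper name `_example_convert_SV_to_nparray`
# is hypothetical.
def _example_convert_SV_to_nparray():
    dense = convert_SV_to_nparray({1: 0.5, 3: 1.25})
    assert dense.tolist() == [0.0, 0.5, 0.0, 1.25]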
def get_weight_vector_svm_model_linear(file_svm_model_linear, target_class = 1):
"""
Given a filename for a linear SVM model (in libSVM format),
return a linear weight vector
Two-class SVM is assumed, where weights will be translated to represent target_class (default 1)
"""
svm_model_linear = svmutil.svm_load_model(file_svm_model_linear)
SVs = svm_model_linear.get_SV() # list of dict
sv_coef = svm_model_linear.get_sv_coef()
labels = svm_model_linear.get_labels()
# Find target class idx
idx_target = None
for idx in range(len(labels)):
if labels[idx] == target_class:
idx_target = idx
break
    assert idx_target is not None
multiplier = 1.0
if idx_target == 1:
multiplier *= -1.0
# weight vector to be returned
w = None
for (wi, svi) in zip(sv_coef, SVs):
vi = convert_SV_to_nparray(svi)
wvi = wi*vi
        if w is None:
w = wvi
else:
w += wvi
w *= multiplier
return w
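# Illustrative sketch (not part of the original module): the loop above forms
# w = sum_i coef_i * sv_i, i.e. it collapses a linear SVM's support vectors
# into a single weight vector (libSVM's sv_coef values already carry the label
# sign). The names below are hypothetical stand-ins for values normally read
# from a model file.
def _example_linear_weight_vector():
    sv_coef = [0.75, -0.25]
    SVs = [np.array([1.0, 0.0]), np.array([0.0, 2.0])]
    w = np.zeros(2)
    for (wi, svi) in zip(sv_coef, SVs):
        w += wi * svi
    assert w.tolist() == [0.75, -0.5]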
def get_linear_coeffs_from_svm_model_nonlinear(model_libsvm, SVs, target_class = 1):
"""
Given non-linear libsvm model and support vectors,
computed weighted sum of SVs using libsvm weights for the target class
@param model_libsvm: loaded nonlinear libsvm model
@param SVs: support vectors in numpy array format (row-wise)
@param target_class: target class label (default 1)
@return: weighted sum of SVs using libsvm weights
"""
# Find target class idx within model
labels = model_libsvm.get_labels()
idx_target = None
for idx in range(len(labels)):
if labels[idx] == target_class:
idx_target = idx
break
    assert idx_target is not None
# if target class idx is 1 (not 0), then, flip the sign of weighted sum
multiplier = 1.0
if idx_target == 1:
multiplier *= -1.0
# get SV coefficients
sv_coefs = model_libsvm.get_sv_coef()
# compute weighted sum
wsum = None
for (wi, svi) in zip(sv_coefs, SVs):
        if wsum is None:
wsum = wi * svi
else:
wsum += (wi * svi)
wsum *= multiplier
return wsum
def prepare_precomputed_matrix(data):
"""
Converts a numpy 2D array data, into a form that can be used as an input for libSVM
@param data: numpy 2D array with size being *testdata(row)-by-training(col)*, with kernel values
@return: return a data list which is ready to be directly used as an input for libsvm
"""
n = data.shape[0]
indices = np.array([range(1,n+1)]).T
return (np.concatenate([indices, data], axis=1).tolist())
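# Illustrative sketch (not part of the original module): prepare_precomputed_matrix
# prepends the 1-based row index required by libSVM's precomputed-kernel input,
# so a 2x2 kernel block becomes [[1, k11, k12], [2, k21, k22]]. The helper name
# `_example_prepare_precomputed_matrix` is hypothetical.
def _example_prepare_precomputed_matrix():
    kernel = np.array([[1.0, 0.2], [0.2, 1.0]])
    assert prepare_precomputed_matrix(kernel) == [[1.0, 1.0, 0.2], [2.0, 0.2, 1.0]]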
#def get_precomputed_matrix_from_svm_model(svm_model, func_kernel, data_train, data_test,
# flag_prob=True):
# """
# Given a non-linear SVM model (in libSVM format),
# returns a pre-computed kernel matrix, and optional probabilities.
# NOTE: this implementation is not memory efficient, although simple. Consider using 'apply_compact_svm'.
# NOTE: NOT TESTED!!
#
# @param svm_model: parsed svm_model (via provided API from libSVM)
# @param func_kernel: func_kernel(x1,x2) computes the kernel value between x1 & x2
# @param data_train: n-by-d row-wise data matrix
# @param data_test: m-by-d row-wise data matrix
# @return: (matrix_kernel, scores) where matrix_kernel is in libSVM list of list format
# """
#
# print 'libsvm_tools.get_precomputed_matrix_from_svm_model: \n not tested..! Remove this msg after testing'
#
# # row ids of support vectors within training matrix
# # idxs were stored in SVM model as 1-base
# idxs_train_SVs = map(lambda x: int(x[0]), svm_model.get_SV())
#
# # compute kernel matrix, but, only against SVs from training data (to save computation)
# n = data_train.shape[0]
# m = data_test.shape[0]
# matrix_kernel = np.zeros((m, n+1)).tolist()
# for i in range(m):
# for j in idxs_train_SVs:
# matrix_kernel[i][j] = func_kernel(data_test[i], data_train[j-1])
#
# scores = None
# if flag_prob:
# options_te = '-b 1 '
# scores = np.array(svmutil.svm_predict_simple(matrix_kernel, svm_model, options_te))
#
# return (matrix_kernel, scores)
def get_SV_idxs_nonlinear_svm(svm_model):
""" From nonlinear SVM model, get idxs of SVs (w.r.t. training data)
"""
idxs_train_SVs = map(lambda x: int(x[0]), svm_model.get_SV())
return idxs_train_SVs
# def get_SV_weights_nonlinear_svm(svm_model, target_class = 1):
# """ From nonlinear SVM model, get weights for SVs, for the given target_class
# """
# idx_target = get_column_idx_for_class(svm_model, target_class)
# weights = np.array([ws[idx_target] for ws in svm_model.get_sv_coef()])
# return weights
def get_SV_weights_nonlinear_svm(svm_model, target_class=1, flag_manual_sign_flip=False):
""" From nonlinear SVM model, get weights for SVs, for the given target_class.
Only works for 1-vs-all training.
@todo: this implementation is not fully vetted, although it seems to be working during ECD learning
"""
# general implementation not working anymore with libSVM 3.12
# idx_target = get_column_idx_for_class(svm_model, target_class)
# weights = np.array([ws[idx_target] for ws in svm_model.get_sv_coef()])
weights = (np.array(svm_model.get_sv_coef())).flatten()
if flag_manual_sign_flip:
idx_target = get_column_idx_for_class(svm_model, target_class)
if idx_target != 0:
weights *= -1
return weights
def get_compact_nonlinear_svm(svm_model, data_train_orig):
"""
Given a non-linear SVM model, remove zero-weighted SVs, and also produce compact training data with SVs only
@param svm_model: loaded (non-linear) svm model
@param data_train_orig: n-by-d row-wise training data matrix in numpy format
    @return: an updated svm_model & support vectors sub-selected from data_train_orig
"""
n_SVs = svm_model.l
idxs_train_SVs = get_SV_idxs_nonlinear_svm(svm_model)
[_, d] = data_train_orig.shape
SVs = np.zeros([n_SVs, d]) # initialize memory
for i in range(n_SVs):
idx = idxs_train_SVs[i]
svm_model.SV[i][0].value = i+1 #idx is 1-base
SVs[i] = data_train_orig[idx-1]# use 0-base
return (svm_model, SVs)
def write_compact_nonlinear_svm(file_compact_svm, target_class,
file_svm_model, svm_model=None,
file_SVs=None, SVs=None,
str_kernel=None):
"""
Writes a textfile with all the necessary file locations for (nonlinear) libSVM agent
All the component files of 'file_compact_svm' will be written in the same directory
@param file_compact_svm: file to be written with all the information below
@param target_class: integer target class, e.g., 0 or 30.
@param file_svm_model: filename to the compact svm model to be written
@param file_SVs: filename to the support vectors (only applicable if nonlinear SVM)
@param str_kernel: string of kernel function to be used (e.g., kernels.ngd etc)
@param svm_model: actual svm_model from get_compact_nonlinear_svm, which will be saved at file_svm_model (if not already)
    @param SVs: actual support vectors in numpy format to be saved (if not already), generated by get_compact_nonlinear_svm
@return: 1 if success
"""
dir_compact = os.path.dirname(file_compact_svm)
if svm_model:
svmutil.svm_save_model(os.path.join(dir_compact, file_svm_model), svm_model)
if SVs is not None:
np.save(os.path.join(dir_compact, file_SVs), SVs)
    with open(file_compact_svm, 'wb') as fout:
        fout.write('file_svm_model=%s\n'%file_svm_model)
        fout.write('target_class=%d\n'%target_class)
        if file_SVs:
            fout.write('file_SVs=%s\n'%file_SVs)
        if str_kernel:
            fout.write('str_kernel=%s\n'%str_kernel)
        fout.flush()
def parse_compact_nonlinear_svm(file_compact_svm, flag_load_model=True):
"""
Parse configurations and/or actual models, based on
a config file written by write_compact_nonlinear_svm.
"""
print 'Loading (compact) nonlinear SVM configuration:\n%s...'%file_compact_svm
model = dict()
model['file_svm_model'] = None
model['svm_model'] = None
model['target_class'] = None
model['file_SVs'] = None
model['SVs'] = None
model['str_kernel'] = None
model['func_kernel'] = None
model_keys = model.keys()
with open(file_compact_svm) as fin:
for line in fin:
strs = line.strip().split('=')
if len(strs) == 2:
key = strs[0].strip()
if key in model_keys:
model[key] = strs[1].strip()
# string to integer
model['target_class'] = int(model['target_class'])
print model
if flag_load_model:
print '... finding kernel..'
model['func_kernel'] = getattr(kernels, model['str_kernel'])
dir_compact = os.path.dirname(file_compact_svm)
print '... loading SVM model..'
model['svm_model'] = svmutil.svm_load_model(os.path.join(dir_compact, model['file_svm_model']))
print '... loading SVs (may take some time)..'
tmp = os.path.join(dir_compact, model['file_SVs'])
if not os.path.exists(tmp):
tmp += '.npy'
model['SVs'] = np.load(tmp)
return model
def get_column_idx_for_class(model, target_class):
"""
Given a libSVM model, find the 0-base column index of the corresponding target_class label
This is necessary since labels can be encoded in arbitrary order in libSVM models
@param model: libSVM model
@param target_class: integer label
@return: index of the target_class
"""
idx_target = None
for idx in range(model.nr_class):
if model.label[idx] == target_class:
idx_target = idx
break
return idx_target
def apply_common_nonlinear_svm(model, kernel_matrix, kernel_matrix_recounting = None,
target_class = 1, model_is_compact = True):
""" Common routine to apply libSVM on test data, once the input data is structured in common format.
This uses a custom test implementation which bypasses libsvm routine (faster).
    This implementation is memory intensive when recounting is requested, since the full kernel_matrix_recounting array must be materialized.
@todo: This routine will generalize to full SVM as well, and used within EAG training CV as well
@param model: libsvm model
@param kernel_matrix: 2D array of kernel values, rows are test data, cols are training data (maybe SVs)
@type kernel_matrix: numpy.array
@param kernel_matrix_recounting: (optional) 3D array of kernel values, dim0: test data, dim1: training data, dim2: feature dimensions
@type kernel_matrix_recounting: numpy.array
@param target_class: the target class encoded in model, default = 1.
@type target_class: int
@param model_is_compact: Set to True (default),if compact SVM (only SVs are embedded among all used training examples).
If 'full' SVM model is used, then, set to False for correct behavior.
@type model_is_compact: bool
@return: dictionary with 'probs','margins', and optional 'margins_mer'
@rtype: dictionary with multiple numpy.array entries
"""
idx_target = get_column_idx_for_class(model, target_class)
if not model_is_compact:
idxs_SVs = get_SV_idxs_nonlinear_svm(model) # 1 base
idxs_SVs = [ (_idx - 1) for _idx in idxs_SVs] # 0 base
kernel_matrix = kernel_matrix[:, idxs_SVs]
if kernel_matrix_recounting is not None:
kernel_matrix_recounting = kernel_matrix_recounting[:, idxs_SVs, :]
# compute margins
# this part needs to be updated, to select SV row/columns
weights = get_SV_weights_nonlinear_svm(model, target_class = target_class)
margins = np.dot(kernel_matrix, weights)
# compute probs, using platt scaling
rho = model.rho[0]
probA = model.probA[0]
probB = model.probB[0]
probs = 1.0 / (1.0 + np.exp((margins - rho) * probA + probB))
    # compute margins_recounting
margins_recounting = None
if kernel_matrix_recounting is not None:
tmp = kernel_matrix_recounting.shape
margins_recounting = np.zeros((tmp[0], tmp[2]))
for i in range(tmp[0]):
margins_recounting[i] = np.dot(kernel_matrix_recounting[i].T, weights)
if idx_target == 1:
margins = -margins
probs = 1.0 - probs
if margins_recounting is not None:
margins_recounting = -margins_recounting
outputs = dict()
outputs['margins'] = margins
outputs['probs'] = probs
outputs['margins_recounting'] = margins_recounting
return outputs
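# Illustrative sketch (placeholder arguments): the common scorer above expects a
# test-vs-SV kernel matrix; kernels.compute_kernel_matrix (used elsewhere in this
# module) can build it, here with the 'hik' kernel assumed to be available in kernels.
def _example_score_with_common_scorer(svm_model, SVs, data_test):
    matrices = kernels.compute_kernel_matrix(data_test, SVs,
                                             func_kernel=kernels.hik,
                                             recounting=False)
    return apply_common_nonlinear_svm(svm_model, matrices['kernel_matrix'],
                                      target_class=1, model_is_compact=True)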
def apply_full_nonlinear_svm(model, data, report_margins_recounting=False):
""" Apply parsed full SVM model (original libSVM model with embedded SVs)
This is a custom implementation which bypasses libsvm routine (faster).
@param model: model parsed by event_agent_generator.parse_full_SVM_model
@param data: row-wise data vector/matrix in numpy format
@type data: numpy.array
@param report_margins_recounting: if True, report bin-wise contribution towards margin for every data_test
@type report_margins_recounting: bool
@return: dictionary with 'probs' and 'margins', which are each numpy array of floats
@rtype: dictionary of float numpy arrays
todo: add mer
"""
print '#training samples loaded by full SVM model: %d' %model['train_data'].shape[0]
matrices = kernels.compute_kernel_matrix(data, model['train_data'],
func_kernel = model['func_kernel'],
recounting = report_margins_recounting)
outputs = apply_common_nonlinear_svm(model['svm_model'],
kernel_matrix = matrices['kernel_matrix'],
kernel_matrix_recounting = matrices['kernel_matrix_recounting'],
target_class = model['target_class'],
model_is_compact = False)
return outputs
def apply_common_nonlinear_svm_memory_light(model, func_kernel, SVs, data,
target_class=1,
report_margins_recounting=False):
""" Common routine to apply nonlinear compact libSVM on test data,
Uses smaller memory foot print during recounting, than 'apply_common_nonlinear_svm'
@param model: libsvm model
@param func_kernel: kernel function
@param SVs: row-wise support vector matrix
@param data: test data in numpy format
@param target_class: target class
@type target_class: int
@param report_margins_recounting: if True, report recounting per data as well
    @return: dictionary with 'probs','margins', and optional 'margins_recounting'
@rtype: dictionary with multiple numpy.array entries
"""
# get SV weights
weights = get_SV_weights_nonlinear_svm(model, target_class=target_class)
# compute kernel_matrix and kernel_matrix_recounting
# in memory efficient way
n1 = data.shape[0]
dim = len(data[0])
n2 = SVs.shape[0]
# kernel matrix is |data|-by-|SVs|
kernel_matrix = np.zeros((n1, n2))
margins_recounting = None
if report_margins_recounting:
margins_recounting = np.zeros((n1, dim))
_tmp_in = np.zeros((1, dim))
for i in range(n1):
_tmp_in[0] = data[i]
# _tmp_out['kernel_matrix']: 1-by-|SVs|
# _tmp_out['kernel_matrix_recounting']: 1 x|SVs| x dim
_tmp_out = kernels.compute_kernel_matrix(_tmp_in, SVs, func_kernel=func_kernel,
recounting=report_margins_recounting)
kernel_matrix[i] = _tmp_out['kernel_matrix'][0]
if report_margins_recounting:
margins_recounting[i] = np.dot(_tmp_out['kernel_matrix_recounting'][0].T, weights)
# this part needs to be updated further for more generalization, to select SV row/columns
margins = np.dot(kernel_matrix, weights)
# compute probs, using platt scaling
rho = model.rho[0]
probA = model.probA[0]
probB = model.probB[0]
probs = 1.0 / (1.0 + np.exp((margins - rho) * probA + probB))
idx_target = get_column_idx_for_class(model, target_class)
if idx_target == 1:
margins = -margins
probs = 1.0 - probs
if margins_recounting is not None:
margins_recounting = -margins_recounting
outputs = dict()
outputs['margins'] = margins
outputs['probs'] = probs
outputs['margins_recounting'] = margins_recounting
return outputs
def apply_compact_nonlinear_svm(model, data, use_approx = False,
report_margins_recounting=False):
""" Apply parsed compact SVM model to new data.
This is a custom implementation which bypasses libsvm routine (faster).
@param model: model parsed from 'parse_compact_nonlinear_svm'
@param data: row-wise data vector/matrix in numpy format
@type data: numpy.array
@param report_margins_recounting: if True, report bin-wise contribution towards margin for every data_test
@type report_margins_recounting: bool
@return: dictionary with 'probs','margins','margins_recounting' which are each numpy array of floats
@rtype: dictionary of multiple numpy.array
"""
if use_approx:
        svm_model_approx = compute_approx_nonlinear_SVM(model)
outputs = apply_approx_nonlinear_SVM(svm_model_approx, data,
report_margins_recounting = report_margins_recounting)
else:
# handle report_margins_recounting
if not report_margins_recounting:
# speedy version without MER
matrices = kernels.compute_kernel_matrix(data, model['SVs'], func_kernel=model['func_kernel'],
recounting=report_margins_recounting)
outputs = apply_common_nonlinear_svm(model['svm_model'],
kernel_matrix=matrices['kernel_matrix'],
kernel_matrix_recounting=matrices['kernel_matrix_recounting'],
target_class=model['target_class'])
else:
# memory light implementation to deal with MER
outputs = apply_common_nonlinear_svm_memory_light(model['svm_model'], model['func_kernel'],
model['SVs'], data,
target_class=model['target_class'],
report_margins_recounting=report_margins_recounting)
return outputs
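# Illustrative sketch (hypothetical helper): when many batches must be scored, the
# additive approximation used by use_approx=True can be built once via
# compute_approx_nonlinear_SVM and then reused, instead of rebuilding it per call.
def _example_precomputed_approx_scorer(model, n_approx=500):
    svm_model_approx = compute_approx_nonlinear_SVM(model, n_approx=n_approx)
    def score(batch):
        return apply_approx_nonlinear_SVM(svm_model_approx, batch)['probs']
    return score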
def learn_compact_nonlinear_svm(file_libsvm_model0,
file_SVs,
file_libsvm_model1,
file_svm_compact,
str_kernel, options_train,
target_class,
labels,
data, file_data,
kernel_matrix, file_kernel_matrix, kernel_matrix_type, flag_kernel_compute,
splits, func_sort, logfile):
"""
@param file_libsvm_model0: file path for the leanred SVM model to be saved in libsvm format
@param file_SVs: filename of support vectors to be saved in numpy format
@param file_libsvm_model1: file path for compact SVM, still stored in libsvm format
@param file_svm_compact: file path to the full compact svm model to be written (with many other info)
@param str_kernel: string of kernel function to be used (e.g., kernels.ngd etc)
@param options_train: list of lisbsvm training strings to be tried, e.g., ['-b 1 -c 1','-b 1 -c 1000']
@param options_test: libsvm test option string to be used during cross-validation, e.g., '-b 1'
@param target_class: target positive class
@param labels: ground truth labels in integers.
Positive integers for event kit positives, Negatives for event kit negs, zero for None.
@param data: training data, numpy row-wise matrix. If None and kernel_matrix does not exist, then, read from file_data
@param file_data: file path to the input training 'data'. If data is None, then read from this file
@param kernel_matrix: kernel matrix
@param file_kernel_matrix: if kernel matrix is None, and this path is not, then, loaded from this file.
if flag_kernel_compute==True, then, computed kernel is saved to this file.
@param kernel_matrix_type: 'numpy' (square numpy matrix) or 'libsvm' (2dim list-type ready for libsvm)
@param flag_kernel_compute: if True, re-compute kernel matrix
@param splits: integer-based splits in numpy vector, e.g., [1 1 2 2 3 3] for 6 data in three splits
@param file_scores: if not None, save the scores generated by SVM during cross validation
    @param func_sort: func_sort(labels, scores, target_label) outputs an error/score used to rank the candidate svm training options
@param logfile: log file where info will be written, e.g., the pairs of options_train & func_error outputs
"""
_log = None
if logfile:
_log = open(logfile, 'wb')
# Learn nonlinear SVM model & save this initial model (before compactization) in libsvm format
model0 = None
# add training code with full data training
svmutil.svm_save_model(file_libsvm_model0, model0)
    if _log:
        _log.write('Saved initial nonlinear SVM (model0) at: %s\n' % file_libsvm_model0)
# computed compact SVM model 'model1'
(model1, SVs) = get_compact_nonlinear_svm(model0, data)
# write compact SVM model, with all the required information
write_compact_nonlinear_svm(file_svm_compact, target_class,
file_libsvm_model1, svm_model=model1,
file_SVs=file_SVs, SVs=SVs,
str_kernel=str_kernel)
    if _log:
        _log.write('Saved compact nonlinear SVM at: %s\n' % file_svm_compact)
        _log.close()
#########################################################################
# Approximate non-linear SVM model
# - only valid for testing
# - built from a compact SVM
# - Based on Maji's paper on efficient approximation of additive models
#########################################################################
def compute_approx_nonlinear_SVM(svm_model_compact, n_approx=500, verbose = False):
"""
    Given a compact non-linear SVM model, precompute a piecewise-linear (grid-based)
    approximation of its additive-kernel decision function for fast test-time evaluation.
    Based on Maji's paper on efficient approximation of additive models.
    @param svm_model_compact: compact (non-linear) svm model dict loaded by 'parse_compact_nonlinear_svm';
        support vectors are taken from its 'SVs' entry (do any required processing a priori)
    @param n_approx: the number of bins used to approximate each feature dimension; higher is more accurate but slower/more memory-intensive
    @param verbose: if True, print progress while building the approximation
@return: approximate SVM model
"""
# MODEL OUTPUT
svm_model_approx = dict()
svm_model_approx['str_kernel'] = svm_model_compact['str_kernel']
str_kernel = svm_model_approx['str_kernel']
svm_model_approx['target_class'] = svm_model_compact['target_class']
model_orig = svm_model_compact['svm_model'] # SVM model
svm_model_approx['rho'] = model_orig.rho[0]
svm_model_approx['probA'] = model_orig.probA[0]
svm_model_approx['probB'] = model_orig.probB[0]
svm_model_approx['target_index'] = get_column_idx_for_class(model_orig, svm_model_compact['target_class'])
svm_model_approx['n_approx'] = n_approx
SVs = svm_model_compact['SVs']
feat_dim = SVs.shape[1] # dimension of features
vals_max = np.amax(SVs, axis=0)
vals_min = np.amin(SVs, axis=0)
# approximation grid map
input_grids = np.zeros((n_approx, feat_dim))
for i in range(feat_dim):
input_grids[:,i] = np.linspace(vals_min[i], vals_max[i], num=n_approx)
    # step size for each bin (per feature dimension)
    step_sizes = input_grids[1, :] - input_grids[0, :]
# SVM model coefficients for SVs
n_SVs = model_orig.l
_sv_coef = model_orig.get_sv_coef()
sv_coef = np.zeros(n_SVs)
for (k, v) in enumerate(_sv_coef):
sv_coef[k] = v[0]
func_additive_func = None
if str_kernel == 'hik':
func_additive_func = lambda x,y: np.amin(np.vstack((x,y)), axis=0)
else:
# ADD MORE additive functions based on kernel function here
        raise Exception('Unknown kernel function: %s' % str_kernel)
# output grid map for all input grid map values
output_grids = np.zeros((n_approx, feat_dim))
for i in range(feat_dim):
        if verbose and (i % 200 == 0):
print 'libsvmtools.compute_approx: computing feature dim i=%d / %d'%(i, feat_dim)
for j in range(n_approx):
output_grids[j,i] = (sv_coef * func_additive_func((np.ones(n_SVs)*input_grids[j,i]), SVs[:,i])).sum()
# for k in range(n_SVs):
# output_grids[j,i] += sv_coef[k][0] * func_additive_func(input_grids[j,i], SVs[k,i])
svm_model_approx['input_grids'] = input_grids
svm_model_approx['output_grids'] = output_grids
svm_model_approx['vals_max'] = vals_max
svm_model_approx['vals_min'] = vals_min
svm_model_approx['step_sizes'] = step_sizes
return svm_model_approx
def linear_interpolate(x0, y0, x1, y1, x):
"""
Given (x0, y0), and (x1, y1), and an x in-between,
predict y via interpolation
"""
if x1 == x0:
y = 0
else:
y = y0 + (y1-y0)*((x-x0)/(x1-x0))
return y
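# Worked example (illustrative): interpolating between (x0, y0) = (1.0, 2.0) and
# (x1, y1) = (3.0, 6.0) at x = 2.0 gives y = 2.0 + (6.0 - 2.0) * (2.0 - 1.0) / (3.0 - 1.0) = 4.0.
def _example_linear_interpolate():
    return linear_interpolate(1.0, 2.0, 3.0, 6.0, 2.0)  # -> 4.0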
def apply_approx_nonlinear_SVM(svm_model_approx, data_test,
report_margins_recounting=False, verbose = False):
"""
Apply approximate SVM model, learned from 'compute_approx_nonlinear_SVM'
    @param svm_model_approx: approximate svm model produced by 'compute_approx_nonlinear_SVM'
@param data_test: row-wise test data in numpy array format
@param report_margins_recounting: if True, report bin-wise contribution towards margin for every data_test
@type report_margins_recounting: bool
    @return: dictionary of results, such as 'probs', 'margins', 'margins_recounting'
@rtype: dict of numpy arrays
"""
# number of data
n = data_test.shape[0]
input_grids = svm_model_approx['input_grids']
output_grids = svm_model_approx['output_grids']
feature_dim = output_grids.shape[1]
n_bins = output_grids.shape[0]
vals_min = svm_model_approx['vals_min']
vals_max = svm_model_approx['vals_max']
step_sizes = svm_model_approx['step_sizes']
eps = math.pow(2.0, -52)
# bin-wise contribution towards margin, for every data
margins_recounting = np.zeros((n,feature_dim))
for (i, data) in enumerate(data_test):
        if verbose and (i % 100 == 0):
            print 'libsvmtools.apply_approx_nonlinear_SVM: i= %d / %d' % (i, n)
for k in range(feature_dim):
if step_sizes[k] < eps: # constant along that dimension
margins_recounting[i,k] = output_grids[0,k]
#margins[i] += output_grids[0,k]
else:
v = data[k]
if v >= vals_max[k]:
margins_recounting[i,k] = output_grids[n_bins-1, k]
                elif v < vals_min[k]:
margins_recounting[i,k] = linear_interpolate(0,0, vals_min[k], output_grids[0,k], v)
else:
idx_map = int(math.floor((v - vals_min[k]) / step_sizes[k]))
try:
margins_recounting[i,k] = linear_interpolate(input_grids[idx_map,k], output_grids[idx_map,k],
input_grids[idx_map+1,k], output_grids[idx_map+1,k],
v)
                    except IndexError:
idx_map = len(input_grids) - 2
margins_recounting[i,k] = linear_interpolate(input_grids[idx_map,k], output_grids[idx_map,k],
input_grids[idx_map+1,k], output_grids[idx_map+1,k],
v)
# margins per data
margins = np.zeros(n) - svm_model_approx['rho']
margins += np.sum(margins_recounting, axis=1)
# probs through platt scaling
probs = 1.0 / (1.0 + np.exp((margins * svm_model_approx['probA']) + svm_model_approx['probB']))
if svm_model_approx['target_index'] == 1:
probs = 1.0 - probs
margins = -margins
margins_recounting = -margins_recounting
outputs = dict()
outputs['probs'] = probs
outputs['margins'] = margins
if report_margins_recounting:
outputs['margins_recounting'] = margins_recounting
return outputs
def write_approx_nonlinear_SVM(filename, svm_model_approx):
import cPickle
with open(filename, 'wb') as fout:
cPickle.dump(svm_model_approx, fout)
def load_approx_nonlinear_SVM(filename):
import cPickle
fin = open(filename, 'rb')
svm_model_approx = cPickle.load(fin)
fin.close()
return svm_model_approx
def write_approx_nonlinear_SVM_numpy(filename, svm_model_approx):
"""
Write Approximate Nonlinear SVM
"""
str_kernel = svm_model_approx['str_kernel']
param_array0 = np.zeros((1, svm_model_approx['input_grids'].shape[1]))
idx_kernel = None
    for (_str_kernel, idx) in kernels.IDXS_KERNELS:
        if _str_kernel == str_kernel:
idx_kernel = idx
break
param_array0[0,0] = idx_kernel
param_array0[0,1] = svm_model_approx['target_class']
param_array0[0,2] = svm_model_approx['target_index']
param_array0[0,3] = svm_model_approx['rho']
param_array0[0,4] = svm_model_approx['probA']
param_array0[0,5] = svm_model_approx['probB']
param_array0[0,6] = svm_model_approx['n_approx']
param_array1 = np.vstack((param_array0,
svm_model_approx['vals_max'],
svm_model_approx['vals_min'],
svm_model_approx['step_sizes'],
svm_model_approx['input_grids'],
svm_model_approx['output_grids']))
np.save(filename, param_array1)
def load_approx_nonlinear_SVM_numpy(filename):
"""
Load Approximate Nonlinear SVM
"""
param_array1 = np.load(filename)
idx_kernel = param_array1[0,0]
str_kernel = None
for (_str_kernel, idx) in kernels.IDXS_KERNELS:
if idx == idx_kernel:
str_kernel = _str_kernel
break
svm_model_approx = dict()
svm_model_approx['str_kernel'] = str_kernel
svm_model_approx['target_class'] = int(param_array1[0,1])
svm_model_approx['target_index'] = int(param_array1[0,2])
svm_model_approx['rho'] = param_array1[0,3]
svm_model_approx['probA'] = param_array1[0,4]
svm_model_approx['probB'] = param_array1[0,5]
svm_model_approx['n_approx'] = n_approx = int(param_array1[0,6])
svm_model_approx['vals_max'] = param_array1[1,:]
svm_model_approx['vals_min'] = param_array1[2,:]
svm_model_approx['step_sizes'] = param_array1[3,:]
svm_model_approx['input_grids'] = param_array1[4:(4+n_approx), :]
svm_model_approx['output_grids'] = param_array1[(4+n_approx):, :]
return svm_model_approx
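# Illustrative round-trip sketch (hypothetical filename): persist an approximate model
# with the numpy writer above and load it back for later scoring.
def _example_approx_model_roundtrip(svm_model_approx, filename='/tmp/approx_svm.npy'):
    write_approx_nonlinear_SVM_numpy(filename, svm_model_approx)
    return load_approx_nonlinear_SVM_numpy(filename)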
|
|
#!/usr/bin/python
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
'''This file generates shell code for the setup.SHELL scripts to set environment variables'''
from __future__ import print_function
import argparse
import copy
import errno
import os
import platform
import sys
CATKIN_MARKER_FILE = '.catkin'
system = platform.system()
IS_DARWIN = (system == 'Darwin')
IS_WINDOWS = (system == 'Windows')
# subfolder of workspace prepended to CMAKE_PREFIX_PATH
ENV_VAR_SUBFOLDERS = {
'CMAKE_PREFIX_PATH': '',
'CPATH': 'include',
'LD_LIBRARY_PATH' if not IS_DARWIN else 'DYLD_LIBRARY_PATH': ['lib', os.path.join('lib', 'x86_64-linux-gnu')],
'PATH': 'bin',
'PKG_CONFIG_PATH': [os.path.join('lib', 'pkgconfig'), os.path.join('lib', 'x86_64-linux-gnu', 'pkgconfig')],
'PYTHONPATH': 'lib/python2.7/dist-packages',
}
def rollback_env_variables(environ, env_var_subfolders):
'''
Generate shell code to reset environment variables
by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH.
This does not cover modifications performed by environment hooks.
'''
lines = []
unmodified_environ = copy.copy(environ)
for key in sorted(env_var_subfolders.keys()):
subfolders = env_var_subfolders[key]
if not isinstance(subfolders, list):
subfolders = [subfolders]
for subfolder in subfolders:
value = _rollback_env_variable(unmodified_environ, key, subfolder)
if value is not None:
environ[key] = value
lines.append(assignment(key, value))
if lines:
lines.insert(0, comment('reset environment variables by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH'))
return lines
def _rollback_env_variable(environ, name, subfolder):
'''
For each catkin workspace in CMAKE_PREFIX_PATH remove the first entry from env[NAME] matching workspace + subfolder.
:param subfolder: str '' or subfoldername that may start with '/'
:returns: the updated value of the environment variable.
'''
value = environ[name] if name in environ else ''
env_paths = [path for path in value.split(os.pathsep) if path]
value_modified = False
if subfolder:
if subfolder.startswith(os.path.sep) or (os.path.altsep and subfolder.startswith(os.path.altsep)):
subfolder = subfolder[1:]
if subfolder.endswith(os.path.sep) or (os.path.altsep and subfolder.endswith(os.path.altsep)):
subfolder = subfolder[:-1]
for ws_path in _get_workspaces(environ, include_fuerte=True, include_non_existing=True):
path_to_find = os.path.join(ws_path, subfolder) if subfolder else ws_path
path_to_remove = None
for env_path in env_paths:
env_path_clean = env_path[:-1] if env_path and env_path[-1] in [os.path.sep, os.path.altsep] else env_path
if env_path_clean == path_to_find:
path_to_remove = env_path
break
if path_to_remove:
env_paths.remove(path_to_remove)
value_modified = True
new_value = os.pathsep.join(env_paths)
return new_value if value_modified else None
def _get_workspaces(environ, include_fuerte=False, include_non_existing=False):
'''
Based on CMAKE_PREFIX_PATH return all catkin workspaces.
:param include_fuerte: The flag if paths starting with '/opt/ros/fuerte' should be considered workspaces, ``bool``
'''
# get all cmake prefix paths
env_name = 'CMAKE_PREFIX_PATH'
value = environ[env_name] if env_name in environ else ''
paths = [path for path in value.split(os.pathsep) if path]
# remove non-workspace paths
workspaces = [path for path in paths if os.path.isfile(os.path.join(path, CATKIN_MARKER_FILE)) or (include_fuerte and path.startswith('/opt/ros/fuerte')) or (include_non_existing and not os.path.exists(path))]
return workspaces
def prepend_env_variables(environ, env_var_subfolders, workspaces):
'''
Generate shell code to prepend environment variables
    for all the workspaces.
'''
lines = []
lines.append(comment('prepend folders of workspaces to environment variables'))
paths = [path for path in workspaces.split(os.pathsep) if path]
prefix = _prefix_env_variable(environ, 'CMAKE_PREFIX_PATH', paths, '')
lines.append(prepend(environ, 'CMAKE_PREFIX_PATH', prefix))
for key in sorted([key for key in env_var_subfolders.keys() if key != 'CMAKE_PREFIX_PATH']):
subfolder = env_var_subfolders[key]
prefix = _prefix_env_variable(environ, key, paths, subfolder)
lines.append(prepend(environ, key, prefix))
return lines
def _prefix_env_variable(environ, name, paths, subfolders):
'''
    Return the prefix to prepend to the environment variable NAME, adding any path in 'paths' (combined with 'subfolders') without creating duplicate or empty items.
'''
value = environ[name] if name in environ else ''
environ_paths = [path for path in value.split(os.pathsep) if path]
checked_paths = []
for path in paths:
if not isinstance(subfolders, list):
subfolders = [subfolders]
for subfolder in subfolders:
path_tmp = path
if subfolder:
path_tmp = os.path.join(path_tmp, subfolder)
# exclude any path already in env and any path we already added
if path_tmp not in environ_paths and path_tmp not in checked_paths:
checked_paths.append(path_tmp)
prefix_str = os.pathsep.join(checked_paths)
if prefix_str != '' and environ_paths:
prefix_str += os.pathsep
return prefix_str
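# Illustrative sketch (made-up paths): prepending a workspace to an already populated
# CMAKE_PREFIX_PATH yields the new path plus a trailing separator, because the variable
# already has a value that will follow the prefix.
def _example_prefix_env_variable():
    env = {'CMAKE_PREFIX_PATH': '/opt/ros/indigo'}
    return _prefix_env_variable(env, 'CMAKE_PREFIX_PATH', ['/home/user/catkin_ws/devel'], '')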
def assignment(key, value):
if not IS_WINDOWS:
return 'export %s="%s"' % (key, value)
else:
return 'set %s=%s' % (key, value)
def comment(msg):
if not IS_WINDOWS:
return '# %s' % msg
else:
return 'REM %s' % msg
def prepend(environ, key, prefix):
if key not in environ or not environ[key]:
return assignment(key, prefix)
if not IS_WINDOWS:
return 'export %s="%s$%s"' % (key, prefix, key)
else:
return 'set %s=%s%%%s%%' % (key, prefix, key)
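# Illustrative sketch (placeholder values): the kind of shell lines the helpers above
# emit on a non-Windows platform, e.g. 'export PATH="<prefix>$PATH"'.
def _example_generated_shell_lines():
    environ = {'PATH': '/usr/bin'}
    return [
        comment('prepend folders of workspaces to environment variables'),
        assignment('CMAKE_PREFIX_PATH', '/home/user/catkin_ws/devel'),
        prepend(environ, 'PATH', '/home/user/catkin_ws/devel/bin' + os.pathsep),
    ]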
def find_env_hooks(environ, cmake_prefix_path):
'''
Generate shell code with found environment hooks
    for all the workspaces.
'''
lines = []
lines.append(comment('found environment hooks in workspaces'))
generic_env_hooks = []
generic_env_hooks_workspace = []
specific_env_hooks = []
specific_env_hooks_workspace = []
generic_env_hooks_by_filename = {}
specific_env_hooks_by_filename = {}
generic_env_hook_ext = 'bat' if IS_WINDOWS else 'sh'
specific_env_hook_ext = environ['CATKIN_SHELL'] if not IS_WINDOWS and 'CATKIN_SHELL' in environ and environ['CATKIN_SHELL'] else None
# remove non-workspace paths
workspaces = [path for path in cmake_prefix_path.split(os.pathsep) if path and os.path.isfile(os.path.join(path, CATKIN_MARKER_FILE))]
for workspace in reversed(workspaces):
env_hook_dir = os.path.join(workspace, 'etc', 'catkin', 'profile.d')
if os.path.isdir(env_hook_dir):
for filename in sorted(os.listdir(env_hook_dir)):
if filename.endswith('.%s' % generic_env_hook_ext):
# remove previous env hook with same name if present
if filename in generic_env_hooks_by_filename:
i = generic_env_hooks.index(generic_env_hooks_by_filename[filename])
generic_env_hooks.pop(i)
generic_env_hooks_workspace.pop(i)
# append env hook
generic_env_hooks.append(os.path.join(env_hook_dir, filename))
generic_env_hooks_workspace.append(workspace)
generic_env_hooks_by_filename[filename] = generic_env_hooks[-1]
elif specific_env_hook_ext is not None and filename.endswith('.%s' % specific_env_hook_ext):
# remove previous env hook with same name if present
if filename in specific_env_hooks_by_filename:
i = specific_env_hooks.index(specific_env_hooks_by_filename[filename])
specific_env_hooks.pop(i)
specific_env_hooks_workspace.pop(i)
# append env hook
specific_env_hooks.append(os.path.join(env_hook_dir, filename))
specific_env_hooks_workspace.append(workspace)
specific_env_hooks_by_filename[filename] = specific_env_hooks[-1]
env_hooks = generic_env_hooks + specific_env_hooks
env_hooks_workspace = generic_env_hooks_workspace + specific_env_hooks_workspace
count = len(env_hooks)
lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_COUNT', count))
for i in range(count):
lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_%d' % i, env_hooks[i]))
lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_%d_WORKSPACE' % i, env_hooks_workspace[i]))
return lines
def _parse_arguments(args=None):
parser = argparse.ArgumentParser(description='Generates code blocks for the setup.SHELL script.')
parser.add_argument('--extend', action='store_true', help='Skip unsetting previous environment variables to extend context')
return parser.parse_known_args(args=args)[0]
if __name__ == '__main__':
try:
try:
args = _parse_arguments()
except Exception as e:
print(e, file=sys.stderr)
sys.exit(1)
# environment at generation time
CMAKE_PREFIX_PATH = '/home/rob/catkin_ws/devel;/opt/ros/indigo'.split(';')
# prepend current workspace if not already part of CPP
base_path = os.path.dirname(__file__)
if base_path not in CMAKE_PREFIX_PATH:
CMAKE_PREFIX_PATH.insert(0, base_path)
CMAKE_PREFIX_PATH = os.pathsep.join(CMAKE_PREFIX_PATH)
environ = dict(os.environ)
lines = []
if not args.extend:
lines += rollback_env_variables(environ, ENV_VAR_SUBFOLDERS)
lines += prepend_env_variables(environ, ENV_VAR_SUBFOLDERS, CMAKE_PREFIX_PATH)
lines += find_env_hooks(environ, CMAKE_PREFIX_PATH)
print('\n'.join(lines))
# need to explicitly flush the output
sys.stdout.flush()
except IOError as e:
        # and catch potential "broken pipe" if stdout is not writable
# which can happen when piping the output to a file but the disk is full
if e.errno == errno.EPIPE:
print(e, file=sys.stderr)
sys.exit(2)
raise
sys.exit(0)
|
|
import itertools
import operator
import re
from copy import deepcopy
from scrapy import log
from scrapy.http import Request, HtmlResponse, FormRequest
try:
from scrapy.spider import Spider
except ImportError:
# BaseSpider class was deprecated in Scrapy 0.21
from scrapy.spider import BaseSpider as Spider
from scrapely.htmlpage import HtmlPage, dict_to_page
from scrapely.extraction import InstanceBasedLearningExtractor
from loginform import fill_login_form
from slybot.item import SlybotItem, create_slybot_item_descriptor
from slybot.extractors import apply_extractors
from slybot.utils import iter_unique_scheme_hostname, htmlpage_from_response
from slybot.linkextractor import HtmlLinkExtractor, RssLinkExtractor, create_linkextractor_from_specs
from slybot.generic_form import GenericForm
def _process_extracted_data(extracted_data, item_descriptor, htmlpage):
processed_data = []
for exdict in extracted_data or ():
processed_attributes = []
for key, value in exdict.items():
if key == "variants":
processed_attributes.append(("variants", _process_extracted_data(value, item_descriptor, htmlpage)))
elif not key.startswith("_sticky"):
field_descriptor = item_descriptor.attribute_map.get(key)
if field_descriptor:
value = [field_descriptor.adapt(x, htmlpage) for x in value]
processed_attributes.append((key, value))
processed_data.append(processed_attributes)
return [dict(p) for p in processed_data]
class IblSpider(Spider):
def __init__(self, name, spec, item_schemas, all_extractors, **kw):
super(IblSpider, self).__init__(name, **kw)
spec = deepcopy(spec)
for key, val in kw.items():
if isinstance(val, basestring) and key in ['start_urls', 'exclude_patterns', 'follow_patterns', 'allowed_domains']:
val = val.splitlines()
spec[key] = val
self._item_template_pages = sorted((
[t['scrapes'], dict_to_page(t, 'annotated_body'),
t.get('extractors', [])] \
for t in spec['templates'] if t.get('page_type', 'item') == 'item'
), key=lambda pair: pair[0])
# generate ibl extractor for links pages
_links_pages = [dict_to_page(t, 'annotated_body')
for t in spec['templates'] if t.get('page_type') == 'links']
_links_item_descriptor = create_slybot_item_descriptor({'fields': {}})
self._links_ibl_extractor = InstanceBasedLearningExtractor([(t, _links_item_descriptor) for t in _links_pages]) \
if _links_pages else None
self._ipages = [page for _, page, _ in self._item_template_pages]
self.html_link_extractor = HtmlLinkExtractor()
self.rss_link_extractor = RssLinkExtractor()
self.build_url_filter(spec)
self.itemcls_info = {}
for itemclass_name, triplets in itertools.groupby(self._item_template_pages, operator.itemgetter(0)):
page_extractors_pairs = map(operator.itemgetter(1, 2), triplets)
schema = item_schemas[itemclass_name]
item_cls = SlybotItem.create_iblitem_class(schema)
page_descriptor_pairs = []
for page, template_extractors in page_extractors_pairs:
item_descriptor = create_slybot_item_descriptor(schema)
apply_extractors(item_descriptor, template_extractors, all_extractors)
page_descriptor_pairs.append((page, item_descriptor))
extractor = InstanceBasedLearningExtractor(page_descriptor_pairs)
self.itemcls_info[itemclass_name] = {
'class': item_cls,
'descriptor': item_descriptor,
'extractor': extractor,
}
self.login_requests = []
self.form_requests = []
self._start_requests = []
self.generic_form = GenericForm(**kw)
self._create_init_requests(spec.get("init_requests", []))
self._process_start_urls(spec)
self.allowed_domains = spec.get('allowed_domains',
self._get_allowed_domains(self._ipages))
if not self.allowed_domains:
self.allowed_domains = None
def _process_start_urls(self, spec):
self.start_urls = spec.get('start_urls')
for url in self.start_urls:
self._start_requests.append(Request(url, callback=self.parse, dont_filter=True))
def _create_init_requests(self, spec):
for rdata in spec:
if rdata["type"] == "login":
request = Request(url=rdata.pop("loginurl"), meta=rdata,
callback=self.parse_login_page, dont_filter=True)
self.login_requests.append(request)
elif rdata["type"] == "form":
self.form_requests.append(self.get_generic_form_start_request(rdata))
elif rdata["type"] == "start":
self._start_requests.append(self._create_start_request_from_specs(rdata))
def parse_login_page(self, response):
username = response.request.meta["username"]
password = response.request.meta["password"]
args, url, method = fill_login_form(response.url, response.body, username, password)
return FormRequest(url, method=method, formdata=args, callback=self.after_login, dont_filter=True)
def after_login(self, response):
for result in self.parse(response):
yield result
for req in self._start_requests:
yield req
def get_generic_form_start_request(self, form_descriptor):
file_fields = list(self.generic_form.get_url_field(form_descriptor))
if file_fields:
(field_index, field_descriptor) = file_fields.pop(0)
form_descriptor['field_index'] = field_index
return FormRequest(self.generic_form.get_value(field_descriptor), meta=form_descriptor,
callback=self.parse_field_url_page, dont_filter=True)
else:
return Request(url=form_descriptor.pop("form_url"), meta=form_descriptor,
callback=self.parse_form_page, dont_filter=True)
def parse_field_url_page(self, response):
form_descriptor = response.request.meta
field_index = form_descriptor['field_index']
field_descriptor = form_descriptor['fields'][field_index]
self.generic_form.set_values_url_field(field_descriptor, response.body)
yield self.get_generic_form_start_request(form_descriptor)
def parse_form_page(self, response):
try:
for (args, url, method) in self.generic_form.fill_generic_form(response.url,
response.body,
response.request.meta):
yield FormRequest(url, method=method, formdata=args,
callback=self.after_form_page, dont_filter=True)
except Exception, e:
self.log(str(e), log.WARNING)
for req in self._start_requests:
yield req
def after_form_page(self, response):
for result in self.parse(response):
yield result
def _get_allowed_domains(self, templates):
urls = [x.url for x in templates]
urls += [x.url for x in self._start_requests]
return [x[1] for x in iter_unique_scheme_hostname(urls)]
def _requests_to_follow(self, htmlpage):
if self._links_ibl_extractor is not None:
extracted = self._links_ibl_extractor.extract(htmlpage)[0]
if extracted:
extracted_regions = extracted[0].get('_links', [])
seen = set()
for region in extracted_regions:
htmlregion = HtmlPage(htmlpage.url, htmlpage.headers, region, encoding=htmlpage.encoding)
for request in self._request_to_follow_from_region(htmlregion):
if request.url in seen:
continue
seen.add(request.url)
yield request
else:
for request in self._request_to_follow_from_region(htmlpage):
yield request
def _request_to_follow_from_region(self, htmlregion):
seen = set()
for link in self.html_link_extractor.links_to_follow(htmlregion):
request = self._filter_link(link, seen)
if request is not None:
yield request
def _filter_link(self, link, seen):
url = link.url
if self.url_filterf(link):
# filter out duplicate urls, later we should handle link text
if url not in seen:
seen.add(url)
request = Request(url)
if link.text:
request.meta['link_text'] = link.text
return request
def start_requests(self):
start_requests = []
if self.login_requests:
start_requests = self.login_requests
elif self.form_requests:
start_requests = self.form_requests
else:
start_requests = self._start_requests
for req in start_requests:
yield req
def _create_start_request_from_specs(self, info):
url = info["url"]
lspecs = info.get("link_extractor")
if lspecs:
linkextractor = create_linkextractor_from_specs(lspecs)
def _callback(spider, response):
for link in linkextractor.links_to_follow(response):
yield Request(url=link.url, callback=spider.parse)
return Request(url=url, callback=_callback)
return Request(url=url, callback=self.parse)
def parse(self, response):
"""Main handler for all downloaded responses"""
content_type = response.headers.get('Content-Type', '')
if isinstance(response, HtmlResponse):
return self.handle_html(response)
elif "application/rss+xml" in content_type:
return self.handle_rss(response)
else:
self.log("Ignoring page with content-type=%r: %s" % (content_type, \
response.url), level=log.DEBUG)
return []
def _process_link_regions(self, htmlpage, link_regions):
"""Process link regions if any, and generate requests"""
if link_regions:
for link_region in link_regions:
htmlregion = HtmlPage(htmlpage.url, htmlpage.headers, \
link_region, encoding=htmlpage.encoding)
for request in self._requests_to_follow(htmlregion):
yield request
else:
for request in self._requests_to_follow(htmlpage):
yield request
def handle_rss(self, response):
seen = set()
for link in self.rss_link_extractor.links_to_follow(response):
request = self._filter_link(link, seen)
if request:
yield request
def handle_html(self, response):
htmlpage = htmlpage_from_response(response)
items, link_regions = self.extract_items(htmlpage)
for item in items:
yield item
for request in self._process_link_regions(htmlpage, link_regions):
yield request
def extract_items(self, htmlpage):
"""This method is also called from UI webservice to extract items"""
items = []
link_regions = []
for item_cls_name, info in self.itemcls_info.iteritems():
item_descriptor = info['descriptor']
extractor = info['extractor']
extracted, _link_regions = self._do_extract_items_from(
htmlpage,
item_descriptor,
extractor,
item_cls_name,
)
items.extend(extracted)
link_regions.extend(_link_regions)
return items, link_regions
def _do_extract_items_from(self, htmlpage, item_descriptor, extractor, item_cls_name):
extracted_data, template = extractor.extract(htmlpage)
link_regions = []
for ddict in extracted_data or []:
link_regions.extend(ddict.pop("_links", []))
processed_data = _process_extracted_data(extracted_data, item_descriptor, htmlpage)
items = []
item_cls = self.itemcls_info[item_cls_name]['class']
for processed_attributes in processed_data:
item = item_cls(processed_attributes)
item['url'] = htmlpage.url
item['_type'] = item_cls_name
item['_template'] = str(template.id)
items.append(item)
return items, link_regions
def build_url_filter(self, spec):
"""make a filter for links"""
respect_nofollow = spec.get('respect_nofollow', True)
patterns = spec.get('follow_patterns')
if spec.get("links_to_follow") == "none":
url_filterf = lambda x: False
elif patterns:
pattern = patterns[0] if len(patterns) == 1 else "(?:%s)" % '|'.join(patterns)
follow_pattern = re.compile(pattern)
if respect_nofollow:
url_filterf = lambda x: follow_pattern.search(x.url) and not x.nofollow
else:
url_filterf = lambda x: follow_pattern.search(x.url)
elif respect_nofollow:
url_filterf = lambda x: not x.nofollow
else:
url_filterf = bool
# apply exclude patterns
exclude_patterns = spec.get('exclude_patterns')
if exclude_patterns:
pattern = exclude_patterns[0] if len(exclude_patterns) == 1 else "(?:%s)" % '|'.join(exclude_patterns)
exclude_pattern = re.compile(pattern)
self.url_filterf = lambda x: not exclude_pattern.search(x.url) and url_filterf(x)
else:
self.url_filterf = url_filterf
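# Illustrative sketch (made-up values): the spec fields consumed by build_url_filter
# above. With this spec, only links whose URL matches '/product/' and does not match
# '/product/old/', and which are not rel=nofollow, would be followed; setting
# 'links_to_follow' to "none" would disable link following entirely.
def _example_url_filter_spec():
    return {
        'respect_nofollow': True,
        'follow_patterns': ['/product/'],
        'exclude_patterns': ['/product/old/'],
    }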
|
|
from statistics import mean, stdev
import visa
class Keithley2400:
def __init__(self, gpib_addr=23):
"""
Constructor for Keithley 2400 Sourcemeter
:param gpib_addr: GPIB address (configured on Keithley 2400)
"""
self._gpib_addr = str(gpib_addr)
self._resource_manager = visa.ResourceManager()
self._instrument = self._resource_manager.open_resource("GPIB::{}".format(self.gpib_addr))
@property
def gpib_addr(self):
"""Returns the GPIB address of the Keithley 2400 Sourcemeter."""
return self._gpib_addr
# source functions
@property
def source_type(self):
"""Gets or sets the source type of the Keithley 2400 SourceMeter.
Expected strings for setting: 'voltage', 'current'"""
response = self._instrument.query("source:function:mode?").strip()
source_type = {'VOLT': 'voltage', 'CURR': 'current'}
return source_type[response]
@source_type.setter
def source_type(self, value):
if value.lower() == 'voltage' or value.lower() == 'v':
source = 'voltage'
self._instrument.write("source:function:mode {}".format(source.lower()))
elif value.lower() == 'current' or value.lower() == 'i':
source = 'current'
self._instrument.write('source:function:mode {}'.format(source.lower()))
else:
raise RuntimeError('Not a valid source type.')
@property
def source_mode(self):
"""Gets or sets the mode of the source.
Expected strings for setting: 'fixed', 'sweep', 'list'"""
# TODO: test
return self._instrument.query('source:' + self.source_type.lower() + ':mode?')
@source_mode.setter
def source_mode(self, mode):
if mode.lower() in ('fixed', 'sweep', 'list'):
self._instrument.write('source:' + self.source_type.lower() + ':mode {}'.format(mode))
else:
raise RuntimeError('Mode is not one of [fixed | sweep | list]')
@property
def source_value(self):
"""Get or set the numeric value of the source chosen from Keithley2400.source_type."""
# TODO: test
return self._instrument.query('source:' + self.source_type.lower() + ':level?')
@source_value.setter
def source_value(self, value):
self._instrument.write("source:function:mode " + self.source_type.lower())
self._instrument.write("source:" + self.source_type.lower() + ":range " + str(value))
self._instrument.write("source:" + self.source_type.lower() + ":level " + str(value))
@property
def measure_type(self):
"""The type of measurement the Keithley 2400 SourceMeter will make.
Expected strings for setting: 'voltage', 'current', 'resistance'
"""
measure_type = {'VOLT:DC': 'voltage', 'CURR:DC': 'current', 'RES': 'resistance'}
measure_type_response = self._instrument.query("sense:function?").strip().replace('\"', '').split(',')[-1]
return measure_type[measure_type_response]
@measure_type.setter
def measure_type(self, value):
measure_type = {'voltage': '\'VOLTAGE:DC\'', 'current': '\'CURRENT:DC\'', 'resistance': 'RESISTANCE'}
if value.lower() in measure_type:
self._instrument.write("sense:function:ON {}".format(measure_type[value.lower()]))
else:
raise RuntimeError('Expected a value from [\'voltage\'|\'current\'|\'resistance\'')
# Resistance sensing
@property
def resistance_ohms_mode(self):
"""Gets or sets the resistance mode.
Expected strings for setting: 'manual', 'auto'"""
modes = {'MAN': 'manual', 'AUTO': 'auto'}
response = self._instrument.query('sense:resistance:mode?').strip()
return modes[response]
@resistance_ohms_mode.setter
def resistance_ohms_mode(self, value):
modes = {'manual': 'MAN', 'auto': 'AUTO'}
if value.lower() in modes.keys():
self._instrument.write('sense:resistance:mode {}'.format(modes[value.lower()]))
else:
raise RuntimeError('Expected a value from [\'manual\'|\'auto\']')
@property
def expected_ohms_reading(self):
"""Gets or sets the expected range of a resistance reading from the device under test."""
response = self._instrument.query('sense:resistance:range?').strip()
return float(response)
@expected_ohms_reading.setter
def expected_ohms_reading(self, value):
if isinstance(value, int) or isinstance(value, float):
self._instrument.write('sense:resistance:range {}'.format(value))
else:
raise RuntimeError('Expected an int or float.')
@property
def four_wire_sensing(self):
"""Gets the status of or sets four-wire sensing.
Expected booleans for setting: True, False."""
response = self._instrument.query('system:rsense?').strip()
return bool(int(response))
@four_wire_sensing.setter
def four_wire_sensing(self, value):
if isinstance(value, bool):
self._instrument.write('system:rsense {}'.format(int(value)))
else:
raise RuntimeError('Expected boolean value.')
# Voltage sensing and compliance
@property
def expected_voltage_reading(self):
"""Gets or sets the expected voltage reading from the device under test."""
response = self._instrument.query('sense:voltage:RANGE?').strip()
return float(response)
@expected_voltage_reading.setter
def expected_voltage_reading(self, value):
if isinstance(value, int) or isinstance(value, float):
self._instrument.write('sense:voltage:range {}'.format(value))
else:
raise RuntimeError('Expected an int or float.')
@property
def voltage_compliance(self):
"""Gets or sets the voltage compliance.
Expected range of floats: 200e-6 <= x <= 210"""
response = self._instrument.query("SENS:VOLT:PROT:LEV?").strip()
return float(response)
@voltage_compliance.setter
def voltage_compliance(self, value):
if 200e-6 <= value <= 210:
self._instrument.write("SENS:VOLT:PROT {}".format(str(value)))
else:
raise RuntimeError('Voltage compliance cannot be set. Value must be between 200 \u03BC' + 'V and 210 V.')
def within_voltage_compliance(self):
"""Queries if the measured voltage is within the set compliance.
:returns: boolean"""
response = self._instrument.query('SENS:VOLT:PROT:TRIP?').strip()
return not bool(int(response))
    # Current sensing and compliance
@property
def expected_current_reading(self):
"""Gets or sets the expected current reading from the device under test."""
response = self._instrument.query('sense:current:range?').strip()
return float(response)
@expected_current_reading.setter
def expected_current_reading(self, value):
if isinstance(value, int) or isinstance(value, float):
self._instrument.write('sense:current:range {}'.format(value))
else:
            raise RuntimeError('Expected an int or float.')
@property
def current_compliance(self):
"""Sets or gets the current compliance level in Amperes."""
response = self._instrument.query("SENS:CURR:PROT:LEV?").strip()
return float(response)
@current_compliance.setter
def current_compliance(self, value):
if 1e-9 <= value <= 1.05:
self._instrument.write("SENS:CURR:PROT {}".format(str(value)))
else:
raise RuntimeError('Current compliance cannot be set. Value must be between 1 nA and 1.05 A.')
def within_current_compliance(self):
"""Queries if the measured current is within the set compliance.
:returns: boolean"""
response = self._instrument.query('SENS:CURR:PROT:TRIP?').strip()
return not bool(int(response))
# Output configuration
@property
def output(self):
"""Gets or sets the source output of the Keithley 2400.
Expected input: boolean
:returns: boolean"""
output = {'0': False, '1': True}
response = self._instrument.query("OUTP?").strip()
return output[response]
@output.setter
def output(self, value):
if value:
self._instrument.write("OUTP ON")
else:
self._instrument.write("OUTP OFF")
@property
def output_off_mode(self):
"""Gets or sets the output mode when the output is off.
Expected input strings: 'himp', 'normal', 'zero', 'guard'
:returns: description of the output's off mode"""
modes = {'HIMP': 'high impedance', 'NORM': 'normal', 'ZERO': 'zero', 'GUAR': 'guard'}
response = self._instrument.query('OUTP:SMOD?').strip()
return modes[response]
@output_off_mode.setter
def output_off_mode(self, value):
modes = {'high impedance': 'HIMP', 'himp': 'HIMP', 'normal': 'NORM', 'norm': 'NORM',
'zero': 'ZERO', '0': 'ZERO', 'guard': 'GUARD'}
self._instrument.write('OUTP:SMOD {}'.format(modes[value.lower()]))
# Data acquisition
def read(self, *measurements):
"""
Reads data from the Keithley 2400. Equivalent to the command :INIT; :FETCH?
Multiple string arguments may be used. For example::
keithley.read('voltage', 'current')
keithley.read('time')
The first line returns a list in the form [voltage, current] and the second line
returns a list in the form [time].
        Note: The returned lists contain the values in the order that you requested.
:param str *measurements: Any number of arguments that are from: 'voltage', 'current', 'resistance', 'time'
:return list measure_list: A list of the arithmetic means in the order of the given arguments
:return list measure_stdev_list: A list of the standard deviations (if more than 1 measurement) in the order
of the given arguments
"""
response = self._instrument.query('read?').strip().split(',')
response = [float(x) for x in response]
read_types = {'voltage': 0, 'current': 1, 'resistance': 2, 'time': 3}
measure_list = []
measure_stdev_list = []
for measurement in measurements:
samples = response[read_types[measurement]::5]
measure_list.append(mean(samples))
if len(samples) > 1:
measure_stdev_list.append(stdev(samples))
return measure_list, measure_stdev_list
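    # Illustrative sketch (not a documented driver routine): a convenience wrapper
    # around read() that returns only the mean voltage and current of the latest cycle.
    def _example_read_iv_point(self):
        means, _stdevs = self.read('voltage', 'current')
        voltage, current = means
        return voltage, current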
# Trigger functions
@property
def trace_delay(self):
"""The amount of time the SourceMeter waits after the trigger to perform Device Action."""
return float(self._instrument.query('trigger:delay?').strip())
@trace_delay.setter
def trace_delay(self, delay):
if isinstance(delay, float) or isinstance(delay, int):
if 0.0 <= delay <= 999.9999:
self._instrument.write('trigger:delay {}'.format(delay))
else:
raise RuntimeError('Expected delay to be between 0.0 and 999.9999 seconds.')
else:
raise RuntimeError('Expected delay to be an int or float.')
@property
def trigger(self):
"""Gets or sets the type of trigger to be used.
Expected strings for setting: 'immediate', 'tlink', 'timer', 'manual', 'bus',
'nst', 'pst', 'bst' (see source code for other possibilities)"""
triggers = {'IMM': 'immediate', 'TLIN': 'trigger link', 'TIM': 'timer',
'MAN': 'manual', 'BUS': 'bus trigger', 'NST': 'low SOT pulse',
'PST': 'high SOT pulse', 'BST': 'high or low SOT pulse'}
        return triggers[self._instrument.query('trigger:source?').strip()]
@trigger.setter
def trigger(self, trigger):
triggers = {
'imm': 'IMM', 'immediate': 'IMM',
'tlin': 'TLIN', 'tlink': 'TLIN', 'trigger link': 'TLIN',
'tim': 'TIM', 'timer': 'TIM',
'man': 'MAN', 'manual': 'MAN',
'bus': 'BUS', 'bus trigger': 'BUS',
'nst': 'NST', 'low SOT pulse': 'NST',
'pst': 'PST', 'high SOT pulse': 'PST',
'bst': 'BST', 'high or low SOT pulse': 'BST'
}
if trigger.lower() in triggers.keys():
            self._instrument.write('trigger:source {}'.format(triggers[trigger.lower()]))
else:
raise RuntimeError('Unexpected trigger input. See documentation for details.')
@property
def trigger_count(self):
"""Gets or sets the number of triggers
Expected integer value range: 1 <= n <= 2500"""
        return int(self._instrument.query('trigger:count?').strip())
@trigger_count.setter
def trigger_count(self, num_triggers):
if isinstance(num_triggers, int):
if 1 <= num_triggers <= 2500:
self._instrument.write('trigger:count {}'.format(num_triggers))
else:
raise RuntimeError('Trigger count expected to be between 1 and 2500.')
else:
raise RuntimeError('Trigger count expected to be type int.')
def initiate_cycle(self):
"""Initiates source or measure cycle, taking the SourceMeter out of an idle state."""
self._instrument.write('initiate')
def abort_cycle(self):
"""Aborts the source or measure cycle, bringing the SourceMeter back into an idle state."""
self._instrument.write('abort')
# Data storage / Buffer functions
# Note: :trace:data? and :read? are two separate buffers of
# maximum size 2500 readings.
@property
def num_readings_in_buffer(self):
"""Gets the number of readings that are stored in the buffer."""
return int(self._instrument.query('trace:points:actual?').strip())
@property
def trace_points(self):
"""Gets or sets the size of the buffer
Expected integer value range: 1 <= n <= 2500"""
return int(self._instrument.query('trace:points?').strip())
@trace_points.setter
def trace_points(self, num_points):
if isinstance(num_points, int):
if 1 <= num_points <= 2500:
self._instrument.write('trace:points {}'.format(num_points))
else:
raise RuntimeError('Keithley 2400 SourceMeter may only have 1 to 2500 buffer points.')
else:
raise RuntimeError('Expected type of num_points: int.')
def trace_feed_source(self, value):
"""Sets the source of the trace feed.
Expected strings: 'sense', 'calculate1', 'calculate2'"""
if value in ('sense', 'calculate1', 'calculate2'):
self._instrument.write('trace:feed {}'.format(value))
else:
raise RuntimeError('Unexpected trace source type. See documentation for details.')
def read_trace(self):
"""Read contents of buffer."""
trace = self._instrument.query('trace:data?').strip().split(',')
trace_list = [float(x) for x in trace]
return trace_list
def clear_trace(self):
"""Clear the buffer."""
        self._instrument.write('trace:clear')
def buffer_memory_status(self):
"""Check buffer memory status."""
response = self._instrument.query('trace:free?')
return response
def fill_buffer(self):
"""Fill buffer and stop."""
self._instrument.write('trace:feed:control next')
def disable_buffer(self):
"""Disables the buffer."""
self._instrument.write('trace:feed:control never')
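    # Illustrative sketch (rough outline, not a documented routine): a typical buffer
    # workflow using the methods above; the exact trigger/arm configuration needed to
    # actually fill the buffer depends on the measurement setup.
    def _example_buffer_workflow(self, num_points=10):
        self.trace_points = num_points
        self.trace_feed_source('sense')
        self.fill_buffer()
        self.initiate_cycle()
        data = self.read_trace()
        self.clear_trace()
        return data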
# Sweeping
# TODO: implement these!!!
@property
def sweep_start(self):
"""To be implemented."""
pass
@sweep_start.setter
def sweep_start(self, start):
pass
@property
def sweep_end(self):
"""To be implemented."""
pass
@sweep_end.setter
def sweep_end(self, end):
pass
@property
def sweep_center(self):
"""To be implemented."""
pass
@sweep_center.setter
def sweep_center(self, center):
pass
@property
def sweep_span(self):
"""To be implemented."""
pass
@sweep_span.setter
def sweep_span(self, span):
pass
@property
def sweep_ranging(self):
"""To be implemented."""
pass
@sweep_ranging.setter
def sweep_ranging(self, _range):
pass
@property
def sweep_scale(self):
"""To be implemented."""
pass
@sweep_scale.setter
def sweep_scale(self, scale):
pass
@property
def sweep_points(self):
"""To be implemented."""
pass
@sweep_points.setter
def sweep_points(self, num_points):
pass
@property
def sweep_direction(self):
"""To be implemented."""
pass
@sweep_direction.setter
def sweep_direction(self, direction):
pass
# Ramping commands
def ramp_to_zero(self):
pass
def ramp_to_setpoint(self, setpoint: float, step: float, wait: float):
pass
# Common commands
def clear_status(self):
"""Clears all event registers and Error Queue."""
self._instrument.write('*cls')
def reset_to_defaults(self):
"""Resets to defaults of Sourcemeter."""
self._instrument.write('*rst')
def identify(self):
"""Returns manufacturer, model number, serial number, and firmware revision levels."""
        response = self._instrument.query('*idn?').strip().split(',')
return {'manufacturer': response[0],
'model': response[1],
'serial number': response[2],
'firmware revision level': response[3]
}
def send_bus_trigger(self):
"""Sends a bus trigger to SourceMeter."""
self._instrument.write('*trg')
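# Illustrative usage sketch (hypothetical GPIB address and levels): source a fixed
# voltage, measure current with a 1 mA compliance limit, and return the mean reading.
def _example_source_voltage_measure_current():
    keithley = Keithley2400(gpib_addr=23)
    keithley.source_type = 'voltage'
    keithley.source_value = 1.0          # volts
    keithley.measure_type = 'current'
    keithley.current_compliance = 1e-3   # amperes
    keithley.output = True
    means, _stdevs = keithley.read('current')
    keithley.output = False
    return means[0]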
|
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import sys
from desktop.conf import CONNECTORS_BLACKLIST, CONNECTORS_WHITELIST
from desktop.lib.exceptions_renderable import PopupException
if sys.version_info[0] > 2:
from django.utils.translation import gettext as _
else:
from django.utils.translation import ugettext as _
LOG = logging.getLogger(__name__)
CONNECTOR_TYPES = [
{
'dialect': 'hive',
'nice_name': 'Hive',
'description': 'Recommended',
'category': 'editor',
'interface': 'hiveserver2',
'settings': [
{'name': 'server_host', 'value': 'localhost'},
{'name': 'server_port', 'value': 10000},
{'name': 'is_llap', 'value': False}, # cf. _get_session_by_id() or create a separate connector
{'name': 'use_sasl', 'value': True},
],
'properties': {
'is_sql': True,
'sql_identifier_quote': '`',
'sql_identifier_comment_single': '--',
'has_catalog': False,
'has_database': True,
'has_table': True,
'has_live_queries': False,
'has_optimizer_risks': True,
'has_optimizer_values': True,
'has_auto_limit': False,
'has_reference_language': True,
'has_reference_functions': True,
'has_use_statement': True,
}
},
{
'dialect': 'hive',
'nice_name': 'Hive',
'description': 'Via SqlAlchemy interface',
'category': 'editor',
'interface': 'sqlalchemy',
'settings': [
{'name': 'url', 'value': 'hive://localhost:10000'},
{'name': 'has_ssh', 'value': False},
{'name': 'ssh_server_host', 'value': '127.0.0.1'},
],
'properties': {
'is_sql': True,
'sql_identifier_quote': '`',
'sql_identifier_comment_single': '--',
'has_catalog': False,
'has_database': True,
'has_table': True,
'has_live_queries': False,
'has_optimizer_risks': True,
'has_optimizer_values': True,
'has_auto_limit': False,
'has_reference_language': True,
'has_reference_functions': True,
'has_use_statement': True,
}
},
{
'nice_name': 'Impala',
'dialect': 'impala',
'interface': 'hiveserver2',
'category': 'editor',
'description': '',
'settings': [
{'name': 'server_host', 'value': 'localhost'},
{'name': 'server_port', 'value': 21050},
{'name': 'impersonation_enabled', 'value': False},
{'name': 'use_sasl', 'value': False},
],
'properties': {
'is_sql': True,
'sql_identifier_quote': '`',
'sql_identifier_comment_single': '--',
'has_catalog': False,
'has_database': True,
'has_table': True,
'has_live_queries': False,
'has_optimizer_risks': True,
'has_optimizer_values': True,
'has_auto_limit': False,
'has_reference_language': True,
'has_reference_functions': True,
'has_use_statement': True,
}
},
{
'nice_name': 'Impala',
'dialect': 'impala',
'interface': 'sqlalchemy',
'category': 'editor',
'description': 'Via SqlAlchemy interface',
'settings': [
{'name': 'url', 'value': 'impala://localhost:21050'},
{'name': 'has_ssh', 'value': False},
{'name': 'ssh_server_host', 'value': '127.0.0.1'},
],
'properties': {
'is_sql': True,
'sql_identifier_quote': '`',
'sql_identifier_comment_single': '--',
'has_catalog': False,
'has_database': True,
'has_table': True,
'has_live_queries': False,
'has_optimizer_risks': True,
'has_optimizer_values': True,
'has_auto_limit': False,
'has_reference_language': True,
'has_reference_functions': True,
'has_use_statement': True,
}
},
{
'nice_name': 'Druid',
'dialect': 'druid',
'interface': 'sqlalchemy',
'settings': [
{'name': 'url', 'value': 'druid://localhost:8082/druid/v2/sql/'},
{'name': 'has_ssh', 'value': False},
{'name': 'ssh_server_host', 'value': '127.0.0.1'},
],
'category': 'editor',
'description': '',
'properties': {
'is_sql': True,
'sql_identifier_quote': '"',
'sql_identifier_comment_single': '--',
'has_catalog': False,
'has_database': True,
'has_table': True,
'has_live_queries': False,
'has_optimizer_risks': True,
'has_optimizer_values': True,
'has_auto_limit': False,
'has_reference_language': False,
'has_reference_functions': False,
'has_use_statement': False,
}
},
{
'nice_name': 'ksqlDB',
'dialect': 'ksql',
'interface': 'ksql',
'settings': [
{'name': 'url', 'value': 'http://localhost:8088'},
{'name': 'has_ssh', 'value': False},
{'name': 'ssh_server_host', 'value': '127.0.0.1'},
],
'category': 'editor',
'description': '',
'properties': {
'is_sql': True,
'sql_identifier_quote': '`',
'sql_identifier_comment_single': '--',
'has_catalog': False,
'has_database': False,
'has_table': True,
'has_live_queries': True,
'has_optimizer_risks': True,
'has_optimizer_values': True,
'has_auto_limit': False,
'has_reference_language': False,
'has_reference_functions': False,
'has_use_statement': False,
}
},
{
'nice_name': 'Flink SQL',
'dialect': 'flink',
'interface': 'flink',
'settings': [
{'name': 'url', 'value': 'http://localhost:8083'},
{'name': 'has_ssh', 'value': False},
{'name': 'ssh_server_host', 'value': '127.0.0.1'},
],
'category': 'editor',
'description': '',
'properties': {
'is_sql': True,
'sql_identifier_quote': '`',
'sql_identifier_comment_single': '--',
'has_catalog': True,
'has_database': False,
'has_table': True,
'has_live_queries': False,
'has_optimizer_risks': True,
'has_optimizer_values': True,
'has_auto_limit': False,
'has_reference_language': False,
'has_reference_functions': False,
'has_use_statement': False,
}
},
{
'nice_name': 'SparkSQL',
'dialect': 'sparksql',
'interface': 'sqlalchemy',
'settings': [
{'name': 'url', 'value': 'hive://localhost:10000'},
{'name': 'has_ssh', 'value': False},
{'name': 'ssh_server_host', 'value': '127.0.0.1'},
],
'category': 'editor',
'description': 'Via Thrift Server and SqlAlchemy interface',
'properties': {
'is_sql': True,
'sql_identifier_quote': '`',
'sql_identifier_comment_single': '--',
'has_catalog': False,
'has_database': True,
'has_table': True,
'has_live_queries': False,
'has_optimizer_risks': True,
'has_optimizer_values': True,
'has_auto_limit': False,
'has_reference_language': False,
'has_reference_functions': False,
'has_use_statement': False,
}
},
{
'nice_name': 'SparkSQL',
'dialect': 'sparksql',
'interface': 'hiveserver2',
'settings': [
{'name': 'server_host', 'value': 'localhost'},
{'name': 'server_port', 'value': 10000},
{'name': 'impersonation_enabled', 'value': False},
{'name': 'has_ssh', 'value': False},
{'name': 'ssh_server_host', 'value': '127.0.0.1'},
{'name': 'use_sasl', 'value': True},
],
'category': 'editor',
'description': 'Via Thrift Server and Hive interface',
'properties': {
'is_sql': True,
'sql_identifier_quote': '`',
'sql_identifier_comment_single': '--',
'has_catalog': False,
'has_database': True,
'has_table': True,
'has_live_queries': False,
'has_optimizer_risks': True,
'has_optimizer_values': True,
'has_auto_limit': False,
'has_reference_language': False,
'has_reference_functions': False,
'has_use_statement': True,
}
},
{
'nice_name': 'SparkSQL',
'dialect': 'sparksql',
'interface': 'livy',
'settings': [
{'name': 'api_url', 'value': 'http://localhost:8998'},
{'name': 'has_ssh', 'value': False},
{'name': 'ssh_server_host', 'value': '127.0.0.1'},
],
'category': 'editor',
'description': 'Via Livy server',
'properties': {
'is_sql': True,
'sql_identifier_quote': '`',
'sql_identifier_comment_single': '--',
'has_catalog': False,
'has_database': True,
'has_table': True,
'has_live_queries': False,
'has_optimizer_risks': True,
'has_optimizer_values': True,
'has_auto_limit': False,
'has_reference_language': False,
'has_reference_functions': False,
'has_use_statement': False,
}
},
{
'nice_name': 'Phoenix SQL',
'dialect': 'phoenix',
'interface': 'sqlalchemy',
'settings': [
{'name': 'url', 'value': 'phoenix://localhost:8765/'},
{'name': 'has_ssh', 'value': False},
{'name': 'ssh_server_host', 'value': '127.0.0.1'},
],
'category': 'editor',
'description': '',
'properties': {
'is_sql': True,
'sql_identifier_quote': '"',
'sql_identifier_comment_single': '--',
'has_catalog': False,
'has_database': False,
'has_table': True,
'has_live_queries': False,
'has_optimizer_risks': True,
'has_optimizer_values': True,
'has_auto_limit': False,
'has_reference_language': False,
'has_reference_functions': False,
'has_use_statement': False,
}
},
{
'nice_name': 'MySQL',
'dialect': 'mysql',
'interface': 'sqlalchemy',
'settings': [
{'name': 'url', 'value': 'mysql://username:password@localhost:3306/hue'},
{'name': 'has_ssh', 'value': False},
{'name': 'ssh_server_host', 'value': '127.0.0.1'},
],
'category': 'editor',
'description': '',
'properties': {
'is_sql': True,
'sql_identifier_quote': '`',
'sql_identifier_comment_single': '--',
'has_catalog': True,
'has_database': True,
'has_table': True,
'has_live_queries': False,
'has_optimizer_risks': True,
'has_optimizer_values': True,
'has_auto_limit': False,
'has_reference_language': False,
'has_reference_functions': False,
'has_use_statement': False,
}
},
{
'nice_name': 'PostgreSQL',
'dialect': 'postgresql',
'interface': 'sqlalchemy',
'settings': [
{'name': 'url', 'value': 'postgresql://username:password@localhost:5432/hue'},
{'name': 'has_ssh', 'value': False},
{'name': 'ssh_server_host', 'value': '127.0.0.1'},
],
'category': 'editor',
'description': '',
'properties': {
'is_sql': True,
'sql_identifier_quote': '"',
'sql_identifier_comment_single': '--',
'has_catalog': True,
'has_database': True,
'has_table': True,
'has_live_queries': False,
'has_optimizer_risks': True,
'has_optimizer_values': True,
'has_auto_limit': False,
'has_reference_language': False,
'has_reference_functions': False,
'has_use_statement': False,
}
},
{
'nice_name': 'Trino (Presto SQL)',
'dialect': 'presto',
'interface': 'sqlalchemy',
'settings': [
{'name': 'url', 'value': 'presto://localhost:8080/tpch'},
{'name': 'has_ssh', 'value': False},
{'name': 'ssh_server_host', 'value': '127.0.0.1'},
{'name': 'has_impersonation', 'value': False},
],
'category': 'editor',
'description': '',
'properties': {
'is_sql': True,
'sql_identifier_quote': '"',
'sql_identifier_comment_single': '--',
'has_catalog': True,
'has_database': True,
'has_table': True,
'has_live_queries': False,
'has_optimizer_risks': True,
'has_optimizer_values': True,
'has_auto_limit': False,
'has_reference_language': False,
'has_reference_functions': False,
'has_use_statement': False,
}
},
{
'nice_name': 'Dasksql',
'dialect': 'dasksql',
'interface': 'sqlalchemy',
'settings': [
{'name': 'url', 'value': 'presto://localhost:8080/catalog/default'},
{'name': 'has_ssh', 'value': False},
{'name': 'ssh_server_host', 'value': '127.0.0.1'},
{'name': 'has_impersonation', 'value': False},
],
'category': 'editor',
'description': '',
'properties': {
'is_sql': True,
'sql_identifier_quote': '"',
'sql_identifier_comment_single': '--',
'has_catalog': True,
'has_database': True,
'has_table': True,
'has_live_queries': False,
'has_optimizer_risks': False,
'has_optimizer_values': True,
'has_auto_limit': False,
'has_reference_language': False,
'has_reference_functions': False,
}
},
{
'nice_name': 'Elasticsearch SQL',
'dialect': 'elasticsearch',
'interface': 'sqlalchemy',
'settings': [
{'name': 'url', 'value': 'elasticsearch+http://localhost:9200/'},
{'name': 'has_ssh', 'value': False},
{'name': 'ssh_server_host', 'value': '127.0.0.1'},
],
'category': 'editor',
'description': '',
'properties': {
'is_sql': True,
'sql_identifier_quote': '"',
'sql_identifier_comment_single': '--',
'has_catalog': False,
'has_database': False,
'has_table': True,
'has_live_queries': False,
'has_optimizer_risks': False,
'has_optimizer_values': False,
'has_auto_limit': False,
'has_reference_language': False,
'has_reference_functions': False,
'has_use_statement': False,
}
},
{
'nice_name': 'Calcite',
'dialect': 'calcite',
'interface': 'sqlalchemy',
'settings': [
{'name': 'server_host', 'value': 'localhost'},
{'name': 'server_port', 'value': 10000},
{'name': 'has_ssh', 'value': False},
{'name': 'ssh_server_host', 'value': '127.0.0.1'},
],
'category': 'editor',
'description': '',
'properties': {
'is_sql': True,
'sql_identifier_quote': '`',
'sql_identifier_comment_single': '--',
'has_catalog': False,
'has_database': True,
'has_table': True,
'has_live_queries': False,
'has_optimizer_risks': True,
'has_optimizer_values': True,
'has_auto_limit': False,
'has_reference_language': False,
'has_reference_functions': False,
'has_use_statement': False,
}
},
{
'nice_name': 'Athena',
'dialect': 'athena',
'interface': 'sqlalchemy',
'settings': [
{
'name': 'url',
'value': 'awsathena+rest://XXXXXXXXXXXXXXXXXXXX:XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX@athena.us-west-2.amazonaws.com:'
'443/default?s3_staging_dir=s3://gethue-athena/scratch'
},
],
'category': 'editor',
'description': '',
'properties': {
'is_sql': True,
'sql_identifier_quote': '"',
'sql_identifier_comment_single': '--',
'has_catalog': True,
'has_database': True,
'has_table': True,
'has_live_queries': False,
'has_optimizer_risks': True,
'has_optimizer_values': True,
'has_auto_limit': False,
'has_reference_language': False,
'has_reference_functions': False,
'has_use_statement': False,
}
},
{
'nice_name': 'Redshift',
'dialect': 'redshift',
'interface': 'sqlalchemy',
'settings': [
      {'name': 'url', 'value': 'redshift+psycopg2://username@host.com:5439/database'},
],
'category': 'editor',
'description': '',
'properties': {
'is_sql': True,
'sql_identifier_quote': '`',
'sql_identifier_comment_single': '--',
'has_catalog': True,
'has_database': True,
'has_table': True,
'has_live_queries': False,
'has_optimizer_risks': True,
'has_optimizer_values': True,
'has_auto_limit': False,
'has_reference_language': False,
'has_reference_functions': False,
'has_use_statement': False,
}
},
{
'nice_name': 'Snowflake',
'dialect': 'snowflake',
'interface': 'sqlalchemy',
'settings': [
{'name': 'url', 'value': 'snowflake://{user}:{password}@{account}/{database}'},
],
'category': 'editor',
'description': '',
'properties': {
'is_sql': True,
'sql_identifier_quote': '`',
'sql_identifier_comment_single': '--',
'has_catalog': True,
'has_database': True,
'has_table': True,
'has_live_queries': False,
'has_optimizer_risks': True,
'has_optimizer_values': True,
'has_auto_limit': False,
'has_reference_language': False,
'has_reference_functions': False,
'has_use_statement': False,
}
},
{
'nice_name': 'Big Query',
'dialect': 'bigquery',
'interface': 'sqlalchemy',
'settings': [
{'name': 'url', 'value': 'bigquery://project-XXXXXX/dataset_name'},
{'name': 'credentials_json', 'value': '{"type": "service_account", ...}'}
],
'category': 'editor',
'description': '',
'properties': {
'is_sql': True,
'sql_identifier_quote': '`',
'sql_identifier_comment_single': '--',
'has_catalog': True,
'has_database': True,
'has_table': True,
'has_live_queries': False,
'has_optimizer_risks': True,
'has_optimizer_values': True,
'has_auto_limit': False,
'has_reference_language': False,
'has_reference_functions': False,
'has_use_statement': False,
}
},
{
'nice_name': 'Oracle',
'dialect': 'oracle',
'interface': 'sqlalchemy',
'settings': [
{'name': 'url', 'value': 'oracle://user:password@localhost'},
{'name': 'has_ssh', 'value': False},
{'name': 'ssh_server_host', 'value': '127.0.0.1'},
],
'category': 'editor',
'description': '',
'properties': {
'is_sql': True,
'sql_identifier_quote': '`',
'sql_identifier_comment_single': '--',
'has_catalog': True,
'has_database': True,
'has_table': True,
'has_live_queries': False,
'has_optimizer_risks': True,
'has_optimizer_values': True,
'has_auto_limit': False,
'has_reference_language': False,
'has_reference_functions': False,
'has_use_statement': False,
}
},
{
'nice_name': 'Clickhouse',
'dialect': 'clickhouse',
'interface': 'sqlalchemy',
'settings': [
{'name': 'url', 'value': 'clickhouse://localhost:8123'},
{'name': 'has_ssh', 'value': False},
{'name': 'ssh_server_host', 'value': '127.0.0.1'},
],
'category': 'editor',
'description': '',
'properties': {
'is_sql': True,
'sql_identifier_quote': '"',
'sql_identifier_comment_single': '--',
'has_catalog': False,
'has_database': False,
'has_table': True,
'has_live_queries': False,
'has_optimizer_risks': True,
'has_optimizer_values': True,
'has_auto_limit': False,
'has_reference_language': False,
'has_reference_functions': False,
'has_use_statement': False,
}
},
{
'nice_name': 'Solr SQL',
'dialect': 'solr',
'interface': 'solr',
'settings': [
{'name': 'url', 'value': 'solr://user:password@localhost:8983/solr/<collection>[?use_ssl=true|false]'},
{'name': 'has_ssh', 'value': False},
{'name': 'ssh_server_host', 'value': '127.0.0.1'},
],
'category': 'editor',
'description': '',
'properties': {
'is_sql': True,
'sql_identifier_quote': '`',
'sql_identifier_comment_single': '--',
'has_catalog': True,
'has_database': True,
'has_table': True,
'has_live_queries': False,
'has_optimizer_risks': True,
'has_optimizer_values': True,
'has_auto_limit': False,
'has_reference_language': False,
'has_reference_functions': False,
'has_use_statement': False,
}
},
{
'nice_name': 'SQL Database',
'dialect': 'sql',
'interface': 'sqlalchemy',
'settings': [
{'name': 'url', 'value': 'name://projectName/datasetName'},
{'name': 'has_ssh', 'value': False},
{'name': 'ssh_server_host', 'value': '127.0.0.1'},
],
'category': 'editor',
'description': '',
'properties': {
'is_sql': True,
'sql_identifier_quote': '`',
'sql_identifier_comment_single': '--',
'has_catalog': True,
'has_database': True,
'has_table': True,
'has_live_queries': False,
'has_optimizer_risks': True,
'has_optimizer_values': True,
'has_auto_limit': False,
'has_reference_language': False,
'has_reference_functions': False,
'has_use_statement': False,
}
},
{
'nice_name': 'SQL Database (JDBC)',
'dialect': 'sql',
'interface': 'sqlalchemy',
'settings': [
{'name': 'url', 'value': 'jdbc:db2://localhost:50000/SQOOP'},
{'name': 'driver', 'value': 'com.ibm.db2.jcc.DB2Driver'},
{'name': 'user', 'value': 'hue'},
{'name': 'password', 'value': 'hue'},
{'name': 'has_ssh', 'value': False},
{'name': 'ssh_server_host', 'value': '127.0.0.1'},
],
'category': 'editor',
'description': 'Deprecated: older way to connect to any database.',
'properties': {
'is_sql': True,
'sql_identifier_quote': '`',
'sql_identifier_comment_single': '--',
'has_catalog': True,
'has_database': True,
'has_table': True,
'has_live_queries': False,
'has_optimizer_risks': False,
'has_optimizer_values': False,
'has_auto_limit': False,
'has_reference_language': False,
'has_reference_functions': False,
'has_use_statement': False,
}
},
{
'nice_name': 'SqlFlow',
'dialect': 'sqlflow',
'interface': 'sqlflow',
'settings': [
{'name': 'url', 'value': 'localhost:50051'},
{'name': 'datasource', 'value': 'hive://localhost:10000/iris'},
],
'category': 'editor',
'description': '',
'properties': {
'is_sql': True,
'sql_identifier_quote': '`',
'sql_identifier_comment_single': '--',
'has_catalog': True,
'has_database': True,
'has_table': True,
'has_live_queries': False,
'has_optimizer_risks': True,
'has_optimizer_values': True,
'has_auto_limit': False,
'has_reference_language': False,
'has_reference_functions': False,
'has_use_statement': False,
}
},
{'nice_name': 'PySpark', 'dialect': 'pyspark', 'settings': [], 'category': 'editor', 'description': '', 'properties': {}},
{'nice_name': 'Spark', 'dialect': 'spark', 'settings': [], 'category': 'editor', 'description': '', 'properties': {}},
{'nice_name': 'Pig', 'dialect': 'pig', 'settings': [], 'category': 'editor', 'description': '', 'properties': {}},
{'nice_name': 'Java', 'dialect': 'java', 'settings': [], 'category': 'editor', 'description': '', 'properties': {}},
{'nice_name': 'HDFS', 'dialect': 'hdfs', 'interface': 'rest',
'settings': [
{'name': 'server_url', 'value': 'http://localhost:50070/webhdfs/v1'},
{'name': 'default_fs', 'value': 'fs_defaultfs=hdfs://localhost:8020'}
],
'category': 'browsers', 'description': '', 'properties': {}
},
{'nice_name': 'YARN', 'dialect': 'yarn', 'settings': [], 'category': 'browsers', 'description': '', 'properties': {}},
{'nice_name': 'S3', 'dialect': 's3', 'settings': [], 'category': 'browsers', 'description': '', 'properties': {}},
{'nice_name': 'ADLS', 'dialect': 'adls-v1', 'settings': [], 'category': 'browsers', 'description': '', 'properties': {}},
# HBase
# Solr
{
'nice_name': 'Hive Metastore',
'dialect': 'hms',
'interface': 'hiveserver2',
'settings': [{'name': 'server_host', 'value': ''}, {'name': 'server_port', 'value': ''},],
'category': 'catalogs',
'description': '',
'properties': {}
},
{
'nice_name': 'Atlas', 'dialect': 'atlas', 'interface': 'rest', 'settings': [], 'category': 'catalogs', 'description': '',
'properties': {}
},
{
'nice_name': 'Navigator', 'dialect': 'navigator', 'interface': 'rest', 'settings': [], 'category': 'catalogs',
'description': '',
'properties': {}
},
{'nice_name': 'Optimizer', 'dialect': 'optimizer', 'settings': [], 'category': 'optimizers', 'description': '', 'properties': {}},
{'nice_name': 'Oozie', 'dialect': 'oozie', 'settings': [], 'category': 'schedulers', 'description': '', 'properties': {}},
{'nice_name': 'Celery', 'dialect': 'celery', 'settings': [], 'category': 'schedulers', 'description': '', 'properties': {}},
]
CONNECTOR_TYPES = [connector for connector in CONNECTOR_TYPES if connector['dialect'] not in CONNECTORS_BLACKLIST.get()]
if CONNECTORS_WHITELIST.get():
CONNECTOR_TYPES = [connector for connector in CONNECTOR_TYPES if connector['dialect'] in CONNECTORS_WHITELIST.get()]
CATEGORIES = [
{"name": 'Editor', 'type': 'editor', 'description': ''},
{"name": 'Browsers', 'type': 'browsers', 'description': ''},
{"name": 'Catalogs', 'type': 'catalogs', 'description': ''},
{"name": 'Optimizers', 'type': 'optimizers', 'description': ''},
{"name": 'Schedulers', 'type': 'schedulers', 'description': ''},
{"name": 'Plugins', 'type': 'plugins', 'description': ''},
]
def get_connectors_types():
return CONNECTOR_TYPES
def get_connector_categories():
return CATEGORIES
def get_connector_by_type(dialect, interface):
instance = [
connector
for connector in get_connectors_types() if connector['dialect'] == dialect and connector['interface'] == interface
]
if instance:
return instance[0]
else:
    raise PopupException(_('No connector with the type %s found.') % dialect)
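# Illustrative usage sketch, not part of the upstream module: it assumes the
# 'hive' dialect is neither blacklisted nor filtered out by a whitelist in the
# running configuration, and it only exercises the helpers defined above.
if __name__ == '__main__':
  # Dialect/interface lookup returns the first matching connector template.
  hive = get_connector_by_type('hive', 'hiveserver2')
  assert hive['nice_name'] == 'Hive'
  assert any(setting['name'] == 'server_port' for setting in hive['settings'])
  # The category listing is static and independent of the connector filters.
  assert {category['type'] for category in get_connector_categories()} >= {'editor', 'browsers'}
  print(json.dumps(hive, indent=2))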
|
|
# Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
from typing import Any, cast, Tuple, TYPE_CHECKING, Union, Dict
from cirq._doc import document
from cirq.ops import common_gates, raw_types, identity
from cirq.type_workarounds import NotImplementedType
if TYPE_CHECKING:
import cirq
from cirq.ops.pauli_string import SingleQubitPauliStringGateOperation
from cirq.value.product_state import (
_XEigenState,
_YEigenState,
_ZEigenState,
) # coverage: ignore
class Pauli(raw_types.Gate, metaclass=abc.ABCMeta):
"""Represents the Pauli gates.
This is an abstract class with no public subclasses. The only instances
of private subclasses are the X, Y, or Z Pauli gates defined below.
"""
_XYZ: Tuple['Pauli', 'Pauli', 'Pauli']
@staticmethod
def by_index(index: int) -> 'Pauli':
return Pauli._XYZ[index % 3]
@staticmethod
def by_relative_index(p: 'Pauli', relative_index: int) -> 'Pauli':
return Pauli._XYZ[(p._index + relative_index) % 3]
def __init__(self, index: int, name: str) -> None:
self._index = index
self._name = name
def num_qubits(self):
return 1
def _commutes_(self, other: Any, atol: float) -> Union[bool, NotImplementedType, None]:
if not isinstance(other, Pauli):
return NotImplemented
return self is other
def third(self, second: 'Pauli') -> 'Pauli':
return Pauli._XYZ[(-self._index - second._index) % 3]
def relative_index(self, second: 'Pauli') -> int:
"""Relative index of self w.r.t. second in the (X, Y, Z) cycle."""
return (self._index - second._index + 1) % 3 - 1
def phased_pauli_product(
self, other: Union['cirq.Pauli', 'identity.IdentityGate']
) -> Tuple[complex, Union['cirq.Pauli', 'identity.IdentityGate']]:
if self == other:
return 1, identity.I
if other is identity.I:
return 1, self
return 1j ** cast(Pauli, other).relative_index(self), self.third(cast(Pauli, other))
def __gt__(self, other):
if not isinstance(other, Pauli):
return NotImplemented
return (self._index - other._index) % 3 == 1
def __lt__(self, other):
if not isinstance(other, Pauli):
return NotImplemented
return (other._index - self._index) % 3 == 1
def on(self, *qubits: 'cirq.Qid') -> 'SingleQubitPauliStringGateOperation':
"""Returns an application of this gate to the given qubits.
Args:
*qubits: The collection of qubits to potentially apply the gate to.
Raises:
ValueError: If more than one qubit is acted upon.
"""
if len(qubits) != 1:
raise ValueError(f'Expected a single qubit, got <{qubits!r}>.')
from cirq.ops.pauli_string import SingleQubitPauliStringGateOperation
return SingleQubitPauliStringGateOperation(self, qubits[0])
@property
def _canonical_exponent(self):
"""Overrides EigenGate._canonical_exponent in subclasses."""
return 1
class _PauliX(Pauli, common_gates.XPowGate):
def __init__(self):
Pauli.__init__(self, index=0, name='X')
common_gates.XPowGate.__init__(self, exponent=1.0)
def __pow__(self: '_PauliX', exponent: 'cirq.TParamVal') -> common_gates.XPowGate:
return common_gates.XPowGate(exponent=exponent) if exponent != 1 else _PauliX()
def _with_exponent(self: '_PauliX', exponent: 'cirq.TParamVal') -> common_gates.XPowGate:
return self.__pow__(exponent)
@classmethod
def _from_json_dict_(cls, exponent, global_shift, **kwargs):
assert global_shift == 0
assert exponent == 1
return Pauli._XYZ[0]
@property
def basis(self: '_PauliX') -> Dict[int, '_XEigenState']:
from cirq.value.product_state import _XEigenState
return {
+1: _XEigenState(+1),
-1: _XEigenState(-1),
}
class _PauliY(Pauli, common_gates.YPowGate):
def __init__(self):
Pauli.__init__(self, index=1, name='Y')
common_gates.YPowGate.__init__(self, exponent=1.0)
def __pow__(self: '_PauliY', exponent: 'cirq.TParamVal') -> common_gates.YPowGate:
return common_gates.YPowGate(exponent=exponent) if exponent != 1 else _PauliY()
def _with_exponent(self: '_PauliY', exponent: 'cirq.TParamVal') -> common_gates.YPowGate:
return self.__pow__(exponent)
@classmethod
def _from_json_dict_(cls, exponent, global_shift, **kwargs):
assert global_shift == 0
assert exponent == 1
return Pauli._XYZ[1]
@property
def basis(self: '_PauliY') -> Dict[int, '_YEigenState']:
from cirq.value.product_state import _YEigenState
return {
+1: _YEigenState(+1),
-1: _YEigenState(-1),
}
class _PauliZ(Pauli, common_gates.ZPowGate):
def __init__(self):
Pauli.__init__(self, index=2, name='Z')
common_gates.ZPowGate.__init__(self, exponent=1.0)
def __pow__(self: '_PauliZ', exponent: 'cirq.TParamVal') -> common_gates.ZPowGate:
return common_gates.ZPowGate(exponent=exponent) if exponent != 1 else _PauliZ()
def _with_exponent(self: '_PauliZ', exponent: 'cirq.TParamVal') -> common_gates.ZPowGate:
return self.__pow__(exponent)
@classmethod
def _from_json_dict_(cls, exponent, global_shift, **kwargs):
assert global_shift == 0
assert exponent == 1
return Pauli._XYZ[2]
@property
def basis(self: '_PauliZ') -> Dict[int, '_ZEigenState']:
from cirq.value.product_state import _ZEigenState
return {
+1: _ZEigenState(+1),
-1: _ZEigenState(-1),
}
X = _PauliX()
document(
X,
"""The Pauli X gate.
Matrix:
[[0, 1],
[1, 0]]
""",
)
Y = _PauliY()
document(
Y,
"""The Pauli Y gate.
Matrix:
[[0, -i],
[i, 0]]
""",
)
Z = _PauliZ()
document(
Z,
"""The Pauli Z gate.
Matrix:
[[1, 0],
[0, -1]]
""",
)
Pauli._XYZ = (X, Y, Z)
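# Illustrative sketch, not part of the upstream module: a few consequences of
# the (X, Y, Z) index-cycle arithmetic defined above, checked only when this
# file is run directly.
if __name__ == '__main__':
    assert Pauli.by_relative_index(X, 1) is Y and Pauli.by_relative_index(Z, 1) is X
    assert X.third(Y) is Z  # the remaining Pauli of the triple
    assert X.relative_index(Z) == 1  # X sits one step after Z in the cycle
    phase, gate = Y.phased_pauli_product(X)
    assert gate is Z and phase == -1j  # sigma_y . sigma_x == -i . sigma_z
    assert X < Y < Z < X  # the comparison operators define a cyclic, non-transitive order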
|
|
# Copyright (c) 2013 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Test suite for VMware VMDK driver.
"""
from distutils.version import LooseVersion
import os
import mock
import mox
from cinder import exception
from cinder.image import glance
from cinder.openstack.common import units
from cinder import test
from cinder.volume import configuration
from cinder.volume.drivers.vmware import api
from cinder.volume.drivers.vmware import error_util
from cinder.volume.drivers.vmware import vim
from cinder.volume.drivers.vmware import vim_util
from cinder.volume.drivers.vmware import vmdk
from cinder.volume.drivers.vmware import vmware_images
from cinder.volume.drivers.vmware import volumeops
class FakeVim(object):
@property
def service_content(self):
return mox.MockAnything()
@property
def client(self):
return mox.MockAnything()
def Login(self, session_manager, userName, password):
return mox.MockAnything()
def Logout(self, session_manager):
pass
def TerminateSession(self, session_manager, sessionId):
pass
def SessionIsActive(self, session_manager, sessionID, userName):
pass
class FakeTaskInfo(object):
def __init__(self, state, result=None):
self.state = state
self.result = result
        class FakeError(object):
            def __init__(self):
                self.localizedMessage = None
        self.error = FakeError()
class FakeMor(object):
def __init__(self, type, val):
self._type = type
self.value = val
class FakeObject(object):
def __init__(self):
self._fields = {}
def __setitem__(self, key, value):
self._fields[key] = value
def __getitem__(self, item):
return self._fields[item]
class FakeManagedObjectReference(object):
def __init__(self, lis=None):
self.ManagedObjectReference = lis or []
class FakeDatastoreSummary(object):
def __init__(self, freeSpace, capacity, datastore=None, name=None):
self.freeSpace = freeSpace
self.capacity = capacity
self.datastore = datastore
self.name = name
class FakeSnapshotTree(object):
def __init__(self, tree=None, name=None,
snapshot=None, childSnapshotList=None):
self.rootSnapshotList = tree
self.name = name
self.snapshot = snapshot
self.childSnapshotList = childSnapshotList
class FakeElem(object):
def __init__(self, prop_set=None):
self.propSet = prop_set
class FakeProp(object):
def __init__(self, name=None, val=None):
self.name = name
self.val = val
class FakeRetrieveResult(object):
def __init__(self, objects, token):
self.objects = objects
self.token = token
class FakeObj(object):
def __init__(self, obj=None):
self.obj = obj
class VMwareEsxVmdkDriverTestCase(test.TestCase):
"""Test class for VMwareEsxVmdkDriver."""
IP = 'localhost'
USERNAME = 'username'
PASSWORD = 'password'
VOLUME_FOLDER = 'cinder-volumes'
API_RETRY_COUNT = 3
TASK_POLL_INTERVAL = 5.0
IMG_TX_TIMEOUT = 10
MAX_OBJECTS = 100
VMDK_DRIVER = vmdk.VMwareEsxVmdkDriver
def setUp(self):
super(VMwareEsxVmdkDriverTestCase, self).setUp()
self._config = mox.MockObject(configuration.Configuration)
self._config.append_config_values(mox.IgnoreArg())
self._config.vmware_host_ip = self.IP
self._config.vmware_host_username = self.USERNAME
self._config.vmware_host_password = self.PASSWORD
self._config.vmware_wsdl_location = None
self._config.vmware_volume_folder = self.VOLUME_FOLDER
self._config.vmware_api_retry_count = self.API_RETRY_COUNT
self._config.vmware_task_poll_interval = self.TASK_POLL_INTERVAL
self._config.vmware_image_transfer_timeout_secs = self.IMG_TX_TIMEOUT
self._config.vmware_max_objects_retrieval = self.MAX_OBJECTS
self._driver = vmdk.VMwareEsxVmdkDriver(configuration=self._config)
        api_retry_count = self._config.vmware_api_retry_count
        task_poll_interval = self._config.vmware_task_poll_interval
self._session = api.VMwareAPISession(self.IP, self.USERNAME,
self.PASSWORD, api_retry_count,
task_poll_interval,
create_session=False)
self._volumeops = volumeops.VMwareVolumeOps(self._session,
self.MAX_OBJECTS)
self._vim = FakeVim()
def test_retry(self):
"""Test Retry."""
class TestClass(object):
def __init__(self):
self.counter1 = 0
self.counter2 = 0
@api.Retry(max_retry_count=2, inc_sleep_time=0.001,
                       exceptions=(Exception,))
def fail(self):
self.counter1 += 1
raise exception.CinderException('Fail')
@api.Retry(max_retry_count=2)
def success(self):
self.counter2 += 1
return self.counter2
test_obj = TestClass()
self.assertRaises(exception.CinderException, test_obj.fail)
self.assertEqual(test_obj.counter1, 3)
ret = test_obj.success()
self.assertEqual(1, ret)
def test_create_session(self):
"""Test create_session."""
m = self.mox
m.StubOutWithMock(api.VMwareAPISession, 'vim')
self._session.vim = self._vim
m.ReplayAll()
self._session.create_session()
m.UnsetStubs()
m.VerifyAll()
def test_do_setup(self):
"""Test do_setup."""
m = self.mox
m.StubOutWithMock(self._driver.__class__, 'session')
self._driver.session = self._session
m.ReplayAll()
self._driver.do_setup(mox.IgnoreArg())
m.UnsetStubs()
m.VerifyAll()
def test_check_for_setup_error(self):
"""Test check_for_setup_error."""
self._driver.check_for_setup_error()
def test_get_volume_stats(self):
"""Test get_volume_stats."""
stats = self._driver.get_volume_stats()
self.assertEqual(stats['vendor_name'], 'VMware')
self.assertEqual(stats['driver_version'], self._driver.VERSION)
self.assertEqual(stats['storage_protocol'], 'LSI Logic SCSI')
self.assertEqual(stats['reserved_percentage'], 0)
self.assertEqual(stats['total_capacity_gb'], 'unknown')
self.assertEqual(stats['free_capacity_gb'], 'unknown')
def test_create_volume(self):
"""Test create_volume."""
driver = self._driver
host = mock.sentinel.host
rp = mock.sentinel.resource_pool
folder = mock.sentinel.folder
summary = mock.sentinel.summary
driver._select_ds_for_volume = mock.MagicMock()
driver._select_ds_for_volume.return_value = (host, rp, folder,
summary)
# invoke the create_volume call
volume = {'name': 'fake_volume'}
driver.create_volume(volume)
# verify calls made
driver._select_ds_for_volume.assert_called_once_with(volume)
# test create_volume call when _select_ds_for_volume fails
driver._select_ds_for_volume.side_effect = error_util.VimException('')
self.assertRaises(error_util.VimFaultException, driver.create_volume,
volume)
def test_success_wait_for_task(self):
"""Test successful wait_for_task."""
m = self.mox
m.StubOutWithMock(api.VMwareAPISession, 'vim')
self._session.vim = self._vim
result = FakeMor('VirtualMachine', 'my_vm')
success_task_info = FakeTaskInfo('success', result=result)
m.StubOutWithMock(vim_util, 'get_object_property')
vim_util.get_object_property(self._session.vim,
mox.IgnoreArg(),
'info').AndReturn(success_task_info)
m.ReplayAll()
ret = self._session.wait_for_task(mox.IgnoreArg())
self.assertEqual(ret.result, result)
m.UnsetStubs()
m.VerifyAll()
def test_failed_wait_for_task(self):
"""Test failed wait_for_task."""
m = self.mox
m.StubOutWithMock(api.VMwareAPISession, 'vim')
self._session.vim = self._vim
failed_task_info = FakeTaskInfo('failed')
m.StubOutWithMock(vim_util, 'get_object_property')
vim_util.get_object_property(self._session.vim,
mox.IgnoreArg(),
'info').AndReturn(failed_task_info)
m.ReplayAll()
self.assertRaises(error_util.VimFaultException,
self._session.wait_for_task,
mox.IgnoreArg())
m.UnsetStubs()
m.VerifyAll()
def test_delete_volume_without_backing(self):
"""Test delete_volume without backing."""
m = self.mox
m.StubOutWithMock(self._driver.__class__, 'volumeops')
self._driver.volumeops = self._volumeops
m.StubOutWithMock(self._volumeops, 'get_backing')
self._volumeops.get_backing('hello_world').AndReturn(None)
m.ReplayAll()
volume = FakeObject()
volume['name'] = 'hello_world'
self._driver.delete_volume(volume)
m.UnsetStubs()
m.VerifyAll()
def test_delete_volume_with_backing(self):
"""Test delete_volume with backing."""
m = self.mox
m.StubOutWithMock(self._driver.__class__, 'volumeops')
self._driver.volumeops = self._volumeops
backing = FakeMor('VirtualMachine', 'my_vm')
FakeMor('Task', 'my_task')
m.StubOutWithMock(self._volumeops, 'get_backing')
m.StubOutWithMock(self._volumeops, 'delete_backing')
self._volumeops.get_backing('hello_world').AndReturn(backing)
self._volumeops.delete_backing(backing)
m.ReplayAll()
volume = FakeObject()
volume['name'] = 'hello_world'
self._driver.delete_volume(volume)
m.UnsetStubs()
m.VerifyAll()
def test_create_export(self):
"""Test create_export."""
self._driver.create_export(mox.IgnoreArg(), mox.IgnoreArg())
def test_ensure_export(self):
"""Test ensure_export."""
self._driver.ensure_export(mox.IgnoreArg(), mox.IgnoreArg())
def test_remove_export(self):
"""Test remove_export."""
self._driver.remove_export(mox.IgnoreArg(), mox.IgnoreArg())
def test_terminate_connection(self):
"""Test terminate_connection."""
self._driver.terminate_connection(mox.IgnoreArg(), mox.IgnoreArg(),
force=mox.IgnoreArg())
def test_create_backing_in_inventory_multi_hosts(self):
"""Test _create_backing_in_inventory scanning multiple hosts."""
m = self.mox
m.StubOutWithMock(self._driver.__class__, 'volumeops')
self._driver.volumeops = self._volumeops
host1 = FakeObj(obj=FakeMor('HostSystem', 'my_host1'))
host2 = FakeObj(obj=FakeMor('HostSystem', 'my_host2'))
retrieve_result = FakeRetrieveResult([host1, host2], None)
m.StubOutWithMock(self._volumeops, 'get_hosts')
self._volumeops.get_hosts().AndReturn(retrieve_result)
m.StubOutWithMock(self._driver, '_create_backing')
volume = FakeObject()
volume['name'] = 'vol_name'
backing = FakeMor('VirtualMachine', 'my_back')
mux = self._driver._create_backing(volume, host1.obj, {})
mux.AndRaise(error_util.VimException('Maintenance mode'))
mux = self._driver._create_backing(volume, host2.obj, {})
mux.AndReturn(backing)
m.StubOutWithMock(self._volumeops, 'cancel_retrieval')
self._volumeops.cancel_retrieval(retrieve_result)
m.StubOutWithMock(self._volumeops, 'continue_retrieval')
m.ReplayAll()
result = self._driver._create_backing_in_inventory(volume)
self.assertEqual(result, backing)
m.UnsetStubs()
m.VerifyAll()
def test_init_conn_with_instance_and_backing(self):
"""Test initialize_connection with instance and backing."""
m = self.mox
m.StubOutWithMock(self._driver.__class__, 'volumeops')
self._driver.volumeops = self._volumeops
m.StubOutWithMock(self._volumeops, 'get_backing')
volume = FakeObject()
volume['name'] = 'volume_name'
volume['id'] = 'volume_id'
volume['size'] = 1
connector = {'instance': 'my_instance'}
backing = FakeMor('VirtualMachine', 'my_back')
self._volumeops.get_backing(volume['name']).AndReturn(backing)
m.StubOutWithMock(self._volumeops, 'get_host')
host = FakeMor('HostSystem', 'my_host')
self._volumeops.get_host(mox.IgnoreArg()).AndReturn(host)
m.ReplayAll()
conn_info = self._driver.initialize_connection(volume, connector)
self.assertEqual(conn_info['driver_volume_type'], 'vmdk')
self.assertEqual(conn_info['data']['volume'], 'my_back')
self.assertEqual(conn_info['data']['volume_id'], 'volume_id')
m.UnsetStubs()
m.VerifyAll()
def test_get_volume_group_folder(self):
"""Test _get_volume_group_folder."""
m = self.mox
m.StubOutWithMock(self._driver.__class__, 'volumeops')
self._driver.volumeops = self._volumeops
datacenter = FakeMor('Datacenter', 'my_dc')
m.StubOutWithMock(self._volumeops, 'get_vmfolder')
self._volumeops.get_vmfolder(datacenter)
m.ReplayAll()
self._driver._get_volume_group_folder(datacenter)
m.UnsetStubs()
m.VerifyAll()
def test_select_datastore_summary(self):
"""Test _select_datastore_summary."""
m = self.mox
m.StubOutWithMock(self._driver.__class__, 'volumeops')
self._driver.volumeops = self._volumeops
datastore1 = FakeMor('Datastore', 'my_ds_1')
datastore2 = FakeMor('Datastore', 'my_ds_2')
datastore3 = FakeMor('Datastore', 'my_ds_3')
datastore4 = FakeMor('Datastore', 'my_ds_4')
datastores = [datastore1, datastore2, datastore3, datastore4]
m.StubOutWithMock(self._volumeops, 'get_summary')
summary1 = FakeDatastoreSummary(5, 100)
summary2 = FakeDatastoreSummary(25, 100)
summary3 = FakeDatastoreSummary(50, 100)
summary4 = FakeDatastoreSummary(75, 100)
self._volumeops.get_summary(
datastore1).MultipleTimes().AndReturn(summary1)
self._volumeops.get_summary(
datastore2).MultipleTimes().AndReturn(summary2)
self._volumeops.get_summary(
datastore3).MultipleTimes().AndReturn(summary3)
self._volumeops.get_summary(
datastore4).MultipleTimes().AndReturn(summary4)
m.StubOutWithMock(self._volumeops, 'get_connected_hosts')
host1 = FakeMor('HostSystem', 'my_host_1')
host2 = FakeMor('HostSystem', 'my_host_2')
host3 = FakeMor('HostSystem', 'my_host_3')
host4 = FakeMor('HostSystem', 'my_host_4')
self._volumeops.get_connected_hosts(
datastore1).MultipleTimes().AndReturn([host1, host2, host3, host4])
self._volumeops.get_connected_hosts(
datastore2).MultipleTimes().AndReturn([host1, host2, host3])
self._volumeops.get_connected_hosts(
datastore3).MultipleTimes().AndReturn([host1, host2])
self._volumeops.get_connected_hosts(
datastore4).MultipleTimes().AndReturn([host1, host2])
m.ReplayAll()
summary = self._driver._select_datastore_summary(1, datastores)
self.assertEqual(summary, summary1)
summary = self._driver._select_datastore_summary(10, datastores)
self.assertEqual(summary, summary2)
summary = self._driver._select_datastore_summary(40, datastores)
self.assertEqual(summary, summary4)
self.assertRaises(error_util.VimException,
self._driver._select_datastore_summary,
100, datastores)
m.UnsetStubs()
m.VerifyAll()
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareEsxVmdkDriver.'
'session', new_callable=mock.PropertyMock)
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareEsxVmdkDriver.'
'volumeops', new_callable=mock.PropertyMock)
def test_get_folder_ds_summary(self, volumeops, session):
"""Test _get_folder_ds_summary."""
volumeops = volumeops.return_value
driver = self._driver
volume = {'size': 10, 'volume_type_id': 'fake_type'}
rp = mock.sentinel.resource_pool
dss = mock.sentinel.datastores
# patch method calls from _get_folder_ds_summary
volumeops.get_dc.return_value = mock.sentinel.dc
volumeops.get_vmfolder.return_value = mock.sentinel.folder
driver._get_storage_profile = mock.MagicMock()
driver._select_datastore_summary = mock.MagicMock()
driver._select_datastore_summary.return_value = mock.sentinel.summary
# call _get_folder_ds_summary
(folder, datastore_summary) = driver._get_folder_ds_summary(volume,
rp, dss)
# verify returned values and calls made
self.assertEqual(mock.sentinel.folder, folder,
"Folder returned is wrong.")
self.assertEqual(mock.sentinel.summary, datastore_summary,
"Datastore summary returned is wrong.")
volumeops.get_dc.assert_called_once_with(rp)
volumeops.get_vmfolder.assert_called_once_with(mock.sentinel.dc)
driver._get_storage_profile.assert_called_once_with(volume)
size = volume['size'] * units.Gi
driver._select_datastore_summary.assert_called_once_with(size, dss)
def test_get_disk_type(self):
"""Test _get_disk_type."""
volume = FakeObject()
volume['volume_type_id'] = None
self.assertEqual(vmdk.VMwareEsxVmdkDriver._get_disk_type(volume),
'thin')
def test_init_conn_with_instance_no_backing(self):
"""Test initialize_connection with instance and without backing."""
m = self.mox
m.StubOutWithMock(self._driver.__class__, 'volumeops')
self._driver.volumeops = self._volumeops
m.StubOutWithMock(self._volumeops, 'get_backing')
volume = FakeObject()
volume['name'] = 'volume_name'
volume['id'] = 'volume_id'
volume['size'] = 1
volume['volume_type_id'] = None
connector = {'instance': 'my_instance'}
self._volumeops.get_backing(volume['name'])
m.StubOutWithMock(self._volumeops, 'get_host')
host = FakeMor('HostSystem', 'my_host')
self._volumeops.get_host(mox.IgnoreArg()).AndReturn(host)
m.StubOutWithMock(self._volumeops, 'get_dss_rp')
resource_pool = FakeMor('ResourcePool', 'my_rp')
datastores = [FakeMor('Datastore', 'my_ds')]
self._volumeops.get_dss_rp(host).AndReturn((datastores, resource_pool))
m.StubOutWithMock(self._driver, '_get_folder_ds_summary')
folder = FakeMor('Folder', 'my_fol')
summary = FakeDatastoreSummary(1, 1)
self._driver._get_folder_ds_summary(volume, resource_pool,
datastores).AndReturn((folder,
summary))
backing = FakeMor('VirtualMachine', 'my_back')
m.StubOutWithMock(self._volumeops, 'create_backing')
self._volumeops.create_backing(volume['name'],
volume['size'] * units.Mi,
mox.IgnoreArg(), folder,
resource_pool, host,
mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(backing)
m.ReplayAll()
conn_info = self._driver.initialize_connection(volume, connector)
self.assertEqual(conn_info['driver_volume_type'], 'vmdk')
self.assertEqual(conn_info['data']['volume'], 'my_back')
self.assertEqual(conn_info['data']['volume_id'], 'volume_id')
m.UnsetStubs()
m.VerifyAll()
def test_init_conn_without_instance(self):
"""Test initialize_connection without instance and a backing."""
m = self.mox
m.StubOutWithMock(self._driver.__class__, 'volumeops')
self._driver.volumeops = self._volumeops
m.StubOutWithMock(self._volumeops, 'get_backing')
backing = FakeMor('VirtualMachine', 'my_back')
volume = FakeObject()
volume['name'] = 'volume_name'
volume['id'] = 'volume_id'
connector = {}
self._volumeops.get_backing(volume['name']).AndReturn(backing)
m.ReplayAll()
conn_info = self._driver.initialize_connection(volume, connector)
self.assertEqual(conn_info['driver_volume_type'], 'vmdk')
self.assertEqual(conn_info['data']['volume'], 'my_back')
self.assertEqual(conn_info['data']['volume_id'], 'volume_id')
m.UnsetStubs()
m.VerifyAll()
def test_create_snapshot_without_backing(self):
"""Test vmdk.create_snapshot without backing."""
m = self.mox
m.StubOutWithMock(self._driver.__class__, 'volumeops')
self._driver.volumeops = self._volumeops
m.StubOutWithMock(self._volumeops, 'get_backing')
snapshot = FakeObject()
snapshot['volume_name'] = 'volume_name'
snapshot['name'] = 'snap_name'
snapshot['volume'] = FakeObject()
snapshot['volume']['status'] = 'available'
self._volumeops.get_backing(snapshot['volume_name'])
m.ReplayAll()
self._driver.create_snapshot(snapshot)
m.UnsetStubs()
m.VerifyAll()
def test_create_snapshot_with_backing(self):
"""Test vmdk.create_snapshot with backing."""
m = self.mox
m.StubOutWithMock(self._driver.__class__, 'volumeops')
self._driver.volumeops = self._volumeops
m.StubOutWithMock(self._volumeops, 'get_backing')
snapshot = FakeObject()
snapshot['volume_name'] = 'volume_name'
snapshot['name'] = 'snapshot_name'
snapshot['display_description'] = 'snapshot_desc'
snapshot['volume'] = FakeObject()
snapshot['volume']['status'] = 'available'
backing = FakeMor('VirtualMachine', 'my_back')
self._volumeops.get_backing(snapshot['volume_name']).AndReturn(backing)
m.StubOutWithMock(self._volumeops, 'create_snapshot')
self._volumeops.create_snapshot(backing, snapshot['name'],
snapshot['display_description'])
m.ReplayAll()
self._driver.create_snapshot(snapshot)
m.UnsetStubs()
m.VerifyAll()
def test_create_snapshot_when_attached(self):
"""Test vmdk.create_snapshot when volume is attached."""
snapshot = FakeObject()
snapshot['volume'] = FakeObject()
snapshot['volume']['status'] = 'in-use'
self.assertRaises(exception.InvalidVolume,
self._driver.create_snapshot, snapshot)
def test_delete_snapshot_without_backing(self):
"""Test delete_snapshot without backing."""
m = self.mox
m.StubOutWithMock(self._driver.__class__, 'volumeops')
self._driver.volumeops = self._volumeops
m.StubOutWithMock(self._volumeops, 'get_backing')
snapshot = FakeObject()
snapshot['volume_name'] = 'volume_name'
snapshot['name'] = 'snap_name'
snapshot['volume'] = FakeObject()
snapshot['volume']['status'] = 'available'
self._volumeops.get_backing(snapshot['volume_name'])
m.ReplayAll()
self._driver.delete_snapshot(snapshot)
m.UnsetStubs()
m.VerifyAll()
def test_delete_snapshot_with_backing(self):
"""Test delete_snapshot with backing."""
m = self.mox
m.StubOutWithMock(self._driver.__class__, 'volumeops')
self._driver.volumeops = self._volumeops
m.StubOutWithMock(self._volumeops, 'get_backing')
snapshot = FakeObject()
snapshot['name'] = 'snapshot_name'
snapshot['volume_name'] = 'volume_name'
snapshot['name'] = 'snap_name'
snapshot['volume'] = FakeObject()
snapshot['volume']['status'] = 'available'
backing = FakeMor('VirtualMachine', 'my_back')
self._volumeops.get_backing(snapshot['volume_name']).AndReturn(backing)
m.StubOutWithMock(self._volumeops, 'delete_snapshot')
self._volumeops.delete_snapshot(backing,
snapshot['name'])
m.ReplayAll()
self._driver.delete_snapshot(snapshot)
m.UnsetStubs()
m.VerifyAll()
def test_delete_snapshot_when_attached(self):
"""Test delete_snapshot when volume is attached."""
snapshot = FakeObject()
snapshot['volume'] = FakeObject()
snapshot['volume']['status'] = 'in-use'
self.assertRaises(exception.InvalidVolume,
self._driver.delete_snapshot, snapshot)
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareEsxVmdkDriver.'
'volumeops', new_callable=mock.PropertyMock)
def test_create_cloned_volume_without_backing(self, mock_vops):
"""Test create_cloned_volume without a backing."""
mock_vops = mock_vops.return_value
driver = self._driver
volume = {'name': 'mock_vol'}
src_vref = {'name': 'src_snapshot_name'}
driver._verify_volume_creation = mock.MagicMock()
mock_vops.get_backing.return_value = None
# invoke the create_volume_from_snapshot api
driver.create_cloned_volume(volume, src_vref)
# verify calls
driver._verify_volume_creation.assert_called_once_with(volume)
mock_vops.get_backing.assert_called_once_with('src_snapshot_name')
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareEsxVmdkDriver.'
'volumeops', new_callable=mock.PropertyMock)
def test_create_cloned_volume_with_backing(self, mock_vops):
"""Test create_cloned_volume with a backing."""
mock_vops = mock_vops.return_value
driver = self._driver
volume = mock.sentinel.volume
fake_size = 1
src_vref = {'name': 'src_snapshot_name', 'size': fake_size}
backing = mock.sentinel.backing
driver._verify_volume_creation = mock.MagicMock()
mock_vops.get_backing.return_value = backing
src_vmdk = "[datastore] src_vm/src_vm.vmdk"
mock_vops.get_vmdk_path.return_value = src_vmdk
driver._create_backing_by_copying = mock.MagicMock()
# invoke the create_volume_from_snapshot api
driver.create_cloned_volume(volume, src_vref)
# verify calls
driver._verify_volume_creation.assert_called_once_with(volume)
mock_vops.get_backing.assert_called_once_with('src_snapshot_name')
mock_vops.get_vmdk_path.assert_called_once_with(backing)
driver._create_backing_by_copying.assert_called_once_with(volume,
src_vmdk,
fake_size)
@mock.patch.object(VMDK_DRIVER, '_extend_volumeops_virtual_disk')
@mock.patch.object(VMDK_DRIVER, '_create_backing_in_inventory')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_create_backing_by_copying(self, volumeops, create_backing,
_extend_virtual_disk):
self._test_create_backing_by_copying(volumeops, create_backing,
_extend_virtual_disk)
def _test_create_backing_by_copying(self, volumeops, create_backing,
_extend_virtual_disk):
"""Test _create_backing_by_copying."""
fake_volume = {'size': 2, 'name': 'fake_volume-0000000000001'}
fake_size = 1
fake_src_vmdk_path = "[datastore] src_vm/src_vm.vmdk"
fake_backing = mock.sentinel.backing
fake_vmdk_path = mock.sentinel.path
#"[datastore] dest_vm/dest_vm.vmdk"
fake_dc = mock.sentinel.datacenter
create_backing.return_value = fake_backing
volumeops.get_vmdk_path.return_value = fake_vmdk_path
volumeops.get_dc.return_value = fake_dc
# Test with fake_volume['size'] greater than fake_size
self._driver._create_backing_by_copying(fake_volume,
fake_src_vmdk_path,
fake_size)
create_backing.assert_called_once_with(fake_volume)
volumeops.get_vmdk_path.assert_called_once_with(fake_backing)
volumeops.get_dc.assert_called_once_with(fake_backing)
volumeops.delete_vmdk_file.assert_called_once_with(fake_vmdk_path,
fake_dc)
volumeops.copy_vmdk_file.assert_called_once_with(fake_dc,
fake_src_vmdk_path,
fake_vmdk_path)
_extend_virtual_disk.assert_called_once_with(fake_volume['size'],
fake_vmdk_path,
fake_dc)
# Reset all the mocks and test with fake_volume['size']
# not greater than fake_size
_extend_virtual_disk.reset_mock()
fake_size = 2
self._driver._create_backing_by_copying(fake_volume,
fake_src_vmdk_path,
fake_size)
self.assertFalse(_extend_virtual_disk.called)
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareEsxVmdkDriver.'
'volumeops', new_callable=mock.PropertyMock)
def test_create_volume_from_snapshot_without_backing(self, mock_vops):
"""Test create_volume_from_snapshot without a backing."""
mock_vops = mock_vops.return_value
driver = self._driver
volume = {'name': 'mock_vol'}
snapshot = {'volume_name': 'mock_vol', 'name': 'mock_snap'}
driver._verify_volume_creation = mock.MagicMock()
mock_vops.get_backing.return_value = None
# invoke the create_volume_from_snapshot api
driver.create_volume_from_snapshot(volume, snapshot)
# verify calls
driver._verify_volume_creation.assert_called_once_with(volume)
mock_vops.get_backing.assert_called_once_with('mock_vol')
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareEsxVmdkDriver.'
'volumeops', new_callable=mock.PropertyMock)
def test_create_volume_from_snap_without_backing_snap(self, mock_vops):
"""Test create_volume_from_snapshot without a backing snapshot."""
mock_vops = mock_vops.return_value
driver = self._driver
volume = {'volume_type_id': None, 'name': 'mock_vol'}
snapshot = {'volume_name': 'mock_vol', 'name': 'mock_snap'}
backing = mock.sentinel.backing
driver._verify_volume_creation = mock.MagicMock()
mock_vops.get_backing.return_value = backing
mock_vops.get_snapshot.return_value = None
# invoke the create_volume_from_snapshot api
driver.create_volume_from_snapshot(volume, snapshot)
# verify calls
driver._verify_volume_creation.assert_called_once_with(volume)
mock_vops.get_backing.assert_called_once_with('mock_vol')
mock_vops.get_snapshot.assert_called_once_with(backing,
'mock_snap')
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareEsxVmdkDriver.'
'volumeops', new_callable=mock.PropertyMock)
def test_create_volume_from_snapshot(self, mock_vops):
"""Test create_volume_from_snapshot."""
mock_vops = mock_vops.return_value
driver = self._driver
volume = {'volume_type_id': None, 'name': 'mock_vol'}
snapshot = {'volume_name': 'mock_vol', 'name': 'mock_snap',
'volume_size': 1}
fake_size = snapshot['volume_size']
backing = mock.sentinel.backing
snap_moref = mock.sentinel.snap_moref
driver._verify_volume_creation = mock.MagicMock()
mock_vops.get_backing.return_value = backing
mock_vops.get_snapshot.return_value = snap_moref
src_vmdk = "[datastore] src_vm/src_vm-001.vmdk"
mock_vops.get_vmdk_path.return_value = src_vmdk
driver._create_backing_by_copying = mock.MagicMock()
# invoke the create_volume_from_snapshot api
driver.create_volume_from_snapshot(volume, snapshot)
# verify calls
driver._verify_volume_creation.assert_called_once_with(volume)
mock_vops.get_backing.assert_called_once_with('mock_vol')
mock_vops.get_snapshot.assert_called_once_with(backing,
'mock_snap')
mock_vops.get_vmdk_path.assert_called_once_with(snap_moref)
driver._create_backing_by_copying.assert_called_once_with(volume,
src_vmdk,
fake_size)
@mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume')
@mock.patch.object(VMDK_DRIVER, '_extend_vmdk_virtual_disk')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_extend_volume(self, volume_ops, _extend_virtual_disk,
_select_ds_for_volume):
"""Test extend_volume."""
self._test_extend_volume(volume_ops, _extend_virtual_disk,
_select_ds_for_volume)
def _test_extend_volume(self, volume_ops, _extend_virtual_disk,
_select_ds_for_volume):
fake_name = u'volume-00000001'
new_size = '21'
fake_size = '20'
fake_vol = {'project_id': 'testprjid', 'name': fake_name,
'size': fake_size,
'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66'}
fake_host = mock.sentinel.host
fake_rp = mock.sentinel.rp
fake_folder = mock.sentinel.folder
fake_summary = mock.Mock(spec=object)
fake_summary.datastore = mock.sentinel.datastore
fake_summary.name = 'fake_name'
fake_backing = mock.sentinel.backing
volume_ops.get_backing.return_value = fake_backing
        # If there is enough space in the datastore where the volume is
        # located, the rest of this method will not be called.
self._driver.extend_volume(fake_vol, new_size)
_extend_virtual_disk.assert_called_with(fake_name, new_size)
self.assertFalse(_select_ds_for_volume.called)
self.assertFalse(volume_ops.get_backing.called)
self.assertFalse(volume_ops.relocate_backing.called)
self.assertFalse(volume_ops.move_backing_to_folder.called)
        # If there is not enough space in the datastore where the volume is
        # located, the rest of this method will be called. The first time
        # _extend_virtual_disk is called, VimFaultException is raised; the
        # second time it is called, there is no exception.
_extend_virtual_disk.reset_mock()
_extend_virtual_disk.side_effect = [error_util.
VimFaultException(mock.Mock(),
'Error'), None]
# When _select_ds_for_volume raises no exception.
_select_ds_for_volume.return_value = (fake_host, fake_rp,
fake_folder, fake_summary)
self._driver.extend_volume(fake_vol, new_size)
_select_ds_for_volume.assert_called_with(new_size)
volume_ops.get_backing.assert_called_with(fake_name)
volume_ops.relocate_backing.assert_called_with(fake_backing,
fake_summary.datastore,
fake_rp,
fake_host)
_extend_virtual_disk.assert_called_with(fake_name, new_size)
volume_ops.move_backing_to_folder.assert_called_with(fake_backing,
fake_folder)
# If get_backing raises error_util.VimException,
# this exception will be caught for volume extend.
_extend_virtual_disk.reset_mock()
_extend_virtual_disk.side_effect = [error_util.
VimFaultException(mock.Mock(),
'Error'), None]
volume_ops.get_backing.side_effect = error_util.VimException('Error')
self.assertRaises(error_util.VimException, self._driver.extend_volume,
fake_vol, new_size)
        # If _select_ds_for_volume raises an exception, the rest of the code
        # will not be called.
_extend_virtual_disk.reset_mock()
volume_ops.get_backing.reset_mock()
volume_ops.relocate_backing.reset_mock()
volume_ops.move_backing_to_folder.reset_mock()
_extend_virtual_disk.side_effect = [error_util.
VimFaultException(mock.Mock(),
'Error'), None]
_select_ds_for_volume.side_effect = error_util.VimException('Error')
self.assertRaises(error_util.VimException, self._driver.extend_volume,
fake_vol, new_size)
_extend_virtual_disk.assert_called_once_with(fake_name, new_size)
self.assertFalse(volume_ops.get_backing.called)
self.assertFalse(volume_ops.relocate_backing.called)
self.assertFalse(volume_ops.move_backing_to_folder.called)
def test_copy_image_to_volume_non_vmdk(self):
"""Test copy_image_to_volume for a non-vmdk disk format."""
fake_context = mock.sentinel.context
fake_image_id = 'image-123456789'
fake_image_meta = {'disk_format': 'novmdk'}
image_service = mock.Mock()
image_service.show.return_value = fake_image_meta
fake_volume = {'name': 'fake_name', 'size': 1}
self.assertRaises(exception.ImageUnacceptable,
self._driver.copy_image_to_volume,
fake_context, fake_volume,
image_service, fake_image_id)
@mock.patch.object(vmware_images, 'fetch_flat_image')
@mock.patch.object(VMDK_DRIVER, '_extend_vmdk_virtual_disk')
@mock.patch.object(VMDK_DRIVER, '_get_ds_name_flat_vmdk_path')
@mock.patch.object(VMDK_DRIVER, '_create_backing_in_inventory')
@mock.patch.object(VMDK_DRIVER, 'session')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_copy_image_to_volume_vmdk(self, volume_ops, session,
_create_backing_in_inventory,
_get_ds_name_flat_vmdk_path,
_extend_vmdk_virtual_disk,
fetch_flat_image):
"""Test copy_image_to_volume with an acceptable vmdk disk format."""
self._test_copy_image_to_volume_vmdk(volume_ops, session,
_create_backing_in_inventory,
_get_ds_name_flat_vmdk_path,
_extend_vmdk_virtual_disk,
fetch_flat_image)
def _test_copy_image_to_volume_vmdk(self, volume_ops, session,
_create_backing_in_inventory,
_get_ds_name_flat_vmdk_path,
_extend_vmdk_virtual_disk,
fetch_flat_image):
cookies = session.vim.client.options.transport.cookiejar
fake_context = mock.sentinel.context
fake_image_id = 'image-id'
fake_image_meta = {'disk_format': 'vmdk',
'size': 2 * units.Gi,
'properties': {'vmware_disktype': 'preallocated'}}
image_service = mock.Mock(glance.GlanceImageService)
fake_size = 3
fake_volume = {'name': 'volume_name', 'size': fake_size}
fake_backing = mock.sentinel.backing
fake_datastore_name = 'datastore1'
flat_vmdk_path = 'myvolumes/myvm-flat.vmdk'
fake_host = mock.sentinel.host
fake_datacenter = mock.sentinel.datacenter
fake_datacenter_name = mock.sentinel.datacenter_name
timeout = self._config.vmware_image_transfer_timeout_secs
image_service.show.return_value = fake_image_meta
_create_backing_in_inventory.return_value = fake_backing
_get_ds_name_flat_vmdk_path.return_value = (fake_datastore_name,
flat_vmdk_path)
volume_ops.get_host.return_value = fake_host
volume_ops.get_dc.return_value = fake_datacenter
volume_ops.get_entity_name.return_value = fake_datacenter_name
# If the volume size is greater than the image size,
# _extend_vmdk_virtual_disk will be called.
self._driver.copy_image_to_volume(fake_context, fake_volume,
image_service, fake_image_id)
image_service.show.assert_called_with(fake_context, fake_image_id)
_create_backing_in_inventory.assert_called_with(fake_volume)
_get_ds_name_flat_vmdk_path.assert_called_with(fake_backing,
fake_volume['name'])
volume_ops.get_host.assert_called_with(fake_backing)
volume_ops.get_dc.assert_called_with(fake_host)
volume_ops.get_entity_name.assert_called_with(fake_datacenter)
fetch_flat_image.assert_called_with(fake_context, timeout,
image_service,
fake_image_id,
image_size=fake_image_meta['size'],
host=self.IP,
data_center_name=
fake_datacenter_name,
datastore_name=fake_datastore_name,
cookies=cookies,
file_path=flat_vmdk_path)
_extend_vmdk_virtual_disk.assert_called_with(fake_volume['name'],
fake_size)
self.assertFalse(volume_ops.delete_backing.called)
        # If the volume size is not greater than the image size,
        # _extend_vmdk_virtual_disk will not be called.
_extend_vmdk_virtual_disk.reset_mock()
fake_size = 2
fake_volume['size'] = fake_size
self._driver.copy_image_to_volume(fake_context, fake_volume,
image_service, fake_image_id)
self.assertFalse(_extend_vmdk_virtual_disk.called)
self.assertFalse(volume_ops.delete_backing.called)
# If fetch_flat_image raises an Exception, delete_backing
# will be called.
fetch_flat_image.side_effect = exception.CinderException
self.assertRaises(exception.CinderException,
self._driver.copy_image_to_volume,
fake_context, fake_volume,
image_service, fake_image_id)
volume_ops.delete_backing.assert_called_with(fake_backing)
@mock.patch.object(vmware_images, 'fetch_stream_optimized_image')
@mock.patch.object(VMDK_DRIVER, '_extend_vmdk_virtual_disk')
@mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume')
@mock.patch.object(VMDK_DRIVER, '_get_storage_profile_id')
@mock.patch.object(VMDK_DRIVER, 'session')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_copy_image_to_volume_stream_optimized(self,
volumeops,
session,
get_profile_id,
_select_ds_for_volume,
_extend_virtual_disk,
fetch_optimized_image):
"""Test copy_image_to_volume.
Test with an acceptable vmdk disk format and streamOptimized disk type.
"""
self._test_copy_image_to_volume_stream_optimized(volumeops,
session,
get_profile_id,
_select_ds_for_volume,
_extend_virtual_disk,
fetch_optimized_image)
def _test_copy_image_to_volume_stream_optimized(self, volumeops,
session,
get_profile_id,
_select_ds_for_volume,
_extend_virtual_disk,
fetch_optimized_image):
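        """Common copy_image_to_volume (streamOptimized) assertions shared by the ESX and VC driver test cases."""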
fake_context = mock.Mock()
fake_backing = mock.sentinel.backing
fake_image_id = 'image-id'
size = 5 * units.Gi
size_gb = float(size) / units.Gi
fake_volume_size = 1 + size_gb
adapter_type = 'ide'
fake_image_meta = {'disk_format': 'vmdk', 'size': size,
'properties': {'vmware_disktype': 'streamOptimized',
'vmware_adaptertype': adapter_type}}
image_service = mock.Mock(glance.GlanceImageService)
fake_host = mock.sentinel.host
fake_rp = mock.sentinel.rp
fake_folder = mock.sentinel.folder
fake_summary = mock.sentinel.summary
fake_summary.name = "datastore-1"
fake_vm_create_spec = mock.sentinel.spec
fake_disk_type = 'thin'
vol_name = 'fake_volume name'
fake_volume = {'name': vol_name, 'size': fake_volume_size,
'volume_type_id': None}
cf = session.vim.client.factory
vm_import_spec = cf.create('ns0:VirtualMachineImportSpec')
vm_import_spec.configSpec = fake_vm_create_spec
timeout = self._config.vmware_image_transfer_timeout_secs
image_service.show.return_value = fake_image_meta
volumeops.get_create_spec.return_value = fake_vm_create_spec
volumeops.get_backing.return_value = fake_backing
# If _select_ds_for_volume raises an exception, get_create_spec
# will not be called.
_select_ds_for_volume.side_effect = error_util.VimException('Error')
self.assertRaises(exception.VolumeBackendAPIException,
self._driver.copy_image_to_volume,
fake_context, fake_volume,
image_service, fake_image_id)
self.assertFalse(volumeops.get_create_spec.called)
        # If the volume size is greater than the image size,
        # _extend_vmdk_virtual_disk will be called.
_select_ds_for_volume.side_effect = None
_select_ds_for_volume.return_value = (fake_host, fake_rp,
fake_folder, fake_summary)
profile_id = 'profile-1'
get_profile_id.return_value = profile_id
self._driver.copy_image_to_volume(fake_context, fake_volume,
image_service, fake_image_id)
image_service.show.assert_called_with(fake_context, fake_image_id)
_select_ds_for_volume.assert_called_with(fake_volume)
get_profile_id.assert_called_once_with(fake_volume)
volumeops.get_create_spec.assert_called_with(fake_volume['name'],
0,
fake_disk_type,
fake_summary.name,
profile_id,
adapter_type)
self.assertTrue(fetch_optimized_image.called)
fetch_optimized_image.assert_called_with(fake_context, timeout,
image_service,
fake_image_id,
session=session,
host=self.IP,
resource_pool=fake_rp,
vm_folder=fake_folder,
vm_create_spec=
vm_import_spec,
image_size=size)
_extend_virtual_disk.assert_called_with(fake_volume['name'],
fake_volume_size)
self.assertFalse(volumeops.get_backing.called)
self.assertFalse(volumeops.delete_backing.called)
        # If the volume size is not greater than the image size,
        # _extend_vmdk_virtual_disk will not be called.
fake_volume_size = size_gb
fake_volume['size'] = fake_volume_size
_extend_virtual_disk.reset_mock()
self._driver.copy_image_to_volume(fake_context, fake_volume,
image_service, fake_image_id)
self.assertFalse(_extend_virtual_disk.called)
self.assertFalse(volumeops.get_backing.called)
self.assertFalse(volumeops.delete_backing.called)
# If fetch_stream_optimized_image raises an exception,
# get_backing and delete_backing will be called.
fetch_optimized_image.side_effect = exception.CinderException
self.assertRaises(exception.CinderException,
self._driver.copy_image_to_volume,
fake_context, fake_volume,
image_service, fake_image_id)
volumeops.get_backing.assert_called_with(fake_volume['name'])
volumeops.delete_backing.assert_called_with(fake_backing)
def test_copy_volume_to_image_non_vmdk(self):
"""Test copy_volume_to_image for a non-vmdk disk format."""
m = self.mox
image_meta = FakeObject()
image_meta['disk_format'] = 'novmdk'
volume = FakeObject()
volume['name'] = 'vol-name'
volume['instance_uuid'] = None
volume['attached_host'] = None
m.ReplayAll()
self.assertRaises(exception.ImageUnacceptable,
self._driver.copy_volume_to_image,
mox.IgnoreArg(), volume,
mox.IgnoreArg(), image_meta)
m.UnsetStubs()
m.VerifyAll()
def test_copy_volume_to_image_when_attached(self):
"""Test copy_volume_to_image when volume is attached."""
m = self.mox
volume = FakeObject()
volume['instance_uuid'] = 'my_uuid'
m.ReplayAll()
self.assertRaises(exception.InvalidVolume,
self._driver.copy_volume_to_image,
mox.IgnoreArg(), volume,
mox.IgnoreArg(), mox.IgnoreArg())
m.UnsetStubs()
m.VerifyAll()
def test_copy_volume_to_image_vmdk(self):
"""Test copy_volume_to_image for a valid vmdk disk format."""
m = self.mox
m.StubOutWithMock(self._driver.__class__, 'session')
self._driver.session = self._session
m.StubOutWithMock(api.VMwareAPISession, 'vim')
self._session.vim = self._vim
m.StubOutWithMock(self._driver.__class__, 'volumeops')
self._driver.volumeops = self._volumeops
image_id = 'image-id-1'
image_meta = FakeObject()
image_meta['disk_format'] = 'vmdk'
image_meta['id'] = image_id
image_meta['name'] = image_id
image_service = FakeObject()
vol_name = 'volume-123456789'
project_id = 'project-owner-id-123'
volume = FakeObject()
volume['name'] = vol_name
size_gb = 5
size = size_gb * units.Gi
volume['size'] = size_gb
volume['project_id'] = project_id
volume['instance_uuid'] = None
volume['attached_host'] = None
# volumeops.get_backing
backing = FakeMor("VirtualMachine", "my_vm")
m.StubOutWithMock(self._volumeops, 'get_backing')
self._volumeops.get_backing(vol_name).AndReturn(backing)
# volumeops.get_vmdk_path
datastore_name = 'datastore1'
file_path = 'my_folder/my_nested_folder/my_vm.vmdk'
vmdk_file_path = '[%s] %s' % (datastore_name, file_path)
m.StubOutWithMock(self._volumeops, 'get_vmdk_path')
self._volumeops.get_vmdk_path(backing).AndReturn(vmdk_file_path)
# vmware_images.upload_image
timeout = self._config.vmware_image_transfer_timeout_secs
host_ip = self.IP
m.StubOutWithMock(vmware_images, 'upload_image')
vmware_images.upload_image(mox.IgnoreArg(), timeout, image_service,
image_id, project_id, session=self._session,
host=host_ip, vm=backing,
vmdk_file_path=vmdk_file_path,
vmdk_size=size,
image_name=image_id,
image_version=1)
m.ReplayAll()
self._driver.copy_volume_to_image(mox.IgnoreArg(), volume,
image_service, image_meta)
m.UnsetStubs()
m.VerifyAll()
def test_retrieve_properties_ex_fault_checker(self):
"""Test retrieve_properties_ex_fault_checker is called."""
m = self.mox
class FakeVim(vim.Vim):
def __init__(self):
pass
@property
def client(self):
class FakeRetrv(object):
def RetrievePropertiesEx(self, collector):
pass
def __getattr__(self, name):
if name == 'service':
return FakeRetrv()
return FakeRetrv()
def RetrieveServiceContent(self, type='ServiceInstance'):
return mox.MockAnything()
_vim = FakeVim()
m.ReplayAll()
# retrieve_properties_ex_fault_checker throws authentication error
self.assertRaises(error_util.VimFaultException,
_vim.RetrievePropertiesEx, mox.IgnoreArg())
m.UnsetStubs()
m.VerifyAll()
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_extend_vmdk_virtual_disk(self, volume_ops):
"""Test vmdk._extend_vmdk_virtual_disk."""
self._test_extend_vmdk_virtual_disk(volume_ops)
def _test_extend_vmdk_virtual_disk(self, volume_ops):
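        """Common _extend_vmdk_virtual_disk assertions shared by the ESX and VC driver test cases."""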
fake_backing = mock.sentinel.backing
fake_vmdk_path = "[datastore] dest_vm/dest_vm.vmdk"
fake_dc = mock.sentinel.datacenter
fake_name = 'fake_name'
fake_size = 7
# If the backing is None, get_vmdk_path and get_dc
# will not be called
volume_ops.get_backing.return_value = None
volume_ops.get_vmdk_path.return_value = fake_vmdk_path
volume_ops.get_dc.return_value = fake_dc
self._driver._extend_vmdk_virtual_disk(fake_name, fake_size)
volume_ops.get_backing.assert_called_once_with(fake_name)
self.assertFalse(volume_ops.get_vmdk_path.called)
self.assertFalse(volume_ops.get_dc.called)
self.assertFalse(volume_ops.extend_virtual_disk.called)
# Reset the mock and set the backing with a fake,
# all the mocks should be called.
volume_ops.get_backing.reset_mock()
volume_ops.get_backing.return_value = fake_backing
self._driver._extend_vmdk_virtual_disk(fake_name, fake_size)
volume_ops.get_vmdk_path.assert_called_once_with(fake_backing)
volume_ops.get_dc.assert_called_once_with(fake_backing)
volume_ops.extend_virtual_disk.assert_called_once_with(fake_size,
fake_vmdk_path,
fake_dc)
# Test the exceptional case for extend_virtual_disk
volume_ops.extend_virtual_disk.side_effect = error_util.VimException(
'VimException raised.')
self.assertRaises(error_util.VimException,
self._driver._extend_vmdk_virtual_disk,
fake_name, fake_size)
class VMwareVcVmdkDriverTestCase(VMwareEsxVmdkDriverTestCase):
"""Test class for VMwareVcVmdkDriver."""
VMDK_DRIVER = vmdk.VMwareVcVmdkDriver
DEFAULT_VC_VERSION = '5.5'
def setUp(self):
super(VMwareVcVmdkDriverTestCase, self).setUp()
self._config.vmware_host_version = self.DEFAULT_VC_VERSION
self._driver = vmdk.VMwareVcVmdkDriver(configuration=self._config)
def test_get_pbm_wsdl_location(self):
# no version returns None
wsdl = self._driver._get_pbm_wsdl_location(None)
self.assertIsNone(wsdl)
def expected_wsdl(version):
driver_dir = os.path.join(os.path.dirname(__file__), '..',
'volume', 'drivers', 'vmware')
driver_abs_dir = os.path.abspath(driver_dir)
return 'file://' + os.path.join(driver_abs_dir, 'wsdl', version,
'pbmService.wsdl')
# verify wsdl path for different version strings
with mock.patch('os.path.exists') as path_exists:
path_exists.return_value = True
wsdl = self._driver._get_pbm_wsdl_location(LooseVersion('5'))
self.assertEqual(expected_wsdl('5'), wsdl)
wsdl = self._driver._get_pbm_wsdl_location(LooseVersion('5.5'))
self.assertEqual(expected_wsdl('5.5'), wsdl)
wsdl = self._driver._get_pbm_wsdl_location(LooseVersion('5.5.1'))
self.assertEqual(expected_wsdl('5.5'), wsdl)
# if wsdl path does not exist, then it returns None
path_exists.return_value = False
wsdl = self._driver._get_pbm_wsdl_location(LooseVersion('5.5'))
self.assertIsNone(wsdl)
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
'session', new_callable=mock.PropertyMock)
def test_get_vc_version(self, session):
# test config overrides fetching from VC server
version = self._driver._get_vc_version()
self.assertEqual(self.DEFAULT_VC_VERSION, version)
# explicitly remove config entry
self._driver.configuration.vmware_host_version = None
session.return_value.vim.service_content.about.version = '6.0.1'
version = self._driver._get_vc_version()
self.assertEqual(LooseVersion('6.0.1'), version)
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
'_get_pbm_wsdl_location')
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
'_get_vc_version')
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
'session', new_callable=mock.PropertyMock)
def test_do_setup(self, session, _get_vc_version, _get_pbm_wsdl_location):
session = session.return_value
# pbm is disabled
vc_version = LooseVersion('5.0')
_get_vc_version.return_value = vc_version
self._driver.do_setup(mock.ANY)
self.assertFalse(self._driver._storage_policy_enabled)
_get_vc_version.assert_called_once_with()
# pbm is enabled and invalid pbm wsdl location
vc_version = LooseVersion('5.5')
_get_vc_version.reset_mock()
_get_vc_version.return_value = vc_version
_get_pbm_wsdl_location.return_value = None
self.assertRaises(error_util.VMwareDriverException,
self._driver.do_setup,
mock.ANY)
self.assertFalse(self._driver._storage_policy_enabled)
_get_vc_version.assert_called_once_with()
_get_pbm_wsdl_location.assert_called_once_with(vc_version)
# pbm is enabled and valid pbm wsdl location
vc_version = LooseVersion('5.5')
_get_vc_version.reset_mock()
_get_vc_version.return_value = vc_version
_get_pbm_wsdl_location.reset_mock()
_get_pbm_wsdl_location.return_value = 'fake_pbm_location'
self._driver.do_setup(mock.ANY)
self.assertTrue(self._driver._storage_policy_enabled)
_get_vc_version.assert_called_once_with()
_get_pbm_wsdl_location.assert_called_once_with(vc_version)
@mock.patch.object(VMDK_DRIVER, '_extend_volumeops_virtual_disk')
@mock.patch.object(VMDK_DRIVER, '_create_backing_in_inventory')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_create_backing_by_copying(self, volumeops, create_backing,
extend_virtual_disk):
self._test_create_backing_by_copying(volumeops, create_backing,
extend_virtual_disk)
def test_init_conn_with_instance_and_backing(self):
"""Test initialize_connection with instance and backing."""
m = self.mox
m.StubOutWithMock(self._driver.__class__, 'volumeops')
self._driver.volumeops = self._volumeops
m.StubOutWithMock(self._volumeops, 'get_backing')
volume = FakeObject()
volume['name'] = 'volume_name'
volume['id'] = 'volume_id'
volume['size'] = 1
connector = {'instance': 'my_instance'}
backing = FakeMor('VirtualMachine', 'my_back')
self._volumeops.get_backing(volume['name']).AndReturn(backing)
m.StubOutWithMock(self._volumeops, 'get_host')
host = FakeMor('HostSystem', 'my_host')
self._volumeops.get_host(mox.IgnoreArg()).AndReturn(host)
datastore = FakeMor('Datastore', 'my_ds')
resource_pool = FakeMor('ResourcePool', 'my_rp')
m.StubOutWithMock(self._volumeops, 'get_dss_rp')
self._volumeops.get_dss_rp(host).AndReturn(([datastore],
resource_pool))
m.StubOutWithMock(self._volumeops, 'get_datastore')
self._volumeops.get_datastore(backing).AndReturn(datastore)
m.ReplayAll()
conn_info = self._driver.initialize_connection(volume, connector)
self.assertEqual(conn_info['driver_volume_type'], 'vmdk')
self.assertEqual(conn_info['data']['volume'], 'my_back')
self.assertEqual(conn_info['data']['volume_id'], 'volume_id')
m.UnsetStubs()
m.VerifyAll()
def test_get_volume_group_folder(self):
"""Test _get_volume_group_folder."""
m = self.mox
m.StubOutWithMock(self._driver.__class__, 'volumeops')
self._driver.volumeops = self._volumeops
datacenter = FakeMor('Datacenter', 'my_dc')
m.StubOutWithMock(self._volumeops, 'get_vmfolder')
self._volumeops.get_vmfolder(datacenter)
m.StubOutWithMock(self._volumeops, 'create_folder')
self._volumeops.create_folder(mox.IgnoreArg(),
self._config.vmware_volume_folder)
m.ReplayAll()
self._driver._get_volume_group_folder(datacenter)
m.UnsetStubs()
m.VerifyAll()
def test_init_conn_with_instance_and_backing_and_relocation(self):
"""Test initialize_connection with backing being relocated."""
m = self.mox
m.StubOutWithMock(self._driver.__class__, 'volumeops')
self._driver.volumeops = self._volumeops
m.StubOutWithMock(self._volumeops, 'get_backing')
volume = FakeObject()
volume['name'] = 'volume_name'
volume['id'] = 'volume_id'
volume['size'] = 1
connector = {'instance': 'my_instance'}
backing = FakeMor('VirtualMachine', 'my_back')
self._volumeops.get_backing(volume['name']).AndReturn(backing)
m.StubOutWithMock(self._volumeops, 'get_host')
host = FakeMor('HostSystem', 'my_host')
self._volumeops.get_host(mox.IgnoreArg()).AndReturn(host)
datastore1 = FakeMor('Datastore', 'my_ds_1')
datastore2 = FakeMor('Datastore', 'my_ds_2')
resource_pool = FakeMor('ResourcePool', 'my_rp')
m.StubOutWithMock(self._volumeops, 'get_dss_rp')
self._volumeops.get_dss_rp(host).AndReturn(([datastore1],
resource_pool))
m.StubOutWithMock(self._volumeops, 'get_datastore')
self._volumeops.get_datastore(backing).AndReturn(datastore2)
m.StubOutWithMock(self._driver, '_get_folder_ds_summary')
folder = FakeMor('Folder', 'my_fol')
summary = FakeDatastoreSummary(1, 1, datastore1)
self._driver._get_folder_ds_summary(volume, resource_pool,
[datastore1]).AndReturn((folder,
summary))
m.StubOutWithMock(self._volumeops, 'relocate_backing')
self._volumeops.relocate_backing(backing, datastore1,
resource_pool, host)
m.StubOutWithMock(self._volumeops, 'move_backing_to_folder')
self._volumeops.move_backing_to_folder(backing, folder)
m.ReplayAll()
conn_info = self._driver.initialize_connection(volume, connector)
self.assertEqual(conn_info['driver_volume_type'], 'vmdk')
self.assertEqual(conn_info['data']['volume'], 'my_back')
self.assertEqual(conn_info['data']['volume_id'], 'volume_id')
m.UnsetStubs()
m.VerifyAll()
@mock.patch.object(VMDK_DRIVER, '_extend_vmdk_virtual_disk')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_clone_backing_linked(self, volume_ops, _extend_vmdk_virtual_disk):
"""Test _clone_backing with clone type - linked."""
fake_size = 3
fake_volume = {'volume_type_id': None, 'name': 'fake_name',
'size': fake_size}
fake_snapshot = {'volume_name': 'volume_name',
'name': 'snapshot_name',
'volume_size': 2}
fake_type = volumeops.LINKED_CLONE_TYPE
fake_backing = mock.sentinel.backing
self._driver._clone_backing(fake_volume, fake_backing, fake_snapshot,
volumeops.LINKED_CLONE_TYPE,
fake_snapshot['volume_size'])
volume_ops.clone_backing.assert_called_with(fake_volume['name'],
fake_backing,
fake_snapshot,
fake_type,
None)
# If the volume size is greater than the original snapshot size,
# _extend_vmdk_virtual_disk will be called.
_extend_vmdk_virtual_disk.assert_called_with(fake_volume['name'],
fake_volume['size'])
# If the volume size is not greater than the original snapshot size,
# _extend_vmdk_virtual_disk will not be called.
fake_size = 2
fake_volume['size'] = fake_size
_extend_vmdk_virtual_disk.reset_mock()
self._driver._clone_backing(fake_volume, fake_backing, fake_snapshot,
volumeops.LINKED_CLONE_TYPE,
fake_snapshot['volume_size'])
self.assertFalse(_extend_vmdk_virtual_disk.called)
@mock.patch.object(VMDK_DRIVER, '_extend_vmdk_virtual_disk')
@mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_clone_backing_full(self, volume_ops, _select_ds_for_volume,
_extend_vmdk_virtual_disk):
"""Test _clone_backing with clone type - full."""
fake_host = mock.sentinel.host
fake_backing = mock.sentinel.backing
fake_folder = mock.sentinel.folder
fake_datastore = mock.sentinel.datastore
fake_resource_pool = mock.sentinel.resourcePool
fake_summary = mock.Mock(spec=object)
fake_summary.datastore = fake_datastore
fake_size = 3
fake_volume = {'volume_type_id': None, 'name': 'fake_name',
'size': fake_size}
fake_snapshot = {'volume_name': 'volume_name', 'name': 'snapshot_name',
'volume_size': 2}
_select_ds_for_volume.return_value = (fake_host,
fake_resource_pool,
fake_folder, fake_summary)
self._driver._clone_backing(fake_volume, fake_backing, fake_snapshot,
volumeops.FULL_CLONE_TYPE,
fake_snapshot['volume_size'])
_select_ds_for_volume.assert_called_with(fake_volume)
volume_ops.clone_backing.assert_called_with(fake_volume['name'],
fake_backing,
fake_snapshot,
volumeops.FULL_CLONE_TYPE,
fake_datastore)
# If the volume size is greater than the original snapshot size,
# _extend_vmdk_virtual_disk will be called.
_extend_vmdk_virtual_disk.assert_called_with(fake_volume['name'],
fake_volume['size'])
# If the volume size is not greater than the original snapshot size,
# _extend_vmdk_virtual_disk will not be called.
fake_size = 2
fake_volume['size'] = fake_size
_extend_vmdk_virtual_disk.reset_mock()
self._driver._clone_backing(fake_volume, fake_backing, fake_snapshot,
volumeops.FULL_CLONE_TYPE,
fake_snapshot['volume_size'])
self.assertFalse(_extend_vmdk_virtual_disk.called)
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
'volumeops', new_callable=mock.PropertyMock)
def test_create_volume_from_snapshot_without_backing(self, mock_vops):
"""Test create_volume_from_snapshot without a backing."""
mock_vops = mock_vops.return_value
driver = self._driver
volume = {'name': 'mock_vol'}
snapshot = {'volume_name': 'mock_vol', 'name': 'mock_snap'}
driver._verify_volume_creation = mock.MagicMock()
mock_vops.get_backing.return_value = None
# invoke the create_volume_from_snapshot api
driver.create_volume_from_snapshot(volume, snapshot)
# verify calls
driver._verify_volume_creation.assert_called_once_with(volume)
mock_vops.get_backing.assert_called_once_with('mock_vol')
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
'volumeops', new_callable=mock.PropertyMock)
def test_create_volume_from_snap_without_backing_snap(self, mock_vops):
"""Test create_volume_from_snapshot without a backing snapshot."""
mock_vops = mock_vops.return_value
driver = self._driver
volume = {'volume_type_id': None, 'name': 'mock_vol'}
snapshot = {'volume_name': 'mock_vol', 'name': 'mock_snap'}
backing = mock.sentinel.backing
driver._verify_volume_creation = mock.MagicMock()
mock_vops.get_backing.return_value = backing
mock_vops.get_snapshot.return_value = None
# invoke the create_volume_from_snapshot api
driver.create_volume_from_snapshot(volume, snapshot)
# verify calls
driver._verify_volume_creation.assert_called_once_with(volume)
mock_vops.get_backing.assert_called_once_with('mock_vol')
mock_vops.get_snapshot.assert_called_once_with(backing,
'mock_snap')
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
'volumeops', new_callable=mock.PropertyMock)
def test_create_volume_from_snapshot(self, mock_vops):
"""Test create_volume_from_snapshot."""
mock_vops = mock_vops.return_value
driver = self._driver
volume = {'volume_type_id': None, 'name': 'mock_vol'}
snapshot = {'volume_name': 'mock_vol', 'name': 'mock_snap',
'volume_size': 2}
backing = mock.sentinel.backing
snap_moref = mock.sentinel.snap_moref
driver._verify_volume_creation = mock.MagicMock()
mock_vops.get_backing.return_value = backing
mock_vops.get_snapshot.return_value = snap_moref
driver._clone_backing = mock.MagicMock()
# invoke the create_volume_from_snapshot api
driver.create_volume_from_snapshot(volume, snapshot)
# verify calls
driver._verify_volume_creation.assert_called_once_with(volume)
mock_vops.get_backing.assert_called_once_with('mock_vol')
mock_vops.get_snapshot.assert_called_once_with(backing,
'mock_snap')
default_clone_type = volumeops.FULL_CLONE_TYPE
driver._clone_backing.assert_called_once_with(volume,
backing,
snap_moref,
default_clone_type,
snapshot['volume_size'])
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
'volumeops', new_callable=mock.PropertyMock)
def test_create_cloned_volume_without_backing(self, mock_vops):
"""Test create_cloned_volume without a backing."""
mock_vops = mock_vops.return_value
driver = self._driver
volume = {'name': 'mock_vol'}
src_vref = {'name': 'src_snapshot_name'}
driver._verify_volume_creation = mock.MagicMock()
mock_vops.get_backing.return_value = None
        # invoke the create_cloned_volume api
driver.create_cloned_volume(volume, src_vref)
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
'volumeops', new_callable=mock.PropertyMock)
def test_create_cloned_volume_with_backing(self, mock_vops):
"""Test create_cloned_volume with clone type - full."""
mock_vops = mock_vops.return_value
driver = self._driver
volume = {'volume_type_id': None, 'name': 'mock_vol'}
src_vref = {'name': 'src_snapshot_name', 'size': 1}
backing = mock.sentinel.backing
driver._verify_volume_creation = mock.MagicMock()
mock_vops.get_backing.return_value = backing
default_clone_type = volumeops.FULL_CLONE_TYPE
driver._clone_backing = mock.MagicMock()
        # invoke the create_cloned_volume api
driver.create_cloned_volume(volume, src_vref)
# verify calls
driver._verify_volume_creation.assert_called_once_with(volume)
mock_vops.get_backing.assert_called_once_with('src_snapshot_name')
driver._clone_backing.assert_called_once_with(volume,
backing,
None,
default_clone_type,
src_vref['size'])
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
'volumeops', new_callable=mock.PropertyMock)
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
'_get_clone_type')
def test_create_linked_cloned_volume_with_backing(self, get_clone_type,
mock_vops):
"""Test create_cloned_volume with clone type - linked."""
mock_vops = mock_vops.return_value
driver = self._driver
volume = {'volume_type_id': None, 'name': 'mock_vol', 'id': 'mock_id'}
src_vref = {'name': 'src_snapshot_name', 'status': 'available',
'size': 1}
backing = mock.sentinel.backing
driver._verify_volume_creation = mock.MagicMock()
mock_vops.get_backing.return_value = backing
linked_clone = volumeops.LINKED_CLONE_TYPE
get_clone_type.return_value = linked_clone
driver._clone_backing = mock.MagicMock()
mock_vops.create_snapshot = mock.MagicMock()
mock_vops.create_snapshot.return_value = mock.sentinel.snapshot
        # invoke the create_cloned_volume api
driver.create_cloned_volume(volume, src_vref)
# verify calls
driver._verify_volume_creation.assert_called_once_with(volume)
mock_vops.get_backing.assert_called_once_with('src_snapshot_name')
get_clone_type.assert_called_once_with(volume)
name = 'snapshot-%s' % volume['id']
mock_vops.create_snapshot.assert_called_once_with(backing, name, None)
driver._clone_backing.assert_called_once_with(volume,
backing,
mock.sentinel.snapshot,
linked_clone,
src_vref['size'])
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
'volumeops', new_callable=mock.PropertyMock)
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
'_get_clone_type')
def test_create_linked_cloned_volume_when_attached(self, get_clone_type,
mock_vops):
"""Test create_cloned_volume linked clone when volume is attached."""
mock_vops = mock_vops.return_value
driver = self._driver
volume = {'volume_type_id': None, 'name': 'mock_vol', 'id': 'mock_id'}
src_vref = {'name': 'src_snapshot_name', 'status': 'in-use'}
backing = mock.sentinel.backing
driver._verify_volume_creation = mock.MagicMock()
mock_vops.get_backing.return_value = backing
linked_clone = volumeops.LINKED_CLONE_TYPE
get_clone_type.return_value = linked_clone
        # invoke the create_cloned_volume api
self.assertRaises(exception.InvalidVolume,
driver.create_cloned_volume,
volume,
src_vref)
# verify calls
driver._verify_volume_creation.assert_called_once_with(volume)
mock_vops.get_backing.assert_called_once_with('src_snapshot_name')
get_clone_type.assert_called_once_with(volume)
@mock.patch('cinder.volume.volume_types.get_volume_type_extra_specs')
def test_get_storage_profile(self, get_volume_type_extra_specs):
"""Test vmdk _get_storage_profile."""
# volume with no type id returns None
volume = FakeObject()
volume['volume_type_id'] = None
sp = self._driver._get_storage_profile(volume)
self.assertEqual(None, sp, "Without a volume_type_id no storage "
"profile should be returned.")
# profile associated with the volume type should be returned
fake_id = 'fake_volume_id'
volume['volume_type_id'] = fake_id
get_volume_type_extra_specs.return_value = 'fake_profile'
profile = self._driver._get_storage_profile(volume)
self.assertEqual('fake_profile', profile)
spec_key = 'vmware:storage_profile'
get_volume_type_extra_specs.assert_called_once_with(fake_id, spec_key)
# None should be returned when no storage profile is
# associated with the volume type
get_volume_type_extra_specs.return_value = False
profile = self._driver._get_storage_profile(volume)
self.assertIsNone(profile)
@mock.patch('cinder.volume.drivers.vmware.vim_util.'
'convert_datastores_to_hubs')
@mock.patch('cinder.volume.drivers.vmware.vim_util.'
'convert_hubs_to_datastores')
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
'session', new_callable=mock.PropertyMock)
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
'volumeops', new_callable=mock.PropertyMock)
def test_filter_ds_by_profile(self, volumeops, session, hubs_to_ds,
ds_to_hubs):
"""Test vmdk _filter_ds_by_profile() method."""
volumeops = volumeops.return_value
session = session.return_value
# Test with no profile id
datastores = [mock.sentinel.ds1, mock.sentinel.ds2]
profile = 'fake_profile'
volumeops.retrieve_profile_id.return_value = None
self.assertRaises(error_util.VimException,
self._driver._filter_ds_by_profile,
datastores, profile)
volumeops.retrieve_profile_id.assert_called_once_with(profile)
# Test with a fake profile id
profileId = 'fake_profile_id'
filtered_dss = [mock.sentinel.ds1]
# patch method calls from _filter_ds_by_profile
volumeops.retrieve_profile_id.return_value = profileId
pbm_cf = mock.sentinel.pbm_cf
session.pbm.client.factory = pbm_cf
hubs = [mock.sentinel.hub1, mock.sentinel.hub2]
ds_to_hubs.return_value = hubs
volumeops.filter_matching_hubs.return_value = mock.sentinel.hubs
hubs_to_ds.return_value = filtered_dss
# call _filter_ds_by_profile with a fake profile
actual_dss = self._driver._filter_ds_by_profile(datastores, profile)
# verify return value and called methods
self.assertEqual(filtered_dss, actual_dss,
"Wrong filtered datastores returned.")
ds_to_hubs.assert_called_once_with(pbm_cf, datastores)
volumeops.filter_matching_hubs.assert_called_once_with(hubs,
profileId)
hubs_to_ds.assert_called_once_with(mock.sentinel.hubs, datastores)
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
'session', new_callable=mock.PropertyMock)
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
'volumeops', new_callable=mock.PropertyMock)
def test_get_folder_ds_summary(self, volumeops, session):
"""Test _get_folder_ds_summary."""
volumeops = volumeops.return_value
driver = self._driver
driver._storage_policy_enabled = True
volume = {'size': 10, 'volume_type_id': 'fake_type'}
rp = mock.sentinel.resource_pool
dss = [mock.sentinel.datastore1, mock.sentinel.datastore2]
filtered_dss = [mock.sentinel.datastore1]
profile = mock.sentinel.profile
def filter_ds(datastores, storage_profile):
return filtered_dss
# patch method calls from _get_folder_ds_summary
volumeops.get_dc.return_value = mock.sentinel.dc
volumeops.get_vmfolder.return_value = mock.sentinel.vmfolder
volumeops.create_folder.return_value = mock.sentinel.folder
driver._get_storage_profile = mock.MagicMock()
driver._get_storage_profile.return_value = profile
driver._filter_ds_by_profile = mock.MagicMock(side_effect=filter_ds)
driver._select_datastore_summary = mock.MagicMock()
driver._select_datastore_summary.return_value = mock.sentinel.summary
# call _get_folder_ds_summary
(folder, datastore_summary) = driver._get_folder_ds_summary(volume,
rp, dss)
# verify returned values and calls made
self.assertEqual(mock.sentinel.folder, folder,
"Folder returned is wrong.")
self.assertEqual(mock.sentinel.summary, datastore_summary,
"Datastore summary returned is wrong.")
volumeops.get_dc.assert_called_once_with(rp)
volumeops.get_vmfolder.assert_called_once_with(mock.sentinel.dc)
volumeops.create_folder.assert_called_once_with(mock.sentinel.vmfolder,
self.VOLUME_FOLDER)
driver._get_storage_profile.assert_called_once_with(volume)
driver._filter_ds_by_profile.assert_called_once_with(dss, profile)
size = volume['size'] * units.Gi
driver._select_datastore_summary.assert_called_once_with(size,
filtered_dss)
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_extend_vmdk_virtual_disk(self, volume_ops):
"""Test vmdk._extend_vmdk_virtual_disk."""
self._test_extend_vmdk_virtual_disk(volume_ops)
@mock.patch.object(vmware_images, 'fetch_flat_image')
@mock.patch.object(VMDK_DRIVER, '_extend_vmdk_virtual_disk')
@mock.patch.object(VMDK_DRIVER, '_get_ds_name_flat_vmdk_path')
@mock.patch.object(VMDK_DRIVER, '_create_backing_in_inventory')
@mock.patch.object(VMDK_DRIVER, 'session')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_copy_image_to_volume_vmdk(self, volume_ops, session,
_create_backing_in_inventory,
_get_ds_name_flat_vmdk_path,
_extend_vmdk_virtual_disk,
fetch_flat_image):
"""Test copy_image_to_volume with an acceptable vmdk disk format."""
self._test_copy_image_to_volume_vmdk(volume_ops, session,
_create_backing_in_inventory,
_get_ds_name_flat_vmdk_path,
_extend_vmdk_virtual_disk,
fetch_flat_image)
@mock.patch.object(vmware_images, 'fetch_stream_optimized_image')
@mock.patch.object(VMDK_DRIVER, '_extend_vmdk_virtual_disk')
@mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume')
@mock.patch.object(VMDK_DRIVER, '_get_storage_profile_id')
@mock.patch.object(VMDK_DRIVER, 'session')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_copy_image_to_volume_stream_optimized(self, volumeops,
session,
get_profile_id,
_select_ds_for_volume,
_extend_virtual_disk,
fetch_optimized_image):
"""Test copy_image_to_volume.
Test with an acceptable vmdk disk format and streamOptimized disk type.
"""
self._test_copy_image_to_volume_stream_optimized(volumeops,
session,
get_profile_id,
_select_ds_for_volume,
_extend_virtual_disk,
fetch_optimized_image)
@mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume')
@mock.patch.object(VMDK_DRIVER, '_extend_vmdk_virtual_disk')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_extend_volume(self, volume_ops, _extend_virtual_disk,
_select_ds_for_volume):
"""Test extend_volume."""
self._test_extend_volume(volume_ops, _extend_virtual_disk,
_select_ds_for_volume)
@mock.patch.object(VMDK_DRIVER, '_get_folder_ds_summary')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_create_backing_with_params(self, vops, get_folder_ds_summary):
resource_pool = mock.sentinel.resource_pool
vops.get_dss_rp.return_value = (mock.Mock(), resource_pool)
folder = mock.sentinel.folder
summary = mock.sentinel.summary
get_folder_ds_summary.return_value = (folder, summary)
volume = {'name': 'vol-1', 'volume_type_id': None, 'size': 1}
host = mock.Mock()
create_params = {vmdk.CREATE_PARAM_DISK_LESS: True}
self._driver._create_backing(volume, host, create_params)
vops.create_backing_disk_less.assert_called_once_with('vol-1',
folder,
resource_pool,
host,
summary.name,
None)
create_params = {vmdk.CREATE_PARAM_ADAPTER_TYPE: 'ide'}
self._driver._create_backing(volume, host, create_params)
vops.create_backing.assert_called_once_with('vol-1',
units.Mi,
vmdk.THIN_VMDK_TYPE,
folder,
resource_pool,
host,
summary.name,
None,
'ide')
|
|
from piston.handler import BaseHandler, rc
from systems.models import System, SystemRack,SystemStatus,NetworkAdapter,KeyValue
from truth.models import Truth, KeyValue as TruthKeyValue
from dhcp.DHCP import DHCP as DHCPInterface
from dhcp.models import DHCP
from MacroExpansion import MacroExpansion
from KeyValueTree import KeyValueTree
import re
try:
import json
except:
from django.utils import simplejson as json
from django.test.client import Client
from MozInvAuthorization.KeyValueACL import KeyValueACL
from settings import API_ACCESS
class KeyValueHandler(BaseHandler):
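    """Piston REST handler exposing CRUD operations for system and truth key/value pairs."""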
allowed_methods = API_ACCESS
def create(self, request, key_value_id=None):
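        """Create a key/value pair for a system (system_id) or a truth (truth_name)."""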
if 'system_id' in request.POST:
post_key = request.POST.get('key')
post_value = request.POST.get('value')
system_id = request.POST.get('system_id')
n = KeyValue()
system = System.objects.get(id=system_id)
if re.search('^nic\.(\d+)\.ipv4_address', str(post_key).strip() ):
try:
acl = KeyValueACL(request)
acl.check_ip_not_exist_other_system(system, post_value)
except Exception, e:
resp = rc.FORBIDDEN
resp.write(e)
return resp
try:
n.system = system
if 'key' in request.POST:
n.key = request.POST['key']
if 'value' in request.POST:
n.value = request.POST['value']
n.save()
resp = rc.ALL_OK
resp.write('json = {"id":%i}' % (n.id))
except:
resp = rc.NOT_FOUND
resp.write('Unable to Create Key/Value Pair')
return resp
elif 'truth_name' in request.POST:
n = TruthKeyValue()
truth = Truth.objects.get(name=request.POST['truth_name'])
n.truth = truth
if 'key' in request.POST:
n.key = request.POST['key']
if 'value' in request.POST:
n.value = request.POST['value']
try:
n.save()
resp = rc.ALL_OK
resp.write('json = {"id":%i}' % (n.id))
except:
resp = rc.NOT_FOUND
resp.write('Unable to Create Key/Value Pair')
return resp
        else:
            resp = rc.NOT_FOUND
            resp.write('system_id or truth_name required')
            return resp
def build_validation_array(self):
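        """Return parallel lists of key patterns, value-validation regexes and error messages used by validate()."""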
input_regex_array = []
output_regex_array = []
error_message_array = []
ipv4_regex = re.compile(r'((2[0-5]|1[0-9]|[0-9])?[0-9]\.){3}((2[0-5]|1[0-9]|[0-9])?[0-9])')
true_false_regex = re.compile('(^True$|^False$)')
input_regex_array.append(re.compile('nic\.\d+\.ipv4_address\.\d+'))
output_regex_array.append(ipv4_regex)
error_message_array.append('Requires IP Address')
input_regex_array.append(re.compile('^dhcp\.scope\.netmask$'))
output_regex_array.append(ipv4_regex)
error_message_array.append('Requires Subnet Mask')
input_regex_array.append(re.compile('^is_dhcp_scope$'))
output_regex_array.append(re.compile(true_false_regex))
error_message_array.append('Requires True|False')
input_regex_array.append(re.compile('^dhcp\.scope\.start$'))
output_regex_array.append(re.compile(ipv4_regex))
error_message_array.append('Requires IP Address')
input_regex_array.append(re.compile('^dhcp\.scope\.end$'))
output_regex_array.append(re.compile(ipv4_regex))
error_message_array.append('Requires IP Address')
input_regex_array.append(re.compile('^dhcp\.pool\.start$'))
output_regex_array.append(re.compile(ipv4_regex))
error_message_array.append('Requires IP Address')
input_regex_array.append(re.compile('^dhcp\.pool\.end$'))
output_regex_array.append(re.compile(ipv4_regex))
error_message_array.append('Requires IP Address')
input_regex_array.append(re.compile('^dhcp\.option\.ntp_server\.\d+$'))
output_regex_array.append(re.compile(ipv4_regex))
error_message_array.append('Requires IP Address')
input_regex_array.append(re.compile('^dhcp\.dns_server\.\d+$'))
output_regex_array.append(re.compile(ipv4_regex))
error_message_array.append('Requires IP Address')
input_regex_array.append(re.compile('^dhcp\.option_router\.\d+$'))
output_regex_array.append(re.compile(ipv4_regex))
error_message_array.append('Requires IP Address')
input_regex_array.append(re.compile('^dhcp\.option\.subnet_mask\.\d+$'))
output_regex_array.append(re.compile(ipv4_regex))
error_message_array.append('Requires IP Address')
input_regex_array.append(re.compile('^dhcp\.pool\.allow_booting\.\d+$'))
output_regex_array.append(re.compile(true_false_regex))
error_message_array.append('Requires True|False')
input_regex_array.append(re.compile('^dhcp\.pool\.allow_bootp\.\d+$'))
output_regex_array.append(re.compile(true_false_regex))
error_message_array.append('Requires True|False')
input_regex_array.append(re.compile('^nic\.\d+\.mac_address\.\d+$'))
output_regex_array.append(re.compile('^([0-9a-f]{2}([:-]|$)){6}$', re.I))
error_message_array.append('Requires Mac Address XX:XX:XX:XX:XX:XX')
return input_regex_array, output_regex_array, error_message_array
def validate(self, key, passed_value):
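        """Validate passed_value against the regex registered for key; returns (is_valid, error_message)."""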
error_message = None
return_regex = None
return_bool = True
input_regex_array, output_regex_array, error_message_array = self.build_validation_array()
        ## Loop through the input validation regexes. If the key matches one, use the corresponding regex to validate the value of the key/value pair
for i in range(0, len(input_regex_array)):
if input_regex_array[i].match(key):
return_regex = output_regex_array[i]
                error_message = error_message_array[i]
continue
## Check if we should validate the value portion of the key/value pair. No use validating it if the key doesn't require it
if return_regex is not None:
if return_regex.match(passed_value) is None:
return_bool = False
else:
error_message = None
return return_bool, error_message
def update(self, request, key_value_id=None):
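        """Update an existing key/value pair for a system (system_id) or a truth (truth_id)."""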
###TODO This whole method is not functioning correctly. Just for version 2. Not getting the system_id or truth_id from the poster firefox plugin
if 'system_id' in request.POST:
n = None
found = False
post_key = request.POST.get('key')
post_value = request.POST.get('value')
system_id = request.POST.get('system_id')
key_validated, validation_error_string = self.validate(post_key, post_value)
if re.search('^nic\.(\d+)\.ipv4_address', str(post_key).strip() ):
try:
acl = KeyValueACL(request)
system = System.objects.get(id=system_id)
acl.check_ip_not_exist_other_system(system, post_value)
except Exception, e:
resp = rc.FORBIDDEN
resp.write(e)
return resp
if key_validated is False:
resp = rc.FORBIDDEN
resp.write('Validation Failed for %s %s' % (request.POST['key'], validation_error_string) )
return resp
try:
n = KeyValue.objects.get(id=key_value_id,key=request.POST['key'])
system = System.objects.get(id=request.POST['system_id'])
found = True
except Exception, e:
#print e
found = False
if found is False:
try:
system = System.objects.get(id=request.POST['system_id'])
n = KeyValue.objects.get(system=system,key=request.POST['key'])
found = True
except:
found = False
if found is False:
resp = rc.NOT_FOUND
                resp.write('Neither system_id nor truth_id found')
return resp
if n is not None:
n.system = system
if 'value' in request.POST and n is not None:
n.value = request.POST['value']
if n is not None:
try:
n.save()
resp = rc.ALL_OK
resp.write('json = {"id":%i}' % (n.id))
except:
resp = rc.NOT_FOUND
resp.write('Unable to Create Key/Value Pair')
return resp
elif 'truth_id' in request.POST or 'truth_id' in request.PUT:
try:
truth = Truth.objects.get(name=key_value_id)
n = TruthKeyValue.objects.get(truth=truth,key=request.POST['key'])
if 'value' in request.POST:
n.value = request.POST['value']
except:
pass
try:
n.save()
resp = rc.ALL_OK
resp.write('json = {"id":%i}' % (n.id))
except Exception, e:
resp = rc.NOT_FOUND
resp.write('Unable to Update Key/Value Pair %s' % e)
return resp
else:
resp = rc.NOT_FOUND
            resp.write('Neither system_id nor truth_id found')
return resp
def read(self, request, key_value_id=None):
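        """Read key/value data; the GET parameters (keystore, key_type, key, value) select the query mode."""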
#if keystore get var is set return the whole keystore
if 'keystore' in request.GET:
            #if the key GET var is set, return the keystore based on the existence of this key
if 'key' in request.GET:
base = KeyValue.objects.filter(key=request.GET['keystore']).filter(keyvalue_set__contains=request.GET['key'])
                tmp_list = {}
                for row in base:
                    matches = re.match("\$\{(.*)\}", row.value)
                    if matches is not None:
                        m = MacroExpansion(matches.group(1))
                        row.value = m.output()
                for r in base:
                    key_name = 'host:%s:%s' % (r.system.hostname, r.key)
                    tmp_list[key_name] = r.value
                return tmp_list
if 'key' not in request.GET:
tree = KeyValueTree(request.GET['keystore']).final
return tree
elif 'key_type' in request.GET:
key_type = request.GET['key_type']
tmp_list = []
if key_type == 'dhcp_scopes':
#Get keystores from truth that have dhcp.is_scope = True
base = TruthKeyValue.objects.filter(key='dhcp.is_scope',value='True')
#Iterate through the list and get all of the key/value pairs
for row in base:
keyvalue = TruthKeyValue.objects.filter(truth=row.truth)
tmp_dict = {}
for kv in keyvalue:
tmp_dict[kv.key] = kv.value
tmp_list.append(tmp_dict)
return tmp_list
if key_type == 'system_by_reverse_dns_zone':
#Get keystores from truth that have dhcp.is_scope = True
keyvalue_pairs = KeyValue.objects.filter(key__contains='reverse_dns_zone',value=request.GET['zone']).filter(key__startswith='nic.')
#Iterate through the list and get all of the key/value pairs
tmp_list = []
for row in keyvalue_pairs:
keyvalue = KeyValue.objects.filter(system=row.system)
tmp_dict = {}
for kv in keyvalue:
tmp_dict[kv.key] = kv.value
tmp_dict['hostname'] = row.system.hostname
appendable = True
for the_items in tmp_list:
if 'hostname' not in the_items:
appendable = True
elif the_items['hostname'] == row.system.hostname:
appendable = False
if appendable is True:
tmp_list.append(tmp_dict)
#tmp_list = list(set(tmp_list))
return tmp_list
if key_type == 'system_by_scope':
#Get keystores from truth that have dhcp.is_scope = True
keyvalue_pairs = KeyValue.objects.filter(key__contains='dhcp_scope',value=request.GET['scope']).filter(key__startswith='nic.')
#Iterate through the list and get all of the key/value pairs
tmp_list = []
for row in keyvalue_pairs:
keyvalue = KeyValue.objects.filter(system=row.system)
tmp_dict = {}
for kv in keyvalue:
tmp_dict[kv.key] = kv.value
tmp_dict['hostname'] = row.system.hostname
appendable = True
for the_items in tmp_list:
if 'hostname' not in the_items:
appendable = True
elif the_items['hostname'] == row.system.hostname:
appendable = False
if appendable is True:
tmp_list.append(tmp_dict)
#tmp_list = list(set(tmp_list))
return tmp_list
if key_type == 'adapters_by_system':
#Get keystores from truth that have dhcp.is_scope = True
system = None
try:
system = System.objects.get(hostname=request.GET['system'])
except:
system = None
if not system:
try:
system = System.objects.get(id=request.GET['system'])
except:
system = None
if not system:
resp = rc.NOT_FOUND
resp.write('json = {"error_message":"Unable to find system"}')
return resp
keyvalue_pairs = KeyValue.objects.filter(key__startswith='nic.').filter(system=system).order_by('key')
#Iterate through the list and get all of the key/value pairs
tmp_dict = {}
adapter_ids = []
final_list = []
for kv in keyvalue_pairs:
tmp_dict[kv.key] = kv.value
for k in tmp_dict.iterkeys():
matches = re.match('nic\.(\d+).*',k)
                    if matches is not None:
if matches.group(1) not in adapter_ids:
adapter_ids.append(matches.group(1))
adapter_ids.sort()
for a in adapter_ids:
adapter_name = ''
mac_address = ''
dhcp_hostname = ''
dhcp_scope = ''
dhcp_filename = ''
ipv4_address = ''
dhcp_domain_name_servers = ''
option_hostname = ""
if 'nic.%s.ipv4_address.0' % a in tmp_dict:
ipv4_address = tmp_dict['nic.%s.ipv4_address.0' % a]
if 'nic.%s.name.0' % a in tmp_dict:
adapter_name = tmp_dict['nic.%s.name.0' % a]
if 'nic.%s.mac_address.0' % a in tmp_dict:
mac_address = tmp_dict['nic.%s.mac_address.0' % a]
if 'nic.%s.option_hostname.0' % a in tmp_dict:
option_hostname = tmp_dict['nic.%s.option_hostname.0' % a]
if 'nic.%s.dhcp_scope.0' % a in tmp_dict:
dhcp_scope = tmp_dict['nic.%s.dhcp_scope.0' % a]
if 'nic.%s.dhcp_filename.0' % a in tmp_dict:
dhcp_filename = tmp_dict['nic.%s.dhcp_filename.0' % a]
if 'nic.%s.dhcp_domain_name_servers.0' % a in tmp_dict:
dhcp_domain_name_servers = tmp_dict['nic.%s.dhcp_domain_name_servers.0' % a]
try:
final_list.append({
'system_hostname':system.hostname,
'ipv4_address':ipv4_address,
'adapter_name':adapter_name,
'mac_address':mac_address,
'option_hostname':option_hostname,
'dhcp_scope':dhcp_scope,
'dhcp_filename':dhcp_filename,
'dhcp_domain_name_servers':dhcp_domain_name_servers,
}
)
except Exception, e:
pass
#tmp_list.append(tmp_dict)
return final_list
if key_type == 'adapters_by_system_and_zone':
#Get keystores from truth that have dhcp.is_scope = True
zone = request.GET['zone']
system = System.objects.get(hostname=request.GET['system'])
keyvalue_pairs = KeyValue.objects.filter(key__startswith='nic.').filter(system=system).order_by('key')
#Iterate through the list and get all of the key/value pairs
tmp_dict = {}
adapter_ids = []
final_list = []
for kv in keyvalue_pairs:
tmp_dict[kv.key] = kv.value
for k in tmp_dict.iterkeys():
matches = re.match('nic\.(\d+).*',k)
                    if matches is not None:
dhcp_scope_match = 'nic.%s.reverse_dns_zone.0' % matches.group(1)
if matches.group(1) not in adapter_ids and dhcp_scope_match in tmp_dict and tmp_dict[dhcp_scope_match] == zone:
#if matches.group(1) not in adapter_ids and 'nic.%s.dhcp_scope.0' % matches.group(1) in tmp_dict and tmp_dict['nic.%s.dhcp_scope.0' % matches.group(1)] == dhcp_scope:
adapter_ids.append(matches.group(1))
adapter_ids.sort()
for a in adapter_ids:
adapter_name = ''
mac_address = ''
dhcp_hostname = ''
dhcp_filename = ''
dhcp_domain_name = ''
ipv4_address = ''
if 'nic.%s.ipv4_address.0' % a in tmp_dict:
ipv4_address = tmp_dict['nic.%s.ipv4_address.0' % a]
if 'nic.%s.name.0' % a in tmp_dict:
adapter_name = tmp_dict['nic.%s.name.0' % a]
if 'nic.%s.mac_address.0' % a in tmp_dict:
mac_address = tmp_dict['nic.%s.mac_address.0' % a]
if 'nic.%s.dhcp_hostname.0' % a in tmp_dict:
dhcp_hostname = tmp_dict['nic.%s.dhcp_hostname.0' % a]
if 'nic.%s.dhcp_filename.0' % a in tmp_dict:
dhcp_filename = tmp_dict['nic.%s.dhcp_filename.0' % a]
if 'nic.%s.dhcp_domain_name.0' % a in tmp_dict:
dhcp_domain_name = tmp_dict['nic.%s.dhcp_domain_name.0' % a]
final_list.append({'system_hostname':system.hostname, 'ipv4_address':ipv4_address})
#tmp_list.append(tmp_dict)
return final_list
if 'key_type' in request.GET and request.GET['key_type'] == 'key_by_system':
try:
hostname = request.GET.get('hostname')
key = request.GET.get('key')
system = System.objects.get(hostname=hostname)
objects = KeyValue.objects.filter(key=key, system=system)
tmp = []
for obj in objects:
tmp.append({'key': obj.key, 'value': obj.value})
resp = rc.ALL_OK
resp.write("json = {'data': %s}" % json.dumps(tmp))
except:
resp = rc.NOT_FOUND
resp.write('json = {"error_message":"Unable to find Key or system"}')
return resp
if key_type == 'adapters_by_system_and_scope':
#Get keystores from truth that have dhcp.is_scope = True
dhcp_scope = request.GET['dhcp_scope']
system = System.objects.get(hostname=request.GET['system'])
keyvalue_pairs = KeyValue.objects.filter(key__startswith='nic.').filter(system=system).order_by('key')
#Iterate through the list and get all of the key/value pairs
tmp_dict = {}
adapter_ids = []
final_list = []
for kv in keyvalue_pairs:
tmp_dict[kv.key] = kv.value
for k in tmp_dict.iterkeys():
matches = re.match('nic\.(\d+).*',k)
                    if matches is not None:
dhcp_scope_match = 'nic.%s.dhcp_scope.0' % matches.group(1)
ip_address_match = 'nic.%s.ipv4_address.0' % matches.group(1)
if matches.group(1) not in adapter_ids and ip_address_match in tmp_dict and dhcp_scope_match in tmp_dict and tmp_dict[dhcp_scope_match] == dhcp_scope:
#if matches.group(1) not in adapter_ids and 'nic.%s.dhcp_scope.0' % matches.group(1) in tmp_dict and tmp_dict['nic.%s.dhcp_scope.0' % matches.group(1)] == dhcp_scope:
adapter_ids.append(matches.group(1))
adapter_ids.sort()
for a in adapter_ids:
adapter_name = ''
mac_address = ''
dhcp_hostname = ''
dhcp_filename = ''
dhcp_domain_name = ''
ipv4_address = ''
dhcp_domain_name_servers = ''
if 'nic.%s.ipv4_address.0' % a in tmp_dict:
ipv4_address = tmp_dict['nic.%s.ipv4_address.0' % a]
if 'nic.%s.name.0' % a in tmp_dict:
adapter_name = tmp_dict['nic.%s.name.0' % a]
if 'nic.%s.mac_address.0' % a in tmp_dict:
mac_address = tmp_dict['nic.%s.mac_address.0' % a]
if 'nic.%s.dhcp_hostname.0' % a in tmp_dict and 'nic.%s.option_hostname.0' % a not in tmp_dict:
dhcp_hostname = tmp_dict['nic.%s.dhcp_hostname.0' % a]
if 'nic.%s.option_hostname.0' % a in tmp_dict:
dhcp_hostname = tmp_dict['nic.%s.option_hostname.0' % a]
if 'nic.%s.dhcp_filename.0' % a in tmp_dict:
dhcp_filename = tmp_dict['nic.%s.dhcp_filename.0' % a]
if 'nic.%s.dhcp_domain_name.0' % a in tmp_dict:
dhcp_domain_name = tmp_dict['nic.%s.dhcp_domain_name.0' % a]
if 'nic.%s.dhcp_domain_name_servers.0' % a in tmp_dict:
dhcp_domain_name_servers = tmp_dict['nic.%s.dhcp_domain_name_servers.0' % a]
final_list.append({'system_hostname':system.hostname, 'ipv4_address':ipv4_address, 'adapter_name':adapter_name, 'mac_address':mac_address, 'option_hostname': dhcp_hostname, 'dhcp_hostname':dhcp_hostname, 'dhcp_filename':dhcp_filename, 'dhcp_domain_name':dhcp_domain_name, 'dhcp_domain_name_servers':dhcp_domain_name_servers})
#tmp_list.append(tmp_dict)
return final_list
elif 'key' in request.GET and request.GET['key'] > '':
tmp_list = {}
try:
base = KeyValue.objects.filter(key=request.GET['key'])
for row in base:
matches = re.match("\$\{(.*)\}", row.value)
if matches is not None:
m = MacroExpansion(matches.group(1))
row.value = m.output()
for r in base:
key_name = 'host:%s:%s' % (r.system.hostname, r.key)
tmp_list[key_name] = r.value
except Exception, e:
pass
try:
base = TruthKeyValue.objects.filter(key=request.GET['key'])
for row in base:
matches = re.match("\$\{(.*)\}", row.value)
if matches is not None:
m = MacroExpansion(matches.group(1))
row.value = m.output()
for r in base:
key_name = 'truth:%s:%s' % (r.truth.name, r.key)
tmp_list[key_name] = r.value
except Exception, e:
pass
return tmp_list
elif 'value' in request.GET:
tmp_list = {}
try:
base = KeyValue.objects.filter(value=request.GET['value'])
for row in base:
matches = re.match("\$\{(.*)\}", row.value)
if matches is not None:
m = MacroExpansion(matches.group(1))
row.value = m.output()
for r in base:
key_name = 'host:%s:%s' % (r.system.hostname, r.key)
tmp_list[key_name] = r.value
except Exception, e:
pass
try:
base = TruthKeyValue.objects.filter(value=request.GET['value'])
for row in base:
matches = re.match("\$\{(.*)\}", row.value)
if matches is not None:
m = MacroExpansion(matches.group(1))
row.value = m.output()
for r in base:
key_name = 'truth:%s:%s' % (r.truth.name, r.key)
tmp_list[key_name] = r.value
except Exception, e:
pass
return tmp_list
def delete(self, request, key_value_id=None):
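        """Delete key/value pairs; key_type selects adapter or per-key bulk deletion, otherwise delete by id."""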
if 'key_type' in request.GET and request.GET['key_type'] == 'delete_all_network_adapters':
#Get keystores from truth that have dhcp.is_scope = True
try:
system_hostname = request.GET['system_hostname']
system = System.objects.get(hostname=system_hostname)
KeyValue.objects.filter(key__startswith='nic', system=system).delete()
resp = rc.ALL_OK
resp.write('json = {"id":"0"}')
except:
resp = rc.NOT_FOUND
resp.write('json = {"error_message":"Unable to Delete}')
return resp
if 'key_type' in request.GET and request.GET['key_type'] == 'delete_network_adapter':
#Get keystores from truth that have dhcp.is_scope = True
try:
adapter_number = request.GET['adapter_number']
system_hostname = request.GET['system_hostname']
system = System.objects.get(hostname=system_hostname)
KeyValue.objects.filter(key__startswith='nic.%s' % adapter_number, system=system).delete()
#KeyValue.objects.filter(key__startswith='nic.0', system=system).delete()
resp = rc.ALL_OK
resp.write('json = {"id":"14"}')
except:
resp = rc.NOT_FOUND
resp.write('json = {"error_message":"Unable to Delete}')
return resp
if 'key_type' in request.GET and request.GET['key_type'] == 'delete_key_by_system':
try:
system_hostname = request.GET.get('system')
key = request.GET.get('key')
system = System.objects.get(hostname=system_hostname)
KeyValue.objects.filter(key=key, system=system).delete()
resp = rc.ALL_OK
resp.write('json = {"id":"14"}')
except:
resp = rc.NOT_FOUND
resp.write('json = {"error_message":"Unable to Delete}')
return resp
if 'key_type' not in request.GET:
if 'system_id' in request.GET:
try:
n = KeyValue.objects.get(id=key_value_id)
n.delete()
resp = rc.ALL_OK
resp.write('json = {"id":"%s"}' % str(key_value_id))
except:
resp = rc.NOT_FOUND
return resp
if 'truth_id' in request.GET:
try:
n = TruthKeyValue.objects.get(id=key_value_id)
n.delete()
resp = rc.ALL_OK
resp.write('json = {"id":"%s"}' % str(key_value_id))
except:
resp = rc.NOT_FOUND
return resp
resp = rc.ALL_OK
resp.write('json = {"id":"1"}')
return resp
|
|
"""Ambry Library User Administration CLI
Copyright (c) 2015 Civic Knowledge. This file is licensed under the terms of
the Revised BSD License, included in this distribution as LICENSE.txt
"""
__all__ = ['command_name', 'make_parser', 'run_command']
command_name = 'ui'
from ambry.cli import prt, fatal, warn, err
def make_parser(cmd):
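    """Register the 'ui' command and its subcommands (info, start, user, init, run_args, notebook)."""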
config_p = cmd.add_parser(command_name, help='Manage the user interface')
config_p.set_defaults(command=command_name)
cmd = config_p.add_subparsers(title='UI commands', help='UI commands')
sp = cmd.add_parser('info', help='Print information about the UI')
sp.set_defaults(subcommand=ui_info)
sp = cmd.add_parser('start', help='Run the web user interface')
sp.set_defaults(command=command_name)
sp.set_defaults(subcommand=start_ui)
sp.add_argument('-H', '--host', help="Server host.", default='localhost')
sp.add_argument('-p', '--port', help="Server port", default=8080)
sp.add_argument('-P', '--use-proxy', action='store_true',
help="Setup for using a proxy in front of server, using werkzeug.contrib.fixers.ProxyFix")
sp.add_argument('-d', '--debug', action='store_true', help="Set debugging mode", default=False)
sp.add_argument('-N', '--no-accounts', action='store_true', help="Don't setup remotes and accounts", default=False)
sp = cmd.add_parser('user', help='Manage users')
sp.set_defaults(command=command_name)
ucmd = sp.add_subparsers(title='User Commands', help='Sub-commands for managing users')
usp = ucmd.add_parser('add', help='Add a user')
usp.set_defaults(subcommand=add_user)
usp.add_argument('-a', '--admin', action='store_true', default = False, help="Make the user an administrator")
usp.add_argument('-p', '--password', help="Reset the password")
usp.add_argument('-s', '--secret', action='store_true', default=False, help="Regenerate the API secret")
usp.add_argument('user_name', help='Name of user')
usp = ucmd.add_parser('admin', help='Add or remove admin privileges')
usp.set_defaults(subcommand=user_admin)
usp.add_argument('-r', '--remove', action='store_true', default=False, help="Remove, rather than add, the privilege")
usp.add_argument('user_name', help='Name of user')
usp = ucmd.add_parser('remove', help='Remove a user')
usp.set_defaults(subcommand=remove_user)
usp.add_argument('user_name', help='Name of user')
usp = ucmd.add_parser('list', help='List users')
usp.set_defaults(subcommand=list_users)
sp = cmd.add_parser('init', help='Initialize some library database values for the ui')
sp.set_defaults(subcommand=db_init)
sp.add_argument('-t', '--title', help="Set the library title")
sp.add_argument('-v', '--virt-host', help="Set the virtual host name")
sp = cmd.add_parser('run_args', help='Print evalable environmental vars for running the UI')
sp.set_defaults(subcommand=run_args)
sp = cmd.add_parser('notebook', help='Run jupyter notebook')
sp.set_defaults(subcommand=start_notebook)
sp.add_argument('-H', '--host', help="Server host.", default='localhost')
sp.add_argument('-p', '--port', help="Server port.", default=None)
sp.add_argument('-w', '--no-browser', action='store_true', default=False, help="Don't open the web browser")
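# Sketch of how make_parser() could be wired up (the real ambry CLI builds the
# root parser itself; the argparse scaffolding below is only for illustration):
#
#   import argparse
#   root = argparse.ArgumentParser(prog='ambry')
#   subparsers = root.add_subparsers(title='commands')
#   make_parser(subparsers)
#   args = root.parse_args(['ui', 'start', '-p', '8081'])
#   # args.command == 'ui', args.subcommand is start_ui, args.port == '8081'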
def run_command(args, rc):
from ambry.library import new_library
from ambry.cli import global_logger
try:
l = new_library(rc)
l.logger = global_logger
except Exception as e:
l = None
args.subcommand(args, l, rc) # Note the calls to sp.set_defaults(subcommand=...)
def no_command(args, l, rc):
raise NotImplementedError()
def start_ui(args, l, rc):
from ambry_ui import app
import ambry_ui.views
import ambry_ui.jsonviews
import ambry_ui.api
import ambry_ui.user
import webbrowser
import socket
from ambry.util import random_string, set_url_part
if args.use_proxy:
from werkzeug.contrib.fixers import ProxyFix
app.wsgi_app = ProxyFix(app.wsgi_app)
if not args.debug:
webbrowser.open("http://{}:{}".format(args.host, args.port))
else:
import logging
import os
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger('werkzeug')
log.setLevel(logging.DEBUG)
os.environ["AMBRY_UI_DEBUG"] = 'true' # DOn't know why this needs to be done, but it does.
#prt("Running at http://{}:{}".format(args.host, args.port))
if not app.config.get('AMBRY_ADMIN_PASS') and not args.no_accounts:
app.config['AMBRY_ADMIN_PASS'] = random_string(20)
app.config['LOGGED_IN_USER'] = 'admin'
l.logger.info("Setting admin password to: {}".format(app.config['AMBRY_ADMIN_PASS'] ))
db_init(args,l,rc)
try:
app.config['SECRET_KEY'] = 'secret' # To Ensure logins persist
app.config["WTF_CSRF_SECRET_KEY"] = 'secret'
app.run(host=args.host, port=int(args.port), debug=args.debug)
except socket.error as e:
warn("Failed to start ui: {}".format(e))
def run_args(args, l, rc):
ui_config = l.ui_config
db_init(args, l, rc)
prt('export AMBRY_UI_SECRET={} AMBRY_UI_CSRF_SECRET={} AMBRY_UI_TITLE="{}" '
.format(ui_config['secret'], ui_config['csrf_secret'], ui_config['website_title'] ))
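# e.g. (sketch, assuming the usual 'ambry' console entry point): the printed
# line is meant to be eval'ed by a shell wrapper before launching the UI:
#   eval "$(ambry ui run_args)"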
def db_init(args, l, rc):
from uuid import uuid4
import os
ui_config = l.ui_config
if not 'secret' in ui_config:
ui_config['secret'] = str(uuid4())
if not 'csrf_secret' in ui_config:
ui_config['csrf_secret'] = str(uuid4())
if hasattr(args, 'title') and args.title:
ui_config['website_title'] = args.title
elif not 'website_title' in ui_config:
ui_config['website_title'] = os.getenv('AMBRY_UI_TITLE', 'Ambry Data Library')
if hasattr(args, 'virt_host') and args.virt_host:
ui_config['virtual_host'] = args.virt_host
elif not ui_config['virtual_host']:
ui_config['virtual_host'] = None
l.database.commit()
def ui_info(args, l, rc):
from tabulate import tabulate
from __meta__ import __version__
records = []
records.append(['version', __version__])
records.append(['title', l.ui_config['website_title']])
records.append(['vhost', l.ui_config['virtual_host']])
prt(tabulate(records))
def add_user(args, l, rc):
"""Add or update a user"""
from ambry.util import random_string
from getpass import getpass
account = l.find_or_new_account(args.user_name)
account.major_type = 'user'
account.access_key = args.user_name
if args.admin:
account.minor_type = 'admin'
if not account.encrypted_secret or args.secret:
account.secret = random_string(20)
prt("Secret: {}".format(account.secret))
if args.password:
password = args.password
elif not account.encrypted_password:
password = getpass().strip()
else:
password = None
if password:
account.encrypt_password(password)
assert account.test(password)
account.url = None
l.commit()
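# e.g. (sketch, assuming the 'ambry' console entry point):
#   ambry ui user add -a alice          # create/update admin user 'alice'; prompts for a password
#   ambry ui user add -p s3cret -s bob  # set a password and regenerate the API secret for 'bob'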
def user_admin(args, l, rc):
"""Add or update a user"""
from ambry.orm.exc import NotFoundError
try:
account = l.account(args.user_name)
if account.major_type != 'user':
raise NotFoundError()
if args.remove:
account.minor_type = None
else:
account.minor_type = 'admin'
l.commit()
except NotFoundError:
warn("No account found for {}".format(args.user_name))
def remove_user(args, l, rc):
from ambry.orm.exc import NotFoundError
try:
account = l.account(args.user_name)
if account.major_type != 'user':
raise NotFoundError()
l.delete_account(account)
l.commit()
except NotFoundError:
warn("No account found for {}".format(args.user_name))
def list_users(args, l, rc):
from ambry.util import drop_empty
from tabulate import tabulate
headers = 'Id User Type Secret'.split()
records = []
for k in l.accounts.keys():
acct = l.account(k)
if acct.major_type == 'user':
try:
secret = acct.secret
except Exception as e:
secret = str(e) # "<corrupt secret>"
records.append([acct.account_id, acct.user_id, acct.minor_type, secret])
if not records:
return
records = drop_empty([headers] + records)
prt(tabulate(records[1:], records[0]))
def start_notebook(args, l, rc):
from notebook.notebookapp import NotebookApp
import sys
sys.argv = ['ambry']
app = NotebookApp.instance()
app._library = l
app.contents_manager_class = 'ambry_ui.jupyter.AmbryContentsManager'
app.open_browser = not args.no_browser
app.ip = args.host
if args.port is not None:
app.port = int(args.port)
app.initialize(None)
app.start()
|
|
import requests
import json
import re
import fileinput
from optparse import OptionParser
import subprocess
import os
import sys
import time
from perfrunner.settings import ClusterSpec
from perfrunner.utils.install import CouchbaseInstaller
from perfrunner.utils.cluster import TestConfig, ClusterManager
from logger import logger
import urllib3
from perfrunner.helpers.rest import RestHelper
import paramiko
"""
# An evolving thing - takes as input:
- a build version
- a spec file
What it does:
- install the spec file on the build version
- activate the beer sample bucket
- run the tests from Keshav -flag an error if they deviate
"""
UPPER_BOUND = 1.10
LOWER_BOUND = 0.90
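# A command passes only if its measured averages stay inside the tolerance band
# [LOWER_BOUND * expected, UPPER_BOUND * expected]; e.g. an expected elapsed
# time of 100 ms accepts averages between 90 ms and 110 ms.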
ARGS = None
def get_time_in_millisec(t):
try:
time_unit = t[-2:]
if time_unit == 'ms':
return float(t[:-2])
elif time_unit == u"\u00b5s":
return float(t[:-2]) / 1000
elif 'm' in t and 'ms' not in t:
t1 = t.split('m')
return int(t1[0]) * 60000 + float(t1[1][:-1]) * 1000
elif time_unit[0].isdigit() and time_unit[1] == 's':
return float(t[:-1]) * 1000
else:
print '********unknown time unit', t
except:
print 'bad time', t
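# Worked examples of the branches above (all results in milliseconds):
#   get_time_in_millisec('12.5ms')       -> 12.5
#   get_time_in_millisec(u'250\u00b5s')  -> 0.25
#   get_time_in_millisec('1m30.5s')      -> 90500.0
#   get_time_in_millisec('2.5s')         -> 2500.0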
def generate_query(stmt):
stmt['max_parallelism'] = 1
if ARGS:
stmt['args'] = ARGS
return stmt
def generate_prepared_query(conn, q):
query = {'statement': 'PREPARE ' + q, 'max_parallelism': 1}
response = conn.request('POST', '/query/service', fields=query, encode_multipart=False)
body = json.loads(response.data.decode('utf8'))
name = str(body['results'][0]['name'])
stmt = {'prepared': '"' + name + '"'}
return generate_query(stmt)
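# The returned dict is what run_query()/execute_commands() POST to /query/service,
# e.g. (the prepared name is server-assigned): {'prepared': '"a1b2c3"', 'max_parallelism': 1}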
def run_query(conn, request_desc, debug=False):
succeeded = True
query = generate_prepared_query(conn, request_desc['query'])
total_elapsed_time = 0.0
total_execution_time = 0.0
for i in range(0, request_desc['execution_count']):
"""if debug:
#t0 = time.time()"""
response = conn.request('POST', '/query/service', fields=query, encode_multipart=False)
response.read(cache_content=False)
body = json.loads(response.data.decode('utf8'))
total_elapsed_time = total_elapsed_time + get_time_in_millisec(body['metrics']['elapsedTime'])
total_execution_time = total_execution_time + get_time_in_millisec(body['metrics']['executionTime'])
avg_elapsed = float('{0:.2f}'.format(total_elapsed_time / request_desc['execution_count']))
avg_execution = float('{0:.2f}'.format(total_execution_time / request_desc['execution_count']))
log = 'Query {0} - average elapsed {1}, average execution time {2}.'.format(request_desc['query'], avg_elapsed,
avg_execution)
if avg_elapsed > (UPPER_BOUND * request_desc['expected_elapsed_time']):
log += ' Elapsed too long - expected {0}.'.format(request_desc['expected_elapsed_time'])
succeeded = False
if avg_execution > (UPPER_BOUND * request_desc['expected_execution_time']):
log += ' Execution too long - expected {0}.'.format(request_desc['expected_execution_time'])
succeeded = False
if avg_elapsed < (LOWER_BOUND * request_desc['expected_elapsed_time']):
log += ' Elapsed too short - expected {0}.'.format(request_desc['expected_elapsed_time'])
succeeded = False
if avg_execution < (LOWER_BOUND * request_desc['expected_execution_time']):
log += ' Execution too short - expected {0}.'.format(request_desc['expected_execution_time'])
succeeded = False
if succeeded:
logger.info(log)
else:
logger.error(log)
return succeeded
def execute_commands(conn, command_list, rest, host_ip):
failure_count = 0
for command in command_list:
# print 'command', command
command_succeeded = True
total_elapsed_time = 0.0
total_execution_time = 0.0
if 'index' in command:
key = 'index'
response = rest.exec_n1ql_stmnt(host_ip, command['index'])
body = response.json() # json.loads(response.data.decode('utf8'))
avg_elapsed = total_elapsed_time + get_time_in_millisec(body['metrics']['elapsedTime'])
avg_execution = total_execution_time + get_time_in_millisec(body['metrics']['executionTime'])
elif 'query' in command:
key = 'query'
query = generate_prepared_query(conn, command['query'])
for i in range(0, command['execution_count']):
response = conn.request('POST', '/query/service', fields=query, encode_multipart=False)
response.read(cache_content=False)
body = json.loads(response.data.decode('utf8'))
total_elapsed_time = total_elapsed_time + get_time_in_millisec(body['metrics']['elapsedTime'])
total_execution_time = total_execution_time + get_time_in_millisec(body['metrics']['executionTime'])
avg_elapsed = float('{0:.2f}'.format(total_elapsed_time / command['execution_count']))
avg_execution = float('{0:.2f}'.format(total_execution_time / command['execution_count']))
log = key + ' {0} - average elapsed {1}, average execution time {2}.'.format(command[key], avg_elapsed,
avg_execution)
if avg_elapsed > (UPPER_BOUND * command['expected_elapsed_time']):
log += ' Elapsed too long - expected {0}.'.format(command['expected_elapsed_time'])
command_succeeded = False
elif avg_elapsed < (LOWER_BOUND * command['expected_elapsed_time']):
log += ' Elapsed too short - expected {0}.'.format(command['expected_elapsed_time'])
command_succeeded = False
if avg_execution > (UPPER_BOUND * command['expected_execution_time']):
log += ' Execution too long - expected {0}.'.format(command['expected_execution_time'])
command_succeeded = False
elif avg_execution < (LOWER_BOUND * command['expected_execution_time']):
log += ' Execution too short - expected {0}.'.format(command['expected_execution_time'])
command_succeeded = False
if command_succeeded:
logger.info(log)
else:
failure_count = failure_count + 1
logger.error(log)
return failure_count == 0
def do_beer_queries(conn, rest, host_ip, remote):
remote.install_beer_samples()
rest.exec_n1ql_stmnt(host_ip, 'CREATE INDEX city ON `beer-sample`(city);')
rest.exec_n1ql_stmnt(host_ip, 'CREATE INDEX style ON `beer-sample`(style);')
command_list = []
command_list.append(
{'query': 'SELECT * FROM `beer-sample` USE KEYS["21st_amendment_brewery_cafe-amendment_pale_ale"];',
'expected_elapsed_time': 0.71, 'expected_execution_time': 0.7, 'execution_count': 10000})
command_list.append({'query': 'select * from `beer-sample` where city = "Lawton";', 'expected_elapsed_time': 1.42,
'expected_execution_time': 1.42, 'execution_count': 10000})
command_list.append(
{'query': 'select abv, brewery_id from `beer-sample` where style = "Imperial or Double India Pale Ale";',
'expected_elapsed_time': 11,
'expected_execution_time': 11, 'execution_count': 10000})
command_list.append(
{'query': 'select COUNT(*) from `beer-sample` where style = "Imperial or Double India Pale Ale";',
'expected_elapsed_time': 3.4, 'expected_execution_time': 3.4, 'execution_count': 10000})
command_list.append(
{'query': 'select SUM(abv) from `beer-sample` where style = "Imperial or Double India Pale Ale";',
'expected_elapsed_time': 11, 'expected_execution_time': 11, 'execution_count': 10000})
command_list.append({
'query': 'select abv, brewery_id from `beer-sample` where style = "Imperial or Double India Pale Ale" order by abv;',
'expected_elapsed_time': 14, 'expected_execution_time': 14, 'execution_count': 10000})
return execute_commands(conn, command_list, rest, host_ip)
def do_airline_benchmarks(conn, rest, host_ip, remote, cluster_spec):
if True:
"""resp = rest.create_bucket(host_ip + ':8091', 'ods', 1000, 0, 0, 'valueOnly', 4, None)"""
time.sleep(10)
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
try:
ssh.connect(host_ip, username=cluster_spec.ssh_credentials[0], password=cluster_spec.ssh_credentials[1])
except paramiko.SSHException:
print "ssh Connection Failed"
return False
cmd = '/opt/couchbase/bin/cbrestore /root/airline-test-data-updated couchbase://127.0.0.1:8091 -b ods -B ods -u {0} -p {1}'.format(
rest.rest_username, rest.rest_password)
stdin, stdout, stderr = ssh.exec_command(cmd)
for line in stdout.readlines():
pass
ssh.close()
command_list = []
command_list.append(
{'index': 'create primary index on ods;', 'expected_elapsed_time': 27000, 'expected_execution_time': 27000})
command_list.append(
{'index': 'CREATE INDEX IDX_ODS_TAIL_NBR ON ods(`TAIL_NBR`) WHERE (`TYPE` = "OPS_FLT_LEG") USING GSI;',
'expected_elapsed_time': 38000, 'expected_execution_time': 38000})
command_list.append(
{'query': "SELECT * FROM ods WHERE TYPE = 'OPS_FLT_LEG' AND TAIL_NBR = 'N518LR' ORDER BY GMT_EST_DEP_DTM ;",
'expected_elapsed_time': 6.1, 'expected_execution_time': 6.1, 'execution_count': 10})
# query 2
big_long_query2 = """
select
pilot.FILEN as pilot_filen,
min([p.PRFL_ACT_GMT_DEP_DTM, meta(p).id]) PRFL_ACT_GMT_DEP_DTM
from
( SELECT x.*
FROM ods x
where x.TYPE="CREW_ON_FLIGHT" AND
(
x.PRFL_ACT_GMT_DEP_DTM <= "2015-07-23T18:49:00Z"
)
) as p unnest array_concat(p.PILOT,p.CREW) pilot
WHERE
pilot.FILEN in (
select raw pilot1.FILEN
from ods f use keys [ "UA_22-07-2015_EWR_IAD_6049" ]
unnest array_concat(f.PILOT,f.CREW) pilot1
)
group by pilot.FILEN
UNION ALL
select
pilot.FILEN as pilot_filen,
min([p.GMT_EST_DEP_DTM, meta(p).id]) GMT_EST_DEP_DTM
from
(
SELECT y.*
FROM ods y
where y.TYPE="CREW_ON_FLIGHT" AND
(
y.GMT_EST_DEP_DTM <= "2015-07-23T18:49:00Z"
)
) as p unnest array_concat(y.PILOT,y.CREW) pilot
where
pilot.FILEN in (
select raw pilot1.FILEN
from ods f use keys [ "UA_22-07-2015_EWR_IAD_6049" ]
unnest array_concat(f.PILOT,f.CREW) pilot1
)"""
command_list.append({
'index': 'CREATE INDEX IDX_GMT_EST_DEP_DTM ON ods(`GMT_EST_DEP_DTM`) WHERE (`TYPE`="CREW_ON_FLIGHT") USING GSI;',
'expected_elapsed_time': 38000, 'expected_execution_time': 38000})
command_list.append({
'index': 'CREATE INDEX IDX_PRFL_ACT_GMT_DEP_DTM ON ods(`PRFL_ACT_GMT_DEP_DTM`) WHERE (`TYPE`="CREW_ON_FLIGHT") USING GSI;',
'expected_elapsed_time': 41000, 'expected_execution_time': 41000})
command_list.append(
{'query': big_long_query2, 'expected_elapsed_time': 536, 'expected_execution_time': 536, 'execution_count': 10})
# query 3
big_long_index3 = """
create index idx_query3 on ods(INBND_LCL_EST_ARR_DTM)
where TYPE="AIRCRAFT_ROUTING"
and substr(INBND_LCL_EST_ARR_DTM, 11) < "20:00:00"
and case when OUTBND_LCL_EST_DEP_DTM is missing then true else substr(OUTBND_LCL_EST_DEP_DTM, 11) > "08:00:00" end;
"""
big_long_query3 = """
SELECT INBND_DEST_ARPT_CD
from ods
where TYPE = "AIRCRAFT_ROUTING"
and INBND_LCL_EST_ARR_DTM > "2015-07-17"
and INBND_LCL_EST_ARR_DTM < "2015-07-25"
and substr(INBND_LCL_EST_ARR_DTM, 11) < "20:00:00"
and case when OUTBND_LCL_EST_DEP_DTM is missing then true else substr(OUTBND_LCL_EST_DEP_DTM, 11) > "08:00:00" end
order by INBND_DEST_ARPT_CD
limit 10;
"""
command_list.append({'index': big_long_index3, 'expected_elapsed_time': 64000, 'expected_execution_time': 64000})
command_list.append({'query': big_long_query3, 'expected_elapsed_time': 2500, 'expected_execution_time': 2500,
'execution_count': 10})
return execute_commands(conn, command_list, rest, host_ip)
def main():
usage = '%prog -v version -c cluster-spec'
parser = OptionParser(usage)
parser.add_option('-v', '--version', dest='version')
parser.add_option('-c', dest='cluster_spec_fname',
help='path to cluster specification file',
metavar='cluster.spec')
parser.add_option('--verbose', dest='verbose', action='store_true',
help='enable verbose logging')
parser.add_option('-o', dest='toy',
help='optional toy build ID', metavar='couchstore')
parser.add_option('-t', dest='test_config_fname',
help='path to test configuration file',
metavar='my_test.test')
parser.add_option('-e', '--edition', dest='cluster_edition', default='enterprise',
help='the cluster edition (community or enterprise)')
parser.add_option('--url', dest='url', default=None,
help='The http URL to a Couchbase RPM that should be'
' installed. This overrides the URL to be installed.')
options, args = parser.parse_args()
cluster_spec = ClusterSpec()
cluster_spec.parse(options.cluster_spec_fname)
test_config = TestConfig()
test_config.parse(options.test_config_fname)
cm = ClusterManager(cluster_spec, test_config, options.verbose)
installer = CouchbaseInstaller(cluster_spec, options)
if True:
installer.install()
if cm.remote:
cm.tune_logging()
cm.restart_with_sfwi()
cm.restart_with_alternative_num_vbuckets()
cm.restart_with_alternative_num_cpus()
cm.restart_with_tcmalloc_aggressive_decommit()
cm.disable_moxi()
cm.configure_internal_settings()
cm.set_data_path()
cm.set_services()
cm.set_mem_quota()
cm.set_index_mem_quota()
cm.set_auth()
time.sleep(30)
"""host = cluster_spec.yield_masters().next()"""
host_ip = cluster_spec.yield_masters().next().split(':')[0]
URL = 'http://' + host_ip + ':8093'
logger.info('logging the URL: {}'.format(URL))
conn = urllib3.connection_from_url(URL)
rest = RestHelper(cluster_spec)
airline_result = do_airline_benchmarks(conn, rest, host_ip, installer.remote, cluster_spec)
beer_result = do_beer_queries(conn, rest, host_ip, installer.remote)
print 'beer_result is', beer_result
sys.exit(not (airline_result and beer_result))
if __name__ == "__main__":
if not main():
sys.exit(1)
|
|
#!/usr/bin/env python
"""Base class for api services."""
import contextlib
import datetime
import logging
import pprint
from protorpc import message_types
from protorpc import messages
import six
from six.moves import http_client
from six.moves import urllib
from apitools.base.py import credentials_lib
from apitools.base.py import encoding
from apitools.base.py import exceptions
from apitools.base.py import http_wrapper
from apitools.base.py import util
__all__ = [
'ApiMethodInfo',
'ApiUploadInfo',
'BaseApiClient',
'BaseApiService',
'NormalizeApiEndpoint',
]
# TODO(craigcitro): Remove this once we quiet the spurious logging in
# oauth2client (or drop oauth2client).
logging.getLogger('oauth2client.util').setLevel(logging.ERROR)
_MAX_URL_LENGTH = 2048
class ApiUploadInfo(messages.Message):
"""Media upload information for a method.
Fields:
accept: (repeated) MIME Media Ranges for acceptable media uploads
to this method.
max_size: (integer) Maximum size of a media upload, such as 3MB
or 1TB (converted to an integer).
resumable_path: Path to use for resumable uploads.
resumable_multipart: (boolean) Whether or not the resumable endpoint
supports multipart uploads.
simple_path: Path to use for simple uploads.
simple_multipart: (boolean) Whether or not the simple endpoint
supports multipart uploads.
"""
accept = messages.StringField(1, repeated=True)
max_size = messages.IntegerField(2)
resumable_path = messages.StringField(3)
resumable_multipart = messages.BooleanField(4)
simple_path = messages.StringField(5)
simple_multipart = messages.BooleanField(6)
class ApiMethodInfo(messages.Message):
"""Configuration info for an API method.
All fields are strings unless noted otherwise.
Fields:
relative_path: Relative path for this method.
method_id: ID for this method.
http_method: HTTP verb to use for this method.
path_params: (repeated) path parameters for this method.
query_params: (repeated) query parameters for this method.
ordered_params: (repeated) ordered list of parameters for
this method.
description: description of this method.
request_type_name: name of the request type.
response_type_name: name of the response type.
request_field: if not null, the field to pass as the body
of this POST request. may also be the REQUEST_IS_BODY
value below to indicate the whole message is the body.
upload_config: (ApiUploadInfo) Information about the upload
configuration supported by this method.
supports_download: (boolean) If True, this method supports
downloading the request via the `alt=media` query
parameter.
"""
relative_path = messages.StringField(1)
method_id = messages.StringField(2)
http_method = messages.StringField(3)
path_params = messages.StringField(4, repeated=True)
query_params = messages.StringField(5, repeated=True)
ordered_params = messages.StringField(6, repeated=True)
description = messages.StringField(7)
request_type_name = messages.StringField(8)
response_type_name = messages.StringField(9)
request_field = messages.StringField(10, default='')
upload_config = messages.MessageField(ApiUploadInfo, 11)
supports_download = messages.BooleanField(12, default=False)
REQUEST_IS_BODY = '<request>'
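# Illustrative sketch (hypothetical values; real clients are code-generated and
# populate these message fields themselves):
#
#   method_info = ApiMethodInfo(
#       relative_path='v1/projects/{project}/items',
#       method_id='example.items.list',
#       http_method='GET',
#       path_params=['project'],
#       query_params=['maxResults'],
#       request_type_name='ExampleItemsListRequest',
#       response_type_name='ItemList')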
def _LoadClass(name, messages_module):
if name.startswith('message_types.'):
_, _, classname = name.partition('.')
return getattr(message_types, classname)
elif '.' not in name:
return getattr(messages_module, name)
else:
raise exceptions.GeneratedClientError('Unknown class %s' % name)
def _RequireClassAttrs(obj, attrs):
for attr in attrs:
attr_name = attr.upper()
if not hasattr(obj, '%s' % attr_name) or not getattr(obj, attr_name):
msg = 'No %s specified for object of class %s.' % (
attr_name, type(obj).__name__)
raise exceptions.GeneratedClientError(msg)
def NormalizeApiEndpoint(api_endpoint):
if not api_endpoint.endswith('/'):
api_endpoint += '/'
return api_endpoint
class _UrlBuilder(object):
"""Convenient container for url data."""
def __init__(self, base_url, relative_path=None, query_params=None):
components = urllib.parse.urlsplit(urllib.parse.urljoin(
base_url, relative_path or ''))
if components.fragment:
raise exceptions.ConfigurationValueError(
'Unexpected url fragment: %s' % components.fragment)
self.query_params = urllib.parse.parse_qs(components.query or '')
if query_params is not None:
self.query_params.update(query_params)
self.__scheme = components.scheme
self.__netloc = components.netloc
self.relative_path = components.path or ''
@classmethod
def FromUrl(cls, url):
urlparts = urllib.parse.urlsplit(url)
query_params = urllib.parse.parse_qs(urlparts.query)
base_url = urllib.parse.urlunsplit((
urlparts.scheme, urlparts.netloc, '', None, None))
relative_path = urlparts.path or ''
return cls(
base_url, relative_path=relative_path, query_params=query_params)
@property
def base_url(self):
return urllib.parse.urlunsplit(
(self.__scheme, self.__netloc, '', '', ''))
@base_url.setter
def base_url(self, value):
components = urllib.parse.urlsplit(value)
if components.path or components.query or components.fragment:
raise exceptions.ConfigurationValueError(
'Invalid base url: %s' % value)
self.__scheme = components.scheme
self.__netloc = components.netloc
@property
def query(self):
# TODO(craigcitro): In the case that some of the query params are
# non-ASCII, we may silently fail to encode correctly. We should
# figure out who is responsible for owning the object -> str
# conversion.
return urllib.parse.urlencode(self.query_params, doseq=True)
@property
def url(self):
if '{' in self.relative_path or '}' in self.relative_path:
raise exceptions.ConfigurationValueError(
'Cannot create url with relative path %s' % self.relative_path)
return urllib.parse.urlunsplit((
self.__scheme, self.__netloc, self.relative_path, self.query, ''))
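# Example (sketch with a made-up endpoint) of how _UrlBuilder recombines the pieces:
#
#   builder = _UrlBuilder('https://www.example.com/api/', relative_path='v1/items',
#                         query_params={'key': ['abc']})
#   builder.url  ->  'https://www.example.com/api/v1/items?key=abc'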
class BaseApiClient(object):
"""Base class for client libraries."""
MESSAGES_MODULE = None
_API_KEY = ''
_CLIENT_ID = ''
_CLIENT_SECRET = ''
_PACKAGE = ''
_SCOPES = []
_USER_AGENT = ''
def __init__(self, url, credentials=None, get_credentials=True, http=None,
model=None, log_request=False, log_response=False,
num_retries=5, credentials_args=None,
default_global_params=None, additional_http_headers=None):
_RequireClassAttrs(self, ('_package', '_scopes', 'messages_module'))
if default_global_params is not None:
util.Typecheck(default_global_params, self.params_type)
self.__default_global_params = default_global_params
self.log_request = log_request
self.log_response = log_response
self.__num_retries = 5
# We let the @property machinery below do our validation.
self.num_retries = num_retries
self._credentials = credentials
if get_credentials and not credentials:
credentials_args = credentials_args or {}
self._SetCredentials(**credentials_args)
self._url = NormalizeApiEndpoint(url)
self._http = http or http_wrapper.GetHttp()
# Note that "no credentials" is totally possible.
if self._credentials is not None:
self._http = self._credentials.authorize(self._http)
# TODO(craigcitro): Remove this field when we switch to proto2.
self.__include_fields = None
self.additional_http_headers = additional_http_headers or {}
# TODO(craigcitro): Finish deprecating these fields.
_ = model
self.__response_type_model = 'proto'
def _SetCredentials(self, **kwds):
"""Fetch credentials, and set them for this client.
Note that we can't simply return credentials, since creating them
may involve side-effecting self.
Args:
**kwds: Additional keyword arguments are passed on to GetCredentials.
Returns:
None. Sets self._credentials.
"""
args = {
'api_key': self._API_KEY,
'client': self,
'client_id': self._CLIENT_ID,
'client_secret': self._CLIENT_SECRET,
'package_name': self._PACKAGE,
'scopes': self._SCOPES,
'user_agent': self._USER_AGENT,
}
args.update(kwds)
# TODO(craigcitro): It's a bit dangerous to pass this
# still-half-initialized self into this method, but we might need
# to set attributes on it associated with our credentials.
# Consider another way around this (maybe a callback?) and whether
# or not it's worth it.
self._credentials = credentials_lib.GetCredentials(**args)
@classmethod
def ClientInfo(cls):
return {
'client_id': cls._CLIENT_ID,
'client_secret': cls._CLIENT_SECRET,
'scope': ' '.join(sorted(util.NormalizeScopes(cls._SCOPES))),
'user_agent': cls._USER_AGENT,
}
@property
def base_model_class(self):
return None
@property
def http(self):
return self._http
@property
def url(self):
return self._url
@classmethod
def GetScopes(cls):
return cls._SCOPES
@property
def params_type(self):
return _LoadClass('StandardQueryParameters', self.MESSAGES_MODULE)
@property
def user_agent(self):
return self._USER_AGENT
@property
def _default_global_params(self):
if self.__default_global_params is None:
self.__default_global_params = self.params_type()
return self.__default_global_params
def AddGlobalParam(self, name, value):
params = self._default_global_params
setattr(params, name, value)
@property
def global_params(self):
return encoding.CopyProtoMessage(self._default_global_params)
@contextlib.contextmanager
def IncludeFields(self, include_fields):
self.__include_fields = include_fields
yield
self.__include_fields = None
@property
def response_type_model(self):
return self.__response_type_model
@contextlib.contextmanager
def JsonResponseModel(self):
"""In this context, return raw JSON instead of proto."""
old_model = self.response_type_model
self.__response_type_model = 'json'
yield
self.__response_type_model = old_model
@property
def num_retries(self):
return self.__num_retries
@num_retries.setter
def num_retries(self, value):
util.Typecheck(value, six.integer_types)
if value < 0:
raise exceptions.InvalidDataError(
'Cannot have negative value for num_retries')
self.__num_retries = value
@contextlib.contextmanager
def WithRetries(self, num_retries):
old_num_retries = self.num_retries
self.num_retries = num_retries
yield
self.num_retries = old_num_retries
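# e.g. (sketch; 'client.items' stands in for a hypothetical generated service):
#   with client.WithRetries(1):
#       response = client.items.List(request)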
def ProcessRequest(self, method_config, request):
"""Hook for pre-processing of requests."""
if self.log_request:
logging.info(
'Calling method %s with %s: %s', method_config.method_id,
method_config.request_type_name, request)
return request
def ProcessHttpRequest(self, http_request):
"""Hook for pre-processing of http requests."""
http_request.headers.update(self.additional_http_headers)
if self.log_request:
logging.info('Making http %s to %s',
http_request.http_method, http_request.url)
logging.info('Headers: %s', pprint.pformat(http_request.headers))
if http_request.body:
# TODO(craigcitro): Make this safe to print in the case of
# non-printable body characters.
logging.info('Body:\n%s',
http_request.loggable_body or http_request.body)
else:
logging.info('Body: (none)')
return http_request
def ProcessResponse(self, method_config, response):
if self.log_response:
logging.info('Response of type %s: %s',
method_config.response_type_name, response)
return response
# TODO(craigcitro): Decide where these two functions should live.
def SerializeMessage(self, message):
return encoding.MessageToJson(
message, include_fields=self.__include_fields)
def DeserializeMessage(self, response_type, data):
"""Deserialize the given data as method_config.response_type."""
try:
message = encoding.JsonToMessage(response_type, data)
except (exceptions.InvalidDataFromServerError,
messages.ValidationError) as e:
raise exceptions.InvalidDataFromServerError(
'Error decoding response "%s" as type %s: %s' % (
data, response_type.__name__, e))
return message
def FinalizeTransferUrl(self, url):
"""Modify the url for a given transfer, based on auth and version."""
url_builder = _UrlBuilder.FromUrl(url)
if self.global_params.key:
url_builder.query_params['key'] = self.global_params.key
return url_builder.url
class BaseApiService(object):
"""Base class for generated API services."""
def __init__(self, client):
self.__client = client
self._method_configs = {}
self._upload_configs = {}
@property
def _client(self):
return self.__client
@property
def client(self):
return self.__client
def GetMethodConfig(self, method):
return self._method_configs[method]
def GetUploadConfig(self, method):
return self._upload_configs.get(method)
def GetRequestType(self, method):
method_config = self.GetMethodConfig(method)
return getattr(self.client.MESSAGES_MODULE,
method_config.request_type_name)
def GetResponseType(self, method):
method_config = self.GetMethodConfig(method)
return getattr(self.client.MESSAGES_MODULE,
method_config.response_type_name)
def __CombineGlobalParams(self, global_params, default_params):
"""Combine the given params with the defaults."""
util.Typecheck(global_params, (type(None), self.__client.params_type))
result = self.__client.params_type()
global_params = global_params or self.__client.params_type()
for field in result.all_fields():
value = global_params.get_assigned_value(field.name)
if value is None:
value = default_params.get_assigned_value(field.name)
if value not in (None, [], ()):
setattr(result, field.name, value)
return result
def __EncodePrettyPrint(self, query_info):
# The prettyPrint flag needs custom encoding: it should be encoded
# as 0 if False, and ignored otherwise (True is the default).
if not query_info.pop('prettyPrint', True):
query_info['prettyPrint'] = 0
# The One Platform equivalent of prettyPrint is pp, which also needs
# custom encoding.
if not query_info.pop('pp', True):
query_info['pp'] = 0
return query_info
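# Worked example: {'prettyPrint': False, 'pp': False, 'alt': 'json'} becomes
# {'prettyPrint': 0, 'pp': 0, 'alt': 'json'}, while True values for either flag
# are simply dropped from the query.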
def __ConstructQueryParams(self, query_params, request, global_params):
"""Construct a dictionary of query parameters for this request."""
# First, handle the global params.
global_params = self.__CombineGlobalParams(
global_params, self.__client.global_params)
global_param_names = util.MapParamNames(
[x.name for x in self.__client.params_type.all_fields()],
self.__client.params_type)
query_info = dict((param, getattr(global_params, param))
for param in global_param_names)
# Next, add the query params.
query_param_names = util.MapParamNames(query_params, type(request))
query_info.update((param, getattr(request, param, None))
for param in query_param_names)
query_info = dict((k, v) for k, v in query_info.items()
if v is not None)
query_info = self.__EncodePrettyPrint(query_info)
query_info = util.MapRequestParams(query_info, type(request))
for k, v in query_info.items():
if isinstance(v, six.text_type):
query_info[k] = v.encode('utf8')
elif isinstance(v, str):
query_info[k] = v.decode('utf8')
elif isinstance(v, datetime.datetime):
query_info[k] = v.isoformat()
return query_info
def __ConstructRelativePath(self, method_config, request,
relative_path=None):
"""Determine the relative path for request."""
python_param_names = util.MapParamNames(
method_config.path_params, type(request))
params = dict([(param, getattr(request, param, None))
for param in python_param_names])
params = util.MapRequestParams(params, type(request))
return util.ExpandRelativePath(method_config, params,
relative_path=relative_path)
def __FinalizeRequest(self, http_request, url_builder):
"""Make any final general adjustments to the request."""
if (http_request.http_method == 'GET' and
len(http_request.url) > _MAX_URL_LENGTH):
http_request.http_method = 'POST'
http_request.headers['x-http-method-override'] = 'GET'
http_request.headers[
'content-type'] = 'application/x-www-form-urlencoded'
http_request.body = url_builder.query
url_builder.query_params = {}
http_request.url = url_builder.url
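# i.e. a GET whose url would exceed _MAX_URL_LENGTH (2048) is downgraded to a
# POST with an 'x-http-method-override: GET' header and the query string moved
# into the form-encoded body.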
def __ProcessHttpResponse(self, method_config, http_response):
"""Process the given http response."""
if http_response.status_code not in (http_client.OK,
http_client.NO_CONTENT):
raise exceptions.HttpError.FromResponse(http_response)
if http_response.status_code == http_client.NO_CONTENT:
# TODO(craigcitro): Find out why _replace doesn't seem to work
# here.
http_response = http_wrapper.Response(
info=http_response.info, content='{}',
request_url=http_response.request_url)
if self.__client.response_type_model == 'json':
return http_response.content
else:
response_type = _LoadClass(method_config.response_type_name,
self.__client.MESSAGES_MODULE)
return self.__client.DeserializeMessage(
response_type, http_response.content)
def __SetBaseHeaders(self, http_request, client):
"""Fill in the basic headers on http_request."""
# TODO(craigcitro): Make the default a little better here, and
# include the apitools version.
user_agent = client.user_agent or 'apitools-client/1.0'
http_request.headers['user-agent'] = user_agent
http_request.headers['accept'] = 'application/json'
http_request.headers['accept-encoding'] = 'gzip, deflate'
def __SetBody(self, http_request, method_config, request, upload):
"""Fill in the body on http_request."""
if not method_config.request_field:
return
request_type = _LoadClass(
method_config.request_type_name, self.__client.MESSAGES_MODULE)
if method_config.request_field == REQUEST_IS_BODY:
body_value = request
body_type = request_type
else:
body_value = getattr(request, method_config.request_field)
body_field = request_type.field_by_name(
method_config.request_field)
util.Typecheck(body_field, messages.MessageField)
body_type = body_field.type
if upload and not body_value:
# We're going to fill in the body later.
return
util.Typecheck(body_value, body_type)
http_request.headers['content-type'] = 'application/json'
http_request.body = self.__client.SerializeMessage(body_value)
def PrepareHttpRequest(self, method_config, request, global_params=None,
upload=None, upload_config=None, download=None):
"""Prepares an HTTP request to be sent."""
request_type = _LoadClass(
method_config.request_type_name, self.__client.MESSAGES_MODULE)
util.Typecheck(request, request_type)
request = self.__client.ProcessRequest(method_config, request)
http_request = http_wrapper.Request(
http_method=method_config.http_method)
self.__SetBaseHeaders(http_request, self.__client)
self.__SetBody(http_request, method_config, request, upload)
url_builder = _UrlBuilder(
self.__client.url, relative_path=method_config.relative_path)
url_builder.query_params = self.__ConstructQueryParams(
method_config.query_params, request, global_params)
# It's important that upload and download go before we fill in the
# relative path, so that they can replace it.
if upload is not None:
upload.ConfigureRequest(upload_config, http_request, url_builder)
if download is not None:
download.ConfigureRequest(http_request, url_builder)
url_builder.relative_path = self.__ConstructRelativePath(
method_config, request, relative_path=url_builder.relative_path)
self.__FinalizeRequest(http_request, url_builder)
return self.__client.ProcessHttpRequest(http_request)
def _RunMethod(self, method_config, request, global_params=None,
upload=None, upload_config=None, download=None):
"""Call this method with request."""
if upload is not None and download is not None:
# TODO(craigcitro): This just involves refactoring the logic
# below into callbacks that we can pass around; in particular,
# the order should be that the upload gets the initial request,
# and then passes its reply to a download if one exists, and
# then that goes to ProcessResponse and is returned.
raise exceptions.NotYetImplementedError(
'Cannot yet use both upload and download at once')
http_request = self.PrepareHttpRequest(
method_config, request, global_params, upload, upload_config,
download)
# TODO(craigcitro): Make num_retries customizable on Transfer
# objects, and pass in self.__client.num_retries when initializing
# an upload or download.
if download is not None:
download.InitializeDownload(http_request, client=self.client)
return
http_response = None
if upload is not None:
http_response = upload.InitializeUpload(
http_request, client=self.client)
if http_response is None:
http = self.__client.http
if upload and upload.bytes_http:
http = upload.bytes_http
http_response = http_wrapper.MakeRequest(
http, http_request, retries=self.__client.num_retries)
return self.ProcessHttpResponse(method_config, http_response)
def ProcessHttpResponse(self, method_config, http_response):
"""Convert an HTTP response to the expected message type."""
return self.__client.ProcessResponse(
method_config,
self.__ProcessHttpResponse(method_config, http_response))
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.kernels.logging_ops."""
import os
import string
import sys
import tempfile
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class LoggingOpsTest(test.TestCase):
@test_util.run_deprecated_v1
def testAssertDivideByZero(self):
with self.cached_session() as sess:
epsilon = ops.convert_to_tensor(1e-20)
x = ops.convert_to_tensor(0.0)
y = ops.convert_to_tensor(1.0)
z = ops.convert_to_tensor(2.0)
# assert(epsilon < y)
# z / y
with sess.graph.control_dependencies([
control_flow_ops.Assert(
math_ops.less(epsilon, y), ["Divide-by-zero"])
]):
out = math_ops.div(z, y)
self.assertAllEqual(2.0, self.evaluate(out))
# assert(epsilon < x)
# z / x
#
# This tests printing out multiple tensors
with sess.graph.control_dependencies([
control_flow_ops.Assert(
math_ops.less(epsilon, x), ["Divide-by-zero", "less than x"])
]):
out = math_ops.div(z, x)
with self.assertRaisesOpError("less than x"):
self.evaluate(out)
@test_util.run_all_in_graph_and_eager_modes
class PrintV2Test(test.TestCase):
def testPrintOneTensor(self):
tensor = math_ops.range(10)
with self.captureWritesToStream(sys.stderr) as printed:
print_op = logging_ops.print_v2(tensor)
self.evaluate(print_op)
expected = "[0 1 2 ... 7 8 9]"
self.assertIn((expected + "\n"), printed.contents())
def testPrintOneStringTensor(self):
tensor = ops.convert_to_tensor([char for char in string.ascii_lowercase])
with self.captureWritesToStream(sys.stderr) as printed:
print_op = logging_ops.print_v2(tensor)
self.evaluate(print_op)
expected = "[\"a\" \"b\" \"c\" ... \"x\" \"y\" \"z\"]"
self.assertIn((expected + "\n"), printed.contents())
def testPrintOneTensorVarySummarize(self):
tensor = math_ops.range(10)
with self.captureWritesToStream(sys.stderr) as printed:
print_op = logging_ops.print_v2(tensor, summarize=1)
self.evaluate(print_op)
expected = "[0 ... 9]"
self.assertIn((expected + "\n"), printed.contents())
tensor = math_ops.range(10)
with self.captureWritesToStream(sys.stderr) as printed:
print_op = logging_ops.print_v2(tensor, summarize=2)
self.evaluate(print_op)
expected = "[0 1 ... 8 9]"
self.assertIn((expected + "\n"), printed.contents())
tensor = math_ops.range(10)
with self.captureWritesToStream(sys.stderr) as printed:
print_op = logging_ops.print_v2(tensor, summarize=3)
self.evaluate(print_op)
expected = "[0 1 2 ... 7 8 9]"
self.assertIn((expected + "\n"), printed.contents())
tensor = math_ops.range(10)
with self.captureWritesToStream(sys.stderr) as printed:
print_op = logging_ops.print_v2(tensor, summarize=-1)
self.evaluate(print_op)
expected = "[0 1 2 3 4 5 6 7 8 9]"
self.assertIn((expected + "\n"), printed.contents())
def testPrintOneVariable(self):
var = variables.Variable(math_ops.range(10))
if not context.executing_eagerly():
self.evaluate(variables.global_variables_initializer())
with self.captureWritesToStream(sys.stderr) as printed:
print_op = logging_ops.print_v2(var)
self.evaluate(print_op)
expected = "[0 1 2 ... 7 8 9]"
self.assertIn((expected + "\n"), printed.contents())
def testPrintTwoVariablesInStructWithAssignAdd(self):
var_one = variables.Variable(2.14)
plus_one = var_one.assign_add(1.0)
var_two = variables.Variable(math_ops.range(10))
if not context.executing_eagerly():
self.evaluate(variables.global_variables_initializer())
with self.captureWritesToStream(sys.stderr) as printed:
self.evaluate(plus_one)
print_op = logging_ops.print_v2(var_one, {"second": var_two})
self.evaluate(print_op)
expected = "3.14 {'second': [0 1 2 ... 7 8 9]}"
self.assertIn((expected + "\n"), printed.contents())
def testPrintTwoTensors(self):
tensor = math_ops.range(10)
with self.captureWritesToStream(sys.stderr) as printed:
print_op = logging_ops.print_v2(tensor, tensor * 10)
self.evaluate(print_op)
expected = "[0 1 2 ... 7 8 9] [0 10 20 ... 70 80 90]"
self.assertIn((expected + "\n"), printed.contents())
def testPrintTwoTensorsDifferentSep(self):
tensor = math_ops.range(10)
with self.captureWritesToStream(sys.stderr) as printed:
print_op = logging_ops.print_v2(tensor, tensor * 10, sep="<separator>")
self.evaluate(print_op)
expected = "[0 1 2 ... 7 8 9]<separator>[0 10 20 ... 70 80 90]"
self.assertIn(expected + "\n", printed.contents())
def testPrintPlaceholderGeneration(self):
tensor = math_ops.range(10)
with self.captureWritesToStream(sys.stderr) as printed:
print_op = logging_ops.print_v2("{}6", {"{}": tensor * 10})
self.evaluate(print_op)
expected = "{}6 {'{}': [0 10 20 ... 70 80 90]}"
self.assertIn((expected + "\n"), printed.contents())
def testPrintNoTensors(self):
with self.captureWritesToStream(sys.stderr) as printed:
print_op = logging_ops.print_v2(23, [23, 5], {"6": 12})
self.evaluate(print_op)
expected = "23 [23, 5] {'6': 12}"
self.assertIn((expected + "\n"), printed.contents())
def testPrintFloatScalar(self):
for dtype in [dtypes.bfloat16, dtypes.half, dtypes.float32, dtypes.float64]:
tensor = ops.convert_to_tensor(43.5, dtype=dtype)
with self.captureWritesToStream(sys.stderr) as printed:
print_op = logging_ops.print_v2(tensor)
self.evaluate(print_op)
expected = "43.5"
self.assertIn((expected + "\n"), printed.contents())
def testPrintStringScalar(self):
tensor = ops.convert_to_tensor("scalar")
with self.captureWritesToStream(sys.stderr) as printed:
print_op = logging_ops.print_v2(tensor)
self.evaluate(print_op)
expected = "scalar"
self.assertIn((expected + "\n"), printed.contents())
def testPrintStringScalarDifferentEnd(self):
tensor = ops.convert_to_tensor("scalar")
with self.captureWritesToStream(sys.stderr) as printed:
print_op = logging_ops.print_v2(tensor, end="<customend>")
self.evaluate(print_op)
expected = "scalar<customend>"
self.assertIn(expected, printed.contents())
def testPrintComplexTensorStruct(self):
tensor = math_ops.range(10)
small_tensor = constant_op.constant([0.3, 12.4, -16.1])
big_tensor = math_ops.mul(tensor, 10)
with self.captureWritesToStream(sys.stderr) as printed:
print_op = logging_ops.print_v2(
"first:", tensor, "middle:",
{"small": small_tensor, "Big": big_tensor}, 10,
[tensor * 2, tensor])
self.evaluate(print_op)
# Note that the keys in the dict will always be sorted,
# so 'Big' comes before 'small'
expected = ("first: [0 1 2 ... 7 8 9] "
"middle: {'Big': [0 10 20 ... 70 80 90], "
"'small': [0.3 12.4 -16.1]} "
"10 [[0 2 4 ... 14 16 18], [0 1 2 ... 7 8 9]]")
self.assertIn((expected + "\n"), printed.contents())
def testPrintSparseTensor(self):
ind = [[0, 0], [1, 0], [1, 3], [4, 1], [1, 4], [3, 2], [3, 3]]
val = [0, 10, 13, 4, 14, 32, 33]
shape = [5, 6]
sparse = sparse_tensor.SparseTensor(
constant_op.constant(ind, dtypes.int64),
constant_op.constant(val, dtypes.int64),
constant_op.constant(shape, dtypes.int64))
with self.captureWritesToStream(sys.stderr) as printed:
print_op = logging_ops.print_v2(sparse)
self.evaluate(print_op)
expected = ("'SparseTensor(indices=[[0 0]\n"
" [1 0]\n"
" [1 3]\n"
" ...\n"
" [1 4]\n"
" [3 2]\n"
" [3 3]], values=[0 10 13 ... 14 32 33], shape=[5 6])'")
self.assertIn((expected + "\n"), printed.contents())
def testPrintSparseTensorInDataStruct(self):
ind = [[0, 0], [1, 0], [1, 3], [4, 1], [1, 4], [3, 2], [3, 3]]
val = [0, 10, 13, 4, 14, 32, 33]
shape = [5, 6]
sparse = sparse_tensor.SparseTensor(
constant_op.constant(ind, dtypes.int64),
constant_op.constant(val, dtypes.int64),
constant_op.constant(shape, dtypes.int64))
with self.captureWritesToStream(sys.stderr) as printed:
print_op = logging_ops.print_v2([sparse])
self.evaluate(print_op)
expected = ("['SparseTensor(indices=[[0 0]\n"
" [1 0]\n"
" [1 3]\n"
" ...\n"
" [1 4]\n"
" [3 2]\n"
" [3 3]], values=[0 10 13 ... 14 32 33], shape=[5 6])']")
self.assertIn((expected + "\n"), printed.contents())
def testPrintOneTensorStdout(self):
tensor = math_ops.range(10)
with self.captureWritesToStream(sys.stdout) as printed:
print_op = logging_ops.print_v2(
tensor, output_stream=sys.stdout)
self.evaluate(print_op)
expected = "[0 1 2 ... 7 8 9]"
self.assertIn((expected + "\n"), printed.contents())
def testPrintTensorsToFile(self):
fd, tmpfile_name = tempfile.mkstemp(".printv2_test")
tensor_0 = math_ops.range(0, 10)
print_op_0 = logging_ops.print_v2(tensor_0,
output_stream="file://"+tmpfile_name)
self.evaluate(print_op_0)
tensor_1 = math_ops.range(11, 20)
print_op_1 = logging_ops.print_v2(tensor_1,
output_stream="file://"+tmpfile_name)
self.evaluate(print_op_1)
try:
f = os.fdopen(fd, "r")
line_0 = f.readline()
expected_0 = "[0 1 2 ... 7 8 9]"
self.assertTrue(expected_0 in line_0)
line_1 = f.readline()
expected_1 = "[11 12 13 ... 17 18 19]"
self.assertTrue(expected_1 in line_1)
os.close(fd)
os.remove(tmpfile_name)
except IOError as e:
self.fail(e)
def testInvalidOutputStreamRaisesError(self):
tensor = math_ops.range(10)
with self.assertRaises(ValueError):
print_op = logging_ops.print_v2(
tensor, output_stream="unknown")
self.evaluate(print_op)
@test_util.run_deprecated_v1
def testPrintOpName(self):
tensor = math_ops.range(10)
print_op = logging_ops.print_v2(tensor, name="print_name")
self.assertEqual(print_op.name, "print_name")
@test_util.run_deprecated_v1
def testNoDuplicateFormatOpGraphModeAfterExplicitFormat(self):
tensor = math_ops.range(10)
formatted_string = string_ops.string_format("{}", tensor)
print_op = logging_ops.print_v2(formatted_string)
self.evaluate(print_op)
graph_ops = ops.get_default_graph().get_operations()
format_ops = [op for op in graph_ops if op.type == "StringFormat"]
# Should be only 1 format_op for graph mode.
self.assertEqual(len(format_ops), 1)
def testPrintOneTensorEagerOnOpCreate(self):
with context.eager_mode():
tensor = math_ops.range(10)
expected = "[0 1 2 ... 7 8 9]"
with self.captureWritesToStream(sys.stderr) as printed:
logging_ops.print_v2(tensor)
self.assertIn((expected + "\n"), printed.contents())
def testPrintsOrderedInDefun(self):
with context.eager_mode():
@function.defun
def prints():
logging_ops.print_v2("A")
logging_ops.print_v2("B")
logging_ops.print_v2("C")
with self.captureWritesToStream(sys.stderr) as printed:
prints()
self.assertIn("A\nB\nC\n", printed.contents())
def testPrintInDefunWithoutExplicitEvalOfPrint(self):
@function.defun
def f():
tensor = math_ops.range(10)
logging_ops.print_v2(tensor)
return tensor
expected = "[0 1 2 ... 7 8 9]"
with self.captureWritesToStream(sys.stderr) as printed_one:
x = f()
self.evaluate(x)
self.assertIn((expected + "\n"), printed_one.contents())
# We execute the function again to make sure it doesn't only print on the
# first call.
with self.captureWritesToStream(sys.stderr) as printed_two:
y = f()
self.evaluate(y)
self.assertIn((expected + "\n"), printed_two.contents())
class PrintGradientTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def testPrintShape(self):
inp = constant_op.constant(2.0, shape=[100, 32])
inp_printed = logging_ops.Print(inp, [inp])
self.assertEqual(inp.get_shape(), inp_printed.get_shape())
def testPrintString(self):
inp = constant_op.constant(2.0, shape=[100, 32])
inp_printed = logging_ops.Print(inp, ["hello"])
self.assertEqual(inp.get_shape(), inp_printed.get_shape())
@test_util.run_deprecated_v1
def testPrintGradient(self):
inp = constant_op.constant(2.0, shape=[100, 32], name="in")
w = constant_op.constant(4.0, shape=[10, 100], name="w")
wx = math_ops.matmul(w, inp, name="wx")
wx_print = logging_ops.Print(wx, [w, w, w])
wx_grad = gradients_impl.gradients(wx, w)[0]
wx_print_grad = gradients_impl.gradients(wx_print, w)[0]
wxg = self.evaluate(wx_grad)
wxpg = self.evaluate(wx_print_grad)
self.assertAllEqual(wxg, wxpg)
if __name__ == "__main__":
test.main()
|
|
# -*- test-case-name: twisted.test.test_usage -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
twisted.python.usage is a module for parsing/handling the
command line of your program.
For information on how to use it, see
U{http://twistedmatrix.com/projects/core/documentation/howto/options.html},
or doc/core/howto/options.xhtml in your Twisted directory.
"""
from __future__ import print_function
from __future__ import division, absolute_import
# System Imports
import inspect
import os
import sys
import getopt
from os import path
import textwrap
# Sibling Imports
from twisted.python import reflect, util
from twisted.python.compat import _PY3
class UsageError(Exception):
pass
error = UsageError
class CoerceParameter(object):
"""
Utility class that can coerce a parameter before storing it.
"""
def __init__(self, options, coerce):
"""
@param options: parent Options object
@param coerce: callable used to coerce the value.
"""
self.options = options
self.coerce = coerce
self.doc = getattr(self.coerce, 'coerceDoc', '')
def dispatch(self, parameterName, value):
"""
When called in dispatch, do the coerce for C{value} and save the
returned value.
"""
if value is None:
raise UsageError("Parameter '%s' requires an argument."
% (parameterName,))
try:
value = self.coerce(value)
except ValueError as e:
raise UsageError("Parameter type enforcement failed: %s" % (e,))
self.options.opts[parameterName] = value
class Options(dict):
"""
An option list parser class
C{optFlags} and C{optParameters} are lists of available parameters
which your program can handle. The difference between the two
is the 'flags' have an on(1) or off(0) state (off by default)
whereas 'parameters' have an assigned value, with an optional
default. (Compare '--verbose' and '--verbosity=2')
optFlags is assigned a list of lists. Each list represents
a flag parameter, as so::
optFlags = [['verbose', 'v', 'Makes it tell you what it is doing.'],
['quiet', 'q', 'Be vewy vewy quiet.']]
As you can see, the first item is the long option name
(prefixed with '--' on the command line), followed by the
short option name (prefixed with '-'), and the description.
The description is used for the built-in handling of the
--help switch, which prints a usage summary.
C{optParameters} is much the same, except the list also contains
a default value::
optParameters = [['outfile', 'O', 'outfile.log', 'Description...']]
A coerce function can also be specified as the last element: it will be
called with the argument and should return the value that will be stored
for the option. This function can have a C{coerceDoc} attribute which
will be appended to the documentation of the option.
subCommands is a list of 4-tuples of (command name, command shortcut,
parser class, documentation). If the first non-option argument found is
one of the given command names, an instance of the given parser class is
instantiated and given the remainder of the arguments to parse and
self.opts[command] is set to the command name. For example::
subCommands = [
['inquisition', 'inquest', InquisitionOptions,
'Perform an inquisition'],
['holyquest', 'quest', HolyQuestOptions,
'Embark upon a holy quest']
]
In this case, C{"<program> holyquest --horseback --for-grail"} will cause
C{HolyQuestOptions} to be instantiated and asked to parse
C{['--horseback', '--for-grail']}. Currently, only the first sub-command
is parsed, and all options following it are passed to its parser. If a
subcommand is found, the subCommand attribute is set to its name and the
subOptions attribute is set to the Option instance that parses the
remaining options. If a subcommand is not given to parseOptions,
the subCommand attribute will be None. You can also mark one of
the subCommands to be the default::
defaultSubCommand = 'holyquest'
In this case, the subCommand attribute will never be None, and
the subOptions attribute will always be set.
If you want to handle your own options, define a method named
C{opt_paramname} that takes C{(self, option)} as arguments. C{option}
will be whatever immediately follows the parameter on the
command line. Options fully supports the mapping interface, so you
can do things like C{'self["option"] = val'} in these methods.
Shell tab-completion is supported by this class, for zsh only at present.
Zsh ships with a stub file ("completion function") which, for Twisted
commands, performs tab-completion on-the-fly using the support provided
by this class. The stub file lives in our tree at
C{twisted/python/twisted-completion.zsh}, and in the Zsh tree at
C{Completion/Unix/Command/_twisted}.
Tab-completion is based upon the contents of the optFlags and optParameters
lists. And, optionally, additional metadata may be provided by assigning a
special attribute, C{compData}, which should be an instance of
C{Completions}. See that class for details of what can and should be
included - and see the howto for additional help using these features -
including how third-parties may take advantage of tab-completion for their
own commands.
Advanced functionality is covered in the howto documentation,
available at
U{http://twistedmatrix.com/projects/core/documentation/howto/options.html},
or doc/core/howto/options.xhtml in your Twisted directory.
"""
subCommand = None
defaultSubCommand = None
parent = None
completionData = None
_shellCompFile = sys.stdout # file to use if shell completion is requested
def __init__(self):
super(Options, self).__init__()
self.opts = self
self.defaults = {}
# These are strings/lists we will pass to getopt
self.longOpt = []
self.shortOpt = ''
self.docs = {}
self.synonyms = {}
self._dispatch = {}
collectors = [
self._gather_flags,
self._gather_parameters,
self._gather_handlers,
]
for c in collectors:
(longOpt, shortOpt, docs, settings, synonyms, dispatch) = c()
self.longOpt.extend(longOpt)
self.shortOpt = self.shortOpt + shortOpt
self.docs.update(docs)
self.opts.update(settings)
self.defaults.update(settings)
self.synonyms.update(synonyms)
self._dispatch.update(dispatch)
__hash__ = object.__hash__
def opt_help(self):
"""
Display this help and exit.
"""
print(self.__str__())
sys.exit(0)
def opt_version(self):
"""
Display Twisted version and exit.
"""
from twisted import copyright
print("Twisted version:", copyright.version)
sys.exit(0)
#opt_h = opt_help # this conflicted with existing 'host' options.
def parseOptions(self, options=None):
"""
The guts of the command-line parser.
"""
if options is None:
options = sys.argv[1:]
# we really do need to place the shell completion check here, because
# if we used an opt_shell_completion method then it would be possible
# for other opt_* methods to be run first, and they could possibly
# raise validation errors which would result in error output on the
# terminal of the user performing shell completion. Validation errors
# would occur quite frequently, in fact, because users often initiate
# tab-completion while they are editing an unfinished command-line.
if len(options) > 1 and options[-2] == "--_shell-completion":
from twisted.python import _shellcomp
cmdName = path.basename(sys.argv[0])
_shellcomp.shellComplete(self, cmdName, options,
self._shellCompFile)
sys.exit(0)
try:
opts, args = getopt.getopt(options,
self.shortOpt, self.longOpt)
except getopt.error as e:
raise UsageError(str(e))
for opt, arg in opts:
if opt[1] == '-':
opt = opt[2:]
else:
opt = opt[1:]
optMangled = opt
if optMangled not in self.synonyms:
optMangled = opt.replace("-", "_")
if optMangled not in self.synonyms:
raise UsageError("No such option '%s'" % (opt,))
optMangled = self.synonyms[optMangled]
if isinstance(self._dispatch[optMangled], CoerceParameter):
self._dispatch[optMangled].dispatch(optMangled, arg)
else:
self._dispatch[optMangled](optMangled, arg)
if (getattr(self, 'subCommands', None)
and (args or self.defaultSubCommand is not None)):
if not args:
args = [self.defaultSubCommand]
sub, rest = args[0], args[1:]
for (cmd, short, parser, doc) in self.subCommands:
if sub == cmd or sub == short:
self.subCommand = cmd
self.subOptions = parser()
self.subOptions.parent = self
self.subOptions.parseOptions(rest)
break
else:
raise UsageError("Unknown command: %s" % sub)
else:
try:
self.parseArgs(*args)
except TypeError:
raise UsageError("Wrong number of arguments.")
self.postOptions()
def postOptions(self):
"""
I am called after the options are parsed.
Override this method in your subclass to do something after
the options have been parsed and assigned, like validate that
all options are sane.
"""
def parseArgs(self):
"""
I am called with any leftover arguments which were not options.
Override me to do something with the remaining arguments on
the command line, those which were not flags or options. e.g.
interpret them as a list of files to operate on.
        Note that if there are more arguments on the command line
than this method accepts, parseArgs will blow up with
a getopt.error. This means if you don't override me,
parseArgs will blow up if I am passed any arguments at
all!
"""
def _generic_flag(self, flagName, value=None):
if value not in ('', None):
raise UsageError("Flag '%s' takes no argument."
" Not even \"%s\"." % (flagName, value))
self.opts[flagName] = 1
def _gather_flags(self):
"""
Gather up boolean (flag) options.
"""
longOpt, shortOpt = [], ''
docs, settings, synonyms, dispatch = {}, {}, {}, {}
flags = []
reflect.accumulateClassList(self.__class__, 'optFlags', flags)
for flag in flags:
long, short, doc = util.padTo(3, flag)
if not long:
raise ValueError("A flag cannot be without a name.")
docs[long] = doc
settings[long] = 0
if short:
shortOpt = shortOpt + short
synonyms[short] = long
longOpt.append(long)
synonyms[long] = long
dispatch[long] = self._generic_flag
return longOpt, shortOpt, docs, settings, synonyms, dispatch
def _gather_parameters(self):
"""
Gather options which take a value.
"""
longOpt, shortOpt = [], ''
docs, settings, synonyms, dispatch = {}, {}, {}, {}
parameters = []
reflect.accumulateClassList(self.__class__, 'optParameters',
parameters)
synonyms = {}
for parameter in parameters:
long, short, default, doc, paramType = util.padTo(5, parameter)
if not long:
raise ValueError("A parameter cannot be without a name.")
docs[long] = doc
settings[long] = default
if short:
shortOpt = shortOpt + short + ':'
synonyms[short] = long
longOpt.append(long + '=')
synonyms[long] = long
if paramType is not None:
dispatch[long] = CoerceParameter(self, paramType)
else:
dispatch[long] = CoerceParameter(self, str)
return longOpt, shortOpt, docs, settings, synonyms, dispatch
def _gather_handlers(self):
"""
Gather up options with their own handler methods.
This returns a tuple of many values. Amongst those values is a
synonyms dictionary, mapping all of the possible aliases (C{str})
for an option to the longest spelling of that option's name
        (C{str}).
Another element is a dispatch dictionary, mapping each user-facing
option name (with - substituted for _) to a callable to handle that
option.
"""
longOpt, shortOpt = [], ''
docs, settings, synonyms, dispatch = {}, {}, {}, {}
dct = {}
reflect.addMethodNamesToDict(self.__class__, dct, "opt_")
for name in dct.keys():
method = getattr(self, 'opt_'+name)
takesArg = not flagFunction(method, name)
prettyName = name.replace('_', '-')
doc = getattr(method, '__doc__', None)
if doc:
## Only use the first line.
#docs[name] = doc.split('\n')[0]
docs[prettyName] = doc
else:
docs[prettyName] = self.docs.get(prettyName)
synonyms[prettyName] = prettyName
            # A little sleight-of-hand here makes dispatching much easier
# in parseOptions, as it makes all option-methods have the
# same signature.
if takesArg:
fn = lambda name, value, m=method: m(value)
else:
# XXX: This won't raise a TypeError if it's called
# with a value when it shouldn't be.
fn = lambda name, value=None, m=method: m()
dispatch[prettyName] = fn
if len(name) == 1:
shortOpt = shortOpt + name
if takesArg:
shortOpt = shortOpt + ':'
else:
if takesArg:
prettyName = prettyName + '='
longOpt.append(prettyName)
reverse_dct = {}
# Map synonyms
for name in dct.keys():
method = getattr(self, 'opt_' + name)
if method not in reverse_dct:
reverse_dct[method] = []
reverse_dct[method].append(name.replace('_', '-'))
for method, names in reverse_dct.items():
if len(names) < 2:
continue
longest = max(names, key=len)
for name in names:
synonyms[name] = longest
return longOpt, shortOpt, docs, settings, synonyms, dispatch
def __str__(self):
return self.getSynopsis() + '\n' + self.getUsage(width=None)
def getSynopsis(self):
"""
Returns a string containing a description of these options and how to
pass them to the executed file.
"""
default = "%s%s" % (path.basename(sys.argv[0]),
(self.longOpt and " [options]") or '')
if self.parent is None:
default = "Usage: %s%s" % (path.basename(sys.argv[0]),
(self.longOpt and " [options]") or '')
else:
default = '%s' % ((self.longOpt and "[options]") or '')
synopsis = getattr(self, "synopsis", default)
synopsis = synopsis.rstrip()
if self.parent is not None:
synopsis = ' '.join((self.parent.getSynopsis(),
self.parent.subCommand, synopsis))
return synopsis
def getUsage(self, width=None):
# If subOptions exists by now, then there was probably an error while
# parsing its options.
if hasattr(self, 'subOptions'):
return self.subOptions.getUsage(width=width)
if not width:
width = int(os.environ.get('COLUMNS', '80'))
if hasattr(self, 'subCommands'):
cmdDicts = []
for (cmd, short, parser, desc) in self.subCommands:
cmdDicts.append(
{'long': cmd,
'short': short,
'doc': desc,
'optType': 'command',
'default': None
})
chunks = docMakeChunks(cmdDicts, width)
commands = 'Commands:\n' + ''.join(chunks)
else:
commands = ''
longToShort = {}
for key, value in self.synonyms.items():
longname = value
if (key != longname) and (len(key) == 1):
longToShort[longname] = key
else:
if longname not in longToShort:
longToShort[longname] = None
else:
pass
optDicts = []
for opt in self.longOpt:
if opt[-1] == '=':
optType = 'parameter'
opt = opt[:-1]
else:
optType = 'flag'
optDicts.append(
{'long': opt,
'short': longToShort[opt],
'doc': self.docs[opt],
'optType': optType,
'default': self.defaults.get(opt, None),
'dispatch': self._dispatch.get(opt, None)
})
if not (getattr(self, "longdesc", None) is None):
longdesc = self.longdesc
else:
import __main__
if getattr(__main__, '__doc__', None):
longdesc = __main__.__doc__
else:
longdesc = ''
if longdesc:
longdesc = ('\n' +
'\n'.join(textwrap.wrap(longdesc, width)).strip()
+ '\n')
if optDicts:
chunks = docMakeChunks(optDicts, width)
s = "Options:\n%s" % (''.join(chunks))
else:
s = "Options: None\n"
return s + longdesc + commands
#def __repr__(self):
# XXX: It'd be cool if we could return a succinct representation
# of which flags and options are set here.
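# Example (editor's sketch, not part of Twisted): a minimal Options subclass
# exercising optFlags, optParameters, a custom opt_* handler and a sub-command,
# as described in the Options docstring above. All names are illustrative.
def _exampleOptionsUsage():
    class QuestOptions(Options):
        optFlags = [['horseback', 'b', 'Approach on horseback.']]
    class MainOptions(Options):
        optFlags = [['verbose', 'v', 'Be noisy.']]
        optParameters = [['outfile', 'O', 'out.log', 'Where to write output.']]
        subCommands = [['quest', 'q', QuestOptions, 'Embark upon a quest.']]
        def opt_banner(self, text):
            """
            Set a banner string (handled by a custom opt_* method).
            """
            self['banner'] = text
    config = MainOptions()
    config.parseOptions(['--verbose', '--banner=hello', 'quest', '--horseback'])
    # config['verbose'] == 1, config['outfile'] == 'out.log',
    # config.subCommand == 'quest', config.subOptions['horseback'] == 1
    return config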
_ZSH = 'zsh'
_BASH = 'bash'
class Completer(object):
"""
A completion "action" - provides completion possibilities for a particular
command-line option. For example we might provide the user a fixed list of
choices, or files/dirs according to a glob.
This class produces no completion matches itself - see the various
subclasses for specific completion functionality.
"""
_descr = None
def __init__(self, descr=None, repeat=False):
"""
@type descr: C{str}
@param descr: An optional descriptive string displayed above matches.
@type repeat: C{bool}
@param repeat: A flag, defaulting to False, indicating whether this
C{Completer} should repeat - that is, be used to complete more
than one command-line word. This may ONLY be set to True for
actions in the C{extraActions} keyword argument to C{Completions}.
And ONLY if it is the LAST (or only) action in the C{extraActions}
list.
"""
if descr is not None:
self._descr = descr
self._repeat = repeat
def _getRepeatFlag(self):
if self._repeat:
return "*"
else:
return ""
_repeatFlag = property(_getRepeatFlag)
def _description(self, optName):
if self._descr is not None:
return self._descr
else:
return optName
def _shellCode(self, optName, shellType):
"""
Fetch a fragment of shell code representing this action which is
suitable for use by the completion system in _shellcomp.py
@type optName: C{str}
@param optName: The long name of the option this action is being
used for.
@type shellType: C{str}
@param shellType: One of the supported shell constants e.g.
C{twisted.python.usage._ZSH}
"""
if shellType == _ZSH:
return "%s:%s:" % (self._repeatFlag,
self._description(optName))
raise NotImplementedError("Unknown shellType %r" % (shellType,))
class CompleteFiles(Completer):
"""
Completes file names based on a glob pattern
"""
def __init__(self, globPattern='*', **kw):
Completer.__init__(self, **kw)
self._globPattern = globPattern
def _description(self, optName):
if self._descr is not None:
return "%s (%s)" % (self._descr, self._globPattern)
else:
return "%s (%s)" % (optName, self._globPattern)
def _shellCode(self, optName, shellType):
if shellType == _ZSH:
return "%s:%s:_files -g \"%s\"" % (self._repeatFlag,
self._description(optName),
self._globPattern,)
raise NotImplementedError("Unknown shellType %r" % (shellType,))
class CompleteDirs(Completer):
"""
Completes directory names
"""
def _shellCode(self, optName, shellType):
if shellType == _ZSH:
return "%s:%s:_directories" % (self._repeatFlag,
self._description(optName))
raise NotImplementedError("Unknown shellType %r" % (shellType,))
class CompleteList(Completer):
"""
Completes based on a fixed list of words
"""
def __init__(self, items, **kw):
Completer.__init__(self, **kw)
self._items = items
def _shellCode(self, optName, shellType):
if shellType == _ZSH:
return "%s:%s:(%s)" % (self._repeatFlag,
self._description(optName),
" ".join(self._items,))
raise NotImplementedError("Unknown shellType %r" % (shellType,))
class CompleteMultiList(Completer):
"""
Completes multiple comma-separated items based on a fixed list of words
"""
def __init__(self, items, **kw):
Completer.__init__(self, **kw)
self._items = items
def _shellCode(self, optName, shellType):
if shellType == _ZSH:
return "%s:%s:_values -s , '%s' %s" % (self._repeatFlag,
self._description(optName),
self._description(optName),
" ".join(self._items))
raise NotImplementedError("Unknown shellType %r" % (shellType,))
class CompleteUsernames(Completer):
"""
Complete usernames
"""
def _shellCode(self, optName, shellType):
if shellType == _ZSH:
return "%s:%s:_users" % (self._repeatFlag,
self._description(optName))
raise NotImplementedError("Unknown shellType %r" % (shellType,))
class CompleteGroups(Completer):
"""
Complete system group names
"""
_descr = 'group'
def _shellCode(self, optName, shellType):
if shellType == _ZSH:
return "%s:%s:_groups" % (self._repeatFlag,
self._description(optName))
raise NotImplementedError("Unknown shellType %r" % (shellType,))
class CompleteHostnames(Completer):
"""
Complete hostnames
"""
def _shellCode(self, optName, shellType):
if shellType == _ZSH:
return "%s:%s:_hosts" % (self._repeatFlag,
self._description(optName))
raise NotImplementedError("Unknown shellType %r" % (shellType,))
class CompleteUserAtHost(Completer):
"""
A completion action which produces matches in any of these forms::
<username>
<hostname>
<username>@<hostname>
"""
_descr = 'host | user@host'
def _shellCode(self, optName, shellType):
if shellType == _ZSH:
# Yes this looks insane but it does work. For bonus points
# add code to grep 'Hostname' lines from ~/.ssh/config
return ('%s:%s:{_ssh;if compset -P "*@"; '
'then _wanted hosts expl "remote host name" _ssh_hosts '
'&& ret=0 elif compset -S "@*"; then _wanted users '
'expl "login name" _ssh_users -S "" && ret=0 '
'else if (( $+opt_args[-l] )); then tmp=() '
'else tmp=( "users:login name:_ssh_users -qS@" ) fi; '
'_alternative "hosts:remote host name:_ssh_hosts" "$tmp[@]"'
' && ret=0 fi}' % (self._repeatFlag,
self._description(optName)))
raise NotImplementedError("Unknown shellType %r" % (shellType,))
class CompleteNetInterfaces(Completer):
"""
Complete network interface names
"""
def _shellCode(self, optName, shellType):
if shellType == _ZSH:
return "%s:%s:_net_interfaces" % (self._repeatFlag,
self._description(optName))
raise NotImplementedError("Unknown shellType %r" % (shellType,))
class Completions(object):
"""
Extra metadata for the shell tab-completion system.
@type descriptions: C{dict}
@ivar descriptions: ex. C{{"foo" : "use this description for foo instead"}}
A dict mapping long option names to alternate descriptions. When this
variable is defined, the descriptions contained here will override
those descriptions provided in the optFlags and optParameters
variables.
@type multiUse: C{list}
@ivar multiUse: ex. C{ ["foo", "bar"] }
An iterable containing those long option names which may appear on the
command line more than once. By default, options will only be completed
one time.
@type mutuallyExclusive: C{list} of C{tuple}
@ivar mutuallyExclusive: ex. C{ [("foo", "bar"), ("bar", "baz")] }
A sequence of sequences, with each sub-sequence containing those long
option names that are mutually exclusive. That is, those options that
cannot appear on the command line together.
@type optActions: C{dict}
@ivar optActions: A dict mapping long option names to shell "actions".
These actions define what may be completed as the argument to the
given option. By default, all files/dirs will be completed if no
action is given. For example::
{"foo" : CompleteFiles("*.py", descr="python files"),
"bar" : CompleteList(["one", "two", "three"]),
"colors" : CompleteMultiList(["red", "green", "blue"])}
Callables may instead be given for the values in this dict. The
callable should accept no arguments, and return a C{Completer}
instance used as the action in the same way as the literal actions in
the example above.
        As you can see in the example above, the "foo" option will have files
that end in .py completed when the user presses Tab. The "bar"
option will have either of the strings "one", "two", or "three"
completed when the user presses Tab.
"colors" will allow multiple arguments to be completed, separated by
commas. The possible arguments are red, green, and blue. Examples::
my_command --foo some-file.foo --colors=red,green
my_command --colors=green
my_command --colors=green,blue
Descriptions for the actions may be given with the optional C{descr}
keyword argument. This is separate from the description of the option
itself.
Normally Zsh does not show these descriptions unless you have
"verbose" completion turned on. Turn on verbosity with this in your
~/.zshrc::
zstyle ':completion:*' verbose yes
zstyle ':completion:*:descriptions' format '%B%d%b'
@type extraActions: C{list}
@ivar extraActions: Extra arguments are those arguments typically
appearing at the end of the command-line, which are not associated
with any particular named option. That is, the arguments that are
given to the parseArgs() method of your usage.Options subclass. For
example::
[CompleteFiles(descr="file to read from"),
Completer(descr="book title")]
In the example above, the 1st non-option argument will be described as
"file to read from" and all file/dir names will be completed (*). The
2nd non-option argument will be described as "book title", but no
actual completion matches will be produced.
See the various C{Completer} subclasses for other types of things which
may be tab-completed (users, groups, network interfaces, etc).
Also note the C{repeat=True} flag which may be passed to any of the
C{Completer} classes. This is set to allow the C{Completer} instance
to be re-used for subsequent command-line words. See the C{Completer}
docstring for details.
"""
def __init__(self, descriptions={}, multiUse=[],
mutuallyExclusive=[], optActions={}, extraActions=[]):
self.descriptions = descriptions
self.multiUse = multiUse
self.mutuallyExclusive = mutuallyExclusive
self.optActions = optActions
self.extraActions = extraActions
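# Example (editor's sketch, not part of Twisted): attaching tab-completion
# metadata to an Options subclass via the compData attribute, as described in
# the Completions docstring above. Option and file names are illustrative.
def _exampleCompletionsUsage():
    class CopyOptions(Options):
        optFlags = [['force', 'f', 'Overwrite existing files.']]
        optParameters = [['user', 'u', None, 'Remote user name.'],
                         ['color', 'c', 'red', 'Highlight color.']]
        compData = Completions(
            optActions={'user': CompleteUsernames(),
                        'color': CompleteList(['red', 'green', 'blue'])},
            extraActions=[CompleteFiles('*.txt', descr='file to copy')])
    return CopyOptions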
def docMakeChunks(optList, width=80):
"""
Makes doc chunks for option declarations.
Takes a list of dictionaries, each of which may have one or more
of the keys 'long', 'short', 'doc', 'default', 'optType'.
Returns a list of strings.
The strings may be multiple lines,
all of them end with a newline.
"""
# XXX: sanity check to make sure we have a sane combination of keys.
maxOptLen = 0
for opt in optList:
optLen = len(opt.get('long', ''))
if optLen:
if opt.get('optType', None) == "parameter":
# these take up an extra character
optLen = optLen + 1
maxOptLen = max(optLen, maxOptLen)
colWidth1 = maxOptLen + len(" -s, -- ")
colWidth2 = width - colWidth1
# XXX - impose some sane minimum limit.
# Then if we don't have enough room for the option and the doc
# to share one line, they can take turns on alternating lines.
colFiller1 = " " * colWidth1
optChunks = []
seen = {}
for opt in optList:
if opt.get('short', None) in seen or opt.get('long', None) in seen:
continue
for x in opt.get('short', None), opt.get('long', None):
if x is not None:
seen[x] = 1
optLines = []
comma = " "
if opt.get('short', None):
short = "-%c" % (opt['short'],)
else:
short = ''
if opt.get('long', None):
long = opt['long']
if opt.get("optType", None) == "parameter":
long = long + '='
long = "%-*s" % (maxOptLen, long)
if short:
comma = ","
else:
long = " " * (maxOptLen + len('--'))
if opt.get('optType', None) == 'command':
column1 = ' %s ' % long
else:
column1 = " %2s%c --%s " % (short, comma, long)
if opt.get('doc', ''):
doc = opt['doc'].strip()
else:
doc = ''
if (opt.get("optType", None) == "parameter") \
and not (opt.get('default', None) is None):
doc = "%s [default: %s]" % (doc, opt['default'])
if (opt.get("optType", None) == "parameter") \
and opt.get('dispatch', None) is not None:
d = opt['dispatch']
if isinstance(d, CoerceParameter) and d.doc:
doc = "%s. %s" % (doc, d.doc)
if doc:
column2_l = textwrap.wrap(doc, colWidth2)
else:
column2_l = ['']
optLines.append("%s%s\n" % (column1, column2_l.pop(0)))
for line in column2_l:
optLines.append("%s%s\n" % (colFiller1, line))
optChunks.append(''.join(optLines))
return optChunks
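# Example (editor's sketch): feeding option dictionaries to docMakeChunks by
# hand; Options.getUsage builds these dicts for you, so this only illustrates
# the expected keys. Values are illustrative.
def _exampleDocMakeChunks():
    optDicts = [
        {'long': 'verbose', 'short': 'v', 'doc': 'Be noisy.',
         'optType': 'flag', 'default': None},
        {'long': 'port', 'short': 'p', 'doc': 'Port to listen on.',
         'optType': 'parameter', 'default': 8080},
    ]
    return ''.join(docMakeChunks(optDicts, width=60))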
def flagFunction(method, name=None):
"""
Determine whether a function is an optional handler for a I{flag} or an
I{option}.
A I{flag} handler takes no additional arguments. It is used to handle
command-line arguments like I{--nodaemon}.
An I{option} handler takes one argument. It is used to handle command-line
arguments like I{--path=/foo/bar}.
@param method: The bound method object to inspect.
    @param name: The name of the option for which the function is a handler.
@type name: L{str}
@raise UsageError: If the method takes more than one argument.
@return: If the method is a flag handler, return C{True}. Otherwise return
C{False}.
"""
if _PY3:
reqArgs = len(inspect.signature(method).parameters)
if reqArgs > 1:
raise UsageError('Invalid Option function for %s' %
(name or method.__name__))
if reqArgs == 1:
return False
else:
reqArgs = len(inspect.getargspec(method).args)
if reqArgs > 2:
raise UsageError('Invalid Option function for %s' %
(name or method.__name__))
if reqArgs == 2:
return False
return True
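# Example (editor's sketch): how flagFunction distinguishes flag handlers from
# option handlers by their signature. The class and method names are made up.
def _exampleFlagFunction():
    class DemoOptions(Options):
        def opt_quiet(self):
            """
            A flag handler: takes no argument.
            """
        def opt_logfile(self, path):
            """
            An option handler: takes exactly one argument.
            """
    demo = DemoOptions()
    return flagFunction(demo.opt_quiet), flagFunction(demo.opt_logfile)  # (True, False)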
def portCoerce(value):
"""
    Coerce a string value to an int port number, and check its validity.
"""
value = int(value)
if value < 0 or value > 65535:
raise ValueError("Port number not in range: %s" % (value,))
return value
portCoerce.coerceDoc = "Must be an int between 0 and 65535."
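# Example (editor's sketch): using portCoerce as the optional fifth element of
# an optParameters entry; its coerceDoc string is appended to the option's
# help text by getUsage. Names are illustrative.
def _examplePortCoerce():
    class ListenOptions(Options):
        optParameters = [['port', 'p', 8080, 'Port to listen on.', portCoerce]]
    config = ListenOptions()
    config.parseOptions(['--port', '4433'])
    return config['port']  # 4433, as a validated int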
|
|
#!/usr/bin/env python
"""Provides tools for building Osx application bundles."""
import os
import re
import shutil
import fnmatch
from SCons.Builder import *
from SCons.Defaults import SharedCheck, ProgScan
from SCons.Script.SConscript import SConsEnvironment
###############################################################
#
###############################################################
def generate(env):
"""defines
env.InstallBundle() for installing a bundle into its dir.
A bundle has this structure: (filenames are case SENSITIVE)
sapphire.bundle/
Contents/
Info.plist (an XML key->value database; defined by BUNDLE_INFO_PLIST)
PkgInfo (trivially short; defined by value of BUNDLE_PKGINFO)
MacOS/
executable (the executable or shared lib, linked with Bundle())
Resources/
"""
if 'OSXBUNDLE' in env['TOOLS']: return
###############################################################
if env['PLATFORM'] == 'darwin':
env.Append(TOOLS = 'OSXBUNDLE')
# This requires some other tools:
TOOL_WRITE_VAL(env)
TOOL_SUBST(env)
###############################################################
def ensureWritable(nodes):
for node in nodes:
if os.path.exists(node.path) and not (os.stat(node.path)[0] & 0o200):
            os.chmod(node.path, 0o777)
return nodes
###############################################################
def InstallBundle (env, target_dir, bundle):
"""Move a Mac OS-X bundle to its final destination"""
# check parameters!
if os.path.exists(target_dir) and not os.path.isdir (target_dir):
raise SCons.Errors.UserError("InstallBundle: %s needs to be a directory!"%(target_dir))
outputs = []
bundlefiles = env.arg2nodes (bundle, env.fs.File)
outDirNode = env.Dir(target_dir)
for bundlefile in bundlefiles:
outputs += ensureWritable (env.InstallAs (outDirNode.abspath + '/' + str (bundlefile), bundlefile))
return outputs
###############################################################
# Common type codes are BNDL for generic bundle and APPL for application.
###############################################################
def MakeBundle(env, bundledir, app,
typecode='BNDL',
key = 'com.tweakoz.osxbundle',
info_plist = 'Info.plist',
creator='toz',
icon_file='',
subst_dict=None,
resources=None):
"""Install a bundle into its dir, in the proper format"""
resources = resources or []
# Substitute construction vars:
for a in [bundledir, key, info_plist, icon_file, typecode, creator]:
a = env.subst(a)
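    # NB: the loop above only rebinds the local name 'a'; the results of
    # env.subst() are discarded, so no substitution actually happens here.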
if SCons.Util.is_List(app):
app = app[0]
if SCons.Util.is_String(app):
app = env.subst(app)
appbase = os.path.basename(app)
else:
appbase = os.path.basename(str(app))
if not ('.' in bundledir):
bundledir += '.$BUNDLEDIRSUFFIX'
bundledir = env.subst(bundledir) # substitute again
suffix=bundledir[bundledir.rfind('.'):]
if (suffix=='.app' and typecode != 'APPL' or
suffix!='.app' and typecode == 'APPL'):
raise SCons.Errors.UserError("MakeBundle: inconsistent dir suffix %s and type code %s: app bundles should end with .app and type code APPL."%(suffix, typecode))
else:
env.SideEffect (bundledir, app)
if subst_dict is None:
subst_dict={'%SHORTVERSION%': '0.0.1',#'$VERSION_NUM',
'%LONGVERSION%': '0.0.1',#'$VERSION_NAME',
'%YEAR%': '$COMPILE_YEAR',
'%BUNDLE_EXECUTABLE%': appbase,
'%ICONFILE%': os.path.basename(icon_file),
'%CREATOR%': creator,
'%TYPE%': typecode,
'%BUNDLE_KEY%': key}
inst_all = []
inst = env.Install(bundledir+'/Contents/MacOS', app)
inst_all.append (inst)
f=env.SubstInFile(bundledir+'/Contents/Info.plist', info_plist, SUBST_DICT=subst_dict)
#env.Depends(f, SCons.Node.Python.Value(key+creator+typecode+env['VERSION_NUM']+env['VERSION_NAME']))
inst_all.append (f)
inst_all.append (env.File (bundledir+'/Contents/PkgInfo'))
env.WriteVal(target=bundledir+'/Contents/PkgInfo',
source=SCons.Node.Python.Value(typecode+creator))
if icon_file!='':
resources.append(icon_file)
for r in resources:
inst_all.append (env.Install(bundledir+'/Contents/Resources', r))
return inst_all
# This is not a regular Builder; it's a wrapper function.
# So just make it available as a method of Environment.
SConsEnvironment.MakeBundle = MakeBundle
SConsEnvironment.InstallBundle = InstallBundle
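# Example (editor's sketch): how an SConstruct might use this tool once
# generate(env) has run. The target names, plist and icon file are
# illustrative assumptions, not part of the tool itself.
def _example_make_bundle(env):
    app = env.Program('MyApp', ['main.cpp'])
    bundle = env.MakeBundle('MyApp.app', app,
                            typecode='APPL',
                            key='com.example.myapp',
                            info_plist='Info.plist',
                            icon_file='MyApp.icns')
    return env.InstallBundle('/Applications', bundle)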
###############################################################
#
###############################################################
def TOOL_SUBST(env):
"""Adds SubstInFile builder, which substitutes the keys->values of SUBST_DICT
from the source to the target.
The values of SUBST_DICT first have any construction variables expanded
(its keys are not expanded).
If a value of SUBST_DICT is a python callable function, it is called and
the result is expanded as the value.
If there's more than one source and more than one target, each target gets
substituted from the corresponding source.
"""
env.Append(TOOLS = 'SUBST')
def do_subst_in_file(targetfile, sourcefile, dict):
"""Replace all instances of the keys of dict with their values.
For example, if dict is {'%VERSION%': '1.2345', '%BASE%': 'MyProg'},
then all instances of %VERSION% in the file will be replaced with 1.2345 etc.
"""
try:
f = open(sourcefile, 'rb')
contents = f.read()
f.close()
except:
raise SCons.Errors.UserError("Can't read source file %s"%sourcefile)
for (k,v) in dict.items():
contents = re.sub(k, v, contents)
try:
f = open(targetfile, 'wb')
f.write(contents)
f.close()
except:
raise SCons.Errors.UserError("Can't write target file %s"%targetfile)
return 0 # success
def subst_in_file(target, source, env):
        print(source, 'is being substituted in folder', os.getcwd())
        if 'SUBST_DICT' not in env:
raise SCons.Errors.UserError("SubstInFile requires SUBST_DICT to be set.")
d = dict(env['SUBST_DICT']) # copy it
for (k,v) in d.items():
if callable(v):
d[k] = env.subst(v())
elif SCons.Util.is_String(v):
d[k]=env.subst(v)
else:
raise SCons.Errors.UserError("SubstInFile: key %s: %s must be a string or callable"%(k, repr(v)))
for (t,s) in zip(target, source):
return do_subst_in_file(str(t), str(s), d)
def subst_in_file_string(target, source, env):
"""This is what gets printed on the console."""
return '\n'.join(['Substituting vars from %s into %s'%(str(s), str(t))
for (t,s) in zip(target, source)])
def subst_emitter(target, source, env):
"""Add dependency from substituted SUBST_DICT to target.
Returns original target, source tuple unchanged.
"""
d = env['SUBST_DICT'].copy() # copy it
for (k,v) in d.items():
if callable(v):
d[k] = env.subst(v())
elif SCons.Util.is_String(v):
d[k]=env.subst(v)
env.Depends(target, SCons.Node.Python.Value(d))
# Depends(target, source) # this doesn't help the install-sapphire-linux.sh problem
return target, source
subst_action=SCons.Action.Action(subst_in_file, subst_in_file_string)
env['BUILDERS']['SubstInFile'] = Builder(action=subst_action, emitter=subst_emitter)
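# Example (editor's sketch): using the SubstInFile builder added by TOOL_SUBST.
# The file names and substitution keys are illustrative assumptions.
def _example_subst_in_file(env):
    return env.SubstInFile('Info.plist', 'Info.plist.in',
                           SUBST_DICT={'%VERSION%': '1.2.3',
                                       '%BUNDLE_EXECUTABLE%': 'MyApp'})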
###############################################################
#
###############################################################
def TOOL_WRITE_VAL(env):
env.Append(TOOLS = 'WRITE_VAL')
def write_val(target, source, env):
"""Write the contents of the first source into the target.
source is usually a Value() node, but could be a file."""
f = open(str(target[0]), 'wb')
f.write(source[0].get_contents())
f.close()
env['BUILDERS']['WriteVal'] = Builder(action=write_val)
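# Example (editor's sketch): using the WriteVal builder added by TOOL_WRITE_VAL
# to write a PkgInfo-style file, mirroring what MakeBundle does above. The
# target name and contents are illustrative.
def _example_write_val(env):
    return env.WriteVal(target='PkgInfo',
                        source=SCons.Node.Python.Value('APPLtoz'))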
###############################################################
#
###############################################################
def osx_copy( dest, source, env ):
from macostools import copy
copy( source, dest )
shutil.copymode(source, dest)
###############################################################
#
###############################################################
def exists(env):
return _detect(env)
|
|
import re
import logging
import json
import pathlib
import inspect
import numpy as np
from os import remove
from os.path import join, dirname, isfile
from typing import List, Tuple
from deprecated import deprecated
import openql as ql
from pycqed.utilities.general import suppress_stdout
from pycqed.utilities.general import is_more_recent
from pycqed.utilities.general import get_file_sha256_hash
log = logging.getLogger(__name__)
"""
FIXME:
concept should support:
- programs with 'runtime' parameters
- retrieval of return (measurement) data
- running a set of programs
- make-like 'update only if needed'
- multiple HW-platforms
- other compilers than OpenQL
"""
class OqlProgram:
# we use a class global variable 'output_dir' to replace the former OpenQL option of the same name. Since that is a
    # global OpenQL option, it no longer has a direct effect now that we no longer use the OpenQL-generated list of passes (see
# self._configure_compiler). Additionally, OpenQL options are reset upon ql.initialize, so the value does not persist.
output_dir = join(dirname(__file__), 'output')
def __init__(
self,
name: str,
platf_cfg: str,
nregisters: int = 32
):
"""
create OpenQL Program (and Platform)
Args:
name:
name of the program
platf_cfg:
path of the platform configuration file used by OpenQL
nregisters:
the number of classical registers required in the program
"""
# setup OpenQL
ql.initialize() # reset options, may initialize more functionality in the future
# set OpenQL log level before anything else
ql.set_option('log_level', 'LOG_WARNING')
# store/initialize some parameters
self.name = name
self._platf_cfg = platf_cfg
self.nregisters = nregisters # NB: not available via platform
self.filename = ""
self.sweep_points = None
# create Platform and Program
self.platform = ql.Platform('OpenQL_Platform', platf_cfg)
self.nqubits = self.platform.get_qubit_number()
self.program = ql.Program(
name,
self.platform,
self.nqubits,
self.nregisters
) # NB: unused if we use compile_cqasm()
# detect OpenQL backend ('eqasm_compiler') used by inspecting platf_cfg
eqasm_compiler = ''
with open(self._platf_cfg) as f:
for line in f:
if 'eqasm_compiler' in line:
m = re.search('"eqasm_compiler" *: *"(.*?)"', line)
eqasm_compiler = m.group(1)
break
if eqasm_compiler == '':
log.error(f"key 'eqasm_compiler' not found in file '{self._platf_cfg}'")
# determine architecture and extension of generated file
if eqasm_compiler == 'cc_light_compiler':
# NB: OpenQL no longer has a backend for CC-light
self._arch = 'CCL'
self._ext = '.qisa' # CC-light, QCC
else:
self._arch = 'CC'
self._ext = '.vq1asm' # CC
# save name of file that OpenQL will generate on compilation to allow uploading
# NB: for cQasm, the actual name is determined by 'pragma @ql.name' in the source, not by self.name,
# so users must maintain consistency
self.filename = join(OqlProgram.output_dir, self.name + self._ext)
def add_kernel(self, k: ql.Kernel) -> None:
self.program.add_kernel(k)
def create_kernel(
self,
kname: str
) -> ql.Kernel:
"""
Wrapper around constructor of openQL "Kernel" class.
"""
kname = kname.translate(
{ord(c): "_" for c in "!@#$%^&*()[]{};:,./<>?\|`~-=_+ "})
k = ql.Kernel(kname, self.platform, self.nqubits, self.nregisters)
return k
def compile(
self,
quiet: bool = False,
extra_pass_options: List[Tuple[str, str]] = None
) -> None:
"""
Compile an OpenQL Program created using the legacy Program/Kernel API.
Args:
quiet:
suppress all output (not recommended, because warnings are hidden)
extra_pass_options:
extra pass options for OpenQL. These consist of a tuple 'path, value' where path is structured as
"<passName>.<passOption>" and value is the option value, see
https://openql.readthedocs.io/en/latest/reference/python.html#openql.Compiler.set_option
See https://openql.readthedocs.io/en/latest/gen/reference_passes.html for passes and their options
"""
if quiet:
with suppress_stdout():
self.program.compile()
else: # show warnings
c = self._configure_compiler("", extra_pass_options)
c.compile(self.program)
def compile_cqasm(
self,
src: str,
extra_pass_options: List[Tuple[str, str]] = None
) -> None:
"""
Compile a string with cQasm source code.
Note that, contrary to the behaviour of compile(), the program runs just once by default, since looping can be
easily and more subtly performed in cQasm if desired.
Args:
src:
the cQasm source code string
extra_pass_options:
extra pass options for OpenQL. These consist of a tuple 'path, value' where path is structured as
"<passName>.<passOption>" and value is the option value, see
https://openql.readthedocs.io/en/latest/reference/python.html#openql.Compiler.set_option
See https://openql.readthedocs.io/en/latest/gen/reference_passes.html for passes and their options
"""
# save src to file (as needed by pass 'io.cqasm.Read')
src_filename = OqlProgram.output_dir+"/"+self.name+".cq"
pathlib.Path(src_filename).write_text(inspect.cleandoc(src))
c = self._configure_compiler(src_filename, extra_pass_options)
c.compile_with_frontend(self.platform)
# NB: used in clifford_rb_oql.py to skip both generation of RB sequences, and OpenQL compilation if
# contents of platf_cfg or clifford_rb_oql (i.e. the Python file that generates the RB sequence) have changed
def check_recompilation_needed_hash_based(
self,
clifford_rb_oql: str,
recompile: bool = True,
) -> dict:
"""
        Similar functionality to the deprecated `check_recompilation_needed`, but
        based on a file that is generated alongside the program file and that
        contains hashes of the files relevant to the generation of the RB
        sequences, which might be modified fairly often.
NB: Not intended for stand alone use!
The code invoking this function should later invoke:
`os.rename(recompile_dict["tmp_file"], recompile_dict["file"])` # FIXME: create member function for that
The behavior of this function depends on the recompile argument.
recompile:
True -> True, the program should be compiled
'as needed' -> compares filename to timestamp of config
and checks if the file exists, if required recompile.
False -> checks if the file exists, if it doesn't
compilation is required and raises a ValueError.
Use carefully, only if you know what you are doing!
Use 'as needed' to stay safe!
"""
hashes_ext = ".hashes"
tmp_ext = ".tmp"
rb_system_hashes_fn = self.filename + hashes_ext
tmp_fn = rb_system_hashes_fn + tmp_ext
platf_cfg_hash = get_file_sha256_hash(self._platf_cfg, return_hexdigest=True)
this_file_hash = get_file_sha256_hash(clifford_rb_oql, return_hexdigest=True)
file_hashes = {self._platf_cfg: platf_cfg_hash, clifford_rb_oql: this_file_hash}
_recompile = False
if not isfile(self.filename):
if recompile is False:
raise ValueError('No file:\n{}'.format(self.filename))
else:
# Force recompile, there is no program file
_recompile |= True # FIXME: why "|="?
# Determine if compilation is needed based on the hashed files
if not isfile(rb_system_hashes_fn):
# There is no file with the hashes, we must compile to be safe
_recompile |= True
else:
# Hashes exist, we use them to determine if recompilations is needed
with open(rb_system_hashes_fn) as json_file:
hashes_dict = json.load(json_file)
# Remove file to signal a compilation in progress
remove(rb_system_hashes_fn)
for fn in file_hashes.keys():
# Recompile becomes true if any of the hashed files has a different
# hash now
_recompile |= hashes_dict.get(fn, "") != file_hashes[fn]
# Write the updated hashes
# We use a temporary file such that for parallel compilations, if the
# process is interrupted before the end there will be no hash and
# recompilation will be forced
pathlib.Path(tmp_fn).parent.mkdir(parents=True, exist_ok=True)
pathlib.Path(tmp_fn).write_text(json.dumps(file_hashes))
res_dict = {
"file": rb_system_hashes_fn,
"tmp_file": tmp_fn
}
if recompile is False:
if _recompile is True:
log.warning(
"`{}` or\n`{}`\n might have been modified! Are you sure you didn't"
" want to compile?".format(self._platf_cfg, clifford_rb_oql)
)
res_dict["recompile"] = False
elif recompile is True:
# Enforce recompilation
res_dict["recompile"] = True
elif recompile == "as needed":
res_dict["recompile"] = _recompile
return res_dict
#############################################################################
# Calibration points
#############################################################################
"""
FIXME: while refactoring these from separate functions to class methods, it
was found that most functions returned the program (that was provided as a
parameter, which makes no sense), and that the return parameter was mostly
ignored (which makes no difference). The function documentation was
inconsistent with the actual code in this respect, probably as a result of
earlier refactoring.
    Function 'add_multi_q_cal_points' would return different types depending
    on a boolean parameter 'return_comb', but no cases were found where this
    parameter was set to True, so this behaviour was removed.
"""
def add_single_qubit_cal_points(
self,
qubit_idx: int,
f_state_cal_pts: bool = False,
measured_qubits=None
) -> None:
"""
Adds single qubit calibration points to an OpenQL program
Args:
qubit_idx:
index of qubit
f_state_cal_pts:
if True, add calibration points for the 2nd exc. state
measured_qubits:
selects which qubits to perform readout on. If measured_qubits == None, it will default
to measuring the qubit for which there are cal points.
Returns:
"""
if measured_qubits == None:
measured_qubits = [qubit_idx]
for i in np.arange(2):
k = self.create_kernel("cal_gr_" + str(i))
k.prepz(qubit_idx)
k.gate('wait', measured_qubits, 0)
for measured_qubit in measured_qubits:
k.measure(measured_qubit)
k.gate('wait', measured_qubits, 0)
self.add_kernel(k)
for i in np.arange(2):
k = self.create_kernel("cal_ex_" + str(i))
k.prepz(qubit_idx)
k.gate('rx180', [qubit_idx])
k.gate('wait', measured_qubits, 0)
for measured_qubit in measured_qubits:
k.measure(measured_qubit)
k.gate('wait', measured_qubits, 0)
self.add_kernel(k)
if f_state_cal_pts:
for i in np.arange(2):
k = self.create_kernel("cal_f_" + str(i))
k.prepz(qubit_idx)
k.gate('rx180', [qubit_idx])
k.gate('rx12', [qubit_idx])
k.gate('wait', measured_qubits, 0)
for measured_qubit in measured_qubits:
k.measure(measured_qubit)
k.gate('wait', measured_qubits, 0)
self.add_kernel(k)
def add_two_q_cal_points(
self,
q0: int,
q1: int,
reps_per_cal_pt: int = 1,
f_state_cal_pts: bool = False,
# f_state_cal_pt_cw: int = 31,
measured_qubits=None,
interleaved_measured_qubits=None,
interleaved_delay=None,
nr_of_interleaves=1
) -> None:
"""
Adds two qubit calibration points to an OpenQL program
Args:
q0:
index of first qubit
q1:
index of second qubit
reps_per_cal_pt:
number of times to repeat each cal point
f_state_cal_pts:
if True, add calibration points for the 2nd exc. state
measured_qubits:
selects which qubits to perform readout on. If measured_qubits == None, it will default
to measuring the qubit for which there are cal points
interleaved_measured_qubits:
interleaved_delay:
nr_of_interleaves:
"""
kernel_list = [] # FIXME: not really used (anymore?)
combinations = (["00"] * reps_per_cal_pt +
["01"] * reps_per_cal_pt +
["10"] * reps_per_cal_pt +
["11"] * reps_per_cal_pt)
if f_state_cal_pts:
extra_combs = (['02'] * reps_per_cal_pt + ['20'] * reps_per_cal_pt +
['22'] * reps_per_cal_pt)
combinations += extra_combs
if measured_qubits == None:
measured_qubits = [q0, q1]
for i, comb in enumerate(combinations):
k = self.create_kernel('cal{}_{}'.format(i, comb))
k.prepz(q0)
k.prepz(q1)
if interleaved_measured_qubits:
for j in range(nr_of_interleaves):
for q in interleaved_measured_qubits:
k.measure(q)
k.gate("wait", [0, 1, 2, 3, 4, 5, 6], 0)
if interleaved_delay:
k.gate('wait', [0, 1, 2, 3, 4, 5, 6],
int(interleaved_delay * 1e9))
if comb[0] == '0':
k.gate('i', [q0])
elif comb[0] == '1':
k.gate('rx180', [q0])
elif comb[0] == '2':
k.gate('rx180', [q0])
# FIXME: this is a workaround
# k.gate('rx12', [q0])
k.gate('cw_31', [q0])
if comb[1] == '0':
k.gate('i', [q1])
elif comb[1] == '1':
k.gate('rx180', [q1])
elif comb[1] == '2':
k.gate('rx180', [q1])
# FIXME: this is a workaround
# k.gate('rx12', [q1])
k.gate('cw_31', [q1])
# Used to ensure timing is aligned
k.gate('wait', measured_qubits, 0)
for q in measured_qubits:
k.measure(q)
k.gate('wait', measured_qubits, 0)
kernel_list.append(k)
self.add_kernel(k)
def add_multi_q_cal_points(
self,
qubits: List[int],
combinations: List[str] = ["00", "01", "10", "11"],
reps_per_cal_pnt: int = 1,
f_state_cal_pt_cw: int = 9, # 9 is the one listed as rX12 in `mw_lutman`
nr_flux_dance: int = None,
flux_cw_list: List[str] = None
) -> None:
"""
Args:
qubits:
list of qubit indices
combinations:
list with the target multi-qubit state
e.g. ["00", "01", "10", "11"] or
["00", "01", "10", "11", "02", "20", "22"] or
["000", "010", "101", "111"]
reps_per_cal_pnt:
number of times to repeat each cal point
f_state_cal_pt_cw:
the cw_idx for the pulse to the ef transition.
nr_flux_dance:
flux_cw_list:
"""
comb_repeated = []
for state in combinations:
comb_repeated += [state] * reps_per_cal_pnt
state_to_gates = {
"0": ["i"],
"1": ["rx180"],
"2": ["rx180", "cw_{:02}".format(f_state_cal_pt_cw)],
}
for i, comb in enumerate(comb_repeated):
k = self.create_kernel('cal{}_{}'.format(i, comb))
# NOTE: for debugging purposes of the effect of fluxing on readout,
# prepend flux dance before calibration points
for q_state, q in zip(comb, qubits):
k.prepz(q)
k.gate("wait", [], 0) # alignment
if nr_flux_dance and flux_cw_list:
for i in range(int(nr_flux_dance)):
for flux_cw in flux_cw_list:
k.gate(flux_cw, [0])
k.gate("wait", [], 0)
# k.gate("wait", [], 20) # prevent overlap of flux with mw gates
for q_state, q in zip(comb, qubits):
for gate in state_to_gates[q_state]:
k.gate(gate, [q])
k.gate("wait", [], 0) # alignment
# k.gate("wait", [], 20) # alignment
# for q_state, q in zip(comb, qubits):
# k.prepz(q)
# for gate in state_to_gates[q_state]:
# k.gate(gate, [q])
# k.gate("wait", [], 0) # alignment
for q in qubits:
k.measure(q)
k.gate('wait', [], 0) # alignment
self.add_kernel(k)
def add_two_q_cal_points_special_cond_osc(
self,
q0: int,
q1: int,
q2=None,
reps_per_cal_pt: int = 1,
f_state_cal_pts: bool = False,
# f_state_cal_pt_cw: int = 31,
measured_qubits=None,
interleaved_measured_qubits=None,
interleaved_delay=None,
nr_of_interleaves=1
) -> None:
"""
Args:
q0:
q1:
q2:
reps_per_cal_pt:
number of times to repeat each cal point
f_state_cal_pts:
if True, add calibration points for the 2nd exc. state
measured_qubits:
selects which qubits to perform readout on. If measured_qubits == None, it will default
to measuring the qubit for which there are cal points.
interleaved_measured_qubits:
interleaved_delay:
nr_of_interleaves:
Returns:
"""
combinations = (["00"] * reps_per_cal_pt +
["01"] * reps_per_cal_pt +
["10"] * reps_per_cal_pt +
["11"] * reps_per_cal_pt)
if f_state_cal_pts:
extra_combs = (['02'] * reps_per_cal_pt + ['20'] * reps_per_cal_pt +
['22'] * reps_per_cal_pt)
combinations += extra_combs
if q2 is not None:
combinations += ["Park_0", "Park_1"]
if (measured_qubits == None) and (q2 is None):
measured_qubits = [q0, q1]
elif (measured_qubits == None):
measured_qubits = [q0, q1, q2]
for i, comb in enumerate(combinations):
k = self.create_kernel('cal{}_{}'.format(i, comb))
k.prepz(q0)
k.prepz(q1)
if q2 is not None:
k.prepz(q2)
if interleaved_measured_qubits:
for j in range(nr_of_interleaves):
for q in interleaved_measured_qubits:
k.measure(q)
k.gate("wait", [0, 1, 2, 3, 4, 5, 6], 0)
if interleaved_delay:
k.gate('wait', [0, 1, 2, 3, 4, 5, 6], int(interleaved_delay * 1e9))
if comb[0] == '0':
k.gate('i', [q0])
elif comb[0] == '1':
k.gate('rx180', [q0])
elif comb[0] == '2':
k.gate('rx180', [q0])
# FIXME: this is a workaround
# k.gate('rx12', [q0])
k.gate('cw_31', [q0])
if comb[1] == '0':
k.gate('i', [q1])
elif comb[1] == '1':
k.gate('rx180', [q1])
elif comb[1] == '2':
k.gate('rx180', [q1])
# FIXME: this is a workaround
# k.gate('rx12', [q1])
k.gate('cw_31', [q1])
if comb[0] == 'P' and comb[-1] == '0':
k.gate('i', [q2])
elif comb[0] == 'P' and comb[-1] == '1':
k.gate('rx180', [q2])
# Used to ensure timing is aligned
k.gate('wait', measured_qubits, 0)
for q in measured_qubits:
k.measure(q)
k.gate('wait', measured_qubits, 0)
self.add_kernel(k)
#############################################################################
# Private functions
#############################################################################
def _configure_compiler(
self,
cqasm_src_filename: str,
extra_pass_options: List[Tuple[str, str]] = None
) -> ql.Compiler:
# NB: for alternative ways to configure the compiler, see
# https://openql.readthedocs.io/en/latest/gen/reference_configuration.html#compiler-configuration
c = self.platform.get_compiler()
# remove default pass list (this also removes support for most *global* options as defined in
# https://openql.readthedocs.io/en/latest/gen/reference_options.html, except for 'log_level')
# NB: this defeats automatic backend selection by OpenQL based on key "eqasm_compiler"
c.clear_passes()
# add the passes we need
compiling_cqasm = cqasm_src_filename != ""
if compiling_cqasm:
# cQASM reader as very first step
c.append_pass(
'io.cqasm.Read',
'reader',
{
'cqasm_file': cqasm_src_filename
}
)
# decomposer for legacy decompositions (those defined in the "gate_decomposition" section)
# see https://openql.readthedocs.io/en/latest/gen/reference_passes.html#instruction-decomposer
c.append_pass(
'dec.Instructions',
# NB: don't change the name 'legacy', see:
# - https://openql.readthedocs.io/en/latest/gen/reference_passes.html#instruction-decomposer
# - https://openql.readthedocs.io/en/latest/gen/reference_passes.html#predicate-key
'legacy',
)
# report the initial qasm
c.append_pass(
'io.cqasm.Report',
'initial',
{
'output_suffix': '.cq',
'with_timing': 'no'
}
)
# schedule
c.append_pass(
'sch.ListSchedule',
'scheduler',
{
'resource_constraints': 'yes'
}
)
# report scheduled qasm
c.append_pass(
'io.cqasm.Report',
'scheduled',
{
'output_suffix': '.cq',
}
)
if self._arch == 'CC':
# generate code using CC backend
# NB: OpenQL >= 0.10 no longer has a CC-light backend
c.append_pass(
'arch.cc.gen.VQ1Asm',
'cc_backend'
)
# set compiler pass options
c.set_option('*.output_prefix', f'{OqlProgram.output_dir}/%N.%P')
if self._arch == 'CC':
c.set_option('cc_backend.output_prefix', f'{OqlProgram.output_dir}/%N')
c.set_option('scheduler.scheduler_target', 'alap')
if compiling_cqasm:
c.set_option('cc_backend.run_once', 'yes') # if you want to loop, write a cqasm loop
# finally, set user pass options
if extra_pass_options is not None:
for opt, val in extra_pass_options:
c.set_option(opt, val)
log.debug("\n" + c.dump_strategy())
return c
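# Example (editor's sketch): typical use of the OqlProgram class above with the
# legacy Kernel API. The platform configuration file name and qubit index are
# illustrative assumptions.
def _example_oql_program(platf_cfg: str = 'config_cc.json') -> str:
    p = OqlProgram('demo_rabi', platf_cfg)
    k = p.create_kernel('rabi')
    k.prepz(0)
    k.gate('rx180', [0])
    k.measure(0)
    p.add_kernel(k)
    p.compile()
    return p.filename  # path of the generated program file, ready for upload
# Example (editor's sketch): the intended call pattern around
# check_recompilation_needed_hash_based(), including the os.rename() step that
# its docstring asks the caller to perform. 'rb_script_path' is illustrative.
def _example_recompile_check(p: OqlProgram, rb_script_path: str) -> None:
    import os
    recompile_dict = p.check_recompilation_needed_hash_based(
        rb_script_path, recompile='as needed')
    if recompile_dict['recompile']:
        p.compile()
    os.rename(recompile_dict['tmp_file'], recompile_dict['file'])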
##########################################################################
# compatibility functions
# FIXME: these are deprecated, but note that many scripts use these.
# In many functions we return the program object for legacy
# compatibility, although we specify a return type of " -> None" for
# those that use PyCharm or an other tool aware of type inconsistencies
# (which is highly recommended)
##########################################################################
@deprecated(version='0.4', reason="use class OqlProgram")
def create_program(
name: str,
platf_cfg: str,
nregisters: int = 32
) -> OqlProgram:
return OqlProgram(name, platf_cfg, nregisters)
@deprecated(version='0.4', reason="use class OqlProgram")
def create_kernel(
kname: str,
program: OqlProgram
) -> ql.Kernel:
return program.create_kernel(kname)
@deprecated(version='0.4', reason="use class OqlProgram")
def compile(
p: OqlProgram,
quiet: bool = False,
extra_openql_options: List[Tuple[str,str]] = None
) -> None:
p.compile(quiet, extra_openql_options)
return p # legacy compatibility
@deprecated(version='0.4', reason="use class OqlProgram")
def add_single_qubit_cal_points(
p: OqlProgram,
qubit_idx: int,
f_state_cal_pts: bool = False,
measured_qubits=None
) -> None:
p.add_single_qubit_cal_points(qubit_idx, f_state_cal_pts, measured_qubits)
return p # legacy compatibility
@deprecated(version='0.4', reason="use class OqlProgram")
def add_two_q_cal_points(
p: OqlProgram,
q0: int,
q1: int,
reps_per_cal_pt: int = 1,
f_state_cal_pts: bool = False,
        # f_state_cal_pt_cw: int = 31,  # FIXME: old, unused parameter
measured_qubits=None,
interleaved_measured_qubits=None,
interleaved_delay=None,
nr_of_interleaves=1
) -> None:
p.add_two_q_cal_points(q0, q1, reps_per_cal_pt, f_state_cal_pts, measured_qubits, interleaved_measured_qubits, interleaved_delay, nr_of_interleaves)
return p # legacy compatibility
@deprecated(version='0.4', reason="use class OqlProgram")
def add_multi_q_cal_points(
p: OqlProgram,
qubits: List[int],
combinations: List[str] = ["00", "01", "10", "11"],
reps_per_cal_pnt: int = 1,
f_state_cal_pt_cw: int = 9, # 9 is the one listed as rX12 in `mw_lutman`
nr_flux_dance: int = None,
flux_cw_list: List[str] = None,
return_comb=False
):
"""
Add a list of kernels containing calibration points in the program `p`
Args:
p : OpenQL program to add calibration points to
qubits : list of int
combinations : list with the target multi-qubit state
e.g. ["00", "01", "10", "11"] or
["00", "01", "10", "11", "02", "20", "22"] or
["000", "010", "101", "111"]
reps_per_cal_pnt : number of times to repeat each cal point
f_state_cal_pt_cw: the cw_idx for the pulse to the ef transition.
Returns:
p
"""
kernel_list = [] # Not sure if this is needed
    comb_repeated = []
for state in combinations:
        comb_repeated += [state] * reps_per_cal_pnt
state_to_gates = {
"0": ["i"],
"1": ["rx180"],
"2": ["rx180", "cw_{:02}".format(f_state_cal_pt_cw)],
}
    for i, comb in enumerate(comb_repeated):
k = create_kernel('cal{}_{}'.format(i, comb), p)
# NOTE: for debugging purposes of the effect of fluxing on readout,
# prepend flux dance before calibration points
for q_state, q in zip(comb, qubits):
k.prepz(q)
k.gate("wait", [], 0) # alignment
if nr_flux_dance and flux_cw_list:
for i in range(int(nr_flux_dance)):
for flux_cw in flux_cw_list:
k.gate(flux_cw, [0])
k.gate("wait", [], 0)
# k.gate("wait", [], 20) # prevent overlap of flux with mw gates
for q_state, q in zip(comb, qubits):
for gate in state_to_gates[q_state]:
k.gate(gate, [q])
k.gate("wait", [], 0) # alignment
# k.gate("wait", [], 20) # prevent overlap of flux with measurement pulse
for q in qubits:
k.measure(q)
k.gate('wait', [], 0) # alignment
kernel_list.append(k)
p.add_kernel(k)
if return_comb:
        return comb_repeated
else:
return p
@deprecated(version='0.4', reason="use class OqlProgram")
def add_two_q_cal_points_special_cond_osc(
p, q0: int, q1: int,
q2=None,
reps_per_cal_pt: int = 1,
f_state_cal_pts: bool = False,
# f_state_cal_pt_cw: int = 31,
measured_qubits=None,
interleaved_measured_qubits=None,
interleaved_delay=None,
nr_of_interleaves=1
) -> None:
p.add_two_q_cal_points_special_cond_osc(
q0, q1, q2,
reps_per_cal_pt,
f_state_cal_pts,
measured_qubits,
interleaved_measured_qubits,
interleaved_delay,
nr_of_interleaves
)
return p # legacy compatibility
# FIXME: move?
#############################################################################
# RamZZ measurement
#############################################################################
def measure_ramzz(k, qubit_idx: int, wait_time_ns: int):
"""
Helper function that adds a ramsey readout sequence to the specified qubit
on the specified kernel. Assumes that the qubit was already initialised.
Input pars:
k: Kernel to add ramsey readout sequence to
qubit_idx: Qubit to undergo ramsey sequence
wait_time_ns: Wait time in-between pi/2 pulses
Output pars:
None
"""
k.gate('ry90', [qubit_idx])
    k.gate('wait', [qubit_idx], wait_time_ns)
k.gate('rym90', [qubit_idx])
k.measure(qubit_idx)
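# Example (editor's sketch): adding the RamZZ readout sequence to a program
# built with OqlProgram; the qubit index and wait time are illustrative.
def _example_measure_ramzz(p: OqlProgram) -> None:
    k = p.create_kernel('ramzz_demo')
    k.prepz(0)
    measure_ramzz(k, qubit_idx=0, wait_time_ns=200)
    p.add_kernel(k)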
#############################################################################
# File modifications
#############################################################################
def is_compatible_openql_version_cc() -> bool:
"""
test whether OpenQL version is compatible with Central Controller
"""
return ql.get_version() > '0.10.0' # NB: 0.10.0 does not contain new CC backend yet
def clocks_to_s(time, clock_cycle=20e-9):
"""
Converts a time in clocks to a time in s
"""
return time * clock_cycle
#############################################################################
# Recompilation helpers
#############################################################################
def check_recompilation_needed_hash_based(
program_fn: str,
platf_cfg: str,
clifford_rb_oql: str,
recompile: bool = True,
):
raise DeprecationWarning("use OqlProgram.check_recompilation_needed_hash_based")
@deprecated(reason="Use `check_recompilation_needed_hash_based`!")
def check_recompilation_needed(
program_fn: str,
platf_cfg: str,
recompile=True
) -> bool:
"""
    determines if compilation of a file is needed based on its timestamp
and an optional recompile option
The behavior of this function depends on the recompile argument.
recompile:
True -> True, the program should be compiled
'as needed' -> compares filename to timestamp of config
and checks if the file exists, if required recompile.
False -> checks if the file exists, if it doesn't
compilation is required and raises a ValueError.
Use carefully, only if you know what you are doing!
Use 'as needed' to stay safe!
"""
if recompile is True:
return True # compilation is enforced
elif recompile == 'as needed':
        # In case you ever think of a hash-based check, mind that this
        # function is sometimes called from parallel multiprocessing!
if isfile(program_fn) and is_more_recent(program_fn, platf_cfg):
return False # program file is good for using
else:
return True # compilation is required
elif recompile is False:
if isfile(program_fn):
if is_more_recent(platf_cfg, program_fn):
log.warning("File {}\n is more recent"
"than program, use `recompile='as needed'` if you"
" don't know what this means!".format(platf_cfg))
return False
else:
raise ValueError('No file:\n{}'.format(platf_cfg))
else:
raise NotImplementedError(
'recompile should be True, False or "as needed"')
#############################################################################
# Multiple program loading helpers
#############################################################################
def load_range_of_oql_programs(
programs,
counter_param,
CC
) -> None:
"""
This is a helper function for running an experiment that is spread over
multiple OpenQL programs such as RB.
"""
program = programs[counter_param()]
counter_param((counter_param() + 1) % len(programs))
CC.eqasm_program(program.filename)
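# Illustrative sketch (not part of the original code): `counter_param` is used as a
# getter/setter callable (e.g. a QCoDeS ManualParameter), i.e. calling it without
# arguments returns the current index and calling it with a value sets it. A minimal
# stand-in is shown below; `programs` and `CC` are assumptions (a list of compiled
# OpenQL programs and the central-controller instrument).
class _ExampleProgramCounter:
    def __init__(self):
        self._value = 0

    def __call__(self, value=None):
        if value is None:
            return self._value
        self._value = value

# usage sketch:
#   counter = _ExampleProgramCounter()
#   load_range_of_oql_programs(programs, counter, CC)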
def load_range_of_oql_programs_from_filenames(
programs_filenames: list,
counter_param,
CC
) -> None:
"""
This is a helper function for running an experiment that is spread over
multiple OpenQL programs such as RB.
    [2020-07-04] this is a modification of the above function such that only
    the filename is passed and not an OpenQL program, allowing for parallel
    program compilation using Python's multiprocessing (only certain
    types of data can be returned from the processes running the
    compilations in parallel)
"""
fn = programs_filenames[counter_param()]
counter_param((counter_param() + 1) % len(programs_filenames))
CC.eqasm_program(fn)
def load_range_of_oql_programs_varying_nr_shots(
programs,
counter_param,
CC,
detector
) -> None:
"""
This is a helper function for running an experiment that is spread over
multiple OpenQL programs of varying length such as GST.
    Each time this function is called it also updates the number of shots
    in the detector based on the sweep points of the loaded program.
"""
program = programs[counter_param()]
counter_param((counter_param() + 1) % len(programs))
CC.eqasm_program(program.filename)
detector.nr_shots = len(program.sweep_points)
|
|
import logging
import pymel.core as pmc
from maya.app.general.mayaMixin import MayaQWidgetDockableMixin
from coconodz.etc.maya.ae.hooks import (DESIRED_HOOK,
OWNER,
remove_template_custom_content
)
from coconodz.etc.maya.qtutilities import maya_main_window
from coconodz.etc.maya import (applib,
callbacks,
decorators
)
from coconodz import SuppressEvents
import coconodz.nodegraph as nodegraph
from coconodz.lib import BaseWindow
LOG = logging.getLogger(name="CocoNodz.maya.nodegraph")
class MayaBaseWindow(MayaQWidgetDockableMixin, BaseWindow):
""" getting the DockableMixin class in to provide all
docking possibilities
"""
def __init__(self, parent):
super(MayaBaseWindow, self).__init__(parent)
class Nodzgraph(nodegraph.Nodegraph):
""" Maya Nodegraph widget implementation
"""
def __init__(self, parent=maya_main_window()):
super(Nodzgraph, self).__init__(parent)
# just providing docking features for Maya 2017 and newer
if int(pmc.about(api=True)) >= 201700:
self.window = MayaBaseWindow(parent)
# patch open_nodzgraph function
callbacks.AEHook.open_nodzgraph = self.open
        # add node categories
self.append_available_node_categories()
# setting the default attribute
self.configuration.default_slot = True
self.configuration.default_plug = True
self.configuration.default_attribute_name = "message"
self.configuration.default_attribute_data_type = "message"
def open(self):
""" opens the Nodegraph with dockable configuration settings
Returns:
"""
super(Nodzgraph, self).open(dockable=self.configuration.maya.docked,
area=self.configuration.maya.dock_area,
allowedArea=self.configuration.maya.allowed_dock_areas,
floating=self.configuration.maya.floating,
width=self.configuration.maya.width,
height=self.configuration.maya.height
)
def register_events(self):
super(Nodzgraph, self).register_events()
event_name_prefix = {callbacks: "host_"}
events_data = {callbacks: ["node_created",
"node_name_changed",
"node_deleted",
"connection_made",
"disconnection_made",
"before_scene_changes",
"after_scene_changes"]
}
# events factory to avoid unnecessary boilerplate
for obj, obj_events in events_data.iteritems():
for event in obj_events:
event_name = event_name_prefix[obj] + event
self.events.add_event(event_name,
adder=obj.__getattribute__("on_" + event),
adder_args=(self.__getattribute__("on_" + event_name),
)
)
self.events.attach_remover(event_name,
caller=callbacks.remove_callbacks_only,
callable_args=(self.events.data[event_name]["id_list"],
)
)
# behaves too differently to be part of the factory easily
self.events.add_event("ShadingEngine_template_hook",
adder=pmc.callbacks,
adder_kwargs={"addCallback": callbacks.add_template_custom_content,
"hook": DESIRED_HOOK,
"owner": OWNER
},
remover=remove_template_custom_content
)
def append_available_node_categories(self):
""" appends available node types in categories
Returns:
"""
available_node_types = self.graph.creation_field.available_items
for types in self.configuration.maya.available_node_categories:
node_types = pmc.listNodeTypes(types)
for node_type in node_types:
                if node_type not in available_node_types:
available_node_types.append(node_type)
self.graph.creation_field.available_items = available_node_types
def display_selected_host_nodes(self):
""" adds selected host nodes and corresponding connections to the graph
Returns:
"""
nodes_dict = {node.name(): node.nodeType() for node in pmc.selected()
if node.nodeType() in self.creation_field.available_items}
nodes_attributes = applib.get_connected_attributes_in_node_tree(pmc.selected(),
node_types=self.creation_field.available_items)
node_connections = applib.get_connections(pmc.selected())
self.display_host_nodes(nodes_dict=nodes_dict,
attributes_dict=nodes_attributes,
connections_dict=node_connections)
def on_context_request(self, widget):
if isinstance(widget, nodegraph.NodeItem):
node = pmc.PyNode(widget.name)
if node:
self.attribute_context.available_items = applib.get_attribute_tree(pmc.PyNode(widget.name))
super(Nodzgraph, self).on_context_request(widget)
def on_about_attribute_create(self, node_name, attribute_name):
""" slot override
Args:
node_name:
attribute_name:
Returns:
"""
node = self.get_node_by_name(node_name)
attribute_type = pmc.PyNode("{0}.{1}".format(node_name, attribute_name)).type()
node.add_attribute(attribute_name, data_type=attribute_type)
def on_host_before_scene_changes(self, *args):
self.events.pause_events(exclude=["before_scene_changes", "after_scene_changes"])
@decorators.execute_deferred
def on_host_after_scene_changes(self, *args):
self.events.resume_paused_events()
@SuppressEvents("node_created")
def on_host_node_created(self, node_name, node_type):
""" slot extension
Args:
node_name:
node_type:
Returns:
"""
super(Nodzgraph, self).on_host_node_created(node_name, node_type)
@SuppressEvents("host_node_created")
def on_node_created(self, node):
host_node = pmc.createNode(node.node_type)
self.graph.rename_node(node, host_node.name())
super(Nodzgraph, self).on_node_created(node)
@SuppressEvents("host_node_name_changed")
def on_node_name_changed(self, node, old_name, new_name):
        try:
            host_node = pmc.PyNode(old_name)
        except Exception:
            LOG.warning("Node {} doesn't exist.".format(old_name))
        else:
            try:
                host_node.rename(new_name)
            except Exception:
                LOG.warning("Not able to rename {}".format(old_name))
super(Nodzgraph, self).on_node_name_changed(node, old_name, new_name)
@SuppressEvents("node_name_changed")
def on_host_node_name_changed(self, new_name, old_name):
super(Nodzgraph, self).on_host_node_name_changed(new_name, old_name)
@SuppressEvents("host_connection_made")
def on_connection_made(self, connection):
""" slot extension
Args:
connection: ConnectionItem instance
Returns:
"""
plug_name = "{0}.{1}".format(connection.plugNode, connection.plugAttr)
socket_name = "{0}.{1}".format(connection.socketNode, connection.socketAttr)
try:
slot1 = pmc.PyNode(plug_name)
slot2 = pmc.PyNode(socket_name)
slot1 >> slot2
super(Nodzgraph, self).on_connection_made(connection)
        except Exception:
            LOG.warning("Cannot connect {0} to {1}".format(plug_name, socket_name))
@SuppressEvents("host_disconnection_made")
def on_disconnection_made(self, connection):
plug_name = "{0}.{1}".format(connection.plugNode, connection.plugAttr)
socket_name = "{0}.{1}".format(connection.socketNode, connection.socketAttr)
try:
slot1 = pmc.PyNode(plug_name)
slot2 = pmc.PyNode(socket_name)
slot1 // slot2
super(Nodzgraph, self).on_disconnection_made(connection)
        except Exception:
            LOG.warning("Cannot disconnect {0} from {1}".format(plug_name, socket_name))
@SuppressEvents(["connection_made", "plug_connected", "socket_connected"])
def on_host_connection_made(self, plug_name, socket_name):
""" slot extension
Args:
plug_name: name of the plug
socket_name: name of the socket
Returns:
"""
super(Nodzgraph, self).on_host_connection_made(plug_name, socket_name)
@SuppressEvents("host_node_deleted")
def on_nodes_deleted(self, nodeitems_list):
""" slot override
Args:
nodeitems_list:
Returns:
"""
for node in nodeitems_list:
try:
pmc.delete(node.name)
except RuntimeWarning:
LOG.warning("Not able to delete host node '{0}'".format(node.name), exc_info=True)
def on_nodes_selected(self, nodes_list):
        selection = [_.name for _ in nodes_list if _.node_type not in self.RESERVED_NODETYPES]
pmc.select(selection)
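# Illustrative sketch (not part of the original module): opening the node graph from
# the Maya script editor once CocoNodz is importable. Docking behaviour, size and
# position come from the CocoNodz configuration.
def _example_open_nodzgraph():
    graph = Nodzgraph()  # parented to the Maya main window by default
    graph.open()
    return graph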
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.data.tfexample_decoder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim
class TFExampleDecoderTest(tf.test.TestCase):
def _EncodedFloatFeature(self, ndarray):
return tf.train.Feature(float_list=tf.train.FloatList(
value=ndarray.flatten().tolist()))
def _EncodedInt64Feature(self, ndarray):
return tf.train.Feature(int64_list=tf.train.Int64List(
value=ndarray.flatten().tolist()))
def _EncodedBytesFeature(self, tf_encoded):
with self.test_session():
encoded = tf_encoded.eval()
def BytesList(value):
return tf.train.BytesList(value=[value])
return tf.train.Feature(bytes_list=BytesList(encoded))
def _BytesFeature(self, ndarray):
values = ndarray.flatten().tolist()
for i in range(len(values)):
values[i] = values[i].encode('utf-8')
return tf.train.Feature(bytes_list=tf.train.BytesList(value=values))
def _StringFeature(self, value):
value = value.encode('utf-8')
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _Encoder(self, image, image_format):
assert image_format in ['jpeg', 'JPEG', 'png', 'PNG', 'raw', 'RAW']
if image_format in ['jpeg', 'JPEG']:
tf_image = tf.constant(image, dtype=tf.uint8)
return tf.image.encode_jpeg(tf_image)
if image_format in ['png', 'PNG']:
tf_image = tf.constant(image, dtype=tf.uint8)
return tf.image.encode_png(tf_image)
if image_format in ['raw', 'RAW']:
return tf.constant(image.tostring(), dtype=tf.string)
def GenerateImage(self, image_format, image_shape):
"""Generates an image and an example containing the encoded image.
Args:
image_format: the encoding format of the image.
image_shape: the shape of the image to generate.
Returns:
image: the generated image.
example: a TF-example with a feature key 'image/encoded' set to the
serialized image and a feature key 'image/format' set to the image
encoding format ['jpeg', 'JPEG', 'png', 'PNG', 'raw'].
"""
num_pixels = image_shape[0] * image_shape[1] * image_shape[2]
image = np.linspace(0, num_pixels-1, num=num_pixels).reshape(
image_shape).astype(np.uint8)
tf_encoded = self._Encoder(image, image_format)
example = tf.train.Example(features=tf.train.Features(feature={
'image/encoded': self._EncodedBytesFeature(tf_encoded),
'image/format': self._StringFeature(image_format)
}))
return image, example.SerializeToString()
def DecodeExample(self, serialized_example, item_handler, image_format):
"""Decodes the given serialized example with the specified item handler.
Args:
serialized_example: a serialized TF example string.
item_handler: the item handler used to decode the image.
image_format: the image format being decoded.
Returns:
the decoded image found in the serialized Example.
"""
serialized_example = tf.reshape(serialized_example, shape=[])
decoder = slim.tfexample_decoder.TFExampleDecoder(
keys_to_features={
'image/encoded': tf.FixedLenFeature(
(), tf.string, default_value=''),
'image/format': tf.FixedLenFeature(
(), tf.string, default_value=image_format),
},
items_to_handlers={'image': item_handler}
)
[tf_image] = decoder.decode(serialized_example, ['image'])
return tf_image
def RunDecodeExample(self, serialized_example, item_handler, image_format):
tf_image = self.DecodeExample(serialized_example, item_handler,
image_format)
with self.test_session():
decoded_image = tf_image.eval()
# We need to recast them here to avoid some issues with uint8.
return decoded_image.astype(np.float32)
def testDecodeExampleWithJpegEncoding(self):
image_shape = (2, 3, 3)
image, serialized_example = self.GenerateImage(
image_format='jpeg',
image_shape=image_shape)
decoded_image = self.RunDecodeExample(
serialized_example,
slim.tfexample_decoder.Image(),
image_format='jpeg')
# Need to use a tolerance of 1 because of noise in the jpeg encode/decode
self.assertAllClose(image, decoded_image, atol=1.001)
def testDecodeExampleWithJPEGEncoding(self):
test_image_channels = [1, 3]
for channels in test_image_channels:
image_shape = (2, 3, channels)
image, serialized_example = self.GenerateImage(
image_format='JPEG',
image_shape=image_shape)
decoded_image = self.RunDecodeExample(
serialized_example,
slim.tfexample_decoder.Image(channels=channels),
image_format='JPEG')
# Need to use a tolerance of 1 because of noise in the jpeg encode/decode
self.assertAllClose(image, decoded_image, atol=1.001)
def testDecodeExampleWithNoShapeInfo(self):
test_image_channels = [1, 3]
for channels in test_image_channels:
image_shape = (2, 3, channels)
_, serialized_example = self.GenerateImage(
image_format='jpeg',
image_shape=image_shape)
tf_decoded_image = self.DecodeExample(
serialized_example,
slim.tfexample_decoder.Image(shape=None, channels=channels),
image_format='jpeg')
self.assertEqual(tf_decoded_image.get_shape().ndims, 3)
def testDecodeExampleWithPngEncoding(self):
test_image_channels = [1, 3, 4]
for channels in test_image_channels:
image_shape = (2, 3, channels)
image, serialized_example = self.GenerateImage(
image_format='png',
image_shape=image_shape)
decoded_image = self.RunDecodeExample(
serialized_example,
slim.tfexample_decoder.Image(channels=channels),
image_format='png')
self.assertAllClose(image, decoded_image, atol=0)
def testDecodeExampleWithPNGEncoding(self):
test_image_channels = [1, 3, 4]
for channels in test_image_channels:
image_shape = (2, 3, channels)
image, serialized_example = self.GenerateImage(
image_format='PNG',
image_shape=image_shape)
decoded_image = self.RunDecodeExample(
serialized_example,
slim.tfexample_decoder.Image(channels=channels),
image_format='PNG')
self.assertAllClose(image, decoded_image, atol=0)
def testDecodeExampleWithRawEncoding(self):
image_shape = (2, 3, 3)
image, serialized_example = self.GenerateImage(
image_format='raw',
image_shape=image_shape)
decoded_image = self.RunDecodeExample(
serialized_example,
slim.tfexample_decoder.Image(shape=image_shape),
image_format='raw')
self.assertAllClose(image, decoded_image, atol=0)
def testDecodeExampleWithRAWEncoding(self):
image_shape = (2, 3, 3)
image, serialized_example = self.GenerateImage(
image_format='RAW',
image_shape=image_shape)
decoded_image = self.RunDecodeExample(
serialized_example,
slim.tfexample_decoder.Image(shape=image_shape),
image_format='RAW')
self.assertAllClose(image, decoded_image, atol=0)
def testDecodeExampleWithStringTensor(self):
tensor_shape = (2, 3, 1)
np_array = np.array([[['ab'], ['cd'], ['ef']],
[['ghi'], ['jkl'], ['mnop']]])
example = tf.train.Example(features=tf.train.Features(feature={
'labels': self._BytesFeature(np_array),
}))
serialized_example = example.SerializeToString()
with self.test_session():
serialized_example = tf.reshape(serialized_example, shape=[])
keys_to_features = {
'labels': tf.FixedLenFeature(
tensor_shape, tf.string, default_value=tf.constant(
'', shape=tensor_shape, dtype=tf.string))
}
items_to_handlers = {
'labels': slim.tfexample_decoder.Tensor('labels'),
}
decoder = slim.tfexample_decoder.TFExampleDecoder(
keys_to_features, items_to_handlers)
[tf_labels] = decoder.decode(serialized_example, ['labels'])
labels = tf_labels.eval()
labels = labels.astype(np_array.dtype)
self.assertTrue(np.array_equal(np_array, labels))
def testDecodeExampleWithFloatTensor(self):
np_array = np.random.rand(2, 3, 1).astype('f')
example = tf.train.Example(features=tf.train.Features(feature={
'array': self._EncodedFloatFeature(np_array),
}))
serialized_example = example.SerializeToString()
with self.test_session():
serialized_example = tf.reshape(serialized_example, shape=[])
keys_to_features = {
'array': tf.FixedLenFeature(np_array.shape, tf.float32)
}
items_to_handlers = {
'array': slim.tfexample_decoder.Tensor('array'),
}
decoder = slim.tfexample_decoder.TFExampleDecoder(
keys_to_features, items_to_handlers)
[tf_array] = decoder.decode(serialized_example, ['array'])
self.assertAllEqual(tf_array.eval(), np_array)
def testDecodeExampleWithInt64Tensor(self):
np_array = np.random.randint(1, 10, size=(2, 3, 1))
example = tf.train.Example(features=tf.train.Features(feature={
'array': self._EncodedInt64Feature(np_array),
}))
serialized_example = example.SerializeToString()
with self.test_session():
serialized_example = tf.reshape(serialized_example, shape=[])
keys_to_features = {
'array': tf.FixedLenFeature(np_array.shape, tf.int64)
}
items_to_handlers = {
'array': slim.tfexample_decoder.Tensor('array'),
}
decoder = slim.tfexample_decoder.TFExampleDecoder(
keys_to_features, items_to_handlers)
[tf_array] = decoder.decode(serialized_example, ['array'])
self.assertAllEqual(tf_array.eval(), np_array)
def testDecodeExampleWithVarLenTensor(self):
np_array = np.array([[[1], [2], [3]],
[[4], [5], [6]]])
example = tf.train.Example(features=tf.train.Features(feature={
'labels': self._EncodedInt64Feature(np_array),
}))
serialized_example = example.SerializeToString()
with self.test_session():
serialized_example = tf.reshape(serialized_example, shape=[])
keys_to_features = {
'labels': tf.VarLenFeature(dtype=tf.int64),
}
items_to_handlers = {
'labels': slim.tfexample_decoder.Tensor('labels'),
}
decoder = slim.tfexample_decoder.TFExampleDecoder(
keys_to_features, items_to_handlers)
[tf_labels] = decoder.decode(serialized_example, ['labels'])
labels = tf_labels.eval()
self.assertAllEqual(labels, np_array.flatten())
def testDecodeExampleWithFixLenTensorWithShape(self):
np_array = np.array([[1, 2, 3],
[4, 5, 6]])
example = tf.train.Example(features=tf.train.Features(feature={
'labels': self._EncodedInt64Feature(np_array),
}))
serialized_example = example.SerializeToString()
with self.test_session():
serialized_example = tf.reshape(serialized_example, shape=[])
keys_to_features = {
'labels': tf.FixedLenFeature(np_array.shape, dtype=tf.int64),
}
items_to_handlers = {
'labels': slim.tfexample_decoder.Tensor('labels',
shape=np_array.shape),
}
decoder = slim.tfexample_decoder.TFExampleDecoder(
keys_to_features, items_to_handlers)
[tf_labels] = decoder.decode(serialized_example, ['labels'])
labels = tf_labels.eval()
self.assertAllEqual(labels, np_array)
def testDecodeExampleWithVarLenTensorToDense(self):
np_array = np.array([[1, 2, 3],
[4, 5, 6]])
example = tf.train.Example(features=tf.train.Features(feature={
'labels': self._EncodedInt64Feature(np_array),
}))
serialized_example = example.SerializeToString()
with self.test_session():
serialized_example = tf.reshape(serialized_example, shape=[])
keys_to_features = {
'labels': tf.VarLenFeature(dtype=tf.int64),
}
items_to_handlers = {
'labels': slim.tfexample_decoder.Tensor('labels',
shape=np_array.shape),
}
decoder = slim.tfexample_decoder.TFExampleDecoder(
keys_to_features, items_to_handlers)
[tf_labels] = decoder.decode(serialized_example, ['labels'])
labels = tf_labels.eval()
self.assertAllEqual(labels, np_array)
def testDecodeExampleShapeKeyTensor(self):
np_image = np.random.rand(2, 3, 1).astype('f')
np_labels = np.array([[[1], [2], [3]],
[[4], [5], [6]]])
example = tf.train.Example(features=tf.train.Features(feature={
'image': self._EncodedFloatFeature(np_image),
'image/shape': self._EncodedInt64Feature(np.array(np_image.shape)),
'labels': self._EncodedInt64Feature(np_labels),
'labels/shape': self._EncodedInt64Feature(np.array(np_labels.shape)),
}))
serialized_example = example.SerializeToString()
with self.test_session():
serialized_example = tf.reshape(serialized_example, shape=[])
keys_to_features = {
'image': tf.VarLenFeature(dtype=tf.float32),
'image/shape': tf.VarLenFeature(dtype=tf.int64),
'labels': tf.VarLenFeature(dtype=tf.int64),
'labels/shape': tf.VarLenFeature(dtype=tf.int64),
}
items_to_handlers = {
'image': slim.tfexample_decoder.Tensor('image',
shape_keys='image/shape'),
'labels': slim.tfexample_decoder.Tensor('labels',
shape_keys='labels/shape'),
}
decoder = slim.tfexample_decoder.TFExampleDecoder(
keys_to_features, items_to_handlers)
[tf_image, tf_labels] = decoder.decode(serialized_example,
['image', 'labels'])
self.assertAllEqual(tf_image.eval(), np_image)
self.assertAllEqual(tf_labels.eval(), np_labels)
def testDecodeExampleMultiShapeKeyTensor(self):
np_image = np.random.rand(2, 3, 1).astype('f')
np_labels = np.array([[[1], [2], [3]],
[[4], [5], [6]]])
height, width, depth = np_labels.shape
example = tf.train.Example(features=tf.train.Features(feature={
'image': self._EncodedFloatFeature(np_image),
'image/shape': self._EncodedInt64Feature(np.array(np_image.shape)),
'labels': self._EncodedInt64Feature(np_labels),
'labels/height': self._EncodedInt64Feature(np.array([height])),
'labels/width': self._EncodedInt64Feature(np.array([width])),
'labels/depth': self._EncodedInt64Feature(np.array([depth])),
}))
serialized_example = example.SerializeToString()
with self.test_session():
serialized_example = tf.reshape(serialized_example, shape=[])
keys_to_features = {
'image': tf.VarLenFeature(dtype=tf.float32),
'image/shape': tf.VarLenFeature(dtype=tf.int64),
'labels': tf.VarLenFeature(dtype=tf.int64),
'labels/height': tf.VarLenFeature(dtype=tf.int64),
'labels/width': tf.VarLenFeature(dtype=tf.int64),
'labels/depth': tf.VarLenFeature(dtype=tf.int64),
}
items_to_handlers = {
'image': slim.tfexample_decoder.Tensor(
'image', shape_keys='image/shape'),
'labels': slim.tfexample_decoder.Tensor(
'labels',
shape_keys=['labels/height', 'labels/width', 'labels/depth']),
}
decoder = slim.tfexample_decoder.TFExampleDecoder(
keys_to_features, items_to_handlers)
[tf_image, tf_labels] = decoder.decode(serialized_example,
['image', 'labels'])
self.assertAllEqual(tf_image.eval(), np_image)
self.assertAllEqual(tf_labels.eval(), np_labels)
def testDecodeExampleWithSparseTensor(self):
np_indices = np.array([[1], [2], [5]])
np_values = np.array([0.1, 0.2, 0.6]).astype('f')
example = tf.train.Example(features=tf.train.Features(feature={
'indices': self._EncodedInt64Feature(np_indices),
'values': self._EncodedFloatFeature(np_values),
}))
serialized_example = example.SerializeToString()
with self.test_session():
serialized_example = tf.reshape(serialized_example, shape=[])
keys_to_features = {
'indices': tf.VarLenFeature(dtype=tf.int64),
'values': tf.VarLenFeature(dtype=tf.float32),
}
items_to_handlers = {
'labels': slim.tfexample_decoder.SparseTensor(),
}
decoder = slim.tfexample_decoder.TFExampleDecoder(
keys_to_features, items_to_handlers)
[tf_labels] = decoder.decode(serialized_example, ['labels'])
labels = tf_labels.eval()
self.assertAllEqual(labels.indices, np_indices)
self.assertAllEqual(labels.values, np_values)
self.assertAllEqual(labels.dense_shape, np_values.shape)
def testDecodeExampleWithSparseTensorWithKeyShape(self):
np_indices = np.array([[1], [2], [5]])
np_values = np.array([0.1, 0.2, 0.6]).astype('f')
np_shape = np.array([6])
example = tf.train.Example(features=tf.train.Features(feature={
'indices': self._EncodedInt64Feature(np_indices),
'values': self._EncodedFloatFeature(np_values),
'shape': self._EncodedInt64Feature(np_shape),
}))
serialized_example = example.SerializeToString()
with self.test_session():
serialized_example = tf.reshape(serialized_example, shape=[])
keys_to_features = {
'indices': tf.VarLenFeature(dtype=tf.int64),
'values': tf.VarLenFeature(dtype=tf.float32),
'shape': tf.VarLenFeature(dtype=tf.int64),
}
items_to_handlers = {
'labels': slim.tfexample_decoder.SparseTensor(shape_key='shape'),
}
decoder = slim.tfexample_decoder.TFExampleDecoder(
keys_to_features, items_to_handlers)
[tf_labels] = decoder.decode(serialized_example, ['labels'])
labels = tf_labels.eval()
self.assertAllEqual(labels.indices, np_indices)
self.assertAllEqual(labels.values, np_values)
self.assertAllEqual(labels.dense_shape, np_shape)
def testDecodeExampleWithSparseTensorWithGivenShape(self):
np_indices = np.array([[1], [2], [5]])
np_values = np.array([0.1, 0.2, 0.6]).astype('f')
np_shape = np.array([6])
example = tf.train.Example(features=tf.train.Features(feature={
'indices': self._EncodedInt64Feature(np_indices),
'values': self._EncodedFloatFeature(np_values),
}))
serialized_example = example.SerializeToString()
with self.test_session():
serialized_example = tf.reshape(serialized_example, shape=[])
keys_to_features = {
'indices': tf.VarLenFeature(dtype=tf.int64),
'values': tf.VarLenFeature(dtype=tf.float32),
}
items_to_handlers = {
'labels': slim.tfexample_decoder.SparseTensor(shape=np_shape),
}
decoder = slim.tfexample_decoder.TFExampleDecoder(
keys_to_features, items_to_handlers)
[tf_labels] = decoder.decode(serialized_example, ['labels'])
labels = tf_labels.eval()
self.assertAllEqual(labels.indices, np_indices)
self.assertAllEqual(labels.values, np_values)
self.assertAllEqual(labels.dense_shape, np_shape)
def testDecodeExampleWithSparseTensorToDense(self):
np_indices = np.array([1, 2, 5])
np_values = np.array([0.1, 0.2, 0.6]).astype('f')
np_shape = np.array([6])
np_dense = np.array([0.0, 0.1, 0.2, 0.0, 0.0, 0.6]).astype('f')
example = tf.train.Example(features=tf.train.Features(feature={
'indices': self._EncodedInt64Feature(np_indices),
'values': self._EncodedFloatFeature(np_values),
}))
serialized_example = example.SerializeToString()
with self.test_session():
serialized_example = tf.reshape(serialized_example, shape=[])
keys_to_features = {
'indices': tf.VarLenFeature(dtype=tf.int64),
'values': tf.VarLenFeature(dtype=tf.float32),
}
items_to_handlers = {
'labels': slim.tfexample_decoder.SparseTensor(shape=np_shape,
densify=True),
}
decoder = slim.tfexample_decoder.TFExampleDecoder(
keys_to_features, items_to_handlers)
[tf_labels] = decoder.decode(serialized_example, ['labels'])
labels = tf_labels.eval()
self.assertAllClose(labels, np_dense)
def testDecodeExampleWithTensor(self):
tensor_shape = (2, 3, 1)
np_array = np.random.rand(2, 3, 1)
example = tf.train.Example(features=tf.train.Features(feature={
'image/depth_map': self._EncodedFloatFeature(np_array),
}))
serialized_example = example.SerializeToString()
with self.test_session():
serialized_example = tf.reshape(serialized_example, shape=[])
keys_to_features = {
'image/depth_map': tf.FixedLenFeature(
tensor_shape, tf.float32, default_value=tf.zeros(tensor_shape))
}
items_to_handlers = {
'depth': slim.tfexample_decoder.Tensor('image/depth_map')
}
decoder = slim.tfexample_decoder.TFExampleDecoder(
keys_to_features, items_to_handlers)
[tf_depth] = decoder.decode(serialized_example, ['depth'])
depth = tf_depth.eval()
self.assertAllClose(np_array, depth)
def testDecodeExampleWithItemHandlerCallback(self):
np.random.seed(0)
tensor_shape = (2, 3, 1)
np_array = np.random.rand(2, 3, 1)
example = tf.train.Example(features=tf.train.Features(feature={
'image/depth_map': self._EncodedFloatFeature(np_array),
}))
serialized_example = example.SerializeToString()
with self.test_session():
serialized_example = tf.reshape(serialized_example, shape=[])
keys_to_features = {
'image/depth_map': tf.FixedLenFeature(
tensor_shape, tf.float32, default_value=tf.zeros(tensor_shape))
}
def HandleDepth(keys_to_tensors):
depth = list(keys_to_tensors.values())[0]
depth += 1
return depth
items_to_handlers = {
'depth': slim.tfexample_decoder.ItemHandlerCallback(
'image/depth_map', HandleDepth)
}
decoder = slim.tfexample_decoder.TFExampleDecoder(
keys_to_features, items_to_handlers)
[tf_depth] = decoder.decode(serialized_example, ['depth'])
depth = tf_depth.eval()
self.assertAllClose(np_array, depth-1)
def testDecodeImageWithItemHandlerCallback(self):
image_shape = (2, 3, 3)
for image_encoding in ['jpeg', 'png']:
image, serialized_example = self.GenerateImage(
image_format=image_encoding,
image_shape=image_shape)
with self.test_session():
def ConditionalDecoding(keys_to_tensors):
"""See base class."""
image_buffer = keys_to_tensors['image/encoded']
image_format = keys_to_tensors['image/format']
def DecodePng():
return tf.image.decode_png(image_buffer, 3)
def DecodeJpg():
return tf.image.decode_jpeg(image_buffer, 3)
image = tf.case({
tf.equal(image_format, 'png'): DecodePng,
}, default=DecodeJpg, exclusive=True)
image = tf.reshape(image, image_shape)
return image
keys_to_features = {
'image/encoded': tf.FixedLenFeature(
(), tf.string, default_value=''),
'image/format': tf.FixedLenFeature(
(), tf.string, default_value='jpeg')
}
items_to_handlers = {
'image': slim.tfexample_decoder.ItemHandlerCallback(
['image/encoded', 'image/format'], ConditionalDecoding)
}
decoder = slim.tfexample_decoder.TFExampleDecoder(
keys_to_features, items_to_handlers)
[tf_image] = decoder.decode(serialized_example, ['image'])
decoded_image = tf_image.eval()
if image_encoding == 'jpeg':
# For jenkins:
image = image.astype(np.float32)
decoded_image = decoded_image.astype(np.float32)
self.assertAllClose(image, decoded_image, rtol=.5, atol=1.001)
else:
self.assertAllClose(image, decoded_image, atol=0)
def testDecodeExampleWithBoundingBox(self):
num_bboxes = 10
np_ymin = np.random.rand(num_bboxes, 1)
np_xmin = np.random.rand(num_bboxes, 1)
np_ymax = np.random.rand(num_bboxes, 1)
np_xmax = np.random.rand(num_bboxes, 1)
np_bboxes = np.hstack([np_ymin, np_xmin, np_ymax, np_xmax])
example = tf.train.Example(features=tf.train.Features(feature={
'image/object/bbox/ymin': self._EncodedFloatFeature(np_ymin),
'image/object/bbox/xmin': self._EncodedFloatFeature(np_xmin),
'image/object/bbox/ymax': self._EncodedFloatFeature(np_ymax),
'image/object/bbox/xmax': self._EncodedFloatFeature(np_xmax),
}))
serialized_example = example.SerializeToString()
with self.test_session():
serialized_example = tf.reshape(serialized_example, shape=[])
keys_to_features = {
'image/object/bbox/ymin': tf.VarLenFeature(tf.float32),
'image/object/bbox/xmin': tf.VarLenFeature(tf.float32),
'image/object/bbox/ymax': tf.VarLenFeature(tf.float32),
'image/object/bbox/xmax': tf.VarLenFeature(tf.float32),
}
items_to_handlers = {
'object/bbox': slim.tfexample_decoder.BoundingBox(
['ymin', 'xmin', 'ymax', 'xmax'], 'image/object/bbox/'),
}
decoder = slim.tfexample_decoder.TFExampleDecoder(
keys_to_features,
items_to_handlers)
[tf_bboxes] = decoder.decode(serialized_example, ['object/bbox'])
bboxes = tf_bboxes.eval()
self.assertAllClose(np_bboxes, bboxes)
if __name__ == '__main__':
tf.test.main()
|
|
import petsc4py
import sys
petsc4py.init(sys.argv)
from petsc4py import PETSc
import numpy as np
from dolfin import tic, toc
import HiptmairSetup
import PETScIO as IO
import scipy.sparse as sp
import MatrixOperations as MO
class BaseMyPC(object):
def setup(self, pc):
pass
def reset(self, pc):
pass
def apply(self, pc, x, y):
raise NotImplementedError
def applyT(self, pc, x, y):
self.apply(pc, x, y)
def applyS(self, pc, x, y):
self.apply(pc, x, y)
def applySL(self, pc, x, y):
self.applyS(pc, x, y)
def applySR(self, pc, x, y):
self.applyS(pc, x, y)
def applyRich(self, pc, x, y, w, tols):
self.apply(pc, x, y)
class BlockTriInv(BaseMyPC):
def __init__(self, W, kspF, kspA, kspQ,Fp,kspScalar, kspCGScalar, kspVector, G, P, A, Hiptmairtol):
self.W = W
self.kspF = kspF
self.kspA = kspA
self.kspQ = kspQ
self.Fp = Fp
self.kspScalar = kspScalar
self.kspCGScalar = kspCGScalar
self.kspVector = kspVector
# self.Bt = Bt
self.HiptmairIts = 0
self.CGits = 0
# print range(self.W[0].dim(),self.W[0].dim()+self.W[1].dim())
# ss
self.P = P
self.G = G
self.AA = A
self.tol = Hiptmairtol
self.u_is = PETSc.IS().createGeneral(range(self.W[0].dim()))
self.p_is = PETSc.IS().createGeneral(range(self.W[0].dim(),self.W[0].dim()+self.W[1].dim()))
self.b_is = PETSc.IS().createGeneral(range(self.W[0].dim()+self.W[1].dim(),
self.W[0].dim()+self.W[1].dim()+self.W[2].dim()))
self.r_is = PETSc.IS().createGeneral(range(self.W[0].dim()+self.W[1].dim()+self.W[2].dim(),
self.W[0].dim()+self.W[1].dim()+self.W[2].dim()+self.W[3].dim()))
def create(self, pc):
print "Create"
def setUp(self, pc):
A, P = pc.getOperators()
print A.size
if A.type == 'python':
self.Ct = A.getPythonContext().getMatrix("Ct")
self.Bt = A.getPythonContext().getMatrix("Bt")
else:
self.Ct = A.getSubMatrix(self.b_is,self.u_is)
self.Bt = A.getSubMatrix(self.p_is,self.u_is)
self.Dt = A.getSubMatrix(self.r_is,self.b_is)
OptDB = PETSc.Options()
OptDB["pc_factor_mat_ordering_type"] = "rcm"
OptDB["pc_factor_mat_solver_package"] = "umfpack"
self.kspA.setType('preonly')
self.kspA.getPC().setType('lu')
self.kspA.setFromOptions()
self.kspA.setPCSide(0)
self.kspQ.setType('preonly')
self.kspQ.getPC().setType('lu')
self.kspQ.setFromOptions()
self.kspQ.setPCSide(0)
self.kspScalar.setType('preonly')
self.kspScalar.getPC().setType('lu')
self.kspScalar.setFromOptions()
self.kspScalar.setPCSide(0)
kspMX = PETSc.KSP()
kspMX.create(comm=PETSc.COMM_WORLD)
pcMX = kspMX.getPC()
kspMX.setType('preonly')
pcMX.setType('lu')
OptDB = PETSc.Options()
kspMX.setOperators(self.AA,self.AA)
self.kspMX = kspMX
# self.kspCGScalar.setType('preonly')
# self.kspCGScalar.getPC().setType('lu')
# self.kspCGScalar.setFromOptions()
# self.kspCGScalar.setPCSide(0)
self.kspVector.setType('preonly')
self.kspVector.getPC().setType('lu')
self.kspVector.setFromOptions()
self.kspVector.setPCSide(0)
print "setup"
def apply(self, pc, x, y):
br = x.getSubVector(self.r_is)
xr = br.duplicate()
self.kspScalar.solve(br, xr)
# print self.D.size
x2 = x.getSubVector(self.p_is)
y2 = x2.duplicate()
y3 = x2.duplicate()
xp = x2.duplicate()
self.kspA.solve(x2,y2)
self.Fp.mult(y2,y3)
self.kspQ.solve(y3,xp)
# self.kspF.solve(bu1-bu4-bu2,xu)
bb = x.getSubVector(self.b_is)
xb = bb.duplicate()
xxr = bb.duplicate()
self.Dt.multTranspose(xr,xxr)
self.kspMX.solve(bb,xb)
bu1 = x.getSubVector(self.u_is)
bu2 = bu1.duplicate()
bu4 = bu1.duplicate()
self.Bt.multTranspose(xp,bu2)
self.Ct.multTranspose(xb,bu4)
XX = bu1.duplicate()
xu = XX.duplicate()
self.kspF.solve(bu1-bu4+bu2,xu)
#self.kspF.solve(bu1,xu)
y.array = (np.concatenate([xu.array, -xp.array,xb.array,xr.array]))
def ITS(self):
return self.CGits, self.HiptmairIts , self.CGtime, self.HiptmairTime
class BlockTriApp(BaseMyPC):
def __init__(self, W, kspF, kspA, kspQ,Fp,kspScalar, kspCGScalar, kspVector, G, P, A, Hiptmairtol):
self.W = W
self.kspF = kspF
self.kspA = kspA
self.kspQ = kspQ
self.Fp = Fp
self.kspScalar = kspScalar
self.kspCGScalar = kspCGScalar
self.kspVector = kspVector
# self.Bt = Bt
self.HiptmairIts = 0
self.CGits = 0
# print range(self.W[0].dim(),self.W[0].dim()+self.W[1].dim())
# ss
self.P = P
self.G = G
self.AA = A
self.tol = Hiptmairtol
self.u_is = PETSc.IS().createGeneral(range(self.W[0].dim()))
self.p_is = PETSc.IS().createGeneral(range(self.W[0].dim(),self.W[0].dim()+self.W[1].dim()))
self.b_is = PETSc.IS().createGeneral(range(self.W[0].dim()+self.W[1].dim(),
self.W[0].dim()+self.W[1].dim()+self.W[2].dim()))
self.r_is = PETSc.IS().createGeneral(range(self.W[0].dim()+self.W[1].dim()+self.W[2].dim(),
self.W[0].dim()+self.W[1].dim()+self.W[2].dim()+self.W[3].dim()))
def create(self, pc):
print "Create"
def setUp(self, pc):
A, P = pc.getOperators()
print A.size
if A.type == 'python':
self.Ct = A.getPythonContext().getMatrix("Ct")
self.Bt = A.getPythonContext().getMatrix("Bt")
else:
self.Ct = A.getSubMatrix(self.b_is,self.u_is)
self.Bt = A.getSubMatrix(self.p_is,self.u_is)
self.Dt = A.getSubMatrix(self.r_is,self.b_is)
print "setup"
def apply(self, pc, x, y):
br = x.getSubVector(self.r_is)
xr = br.duplicate()
self.kspScalar.solve(br, xr)
# print self.D.size
x2 = x.getSubVector(self.p_is)
y2 = x2.duplicate()
y3 = x2.duplicate()
xp = x2.duplicate()
self.kspA.solve(x2,y2)
self.Fp.mult(y2,y3)
self.kspQ.solve(y3,xp)
# self.kspF.solve(bu1-bu4-bu2,xu)
bb = x.getSubVector(self.b_is)
xb = bb.duplicate()
#self.kspMX.solve(bb,xb)
xxr = bb.duplicate()
self.Dt.multTranspose(xr,xxr)
xb, its, self.HiptmairTime = HiptmairSetup.HiptmairApply(self.AA, bb, self.kspScalar, self.kspVector, self.G, self.P, self.tol)
bu1 = x.getSubVector(self.u_is)
bu2 = bu1.duplicate()
bu4 = bu1.duplicate()
self.Bt.multTranspose(xp,bu2)
self.Ct.multTranspose(xb,bu4)
XX = bu1.duplicate()
xu = XX.duplicate()
self.kspF.solve(bu1-bu4+bu2,xu)
#self.kspF.solve(bu1,xu)
y.array = (np.concatenate([xu.array, -xp.array,xb.array,xr.array]))
def ITS(self):
return self.CGits, self.HiptmairIts , self.CGtime, self.HiptmairTime
def SchurComplement(kspF, B):
    # Builds the dense Schur-complement matrix B^T F^{-1} B column by column
    # (B is the velocity-pressure coupling block passed by the caller):
    # for each column b_i of B solve F u = b_i, then apply B^T to u.
    # The result is assembled as a scipy CSR matrix and returned as a PETSc AIJ matrix.
    n = min(B.size)
    row = []
    column = []
    data = np.zeros(0)
    for i in range(n):
        (y, u) = B.getVecs()
        kspF.solve(B.getColumnVector(i), u)
        B.multTranspose(u, y)
        if i == 0:
            data = y.array
            row = np.linspace(0, n-1, n)
            column = i*np.ones(n)
        else:
            row = np.concatenate([row, np.linspace(0, n-1, n)])
            column = np.concatenate([column, i*np.ones(n)])
            data = np.concatenate([data, y.array])
    A = sp.csr_matrix((data, (row, column)), shape=(n, n))
    return PETSc.Mat().createAIJ(size=A.shape, csr=(A.indptr, A.indices, A.data))
def FluidSchur(A, b):
    # Applies the (approximate) inverse fluid Schur complement to b. With a single
    # operator the exact Schur solve is used; otherwise the composition
    # A[0]^{-1} * A[1] * A[2]^{-1} * b is returned.
    if len(A) == 1:
        print "exact Schur complement"
        x = b.duplicate()
        A[0].solve(b, x)
        return x
    else:
        x1 = b.duplicate()
        x2 = b.duplicate()
        x3 = b.duplicate()
        A[2].solve(b, x1)    # x1 = A[2]^{-1} b
        A[1].mult(x1, x2)    # x2 = A[1] x1
        A[0].solve(x2, x3)   # x3 = A[0]^{-1} x2
        return x3
class ApproxInv(BaseMyPC):
def __init__(self, W, kspF, kspA, kspQ,Fp,kspScalar, kspCGScalar, kspVector, G, P, A, Hiptmairtol):
self.W = W
self.kspF = kspF
self.kspA = kspA
self.kspQ = kspQ
self.Fp = Fp
self.kspScalar = kspScalar
self.kspCGScalar = kspCGScalar
self.kspVector = kspVector
self.HiptmairIts = 0
self.CGits = 0
self.P = P
self.G = G
self.AA = A
self.tol = Hiptmairtol
self.u_is = PETSc.IS().createGeneral(range(self.W[0].dim()))
self.p_is = PETSc.IS().createGeneral(range(self.W[0].dim(),self.W[0].dim()+self.W[1].dim()))
self.b_is = PETSc.IS().createGeneral(range(self.W[0].dim()+self.W[1].dim(),
self.W[0].dim()+self.W[1].dim()+self.W[2].dim()))
self.r_is = PETSc.IS().createGeneral(range(self.W[0].dim()+self.W[1].dim()+self.W[2].dim(),
self.W[0].dim()+self.W[1].dim()+self.W[2].dim()+self.W[3].dim()))
self.FluidApprox = "Schur"
def create(self, pc):
print "Create"
def setUp(self, pc):
A, P = pc.getOperators()
print A.size
if A.type == 'python':
self.Ct = A.getPythonContext().getMatrix("Ct")
self.Bt = A.getPythonContext().getMatrix("Bt")
else:
self.C = A.getSubMatrix(self.b_is,self.u_is)
self.B = A.getSubMatrix(self.p_is,self.u_is)
self.D = A.getSubMatrix(self.r_is,self.b_is)
OptDB = PETSc.Options()
OptDB["pc_factor_mat_ordering_type"] = "rcm"
OptDB["pc_factor_mat_solver_package"] = "umfpack"
self.kspA.setType('cg')
self.kspA.getPC().setType('hypre')
self.kspA.setFromOptions()
self.kspA.setPCSide(0)
self.kspQ.setType('preonly')
self.kspQ.getPC().setType('lu')
self.kspQ.setFromOptions()
self.kspQ.setPCSide(0)
self.kspScalar.setType('preonly')
self.kspScalar.getPC().setType('lu')
self.kspScalar.setFromOptions()
self.kspScalar.setPCSide(0)
kspMX = PETSc.KSP()
kspMX.create(comm=PETSc.COMM_WORLD)
pcMX = kspMX.getPC()
kspMX.setType('preonly')
pcMX.setType('lu')
OptDB = PETSc.Options()
kspMX.setOperators(self.AA,self.AA)
self.kspMX = kspMX
self.kspVector.setType('preonly')
self.kspVector.getPC().setType('lu')
self.kspVector.setFromOptions()
self.kspVector.setPCSide(0)
if self.FluidApprox == "Schur":
Schur = SchurComplement(self.kspF, A.getSubMatrix(self.u_is, self.p_is))
kspS = PETSc.KSP()
kspS.create(comm=PETSc.COMM_WORLD)
            pcS = kspS.getPC()
kspS.setType('preonly')
pcS.setType('lu')
OptDB = PETSc.Options()
kspS.setOperators(Schur, Schur)
self.kspS = kspS
print "setup"
def apply(self, pc, x, y):
bu = x.getSubVector(self.u_is)
invF = bu.duplicate()
bp = x.getSubVector(self.p_is)
bb = x.getSubVector(self.b_is)
invMX = bb.duplicate()
br = x.getSubVector(self.r_is)
invL = br.duplicate()
self.kspF.solve(bu,invF)
x1 = bp.duplicate()
x2 = bp.duplicate()
if self.FluidApprox == "Schur":
invS = FluidSchur([self.kspA, self.Fp, self.kspQ], bp)
else:
invS = bp.duplicate()
self.kspS.solve(bp, invS)
self.kspMX.solve(bb,invMX)
self.kspScalar.solve(br,invL)
xp1 = invS.duplicate()
self.B.mult(invF, xp1)
if self.FluidApprox == "Schur":
barF = FluidSchur([self.kspA, self.Fp, self.kspQ], xp1)
else:
barF = invS.duplicate()
self.kspS.solve(xp1, barF)
xu1 = invF.duplicate()
barS = invF.duplicate()
self.B.multTranspose(invS, xu1)
self.kspF.solve(xu1, barS)
# outR = (L(D*invMx));
xr1 = invL.duplicate()
outR = invL.duplicate()
self.D.mult(invMX, xr1)
        self.kspScalar.solve(xr1, outR)
# outB = (Mx(C*barS) + invMx + Mx(D'*invL));
xb1 = invMX.duplicate()
xb2 = invMX.duplicate()
xb3 = invMX.duplicate()
xb4 = invMX.duplicate()
self.D.multTranspose(invL, xb1)
self.kspMX.solve(xb1, xb2)
self.C.mult(barS, xb3)
self.kspMX.solve(xb3, xb4)
outB = xb4 + invMX + xb2
# outP = barF - invS - Schur(B*F(C'*invMx));
xp1 = invF.duplicate()
xp2 = invF.duplicate()
xp3 = invS.duplicate()
self.C.multTranspose(invMX, xp1)
self.kspF.solve(xp1, xp2)
        self.B.mult(xp2, xp3)
if self.FluidApprox == "Schur":
xp4 = FluidSchur([self.kspA, self.Fp, self.kspQ], xp3)
else:
xp4 = invS.duplicate()
self.kspS.solve(xp3, xp4)
        outP = barF - invS - xp4
xu1 = invF.duplicate()
xu2 = invF.duplicate()
self.B.multTranspose(barF, xu1)
self.kspF.solve(xu1, xu2)
        outU = invF - xu2 + barS
y.array = (np.concatenate([outU.array, outP.array, outB.array, outR.array]))
def ITS(self):
return self.CGits, self.HiptmairIts , self.CGtime, self.HiptmairTime
|
|
#!/usr/bin/env python
"""
@package mi.dataset.parser.adcpt_acfgm_dcl_pd8
@file marine-integrations/mi/dataset/parser/adcpt_acfgm_dcl_pd8.py
@author Sung Ahn
@brief Parser for the adcpt_acfgm_dcl_pd8 dataset driver
This file contains code for the adcpt_acfgm_dcl_pd8 parsers and code to produce
data particles for both the telemetered (instrument) and recovered (instrument
recovered) data streams.
All records start with a timestamp.
DCL log records: timestamp [text] more text newline.
Sensor Data records: timestamp sensor_data newline.
Only sensor data records produce particles if properly formed.
Mal-formed sensor data records and all dcl log records produce no particles.
The sensor data record has the following format:
<DCL Timestamp> <Instrument Timestamp> <Ensemble Number>
<DCL Timestamp> Hdg: <Heading> Pitch: <Pitch> Roll: <Roll>
<DCL Timestamp> Temp: <Temperature> SoS: <Speed of Sound> BIT: <BIT>
<DCL Timestamp> Bin Dir Mag E/W N/S Vert Err Echo1 Echo2 Echo3 Echo4..
<DCL Timestamp> 1 <DIR 1> <MAG 1> <EW 1> <NS 1> <VERT 1> <ERR 1> <ECHO1 1> <ECHO2 1> <ECHO3 1> <ECHO4 1>
<DCL Timestamp> ...
<DCL Timestamp> <N> <DIR N> <MAG N> <EW N> <NS N> <VERT N> <ERR N> <ECHO1 N> <ECHO2 N> <ECHO3 N> <ECHO4 N>
Release notes:
Initial Release
"""
__author__ = 'Sung Ahn'
__license__ = 'Apache 2.0'
import re
import numpy
from mi.core.log import get_logger
log = get_logger()
from mi.core.common import BaseEnum
from mi.dataset.parser.dcl_file_common import DclInstrumentDataParticle, \
TIMESTAMP, START_METADATA, END_METADATA, START_GROUP, END_GROUP, SENSOR_GROUP_TIMESTAMP
from mi.core.exceptions import RecoverableSampleException
from mi.dataset.dataset_parser import SimpleParser
from mi.dataset.parser.common_regexes import FLOAT_REGEX, UNSIGNED_INT_REGEX, INT_REGEX, \
SPACE_REGEX, ANY_CHARS_REGEX, ASCII_HEX_CHAR_REGEX
# Basic patterns
UINT = '(' + UNSIGNED_INT_REGEX + ')' # unsigned integer as a group
SINT = '(' + INT_REGEX + ')' # signed integer as a group
FLOAT = '(' + FLOAT_REGEX + ')' # floating point as a captured group
FLOAT_OR_DASH = '(' + FLOAT_REGEX + '|\-\-' + ')' # floating point as a captured group
MULTI_SPACE = SPACE_REGEX + '+'
ANY_NON_BRACKET_CHAR = r'[^\[\]]+'
# DCL Timestamp at the start of each record: YYYY/MM/DD HH:MM:SS.mmm
SENSOR_DATE = r'\d{4}/\d{2}/\d{2}' # Sensor Date: YYYY/MM/DD
SENSOR_TIME = r'\d{2}:\d{2}:\d{2}\.\d{2}'  # Sensor Time: HH:MM:SS.mm
TWO_HEX = '(' + ASCII_HEX_CHAR_REGEX + ASCII_HEX_CHAR_REGEX + ')'
# DCL Log record:
# Timestamp [Text]MoreText newline
DCL_LOG_PATTERN = TIMESTAMP + SPACE_REGEX # DCL controller timestamp
DCL_LOG_PATTERN += START_METADATA # Metadata record starts with '['
DCL_LOG_PATTERN += ANY_NON_BRACKET_CHAR # followed by text
DCL_LOG_PATTERN += END_METADATA # followed by ']'
DCL_LOG_PATTERN += ANY_CHARS_REGEX # followed by more text
DCL_LOG_MATCHER = re.compile(DCL_LOG_PATTERN)
# Header 1
# 2013/12/01 01:04:18.213 2013/12/01 01:00:27.53 00001\n
SENSOR_TIME_PATTERN = TIMESTAMP + MULTI_SPACE # DCL controller timestamp
SENSOR_TIME_PATTERN += START_GROUP + SENSOR_DATE + MULTI_SPACE # sensor date
SENSOR_TIME_PATTERN += SENSOR_TIME + END_GROUP + MULTI_SPACE # sensor time
SENSOR_TIME_PATTERN += UINT # Ensemble Number
SENSOR_TIME_MATCHER = re.compile(SENSOR_TIME_PATTERN)
# Header 2
# 2013/12/01 01:04:18.246 Hdg: 34.7 Pitch: 0.0 Roll: -1.1\n
SENSOR_HEAD_PATTERN = TIMESTAMP + MULTI_SPACE # DCL controller timestamp
SENSOR_HEAD_PATTERN += 'Hdg:' + MULTI_SPACE + FLOAT + MULTI_SPACE # Hdg
SENSOR_HEAD_PATTERN += 'Pitch:' + MULTI_SPACE + FLOAT + MULTI_SPACE # Pitch
SENSOR_HEAD_PATTERN += 'Roll:' + MULTI_SPACE + FLOAT # Roll
SENSOR_HEAD_MATCHER = re.compile(SENSOR_HEAD_PATTERN)
# Header 3
# 2013/12/01 01:04:18.279 Temp: 13.8 SoS: 1505 BIT: 00\n
SENSOR_TEMP_PATTERN = TIMESTAMP + MULTI_SPACE # DCL controller timestamp
SENSOR_TEMP_PATTERN += 'Temp:' + MULTI_SPACE + FLOAT + MULTI_SPACE # temp
SENSOR_TEMP_PATTERN += 'SoS:' + MULTI_SPACE + SINT + MULTI_SPACE # SoS
SENSOR_TEMP_PATTERN += 'BIT:' + MULTI_SPACE + TWO_HEX # sensor BIT
SENSOR_TEMP_MATCHER = re.compile(SENSOR_TEMP_PATTERN)
# Header 4
# 2013/12/01 01:04:18.363 Bin Dir Mag E/W N/S Vert Err Echo1 Echo2 Echo3 Echo4\n
IGNORE_HEADING_PATTERN = TIMESTAMP + MULTI_SPACE # DCL controller timestamp
IGNORE_HEADING_PATTERN += 'Bin' + MULTI_SPACE + ANY_CHARS_REGEX
IGNORE_HEADING_MATCHER = re.compile(IGNORE_HEADING_PATTERN)
# Sensor Data Record:
# 2013/12/01 01:04:18.446 1 14.6 47.5 12 46 -3 5 162 168 164 168\n
SENSOR_DATA_PATTERN = TIMESTAMP + MULTI_SPACE # DCL controller timestamp
SENSOR_DATA_PATTERN += UINT + MULTI_SPACE # bin
SENSOR_DATA_PATTERN += FLOAT_OR_DASH + MULTI_SPACE # Dir
SENSOR_DATA_PATTERN += FLOAT_OR_DASH + MULTI_SPACE # Mag
SENSOR_DATA_PATTERN += SINT + MULTI_SPACE # E/W
SENSOR_DATA_PATTERN += SINT + MULTI_SPACE # N/S
SENSOR_DATA_PATTERN += SINT + MULTI_SPACE # Vert
SENSOR_DATA_PATTERN += SINT + MULTI_SPACE # Err
SENSOR_DATA_PATTERN += UINT + MULTI_SPACE # Echo1
SENSOR_DATA_PATTERN += UINT + MULTI_SPACE # Echo2
SENSOR_DATA_PATTERN += UINT + MULTI_SPACE # Echo3
SENSOR_DATA_PATTERN += UINT # Echo4
SENSOR_DATA_MATCHER = re.compile(SENSOR_DATA_PATTERN)
# Empty Timestamp
# 2013/12/01 01:04:20.834\n
IGNORE_EMPTY_PATTERN = TIMESTAMP + '\s*$' # DCL controller timestamp
IGNORE_EMPTY_MATCHER = re.compile(IGNORE_EMPTY_PATTERN)
# The following are indices into groups()
# incremented after common timestamp values.
# i.e., match.groups()[INDEX]
# SENSOR_TIME_MATCHER produces the following groups.
SENSOR_TIME_SENSOR_DATE_TIME = 8
SENSOR_TIME_ENSEMBLE = 9
# SENSOR_HEAD_MATCHER produces the following groups.
HEAD_HEADING = 8
HEAD_PITCH = 9
HEAD_ROLL = 10
# SENSOR_TEMP_MATCHER produces the following groups.
TEMP_TEMP = 8
TEMP_SOS = 9
TEMP_HEX = 10
# SENSOR_DATA_MATCHER produces the following groups.
SENSOR_DATA_BIN = 8
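# Illustrative sketch (not part of the original parser): how the sensor-data matcher
# and the group indices above line up for the sample row quoted in the comments. The
# exact spacing in the sample string is an assumption.
def _example_sensor_data_groups():
    sample = '2013/12/01 01:04:18.446 1 14.6 47.5 12 46 -3 5 162 168 164 168'
    match = SENSOR_DATA_MATCHER.match(sample)
    if match is None:
        return None
    # groups() 0-7 hold the DCL timestamp pieces; from SENSOR_DATA_BIN onwards follow
    # bin, dir, mag, E/W, N/S, vert, err and the four echo intensities.
    return match.groups()[SENSOR_DATA_BIN:]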
# encoding function for water_velocity and water_direction
# the adcp puts "--" in the output when these can't be computed
# return fill value when found
def float_or_dash(val):
if val == '--':
return None
else:
return float(val)
# The following is used for DclInstrumentDataParticle._build_parsed_values() and defined as below:
# (parameter name, index into parsed_data[], encoding function)
PD8_DATA_MAP = [
('dcl_controller_timestamp', 0, str), # Last timestamp from the DCL controller
('instrument_timestamp', 8, str),
('ensemble_number', 9, int),
('heading', 10, float),
('pitch', 11, float),
('roll', 12, float),
('temperature', 13, float),
('speed_of_sound', 14, int),
('bit_result_demod_1', 15, int),
('bit_result_demod_0', 16, int),
('bit_result_timing', 17, int),
('water_direction', 18, lambda x: [float_or_dash(y) for y in x]),
('water_velocity', 19, lambda x: [float_or_dash(y) for y in x]),
('water_velocity_east', 20, lambda x: [int(y) for y in x]),
('water_velocity_north', 21, lambda x: [int(y) for y in x]),
('water_velocity_up', 22, lambda x: [int(y) for y in x]),
('error_velocity', 23, lambda x: [int(y) for y in x]),
('echo_intensity_beam1', 24, lambda x: [int(y) for y in x]),
('echo_intensity_beam2', 25, lambda x: [int(y) for y in x]),
('echo_intensity_beam3', 26, lambda x: [int(y) for y in x]),
('echo_intensity_beam4', 27, lambda x: [int(y) for y in x]),
('num_cells', 28, int)
]
class DataParticleType(BaseEnum):
ADCPT_ACFGM_PD8_DCL_INSTRUMENT = 'adcpt_acfgm_pd8_dcl_instrument'
ADCPT_ACFGM_PD8_DCL_INSTRUMENT_RECOVERED = 'adcpt_acfgm_pd8_dcl_instrument_recovered'
class AdcptAcfgmPd8InstrumentDataParticle(DclInstrumentDataParticle):
"""
Class for generating the adcpt_acfgm_dcl_pd8 instrument particle.
"""
def __init__(self, raw_data, *args, **kwargs):
super(AdcptAcfgmPd8InstrumentDataParticle, self).__init__(
raw_data, PD8_DATA_MAP, *args, **kwargs)
class AdcptAcfgmPd8DclInstrumentParticle(AdcptAcfgmPd8InstrumentDataParticle):
"""
Class for generating Data Particles from Telemetered data.
"""
_data_particle_type = DataParticleType.ADCPT_ACFGM_PD8_DCL_INSTRUMENT
class AdcptAcfgmPd8DclInstrumentRecoveredParticle(AdcptAcfgmPd8InstrumentDataParticle):
"""
Class for generating Data Particles from Recovered data.
"""
_data_particle_type = DataParticleType.ADCPT_ACFGM_PD8_DCL_INSTRUMENT_RECOVERED
class AdcptAcfgmPd8Parser(SimpleParser):
"""
ADCPT ACFGM PD8 Parser.
"""
def recov_exception_callback(self, message):
log.warn(message)
self._exception_callback(RecoverableSampleException(message))
def parse_file(self):
"""
        Open and read the file and parse the data within, and at the end of
        this method self._record_buffer will be filled with all the particles in the file.
"""
while True: # loop through file looking for beginning of an adcp data burst
line = self._stream_handle.readline() # READ NEXT LINE
if line == "":
break
# Check if this is a DCL Log message
dcl_log_match = DCL_LOG_MATCHER.match(line)
if dcl_log_match:
# verified to be a regular DCL Log. Discard & move to next line.
continue # skip to next line in outer loop
line_match = SENSOR_TIME_MATCHER.match(line)
if line_match is None:
self.recov_exception_callback("Expected starting DCL Timestamp, received: %r" % line)
continue # skip to next line in outer loop
matches = line_match.groups()
sensor_data_list = []
            # Save timestamp from the DCL controller log and its parts
parsed_data = list(matches[SENSOR_GROUP_TIMESTAMP:SENSOR_TIME_SENSOR_DATE_TIME])
# Get instrument_timestamp & ensemble_number
parsed_data.append(matches[SENSOR_TIME_SENSOR_DATE_TIME])
parsed_data.append(matches[SENSOR_TIME_ENSEMBLE])
line = self._stream_handle.readline() # READ NEXT LINE
line_match = SENSOR_HEAD_MATCHER.match(line)
if line_match is None:
self.recov_exception_callback("Expecting Heading, Pitch, & Roll data, received: %r" % line)
continue # skip to next line in outer loop
matches = line_match.groups()
# Get head, pitch, & roll
parsed_data.append(matches[HEAD_HEADING])
parsed_data.append(matches[HEAD_PITCH])
parsed_data.append(matches[HEAD_ROLL])
line = self._stream_handle.readline() # READ NEXT LINE
line_match = SENSOR_TEMP_MATCHER.match(line)
if line_match is None:
self.recov_exception_callback("Expecting Temperature, Speed of Sound, & BIT data,"
" received: %r" % line)
continue # skip to next line in outer loop
matches = line_match.groups()
# Get temperature, speed of sound, & BIT values
parsed_data.append(matches[TEMP_TEMP])
parsed_data.append(matches[TEMP_SOS])
binary_string = '{0:08b}'.format(int(matches[TEMP_HEX], 16))
parsed_data.append(binary_string[3])
parsed_data.append(binary_string[4])
parsed_data.append(binary_string[6])
line = self._stream_handle.readline() # READ NEXT LINE
line_match = IGNORE_HEADING_MATCHER.match(line)
if line_match is None:
self.recov_exception_callback("Expecting Header, received: %s" % line)
continue # skip to next line in outer loop
# Start looking for sensor data
while True: # loop through all the velocity and echo data records
line = self._stream_handle.readline() # READ NEXT LINE
line_match = SENSOR_DATA_MATCHER.match(line)
if line_match is not None:
# Collect velocity data sextets and echo power quartets
sensor_data_list.append(line_match.groups()[SENSOR_DATA_BIN:])
else:
try:
# Transpose velocity data sextets and echo power quartets
np_array = numpy.array(sensor_data_list)
parsed_data.extend(np_array.transpose().tolist()[1:])
# Get number of cells
parsed_data.append(sensor_data_list[-1][0])
particle = self._extract_sample(self._particle_class,
None,
parsed_data,
None)
if particle is not None:
self._record_buffer.append(particle)
except Exception:
self.recov_exception_callback("Error parsing sensor data row,"
" received: %s" % line)
break # exit inner loop once a particle has been produced
|
|
import json
from functools import lru_cache
from typing import Dict, Iterable, Optional
import dateutil.parser as dp
import requests
from pydantic.class_validators import validator
from datahub.configuration.common import ConfigModel
from datahub.emitter.mce_builder import DEFAULT_ENV
from datahub.ingestion.api.common import PipelineContext
from datahub.ingestion.api.source import Source, SourceReport
from datahub.ingestion.api.workunit import MetadataWorkUnit
from datahub.ingestion.source.sql import sql_common
from datahub.metadata.com.linkedin.pegasus2avro.common import (
AuditStamp,
ChangeAuditStamps,
)
from datahub.metadata.com.linkedin.pegasus2avro.metadata.snapshot import (
ChartSnapshot,
DashboardSnapshot,
)
from datahub.metadata.com.linkedin.pegasus2avro.mxe import MetadataChangeEvent
from datahub.metadata.schema_classes import (
ChartInfoClass,
ChartTypeClass,
DashboardInfoClass,
)
from datahub.utilities import config_clean
PAGE_SIZE = 25
chart_type_from_viz_type = {
"line": ChartTypeClass.LINE,
"big_number": ChartTypeClass.LINE,
"table": ChartTypeClass.TABLE,
"dist_bar": ChartTypeClass.BAR,
"area": ChartTypeClass.AREA,
"bar": ChartTypeClass.BAR,
"pie": ChartTypeClass.PIE,
"histogram": ChartTypeClass.HISTOGRAM,
"big_number_total": ChartTypeClass.LINE,
"dual_line": ChartTypeClass.LINE,
"line_multi": ChartTypeClass.LINE,
"treemap": ChartTypeClass.AREA,
"box_plot": ChartTypeClass.BAR,
}
class SupersetConfig(ConfigModel):
# See the Superset /security/login endpoint for details
# https://superset.apache.org/docs/rest-api
connect_uri: str = "localhost:8088"
username: Optional[str] = None
password: Optional[str] = None
provider: str = "db"
options: Dict = {}
env: str = DEFAULT_ENV
database_alias: Dict[str, str] = {}
@validator("connect_uri")
def remove_trailing_slash(cls, v):
return config_clean.remove_trailing_slashes(v)
def get_metric_name(metric):
if not metric:
return ""
if isinstance(metric, str):
return metric
label = metric.get("label")
if not label:
return ""
return label
def get_filter_name(filter_obj):
sql_expression = filter_obj.get("sqlExpression")
if sql_expression:
return sql_expression
clause = filter_obj.get("clause")
column = filter_obj.get("subject")
operator = filter_obj.get("operator")
comparator = filter_obj.get("comparator")
return f"{clause} {column} {operator} {comparator}"
class SupersetSource(Source):
config: SupersetConfig
report: SourceReport
platform = "superset"
def __hash__(self):
return id(self)
def __init__(self, ctx: PipelineContext, config: SupersetConfig):
super().__init__(ctx)
self.config = config
self.report = SourceReport()
login_response = requests.post(
f"{self.config.connect_uri}/api/v1/security/login",
None,
{
"username": self.config.username,
"password": self.config.password,
"refresh": True,
"provider": self.config.provider,
},
)
self.access_token = login_response.json()["access_token"]
self.session = requests.Session()
self.session.headers.update(
{
"Authorization": f"Bearer {self.access_token}",
"Content-Type": "application/json",
"Accept": "*/*",
}
)
# Test the connection
test_response = self.session.get(f"{self.config.connect_uri}/api/v1/database")
if test_response.status_code == 200:
pass
# TODO(Gabe): how should we message about this error?
@classmethod
def create(cls, config_dict: dict, ctx: PipelineContext) -> Source:
config = SupersetConfig.parse_obj(config_dict)
return cls(ctx, config)
@lru_cache(maxsize=None)
def get_platform_from_database_id(self, database_id):
database_response = self.session.get(
f"{self.config.connect_uri}/api/v1/database/{database_id}"
).json()
sqlalchemy_uri = database_response.get("result", {}).get("sqlalchemy_uri")
return sql_common.get_platform_from_sqlalchemy_uri(sqlalchemy_uri)
@lru_cache(maxsize=None)
def get_datasource_urn_from_id(self, datasource_id):
dataset_response = self.session.get(
f"{self.config.connect_uri}/api/v1/dataset/{datasource_id}"
).json()
schema_name = dataset_response.get("result", {}).get("schema")
table_name = dataset_response.get("result", {}).get("table_name")
database_id = dataset_response.get("result", {}).get("database", {}).get("id")
database_name = (
dataset_response.get("result", {}).get("database", {}).get("database_name")
)
database_name = self.config.database_alias.get(database_name, database_name)
if database_id and table_name:
platform = self.get_platform_from_database_id(database_id)
platform_urn = f"urn:li:dataPlatform:{platform}"
dataset_urn = (
f"urn:li:dataset:("
f"{platform_urn},{database_name + '.' if database_name else ''}"
f"{schema_name + '.' if schema_name else ''}"
f"{table_name},{self.config.env})"
)
return dataset_urn
return None
def construct_dashboard_from_api_data(self, dashboard_data):
dashboard_urn = f"urn:li:dashboard:({self.platform},{dashboard_data['id']})"
dashboard_snapshot = DashboardSnapshot(
urn=dashboard_urn,
aspects=[],
)
modified_actor = f"urn:li:corpuser:{(dashboard_data.get('changed_by') or {}).get('username', 'unknown')}"
modified_ts = int(
dp.parse(dashboard_data.get("changed_on_utc", "now")).timestamp() * 1000
)
title = dashboard_data.get("dashboard_title", "")
# note: the API does not currently supply created_by usernames due to a bug, but we are required to
# provide a created AuditStamp to comply with ChangeAuditStamp model. For now, I sub in the last
# modified actor urn
last_modified = ChangeAuditStamps(
created=AuditStamp(time=modified_ts, actor=modified_actor),
lastModified=AuditStamp(time=modified_ts, actor=modified_actor),
)
dashboard_url = f"{self.config.connect_uri}{dashboard_data.get('url', '')}"
chart_urns = []
raw_position_data = dashboard_data.get("position_json", "{}")
position_data = (
json.loads(raw_position_data) if raw_position_data is not None else {}
)
for key, value in position_data.items():
if not key.startswith("CHART-"):
continue
chart_urns.append(
f"urn:li:chart:({self.platform},{value.get('meta', {}).get('chartId', 'unknown')})"
)
dashboard_info = DashboardInfoClass(
description="",
title=title,
charts=chart_urns,
lastModified=last_modified,
dashboardUrl=dashboard_url,
customProperties={},
)
dashboard_snapshot.aspects.append(dashboard_info)
return dashboard_snapshot
def emit_dashboard_mces(self) -> Iterable[MetadataWorkUnit]:
current_dashboard_page = 0
# we will set total dashboards to the actual number after we get the response
total_dashboards = PAGE_SIZE
while current_dashboard_page * PAGE_SIZE <= total_dashboards:
dashboard_response = self.session.get(
f"{self.config.connect_uri}/api/v1/dashboard",
params=f"q=(page:{current_dashboard_page},page_size:{PAGE_SIZE})",
)
payload = dashboard_response.json()
total_dashboards = payload.get("count") or 0
current_dashboard_page += 1
for dashboard_data in payload["result"]:
dashboard_snapshot = self.construct_dashboard_from_api_data(
dashboard_data
)
mce = MetadataChangeEvent(proposedSnapshot=dashboard_snapshot)
wu = MetadataWorkUnit(id=dashboard_snapshot.urn, mce=mce)
self.report.report_workunit(wu)
yield wu
def construct_chart_from_chart_data(self, chart_data):
chart_urn = f"urn:li:chart:({self.platform},{chart_data['id']})"
chart_snapshot = ChartSnapshot(
urn=chart_urn,
aspects=[],
)
modified_actor = f"urn:li:corpuser:{(chart_data.get('changed_by') or {}).get('username', 'unknown')}"
modified_ts = int(
dp.parse(chart_data.get("changed_on_utc", "now")).timestamp() * 1000
)
title = chart_data.get("slice_name", "")
# note: the API does not currently supply created_by usernames due to a bug, but we are required to
# provide a created AuditStamp to comply with ChangeAuditStamp model. For now, I sub in the last
# modified actor urn
last_modified = ChangeAuditStamps(
created=AuditStamp(time=modified_ts, actor=modified_actor),
lastModified=AuditStamp(time=modified_ts, actor=modified_actor),
)
chart_type = chart_type_from_viz_type.get(chart_data.get("viz_type", ""))
chart_url = f"{self.config.connect_uri}{chart_data.get('url', '')}"
datasource_id = chart_data.get("datasource_id")
datasource_urn = self.get_datasource_urn_from_id(datasource_id)
params = json.loads(chart_data.get("params"))
metrics = [
get_metric_name(metric)
for metric in (params.get("metrics", []) or [params.get("metric")])
]
filters = [
get_filter_name(filter_obj)
for filter_obj in params.get("adhoc_filters", [])
]
group_bys = params.get("groupby", []) or []
if isinstance(group_bys, str):
group_bys = [group_bys]
custom_properties = {
"Metrics": ", ".join(metrics),
"Filters": ", ".join(filters),
"Dimensions": ", ".join(group_bys),
}
chart_info = ChartInfoClass(
type=chart_type,
description="",
title=title,
lastModified=last_modified,
chartUrl=chart_url,
inputs=[datasource_urn] if datasource_urn else None,
customProperties=custom_properties,
)
chart_snapshot.aspects.append(chart_info)
return chart_snapshot
def emit_chart_mces(self) -> Iterable[MetadataWorkUnit]:
current_chart_page = 0
# we will set total charts to the actual number after we get the response
total_charts = PAGE_SIZE
while current_chart_page * PAGE_SIZE <= total_charts:
chart_response = self.session.get(
f"{self.config.connect_uri}/api/v1/chart",
params=f"q=(page:{current_chart_page},page_size:{PAGE_SIZE})",
)
current_chart_page += 1
payload = chart_response.json()
total_charts = payload["count"]
for chart_data in payload["result"]:
chart_snapshot = self.construct_chart_from_chart_data(chart_data)
mce = MetadataChangeEvent(proposedSnapshot=chart_snapshot)
wu = MetadataWorkUnit(id=chart_snapshot.urn, mce=mce)
self.report.report_workunit(wu)
yield wu
def get_workunits(self) -> Iterable[MetadataWorkUnit]:
yield from self.emit_dashboard_mces()
yield from self.emit_chart_mces()
def get_report(self) -> SourceReport:
return self.report
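# Illustrative sketch: building the source directly from a config dict. The
# run_id and credentials are hypothetical, and the constructor performs a live
# login against Superset, so this only works with a reachable instance; in
# practice the source is driven by a DataHub ingestion pipeline.
def _example_build_superset_source() -> SupersetSource:
    ctx = PipelineContext(run_id="superset-example-run")
    return SupersetSource.create(
        {
            "connect_uri": "http://localhost:8088",
            "username": "admin",
            "password": "admin",
        },
        ctx,
    )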
|
|
from pycket.test.testhelper import *
from pycket.values import *
import pytest
skip = pytest.mark.skipif("True")
# Creating Structure Types
def test_make_struct_type(doctest):
"""
> (define-values (struct:a make-a a? a-ref a-set!)
(make-struct-type 'a #f 2 1 'uninitialized))
> (define an-a (make-a 'x 'y))
> (a-ref an-a 1)
'y
> (a-ref an-a 2)
'uninitialized
> (define a-first (make-struct-field-accessor a-ref 0))
> (a-first an-a)
'x
> (define-values (struct:b make-b b? b-ref b-set!)
(make-struct-type 'b struct:a 1 2 'b-uninitialized))
> (define a-b (make-b 'x 'y 'z))
> (a-ref a-b 1)
'y
> (a-ref a-b 2)
'uninitialized
> (b-ref a-b 0)
'z
> (b-ref a-b 1)
'b-uninitialized
> (b-ref a-b 2)
'b-uninitialized
;;;;;;;;;;;;;;;;
> (define p1 #s(p a b c))
> (define-values (struct:p make-p p? p-ref p-set!)
(make-struct-type 'p #f 3 0 #f null 'prefab #f '(0 1 2)))
> (p? p1)
#t
> (p-ref p1 0)
'a
> (make-p 'x 'y 'z)
'#s(p x y z)
"""
assert doctest
def test_make_struct_type2(doctest):
"""
! (require racket/private/generic-interfaces)
> (struct color (r g b) #:constructor-name -color)
> (struct rectangle (w h color) #:extra-constructor-name rect)
> (rectangle 13 50 (-color 192 157 235))
> (rect 50 37 (-color 35 183 252))
> (struct circle (radius) #:reflection-name '<circle>)
> (circle 15)
;#<|<circle>|>
"""
assert doctest
def test_struct_main_functions(source):
"""
(struct posn (x y))
(let* ([p (posn 1 2)]
[p? (posn? p)]
[notp? (posn? 0)]
[x (posn-x p)]
[y (posn-y p)])
(and p? (not notp?) (= x 1) (= y 2)))
"""
result = run_mod_expr(source, wrap=True)
assert result == w_true
def test_struct_inheritance(source):
"""
(struct posn (x y))
(struct 3d-posn posn (z))
(let* ([p (3d-posn 1 2 3)]
[p? (posn? p)]
[x (posn-x p)]
[z (3d-posn-z p)])
(and p? (= x 1) (= z 3)))
"""
result = run_mod_expr(source, wrap=True)
assert result == w_true
def test_struct_inheritance2():
m = run_mod(
"""
#lang pycket
(require racket/private/kw)
(struct posn (x y))
(define (raven-constructor super-type)
(struct raven ()
#:super super-type
#:transparent
#:property prop:procedure (lambda (self) 'nevermore)) raven)
(define r ((raven-constructor struct:posn) 1 2))
(define x (posn-x r))
""")
ov = m.defs[W_Symbol.make("x")]
assert ov.value == 1
def test_struct_comparison(source):
"""
(struct glass (width height) #:transparent #:mutable)
(struct lead (width height))
(define slab (lead 1 2))
(let* ([glass_test (equal? (glass 1 2) (glass 1 2))]
[slab (lead 1 2)]
[lead_test1 (equal? slab slab)]
[lead_test2 (equal? slab (lead 1 2))])
(and glass_test lead_test1 (not lead_test2)))
"""
result = run_mod_expr(source, wrap=True)
assert result == w_true
def test_struct_comparison2():
m = run_mod(
"""
#lang pycket
(require racket/private/generic-interfaces)
(struct lead (width height)
#:methods
gen:equal+hash
[(define (equal-proc a b equal?-recur)
; compare a and b
(and (equal?-recur (lead-width a) (lead-width b))
(equal?-recur (lead-height a) (lead-height b))))
(define (hash-proc a hash-recur)
; compute primary hash code of a
(+ (hash-recur (lead-width a))
(* 3 (hash-recur (lead-height a)))))
(define (hash2-proc a hash2-recur)
; compute secondary hash code of a
(+ (hash2-recur (lead-width a))
(hash2-recur (lead-height a))))])
(define result (equal? (lead 1 2) (lead 1 2)))
""")
assert m.defs[W_Symbol.make("result")] == w_true
def test_struct_mutation(source):
"""
(struct dot (x y) #:mutable)
(let* ([d (dot 1 2)]
[dx0 (dot-x d)]
[m (set-dot-x! d 10)]
[dx1 (dot-x d)])
(and (= dx0 1) (= dx1 10)))
"""
result = run_mod_expr(source, wrap=True)
assert result == w_true
def test_struct_auto_values(source):
"""
(struct p3 (x y [z #:auto]) #:transparent #:auto-value 0)
(struct p4 p3 (t))
(let* ([p (p3 1 2)]
[4dp (p4 1 2 4)]
[pz (p3-z p)]
[4pdt (p4-t 4dp)])
(and (= pz 0) (= 4pdt 4)))
"""
result = run_mod_expr(source, wrap=True)
assert result == w_true
def test_struct_guard():
run(
"""
((lambda (name) (struct thing (name) #:transparent #:guard
(lambda (name type-name) (cond
[(string? name) name]
[else (error type-name \"bad name: ~e\" name)])))
(thing? (thing name))) \"apple\")
""", w_true)
e = pytest.raises(SchemeException, run,
"""
((lambda (name) (struct thing (name) #:transparent #:guard
(lambda (name type-name) (cond
[(string? name) name]
[else (error type-name "bad name")])))
(thing? (thing name))) 1)
""")
assert "bad name" in e.value.msg
def test_struct_guard2():
m = run_mod(
"""
#lang pycket
(define-values (s:o make-o o? o-ref o-set!)
(make-struct-type 'o #f 1 0 'odefault null (make-inspector) #f null (lambda (o n) (+ o 1))))
(define x (o-ref (make-o 10) 0))
""")
ov = m.defs[W_Symbol.make("x")]
assert ov.value == 11
def test_struct_guard3():
m = run_mod(
"""
#lang pycket
(define got null)
(define-values (s:a make-a a? a-ref a-set!)
(make-struct-type 'a #f 2 1 'adefault null (make-inspector) #f null
(lambda (a b n) (set! got (cons (list a b n) got)) (values 1 2))))
(define-values (s:b make-b b? b-ref b-set!)
(make-struct-type 'b s:a 1 2 'bdefault null (make-inspector) #f null
(lambda (a b c n) (set! got (cons (list a b c n) got)) (values 10 20 30))))
(define x (a-ref (make-b 'x 'y 'z) 0))
""")
ov = m.defs[W_Symbol.make("x")]
assert ov.value == 1
def test_struct_prefab():
m = run_mod(
"""
#lang pycket
(require racket/private/kw)
(define lunch '#s(sprout bean))
(struct sprout (kind) #:prefab)
(define t (sprout? lunch))
(define f (sprout? #s(sprout bean #f 17)))
(define result (and (not f) t))
""")
assert m.defs[W_Symbol.make("result")] == w_true
def test_unsafe():
m = run_mod(
"""
#lang pycket
(struct posn ([x #:mutable] [y #:mutable]) #:transparent)
(struct 3dposn posn ([z #:mutable]))
(define p (3dposn 1 2 3))
(unsafe-struct*-set! p 2 4)
(define x (unsafe-struct*-ref p 2))
""")
ov = m.defs[W_Symbol.make("x")]
assert ov.value == 4
def test_unsafe_impersonators():
m = run_mod(
"""
#lang pycket
(struct posn ([x #:mutable] [y #:mutable]) #:transparent)
(define a (posn 1 1))
(define b (impersonate-struct a))
(unsafe-struct-set! b 1 2)
(define x (unsafe-struct-ref b 1))
""")
ov = m.defs[W_Symbol.make("x")]
assert ov.value == 2
# Structure Type Properties
def test_struct_prop_procedure():
m = run_mod(
"""
#lang pycket
(require racket/private/kw)
(require (prefix-in k: '#%kernel))
(struct x() #:property prop:procedure (lambda _ 1))
(struct y() #:property k:prop:procedure (lambda _ 2))
(define xval ((x)))
(define yval ((y)))
""")
assert m.defs[W_Symbol.make("xval")].value == 1
assert m.defs[W_Symbol.make("yval")].value == 2
def test_struct_prop_procedure_inheritance():
m = run_mod(
"""
#lang pycket
(require racket/private/kw)
(struct x (proc) #:property prop:procedure 0)
(struct y x ())
(define b (y (lambda (x) x)))
(define val (b 10))
""")
assert m.defs[W_Symbol.make("val")].value == 10
def test_struct_prop_procedure_fail():
e = pytest.raises(SchemeException, run_mod,
"""
#lang pycket
(require racket/private/kw)
(require (prefix-in k: '#%kernel))
(struct x() #:property prop:procedure (lambda _ 1) #:property k:prop:procedure (lambda _ 2))
""")
assert "duplicate property binding" in e.value.msg
def test_struct_prop_procedure_with_self_arg():
m = run_mod(
"""
#lang pycket
(require racket/private/kw)
(struct greeter (name)
#:property prop:procedure
(lambda (self other)
(string-append
"Hi " other
", I'm " (greeter-name self))))
(define joe-greet (greeter "Joe"))
(define greeting (joe-greet "Mary"))
""")
ov = m.defs[W_Symbol.make("greeting")]
assert ov.as_str_utf8() == "Hi Mary, I'm Joe"
def test_struct_prop_arity():
m = run_mod(
"""
#lang pycket
(require racket/private/kw)
(struct evens (proc)
#:property prop:procedure (struct-field-index proc)
#:property prop:arity-string
(lambda (p)
"an even number of arguments"))
(define pairs
(evens
(case-lambda
[() null]
[(a b . more)
(cons (cons a b)
(apply pairs more))])))
(define x (pairs 1 2 3 4))
""")
ov = m.defs[W_Symbol.make("x")]
assert isinstance(ov, W_Cons)
e = pytest.raises(SchemeException, run_mod,
"""
#lang pycket
(require racket/private/kw)
(struct evens (proc)
#:property prop:procedure (struct-field-index proc)
#:property prop:arity-string
(lambda (p)
"an even number of arguments"))
(define pairs
(evens
(case-lambda
[() null]
[(a b . more)
(cons (cons a b)
(apply pairs more))])))
(pairs 5)
""")
assert "an even number of arguments" in e.value.msg
def test_checked_procedure_check_and_extract(source):
"""
(define-values (prop prop? prop-accessor) (make-struct-type-property 'p #f (list (cons prop:checked-procedure add1)) #f))
(define-values (struct:posn make-posn posn? posn-x posn-y) (make-struct-type 'a #f 2 1 'uninitialized (list (cons prop 0))))
(define posn_instance (make-posn (lambda (a b) #t) 2))
(define proc (lambda (a b c) (+ a b c)))
(let* ([check_0 (checked-procedure-check-and-extract struct:posn posn_instance proc 1 2)]
[check_1 (checked-procedure-check-and-extract struct:posn 3 proc 1 2)])
(and (= check_0 2) (= check_1 6)))
"""
result = run_mod_expr(source, wrap=True)
assert result == w_true
# Generic Interfaces
def test_current_inspector(source):
"""
(inspector? (current-inspector))
"""
result = run_mod_expr(source, wrap=True)
assert result == w_true
# Copying and Updating Structures
def test_struct_copying_and_update(doctest):
"""
> (struct fish (color weight) #:transparent)
> (define marlin (fish 'orange-and-white 11))
> (define dory (struct-copy fish marlin
[color 'blue]))
> dory
(fish 'blue 11)
> (struct shark fish (weeks-since-eating-fish) #:transparent)
> (define bruce (shark 'grey 110 3))
> (define chum (struct-copy shark bruce
[weight #:parent fish 90]
[weeks-since-eating-fish 0]))
> chum
(shark 'grey 90 0)
; subtypes can be copied as if they were supertypes,
; but the result is an instance of the supertype
> (define not-really-chum
(struct-copy fish bruce
[weight 90]))
> not-really-chum
(fish 'grey 90)
"""
assert doctest
# Structure Utilities
def test_struct2vector(source):
"""
(struct posn (x y) #:transparent)
(struct 3d-posn posn ([z #:mutable]))
(let* ([d (3d-posn 1 2 3)]
[v (struct->vector d)]
[v_name (vector-ref v 0)]
[v0 (vector-ref v 1)]
[v2 (vector-ref v 3)])
(and (eq? v_name 'struct:3d-posn) (= v0 1) (= v2 3)))
"""
result = run_mod_expr(source, wrap=True)
assert result == w_true
def test_prefab_struct_key(doctest):
"""
> (prefab-struct-key #s(cat "Garfield"))
'cat
> (struct cat (name) #:prefab)
> (struct cute-cat cat (shipping-dest) #:prefab)
> (cute-cat "Nermel" "Abu Dhabi")
'#s((cute-cat cat 1) "Nermel" "Abu Dhabi")
> (prefab-struct-key (cute-cat "Nermel" "Abu Dhabi"))
'(cute-cat cat 1)
"""
assert doctest
def test_is_prefab_key(source):
"""
(let* ([k0 (prefab-key? 'cat)]
[k1 (prefab-key? '(cute-cat cat 1))]
[k2 (prefab-key? '(clown 1 (1 #f) #(0)))]
[l (prefab-key? 0)])
(and k0 k1 k2 (not l)))
"""
result = run_mod_expr(source, wrap=True)
assert result == w_true
def test_make_prefab_struct(doctest):
"""
> (make-prefab-struct 'clown "Binky" "pie")
'#s(clown "Binky" "pie")
> (make-prefab-struct '(clown 2) "Binky" "pie")
'#s(clown "Binky" "pie")
> (make-prefab-struct '(clown 2 (0 #f) #()) "Binky" "pie")
'#s(clown "Binky" "pie")
> (make-prefab-struct '(clown 1 (1 #f) #()) "Binky" "pie")
'#s((clown (1 #f)) "Binky" "pie")
;> (make-prefab-struct '(clown 1 (1 #f) #(0)) "Binky" "pie")
;'#s((clown (1 #f) #(0)) "Binky" "pie")
"""
assert doctest
# Structure Type Transformer Binding
def test_struct(doctest):
"""
! (require racket/private/define-struct)
> (struct x ())
> (struct? (x))
#f
> (struct y () #:inspector (make-inspector))
> (struct? (y))
#t
"""
assert doctest
def test_struct_info(doctest):
"""
! (require racket/private/define-struct)
> (struct x ())
> (define-values (struct-type skipped?) (struct-info (x)))
> struct-type
#f
> skipped?
#t
> (struct y () #:inspector (make-inspector))
> (define-values (struct-type skipped?) (struct-info (y)))
> skipped?
#f
"""
assert doctest
# Other
def test_procedure():
m = run_mod(
"""
#lang pycket
(require racket/private/kw)
(define ((f x) #:k [y 0])
(+ x y))
(define proc (procedure-rename (f 1) 'x))
(define x (proc))
""")
ov = m.defs[W_Symbol.make("x")]
assert ov.value == 1
def test_struct_immutable_boolean(source):
"""
(struct struct-with-immu (a b c))
(define struct-i (struct-with-immu 1 #f 2))
(let ([first-ok (equal? (struct-with-immu-a struct-i) 1)]
[immu-ok (equal? (struct-with-immu-b struct-i) #f)]
[last-ok (equal? (struct-with-immu-c struct-i) 2)])
(and first-ok immu-ok last-ok))
"""
result = run_mod_expr(source, wrap=True)
assert result == w_true
def test_struct_immutable_boolean1(source):
"""
(struct struct-with-immu (a b [c #:mutable]))
(define struct-i (struct-with-immu 1 #f 2))
(set-struct-with-immu-c! struct-i 3)
(let ([first-ok (equal? (struct-with-immu-a struct-i) 1)]
[immu-ok (equal? (struct-with-immu-b struct-i) #f)]
[last-ok (equal? (struct-with-immu-c struct-i) 3)])
(and first-ok immu-ok last-ok))
"""
result = run_mod_expr(source, wrap=True)
assert result == w_true
def test_struct_tostring(doctest):
"""
! (struct a (a))
! (struct b a (b) #:transparent)
! (struct b1 a (b))
! (struct a1 (a) #:transparent)
! (struct b2 a1 (b))
! (struct b3 a1 (b) #:transparent)
> (format "~v" (a 1))
"#<a>"
> (format "~v" (b 1 2))
"(b ... 2)"
> (format "~v" (b1 1 2))
"#<b1>"
> (format "~v" (b2 1 2))
"(b2 1 ...)"
> (format "~v" (b3 1 2))
"(b3 1 2)"
"""
def test_auto_values(doctest):
"""
! (struct posn (x y [z #:auto]) #:auto-value 0 #:transparent)
! (struct color-posn posn (hue) #:mutable)
! (define cp (color-posn 1 2 "blue"))
> (format "~v" (posn 1 2))
"(posn 1 2 0)"
> (posn? (posn 1 2))
#t
> (posn-y (posn 1 2))
2
> (color-posn-hue cp)
"blue"
> (format "~v" cp)
"(color-posn 1 2 0 ...)"
"""
assert doctest
def test_struct_operations_arity(doctest):
"""
! (require racket/base)
! (struct posn (x [y #:mutable] [z #:auto]) #:auto-value 0 #:transparent)
> (procedure-arity posn-x)
1
> (procedure-arity posn-y)
1
> (procedure-arity posn-z)
1
> (procedure-arity set-posn-y!)
2
> (procedure-arity posn)
2
"""
@skip
def test_serializable(source):
"""
(= (point-x (deserialize (serialize (point 1 2)))) 1)
"""
extra = """
(require racket/serialize)
(serializable-struct point (x y))
"""
result = run_mod_expr(source, extra=extra, wrap=True)
assert result == w_true
def test_inherited_auto_values(doctest):
"""
! (struct test1 ([a #:auto] [b #:auto] [c #:auto]) #:auto-value 0 #:transparent)
! (struct test2 test1 () #:transparent)
! (struct test3 test2 () #:transparent)
> (test1? (test1))
#t
> (test2? (test2))
#t
> (test3? (test3))
#t
"""
|
|
"""
homeassistant.util
~~~~~~~~~~~~~~~~~~
Helper methods for various modules.
"""
import collections
from itertools import chain
import threading
import queue
from datetime import datetime, timedelta
import re
import enum
import socket
import random
import string
from functools import wraps
RE_SANITIZE_FILENAME = re.compile(r'(~|\.\.|/|\\)')
RE_SANITIZE_PATH = re.compile(r'(~|\.(\.)+)')
RE_SLUGIFY = re.compile(r'[^A-Za-z0-9_]+')
DATE_STR_FORMAT = "%H:%M:%S %d-%m-%Y"
def sanitize_filename(filename):
""" Sanitizes a filename by removing .. / and \\. """
return RE_SANITIZE_FILENAME.sub("", filename)
def sanitize_path(path):
""" Sanitizes a path by removing ~ and .. """
return RE_SANITIZE_PATH.sub("", path)
def slugify(text):
""" Slugifies a given text. """
text = text.replace(" ", "_")
return RE_SLUGIFY.sub("", text)
def datetime_to_str(dattim):
""" Converts datetime to a string format.
@rtype : str
"""
return dattim.strftime(DATE_STR_FORMAT)
def str_to_datetime(dt_str):
""" Converts a string to a datetime object.
@rtype: datetime
"""
try:
return datetime.strptime(dt_str, DATE_STR_FORMAT)
except ValueError: # If dt_str did not match our format
return None
def strip_microseconds(dattim):
""" Returns a copy of dattime object but with microsecond set to 0. """
if dattim.microsecond:
return dattim - timedelta(microseconds=dattim.microsecond)
else:
return dattim
def split_entity_id(entity_id):
""" Splits a state entity_id into domain, object_id. """
return entity_id.split(".", 1)
def repr_helper(inp):
""" Helps creating a more readable string representation of objects. """
if isinstance(inp, dict):
return ", ".join(
repr_helper(key)+"="+repr_helper(item) for key, item
in inp.items())
elif isinstance(inp, datetime):
return datetime_to_str(inp)
else:
return str(inp)
# Taken from: http://www.cse.unr.edu/~quiroz/inc/colortransforms.py
# License: Code is given as is. Use at your own risk and discretion.
# pylint: disable=invalid-name
def color_RGB_to_xy(R, G, B):
""" Convert from RGB color to XY color. """
if R + G + B == 0:
return 0, 0
var_R = (R / 255.)
var_G = (G / 255.)
var_B = (B / 255.)
if var_R > 0.04045:
var_R = ((var_R + 0.055) / 1.055) ** 2.4
else:
var_R /= 12.92
if var_G > 0.04045:
var_G = ((var_G + 0.055) / 1.055) ** 2.4
else:
var_G /= 12.92
if var_B > 0.04045:
var_B = ((var_B + 0.055) / 1.055) ** 2.4
else:
var_B /= 12.92
var_R *= 100
var_G *= 100
var_B *= 100
# Observer. = 2 deg, Illuminant = D65
X = var_R * 0.4124 + var_G * 0.3576 + var_B * 0.1805
Y = var_R * 0.2126 + var_G * 0.7152 + var_B * 0.0722
Z = var_R * 0.0193 + var_G * 0.1192 + var_B * 0.9505
# Convert XYZ to xy, see CIE 1931 color space on wikipedia
return X / (X + Y + Z), Y / (X + Y + Z)
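# Illustrative check for the conversion above (not part of the original
# module): pure sRGB red lands near the red corner of the CIE 1931 diagram,
# roughly x ~ 0.64, y ~ 0.33.
def _example_color_rgb_to_xy():
    x, y = color_RGB_to_xy(255, 0, 0)
    assert round(x, 2) == 0.64
    assert round(y, 2) == 0.33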
def convert(value, to_type, default=None):
""" Converts value to to_type, returns default if fails. """
try:
return default if value is None else to_type(value)
except ValueError:
# If value could not be converted
return default
def ensure_unique_string(preferred_string, current_strings):
""" Returns a string that is not present in current_strings.
        If the preferred string exists, will append _2, _3, .. """
test_string = preferred_string
current_strings = list(current_strings)
tries = 1
while test_string in current_strings:
tries += 1
test_string = "{}_{}".format(preferred_string, tries)
return test_string
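# Illustrative sketch of ensure_unique_string (the names are hypothetical):
# a taken name gets a numeric suffix, a free name is returned unchanged.
def _example_ensure_unique_string():
    assert ensure_unique_string("bedroom", ["bedroom", "kitchen"]) == "bedroom_2"
    assert ensure_unique_string("garage", ["bedroom", "kitchen"]) == "garage"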
# Taken from: http://stackoverflow.com/a/11735897
def get_local_ip():
""" Tries to determine the local IP address of the machine. """
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Use Google Public DNS server to determine own IP
sock.connect(('8.8.8.8', 80))
ip_addr = sock.getsockname()[0]
sock.close()
return ip_addr
except socket.error:
return socket.gethostbyname(socket.gethostname())
# Taken from http://stackoverflow.com/a/23728630
def get_random_string(length=10):
""" Returns a random string with letters and digits. """
generator = random.SystemRandom()
source_chars = string.ascii_letters + string.digits
return ''.join(generator.choice(source_chars) for _ in range(length))
class OrderedEnum(enum.Enum):
""" Taken from Python 3.4.0 docs. """
# pylint: disable=no-init, too-few-public-methods
def __ge__(self, other):
if self.__class__ is other.__class__:
return self.value >= other.value
return NotImplemented
def __gt__(self, other):
if self.__class__ is other.__class__:
return self.value > other.value
return NotImplemented
def __le__(self, other):
if self.__class__ is other.__class__:
return self.value <= other.value
return NotImplemented
def __lt__(self, other):
if self.__class__ is other.__class__:
return self.value < other.value
return NotImplemented
class OrderedSet(collections.MutableSet):
""" Ordered set taken from http://code.activestate.com/recipes/576694/ """
def __init__(self, iterable=None):
self.end = end = []
end += [None, end, end] # sentinel node for doubly linked list
self.map = {} # key --> [key, prev, next]
if iterable is not None:
self |= iterable
def __len__(self):
return len(self.map)
def __contains__(self, key):
return key in self.map
def add(self, key):
""" Add an element to the end of the set. """
if key not in self.map:
end = self.end
curr = end[1]
curr[2] = end[1] = self.map[key] = [key, curr, end]
def promote(self, key):
""" Promote element to beginning of the set, add if not there. """
if key in self.map:
self.discard(key)
begin = self.end[2]
curr = begin[1]
curr[2] = begin[1] = self.map[key] = [key, curr, begin]
def discard(self, key):
""" Discard an element from the set. """
if key in self.map:
key, prev_item, next_item = self.map.pop(key)
prev_item[2] = next_item
next_item[1] = prev_item
def __iter__(self):
end = self.end
curr = end[2]
while curr is not end:
yield curr[0]
curr = curr[2]
def __reversed__(self):
end = self.end
curr = end[1]
while curr is not end:
yield curr[0]
curr = curr[1]
def pop(self, last=True): # pylint: disable=arguments-differ
""" Pops element of the end of the set.
Set last=False to pop from the beginning. """
if not self:
raise KeyError('set is empty')
key = self.end[1][0] if last else self.end[2][0]
self.discard(key)
return key
def update(self, *args):
""" Add elements from args to the set. """
for item in chain(*args):
self.add(item)
def __repr__(self):
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, list(self))
def __eq__(self, other):
if isinstance(other, OrderedSet):
return len(self) == len(other) and list(self) == list(other)
return set(self) == set(other)
class Throttle(object):
"""
A method decorator to add a cooldown to a method to prevent it from being
    called more than once within the timedelta interval `min_time` after it
returned its result.
Calling a method a second time during the interval will return None.
Pass keyword argument `no_throttle=True` to the wrapped method to make
the call not throttled.
Decorator takes in an optional second timedelta interval to throttle the
'no_throttle' calls.
Adds a datetime attribute `last_call` to the method.
"""
# pylint: disable=too-few-public-methods
def __init__(self, min_time, limit_no_throttle=None):
self.min_time = min_time
self.limit_no_throttle = limit_no_throttle
def __call__(self, method):
lock = threading.Lock()
if self.limit_no_throttle is not None:
method = Throttle(self.limit_no_throttle)(method)
@wraps(method)
def wrapper(*args, **kwargs):
"""
            Wrapper that allows the wrapped method to be called only once per min_time.
If we cannot acquire the lock, it is running so return None.
"""
if lock.acquire(False):
try:
last_call = wrapper.last_call
# Check if method is never called or no_throttle is given
force = not last_call or kwargs.pop('no_throttle', False)
if force or datetime.now() - last_call > self.min_time:
result = method(*args, **kwargs)
wrapper.last_call = datetime.now()
return result
else:
return None
finally:
lock.release()
wrapper.last_call = None
return wrapper
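# Illustrative sketch of using Throttle (the interval and return value are
# hypothetical): update() does real work at most once per minute, further
# calls inside the interval return None, and no_throttle=True forces a call.
class _ExampleThrottledSensor(object):
    """ Example only; not used elsewhere in homeassistant. """
    @Throttle(timedelta(minutes=1))
    def update(self):
        """ Pretend to poll a slow device. """
        return "fresh data"
# sensor = _ExampleThrottledSensor()
# sensor.update()                  # -> "fresh data"
# sensor.update()                  # -> None while inside the cooldown
# sensor.update(no_throttle=True)  # -> "fresh data" again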
class ThreadPool(object):
""" A priority queue-based thread pool. """
# pylint: disable=too-many-instance-attributes
def __init__(self, job_handler, worker_count=0, busy_callback=None):
"""
job_handler: method to be called from worker thread to handle job
worker_count: number of threads to run that handle jobs
busy_callback: method to be called when queue gets too big.
Parameters: worker_count, list of current_jobs,
pending_jobs_count
"""
self._job_handler = job_handler
self._busy_callback = busy_callback
self.worker_count = 0
self.busy_warning_limit = 0
self._work_queue = queue.PriorityQueue()
self.current_jobs = []
self._lock = threading.RLock()
self._quit_task = object()
self.running = True
for _ in range(worker_count):
self.add_worker()
def add_worker(self):
""" Adds a worker to the thread pool. Resets warning limit. """
with self._lock:
if not self.running:
raise RuntimeError("ThreadPool not running")
worker = threading.Thread(target=self._worker)
worker.daemon = True
worker.start()
self.worker_count += 1
self.busy_warning_limit = self.worker_count * 3
def remove_worker(self):
""" Removes a worker from the thread pool. Resets warning limit. """
with self._lock:
if not self.running:
raise RuntimeError("ThreadPool not running")
self._work_queue.put(PriorityQueueItem(0, self._quit_task))
self.worker_count -= 1
self.busy_warning_limit = self.worker_count * 3
def add_job(self, priority, job):
""" Add a job to the queue. """
with self._lock:
if not self.running:
raise RuntimeError("ThreadPool not running")
self._work_queue.put(PriorityQueueItem(priority, job))
# check if our queue is getting too big
if self._work_queue.qsize() > self.busy_warning_limit \
and self._busy_callback is not None:
# Increase limit we will issue next warning
self.busy_warning_limit *= 2
self._busy_callback(
self.worker_count, self.current_jobs,
self._work_queue.qsize())
def block_till_done(self):
""" Blocks till all work is done. """
self._work_queue.join()
def stop(self):
""" Stops all the threads. """
with self._lock:
if not self.running:
return
# Ensure all current jobs finish
self.block_till_done()
# Tell the workers to quit
for _ in range(self.worker_count):
self.remove_worker()
self.running = False
# Wait till all workers have quit
self.block_till_done()
def _worker(self):
""" Handles jobs for the thread pool. """
while True:
# Get new item from work_queue
job = self._work_queue.get().item
if job == self._quit_task:
self._work_queue.task_done()
return
# Add to current running jobs
job_log = (datetime.now(), job)
self.current_jobs.append(job_log)
# Do the job
self._job_handler(job)
# Remove from current running job
self.current_jobs.remove(job_log)
# Tell work_queue the task is done
self._work_queue.task_done()
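# Illustrative sketch of using the pool above (the job payloads are
# hypothetical): two workers drain the priority queue, handling lower
# priority numbers first.
def _example_thread_pool():
    def handle(job):
        print("handling", job)
    pool = ThreadPool(handle, worker_count=2)
    pool.add_job(10, "low priority job")
    pool.add_job(1, "high priority job")
    pool.block_till_done()
    pool.stop()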
class PriorityQueueItem(object):
""" Holds a priority and a value. Used within PriorityQueue. """
# pylint: disable=too-few-public-methods
def __init__(self, priority, item):
self.priority = priority
self.item = item
def __lt__(self, other):
return self.priority < other.priority
|
|
# Copyright (c) 2003-2012 LOGILAB S.A. (Paris, FRANCE).
# http://www.logilab.fr/ -- mailto:[email protected]
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
"""imports checkers for Python code"""
from logilab.common.graph import get_cycles, DotBackend
from logilab.common.modutils import is_standard_module
from logilab.common.ureports import VerbatimText, Paragraph
from logilab import astng
from logilab.astng import are_exclusive
from pylint.interfaces import IASTNGChecker
from pylint.checkers import BaseChecker, EmptyReport
def get_first_import(node, context, name, base, level):
"""return the node where [base.]<name> is imported or None if not found
"""
fullname = '%s.%s' % (base, name) if base else name
first = None
found = False
for first in context.body:
if first is node:
continue
if first.scope() is node.scope() and first.fromlineno > node.fromlineno:
continue
if isinstance(first, astng.Import):
if any(fullname == iname[0] for iname in first.names):
found = True
break
elif isinstance(first, astng.From):
if level == first.level and any(
fullname == '%s.%s' % (first.modname, iname[0]) for iname in first.names):
found = True
break
if found and not are_exclusive(first, node):
return first
# utilities to represent import dependencies as a tree and a dot graph ########
def make_tree_defs(mod_files_list):
"""get a list of 2-uple (module, list_of_files_which_import_this_module),
it will return a dictionary to represent this as a tree
"""
tree_defs = {}
for mod, files in mod_files_list:
node = (tree_defs, ())
for prefix in mod.split('.'):
node = node[0].setdefault(prefix, [{}, []])
node[1] += files
return tree_defs
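# Illustrative sketch of make_tree_defs (module and file names are
# hypothetical): a flat (module, importing_files) list becomes a nested
# prefix tree keyed by dotted-name components.
def _example_make_tree_defs():
    tree = make_tree_defs([('pkg.mod', ['a.py']), ('pkg.other', [])])
    assert tree == {'pkg': [{'mod': [{}, ['a.py']], 'other': [{}, []]}, []]}
    return tree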
def repr_tree_defs(data, indent_str=None):
"""return a string which represents imports as a tree"""
lines = []
nodes = list(data.items())
for i, (mod, (sub, files)) in enumerate(sorted(nodes, key=lambda x: x[0])):
if not files:
files = ''
else:
files = '(%s)' % ','.join(files)
if indent_str is None:
lines.append('%s %s' % (mod, files))
sub_indent_str = ' '
else:
lines.append(r'%s\-%s %s' % (indent_str, mod, files))
if i == len(nodes)-1:
sub_indent_str = '%s ' % indent_str
else:
sub_indent_str = '%s| ' % indent_str
if sub:
lines.append(repr_tree_defs(sub, sub_indent_str))
return '\n'.join(lines)
def dependencies_graph(filename, dep_info):
"""write dependencies as a dot (graphviz) file
"""
done = {}
printer = DotBackend(filename[:-4], rankdir = "LR")
printer.emit('URL="." node[shape="box"]')
for modname, dependencies in sorted(dep_info.items()):
done[modname] = 1
printer.emit_node(modname)
for modname in dependencies:
if modname not in done:
done[modname] = 1
printer.emit_node(modname)
for depmodname, dependencies in sorted(dep_info.items()):
for modname in dependencies:
printer.emit_edge(modname, depmodname)
printer.generate(filename)
def make_graph(filename, dep_info, sect, gtype):
"""generate a dependencies graph and add some information about it in the
report's section
"""
dependencies_graph(filename, dep_info)
sect.append(Paragraph('%simports graph has been written to %s'
% (gtype, filename)))
# the import checker itself ###################################################
MSGS = {
'F0401': ('Unable to import %s',
'import-error',
'Used when pylint has been unable to import a module.'),
'R0401': ('Cyclic import (%s)',
'cyclic-import',
'Used when a cyclic import between two or more modules is \
detected.'),
'W0401': ('Wildcard import %s',
'wildcard-import',
'Used when `from module import *` is detected.'),
'W0402': ('Uses of a deprecated module %r',
'deprecated-module',
              'Used when a module marked as deprecated is imported.'),
'W0403': ('Relative import %r, should be %r',
'relative-import',
'Used when an import relative to the package directory is \
detected.'),
'W0404': ('Reimport %r (imported line %s)',
'reimported',
'Used when a module is reimported multiple times.'),
'W0406': ('Module import itself',
'import-self',
'Used when a module is importing itself.'),
'W0410': ('__future__ import is not the first non docstring statement',
'misplaced-future',
'Python 2.5 and greater require __future__ import to be the \
first non docstring statement in the module.'),
}
class ImportsChecker(BaseChecker):
"""checks for
* external modules dependencies
* relative / wildcard imports
* cyclic imports
* uses of deprecated modules
"""
__implements__ = IASTNGChecker
name = 'imports'
msgs = MSGS
priority = -2
options = (('deprecated-modules',
{'default' : ('regsub', 'string', 'TERMIOS',
'Bastion', 'rexec'),
'type' : 'csv',
'metavar' : '<modules>',
'help' : 'Deprecated modules which should not be used, \
separated by a comma'}
),
('import-graph',
{'default' : '',
'type' : 'string',
'metavar' : '<file.dot>',
'help' : 'Create a graph of every (i.e. internal and \
external) dependencies in the given file (report RP0402 must not be disabled)'}
),
('ext-import-graph',
{'default' : '',
'type' : 'string',
'metavar' : '<file.dot>',
'help' : 'Create a graph of external dependencies in the \
given file (report RP0402 must not be disabled)'}
),
('int-import-graph',
{'default' : '',
'type' : 'string',
'metavar' : '<file.dot>',
'help' : 'Create a graph of internal dependencies in the \
given file (report RP0402 must not be disabled)'}
),
)
def __init__(self, linter=None):
BaseChecker.__init__(self, linter)
self.stats = None
self.import_graph = None
self.__int_dep_info = self.__ext_dep_info = None
self.reports = (('RP0401', 'External dependencies',
self.report_external_dependencies),
('RP0402', 'Modules dependencies graph',
self.report_dependencies_graph),
)
def open(self):
"""called before visiting project (i.e set of modules)"""
self.linter.add_stats(dependencies={})
self.linter.add_stats(cycles=[])
self.stats = self.linter.stats
self.import_graph = {}
def close(self):
"""called before visiting project (i.e set of modules)"""
# don't try to compute cycles if the associated message is disabled
if self.linter.is_message_enabled('R0401'):
for cycle in get_cycles(self.import_graph):
self.add_message('R0401', args=' -> '.join(cycle))
def visit_import(self, node):
"""triggered when an import statement is seen"""
modnode = node.root()
for name, _ in node.names:
importedmodnode = self.get_imported_module(modnode, node, name)
if importedmodnode is None:
continue
self._check_relative_import(modnode, node, importedmodnode, name)
self._add_imported_module(node, importedmodnode.name)
self._check_deprecated_module(node, name)
self._check_reimport(node, name)
def visit_from(self, node):
"""triggered when a from statement is seen"""
basename = node.modname
if basename == '__future__':
# check if this is the first non-docstring statement in the module
prev = node.previous_sibling()
if prev:
# consecutive future statements are possible
if not (isinstance(prev, astng.From)
and prev.modname == '__future__'):
self.add_message('W0410', node=node)
return
modnode = node.root()
importedmodnode = self.get_imported_module(modnode, node, basename)
if importedmodnode is None:
return
self._check_relative_import(modnode, node, importedmodnode, basename)
self._check_deprecated_module(node, basename)
for name, _ in node.names:
if name == '*':
self.add_message('W0401', args=basename, node=node)
continue
self._add_imported_module(node, '%s.%s' % (importedmodnode.name, name))
self._check_reimport(node, name, basename, node.level)
def get_imported_module(self, modnode, importnode, modname):
try:
return importnode.do_import_module(modname)
except astng.InferenceError as ex:
if str(ex) != modname:
args = '%r (%s)' % (modname, ex)
else:
args = repr(modname)
self.add_message("F0401", args=args, node=importnode)
def _check_relative_import(self, modnode, importnode, importedmodnode,
importedasname):
"""check relative import. node is either an Import or From node, modname
the imported module name.
"""
if 'W0403' not in self.active_msgs:
return
if importedmodnode.file is None:
return False # built-in module
if modnode is importedmodnode:
return False # module importing itself
if modnode.absolute_import_activated() or getattr(importnode, 'level', None):
return False
if importedmodnode.name != importedasname:
# this must be a relative import...
self.add_message('W0403', args=(importedasname, importedmodnode.name),
node=importnode)
def _add_imported_module(self, node, importedmodname):
"""notify an imported module, used to analyze dependencies"""
context_name = node.root().name
if context_name == importedmodname:
# module importing itself !
self.add_message('W0406', node=node)
elif not is_standard_module(importedmodname):
# handle dependencies
importedmodnames = self.stats['dependencies'].setdefault(
importedmodname, set())
if not context_name in importedmodnames:
importedmodnames.add(context_name)
if is_standard_module(importedmodname, (self.package_dir(),)):
# update import graph
mgraph = self.import_graph.setdefault(context_name, set())
if not importedmodname in mgraph:
mgraph.add(importedmodname)
def _check_deprecated_module(self, node, mod_path):
"""check if the module is deprecated"""
for mod_name in self.config.deprecated_modules:
if mod_path == mod_name or mod_path.startswith(mod_name + '.'):
self.add_message('W0402', node=node, args=mod_path)
def _check_reimport(self, node, name, basename=None, level=None):
"""check if the import is necessary (i.e. not already done)"""
if 'W0404' not in self.active_msgs:
return
frame = node.frame()
root = node.root()
contexts = [(frame, level)]
if root is not frame:
contexts.append((root, None))
for context, level in contexts:
first = get_first_import(node, context, name, basename, level)
if first is not None:
self.add_message('W0404', node=node,
args=(name, first.fromlineno))
def report_external_dependencies(self, sect, _, dummy):
"""return a verbatim layout for displaying dependencies"""
dep_info = make_tree_defs(iter(self._external_dependencies_info().items()))
if not dep_info:
raise EmptyReport()
tree_str = repr_tree_defs(dep_info)
sect.append(VerbatimText(tree_str))
def report_dependencies_graph(self, sect, _, dummy):
"""write dependencies as a dot (graphviz) file"""
dep_info = self.stats['dependencies']
if not dep_info or not (self.config.import_graph
or self.config.ext_import_graph
or self.config.int_import_graph):
raise EmptyReport()
filename = self.config.import_graph
if filename:
make_graph(filename, dep_info, sect, '')
filename = self.config.ext_import_graph
if filename:
make_graph(filename, self._external_dependencies_info(),
sect, 'external ')
filename = self.config.int_import_graph
if filename:
make_graph(filename, self._internal_dependencies_info(),
sect, 'internal ')
def _external_dependencies_info(self):
"""return cached external dependencies information or build and
cache them
"""
if self.__ext_dep_info is None:
package = self.linter.base_name
self.__ext_dep_info = result = {}
for importee, importers in self.stats['dependencies'].items():
if not importee.startswith(package):
result[importee] = importers
return self.__ext_dep_info
def _internal_dependencies_info(self):
"""return cached internal dependencies information or build and
cache them
"""
if self.__int_dep_info is None:
package = self.linter.base_name
self.__int_dep_info = result = {}
for importee, importers in self.stats['dependencies'].items():
if importee.startswith(package):
result[importee] = importers
return self.__int_dep_info
def register(linter):
"""required method to auto register this checker """
linter.register_checker(ImportsChecker(linter))
|
|
"""caching_query.py
Represent persistence structures which allow the usage of
Beaker caching with SQLAlchemy.
The four new concepts introduced here are:
* CachingQuery - a Query subclass that caches and
retrieves results in/from Beaker.
* FromCache - a query option that establishes caching
parameters on a Query
* RelationshipCache - a variant of FromCache which is specific
to a query invoked during a lazy load.
* _params_from_query - extracts value parameters from
a Query.
The rest of what's here are standard SQLAlchemy and
Beaker constructs.
"""
from sqlalchemy.orm.interfaces import MapperOption
from sqlalchemy.orm.query import Query
from sqlalchemy.sql import visitors
class CachingQuery(Query):
"""A Query subclass which optionally loads full results from a Beaker
cache region.
The CachingQuery stores additional state that allows it to consult
a Beaker cache before accessing the database:
* A "region", which is a cache region argument passed to a
Beaker CacheManager, specifies a particular cache configuration
(including backend implementation, expiration times, etc.)
* A "namespace", which is a qualifying name that identifies a
group of keys within the cache. A query that filters on a name
might use the name "by_name", a query that filters on a date range
to a joined table might use the name "related_date_range".
When the above state is present, a Beaker cache is retrieved.
The "namespace" name is first concatenated with
a string composed of the individual entities and columns the Query
requests, i.e. such as ``Query(User.id, User.name)``.
The Beaker cache is then loaded from the cache manager based
on the region and composed namespace. The key within the cache
itself is then constructed against the bind parameters specified
by this query, which are usually literals defined in the
WHERE clause.
The FromCache and RelationshipCache mapper options below represent
the "public" method of configuring this state upon the CachingQuery.
"""
def __init__(self, manager, *args, **kw):
self.cache_manager = manager
Query.__init__(self, *args, **kw)
def __iter__(self):
"""override __iter__ to pull results from Beaker
if particular attributes have been configured.
Note that this approach does *not* detach the loaded objects from
the current session. If the cache backend is an in-process cache
(like "memory") and lives beyond the scope of the current session's
transaction, those objects may be expired. The method here can be
modified to first expunge() each loaded item from the current
session before returning the list of items, so that the items
in the cache are not the same ones in the current Session.
"""
if hasattr(self, '_cache_parameters'):
return self.get_value(createfunc=lambda: list(Query.__iter__(self)))
else:
return Query.__iter__(self)
def invalidate(self):
"""Invalidate the value represented by this Query."""
cache, cache_key = _get_cache_parameters(self)
cache.remove(cache_key)
def get_value(self, merge=True, createfunc=None):
"""Return the value from the cache for this query.
Raise KeyError if no value present and no
createfunc specified.
"""
cache, cache_key = _get_cache_parameters(self)
ret = cache.get_value(cache_key, createfunc=createfunc)
if merge:
ret = self.merge_result(ret, load=False)
return ret
def set_value(self, value):
"""Set the value in the cache for this query."""
cache, cache_key = _get_cache_parameters(self)
cache.put(cache_key, value)
def query_callable(manager):
def query(*arg, **kw):
return CachingQuery(manager, *arg, **kw)
return query
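def _example_session_factory():
    """Illustrative sketch, not part of the recipe itself: build a scoped
    Session whose queries are CachingQuery instances so that the FromCache /
    RelationshipCache options below can be used. The region name and expiry
    are hypothetical."""
    from beaker import cache
    from beaker.util import parse_cache_config_options
    from sqlalchemy.orm import scoped_session, sessionmaker
    cache_opts = {
        'cache.regions': 'default',
        'cache.default.type': 'memory',
        'cache.default.expire': '3600',
    }
    cache_manager = cache.CacheManager(**parse_cache_config_options(cache_opts))
    return scoped_session(sessionmaker(query_cls=query_callable(cache_manager)))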
def _get_cache_parameters(query):
"""For a query with cache_region and cache_namespace configured,
    return the corresponding Cache instance and cache key, based
on this query's current criterion and parameter values.
"""
if not hasattr(query, '_cache_parameters'):
raise ValueError("This Query does not have caching parameters configured.")
region, namespace, cache_key = query._cache_parameters
namespace = _namespace_from_query(namespace, query)
if cache_key is None:
# cache key - the value arguments from this query's parameters.
args = _params_from_query(query)
cache_key = " ".join([str(x) for x in args])
# get cache
cache = query.cache_manager.get_cache_region(namespace, region)
# optional - hash the cache_key too for consistent length
# import uuid
# cache_key= str(uuid.uuid5(uuid.NAMESPACE_DNS, cache_key))
return cache, cache_key
def _namespace_from_query(namespace, query):
# cache namespace - the token handed in by the
# option + class we're querying against
namespace = " ".join([namespace] + [str(x) for x in query._entities])
# memcached wants this
namespace = namespace.replace(' ', '_')
return namespace
def _set_cache_parameters(query, region, namespace, cache_key):
if hasattr(query, '_cache_parameters'):
region, namespace, cache_key = query._cache_parameters
raise ValueError("This query is already configured "
"for region %r namespace %r" %
(region, namespace)
)
query._cache_parameters = region, namespace, cache_key
class FromCache(MapperOption):
"""Specifies that a Query should load results from a cache."""
propagate_to_loaders = False
def __init__(self, region, namespace, cache_key=None):
"""Construct a new FromCache.
:param region: the cache region. Should be a
region configured in the Beaker CacheManager.
:param namespace: the cache namespace. Should
be a name uniquely describing the target Query's
lexical structure.
:param cache_key: optional. A string cache key
that will serve as the key to the query. Use this
if your query has a huge amount of parameters (such
as when using in_()) which correspond more simply to
some other identifier.
"""
self.region = region
self.namespace = namespace
self.cache_key = cache_key
def process_query(self, query):
"""Process a Query during normal loading operation."""
_set_cache_parameters(query, self.region, self.namespace, self.cache_key)
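def _example_from_cache_usage(session, User):
    """Illustrative usage sketch (the ``User`` model and the region/namespace
    names are hypothetical): the second identical query is answered from the
    Beaker cache rather than the database, and invalidate() drops the entry."""
    q = session.query(User).\
        options(FromCache("default", "by_name")).\
        filter(User.name == "some user")
    first = q.all()    # hits the database and stores the rows in the cache
    second = q.all()   # same criterion and parameters -> served from the cache
    q.invalidate()     # remove the cached value for this query
    return first, second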
class RelationshipCache(MapperOption):
"""Specifies that a Query as called within a "lazy load"
should load results from a cache."""
propagate_to_loaders = True
def __init__(self, region, namespace, attribute):
"""Construct a new RelationshipCache.
:param region: the cache region. Should be a
region configured in the Beaker CacheManager.
:param namespace: the cache namespace. Should
be a name uniquely describing the target Query's
lexical structure.
:param attribute: A Class.attribute which
indicates a particular class relationship() whose
lazy loader should be pulled from the cache.
"""
self.region = region
self.namespace = namespace
self._relationship_options = {
( attribute.property.parent.class_, attribute.property.key ) : self
}
def process_query_conditionally(self, query):
"""Process a Query that is used within a lazy loader.
(the process_query_conditionally() method is a SQLAlchemy
hook invoked only within lazyload.)
"""
if query._current_path:
mapper, key = query._current_path[-2:]
for cls in mapper.class_.__mro__:
if (cls, key) in self._relationship_options:
relationship_option = self._relationship_options[(cls, key)]
_set_cache_parameters(
query,
relationship_option.region,
relationship_option.namespace,
None)
def and_(self, option):
"""Chain another RelationshipCache option to this one.
While many RelationshipCache objects can be specified on a single
Query separately, chaining them together allows for a more efficient
lookup during load.
"""
self._relationship_options.update(option._relationship_options)
return self
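def _example_relationship_cache_option(User, Address):
    """Illustrative sketch (the models, relationships and region name are
    hypothetical): cache the lazy loads of ``User.addresses`` and
    ``Address.user``, chained into a single option via and_(). The result
    would be passed to Query.options() like any other MapperOption."""
    return RelationshipCache("default", "by_user", User.addresses).and_(
        RelationshipCache("default", "by_address", Address.user))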
def _params_from_query(query):
"""Pull the bind parameter values from a query.
This takes into account any scalar attribute bindparam set up.
E.g. params_from_query(query.filter(Cls.foo==5).filter(Cls.bar==7)))
would return [5, 7].
"""
v = []
def visit_bindparam(bind):
value = query._params.get(bind.key, bind.value)
# lazyloader may dig a callable in here, intended
# to late-evaluate params after autoflush is called.
# convert to a scalar value.
if callable(value):
value = value()
v.append(value)
if query._criterion is not None:
visitors.traverse(query._criterion, {}, {'bindparam':visit_bindparam})
return v
|
|
#!/usr/bin/python
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: aws_ses_identity
short_description: Manages SES email and domain identity
description:
- This module allows the user to manage verified email and domain identity for SES.
- This covers verifying and removing identities as well as setting up complaint, bounce
and delivery notification settings.
version_added: "2.5"
author: Ed Costello (@orthanc)
options:
identity:
description:
- This is the email address or domain to verify / delete.
- If this contains an '@' then it will be considered an email. Otherwise it will be considered a domain.
required: true
state:
description: Whether to create(or update) or delete the identity.
default: present
choices: [ 'present', 'absent' ]
bounce_notifications:
description:
- Setup the SNS topic used to report bounce notifications.
- If omitted, bounce notifications will not be delivered to a SNS topic.
- If bounce notifications are not delivered to a SNS topic, I(feedback_forwarding) must be enabled.
suboptions:
topic:
description:
- The ARN of the topic to send notifications to.
- If omitted, notifications will not be delivered to a SNS topic.
include_headers:
description:
- Whether or not to include headers when delivering to the SNS topic.
- If I(topic) is not specified this will have no impact, but the SES setting is updated even if there is no topic.
type: bool
default: No
complaint_notifications:
description:
- Setup the SNS topic used to report complaint notifications.
- If omitted, complaint notifications will not be delivered to a SNS topic.
- If complaint notifications are not delivered to a SNS topic, I(feedback_forwarding) must be enabled.
suboptions:
topic:
description:
- The ARN of the topic to send notifications to.
- If omitted, notifications will not be delivered to a SNS topic.
include_headers:
description:
- Whether or not to include headers when delivering to the SNS topic.
- If I(topic) is not specified this will have no impact, but the SES setting is updated even if there is no topic.
type: bool
default: No
delivery_notifications:
description:
- Setup the SNS topic used to report delivery notifications.
- If omitted, delivery notifications will not be delivered to a SNS topic.
suboptions:
topic:
description:
- The ARN of the topic to send notifications to.
- If omitted, notifications will not be delivered to a SNS topic.
include_headers:
description:
- Whether or not to include headers when delivering to the SNS topic.
- If I(topic) is not specified this will have no impact, but the SES setting is updated even if there is no topic.
type: bool
default: No
feedback_forwarding:
description:
- Whether or not to enable feedback forwarding.
- This can only be false if both I(bounce_notifications) and I(complaint_notifications) specify SNS topics.
type: 'bool'
default: True
requirements: [ 'botocore', 'boto3' ]
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
- name: Ensure [email protected] email identity exists
  aws_ses_identity:
    identity: [email protected]
    state: present
- name: Delete [email protected] email identity
  aws_ses_identity:
    identity: [email protected]
    state: absent
- name: Ensure example.com domain identity exists
aws_ses_identity:
identity: example.com
state: present
# Create an SNS topic and send bounce and complaint notifications to it
# instead of emailing the identity owner
- name: Ensure complaints-topic exists
sns_topic:
name: "complaints-topic"
state: present
purge_subscriptions: False
register: topic_info
- name: Deliver feedback to topic instead of owner email
aws_ses_identity:
    identity: [email protected]
state: present
complaint_notifications:
topic: "{{ topic_info.sns_arn }}"
include_headers: True
bounce_notifications:
topic: "{{ topic_info.sns_arn }}"
include_headers: False
feedback_forwarding: False
# Create an SNS topic for delivery notifications and leave complaints
# being forwarded to the identity owner email
- name: Ensure delivery-notifications-topic exists
sns_topic:
name: "delivery-notifications-topic"
state: present
purge_subscriptions: False
register: topic_info
- name: Delivery notifications to topic
aws_ses_identity:
    identity: [email protected]
state: present
delivery_notifications:
topic: "{{ topic_info.sns_arn }}"
'''
RETURN = '''
identity:
description: The identity being modified.
returned: success
type: str
    sample: [email protected]
identity_arn:
description: The arn of the identity being modified.
returned: success
type: str
    sample: arn:aws:ses:us-east-1:12345678:identity/[email protected]
verification_attributes:
description: The verification information for the identity.
returned: success
type: complex
sample: {
"verification_status": "Pending",
"verification_token": "...."
}
contains:
verification_status:
description: The verification status of the identity.
type: str
sample: "Pending"
verification_token:
description: The verification token for a domain identity.
type: str
notification_attributes:
description: The notification setup for the identity.
returned: success
type: complex
sample: {
"bounce_topic": "arn:aws:sns:....",
"complaint_topic": "arn:aws:sns:....",
"delivery_topic": "arn:aws:sns:....",
"forwarding_enabled": false,
"headers_in_bounce_notifications_enabled": true,
"headers_in_complaint_notifications_enabled": true,
"headers_in_delivery_notifications_enabled": true
}
contains:
bounce_topic:
description:
- The ARN of the topic bounce notifications are delivered to.
- Omitted if bounce notifications are not delivered to a topic.
type: str
complaint_topic:
description:
- The ARN of the topic complaint notifications are delivered to.
- Omitted if complaint notifications are not delivered to a topic.
type: str
delivery_topic:
description:
- The ARN of the topic delivery notifications are delivered to.
- Omitted if delivery notifications are not delivered to a topic.
type: str
forwarding_enabled:
description: Whether or not feedback forwarding is enabled.
type: bool
headers_in_bounce_notifications_enabled:
description: Whether or not headers are included in messages delivered to the bounce topic.
type: bool
headers_in_complaint_notifications_enabled:
description: Whether or not headers are included in messages delivered to the complaint topic.
type: bool
headers_in_delivery_notifications_enabled:
description: Whether or not headers are included in messages delivered to the delivery topic.
type: bool
'''
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry, get_aws_connection_info
import time
try:
from botocore.exceptions import BotoCoreError, ClientError
except ImportError:
    pass  # handled by AnsibleAWSModule
def get_verification_attributes(connection, module, identity, retries=0, retryDelay=10):
# Unpredictably get_identity_verification_attributes doesn't include the identity even when we've
# just registered it. Suspect this is an eventual consistency issue on AWS side.
    # Don't want this complexity exposed to users of the module as they'd have to retry to ensure
# a consistent return from the module.
# To avoid this we have an internal retry that we use only after registering the identity.
for attempt in range(0, retries + 1):
try:
response = connection.get_identity_verification_attributes(Identities=[identity], aws_retry=True)
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg='Failed to retrieve identity verification attributes for {identity}'.format(identity=identity))
identity_verification = response['VerificationAttributes']
if identity in identity_verification:
break
time.sleep(retryDelay)
if identity not in identity_verification:
return None
return identity_verification[identity]
def get_identity_notifications(connection, module, identity, retries=0, retryDelay=10):
# Unpredictably get_identity_notifications doesn't include the notifications when we've
# just registered the identity.
    # Don't want this complexity exposed to users of the module as they'd have to retry to ensure
# a consistent return from the module.
# To avoid this we have an internal retry that we use only when getting the current notification
# status for return.
for attempt in range(0, retries + 1):
try:
response = connection.get_identity_notification_attributes(Identities=[identity], aws_retry=True)
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg='Failed to retrieve identity notification attributes for {identity}'.format(identity=identity))
notification_attributes = response['NotificationAttributes']
        # No clear AWS docs on when this happens, but it appears sometimes identities are not included
        # in the notification attributes when the identity is first registered. Suspect that this is caused by
# eventual consistency within the AWS services. It's been observed in builds so we need to handle it.
#
# When this occurs, just return None and we'll assume no identity notification settings have been changed
# from the default which is reasonable if this is just eventual consistency on creation.
# See: https://github.com/ansible/ansible/issues/36065
if identity in notification_attributes:
break
else:
# Paranoia check for coding errors, we only requested one identity, so if we get a different one
# something has gone very wrong.
if len(notification_attributes) != 0:
module.fail_json(
msg='Unexpected identity found in notification attributes, expected {0} but got {1!r}.'.format(
identity,
notification_attributes.keys(),
)
)
time.sleep(retryDelay)
if identity not in notification_attributes:
return None
return notification_attributes[identity]
def desired_topic(module, notification_type):
arg_dict = module.params.get(notification_type.lower() + '_notifications')
if arg_dict:
return arg_dict.get('topic', None)
else:
return None
def update_notification_topic(connection, module, identity, identity_notifications, notification_type):
topic_key = notification_type + 'Topic'
if identity_notifications is None:
        # If there is no notification configuration at all, notifications cannot be being sent to topics,
        # hence assume None as the current state.
current = None
elif topic_key in identity_notifications:
current = identity_notifications[topic_key]
else:
# If there is information on the notifications setup but no information on the
# particular notification topic it's pretty safe to assume there's no topic for
# this notification. AWS API docs suggest this information will always be
# included but best to be defensive
current = None
required = desired_topic(module, notification_type)
if current != required:
try:
if not module.check_mode:
connection.set_identity_notification_topic(Identity=identity, NotificationType=notification_type, SnsTopic=required, aws_retry=True)
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg='Failed to set identity notification topic for {identity} {notification_type}'.format(
identity=identity,
notification_type=notification_type,
))
return True
return False
def update_notification_topic_headers(connection, module, identity, identity_notifications, notification_type):
arg_dict = module.params.get(notification_type.lower() + '_notifications')
header_key = 'HeadersIn' + notification_type + 'NotificationsEnabled'
if identity_notifications is None:
# If there is no configuration for topic notifications, headers cannot be being
# forwarded, hence assume false.
current = False
elif header_key in identity_notifications:
current = identity_notifications[header_key]
else:
        # The AWS API docs indicate that the HeadersIn... fields are optional. Unfortunately
        # it's not clear what their absence means, but it's a pretty safe assumption that it means
        # headers are not included, since most API consumers would interpret absence as false.
current = False
if arg_dict is not None and 'include_headers' in arg_dict:
required = arg_dict['include_headers']
else:
required = False
if current != required:
try:
if not module.check_mode:
connection.set_identity_headers_in_notifications_enabled(Identity=identity, NotificationType=notification_type, Enabled=required,
aws_retry=True)
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg='Failed to set identity headers in notification for {identity} {notification_type}'.format(
identity=identity,
notification_type=notification_type,
))
return True
return False
def update_feedback_forwarding(connection, module, identity, identity_notifications):
if identity_notifications is None:
# AWS requires feedback forwarding to be enabled unless bounces and complaints
# are being handled by SNS topics. So in the absence of identity_notifications
# information existing feedback forwarding must be on.
current = True
elif 'ForwardingEnabled' in identity_notifications:
current = identity_notifications['ForwardingEnabled']
else:
# If there is information on the notifications setup but no information on the
# forwarding state it's pretty safe to assume forwarding is off. AWS API docs
# suggest this information will always be included but best to be defensive
current = False
required = module.params.get('feedback_forwarding')
if current != required:
try:
if not module.check_mode:
connection.set_identity_feedback_forwarding_enabled(Identity=identity, ForwardingEnabled=required, aws_retry=True)
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg='Failed to set identity feedback forwarding for {identity}'.format(identity=identity))
return True
return False
def create_mock_notifications_response(module):
resp = {
"ForwardingEnabled": module.params.get('feedback_forwarding'),
}
for notification_type in ('Bounce', 'Complaint', 'Delivery'):
arg_dict = module.params.get(notification_type.lower() + '_notifications')
if arg_dict is not None and 'topic' in arg_dict:
resp[notification_type + 'Topic'] = arg_dict['topic']
header_key = 'HeadersIn' + notification_type + 'NotificationsEnabled'
if arg_dict is not None and 'include_headers' in arg_dict:
resp[header_key] = arg_dict['include_headers']
else:
resp[header_key] = False
return resp
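# Illustrative sketch (not part of the original module; the ARN below is a placeholder):
# for module params such as
#     feedback_forwarding=True,
#     bounce_notifications={'topic': 'arn:aws:sns:us-east-1:12345678:bounces', 'include_headers': True},
#     complaint_notifications=None, delivery_notifications=None
# create_mock_notifications_response() builds the check-mode stand-in:
#     {
#         'ForwardingEnabled': True,
#         'BounceTopic': 'arn:aws:sns:us-east-1:12345678:bounces',
#         'HeadersInBounceNotificationsEnabled': True,
#         'HeadersInComplaintNotificationsEnabled': False,
#         'HeadersInDeliveryNotificationsEnabled': False,
#     }
# i.e. the same shape that get_identity_notification_attributes would return after a real run.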
def update_identity_notifications(connection, module):
identity = module.params.get('identity')
changed = False
identity_notifications = get_identity_notifications(connection, module, identity)
for notification_type in ('Bounce', 'Complaint', 'Delivery'):
changed |= update_notification_topic(connection, module, identity, identity_notifications, notification_type)
changed |= update_notification_topic_headers(connection, module, identity, identity_notifications, notification_type)
changed |= update_feedback_forwarding(connection, module, identity, identity_notifications)
if changed or identity_notifications is None:
if module.check_mode:
identity_notifications = create_mock_notifications_response(module)
else:
identity_notifications = get_identity_notifications(connection, module, identity, retries=4)
return changed, identity_notifications
def validate_params_for_identity_present(module):
if module.params.get('feedback_forwarding') is False:
if not (desired_topic(module, 'Bounce') and desired_topic(module, 'Complaint')):
module.fail_json(msg="Invalid Parameter Value 'False' for 'feedback_forwarding'. AWS requires "
"feedback forwarding to be enabled unless bounces and complaints are handled by SNS topics")
def create_or_update_identity(connection, module, region, account_id):
identity = module.params.get('identity')
changed = False
verification_attributes = get_verification_attributes(connection, module, identity)
if verification_attributes is None:
try:
if not module.check_mode:
if '@' in identity:
connection.verify_email_identity(EmailAddress=identity, aws_retry=True)
else:
connection.verify_domain_identity(Domain=identity, aws_retry=True)
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg='Failed to verify identity {identity}'.format(identity=identity))
if module.check_mode:
verification_attributes = {
"VerificationStatus": "Pending",
}
else:
verification_attributes = get_verification_attributes(connection, module, identity, retries=4)
changed = True
elif verification_attributes['VerificationStatus'] not in ('Pending', 'Success'):
module.fail_json(msg="Identity " + identity + " in bad status " + verification_attributes['VerificationStatus'],
verification_attributes=camel_dict_to_snake_dict(verification_attributes))
if verification_attributes is None:
module.fail_json(msg='Unable to load identity verification attributes after registering identity.')
notifications_changed, notification_attributes = update_identity_notifications(connection, module)
changed |= notifications_changed
if notification_attributes is None:
module.fail_json(msg='Unable to load identity notification attributes.')
identity_arn = 'arn:aws:ses:' + region + ':' + account_id + ':identity/' + identity
module.exit_json(
changed=changed,
identity=identity,
identity_arn=identity_arn,
verification_attributes=camel_dict_to_snake_dict(verification_attributes),
notification_attributes=camel_dict_to_snake_dict(notification_attributes),
)
def destroy_identity(connection, module):
identity = module.params.get('identity')
changed = False
verification_attributes = get_verification_attributes(connection, module, identity)
if verification_attributes is not None:
try:
if not module.check_mode:
connection.delete_identity(Identity=identity, aws_retry=True)
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg='Failed to delete identity {identity}'.format(identity=identity))
changed = True
module.exit_json(
changed=changed,
identity=identity,
)
def get_account_id(module):
sts = module.client('sts')
try:
caller_identity = sts.get_caller_identity()
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg='Failed to retrieve caller identity')
return caller_identity['Account']
def main():
module = AnsibleAWSModule(
argument_spec={
"identity": dict(required=True, type='str'),
"state": dict(default='present', choices=['present', 'absent']),
"bounce_notifications": dict(type='dict'),
"complaint_notifications": dict(type='dict'),
"delivery_notifications": dict(type='dict'),
"feedback_forwarding": dict(default=True, type='bool'),
},
supports_check_mode=True,
)
for notification_type in ('bounce', 'complaint', 'delivery'):
param_name = notification_type + '_notifications'
arg_dict = module.params.get(param_name)
if arg_dict:
extra_keys = [x for x in arg_dict.keys() if x not in ('topic', 'include_headers')]
if extra_keys:
module.fail_json(msg='Unexpected keys ' + str(extra_keys) + ' in ' + param_name + ' valid keys are topic or include_headers')
# SES APIs seem to have a much lower throttling threshold than most of the rest of the AWS APIs.
# Docs say 1 call per second. This shouldn't actually be a big problem for normal usage, but
    # the ansible build runs multiple instances of the test in parallel, which has caused throttling
    # failures, so apply a jittered backoff to SES calls.
connection = module.client('ses', retry_decorator=AWSRetry.jittered_backoff())
state = module.params.get("state")
if state == 'present':
region = get_aws_connection_info(module, boto3=True)[0]
account_id = get_account_id(module)
validate_params_for_identity_present(module)
create_or_update_identity(connection, module, region, account_id)
else:
destroy_identity(connection, module)
if __name__ == '__main__':
main()
|
|
from Queue import Queue
from threading import _Event, Thread
from urlparse import urlparse
from functools import partial
from socketIO_client import SocketIO, SocketIONamespace, EngineIONamespace
from exceptions import HeimdallrClientException
from utils import timestamp, for_own_methods, on_ready
from settings import AUTH_SOURCE, URL
__all__ = ['Client', 'Provider', 'Consumer']
def _init(self, io):
self._io = io
self._callback_by_event = {}
self._log_name = Client._url
self.initialize()
EngineIONamespace.__init__ = _init
class _SocketIO(SocketIO):
def _should_stop_waiting(self, **kwargs):
event = kwargs.pop('event', None)
event_set = False
if isinstance(event, _Event):
event_set = event.is_set()
return super(_SocketIO, self)._should_stop_waiting(**kwargs) or \
event_set
class Client():
"""
The ``Client`` class provides most of the behavior for
Heimdallr clients. However, it is not intended to be
used directly. The ``Client`` constructor creates the
basic connection which in this case is a
``SocketIONamespace``. It sets up callbacks for
connection and authentication as well as a default
callback for errors. The default error handler can be
removed by ``client.remove_listener('err')``.
Args:
token (str): Authentication token
"""
_url = URL
_auth_source = AUTH_SOURCE
_namespace = '/'
_safe = True
def __init__(self, token):
self.ready = False
self.ready_callbacks = []
self.callbacks = {}
self.token = token
self.connection = SocketIONamespace(None, self._namespace)
# Handle sending packets asynchronously
self._emit_queue = Queue()
self._emit_worker = Thread(target=self._emit_task)
self._emit_worker.daemon = True
self._emit_worker.start()
emit = self.connection.emit
def safe_emit(*args, **kwargs):
try:
emit(*args, **kwargs)
except Exception as e:
print (
'HeimdallrClient failed to send. Original exception: %s'
% e.message
)
if self._safe:
self.connection.emit = safe_emit
@self.on('err')
def fn(err):
if 'message' in err:
raise HeimdallrClientException(err['message'])
else:
raise HeimdallrClientException(err)
@self.on('auth-success')
def fn(*args):
self.ready = True
while self.ready_callbacks:
self.ready_callbacks.pop(0)()
def on_connect(*args):
self._emit_queue.put((
'authorize',
{'token': self.token, 'authSource': self._auth_source}
))
self.on('connect', on_connect)
self.on('reconnect', on_connect)
def __del__(self):
# Cleanup thread
self._emit_worker._Thread__stop()
def connect(self, **kwargs):
""" Connect to the Heimdallr server.
        The ``connect`` method blocks until the socket connection
to the server has been established.
Args:
**kwargs: Passed to underlying SocketIO constructor
:returns: :class:`Client <Client>`
"""
try:
parsed = urlparse(self._url)
if self.connection._io and self.connection._io.connected:
self.connection.disconnect()
self.connection._io = _SocketIO(
'%s://%s' % (parsed.scheme, parsed.hostname),
parsed.port,
**kwargs
)
io = self.connection._io
io._namespace = self.connection
io._namespace_by_path[self._namespace] = self.connection
io.connect(self._namespace)
io.wait(for_connect=True)
except Exception as e:
if not self._safe:
raise e
print 'HeimdallrClient failed to connect: %s' % e.message
return self
def run(self, seconds=None, **kwargs):
""" Main loop for a client.
The ``run`` method is the main loop for a client and is where
all communication between the Heimdallr server and client
takes place. The ``run`` method is just a proxy for the
``SocketIO.wait`` method so you can call it with the
same arguments. However, an additional ``event`` option has
been added. If a :py:class:`threading.Event` object is passed in for
``event``, the wait loop will terminate once the flag is set.
Args:
seconds (float): Number of seconds to loop for
event (:py:class:`threading.Event`): Triggers the exit of the run
loop when the flag is set
for_connect (bool): Run until the SocketIO connect event
for_callback (bool): Run until the server has acknowledged all
emits
:returns: :class:`Client <Client>`
**Usage:**
.. code-block:: python
client.run(1) # Loops for 1 second
from threading import Event
event = Event()
client.run(event=event) # Loops until event.is_set() is True
client.run() # Loops forever
"""
kwargs['seconds'] = seconds
self.connection._io.wait(**kwargs)
return self
def _emit_task(self):
while True:
args = self._emit_queue.get()
self.connection.emit(*args)
def __trigger_callbacks(self, message_name, *args):
""" Call all of the callbacks for a socket.io message.
A version of this method curried with ``message_name``
is given to the underlying ``SocketIONamespace``. When the
``SocketIONamespace`` calls it each of the callbacks that
have been attached to ``message_name`` will be called.
Args:
message_name (str): Name of the socket.io message to listen for
args: Data sent with message
"""
callbacks = self.callbacks.get(message_name, [])
for callback in callbacks:
callback(*args)
def __on(self, message_name, callback):
""" Store ``callback`` and register a placeholder callback.
Appends ``callback`` to the list of callbacks for the
given ``message_name``. Also assigns a placeholder
callback to the underlying ``SocketIONamespace`` so that
the placeholder can call all of the callbacks in
the list.
Args:
message_name (str): Name of the socket.io message to listen for
callback (function): Callback to be run when the socket.io message
is heard
"""
self.callbacks.setdefault(message_name, [])
self.callbacks[message_name].append(callback)
self.connection.on(
message_name,
partial(self.__trigger_callbacks, message_name)
)
def on(self, message_name, callback=None):
""" Add a socket.io message listener.
The ``on`` method will add a callback for socket.io messages
of the specified message name. Multiple callbacks can be
added for the same message name. They will be triggered
in the order in which they were added. This method can be
called outright or it can be used as a decorator.
Args:
message_name (str): Name of the socket.io message to listen for
callback (function): Callback to run when the socket.io
message is heard
:returns: :class:`Client <Client>`
**Usage:**
.. code-block:: python
def first(*args):
print 'FIRST'
client.on('myMessage', first)
@client.on('myMessage')
def second(*args):
print 'SECOND'
"""
# Decorator syntax
if callback is None:
def decorator(fn):
self.__on(message_name, fn)
return decorator
# SocketIO-Client syntax
self.__on(message_name, callback)
return self
def remove_listener(self, message_name, callback=None):
""" Remove listener for socket.io message.
If ``callback`` is specified, only the callbacks registered
for ``message_name`` that match ``callback`` will be removed.
If only ``message_name`` is specified, all of the callbacks
will be removed.
Args:
message_name (str): Name of the socket.io message to remove
callback (function): Specific callback to remove
:returns: :class:`Client <Client>`
"""
if callback:
while callback in self.callbacks.get(message_name, []):
self.callbacks[message_name].remove(callback)
else:
self.callbacks.pop(message_name, None)
self.connection._callback_by_event.pop(message_name, None)
return self
@for_own_methods(on_ready)
class Provider(Client):
"""
This class should be used to create a Heimdallr provider.
It inherits most of its functionality but it also
automatically connects to the provider namespace and
provides some convenience functions.
"""
_namespace = '/provider'
def send_event(self, subtype, data=None):
""" Emit a Heimdallr event packet.
This will send a Heimdallr event packet to the
Heimdallr server where it will be rebroadcast.
``data`` must adhere to the provider's schema for
the given ``subtype``.
Args:
subtype (str): The event packet subtype
data: The event packet data
:returns: :class:`Provider <Provider>`
"""
self._emit_queue.put((
'event',
{'subtype': subtype, 'data': data, 't': timestamp()}
))
def send_sensor(self, subtype, data=None):
""" Emit a Heimdallr sensor packet.
This will send a Heimdallr sensor packet to the
Heimdallr server where it will be rebroadcast.
``data`` must adhere to the provider's schema for
the given ``subtype``.
Args:
subtype (str): The sensor packet subtype
data: The sensor packet data
:returns: :class:`Provider <Provider>`
"""
self._emit_queue.put((
'sensor',
{'subtype': subtype, 'data': data, 't': timestamp()}
))
def send_stream(self, data):
""" Send binary data to the Heimdallr server.
This should only be used when the Heimdallr server
has issued a ``{'stream': 'start'}`` control packet
and should stop being used when the Heimdallr
server issues a ``{'stream': 'start'}`` control
packet.
Args:
data: The binary data to be sent.
:returns: :class:`Provider <Provider>`
"""
self._emit_queue.put((
'stream',
bytearray(data)
))
def completed(self, uuid):
""" Signal the Heimdallr server that a control has been completed.
This should be used when a control that has a persistent
field set to ``uuid`` has been completed.
Args:
uuid (str): UUID of the persistent control packet that has been
completed
:returns: :class:`Provider <Provider>`
"""
self._emit_queue.put((
'event',
{'subtype': 'completed', 'data': uuid, 't': timestamp()}
))
@for_own_methods(on_ready)
class Consumer(Client):
"""
This class should be used to create a Heimdallr consumer.
It inherits most of its functionality but it also
automatically connects to the consumer namespace and
provides some convenience functions.
"""
_namespace = '/consumer'
def send_control(self, uuid, subtype, data=None, persistent=False):
""" Emit a Heimdallr control packet.
This will send a control to the provider specified by
``uuid``. ``data`` must adhere to the provider's schema
for the given ``subtype``. If `persistent` is ``True``,
the control packet will be sent immediately and then
again every time the provider connects until the
provider signals the Heimdallr server that it has
completed the control.
Args:
uuid (str): UUID of the provider to send the control packet to
subtype (str): The control packet subtype
data: The control packet data
persistent (bool): Whether or not the control should persist
:returns: :class:`Consumer <Consumer>`
"""
self._emit_queue.put((
'control',
{
'provider': uuid,
'subtype': subtype,
'data': data,
'persistent': persistent
}
))
def subscribe(self, uuid):
""" Subscribe to a provider.
A consumer must subscribe to a provider before it
receives event or sensor packets from the provider
or can send control packets to the provider.
Args:
uuid (str): UUID of the provider to subscribe to
:returns: :class:`Consumer <Consumer>`
"""
self._emit_queue.put((
'subscribe',
{'provider': uuid}
))
def unsubscribe(self, uuid):
""" Unsubscribe from a provider.
The consumer will no longer receive packets from the
provider or be able to send it controls. This will
be done automatically by the Heimdallr server on
disconnect.
Args:
            uuid (str): UUID of the provider to unsubscribe from
:returns: :class:`Consumer <Consumer>`
"""
self._emit_queue.put((
'unsubscribe',
{'provider': uuid}
))
def set_filter(self, uuid, filter_):
""" Control which event and sensor subtypes to hear from provider.
Set which packet subtypes you want to hear from the provider.
`filter` should be a dictionary with the keys `event` and/or
`sensor`. The value of those fields should be an array of
strings of the subtypes that you want to hear for the
provider given by `uuid`.
Args:
uuid (str): UUID of the provider to filter packets from
filter_ (dict): Dictionary containing event and/or sensor packet
subtypes that you want to receive
:returns: :class:`Consumer <Consumer>`
"""
filter_['provider'] = uuid
self._emit_queue.put((
'setFilter',
filter_
))
def get_state(self, uuid, subtypes):
""" Get the current state of a provider.
For each event packet subtype in `subtypes`, the most recent
event packet of that subtype will be sent to the consumer by
the Heimdallr server.
Args:
uuid (str): UUID of the provider to get the state of
subtypes (list): Event subtypes to get the state of
:returns: :class:`Consumer <Consumer>`
"""
self._emit_queue.put((
'getState',
{'provider': uuid, 'subtypes': subtypes}
))
def join_stream(self, uuid):
""" Join binary data stream from a provider.
If this is the first consumer to join the stream of
a provider, the Heimdallr server will send a
``{'stream': 'start'}`` control packet to the provider.
Args:
uuid (str): UUID of the provider to join the stream of
:returns: :class:`Consumer <Consumer>`
"""
self._emit_queue.put((
'joinStream',
{'provider': uuid}
))
def leave_stream(self, uuid):
""" Leave binary data stream for a provider.
If this is the last consumer to leave the stream for a
provider the Heimdallr server will send a
``{'stream': 'stop'}`` control packet to the provider.
This will be done automatically by the Heimdallr server
on disconnect.
Args:
uuid (str): UUID of the provider to leave the stream of
:returns: :class:`Consumer <Consumer>`
"""
self._emit_queue.put((
'leaveStream',
{'provider': uuid}
))
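# Minimal usage sketch (an assumption-laden illustration, not part of the library):
# it presumes a reachable Heimdallr server at settings.URL, valid tokens/UUIDs, and
# uses placeholder names and message names throughout.
#
#     provider = Provider('provider-token').connect()
#     provider.send_event('status', {'battery': 87})
#     provider.run(1)  # pump the socket for one second
#
#     consumer = Consumer('consumer-token').connect()
#     consumer.subscribe('provider-uuid')
#
#     @consumer.on('event')
#     def handle_event(packet):
#         print packet
#
#     consumer.run()  # blocks; pass seconds= or event= to bound the loop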
|
|
"""
Essential implementation of the Store interface defined by RDF lib.
"""
from django.db.utils import IntegrityError
import rdflib
from rdflib.store import VALID_STORE
from rdflib.term import Literal, Identifier
from rdflib_django import models
from rdflib_django.models import NamespaceModel
DEFAULT_STORE = "Default Store"
DEFAULT_NAMESPACES = (
("xml", u"http://www.w3.org/XML/1998/namespace"),
("rdf", u"http://www.w3.org/1999/02/22-rdf-syntax-ns#"),
("rdfs", u"http://www.w3.org/2000/01/rdf-schema#")
)
def _get_query_sets_for_object(o):
"""
Determines the correct query set based on the object.
If the object is a literal, it will return a query set over LiteralStatements.
If the object is a URIRef or BNode, it will return a query set over Statements.
If the object is unknown, it will return both the LiteralStatement and Statement query sets.
This method always returns a list of size at least one.
"""
if o:
if isinstance(o, Literal):
query_sets = [models.LiteralStatement.objects]
else:
query_sets = [models.URIStatement.objects]
else:
query_sets = [models.URIStatement.objects, models.LiteralStatement.objects]
return query_sets
def _get_named_graph(context):
"""
Returns the named graph for this context.
"""
if context is None:
return None
return models.NamedGraph.objects.get_or_create(identifier=context.identifier)[0]
class DjangoStore(rdflib.store.Store): # pylint: disable=abstract-method
"""
    RDFlib Store implementation that uses Django Models for storage and retrieval.
>>> g = rdflib.Graph('Django')
The implementation is context aware, and uses Django transactions.
>>> g.store.context_aware
True
>>> g.store.transaction_aware
False
    The implementation does not support formulas.
>>> g.store.formula_aware
False
The implementation provides a single store with the identifier DEFAULT_STORE. This store
    is always present and does not need to be opened.
>>> g.store.identifier
'Default Store'
Using other stores is not allowed
>>> g = DjangoStore(identifier='HelloWorld')
Traceback (most recent call last):
...
ValueError: multiple stores are not allowed
"""
context_aware = True
formula_aware = False
transaction_aware = False
def __init__(self, configuration=None, identifier=DEFAULT_STORE):
if identifier and identifier != DEFAULT_STORE:
raise ValueError("multiple stores are not allowed")
self.identifier = DEFAULT_STORE
super(DjangoStore, self).__init__(configuration, identifier)
self.open()
def open(self, configuration=None, create=False):
"""
Opens the underlying store. This is only necessary when opening
a store with another identifier than the default identifier.
>>> g = rdflib.Graph('Django')
>>> g.open(configuration=None, create=False) == rdflib.store.VALID_STORE
True
"""
return VALID_STORE
def destroy(self, configuration=None):
"""
Completely destroys a store and all the contexts and triples in the store.
>>> store = DjangoStore()
>>> g = rdflib.Graph(store=store)
>>> g.open(configuration=None, create=True) == rdflib.store.VALID_STORE
True
>>> g.open(configuration=None, create=False) == rdflib.store.VALID_STORE
True
>>> g.destroy(configuration=None)
>>> g.open(configuration=None, create=False) == rdflib.store.VALID_STORE
True
"""
models.NamedGraph.objects.all().delete()
models.URIStatement.objects.all().delete()
models.LiteralStatement.objects.all().delete()
def add(self, (s, p, o), context, quoted=False):
"""
Adds a triple to the store.
>>> from rdflib.term import URIRef
>>> from rdflib.namespace import RDF
>>> subject = URIRef('http://zoowizard.org/resource/Artis')
>>> object = URIRef('http://schema.org/Zoo')
>>> g = rdflib.Graph('Django')
>>> g.add((subject, RDF.type, object))
>>> len(g)
1
"""
assert isinstance(s, Identifier)
assert isinstance(p, Identifier)
assert isinstance(o, Identifier)
assert not quoted
named_graph = _get_named_graph(context)
query_set = _get_query_sets_for_object(o)[0]
query_set.get_or_create(
subject=s,
predicate=p,
object=o,
context=named_graph,
)
def remove(self, (s, p, o), context=None):
"""
Removes a triple from the store.
"""
named_graph = _get_named_graph(context)
query_sets = _get_query_sets_for_object(o)
filter_parameters = dict()
if named_graph is not None:
filter_parameters['context_id'] = named_graph.id
if s:
filter_parameters['subject'] = s
if p:
filter_parameters['predicate'] = p
if o:
filter_parameters['object'] = o
query_sets = [qs.filter(**filter_parameters) for qs in query_sets]
for qs in query_sets:
qs.delete()
def triples(self, (s, p, o), context=None):
"""
Returns all triples in the current store.
"""
named_graph = _get_named_graph(context)
query_sets = _get_query_sets_for_object(o)
filter_parameters = dict()
if named_graph is not None:
filter_parameters['context_id'] = named_graph.id
if s:
filter_parameters['subject'] = s
if p:
filter_parameters['predicate'] = p
if o:
filter_parameters['object'] = o
query_sets = [qs.filter(**filter_parameters) for qs in query_sets]
for qs in query_sets:
for statement in qs:
triple = statement.as_triple()
yield triple, context
def __len__(self, context=None):
"""
Returns the number of statements in this Graph.
"""
named_graph = _get_named_graph(context)
if named_graph is not None:
return (models.LiteralStatement.objects.filter(context_id=named_graph.id).count()
+ models.URIStatement.objects.filter(context_id=named_graph.id).count())
else:
return (models.URIStatement.objects.values('subject', 'predicate', 'object').distinct().count()
+ models.LiteralStatement.objects.values('subject', 'predicate', 'object').distinct().count())
####################
# CONTEXT MANAGEMENT
def contexts(self, triple=None):
for c in models.NamedGraph.objects.all():
yield c.identifier
######################
# NAMESPACE MANAGEMENT
def bind(self, prefix, namespace):
for ns in DEFAULT_NAMESPACES:
if ns[0] == prefix or unicode(ns[1]) == unicode(namespace):
return
try:
ns = NamespaceModel(prefix=prefix, uri=namespace)
ns.save()
except IntegrityError:
NamespaceModel.objects.filter(prefix=prefix).delete()
NamespaceModel.objects.filter(uri=namespace).delete()
NamespaceModel(prefix=prefix, uri=namespace).save()
def prefix(self, namespace):
try:
ns = NamespaceModel.objects.get(uri=namespace)
return ns.prefix
except NamespaceModel.DoesNotExist:
return None
def namespace(self, prefix):
try:
ns = NamespaceModel.objects.get(prefix=prefix)
return ns.uri
except NamespaceModel.DoesNotExist:
return None
def namespaces(self):
for ns in NamespaceModel.objects.all():
yield ns.prefix, ns.uri
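# Usage sketch (assumes a configured Django project with the rdflib_django models
# migrated; it mirrors the doctests in the DjangoStore docstring above):
#
#     >>> import rdflib
#     >>> from rdflib.namespace import RDF
#     >>> g = rdflib.Graph('Django')
#     >>> artis = rdflib.URIRef('http://zoowizard.org/resource/Artis')
#     >>> g.add((artis, RDF.type, rdflib.URIRef('http://schema.org/Zoo')))
#     >>> len(g)
#     1
#
# Literal objects are routed to LiteralStatement rows and URIRef/BNode objects to
# URIStatement rows by _get_query_sets_for_object.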
|
|
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import logging
from dataclasses import dataclass
from typing import Optional, Tuple
from pants.base.build_environment import get_buildroot
from pants.base.exception_sink import ExceptionSink
from pants.base.exiter import PANTS_FAILED_EXIT_CODE, PANTS_SUCCEEDED_EXIT_CODE, ExitCode
from pants.base.specs import Specs
from pants.base.specs_parser import SpecsParser
from pants.build_graph.build_configuration import BuildConfiguration
from pants.engine.environment import CompleteEnvironment
from pants.engine.internals import native_engine
from pants.engine.internals.native_engine import PySessionCancellationLatch
from pants.engine.internals.scheduler import ExecutionError
from pants.engine.internals.session import SessionValues
from pants.engine.streaming_workunit_handler import (
StreamingWorkunitHandler,
WorkunitsCallback,
WorkunitsCallbackFactories,
)
from pants.engine.target import RegisteredTargetTypes
from pants.engine.unions import UnionMembership
from pants.goal.run_tracker import RunTracker
from pants.help.help_info_extracter import HelpInfoExtracter
from pants.help.help_printer import HelpPrinter
from pants.init.engine_initializer import EngineInitializer, GraphScheduler, GraphSession
from pants.init.options_initializer import OptionsInitializer
from pants.init.specs_calculator import calculate_specs
from pants.option.arg_splitter import HelpRequest
from pants.option.options import Options
from pants.option.options_bootstrapper import OptionsBootstrapper
from pants.util.contextutil import maybe_profiled
logger = logging.getLogger(__name__)
@dataclass
class LocalPantsRunner:
"""Handles a single pants invocation running in the process-local context.
options: The parsed options for this run.
build_config: The parsed build configuration for this run.
run_tracker: A tracker for metrics for the run.
specs: The specs for this run, i.e. either the address or filesystem specs.
    graph_session: A GraphSession instance for graph reuse.
    profile_path: The profile path, if any (from the `PANTS_PROFILE` env var).
"""
options: Options
options_bootstrapper: OptionsBootstrapper
build_config: BuildConfiguration
run_tracker: RunTracker
specs: Specs
graph_session: GraphSession
union_membership: UnionMembership
profile_path: Optional[str]
@classmethod
def _init_graph_session(
cls,
options_initializer: OptionsInitializer,
options_bootstrapper: OptionsBootstrapper,
build_config: BuildConfiguration,
env: CompleteEnvironment,
run_id: str,
options: Options,
scheduler: Optional[GraphScheduler] = None,
cancellation_latch: Optional[PySessionCancellationLatch] = None,
) -> GraphSession:
native_engine.maybe_set_panic_handler()
graph_scheduler_helper = scheduler or EngineInitializer.setup_graph(
options_bootstrapper, build_config, env
)
with options_initializer.handle_unknown_flags(options_bootstrapper, env, raise_=True):
global_options = options.for_global_scope()
return graph_scheduler_helper.new_session(
run_id,
dynamic_ui=global_options.dynamic_ui,
use_colors=global_options.get("colors", True),
session_values=SessionValues(
{
OptionsBootstrapper: options_bootstrapper,
CompleteEnvironment: env,
}
),
cancellation_latch=cancellation_latch,
)
@classmethod
def create(
cls,
env: CompleteEnvironment,
options_bootstrapper: OptionsBootstrapper,
options_initializer: Optional[OptionsInitializer] = None,
scheduler: Optional[GraphScheduler] = None,
cancellation_latch: Optional[PySessionCancellationLatch] = None,
) -> LocalPantsRunner:
"""Creates a new LocalPantsRunner instance by parsing options.
By the time this method runs, logging will already have been initialized in either
PantsRunner or DaemonPantsRunner.
:param env: The environment for this run.
:param options_bootstrapper: The OptionsBootstrapper instance to reuse.
:param scheduler: If being called from the daemon, a warmed scheduler to use.
"""
options_initializer = options_initializer or OptionsInitializer(options_bootstrapper, env)
build_config, options = options_initializer.build_config_and_options(
options_bootstrapper, env, raise_=True
)
run_tracker = RunTracker(options)
union_membership = UnionMembership.from_rules(build_config.union_rules)
# If we're running with the daemon, we'll be handed a warmed Scheduler, which we use
# to initialize a session here.
graph_session = cls._init_graph_session(
options_initializer,
options_bootstrapper,
build_config,
env,
run_tracker.run_id,
options,
scheduler,
cancellation_latch,
)
# Option values are usually computed lazily on demand, but command line options are
# eagerly computed for validation.
with options_initializer.handle_unknown_flags(options_bootstrapper, env, raise_=True):
for scope in options.scope_to_flags.keys():
options.for_scope(scope)
# Verify configs.
global_bootstrap_options = options_bootstrapper.bootstrap_options.for_global_scope()
if global_bootstrap_options.verify_config:
options.verify_configs(options_bootstrapper.config)
specs = calculate_specs(
options_bootstrapper=options_bootstrapper,
options=options,
build_root=get_buildroot(),
session=graph_session.scheduler_session,
)
profile_path = env.get("PANTS_PROFILE")
return cls(
options=options,
options_bootstrapper=options_bootstrapper,
build_config=build_config,
run_tracker=run_tracker,
specs=specs,
graph_session=graph_session,
union_membership=union_membership,
profile_path=profile_path,
)
def _perform_run(self, goals: Tuple[str, ...]) -> ExitCode:
global_options = self.options.for_global_scope()
if not global_options.get("loop", False):
return self._perform_run_body(goals, poll=False)
iterations = global_options.loop_max
exit_code = PANTS_SUCCEEDED_EXIT_CODE
while iterations:
# NB: We generate a new "run id" per iteration of the loop in order to allow us to
# observe fresh values for Goals. See notes in `scheduler.rs`.
self.graph_session.scheduler_session.new_run_id()
try:
exit_code = self._perform_run_body(goals, poll=True)
except ExecutionError as e:
logger.warning(e)
iterations -= 1
return exit_code
def _perform_run_body(self, goals: Tuple[str, ...], poll: bool) -> ExitCode:
return self.graph_session.run_goal_rules(
union_membership=self.union_membership,
goals=goals,
specs=self.specs,
poll=poll,
poll_delay=(0.1 if poll else None),
)
def _finish_run(self, code: ExitCode) -> None:
"""Cleans up the run tracker."""
def _print_help(self, request: HelpRequest) -> ExitCode:
global_options = self.options.for_global_scope()
all_help_info = HelpInfoExtracter.get_all_help_info(
self.options,
self.union_membership,
self.graph_session.goal_consumed_subsystem_scopes,
RegisteredTargetTypes.create(self.build_config.target_types),
)
help_printer = HelpPrinter(
bin_name=global_options.pants_bin_name,
help_request=request,
all_help_info=all_help_info,
color=global_options.colors,
)
return help_printer.print_help()
def _get_workunits_callbacks(self) -> Tuple[WorkunitsCallback, ...]:
# Load WorkunitsCallbacks by requesting WorkunitsCallbackFactories, and then constructing
# a per-run instance of each WorkunitsCallback.
(workunits_callback_factories,) = self.graph_session.scheduler_session.product_request(
WorkunitsCallbackFactories, [self.union_membership]
)
return tuple(wcf.callback_factory() for wcf in workunits_callback_factories)
def run(self, start_time: float) -> ExitCode:
spec_parser = SpecsParser(get_buildroot())
specs = [str(spec_parser.parse_spec(spec)) for spec in self.options.specs]
self.run_tracker.start(run_start_time=start_time, specs=specs)
with maybe_profiled(self.profile_path):
global_options = self.options.for_global_scope()
goals = tuple(self.options.goals)
streaming_reporter = StreamingWorkunitHandler(
self.graph_session.scheduler_session,
run_tracker=self.run_tracker,
specs=self.specs,
options_bootstrapper=self.options_bootstrapper,
callbacks=self._get_workunits_callbacks(),
report_interval_seconds=global_options.streaming_workunits_report_interval,
pantsd=global_options.pantsd,
)
with streaming_reporter:
if self.options.help_request:
return self._print_help(self.options.help_request)
if not goals:
return PANTS_SUCCEEDED_EXIT_CODE
try:
engine_result = self._perform_run(goals)
except Exception as e:
ExceptionSink.log_exception(e)
engine_result = PANTS_FAILED_EXIT_CODE
metrics = self.graph_session.scheduler_session.metrics()
self.run_tracker.set_pantsd_scheduler_metrics(metrics)
self.run_tracker.end_run(engine_result)
return engine_result
|
|
#!/usr/bin/env python3
'''
This script extracts kerning and groups from a compiled OTF and injects
them into a new UFO file (which is created via `tx`).
It requires the Adobe FDK (tx) to be installed, as well as the module
`getKerningPairsFromOTF.py`, which is distributed in the same folder.
usage:
python convertKernedOTFtoKernedUFO.py font.otf
'''
import os
import sys
import string
import shutil
import subprocess
import argparse
from argparse import RawTextHelpFormatter
from defcon import Font
from fontTools import ttLib
import getKerningPairsFromOTF
kKernFeatureTag = 'kern'
compressSinglePairs = True
# Switch to control whether single pairs shall be written plainly,
# or in a more space-saving notation (using enum).
def sortGlyphs(glyphlist):
'''
    Sort glyphs so that glyphs from the exceptionList, or glyphs
    starting with 'uni' names, do not end up as the key (first) glyph.
    An infinite loop is avoided in case all of the glyphs match the
    above-mentioned properties.
'''
exceptionList = 'dotlessi dotlessj kgreenlandic ae oe AE OE uhorn'.split()
glyphs = sorted(glyphlist)
for i in range(len(glyphs)):
if glyphs[0] in exceptionList or glyphs[0].startswith('uni'):
glyphs.insert(len(glyphs), glyphs.pop(0))
else:
continue
return glyphs
def nameClass(glyphlist, flag):
glyphs = sortGlyphs(glyphlist)
if len(glyphs) == 0:
name = 'error!!!'
print('Found empty class.')
else:
name = glyphs[0]
if name in string.ascii_lowercase:
case = '_LC'
elif name in string.ascii_uppercase:
case = '_UC'
else:
case = ''
return '@MMK%s%s%s' % (flag, name, case)
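# Worked example (hypothetical glyph names, traced through the two helpers above):
#
#     sortGlyphs(['uni00E6', 'a', 'ae'])  -> ['a', 'ae', 'uni00E6']
#         ('a' sorts first and is an acceptable key glyph, so nothing is rotated to the end)
#     sortGlyphs(['ae', 'b'])             -> ['b', 'ae']
#         ('ae' is in the exception list, so it is moved behind 'b')
#
#     nameClass(['a', 'ae', 'uni00E6'], '_L_')  -> '@MMK_L_a_LC'
#         (the key glyph 'a' is a single lowercase letter, hence the '_LC' suffix)
#     nameClass(['ae', 'b'], '_R_')             -> '@MMK_R_b_LC'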
def makeKernObjects(fontPath):
f = getKerningPairsFromOTF.OTFKernReader(fontPath)
groups = {}
kerning = {}
for kerningClass in f.allLeftClasses:
glyphs = sortGlyphs(f.allLeftClasses[kerningClass])
className = nameClass(glyphs, '_L_')
groups.setdefault(className, glyphs)
for kerningClass in f.allRightClasses:
glyphs = sortGlyphs(f.allRightClasses[kerningClass])
className = nameClass(glyphs, '_R_')
groups.setdefault(className, glyphs)
for (leftClass, rightClass), value in sorted(f.classPairs.items()):
leftGlyphs = sortGlyphs(f.allLeftClasses[leftClass])
leftClassName = nameClass(leftGlyphs, '_L_')
rightGlyphs = sortGlyphs(f.allRightClasses[rightClass])
rightClassName = nameClass(rightGlyphs, '_R_')
kerning[(leftClassName, rightClassName)] = value
kerning.update(f.singlePairs)
return groups, kerning
def injectKerningToUFO(ufoPath, groups, kerning):
ufo = Font(ufoPath)
ufo.kerning.clear()
ufo.groups.clear()
print('Injecting OTF groups and kerning into %s ...' % ufoPath)
ufo.groups.update(groups)
ufo.kerning.update(kerning)
ufo.save()
def injectOS2TableToUFO(otfPath, ufoPath):
otfFont = ttLib.TTFont(otfPath)
os2Table = otfFont['OS/2']
ufo = Font(ufoPath)
print('Injecting OS/2 table into %s ...' % ufoPath)
ufo.info.ascender = os2Table.sTypoAscender
ufo.info.capHeight = os2Table.sCapHeight
ufo.info.descender = os2Table.sTypoDescender
ufo.info.xHeight = os2Table.sxHeight
ufo.info.openTypeOS2VendorID = os2Table.achVendID
ufo.info.openTypeOS2TypoAscender = os2Table.sTypoAscender
ufo.info.openTypeOS2TypoDescender = os2Table.sTypoDescender
ufo.info.openTypeOS2TypoLineGap = os2Table.sTypoLineGap
ufo.info.openTypeOS2StrikeoutPosition = os2Table.yStrikeoutPosition
ufo.info.openTypeOS2StrikeoutSize = os2Table.yStrikeoutSize
ufo.info.openTypeOS2SubscriptXOffset = os2Table.ySubscriptXOffset
ufo.info.openTypeOS2SubscriptXSize = os2Table.ySubscriptXSize
ufo.info.openTypeOS2SubscriptYOffset = os2Table.ySubscriptYOffset
ufo.info.openTypeOS2SubscriptYSize = os2Table.ySubscriptYSize
ufo.info.openTypeOS2SuperscriptXOffset = os2Table.ySuperscriptXOffset
ufo.info.openTypeOS2SuperscriptXSize = os2Table.ySuperscriptXSize
ufo.info.openTypeOS2SuperscriptYOffset = os2Table.ySuperscriptYOffset
ufo.info.openTypeOS2SuperscriptYSize = os2Table.ySuperscriptYSize
ufo.save()
def convertOTFtoUFO(otfPath, overwrite, ignore_errors):
ufoPath = '%s.ufo' % os.path.splitext(otfPath)[0]
if os.path.exists(ufoPath):
if overwrite is True:
shutil.rmtree(ufoPath)
else:
print()
print(
'%s already exists. '
'Use the -o flag to overwrite the existing file.' % ufoPath)
sys.exit()
print(
'Creating %s from %s ...' % (ufoPath, otfPath))
txCommand = ['tx', '-ufo', otfPath, ufoPath]
txProcess = subprocess.Popen(
txCommand,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
output, errors = txProcess.communicate()
if errors:
if ignore_errors:
return ufoPath
else:
print(errors)
print(
'A UFO file may now exist, but since tx complained, '
'no further steps were taken. '
'Use the -i flag to retry ignoring tx errors.')
sys.exit()
return ufoPath
errorMessage = '''
ERROR:
No valid font and/or UFO provided.
Use the script like this:
python %s font.otf
''' % os.path.basename(__file__)
def main():
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=RawTextHelpFormatter)
parser.add_argument(
'fontfile',
help='input OTF file')
parser.add_argument(
'-i', '--ignore_tx',
action='store_true',
help='ignore TX errors')
parser.add_argument(
'-o', '--overwrite',
action='store_true',
help='overwrite existing UFO')
args = parser.parse_args()
assumedFontPath = args.fontfile
ignore_errors = args.ignore_tx
overwrite = args.overwrite
if (
os.path.exists(assumedFontPath) and
os.path.splitext(assumedFontPath)[1].lower() in ['.otf', '.ttf']
):
fontPath = assumedFontPath
groups, kerning = makeKernObjects(fontPath)
ufoPath = convertOTFtoUFO(fontPath, overwrite, ignore_errors)
injectKerningToUFO(ufoPath, groups, kerning)
injectOS2TableToUFO(fontPath, ufoPath)
print('done')
else:
print(errorMessage)
if __name__ == "__main__":
main()
|
|
# Copyright (c) 2009-2011 Reza Lotun http://reza.lotun.name/
# Copyright (c) 2011 Jann Kleen
# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
This module provides an interface to the Elastic Compute Cloud (EC2)
Auto Scaling service.
"""
import base64
import boto
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo, get_regions, load_regions
from boto.ec2.autoscale.request import Request
from boto.ec2.autoscale.launchconfig import LaunchConfiguration
from boto.ec2.autoscale.group import AutoScalingGroup
from boto.ec2.autoscale.group import ProcessType
from boto.ec2.autoscale.activity import Activity
from boto.ec2.autoscale.policy import AdjustmentType
from boto.ec2.autoscale.policy import MetricCollectionTypes
from boto.ec2.autoscale.policy import ScalingPolicy
from boto.ec2.autoscale.policy import TerminationPolicies
from boto.ec2.autoscale.instance import Instance
from boto.ec2.autoscale.scheduled import ScheduledUpdateGroupAction
from boto.ec2.autoscale.tag import Tag
from boto.ec2.autoscale.limits import AccountLimits
from boto.compat import six
RegionData = load_regions().get('autoscaling', {})
def regions():
"""
Get all available regions for the Auto Scaling service.
:rtype: list
:return: A list of :class:`boto.RegionInfo` instances
"""
return get_regions('autoscaling', connection_cls=AutoScaleConnection)
def connect_to_region(region_name, **kw_params):
"""
Given a valid region name, return a
:class:`boto.ec2.autoscale.AutoScaleConnection`.
:param str region_name: The name of the region to connect to.
:rtype: :class:`boto.ec2.AutoScaleConnection` or ``None``
:return: A connection to the given region, or None if an invalid region
name is given
"""
for region in regions():
if region.name == region_name:
return region.connect(**kw_params)
return None
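# Usage sketch (the standard boto pattern; the group/config names, AMI id, key name and
# zone below are placeholders, and credentials are resolved from the boto config/environment):
#
#     conn = connect_to_region('us-east-1')
#     lc = LaunchConfiguration(name='my-launch-config', image_id='ami-12345678',
#                              key_name='my-key', instance_type='m1.small')
#     conn.create_launch_configuration(lc)
#     ag = AutoScalingGroup(group_name='my-group', availability_zones=['us-east-1a'],
#                           launch_config=lc, min_size=1, max_size=4, connection=conn)
#     conn.create_auto_scaling_group(ag)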
class AutoScaleConnection(AWSQueryConnection):
APIVersion = boto.config.get('Boto', 'autoscale_version', '2011-01-01')
DefaultRegionEndpoint = boto.config.get('Boto', 'autoscale_endpoint',
'autoscaling.us-east-1.amazonaws.com')
DefaultRegionName = boto.config.get('Boto', 'autoscale_region_name',
'us-east-1')
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=True, port=None, proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None, debug=0,
https_connection_factory=None, region=None, path='/',
security_token=None, validate_certs=True, profile_name=None,
use_block_device_types=False):
"""
Init method to create a new connection to the AutoScaling service.
B{Note:} The host argument is overridden by the host specified in the
boto configuration file.
"""
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint,
AutoScaleConnection)
self.region = region
self.use_block_device_types = use_block_device_types
super(AutoScaleConnection, self).__init__(aws_access_key_id,
aws_secret_access_key,
is_secure, port, proxy, proxy_port,
proxy_user, proxy_pass,
self.region.endpoint, debug,
https_connection_factory, path=path,
security_token=security_token,
validate_certs=validate_certs,
profile_name=profile_name)
def _required_auth_capability(self):
return ['hmac-v4']
def build_list_params(self, params, items, label):
"""
Items is a list of dictionaries or strings::
[
{
'Protocol' : 'HTTP',
'LoadBalancerPort' : '80',
'InstancePort' : '80'
},
..
] etc.
or::
['us-east-1b',...]
"""
# different from EC2 list params
for i in range(1, len(items) + 1):
if isinstance(items[i - 1], dict):
for k, v in six.iteritems(items[i - 1]):
if isinstance(v, dict):
for kk, vv in six.iteritems(v):
params['%s.member.%d.%s.%s' % (label, i, k, kk)] = vv
else:
params['%s.member.%d.%s' % (label, i, k)] = v
elif isinstance(items[i - 1], six.string_types):
params['%s.member.%d' % (label, i)] = items[i - 1]
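    # For example (illustrative), given params = {}:
    #     build_list_params(params, ['us-east-1b', 'us-east-1c'], 'AvailabilityZones')
    # yields
    #     {'AvailabilityZones.member.1': 'us-east-1b',
    #      'AvailabilityZones.member.2': 'us-east-1c'}
    # while a dict item such as {'Protocol': 'HTTP', 'LoadBalancerPort': '80'} under the
    # label 'Listeners' becomes
    #     'Listeners.member.1.Protocol' = 'HTTP' and 'Listeners.member.1.LoadBalancerPort' = '80'.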
def _update_group(self, op, as_group):
params = {'AutoScalingGroupName': as_group.name,
'LaunchConfigurationName': as_group.launch_config_name,
'MinSize': as_group.min_size,
'MaxSize': as_group.max_size}
# get availability zone information (required param)
zones = as_group.availability_zones
self.build_list_params(params, zones, 'AvailabilityZones')
if as_group.desired_capacity is not None:
params['DesiredCapacity'] = as_group.desired_capacity
if as_group.vpc_zone_identifier:
params['VPCZoneIdentifier'] = as_group.vpc_zone_identifier
if as_group.health_check_period:
params['HealthCheckGracePeriod'] = as_group.health_check_period
if as_group.health_check_type:
params['HealthCheckType'] = as_group.health_check_type
if as_group.default_cooldown:
params['DefaultCooldown'] = as_group.default_cooldown
if as_group.placement_group:
params['PlacementGroup'] = as_group.placement_group
if as_group.instance_id:
params['InstanceId'] = as_group.instance_id
if as_group.termination_policies:
self.build_list_params(params, as_group.termination_policies,
'TerminationPolicies')
if op.startswith('Create'):
# you can only associate load balancers with an autoscale
# group at creation time
if as_group.load_balancers:
self.build_list_params(params, as_group.load_balancers,
'LoadBalancerNames')
if as_group.tags:
for i, tag in enumerate(as_group.tags):
tag.build_params(params, i + 1)
return self.get_object(op, params, Request)
def attach_instances(self, name, instance_ids):
"""
Attach instances to an autoscaling group.
"""
params = {
'AutoScalingGroupName': name,
}
self.build_list_params(params, instance_ids, 'InstanceIds')
return self.get_status('AttachInstances', params)
def create_auto_scaling_group(self, as_group):
"""
Create auto scaling group.
"""
return self._update_group('CreateAutoScalingGroup', as_group)
def delete_auto_scaling_group(self, name, force_delete=False):
"""
Deletes the specified auto scaling group if the group has no instances
and no scaling activities in progress.
"""
if(force_delete):
params = {'AutoScalingGroupName': name, 'ForceDelete': 'true'}
else:
params = {'AutoScalingGroupName': name}
return self.get_object('DeleteAutoScalingGroup', params, Request)
def create_launch_configuration(self, launch_config):
"""
Creates a new Launch Configuration.
:type launch_config: :class:`boto.ec2.autoscale.launchconfig.LaunchConfiguration`
:param launch_config: LaunchConfiguration object.
"""
params = {'ImageId': launch_config.image_id,
'LaunchConfigurationName': launch_config.name,
'InstanceType': launch_config.instance_type}
if launch_config.key_name:
params['KeyName'] = launch_config.key_name
if launch_config.user_data:
user_data = launch_config.user_data
if isinstance(user_data, six.text_type):
user_data = user_data.encode('utf-8')
params['UserData'] = base64.b64encode(user_data).decode('utf-8')
if launch_config.kernel_id:
params['KernelId'] = launch_config.kernel_id
if launch_config.ramdisk_id:
params['RamdiskId'] = launch_config.ramdisk_id
if launch_config.block_device_mappings:
[x.autoscale_build_list_params(params) for x in launch_config.block_device_mappings]
if launch_config.security_groups:
self.build_list_params(params, launch_config.security_groups,
'SecurityGroups')
if launch_config.instance_monitoring:
params['InstanceMonitoring.Enabled'] = 'true'
else:
params['InstanceMonitoring.Enabled'] = 'false'
if launch_config.spot_price is not None:
params['SpotPrice'] = str(launch_config.spot_price)
if launch_config.instance_profile_name is not None:
params['IamInstanceProfile'] = launch_config.instance_profile_name
if launch_config.ebs_optimized:
params['EbsOptimized'] = 'true'
else:
params['EbsOptimized'] = 'false'
if launch_config.associate_public_ip_address is True:
params['AssociatePublicIpAddress'] = 'true'
elif launch_config.associate_public_ip_address is False:
params['AssociatePublicIpAddress'] = 'false'
if launch_config.volume_type:
params['VolumeType'] = launch_config.volume_type
if launch_config.delete_on_termination:
params['DeleteOnTermination'] = 'true'
else:
params['DeleteOnTermination'] = 'false'
if launch_config.iops:
params['Iops'] = launch_config.iops
return self.get_object('CreateLaunchConfiguration', params,
Request, verb='POST')
def get_account_limits(self):
"""
Returns the limits for the Auto Scaling resources currently granted for
your AWS account.
"""
params = {}
return self.get_object('DescribeAccountLimits', params, AccountLimits)
def create_scaling_policy(self, scaling_policy):
"""
Creates a new Scaling Policy.
:type scaling_policy: :class:`boto.ec2.autoscale.policy.ScalingPolicy`
:param scaling_policy: ScalingPolicy object.
"""
params = {'AdjustmentType': scaling_policy.adjustment_type,
'AutoScalingGroupName': scaling_policy.as_name,
'PolicyName': scaling_policy.name,
'ScalingAdjustment': scaling_policy.scaling_adjustment}
if scaling_policy.adjustment_type == "PercentChangeInCapacity" and \
scaling_policy.min_adjustment_step is not None:
params['MinAdjustmentStep'] = scaling_policy.min_adjustment_step
if scaling_policy.cooldown is not None:
params['Cooldown'] = scaling_policy.cooldown
return self.get_object('PutScalingPolicy', params, Request)
def delete_launch_configuration(self, launch_config_name):
"""
Deletes the specified LaunchConfiguration.
The specified launch configuration must not be attached to an Auto
Scaling group. Once this call completes, the launch configuration is no
longer available for use.
"""
params = {'LaunchConfigurationName': launch_config_name}
return self.get_object('DeleteLaunchConfiguration', params, Request)
def get_all_groups(self, names=None, max_records=None, next_token=None):
"""
Returns a full description of each Auto Scaling group in the given
list. This includes all Amazon EC2 instances that are members of the
group. If a list of names is not provided, the service returns the full
details of all Auto Scaling groups.
This action supports pagination by returning a token if there are more
pages to retrieve. To get the next page, call this action again with
the returned token as the NextToken parameter.
:type names: list
:param names: List of group names which should be searched for.
:type max_records: int
        :param max_records: Maximum number of groups to return.
:rtype: list
:returns: List of :class:`boto.ec2.autoscale.group.AutoScalingGroup`
instances.
"""
params = {}
if max_records:
params['MaxRecords'] = max_records
if next_token:
params['NextToken'] = next_token
if names:
self.build_list_params(params, names, 'AutoScalingGroupNames')
return self.get_list('DescribeAutoScalingGroups', params,
[('member', AutoScalingGroup)])
def get_all_launch_configurations(self, **kwargs):
"""
Returns a full description of the launch configurations given the
specified names.
If no names are specified, then the full details of all launch
configurations are returned.
:type names: list
:param names: List of configuration names which should be searched for.
:type max_records: int
        :param max_records: Maximum number of configurations to return.
:type next_token: str
:param next_token: If you have more results than can be returned
at once, pass in this parameter to page through all results.
:rtype: list
:returns: List of
:class:`boto.ec2.autoscale.launchconfig.LaunchConfiguration`
instances.
"""
params = {}
max_records = kwargs.get('max_records', None)
names = kwargs.get('names', None)
if max_records is not None:
params['MaxRecords'] = max_records
if names:
self.build_list_params(params, names, 'LaunchConfigurationNames')
next_token = kwargs.get('next_token')
if next_token:
params['NextToken'] = next_token
return self.get_list('DescribeLaunchConfigurations', params,
[('member', LaunchConfiguration)])
def get_all_activities(self, autoscale_group, activity_ids=None,
max_records=None, next_token=None):
"""
Get all activities for the given autoscaling group.
This action supports pagination by returning a token if there are more
pages to retrieve. To get the next page, call this action again with
the returned token as the NextToken parameter
:type autoscale_group: str or
:class:`boto.ec2.autoscale.group.AutoScalingGroup` object
:param autoscale_group: The auto scaling group to get activities on.
:type max_records: int
        :param max_records: Maximum number of activities to return.
:rtype: list
:returns: List of
:class:`boto.ec2.autoscale.activity.Activity` instances.
"""
name = autoscale_group
if isinstance(autoscale_group, AutoScalingGroup):
name = autoscale_group.name
params = {'AutoScalingGroupName': name}
if max_records:
params['MaxRecords'] = max_records
if next_token:
params['NextToken'] = next_token
if activity_ids:
self.build_list_params(params, activity_ids, 'ActivityIds')
return self.get_list('DescribeScalingActivities',
params, [('member', Activity)])
def get_termination_policies(self):
"""Gets all valid termination policies.
These values can then be used as the termination_policies arg
when creating and updating autoscale groups.
"""
return self.get_object('DescribeTerminationPolicyTypes',
{}, TerminationPolicies)
def delete_scheduled_action(self, scheduled_action_name,
autoscale_group=None):
"""
Deletes a previously scheduled action.
:type scheduled_action_name: str
:param scheduled_action_name: The name of the action you want
to delete.
:type autoscale_group: str
:param autoscale_group: The name of the autoscale group.
"""
params = {'ScheduledActionName': scheduled_action_name}
if autoscale_group:
params['AutoScalingGroupName'] = autoscale_group
return self.get_status('DeleteScheduledAction', params)
def terminate_instance(self, instance_id, decrement_capacity=True):
"""
Terminates the specified instance. The desired group size can
also be adjusted, if desired.
:type instance_id: str
:param instance_id: The ID of the instance to be terminated.
        :type decrement_capacity: bool
:param decrement_capacity: Whether to decrement the size of the
autoscaling group or not.
"""
params = {'InstanceId': instance_id}
if decrement_capacity:
params['ShouldDecrementDesiredCapacity'] = 'true'
else:
params['ShouldDecrementDesiredCapacity'] = 'false'
return self.get_object('TerminateInstanceInAutoScalingGroup', params,
Activity)
def delete_policy(self, policy_name, autoscale_group=None):
"""
Delete a policy.
:type policy_name: str
:param policy_name: The name or ARN of the policy to delete.
:type autoscale_group: str
:param autoscale_group: The name of the autoscale group.
"""
params = {'PolicyName': policy_name}
if autoscale_group:
params['AutoScalingGroupName'] = autoscale_group
return self.get_status('DeletePolicy', params)
def get_all_adjustment_types(self):
return self.get_list('DescribeAdjustmentTypes', {},
[('member', AdjustmentType)])
def get_all_autoscaling_instances(self, instance_ids=None,
max_records=None, next_token=None):
"""
Returns a description of each Auto Scaling instance in the instance_ids
list. If a list is not provided, the service returns the full details
of all instances up to a maximum of fifty.
This action supports pagination by returning a token if there are more
pages to retrieve. To get the next page, call this action again with
the returned token as the NextToken parameter.
:type instance_ids: list
:param instance_ids: List of Autoscaling Instance IDs which should be
searched for.
:type max_records: int
:param max_records: Maximum number of results to return.
:rtype: list
:returns: List of
:class:`boto.ec2.autoscale.instance.Instance` objects.
"""
params = {}
if instance_ids:
self.build_list_params(params, instance_ids, 'InstanceIds')
if max_records:
params['MaxRecords'] = max_records
if next_token:
params['NextToken'] = next_token
return self.get_list('DescribeAutoScalingInstances',
params, [('member', Instance)])
def get_all_metric_collection_types(self):
"""
Returns a list of metrics and a corresponding list of granularities
for each metric.
"""
return self.get_object('DescribeMetricCollectionTypes',
{}, MetricCollectionTypes)
def get_all_policies(self, as_group=None, policy_names=None,
max_records=None, next_token=None):
"""
Returns descriptions of what each policy does. This action supports
pagination. If the response includes a token, there are more records
available. To get the additional records, repeat the request with the
response token as the NextToken parameter.
If no group name or list of policy names are provided, all
available policies are returned.
:type as_group: str
:param as_group: The name of the
:class:`boto.ec2.autoscale.group.AutoScalingGroup` to filter for.
:type policy_names: list
:param policy_names: List of policy names which should be searched for.
:type max_records: int
        :param max_records: Maximum number of policies to return.
:type next_token: str
:param next_token: If you have more results than can be returned
at once, pass in this parameter to page through all results.
"""
params = {}
if as_group:
params['AutoScalingGroupName'] = as_group
if policy_names:
self.build_list_params(params, policy_names, 'PolicyNames')
if max_records:
params['MaxRecords'] = max_records
if next_token:
params['NextToken'] = next_token
return self.get_list('DescribePolicies', params,
[('member', ScalingPolicy)])
def get_all_scaling_process_types(self):
"""
Returns scaling process types for use in the ResumeProcesses and
SuspendProcesses actions.
"""
return self.get_list('DescribeScalingProcessTypes', {},
[('member', ProcessType)])
def suspend_processes(self, as_group, scaling_processes=None):
"""
Suspends Auto Scaling processes for an Auto Scaling group.
:type as_group: string
:param as_group: The auto scaling group to suspend processes on.
:type scaling_processes: list
:param scaling_processes: Processes you want to suspend. If omitted,
all processes will be suspended.
"""
params = {'AutoScalingGroupName': as_group}
if scaling_processes:
self.build_list_params(params, scaling_processes,
'ScalingProcesses')
return self.get_status('SuspendProcesses', params)
def resume_processes(self, as_group, scaling_processes=None):
"""
Resumes Auto Scaling processes for an Auto Scaling group.
:type as_group: string
:param as_group: The auto scaling group to resume processes on.
:type scaling_processes: list
:param scaling_processes: Processes you want to resume. If omitted, all
processes will be resumed.
"""
params = {'AutoScalingGroupName': as_group}
if scaling_processes:
self.build_list_params(params, scaling_processes,
'ScalingProcesses')
return self.get_status('ResumeProcesses', params)
def create_scheduled_group_action(self, as_group, name, time=None,
desired_capacity=None,
min_size=None, max_size=None,
start_time=None, end_time=None,
recurrence=None):
"""
        Creates a scheduled scaling action for an Auto Scaling group. If you
leave a parameter unspecified, the corresponding value remains
unchanged in the affected Auto Scaling group.
:type as_group: string
        :param as_group: The Auto Scaling group to schedule the action for.
:type name: string
:param name: Scheduled action name.
:type time: datetime.datetime
        :param time: The time for this action to start. (Deprecated)
:type desired_capacity: int
:param desired_capacity: The number of EC2 instances that should
be running in this group.
:type min_size: int
:param min_size: The minimum size for the new auto scaling group.
:type max_size: int
        :param max_size: The maximum size for the Auto Scaling group.
:type start_time: datetime.datetime
        :param start_time: The time for this action to start. When StartTime
            and EndTime are specified with Recurrence, they form the
            boundaries of when the recurring action will start and stop.
        :type end_time: datetime.datetime
        :param end_time: The time for this action to end. When StartTime and
            EndTime are specified with Recurrence, they form the boundaries
            of when the recurring action will start and stop.
        :type recurrence: string
        :param recurrence: The recurring schedule for this action, specified
            in Unix cron syntax format. EXAMPLE: '0 10 * * *'
"""
params = {'AutoScalingGroupName': as_group,
'ScheduledActionName': name}
if start_time is not None:
params['StartTime'] = start_time.isoformat()
if end_time is not None:
params['EndTime'] = end_time.isoformat()
if recurrence is not None:
params['Recurrence'] = recurrence
if time:
params['Time'] = time.isoformat()
if desired_capacity is not None:
params['DesiredCapacity'] = desired_capacity
if min_size is not None:
params['MinSize'] = min_size
if max_size is not None:
params['MaxSize'] = max_size
return self.get_status('PutScheduledUpdateGroupAction', params)
def get_all_scheduled_actions(self, as_group=None, start_time=None,
end_time=None, scheduled_actions=None,
max_records=None, next_token=None):
params = {}
if as_group:
params['AutoScalingGroupName'] = as_group
if scheduled_actions:
self.build_list_params(params, scheduled_actions,
'ScheduledActionNames')
if max_records:
params['MaxRecords'] = max_records
if next_token:
params['NextToken'] = next_token
return self.get_list('DescribeScheduledActions', params,
[('member', ScheduledUpdateGroupAction)])
def disable_metrics_collection(self, as_group, metrics=None):
"""
Disables monitoring of group metrics for the Auto Scaling group
specified in AutoScalingGroupName. You can specify the list of affected
metrics with the Metrics parameter.
"""
params = {'AutoScalingGroupName': as_group}
if metrics:
self.build_list_params(params, metrics, 'Metrics')
return self.get_status('DisableMetricsCollection', params)
def enable_metrics_collection(self, as_group, granularity, metrics=None):
"""
Enables monitoring of group metrics for the Auto Scaling group
specified in AutoScalingGroupName. You can specify the list of enabled
metrics with the Metrics parameter.
Auto scaling metrics collection can be turned on only if the
InstanceMonitoring.Enabled flag, in the Auto Scaling group's launch
configuration, is set to true.
        :type as_group: string
        :param as_group: The Auto Scaling group to enable metrics
            collection for.
:type granularity: string
:param granularity: The granularity to associate with the metrics to
collect. Currently, the only legal granularity is "1Minute".
:type metrics: string list
:param metrics: The list of metrics to collect. If no metrics are
specified, all metrics are enabled.
"""
params = {'AutoScalingGroupName': as_group,
'Granularity': granularity}
if metrics:
self.build_list_params(params, metrics, 'Metrics')
return self.get_status('EnableMetricsCollection', params)
def execute_policy(self, policy_name, as_group=None, honor_cooldown=None):
params = {'PolicyName': policy_name}
if as_group:
params['AutoScalingGroupName'] = as_group
if honor_cooldown:
params['HonorCooldown'] = honor_cooldown
return self.get_status('ExecutePolicy', params)
def put_notification_configuration(self, autoscale_group, topic, notification_types):
"""
Configures an Auto Scaling group to send notifications when
specified events take place.
:type autoscale_group: str or
:class:`boto.ec2.autoscale.group.AutoScalingGroup` object
:param autoscale_group: The Auto Scaling group to put notification
configuration on.
:type topic: str
:param topic: The Amazon Resource Name (ARN) of the Amazon Simple
Notification Service (SNS) topic.
:type notification_types: list
:param notification_types: The type of events that will trigger
the notification. Valid types are:
'autoscaling:EC2_INSTANCE_LAUNCH',
'autoscaling:EC2_INSTANCE_LAUNCH_ERROR',
'autoscaling:EC2_INSTANCE_TERMINATE',
'autoscaling:EC2_INSTANCE_TERMINATE_ERROR',
'autoscaling:TEST_NOTIFICATION'
"""
name = autoscale_group
if isinstance(autoscale_group, AutoScalingGroup):
name = autoscale_group.name
params = {'AutoScalingGroupName': name,
'TopicARN': topic}
self.build_list_params(params, notification_types, 'NotificationTypes')
return self.get_status('PutNotificationConfiguration', params)
def delete_notification_configuration(self, autoscale_group, topic):
"""
Deletes notifications created by put_notification_configuration.
:type autoscale_group: str or
:class:`boto.ec2.autoscale.group.AutoScalingGroup` object
        :param autoscale_group: The Auto Scaling group to remove the
            notification configuration from.
:type topic: str
:param topic: The Amazon Resource Name (ARN) of the Amazon Simple
Notification Service (SNS) topic.
"""
name = autoscale_group
if isinstance(autoscale_group, AutoScalingGroup):
name = autoscale_group.name
params = {'AutoScalingGroupName': name,
'TopicARN': topic}
return self.get_status('DeleteNotificationConfiguration', params)
def set_instance_health(self, instance_id, health_status,
should_respect_grace_period=True):
"""
Explicitly set the health status of an instance.
:type instance_id: str
:param instance_id: The identifier of the EC2 instance.
:type health_status: str
:param health_status: The health status of the instance.
"Healthy" means that the instance is healthy and should remain
in service. "Unhealthy" means that the instance is unhealthy.
Auto Scaling should terminate and replace it.
:type should_respect_grace_period: bool
:param should_respect_grace_period: If True, this call should
respect the grace period associated with the group.
"""
params = {'InstanceId': instance_id,
'HealthStatus': health_status}
if should_respect_grace_period:
params['ShouldRespectGracePeriod'] = 'true'
else:
params['ShouldRespectGracePeriod'] = 'false'
return self.get_status('SetInstanceHealth', params)
def set_desired_capacity(self, group_name, desired_capacity, honor_cooldown=False):
"""
Adjusts the desired size of the AutoScalingGroup by initiating scaling
activities. When reducing the size of the group, it is not possible to define
which Amazon EC2 instances will be terminated. This applies to any Auto Scaling
decisions that might result in terminating instances.
:type group_name: string
:param group_name: name of the auto scaling group
:type desired_capacity: integer
:param desired_capacity: new capacity setting for auto scaling group
:type honor_cooldown: boolean
        :param honor_cooldown: If True, wait for any cooldown period
            associated with the group to complete before setting the new
            capacity; by default the cooldown period is overridden.
"""
params = {'AutoScalingGroupName': group_name,
'DesiredCapacity': desired_capacity}
if honor_cooldown:
params['HonorCooldown'] = 'true'
return self.get_status('SetDesiredCapacity', params)
# Tag methods
def get_all_tags(self, filters=None, max_records=None, next_token=None):
"""
Lists the Auto Scaling group tags.
This action supports pagination by returning a token if there
are more pages to retrieve. To get the next page, call this
action again with the returned token as the NextToken
parameter.
:type filters: dict
:param filters: The value of the filter type used to identify
the tags to be returned. NOT IMPLEMENTED YET.
:type max_records: int
:param max_records: Maximum number of tags to return.
:rtype: list
:returns: List of :class:`boto.ec2.autoscale.tag.Tag`
instances.
"""
params = {}
if max_records:
params['MaxRecords'] = max_records
if next_token:
params['NextToken'] = next_token
return self.get_list('DescribeTags', params,
[('member', Tag)])
def create_or_update_tags(self, tags):
"""
Creates new tags or updates existing tags for an Auto Scaling group.
:type tags: List of :class:`boto.ec2.autoscale.tag.Tag`
:param tags: The new or updated tags.
"""
params = {}
for i, tag in enumerate(tags):
tag.build_params(params, i + 1)
return self.get_status('CreateOrUpdateTags', params, verb='POST')
def delete_tags(self, tags):
"""
Deletes existing tags for an Auto Scaling group.
:type tags: List of :class:`boto.ec2.autoscale.tag.Tag`
        :param tags: The tags to delete.
"""
params = {}
for i, tag in enumerate(tags):
tag.build_params(params, i + 1)
return self.get_status('DeleteTags', params, verb='POST')
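# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): a minimal example of
# how the connection methods above are typically combined to stand up a
# launch configuration, a group and a scaling policy. The region, AMI id,
# key name and resource names below are illustrative assumptions only.
def _example_autoscale_setup():
    import boto.ec2.autoscale
    from boto.ec2.autoscale import (LaunchConfiguration, AutoScalingGroup,
                                    ScalingPolicy)

    conn = boto.ec2.autoscale.connect_to_region('us-west-2')

    # A launch configuration describes how new instances are started.
    lc = LaunchConfiguration(name='example-lc', image_id='ami-12345678',
                             key_name='example-key',
                             instance_type='t1.micro')
    conn.create_launch_configuration(lc)

    # The group ties the launch configuration to capacity limits and zones.
    group = AutoScalingGroup(group_name='example-asg',
                             availability_zones=['us-west-2a'],
                             launch_config=lc, min_size=1, max_size=4,
                             connection=conn)
    conn.create_auto_scaling_group(group)

    # A simple scale-up policy, triggered externally (e.g. by a CloudWatch
    # alarm) or manually via execute_policy().
    policy = ScalingPolicy(name='scale-up', as_name='example-asg',
                           adjustment_type='ChangeInCapacity',
                           scaling_adjustment=1, cooldown=180)
    conn.create_scaling_policy(policy)
    return conn


# Hedged sketch of paging through get_all_groups() results; this assumes the
# returned ResultSet exposes the service's NextToken as a 'next_token'
# attribute, which is boto's usual convention.
def _example_iter_all_groups(conn):
    token = None
    while True:
        groups = conn.get_all_groups(max_records=50, next_token=token)
        for group in groups:
            yield group
        token = getattr(groups, 'next_token', None)
        if not token:
            break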
|
|
from decimal import Decimal as D
from django.utils.six.moves import http_client
import datetime
from django.conf import settings
from django.test import TestCase
from django.utils.translation import ugettext
from django.core.urlresolvers import reverse
from oscar.test.factories import create_product
from oscar.core.compat import get_user_model
from oscar.test import factories
from oscar.test.basket import add_product
from oscar.test.utils import extract_cookie_value
from oscar.apps.basket import reports
from oscar.apps.basket.models import Basket
from oscar.test.testcases import WebTestCase
from oscar.apps.partner import strategy
User = get_user_model()
class TestBasketMerging(TestCase):
def setUp(self):
self.product = create_product(num_in_stock=10)
self.user_basket = Basket()
self.user_basket.strategy = strategy.Default()
add_product(self.user_basket, product=self.product)
self.cookie_basket = Basket()
self.cookie_basket.strategy = strategy.Default()
add_product(self.cookie_basket, quantity=2, product=self.product)
self.user_basket.merge(self.cookie_basket, add_quantities=False)
def test_cookie_basket_has_status_set(self):
self.assertEqual(Basket.MERGED, self.cookie_basket.status)
def test_lines_are_moved_across(self):
self.assertEqual(1, self.user_basket.lines.all().count())
def test_merge_line_takes_max_quantity(self):
line = self.user_basket.lines.get(product=self.product)
self.assertEqual(2, line.quantity)
class AnonAddToBasketViewTests(WebTestCase):
csrf_checks = False
def setUp(self):
self.product = create_product(
price=D('10.00'), num_in_stock=10)
url = reverse('basket:add', kwargs={'pk': self.product.pk})
post_params = {'product_id': self.product.id,
'action': 'add',
'quantity': 1}
self.response = self.app.post(url, params=post_params)
def test_cookie_is_created(self):
self.assertTrue('oscar_open_basket' in self.response.test_app.cookies)
def test_price_is_recorded(self):
oscar_open_basket_cookie = extract_cookie_value(
self.response.test_app.cookies, 'oscar_open_basket'
)
basket_id = oscar_open_basket_cookie.split(':')[0]
basket = Basket.objects.get(id=basket_id)
line = basket.lines.get(product=self.product)
stockrecord = self.product.stockrecords.all()[0]
self.assertEqual(stockrecord.price_excl_tax, line.price_excl_tax)
class BasketSummaryViewTests(WebTestCase):
def setUp(self):
url = reverse('basket:summary')
self.response = self.app.get(url)
def test_shipping_method_in_context(self):
self.assertTrue('shipping_method' in self.response.context)
def test_order_total_in_context(self):
self.assertTrue('order_total' in self.response.context)
def test_view_does_not_error(self):
self.assertEqual(http_client.OK, self.response.status_code)
def test_basket_in_context(self):
self.assertTrue('basket' in self.response.context)
def test_basket_is_empty(self):
basket = self.response.context['basket']
self.assertEqual(0, basket.num_lines)
class BasketThresholdTest(WebTestCase):
csrf_checks = False
def setUp(self):
self._old_threshold = settings.OSCAR_MAX_BASKET_QUANTITY_THRESHOLD
settings.OSCAR_MAX_BASKET_QUANTITY_THRESHOLD = 3
def tearDown(self):
settings.OSCAR_MAX_BASKET_QUANTITY_THRESHOLD = self._old_threshold
def test_adding_more_than_threshold_raises(self):
dummy_product = create_product(price=D('10.00'), num_in_stock=10)
url = reverse('basket:add', kwargs={'pk': dummy_product.pk})
post_params = {'product_id': dummy_product.id,
'action': 'add',
'quantity': 2}
response = self.app.post(url, params=post_params)
self.assertTrue('oscar_open_basket' in response.test_app.cookies)
post_params = {'product_id': dummy_product.id,
'action': 'add',
'quantity': 2}
response = self.app.post(url, params=post_params)
expected = ugettext(
"Due to technical limitations we are not able to ship more "
"than %(threshold)d items in one order. Your basket currently "
"has %(basket)d items."
) % ({'threshold': 3, 'basket': 2})
self.assertTrue(expected in response.test_app.cookies['messages'])
class BasketReportTests(TestCase):
def test_open_report_doesnt_error(self):
data = {
'start_date': datetime.date(2012, 5, 1),
'end_date': datetime.date(2012, 5, 17),
'formatter': 'CSV'
}
generator = reports.OpenBasketReportGenerator(**data)
generator.generate()
def test_submitted_report_doesnt_error(self):
data = {
'start_date': datetime.date(2012, 5, 1),
'end_date': datetime.date(2012, 5, 17),
'formatter': 'CSV'
}
generator = reports.SubmittedBasketReportGenerator(**data)
generator.generate()
class SavedBasketTests(WebTestCase):
csrf_checks = False
def test_moving_from_saved_basket(self):
self.user = User.objects.create_user(username='test', password='pass',
email='[email protected]')
product = create_product(price=D('10.00'), num_in_stock=2)
basket = factories.create_basket(empty=True)
basket.owner = self.user
basket.save()
add_product(basket, product=product)
saved_basket, created = Basket.saved.get_or_create(owner=self.user)
saved_basket.strategy = basket.strategy
add_product(saved_basket, product=product)
response = self.get(reverse('basket:summary'))
saved_formset = response.context['saved_formset']
saved_form = saved_formset.forms[0]
data = {
saved_formset.add_prefix('INITIAL_FORMS'): 1,
saved_formset.add_prefix('MAX_NUM_FORMS'): 1,
saved_formset.add_prefix('TOTAL_FORMS'): 1,
saved_form.add_prefix('id'): saved_form.initial['id'],
saved_form.add_prefix('move_to_basket'): True,
}
response = self.post(reverse('basket:saved'), params=data)
self.assertEqual(Basket.open.get(id=basket.id).lines.get(
product=product).quantity, 2)
self.assertRedirects(response, reverse('basket:summary'))
def test_moving_from_saved_basket_more_than_stocklevel_raises(self):
self.user = User.objects.create_user(username='test', password='pass',
email='[email protected]')
product = create_product(price=D('10.00'), num_in_stock=1)
basket, created = Basket.open.get_or_create(owner=self.user)
add_product(basket, product=product)
saved_basket, created = Basket.saved.get_or_create(owner=self.user)
add_product(saved_basket, product=product)
response = self.get(reverse('basket:summary'))
saved_formset = response.context['saved_formset']
saved_form = saved_formset.forms[0]
data = {
saved_formset.add_prefix('INITIAL_FORMS'): 1,
saved_formset.add_prefix('MAX_NUM_FORMS'): 1,
saved_formset.add_prefix('TOTAL_FORMS'): 1,
saved_form.add_prefix('id'): saved_form.initial['id'],
saved_form.add_prefix('move_to_basket'): True,
}
response = self.post(reverse('basket:saved'), params=data)
        # We can't add more than the stock level to the basket
self.assertEqual(Basket.open.get(id=basket.id).lines.get(product=product).quantity, 1)
self.assertRedirects(response, reverse('basket:summary'))
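# Hedged counterpart to TestBasketMerging above (an illustrative sketch, not an
# original test): with add_quantities=True, Basket.merge is expected to sum the
# quantities of matching lines instead of keeping the maximum.
class TestBasketMergingWithSummedQuantities(TestCase):
    def setUp(self):
        self.product = create_product(num_in_stock=10)
        self.user_basket = Basket()
        self.user_basket.strategy = strategy.Default()
        add_product(self.user_basket, product=self.product)
        self.cookie_basket = Basket()
        self.cookie_basket.strategy = strategy.Default()
        add_product(self.cookie_basket, quantity=2, product=self.product)
        self.user_basket.merge(self.cookie_basket, add_quantities=True)

    def test_merge_line_sums_quantities(self):
        line = self.user_basket.lines.get(product=self.product)
        self.assertEqual(3, line.quantity)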
|
|
# -*- coding: utf-8 -*-
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from netaddr import IPNetwork
from nailgun import consts
from nailgun.db import db
from nailgun.db.sqlalchemy import models
from nailgun.errors import errors
from nailgun.logger import logger
from nailgun.objects import Cluster
from nailgun.objects import NailgunCollection
from nailgun.objects import NailgunObject
from nailgun.objects.serializers.network_group import NetworkGroupSerializer
class NetworkGroup(NailgunObject):
model = models.NetworkGroup
serializer = NetworkGroupSerializer
@classmethod
def get_from_node_group_by_name(cls, node_group_id, network_name):
ng = db().query(models.NetworkGroup).filter_by(group_id=node_group_id,
name=network_name)
return ng.first() if ng else None
@classmethod
def get_default_admin_network(cls):
return db().query(models.NetworkGroup)\
.filter_by(name=consts.NETWORKS.fuelweb_admin)\
.filter_by(group_id=None)\
.first()
@classmethod
def create(cls, data):
"""Create NetworkGroup instance with specified parameters in DB.
Create corresponding IPAddrRange instance with IP range specified in
data or calculated from CIDR if not specified.
:param data: dictionary of key-value pairs as NetworkGroup fields
:returns: instance of new NetworkGroup
"""
instance = super(NetworkGroup, cls).create(data)
cls._create_ip_ranges_on_notation(instance)
cls._reassign_template_networks(instance)
db().refresh(instance)
return instance
@classmethod
def update(cls, instance, data):
        # clean up stale data and generate new ranges for the group
cls._regenerate_ip_ranges_on_notation(instance, data)
# as ip ranges were regenerated we must update instance object
        # in order to prevent possible SQLAlchemy errors when operating
# on stale data
db().refresh(instance)
        # remove 'ip_ranges' (if any) from data as this is a relation
        # attribute of the ORM model object
data.pop('ip_ranges', None)
return super(NetworkGroup, cls).update(instance, data)
@classmethod
def delete(cls, instance):
notation = instance.meta.get('notation')
if notation and not instance.nodegroup.cluster.is_locked:
cls._delete_ips(instance)
instance.nodegroup.networks.remove(instance)
db().flush()
@classmethod
def is_untagged(cls, instance):
"""Return True if network is untagged"""
return (instance.vlan_start is None) \
and not instance.meta.get('neutron_vlan_range') \
and not instance.meta.get('ext_net_data')
@classmethod
def _create_ip_ranges_on_notation(cls, instance):
"""Create IP-address ranges basing on 'notation' field of 'meta' field
:param instance: NetworkGroup instance
:type instance: models.NetworkGroup
:return: None
"""
notation = instance.meta.get("notation")
if notation:
try:
if notation == 'cidr':
cls._update_range_from_cidr(
instance, IPNetwork(instance.cidr).cidr,
instance.meta.get('use_gateway'))
elif notation == 'ip_ranges' and instance.meta.get("ip_range"):
cls._set_ip_ranges(instance, [instance.meta["ip_range"]])
else:
raise errors.CannotCreate()
except (
errors.CannotCreate,
IndexError,
TypeError
):
raise errors.CannotCreate(
"IPAddrRange object cannot be created for network '{0}' "
"with notation='{1}', ip_range='{2}'".format(
instance.name,
instance.meta.get('notation'),
instance.meta.get('ip_range'))
)
@classmethod
def _regenerate_ip_ranges_on_notation(cls, instance, data):
"""Regenerate IP-address ranges
This method regenerates IPs based on 'notation' field of
Network group 'meta' content.
:param instance: NetworkGroup instance
:type instance: models.NetworkGroup
:param data: network data
:type data: dict
:return: None
"""
notation = instance.meta['notation']
data_meta = data.get('meta', {})
notation = data_meta.get('notation', notation)
if notation == consts.NETWORK_NOTATION.ip_ranges:
ip_ranges = data.get("ip_ranges") or \
[(r.first, r.last) for r in instance.ip_ranges]
cls._set_ip_ranges(instance, ip_ranges)
elif notation == consts.NETWORK_NOTATION.cidr:
use_gateway = data_meta.get(
'use_gateway', instance.meta.get('use_gateway'))
cidr = data.get('cidr', instance.cidr)
cls._update_range_from_cidr(
instance, cidr, use_gateway=use_gateway)
@classmethod
def _set_ip_ranges(cls, instance, ip_ranges):
"""Set IP-address ranges.
:param instance: NetworkGroup instance being updated
:type instance: models.NetworkGroup
:param ip_ranges: IP-address ranges sequence
:type ip_ranges: iterable of pairs
:return: None
"""
# deleting old ip ranges
db().query(models.IPAddrRange).filter_by(
network_group_id=instance.id).delete()
for r in ip_ranges:
new_ip_range = models.IPAddrRange(
first=r[0],
last=r[1],
network_group_id=instance.id)
db().add(new_ip_range)
db().refresh(instance)
db().flush()
@classmethod
def _update_range_from_cidr(
cls, instance, cidr, use_gateway=False):
"""Update network ranges for CIDR.
:param instance: NetworkGroup instance being updated
:type instance: models.NetworkGroup
:param cidr: CIDR network representation
:type cidr: basestring
:param use_gateway: whether gateway is taken into account
:type use_gateway: bool
:return: None
"""
first_idx = 2 if use_gateway else 1
new_cidr = IPNetwork(cidr)
ip_range = (str(new_cidr[first_idx]), str(new_cidr[-2]))
cls._set_ip_ranges(instance, [ip_range])
@classmethod
def _delete_ips(cls, instance):
"""Network group cleanup
Deletes all IPs which were assigned within the network group.
:param instance: NetworkGroup instance
:type instance: models.NetworkGroup
:returns: None
"""
logger.debug("Deleting old IPs for network with id=%s, cidr=%s",
instance.id, instance.cidr)
db().query(models.IPAddr).filter(
models.IPAddr.network == instance.id
).delete()
db().flush()
@classmethod
def _reassign_template_networks(cls, instance):
cluster = instance.nodegroup.cluster
if not cluster.network_config.configuration_template:
return
nm = Cluster.get_network_manager(cluster)
for node in cluster.nodes:
nm.assign_networks_by_template(node)
class NetworkGroupCollection(NailgunCollection):
single = NetworkGroup
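# Hedged illustration (not part of the original module): the CIDR-to-range
# computation used by NetworkGroup._update_range_from_cidr above, reduced to a
# standalone helper so the boundary handling is easy to see.
def _example_range_from_cidr(cidr, use_gateway=False):
    from netaddr import IPNetwork
    # Skip the network address (index 0) and, when a gateway is used, the
    # gateway address (index 1); the broadcast address (index -1) is excluded
    # by taking index -2 as the last usable address.
    first_idx = 2 if use_gateway else 1
    net = IPNetwork(cidr)
    return str(net[first_idx]), str(net[-2])

# e.g. _example_range_from_cidr('192.168.0.0/24') -> ('192.168.0.1', '192.168.0.254')
#      _example_range_from_cidr('192.168.0.0/24', use_gateway=True)
#                               -> ('192.168.0.2', '192.168.0.254')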
|
|
from common_fixtures import * # NOQA
WEB_IMAGE_UUID = "docker:sangeetha/testlbsd:latest"
SSH_IMAGE_UUID = "docker:sangeetha/testclient:latest"
logger = logging.getLogger(__name__)
def env_with_sidekick_config(client, service_scale,
launch_config_consumed_service,
launch_config_service, env=None):
if env is None:
# Create Environment
random_name = random_str()
env_name = random_name.replace("-", "")
env = client.create_environment(name=env_name)
env = client.wait_success(env)
assert env.state == "active"
# Create service
random_name = random_str()
consumed_service_name = random_name.replace("-", "")
launch_config_consumed_service["name"] = consumed_service_name
random_name = random_str()
service_name = random_name.replace("-", "")
service = client.create_service(
name=service_name, environmentId=env.id,
launchConfig=launch_config_service, scale=service_scale,
secondaryLaunchConfigs=[launch_config_consumed_service])
service = client.wait_success(service)
assert service.state == "inactive"
consumed_service_name = \
env.name + "_" + service.name + "_" + consumed_service_name
service_name = env.name + "_" + service.name
return env, service, service_name, consumed_service_name
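# Hedged sketch (illustrative only, not an original fixture): the keyword
# arguments that client.create_service() receives above when a sidekick is
# attached. The service name, port mapping and sidekick name are assumptions.
def _example_sidekick_payload(env_id, scale=2):
    primary = {"imageUuid": SSH_IMAGE_UUID, "ports": ["7999:22/tcp"]}
    sidekick = {"imageUuid": WEB_IMAGE_UUID, "name": "sidekick1"}
    return {"name": "examplesvc",
            "environmentId": env_id,
            "launchConfig": primary,
            "scale": scale,
            "secondaryLaunchConfigs": [sidekick]}
# e.g. service = client.create_service(**_example_sidekick_payload(env.id))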
def create_env_with_sidekick(client, service_scale, expose_port, env=None):
launch_config_consumed_service = {
"imageUuid": WEB_IMAGE_UUID}
    # Adding a service anti-affinity rule to work around bug-1419
launch_config_service = {
"imageUuid": SSH_IMAGE_UUID,
"ports": [expose_port+":22/tcp"],
"labels": {
'io.rancher.scheduler.affinity:container_label_ne':
"io.rancher.stack_service.name" +
"=${stack_name}/${service_name}"
}
}
env, service, service_name, consumed_service_name = \
env_with_sidekick_config(client, service_scale,
launch_config_consumed_service,
launch_config_service, env)
return env, service, service_name, consumed_service_name
def create_env_with_sidekick_for_linking(client, service_scale, env=None):
launch_config_consumed_service = {
"imageUuid": WEB_IMAGE_UUID}
launch_config_service = {
"imageUuid": WEB_IMAGE_UUID}
env, service, service_name, consumed_service_name = \
env_with_sidekick_config(client, service_scale,
launch_config_consumed_service,
launch_config_service, env)
return env, service, service_name, consumed_service_name
def create_env_with_sidekick_anti_affinity(client, service_scale):
launch_config_consumed_service = {
"imageUuid": WEB_IMAGE_UUID}
launch_config_service = {
"imageUuid": SSH_IMAGE_UUID,
"labels": {
'io.rancher.scheduler.affinity:container_label_ne':
"io.rancher.stack_service.name" +
"=${stack_name}/${service_name}"
}
}
env, service, service_name, consumed_service_name = \
env_with_sidekick_config(client, service_scale,
launch_config_consumed_service,
launch_config_service)
return env, service, service_name, consumed_service_name
def create_env_with_exposed_port_on_secondary(client, service_scale,
expose_port):
launch_config_consumed_service = {
"imageUuid": WEB_IMAGE_UUID,
"ports": [expose_port+":80/tcp"]}
launch_config_service = {
"imageUuid": WEB_IMAGE_UUID}
env, service, service_name, consumed_service_name = \
env_with_sidekick_config(client, service_scale,
launch_config_consumed_service,
launch_config_service)
return env, service, service_name, consumed_service_name
def create_env_with_exposed_ports_on_primary_and_secondary(
client, service_scale, expose_port_pri, expose_port_sec):
launch_config_consumed_service = {
"imageUuid": SSH_IMAGE_UUID,
"ports": [expose_port_pri+":22/tcp"]}
launch_config_service = {
"imageUuid": WEB_IMAGE_UUID,
"ports": [expose_port_sec+":22/tcp"]}
env, service, service_name, consumed_service_name = \
env_with_sidekick_config(client, service_scale,
launch_config_consumed_service,
launch_config_service)
return env, service, service_name, consumed_service_name
def create_env_with_multiple_sidekicks(client, service_scale, expose_port):
launch_config_consumed_service1 = {
"imageUuid": WEB_IMAGE_UUID}
launch_config_consumed_service2 = {
"imageUuid": WEB_IMAGE_UUID}
launch_config_service = {
"imageUuid": SSH_IMAGE_UUID,
"ports": [expose_port+":22/tcp"],
"labels": {
'io.rancher.scheduler.affinity:container_label_ne':
"io.rancher.stack_service.name" +
"=${stack_name}/${service_name}"
}}
random_name = random_str()
consumed_service_name1 = random_name.replace("-", "")
random_name = random_str()
consumed_service_name2 = random_name.replace("-", "")
launch_config_consumed_service1["name"] = consumed_service_name1
launch_config_consumed_service2["name"] = consumed_service_name2
# Create Environment
random_name = random_str()
env_name = random_name.replace("-", "")
env = client.create_environment(name=env_name)
env = client.wait_success(env)
assert env.state == "active"
# Create service
random_name = random_str()
service_name = random_name.replace("-", "")
service = client.create_service(
name=service_name, environmentId=env.id,
launchConfig=launch_config_service, scale=service_scale,
secondaryLaunchConfigs=[launch_config_consumed_service1,
launch_config_consumed_service2]
)
service = client.wait_success(service)
assert service.state == "inactive"
consumed_service_name1 = \
env.name + "_" + service.name + "_" + consumed_service_name1
consumed_service_name2 = \
env.name + "_" + service.name + "_" + consumed_service_name2
service_name = env.name + "_" + service.name
return env, service, service_name, \
consumed_service_name1, consumed_service_name2
def env_with_sidekick(super_client, client, service_scale, exposed_port):
env, service, service_name, consumed_service_name = \
create_env_with_sidekick(client, service_scale, exposed_port)
env = env.activateservices()
env = client.wait_success(env, 120)
assert env.state == "active"
service = client.wait_success(service, 120)
assert service.state == "active"
dnsname = service.secondaryLaunchConfigs[0].name
validate_sidekick(super_client, service, service_name,
consumed_service_name, exposed_port, dnsname)
return env, service, service_name, consumed_service_name
def test_sidekick_activate_env(client, super_client):
exposed_port = "7000"
service_scale = 2
env, service, service_name, consumed_service_name = \
create_env_with_sidekick(client, service_scale, exposed_port)
env = env.activateservices()
env = client.wait_success(env, 120)
assert env.state == "active"
service = client.wait_success(service, 120)
assert service.state == "active"
dnsname = service.secondaryLaunchConfigs[0].name
validate_sidekick(super_client, service, service_name,
consumed_service_name, exposed_port, dnsname)
delete_all(client, [env])
def test_multiple_sidekick_activate_service(client, super_client):
exposed_port = "7003"
service_scale = 2
env, service, service_name, consumed_service1, consumed_service2 =\
create_env_with_multiple_sidekicks(
client, service_scale, exposed_port)
env = env.activateservices()
service = client.wait_success(service, 120)
assert service.state == "active"
dnsname = service.secondaryLaunchConfigs[0].name
validate_sidekick(super_client, service, service_name,
consumed_service1, exposed_port, dnsname)
dnsname = service.secondaryLaunchConfigs[1].name
validate_sidekick(super_client, service, service_name,
consumed_service2, exposed_port, dnsname)
delete_all(client, [env])
def test_sidekick_for_lb(client, super_client, socat_containers):
service_scale = 2
port = "7080"
env, service1, service1_name, consumed_service_name = \
create_env_with_sidekick_for_linking(client, service_scale)
env = env.activateservices()
service1 = client.wait_success(service1, 120)
assert service1.state == "active"
validate_sidekick(super_client, service1, service1_name,
consumed_service_name)
env, service2, service2_name, consumed_service_name = \
create_env_with_sidekick_for_linking(client, service_scale, env)
service2 = client.wait_success(service2.activate(), 120)
assert service2.state == "active"
validate_sidekick(super_client, service2, service2_name,
consumed_service_name)
# Add LB services
launch_config_lb = {"ports": [port+":80"]}
random_name = random_str()
service_name = "LB-" + random_name.replace("-", "")
lb_service = client.create_loadBalancerService(
name=service_name, environmentId=env.id, launchConfig=launch_config_lb,
scale=1)
lb_service = client.wait_success(lb_service)
assert lb_service.state == "inactive"
lb_service.setservicelinks(
serviceLinks=[{"serviceId": service1.id, "ports": []},
{"serviceId": service2.id, "ports": []}])
lb_service = lb_service.activate()
lb_service = client.wait_success(lb_service, 120)
assert lb_service.state == "active"
validate_add_service_link(super_client, lb_service, service1)
validate_add_service_link(super_client, lb_service, service2)
wait_for_lb_service_to_become_active(super_client, client,
[service1, service2], lb_service)
target_count = service1.scale + service2.scale
container_name1 = get_service_containers_with_name(super_client,
service1,
service1_name)
container_name2 = get_service_containers_with_name(super_client,
service2,
service2_name)
containers = container_name1 + container_name2
container_names = []
for c in containers:
if c.state == "running":
container_names.append(c.externalId[:12])
assert len(container_names) == target_count
validate_lb_service_con_names(super_client, client, lb_service, port,
container_names)
delete_all(client, [env])
def test_sidekick(client, super_client):
service_scale = 2
env, service, service_name, consumed_service_name = \
create_env_with_sidekick_for_linking(client, service_scale)
env = env.activateservices()
service = client.wait_success(service, 120)
assert service.state == "active"
validate_sidekick(super_client, service, service_name,
consumed_service_name)
delete_all(client, [env])
def test_sidekick_with_anti_affinity(client, super_client):
service_scale = 2
env, service, service_name, consumed_service_name = \
create_env_with_sidekick_anti_affinity(client, service_scale)
env = env.activateservices()
service = client.wait_success(service, 120)
assert service.state == "active"
validate_sidekick(super_client, service, service_name,
consumed_service_name)
delete_all(client, [env])
def test_service_links_to_sidekick(client, super_client):
service_scale = 2
env, linked_service, linked_service_name, linked_consumed_service_name = \
create_env_with_sidekick_for_linking(client, service_scale)
client_port = "7004"
launch_config = {"imageUuid": SSH_IMAGE_UUID,
"ports": [client_port+":22/tcp"]}
service = create_svc(client, env, launch_config, 1)
link_svc(super_client, service, [linked_service])
env = env.activateservices()
service = client.wait_success(service, 120)
assert service.state == "active"
service_containers = get_service_container_list(super_client, service)
primary_consumed_service = get_service_containers_with_name(
super_client, linked_service, linked_service_name)
secondary_consumed_service = get_service_containers_with_name(
super_client, linked_service, linked_consumed_service_name)
dnsname = linked_service.name
validate_dns(super_client, service_containers, primary_consumed_service,
client_port, dnsname)
dnsname = \
linked_service.secondaryLaunchConfigs[0].name + "." + \
linked_service.name
validate_dns(super_client, service_containers, secondary_consumed_service,
client_port, dnsname)
delete_all(client, [env])
def test_sidekick_service_scale_up(client, super_client):
service_scale = 2
exposed_port = "7005"
final_service_scale = 3
env, service, service_name, consumed_service_name = \
env_with_sidekick(super_client, client, service_scale, exposed_port)
service = client.update(service, scale=final_service_scale,
name=service.name)
service = client.wait_success(service, 120)
assert service.state == "active"
assert service.scale == final_service_scale
dnsname = service.secondaryLaunchConfigs[0].name
validate_sidekick(super_client, service, service_name,
consumed_service_name, exposed_port, dnsname)
delete_all(client, [env])
def test_sidekick_scale_down(client, super_client):
service_scale = 3
exposed_port = "7006"
final_service_scale = 2
env, service, service_name, consumed_service_name = \
env_with_sidekick(super_client, client, service_scale, exposed_port)
service = client.update(service, scale=final_service_scale,
name=service.name)
service = client.wait_success(service, 120)
assert service.state == "active"
assert service.scale == final_service_scale
dnsname = service.secondaryLaunchConfigs[0].name
validate_sidekick(super_client, service, service_name,
consumed_service_name, exposed_port, dnsname)
delete_all(client, [env])
def test_sidekick_consumed_services_stop_start_instance(client, super_client):
service_scale = 2
exposed_port = "7007"
env, service, service_name, consumed_service_name = \
env_with_sidekick(super_client, client, service_scale, exposed_port)
container_name = consumed_service_name + "_2"
containers = client.list_container(name=container_name)
assert len(containers) == 1
container = containers[0]
# Stop instance
container = client.wait_success(container.stop(), 120)
client.wait_success(service)
dnsname = service.secondaryLaunchConfigs[0].name
validate_sidekick(super_client, service, service_name,
consumed_service_name, exposed_port, dnsname)
delete_all(client, [env])
def test_sidekick_consumed_services_restart_instance(client, super_client):
service_scale = 2
exposed_port = "7008"
env, service, service_name, consumed_service_name = \
env_with_sidekick(super_client, client, service_scale, exposed_port)
container_name = consumed_service_name + "_2"
containers = client.list_container(name=container_name)
assert len(containers) == 1
container = containers[0]
# restart instance
container = client.wait_success(container.restart(), 120)
assert container.state == 'running'
dnsname = service.secondaryLaunchConfigs[0].name
validate_sidekick(super_client, service, service_name,
consumed_service_name, exposed_port, dnsname)
delete_all(client, [env])
def test_sidekick_consumed_services_delete_instance(client, super_client):
service_scale = 3
exposed_port = "7009"
env, service, service_name, consumed_service_name = \
env_with_sidekick(super_client, client, service_scale, exposed_port)
container_name = consumed_service_name + "_1"
containers = client.list_container(name=container_name)
assert len(containers) == 1
container = containers[0]
print container_name
primary_container = get_side_kick_container(
super_client, container, service, service_name)
print primary_container.name
# Delete instance
container = client.wait_success(client.delete(container))
assert container.state == 'removed'
client.wait_success(service)
dnsname = service.secondaryLaunchConfigs[0].name
validate_sidekick(super_client, service, service_name,
consumed_service_name, exposed_port, dnsname)
# Check that the consumed container is not recreated
primary_container = client.reload(primary_container)
print primary_container.state
assert primary_container.state == "running"
delete_all(client, [env])
def test_sidekick_deactivate_activate_environment(client, super_client):
service_scale = 2
exposed_port = "7010"
env, service, service_name, consumed_service_name = \
env_with_sidekick(super_client, client, service_scale, exposed_port)
env = env.deactivateservices()
service = client.wait_success(service, 120)
assert service.state == "inactive"
wait_until_instances_get_stopped_for_service_with_sec_launch_configs(
super_client, service)
env = env.activateservices()
service = client.wait_success(service, 120)
assert service.state == "active"
dnsname = service.secondaryLaunchConfigs[0].name
validate_sidekick(super_client, service, service_name,
consumed_service_name, exposed_port, dnsname)
delete_all(client, [env])
def test_sidekick_services_stop_start_instance(client, super_client):
service_scale = 2
exposed_port = "7011"
env, service, service_name, consumed_service_name = \
env_with_sidekick(super_client, client, service_scale, exposed_port)
container_name = env.name + "_" + service.name + "_2"
containers = client.list_container(name=container_name)
assert len(containers) == 1
container = containers[0]
# Stop instance
container = client.wait_success(container.stop(), 120)
client.wait_success(service)
dnsname = service.secondaryLaunchConfigs[0].name
validate_sidekick(super_client, service, service_name,
consumed_service_name, exposed_port, dnsname)
delete_all(client, [env])
def test_sidekick_services_restart_instance(client, super_client):
service_scale = 3
exposed_port = "7012"
env, service, service_name, consumed_service_name = \
env_with_sidekick(super_client, client, service_scale, exposed_port)
container_name = env.name + "_" + service.name + "_2"
containers = client.list_container(name=container_name)
assert len(containers) == 1
container = containers[0]
# restart instance
container = client.wait_success(container.restart(), 120)
assert container.state == 'running'
dnsname = service.secondaryLaunchConfigs[0].name
validate_sidekick(super_client, service, service_name,
consumed_service_name, exposed_port, dnsname)
delete_all(client, [env])
def test_sidekick_services_delete_instance(client, super_client):
service_scale = 2
exposed_port = "7013"
env, service, service_name, consumed_service_name = \
env_with_sidekick(super_client, client, service_scale, exposed_port)
container_name = env.name + "_" + service.name + "_1"
containers = client.list_container(name=container_name)
assert len(containers) == 1
container = containers[0]
print container_name
consumed_container = get_side_kick_container(
super_client, container, service, consumed_service_name)
print consumed_container.name
# Delete instance
container = client.wait_success(client.delete(container))
assert container.state == 'removed'
client.wait_success(service)
dnsname = service.secondaryLaunchConfigs[0].name
validate_sidekick(super_client, service, service_name,
consumed_service_name, exposed_port, dnsname)
# Check that the consumed container is not recreated
consumed_container = client.reload(consumed_container)
print consumed_container.state
assert consumed_container.state == "running"
delete_all(client, [env])
def test_sidekick_services_deactivate_activate(client, super_client):
service_scale = 2
exposed_port = "7014"
env, service, service_name, consumed_service_name = \
env_with_sidekick(super_client, client, service_scale, exposed_port)
service = service.deactivate()
service = client.wait_success(service, 120)
assert service.state == "inactive"
wait_until_instances_get_stopped_for_service_with_sec_launch_configs(
super_client, service)
service = service.activate()
service = client.wait_success(service, 120)
assert service.state == "active"
dnsname = service.secondaryLaunchConfigs[0].name
validate_sidekick(super_client, service, service_name,
consumed_service_name, exposed_port, dnsname)
delete_all(client, [env])
def test_sidekick_lbactivation_after_linking(client,
super_client, socat_containers):
service_scale = 2
port = "7091"
env, service1, service1_name, consumed_service_name = \
create_env_with_sidekick_for_linking(client, service_scale)
env = env.activateservices()
service1 = client.wait_success(service1, 120)
assert service1.state == "active"
validate_sidekick(super_client, service1, service1_name,
consumed_service_name)
# Add LB service
launch_config_lb = {"ports": [port + ":80"]}
random_name = random_str()
service_name = "LB-" + random_name.replace("-", "")
lb_service = client.create_loadBalancerService(
name=service_name, environmentId=env.id, launchConfig=launch_config_lb,
scale=1)
lb_service = client.wait_success(lb_service)
assert lb_service.state == "inactive"
lb_service.setservicelinks(
serviceLinks=[{"serviceId": service1.id, "ports": []}])
validate_add_service_link(super_client, lb_service, service1)
# Activate LB service
lb_service = lb_service.activate()
lb_service = client.wait_success(lb_service, 120)
assert lb_service.state == "active"
wait_for_lb_service_to_become_active(super_client, client,
[service1], lb_service)
target_count = service1.scale
container_name1 = get_service_containers_with_name(super_client,
service1,
service1_name)
containers = container_name1
container_names = []
for c in containers:
if c.state == "running":
container_names.append(c.externalId[:12])
assert len(container_names) == target_count
validate_lb_service_con_names(super_client, client, lb_service, port,
container_names)
delete_all(client, [env])
def validate_sidekick(super_client, primary_service, service_name,
consumed_service_name, exposed_port=None, dnsname=None):
print "Validating service - " + service_name
containers = get_service_containers_with_name(super_client,
primary_service,
service_name)
assert len(containers) == primary_service.scale
print "Validating Consumed Services: " + consumed_service_name
consumed_containers = get_service_containers_with_name(
super_client, primary_service, consumed_service_name)
assert len(consumed_containers) == primary_service.scale
    # For every container in the service, make sure that there is one
    # associated container from each of the consumed services with the same
    # label, and make sure that this container is on the same host as the
    # primary service container
for con in containers:
pri_host = con.hosts[0].id
label = con.labels["io.rancher.service.deployment.unit"]
print con.name + " - " + label + " - " + pri_host
secondary_con = get_service_container_with_label(
super_client, primary_service, consumed_service_name, label)
sec_host = secondary_con.hosts[0].id
print secondary_con.name + " - " + label + " - " + sec_host
assert sec_host == pri_host
if exposed_port is not None and dnsname is not None:
# Check for DNS resolution
secondary_con = get_service_containers_with_name(
super_client, primary_service, consumed_service_name)
validate_dns(super_client, containers, secondary_con, exposed_port,
dnsname)
def validate_dns(super_client, service_containers, consumed_service,
exposed_port, dnsname):
time.sleep(5)
for service_con in service_containers:
host = super_client.by_id('host', service_con.hosts[0].id)
expected_dns_list = []
expected_link_response = []
dns_response = []
print "Validating DNS for " + dnsname + " - container -" \
+ service_con.name
for con in consumed_service:
expected_dns_list.append(con.primaryIpAddress)
expected_link_response.append(con.externalId[:12])
print "Expected dig response List" + str(expected_dns_list)
print "Expected wget response List" + str(expected_link_response)
# Validate port mapping
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(host.ipAddresses()[0].address, username="root",
password="root", port=int(exposed_port))
# Validate link containers
cmd = "wget -O result.txt --timeout=20 --tries=1 http://" + dnsname + \
":80/name.html;cat result.txt"
print cmd
stdin, stdout, stderr = ssh.exec_command(cmd)
response = stdout.readlines()
assert len(response) == 1
resp = response[0].strip("\n")
print "Actual wget Response" + str(resp)
assert resp in (expected_link_response)
# Validate DNS resolution using dig
cmd = "dig " + dnsname + " +short"
print cmd
stdin, stdout, stderr = ssh.exec_command(cmd)
response = stdout.readlines()
print "Actual dig Response" + str(response)
assert len(response) == len(expected_dns_list)
for resp in response:
dns_response.append(resp.strip("\n"))
for address in expected_dns_list:
assert address in dns_response
|
|
#
# Collect Thread Class
# Author: Francis T
#
# Thread for handling data collection operations
#
import logging
import dryad.ble_utils as ble_utils
from random import randint
from time import sleep
from threading import Thread, Event, Lock
from queue import Queue
from dryad.database import DryadDatabase
from dryad.sensor_node.bluno_sensor_node import BlunoSensorNode
from dryad.sensor_node.parrot_sensor_node import ParrotSensorNode
MAX_QUEUE_TASKS = 4
class CollectThread(Thread):
def __init__(self, parent):
Thread.__init__(self)
self.logger = logging.getLogger("main.AggregatorNode.CollectThread")
self.parent = parent
self.node_queue = None
self.node_queue_size = MAX_QUEUE_TASKS
self.worker_threads = None
self.active_flag = False
self.active_flag_lock = Lock()
self.active_wait_events = []
return
def classify_node(self, node):
db = DryadDatabase()
self.logger.info("Discovering node classification...")
try:
node['type'], node['class'] = ble_utils.discover_node_category(node['addr'], node['id'])
except Exception as e:
self.logger.error("Failed to discover node classification: {}".format(e))
db.close_session()
return False
# Update the database node information
result = db.insert_or_update_node( name = node['id'],
node_class = node['class'] )
if result == False:
self.logger.error("Unable to update node record")
db.close_session()
return False
# Update the node device record in the database
result = db.insert_or_update_device( address = node['addr'],
node_id = node['id'],
device_type = node['type'] )
if result == False:
self.logger.error("Unable to update node device record")
db.close_session()
return False
db.close_session()
return True
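    # instantiate_node() maps the discovered device type to a sensor node
    # class; the 'type' field comes from the node list or, for previously
    # unclassified nodes, from classify_node() via
    # ble_utils.discover_node_category(). Unknown types yield None, which
    # process_node() treats as a skip.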
def instantiate_node(self, node_info, wait_event):
if node_info['type'] == ble_utils.NTYPE_BLUNO:
return BlunoSensorNode( node_info['id'],
node_info['addr'],
wait_event )
elif node_info['type'] == ble_utils.NTYPE_PARROT:
return ParrotSensorNode( node_info['id'],
node_info['addr'],
wait_event )
# if the node cannot be instantiated due to its type
# being unknown, then simply return None
return None
def process_node(self):
while self.check_active():
node = self.node_queue.get()
if (node == None):
break
self.logger.debug("Processing {}...".format(node['id']))
# Check if the node id is valid
if (node['id'] == None) or (node['id'] == ''):
self.logger.info("Skipping blank \"node\" with address {}".format(node['addr']))
self.node_queue.task_done()
continue
# Classify the node if it hasn't been classified yet
if node['class'] == ble_utils.NCLAS_UNKNOWN:
result = self.classify_node(node)
if result == False:
self.node_queue.task_done()
continue
# Based on the node type, instantiate a Node object and read
wait_event = Event()
node_instance = self.instantiate_node(node, wait_event)
if node_instance == None:
self.logger.error( "Could not instantiate node: {} ({})".format(
node['id'], node['addr']) )
self.node_queue.task_done()
continue
if node_instance.connect() == False:
self.logger.error( "Could not connect to node: {} ({})".format(
node['id'], node['addr']) )
self.node_queue.task_done()
continue
if self.check_active() == False:
self.node_queue.task_done()
continue
node_instance.start()
self.active_wait_events.append( { 'id' : node['id'],
'event' : wait_event } )
wait_event.wait()
node_instance.stop()
self.node_queue.task_done()
if node != None:
self.logger.debug("Processed {}!".format(node['id']))
return
def offload_data(self):
db = DryadDatabase()
session_data = db.get_session_data()
self.logger.debug(session_data)
blk_count = 0
curr_session = 0
prev_session = 0
        n_params = 13  # expected number of parameters in a complete data block
data_blocks = {}
offloaded_data = {}
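        # data_blocks maps each source_id to a list of partially-filled
        # reading dicts; once a dict holds n_params entries it is treated as
        # a complete block and moved into offloaded_data (source_id -> list
        # of complete blocks). Any leftover partial blocks are offloaded
        # after the loop.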
for reading in session_data:
# Save the current session id
curr_session = reading.session_id
# Extract the data type and value from the 'content' string
data_key = reading.content.split(":")[0].strip()
data_val = reading.content.split(":")[1].strip()
data_source_id = reading.source_id
# Boolean indicator whether data key exists in a data block
key_exists = True
# Check if source id exists in current data blocks
if data_source_id in data_blocks.keys():
source_id_readings = data_blocks[data_source_id]
for i in range(len(source_id_readings)):
if data_key in source_id_readings[i].keys():
# Go to the next source id reading
continue
# If the data key is not existing on the source id readings
key_exists = False
data_blocks[data_source_id][i][data_key] = data_val
# Add block to offloaded_data if complete
if len(data_blocks[data_source_id][i]) == n_params:
if data_source_id not in offloaded_data.keys():
offloaded_data[data_source_id] = [data_blocks[data_source_id][i]]
else:
offloaded_data[data_source_id].append(data_blocks[data_source_id][i])
# Remove datum that has been offloaded
del data_blocks[data_source_id][i]
# Go to the next reading
break
if key_exists is True:
data_blocks[data_source_id].append({data_key: data_val})
# Initialize data block if source id not existing
else:
data_blocks[data_source_id] = [{data_key: data_val}]
# Add remaining data blocks to offload
for key, block in data_blocks.items():
for reading_set in block:
                if len(reading_set) != 0:
if key not in offloaded_data.keys():
offloaded_data[key] = [reading_set]
else:
offloaded_data[key].append(reading_set)
blk_count = 0
# Add offloaded data to database
for source, block in offloaded_data.items():
for reading_set in block:
# Save the block to the database
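                # NOTE: 'reading' is the last row processed in the
                # session_data loop above, so every block written here
                # shares that reading's timestamp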
db.add_data( blk_id=blk_count,
session_id=curr_session,
source_id=source,
content=str(reading_set),
timestamp=reading.timestamp )
blk_count += 1
db.clear_session_data()
db.close_session()
return
def setup_worker_threads(self):
self.worker_threads = []
db = DryadDatabase()
if db.get_current_session() != False:
self.logger.error("A previous session is still active. Closing it...")
db.terminate_session()
db.start_session()
db.close_session()
for i in range(self.node_queue_size):
t = Thread(target=self.process_node)
t.start()
self.worker_threads.append(t)
return
def cleanup_worker_threads(self):
for i in range(self.node_queue_size):
self.node_queue.put(None)
for t in self.worker_threads:
self.logger.debug("Cleaning up thread: {}".format(t.name))
t.join()
db = DryadDatabase()
db.terminate_session()
db.close_session()
return
def run(self):
self.set_active(True)
self.logger.debug("Data collection started")
self.node_queue = Queue(self.node_queue_size)
# Load node list from the Aggregator Node
node_list = self.parent.get_node_list()
if node_list == None:
self.logger.error("Error could not reload node list!")
return
self.setup_worker_threads()
# Add nodes to the queue
for node in node_list:
self.logger.debug("Added node to queue: {}".format(node['id']))
self.node_queue.put(node)
# Wait until all nodes enqueued have been processed
self.node_queue.join()
self.logger.debug("All nodes processed!")
# Cleanup remaining threads
self.cleanup_worker_threads()
# Compress and offload collected data to data archive
self.offload_data()
self.logger.debug("Data collection finished")
self.set_active(False)
self.parent.add_task("STOP_COLLECT")
return
    def cancel(self):
        self.logger.debug("Data collection cancelled")
        self.set_active(False)
        # Iterate over a copy: removing items from the list while iterating
        # over it would skip entries
        for event in list(self.active_wait_events):
            event['event'].set()
        self.active_wait_events = []
        return
def check_active(self):
result = False
self.active_flag_lock.acquire()
result = self.active_flag
self.active_flag_lock.release()
return result
def set_active(self, flag):
self.active_flag_lock.acquire()
self.active_flag = flag
self.active_flag_lock.release()
return
|
|
import re
import collections
from enum import Enum
from ydk._core._dm_meta_info import _MetaInfoClassMember, _MetaInfoClass, _MetaInfoEnum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk._core._dm_meta_info import ATTRIBUTE, REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST, REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS, REFERENCE_UNION, ANYXML_CLASS
from ydk.errors import YPYError, YPYModelError
from ydk.providers._importer import _yang_ns
_meta_table = {
'RsvpBc0Enum' : _MetaInfoEnum('RsvpBc0Enum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_rsvp_cfg',
{
'bc0':'bc0',
'global-pool':'global_pool',
'not-specified':'not_specified',
}, 'Cisco-IOS-XR-ip-rsvp-cfg', _yang_ns._namespaces['Cisco-IOS-XR-ip-rsvp-cfg']),
'RsvpBwCfgEnum' : _MetaInfoEnum('RsvpBwCfgEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_rsvp_cfg',
{
'absolute':'absolute',
'percentage':'percentage',
}, 'Cisco-IOS-XR-ip-rsvp-cfg', _yang_ns._namespaces['Cisco-IOS-XR-ip-rsvp-cfg']),
'RsvpRdmEnum' : _MetaInfoEnum('RsvpRdmEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_rsvp_cfg',
{
'rdm':'rdm',
'not-specified':'not_specified',
'use-default-bandwidth':'use_default_bandwidth',
}, 'Cisco-IOS-XR-ip-rsvp-cfg', _yang_ns._namespaces['Cisco-IOS-XR-ip-rsvp-cfg']),
'RsvpBc1Enum' : _MetaInfoEnum('RsvpBc1Enum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_rsvp_cfg',
{
'bc1':'bc1',
'sub-pool':'sub_pool',
}, 'Cisco-IOS-XR-ip-rsvp-cfg', _yang_ns._namespaces['Cisco-IOS-XR-ip-rsvp-cfg']),
'Rsvp.Neighbors.Neighbor.Authentication' : {
'meta_info' : _MetaInfoClass('Rsvp.Neighbors.Neighbor.Authentication',
False,
[
_MetaInfoClassMember('enable', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Enable or disable RSVP authentication
''',
'enable',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
_MetaInfoClassMember('key-chain', ATTRIBUTE, 'str' , None, None,
[(1, 32)], [],
''' Key chain to authenticate RSVP signalling
messages
''',
'key_chain',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
_MetaInfoClassMember('life-time', ATTRIBUTE, 'int' , None, None,
[('30', '86400')], [],
''' Life time (in seconds) for each security
association
''',
'life_time',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
_MetaInfoClassMember('window-size', ATTRIBUTE, 'int' , None, None,
[('1', '64')], [],
''' Window-size to limit number of out-of-order
messages
''',
'window_size',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
],
'Cisco-IOS-XR-ip-rsvp-cfg',
'authentication',
_yang_ns._namespaces['Cisco-IOS-XR-ip-rsvp-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_rsvp_cfg'
),
},
'Rsvp.Neighbors.Neighbor' : {
'meta_info' : _MetaInfoClass('Rsvp.Neighbors.Neighbor',
False,
[
_MetaInfoClassMember('neighbor', ATTRIBUTE, 'str' , None, None,
[], [b'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' Neighbor IP address
''',
'neighbor',
'Cisco-IOS-XR-ip-rsvp-cfg', True),
_MetaInfoClassMember('authentication', REFERENCE_CLASS, 'Authentication' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_rsvp_cfg', 'Rsvp.Neighbors.Neighbor.Authentication',
[], [],
''' Configure RSVP authentication
''',
'authentication',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
],
'Cisco-IOS-XR-ip-rsvp-cfg',
'neighbor',
_yang_ns._namespaces['Cisco-IOS-XR-ip-rsvp-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_rsvp_cfg'
),
},
'Rsvp.Neighbors' : {
'meta_info' : _MetaInfoClass('Rsvp.Neighbors',
False,
[
_MetaInfoClassMember('neighbor', REFERENCE_LIST, 'Neighbor' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_rsvp_cfg', 'Rsvp.Neighbors.Neighbor',
[], [],
''' RSVP neighbor configuration
''',
'neighbor',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
],
'Cisco-IOS-XR-ip-rsvp-cfg',
'neighbors',
_yang_ns._namespaces['Cisco-IOS-XR-ip-rsvp-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_rsvp_cfg'
),
},
'Rsvp.Controllers.Controller.CntlSignalling.OutOfBand' : {
'meta_info' : _MetaInfoClass('Rsvp.Controllers.Controller.CntlSignalling.OutOfBand',
False,
[
_MetaInfoClassMember('missed-messages', ATTRIBUTE, 'int' , None, None,
[('1', '110000')], [],
''' Configure max number of consecutive missed
messages for state expiry for out-of-band
tunnels
''',
'missed_messages',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
_MetaInfoClassMember('refresh-interval', ATTRIBUTE, 'int' , None, None,
[('180', '86400')], [],
''' Configure interval between successive refreshes
for out-of-band tunnels
''',
'refresh_interval',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
],
'Cisco-IOS-XR-ip-rsvp-cfg',
'out-of-band',
_yang_ns._namespaces['Cisco-IOS-XR-ip-rsvp-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_rsvp_cfg'
),
},
'Rsvp.Controllers.Controller.CntlSignalling' : {
'meta_info' : _MetaInfoClass('Rsvp.Controllers.Controller.CntlSignalling',
False,
[
_MetaInfoClassMember('out-of-band', REFERENCE_CLASS, 'OutOfBand' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_rsvp_cfg', 'Rsvp.Controllers.Controller.CntlSignalling.OutOfBand',
[], [],
''' Configure RSVP out-of-band signalling parameters
''',
'out_of_band',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
],
'Cisco-IOS-XR-ip-rsvp-cfg',
'cntl-signalling',
_yang_ns._namespaces['Cisco-IOS-XR-ip-rsvp-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_rsvp_cfg'
),
},
'Rsvp.Controllers.Controller' : {
'meta_info' : _MetaInfoClass('Rsvp.Controllers.Controller',
False,
[
_MetaInfoClassMember('controller-name', ATTRIBUTE, 'str' , None, None,
[], [b'(([a-zA-Z0-9_]*\\d+/){3,4}\\d+)|(([a-zA-Z0-9_]*\\d+/){3,4}\\d+\\.\\d+)|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]*\\d+))|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]+))|([a-zA-Z0-9_-]*\\d+)|([a-zA-Z0-9_-]*\\d+\\.\\d+)|(mpls)|(dwdm)'],
''' Name of controller
''',
'controller_name',
'Cisco-IOS-XR-ip-rsvp-cfg', True),
_MetaInfoClassMember('cntl-signalling', REFERENCE_CLASS, 'CntlSignalling' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_rsvp_cfg', 'Rsvp.Controllers.Controller.CntlSignalling',
[], [],
''' Configure RSVP signalling parameters
''',
'cntl_signalling',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
_MetaInfoClassMember('enable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable RSVP on an interface
''',
'enable',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
],
'Cisco-IOS-XR-ip-rsvp-cfg',
'controller',
_yang_ns._namespaces['Cisco-IOS-XR-ip-rsvp-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_rsvp_cfg'
),
},
'Rsvp.Controllers' : {
'meta_info' : _MetaInfoClass('Rsvp.Controllers',
False,
[
_MetaInfoClassMember('controller', REFERENCE_LIST, 'Controller' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_rsvp_cfg', 'Rsvp.Controllers.Controller',
[], [],
''' Controller configuration
''',
'controller',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
],
'Cisco-IOS-XR-ip-rsvp-cfg',
'controllers',
_yang_ns._namespaces['Cisco-IOS-XR-ip-rsvp-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_rsvp_cfg'
),
},
'Rsvp.GlobalLogging' : {
'meta_info' : _MetaInfoClass('Rsvp.GlobalLogging',
False,
[
_MetaInfoClassMember('log-issu-status', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable ISSU Status Logging
''',
'log_issu_status',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
_MetaInfoClassMember('log-nsr-status', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable NSR Status Logging
''',
'log_nsr_status',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
],
'Cisco-IOS-XR-ip-rsvp-cfg',
'global-logging',
_yang_ns._namespaces['Cisco-IOS-XR-ip-rsvp-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_rsvp_cfg'
),
},
'Rsvp.GlobalBandwidth.DefaultInterfacePercent.Mam' : {
'meta_info' : _MetaInfoClass('Rsvp.GlobalBandwidth.DefaultInterfacePercent.Mam',
False,
[
_MetaInfoClassMember('bc0-percent', ATTRIBUTE, 'int' , None, None,
[('0', '10000')], [],
''' Default BC0 pool I/F % B/W
''',
'bc0_percent',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
_MetaInfoClassMember('bc1-percent', ATTRIBUTE, 'int' , None, None,
[('0', '10000')], [],
''' Default BC1 pool I/F % B/W
''',
'bc1_percent',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
_MetaInfoClassMember('max-res-percent', ATTRIBUTE, 'int' , None, None,
[('0', '10000')], [],
''' Default maximum reservable I/F % B/W
''',
'max_res_percent',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
],
'Cisco-IOS-XR-ip-rsvp-cfg',
'mam',
_yang_ns._namespaces['Cisco-IOS-XR-ip-rsvp-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_rsvp_cfg'
),
},
'Rsvp.GlobalBandwidth.DefaultInterfacePercent.Rdm' : {
'meta_info' : _MetaInfoClass('Rsvp.GlobalBandwidth.DefaultInterfacePercent.Rdm',
False,
[
_MetaInfoClassMember('bc0-percent', ATTRIBUTE, 'int' , None, None,
[('0', '10000')], [],
''' Default BC0 pool I/F % B/W
''',
'bc0_percent',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
_MetaInfoClassMember('bc1-percent', ATTRIBUTE, 'int' , None, None,
[('0', '10000')], [],
''' Default BC1 pool I/F % B/W
''',
'bc1_percent',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
],
'Cisco-IOS-XR-ip-rsvp-cfg',
'rdm',
_yang_ns._namespaces['Cisco-IOS-XR-ip-rsvp-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_rsvp_cfg'
),
},
'Rsvp.GlobalBandwidth.DefaultInterfacePercent' : {
'meta_info' : _MetaInfoClass('Rsvp.GlobalBandwidth.DefaultInterfacePercent',
False,
[
_MetaInfoClassMember('mam', REFERENCE_CLASS, 'Mam' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_rsvp_cfg', 'Rsvp.GlobalBandwidth.DefaultInterfacePercent.Mam',
[], [],
''' Configure global default MAM I/F percent
bandwidth parameters
''',
'mam',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
_MetaInfoClassMember('rdm', REFERENCE_CLASS, 'Rdm' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_rsvp_cfg', 'Rsvp.GlobalBandwidth.DefaultInterfacePercent.Rdm',
[], [],
''' Configure global default RDM I/F percent
bandwidth parameters
''',
'rdm',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
],
'Cisco-IOS-XR-ip-rsvp-cfg',
'default-interface-percent',
_yang_ns._namespaces['Cisco-IOS-XR-ip-rsvp-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_rsvp_cfg'
),
},
'Rsvp.GlobalBandwidth' : {
'meta_info' : _MetaInfoClass('Rsvp.GlobalBandwidth',
False,
[
_MetaInfoClassMember('default-interface-percent', REFERENCE_CLASS, 'DefaultInterfacePercent' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_rsvp_cfg', 'Rsvp.GlobalBandwidth.DefaultInterfacePercent',
[], [],
''' Configure Global RSVP signalling parameters
''',
'default_interface_percent',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
],
'Cisco-IOS-XR-ip-rsvp-cfg',
'global-bandwidth',
_yang_ns._namespaces['Cisco-IOS-XR-ip-rsvp-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_rsvp_cfg'
),
},
'Rsvp.Interfaces.Interface.IfSignalling.RefreshReduction' : {
'meta_info' : _MetaInfoClass('Rsvp.Interfaces.Interface.IfSignalling.RefreshReduction',
False,
[
_MetaInfoClassMember('bundle-message-max-size', ATTRIBUTE, 'int' , None, None,
[('512', '65000')], [],
''' Configure maximum size of a single RSVP
Bundle message
''',
'bundle_message_max_size',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
_MetaInfoClassMember('disable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Disable refresh reduction
''',
'disable',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
_MetaInfoClassMember('reliable-ack-hold-time', ATTRIBUTE, 'int' , None, None,
[('100', '5000')], [],
''' Configure hold time for sending RSVP ACK
message(s)
''',
'reliable_ack_hold_time',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
_MetaInfoClassMember('reliable-ack-max-size', ATTRIBUTE, 'int' , None, None,
[('20', '65000')], [],
''' Configure max size of a single RSVP ACK
message
''',
'reliable_ack_max_size',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
_MetaInfoClassMember('reliable-retransmit-time', ATTRIBUTE, 'int' , None, None,
[('100', '10000')], [],
''' Configure min delay to wait for an ACK
before a retransmit
''',
'reliable_retransmit_time',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
_MetaInfoClassMember('reliable-s-refresh', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Configure use of reliable messaging for
summary refresh
''',
'reliable_s_refresh',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
_MetaInfoClassMember('summary-max-size', ATTRIBUTE, 'int' , None, None,
[('20', '65000')], [],
''' Configure max size of a single RSVP summary
refresh message
''',
'summary_max_size',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
],
'Cisco-IOS-XR-ip-rsvp-cfg',
'refresh-reduction',
_yang_ns._namespaces['Cisco-IOS-XR-ip-rsvp-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_rsvp_cfg'
),
},
'Rsvp.Interfaces.Interface.IfSignalling.IntervalRate' : {
'meta_info' : _MetaInfoClass('Rsvp.Interfaces.Interface.IfSignalling.IntervalRate',
False,
[
_MetaInfoClassMember('interval-size', ATTRIBUTE, 'int' , None, None,
[('250', '2000')], [],
''' Size of an interval (milliseconds)
''',
'interval_size',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
_MetaInfoClassMember('messages-per-interval', ATTRIBUTE, 'int' , None, None,
[('1', '500')], [],
''' Number of messages to be sent per interval
''',
'messages_per_interval',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
],
'Cisco-IOS-XR-ip-rsvp-cfg',
'interval-rate',
_yang_ns._namespaces['Cisco-IOS-XR-ip-rsvp-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_rsvp_cfg'
),
},
'Rsvp.Interfaces.Interface.IfSignalling.OutOfBand' : {
'meta_info' : _MetaInfoClass('Rsvp.Interfaces.Interface.IfSignalling.OutOfBand',
False,
[
_MetaInfoClassMember('missed-messages', ATTRIBUTE, 'int' , None, None,
[('1', '110000')], [],
''' Configure max number of consecutive missed
messages for state expiry for out-of-band
tunnels
''',
'missed_messages',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
_MetaInfoClassMember('refresh-interval', ATTRIBUTE, 'int' , None, None,
[('180', '86400')], [],
''' Configure interval between successive refreshes
for out-of-band tunnels
''',
'refresh_interval',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
],
'Cisco-IOS-XR-ip-rsvp-cfg',
'out-of-band',
_yang_ns._namespaces['Cisco-IOS-XR-ip-rsvp-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_rsvp_cfg'
),
},
'Rsvp.Interfaces.Interface.IfSignalling' : {
'meta_info' : _MetaInfoClass('Rsvp.Interfaces.Interface.IfSignalling',
False,
[
_MetaInfoClassMember('dscp', ATTRIBUTE, 'int' , None, None,
[('0', '63')], [],
''' Differentiated Services Code Point (DSCP)
''',
'dscp',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
_MetaInfoClassMember('hello-graceful-restart-if-based', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable IF-based Hello adjacency on a RSVP
interface
''',
'hello_graceful_restart_if_based',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
_MetaInfoClassMember('interval-rate', REFERENCE_CLASS, 'IntervalRate' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_rsvp_cfg', 'Rsvp.Interfaces.Interface.IfSignalling.IntervalRate',
[], [],
''' Configure number of messages to be sent per
interval
''',
'interval_rate',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
_MetaInfoClassMember('missed-messages', ATTRIBUTE, 'int' , None, None,
[('1', '8')], [],
''' Configure max number of consecutive missed
messages for state expiry
''',
'missed_messages',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
_MetaInfoClassMember('out-of-band', REFERENCE_CLASS, 'OutOfBand' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_rsvp_cfg', 'Rsvp.Interfaces.Interface.IfSignalling.OutOfBand',
[], [],
''' Configure RSVP out-of-band signalling parameters
''',
'out_of_band',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
_MetaInfoClassMember('pacing', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable rate-limiting on the interface
''',
'pacing',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
_MetaInfoClassMember('refresh-interval', ATTRIBUTE, 'int' , None, None,
[('10', '180')], [],
''' Configure interval between successive
refreshes
''',
'refresh_interval',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
_MetaInfoClassMember('refresh-reduction', REFERENCE_CLASS, 'RefreshReduction' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_rsvp_cfg', 'Rsvp.Interfaces.Interface.IfSignalling.RefreshReduction',
[], [],
''' Configure RSVP Refresh Reduction parameters
''',
'refresh_reduction',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
],
'Cisco-IOS-XR-ip-rsvp-cfg',
'if-signalling',
_yang_ns._namespaces['Cisco-IOS-XR-ip-rsvp-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_rsvp_cfg'
),
},
'Rsvp.Interfaces.Interface.Bandwidth.Mam' : {
'meta_info' : _MetaInfoClass('Rsvp.Interfaces.Interface.Bandwidth.Mam',
False,
[
_MetaInfoClassMember('bandwidth-mode', REFERENCE_ENUM_CLASS, 'RsvpBwCfgEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_rsvp_cfg', 'RsvpBwCfgEnum',
[], [],
''' Absolute or Percentage bandwidth mode
''',
'bandwidth_mode',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
_MetaInfoClassMember('bc0-bandwidth', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Reservable bandwidth in BC0 (Kbps or percent
of physical bandwidth)
''',
'bc0_bandwidth',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
_MetaInfoClassMember('bc1-bandwidth', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Reservable bandwidth in BC1 (Kbps or percent
of physical bandwidth)
''',
'bc1_bandwidth',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
_MetaInfoClassMember('max-resv-bandwidth', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Maximum reservable bandwidth (Kbps or
percent of physical bandwidth)
''',
'max_resv_bandwidth',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
_MetaInfoClassMember('max-resv-flow', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Largest reservable flow (Kbps or percent of
physical bandwidth)
''',
'max_resv_flow',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
],
'Cisco-IOS-XR-ip-rsvp-cfg',
'mam',
_yang_ns._namespaces['Cisco-IOS-XR-ip-rsvp-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_rsvp_cfg'
),
},
'Rsvp.Interfaces.Interface.Bandwidth.Rdm' : {
'meta_info' : _MetaInfoClass('Rsvp.Interfaces.Interface.Bandwidth.Rdm',
False,
[
_MetaInfoClassMember('bandwidth-mode', REFERENCE_ENUM_CLASS, 'RsvpBwCfgEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_rsvp_cfg', 'RsvpBwCfgEnum',
[], [],
''' Absolute or Percentage bandwidth mode
''',
'bandwidth_mode',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
_MetaInfoClassMember('bc0-bandwidth', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Reservable bandwidth in BC0 (Kbps or percent
of physical bandwidth)
''',
'bc0_bandwidth',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
_MetaInfoClassMember('bc0-keyword', REFERENCE_ENUM_CLASS, 'RsvpBc0Enum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_rsvp_cfg', 'RsvpBc0Enum',
[], [],
''' Set requests should always use BC0
''',
'bc0_keyword',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
_MetaInfoClassMember('bc1-bandwidth', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Reservable bandwidth in BC1 (Kbps or percent
of physical bandwidth)
''',
'bc1_bandwidth',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
_MetaInfoClassMember('bc1-keyword', REFERENCE_ENUM_CLASS, 'RsvpBc1Enum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_rsvp_cfg', 'RsvpBc1Enum',
[], [],
''' Set requests should always use BC1
''',
'bc1_keyword',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
_MetaInfoClassMember('max-resv-flow', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Largest reservable flow (Kbps or percent of
physical bandwidth)
''',
'max_resv_flow',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
_MetaInfoClassMember('rdm-keyword', REFERENCE_ENUM_CLASS, 'RsvpRdmEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_rsvp_cfg', 'RsvpRdmEnum',
[], [],
''' Set requests should always use RDM
''',
'rdm_keyword',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
],
'Cisco-IOS-XR-ip-rsvp-cfg',
'rdm',
_yang_ns._namespaces['Cisco-IOS-XR-ip-rsvp-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_rsvp_cfg'
),
},
'Rsvp.Interfaces.Interface.Bandwidth' : {
'meta_info' : _MetaInfoClass('Rsvp.Interfaces.Interface.Bandwidth',
False,
[
_MetaInfoClassMember('mam', REFERENCE_CLASS, 'Mam' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_rsvp_cfg', 'Rsvp.Interfaces.Interface.Bandwidth.Mam',
[], [],
''' Configure MAM bandwidth parameters
''',
'mam',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
_MetaInfoClassMember('rdm', REFERENCE_CLASS, 'Rdm' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_rsvp_cfg', 'Rsvp.Interfaces.Interface.Bandwidth.Rdm',
[], [],
''' Configure RDM bandwidth parameters
''',
'rdm',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
],
'Cisco-IOS-XR-ip-rsvp-cfg',
'bandwidth',
_yang_ns._namespaces['Cisco-IOS-XR-ip-rsvp-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_rsvp_cfg'
),
},
'Rsvp.Interfaces.Interface.Authentication' : {
'meta_info' : _MetaInfoClass('Rsvp.Interfaces.Interface.Authentication',
False,
[
_MetaInfoClassMember('enable', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Enable or disable RSVP authentication
''',
'enable',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
_MetaInfoClassMember('key-chain', ATTRIBUTE, 'str' , None, None,
[(1, 32)], [],
''' Key chain to authenticate RSVP signalling
messages
''',
'key_chain',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
_MetaInfoClassMember('life-time', ATTRIBUTE, 'int' , None, None,
[('30', '86400')], [],
''' Life time (in seconds) for each security
association
''',
'life_time',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
_MetaInfoClassMember('window-size', ATTRIBUTE, 'int' , None, None,
[('1', '64')], [],
''' Window-size to limit number of out-of-order
messages
''',
'window_size',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
],
'Cisco-IOS-XR-ip-rsvp-cfg',
'authentication',
_yang_ns._namespaces['Cisco-IOS-XR-ip-rsvp-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_rsvp_cfg'
),
},
'Rsvp.Interfaces.Interface' : {
'meta_info' : _MetaInfoClass('Rsvp.Interfaces.Interface',
False,
[
_MetaInfoClassMember('name', ATTRIBUTE, 'str' , None, None,
[], [b'(([a-zA-Z0-9_]*\\d+/){3,4}\\d+)|(([a-zA-Z0-9_]*\\d+/){3,4}\\d+\\.\\d+)|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]*\\d+))|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]+))|([a-zA-Z0-9_-]*\\d+)|([a-zA-Z0-9_-]*\\d+\\.\\d+)|(mpls)|(dwdm)'],
''' Name of interface
''',
'name',
'Cisco-IOS-XR-ip-rsvp-cfg', True),
_MetaInfoClassMember('authentication', REFERENCE_CLASS, 'Authentication' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_rsvp_cfg', 'Rsvp.Interfaces.Interface.Authentication',
[], [],
''' Configure RSVP authentication
''',
'authentication',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
_MetaInfoClassMember('bandwidth', REFERENCE_CLASS, 'Bandwidth' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_rsvp_cfg', 'Rsvp.Interfaces.Interface.Bandwidth',
[], [],
''' Configure Bandwidth
''',
'bandwidth',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
_MetaInfoClassMember('enable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable RSVP on an interface
''',
'enable',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
_MetaInfoClassMember('if-signalling', REFERENCE_CLASS, 'IfSignalling' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_rsvp_cfg', 'Rsvp.Interfaces.Interface.IfSignalling',
[], [],
''' Configure RSVP signalling parameters
''',
'if_signalling',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
],
'Cisco-IOS-XR-ip-rsvp-cfg',
'interface',
_yang_ns._namespaces['Cisco-IOS-XR-ip-rsvp-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_rsvp_cfg'
),
},
'Rsvp.Interfaces' : {
'meta_info' : _MetaInfoClass('Rsvp.Interfaces',
False,
[
_MetaInfoClassMember('interface', REFERENCE_LIST, 'Interface' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_rsvp_cfg', 'Rsvp.Interfaces.Interface',
[], [],
''' Interface configuration
''',
'interface',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
],
'Cisco-IOS-XR-ip-rsvp-cfg',
'interfaces',
_yang_ns._namespaces['Cisco-IOS-XR-ip-rsvp-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_rsvp_cfg'
),
},
'Rsvp.Signalling.GlobalOutOfBand' : {
'meta_info' : _MetaInfoClass('Rsvp.Signalling.GlobalOutOfBand',
False,
[
_MetaInfoClassMember('vrf', ATTRIBUTE, 'str' , None, None,
[(1, 32)], [],
''' VRF used for out-of-band control signalling
''',
'vrf',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
],
'Cisco-IOS-XR-ip-rsvp-cfg',
'global-out-of-band',
_yang_ns._namespaces['Cisco-IOS-XR-ip-rsvp-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_rsvp_cfg'
),
},
'Rsvp.Signalling.GracefulRestart' : {
'meta_info' : _MetaInfoClass('Rsvp.Signalling.GracefulRestart',
False,
[
_MetaInfoClassMember('enable', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Enable RSVP graceful restart
''',
'enable',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
_MetaInfoClassMember('recovery-time', ATTRIBUTE, 'int' , None, None,
[('0', '3600')], [],
''' Graceful restart recovery time (seconds)
''',
'recovery_time',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
_MetaInfoClassMember('restart-time', ATTRIBUTE, 'int' , None, None,
[('60', '3600')], [],
''' Graceful restart time (seconds)
''',
'restart_time',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
],
'Cisco-IOS-XR-ip-rsvp-cfg',
'graceful-restart',
_yang_ns._namespaces['Cisco-IOS-XR-ip-rsvp-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_rsvp_cfg'
),
},
'Rsvp.Signalling.PrefixFiltering.DefaultDenyAction' : {
'meta_info' : _MetaInfoClass('Rsvp.Signalling.PrefixFiltering.DefaultDenyAction',
False,
[
_MetaInfoClassMember('drop', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Configure RSVP to drop packets when ACL match
yields a default (implicit) deny
''',
'drop',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
],
'Cisco-IOS-XR-ip-rsvp-cfg',
'default-deny-action',
_yang_ns._namespaces['Cisco-IOS-XR-ip-rsvp-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_rsvp_cfg'
),
},
'Rsvp.Signalling.PrefixFiltering' : {
'meta_info' : _MetaInfoClass('Rsvp.Signalling.PrefixFiltering',
False,
[
_MetaInfoClassMember('acl', ATTRIBUTE, 'str' , None, None,
[(1, 65)], [],
''' Configure an ACL to perform prefix filtering
of RSVP Router Alert messages
''',
'acl',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
_MetaInfoClassMember('default-deny-action', REFERENCE_CLASS, 'DefaultDenyAction' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_rsvp_cfg', 'Rsvp.Signalling.PrefixFiltering.DefaultDenyAction',
[], [],
''' Configure RSVP behaviour for scenarios where
ACL match yields a default (implicit) deny
''',
'default_deny_action',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
],
'Cisco-IOS-XR-ip-rsvp-cfg',
'prefix-filtering',
_yang_ns._namespaces['Cisco-IOS-XR-ip-rsvp-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_rsvp_cfg'
),
},
'Rsvp.Signalling.Pesr' : {
'meta_info' : _MetaInfoClass('Rsvp.Signalling.Pesr',
False,
[
_MetaInfoClassMember('disable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Disable RSVP PESR
''',
'disable',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
],
'Cisco-IOS-XR-ip-rsvp-cfg',
'pesr',
_yang_ns._namespaces['Cisco-IOS-XR-ip-rsvp-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_rsvp_cfg'
),
},
'Rsvp.Signalling.Checksum' : {
'meta_info' : _MetaInfoClass('Rsvp.Signalling.Checksum',
False,
[
_MetaInfoClassMember('disable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Disable RSVP message checksum computation
''',
'disable',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
],
'Cisco-IOS-XR-ip-rsvp-cfg',
'checksum',
_yang_ns._namespaces['Cisco-IOS-XR-ip-rsvp-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_rsvp_cfg'
),
},
'Rsvp.Signalling' : {
'meta_info' : _MetaInfoClass('Rsvp.Signalling',
False,
[
_MetaInfoClassMember('checksum', REFERENCE_CLASS, 'Checksum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_rsvp_cfg', 'Rsvp.Signalling.Checksum',
[], [],
''' RSVP message checksum computation
''',
'checksum',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
_MetaInfoClassMember('global-out-of-band', REFERENCE_CLASS, 'GlobalOutOfBand' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_rsvp_cfg', 'Rsvp.Signalling.GlobalOutOfBand',
[], [],
''' Configure out-of-band signalling parameters
''',
'global_out_of_band',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
_MetaInfoClassMember('graceful-restart', REFERENCE_CLASS, 'GracefulRestart' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_rsvp_cfg', 'Rsvp.Signalling.GracefulRestart',
[], [],
''' Configure RSVP Graceful-Restart parameters
''',
'graceful_restart',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
_MetaInfoClassMember('hello-graceful-restart-interval', ATTRIBUTE, 'int' , None, None,
[('3000', '30000')], [],
''' Configure interval between successive Hello
messages
''',
'hello_graceful_restart_interval',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
_MetaInfoClassMember('hello-graceful-restart-misses', ATTRIBUTE, 'int' , None, None,
[('1', '10')], [],
''' Configure max number of consecutive missed
Hello messages
''',
'hello_graceful_restart_misses',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
_MetaInfoClassMember('pesr', REFERENCE_CLASS, 'Pesr' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_rsvp_cfg', 'Rsvp.Signalling.Pesr',
[], [],
''' Sending Path Error with State-Removal flag
''',
'pesr',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
_MetaInfoClassMember('prefix-filtering', REFERENCE_CLASS, 'PrefixFiltering' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_rsvp_cfg', 'Rsvp.Signalling.PrefixFiltering',
[], [],
''' Configure prefix filtering parameters
''',
'prefix_filtering',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
],
'Cisco-IOS-XR-ip-rsvp-cfg',
'signalling',
_yang_ns._namespaces['Cisco-IOS-XR-ip-rsvp-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_rsvp_cfg'
),
},
'Rsvp.Authentication' : {
'meta_info' : _MetaInfoClass('Rsvp.Authentication',
False,
[
_MetaInfoClassMember('enable', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Enable or disable RSVP authentication
''',
'enable',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
_MetaInfoClassMember('key-chain', ATTRIBUTE, 'str' , None, None,
[(1, 32)], [],
''' Key chain to authenticate RSVP signalling
messages
''',
'key_chain',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
_MetaInfoClassMember('life-time', ATTRIBUTE, 'int' , None, None,
[('30', '86400')], [],
''' Life time (in seconds) for each security
association
''',
'life_time',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
_MetaInfoClassMember('window-size', ATTRIBUTE, 'int' , None, None,
[('1', '64')], [],
''' Window-size to limit number of out-of-order
messages
''',
'window_size',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
],
'Cisco-IOS-XR-ip-rsvp-cfg',
'authentication',
_yang_ns._namespaces['Cisco-IOS-XR-ip-rsvp-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_rsvp_cfg'
),
},
'Rsvp' : {
'meta_info' : _MetaInfoClass('Rsvp',
False,
[
_MetaInfoClassMember('authentication', REFERENCE_CLASS, 'Authentication' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_rsvp_cfg', 'Rsvp.Authentication',
[], [],
''' Configure RSVP authentication
''',
'authentication',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
_MetaInfoClassMember('controllers', REFERENCE_CLASS, 'Controllers' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_rsvp_cfg', 'Rsvp.Controllers',
[], [],
''' Controller table
''',
'controllers',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
_MetaInfoClassMember('global-bandwidth', REFERENCE_CLASS, 'GlobalBandwidth' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_rsvp_cfg', 'Rsvp.GlobalBandwidth',
[], [],
''' Configure Global Bandwidth Parameters
''',
'global_bandwidth',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
_MetaInfoClassMember('global-logging', REFERENCE_CLASS, 'GlobalLogging' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_rsvp_cfg', 'Rsvp.GlobalLogging',
[], [],
''' Global Logging
''',
'global_logging',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
_MetaInfoClassMember('interfaces', REFERENCE_CLASS, 'Interfaces' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_rsvp_cfg', 'Rsvp.Interfaces',
[], [],
''' Interface table
''',
'interfaces',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
_MetaInfoClassMember('neighbors', REFERENCE_CLASS, 'Neighbors' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_rsvp_cfg', 'Rsvp.Neighbors',
[], [],
''' RSVP Neighbor Table
''',
'neighbors',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
_MetaInfoClassMember('signalling', REFERENCE_CLASS, 'Signalling' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_rsvp_cfg', 'Rsvp.Signalling',
[], [],
''' Configure Global RSVP signalling parameters
''',
'signalling',
'Cisco-IOS-XR-ip-rsvp-cfg', False),
],
'Cisco-IOS-XR-ip-rsvp-cfg',
'rsvp',
_yang_ns._namespaces['Cisco-IOS-XR-ip-rsvp-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_rsvp_cfg'
),
},
}
_meta_table['Rsvp.Neighbors.Neighbor.Authentication']['meta_info'].parent =_meta_table['Rsvp.Neighbors.Neighbor']['meta_info']
_meta_table['Rsvp.Neighbors.Neighbor']['meta_info'].parent =_meta_table['Rsvp.Neighbors']['meta_info']
_meta_table['Rsvp.Controllers.Controller.CntlSignalling.OutOfBand']['meta_info'].parent =_meta_table['Rsvp.Controllers.Controller.CntlSignalling']['meta_info']
_meta_table['Rsvp.Controllers.Controller.CntlSignalling']['meta_info'].parent =_meta_table['Rsvp.Controllers.Controller']['meta_info']
_meta_table['Rsvp.Controllers.Controller']['meta_info'].parent =_meta_table['Rsvp.Controllers']['meta_info']
_meta_table['Rsvp.GlobalBandwidth.DefaultInterfacePercent.Mam']['meta_info'].parent =_meta_table['Rsvp.GlobalBandwidth.DefaultInterfacePercent']['meta_info']
_meta_table['Rsvp.GlobalBandwidth.DefaultInterfacePercent.Rdm']['meta_info'].parent =_meta_table['Rsvp.GlobalBandwidth.DefaultInterfacePercent']['meta_info']
_meta_table['Rsvp.GlobalBandwidth.DefaultInterfacePercent']['meta_info'].parent =_meta_table['Rsvp.GlobalBandwidth']['meta_info']
_meta_table['Rsvp.Interfaces.Interface.IfSignalling.RefreshReduction']['meta_info'].parent =_meta_table['Rsvp.Interfaces.Interface.IfSignalling']['meta_info']
_meta_table['Rsvp.Interfaces.Interface.IfSignalling.IntervalRate']['meta_info'].parent =_meta_table['Rsvp.Interfaces.Interface.IfSignalling']['meta_info']
_meta_table['Rsvp.Interfaces.Interface.IfSignalling.OutOfBand']['meta_info'].parent =_meta_table['Rsvp.Interfaces.Interface.IfSignalling']['meta_info']
_meta_table['Rsvp.Interfaces.Interface.Bandwidth.Mam']['meta_info'].parent =_meta_table['Rsvp.Interfaces.Interface.Bandwidth']['meta_info']
_meta_table['Rsvp.Interfaces.Interface.Bandwidth.Rdm']['meta_info'].parent =_meta_table['Rsvp.Interfaces.Interface.Bandwidth']['meta_info']
_meta_table['Rsvp.Interfaces.Interface.IfSignalling']['meta_info'].parent =_meta_table['Rsvp.Interfaces.Interface']['meta_info']
_meta_table['Rsvp.Interfaces.Interface.Bandwidth']['meta_info'].parent =_meta_table['Rsvp.Interfaces.Interface']['meta_info']
_meta_table['Rsvp.Interfaces.Interface.Authentication']['meta_info'].parent =_meta_table['Rsvp.Interfaces.Interface']['meta_info']
_meta_table['Rsvp.Interfaces.Interface']['meta_info'].parent =_meta_table['Rsvp.Interfaces']['meta_info']
_meta_table['Rsvp.Signalling.PrefixFiltering.DefaultDenyAction']['meta_info'].parent =_meta_table['Rsvp.Signalling.PrefixFiltering']['meta_info']
_meta_table['Rsvp.Signalling.GlobalOutOfBand']['meta_info'].parent =_meta_table['Rsvp.Signalling']['meta_info']
_meta_table['Rsvp.Signalling.GracefulRestart']['meta_info'].parent =_meta_table['Rsvp.Signalling']['meta_info']
_meta_table['Rsvp.Signalling.PrefixFiltering']['meta_info'].parent =_meta_table['Rsvp.Signalling']['meta_info']
_meta_table['Rsvp.Signalling.Pesr']['meta_info'].parent =_meta_table['Rsvp.Signalling']['meta_info']
_meta_table['Rsvp.Signalling.Checksum']['meta_info'].parent =_meta_table['Rsvp.Signalling']['meta_info']
_meta_table['Rsvp.Neighbors']['meta_info'].parent =_meta_table['Rsvp']['meta_info']
_meta_table['Rsvp.Controllers']['meta_info'].parent =_meta_table['Rsvp']['meta_info']
_meta_table['Rsvp.GlobalLogging']['meta_info'].parent =_meta_table['Rsvp']['meta_info']
_meta_table['Rsvp.GlobalBandwidth']['meta_info'].parent =_meta_table['Rsvp']['meta_info']
_meta_table['Rsvp.Interfaces']['meta_info'].parent =_meta_table['Rsvp']['meta_info']
_meta_table['Rsvp.Signalling']['meta_info'].parent =_meta_table['Rsvp']['meta_info']
_meta_table['Rsvp.Authentication']['meta_info'].parent =_meta_table['Rsvp']['meta_info']
|
|
"""
Tutorial followed from https://www.kaggle.com/gzuidhof/full-preprocessing-tutorial
"""
import numpy as np # linear algebra
np.set_printoptions(threshold=np.inf)
import dicom
import os
import scipy.ndimage as ndimage
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import sys
from skimage import measure, morphology, segmentation
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
import skimage
from skimage.morphology import ball, disk, dilation, binary_erosion, remove_small_objects, erosion, closing, reconstruction, binary_closing
from skimage.measure import label,regionprops, perimeter
from skimage.morphology import binary_dilation, binary_opening
from skimage.filters import roberts, sobel
from skimage import feature
from skimage.segmentation import clear_border
from skimage import data
import scipy.misc
from subprocess import check_output
# Some constants
HU_MIN = 0
HU_MAX = 1424
# zero_center() below expects a PIXEL_MEAN constant; the value here is an
# assumption taken from the referenced Kaggle preprocessing tutorial.
PIXEL_MEAN = 0.25
# Load the scans in given folder path
def load_scan(path):
slices = [dicom.read_file(path + '/' + s) for s in os.listdir(path)]
slices.sort(key=lambda x: float(x.ImagePositionPatient[2]))
try:
slice_thickness = np.abs(slices[0].ImagePositionPatient[2] - slices[1].ImagePositionPatient[2])
except:
slice_thickness = np.abs(slices[0].SliceLocation - slices[1].SliceLocation)
for s in slices:
s.SliceThickness = slice_thickness
return slices
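# Note: load_scan() sorts the slices by their z position
# (ImagePositionPatient[2]) and derives SliceThickness from the gap between
# the first two slices, falling back to SliceLocation when needed.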
# converts to Hounsfield Unit (HU), which is a measure of radiodensity used in CT scans
def get_pixels_hu(slices):
image = np.stack([s.pixel_array for s in slices])
    # Convert to int16 (pixel_array is sometimes stored as uint16);
    # this is safe because values are always low enough (< 32k)
# print(image[np.shape(image)[0]/2, np.shape(image)[1]/2, :])
image = image.astype(np.int16)
# print('-'*100)
# print(image[np.shape(image)[0]/2, np.shape(image)[1]/2, :])
# Set outside-of-scan pixels to 0
# The intercept is usually -1024, so air is approximately 0
image[image == -2000] = 0
# Convert to Hounsfield units (HU)
for slice_number in range(len(slices)):
intercept = slices[slice_number].RescaleIntercept
slope = slices[slice_number].RescaleSlope
# print slope, intercept
if slope != 1:
image[slice_number] = image[slice_number].astype(np.float64)*slope
image[slice_number] = image[slice_number].astype(np.int16)
image[slice_number] += np.int16(intercept)
image[slice_number] += 1024
return np.array(image, dtype=np.int16)
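# Note on the conversion above: HU = pixel_value * RescaleSlope + RescaleIntercept.
# The extra +1024 shifts the scale so that air (about -1000 HU) lands near 0 and
# dense bone (about +400 HU) lands near HU_MAX = 1424, which is why the constants
# use a 0..1424 range; e.g. the 604 threshold in get_segmented_lungs() corresponds
# to roughly -420 HU on the standard scale.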
# displays an example of one patient's scan
def showOneExample(data, plot=False, image=False):
first_patient = load_scan(data)
first_patient_pixels = np.asarray(get_pixels_hu(first_patient))
if (not plot and not image):
return first_patient, first_patient_pixels
if plot:
fig = plt.figure()
fig.suptitle('Histogram frequencies from different locations for one patient')
fig.subplots_adjust(hspace=0.5)
a = fig.add_subplot(2, 2, 1)
a.set_title("Scan from 20/128 pixels", fontsize=8)
plt.xlabel("Hounsfield Units (HU)")
plt.ylabel("Frequency")
plt.hist(first_patient_pixels.flatten(), bins=20, color='c')
b = fig.add_subplot(2, 2, 2)
b.set_title("Scan from 50/128 pixels", fontsize=8)
plt.xlabel("Hounsfield Units (HU)")
plt.ylabel("Frequency")
plt.hist(first_patient_pixels.flatten(), bins=50, color='c')
c = fig.add_subplot(2, 2, 3)
c.set_title("Scan from 80/128 pixels", fontsize=8)
plt.xlabel("Hounsfield Units (HU)")
plt.ylabel("Frequency")
plt.hist(first_patient_pixels.flatten(), bins=80, color='c')
d = fig.add_subplot(2, 2, 4)
d.set_title("Scan from 110/128 pixels", fontsize=8)
plt.xlabel("Hounsfield Units (HU)")
plt.ylabel("Frequency")
plt.hist(first_patient_pixels.flatten(), bins=110, color='c')
plt.show()
if image:
fig = plt.figure()
fig.suptitle('Scans from different locations for one patient')
fig.subplots_adjust(hspace=0.5)
a = fig.add_subplot(2, 2, 1)
a.set_xlabel("Scan from 20/128 pixels")
plt.imshow(first_patient_pixels[20], cmap=plt.cm.gray)
b = fig.add_subplot(2, 2, 2)
b.set_xlabel("Scan from 50/128 pixels")
plt.imshow(first_patient_pixels[50], cmap=plt.cm.gray)
c = fig.add_subplot(2, 2, 3)
c.set_xlabel("Scan from 80/128 pixels")
plt.imshow(first_patient_pixels[80], cmap=plt.cm.gray)
d = fig.add_subplot(2, 2, 4)
d.set_xlabel("Scan from 110/128 pixels")
plt.imshow(first_patient_pixels[110], cmap=plt.cm.gray)
plt.show()
return first_patient, first_patient_pixels
# resamples scans to isotropic resolution set by new_spacing
def resample(image, scan, new_spacing=[1, 1, 1]):
# Determine current pixel spacing
spacing = np.array([scan[0].SliceThickness] + scan[0].PixelSpacing, dtype=np.float32)
resize_factor = spacing / new_spacing
new_real_shape = image.shape * resize_factor
new_shape = np.round(new_real_shape)
real_resize_factor = new_shape / image.shape
new_spacing = spacing / real_resize_factor
image = ndimage.interpolation.zoom(image, real_resize_factor, mode='nearest')
return image, new_spacing
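# Example (sketch) of resampling one patient to 1mm isotropic voxels, assuming
# INPUT_FOLDER and patients are set up as in preprocessAll() below:
#
#   slices = load_scan(INPUT_FOLDER + patients[0])
#   pixels = get_pixels_hu(slices)
#   resampled, new_spacing = resample(pixels, slices, new_spacing=[1, 1, 1])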
# 3d plot the image
# def plot_3d(image, threshold=-300, show=False):
# # Position the scan upright,
# # so the head of the patient would be at the top facing the camera
# p = image.transpose(2, 1, 0)
# verts, faces = measure.marching_cubes(p, threshold)
# # verts, faces = measure.marching_cubes(p, None)
# fig = plt.figure(figsize=(10, 10))
# ax = fig.add_subplot(111, projection='3d')
# # Fancy indexing: `verts[faces]` to generate a collection of triangles
# mesh = Poly3DCollection(verts[faces], alpha=0.70)
# face_color = [0.45, 0.45, 0.75]
# mesh.set_facecolor(face_color)
# ax.add_collection3d(mesh)
# ax.set_xlim(0, p.shape[0])
# ax.set_ylim(0, p.shape[1])
# ax.set_zlim(0, p.shape[2])
# plt.show()
def plot_3d(image, threshold=-300):
# Position the scan upright,
# so the head of the patient would be at the top facing the camera
p = image.transpose(2,1,0)
p = p[:,:,::-1]
verts, faces, _, _ = measure.marching_cubes_lewiner(p, threshold)
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(111, projection='3d')
# Fancy indexing: `verts[faces]` to generate a collection of triangles
mesh = Poly3DCollection(verts[faces], alpha=0.1)
face_color = [0.5, 0.5, 1]
mesh.set_facecolor(face_color)
ax.add_collection3d(mesh)
ax.set_xlim(0, p.shape[0])
ax.set_ylim(0, p.shape[1])
ax.set_zlim(0, p.shape[2])
plt.show()
def largest_label_volume(im, bg=-1):
vals, counts = np.unique(im, return_counts=True)
counts = counts[vals != bg]
vals = vals[vals != bg]
if len(counts) > 0:
return vals[np.argmax(counts)]
else:
return None
# def segment_lung_mask(image, fill_lung_structures=True):
# # not actually binary, but 1 and 2.
# # 0 is treated as background, which we do not want
# binary_image = np.array(image > -320, dtype=np.int8) + 1
# labels = measure.label(binary_image)
# # Pick the pixel in the very corner to determine which label is air.
# # Improvement: Pick multiple background labels from around the patient
# # More resistant to "trays" on which the patient lays cutting the air
# # around the person in half
# background_label1 = labels[0, 0, 0]
# background_label2 = labels[0, 0, -1]
# background_label3 = labels[0, -1, 0]
# background_label4 = labels[0, -1, -1]
# background_label5 = labels[-1, 0, 0]
# background_label6 = labels[-1, 0, -1]
# background_label7 = labels[-1, -1, 0]
# background_label8 = labels[-1, -1, -1]
# # Fill the air around the person
# binary_image[background_label1 == labels] = 2
# binary_image[background_label2 == labels] = 2
# binary_image[background_label3 == labels] = 2
# binary_image[background_label4 == labels] = 2
# binary_image[background_label5 == labels] = 2
# binary_image[background_label6 == labels] = 2
# binary_image[background_label7 == labels] = 2
# binary_image[background_label8 == labels] = 2
# # Method of filling the lung structures (that is superior to something like
# # morphological closing)
# if fill_lung_structures:
# # For every slice we determine the largest solid structure
# for i, axial_slice in enumerate(binary_image):
# axial_slice = axial_slice - 1
# labeling = measure.label(axial_slice)
# l_max = largest_label_volume(labeling, bg=0)
# if l_max is not None: # This slice contains some lung
# binary_image[i][labeling != l_max] = 1
# binary_image -= 1 # Make the image actual binary
# binary_image = 1 - binary_image # Invert it, lungs are now 1
# # Remove other air pockets insided body
# labels = measure.label(binary_image, background=0)
# l_max = largest_label_volume(labels, bg=0)
# if l_max is not None: # There are air pockets
# binary_image[labels != l_max] = 0
# return binary_image
def zero_center(image):
image = image - PIXEL_MEAN
return image
# # gives a run through of the preprocessing tutorial
# def testScans():
# slices = load_scan(INPUT_FOLDER + patients[0])
# hu_slices = get_pixels_hu(slices)
# first_patient, first_patient_pixels = showOneExample(INPUT_FOLDER + patients[0])
# pix_resampled, spacing = resample(first_patient_pixels, first_patient, [1, 1, 1])
# # print("Shape before resampling\t", first_patient_pixels.shape) # (128, 512, 512)
# # print("Shape after resampling\t", pix_resampled.shape) # (320, 347, 347)
# # plot_3d(pix_resampled, 400, show=True)
# segmented_lungs = segment_lung_mask(pix_resampled, False)
# segmented_lungs_fill = segment_lung_mask(pix_resampled, True)
# show3D = False
# if show3D:
# # segmented_lungs has no air.
# plot_3d(segmented_lungs, 0)
# # plot_3d(segmented_lungs_fill, 0)
# # plot_3d(segmented_lungs_fill - segmented_lungs, 0)
def normalize(image):
MIN_BOUND = float(HU_MIN)
MAX_BOUND = float(HU_MAX)
print(np.max(image), ' is max of image in normalize')
image = (image - MIN_BOUND) / (MAX_BOUND - MIN_BOUND)
image[image > 1] = 1.
image[image < 0] = 0.
return np.float32(image)
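# normalize() maps the shifted HU range [HU_MIN, HU_MAX] linearly onto [0, 1]
# and clips everything outside that range.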
def get_segmented_lungs(im, plot=False):
'''
    This function segments the lungs from the given 2D slice.
'''
if plot == True:
f, plots = plt.subplots(8, 1, figsize=(5, 40))
'''
Step 1: Convert into a binary image.
'''
binary = im < 604
if plot == True:
plots[0].axis('off')
plots[0].imshow(binary, cmap=plt.cm.bone)
'''
Step 2: Remove the blobs connected to the border of the image.
'''
cleared = clear_border(binary)
if plot == True:
plots[1].axis('off')
plots[1].imshow(cleared, cmap=plt.cm.bone)
'''
Step 3: Label the image.
'''
label_image = label(cleared)
if plot == True:
plots[2].axis('off')
plots[2].imshow(label_image, cmap=plt.cm.bone)
'''
Step 4: Keep the labels with 2 largest areas.
'''
areas = [r.area for r in regionprops(label_image)]
areas.sort()
if len(areas) > 2:
for region in regionprops(label_image):
if region.area < areas[-2]:
for coordinates in region.coords:
label_image[coordinates[0], coordinates[1]] = 0
# print label_image[71]
binary = label_image > 0
if plot == True:
plots[3].axis('off')
plots[3].imshow(binary, cmap=plt.cm.bone)
'''
    Step 5: Erosion operation with a disk of radius 2. This operation
    separates the lung nodules attached to the blood vessels.
'''
selem = disk(2)
binary = binary_erosion(binary, selem)
if plot == True:
plots[4].axis('off')
plots[4].imshow(binary, cmap=plt.cm.bone)
'''
Step 6: Closure operation with a disk of radius 10. This operation is
to keep nodules attached to the lung wall.
'''
selem = disk(10)
binary = binary_closing(binary, selem)
if plot == True:
plots[5].axis('off')
plots[5].imshow(binary, cmap=plt.cm.bone)
'''
Step 7: Fill in the small holes inside the binary mask of lungs.
'''
edges = roberts(binary)
binary = ndimage.binary_fill_holes(edges)
if plot == True:
plots[6].axis('off')
plots[6].imshow(binary, cmap=plt.cm.bone)
'''
Step 8: Superimpose the binary mask on the input image.
'''
get_high_vals = binary == 0
im[get_high_vals] = HU_MIN
if plot == True:
plots[7].axis('off')
plots[7].imshow(im, cmap=plt.cm.bone)
return im
def clip_edges(image):
x_min = 0
x_max = image.shape[0]
y_min = 0
y_max = image.shape[1]
z_min = 0
z_max = image.shape[2]
for x in range(0, image.shape[0]):
if np.all(image[x,:,:] < 0.0001):
continue
else:
x_min = max(0,x-1)
break
for x in range(image.shape[0]-1, -1, -1):
if np.all(image[x,:,:] < 0.0001):
continue
else:
x_max = min(x+2,image.shape[0])
break
image = image[x_min:x_max, :, :]
for y in range(0, image.shape[1]):
if np.all(image[:,y,:] < 0.0001):
continue
else:
y_min = max(0,y-1)
break
for y in range(image.shape[1]-1, -1, -1):
if np.all(image[:,y,:] < 0.0001):
continue
else:
y_max = min(y+2,image.shape[1])
break
image = image[:, y_min:y_max, :]
for z in range(0, image.shape[2]):
if np.all(image[:,:,z] < 0.0001):
continue
else:
z_min = max(0,z-1)
break
for z in range(image.shape[2]-1, -1, -1):
        if np.all(image[:,:,z] < 0.0001):  # same threshold as the other axes
continue
else:
z_max = min(z+2,image.shape[2])
break
image = image[:, :, z_min:z_max]
return image
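# clip_edges() crops the volume to the bounding box of voxels with intensity
# >= 0.0001 along each axis, keeping a one-voxel margin on each side where
# possible.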
def preprocessPatient(patient):
first_patient = load_scan(patient)
pix_resampled = np.asarray(get_pixels_hu(first_patient))
# print pix_resampled[70]
# plt.imshow(pix_resampled[200], cmap='gray')
# plt.show()
# sys.exit()
# test_segmented, test_lungfilter, test_outline, test_watershed, test_sobel_gradient, test_marker_internal, test_marker_external, test_marker_watershed = seperate_lungs(first_patient_pixels[65])
# test_segmented = get_segmented_lungs(first_patient_pixels[65])
# pix_resampled, _ = resample(first_patient_pixels, first_patient, [1, 1, 1])
# print pix_resampled[70]
# sys.exit()
segmented_ct_scan = np.asarray([get_segmented_lungs(slice) for slice in pix_resampled])
print ("Segmented Lung")
# plt.ihow()
# segmented_ct_scan[segmented_ct_scan < 604] = HU_MIN
# print segmented_ct_scan[70]
# plt.imshow(segmented_ct_scan[71], cmap='gray')
# plt.show()
# plt.s
# plot_3d(segmented_ct_scan, 604)
# sys.exit()
    print(segmented_ct_scan.shape)
# plt.imshow(test_segmented, cmap='gray')
# plt.show()
# print ("Watershed Image")
# plt.imshow(test_watershed, cmap='gray')
# plt.show()
# print ("Outline after reinclusion")
# plt.imshow(test_outline, cmap='gray')
# plt.show()
# print ("Lungfilter after clot.imsing")
# plt.imshow(test_lungfilter, cmap='gray')
# plt.show()
# plt.show()
# plt.imshow(segmented_ct_scan[65], cmap='gray')
# plt.show()
# print segmented_ct_scan.shape
# print segmented_ct_scan.min()
# print segmented_ct_scan.max()
# plot_3d(segmented_ct_scan, -400)
'''
selem = ball(2)
binary = binary_closing(segmented_ct_scan, selem)
label_scan = label(binary)
# print np.sum(label_scan)
# print binary[70]
# print np.all(label_scan)
areas = [r.area for r in regionprops(label_scan)]
areas.sort()
# print len(areas)
components = 4
for r in regionprops(label_scan):
max_x, max_y, max_z = 0, 0, 0
min_x, min_y, min_z = 1000, 1000, 1000
for c in r.coords:
max_z = max(c[0], max_z)
max_y = max(c[1], max_y)
max_x = max(c[2], max_x)
min_z = min(c[0], min_z)
min_y = min(c[1], min_y)
min_x = min(c[2], min_x)
if (min_z == max_z or min_y == max_y or min_x == max_x or r.area > areas[-components-1]):
for c in r.coords:
segmented_ct_scan[c[0], c[1], c[2]] = HU_MIN
else:
index = (max((max_x - min_x), (max_y - min_y), (max_z - min_z))) / (min((max_x - min_x), (max_y - min_y) , (max_z - min_z)))
# print segmented_ct_scan[70]
segmented_ct_scan = clip_edges(segmented_ct_scan)
segmented_ct_scan = normalize(segmented_ct_scan)
# print segmented_ct_scan.shape
print segmented_ct_scan.shape
# print segmented_ct_scan.min()
# print segmented_ct_scan.max()
# plot_3d(segmented_ct_scan, (604.-HU_MIN)/(HU_MAX - HU_MIN))
# sys.exit()
# pix_resampled, spacing = resample(first_patient_pixels, first_patient, [1, 1, 1])
# norm_lung_data = normalize(pix_resampled)
# segmented_lungs_fill = segment_lung_mask(pix_resampled, True)
# norm_lung_data = norm_lung_data * segmented_lungs_fill
'''
return segmented_ct_scan
# makes a directory
def ensure_dir(file_path):
directory = os.path.dirname(file_path)
if not os.path.exists(directory):
os.makedirs(directory)
def preprocessAll():
i = 0
for STAGE in range(1,3):
path = '/media/ninja2/Seagate Backup Plus Drive/kaggle-lung-masks/%s-' % STAGE
INPUT_FOLDER = '/media/ninja2/Seagate Backup Plus Drive/CS231N-project/stage%s/' % STAGE
patients = os.listdir(INPUT_FOLDER)
patients.sort()
# ensure_dir(path)
# n_slices = 64.0
# x_dim = 128.0
# y_dim = 128.0
for p in range(len(patients)): #range(len(patients)):
i += 1
if i <= 1180:
continue
# if p != 402: continue
print p
print patients[p]
# if p == 1137: continue
x = preprocessPatient(INPUT_FOLDER + patients[p])
# x_resample = x.astype(np.float16)
x_resample = np.float16(ndimage.zoom(x, (0.5, 0.5, 0.5), order=0))
# plt.imshow(x_resample[30].astype(np.float32), cmap='gray')
# plt.show()
# sys.exit()
print x_resample.shape
np.save(path + patients[p], x_resample)
print('wrote patient' + path +patients[p])
if __name__ == "__main__":
# testScans()
preprocessAll()
|
|
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from contextlib import contextmanager
import os
import sys
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
import rospkg
import rospkg.os_detect
import unittest
from mock import patch
from mock import DEFAULT
from rosdep2 import main
from rosdep2.main import rosdep_main
from rosdep2.main import setup_proxy_opener
GITHUB_BASE_URL = 'https://github.com/ros/rosdistro/raw/master/rosdep/base.yaml'
GITHUB_PYTHON_URL = 'https://github.com/ros/rosdistro/raw/master/rosdep/python.yaml'
def get_test_dir():
return os.path.abspath(os.path.dirname(__file__))
def get_test_tree_dir():
return os.path.abspath(os.path.join(get_test_dir(), 'tree'))
def get_test_catkin_tree_dir():
return os.path.abspath(os.path.join(get_test_tree_dir(), 'catkin'))
def get_cache_dir():
p = os.path.join(get_test_dir(), 'sources_cache')
assert os.path.isdir(p)
return p
@contextmanager
def fakeout():
    realstdout = sys.stdout
    realstderr = sys.stderr
    fakestdout = StringIO()
    fakestderr = StringIO()
    sys.stdout = fakestdout
    sys.stderr = fakestderr
    try:
        yield fakestdout, fakestderr
    finally:
        # Restore the real streams even if the caller's body raises.
        sys.stdout = realstdout
        sys.stderr = realstderr
# the goal of these tests is only to test that we are wired into the
# APIs. More exhaustive tests are at the unit level.
class TestRosdepMain(unittest.TestCase):
def setUp(self):
if 'ROSDEP_DEBUG' in os.environ:
del os.environ['ROSDEP_DEBUG']
self.old_rr = rospkg.get_ros_root()
self.old_rpp = rospkg.get_ros_package_path()
if 'ROS_ROOT' in os.environ:
del os.environ['ROS_ROOT']
os.environ['ROS_PACKAGE_PATH'] = os.path.join(get_test_tree_dir())
def tearDown(self):
if self.old_rr is not None:
os.environ['ROS_ROOT'] = self.old_rr
if self.old_rpp is not None:
os.environ['ROS_PACKAGE_PATH'] = self.old_rpp
def test_bad_commands(self):
sources_cache = get_cache_dir()
cmd_extras = ['-c', sources_cache]
for commands in [[], ['fake', 'something'], ['check'], ['install', '-a', 'rospack_fake'],
['check', 'rospack_fake', '--os', 'ubuntulucid'],
]:
try:
rosdep_main(commands + cmd_extras)
assert False, 'system exit should have occurred'
except SystemExit:
pass
def test_check(self):
sources_cache = get_cache_dir()
cmd_extras = ['-c', sources_cache]
with fakeout() as b:
try:
rosdep_main(['check', 'python_dep'] + cmd_extras)
except SystemExit:
assert False, 'system exit occurred: %s\n%s' % (b[0].getvalue(), b[1].getvalue())
stdout, stderr = b
assert stdout.getvalue().strip() == 'All system dependencies have been satisified', stdout.getvalue()
assert not stderr.getvalue(), stderr.getvalue()
try:
osd = rospkg.os_detect.OsDetect()
override = '%s:%s' % (osd.get_name(), osd.get_codename())
with fakeout() as b:
rosdep_main(['check', 'python_dep', '--os', override] + cmd_extras)
stdout, stderr = b
assert stdout.getvalue().strip() == 'All system dependencies have been satisified'
assert not stderr.getvalue(), stderr.getvalue()
except SystemExit:
assert False, 'system exit occurred'
# this used to abort, but now rosdep assumes validity for even empty stack args
try:
with fakeout() as b:
rosdep_main(['check', 'packageless'] + cmd_extras)
stdout, stderr = b
assert stdout.getvalue().strip() == 'All system dependencies have been satisified'
assert not stderr.getvalue(), stderr.getvalue()
except SystemExit:
assert False, 'system exit occurred'
try:
rosdep_main(['check', 'nonexistent'] + cmd_extras)
assert False, 'system exit should have occurred'
except SystemExit:
pass
def test_install(self):
sources_cache = get_cache_dir()
cmd_extras = ['-c', sources_cache]
catkin_tree = get_test_catkin_tree_dir()
try:
# python must have already been installed
with fakeout() as b:
rosdep_main(['install', 'python_dep'] + cmd_extras)
stdout, stderr = b
assert 'All required rosdeps installed' in stdout.getvalue(), stdout.getvalue()
assert not stderr.getvalue(), stderr.getvalue()
with fakeout() as b:
rosdep_main(['install', 'python_dep', '-r'] + cmd_extras)
stdout, stderr = b
assert 'All required rosdeps installed' in stdout.getvalue(), stdout.getvalue()
assert not stderr.getvalue(), stderr.getvalue()
with fakeout() as b:
rosdep_main([
'install', '-s', '-i',
'--os', 'ubuntu:lucid',
'--rosdistro', 'fuerte',
'--from-paths', catkin_tree
] + cmd_extras)
stdout, stderr = b
expected = [
'#[apt] Installation commands:',
' sudo -H apt-get install ros-fuerte-catkin',
' sudo -H apt-get install libboost1.40-all-dev'
]
lines = stdout.getvalue().splitlines()
assert set(lines) == set(expected), lines
assert not stderr.getvalue(), stderr.getvalue()
except SystemExit:
assert False, 'system exit occurred: ' + b[1].getvalue()
try:
rosdep_main(['install', 'nonexistent'])
assert False, 'system exit should have occurred'
except SystemExit:
pass
def test_where_defined(self):
try:
sources_cache = get_cache_dir()
expected = GITHUB_PYTHON_URL
for command in (['where_defined', 'testpython'], ['where_defined', 'testpython']):
with fakeout() as b:
# set os to ubuntu so this test works on different platforms
rosdep_main(command + ['-c', sources_cache, '--os=ubuntu:lucid'])
stdout, stderr = b
output = stdout.getvalue().strip()
assert output == expected, output
except SystemExit:
assert False, 'system exit occurred'
def test_what_needs(self):
try:
sources_cache = get_cache_dir()
cmd_extras = ['-c', sources_cache]
expected = ['python_dep']
with fakeout() as b:
rosdep_main(['what-needs', 'testpython'] + cmd_extras)
stdout, stderr = b
output = stdout.getvalue().strip()
assert output.split('\n') == expected
expected = ['python_dep']
with fakeout() as b:
rosdep_main(['what_needs', 'testpython', '--os', 'ubuntu:lucid', '--verbose'] + cmd_extras)
stdout, stderr = b
output = stdout.getvalue().strip()
assert output.split('\n') == expected
except SystemExit:
assert False, 'system exit occurred'
def test_keys(self):
sources_cache = get_cache_dir()
cmd_extras = ['-c', sources_cache]
try:
with fakeout() as b:
rosdep_main(['keys', 'rospack_fake'] + cmd_extras)
stdout, stderr = b
assert stdout.getvalue().strip() == 'testtinyxml', stdout.getvalue()
assert not stderr.getvalue(), stderr.getvalue()
with fakeout() as b:
rosdep_main(['keys', 'rospack_fake', '--os', 'ubuntu:lucid', '--verbose'] + cmd_extras)
stdout, stderr = b
assert stdout.getvalue().strip() == 'testtinyxml', stdout.getvalue()
except SystemExit:
assert False, 'system exit occurred'
try:
rosdep_main(['keys', 'nonexistent'] + cmd_extras)
assert False, 'system exit should have occurred'
except SystemExit:
pass
@patch('rosdep2.main.install_opener')
@patch('rosdep2.main.build_opener')
@patch('rosdep2.main.HTTPBasicAuthHandler')
@patch('rosdep2.main.ProxyHandler')
def test_proxy_detection(self, proxy, bah, build, install):
with patch.dict('os.environ', {'http_proxy': 'something'}, clear=True):
setup_proxy_opener()
proxy.assert_called_with({'http': 'something'})
with patch.dict('os.environ', {'https_proxy': 'somethings'}, clear=True):
setup_proxy_opener()
proxy.assert_called_with({'https': 'somethings'})
|
|
# -*- coding: utf-8 -*-
"""Test layout."""
#------------------------------------------------------------------------------
# Imports
#------------------------------------------------------------------------------
from itertools import product
import numpy as np
from numpy.testing import assert_equal as ae
from numpy.testing import assert_allclose as ac
from ..base import BaseVisual, BaseCanvas
from ..interact import Grid, Boxed, Stacked, Lasso
from ..panzoom import PanZoom
from ..transform import NDC
from ..visuals import ScatterVisual
from . import mouse_click
#------------------------------------------------------------------------------
# Fixtures
#------------------------------------------------------------------------------
N = 10000
class MyTestVisual(BaseVisual):
def __init__(self):
super(MyTestVisual, self).__init__()
self.vertex_shader = """
attribute vec2 a_position;
void main() {
vec2 xy = a_position.xy;
gl_Position = transform(xy);
gl_PointSize = 5.;
}
"""
self.fragment_shader = """
void main() {
gl_FragColor = vec4(1, 1, 1, 1);
}
"""
self.set_primitive_type('points')
def set_data(self):
self.n_vertices = N
position = np.random.uniform(low=-1, high=+1, size=(N, 2))
self.data = position
self.program['a_position'] = position.astype(np.float32)
self.emit_visual_set_data()
def _create_visual(qtbot, canvas, layout, box_index):
c = canvas
# Attach the layout *and* PanZoom. The order matters!
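    # (Note added for clarity: the interacts presumably compose their transforms
    # in attachment order, so the layout's box transform must be applied before
    # the pan-zoom transform for panning/zooming to act inside each box.)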
layout.attach(c)
PanZoom(aspect=None, constrain_bounds=NDC).attach(c)
visual = MyTestVisual()
c.add_visual(visual)
visual.set_data()
visual.set_box_index(box_index)
c.show()
qtbot.waitForWindowShown(c)
#------------------------------------------------------------------------------
# Test grid
#------------------------------------------------------------------------------
def test_grid_layout():
grid = Grid((4, 8))
ac(grid.map([0., 0.], (0, 0)), [[-0.875, 0.75]])
ac(grid.map([0., 0.], (1, 3)), [[-0.125, 0.25]])
ac(grid.map([0., 0.], (3, 7)), [[0.875, -0.75]])
ac(grid.imap([[0.875, -0.75]], (3, 7)), [[0., 0.]])
def test_grid_closest_box():
grid = Grid((3, 7))
ac(grid.get_closest_box((0., 0.)), (1, 3))
ac(grid.get_closest_box((-1., +1.)), (0, 0))
ac(grid.get_closest_box((+1., -1.)), (2, 6))
ac(grid.get_closest_box((-1., -1.)), (2, 0))
ac(grid.get_closest_box((+1., +1.)), (0, 6))
def test_grid_1(qtbot, canvas):
n = N // 10
box_index = [[i, j] for i, j in product(range(2), range(5))]
box_index = np.repeat(box_index, n, axis=0)
grid = Grid((2, 5))
_create_visual(qtbot, canvas, grid, box_index)
grid.add_boxes(canvas)
# qtbot.stop()
def test_grid_2(qtbot, canvas):
n = N // 10
box_index = [[i, j] for i, j in product(range(2), range(5))]
box_index = np.repeat(box_index, n, axis=0)
grid = Grid()
_create_visual(qtbot, canvas, grid, box_index)
grid.shape = (5, 2)
assert grid.shape == (5, 2)
grid.scaling = (.5, 2)
assert grid.scaling == (.5, 2)
# qtbot.stop()
#------------------------------------------------------------------------------
# Test boxed
#------------------------------------------------------------------------------
def test_boxed_1(qtbot, canvas):
n = 10
b = np.zeros((n, 2))
b[:, 1] = np.linspace(-1., 1., n)
box_index = np.repeat(np.arange(n), N // n, axis=0)
assert box_index.shape == (N,)
boxed = Boxed(box_pos=b)
_create_visual(qtbot, canvas, boxed, box_index)
boxed.add_boxes(canvas)
assert boxed.box_scaling == (1, 1)
assert boxed.layout_scaling == (1, 1)
ac(boxed.box_pos[:, 0], 0, atol=1e-9)
assert boxed.box_size[0] >= .9
assert boxed.box_size[1] >= .05
assert boxed.box_bounds.shape == (n, 4)
boxed.expand_box_width()
boxed.shrink_box_width()
boxed.expand_box_height()
boxed.shrink_box_height()
boxed.expand_layout_width()
boxed.shrink_layout_width()
boxed.expand_layout_height()
boxed.shrink_layout_height()
# qtbot.stop()
def test_boxed_2(qtbot, canvas):
from ..visuals import PlotAggVisual
n = 10
b = np.zeros((n, 2))
b[:, 1] = np.linspace(-1., 1., n)
box_index = np.repeat(np.arange(n), 2 * (N + 2), axis=0)
boxed = Boxed(box_pos=b)
c = canvas
boxed.attach(c)
PanZoom(aspect=None, constrain_bounds=NDC).attach(c)
t = np.linspace(-1, 1, N)
x = np.atleast_2d(t)
y = np.atleast_2d(.5 * np.sin(20 * t))
x = np.tile(x, (n, 1))
y = np.tile(y, (n, 1))
visual = PlotAggVisual()
c.add_visual(visual)
visual.set_data(x=x, y=y)
visual.set_box_index(box_index)
c.show()
qtbot.waitForWindowShown(c)
#------------------------------------------------------------------------------
# Test stacked
#------------------------------------------------------------------------------
def test_stacked_1(qtbot, canvas):
n = 10
box_index = np.repeat(np.arange(n), N // n, axis=0)
stacked = Stacked(n_boxes=n, origin='top')
_create_visual(qtbot, canvas, stacked, box_index)
assert stacked.origin == 'top'
stacked.origin = 'bottom'
assert stacked.origin == 'bottom'
# qtbot.stop()
def test_stacked_closest_box():
stacked = Stacked(n_boxes=4, origin='top')
ac(stacked.get_closest_box((-.5, .9)), 0)
ac(stacked.get_closest_box((+.5, -.9)), 3)
stacked = Stacked(n_boxes=4, origin='bottom')
ac(stacked.get_closest_box((-.5, .9)), 3)
ac(stacked.get_closest_box((+.5, -.9)), 0)
stacked.n_boxes = 3
#------------------------------------------------------------------------------
# Test lasso
#------------------------------------------------------------------------------
def test_lasso_simple(qtbot):
view = BaseCanvas()
x = .25 * np.random.randn(N)
y = .25 * np.random.randn(N)
scatter = ScatterVisual()
view.add_visual(scatter)
scatter.set_data(x=x, y=y)
l = Lasso()
l.attach(view)
l.create_lasso_visual()
view.show()
#qtbot.waitForWindowShown(view)
l.add((-.5, -.5))
l.add((+.5, -.5))
l.add((+.5, +.5))
l.add((-.5, +.5))
assert l.count == 4
assert l.polygon.shape == (4, 2)
b = [[-.5, -.5], [+.5, -.5], [+.5, +.5], [-.5, +.5]]
ae(l.in_polygon(b), [False, False, True, True])
assert str(l)
# qtbot.stop()
view.close()
def test_lasso_grid(qtbot, canvas):
grid = Grid((1, 2))
grid.attach(canvas)
PanZoom(aspect=None).attach(canvas)
grid.add_boxes(canvas)
visual = MyTestVisual()
canvas.add_visual(visual)
# Right panel.
box_index = np.zeros((N, 2), dtype=np.float32)
box_index[:, 1] = 1
visual.program['a_box_index'] = box_index
visual.set_data()
# lasso interact
l = Lasso()
l.attach(canvas)
l.create_lasso_visual()
l.update_lasso_visual()
canvas.show()
qtbot.waitForWindowShown(canvas)
qtbot.wait(20)
def _ctrl_click(x, y, button='left'):
mouse_click(qtbot, canvas, (x, y), button=button, modifiers=('Control',))
# Square selection in the right panel.
w, h = canvas.get_size()
x0 = w / 2 + 100
x1 = x0 + 200
y0 = 100
y1 = 300
_ctrl_click(x0, y0)
_ctrl_click(x1, y0)
_ctrl_click(x1, y1)
_ctrl_click(x0, y1)
assert l.polygon.shape == (4, 2)
assert l.box == (0, 1)
inlasso = l.in_polygon(visual.data)
assert .001 < inlasso.mean() < .999
# Clear box.
_ctrl_click(x0, y0, 'right')
assert l.polygon.shape == (0, 2)
assert l.box is None
qtbot.wait(20)
canvas.close()
|
|
#! /usr/bin/env python3
import sys
import math
from mule_local.JobMule import *
from mule.plotting.Plotting import *
from mule.postprocessing.JobsData import *
from mule.postprocessing.JobsDataConsolidate import *
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
groups = ['runtime.timestepping_method']
tagnames_y = [
'sphere_data_diff_prog_phi_pert.res_norm_linf',
'sphere_data_diff_prog_div.res_norm_linf',
'sphere_data_diff_prog_vrt.res_norm_linf',
]
j = JobsData(verbosity=0)
c = JobsDataConsolidate(j)
print("")
print("Groups:")
job_groups = c.create_groups(groups)
for key, g in job_groups.items():
print(" + "+key)
for tagname_y in tagnames_y:
print("*"*80)
print("Processing tagname "+tagname_y)
print("*"*80)
tagname_x = 'runtime.timestep_size'
if True:
"""
Use plotting format to create (x/y) data
"""
d = JobsData_GroupsPlottingScattered(
job_groups,
tagname_x,
tagname_y,
meta_attribute_name = 'runtime.timestepping_order',
)
for group_name, group_data in d.get_data_float().items():
print("*"*80)
print("Group: "+group_name)
prev_value = -1.0
conv = '-'
convergence_order = None
for (x, y, convergence_order_) in zip(group_data['x_values'], group_data['y_values'], group_data['meta_values']):
if prev_value > 0:
conv = y/prev_value
elif prev_value == 0:
conv = '[error=0]'
print("\t"+str(x)+"\t=>\t"+str(y)+"\tconvergence: "+str(conv))
prev_value = y
if convergence_order == None:
convergence_order = convergence_order_
else:
if convergence_order != convergence_order_:
raise Exception("Convergence order mismatch!!!")
print("")
print("Testing convergence")
# 'convergence', 'error'
test_type = 'convergence'
error_tolerance_convergence = -1
error_tolerance_error = -1
if 'vrt' in tagname_y:
if 'exp' in group_name:
# Convergence for rexi fracking fast
# Be a little bit more tolerant for convergence due to high accuracy
if convergence_order == 1:
error_tolerance_convergence = 0.1
conv_test_range_start = 1
conv_test_range_end = 5
elif convergence_order == 2:
# No convergence check for 2nd order on vorticity field
# This is already insanely accurate since it's primarily driven by linear parts
# Test only last 2 values for REXI
if group_name.startswith('lg_exp'):
error_tolerance_convergence = 0.2
conv_test_range_start = 1
# Exclude results which seems to be unstable
conv_test_range_end = 5
else:
error_tolerance_convergence = 0.1
# Start later with convergence tests due to very high accuracy
                        conv_test_range_start = 3
                        conv_test_range_end = 6
else:
raise Exception("Unsupported convergence_order")
else:
conv_test_range_start = 0
conv_test_range_end = 4
error_tolerance_convergence = 0.1
else:
conv_test_range_start = 0
conv_test_range_end = 4
error_tolerance_convergence = 0.05
if 'exp' in group_name:
# Convergence for rexi fracking fast
# Be a little bit more tolerant for convergence due to high accuracy
error_tolerance_convergence = 0.2
conv_test_range_start = 2
conv_test_range_end = 5
if 'exp' in group_name:
test_type = 'error'
error_tolerance_error = 1e-4
print(" + test_type: "+test_type)
print(" + range start/end: "+str(conv_test_range_start)+", "+str(conv_test_range_end))
print(" + error_tolerance_convergence: "+str(error_tolerance_convergence))
print(" + error_tolerance_error: "+str(error_tolerance_error))
if len(group_data['meta_values']) < conv_test_range_end:
raise Exception("Not enough samples to run convergence test")
for i in range(len(group_data['meta_values'])):
if group_data['meta_values'][i] != group_data['meta_values'][0]:
print("FATAL: Different convergence orders in same test")
for i in range(len(group_data['meta_values'])):
print("order: "+str(group_data['meta_values']))
raise Exception("FATAL: Different convergence orders in same test")
l = len(group_data['x_values'])
if l < conv_test_range_end:
print("There are only "+str(l)+" values, but we need at least "+str(conv_test_range_end)+" values")
raise Exception("Not enough values to study convergence")
prev_value = -1.0
conv = '-'
for i in range(conv_test_range_start, conv_test_range_end):
x = group_data['x_values'][i]
y = group_data['y_values'][i]
meta = group_data['meta_values'][i]
if prev_value > 0:
conv = y/prev_value
elif prev_value == 0:
conv = '[error=0]'
error_convergence = '-'
if isinstance(conv, float):
# Convergence order is stored in meta value
target_conv = pow(2.0, meta)
error_convergence = abs(conv - target_conv)/target_conv
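                # Worked example (comment added for clarity): assuming the
                # timestep sizes double between consecutive runs, a 2nd-order
                # method should see its error grow by roughly 2**2 = 4, so conv
                # is compared against target_conv = 2**order.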
print("\t"+str(x)+"\t=>\t"+str(y)+"\tconvergence: "+str(conv)+"\terror: "+str(error_convergence))
if test_type == 'convergence':
# Test for convergence if exists
if error_convergence != '-':
if error_convergence > error_tolerance_convergence:
print("Error: "+str(error_convergence))
if len(sys.argv) <= 1:
raise Exception("Convergence exceeds tolerance of "+str(error_tolerance_convergence))
elif test_type == 'error':
# Alternate tests instead of convergence check
# Convergence doesn't really make sense for REXI in the way how it's applied
# This should be only used for l_exp and lg_exp
# Just ensure that the errors are below a certain level
if y > error_tolerance_error:
print("Error: "+str(y))
if len(sys.argv) <= 1:
raise Exception("Error exceeds tolerance of "+str(error_tolerance_error))
else:
raise Exception("Unknown test type "+test_type)
prev_value = y
if len(sys.argv) <= 1:
print("[OK]")
if len(sys.argv) <= 1:
print("*"*80)
print("Convergence tests successful")
print("*"*80)
|
|
#!/usr/bin/env python
# encoding: utf-8
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import logging
try:
import json
except ImportError:
import simplejson as json
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from desktop.lib.django_db_util import remove_content_type
from jobsub.models import JobDesign, OozieJavaAction, OozieStreamingAction, OozieDesign
LOG = logging.getLogger(__name__)
class Migration(SchemaMigration):
def forwards(self, orm):
"""
Added custom transaction processing for transactional DBMS.
If a DDL operation fails, the entire transaction fails and all future commands are ignored.
"""
# Adding model 'OozieStreamingAction'
db.create_table('jobsub_ooziestreamingaction', (
('oozieaction_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['jobsub.OozieAction'], unique=True, primary_key=True)),
('files', self.gf('django.db.models.fields.CharField')(default='[]', max_length=512)),
('mapper', self.gf('django.db.models.fields.CharField')(max_length=512)),
('reducer', self.gf('django.db.models.fields.CharField')(max_length=512)),
('job_properties', self.gf('django.db.models.fields.CharField')(default='[]', max_length=32768)),
('archives', self.gf('django.db.models.fields.CharField')(default='[]', max_length=512)),
))
db.send_create_signal('jobsub', ['OozieStreamingAction'])
# Adding model 'OozieAction'
db.create_table('jobsub_oozieaction', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('action_type', self.gf('django.db.models.fields.CharField')(max_length=64)),
))
db.send_create_signal('jobsub', ['OozieAction'])
# Adding model 'OozieDesign'
db.create_table('jobsub_ooziedesign', (
('description', self.gf('django.db.models.fields.CharField')(max_length=1024, blank=True)),
('last_modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('owner', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('root_action', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['jobsub.OozieAction'])),
('name', self.gf('django.db.models.fields.CharField')(max_length=64)),
))
db.send_create_signal('jobsub', ['OozieDesign'])
# Adding model 'JobHistory'
db.create_table('jobsub_jobhistory', (
('owner', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('submission_date', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('design', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['jobsub.OozieDesign'])),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('job_id', self.gf('django.db.models.fields.CharField')(max_length=128)),
))
db.send_create_signal('jobsub', ['JobHistory'])
# Adding model 'OozieMapreduceAction'
db.create_table('jobsub_ooziemapreduceaction', (
('oozieaction_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['jobsub.OozieAction'], unique=True, primary_key=True)),
('files', self.gf('django.db.models.fields.CharField')(default='[]', max_length=512)),
('jar_path', self.gf('django.db.models.fields.CharField')(max_length=512)),
('archives', self.gf('django.db.models.fields.CharField')(default='[]', max_length=512)),
('job_properties', self.gf('django.db.models.fields.CharField')(default='[]', max_length=32768)),
))
db.send_create_signal('jobsub', ['OozieMapreduceAction'])
# Adding model 'OozieJavaAction'
db.create_table('jobsub_ooziejavaaction', (
('oozieaction_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['jobsub.OozieAction'], unique=True, primary_key=True)),
('files', self.gf('django.db.models.fields.CharField')(default='[]', max_length=512)),
('jar_path', self.gf('django.db.models.fields.CharField')(max_length=512)),
('java_opts', self.gf('django.db.models.fields.CharField')(max_length=256, blank=True)),
('args', self.gf('django.db.models.fields.CharField')(max_length=4096, blank=True)),
('job_properties', self.gf('django.db.models.fields.CharField')(default='[]', max_length=32768)),
('archives', self.gf('django.db.models.fields.CharField')(default='[]', max_length=512)),
('main_class', self.gf('django.db.models.fields.CharField')(max_length=256)),
))
db.send_create_signal('jobsub', ['OozieJavaAction'])
# Adding field 'CheckForSetup.setup_level'
db.add_column('jobsub_checkforsetup', 'setup_level', self.gf('django.db.models.fields.IntegerField')(default=0), keep_default=False)
# The next sequence may fail... so they should have their own transactions.
db.commit_transaction()
# Delete legacy tables. Note that this only applies to Hue 1.x installations
db.start_transaction()
try:
db.delete_table('jobsub_submission')
remove_content_type('jobsub', 'submission')
db.commit_transaction()
except Exception, ex:
db.rollback_transaction()
db.start_transaction()
try:
db.delete_table('jobsub_serversubmissionstate')
remove_content_type('jobsub', 'serversubmissionstate')
db.commit_transaction()
except Exception, ex:
db.rollback_transaction()
db.start_transaction()
hue1_to_hue2_data_migration()
def backwards(self, orm):
# Deleting model 'OozieStreamingAction'
db.delete_table('jobsub_ooziestreamingaction')
# Deleting model 'OozieAction'
db.delete_table('jobsub_oozieaction')
# Deleting model 'OozieDesign'
db.delete_table('jobsub_ooziedesign')
# Deleting model 'JobHistory'
db.delete_table('jobsub_jobhistory')
# Deleting model 'OozieMapreduceAction'
db.delete_table('jobsub_ooziemapreduceaction')
# Deleting model 'OozieJavaAction'
db.delete_table('jobsub_ooziejavaaction')
# Deleting field 'CheckForSetup.setup_level'
db.delete_column('jobsub_checkforsetup', 'setup_level')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'jobsub.checkforsetup': {
'Meta': {'object_name': 'CheckForSetup'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'setup_level': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'setup_run': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'})
},
'jobsub.jobdesign': {
'Meta': {'object_name': 'JobDesign'},
'data': ('django.db.models.fields.CharField', [], {'max_length': '4096'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'jobsub.jobhistory': {
'Meta': {'object_name': 'JobHistory'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'job_id': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'submission_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'design': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['jobsub.OozieDesign']"})
},
'jobsub.oozieaction': {
'Meta': {'object_name': 'OozieAction'},
'action_type': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'jobsub.ooziejavaaction': {
'Meta': {'object_name': 'OozieJavaAction', '_ormbases': ['jobsub.OozieAction']},
'archives': ('django.db.models.fields.CharField', [], {'default': "'[]'", 'max_length': '512'}),
'args': ('django.db.models.fields.CharField', [], {'max_length': '4096', 'blank': 'True'}),
'files': ('django.db.models.fields.CharField', [], {'default': "'[]'", 'max_length': '512'}),
'jar_path': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'java_opts': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'job_properties': ('django.db.models.fields.CharField', [], {'default': "'[]'", 'max_length': '32768'}),
'main_class': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'oozieaction_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['jobsub.OozieAction']", 'unique': 'True', 'primary_key': 'True'})
},
'jobsub.ooziemapreduceaction': {
'Meta': {'object_name': 'OozieMapreduceAction', '_ormbases': ['jobsub.OozieAction']},
'archives': ('django.db.models.fields.CharField', [], {'default': "'[]'", 'max_length': '512'}),
'files': ('django.db.models.fields.CharField', [], {'default': "'[]'", 'max_length': '512'}),
'jar_path': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'job_properties': ('django.db.models.fields.CharField', [], {'default': "'[]'", 'max_length': '32768'}),
'oozieaction_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['jobsub.OozieAction']", 'unique': 'True', 'primary_key': 'True'})
},
'jobsub.ooziestreamingaction': {
'Meta': {'object_name': 'OozieStreamingAction', '_ormbases': ['jobsub.OozieAction']},
'archives': ('django.db.models.fields.CharField', [], {'default': "'[]'", 'max_length': '512'}),
'files': ('django.db.models.fields.CharField', [], {'default': "'[]'", 'max_length': '512'}),
'job_properties': ('django.db.models.fields.CharField', [], {'default': "'[]'", 'max_length': '32768'}),
'mapper': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'oozieaction_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['jobsub.OozieAction']", 'unique': 'True', 'primary_key': 'True'}),
'reducer': ('django.db.models.fields.CharField', [], {'max_length': '512'})
},
'jobsub.ooziedesign': {
'Meta': {'object_name': 'OozieDesign'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'root_action': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['jobsub.OozieAction']"})
}
}
complete_apps = ['jobsub']
#
# Data migration helper
#
def hue1_to_hue2_data_migration():
"""
Data migration from the JobDesign table to the new Oozie-based models.
The migration could be incomplete:
- Jar types, for which the main class wasn't specified.
We add an `(incomplete)' marker to the design name to alert the user.
"""
jd_list = JobDesign.objects.all()
for jd in jd_list:
if jd.type == 'jar':
job_design_migration_for_jar(jd)
elif jd.type == 'streaming':
job_design_migration_for_streaming(jd)
else:
LOG.warn("Unknown JobDesign type '%s' in the old table. Row id: %s" %
(jd.type, jd.id))
def job_design_migration_for_jar(jd):
"""Migrate one jar type design"""
data = json.loads(jd.data)
action = OozieJavaAction(action_type=OozieJavaAction.ACTION_TYPE,
jar_path=data['jarfile'],
main_class="please.specify.in.the.job.design",
args=data['arguments'])
action.save()
design = OozieDesign(owner=jd.owner,
name=jd.name + ' (incomplete)',
description=jd.description,
root_action=action)
design.save()
def job_design_migration_for_streaming(jd):
"""Migrate one streaming type design"""
data = json.loads(jd.data)
files = json.dumps(data['cache_files'])
archives = json.dumps(data['cache_archives'])
properties = data['hadoop_properties']
def add_property(key, value):
if value:
properties[key] = value
add_property('mapred.input.dir', ','.join(data['input']))
add_property('mapred.output.dir', data['output'])
add_property('mapred.combiner.class', data['combiner_class'])
add_property('mapred.mapper.class', data['mapper_class'])
add_property('mapred.reducer.class', data['reducer_class'])
add_property('mapred.partitioner.class', data['partitioner_class'])
add_property('mapred.input.format.class', data['inputformat_class'])
add_property('mapred.output.format.class', data['outputformat_class'])
add_property('mapred.reduce.tasks', data['num_reduce_tasks'])
property_list = [ ]
for k, v in properties.iteritems():
property_list.append(dict(name=k, value=v))
action = OozieStreamingAction(action_type=OozieStreamingAction.ACTION_TYPE,
mapper=data['mapper_cmd'],
reducer=data['reducer_cmd'],
files=files,
archives=archives,
job_properties=json.dumps(property_list))
action.save()
design = OozieDesign(owner=jd.owner,
name=jd.name,
description=jd.description,
root_action=action)
design.save()
|
|
#!/usr/bin/env python3
import sys
import os
import zipfile
import urllib
import time
import getopt
import json
import datetime
import random
import shutil
from sqlite_bmk import SQLiteBenchmarker
def main(argv):
## default values
options_file = ''
base_dir=os.path.abspath(os.getcwd())
num_random = 30
found_options = False
cleanonly = False
## first read terminal arguments
try:
opts, args = getopt.getopt(argv,"o:whr:",["optionsfile=","workingdir","help", "num-random=", "clean"])
    except getopt.GetoptError as err:
print(str(err))
print(help_str())
sys.exit(ConfigCreator.EXIT_ERROR)
for opt, arg in opts:
if opt in ("-w", "--workingdir"):
base_dir = os.path.abspath(arg)
elif opt in ("-o", "--optionsfile"):
options_file = os.path.abspath(arg)
found_options = True
elif opt in ("-r", "--num-random"):
num_random = arg
found_options = True
elif opt == "--clean":
cleanonly = True
else:
print (help_str())
sys.exit(ConfigCreator.EXIT_ERROR)
if cleanonly:
ConfigCreator.clean(base_dir)
sys.exit(ConfigCreator.EXIT_CLEAN_ONLY)
if not found_options:
print (help_str())
sys.exit(ConfigCreator.EXIT_ERROR)
generator = ConfigCreator(base_dir=base_dir,options_file=options_file)
#non_default = generator.generate_non_default_single_option("SQLITE_TEMP_STORE")
generator.generate_and_write_one_for_each_option()
generator.generate_set_randomly(int(num_random))
class ConfigCreator:
## exit flags
EXIT_SUCCESS = 0
EXIT_CLEAN_ONLY = 1
EXIT_ERROR = 2
JSON_INDENT = 4
def __init__(self, base_dir, options_file):
self.base_dir = base_dir
self.options_file = options_file
## read list of all possible config flags ("features"/"options")
with open(self.options_file) as json_data:
json_data = json.load(json_data)
self.options = self.parse_options(json_data)
print('Finished initialising.')
@staticmethod
def parse_options(json):
""" generates and returns all possible values for all options given as json """
options = {}
possible_options = {}
if "compile-options" in json:
json_options = json["compile-options"]
# We allow
# - unary options (take effect on existence)
# - set options (can take a value out of a set of allowed values - includes binary options)
# - range options (can take any value inside a range, using a given step size)
# iterate over all options and generate all valid values
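            # A hypothetical input illustrating the three kinds of options
            # (the option names below are made up, not taken from a real
            # options file):
            #   {"compile-options": {
            #       "SOME_UNARY_FLAG": null,
            #       "SOME_LIST_OPTION": {"type": "list", "default": 0,
            #                            "values": [0, 1, 2]},
            #       "SOME_RANGE_OPTION": {"type": "range", "default": 0,
            #                             "min": 0, "max": 100,
            #                             "stepsize": 10}}}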
for option, value in json_options.items():
val_dict = {}
if value is None:
# unary option
possible_options[option] = None
else:
val_type = value["type"]
val_default = value["default"]
val_dict["default"] = val_default
if val_type == "list":
# list type option
vals = value["values"]
val_dict["values"] = vals
possible_options[option] = val_dict
elif val_type == "range":
# range type option
max_val = value["max"]
min_val = value["min"]
stepsize = value["stepsize"]
possible_values = list(range(min_val,max_val+stepsize,stepsize))
val_dict["values"] = possible_values
possible_options[option] = val_dict
else:
print("Found unsupported option: " + option + "=" + value)
options = possible_options
return options
def write_config(self,
config,
suffix = ""):
""" wites a file for a given configuration dict """
config_wrapper = {}
config_wrapper["features"] = config
config_folder = os.path.join(self.base_dir, 'compile-configs')
config_folder_exists = os.path.exists(config_folder)
        json_conf = json.dumps(config_wrapper, indent=ConfigCreator.JSON_INDENT, sort_keys=True)
if not config_folder_exists:
os.mkdir(config_folder)
config_file_name = "config_"
        if suffix != "":
config_file_name += suffix + "_"
config_file_name += str(hash(json_conf))
complete_path = os.path.join(config_folder, config_file_name)
complete_path += ".cfg"
with open(complete_path, 'w') as f:
f.seek(0)
f.write(json_conf)
f.truncate()
def generate_randomly(self):
"""generates and returns a config with random values for each option"""
rand_conf = {}
for feature, f_desc in self.options.items():
if f_desc is None:
#unary option
on = bool(random.getrandbits(1))
if on:
rand_conf[feature] = None
else:
possible_values = f_desc["values"]
val = random.choice(possible_values)
rand_conf[feature] = val
return rand_conf
def generate_set_randomly(self,num):
""" generates a set of random configs and writes them to seperate files """
for i in range(num):
self.generate_rand_and_write()
def generate_rand_and_write(self):
""" generates a random config and writes it to a file """
random_config = self.generate_randomly()
self.write_config(random_config, suffix="rnd")
def generate_and_write_one_for_each_option(self):
""" generates a set of configs of which each only has one active option """
for options in self.options:
non_default_option = self.generate_non_default_single_option(options)
self.write_config(non_default_option, suffix=options)
def generate_non_default_single_option(self, option):
""" generates and returns a value for an option which is not its default value """
if option not in self.options:
raise ValueError('Can find no non-default value for option ' +
option + " since it is not in the parsed set of options")
option_desc = self.options[option]
possible_vals= []
if option_desc is None:
# unary option
possible_vals = [None]
else:
val_default = option_desc["default"]
possible_vals = option_desc["values"]
if val_default in possible_vals:
possible_vals.remove(val_default)
val = random.choice(possible_vals)
rand_conf = {}
rand_conf[option] = val
return rand_conf
@staticmethod
def write_all_in_one_config_file(base_dir):
""" writes all configs in one file """
file_content = ''
config_folder = os.path.join(base_dir, 'compile-configs')
file_list = os.listdir(config_folder)
file_num = len(file_list)
for filename in file_list:
abs_file = os.path.join(config_folder, filename)
with open(abs_file) as json_data:
config = json.load(json_data)
compile_command = SQLiteBenchmarker.get_param_string(config["features"])
file_content += compile_command + "\n"
with open(os.path.join(base_dir, 'all-in-one.cfg'), 'w') as f:
f.seek(0)
f.write(file_content)
f.close()
@staticmethod
def clean(base_dir):
""" deletes all files that could be created by
this class at some point """
cfg_path = os.path.join(base_dir, 'compile-configs')
all_in_one_cmd_path = os.path.join(base_dir, 'all-in-one.cfg')
try:
if os.path.exists(cfg_path):
shutil.rmtree(cfg_path)
if os.path.exists(all_in_one_cmd_path):
os.remove(all_in_one_cmd_path)
except:
print("Couldnt delete files")
def cur_milli():
return time.time()*1000
def cur_milli_str():
return str(int(round(cur_milli())))
def milli_str(milli):
return str(int(round(milli)))
def help_str():
return "USAGE: config-creator.py -o compile-options.json"
if __name__ == "__main__":
main(sys.argv[1:])
|
|
import logging
from nose.tools import assert_equal, assert_true, \
assert_not_equal
from .tests_integraion_base import TestsIntegration
logger = logging.getLogger(__name__)
__all__ = ("TestsDocument",)
class TestsDocument(TestsIntegration):
def tearDown(self):
super(TestsDocument, self).tearDown()
c = self.conn
c.collection.test.delete()
def test_document_creation(self):
c = self.conn
logger.info("Creationg new collection 'test'")
body = {
"value": 1,
"testing": True,
"options": [
1,
2,
3
]
}
c.collection.test.create()
count_before = c.collection.test.count()
c.collection.test.documents.create(body)
assert_equal(c.collection.test.count(), count_before + 1)
c.collection.test.documents.create(body)
assert_equal(c.collection.test.count(), count_before + 2)
def test_document_deletion(self):
c = self.conn
logger.info("Creating collection 'test'")
c.collection.test.create()
logger.info("Creating sample document")
doc = c.collection.test.documents.create({1: 1})
assert_not_equal(doc, None)
count = c.collection.test.documents.count
assert_true(doc.delete())
assert_equal(
c.collection.test.documents.count,
count - 1
)
def test_document_deletion_collection(self):
c = self.conn.collection
logger.info("Creating collection 'test'")
c.test.create()
doc1 = c.test.documents.create({"a": 1})
doc2 = c.test.documents.create({"a": 2})
prev_count = int(c.test.documents.count)
# delete by document itself
c.test.documents.delete(doc1)
assert_equal(int(c.test.documents.count), prev_count - 1)
# delete by reference only
c.test.documents.delete(doc2.id)
assert_equal(c.test.documents.count, prev_count - 2)
def test_document_update(self):
c = self.conn.collection
logger.info("Creating collection 'test'")
c.test.create()
doc = c.test.documents.create({1: 1})
c.test.documents.update(doc, {2: 2})
assert_equal(
c.test.documents().first.body["1"], 1)
assert_equal(
c.test.documents().first.body["2"], 2)
c.test.documents.delete(doc)
doc1 = c.test.documents.create({"name": "John"})
c.test.documents.update(doc1.id, {"position": "manager"})
assert_equal(
dict(
[(key, val) for key, val in
c.test.documents().first.body.items()
if key in ["name", "position"]]
),
{
"name": "John",
"position": "manager"
}
)
def test_document_body_setter(self):
c = self.conn.collection
logger.info("Creating collection 'test'")
c.test.create()
doc = c.test.documents.create({"data": 1})
data = {"data": 2}
doc.body = data
doc.save()
assert_not_equal(doc, None)
inter = list(
set(c.test.documents().first.body).intersection(
set(data)))
assert_equal(
data[inter[0]],
c.test.documents().first.body[inter[0]])
def test_list_of_documents(self):
c = self.conn.collection
c.test.create()
docs = [
{"title": "doc1"},
{"title": "doc2"},
{"title": "doc3"}
]
for doc in docs:
c.test.documents.create(doc)
for index, doc in enumerate(c.test.documents()):
for src in docs:
flag = False
for key, val in src.items():
if doc.body.get(key) == val:
flag = True
break
if flag:
break
assert_true(flag)
def test_bulk_insert(self):
c = self.conn.collection.test
c.create()
docs = [
{"title": "doc1"},
{"title": "doc2"},
{"title": "doc3"}]
count = c.documents.count
response = c.documents.create_bulk(docs)
assert_equal(count + len(docs), c.documents.count)
assert_equal(
response,
{u'created': 3, u'errors': 0, u'empty': 0, u'error': False})
def test_bulk_insert_attrs_and_values(self):
c = self.conn.collection.test
c.create()
docs = [
["name", "age", "sex"],
["max", 27, "male"],
["val", 28, "female"],
["emma", 4, "female"]]
count = c.documents.count
response = c.documents.create_bulk(docs)
assert_equal(count + len(docs) - 1, c.documents.count)
assert_equal(
response,
{u'created': 3, u'errors': 0, u'empty': 0, u'error': False})
|
|
import httplib
import re
import os
import requests
import json
from datetime import datetime
from distutils.version import LooseVersion
from cumulusci.tasks.release_notes.github_api import GithubApiMixin
from cumulusci.tasks.release_notes.parser import ChangeNotesLinesParser
from cumulusci.tasks.release_notes.parser import IssuesParser
from cumulusci.tasks.release_notes.parser import GithubIssuesParser
from cumulusci.tasks.release_notes.parser import GithubLinesParser
from cumulusci.tasks.release_notes.parser import GithubLinkingLinesParser
from cumulusci.tasks.release_notes.parser import CommentingGithubIssuesParser
from cumulusci.tasks.release_notes.provider import StaticChangeNotesProvider
from cumulusci.tasks.release_notes.provider import DirectoryChangeNotesProvider
from cumulusci.tasks.release_notes.provider import GithubChangeNotesProvider
from cumulusci.tasks.release_notes.exceptions import GithubApiNotFoundError
class BaseReleaseNotesGenerator(object):
def __init__(self):
self.change_notes = []
self.init_parsers()
self.init_change_notes()
def __call__(self):
self._parse_change_notes()
return self.render()
def init_change_notes(self):
self.change_notes = self._init_change_notes()
def _init_change_notes(self):
""" Subclasses should override this method to return an initialized
subclass of BaseChangeNotesProvider """
return []
def init_parsers(self):
""" Initializes the parser instances as the list self.parsers """
self.parsers = []
self._init_parsers()
def _init_parsers(self):
""" Subclasses should override this method to initialize their
parsers """
pass
def _parse_change_notes(self):
""" Parses all change_notes in self.change_notes() through all parsers
in self.parsers """
for change_note in self.change_notes():
self._parse_change_note(change_note)
def _parse_change_note(self, change_note):
""" Parses an individual change note through all parsers in
self.parsers """
for parser in self.parsers:
parser.parse(change_note)
def render(self):
""" Returns the rendered release notes from all parsers as a string """
release_notes = []
for parser in self.parsers:
parser_content = parser.render()
if parser_content is not None:
release_notes.append(parser_content)
return u'\r\n\r\n'.join(release_notes)
class StaticReleaseNotesGenerator(BaseReleaseNotesGenerator):
def __init__(self, change_notes):
self._change_notes = change_notes
super(StaticReleaseNotesGenerator, self).__init__()
def _init_parsers(self):
self.parsers.append(ChangeNotesLinesParser(
self, 'Critical Changes'))
self.parsers.append(ChangeNotesLinesParser(self, 'Changes'))
self.parsers.append(IssuesParser(
self, 'Issues Closed'))
def _init_change_notes(self):
return StaticChangeNotesProvider(self, self._change_notes)
class DirectoryReleaseNotesGenerator(BaseReleaseNotesGenerator):
def __init__(self, directory):
self.directory = directory
super(DirectoryReleaseNotesGenerator, self).__init__()
def _init_parsers(self):
self.parsers.append(ChangeNotesLinesParser(
self, 'Critical Changes'))
self.parsers.append(ChangeNotesLinesParser(self, 'Changes'))
self.parsers.append(IssuesParser(
self, 'Issues Closed'))
def _init_change_notes(self):
return DirectoryChangeNotesProvider(self, self.directory)
class GithubReleaseNotesGenerator(BaseReleaseNotesGenerator):
def __init__(self, github_info, current_tag, last_tag=None, link_pr=False):
self.github_info = github_info
self.current_tag = current_tag
self.last_tag = last_tag
self.link_pr = link_pr
self.lines_parser_class = None
self.issues_parser_class = None
super(GithubReleaseNotesGenerator, self).__init__()
def _init_parsers(self):
self._set_classes()
self.parsers.append(self.lines_parser_class(
self,
'Critical Changes',
))
self.parsers.append(self.lines_parser_class(
self,
'Changes',
))
self.parsers.append(self.issues_parser_class(
self,
'Issues Closed',
link_pr=self.link_pr,
))
def _init_change_notes(self):
return GithubChangeNotesProvider(
self,
self.current_tag,
self.last_tag
)
def _set_classes(self):
self.lines_parser_class = (GithubLinkingLinesParser if self.link_pr
else GithubLinesParser)
self.issues_parser_class = GithubIssuesParser
class PublishingGithubReleaseNotesGenerator(GithubReleaseNotesGenerator, GithubApiMixin):
def __call__(self):
content = super(PublishingGithubReleaseNotesGenerator, self).__call__()
return self.publish(content)
def publish(self, content):
release = self._get_release()
return self._update_release(release, content)
def _get_release(self):
# Query for the release
return self.call_api('/releases/tags/{}'.format(self.current_tag))
def _set_classes(self):
self.lines_parser_class = (GithubLinkingLinesParser if self.link_pr
else GithubLinesParser)
self.issues_parser_class = CommentingGithubIssuesParser
def _update_release(self, release, content):
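        """Merge freshly rendered sections into the existing release body.

        Sections whose headers already appear in the body are replaced in
        place, unrelated lines are preserved, and sections that are not
        present yet are appended at the bottom. The release is then updated
        (or created, if it has no id) through the GitHub API.
        """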
if release['body']:
new_body = []
current_parser = None
is_start_line = False
for parser in self.parsers:
parser.replaced = False
# update existing sections
for line in release['body'].splitlines():
if current_parser:
if current_parser._is_end_line(current_parser._process_line(line)):
parser_content = current_parser.render()
if parser_content:
# replace existing section with new content
new_body.append(parser_content + '\r\n')
current_parser = None
for parser in self.parsers:
if parser._render_header().strip() == parser._process_line(line).strip():
parser.replaced = True
current_parser = parser
is_start_line = True
break
else:
is_start_line = False
if is_start_line:
continue
if current_parser:
continue
else:
# preserve existing sections
new_body.append(line.strip())
# catch section without end line
if current_parser:
new_body.append(current_parser.render())
# add new sections at bottom
for parser in self.parsers:
parser_content = parser.render()
if parser_content and not parser.replaced:
new_body.append(parser_content + '\r\n')
release['body'] = u'\r\n'.join(new_body)
else:
release['body'] = content
if release.get('id'):
resp = self.call_api(
'/releases/{}'.format(release['id']), data=release)
else:
resp = self.call_api('/releases', data=release)
return release['body']
|
|
#!/usr/bin/env python
####################################################################
# AUTHOR: Miguel Ibarra ([email protected])
#
# DESCRIPTION: Pairwise and to-mean standardized Euclidean comparison
####################################################################
import os
import logging
import argparse
import numpy as np
import pandas as pd
import scipy.stats as stats
from numpy.linalg import svd
import matplotlib
matplotlib.use("Agg")
from matplotlib.backends.backend_pdf import PdfPages
from sklearn.neighbors import DistanceMetric
from secimtools.dataManager import logger as sl
from secimtools.dataManager.interface import wideToDesign
from secimtools.visualManager import module_box as box
from secimtools.visualManager import module_lines as lines
from secimtools.visualManager import module_scatter as scatter
from secimtools.visualManager.manager_color import colorHandler
from secimtools.visualManager.manager_figure import figureHandler
def getOptions():
""" Function to pull in arguments """
description = """"""
parser = argparse.ArgumentParser(
description=description, formatter_class=argparse.RawDescriptionHelpFormatter
)
# Standard Input
standard = parser.add_argument_group(description="Required Input")
standard.add_argument(
"-i",
"--input",
dest="input",
action="store",
required=True,
help="Dataset in Wide format",
)
standard.add_argument(
"-d",
"--design", dest="design",
action="store",
required=True,
help="Design file",
)
standard.add_argument(
"-id",
"--ID",
dest="uniqID",
action="store",
required=True,
help="Name of the column with uniqueID.",
)
standard.add_argument(
"-g",
"--group",
dest="group",
default=False,
action="store",
required=False,
help="Treatment group",
)
standard.add_argument(
"-o", "--order", dest="order", action="store", default=False, help="Run Order"
)
standard.add_argument(
"-l",
"--levels",
dest="levels",
action="store",
default=False,
help="Additional notes.",
)
# Tool output
output = parser.add_argument_group(description="Output Files")
output.add_argument(
"-f",
"--figure",
dest="figure",
action="store",
required=True,
help="PDF Output of standardized" "Euclidean distance plot",
)
output.add_argument(
"-m",
"--distanceToMean",
dest="toMean",
action="store",
required=True,
help="TSV Output of Mahalanobis " "distances from samples to the mean.",
)
output.add_argument(
"-pw",
"--distancePairwise",
dest="pairwise",
action="store",
required=True,
help="TSV Output of sample-pairwise" "mahalanobis distances.",
)
# Tool Input
tool = parser.add_argument_group(description="Optional Input")
tool.add_argument(
"-p",
"--per",
dest="p",
action="store",
required=False,
default=0.95,
type=float,
help="The threshold" "for standard distributions. The default is 0.95.",
)
tool.add_argument(
"-pen",
"--penalty",
dest="penalty",
action="store",
required=False,
default=0.5,
type=float,
help="Value" " of lambda for the penalty.",
)
tool.add_argument(
"-lg",
"--log",
dest="log",
action="store",
required=False,
default=True,
help="Log file",
)
# Plot options
plot = parser.add_argument_group(title="Plot options")
plot.add_argument(
"-pal",
"--palette",
dest="palette",
action="store",
required=False,
default="tableau",
help="Name of the palette to use.",
)
plot.add_argument(
"-col",
"--color",
dest="color",
action="store",
required=False,
default="Tableau_20",
help="Name of a valid color scheme" " on the selected palette",
)
args = parser.parse_args()
# Standardize paths
args.figure = os.path.abspath(args.figure)
args.toMean = os.path.abspath(args.toMean)
args.pairwise = os.path.abspath(args.pairwise)
    # If args.levels was provided, split it into a list; otherwise it stays False
if args.levels:
args.levels = args.levels.split(",")
return args
def calculatePenalizedSigma(data, penalty=0.5):
# Getting n and p of data.
n, p = data.shape
# Calculate the mean of every row
data["mean"] = data.mean(axis=1)
# Standardize data (_std stands for standardized)
data_std = data.apply(lambda x: x - x["mean"], axis=1)
# Dropping mean column from both data and data_standardized
data.drop("mean", axis=1, inplace=True)
data_std.drop("mean", axis=1, inplace=True)
# Calculate singular value decomposition
U, s, V = svd(data_std)
# Calculate ds based on ss (d = s**2)
d = s ** 2
    # Based on d, calculate the penalty. The penalty must be expressed as a
    # proportion (from 0 to 1); it is multiplied by 100 for np.percentile.
penalty = np.percentile(d, q=penalty * 100.0)
    # After calculating the penalty, extend the d vector to size n
d = np.append(d, np.zeros(shape=(n - len(d))))
    # Apply the penalty to d and invert it (1/(d + penalty))
d = [1 / (val + penalty) for val in d]
    # Build a diagonal matrix whose diagonal holds the penalized, inverted
    # singular values
# S = np.zeros((n,p))
# S[:len(s),:len(s)] = np.diag(s)
D = np.diag(d)
    # Compute the estimate of the penalized sigma
penalized_sigma = np.dot(U, np.dot(D, U.T))
# Multiply everything by p-1
penalized_sigma = (p - 1) * penalized_sigma
# Returning penalized sigma
return penalized_sigma
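# Hypothetical usage sketch (not called anywhere by this tool): the matrix
# returned above is what main() later passes as V_VI to calculateDistances,
# and it is symmetric by construction (U * diag(1/(d + penalty)) * U^T).
# The shapes and values below are made up for illustration only.
def _example_penalized_sigma():
    rng = np.random.RandomState(0)
    # 10 features (rows) x 6 samples (columns)
    toy = pd.DataFrame(rng.normal(size=(10, 6)))
    sigma = calculatePenalizedSigma(toy, penalty=0.5)
    assert sigma.shape == (10, 10)      # square, one row/column per feature
    assert np.allclose(sigma, sigma.T)  # symmetric
    return sigma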
def calculateDistances(data, V_VI):
"""
    Calculates Euclidean or Mahalanobis distances. Returns an array of
    distances to the mean and a matrix of pairwise distances.
:Arguments:
:type wide: pandas.DataFrame
:param wide: A wide formatted data frame with samples as columns and
compounds as rows.
:Returns:
:return distanceToMean: pd.DataFrames with distances to the mean.
:rtype: pd.DataFrames
        :return distancePairwise: pd.DataFrame with pairwise distances between
samples.
:rtype: pd.DataFrames
"""
# Calculating mean
mean = pd.DataFrame(data.mean(axis=1))
# Getting metric
dist = DistanceMetric.get_metric("mahalanobis", VI=V_VI)
# Calculate distance from all samples to the mean
distanceToMean = dist.pairwise(data.values.T, mean.T)
distanceToMean = pd.DataFrame(
distanceToMean, columns=["distance_to_mean"], index=data.columns
)
distanceToMean.name = data.name
# Calculate pairwise distances among samples
distancePairwise = dist.pairwise(data.values.T)
distancePairwise = pd.DataFrame(
distancePairwise, columns=data.columns, index=data.columns
)
distancePairwise.name = data.name
    # Set the diagonal to NaN
for index, row in distancePairwise.iterrows():
distancePairwise.loc[index, index] = np.nan
return (distanceToMean, distancePairwise)
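# A small, hypothetical sketch of calculateDistances on toy data. Here VI is
# simply the inverse sample covariance (classical Mahalanobis distance) rather
# than the penalized sigma this tool normally uses; names and shapes are
# illustrative only.
def _example_calculate_distances():
    rng = np.random.RandomState(42)
    toy = pd.DataFrame(
        rng.normal(size=(5, 8)), columns=["s{}".format(i) for i in range(8)]
    )
    toy.name = "demo"
    vi = np.linalg.inv(np.cov(toy.values))  # rows are features, columns samples
    to_mean, pairwise = calculateDistances(toy, V_VI=vi)
    assert to_mean.shape == (8, 1)            # one distance per sample
    assert pairwise.shape == (8, 8)           # sample-by-sample matrix
    assert np.isnan(np.diag(pairwise)).all()  # diagonal was set to NaN
    return to_mean, pairwise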
def calculateCutoffs(data, p):
"""
    Calculate cutoff values (Beta, Normal and Chi-squared approximations) for
    the distances to the mean and for the pairwise distances.
:Arguments:
:type wide: pandas.DataFrame
:param wide: A wide formatted data frame with samples as columns and
compounds as rows.
:type p: float.
:param p: percentile of cutoff.
:Returns:
:rtype cutoff1: pandas.dataFrame
:return cutoff1: Cutoff values for mean, beta, chi-sqr and normal.
:rtype cutoff2: pandas.dataFrame
:return cutoff2: Cutoff values for pairwise, beta, chi-sqr and normal.
"""
    # Establish the number of iterations, the number of columns (ps) and rows (nf)
    ps = len(data.columns)  # ps: number of samples
    nf = len(data.index)  # nf: number of features
iters = 20000
# Calculates betaP
betaP = np.percentile(
pd.DataFrame(
stats.beta.rvs(0.5, 0.5 * (ps - 2), size=iters * nf).reshape(iters, nf)
).sum(axis=1),
p * 100.0,
)
    # Cast to float so the subsequent arithmetic is floating point
ps = float(ps)
nf = float(nf)
# Calculates cutoffs beta,norm & chisq for data to mean
betaCut1 = np.sqrt((ps - 1) ** 2 / ps * betaP)
normCut1 = np.sqrt(
stats.norm.ppf(
p,
(ps - 1) / ps * nf,
np.sqrt(2 * nf * (ps - 2) * (ps - 1) ** 2 / ps ** 2 / (ps + 1)),
)
)
chisqCut1 = np.sqrt((ps - 1) / ps * stats.chi2.ppf(p, nf))
    # Calculate beta, norm & chisq cutoffs for pairwise distances
betaCut2 = np.sqrt((ps - 1) * 2 * betaP)
normCut2 = np.sqrt(stats.norm.ppf(p, 2 * nf, np.sqrt(8 * nf * (ps - 2) / (ps + 1))))
chisqCut2 = np.sqrt(2 * stats.chi2.ppf(p, nf))
    # Create a data frame for each set of cutoffs
cutoff1 = pd.DataFrame(
[[betaCut1, normCut1, chisqCut1], ["Beta(Exact)", "Normal", "Chi-sq"]],
index=["cut", "name"],
columns=["Beta(Exact)", "Normal", "Chi-sq"],
)
cutoff2 = pd.DataFrame(
[[betaCut2, normCut2, chisqCut2], ["Beta(Exact)", "Normal", "Chi-sq"]],
index=["cut", "name"],
columns=["Beta(Exact)", "Normal", "Chi-sq"],
)
# Create Palette
cutPalette.getColors(cutoff1.T, ["name"])
    # Return the cutoffs
return (cutoff1, cutoff2)
def plotCutoffs(cut_S, ax, p):
"""
Plot the cutoff lines to each plot
:Arguments:
:type cut_S: pandas.Series
:param cut_S: contains a cutoff value, name and color
:type ax: matplotlib.axes._subplots.AxesSubplot
        :param ax: Axis object to draw the cutoff line on.
:type p: float
:param p: percentile of cutoff
"""
lines.drawCutoffHoriz(
ax=ax,
y=float(cut_S.values[0]),
cl=cutPalette.ugColors[cut_S.name],
lb="{0} {1}% Threshold: {2}".format(
cut_S.name, round(p * 100, 3), round(float(cut_S.values[0]), 1)
),
ls="--",
lw=2,
)
def plotDistances(df_distance, palette, plotType, disType, cutoff, p, pdf):
    # Get the number of samples in the dataframe (ns stands for number of samples)
ns = len(df_distance.index)
    # Calculate the figure width based on the number of samples
figWidth = max(ns / 2, 16)
    # Keep the colors in the order given by the design
df_distance["colors"] = palette.design["colors"]
# Create figure object with a single axis
figure = figureHandler(proj="2d", figsize=(figWidth, 8))
# Getting type of distance file
if "distance_to_mean" in df_distance.columns:
dataType = "to the mean"
else:
dataType = "pairwise"
    # Get the type of distance for the plot header
if disType == "Mahalanobis":
distType1 = "Penalized"
distType2 = disType
else:
distType1 = "Standardized"
distType2 = disType
    # Add the figure title, axis titles, x-axis limits and xticks
figure.formatAxis(
figTitle="{0} for {1} {2} Distance for {3} {4}".format(
plotType, distType1, distType2, df_distance.name, dataType
),
yTitle="{0} {1} Distance".format(distType1, distType2),
xTitle="Index",
ylim="ignore",
xlim=(-0.5, -0.5 + ns),
xticks=df_distance.index,
)
# If distance to mean
if dataType == "to the mean":
# Plot scatterplot quickplot
scatter.scatter2D(
ax=figure.ax[0],
colorList=df_distance["colors"],
x=range(len(df_distance.index)),
y=df_distance["distance_to_mean"],
)
# if pairwise
else:
if plotType == "Scatterplot":
# Plot scatterplot
for index in df_distance.index:
scatter.scatter2D(
ax=figure.ax[0],
colorList=df_distance["colors"][index],
x=range(len(df_distance.index)),
y=df_distance[index],
)
elif plotType == "Box-plots":
# Plot Box plot
box.boxDF(ax=figure.ax[0], colors=df_distance["colors"], dat=df_distance)
# Shrink figure
figure.shrink()
# Plot legend
figure.makeLegend(figure.ax[0], palette.ugColors, palette.combName)
    # Add the cutoff lines
cutoff.apply(lambda x: plotCutoffs(x, ax=figure.ax[0], p=p), axis=0)
# Add figure to PDF and close the figure afterwards
figure.addToPdf(pdf)
# Drop "color" column to no mess the results
df_distance.drop("colors", axis=1, inplace=True)
def main(args):
"""
Main function
"""
if args.levels and args.group:
levels = [args.group] + args.levels
elif args.group and not args.levels:
levels = [args.group]
else:
levels = []
logger.info(u"Color selection groups: {0}".format(",".join(levels)))
dat = wideToDesign(
args.input,
args.design,
args.uniqID,
group=args.group,
anno=args.levels,
logger=logger,
runOrder=args.order,
)
    # Remove groups with a single sample, then drop missing data.
dat.removeSingle()
dat.dropMissing()
# Select colors for data by adding an additional column for colors
dataPalette.getColors(design=dat.design, groups=levels)
if args.group:
disGroups = [
(group.index, level)
for level, group in dataPalette.design.groupby(dataPalette.combName)
]
else:
disGroups = [(dat.design.index, "samples")]
pairwise_disCuts = list()
toMean_disCuts = list()
for indexes, name in disGroups:
        # If the group has fewer than 3 elements, skip to the next one
if len(indexes) < 3:
logger.error(
"Group {0} with fewer than 3 elements is excluded from the analysis".format(name)
)
continue
# Subsetting wide
currentFrame = pd.DataFrame(dat.wide[indexes].copy())
currentFrame.name = name
# Calculate Penalized Sigma
penalizedSigma = calculatePenalizedSigma(
data=currentFrame, penalty=args.penalty
)
# Calculate Distances (dis stands for distance)
disToMean, disPairwise = calculateDistances(
data=currentFrame, V_VI=penalizedSigma
)
# Calculate cutoffs
cutoff1, cutoff2 = calculateCutoffs(currentFrame, args.p)
# Appending results
pairwise_disCuts.append([disPairwise, cutoff2])
toMean_disCuts.append([disToMean, cutoff1])
if args.group:
# Splitting results to mean and pairwise
pairwise_dis = [distance for distance, cutoff in pairwise_disCuts]
toMean_dis = [distance for distance, cutoff in toMean_disCuts]
# Merging to get distance for all pairwise
pairwise_dis_all = pd.DataFrame()
for dis in pairwise_dis:
pairwise_dis_all = pd.DataFrame.merge(
pairwise_dis_all,
dis,
left_index=True,
right_index=True,
how="outer",
sort=False,
)
pairwise_dis_all.name = "samples"
# Merging to get distance for all to mean
toMean_dis_all = pd.DataFrame(columns=["distance_to_mean","group"])
for dis in toMean_dis:
dis["group"] = dis.name
toMean_dis_all = toMean_dis_all.append(dis)
toMean_dis_all.sort_values(by="group", inplace=True)
toMean_dis_all.drop("group", axis=1, inplace=True)
toMean_dis_all.name = "samples"
        # Get cutoffs for the distances
cutoff1, cutoff2 = calculateCutoffs(dat.wide, args.p)
# Append toMean_dis_all and pairwise_dis_all to toMean_dis_cuts and
# pairwise_dis_cuts respectively.
toMean_disCuts.append([toMean_dis_all, cutoff1])
pairwise_disCuts.append([pairwise_dis_all, cutoff2])
# Iterate over each pair of (distance,cutoff) for toMean and pairwise to
# plot distances.
with PdfPages((args.figure)) as pdf:
# Iterating over toMean,pairwise distances in parallel
for toMean, pairwise in zip(toMean_disCuts, pairwise_disCuts):
# Making plots
plotDistances(
df_distance=toMean[0],
palette=dataPalette,
p=args.p,
plotType="Scatterplot",
disType="Mahalanobis",
cutoff=toMean[1],
pdf=pdf,
)
plotDistances(
df_distance=pairwise[0],
palette=dataPalette,
p=args.p,
plotType="Scatterplot",
disType="Mahalanobis",
cutoff=pairwise[1],
pdf=pdf,
)
plotDistances(
df_distance=pairwise[0],
palette=dataPalette,
p=args.p,
plotType="Box-plots",
disType="Mahalanobis",
cutoff=pairwise[1],
pdf=pdf,
)
    # These are lists of [distance, cutoff] pairs and only the last distance is
    # of interest, so use [-1] for the last pair and [0] to take the distance out.
# Outputting distances to mean and pairwise
toMean_disCuts[-1][0].to_csv(args.toMean, index_label="sampleID", sep="\t")
pairwise_disCuts[-1][0].to_csv(args.pairwise, index_label="sampleID", sep="\t")
# Ending script
logger.info("Script complete.")
if __name__ == "__main__":
args = getOptions()
logger = logging.getLogger()
sl.setLogger(logger)
logger.info(
u"""Importing data with following parameters:
\tWide: {0}
\tDesign: {1}
\tUnique ID: {2}
\tGroup: {3}
\tRun Order: {4}
""".format(
args.input, args.design, args.uniqID, args.group, args.order
)
)
dataPalette = colorHandler(pal=args.palette, col=args.color)
cutPalette = colorHandler(pal="tableau", col="TrafficLight_9")
logger.info(
u"Using {0} color scheme from {1} palette".format(args.color, args.palette)
)
main(args)
|
|
# -*- coding: utf-8 -*-
from abc import ABCMeta, abstractmethod, abstractproperty
import math
import re
import numpy as np
from pyfr.nputil import npeval, fuzzysort
from pyfr.util import lazyprop, memoize
class BaseElements(object, metaclass=ABCMeta):
privarmap = None
convarmap = None
def __init__(self, basiscls, eles, cfg):
self._be = None
self.eles = eles
self.cfg = cfg
self.nspts = nspts = eles.shape[0]
self.neles = neles = eles.shape[1]
self.ndims = ndims = eles.shape[2]
# Kernels we provide
self.kernels = {}
# Check the dimensionality of the problem
if ndims != basiscls.ndims or ndims not in self.privarmap:
raise ValueError('Invalid element matrix dimensions')
# Determine the number of dynamical variables
self.nvars = len(self.privarmap[ndims])
# Instantiate the basis class
self.basis = basis = basiscls(nspts, cfg)
# See what kind of projection the basis is using
self.antialias = basis.antialias
        # Whether we need quadrature points or not
haveqpts = 'flux' in self.antialias or 'div-flux' in self.antialias
# Sizes
self.nupts = basis.nupts
self.nqpts = basis.nqpts if haveqpts else None
self.nfpts = basis.nfpts
self.nfacefpts = basis.nfacefpts
self.nmpts = basis.nmpts
    @staticmethod
    @abstractmethod
    def pri_to_con(ics, cfg):
        pass
def set_ics_from_cfg(self):
# Bring simulation constants into scope
vars = self.cfg.items_as('constants', float)
if any(d in vars for d in 'xyz'):
raise ValueError('Invalid constants (x, y, or z) in config file')
# Get the physical location of each solution point
coords = self.ploc_at_np('upts').swapaxes(0, 1)
vars.update(dict(zip('xyz', coords)))
# Evaluate the ICs from the config file
ics = [npeval(self.cfg.getexpr('soln-ics', dv), vars)
for dv in self.privarmap[self.ndims]]
# Allocate
self._scal_upts = np.empty((self.nupts, self.nvars, self.neles))
# Convert from primitive to conservative form
for i, v in enumerate(self.pri_to_con(ics, self.cfg)):
self._scal_upts[:, i, :] = v
def set_ics_from_soln(self, solnmat, solncfg):
# Recreate the existing solution basis
solnb = self.basis.__class__(None, solncfg)
# Form the interpolation operator
interp = solnb.ubasis.nodal_basis_at(self.basis.upts)
# Sizes
nupts, neles, nvars = self.nupts, self.neles, self.nvars
# Apply and reshape
self._scal_upts = np.dot(interp, solnmat.reshape(solnb.nupts, -1))
self._scal_upts = self._scal_upts.reshape(nupts, nvars, neles)
@lazyprop
def plocfpts(self):
# Construct the physical location operator matrix
plocop = self.basis.sbasis.nodal_basis_at(self.basis.fpts)
# Apply the operator to the mesh elements and reshape
plocfpts = np.dot(plocop, self.eles.reshape(self.nspts, -1))
plocfpts = plocfpts.reshape(self.nfpts, self.neles, self.ndims)
return plocfpts
@lazyprop
def _srtd_face_fpts(self):
plocfpts = self.plocfpts.transpose(1, 2, 0)
return [[np.array(fuzzysort(pts.tolist(), ffpts)) for pts in plocfpts]
for ffpts in self.basis.facefpts]
@abstractproperty
def _scratch_bufs(self):
pass
@lazyprop
def _src_exprs(self):
convars = self.convarmap[self.ndims]
# Variable and function substitutions
subs = self.cfg.items('constants')
subs.update(x='ploc[0]', y='ploc[1]', z='ploc[2]')
subs.update({v: 'u[{0}]'.format(i) for i, v in enumerate(convars)})
subs.update(abs='fabs', pi=str(math.pi))
return [self.cfg.getexpr('solver-source-terms', v, '0', subs=subs)
for v in convars]
@lazyprop
def _ploc_in_src_exprs(self):
return any(re.search(r'\bploc\b', ex) for ex in self._src_exprs)
@lazyprop
def _soln_in_src_exprs(self):
return any(re.search(r'\bu\b', ex) for ex in self._src_exprs)
@abstractmethod
def set_backend(self, backend, nscal_upts, nonce):
self._be = backend
# Sizes
ndims, nvars, neles = self.ndims, self.nvars, self.neles
nfpts, nupts, nqpts = self.nfpts, self.nupts, self.nqpts
sbufs, abufs = self._scratch_bufs, []
# Convenience functions for scalar/vector allocation
alloc = lambda ex, n: abufs.append(
backend.matrix(n, extent=nonce + ex, tags={'align'})
) or abufs[-1]
salloc = lambda ex, n: alloc(ex, (n, nvars, neles))
valloc = lambda ex, n: alloc(ex, (ndims, n, nvars, neles))
# Allocate required scalar scratch space
if 'scal_fpts' in sbufs and 'scal_qpts' in sbufs:
self._scal_fqpts = salloc('_scal_fqpts', nfpts + nqpts)
self._scal_fpts = self._scal_fqpts.rslice(0, nfpts)
self._scal_qpts = self._scal_fqpts.rslice(nfpts, nfpts + nqpts)
elif 'scal_fpts' in sbufs:
self._scal_fpts = salloc('scal_fpts', nfpts)
elif 'scal_qpts' in sbufs:
self._scal_qpts = salloc('scal_qpts', nqpts)
# Allocate additional scalar scratch space
if 'scal_upts_cpy' in sbufs:
self._scal_upts_cpy = salloc('scal_upts_cpy', nupts)
elif 'scal_qpts_cpy' in sbufs:
self._scal_qpts_cpy = salloc('scal_qpts_cpy', nqpts)
# Allocate required vector scratch space
if 'vect_upts' in sbufs:
self._vect_upts = valloc('vect_upts', nupts)
if 'vect_qpts' in sbufs:
self._vect_qpts = valloc('vect_qpts', nqpts)
if 'vect_fpts' in sbufs:
self._vect_fpts = valloc('vect_fpts', nfpts)
# Allocate and bank the storage required by the time integrator
self._scal_upts = [backend.matrix(self._scal_upts.shape,
self._scal_upts, tags={'align'})
for i in range(nscal_upts)]
self.scal_upts_inb = inb = backend.matrix_bank(self._scal_upts)
self.scal_upts_outb = backend.matrix_bank(self._scal_upts)
# Find/allocate space for a solution-sized scalar that is
# allowed to alias other scratch space in the simulation
aliases = next((m for m in abufs if m.nbytes >= inb.nbytes), None)
self._scal_upts_temp = backend.matrix(inb.ioshape, aliases=aliases,
tags=inb.tags)
@memoize
def opmat(self, expr):
return self._be.const_matrix(self.basis.opmat(expr),
tags={expr, 'align'})
@memoize
def smat_at_np(self, name):
smats_mpts, _ = self._smats_djacs_mpts
# Interpolation matrix to pts
m0 = self.basis.mbasis.nodal_basis_at(getattr(self.basis, name))
# Interpolate the smats
smats = np.array([np.dot(m0, smat) for smat in smats_mpts])
return smats.reshape(self.ndims, -1, self.ndims, self.neles)
@memoize
def smat_at(self, name):
return self._be.const_matrix(self.smat_at_np(name), tags={'align'})
@memoize
def rcpdjac_at_np(self, name):
_, djacs_mpts = self._smats_djacs_mpts
# Interpolation matrix to pts
m0 = self.basis.mbasis.nodal_basis_at(getattr(self.basis, name))
# Interpolate the djacs
djac = np.dot(m0, djacs_mpts)
if np.any(djac < -1e-5):
raise RuntimeError('Negative mesh Jacobians detected')
return 1.0 / djac
@memoize
def rcpdjac_at(self, name):
return self._be.const_matrix(self.rcpdjac_at_np(name), tags={'align'})
@memoize
def ploc_at_np(self, name):
op = self.basis.sbasis.nodal_basis_at(getattr(self.basis, name))
ploc = np.dot(op, self.eles.reshape(self.nspts, -1))
ploc = ploc.reshape(-1, self.neles, self.ndims).swapaxes(1, 2)
return ploc
@memoize
def ploc_at(self, name):
return self._be.const_matrix(self.ploc_at_np(name), tags={'align'})
def _gen_pnorm_fpts(self):
smats = self.smat_at_np('fpts').transpose(1, 3, 0, 2)
# We need to compute |J|*[(J^{-1})^{T}.N] where J is the
# Jacobian and N is the normal for each fpt. Using
# J^{-1} = S/|J| where S are the smats, we have S^{T}.N.
pnorm_fpts = np.einsum('ijlk,il->ijk', smats, self.basis.norm_fpts)
# Compute the magnitudes of these flux point normals
mag_pnorm_fpts = np.einsum('...i,...i', pnorm_fpts, pnorm_fpts)
mag_pnorm_fpts = np.sqrt(mag_pnorm_fpts)
# Check that none of these magnitudes are zero
if np.any(mag_pnorm_fpts < 1e-10):
raise RuntimeError('Zero face normals detected')
# Normalize the physical normals at the flux points
self._norm_pnorm_fpts = pnorm_fpts / mag_pnorm_fpts[..., None]
self._mag_pnorm_fpts = mag_pnorm_fpts
@lazyprop
def _norm_pnorm_fpts(self):
self._gen_pnorm_fpts()
return self._norm_pnorm_fpts
@lazyprop
def _mag_pnorm_fpts(self):
self._gen_pnorm_fpts()
return self._mag_pnorm_fpts
@lazyprop
def _smats_djacs_mpts(self):
        # Metric basis with grid points (q <= p) or pseudo grid points (q > p)
mpts = self.basis.mpts
mbasis = self.basis.mbasis
# Dimensions, number of elements and number of mpts
ndims, neles, nmpts = self.ndims, self.neles, self.nmpts
# Physical locations of the pseudo grid points
x = self.ploc_at_np('mpts')
# Jacobian operator at these points
jacop = np.rollaxis(mbasis.jac_nodal_basis_at(mpts), 2)
jacop = jacop.reshape(-1, nmpts)
# Cast as a matrix multiply and apply to eles
jac = np.dot(jacop, x.reshape(nmpts, -1))
# Reshape (nmpts*ndims, neles*ndims) => (nmpts, ndims, neles, ndims)
jac = jac.reshape(nmpts, ndims, ndims, neles)
# Transpose to get (ndims, ndims, nmpts, neles)
jac = jac.transpose(1, 2, 0, 3)
smats = np.empty((ndims, nmpts, ndims, neles))
if ndims == 2:
a, b, c, d = jac[0, 0], jac[1, 0], jac[0, 1], jac[1, 1]
smats[0, :, 0], smats[0, :, 1] = d, -b
smats[1, :, 0], smats[1, :, 1] = -c, a
djacs = a*d - b*c
else:
dtt = []
for dx in jac:
# Compute x cross x_(chi)
tt = np.cross(x, dx, axisa=1, axisb=0, axisc=1)
# Jacobian of x cross x_(chi) at the pseudo grid points
dt = np.dot(jacop, tt.reshape(nmpts, -1))
dt = dt.reshape(nmpts, ndims, ndims, -1).swapaxes(0, 1)
dtt.append(dt)
# Kopriva's invariant form of smats; JSC 26(3), 301-327, Eq. (37)
smats[0] = 0.5*(dtt[2][1] - dtt[1][2])
smats[1] = 0.5*(dtt[0][2] - dtt[2][0])
smats[2] = 0.5*(dtt[1][0] - dtt[0][1])
# Exploit the fact that det(J) = x0 . (x1 ^ x2)
djacs = np.einsum('ij...,ji...->j...', jac[0], smats[0])
return smats.reshape(ndims, nmpts, -1), djacs
def get_mag_pnorms(self, eidx, fidx):
fpts_idx = self.basis.facefpts[fidx]
return self._mag_pnorm_fpts[fpts_idx,eidx]
def get_mag_pnorms_for_inter(self, eidx, fidx):
fpts_idx = self._srtd_face_fpts[fidx][eidx]
return self._mag_pnorm_fpts[fpts_idx,eidx]
def get_norm_pnorms_for_inter(self, eidx, fidx):
fpts_idx = self._srtd_face_fpts[fidx][eidx]
return self._norm_pnorm_fpts[fpts_idx,eidx]
def get_norm_pnorms(self, eidx, fidx):
fpts_idx = self.basis.facefpts[fidx]
return self._norm_pnorm_fpts[fpts_idx,eidx]
def get_scal_fpts_for_inter(self, eidx, fidx):
nfp = self.nfacefpts[fidx]
rmap = self._srtd_face_fpts[fidx][eidx]
cmap = (eidx,)*nfp
return (self._scal_fpts.mid,)*nfp, rmap, cmap
def get_vect_fpts_for_inter(self, eidx, fidx):
nfp = self.nfacefpts[fidx]
rmap = self._srtd_face_fpts[fidx][eidx]
cmap = (eidx,)*nfp
rstri = (self.nfpts,)*nfp
return (self._vect_fpts.mid,)*nfp, rmap, cmap, rstri
def get_ploc_for_inter(self, eidx, fidx):
fpts_idx = self._srtd_face_fpts[fidx][eidx]
return self.plocfpts[fpts_idx,eidx]
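# Standalone numpy sketch (independent of any PyFR backend) of the einsum used
# in _gen_pnorm_fpts above: pnorm[i, j, k] = sum_l smats[i, j, l, k]*norm[i, l],
# i.e. S^T applied to the reference-space normal at each flux point of each
# element.  The shapes and random values here are illustrative only.
def _demo_pnorm_einsum(nfpts=4, neles=3, ndims=2):
    rng = np.random.RandomState(0)
    smats = rng.normal(size=(nfpts, neles, ndims, ndims))
    norm = rng.normal(size=(nfpts, ndims))
    pnorm = np.einsum('ijlk,il->ijk', smats, norm)
    # Explicit-loop reference for comparison
    ref = np.empty_like(pnorm)
    for i in range(nfpts):
        for j in range(neles):
            ref[i, j] = smats[i, j].T @ norm[i]
    assert np.allclose(pnorm, ref)
    # Magnitudes and unit normals, mirroring the method above
    mag = np.sqrt(np.einsum('...i,...i', pnorm, pnorm))
    return pnorm / mag[..., None], mag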
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import mock
from openstackclient.network.v2 import floating_ip
from openstackclient.tests.compute.v2 import fakes as compute_fakes
from openstackclient.tests.network.v2 import fakes as network_fakes
from openstackclient.tests import utils as tests_utils
# Tests for Neutron network
#
class TestFloatingIPNetwork(network_fakes.TestNetworkV2):
def setUp(self):
super(TestFloatingIPNetwork, self).setUp()
# Get a shortcut to the network client
self.network = self.app.client_manager.network
class TestCreateFloatingIPNetwork(TestFloatingIPNetwork):
# Fake data for option tests.
floating_network = network_fakes.FakeNetwork.create_one_network()
subnet = network_fakes.FakeSubnet.create_one_subnet()
port = network_fakes.FakePort.create_one_port()
    # The floating ip to create.
floating_ip = network_fakes.FakeFloatingIP.create_one_floating_ip(
attrs={
'floating_network_id': floating_network.id,
'port_id': port.id,
}
)
columns = (
'dns_domain',
'dns_name',
'fixed_ip_address',
'floating_ip_address',
'floating_network_id',
'id',
'port_id',
'project_id',
'router_id',
'status',
)
data = (
floating_ip.dns_domain,
floating_ip.dns_name,
floating_ip.fixed_ip_address,
floating_ip.floating_ip_address,
floating_ip.floating_network_id,
floating_ip.id,
floating_ip.port_id,
floating_ip.project_id,
floating_ip.router_id,
floating_ip.status,
)
def setUp(self):
super(TestCreateFloatingIPNetwork, self).setUp()
self.network.create_ip = mock.Mock(return_value=self.floating_ip)
self.network.find_network = mock.Mock(
return_value=self.floating_network)
self.network.find_subnet = mock.Mock(return_value=self.subnet)
self.network.find_port = mock.Mock(return_value=self.port)
# Get the command object to test
self.cmd = floating_ip.CreateFloatingIP(self.app, self.namespace)
def test_create_no_options(self):
arglist = []
verifylist = []
self.assertRaises(tests_utils.ParserException, self.check_parser,
self.cmd, arglist, verifylist)
def test_create_default_options(self):
arglist = [
self.floating_ip.floating_network_id,
]
verifylist = [
('network', self.floating_ip.floating_network_id),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.network.create_ip.assert_called_once_with(**{
'floating_network_id': self.floating_ip.floating_network_id,
})
self.assertEqual(self.columns, columns)
self.assertEqual(self.data, data)
def test_create_all_options(self):
arglist = [
'--subnet', self.subnet.id,
'--port', self.floating_ip.port_id,
'--floating-ip-address', self.floating_ip.floating_ip_address,
'--fixed-ip-address', self.floating_ip.fixed_ip_address,
self.floating_ip.floating_network_id,
]
verifylist = [
('subnet', self.subnet.id),
('port', self.floating_ip.port_id),
('floating_ip_address', self.floating_ip.floating_ip_address),
('fixed_ip_address', self.floating_ip.fixed_ip_address),
('network', self.floating_ip.floating_network_id),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.network.create_ip.assert_called_once_with(**{
'subnet_id': self.subnet.id,
'port_id': self.floating_ip.port_id,
'floating_ip_address': self.floating_ip.floating_ip_address,
'fixed_ip_address': self.floating_ip.fixed_ip_address,
'floating_network_id': self.floating_ip.floating_network_id,
})
self.assertEqual(self.columns, columns)
self.assertEqual(self.data, data)
class TestDeleteFloatingIPNetwork(TestFloatingIPNetwork):
# The floating ip to be deleted.
floating_ip = network_fakes.FakeFloatingIP.create_one_floating_ip()
def setUp(self):
super(TestDeleteFloatingIPNetwork, self).setUp()
self.network.delete_ip = mock.Mock(return_value=None)
self.network.find_ip = mock.Mock(return_value=self.floating_ip)
# Get the command object to test
self.cmd = floating_ip.DeleteFloatingIP(self.app, self.namespace)
def test_floating_ip_delete(self):
arglist = [
self.floating_ip.id,
]
verifylist = [
('floating_ip', self.floating_ip.id),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
self.network.find_ip.assert_called_once_with(self.floating_ip.id)
self.network.delete_ip.assert_called_once_with(self.floating_ip)
self.assertIsNone(result)
class TestListFloatingIPNetwork(TestFloatingIPNetwork):
    # The floating ips to list.
floating_ips = network_fakes.FakeFloatingIP.create_floating_ips(count=3)
columns = (
'ID',
'Floating IP Address',
'Fixed IP Address',
'Port',
)
data = []
for ip in floating_ips:
data.append((
ip.id,
ip.floating_ip_address,
ip.fixed_ip_address,
ip.port_id,
))
def setUp(self):
super(TestListFloatingIPNetwork, self).setUp()
self.network.ips = mock.Mock(return_value=self.floating_ips)
# Get the command object to test
self.cmd = floating_ip.ListFloatingIP(self.app, self.namespace)
def test_floating_ip_list(self):
arglist = []
verifylist = []
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.network.ips.assert_called_once_with(**{})
self.assertEqual(self.columns, columns)
self.assertEqual(self.data, list(data))
class TestShowFloatingIPNetwork(TestFloatingIPNetwork):
# The floating ip to display.
floating_ip = network_fakes.FakeFloatingIP.create_one_floating_ip()
columns = (
'dns_domain',
'dns_name',
'fixed_ip_address',
'floating_ip_address',
'floating_network_id',
'id',
'port_id',
'project_id',
'router_id',
'status',
)
data = (
floating_ip.dns_domain,
floating_ip.dns_name,
floating_ip.fixed_ip_address,
floating_ip.floating_ip_address,
floating_ip.floating_network_id,
floating_ip.id,
floating_ip.port_id,
floating_ip.tenant_id,
floating_ip.router_id,
floating_ip.status,
)
def setUp(self):
super(TestShowFloatingIPNetwork, self).setUp()
self.network.find_ip = mock.Mock(return_value=self.floating_ip)
# Get the command object to test
self.cmd = floating_ip.ShowFloatingIP(self.app, self.namespace)
def test_floating_ip_show(self):
arglist = [
self.floating_ip.id,
]
verifylist = [
('floating_ip', self.floating_ip.id),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.network.find_ip.assert_called_once_with(
self.floating_ip.id,
ignore_missing=False
)
self.assertEqual(self.columns, columns)
self.assertEqual(self.data, data)
# Tests for Nova network
#
class TestFloatingIPCompute(compute_fakes.TestComputev2):
def setUp(self):
super(TestFloatingIPCompute, self).setUp()
# Get a shortcut to the compute client
self.compute = self.app.client_manager.compute
class TestCreateFloatingIPCompute(TestFloatingIPCompute):
    # The floating ip to create.
floating_ip = compute_fakes.FakeFloatingIP.create_one_floating_ip()
columns = (
'fixed_ip',
'id',
'instance_id',
'ip',
'pool',
)
data = (
floating_ip.fixed_ip,
floating_ip.id,
floating_ip.instance_id,
floating_ip.ip,
floating_ip.pool,
)
def setUp(self):
super(TestCreateFloatingIPCompute, self).setUp()
self.app.client_manager.network_endpoint_enabled = False
self.compute.floating_ips.create.return_value = self.floating_ip
# Get the command object to test
self.cmd = floating_ip.CreateFloatingIP(self.app, None)
def test_create_no_options(self):
arglist = []
verifylist = []
self.assertRaises(tests_utils.ParserException, self.check_parser,
self.cmd, arglist, verifylist)
def test_create_default_options(self):
arglist = [
self.floating_ip.pool,
]
verifylist = [
('network', self.floating_ip.pool),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.compute.floating_ips.create.assert_called_once_with(
self.floating_ip.pool)
self.assertEqual(self.columns, columns)
self.assertEqual(self.data, data)
class TestDeleteFloatingIPCompute(TestFloatingIPCompute):
# The floating ip to be deleted.
floating_ip = compute_fakes.FakeFloatingIP.create_one_floating_ip()
def setUp(self):
super(TestDeleteFloatingIPCompute, self).setUp()
self.app.client_manager.network_endpoint_enabled = False
self.compute.floating_ips.delete.return_value = None
# Return value of utils.find_resource()
self.compute.floating_ips.get.return_value = self.floating_ip
# Get the command object to test
self.cmd = floating_ip.DeleteFloatingIP(self.app, None)
def test_floating_ip_delete(self):
arglist = [
self.floating_ip.id,
]
verifylist = [
('floating_ip', self.floating_ip.id),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
self.compute.floating_ips.delete.assert_called_once_with(
self.floating_ip.id
)
self.assertIsNone(result)
class TestListFloatingIPCompute(TestFloatingIPCompute):
    # The floating ips to list.
floating_ips = compute_fakes.FakeFloatingIP.create_floating_ips(count=3)
columns = (
'ID',
'Floating IP Address',
'Fixed IP Address',
'Server',
'Pool',
)
data = []
for ip in floating_ips:
data.append((
ip.id,
ip.ip,
ip.fixed_ip,
ip.instance_id,
ip.pool,
))
def setUp(self):
super(TestListFloatingIPCompute, self).setUp()
self.app.client_manager.network_endpoint_enabled = False
self.compute.floating_ips.list.return_value = self.floating_ips
# Get the command object to test
self.cmd = floating_ip.ListFloatingIP(self.app, None)
def test_floating_ip_list(self):
arglist = []
verifylist = []
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.compute.floating_ips.list.assert_called_once_with()
self.assertEqual(self.columns, columns)
self.assertEqual(self.data, list(data))
class TestShowFloatingIPCompute(TestFloatingIPCompute):
# The floating ip to display.
floating_ip = compute_fakes.FakeFloatingIP.create_one_floating_ip()
columns = (
'fixed_ip',
'id',
'instance_id',
'ip',
'pool',
)
data = (
floating_ip.fixed_ip,
floating_ip.id,
floating_ip.instance_id,
floating_ip.ip,
floating_ip.pool,
)
def setUp(self):
super(TestShowFloatingIPCompute, self).setUp()
self.app.client_manager.network_endpoint_enabled = False
# Return value of utils.find_resource()
self.compute.floating_ips.get.return_value = self.floating_ip
# Get the command object to test
self.cmd = floating_ip.ShowFloatingIP(self.app, None)
def test_floating_ip_show(self):
arglist = [
self.floating_ip.id,
]
verifylist = [
('floating_ip', self.floating_ip.id),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.assertEqual(self.columns, columns)
self.assertEqual(self.data, data)
|
|
# Lint as: python2, python3
# Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Radon contextual effects model."""
import functools
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.internal import dtype_util
from inference_gym.internal import data
from inference_gym.targets import bayesian_model
from inference_gym.targets import model
from inference_gym.targets.ground_truth import radon_contextual_effects_indiana
from inference_gym.targets.ground_truth import radon_contextual_effects_indiana_halfnormal
from inference_gym.targets.ground_truth import radon_contextual_effects_minnesota
from inference_gym.targets.ground_truth import radon_contextual_effects_minnesota_halfnormal
tfb = tfp.bijectors
tfd = tfp.distributions
__all__ = [
'RadonContextualEffects',
'RadonContextualEffectsIndiana',
'RadonContextualEffectsHalfNormalIndiana',
'RadonContextualEffectsMinnesota',
'RadonContextualEffectsHalfNormalMinnesota',
]
def affine(x, kernel_diag, bias=None):
"""`kernel_diag * x + bias` with broadcasting."""
if bias is None:
bias = tf.zeros([], dtype=x.dtype)
return x * kernel_diag + bias
def make_radon_prior(num_counties, dtype, prior_scale):
"""Generative process for the radon model with contextual effects."""
if prior_scale == 'uniform':
county_effect_scale = tfd.Uniform(low=tf.zeros([], dtype=dtype), high=100.)
log_radon_scale = tfd.Uniform(low=tf.zeros([], dtype=dtype), high=100.)
elif prior_scale == 'halfnormal':
county_effect_scale = tfd.HalfNormal(scale=tf.ones([], dtype=dtype))
log_radon_scale = tfd.HalfNormal(scale=tf.ones([], dtype=dtype))
else:
    raise ValueError(
        '{} is not a valid value for `prior_scale`'.format(prior_scale))
return tfd.JointDistributionNamed(
dict(
county_effect_mean=tfd.Normal(
loc=tf.zeros([], dtype=dtype), scale=1.),
county_effect_scale=county_effect_scale,
county_effect=(
lambda county_effect_scale, county_effect_mean: # pylint: disable=g-long-lambda
tfd.Sample(
tfd.Normal(loc=county_effect_mean, scale=county_effect_scale),
sample_shape=[num_counties])),
weight=tfd.Sample(
tfd.Normal(loc=tf.zeros([], dtype=dtype), scale=1.),
sample_shape=[3]),
log_radon_scale=log_radon_scale,
))
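# A minimal, hypothetical sketch (not used by inference_gym itself): draw one
# joint sample from the prior above and score it.  The county count here is
# arbitrary.
def _demo_radon_prior_sample(num_counties=3):
  prior = make_radon_prior(num_counties, dtype=tf.float32, prior_scale='uniform')
  sample = prior.sample()
  # `sample` is a dict with e.g. 'county_effect' of shape [num_counties] and
  # 'weight' of shape [3]; log_prob scores the whole joint draw.
  return sample, prior.log_prob(sample)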
def make_radon_observation_dist(params, log_uranium, floor, county,
floor_by_county):
"""Likelihood of observed data under the contextual effects radon model."""
floor = tf.cast(floor, dtype=log_uranium.dtype)
return tfd.Normal(
loc=affine(
log_uranium,
params['weight'][..., :1],
affine(floor, params['weight'][..., 1:2])
+ affine(floor_by_county, params['weight'][..., 2:])
+ tf.gather(params['county_effect'], county, axis=-1)),
scale=params['log_radon_scale'][..., tf.newaxis])
def radon_log_likelihood_fn(
params, log_uranium, floor, county, floor_by_county, log_radon,
reduce_sum=True):
log_likelihood = make_radon_observation_dist(
params, log_uranium, floor, county, floor_by_county).log_prob(log_radon)
if reduce_sum:
return tf.reduce_sum(log_likelihood, [-1])
return log_likelihood
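# A hypothetical end-to-end sketch tying the pieces together: sample parameters
# from the prior, then sample and score synthetic observations for a few
# made-up houses.  All covariate values below are placeholders.
def _demo_radon_likelihood(num_counties=3, num_houses=5):
  params, _ = _demo_radon_prior_sample(num_counties)
  log_uranium = tf.zeros([num_houses])
  floor = tf.zeros([num_houses], dtype=tf.int32)
  county = tf.zeros([num_houses], dtype=tf.int32)
  floor_by_county = tf.zeros([num_houses])
  obs_dist = make_radon_observation_dist(
      params, log_uranium, floor, county, floor_by_county)
  log_radon = obs_dist.sample()
  # Summed log-likelihood of the synthetic observations under `params`
  return radon_log_likelihood_fn(
      params, log_uranium, floor, county, floor_by_county, log_radon)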
class RadonContextualEffects(bayesian_model.BayesianModel):
"""Hierarchical radon model with contextual effects.
Radon is a radioactive gas that enters homes through contact points with the
ground. It is a carcinogen that is the primary cause of lung cancer in
non-smokers. Radon levels vary greatly from household to household.
The EPA did a study of radon levels in 80,000 houses. Two important predictors
are: 1. Measurement in the basement or the first floor (radon higher in
basements) 2. County uranium level (positive correlation with radon levels)
We will build a hierarchical model of radon measurements in houses, in which
the hierarchy is households within each county. We will incorporate a
contextual effect corresponding to the mean floor at which the measurement
was taken, by county.
```none
county_effect_mean ~ Normal(loc=0, scale=1)
county_effect_scale ~ Uniform(low=0, high=100)
for i in range(num_counties):
county_effect[i] ~ Normal(loc=county_effect_mean,
scale=county_effect_scale)
for j in range(3):
weight[j] ~ Normal(loc=0, scale=1)
log_radon_scale ~ Uniform(low=0, high=100)
for k in range(num_houses):
log_radon[k] ~ Normal(
        loc=log_uranium * weight[0] # effect of soil uranium
            + floor * weight[1] # effect of floor
            + floor_by_county * weight[2] # effect of mean floor by county
+ county_effect[county[k]], # effect of county
scale=log_radon_scale)
```
This model is based on an example from [1] and is the same as the Stan model
at
<https://mc-stan.org/users/documentation/case-studies/radon.html
#Correlations-among-levels>.
Initializing this model with a `halfnormal` value for `prior_scale` will
construct a modified version of this model in which the scales for the prior
are constructed with a `HalfNormal` distribution instead of a `Uniform`
distribution.
```none
county_effect_scale ~ HalfNormal(scale=1.)
log_radon_scale ~ HalfNormal(scale=1.)
```
#### References
[1] Gelman, A., & Hill, J. (2007). Data Analysis Using Regression and
Multilevel/Hierarchical Models (1st ed.). Cambridge University Press.
[2] Stan Development Team. 2018. Stan Modeling Language Users Guide and
Reference Manual, Version 2.18.0. http://mc-stan.org
"""
def __init__(self,
num_counties,
train_log_uranium,
train_floor,
train_county,
train_floor_by_county,
train_log_radon,
test_log_uranium=None,
test_floor=None,
test_county=None,
test_floor_by_county=None,
test_log_radon=None,
prior_scale='uniform',
name='radon_contextual_effects',
pretty_name='Radon Contextual Effects'):
"""Construct the hierarchical radon model with contextual effects.
Args:
num_counties: `int`, number of counties represented in the data.
train_log_uranium: Floating-point `Tensor` with shape
`[num_train_points]`. Soil uranium measurements.
train_floor: Integer `Tensor` with shape `[num_train_points]`. Floor of
the house on which the measurement was taken.
train_county: Integer `Tensor` with values in `range(0, num_counties)` of
shape `[num_train_points]`. County in which the measurement was taken.
train_floor_by_county: Floating-point `Tensor` with shape
`[num_train_points]`. Average floor on which the measurement was taken
for the county in which each house is located (the `Tensor` will have
`num_counties` unique values). This represents the contextual effect.
train_log_radon: Floating-point `Tensor` with shape `[num_train_points]`.
Radon measurement for each house (the dependent variable in the model).
test_log_uranium: Floating-point `Tensor` with shape `[num_test_points]`.
Soil uranium measurements for the test set. Can be `None`, in which case
test-related sample transformations are not computed.
test_floor: Integer `Tensor` with shape `[num_test_points]`. Floor of the
house on which the measurement was taken. Can be `None`, in which case
test-related sample transformations are not computed.
test_county: Integer `Tensor` with values in `range(0, num_counties)` of
shape `[num_test_points]`. County in which the measurement was taken.
Can be `None`, in which case test-related sample transformations are not
computed.
test_floor_by_county: Floating-point `Tensor` with shape
`[num_test_points]`. Average floor on which the measurement was taken
(calculated from the training set) for the county in which each house is
located (the `Tensor` will have `num_counties` unique values). This
represents the contextual effect. Can be `None`, in which case
test-related sample transformations are not computed.
test_log_radon: Floating-point `Tensor` with shape `[num_test_points]`.
Radon measurement for each house (the dependent variable in the model).
Can be `None`, in which case test-related sample transformations are not
computed.
prior_scale: String value. The default `uniform` value constructs the
prior distribution's `county_effect_scale` and `log_radon_scale` with a
`Uniform` distribution as in the original Stan model. A `halfnormal`
value constructs the prior distribution's `county_effect_scale` and
`log_radon_scale` with a `HalfNormal` distribution.
name: Python `str` name prefixed to Ops created by this class.
pretty_name: A Python `str`. The pretty name of this model.
Raises:
ValueError if any but not all of `test_*` inputs are None.
"""
with tf.name_scope(name):
test_data = (test_log_uranium, test_floor, test_county,
test_floor_by_county, test_log_radon)
test_data_present = (d is not None for d in test_data)
self._have_test = all(test_data_present)
if not self._have_test and any(test_data_present):
raise ValueError(
'Test set values must all be specified or all `None`. Got:'
'`test_log_uranium`={}, `test_floor`={}, `test_county`={},'
'`test_floor_by_county`={}, `test_log_radon`={}`'.format(
*test_data))
dtype = train_log_radon.dtype
self._prior_dist = make_radon_prior(
num_counties, dtype=dtype, prior_scale=prior_scale)
self._train_log_likelihood_fn = functools.partial(
radon_log_likelihood_fn,
log_uranium=train_log_uranium,
floor=train_floor,
county=train_county,
floor_by_county=train_floor_by_county,
log_radon=train_log_radon)
sample_transformations = {
'identity':
model.Model.SampleTransformation(
fn=lambda params: params,
pretty_name='Identity',
dtype=self._prior_dist.dtype,
)
}
if self._have_test:
test_log_likelihood_fn = functools.partial(
radon_log_likelihood_fn,
log_uranium=test_log_uranium,
floor=test_floor,
county=test_county,
floor_by_county=test_floor_by_county,
log_radon=test_log_radon)
sample_transformations['test_nll'] = (
model.Model.SampleTransformation(
fn=test_log_likelihood_fn,
pretty_name='Test NLL',
))
sample_transformations['per_example_test_nll'] = (
model.Model.SampleTransformation(
fn=functools.partial(test_log_likelihood_fn, reduce_sum=False),
pretty_name='Per-example Test NLL',
))
self._train_log_uranium = train_log_uranium
self._train_floor = train_floor
self._train_county = train_county
self._train_floor_by_county = train_floor_by_county
self._test_log_uranium = test_log_uranium
self._test_floor = test_floor
self._test_county = test_county
self._test_floor_by_county = test_floor_by_county
self._num_counties = num_counties
self._prior_scale = prior_scale
super(RadonContextualEffects, self).__init__(
default_event_space_bijector={
'county_effect_mean':
tfb.Identity(),
'county_effect_scale':
tfb.Sigmoid(low=tf.zeros([], dtype=dtype), high=100.)
if self._prior_scale == 'uniform' else tfb.Softplus(),
'county_effect':
tfb.Identity(),
'weight':
tfb.Identity(),
'log_radon_scale':
tfb.Sigmoid(low=tf.zeros([], dtype=dtype), high=100.)
if self._prior_scale == 'uniform' else tfb.Softplus()
},
event_shape=self._prior_dist.event_shape,
dtype=self._prior_dist.dtype,
name=name,
pretty_name=pretty_name,
sample_transformations=sample_transformations,
)
def _prior_distribution(self):
return self._prior_dist
def _log_likelihood(self, value):
return self._train_log_likelihood_fn(value)
def _sample_dataset(self, seed):
dataset = dict(
train_log_uranium=self._train_log_uranium,
train_floor=self._train_floor,
train_county=self._train_county,
train_floor_by_county=self._train_floor_by_county,
num_counties=self._num_counties,
test_log_uranium=self._test_log_uranium,
test_floor=self._test_floor,
test_county=self._test_county,
test_floor_by_county=self._test_floor_by_county,
)
prior_samples = self._prior_distribution().sample(seed=seed)
dist = make_radon_observation_dist(
prior_samples, self._train_log_uranium, self._train_floor,
self._train_county, self._train_floor_by_county)
dataset['train_log_radon'] = dist.sample(seed=seed)
if self._have_test:
test_dist = make_radon_observation_dist(
prior_samples, self._test_log_uranium, self._test_floor,
self._test_county, self._test_floor_by_county)
dataset['test_log_radon'] = test_dist.sample(seed=seed)
return dataset
class RadonContextualEffectsIndiana(RadonContextualEffects):
"""Bayesian hierarchical model to predict radon measurements in houses.
This model uses the Radon data set that accompanies the example in [1],
  filtered to include only houses in Indiana. Unlike the classical Minnesota
dataset, this dataset is somewhat larger and produces a posterior that does
not require 64 bit precision to sample from.
#### References
[1] Gelman, A., & Hill, J. (2007). Data Analysis Using Regression and
Multilevel/Hierarchical Models (1st ed.). Cambridge University Press.
http://www.stat.columbia.edu/~gelman/arm/examples/radon/
"""
GROUND_TRUTH_MODULE = radon_contextual_effects_indiana
def __init__(self, dtype=tf.float32):
dataset = data.radon_indiana()
for key in list(dataset.keys()):
if key.startswith('test_'):
del dataset[key]
elif dtype_util.is_floating(dataset[key].dtype):
dataset[key] = tf.cast(dataset[key], dtype)
super(RadonContextualEffectsIndiana, self).__init__(
name='radon_contextual_effects_indiana',
pretty_name='Radon Contextual Effects Indiana',
**dataset)
class RadonContextualEffectsHalfNormalIndiana(RadonContextualEffects):
"""Bayesian hierarchical model to predict radon measurements in houses.
This model uses the Radon data set that accompanies the example in [1],
filtered to include only houses in Indiana. It uses the form of the model with
  a `HalfNormal` prior on the scale parameters. Unlike the classical Minnesota
dataset, this dataset is somewhat larger and produces a posterior that does
not require 64 bit precision to sample from.
#### References
[1] Gelman, A., & Hill, J. (2007). Data Analysis Using Regression and
Multilevel/Hierarchical Models (1st ed.). Cambridge University Press.
http://www.stat.columbia.edu/~gelman/arm/examples/radon/
"""
GROUND_TRUTH_MODULE = radon_contextual_effects_indiana_halfnormal
def __init__(self, dtype=tf.float32):
dataset = data.radon_indiana()
for key in list(dataset.keys()):
if key.startswith('test_'):
del dataset[key]
elif dtype_util.is_floating(dataset[key].dtype):
dataset[key] = tf.cast(dataset[key], dtype)
super(RadonContextualEffectsHalfNormalIndiana, self).__init__(
name='radon_contextual_effects_halfnormal_indiana',
pretty_name='Radon Contextual Effects HalfNormal Indiana',
prior_scale='halfnormal',
**dataset)
class RadonContextualEffectsMinnesota(RadonContextualEffects):
"""Bayesian hierarchical model to predict radon measurements in houses.
This model uses the Radon data set that accompanies the example in [1],
filtered to include only houses in Minnesota.
#### References
[1] Gelman, A., & Hill, J. (2007). Data Analysis Using Regression and
Multilevel/Hierarchical Models (1st ed.). Cambridge University Press.
http://www.stat.columbia.edu/~gelman/arm/examples/radon/
"""
GROUND_TRUTH_MODULE = radon_contextual_effects_minnesota
def __init__(self, dtype=tf.float64):
dataset = data.radon_minnesota()
for key in list(dataset.keys()):
if key.startswith('test_'):
del dataset[key]
elif dtype_util.is_floating(dataset[key].dtype):
dataset[key] = tf.cast(dataset[key], dtype)
super(RadonContextualEffectsMinnesota, self).__init__(
name='radon_contextual_effects_minnesota',
pretty_name='Radon Contextual Effects Minnesota',
**dataset)
class RadonContextualEffectsHalfNormalMinnesota(RadonContextualEffects):
"""Bayesian hierarchical model to predict radon measurements in houses.
This model uses the Radon data set that accompanies the example in [1],
filtered to include only houses in Minnesota. It uses the form of the model
with a `HalfNormal` prior on the scale parameters.
#### References
[1] Gelman, A., & Hill, J. (2007). Data Analysis Using Regression and
Multilevel/Hierarchical Models (1st ed.). Cambridge University Press.
http://www.stat.columbia.edu/~gelman/arm/examples/radon/
"""
GROUND_TRUTH_MODULE = radon_contextual_effects_minnesota_halfnormal
def __init__(self, dtype=tf.float64):
dataset = data.radon_minnesota()
for key in list(dataset.keys()):
if key.startswith('test_'):
del dataset[key]
elif dtype_util.is_floating(dataset[key].dtype):
dataset[key] = tf.cast(dataset[key], dtype)
super(RadonContextualEffectsHalfNormalMinnesota, self).__init__(
name='radon_contextual_effects_halfnormal_minnesota',
pretty_name='Radon Contextual Effects HalfNormal Minnesota',
prior_scale='halfnormal',
**dataset)
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import mxnet as mx
import copy
from mxnet import gluon
from mxnet.gluon import contrib
from mxnet.gluon import nn
from mxnet.gluon.contrib.nn import (
Concurrent, HybridConcurrent, Identity, SparseEmbedding, PixelShuffle1D,
PixelShuffle2D, PixelShuffle3D)
from mxnet.test_utils import almost_equal, default_context, assert_almost_equal, assert_allclose
from common import setup_module, with_seed, teardown
import numpy as np
def check_rnn_cell(cell, prefix, in_shape=(10, 50), out_shape=(10, 100), begin_state=None):
inputs = [mx.sym.Variable('rnn_t%d_data'%i) for i in range(3)]
outputs, _ = cell.unroll(3, inputs, begin_state=begin_state)
outputs = mx.sym.Group(outputs)
assert sorted(cell.collect_params().keys()) == [prefix+'h2h_bias', prefix+'h2h_weight',
prefix+'i2h_bias', prefix+'i2h_weight']
assert outputs.list_outputs() == [prefix+'t0_out_output', prefix+'t1_out_output', prefix+'t2_out_output']
args, outs, auxs = outputs.infer_shape(rnn_t0_data=in_shape,
rnn_t1_data=in_shape,
rnn_t2_data=in_shape)
assert outs == [out_shape]*3
def check_rnn_forward(layer, inputs):
inputs.attach_grad()
layer.collect_params().initialize()
with mx.autograd.record():
layer.unroll(3, inputs, merge_outputs=True)[0].backward()
mx.autograd.backward(layer.unroll(3, inputs, merge_outputs=False)[0])
mx.nd.waitall()
@with_seed()
def test_rnn_cells():
check_rnn_forward(contrib.rnn.Conv1DLSTMCell((5, 7), 10, (3,), (3,)),
mx.nd.ones((8, 3, 5, 7)))
check_rnn_forward(contrib.rnn.Conv1DRNNCell((5, 7), 10, (3,), (3,)),
mx.nd.ones((8, 3, 5, 7)))
check_rnn_forward(contrib.rnn.Conv1DGRUCell((5, 7), 10, (3,), (3,)),
mx.nd.ones((8, 3, 5, 7)))
net = mx.gluon.rnn.SequentialRNNCell()
net.add(contrib.rnn.Conv1DLSTMCell((5, 7), 10, (3,), (3,)))
net.add(contrib.rnn.Conv1DRNNCell((10, 5), 11, (3,), (3,)))
net.add(contrib.rnn.Conv1DGRUCell((11, 3), 12, (3,), (3,)))
check_rnn_forward(net, mx.nd.ones((8, 3, 5, 7)))
@with_seed()
def test_convrnn():
cell = contrib.rnn.Conv1DRNNCell((10, 50), 100, 3, 3, prefix='rnn_')
check_rnn_cell(cell, prefix='rnn_', in_shape=(1, 10, 50), out_shape=(1, 100, 48))
cell = contrib.rnn.Conv2DRNNCell((10, 20, 50), 100, 3, 3, prefix='rnn_')
check_rnn_cell(cell, prefix='rnn_', in_shape=(1, 10, 20, 50), out_shape=(1, 100, 18, 48))
cell = contrib.rnn.Conv3DRNNCell((10, 20, 30, 50), 100, 3, 3, prefix='rnn_')
check_rnn_cell(cell, prefix='rnn_', in_shape=(1, 10, 20, 30, 50), out_shape=(1, 100, 18, 28, 48))
@with_seed()
def test_convlstm():
cell = contrib.rnn.Conv1DLSTMCell((10, 50), 100, 3, 3, prefix='rnn_')
check_rnn_cell(cell, prefix='rnn_', in_shape=(1, 10, 50), out_shape=(1, 100, 48))
cell = contrib.rnn.Conv2DLSTMCell((10, 20, 50), 100, 3, 3, prefix='rnn_')
check_rnn_cell(cell, prefix='rnn_', in_shape=(1, 10, 20, 50), out_shape=(1, 100, 18, 48))
cell = contrib.rnn.Conv3DLSTMCell((10, 20, 30, 50), 100, 3, 3, prefix='rnn_')
check_rnn_cell(cell, prefix='rnn_', in_shape=(1, 10, 20, 30, 50), out_shape=(1, 100, 18, 28, 48))
@with_seed()
def test_convgru():
cell = contrib.rnn.Conv1DGRUCell((10, 50), 100, 3, 3, prefix='rnn_')
check_rnn_cell(cell, prefix='rnn_', in_shape=(1, 10, 50), out_shape=(1, 100, 48))
cell = contrib.rnn.Conv2DGRUCell((10, 20, 50), 100, 3, 3, prefix='rnn_')
check_rnn_cell(cell, prefix='rnn_', in_shape=(1, 10, 20, 50), out_shape=(1, 100, 18, 48))
cell = contrib.rnn.Conv3DGRUCell((10, 20, 30, 50), 100, 3, 3, prefix='rnn_')
check_rnn_cell(cell, prefix='rnn_', in_shape=(1, 10, 20, 30, 50), out_shape=(1, 100, 18, 28, 48))
@with_seed()
def test_conv_fill_shape():
cell = contrib.rnn.Conv1DLSTMCell((0, 7), 10, (3,), (3,))
cell.hybridize()
check_rnn_forward(cell, mx.nd.ones((8, 3, 5, 7)))
assert cell.i2h_weight.shape[1] == 5, cell.i2h_weight.shape[1]
@with_seed()
def test_lstmp():
nhid = 100
nproj = 64
cell = contrib.rnn.LSTMPCell(nhid, nproj, prefix='rnn_')
inputs = [mx.sym.Variable('rnn_t%d_data'%i) for i in range(3)]
outputs, _ = cell.unroll(3, inputs)
outputs = mx.sym.Group(outputs)
expected_params = ['rnn_h2h_bias', 'rnn_h2h_weight', 'rnn_h2r_weight', 'rnn_i2h_bias', 'rnn_i2h_weight']
expected_outputs = ['rnn_t0_out_output', 'rnn_t1_out_output', 'rnn_t2_out_output']
assert sorted(cell.collect_params().keys()) == expected_params
assert outputs.list_outputs() == expected_outputs, outputs.list_outputs()
args, outs, auxs = outputs.infer_shape(rnn_t0_data=(10,50), rnn_t1_data=(10,50), rnn_t2_data=(10,50))
assert outs == [(10, nproj), (10, nproj), (10, nproj)]
@with_seed()
def test_vardrop():
def check_vardrop(drop_inputs, drop_states, drop_outputs):
cell = contrib.rnn.VariationalDropoutCell(mx.gluon.rnn.RNNCell(100, prefix='rnn_'),
drop_outputs=drop_outputs,
drop_states=drop_states,
drop_inputs=drop_inputs)
cell.collect_params().initialize(init='xavier')
input_data = mx.nd.random_uniform(shape=(10, 3, 50), ctx=mx.context.current_context())
with mx.autograd.record():
outputs1, _ = cell.unroll(3, input_data, merge_outputs=True)
mx.nd.waitall()
outputs2, _ = cell.unroll(3, input_data, merge_outputs=True)
assert not almost_equal(outputs1.asnumpy(), outputs2.asnumpy())
inputs = [mx.sym.Variable('rnn_t%d_data'%i) for i in range(3)]
outputs, _ = cell.unroll(3, inputs, merge_outputs=False)
outputs = mx.sym.Group(outputs)
args, outs, auxs = outputs.infer_shape(rnn_t0_data=(10,50), rnn_t1_data=(10,50), rnn_t2_data=(10,50))
assert outs == [(10, 100), (10, 100), (10, 100)]
cell.reset()
cell.hybridize()
with mx.autograd.record():
outputs3, _ = cell.unroll(3, input_data, merge_outputs=True)
mx.nd.waitall()
outputs4, _ = cell.unroll(3, input_data, merge_outputs=True)
assert not almost_equal(outputs3.asnumpy(), outputs4.asnumpy())
assert not almost_equal(outputs1.asnumpy(), outputs3.asnumpy())
check_vardrop(0.5, 0.5, 0.5)
check_vardrop(0.5, 0, 0.5)
def test_concurrent():
model = HybridConcurrent(axis=1)
model.add(nn.Dense(128, activation='tanh', in_units=10))
model.add(nn.Dense(64, activation='tanh', in_units=10))
model.add(nn.Dense(32, in_units=10))
model2 = Concurrent(axis=1)
model2.add(nn.Dense(128, activation='tanh', in_units=10))
model2.add(nn.Dense(64, activation='tanh', in_units=10))
model2.add(nn.Dense(32, in_units=10))
# symbol
x = mx.sym.var('data')
y = model(x)
assert len(y.list_arguments()) == 7
# ndarray
model.initialize(mx.init.Xavier(magnitude=2.24))
model2.initialize(mx.init.Xavier(magnitude=2.24))
x = model(mx.nd.zeros((32, 10)))
x2 = model2(mx.nd.zeros((32, 10)))
assert x.shape == (32, 224)
assert x2.shape == (32, 224)
x.wait_to_read()
x2.wait_to_read()
@with_seed()
def test_identity():
model = Identity()
x = mx.nd.random.uniform(shape=(128, 33, 64))
assert_almost_equal(model(x), x)
@with_seed()
def test_sparse_embedding():
layer = SparseEmbedding(10, 100)
layer.initialize()
trainer = mx.gluon.Trainer(layer.collect_params(), 'sgd')
x = mx.nd.array([3,4,2,0,1])
with mx.autograd.record():
y = layer(x)
y.backward()
assert (layer.weight.grad().asnumpy()[:5] == 1).all()
assert (layer.weight.grad().asnumpy()[5:] == 0).all()
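# Only the embedding rows that were actually looked up (indices 0-4) receive a
# gradient of 1; the remaining rows of the 10x100 table stay at 0, which is what
# the two assertions above verify.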
def test_pixelshuffle1d():
nchan = 2
up_x = 2
nx = 3
shape_before = (1, nchan * up_x, nx)
shape_after = (1, nchan, nx * up_x)
layer = PixelShuffle1D(up_x)
x = mx.nd.arange(np.prod(shape_before)).reshape(shape_before)
y = layer(x)
assert y.shape == shape_after
assert_allclose(
        y.asnumpy(),
[[[0, 3, 1, 4, 2, 5],
[6, 9, 7, 10, 8, 11]]]
)
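# A minimal NumPy sketch (not part of the tested API) that reproduces the 1-D
# pixel-shuffle ordering asserted above: the (N, C*r, W) input is reshaped to
# (N, C, r, W), the last two axes are swapped, and the result is flattened back
# to (N, C, W*r).  The helper name is hypothetical and assumes `np` (numpy) is
# imported at the top of this file, as its use in the tests above suggests.
def _np_pixelshuffle1d_reference(x, up_x):
    n, c_up, w = x.shape
    c = c_up // up_x
    return x.reshape(n, c, up_x, w).transpose(0, 1, 3, 2).reshape(n, c, w * up_x)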
def test_pixelshuffle2d():
nchan = 2
up_x = 2
up_y = 3
nx = 2
ny = 3
shape_before = (1, nchan * up_x * up_y, nx, ny)
shape_after = (1, nchan, nx * up_x, ny * up_y)
layer = PixelShuffle2D((up_x, up_y))
x = mx.nd.arange(np.prod(shape_before)).reshape(shape_before)
y = layer(x)
assert y.shape == shape_after
# - Channels are reshaped to form 2x3 blocks
# - Within each block, the increment is `nx * ny` when increasing the column
# index by 1
# - Increasing the block index adds an offset of 1
# - Increasing the channel index adds an offset of `nx * up_x * ny * up_y`
assert_allclose(
        y.asnumpy(),
[[[[ 0, 6, 12, 1, 7, 13, 2, 8, 14],
[18, 24, 30, 19, 25, 31, 20, 26, 32],
[ 3, 9, 15, 4, 10, 16, 5, 11, 17],
[21, 27, 33, 22, 28, 34, 23, 29, 35]],
[[36, 42, 48, 37, 43, 49, 38, 44, 50],
[54, 60, 66, 55, 61, 67, 56, 62, 68],
[39, 45, 51, 40, 46, 52, 41, 47, 53],
[57, 63, 69, 58, 64, 70, 59, 65, 71]]]]
)
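# For the 2-D case the 12 = nchan * up_x * up_y input channels above are
# redistributed into nchan=2 output channels with spatial size
# (nx * up_x, ny * up_y) = (4, 9), matching the expected array.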
def test_pixelshuffle3d():
nchan = 1
up_x = 2
up_y = 1
up_z = 2
nx = 2
ny = 3
nz = 4
shape_before = (1, nchan * up_x * up_y * up_z, nx, ny, nz)
shape_after = (1, nchan, nx * up_x, ny * up_y, nz * up_z)
layer = PixelShuffle3D((up_x, up_y, up_z))
x = mx.nd.arange(np.prod(shape_before)).reshape(shape_before)
y = layer(x)
assert y.shape == shape_after
# - Channels are reshaped to form 2x1x2 blocks
# - Within each block, the increment is `nx * ny * nz` when increasing the
# column index by 1, e.g. the block [[[ 0, 24]], [[48, 72]]]
# - Increasing the block index adds an offset of 1
assert_allclose(
        y.asnumpy(),
[[[[[ 0, 24, 1, 25, 2, 26, 3, 27],
[ 4, 28, 5, 29, 6, 30, 7, 31],
[ 8, 32, 9, 33, 10, 34, 11, 35]],
[[48, 72, 49, 73, 50, 74, 51, 75],
[52, 76, 53, 77, 54, 78, 55, 79],
[56, 80, 57, 81, 58, 82, 59, 83]],
[[12, 36, 13, 37, 14, 38, 15, 39],
[16, 40, 17, 41, 18, 42, 19, 43],
[20, 44, 21, 45, 22, 46, 23, 47]],
[[60, 84, 61, 85, 62, 86, 63, 87],
[64, 88, 65, 89, 66, 90, 67, 91],
[68, 92, 69, 93, 70, 94, 71, 95]]]]]
)
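# Same idea in 3-D: the 4 = nchan * up_x * up_y * up_z input channels become a
# single output channel of spatial size (nx * up_x, ny * up_y, nz * up_z) = (4, 3, 8).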
def test_datasets():
wikitext2_train = contrib.data.text.WikiText2(root='data/wikitext-2', segment='train')
wikitext2_val = contrib.data.text.WikiText2(root='data/wikitext-2', segment='validation',
vocab=wikitext2_train.vocabulary)
wikitext2_test = contrib.data.text.WikiText2(root='data/wikitext-2', segment='test')
assert len(wikitext2_train) == 59305, len(wikitext2_train)
assert len(wikitext2_train.vocabulary) == 33278, len(wikitext2_train.vocabulary)
assert len(wikitext2_train.frequencies) == 33277, len(wikitext2_train.frequencies)
assert len(wikitext2_val) == 6181, len(wikitext2_val)
assert len(wikitext2_val.vocabulary) == 33278, len(wikitext2_val.vocabulary)
assert len(wikitext2_val.frequencies) == 13776, len(wikitext2_val.frequencies)
assert len(wikitext2_test) == 6974, len(wikitext2_test)
assert len(wikitext2_test.vocabulary) == 14143, len(wikitext2_test.vocabulary)
assert len(wikitext2_test.frequencies) == 14142, len(wikitext2_test.frequencies)
assert wikitext2_test.frequencies['English'] == 32
def test_sampler():
interval_sampler = contrib.data.IntervalSampler(10, 3)
assert sorted(list(interval_sampler)) == list(range(10))
interval_sampler = contrib.data.IntervalSampler(10, 3, rollover=False)
assert list(interval_sampler) == [0, 3, 6, 9]
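# IntervalSampler(10, 3) walks the indices with a stride of 3; with rollover
# (the default) it eventually visits all 10 indices, while rollover=False stops
# after the first pass, yielding only [0, 3, 6, 9] as asserted above.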
class TestRNNLayer(gluon.HybridBlock):
def __init__(self, cell_type, hidden_size, layout, prefix=None, params=None):
super(TestRNNLayer, self).__init__(prefix=prefix, params=params)
self.cell = cell_type(hidden_size, prefix='rnn_')
self.layout = layout
def hybrid_forward(self, F, inputs, states, valid_length):
if isinstance(valid_length, list) and len(valid_length) == 0:
valid_length = None
return contrib.rnn.rnn_cell.dynamic_unroll(self.cell, inputs, states,
valid_length=valid_length,
layout=self.layout)
def check_unroll(cell_type, num_states, layout):
batch_size = 20
input_size = 50
hidden_size = 30
seq_len = 10
if layout == 'TNC':
rnn_data = mx.nd.normal(loc=0, scale=1, shape=(seq_len, batch_size, input_size))
elif layout == 'NTC':
rnn_data = mx.nd.normal(loc=0, scale=1, shape=(batch_size, seq_len, input_size))
else:
print("Wrong layout")
return
    valid_length = mx.nd.round(mx.nd.random.uniform(low=1, high=10, shape=(batch_size,)))
state_shape = (batch_size, hidden_size)
states = [mx.nd.normal(loc=0, scale=1, shape=state_shape) for i in range(num_states)]
cell = cell_type(hidden_size, prefix='rnn_')
cell.initialize(ctx=default_context())
if layout == 'TNC':
cell(rnn_data[0], states)
else:
cell(rnn_data[:,0,:], states)
params1 = cell.collect_params()
orig_params1 = copy.deepcopy(params1)
trainer = gluon.Trainer(params1, 'sgd', {'learning_rate' : 0.03})
with mx.autograd.record():
res1, states1 = cell.unroll(seq_len, rnn_data, states, valid_length=valid_length,
layout=layout, merge_outputs=True)
res1.backward()
trainer.step(batch_size)
configs = [
lambda layer: None,
lambda layer: layer.hybridize(),
lambda layer: layer.hybridize({'inline_limit': 0}),
lambda layer: layer.hybridize({'static_alloc': True}),
lambda layer: layer.hybridize({'static_alloc': True, 'static_shape': True}) ]
    # We can't pass None to a hybrid block, but it accepts an empty list,
    # so we use an empty list to represent valid_length if it's None.
if valid_length is None:
valid_length = []
for config in configs:
layer = TestRNNLayer(cell_type, hidden_size, layout)
layer.initialize(ctx=default_context())
config(layer)
res2, states2 = layer(rnn_data, states, valid_length)
params2 = layer.collect_params()
for key, val in orig_params1.items():
params2[key].set_data(copy.deepcopy(val.data()))
trainer = gluon.Trainer(params2, 'sgd', {'learning_rate' : 0.03})
with mx.autograd.record():
res2, states2 = layer(rnn_data, states, valid_length)
assert_almost_equal(res1, res2, rtol=0.001, atol=0.0001)
assert len(states1) == len(states2)
for i in range(len(states1)):
assert_almost_equal(states1[i], states2[i], rtol=0.001, atol=0.0001)
res2.backward()
trainer.step(batch_size)
for key, val in params1.items():
weight1 = val.data()
weight2 = params2[key].data()
assert_almost_equal(weight1, weight2, rtol=0.001, atol=0.0001)
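# check_unroll compares contrib.rnn.rnn_cell.dynamic_unroll (wrapped in
# TestRNNLayer) against the cell's own unroll(): the outputs, final states, and
# post-step weights must agree for every hybridization config in `configs`.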
@with_seed()
def test_contrib_unroll():
cell_types = [(gluon.rnn.RNNCell, 1), (gluon.rnn.LSTMCell, 2),
(gluon.rnn.GRUCell, 1)]
for cell_type, num_states in cell_types:
check_unroll(cell_type, num_states, 'TNC')
check_unroll(cell_type, num_states, 'NTC')
if __name__ == '__main__':
import nose
nose.runmodule()