code (string) | signature (string) | docstring (string) | loss_without_docstring (float64) | loss_with_docstring (float64) | factor (float64)
---|---|---|---|---|---|
''' Generate confusion matrix from y_info '''
a = deepcopy(y_info['true'])
true_count = dict((i, a.count(i)) for i in set(a))
a = deepcopy(y_info['pred'])
pred_count = dict((i, a.count(i)) for i in set(a))
sorted_cats = sorted(list(set(y_info['true'] + y_info['pred'])))
conf_mat = confusion_matrix(y_info['true'], y_info['pred'], labels=sorted_cats)
df_conf = pd.DataFrame(conf_mat, index=sorted_cats, columns=sorted_cats)
total_correct = np.trace(df_conf)
total_pred = df_conf.sum().sum()
fraction_correct = total_correct/float(total_pred)
# calculate ser_correct
correct_list = []
cat_counts = df_conf.sum(axis=1)
all_cols = df_conf.columns.tolist()
for inst_cat in all_cols:
inst_correct = df_conf[inst_cat].loc[inst_cat] / cat_counts[inst_cat]
correct_list.append(inst_correct)
ser_correct = pd.Series(data=correct_list, index=all_cols)
populations = {}
populations['true'] = true_count
populations['pred'] = pred_count
return df_conf, populations, ser_correct, fraction_correct
|
def confusion_matrix_and_correct_series(self, y_info)
|
Generate confusion matrix from y_info
| 2.583829 | 2.582159 | 1.000647 |
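For context, here is a minimal standalone sketch of the same confusion-matrix logic on toy data (the category labels below are hypothetical; scikit-learn, pandas, and numpy are assumed to be installed):

```python
# Minimal sketch of the expected y_info shape and the derived outputs.
import numpy as np
import pandas as pd
from sklearn.metrics import confusion_matrix

y_info = {'true': ['cat', 'dog', 'dog', 'cat'],
          'pred': ['cat', 'dog', 'cat', 'cat']}

sorted_cats = sorted(set(y_info['true'] + y_info['pred']))
conf_mat = confusion_matrix(y_info['true'], y_info['pred'], labels=sorted_cats)
df_conf = pd.DataFrame(conf_mat, index=sorted_cats, columns=sorted_cats)

# Overall accuracy: diagonal (correct predictions) over all predictions.
fraction_correct = np.trace(conf_mat) / conf_mat.sum()
print(df_conf)
print(fraction_correct)  # 0.75 for this toy example
```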
''' load data into nodes and mat, also convert mat to numpy array'''
net.dat['nodes'] = inst_net['nodes']
net.dat['mat'] = inst_net['mat']
data_formats.mat_to_numpy_arr(net)
|
def load_data_to_net(net, inst_net)
|
load data into nodes and mat, also convert mat to numpy array
| 7.712164 | 4.54656 | 1.696264 |
''' export json string of dat '''
import json
from copy import deepcopy
if net_type == 'dat':
exp_dict = deepcopy(net.dat)
if type(exp_dict['mat']) is not list:
exp_dict['mat'] = exp_dict['mat'].tolist()
if 'mat_orig' in exp_dict:
exp_dict['mat_orig'] = exp_dict['mat_orig'].tolist()
elif net_type == 'viz':
exp_dict = net.viz
elif net_type == 'sim_row':
exp_dict = net.sim['row']
elif net_type == 'sim_col':
exp_dict = net.sim['col']
# make json
if indent == 'indent':
exp_json = json.dumps(exp_dict, indent=2)
else:
exp_json = json.dumps(exp_dict)
return exp_json
|
def export_net_json(net, net_type, indent='no-indent')
|
export json string of dat
| 2.720608 | 2.572315 | 1.05765 |
'''
This will export the matrix in net.dat or a dataframe (optional df in
arguments) as a tsv file. Row/column categories will be saved as tuples in
tsv, which can be read back into the network object.
'''
import pandas as pd
if df is None:
df = net.dat_to_df()
return df['mat'].to_csv(filename, sep='\t')
|
def write_matrix_to_tsv(net, filename=None, df=None)
|
This will export the matrix in net.dat or a dataframe (optional df in
arguments) as a tsv file. Row/column categories will be saved as tuples in
tsv, which can be read back into the network object.
| 10.155687 | 2.219037 | 4.576619 |
'''
Run in load_data module (which runs when file is loaded or dataframe is loaded),
check for duplicate row/col names, and add index to names if necessary
'''
if df is None:
df = net.export_df()
# rows
#############
rows = df.index.tolist()
if type(rows[0]) is str:
if len(rows) != len(list(set(rows))):
new_rows = add_index_list(rows)
df.index = new_rows
elif type(rows[0]) is tuple:
row_names = []
for inst_row in rows:
row_names.append(inst_row[0])
if len(row_names) != len(list(set(row_names))):
row_names = add_index_list(row_names)
# add back to tuple
new_rows = []
for inst_index in range(len(rows)):
inst_row = rows[inst_index]
new_row = list(inst_row)
new_row[0] = row_names[inst_index]
new_row = tuple(new_row)
new_rows.append(new_row)
df.index = new_rows
# cols
#############
cols = df.columns.tolist()
if type(cols[0]) is str:
# list column names
if len(cols) != len(list(set(cols))):
new_cols = add_index_list(cols)
df.columns = new_cols
elif type(cols[0]) is tuple:
col_names = []
for inst_col in cols:
col_names.append(inst_col[0])
if len(col_names) != len(list(set(col_names))):
col_names = add_index_list(col_names)
# add back to tuple
new_cols = []
for inst_index in range(len(cols)):
inst_col = cols[inst_index]
new_col = list(inst_col)
new_col[0] = col_names[inst_index]
new_col = tuple(new_col)
new_cols.append(new_col)
df.columns = new_cols
# return dataframe with unique names
return df
|
def main(net, df=None)
|
Run in load_data module (which runs when file is loaded or dataframe is loaded),
check for duplicate row/col names, and add index to names if necessary
| 2.10398 | 1.641267 | 1.281924 |
class NPM(BaseCommand):
description = 'install package.json dependencies using npm'
def run(self):
if skip_npm:
log.info('Skipping npm-installation')
return
node_package = path or HERE
node_modules = pjoin(node_package, 'node_modules')
is_yarn = os.path.exists(pjoin(node_package, 'yarn.lock'))
npm_cmd = npm
if npm is None:
if is_yarn:
npm_cmd = ['yarn']
else:
npm_cmd = ['npm']
if not which(npm_cmd[0]):
log.error("`{0}` unavailable. If you're running this command "
"using sudo, make sure `{0}` is availble to sudo"
.format(npm_cmd[0]))
return
if force or is_stale(node_modules, pjoin(node_package, 'package.json')):
log.info('Installing build dependencies with npm. This may '
'take a while...')
run(npm_cmd + ['install'], cwd=node_package)
if build_dir and source_dir and not force:
should_build = is_stale(build_dir, source_dir)
else:
should_build = True
if should_build:
run(npm_cmd + ['run', build_cmd], cwd=node_package)
return NPM
|
def install_npm(path=None, build_dir=None, source_dir=None, build_cmd='build', force=False, npm=None)
|
Return a Command for managing an npm installation.
Note: The command is skipped if the `--skip-npm` flag is used.
Parameters
----------
path: str, optional
The base path of the node package. Defaults to the repo root.
build_dir: str, optional
The target build directory. If this and source_dir are given,
the JavaScript will only be built if necessary.
source_dir: str, optional
The source code directory.
build_cmd: str, optional
The npm command to build assets to the build_dir.
npm: str or list, optional.
The npm executable name, or a tuple of ['node', executable].
| 3.013625 | 2.777075 | 1.08518 |
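The parameters above are easiest to see in a setup.py; below is a hedged sketch of wiring the returned command class into `cmdclass` (the command name `jsdeps` and all paths are hypothetical):

```python
# Hypothetical setup.py fragment; 'jsdeps' and the js/ paths are placeholders.
from setuptools import setup

setup(
    name='example-package',
    cmdclass={
        'jsdeps': install_npm(
            path='js',             # directory containing package.json
            build_dir='js/dist',   # rebuilt only when stale relative to source_dir
            source_dir='js/src',
            build_cmd='build',     # runs `npm run build` (or yarn if yarn.lock exists)
        ),
    },
)
```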
if parts[0] in ('.', ''):
parts = parts[1:]
return pjoin(*parts).replace(os.sep, '/')
|
def _glob_pjoin(*parts)
|
Join paths for glob processing
| 3.5307 | 3.512542 | 1.005169 |
# Extract the existing data files into a staging object.
file_data = defaultdict(list)
for (path, files) in existing or []:
file_data[path] = files
# Extract the files and assign them to the proper data
# files path.
for (path, dname, pattern) in data_specs or []:
if os.path.isabs(dname):
dname = os.path.relpath(dname, top)
dname = dname.replace(os.sep, '/')
offset = 0 if dname in ('.', '') else len(dname) + 1
files = _get_files(_glob_pjoin(dname, pattern), top=top)
for fname in files:
# Normalize the path.
root = os.path.dirname(fname)
full_path = _glob_pjoin(path, root[offset:])
print(dname, root, full_path, offset)
if full_path.endswith('/'):
full_path = full_path[:-1]
file_data[full_path].append(fname)
# Construct the data files spec.
data_files = []
for (path, files) in file_data.items():
data_files.append((path, files))
return data_files
|
def _get_data_files(data_specs, existing, top=HERE)
|
Expand data file specs into valid data files metadata.
Parameters
----------
data_specs: list of tuples
See [create_cmdclass] for description.
existing: list of tuples
The existing distribution data_files metadata.
Returns
-------
A valid list of data_files items.
| 3.291406 | 3.351249 | 0.982143 |
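A hedged sketch of the `data_specs` tuple format `(install path, source dir, glob pattern)` that the function expects; the directories below are hypothetical:

```python
# Hypothetical spec: install everything under static/ into share/example/static,
# preserving sub-directory structure.
data_specs = [
    ('share/example/static', 'static', '**/*'),
]
data_files = _get_data_files(data_specs, existing=None)
# e.g. [('share/example/static', ['static/index.js', ...]),
#       ('share/example/static/css', ['static/css/main.css', ...])]
```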
if not isinstance(file_patterns, (list, tuple)):
file_patterns = [file_patterns]
for i, p in enumerate(file_patterns):
if os.path.isabs(p):
file_patterns[i] = os.path.relpath(p, top)
matchers = [_compile_pattern(p) for p in file_patterns]
files = set()
for root, dirnames, filenames in os.walk(top):
# Don't recurse into node_modules
if 'node_modules' in dirnames:
dirnames.remove('node_modules')
for m in matchers:
for filename in filenames:
fn = os.path.relpath(_glob_pjoin(root, filename), top)
fn = fn.replace(os.sep, '/')
if m(fn):
files.add(fn.replace(os.sep, '/'))
return list(files)
|
def _get_files(file_patterns, top=HERE)
|
Expand file patterns to a list of paths.
Parameters
-----------
file_patterns: list or str
A list of glob patterns for the data file locations.
The globs can be recursive if they include a `**`.
They should be relative paths from the top directory or
absolute paths.
top: str
the directory to consider for data files
Note:
Files in `node_modules` are ignored.
| 2.300898 | 2.325896 | 0.989252 |
if file_patterns is None:
file_patterns = ['*']
return _get_files(file_patterns, _glob_pjoin(HERE, root))
|
def _get_package_data(root, file_patterns=None)
|
Expand file patterns to a list of `package_data` paths.
Parameters
-----------
root: str
The relative path to the package root from `HERE`.
file_patterns: list or str, optional
A list of glob patterns for the data file locations.
The globs can be recursive if they include a `**`.
They should be relative paths from the root or
absolute paths. If not given, all files will be used.
Note:
Files in `node_modules` are ignored.
| 5.764047 | 6.285498 | 0.917039 |
''' filter rows in matrix at some threshold
and remove columns that have a sum below this threshold '''
from copy import deepcopy
from .__init__ import Network
net = Network()
if take_abs is True:
df_copy = deepcopy(df['mat'].abs())
else:
df_copy = deepcopy(df['mat'])
ini_rows = df_copy.index.values.tolist()
df_copy = df_copy.transpose()
tmp_sum = df_copy.sum(axis=0)
tmp_sum = tmp_sum.abs()
tmp_sum.sort_values(inplace=True, ascending=False)
tmp_sum = tmp_sum[tmp_sum > threshold]
keep_rows = sorted(tmp_sum.index.values.tolist())
if len(keep_rows) < len(ini_rows):
df['mat'] = grab_df_subset(df['mat'], keep_rows=keep_rows)
if 'mat_up' in df:
df['mat_up'] = grab_df_subset(df['mat_up'], keep_rows=keep_rows)
df['mat_dn'] = grab_df_subset(df['mat_dn'], keep_rows=keep_rows)
if 'mat_orig' in df:
df['mat_orig'] = grab_df_subset(df['mat_orig'], keep_rows=keep_rows)
return df
|
def df_filter_row_sum(df, threshold, take_abs=True)
|
filter rows in matrix at some threshold
and remove columns that have a sum below this threshold
| 2.624914 | 2.275199 | 1.153707 |
''' filter columns in matrix at some threshold
and remove rows that have all zero values '''
from copy import deepcopy
from .__init__ import Network
net = Network()
if take_abs is True:
df_copy = deepcopy(df['mat'].abs())
else:
df_copy = deepcopy(df['mat'])
df_copy = df_copy.transpose()
df_copy = df_copy[df_copy.sum(axis=1) > threshold]
df_copy = df_copy.transpose()
df_copy = df_copy[df_copy.sum(axis=1) > 0]
if take_abs is True:
inst_rows = df_copy.index.tolist()
inst_cols = df_copy.columns.tolist()
df['mat'] = grab_df_subset(df['mat'], inst_rows, inst_cols)
if 'mat_up' in df:
df['mat_up'] = grab_df_subset(df['mat_up'], inst_rows, inst_cols)
df['mat_dn'] = grab_df_subset(df['mat_dn'], inst_rows, inst_cols)
if 'mat_orig' in df:
df['mat_orig'] = grab_df_subset(df['mat_orig'], inst_rows, inst_cols)
else:
df['mat'] = df_copy
return df
|
def df_filter_col_sum(df, threshold, take_abs=True)
|
filter columns in matrix at some threshold
and remove rows that have all zero values
| 2.33588 | 2.037461 | 1.146466 |
'''
Filter a network's rows or cols based on num_occur values being above a
threshold (in absolute_value)
'''
from copy import deepcopy
inst_df = deepcopy(df['mat'])
if inst_rc == 'col':
inst_df = inst_df.transpose()
inst_df = inst_df.abs()
ini_rows = inst_df.index.values.tolist()
inst_df[inst_df < threshold] = 0
inst_df[inst_df >= threshold] = 1
tmp_sum = inst_df.sum(axis=1)
tmp_sum = tmp_sum[tmp_sum >= num_occur]
keep_names = tmp_sum.index.values.tolist()
if inst_rc == 'row':
if len(keep_names) < len(ini_rows):
df['mat'] = grab_df_subset(df['mat'], keep_rows=keep_names)
if 'mat_up' in df:
df['mat_up'] = grab_df_subset(df['mat_up'], keep_rows=keep_names)
df['mat_dn'] = grab_df_subset(df['mat_dn'], keep_rows=keep_names)
if 'mat_orig' in df:
df['mat_orig'] = grab_df_subset(df['mat_orig'], keep_rows=keep_names)
elif inst_rc == 'col':
inst_df = inst_df.transpose()
inst_rows = inst_df.index.values.tolist()
inst_cols = keep_names
df['mat'] = grab_df_subset(df['mat'], inst_rows, inst_cols)
if 'mat_up' in df:
df['mat_up'] = grab_df_subset(df['mat_up'], inst_rows, inst_cols)
df['mat_dn'] = grab_df_subset(df['mat_dn'], inst_rows, inst_cols)
if 'mat_orig' in df:
df['mat_orig'] = grab_df_subset(df['mat_orig'], inst_rows, inst_cols)
return df
|
def filter_threshold(df, inst_rc, threshold, num_occur=1)
|
Filter a network's rows or cols based on num_occur values being above a
threshold (in absolute_value)
| 2.089944 | 1.786433 | 1.169898 |
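To make the row/column filter concrete, a toy sketch under the assumption (taken from the code above) that `df` is a dict holding a `'mat'` DataFrame:

```python
# Toy illustration: keep rows having at least `num_occur` entries whose
# absolute value is >= `threshold`.
import pandas as pd

df = {'mat': pd.DataFrame([[0.1,  2.0, -3.0],
                           [0.2,  0.1,  0.0]],
                          index=['row-A', 'row-B'],
                          columns=['c1', 'c2', 'c3'])}

filtered = filter_threshold(df, 'row', threshold=1.0, num_occur=2)
# Only 'row-A' survives: it has two values with |value| >= 1.0.
```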
flags = CompilerFlags(0)
if not isinstance(bytecode, (_bytecode.Bytecode,
_bytecode.ConcreteBytecode,
_bytecode.ControlFlowGraph)):
msg = ('Expected a Bytecode, ConcreteBytecode or ControlFlowGraph '
'instance not %s')
raise ValueError(msg % bytecode)
instructions = (bytecode.get_instructions()
if isinstance(bytecode, _bytecode.ControlFlowGraph) else
bytecode)
instr_names = {i.name for i in instructions
if not isinstance(i, (_bytecode.SetLineno,
_bytecode.Label))}
if not (instr_names & {'STORE_NAME', 'LOAD_NAME', 'DELETE_NAME'}):
flags |= CompilerFlags.OPTIMIZED
flags |= bytecode.flags & (CompilerFlags.NEWLOCALS |
CompilerFlags.VARARGS |
CompilerFlags.VARKEYWORDS |
CompilerFlags.NESTED)
if instr_names & {'YIELD_VALUE', 'YIELD_FROM'}:
if not is_async and not bytecode.flags & CompilerFlags.ASYNC_GENERATOR:
flags |= CompilerFlags.GENERATOR
else:
flags |= CompilerFlags.ASYNC_GENERATOR
if not (instr_names & {'LOAD_CLOSURE', 'LOAD_DEREF', 'STORE_DEREF',
'DELETE_DEREF', 'LOAD_CLASSDEREF'}):
flags |= CompilerFlags.NOFREE
if (not (bytecode.flags & CompilerFlags.ITERABLE_COROUTINE or
flags & CompilerFlags.ASYNC_GENERATOR) and
(instr_names & {'GET_AWAITABLE', 'GET_AITER', 'GET_ANEXT',
'BEFORE_ASYNC_WITH', 'SETUP_ASYNC_WITH'} or
bytecode.flags & CompilerFlags.COROUTINE)):
flags |= CompilerFlags.COROUTINE
flags |= bytecode.flags & CompilerFlags.ITERABLE_COROUTINE
flags |= bytecode.flags & CompilerFlags.FUTURE_GENERATOR_STOP
if ([bool(flags & getattr(CompilerFlags, k))
for k in ('COROUTINE', 'ITERABLE_COROUTINE', 'GENERATOR',
'ASYNC_GENERATOR')].count(True) > 1):
raise ValueError("Code should not have more than one of the "
"following flag set : generator, coroutine, "
"iterable coroutine and async generator, got:"
"%s" % flags)
return flags
|
def infer_flags(bytecode, is_async=False)
|
Infer the proper flags for a bytecode based on the instructions.
| 2.68668 | 2.661506 | 1.009459 |
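A hedged usage sketch, assuming the PyPI `bytecode` package layout in which `infer_flags` and `CompilerFlags` live in `bytecode.flags`:

```python
# Build Bytecode from a generator function and check the inferred flags.
from bytecode import Bytecode
from bytecode.flags import CompilerFlags, infer_flags

def gen():
    yield 1

bc = Bytecode.from_code(gen.__code__)
flags = infer_flags(bc)
assert flags & CompilerFlags.GENERATOR
assert not (flags & CompilerFlags.COROUTINE)
```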
used_blocks = set()
for block in self:
target_block = block.get_jump()
if target_block is not None:
used_blocks.add(id(target_block))
labels = {}
jumps = []
instructions = []
for block in self:
if id(block) in used_blocks:
new_label = Label()
labels[id(block)] = new_label
instructions.append(new_label)
for instr in block:
# don't copy SetLineno objects
if isinstance(instr, (Instr, ConcreteInstr)):
instr = instr.copy()
if isinstance(instr.arg, BasicBlock):
jumps.append(instr)
instructions.append(instr)
# Map to new labels
for instr in jumps:
instr.arg = labels[id(instr.arg)]
bytecode = _bytecode.Bytecode()
bytecode._copy_attr_from(self)
bytecode.argnames = list(self.argnames)
bytecode[:] = instructions
return bytecode
|
def to_bytecode(self)
|
Convert to Bytecode.
| 3.566756 | 3.456543 | 1.031885 |
if stacksize is None:
stacksize = self.compute_stacksize()
bc = self.to_bytecode()
return bc.to_code(stacksize=stacksize)
|
def to_code(self, stacksize=None)
|
Convert to code.
| 4.109174 | 3.123897 | 1.3154 |
self._set(name, arg, self._lineno)
|
def set(self, name, arg=UNSET)
|
Modify the instruction in-place.
Replace name and arg attributes. Don't modify lineno.
| 15.839335 | 9.907252 | 1.598762 |
global _tf_version_string, _tf_version
_tf_version_string = tf.__version__
_tf_version = _parse_tf_version(_tf_version_string)
if order is not None:
optimize(order)
|
def setup(tf, order=None)
|
Sets up global variables (currently only the tensorflow version) to adapt to peculiarities of
different tensorflow versions. This function should only be called before :py:class:`Model`
creation, not for evaluation. Therefore, the tensorflow module *tf* must be passed:
.. code-block:: python
import tensorflow as tf
import tfdeploy as td
td.setup(tf)
# ...
Also, when *order* is not *None*, it is forwarded to :py:func:`optimize` for convenience.
| 3.445187 | 3.200597 | 1.07642 |
if not isinstance(order, (list, tuple)):
order = [order]
for op in Operation.__subclasses__():
for impl in order:
if impl in op.impls:
op.use_impl(impl)
break
|
def optimize(order)
|
optimize(impl)
Tries to set the implementation type of all registered :py:class:`Operation` classes to *impl*.
This has no effect when an op does not implement that type.
The behavior is equivalent to:
.. code-block:: python
for op in Operation.__subclasses__():
if impl in op.impls:
op.use_impl(impl)
*impl* can also be a list or tuple of valid implementation types representing a preferred order.
| 4.448362 | 2.387743 | 1.862999 |
offset = depth * indent
line = "td tensor: %s" % td_tensor.name
if td_tensor.value is not None:
line += " (%s)" % (",".join(str(i) for i in td_tensor.value.shape),)
print(offset + line)
if td_tensor.op and (max_depth < 0 or max_depth > depth):
print_op(td_tensor.op, indent=indent, max_depth=max_depth, depth=depth+1)
|
def print_tensor(td_tensor, indent="| ", max_depth=-1, depth=0)
|
print_tensor(td_tensor, indent=" ", max_depth=-1)
Prints the dependency graph of a :py:class:`Tensor` *td_tensor*, where each new level is
indented by *indent*. When *max_depth* is positive, the graph is truncated at that depth, where
each tensor and each op count as a level.
| 2.874099 | 3.121644 | 0.920701 |
offset = depth * indent
line = "td op: %s (%s)" % (td_op.name, ",".join(td_op.types))
print(offset + line)
if max_depth < 0 or max_depth > depth:
for td_tensor in td_op.inputs:
print_tensor(td_tensor, indent=indent, max_depth=max_depth, depth=depth+1)
|
def print_op(td_op, indent="| ", max_depth=-1, depth=0)
|
print_op(td_op, indent=" ", max_depth=-1)
Prints the dependency graph of a :py:class:`Operation` *td_op*, where each new level is indented
by *indent*. When *max_depth* is positive, the graph is truncated at that depth, where each
tensor and each op count as a level.
| 2.85594 | 3.294827 | 0.866795 |
offset = depth * indent
shape = tuple(int(i) for i in tf_tensor.get_shape())
line = "tf tensor: %s (%s)" % (tf_tensor.name, ",".join(str(i) for i in shape))
print(offset + line)
if tf_tensor.op and (max_depth < 0 or max_depth > depth):
print_tf_op(tf_tensor.op, indent=indent, max_depth=max_depth, depth=depth+1)
|
def print_tf_tensor(tf_tensor, indent="| ", max_depth=-1, depth=0)
|
print_tf_tensor(tf_tensor, indent=" ", max_depth=-1)
Prints the dependency graph of a tensorflow tensor *tf_tensor*, where each new level is indented
by *indent*. When *max_depth* is positive, the graph is truncated at that depth, where each
tensor and each op count as a level.
| 2.749057 | 2.971262 | 0.925215 |
offset = depth * indent
line = "tf op: %s (%s)" % (tf_op.name, tf_op.type)
print(offset + line)
if max_depth < 0 or max_depth > depth:
for tf_tensor in tf_op.inputs:
print_tf_tensor(tf_tensor, indent=indent, max_depth=max_depth, depth=depth+1)
|
def print_tf_op(tf_op, indent="| ", max_depth=-1, depth=0)
|
print_tf_op(tf_op, indent=" ", max_depth=-1)
Prints the dependency graph of a tensorflow operation *tf_op*, where each new level is indented
by *indent*. When *max_depth* is positive, the graph is truncated at that depth, where each
tensor and each op count as a level.
| 2.461185 | 2.864372 | 0.859241 |
return np.linspace(start, stop, num=num, dtype=np.float32),
|
def LinSpace(start, stop, num)
|
Linspace op.
| 3.631397 | 3.945607 | 0.920365 |
return np.arange(start, limit, delta, dtype=np.int32),
|
def Range(start, limit, delta)
|
Range op.
| 4.600969 | 5.235152 | 0.878861 |
if seed:
np.random.seed(seed)
return np.random.normal(size=reduce(mul, shape)).reshape(shape).astype(dtype_map[dtype]),
|
def RandomStandardNormal(shape, dtype, seed)
|
Standard (mu=0, sigma=1) gaussian op.
| 4.552612 | 4.6658 | 0.975741 |
if seed:
np.random.seed(seed)
n = reduce(mul, shape)
r = np.empty(n, dtype=dtype_map[dtype])
idxs = np.ones(n, dtype=bool)
while n:
r[idxs] = np.random.normal(size=n)
idxs = np.abs(r) > 2
n = np.sum(idxs)
return r.reshape(shape),
|
def TruncatedNormal(shape, dtype, seed)
|
Standard (mu=0, sigma=1) gaussian op with truncation above 2 sigma.
| 3.19168 | 3.207171 | 0.99517 |
if seed:
np.random.seed(seed)
return np.random.uniform(size=shape).astype(dtype_map[dtype]),
|
def RandomUniform(shape, dtype, seed)
|
Random uniform op.
| 3.503533 | 3.838511 | 0.912732 |
if seed:
np.random.seed(seed)
return np.random.randint(minval, maxval, size=shape),
|
def RandomUniformInt(shape, minval, maxval, seed)
|
Random uniform int op.
| 2.71525 | 3.075612 | 0.882833 |
if seed:
np.random.seed(seed)
r = a.copy()
np.random.shuffle(r)
return r,
|
def RandomShuffle(a, seed)
|
Random shuffle op.
| 2.78267 | 3.176006 | 0.876154 |
return np.array([len(a.shape)], dtype=np.int32),
|
def Rank(a)
|
Rank op.
| 8.843553 | 8.067099 | 1.096249 |
if not squeeze_dims:
squeeze_dims = list(range(len(a.shape)))
slices = [(0 if (dim == 1 and i in squeeze_dims) else slice(None)) \
for i, dim in enumerate(a.shape)]
return np.copy(a)[slices],
|
def Squeeze(a, squeeze_dims)
|
Squeeze op, i.e. removes singular axes.
| 3.279469 | 3.429935 | 0.956132 |
shape = list(a.shape)
if dim >= 0:
shape.insert(dim, 1)
else:
shape.insert(len(shape) + dim + 1, 1)
return np.copy(a).reshape(*shape),
|
def ExpandDims(a, dim)
|
Expand dim op, i.e. add singular axis at dim.
| 2.469491 | 2.479747 | 0.995864 |
return np.copy(a)[[slice(*tpl) for tpl in zip(begin, begin+size)]],
|
def Slice(a, begin, size)
|
Slicing op.
| 7.878357 | 9.068686 | 0.868743 |
return tuple(np.split(np.copy(a), n, axis=axis))
|
def Split(axis, a, n)
|
Split op with n splits.
| 4.688277 | 5.609359 | 0.835795 |
return tuple(np.split(np.copy(a), np.cumsum(splits), axis=axis))
|
def SplitV(a, splits, axis)
|
Split op with multiple split sizes.
| 4.17034 | 4.71657 | 0.884189 |
axis = inputs.pop()
return np.concatenate(inputs, axis=axis),
|
def ConcatV2(inputs)
|
Concat op.
| 9.644657 | 10.78195 | 0.894519 |
return tuple(np.squeeze(b, axis=axis) for b in np.split(a, num, axis=axis))
|
def Unpack(a, num, axis)
|
Unpack op.
| 3.298717 | 4.186443 | 0.787952 |
r = np.copy(a)
invidxs = (len(r.shape) - 1) * [slice(None)]
if seq_dim < batch_dim:
invidxs[seq_dim] = slice(None, None, -1)
else:
invidxs[seq_dim - 1] = slice(None, None, -1)
_invidxs = tuple(invidxs)
selidxs = len(r.shape) * [slice(None)]
for i, l in enumerate(seq_lengths):
if not l:
continue
selidxs[batch_dim] = i
selidxs[seq_dim] = slice(0, l)
_selidxs = tuple(selidxs)
r[_selidxs] = a[_selidxs][_invidxs]
return r,
|
def ReverseSequence(a, seq_lengths, seq_dim, batch_dim)
|
Sequential reverse op.
| 2.218508 | 2.228698 | 0.995428 |
idxs = tuple(slice(None, None, 2 * int(i not in axes) - 1) for i in range(len(a.shape)))
return np.copy(a[idxs]),
|
def ReverseV2(a, axes)
|
Reverse op.
| 4.553762 | 4.645198 | 0.980316 |
return sp.special.betainc(a, b, x),
|
def Betainc(a, b, x)
|
Regularized incomplete beta op.
| 5.504312 | 5.29425 | 1.039677 |
r = np.zeros(2 * a.shape, dtype=a.dtype)
for idx, v in np.ndenumerate(a):
r[2 * idx] = v
return r,
|
def Diag(a)
|
Diag op.
| 3.793443 | 3.774704 | 1.004964 |
r = np.zeros(a.shape[:-2] + (min(a.shape[-2:]),))
for coord in np.ndindex(a.shape[:-2]):
pos = coord + (Ellipsis,)
r[pos] = np.diagonal(a[pos])
return r,
|
def MatrixDiagPart(a)
|
Batched diag op that returns only the diagonal elements.
| 3.066764 | 3.214586 | 0.954015 |
return np.dot(a if not transpose_a else np.transpose(a),
b if not transpose_b else np.transpose(b)),
|
def MatMul(a, b, transpose_a, transpose_b)
|
Matrix multiplication op.
| 2.799559 | 2.953671 | 0.947824 |
return np.linalg.inv(a if not adj else _adjoint(a)),
|
def MatrixInverse(a, adj)
|
Matrix inversion op.
| 8.435327 | 8.34704 | 1.010577 |
return np.linalg.solve(a if not adj else _adjoint(a), rhs),
|
def MatrixSolve(a, rhs, adj)
|
Matrix solve op.
| 8.352999 | 8.636946 | 0.967124 |
trans = 0 if not adj else 2
r = np.empty(rhs.shape).astype(a.dtype)
for coord in np.ndindex(a.shape[:-2]):
pos = coord + (Ellipsis,)
r[pos] = sp.linalg.solve_triangular(a[pos] if not adj else np.conj(a[pos]), rhs[pos],
trans=trans, lower=lower)
return r,
|
def MatrixTriangularSolve(a, rhs, lower, adj)
|
Matrix triangular solve op.
| 4.273401 | 4.319919 | 0.989232 |
r = np.empty(rhs.shape).astype(a.dtype)
for coord in np.ndindex(a.shape[:-2]):
pos = coord + (Ellipsis,)
r[pos] = np.linalg.lstsq(a[pos], rhs[pos])[0]
return r,
|
def MatrixSolveLs(a, rhs, l2_reg)
|
Matrix least-squares solve op.
| 3.646143 | 3.567899 | 1.02193 |
shape = list(a.shape)
shape[-2] += 1
return np.append(*np.linalg.eig(a)).reshape(*shape),
|
def SelfAdjointEig(a)
|
Eigen decomp op.
| 7.056018 | 7.587093 | 0.930003 |
u, s, v = np.linalg.svd(a, full_matrices=full, compute_uv=uv)
return s, u, v
|
def Svd(a, uv, full)
|
Singular value decomposition op.
| 2.434459 | 2.496447 | 0.975169 |
return np.sum(a, axis=axis if not isinstance(axis, np.ndarray) else tuple(axis),
keepdims=keep_dims),
|
def Sum(a, axis, keep_dims)
|
Sum reduction op.
| 3.880393 | 4.047409 | 0.958735 |
return np.prod(a, axis=axis if not isinstance(axis, np.ndarray) else tuple(axis),
keepdims=keep_dims),
|
def Prod(a, axis, keep_dims)
|
Prod reduction op.
| 3.886592 | 3.999415 | 0.97179 |
return np.amin(a, axis=axis if not isinstance(axis, np.ndarray) else tuple(axis),
keepdims=keep_dims),
|
def Min(a, axis, keep_dims)
|
Min reduction op.
| 4.372643 | 4.389674 | 0.99612 |
return np.amax(a, axis=axis if not isinstance(axis, np.ndarray) else tuple(axis),
keepdims=keep_dims),
|
def Max(a, axis, keep_dims)
|
Max reduction op.
| 4.028647 | 4.240833 | 0.949966 |
return np.mean(a, axis=axis if not isinstance(axis, np.ndarray) else tuple(axis),
keepdims=keep_dims),
|
def Mean(a, axis, keep_dims)
|
Mean reduction op.
| 3.773323 | 4.106915 | 0.918773 |
return np.all(a, axis=axis if not isinstance(axis, np.ndarray) else tuple(axis),
keepdims=keep_dims),
|
def All(a, axis, keep_dims)
|
All reduction op.
| 3.945342 | 4.268853 | 0.924216 |
return np.any(a, axis=axis if not isinstance(axis, np.ndarray) else tuple(axis),
keepdims=keep_dims),
|
def Any(a, axis, keep_dims)
|
Any reduction op.
| 3.909226 | 4.432742 | 0.881898 |
func = lambda idxs: reduce(np.add, a[idxs])
return seg_map(func, a, ids),
|
def SegmentSum(a, ids, *args)
|
Segmented sum op.
| 10.756198 | 9.263833 | 1.161096 |
func = lambda idxs: reduce(np.multiply, a[idxs])
return seg_map(func, a, ids),
|
def SegmentProd(a, ids)
|
Segmented prod op.
| 9.657506 | 8.893114 | 1.085953 |
func = lambda idxs: np.amin(a[idxs], axis=0)
return seg_map(func, a, ids),
|
def SegmentMin(a, ids)
|
Segmented min op.
| 7.016713 | 6.775658 | 1.035577 |
func = lambda idxs: np.amax(a[idxs], axis=0)
return seg_map(func, a, ids),
|
def SegmentMax(a, ids)
|
Segmented max op.
| 6.347635 | 6.041828 | 1.050615 |
func = lambda idxs: np.mean(a[idxs], axis=0)
return seg_map(func, a, ids),
|
def SegmentMean(a, ids)
|
Segmented mean op.
| 5.727454 | 5.616382 | 1.019776 |
func = lambda _idxs: np.divide(reduce(np.add, a[idxs][_idxs]), np.math.sqrt(len(_idxs)))
return seg_map(func, a, ids),
|
def SparseSegmentSqrtN(a, idxs, ids)
|
Sparse segmented sum / sqrt(n=len(idxs)) op.
| 8.728653 | 8.338761 | 1.046757 |
d = np.setdiff1d(a, b)
return d, np.searchsorted(a, d).astype(np.int32)
|
def ListDiff(a, b)
|
List diff op.
| 3.933892 | 4.249426 | 0.925747 |
_, idxs, inv = np.unique(a, return_index=True, return_inverse=True)
return np.copy(a)[np.sort(idxs)], idxs[inv].astype(dtype_map[t])
|
def Unique(a, t)
|
Unique op.
| 3.504902 | 3.820215 | 0.917462 |
return np.where(a < 0, np.subtract(np.exp(a), 1), a),
|
def Elu(a)
|
Elu op.
| 5.172436 | 4.733149 | 1.092811 |
return np.divide(a, np.add(np.abs(a), 1)),
|
def Softsign(a)
|
Softsign op.
| 4.234314 | 5.114306 | 0.827935 |
e = np.exp(a)
return np.divide(e, np.sum(e, axis=-1, keepdims=True)),
|
def Softmax(a)
|
Softmax op.
| 3.082639 | 3.282941 | 0.938987 |
if data_format.decode("ascii") == "NCHW":
a = np.rollaxis(a, 1, -1)
patches = _conv_patches(a, f, 3 * [strides], padding.decode("ascii"))
conv = np.sum(patches, axis=tuple(range(-f.ndim, -1)))
if data_format.decode("ascii") == "NCHW":
conv = np.rollaxis(conv, -1, 1)
return conv,
|
def Conv1D(a, f, strides, padding, data_format)
|
1D conv op.
| 3.535972 | 3.594656 | 0.983675 |
patches = _conv_patches(a, f, strides, padding.decode("ascii"))
return np.sum(patches, axis=tuple(range(-f.ndim, -1))),
|
def Conv3D(a, f, strides, padding)
|
3D conv op.
| 6.987578 | 7.070533 | 0.988268 |
if data_format.decode("ascii") == "NCHW":
a = np.rollaxis(a, 1, -1)
patches = _pool_patches(a, k, strides, padding.decode("ascii"))
pool = np.average(patches, axis=tuple(range(-len(k), 0)))
if data_format.decode("ascii") == "NCHW":
pool = np.rollaxis(pool, -1, 1)
return pool,
|
def AvgPool(a, k, strides, padding, data_format)
|
Average pooling op.
| 3.007273 | 3.080016 | 0.976382 |
if data_format.decode("ascii") == "NCHW":
a = np.rollaxis(a, 1, -1)
patches = _pool_patches(a, k, strides, padding.decode("ascii"))
pool = np.amax(patches, axis=tuple(range(-len(k), 0)))
if data_format.decode("ascii") == "NCHW":
pool = np.rollaxis(pool, -1, 1)
return pool,
|
def MaxPool(a, k, strides, padding, data_format)
|
Maximum pooling op.
| 3.06828 | 3.134064 | 0.97901 |
patches = _pool_patches(a, k, strides, padding.decode("ascii"))
return np.average(patches, axis=tuple(range(-len(k), 0))),
|
def AvgPool3D(a, k, strides, padding)
|
Average 3D pooling op.
| 5.699454 | 5.900015 | 0.966007 |
patches = _pool_patches(a, k, strides, padding.decode("ascii"))
return np.amax(patches, axis=tuple(range(-len(k), 0))),
|
def MaxPool3D(a, k, strides, padding)
|
Maximum 3D pooling op.
| 5.708325 | 5.912484 | 0.96547 |
tensors = tuple(self._get(name, **kwargs) for name in names)
return tensors[0] if len(names) == 1 else tensors
|
def get(self, *names, **kwargs)
|
get(*names, key=None)
Returns one or more :py:class:`Tensor` instances given by *names* using a deep lookup within
the model. If *key* is not *None*, only the root tensor with that *key* is traversed. *None*
is returned when no tensor was found. In case a tensor is passed, its name is used for the
lookup.
| 3.649331 | 3.366309 | 1.084075 |
if not isinstance(tensor, Tensor):
tensor = Tensor(tensor, tf_sess, **kwargs)
if key is None:
if len(self.roots) == 0:
key = 0
else:
key = max(self.roots.keys()) + 1
self.roots[key] = tensor
|
def add(self, tensor, tf_sess=None, key=None, **kwargs)
|
Adds a new root *tensor* for a *key* which, if *None*, defaults to a consecutive number.
When *tensor* is not an instance of :py:class:`Tensor` but an instance of
``tensorflow.Tensor``, it is converted first. In that case, *tf_sess* should be a valid
tensorflow session and *kwargs* are forwarded to the :py:class:`Tensor` constructor.
| 2.441759 | 1.95871 | 1.246616 |
path = os.path.expandvars(os.path.expanduser(path))
with open(path, "rb") as f:
roots = pickle.load(f)
for key, tensor in roots.items():
self.add(tensor, key=key)
|
def load(self, path)
|
Loads all tensors from a file defined by *path* and adds them to the root set.
| 3.116846 | 2.333605 | 1.335636 |
path = os.path.expandvars(os.path.expanduser(path))
with open(path, "wb") as f:
pickle.dump(self.roots, f)
|
def save(self, path)
|
Saves all tensors of the root set to a file defined by *path*.
| 2.988476 | 2.474138 | 1.207886 |
# create empty tensor ensembles with our method
tensor_ensembles = [TensorEnsemble([], self.method) for name in names]
# loop over models, collect and add tensors
for model in self.models:
tensors = model.get(*names, **kwargs)
if not isinstance(tensors, tuple):
tensors = (tensors,)
for i, t in enumerate(tensors):
tensor_ensembles[i].tensors.append(t)
return tensor_ensembles[0] if len(names) == 1 else tuple(tensor_ensembles)
|
def get(self, *names, **kwargs)
|
get(*names, key=None)
Returns one or more :py:class:`TensorEnsemble` instances given by *names* using a deep
lookup within all read models. Each returned tensor ensemble will have ``len(models)``
tensors. If a model does not contain a specific tensor defined by a specific *name*, the
associated ensemble tensor will contain a *None* for that model in its tensors. If *key* is
not *None*, only the root tensors with that *key* are traversed.
| 3.445448 | 3.203067 | 1.075671 |
for path in paths:
self.models.append(Model(path))
|
def load(self, paths)
|
Loads models from a list of *paths*.
| 5.865807 | 4.503778 | 1.302419 |
# first, check that the length of all feed_dict keys match our own length
for tensor_ensemble in feed_dict:
if len(tensor_ensemble.tensors) != len(self.tensors):
raise EnsembleMismatchException("incompatible lengths of tensors: %d, %d" \
% (len(self.tensors), len(tensor_ensemble.tensors)))
# create a joined uuid
_uuid = uuid4()
# prepare feed_dicts
feed_dicts = [{} for _ in range(len(self.tensors))]
for tensor_ensemble, value in feed_dict.items():
for i, tensor in enumerate(tensor_ensemble.tensors):
if tensor is not None:
feed_dicts[i][tensor] = value[i] if isinstance(value, (list, tuple)) else value
# eval all tensors
values = [t.eval(feed_dict=d, _uuid=_uuid) for t, d in zip(self.tensors, feed_dicts)]
# return the computed ensemble value
return self.func(values)
|
def eval(self, feed_dict=None)
|
Evaluates all contained tensors using a *feed_dict* and returns the ensemble value. The keys
of *feed_dict* must be tensor ensembles. Its values can be batches, i.e., numpy arrays, or
lists or tuples of batches. In the latter case, these lists or tuples must have the same
length as the list of stored tensors as they will be mapped.
| 3.771421 | 3.134256 | 1.203291 |
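A hedged end-to-end sketch; the model paths, tensor names, and the `Ensemble` constructor arguments are assumptions inferred from the methods shown here, not a verified API:

```python
# Hypothetical usage: average the predictions of two pickled models.
import numpy as np

ensemble = Ensemble(['model_a.pkl', 'model_b.pkl'], method=METHOD_MEAN)
x, y = ensemble.get('input:0', 'output:0')   # TensorEnsemble instances

batch = np.random.rand(4, 10).astype(np.float32)
prediction = y.eval({x: batch})              # mean over both models' outputs
```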
if self.method == METHOD_MEAN:
return self.func_mean(values)
elif self.method == METHOD_MAX:
return self.func_max(values)
elif self.method == METHOD_MIN:
return self.func_min(values)
elif self.method == METHOD_CUSTOM:
return self.func_custom(values)
else:
raise UnknownEnsembleMethodException(self.method)
|
def func(self, values)
|
The actual ensembling logic that combines multiple *values*. The method call is forwarded
to the ensemble method-specific variant, which is determined by *method*.
| 2.128255 | 1.88165 | 1.131058 |
# make a list of all the keys that are detected
pressed = []
# set all pins to be inputs w/pullups
for pin in self.row_pins+self.col_pins:
pin.direction = Direction.INPUT
pin.pull = Pull.UP
for row in range(len(self.row_pins)):
# set one row low at a time
self.row_pins[row].direction = Direction.OUTPUT
self.row_pins[row].value = False
# check the column pins, which ones are pulled down
for col in range(len(self.col_pins)):
if not self.col_pins[col].value:
pressed.append(self.keys[row][col])
# reset the pin to be an input
self.row_pins[row].direction = Direction.INPUT
self.row_pins[row].pull = Pull.UP
return pressed
|
def pressed_keys(self)
|
An array containing all detected keys that are pressed from the initialized
list-of-lists passed in during creation
| 3.26381 | 3.234778 | 1.008975 |
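A hedged wiring sketch for a 2x2 matrix; the board pin names and the `MatrixKeypad` class name are placeholders for whatever object exposes the `pressed_keys` scan above:

```python
# Hypothetical CircuitPython scan loop; board pins and the MatrixKeypad
# constructor are assumptions, not a verified API.
import time
import board
import digitalio

rows = [digitalio.DigitalInOut(p) for p in (board.D2, board.D3)]
cols = [digitalio.DigitalInOut(p) for p in (board.D4, board.D5)]
keys = [['A', 'B'],
        ['C', 'D']]

matrix = MatrixKeypad(rows, cols, keys)  # object providing pressed_keys above

while True:
    pressed = matrix.pressed_keys
    if pressed:
        print('pressed:', pressed)
    time.sleep(0.05)
```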
load_times = []
search_str = '{0}_load_time'.format(asset_type)
for har_page in self.pages:
val = getattr(har_page, search_str, None)
if val is not None:
load_times.append(val)
return load_times
|
def get_load_times(self, asset_type)
|
Just a ``list`` of the load times of a certain asset type for each page
:param asset_type: ``str`` of the asset type to return load times for
| 3.173211 | 3.418531 | 0.928238 |
load_times = []
# Handle edge cases like TTFB
if asset_type == 'ttfb':
for page in self.pages:
if page.time_to_first_byte is not None:
load_times.append(page.time_to_first_byte)
elif asset_type not in self.asset_types and asset_type != 'page':
raise ValueError('asset_type must be one of:\nttfb\n{0}'.format(
'\n'.join(self.asset_types)))
else:
load_times = self.get_load_times(asset_type)
if not load_times or not sum(load_times):
return 0
return round(stdev(load_times),
self.decimal_precision)
|
def get_stdev(self, asset_type)
|
Returns the standard deviation for a set of a certain asset type.
:param asset_type: ``str`` of the asset type to calculate standard
deviation for.
:returns: An ``int`` or ``float`` of standard deviation, depending on
the self.decimal_precision
| 3.157869 | 3.138515 | 1.006166 |
pages = []
for har_dict in self.har_data:
har_parser = HarParser(har_data=har_dict)
if self.page_id:
for page in har_parser.pages:
if page.page_id == self.page_id:
pages.append(page)
else:
pages = pages + har_parser.pages
return pages
|
def pages(self)
|
The aggregate pages of all the parser objects.
| 3.068554 | 2.619679 | 1.171347 |
ttfb = []
for page in self.pages:
if page.time_to_first_byte is not None:
ttfb.append(page.time_to_first_byte)
return round(mean(ttfb), self.decimal_precision)
|
def time_to_first_byte(self)
|
The aggregate time to first byte for all pages.
| 2.889148 | 2.468868 | 1.170232 |
load_times = self.get_load_times('page')
return round(mean(load_times), self.decimal_precision)
|
def page_load_time(self)
|
The average total load time for all runs (not weighted).
| 7.685127 | 6.3099 | 1.217948 |
load_times = self.get_load_times('js')
return round(mean(load_times), self.decimal_precision)
|
def js_load_time(self)
|
Returns aggregate javascript load time.
| 6.810428 | 5.384786 | 1.264754 |
load_times = self.get_load_times('css')
return round(mean(load_times), self.decimal_precision)
|
def css_load_time(self)
|
Returns aggregate css load time for all pages.
| 6.580968 | 5.385409 | 1.222 |
load_times = self.get_load_times('image')
return round(mean(load_times), self.decimal_precision)
|
def image_load_time(self)
|
Returns aggregate image load time for all pages.
| 6.826415 | 5.581131 | 1.223124 |
load_times = self.get_load_times('html')
return round(mean(load_times), self.decimal_precision)
|
def html_load_time(self)
|
Returns aggregate html load time for all pages.
| 6.956481 | 5.364771 | 1.296697 |
load_times = self.get_load_times('audio')
return round(mean(load_times), self.decimal_precision)
|
def audio_load_time(self)
|
Returns aggregate audio load time for all pages.
| 6.877628 | 5.753539 | 1.195373 |
load_times = self.get_load_times('video')
return round(mean(load_times), self.decimal_precision)
|
def video_load_time(self)
|
Returns aggregate video load time for all pages.
| 6.948451 | 5.740906 | 1.210341 |
if header_type not in entry:
raise ValueError('Invalid header_type, should be either:\n\n'
'* \'request\'\n* \'response\'')
# TODO - headers are empty in some HAR data.... need fallbacks here
for h in entry[header_type]['headers']:
if h['name'].lower() == header.lower() and h['value'] is not None:
if regex and re.search(value, h['value'], flags=re.IGNORECASE):
return True
elif value == h['value']:
return True
return False
|
def match_headers(self, entry, header_type, header, value, regex=True)
|
Function to match headers.
Since the output of headers might use different case, like
'content-type' vs 'Content-Type', this function is case-insensitive.
:param entry: entry object
:param header_type: ``str`` of header type. Valid values:
* 'request'
* 'response'
:param header: ``str`` of the header to search for
:param value: ``str`` of value to search for
:param regex: ``bool`` indicating whether to use regex or exact match
:returns: a ``bool`` indicating whether a match was found
| 4.57447 | 4.726161 | 0.967904 |
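A hedged sketch of the entry structure the matcher walks; `parser` stands in for whatever object the method above is bound to, and the header values are made up:

```python
# Hypothetical HAR entry fragment with a single response header.
entry = {
    'response': {
        'headers': [
            {'name': 'Content-Type', 'value': 'text/html; charset=utf-8'},
        ],
    },
}

# Header names compare case-insensitively; values use regex by default.
parser.match_headers(entry, 'response', 'content-type', 'text/html')  # True
parser.match_headers(entry, 'response', 'content-type', 'image/png')  # False
```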
mimeType = entry['response']['content']['mimeType']
if regex and re.search(content_type, mimeType, flags=re.IGNORECASE):
return True
elif content_type == mimeType:
return True
return False
|
def match_content_type(entry, content_type, regex=True)
|
Matches the content type of a request using the mimeType metadata.
:param entry: ``dict`` of a single entry from a HarPage
:param content_type: ``str`` of regex to use for finding content type
:param regex: ``bool`` indicating whether to use regex or exact match.
| 3.212099 | 3.790036 | 0.847511 |
if regex:
return re.search(request_type, entry['request']['method'],
flags=re.IGNORECASE) is not None
else:
return entry['request']['method'] == request_type
|
def match_request_type(self, entry, request_type, regex=True)
|
Helper function that returns entries with a request type
matching the given `request_type` argument.
:param entry: entry object to analyze
:param request_type: ``str`` of request type to match
:param regex: ``bool`` indicating whether to use a regex or string match
| 2.287145 | 3.590488 | 0.637001 |
response_version = entry['response']['httpVersion']
if regex:
return re.search(http_version, response_version,
flags=re.IGNORECASE) is not None
else:
return response_version == http_version
|
def match_http_version(entry, http_version, regex=True)
|
Helper function that returns entries with an HTTP version
matching the given `http_version` argument.
:param entry: entry object to analyze
:param http_version: ``str`` of HTTP version to match
:param regex: ``bool`` indicating whether to use a regex or string match
| 2.437244 | 3.491544 | 0.698042 |
if regex:
return re.search(status_code,
str(entry['response']['status'])) is not None
else:
return str(entry['response']['status']) == status_code
|
def match_status_code(self, entry, status_code, regex=True)
|
Helper function that returns entries with a status code matching
the given `status_code` argument.
NOTE: This is doing a STRING comparison NOT NUMERICAL
:param entry: entry object to analyze
:param status_code: ``str`` of status code to search for
:param regex: ``bool`` indicating whether to use a regex or string match
| 2.65735 | 4.619057 | 0.575301 |
results = dict()
for asset in asset_list:
time_key = dateutil.parser.parse(asset['startedDateTime'])
load_time = int(asset['time'])
# Add the start time and asset to the results dict
if time_key in results:
results[time_key].append(asset)
else:
results[time_key] = [asset]
# For each millisecond the asset was loading, insert the asset
# into the appropriate key of the results dict. Starting the range()
# index at 1 because we already inserted the first millisecond.
for _ in range(1, load_time):
time_key = time_key + datetime.timedelta(milliseconds=1)
if time_key in results:
results[time_key].append(asset)
else:
results[time_key] = [asset]
return results
|
def create_asset_timeline(self, asset_list)
|
Returns a ``dict`` of the timeline for the requested assets. The key is
a datetime object (down to the millisecond) of ANY time where at least
one of the requested assets was loaded. The value is a ``list`` of ALL
assets that were loading at that time.
:param asset_list: ``list`` of the assets to create a timeline for.
| 3.474136 | 3.232685 | 1.074691 |
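A toy sketch of the input and output shapes; `page` stands in for the object the method is bound to, and the timestamps are made up:

```python
# Two hypothetical assets whose load windows overlap by one millisecond.
assets = [
    {'startedDateTime': '2024-01-01T00:00:00.000Z', 'time': 3},
    {'startedDateTime': '2024-01-01T00:00:00.002Z', 'time': 2},
]
timeline = page.create_asset_timeline(assets)
# Every key is a datetime; the key at millisecond 2 maps to both assets,
# because both were loading at that instant.
```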