code (stringlengths 31–1.05M) | apis (list) | extract_api (stringlengths 97–1.91M)
---|---|---|
import numpy as np
from proseco.core import get_kwargs_from_starcheck_text
# Vanilla observation info
STD_INFO = dict(att=(0, 0, 0),
detector='ACIS-S',
sim_offset=0,
focus_offset=0,
date='2018:001',
n_guide=5,
n_fid=3,
t_ccd=-11,
man_angle=90,
dither=8.0)
def mod_std_info(**kwargs):
std_info = STD_INFO.copy()
std_info.update(kwargs)
return std_info
# Flat dark current map
DARK40 = np.full(shape=(1024, 1024), fill_value=40)
# Parameters for test cases (to avoid starcheck.db3 dependence)
OBS_INFO = {}
OBS_INFO[19387] = dict(obsid=19387,
detector='ACIS-S',
n_fid=3,
sim_offset=0,
focus_offset=0,
att=[188.617671, 2.211623, 231.249803],
date='2017:182:22:06:22.744',
t_ccd=-14.1,
man_angle=1.74,
dither=4.0)
OBS_INFO[21007] = dict(obsid=21007,
detector='ACIS-S',
n_fid=3,
sim_offset=0,
focus_offset=0,
att=[184.371121, 17.670062, 223.997765],
date='2018:159:11:20:52.162',
t_ccd=-11.3,
man_angle=60.39,
dither=8.0)
OBS_INFO[20603] = dict(obsid=20603,
detector='ACIS-S',
n_fid=3,
sim_offset=0,
focus_offset=0,
att=[201.561783, 7.748784, 205.998301],
date='2018:120:19:06:28.154',
t_ccd=-11.2,
man_angle=111.95,
dither=8.0)
OBS_INFO[19605] = {'att': [350.897404, 58.836913, 75.068745],
'date': '2018:135:15:52:08.898',
'n_fid': 3,
'detector': 'ACIS-S',
'dither': 8.0,
'focus_offset': 0,
'man_angle': 79.15,
'obsid': 19605,
'sim_offset': 0,
't_ccd': -10.8}
def get_starcheck_obs_kwargs(filename):
"""
Parse the starcheck.txt file to get keyword arg dicts for get_aca_catalog()
:param filename: file name of starcheck.txt in load products
:returns: dict (by obsid) of kwargs for get_aca_catalog()
"""
delim = "==================================================================================== "
with open(filename, 'r') as fh:
text = fh.read()
chunks = text.split(delim)
outs = {}
for chunk in chunks:
if "No star catalog for obsid" in chunk:
continue
try:
out = get_kwargs_from_starcheck_text(chunk, include_cat=True)
except ValueError:
continue
else:
outs[out['obsid']] = out
return outs
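# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of how these helpers could feed proseco's get_aca_catalog();
# the import path and the `dark` keyword are assumptions here, not guaranteed
# by this file.
#
#   from proseco import get_aca_catalog
#
#   # Vanilla observation, but with a warmer CCD and one fewer guide star.
#   aca = get_aca_catalog(dark=DARK40, **mod_std_info(t_ccd=-7.5, n_guide=4))
#
#   # The same pattern works with the per-obsid parameters above, e.g.
#   aca = get_aca_catalog(dark=DARK40, **OBS_INFO[19387])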
|
[
"numpy.full",
"proseco.core.get_kwargs_from_starcheck_text"
] |
[((546, 588), 'numpy.full', 'np.full', ([], {'shape': '(1024, 1024)', 'fill_value': '(40)'}), '(shape=(1024, 1024), fill_value=40)\n', (553, 588), True, 'import numpy as np\n'), ((2915, 2970), 'proseco.core.get_kwargs_from_starcheck_text', 'get_kwargs_from_starcheck_text', (['chunk'], {'include_cat': '(True)'}), '(chunk, include_cat=True)\n', (2945, 2970), False, 'from proseco.core import get_kwargs_from_starcheck_text\n')]
|
# -*- coding: utf-8 -*-
"""
A pure implementation of the Monte Carlo Tree Search (MCTS)
@author: <NAME>
"""
import numpy as np
import copy
from operator import itemgetter
def rollout_policy_fn(board):
"""rollout_policy_fn -- a coarse, fast version of policy_fn used in the rollout phase."""
# rollout randomly
action_probs = np.random.rand(len(board.availables))
return zip(board.availables, action_probs)
def policy_value_fn(board):
"""a function that takes in a state and outputs a list of (action, probability)
tuples and a score for the state"""
# return uniform probabilities and 0 score for pure MCTS
action_probs = np.ones(len(board.availables))/len(board.availables)
return zip(board.availables, action_probs), 0
class TreeNode(object):
"""A node in the MCTS tree. Each node keeps track of its own value Q, prior probability P, and
its visit-count-adjusted prior score u.
"""
def __init__(self, parent, prior_p):
self._parent = parent
self._children = {} # a map from action to TreeNode
self._n_visits = 0
self._Q = 0
self._u = 0
self._P = prior_p
def expand(self, action_priors):
"""Expand tree by creating new children.
action_priors -- output from policy function - a list of tuples of actions
and their prior probability according to the policy function.
"""
for action, prob in action_priors:
if action not in self._children:
self._children[action] = TreeNode(self, prob)
def select(self, c_puct):
"""Select action among children that gives maximum action value, Q plus bonus u(P).
Returns:
A tuple of (action, next_node)
"""
return max(self._children.items(), key=lambda act_node: act_node[1].get_value(c_puct))
def update(self, leaf_value):
"""Update node values from leaf evaluation.
Arguments:
leaf_value -- the value of subtree evaluation from the current player's perspective.
"""
# Count visit.
self._n_visits += 1
# Update Q, a running average of values for all visits.
self._Q += 1.0*(leaf_value - self._Q) / self._n_visits
def update_recursive(self, leaf_value):
"""Like a call to update(), but applied recursively for all ancestors.
"""
# If it is not root, this node's parent should be updated first.
if self._parent:
self._parent.update_recursive(-leaf_value)
self.update(leaf_value)
def get_value(self, c_puct):
"""Calculate and return the value for this node: a combination of leaf evaluations, Q, and
this node's prior adjusted for its visit count, u
c_puct -- a number in (0, inf) controlling the relative impact of values, Q, and
prior probability, P, on this node's score.
"""
self._u = c_puct * self._P * np.sqrt(self._parent._n_visits) / (1 + self._n_visits)
return self._Q + self._u
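# Worked example of the score above (illustrative numbers, not from the
# original code): with c_puct = 5, P = 0.2, 100 parent visits and 9 visits to
# this node, u = 5 * 0.2 * sqrt(100) / (1 + 9) = 1.0, so the node scores
# Q + 1.0. As _n_visits grows, u shrinks and the score becomes dominated by
# the empirical value Q.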
def is_leaf(self):
"""Check if leaf node (i.e. no nodes below this have been expanded).
"""
return self._children == {}
def is_root(self):
return self._parent is None
class MCTS(object):
"""A simple implementation of Monte Carlo Tree Search.
"""
def __init__(self, policy_value_fn, c_puct=5, n_playout=10000):
"""Arguments:
policy_value_fn -- a function that takes in a board state and outputs a list of (action, probability)
tuples and also a score in [-1, 1] (i.e. the expected value of the end game score from
the current player's perspective) for the current player.
c_puct -- a number in (0, inf) that controls how quickly exploration converges to the
maximum-value policy, where a higher value means relying on the prior more
"""
self._root = TreeNode(None, 1.0)
self._policy = policy_value_fn
self._c_puct = c_puct
self._n_playout = n_playout
def _playout(self, state):
"""Run a single playout from the root to the leaf, getting a value at the leaf and
propagating it back through its parents. State is modified in-place, so a copy must be
provided.
Arguments:
state -- a copy of the state.
"""
node = self._root
while True:
if node.is_leaf():
break
# Greedily select next move.
action, node = node.select(self._c_puct)
state.do_move(action)
action_probs, _ = self._policy(state)
# Check for end of game
end, winner = state.game_end()
if not end:
node.expand(action_probs)
# Evaluate the leaf node by random rollout
leaf_value = self._evaluate_rollout(state)
# Update value and visit count of nodes in this traversal.
node.update_recursive(-leaf_value)
def _evaluate_rollout(self, state, limit=1000):
"""Use the rollout policy to play until the end of the game, returning +1 if the current
player wins, -1 if the opponent wins, and 0 if it is a tie.
"""
player = state.get_current_player()
for i in range(limit):
end, winner = state.game_end()
if end:
break
action_probs = rollout_policy_fn(state)
max_action = max(action_probs, key=itemgetter(1))[0]
state.do_move(max_action)
else:
# If no break from the loop, issue a warning.
print("WARNING: rollout reached move limit")
if winner == -1: # tie
return 0
else:
return 1 if winner == player else -1
def get_move(self, state):
"""Runs all playouts sequentially and returns the most visited action.
Arguments:
state -- the current state, including both game state and the current player.
Returns:
the selected action
"""
for n in range(self._n_playout):
state_copy = copy.deepcopy(state)
self._playout(state_copy)
return max(self._root._children.items(), key=lambda act_node: act_node[1]._n_visits)[0]
def update_with_move(self, last_move):
"""Step forward in the tree, keeping everything we already know about the subtree.
"""
if last_move in self._root._children:
self._root = self._root._children[last_move]
self._root._parent = None
else:
self._root = TreeNode(None, 1.0)
def __str__(self):
return "MCTS"
class MCTSPlayer(object):
"""AI player based on MCTS"""
def __init__(self, c_puct=5, n_playout=2000):
self.mcts = MCTS(policy_value_fn, c_puct, n_playout)
def set_player_ind(self, p):
self.player = p
def reset_player(self):
self.mcts.update_with_move(-1)
def get_action(self, board):
sensible_moves = board.availables
if len(sensible_moves) > 0:
move = self.mcts.get_move(board)
self.mcts.update_with_move(-1)
return move
else:
print("WARNING: the board is full")
def __str__(self):
return "MCTS {}".format(self.player)
|
[
"operator.itemgetter",
"numpy.sqrt",
"copy.deepcopy"
] |
[((6139, 6159), 'copy.deepcopy', 'copy.deepcopy', (['state'], {}), '(state)\n', (6152, 6159), False, 'import copy\n'), ((2966, 2997), 'numpy.sqrt', 'np.sqrt', (['self._parent._n_visits'], {}), '(self._parent._n_visits)\n', (2973, 2997), True, 'import numpy as np\n'), ((5499, 5512), 'operator.itemgetter', 'itemgetter', (['(1)'], {}), '(1)\n', (5509, 5512), False, 'from operator import itemgetter\n')]
|
import click
import json
import numpy as np
import os
import tensorflow.compat.v1 as tf
import time
from luminoth.datasets import get_dataset
from luminoth.models import get_model
from luminoth.utils.bbox_overlap import bbox_overlap
from luminoth.utils.config import get_config
from luminoth.utils.image_vis import image_vis_summaries
@click.command(help='Evaluate trained (or training) models')
@click.option('dataset_split', '--split', default='val', help='Dataset split to use.') # noqa
@click.option('config_files', '--config', '-c', required=True, multiple=True, help='Config to use.') # noqa
@click.option('--watch/--no-watch', default=True, help='Keep watching checkpoint directory for new files.') # noqa
@click.option('--from-global-step', type=int, default=None, help='Consider only checkpoints after this global step') # noqa
@click.option('override_params', '--override', '-o', multiple=True, help='Override model config params.') # noqa
@click.option('--files-per-class', type=int, default=10, help='How many files per class display in every epoch.') # noqa
@click.option('--max-detections', type=int, default=100, help='Max detections to consider.') # noqa
def eval(dataset_split, config_files, watch, from_global_step, override_params,
files_per_class, max_detections):
"""Evaluate models using dataset."""
# If the config file is empty, our config will be the base_config for the
# default model.
try:
config = get_config(config_files, override_params=override_params)
except KeyError:
raise KeyError('model.type should be set on the custom config.')
if not config.train.job_dir:
raise KeyError('`job_dir` should be set.')
if not config.train.run_name:
raise KeyError('`run_name` should be set.')
# `run_dir` is where the actual checkpoint and logs are located.
run_dir = os.path.join(config.train.job_dir, config.train.run_name)
# Only activate debug if needed for the debug visualization mode.
if not config.train.debug:
config.train.debug = config.eval.image_vis == 'debug'
if config.train.debug or config.train.tf_debug:
tf.logging.set_verbosity(tf.logging.DEBUG)
else:
tf.logging.set_verbosity(tf.logging.INFO)
# Build the dataset tensors, overriding the default dataset split.
config.dataset.split = dataset_split
# Disable data augmentation.
config.dataset.data_augmentation = []
# Attempt to get class names, if available.
classes_file = os.path.join(config.dataset.dir, 'classes.json')
if tf.gfile.Exists(classes_file):
class_labels = json.load(tf.gfile.GFile(classes_file))
else:
class_labels = None
if config.model.type == 'fasterrcnn':
# Override max detections with specified value.
if config.model.network.with_rcnn:
config.model.rcnn.proposals.total_max_detections = max_detections
else:
config.model.rpn.proposals.post_nms_top_n = max_detections
# Also overwrite `min_prob_threshold` in order to use all detections.
config.model.rcnn.proposals.min_prob_threshold = 0.0
elif config.model.type == 'ssd':
config.model.proposals.total_max_detections = max_detections
config.model.proposals.min_prob_threshold = 0.0
else:
raise ValueError(
"Model type '{}' not supported".format(config.model.type)
)
# Only a single run over the dataset to calculate metrics.
config.train.num_epochs = 1
# Seed setup.
if config.train.seed:
tf.set_random_seed(config.train.seed)
# Mark the pretrained base network as not trainable.
config.model.base_network.trainable = False
model_class = get_model(config.model.type)
model = model_class(config)
dataset_class = get_dataset(config.dataset.type)
dataset = dataset_class(config)
train_dataset = dataset()
train_image = train_dataset['image']
train_objects = train_dataset['bboxes']
train_filename = train_dataset['filename']
# Build the graph of the model to evaluate, retrieving required
# intermediate tensors.
prediction_dict = model(train_image, train_objects)
if config.model.type == 'ssd' or config.model.network.with_rcnn:
pred = prediction_dict['classification_prediction']
pred_objects = pred['objects']
pred_objects_classes = pred['labels']
pred_objects_scores = pred['probs']
else:
# Force the num_classes to 1.
config.model.network.num_classes = 1
pred = prediction_dict['rpn_prediction']
pred_objects = pred['proposals']
pred_objects_scores = pred['scores']
# When using only RPN all classes are 0.
pred_objects_classes = tf.zeros(
(tf.shape(pred_objects_scores)[0],), dtype=tf.int32
)
# Retrieve *all* the losses from the model and calculate their streaming
# means, so we get the loss over the whole dataset.
batch_losses = model.loss(prediction_dict, return_all=True)
losses = {}
for loss_name, loss_tensor in batch_losses.items():
loss_mean, _ = tf.metrics.mean(
loss_tensor, name=loss_name,
metrics_collections='metrics',
updates_collections='metric_ops',
)
full_loss_name = '{}_losses/{}'.format(dataset_split, loss_name)
losses[full_loss_name] = loss_mean
metric_ops = tf.get_collection('metric_ops')
init_op = tf.group(
tf.global_variables_initializer(),
tf.local_variables_initializer()
)
# Using a global saver instead of the one for the model.
saver = tf.train.Saver(sharded=True, allow_empty=True)
# Aggregate the required ops to evaluate into a dict.
ops = {
'init_op': init_op,
'metric_ops': metric_ops,
'pred_objects': pred_objects,
'pred_objects_classes': pred_objects_classes,
'pred_objects_scores': pred_objects_scores,
'train_objects': train_objects,
'losses': losses,
'prediction_dict': prediction_dict,
'filename': train_filename,
'train_image': train_image
}
metrics_scope = '{}_metrics'.format(dataset_split)
# Use global writer for all checkpoints. We don't want to write different
# files for each checkpoint.
writer = tf.summary.FileWriter(run_dir)
files_to_visualize = {}
last_global_step = from_global_step
while True:
# Get the checkpoint files to evaluate.
try:
checkpoints = get_checkpoints(
run_dir, last_global_step, last_only=not watch
)
except ValueError as e:
if not watch:
tf.logging.error('Missing checkpoint.')
raise e
tf.logging.warning(
'Missing checkpoint; Checking again in a moment')
time.sleep(5)
continue
for checkpoint in checkpoints:
# Always returned in order, so it's safe to assign directly.
tf.logging.info(
'Evaluating global_step {} using checkpoint \'{}\''.format(
checkpoint['global_step'], checkpoint['file']
)
)
try:
start = time.time()
evaluate_once(
config, writer, saver, ops, checkpoint,
class_labels=class_labels,
metrics_scope=metrics_scope,
image_vis=config.eval.image_vis,
files_per_class=files_per_class,
files_to_visualize=files_to_visualize,
)
last_global_step = checkpoint['global_step']
tf.logging.info('Evaluated in {:.2f}s'.format(
time.time() - start
))
except tf.errors.NotFoundError:
# The checkpoint is not ready yet. It was written in the
# checkpoints file, but it still hasn't been completely saved.
tf.logging.info(
'Checkpoint {} is not ready yet. '
'Checking again in a moment.'.format(
checkpoint['file']
)
)
time.sleep(5)
continue
# If no watching was requested, finish the execution.
if not watch:
return
# Sleep for a moment and check for new checkpoints.
tf.logging.info('All checkpoints evaluated; sleeping for a moment')
time.sleep(5)
def get_checkpoints(run_dir, from_global_step=None, last_only=False):
"""Return all available checkpoints.
Args:
run_dir: Directory where the checkpoints are located.
from_global_step (int): Only return checkpoints after this global step.
The comparison is *strict*. If ``None``, returns all available
checkpoints.
last_only (bool): If ``True``, return only the most recent checkpoint.
Returns:
List of dicts (with keys ``global_step``, ``file``) with all the
checkpoints found.
Raises:
ValueError: If there are no checkpoints in ``run_dir``.
"""
# The latest checkpoint file should be the last item of
# `all_model_checkpoint_paths`, according to the CheckpointState protobuf
# definition.
# TODO: Must check if the checkpoints are complete somehow.
ckpt = tf.train.get_checkpoint_state(run_dir)
if not ckpt or not ckpt.all_model_checkpoint_paths:
raise ValueError('Could not find checkpoint in {}.'.format(run_dir))
# TODO: Any other way to get the global_step? (Same as in `checkpoints`.)
checkpoints = sorted([
{'global_step': int(path.split('-')[-1]), 'file': path}
for path in ckpt.all_model_checkpoint_paths
], key=lambda c: c['global_step'])
if last_only:
checkpoints = checkpoints[-1:]
tf.logging.info(
'Using last checkpoint in run_dir, global_step = {}'.format(
checkpoints[0]['global_step']
)
)
elif from_global_step is not None:
checkpoints = [
c for c in checkpoints
if c['global_step'] > from_global_step
]
tf.logging.info(
'Found %s checkpoints in run_dir with global_step > %s',
len(checkpoints), from_global_step,
)
else:
tf.logging.info(
'Found {} checkpoints in run_dir'.format(len(checkpoints))
)
return checkpoints
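# For illustration (hypothetical checkpoint names, not from the original
# code): if the CheckpointState lists all_model_checkpoint_paths =
# ['model.ckpt-500', 'model.ckpt-100'], this returns
# [{'global_step': 100, 'file': 'model.ckpt-100'},
#  {'global_step': 500, 'file': 'model.ckpt-500'}]
# since the step is parsed from the suffix after the last '-' and the list is
# sorted by 'global_step'. With last_only=True only the step-500 entry
# survives; with from_global_step=100 only strictly larger steps are kept.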
def evaluate_once(config, writer, saver, ops, checkpoint,
class_labels, metrics_scope='metrics', image_vis=None,
files_per_class=None, files_to_visualize=None):
"""Run the evaluation once.
Create a new session with the previously-built graph, run it through the
dataset, calculate the evaluation metrics and write the corresponding
summaries.
Args:
config: Config object for the model.
writer: Summary writers.
saver: Saver object to restore checkpoint parameters.
ops (dict): All the operations needed to successfully run the model.
Expects the following keys: ``init_op``, ``metric_ops``,
``pred_objects``, ``pred_objects_classes``,
``pred_objects_scores``, ``train_objects``, ``losses``,
``prediction_dict``, ``filename``, ``train_image``.
checkpoint (dict): Checkpoint-related data.
Expects the following keys: ``global_step``, ``file``.
"""
# Output of the detector, per batch.
output_per_batch = {
'bboxes': [], # Bounding boxes detected.
'classes': [], # Class associated to each bounding box.
'scores': [], # Score for each detection.
'gt_bboxes': [], # Ground-truth bounding boxes for the batch.
'gt_classes': [], # Ground-truth classes for each bounding box.
}
with tf.Session() as sess:
sess.run(ops['init_op'])
saver.restore(sess, checkpoint['file'])
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
total_evaluated = 0
start_time = time.time()
try:
track_start = start_time
track_count = 0
while not coord.should_stop():
fetches = {
'metric_ops': ops['metric_ops'],
'bboxes': ops['pred_objects'],
'classes': ops['pred_objects_classes'],
'scores': ops['pred_objects_scores'],
'gt_bboxes': ops['train_objects'],
'losses': ops['losses'],
'filename': ops['filename'],
}
if image_vis is not None:
fetches['prediction_dict'] = ops['prediction_dict']
fetches['train_image'] = ops['train_image']
batch_fetched = sess.run(fetches)
output_per_batch['bboxes'].append(batch_fetched.get('bboxes'))
output_per_batch['classes'].append(batch_fetched['classes'])
output_per_batch['scores'].append(batch_fetched['scores'])
batch_gt_objects = batch_fetched['gt_bboxes']
output_per_batch['gt_bboxes'].append(batch_gt_objects[:, :4])
batch_gt_classes = batch_gt_objects[:, 4]
output_per_batch['gt_classes'].append(batch_gt_classes)
val_losses = batch_fetched['losses']
if image_vis is not None:
filename = batch_fetched['filename'].decode('utf-8')
visualize_file = False
for gt_class in batch_gt_classes:
cls_files = files_to_visualize.get(
gt_class, set()
)
if len(cls_files) < files_per_class:
files_to_visualize.setdefault(
gt_class, set()
).add(filename)
visualize_file = True
break
elif filename in cls_files:
visualize_file = True
break
if visualize_file:
image_summaries = image_vis_summaries(
batch_fetched['prediction_dict'],
config=config.model,
extra_tag=filename,
image_visualization_mode=image_vis,
image=batch_fetched['train_image'],
gt_bboxes=batch_fetched['gt_bboxes']
)
for image_summary in image_summaries:
writer.add_summary(
image_summary, checkpoint['global_step']
)
total_evaluated += 1
track_count += 1
track_end = time.time()
if track_end - track_start > 20.:
click.echo(
'{} processed in {:.2f}s (global {:.2f} images/s, '
'period {:.2f} images/s)'.format(
total_evaluated, track_end - start_time,
total_evaluated / (track_end - start_time),
track_count / (track_end - track_start),
))
track_count = 0
track_start = track_end
except tf.errors.OutOfRangeError:
# Save final evaluation stats into summary under the checkpoint's
# global step.
ap_per_class, ar_per_class = calculate_metrics(
output_per_batch, config.model.network.num_classes
)
map_at_50 = np.mean(ap_per_class[:, 0])
map_at_75 = np.mean(ap_per_class[:, 5])
map_at_range = np.mean(ap_per_class)
mar_at_range = np.mean(ar_per_class)
tf.logging.info('Finished evaluation at step {}.'.format(
checkpoint['global_step']))
tf.logging.info('Evaluated {} images.'.format(total_evaluated))
tf.logging.info(
'Average Precision (AP) @ [0.50] = {:.3f}'.format(map_at_50)
)
tf.logging.info(
'Average Precision (AP) @ [0.75] = {:.3f}'.format(map_at_75)
)
tf.logging.info(
'Average Precision (AP) @ [0.50:0.95] = {:.3f}'.format(
map_at_range
)
)
tf.logging.info(
'Average Recall (AR) @ [0.50:0.95] = {:.3f}'.format(
mar_at_range
)
)
for idx, val in enumerate(ap_per_class[:, 0]):
class_label = '{} ({})'.format(
class_labels[idx], idx
) if class_labels else idx
tf.logging.debug(
'Average Precision (AP) @ [0.50] for {} = {:.3f}'.format(
class_label, val
)
)
summary = [
tf.Summary.Value(
tag='{}/[email protected]'.format(metrics_scope),
simple_value=map_at_50
),
tf.Summary.Value(
tag='{}/[email protected]'.format(metrics_scope),
simple_value=map_at_75
),
tf.Summary.Value(
tag='{}/AP@[0.50:0.95]'.format(metrics_scope),
simple_value=map_at_range
),
tf.Summary.Value(
tag='{}/AR@[0.50:0.95]'.format(metrics_scope),
simple_value=mar_at_range
),
tf.Summary.Value(
tag='{}/total_evaluated'.format(metrics_scope),
simple_value=total_evaluated
),
tf.Summary.Value(
tag='{}/evaluation_time'.format(metrics_scope),
simple_value=time.time() - start_time
),
]
for loss_name, loss_value in val_losses.items():
tf.logging.debug('{} loss = {:.4f}'.format(
loss_name, loss_value))
summary.append(tf.Summary.Value(
tag=loss_name,
simple_value=loss_value
))
writer.add_summary(
tf.Summary(value=summary), checkpoint['global_step']
)
finally:
coord.request_stop()
# Wait for all threads to stop.
coord.join(threads)
def calculate_metrics(output_per_batch, num_classes):
"""Calculates mAP and mAR from the detector's output.
The procedure for calculating the average precision for class ``C`` is as
follows (see `VOC mAP metric`_ for more details):
Start by ranking all the predictions (for a given image and said class) in
order of confidence. Each of these predictions is marked as correct (a true
positive, when its IoU with a ground-truth box is greater than or equal to the
IoU threshold) or incorrect (a false positive otherwise). This matching is
performed greedily over the confidence scores, so a higher-confidence
prediction will be matched over another lower-confidence one even if the
latter has better IoU. Also, each prediction is matched at most once, so
repeated detections are counted as false positives.
We then integrate over the interpolated PR curve, thus obtaining the value
for the class' average precision. This interpolation makes sure the
precision curve is monotonically non-increasing: traversing from the highest
recall to the lowest, each precision value is raised to the maximum precision
seen at any higher recall. The integration is
performed over 101 fixed points over the curve (``[0.0, 0.01, ..., 1.0]``).
Average the result among all the classes to obtain the final, ``mAP``,
value.
Args:
output_per_batch (dict): Output of the detector to calculate mAP.
Expects the following keys: ``bboxes``, ``classes``, ``scores``,
``gt_bboxes``, ``gt_classes``. Under each key, there should be a
list of the results per batch as returned by the detector.
num_classes (int): Number of classes on the dataset.
Returns:
(``np.ndarray``, ``ndarray``) tuple. The first value is an array of
size (`num_classes`,), with the AP value per class, while the second
one is an array for the AR.
.. _VOC mAP metric:
http://host.robots.ox.ac.uk/pascal/VOC/pubs/everingham10.pdf
"""
iou_thresholds = np.linspace(
0.50, 0.95, int(np.round((0.95 - 0.50) / 0.05) + 1)
)
# 101 recall levels, same as COCO evaluation.
rec_thresholds = np.linspace(
0.00, 1.00, int(np.round((1.00 - 0.00) / 0.01) + 1)
)
# List; first by class, then by example. Each entry is a tuple of ndarrays
# of size (D_{c,i},), for tp/fp labels and for score, where D_{c,i} is the
# number of detected boxes for class `c` on image `i`.
tp_fp_labels_by_class = [[] for _ in range(num_classes)]
num_examples_per_class = [0 for _ in range(num_classes)]
# For each image, order predictions by score and classify each as a true
# positive or a false positive.
num_batches = len(output_per_batch['bboxes'])
for idx in range(num_batches):
# Get the results of the batch.
classes = output_per_batch['classes'][idx] # (D_{c,i},)
bboxes = output_per_batch['bboxes'][idx] # (D_{c,i}, 4)
scores = output_per_batch['scores'][idx] # (D_{c,i},)
gt_classes = output_per_batch['gt_classes'][idx]
gt_bboxes = output_per_batch['gt_bboxes'][idx]
# Analysis must be made per-class.
for cls in range(num_classes):
# Get the bounding boxes of `cls` only.
cls_bboxes = bboxes[classes == cls, :]
cls_scores = scores[classes == cls]
cls_gt_bboxes = gt_bboxes[gt_classes == cls, :]
num_gt = cls_gt_bboxes.shape[0]
num_examples_per_class[cls] += num_gt
# Sort by score descending, so we prioritize higher-confidence
# results when matching.
sorted_indices = np.argsort(-cls_scores)
# Whether the ground-truth has been previously detected.
is_detected = np.zeros((num_gt, len(iou_thresholds)))
# TP/FP labels for detected bboxes of (class, image).
tp_fp_labels = np.zeros((len(sorted_indices), len(iou_thresholds)))
if num_gt == 0:
# If no ground truth examples for class, all predictions must
# be false positives.
tp_fp_labels_by_class[cls].append(
(tp_fp_labels, cls_scores[sorted_indices])
)
continue
# Get the IoUs for the class' bboxes.
ious = bbox_overlap(cls_bboxes, cls_gt_bboxes)
# Greedily assign bboxes to ground truths (highest score first).
for bbox_idx in sorted_indices:
gt_match = np.argmax(ious[bbox_idx, :])
# TODO: Try to vectorize.
for iou_idx, iou_threshold in enumerate(iou_thresholds):
if ious[bbox_idx, gt_match] >= iou_threshold:
# Over IoU threshold.
if not is_detected[gt_match, iou_idx]:
# And first detection: it's a true positive.
tp_fp_labels[bbox_idx, iou_idx] = True
is_detected[gt_match, iou_idx] = True
tp_fp_labels_by_class[cls].append(
(tp_fp_labels, cls_scores[sorted_indices])
)
# Calculate average precision per class.
ap_per_class = np.zeros((num_classes, len(iou_thresholds)))
ar_per_class = np.zeros((num_classes, len(iou_thresholds)))
for cls in range(num_classes):
tp_fp_labels = tp_fp_labels_by_class[cls]
num_examples = num_examples_per_class[cls]
# Flatten the tp/fp labels into a single ndarray.
labels, scores = zip(*tp_fp_labels)
labels = np.concatenate(labels)
scores = np.concatenate(scores)
# Sort the tp/fp labels by decreasing confidence score and calculate
# precision and recall at every position of this ranked output.
sorted_indices = np.argsort(-scores)
true_positives = labels[sorted_indices, :]
false_positives = 1 - true_positives
cum_true_positives = np.cumsum(true_positives, axis=0)
cum_false_positives = np.cumsum(false_positives, axis=0)
recall = cum_true_positives.astype(float) / num_examples
precision = np.divide(
cum_true_positives.astype(float),
cum_true_positives + cum_false_positives
)
# Find AP by integrating over PR curve, with interpolated precision.
for iou_idx in range(len(iou_thresholds)):
p = precision[:, iou_idx]
r = recall[:, iou_idx]
# Interpolate the precision: make the envelope non-increasing in recall order.
for i in range(len(p) - 1, 0, -1):
if p[i] > p[i-1]:
p[i-1] = p[i]
ap = 0
inds = np.searchsorted(r, rec_thresholds)
for ridx, pidx in enumerate(inds):
if pidx >= len(r):
# Out of bounds, no recall higher than threshold for any of
# the remaining thresholds (as they're ordered).
break
ap += p[pidx] / len(rec_thresholds)
ap_per_class[cls, iou_idx] = ap
if len(r):
ar_per_class[cls, iou_idx] = r[-1]
else:
ar_per_class[cls, iou_idx] = 0
return ap_per_class, ar_per_class
if __name__ == '__main__':
eval()
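# Example invocation when running this module directly (file name and config
# path are placeholders):
#   python eval.py --config path/to/config.yml --split val --no-watch \
#       --max-detections 100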
|
[
"tensorflow.compat.v1.gfile.GFile",
"tensorflow.compat.v1.shape",
"tensorflow.compat.v1.gfile.Exists",
"tensorflow.compat.v1.train.start_queue_runners",
"time.sleep",
"numpy.argsort",
"tensorflow.compat.v1.logging.error",
"tensorflow.compat.v1.set_random_seed",
"tensorflow.compat.v1.train.Coordinator",
"tensorflow.compat.v1.get_collection",
"tensorflow.compat.v1.Session",
"tensorflow.compat.v1.global_variables_initializer",
"numpy.mean",
"click.option",
"numpy.searchsorted",
"tensorflow.compat.v1.logging.set_verbosity",
"numpy.concatenate",
"tensorflow.compat.v1.local_variables_initializer",
"click.command",
"luminoth.utils.bbox_overlap.bbox_overlap",
"numpy.round",
"tensorflow.compat.v1.summary.FileWriter",
"tensorflow.compat.v1.Summary.Value",
"numpy.argmax",
"tensorflow.compat.v1.Summary",
"luminoth.datasets.get_dataset",
"luminoth.models.get_model",
"time.time",
"tensorflow.compat.v1.logging.info",
"tensorflow.compat.v1.metrics.mean",
"tensorflow.compat.v1.logging.warning",
"os.path.join",
"tensorflow.compat.v1.train.get_checkpoint_state",
"luminoth.utils.image_vis.image_vis_summaries",
"numpy.cumsum",
"luminoth.utils.config.get_config",
"tensorflow.compat.v1.train.Saver"
] |
[((339, 398), 'click.command', 'click.command', ([], {'help': '"""Evaluate trained (or training) models"""'}), "(help='Evaluate trained (or training) models')\n", (352, 398), False, 'import click\n'), ((400, 490), 'click.option', 'click.option', (['"""dataset_split"""', '"""--split"""'], {'default': '"""val"""', 'help': '"""Dataset split to use."""'}), "('dataset_split', '--split', default='val', help=\n 'Dataset split to use.')\n", (412, 490), False, 'import click\n'), ((495, 598), 'click.option', 'click.option', (['"""config_files"""', '"""--config"""', '"""-c"""'], {'required': '(True)', 'multiple': '(True)', 'help': '"""Config to use."""'}), "('config_files', '--config', '-c', required=True, multiple=True,\n help='Config to use.')\n", (507, 598), False, 'import click\n'), ((604, 715), 'click.option', 'click.option', (['"""--watch/--no-watch"""'], {'default': '(True)', 'help': '"""Keep watching checkpoint directory for new files."""'}), "('--watch/--no-watch', default=True, help=\n 'Keep watching checkpoint directory for new files.')\n", (616, 715), False, 'import click\n'), ((720, 840), 'click.option', 'click.option', (['"""--from-global-step"""'], {'type': 'int', 'default': 'None', 'help': '"""Consider only checkpoints after this global step"""'}), "('--from-global-step', type=int, default=None, help=\n 'Consider only checkpoints after this global step')\n", (732, 840), False, 'import click\n'), ((845, 954), 'click.option', 'click.option', (['"""override_params"""', '"""--override"""', '"""-o"""'], {'multiple': '(True)', 'help': '"""Override model config params."""'}), "('override_params', '--override', '-o', multiple=True, help=\n 'Override model config params.')\n", (857, 954), False, 'import click\n'), ((959, 1076), 'click.option', 'click.option', (['"""--files-per-class"""'], {'type': 'int', 'default': '(10)', 'help': '"""How many files per class display in every epoch."""'}), "('--files-per-class', type=int, default=10, help=\n 'How many files per class display in every epoch.')\n", (971, 1076), False, 'import click\n'), ((1081, 1177), 'click.option', 'click.option', (['"""--max-detections"""'], {'type': 'int', 'default': '(100)', 'help': '"""Max detections to consider."""'}), "('--max-detections', type=int, default=100, help=\n 'Max detections to consider.')\n", (1093, 1177), False, 'import click\n'), ((1878, 1935), 'os.path.join', 'os.path.join', (['config.train.job_dir', 'config.train.run_name'], {}), '(config.train.job_dir, config.train.run_name)\n', (1890, 1935), False, 'import os\n'), ((2521, 2569), 'os.path.join', 'os.path.join', (['config.dataset.dir', '"""classes.json"""'], {}), "(config.dataset.dir, 'classes.json')\n", (2533, 2569), False, 'import os\n'), ((2577, 2606), 'tensorflow.compat.v1.gfile.Exists', 'tf.gfile.Exists', (['classes_file'], {}), '(classes_file)\n', (2592, 2606), True, 'import tensorflow.compat.v1 as tf\n'), ((3725, 3753), 'luminoth.models.get_model', 'get_model', (['config.model.type'], {}), '(config.model.type)\n', (3734, 3753), False, 'from luminoth.models import get_model\n'), ((3806, 3838), 'luminoth.datasets.get_dataset', 'get_dataset', (['config.dataset.type'], {}), '(config.dataset.type)\n', (3817, 3838), False, 'from luminoth.datasets import get_dataset\n'), ((5427, 5458), 'tensorflow.compat.v1.get_collection', 'tf.get_collection', (['"""metric_ops"""'], {}), "('metric_ops')\n", (5444, 5458), True, 'import tensorflow.compat.v1 as tf\n'), ((5648, 5694), 'tensorflow.compat.v1.train.Saver', 'tf.train.Saver', ([], {'sharded': '(True)', 
'allow_empty': '(True)'}), '(sharded=True, allow_empty=True)\n', (5662, 5694), True, 'import tensorflow.compat.v1 as tf\n'), ((6340, 6370), 'tensorflow.compat.v1.summary.FileWriter', 'tf.summary.FileWriter', (['run_dir'], {}), '(run_dir)\n', (6361, 6370), True, 'import tensorflow.compat.v1 as tf\n'), ((9383, 9421), 'tensorflow.compat.v1.train.get_checkpoint_state', 'tf.train.get_checkpoint_state', (['run_dir'], {}), '(run_dir)\n', (9412, 9421), True, 'import tensorflow.compat.v1 as tf\n'), ((1471, 1528), 'luminoth.utils.config.get_config', 'get_config', (['config_files'], {'override_params': 'override_params'}), '(config_files, override_params=override_params)\n', (1481, 1528), False, 'from luminoth.utils.config import get_config\n'), ((2161, 2203), 'tensorflow.compat.v1.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.DEBUG'], {}), '(tf.logging.DEBUG)\n', (2185, 2203), True, 'import tensorflow.compat.v1 as tf\n'), ((2222, 2263), 'tensorflow.compat.v1.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.INFO'], {}), '(tf.logging.INFO)\n', (2246, 2263), True, 'import tensorflow.compat.v1 as tf\n'), ((3581, 3618), 'tensorflow.compat.v1.set_random_seed', 'tf.set_random_seed', (['config.train.seed'], {}), '(config.train.seed)\n', (3599, 3618), True, 'import tensorflow.compat.v1 as tf\n'), ((5136, 5249), 'tensorflow.compat.v1.metrics.mean', 'tf.metrics.mean', (['loss_tensor'], {'name': 'loss_name', 'metrics_collections': '"""metrics"""', 'updates_collections': '"""metric_ops"""'}), "(loss_tensor, name=loss_name, metrics_collections='metrics',\n updates_collections='metric_ops')\n", (5151, 5249), True, 'import tensorflow.compat.v1 as tf\n'), ((5492, 5525), 'tensorflow.compat.v1.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (5523, 5525), True, 'import tensorflow.compat.v1 as tf\n'), ((5535, 5567), 'tensorflow.compat.v1.local_variables_initializer', 'tf.local_variables_initializer', ([], {}), '()\n', (5565, 5567), True, 'import tensorflow.compat.v1 as tf\n'), ((8497, 8564), 'tensorflow.compat.v1.logging.info', 'tf.logging.info', (['"""All checkpoints evaluated; sleeping for a moment"""'], {}), "('All checkpoints evaluated; sleeping for a moment')\n", (8512, 8564), True, 'import tensorflow.compat.v1 as tf\n'), ((8573, 8586), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (8583, 8586), False, 'import time\n'), ((11862, 11874), 'tensorflow.compat.v1.Session', 'tf.Session', ([], {}), '()\n', (11872, 11874), True, 'import tensorflow.compat.v1 as tf\n'), ((11982, 12004), 'tensorflow.compat.v1.train.Coordinator', 'tf.train.Coordinator', ([], {}), '()\n', (12002, 12004), True, 'import tensorflow.compat.v1 as tf\n'), ((12023, 12075), 'tensorflow.compat.v1.train.start_queue_runners', 'tf.train.start_queue_runners', ([], {'sess': 'sess', 'coord': 'coord'}), '(sess=sess, coord=coord)\n', (12051, 12075), True, 'import tensorflow.compat.v1 as tf\n'), ((12126, 12137), 'time.time', 'time.time', ([], {}), '()\n', (12135, 12137), False, 'import time\n'), ((24398, 24420), 'numpy.concatenate', 'np.concatenate', (['labels'], {}), '(labels)\n', (24412, 24420), True, 'import numpy as np\n'), ((24438, 24460), 'numpy.concatenate', 'np.concatenate', (['scores'], {}), '(scores)\n', (24452, 24460), True, 'import numpy as np\n'), ((24636, 24655), 'numpy.argsort', 'np.argsort', (['(-scores)'], {}), '(-scores)\n', (24646, 24655), True, 'import numpy as np\n'), ((24782, 24815), 'numpy.cumsum', 'np.cumsum', (['true_positives'], {'axis': '(0)'}), 
'(true_positives, axis=0)\n', (24791, 24815), True, 'import numpy as np\n'), ((24846, 24880), 'numpy.cumsum', 'np.cumsum', (['false_positives'], {'axis': '(0)'}), '(false_positives, axis=0)\n', (24855, 24880), True, 'import numpy as np\n'), ((2641, 2669), 'tensorflow.compat.v1.gfile.GFile', 'tf.gfile.GFile', (['classes_file'], {}), '(classes_file)\n', (2655, 2669), True, 'import tensorflow.compat.v1 as tf\n'), ((22454, 22477), 'numpy.argsort', 'np.argsort', (['(-cls_scores)'], {}), '(-cls_scores)\n', (22464, 22477), True, 'import numpy as np\n'), ((23133, 23172), 'luminoth.utils.bbox_overlap.bbox_overlap', 'bbox_overlap', (['cls_bboxes', 'cls_gt_bboxes'], {}), '(cls_bboxes, cls_gt_bboxes)\n', (23145, 23172), False, 'from luminoth.utils.bbox_overlap import bbox_overlap\n'), ((25521, 25555), 'numpy.searchsorted', 'np.searchsorted', (['r', 'rec_thresholds'], {}), '(r, rec_thresholds)\n', (25536, 25555), True, 'import numpy as np\n'), ((6789, 6857), 'tensorflow.compat.v1.logging.warning', 'tf.logging.warning', (['"""Missing checkpoint; Checking again in a moment"""'], {}), "('Missing checkpoint; Checking again in a moment')\n", (6807, 6857), True, 'import tensorflow.compat.v1 as tf\n'), ((6887, 6900), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (6897, 6900), False, 'import time\n'), ((7279, 7290), 'time.time', 'time.time', ([], {}), '()\n', (7288, 7290), False, 'import time\n'), ((15031, 15042), 'time.time', 'time.time', ([], {}), '()\n', (15040, 15042), False, 'import time\n'), ((15891, 15918), 'numpy.mean', 'np.mean', (['ap_per_class[:, 0]'], {}), '(ap_per_class[:, 0])\n', (15898, 15918), True, 'import numpy as np\n'), ((15943, 15970), 'numpy.mean', 'np.mean', (['ap_per_class[:, 5]'], {}), '(ap_per_class[:, 5])\n', (15950, 15970), True, 'import numpy as np\n'), ((15998, 16019), 'numpy.mean', 'np.mean', (['ap_per_class'], {}), '(ap_per_class)\n', (16005, 16019), True, 'import numpy as np\n'), ((16047, 16068), 'numpy.mean', 'np.mean', (['ar_per_class'], {}), '(ar_per_class)\n', (16054, 16068), True, 'import numpy as np\n'), ((20845, 20874), 'numpy.round', 'np.round', (['((0.95 - 0.5) / 0.05)'], {}), '((0.95 - 0.5) / 0.05)\n', (20853, 20874), True, 'import numpy as np\n'), ((20995, 21023), 'numpy.round', 'np.round', (['((1.0 - 0.0) / 0.01)'], {}), '((1.0 - 0.0) / 0.01)\n', (21003, 21023), True, 'import numpy as np\n'), ((23322, 23350), 'numpy.argmax', 'np.argmax', (['ious[bbox_idx, :]'], {}), '(ious[bbox_idx, :])\n', (23331, 23350), True, 'import numpy as np\n'), ((4782, 4811), 'tensorflow.compat.v1.shape', 'tf.shape', (['pred_objects_scores'], {}), '(pred_objects_scores)\n', (4790, 4811), True, 'import tensorflow.compat.v1 as tf\n'), ((6712, 6751), 'tensorflow.compat.v1.logging.error', 'tf.logging.error', (['"""Missing checkpoint."""'], {}), "('Missing checkpoint.')\n", (6728, 6751), True, 'import tensorflow.compat.v1 as tf\n'), ((8285, 8298), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (8295, 8298), False, 'import time\n'), ((18611, 18636), 'tensorflow.compat.v1.Summary', 'tf.Summary', ([], {'value': 'summary'}), '(value=summary)\n', (18621, 18636), True, 'import tensorflow.compat.v1 as tf\n'), ((14319, 14532), 'luminoth.utils.image_vis.image_vis_summaries', 'image_vis_summaries', (["batch_fetched['prediction_dict']"], {'config': 'config.model', 'extra_tag': 'filename', 'image_visualization_mode': 'image_vis', 'image': "batch_fetched['train_image']", 'gt_bboxes': "batch_fetched['gt_bboxes']"}), "(batch_fetched['prediction_dict'], config=config.model,\n extra_tag=filename, 
image_visualization_mode=image_vis, image=\n batch_fetched['train_image'], gt_bboxes=batch_fetched['gt_bboxes'])\n", (14338, 14532), False, 'from luminoth.utils.image_vis import image_vis_summaries\n'), ((18446, 18502), 'tensorflow.compat.v1.Summary.Value', 'tf.Summary.Value', ([], {'tag': 'loss_name', 'simple_value': 'loss_value'}), '(tag=loss_name, simple_value=loss_value)\n', (18462, 18502), True, 'import tensorflow.compat.v1 as tf\n'), ((7805, 7816), 'time.time', 'time.time', ([], {}), '()\n', (7814, 7816), False, 'import time\n'), ((18191, 18202), 'time.time', 'time.time', ([], {}), '()\n', (18200, 18202), False, 'import time\n')]
|
import cv2
import numpy as np
import dlib
webcam = 1
cap = cv2.VideoCapture(0)
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
def creatBox(img, points, scale=5, masked=False, cropped=True):
if masked:
mask = np.zeros_like(img)
mask = cv2.fillPoly(mask,[points],(255,255,255))
img = cv2.bitwise_and(img,mask)
if cropped:
bbox = cv2.boundingRect(points)
x,y,w,h = bbox
imgCrop = img[y:y+h,x:x+w]
imgCrop = cv2.resize(imgCrop,(0,0),None,scale,scale)
return imgCrop
else:
# Return the polygon mask when one was built; otherwise fall back to the
# input image, since `mask` would be undefined here.
return mask if masked else img
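# Illustrative usage of creatBox (commented out; `myPoints` is the landmark
# array built in the loop below): crop a single eye region -- landmarks 36-41
# in the 68-point dlib convention -- at 5x scale.
#   imgEye = creatBox(img, myPoints[36:42], scale=5, masked=True, cropped=True)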
while True:
if webcam: success, img = cap.read()
else: img = cv2.imread('2 copy.jpeg')
#img = cv2.resize(img,(0,0),None,0.5,0.5)
#imOriginal = img.copy()
imgGray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
faces = detector(imgGray)
for face in faces:
x1,y1 = face.left(),face.top()
x2,y2 = face.right(),face.bottom()
#imgOriginal = cv2.rectangle(img,(x1,y1),(x2,y2),(0,255,0),2)
landmarks = predictor(imgGray,face)
myPoints = []
for n in range(68):
x = landmarks.part(n).x
y = landmarks.part(n).y
myPoints.append([x,y])
myPoints = np.array(myPoints)
imgLips = creatBox(img,myPoints[48:61],3,masked=True,cropped=False)
img1 = img.copy()
imgColorLips = cv2.fillPoly(img1, [myPoints[48:61]],(23,23,200))
imgColorLips = cv2.addWeighted(img, 0.5, imgColorLips, 0.3, 0.5)
cv2.putText(imgColorLips, 'hello', (0, 150), cv2.FONT_HERSHEY_COMPLEX_SMALL, 3, (0, 0, 255), 5)
cv2.imshow('main',imgColorLips)
cv2.waitKey(1)
|
[
"cv2.fillPoly",
"cv2.imread",
"numpy.zeros_like",
"dlib.shape_predictor",
"cv2.bitwise_and",
"cv2.putText",
"cv2.imshow",
"dlib.get_frontal_face_detector",
"numpy.array",
"cv2.addWeighted",
"cv2.VideoCapture",
"cv2.cvtColor",
"cv2.resize",
"cv2.waitKey",
"cv2.boundingRect"
] |
[((60, 79), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (76, 79), False, 'import cv2\n'), ((93, 125), 'dlib.get_frontal_face_detector', 'dlib.get_frontal_face_detector', ([], {}), '()\n', (123, 125), False, 'import dlib\n'), ((138, 199), 'dlib.shape_predictor', 'dlib.shape_predictor', (['"""shape_predictor_68_face_landmarks.dat"""'], {}), "('shape_predictor_68_face_landmarks.dat')\n", (158, 199), False, 'import dlib\n'), ((832, 869), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (844, 869), False, 'import cv2\n'), ((1713, 1727), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (1724, 1727), False, 'import cv2\n'), ((298, 316), 'numpy.zeros_like', 'np.zeros_like', (['img'], {}), '(img)\n', (311, 316), True, 'import numpy as np\n'), ((332, 377), 'cv2.fillPoly', 'cv2.fillPoly', (['mask', '[points]', '(255, 255, 255)'], {}), '(mask, [points], (255, 255, 255))\n', (344, 377), False, 'import cv2\n'), ((388, 414), 'cv2.bitwise_and', 'cv2.bitwise_and', (['img', 'mask'], {}), '(img, mask)\n', (403, 414), False, 'import cv2\n'), ((447, 471), 'cv2.boundingRect', 'cv2.boundingRect', (['points'], {}), '(points)\n', (463, 471), False, 'import cv2\n'), ((548, 595), 'cv2.resize', 'cv2.resize', (['imgCrop', '(0, 0)', 'None', 'scale', 'scale'], {}), '(imgCrop, (0, 0), None, scale, scale)\n', (558, 595), False, 'import cv2\n'), ((717, 742), 'cv2.imread', 'cv2.imread', (['"""2 copy.jpeg"""'], {}), "('2 copy.jpeg')\n", (727, 742), False, 'import cv2\n'), ((1298, 1316), 'numpy.array', 'np.array', (['myPoints'], {}), '(myPoints)\n', (1306, 1316), True, 'import numpy as np\n'), ((1442, 1494), 'cv2.fillPoly', 'cv2.fillPoly', (['img1', '[myPoints[48:61]]', '(23, 23, 200)'], {}), '(img1, [myPoints[48:61]], (23, 23, 200))\n', (1454, 1494), False, 'import cv2\n'), ((1515, 1564), 'cv2.addWeighted', 'cv2.addWeighted', (['img', '(0.5)', 'imgColorLips', '(0.3)', '(0.5)'], {}), '(img, 0.5, imgColorLips, 0.3, 0.5)\n', (1530, 1564), False, 'import cv2\n'), ((1573, 1672), 'cv2.putText', 'cv2.putText', (['imgColorLips', '"""hello"""', '(0, 150)', 'cv2.FONT_HERSHEY_COMPLEX_SMALL', '(3)', '(0, 0, 255)', '(5)'], {}), "(imgColorLips, 'hello', (0, 150), cv2.FONT_HERSHEY_COMPLEX_SMALL,\n 3, (0, 0, 255), 5)\n", (1584, 1672), False, 'import cv2\n'), ((1677, 1709), 'cv2.imshow', 'cv2.imshow', (['"""main"""', 'imgColorLips'], {}), "('main', imgColorLips)\n", (1687, 1709), False, 'import cv2\n')]
|
import numpy as np
from skimage.transform import resize
import skimage
import torchvision.utils as tvutils
import torch
import PIL
from PIL import Image
import torchvision
from scipy.ndimage import zoom  # needed by resize_image() for >3-channel inputs
class UnNormalize(object):
def __init__(self, mean, std):
self.mean = torch.tensor(mean).unsqueeze(0).unsqueeze(2).unsqueeze(3)
self.std = torch.tensor(std).unsqueeze(0).unsqueeze(2).unsqueeze(3)
def __call__(self, tensor):
"""
Args:
tensor (Tensor): Tensor image of size (C, H, W) to be normalized.
Returns:
Tensor: Normalized image.
"""
# for t, m, s in zip(tensor, self.mean, self.std):
# t * s + m
# # The normalize code -> t.sub_(m).div_(s)
return tensor * self.std.to(tensor.device) + self.mean.to(tensor.device)
imagenet_unnormalize = UnNormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
taskononomy_unnormalize = UnNormalize([0.5,0.5,0.5], [0.5, 0.5, 0.5])
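# Example (illustrative): for a normalized batch `x` of shape (N, 3, H, W),
# imagenet_unnormalize(x) maps it back to the original [0, 1] range by
# computing x * std + mean per channel. With mean = std = 0.5 (the convention
# used just above), a normalized value of -1 maps back to 0 and +1 to 1.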
def log_input_images(obs_unpacked, mlog, num_stack, key_names=['map'], meter_name='debug/input_images', step_num=0, reset_meter=True, phase='train', unnormalize=taskononomy_unnormalize):
# Plots the observations from the first process
stacked = []
for key_name in key_names:
if key_name not in obs_unpacked:
print(key_name, "not found")
continue
obs = obs_unpacked[key_name][0]
obs = (obs + 1.0) / 2.0
# obs = unnormalize(obs)
# obs = (obs * 2. - 1.)
try:
obs = obs.cpu()
except Exception:
# obs may already be a numpy array or otherwise lack a .cpu() method
pass
obs_chunked = list(torch.chunk(obs, num_stack, dim=0))
if obs_chunked[0].shape[2] == 1 or obs_chunked[0].shape[2] == 3:
obs_chunked = [o.permute(2, 0, 1) for o in obs_chunked]
obs_chunked = [hacky_resize(obs) for obs in obs_chunked]
key_stacked = torchvision.utils.make_grid(obs_chunked, nrow=num_stack, padding=2)
stacked.append(key_stacked)
stacked = torch.cat(stacked, dim=1)
mlog.update_meter(stacked, meters={meter_name}, phase=phase)
if reset_meter:
mlog.reset_meter(step_num, meterlist={meter_name})
def hacky_resize(obs: torch.Tensor) -> torch.Tensor:
obs_img_format = np.transpose((255 * obs.cpu().numpy()).astype(np.uint8), (1,2,0))
obs_resized = torch.Tensor(np.array(Image.fromarray(obs_img_format).resize((84,84))).astype(np.float32)).permute((2,0,1))
return obs_resized / 255.
def rescale_for_display( batch, rescale=True, normalize=False ):
'''
Prepares network output for display by optionally rescaling from [-1,1],
and by setting some pixels to the min/max of 0/1. This prevents matplotlib
from rescaling the images.
'''
if rescale:
display_batch = [ rescale_image( im.copy(), new_scale=[0, 1], current_scale=[-1, 1] )
for im in batch ]
else:
display_batch = batch.copy()
if not normalize:
for im in display_batch:
im[0,0,0] = 1.0 # Adjust some values so that matplotlib doesn't rescale
im[0,1,0] = 0.0 # Now adjust the min
return display_batch
def rescale_image(im, new_scale=[-1.,1.], current_scale=None, no_clip=False):
"""
Rescales an image pixel values to target_scale
Args:
img: A np.float_32 array, assumed between [0,1]
new_scale: [min,max]
current_scale: If not supplied, it is assumed to be in:
[0, 1]: if dtype=float
[0, 2^16]: if dtype=uint
[0, 255]: if dtype=ubyte
Returns:
rescaled_image
"""
# im = im.astype(np.float32)
if current_scale is not None:
min_val, max_val = current_scale
if not no_clip:
im = np.clip(im, min_val, max_val)
im = im - min_val
im /= (max_val - min_val)
min_val, max_val = new_scale
im *= (max_val - min_val)
im += min_val
im = skimage.img_as_float(im)
return im
def resize_image(im, new_dims, interp_order=1):
"""
Resize an image array with interpolation.
Parameters
----------
im : (H x W x K) ndarray
new_dims : (height, width) tuple of new dimensions.
interp_order : interpolation order, default is linear.
Returns
-------
im : resized ndarray with shape (new_dims[0], new_dims[1], K)
By kchen @ https://github.com/kchen92/joint-representation/blob/24b30ca6963d2ec99618af379c1e05e1f7026710/lib/data/input_pipeline_feed_dict.py
"""
if type(im) == PIL.PngImagePlugin.PngImageFile:
interps = [PIL.Image.NEAREST, PIL.Image.BILINEAR]
return skimage.util.img_as_float(im.resize(new_dims, interps[interp_order]))
if all( new_dims[i] == im.shape[i] for i in range( len( new_dims ) ) ):
resized_im = im #return im.astype(np.float32)
elif im.shape[-1] == 1 or im.shape[-1] == 3:
# skimage is fast but only understands {1,3} channel images
resized_im = resize(im, new_dims, order=interp_order, preserve_range=True)
else:
# ndimage interpolates anything but more slowly.
scale = tuple(np.array(new_dims, dtype=float) / np.array(im.shape[:2]))
resized_im = zoom(im, scale + (1,), order=interp_order)
# resized_im = resized_im.astype(np.float32)
return resized_im
def resize_rescale_image(img, new_dims, new_scale, interp_order=1, current_scale=None, no_clip=False):
"""
Resize an image array with interpolation, and rescale to be
between
Parameters
----------
im : (H x W x K) ndarray
new_dims : (height, width) tuple of new dimensions.
new_scale : (min, max) tuple of new scale.
interp_order : interpolation order, default is linear.
Returns
-------
im : resized ndarray with shape (new_dims[0], new_dims[1], K)
"""
img = skimage.img_as_float( img )
img = resize_image( img, new_dims, interp_order )
img = rescale_image( img, new_scale, current_scale=current_scale, no_clip=no_clip )
return img
def pack_images(x, prediction, label, mask=None):
uncertainty = None
if isinstance(prediction, tuple):
prediction, uncertainty = prediction
if len(label.shape) == 4 and label.shape[1] == 2:
zeros = torch.zeros(label.shape[0], 1, label.shape[2], label.shape[3]).to(label.device)
label = torch.cat([label, zeros], dim=1)
prediction = torch.cat([prediction, zeros], dim=1)
if uncertainty is not None:
uncertainty = torch.cat([uncertainty, zeros], dim=1)
if mask is not None:
mask = torch.cat([mask, mask[:,0].unsqueeze(1)], dim=1)
if len(x.shape) == 4 and x.shape[1] == 2:
zeros = torch.zeros(x.shape[0], 1, x.shape[2], x.shape[3]).to(x.device)
x = torch.cat([x, zeros], dim=1)
to_cat = []
if x.shape[1] <= 3:
to_cat.append(x)
shape_with_three_channels = list(x.shape)
shape_with_three_channels[1] = 3
to_cat.append(prediction.expand(shape_with_three_channels))
if uncertainty is not None:
print(uncertainty.min(), uncertainty.max())
uncertainty = 2*uncertainty - 1.0
uncertainty = uncertainty.clamp(min=-1.0, max=1.0)
to_cat.append(uncertainty.expand(shape_with_three_channels))
to_cat.append(label.expand(shape_with_three_channels))
if mask is not None:
to_cat.append(mask.expand(shape_with_three_channels))
# print([p.shape for p in to_cat])
im_samples = torch.cat(to_cat, dim=3)
im_samples = tvutils.make_grid(im_samples.detach().cpu(), nrow=1, padding=2)
return im_samples
def maybe_entriple(x, is_mask=False):
if x.shape[1] == 2:
if is_mask:
x = torch.cat([x, x[:,0].unsqueeze(1)], dim=1)
else:
zeros = torch.zeros(x.shape[0], 1, x.shape[2], x.shape[3]).to(x.device)
x = torch.cat([x, zeros], dim=1)
shape_with_three_channels = list(x.shape)
shape_with_three_channels[1] = 3
return x.expand(shape_with_three_channels)
def pack_chained_images(x, predictions, labels, mask=None):
x = maybe_entriple(x)
if mask is not None:
mask = maybe_entriple(mask, is_mask=True)
tripled_predictions, uncertainties = [], []
for p in predictions:
if isinstance(p, tuple):
p, u = p
uncertainties.append(maybe_entriple(u))
else:
uncertainties.append(None)
tripled_predictions.append(maybe_entriple(p))
predictions = tripled_predictions
labels = [maybe_entriple(l) for l in labels]
to_cat = []
if x.shape[1] <= 3:
to_cat.append(x)
for pred, uncert, label in zip(predictions, uncertainties, labels):
to_cat.append(label)
to_cat.append(pred)
if uncert is not None:
print(uncert.min(), uncert.max())
uncert = 2*uncert - 1.0
uncert = uncert.clamp(min=-1.0, max=1.0)
to_cat.append(uncert)
if mask is not None:
to_cat.append(mask)
# print([p.shape for p in to_cat])
im_samples = torch.cat(to_cat, dim=3)
im_samples = tvutils.make_grid(im_samples.detach().cpu(), nrow=1, padding=2)
return im_samples
|
[
"numpy.clip",
"PIL.Image.fromarray",
"skimage.img_as_float",
"numpy.array",
"torch.tensor",
"torch.chunk",
"torchvision.utils.make_grid",
"skimage.transform.resize",
"torch.zeros",
"torch.cat"
] |
[((2026, 2051), 'torch.cat', 'torch.cat', (['stacked'], {'dim': '(1)'}), '(stacked, dim=1)\n', (2035, 2051), False, 'import torch\n'), ((3979, 4003), 'skimage.img_as_float', 'skimage.img_as_float', (['im'], {}), '(im)\n', (3999, 4003), False, 'import skimage\n'), ((5885, 5910), 'skimage.img_as_float', 'skimage.img_as_float', (['img'], {}), '(img)\n', (5905, 5910), False, 'import skimage\n'), ((7528, 7552), 'torch.cat', 'torch.cat', (['to_cat'], {'dim': '(3)'}), '(to_cat, dim=3)\n', (7537, 7552), False, 'import torch\n'), ((9112, 9136), 'torch.cat', 'torch.cat', (['to_cat'], {'dim': '(3)'}), '(to_cat, dim=3)\n', (9121, 9136), False, 'import torch\n'), ((1908, 1975), 'torchvision.utils.make_grid', 'torchvision.utils.make_grid', (['obs_chunked'], {'nrow': 'num_stack', 'padding': '(2)'}), '(obs_chunked, nrow=num_stack, padding=2)\n', (1935, 1975), False, 'import torchvision\n'), ((6397, 6429), 'torch.cat', 'torch.cat', (['[label, zeros]'], {'dim': '(1)'}), '([label, zeros], dim=1)\n', (6406, 6429), False, 'import torch\n'), ((6451, 6488), 'torch.cat', 'torch.cat', (['[prediction, zeros]'], {'dim': '(1)'}), '([prediction, zeros], dim=1)\n', (6460, 6488), False, 'import torch\n'), ((6826, 6854), 'torch.cat', 'torch.cat', (['[x, zeros]'], {'dim': '(1)'}), '([x, zeros], dim=1)\n', (6835, 6854), False, 'import torch\n'), ((1644, 1678), 'torch.chunk', 'torch.chunk', (['obs', 'num_stack'], {'dim': '(0)'}), '(obs, num_stack, dim=0)\n', (1655, 1678), False, 'import torch\n'), ((3798, 3827), 'numpy.clip', 'np.clip', (['im', 'min_val', 'max_val'], {}), '(im, min_val, max_val)\n', (3805, 3827), True, 'import numpy as np\n'), ((5020, 5081), 'skimage.transform.resize', 'resize', (['im', 'new_dims'], {'order': 'interp_order', 'preserve_range': '(True)'}), '(im, new_dims, order=interp_order, preserve_range=True)\n', (5026, 5081), False, 'from skimage.transform import resize\n'), ((6551, 6589), 'torch.cat', 'torch.cat', (['[uncertainty, zeros]'], {'dim': '(1)'}), '([uncertainty, zeros], dim=1)\n', (6560, 6589), False, 'import torch\n'), ((7913, 7941), 'torch.cat', 'torch.cat', (['[x, zeros]'], {'dim': '(1)'}), '([x, zeros], dim=1)\n', (7922, 7941), False, 'import torch\n'), ((6301, 6363), 'torch.zeros', 'torch.zeros', (['label.shape[0]', '(1)', 'label.shape[2]', 'label.shape[3]'], {}), '(label.shape[0], 1, label.shape[2], label.shape[3])\n', (6312, 6363), False, 'import torch\n'), ((6750, 6800), 'torch.zeros', 'torch.zeros', (['x.shape[0]', '(1)', 'x.shape[2]', 'x.shape[3]'], {}), '(x.shape[0], 1, x.shape[2], x.shape[3])\n', (6761, 6800), False, 'import torch\n'), ((5171, 5202), 'numpy.array', 'np.array', (['new_dims'], {'dtype': 'float'}), '(new_dims, dtype=float)\n', (5179, 5202), True, 'import numpy as np\n'), ((5205, 5227), 'numpy.array', 'np.array', (['im.shape[:2]'], {}), '(im.shape[:2])\n', (5213, 5227), True, 'import numpy as np\n'), ((7833, 7883), 'torch.zeros', 'torch.zeros', (['x.shape[0]', '(1)', 'x.shape[2]', 'x.shape[3]'], {}), '(x.shape[0], 1, x.shape[2], x.shape[3])\n', (7844, 7883), False, 'import torch\n'), ((257, 275), 'torch.tensor', 'torch.tensor', (['mean'], {}), '(mean)\n', (269, 275), False, 'import torch\n'), ((334, 351), 'torch.tensor', 'torch.tensor', (['std'], {}), '(std)\n', (346, 351), False, 'import torch\n'), ((2377, 2408), 'PIL.Image.fromarray', 'Image.fromarray', (['obs_img_format'], {}), '(obs_img_format)\n', (2392, 2408), False, 'from PIL import Image\n')]
|
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
"""Test a Detectron network on an imdb (image database)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import defaultdict
import cv2
import datetime
import logging
import numpy as np
import os
import yaml
import shutil
import torch
from mitok.detect.bound3d import Bound3D, combine_bounds_3d_direct
from core.config import cfg
# from core.rpn_generator import generate_rpn_on_dataset #TODO: for rpn only case
# from core.rpn_generator import generate_rpn_on_range
from core.test import ct_detect_all
from datasets.ct_json_dataset import JsonDataset
from modeling.model_factory import GetRCNNModel
import nn as mynn
from utils.detectron_weight_helper import load_detectron_weight
import utils.env as envu
import utils.boxes as box_utils
import utils.net as net_utils
import utils.subprocess as subprocess_utils
import utils.vis as vis_utils
import utils.vis_gt as vis_gt_utils
from utils.ImageIO import windowing
from utils.io import save_object
from utils.timer import Timer
import os.path as osp
from sync_batchnorm.replicate import patch_replication_callback
logger = logging.getLogger(__name__)
from datasets.dataset_catalog import ANN_FN
from datasets.dataset_catalog import DATASETS
from datasets.dataset_catalog import CT_DIR
from datasets.dataset_catalog import CT_SUB_DIRS
from datasets.dataset_catalog import IM_PREFIX
# For CT
from datasets.dataset_catalog import MASK_DIR
def get_eval_functions():
# Determine which parent or child function should handle inference
if cfg.MODEL.RPN_ONLY:
raise NotImplementedError
# child_func = generate_rpn_on_range
# parent_func = generate_rpn_on_dataset
else:
# Generic case that handles all network types other than RPN-only nets
# and RetinaNet
child_func = test_net
parent_func = test_net_on_dataset
return parent_func, child_func
def get_inference_dataset(index, is_parent=True):
assert is_parent or len(cfg.TEST.DATASETS) == 1, \
'The child inference process can only work on a single dataset'
dataset_name = cfg.TEST.DATASETS[index]
if cfg.TEST.PRECOMPUTED_PROPOSALS:
assert is_parent or len(cfg.TEST.PROPOSAL_FILES) == 1, \
'The child inference process can only work on a single proposal file'
assert len(cfg.TEST.PROPOSAL_FILES) == len(cfg.TEST.DATASETS), \
'If proposals are used, one proposal file must be specified for ' \
'each dataset'
proposal_file = cfg.TEST.PROPOSAL_FILES[index]
else:
proposal_file = None
return dataset_name, proposal_file
def run_inference(
args, ind_range=None,
multi_gpu_testing=False, gpu_id=0,
check_expected_results=False):
parent_func, child_func = get_eval_functions()
is_parent = ind_range is None
def result_getter():
if is_parent:
# Parent case:
# In this case we're either running inference on the entire dataset in a
# single process or (if multi_gpu_testing is True) using this process to
# launch subprocesses that each run inference on a range of the dataset
all_results = {}
for i in range(len(cfg.TEST.DATASETS)):
dataset_name, proposal_file = get_inference_dataset(i)
output_dir = args.output_dir
results = parent_func(
args,
dataset_name,
proposal_file,
output_dir,
multi_gpu=multi_gpu_testing
)
return
else:
# Subprocess child case:
# In this case test_net was called via subprocess.Popen to execute on a
# range of inputs on a single dataset
dataset_name, proposal_file = get_inference_dataset(0, is_parent=False)
output_dir = args.output_dir
return child_func(
args,
dataset_name,
proposal_file,
output_dir,
ind_range=ind_range,
gpu_id=gpu_id
)
result_getter()
return None
def test_net_on_dataset(
args,
dataset_name,
proposal_file,
output_dir,
multi_gpu=False,
gpu_id=0):
"""Run inference on a dataset."""
if 'testA' in dataset_name or 'testB' in dataset_name:
dataset = None
sub_dirs = DATASETS[dataset_name][CT_SUB_DIRS]
image_dir = DATASETS[dataset_name][CT_DIR]
mask_dir = DATASETS[dataset_name][MASK_DIR]
with open(sub_dirs) as f:
lines = f.readlines()
lines = [line.strip() for line in lines]
roidb = []
for line in lines:
entry = {}
entry['image'] = osp.join(image_dir, line, 'norm_image.npz')
entry['mask_image'] = osp.join(mask_dir, line, 'mask_image.npz')
#entry[] = osp.join(mask_dir, line)
roidb.append(entry)
elif cfg.DATA_SOURCE == 'coco':
dataset = JsonDataset(dataset_name)
elif cfg.DATA_SOURCE == 'mammo':
dataset = MammoDataset(dataset_name)
#elif cfg.DATA_SOURCE == 'lesion':
# dataset = LesionDataset(dataset_name)
test_timer = Timer()
test_timer.tic()
if multi_gpu:
if 'testA' in dataset_name or 'testB' in dataset_name:
num_images = len(roidb)
else:
num_images = len(dataset.get_roidb())
multi_gpu_test_net_on_dataset(
args, dataset_name, proposal_file, num_images, output_dir
)
else:
test_net(
args, dataset_name, proposal_file, output_dir, gpu_id=gpu_id
)
test_timer.toc()
logger.info('Total inference time: {:.3f}s'.format(test_timer.average_time))
return None
def multi_gpu_test_net_on_dataset(
args, dataset_name, proposal_file, num_images, output_dir):
"""Multi-gpu inference on a dataset."""
binary_dir = envu.get_runtime_dir()
binary_ext = envu.get_py_bin_ext()
binary = os.path.join(binary_dir, args.test_net_file + binary_ext)
assert os.path.exists(binary), 'Binary \'{}\' not found'.format(binary)
# Pass the target dataset and proposal file (if any) via the command line
opts = ['TEST.DATASETS', '("{}",)'.format(dataset_name)]
if proposal_file:
opts += ['TEST.PROPOSAL_FILES', '("{}",)'.format(proposal_file)]
# Run inference in parallel in subprocesses
# Outputs will be a list of outputs from each subprocess, where the output
# of each subprocess is the dictionary saved by test_net().
outputs = subprocess_utils.process_in_parallel(
'CT_detection', num_images, binary, output_dir,
args.load_ckpt, args.load_detectron, opts
)
# Collate the results from each subprocess
all_results_dicts = {}
for det_data in outputs:
all_results_dicts.update(det_data)
CT_det_file = os.path.join(output_dir, 'CT_detections.pkl')
save_object(all_results_dicts,CT_det_file)
logger.info('Wrote CT level detections to: {}'.format(os.path.abspath(CT_det_file)))
return None
def test_net(
args,
dataset_name,
proposal_file,
output_dir,
ind_range=None,
gpu_id=0):
"""Run inference on all images in a dataset or over an index range of images
in a dataset using a single GPU.
"""
assert not cfg.MODEL.RPN_ONLY, \
'Use rpn_generate to generate proposals from RPN-only models'
roidb, dataset, start_ind, end_ind, total_num_images = get_roidb_and_dataset(
dataset_name, proposal_file, ind_range
)
# for debug
#roidb=roidb[0:1]
#import pdb
#pdb.set_trace()
model = initialize_model_from_cfg(args, gpu_id=gpu_id)
num_images = len(roidb)
num_classes = cfg.MODEL.NUM_CLASSES
all_results_dicts = {}
timers = defaultdict(Timer)
for i, entry in enumerate(roidb):
if cfg.TEST.PRECOMPUTED_PROPOSALS:
# The roidb may contain ground-truth rois (for example, if the roidb
# comes from the training or val split). We only want to evaluate
# detection on the *non*-ground-truth rois. We select only the rois
# that have the gt_classes field set to 0, which means there's no
# ground truth.
box_proposals = entry['boxes'][entry['gt_classes'] == 0]
if len(box_proposals) == 0:
continue
else:
# Faster R-CNN type models generate proposals on-the-fly with an
# in-network RPN; 1-stage models don't require proposals.
box_proposals = None
image_tensor = np.load(open(entry['image'], 'rb'))['data']
mask_tensor = None
#mask_tensor = np.load(open(entry['mask_image'], 'rb'))['data']
results_dict, num_slice = ct_detect_all(model, image_tensor, mask_tensor, slice_expand=10, box_proposals=None)
if cfg.VIS:
visualize_pred_gt(dataset, image_tensor, results_dict, entry, output_dir, folder_name='vis_ct', vis_pred_only=True)
bound_groups, bound3ds = combine_slice_level_pred(results_dict, cfg.CT_INFERENCE.COMBINE_THRESH)
all_results_dicts[entry['image']] = results_dict
#for bound3d in bound3ds:
# print(bound3d.cube, image_tensor.shape)
#combined_results_dict = bound3d_to_return_dict(bound3ds, results_dict)
#print("Merged bbox num %d" % len(bound3ds))
#if cfg.VIS:
#visualize_pred_gt(dataset, image_tensor, combined_results_dict, entry, output_dir, folder_name='vis_combined_ct')
if i % 100 == 0: # Reduce log file size
ave_total_time = np.sum([t.average_time for t in timers.values()])
eta_seconds = ave_total_time * (num_images - i - 1)
eta = str(datetime.timedelta(seconds=int(eta_seconds)))
det_time = (
timers['im_detect_bbox'].average_time +
timers['im_detect_mask'].average_time +
timers['im_detect_keypoints'].average_time
)
misc_time = (
timers['misc_bbox'].average_time +
timers['misc_mask'].average_time +
timers['misc_keypoints'].average_time
)
logger.info(
(
'im_detect: range [{:d}, {:d}] of {:d}: '
'{:d}/{:d} {:.3f}s + {:.3f}s (eta: {})'
).format(
start_ind + 1, end_ind, total_num_images, start_ind + i + 1,
start_ind + num_images, det_time, misc_time, eta
))
cfg_yaml = yaml.dump(cfg)
# save CT level det results(image_path-return_dict map)
if ind_range is not None:
CT_det_name = 'CT_detection_range_%s_%s.pkl' % tuple(ind_range)
else:
CT_det_name = 'CT_detections.pkl'
CT_det_file = os.path.join(output_dir, CT_det_name)
save_object(all_results_dicts,CT_det_file)
logger.info('Wrote CT level detections to: {}'.format(os.path.abspath(CT_det_file)))
return None
def bound3d_to_dict(bound3d):
"""Convert instance of class Bound to python dict.
"""
bound3d_dict = {'cube':bound3d.cube, 'pathch_number':bound3d.get_patch_num(),\
'score':bound3d.score, 'label':bound3d.label}
return bound3d_dict
def bound3ds_to_list(bound3ds):
"""Convert instance of class Bound to python list.
"""
bound3ds_list = []
for bound3d in bound3ds:
bound_dict = {'cube':bound3d.cube, 'pathch_number':bound3d.get_patch_num(),\
'score':bound3d.score, 'label':bound3d.label}
bound3ds_list.append(bound_dict)
return bound3ds_list
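# Illustrative example of one serialized bound (hypothetical values):
# {'cube': [10, 20, 5, 4, 6, 3], 'pathch_number': 12, 'score': 0.87, 'label': 3}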
def initialize_model_from_cfg(args, gpu_id=0):
"""Initialize a model from the global cfg. Loads test-time weights and
    sets the model to evaluation mode.
"""
model = GetRCNNModel()
model.eval()
if args.cuda:
model.cuda()
if args.load_ckpt:
load_name = args.load_ckpt
logger.info("loading checkpoint %s", load_name)
checkpoint = torch.load(load_name, map_location=lambda storage, loc: storage)
net_utils.load_ckpt(model, checkpoint['model'])
if args.load_detectron:
logger.info("loading detectron weights %s", args.load_detectron)
load_detectron_weight(model, args.load_detectron)
model = mynn.DataParallel(model, cpu_keywords=['im_info', 'roidb'], minibatch=True)
if cfg.TRAIN_SYNC_BN:
# Shu:For synchorinized BN
patch_replication_callback(model)
return model
def get_roidb_and_dataset(dataset_name, proposal_file, ind_range):
"""Get the roidb for the dataset specified in the global cfg. Optionally
restrict it to a range of indices if ind_range is a pair of integers.
"""
if 'testA' in dataset_name or 'testB' in dataset_name:
print('#'*20)
print('tianchi testA/B')
print('='*20)
dataset = None
sub_dirs = DATASETS[dataset_name][CT_SUB_DIRS]
image_dir = DATASETS[dataset_name][CT_DIR]
mask_dir = DATASETS[dataset_name][MASK_DIR]
with open(sub_dirs) as f:
lines = f.readlines()
lines = [line.strip() for line in lines]
roidb = []
for line in lines:
entry = {}
entry['image'] = osp.join(image_dir, line, 'norm_image.npz')
entry['mask_image'] = osp.join(mask_dir, line, 'mask_image.npz')
#entry[] = osp.join(mask_dir, line)
roidb.append(entry)
else:
dataset = JsonDataset(dataset_name)
if cfg.TEST.PRECOMPUTED_PROPOSALS:
assert proposal_file, 'No proposal file given'
roidb = dataset.get_roidb(
proposal_file=proposal_file,
proposal_limit=cfg.TEST.PROPOSAL_LIMIT
)
else:
roidb = dataset.get_roidb(gt=True)
print('len roidb: ',len(roidb))
if ind_range is not None:
total_num_images = len(roidb)
start, end = ind_range
roidb = roidb[start:end]
else:
start = 0
end = len(roidb)
total_num_images = end
return roidb, dataset, start, end, total_num_images
def empty_results(num_classes, num_images):
"""Return empty results lists for boxes, masks, and keypoints.
Box detections are collected into:
all_boxes[cls][image] = N x 5 array with columns (x1, y1, x2, y2, score)
Instance mask predictions are collected into:
all_segms[cls][image] = [...] list of COCO RLE encoded masks that are in
1:1 correspondence with the boxes in all_boxes[cls][image]
Keypoint predictions are collected into:
all_keyps[cls][image] = [...] list of keypoints results, each encoded as
a 3D array (#rois, 4, #keypoints) with the 4 rows corresponding to
[x, y, logit, prob] (See: utils.keypoints.heatmaps_to_keypoints).
Keypoints are recorded for person (cls = 1); they are in 1:1
correspondence with the boxes in all_boxes[cls][image].
"""
# Note: do not be tempted to use [[] * N], which gives N references to the
# *same* empty list.
all_bound3ds = [[[] for _ in range(num_images)] for _ in range(num_classes)]
return all_bound3ds
def extend_results(index, all_res, bound3ds):
"""Add results for an image to the set of all results at the specified
index.
"""
# Skip cls_idx 0 (__background__)
#for cls_idx in range(1, len(im_res)):
for bound3d in bound3ds:
cls_idx = bound3d.label
all_res[cls_idx][index].append(bound3d_to_dict(bound3d))
def combine_slice_level_pred(results_dict, thresh, cls_num=15, max_slices_stride=5, iom_thresh=0.7):
""" Group slice level bounds (x,y,w,h,slice_idx,label,score) into bound3d. """
combine_opt = {}
    # check whether a per-class threshold is defined
if isinstance(thresh, list):
thresh_list = thresh
else:
thresh_list = [thresh for _ in range(cls_num)]
label_combine_matrix = np.ones((cls_num, cls_num), dtype='int32') * -1
label_combine_matrix[range(1,cls_num), range(1,cls_num)] = range(1,cls_num)
least_inter_ratio_matrix = np.zeros((cls_num, cls_num), dtype='float32')
least_inter_ratio_matrix[range(1,cls_num), range(1,cls_num)] = np.ones((cls_num-1), dtype='float32') * iom_thresh
combine_opt['label_combine_matrix'] = label_combine_matrix
combine_opt['least_inter_ratio_matrix'] = least_inter_ratio_matrix
combine_opt['max_slices_stride'] = max_slices_stride
bound2d_list = []
for slice_idx, slice_pred in results_dict.items():
scores, boxes, cls_boxes = slice_pred
_,_,_, classes = vis_gt_utils.convert_from_cls_format(
cls_boxes, None, None)
#print(len(cls_boxes), cls_boxes[0])
for idx, score in enumerate(scores):
if score >= thresh_list[classes[idx]]:
x, y, w, h = box_utils.xyxy_to_xywh(boxes[idx, :].tolist())
bound2d_list.append([x, y, w, h, slice_idx, classes[idx], score])
#print([x, y, w, h, slice_idx, classes[idx], score])
bound_groups = combine_bounds_3d_direct(bound2d_list, combine_opt)
bound3ds = []
direct = 0
for bound_group in bound_groups:
bound3d = Bound3D()
for bound2d in bound_group:
x, y, w, h, slice_id, label, score = bound2d
cube = [x, y, slice_id, w, h, 1]
bound3d.append(cube, direct, label, score, combine_opt['label_combine_matrix'])
if not bound3d.is_valid:
            # skip invalid 3D bounds instead of aborting the whole combination
            continue
bound3ds.append(bound3d)
#print(bound3d)
return bound_groups, bound3ds
def bound3d_to_return_dict(bound3ds, results_dict):
"""convert from bound3d to results_dict.
bound3d.cube (x,y,z,w,h,d)
cls_boxes is a list of #classes elements, each element has shape (#detection, 5).
[x1, y1, x2, y2, score]
return_dict: dict with {slice: cls_boxes}
"""
slice_pred = bound3d_to_slice_pred(bound3ds)
class_num = int(np.max(slice_pred[:, 6]))
print(class_num)
total_slice = np.unique(slice_pred[:, 5]).tolist()
return_dict = {}
for key in results_dict.keys():
return_dict[key] = ([], [], [[] for i in range(class_num + 1)])
for cur_slice in total_slice:
if cur_slice not in results_dict.keys():
#print("cur_slice %d is not in image_tensor"%cur_slice)
continue
cur_index = np.where(slice_pred[:, 5] == cur_slice)[0]
cur_bboxes = slice_pred[cur_index, :]
#print('cur_boxes all classes score, slice, label: ', cur_bboxes)
for cur_class in range(class_num + 1):
class_index = np.where(cur_bboxes[:, 6] == cur_class)[0]
return_dict[int(cur_slice)][2][cur_class] = cur_bboxes[class_index, 0:5]
return return_dict
def bound3d_to_slice_pred(bound3ds):
"""convert from bound3d to slice pred.
slice pred in the form of N*6 array, with each row represents [x1, y1, x2, y2, score, slice, label]
"""
slice_pred = np.empty((0, 7), dtype=np.float32)
for bound3d in bound3ds:
x, y, z, w, h, d = bound3d.cube
label = bound3d.label
score = bound3d.score
x2 = x + max(0, w - 1)
y2 = y + max(0, h - 1)
z2 = z + max(0, d - 1)
for cur_slice in range(int(z), int(z2) + 1):
tmp_pred = np.zeros((1, 7), dtype=np.float32)
tmp_pred[0,:] = [x, y, x2, y2, score, cur_slice, label]
slice_pred = np.append(slice_pred, tmp_pred, axis=0)
return slice_pred
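# Illustrative example (hypothetical values): a bound3d with cube [10, 20, 5, 4, 6, 3],
# label 2 and score 0.9 expands to one row per slice in [5, 7]:
# [10, 20, 13, 25, 0.9, slice, 2], where x2 = x + w - 1 and y2 = y + h - 1.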
def visualize_pred_gt(dataset, image_tensor, results_dict, entry, output_dir, folder_name='vis_ct', vis_pred_only=False):
im_dir =os.sep.join(entry['image'].split(os.sep)[-4:-1])
if cfg.LESION.LESION_ENABLED:
image_tensor = image_tensor.astype(np.float32)
image_tensor -= 32768
print(image_tensor.max(), image_tensor.min())
image_tensor = windowing(image_tensor, [-1400,200])
image_tensor = image_tensor.astype(np.uint8)
for center_idx in range(image_tensor.shape[0]):
other_im_show = cv2.merge([image_tensor[center_idx, :, :], image_tensor[center_idx, :, :], image_tensor[center_idx, :, :]])
if vis_pred_only:
gt_idx = []
gt_boxes_show = [[], []]
gt_classes = [[], []]
else:
gt_idx = np.where(entry['boxes'][:, 4] == center_idx)[0]
gt_boxes_show = [entry['boxes'][gt_idx, :4].tolist(), []]
gt_classes = [entry['gt_classes'][gt_idx].tolist(), []]
im_show = cv2.merge([image_tensor[center_idx, :, :], image_tensor[center_idx, :, :], image_tensor[center_idx, :, :]])
if len(gt_boxes_show[0]) == 0:
tmp_name = '%03d' % center_idx
else:
tmp_name = '%03d_gt' % center_idx
vis_gt_utils.vis_one_image(
im_show,
other_im_show,
tmp_name,
os.path.join(output_dir, folder_name, im_dir),
results_dict[center_idx][2],
gt_boxes_show,
segms=None,
thresh=cfg.VIS_TH,
box_alpha=0.8,
dataset=dataset,
show_class=True,
gt_classes = gt_classes
)
|
[
"logging.getLogger",
"core.test.ct_detect_all",
"mitok.detect.bound3d.combine_bounds_3d_direct",
"os.path.exists",
"utils.ImageIO.windowing",
"utils.net.load_ckpt",
"numpy.where",
"utils.timer.Timer",
"numpy.max",
"utils.io.save_object",
"numpy.empty",
"cv2.merge",
"numpy.ones",
"yaml.dump",
"mitok.detect.bound3d.Bound3D",
"utils.subprocess.process_in_parallel",
"datasets.ct_json_dataset.JsonDataset",
"nn.DataParallel",
"modeling.model_factory.GetRCNNModel",
"numpy.unique",
"torch.load",
"sync_batchnorm.replicate.patch_replication_callback",
"os.path.join",
"utils.detectron_weight_helper.load_detectron_weight",
"numpy.append",
"utils.env.get_runtime_dir",
"numpy.zeros",
"collections.defaultdict",
"os.path.abspath",
"utils.env.get_py_bin_ext",
"utils.vis_gt.convert_from_cls_format"
] |
[((1871, 1898), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1888, 1898), False, 'import logging\n'), ((6056, 6063), 'utils.timer.Timer', 'Timer', ([], {}), '()\n', (6061, 6063), False, 'from utils.timer import Timer\n'), ((6780, 6802), 'utils.env.get_runtime_dir', 'envu.get_runtime_dir', ([], {}), '()\n', (6800, 6802), True, 'import utils.env as envu\n'), ((6820, 6841), 'utils.env.get_py_bin_ext', 'envu.get_py_bin_ext', ([], {}), '()\n', (6839, 6841), True, 'import utils.env as envu\n'), ((6855, 6912), 'os.path.join', 'os.path.join', (['binary_dir', '(args.test_net_file + binary_ext)'], {}), '(binary_dir, args.test_net_file + binary_ext)\n', (6867, 6912), False, 'import os\n'), ((6924, 6946), 'os.path.exists', 'os.path.exists', (['binary'], {}), '(binary)\n', (6938, 6946), False, 'import os\n'), ((7430, 7561), 'utils.subprocess.process_in_parallel', 'subprocess_utils.process_in_parallel', (['"""CT_detection"""', 'num_images', 'binary', 'output_dir', 'args.load_ckpt', 'args.load_detectron', 'opts'], {}), "('CT_detection', num_images, binary,\n output_dir, args.load_ckpt, args.load_detectron, opts)\n", (7466, 7561), True, 'import utils.subprocess as subprocess_utils\n'), ((7746, 7791), 'os.path.join', 'os.path.join', (['output_dir', '"""CT_detections.pkl"""'], {}), "(output_dir, 'CT_detections.pkl')\n", (7758, 7791), False, 'import os\n'), ((7796, 7839), 'utils.io.save_object', 'save_object', (['all_results_dicts', 'CT_det_file'], {}), '(all_results_dicts, CT_det_file)\n', (7807, 7839), False, 'from utils.io import save_object\n'), ((8696, 8714), 'collections.defaultdict', 'defaultdict', (['Timer'], {}), '(Timer)\n', (8707, 8714), False, 'from collections import defaultdict\n'), ((11479, 11493), 'yaml.dump', 'yaml.dump', (['cfg'], {}), '(cfg)\n', (11488, 11493), False, 'import yaml\n'), ((11726, 11763), 'os.path.join', 'os.path.join', (['output_dir', 'CT_det_name'], {}), '(output_dir, CT_det_name)\n', (11738, 11763), False, 'import os\n'), ((11768, 11811), 'utils.io.save_object', 'save_object', (['all_results_dicts', 'CT_det_file'], {}), '(all_results_dicts, CT_det_file)\n', (11779, 11811), False, 'from utils.io import save_object\n'), ((12714, 12728), 'modeling.model_factory.GetRCNNModel', 'GetRCNNModel', ([], {}), '()\n', (12726, 12728), False, 'from modeling.model_factory import GetRCNNModel\n'), ((13216, 13291), 'nn.DataParallel', 'mynn.DataParallel', (['model'], {'cpu_keywords': "['im_info', 'roidb']", 'minibatch': '(True)'}), "(model, cpu_keywords=['im_info', 'roidb'], minibatch=True)\n", (13233, 13291), True, 'import nn as mynn\n'), ((17000, 17045), 'numpy.zeros', 'np.zeros', (['(cls_num, cls_num)'], {'dtype': '"""float32"""'}), "((cls_num, cls_num), dtype='float32')\n", (17008, 17045), True, 'import numpy as np\n'), ((17965, 18016), 'mitok.detect.bound3d.combine_bounds_3d_direct', 'combine_bounds_3d_direct', (['bound2d_list', 'combine_opt'], {}), '(bound2d_list, combine_opt)\n', (17989, 18016), False, 'from mitok.detect.bound3d import Bound3D, combine_bounds_3d_direct\n'), ((19916, 19950), 'numpy.empty', 'np.empty', (['(0, 7)'], {'dtype': 'np.float32'}), '((0, 7), dtype=np.float32)\n', (19924, 19950), True, 'import numpy as np\n'), ((9670, 9758), 'core.test.ct_detect_all', 'ct_detect_all', (['model', 'image_tensor', 'mask_tensor'], {'slice_expand': '(10)', 'box_proposals': 'None'}), '(model, image_tensor, mask_tensor, slice_expand=10,\n box_proposals=None)\n', (9683, 9758), False, 'from core.test import ct_detect_all\n'), ((12922, 12986), 
'torch.load', 'torch.load', (['load_name'], {'map_location': '(lambda storage, loc: storage)'}), '(load_name, map_location=lambda storage, loc: storage)\n', (12932, 12986), False, 'import torch\n'), ((12995, 13042), 'utils.net.load_ckpt', 'net_utils.load_ckpt', (['model', "checkpoint['model']"], {}), "(model, checkpoint['model'])\n", (13014, 13042), True, 'import utils.net as net_utils\n'), ((13153, 13202), 'utils.detectron_weight_helper.load_detectron_weight', 'load_detectron_weight', (['model', 'args.load_detectron'], {}), '(model, args.load_detectron)\n', (13174, 13202), False, 'from utils.detectron_weight_helper import load_detectron_weight\n'), ((13361, 13394), 'sync_batchnorm.replicate.patch_replication_callback', 'patch_replication_callback', (['model'], {}), '(model)\n', (13387, 13394), False, 'from sync_batchnorm.replicate import patch_replication_callback\n'), ((14402, 14427), 'datasets.ct_json_dataset.JsonDataset', 'JsonDataset', (['dataset_name'], {}), '(dataset_name)\n', (14413, 14427), False, 'from datasets.ct_json_dataset import JsonDataset\n'), ((16841, 16883), 'numpy.ones', 'np.ones', (['(cls_num, cls_num)'], {'dtype': '"""int32"""'}), "((cls_num, cls_num), dtype='int32')\n", (16848, 16883), True, 'import numpy as np\n'), ((17113, 17150), 'numpy.ones', 'np.ones', (['(cls_num - 1)'], {'dtype': '"""float32"""'}), "(cls_num - 1, dtype='float32')\n", (17120, 17150), True, 'import numpy as np\n'), ((17505, 17564), 'utils.vis_gt.convert_from_cls_format', 'vis_gt_utils.convert_from_cls_format', (['cls_boxes', 'None', 'None'], {}), '(cls_boxes, None, None)\n', (17541, 17564), True, 'import utils.vis_gt as vis_gt_utils\n'), ((18106, 18115), 'mitok.detect.bound3d.Bound3D', 'Bound3D', ([], {}), '()\n', (18113, 18115), False, 'from mitok.detect.bound3d import Bound3D, combine_bounds_3d_direct\n'), ((18881, 18905), 'numpy.max', 'np.max', (['slice_pred[:, 6]'], {}), '(slice_pred[:, 6])\n', (18887, 18905), True, 'import numpy as np\n'), ((20821, 20858), 'utils.ImageIO.windowing', 'windowing', (['image_tensor', '[-1400, 200]'], {}), '(image_tensor, [-1400, 200])\n', (20830, 20858), False, 'from utils.ImageIO import windowing\n'), ((20987, 21098), 'cv2.merge', 'cv2.merge', (['[image_tensor[center_idx, :, :], image_tensor[center_idx, :, :],\n image_tensor[center_idx, :, :]]'], {}), '([image_tensor[center_idx, :, :], image_tensor[center_idx, :, :],\n image_tensor[center_idx, :, :]])\n', (20996, 21098), False, 'import cv2\n'), ((21455, 21566), 'cv2.merge', 'cv2.merge', (['[image_tensor[center_idx, :, :], image_tensor[center_idx, :, :],\n image_tensor[center_idx, :, :]]'], {}), '([image_tensor[center_idx, :, :], image_tensor[center_idx, :, :],\n image_tensor[center_idx, :, :]])\n', (21464, 21566), False, 'import cv2\n'), ((5589, 5632), 'os.path.join', 'osp.join', (['image_dir', 'line', '"""norm_image.npz"""'], {}), "(image_dir, line, 'norm_image.npz')\n", (5597, 5632), True, 'import os.path as osp\n'), ((5667, 5709), 'os.path.join', 'osp.join', (['mask_dir', 'line', '"""mask_image.npz"""'], {}), "(mask_dir, line, 'mask_image.npz')\n", (5675, 5709), True, 'import os.path as osp\n'), ((5844, 5869), 'datasets.ct_json_dataset.JsonDataset', 'JsonDataset', (['dataset_name'], {}), '(dataset_name)\n', (5855, 5869), False, 'from datasets.ct_json_dataset import JsonDataset\n'), ((7898, 7926), 'os.path.abspath', 'os.path.abspath', (['CT_det_file'], {}), '(CT_det_file)\n', (7913, 7926), False, 'import os\n'), ((11870, 11898), 'os.path.abspath', 'os.path.abspath', (['CT_det_file'], {}), '(CT_det_file)\n', 
(11885, 11898), False, 'import os\n'), ((14173, 14216), 'os.path.join', 'osp.join', (['image_dir', 'line', '"""norm_image.npz"""'], {}), "(image_dir, line, 'norm_image.npz')\n", (14181, 14216), True, 'import os.path as osp\n'), ((14251, 14293), 'os.path.join', 'osp.join', (['mask_dir', 'line', '"""mask_image.npz"""'], {}), "(mask_dir, line, 'mask_image.npz')\n", (14259, 14293), True, 'import os.path as osp\n'), ((18946, 18973), 'numpy.unique', 'np.unique', (['slice_pred[:, 5]'], {}), '(slice_pred[:, 5])\n', (18955, 18973), True, 'import numpy as np\n'), ((19313, 19352), 'numpy.where', 'np.where', (['(slice_pred[:, 5] == cur_slice)'], {}), '(slice_pred[:, 5] == cur_slice)\n', (19321, 19352), True, 'import numpy as np\n'), ((20249, 20283), 'numpy.zeros', 'np.zeros', (['(1, 7)'], {'dtype': 'np.float32'}), '((1, 7), dtype=np.float32)\n', (20257, 20283), True, 'import numpy as np\n'), ((20377, 20416), 'numpy.append', 'np.append', (['slice_pred', 'tmp_pred'], {'axis': '(0)'}), '(slice_pred, tmp_pred, axis=0)\n', (20386, 20416), True, 'import numpy as np\n'), ((21823, 21868), 'os.path.join', 'os.path.join', (['output_dir', 'folder_name', 'im_dir'], {}), '(output_dir, folder_name, im_dir)\n', (21835, 21868), False, 'import os\n'), ((19549, 19588), 'numpy.where', 'np.where', (['(cur_bboxes[:, 6] == cur_class)'], {}), '(cur_bboxes[:, 6] == cur_class)\n', (19557, 19588), True, 'import numpy as np\n'), ((21251, 21295), 'numpy.where', 'np.where', (["(entry['boxes'][:, 4] == center_idx)"], {}), "(entry['boxes'][:, 4] == center_idx)\n", (21259, 21295), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Bokeh Visualization Template
This template is a general outline for turning your data into a
visualization using Bokeh.
"""
from __future__ import (division, absolute_import, print_function,
unicode_literals)
import numpy as np
# Bokeh libraries
from bokeh.io import output_file
from bokeh.plotting import figure, show
# My word count data
day_num = np.linspace(1, 10, 10)
daily_words = [450, 628, 488, 210, 287, 791, 508, 639, 397, 943]
cumulative_words = np.cumsum(daily_words)
# Output the visualization to a static HTML file
output_file('tutorial_progress.html', title='My Tutorial Progress')
# Create a figure with a numeric x-axis (day number)
fig = figure(
title='My Tutorial Progress',
plot_height=400,
plot_width=700,
x_axis_label='Day Number',
y_axis_label='Words Written',
x_minor_ticks=2,
y_range=(0, 6000),
toolbar_location=None)
# The daily words will be represented as vertical bars (columns)
fig.vbar(
x=day_num,
bottom=0,
top=daily_words,
color='blue',
width=0.75,
legend='Daily')
# The cumulative sum will be a trend line
fig.line(
x=day_num,
y=cumulative_words,
color='gray',
line_width=1,
legend='Cumulative')
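# Note: newer Bokeh releases replace the 'legend' keyword used above with
# 'legend_label', and 'plot_height'/'plot_width' with 'height'/'width'.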
# Put the legend in the upper left corner
fig.legend.location = 'top_left'
# Let's check it out
show(fig)
|
[
"bokeh.io.output_file",
"bokeh.plotting.show",
"bokeh.plotting.figure",
"numpy.linspace",
"numpy.cumsum"
] |
[((428, 450), 'numpy.linspace', 'np.linspace', (['(1)', '(10)', '(10)'], {}), '(1, 10, 10)\n', (439, 450), True, 'import numpy as np\n'), ((535, 557), 'numpy.cumsum', 'np.cumsum', (['daily_words'], {}), '(daily_words)\n', (544, 557), True, 'import numpy as np\n'), ((611, 678), 'bokeh.io.output_file', 'output_file', (['"""tutorial_progress.html"""'], {'title': '"""My Tutorial Progress"""'}), "('tutorial_progress.html', title='My Tutorial Progress')\n", (622, 678), False, 'from bokeh.io import output_file\n'), ((732, 926), 'bokeh.plotting.figure', 'figure', ([], {'title': '"""My Tutorial Progress"""', 'plot_height': '(400)', 'plot_width': '(700)', 'x_axis_label': '"""Day Number"""', 'y_axis_label': '"""Words Written"""', 'x_minor_ticks': '(2)', 'y_range': '(0, 6000)', 'toolbar_location': 'None'}), "(title='My Tutorial Progress', plot_height=400, plot_width=700,\n x_axis_label='Day Number', y_axis_label='Words Written', x_minor_ticks=\n 2, y_range=(0, 6000), toolbar_location=None)\n", (738, 926), False, 'from bokeh.plotting import figure, show\n'), ((1382, 1391), 'bokeh.plotting.show', 'show', (['fig'], {}), '(fig)\n', (1386, 1391), False, 'from bokeh.plotting import figure, show\n')]
|
# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from abc import ABC, abstractmethod
from io import StringIO
from typing import TYPE_CHECKING, Any, List, Union
import numpy as np # type: ignore
if TYPE_CHECKING:
from .query_compiler import QueryCompiler
class ArithmeticObject(ABC):
@property
@abstractmethod
def value(self) -> str:
pass
@abstractmethod
def dtype(self) -> np.dtype:
pass
@abstractmethod
def resolve(self) -> str:
pass
@abstractmethod
def __repr__(self) -> str:
pass
class ArithmeticString(ArithmeticObject):
def __init__(self, value: str):
self._value = value
def resolve(self) -> str:
return self.value
@property
def dtype(self) -> np.dtype:
return np.dtype(object)
@property
def value(self) -> str:
return f"'{self._value}'"
def __repr__(self) -> str:
return self.value
class ArithmeticNumber(ArithmeticObject):
def __init__(self, value: Union[int, float], dtype: np.dtype):
self._value = value
self._dtype = dtype
def resolve(self) -> str:
return self.value
@property
def value(self) -> str:
return f"{self._value}"
@property
def dtype(self) -> np.dtype:
return self._dtype
def __repr__(self) -> str:
return self.value
class ArithmeticSeries(ArithmeticObject):
"""Represents each item in a 'Series' by using painless scripts
to evaluate each document in an index as a part of a query.
"""
def __init__(
self, query_compiler: "QueryCompiler", display_name: str, dtype: np.dtype
):
# type defs
self._value: str
self._tasks: List["ArithmeticTask"]
task = query_compiler.get_arithmetic_op_fields()
if task is not None:
assert isinstance(task._arithmetic_series, ArithmeticSeries)
self._value = task._arithmetic_series.value
self._tasks = task._arithmetic_series._tasks.copy()
self._dtype = dtype
else:
aggregatable_field_name = query_compiler.display_name_to_aggregatable_name(
display_name
)
if aggregatable_field_name is None:
raise ValueError(
f"Can not perform arithmetic operations on non aggregatable fields"
f"{display_name} is not aggregatable."
)
self._value = f"doc['{aggregatable_field_name}'].value"
self._tasks = []
self._dtype = dtype
@property
def value(self) -> str:
return self._value
@property
def dtype(self) -> np.dtype:
return self._dtype
def __repr__(self) -> str:
buf = StringIO()
buf.write(f"Series: {self.value} ")
buf.write("Tasks: ")
for task in self._tasks:
buf.write(f"{task!r} ")
return buf.getvalue()
def resolve(self) -> str:
value = self._value
for task in self._tasks:
if task.op_name == "__add__":
value = f"({value} + {task.object.resolve()})"
elif task.op_name in {"__truediv__", "__div__"}:
value = f"({value} / {task.object.resolve()})"
elif task.op_name == "__floordiv__":
value = f"Math.floor({value} / {task.object.resolve()})"
elif task.op_name == "__mod__":
value = f"({value} % {task.object.resolve()})"
elif task.op_name == "__mul__":
value = f"({value} * {task.object.resolve()})"
elif task.op_name == "__pow__":
value = f"Math.pow({value}, {task.object.resolve()})"
elif task.op_name == "__sub__":
value = f"({value} - {task.object.resolve()})"
elif task.op_name == "__radd__":
value = f"({task.object.resolve()} + {value})"
elif task.op_name in {"__rtruediv__", "__rdiv__"}:
value = f"({task.object.resolve()} / {value})"
elif task.op_name == "__rfloordiv__":
value = f"Math.floor({task.object.resolve()} / {value})"
elif task.op_name == "__rmod__":
value = f"({task.object.resolve()} % {value})"
elif task.op_name == "__rmul__":
value = f"({task.object.resolve()} * {value})"
elif task.op_name == "__rpow__":
value = f"Math.pow({task.object.resolve()}, {value})"
elif task.op_name == "__rsub__":
value = f"({task.object.resolve()} - {value})"
return value
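    # Illustrative example (hypothetical field name): a series whose value is
    # "doc['total_price'].value" with tasks [__add__ 1, __rmul__ 2] resolves to the
    # painless expression "(2 * (doc['total_price'].value + 1))".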
def arithmetic_operation(self, op_name: str, right: Any) -> "ArithmeticSeries":
# check if operation is supported (raises on unsupported)
self.check_is_supported(op_name, right)
task = ArithmeticTask(op_name, right)
self._tasks.append(task)
return self
def check_is_supported(self, op_name: str, right: Any) -> bool:
# supported set is
# series.number op_name number (all ops)
# series.string op_name string (only add)
# series.string op_name int (only mul)
# series.string op_name float (none)
# series.int op_name string (none)
# series.float op_name string (none)
# see end of https://pandas.pydata.org/pandas-docs/stable/getting_started/basics.html?highlight=dtype
# for dtype hierarchy
right_is_integer = np.issubdtype(right.dtype, np.number)
if np.issubdtype(self.dtype, np.number) and right_is_integer:
# series.number op_name number (all ops)
return True
self_is_object = np.issubdtype(self.dtype, np.object_)
if self_is_object and np.issubdtype(right.dtype, np.object_):
# series.string op_name string (only add)
if op_name == "__add__" or op_name == "__radd__":
return True
if self_is_object and right_is_integer:
# series.string op_name int (only mul)
if op_name == "__mul__":
return True
raise TypeError(
f"Arithmetic operation on incompatible types {self.dtype} {op_name} {right.dtype}"
)
class ArithmeticTask:
def __init__(self, op_name: str, object: ArithmeticObject):
self._op_name = op_name
if not isinstance(object, ArithmeticObject):
raise TypeError(f"Task requires ArithmeticObject not {type(object)}")
self._object = object
def __repr__(self) -> str:
buf = StringIO()
buf.write(f"op_name: {self.op_name} object: {self.object!r} ")
return buf.getvalue()
@property
def op_name(self) -> str:
return self._op_name
@property
def object(self) -> ArithmeticObject:
return self._object
|
[
"numpy.issubdtype",
"numpy.dtype",
"io.StringIO"
] |
[((1530, 1546), 'numpy.dtype', 'np.dtype', (['object'], {}), '(object)\n', (1538, 1546), True, 'import numpy as np\n'), ((3521, 3531), 'io.StringIO', 'StringIO', ([], {}), '()\n', (3529, 3531), False, 'from io import StringIO\n'), ((6243, 6280), 'numpy.issubdtype', 'np.issubdtype', (['right.dtype', 'np.number'], {}), '(right.dtype, np.number)\n', (6256, 6280), True, 'import numpy as np\n'), ((6454, 6491), 'numpy.issubdtype', 'np.issubdtype', (['self.dtype', 'np.object_'], {}), '(self.dtype, np.object_)\n', (6467, 6491), True, 'import numpy as np\n'), ((7334, 7344), 'io.StringIO', 'StringIO', ([], {}), '()\n', (7342, 7344), False, 'from io import StringIO\n'), ((6292, 6328), 'numpy.issubdtype', 'np.issubdtype', (['self.dtype', 'np.number'], {}), '(self.dtype, np.number)\n', (6305, 6328), True, 'import numpy as np\n'), ((6522, 6560), 'numpy.issubdtype', 'np.issubdtype', (['right.dtype', 'np.object_'], {}), '(right.dtype, np.object_)\n', (6535, 6560), True, 'import numpy as np\n')]
|
# Copyright 2019 The FastEstimator Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from typing import Collection, TypeVar, Union
import numpy as np
import tensorflow as tf
import torch
from fastestimator.util.util import STRING_TO_TF_DTYPE, STRING_TO_TORCH_DTYPE
Tensor = TypeVar('Tensor', tf.Tensor, torch.Tensor, np.ndarray)
def cast(data: Union[Collection, Tensor], dtype: str) -> Union[Collection, Tensor]:
"""Cast the data to a specific data type recursively.
This method can be used with Numpy data:
```python
data = {"x": np.ones((10,15)), "y":[np.ones((4)), np.ones((5, 3))], "z":{"key":np.ones((2,2))}}
fe.backend.to_type(data)
# {'x': dtype('float64'), 'y': [dtype('float64'), dtype('float64')], 'z': {'key': dtype('float64')}}
data = fe.backend.cast(data, "float16")
fe.backend.to_type(data)
# {'x': dtype('float16'), 'y': [dtype('float16'), dtype('float16')], 'z': {'key': dtype('float16')}}
```
This method can be used with TensorFlow tensors:
```python
data = {"x": tf.ones((10,15)), "y":[tf.ones((4)), tf.ones((5, 3))], "z":{"key":tf.ones((2,2))}}
fe.backend.to_type(data) # {'x': tf.float32, 'y': [tf.float32, tf.float32], 'z': {'key': tf.float32}}
data = fe.backend.cast(data, "uint8")
fe.backend.to_type(data) # {'x': tf.uint8, 'y': [tf.uint8, tf.uint8], 'z': {'key': tf.uint8}}
```
This method can be used with PyTorch tensors:
```python
data = {"x": torch.ones((10,15)), "y":[torch.ones((4)), torch.ones((5, 3))], "z":{"key":torch.ones((2,2))}}
fe.backend.to_type(data) # {'x': torch.float32, 'y': [torch.float32, torch.float32], 'z': {'key': torch.float32}}
data = fe.backend.cast(data, "float64")
fe.backend.to_type(data) # {'x': torch.float64, 'y': [torch.float64, torch.float64], 'z': {'key': torch.float64}}
```
Args:
data: A tensor or possibly nested collection of tensors.
dtype: Target data type, can be one of following: uint8, int8, int16, int32, int64, float16, float32, float64.
Returns:
A collection with the same structure as `data` with target data type.
"""
if isinstance(data, dict):
return {key: cast(value, dtype) for (key, value) in data.items()}
elif isinstance(data, list):
return [cast(val, dtype) for val in data]
elif isinstance(data, tuple):
return tuple([cast(val, dtype) for val in data])
elif isinstance(data, set):
return set([cast(val, dtype) for val in data])
elif tf.is_tensor(data):
return tf.cast(data, STRING_TO_TF_DTYPE[dtype])
elif isinstance(data, torch.Tensor):
return data.type(STRING_TO_TORCH_DTYPE[dtype])
else:
return np.array(data, dtype=dtype)
|
[
"numpy.array",
"tensorflow.cast",
"tensorflow.is_tensor",
"typing.TypeVar"
] |
[((884, 938), 'typing.TypeVar', 'TypeVar', (['"""Tensor"""', 'tf.Tensor', 'torch.Tensor', 'np.ndarray'], {}), "('Tensor', tf.Tensor, torch.Tensor, np.ndarray)\n", (891, 938), False, 'from typing import Collection, TypeVar, Union\n'), ((3119, 3137), 'tensorflow.is_tensor', 'tf.is_tensor', (['data'], {}), '(data)\n', (3131, 3137), True, 'import tensorflow as tf\n'), ((3154, 3194), 'tensorflow.cast', 'tf.cast', (['data', 'STRING_TO_TF_DTYPE[dtype]'], {}), '(data, STRING_TO_TF_DTYPE[dtype])\n', (3161, 3194), True, 'import tensorflow as tf\n'), ((3316, 3343), 'numpy.array', 'np.array', (['data'], {'dtype': 'dtype'}), '(data, dtype=dtype)\n', (3324, 3343), True, 'import numpy as np\n')]
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops.operations import _grad_ops as G
from mindspore.train.model import Model
context.set_context(device_target="Ascend")
class Net(nn.Cell):
def __init__(self):
super(Net, self).__init__()
self.tanh_grad = G.TanhGrad()
def construct(self, y, dy):
return self.tanh_grad(y, dy)
input_shape = [1]
input_np = np.random.randn(*input_shape).astype(np.float32)
input_me = Tensor(input_np)
def test_net():
context.set_context(mode=context.GRAPH_MODE)
tanh_grad = Net()
tanh_grad.set_train()
m = Model(tanh_grad)
out = m.predict(input_me, input_me)
print("out_me.dtype={}".format(out.dtype))
print("out_me.asnumpy={}".format(out.asnumpy()))
return out.asnumpy()
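# Sanity check (assuming TanhGrad(y, dy) computes dy * (1 - y**2)): with
# y == dy == input_np, the output should satisfy
# np.allclose(out, input_np * (1 - input_np ** 2)).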
|
[
"mindspore.train.model.Model",
"mindspore.context.set_context",
"mindspore.ops.operations._grad_ops.TanhGrad",
"numpy.random.randn",
"mindspore.Tensor"
] |
[((871, 914), 'mindspore.context.set_context', 'context.set_context', ([], {'device_target': '"""Ascend"""'}), "(device_target='Ascend')\n", (890, 914), True, 'import mindspore.context as context\n'), ((1196, 1212), 'mindspore.Tensor', 'Tensor', (['input_np'], {}), '(input_np)\n', (1202, 1212), False, 'from mindspore import Tensor\n'), ((1235, 1279), 'mindspore.context.set_context', 'context.set_context', ([], {'mode': 'context.GRAPH_MODE'}), '(mode=context.GRAPH_MODE)\n', (1254, 1279), True, 'import mindspore.context as context\n'), ((1336, 1352), 'mindspore.train.model.Model', 'Model', (['tanh_grad'], {}), '(tanh_grad)\n', (1341, 1352), False, 'from mindspore.train.model import Model\n'), ((1022, 1034), 'mindspore.ops.operations._grad_ops.TanhGrad', 'G.TanhGrad', ([], {}), '()\n', (1032, 1034), True, 'from mindspore.ops.operations import _grad_ops as G\n'), ((1136, 1165), 'numpy.random.randn', 'np.random.randn', (['*input_shape'], {}), '(*input_shape)\n', (1151, 1165), True, 'import numpy as np\n')]
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) 2007-2020 The scikit-learn developers.
# BSD 3-Clause License
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# This file is part of https://github.com/scikit-learn/scikit-learn/blob/114616d9f6ce9eba7c1aacd3d4a254f868010e25/sklearn/manifold/_spectral_embedding.py and
# https://github.com/tango4j/Auto-Tuning-Spectral-Clustering.
from collections import Counter
import numpy as np
import torch
from sklearn.cluster._kmeans import k_means
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.preprocessing import MinMaxScaler
from nemo.utils import logging
from nemo.utils.decorators.experimental import experimental
scaler = MinMaxScaler(feature_range=(0, 1))
try:
from torch.linalg import eigh as eigh
TORCH_EIGN = True
except ImportError:
TORCH_EIGN = False
from scipy.linalg import eigh as eigh
logging.warning("Using eigen decomposition from scipy, upgrade torch to 1.9 or higher for faster clustering")
def isGraphFullyConnected(affinity_mat):
return getTheLargestComponent(affinity_mat, 0).sum() == affinity_mat.shape[0]
def getTheLargestComponent(affinity_mat, seg_index):
"""
    Find the largest connected component of the affinity matrix, starting from the given node.
This is for checking whether the affinity_mat is fully connected.
"""
num_of_segments = affinity_mat.shape[0]
connected_nodes = np.zeros(num_of_segments).astype(np.bool)
nodes_to_explore = np.zeros(num_of_segments).astype(np.bool)
nodes_to_explore[seg_index] = True
for k in range(num_of_segments):
last_num_component = connected_nodes.sum()
np.logical_or(connected_nodes, nodes_to_explore, out=connected_nodes)
if last_num_component >= connected_nodes.sum():
break
indices = np.where(nodes_to_explore)[0]
nodes_to_explore.fill(False)
for i in indices:
neighbors = affinity_mat[i]
np.logical_or(nodes_to_explore, neighbors, out=nodes_to_explore)
return connected_nodes
def getKneighborsConnections(affinity_mat, p_value):
"""
Binarize top-p values for each row from the given affinity matrix.
"""
binarized_affinity_mat = np.zeros_like(affinity_mat)
for i, line in enumerate(affinity_mat):
sorted_idx = np.argsort(line)
sorted_idx = sorted_idx[::-1]
indices = sorted_idx[:p_value]
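        # note: the top-p indices of row i are written into column i; the resulting
        # (asymmetric) mask is symmetrized later in getAffinityGraphMat()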
binarized_affinity_mat[indices, i] = 1
return binarized_affinity_mat
def getAffinityGraphMat(affinity_mat_raw, p_value):
"""
Calculate a binarized graph matrix and
symmetrize the binarized graph matrix.
"""
X = getKneighborsConnections(affinity_mat_raw, p_value)
symm_affinity_mat = 0.5 * (X + X.T)
return symm_affinity_mat
def getMinimumConnection(mat, max_N, n_list):
"""
    Generate connections until all the nodes in the graph are fully connected.
    If the graph is not fully connected, the clustering might produce inaccurate results.
"""
p_value = 1
affinity_mat = getAffinityGraphMat(mat, p_value)
for i, p_value in enumerate(n_list):
fully_connected = isGraphFullyConnected(affinity_mat)
affinity_mat = getAffinityGraphMat(mat, p_value)
if fully_connected or p_value > max_N:
break
return affinity_mat, p_value
def getRepeatedList(mapping_argmat, score_mat_size):
"""
Count the numbers in the mapping dictionary and create lists that contain
repeated indices to be used for creating the repeated affinity matrix for
fusing the affinity values.
"""
count_dict = dict(Counter(mapping_argmat))
repeat_list = []
for k in range(score_mat_size):
if k in count_dict:
repeat_list.append(count_dict[k])
else:
repeat_list.append(0)
return repeat_list
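# Illustrative example (hypothetical values): mapping_argmat = [0, 0, 1, 1, 1] and
# score_mat_size = 3 give repeat_list = [2, 3, 0], i.e. row/column 0 of the score
# matrix is repeated twice, row/column 1 three times, and row/column 2 not at all.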
@experimental
def get_argmin_mat(uniq_scale_dict):
"""
Calculate the mapping between the base scale and other scales. A segment from a longer scale is
repeatedly mapped to a segment from a shorter scale or the base scale.
Args:
uniq_scale_dict (dict) :
Dictionary of embeddings and timestamps for each scale.
Returns:
session_scale_mapping_dict (dict) :
Dictionary containing argmin arrays indexed by scale index.
"""
scale_list = sorted(list(uniq_scale_dict.keys()))
segment_anchor_dict = {}
for scale_idx in scale_list:
time_stamp_list = uniq_scale_dict[scale_idx]['time_stamps']
time_stamps_float = np.array([[float(x.split()[0]), float(x.split()[1])] for x in time_stamp_list])
segment_anchor_dict[scale_idx] = np.mean(time_stamps_float, axis=1)
base_scale_idx = max(scale_list)
base_scale_anchor = segment_anchor_dict[base_scale_idx]
session_scale_mapping_dict = {}
for scale_idx in scale_list:
curr_scale_anchor = segment_anchor_dict[scale_idx]
curr_mat = np.tile(curr_scale_anchor, (base_scale_anchor.shape[0], 1))
base_mat = np.tile(base_scale_anchor, (curr_scale_anchor.shape[0], 1)).T
argmin_mat = np.argmin(np.abs(curr_mat - base_mat), axis=1)
session_scale_mapping_dict[scale_idx] = argmin_mat
return session_scale_mapping_dict
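# Illustrative example (hypothetical scales): with a coarse 3.0 s scale and a 1.5 s base
# scale, each base-scale segment center is mapped, via the argmin over center distances,
# to the index of the closest 3.0 s segment, so consecutive base-scale segments share indices.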
@experimental
def getMultiScaleCosAffinityMatrix(uniq_embs_and_timestamps):
"""
Calculate cosine similarity values among speaker embeddings for each scale then
apply multiscale weights to calculate the fused similarity matrix.
Args:
uniq_embs_and_timestamps: (dict)
The dictionary containing embeddings, timestamps and multiscale weights.
If uniq_embs_and_timestamps contains only one scale, single scale diarization
is performed.
Returns:
fused_sim_d (np.array):
            This function generates an affinity matrix that is obtained by calculating
the weighted sum of the affinity matrices from the different scales.
base_scale_emb (np.array):
The base scale embedding (the embeddings from the finest scale)
"""
uniq_scale_dict = uniq_embs_and_timestamps['scale_dict']
base_scale_idx = max(uniq_scale_dict.keys())
base_scale_emb = np.array(uniq_scale_dict[base_scale_idx]['embeddings'])
multiscale_weights = uniq_embs_and_timestamps['multiscale_weights']
score_mat_list, repeated_mat_list = [], []
session_scale_mapping_dict = get_argmin_mat(uniq_scale_dict)
for scale_idx in sorted(uniq_scale_dict.keys()):
mapping_argmat = session_scale_mapping_dict[scale_idx]
score_mat = getCosAffinityMatrix(uniq_scale_dict[scale_idx]['embeddings'])
score_mat_list.append(score_mat)
repeat_list = getRepeatedList(mapping_argmat, score_mat.shape[0])
repeated_mat = np.repeat(np.repeat(score_mat, repeat_list, axis=0), repeat_list, axis=1)
repeated_mat_list.append(repeated_mat)
fused_sim_d = np.average(np.array(repeated_mat_list), weights=multiscale_weights, axis=0)
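    # np.average normalizes the weights internally, so multiscale_weights need not sum to 1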
return fused_sim_d, base_scale_emb
def addAnchorEmb(emb, anchor_sample_n, anchor_spk_n, sigma):
"""
Add randomly generated synthetic embeddings to make eigen analysis more stable.
We refer to these embeddings as anchor embeddings.
emb (np.array):
        The input embedding from the embedding extractor.
anchor_sample_n (int):
The number of embedding samples per speaker.
anchor_sample_n = 10 is recommended.
anchor_spk_n (int):
The number of speakers for synthetic embedding.
anchor_spk_n = 3 is recommended.
sigma (int):
The amplitude of synthetic noise for each embedding vector.
If sigma value is too small, under-counting could happen.
If sigma value is too large, over-counting could happen.
sigma = 50 is recommended.
"""
emb_dim = emb.shape[1]
std_org = np.std(emb, axis=0)
new_emb_list = []
for _ in range(anchor_spk_n):
emb_m = np.tile(np.random.randn(1, emb_dim), (anchor_sample_n, 1))
emb_noise = np.random.randn(anchor_sample_n, emb_dim).T
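        # the noise is normalized by its maximum absolute value and then scaled by the
        # per-dimension standard deviation of the original embeddings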
emb_noise = np.dot(np.diag(std_org), emb_noise / np.max(np.abs(emb_noise))).T
emb_gen = emb_m + sigma * emb_noise
new_emb_list.append(emb_gen)
new_emb_list.append(emb)
new_emb_np = np.vstack(new_emb_list)
return new_emb_np
def getEnhancedSpeakerCount(emb, cuda, random_test_count=5, anchor_spk_n=3, anchor_sample_n=10, sigma=50):
"""
Calculate the number of speakers using NME analysis with anchor embeddings.
"""
est_num_of_spk_list = []
for seed in range(random_test_count):
np.random.seed(seed)
emb_aug = addAnchorEmb(emb, anchor_sample_n, anchor_spk_n, sigma)
mat = getCosAffinityMatrix(emb_aug)
nmesc = NMESC(
mat,
max_num_speaker=emb.shape[0],
max_rp_threshold=0.25,
sparse_search=True,
sparse_search_volume=30,
fixed_thres=None,
NME_mat_size=300,
cuda=cuda,
)
est_num_of_spk, _ = nmesc.NMEanalysis()
est_num_of_spk_list.append(est_num_of_spk)
ctt = Counter(est_num_of_spk_list)
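    # take the most frequent estimate across the random seeds and subtract the
    # synthetic anchor speakers that were added to the embeddings (at least 1 speaker)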
oracle_num_speakers = max(ctt.most_common(1)[0][0] - anchor_spk_n, 1)
return oracle_num_speakers
def getCosAffinityMatrix(emb):
"""
Calculate cosine similarity values among speaker embeddings.
"""
sim_d = cosine_similarity(emb)
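    # min-max scale the raw cosine similarities into [0, 1] using the module-level scaler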
scaler.fit(sim_d)
sim_d = scaler.transform(sim_d)
return sim_d
def getLaplacian(X):
"""
Calculate a laplacian matrix from an affinity matrix X.
"""
X[np.diag_indices(X.shape[0])] = 0
A = X
D = np.sum(np.abs(A), axis=1)
D = np.diag(D)
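    # unnormalized graph Laplacian: L = D - A, where D is the diagonal degree matrix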
L = D - A
return L
def eigDecompose(laplacian, cuda, device=None):
if TORCH_EIGN:
if cuda:
if device is None:
device = torch.cuda.current_device()
laplacian = torch.from_numpy(laplacian).float().to(device)
else:
laplacian = torch.from_numpy(laplacian).float()
lambdas, diffusion_map = eigh(laplacian)
lambdas = lambdas.cpu().numpy()
diffusion_map = diffusion_map.cpu().numpy()
else:
lambdas, diffusion_map = eigh(laplacian)
return lambdas, diffusion_map
def getLamdaGaplist(lambdas):
lambdas = np.real(lambdas)
return list(lambdas[1:] - lambdas[:-1])
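# Illustrative example (hypothetical eigenvalues): for sorted lambdas [0.0, 0.1, 0.15, 0.9]
# the gap list is [0.1, 0.05, 0.75]; the largest gap at index 2 leads estimateNumofSpeakers
# to return 3 speakers (argmax + 1).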
def estimateNumofSpeakers(affinity_mat, max_num_speaker, is_cuda=False):
"""
    Estimate the number of speakers using eigendecomposition of the Laplacian matrix.
affinity_mat: (array)
        NxN affinity matrix
max_num_speaker: (int)
Maximum number of clusters to consider for each session
is_cuda: (bool)
        If CUDA is available, the eigendecomposition is computed on the GPU
"""
laplacian = getLaplacian(affinity_mat)
lambdas, _ = eigDecompose(laplacian, is_cuda)
lambdas = np.sort(lambdas)
lambda_gap_list = getLamdaGaplist(lambdas)
num_of_spk = np.argmax(lambda_gap_list[: min(max_num_speaker, len(lambda_gap_list))]) + 1
return num_of_spk, lambdas, lambda_gap_list
class _SpectralClustering:
def __init__(self, n_clusters=8, random_state=0, n_init=10, p_value=10, n_jobs=None, cuda=False):
self.n_clusters = n_clusters
self.random_state = random_state
self.n_init = n_init
self.p_value = p_value
self.affinity_matrix_ = None
self.cuda = cuda
def predict(self, X):
if X.shape[0] != X.shape[1]:
raise ValueError("The affinity matrix is not a square matrix.")
self.affinity_matrix_ = X
labels = self.clusterSpectralEmbeddings(self.affinity_matrix_, n_init=self.n_init, cuda=self.cuda)
return labels
def clusterSpectralEmbeddings(self, affinity, n_init=10, cuda=False):
spectral_emb = self.getSpectralEmbeddings(affinity, n_spks=self.n_clusters, drop_first=False, cuda=cuda)
_, labels, _ = k_means(spectral_emb, self.n_clusters, random_state=self.random_state, n_init=n_init)
return labels
def getSpectralEmbeddings(self, affinity_mat, n_spks=8, drop_first=True, cuda=False):
if not isGraphFullyConnected(affinity_mat):
logging.warning("Graph is not fully connected and the clustering result might not be accurate.")
laplacian = getLaplacian(affinity_mat)
lambdas_, diffusion_map_ = eigDecompose(laplacian, cuda)
diffusion_map = diffusion_map_[:, :n_spks]
embedding = diffusion_map.T[n_spks::-1]
return embedding[:n_spks].T
class NMESC:
"""
Normalized Maximum Eigengap based Spectral Clustering (NME-SC)
uses Eigengap analysis to get an estimated p-value for
affinity binarization and an estimated number of speakers.
    p_value (also referred to as p_neighbors) is used for taking the
    top p affinity values in each row, converting those to 1 and
    converting the rest of the values to 0.
    p_value can also be tuned on a development set without performing
    NME-analysis.
Reference: Auto-Tuning Spectral Clustering for Speaker Diarization
Using Normalized Maximum Eigengap (https://arxiv.org/abs/2003.02405)
Parameters:
Please refer to def __init__()
Methods:
NMEanalysis():
Performs NME-analysis to estimate p_value and the number of speakers.
subsampleAffinityMat(NME_mat_size):
Subsamples the number of speakers to reduce the computational load.
getPvalueList():
            Generates a list containing the p-values that need to be examined.
        getEigRatio(p_neighbors):
            Calculates g_p, which is a ratio between p_neighbors and the maximum eigengap.
        getLamdaGaplist(lambdas):
            Calculates lambda gap values from an array containing lambda values.
estimateNumofSpeakers(affinity_mat):
Estimates the number of speakers using lambda gap list.
"""
def __init__(
self,
mat,
max_num_speaker=10,
max_rp_threshold=0.250,
sparse_search=True,
sparse_search_volume=30,
use_subsampling_for_NME=True,
fixed_thres=None,
cuda=False,
NME_mat_size=512,
):
"""
Parameters:
mat: (numpy.array)
Cosine similarity matrix calculated from speaker embeddings.
max_num_speaker: (int)
                Maximum number of speakers to consider when estimating the number of speakers.
                Shows stable performance for values under 20.
max_rp_threshold: (float)
Limits the range of parameter search.
Clustering performance can vary depending on this range.
Default is 0.25.
sparse_search: (bool)
To increase the speed of parameter estimation, sparse_search=True
limits the number of p_values we search.
sparse_search_volume: (int)
The number of p_values we search during NME analysis.
Default is 30. The lower the value, the faster NME-analysis becomes.
                Values lower than 20 might cause poor parameter estimation.
            use_subsampling_for_NME: (bool)
                Use subsampling to reduce the computational complexity.
                Default is True.
            fixed_thres: (float or None)
                A fixed threshold can be used instead of estimating the
                threshold with NME analysis. If fixed_thres is a float,
                the NME analysis step is skipped.
cuda: (bool)
Use cuda for Eigen decomposition if cuda=True.
NME_mat_size: (int)
Targeted size of matrix for NME analysis.
"""
self.max_num_speaker = max_num_speaker
self.max_rp_threshold = max_rp_threshold
self.use_subsampling_for_NME = use_subsampling_for_NME
self.NME_mat_size = NME_mat_size
self.sparse_search = sparse_search
self.sparse_search_volume = sparse_search_volume
self.fixed_thres = fixed_thres
self.cuda = cuda
self.eps = 1e-10
self.max_N = None
self.mat = mat
self.p_value_list = []
def NMEanalysis(self):
"""
        Perform the NME analysis: subsample the affinity matrix if requested, then scan the
        p-value list and pick the value that minimizes the eigengap ratio g_p.
"""
if self.use_subsampling_for_NME:
subsample_ratio = self.subsampleAffinityMat(self.NME_mat_size)
# Scans p_values and find a p_value that generates
# the smallest g_p value.
eig_ratio_list, est_spk_n_dict = [], {}
self.p_value_list = self.getPvalueList()
for p_value in self.p_value_list:
est_num_of_spk, g_p = self.getEigRatio(p_value)
est_spk_n_dict[p_value] = est_num_of_spk
eig_ratio_list.append(g_p)
index_nn = np.argmin(eig_ratio_list)
rp_p_value = self.p_value_list[index_nn]
affinity_mat = getAffinityGraphMat(self.mat, rp_p_value)
# Checks whether affinity graph is fully connected.
# If not, it adds minimum number of connections to make it fully connected.
if not isGraphFullyConnected(affinity_mat):
affinity_mat, rp_p_value = getMinimumConnection(self.mat, self.max_N, self.p_value_list)
p_hat_value = int(subsample_ratio * rp_p_value)
est_num_of_spk = est_spk_n_dict[rp_p_value]
return est_num_of_spk, p_hat_value
def subsampleAffinityMat(self, NME_mat_size):
"""
        Perform subsampling of the affinity matrix.
        This subsampling is done to reduce computational complexity, not to improve performance.
        The smaller NME_mat_size is,
            - the bigger the chance of missing a speaker;
            - the faster the p-value estimation (based on eigen decomposition) becomes.
        Recommended NME_mat_size is 250~750.
        However, if there are speakers who speak for a very short period of time in the recording,
        this subsampling might make the system miss the underrepresented speaker.
        Use this with caution.
Parameters:
NME_mat_size: (int)
Targeted matrix size
Returns:
subsample_ratio : (float)
The ratio between NME_mat_size and the original matrix size
"""
subsample_ratio = int(max(1, self.mat.shape[0] / NME_mat_size))
self.mat = self.mat[::subsample_ratio, ::subsample_ratio]
return subsample_ratio
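    # A worked example of the ratio: with a 1200x1200 affinity matrix and
    # NME_mat_size=300, subsample_ratio = int(max(1, 1200 / 300)) = 4, and
    # self.mat[::4, ::4] leaves a 300x300 matrix for the eigengap analysis.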
def getEigRatio(self, p_neighbors):
"""
For a given p_neighbors value,
calculates g_p, which is a ratio
between p_neighbors and the maximum eigengap.
For more details: https://arxiv.org/abs/2003.02405
Parameters:
p_neighbors: (int)
Determines how many binary graph connections we want to keep for each row.
Returns:
est_num_of_spk: (int)
Estimated number of speakers
g_p: (float)
The ratio between p_neighbors value and the maximum eigen gap value.
"""
affinity_mat = getAffinityGraphMat(self.mat, p_neighbors)
est_num_of_spk, lambdas, lambda_gap_list = estimateNumofSpeakers(affinity_mat, self.max_num_speaker, self.cuda)
arg_sorted_idx = np.argsort(lambda_gap_list[: self.max_num_speaker])[::-1]
max_key = arg_sorted_idx[0]
max_eig_gap = lambda_gap_list[max_key] / (max(lambdas) + self.eps)
g_p = (p_neighbors / self.mat.shape[0]) / (max_eig_gap + self.eps)
return est_num_of_spk, g_p
def getPvalueList(self):
"""
Generates a p-value (p_neighbour) list for searching.
"""
if self.fixed_thres:
p_value_list = [int(self.mat.shape[0] * self.fixed_thres)]
self.max_N = p_value_list[0]
else:
self.max_N = int(self.mat.shape[0] * self.max_rp_threshold)
if self.sparse_search:
N = min(self.max_N, self.sparse_search_volume)
p_value_list = list(np.linspace(1, self.max_N, N, endpoint=True).astype(int))
else:
p_value_list = list(range(1, self.max_N))
return p_value_list
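# A minimal usage sketch of NMESC, assuming `emb` is a hypothetical (N, D) numpy
# array of speaker embeddings; this mirrors what COSclustering below does internally:
#
#     mat = getCosAffinityMatrix(emb)
#     nmesc = NMESC(mat, max_num_speaker=8, max_rp_threshold=0.25,
#                   sparse_search=True, sparse_search_volume=30, cuda=False)
#     est_num_of_spk, p_hat_value = nmesc.NMEanalysis()
#     affinity_mat = getAffinityGraphMat(mat, p_hat_value)
#     labels = _SpectralClustering(n_clusters=est_num_of_spk, cuda=False).predict(affinity_mat)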
def COSclustering(
uniq_embs_and_timestamps=None,
oracle_num_speakers=None,
max_num_speaker=8,
min_samples_for_NMESC=6,
enhanced_count_thres=80,
max_rp_threshold=0.25,
sparse_search_volume=30,
fixed_thres=None,
cuda=False,
):
"""
Clustering method for speaker diarization based on cosine similarity.
Parameters:
uniq_embs_and_timestamps: (dict)
The dictionary containing embeddings, timestamps and multiscale weights.
If uniq_embs_and_timestamps contains only one scale, single scale diarization
is performed.
        oracle_num_speakers: (int or None)
Oracle number of speakers if known else None
max_num_speaker: (int)
Maximum number of clusters to consider for each session
min_samples_for_NMESC: (int)
Minimum number of samples required for NME clustering, this avoids
zero p_neighbour_lists. If the input has fewer segments than min_samples,
it is directed to the enhanced speaker counting mode.
enhanced_count_thres: (int)
            For short audio recordings (under 60 seconds), the clustering algorithm cannot
            accumulate a sufficient amount of speaker profile for each cluster.
Thus, getEnhancedSpeakerCount() employs anchor embeddings (dummy representations)
to mitigate the effect of cluster sparsity.
enhanced_count_thres = 80 is recommended.
max_rp_threshold: (float)
Limits the range of parameter search.
Clustering performance can vary depending on this range.
Default is 0.25.
sparse_search_volume: (int)
The number of p_values we search during NME analysis.
Default is 30. The lower the value, the faster NME-analysis becomes.
            Values lower than 20 might cause poor parameter estimation.
fixed_thres: (float)
If fixed_thres value is provided, NME-analysis process will be skipped.
This value should be optimized on a development set to obtain a quality result.
Default is None and performs NME-analysis to estimate the threshold.
Returns:
Y: (List[int])
Speaker label for each segment.
"""
# Get base-scale embedding from uniq_embs_and_timestamps.
uniq_scale_dict = uniq_embs_and_timestamps['scale_dict']
emb = np.array(uniq_scale_dict[max(uniq_scale_dict.keys())]['embeddings'])
if emb.shape[0] == 1:
return np.array([0])
elif emb.shape[0] <= max(enhanced_count_thres, min_samples_for_NMESC) and oracle_num_speakers is None:
est_num_of_spk_enhanced = getEnhancedSpeakerCount(emb, cuda)
else:
est_num_of_spk_enhanced = None
if oracle_num_speakers:
max_num_speaker = oracle_num_speakers
mat, emb = getMultiScaleCosAffinityMatrix(uniq_embs_and_timestamps)
nmesc = NMESC(
mat,
max_num_speaker=max_num_speaker,
max_rp_threshold=max_rp_threshold,
sparse_search=True,
sparse_search_volume=sparse_search_volume,
fixed_thres=fixed_thres,
NME_mat_size=300,
cuda=cuda,
)
if emb.shape[0] > min_samples_for_NMESC:
est_num_of_spk, p_hat_value = nmesc.NMEanalysis()
affinity_mat = getAffinityGraphMat(mat, p_hat_value)
else:
affinity_mat = mat
if oracle_num_speakers:
est_num_of_spk = oracle_num_speakers
elif est_num_of_spk_enhanced:
est_num_of_spk = est_num_of_spk_enhanced
spectral_model = _SpectralClustering(n_clusters=est_num_of_spk, cuda=cuda)
Y = spectral_model.predict(affinity_mat)
return Y
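# A hypothetical end-to-end call, assuming `uniq_embs_and_timestamps` carries a
# 'scale_dict' whose entries hold per-scale 'embeddings' (plus the timestamp/weight
# fields consumed by getMultiScaleCosAffinityMatrix):
#
#     labels = COSclustering(uniq_embs_and_timestamps,
#                            oracle_num_speakers=None,
#                            max_num_speaker=8,
#                            enhanced_count_thres=80,
#                            cuda=False)
#     # `labels` holds one integer speaker index per base-scale segment.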
|
[
"sklearn.cluster._kmeans.k_means",
"torch.from_numpy",
"numpy.argsort",
"numpy.array",
"numpy.mean",
"numpy.repeat",
"sklearn.metrics.pairwise.cosine_similarity",
"numpy.where",
"numpy.sort",
"numpy.diag_indices",
"numpy.real",
"numpy.linspace",
"numpy.vstack",
"numpy.random.seed",
"numpy.argmin",
"sklearn.preprocessing.MinMaxScaler",
"torch.cuda.current_device",
"numpy.tile",
"numpy.abs",
"scipy.linalg.eigh",
"numpy.std",
"numpy.random.randn",
"numpy.logical_or",
"numpy.diag",
"collections.Counter",
"nemo.utils.logging.warning",
"numpy.zeros",
"numpy.zeros_like"
] |
[((2002, 2036), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {'feature_range': '(0, 1)'}), '(feature_range=(0, 1))\n', (2014, 2036), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((3531, 3558), 'numpy.zeros_like', 'np.zeros_like', (['affinity_mat'], {}), '(affinity_mat)\n', (3544, 3558), True, 'import numpy as np\n'), ((7498, 7553), 'numpy.array', 'np.array', (["uniq_scale_dict[base_scale_idx]['embeddings']"], {}), "(uniq_scale_dict[base_scale_idx]['embeddings'])\n", (7506, 7553), True, 'import numpy as np\n'), ((9171, 9190), 'numpy.std', 'np.std', (['emb'], {'axis': '(0)'}), '(emb, axis=0)\n', (9177, 9190), True, 'import numpy as np\n'), ((9600, 9623), 'numpy.vstack', 'np.vstack', (['new_emb_list'], {}), '(new_emb_list)\n', (9609, 9623), True, 'import numpy as np\n'), ((10458, 10486), 'collections.Counter', 'Counter', (['est_num_of_spk_list'], {}), '(est_num_of_spk_list)\n', (10465, 10486), False, 'from collections import Counter\n'), ((10718, 10740), 'sklearn.metrics.pairwise.cosine_similarity', 'cosine_similarity', (['emb'], {}), '(emb)\n', (10735, 10740), False, 'from sklearn.metrics.pairwise import cosine_similarity\n'), ((11006, 11016), 'numpy.diag', 'np.diag', (['D'], {}), '(D)\n', (11013, 11016), True, 'import numpy as np\n'), ((11640, 11656), 'numpy.real', 'np.real', (['lambdas'], {}), '(lambdas)\n', (11647, 11656), True, 'import numpy as np\n'), ((12214, 12230), 'numpy.sort', 'np.sort', (['lambdas'], {}), '(lambdas)\n', (12221, 12230), True, 'import numpy as np\n'), ((2199, 2318), 'nemo.utils.logging.warning', 'logging.warning', (['"""Using eigen decomposition from scipy, upgrade torch to 1.9 or higher for faster clustering"""'], {}), "(\n 'Using eigen decomposition from scipy, upgrade torch to 1.9 or higher for faster clustering'\n )\n", (2214, 2318), False, 'from nemo.utils import logging\n'), ((2961, 3030), 'numpy.logical_or', 'np.logical_or', (['connected_nodes', 'nodes_to_explore'], {'out': 'connected_nodes'}), '(connected_nodes, nodes_to_explore, out=connected_nodes)\n', (2974, 3030), True, 'import numpy as np\n'), ((3624, 3640), 'numpy.argsort', 'np.argsort', (['line'], {}), '(line)\n', (3634, 3640), True, 'import numpy as np\n'), ((4908, 4931), 'collections.Counter', 'Counter', (['mapping_argmat'], {}), '(mapping_argmat)\n', (4915, 4931), False, 'from collections import Counter\n'), ((5954, 5988), 'numpy.mean', 'np.mean', (['time_stamps_float'], {'axis': '(1)'}), '(time_stamps_float, axis=1)\n', (5961, 5988), True, 'import numpy as np\n'), ((6234, 6293), 'numpy.tile', 'np.tile', (['curr_scale_anchor', '(base_scale_anchor.shape[0], 1)'], {}), '(curr_scale_anchor, (base_scale_anchor.shape[0], 1))\n', (6241, 6293), True, 'import numpy as np\n'), ((8227, 8254), 'numpy.array', 'np.array', (['repeated_mat_list'], {}), '(repeated_mat_list)\n', (8235, 8254), True, 'import numpy as np\n'), ((9930, 9950), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (9944, 9950), True, 'import numpy as np\n'), ((10921, 10948), 'numpy.diag_indices', 'np.diag_indices', (['X.shape[0]'], {}), '(X.shape[0])\n', (10936, 10948), True, 'import numpy as np\n'), ((10979, 10988), 'numpy.abs', 'np.abs', (['A'], {}), '(A)\n', (10985, 10988), True, 'import numpy as np\n'), ((11392, 11407), 'scipy.linalg.eigh', 'eigh', (['laplacian'], {}), '(laplacian)\n', (11396, 11407), True, 'from scipy.linalg import eigh as eigh\n'), ((11543, 11558), 'scipy.linalg.eigh', 'eigh', (['laplacian'], {}), '(laplacian)\n', (11547, 11558), True, 'from scipy.linalg import eigh as 
eigh\n'), ((13266, 13355), 'sklearn.cluster._kmeans.k_means', 'k_means', (['spectral_emb', 'self.n_clusters'], {'random_state': 'self.random_state', 'n_init': 'n_init'}), '(spectral_emb, self.n_clusters, random_state=self.random_state,\n n_init=n_init)\n', (13273, 13355), False, 'from sklearn.cluster._kmeans import k_means\n'), ((18157, 18182), 'numpy.argmin', 'np.argmin', (['eig_ratio_list'], {}), '(eig_ratio_list)\n', (18166, 18182), True, 'import numpy as np\n'), ((24057, 24070), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (24065, 24070), True, 'import numpy as np\n'), ((2718, 2743), 'numpy.zeros', 'np.zeros', (['num_of_segments'], {}), '(num_of_segments)\n', (2726, 2743), True, 'import numpy as np\n'), ((2783, 2808), 'numpy.zeros', 'np.zeros', (['num_of_segments'], {}), '(num_of_segments)\n', (2791, 2808), True, 'import numpy as np\n'), ((3123, 3149), 'numpy.where', 'np.where', (['nodes_to_explore'], {}), '(nodes_to_explore)\n', (3131, 3149), True, 'import numpy as np\n'), ((3268, 3332), 'numpy.logical_or', 'np.logical_or', (['nodes_to_explore', 'neighbors'], {'out': 'nodes_to_explore'}), '(nodes_to_explore, neighbors, out=nodes_to_explore)\n', (3281, 3332), True, 'import numpy as np\n'), ((6313, 6372), 'numpy.tile', 'np.tile', (['base_scale_anchor', '(curr_scale_anchor.shape[0], 1)'], {}), '(base_scale_anchor, (curr_scale_anchor.shape[0], 1))\n', (6320, 6372), True, 'import numpy as np\n'), ((6406, 6433), 'numpy.abs', 'np.abs', (['(curr_mat - base_mat)'], {}), '(curr_mat - base_mat)\n', (6412, 6433), True, 'import numpy as np\n'), ((8086, 8127), 'numpy.repeat', 'np.repeat', (['score_mat', 'repeat_list'], {'axis': '(0)'}), '(score_mat, repeat_list, axis=0)\n', (8095, 8127), True, 'import numpy as np\n'), ((9271, 9298), 'numpy.random.randn', 'np.random.randn', (['(1)', 'emb_dim'], {}), '(1, emb_dim)\n', (9286, 9298), True, 'import numpy as np\n'), ((9342, 9383), 'numpy.random.randn', 'np.random.randn', (['anchor_sample_n', 'emb_dim'], {}), '(anchor_sample_n, emb_dim)\n', (9357, 9383), True, 'import numpy as np\n'), ((13529, 13635), 'nemo.utils.logging.warning', 'logging.warning', (['"""Graph is not fully connected and the clustering result might not be accurate."""'], {}), "(\n 'Graph is not fully connected and the clustering result might not be accurate.'\n )\n", (13544, 13635), False, 'from nemo.utils import logging\n'), ((20596, 20646), 'numpy.argsort', 'np.argsort', (['lambda_gap_list[:self.max_num_speaker]'], {}), '(lambda_gap_list[:self.max_num_speaker])\n', (20606, 20646), True, 'import numpy as np\n'), ((9413, 9429), 'numpy.diag', 'np.diag', (['std_org'], {}), '(std_org)\n', (9420, 9429), True, 'import numpy as np\n'), ((11186, 11213), 'torch.cuda.current_device', 'torch.cuda.current_device', ([], {}), '()\n', (11211, 11213), False, 'import torch\n'), ((11323, 11350), 'torch.from_numpy', 'torch.from_numpy', (['laplacian'], {}), '(laplacian)\n', (11339, 11350), False, 'import torch\n'), ((9450, 9467), 'numpy.abs', 'np.abs', (['emb_noise'], {}), '(emb_noise)\n', (9456, 9467), True, 'import numpy as np\n'), ((11238, 11265), 'torch.from_numpy', 'torch.from_numpy', (['laplacian'], {}), '(laplacian)\n', (11254, 11265), False, 'import torch\n'), ((21353, 21397), 'numpy.linspace', 'np.linspace', (['(1)', 'self.max_N', 'N'], {'endpoint': '(True)'}), '(1, self.max_N, N, endpoint=True)\n', (21364, 21397), True, 'import numpy as np\n')]
|
# %%
from functools import reduce
import numpy as np
import pandas as pd
from pandas.tseries.offsets import DateOffset
pd.set_option("display.max_rows", None)
pd.set_option("display.max_columns", None)
# %%
def build_gvkeys(prc, fund):
gvkeys_fund = fund.gvkey.unique()
gvkeys_prc = prc[prc.close > 5].gvkey.unique()
gvkeys = np.intersect1d(gvkeys_fund, gvkeys_prc)
return gvkeys
def fill_year(df):
first_date = df["date"].iloc[0]
last_date = df["date"].iloc[-1]
date_index = pd.date_range(
pd.to_datetime(first_date),
pd.to_datetime(last_date) + DateOffset(years=1),
freq="M",
name="date",
)
return (
df.drop("gvkey", axis=1)
.set_index("date")
.sort_index()
.reindex(date_index, method="ffill")
)
def fill_month(df):
first_date = df["date"].iloc[0]
last_date = df["date"].iloc[-1]
date_index = pd.date_range(
pd.to_datetime(first_date),
pd.to_datetime(last_date) + DateOffset(months=1),
freq="M",
name="date",
)
return (
df.drop("gvkey", axis=1)
.set_index("date")
.sort_index()
.reindex(date_index, method="ffill")
)
def build_fundamental(df):
oa = df.att - df.che
ol = df.att - df.dlc - df.dltt - df.mib - df.pstk - df.ceq
chact = df.act - df.act.shift(1)
chchee = df.chee - df.chee.shift(1)
chlct = df.lct - df.lct.shift(1)
chdlc = df.dlc - df.dlc.shift(1)
chtxp = df.txp - df.txp.shift(1)
chchee = df.chee - df.chee.shift(1)
avg_at = (df.att + df.att.shift(1)) / 2
nca = df.att - df.act - df.ivaeq
ncl = df.ltt - df.lct - df.dltt
ncoa = nca - ncl
coa = df.act - df.che
col = df.lct - df.dlc
wc = df.act - df.che - df.lct + df.dlc
fna = df.ivst + df.ivao
fnl = df.dltt + df.dlc + df.pstk
nfna = fna - fnl
be = df.seq - df.pstk
df = df.assign(
# Accruals
accruals_acc=((chact - chchee) - (chlct - chdlc - chtxp) - df.dp) / avg_at,
accruals_chcoa=(coa - coa.shift(1)) / df.att.shift(1),
accruals_chcol=(col - col.shift(1)) / df.att.shift(1),
accruals_chnncwc=(wc - wc.shift(1)) / df.att.shift(1),
accruals_chnncoa=(ncoa - ncoa.shift(1)) / df.att.shift(1),
accruals_chncoa=(nca - nca.shift(1)) / df.att.shift(1),
accruals_chncol=(ncl - ncl.shift(1)) / df.att.shift(1),
        accruals_chnfa=(nfna - nfna.shift(1)) / df.att.shift(1),
accruals_chlti=(df.ivao - df.ivao.shift(1)) / df.att.shift(1),
accruals_chce=(df.ceq - df.ceq.shift(1)) / df.att.shift(1),
accruals_chfl=(
df.dltt + df.dlc + df.pstk - (df.dltt + df.dlc + df.pstk).shift(1)
)
/ df.att.shift(1),
accruals_grii=(df.invt - df.invt.shift(1)) / ((df.att + df.att.shift(1)) / 2),
accruals_ich=(df.invt - df.invt.shift(1)) / df.att.shift(1),
accruals_igr=(df.invt - df.invt.shift(1)) / df.invt.shift(1),
accruals_nwcch=(wc - wc.shift(1)) / df.att.shift(1),
accruals_poa=(df.nicon - df.oancf) / abs(df.nicon),
accruals_pta=(
df.nicon - (-df.sstk + df.prstkc + df.dv + df.oancf + df.ivncf + df.fincf)
)
/ abs(df.nicon),
accruals_ta=((ncoa + wc + nfna) - (ncoa + wc + nfna).shift(1))
/ df.att.shift(1),
# Efficiency
efficiency_itr=df.cogs / df.invt,
efficiency_rtr=df.revt / df.rect,
efficiency_apr=df.cogs / df.ap,
efficiency_dsi=365 * df.invt / df.cogs,
efficiency_dso=365 * df.rect / df.revt,
efficiency_dpo=365 * df.ap / df.cogs,
efficiency_dopl=(df.ebit / df.ebit.shift(1) - 1)
/ (df.revt / df.revt.shift(1) - 1),
# Profitablity
        profitability_at=df.revt / (((oa - ol) + (oa - ol).shift(1)) / 2),
profitability_fat=df.revt / df.ppent,
profitability_ct=df.revt / df.att.shift(1),
profitability_gp=(df.revt - df.cogs) / df.att.shift(1),
profitability_opta=(df.revt - df.cogs - df.xsga + df.xrd) / df.att,
profitability_opte=(df.revt - df.cogs - df.xsga + df.xrd) / be,
profitability_gpm=(df.revt - df.cogs) / df.revt,
profitability_ebitdam=df.ebitda / df.revt,
profitability_ebitm=df.ebit / df.revt,
profitability_ptm=df.pi / df.revt,
profitability_npm=df.nicon / df.revt,
profitability_roa=df.nicon / df.att,
profitability_roe=df.nicon / be,
profitability_roic=(df.ebit * (df.nicon / df.pi)) / (df.dlc + df.dltt + df.teq),
# Intangible
intangible_rdm=df.xrd / df.mcap,
intangible_rds=df.xrd / df.revt,
# Investment
investment_agr=df.att / df.att.shift(1),
investment_cdi=np.log(
(df.dltt + df.dlc) / (df.dltt.shift(5) + df.dlc.shift(5))
),
investment_chnoa=(
((oa - ol) / df.att.shift(1)) - (((oa - ol) / df.att.shift(1)).shift(1))
)
/ df.att.shift(1),
investment_chppeia=(
(df.ppegt - df.ppegt.shift(1)) + (df.invt - df.invt.shift(1))
)
/ df.att.shift(1),
investment_griltnoa=(
((oa - ol) / df.att.shift(1))
- (((oa - ol) / df.att.shift(1)).shift(1))
- ((chact - chchee) - (chlct - chdlc - chtxp) - df.dp) / avg_at
),
investment_inv=(df.capx / df.revt)
/ (
(
(df.capx.shift(1) / df.revt.shift(1))
+ (df.capx.shift(2) / df.revt.shift(2))
+ (df.capx.shift(3) / df.revt.shift(3))
)
/ 3
),
investment_ndf=(df.dltis - df.dltr + df.dlcch)
/ ((df.att + df.att.shift(1)) / 2),
investment_nef=(df.sstk - df.prstkc - df.dv) / ((df.att + df.att.shift(1)) / 2),
investment_noa=(oa - ol) / df.att.shift(1),
investment_noach=(ncoa - ncoa.shift(1)) / df.att,
investment_txfin=(df.sstk - df.dv - df.prstkc + df.dltis - df.dltr) / df.att,
# Leverage
leverage_de=(df.dlc + df.dltt) / be,
leverage_da=(df.dltt + df.dlc) / df.att,
leverage_fl=df.att / be,
leverage_deda=(df.dltt + df.dlc) / df.ebitda,
leverage_ndeda=(df.dltt + df.dlc - df.chee) / df.ebitda,
leverage_eic=df.ebit / df.xint,
leverage_edaic=df.ebitda / df.xint,
leverage_cac=df.ch / df.xint,
leverage_dcap=(df.dltt + df.dlc) / (df.dltt + df.dlc + df.teq),
leverage_cad=df.oancf / (df.dlc + df.dltt),
# Liquidity
liquid_cur=df.act / df.lct,
liquid_qur=(df.act - df.invt) / df.lct,
liquid_car=df.chee / df.lct,
liquid_opr=df.oancf / df.lct,
liquid_capxr=df.capx / df.oancf,
# Market
market_dyr=df.dvc * 10 ** 6 / df.cshoc / df.prccd,
market_pe=df.mcap / (df.nicon * 10 ** 6),
market_pch=df.mcap / (df.oancf * 10 ** 6),
market_ps=df.mcap / (df.revt * 10 ** 6),
market_peg=(df.prccd / ((df.nicon * 10 ** 6) / df.cshoc))
/ (
(
((df.nicon * 10 ** 6) / df.cshoc)
/ (((df.nicon * 10 ** 6) / df.cshoc).shift(1))
)
- 1
),
market_mb=df.mcap / (df.ceq * 10 ** 6),
market_evs=(df.mcap + (df.dlc + df.dltt + df.pstk + df.mib - df.chee) * 10 ** 6)
/ (df.revt * 10 ** 6),
market_eveda=(
df.mcap + (df.dlc + df.dltt + df.pstk + df.mib - df.chee) * 10 ** 6
)
/ (df.ebitda * 10 ** 6),
market_eve=(df.mcap + (df.dlc + df.dltt + df.pstk + df.mib - df.chee) * 10 ** 6)
/ (df.ebit * 10 ** 6),
market_evedacpx=(
df.mcap + (df.dlc + df.dltt + df.pstk + df.mib - df.chee) * 10 ** 6
)
/ ((df.ebitda - df.capx) * 10 ** 6),
market_evocf=(
df.mcap + (df.dlc + df.dltt + df.pstk + df.mib - df.chee) * 10 ** 6
)
/ ((df.oancf) * 10 ** 6),
# Other
other_size=df.att,
other_ia=df.att / df.att.shift(1),
other_ir=(df.icapt - df.icapt.shift(1)) / (df.ebit * (df.nicon / df.pi)),
other_nopat_g=(df.icapt - df.icapt.shift(1)) / df.icapt,
other_rev_cagr_3=((df.revt / df.revt.shift(3)) ** (1 / 3)) - 1,
other_ebitda_cagr_3=((df.ebitda / df.ebitda.shift(3)) ** (1 / 3)) - 1,
).replace([np.nan, np.inf], 0.0)
return df
# %%
cap = pd.read_csv(
"../data/cap.csv", dtype={"gvkey": "object"}, parse_dates=["date"]
).drop_duplicates(subset=["date", "gvkey"], keep="last")
prc = pd.read_csv(
"../data/prc.csv",
dtype={"gvkey": "object", "volume": "Int64"},
parse_dates=["date"],
)
fund = (
pd.read_csv("../data/fund.csv", parse_dates=["date"], dtype={"gvkey": "object"})
.fillna(0)
.astype(
{
"country": "category",
"industry": "category",
"classification": "category",
}
)
)
prctg = pd.read_csv(
"../data/prctg.csv", dtype={"gvkey": "object"}, parse_dates=["date"]
).drop_duplicates(subset=["date", "gvkey"], keep="last")
surp = pd.read_csv(
"../data/surp.csv", dtype={"gvkey": "object"}, parse_dates=["date"]
).drop_duplicates(subset=["date", "gvkey"], keep="last")
# %%
gvkeys = build_gvkeys(prc, fund)
ohlcv_dict = {
"open": "first",
"high": "max",
"low": "min",
"close": "last",
"volume": "sum",
}
fund = (
fund[fund.gvkey.isin(gvkeys)]
.groupby("gvkey")
    .apply(build_fundamental)
.groupby("gvkey")
.apply(fill_year)
)
prc = (
prc[prc.gvkey.isin(gvkeys)]
.set_index(["gvkey", "date"])
.groupby("gvkey")
.resample("M", level="date")
.apply(ohlcv_dict)
)
cap = (
cap[cap.gvkey.isin(gvkeys) & (cap.date < "2016-01-01")]
.groupby("gvkey")
.apply(fill_month)
)
prctg = prctg[prctg.gvkey.isin(gvkeys)].groupby("gvkey").apply(fill_month)
surp = surp[surp.gvkey.isin(gvkeys)].groupby("gvkey").apply(fill_month)
# %%
df = pd.concat([cap, fund], join="inner", axis=1).join(prc)
df.to_parquet("data.parquet")
prc.to_csv("prc_eom.csv")
prc.to_parquet("prc_eom.parquet")
|
[
"numpy.intersect1d",
"pandas.read_csv",
"pandas.set_option",
"pandas.tseries.offsets.DateOffset",
"pandas.concat",
"pandas.to_datetime"
] |
[((120, 159), 'pandas.set_option', 'pd.set_option', (['"""display.max_rows"""', 'None'], {}), "('display.max_rows', None)\n", (133, 159), True, 'import pandas as pd\n'), ((160, 202), 'pandas.set_option', 'pd.set_option', (['"""display.max_columns"""', 'None'], {}), "('display.max_columns', None)\n", (173, 202), True, 'import pandas as pd\n'), ((8549, 8651), 'pandas.read_csv', 'pd.read_csv', (['"""../data/prc.csv"""'], {'dtype': "{'gvkey': 'object', 'volume': 'Int64'}", 'parse_dates': "['date']"}), "('../data/prc.csv', dtype={'gvkey': 'object', 'volume': 'Int64'},\n parse_dates=['date'])\n", (8560, 8651), True, 'import pandas as pd\n'), ((340, 379), 'numpy.intersect1d', 'np.intersect1d', (['gvkeys_fund', 'gvkeys_prc'], {}), '(gvkeys_fund, gvkeys_prc)\n', (354, 379), True, 'import numpy as np\n'), ((531, 557), 'pandas.to_datetime', 'pd.to_datetime', (['first_date'], {}), '(first_date)\n', (545, 557), True, 'import pandas as pd\n'), ((941, 967), 'pandas.to_datetime', 'pd.to_datetime', (['first_date'], {}), '(first_date)\n', (955, 967), True, 'import pandas as pd\n'), ((8401, 8480), 'pandas.read_csv', 'pd.read_csv', (['"""../data/cap.csv"""'], {'dtype': "{'gvkey': 'object'}", 'parse_dates': "['date']"}), "('../data/cap.csv', dtype={'gvkey': 'object'}, parse_dates=['date'])\n", (8412, 8480), True, 'import pandas as pd\n'), ((8936, 9022), 'pandas.read_csv', 'pd.read_csv', (['"""../data/prctg.csv"""'], {'dtype': "{'gvkey': 'object'}", 'parse_dates': "['date']"}), "('../data/prctg.csv', dtype={'gvkey': 'object'}, parse_dates=[\n 'date'])\n", (8947, 9022), True, 'import pandas as pd\n'), ((9087, 9172), 'pandas.read_csv', 'pd.read_csv', (['"""../data/surp.csv"""'], {'dtype': "{'gvkey': 'object'}", 'parse_dates': "['date']"}), "('../data/surp.csv', dtype={'gvkey': 'object'}, parse_dates=['date']\n )\n", (9098, 9172), True, 'import pandas as pd\n'), ((9963, 10007), 'pandas.concat', 'pd.concat', (['[cap, fund]'], {'join': '"""inner"""', 'axis': '(1)'}), "([cap, fund], join='inner', axis=1)\n", (9972, 10007), True, 'import pandas as pd\n'), ((567, 592), 'pandas.to_datetime', 'pd.to_datetime', (['last_date'], {}), '(last_date)\n', (581, 592), True, 'import pandas as pd\n'), ((595, 614), 'pandas.tseries.offsets.DateOffset', 'DateOffset', ([], {'years': '(1)'}), '(years=1)\n', (605, 614), False, 'from pandas.tseries.offsets import DateOffset\n'), ((977, 1002), 'pandas.to_datetime', 'pd.to_datetime', (['last_date'], {}), '(last_date)\n', (991, 1002), True, 'import pandas as pd\n'), ((1005, 1025), 'pandas.tseries.offsets.DateOffset', 'DateOffset', ([], {'months': '(1)'}), '(months=1)\n', (1015, 1025), False, 'from pandas.tseries.offsets import DateOffset\n'), ((8677, 8762), 'pandas.read_csv', 'pd.read_csv', (['"""../data/fund.csv"""'], {'parse_dates': "['date']", 'dtype': "{'gvkey': 'object'}"}), "('../data/fund.csv', parse_dates=['date'], dtype={'gvkey': 'object'}\n )\n", (8688, 8762), True, 'import pandas as pd\n')]
|
# -*- coding: utf-8 -*-
"""
Orbital functions
-----------------
Functions used within multiple orbital classes in Stone Soup
"""
import numpy as np
from . import dotproduct
from ..types.array import StateVector
def stumpff_s(z):
r"""The Stumpff S function
.. math::
        S(z) = \begin{cases}\frac{\sqrt{z} - \sin{\sqrt{z}}}{(\sqrt{z})^{3}}, & (z > 0)\\
               \frac{\sinh{\sqrt{-z}} - \sqrt{-z}}{(\sqrt{-z})^{3}}, & (z < 0) \\
               \frac{1}{6}, & (z = 0)\end{cases}
Parameters
----------
z : float
input parameter, :math:`z`
Returns
-------
: float
Output value, :math:`S(z)`
"""
if z > 0:
sqz = np.sqrt(z)
return (sqz - np.sin(sqz)) / sqz ** 3
elif z < 0:
sqz = np.sqrt(-z)
return (np.sinh(sqz) - sqz) / sqz ** 3
else: # which means z== 0:
return 1 / 6
def stumpff_c(z):
r"""The Stumpff C function
.. math::
        C(z) = \begin{cases}\frac{1 - \cos{\sqrt{z}}}{z}, & (z > 0)\\
               \frac{\cosh{\sqrt{-z}} - 1}{-z}, & (z < 0) \\
               \frac{1}{2}, & (z = 0)\end{cases}
Parameters
----------
z : float
input parameter, :math:`z`
Returns
-------
: float
Output value, :math:`C(z)`
"""
if z > 0:
sqz = np.sqrt(z)
return (1 - np.cos(sqz)) / sqz ** 2
elif z < 0:
sqz = np.sqrt(-z)
return (np.cosh(sqz) - 1) / sqz ** 2
else: # which means z == 0:
return 1 / 2
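# Both Stumpff functions are continuous at z = 0; a quick numerical check
# (illustrative, approximate):
#
#     abs(stumpff_s(1e-8) - 1 / 6) < 1e-6   # True
#     abs(stumpff_c(-1e-8) - 1 / 2) < 1e-6  # True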
def universal_anomaly_newton(o_state_vector, delta_t,
grav_parameter=3.986004418e14, precision=1e-8, max_iterations=1e5):
r"""Calculate the universal anomaly via Newton's method. Algorithm 3.3 in [1]_.
Parameters
----------
o_state_vector : :class:`~StateVector`
The orbital state vector formed as
:math:`[r_x, r_y, r_z, \dot{r}_x, \dot{r}_y, \dot{r}_z]^T`
delta_t : timedelta
The time interval over which to estimate the universal anomaly
grav_parameter : float, optional
The universal gravitational parameter. Defaults to that of the
Earth, :math:`3.986004418 \times 10^{14} \ \mathrm{m}^{3} \
\mathrm{s}^{-2}`
precision : float, optional
For Newton's method, the difference between new and old estimates of the universal anomaly
below which the iteration stops and the answer is returned, (default = 1e-8)
max_iterations : float, optional
Maximum number of iterations allowed in while loop (default = 1e5)
Returns
-------
: float
The universal anomaly, :math:`\chi`
References
----------
.. [1] <NAME>. 2010, Orbital Mechanics for Engineering Students, 3rd Ed., Elsevier
"""
# For convenience
mag_r_0 = np.sqrt(dotproduct(o_state_vector[0:3], o_state_vector[0:3]))
mag_v_0 = np.sqrt(dotproduct(o_state_vector[3:6], o_state_vector[3:6]))
v_rad_0 = dotproduct(o_state_vector[3:6], o_state_vector[0:3])/mag_r_0
root_mu = np.sqrt(grav_parameter)
inv_sma = 2/mag_r_0 - (mag_v_0**2)/grav_parameter
# Initial estimate of Chi
chi_i = root_mu * np.abs(inv_sma) * delta_t.total_seconds()
ratio = 1
count = 0
# Do Newton's method
while np.abs(ratio) > precision and count <= max_iterations:
z_i = inv_sma * chi_i ** 2
f_chi_i = mag_r_0 * v_rad_0 / root_mu * chi_i ** 2 * \
stumpff_c(z_i) + (1 - inv_sma * mag_r_0) * chi_i ** 3 * \
stumpff_s(z_i) + mag_r_0 * chi_i - root_mu * \
delta_t.total_seconds()
fp_chi_i = mag_r_0 * v_rad_0 / root_mu * chi_i * \
(1 - inv_sma * chi_i ** 2 * stumpff_s(z_i)) + \
(1 - inv_sma * mag_r_0) * chi_i ** 2 * stumpff_c(z_i) + \
mag_r_0
ratio = f_chi_i / fp_chi_i
chi_i = chi_i - ratio
count += 1
return chi_i
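# A minimal usage sketch with a hypothetical Cartesian state in SI units:
#
#     from datetime import timedelta
#     state = StateVector([7000.e3, 0., 0., 0., 7.5e3, 0.])  # position (m), velocity (m/s)
#     chi = universal_anomaly_newton(state, timedelta(minutes=10))
#
# The returned universal anomaly feeds directly into the Lagrange-coefficient
# routine below.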
def lagrange_coefficients_from_universal_anomaly(o_state_vector, delta_t,
grav_parameter=3.986004418e14,
precision=1e-8, max_iterations=1e5):
r""" Calculate the Lagrangian coefficients, f and g, and their time derivatives, by way of the
universal anomaly and the Stumpff functions [2]_.
Parameters
----------
o_state_vector : StateVector
The (Cartesian) orbital state vector,
:math:`[r_x, r_y, r_z, \dot{r}_x, \dot{r}_y, \dot{r}_z]^T`
delta_t : timedelta
The time interval over which to calculate
grav_parameter : float, optional
The universal gravitational parameter. Defaults to that of the
Earth, :math:`3.986004418 \times 10^{14} \ \mathrm{m}^{3} \
\mathrm{s}^{-2}`. Note that the units of time must be seconds.
precision : float, optional
Precision to which to calculate the :meth:`universal anomaly` (default = 1e-8). See the doc
section for that function
max_iterations : float, optional
Maximum number of iterations in determining universal anomaly (default = 1e5)
Returns
-------
: float, float, float, float
The Lagrange coefficients, :math:`f, g, \dot{f}, \dot{g}`, in that order.
References
----------
.. [2] <NAME>., <NAME>. 1996, Modern Astrodynamics: Fundamentals and Perturbation
Methods, Princeton University Press
"""
# First get the universal anomaly using Newton's method
chii = universal_anomaly_newton(o_state_vector, delta_t,
grav_parameter=grav_parameter,
precision=precision, max_iterations=max_iterations)
# Get the position and velocity vectors
bold_r_0 = o_state_vector[0:3]
bold_v_0 = o_state_vector[3:6]
# Calculate the magnitude of the position and velocity vectors
r_0 = np.sqrt(dotproduct(bold_r_0, bold_r_0))
v_0 = np.sqrt(dotproduct(bold_v_0, bold_v_0))
# For convenience
root_mu = np.sqrt(grav_parameter)
inv_sma = 2 / r_0 - (v_0 ** 2) / grav_parameter
z = inv_sma * chii ** 2
    # Get the Lagrange coefficients using the Stumpff functions
f = 1 - chii ** 2 / r_0 * stumpff_c(z)
g = delta_t.total_seconds() - 1 / root_mu * chii ** 3 * \
stumpff_s(z)
# Get the position vector and magnitude of that vector
bold_r = f * bold_r_0 + g * bold_v_0
r = np.sqrt(dotproduct(bold_r, bold_r))
    # and the Lagrange (time) derivatives, also using the Stumpff functions
fdot = root_mu / (r * r_0) * (inv_sma * chii ** 3 * stumpff_s(z) - chii)
gdot = 1 - (chii ** 2 / r) * stumpff_c(z)
return f, g, fdot, gdot
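# The returned coefficients propagate a Cartesian state over delta_t in the standard
# f-and-g form (names here are hypothetical):
#
#     f, g, fdot, gdot = lagrange_coefficients_from_universal_anomaly(state, delta_t)
#     r_new = f * state[0:3] + g * state[3:6]
#     v_new = fdot * state[0:3] + gdot * state[3:6]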
def eccentric_anomaly_from_mean_anomaly(mean_anomaly, eccentricity,
precision=1e-8, max_iterations=1e5):
r"""Approximately solve the transcendental equation :math:`E - e sin E = M_e` for E. This is
an iterative process using Newton's method.
Parameters
----------
mean_anomaly : float
Current mean anomaly
eccentricity : float
Orbital eccentricity
precision : float, optional
Precision used for the stopping point in determining eccentric anomaly from mean anomaly,
(default = 1e-8)
max_iterations : float, optional
Maximum number of iterations for the while loop, (default = 1e5)
Returns
-------
: float
Eccentric anomaly of the orbit
"""
if mean_anomaly < np.pi:
ecc_anomaly = mean_anomaly + eccentricity / 2
else:
ecc_anomaly = mean_anomaly - eccentricity / 2
ratio = 1
count = 0
while np.abs(ratio) > precision and count <= max_iterations:
f = ecc_anomaly - eccentricity * np.sin(ecc_anomaly) - mean_anomaly
fp = 1 - eccentricity * np.cos(ecc_anomaly)
ratio = f / fp # Need to check conditioning
ecc_anomaly = ecc_anomaly - ratio
count += 1
return ecc_anomaly # Check whether this ever goes outside 0 < 2pi
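# The returned value satisfies Kepler's equation E - e*sin(E) = M_e to roughly the
# requested precision; an illustrative check with hypothetical inputs:
#
#     ecc_anom = eccentric_anomaly_from_mean_anomaly(1.0, 0.1)
#     residual = ecc_anom - 0.1 * np.sin(ecc_anom) - 1.0   # ~0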
def tru_anom_from_mean_anom(mean_anomaly, eccentricity, precision=1e-8, max_iterations=1e5):
r"""Get the true anomaly from the mean anomaly via the eccentric anomaly
Parameters
----------
mean_anomaly : float
The mean anomaly
eccentricity : float
Eccentricity
precision : float, optional
Precision used for the stopping point in determining eccentric anomaly from mean anomaly,
(default = 1e-8)
max_iterations : float, optional
Maximum number of iterations in determining eccentric anomaly, (default = 1e5)
Returns
-------
: float
True anomaly
"""
cos_ecc_anom = np.cos(eccentric_anomaly_from_mean_anomaly(
mean_anomaly, eccentricity, precision=precision, max_iterations=max_iterations))
sin_ecc_anom = np.sin(eccentric_anomaly_from_mean_anomaly(
mean_anomaly, eccentricity, precision=precision, max_iterations=max_iterations))
# This only works for M_e < \pi
# return np.arccos(np.clip((eccentricity - cos_ecc_anom) /
# (eccentricity*cos_ecc_anom - 1), -1, 1))
return np.remainder(np.arctan2(np.sqrt(1 - eccentricity**2) *
sin_ecc_anom,
cos_ecc_anom - eccentricity), 2*np.pi)
def perifocal_position(eccentricity, semimajor_axis, true_anomaly):
r"""The position vector in perifocal coordinates calculated from the Keplerian elements
Parameters
----------
eccentricity : float
Orbit eccentricity
semimajor_axis : float
Orbit semi-major axis
    true_anomaly : float
Orbit true anomaly
Returns
-------
: numpy.array
:math:`[r_x, r_y, r_z]` position in perifocal coordinates
"""
# Cache some trigonometric functions
c_tran = np.cos(true_anomaly)
s_tran = np.sin(true_anomaly)
return semimajor_axis * (1 - eccentricity ** 2) / \
(1 + eccentricity * c_tran) * np.array([[c_tran], [s_tran],
[0]])
def perifocal_velocity(eccentricity, semimajor_axis, true_anomaly,
grav_parameter=3.986004418e14):
r"""The velocity vector in perifocal coordinates calculated from the Keplerian elements
Parameters
----------
eccentricity : float
Orbit eccentricity
semimajor_axis : float
Orbit semi-major axis
true_anomaly : float
Orbit true anomaly
grav_parameter : float, optional
Standard gravitational parameter :math:`\mu = G M`. Default is
:math:`3.986004418 \times 10^{14} \mathrm{m}^3 \mathrm{s}^{-2}`
Returns
-------
    : numpy.ndarray
:math:`[\dot{r}_x, \dot{r}_y, \dot{r}_z]` velocity in perifocal coordinates
"""
# Cache some trigonometric functions
c_tran = np.cos(true_anomaly)
s_tran = np.sin(true_anomaly)
return np.sqrt(grav_parameter / (semimajor_axis * (1 - eccentricity**2)))\
* np.array([[-s_tran], [eccentricity + c_tran], [0]])
def perifocal_to_geocentric_matrix(inclination, raan, argp):
r"""Return the matrix which transforms from perifocal to geocentric coordinates
Parameters
----------
inclination : float
Orbital inclination
raan : float
Orbit Right Ascension of the ascending node
argp : float
The orbit's argument of periapsis
Returns
-------
: numpy.array
The :math:`3 \times 3` array that transforms from perifocal coordinates to geocentric
coordinates
"""
# Cache some trig functions
s_incl = np.sin(inclination)
c_incl = np.cos(inclination)
s_raan = np.sin(raan)
c_raan = np.cos(raan)
s_aper = np.sin(argp)
c_aper = np.cos(argp)
# Build the matrix
return np.array([[-s_raan * c_incl * s_aper + c_raan * c_aper,
-s_raan * c_incl * c_aper - c_raan * s_aper,
s_raan * s_incl],
[c_raan * c_incl * s_aper + s_raan * c_aper,
c_raan * c_incl * c_aper - s_raan * s_aper,
-c_raan * s_incl],
[s_incl * s_aper, s_incl * c_aper, c_incl]])
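# The matrix above is a pure rotation, so its transpose performs the inverse
# (geocentric -> perifocal) transform; with hypothetical inputs:
#
#     rot = perifocal_to_geocentric_matrix(incl, raan, argp)
#     r_perifocal = rot.T @ r_geocentric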
def keplerian_to_rv(state_vector, grav_parameter=3.986004418e14):
r"""Convert the Keplerian orbital elements to position, velocity state vector
Parameters
----------
state_vector : :class:`~.StateVector`
The Keplerian orbital state vector is defined as
.. math::
X = [e, a, i, \Omega, \omega, \theta]^{T} \\
where:
:math:`e` is the orbital eccentricity (unitless),
:math:`a` the semi-major axis (m),
:math:`i` the inclination (rad),
:math:`\Omega` is the longitude of the ascending node (rad),
:math:`\omega` the argument of periapsis (rad), and
:math:`\theta` the true anomaly (rad)
grav_parameter : float, optional
Standard gravitational parameter :math:`\mu = G M`. The default is :math:`3.986004418
\times 10^{14} \mathrm{m}^3 \mathrm{s}^{-2}`
Returns
-------
: :class:`~.StateVector`
Orbital state vector as :math:`[r_x, r_y, r_z, \dot{r}_x, \dot{r}_y, \dot{r}_z]`
Warning
-------
No checking undertaken. Assumes Keplerian elements rendered correctly as above
"""
# Calculate the position vector in perifocal coordinates
rx = perifocal_position(state_vector[0], state_vector[1], state_vector[5])
# Calculate the velocity vector in perifocal coordinates
vx = perifocal_velocity(state_vector[0], state_vector[1], state_vector[5],
grav_parameter=grav_parameter)
# Transform position (perifocal) and velocity (perifocal)
# into geocentric
r = perifocal_to_geocentric_matrix(state_vector[2], state_vector[3], state_vector[4]) @ rx
v = perifocal_to_geocentric_matrix(state_vector[2], state_vector[3], state_vector[4]) @ vx
# And put them into the state vector
return StateVector(np.concatenate((r, v), axis=0))
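# A usage sketch with hypothetical element values (angles in radians, lengths in metres):
#
#     kep = StateVector([0.01, 7000.e3, 0.5, 1.0, 0.3, 0.0])  # [e, a, i, raan, argp, theta]
#     cart = keplerian_to_rv(kep)  # -> [r_x, r_y, r_z, rdot_x, rdot_y, rdot_z]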
def mod_inclination(x):
r"""Calculates the modulus of an inclination. Inclination angles are within the range :math:`0`
to :math:`\pi`.
Parameters
----------
x: float
inclination angle in radians
Returns
-------
float
Angle in radians in the range :math:`0` to :math:`+\pi`
"""
x = x % np.pi
return x
def mod_elongitude(x):
r"""Calculates the modulus of an ecliptic longitude in which angles are within the range
:math:`0` to :math:`2 \pi`.
Parameters
----------
x: float
longitudinal angle in radians
Returns
-------
float
Angle in radians in the range :math:`0` to :math:`+2 \pi`
"""
x = x % (2*np.pi)
return x
|
[
"numpy.abs",
"numpy.sqrt",
"numpy.sinh",
"numpy.array",
"numpy.cos",
"numpy.concatenate",
"numpy.cosh",
"numpy.sin"
] |
[((3063, 3086), 'numpy.sqrt', 'np.sqrt', (['grav_parameter'], {}), '(grav_parameter)\n', (3070, 3086), True, 'import numpy as np\n'), ((6021, 6044), 'numpy.sqrt', 'np.sqrt', (['grav_parameter'], {}), '(grav_parameter)\n', (6028, 6044), True, 'import numpy as np\n'), ((9816, 9836), 'numpy.cos', 'np.cos', (['true_anomaly'], {}), '(true_anomaly)\n', (9822, 9836), True, 'import numpy as np\n'), ((9850, 9870), 'numpy.sin', 'np.sin', (['true_anomaly'], {}), '(true_anomaly)\n', (9856, 9870), True, 'import numpy as np\n'), ((10830, 10850), 'numpy.cos', 'np.cos', (['true_anomaly'], {}), '(true_anomaly)\n', (10836, 10850), True, 'import numpy as np\n'), ((10864, 10884), 'numpy.sin', 'np.sin', (['true_anomaly'], {}), '(true_anomaly)\n', (10870, 10884), True, 'import numpy as np\n'), ((11597, 11616), 'numpy.sin', 'np.sin', (['inclination'], {}), '(inclination)\n', (11603, 11616), True, 'import numpy as np\n'), ((11630, 11649), 'numpy.cos', 'np.cos', (['inclination'], {}), '(inclination)\n', (11636, 11649), True, 'import numpy as np\n'), ((11664, 11676), 'numpy.sin', 'np.sin', (['raan'], {}), '(raan)\n', (11670, 11676), True, 'import numpy as np\n'), ((11690, 11702), 'numpy.cos', 'np.cos', (['raan'], {}), '(raan)\n', (11696, 11702), True, 'import numpy as np\n'), ((11717, 11729), 'numpy.sin', 'np.sin', (['argp'], {}), '(argp)\n', (11723, 11729), True, 'import numpy as np\n'), ((11743, 11755), 'numpy.cos', 'np.cos', (['argp'], {}), '(argp)\n', (11749, 11755), True, 'import numpy as np\n'), ((11791, 12074), 'numpy.array', 'np.array', (['[[-s_raan * c_incl * s_aper + c_raan * c_aper, -s_raan * c_incl * c_aper - \n c_raan * s_aper, s_raan * s_incl], [c_raan * c_incl * s_aper + s_raan *\n c_aper, c_raan * c_incl * c_aper - s_raan * s_aper, -c_raan * s_incl],\n [s_incl * s_aper, s_incl * c_aper, c_incl]]'], {}), '([[-s_raan * c_incl * s_aper + c_raan * c_aper, -s_raan * c_incl *\n c_aper - c_raan * s_aper, s_raan * s_incl], [c_raan * c_incl * s_aper +\n s_raan * c_aper, c_raan * c_incl * c_aper - s_raan * s_aper, -c_raan *\n s_incl], [s_incl * s_aper, s_incl * c_aper, c_incl]])\n', (11799, 12074), True, 'import numpy as np\n'), ((703, 713), 'numpy.sqrt', 'np.sqrt', (['z'], {}), '(z)\n', (710, 713), True, 'import numpy as np\n'), ((1350, 1360), 'numpy.sqrt', 'np.sqrt', (['z'], {}), '(z)\n', (1357, 1360), True, 'import numpy as np\n'), ((9966, 10001), 'numpy.array', 'np.array', (['[[c_tran], [s_tran], [0]]'], {}), '([[c_tran], [s_tran], [0]])\n', (9974, 10001), True, 'import numpy as np\n'), ((10897, 10965), 'numpy.sqrt', 'np.sqrt', (['(grav_parameter / (semimajor_axis * (1 - eccentricity ** 2)))'], {}), '(grav_parameter / (semimajor_axis * (1 - eccentricity ** 2)))\n', (10904, 10965), True, 'import numpy as np\n'), ((10975, 11026), 'numpy.array', 'np.array', (['[[-s_tran], [eccentricity + c_tran], [0]]'], {}), '([[-s_tran], [eccentricity + c_tran], [0]])\n', (10983, 11026), True, 'import numpy as np\n'), ((14002, 14032), 'numpy.concatenate', 'np.concatenate', (['(r, v)'], {'axis': '(0)'}), '((r, v), axis=0)\n', (14016, 14032), True, 'import numpy as np\n'), ((790, 801), 'numpy.sqrt', 'np.sqrt', (['(-z)'], {}), '(-z)\n', (797, 801), True, 'import numpy as np\n'), ((1435, 1446), 'numpy.sqrt', 'np.sqrt', (['(-z)'], {}), '(-z)\n', (1442, 1446), True, 'import numpy as np\n'), ((3194, 3209), 'numpy.abs', 'np.abs', (['inv_sma'], {}), '(inv_sma)\n', (3200, 3209), True, 'import numpy as np\n'), ((3300, 3313), 'numpy.abs', 'np.abs', (['ratio'], {}), '(ratio)\n', (3306, 3313), True, 'import numpy as np\n'), ((7626, 
7639), 'numpy.abs', 'np.abs', (['ratio'], {}), '(ratio)\n', (7632, 7639), True, 'import numpy as np\n'), ((736, 747), 'numpy.sin', 'np.sin', (['sqz'], {}), '(sqz)\n', (742, 747), True, 'import numpy as np\n'), ((1381, 1392), 'numpy.cos', 'np.cos', (['sqz'], {}), '(sqz)\n', (1387, 1392), True, 'import numpy as np\n'), ((7789, 7808), 'numpy.cos', 'np.cos', (['ecc_anomaly'], {}), '(ecc_anomaly)\n', (7795, 7808), True, 'import numpy as np\n'), ((9143, 9173), 'numpy.sqrt', 'np.sqrt', (['(1 - eccentricity ** 2)'], {}), '(1 - eccentricity ** 2)\n', (9150, 9173), True, 'import numpy as np\n'), ((818, 830), 'numpy.sinh', 'np.sinh', (['sqz'], {}), '(sqz)\n', (825, 830), True, 'import numpy as np\n'), ((1463, 1475), 'numpy.cosh', 'np.cosh', (['sqz'], {}), '(sqz)\n', (1470, 1475), True, 'import numpy as np\n'), ((7722, 7741), 'numpy.sin', 'np.sin', (['ecc_anomaly'], {}), '(ecc_anomaly)\n', (7728, 7741), True, 'import numpy as np\n')]
|
import numpy as np
class Player:
def __init__(self, strategy=0):
"""
:param strategy: defines the strategy to be played by the player as
0: always defect
            1: always cooperate
2: random
3: tit for tat
4: tit for two tats
default: 0
"""
start_capital = 100
self.accumulated_resources = [start_capital]
self.actions = []
self.average_utility = []
self.utility = []
self.strategy = strategy
self.strategy_name = ""
self.set_strategy_name()
def alive(self):
if(self.accumulated_resources[-1]> 0):
return 1
else:
return 0
def update_history(self, u, s, t):
self.actions.append(s)
self.utility.append(u)
avg = sum(self.utility) / t
self.average_utility.append(avg)
resource = self.accumulated_resources[-1] + u
self.accumulated_resources.append(resource)
def get_strategy(self, me, opponent, t):
if(self.strategy == 0):
return self.always_defect(me, opponent, t)
elif (self.strategy == 1):
return self.always_coorporate(me, opponent, t)
elif (self.strategy == 2):
return self.random(me, opponent, t)
elif (self.strategy == 3):
return self.tit_for_tat(me, opponent, t)
elif (self.strategy == 4):
return self.tit_f2_tat(me, opponent, t)
else:
return 0 # default to 0
def set_strategy_name(self):
if(self.strategy == 0):
self.strategy_name = 'Always defect'
elif (self.strategy == 1):
            self.strategy_name = 'Always cooperate'
elif (self.strategy == 2):
self.strategy_name = 'Random'
elif (self.strategy == 3):
self.strategy_name = 'Tit for tat'
elif (self.strategy == 4):
self.strategy_name = 'Tit for two tats'
def tit_f2_tat(self, me, opponent, t):
if (t <= 2):
return (1) # cooperate round 0 and 1
if (opponent[-2] == 0 and opponent[-1] == 0):
return (0) # defect if last two opponent moves were defect
return (1) # otherwise cooperate
def tit_for_tat(self, me, opponent, t):
if (t <= 1):
return (1)
return (opponent[-1])
def always_defect(self, me, opponent, t):
return (0)
def always_coorporate(self, me, opponent, t):
return (1)
def random(self, me, opponent, t):
return (round(np.random.rand()))
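# A sketch of one round of an iterated game between two players; the payoffs u1/u2
# are hypothetical and would come from whatever payoff matrix the surrounding
# tournament code defines:
#
#     p1, p2 = Player(strategy=3), Player(strategy=0)  # tit for tat vs. always defect
#     t = 1
#     s1 = p1.get_strategy(p1.actions, p2.actions, t)
#     s2 = p2.get_strategy(p2.actions, p1.actions, t)
#     p1.update_history(u1, s1, t)  # u1, u2: payoffs looked up from (s1, s2)
#     p2.update_history(u2, s2, t)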
|
[
"numpy.random.rand"
] |
[((2579, 2595), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (2593, 2595), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import numpy
import sys
import subprocess
import platform
import shutil
import distutils.spawn
from setuptools import setup, Extension
from setuptools.command.sdist import sdist
from distutils.command.build_ext import build_ext
# some paranoia to start with
# if platform.architecture()[0] != '64bit':
# raise Exception('DuckDB only supports 64 bit at this point')
# make sure we are in the right directory
os.chdir(os.path.dirname(os.path.realpath(__file__)))
ARCHIVE_EXT = 'a'
LIB_PREFIX = 'lib'
if os.name == 'nt':
ARCHIVE_EXT = 'lib'
LIB_PREFIX = 'Release/'
DIR_PREFIX = 'src/duckdb'
if not os.path.exists(DIR_PREFIX):
# this is a build from within the tools/pythonpkg directory
DIR_PREFIX = '../../'
def get_library_name(lib):
return LIB_PREFIX + lib + '.' + ARCHIVE_EXT
DEFAULT_BUILD_DIR = os.path.join(DIR_PREFIX, 'build', 'release_notest')
BUILD_DIR = DEFAULT_BUILD_DIR
if 'DUCKDB_PYTHON_TARGET' in os.environ:
BUILD_DIR = os.environ['DUCKDB_PYTHON_TARGET']
INCLUDE_DIR = os.path.join(DIR_PREFIX, 'src', 'include')
DUCKDB_LIB = os.path.join(BUILD_DIR, 'src', get_library_name('duckdb_static'))
PG_LIB = os.path.join(BUILD_DIR, 'third_party', 'libpg_query', get_library_name('pg_query'))
RE2_LIB = os.path.join(BUILD_DIR, 'third_party', 're2', get_library_name('re2'))
MINIZ_LIB = os.path.join(BUILD_DIR, 'third_party', 'miniz', get_library_name('miniz'))
# wrapper that builds the main DuckDB library first
class CustomBuiltExtCommand(build_ext):
def build_duckdb(self):
cmake_bin = distutils.spawn.find_executable('cmake')
if (cmake_bin is None):
raise Exception('DuckDB needs cmake to build from source')
wd = os.getcwd()
os.chdir(DIR_PREFIX)
if not os.path.exists('build/release_notest'):
os.makedirs('build/release_notest')
os.chdir('build/release_notest')
configcmd = 'cmake -DCMAKE_BUILD_TYPE=Release -DLEAN=1 ../..'
buildcmd = 'cmake --build . --target duckdb_static'
if os.name == 'nt':
if platform.architecture()[0] == '64bit':
configcmd += ' -DCMAKE_GENERATOR_PLATFORM=x64'
buildcmd += ' --config Release'
subprocess.Popen(configcmd.split(' ')).wait()
subprocess.Popen(buildcmd.split(' ')).wait()
os.chdir(wd)
def run(self):
if BUILD_DIR == DEFAULT_BUILD_DIR:
self.build_duckdb()
for library in [DUCKDB_LIB, PG_LIB, RE2_LIB, MINIZ_LIB]:
if not os.path.isfile(library):
raise Exception('Build failed: could not find required library file "%s"' % library)
print(INCLUDE_DIR)
build_ext.run(self)
# create a distributable directory structure
class CustomSdistCommand(sdist):
def run(self):
if os.path.exists('src/duckdb'):
shutil.rmtree('src/duckdb')
if not os.path.exists('src/duckdb/third_party'):
os.makedirs('src/duckdb/third_party')
shutil.copyfile('../../CMakeLists.txt', 'src/duckdb/CMakeLists.txt')
shutil.copyfile('../../third_party/CMakeLists.txt', 'src/duckdb/third_party/CMakeLists.txt')
shutil.copytree('../../src', 'src/duckdb/src')
shutil.copytree('../../third_party/libpg_query', 'src/duckdb/third_party/libpg_query')
shutil.copytree('../../third_party/hyperloglog', 'src/duckdb/third_party/hyperloglog')
shutil.copytree('../../third_party/re2', 'src/duckdb/third_party/re2')
shutil.copytree('../../third_party/miniz', 'src/duckdb/third_party/miniz')
sdist.run(self)
includes = [numpy.get_include(), INCLUDE_DIR, '.']
sources = ['connection.cpp', 'cursor.cpp', 'module.cpp']
toolchain_args = ['-std=c++11', '-Wall']
if platform.system() == 'Darwin':
toolchain_args.extend(['-stdlib=libc++', '-mmacosx-version-min=10.7'])
libduckdb = Extension('duckdb',
include_dirs=includes,
sources=sources,
extra_compile_args=toolchain_args,
extra_link_args=toolchain_args,
language='c++',
extra_objects=[DUCKDB_LIB, PG_LIB, RE2_LIB, MINIZ_LIB])
# Only include pytest-runner in setup_requires if we're invoking tests
if {'pytest', 'test', 'ptr'}.intersection(sys.argv):
setup_requires = ['pytest-runner']
else:
setup_requires = []
setup(
name = "duckdb",
version = '0.1.0',
description = 'DuckDB embedded database',
keywords = 'DuckDB Database SQL OLAP',
url="https://github.com/cwida/duckdb",
long_description = '',
install_requires=[
'numpy>=1.16',
'pandas>=0.24'
],
packages=['duckdb_query_graph'],
include_package_data=True,
setup_requires=setup_requires,
tests_require=['pytest'],
classifiers = [
'Topic :: Database :: Database Engines/Servers',
'Intended Audience :: Developers',
'Development Status :: 3 - Alpha'
],
cmdclass={
'build_ext': CustomBuiltExtCommand,
'sdist': CustomSdistCommand
},
ext_modules = [libduckdb],
maintainer = "<NAME>",
maintainer_email = "<EMAIL>"
)
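# With the custom commands above, `python setup.py build_ext` first builds the static
# DuckDB libraries via CMake (skipped when DUCKDB_PYTHON_TARGET points at an existing
# build directory), and `python setup.py sdist` copies the DuckDB sources into
# src/duckdb before packaging.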
|
[
"os.path.exists",
"os.makedirs",
"distutils.command.build_ext.build_ext.run",
"os.path.join",
"setuptools.setup",
"setuptools.command.sdist.sdist.run",
"os.getcwd",
"setuptools.Extension",
"os.path.realpath",
"platform.system",
"os.chdir",
"shutil.copyfile",
"shutil.copytree",
"shutil.rmtree",
"numpy.get_include",
"os.path.isfile",
"platform.architecture"
] |
[((886, 937), 'os.path.join', 'os.path.join', (['DIR_PREFIX', '"""build"""', '"""release_notest"""'], {}), "(DIR_PREFIX, 'build', 'release_notest')\n", (898, 937), False, 'import os\n'), ((1077, 1119), 'os.path.join', 'os.path.join', (['DIR_PREFIX', '"""src"""', '"""include"""'], {}), "(DIR_PREFIX, 'src', 'include')\n", (1089, 1119), False, 'import os\n'), ((3927, 4133), 'setuptools.Extension', 'Extension', (['"""duckdb"""'], {'include_dirs': 'includes', 'sources': 'sources', 'extra_compile_args': 'toolchain_args', 'extra_link_args': 'toolchain_args', 'language': '"""c++"""', 'extra_objects': '[DUCKDB_LIB, PG_LIB, RE2_LIB, MINIZ_LIB]'}), "('duckdb', include_dirs=includes, sources=sources,\n extra_compile_args=toolchain_args, extra_link_args=toolchain_args,\n language='c++', extra_objects=[DUCKDB_LIB, PG_LIB, RE2_LIB, MINIZ_LIB])\n", (3936, 4133), False, 'from setuptools import setup, Extension\n'), ((4345, 5014), 'setuptools.setup', 'setup', ([], {'name': '"""duckdb"""', 'version': '"""0.1.0"""', 'description': '"""DuckDB embedded database"""', 'keywords': '"""DuckDB Database SQL OLAP"""', 'url': '"""https://github.com/cwida/duckdb"""', 'long_description': '""""""', 'install_requires': "['numpy>=1.16', 'pandas>=0.24']", 'packages': "['duckdb_query_graph']", 'include_package_data': '(True)', 'setup_requires': 'setup_requires', 'tests_require': "['pytest']", 'classifiers': "['Topic :: Database :: Database Engines/Servers',\n 'Intended Audience :: Developers', 'Development Status :: 3 - Alpha']", 'cmdclass': "{'build_ext': CustomBuiltExtCommand, 'sdist': CustomSdistCommand}", 'ext_modules': '[libduckdb]', 'maintainer': '"""<NAME>"""', 'maintainer_email': '"""<EMAIL>"""'}), "(name='duckdb', version='0.1.0', description=\n 'DuckDB embedded database', keywords='DuckDB Database SQL OLAP', url=\n 'https://github.com/cwida/duckdb', long_description='',\n install_requires=['numpy>=1.16', 'pandas>=0.24'], packages=[\n 'duckdb_query_graph'], include_package_data=True, setup_requires=\n setup_requires, tests_require=['pytest'], classifiers=[\n 'Topic :: Database :: Database Engines/Servers',\n 'Intended Audience :: Developers', 'Development Status :: 3 - Alpha'],\n cmdclass={'build_ext': CustomBuiltExtCommand, 'sdist':\n CustomSdistCommand}, ext_modules=[libduckdb], maintainer='<NAME>',\n maintainer_email='<EMAIL>')\n", (4350, 5014), False, 'from setuptools import setup, Extension\n'), ((670, 696), 'os.path.exists', 'os.path.exists', (['DIR_PREFIX'], {}), '(DIR_PREFIX)\n', (684, 696), False, 'import os\n'), ((3667, 3686), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (3684, 3686), False, 'import numpy\n'), ((3808, 3825), 'platform.system', 'platform.system', ([], {}), '()\n', (3823, 3825), False, 'import platform\n'), ((497, 523), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (513, 523), False, 'import os\n'), ((1762, 1773), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1771, 1773), False, 'import os\n'), ((1782, 1802), 'os.chdir', 'os.chdir', (['DIR_PREFIX'], {}), '(DIR_PREFIX)\n', (1790, 1802), False, 'import os\n'), ((1914, 1946), 'os.chdir', 'os.chdir', (['"""build/release_notest"""'], {}), "('build/release_notest')\n", (1922, 1946), False, 'import os\n'), ((2385, 2397), 'os.chdir', 'os.chdir', (['wd'], {}), '(wd)\n', (2393, 2397), False, 'import os\n'), ((2739, 2758), 'distutils.command.build_ext.build_ext.run', 'build_ext.run', (['self'], {}), '(self)\n', (2752, 2758), False, 'from distutils.command.build_ext import build_ext\n'), ((2868, 2896), 
'os.path.exists', 'os.path.exists', (['"""src/duckdb"""'], {}), "('src/duckdb')\n", (2882, 2896), False, 'import os\n'), ((3053, 3121), 'shutil.copyfile', 'shutil.copyfile', (['"""../../CMakeLists.txt"""', '"""src/duckdb/CMakeLists.txt"""'], {}), "('../../CMakeLists.txt', 'src/duckdb/CMakeLists.txt')\n", (3068, 3121), False, 'import shutil\n'), ((3130, 3226), 'shutil.copyfile', 'shutil.copyfile', (['"""../../third_party/CMakeLists.txt"""', '"""src/duckdb/third_party/CMakeLists.txt"""'], {}), "('../../third_party/CMakeLists.txt',\n 'src/duckdb/third_party/CMakeLists.txt')\n", (3145, 3226), False, 'import shutil\n'), ((3231, 3277), 'shutil.copytree', 'shutil.copytree', (['"""../../src"""', '"""src/duckdb/src"""'], {}), "('../../src', 'src/duckdb/src')\n", (3246, 3277), False, 'import shutil\n'), ((3286, 3376), 'shutil.copytree', 'shutil.copytree', (['"""../../third_party/libpg_query"""', '"""src/duckdb/third_party/libpg_query"""'], {}), "('../../third_party/libpg_query',\n 'src/duckdb/third_party/libpg_query')\n", (3301, 3376), False, 'import shutil\n'), ((3381, 3471), 'shutil.copytree', 'shutil.copytree', (['"""../../third_party/hyperloglog"""', '"""src/duckdb/third_party/hyperloglog"""'], {}), "('../../third_party/hyperloglog',\n 'src/duckdb/third_party/hyperloglog')\n", (3396, 3471), False, 'import shutil\n'), ((3476, 3546), 'shutil.copytree', 'shutil.copytree', (['"""../../third_party/re2"""', '"""src/duckdb/third_party/re2"""'], {}), "('../../third_party/re2', 'src/duckdb/third_party/re2')\n", (3491, 3546), False, 'import shutil\n'), ((3555, 3629), 'shutil.copytree', 'shutil.copytree', (['"""../../third_party/miniz"""', '"""src/duckdb/third_party/miniz"""'], {}), "('../../third_party/miniz', 'src/duckdb/third_party/miniz')\n", (3570, 3629), False, 'import shutil\n'), ((3638, 3653), 'setuptools.command.sdist.sdist.run', 'sdist.run', (['self'], {}), '(self)\n', (3647, 3653), False, 'from setuptools.command.sdist import sdist\n'), ((1818, 1856), 'os.path.exists', 'os.path.exists', (['"""build/release_notest"""'], {}), "('build/release_notest')\n", (1832, 1856), False, 'import os\n'), ((1870, 1905), 'os.makedirs', 'os.makedirs', (['"""build/release_notest"""'], {}), "('build/release_notest')\n", (1881, 1905), False, 'import os\n'), ((2910, 2937), 'shutil.rmtree', 'shutil.rmtree', (['"""src/duckdb"""'], {}), "('src/duckdb')\n", (2923, 2937), False, 'import shutil\n'), ((2953, 2993), 'os.path.exists', 'os.path.exists', (['"""src/duckdb/third_party"""'], {}), "('src/duckdb/third_party')\n", (2967, 2993), False, 'import os\n'), ((3007, 3044), 'os.makedirs', 'os.makedirs', (['"""src/duckdb/third_party"""'], {}), "('src/duckdb/third_party')\n", (3018, 3044), False, 'import os\n'), ((2578, 2601), 'os.path.isfile', 'os.path.isfile', (['library'], {}), '(library)\n', (2592, 2601), False, 'import os\n'), ((2122, 2145), 'platform.architecture', 'platform.architecture', ([], {}), '()\n', (2143, 2145), False, 'import platform\n')]
|
# -*- encoding: utf-8 -*-
from nose.tools import *
from nose import SkipTest
import networkx as nx
from networkx.utils import *
def test_is_string_like():
assert_true(is_string_like("aaaa"))
assert_false(is_string_like(None))
assert_false(is_string_like(123))
def test_iterable():
assert_false(iterable(None))
assert_false(iterable(10))
assert_true(iterable([1,2,3]))
assert_true(iterable((1,2,3)))
assert_true(iterable({1:"A",2:"X"}))
assert_true(iterable("ABC"))
def test_graph_iterable():
K=nx.complete_graph(10)
assert_true(iterable(K))
assert_true(iterable(K.nodes_iter()))
assert_true(iterable(K.edges_iter()))
def test_is_list_of_ints():
assert_true(is_list_of_ints([1,2,3,42]))
assert_false(is_list_of_ints([1,2,3,"kermit"]))
def test_random_number_distribution():
# smoke test only
z=uniform_sequence(20)
z=powerlaw_sequence(20,exponent=2.5)
z=pareto_sequence(20,exponent=1.5)
z=discrete_sequence(20,distribution=[0,0,0,0,1,1,1,1,2,2,3])
def test_make_str_with_bytes():
import sys
PY2 = sys.version_info[0] == 2
x = "qualité"
y = make_str(x)
if PY2:
assert_true(isinstance(y, unicode))
# Since file encoding is utf-8, the é will be two bytes.
assert_true(len(y) == 8)
else:
assert_true(isinstance(y, str))
assert_true(len(y) == 7)
def test_make_str_with_unicode():
import sys
PY2 = sys.version_info[0] == 2
if PY2:
x = unicode("qualité", encoding='utf-8')
y = make_str(x)
assert_true(isinstance(y, unicode))
assert_true(len(y) == 7)
else:
x = "qualité"
y = make_str(x)
assert_true(isinstance(y, str))
assert_true(len(y) == 7)
class TestNumpyArray(object):
numpy=1 # nosetests attribute, use nosetests -a 'not numpy' to skip test
@classmethod
def setupClass(cls):
global numpy
global assert_equal
global assert_almost_equal
try:
import numpy
from numpy.testing import assert_equal,assert_almost_equal
except ImportError:
raise SkipTest('NumPy not available.')
def test_dict_to_numpy_array1(self):
d = {'a':1,'b':2}
a = dict_to_numpy_array1(d)
assert_equal(a, numpy.array(list(d.values())))
a = dict_to_numpy_array1(d, mapping = {'b':0,'a':1})
assert_equal(a, numpy.array([2,1]))
def test_dict_to_numpy_array2(self):
d = {'a': {'a':1,'b':2},
'b': {'a':10,'b':20}}
a = dict_to_numpy_array(d)
if list(d.keys())[0] == 'a':
assert_equal(a, numpy.array([[1,2],[10,20]]))
elif list(d.keys())[0] == 'b':
assert_equal(a, numpy.array([[20,10],[2,1]]))
else:
raise
a = dict_to_numpy_array2(d, mapping = {'b':0,'a':1})
assert_equal(a, numpy.array([[20,10],[2,1]]))
def test_dict_to_numpy_array(self):
d = {'a': {'a':1,'b':2},
'b': {'a':10,'b':20}}
a = dict_to_numpy_array(d)
if list(d.keys())[0] == 'a':
assert_equal(a, numpy.array([[1,2],[10,20]]))
elif list(d.keys())[0] == 'b':
assert_equal(a, numpy.array([[20,10],[2,1]]))
else:
raise
d = {'a':1,'b':2}
a = dict_to_numpy_array(d)
assert_equal(a, numpy.array(list(d.values())))
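# Note on the tests above: dict_to_numpy_array is expected to dispatch to the 2-d variant
# when the dict values are themselves dicts, and to the 1-d variant for a flat dict, which
# is exactly what the two assertions in test_dict_to_numpy_array check.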
|
[
"nose.SkipTest",
"networkx.complete_graph",
"numpy.array"
] |
[((537, 558), 'networkx.complete_graph', 'nx.complete_graph', (['(10)'], {}), '(10)\n', (554, 558), True, 'import networkx as nx\n'), ((2434, 2453), 'numpy.array', 'numpy.array', (['[2, 1]'], {}), '([2, 1])\n', (2445, 2453), False, 'import numpy\n'), ((2908, 2939), 'numpy.array', 'numpy.array', (['[[20, 10], [2, 1]]'], {}), '([[20, 10], [2, 1]])\n', (2919, 2939), False, 'import numpy\n'), ((2157, 2189), 'nose.SkipTest', 'SkipTest', (['"""NumPy not available."""'], {}), "('NumPy not available.')\n", (2165, 2189), False, 'from nose import SkipTest\n'), ((2664, 2695), 'numpy.array', 'numpy.array', (['[[1, 2], [10, 20]]'], {}), '([[1, 2], [10, 20]])\n', (2675, 2695), False, 'import numpy\n'), ((3148, 3179), 'numpy.array', 'numpy.array', (['[[1, 2], [10, 20]]'], {}), '([[1, 2], [10, 20]])\n', (3159, 3179), False, 'import numpy\n'), ((2761, 2792), 'numpy.array', 'numpy.array', (['[[20, 10], [2, 1]]'], {}), '([[20, 10], [2, 1]])\n', (2772, 2792), False, 'import numpy\n'), ((3245, 3276), 'numpy.array', 'numpy.array', (['[[20, 10], [2, 1]]'], {}), '([[20, 10], [2, 1]])\n', (3256, 3276), False, 'import numpy\n')]
|
from sklearn.neighbors import KNeighborsClassifier as skl_knn
from sklearn.base import TransformerMixin, BaseEstimator
import numpy as np
from dtaidistance.dtw_ndim import distance_fast
class KNeighborsClassifier(skl_knn):
def __init__(self, n_neighbors=1, classes=None, useClasses=False, **kwargs):
self.classes = classes
self.useClasses = useClasses
super(KNeighborsClassifier, self).__init__(n_neighbors=n_neighbors, **kwargs)
def fit(self, X, y):
if self.useClasses:
return super(KNeighborsClassifier, self).fit(X, self.classes.ravel())
else:
return super(KNeighborsClassifier, self).fit(X, y)
def distance_matrix_ddtw(X):
n_observations, n_features, n_variables = X.shape
X_out = np.zeros((n_observations, n_observations), dtype=np.double)
X = X.astype(np.double)
for i in range(n_observations):
for j in range(i, n_observations):
d = distance_fast(X[i], X[j])
X_out[i,j] = d
X_out[j,i] = d
return X_out
def flattened_ddtw(x1, x2, shape=None):
if shape is None:
raise ValueError("shape needed")
# b bands/variables
assert len(x1) % shape[1] == 0
assert len(x2) % shape[1] == 0
arr1 = x1.reshape(shape).astype(np.double)
arr2 = x2.reshape(shape).astype(np.double)
return distance_fast(arr1, arr2)
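# Illustrative sketch (an assumption, not part of the original code): flattened_ddtw is
# shaped so it can serve as a callable scikit-learn metric on flattened series, e.g.
#   clf = KNeighborsClassifier(n_neighbors=1, metric=flattened_ddtw,
#                              metric_params={'shape': (n_features, n_variables)})
# where n_features/n_variables are hypothetical names and each sample was first
# flattened, e.g. with Flatten3Dto2D below.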
class DependentDTWMatrix(TransformerMixin, BaseEstimator):
def fit(self, X, y=None):
return self
def transform(self, X):
return X
def fit_transform(self, X, y=None, **fit_params):
return distance_matrix_ddtw(X)
class Flatten3Dto2D(TransformerMixin, BaseEstimator):
def fit(self, X, y=None):
return self
def transform(self, X):
n_observations, n_features, n_variables = X.shape
return X.reshape(n_observations, n_features * n_variables)
def fit_transform(self, X, y=None, **fit_params):
n_observations, n_features, n_variables = X.shape
return X.reshape(n_observations, n_features * n_variables)
|
[
"dtaidistance.dtw_ndim.distance_fast",
"numpy.zeros"
] |
[((771, 830), 'numpy.zeros', 'np.zeros', (['(n_observations, n_observations)'], {'dtype': 'np.double'}), '((n_observations, n_observations), dtype=np.double)\n', (779, 830), True, 'import numpy as np\n'), ((1357, 1382), 'dtaidistance.dtw_ndim.distance_fast', 'distance_fast', (['arr1', 'arr2'], {}), '(arr1, arr2)\n', (1370, 1382), False, 'from dtaidistance.dtw_ndim import distance_fast\n'), ((954, 979), 'dtaidistance.dtw_ndim.distance_fast', 'distance_fast', (['X[i]', 'X[j]'], {}), '(X[i], X[j])\n', (967, 979), False, 'from dtaidistance.dtw_ndim import distance_fast\n')]
|
# -*- coding:utf-8 -*-
import numpy as np
import GVal
def initializationProcess():
localprefix = '/home/'
serverprefix = '/home/labcompute/'
GVal.setPARA('prefix_PARA', serverprefix)
# GVal.setPARA('prefix_PARA', localprefix)
return GVal.getPARA('prefix_PARA')
def processCodeEncoderConvertor(inp):
output = [0] * inp.shape[0]
decimal_bit = 0
output_bit = 0
for i in inp:
if np.logical_or(type(i) == float, type(i) == np.float64):
decimal_bit = max((decimal_bit, (len(str(i)) - len(str(int(i)))) - 1))
for i in range(len(inp)):
if np.logical_or(type(inp[i]) == float, type(inp[i]) == np.float64):
output[i] = int(str(inp[i] * (10**decimal_bit))[:-2])
else:
output[i] = inp[i]
for i in output:
output_bit = max((output_bit, len(str(i))))
return output, output_bit, decimal_bit
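# Illustrative example (not part of the original source): for a float-valued loop list such
# as np.array([0.25, 1.5]) the convertor returns ([25, 150], 3, 2), i.e. the values scaled
# by 10**decimal_bit (here 10**2), the widest digit count (3) and the number of decimal
# digits that were removed (2).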
def controlPanel():
########################################
############## [ Control panel ] #############
########################################
#### [ Loop Parameters ] ####################
GVal.setPARA(
'recording_index_list_PARA',
[
# np.array([21])
# np.arange(1, 137, 1),
np.arange(138, 234, 1)
])
########################################
# select_index = np.arange(10, 20)
# Label Reading
# select_index
# 0 - 99 Lab Environment (High SNR)
# 100 - 137 Other Environment (High SNR)
# 138 - 234 Real Industrial Environment (Low SNR)
#### [ Static Parameters ] ####################
GVal.setPARA(
'classifier_list_PARA',
[
21,
24,
25,
30,
23,
32
])
########################################
    # 2,3 - SciKitLearn, 1- Keras (Neural Network), 4- Other
    # classifier_num
    # Sub Classifier catalogue.
    # KT Frame
    # 11- Sequential NN
    # SKL Frame
    # 21 - SVM Linear(Default for SciKitLearn)
    # 22 - SVM Kernel
    # 221 - SVM RBF Kernel(Default if only 22 is input)
    # 222 - SVM Poly Kernel
    # 223 - SVM sigmoid Kernel
# 23 - LDA (Linear Discriminant Analysis)
# 24 - QDA (Quadratic Discriminant Analysis)
# 25 - Naive Bayes (Default Gaussian)
# 251 - Naive Bayes Gaussian
# 252 - Naive Bayes Multinominal
# 253 - Naive Bayes Bernoulli
# 26 - Neural Network ( by sklearn frame)
# 27 - Adaboost(Default for Decision Tree base)
# 271 - Adaboost with Decision Tree Weak Classifier
# 28 - Linear Regression Weighted [N/A]
# 29 - SGD Classifier
# 30 - Logistic Regression
# 31 - Decision Tree
# 32 - Random Forest
########################################
### [ Custom Loop Parameter] ###############
GVal.setPARA('loopPARA_name', 'random_seed_PARA')
# loopPARA_valuelist_raw = np.array([0.2])
loopPARA_valuelist_raw = np.arange(16, 36, 1)
loopPARA_valuelist, loopPARA_bit, loopPARA_decibit = processCodeEncoderConvertor(loopPARA_valuelist_raw)
GVal.setPARA('loopPARA_valuelist_PARA', loopPARA_valuelist)
GVal.setPARA('loopPARA_bit_PARA', loopPARA_bit)
GVal.setPARA('loopPARA_decibit_PARA', loopPARA_decibit)
########################################
# Flags
FLAG = {}
FLAG['data_prepare_flag'] = 0
FLAG['plotting_flag'] = 0
FLAG['save_flag'] = 0
    # online_fea_name = [['blink duration'], ['blink speed'], ['perclos'], ['sem'], ['sem centroid frequency'], ['standard deviation'], ['blink count'], ['sem composite feature'], ['blink composite feature'], ['overall feature']]
GVal.setPARA('weights_on_PARA', 0)
GVal.setPARA('beta_PARA', 1.5)
#########################################
GVal.setPARA('kick_off_no1_PARA', 0)
# 0 - Do nothing
# 1 - Clean label 1(CSS7) both training and validation set
# 2 - Clean label 1 on training, set label 1 as label 0(Negative Sleep) in validation
# 3 - Clean label 1 on training only. label 1 still be label 1 in validation
# 4 - Set label 1 as label 0 on both training and validation set.
#########################################
FLAG['label_process_flag'] = 1
GVal.setPARA('noconfident_frame_process_PARA', 5)
# 0 - Remain the current label (Do nothing)
# 1 - Set all not confident frame as label 7 and remain all
# 2 - Set all not confident frame as label 0 and remain all
# 3 - Delete not confident frame with label 7
# 4 - Delete not confident frame with label 0
# 5 - Delete all the not confident frame
GVal.setPARA('noise_frame_process_PARA', 1)
# 0 - Remain all the noise frame (Do nothing)
# 1 - Simply Delete the noise frame and remain the noblink frame
# 2 - Delete the noise frame and the noblink frame
#########################################
# Weight_list, the key is referring the label num (0 -nofatigue, 1 -7gradefatigue ... )
# And the corresponding value for each key is the weight for this certain class
GVal.setPARA(
'weight_list_PARA', {
0: 0.8,
1: 1,
2: 2,
3: 3
})
#########################################
# 1: caseLOO, nonparameters
# 2: caseLPO, parameter P (positive integer)
# 3: caseKFold, parameter K fold (positive integer)
# 4: caseStratifiedKFold, parameter K fold (positive integer)
# 5: caseRawsplit, parameter test_proportion (range [ 0 : 1])
GVal.setPARA('split_type_num', 5)
GVal.setPARA('split_type_para', 0.3)
GVal.setPARA('random_seed_PARA', 2)
#########################################
FLAG['fea_selection_flag'] = 0
# 1 - tSNE(Default with PCA)
# 2 - normalPCA
GVal.setPARA('feaSelectionStrategy', 2)
GVal.setPARA('nComponent_PARA', 10)
#########################################
FLAG['data_balance_flag'] = 1
# 1 - Down sampling the negative class (para is the [0,1] float, indicating the percentage of retain)
GVal.setPARA('data_balance_process_PARA', 1)
GVal.setPARA('data_balance_para_PARA', 0.25)
#########################################
# Select several features from the online feature file
# OLD feature Info
# GVal.setPARA('online_fea_selectindex_PARA', np.array([0, 1, 2, 3, 4, 14, 15, 11, 12, 13]))
    # GVal.setPARA('online_fea_name_PARA', [['blink duration'], ['blink speed'], ['perclos'], ['sem'], ['sem centroid frequency'], ['standard deviation'], ['blink count'], ['sem composite feature'], ['blink composite feature'], ['overall feature']])
# NEW feature Info
GVal.setPARA('online_fea_selectindex_PARA', np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 18, 22, 23, 24, 25, 26]))
# GVal.setPARA('online_fea_selectindex_PARA', np.array([22, 23, 24]))
GVal.setPARA('feature_index_PARA', np.arange(0, len(GVal.getPARA('online_fea_selectindex_PARA'))))
    # Dimensions 0 -- 12 correspond to: blink duration, blink speed, perclos_first, eye-closing time, fully-closed time,
    # eye-opening time, mean closing speed, mean opening speed, max closing speed, max opening speed,
    # perclos_second, perclos_third, perclos_fourth
    # perclos_first = fully-closed time / eye-closing time
    # perclos_second = fully-closed time / blink duration
    # perclos_third=(open_st-close_st)/(open_en-close_st)
    # perclos_fourth=(close_en-close_st)/(open_en-close_st)
    # Dimensions 13 -- 15 correspond to: sem, sem centroid frequency, fast EOG relative power (1.2Hz-3Hz / 0.3Hz-4.8Hz)
    # Dimension 16 corresponds to: state index
    # Dimension 18 corresponds to: mean of the blink extremum differences
    # Dimension 19: noise-type flag, 0 -- no noise,
    #         1 -- 25Hz noise,
    #         2 -- low-frequency shaking noise,
    #         3 -- idle (no clear mid-frequency EEG),
    #         4 -- chewing noise,
    #         5 -- signal shorter than 8 seconds,
    #         6 -- idle (standard deviation too small, 4),
    #         7 -- severe drift
    # Dimensions 22 -- 24 correspond to: sem composite feature, blink composite feature, overall feature
    # Dimensions 25 -- 26 correspond to: standard deviation, blink count
### [ Essential Initialization Parameters ] #################################
GVal.setPARA('noise_label_index_PARA', np.array([19, 26]))
GVal.setPARA('firstTimeConstruction', FLAG['data_prepare_flag'])
return FLAG, processCodeEncoder()
def processCodeEncoder():
recording_index_list = GVal.getPARA('recording_index_list_PARA')
classifier_list = GVal.getPARA('classifier_list_PARA')
loopPARA_valuelist = GVal.getPARA('loopPARA_valuelist_PARA')
loopPARA_bit = GVal.getPARA('loopPARA_bit_PARA')
process_code_pack = []
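    # Digit layout of each process code (read from the expression below):
    #   last 3 digits      -> classifier number (e.g. 221)
    #   next 2 digits      -> 1-based index into recording_index_list
    #   remaining digits   -> encoded loop-parameter value
    # processCodeDecoder() unpacks the same fields via string slicing.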
for recording_index_list_lplb in np.arange(len(recording_index_list)):
for classifier_list_lplb in np.arange(len(classifier_list)):
for loopPARA_valuelist_lplb in np.arange(len(loopPARA_valuelist)):
code_temp = int(1e0 * classifier_list[classifier_list_lplb] + 1e3 * (recording_index_list_lplb + 1) + 1e5 * loopPARA_valuelist[loopPARA_valuelist_lplb])
process_code_pack.append(code_temp)
return process_code_pack
def processCodeDecoder(process_code, decoder_update_flag):
process_code_str = str(process_code)
classifier_num_temp = int(process_code_str[-3:])
recording_index_list_selectserial = int(process_code_str[-5: -3]) - 1
recording_index_list = GVal.getPARA('recording_index_list_PARA')
loopPARA_bit = GVal.getPARA('loopPARA_bit_PARA')
loopPARA_decibit = GVal.getPARA('loopPARA_decibit_PARA')
if not process_code_str[-5 - loopPARA_bit:-5]:
loopPARA_value_index = 0
else:
loopPARA_value_index = float(process_code_str[-5 - loopPARA_bit:-5])
GVal.setPARA(GVal.getPARA('loopPARA_name'), loopPARA_value_index / (10**loopPARA_decibit))
select_index_temp = recording_index_list[recording_index_list_selectserial]
if decoder_update_flag:
GVal.setPARA('classifier_num_PARA', classifier_num_temp)
GVal.setPARA('select_index_PARA', select_index_temp)
print('### Decoder Success! New parameters: [select_index] and [classifier_num] are updated into the workspace! ')
else:
print('### Decoder Success! Output parameters: [select_index] and [classifier_num] ')
print('### Caution! The parameters are not updated into the workspace! ')
return select_index_temp, classifier_num_temp
|
[
"GVal.getPARA",
"GVal.setPARA",
"numpy.array",
"numpy.arange"
] |
[((165, 206), 'GVal.setPARA', 'GVal.setPARA', (['"""prefix_PARA"""', 'serverprefix'], {}), "('prefix_PARA', serverprefix)\n", (177, 206), False, 'import GVal\n'), ((269, 296), 'GVal.getPARA', 'GVal.getPARA', (['"""prefix_PARA"""'], {}), "('prefix_PARA')\n", (281, 296), False, 'import GVal\n'), ((1675, 1737), 'GVal.setPARA', 'GVal.setPARA', (['"""classifier_list_PARA"""', '[21, 24, 25, 30, 23, 32]'], {}), "('classifier_list_PARA', [21, 24, 25, 30, 23, 32])\n", (1687, 1737), False, 'import GVal\n'), ((2928, 2977), 'GVal.setPARA', 'GVal.setPARA', (['"""loopPARA_name"""', '"""random_seed_PARA"""'], {}), "('loopPARA_name', 'random_seed_PARA')\n", (2940, 2977), False, 'import GVal\n'), ((3056, 3076), 'numpy.arange', 'np.arange', (['(16)', '(36)', '(1)'], {}), '(16, 36, 1)\n', (3065, 3076), True, 'import numpy as np\n'), ((3192, 3251), 'GVal.setPARA', 'GVal.setPARA', (['"""loopPARA_valuelist_PARA"""', 'loopPARA_valuelist'], {}), "('loopPARA_valuelist_PARA', loopPARA_valuelist)\n", (3204, 3251), False, 'import GVal\n'), ((3257, 3304), 'GVal.setPARA', 'GVal.setPARA', (['"""loopPARA_bit_PARA"""', 'loopPARA_bit'], {}), "('loopPARA_bit_PARA', loopPARA_bit)\n", (3269, 3304), False, 'import GVal\n'), ((3310, 3365), 'GVal.setPARA', 'GVal.setPARA', (['"""loopPARA_decibit_PARA"""', 'loopPARA_decibit'], {}), "('loopPARA_decibit_PARA', loopPARA_decibit)\n", (3322, 3365), False, 'import GVal\n'), ((3678, 3712), 'GVal.setPARA', 'GVal.setPARA', (['"""weights_on_PARA"""', '(0)'], {}), "('weights_on_PARA', 0)\n", (3690, 3712), False, 'import GVal\n'), ((3718, 3748), 'GVal.setPARA', 'GVal.setPARA', (['"""beta_PARA"""', '(1.5)'], {}), "('beta_PARA', 1.5)\n", (3730, 3748), False, 'import GVal\n'), ((3801, 3837), 'GVal.setPARA', 'GVal.setPARA', (['"""kick_off_no1_PARA"""', '(0)'], {}), "('kick_off_no1_PARA', 0)\n", (3813, 3837), False, 'import GVal\n'), ((4256, 4305), 'GVal.setPARA', 'GVal.setPARA', (['"""noconfident_frame_process_PARA"""', '(5)'], {}), "('noconfident_frame_process_PARA', 5)\n", (4268, 4305), False, 'import GVal\n'), ((4638, 4681), 'GVal.setPARA', 'GVal.setPARA', (['"""noise_frame_process_PARA"""', '(1)'], {}), "('noise_frame_process_PARA', 1)\n", (4650, 4681), False, 'import GVal\n'), ((5091, 5159), 'GVal.setPARA', 'GVal.setPARA', (['"""weight_list_PARA"""', '{(0): 0.8, (1): 1, (2): 2, (3): 3}'], {}), "('weight_list_PARA', {(0): 0.8, (1): 1, (2): 2, (3): 3})\n", (5103, 5159), False, 'import GVal\n'), ((5562, 5595), 'GVal.setPARA', 'GVal.setPARA', (['"""split_type_num"""', '(5)'], {}), "('split_type_num', 5)\n", (5574, 5595), False, 'import GVal\n'), ((5601, 5637), 'GVal.setPARA', 'GVal.setPARA', (['"""split_type_para"""', '(0.3)'], {}), "('split_type_para', 0.3)\n", (5613, 5637), False, 'import GVal\n'), ((5643, 5678), 'GVal.setPARA', 'GVal.setPARA', (['"""random_seed_PARA"""', '(2)'], {}), "('random_seed_PARA', 2)\n", (5655, 5678), False, 'import GVal\n'), ((5824, 5863), 'GVal.setPARA', 'GVal.setPARA', (['"""feaSelectionStrategy"""', '(2)'], {}), "('feaSelectionStrategy', 2)\n", (5836, 5863), False, 'import GVal\n'), ((5869, 5904), 'GVal.setPARA', 'GVal.setPARA', (['"""nComponent_PARA"""', '(10)'], {}), "('nComponent_PARA', 10)\n", (5881, 5904), False, 'import GVal\n'), ((6101, 6145), 'GVal.setPARA', 'GVal.setPARA', (['"""data_balance_process_PARA"""', '(1)'], {}), "('data_balance_process_PARA', 1)\n", (6113, 6145), False, 'import GVal\n'), ((6151, 6195), 'GVal.setPARA', 'GVal.setPARA', (['"""data_balance_para_PARA"""', '(0.25)'], {}), "('data_balance_para_PARA', 0.25)\n", (6163, 6195), False, 
'import GVal\n'), ((8313, 8377), 'GVal.setPARA', 'GVal.setPARA', (['"""firstTimeConstruction"""', "FLAG['data_prepare_flag']"], {}), "('firstTimeConstruction', FLAG['data_prepare_flag'])\n", (8325, 8377), False, 'import GVal\n'), ((8478, 8519), 'GVal.getPARA', 'GVal.getPARA', (['"""recording_index_list_PARA"""'], {}), "('recording_index_list_PARA')\n", (8490, 8519), False, 'import GVal\n'), ((8543, 8579), 'GVal.getPARA', 'GVal.getPARA', (['"""classifier_list_PARA"""'], {}), "('classifier_list_PARA')\n", (8555, 8579), False, 'import GVal\n'), ((8606, 8645), 'GVal.getPARA', 'GVal.getPARA', (['"""loopPARA_valuelist_PARA"""'], {}), "('loopPARA_valuelist_PARA')\n", (8618, 8645), False, 'import GVal\n'), ((8666, 8699), 'GVal.getPARA', 'GVal.getPARA', (['"""loopPARA_bit_PARA"""'], {}), "('loopPARA_bit_PARA')\n", (8678, 8699), False, 'import GVal\n'), ((9472, 9513), 'GVal.getPARA', 'GVal.getPARA', (['"""recording_index_list_PARA"""'], {}), "('recording_index_list_PARA')\n", (9484, 9513), False, 'import GVal\n'), ((9536, 9569), 'GVal.getPARA', 'GVal.getPARA', (['"""loopPARA_bit_PARA"""'], {}), "('loopPARA_bit_PARA')\n", (9548, 9569), False, 'import GVal\n'), ((9594, 9631), 'GVal.getPARA', 'GVal.getPARA', (['"""loopPARA_decibit_PARA"""'], {}), "('loopPARA_decibit_PARA')\n", (9606, 9631), False, 'import GVal\n'), ((6656, 6748), 'numpy.array', 'np.array', (['[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 18, 22, 23, 24, 25, 26]'], {}), '([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 18, 22, 23,\n 24, 25, 26])\n', (6664, 6748), True, 'import numpy as np\n'), ((8288, 8306), 'numpy.array', 'np.array', (['[19, 26]'], {}), '([19, 26])\n', (8296, 8306), True, 'import numpy as np\n'), ((9827, 9856), 'GVal.getPARA', 'GVal.getPARA', (['"""loopPARA_name"""'], {}), "('loopPARA_name')\n", (9839, 9856), False, 'import GVal\n'), ((10026, 10082), 'GVal.setPARA', 'GVal.setPARA', (['"""classifier_num_PARA"""', 'classifier_num_temp'], {}), "('classifier_num_PARA', classifier_num_temp)\n", (10038, 10082), False, 'import GVal\n'), ((10092, 10144), 'GVal.setPARA', 'GVal.setPARA', (['"""select_index_PARA"""', 'select_index_temp'], {}), "('select_index_PARA', select_index_temp)\n", (10104, 10144), False, 'import GVal\n'), ((1305, 1327), 'numpy.arange', 'np.arange', (['(138)', '(234)', '(1)'], {}), '(138, 234, 1)\n', (1314, 1327), True, 'import numpy as np\n'), ((6878, 6921), 'GVal.getPARA', 'GVal.getPARA', (['"""online_fea_selectindex_PARA"""'], {}), "('online_fea_selectindex_PARA')\n", (6890, 6921), False, 'import GVal\n')]
|
# ***************************************************************
# Copyright (c) 2022 Jittor. All Rights Reserved.
# Maintainers:
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>.
#
# This file is subject to the terms and conditions defined in
# file 'LICENSE.txt', which is part of this source code package.
# ***************************************************************
import jittor as jt
import numpy as np
from collections.abc import Sequence, Mapping
from PIL import Image
import time
def get_random_list(n):
return list(np.random.permutation(range(n)))
def get_order_list(n):
return [i for i in range(n)]
def collate_batch(batch):
r"""Puts each data field into a tensor with outer dimension batch size"""
real_size = len(batch)
elem = batch[0]
elem_type = type(elem)
if isinstance(elem, jt.Var):
temp_data = jt.stack([data for data in batch], 0)
return temp_data
if elem_type is np.ndarray:
temp_data = np.stack([data for data in batch], 0)
return temp_data
elif np.issubdtype(elem_type, np.integer):
return np.int32(batch)
elif isinstance(elem, int):
return np.int32(batch)
elif isinstance(elem, float):
return np.float32(batch)
elif isinstance(elem, str):
return batch
elif isinstance(elem, Mapping):
return {key: collate_batch([d[key] for d in batch]) for key in elem}
elif isinstance(elem, tuple):
transposed = zip(*batch)
return tuple(collate_batch(samples) for samples in transposed)
elif isinstance(elem, Sequence):
transposed = zip(*batch)
return [collate_batch(samples) for samples in transposed]
elif isinstance(elem, Image.Image):
temp_data = np.stack([np.array(data) for data in batch], 0)
return temp_data
else:
raise TypeError(f"Not support type <{elem_type.__name__}>")
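# Behaviour summary of collate_batch (restating the branches above): jt.Var and ndarray
# batches are stacked along a new leading axis, ints/floats become np.int32/np.float32
# arrays, strings are returned unchanged, dicts are collated per key, tuples/lists are
# transposed and collated element-wise, and PIL images are converted with np.array
# before stacking.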
class HookTimer:
def __init__(self, obj, attr):
self.origin = getattr(obj, attr)
self.duration = 0.0
setattr(obj, attr, self)
def __call__(self, *args, **kw):
start = time.time()
rt = self.origin(*args, **kw)
self.duration += time.time() - start
return rt
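# Illustrative usage (an assumption, not from the original source):
#   timer = HookTimer(some_object, 'some_method')   # replaces the attribute in place
#   ... run the workload that calls some_object.some_method(...) ...
#   print(timer.duration)                           # total seconds spent in the original callable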
|
[
"numpy.int32",
"numpy.stack",
"numpy.issubdtype",
"numpy.array",
"jittor.stack",
"time.time",
"numpy.float32"
] |
[((859, 896), 'jittor.stack', 'jt.stack', (['[data for data in batch]', '(0)'], {}), '([data for data in batch], 0)\n', (867, 896), True, 'import jittor as jt\n'), ((974, 1011), 'numpy.stack', 'np.stack', (['[data for data in batch]', '(0)'], {}), '([data for data in batch], 0)\n', (982, 1011), True, 'import numpy as np\n'), ((1046, 1082), 'numpy.issubdtype', 'np.issubdtype', (['elem_type', 'np.integer'], {}), '(elem_type, np.integer)\n', (1059, 1082), True, 'import numpy as np\n'), ((2105, 2116), 'time.time', 'time.time', ([], {}), '()\n', (2114, 2116), False, 'import time\n'), ((1099, 1114), 'numpy.int32', 'np.int32', (['batch'], {}), '(batch)\n', (1107, 1114), True, 'import numpy as np\n'), ((2180, 2191), 'time.time', 'time.time', ([], {}), '()\n', (2189, 2191), False, 'import time\n'), ((1162, 1177), 'numpy.int32', 'np.int32', (['batch'], {}), '(batch)\n', (1170, 1177), True, 'import numpy as np\n'), ((1227, 1244), 'numpy.float32', 'np.float32', (['batch'], {}), '(batch)\n', (1237, 1244), True, 'import numpy as np\n'), ((1755, 1769), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (1763, 1769), True, 'import numpy as np\n')]
|
import os
import json
import numpy
import re
import torch
import torch_rl
import utils
class Vocabulary:
"""A mapping from tokens to ids with a capacity of `max_size` words.
It can be saved in a `vocab.json` file."""
def __init__(self, model_dir):
self.path = utils.get_vocab_path(model_dir)
self.max_size = 100
self.vocab = {}
if os.path.exists(self.path):
self.vocab = json.load(open(self.path))
def __getitem__(self, token):
if not(token in self.vocab.keys()):
if len(self.vocab) >= self.max_size:
raise ValueError("Maximum vocabulary capacity reached")
self.vocab[token] = len(self.vocab) + 1
return self.vocab[token]
def save(self):
utils.create_folders_if_necessary(self.path)
json.dump(self.vocab, open(self.path, "w"))
class ObssPreprocessor:
"""A preprocessor of observations returned by the environment.
It converts MiniGrid observation space and MiniGrid observations
into the format that the model can handle."""
def __init__(self, model_dir, obs_space):
self.vocab = Vocabulary(model_dir)
self.obs_space = {
"image": obs_space.spaces['image'].shape,
# "instr": self.vocab.max_size
}
def __call__(self, obss, device=None):
"""Converts a list of MiniGrid observations, i.e. a list of
(image, instruction) tuples into two PyTorch tensors.
The images are concatenated. The instructions are tokenified, then
tokens are converted into lists of ids using a Vocabulary object, and
finally, the lists of ids are concatenated.
Returns
-------
preprocessed_obss : DictList
Contains preprocessed images and preprocessed instructions.
"""
preprocessed_obss = torch_rl.DictList()
if "image" in self.obs_space.keys():
images = numpy.array([obs["image"] for obs in obss])
images = torch.tensor(images, device=device, dtype=torch.float)
preprocessed_obss.image = images
if "instr" in self.obs_space.keys():
raw_instrs = []
max_instr_len = 0
for obs in obss:
tokens = re.findall("([a-z]+)", obs["mission"].lower())
instr = numpy.array([self.vocab[token] for token in tokens])
raw_instrs.append(instr)
max_instr_len = max(len(instr), max_instr_len)
instrs = numpy.zeros((len(obss), max_instr_len))
for i, instr in enumerate(raw_instrs):
instrs[i, :len(instr)] = instr
instrs = torch.tensor(instrs, device=device, dtype=torch.long)
preprocessed_obss.instr = instrs
return preprocessed_obss
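# Shape sketch (derived from the code above): preprocessed_obss.image is a float tensor of
# shape (len(obss), *image_shape); when "instr" is enabled, preprocessed_obss.instr is a
# zero-padded long tensor of shape (len(obss), max_instr_len).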
|
[
"os.path.exists",
"utils.get_vocab_path",
"utils.create_folders_if_necessary",
"numpy.array",
"torch.tensor",
"torch_rl.DictList"
] |
[((283, 314), 'utils.get_vocab_path', 'utils.get_vocab_path', (['model_dir'], {}), '(model_dir)\n', (303, 314), False, 'import utils\n'), ((378, 403), 'os.path.exists', 'os.path.exists', (['self.path'], {}), '(self.path)\n', (392, 403), False, 'import os\n'), ((771, 815), 'utils.create_folders_if_necessary', 'utils.create_folders_if_necessary', (['self.path'], {}), '(self.path)\n', (804, 815), False, 'import utils\n'), ((1866, 1885), 'torch_rl.DictList', 'torch_rl.DictList', ([], {}), '()\n', (1883, 1885), False, 'import torch_rl\n'), ((1953, 1996), 'numpy.array', 'numpy.array', (["[obs['image'] for obs in obss]"], {}), "([obs['image'] for obs in obss])\n", (1964, 1996), False, 'import numpy\n'), ((2018, 2072), 'torch.tensor', 'torch.tensor', (['images'], {'device': 'device', 'dtype': 'torch.float'}), '(images, device=device, dtype=torch.float)\n', (2030, 2072), False, 'import torch\n'), ((2689, 2742), 'torch.tensor', 'torch.tensor', (['instrs'], {'device': 'device', 'dtype': 'torch.long'}), '(instrs, device=device, dtype=torch.long)\n', (2701, 2742), False, 'import torch\n'), ((2349, 2401), 'numpy.array', 'numpy.array', (['[self.vocab[token] for token in tokens]'], {}), '([self.vocab[token] for token in tokens])\n', (2360, 2401), False, 'import numpy\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Storage plugin for RDFlib
see license https://github.com/DerwenAI/kglab#license-and-copyright
"""
from dataclasses import dataclass
import inspect
import typing
from cryptography.hazmat.primitives import hashes # type: ignore # pylint: disable=E0401
from icecream import ic # type: ignore # pylint: disable=E0401,W0611
import chocolate # type: ignore # pylint: disable=E0401
import numpy as np # type: ignore # pylint: disable=E0401
from rdflib.store import Store # type: ignore # pylint: disable=E0401
import rdflib # type: ignore # pylint: disable=E0401
@dataclass(frozen=True)
class NodeRef:
"""
Represent a reference to a Node within this store.
"""
id: int
node_name: str
node_id: int
class PropertyStore (Store):
"""
A subclass of `rdflib.Store` to use as a plugin, integrating the W3C stack.
"""
def __init__ (
self,
configuration: typing.Optional[str] = None,
identifier: typing.Optional[str] = None,
) -> None:
"""
Instance constructor.
"""
if configuration is not None:
ic(configuration)
super().__init__(configuration)
self.identifier = identifier
self.digest: typing.Optional[ hashes.Hash ] = None
self.__namespace: dict = {}
self.__prefix: dict = {}
self._tuples: list = []
self._node_names: np.array = np.empty(shape=[0, 1], dtype=object) # type: ignore
self._rel_names: np.array = np.empty(shape=[0, 1], dtype=object) # type: ignore
######################################################################
## rdflib.Store implementation
@classmethod
def get_lpg (
cls,
graph: rdflib.Graph,
) -> "PropertyStore":
"""
An accessor method to extract the PropertyGraph from an RDF graph,
which is a private member of rdflib.Graph.
"""
return graph._Graph__store # type: ignore # pylint: disable=W0212
def get_node_id (
self,
node_name: str,
) -> int:
"""
An accessor method to map from the unique name of a node to its
`node_id` index.
"""
rows = np.where(self._node_names == node_name)
if len(rows[0]) < 1:
self._node_names = np.append(self._node_names, node_name)
node_id = len(self._node_names) - 1
else:
node_id = rows[0][0]
return node_id
def get_node_name (
self,
node_id: int,
) -> str:
"""
An accessor method to map from the `node_id` index of a node to its
unique name.
"""
return self._node_names[node_id] # type: ignore
def get_rel_id (
self,
rel_name: str,
) -> int:
"""
An accessor method to map from a unique name of a relation to its
`rel_id` index.
"""
rows = np.where(self._rel_names == rel_name)
if len(rows[0]) < 1:
self._rel_names = np.append(self._rel_names, rel_name)
rel_id = len(self._rel_names) - 1
else:
rel_id = rows[0][0]
return rel_id
def get_rel_name (
self,
rel_id: int,
) -> str:
"""
An accessor method to map from the `rel_id` index of a relation to its
unique name.
"""
return self._rel_names[rel_id] # type: ignore
def build_tuple (
self,
s,
p,
o,
context,
) -> typing.Tuple:
"""
Compose a tuple from the inputs supplied by `RDFlib`.
"""
if context is None:
c = None
else:
c = str(context.identifier) # type: ignore
src_id = self.get_node_id(str(s))
rel_id = self.get_rel_id(str(p))
if isinstance(o, rdflib.term.Literal):
_tuple = ( src_id, rel_id, str(o), True, c)
else:
dst_id = self.get_node_id(str(o))
_tuple = ( src_id, rel_id, dst_id, False, c) # type: ignore
return _tuple
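    # Layout of every stored tuple (used by _find, add, remove and triples):
    #   (src_node_id, rel_id, dst_node_id_or_literal_string, is_literal, context_id_or_None)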
def _find (
self,
_tuple: typing.Tuple,
) -> int:
"""
Locate the given tuple in the data, returning `-1` if not found.
"""
try:
idx = self._tuples.index(_tuple)
except ValueError as ex: # pylint: disable=W0612
# triple does not exist
idx = -1
return idx
def add ( # type: ignore # pylint: disable=R0201,W0221
self,
triple: typing.Tuple,
context: typing.Optional[ rdflib.term.URIRef ] = None, # pylint: disable=W0613
*,
quoted: bool = False, # pylint: disable=W0613
) -> None:
"""
Adds the given statement to a specific context or to the model.
The quoted argument is interpreted by formula-aware stores to indicate
this statement is quoted/hypothetical.
It should be an error to not specify a context and have the quoted
argument be `True`.
It should also be an error for the quoted argument to be `True` when
the store is not formula-aware.
"""
s, p, o = triple # pylint: disable=W0612
_tuple = self.build_tuple(str(s), str(p), o, context)
idx = self._find(_tuple)
if idx < 0:
self._tuples.append(_tuple)
# update digest
if self.digest is not None:
self.digest.update(inspect.currentframe().f_code.co_name.encode("utf-8")) # type: ignore
self.digest.update(str(s).encode("utf-8"))
self.digest.update(str(p).encode("utf-8"))
self.digest.update(str(o).encode("utf-8"))
c = _tuple[4]
if c is not None:
self.digest.update(c.encode("utf-8"))
def remove ( # type: ignore # pylint: disable=R0201,W0221
self,
triple_pattern: typing.Tuple,
*,
context: typing.Optional[ rdflib.term.URIRef ] = None, # pylint: disable=W0613
) -> None:
"""
Remove the set of triples matching the pattern from the store.
"""
s, p, o = triple_pattern # pylint: disable=W0612
_tuple = self.build_tuple(str(s), str(p), o, context)
idx = self._find(_tuple)
if idx >= 0:
del self._tuples[idx]
# update digest
if self.digest is not None:
self.digest.update(inspect.currentframe().f_code.co_name.encode("utf-8")) # type: ignore
self.digest.update(str(s).encode("utf-8"))
self.digest.update(str(p).encode("utf-8"))
self.digest.update(str(o).encode("utf-8"))
c = _tuple[4]
if c is not None:
self.digest.update(c.encode("utf-8"))
def triples ( # type: ignore # pylint: disable=R0201,W0221
self,
triple_pattern: typing.Tuple,
*,
context: typing.Optional[ rdflib.term.URIRef ] = None, # pylint: disable=W0613
) -> typing.Generator:
"""
A generator over all the triples matching the pattern.
triple_pattern:
            Can include any objects used for comparing against nodes in the store, for example, REGEXTerm, URIRef, Literal, BNode, Variable, Graph, QuotedGraph, Date? DateRange?
        context:
            A conjunctive query can be indicated by either providing a value of None, or a specific context can be queried by passing a Graph instance (if store is context aware).
"""
s, p, o = triple_pattern # pylint: disable=W0612
if s is not None:
s = str(s)
if p is not None:
p = str(p)
if o is not None:
o = str(o)
if context is None:
c = None
else:
c = str(context.identifier) # type: ignore
#_tuple = ( s, p, o, o_lit, c, )
for src, rel, dst, o_lit, ctx in self._tuples: # pylint: disable=R1702
if (s is None) or (s == src):
if (p is None) or (p == rel):
if (o is None) or (o == dst):
if (c is None) or (c == ctx):
if o_lit:
dst_ref: typing.Any = rdflib.term.Literal(dst)
else:
dst_ref = self.get_node_name(dst)
triple_result = (
rdflib.term.URIRef(self.get_node_name(src)),
rdflib.term.URIRef(self.get_rel_name(rel)),
dst_ref,
)
yield triple_result, self.__contexts()
def __len__ ( # type: ignore # pylint: disable=W0221,W0222
self,
*,
context: typing.Optional[ rdflib.term.URIRef ] = None, # pylint: disable=W0613
) -> int:
"""
Number of statements in the store. This should only account for
non-quoted (asserted) statements if the context is not specified,
otherwise it should return the number of statements in the formula or
context given.
context:
a graph instance to query or None
"""
if context is None:
return len(self._tuples)
c = str(context.identifier) # type: ignore
count = 0
for _, _, _, _, ctx in self._tuples:
if c == ctx:
count += 1
return count
def __contexts ( # pylint: disable=R0201
self
) -> typing.Iterable:
"""
Returns the set of contexts
"""
return { ctx for _, _, _, _, ctx in self._tuples }
def bind (
self,
prefix: str,
namespace: str,
) -> None:
"""
        Bind a prefix to a namespace, storing both the forward and reverse mapping.
"""
self.__prefix[namespace] = prefix
self.__namespace[prefix] = namespace
def namespace (
self,
prefix: str,
) -> str:
"""
        Return the namespace bound to the given prefix, or None if unbound.
"""
return self.__namespace.get(prefix, None)
def prefix (
self,
namespace: str,
) -> str:
"""
        Return the prefix bound to the given namespace, or None if unbound.
"""
return self.__prefix.get(namespace, None)
def namespaces (
self
) -> typing.Iterable:
"""
        Yield the (prefix, namespace) pairs currently bound in this store.
"""
for prefix, namespace in self.__namespace.items():
yield prefix, namespace
def query ( # pylint: disable=W0235
self,
query: str,
initNs: dict,
initBindings: dict,
queryGraph: typing.Any,
**kwargs: typing.Any,
) -> None:
"""
queryGraph is None, a URIRef or '__UNION__'
If None the graph is specified in the query-string/object
If URIRef it specifies the graph to query,
If '__UNION__' the union of all named graphs should be queried
This is used by ConjunctiveGraphs
        Values other than None obviously only make sense for context-aware stores.
"""
super().query(
query,
initNs,
initBindings,
queryGraph,
**chocolate.filter_args(kwargs, super().query),
)
def update ( # pylint: disable=W0235
self,
update: str,
initNs: dict,
initBindings: dict,
queryGraph: typing.Any,
**kwargs: typing.Any,
) -> None:
"""
queryGraph is None, a URIRef or '__UNION__'
If None the graph is specified in the query-string/object
If URIRef it specifies the graph to query,
If '__UNION__' the union of all named graphs should be queried
This is used by ConjunctiveGraphs
        Values other than None obviously only make sense for context-aware stores.
"""
super().update(
update,
initNs,
initBindings,
queryGraph,
**chocolate.filter_args(kwargs, super().update),
)
|
[
"icecream.ic",
"rdflib.term.Literal",
"numpy.where",
"inspect.currentframe",
"dataclasses.dataclass",
"numpy.append",
"numpy.empty"
] |
[((625, 647), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)'}), '(frozen=True)\n', (634, 647), False, 'from dataclasses import dataclass\n'), ((1447, 1483), 'numpy.empty', 'np.empty', ([], {'shape': '[0, 1]', 'dtype': 'object'}), '(shape=[0, 1], dtype=object)\n', (1455, 1483), True, 'import numpy as np\n'), ((1536, 1572), 'numpy.empty', 'np.empty', ([], {'shape': '[0, 1]', 'dtype': 'object'}), '(shape=[0, 1], dtype=object)\n', (1544, 1572), True, 'import numpy as np\n'), ((2210, 2249), 'numpy.where', 'np.where', (['(self._node_names == node_name)'], {}), '(self._node_names == node_name)\n', (2218, 2249), True, 'import numpy as np\n'), ((2910, 2947), 'numpy.where', 'np.where', (['(self._rel_names == rel_name)'], {}), '(self._rel_names == rel_name)\n', (2918, 2947), True, 'import numpy as np\n'), ((1151, 1168), 'icecream.ic', 'ic', (['configuration'], {}), '(configuration)\n', (1153, 1168), False, 'from icecream import ic\n'), ((2311, 2349), 'numpy.append', 'np.append', (['self._node_names', 'node_name'], {}), '(self._node_names, node_name)\n', (2320, 2349), True, 'import numpy as np\n'), ((3008, 3044), 'numpy.append', 'np.append', (['self._rel_names', 'rel_name'], {}), '(self._rel_names, rel_name)\n', (3017, 3044), True, 'import numpy as np\n'), ((8211, 8235), 'rdflib.term.Literal', 'rdflib.term.Literal', (['dst'], {}), '(dst)\n', (8230, 8235), False, 'import rdflib\n'), ((5397, 5419), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (5417, 5419), False, 'import inspect\n'), ((6404, 6426), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (6424, 6426), False, 'import inspect\n')]
|
# Enter your code here. Read input from STDIN. Print output to STDOUT
import numpy as np
from scipy import stats
i = 0
arr = []
n = int(input(''))
arr = list(map(int, input().split()))
arr.sort()
x = np.mean(arr)
y = np.median(arr)
z = stats.mode(arr)
print(round(x,1))
print(round(y,1))
print('%d' %(z[0]))
|
[
"scipy.stats.mode",
"numpy.mean",
"numpy.median"
] |
[((200, 212), 'numpy.mean', 'np.mean', (['arr'], {}), '(arr)\n', (207, 212), True, 'import numpy as np\n'), ((217, 231), 'numpy.median', 'np.median', (['arr'], {}), '(arr)\n', (226, 231), True, 'import numpy as np\n'), ((236, 251), 'scipy.stats.mode', 'stats.mode', (['arr'], {}), '(arr)\n', (246, 251), False, 'from scipy import stats\n')]
|
# -*- coding: utf-8 -*-
"""
@author: <NAME>
"""
from itertools import product
import numpy as np
from scipy.linalg import eigh, fractional_matrix_power
from .exceptions import SolutionMatrixIsZeroCanNotComputePODError
# le2d is a LinearElasticity2dProblem, not imported due to circular import
# rb_data is ReducedOrderData, not imported due to circular import
# from ._linear_elasticity_2d_problem_class import LinearElasticity2DProblem
# from ._rb_data_class import ReducedOrderData
def get_vec_from_range(range_, m, mode):
"""
Get vector of m uniform or Gauss-Lobatto points from range_
Parameters
----------
range_ : tuple
the range of numbers to consider.
m : int
number of points in vector.
mode : str
sampling mode, uniform or Gauss-Lobatto.
Raises
------
NotImplementedError
if mode is not uniform or Gauss-Lobatto.
Returns
-------
np.array
array of sampling points.
"""
if mode.lower() == "uniform":
return np.linspace(range_[0], range_[1], m)
elif mode.lower() == "gauss-lobatto":
from quadpy.c1 import gauss_lobatto
return 0.5 * ((range_[1] - range_[0]) * gauss_lobatto(m).points + (range_[1] + range_[0]))
else:
raise NotImplementedError(
f"Mode {mode} is not implemented. The implemented modes are uniform and gauss lobatto.")
def make_solution_matrix(ns, e_young_vec, nu_poisson_vec, le2d):
"""
Parameters
----------
ns : int
number of snapshots.
    e_young_vec : np.array
        array of Young's moduli.
nu_poisson_vec : np.array
array of poisson ratios.
le2d :
the solver.
Raises
------
SolutionMatrixIsZeroCanNotComputePODError
If all values is the snapshot matrix s_mat are zero.
Returns
-------
s_mat : np.array
snapshot matrix.
"""
s_mat = np.zeros((le2d.n_free, ns))
i = 0
# solve system for all combinations of (e_young, nu_poisson)
for (e_young, nu_poisson) in product(e_young_vec, nu_poisson_vec):
le2d.hfsolve(e_young, nu_poisson, print_info=False)
s_mat[:, i] = le2d.uh_free
i += 1
if (s_mat == 0).all():
error_text = "Solution matrix is zero, can not compute POD for building a reduced model. " \
+ "The most likely cause is f_func=0, dirichlet_bc_func=0 and neumann_bc_func=0, " \
+ "where two last may be None."
raise SolutionMatrixIsZeroCanNotComputePODError(error_text)
return s_mat
def pod_with_energy_norm(le2d, rb_data):
"""
Proper orthogonal decomposition with respect to the energy norm
Parameters
----------
le2d :
the solver.
rb_data :
reduced-order data.
Returns
-------
None.
"""
e_young_vec = get_vec_from_range(rb_data.e_young_range, rb_data.rb_grid[0], rb_data.pod_sampling_mode)
nu_poisson_vec = get_vec_from_range(rb_data.nu_poisson_range, rb_data.rb_grid[1], rb_data.pod_sampling_mode)
e_mean = np.mean(rb_data.e_young_range)
nu_mean = np.mean(rb_data.nu_poisson_range)
rb_data.s_mat = make_solution_matrix(rb_data.ns_rom, e_young_vec, nu_poisson_vec, le2d)
a_free = le2d.compute_a_free(e_mean, nu_mean)
if rb_data.ns_rom <= le2d.n_free:
# build correlation matrix
corr_mat = rb_data.s_mat.T @ a_free @ rb_data.s_mat
# find the eigenvalues and eigenvectors of it
sigma2_vec, z_mat = eigh(corr_mat)
# reverse arrays because they are in ascending order
rb_data.sigma2_vec = sigma2_vec[::-1]
rb_data.z_mat = z_mat[:, ::-1]
else:
rb_data.x05 = fractional_matrix_power(a_free.A, 0.5)
# build correlation matrix
corr_mat = rb_data.x05 @ rb_data.s_mat @ rb_data.s_mat.T @ rb_data.x05
# find the eigenvalues and eigenvectors of it
sigma2_vec, z_mat = eigh(corr_mat)
# reverse arrays because they are in ascending order
rb_data.sigma2_vec = sigma2_vec[::-1]
rb_data.z_mat = z_mat[:, ::-1]
# compute n_rom from relative information content
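    # Relative information content: I(n) = (sum_{i<=n} sigma_i^2) / (sum_i sigma_i^2);
    # n_rom is chosen below as the smallest n with I(n) >= 1 - eps_pod^2.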
i_n = np.cumsum(rb_data.sigma2_vec) / np.sum(rb_data.sigma2_vec)
rb_data.n_rom = np.min(np.argwhere(i_n >= 1 - rb_data.eps_pod ** 2)) + 1
def compute_v(n_rom, n_free, rb_data):
"""
Compute the matrix V
Parameters
----------
n_rom : int
our chosen "reduced-order degrees of freedom" ("n_rom"),
can be set to different from n_rom-true.
n_free : int
the high-fidelity degrees of freedom.
rb_data :
reduced-order data.
Returns
-------
None.
"""
if rb_data.ns_rom <= n_free:
rb_data.v = rb_data.s_mat @ rb_data.z_mat[:, :n_rom] / np.sqrt(rb_data.sigma2_vec[:n_rom])
else:
rb_data.v = np.linalg.solve(rb_data.x05, rb_data.z_mat[:, :n_rom])
def get_e_young_nu_poisson_mat(rb_data):
"""
    Get the matrix of all combinations of (e_young, nu_poisson)
Parameters
----------
rb_data :
reduced-order data.
Returns
-------
np.array
        the matrix of all combinations of (e_young, nu_poisson).
"""
e_young_vec = get_vec_from_range(rb_data.e_young_range, rb_data.rb_grid[0], rb_data.pod_sampling_mode)
nu_poisson_vec = get_vec_from_range(rb_data.nu_poisson_range, rb_data.rb_grid[1], rb_data.pod_sampling_mode)
return np.array(list(product(e_young_vec, nu_poisson_vec)))
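# Illustrative example (not from the original source): for rb_grid = (2, 2) with uniform
# sampling of ranges (a, b) and (c, d), the returned matrix is
#   [[a, c], [a, d], [b, c], [b, d]]
# i.e. rows follow the itertools.product ordering also used in make_solution_matrix.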
|
[
"scipy.linalg.eigh",
"numpy.mean",
"numpy.linalg.solve",
"numpy.sqrt",
"itertools.product",
"numpy.sum",
"numpy.zeros",
"numpy.linspace",
"numpy.argwhere",
"quadpy.c1.gauss_lobatto",
"numpy.cumsum",
"scipy.linalg.fractional_matrix_power"
] |
[((1929, 1956), 'numpy.zeros', 'np.zeros', (['(le2d.n_free, ns)'], {}), '((le2d.n_free, ns))\n', (1937, 1956), True, 'import numpy as np\n'), ((2065, 2101), 'itertools.product', 'product', (['e_young_vec', 'nu_poisson_vec'], {}), '(e_young_vec, nu_poisson_vec)\n', (2072, 2101), False, 'from itertools import product\n'), ((3089, 3119), 'numpy.mean', 'np.mean', (['rb_data.e_young_range'], {}), '(rb_data.e_young_range)\n', (3096, 3119), True, 'import numpy as np\n'), ((3134, 3167), 'numpy.mean', 'np.mean', (['rb_data.nu_poisson_range'], {}), '(rb_data.nu_poisson_range)\n', (3141, 3167), True, 'import numpy as np\n'), ((1033, 1069), 'numpy.linspace', 'np.linspace', (['range_[0]', 'range_[1]', 'm'], {}), '(range_[0], range_[1], m)\n', (1044, 1069), True, 'import numpy as np\n'), ((3526, 3540), 'scipy.linalg.eigh', 'eigh', (['corr_mat'], {}), '(corr_mat)\n', (3530, 3540), False, 'from scipy.linalg import eigh, fractional_matrix_power\n'), ((3719, 3757), 'scipy.linalg.fractional_matrix_power', 'fractional_matrix_power', (['a_free.A', '(0.5)'], {}), '(a_free.A, 0.5)\n', (3742, 3757), False, 'from scipy.linalg import eigh, fractional_matrix_power\n'), ((3954, 3968), 'scipy.linalg.eigh', 'eigh', (['corr_mat'], {}), '(corr_mat)\n', (3958, 3968), False, 'from scipy.linalg import eigh, fractional_matrix_power\n'), ((4179, 4208), 'numpy.cumsum', 'np.cumsum', (['rb_data.sigma2_vec'], {}), '(rb_data.sigma2_vec)\n', (4188, 4208), True, 'import numpy as np\n'), ((4211, 4237), 'numpy.sum', 'np.sum', (['rb_data.sigma2_vec'], {}), '(rb_data.sigma2_vec)\n', (4217, 4237), True, 'import numpy as np\n'), ((4862, 4916), 'numpy.linalg.solve', 'np.linalg.solve', (['rb_data.x05', 'rb_data.z_mat[:, :n_rom]'], {}), '(rb_data.x05, rb_data.z_mat[:, :n_rom])\n', (4877, 4916), True, 'import numpy as np\n'), ((4265, 4309), 'numpy.argwhere', 'np.argwhere', (['(i_n >= 1 - rb_data.eps_pod ** 2)'], {}), '(i_n >= 1 - rb_data.eps_pod ** 2)\n', (4276, 4309), True, 'import numpy as np\n'), ((4796, 4831), 'numpy.sqrt', 'np.sqrt', (['rb_data.sigma2_vec[:n_rom]'], {}), '(rb_data.sigma2_vec[:n_rom])\n', (4803, 4831), True, 'import numpy as np\n'), ((5463, 5499), 'itertools.product', 'product', (['e_young_vec', 'nu_poisson_vec'], {}), '(e_young_vec, nu_poisson_vec)\n', (5470, 5499), False, 'from itertools import product\n'), ((1205, 1221), 'quadpy.c1.gauss_lobatto', 'gauss_lobatto', (['m'], {}), '(m)\n', (1218, 1221), False, 'from quadpy.c1 import gauss_lobatto\n')]
|
#!/usr/bin/env python
import numpy as np
from time import time
import pyfftw
from numpy.fft import fft, ifft, fftshift, ifftshift, fft2, ifft2
from scipy.special import jv as besselj
import finufftpy
def translations_brute_force(Shathat, Mhat, cmul_trans):
# Shathat: (q, te, k)
# Mhat: (im, k × γ)
# cmul_trans: (tr, k × γ)
n_trans = cmul_trans.shape[-2]
n_images = Mhat.shape[-2]
Shathat = Shathat.transpose((2, 0, 1))
# Shathat: (q, te, k)
n_templates = Shathat.shape[-2]
ngridr = Shathat.shape[-1]
n_gamma = Shathat.shape[-3]
Mhat = Mhat.reshape((n_images, ngridr, n_gamma))
cmul_trans = cmul_trans.reshape((n_trans, ngridr, n_gamma))
# Mhat: (im, k, γ)
# cmul_trans: (tr, k, γ)
Mhat = Mhat[:, np.newaxis, :, :]
cmul_trans = cmul_trans[np.newaxis, :, :, :]
# Mhat: (im, 1, k, γ)
# cmul_trans: (1, tr, k, γ)
Mhat = Mhat.transpose((3, 2, 0, 1)).copy()
cmul_trans = cmul_trans.transpose((3, 2, 0, 1)).copy()
# Mhat: (γ, k, im, 1)
# cmul_trans: (γ, k, 1, tr)
Mhat_trans = pyfftw.empty_aligned((n_gamma, ngridr, n_images, n_trans),
dtype='complex128')
# Mhat_trans: (γ, k, im × tr)
plan = pyfftw.FFTW(Mhat_trans, Mhat_trans, axes=(0,),
direction='FFTW_FORWARD', flags=('FFTW_ESTIMATE',), threads=12)
tmr_start = time()
np.multiply(Mhat, cmul_trans, out=Mhat_trans)
plan()
Mhathat_trans = Mhat_trans.reshape((n_gamma, ngridr, n_images * n_trans))
# Mhathat_trans: (q, k, im × tr)
ptm = time() - tmr_start
tmr_start = time()
c_n2 = np.zeros((n_gamma, n_templates, n_images*n_trans),
dtype=np.complex128)
# c_n2: (q, te, im × tr)
for k1 in range(n_gamma):
k1p = (k1 + n_gamma // 2) % n_gamma
c_n2[k1, :, :] = np.matmul(np.conj(Shathat[k1p, :, :]), Mhathat_trans[k1, :, :])
c_n2 = 2 * np.pi * c_n2
c_n2 = ifft(c_n2, axis=0)
# c_n2: (γ, te, im × tr)
c_n2 = c_n2.reshape((n_gamma, n_templates, n_images, n_trans))
c_n2 = np.real(c_n2)
# c_n2: (γ, te, im, tr)
tm = time() - tmr_start
return c_n2, ptm, tm
def translations_brute_force_batch(Shathat, Mhat, pf_grid, tr_grid, n_psi,
n_batch_im=None, n_batch_trans=500):
n_templates = Shathat.shape[0]
n_images = Mhat.shape[0]
trans = tr_grid['trans']
n_trans = tr_grid['n_trans']
if n_batch_im is None:
n_batch_im = n_images
n_batch_trans = min(n_batch_trans, n_trans)
zprods1 = np.zeros((n_psi, n_templates, n_images, n_trans))
# zprods1: (γ, te, im, tr)
tm1 = 0
precomp1 = 0
for cn in range(0, n_images, n_batch_im):
idx_im = range(cn, min(cn + n_batch_im, n_images))
for ttt in range(0, n_trans, n_batch_trans):
idx_trans = range(ttt, min(ttt + n_batch_trans, n_trans))
cmul_trans = pft_phase_shift(-trans[idx_trans, :], pf_grid)
# cmul_trans: (tr, k × γ)
tmp, ptm, tm = translations_brute_force(
Shathat, Mhat[idx_im, :], cmul_trans)
zprods1[np.ix_(range(n_psi),
range(n_templates),
idx_im,
idx_trans)] = tmp
precomp1 += ptm
tm1 += tm
zprods1 = zprods1.transpose((2, 1, 0, 3))
return zprods1, precomp1, tm1
def svd_decomposition_alignment(SSS, Mhat, n_bessel, all_rnks, BigMul_left):
ngridr = SSS.shape[-1]
n_templates = SSS.shape[-2]
n_gamma = SSS.shape[-3]
n_images = Mhat.shape[-2]
n_trans = BigMul_left.shape[-1]
tmr_start = time()
Mhathat = Mhat.reshape((n_images, ngridr, n_gamma))
Mhathat = fftshift(fft(Mhathat, axis=-1), axes=-1) / n_gamma
MMM = np.zeros((n_images, 2 * n_bessel + 1, ngridr, n_gamma),
dtype=np.complex128)
for im in range(n_images):
for qp in range(-n_bessel, n_bessel + 1):
tmp = Mhathat[im, :, :]
MMM[im, qp + n_bessel, :, :] = np.roll(tmp, -qp, axis=-1)
MMM = MMM.transpose((1, 3, 2, 0)).copy()
precomp2 = time() - tmr_start
tmr_start = time()
BigMul_right = np.zeros((sum(all_rnks), n_gamma, n_templates, n_images),
dtype=np.complex128)
for qp in range(-n_bessel, n_bessel + 1):
rnk = all_rnks[qp + n_bessel]
ofst = sum(all_rnks[:qp + n_bessel])
for ll in range(rnk):
for q in range(n_gamma):
tmp = np.matmul(SSS[ofst + ll, q, :, :],
MMM[qp + n_bessel, q, :, :])
BigMul_right[ofst + ll, q, :, :] = tmp
BigMul_right = BigMul_right.transpose((3, 2, 1, 0)).copy()
c_n = np.zeros((n_images, n_templates, n_gamma, n_trans),
dtype=np.complex128)
for im in range(n_images):
for tt in range(n_templates):
c_n[im, tt, :, :] = np.matmul(BigMul_right[im, tt, :, :],
BigMul_left)
c_n = 2 * np.pi * c_n
zprods = ifft(ifftshift(c_n, axes=-2), axis=-2) * n_gamma
tm2 = time() - tmr_start
return zprods, precomp2, tm2
def cartesian_to_pft(templates, T, pf_grid):
xnodesr = pf_grid['xnodesr']
n_psi = pf_grid['n_psi']
ngridr = xnodesr.shape[0]
n_templates = templates.shape[0]
N = templates.shape[1]
dx = T / N
dy = T / N
wx = pf_grid['wx']
wy = pf_grid['wy']
Shat = np.zeros((n_templates, ngridr * n_psi), dtype=np.complex128)
upsampfac = 1.25
fcc = np.empty(len(wx), dtype=np.complex128)
for k in range(n_templates):
template = templates[k, :, :]
# Need to force Fortran ordering because that's what the FINUFFT
# interface expects.
gg = np.asfortranarray(template.transpose((1, 0)))
isign = -1
eps = 1e-6
# Note: Crashes if gg is a 1D vector (raveled). Why?
finufftpy.nufft2d2(wx * dx, wy * dy, fcc,
isign, eps, gg, upsampfac=upsampfac)
Shat[k, :] = fcc
return Shat
def pft_to_cartesian(Shat, T, N, pf_grid):
xnodesr = pf_grid['xnodesr']
n_psi = pf_grid['n_psi']
quad_wts = pf_grid['quad_wts']
ngridr = xnodesr.shape[0]
n_templates = Shat.shape[0]
dx = T / N
dy = T / N
wx = pf_grid['wx']
wy = pf_grid['wy']
templates1 = np.zeros((n_templates, N, N))
# Again, Fortran ordering is necessary for FINUFFT.
gxx = np.empty((N, N), dtype=np.complex128, order='F')
upsampfac = 1.25
for k in range(n_templates):
fcc1 = Shat[k, :] * quad_wts
isign = 1
eps = 1e-6
finufftpy.nufft2d1(wx * dx, wy * dy, fcc1, isign, eps, N, N, gxx,
upsampfac=upsampfac)
gxx = gxx*dx*dy/(4*np.pi**2)
templates1[k, :, :] = np.real(gxx.transpose((1, 0)))
return templates1
def rotate_pft(fcc, rgamma, pf_grid):
xnodesr = pf_grid['xnodesr']
n_psi = pf_grid['n_psi']
ngridr = xnodesr.shape[0]
ngridc = n_psi * np.ones(ngridr, dtype=np.int32)
fcc_rot = np.zeros(fcc.shape, dtype=np.complex128)
cnt = 0
for rr in range(ngridr):
tmp = fcc[:, cnt:cnt + ngridc[rr]]
ffcc = fft(tmp)
n_theta = ngridc[rr]
wth = ifftshift(np.arange(-n_theta/2, n_theta/2))
mul = np.exp(-1j * wth * rgamma[:, np.newaxis])
ffcc_rot = ffcc * mul
tmp = ifft(ffcc_rot)
fcc_rot[:, cnt:cnt + ngridc[rr]] = tmp
cnt += ngridc[rr]
return fcc_rot
def pft_phase_shift(sh, pf_grid):
all_psi = pf_grid['all_psi']
quad_xnodesr = pf_grid['all_r']
phase = (np.cos(all_psi) * sh[:, np.newaxis, 0]
+ np.sin(all_psi) * sh[:, np.newaxis, 1])
cmul = np.exp(-1j * quad_xnodesr * phase)
return cmul
def translate_pft(fcc, sh, pf_grid):
cmul = pft_phase_shift(sh, pf_grid)
return fcc * cmul
def pft_norm(Mhat, pf_grid):
quad_wts = pf_grid['quad_wts']
return np.sqrt(np.sum((np.abs(Mhat) ** 2) * quad_wts, axis=-1))
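# pft_norm evaluates the quadrature-weighted L2 norm on the polar Fourier grid,
#   ||M|| = sqrt( sum_k quad_wts[k] * |Mhat[..., k]|**2 ),
# with the same quadrature weights that pft_to_fb applies below.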
def pft_to_fb(Shat, pf_grid):
ngridr = pf_grid['ngridr']
n_psi = pf_grid['n_psi']
quad_wts = pf_grid['quad_wts']
n_templates = Shat.shape[0]
quad_wts_sq = quad_wts.reshape((ngridr, n_psi))
Shathat = Shat.reshape((n_templates, ngridr, n_psi))
# Shathat: (te, k, γ)
Shathat = np.fft.fftshift(np.fft.fft(Shathat, axis=-1), axes=-1)
Shathat = Shathat * quad_wts_sq[np.newaxis, :, :]
# Shathat: (te, k, q)
# There was a 2π factor missing before. Let's remove it.
Shathat = Shathat / (2 * np.pi)
return Shathat
def make_tensor_grid(rmax, ngridr, n_psi):
dr = rmax/ngridr
xnodesr = dr*np.arange(1, ngridr+1)
weights = dr*np.ones(ngridr)
psi = 2 * np.pi / n_psi * np.arange(n_psi)
all_psi = np.repeat(psi[np.newaxis, :], ngridr, axis=0)
all_psi = np.ravel(all_psi)
all_r = np.repeat(xnodesr[:, np.newaxis], n_psi, axis=1)
all_r = np.ravel(all_r)
wts_theta = 2 * np.pi / n_psi
quad_wts = wts_theta * xnodesr * weights
quad_wts = np.repeat(quad_wts[:, np.newaxis], n_psi, axis=-1)
quad_wts = np.ravel(quad_wts)
wx = np.zeros(n_psi * ngridr)
wy = np.zeros(n_psi * ngridr)
cnt = 0
for rr in range(ngridr):
dd = xnodesr[rr]
theta = 2 * np.pi / n_psi * np.arange(n_psi)
wx[cnt:cnt + n_psi] = dd * np.cos(theta)
wy[cnt:cnt + n_psi] = dd * np.sin(theta)
cnt = cnt + n_psi
grid = dict()
grid['rmax'] = rmax
grid['ngridr'] = ngridr
grid['n_psi'] = n_psi
grid['xnodesr'] = xnodesr
grid['all_psi'] = all_psi
grid['all_r'] = all_r
grid['quad_wts'] = quad_wts
grid['wx'] = wx
grid['wy'] = wy
return grid
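# Illustrative note: make_tensor_grid builds the polar Fourier grid used throughout this
# module; 'wx'/'wy' are the Cartesian coordinates of the polar nodes (consumed by the
# NUFFT calls in cartesian_to_pft / pft_to_cartesian) and 'quad_wts' are the matching
# quadrature weights (used e.g. by pft_to_cartesian, pft_norm and pft_to_fb).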
def make_adaptive_grid(delta_range, dx, oversampling):
all_delta = dx / oversampling * np.arange(oversampling * delta_range + 1e-10)
n_delta = all_delta.shape[0]
n_omega = oversampling * np.int32(np.ceil(2 * np.pi / dx * all_delta))
n_trans = np.sum(n_omega)
trans = np.zeros((n_trans, 2))
cnt = 0
for kk in range(n_delta):
n_om = n_omega[kk]
all_om = 2 * np.pi * np.arange(n_om) / n_om
trans[cnt:cnt + n_om, 0] = all_delta[kk] * np.cos(all_om)
trans[cnt:cnt + n_om, 1] = all_delta[kk] * np.sin(all_om)
cnt += n_om
grid = dict()
grid['all_delta'] = all_delta
grid['n_delta'] = n_delta
grid['n_omega'] = n_omega
grid['n_trans'] = n_trans
grid['trans'] = trans
return grid
def make_cartesian_grid(delta_range, dx, oversampling):
Nkeep = 2 * oversampling * delta_range
xfine = dx * np.arange(-Nkeep // 2, Nkeep // 2)
trans = xfine
trans = np.meshgrid(trans, trans, indexing='ij')
trans = np.stack(trans[::-1], -1)
trans = trans.reshape((Nkeep ** 2, 2))
grid = {'n_trans': Nkeep ** 2, 'trans': trans}
return grid
def extract_alignments(inner_prods3, tr_grid):
n_images = inner_prods3.shape[0]
n_templates = inner_prods3.shape[1]
n_psi = inner_prods3.shape[2]
n_trans = inner_prods3.shape[3]
trans = tr_grid['trans']
inner_prods3 = inner_prods3.reshape((n_images,
n_templates*n_psi*n_trans))
est_template_ind = np.zeros(n_images, dtype=np.int32)
est_trans = np.zeros((n_images, 2))
est_gamma = np.zeros(n_images)
idx = inner_prods3.argmax(axis=-1)
for cn in range(n_images):
I3, I2, I1 = np.unravel_index(idx[cn],
(n_templates, n_psi, n_trans))
shiftx = trans[I1, 0]
shifty = trans[I1, 1]
rgamma = I2 * 2 * np.pi / n_psi
est_template_ind[cn] = I3
est_trans[cn, 0] = shiftx
est_trans[cn, 1] = shifty
est_gamma[cn] = rgamma
return est_template_ind, est_trans, est_gamma
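# Hedged shape sketch: `inner_prods3` is indexed as (image, template, in-plane
# rotation, translation); the best alignment is simply the joint argmax over
# the last three axes, converted back to a (template, shift, gamma) triple.
def _example_extract_alignments():
    tr_grid = make_adaptive_grid(delta_range=2.0, dx=0.5, oversampling=1)
    scores = np.random.rand(3, 5, 16, tr_grid['n_trans'])  # 3 images, 5 templates, 16 rotations
    return extract_alignments(scores, tr_grid)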
def rotations_brute_force(fimages, Shat, n_gamma, pf_grid, Nfine):
eval_results = False
if Shat.ndim == 2:
Shat = Shat[np.newaxis, :, :]
n_images, N, _ = fimages.shape
n_templates, ngridr, ngridp = Shat.shape
quad_wts_sq = pf_grid['quad_wts'].reshape((ngridr, ngridp))
wx = pf_grid['wx']
wy = pf_grid['wy']
all_gamma = 2 * np.pi / n_gamma * np.arange(n_gamma)
tmr_start = time()
Shathat = fft(Shat) / ngridp
# Shat: (te, k, γ)
# Shathat: (te, k, q)
Shathat = Shathat.reshape((n_templates, 1, ngridr, ngridp))
# Shathat: (te, 1, k, q)
wth = ifftshift(np.arange(-ngridp / 2, ngridp / 2))
mul = np.exp(-1j * wth[np.newaxis, :] * all_gamma[:,np.newaxis])
# mul: (γ, q)
Shathat_rot = Shathat * mul[:, np.newaxis, :]
# Shathat_rot: (te, γ, k, q)
    # NOTE: This can be sped up by using PyFFTW. However, for the execution to
    # be efficient, the plan must be created using FFTW_MEASURE, which takes a
    # long time. The solution will be to separate this out to the BFR
    # “planning” stage for some fixed number of image–template pairs, then
    # loop over these, computing the IFFT batchwise at execution (since the
    # exact number of pairs is not known at planning time).
Shat_rot = ifft(Shathat_rot)
fx1 = quad_wts_sq * Shat_rot
T = 2
dx = dy = T / N
templates_rot = np.empty((N, N, n_gamma, n_templates),
dtype=np.complex128, order='F')
upsampfac = 1.25
isign = 1
eps = 1e-2
finufftpy.nufft2d1many(wx * dx, wy * dy, fx1, isign, eps, N, N,
templates_rot, upsampfac=upsampfac)
templates_rot = templates_rot / (4 * np.pi ** 2)
# templates_rot: (trx, try, γ, te)
templates_rot = templates_rot.transpose((3, 2, 1, 0)).copy()
# templates_rot: (te, γ, try, trx)
ftemplates_rot = fft2(ifftshift(templates_rot, axes=(-2, -1)))
# ftemplates_rot: (te, γ, trky, trkx)
precomp = time() - tmr_start
tmr_start = time()
ftemplates_rot = ftemplates_rot[:, np.newaxis, :, :, :]
# ftemplates_rot: (te, im, γ, trky, trkx)
fxx = fimages[:, np.newaxis, :, :] * np.conj(ftemplates_rot)
# ftemplates_rot: (te, im, γ, trky, trkx)
inner_prods = pyfftw.zeros_aligned((n_templates, n_images, n_gamma, Nfine, Nfine), dtype='complex128')
inner_prods[:, :, :, :N // 2, :N // 2] = fxx[:, :, :, :N // 2, :N // 2]
inner_prods[:, :, :, :N // 2, -N // 2:] = fxx[:, :, :, :N // 2, -N // 2:]
inner_prods[:, :, :, -N // 2:, :N // 2] = fxx[:, :, :, -N // 2:, :N // 2]
inner_prods[:, :, :, -N // 2:, -N // 2:] = fxx[:, :, :, -N // 2:, -N // 2:]
plan = pyfftw.FFTW(inner_prods, inner_prods, axes=(-2, -1),
direction='FFTW_BACKWARD',
flags=('FFTW_MEASURE',), threads=12)
plan()
inner_prods = np.real(inner_prods)
inner_prods *= (Nfine / N) ** 2
# inner_prods: (te, im, γ, try, trx)
comp = time() - tmr_start
return inner_prods, precomp, comp
def calc_ftk_svd(n_bessel, eps, pf_grid, tr_grid):
all_UU = [None] * (2 * n_bessel + 1)
all_SSVV = [None] * (2 * n_bessel + 1)
all_rnks = np.zeros(2 * n_bessel + 1, dtype=np.int32)
xnodesr = pf_grid['xnodesr']
all_delta = tr_grid['all_delta']
n_delta = tr_grid['n_delta']
n_omega = tr_grid['n_omega']
n_trans = tr_grid['n_trans']
for qp in range(-n_bessel, n_bessel + 1):
J_n = besselj(qp, -all_delta[:, np.newaxis] * xnodesr[np.newaxis, :])
U, S, Vh = np.linalg.svd(J_n)
ind = S > eps
rnk = sum(ind)
all_rnks[qp + n_bessel] = rnk
all_UU[qp + n_bessel] = U[:, :rnk]
all_SSVV[qp + n_bessel] = S[:rnk, np.newaxis] * Vh[:rnk, :]
SSVV_big = np.concatenate(all_SSVV, axis=0)
UUU = np.concatenate(all_UU, axis=1)
all_omega = np.concatenate([2 * np.pi / n_om * np.arange(n_om)
for n_om in n_omega if n_om > 0])
all_qp = np.concatenate([(k - n_bessel) * np.ones(n)
for k, n in enumerate(all_rnks)])
vec_omega = np.exp(1j * all_qp[np.newaxis, :]
* (all_omega[:, np.newaxis] - np.pi / 2))
BigMul_left = np.zeros((sum(all_rnks), n_trans), dtype=np.complex128)
cnt = 0
for kk in range(n_delta):
n_om = n_omega[kk]
BigMul_left[:, cnt:cnt + n_om] = (UUU[kk, :][np.newaxis, :].T
* vec_omega[cnt:cnt + n_om, :].T)
cnt += n_om
return all_rnks, BigMul_left, SSVV_big
def premult_right_fb(Shathat, SSVV_big, all_rnks):
n_psi = Shathat.shape[2]
ngridr = Shathat.shape[1]
n_templates = Shathat.shape[0]
Shathat = Shathat.transpose((2, 0, 1))
Shathat = Shathat.reshape((1, n_psi * n_templates, ngridr))
SSS = SSVV_big[:, np.newaxis, :] * Shathat.conj()
SSS = SSS.reshape((sum(all_rnks), n_psi, n_templates, ngridr))
return SSS
def bft_plan(tr_grid, pf_grid):
plan = {'tr_grid': tr_grid,
'pf_grid': pf_grid}
return plan
def bft_execute(plan, Mhat, Shat):
pf_grid = plan['pf_grid']
tr_grid = plan['tr_grid']
n_psi = pf_grid['n_psi']
Mnorm = pft_norm(Mhat, pf_grid)
Snorm = pft_norm(Shat, pf_grid)
MSnorm = Mnorm[:, np.newaxis] * Snorm[np.newaxis, :]
tmr_start = time()
Shathat = pft_to_fb(Shat, pf_grid)
precomp1 = time() - tmr_start
zprods1, ptm, tm = translations_brute_force_batch(Shathat, Mhat,
pf_grid, tr_grid, n_psi)
precomp1 += ptm
inner_prods3 = zprods1 / MSnorm[..., np.newaxis, np.newaxis]
return inner_prods3, (precomp1, tm)
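# Hedged sketch of the plan/execute pattern shared by the BFT, FTK and BFR
# paths: `Mhat` and `Shat` are polar Fourier transforms of the images and
# templates, shaped (n_images, ngridr * n_psi) and (n_templates, ngridr * n_psi).
def _example_bft(Mhat, Shat, pf_grid, tr_grid):
    plan = bft_plan(tr_grid, pf_grid)
    inner_prods, (precomp_time, comp_time) = bft_execute(plan, Mhat, Shat)
    return extract_alignments(inner_prods, tr_grid)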
def ftk_plan(tr_grid, pf_grid, n_bessel, eps):
all_rnks, BigMul_left, SSVV_big = calc_ftk_svd(n_bessel, eps, pf_grid, tr_grid)
plan = {'tr_grid': tr_grid,
'pf_grid': pf_grid,
'n_bessel': n_bessel,
'eps': eps,
'all_rnks': all_rnks,
'BigMul_left': BigMul_left,
'SSVV_big': SSVV_big}
return plan
def ftk_execute(plan, Mhat, Shat):
pf_grid = plan['pf_grid']
SSVV_big = plan['SSVV_big']
all_rnks = plan['all_rnks']
n_bessel = plan['n_bessel']
BigMul_left = plan['BigMul_left']
Mnorm = pft_norm(Mhat, pf_grid)
Snorm = pft_norm(Shat, pf_grid)
MSnorm = Mnorm[:, np.newaxis] * Snorm[np.newaxis, :]
tmr_start = time()
Shathat = pft_to_fb(Shat, pf_grid)
SSS = premult_right_fb(Shathat, SSVV_big, all_rnks)
precomp2 = time() - tmr_start
zprods4, ptm, tm = svd_decomposition_alignment(SSS, Mhat, n_bessel,
all_rnks, BigMul_left)
precomp2 += ptm
inner_prods4 = np.real(zprods4) / MSnorm[..., np.newaxis, np.newaxis]
return inner_prods4, (precomp2, tm)
def bfr_plan(Nfine, Nkeep, n_gamma, pf_grid, T, N):
plan = {'Nfine': Nfine,
'Nkeep': Nkeep,
'n_gamma': n_gamma,
'pf_grid': pf_grid,
'T': T,
'N': N}
# TODO: FFTW plans, etc.
return plan
def bfr_execute(plan, Mhat, Shat):
pf_grid = plan['pf_grid']
T = plan['T']
N = plan['N']
Nfine = plan['Nfine']
Nkeep = plan['Nkeep']
n_gamma = plan['n_gamma']
ngridr = pf_grid['ngridr']
n_psi = pf_grid['n_psi']
n_templates = Shat.shape[0]
n_images = Mhat.shape[0]
dx = dy = T / N
images = pft_to_cartesian(Mhat, T, N, pf_grid) / (dx * dy)
Mnorm = pft_norm(Mhat, pf_grid)
Snorm = pft_norm(Shat, pf_grid)
fimages = fft2(ifftshift(images, axes=(-2, -1)))
SShat = Shat.reshape((n_templates, ngridr, n_psi))
fimages = fimages / Mnorm[:, np.newaxis, np.newaxis]
SShat = SShat / Snorm[:, np.newaxis, np.newaxis]
precomp3 = 0
comp3 = 0
inner_prods = np.zeros((n_images, n_templates, n_gamma, Nkeep, Nkeep), dtype=np.complex128)
for tt in range(n_templates):
inn, precomp, comp = rotations_brute_force(fimages, SShat[tt],
n_gamma, pf_grid, Nfine)
# NOTE: The following truncates *and* inverts the FFT shift.
inner_prods[:, tt, :, -Nkeep // 2:, -Nkeep // 2:] = inn[:, :, :, :Nkeep // 2, :Nkeep // 2]
inner_prods[:, tt, :, -Nkeep // 2:, :Nkeep // 2] = inn[:, :, :, :Nkeep // 2, -Nkeep // 2:]
inner_prods[:, tt, :, :Nkeep // 2, -Nkeep // 2:] = inn[:, :, :, -Nkeep // 2:, :Nkeep // 2]
inner_prods[:, tt, :, :Nkeep // 2, :Nkeep // 2] = inn[:, :, :, -Nkeep // 2:, -Nkeep // 2:]
precomp3 += precomp
comp3 += comp
inner_prods = inner_prods.reshape((n_images, n_templates, n_gamma, Nkeep ** 2))
return inner_prods, (precomp3, comp3)
|
[
"numpy.sin",
"numpy.arange",
"numpy.multiply",
"numpy.repeat",
"numpy.fft.fft",
"finufftpy.nufft2d1",
"pyfftw.FFTW",
"finufftpy.nufft2d1many",
"numpy.exp",
"numpy.real",
"numpy.stack",
"numpy.empty",
"numpy.unravel_index",
"numpy.concatenate",
"numpy.matmul",
"numpy.meshgrid",
"numpy.abs",
"numpy.ceil",
"numpy.ones",
"numpy.conj",
"finufftpy.nufft2d2",
"numpy.cos",
"numpy.fft.ifftshift",
"numpy.linalg.svd",
"scipy.special.jv",
"numpy.fft.ifft",
"time.time",
"numpy.roll",
"pyfftw.zeros_aligned",
"pyfftw.empty_aligned",
"numpy.zeros",
"numpy.sum",
"numpy.ravel"
] |
[((1078, 1156), 'pyfftw.empty_aligned', 'pyfftw.empty_aligned', (['(n_gamma, ngridr, n_images, n_trans)'], {'dtype': '"""complex128"""'}), "((n_gamma, ngridr, n_images, n_trans), dtype='complex128')\n", (1098, 1156), False, 'import pyfftw\n'), ((1211, 1325), 'pyfftw.FFTW', 'pyfftw.FFTW', (['Mhat_trans', 'Mhat_trans'], {'axes': '(0,)', 'direction': '"""FFTW_FORWARD"""', 'flags': "('FFTW_ESTIMATE',)", 'threads': '(12)'}), "(Mhat_trans, Mhat_trans, axes=(0,), direction='FFTW_FORWARD',\n flags=('FFTW_ESTIMATE',), threads=12)\n", (1222, 1325), False, 'import pyfftw\n'), ((1347, 1353), 'time.time', 'time', ([], {}), '()\n', (1351, 1353), False, 'from time import time\n'), ((1359, 1404), 'numpy.multiply', 'np.multiply', (['Mhat', 'cmul_trans'], {'out': 'Mhat_trans'}), '(Mhat, cmul_trans, out=Mhat_trans)\n', (1370, 1404), True, 'import numpy as np\n'), ((1579, 1585), 'time.time', 'time', ([], {}), '()\n', (1583, 1585), False, 'from time import time\n'), ((1597, 1670), 'numpy.zeros', 'np.zeros', (['(n_gamma, n_templates, n_images * n_trans)'], {'dtype': 'np.complex128'}), '((n_gamma, n_templates, n_images * n_trans), dtype=np.complex128)\n', (1605, 1670), True, 'import numpy as np\n'), ((1921, 1939), 'numpy.fft.ifft', 'ifft', (['c_n2'], {'axis': '(0)'}), '(c_n2, axis=0)\n', (1925, 1939), False, 'from numpy.fft import fft, ifft, fftshift, ifftshift, fft2, ifft2\n'), ((2048, 2061), 'numpy.real', 'np.real', (['c_n2'], {}), '(c_n2)\n', (2055, 2061), True, 'import numpy as np\n'), ((2515, 2564), 'numpy.zeros', 'np.zeros', (['(n_psi, n_templates, n_images, n_trans)'], {}), '((n_psi, n_templates, n_images, n_trans))\n', (2523, 2564), True, 'import numpy as np\n'), ((3625, 3631), 'time.time', 'time', ([], {}), '()\n', (3629, 3631), False, 'from time import time\n'), ((3764, 3840), 'numpy.zeros', 'np.zeros', (['(n_images, 2 * n_bessel + 1, ngridr, n_gamma)'], {'dtype': 'np.complex128'}), '((n_images, 2 * n_bessel + 1, ngridr, n_gamma), dtype=np.complex128)\n', (3772, 3840), True, 'import numpy as np\n'), ((4145, 4151), 'time.time', 'time', ([], {}), '()\n', (4149, 4151), False, 'from time import time\n'), ((4722, 4794), 'numpy.zeros', 'np.zeros', (['(n_images, n_templates, n_gamma, n_trans)'], {'dtype': 'np.complex128'}), '((n_images, n_templates, n_gamma, n_trans), dtype=np.complex128)\n', (4730, 4794), True, 'import numpy as np\n'), ((5455, 5515), 'numpy.zeros', 'np.zeros', (['(n_templates, ngridr * n_psi)'], {'dtype': 'np.complex128'}), '((n_templates, ngridr * n_psi), dtype=np.complex128)\n', (5463, 5515), True, 'import numpy as np\n'), ((6381, 6410), 'numpy.zeros', 'np.zeros', (['(n_templates, N, N)'], {}), '((n_templates, N, N))\n', (6389, 6410), True, 'import numpy as np\n'), ((6478, 6526), 'numpy.empty', 'np.empty', (['(N, N)'], {'dtype': 'np.complex128', 'order': '"""F"""'}), "((N, N), dtype=np.complex128, order='F')\n", (6486, 6526), True, 'import numpy as np\n'), ((7106, 7146), 'numpy.zeros', 'np.zeros', (['fcc.shape'], {'dtype': 'np.complex128'}), '(fcc.shape, dtype=np.complex128)\n', (7114, 7146), True, 'import numpy as np\n'), ((7776, 7812), 'numpy.exp', 'np.exp', (['(-1.0j * quad_xnodesr * phase)'], {}), '(-1.0j * quad_xnodesr * phase)\n', (7782, 7812), True, 'import numpy as np\n'), ((8830, 8875), 'numpy.repeat', 'np.repeat', (['psi[np.newaxis, :]', 'ngridr'], {'axis': '(0)'}), '(psi[np.newaxis, :], ngridr, axis=0)\n', (8839, 8875), True, 'import numpy as np\n'), ((8890, 8907), 'numpy.ravel', 'np.ravel', (['all_psi'], {}), '(all_psi)\n', (8898, 8907), True, 'import numpy as np\n'), ((8921, 
8969), 'numpy.repeat', 'np.repeat', (['xnodesr[:, np.newaxis]', 'n_psi'], {'axis': '(1)'}), '(xnodesr[:, np.newaxis], n_psi, axis=1)\n', (8930, 8969), True, 'import numpy as np\n'), ((8982, 8997), 'numpy.ravel', 'np.ravel', (['all_r'], {}), '(all_r)\n', (8990, 8997), True, 'import numpy as np\n'), ((9093, 9143), 'numpy.repeat', 'np.repeat', (['quad_wts[:, np.newaxis]', 'n_psi'], {'axis': '(-1)'}), '(quad_wts[:, np.newaxis], n_psi, axis=-1)\n', (9102, 9143), True, 'import numpy as np\n'), ((9159, 9177), 'numpy.ravel', 'np.ravel', (['quad_wts'], {}), '(quad_wts)\n', (9167, 9177), True, 'import numpy as np\n'), ((9188, 9212), 'numpy.zeros', 'np.zeros', (['(n_psi * ngridr)'], {}), '(n_psi * ngridr)\n', (9196, 9212), True, 'import numpy as np\n'), ((9222, 9246), 'numpy.zeros', 'np.zeros', (['(n_psi * ngridr)'], {}), '(n_psi * ngridr)\n', (9230, 9246), True, 'import numpy as np\n'), ((10026, 10041), 'numpy.sum', 'np.sum', (['n_omega'], {}), '(n_omega)\n', (10032, 10041), True, 'import numpy as np\n'), ((10055, 10077), 'numpy.zeros', 'np.zeros', (['(n_trans, 2)'], {}), '((n_trans, 2))\n', (10063, 10077), True, 'import numpy as np\n'), ((10723, 10763), 'numpy.meshgrid', 'np.meshgrid', (['trans', 'trans'], {'indexing': '"""ij"""'}), "(trans, trans, indexing='ij')\n", (10734, 10763), True, 'import numpy as np\n'), ((10776, 10801), 'numpy.stack', 'np.stack', (['trans[::-1]', '(-1)'], {}), '(trans[::-1], -1)\n', (10784, 10801), True, 'import numpy as np\n'), ((11285, 11319), 'numpy.zeros', 'np.zeros', (['n_images'], {'dtype': 'np.int32'}), '(n_images, dtype=np.int32)\n', (11293, 11319), True, 'import numpy as np\n'), ((11336, 11359), 'numpy.zeros', 'np.zeros', (['(n_images, 2)'], {}), '((n_images, 2))\n', (11344, 11359), True, 'import numpy as np\n'), ((11376, 11394), 'numpy.zeros', 'np.zeros', (['n_images'], {}), '(n_images)\n', (11384, 11394), True, 'import numpy as np\n'), ((12292, 12298), 'time.time', 'time', ([], {}), '()\n', (12296, 12298), False, 'from time import time\n'), ((12543, 12604), 'numpy.exp', 'np.exp', (['(-1.0j * wth[np.newaxis, :] * all_gamma[:, np.newaxis])'], {}), '(-1.0j * wth[np.newaxis, :] * all_gamma[:, np.newaxis])\n', (12549, 12604), True, 'import numpy as np\n'), ((13159, 13176), 'numpy.fft.ifft', 'ifft', (['Shathat_rot'], {}), '(Shathat_rot)\n', (13163, 13176), False, 'from numpy.fft import fft, ifft, fftshift, ifftshift, fft2, ifft2\n'), ((13263, 13333), 'numpy.empty', 'np.empty', (['(N, N, n_gamma, n_templates)'], {'dtype': 'np.complex128', 'order': '"""F"""'}), "((N, N, n_gamma, n_templates), dtype=np.complex128, order='F')\n", (13271, 13333), True, 'import numpy as np\n'), ((13419, 13522), 'finufftpy.nufft2d1many', 'finufftpy.nufft2d1many', (['(wx * dx)', '(wy * dy)', 'fx1', 'isign', 'eps', 'N', 'N', 'templates_rot'], {'upsampfac': 'upsampfac'}), '(wx * dx, wy * dy, fx1, isign, eps, N, N,\n templates_rot, upsampfac=upsampfac)\n', (13441, 13522), False, 'import finufftpy\n'), ((13905, 13911), 'time.time', 'time', ([], {}), '()\n', (13909, 13911), False, 'from time import time\n'), ((14150, 14243), 'pyfftw.zeros_aligned', 'pyfftw.zeros_aligned', (['(n_templates, n_images, n_gamma, Nfine, Nfine)'], {'dtype': '"""complex128"""'}), "((n_templates, n_images, n_gamma, Nfine, Nfine), dtype=\n 'complex128')\n", (14170, 14243), False, 'import pyfftw\n'), ((14564, 14685), 'pyfftw.FFTW', 'pyfftw.FFTW', (['inner_prods', 'inner_prods'], {'axes': '(-2, -1)', 'direction': '"""FFTW_BACKWARD"""', 'flags': "('FFTW_MEASURE',)", 'threads': '(12)'}), "(inner_prods, inner_prods, axes=(-2, 
-1), direction=\n 'FFTW_BACKWARD', flags=('FFTW_MEASURE',), threads=12)\n", (14575, 14685), False, 'import pyfftw\n'), ((14758, 14778), 'numpy.real', 'np.real', (['inner_prods'], {}), '(inner_prods)\n', (14765, 14778), True, 'import numpy as np\n'), ((15078, 15120), 'numpy.zeros', 'np.zeros', (['(2 * n_bessel + 1)'], {'dtype': 'np.int32'}), '(2 * n_bessel + 1, dtype=np.int32)\n', (15086, 15120), True, 'import numpy as np\n'), ((15667, 15699), 'numpy.concatenate', 'np.concatenate', (['all_SSVV'], {'axis': '(0)'}), '(all_SSVV, axis=0)\n', (15681, 15699), True, 'import numpy as np\n'), ((15711, 15741), 'numpy.concatenate', 'np.concatenate', (['all_UU'], {'axis': '(1)'}), '(all_UU, axis=1)\n', (15725, 15741), True, 'import numpy as np\n'), ((16014, 16091), 'numpy.exp', 'np.exp', (['(1.0j * all_qp[np.newaxis, :] * (all_omega[:, np.newaxis] - np.pi / 2))'], {}), '(1.0j * all_qp[np.newaxis, :] * (all_omega[:, np.newaxis] - np.pi / 2))\n', (16020, 16091), True, 'import numpy as np\n'), ((17249, 17255), 'time.time', 'time', ([], {}), '()\n', (17253, 17255), False, 'from time import time\n'), ((18335, 18341), 'time.time', 'time', ([], {}), '()\n', (18339, 18341), False, 'from time import time\n'), ((19753, 19830), 'numpy.zeros', 'np.zeros', (['(n_images, n_templates, n_gamma, Nkeep, Nkeep)'], {'dtype': 'np.complex128'}), '((n_images, n_templates, n_gamma, Nkeep, Nkeep), dtype=np.complex128)\n', (19761, 19830), True, 'import numpy as np\n'), ((1543, 1549), 'time.time', 'time', ([], {}), '()\n', (1547, 1549), False, 'from time import time\n'), ((2100, 2106), 'time.time', 'time', ([], {}), '()\n', (2104, 2106), False, 'from time import time\n'), ((4109, 4115), 'time.time', 'time', ([], {}), '()\n', (4113, 4115), False, 'from time import time\n'), ((5107, 5113), 'time.time', 'time', ([], {}), '()\n', (5111, 5113), False, 'from time import time\n'), ((5931, 6009), 'finufftpy.nufft2d2', 'finufftpy.nufft2d2', (['(wx * dx)', '(wy * dy)', 'fcc', 'isign', 'eps', 'gg'], {'upsampfac': 'upsampfac'}), '(wx * dx, wy * dy, fcc, isign, eps, gg, upsampfac=upsampfac)\n', (5949, 6009), False, 'import finufftpy\n'), ((6667, 6758), 'finufftpy.nufft2d1', 'finufftpy.nufft2d1', (['(wx * dx)', '(wy * dy)', 'fcc1', 'isign', 'eps', 'N', 'N', 'gxx'], {'upsampfac': 'upsampfac'}), '(wx * dx, wy * dy, fcc1, isign, eps, N, N, gxx, upsampfac\n =upsampfac)\n', (6685, 6758), False, 'import finufftpy\n'), ((7059, 7090), 'numpy.ones', 'np.ones', (['ngridr'], {'dtype': 'np.int32'}), '(ngridr, dtype=np.int32)\n', (7066, 7090), True, 'import numpy as np\n'), ((7246, 7254), 'numpy.fft.fft', 'fft', (['tmp'], {}), '(tmp)\n', (7249, 7254), False, 'from numpy.fft import fft, ifft, fftshift, ifftshift, fft2, ifft2\n'), ((7357, 7400), 'numpy.exp', 'np.exp', (['(-1.0j * wth * rgamma[:, np.newaxis])'], {}), '(-1.0j * wth * rgamma[:, np.newaxis])\n', (7363, 7400), True, 'import numpy as np\n'), ((7443, 7457), 'numpy.fft.ifft', 'ifft', (['ffcc_rot'], {}), '(ffcc_rot)\n', (7447, 7457), False, 'from numpy.fft import fft, ifft, fftshift, ifftshift, fft2, ifft2\n'), ((8392, 8420), 'numpy.fft.fft', 'np.fft.fft', (['Shathat'], {'axis': '(-1)'}), '(Shathat, axis=-1)\n', (8402, 8420), True, 'import numpy as np\n'), ((8712, 8736), 'numpy.arange', 'np.arange', (['(1)', '(ngridr + 1)'], {}), '(1, ngridr + 1)\n', (8721, 8736), True, 'import numpy as np\n'), ((8752, 8767), 'numpy.ones', 'np.ones', (['ngridr'], {}), '(ngridr)\n', (8759, 8767), True, 'import numpy as np\n'), ((8799, 8815), 'numpy.arange', 'np.arange', (['n_psi'], {}), '(n_psi)\n', (8808, 8815), 
True, 'import numpy as np\n'), ((9856, 9901), 'numpy.arange', 'np.arange', (['(oversampling * delta_range + 1e-10)'], {}), '(oversampling * delta_range + 1e-10)\n', (9865, 9901), True, 'import numpy as np\n'), ((10657, 10691), 'numpy.arange', 'np.arange', (['(-Nkeep // 2)', '(Nkeep // 2)'], {}), '(-Nkeep // 2, Nkeep // 2)\n', (10666, 10691), True, 'import numpy as np\n'), ((11488, 11544), 'numpy.unravel_index', 'np.unravel_index', (['idx[cn]', '(n_templates, n_psi, n_trans)'], {}), '(idx[cn], (n_templates, n_psi, n_trans))\n', (11504, 11544), True, 'import numpy as np\n'), ((12256, 12274), 'numpy.arange', 'np.arange', (['n_gamma'], {}), '(n_gamma)\n', (12265, 12274), True, 'import numpy as np\n'), ((12314, 12323), 'numpy.fft.fft', 'fft', (['Shat'], {}), '(Shat)\n', (12317, 12323), False, 'from numpy.fft import fft, ifft, fftshift, ifftshift, fft2, ifft2\n'), ((12497, 12531), 'numpy.arange', 'np.arange', (['(-ngridp / 2)', '(ngridp / 2)'], {}), '(-ngridp / 2, ngridp / 2)\n', (12506, 12531), True, 'import numpy as np\n'), ((13771, 13810), 'numpy.fft.ifftshift', 'ifftshift', (['templates_rot'], {'axes': '(-2, -1)'}), '(templates_rot, axes=(-2, -1))\n', (13780, 13810), False, 'from numpy.fft import fft, ifft, fftshift, ifftshift, fft2, ifft2\n'), ((13869, 13875), 'time.time', 'time', ([], {}), '()\n', (13873, 13875), False, 'from time import time\n'), ((14061, 14084), 'numpy.conj', 'np.conj', (['ftemplates_rot'], {}), '(ftemplates_rot)\n', (14068, 14084), True, 'import numpy as np\n'), ((14868, 14874), 'time.time', 'time', ([], {}), '()\n', (14872, 14874), False, 'from time import time\n'), ((15352, 15415), 'scipy.special.jv', 'besselj', (['qp', '(-all_delta[:, np.newaxis] * xnodesr[np.newaxis, :])'], {}), '(qp, -all_delta[:, np.newaxis] * xnodesr[np.newaxis, :])\n', (15359, 15415), True, 'from scipy.special import jv as besselj\n'), ((15436, 15454), 'numpy.linalg.svd', 'np.linalg.svd', (['J_n'], {}), '(J_n)\n', (15449, 15454), True, 'import numpy as np\n'), ((17310, 17316), 'time.time', 'time', ([], {}), '()\n', (17314, 17316), False, 'from time import time\n'), ((18452, 18458), 'time.time', 'time', ([], {}), '()\n', (18456, 18458), False, 'from time import time\n'), ((18658, 18674), 'numpy.real', 'np.real', (['zprods4'], {}), '(zprods4)\n', (18665, 18674), True, 'import numpy as np\n'), ((19501, 19533), 'numpy.fft.ifftshift', 'ifftshift', (['images'], {'axes': '(-2, -1)'}), '(images, axes=(-2, -1))\n', (19510, 19533), False, 'from numpy.fft import fft, ifft, fftshift, ifftshift, fft2, ifft2\n'), ((1828, 1855), 'numpy.conj', 'np.conj', (['Shathat[k1p, :, :]'], {}), '(Shathat[k1p, :, :])\n', (1835, 1855), True, 'import numpy as np\n'), ((3711, 3732), 'numpy.fft.fft', 'fft', (['Mhathat'], {'axis': '(-1)'}), '(Mhathat, axis=-1)\n', (3714, 3732), False, 'from numpy.fft import fft, ifft, fftshift, ifftshift, fft2, ifft2\n'), ((4021, 4047), 'numpy.roll', 'np.roll', (['tmp', '(-qp)'], {'axis': '(-1)'}), '(tmp, -qp, axis=-1)\n', (4028, 4047), True, 'import numpy as np\n'), ((4915, 4965), 'numpy.matmul', 'np.matmul', (['BigMul_right[im, tt, :, :]', 'BigMul_left'], {}), '(BigMul_right[im, tt, :, :], BigMul_left)\n', (4924, 4965), True, 'import numpy as np\n'), ((5053, 5076), 'numpy.fft.ifftshift', 'ifftshift', (['c_n'], {'axes': '(-2)'}), '(c_n, axes=-2)\n', (5062, 5076), False, 'from numpy.fft import fft, ifft, fftshift, ifftshift, fft2, ifft2\n'), ((7309, 7345), 'numpy.arange', 'np.arange', (['(-n_theta / 2)', '(n_theta / 2)'], {}), '(-n_theta / 2, n_theta / 2)\n', (7318, 7345), True, 'import numpy 
as np\n'), ((7671, 7686), 'numpy.cos', 'np.cos', (['all_psi'], {}), '(all_psi)\n', (7677, 7686), True, 'import numpy as np\n'), ((7725, 7740), 'numpy.sin', 'np.sin', (['all_psi'], {}), '(all_psi)\n', (7731, 7740), True, 'import numpy as np\n'), ((9350, 9366), 'numpy.arange', 'np.arange', (['n_psi'], {}), '(n_psi)\n', (9359, 9366), True, 'import numpy as np\n'), ((9402, 9415), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (9408, 9415), True, 'import numpy as np\n'), ((9451, 9464), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (9457, 9464), True, 'import numpy as np\n'), ((9974, 10009), 'numpy.ceil', 'np.ceil', (['(2 * np.pi / dx * all_delta)'], {}), '(2 * np.pi / dx * all_delta)\n', (9981, 10009), True, 'import numpy as np\n'), ((10251, 10265), 'numpy.cos', 'np.cos', (['all_om'], {}), '(all_om)\n', (10257, 10265), True, 'import numpy as np\n'), ((10317, 10331), 'numpy.sin', 'np.sin', (['all_om'], {}), '(all_om)\n', (10323, 10331), True, 'import numpy as np\n'), ((4496, 4559), 'numpy.matmul', 'np.matmul', (['SSS[ofst + ll, q, :, :]', 'MMM[qp + n_bessel, q, :, :]'], {}), '(SSS[ofst + ll, q, :, :], MMM[qp + n_bessel, q, :, :])\n', (4505, 4559), True, 'import numpy as np\n'), ((10177, 10192), 'numpy.arange', 'np.arange', (['n_om'], {}), '(n_om)\n', (10186, 10192), True, 'import numpy as np\n'), ((15794, 15809), 'numpy.arange', 'np.arange', (['n_om'], {}), '(n_om)\n', (15803, 15809), True, 'import numpy as np\n'), ((15923, 15933), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (15930, 15933), True, 'import numpy as np\n'), ((8024, 8036), 'numpy.abs', 'np.abs', (['Mhat'], {}), '(Mhat)\n', (8030, 8036), True, 'import numpy as np\n')]
|
import numpy as np
import pytest
from ctrm.environment import Instance, ObstacleSphere
from ctrm.roadmap import (
get_timed_roadmaps_fully_random,
get_timed_roadmaps_random,
get_timed_roadmaps_random_common,
)
@pytest.fixture
def ins():
return Instance(
2,
[np.array([0, 0]), np.array([1, 0])],
[np.array([1, 1]), np.array([0, 1])],
[0.5, 0.5],
[0.1, 0.1],
[0.1, 0.1],
[ObstacleSphere(pos=np.array([0.5, 0.5]), rad=0.2)],
2,
)
def test_get_timed_roadmaps_random(ins):
kwargs = {"ins": ins, "T": 3, "num": 10}
assert len(get_timed_roadmaps_random(**kwargs)) == ins.num_agents
assert len(get_timed_roadmaps_fully_random(**kwargs)) == ins.num_agents
assert len(get_timed_roadmaps_random_common(**kwargs)) == ins.num_agents
|
[
"numpy.array",
"ctrm.roadmap.get_timed_roadmaps_random",
"ctrm.roadmap.get_timed_roadmaps_random_common",
"ctrm.roadmap.get_timed_roadmaps_fully_random"
] |
[((293, 309), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (301, 309), True, 'import numpy as np\n'), ((311, 327), 'numpy.array', 'np.array', (['[1, 0]'], {}), '([1, 0])\n', (319, 327), True, 'import numpy as np\n'), ((339, 355), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (347, 355), True, 'import numpy as np\n'), ((357, 373), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (365, 373), True, 'import numpy as np\n'), ((617, 652), 'ctrm.roadmap.get_timed_roadmaps_random', 'get_timed_roadmaps_random', ([], {}), '(**kwargs)\n', (642, 652), False, 'from ctrm.roadmap import get_timed_roadmaps_fully_random, get_timed_roadmaps_random, get_timed_roadmaps_random_common\n'), ((687, 728), 'ctrm.roadmap.get_timed_roadmaps_fully_random', 'get_timed_roadmaps_fully_random', ([], {}), '(**kwargs)\n', (718, 728), False, 'from ctrm.roadmap import get_timed_roadmaps_fully_random, get_timed_roadmaps_random, get_timed_roadmaps_random_common\n'), ((763, 805), 'ctrm.roadmap.get_timed_roadmaps_random_common', 'get_timed_roadmaps_random_common', ([], {}), '(**kwargs)\n', (795, 805), False, 'from ctrm.roadmap import get_timed_roadmaps_fully_random, get_timed_roadmaps_random, get_timed_roadmaps_random_common\n'), ((464, 484), 'numpy.array', 'np.array', (['[0.5, 0.5]'], {}), '([0.5, 0.5])\n', (472, 484), True, 'import numpy as np\n')]
|
"""Tool for handling rigs"""
import logging
import re
from collections import defaultdict
from itertools import combinations
import networkx as nx
import numpy as np
from opensfm import actions, pygeometry, pymap
from opensfm.dataset import DataSet, DataSetBase
logger = logging.getLogger(__name__)
def find_image_rig(image, rig_patterns):
"""Given an image and candidates rig model patterns, return the
RigID/RigCameraID/Instance Member ID this image belongs to.
"""
for rig_id, patterns in rig_patterns.items():
for rig_camera_id, pattern in patterns.items():
instance_member_id = re.sub(pattern, "", image)
if instance_member_id == "":
continue
if instance_member_id != image:
return (rig_id, rig_camera_id, instance_member_id)
return None, None, None
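# Hedged example (hypothetical pattern names): with patterns that strip a
# "_cam0"/"_cam1" suffix, images shot by the two cameras at the same instant
# resolve to the same instance member ID, while non-matching images fall
# through to (None, None, None).
def _example_find_image_rig():
    patterns = {"rig1": {"cam0": "_cam0$", "cam1": "_cam1$"}}
    assert find_image_rig("shot_001_cam0", patterns) == ("rig1", "cam0", "shot_001")
    assert find_image_rig("unrelated.jpg", patterns) == (None, None, None)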
def create_instances_with_patterns(images, rig_patterns):
"""Using the provided patterns, group images that should belong to the same rig instances.
Incomplete rig instances wrt. the expected size are not considered.
Returns :
    A dict, keyed by rig ID, of rig instances, each instance being a list of (image, rig camera ID) pairs
"""
per_pattern = defaultdict(dict)
for image in images:
rig_id, rig_camera_id, instance_member_id = find_image_rig(image, rig_patterns)
if not rig_id:
continue
if instance_member_id not in per_pattern[rig_id]:
per_pattern[rig_id][instance_member_id] = []
per_pattern[rig_id][instance_member_id].append((image, rig_camera_id))
complete_instances = defaultdict(list)
problematic_images = []
for rig_id, patterns in per_pattern.items():
for pattern_images in patterns.values():
expected_size = len(rig_patterns[rig_id])
if len(pattern_images) != expected_size:
problematic_images += [im[0] for im in pattern_images]
else:
complete_instances[rig_id].append(pattern_images)
if problematic_images:
logger.warning(
(
"The following images are part of an incomplete rig, thus"
f"won't be considered of being part of a rig\n {problematic_images}"
)
)
return complete_instances
def create_subset_dataset_from_instances(data: DataSet, instances_per_rig, name):
"""Given a list of images grouped by rigs instances, pick a subset of images
and create a dataset subset with the provided name from them.
Returns :
    A DataSet restricted to a subset of images covering enough rig instances
"""
subset_images = []
for instances in instances_per_rig.values():
instances_sorted = sorted(
instances, key=lambda x: data.load_exif(x[0][0])["capture_time"]
)
subset_size = data.config["rig_calibration_subset_size"]
        middle = len(instances_sorted) // 2
instances_calibrate = instances_sorted[
max([0, middle - int(subset_size / 2)]) : min(
[middle + int(subset_size / 2), len(instances_sorted) - 1]
)
]
for instance in instances_calibrate:
subset_images += [x[0] for x in instance]
return data.subset(name, subset_images)
def compute_relative_pose(rig_id, pose_instances):
""" Compute a rig model relatives poses given poses grouped by rig instance. """
# Put all poses instances into some canonical frame taken as the mean of their R|t
centered_pose_instances = []
for instance in pose_instances:
origin_center = np.zeros(3)
rotation_center = np.zeros(3)
for shot, _ in instance:
rotation_center += shot.pose.rotation
origin_center += shot.pose.get_origin()
origin_center /= len(instance)
rotation_center /= len(instance)
centered_pose_instance = []
for shot, rig_camera_id in instance:
instance_pose = pygeometry.Pose(rotation_center)
instance_pose.set_origin(origin_center)
instance_pose_camera = shot.pose.relative_to(instance_pose)
centered_pose_instance.append(
(
instance_pose_camera,
rig_camera_id,
shot.camera.id,
)
)
centered_pose_instances.append(centered_pose_instance)
# Average canonical poses per RigCamera ID
average_origin, average_rotation, count_poses, camera_ids = {}, {}, {}, {}
for centered_pose_instance in centered_pose_instances:
for pose, rig_camera_id, camera_id in centered_pose_instance:
if rig_camera_id not in average_origin:
average_origin[rig_camera_id] = np.zeros(3)
average_rotation[rig_camera_id] = np.zeros(3)
count_poses[rig_camera_id] = 0
average_origin[rig_camera_id] += pose.get_origin()
average_rotation[rig_camera_id] += pose.rotation
camera_ids[rig_camera_id] = camera_id
count_poses[rig_camera_id] += 1
# Construct final rig_model results
rig_model = pymap.RigModel(rig_id)
for rig_camera_id, count in count_poses.items():
o = average_origin[rig_camera_id] / count
r = average_rotation[rig_camera_id] / count
pose = pygeometry.Pose(r)
pose.set_origin(o)
rig_model.add_rig_camera(pymap.RigCamera(pose, rig_camera_id))
return rig_model
def create_rig_models_from_reconstruction(reconstruction, instances_per_rig):
""" Computed rig model's, given a reconstruction and rig instances's shots. """
rig_models = {}
reconstructions_shots = set(reconstruction.shots)
for rig_id, instances in instances_per_rig.items():
pose_groups = []
for instance in instances:
            if any(
                shot_id not in reconstructions_shots
                for shot_id, _ in instance
            ):
continue
pose_groups.append(
[
(reconstruction.shots[shot_id], rig_camera_id)
for shot_id, rig_camera_id in instance
]
)
rig_models[rig_id] = compute_relative_pose(rig_id, pose_groups)
return rig_models
def create_rigs_with_pattern(data: DataSet, patterns):
"""Create rig data (`rig_models.json` and `rig_assignments.json`) by performing
pattern matching to group images belonging to the same instances, followed
by a bit of ad-hoc SfM to find some initial relative poses.
"""
# Construct instances assignments for each rig
instances_per_rig = create_instances_with_patterns(data.images(), patterns)
for rig_id, instances in instances_per_rig.items():
logger.info(
f"Found {len(instances)} rig instances for rig {rig_id} using pattern matching."
)
# Create some subset DataSet with enough images from each rig
subset_data = create_subset_dataset_from_instances(
data, instances_per_rig, "rig_calibration"
)
    # Run a bit of SfM without any rig
logger.info(f"Running SfM on a subset of {len(subset_data.images())} images.")
actions.extract_metadata.run_dataset(subset_data)
actions.detect_features.run_dataset(subset_data)
actions.match_features.run_dataset(subset_data)
actions.create_tracks.run_dataset(subset_data)
actions.reconstruct.run_dataset(subset_data)
# Compute some relative poses
rig_models = create_rig_models_from_reconstruction(
subset_data.load_reconstruction()[0], instances_per_rig
)
data.save_rig_models(rig_models)
data.save_rig_assignments(instances_per_rig)
def same_rig_shot(meta1, meta2):
"""True if shots taken at the same time on a rig."""
have_gps = (
"gps" in meta1
and "gps" in meta2
and "latitude" in meta1["gps"]
and "latitude" in meta2["gps"]
)
same_gps = (
have_gps
and meta1["gps"]["latitude"] == meta2["gps"]["latitude"]
and meta1["gps"]["longitude"] == meta2["gps"]["longitude"]
)
same_time = meta1["capture_time"] == meta2["capture_time"]
return same_gps and same_time
def detect_rigs(images, data: DataSetBase):
"""Search for rigs in a set of images.
For each image on a rig, returns rig, rig_camera and rig_pose ids.
"""
# Build graph of connected images and sequences
image_graph = nx.Graph()
sequence_graph = nx.Graph()
for im1, im2 in combinations(images, 2):
meta1 = data.load_exif(im1)
meta2 = data.load_exif(im2)
if same_rig_shot(meta1, meta2):
image_graph.add_edge(im1, im2)
sequence_graph.add_edge(meta1["skey"], meta2["skey"])
# Build rigs
# pyre-fixme[16]: Module `nx` has no attribute `connected_components`.
sequence_cc = nx.connected_components(sequence_graph)
sequence_rig_info = {}
for i, cc in enumerate(sequence_cc):
for j, sequence in enumerate(cc):
sequence_rig_info[sequence] = {"rig": i, "rig_camera": j}
# Build rig poses
# pyre-fixme[16]: Module `nx` has no attribute `connected_components`.
image_cc = nx.connected_components(image_graph)
rig_info = {}
for i, cc in enumerate(image_cc):
for image in cc:
meta = data.load_exif(image)
sr = sequence_rig_info[meta["skey"]]
rig_info[image] = {
"rig": sr["rig"],
"rig_camera": sr["rig_camera"],
"rig_pose": i,
}
return rig_info
def pose_kernel(x, y, rotation_std, translation_std):
"""Gaussian kernel on the diff between two poses."""
diff = x.relative_to(y)
dr = sum(diff.rotation ** 2)
dt = sum(diff.translation ** 2)
return np.exp(-dr / rotation_std ** 2 - dt / translation_std ** 2)
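# Hedged sketch: the kernel is (numerically) 1 for identical poses and decays
# smoothly with the angle-axis rotation and translation differences; the pose
# construction below mirrors how poses are built elsewhere in this module.
def _example_pose_kernel():
    identity = pygeometry.Pose(np.zeros(3))
    nearby = pygeometry.Pose(np.array([0.01, 0.0, 0.0]))
    # The identical pair scores ~1.0; the nearby pose scores slightly lower,
    # which is what lets pose_mode below pick the most central candidate.
    return pose_kernel(identity, nearby, 0.1, 0.1)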
def pose_mode(poses, rotation_std, translation_std):
"""Find the most popular pose.
    Popular is defined by a Parzen estimator with the given
Gaussian kernel standard deviations.
"""
best_score = 0
best_pose = None
for pose in poses:
score = 0
for other in poses:
score += pose_kernel(pose, other, rotation_std, translation_std)
if score > best_score:
best_score = score
best_pose = pose
return best_pose
|
[
"logging.getLogger",
"opensfm.pygeometry.Pose",
"opensfm.pymap.RigModel",
"opensfm.actions.reconstruct.run_dataset",
"networkx.Graph",
"networkx.connected_components",
"itertools.combinations",
"numpy.exp",
"numpy.zeros",
"collections.defaultdict",
"opensfm.actions.detect_features.run_dataset",
"opensfm.actions.match_features.run_dataset",
"opensfm.actions.create_tracks.run_dataset",
"re.sub",
"opensfm.actions.extract_metadata.run_dataset",
"opensfm.pymap.RigCamera"
] |
[((275, 302), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (292, 302), False, 'import logging\n'), ((1227, 1244), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (1238, 1244), False, 'from collections import defaultdict\n'), ((1622, 1639), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1633, 1639), False, 'from collections import defaultdict\n'), ((5173, 5195), 'opensfm.pymap.RigModel', 'pymap.RigModel', (['rig_id'], {}), '(rig_id)\n', (5187, 5195), False, 'from opensfm import actions, pygeometry, pymap\n'), ((7248, 7297), 'opensfm.actions.extract_metadata.run_dataset', 'actions.extract_metadata.run_dataset', (['subset_data'], {}), '(subset_data)\n', (7284, 7297), False, 'from opensfm import actions, pygeometry, pymap\n'), ((7302, 7350), 'opensfm.actions.detect_features.run_dataset', 'actions.detect_features.run_dataset', (['subset_data'], {}), '(subset_data)\n', (7337, 7350), False, 'from opensfm import actions, pygeometry, pymap\n'), ((7355, 7402), 'opensfm.actions.match_features.run_dataset', 'actions.match_features.run_dataset', (['subset_data'], {}), '(subset_data)\n', (7389, 7402), False, 'from opensfm import actions, pygeometry, pymap\n'), ((7407, 7453), 'opensfm.actions.create_tracks.run_dataset', 'actions.create_tracks.run_dataset', (['subset_data'], {}), '(subset_data)\n', (7440, 7453), False, 'from opensfm import actions, pygeometry, pymap\n'), ((7458, 7502), 'opensfm.actions.reconstruct.run_dataset', 'actions.reconstruct.run_dataset', (['subset_data'], {}), '(subset_data)\n', (7489, 7502), False, 'from opensfm import actions, pygeometry, pymap\n'), ((8502, 8512), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (8510, 8512), True, 'import networkx as nx\n'), ((8534, 8544), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (8542, 8544), True, 'import networkx as nx\n'), ((8565, 8588), 'itertools.combinations', 'combinations', (['images', '(2)'], {}), '(images, 2)\n', (8577, 8588), False, 'from itertools import combinations\n'), ((8922, 8961), 'networkx.connected_components', 'nx.connected_components', (['sequence_graph'], {}), '(sequence_graph)\n', (8945, 8961), True, 'import networkx as nx\n'), ((9255, 9291), 'networkx.connected_components', 'nx.connected_components', (['image_graph'], {}), '(image_graph)\n', (9278, 9291), True, 'import networkx as nx\n'), ((9864, 9923), 'numpy.exp', 'np.exp', (['(-dr / rotation_std ** 2 - dt / translation_std ** 2)'], {}), '(-dr / rotation_std ** 2 - dt / translation_std ** 2)\n', (9870, 9923), True, 'import numpy as np\n'), ((3620, 3631), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (3628, 3631), True, 'import numpy as np\n'), ((3658, 3669), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (3666, 3669), True, 'import numpy as np\n'), ((5366, 5384), 'opensfm.pygeometry.Pose', 'pygeometry.Pose', (['r'], {}), '(r)\n', (5381, 5384), False, 'from opensfm import actions, pygeometry, pymap\n'), ((625, 651), 're.sub', 're.sub', (['pattern', '""""""', 'image'], {}), "(pattern, '', image)\n", (631, 651), False, 'import re\n'), ((3995, 4027), 'opensfm.pygeometry.Pose', 'pygeometry.Pose', (['rotation_center'], {}), '(rotation_center)\n', (4010, 4027), False, 'from opensfm import actions, pygeometry, pymap\n'), ((5445, 5481), 'opensfm.pymap.RigCamera', 'pymap.RigCamera', (['pose', 'rig_camera_id'], {}), '(pose, rig_camera_id)\n', (5460, 5481), False, 'from opensfm import actions, pygeometry, pymap\n'), ((4777, 4788), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', 
(4785, 4788), True, 'import numpy as np\n'), ((4839, 4850), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (4847, 4850), True, 'import numpy as np\n')]
|
# coding: utf-8
from __future__ import division
import numpy as np
import pdb
import math
from . import data_generators
import copy
import cv2
import random
import keras
class_num =200
part_map_num = {'head':0,'legs':1,'wings':2,'back':3,'belly':4,'breast':5,'tail':6}
part_map_name = {}
crop_image = lambda img, x0, y0, w, h: img[y0:y0 + h, x0:x0 + w]
net_size=[38,56]
#[head_classifier,legs_classifier,wings_classifier,back_classifier,belly_classifier,breast_classifier,tail_classifier]
#def get_label_from_voc(all_imgs, classes_count, class_mapping,bird_classes_count,bird_class_mapping):
class get_voc_label(object):
def __init__(self,all_imgs, classes_count, class_mapping,bird_classes_count,bird_class_mapping,config,trainable=False):
self.all_imgs = all_imgs
self.classes_count = classes_count
self.class_mapping = class_mapping
self.bird_classes_count = bird_classes_count
self.bird_class_mapping = bird_class_mapping
self.max_batch = len(all_imgs)
self.batch_index = 0
self.epoch = 0
self.part_num = len(classes_count)
self.bird_class_num = len(bird_classes_count)
self.net_size = [38,56]
self.C =config
self.input_img_size_witdth = 300
self.input_img_size_heigth = 300
if self.C.network=='resnet50':
self.get_outputsize =self.get_img_output_length_res50
elif self.C.network=='vgg':
self.get_outputsize = self.get_img_output_length_vgg
else:
            raise ValueError('Unsupported network: ' + str(self.C.network))
if trainable:
self.trainable = 'trainval'
else:
self.trainable = 'test'
def get_next_batch(self):
img = self.all_imgs[self.batch_index]
while img['imageset']!= self.trainable:
self.batch_index+=1
if self.batch_index>=self.max_batch:
self.batch_index=0
self.epoch+=1
img = self.all_imgs[self.batch_index]
label = self.bird_class_mapping[img['bird_class_name']]
boxlist =[]
size_w = img['width']
size_h = img['height']
for bbox in img['bboxes']:
outbox ={}
outbox['name']=bbox['class']
cor = np.zeros(4)
x1 = bbox['x1']
x2 = bbox['x2']
y1= bbox['y1']
y2 = bbox['y2']
h = y2-y1
w = x2-x1
x1/=size_w
y1/=size_h
h/=size_h
w /= size_w
cor =np.array([x1,y1,w,h])
outbox['cor'] =cor
boxlist.append(outbox)
img_path = img['filepath']
boxdict, labellist ,labelnpout=self.match(boxlist, label)
self.batch_index += 1
if self.batch_index >= self.max_batch:
self.batch_index = 0
self.epoch += 1
return img_path,boxdict,labellist,labelnpout
def next_batch(self,batech_size):
img_input_np= np.zeros([batech_size,self.input_img_size_heigth,self.input_img_size_witdth,3])
netout_width, netout_height = self.get_outputsize(width=self.input_img_size_witdth, height=self.input_img_size_heigth)
part_roi_input = np.zeros([batech_size,self.part_num,4],dtype=np.int16)
labellist =[]
label_res_np = np.zeros([batech_size,200],dtype=np.int16)
for nn in range(self.part_num):
labellist.append(np.zeros([batech_size,self.bird_class_num+1]))
for n_b in range(batech_size):
img = self.all_imgs[self.batch_index]
while img['imageset'] != self.trainable:
self.batch_index += 1
if self.batch_index >= self.max_batch:
self.batch_index = 0
self.epoch += 1
img = self.all_imgs[self.batch_index]
img_path = img['filepath']
#print(img_path)
#img_np = self.read_prepare_img(img_path,img['width'],img['height'],width_to_resize=self.input_img_size_witdth,heigth_to_resize=self.input_img_size_heigth)
img_np,img_ori= self.read_prepare_img_aug(img_path, img['width'], img['height'],
width_to_resize=self.input_img_size_witdth,
heigth_to_resize=self.input_img_size_heigth, annota=img)
img_input_np[n_b,:,:,:]=img_np
#netout_width,netout_height= self.get_outputsize(width=self.input_img_size_witdth,height=self.input_img_size_heigth)
bird_class_label_num = self.bird_class_mapping[img['bird_class_name']]
label_res_np[n_b,:] = keras.utils.to_categorical(bird_class_label_num-1,200)
if 1:
boxlist = []
for i in range(self.part_num):
part_roi_input[n_b,i,:]=np.array([0,0,netout_width-1,netout_height-1],dtype=np.int16)
#boxlist.append(np.array([0,0,netout_width-1,netout_height-1],dtype=np.int16))
#boxnp = np.copy(boxlist)
#boxnp = np.expand_dims(boxnp,axis=0)
"""print boxnp.shape
print self.part_num
print boxnp[0,0,:]
assert boxnp.shape==[1,self.part_num,4]
assert boxnp[0,0,:]==np.array([0,0,netout_width,netout_height],dtype=np.int16)"""
#boxnp = np.zeros([1, self.part_num, 4])
check_dict = {}
nnn = 0
for bbox in img['bboxes']:
nnn+=1
part_index = self.class_mapping[bbox['class']]
if str(part_index) not in check_dict:
check_dict[str(part_index)] = part_index
else:
                        raise ValueError('Duplicate part index %d in bounding boxes' % part_index)
#print bbox
x1 = bbox['x1']
x2 = bbox['x2']
y1= bbox['y1']
y2 = bbox['y2']
w = x2-x1
h = y2-y1
x1= x1/img['width']*netout_width
w = w/img['width']*netout_width
y1 = y1/img['height']*netout_height
h = h/img['height']*netout_height
if x1<0:
x1=0
if y1<0:
y1=0
part_roi_input[n_b,part_index,:] = [x1,y1,w,h]
labellist[part_index][n_b,bird_class_label_num] = 1
labellist[part_index][n_b,0] = 1
for i in range(7):
if labellist[i][n_b,0] == 1:
if str(i) not in check_dict:
raise ValueError(str(i)+' nnn is :'+str(nnn))
self.batch_index += 1
if self.batch_index >= self.max_batch:
self.batch_index = 0
self.epoch += 1
if self.trainable == 'test':
return img_input_np,part_roi_input,label_res_np,int(img['index'])
return img_input_np,part_roi_input,label_res_np
def match(self,boxlist, label):
        # Each entry of boxlist is a dict: 'name' is a part name (head, legs, ...) and
        # 'cor' holds the top-left corner plus width and height, normalized to [0, 1].
        # label is a single integer (the bird class).
labellist = []
boxdict = {}
        # Use a separate label array per part; appending the same array seven
        # times would make every part share (and overwrite) one buffer.
        for i in range(7):
            labellist.append(np.zeros([1, class_num + 1]))
labelnpout = np.zeros([7,class_num+1])
if len(labellist) != 7:
            raise ValueError('Expected 7 part label arrays, got %d' % len(labellist))
for box in boxlist:
index = part_map_num[box['name']]
labellist[index][0][0] = 1
labellist[index][0][label+1] = 1
labelnpout[index][0] = 1
labelnpout[index][label+1] =1
x = box['cor'][0]
y = box['cor'][1]
w = box['cor'][2]
h = box['cor'][3]
x *= net_size[1]
w *= net_size[1]
y *= net_size[0]
h *= net_size[0]
cor_np = np.array([x, y, w, h])
cor_np = np.expand_dims(cor_np, axis=0)
boxdict[box['name']] = cor_np
npnone = np.zeros([1,1,4])
# [head_classifier,legs_classifier,wings_classifier,back_classifier,belly_classifier,breast_classifier,tail_classifier]
cname = ['head','legs','wings','back','belly','breast','tail']
for onecname in cname:
if onecname not in boxdict:
boxdict[onecname] = npnone
return boxdict, labellist,labelnpout
def read_prepare_img(self,img_path,width,height,width_to_resize,heigth_to_resize):
img = cv2.imread(img_path)
assert width==img.shape[1]
assert height==img.shape[0]
#resized_width, resized_height=self.get_new_img_size(width,height)
img = cv2.resize(img, (width_to_resize, heigth_to_resize), interpolation=cv2.INTER_CUBIC)
size =[heigth_to_resize, heigth_to_resize]
img = img[:, :, (2, 1, 0)] # BGR -> RGB
img = img.astype(np.float32)
img[:, :, 0] -= self.C.img_channel_mean[0]
img[:, :, 1] -= self.C.img_channel_mean[1]
img[:, :, 2] -= self.C.img_channel_mean[2]
img /= self.C.img_scaling_factor
img = np.transpose(img, (2, 0, 1))
img = np.expand_dims(img, axis=0)
img = np.transpose(img, (0, 2, 3, 1))
return img
def read_prepare_img_aug(self,img_path,width,height,width_to_resize,heigth_to_resize,annota):
img_np = cv2.imread(img_path)
if annota['aug']:
if annota['aug_med'] == 'flip_hor':
img_np = cv2.flip(img_np,1)
elif annota['aug_med'] == 'cut':
tscut_pix = annota['cut_pixes']
if annota['cut_type'] == 'both':
img_np = img_np[tscut_pix:-tscut_pix, tscut_pix:-tscut_pix, :]
elif annota['cut_type'] == 'width':
img_np = img_np[:, tscut_pix:-tscut_pix, :]
elif annota['cut_type'] == 'height':
img_np = img_np[tscut_pix:-tscut_pix, :, :]
elif annota['aug_med'] == 'hsv':
hue = annota['hsv_hue']
sat = annota['hsv_sat']
val = annota['hsv_val']
img_hsv = cv2.cvtColor(img_np, cv2.COLOR_BGR2HSV).astype(np.float)
img_hsv[:, :, 0] = (img_hsv[:, :, 0] + hue) % 180
img_hsv[:, :, 1] *= sat
img_hsv[:, :, 2] *= val
img_hsv[img_hsv > 255] = 255
img_np = cv2.cvtColor(np.round(img_hsv).astype(np.uint8), cv2.COLOR_HSV2BGR)
elif annota['aug_med'] =='rot':
angle = annota['rot_angle']
img_np = self.rotate_image(img_np,angle,crop=True)
elif annota['aug_med'] =='gamma':
gamma_aft_exp = annota['gamma_aft_exp']
img_np = self.gamma_transform(img_np,gamma_aft_exp)
        img_ori = np.copy(img_np)  # keep an unprocessed copy for visualization
#assert width==img_np.shape[1]
#assert height==img_np.shape[0]
#resized_width, resized_height=self.get_new_img_size(width,height)
img_np = cv2.resize(img_np, (width_to_resize, heigth_to_resize), interpolation=cv2.INTER_CUBIC)
size =[heigth_to_resize, heigth_to_resize]
img_np = img_np[:, :, (2, 1, 0)] # BGR -> RGB
img_np = img_np.astype(np.float32)
img_np[:, :, 0] -= self.C.img_channel_mean[0]
img_np[:, :, 1] -= self.C.img_channel_mean[1]
img_np[:, :, 2] -= self.C.img_channel_mean[2]
img_np /= self.C.img_scaling_factor
img_np = np.transpose(img_np, (2, 0, 1))
img_np = np.expand_dims(img_np, axis=0)
img_np = np.transpose(img_np, (0, 2, 3, 1))
        return img_np, img_ori  # img_ori is returned for visualization
'''def hsv_transform(self,img, hue_delta, sat_mult, val_mult):
img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV).astype(np.float)
img_hsv[:, :, 0] = (img_hsv[:, :, 0] + hue_delta) % 180
img_hsv[:, :, 1] *= sat_mult
img_hsv[:, :, 2] *= val_mult
img_hsv[img_hsv > 255] = 255
return cv2.cvtColor(np.round(img_hsv).astype(np.uint8), cv2.COLOR_HSV2BGR)'''
def get_new_img_size(self,width, height, img_min_side=600):
if width <= height:
f = float(img_min_side) / width
resized_height = int(f * height)
resized_width = img_min_side
else:
f = float(img_min_side) / height
resized_width = int(f * width)
resized_height = img_min_side
return resized_width, resized_height
def get_img_output_length_res50(self,width, height):
def get_output_length(input_length):
# zero_pad
input_length += 6
# apply 4 strided convolutions
filter_sizes = [7, 3, 1, 1]
stride = 2
for filter_size in filter_sizes:
input_length = (input_length - filter_size + stride) // stride
return input_length
return get_output_length(width), get_output_length(height)
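    # Worked example of the computation above: a 300-pixel side is zero-padded
    # to 306, and the four stride-2 stages give 150 -> 74 -> 37 -> 19, so the
    # default 300x300 input yields a 19x19 feature map; a 600x900 input gives
    # the 38x56 map hard-coded in net_size at the top of this file.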
def get_img_output_length_vgg(self,width, height):
def get_output_length(input_length):
return input_length // 16
return get_output_length(width), get_output_length(height)
def shuffle_allimgs(self):
random.shuffle(self.all_imgs)
def rotate_image(self,img, angle, crop=True):
h, w = img.shape[:2]
        # The rotation angle is periodic with period 360°.
        angle %= 360
        # Build the affine rotation matrix with OpenCV.
        M_rotate = cv2.getRotationMatrix2D((w / 2, h / 2), angle, 1)
        # Apply the rotation.
        img_rotated = cv2.warpAffine(img, M_rotate, (w, h))
        # Optionally crop away the black borders introduced by the rotation.
        if crop:
            # For cropping, the effective period of the angle is 180°
            angle_crop = angle % 180
            # and it is symmetric about 90°.
            if angle_crop > 90:
                angle_crop = 180 - angle_crop
            # Convert the angle to radians.
            theta = angle_crop * np.pi / 180.0
            # Height-to-width ratio.
            hw_ratio = float(h) / float(w)
            # Numerator of the crop side-length factor.
            tan_theta = np.tan(theta)
            numerator = np.cos(theta) + np.sin(theta) * tan_theta
            # Aspect-ratio-dependent term of the denominator.
            r = hw_ratio if h > w else 1 / hw_ratio
            # Full denominator.
            denominator = r * tan_theta + 1
            # Final side-length scaling factor.
            crop_mult = numerator / denominator
            # Crop region.
w_crop = int(round(crop_mult * w))
h_crop = int(round(crop_mult * h))
x0 = int((w - w_crop) / 2)
y0 = int((h - h_crop) / 2)
img_rotated = crop_image(img_rotated, x0, y0, w_crop, h_crop)
return img_rotated
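    # Worked example of the crop factor above: for a square image (r = 1)
    # rotated by 45°, crop_mult = (cos45 + sin45 * tan45) / (tan45 + 1) ≈ 0.707,
    # i.e. the largest axis-aligned crop free of black borders keeps about 70%
    # of the original side length.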
    # boxlist entries are dicts: 'name' is a part name (head, legs, ...) and 'cor' is (x, y, w, h) normalized to [0, 1].
def gamma_transform(self,img, gamma):
gamma_table = [np.power(x / 255.0, gamma) * 255.0 for x in range(256)]
gamma_table = np.round(np.array(gamma_table)).astype(np.uint8)
return cv2.LUT(img, gamma_table)
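    # Hedged usage note: gamma < 1 brightens and gamma > 1 darkens. For example,
    # with gamma = 0.5 a mid-gray value of 128 maps to
    # round((128 / 255) ** 0.5 * 255) = 181.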
    # label is a single integer (the bird class).
def match(boxlist,label):
labellist= []
boxdict ={}
    # Use a separate label vector per part so the seven entries do not alias
    # the same array.
    for i in range(7):
        labellist.append(np.zeros(class_num + 1))
if len(labellist)!=7:
        raise ValueError('Expected 7 part label arrays, got %d' % len(labellist))
for box in boxlist:
index=part_map_num[box['name']]
labellist[index][0]=1
labellist[index][label]=1
x = box['cor'][0]
y = box['cor'][1]
w = box['cor'][2]
h = box['cor'][3]
x *= net_size[1]
w *=net_size[1]
y *= net_size[0]
h *=net_size[0]
cor_np = np.array([x,y,w,h])
cor_np =np.expand_dims(cor_np, axis=0)
cor_np = np.expand_dims(cor_np, axis=0)
boxdict[box['name']] = cor_np
return boxdict,labellist
|
[
"keras.utils.to_categorical",
"numpy.array",
"numpy.sin",
"cv2.LUT",
"numpy.round",
"cv2.warpAffine",
"random.shuffle",
"numpy.cos",
"cv2.cvtColor",
"cv2.getRotationMatrix2D",
"cv2.resize",
"numpy.transpose",
"cv2.imread",
"numpy.copy",
"numpy.tan",
"cv2.flip",
"numpy.power",
"numpy.zeros",
"numpy.expand_dims"
] |
[((14927, 14950), 'numpy.zeros', 'np.zeros', (['(class_num + 1)'], {}), '(class_num + 1)\n', (14935, 14950), True, 'import numpy as np\n'), ((2967, 3054), 'numpy.zeros', 'np.zeros', (['[batech_size, self.input_img_size_heigth, self.input_img_size_witdth, 3]'], {}), '([batech_size, self.input_img_size_heigth, self.\n input_img_size_witdth, 3])\n', (2975, 3054), True, 'import numpy as np\n'), ((3199, 3256), 'numpy.zeros', 'np.zeros', (['[batech_size, self.part_num, 4]'], {'dtype': 'np.int16'}), '([batech_size, self.part_num, 4], dtype=np.int16)\n', (3207, 3256), True, 'import numpy as np\n'), ((3299, 3343), 'numpy.zeros', 'np.zeros', (['[batech_size, 200]'], {'dtype': 'np.int16'}), '([batech_size, 200], dtype=np.int16)\n', (3307, 3343), True, 'import numpy as np\n'), ((7186, 7214), 'numpy.zeros', 'np.zeros', (['[1, class_num + 1]'], {}), '([1, class_num + 1])\n', (7194, 7214), True, 'import numpy as np\n'), ((7300, 7328), 'numpy.zeros', 'np.zeros', (['[7, class_num + 1]'], {}), '([7, class_num + 1])\n', (7308, 7328), True, 'import numpy as np\n'), ((8026, 8045), 'numpy.zeros', 'np.zeros', (['[1, 1, 4]'], {}), '([1, 1, 4])\n', (8034, 8045), True, 'import numpy as np\n'), ((8504, 8524), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (8514, 8524), False, 'import cv2\n'), ((8685, 8773), 'cv2.resize', 'cv2.resize', (['img', '(width_to_resize, heigth_to_resize)'], {'interpolation': 'cv2.INTER_CUBIC'}), '(img, (width_to_resize, heigth_to_resize), interpolation=cv2.\n INTER_CUBIC)\n', (8695, 8773), False, 'import cv2\n'), ((9115, 9143), 'numpy.transpose', 'np.transpose', (['img', '(2, 0, 1)'], {}), '(img, (2, 0, 1))\n', (9127, 9143), True, 'import numpy as np\n'), ((9158, 9185), 'numpy.expand_dims', 'np.expand_dims', (['img'], {'axis': '(0)'}), '(img, axis=0)\n', (9172, 9185), True, 'import numpy as np\n'), ((9200, 9231), 'numpy.transpose', 'np.transpose', (['img', '(0, 2, 3, 1)'], {}), '(img, (0, 2, 3, 1))\n', (9212, 9231), True, 'import numpy as np\n'), ((9367, 9387), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (9377, 9387), False, 'import cv2\n'), ((10839, 10854), 'numpy.copy', 'np.copy', (['img_np'], {}), '(img_np)\n', (10846, 10854), True, 'import numpy as np\n'), ((11030, 11121), 'cv2.resize', 'cv2.resize', (['img_np', '(width_to_resize, heigth_to_resize)'], {'interpolation': 'cv2.INTER_CUBIC'}), '(img_np, (width_to_resize, heigth_to_resize), interpolation=cv2.\n INTER_CUBIC)\n', (11040, 11121), False, 'import cv2\n'), ((11490, 11521), 'numpy.transpose', 'np.transpose', (['img_np', '(2, 0, 1)'], {}), '(img_np, (2, 0, 1))\n', (11502, 11521), True, 'import numpy as np\n'), ((11539, 11569), 'numpy.expand_dims', 'np.expand_dims', (['img_np'], {'axis': '(0)'}), '(img_np, axis=0)\n', (11553, 11569), True, 'import numpy as np\n'), ((11587, 11621), 'numpy.transpose', 'np.transpose', (['img_np', '(0, 2, 3, 1)'], {}), '(img_np, (0, 2, 3, 1))\n', (11599, 11621), True, 'import numpy as np\n'), ((13205, 13234), 'random.shuffle', 'random.shuffle', (['self.all_imgs'], {}), '(self.all_imgs)\n', (13219, 13234), False, 'import random\n'), ((13408, 13457), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['(w / 2, h / 2)', 'angle', '(1)'], {}), '((w / 2, h / 2), angle, 1)\n', (13431, 13457), False, 'import cv2\n'), ((13500, 13537), 'cv2.warpAffine', 'cv2.warpAffine', (['img', 'M_rotate', '(w, h)'], {}), '(img, M_rotate, (w, h))\n', (13514, 13537), False, 'import cv2\n'), ((14810, 14835), 'cv2.LUT', 'cv2.LUT', (['img', 'gamma_table'], {}), '(img, gamma_table)\n', (14817, 
14835), False, 'import cv2\n'), ((15416, 15438), 'numpy.array', 'np.array', (['[x, y, w, h]'], {}), '([x, y, w, h])\n', (15424, 15438), True, 'import numpy as np\n'), ((15452, 15482), 'numpy.expand_dims', 'np.expand_dims', (['cor_np'], {'axis': '(0)'}), '(cor_np, axis=0)\n', (15466, 15482), True, 'import numpy as np\n'), ((15500, 15530), 'numpy.expand_dims', 'np.expand_dims', (['cor_np'], {'axis': '(0)'}), '(cor_np, axis=0)\n', (15514, 15530), True, 'import numpy as np\n'), ((2251, 2262), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (2259, 2262), True, 'import numpy as np\n'), ((2527, 2551), 'numpy.array', 'np.array', (['[x1, y1, w, h]'], {}), '([x1, y1, w, h])\n', (2535, 2551), True, 'import numpy as np\n'), ((4656, 4713), 'keras.utils.to_categorical', 'keras.utils.to_categorical', (['(bird_class_label_num - 1)', '(200)'], {}), '(bird_class_label_num - 1, 200)\n', (4682, 4713), False, 'import keras\n'), ((7891, 7913), 'numpy.array', 'np.array', (['[x, y, w, h]'], {}), '([x, y, w, h])\n', (7899, 7913), True, 'import numpy as np\n'), ((7935, 7965), 'numpy.expand_dims', 'np.expand_dims', (['cor_np'], {'axis': '(0)'}), '(cor_np, axis=0)\n', (7949, 7965), True, 'import numpy as np\n'), ((13934, 13947), 'numpy.tan', 'np.tan', (['theta'], {}), '(theta)\n', (13940, 13947), True, 'import numpy as np\n'), ((3411, 3459), 'numpy.zeros', 'np.zeros', (['[batech_size, self.bird_class_num + 1]'], {}), '([batech_size, self.bird_class_num + 1])\n', (3419, 3459), True, 'import numpy as np\n'), ((9487, 9506), 'cv2.flip', 'cv2.flip', (['img_np', '(1)'], {}), '(img_np, 1)\n', (9495, 9506), False, 'import cv2\n'), ((13972, 13985), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (13978, 13985), True, 'import numpy as np\n'), ((14668, 14694), 'numpy.power', 'np.power', (['(x / 255.0)', 'gamma'], {}), '(x / 255.0, gamma)\n', (14676, 14694), True, 'import numpy as np\n'), ((4849, 4918), 'numpy.array', 'np.array', (['[0, 0, netout_width - 1, netout_height - 1]'], {'dtype': 'np.int16'}), '([0, 0, netout_width - 1, netout_height - 1], dtype=np.int16)\n', (4857, 4918), True, 'import numpy as np\n'), ((13988, 14001), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (13994, 14001), True, 'import numpy as np\n'), ((14755, 14776), 'numpy.array', 'np.array', (['gamma_table'], {}), '(gamma_table)\n', (14763, 14776), True, 'import numpy as np\n'), ((10155, 10194), 'cv2.cvtColor', 'cv2.cvtColor', (['img_np', 'cv2.COLOR_BGR2HSV'], {}), '(img_np, cv2.COLOR_BGR2HSV)\n', (10167, 10194), False, 'import cv2\n'), ((10441, 10458), 'numpy.round', 'np.round', (['img_hsv'], {}), '(img_hsv)\n', (10449, 10458), True, 'import numpy as np\n')]
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import pathlib
import random
import numpy as np
import torch
import uuid
import activemri.envs.loupe_envs as loupe_envs
from activemri.baselines.non_rl import NonRLTrainer, NonRLTester
import matplotlib
matplotlib.use('Agg')
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='MRI Reconstruction Example')
parser.add_argument('--num-pools', type=int, default=4, help='Number of U-Net pooling layers')
parser.add_argument('--num-step', type=int, default=2, help='Number of LSTM iterations')
parser.add_argument('--drop-prob', type=float, default=0.0, help='Dropout probability')
parser.add_argument('--num-chans', type=int, default=64, help='Number of U-Net channels')
parser.add_argument('--batch-size', default=16, type=int, help='Mini batch size')
parser.add_argument('--num-epochs', type=int, default=50, help='Number of training epochs')
parser.add_argument('--noise-type', type=str, default='none', help='Type of additive noise to measurements')
parser.add_argument('--noise-level', type=float, default=0, help='Noise level')
parser.add_argument('--lr', type=float, default=0.001, help='Learning rate')
parser.add_argument('--lr-step-size', type=int, default=40,
help='Period of learning rate decay')
parser.add_argument('--lr-gamma', type=float, default=0.1,
help='Multiplicative factor of learning rate decay')
parser.add_argument('--weight-decay', type=float, default=0.,
help='Strength of weight decay regularization')
parser.add_argument('--report-interval', type=int, default=100, help='Period of loss reporting')
parser.add_argument('--data-parallel', action='store_true',
help='If set, use multiple GPUs using data parallelism')
parser.add_argument('--device', type=str, default='cuda',
help='Which device to train on. Set to "cuda" to use the GPU')
parser.add_argument('--exp-dir', type=pathlib.Path, required=True,
help='Path where model and results should be saved')
parser.add_argument('--checkpoint1', type=str,
help='Path to an existing checkpoint. Used along with "--resume"')
parser.add_argument('--entropy_weight', type=float, default=0.0,
help='weight for the entropy/diversity loss')
parser.add_argument('--recon_weight', type=float, default=1.0,
help='weight for the reconsturction loss')
parser.add_argument('--sparsity_weight', type=float, default=0.0,
help='weight for the sparsity loss')
parser.add_argument('--save-model', type=bool, default=False, help='save model every iteration or not')
parser.add_argument('--seed', default=42, type=int, help='Seed for random number generators')
parser.add_argument('--resolution', default=[128, 128], nargs='+', type=int, help='Resolution of images')
parser.add_argument('--dataset-name', type=str, choices=['fashion-mnist', 'dicom-knee', 'real-knee', 'brain'],
required=True, help='name of the dataset')
parser.add_argument('--sample-rate', type=float, default=1.,
help='Fraction of total volumes to include')
# Mask parameters
parser.add_argument('--accelerations', nargs='+', default=[4], type=float,
help='Ratio of k-space columns to be sampled. If multiple values are '
'provided, then one of those is chosen uniformly at random for '
'each volume.')
parser.add_argument('--label_range', nargs='+', type=int, help='train using images of specific class')
parser.add_argument('--model', type=str, help='name of the model to run', required=True)
parser.add_argument('--input_chans', type=int, choices=[1, 2], required=True, help='number of input channels. One for real image, 2 for compelx image')
parser.add_argument('--output_chans', type=int, default=1, help='number of output channels. One for real image')
parser.add_argument('--line-constrained', type=int, default=0)
parser.add_argument('--unet', action='store_true')
parser.add_argument('--conjugate_mask', action='store_true', help='force loupe model to use conjugate symmetry.')
parser.add_argument('--bi-dir', type=int, default=0)
parser.add_argument('--loss_type', type=str, choices=['l1', 'ssim', 'psnr'], default='l1')
parser.add_argument('--test_visual_frequency', type=int, default=1000)
parser.add_argument('--test', action='store_true')
parser.add_argument('--preselect', type=int, default=0)
parser.add_argument('--preselect_num', type=int, default=2)
parser.add_argument('--random_rotate', type=int, default=0)
parser.add_argument('--random_baseline', type=int, default=0)
parser.add_argument('--poisson', type=int, default=0)
parser.add_argument('--spectrum', type=int, default=0)
parser.add_argument("--equispaced", type=int, default=0)
args = parser.parse_args()
args.equispaced = args.equispaced > 0
args.spectrum = args.spectrum > 0
args.poisson = args.poisson > 0
args.random = args.random_baseline > 0
args.random_rotate = args.random_rotate > 0
args.kspace_weight = 0
args.line_constrained = args.line_constrained > 0
if args.checkpoint1 is not None:
args.resume = True
else:
args.resume = False
noise_str = ''
    if args.noise_type == 'none':
noise_str = '_no_noise_'
else:
noise_str = '_' + args.noise_type + str(args.noise_level) + '_'
if args.preselect > 0:
args.preselect = True
else:
args.preselect = False
if args.bi_dir > 0 :
args.bi_dir = True
else:
args.bi_dir = False
    if str(args.exp_dir) == 'auto':
args.exp_dir =('checkpoints/'+args.dataset_name + '_' + str(float(args.accelerations[0])) +
'x_' + args.model + '_bi_dir_{}'.format(args.bi_dir)+ '_preselect_{}'.format(args.preselect) +
noise_str + 'lr=' + str(args.lr) + '_bs=' + str(args.batch_size) + '_loss_type='+args.loss_type +
'_epochs=' + str(args.num_epochs))
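    # Append a short random suffix (the first 6 hex characters of a UUID) so
    # repeated runs with identical settings do not overwrite each other's outputs.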
args.exp_dir = pathlib.Path(args.exp_dir+'_uuid_'+uuid.uuid4().hex.upper()[0:6])
print('save logs to {}'.format(args.exp_dir))
args.visualization_dir = args.exp_dir / 'visualizations'
if args.test:
args.batch_size = 1
if args.dataset_name == 'real-knee':
args.data_path = 'datasets/knee'
# args.resolution = [128, 128]
env = loupe_envs.LOUPERealKspaceEnv(args)
elif args.dataset_name == 'brain':
args.data_path = 'datasets/brain'
env = loupe_envs.LoupeBrainEnv(args)
else:
raise NotImplementedError
# set random seeds
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.test:
policy = NonRLTester(env, args.exp_dir, args, None)
else:
policy = NonRLTrainer(args, env, torch.device(args.device))
policy()
|
[
"torch.manual_seed",
"activemri.baselines.non_rl.NonRLTester",
"argparse.ArgumentParser",
"matplotlib.use",
"activemri.envs.loupe_envs.LoupeBrainEnv",
"random.seed",
"uuid.uuid4",
"numpy.random.seed",
"activemri.envs.loupe_envs.LOUPERealKspaceEnv",
"torch.device"
] |
[((396, 417), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (410, 417), False, 'import matplotlib\n'), ((460, 525), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""MRI Reconstruction Example"""'}), "(description='MRI Reconstruction Example')\n", (483, 525), False, 'import argparse\n'), ((7052, 7074), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (7063, 7074), False, 'import random\n'), ((7079, 7104), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (7093, 7104), True, 'import numpy as np\n'), ((7109, 7137), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (7126, 7137), False, 'import torch\n'), ((6818, 6853), 'activemri.envs.loupe_envs.LOUPERealKspaceEnv', 'loupe_envs.LOUPERealKspaceEnv', (['args'], {}), '(args)\n', (6847, 6853), True, 'import activemri.envs.loupe_envs as loupe_envs\n'), ((7174, 7216), 'activemri.baselines.non_rl.NonRLTester', 'NonRLTester', (['env', 'args.exp_dir', 'args', 'None'], {}), '(env, args.exp_dir, args, None)\n', (7185, 7216), False, 'from activemri.baselines.non_rl import NonRLTrainer, NonRLTester\n'), ((6949, 6979), 'activemri.envs.loupe_envs.LoupeBrainEnv', 'loupe_envs.LoupeBrainEnv', (['args'], {}), '(args)\n', (6973, 6979), True, 'import activemri.envs.loupe_envs as loupe_envs\n'), ((7268, 7293), 'torch.device', 'torch.device', (['args.device'], {}), '(args.device)\n', (7280, 7293), False, 'import torch\n'), ((6490, 6502), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (6500, 6502), False, 'import uuid\n')]
|
from __future__ import print_function
# Copyright (c) 2013, <NAME>
# All rights reserved.
"""
Example timeseries reduction
"""
from collections import OrderedDict
import os
import glob
import time
import numpy as np
from pyvttbl import DataFrame
from undaqTools import Daq
# dependent variables and indices that we want to analyze
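# each entry is (DAQ channel name, row index within that channel's 2-D array)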
dvs = [('CFS_Accelerator_Pedal_Position', 0),
('CFS_Brake_Pedal_Force', 0),
('CFS_Steering_Wheel_Angle', 0),
('SCC_Lane_Deviation', 1),
('VDS_Veh_Speed', 0)]
if __name__ == '__main__':
# data is on a local SSD drive. This is very important for performance.
data_dir = 'C:\\LocalData\\Left Lane\\'
# change the directory of the kernel
print("Changing wd to '%s'"%data_dir)
os.chdir(data_dir)
# pyvttbl is in pypi
# container to hold the collaborated results
df = DataFrame()
print('\nCollaborating timeseries measures...')
t0 = time.time()
hd5_files = tuple(glob.glob('*/*.hdf5'))
for hd5_file in hd5_files:
print(" analyzing '%s'..."%hd5_file)
# load hd5
daq = Daq()
daq.read_hd5(hd5_file)
# daq.etc was configured in Example02_*
for (epoch, fslice) in daq.etc['epochs'].items():
# figure out pid and independent variable conditions
pid = daq.etc['pid']
            trial = epoch // 10  # integer division: epoch encodes trial * 10 + section
scenario = daq.etc['scen_order'][trial]
section = epoch % 10
# pack pid and IV conditions into OrderedDict
row = OrderedDict([('pid', pid),
('trial', trial),
('scenario', scenario),
('section', section)])
for (dv, indx) in dvs:
vec = daq[dv][indx,fslice].flatten()
row['%s_mean'%dv] = np.mean(vec)
row['%s_min'%dv] = np.min(vec)
row['%s_max'%dv] = np.max(vec)
row['%s_range'%dv] = row['%s_max'%dv] - row['%s_min'%dv]
row['%s_amean'%dv] = np.mean(np.abs(vec))
row['%s_sd'%dv] = np.std(vec)
row['%s_rms'%dv] = np.linalg.norm(vec)/np.sqrt(len(vec))
# insert the row into the dataframe
df.insert(row)
df.write(fname='collaborated_ts.csv')
    print('\nDone.\n\ncollaborating timeseries measures took %.1f s'%(time.time()-t0))
|
[
"numpy.mean",
"collections.OrderedDict",
"numpy.abs",
"numpy.std",
"numpy.linalg.norm",
"pyvttbl.DataFrame",
"numpy.max",
"os.chdir",
"numpy.min",
"undaqTools.Daq",
"time.time",
"glob.glob"
] |
[((775, 793), 'os.chdir', 'os.chdir', (['data_dir'], {}), '(data_dir)\n', (783, 793), False, 'import os\n'), ((878, 889), 'pyvttbl.DataFrame', 'DataFrame', ([], {}), '()\n', (887, 889), False, 'from pyvttbl import DataFrame\n'), ((956, 967), 'time.time', 'time.time', ([], {}), '()\n', (965, 967), False, 'import time\n'), ((990, 1011), 'glob.glob', 'glob.glob', (['"""*/*.hdf5"""'], {}), "('*/*.hdf5')\n", (999, 1011), False, 'import glob\n'), ((1133, 1138), 'undaqTools.Daq', 'Daq', ([], {}), '()\n', (1136, 1138), False, 'from undaqTools import Daq\n'), ((1582, 1678), 'collections.OrderedDict', 'OrderedDict', (["[('pid', pid), ('trial', trial), ('scenario', scenario), ('section', section)]"], {}), "([('pid', pid), ('trial', trial), ('scenario', scenario), (\n 'section', section)])\n", (1593, 1678), False, 'from collections import OrderedDict\n'), ((1905, 1917), 'numpy.mean', 'np.mean', (['vec'], {}), '(vec)\n', (1912, 1917), True, 'import numpy as np\n'), ((1953, 1964), 'numpy.min', 'np.min', (['vec'], {}), '(vec)\n', (1959, 1964), True, 'import numpy as np\n'), ((2000, 2011), 'numpy.max', 'np.max', (['vec'], {}), '(vec)\n', (2006, 2011), True, 'import numpy as np\n'), ((2177, 2188), 'numpy.std', 'np.std', (['vec'], {}), '(vec)\n', (2183, 2188), True, 'import numpy as np\n'), ((2457, 2468), 'time.time', 'time.time', ([], {}), '()\n', (2466, 2468), False, 'import time\n'), ((2130, 2141), 'numpy.abs', 'np.abs', (['vec'], {}), '(vec)\n', (2136, 2141), True, 'import numpy as np\n'), ((2224, 2243), 'numpy.linalg.norm', 'np.linalg.norm', (['vec'], {}), '(vec)\n', (2238, 2243), True, 'import numpy as np\n')]
|
import numpy as np
from skimage.measure import label
import skimage.measure._ccomp as ccomp
from skimage._shared import testing
from skimage._shared.testing import assert_array_equal
BG = 0 # background value
class TestConnectedComponents:
def setup(self):
self.x = np.array([
[0, 0, 3, 2, 1, 9],
[0, 1, 1, 9, 2, 9],
[0, 0, 1, 9, 9, 9],
[3, 1, 1, 5, 3, 0]])
self.labels = np.array([
[0, 0, 1, 2, 3, 4],
[0, 5, 5, 4, 2, 4],
[0, 0, 5, 4, 4, 4],
[6, 5, 5, 7, 8, 0]])
# No background - there is no label 0, instead, labelling starts with 1
# and all labels are incremented by 1.
self.labels_nobg = self.labels + 1
# The 0 at lower right corner is isolated, so it should get a new label
self.labels_nobg[-1, -1] = 10
# We say that background value is 9 (and bg label is 0)
self.labels_bg_9 = self.labels_nobg.copy()
self.labels_bg_9[self.x == 9] = 0
# Then, where there was the label 5, we now expect 4 etc.
# (we assume that the label of value 9 would normally be 5)
self.labels_bg_9[self.labels_bg_9 > 5] -= 1
def test_basic(self):
assert_array_equal(label(self.x), self.labels)
# Make sure data wasn't modified
assert self.x[0, 2] == 3
# Check that everything works if there is no background
assert_array_equal(label(self.x, background=99), self.labels_nobg)
# Check that everything works if background value != 0
assert_array_equal(label(self.x, background=9), self.labels_bg_9)
def test_random(self):
x = (np.random.rand(20, 30) * 5).astype(int)
labels = label(x)
n = labels.max()
for i in range(n):
values = x[labels == i]
assert np.all(values == values[0])
def test_diag(self):
x = np.array([[0, 0, 1],
[0, 1, 0],
[1, 0, 0]])
assert_array_equal(label(x), x)
def test_4_vs_8(self):
x = np.array([[0, 1],
[1, 0]], dtype=int)
assert_array_equal(label(x, connectivity=1),
[[0, 1],
[2, 0]])
assert_array_equal(label(x, connectivity=2),
[[0, 1],
[1, 0]])
def test_background(self):
x = np.array([[1, 0, 0],
[1, 1, 5],
[0, 0, 0]])
assert_array_equal(label(x), [[1, 0, 0],
[1, 1, 2],
[0, 0, 0]])
assert_array_equal(label(x, background=0),
[[1, 0, 0],
[1, 1, 2],
[0, 0, 0]])
def test_background_two_regions(self):
x = np.array([[0, 0, 6],
[0, 0, 6],
[5, 5, 5]])
res = label(x, background=0)
assert_array_equal(res,
[[0, 0, 1],
[0, 0, 1],
[2, 2, 2]])
def test_background_one_region_center(self):
x = np.array([[0, 0, 0],
[0, 1, 0],
[0, 0, 0]])
assert_array_equal(label(x, connectivity=1, background=0),
[[0, 0, 0],
[0, 1, 0],
[0, 0, 0]])
def test_return_num(self):
x = np.array([[1, 0, 6],
[0, 0, 6],
[5, 5, 5]])
assert_array_equal(label(x, return_num=True)[1], 3)
assert_array_equal(label(x, background=-1, return_num=True)[1], 4)
class TestConnectedComponents3d:
def setup(self):
self.x = np.zeros((3, 4, 5), int)
self.x[0] = np.array([[0, 3, 2, 1, 9],
[0, 1, 9, 2, 9],
[0, 1, 9, 9, 9],
[3, 1, 5, 3, 0]])
self.x[1] = np.array([[3, 3, 2, 1, 9],
[0, 3, 9, 2, 1],
[0, 3, 3, 1, 1],
[3, 1, 3, 3, 0]])
self.x[2] = np.array([[3, 3, 8, 8, 0],
[2, 3, 9, 8, 8],
[2, 3, 0, 8, 0],
[2, 1, 0, 0, 0]])
self.labels = np.zeros((3, 4, 5), int)
self.labels[0] = np.array([[0, 1, 2, 3, 4],
[0, 5, 4, 2, 4],
[0, 5, 4, 4, 4],
[1, 5, 6, 1, 0]])
self.labels[1] = np.array([[1, 1, 2, 3, 4],
[0, 1, 4, 2, 3],
[0, 1, 1, 3, 3],
[1, 5, 1, 1, 0]])
self.labels[2] = np.array([[1, 1, 7, 7, 0],
[8, 1, 4, 7, 7],
[8, 1, 0, 7, 0],
[8, 5, 0, 0, 0]])
def test_basic(self):
labels = label(self.x)
assert_array_equal(labels, self.labels)
assert self.x[0, 0, 2] == 2, \
"Data was modified!"
def test_random(self):
x = (np.random.rand(20, 30) * 5).astype(int)
labels = label(x)
n = labels.max()
for i in range(n):
values = x[labels == i]
assert np.all(values == values[0])
def test_diag(self):
x = np.zeros((3, 3, 3), int)
x[0, 2, 2] = 1
x[1, 1, 1] = 1
x[2, 0, 0] = 1
assert_array_equal(label(x), x)
def test_4_vs_8(self):
x = np.zeros((2, 2, 2), int)
x[0, 1, 1] = 1
x[1, 0, 0] = 1
label4 = x.copy()
label4[1, 0, 0] = 2
assert_array_equal(label(x, connectivity=1), label4)
assert_array_equal(label(x, connectivity=3), x)
def test_connectivity_1_vs_2(self):
x = np.zeros((2, 2, 2), int)
x[0, 1, 1] = 1
x[1, 0, 0] = 1
label1 = x.copy()
label1[1, 0, 0] = 2
assert_array_equal(label(x, connectivity=1), label1)
assert_array_equal(label(x, connectivity=3), x)
def test_background(self):
x = np.zeros((2, 3, 3), int)
x[0] = np.array([[1, 0, 0],
[1, 0, 0],
[0, 0, 0]])
x[1] = np.array([[0, 0, 0],
[0, 1, 5],
[0, 0, 0]])
lnb = x.copy()
lnb[0] = np.array([[1, 2, 2],
[1, 2, 2],
[2, 2, 2]])
lnb[1] = np.array([[2, 2, 2],
[2, 1, 3],
[2, 2, 2]])
lb = x.copy()
lb[0] = np.array([[1, BG, BG],
[1, BG, BG],
[BG, BG, BG]])
lb[1] = np.array([[BG, BG, BG],
[BG, 1, 2],
[BG, BG, BG]])
assert_array_equal(label(x), lb)
assert_array_equal(label(x, background=-1), lnb)
def test_background_two_regions(self):
x = np.zeros((2, 3, 3), int)
x[0] = np.array([[0, 0, 6],
[0, 0, 6],
[5, 5, 5]])
x[1] = np.array([[6, 6, 0],
[5, 0, 0],
[0, 0, 0]])
lb = x.copy()
lb[0] = np.array([[BG, BG, 1],
[BG, BG, 1],
[2, 2, 2]])
lb[1] = np.array([[1, 1, BG],
[2, BG, BG],
[BG, BG, BG]])
res = label(x, background=0)
assert_array_equal(res, lb)
def test_background_one_region_center(self):
x = np.zeros((3, 3, 3), int)
x[1, 1, 1] = 1
lb = np.ones_like(x) * BG
lb[1, 1, 1] = 1
assert_array_equal(label(x, connectivity=1, background=0), lb)
def test_return_num(self):
x = np.array([[1, 0, 6],
[0, 0, 6],
[5, 5, 5]])
assert_array_equal(label(x, return_num=True)[1], 3)
assert_array_equal(label(x, background=-1, return_num=True)[1], 4)
def test_1D(self):
x = np.array((0, 1, 2, 2, 1, 1, 0, 0))
xlen = len(x)
y = np.array((0, 1, 2, 2, 3, 3, 0, 0))
reshapes = ((xlen,),
(1, xlen), (xlen, 1),
(1, xlen, 1), (xlen, 1, 1), (1, 1, xlen))
for reshape in reshapes:
x2 = x.reshape(reshape)
labelled = label(x2)
assert_array_equal(y, labelled.flatten())
def test_nd(self):
x = np.ones((1, 2, 3, 4))
with testing.raises(NotImplementedError):
label(x)
class TestSupport:
def test_reshape(self):
shapes_in = ((3, 1, 2), (1, 4, 5), (3, 1, 1), (2, 1), (1,))
for shape in shapes_in:
shape = np.array(shape)
numones = sum(shape == 1)
inp = np.random.random(shape)
fixed, swaps = ccomp.reshape_array(inp)
shape2 = fixed.shape
# now check that all ones are at the beginning
for i in range(numones):
assert shape2[i] == 1
back = ccomp.undo_reshape_array(fixed, swaps)
# check that the undo works as expected
assert_array_equal(inp, back)
|
[
"numpy.ones_like",
"skimage._shared.testing.assert_array_equal",
"skimage.measure._ccomp.undo_reshape_array",
"numpy.ones",
"numpy.random.rand",
"numpy.random.random",
"numpy.array",
"numpy.zeros",
"skimage.measure._ccomp.reshape_array",
"skimage._shared.testing.raises",
"numpy.all",
"skimage.measure.label"
] |
[((284, 378), 'numpy.array', 'np.array', (['[[0, 0, 3, 2, 1, 9], [0, 1, 1, 9, 2, 9], [0, 0, 1, 9, 9, 9], [3, 1, 1, 5, 3, 0]\n ]'], {}), '([[0, 0, 3, 2, 1, 9], [0, 1, 1, 9, 2, 9], [0, 0, 1, 9, 9, 9], [3, 1,\n 1, 5, 3, 0]])\n', (292, 378), True, 'import numpy as np\n'), ((447, 541), 'numpy.array', 'np.array', (['[[0, 0, 1, 2, 3, 4], [0, 5, 5, 4, 2, 4], [0, 0, 5, 4, 4, 4], [6, 5, 5, 7, 8, 0]\n ]'], {}), '([[0, 0, 1, 2, 3, 4], [0, 5, 5, 4, 2, 4], [0, 0, 5, 4, 4, 4], [6, 5,\n 5, 7, 8, 0]])\n', (455, 541), True, 'import numpy as np\n'), ((1752, 1760), 'skimage.measure.label', 'label', (['x'], {}), '(x)\n', (1757, 1760), False, 'from skimage.measure import label\n'), ((1935, 1978), 'numpy.array', 'np.array', (['[[0, 0, 1], [0, 1, 0], [1, 0, 0]]'], {}), '([[0, 0, 1], [0, 1, 0], [1, 0, 0]])\n', (1943, 1978), True, 'import numpy as np\n'), ((2103, 2140), 'numpy.array', 'np.array', (['[[0, 1], [1, 0]]'], {'dtype': 'int'}), '([[0, 1], [1, 0]], dtype=int)\n', (2111, 2140), True, 'import numpy as np\n'), ((2460, 2503), 'numpy.array', 'np.array', (['[[1, 0, 0], [1, 1, 5], [0, 0, 0]]'], {}), '([[1, 0, 0], [1, 1, 5], [0, 0, 0]])\n', (2468, 2503), True, 'import numpy as np\n'), ((2923, 2966), 'numpy.array', 'np.array', (['[[0, 0, 6], [0, 0, 6], [5, 5, 5]]'], {}), '([[0, 0, 6], [0, 0, 6], [5, 5, 5]])\n', (2931, 2966), True, 'import numpy as np\n'), ((3026, 3048), 'skimage.measure.label', 'label', (['x'], {'background': '(0)'}), '(x, background=0)\n', (3031, 3048), False, 'from skimage.measure import label\n'), ((3057, 3115), 'skimage._shared.testing.assert_array_equal', 'assert_array_equal', (['res', '[[0, 0, 1], [0, 0, 1], [2, 2, 2]]'], {}), '(res, [[0, 0, 1], [0, 0, 1], [2, 2, 2]])\n', (3075, 3115), False, 'from skimage._shared.testing import assert_array_equal\n'), ((3261, 3304), 'numpy.array', 'np.array', (['[[0, 0, 0], [0, 1, 0], [0, 0, 0]]'], {}), '([[0, 0, 0], [0, 1, 0], [0, 0, 0]])\n', (3269, 3304), True, 'import numpy as np\n'), ((3580, 3623), 'numpy.array', 'np.array', (['[[1, 0, 6], [0, 0, 6], [5, 5, 5]]'], {}), '([[1, 0, 6], [0, 0, 6], [5, 5, 5]])\n', (3588, 3623), True, 'import numpy as np\n'), ((3878, 3902), 'numpy.zeros', 'np.zeros', (['(3, 4, 5)', 'int'], {}), '((3, 4, 5), int)\n', (3886, 3902), True, 'import numpy as np\n'), ((3923, 4001), 'numpy.array', 'np.array', (['[[0, 3, 2, 1, 9], [0, 1, 9, 2, 9], [0, 1, 9, 9, 9], [3, 1, 5, 3, 0]]'], {}), '([[0, 3, 2, 1, 9], [0, 1, 9, 2, 9], [0, 1, 9, 9, 9], [3, 1, 5, 3, 0]])\n', (3931, 4001), True, 'import numpy as np\n'), ((4113, 4191), 'numpy.array', 'np.array', (['[[3, 3, 2, 1, 9], [0, 3, 9, 2, 1], [0, 3, 3, 1, 1], [3, 1, 3, 3, 0]]'], {}), '([[3, 3, 2, 1, 9], [0, 3, 9, 2, 1], [0, 3, 3, 1, 1], [3, 1, 3, 3, 0]])\n', (4121, 4191), True, 'import numpy as np\n'), ((4303, 4381), 'numpy.array', 'np.array', (['[[3, 3, 8, 8, 0], [2, 3, 9, 8, 8], [2, 3, 0, 8, 0], [2, 1, 0, 0, 0]]'], {}), '([[3, 3, 8, 8, 0], [2, 3, 9, 8, 8], [2, 3, 0, 8, 0], [2, 1, 0, 0, 0]])\n', (4311, 4381), True, 'import numpy as np\n'), ((4495, 4519), 'numpy.zeros', 'np.zeros', (['(3, 4, 5)', 'int'], {}), '((3, 4, 5), int)\n', (4503, 4519), True, 'import numpy as np\n'), ((4546, 4624), 'numpy.array', 'np.array', (['[[0, 1, 2, 3, 4], [0, 5, 4, 2, 4], [0, 5, 4, 4, 4], [1, 5, 6, 1, 0]]'], {}), '([[0, 1, 2, 3, 4], [0, 5, 4, 2, 4], [0, 5, 4, 4, 4], [1, 5, 6, 1, 0]])\n', (4554, 4624), True, 'import numpy as np\n'), ((4756, 4834), 'numpy.array', 'np.array', (['[[1, 1, 2, 3, 4], [0, 1, 4, 2, 3], [0, 1, 1, 3, 3], [1, 5, 1, 1, 0]]'], {}), '([[1, 1, 2, 3, 4], [0, 1, 4, 2, 3], [0, 1, 1, 3, 3], [1, 5, 
1, 1, 0]])\n', (4764, 4834), True, 'import numpy as np\n'), ((4966, 5044), 'numpy.array', 'np.array', (['[[1, 1, 7, 7, 0], [8, 1, 4, 7, 7], [8, 1, 0, 7, 0], [8, 5, 0, 0, 0]]'], {}), '([[1, 1, 7, 7, 0], [8, 1, 4, 7, 7], [8, 1, 0, 7, 0], [8, 5, 0, 0, 0]])\n', (4974, 5044), True, 'import numpy as np\n'), ((5194, 5207), 'skimage.measure.label', 'label', (['self.x'], {}), '(self.x)\n', (5199, 5207), False, 'from skimage.measure import label\n'), ((5216, 5255), 'skimage._shared.testing.assert_array_equal', 'assert_array_equal', (['labels', 'self.labels'], {}), '(labels, self.labels)\n', (5234, 5255), False, 'from skimage._shared.testing import assert_array_equal\n'), ((5427, 5435), 'skimage.measure.label', 'label', (['x'], {}), '(x)\n', (5432, 5435), False, 'from skimage.measure import label\n'), ((5610, 5634), 'numpy.zeros', 'np.zeros', (['(3, 3, 3)', 'int'], {}), '((3, 3, 3), int)\n', (5618, 5634), True, 'import numpy as np\n'), ((5784, 5808), 'numpy.zeros', 'np.zeros', (['(2, 2, 2)', 'int'], {}), '((2, 2, 2), int)\n', (5792, 5808), True, 'import numpy as np\n'), ((6079, 6103), 'numpy.zeros', 'np.zeros', (['(2, 2, 2)', 'int'], {}), '((2, 2, 2), int)\n', (6087, 6103), True, 'import numpy as np\n'), ((6365, 6389), 'numpy.zeros', 'np.zeros', (['(2, 3, 3)', 'int'], {}), '((2, 3, 3), int)\n', (6373, 6389), True, 'import numpy as np\n'), ((6405, 6448), 'numpy.array', 'np.array', (['[[1, 0, 0], [1, 0, 0], [0, 0, 0]]'], {}), '([[1, 0, 0], [1, 0, 0], [0, 0, 0]])\n', (6413, 6448), True, 'import numpy as np\n'), ((6514, 6557), 'numpy.array', 'np.array', (['[[0, 0, 0], [0, 1, 5], [0, 0, 0]]'], {}), '([[0, 0, 0], [0, 1, 5], [0, 0, 0]])\n', (6522, 6557), True, 'import numpy as np\n'), ((6649, 6692), 'numpy.array', 'np.array', (['[[1, 2, 2], [1, 2, 2], [2, 2, 2]]'], {}), '([[1, 2, 2], [1, 2, 2], [2, 2, 2]])\n', (6657, 6692), True, 'import numpy as np\n'), ((6764, 6807), 'numpy.array', 'np.array', (['[[2, 2, 2], [2, 1, 3], [2, 2, 2]]'], {}), '([[2, 2, 2], [2, 1, 3], [2, 2, 2]])\n', (6772, 6807), True, 'import numpy as np\n'), ((6900, 6950), 'numpy.array', 'np.array', (['[[1, BG, BG], [1, BG, BG], [BG, BG, BG]]'], {}), '([[1, BG, BG], [1, BG, BG], [BG, BG, BG]])\n', (6908, 6950), True, 'import numpy as np\n'), ((7021, 7071), 'numpy.array', 'np.array', (['[[BG, BG, BG], [BG, 1, 2], [BG, BG, BG]]'], {}), '([[BG, BG, BG], [BG, 1, 2], [BG, BG, BG]])\n', (7029, 7071), True, 'import numpy as np\n'), ((7281, 7305), 'numpy.zeros', 'np.zeros', (['(2, 3, 3)', 'int'], {}), '((2, 3, 3), int)\n', (7289, 7305), True, 'import numpy as np\n'), ((7321, 7364), 'numpy.array', 'np.array', (['[[0, 0, 6], [0, 0, 6], [5, 5, 5]]'], {}), '([[0, 0, 6], [0, 0, 6], [5, 5, 5]])\n', (7329, 7364), True, 'import numpy as np\n'), ((7430, 7473), 'numpy.array', 'np.array', (['[[6, 6, 0], [5, 0, 0], [0, 0, 0]]'], {}), '([[6, 6, 0], [5, 0, 0], [0, 0, 0]])\n', (7438, 7473), True, 'import numpy as np\n'), ((7562, 7609), 'numpy.array', 'np.array', (['[[BG, BG, 1], [BG, BG, 1], [2, 2, 2]]'], {}), '([[BG, BG, 1], [BG, BG, 1], [2, 2, 2]])\n', (7570, 7609), True, 'import numpy as np\n'), ((7680, 7729), 'numpy.array', 'np.array', (['[[1, 1, BG], [2, BG, BG], [BG, BG, BG]]'], {}), '([[1, 1, BG], [2, BG, BG], [BG, BG, BG]])\n', (7688, 7729), True, 'import numpy as np\n'), ((7800, 7822), 'skimage.measure.label', 'label', (['x'], {'background': '(0)'}), '(x, background=0)\n', (7805, 7822), False, 'from skimage.measure import label\n'), ((7831, 7858), 'skimage._shared.testing.assert_array_equal', 'assert_array_equal', (['res', 'lb'], {}), '(res, lb)\n', 
(7849, 7858), False, 'from skimage._shared.testing import assert_array_equal\n'), ((7921, 7945), 'numpy.zeros', 'np.zeros', (['(3, 3, 3)', 'int'], {}), '((3, 3, 3), int)\n', (7929, 7945), True, 'import numpy as np\n'), ((8144, 8187), 'numpy.array', 'np.array', (['[[1, 0, 6], [0, 0, 6], [5, 5, 5]]'], {}), '([[1, 0, 6], [0, 0, 6], [5, 5, 5]])\n', (8152, 8187), True, 'import numpy as np\n'), ((8404, 8438), 'numpy.array', 'np.array', (['(0, 1, 2, 2, 1, 1, 0, 0)'], {}), '((0, 1, 2, 2, 1, 1, 0, 0))\n', (8412, 8438), True, 'import numpy as np\n'), ((8473, 8507), 'numpy.array', 'np.array', (['(0, 1, 2, 2, 3, 3, 0, 0)'], {}), '((0, 1, 2, 2, 3, 3, 0, 0))\n', (8481, 8507), True, 'import numpy as np\n'), ((8833, 8854), 'numpy.ones', 'np.ones', (['(1, 2, 3, 4)'], {}), '((1, 2, 3, 4))\n', (8840, 8854), True, 'import numpy as np\n'), ((1274, 1287), 'skimage.measure.label', 'label', (['self.x'], {}), '(self.x)\n', (1279, 1287), False, 'from skimage.measure import label\n'), ((1469, 1497), 'skimage.measure.label', 'label', (['self.x'], {'background': '(99)'}), '(self.x, background=99)\n', (1474, 1497), False, 'from skimage.measure import label\n'), ((1607, 1634), 'skimage.measure.label', 'label', (['self.x'], {'background': '(9)'}), '(self.x, background=9)\n', (1612, 1634), False, 'from skimage.measure import label\n'), ((1869, 1896), 'numpy.all', 'np.all', (['(values == values[0])'], {}), '(values == values[0])\n', (1875, 1896), True, 'import numpy as np\n'), ((2050, 2058), 'skimage.measure.label', 'label', (['x'], {}), '(x)\n', (2055, 2058), False, 'from skimage.measure import label\n'), ((2191, 2215), 'skimage.measure.label', 'label', (['x'], {'connectivity': '(1)'}), '(x, connectivity=1)\n', (2196, 2215), False, 'from skimage.measure import label\n'), ((2317, 2341), 'skimage.measure.label', 'label', (['x'], {'connectivity': '(2)'}), '(x, connectivity=2)\n', (2322, 2341), False, 'from skimage.measure import label\n'), ((2576, 2584), 'skimage.measure.label', 'label', (['x'], {}), '(x)\n', (2581, 2584), False, 'from skimage.measure import label\n'), ((2725, 2747), 'skimage.measure.label', 'label', (['x'], {'background': '(0)'}), '(x, background=0)\n', (2730, 2747), False, 'from skimage.measure import label\n'), ((3377, 3415), 'skimage.measure.label', 'label', (['x'], {'connectivity': '(1)', 'background': '(0)'}), '(x, connectivity=1, background=0)\n', (3382, 3415), False, 'from skimage.measure import label\n'), ((5544, 5571), 'numpy.all', 'np.all', (['(values == values[0])'], {}), '(values == values[0])\n', (5550, 5571), True, 'import numpy as np\n'), ((5731, 5739), 'skimage.measure.label', 'label', (['x'], {}), '(x)\n', (5736, 5739), False, 'from skimage.measure import label\n'), ((5936, 5960), 'skimage.measure.label', 'label', (['x'], {'connectivity': '(1)'}), '(x, connectivity=1)\n', (5941, 5960), False, 'from skimage.measure import label\n'), ((5997, 6021), 'skimage.measure.label', 'label', (['x'], {'connectivity': '(3)'}), '(x, connectivity=3)\n', (6002, 6021), False, 'from skimage.measure import label\n'), ((6231, 6255), 'skimage.measure.label', 'label', (['x'], {'connectivity': '(1)'}), '(x, connectivity=1)\n', (6236, 6255), False, 'from skimage.measure import label\n'), ((6292, 6316), 'skimage.measure.label', 'label', (['x'], {'connectivity': '(3)'}), '(x, connectivity=3)\n', (6297, 6316), False, 'from skimage.measure import label\n'), ((7154, 7162), 'skimage.measure.label', 'label', (['x'], {}), '(x)\n', (7159, 7162), False, 'from skimage.measure import label\n'), ((7195, 7218), 
'skimage.measure.label', 'label', (['x'], {'background': '(-1)'}), '(x, background=-1)\n', (7200, 7218), False, 'from skimage.measure import label\n'), ((7983, 7998), 'numpy.ones_like', 'np.ones_like', (['x'], {}), '(x)\n', (7995, 7998), True, 'import numpy as np\n'), ((8056, 8094), 'skimage.measure.label', 'label', (['x'], {'connectivity': '(1)', 'background': '(0)'}), '(x, connectivity=1, background=0)\n', (8061, 8094), False, 'from skimage.measure import label\n'), ((8733, 8742), 'skimage.measure.label', 'label', (['x2'], {}), '(x2)\n', (8738, 8742), False, 'from skimage.measure import label\n'), ((8868, 8903), 'skimage._shared.testing.raises', 'testing.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (8882, 8903), False, 'from skimage._shared import testing\n'), ((8917, 8925), 'skimage.measure.label', 'label', (['x'], {}), '(x)\n', (8922, 8925), False, 'from skimage.measure import label\n'), ((9095, 9110), 'numpy.array', 'np.array', (['shape'], {}), '(shape)\n', (9103, 9110), True, 'import numpy as np\n'), ((9167, 9190), 'numpy.random.random', 'np.random.random', (['shape'], {}), '(shape)\n', (9183, 9190), True, 'import numpy as np\n'), ((9219, 9243), 'skimage.measure._ccomp.reshape_array', 'ccomp.reshape_array', (['inp'], {}), '(inp)\n', (9238, 9243), True, 'import skimage.measure._ccomp as ccomp\n'), ((9431, 9469), 'skimage.measure._ccomp.undo_reshape_array', 'ccomp.undo_reshape_array', (['fixed', 'swaps'], {}), '(fixed, swaps)\n', (9455, 9469), True, 'import skimage.measure._ccomp as ccomp\n'), ((9534, 9563), 'skimage._shared.testing.assert_array_equal', 'assert_array_equal', (['inp', 'back'], {}), '(inp, back)\n', (9552, 9563), False, 'from skimage._shared.testing import assert_array_equal\n'), ((3696, 3721), 'skimage.measure.label', 'label', (['x'], {'return_num': '(True)'}), '(x, return_num=True)\n', (3701, 3721), False, 'from skimage.measure import label\n'), ((3757, 3797), 'skimage.measure.label', 'label', (['x'], {'background': '(-1)', 'return_num': '(True)'}), '(x, background=-1, return_num=True)\n', (3762, 3797), False, 'from skimage.measure import label\n'), ((8260, 8285), 'skimage.measure.label', 'label', (['x'], {'return_num': '(True)'}), '(x, return_num=True)\n', (8265, 8285), False, 'from skimage.measure import label\n'), ((8320, 8360), 'skimage.measure.label', 'label', (['x'], {'background': '(-1)', 'return_num': '(True)'}), '(x, background=-1, return_num=True)\n', (8325, 8360), False, 'from skimage.measure import label\n'), ((1695, 1717), 'numpy.random.rand', 'np.random.rand', (['(20)', '(30)'], {}), '(20, 30)\n', (1709, 1717), True, 'import numpy as np\n'), ((5370, 5392), 'numpy.random.rand', 'np.random.rand', (['(20)', '(30)'], {}), '(20, 30)\n', (5384, 5392), True, 'import numpy as np\n')]
|
import torch
import torch.nn as nn
from torch.optim import lr_scheduler
from torch import optim
import torch.nn.functional as F
import random
import numpy as np
# import matplotlib.pyplot as plt
# import seaborn as sns
import os
import json
from utils.measures import wer, moses_multi_bleu
from utils.masked_cross_entropy import *
from utils.config import *
from models.modules import *
class GLMP(nn.Module):
def __init__(self, hidden_size, lang, max_resp_len, path, task, lr, n_layers, dropout):
super(GLMP, self).__init__()
self.name = "GLMP"
self.task = task
self.input_size = lang.n_words
self.output_size = lang.n_words
self.hidden_size = hidden_size
self.lang = lang
self.lr = lr
self.n_layers = n_layers
self.dropout = dropout
self.max_resp_len = max_resp_len
self.decoder_hop = n_layers
self.softmax = nn.Softmax(dim=0)
if path:
if USE_CUDA:
print("MODEL {} LOADED".format(str(path)))
self.encoder = torch.load(str(path)+'/enc.th')
self.extKnow = torch.load(str(path)+'/enc_kb.th')
self.decoder = torch.load(str(path)+'/dec.th')
else:
print("MODEL {} LOADED".format(str(path)))
self.encoder = torch.load(str(path)+'/enc.th',lambda storage, loc: storage)
self.extKnow = torch.load(str(path)+'/enc_kb.th',lambda storage, loc: storage)
self.decoder = torch.load(str(path)+'/dec.th',lambda storage, loc: storage)
else:
self.encoder = ContextRNN(lang.n_words, hidden_size, dropout)
self.extKnow = ExternalKnowledge(lang.n_words, hidden_size, n_layers, dropout)
self.decoder = LocalMemoryDecoder(self.encoder.embedding, lang, hidden_size, self.decoder_hop, dropout) #Generator(lang, hidden_size, dropout)
# # FOR DEBUG
# # pdb.set_trace()
# if path:
# enc_embedding = self.encoder.embedding.weight.data.numpy()
# for name, params in self.encoder.gru.named_parameters():
# name = name
# params = params
# if name == 'weight_ih_l0':
# t1 = name
# t2 = params
# if name == 'weight_ih_l0_reverse':
# t3 = name
# t4 = params
# else:
# continue
# pdb.set_trace()
# for name, params in self.encoder.W.named_parameters():
# t5 = name
# t6 = params
# Initialize optimizers and criterion
self.encoder_optimizer = optim.Adam(self.encoder.parameters(), lr=lr)
self.extKnow_optimizer = optim.Adam(self.extKnow.parameters(), lr=lr)
self.decoder_optimizer = optim.Adam(self.decoder.parameters(), lr=lr)
self.scheduler = lr_scheduler.ReduceLROnPlateau(self.decoder_optimizer, mode='max', factor=0.5, patience=1, min_lr=0.0001, verbose=True)
self.criterion_bce = nn.BCELoss()
self.reset()
if USE_CUDA:
self.encoder.cuda()
self.extKnow.cuda()
self.decoder.cuda()
def print_loss(self):
print_loss_avg = self.loss / self.print_every
print_loss_g = self.loss_g / self.print_every
print_loss_v = self.loss_v / self.print_every
print_loss_l = self.loss_l / self.print_every
self.print_every += 1
return 'L:{:.2f},LE:{:.2f},LG:{:.2f},LP:{:.2f}'.format(print_loss_avg, print_loss_g, print_loss_v, print_loss_l)
def save_model(self, dec_type):
name_data = "KVR/" if self.task=='' else "BABI/"
layer_info = str(self.n_layers)
directory = 'save/GLMP-'+args["addName"]+name_data+str(self.task)+'HDD'+str(self.hidden_size)+'BSZ'+str(args['batch'])+'DR'+str(self.dropout)+'L'+layer_info+'lr'+str(self.lr)+str(dec_type)
if not os.path.exists(directory):
os.makedirs(directory)
torch.save(self.encoder, directory + '/enc.th')
torch.save(self.extKnow, directory + '/enc_kb.th')
torch.save(self.decoder, directory + '/dec.th')
def reset(self):
self.loss, self.print_every, self.loss_g, self.loss_v, self.loss_l = 0, 1, 0, 0, 0
def _cuda(self, x):
if USE_CUDA:
return torch.Tensor(x).cuda()
else:
return torch.Tensor(x)
def train_batch(self, data, clip, reset=0):
if reset: self.reset()
# Zero gradients of both optimizers
self.encoder_optimizer.zero_grad()
self.extKnow_optimizer.zero_grad()
self.decoder_optimizer.zero_grad()
# Encode and Decode
use_teacher_forcing = random.random() < args['teacher_forcing_ratio']
max_target_length = max(data['response_lengths'])
all_decoder_outputs_vocab, all_decoder_outputs_ptr, _, _, global_pointer = self.encode_and_decode(data, max_target_length, use_teacher_forcing, False)
# Loss calculation and backpropagation
# pdb.set_trace()
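        # Three loss terms: loss_g is a BCE loss on the global memory pointer
        # against the gold selector labels, loss_v is a masked cross-entropy on
        # the sketch (vocabulary) outputs, and loss_l is a masked cross-entropy
        # on the local memory-pointer positions.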
loss_g = self.criterion_bce(global_pointer, data['selector_index'])
loss_v = masked_cross_entropy(
all_decoder_outputs_vocab.transpose(0, 1).contiguous(),
data['sketch_response'].contiguous(),
data['response_lengths'])
loss_l = masked_cross_entropy(
all_decoder_outputs_ptr.transpose(0, 1).contiguous(),
data['ptr_index'].contiguous(),
data['response_lengths'])
loss = loss_g + loss_v + loss_l
loss.backward()
# Clip gradient norms
ec = torch.nn.utils.clip_grad_norm_(self.encoder.parameters(), clip)
kc = torch.nn.utils.clip_grad_norm_(self.extKnow.parameters(), clip)
dc = torch.nn.utils.clip_grad_norm_(self.decoder.parameters(), clip)
# Update parameters with optimizers
self.encoder_optimizer.step()
self.extKnow_optimizer.step()
self.decoder_optimizer.step()
self.loss += loss.item()
self.loss_g += loss_g.item()
self.loss_v += loss_v.item()
self.loss_l += loss_l.item()
def encode_and_decode(self, data, max_target_length, use_teacher_forcing, get_decoded_words):
# Build unknown mask for memory
if args['unk_mask'] and self.decoder.training:
story_size = data['context_arr'].size()
rand_mask = np.ones(story_size)
bi_mask = np.random.binomial([np.ones((story_size[0],story_size[1]))], 1-self.dropout)[0]
rand_mask[:,:,0] = rand_mask[:,:,0] * bi_mask
conv_rand_mask = np.ones(data['conv_arr'].size())
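            # Copy each sample's dialog-history slice of the mask (the entries that
            # follow its KB triples in context_arr) into the conversation mask, so the
            # same randomly dropped words are hidden from both the dialog-history
            # encoder and the external knowledge memory.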
for bi in range(story_size[0]):
start, end = data['kb_arr_lengths'][bi], data['kb_arr_lengths'][bi] + data['conv_arr_lengths'][bi]
conv_rand_mask[:end-start,bi,:] = rand_mask[bi,start:end,:]
rand_mask = self._cuda(rand_mask)
conv_rand_mask = self._cuda(conv_rand_mask)
conv_story = data['conv_arr'] * conv_rand_mask.long()
story = data['context_arr'] * rand_mask.long()
else:
story, conv_story = data['context_arr'], data['conv_arr']
# Encode dialog history and KB to vectors
dh_outputs, dh_hidden = self.encoder(conv_story, data['conv_arr_lengths'])
global_pointer, kb_readout = self.extKnow.load_memory(story, data['kb_arr_lengths'], data['conv_arr_lengths'], dh_hidden, dh_outputs)
# encoded_hidden = torch.cat((dh_hidden.squeeze(0), kb_readout), dim=1)
encoded_hidden = torch.cat((dh_hidden.squeeze(0), dh_hidden.squeeze(0)), dim=1)
# Get the words that can be copy from the memory
batch_size = len(data['context_arr_lengths'])
self.copy_list = []
for elm in data['context_arr_plain']:
elm_temp = [ word_arr[0] for word_arr in elm ]
self.copy_list.append(elm_temp)
outputs_vocab, outputs_ptr, decoded_fine, decoded_coarse = self.decoder(
self.extKnow,
story.size(),
data['context_arr_lengths'],
self.copy_list,
encoded_hidden,
data['sketch_response'],
max_target_length,
batch_size,
use_teacher_forcing,
get_decoded_words,
global_pointer)
return outputs_vocab, outputs_ptr, decoded_fine, decoded_coarse, global_pointer
def evaluate(self, dev, matric_best, early_stop=None):
print("STARTING EVALUATION")
# Set to not-training mode to disable dropout
self.encoder.train(False)
self.extKnow.train(False)
self.decoder.train(False)
ref, hyp = [], []
acc, total = 0, 0
dialog_acc_dict = {}
F1_pred, F1_cal_pred, F1_nav_pred, F1_wet_pred, F1_restaurant_pred, F1_hotel_pred, F1_attraction_pred, F1_train_pred, F1_travel_pred, F1_events_pred, F1_weather_pred, F1_others_pred = 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
F1_count, F1_cal_count, F1_nav_count, F1_wet_count, F1_restaurant_count, F1_hotel_count, F1_attraction_count, F1_train_count, F1_travel_count, F1_events_count, F1_weather_count, F1_others_count = 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
pbar = tqdm(enumerate(dev), total=len(dev))
new_precision, new_recall, new_f1_score = 0, 0, 0
global_entity_list = []
if args['dataset'] == 'kvr':
with open('data/KVR/kvret_entities.json') as f:
global_entity = json.load(f)
global_entity_list = []
for key in global_entity.keys():
if key != 'poi':
global_entity_list += [item.lower().replace(' ', '_') for item in global_entity[key]]
else:
for item in global_entity['poi']:
global_entity_list += [item[k].lower().replace(' ', '_') for k in item.keys()]
global_entity_list = list(set(global_entity_list))
if args['dataset'] == 'multiwoz':
with open('data/multiwoz/multiwoz_entities.json') as f:
global_entity = json.load(f)
global_entity_list = []
for key in global_entity.keys():
global_entity_list += [item.lower().replace(' ', '_') for item in global_entity[key]]
global_entity_list = list(set(global_entity_list))
if args['dataset'] == 'sgd':
with open('data/sgd/sgd_entities.json') as f:
global_entity = json.load(f)
global_entity_list = []
for key in global_entity.keys():
global_entity_list += [item.lower().replace(' ', '_') for item in global_entity[key]]
global_entity_list = list(set(global_entity_list))
for j, data_dev in pbar:
# Encode and Decode
max_target_length = max(data_dev['response_lengths'])
            _, _, decoded_fine, decoded_coarse, global_pointer = self.encode_and_decode(data_dev, max_target_length,
                                                                                        False, True)
decoded_coarse = np.transpose(decoded_coarse)
decoded_fine = np.transpose(decoded_fine)
for bi, row in enumerate(decoded_fine):
st = ''
for e in row:
if e == 'EOS':
break
else:
st += e + ' '
st_c = ''
for e in decoded_coarse[bi]:
if e == 'EOS':
break
else:
st_c += e + ' '
pred_sent = st.lstrip().rstrip()
pred_sent_coarse = st_c.lstrip().rstrip()
gold_sent = data_dev['response_plain'][bi].lstrip().rstrip()
ref.append(gold_sent)
hyp.append(pred_sent)
if args['dataset'] == 'kvr':
# compute F1 SCORE
single_f1, count = self.compute_prf(data_dev['ent_index'][bi], pred_sent.split(),
global_entity_list, data_dev['kb_arr_plain'][bi])
F1_pred += single_f1
F1_count += count
single_f1, count = self.compute_prf(data_dev['ent_idx_cal'][bi], pred_sent.split(),
global_entity_list, data_dev['kb_arr_plain'][bi])
F1_cal_pred += single_f1
F1_cal_count += count
single_f1, count = self.compute_prf(data_dev['ent_idx_nav'][bi], pred_sent.split(),
global_entity_list, data_dev['kb_arr_plain'][bi])
F1_nav_pred += single_f1
F1_nav_count += count
single_f1, count = self.compute_prf(data_dev['ent_idx_wet'][bi], pred_sent.split(),
global_entity_list, data_dev['kb_arr_plain'][bi])
F1_wet_pred += single_f1
F1_wet_count += count
elif args['dataset'] == 'multiwoz':
# compute F1 SCORE
single_f1, count = self.compute_prf(data_dev['ent_index'][bi], pred_sent.split(),
global_entity_list, data_dev['kb_arr_plain'][
bi]) # data[14]: ent_index, data[9]: kb_arr_plain.
F1_pred += single_f1
F1_count += count
single_f1, count = self.compute_prf(data_dev['ent_idx_restaurant'][bi], pred_sent.split(),
global_entity_list, data_dev['kb_arr_plain'][
bi]) # data[28]: ent_idx_restaurant, data[9]: kb_arr_plain.
F1_restaurant_pred += single_f1
F1_restaurant_count += count
single_f1, count = self.compute_prf(data_dev['ent_idx_hotel'][bi], pred_sent.split(),
global_entity_list, data_dev['kb_arr_plain'][
bi]) # data[29]: ent_idx_hotel, data[9]: kb_arr_plain.
F1_hotel_pred += single_f1
F1_hotel_count += count
single_f1, count = self.compute_prf(data_dev['ent_idx_attraction'][bi], pred_sent.split(),
global_entity_list, data_dev['kb_arr_plain'][
bi]) # data[30]: ent_idx_attraction, data[9]: kb_arr_plain.
F1_attraction_pred += single_f1
F1_attraction_count += count
single_f1, count = self.compute_prf(data_dev['ent_idx_train'][bi], pred_sent.split(),
global_entity_list, data_dev['kb_arr_plain'][
bi]) # data[31]: ent_idx_train, data[9]: kb_arr_plain.
F1_train_pred += single_f1
F1_train_count += count
elif args['dataset'] == 'sgd':
# compute F1 SCORE
single_f1, count = self.compute_prf(data_dev['ent_index'][bi], pred_sent.split(),
global_entity_list, data_dev['kb_arr_plain'][
bi]) # data[14]: ent_index, data[9]: kb_arr_plain.
F1_pred += single_f1
F1_count += count
single_f1, count = self.compute_prf(data_dev['ent_idx_travel'][bi], pred_sent.split(),
global_entity_list, data_dev['kb_arr_plain'][
bi]) # data[28]: ent_idx_restaurant, data[9]: kb_arr_plain.
F1_travel_pred += single_f1
F1_travel_count += count
single_f1, count = self.compute_prf(data_dev['ent_idx_hotel'][bi], pred_sent.split(),
global_entity_list, data_dev['kb_arr_plain'][
bi]) # data[29]: ent_idx_hotel, data[9]: kb_arr_plain.
F1_hotel_pred += single_f1
F1_hotel_count += count
single_f1, count = self.compute_prf(data_dev['ent_idx_events'][bi], pred_sent.split(),
global_entity_list, data_dev['kb_arr_plain'][
bi]) # data[30]: ent_idx_attraction, data[9]: kb_arr_plain.
F1_events_pred += single_f1
F1_events_count += count
single_f1, count = self.compute_prf(data_dev['ent_idx_weather'][bi], pred_sent.split(),
global_entity_list, data_dev['kb_arr_plain'][
bi]) # data[31]: ent_idx_train, data[9]: kb_arr_plain.
F1_weather_pred += single_f1
F1_weather_count += count
single_f1, count = self.compute_prf(data_dev['ent_idx_others'][bi], pred_sent.split(),
global_entity_list, data_dev['kb_arr_plain'][
bi]) # data[31]: ent_idx_train, data[9]: kb_arr_plain.
F1_others_pred += single_f1
F1_others_count += count
else:
# compute Dialogue Accuracy Score
current_id = data_dev['ID'][bi]
if current_id not in dialog_acc_dict.keys():
dialog_acc_dict[current_id] = []
if gold_sent == pred_sent:
dialog_acc_dict[current_id].append(1)
else:
dialog_acc_dict[current_id].append(0)
# compute Per-response Accuracy Score
total += 1
if (gold_sent == pred_sent):
acc += 1
if args['genSample']:
self.print_examples(bi, data_dev, pred_sent, pred_sent_coarse, gold_sent)
# Set back to training mode
self.encoder.train(True)
self.extKnow.train(True)
self.decoder.train(True)
bleu_score = moses_multi_bleu(np.array(hyp), np.array(ref), lowercase=True)
acc_score = acc / float(total)
print("ACC SCORE:\t" + str(acc_score))
if args['dataset'] == 'kvr':
F1_score = F1_pred / float(F1_count)
            cal_f1 = 0.0 if F1_cal_count == 0 else (F1_cal_pred / float(F1_cal_count))
nav_f1 = 0.0 if F1_nav_count == 0 else (F1_nav_pred / float(F1_nav_count))
            wet_f1 = 0.0 if F1_wet_count == 0 else (F1_wet_pred / float(F1_wet_count))
print("F1 SCORE:\t{:.4f}".format(F1_pred / float(F1_count)))
print("CAL F1:\t{:.4f}".format(cal_f1))
print("NAV F1:\t{:.4f}".format(nav_f1))
print("WET F1:\t{:.4f}".format(wet_f1))
print("BLEU SCORE:\t" + str(bleu_score))
elif args['dataset'] == 'multiwoz':
F1_score = F1_pred / float(F1_count)
rest_f1 = 0.0 if F1_restaurant_count == 0 else (F1_restaurant_pred / float(F1_restaurant_count))
hotel_f1 = 0.0 if F1_hotel_count == 0 else (F1_hotel_pred / float(F1_hotel_count))
attraction_f1 = 0.0 if F1_attraction_count == 0 else (F1_attraction_pred / float(F1_attraction_count))
train_f1 = 0.0 if F1_train_count == 0 else (F1_train_pred / float(F1_train_count))
print("F1 SCORE:\t{:.4f}".format(F1_pred / float(F1_count)))
print("Restaurant F1:\t{:.4f}".format(rest_f1))
print("Hotel F1:\t{:.4f}".format(hotel_f1))
print("Attraction F1:\t{:.4f}".format(attraction_f1))
print("Train F1:\t{:.4f}".format(train_f1))
print("BLEU SCORE:\t" + str(bleu_score))
elif args['dataset'] == 'sgd':
F1_score = F1_pred / float(F1_count)
            travel_f1 = 0.0 if F1_travel_count == 0 else (F1_travel_pred / float(F1_travel_count))
hotel_f1 = 0.0 if F1_hotel_count == 0 else (F1_hotel_pred / float(F1_hotel_count))
events_f1 = 0.0 if F1_events_count == 0 else (F1_events_pred / float(F1_events_count))
weather_f1 = 0.0 if F1_weather_count == 0 else (F1_weather_pred / float(F1_weather_count))
others_f1 = 0.0 if F1_others_count == 0 else (F1_others_pred / float(F1_others_count))
print("F1 SCORE:\t{:.4f}".format(F1_pred / float(F1_count)))
print("Travel F1:\t{:.4f}".format(travel_f1))
print("Hotel F1:\t{:.4f}".format(hotel_f1))
print("Events F1:\t{:.4f}".format(events_f1))
print("Weather F1:\t{:.4f}".format(weather_f1))
print("Others F1:\t{:.4f}".format(others_f1))
print("BLEU SCORE:\t" + str(bleu_score))
else:
dia_acc = 0
for k in dialog_acc_dict.keys():
if len(dialog_acc_dict[k]) == sum(dialog_acc_dict[k]):
dia_acc += 1
print("Dialog Accuracy:\t" + str(dia_acc * 1.0 / len(dialog_acc_dict.keys())))
if (early_stop == 'BLEU'):
if (bleu_score >= matric_best):
self.save_model('BLEU-' + str(bleu_score))
print("MODEL SAVED")
return bleu_score
elif (early_stop == 'ENTF1'):
if (F1_score >= matric_best):
self.save_model('ENTF1-{:.4f}'.format(F1_score))
print("MODEL SAVED")
return F1_score
else:
if (acc_score >= matric_best):
self.save_model('ACC-{:.4f}'.format(acc_score))
print("MODEL SAVED")
return acc_score
def compute_prf(self, gold, pred, global_entity_list, kb_plain):
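        # Entity micro-F1 for a single response: a gold entity found in the
        # prediction counts as a true positive, a missing one as a false negative,
        # and a predicted token that is a known entity (global list or local KB)
        # but absent from the gold set counts as a false positive. Responses with
        # no gold entities return F1 = 0 with count = 0 so they are excluded.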
local_kb_word = [k[0] for k in kb_plain]
TP, FP, FN = 0, 0, 0
        if len(gold) != 0:
count = 1
for g in gold:
if g in pred:
TP += 1
else:
FN += 1
for p in set(pred):
if p in global_entity_list or p in local_kb_word:
if p not in gold:
FP += 1
precision = TP / float(TP+FP) if (TP+FP)!=0 else 0
recall = TP / float(TP+FN) if (TP+FN)!=0 else 0
F1 = 2 * precision * recall / float(precision + recall) if (precision+recall)!=0 else 0
else:
precision, recall, F1, count = 0, 0, 0, 0
return F1, count
def print_examples(self, batch_idx, data, pred_sent, pred_sent_coarse, gold_sent):
kb_len = len(data['context_arr_plain'][batch_idx])-data['conv_arr_lengths'][batch_idx]-1
print("{}: ID{} id{} ".format(data['domain'][batch_idx], data['ID'][batch_idx], data['id'][batch_idx]))
for i in range(kb_len):
kb_temp = [w for w in data['context_arr_plain'][batch_idx][i] if w!='PAD']
kb_temp = kb_temp[::-1]
if 'poi' not in kb_temp:
print(kb_temp)
flag_uttr, uttr = '$u', []
for word_idx, word_arr in enumerate(data['context_arr_plain'][batch_idx][kb_len:]):
if word_arr[1]==flag_uttr:
uttr.append(word_arr[0])
else:
print(flag_uttr,': ', " ".join(uttr))
flag_uttr = word_arr[1]
uttr = [word_arr[0]]
print('Sketch System Response : ', pred_sent_coarse)
print('Final System Response : ', pred_sent)
print('Gold System Response : ', gold_sent)
print('\n')
|
[
"os.path.exists",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"numpy.ones",
"torch.nn.Softmax",
"os.makedirs",
"torch.Tensor",
"numpy.array",
"torch.nn.BCELoss",
"torch.save",
"json.load",
"random.random",
"numpy.transpose"
] |
[((926, 943), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(0)'}), '(dim=0)\n', (936, 943), True, 'import torch.nn as nn\n'), ((2954, 3078), 'torch.optim.lr_scheduler.ReduceLROnPlateau', 'lr_scheduler.ReduceLROnPlateau', (['self.decoder_optimizer'], {'mode': '"""max"""', 'factor': '(0.5)', 'patience': '(1)', 'min_lr': '(0.0001)', 'verbose': '(True)'}), "(self.decoder_optimizer, mode='max', factor=\n 0.5, patience=1, min_lr=0.0001, verbose=True)\n", (2984, 3078), False, 'from torch.optim import lr_scheduler\n'), ((3103, 3115), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {}), '()\n', (3113, 3115), True, 'import torch.nn as nn\n'), ((4095, 4142), 'torch.save', 'torch.save', (['self.encoder', "(directory + '/enc.th')"], {}), "(self.encoder, directory + '/enc.th')\n", (4105, 4142), False, 'import torch\n'), ((4151, 4201), 'torch.save', 'torch.save', (['self.extKnow', "(directory + '/enc_kb.th')"], {}), "(self.extKnow, directory + '/enc_kb.th')\n", (4161, 4201), False, 'import torch\n'), ((4210, 4257), 'torch.save', 'torch.save', (['self.decoder', "(directory + '/dec.th')"], {}), "(self.decoder, directory + '/dec.th')\n", (4220, 4257), False, 'import torch\n'), ((4025, 4050), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (4039, 4050), False, 'import os\n'), ((4064, 4086), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (4075, 4086), False, 'import os\n'), ((4496, 4511), 'torch.Tensor', 'torch.Tensor', (['x'], {}), '(x)\n', (4508, 4511), False, 'import torch\n'), ((4832, 4847), 'random.random', 'random.random', ([], {}), '()\n', (4845, 4847), False, 'import random\n'), ((6545, 6564), 'numpy.ones', 'np.ones', (['story_size'], {}), '(story_size)\n', (6552, 6564), True, 'import numpy as np\n'), ((11384, 11412), 'numpy.transpose', 'np.transpose', (['decoded_coarse'], {}), '(decoded_coarse)\n', (11396, 11412), True, 'import numpy as np\n'), ((11440, 11466), 'numpy.transpose', 'np.transpose', (['decoded_fine'], {}), '(decoded_fine)\n', (11452, 11466), True, 'import numpy as np\n'), ((19105, 19118), 'numpy.array', 'np.array', (['hyp'], {}), '(hyp)\n', (19113, 19118), True, 'import numpy as np\n'), ((19120, 19133), 'numpy.array', 'np.array', (['ref'], {}), '(ref)\n', (19128, 19133), True, 'import numpy as np\n'), ((9671, 9683), 'json.load', 'json.load', (['f'], {}), '(f)\n', (9680, 9683), False, 'import json\n'), ((10321, 10333), 'json.load', 'json.load', (['f'], {}), '(f)\n', (10330, 10333), False, 'import json\n'), ((10724, 10736), 'json.load', 'json.load', (['f'], {}), '(f)\n', (10733, 10736), False, 'import json\n'), ((4440, 4455), 'torch.Tensor', 'torch.Tensor', (['x'], {}), '(x)\n', (4452, 4455), False, 'import torch\n'), ((6607, 6646), 'numpy.ones', 'np.ones', (['(story_size[0], story_size[1])'], {}), '((story_size[0], story_size[1]))\n', (6614, 6646), True, 'import numpy as np\n')]
|
# Copyright 2020 The ElasticDL Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import unittest
import grpc
import numpy as np
import tensorflow as tf
from google.protobuf import empty_pb2
from elasticai_api.util.grpc_utils import build_channel
from elasticdl.proto import elasticdl_pb2, elasticdl_pb2_grpc
from elasticdl.python.common.model_utils import (
get_module_file_path,
load_module,
)
from elasticdl.python.common.save_utils import CheckpointSaver
from elasticdl.python.common.tensor_utils import (
Tensor,
pb_to_ndarray,
serialize_indexed_slices,
serialize_ndarray,
)
from elasticdl.python.ps.embedding_table import (
EmbeddingTable,
get_slot_table_name,
)
from elasticdl.python.ps.parameter_server import ParameterServer
from elasticdl.python.ps.parameters import Parameters
from elasticdl.python.ps.servicer import PserverServicer
from elasticdl.python.tests.test_utils import PserverArgs
_test_model_zoo_path = os.path.dirname(os.path.realpath(__file__))
_module_file = get_module_file_path(
_test_model_zoo_path, "test_module.custom_model"
)
class PserverServicerTest(unittest.TestCase):
def setUp(self):
self._port = 9999
addr = "localhost:%d" % self._port
self._channel = build_channel(addr)
embedding_info = elasticdl_pb2.EmbeddingTableInfo()
embedding_info.name = "layer_a"
embedding_info.dim = 32
embedding_info.initializer = "normal"
self._embedding_info = embedding_info
self._server = None
def tearDown(self):
if self._server:
self._server.stop(0)
def create_server_and_stub(
self, grads_to_wait, lr_staleness_modulation, use_async, **kwargs
):
args = PserverArgs(
grads_to_wait=grads_to_wait,
lr_staleness_modulation=lr_staleness_modulation,
use_async=use_async,
port=self._port,
model_zoo=_test_model_zoo_path,
model_def="test_module.custom_model",
**kwargs
)
pserver = ParameterServer(args)
pserver.prepare()
self._parameters = pserver.parameters
self._server = pserver.server
self._stub = elasticdl_pb2_grpc.PserverStub(self._channel)
grpc.channel_ready_future(self._channel).result()
self._lr = 0.1
def create_default_server_and_stub(self, **kwargs):
grads_to_wait = 8
lr_staleness_modulation = False
use_async = True
self.create_server_and_stub(
grads_to_wait, lr_staleness_modulation, use_async, **kwargs
)
def get_embedding_vectors(self, name, ids):
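        # Helper: pull the embedding rows for `ids` from the parameter server and
        # return them as an ndarray, or None when the reply carries no tensor data.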
pull_req = elasticdl_pb2.PullEmbeddingVectorRequest()
pull_req.name = name
pull_req.ids.extend(ids)
res = self._stub.pull_embedding_vectors(pull_req)
if res.tensor_content:
return pb_to_ndarray(res)
else:
return None
def test_push_model(self):
opt_func_name = "ftrl_optimizer"
opt = load_module(_module_file).__dict__[opt_func_name]()
opt_config = opt.get_config()
slot_names = ["accumulator", "linear"]
slot_init_value = {
"accumulator": opt_config["initial_accumulator_value"],
"linear": 0.0,
}
self.create_default_server_and_stub(optimizer=opt_func_name)
param0 = {
"v0": np.random.rand(3, 2).astype(np.float32),
"v1": np.random.rand(10, 32).astype(np.float32),
}
param1 = {
"v0": np.ones([3, 2], dtype=np.float32),
"v1": np.ones([10, 32], dtype=np.float32),
}
models = [param0, param1]
for idx, model in enumerate(models):
req = elasticdl_pb2.Model()
req.version = idx + 1
for name in model:
serialize_ndarray(model[name], req.dense_parameters[name])
req.embedding_table_infos.append(self._embedding_info)
res = self._stub.push_model(req)
self.assertEqual(res, empty_pb2.Empty())
# self._parameters is initialized with the first push_model call
# and the second push_model has no effect
self.assertEqual(self._parameters.version, 1)
for name in param0:
self.assertTrue(
np.allclose(
param0[name],
self._parameters.non_embedding_params[name].numpy(),
)
)
self.assertEqual(
self._embedding_info.name,
self._parameters.embedding_params[
self._embedding_info.name
].name,
)
self.assertEqual(
self._embedding_info.dim,
self._parameters.embedding_params[
self._embedding_info.name
].dim,
)
self.assertEqual(
tf.keras.initializers.get(
self._embedding_info.initializer
).__class__,
self._parameters.embedding_params[
self._embedding_info.name
].initializer.__class__,
)
for slot_name in slot_names:
name = get_slot_table_name(
self._embedding_info.name, slot_name
)
table = self._parameters.embedding_params[name]
self.assertTrue(name, table.name)
self.assertTrue(self._embedding_info.dim, table.dim)
embedding = table.get([2])
self.assertTrue(
(embedding - slot_init_value[slot_name] < 0.0001).all()
)
def test_pull_dense_parameters(self):
self.create_default_server_and_stub()
param0 = {
"v0": np.random.rand(3, 2).astype(np.float32),
"v1": np.random.rand(10, 32).astype(np.float32),
}
pull_req = elasticdl_pb2.PullDenseParametersRequest()
pull_req.version = -1
# try to pull variable
res = self._stub.pull_dense_parameters(pull_req)
# not initialized
self.assertFalse(res.initialized)
# init variable
req = elasticdl_pb2.Model()
req.version = 1
for name, var in param0.items():
serialize_ndarray(var, req.dense_parameters[name])
res = self._stub.push_model(req)
self.assertEqual(res, empty_pb2.Empty())
# pull variable back
res = self._stub.pull_dense_parameters(pull_req)
self.assertTrue(res.initialized)
self.assertEqual(res.version, req.version)
for name, pb in res.dense_parameters.items():
tensor = pb_to_ndarray(pb)
self.assertTrue(np.allclose(param0[name], tensor))
# pull variable again, no param as no updated version
pull_req.version = res.version
res = self._stub.pull_dense_parameters(pull_req)
self.assertTrue(res.initialized)
self.assertEqual(res.version, pull_req.version)
self.assertTrue(not res.dense_parameters)
def test_pull_embedding_vectors(self):
self.create_default_server_and_stub()
id_list_0 = [1, 3, 9, 6]
id_list_1 = [8, 9, 1, 0, 6]
req = elasticdl_pb2.Model()
req.version = 1
req.embedding_table_infos.append(self._embedding_info)
another_embedding_info = elasticdl_pb2.EmbeddingTableInfo()
another_embedding_info.name = "layer_b"
another_embedding_info.dim = 16
another_embedding_info.initializer = "normal"
req.embedding_table_infos.append(another_embedding_info)
res = self._stub.push_model(req)
self.assertEqual(res, empty_pb2.Empty())
vectors_a_0 = self.get_embedding_vectors("layer_a", id_list_0)
self.assertEqual(vectors_a_0.shape[0], len(id_list_0))
self.assertEqual(vectors_a_0.shape[1], 32)
vectors_a_1 = self.get_embedding_vectors("layer_a", id_list_1)
self.assertEqual(vectors_a_1.shape[0], len(id_list_1))
self.assertEqual(vectors_a_1.shape[1], 32)
vectors_b_1 = self.get_embedding_vectors("layer_b", id_list_1)
self.assertEqual(vectors_b_1.shape[0], len(id_list_1))
self.assertEqual(vectors_b_1.shape[1], 16)
vectors_b_0 = self.get_embedding_vectors("layer_b", id_list_0)
self.assertEqual(vectors_b_0.shape[0], len(id_list_0))
self.assertEqual(vectors_b_0.shape[1], 16)
for idx0, id0 in enumerate(id_list_0):
for idx1, id1 in enumerate(id_list_1):
if id0 == id1:
self.assertTrue(
np.array_equal(vectors_a_0[idx0], vectors_a_1[idx1])
)
self.assertTrue(
np.array_equal(vectors_b_0[idx0], vectors_b_1[idx1])
)
vectors = self.get_embedding_vectors("layer_a", [])
self.assertEqual(vectors, None)
def push_gradient_test_setup(self):
self.var_names = ["test_1", "test_2"]
self.var_values = [
np.array([10.0, 20.0, 30.0], np.float32),
np.array([20.0, 40.0, 60.0], np.float32),
]
self.grad_values0 = [
np.array([1.0, 2.0, 3.0], np.float32),
np.array([2.0, 4.0, 6.0], np.float32),
]
self.grad_values1 = [
np.array([0.0, 0.0, 7.0], np.float32),
np.array([9.0, 9.0, 6.0], np.float32),
]
dim = self._embedding_info.dim
self.embedding_table = (
np.random.rand(4 * dim).reshape((4, dim)).astype(np.float32)
)
self.embedding_grads0 = Tensor(
None,
np.random.rand(3 * dim).reshape((3, dim)).astype(np.float32),
np.asarray([3, 1, 3]),
)
self.embedding_grads1 = Tensor(
None,
np.random.rand(3 * dim).reshape((3, dim)).astype(np.float32),
np.asarray([2, 2, 3]),
)
push_model_req = elasticdl_pb2.Model()
push_model_req.version = self._parameters.version
for name, value in zip(self.var_names, self.var_values):
serialize_ndarray(value, push_model_req.dense_parameters[name])
push_model_req.embedding_table_infos.append(self._embedding_info)
self._stub.push_model(push_model_req)
for name, var in zip(self.var_names, self.var_values):
self._parameters.non_embedding_params[name] = tf.Variable(var)
self._parameters.embedding_params[self._embedding_info.name].set(
range(len(self.embedding_table)), self.embedding_table
)
def test_push_gradient_async_update(self):
self.create_default_server_and_stub()
self.push_gradient_test_setup()
# Test applying gradients to embedding and non-embedding parameters
req = elasticdl_pb2.PushGradientsRequest()
for g, name in zip(self.grad_values0, self.var_names):
serialize_ndarray(g, req.gradients.dense_parameters[name])
serialize_indexed_slices(
self.embedding_grads0,
req.gradients.embedding_tables[self._embedding_info.name],
)
res = self._stub.push_gradients(req)
self.assertEqual(res.accepted, True)
self.assertEqual(res.version, 1)
expected_values = [
v - self._lr * g
for v, g in zip(self.var_values, self.grad_values0)
]
for name, expected_value in zip(self.var_names, expected_values):
self.assertTrue(
np.allclose(
expected_value,
self._parameters.non_embedding_params[name].numpy(),
)
)
expected_embed_table = np.copy(self.embedding_table)
for gv, gi in zip(
self.embedding_grads0.values, self.embedding_grads0.indices
):
expected_embed_table[gi] -= self._lr * gv
actual_embed_table = self._parameters.get_embedding_param(
self._embedding_info.name, range(len(expected_embed_table))
)
self.assertTrue(np.allclose(expected_embed_table, actual_embed_table))
# Test applying gradients with same name
for name, var in zip(self.var_names, self.var_values):
self._parameters.non_embedding_params[name] = tf.Variable(var)
req = elasticdl_pb2.PushGradientsRequest()
serialize_ndarray(
self.grad_values1[1],
req.gradients.dense_parameters[self.var_names[0]],
)
res = self._stub.push_gradients(req)
self.assertEqual(res.accepted, True)
self.assertEqual(res.version, 2)
expected_values = [
self.var_values[0] - self._lr * self.grad_values1[1],
self.var_values[1],
]
for expected_value, name in zip(expected_values, self.var_names):
self.assertTrue(
np.allclose(
expected_value,
self._parameters.non_embedding_params[name].numpy(),
)
)
def test_push_gradient_sync_update(self):
self.create_server_and_stub(
grads_to_wait=2, lr_staleness_modulation=False, use_async=False
)
self.push_gradient_test_setup()
req = elasticdl_pb2.PushGradientsRequest()
req.gradients.version = 0
for g, name in zip(self.grad_values0, self.var_names):
serialize_ndarray(g, req.gradients.dense_parameters[name])
serialize_indexed_slices(
self.embedding_grads0,
req.gradients.embedding_tables[self._embedding_info.name],
)
res = self._stub.push_gradients(req)
self.assertEqual(res.accepted, True)
self.assertEqual(res.version, 0)
req = elasticdl_pb2.PushGradientsRequest()
req.gradients.version = 0
for g, name in zip(self.grad_values1, self.var_names):
serialize_ndarray(g, req.gradients.dense_parameters[name])
serialize_indexed_slices(
self.embedding_grads1,
req.gradients.embedding_tables[self._embedding_info.name],
)
res = self._stub.push_gradients(req)
self.assertEqual(res.accepted, True)
self.assertEqual(res.version, 1)
req = elasticdl_pb2.PushGradientsRequest()
req.gradients.version = 0
for g, name in zip(self.grad_values1, self.var_names):
serialize_ndarray(g, req.gradients.dense_parameters[name])
res = self._stub.push_gradients(req)
self.assertEqual(res.accepted, False)
self.assertEqual(res.version, 1)
expected_values = [
self.var_values[0]
- self._lr * (self.grad_values0[0] + self.grad_values1[0]) / 2,
self.var_values[1]
- self._lr * (self.grad_values0[1] + self.grad_values1[1]) / 2,
]
for expected_value, name in zip(expected_values, self.var_names):
self.assertTrue(
np.allclose(
expected_value,
self._parameters.non_embedding_params[name].numpy(),
)
)
expected_embed_table = np.copy(self.embedding_table)
for gv, gi in zip(
self.embedding_grads0.values, self.embedding_grads0.indices
):
expected_embed_table[gi] -= self._lr * gv
for gv, gi in zip(
self.embedding_grads1.values, self.embedding_grads1.indices
):
expected_embed_table[gi] -= self._lr * gv
actual_embed_table = self._parameters.get_embedding_param(
self._embedding_info.name, range(len(expected_embed_table))
)
self.assertTrue(np.allclose(expected_embed_table, actual_embed_table))
def test_save_parameters_to_checkpoint_file(self):
with tempfile.TemporaryDirectory() as tempdir:
checkpoint_saver = CheckpointSaver(
checkpoint_dir=os.path.join(tempdir, "ckpt/"),
checkpoint_steps=5,
keep_checkpoint_max=3,
include_evaluation=False,
)
pserver_servicer = PserverServicer(
parameters=Parameters(),
grads_to_wait=0,
optimizer="optimizer",
checkpoint_saver=checkpoint_saver,
ps_id=0,
num_ps_pods=1,
)
model_params = {
"v0": tf.Variable([[1, 1, 1], [1, 1, 1]]),
"v1": tf.Variable([[2, 2, 2], [2, 2, 2]]),
}
server_params = pserver_servicer._parameters
for var_name, var_value in model_params.items():
server_params.non_embedding_params[var_name] = var_value
embedding_table = EmbeddingTable(
name="embedding_0", dim=3, initializer="random_uniform"
)
server_params.embedding_params["embedding_0"] = embedding_table
server_params.set_embedding_param(
name="embedding_0",
indices=np.array([0, 1]),
values=np.array([[1, 1, 1], [2, 2, 2]]),
)
for i in range(100):
pserver_servicer._parameters.version += 1
pserver_servicer._save_params_to_checkpoint_if_needed()
self.assertEqual(len(os.listdir(checkpoint_saver._directory)), 3)
self.assertEqual(
sorted(os.listdir(checkpoint_saver._directory)),
["version-100", "version-90", "version-95"],
)
self.assertEqual(
os.listdir(checkpoint_saver._directory + "/version-100"),
["variables-0-of-1.ckpt"],
)
def test_restore_parameters_from_checkpoint(self):
checkpoint_dir = "elasticdl/python/tests/testdata/ps_ckpt"
checkpoint_saver = CheckpointSaver(checkpoint_dir, 0, 0, False)
params = Parameters()
table = EmbeddingTable("embedding", 2, "random_uniform")
table.set([0, 1, 2, 3], np.ones((4, 2), dtype=np.float32))
params.embedding_params["embedding"] = table
params.non_embedding_params["dense/kernel:0"] = tf.Variable(
[[1.0], [1.0]]
)
params.non_embedding_params["dense/bias:0"] = tf.Variable([1.0])
params.version = 100
model_pb = params.to_model_pb()
checkpoint_saver.save(100, model_pb, False)
checkpoint_dir_for_init = checkpoint_dir + "/version-100"
args = PserverArgs(
ps_id=0,
num_ps_pods=2,
model_zoo=_test_model_zoo_path,
model_def="test_module.custom_model",
checkpoint_dir_for_init=checkpoint_dir_for_init,
)
pserver_0 = ParameterServer(args)
embedding_table = pserver_0.parameters.embedding_params["embedding"]
self.assertEqual(
list(embedding_table.embedding_vectors.keys()), [0, 2]
)
self.assertEqual(
list(pserver_0.parameters.non_embedding_params.keys()),
["dense/kernel:0"],
)
self.assertTrue(
np.array_equal(
pserver_0.parameters.non_embedding_params[
"dense/kernel:0"
].numpy(),
np.array([[1], [1]], dtype=int),
)
)
self.assertEqual(pserver_0.parameters.version, 100)
args = PserverArgs(
ps_id=1,
num_ps_pods=2,
model_zoo=_test_model_zoo_path,
model_def="test_module.custom_model",
checkpoint_dir_for_init=checkpoint_dir_for_init,
)
pserver_1 = ParameterServer(args)
embedding_table = pserver_1.parameters.embedding_params["embedding"]
self.assertEqual(
list(embedding_table.embedding_vectors.keys()), [1, 3]
)
self.assertEqual(
list(pserver_1.parameters.non_embedding_params.keys()),
["dense/bias:0"],
)
self.assertTrue(
np.array_equal(
pserver_1.parameters.non_embedding_params[
"dense/bias:0"
].numpy(),
np.array([1], dtype=int),
)
)
self.assertEqual(pserver_1.parameters.version, 100)
if __name__ == "__main__":
unittest.main()
|
[
"elasticdl.proto.elasticdl_pb2.Model",
"elasticdl.python.ps.embedding_table.get_slot_table_name",
"numpy.random.rand",
"elasticdl.proto.elasticdl_pb2.PullEmbeddingVectorRequest",
"elasticdl.python.ps.parameters.Parameters",
"numpy.array",
"elasticdl.python.common.save_utils.CheckpointSaver",
"elasticdl.python.common.tensor_utils.pb_to_ndarray",
"unittest.main",
"elasticdl.python.tests.test_utils.PserverArgs",
"grpc.channel_ready_future",
"os.listdir",
"elasticdl.python.common.tensor_utils.serialize_ndarray",
"elasticdl.python.common.model_utils.get_module_file_path",
"elasticdl.proto.elasticdl_pb2.EmbeddingTableInfo",
"numpy.asarray",
"elasticdl.proto.elasticdl_pb2.PullDenseParametersRequest",
"google.protobuf.empty_pb2.Empty",
"elasticdl.proto.elasticdl_pb2.PushGradientsRequest",
"numpy.allclose",
"elasticdl.python.common.model_utils.load_module",
"numpy.ones",
"elasticdl.python.common.tensor_utils.serialize_indexed_slices",
"tensorflow.Variable",
"elasticdl.proto.elasticdl_pb2_grpc.PserverStub",
"elasticdl.python.ps.parameter_server.ParameterServer",
"numpy.copy",
"tempfile.TemporaryDirectory",
"elasticdl.python.ps.embedding_table.EmbeddingTable",
"os.path.join",
"os.path.realpath",
"numpy.array_equal",
"tensorflow.keras.initializers.get",
"elasticai_api.util.grpc_utils.build_channel"
] |
[((1566, 1636), 'elasticdl.python.common.model_utils.get_module_file_path', 'get_module_file_path', (['_test_model_zoo_path', '"""test_module.custom_model"""'], {}), "(_test_model_zoo_path, 'test_module.custom_model')\n", (1586, 1636), False, 'from elasticdl.python.common.model_utils import get_module_file_path, load_module\n'), ((1523, 1549), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (1539, 1549), False, 'import os\n'), ((21042, 21057), 'unittest.main', 'unittest.main', ([], {}), '()\n', (21055, 21057), False, 'import unittest\n'), ((1805, 1824), 'elasticai_api.util.grpc_utils.build_channel', 'build_channel', (['addr'], {}), '(addr)\n', (1818, 1824), False, 'from elasticai_api.util.grpc_utils import build_channel\n'), ((1850, 1884), 'elasticdl.proto.elasticdl_pb2.EmbeddingTableInfo', 'elasticdl_pb2.EmbeddingTableInfo', ([], {}), '()\n', (1882, 1884), False, 'from elasticdl.proto import elasticdl_pb2, elasticdl_pb2_grpc\n'), ((2289, 2509), 'elasticdl.python.tests.test_utils.PserverArgs', 'PserverArgs', ([], {'grads_to_wait': 'grads_to_wait', 'lr_staleness_modulation': 'lr_staleness_modulation', 'use_async': 'use_async', 'port': 'self._port', 'model_zoo': '_test_model_zoo_path', 'model_def': '"""test_module.custom_model"""'}), "(grads_to_wait=grads_to_wait, lr_staleness_modulation=\n lr_staleness_modulation, use_async=use_async, port=self._port,\n model_zoo=_test_model_zoo_path, model_def='test_module.custom_model',\n **kwargs)\n", (2300, 2509), False, 'from elasticdl.python.tests.test_utils import PserverArgs\n'), ((2609, 2630), 'elasticdl.python.ps.parameter_server.ParameterServer', 'ParameterServer', (['args'], {}), '(args)\n', (2624, 2630), False, 'from elasticdl.python.ps.parameter_server import ParameterServer\n'), ((2762, 2807), 'elasticdl.proto.elasticdl_pb2_grpc.PserverStub', 'elasticdl_pb2_grpc.PserverStub', (['self._channel'], {}), '(self._channel)\n', (2792, 2807), False, 'from elasticdl.proto import elasticdl_pb2, elasticdl_pb2_grpc\n'), ((3226, 3268), 'elasticdl.proto.elasticdl_pb2.PullEmbeddingVectorRequest', 'elasticdl_pb2.PullEmbeddingVectorRequest', ([], {}), '()\n', (3266, 3268), False, 'from elasticdl.proto import elasticdl_pb2, elasticdl_pb2_grpc\n'), ((6569, 6611), 'elasticdl.proto.elasticdl_pb2.PullDenseParametersRequest', 'elasticdl_pb2.PullDenseParametersRequest', ([], {}), '()\n', (6609, 6611), False, 'from elasticdl.proto import elasticdl_pb2, elasticdl_pb2_grpc\n'), ((6837, 6858), 'elasticdl.proto.elasticdl_pb2.Model', 'elasticdl_pb2.Model', ([], {}), '()\n', (6856, 6858), False, 'from elasticdl.proto import elasticdl_pb2, elasticdl_pb2_grpc\n'), ((7893, 7914), 'elasticdl.proto.elasticdl_pb2.Model', 'elasticdl_pb2.Model', ([], {}), '()\n', (7912, 7914), False, 'from elasticdl.proto import elasticdl_pb2, elasticdl_pb2_grpc\n'), ((8035, 8069), 'elasticdl.proto.elasticdl_pb2.EmbeddingTableInfo', 'elasticdl_pb2.EmbeddingTableInfo', ([], {}), '()\n', (8067, 8069), False, 'from elasticdl.proto import elasticdl_pb2, elasticdl_pb2_grpc\n'), ((10666, 10687), 'elasticdl.proto.elasticdl_pb2.Model', 'elasticdl_pb2.Model', ([], {}), '()\n', (10685, 10687), False, 'from elasticdl.proto import elasticdl_pb2, elasticdl_pb2_grpc\n'), ((11523, 11559), 'elasticdl.proto.elasticdl_pb2.PushGradientsRequest', 'elasticdl_pb2.PushGradientsRequest', ([], {}), '()\n', (11557, 11559), False, 'from elasticdl.proto import elasticdl_pb2, elasticdl_pb2_grpc\n'), ((11702, 11813), 'elasticdl.python.common.tensor_utils.serialize_indexed_slices', 
'serialize_indexed_slices', (['self.embedding_grads0', 'req.gradients.embedding_tables[self._embedding_info.name]'], {}), '(self.embedding_grads0, req.gradients.\n embedding_tables[self._embedding_info.name])\n', (11726, 11813), False, 'from elasticdl.python.common.tensor_utils import Tensor, pb_to_ndarray, serialize_indexed_slices, serialize_ndarray\n'), ((12411, 12440), 'numpy.copy', 'np.copy', (['self.embedding_table'], {}), '(self.embedding_table)\n', (12418, 12440), True, 'import numpy as np\n'), ((13036, 13072), 'elasticdl.proto.elasticdl_pb2.PushGradientsRequest', 'elasticdl_pb2.PushGradientsRequest', ([], {}), '()\n', (13070, 13072), False, 'from elasticdl.proto import elasticdl_pb2, elasticdl_pb2_grpc\n'), ((13081, 13176), 'elasticdl.python.common.tensor_utils.serialize_ndarray', 'serialize_ndarray', (['self.grad_values1[1]', 'req.gradients.dense_parameters[self.var_names[0]]'], {}), '(self.grad_values1[1], req.gradients.dense_parameters[self\n .var_names[0]])\n', (13098, 13176), False, 'from elasticdl.python.common.tensor_utils import Tensor, pb_to_ndarray, serialize_indexed_slices, serialize_ndarray\n'), ((13972, 14008), 'elasticdl.proto.elasticdl_pb2.PushGradientsRequest', 'elasticdl_pb2.PushGradientsRequest', ([], {}), '()\n', (14006, 14008), False, 'from elasticdl.proto import elasticdl_pb2, elasticdl_pb2_grpc\n'), ((14185, 14296), 'elasticdl.python.common.tensor_utils.serialize_indexed_slices', 'serialize_indexed_slices', (['self.embedding_grads0', 'req.gradients.embedding_tables[self._embedding_info.name]'], {}), '(self.embedding_grads0, req.gradients.\n embedding_tables[self._embedding_info.name])\n', (14209, 14296), False, 'from elasticdl.python.common.tensor_utils import Tensor, pb_to_ndarray, serialize_indexed_slices, serialize_ndarray\n'), ((14474, 14510), 'elasticdl.proto.elasticdl_pb2.PushGradientsRequest', 'elasticdl_pb2.PushGradientsRequest', ([], {}), '()\n', (14508, 14510), False, 'from elasticdl.proto import elasticdl_pb2, elasticdl_pb2_grpc\n'), ((14687, 14798), 'elasticdl.python.common.tensor_utils.serialize_indexed_slices', 'serialize_indexed_slices', (['self.embedding_grads1', 'req.gradients.embedding_tables[self._embedding_info.name]'], {}), '(self.embedding_grads1, req.gradients.\n embedding_tables[self._embedding_info.name])\n', (14711, 14798), False, 'from elasticdl.python.common.tensor_utils import Tensor, pb_to_ndarray, serialize_indexed_slices, serialize_ndarray\n'), ((14975, 15011), 'elasticdl.proto.elasticdl_pb2.PushGradientsRequest', 'elasticdl_pb2.PushGradientsRequest', ([], {}), '()\n', (15009, 15011), False, 'from elasticdl.proto import elasticdl_pb2, elasticdl_pb2_grpc\n'), ((15870, 15899), 'numpy.copy', 'np.copy', (['self.embedding_table'], {}), '(self.embedding_table)\n', (15877, 15899), True, 'import numpy as np\n'), ((18574, 18618), 'elasticdl.python.common.save_utils.CheckpointSaver', 'CheckpointSaver', (['checkpoint_dir', '(0)', '(0)', '(False)'], {}), '(checkpoint_dir, 0, 0, False)\n', (18589, 18618), False, 'from elasticdl.python.common.save_utils import CheckpointSaver\n'), ((18636, 18648), 'elasticdl.python.ps.parameters.Parameters', 'Parameters', ([], {}), '()\n', (18646, 18648), False, 'from elasticdl.python.ps.parameters import Parameters\n'), ((18665, 18713), 'elasticdl.python.ps.embedding_table.EmbeddingTable', 'EmbeddingTable', (['"""embedding"""', '(2)', '"""random_uniform"""'], {}), "('embedding', 2, 'random_uniform')\n", (18679, 18713), False, 'from elasticdl.python.ps.embedding_table import EmbeddingTable, 
get_slot_table_name\n'), ((18890, 18917), 'tensorflow.Variable', 'tf.Variable', (['[[1.0], [1.0]]'], {}), '([[1.0], [1.0]])\n', (18901, 18917), True, 'import tensorflow as tf\n'), ((18994, 19012), 'tensorflow.Variable', 'tf.Variable', (['[1.0]'], {}), '([1.0])\n', (19005, 19012), True, 'import tensorflow as tf\n'), ((19216, 19379), 'elasticdl.python.tests.test_utils.PserverArgs', 'PserverArgs', ([], {'ps_id': '(0)', 'num_ps_pods': '(2)', 'model_zoo': '_test_model_zoo_path', 'model_def': '"""test_module.custom_model"""', 'checkpoint_dir_for_init': 'checkpoint_dir_for_init'}), "(ps_id=0, num_ps_pods=2, model_zoo=_test_model_zoo_path,\n model_def='test_module.custom_model', checkpoint_dir_for_init=\n checkpoint_dir_for_init)\n", (19227, 19379), False, 'from elasticdl.python.tests.test_utils import PserverArgs\n'), ((19462, 19483), 'elasticdl.python.ps.parameter_server.ParameterServer', 'ParameterServer', (['args'], {}), '(args)\n', (19477, 19483), False, 'from elasticdl.python.ps.parameter_server import ParameterServer\n'), ((20126, 20289), 'elasticdl.python.tests.test_utils.PserverArgs', 'PserverArgs', ([], {'ps_id': '(1)', 'num_ps_pods': '(2)', 'model_zoo': '_test_model_zoo_path', 'model_def': '"""test_module.custom_model"""', 'checkpoint_dir_for_init': 'checkpoint_dir_for_init'}), "(ps_id=1, num_ps_pods=2, model_zoo=_test_model_zoo_path,\n model_def='test_module.custom_model', checkpoint_dir_for_init=\n checkpoint_dir_for_init)\n", (20137, 20289), False, 'from elasticdl.python.tests.test_utils import PserverArgs\n'), ((20372, 20393), 'elasticdl.python.ps.parameter_server.ParameterServer', 'ParameterServer', (['args'], {}), '(args)\n', (20387, 20393), False, 'from elasticdl.python.ps.parameter_server import ParameterServer\n'), ((3439, 3457), 'elasticdl.python.common.tensor_utils.pb_to_ndarray', 'pb_to_ndarray', (['res'], {}), '(res)\n', (3452, 3457), False, 'from elasticdl.python.common.tensor_utils import Tensor, pb_to_ndarray, serialize_indexed_slices, serialize_ndarray\n'), ((4109, 4142), 'numpy.ones', 'np.ones', (['[3, 2]'], {'dtype': 'np.float32'}), '([3, 2], dtype=np.float32)\n', (4116, 4142), True, 'import numpy as np\n'), ((4162, 4197), 'numpy.ones', 'np.ones', (['[10, 32]'], {'dtype': 'np.float32'}), '([10, 32], dtype=np.float32)\n', (4169, 4197), True, 'import numpy as np\n'), ((4308, 4329), 'elasticdl.proto.elasticdl_pb2.Model', 'elasticdl_pb2.Model', ([], {}), '()\n', (4327, 4329), False, 'from elasticdl.proto import elasticdl_pb2, elasticdl_pb2_grpc\n'), ((6936, 6986), 'elasticdl.python.common.tensor_utils.serialize_ndarray', 'serialize_ndarray', (['var', 'req.dense_parameters[name]'], {}), '(var, req.dense_parameters[name])\n', (6953, 6986), False, 'from elasticdl.python.common.tensor_utils import Tensor, pb_to_ndarray, serialize_indexed_slices, serialize_ndarray\n'), ((7058, 7075), 'google.protobuf.empty_pb2.Empty', 'empty_pb2.Empty', ([], {}), '()\n', (7073, 7075), False, 'from google.protobuf import empty_pb2\n'), ((7331, 7348), 'elasticdl.python.common.tensor_utils.pb_to_ndarray', 'pb_to_ndarray', (['pb'], {}), '(pb)\n', (7344, 7348), False, 'from elasticdl.python.common.tensor_utils import Tensor, pb_to_ndarray, serialize_indexed_slices, serialize_ndarray\n'), ((8348, 8365), 'google.protobuf.empty_pb2.Empty', 'empty_pb2.Empty', ([], {}), '()\n', (8363, 8365), False, 'from google.protobuf import empty_pb2\n'), ((9741, 9781), 'numpy.array', 'np.array', (['[10.0, 20.0, 30.0]', 'np.float32'], {}), '([10.0, 20.0, 30.0], np.float32)\n', (9749, 9781), True, 'import numpy as 
np\n'), ((9795, 9835), 'numpy.array', 'np.array', (['[20.0, 40.0, 60.0]', 'np.float32'], {}), '([20.0, 40.0, 60.0], np.float32)\n', (9803, 9835), True, 'import numpy as np\n'), ((9889, 9926), 'numpy.array', 'np.array', (['[1.0, 2.0, 3.0]', 'np.float32'], {}), '([1.0, 2.0, 3.0], np.float32)\n', (9897, 9926), True, 'import numpy as np\n'), ((9940, 9977), 'numpy.array', 'np.array', (['[2.0, 4.0, 6.0]', 'np.float32'], {}), '([2.0, 4.0, 6.0], np.float32)\n', (9948, 9977), True, 'import numpy as np\n'), ((10031, 10068), 'numpy.array', 'np.array', (['[0.0, 0.0, 7.0]', 'np.float32'], {}), '([0.0, 0.0, 7.0], np.float32)\n', (10039, 10068), True, 'import numpy as np\n'), ((10082, 10119), 'numpy.array', 'np.array', (['[9.0, 9.0, 6.0]', 'np.float32'], {}), '([9.0, 9.0, 6.0], np.float32)\n', (10090, 10119), True, 'import numpy as np\n'), ((10431, 10452), 'numpy.asarray', 'np.asarray', (['[3, 1, 3]'], {}), '([3, 1, 3])\n', (10441, 10452), True, 'import numpy as np\n'), ((10608, 10629), 'numpy.asarray', 'np.asarray', (['[2, 2, 3]'], {}), '([2, 2, 3])\n', (10618, 10629), True, 'import numpy as np\n'), ((10823, 10886), 'elasticdl.python.common.tensor_utils.serialize_ndarray', 'serialize_ndarray', (['value', 'push_model_req.dense_parameters[name]'], {}), '(value, push_model_req.dense_parameters[name])\n', (10840, 10886), False, 'from elasticdl.python.common.tensor_utils import Tensor, pb_to_ndarray, serialize_indexed_slices, serialize_ndarray\n'), ((11129, 11145), 'tensorflow.Variable', 'tf.Variable', (['var'], {}), '(var)\n', (11140, 11145), True, 'import tensorflow as tf\n'), ((11635, 11693), 'elasticdl.python.common.tensor_utils.serialize_ndarray', 'serialize_ndarray', (['g', 'req.gradients.dense_parameters[name]'], {}), '(g, req.gradients.dense_parameters[name])\n', (11652, 11693), False, 'from elasticdl.python.common.tensor_utils import Tensor, pb_to_ndarray, serialize_indexed_slices, serialize_ndarray\n'), ((12779, 12832), 'numpy.allclose', 'np.allclose', (['expected_embed_table', 'actual_embed_table'], {}), '(expected_embed_table, actual_embed_table)\n', (12790, 12832), True, 'import numpy as np\n'), ((13005, 13021), 'tensorflow.Variable', 'tf.Variable', (['var'], {}), '(var)\n', (13016, 13021), True, 'import tensorflow as tf\n'), ((14118, 14176), 'elasticdl.python.common.tensor_utils.serialize_ndarray', 'serialize_ndarray', (['g', 'req.gradients.dense_parameters[name]'], {}), '(g, req.gradients.dense_parameters[name])\n', (14135, 14176), False, 'from elasticdl.python.common.tensor_utils import Tensor, pb_to_ndarray, serialize_indexed_slices, serialize_ndarray\n'), ((14620, 14678), 'elasticdl.python.common.tensor_utils.serialize_ndarray', 'serialize_ndarray', (['g', 'req.gradients.dense_parameters[name]'], {}), '(g, req.gradients.dense_parameters[name])\n', (14637, 14678), False, 'from elasticdl.python.common.tensor_utils import Tensor, pb_to_ndarray, serialize_indexed_slices, serialize_ndarray\n'), ((15121, 15179), 'elasticdl.python.common.tensor_utils.serialize_ndarray', 'serialize_ndarray', (['g', 'req.gradients.dense_parameters[name]'], {}), '(g, req.gradients.dense_parameters[name])\n', (15138, 15179), False, 'from elasticdl.python.common.tensor_utils import Tensor, pb_to_ndarray, serialize_indexed_slices, serialize_ndarray\n'), ((16402, 16455), 'numpy.allclose', 'np.allclose', (['expected_embed_table', 'actual_embed_table'], {}), '(expected_embed_table, actual_embed_table)\n', (16413, 16455), True, 'import numpy as np\n'), ((16526, 16555), 'tempfile.TemporaryDirectory', 
'tempfile.TemporaryDirectory', ([], {}), '()\n', (16553, 16555), False, 'import tempfile\n'), ((17476, 17547), 'elasticdl.python.ps.embedding_table.EmbeddingTable', 'EmbeddingTable', ([], {'name': '"""embedding_0"""', 'dim': '(3)', 'initializer': '"""random_uniform"""'}), "(name='embedding_0', dim=3, initializer='random_uniform')\n", (17490, 17547), False, 'from elasticdl.python.ps.embedding_table import EmbeddingTable, get_slot_table_name\n'), ((18746, 18779), 'numpy.ones', 'np.ones', (['(4, 2)'], {'dtype': 'np.float32'}), '((4, 2), dtype=np.float32)\n', (18753, 18779), True, 'import numpy as np\n'), ((2816, 2856), 'grpc.channel_ready_future', 'grpc.channel_ready_future', (['self._channel'], {}), '(self._channel)\n', (2841, 2856), False, 'import grpc\n'), ((4411, 4469), 'elasticdl.python.common.tensor_utils.serialize_ndarray', 'serialize_ndarray', (['model[name]', 'req.dense_parameters[name]'], {}), '(model[name], req.dense_parameters[name])\n', (4428, 4469), False, 'from elasticdl.python.common.tensor_utils import Tensor, pb_to_ndarray, serialize_indexed_slices, serialize_ndarray\n'), ((4616, 4633), 'google.protobuf.empty_pb2.Empty', 'empty_pb2.Empty', ([], {}), '()\n', (4631, 4633), False, 'from google.protobuf import empty_pb2\n'), ((5863, 5920), 'elasticdl.python.ps.embedding_table.get_slot_table_name', 'get_slot_table_name', (['self._embedding_info.name', 'slot_name'], {}), '(self._embedding_info.name, slot_name)\n', (5882, 5920), False, 'from elasticdl.python.ps.embedding_table import EmbeddingTable, get_slot_table_name\n'), ((7377, 7410), 'numpy.allclose', 'np.allclose', (['param0[name]', 'tensor'], {}), '(param0[name], tensor)\n', (7388, 7410), True, 'import numpy as np\n'), ((17143, 17178), 'tensorflow.Variable', 'tf.Variable', (['[[1, 1, 1], [1, 1, 1]]'], {}), '([[1, 1, 1], [1, 1, 1]])\n', (17154, 17178), True, 'import tensorflow as tf\n'), ((17202, 17237), 'tensorflow.Variable', 'tf.Variable', (['[[2, 2, 2], [2, 2, 2]]'], {}), '([[2, 2, 2], [2, 2, 2]])\n', (17213, 17237), True, 'import tensorflow as tf\n'), ((18309, 18365), 'os.listdir', 'os.listdir', (["(checkpoint_saver._directory + '/version-100')"], {}), "(checkpoint_saver._directory + '/version-100')\n", (18319, 18365), False, 'import os\n'), ((19993, 20024), 'numpy.array', 'np.array', (['[[1], [1]]'], {'dtype': 'int'}), '([[1], [1]], dtype=int)\n', (20001, 20024), True, 'import numpy as np\n'), ((20899, 20923), 'numpy.array', 'np.array', (['[1]'], {'dtype': 'int'}), '([1], dtype=int)\n', (20907, 20923), True, 'import numpy as np\n'), ((3583, 3608), 'elasticdl.python.common.model_utils.load_module', 'load_module', (['_module_file'], {}), '(_module_file)\n', (3594, 3608), False, 'from elasticdl.python.common.model_utils import get_module_file_path, load_module\n'), ((3960, 3980), 'numpy.random.rand', 'np.random.rand', (['(3)', '(2)'], {}), '(3, 2)\n', (3974, 3980), True, 'import numpy as np\n'), ((4019, 4041), 'numpy.random.rand', 'np.random.rand', (['(10)', '(32)'], {}), '(10, 32)\n', (4033, 4041), True, 'import numpy as np\n'), ((5537, 5596), 'tensorflow.keras.initializers.get', 'tf.keras.initializers.get', (['self._embedding_info.initializer'], {}), '(self._embedding_info.initializer)\n', (5562, 5596), True, 'import tensorflow as tf\n'), ((6438, 6458), 'numpy.random.rand', 'np.random.rand', (['(3)', '(2)'], {}), '(3, 2)\n', (6452, 6458), True, 'import numpy as np\n'), ((6497, 6519), 'numpy.random.rand', 'np.random.rand', (['(10)', '(32)'], {}), '(10, 32)\n', (6511, 6519), True, 'import numpy as np\n'), ((16647, 16677), 
'os.path.join', 'os.path.join', (['tempdir', '"""ckpt/"""'], {}), "(tempdir, 'ckpt/')\n", (16659, 16677), False, 'import os\n'), ((16885, 16897), 'elasticdl.python.ps.parameters.Parameters', 'Parameters', ([], {}), '()\n', (16895, 16897), False, 'from elasticdl.python.ps.parameters import Parameters\n'), ((17761, 17777), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (17769, 17777), True, 'import numpy as np\n'), ((17802, 17834), 'numpy.array', 'np.array', (['[[1, 1, 1], [2, 2, 2]]'], {}), '([[1, 1, 1], [2, 2, 2]])\n', (17810, 17834), True, 'import numpy as np\n'), ((18048, 18087), 'os.listdir', 'os.listdir', (['checkpoint_saver._directory'], {}), '(checkpoint_saver._directory)\n', (18058, 18087), False, 'import os\n'), ((18146, 18185), 'os.listdir', 'os.listdir', (['checkpoint_saver._directory'], {}), '(checkpoint_saver._directory)\n', (18156, 18185), False, 'import os\n'), ((9302, 9354), 'numpy.array_equal', 'np.array_equal', (['vectors_a_0[idx0]', 'vectors_a_1[idx1]'], {}), '(vectors_a_0[idx0], vectors_a_1[idx1])\n', (9316, 9354), True, 'import numpy as np\n'), ((9438, 9490), 'numpy.array_equal', 'np.array_equal', (['vectors_b_0[idx0]', 'vectors_b_1[idx1]'], {}), '(vectors_b_0[idx0], vectors_b_1[idx1])\n', (9452, 9490), True, 'import numpy as np\n'), ((10216, 10239), 'numpy.random.rand', 'np.random.rand', (['(4 * dim)'], {}), '(4 * dim)\n', (10230, 10239), True, 'import numpy as np\n'), ((10357, 10380), 'numpy.random.rand', 'np.random.rand', (['(3 * dim)'], {}), '(3 * dim)\n', (10371, 10380), True, 'import numpy as np\n'), ((10534, 10557), 'numpy.random.rand', 'np.random.rand', (['(3 * dim)'], {}), '(3 * dim)\n', (10548, 10557), True, 'import numpy as np\n')]
|
import matplotlib.pyplot as plt
import numpy as np
import sys
from os import walk
import matplotlib.colors as mcolors
'''
Parse an interval log file (one "key,start-end,start-end,..." line per entry).
Return: table of lists of intervals {[%f,%f]}
'''
def read_log_file(filename):
result = {}
f = open(filename, 'r')
lines = f.readlines()
for line in lines:
line = line.replace('\n', '')
time_ranges = line.split(',')
entry_key = time_ranges[0]
result[entry_key] = []
for i in range(1, len(time_ranges)):
time_range = time_ranges[i].split("-")
result[entry_key].append([float(time_range[0]),float(time_range[1])])
'''
if len(result[entry_key]) == 0:
del result[entry_key]
'''
f.close()
return result
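# Note (format inferred from the parser above, not from a separate spec): each
# line of an interval log looks like "<entry_key>,<start>-<end>,<start>-<end>,...",
# e.g. "release_lock_bulk_transfer_write,0.10-0.25,0.40-0.55" parses to
# {'release_lock_bulk_transfer_write': [[0.10, 0.25], [0.40, 0.55]]}.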
'''
Parse a timing log file (one "key,value" line per entry).
Return: table of timings {%f}
'''
def read_timing_file(filename):
result = {}
f = open(filename, 'r')
lines = f.readlines()
for line in lines:
line = line.replace('\n', '')
timings = line.split(',')
entry_key = timings[0]
        result[entry_key] = float(timings[1])
f.close()
return result
'''
Input: list of timing tables [{%f}]
Output: [sums, max, min, counts], one table {%f} per statistic; callers divide
the sums by the counts to obtain per-key means.
'''
def summarize_log_file(time_logs):
time_log_means = {}
time_log_counts = {}
time_log_max = {}
time_log_min = {}
keys = set()
for time_log in time_logs:
for k in time_log:
keys.add(k)
for time_log in time_logs:
for k in keys:
if k in time_log:
if k not in time_log_means:
time_log_counts[k] = 1
time_log_means[k] = time_log[k]
time_log_max[k] = time_log[k]
time_log_min[k] = time_log[k]
else:
time_log_counts[k] += 1
time_log_means[k] += time_log[k]
time_log_max[k] = max(time_log_max[k], time_log[k])
time_log_min[k] = min(time_log_min[k], time_log[k])
return [time_log_means, time_log_max, time_log_min, time_log_counts]
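# Illustrative example (made-up numbers): summarize_log_file([{'a': 1.0}, {'a': 3.0}])
# returns [{'a': 4.0}, {'a': 3.0}, {'a': 1.0}, {'a': 2}]; the first table holds sums
# until the caller divides by the counts, as pdc_log_analysis() does below.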
def interval_to_log(all_intervals):
results = []
for intervals in all_intervals:
next_timing = {}
for k in intervals:
next_timing[k] = .0
for i in range(0,len(intervals[k])):
next_timing[k] += intervals[k][i][1] - intervals[k][i][0]
results.append(next_timing)
return results
'''
Shift all intervals in the table by `base` (in place) so that plotted times start at zero.
Input: Table for lists of intervals {[%f,%f]}
'''
def rescale_time(all_intervals, base):
    for k in all_intervals:
        for i in range(0, len(all_intervals[k])):
            # Each interval is a plain [start, end] list, so shift both endpoints.
            all_intervals[k][i][0] -= base
            all_intervals[k][i][1] -= base
def plot_interval(all_intervals, index, key_name):
y = [index, index]
for interval in all_intervals:
plt.plot(interval, y, color = 'r')
index += 1
def check_interval_overlap(interval1, interval2):
return not (interval1[0] > interval2[1] or interval2[0] > interval1[1])
def check_all_intervals(input_intervals):
intervals = input_intervals[:]
intervals.sort(key=sort_by_lower_bound)
for i in range(1, len(intervals)):
if check_interval_overlap(intervals[i-1], intervals[i]):
return 1
return 0
'''
Intervals that overlap with each other will be merged.
'''
def coalesce_intervals(intervals):
intervals.sort(key=sort_by_lower_bound)
previous = 0
for i in range(1, len(intervals)):
if intervals[i][0] < intervals[previous][1]:
intervals[previous][1] = max(intervals[i][1], intervals[previous][1])
else:
previous += 1
intervals[previous][0] = intervals[i][0]
intervals[previous][1] = intervals[i][1]
if previous != len(intervals) - 1:
del intervals[-(len(intervals) - previous - 1):]
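# Minimal usage sketch for coalesce_intervals (hypothetical data, illustration only).
def _coalesce_intervals_example():
    intervals = [[0.0, 1.0], [0.5, 2.0], [3.0, 4.0]]
    coalesce_intervals(intervals)
    # The first two intervals overlap and are merged: [[0.0, 2.0], [3.0, 4.0]].
    return intervals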
'''
Coalesce all intervals in each of the table entry.
Input: List of tables for interval lists [{[%f,%f]}]
Output: Same shape as intervals, but the inner list [%f,%f] are coalesced intervals
'''
def coalesce_all_intervals(all_intervals):
for intervals in all_intervals:
for k in intervals:
coalesce_intervals(intervals[k])
def sort_by_lower_bound(interval):
return interval[0]
def check_interval_gaps(input_intervals):
if input_intervals is None or len(input_intervals) ==0:
return None
intervals=input_intervals[:]
gaps = []
intervals.sort(key=sort_by_lower_bound)
previous = intervals[0][1]
for i in range(1, len(intervals)):
if previous < intervals[i][0]:
gaps.append((previous,intervals[i][0]))
previous = intervals[i][1]
return gaps
def total_interval_length(gaps):
return np.sum([(gap[1] - gap[0]) for gap in gaps])
def total_interval_std(gaps):
return np.std([(gap[1] - gap[0]) for gap in gaps])
def max_interval(gaps):
return np.max([(gap[1] - gap[0]) for gap in gaps])
def merge_intervals(all_intervals):
result = []
for k in all_intervals:
for interval in all_intervals[k]:
result.append(interval)
return result
def read_clients(n_clients, base, path):
result = []
for i in range(0, n_clients):
filename = '{0}/pdc_client_log_rank_{1}.csv'.format(path, i)
result.append(read_log_file(filename))
rescale_time(result[i], base)
return result
def plot_all(server_intervals, client_intervals):
plt.figure()
plt.xlabel('time/sec')
for k in server_intervals:
plot_interval(server_intervals[k], 2, k)
for i in range(0, len(client_intervals)):
for k in client_intervals[i]:
plot_interval(client_intervals[i][k], 3 + i, k)
plt.savefig('{0}'.format("test_figure.pdf"))
plt.close()
def pdc_log_analysis(path):
print('====== Start analyzing path {0} ======'.format(path))
filenames = next(walk(path), (None, None, []))[2]
time_logs = []
interval_logs = []
for filename in filenames:
if 'pdc_server_timings' in filename:
full_filename = '{0}/{1}'.format(path, filename)
time_logs.append(read_timing_file(full_filename))
elif 'pdc_server_log_rank' in filename:
full_filename = '{0}/{1}'.format(path, filename)
interval_logs.append(read_log_file(full_filename))
time_log_means, time_log_max, time_log_min, time_log_counts = summarize_log_file(time_logs)
for k in time_log_means:
time_log_means[k] /= time_log_counts[k]
print('Key = {0}, mean = {1:.4}, min = {2:.4}, max = {3:.4}, count = {4}'.format(k, time_log_means[k], time_log_min[k], time_log_max[k], time_log_counts[k]))
base = np.min([np.min([server_intervals[x][0] for x in server_intervals if len(server_intervals[x]) > 0]) for server_intervals in interval_logs])
for server_intervals in interval_logs:
rescale_time(server_intervals, base)
coalesce_all_intervals(interval_logs)
interval_time_logs = interval_to_log(interval_logs)
time_log_means, time_log_max, time_log_min, time_log_counts = summarize_log_file(interval_time_logs)
print('start to print data from interval logs')
for k in time_log_means:
time_log_means[k] /= time_log_counts[k]
print('Key = {0}, mean = {1:.4}, min = {2:.4}, max = {3:.4}, count = {4}'.format(k, time_log_means[k], time_log_min[k], time_log_max[k], time_log_counts[k]))
return interval_time_logs, time_logs
def wrap_io_data(interval_time_logs, time_logs):
return np.mean([time_logs[i]['PDCreg_release_lock_bulk_transfer_inner_write_rpc'] + time_logs[i]['PDCregion_transfer_request_inner_write_bulk_rpc'] for i in range(0, len(time_logs))])
def wrap_comm_data(interval_time_logs, time_logs):
return np.mean([interval_time_logs[i]['transfer_request_wait_write_bulk'] + interval_time_logs[i]['release_lock_bulk_transfer_write'] for i in range(0, len(interval_time_logs))])
def wrap_other_data(interval_time_logs, time_logs):
return np.mean([time_logs[i]['PDCregion_transfer_start_write_rpc'] + time_logs[i]['PDCregion_transfer_wait_write_rpc'] + time_logs[i]['PDCreg_release_lock_write_rpc'] + time_logs[i]['PDCreg_obtain_lock_write_rpc'] for i in range(0, len(time_logs))] )
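# The three wrappers above split the mean per-server time into an I/O part, a
# communication (bulk-transfer wait) part, and the remaining RPC overhead; main()
# below stacks them into one bar per run configuration.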
def main():
if len(sys.argv) == 2:
base_path = sys.argv[1]
if base_path[len(base_path) - 1] != '/':
base_path = '{0}/'.format(base_path)
else:
base_path = ''
path = "{0}shared_mode/vpic_old_results".format(base_path)
shared_old_interval_logs, shared_old_time_logs = pdc_log_analysis(path)
path = "{0}shared_mode/vpic_results".format(base_path)
shared_interval_logs, shared_time_logs = pdc_log_analysis(path)
path = "{0}dedicated_mode/vpic_old_results".format(base_path)
dedicated_old_interval_logs, dedicated_old_time_logs = pdc_log_analysis(path)
path = "{0}dedicated_mode/vpic_results".format(base_path)
dedicated_interval_logs, dedicated_time_logs = pdc_log_analysis(path)
all_interval_logs = [shared_old_interval_logs, shared_interval_logs, dedicated_old_interval_logs, dedicated_interval_logs]
all_time_logs = [shared_old_time_logs, shared_time_logs, dedicated_old_time_logs, dedicated_time_logs]
plt.figure()
width = 0.35
n_bars = 4
x_labels = np.arange(n_bars)
io_bar = [wrap_io_data(all_interval_logs[i], all_time_logs[i]) for i in range(0, n_bars)]
p_io_bar = plt.bar(x_labels, io_bar, width)
print("I/O bar")
print(io_bar)
comm_bar = [wrap_comm_data(all_interval_logs[i], all_time_logs[i]) for i in range(0, n_bars)]
p_comm_bar = plt.bar(x_labels, comm_bar, width, bottom=io_bar)
print("Comm bar")
print(comm_bar)
other_bar = [wrap_other_data(all_interval_logs[i], all_time_logs[i]) for i in range(0, n_bars)]
p_other_bar = plt.bar(x_labels, other_bar, width, bottom=[comm_bar[i]+io_bar[i] for i in range(0, n_bars)])
print("Other bar")
print(other_bar)
plt.ylabel('Timing/sec')
plt.title('Server Breakdown Timing With Cache')
plt.legend((p_io_bar[0], p_comm_bar[0], p_other_bar[0]), ('I/O', 'Comm', 'Other'))
plt.xticks(x_labels, ('shared_bm', 'shared_tr', 'dedicated_bm', 'dedicated_tr'))
plt.savefig('{0}'.format("server_breakdown_cache.pdf"))
plt.close()
if __name__== "__main__":
main()
|
[
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"os.walk",
"numpy.max",
"matplotlib.pyplot.close",
"numpy.sum",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.bar",
"numpy.std",
"matplotlib.pyplot.title",
"numpy.arange"
] |
[((4717, 4760), 'numpy.sum', 'np.sum', (['[(gap[1] - gap[0]) for gap in gaps]'], {}), '([(gap[1] - gap[0]) for gap in gaps])\n', (4723, 4760), True, 'import numpy as np\n'), ((4803, 4846), 'numpy.std', 'np.std', (['[(gap[1] - gap[0]) for gap in gaps]'], {}), '([(gap[1] - gap[0]) for gap in gaps])\n', (4809, 4846), True, 'import numpy as np\n'), ((4882, 4925), 'numpy.max', 'np.max', (['[(gap[1] - gap[0]) for gap in gaps]'], {}), '([(gap[1] - gap[0]) for gap in gaps])\n', (4888, 4925), True, 'import numpy as np\n'), ((5422, 5434), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5432, 5434), True, 'import matplotlib.pyplot as plt\n'), ((5439, 5461), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time/sec"""'], {}), "('time/sec')\n", (5449, 5461), True, 'import matplotlib.pyplot as plt\n'), ((5739, 5750), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5748, 5750), True, 'import matplotlib.pyplot as plt\n'), ((9203, 9215), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (9213, 9215), True, 'import matplotlib.pyplot as plt\n'), ((9263, 9280), 'numpy.arange', 'np.arange', (['n_bars'], {}), '(n_bars)\n', (9272, 9280), True, 'import numpy as np\n'), ((9390, 9422), 'matplotlib.pyplot.bar', 'plt.bar', (['x_labels', 'io_bar', 'width'], {}), '(x_labels, io_bar, width)\n', (9397, 9422), True, 'import matplotlib.pyplot as plt\n'), ((9578, 9627), 'matplotlib.pyplot.bar', 'plt.bar', (['x_labels', 'comm_bar', 'width'], {'bottom': 'io_bar'}), '(x_labels, comm_bar, width, bottom=io_bar)\n', (9585, 9627), True, 'import matplotlib.pyplot as plt\n'), ((9932, 9956), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Timing/sec"""'], {}), "('Timing/sec')\n", (9942, 9956), True, 'import matplotlib.pyplot as plt\n'), ((9961, 10008), 'matplotlib.pyplot.title', 'plt.title', (['"""Server Breakdown Timing With Cache"""'], {}), "('Server Breakdown Timing With Cache')\n", (9970, 10008), True, 'import matplotlib.pyplot as plt\n'), ((10013, 10099), 'matplotlib.pyplot.legend', 'plt.legend', (['(p_io_bar[0], p_comm_bar[0], p_other_bar[0])', "('I/O', 'Comm', 'Other')"], {}), "((p_io_bar[0], p_comm_bar[0], p_other_bar[0]), ('I/O', 'Comm',\n 'Other'))\n", (10023, 10099), True, 'import matplotlib.pyplot as plt\n'), ((10101, 10186), 'matplotlib.pyplot.xticks', 'plt.xticks', (['x_labels', "('shared_bm', 'shared_tr', 'dedicated_bm', 'dedicated_tr')"], {}), "(x_labels, ('shared_bm', 'shared_tr', 'dedicated_bm', 'dedicated_tr')\n )\n", (10111, 10186), True, 'import matplotlib.pyplot as plt\n'), ((10248, 10259), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (10257, 10259), True, 'import matplotlib.pyplot as plt\n'), ((2826, 2858), 'matplotlib.pyplot.plot', 'plt.plot', (['interval', 'y'], {'color': '"""r"""'}), "(interval, y, color='r')\n", (2834, 2858), True, 'import matplotlib.pyplot as plt\n'), ((5866, 5876), 'os.walk', 'walk', (['path'], {}), '(path)\n', (5870, 5876), False, 'from os import walk\n')]
|
#!/usr/bin/env python3
import argparse
import os
import re
import glog as log
import numpy as np
import pandas as pd
import ray
from factorized_sampler_lib import data_utils
from factorized_sampler_lib import rustlib
import join_utils
NULL = -1
@ray.remote
def get_first_jct(join_name, table, base_count_table):
@data_utils.save_result(f"{table}.jct", join_name,
f"join count table of `{table}`")
def work(table, base_count_table):
log.info(f"Creating join count table for `{table}`.")
ret = base_count_table
ret.columns = [f"{table}.{k}" for k in ret.columns]
return ret
return work(table, base_count_table)
@ray.remote
def get_jct(table, base_count_table, dependencies, dependency_jcts, join_spec):
@data_utils.save_result(f"{table}.jct", join_spec.join_name,
f"join count table of `{table}`")
def work(table, bct, dependencies, dependency_jcts, join_spec):
"""
The base count table (BCT) contains the following columns:
{k1}, {k2}, ..., weight, {k1}.cnt, {k2}.cnt, ...
The join count table (JCT) contains the following columns:
{table}.{k1}, ..., {table}.weight, {table}.{k1}.cnt, ...
The only difference is that the `weight` values in the JCT are
aggregated from the dependency JCTs. The fanout counts are copied
from the BCT. The JCT contains at most one extra row than the BCT,
namely the NULL row, if the dependency JCTs contain values not in
the BCT.
"""
log.info(
f"Creating join count table for `{table}` from dependencies {dependencies}"
)
jct_columns = [f"{table}.{k}" for k in bct.columns]
bct.columns = jct_columns
keys = join_spec.join_keys[table]
groupby_keys = [f"{table}.{k}" for k in keys]
table_weight = f"{table}.weight"
ret_keys = groupby_keys + [table_weight]
ret = bct[ret_keys]
dependency_jcts = ray.get(dependency_jcts)
for other, other_jct in zip(dependencies, dependency_jcts):
join_keys = join_spec.join_graph[table][other]["join_keys"]
table_key = f"{table}.{join_keys[table]}"
other_key = f"{other}.{join_keys[other]}"
other_weight = f"{other}.weight"
ret = ret.merge(
other_jct[[other_key, other_weight]],
how=join_spec.join_how,
left_on=table_key,
right_on=other_key,
)
ret[table_weight] = np.nanprod(
[ret[table_weight], ret[other_weight]], axis=0)
ret = ret[ret_keys]
ret = ret.fillna(NULL).groupby(groupby_keys).sum().reset_index()
# At this point ret contains the aggregated weights. We now need to
# copy the *.cnt columns from the BCT, and handle the potential NULL
# row. If a NULL row exists, then `ret` contains one more row than
# `bct`. Otherwise, they are of the same length.
#
# Do not assert this for inner joins.
if join_spec.join_how == "outer":
assert 0 <= len(ret) - len(bct) <= 1, (ret, bct)
# First, we get the BCT minus the weight column, i.e. only the keys and
# their fanouts. Then we need to concatenate the fanout columns to ret.
bct_sans_weight = bct.drop(table_weight, axis=1)
ret = ret.merge(bct_sans_weight,
how="left",
left_on=groupby_keys,
right_on=groupby_keys)
ret = ret[jct_columns]
# This fillna(1) sets the fanout columns in the NULL row to be 1. It is
# a no-op if ret does not contain NULL.
jct = ret.fillna(1).astype(np.int64, copy=False)
return jct
return work(table, base_count_table, dependencies, dependency_jcts,
join_spec)
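# Minimal sketch (toy frames, illustration only) of the core aggregation step in
# get_jct() above: merge a dependency JCT on the join key, combine weights with
# np.nanprod so unmatched rows keep their own weight, then group back by the
# table's join keys and sum.
def _jct_aggregation_sketch():
    bct = pd.DataFrame({"t.k": [1, 2], "t.weight": [2, 3]})
    dep_jct = pd.DataFrame({"d.k": [1, 1], "d.weight": [5, 7]})
    ret = bct.merge(dep_jct, how="outer", left_on="t.k", right_on="d.k")
    ret["t.weight"] = np.nanprod([ret["t.weight"], ret["d.weight"]], axis=0)
    # Rows for t.k == 1 get weights 2*5 and 2*7, which sum to 24; t.k == 2 keeps 3.
    return ret[["t.k", "t.weight"]].fillna(NULL).groupby(["t.k"]).sum().reset_index()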
def get_join_count_tables(join_spec):
base_count_tables_dict = {
table: get_base_count_table.remote(join_spec.join_name, table, keys)
for table, keys in join_spec.join_keys.items()
}
join_count_tables_dict = {}
# FIXME: properly traverse the tree via bottom-up order.
for table in join_utils.get_bottom_up_table_ordering(join_spec):
dependencies = list(join_spec.join_tree.neighbors(table))
if len(dependencies) == 0:
jct = get_first_jct.remote(join_spec.join_name, table,
base_count_tables_dict[table])
else:
bct = base_count_tables_dict[table]
dependency_jcts = [join_count_tables_dict[d] for d in dependencies]
jct = get_jct.remote(table, bct, dependencies, dependency_jcts,
join_spec)
join_count_tables_dict[table] = jct
return join_count_tables_dict
@ray.remote
def get_base_count_table(join_name, table, keys):
@data_utils.save_result(f"{table}.bct", join_name,
f"base count table of `{table}`")
def work(table, keys):
df = data_utils.load_table(table,
usecols=keys,
dtype={k: pd.Int64Dtype() for k in keys})
groupby_ss = df.groupby(keys).size()
bct = groupby_ss.to_frame(name="weight").reset_index()
for key in keys:
kct = df.groupby(key).size().rename(f"{key}.cnt")
bct = bct.merge(kct, how="left", left_on=key, right_index=True)
return bct.astype(np.int64, copy=False)
return work(table, keys)
def get_null_set(my_jct, my_key, parent_jct, parent_key):
parent_keyset = parent_jct[parent_key].unique()
parent_keyset = parent_keyset[parent_keyset != NULL]
my_keyset = my_jct[my_key]
assert my_keyset.dtype == parent_keyset.dtype, (my_keyset.dtype,
parent_keyset.dtype)
null_set = my_keyset[~np.isin(my_keyset.values, parent_keyset)]
return null_set
@ray.remote
def get_join_key_groups(table, jcts, join_spec):
jct = ray.get(jcts[table])
parents = list(join_spec.join_tree.predecessors(table))
if len(parents) == 0:
return "Skipped"
parent = parents[0]
parent_jct = ray.get(jcts[parent])
join_keys = join_spec.join_graph[parent][table]["join_keys"]
my_key = f"{table}.{join_keys[table]}"
parent_key = f"{parent}.{join_keys[parent]}"
indices = jct.groupby(my_key).indices
null_set = get_null_set(jct, my_key, parent_jct, parent_key)
if null_set.size > 0:
indices[NULL] = null_set.index.values
indices = {(k,): v for k, v in indices.items()}
weights = jct[f"{table}.weight"].values
rustlib.prepare_indices(f"{join_spec.join_name}/{table}.jk.indices",
indices, weights)
return "OK"
@ray.remote
def get_primary_key_groups(table, keys, df, join_spec):
indices = df.groupby(keys).indices
# Manually patch the dictionary to make sure its keys are tuples.
if len(keys) == 1:
indices = {(k,): v for k, v in indices.items()}
rustlib.prepare_indices(f"{join_spec.join_name}/{table}.pk.indices",
indices, None)
return "OK"
@ray.remote
def load_data_table(table, join_keys):
return data_utils.load_table(table,
dtype={k: pd.Int64Dtype() for k in join_keys})
def check_required_files(join_spec):
for table in join_spec.join_tables:
for f in [f"{table}.jct", f"{table}.pk.indices"]:
path = os.path.join(data_utils.CACHE_DIR, join_spec.join_name, f)
if not os.path.exists(path):
return False
return True
def prepare(join_spec):
"""Prepares all required files for the factorized sampler.
The *.bct and *.jct files are in Feather format and can be loaded with
pd.read_feather(). The *.indices files are in RON format and can be
loaded in Rust.
- {table}.bct: For every table, this is the tuple counts grouped by all
the join keys used in the join spec. i.e.
SELECT COUNT(*) FROM {table} GROUP BY {join keys}
This is only used to produce the factorized join count tables (*.jct).
- {table}.jct: For every table, this is its factorized join count table.
    The most important column is `{table}.weight`, which induces the sampling
probability of a given tuple. This table also contains the fanout
counts for each key tuple.
- {table}.jk.indices: This is a reverse lookup table into the join count
tables. When a row is sampled from the parent JCT, the sampler needs to
pick a row from the rows in the current JCT that match the parent join
key. This file is for this purpose: it is a hash map from the parent keys
to the row IDs in this JCT that match that key.
This is a weighted distribution because each row has its own join count
and should be sampled proportionately.
- {table}.pk.indices: The factorized sampler only produces samples of the
join key columns. To fetch the data columns, one needs to pick a row
from the original table that matches the join keys in this join sample.
This file is for this purpose: it is a hash map from the join keys to
the primary key IDs in the data table that match the keys.
This is a uniform distribution because each row in the data table should
be equally likely to be sampled.
"""
ray.init(ignore_reinit_error=True)
if check_required_files(join_spec):
return
jcts = get_join_count_tables(join_spec)
dts = {
table: load_data_table.remote(table, keys)
for table, keys in join_spec.join_keys.items()
}
jk_groups_weights = {
table: get_join_key_groups.remote(table, jcts, join_spec)
for table, jct in jcts.items()
}
pk_groups = {
table: get_primary_key_groups.remote(table, keys, dts[table], join_spec)
for table, keys in join_spec.join_keys.items()
}
for table, jkg in jk_groups_weights.items():
print(table, ray.get(jkg))
for table, pkg in pk_groups.items():
print(table, ray.get(pkg))
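# Minimal sketch (illustration only) of reading back the artifacts produced by
# prepare(): the *.jct/*.bct files are Feather tables under the same cache layout
# used in check_required_files() above; the *.indices files are consumed from Rust.
def _load_jct_sketch(join_name, table):
    path = os.path.join(data_utils.CACHE_DIR, join_name, f"{table}.jct")
    return pd.read_feather(path)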
|
[
"os.path.exists",
"ray.get",
"pandas.Int64Dtype",
"join_utils.get_bottom_up_table_ordering",
"numpy.nanprod",
"os.path.join",
"numpy.isin",
"glog.info",
"factorized_sampler_lib.rustlib.prepare_indices",
"factorized_sampler_lib.data_utils.save_result",
"ray.init"
] |
[((324, 411), 'factorized_sampler_lib.data_utils.save_result', 'data_utils.save_result', (['f"""{table}.jct"""', 'join_name', 'f"""join count table of `{table}`"""'], {}), "(f'{table}.jct', join_name,\n f'join count table of `{table}`')\n", (346, 411), False, 'from factorized_sampler_lib import data_utils\n'), ((789, 886), 'factorized_sampler_lib.data_utils.save_result', 'data_utils.save_result', (['f"""{table}.jct"""', 'join_spec.join_name', 'f"""join count table of `{table}`"""'], {}), "(f'{table}.jct', join_spec.join_name,\n f'join count table of `{table}`')\n", (811, 886), False, 'from factorized_sampler_lib import data_utils\n'), ((4257, 4307), 'join_utils.get_bottom_up_table_ordering', 'join_utils.get_bottom_up_table_ordering', (['join_spec'], {}), '(join_spec)\n', (4296, 4307), False, 'import join_utils\n'), ((4957, 5044), 'factorized_sampler_lib.data_utils.save_result', 'data_utils.save_result', (['f"""{table}.bct"""', 'join_name', 'f"""base count table of `{table}`"""'], {}), "(f'{table}.bct', join_name,\n f'base count table of `{table}`')\n", (4979, 5044), False, 'from factorized_sampler_lib import data_utils\n'), ((6116, 6136), 'ray.get', 'ray.get', (['jcts[table]'], {}), '(jcts[table])\n', (6123, 6136), False, 'import ray\n'), ((6289, 6310), 'ray.get', 'ray.get', (['jcts[parent]'], {}), '(jcts[parent])\n', (6296, 6310), False, 'import ray\n'), ((6748, 6838), 'factorized_sampler_lib.rustlib.prepare_indices', 'rustlib.prepare_indices', (['f"""{join_spec.join_name}/{table}.jk.indices"""', 'indices', 'weights'], {}), "(f'{join_spec.join_name}/{table}.jk.indices',\n indices, weights)\n", (6771, 6838), False, 'from factorized_sampler_lib import rustlib\n'), ((7141, 7228), 'factorized_sampler_lib.rustlib.prepare_indices', 'rustlib.prepare_indices', (['f"""{join_spec.join_name}/{table}.pk.indices"""', 'indices', 'None'], {}), "(f'{join_spec.join_name}/{table}.pk.indices',\n indices, None)\n", (7164, 7228), False, 'from factorized_sampler_lib import rustlib\n'), ((9534, 9568), 'ray.init', 'ray.init', ([], {'ignore_reinit_error': '(True)'}), '(ignore_reinit_error=True)\n', (9542, 9568), False, 'import ray\n'), ((483, 536), 'glog.info', 'log.info', (['f"""Creating join count table for `{table}`."""'], {}), "(f'Creating join count table for `{table}`.')\n", (491, 536), True, 'import glog as log\n'), ((1584, 1679), 'glog.info', 'log.info', (['f"""Creating join count table for `{table}` from dependencies {dependencies}"""'], {}), "(\n f'Creating join count table for `{table}` from dependencies {dependencies}'\n )\n", (1592, 1679), True, 'import glog as log\n'), ((2026, 2050), 'ray.get', 'ray.get', (['dependency_jcts'], {}), '(dependency_jcts)\n', (2033, 2050), False, 'import ray\n'), ((2584, 2642), 'numpy.nanprod', 'np.nanprod', (['[ret[table_weight], ret[other_weight]]'], {'axis': '(0)'}), '([ret[table_weight], ret[other_weight]], axis=0)\n', (2594, 2642), True, 'import numpy as np\n'), ((5981, 6021), 'numpy.isin', 'np.isin', (['my_keyset.values', 'parent_keyset'], {}), '(my_keyset.values, parent_keyset)\n', (5988, 6021), True, 'import numpy as np\n'), ((7598, 7656), 'os.path.join', 'os.path.join', (['data_utils.CACHE_DIR', 'join_spec.join_name', 'f'], {}), '(data_utils.CACHE_DIR, join_spec.join_name, f)\n', (7610, 7656), False, 'import os\n'), ((10161, 10173), 'ray.get', 'ray.get', (['jkg'], {}), '(jkg)\n', (10168, 10173), False, 'import ray\n'), ((10237, 10249), 'ray.get', 'ray.get', (['pkg'], {}), '(pkg)\n', (10244, 10249), False, 'import ray\n'), ((7405, 7420), 'pandas.Int64Dtype', 
'pd.Int64Dtype', ([], {}), '()\n', (7418, 7420), True, 'import pandas as pd\n'), ((7676, 7696), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (7690, 7696), False, 'import os\n'), ((5232, 5247), 'pandas.Int64Dtype', 'pd.Int64Dtype', ([], {}), '()\n', (5245, 5247), True, 'import pandas as pd\n')]
|
import numpy as np
from matplotlib import pyplot as plt
import pickle
file = open('Data/Alpha0Bw7', 'rb')
Data = np.array(pickle.load(file))
Alpha0 = [0.01, 0.02, 0.05, 0.1, 0.2, 0.4, 0.6, 0.9]
Bw = np.linspace(0.4, 3.2, 15)
Names = ['alpha0', 'bw',
'IS est', 'IS a-var', 'n0/ESS', 'n0/RSS', 'kernel number',
'mean bdwth', 'kde ESS', 'sqrt(ISE/Rf)', 'KLD',
'NIS est', 'NIS a-var', 'MIS est', 'MIS a-var',
'CI>30', 'R2(O)', 'R2(R)', 'R2(L)',
'RIS(O) est', 'RIS(O) a-var', 'RIS(R) est', 'RIS(R) a-var', 'RIS(L) est', 'RIS(L) a-var',
'RIS(O,u) est', 'RIS(O,u) a-var', 'RIS(R,u) est', 'RIS(R,u) a-var', 'RIS(L,u) est', 'RIS(L,u) a-var']
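# each row of Data holds one (alpha0, bw) run; its columns are ordered as listed in Names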
def draw(alpha0, name, to_ax, log=False):
if alpha0 not in Alpha0:
print('alpha0 error')
return
if name not in Names:
print('name error')
return
data = Data[Data[:, 0] == alpha0]
x = data[:, 1]
y = data[:, Names.index(name)]
if log:
y = np.log(y)
name = 'log(' + name + ')'
to_ax.plot(x, y, label=name)
return x, y
def draw_main():
f, axs = plt.subplots(3, 1, figsize=(10, 12))
axs = axs.flatten()
names = ['MIS a-var', 'RIS(O) a-var', 'RIS(O) a-var/MIS a-var']
for i, name in enumerate(names):
labels = ['alpha0=' + str(alpha0) for alpha0 in Alpha0]
if i == 0:
axs[i].plot(Bw, np.log(Data[0, Names.index('IS a-var')]) * np.ones(Bw.size), c='k')
axs[i].plot(Bw, np.log(Data[Data[:, 0] == Alpha0[0], Names.index('NIS a-var')]), c='k')
labels = ['reference 1', 'NIS a-var'] + labels
for alpha0 in Alpha0:
if name == 'RIS(O) a-var/MIS a-var':
data = Data[Data[:, 0] == alpha0]
x = data[:, 1]
y = np.log(data[:, Names.index('RIS(O) a-var')] / data[:, Names.index('MIS a-var')])
axs[i].plot(x, y)
else:
draw(alpha0=alpha0, name=name, to_ax=axs[i], log=True)
axs[i].legend(labels)
axs[i].set_title('log('+name+')')
plt.show()
if __name__ == '__main__':
draw_main()
|
[
"numpy.ones",
"numpy.log",
"pickle.load",
"numpy.linspace",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] |
[((201, 226), 'numpy.linspace', 'np.linspace', (['(0.4)', '(3.2)', '(15)'], {}), '(0.4, 3.2, 15)\n', (212, 226), True, 'import numpy as np\n'), ((124, 141), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (135, 141), False, 'import pickle\n'), ((1121, 1157), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(1)'], {'figsize': '(10, 12)'}), '(3, 1, figsize=(10, 12))\n', (1133, 1157), True, 'from matplotlib import pyplot as plt\n'), ((2088, 2098), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2096, 2098), True, 'from matplotlib import pyplot as plt\n'), ((994, 1003), 'numpy.log', 'np.log', (['y'], {}), '(y)\n', (1000, 1003), True, 'import numpy as np\n'), ((1441, 1457), 'numpy.ones', 'np.ones', (['Bw.size'], {}), '(Bw.size)\n', (1448, 1457), True, 'import numpy as np\n')]
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for basic component wise operations using a GPU device."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import threading
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker_v2
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.gen_array_ops import broadcast_gradient_args
from tensorflow.python.platform import test
class GPUBinaryOpsTest(test.TestCase):
def _compareGPU(self, x, y, np_func, tf_func):
with self.cached_session():
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = tf_func(inx, iny)
tf_gpu = self.evaluate(out)
with self.cached_session(use_gpu=False):
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = tf_func(inx, iny)
tf_cpu = self.evaluate(out)
self.assertAllClose(tf_cpu, tf_gpu)
def testFloatBasic(self):
x = np.linspace(-5, 20, 15).reshape(1, 3, 5).astype(np.float32) # pylint: disable=too-many-function-args
y = np.linspace(20, -5, 15).reshape(1, 3, 5).astype(np.float32) # pylint: disable=too-many-function-args
self._compareGPU(x, y, np.add, math_ops.add)
self._compareGPU(x, y, np.subtract, math_ops.subtract)
self._compareGPU(x, y, np.multiply, math_ops.multiply)
self._compareGPU(x, y + 0.1, np.true_divide, math_ops.truediv)
self._compareGPU(x, y + 0.1, np.floor_divide, math_ops.floordiv)
self._compareGPU(x, y, np.power, math_ops.pow)
def testFloatWithBCast(self):
x = np.linspace(-5, 20, 15).reshape(3, 5).astype(np.float32)
y = np.linspace(20, -5, 30).reshape(2, 3, 5).astype(np.float32) # pylint: disable=too-many-function-args
self._compareGPU(x, y, np.add, math_ops.add)
self._compareGPU(x, y, np.subtract, math_ops.subtract)
self._compareGPU(x, y, np.multiply, math_ops.multiply)
self._compareGPU(x, y + 0.1, np.true_divide, math_ops.truediv)
def testDoubleBasic(self):
x = np.linspace(-5, 20, 15).reshape(1, 3, 5).astype(np.float64) # pylint: disable=too-many-function-args
y = np.linspace(20, -5, 15).reshape(1, 3, 5).astype(np.float64) # pylint: disable=too-many-function-args
self._compareGPU(x, y, np.add, math_ops.add)
self._compareGPU(x, y, np.subtract, math_ops.subtract)
self._compareGPU(x, y, np.multiply, math_ops.multiply)
self._compareGPU(x, y + 0.1, np.true_divide, math_ops.truediv)
def testDoubleWithBCast(self):
x = np.linspace(-5, 20, 15).reshape(3, 5).astype(np.float64)
y = np.linspace(20, -5, 30).reshape(2, 3, 5).astype(np.float64) # pylint: disable=too-many-function-args
self._compareGPU(x, y, np.add, math_ops.add)
self._compareGPU(x, y, np.subtract, math_ops.subtract)
self._compareGPU(x, y, np.multiply, math_ops.multiply)
self._compareGPU(x, y + 0.1, np.true_divide, math_ops.truediv)
class MathBuiltinUnaryTest(test.TestCase):
def _compare(self, x, np_func, tf_func, use_gpu):
np_out = np_func(x)
with self.cached_session(use_gpu=use_gpu) as sess:
inx = ops.convert_to_tensor(x)
ofunc = tf_func(inx)
tf_out = self.evaluate(ofunc)
self.assertAllClose(np_out, tf_out)
def _inv(self, x):
return 1.0 / x
def _rsqrt(self, x):
return self._inv(np.sqrt(x))
def _testDtype(self, dtype, use_gpu):
data = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(dtype)
data_gt_1 = data + 2 # for x > 1
self._compare(data, np.abs, math_ops.abs, use_gpu)
self._compare(data, np.arccos, math_ops.acos, use_gpu)
self._compare(data, np.arcsin, math_ops.asin, use_gpu)
self._compare(data, np.arcsinh, math_ops.asinh, use_gpu)
self._compare(data_gt_1, np.arccosh, math_ops.acosh, use_gpu)
self._compare(data, np.arctan, math_ops.atan, use_gpu)
self._compare(data, np.ceil, math_ops.ceil, use_gpu)
self._compare(data, np.cos, math_ops.cos, use_gpu)
self._compare(data, np.cosh, math_ops.cosh, use_gpu)
self._compare(data, np.exp, math_ops.exp, use_gpu)
self._compare(data, np.floor, math_ops.floor, use_gpu)
self._compare(data, np.log, math_ops.log, use_gpu)
self._compare(data, np.log1p, math_ops.log1p, use_gpu)
self._compare(data, np.negative, math_ops.negative, use_gpu)
self._compare(data, self._rsqrt, math_ops.rsqrt, use_gpu)
self._compare(data, np.sin, math_ops.sin, use_gpu)
self._compare(data, np.sinh, math_ops.sinh, use_gpu)
self._compare(data, np.sqrt, math_ops.sqrt, use_gpu)
self._compare(data, np.square, math_ops.square, use_gpu)
self._compare(data, np.tan, math_ops.tan, use_gpu)
self._compare(data, np.tanh, math_ops.tanh, use_gpu)
self._compare(data, np.arctanh, math_ops.atanh, use_gpu)
def testTypes(self):
for dtype in [np.float32]:
self._testDtype(dtype, use_gpu=True)
def testFloorDivide(self):
x = (1 + np.linspace(0, 5, np.prod([1, 3, 2]))).astype(np.float32).reshape(
[1, 3, 2])
y = (1 + np.linspace(0, 5, np.prod([1, 3, 2]))).astype(np.float32).reshape(
[1, 3, 2])
np_out = np.floor_divide(x, y + 0.1)
with self.session():
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y + 0.1)
ofunc = inx / iny
out_func2 = math_ops.floor(ofunc)
tf_out = self.evaluate(out_func2)
self.assertAllClose(np_out, tf_out)
class BroadcastSimpleTest(test.TestCase):
def _GetGradientArgs(self, xs, ys):
return self.evaluate(broadcast_gradient_args(xs, ys))
def testBroadcast(self):
r0, r1 = self._GetGradientArgs([2, 3, 5], [1])
self.assertAllEqual(r0, [])
self.assertAllEqual(r1, [0, 1, 2])
_GRAD_TOL = {dtypes.float32: 1e-3}
def _compareGpu(self, x, y, np_func, tf_func):
np_ans = np_func(x, y)
with self.cached_session():
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = tf_func(inx, iny)
tf_gpu = self.evaluate(out)
self.assertAllClose(np_ans, tf_gpu)
self.assertShapeEqual(np_ans, out)
# TODO(zhifengc/ke): make gradient checker work on GPU.
def testGradient(self):
x1 = (1 + np.linspace(0, 5, np.prod([1, 3, 2]))).astype(np.float32).reshape(
[1, 3, 2])
x2 = (1 + np.linspace(0, 5, np.prod([1, 3, 2]))).astype(np.float32).reshape(
[1, 3, 2])
def div_x1(x1):
return math_ops.truediv(x1, x2) * math_ops.cast(1.1, dtype=x1.dtype)
def div_x2(x2):
return math_ops.truediv(x1, x2) * math_ops.cast(1.1, dtype=x2.dtype)
with self.cached_session():
err = gradient_checker_v2.max_error(*gradient_checker_v2.compute_gradient(
div_x1, [x1]))
self.assertLess(err, self._GRAD_TOL[dtypes.as_dtype(x1.dtype)])
err = gradient_checker_v2.max_error(*gradient_checker_v2.compute_gradient(
div_x2, [x2]))
self.assertLess(err, self._GRAD_TOL[dtypes.as_dtype(x2.dtype)])
self._compareGpu(x1, x2, np.true_divide, math_ops.truediv)
self._compareGpu(x1, x2 + 0.1, np.floor_divide, math_ops.floordiv)
class GpuMultiSessionMemoryTest(test_util.TensorFlowTestCase):
"""Tests concurrent sessions executing on the same GPU."""
def _run_session(self, session, results):
n_iterations = 500
with session as s:
data = variables.Variable(1.0)
with ops.device('/device:GPU:0'):
random_seed.set_random_seed(1)
matrix1 = variables.Variable(
random_ops.truncated_normal([1024, 1]), name='matrix1')
matrix2 = variables.Variable(
random_ops.truncated_normal([1, 1024]), name='matrix2')
x1 = math_ops.multiply(data, matrix1, name='x1')
x3 = math_ops.matmul(x1, math_ops.matmul(matrix2, matrix1))
x4 = math_ops.matmul(array_ops.transpose(x3), x3, name='x4')
s.run(variables.global_variables_initializer())
for _ in xrange(n_iterations):
value = s.run(x4)
results.add(value.flat[0])
if len(results) != 1:
break
@test_util.run_v1_only('b/126596827 needs graph mode in multiple threads')
def testConcurrentSessions(self):
n_threads = 4
threads = []
results = []
for _ in xrange(n_threads):
session = self.session(graph=ops.Graph(), use_gpu=True)
results.append(set())
args = (session, results[-1])
threads.append(threading.Thread(target=self._run_session, args=args))
for thread in threads:
thread.start()
for thread in threads:
thread.join()
flat_results = set(itertools.chain(*results))
self.assertEqual(1,
len(flat_results),
'Expected single value, got %r' % flat_results)
if __name__ == '__main__':
test.main()
|
[
"itertools.chain",
"numpy.prod",
"numpy.sqrt",
"tensorflow.python.ops.variables.global_variables_initializer",
"tensorflow.python.framework.random_seed.set_random_seed",
"tensorflow.python.ops.gradient_checker_v2.compute_gradient",
"six.moves.xrange",
"tensorflow.python.ops.variables.Variable",
"tensorflow.python.ops.math_ops.multiply",
"numpy.arange",
"tensorflow.python.ops.random_ops.truncated_normal",
"tensorflow.python.framework.ops.Graph",
"numpy.linspace",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.framework.ops.device",
"tensorflow.python.ops.math_ops.cast",
"tensorflow.python.ops.math_ops.truediv",
"tensorflow.python.ops.math_ops.floor",
"tensorflow.python.ops.gen_array_ops.broadcast_gradient_args",
"tensorflow.python.ops.math_ops.matmul",
"tensorflow.python.framework.test_util.run_v1_only",
"tensorflow.python.ops.array_ops.transpose",
"numpy.floor_divide",
"threading.Thread",
"tensorflow.python.framework.dtypes.as_dtype",
"tensorflow.python.platform.test.main"
] |
[((9072, 9145), 'tensorflow.python.framework.test_util.run_v1_only', 'test_util.run_v1_only', (['"""b/126596827 needs graph mode in multiple threads"""'], {}), "('b/126596827 needs graph mode in multiple threads')\n", (9093, 9145), False, 'from tensorflow.python.framework import test_util\n'), ((9779, 9790), 'tensorflow.python.platform.test.main', 'test.main', ([], {}), '()\n', (9788, 9790), False, 'from tensorflow.python.platform import test\n'), ((6184, 6211), 'numpy.floor_divide', 'np.floor_divide', (['x', '(y + 0.1)'], {}), '(x, y + 0.1)\n', (6199, 6211), True, 'import numpy as np\n'), ((9247, 9264), 'six.moves.xrange', 'xrange', (['n_threads'], {}), '(n_threads)\n', (9253, 9264), False, 'from six.moves import xrange\n'), ((1674, 1698), 'tensorflow.python.framework.ops.convert_to_tensor', 'ops.convert_to_tensor', (['x'], {}), '(x)\n', (1695, 1698), False, 'from tensorflow.python.framework import ops\n'), ((1711, 1735), 'tensorflow.python.framework.ops.convert_to_tensor', 'ops.convert_to_tensor', (['y'], {}), '(y)\n', (1732, 1735), False, 'from tensorflow.python.framework import ops\n'), ((1858, 1882), 'tensorflow.python.framework.ops.convert_to_tensor', 'ops.convert_to_tensor', (['x'], {}), '(x)\n', (1879, 1882), False, 'from tensorflow.python.framework import ops\n'), ((1895, 1919), 'tensorflow.python.framework.ops.convert_to_tensor', 'ops.convert_to_tensor', (['y'], {}), '(y)\n', (1916, 1919), False, 'from tensorflow.python.framework import ops\n'), ((4186, 4210), 'tensorflow.python.framework.ops.convert_to_tensor', 'ops.convert_to_tensor', (['x'], {}), '(x)\n', (4207, 4210), False, 'from tensorflow.python.framework import ops\n'), ((4400, 4410), 'numpy.sqrt', 'np.sqrt', (['x'], {}), '(x)\n', (4407, 4410), True, 'import numpy as np\n'), ((6250, 6274), 'tensorflow.python.framework.ops.convert_to_tensor', 'ops.convert_to_tensor', (['x'], {}), '(x)\n', (6271, 6274), False, 'from tensorflow.python.framework import ops\n'), ((6287, 6317), 'tensorflow.python.framework.ops.convert_to_tensor', 'ops.convert_to_tensor', (['(y + 0.1)'], {}), '(y + 0.1)\n', (6308, 6317), False, 'from tensorflow.python.framework import ops\n'), ((6360, 6381), 'tensorflow.python.ops.math_ops.floor', 'math_ops.floor', (['ofunc'], {}), '(ofunc)\n', (6374, 6381), False, 'from tensorflow.python.ops import math_ops\n'), ((6571, 6602), 'tensorflow.python.ops.gen_array_ops.broadcast_gradient_args', 'broadcast_gradient_args', (['xs', 'ys'], {}), '(xs, ys)\n', (6594, 6602), False, 'from tensorflow.python.ops.gen_array_ops import broadcast_gradient_args\n'), ((6913, 6937), 'tensorflow.python.framework.ops.convert_to_tensor', 'ops.convert_to_tensor', (['x'], {}), '(x)\n', (6934, 6937), False, 'from tensorflow.python.framework import ops\n'), ((6950, 6974), 'tensorflow.python.framework.ops.convert_to_tensor', 'ops.convert_to_tensor', (['y'], {}), '(y)\n', (6971, 6974), False, 'from tensorflow.python.framework import ops\n'), ((8348, 8371), 'tensorflow.python.ops.variables.Variable', 'variables.Variable', (['(1.0)'], {}), '(1.0)\n', (8366, 8371), False, 'from tensorflow.python.ops import variables\n'), ((9588, 9613), 'itertools.chain', 'itertools.chain', (['*results'], {}), '(*results)\n', (9603, 9613), False, 'import itertools\n'), ((7439, 7463), 'tensorflow.python.ops.math_ops.truediv', 'math_ops.truediv', (['x1', 'x2'], {}), '(x1, x2)\n', (7455, 7463), False, 'from tensorflow.python.ops import math_ops\n'), ((7466, 7500), 'tensorflow.python.ops.math_ops.cast', 'math_ops.cast', (['(1.1)'], {'dtype': 'x1.dtype'}), '(1.1, 
dtype=x1.dtype)\n', (7479, 7500), False, 'from tensorflow.python.ops import math_ops\n'), ((7535, 7559), 'tensorflow.python.ops.math_ops.truediv', 'math_ops.truediv', (['x1', 'x2'], {}), '(x1, x2)\n', (7551, 7559), False, 'from tensorflow.python.ops import math_ops\n'), ((7562, 7596), 'tensorflow.python.ops.math_ops.cast', 'math_ops.cast', (['(1.1)'], {'dtype': 'x2.dtype'}), '(1.1, dtype=x2.dtype)\n', (7575, 7596), False, 'from tensorflow.python.ops import math_ops\n'), ((8383, 8410), 'tensorflow.python.framework.ops.device', 'ops.device', (['"""/device:GPU:0"""'], {}), "('/device:GPU:0')\n", (8393, 8410), False, 'from tensorflow.python.framework import ops\n'), ((8420, 8450), 'tensorflow.python.framework.random_seed.set_random_seed', 'random_seed.set_random_seed', (['(1)'], {}), '(1)\n', (8447, 8450), False, 'from tensorflow.python.framework import random_seed\n'), ((8676, 8719), 'tensorflow.python.ops.math_ops.multiply', 'math_ops.multiply', (['data', 'matrix1'], {'name': '"""x1"""'}), "(data, matrix1, name='x1')\n", (8693, 8719), False, 'from tensorflow.python.ops import math_ops\n'), ((8931, 8951), 'six.moves.xrange', 'xrange', (['n_iterations'], {}), '(n_iterations)\n', (8937, 8951), False, 'from six.moves import xrange\n'), ((9413, 9466), 'threading.Thread', 'threading.Thread', ([], {'target': 'self._run_session', 'args': 'args'}), '(target=self._run_session, args=args)\n', (9429, 9466), False, 'import threading\n'), ((7673, 7723), 'tensorflow.python.ops.gradient_checker_v2.compute_gradient', 'gradient_checker_v2.compute_gradient', (['div_x1', '[x1]'], {}), '(div_x1, [x1])\n', (7709, 7723), False, 'from tensorflow.python.ops import gradient_checker_v2\n'), ((7778, 7803), 'tensorflow.python.framework.dtypes.as_dtype', 'dtypes.as_dtype', (['x1.dtype'], {}), '(x1.dtype)\n', (7793, 7803), False, 'from tensorflow.python.framework import dtypes\n'), ((7850, 7900), 'tensorflow.python.ops.gradient_checker_v2.compute_gradient', 'gradient_checker_v2.compute_gradient', (['div_x2', '[x2]'], {}), '(div_x2, [x2])\n', (7886, 7900), False, 'from tensorflow.python.ops import gradient_checker_v2\n'), ((7955, 7980), 'tensorflow.python.framework.dtypes.as_dtype', 'dtypes.as_dtype', (['x2.dtype'], {}), '(x2.dtype)\n', (7970, 7980), False, 'from tensorflow.python.framework import dtypes\n'), ((8501, 8539), 'tensorflow.python.ops.random_ops.truncated_normal', 'random_ops.truncated_normal', (['[1024, 1]'], {}), '([1024, 1])\n', (8528, 8539), False, 'from tensorflow.python.ops import random_ops\n'), ((8607, 8645), 'tensorflow.python.ops.random_ops.truncated_normal', 'random_ops.truncated_normal', (['[1, 1024]'], {}), '([1, 1024])\n', (8634, 8645), False, 'from tensorflow.python.ops import random_ops\n'), ((8753, 8786), 'tensorflow.python.ops.math_ops.matmul', 'math_ops.matmul', (['matrix2', 'matrix1'], {}), '(matrix2, matrix1)\n', (8768, 8786), False, 'from tensorflow.python.ops import math_ops\n'), ((8817, 8840), 'tensorflow.python.ops.array_ops.transpose', 'array_ops.transpose', (['x3'], {}), '(x3)\n', (8836, 8840), False, 'from tensorflow.python.ops import array_ops\n'), ((8871, 8911), 'tensorflow.python.ops.variables.global_variables_initializer', 'variables.global_variables_initializer', ([], {}), '()\n', (8909, 8911), False, 'from tensorflow.python.ops import variables\n'), ((9301, 9312), 'tensorflow.python.framework.ops.Graph', 'ops.Graph', ([], {}), '()\n', (9310, 9312), False, 'from tensorflow.python.framework import ops\n'), ((2062, 2085), 'numpy.linspace', 'np.linspace', (['(-5)', '(20)', '(15)'], 
{}), '(-5, 20, 15)\n', (2073, 2085), True, 'import numpy as np\n'), ((2172, 2195), 'numpy.linspace', 'np.linspace', (['(20)', '(-5)', '(15)'], {}), '(20, -5, 15)\n', (2183, 2195), True, 'import numpy as np\n'), ((2669, 2692), 'numpy.linspace', 'np.linspace', (['(-5)', '(20)', '(15)'], {}), '(-5, 20, 15)\n', (2680, 2692), True, 'import numpy as np\n'), ((2734, 2757), 'numpy.linspace', 'np.linspace', (['(20)', '(-5)', '(30)'], {}), '(20, -5, 30)\n', (2745, 2757), True, 'import numpy as np\n'), ((3108, 3131), 'numpy.linspace', 'np.linspace', (['(-5)', '(20)', '(15)'], {}), '(-5, 20, 15)\n', (3119, 3131), True, 'import numpy as np\n'), ((3218, 3241), 'numpy.linspace', 'np.linspace', (['(20)', '(-5)', '(15)'], {}), '(20, -5, 15)\n', (3229, 3241), True, 'import numpy as np\n'), ((3596, 3619), 'numpy.linspace', 'np.linspace', (['(-5)', '(20)', '(15)'], {}), '(-5, 20, 15)\n', (3607, 3619), True, 'import numpy as np\n'), ((3661, 3684), 'numpy.linspace', 'np.linspace', (['(20)', '(-5)', '(30)'], {}), '(20, -5, 30)\n', (3672, 3684), True, 'import numpy as np\n'), ((4465, 4481), 'numpy.arange', 'np.arange', (['(-3)', '(3)'], {}), '(-3, 3)\n', (4474, 4481), True, 'import numpy as np\n'), ((6003, 6021), 'numpy.prod', 'np.prod', (['[1, 3, 2]'], {}), '([1, 3, 2])\n', (6010, 6021), True, 'import numpy as np\n'), ((6102, 6120), 'numpy.prod', 'np.prod', (['[1, 3, 2]'], {}), '([1, 3, 2])\n', (6109, 6120), True, 'import numpy as np\n'), ((7237, 7255), 'numpy.prod', 'np.prod', (['[1, 3, 2]'], {}), '([1, 3, 2])\n', (7244, 7255), True, 'import numpy as np\n'), ((7337, 7355), 'numpy.prod', 'np.prod', (['[1, 3, 2]'], {}), '([1, 3, 2])\n', (7344, 7355), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 6 15:00:21 2019
@author: agarwal.270a
"""
# Import Libraries
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal as sig
from scipy.signal import windows as win
import pandas as pd
from scipy import io
import pickle
from scipy.stats import norm
# Import CC functions
#from cerebralcortex.core.datatypes import DataStream
#from cerebralcortex.core.metadata_manager.stream.metadata import Metadata, DataDescriptor, ModuleMetadata
#from cerebralcortex.core.util.spark_helper import get_or_create_sc
# Declare constants and load data
Fs=25 #Hz
len_in_s=20.48 #s
len_out=4
len_in=Fs*len_in_s
#arr_t=np.arange(250,290,len_in_s) #change time duration when longer noise exists
arr_t=np.arange(250,900,len_in_s) #change time duration when longer noise exists
path_prefix= 'E:/Box Sync/' #'C:/Users/agarwal.270/Box/'
path=path_prefix+'SU19/Research/PPG_ECG_Proj/py_code/MA_function/'
mdict=pickle.load(open(path+'data/sim_data.dat','rb'))
RR_distro=mdict['RR_distro']
HR_clusters=mdict['HR_clusters']
del mdict
#peak_data=mdict['peaks']
#led_id=mdict['led_id']
#verify after meeting
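# marginals of each HR-cluster's joint RR distribution along both axes; diff_arr measures how far the two marginals differ (i.e. how asymmetric the joint is)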
list_pdf_RR_joint=[RR_distro[j,0] for j in range(len(RR_distro))]
list_pdf_RR_row_sum=[np.sum(arr,axis=0) for arr in list_pdf_RR_joint]
list_pdf_RR_col_sum=[np.sum(arr,axis=1) for arr in list_pdf_RR_joint]
diff_arr=np.array([np.linalg.norm(list_pdf_RR_row_sum[k]-list_pdf_RR_col_sum[k])\
for k in range(len(list_pdf_RR_row_sum))]).round(4)
# =============================================================================
# plt.figure();
# for j in range(len(list_pdf_RR_row_sum)):
# plt.subplot(7,2,j+1);plt.plot(list_pdf_RR_row_sum[j],'b-o')
# plt.plot(list_pdf_RR_col_sum[j],'r--x');plt.legend(['row','col'])
# plt.grid(True);plt.title('z={}, rmse={}'.format(j+1,diff_arr[j]))
#
# =============================================================================
#%% Helper funcs
# =============================================================================
# def sample_RR(HR,RR_prev):
# #get cluster
# HR_up=(HR_clusters>HR).astype(int)
# z=(np.arange(len(HR_clusters)-1))[(np.diff(HR_up)).astype(bool)][0]
# #RR_z=RR_distro[z]
# RR_z_distro=RR_distro[z,0];RR_z_vals=RR_distro[z,1].reshape(-1)
# if RR_prev==0: #beginning of sampling. sample uniform randomly
# RR_next=RR_z_vals[np.random.randint(len(RR_z_vals))]
# else:
# idx_Rp=np.arange(len(RR_z_vals))[RR_z_vals==RR_prev]
# RR_z_Rp=RR_z_distro[idx_Rp,:] #conditional distro given z, RR_p
# idx_Rn=np.random.choice(len(RR_z_vals),p=RR_z_Rp/np.sum(RR_z_Rp)) #sample RR_next idx
# RR_next=RR_z_vals[idx_Rn]
# return RR_next
# =============================================================================
def sample_RR(HR,RR_prev):
#get cluster
HR_up=(HR_clusters>HR).astype(int)
z=(np.arange(len(HR_clusters)-1))[(np.diff(HR_up)).astype(bool)][0]
#get distros
RR_z_distro=list_pdf_RR_row_sum[z]
RR_z_vals=RR_distro[z,1].reshape(-1)
#sample
idx_Rn=np.random.choice(len(RR_z_vals),p=RR_z_distro) #sample RR_next idx
RR_next=RR_z_vals[idx_Rn]
return RR_next
def sinusoid(t,w,phi,Fs=25):
'''
    Takes numpy arrays of the same size as inputs and returns a sine wave with
    the desired characteristics.
    t: array of time values in seconds. If a scalar is supplied, it is treated
    as the duration of the time series in seconds starting from 0 and is
    sampled at t*Fs points.
    w: array of angular frequencies in radians/second. If a scalar is supplied,
    it is made into a constant array of the same shape as t with value w.
    phi: array of phase values in radians. If a scalar is supplied, it is made
    into a constant array of the same shape as t with value phi.
    Fs: sampling frequency in Hz. Only needed when t is not an array.
returns: t, s=np.sin(w*t+phi)
'''
# Handle Scalar inputs
if not(hasattr(t, "__len__")):
t=np.linspace(0,t,num=t*Fs,endpoint=False)
if not(hasattr(w, "__len__")):
w=w*np.ones(t.shape)
if not(hasattr(phi, "__len__")):
phi=phi*np.ones(t.shape)
# Check shapes are same
if (w.shape!=t.shape and phi.shape!=t.shape):
raise TypeError('Dimensional mismatch between input arrays. Please check the dimensions are same')
s=np.sin(w*t+phi)
return t,s
def HR_func_generator(t1):
arr_HR=np.arange(50,180) # Possible heart rates
# make a array of functions
f1=lambda B,D:((D*win.triang(len(t1))).astype(int)+B).astype(np.float32) #triang
f2=lambda B,D:((D*win.triang(2*len(t1))).astype(int)+B).astype(np.float32)\
[:len(t1)] # 1st half of triang
f3=lambda B,D:((D*win.tukey(len(t1),alpha=(0.3*np.random.rand()+0.7))).astype(int)+B).astype(np.float32) #tukey
f4=lambda B,D:((D*win.tukey(2*len(t1),alpha=(0.3*np.random.rand()+0.7))).astype(int)+B)\
.astype(np.float32)[:len(t1)] # 1st half of tukey
arr_f=np.array(1*[f1]+1*[f2]+1*[f3]+1*[f4]) # possible to change the proportion of functions
#randomly select elements
D_HR=0;HRs=[];D_HR_max=50
while D_HR==0: # we don't want D_HR to be zero so keep resampling
HRs+=[arr_HR[np.random.randint(len(arr_HR))]]
HR_range=np.arange(HRs[0]+1,min([HRs[0]+D_HR_max,180])+1)
HRs+=[HR_range[np.random.randint(len(HR_range))]]
B_HR,D_HR=HRs[0],HRs[1]-HRs[0]
#B_HR,D_HR=arr_B_HR[np.random.randint(len(arr_B_HR))],arr_D_HR[np.random.randint(len(arr_D_HR))]
HR_curve_f=arr_f[np.random.randint(len(arr_f))](B_HR,D_HR) #trend
return HR_curve_f,D_HR
def filtr(X0,Fs=25,filt=True):
nyq=Fs/2;flag=False
if len(X0.shape)==1:
X0=X0.reshape(-1,1)
flag=True
X1 = sig.detrend(X0,type='constant',axis=0); # Subtract mean
if filt:
# filter design used from Ju's code with slight changes for python syntax
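        # 219-tap least-squares FIR band-pass (pass band roughly 0.5-4.5 Hz), applied with filtfilt below for zero phase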
b = sig.firls(219,np.array([0,0.3,0.5,4.5,5,nyq]),np.array([0,0,1,1,0,0]),np.array([10,1,1]),nyq=nyq);
X=np.zeros(X1.shape)
for i in range(X1.shape[1]):
#X[:,i] = sig.convolve(X1[:,i],b,mode='same'); # filtering using convolution, mode='same' returns the centered signal without any delay
X[:,i] = sig.filtfilt(b, [1], X1[:,i])
else:
X=X1
if flag:
X=X.reshape(-1)
#X=sig.detrend(X,type='constant',axis=0); # subtracted mean again to center around x=0 just in case things changed during filtering
return X
def filtr_HR(X0,Fs=25,filt=True):
nyq=Fs/2;flag=False
if len(X0.shape)==1:
X0=X0.reshape(-1,1)
flag=True
X1 = np.copy(X0)#sig.detrend(X0,type='constant',axis=0); # Subtract mean
if filt:
# filter design used from Ju's code with slight changes for python syntax
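        # 219-tap least-squares FIR low-pass (pass band up to ~0.5 Hz) used to smooth the HR trace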
b = sig.firls(219,np.array([0,0.5,1,nyq]),np.array([1,1,0,0]),np.array([1,1]),nyq=nyq);
X=np.zeros(X1.shape)
for i in range(X1.shape[1]):
#X[:,i] = sig.convolve(X1[:,i],b,mode='same'); # filtering using convolution, mode='same' returns the centered signal without any delay
X[:,i] = sig.filtfilt(b, [1], X1[:,i])
else:
X=X1
if flag:
X=X.reshape(-1)
#X=sig.detrend(X,type='constant',axis=0); # subtracted mean again to center around x=0 just in case things changed during filtering
return X
def normalize_AC(data_left_filt,Fn=25,c=0,make_plots=False):
'''
data_left_filt: filtered ppg data
Fn: Sampling frequency in Hz
c: Column (Channel) in the array to be normalized
'''
data_left_filt=1*data_left_filt
flag=False
if len(data_left_filt.shape)==1:
data_left_filt=data_left_filt.reshape((-1,1))
flag=True
prc_l=50
pk_idx_start=2*Fn;pk_idx_end=29*Fn;
y=data_left_filt[pk_idx_start:pk_idx_end,c]
locs,pk_props = sig.find_peaks(y,distance=8,height=0);
pks_l=y[locs]
locs=locs+pk_idx_start;
if make_plots:
plt.figure(); plt.subplot(211);
plt.plot(data_left_filt[:pk_idx_end,c]);plt.plot(locs,pks_l,'r+')
temp_mins_l=[];
#for j=[-5,-4,-3,-2,-1,1,2,3,4,5]
for j in range(-7,0):
temp_mins_l+=[data_left_filt[locs+j,c]];
temp_min_l=np.min(np.array(temp_mins_l),axis=0);
amp_left=np.nanpercentile(pks_l-temp_min_l,prc_l);
#amp_left=np.mean(pks_l-temp_min_l);
data_left_filt[:,c]=data_left_filt[:,c]/amp_left;
if flag:
data_left_filt=data_left_filt.reshape(-1)
return data_left_filt
def form_data(X,Y,len_in,len_out):
'''
X:timeseries with inputs
Y:timeseries with outputs
'''
in_size=int(len_in)
out_size=int(len_out)
step_size=int(len_out/4)#np.max([out_size,4]) #change this as desired
#clip timeseries to nearest multiple of step_size
#lenth1=(((len(X)-in_size)//step_size)*step_size)+in_size
lenth=len(X)
#print(lenth1,lenth)
X,Y=X.T,Y.T # Transpose to make it look like time-series
X,Y=X.reshape(X.shape+(1,)),Y.reshape(Y.shape+(1,)) # add a dimension for concatenation
#print(X.shape,Y.shape)
#idx=np.arange(0,lenth-in_size,step_size)+in_size
idx=step_size*np.arange(0,1+((lenth-in_size)//step_size))+in_size
#print(idx[-1])
#print(lenth,X.shape[1],len(idx),(X.shape[1]-in_size+1)//step_size)
#print(X.shape,Y.shape,HR.shape)
data_X=np.concatenate([X[:,i-in_size:i,:] for i in idx],axis=-1).T
data_Y=np.concatenate([Y[i-out_size:i,:] for i in idx],axis=-1).T
#kernel_size=100;stride=1
#idxHR=np.arange(i-out_size+kernel_size,i,stride)
return data_X,data_Y
def pd_ffill(arr):
df = pd.DataFrame(arr)
df.fillna(method='ffill', axis=0, inplace=True)
out = df.values.reshape(arr.shape)
return out
def add_motion_noise(ppg1,flag=True):
# Noise for SNR=10log10(P_s/P_n)=20 dB => sigma=(ppg_pow**0.5)/10
acc1=0.00*np.random.standard_normal(ppg1.shape) # random normal noise with (0,0.1^2)
if flag: #extreme motion artefacts to be added or not
acc1=acc1+(2*np.random.random_sample(ppg1.shape)-1) # [-2,2] random uniform
#f=lambda z: (3 / (1 + np.exp(-10*z))) # A saturating sigmoid
f=lambda z: 2*np.tanh(2*z)
ppg1=ppg1+f(acc1) #noise added making values [-2,2] or [-4,4] depending on mode
return ppg1,acc1
def extract_rand_noise(noiz_list,lenth):
'''
noiz_list: Available components to choose from
lenth: Desired length of the noise signal
'''
noiz_list=[n for n in noiz_list if len(n)>lenth]
if len(noiz_list)==0:
raise AssertionError('Please use a smaller duration of ppg.')
noiz=noiz_list[np.random.randint(len(noiz_list))]
idx_start=np.random.randint(len(noiz)-lenth)
noiz=noiz[idx_start:idx_start+lenth]
return noiz
def gen_ppg_from_HR(t1,HR_curve_f,D_HR,peak_id,make_plots=False):
'''
    Synthesize a filtered PPG waveform and the matching filtered HR trace.
    t1: time axis in seconds; HR_curve_f: HR trend in BPM sampled on t1.
    D_HR: dynamic range of the HR trend, used to size the NaN/ffill dropout segments.
    peak_id: label of the pulse-shape basis (only referenced by the commented-out alternatives below).
    make_plots: if True, plot the HR, the peak train and the filtered PPG.
'''
# Randomly insert consecutive Nan's and then ffill
perc_change=5;cons_reps=len(t1)//(np.abs(D_HR*2))
#idx=1+np.random.RandomState(seed=seed1).permutation(len(t1)-2-cons_reps)[:int((len(t1)-2)/cons_reps*perc_change/100)]
idx=1+np.random.permutation(len(t1)-2-cons_reps)[:int((len(t1)-2)/cons_reps*perc_change/100)]
try:
idx=np.concatenate([np.arange(i,i+cons_reps) for i in idx])
HR_curve_f[idx]=np.nan
HR_curve1=pd_ffill(HR_curve_f)
except ValueError:
HR_curve1=1*HR_curve_f
# TODO: Removed 0.1 Hz and 0.4 Hz in HRV
#HRV_w1=2*np.pi*0.1;HRV_w2=2*np.pi*0.4
#rand_mix=np.repeat(np.random.random_sample(1+(len(t1)//1500)),1500)[:len(t1)]
#rand_mix=0.55
#print(len(t1),rand_mix)
#gain_list=np.array([0,1,2,2,1,1,1,1])
#HR_curve1+=0.03*((rand_mix*sinusoid(t1,HRV_w1,phi=0)[-1])+\
# ((1-rand_mix)*sinusoid(t1,HRV_w2,phi=0)[-1]))#*gain_list[(300/HR_curve1).astype(int)]
#plt.figure();plt.plot(t1,sinusoid(t1,HRV_w1,phi=0)[-1],t1,sinusoid(t1,HRV_w2,phi=0)[-1])
#HR_curve1,_=add_motion_noise(HR_curve1,flag=False)
#print(HR_curve1.shape,t1.shape)
# =============================================================================
# w1=2*np.pi*(HR_curve1/60)
# #phi_PTT=(0.5*np.pi)/(HR_curve1/60)
# phi_PTT=0
# _,ppg0=sinusoid(t1,w1,phi=phi_PTT)
#
# ppg1=ppg0*2
# PTT=np.random.randint(4,6) #sample a PTT value
# ppg1=np.concatenate([np.zeros(PTT),ppg1[:-1*PTT]])
#
#
# # Peak Detection & check figure for its accuracy
# #out = ecg.ecg(signal=ppg01, sampling_rate=25,show=False)
# #ind=out['rpeaks']
# #arr_peaks=np.zeros(len(ppg01));arr_peaks[ind]=1
# #arr_peaks=(ppg01==np.max(ppg01)).astype(int)
# ind,_=find_peaks(ppg1,distance=6,height=0.9)
#
# =============================================================================
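    # pulse template window: w_pk=25 samples (1 s at Fs=25 Hz), split into w_l samples left and w_r samples right of the peak index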
w_l=12;w_pk=25;w_r=w_pk-w_l-1
n_peaks=int(len(HR_curve1)/5)
#remove terminal pk_locs
#ind=ind[ind>=w_l]
#ind=ind[ind<(len(ppg1)-w_r)]
#sample bunch of peaks using PCA components
path2base='E:/Box Sync/'+\
'AU19/Research/PPG_ECG_proj/data/Wen_data_28_Sep/clean_lrsynced\\'
base_dict = io.loadmat(path2base+"green_ppg_basis.mat")
#base_dict=mdict[peak_id+'_G']['peaks']
eig_vec=base_dict['eig_vec'];eig_val=base_dict['eig_val'].reshape((-1,1))
avg=base_dict['mean'].reshape((-1,1))
k=10;eig_vec=eig_vec[:,:k];eig_val=eig_val[:k]
l_peaks,n_coeff=eig_vec.shape
weights=np.random.random_sample((n_coeff,n_peaks))*(eig_val**0.5)
rand_pks=np.matmul(eig_vec,weights)+avg #form peaks
#rand_pks=rand_pks[int(l_peaks/2)-w_l:int(l_peaks/2)+w_r+1,:] #keep desired width
#OR
# =============================================================================
# # Sample peaks randomly from those available in peak_mat
# peak_mat=peak_dict[peak_id];l_peaks=peak_mat.shape[0]
# rand_pks_idx=np.random.randint(peak_mat.shape[1],size=n_peaks)
# rand_pks=peak_mat[int(l_peaks/2)-w_l:int(l_peaks/2)+w_r+1,rand_pks_idx]
#
# =============================================================================
arr_ppg=np.zeros(len(HR_curve1))
arr_pk=np.zeros(len(HR_curve1))
#TODO: bunch of changes here
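    # each beat adds one pulse template to the PPG at index i and places a small Gaussian bump in the peak train PTT samples earlier (crude pulse-transit-time offset)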
gauss=norm(loc = 0., scale = 1.5).pdf(np.arange(-3,3+1))
PTT=np.random.randint(4,8) #sample a PTT value
#plt.figure();plt.plot(gauss)
RR_prev=0;i=1*w_l;cntr=0
while i < (len(HR_curve1)-w_r-1):
#get next RR
arr_ppg[i-w_l:i+w_r+1]+=rand_pks[:,cntr]
arr_pk[i-3-PTT:i+3+1-PTT]=gauss
#get next RR_interval
#avg_HR=np.mean(HR_curve1[i-w_l:i+w_r+1])
avg_HR=np.mean(HR_curve1[i+w_r+1:i+w_r+1+Fs]) #look ahead HR
RR_next=sample_RR(avg_HR,RR_prev)
i+=RR_next
cntr+=1
# =============================================================================
# #sample bunch of noise using PCA components
# noise_dict=mdict[peak_id+'_G']['noise']
# #DC_list=noise_dict['DC']
# NP_list=noise_dict['NP']
# P_list=noise_dict['P'];N_list=noise_dict['N']
# # Randomly pick one element from each list
# #DC=DC_list[np.random.randint(len(DC_list))]
# NP=extract_rand_noise(NP_list,len(arr_ppg))
# P=extract_rand_noise(P_list,len(arr_ppg))
# N=extract_rand_noise(N_list,len(arr_ppg))
#
# #get random gains for noise signals
# gain_NP=(1-0.5)*np.random.rand()+0.5 #in [0.5,1)
# gain_P,gain_N=gain_NP*np.random.rand(2) # in [0,gain_NP)
# #if j<2:
# # gain_NP,gain_P,gain_N=0,0,0
#
# arr_ppg+=(gain_NP*NP+gain_P*P+gain_N*N) #Add noise
# #arr_ppg=arr_ppg[:,j]*DC
#
#
#
# #arr_ppg_norm=1*arr_ppg
# #plt.figure();plt.plot(arr_ppg);plt.plot(arr_ppg_norm,'--')
# #plt.legend(['actual','AC Normalized'])
# #add motion noise
# #ppg2,acc1=add_motion_noise(arr_ppg,False)
#
#
# ppg2=1*arr_ppg
# ppg2_filt=filtr(ppg2.reshape(-1,1),Fs=25)
# # Normalize AC component
# ppg2_filt=normalize_AC(ppg2_filt,make_plots=False)
# =============================================================================
#TODO: Converted HR to Hz from BPM and made it smoother
ppg2=1*arr_ppg
ppg2_filt=filtr(ppg2.reshape(-1,1),Fs=25)
HR_filt=filtr_HR(HR_curve1/60)
#arr_pk_filt=filtr(arr_pk,Fs=25)
#ppg2=((ppg2+2)/4) # normalize using min-max of [-2,2]
#acc1=((acc1+1)/2) # normalize using min-max of [-2,2]
#plots
if make_plots:
#plt.figure()
#plt.psd(HR_curve1[-Fs*10:], NFFT=Fs*10, Fs=Fs,detrend='constant')
plt.figure()
ax1=plt.subplot(311);ax1.plot(t1,HR_filt)
ax1.set_title('HR');plt.grid(True)
#ax2=plt.subplot(412,sharex=ax1);ax2.plot(t1,ppg1,t1[ind],ppg1[ind],'r+')
#ax2.set_title('PPG_clean with detected peaks');plt.grid(True)
#ax3=plt.subplot(413,sharex=ax1);ax3.plot(t1,acc1)
#ax3.set_title('Acc');plt.grid(True)
ax3=plt.subplot(312,sharex=ax1);ax3.plot(t1,arr_pk)
ax3.set_title('filtered peak train');plt.grid(True)
ax4=plt.subplot(313,sharex=ax1);ax4.plot(t1,ppg2_filt)
ax4.set_title('filtered_PPG');plt.grid(True)
return ppg2_filt,HR_filt
#%% Main
def main(data_size=10000,for_test=False,make_plots=False,save_data=False):
while(True):
t=arr_t[np.random.randint(len(arr_t))] # sample seq. length in s.
# form HR curve
t1=np.linspace(0,t,num=t*Fs,endpoint=False)
HR_curve_f,D_HR=HR_func_generator(t1)
peak_id='white'
ppg1,HR1=gen_ppg_from_HR(t1,HR_curve_f,D_HR,peak_id,make_plots=make_plots)
#print(HR1.shape,ppg1.shape)
len_in=Fs*len_in_s;len_out=1*len_in
data_X,data_Y=form_data(ppg1,HR1,len_in=len_in,len_out=len_out)
#test
if for_test:
if save_data:
mdict={'ppg':ppg1,'HR':HR1}
io.savemat('eig_peaks_s.mat',mdict=mdict)
return ppg1,HR1,data_X,data_Y
if 'dataset_X' not in locals():
dataset_X,dataset_Y=data_X,data_Y
else:
dataset_X=np.concatenate([dataset_X,data_X],axis=0)
dataset_Y=np.concatenate([dataset_Y,data_Y],axis=0)
if (len(dataset_Y)>=data_size):
break
dataset_X=dataset_X[:data_size].astype(np.float32)
dataset_Y=dataset_Y[:data_size].astype(np.float32)
#separate
ratio=0.1;cut_idx=int(ratio*len(dataset_X))
val_data = (dataset_X[:cut_idx],dataset_Y[:cut_idx])
train_data = (dataset_X[cut_idx:],dataset_Y[cut_idx:])
#shuffle
idx = np.random.permutation(cut_idx)
val_data=(val_data[0][idx],val_data[1][idx])
idx = np.random.permutation(len(dataset_Y)-cut_idx)
train_data=(train_data[0][idx],train_data[1][idx])
return train_data,val_data
if __name__=='__main__':
plt.close('all')
X,Y=main()
|
[
"numpy.random.standard_normal",
"scipy.signal.detrend",
"matplotlib.pyplot.grid",
"scipy.io.savemat",
"numpy.nanpercentile",
"numpy.random.rand",
"scipy.signal.filtfilt",
"scipy.io.loadmat",
"numpy.array",
"numpy.linalg.norm",
"numpy.sin",
"numpy.arange",
"numpy.mean",
"matplotlib.pyplot.plot",
"numpy.tanh",
"numpy.diff",
"matplotlib.pyplot.close",
"numpy.linspace",
"numpy.matmul",
"numpy.concatenate",
"scipy.signal.find_peaks",
"pandas.DataFrame",
"numpy.random.permutation",
"numpy.abs",
"numpy.random.random_sample",
"numpy.ones",
"numpy.copy",
"scipy.stats.norm",
"numpy.sum",
"numpy.random.randint",
"numpy.zeros",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.subplot"
] |
[((776, 805), 'numpy.arange', 'np.arange', (['(250)', '(900)', 'len_in_s'], {}), '(250, 900, len_in_s)\n', (785, 805), True, 'import numpy as np\n'), ((1279, 1298), 'numpy.sum', 'np.sum', (['arr'], {'axis': '(0)'}), '(arr, axis=0)\n', (1285, 1298), True, 'import numpy as np\n'), ((1350, 1369), 'numpy.sum', 'np.sum', (['arr'], {'axis': '(1)'}), '(arr, axis=1)\n', (1356, 1369), True, 'import numpy as np\n'), ((4487, 4506), 'numpy.sin', 'np.sin', (['(w * t + phi)'], {}), '(w * t + phi)\n', (4493, 4506), True, 'import numpy as np\n'), ((4561, 4579), 'numpy.arange', 'np.arange', (['(50)', '(180)'], {}), '(50, 180)\n', (4570, 4579), True, 'import numpy as np\n'), ((5122, 5173), 'numpy.array', 'np.array', (['(1 * [f1] + 1 * [f2] + 1 * [f3] + 1 * [f4])'], {}), '(1 * [f1] + 1 * [f2] + 1 * [f3] + 1 * [f4])\n', (5130, 5173), True, 'import numpy as np\n'), ((5919, 5959), 'scipy.signal.detrend', 'sig.detrend', (['X0'], {'type': '"""constant"""', 'axis': '(0)'}), "(X0, type='constant', axis=0)\n", (5930, 5959), True, 'from scipy import signal as sig\n'), ((6830, 6841), 'numpy.copy', 'np.copy', (['X0'], {}), '(X0)\n', (6837, 6841), True, 'import numpy as np\n'), ((8102, 8141), 'scipy.signal.find_peaks', 'sig.find_peaks', (['y'], {'distance': '(8)', 'height': '(0)'}), '(y, distance=8, height=0)\n', (8116, 8141), True, 'from scipy import signal as sig\n'), ((8540, 8583), 'numpy.nanpercentile', 'np.nanpercentile', (['(pks_l - temp_min_l)', 'prc_l'], {}), '(pks_l - temp_min_l, prc_l)\n', (8556, 8583), True, 'import numpy as np\n'), ((9919, 9936), 'pandas.DataFrame', 'pd.DataFrame', (['arr'], {}), '(arr)\n', (9931, 9936), True, 'import pandas as pd\n'), ((13549, 13594), 'scipy.io.loadmat', 'io.loadmat', (["(path2base + 'green_ppg_basis.mat')"], {}), "(path2base + 'green_ppg_basis.mat')\n", (13559, 13594), False, 'from scipy import io\n'), ((14707, 14730), 'numpy.random.randint', 'np.random.randint', (['(4)', '(8)'], {}), '(4, 8)\n', (14724, 14730), True, 'import numpy as np\n'), ((19145, 19175), 'numpy.random.permutation', 'np.random.permutation', (['cut_idx'], {}), '(cut_idx)\n', (19166, 19175), True, 'import numpy as np\n'), ((19412, 19428), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (19421, 19428), True, 'import matplotlib.pyplot as plt\n'), ((4113, 4158), 'numpy.linspace', 'np.linspace', (['(0)', 't'], {'num': '(t * Fs)', 'endpoint': '(False)'}), '(0, t, num=t * Fs, endpoint=False)\n', (4124, 4158), True, 'import numpy as np\n'), ((6195, 6213), 'numpy.zeros', 'np.zeros', (['X1.shape'], {}), '(X1.shape)\n', (6203, 6213), True, 'import numpy as np\n'), ((7103, 7121), 'numpy.zeros', 'np.zeros', (['X1.shape'], {}), '(X1.shape)\n', (7111, 7121), True, 'import numpy as np\n'), ((8224, 8236), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (8234, 8236), True, 'import matplotlib.pyplot as plt\n'), ((8238, 8254), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(211)'], {}), '(211)\n', (8249, 8254), True, 'import matplotlib.pyplot as plt\n'), ((8265, 8305), 'matplotlib.pyplot.plot', 'plt.plot', (['data_left_filt[:pk_idx_end, c]'], {}), '(data_left_filt[:pk_idx_end, c])\n', (8273, 8305), True, 'import matplotlib.pyplot as plt\n'), ((8305, 8332), 'matplotlib.pyplot.plot', 'plt.plot', (['locs', 'pks_l', '"""r+"""'], {}), "(locs, pks_l, 'r+')\n", (8313, 8332), True, 'import matplotlib.pyplot as plt\n'), ((8495, 8516), 'numpy.array', 'np.array', (['temp_mins_l'], {}), '(temp_mins_l)\n', (8503, 8516), True, 'import numpy as np\n'), ((9644, 9706), 'numpy.concatenate', 
'np.concatenate', (['[X[:, i - in_size:i, :] for i in idx]'], {'axis': '(-1)'}), '([X[:, i - in_size:i, :] for i in idx], axis=-1)\n', (9658, 9706), True, 'import numpy as np\n'), ((9716, 9776), 'numpy.concatenate', 'np.concatenate', (['[Y[i - out_size:i, :] for i in idx]'], {'axis': '(-1)'}), '([Y[i - out_size:i, :] for i in idx], axis=-1)\n', (9730, 9776), True, 'import numpy as np\n'), ((10173, 10210), 'numpy.random.standard_normal', 'np.random.standard_normal', (['ppg1.shape'], {}), '(ppg1.shape)\n', (10198, 10210), True, 'import numpy as np\n'), ((11370, 11386), 'numpy.abs', 'np.abs', (['(D_HR * 2)'], {}), '(D_HR * 2)\n', (11376, 11386), True, 'import numpy as np\n'), ((13860, 13903), 'numpy.random.random_sample', 'np.random.random_sample', (['(n_coeff, n_peaks)'], {}), '((n_coeff, n_peaks))\n', (13883, 13903), True, 'import numpy as np\n'), ((13932, 13959), 'numpy.matmul', 'np.matmul', (['eig_vec', 'weights'], {}), '(eig_vec, weights)\n', (13941, 13959), True, 'import numpy as np\n'), ((14679, 14699), 'numpy.arange', 'np.arange', (['(-3)', '(3 + 1)'], {}), '(-3, 3 + 1)\n', (14688, 14699), True, 'import numpy as np\n'), ((15065, 15113), 'numpy.mean', 'np.mean', (['HR_curve1[i + w_r + 1:i + w_r + 1 + Fs]'], {}), '(HR_curve1[i + w_r + 1:i + w_r + 1 + Fs])\n', (15072, 15113), True, 'import numpy as np\n'), ((17050, 17062), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (17060, 17062), True, 'import matplotlib.pyplot as plt\n'), ((17076, 17092), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(311)'], {}), '(311)\n', (17087, 17092), True, 'import matplotlib.pyplot as plt\n'), ((17143, 17157), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (17151, 17157), True, 'import matplotlib.pyplot as plt\n'), ((17432, 17460), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(312)'], {'sharex': 'ax1'}), '(312, sharex=ax1)\n', (17443, 17460), True, 'import matplotlib.pyplot as plt\n'), ((17526, 17540), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (17534, 17540), True, 'import matplotlib.pyplot as plt\n'), ((17554, 17582), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(313)'], {'sharex': 'ax1'}), '(313, sharex=ax1)\n', (17565, 17582), True, 'import matplotlib.pyplot as plt\n'), ((17644, 17658), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (17652, 17658), True, 'import matplotlib.pyplot as plt\n'), ((17919, 17964), 'numpy.linspace', 'np.linspace', (['(0)', 't'], {'num': '(t * Fs)', 'endpoint': '(False)'}), '(0, t, num=t * Fs, endpoint=False)\n', (17930, 17964), True, 'import numpy as np\n'), ((4203, 4219), 'numpy.ones', 'np.ones', (['t.shape'], {}), '(t.shape)\n', (4210, 4219), True, 'import numpy as np\n'), ((4275, 4291), 'numpy.ones', 'np.ones', (['t.shape'], {}), '(t.shape)\n', (4282, 4291), True, 'import numpy as np\n'), ((6099, 6135), 'numpy.array', 'np.array', (['[0, 0.3, 0.5, 4.5, 5, nyq]'], {}), '([0, 0.3, 0.5, 4.5, 5, nyq])\n', (6107, 6135), True, 'import numpy as np\n'), ((6131, 6159), 'numpy.array', 'np.array', (['[0, 0, 1, 1, 0, 0]'], {}), '([0, 0, 1, 1, 0, 0])\n', (6139, 6159), True, 'import numpy as np\n'), ((6155, 6175), 'numpy.array', 'np.array', (['[10, 1, 1]'], {}), '([10, 1, 1])\n', (6163, 6175), True, 'import numpy as np\n'), ((6423, 6453), 'scipy.signal.filtfilt', 'sig.filtfilt', (['b', '[1]', 'X1[:, i]'], {}), '(b, [1], X1[:, i])\n', (6435, 6453), True, 'from scipy import signal as sig\n'), ((7022, 7048), 'numpy.array', 'np.array', (['[0, 0.5, 1, nyq]'], {}), '([0, 0.5, 1, nyq])\n', (7030, 7048), True, 
'import numpy as np\n'), ((7046, 7068), 'numpy.array', 'np.array', (['[1, 1, 0, 0]'], {}), '([1, 1, 0, 0])\n', (7054, 7068), True, 'import numpy as np\n'), ((7066, 7082), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (7074, 7082), True, 'import numpy as np\n'), ((7331, 7361), 'scipy.signal.filtfilt', 'sig.filtfilt', (['b', '[1]', 'X1[:, i]'], {}), '(b, [1], X1[:, i])\n', (7343, 7361), True, 'from scipy import signal as sig\n'), ((9448, 9496), 'numpy.arange', 'np.arange', (['(0)', '(1 + (lenth - in_size) // step_size)'], {}), '(0, 1 + (lenth - in_size) // step_size)\n', (9457, 9496), True, 'import numpy as np\n'), ((10478, 10492), 'numpy.tanh', 'np.tanh', (['(2 * z)'], {}), '(2 * z)\n', (10485, 10492), True, 'import numpy as np\n'), ((14647, 14671), 'scipy.stats.norm', 'norm', ([], {'loc': '(0.0)', 'scale': '(1.5)'}), '(loc=0.0, scale=1.5)\n', (14651, 14671), False, 'from scipy.stats import norm\n'), ((18633, 18676), 'numpy.concatenate', 'np.concatenate', (['[dataset_X, data_X]'], {'axis': '(0)'}), '([dataset_X, data_X], axis=0)\n', (18647, 18676), True, 'import numpy as np\n'), ((18698, 18741), 'numpy.concatenate', 'np.concatenate', (['[dataset_Y, data_Y]'], {'axis': '(0)'}), '([dataset_Y, data_Y], axis=0)\n', (18712, 18741), True, 'import numpy as np\n'), ((1419, 1482), 'numpy.linalg.norm', 'np.linalg.norm', (['(list_pdf_RR_row_sum[k] - list_pdf_RR_col_sum[k])'], {}), '(list_pdf_RR_row_sum[k] - list_pdf_RR_col_sum[k])\n', (1433, 1482), True, 'import numpy as np\n'), ((11648, 11675), 'numpy.arange', 'np.arange', (['i', '(i + cons_reps)'], {}), '(i, i + cons_reps)\n', (11657, 11675), True, 'import numpy as np\n'), ((18408, 18450), 'scipy.io.savemat', 'io.savemat', (['"""eig_peaks_s.mat"""'], {'mdict': 'mdict'}), "('eig_peaks_s.mat', mdict=mdict)\n", (18418, 18450), False, 'from scipy import io\n'), ((3013, 3027), 'numpy.diff', 'np.diff', (['HR_up'], {}), '(HR_up)\n', (3020, 3027), True, 'import numpy as np\n'), ((10329, 10364), 'numpy.random.random_sample', 'np.random.random_sample', (['ppg1.shape'], {}), '(ppg1.shape)\n', (10352, 10364), True, 'import numpy as np\n'), ((4897, 4913), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (4911, 4913), True, 'import numpy as np\n'), ((5016, 5032), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (5030, 5032), True, 'import numpy as np\n')]
|
import numpy as np
import pytest
from sklego.neighbors import BayesianKernelDensityClassifier
from sklego.common import flatten
from sklego.testing import check_shape_remains_same_classifier
from tests.conftest import nonmeta_checks, general_checks, estimator_checks
@pytest.fixture()
def simple_dataset():
# Two linearly separable mvn should have a 100% prediction accuracy
x = np.concatenate(
[np.random.normal(-10, 1, (100, 2)), np.random.normal(10, 1, (100, 2))]
)
y = np.concatenate([np.zeros(100), np.ones(100)])
return x, y
@pytest.mark.parametrize(
"test_fn",
flatten(
[
nonmeta_checks,
general_checks,
estimator_checks.check_classifier_data_not_an_array,
estimator_checks.check_classifiers_one_label,
estimator_checks.check_classifiers_classes,
estimator_checks.check_classifiers_train,
estimator_checks.check_supervised_y_2d,
estimator_checks.check_supervised_y_no_nan,
estimator_checks.check_estimators_unfitted,
check_shape_remains_same_classifier,
]
),
)
def test_estimator_checks(test_fn):
test_fn(BayesianKernelDensityClassifier.__name__, BayesianKernelDensityClassifier())
def test_trivial_classification(simple_dataset):
x, y = simple_dataset
model = BayesianKernelDensityClassifier().fit(x, y)
assert (model.predict(x) == y).all()
|
[
"numpy.random.normal",
"numpy.ones",
"sklego.common.flatten",
"numpy.zeros",
"pytest.fixture",
"sklego.neighbors.BayesianKernelDensityClassifier"
] |
[((272, 288), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (286, 288), False, 'import pytest\n'), ((610, 1028), 'sklego.common.flatten', 'flatten', (['[nonmeta_checks, general_checks, estimator_checks.\n check_classifier_data_not_an_array, estimator_checks.\n check_classifiers_one_label, estimator_checks.check_classifiers_classes,\n estimator_checks.check_classifiers_train, estimator_checks.\n check_supervised_y_2d, estimator_checks.check_supervised_y_no_nan,\n estimator_checks.check_estimators_unfitted,\n check_shape_remains_same_classifier]'], {}), '([nonmeta_checks, general_checks, estimator_checks.\n check_classifier_data_not_an_array, estimator_checks.\n check_classifiers_one_label, estimator_checks.check_classifiers_classes,\n estimator_checks.check_classifiers_train, estimator_checks.\n check_supervised_y_2d, estimator_checks.check_supervised_y_no_nan,\n estimator_checks.check_estimators_unfitted,\n check_shape_remains_same_classifier])\n', (617, 1028), False, 'from sklego.common import flatten\n'), ((1240, 1273), 'sklego.neighbors.BayesianKernelDensityClassifier', 'BayesianKernelDensityClassifier', ([], {}), '()\n', (1271, 1273), False, 'from sklego.neighbors import BayesianKernelDensityClassifier\n'), ((416, 450), 'numpy.random.normal', 'np.random.normal', (['(-10)', '(1)', '(100, 2)'], {}), '(-10, 1, (100, 2))\n', (432, 450), True, 'import numpy as np\n'), ((452, 485), 'numpy.random.normal', 'np.random.normal', (['(10)', '(1)', '(100, 2)'], {}), '(10, 1, (100, 2))\n', (468, 485), True, 'import numpy as np\n'), ((517, 530), 'numpy.zeros', 'np.zeros', (['(100)'], {}), '(100)\n', (525, 530), True, 'import numpy as np\n'), ((532, 544), 'numpy.ones', 'np.ones', (['(100)'], {}), '(100)\n', (539, 544), True, 'import numpy as np\n'), ((1364, 1397), 'sklego.neighbors.BayesianKernelDensityClassifier', 'BayesianKernelDensityClassifier', ([], {}), '()\n', (1395, 1397), False, 'from sklego.neighbors import BayesianKernelDensityClassifier\n')]
|
from itertools import cycle
from toolz.itertoolz import concatv, take
import numpy as np
import pytest
from tensorforce.environments import Environment
from bad_seeds.simple.bad_seeds_01 import BadSeeds01, count_measurements
def test_initialization():
bad_seeds_01_env = Environment.create(
environment=BadSeeds01, seed_count=10, bad_seed_count=3, max_episode_length=100
)
assert bad_seeds_01_env.state.shape == (100, 10)
assert len(bad_seeds_01_env.bad_seeds) == 3
assert len(bad_seeds_01_env.good_seeds) == 7
def test_bad_initialization():
with pytest.raises(ValueError):
BadSeeds01(seed_count=3, bad_seed_count=10, max_episode_length=100)
def test_count_measurements():
state = np.array(
[
[0.0, 0.5, 0.0, 0.0],
[0.0, 0.0, -0.5, 0.0],
[0.5, 0.0, 0.0, 0.0],
[0.0, -0.5, 0.0, 0.0],
[0.0, 0.0, 0.5, 0.0],
[0.0, 0.5, 0.0, 0.0],
]
)
measurement_counts, measured_seed_counts = count_measurements(
time_steps_by_seeds_state=state
)
assert np.all(measurement_counts == np.array([1, 3, 2, 0]))
assert measured_seed_counts == 3
def test_play_the_game_badly():
bad_seeds_01_env = BadSeeds01(seed_count=5, bad_seed_count=3, max_episode_length=5)
# measure all seeds but the last seed
for time_i, seed_i in enumerate(range(len(bad_seeds_01_env.all_seeds) - 1)):
next_state, terminal, reward = bad_seeds_01_env.execute(actions=seed_i)
assert next_state[time_i, seed_i] != 0.0
assert terminal is False
assert reward == 0.0
# measurement_counts looks like this
# time_i = 0: [1 0 0 0 0 ]
# time_i = 1: [1 1 0 0 0 ]
# ...
# time_i = 3: [1 1 1 1 0 ]
measurement_counts, measured_seed_counts = count_measurements(
bad_seeds_01_env.state
)
for seed_j in range(seed_i):
assert measurement_counts[0, seed_j] == 1
assert measured_seed_counts == (seed_i + 1)
# measure the first seed again
# no reward because the last seed is never measured
next_state, terminal, reward = bad_seeds_01_env.execute(actions=0)
assert next_state[len(bad_seeds_01_env.all_seeds) - 1, 0] != 0.0
assert terminal is True
assert reward == 0.0
measurement_counts, measured_seed_counts = count_measurements(
bad_seeds_01_env.state
)
assert np.all(measurement_counts == np.array([[2, 1, 1, 1, 0]]))
assert measured_seed_counts == 4
def test_play_the_game_less_badly():
bad_seeds_01_env = BadSeeds01(
seed_count=5, bad_seed_count=3, max_episode_length=2 * 2 + 3 * 3 + 1
)
# measure the good seeds twice
# measure the bad seeds three times
for time_i, seed_i in enumerate(
concatv(
take(
n=2 * len(bad_seeds_01_env.good_seeds),
seq=cycle(bad_seeds_01_env.good_seed_indices),
),
take(
n=3 * len(bad_seeds_01_env.bad_seeds),
seq=cycle(bad_seeds_01_env.bad_seed_indices),
),
)
):
next_state, terminal, reward = bad_seeds_01_env.execute(actions=seed_i)
assert next_state[time_i, seed_i] != 0.0
assert terminal is False
assert reward == 0.0
# measure the first good seed again
next_state, terminal, reward = bad_seeds_01_env.execute(
actions=bad_seeds_01_env.good_seed_indices[0]
)
assert next_state[-1, bad_seeds_01_env.good_seed_indices[0]] != 0.0
assert terminal is True
# reward is the number of times the least-measured seed was measured
assert reward == 2.0
|
[
"bad_seeds.simple.bad_seeds_01.count_measurements",
"tensorforce.environments.Environment.create",
"itertools.cycle",
"numpy.array",
"pytest.raises",
"bad_seeds.simple.bad_seeds_01.BadSeeds01"
] |
[((279, 382), 'tensorforce.environments.Environment.create', 'Environment.create', ([], {'environment': 'BadSeeds01', 'seed_count': '(10)', 'bad_seed_count': '(3)', 'max_episode_length': '(100)'}), '(environment=BadSeeds01, seed_count=10, bad_seed_count=3,\n max_episode_length=100)\n', (297, 382), False, 'from tensorforce.environments import Environment\n'), ((734, 882), 'numpy.array', 'np.array', (['[[0.0, 0.5, 0.0, 0.0], [0.0, 0.0, -0.5, 0.0], [0.5, 0.0, 0.0, 0.0], [0.0, -\n 0.5, 0.0, 0.0], [0.0, 0.0, 0.5, 0.0], [0.0, 0.5, 0.0, 0.0]]'], {}), '([[0.0, 0.5, 0.0, 0.0], [0.0, 0.0, -0.5, 0.0], [0.5, 0.0, 0.0, 0.0],\n [0.0, -0.5, 0.0, 0.0], [0.0, 0.0, 0.5, 0.0], [0.0, 0.5, 0.0, 0.0]])\n', (742, 882), True, 'import numpy as np\n'), ((1024, 1075), 'bad_seeds.simple.bad_seeds_01.count_measurements', 'count_measurements', ([], {'time_steps_by_seeds_state': 'state'}), '(time_steps_by_seeds_state=state)\n', (1042, 1075), False, 'from bad_seeds.simple.bad_seeds_01 import BadSeeds01, count_measurements\n'), ((1248, 1312), 'bad_seeds.simple.bad_seeds_01.BadSeeds01', 'BadSeeds01', ([], {'seed_count': '(5)', 'bad_seed_count': '(3)', 'max_episode_length': '(5)'}), '(seed_count=5, bad_seed_count=3, max_episode_length=5)\n', (1258, 1312), False, 'from bad_seeds.simple.bad_seeds_01 import BadSeeds01, count_measurements\n'), ((2393, 2435), 'bad_seeds.simple.bad_seeds_01.count_measurements', 'count_measurements', (['bad_seeds_01_env.state'], {}), '(bad_seeds_01_env.state)\n', (2411, 2435), False, 'from bad_seeds.simple.bad_seeds_01 import BadSeeds01, count_measurements\n'), ((2618, 2703), 'bad_seeds.simple.bad_seeds_01.BadSeeds01', 'BadSeeds01', ([], {'seed_count': '(5)', 'bad_seed_count': '(3)', 'max_episode_length': '(2 * 2 + 3 * 3 + 1)'}), '(seed_count=5, bad_seed_count=3, max_episode_length=2 * 2 + 3 * 3 + 1\n )\n', (2628, 2703), False, 'from bad_seeds.simple.bad_seeds_01 import BadSeeds01, count_measurements\n'), ((586, 611), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (599, 611), False, 'import pytest\n'), ((621, 688), 'bad_seeds.simple.bad_seeds_01.BadSeeds01', 'BadSeeds01', ([], {'seed_count': '(3)', 'bad_seed_count': '(10)', 'max_episode_length': '(100)'}), '(seed_count=3, bad_seed_count=10, max_episode_length=100)\n', (631, 688), False, 'from bad_seeds.simple.bad_seeds_01 import BadSeeds01, count_measurements\n'), ((1852, 1894), 'bad_seeds.simple.bad_seeds_01.count_measurements', 'count_measurements', (['bad_seeds_01_env.state'], {}), '(bad_seeds_01_env.state)\n', (1870, 1894), False, 'from bad_seeds.simple.bad_seeds_01 import BadSeeds01, count_measurements\n'), ((1130, 1152), 'numpy.array', 'np.array', (['[1, 3, 2, 0]'], {}), '([1, 3, 2, 0])\n', (1138, 1152), True, 'import numpy as np\n'), ((2490, 2517), 'numpy.array', 'np.array', (['[[2, 1, 1, 1, 0]]'], {}), '([[2, 1, 1, 1, 0]])\n', (2498, 2517), True, 'import numpy as np\n'), ((2937, 2978), 'itertools.cycle', 'cycle', (['bad_seeds_01_env.good_seed_indices'], {}), '(bad_seeds_01_env.good_seed_indices)\n', (2942, 2978), False, 'from itertools import cycle\n'), ((3088, 3128), 'itertools.cycle', 'cycle', (['bad_seeds_01_env.bad_seed_indices'], {}), '(bad_seeds_01_env.bad_seed_indices)\n', (3093, 3128), False, 'from itertools import cycle\n')]
|
from pathlib import Path
import numpy as np
import torch
import subprocess
import argparse
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("source_file", help="Absolute path to the Pytorch weights file to convert")
args = parser.parse_args()
source_file = Path(args.source_file)
target_folder = source_file.parent
weights = torch.load(str(source_file), map_location='cpu')
nps = {}
for k, v in weights.items():
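        # Rename legacy layer-norm parameter keys: "gamma" -> "weight", "beta" -> "bias"
        # (presumably the naming expected by the downstream Rust loader).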
k = k.replace("gamma", "weight").replace("beta", "bias")
nps[k] = np.ascontiguousarray(v.cpu().numpy())
np.savez(target_folder / 'model.npz', **nps)
source = str(target_folder / 'model.npz')
target = str(target_folder / 'rust_model.ot')
toml_location = (Path(__file__).resolve() / '..' / '..' / 'Cargo.toml').resolve()
subprocess.run(
['cargo', 'run', '--bin=convert-tensor', '--manifest-path=%s' % toml_location, '--', source, target],
)
|
[
"numpy.savez",
"subprocess.run",
"argparse.ArgumentParser",
"pathlib.Path"
] |
[((132, 157), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (155, 157), False, 'import argparse\n'), ((308, 330), 'pathlib.Path', 'Path', (['args.source_file'], {}), '(args.source_file)\n', (312, 330), False, 'from pathlib import Path\n'), ((606, 650), 'numpy.savez', 'np.savez', (["(target_folder / 'model.npz')"], {}), "(target_folder / 'model.npz', **nps)\n", (614, 650), True, 'import numpy as np\n'), ((839, 960), 'subprocess.run', 'subprocess.run', (["['cargo', 'run', '--bin=convert-tensor', '--manifest-path=%s' %\n toml_location, '--', source, target]"], {}), "(['cargo', 'run', '--bin=convert-tensor', \n '--manifest-path=%s' % toml_location, '--', source, target])\n", (853, 960), False, 'import subprocess\n'), ((770, 784), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (774, 784), False, 'from pathlib import Path\n')]
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import tvm
from tvm import te
import tvm.testing
from tvm import relay
from tvm import autotvm
from tvm import topi
from tvm.relay.backend import te_compiler
from tvm.relay.testing import run_infer_type
from tvm.relay.testing.temp_op_attr import TempOpAttr
@autotvm.register_topi_compute("test/conv2d_1")
def _compute_conv2d_1(cfg, input, filter, strides, padding, dilation, out_dtype):
return topi.nn.conv2d_nchw(input, filter, strides, padding, dilation, out_dtype)
@autotvm.register_topi_schedule("test/conv2d_1")
def _schedule_conv2d_1(cfg, outs):
return topi.generic.schedule_conv2d_nchw(outs)
@autotvm.register_topi_compute("test/conv2d_2")
def _compute_conv2d_2(cfg, input, filter, strides, padding, dilation, out_dtype):
return topi.nn.conv2d_nchw(input, filter, strides, padding, dilation, out_dtype)
@autotvm.register_topi_schedule("test/conv2d_2")
def _schedule_conv2d_2(cfg, outs):
return topi.generic.schedule_conv2d_nchw(outs)
def _compute_conv2d_3(input, filter, strides, padding, dilation, out_dtype):
return topi.nn.conv2d_nchw(input, filter, strides, padding, dilation, out_dtype)
def _schedule_conv2d_3(outs):
return topi.generic.schedule_conv2d_nchw(outs)
@tvm.target.override_native_generic_func("test_conv2d_strategy")
def _tmp_strategy(attrs, inputs, out_type, target):
strategy = relay.op.OpStrategy()
strategy.add_implementation(
relay.op.strategy.wrap_compute_conv2d(_compute_conv2d_1),
relay.op.strategy.wrap_topi_schedule(_schedule_conv2d_1),
name="conv2d_1",
plevel=10,
)
strategy.add_implementation(
relay.op.strategy.wrap_compute_conv2d(_compute_conv2d_2),
relay.op.strategy.wrap_topi_schedule(_schedule_conv2d_2),
name="conv2d_2",
plevel=15,
)
ic = inputs[0].shape[1]
with tvm.te.SpecializedCondition(ic >= 16):
strategy.add_implementation(
relay.op.strategy.wrap_compute_conv2d(_compute_conv2d_3),
relay.op.strategy.wrap_topi_schedule(_schedule_conv2d_3),
name="conv2d_3",
plevel=20,
)
return strategy
def _create_record(task_name, dshape, wshape, target, cost):
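    # Build a synthetic AutoTVM log record (MeasureInput, MeasureResult) with the given
    # cost, so apply_history_best in the tests below can prefer one implementation.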
args = [te.placeholder(dshape), te.placeholder(wshape), (1, 1), (1, 1, 1, 1), (1, 1), "float32"]
task = autotvm.task.create(task_name, args, target)
cfg = autotvm.ConfigEntity(0, None, {}, [])
cfg.cost = cost
inp = autotvm.MeasureInput(target=target, task=task, config=cfg)
result = autotvm.MeasureResult(costs=(cost,), error_no=0, all_cost=-1, timestamp=-1)
return (inp, result)
def test_get_valid_implementations():
target = tvm.target.Target("llvm")
def _get_impls(dshape, wshape):
data = relay.var("data", shape=dshape)
weight = relay.var("wshape", shape=wshape)
out = relay.nn.conv2d(data, weight, padding=(1, 1))
out = run_infer_type(out)
return relay.backend.te_compiler.get_valid_implementations(
relay.op.get("nn.conv2d"),
out.attrs,
[te.placeholder(dshape), te.placeholder(wshape)],
out.checked_type,
target,
)
with TempOpAttr("nn.conv2d", "FTVMStrategy", _tmp_strategy):
impls = _get_impls((1, 8, 7, 7), (32, 8, 3, 3))
assert len(impls) == 2
impls = _get_impls((1, 16, 7, 7), (32, 16, 3, 3))
assert len(impls) == 3
def test_select_implementation():
target = tvm.target.Target("llvm")
def _select_impl(dshape, wshape, use_autotvm=False):
data = relay.var("data", shape=dshape)
weight = relay.var("wshape", shape=wshape)
out = relay.nn.conv2d(data, weight, padding=(1, 1))
out = run_infer_type(out)
return relay.backend.te_compiler.select_implementation(
relay.op.get("nn.conv2d"),
out.attrs,
[te.placeholder(dshape), te.placeholder(wshape)],
out.checked_type,
target,
use_autotvm,
)
with TempOpAttr("nn.conv2d", "FTVMStrategy", _tmp_strategy):
impl, _ = _select_impl((1, 8, 7, 7), (32, 8, 3, 3))
assert impl.name == "conv2d_2"
impl, _ = _select_impl((1, 8, 7, 7), (32, 8, 3, 3), True)
assert impl.name == "conv2d_2"
impl, _ = _select_impl((1, 16, 7, 7), (32, 16, 3, 3))
assert impl.name == "conv2d_3"
impl, _ = _select_impl((1, 16, 7, 7), (32, 16, 3, 3), True)
assert impl.name == "conv2d_3"
# add autotvm record
records = []
records.append(_create_record("test/conv2d_1", (1, 8, 7, 7), (32, 8, 3, 3), target, 0.5))
records.append(_create_record("test/conv2d_1", (1, 16, 7, 7), (32, 16, 3, 3), target, 1.0))
with target:
with autotvm.apply_history_best(records):
impl, _ = _select_impl((1, 8, 7, 7), (32, 8, 3, 3), True)
assert impl.name == "conv2d_1"
impl, _ = _select_impl((1, 16, 7, 7), (32, 16, 3, 3), True)
assert impl.name == "conv2d_1"
records.append(_create_record("test/conv2d_2", (1, 8, 7, 7), (32, 8, 3, 3), target, 0.2))
records.append(_create_record("test/conv2d_1", (1, 16, 7, 7), (32, 16, 3, 3), target, 1.2))
with target:
with autotvm.apply_history_best(records):
impl, _ = _select_impl((1, 8, 7, 7), (32, 8, 3, 3), True)
assert impl.name == "conv2d_2"
impl, _ = _select_impl((1, 16, 7, 7), (32, 16, 3, 3), True)
assert impl.name == "conv2d_1"
def test_te_compiler():
tec = relay.backend.te_compiler.get()
def get_func(shape):
x = relay.var("x", shape=shape)
y = relay.add(x, x)
z = relay.add(y, x)
f = relay.Function([x], z)
mod = tvm.IRModule.from_expr(f)
mod = relay.transform.InferType()(mod)
return mod["main"]
z1 = tec.lower(get_func((10,)), "llvm")
z2 = tec.lower(get_func((10,)), "llvm")
z3 = tec.lower(get_func(()), "llvm")
assert z1.same_as(z2)
assert not z3.same_as(z1)
if tvm.testing.device_enabled("cuda"):
z4 = tec.lower(get_func(()), "cuda")
assert not z3.same_as(z4)
# Test JIT target
for target in ["llvm"]:
dev = tvm.device(target)
if tvm.testing.device_enabled(target):
f = tec.jit(get_func((10,)), target)
x = tvm.nd.array(np.ones(10).astype("float32"), device=dev)
y = tvm.nd.empty((10,), device=dev)
f(x, y)
tvm.testing.assert_allclose(y.numpy(), x.numpy() * 3)
# Note: Once the te compiler is removed, we should keep this test so that
# we make sure that opt_level=0 passes are being called correctly.
def test_compile_placeholder_bypass():
te_compiler = relay.backend.te_compiler.get()
x = relay.var("x", shape=(2, 3))
y = relay.var("y", shape=(2, 3))
z = relay.var("z", shape=(2, 3))
result = relay.Tuple([x, relay.op.concatenate([y, z], axis=0)])
func = relay.Function(relay.analysis.free_vars(result), result)
with tvm.transform.PassContext(opt_level=0):
graph, lib, params = relay.build(tvm.IRModule.from_expr(func), "llvm")
def test_compile_injective_with_tuple():
x = relay.var("x", shape=(2, 3))
y = relay.var("y", shape=(2, 3))
x_transpose = relay.transpose(x)
output = relay.Tuple([x_transpose, y])
func = relay.Function([x, y], output)
relay.build(tvm.IRModule.from_expr(func), "llvm")
def test_compile_tuple_dup():
x = relay.var("data", shape=(16, 16))
log = relay.log(x)
output = relay.Tuple([log, log])
f = relay.Function([x], output)
relay.build(tvm.IRModule.from_expr(f), "llvm")
def test_compile_full():
# Shape calculations can happen in int64. The test checks that full operator
# can handle when shapes are not int32
shape = (
tvm.tir.IntImm("int32", 1),
tvm.tir.IntImm("int64", 16),
tvm.tir.IntImm("int64", 16),
tvm.tir.IntImm("int32", 64),
)
output = relay.full(relay.const(0, "int32"), shape=shape, dtype="int32")
f = relay.Function([], output)
mod = tvm.IRModule.from_expr(f)
mod = relay.qnn.transform.CanonicalizeOps()(mod)
relay.build(mod, "llvm")
def test_compile_nhwc_pack():
data = relay.var("data", shape=(1, 1, 1, 1024), dtype="uint8")
weight = relay.var("weight", shape=(1, 1, 1024, 1001), dtype="int8")
p2 = relay.var("p2", shape=(1, 1, 1, 1), dtype="int32")
conv = relay.nn.conv2d(
data,
weight,
kernel_size=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
out_dtype="int32",
)
multiply = relay.multiply(relay.const(-22, dtype="int32"), p2)
tile = relay.tile(multiply, reps=(1, 1, 1, 1001))
subtract = relay.subtract(conv, tile)
func = subtract
mod = relay.Function(relay.analysis.free_vars(func), func)
relay.build(mod, target="llvm")
def test_compile_propogate_hash():
data = relay.var("data", shape=(1, 1, 1, 1024), dtype="uint8")
weight = relay.var("weight", shape=(1, 1, 1024, 1001), dtype="int8")
p2 = relay.var("p2", shape=(1, 1, 1, 1), dtype="int32")
conv = relay.nn.conv2d(
data,
weight,
kernel_size=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
out_dtype="int32",
)
multiply = relay.multiply(relay.const(-22, dtype="int32"), p2)
tile = relay.tile(multiply, reps=(1, 1, 1, 1001))
subtract = relay.subtract(conv, tile)
func = subtract
mod = tvm.IRModule.from_expr(relay.Function(relay.analysis.free_vars(func), func))
vm = relay.vm.VMCompiler()
opt_mod, _ = vm.optimize(mod, target="llvm")
for f in opt_mod.functions.values():
assert "hash" in f.attrs.keys()
if __name__ == "__main__":
test_get_valid_implementations()
test_select_implementation()
test_te_compiler()
test_compile_placeholder_bypass()
test_compile_injective_with_tuple()
test_compile_tuple_dup()
test_compile_full()
test_compile_nhwc_pack()
|
[
"tvm.autotvm.ConfigEntity",
"tvm.target.override_native_generic_func",
"tvm.relay.tile",
"tvm.relay.backend.te_compiler.get",
"tvm.relay.Tuple",
"tvm.autotvm.apply_history_best",
"tvm.relay.subtract",
"tvm.autotvm.MeasureInput",
"tvm.relay.analysis.free_vars",
"tvm.testing.device_enabled",
"tvm.relay.Function",
"tvm.autotvm.register_topi_schedule",
"tvm.relay.vm.VMCompiler",
"tvm.relay.nn.conv2d",
"tvm.relay.add",
"tvm.te.placeholder",
"tvm.nd.empty",
"tvm.transform.PassContext",
"tvm.autotvm.MeasureResult",
"tvm.relay.log",
"tvm.target.Target",
"tvm.IRModule.from_expr",
"tvm.relay.op.concatenate",
"tvm.relay.build",
"tvm.autotvm.register_topi_compute",
"tvm.relay.op.OpStrategy",
"numpy.ones",
"tvm.topi.nn.conv2d_nchw",
"tvm.relay.transpose",
"tvm.te.SpecializedCondition",
"tvm.relay.testing.temp_op_attr.TempOpAttr",
"tvm.relay.op.strategy.wrap_topi_schedule",
"tvm.topi.generic.schedule_conv2d_nchw",
"tvm.relay.qnn.transform.CanonicalizeOps",
"tvm.relay.op.strategy.wrap_compute_conv2d",
"tvm.relay.testing.run_infer_type",
"tvm.autotvm.task.create",
"tvm.relay.var",
"tvm.tir.IntImm",
"tvm.relay.const",
"tvm.relay.transform.InferType",
"tvm.device",
"tvm.relay.op.get"
] |
[((1064, 1110), 'tvm.autotvm.register_topi_compute', 'autotvm.register_topi_compute', (['"""test/conv2d_1"""'], {}), "('test/conv2d_1')\n", (1093, 1110), False, 'from tvm import autotvm\n'), ((1281, 1328), 'tvm.autotvm.register_topi_schedule', 'autotvm.register_topi_schedule', (['"""test/conv2d_1"""'], {}), "('test/conv2d_1')\n", (1311, 1328), False, 'from tvm import autotvm\n'), ((1418, 1464), 'tvm.autotvm.register_topi_compute', 'autotvm.register_topi_compute', (['"""test/conv2d_2"""'], {}), "('test/conv2d_2')\n", (1447, 1464), False, 'from tvm import autotvm\n'), ((1635, 1682), 'tvm.autotvm.register_topi_schedule', 'autotvm.register_topi_schedule', (['"""test/conv2d_2"""'], {}), "('test/conv2d_2')\n", (1665, 1682), False, 'from tvm import autotvm\n'), ((2019, 2082), 'tvm.target.override_native_generic_func', 'tvm.target.override_native_generic_func', (['"""test_conv2d_strategy"""'], {}), "('test_conv2d_strategy')\n", (2058, 2082), False, 'import tvm\n'), ((1204, 1277), 'tvm.topi.nn.conv2d_nchw', 'topi.nn.conv2d_nchw', (['input', 'filter', 'strides', 'padding', 'dilation', 'out_dtype'], {}), '(input, filter, strides, padding, dilation, out_dtype)\n', (1223, 1277), False, 'from tvm import topi\n'), ((1375, 1414), 'tvm.topi.generic.schedule_conv2d_nchw', 'topi.generic.schedule_conv2d_nchw', (['outs'], {}), '(outs)\n', (1408, 1414), False, 'from tvm import topi\n'), ((1558, 1631), 'tvm.topi.nn.conv2d_nchw', 'topi.nn.conv2d_nchw', (['input', 'filter', 'strides', 'padding', 'dilation', 'out_dtype'], {}), '(input, filter, strides, padding, dilation, out_dtype)\n', (1577, 1631), False, 'from tvm import topi\n'), ((1729, 1768), 'tvm.topi.generic.schedule_conv2d_nchw', 'topi.generic.schedule_conv2d_nchw', (['outs'], {}), '(outs)\n', (1762, 1768), False, 'from tvm import topi\n'), ((1859, 1932), 'tvm.topi.nn.conv2d_nchw', 'topi.nn.conv2d_nchw', (['input', 'filter', 'strides', 'padding', 'dilation', 'out_dtype'], {}), '(input, filter, strides, padding, dilation, out_dtype)\n', (1878, 1932), False, 'from tvm import topi\n'), ((1976, 2015), 'tvm.topi.generic.schedule_conv2d_nchw', 'topi.generic.schedule_conv2d_nchw', (['outs'], {}), '(outs)\n', (2009, 2015), False, 'from tvm import topi\n'), ((2150, 2171), 'tvm.relay.op.OpStrategy', 'relay.op.OpStrategy', ([], {}), '()\n', (2169, 2171), False, 'from tvm import relay\n'), ((3112, 3156), 'tvm.autotvm.task.create', 'autotvm.task.create', (['task_name', 'args', 'target'], {}), '(task_name, args, target)\n', (3131, 3156), False, 'from tvm import autotvm\n'), ((3167, 3204), 'tvm.autotvm.ConfigEntity', 'autotvm.ConfigEntity', (['(0)', 'None', '{}', '[]'], {}), '(0, None, {}, [])\n', (3187, 3204), False, 'from tvm import autotvm\n'), ((3235, 3293), 'tvm.autotvm.MeasureInput', 'autotvm.MeasureInput', ([], {'target': 'target', 'task': 'task', 'config': 'cfg'}), '(target=target, task=task, config=cfg)\n', (3255, 3293), False, 'from tvm import autotvm\n'), ((3307, 3382), 'tvm.autotvm.MeasureResult', 'autotvm.MeasureResult', ([], {'costs': '(cost,)', 'error_no': '(0)', 'all_cost': '(-1)', 'timestamp': '(-1)'}), '(costs=(cost,), error_no=0, all_cost=-1, timestamp=-1)\n', (3328, 3382), False, 'from tvm import autotvm\n'), ((3461, 3486), 'tvm.target.Target', 'tvm.target.Target', (['"""llvm"""'], {}), "('llvm')\n", (3478, 3486), False, 'import tvm\n'), ((4259, 4284), 'tvm.target.Target', 'tvm.target.Target', (['"""llvm"""'], {}), "('llvm')\n", (4276, 4284), False, 'import tvm\n'), ((6408, 6439), 'tvm.relay.backend.te_compiler.get', 'relay.backend.te_compiler.get', 
([], {}), '()\n', (6437, 6439), False, 'from tvm import relay\n'), ((6904, 6938), 'tvm.testing.device_enabled', 'tvm.testing.device_enabled', (['"""cuda"""'], {}), "('cuda')\n", (6930, 6938), False, 'import tvm\n'), ((7605, 7636), 'tvm.relay.backend.te_compiler.get', 'relay.backend.te_compiler.get', ([], {}), '()\n', (7634, 7636), False, 'from tvm import relay\n'), ((7645, 7673), 'tvm.relay.var', 'relay.var', (['"""x"""'], {'shape': '(2, 3)'}), "('x', shape=(2, 3))\n", (7654, 7673), False, 'from tvm import relay\n'), ((7682, 7710), 'tvm.relay.var', 'relay.var', (['"""y"""'], {'shape': '(2, 3)'}), "('y', shape=(2, 3))\n", (7691, 7710), False, 'from tvm import relay\n'), ((7719, 7747), 'tvm.relay.var', 'relay.var', (['"""z"""'], {'shape': '(2, 3)'}), "('z', shape=(2, 3))\n", (7728, 7747), False, 'from tvm import relay\n'), ((8063, 8091), 'tvm.relay.var', 'relay.var', (['"""x"""'], {'shape': '(2, 3)'}), "('x', shape=(2, 3))\n", (8072, 8091), False, 'from tvm import relay\n'), ((8100, 8128), 'tvm.relay.var', 'relay.var', (['"""y"""'], {'shape': '(2, 3)'}), "('y', shape=(2, 3))\n", (8109, 8128), False, 'from tvm import relay\n'), ((8147, 8165), 'tvm.relay.transpose', 'relay.transpose', (['x'], {}), '(x)\n', (8162, 8165), False, 'from tvm import relay\n'), ((8179, 8208), 'tvm.relay.Tuple', 'relay.Tuple', (['[x_transpose, y]'], {}), '([x_transpose, y])\n', (8190, 8208), False, 'from tvm import relay\n'), ((8220, 8250), 'tvm.relay.Function', 'relay.Function', (['[x, y]', 'output'], {}), '([x, y], output)\n', (8234, 8250), False, 'from tvm import relay\n'), ((8345, 8378), 'tvm.relay.var', 'relay.var', (['"""data"""'], {'shape': '(16, 16)'}), "('data', shape=(16, 16))\n", (8354, 8378), False, 'from tvm import relay\n'), ((8389, 8401), 'tvm.relay.log', 'relay.log', (['x'], {}), '(x)\n', (8398, 8401), False, 'from tvm import relay\n'), ((8415, 8438), 'tvm.relay.Tuple', 'relay.Tuple', (['[log, log]'], {}), '([log, log])\n', (8426, 8438), False, 'from tvm import relay\n'), ((8447, 8474), 'tvm.relay.Function', 'relay.Function', (['[x]', 'output'], {}), '([x], output)\n', (8461, 8474), False, 'from tvm import relay\n'), ((8929, 8955), 'tvm.relay.Function', 'relay.Function', (['[]', 'output'], {}), '([], output)\n', (8943, 8955), False, 'from tvm import relay\n'), ((8966, 8991), 'tvm.IRModule.from_expr', 'tvm.IRModule.from_expr', (['f'], {}), '(f)\n', (8988, 8991), False, 'import tvm\n'), ((9049, 9073), 'tvm.relay.build', 'relay.build', (['mod', '"""llvm"""'], {}), "(mod, 'llvm')\n", (9060, 9073), False, 'from tvm import relay\n'), ((9117, 9172), 'tvm.relay.var', 'relay.var', (['"""data"""'], {'shape': '(1, 1, 1, 1024)', 'dtype': '"""uint8"""'}), "('data', shape=(1, 1, 1, 1024), dtype='uint8')\n", (9126, 9172), False, 'from tvm import relay\n'), ((9186, 9245), 'tvm.relay.var', 'relay.var', (['"""weight"""'], {'shape': '(1, 1, 1024, 1001)', 'dtype': '"""int8"""'}), "('weight', shape=(1, 1, 1024, 1001), dtype='int8')\n", (9195, 9245), False, 'from tvm import relay\n'), ((9255, 9305), 'tvm.relay.var', 'relay.var', (['"""p2"""'], {'shape': '(1, 1, 1, 1)', 'dtype': '"""int32"""'}), "('p2', shape=(1, 1, 1, 1), dtype='int32')\n", (9264, 9305), False, 'from tvm import relay\n'), ((9317, 9431), 'tvm.relay.nn.conv2d', 'relay.nn.conv2d', (['data', 'weight'], {'kernel_size': '(1, 1)', 'data_layout': '"""NHWC"""', 'kernel_layout': '"""HWIO"""', 'out_dtype': '"""int32"""'}), "(data, weight, kernel_size=(1, 1), data_layout='NHWC',\n kernel_layout='HWIO', out_dtype='int32')\n", (9332, 9431), False, 'from tvm import 
relay\n'), ((9561, 9603), 'tvm.relay.tile', 'relay.tile', (['multiply'], {'reps': '(1, 1, 1, 1001)'}), '(multiply, reps=(1, 1, 1, 1001))\n', (9571, 9603), False, 'from tvm import relay\n'), ((9619, 9645), 'tvm.relay.subtract', 'relay.subtract', (['conv', 'tile'], {}), '(conv, tile)\n', (9633, 9645), False, 'from tvm import relay\n'), ((9734, 9765), 'tvm.relay.build', 'relay.build', (['mod'], {'target': '"""llvm"""'}), "(mod, target='llvm')\n", (9745, 9765), False, 'from tvm import relay\n'), ((9814, 9869), 'tvm.relay.var', 'relay.var', (['"""data"""'], {'shape': '(1, 1, 1, 1024)', 'dtype': '"""uint8"""'}), "('data', shape=(1, 1, 1, 1024), dtype='uint8')\n", (9823, 9869), False, 'from tvm import relay\n'), ((9883, 9942), 'tvm.relay.var', 'relay.var', (['"""weight"""'], {'shape': '(1, 1, 1024, 1001)', 'dtype': '"""int8"""'}), "('weight', shape=(1, 1, 1024, 1001), dtype='int8')\n", (9892, 9942), False, 'from tvm import relay\n'), ((9952, 10002), 'tvm.relay.var', 'relay.var', (['"""p2"""'], {'shape': '(1, 1, 1, 1)', 'dtype': '"""int32"""'}), "('p2', shape=(1, 1, 1, 1), dtype='int32')\n", (9961, 10002), False, 'from tvm import relay\n'), ((10014, 10128), 'tvm.relay.nn.conv2d', 'relay.nn.conv2d', (['data', 'weight'], {'kernel_size': '(1, 1)', 'data_layout': '"""NHWC"""', 'kernel_layout': '"""HWIO"""', 'out_dtype': '"""int32"""'}), "(data, weight, kernel_size=(1, 1), data_layout='NHWC',\n kernel_layout='HWIO', out_dtype='int32')\n", (10029, 10128), False, 'from tvm import relay\n'), ((10258, 10300), 'tvm.relay.tile', 'relay.tile', (['multiply'], {'reps': '(1, 1, 1, 1001)'}), '(multiply, reps=(1, 1, 1, 1001))\n', (10268, 10300), False, 'from tvm import relay\n'), ((10316, 10342), 'tvm.relay.subtract', 'relay.subtract', (['conv', 'tile'], {}), '(conv, tile)\n', (10330, 10342), False, 'from tvm import relay\n'), ((10460, 10481), 'tvm.relay.vm.VMCompiler', 'relay.vm.VMCompiler', ([], {}), '()\n', (10479, 10481), False, 'from tvm import relay\n'), ((2213, 2269), 'tvm.relay.op.strategy.wrap_compute_conv2d', 'relay.op.strategy.wrap_compute_conv2d', (['_compute_conv2d_1'], {}), '(_compute_conv2d_1)\n', (2250, 2269), False, 'from tvm import relay\n'), ((2279, 2335), 'tvm.relay.op.strategy.wrap_topi_schedule', 'relay.op.strategy.wrap_topi_schedule', (['_schedule_conv2d_1'], {}), '(_schedule_conv2d_1)\n', (2315, 2335), False, 'from tvm import relay\n'), ((2428, 2484), 'tvm.relay.op.strategy.wrap_compute_conv2d', 'relay.op.strategy.wrap_compute_conv2d', (['_compute_conv2d_2'], {}), '(_compute_conv2d_2)\n', (2465, 2484), False, 'from tvm import relay\n'), ((2494, 2550), 'tvm.relay.op.strategy.wrap_topi_schedule', 'relay.op.strategy.wrap_topi_schedule', (['_schedule_conv2d_2'], {}), '(_schedule_conv2d_2)\n', (2530, 2550), False, 'from tvm import relay\n'), ((2639, 2676), 'tvm.te.SpecializedCondition', 'tvm.te.SpecializedCondition', (['(ic >= 16)'], {}), '(ic >= 16)\n', (2666, 2676), False, 'import tvm\n'), ((3012, 3034), 'tvm.te.placeholder', 'te.placeholder', (['dshape'], {}), '(dshape)\n', (3026, 3034), False, 'from tvm import te\n'), ((3036, 3058), 'tvm.te.placeholder', 'te.placeholder', (['wshape'], {}), '(wshape)\n', (3050, 3058), False, 'from tvm import te\n'), ((3539, 3570), 'tvm.relay.var', 'relay.var', (['"""data"""'], {'shape': 'dshape'}), "('data', shape=dshape)\n", (3548, 3570), False, 'from tvm import relay\n'), ((3588, 3621), 'tvm.relay.var', 'relay.var', (['"""wshape"""'], {'shape': 'wshape'}), "('wshape', shape=wshape)\n", (3597, 3621), False, 'from tvm import relay\n'), ((3636, 3681), 
'tvm.relay.nn.conv2d', 'relay.nn.conv2d', (['data', 'weight'], {'padding': '(1, 1)'}), '(data, weight, padding=(1, 1))\n', (3651, 3681), False, 'from tvm import relay\n'), ((3696, 3715), 'tvm.relay.testing.run_infer_type', 'run_infer_type', (['out'], {}), '(out)\n', (3710, 3715), False, 'from tvm.relay.testing import run_infer_type\n'), ((3978, 4032), 'tvm.relay.testing.temp_op_attr.TempOpAttr', 'TempOpAttr', (['"""nn.conv2d"""', '"""FTVMStrategy"""', '_tmp_strategy'], {}), "('nn.conv2d', 'FTVMStrategy', _tmp_strategy)\n", (3988, 4032), False, 'from tvm.relay.testing.temp_op_attr import TempOpAttr\n'), ((4358, 4389), 'tvm.relay.var', 'relay.var', (['"""data"""'], {'shape': 'dshape'}), "('data', shape=dshape)\n", (4367, 4389), False, 'from tvm import relay\n'), ((4407, 4440), 'tvm.relay.var', 'relay.var', (['"""wshape"""'], {'shape': 'wshape'}), "('wshape', shape=wshape)\n", (4416, 4440), False, 'from tvm import relay\n'), ((4455, 4500), 'tvm.relay.nn.conv2d', 'relay.nn.conv2d', (['data', 'weight'], {'padding': '(1, 1)'}), '(data, weight, padding=(1, 1))\n', (4470, 4500), False, 'from tvm import relay\n'), ((4515, 4534), 'tvm.relay.testing.run_infer_type', 'run_infer_type', (['out'], {}), '(out)\n', (4529, 4534), False, 'from tvm.relay.testing import run_infer_type\n'), ((4818, 4872), 'tvm.relay.testing.temp_op_attr.TempOpAttr', 'TempOpAttr', (['"""nn.conv2d"""', '"""FTVMStrategy"""', '_tmp_strategy'], {}), "('nn.conv2d', 'FTVMStrategy', _tmp_strategy)\n", (4828, 4872), False, 'from tvm.relay.testing.temp_op_attr import TempOpAttr\n'), ((6478, 6505), 'tvm.relay.var', 'relay.var', (['"""x"""'], {'shape': 'shape'}), "('x', shape=shape)\n", (6487, 6505), False, 'from tvm import relay\n'), ((6518, 6533), 'tvm.relay.add', 'relay.add', (['x', 'x'], {}), '(x, x)\n', (6527, 6533), False, 'from tvm import relay\n'), ((6546, 6561), 'tvm.relay.add', 'relay.add', (['y', 'x'], {}), '(y, x)\n', (6555, 6561), False, 'from tvm import relay\n'), ((6574, 6596), 'tvm.relay.Function', 'relay.Function', (['[x]', 'z'], {}), '([x], z)\n', (6588, 6596), False, 'from tvm import relay\n'), ((6611, 6636), 'tvm.IRModule.from_expr', 'tvm.IRModule.from_expr', (['f'], {}), '(f)\n', (6633, 6636), False, 'import tvm\n'), ((7084, 7102), 'tvm.device', 'tvm.device', (['target'], {}), '(target)\n', (7094, 7102), False, 'import tvm\n'), ((7114, 7148), 'tvm.testing.device_enabled', 'tvm.testing.device_enabled', (['target'], {}), '(target)\n', (7140, 7148), False, 'import tvm\n'), ((7842, 7874), 'tvm.relay.analysis.free_vars', 'relay.analysis.free_vars', (['result'], {}), '(result)\n', (7866, 7874), False, 'from tvm import relay\n'), ((7893, 7931), 'tvm.transform.PassContext', 'tvm.transform.PassContext', ([], {'opt_level': '(0)'}), '(opt_level=0)\n', (7918, 7931), False, 'import tvm\n'), ((8267, 8295), 'tvm.IRModule.from_expr', 'tvm.IRModule.from_expr', (['func'], {}), '(func)\n', (8289, 8295), False, 'import tvm\n'), ((8491, 8516), 'tvm.IRModule.from_expr', 'tvm.IRModule.from_expr', (['f'], {}), '(f)\n', (8513, 8516), False, 'import tvm\n'), ((8699, 8725), 'tvm.tir.IntImm', 'tvm.tir.IntImm', (['"""int32"""', '(1)'], {}), "('int32', 1)\n", (8713, 8725), False, 'import tvm\n'), ((8735, 8762), 'tvm.tir.IntImm', 'tvm.tir.IntImm', (['"""int64"""', '(16)'], {}), "('int64', 16)\n", (8749, 8762), False, 'import tvm\n'), ((8772, 8799), 'tvm.tir.IntImm', 'tvm.tir.IntImm', (['"""int64"""', '(16)'], {}), "('int64', 16)\n", (8786, 8799), False, 'import tvm\n'), ((8809, 8836), 'tvm.tir.IntImm', 'tvm.tir.IntImm', (['"""int32"""', '(64)'], 
{}), "('int32', 64)\n", (8823, 8836), False, 'import tvm\n'), ((8868, 8891), 'tvm.relay.const', 'relay.const', (['(0)', '"""int32"""'], {}), "(0, 'int32')\n", (8879, 8891), False, 'from tvm import relay\n'), ((9002, 9039), 'tvm.relay.qnn.transform.CanonicalizeOps', 'relay.qnn.transform.CanonicalizeOps', ([], {}), '()\n', (9037, 9039), False, 'from tvm import relay\n'), ((9513, 9544), 'tvm.relay.const', 'relay.const', (['(-22)'], {'dtype': '"""int32"""'}), "(-22, dtype='int32')\n", (9524, 9544), False, 'from tvm import relay\n'), ((9692, 9722), 'tvm.relay.analysis.free_vars', 'relay.analysis.free_vars', (['func'], {}), '(func)\n', (9716, 9722), False, 'from tvm import relay\n'), ((10210, 10241), 'tvm.relay.const', 'relay.const', (['(-22)'], {'dtype': '"""int32"""'}), "(-22, dtype='int32')\n", (10221, 10241), False, 'from tvm import relay\n'), ((2727, 2783), 'tvm.relay.op.strategy.wrap_compute_conv2d', 'relay.op.strategy.wrap_compute_conv2d', (['_compute_conv2d_3'], {}), '(_compute_conv2d_3)\n', (2764, 2783), False, 'from tvm import relay\n'), ((2797, 2853), 'tvm.relay.op.strategy.wrap_topi_schedule', 'relay.op.strategy.wrap_topi_schedule', (['_schedule_conv2d_3'], {}), '(_schedule_conv2d_3)\n', (2833, 2853), False, 'from tvm import relay\n'), ((3796, 3821), 'tvm.relay.op.get', 'relay.op.get', (['"""nn.conv2d"""'], {}), "('nn.conv2d')\n", (3808, 3821), False, 'from tvm import relay\n'), ((4611, 4636), 'tvm.relay.op.get', 'relay.op.get', (['"""nn.conv2d"""'], {}), "('nn.conv2d')\n", (4623, 4636), False, 'from tvm import relay\n'), ((6651, 6678), 'tvm.relay.transform.InferType', 'relay.transform.InferType', ([], {}), '()\n', (6676, 6678), False, 'from tvm import relay\n'), ((7287, 7318), 'tvm.nd.empty', 'tvm.nd.empty', (['(10,)'], {'device': 'dev'}), '((10,), device=dev)\n', (7299, 7318), False, 'import tvm\n'), ((7777, 7813), 'tvm.relay.op.concatenate', 'relay.op.concatenate', (['[y, z]'], {'axis': '(0)'}), '([y, z], axis=0)\n', (7797, 7813), False, 'from tvm import relay\n'), ((7974, 8002), 'tvm.IRModule.from_expr', 'tvm.IRModule.from_expr', (['func'], {}), '(func)\n', (7996, 8002), False, 'import tvm\n'), ((10412, 10442), 'tvm.relay.analysis.free_vars', 'relay.analysis.free_vars', (['func'], {}), '(func)\n', (10436, 10442), False, 'from tvm import relay\n'), ((3859, 3881), 'tvm.te.placeholder', 'te.placeholder', (['dshape'], {}), '(dshape)\n', (3873, 3881), False, 'from tvm import te\n'), ((3883, 3905), 'tvm.te.placeholder', 'te.placeholder', (['wshape'], {}), '(wshape)\n', (3897, 3905), False, 'from tvm import te\n'), ((4674, 4696), 'tvm.te.placeholder', 'te.placeholder', (['dshape'], {}), '(dshape)\n', (4688, 4696), False, 'from tvm import te\n'), ((4698, 4720), 'tvm.te.placeholder', 'te.placeholder', (['wshape'], {}), '(wshape)\n', (4712, 4720), False, 'from tvm import te\n'), ((5573, 5608), 'tvm.autotvm.apply_history_best', 'autotvm.apply_history_best', (['records'], {}), '(records)\n', (5599, 5608), False, 'from tvm import autotvm\n'), ((6091, 6126), 'tvm.autotvm.apply_history_best', 'autotvm.apply_history_best', (['records'], {}), '(records)\n', (6117, 6126), False, 'from tvm import autotvm\n'), ((7228, 7239), 'numpy.ones', 'np.ones', (['(10)'], {}), '(10)\n', (7235, 7239), True, 'import numpy as np\n')]
|
from __future__ import print_function
import os
import cv2
import string
import random
import numpy as np
class dataLoader(object):
def __init__(self, directory, dataset_dir, dataset_name, max_steps,
image_width, image_height, image_patch_width, image_patch_height,
grd_attn=False, mode='Train'):
self.mode = mode
self.grd_attn = grd_attn
self.max_steps = max_steps
self.image_width = image_width
self.image_height = image_height
self.directory = directory
self.dataset_dir = dataset_dir
self.dataset_name = dataset_name
        self.image_patch_width = image_patch_width
self.image_patch_height = image_patch_height
self.load_data()
def load_data(self):
all_data = []
# Full images file path
file_path = os.path.join(self.directory, self.dataset_name)
#-----------------------------------------------------------------------
# Get characters
az = string.ascii_lowercase
AZ = string.ascii_uppercase
nm = string.digits
#-----------------------------------------------------------------------
# Append all characters
all_selections = []
for i in range(len(az)):
all_selections.append(az[i])
for i in range(len(AZ)):
all_selections.append(AZ[i])
for i in range(len(nm)):
all_selections.append(nm[i])
#-----------------------------------------------------------------------
with open(file_path, 'r') as f:
frames = f.readlines()
for i in range(0, len(frames), self.max_steps):
for u in range(self.max_steps):
frame = frames[i+u]
path, label, w1, h1, w2, h2 = frame.split(', ')
                    h2 = h2[:-1]  # Remove the trailing "\n"
label_num = all_selections.index(label) # Convert to label category
all_data.append([path, int(label_num), int(w1), int(h1), int(w2), int(h2)])
self.all_data = all_data
self.max_length = len(self.all_data)
self.possible_pred = len(all_selections)
print('All data Loaded!')
def randomFlip(self, image):
flip_p = np.random.rand()
if flip_p > 0.5:
flip_image = image[:, ::-1]
else:
flip_image = image
return flip_image
def gen_random_data(self):
while True:
indices = list(range(len(self.all_data)))
random.shuffle(indices)
for i in indices:
data = self.all_data[i]
yield data
def gen_val_data(self):
while True:
indices = range(len(self.all_data))
for i in indices:
data = self.all_data[i]
yield data
def gen_data_batch(self, batch_size):
# Generate data based on training/validation
if self.mode == 'Train':
# Randomize data
data_gen = self.gen_random_data()
else:
# Validation Data generation
data_gen = self.gen_val_data()
# Loop
while True:
image_batch = []
label_batch = []
# Generate training batch
for _ in range(batch_size):
valid = None
while valid is None:
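                    # Keep drawing samples until one loads and crops successfully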
sample_data = next(data_gen)
sample_img_path = os.path.join(self.directory, self.dataset_dir, sample_data[0])
try:
image = cv2.imread(sample_img_path)
org_img_hgth, org_img_wdth, _ = image.shape
image = cv2.resize(image, (self.image_width, self.image_height))
# Gather sample data
# path, label, int(w1), int(h1), int(w2), int(h2)
sample_label = sample_data[1]
#print(sample_label)
one_hot_label = np.zeros(self.possible_pred)
one_hot_label[sample_label] = 1.0
# Get Bboxes
sample_left = sample_data[2] * 1.0
sample_top = sample_data[3] * 1.0
sample_width = sample_data[4] * 1.0
sample_height = sample_data[5] * 1.0
# Rescale axis to resized image
sample_left = np.ceil((sample_left * self.image_width)/org_img_wdth)
sample_top = np.ceil((sample_top * self.image_height)/org_img_hgth)
sample_width = np.floor((sample_width * self.image_width)/org_img_wdth)
sample_height = np.floor((sample_height * self.image_height)/org_img_hgth)
# Extract image_patch
image_patch = image[int(sample_top):int(sample_top+sample_height),\
int(sample_left):int(sample_left+sample_width), :]
# Resize
image_patch_rz = cv2.resize(image_patch, (self.image_patch_width, self.image_patch_height),\
interpolation = cv2.INTER_LINEAR)
# Data Augmentation
# image_patch_rz = self.randomFlip(image_patch_rz)
# Set image patch between -1 and 1
image_patch_rz = image_patch_rz/127.5 - 1.0
# Append to generated batch
image_batch.append(image_patch_rz)
label_batch.append(one_hot_label)
# Set valid flag to not None
valid = 'True'
                    except (cv2.error, AttributeError):
                        # cv2.imread returns None for unreadable files, which makes
                        # image.shape raise AttributeError; resample another entry in both cases
                        print('File error at: ', sample_img_path, ' resampling..')
yield np.array(image_batch), np.array(label_batch)
|
[
"numpy.ceil",
"random.shuffle",
"numpy.random.rand",
"os.path.join",
"numpy.floor",
"numpy.array",
"numpy.zeros",
"cv2.resize",
"cv2.imread"
] |
[((874, 921), 'os.path.join', 'os.path.join', (['self.directory', 'self.dataset_name'], {}), '(self.directory, self.dataset_name)\n', (886, 921), False, 'import os\n'), ((2289, 2305), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (2303, 2305), True, 'import numpy as np\n'), ((2561, 2584), 'random.shuffle', 'random.shuffle', (['indices'], {}), '(indices)\n', (2575, 2584), False, 'import random\n'), ((3506, 3568), 'os.path.join', 'os.path.join', (['self.directory', 'self.dataset_dir', 'sample_data[0]'], {}), '(self.directory, self.dataset_dir, sample_data[0])\n', (3518, 3568), False, 'import os\n'), ((6007, 6028), 'numpy.array', 'np.array', (['image_batch'], {}), '(image_batch)\n', (6015, 6028), True, 'import numpy as np\n'), ((6030, 6051), 'numpy.array', 'np.array', (['label_batch'], {}), '(label_batch)\n', (6038, 6051), True, 'import numpy as np\n'), ((3626, 3653), 'cv2.imread', 'cv2.imread', (['sample_img_path'], {}), '(sample_img_path)\n', (3636, 3653), False, 'import cv2\n'), ((3754, 3810), 'cv2.resize', 'cv2.resize', (['image', '(self.image_width, self.image_height)'], {}), '(image, (self.image_width, self.image_height))\n', (3764, 3810), False, 'import cv2\n'), ((4070, 4098), 'numpy.zeros', 'np.zeros', (['self.possible_pred'], {}), '(self.possible_pred)\n', (4078, 4098), True, 'import numpy as np\n'), ((4534, 4588), 'numpy.ceil', 'np.ceil', (['(sample_left * self.image_width / org_img_wdth)'], {}), '(sample_left * self.image_width / org_img_wdth)\n', (4541, 4588), True, 'import numpy as np\n'), ((4631, 4685), 'numpy.ceil', 'np.ceil', (['(sample_top * self.image_height / org_img_hgth)'], {}), '(sample_top * self.image_height / org_img_hgth)\n', (4638, 4685), True, 'import numpy as np\n'), ((4729, 4785), 'numpy.floor', 'np.floor', (['(sample_width * self.image_width / org_img_wdth)'], {}), '(sample_width * self.image_width / org_img_wdth)\n', (4737, 4785), True, 'import numpy as np\n'), ((4827, 4885), 'numpy.floor', 'np.floor', (['(sample_height * self.image_height / org_img_hgth)'], {}), '(sample_height * self.image_height / org_img_hgth)\n', (4835, 4885), True, 'import numpy as np\n'), ((5193, 5303), 'cv2.resize', 'cv2.resize', (['image_patch', '(self.image_patch_width, self.image_patch_height)'], {'interpolation': 'cv2.INTER_LINEAR'}), '(image_patch, (self.image_patch_width, self.image_patch_height),\n interpolation=cv2.INTER_LINEAR)\n', (5203, 5303), False, 'import cv2\n')]
|
import cv2
import numpy as np
import random
#########################################################
# FUNCTION TO FIND THE CONNECTED COMPONENTS
#########################################################
def drawComponents(image, adj, block_size):
#ret, labels = cv2.connectedComponents(image)
#print(ret)
#print(labels)
#cv2.imshow('test1', labels.astype(np.uint8))
image = image.astype('uint8')
#print (image.shape)
block_w = block_size
block_h = block_size
nb = 0
comp = []
for r in range(0, image.shape[0] - block_w, block_h):
for c in range(0, image.shape[1] - block_w, block_h):
window = image[r:r+block_w, c:c+block_h]
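            # Label connected components inside this block window only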
x = list(cv2.connectedComponents(window, adj))
nb += x[0]
x[1] = x[1] * random.randint(1, 16) * random.randint(1, 16)
comp.append(x[1])
bc = image.shape[0]//block_size
br = image.shape[1]//block_size
img = np.zeros(image.shape)
#print (img.shape)
for r in range(0, img.shape[0] - block_w, block_h):
for c in range(0, img.shape[1] - block_w, block_h):
for i in range(len(comp)):
img[r:r+block_w, c:c+block_h] = comp[i]*255
for k in range(len(comp)):
for i in range(block_size):
for j in range(block_size):
if k%br == 0 and k!=0:
c = (((k+1)*block_size)//img.shape[1])*block_size + j
else:
c = ((k*block_size)//img.shape[1])*block_size + j
r = (k*block_size + i) % (br*block_size)
img[c][r] = comp[k][j][i]
cv2.imshow('Test Image', img)
#image = image.astype('uint8')
#nb_components, output, stats, centroids = cv2.connectedComponentsWithStats(image, adj)
#label_hue = (107*output%np.max(output)).astype(np.uint8)
label_hue = (107*img%np.max(img)).astype(np.uint8)
blank_ch = 255*np.ones_like(label_hue)
labeled_img = cv2.merge([label_hue, blank_ch, blank_ch])
labeled_img = cv2.cvtColor(labeled_img, cv2.COLOR_HSV2RGB)
labeled_img[label_hue==0] = 0
'''
img2 = np.zeros(output.shape)
img2[output == max_label] = 255
img2 = img2 + output
'''
return labeled_img
#########################################################
# INPUTS
#########################################################
flag = 15
while flag != 0:
block = int(input("Please enter block size (m X m): "))
if flag == 1 or flag == 15:
adj = int(input("Enter the adjacency for detection (4 or 8): "))
if flag == 2 or flag == 15:
thresh = list(map(int, input("Enter the range of threshold separated by space(Example: 150 200): ").split(" ")))
if adj != 4 and adj != 8:
flag = 1
print("Inoperable value for adjacency. Please enter 4 or 8")
continue
elif len(thresh) != 2:
print("Please input exactly 2 numbers in the given format.")
flag = 2
continue
    elif thresh[0] > thresh[1]:
thresh[0], thresh[1] = thresh[1], thresh[0]
else:
flag = 0
if thresh[0] < 0 or thresh[1] > 255:
print("Values are beyond limits. Please enter values between 0 and 255")
flag = 2
#########################################################
# READING IMAGE
#########################################################
img_orig = cv2.imread('../../Images/2.jpg')
cv2.imshow('Original', img_orig)
#im = cv2.UMat(Image.fromarray(img_orig).convert("L"))
#Image.fromarray(img_orig)
bw = cv2.cvtColor(img_orig, cv2.COLOR_RGB2GRAY)
#cv2.imshow("BW", bw)
#cv2.imwrite("./Outputs/Grayscale.jpg", bw)
x, img = cv2.threshold(bw, thresh[0], thresh[1], cv2.THRESH_BINARY) #ensuring binary
img[img==x] = 255
cv2.imshow("Binary", img)
#cv2.imwrite("./Outputs/Binary Image {V=("+str(thresh[0])+", "+str(thresh[1])+"), adj="+str(adj)+"}.jpg", img)
img2 = drawComponents(img, adj, block) # calling implementation function
#print(img2.shape)
cv2.imshow('Connected Components', img2)
#cv2.imwrite("./Outputs/Paths{V=("+str(thresh[0])+", "+str(thresh[1])+"), adj="+str(adj)+"}.jpg", img2)
#########################################################
# PRINTING OUTPUT
#########################################################
#img3 = bw * (img2.reshape(img2.shape[0],img2.shape[1]))
# Using the hues from img2 and the saturation and luminosity from the original image to get proper results.
cvt = cv2.cvtColor(img_orig, cv2.COLOR_RGB2HSV)
img4 = np.zeros(cvt.shape)
img2 = cv2.cvtColor(img2.astype(np.uint8), cv2.COLOR_RGB2HSV)
for i in range(img2.shape[0]):
for j in range(img2.shape[1]):
img4[i][j][0] = (img2[i][j][0]*9 + cvt[i][j][1]*1)//10 # HUE
img4[i][j][1] = (img2[i][j][1]*2 + cvt[i][j][1]*8)//10 # SATURATION
img4[i][j][2] = cvt[i][j][2] # LIGHT VALUE
if img2[i][j][0] == 0:
img4[i][j] = 0
img4 = cv2.cvtColor(img4.astype(np.uint8), cv2.COLOR_HSV2RGB)
#img3 = bw + (img2.reshape(img2.shape[0],img2.shape[1]))
#img4 = [[[i, i, i] for i in j] for j in img2]
#img5 = img_orig * img4
cv2.imshow('Result', img4.astype(np.uint8))
#cv2.imwrite("./Outputs/Result{V=("+str(thresh[0])+", "+str(thresh[1])+"), adj="+str(adj)+"}.jpg", img4.astype(np.uint8))
print ("Job done!")
cv2.waitKey(0)
cv2.destroyAllWindows()
|
[
"numpy.ones_like",
"cv2.merge",
"cv2.threshold",
"cv2.imshow",
"numpy.max",
"numpy.zeros",
"cv2.waitKey",
"cv2.destroyAllWindows",
"cv2.connectedComponents",
"cv2.cvtColor",
"cv2.imread",
"random.randint"
] |
[((3501, 3533), 'cv2.imread', 'cv2.imread', (['"""../../Images/2.jpg"""'], {}), "('../../Images/2.jpg')\n", (3511, 3533), False, 'import cv2\n'), ((3535, 3567), 'cv2.imshow', 'cv2.imshow', (['"""Original"""', 'img_orig'], {}), "('Original', img_orig)\n", (3545, 3567), False, 'import cv2\n'), ((3657, 3699), 'cv2.cvtColor', 'cv2.cvtColor', (['img_orig', 'cv2.COLOR_RGB2GRAY'], {}), '(img_orig, cv2.COLOR_RGB2GRAY)\n', (3669, 3699), False, 'import cv2\n'), ((3776, 3834), 'cv2.threshold', 'cv2.threshold', (['bw', 'thresh[0]', 'thresh[1]', 'cv2.THRESH_BINARY'], {}), '(bw, thresh[0], thresh[1], cv2.THRESH_BINARY)\n', (3789, 3834), False, 'import cv2\n'), ((3871, 3896), 'cv2.imshow', 'cv2.imshow', (['"""Binary"""', 'img'], {}), "('Binary', img)\n", (3881, 3896), False, 'import cv2\n'), ((4103, 4143), 'cv2.imshow', 'cv2.imshow', (['"""Connected Components"""', 'img2'], {}), "('Connected Components', img2)\n", (4113, 4143), False, 'import cv2\n'), ((4575, 4616), 'cv2.cvtColor', 'cv2.cvtColor', (['img_orig', 'cv2.COLOR_RGB2HSV'], {}), '(img_orig, cv2.COLOR_RGB2HSV)\n', (4587, 4616), False, 'import cv2\n'), ((4624, 4643), 'numpy.zeros', 'np.zeros', (['cvt.shape'], {}), '(cvt.shape)\n', (4632, 4643), True, 'import numpy as np\n'), ((5458, 5472), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (5469, 5472), False, 'import cv2\n'), ((5473, 5496), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (5494, 5496), False, 'import cv2\n'), ((996, 1017), 'numpy.zeros', 'np.zeros', (['image.shape'], {}), '(image.shape)\n', (1004, 1017), True, 'import numpy as np\n'), ((1704, 1733), 'cv2.imshow', 'cv2.imshow', (['"""Test Image"""', 'img'], {}), "('Test Image', img)\n", (1714, 1733), False, 'import cv2\n'), ((2049, 2091), 'cv2.merge', 'cv2.merge', (['[label_hue, blank_ch, blank_ch]'], {}), '([label_hue, blank_ch, blank_ch])\n', (2058, 2091), False, 'import cv2\n'), ((2111, 2155), 'cv2.cvtColor', 'cv2.cvtColor', (['labeled_img', 'cv2.COLOR_HSV2RGB'], {}), '(labeled_img, cv2.COLOR_HSV2RGB)\n', (2123, 2155), False, 'import cv2\n'), ((2007, 2030), 'numpy.ones_like', 'np.ones_like', (['label_hue'], {}), '(label_hue)\n', (2019, 2030), True, 'import numpy as np\n'), ((741, 777), 'cv2.connectedComponents', 'cv2.connectedComponents', (['window', 'adj'], {}), '(window, adj)\n', (764, 777), False, 'import cv2\n'), ((852, 873), 'random.randint', 'random.randint', (['(1)', '(16)'], {}), '(1, 16)\n', (866, 873), False, 'import random\n'), ((1958, 1969), 'numpy.max', 'np.max', (['img'], {}), '(img)\n', (1964, 1969), True, 'import numpy as np\n'), ((828, 849), 'random.randint', 'random.randint', (['(1)', '(16)'], {}), '(1, 16)\n', (842, 849), False, 'import random\n')]
|
"""
# Authors:
* <NAME> 2021
* <NAME> 2020
* <NAME> 2020
* <NAME> 2020
* <NAME> 2020
* <NAME> 2020
"""
import torch
import numpy as np
smallVal = np.finfo("float").eps # To avoid divide by zero
def si_snr_loss(y_pred_batch, y_true_batch, lens, reduction="mean"):
"""Compute the si_snr score and return -1 * that score.
This function can be used as a loss function for training
with SGD-based updates.
Arguments
---------
y_pred_batch : torch.Tensor
The degraded (enhanced) waveforms.
y_true_batch : torch.Tensor
The clean (reference) waveforms.
lens : torch.Tensor
The relative lengths of the waveforms within the batch.
reduction : str
The type of reduction ("mean" or "batch") to use.
Example
-------
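    A minimal call with arbitrary illustrative shapes (not taken from real data):
    >>> pred = torch.randn(2, 16000, 1)
    >>> clean = torch.randn(2, 16000, 1)
    >>> lens = torch.ones(2)
    >>> loss = si_snr_loss(pred, clean, lens)
    >>> loss.shape
    torch.Size([])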
"""
y_pred_batch = torch.squeeze(y_pred_batch, dim=-1)
y_true_batch = torch.squeeze(y_true_batch, dim=-1)
batch_size = y_pred_batch.shape[0]
SI_SNR = torch.zeros(batch_size)
for i in range(0, batch_size): # Run over mini-batches
s_target = y_true_batch[i, 0 : int(lens[i] * y_pred_batch.shape[1])]
s_estimate = y_pred_batch[i, 0 : int(lens[i] * y_pred_batch.shape[1])]
# s_target = <s', s>s / ||s||^2
dot = torch.sum(s_estimate * s_target, dim=0, keepdim=True)
s_target_energy = (
torch.sum(s_target ** 2, dim=0, keepdim=True) + smallVal
)
proj = dot * s_target / s_target_energy
# e_noise = s' - s_target
e_noise = s_estimate - proj
# SI-SNR = 10 * log_10(||s_target||^2 / ||e_noise||^2)
si_snr_beforelog = torch.sum(proj ** 2, dim=0) / (
torch.sum(e_noise ** 2, dim=0) + smallVal
)
SI_SNR[i] = 10 * torch.log10(si_snr_beforelog + smallVal)
if reduction == "mean":
return -SI_SNR.mean()
return -SI_SNR
|
[
"torch.log10",
"torch.sum",
"torch.squeeze",
"numpy.finfo",
"torch.zeros"
] |
[((167, 184), 'numpy.finfo', 'np.finfo', (['"""float"""'], {}), "('float')\n", (175, 184), True, 'import numpy as np\n'), ((858, 893), 'torch.squeeze', 'torch.squeeze', (['y_pred_batch'], {'dim': '(-1)'}), '(y_pred_batch, dim=-1)\n', (871, 893), False, 'import torch\n'), ((914, 949), 'torch.squeeze', 'torch.squeeze', (['y_true_batch'], {'dim': '(-1)'}), '(y_true_batch, dim=-1)\n', (927, 949), False, 'import torch\n'), ((1006, 1029), 'torch.zeros', 'torch.zeros', (['batch_size'], {}), '(batch_size)\n', (1017, 1029), False, 'import torch\n'), ((1309, 1362), 'torch.sum', 'torch.sum', (['(s_estimate * s_target)'], {'dim': '(0)', 'keepdim': '(True)'}), '(s_estimate * s_target, dim=0, keepdim=True)\n', (1318, 1362), False, 'import torch\n'), ((1405, 1450), 'torch.sum', 'torch.sum', (['(s_target ** 2)'], {'dim': '(0)', 'keepdim': '(True)'}), '(s_target ** 2, dim=0, keepdim=True)\n', (1414, 1450), False, 'import torch\n'), ((1690, 1717), 'torch.sum', 'torch.sum', (['(proj ** 2)'], {'dim': '(0)'}), '(proj ** 2, dim=0)\n', (1699, 1717), False, 'import torch\n'), ((1814, 1854), 'torch.log10', 'torch.log10', (['(si_snr_beforelog + smallVal)'], {}), '(si_snr_beforelog + smallVal)\n', (1825, 1854), False, 'import torch\n'), ((1735, 1765), 'torch.sum', 'torch.sum', (['(e_noise ** 2)'], {'dim': '(0)'}), '(e_noise ** 2, dim=0)\n', (1744, 1765), False, 'import torch\n')]
|
#!/usr/bin/env python3
# Author: <NAME>
# Date: 2021/1/29
# Functions to generate csv summaries of data statistics and merge result statistics
import os
import argparse
import re
from typing import Tuple
import pandas as pd
import numpy as np
import consts as C
from processing.marsdataloader import MARSDataLoader
def merge_results(verbose: bool=False) -> None:
"""merge results and configuration files into results/hpcc_results.csv"""
# read files
exp_ID_name = C.EXP_COL_CONV[C.EXP_ID_COL]
res_df = pd.read_csv(C.ALL_RES_CSV_PATH, dtype={exp_ID_name: int}, index_col=exp_ID_name)
config_df = pd.read_csv(C.EXP_ID_LOG, dtype={exp_ID_name: int}, index_col=exp_ID_name)
assert len(res_df.index) == len(config_df.index), "Numbers of experiments recorded don't match!"
# check output path to avoid overwriting previous combined results
comb_output_path = __find_next_available_filename(C.COMBINED_FILE_FORMAT)
# join and save
res_df.join(config_df, on=exp_ID_name).to_csv(comb_output_path)
if verbose:
print(f"{len(res_df.index)} experiment entries combined and saved at {comb_output_path}")
def generate_dataset_stats(dataset_parent_dir:str, stats_csv_save_path: str, verbose=False):
"""generate dataset stats and save at given path"""
# find dataset folders in dataset dir
datasets = []
pat = re.compile(C.DATASET_PATTERN)
for path in os.listdir(dataset_parent_dir):
match = pat.match(path)
if match:
# record dataset path, win, ahead, rolling
datasets.append([os.path.join(dataset_parent_dir, match.group(0)),
match.group(1), match.group(2), match.group(3)])
all_dataset_stats = []
for dataset_dir, window, ahead, rolling in datasets:
# get train, test inds, and labels
train_inds, test_inds, labels = __get_train_test_inds_labels(dataset_dir)
train_labels, test_labels = labels[train_inds], labels[test_inds]
exc_non, exc_crash = __get_excluded_number(dataset_dir)
# Record stats
all_dataset_stats.append({
"window": window,
"time ahead": ahead,
"rolling step": rolling,
"train crash count": np.sum(train_labels == 1),
"train noncrash count": np.sum(train_labels == 0),
"train 0:1 ratio": np.sum(train_labels == 0)/np.sum(train_labels == 1),
"test crash count": np.sum(test_labels == 1),
"test noncrash count": np.sum(test_labels == 0),
"test 0:1 ratio": np.sum(test_labels == 0) / np.sum(test_labels == 1),
"dataset total": labels.shape[0],
"train set total": train_labels.shape[0],
"test set total": test_labels.shape[0],
"excluded non-crashed segment total": exc_non,
"excluded crashed segment total": exc_crash
})
# generate and save dataset csv file
pd.DataFrame(all_dataset_stats).to_csv(stats_csv_save_path)
if verbose:
print(f"Statistics for {len(datasets)} datasets saved at {stats_csv_save_path}")
def __find_next_available_filename(path_format):
"""find the next available relative path name"""
# check output path to avoid overwriting previous combined results
if os.path.exists(path_format.format("")):
collision_n = 2
while os.path.exists(path_format.format("_" + str(collision_n))):
collision_n += 1
return path_format.format("_" + str(collision_n))
else:
return path_format.format("")
def __get_excluded_number(dataset_dir: str) -> Tuple[int, int]:
"""return the numbers of 1) excluded non-crashed human segments, and 2) excluded crashed segments in given dataset directory"""
# get excluded crash numbers
debug_pat = re.compile(C.DEBUG_PATTERN)
# load label and train, test inds
try:
exclude_path = [path for path in os.listdir(dataset_dir) if debug_pat.match(path)][0]
except IndexError:
raise ValueError(f"Cannot find excluded crashes under {dataset_dir}")
# get excluded segments
df = pd.read_csv(os.path.join(dataset_dir, exclude_path))
non_crash_segs = sum(df.crash_ind == -1)
return non_crash_segs, len(df.index) - non_crash_segs
def __get_train_test_inds_labels(dataset_dir: str) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""return the train inds, test inds, and labels as three numpy arrays"""
try:
train_inds = np.load(os.path.join(dataset_dir, C.INDS_PATH["train"]))
test_inds = np.load(os.path.join(dataset_dir, C.INDS_PATH["test"]))
labels = np.load(os.path.join(dataset_dir, C.COL_PATHS["label"]))
except IOError:
raise FileNotFoundError(f"At least 1 of the following files missing under {dataset_dir}: "
f"{C.INDS_PATH['train']}, {C.INDS_PATH['test']}, {C.COL_PATHS['label']}")
return train_inds, test_inds, labels
def debug_datasets(dataset_parent_dir:str, stats_csv_save_path: str, verbose=False):
"""generate dataset stats and save at given path"""
# find dataset folders in dataset dir
datasets = []
pat = re.compile(C.DATASET_PATTERN)
for path in os.listdir(dataset_parent_dir):
match = pat.match(path)
if match:
# record dataset path, win, ahead, rolling
datasets.append([os.path.join(dataset_parent_dir, match.group(0)),
match.group(1), match.group(2), match.group(3)])
all_dataset_stats = []
for dataset_dir, window, ahead, rolling in datasets:
# get train, test inds, and labels
train_inds, test_inds, labels = __get_train_test_inds_labels(dataset_dir)
train_labels, test_labels = labels[train_inds], labels[test_inds]
exc_non, exc_crash = __get_excluded_number(dataset_dir)
# Record stats
all_dataset_stats.append({
"window": window,
"time ahead": ahead,
"rolling step": rolling,
"train crash count": np.sum(train_labels == 1),
"train noncrash count": np.sum(train_labels == 0),
"train 0:1 ratio": np.sum(train_labels == 0)/np.sum(train_labels == 1),
"test crash count": np.sum(test_labels == 1),
"test noncrash count": np.sum(test_labels == 0),
"test 0:1 ratio": np.sum(test_labels == 0) / np.sum(test_labels == 1),
"dataset total": labels.shape[0],
"train set total": train_labels.shape[0],
"test set total": test_labels.shape[0],
"excluded non-crashed segment total": exc_non,
"excluded crashed segment total": exc_crash
})
# generate and save dataset csv file
pd.DataFrame(all_dataset_stats).to_csv(stats_csv_save_path)
if verbose:
print(f"Statistics for {len(datasets)} datasets saved at {stats_csv_save_path}")
def main():
# Argparser
# noinspection PyTypeChecker
argparser = argparse.ArgumentParser(prog="Summary Argparser",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# argparser.add_argument(
# '--conv_crit', type=str.lower, default=C.VAL_AUC, choices=C.CONV_CRIT,
# help='type of convergence criteria, if early stopping')
argparser.add_argument(
'--merge', action='store_true',
help='whether to merge results and experiment configuration files')
argparser.add_argument(
'--dataset', action='store_true',
help='whether to summarize dataset stats (e.g. total samples, train-test split sizes, crash-noncrash ratios, etc.)')
argparser.add_argument(
'--silent', action='store_true',
help='whether to disable console output')
args = argparser.parse_args()
if args.merge:
merge_results(verbose=not args.silent)
if args.dataset:
generate_dataset_stats(C.DATA_DIR, __find_next_available_filename(C.DATASET_STATS_FORMAT), verbose=not args.silent)
if __name__ == "__main__":
main()
|
[
"os.listdir",
"argparse.ArgumentParser",
"pandas.read_csv",
"re.compile",
"os.path.join",
"numpy.sum",
"pandas.DataFrame"
] |
[((523, 608), 'pandas.read_csv', 'pd.read_csv', (['C.ALL_RES_CSV_PATH'], {'dtype': '{exp_ID_name: int}', 'index_col': 'exp_ID_name'}), '(C.ALL_RES_CSV_PATH, dtype={exp_ID_name: int}, index_col=exp_ID_name\n )\n', (534, 608), True, 'import pandas as pd\n'), ((620, 694), 'pandas.read_csv', 'pd.read_csv', (['C.EXP_ID_LOG'], {'dtype': '{exp_ID_name: int}', 'index_col': 'exp_ID_name'}), '(C.EXP_ID_LOG, dtype={exp_ID_name: int}, index_col=exp_ID_name)\n', (631, 694), True, 'import pandas as pd\n'), ((1370, 1399), 're.compile', 're.compile', (['C.DATASET_PATTERN'], {}), '(C.DATASET_PATTERN)\n', (1380, 1399), False, 'import re\n'), ((1416, 1446), 'os.listdir', 'os.listdir', (['dataset_parent_dir'], {}), '(dataset_parent_dir)\n', (1426, 1446), False, 'import os\n'), ((4121, 4148), 're.compile', 're.compile', (['C.DEBUG_PATTERN'], {}), '(C.DEBUG_PATTERN)\n', (4131, 4148), False, 'import re\n'), ((5479, 5508), 're.compile', 're.compile', (['C.DATASET_PATTERN'], {}), '(C.DATASET_PATTERN)\n', (5489, 5508), False, 'import re\n'), ((5525, 5555), 'os.listdir', 'os.listdir', (['dataset_parent_dir'], {}), '(dataset_parent_dir)\n', (5535, 5555), False, 'import os\n'), ((7607, 7717), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'prog': '"""Summary Argparser"""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "(prog='Summary Argparser', formatter_class=argparse.\n ArgumentDefaultsHelpFormatter)\n", (7630, 7717), False, 'import argparse\n'), ((4441, 4480), 'os.path.join', 'os.path.join', (['dataset_dir', 'exclude_path'], {}), '(dataset_dir, exclude_path)\n', (4453, 4480), False, 'import os\n'), ((3253, 3284), 'pandas.DataFrame', 'pd.DataFrame', (['all_dataset_stats'], {}), '(all_dataset_stats)\n', (3265, 3284), True, 'import pandas as pd\n'), ((4800, 4847), 'os.path.join', 'os.path.join', (['dataset_dir', "C.INDS_PATH['train']"], {}), "(dataset_dir, C.INDS_PATH['train'])\n", (4812, 4847), False, 'import os\n'), ((4877, 4923), 'os.path.join', 'os.path.join', (['dataset_dir', "C.INDS_PATH['test']"], {}), "(dataset_dir, C.INDS_PATH['test'])\n", (4889, 4923), False, 'import os\n'), ((4950, 4997), 'os.path.join', 'os.path.join', (['dataset_dir', "C.COL_PATHS['label']"], {}), "(dataset_dir, C.COL_PATHS['label'])\n", (4962, 4997), False, 'import os\n'), ((7362, 7393), 'pandas.DataFrame', 'pd.DataFrame', (['all_dataset_stats'], {}), '(all_dataset_stats)\n', (7374, 7393), True, 'import pandas as pd\n'), ((2330, 2355), 'numpy.sum', 'np.sum', (['(train_labels == 1)'], {}), '(train_labels == 1)\n', (2336, 2355), True, 'import numpy as np\n'), ((2413, 2438), 'numpy.sum', 'np.sum', (['(train_labels == 0)'], {}), '(train_labels == 0)\n', (2419, 2438), True, 'import numpy as np\n'), ((2596, 2620), 'numpy.sum', 'np.sum', (['(test_labels == 1)'], {}), '(test_labels == 1)\n', (2602, 2620), True, 'import numpy as np\n'), ((2677, 2701), 'numpy.sum', 'np.sum', (['(test_labels == 0)'], {}), '(test_labels == 0)\n', (2683, 2701), True, 'import numpy as np\n'), ((6439, 6464), 'numpy.sum', 'np.sum', (['(train_labels == 1)'], {}), '(train_labels == 1)\n', (6445, 6464), True, 'import numpy as np\n'), ((6522, 6547), 'numpy.sum', 'np.sum', (['(train_labels == 0)'], {}), '(train_labels == 0)\n', (6528, 6547), True, 'import numpy as np\n'), ((6705, 6729), 'numpy.sum', 'np.sum', (['(test_labels == 1)'], {}), '(test_labels == 1)\n', (6711, 6729), True, 'import numpy as np\n'), ((6786, 6810), 'numpy.sum', 'np.sum', (['(test_labels == 0)'], {}), '(test_labels == 0)\n', (6792, 6810), True, 'import numpy as np\n'), 
((2491, 2516), 'numpy.sum', 'np.sum', (['(train_labels == 0)'], {}), '(train_labels == 0)\n', (2497, 2516), True, 'import numpy as np\n'), ((2517, 2542), 'numpy.sum', 'np.sum', (['(train_labels == 1)'], {}), '(train_labels == 1)\n', (2523, 2542), True, 'import numpy as np\n'), ((2753, 2777), 'numpy.sum', 'np.sum', (['(test_labels == 0)'], {}), '(test_labels == 0)\n', (2759, 2777), True, 'import numpy as np\n'), ((2780, 2804), 'numpy.sum', 'np.sum', (['(test_labels == 1)'], {}), '(test_labels == 1)\n', (2786, 2804), True, 'import numpy as np\n'), ((4237, 4260), 'os.listdir', 'os.listdir', (['dataset_dir'], {}), '(dataset_dir)\n', (4247, 4260), False, 'import os\n'), ((6600, 6625), 'numpy.sum', 'np.sum', (['(train_labels == 0)'], {}), '(train_labels == 0)\n', (6606, 6625), True, 'import numpy as np\n'), ((6626, 6651), 'numpy.sum', 'np.sum', (['(train_labels == 1)'], {}), '(train_labels == 1)\n', (6632, 6651), True, 'import numpy as np\n'), ((6862, 6886), 'numpy.sum', 'np.sum', (['(test_labels == 0)'], {}), '(test_labels == 0)\n', (6868, 6886), True, 'import numpy as np\n'), ((6889, 6913), 'numpy.sum', 'np.sum', (['(test_labels == 1)'], {}), '(test_labels == 1)\n', (6895, 6913), True, 'import numpy as np\n')]
|
"""7 compounds containing only carbon and hydrogen,
and having only two topological symmetry classes each
ethane
benzene
cyclopentane
ethylene
methane
cyclopropane
cyclohexane
"""
import numpy as np
from openeye.oechem import OEPerceiveSymmetry
from simtk import unit
from bayes_implicit_solvent.freesolv import cid_to_smiles
from bayes_implicit_solvent.molecule import Molecule
def sample_path_to_unitted_snapshots(path_to_npy_samples):
xyz = np.load(path_to_npy_samples)
traj = [snapshot * unit.nanometer for snapshot in xyz]
return traj
from glob import glob
from pkg_resources import resource_filename
ll = 'gaussian'
n_conf = 2
path_to_vacuum_samples = resource_filename('bayes_implicit_solvent',
'vacuum_samples/vacuum_samples_*.npy')
paths_to_samples = glob(path_to_vacuum_samples)
def extract_cid_key(path):
i = path.find('mobley_')
j = path.find('.npy')
return path[i:j]
cids = list(map(extract_cid_key, paths_to_samples))
mols = []
n_configuration_samples = n_conf
for path in paths_to_samples:
cid = extract_cid_key(path)
smiles = cid_to_smiles[cid]
vacuum_samples = sample_path_to_unitted_snapshots(path)
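    # thin the trajectory so roughly n_configuration_samples snapshots are kept per molecule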
thinning = int(len(vacuum_samples) / n_configuration_samples)
mol = Molecule(smiles, vacuum_samples=vacuum_samples[::thinning], ll=ll)
OEPerceiveSymmetry(mol.mol)
atoms = list(mol.mol.GetAtoms())
symmetry_types = np.array([atom.GetSymmetryClass() for atom in atoms])
if (len(set(symmetry_types)) == 2) and (set([a.element.symbol for a in mol.top.atoms()]) == {'C', 'H'}):
print(mol.mol_name)
mols.append(mol)
|
[
"openeye.oechem.OEPerceiveSymmetry",
"pkg_resources.resource_filename",
"bayes_implicit_solvent.molecule.Molecule",
"numpy.load",
"glob.glob"
] |
[((679, 765), 'pkg_resources.resource_filename', 'resource_filename', (['"""bayes_implicit_solvent"""', '"""vacuum_samples/vacuum_samples_*.npy"""'], {}), "('bayes_implicit_solvent',\n 'vacuum_samples/vacuum_samples_*.npy')\n", (696, 765), False, 'from pkg_resources import resource_filename\n'), ((824, 852), 'glob.glob', 'glob', (['path_to_vacuum_samples'], {}), '(path_to_vacuum_samples)\n', (828, 852), False, 'from glob import glob\n'), ((453, 481), 'numpy.load', 'np.load', (['path_to_npy_samples'], {}), '(path_to_npy_samples)\n', (460, 481), True, 'import numpy as np\n'), ((1288, 1354), 'bayes_implicit_solvent.molecule.Molecule', 'Molecule', (['smiles'], {'vacuum_samples': 'vacuum_samples[::thinning]', 'll': 'll'}), '(smiles, vacuum_samples=vacuum_samples[::thinning], ll=ll)\n', (1296, 1354), False, 'from bayes_implicit_solvent.molecule import Molecule\n'), ((1359, 1386), 'openeye.oechem.OEPerceiveSymmetry', 'OEPerceiveSymmetry', (['mol.mol'], {}), '(mol.mol)\n', (1377, 1386), False, 'from openeye.oechem import OEPerceiveSymmetry\n')]
|
from typing import List, Tuple, Union
import numpy as np
import torch
from allrank.click_models.base import ClickModel
from allrank.data.dataset_loading import PADDED_Y_VALUE
def click_on_slates(slates: Union[Tuple[np.ndarray, np.ndarray], Tuple[torch.Tensor, torch.Tensor]],
click_model: ClickModel, include_empty: bool) -> Tuple[List[Union[np.ndarray, torch.Tensor]], List[List[int]]]:
"""
    This method runs a click model on a list of slates and returns new slates with `y` taken from clicks
:param slates: a Tuple of X, y:
X being a list of slates represented by document vectors
y being a list of slates represented by document relevancies
:param click_model: a click model to be applied to every slate
:param include_empty: if True - will return even slates that didn't get any click
:return: Tuple of X, clicks, X representing the same document vectors as input 'X', clicks representing click mask for every slate
"""
X, y = slates
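    # wrap the click model so padded documents are excluded from clicking and flagged as padded in the output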
clicks = [MaskedRemainMasked(click_model).click(slate) for slate in zip(X, y)]
X_with_clicks = [[X, slate_clicks] for X, slate_clicks in list(zip(X, clicks)) if
(np.sum(slate_clicks > 0) > 0 or include_empty)]
return_X, clicks = map(list, zip(*X_with_clicks))
return return_X, clicks # type: ignore
class MaskedRemainMasked(ClickModel):
"""
This click model wraps another click model and:
    1. ensures the inner click model does not get documents that were padded
    2. ensures padded documents get '-1' in the 'clicked' vector
"""
def __init__(self, inner_click_model: ClickModel):
"""
:param inner_click_model: a click model that is run on the list of non-padded documents
"""
self.inner_click_model = inner_click_model
def click(self, documents: Union[Tuple[np.ndarray, np.ndarray], Tuple[torch.Tensor, torch.Tensor]]) -> np.ndarray:
X, y = documents
padded_values_mask = y == PADDED_Y_VALUE
real_X = X[~padded_values_mask]
real_y = y[~padded_values_mask]
clicks = self.inner_click_model.click((real_X, real_y))
final_clicks = np.zeros_like(y)
final_clicks[padded_values_mask] = PADDED_Y_VALUE
final_clicks[~padded_values_mask] = clicks
return final_clicks
|
[
"numpy.sum",
"numpy.zeros_like"
] |
[((2179, 2195), 'numpy.zeros_like', 'np.zeros_like', (['y'], {}), '(y)\n', (2192, 2195), True, 'import numpy as np\n'), ((1200, 1224), 'numpy.sum', 'np.sum', (['(slate_clicks > 0)'], {}), '(slate_clicks > 0)\n', (1206, 1224), True, 'import numpy as np\n')]
|
"""
nc2pd
~~~~~
A thin python-netCDF4 wrapper to turn netCDF files into pandas data
structures, with a focus on extracting time series from regularly
gridded spatial data (with the ability to interpolate spatially).
Copyright 2015 <NAME>
License: MIT (see LICENSE file)
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import itertools
import numpy as np
import pandas as pd
from scipy import interpolate, ndimage
from netCDF4 import Dataset, num2date
def parse_time_string(string):
    # datetools.parse_time_string returns a tuple whose first element is the parsed datetime
return pd.datetools.parse_time_string(string)[0]
class NetCDFDataset(object):
"""NetCDFDataset"""
def __init__(self, path):
super(NetCDFDataset, self).__init__()
self.path = path
self.rootgrp = Dataset(self.path)
        # Determine latitude and longitude variable names
self.latlon_names = self._latlon_names()
# Generate datetime labels for the time variable
# Also sets self.time_name
self.datetimes = self._datetime_labels()
# Get array of latitude and longitude values
lat_name, lon_name = self.latlon_names
self.lon_array = self.rootgrp.variables[lon_name][:]
self.lat_array = self.rootgrp.variables[lat_name][:]
# Additional dimension slices set up internally as needed
self.dim_slices_collapse = {}
self.dim_slices_select = {}
def _latlon_names(self):
"""Determines the lat/lon variable names in the dataset"""
if 'latitude' in self.rootgrp.variables:
lat_name = 'latitude'
lon_name = 'longitude'
elif 'lat' in self.rootgrp.variables:
lat_name = 'lat'
lon_name = 'lon'
elif 'XDim' in self.rootgrp.variables:
lat_name = 'YDim'
lon_name = 'XDim'
else:
raise ValueError('Cannot determine lat and lon variable names')
return (lat_name, lon_name)
def _datetime_labels(self):
"""Return datetime labels for the dataset"""
if ('dfb' in self.rootgrp.variables
and 'hour' in self.rootgrp.variables):
# solargis data has dimensions ('dfb', 'hour', 'latitude', 'longitude')
# we must do some manual processing of the time dimension
# dfb - 1 to account for 00:00 representation of 24:00
# pushing us into the next day
days = num2date(self.rootgrp.variables['dfb'][:] - 1,
'days since 1980-01-01')
dt_from = '{} 00:00'.format(days[0].strftime('%Y-%m-%d'))
dt_to = '{} 23:00'.format(days[-1].strftime('%Y-%m-%d'))
dates = pd.date_range(dt_from, dt_to, freq='H')
# Set an additional slice on hour internally
self.dim_slices_collapse['hour'] = slice(None, None, None) # [::]
self.time_name = 'dfb'
else:
try:
time_name = 'time'
timevar = self.rootgrp.variables[time_name]
            except KeyError:
try:
time_name = 'TIME'
timevar = self.rootgrp.variables[time_name]
                except KeyError:
raise ValueError('Cannot find time variable.')
self.time_name = time_name
try:
timevar_units = timevar.units.decode()
except AttributeError:
timevar_units = timevar.units
dates = num2date(timevar[:], timevar_units, calendar='standard')
labels = pd.Series(range(len(dates)), index=dates)
return labels
def _find_coordinates(self, lat, lon, bounds=False):
"""
Finds the index of given lat/lon pair in the dataset.
Uses binary search to find closest coordinates if the exact ones
don't exist.
Parameters
----------
lat : float
latitude
lon : float
longitude
Returns
-------
        x, y : tuple or list of tuples
            x and y (lon and lat) coordinate indices; a list of
            (x, y) index pairs is returned when bounds is True
"""
def _find_closest(array, value, bounds):
"""Searches array for value and returns the index of the entry
closest to value."""
if bounds:
pos = np.searchsorted(array, value)
return (pos - 1, pos)
else:
return (np.abs(array - value)).argmin()
if lon in self.lon_array:
x = np.argmax(self.lon_array == lon)
if bounds:
x = (x, x)
else:
x = _find_closest(self.lon_array, lon, bounds)
if lat in self.lat_array:
y = np.argmax(self.lat_array == lat)
if bounds:
y = (y, y)
else:
y = _find_closest(self.lat_array, lat, bounds)
# Return either a single x, y pair or a list of pairs: [(x, y)]
if bounds:
return list(zip(x, y))
else:
return (x, y)
def get_gridpoints(self, latlon_pairs, bounds=False):
"""Take a list of lat-lon pairs and return a list of x-y indices."""
points = [self._find_coordinates(lat, lon, bounds)
for lat, lon in latlon_pairs]
result = [i for i in itertools.chain.from_iterable(points)]
if len(latlon_pairs) == 1 and not bounds:
result = [result]
return result
def get_timerange(self, start=None, end=None, include_end=True):
"""
Take a start and end datetime and return a time index range.
If include_end is True, the returned range is 1 longer so that
the final timestep given in the range is included in slicing.
If the desired end point is not found in the data, the most recent
available end point is used.
"""
if start:
try:
start_idx = self.datetimes[start].ix[0]
except AttributeError: # because it's a single value already
start_idx = self.datetimes[start]
else:
start_idx = self.datetimes.ix[0]
if end:
try:
end_idx = self.datetimes[end].ix[-1]
except AttributeError: # because it's a single value already
end_idx = self.datetimes[end]
except IndexError: # because we've hit a missing datetime entry
# First get closest available end index
end_idx = np.argmin(np.abs(self.datetimes.index.to_pydatetime() -
parse_time_string(end)))
# Now check if this is beyond the desired end date, and if so,
# move back one in the list of existing datetimes, which
# will put us within the desired endpoint (given that the
# desired endpoint didn't exist in the first place!)
if self.datetimes.index[end_idx] > parse_time_string(end):
end_idx = end_idx - 1
else:
end_idx = self.datetimes.ix[-1]
if include_end:
end_idx += 1
return (start_idx, end_idx)
def read_data(self, variable, x_range=None, y_range=None,
time_range=None, fixed_dims={},
friendly_labels=False):
"""
Return a panel of data with the dimensions [time, lat, lon], i.e.
items are time, major_axis is latitude, minor_axis is longitude.
Parameters
----------
variable : str
name of variable
x_range : int or (int, int), default None
range of x grid points to select, if None, entire x range is used
y_range : int or (int, int), default None
range of y grid points to select, if None, entire y range is used
time_range : int or (int, int), default None
range of timesteps to select, if None, entire time range is used
fixed_dims : dict, default {}
map selections to other dimensions that may exist in the data,
e.g. {'level': 0}
friendly_labels : bool, default False
if True, sets the axis labels to datetimes, latitudes and
longitudes, instead of just integer indices
"""
# Helpers
slice_all = slice(None, None, None)
def add_slice(setting, var_slice):
if not setting:
slicer = slice_all
else:
if isinstance(setting, int) or isinstance(setting, np.integer):
slicer = slice(setting, setting + 1)
else: # Assume two or more integers
slicer = slice(*setting)
var_slice.append(slicer)
return slicer
# Start work
var = self.rootgrp.variables[variable]
var_slice = []
dim_pos = 0
# Transposition so that the panel order is always time, lat, lon
transposition = [None, None, None]
for dim in var.dimensions:
# 1. check if it's time, lat or lon name
# and assign appropriate slice if so
if dim == self.time_name:
time_slice = add_slice(time_range, var_slice)
transposition[0] = dim_pos
dim_pos += 1
elif dim == self.latlon_names[0]: # lat --> y
y_slice = add_slice(y_range, var_slice)
transposition[1] = dim_pos
dim_pos += 1
elif dim == self.latlon_names[1]: # lon --> x
x_slice = add_slice(x_range, var_slice)
transposition[2] = dim_pos
dim_pos += 1
# 2. check if it's in self.dim_slices
elif dim in self.dim_slices_collapse:
var_slice.append(self.dim_slices_collapse[dim])
# FIXME after taking var[var_slice], will also need
                # to collapse all dim_slices_collapse dimensions,
# or else reading e.g. solargis files won't work
raise NotImplementedError('well, that did not work!')
elif dim in self.dim_slices_select:
var_slice.append(self.dim_slices_select[dim])
# 3. check if it's in fixed_dims
elif dim in fixed_dims:
var_slice.append(fixed_dims[dim])
# 4. else, raise a KeyError or something
else:
raise KeyError('Dimension `{}` unknown'.format(dim))
panel = pd.Panel(var[var_slice]).transpose(*transposition)
if friendly_labels:
panel.items = self.datetimes.index[time_slice]
panel.major_axis = self.lat_array[y_slice]
panel.minor_axis = self.lon_array[x_slice]
return panel
def read_timeseries(self, variable, latlon_pairs,
start=None, end=None,
buffer_size=0,
fixed_dims={},
return_metadata=False):
"""
Return a time series for each given lat-lon pair.
Parameters
----------
variable : str
name of variable
latlon_pairs : list of (lat, lon) tuples
list of (lat, lon) tuples
start : str, default None
datetime string of the form 'YYYY-MM-DD hh:mm' or similar
end : str, default None
datetime string, like for start
fixed_dims : dict, default {}
map selections to other dimensions that may exist in the data,
e.g. {'level': 0}
Returns
-------
data : one or two pandas DataFrames
the first DataFrame contains each requested lat-lon pair
as a column
if return_metadata is True, the second DataFrame maps from
the requested latitudes/longitudes to grid points and
their latitudes/longitudes
"""
gridpoints = self.get_gridpoints(latlon_pairs)
timerange = self.get_timerange(start, end)
# Data
dfs = []
for x, y in gridpoints:
if buffer_size:
x_slice = (x - buffer_size, x + 1 + buffer_size)
y_slice = (y - buffer_size, y + 1 + buffer_size)
else:
x_slice = x
y_slice = y
panel = self.read_data(variable,
x_range=x_slice,
y_range=y_slice,
time_range=timerange,
fixed_dims=fixed_dims,
friendly_labels=True)
dfs.append(panel.to_frame().T)
# Metadata
md = pd.DataFrame(latlon_pairs, columns=['lat', 'lon'])
grid_cols = list(zip(*gridpoints))
md['y_gridpoint'] = grid_cols[1]
md['x_gridpoint'] = grid_cols[0]
md['lat_gridpoint'] = [self.lat_array[i] for i in md['y_gridpoint']]
md['lon_gridpoint'] = [self.lon_array[i] for i in md['x_gridpoint']]
data = pd.concat(dfs, axis=1)
if return_metadata:
return (data, md)
else:
return data
def read_boundingbox(self, variable, latlon_pairs,
start=None, end=None,
buffer_size=0,
fixed_dims={}):
"""
Return a time-lat-lon panel encompassing all the given lat-lon pairs,
with a surrounding buffer.
Parameters
----------
variable : str
name of variable
latlon_pairs : list of (lat, lon) tuples
list of (lat, lon) tuples
start : str, default None
datetime string of the form 'YYYY-MM-DD hh:mm' or similar
end : str, default None
datetime string, like for start
buffer_size : int, default 0
Grid points by which to extend the bounding box around the
outermost points. Set to 0 to disable.
fixed_dims : dict, default {}
map selections to other dimensions that may exist in the data,
e.g. {'level': 0}
"""
gridpoints = self.get_gridpoints(latlon_pairs, bounds=True)
timerange = self.get_timerange(start, end)
# Get bounding box
x, y = list(zip(*gridpoints))
x_slice = (min(x) - buffer_size, max(x) + 1 + buffer_size)
y_slice = (min(y) - buffer_size, max(y) + 1 + buffer_size)
panel = self.read_data(variable,
x_range=x_slice,
y_range=y_slice,
time_range=timerange,
fixed_dims=fixed_dims,
friendly_labels=True)
return panel
def read_interpolated_timeseries(self, variable, latlon_pairs,
start=None, end=None,
buffer_size=0,
order=1, **kwargs):
"""
Return an interpolated time series for each given lat-lon pair.
Parameters
----------
variable : str
name of variable
latlon_pairs : list of (lat, lon) tuples
list of (lat, lon) tuples
start : str, default None
datetime string of the form 'YYYY-MM-DD hh:mm' or similar
end : str, default None
datetime string, like for start
        buffer_size : int, default 0
Grid points by which to extend the bounding box around the
outermost points. Set to 0 to disable.
order : int, default 1
order of spline to use, 1 is linear
**kwargs
additional keyword args are passed to ndimage.map_coordinates
Returns
-------
data : pandas DataFrame
each requested lat-lon pair as a column
"""
data = self.read_boundingbox(variable, latlon_pairs,
start=start, end=end,
buffer_size=buffer_size)
        return spatial_interpolation(data, latlon_pairs, order=order, **kwargs)
def spatial_interpolation(data, latlon_pairs, order=1, **kwargs):
"""
Parameters
----------
data : pandas Panel
with dimensions time (items), lat (major_axis), lon(minor_axis)
    latlon_pairs : list of (lat, lon) tuples
        latitude and longitude pairs for which to interpolate
order : int, default 1
order of spline to use, 1 is linear
**kwargs
additional keyword args are passed to ndimage.map_coordinates
"""
# lat, lon to array dimensions y, z
m = {}
for var, dim in [('y', data.major_axis), ('z', data.minor_axis)]:
try:
m[var] = interpolate.interp1d(dim, list(range(len(dim))))
except ValueError: # Raised if there is only one entry
m[var] = lambda x: 0 # 0 is the only index that exists
# x dimension is time, we want ALL timesteps from the data
x = list(range(len(data.items)))
results = []
# do the actual interpolation to y, z array coordinates
for lat, lon in latlon_pairs:
y = np.ones_like(x) * m['y'](lat)
z = np.ones_like(x) * m['z'](lon)
interp = ndimage.map_coordinates(data.as_matrix(), [x, y, z],
order=order, **kwargs)
results.append(pd.Series(interp))
df = pd.concat(results, axis=1)
df.index = data.items
# If latlon_pairs contains only one pair, we ensure it's a tuple
# because we'd try to set two columns otherwise
if len(latlon_pairs) == 1:
latlon_pairs = tuple(latlon_pairs)
df.columns = latlon_pairs
return df
|
[
"pandas.Series",
"numpy.ones_like",
"numpy.abs",
"netCDF4.num2date",
"numpy.searchsorted",
"netCDF4.Dataset",
"numpy.argmax",
"pandas.Panel",
"pandas.datetools.parse_time_string",
"itertools.chain.from_iterable",
"pandas.DataFrame",
"pandas.concat",
"pandas.date_range"
] |
[((17595, 17621), 'pandas.concat', 'pd.concat', (['results'], {'axis': '(1)'}), '(results, axis=1)\n', (17604, 17621), True, 'import pandas as pd\n'), ((637, 675), 'pandas.datetools.parse_time_string', 'pd.datetools.parse_time_string', (['string'], {}), '(string)\n', (667, 675), True, 'import pandas as pd\n'), ((858, 876), 'netCDF4.Dataset', 'Dataset', (['self.path'], {}), '(self.path)\n', (865, 876), False, 'from netCDF4 import Dataset, num2date\n'), ((12836, 12886), 'pandas.DataFrame', 'pd.DataFrame', (['latlon_pairs'], {'columns': "['lat', 'lon']"}), "(latlon_pairs, columns=['lat', 'lon'])\n", (12848, 12886), True, 'import pandas as pd\n'), ((13182, 13204), 'pandas.concat', 'pd.concat', (['dfs'], {'axis': '(1)'}), '(dfs, axis=1)\n', (13191, 13204), True, 'import pandas as pd\n'), ((2507, 2578), 'netCDF4.num2date', 'num2date', (["(self.rootgrp.variables['dfb'][:] - 1)", '"""days since 1980-01-01"""'], {}), "(self.rootgrp.variables['dfb'][:] - 1, 'days since 1980-01-01')\n", (2515, 2578), False, 'from netCDF4 import Dataset, num2date\n'), ((2766, 2805), 'pandas.date_range', 'pd.date_range', (['dt_from', 'dt_to'], {'freq': '"""H"""'}), "(dt_from, dt_to, freq='H')\n", (2779, 2805), True, 'import pandas as pd\n'), ((3580, 3636), 'netCDF4.num2date', 'num2date', (['timevar[:]', 'timevar_units'], {'calendar': '"""standard"""'}), "(timevar[:], timevar_units, calendar='standard')\n", (3588, 3636), False, 'from netCDF4 import Dataset, num2date\n'), ((4584, 4616), 'numpy.argmax', 'np.argmax', (['(self.lon_array == lon)'], {}), '(self.lon_array == lon)\n', (4593, 4616), True, 'import numpy as np\n'), ((4790, 4822), 'numpy.argmax', 'np.argmax', (['(self.lat_array == lat)'], {}), '(self.lat_array == lat)\n', (4799, 4822), True, 'import numpy as np\n'), ((17335, 17350), 'numpy.ones_like', 'np.ones_like', (['x'], {}), '(x)\n', (17347, 17350), True, 'import numpy as np\n'), ((17377, 17392), 'numpy.ones_like', 'np.ones_like', (['x'], {}), '(x)\n', (17389, 17392), True, 'import numpy as np\n'), ((17566, 17583), 'pandas.Series', 'pd.Series', (['interp'], {}), '(interp)\n', (17575, 17583), True, 'import pandas as pd\n'), ((4391, 4420), 'numpy.searchsorted', 'np.searchsorted', (['array', 'value'], {}), '(array, value)\n', (4406, 4420), True, 'import numpy as np\n'), ((5384, 5421), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['points'], {}), '(points)\n', (5413, 5421), False, 'import itertools\n'), ((10610, 10634), 'pandas.Panel', 'pd.Panel', (['var[var_slice]'], {}), '(var[var_slice])\n', (10618, 10634), True, 'import pandas as pd\n'), ((4501, 4522), 'numpy.abs', 'np.abs', (['(array - value)'], {}), '(array - value)\n', (4507, 4522), True, 'import numpy as np\n')]
|
#===============================================================================
# Copyright 2020-2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
import sys
import os
import argparse
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import bench
import daal4py
import numpy as np
from os import environ
import xgboost as xgb
import utils
parser = argparse.ArgumentParser(
description='xgboost gbt + model transform + daal predict benchmark')
parser.add_argument('--colsample-bytree', type=float, default=1,
help='Subsample ratio of columns '
'when constructing each tree')
parser.add_argument('--count-dmatrix', default=False, action='store_true',
help='Count DMatrix creation in time measurements')
parser.add_argument('--enable-experimental-json-serialization', default=True,
choices=('True', 'False'), help='Use JSON to store memory snapshots')
parser.add_argument('--grow-policy', type=str, default='depthwise',
help='Controls a way new nodes are added to the tree')
parser.add_argument('--learning-rate', '--eta', type=float, default=0.3,
help='Step size shrinkage used in update '
                         'to prevent overfitting')
parser.add_argument('--max-bin', type=int, default=256,
help='Maximum number of discrete bins to '
'bucket continuous features')
parser.add_argument('--max-delta-step', type=float, default=0,
help='Maximum delta step we allow each leaf output to be')
parser.add_argument('--max-depth', type=int, default=6,
help='Maximum depth of a tree')
parser.add_argument('--max-leaves', type=int, default=0,
help='Maximum number of nodes to be added')
parser.add_argument('--min-child-weight', type=float, default=1,
help='Minimum sum of instance weight needed in a child')
parser.add_argument('--min-split-loss', '--gamma', type=float, default=0,
help='Minimum loss reduction required to make'
                             ' a partition on a leaf node')
parser.add_argument('--n-estimators', type=int, default=100,
help='Number of gradient boosted trees')
parser.add_argument('--objective', type=str, required=True,
choices=('reg:squarederror', 'binary:logistic',
'multi:softmax', 'multi:softprob'),
                    help='Specify the learning task and the corresponding objective')
parser.add_argument('--reg-alpha', type=float, default=0,
help='L1 regularization term on weights')
parser.add_argument('--reg-lambda', type=float, default=1,
help='L2 regularization term on weights')
parser.add_argument('--scale-pos-weight', type=float, default=1,
help='Controls a balance of positive and negative weights')
parser.add_argument('--single-precision-histogram', default=False, action='store_true',
                    help='Use single precision to build histograms instead of double precision')
parser.add_argument('--subsample', type=float, default=1,
help='Subsample ratio of the training instances')
parser.add_argument('--tree-method', type=str, required=True,
help='The tree construction algorithm used in XGBoost')
params = bench.parse_args(parser)
X_train, X_test, y_train, y_test = bench.load_data(params)
xgb_params = {
'booster': 'gbtree',
'verbosity': 0,
'learning_rate': params.learning_rate,
'min_split_loss': params.min_split_loss,
'max_depth': params.max_depth,
'min_child_weight': params.min_child_weight,
'max_delta_step': params.max_delta_step,
'subsample': params.subsample,
'sampling_method': 'uniform',
'colsample_bytree': params.colsample_bytree,
'colsample_bylevel': 1,
'colsample_bynode': 1,
'reg_lambda': params.reg_lambda,
'reg_alpha': params.reg_alpha,
'tree_method': params.tree_method,
'scale_pos_weight': params.scale_pos_weight,
'grow_policy': params.grow_policy,
'max_leaves': params.max_leaves,
'max_bin': params.max_bin,
'objective': params.objective,
'seed': params.seed,
'single_precision_histogram': params.single_precision_histogram,
'enable_experimental_json_serialization':
params.enable_experimental_json_serialization
}
if params.threads != -1:
xgb_params.update({'nthread': params.threads})
if 'OMP_NUM_THREADS' in environ.keys():
xgb_params['nthread'] = int(environ['OMP_NUM_THREADS'])
if params.objective.startswith('reg'):
task = 'regression'
metric_name, metric_func = 'rmse', bench.rmse_score
else:
task = 'classification'
metric_name, metric_func = 'accuracy[%]', utils.get_accuracy
if 'cudf' in str(type(y_train)):
params.n_classes = y_train[y_train.columns[0]].nunique()
else:
params.n_classes = len(np.unique(y_train))
if params.n_classes > 2:
xgb_params['num_class'] = params.n_classes
t_creat_train, dtrain = bench.measure_function_time(xgb.DMatrix, X_train,
params=params, label=y_train)
t_creat_test, dtest = bench.measure_function_time(xgb.DMatrix, X_test, params=params)
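# DMatrix creation is timed separately; with --count-dmatrix the matrix is rebuilt inside the timed fit() call below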
def fit(dmatrix=None):
if dmatrix is None:
dmatrix = xgb.DMatrix(X_train, y_train)
return xgb.train(xgb_params, dmatrix, params.n_estimators)
def predict():
dmatrix = xgb.DMatrix(X_test, y_test)
return model_xgb.predict(dmatrix)
t_train, model_xgb = bench.measure_function_time(
fit, None if params.count_dmatrix else dtrain, params=params)
train_metric = None
if not X_train.equals(X_test):
y_train_pred = model_xgb.predict(dtrain)
train_metric = metric_func(y_train, y_train_pred)
t_xgb_pred, y_test_pred = bench.measure_function_time(predict, params=params)
test_metric_xgb = metric_func(y_test, y_test_pred)
t_trans, model_daal = bench.measure_function_time(
daal4py.get_gbt_model_from_xgboost, model_xgb, params=params)
if hasattr(params, 'n_classes'):
predict_algo = daal4py.gbt_classification_prediction(
nClasses=params.n_classes, resultsToEvaluate='computeClassLabels', fptype='float')
t_daal_pred, daal_pred = bench.measure_function_time(
predict_algo.compute, X_test, model_daal, params=params)
test_metric_daal = metric_func(y_test, daal_pred.prediction)
else:
predict_algo = daal4py.gbt_regression_prediction()
t_daal_pred, daal_pred = bench.measure_function_time(
predict_algo.compute, X_test, model_daal, params=params)
test_metric_daal = metric_func(y_test, daal_pred.prediction)
utils.print_output(library='modelbuilders',
algorithm=f'xgboost_{task}_and_modelbuilder',
stages=['xgboost_train', 'xgboost_predict', 'daal4py_predict'],
params=params, functions=['xgb_dmatrix', 'xgb_dmatrix',
'xgb_train', 'xgb_predict',
'xgb_to_daal', 'daal_compute'],
times=[t_creat_train, t_train, t_creat_test, t_xgb_pred,
t_trans, t_daal_pred],
accuracy_type=metric_name,
accuracies=[train_metric, test_metric_xgb, test_metric_daal],
data=[X_train, X_test, X_test])
|
[
"os.environ.keys",
"bench.load_data",
"xgboost.DMatrix",
"daal4py.gbt_regression_prediction",
"argparse.ArgumentParser",
"xgboost.train",
"numpy.unique",
"bench.parse_args",
"daal4py.gbt_classification_prediction",
"bench.measure_function_time",
"os.path.abspath",
"utils.print_output"
] |
[((978, 1076), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""xgboost gbt + model transform + daal predict benchmark"""'}), "(description=\n 'xgboost gbt + model transform + daal predict benchmark')\n", (1001, 1076), False, 'import argparse\n'), ((3999, 4023), 'bench.parse_args', 'bench.parse_args', (['parser'], {}), '(parser)\n', (4015, 4023), False, 'import bench\n'), ((4060, 4083), 'bench.load_data', 'bench.load_data', (['params'], {}), '(params)\n', (4075, 4083), False, 'import bench\n'), ((5698, 5777), 'bench.measure_function_time', 'bench.measure_function_time', (['xgb.DMatrix', 'X_train'], {'params': 'params', 'label': 'y_train'}), '(xgb.DMatrix, X_train, params=params, label=y_train)\n', (5725, 5777), False, 'import bench\n'), ((5852, 5915), 'bench.measure_function_time', 'bench.measure_function_time', (['xgb.DMatrix', 'X_test'], {'params': 'params'}), '(xgb.DMatrix, X_test, params=params)\n', (5879, 5915), False, 'import bench\n'), ((6196, 6289), 'bench.measure_function_time', 'bench.measure_function_time', (['fit', '(None if params.count_dmatrix else dtrain)'], {'params': 'params'}), '(fit, None if params.count_dmatrix else dtrain,\n params=params)\n', (6223, 6289), False, 'import bench\n'), ((6468, 6519), 'bench.measure_function_time', 'bench.measure_function_time', (['predict'], {'params': 'params'}), '(predict, params=params)\n', (6495, 6519), False, 'import bench\n'), ((6594, 6687), 'bench.measure_function_time', 'bench.measure_function_time', (['daal4py.get_gbt_model_from_xgboost', 'model_xgb'], {'params': 'params'}), '(daal4py.get_gbt_model_from_xgboost, model_xgb,\n params=params)\n', (6621, 6687), False, 'import bench\n'), ((7310, 7811), 'utils.print_output', 'utils.print_output', ([], {'library': '"""modelbuilders"""', 'algorithm': 'f"""xgboost_{task}_and_modelbuilder"""', 'stages': "['xgboost_train', 'xgboost_predict', 'daal4py_predict']", 'params': 'params', 'functions': "['xgb_dmatrix', 'xgb_dmatrix', 'xgb_train', 'xgb_predict', 'xgb_to_daal',\n 'daal_compute']", 'times': '[t_creat_train, t_train, t_creat_test, t_xgb_pred, t_trans, t_daal_pred]', 'accuracy_type': 'metric_name', 'accuracies': '[train_metric, test_metric_xgb, test_metric_daal]', 'data': '[X_train, X_test, X_test]'}), "(library='modelbuilders', algorithm=\n f'xgboost_{task}_and_modelbuilder', stages=['xgboost_train',\n 'xgboost_predict', 'daal4py_predict'], params=params, functions=[\n 'xgb_dmatrix', 'xgb_dmatrix', 'xgb_train', 'xgb_predict', 'xgb_to_daal',\n 'daal_compute'], times=[t_creat_train, t_train, t_creat_test,\n t_xgb_pred, t_trans, t_daal_pred], accuracy_type=metric_name,\n accuracies=[train_metric, test_metric_xgb, test_metric_daal], data=[\n X_train, X_test, X_test])\n", (7328, 7811), False, 'import utils\n'), ((5135, 5149), 'os.environ.keys', 'environ.keys', ([], {}), '()\n', (5147, 5149), False, 'from os import environ\n'), ((6024, 6075), 'xgboost.train', 'xgb.train', (['xgb_params', 'dmatrix', 'params.n_estimators'], {}), '(xgb_params, dmatrix, params.n_estimators)\n', (6033, 6075), True, 'import xgboost as xgb\n'), ((6107, 6134), 'xgboost.DMatrix', 'xgb.DMatrix', (['X_test', 'y_test'], {}), '(X_test, y_test)\n', (6118, 6134), True, 'import xgboost as xgb\n'), ((6742, 6866), 'daal4py.gbt_classification_prediction', 'daal4py.gbt_classification_prediction', ([], {'nClasses': 'params.n_classes', 'resultsToEvaluate': '"""computeClassLabels"""', 'fptype': '"""float"""'}), "(nClasses=params.n_classes,\n resultsToEvaluate='computeClassLabels', fptype='float')\n", 
(6779, 6866), False, 'import daal4py\n'), ((6901, 6989), 'bench.measure_function_time', 'bench.measure_function_time', (['predict_algo.compute', 'X_test', 'model_daal'], {'params': 'params'}), '(predict_algo.compute, X_test, model_daal,\n params=params)\n', (6928, 6989), False, 'import bench\n'), ((7085, 7120), 'daal4py.gbt_regression_prediction', 'daal4py.gbt_regression_prediction', ([], {}), '()\n', (7118, 7120), False, 'import daal4py\n'), ((7150, 7238), 'bench.measure_function_time', 'bench.measure_function_time', (['predict_algo.compute', 'X_test', 'model_daal'], {'params': 'params'}), '(predict_algo.compute, X_test, model_daal,\n params=params)\n', (7177, 7238), False, 'import bench\n'), ((5983, 6012), 'xgboost.DMatrix', 'xgb.DMatrix', (['X_train', 'y_train'], {}), '(X_train, y_train)\n', (5994, 6012), True, 'import xgboost as xgb\n'), ((834, 859), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (849, 859), False, 'import os\n'), ((5573, 5591), 'numpy.unique', 'np.unique', (['y_train'], {}), '(y_train)\n', (5582, 5591), True, 'import numpy as np\n')]
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
import numpy as np
print("GPU is", "available" if torch.cuda.is_available() else "NOT AVAILABLE")
print(torch.__version__)
# import images dataset
import os
from PIL import Image
#import cv2
BATCH_SIZE = 32
IMG_SIZE = (300, 300)
#dataset_meta: {img_filename:tags}
#tags:[1,0,0,0,1,...0,1], each index represents a tag, can be found in labels_dict_rev
import json
manifest_path = "./data/safebooru-pic-meta/list.json"
label_path = "./safebooru-labels-dict.json"
model_path = "./safebooru-anime_vgg16.pth"
with open(manifest_path, "r") as fp1:
dataset_meta=json.load(fp1)
print("dataset size:{}".format(len(dataset_meta)))
with open(label_path,"r") as fp1:
labels_dict = json.load(fp1)
labels_dict_rev = {int(labels_dict[k]):k for k in labels_dict}
print(labels_dict_rev)
#load dataset
import random
dataset=[]
classes = {}
labelcounter=0
for fn in dataset_meta:
tags=np.array(dataset_meta[fn]).astype(np.float32)
img_bytes = Image.open(fn)
img = torchvision.transforms.functional.resize(img=img_bytes,size=IMG_SIZE)#resize img
img=(np.array(img)/255.0).astype(np.float32)
if(img.shape != (IMG_SIZE[0],IMG_SIZE[1],3)):
        #skip black-and-white or non-standard RGB images
continue
dataset.append([img,tags])
print("dataset size:{}".format(len(dataset))) #3670
train_size=int(len(dataset)*0.8)
test_size=len(dataset)-train_size
#remember to shuffle
random.shuffle(dataset)
#doesn't hurt to shuffle again
trainloader = torch.utils.data.DataLoader(dataset[:train_size], shuffle=True, batch_size=BATCH_SIZE)
testloader = torch.utils.data.DataLoader(dataset[train_size:], shuffle=True, batch_size=BATCH_SIZE)
print("loaded data.")
# plot some of the images
import matplotlib.pyplot as plt
import random
import math
import json
import utils
random.seed(2333)
classes = labels_dict #just alias
classes_rev = labels_dict_rev
subset = random.sample(dataset,25)
utils.show_imgs_multi_label(data=[item[0] for item in subset],real_labels=[item[1] for item in subset],classes_rev=classes_rev,dispMax=3)
import torchvision.models as models
from torchsummary import summary
vgg16 = models.vgg16(pretrained=True)
for param in vgg16.parameters():
#print(param)
param.requires_grad = False
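# the pretrained VGG16 backbone is frozen; only the replacement classifier head below is trained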
vgg16.classifier[-1] = nn.Linear(in_features=4096, out_features=len(classes))
# vgg16.classifier. torch.nn.Softmax
print(vgg16)
#vgg16.cuda() #move it to cuda
#summary(vgg16, (3, 300, 300))
'''
VGG(
(features): Sequential(
(0): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(3): ReLU(inplace=True)
(4): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
(5): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(6): ReLU(inplace=True)
(7): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(8): ReLU(inplace=True)
(9): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
(10): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(11): ReLU(inplace=True)
(12): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(13): ReLU(inplace=True)
(14): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(15): ReLU(inplace=True)
(16): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
(17): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(18): ReLU(inplace=True)
(19): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(20): ReLU(inplace=True)
(21): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(22): ReLU(inplace=True)
(23): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
(24): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(25): ReLU(inplace=True)
(26): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(27): ReLU(inplace=True)
(28): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(29): ReLU(inplace=True)
(30): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
)
(avgpool): AdaptiveAvgPool2d(output_size=(7, 7))
(classifier): Sequential(
(0): Linear(in_features=25088, out_features=4096, bias=True)
(1): ReLU(inplace=True)
(2): Dropout(p=0.5, inplace=False)
(3): Linear(in_features=4096, out_features=4096, bias=True)
(4): ReLU(inplace=True)
(5): Dropout(p=0.5, inplace=False)
(6): Linear(in_features=4096, out_features=50, bias=True)
)
)
'''
#take a look at an example input-output of the model
## compute accuracy
def get_accuracy(logit, target, batch_size):
''' Obtain accuracy for training round '''
# torch.max[0] stores the data, torch.max[1] stores the index (of the max)
tag_len = list(target[0].size())[0]#would be 50 tags in current setting
# print("val:{}".format( torch.sum((logit==target) * (target==True)) ))#if we only want precision use this
corrects = torch.sum((torch.gt(logit,0.5)==torch.gt(target,0.5))) #number of true pos + number of true neg
#corrects = (torch.max(logit, 1)[1].view(target.size()).data == target.data).sum()
accuracy = 100.0 * corrects/(batch_size*tag_len)
#print("correct guess:{}, total num:{},acc:{}".format(corrects, batch_size*tag_len, accuracy))
return accuracy.item()
for images, labels in trainloader:
b = images.permute(0,3,1,2)#rearrange shape
print(b.shape)
print(labels.shape)
out = vgg16(b)
print("batch size:", images.shape)
print(out.shape)
acc = get_accuracy(out,labels,BATCH_SIZE)
    #out[i] is an array of per-tag scores; labels[i] is the ground-truth tag vector
    #predictions are obtained by thresholding out[i] at 0.5 (see get_accuracy)
#print("{},{}".format(out[0],labels[0]))
break
def freeze_model(model):
for param in model.parameters():
param.requires_grad = False
return
# for images, labels in trainloader:
# pass
# for images, labels in testloader:
# pass
print("ok")
#start training
import torch.optim as optim
learning_rate = 0.01
num_epochs=20
criterion = nn.BCEWithLogitsLoss()
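# BCEWithLogitsLoss applies the sigmoid internally, so the classifier head outputs raw logits for each tag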
optimizer = optim.SGD(vgg16.classifier[-1].parameters(),lr=learning_rate,momentum=0.9)
torch.cuda.empty_cache()
res=[]
torch.cuda.empty_cache()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)
vgg16.to(device)
for epoch in range(num_epochs):
train_running_loss = 0.0
train_acc = 0.0
vgg16 = vgg16.train()
for i, (images, labels) in enumerate(trainloader):
images = images.to(device)
labels = labels.to(device)
b = images.permute(0,3,1,2)
out = vgg16(b)
loss = criterion(out, labels)
optimizer.zero_grad()
loss.backward()
del b
del images
## update model params
optimizer.step()
train_running_loss += loss.detach().item() #make a copy without tracking it
train_acc += get_accuracy(out, labels, BATCH_SIZE)#use += instead of =
del labels
del loss
vgg16.eval()
test_acc = 0.0
if(True):
for j, (images, labels) in enumerate(testloader):
images = images.to(device)
labels = labels.to(device)
b = images.permute(0,3,1,2)
outputs = vgg16(b)
test_acc += get_accuracy(outputs, labels, BATCH_SIZE)
del images
del labels
del b
print('Epoch: %d | Loss: %.4f | Train Accuracy: %.2f | Test Accuracy: %.2f' \
%(epoch, train_running_loss / i, train_acc/i, test_acc/j ))#accumulated, so use += before
res.append([learning_rate, epoch, train_running_loss / i, train_acc/i, test_acc/j])
freeze_model(vgg16)#freeze the model
import importlib
importlib.reload(utils)
torch.save(vgg16, model_path)
print("model saved to {}".format(model_path))
subset = random.sample(dataset,25)
imgs = np.asarray([item[0] for item in subset])
#subset=np.append(subset,item[0])
imgs = torch.tensor(imgs)
imgs = imgs.to(device)
b = imgs.permute(0,3,1,2)
print(b.shape)
outputs = vgg16(b)
corrects = torch.gt(outputs,0.5).data
correct_num=0
for i in range(25):
correct_num=0
true_labels=subset[i][1]
for j in range(len(true_labels)):
if(corrects[i][j] == true_labels[j]):
correct_num+=1
print("correct prediction for image[{}]:{}/{}".format(i,correct_num,len(subset[i][1])))
utils.show_imgs_multi_label(data=[item[0] for item in subset],real_labels=[item[1] for item in subset],pred_labels=corrects,classes_rev=classes_rev,dispMax=3)
#utils.show_imgs(data=[item[0] for item in subset],real_labels=[item[1] for item in subset],pred_labels=None,classes_rev=classes_rev)
|
[
"random.sample",
"torchvision.models.vgg16",
"PIL.Image.open",
"random.shuffle",
"torch.gt",
"numpy.asarray",
"random.seed",
"json.load",
"torch.tensor",
"numpy.array",
"torch.cuda.is_available",
"importlib.reload",
"torch.utils.data.DataLoader",
"torch.nn.BCEWithLogitsLoss",
"torch.save",
"torchvision.transforms.functional.resize",
"utils.show_imgs_multi_label",
"torch.cuda.empty_cache"
] |
[((1571, 1594), 'random.shuffle', 'random.shuffle', (['dataset'], {}), '(dataset)\n', (1585, 1594), False, 'import random\n'), ((1640, 1731), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset[:train_size]'], {'shuffle': '(True)', 'batch_size': 'BATCH_SIZE'}), '(dataset[:train_size], shuffle=True, batch_size=\n BATCH_SIZE)\n', (1667, 1731), False, 'import torch\n'), ((1740, 1831), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset[train_size:]'], {'shuffle': '(True)', 'batch_size': 'BATCH_SIZE'}), '(dataset[train_size:], shuffle=True, batch_size=\n BATCH_SIZE)\n', (1767, 1831), False, 'import torch\n'), ((1960, 1977), 'random.seed', 'random.seed', (['(2333)'], {}), '(2333)\n', (1971, 1977), False, 'import random\n'), ((2060, 2086), 'random.sample', 'random.sample', (['dataset', '(25)'], {}), '(dataset, 25)\n', (2073, 2086), False, 'import random\n'), ((2086, 2231), 'utils.show_imgs_multi_label', 'utils.show_imgs_multi_label', ([], {'data': '[item[0] for item in subset]', 'real_labels': '[item[1] for item in subset]', 'classes_rev': 'classes_rev', 'dispMax': '(3)'}), '(data=[item[0] for item in subset], real_labels=\n [item[1] for item in subset], classes_rev=classes_rev, dispMax=3)\n', (2113, 2231), False, 'import utils\n'), ((2302, 2331), 'torchvision.models.vgg16', 'models.vgg16', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (2314, 2331), True, 'import torchvision.models as models\n'), ((6405, 6427), 'torch.nn.BCEWithLogitsLoss', 'nn.BCEWithLogitsLoss', ([], {}), '()\n', (6425, 6427), True, 'import torch.nn as nn\n'), ((6517, 6541), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (6539, 6541), False, 'import torch\n'), ((6551, 6575), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (6573, 6575), False, 'import torch\n'), ((8082, 8105), 'importlib.reload', 'importlib.reload', (['utils'], {}), '(utils)\n', (8098, 8105), False, 'import importlib\n'), ((8107, 8136), 'torch.save', 'torch.save', (['vgg16', 'model_path'], {}), '(vgg16, model_path)\n', (8117, 8136), False, 'import torch\n'), ((8192, 8218), 'random.sample', 'random.sample', (['dataset', '(25)'], {}), '(dataset, 25)\n', (8205, 8218), False, 'import random\n'), ((8225, 8265), 'numpy.asarray', 'np.asarray', (['[item[0] for item in subset]'], {}), '([item[0] for item in subset])\n', (8235, 8265), True, 'import numpy as np\n'), ((8311, 8329), 'torch.tensor', 'torch.tensor', (['imgs'], {}), '(imgs)\n', (8323, 8329), False, 'import torch\n'), ((8738, 8910), 'utils.show_imgs_multi_label', 'utils.show_imgs_multi_label', ([], {'data': '[item[0] for item in subset]', 'real_labels': '[item[1] for item in subset]', 'pred_labels': 'corrects', 'classes_rev': 'classes_rev', 'dispMax': '(3)'}), '(data=[item[0] for item in subset], real_labels=\n [item[1] for item in subset], pred_labels=corrects, classes_rev=\n classes_rev, dispMax=3)\n', (8765, 8910), False, 'import utils\n'), ((695, 709), 'json.load', 'json.load', (['fp1'], {}), '(fp1)\n', (704, 709), False, 'import json\n'), ((814, 828), 'json.load', 'json.load', (['fp1'], {}), '(fp1)\n', (823, 828), False, 'import json\n'), ((1095, 1109), 'PIL.Image.open', 'Image.open', (['fn'], {}), '(fn)\n', (1105, 1109), False, 'from PIL import Image\n'), ((1120, 1190), 'torchvision.transforms.functional.resize', 'torchvision.transforms.functional.resize', ([], {'img': 'img_bytes', 'size': 'IMG_SIZE'}), '(img=img_bytes, size=IMG_SIZE)\n', (1160, 1190), False, 'import torchvision\n'), ((8426, 8448), 'torch.gt', 
'torch.gt', (['outputs', '(0.5)'], {}), '(outputs, 0.5)\n', (8434, 8448), False, 'import torch\n'), ((181, 206), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (204, 206), False, 'import torch\n'), ((6610, 6635), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (6633, 6635), False, 'import torch\n'), ((1028, 1054), 'numpy.array', 'np.array', (['dataset_meta[fn]'], {}), '(dataset_meta[fn])\n', (1036, 1054), True, 'import numpy as np\n'), ((5324, 5344), 'torch.gt', 'torch.gt', (['logit', '(0.5)'], {}), '(logit, 0.5)\n', (5332, 5344), False, 'import torch\n'), ((5345, 5366), 'torch.gt', 'torch.gt', (['target', '(0.5)'], {}), '(target, 0.5)\n', (5353, 5366), False, 'import torch\n'), ((1210, 1223), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (1218, 1223), True, 'import numpy as np\n')]
|
#
# This file is part of CasADi.
#
# CasADi -- A symbolic framework for dynamic optimization.
# Copyright (C) 2010-2014 <NAME>, <NAME>, <NAME>,
# <NAME>. All rights reserved.
# Copyright (C) 2011-2014 <NAME>
#
# CasADi is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# CasADi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with CasADi; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
#
from casadi import *
import casadi as c
import numpy
import numpy as n
import unittest
from types import *
from helpers import *
scipy_available = True
try:
import scipy.special
from scipy.linalg import expm
except:
scipy_available = False
class Simulatortests(casadiTestCase):
def setUp(self):
# Reference solution is q0 e^((t^3-t0^3)/(3 p))
t=SX.sym('t')
q=SX.sym('q')
p=SX.sym('p')
f={'t':t, 'x':q, 'p':p, 'ode':q/p*t**2}
opts = {}
opts['reltol'] = 1e-15
opts['abstol'] = 1e-15
opts['fsens_err_con'] = True
#opts['verbose'] = True
opts['t0'] = 0
opts['tf'] = 2.3
integrator = casadi.integrator('integrator', 'cvodes', f, opts)
q0 = MX.sym('q0')
par = MX.sym('p')
qend = integrator.call({'x0':q0,'p':par})['xf']
qe=Function('qe', [q0,par],[qend])
self.dae = f
self.integrator = integrator
self.qe=qe
self.qend=qend
self.q0=q0
self.par=par
self.f = f
self.num={'tend':2.3,'q0':7.1,'p':2}
pass
def test_sim_full(self):
self.message('Simulator inputs')
num = self.num
N = 4
tc = n.linspace(0,num['tend'], N)
t=SX.sym('t')
q=SX.sym('q')
p=SX.sym('p')
f={'t':t, 'x':q, 'p':p, 'ode':q/p*t**2}
opts = {}
opts['reltol'] = 1e-15
opts['abstol'] = 1e-15
opts['fsens_err_con'] = True
#opts['verbose'] = True
opts['grid'] = tc
opts['output_t0'] = True
integrator = casadi.integrator('integrator', 'cvodes', f, opts)
solution = Function('solution', {'x0':q, 'p':p, 'xf':horzcat(*[q*exp(t**3/(3*p)) for t in tc])},
casadi.integrator_in(), casadi.integrator_out())
f_in = {}
f_in['x0']=0.3
f_in['p']=0.7
self.checkfunction(integrator,solution,inputs=f_in,adj=False,jacobian=False,sens_der=False,evals=False,digits=6)
def test_simulator_time_offset(self):
self.message('CVodes integration: simulator time offset')
num=self.num
t = n.linspace(0.7,num['tend'],100)
opts = {}
opts['reltol'] = 1e-15
opts['abstol'] = 1e-15
opts['fsens_err_con'] = True
opts['grid'] = t
opts['output_t0'] = True
integrator = casadi.integrator('integrator', 'cvodes', self.dae, opts)
integrator_in = [0]*integrator.n_in();integrator_in[0]=[num['q0']]
integrator_in[1]=[num['p']]
integrator_out = integrator.call(integrator_in)
tend=num['tend']
q0=num['q0']
p=num['p']
self.assertAlmostEqual(integrator_out[0][0,-1],q0*exp((tend**3-0.7**3)/(3*p)),9,'Evaluation output mismatch')
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"numpy.linspace"
] |
[((3590, 3605), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3603, 3605), False, 'import unittest\n'), ((2125, 2154), 'numpy.linspace', 'n.linspace', (['(0)', "num['tend']", 'N'], {}), "(0, num['tend'], N)\n", (2135, 2154), True, 'import numpy as n\n'), ((2974, 3007), 'numpy.linspace', 'n.linspace', (['(0.7)', "num['tend']", '(100)'], {}), "(0.7, num['tend'], 100)\n", (2984, 3007), True, 'import numpy as n\n')]
|
"""
implement bayesian analysis of two diff pops, X1 and X2, called here x and y
"""
from math import sqrt, exp, log
import numpy as np
import matplotlib.pyplot as plt
from SciInf_utilities import *
import sys
#--------------------------------------
print("\n implement bayesian analysis of two diff population means")
print(" using gaussian approx to distributions, i.e. large sample case")
print(" work with logp \n")
# main
x = []
y = []
if(len(sys.argv) > 2):
file1 = sys.argv[1]
file2 = sys.argv[2]
else:
file1 = input('first data file with one value per line> ')
file2 = input('second data file with one value per line> ')
print('\n input file 1: ',file1)
print(' input file 2: ',file2,'\n')
n_x = read_x(x,file1)
n_y = read_x(y,file2)
av_x = average_x(x)
av_y = average_x(y)
av_xx = average_xy(x,x)
av_yy = average_xy(y,y)
var_x = av_xx - av_x**2
var_y = av_yy - av_y**2
sd_x = sqrt(var_x)
sd_y = sqrt(var_y)
#
# sigma's of the posterior pdf for means
#
sigma_x = sqrt(var_x/(n_x - 1.))
sigma_y = sqrt(var_y/(n_y - 1.))
#
# 'joint' distbn. sigma
#
sigma_xy = sqrt(var_x/(n_x - 1.) + var_y/(n_y - 1.))
s_ratio = sd_x/sd_y
dav = av_y - av_x
print('\n===========================================================')
print('sample (data) summary')
print('===========================================================')
print(' Av X1 {:12.5f} Av X2 {:12.5f} Var of X1 {:12.5f} Var of X2 {:12.5f} '.format(av_x,av_y,var_x,var_y))
print(' Av X2 - Av X1 {:12.5f} '.format(dav))
print(' sigma of X1 data {:12.5f} sigma of X2 data {:12.5f} '.format(sd_x,sd_y))
print(' sigma of <X1> {:12.5f} sigma of <X2> {:12.5f} sigma of <X2-X1> {:12.5f} '.format(sigma_x,sigma_y,sigma_xy))
print(' st.dev ratio data (s1/s2): {:12.5} '.format(s_ratio))
print('===========================================================\n')
#
# generate posterior pdf and cdf for diff in means
#
#npoint = 301
xrange = 4. # sigma range for x-axis
dav_min = dav - xrange*sigma_xy
dav_incr = 2*xrange*sigma_xy/(NPOINT - 1)
dav_axis = np.zeros(NPOINT)
dav_pdf = np.zeros(NPOINT)
for i in range(NPOINT):
dav_axis[i] = dav_min + i*dav_incr
dav_pdf[i] = exp(-1.*(dav_axis[i] - dav)**2/2./sigma_xy**2)
pdf_max = max(dav_pdf)
dav_pdf = dav_pdf/pdf_max
dav_cdf = pdf_to_cdf(dav_axis,dav_pdf)
write_pdf_cdf(dav_axis,dav_pdf,dav_cdf,title='Diff in means pdf cdf',filename='dMean_pdf_cdf.dat')
#
summarize(dav_axis,dav_pdf,dav_cdf,title='difference (set 2 - set 1) of population means')
#
# calculate p(dMean) <, > 0
#
i = 0
dav_val = dav_axis[i]
while((dav_val < 0.) and (i < len(dav_axis))):
dav_val = dav_axis[i]
i += 1
if(i >= len(dav_axis)):
print('Could not find cdf value for dMean = 0.')
else:
p_dmean_neg = dav_cdf[i]
p_dmean_pos = 1. - p_dmean_neg
print('p(dMean) < 0., >0.: %10.3f %10.3f' % (p_dmean_neg,p_dmean_pos))
#
# plot original data
#
data_all = [x, y]
if(MAKEPLOT):
plt.figure(1)
plt.subplot(211)
plt.boxplot(data_all,notch=0,sym='b+',vert=0,showmeans=True)
plt.yticks([1,2],['X 1','X 2'],rotation=0,fontsize=12)
#plt.title('SciInf difference in means')
#plt.xlim(xmin=0.3,xmax=1.)
#plt.show()
plt.subplot(212)
plt.plot(dav_axis,dav_pdf,'g-')
plt.plot(dav_axis,dav_cdf,'r-')
#plt.title('posterior pdf,cdf for diff. in means')
plt.ylim((0.,1.2))
plt.xlabel('Difference in means')
plt.ylabel('p(dmu)')
plt.grid(True)
plt.show()
#
# generate posterior pdf's for st.dev
#
xrange = 4. # range for x-axis
sd_min = min(sd_x/xrange,sd_y/xrange)
sd_max = max(sd_x*xrange,sd_y*xrange)
sd_incr = (sd_max - sd_min)/(NPOINT - 1)
sd_axis = np.zeros(NPOINT)
log_sd_x_pdf = np.zeros(NPOINT)
log_sd_y_pdf = np.zeros(NPOINT)
for i in range(NPOINT):
sd_i = sd_min + i*sd_incr
var_i = sd_i*sd_i
sd_axis[i] = sd_i
log_sd_x_pdf[i] = -0.5*n_x*var_x/var_i - n_x*log(sd_i)
log_sd_y_pdf[i] = -0.5*n_y*var_y/var_i - n_y*log(sd_i)
pdf_max = max(log_sd_x_pdf)
log_sd_x_pdf = log_sd_x_pdf - pdf_max
sd_x_pdf = np.exp(log_sd_x_pdf)
sd_x_cdf = pdf_to_cdf(sd_axis,sd_x_pdf)
pdf_max = max(log_sd_y_pdf)
log_sd_y_pdf = log_sd_y_pdf - pdf_max
sd_y_pdf = np.exp(log_sd_y_pdf)
sd_y_cdf = pdf_to_cdf(sd_axis,sd_y_pdf)
#
summarize(sd_axis,sd_x_pdf,sd_x_cdf,title='set 1 std. deviation')
summarize(sd_axis,sd_y_pdf,sd_y_cdf,title='set 2 std. deviation')
#
#
# plot posterior pdf, cdf of st. dev
#
if(MAKEPLOT):
plt.figure(2)
plt.plot(sd_axis,sd_x_pdf,'g-')
plt.plot(sd_axis,sd_y_pdf,'b-')
plt.title('posterior pdf for st. devs')
plt.xlabel('st.dev')
plt.ylabel('p(st.dev)')
plt.grid(True)
plt.show()
#
# calculate pdf for F = ratio of (sample variance/st.dev^2)
# using marginalization integral over sd_x
#
xrange = 5. # range for x-axis
f_min = 0.25/xrange
f_max = xrange
f_incr = (f_max - f_min)/(NPOINT - 1)
f_axis = np.zeros(NPOINT)
f_pdf = np.zeros(NPOINT)
f_dp = np.zeros(NPOINT)
f_exp = (n_y - 3.)/2. # gives standard f-distribution
#f_exp = (n_y - 3.)/2. + 1. # gives 'symmetric' f-distribution
s_exp = - n_x - n_y + 1
for i in range(NPOINT):
f_i = f_min + i*f_incr
f_axis[i] = f_i
for j in range(NPOINT):
sd_x = sd_axis[j]
f_dp[j] = f_i**f_exp * exp(-0.5*var_x*(n_x + n_y*f_i)/sd_x**2) * sd_x**s_exp
for j in range(1,NPOINT):
f_pdf[i] += 0.5*(f_dp[j] + f_dp[j-1])/(sd_axis[j] - sd_axis[j-1])
pdf_max = max(f_pdf)
f_pdf = f_pdf/pdf_max
f_cdf = pdf_to_cdf(f_axis,f_pdf)
#summarize(f_axis,f_pdf,f_cdf,title='F = (V2/V1)*(sigma1/sigma2)^2')
#
# convert from F to ratio of population std. devs
#
for i in range(NPOINT):
f_axis[i] = sqrt(f_axis[i]*var_x/var_y)
summarize(f_axis,f_pdf,f_cdf,title='sigma1/sigma2')
write_pdf_cdf(f_axis,f_pdf,f_cdf,title='sigma1/sigma2 pdf cdf',filename='sigma_ratio_pdf_cdf.dat')
#
if(MAKEPLOT):
plt.figure(3)
plt.plot(f_axis,f_pdf,'g-')
plt.plot(f_axis,f_cdf,'r-')
plt.title('posterior pdf for f = ratio of st. devs')
plt.xlabel('f')
plt.ylabel('p(f)')
plt.grid(True)
plt.show()
|
[
"matplotlib.pyplot.boxplot",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.title",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"math.sqrt",
"math.log",
"numpy.exp",
"numpy.zeros",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.ylim",
"math.exp",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show"
] |
[((894, 905), 'math.sqrt', 'sqrt', (['var_x'], {}), '(var_x)\n', (898, 905), False, 'from math import sqrt, exp, log\n'), ((913, 924), 'math.sqrt', 'sqrt', (['var_y'], {}), '(var_y)\n', (917, 924), False, 'from math import sqrt, exp, log\n'), ((980, 1005), 'math.sqrt', 'sqrt', (['(var_x / (n_x - 1.0))'], {}), '(var_x / (n_x - 1.0))\n', (984, 1005), False, 'from math import sqrt, exp, log\n'), ((1013, 1038), 'math.sqrt', 'sqrt', (['(var_y / (n_y - 1.0))'], {}), '(var_y / (n_y - 1.0))\n', (1017, 1038), False, 'from math import sqrt, exp, log\n'), ((1075, 1122), 'math.sqrt', 'sqrt', (['(var_x / (n_x - 1.0) + var_y / (n_y - 1.0))'], {}), '(var_x / (n_x - 1.0) + var_y / (n_y - 1.0))\n', (1079, 1122), False, 'from math import sqrt, exp, log\n'), ((2002, 2018), 'numpy.zeros', 'np.zeros', (['NPOINT'], {}), '(NPOINT)\n', (2010, 2018), True, 'import numpy as np\n'), ((2029, 2045), 'numpy.zeros', 'np.zeros', (['NPOINT'], {}), '(NPOINT)\n', (2037, 2045), True, 'import numpy as np\n'), ((3553, 3569), 'numpy.zeros', 'np.zeros', (['NPOINT'], {}), '(NPOINT)\n', (3561, 3569), True, 'import numpy as np\n'), ((3585, 3601), 'numpy.zeros', 'np.zeros', (['NPOINT'], {}), '(NPOINT)\n', (3593, 3601), True, 'import numpy as np\n'), ((3617, 3633), 'numpy.zeros', 'np.zeros', (['NPOINT'], {}), '(NPOINT)\n', (3625, 3633), True, 'import numpy as np\n'), ((3917, 3937), 'numpy.exp', 'np.exp', (['log_sd_x_pdf'], {}), '(log_sd_x_pdf)\n', (3923, 3937), True, 'import numpy as np\n'), ((4055, 4075), 'numpy.exp', 'np.exp', (['log_sd_y_pdf'], {}), '(log_sd_y_pdf)\n', (4061, 4075), True, 'import numpy as np\n'), ((4732, 4748), 'numpy.zeros', 'np.zeros', (['NPOINT'], {}), '(NPOINT)\n', (4740, 4748), True, 'import numpy as np\n'), ((4757, 4773), 'numpy.zeros', 'np.zeros', (['NPOINT'], {}), '(NPOINT)\n', (4765, 4773), True, 'import numpy as np\n'), ((4781, 4797), 'numpy.zeros', 'np.zeros', (['NPOINT'], {}), '(NPOINT)\n', (4789, 4797), True, 'import numpy as np\n'), ((2122, 2180), 'math.exp', 'exp', (['(-1.0 * (dav_axis[i] - dav) ** 2 / 2.0 / sigma_xy ** 2)'], {}), '(-1.0 * (dav_axis[i] - dav) ** 2 / 2.0 / sigma_xy ** 2)\n', (2125, 2180), False, 'from math import sqrt, exp, log\n'), ((2863, 2876), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (2873, 2876), True, 'import matplotlib.pyplot as plt\n'), ((2879, 2895), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(211)'], {}), '(211)\n', (2890, 2895), True, 'import matplotlib.pyplot as plt\n'), ((2898, 2962), 'matplotlib.pyplot.boxplot', 'plt.boxplot', (['data_all'], {'notch': '(0)', 'sym': '"""b+"""', 'vert': '(0)', 'showmeans': '(True)'}), "(data_all, notch=0, sym='b+', vert=0, showmeans=True)\n", (2909, 2962), True, 'import matplotlib.pyplot as plt\n'), ((2961, 3020), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[1, 2]', "['X 1', 'X 2']"], {'rotation': '(0)', 'fontsize': '(12)'}), "([1, 2], ['X 1', 'X 2'], rotation=0, fontsize=12)\n", (2971, 3020), True, 'import matplotlib.pyplot as plt\n'), ((3105, 3121), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(212)'], {}), '(212)\n', (3116, 3121), True, 'import matplotlib.pyplot as plt\n'), ((3124, 3157), 'matplotlib.pyplot.plot', 'plt.plot', (['dav_axis', 'dav_pdf', '"""g-"""'], {}), "(dav_axis, dav_pdf, 'g-')\n", (3132, 3157), True, 'import matplotlib.pyplot as plt\n'), ((3158, 3191), 'matplotlib.pyplot.plot', 'plt.plot', (['dav_axis', 'dav_cdf', '"""r-"""'], {}), "(dav_axis, dav_cdf, 'r-')\n", (3166, 3191), True, 'import matplotlib.pyplot as plt\n'), ((3245, 3265), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0.0, 
1.2)'], {}), '((0.0, 1.2))\n', (3253, 3265), True, 'import matplotlib.pyplot as plt\n'), ((3266, 3299), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Difference in means"""'], {}), "('Difference in means')\n", (3276, 3299), True, 'import matplotlib.pyplot as plt\n'), ((3302, 3322), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""p(dmu)"""'], {}), "('p(dmu)')\n", (3312, 3322), True, 'import matplotlib.pyplot as plt\n'), ((3325, 3339), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (3333, 3339), True, 'import matplotlib.pyplot as plt\n'), ((3342, 3352), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3350, 3352), True, 'import matplotlib.pyplot as plt\n'), ((4309, 4322), 'matplotlib.pyplot.figure', 'plt.figure', (['(2)'], {}), '(2)\n', (4319, 4322), True, 'import matplotlib.pyplot as plt\n'), ((4325, 4358), 'matplotlib.pyplot.plot', 'plt.plot', (['sd_axis', 'sd_x_pdf', '"""g-"""'], {}), "(sd_axis, sd_x_pdf, 'g-')\n", (4333, 4358), True, 'import matplotlib.pyplot as plt\n'), ((4359, 4392), 'matplotlib.pyplot.plot', 'plt.plot', (['sd_axis', 'sd_y_pdf', '"""b-"""'], {}), "(sd_axis, sd_y_pdf, 'b-')\n", (4367, 4392), True, 'import matplotlib.pyplot as plt\n'), ((4393, 4432), 'matplotlib.pyplot.title', 'plt.title', (['"""posterior pdf for st. devs"""'], {}), "('posterior pdf for st. devs')\n", (4402, 4432), True, 'import matplotlib.pyplot as plt\n'), ((4435, 4455), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""st.dev"""'], {}), "('st.dev')\n", (4445, 4455), True, 'import matplotlib.pyplot as plt\n'), ((4458, 4481), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""p(st.dev)"""'], {}), "('p(st.dev)')\n", (4468, 4481), True, 'import matplotlib.pyplot as plt\n'), ((4484, 4498), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (4492, 4498), True, 'import matplotlib.pyplot as plt\n'), ((4501, 4511), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4509, 4511), True, 'import matplotlib.pyplot as plt\n'), ((5470, 5501), 'math.sqrt', 'sqrt', (['(f_axis[i] * var_x / var_y)'], {}), '(f_axis[i] * var_x / var_y)\n', (5474, 5501), False, 'from math import sqrt, exp, log\n'), ((5667, 5680), 'matplotlib.pyplot.figure', 'plt.figure', (['(3)'], {}), '(3)\n', (5677, 5680), True, 'import matplotlib.pyplot as plt\n'), ((5683, 5712), 'matplotlib.pyplot.plot', 'plt.plot', (['f_axis', 'f_pdf', '"""g-"""'], {}), "(f_axis, f_pdf, 'g-')\n", (5691, 5712), True, 'import matplotlib.pyplot as plt\n'), ((5713, 5742), 'matplotlib.pyplot.plot', 'plt.plot', (['f_axis', 'f_cdf', '"""r-"""'], {}), "(f_axis, f_cdf, 'r-')\n", (5721, 5742), True, 'import matplotlib.pyplot as plt\n'), ((5743, 5795), 'matplotlib.pyplot.title', 'plt.title', (['"""posterior pdf for f = ratio of st. devs"""'], {}), "('posterior pdf for f = ratio of st. 
devs')\n", (5752, 5795), True, 'import matplotlib.pyplot as plt\n'), ((5798, 5813), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""f"""'], {}), "('f')\n", (5808, 5813), True, 'import matplotlib.pyplot as plt\n'), ((5816, 5834), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""p(f)"""'], {}), "('p(f)')\n", (5826, 5834), True, 'import matplotlib.pyplot as plt\n'), ((5837, 5851), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (5845, 5851), True, 'import matplotlib.pyplot as plt\n'), ((5854, 5864), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5862, 5864), True, 'import matplotlib.pyplot as plt\n'), ((3773, 3782), 'math.log', 'log', (['sd_i'], {}), '(sd_i)\n', (3776, 3782), False, 'from math import sqrt, exp, log\n'), ((3830, 3839), 'math.log', 'log', (['sd_i'], {}), '(sd_i)\n', (3833, 3839), False, 'from math import sqrt, exp, log\n'), ((5081, 5130), 'math.exp', 'exp', (['(-0.5 * var_x * (n_x + n_y * f_i) / sd_x ** 2)'], {}), '(-0.5 * var_x * (n_x + n_y * f_i) / sd_x ** 2)\n', (5084, 5130), False, 'from math import sqrt, exp, log\n')]
|
"""Sample script of word embedding model.
This code implements skip-gram model and continuous-bow model.
Use ../ptb/download.py to download 'ptb.train.txt'.
"""
import argparse
import collections
import numpy as np
import six
import chainer
from chainer import cuda
import chainer.functions as F
import chainer.initializers as I
import chainer.links as L
import chainer.optimizers as O
from chainer import reporter
from chainer import training
from chainer.training import extensions
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', '-g', default=-1, type=int,
help='GPU ID (negative value indicates CPU)')
parser.add_argument('--unit', '-u', default=100, type=int,
help='number of units')
parser.add_argument('--window', '-w', default=5, type=int,
help='window size')
parser.add_argument('--batchsize', '-b', type=int, default=1000,
help='learning minibatch size')
parser.add_argument('--epoch', '-e', default=20, type=int,
help='number of epochs to learn')
parser.add_argument('--model', '-m', choices=['skipgram', 'cbow'],
default='skipgram',
help='model type ("skipgram", "cbow")')
parser.add_argument('--negative-size', default=5, type=int,
help='number of negative samples')
parser.add_argument('--out-type', '-o', choices=['hsm', 'ns', 'original'],
default='hsm',
help='output model type ("hsm": hierarchical softmax, '
'"ns": negative sampling, "original": no approximation)')
parser.add_argument('--out', default='result',
help='Directory to output the result')
parser.add_argument('--test', dest='test', action='store_true')
parser.set_defaults(test=False)
args = parser.parse_args()
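# example invocation (hypothetical file name, assuming this script is saved as train_word2vec.py):
#   python train_word2vec.py --gpu 0 --unit 100 --window 5 --batchsize 1000 --epoch 20 \
#       --model skipgram --out-type hsm --out result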
if args.gpu >= 0:
chainer.cuda.get_device(args.gpu).use()
cuda.check_cuda_available()
print('GPU: {}'.format(args.gpu))
print('# unit: {}'.format(args.unit))
print('Window: {}'.format(args.window))
print('Minibatch-size: {}'.format(args.batchsize))
print('# epoch: {}'.format(args.epoch))
print('Training model: {}'.format(args.model))
print('Output type: {}'.format(args.out_type))
print('')
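# Continuous bag-of-words: average the embeddings of the surrounding context words and
# ask the loss function to predict the center word from that average.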
class ContinuousBoW(chainer.Chain):
def __init__(self, n_vocab, n_units, loss_func):
super(ContinuousBoW, self).__init__(
            embed=L.EmbedID(
n_vocab, n_units, initialW=I.Uniform(1. / n_units)),
loss_func=loss_func,
)
def __call__(self, x, context):
e = self.embed(context)
h = F.sum(e, axis=1) * (1. / context.data.shape[1])
loss = self.loss_func(h, x)
reporter.report({'loss': loss}, self)
return loss
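# Skip-gram (as implemented here): each context word is embedded separately and the loss
# function predicts the center word from it, giving one loss term per (center, context) pair.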
class SkipGram(chainer.Chain):
def __init__(self, n_vocab, n_units, loss_func):
super(SkipGram, self).__init__(
embed=L.EmbedID(
n_vocab, n_units, initialW=I.Uniform(1. / n_units)),
loss_func=loss_func,
)
def __call__(self, x, context):
e = self.embed(context)
shape = e.data.shape
x = F.broadcast_to(x[:, None], (shape[0], shape[1]))
e = F.reshape(e, (shape[0] * shape[1], shape[2]))
x = F.reshape(x, (shape[0] * shape[1],))
loss = self.loss_func(e, x)
reporter.report({'loss': loss}, self)
return loss
class SoftmaxCrossEntropyLoss(chainer.Chain):
def __init__(self, n_in, n_out):
super(SoftmaxCrossEntropyLoss, self).__init__(
out=L.Linear(n_in, n_out, initialW=0),
)
def __call__(self, x, t):
return F.softmax_cross_entropy(self.out(x), t)
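# WindowIterator yields (center, context) batches: for every batch it draws a random
# half-window size w in [1, window - 1] and takes the w words on each side of every center position.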
class WindowIterator(chainer.dataset.Iterator):
def __init__(self, dataset, window, batch_size, repeat=True):
self.dataset = np.array(dataset, np.int32)
self.window = window
self.batch_size = batch_size
self._repeat = repeat
self.order = np.random.permutation(
len(dataset) - window * 2).astype(np.int32)
self.order += window
self.current_position = 0
self.epoch = 0
self.is_new_epoch = False
def __next__(self):
if not self._repeat and self.epoch > 0:
raise StopIteration
i = self.current_position
i_end = i + self.batch_size
position = self.order[i: i_end]
w = np.random.randint(self.window - 1) + 1
offset = np.concatenate([np.arange(-w, 0), np.arange(1, w + 1)])
pos = position[:, None] + offset[None, :]
context = self.dataset.take(pos)
center = self.dataset.take(position)
if i_end >= len(self.order):
np.random.shuffle(self.order)
self.epoch += 1
self.is_new_epoch = True
self.current_position = 0
else:
self.is_new_epoch = False
self.current_position = i_end
return center, context
@property
def epoch_detail(self):
return self.epoch + float(self.current_position) / len(self.order)
def serialize(self, serializer):
self.current_position = serializer('current_position',
self.current_position)
self.epoch = serializer('epoch', self.epoch)
self.is_new_epoch = serializer('is_new_epoch', self.is_new_epoch)
        if self.order is not None:
            serializer('order', self.order)
def convert(batch, device):
center, context = batch
if device >= 0:
center = cuda.to_gpu(center)
context = cuda.to_gpu(context)
return center, context
if args.gpu >= 0:
cuda.get_device(args.gpu).use()
train, val, _ = chainer.datasets.get_ptb_words()
counts = collections.Counter(train)
counts.update(collections.Counter(val))
n_vocab = max(train) + 1
if args.test:
train = train[:100]
val = val[:100]
vocab = chainer.datasets.get_ptb_words_vocabulary()
index2word = {wid: word for word, wid in six.iteritems(vocab)}
print('n_vocab: %d' % n_vocab)
print('data length: %d' % len(train))
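# choose the output layer: hierarchical softmax over a Huffman tree, negative sampling
# with unigram counts, or a plain softmax cross-entropy over the full vocabulary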
if args.out_type == 'hsm':
HSM = L.BinaryHierarchicalSoftmax
tree = HSM.create_huffman_tree(counts)
loss_func = HSM(args.unit, tree)
loss_func.W.data[...] = 0
elif args.out_type == 'ns':
cs = [counts[w] for w in range(len(counts))]
loss_func = L.NegativeSampling(args.unit, cs, args.negative_size)
loss_func.W.data[...] = 0
elif args.out_type == 'original':
loss_func = SoftmaxCrossEntropyLoss(args.unit, n_vocab)
else:
raise Exception('Unknown output type: {}'.format(args.out_type))
if args.model == 'skipgram':
model = SkipGram(n_vocab, args.unit, loss_func)
elif args.model == 'cbow':
model = ContinuousBoW(n_vocab, args.unit, loss_func)
else:
raise Exception('Unknown model type: {}'.format(args.model))
if args.gpu >= 0:
model.to_gpu()
optimizer = O.Adam()
optimizer.setup(model)
train_iter = WindowIterator(train, args.window, args.batchsize)
val_iter = WindowIterator(val, args.window, args.batchsize, repeat=False)
updater = training.StandardUpdater(
train_iter, optimizer, converter=convert, device=args.gpu)
trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)
trainer.extend(extensions.Evaluator(
val_iter, model, converter=convert, device=args.gpu))
trainer.extend(extensions.LogReport())
trainer.extend(extensions.PrintReport(
['epoch', 'main/loss', 'validation/main/loss']))
trainer.extend(extensions.ProgressBar())
trainer.run()
with open('word2vec.model', 'w') as f:
f.write('%d %d\n' % (len(index2word), args.unit))
w = cuda.to_cpu(model.embed.W.data)
for i, wi in enumerate(w):
v = ' '.join(map(str, wi))
f.write('%s %s\n' % (index2word[i], v))
|
[
"chainer.training.extensions.PrintReport",
"chainer.datasets.get_ptb_words",
"chainer.training.StandardUpdater",
"numpy.array",
"numpy.arange",
"argparse.ArgumentParser",
"chainer.training.Trainer",
"chainer.training.extensions.Evaluator",
"chainer.cuda.to_cpu",
"chainer.links.Linear",
"chainer.reporter.report",
"chainer.cuda.to_gpu",
"chainer.cuda.check_cuda_available",
"chainer.training.extensions.ProgressBar",
"chainer.functions.sum",
"chainer.functions.reshape",
"six.iteritems",
"chainer.training.extensions.LogReport",
"chainer.cuda.get_device",
"chainer.optimizers.Adam",
"chainer.initializers.Uniform",
"collections.Counter",
"numpy.random.randint",
"chainer.links.NegativeSampling",
"chainer.functions.broadcast_to",
"chainer.datasets.get_ptb_words_vocabulary",
"numpy.random.shuffle"
] |
[((497, 522), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (520, 522), False, 'import argparse\n'), ((5708, 5740), 'chainer.datasets.get_ptb_words', 'chainer.datasets.get_ptb_words', ([], {}), '()\n', (5738, 5740), False, 'import chainer\n'), ((5750, 5776), 'collections.Counter', 'collections.Counter', (['train'], {}), '(train)\n', (5769, 5776), False, 'import collections\n'), ((5910, 5953), 'chainer.datasets.get_ptb_words_vocabulary', 'chainer.datasets.get_ptb_words_vocabulary', ([], {}), '()\n', (5951, 5953), False, 'import chainer\n'), ((6898, 6906), 'chainer.optimizers.Adam', 'O.Adam', ([], {}), '()\n', (6904, 6906), True, 'import chainer.optimizers as O\n'), ((7079, 7167), 'chainer.training.StandardUpdater', 'training.StandardUpdater', (['train_iter', 'optimizer'], {'converter': 'convert', 'device': 'args.gpu'}), '(train_iter, optimizer, converter=convert, device=\n args.gpu)\n', (7103, 7167), False, 'from chainer import training\n'), ((7178, 7240), 'chainer.training.Trainer', 'training.Trainer', (['updater', "(args.epoch, 'epoch')"], {'out': 'args.out'}), "(updater, (args.epoch, 'epoch'), out=args.out)\n", (7194, 7240), False, 'from chainer import training\n'), ((1921, 1948), 'chainer.cuda.check_cuda_available', 'cuda.check_cuda_available', ([], {}), '()\n', (1946, 1948), False, 'from chainer import cuda\n'), ((5791, 5815), 'collections.Counter', 'collections.Counter', (['val'], {}), '(val)\n', (5810, 5815), False, 'import collections\n'), ((7257, 7330), 'chainer.training.extensions.Evaluator', 'extensions.Evaluator', (['val_iter', 'model'], {'converter': 'convert', 'device': 'args.gpu'}), '(val_iter, model, converter=convert, device=args.gpu)\n', (7277, 7330), False, 'from chainer.training import extensions\n'), ((7352, 7374), 'chainer.training.extensions.LogReport', 'extensions.LogReport', ([], {}), '()\n', (7372, 7374), False, 'from chainer.training import extensions\n'), ((7391, 7461), 'chainer.training.extensions.PrintReport', 'extensions.PrintReport', (["['epoch', 'main/loss', 'validation/main/loss']"], {}), "(['epoch', 'main/loss', 'validation/main/loss'])\n", (7413, 7461), False, 'from chainer.training import extensions\n'), ((7483, 7507), 'chainer.training.extensions.ProgressBar', 'extensions.ProgressBar', ([], {}), '()\n', (7505, 7507), False, 'from chainer.training import extensions\n'), ((7625, 7656), 'chainer.cuda.to_cpu', 'cuda.to_cpu', (['model.embed.W.data'], {}), '(model.embed.W.data)\n', (7636, 7656), False, 'from chainer import cuda\n'), ((2708, 2745), 'chainer.reporter.report', 'reporter.report', (["{'loss': loss}", 'self'], {}), "({'loss': loss}, self)\n", (2723, 2745), False, 'from chainer import reporter\n'), ((3144, 3192), 'chainer.functions.broadcast_to', 'F.broadcast_to', (['x[:, None]', '(shape[0], shape[1])'], {}), '(x[:, None], (shape[0], shape[1]))\n', (3158, 3192), True, 'import chainer.functions as F\n'), ((3205, 3250), 'chainer.functions.reshape', 'F.reshape', (['e', '(shape[0] * shape[1], shape[2])'], {}), '(e, (shape[0] * shape[1], shape[2]))\n', (3214, 3250), True, 'import chainer.functions as F\n'), ((3263, 3299), 'chainer.functions.reshape', 'F.reshape', (['x', '(shape[0] * shape[1],)'], {}), '(x, (shape[0] * shape[1],))\n', (3272, 3299), True, 'import chainer.functions as F\n'), ((3344, 3381), 'chainer.reporter.report', 'reporter.report', (["{'loss': loss}", 'self'], {}), "({'loss': loss}, self)\n", (3359, 3381), False, 'from chainer import reporter\n'), ((3830, 3857), 'numpy.array', 'np.array', (['dataset', 
'np.int32'], {}), '(dataset, np.int32)\n', (3838, 3857), True, 'import numpy as np\n'), ((5549, 5568), 'chainer.cuda.to_gpu', 'cuda.to_gpu', (['center'], {}), '(center)\n', (5560, 5568), False, 'from chainer import cuda\n'), ((5587, 5607), 'chainer.cuda.to_gpu', 'cuda.to_gpu', (['context'], {}), '(context)\n', (5598, 5607), False, 'from chainer import cuda\n'), ((5995, 6015), 'six.iteritems', 'six.iteritems', (['vocab'], {}), '(vocab)\n', (6008, 6015), False, 'import six\n'), ((6356, 6409), 'chainer.links.NegativeSampling', 'L.NegativeSampling', (['args.unit', 'cs', 'args.negative_size'], {}), '(args.unit, cs, args.negative_size)\n', (6374, 6409), True, 'import chainer.links as L\n'), ((1877, 1910), 'chainer.cuda.get_device', 'chainer.cuda.get_device', (['args.gpu'], {}), '(args.gpu)\n', (1900, 1910), False, 'import chainer\n'), ((2616, 2632), 'chainer.functions.sum', 'F.sum', (['e'], {'axis': '(1)'}), '(e, axis=1)\n', (2621, 2632), True, 'import chainer.functions as F\n'), ((4403, 4437), 'numpy.random.randint', 'np.random.randint', (['(self.window - 1)'], {}), '(self.window - 1)\n', (4420, 4437), True, 'import numpy as np\n'), ((4701, 4730), 'numpy.random.shuffle', 'np.random.shuffle', (['self.order'], {}), '(self.order)\n', (4718, 4730), True, 'import numpy as np\n'), ((5659, 5684), 'chainer.cuda.get_device', 'cuda.get_device', (['args.gpu'], {}), '(args.gpu)\n', (5674, 5684), False, 'from chainer import cuda\n'), ((3559, 3592), 'chainer.links.Linear', 'L.Linear', (['n_in', 'n_out'], {'initialW': '(0)'}), '(n_in, n_out, initialW=0)\n', (3567, 3592), True, 'import chainer.links as L\n'), ((4475, 4491), 'numpy.arange', 'np.arange', (['(-w)', '(0)'], {}), '(-w, 0)\n', (4484, 4491), True, 'import numpy as np\n'), ((4493, 4512), 'numpy.arange', 'np.arange', (['(1)', '(w + 1)'], {}), '(1, w + 1)\n', (4502, 4512), True, 'import numpy as np\n'), ((2466, 2490), 'chainer.initializers.Uniform', 'I.Uniform', (['(1.0 / n_units)'], {}), '(1.0 / n_units)\n', (2475, 2490), True, 'import chainer.initializers as I\n'), ((2965, 2989), 'chainer.initializers.Uniform', 'I.Uniform', (['(1.0 / n_units)'], {}), '(1.0 / n_units)\n', (2974, 2989), True, 'import chainer.initializers as I\n')]
|
import jittor as jt
import numpy as np
from advance import *
import matplotlib.pyplot as plt
import argparse
from tqdm import trange
from utils import get_model, modelSet, dataset_choices
plt.switch_backend('agg')
# CUDA_VISIBLE_DEVICES=0 log_silent=1 python3.7 run_ssl.py --model deeplab --layer aspp --channel 256 --dataset pancreas --save checkpoints/deeplab-ssl.pkl -e 50 --lr 5e-6
if __name__ == '__main__':
jt.flags.use_cuda = int(1)
parser = argparse.ArgumentParser()
parser.add_argument('--model', default='unet', type=str, choices=modelSet, help='choose a model network')
parser.add_argument('--dataset', type=str, choices=dataset_choices, required=True, help='select a dataset')
parser.add_argument('--save', default='checkpoints/ssl.pkl', type=str, help='model weights save path')
parser.add_argument('-e', '--epochs', type=int, default=20, help='number of training epochs', dest='epochs')
parser.add_argument('-c', '--class-num', type=int, default=2, help='class number', dest='class_num')
parser.add_argument('-b', '--batch-size', type=int, default=8, help='training batch size', dest='batch_size')
parser.add_argument('--channel', dest='embedding_channel', type=int, default=512, help='number of channels of embedded feature maps')
parser.add_argument('--layer', type=str, default='down4', help='layer to extract features from')
parser.add_argument('--lr', type=float, default=1e-5, help='learning rate')
parser.add_argument('--pretrain', action='store_true')
args = parser.parse_args()
model = get_model(args)
train_loader = retrieve_aug_data(args, 'train', augmentation)
learner = MoCoLearner(
model=model,
layer=args.layer,
loader=train_loader,
embedding_channel=args.embedding_channel,
project_dim=128,
lr=args.lr
)
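    # train for the requested number of epochs, log the loss, and checkpoint the model
    # whenever the epoch loss improves on the best value seen so far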
loss_min = 1e4
losses = []
with open('./log/ssl.txt', 'w') as f:
# bar = trange(args.epochs)
for epoch in range(args.epochs):
loss = learner.train()
# bar.set_description('epoch[%02d] loss:[%.6f\n]' % (epoch + 1, loss))
print('epoch[%02d] loss:[%.6f\n]' % (epoch + 1, loss))
f.write('epoch[%02d] loss:[%.6f\n]' % (epoch + 1, loss))
            if loss < loss_min:
                loss_min = loss
                model.save(args.save)
losses.append(loss)
    np.savetxt('./log/ssl_loss.txt', losses)  # save the full loss history, not the last scalar
plt.plot(losses)
plt.savefig('./result/ssl_losses.png')
|
[
"utils.get_model",
"matplotlib.pyplot.savefig",
"argparse.ArgumentParser",
"matplotlib.pyplot.plot",
"numpy.savetxt",
"matplotlib.pyplot.switch_backend"
] |
[((238, 263), 'matplotlib.pyplot.switch_backend', 'plt.switch_backend', (['"""agg"""'], {}), "('agg')\n", (256, 263), True, 'import matplotlib.pyplot as plt\n'), ((509, 534), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (532, 534), False, 'import argparse\n'), ((1618, 1633), 'utils.get_model', 'get_model', (['args'], {}), '(args)\n', (1627, 1633), False, 'from utils import get_model, modelSet, dataset_choices\n'), ((2419, 2457), 'numpy.savetxt', 'np.savetxt', (['"""./log/ssl_loss.txt"""', 'loss'], {}), "('./log/ssl_loss.txt', loss)\n", (2429, 2457), True, 'import numpy as np\n'), ((2462, 2478), 'matplotlib.pyplot.plot', 'plt.plot', (['losses'], {}), '(losses)\n', (2470, 2478), True, 'import matplotlib.pyplot as plt\n'), ((2483, 2521), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./result/ssl_losses.png"""'], {}), "('./result/ssl_losses.png')\n", (2494, 2521), True, 'import matplotlib.pyplot as plt\n')]
|
import numpy as np
import sys
# temporary workaround: make the local src/ directory importable so the undefined package resolves.
sys.path.append("./src/")
import math
from undefined.UDFunction import UDFunction
from undefined.GraphGenerator import UDGraph
from undefined.Utils import UDPrimitive, check_division_by_zero, check_log, check_pow, check_arc
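# Each elementary function below dispatches on the input type: a UDFunction input returns a new
# UDFunction with the value and forward-mode derivative updated, a UDGraph input returns a new
# graph node tagged with the corresponding UDPrimitive, and plain ints/floats/ndarrays fall back
# to math/numpy.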
def cos(udobject):
"""calculate the cosine operation of input
Args:
        udobject (UDFunction, UDGraph, ndarray, int, float): user-defined function or number
    Raises:
        TypeError: raised if the input type is not compatible with the cosine operation
    Returns:
        A UDFunction with val and der updated by the cosine operation if the input is a UDFunction;
        a UDGraph with a new node recording the cosine operation if the input is a UDGraph;
        otherwise the cosine of the int, float or ndarray input.
"""
if isinstance(udobject, UDFunction):
if isinstance(udobject._val, (int, float)):
new_val = math.cos(udobject._val)
new_der = - 1 * math.sin(udobject._val) * udobject._der
elif isinstance(udobject._val, np.ndarray):
new_val = np.cos(udobject._val)
new_der = -1 * np.sin(udobject._val) * udobject._der
else:
raise TypeError("error raised by undefined: unsupported attribute type.")
return UDFunction(new_val, new_der)
elif isinstance(udobject, UDGraph):
new_func = UDPrimitive.COS
if isinstance(udobject._val, (int, float)):
new_val = math.cos(udobject._val)
elif isinstance(udobject._val, np.ndarray):
new_val = np.cos(udobject._val)
else:
raise TypeError("error raised by undefined: unsupported attribute type.")
udgraph = UDGraph(new_val, new_func)
udgraph._parents.append(udobject)
return udgraph
elif isinstance(udobject, np.ndarray):
return np.cos(udobject)
elif isinstance(udobject, (int, float)):
return math.cos(udobject)
else:
raise TypeError("error raised by undefined: unsupported attribute type.")
def sin(udobject):
"""calculate the sin operation of input
Args:
        udobject (UDFunction, UDGraph, ndarray, int, float): user-defined function or number
    Raises:
        TypeError: raised if the input type is not compatible with the sine operation
    Returns:
        A UDFunction with val and der updated by the sine operation if the input is a UDFunction;
        a UDGraph with a new node recording the sine operation if the input is a UDGraph;
        otherwise the sine of the int, float or ndarray input.
"""
if isinstance(udobject, UDFunction):
if isinstance(udobject._val, (int, float)):
new_val = math.sin(udobject._val)
new_der = math.cos(udobject._val) * udobject._der
elif isinstance(udobject._val, np.ndarray):
new_val = np.sin(udobject._val)
new_der = np.cos(udobject._val) * udobject._der
else:
raise TypeError("error raised by undefined: unsupported attribute type.")
return UDFunction(new_val, new_der)
elif isinstance(udobject, UDGraph):
new_func = UDPrimitive.SIN
if isinstance(udobject._val, (int, float)):
new_val = math.sin(udobject._val)
elif isinstance(udobject._val, np.ndarray):
new_val = np.sin(udobject._val)
else:
raise TypeError("error raised by undefined: unsupported attribute type.")
udgraph = UDGraph(new_val, new_func)
udgraph._parents.append(udobject)
return udgraph
elif isinstance(udobject, np.ndarray):
return np.sin(udobject)
elif isinstance(udobject, (int, float)):
return math.sin(udobject)
else:
raise TypeError("error raised by undefined: unsupported attribute type.")
def tan(udobject):
"""calculate the tangent operation of input
Args:
        udobject (UDFunction, UDGraph, ndarray, int, float): user-defined function or number
    Raises:
        TypeError: raised if the input type is not compatible with the tangent operation
    Returns:
        A UDFunction with val and der updated by the tangent operation if the input is a UDFunction;
        a UDGraph with a new node recording the tangent operation if the input is a UDGraph;
        otherwise the tangent of the int, float or ndarray input.
"""
if isinstance(udobject, UDFunction):
if isinstance(udobject._val, (int, float)):
check_division_by_zero(math.cos(udobject._val))
new_val = math.tan(udobject._val)
new_der = (1 / (math.cos(udobject._val)) ** 2) * udobject._der
elif isinstance(udobject._val, np.ndarray):
check_division_by_zero(np.cos(udobject._val))
new_val = np.tan(udobject._val)
new_der = (1 / (np.cos(udobject._val)) ** 2) * udobject._der
else:
raise TypeError("error raised by undefined: unsupported attribute type.")
return UDFunction(new_val, new_der)
elif isinstance(udobject, UDGraph):
new_func = UDPrimitive.TAN
if isinstance(udobject._val, (int, float)):
check_division_by_zero(math.cos(udobject._val))
new_val = math.tan(udobject._val)
elif isinstance(udobject._val, np.ndarray):
check_division_by_zero(np.cos(udobject._val))
new_val = np.tan(udobject._val)
else:
raise TypeError("error raised by undefined: unsupported attribute type.")
udgraph = UDGraph(new_val, new_func)
udgraph._parents.append(udobject)
return udgraph
elif isinstance(udobject, np.ndarray):
check_division_by_zero(np.cos(udobject))
return np.tan(udobject)
elif isinstance(udobject, (int, float)):
check_division_by_zero(math.cos(udobject))
return math.tan(udobject)
else:
raise TypeError("error raised by undefined: unsupported attribute type.")
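# The hyperbolic and logistic helpers below are written purely in terms of exp(), so they
# inherit its type dispatch without needing branches of their own.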
def sinh(udobject):
"""calculate the sinh operation of input
Args:
        udobject (UDFunction, UDGraph, ndarray, int, float): user-defined function or number
Returns:
The result from the sinh operation.
"""
return (exp(udobject) - exp(-udobject)) / 2
def cosh(udobject):
"""calculate the cosh operation of input
Args:
        udobject (UDFunction, UDGraph, ndarray, int, float): user-defined function or number
Returns:
The result from the cosh operation.
"""
return (exp(udobject) + exp(-udobject)) / 2
def tanh(udobject):
"""calculate the tanh operation of input
Args:
        udobject (UDFunction, UDGraph, ndarray, int, float): user-defined function or number
Returns:
The result from the tanh operation.
"""
return sinh(udobject) / cosh(udobject)
def coth(udobject):
"""calculate the coth operation of input
Args:
        udobject (UDFunction, UDGraph, ndarray, int, float): user-defined function or number
Returns:
The result from the coth operation.
"""
return cosh(udobject) / sinh(udobject)
def sech(udobject):
"""calculate the sech operation of input
Args:
        udobject (UDFunction, UDGraph, ndarray, int, float): user-defined function or number
Returns:
The result from the sech operation.
"""
return 1 / cosh(udobject)
def csch(udobject):
"""calculate the csch operation of input
Args:
        udobject (UDFunction, UDGraph, ndarray, int, float): user-defined function or number
Returns:
The result from the csch operation.
"""
return 1 / sinh(udobject)
def arccos(udobject):
"""calculate the arccos operation of input
Args:
        udobject (UDFunction, UDGraph, ndarray, int, float): user-defined function or number
    Raises:
        TypeError: raised if the input type is not compatible with the arccos operation
    Returns:
        A UDFunction with val and der updated by the arccos operation if the input is a UDFunction;
        a UDGraph with a new node recording the arccos operation if the input is a UDGraph;
        otherwise the arccos of the int, float or ndarray input.
"""
if isinstance(udobject, UDFunction):
check_arc(udobject._val)
if isinstance(udobject._val, (int, float)):
new_val = math.acos(udobject._val)
new_der = (-1 / math.sqrt(1 - udobject._val**2)) * udobject._der
elif isinstance(udobject._val, np.ndarray):
new_val = np.arccos(udobject._val)
new_der = (-1 / np.sqrt(1 - udobject._val**2)) * udobject._der
else:
raise TypeError("error raised by undefined: unsupported attribute type.")
return UDFunction(new_val, new_der)
elif isinstance(udobject, UDGraph):
check_arc(udobject._val)
new_func = UDPrimitive.ACOS
if isinstance(udobject._val, (int, float)):
new_val = math.acos(udobject._val)
elif isinstance(udobject._val, np.ndarray):
new_val = np.arccos(udobject._val)
else:
raise TypeError("error raised by undefined: unsupported attribute type.")
udgraph = UDGraph(new_val, new_func)
udgraph._parents.append(udobject)
return udgraph
elif isinstance(udobject, np.ndarray):
check_arc(udobject)
return np.arccos(udobject)
elif isinstance(udobject, (int, float)):
check_arc(udobject)
return math.acos(udobject)
else:
raise TypeError("error raised by undefined: unsupported attribute type.")
def arcsin(udobject):
"""calculate the arcsin operation of input
Args:
        udobject (UDFunction, UDGraph, ndarray, int, float): user-defined function or number
    Raises:
        TypeError: raised if the input type is not compatible with the arcsin operation
    Returns:
        A UDFunction with val and der updated by the arcsin operation if the input is a UDFunction;
        a UDGraph with a new node recording the arcsin operation if the input is a UDGraph;
        otherwise the arcsin of the int, float or ndarray input.
"""
if isinstance(udobject, UDFunction):
check_arc(udobject._val)
if isinstance(udobject._val, (int, float)):
new_val = math.asin(udobject._val)
new_der = (1 / math.sqrt(1 - udobject._val**2)) * udobject._der
elif isinstance(udobject._val, np.ndarray):
new_val = np.arcsin(udobject._val)
new_der = (1 / np.sqrt(1 - udobject._val**2)) * udobject._der
else:
raise TypeError("error raised by undefined: unsupported attribute type.")
return UDFunction(new_val, new_der)
elif isinstance(udobject, UDGraph):
check_arc(udobject._val)
new_func = UDPrimitive.ASIN
if isinstance(udobject._val, (int, float)):
new_val = math.asin(udobject._val)
elif isinstance(udobject._val, np.ndarray):
new_val = np.arcsin(udobject._val)
else:
raise TypeError("error raised by undefined: unsupported attribute type.")
udgraph = UDGraph(new_val, new_func)
udgraph._parents.append(udobject)
return udgraph
elif isinstance(udobject, np.ndarray):
check_arc(udobject)
return np.arcsin(udobject)
elif isinstance(udobject, (int, float)):
check_arc(udobject)
return math.asin(udobject)
else:
raise TypeError("error raised by undefined: unsupported attribute type.")
def arctan(udobject):
"""calculate the arctan operation of input
Args:
        udobject (UDFunction, UDGraph, ndarray, int, float): user-defined function or number
    Raises:
        TypeError: raised if the input type is not compatible with the arctan operation
    Returns:
        A UDFunction with val and der updated by the arctan operation if the input is a UDFunction;
        a UDGraph with a new node recording the arctan operation if the input is a UDGraph;
        otherwise the arctan of the int, float or ndarray input.
"""
if isinstance(udobject, UDFunction):
if isinstance(udobject._val, (int, float)):
new_val = math.atan(udobject._val)
new_der = (1 / (1 + udobject._val ** 2)) * udobject._der
elif isinstance(udobject._val, np.ndarray):
new_val = np.arctan(udobject._val)
new_der = (1 / (1 + udobject._val ** 2)) * udobject._der
else:
raise TypeError("error raised by undefined: unsupported attribute type.")
return UDFunction(new_val, new_der)
elif isinstance(udobject, UDGraph):
new_func = UDPrimitive.ATAN
if isinstance(udobject._val, (int, float)):
new_val = math.atan(udobject._val)
elif isinstance(udobject._val, np.ndarray):
new_val = np.arctan(udobject._val)
else:
raise TypeError("error raised by undefined: unsupported attribute type.")
udgraph = UDGraph(new_val, new_func)
udgraph._parents.append(udobject)
return udgraph
elif isinstance(udobject, np.ndarray):
return np.arctan(udobject)
elif isinstance(udobject, (int, float)):
return math.atan(udobject)
else:
raise TypeError("error raised by undefined: unsupported attribute type.")
def sqrt(udobject):
"""calculate the square root operation of input
Args:
        udobject (UDFunction, UDGraph, ndarray, int, float): user-defined function or number
    Raises:
        TypeError: raised if the input type is not compatible with the square root operation
    Returns:
        A UDFunction with val and der updated by the square root operation if the input is a UDFunction;
        a UDGraph with a new node recording the square root operation if the input is a UDGraph;
        otherwise the square root of the int, float or ndarray input.
"""
if isinstance(udobject, UDFunction):
check_pow(udobject._val, 0.5)
if isinstance(udobject._val, (int, float)):
new_val = math.sqrt(udobject._val)
new_der = 0.5 * math.pow(udobject._val, -0.5) * udobject._der
elif isinstance(udobject._val, np.ndarray):
new_val = np.sqrt(udobject._val)
new_der = 0.5 * np.power(udobject._val, -0.5) * udobject._der
else:
raise TypeError("error raised by undefined: unsupported attribute type.")
return UDFunction(new_val, new_der)
elif isinstance(udobject, UDGraph):
check_pow(udobject._val, 0.5)
new_func = UDPrimitive.SQRT
if isinstance(udobject._val, (int, float)):
new_val = math.sqrt(udobject._val)
elif isinstance(udobject._val, np.ndarray):
new_val = np.sqrt(udobject._val)
else:
raise TypeError("error raised by undefined: unsupported attribute type.")
udgraph = UDGraph(new_val, new_func)
udgraph._parents.append(udobject)
return udgraph
elif isinstance(udobject, np.ndarray):
check_pow(udobject, 0.5)
return np.sqrt(udobject)
elif isinstance(udobject, (int, float)):
check_pow(udobject, 0.5)
return math.sqrt(udobject)
else:
raise TypeError("error raised by undefined: unsupported attribute type.")
def exp(udobject):
"""calculate the square exponential of input
Args:
udobject (udfunction object,UDGraph object,ndarray,ndarray,int,float): User defined function/number
Raises:
TypeError:raised if input is not compatiable with exponential operation
Returns:
if input is udfunction object,update val and der by exponential operation.
if input is UDGraph object,update notes and function by exponential operation.
if input is int,float,ndarray object,update them in exponential operation by their own types.
"""
if isinstance(udobject, UDFunction):
if isinstance(udobject._val, (int, float)):
new_val = math.exp(udobject._val)
new_der = math.exp(udobject._val) * udobject._der
elif isinstance(udobject._val, np.ndarray):
new_val = np.exp(udobject._val)
new_der = np.exp(udobject._val) * udobject._der
else:
raise TypeError("error raised by undefined: unsupported attribute type.")
return UDFunction(new_val, new_der)
elif isinstance(udobject, UDGraph):
new_func = UDPrimitive.EXP
if isinstance(udobject._val, (int, float)):
new_val = math.exp(udobject._val)
elif isinstance(udobject._val, np.ndarray):
new_val = np.exp(udobject._val)
else:
raise TypeError("error raised by undefined: unsupported attribute type.")
udgraph = UDGraph(new_val, new_func)
udgraph._parents.append(udobject)
return udgraph
elif isinstance(udobject, np.ndarray):
return np.exp(udobject)
elif isinstance(udobject, (int, float)):
return math.exp(udobject)
else:
raise TypeError("error raised by undefined: unsupported attribute type.")
def standard_logistic(udobject):
"""this is the function we calculate the standard logistic.
It is different than the log() function
Args:
udobject (udfunction object,UDGraph object,ndarray,ndarray,int,float): User defined function/number
Returns:
return the standard logistic results.
"""
return 1 / (1 + exp(-udobject))
def log(udobject, base=math.e):
"""calculate the log of input.
    We can handle any base in this log. Users can pass in the base argument.
    Args:
        udobject (UDFunction, UDGraph, ndarray, int, float): user-defined function or number
    Raises:
        TypeError: raised if the input type is not compatible with the log operation
    Returns:
        A UDFunction with val and der updated by the log operation if the input is a UDFunction;
        a UDGraph with a new node recording the log operation if the input is a UDGraph;
        otherwise the log of the int, float or ndarray input.
"""
if isinstance(udobject, UDFunction):
check_log(udobject._val, base)
if isinstance(udobject._val, (int, float)):
new_val = math.log(udobject._val, base)
new_der = 1 / (math.log(base) * udobject._val) * udobject._der
elif isinstance(udobject._val, np.ndarray):
new_val = np.log(udobject._val)
new_val = new_val / math.log(base)
new_der = 1 / (math.log(base) * udobject._val) * udobject._der
else:
raise TypeError("error raised by undefined: unsupported attribute type.")
return UDFunction(new_val, new_der)
elif isinstance(udobject, UDGraph):
check_log(udobject._val, base)
new_func = UDPrimitive.LOG
if isinstance(udobject._val, (int, float)):
new_val = math.log(udobject._val, base)
elif isinstance(udobject._val, np.ndarray):
new_val = np.log(udobject._val) / math.log(base)
else:
raise TypeError("error raised by undefined: unsupported attribute type.")
udgraph = UDGraph(new_val, new_func)
udgraph._parents.append(udobject)
udgraph._params["base"] = base
return udgraph
elif isinstance(udobject, np.ndarray):
check_log(udobject, base)
return np.log(udobject) / math.log(base)
elif isinstance(udobject, (int, float)):
check_log(udobject, base)
return math.log(udobject, base)
else:
raise TypeError("error raised by undefined: unsupported attribute type.")
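# Minimal usage sketch (a hypothetical example, assuming UDFunction(val, der) seeds a
# forward-mode derivative, as it is constructed above):
#   x = UDFunction(0.5, 1.0)
#   y = sin(x)          # y._val == math.sin(0.5), y._der == math.cos(0.5) * 1.0
#   z = log(exp(x))     # chains the rules defined in this module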
|
[
"numpy.arccos",
"numpy.sqrt",
"math.acos",
"undefined.Utils.check_pow",
"math.sqrt",
"numpy.log",
"math.log",
"math.cos",
"numpy.sin",
"math.exp",
"sys.path.append",
"math.atan",
"undefined.UDFunction.UDFunction",
"math.tan",
"numpy.exp",
"numpy.arctan",
"undefined.Utils.check_arc",
"numpy.cos",
"undefined.Utils.check_log",
"numpy.tan",
"numpy.power",
"math.pow",
"math.asin",
"numpy.arcsin",
"undefined.GraphGenerator.UDGraph",
"math.sin"
] |
[((63, 88), 'sys.path.append', 'sys.path.append', (['"""./src/"""'], {}), "('./src/')\n", (78, 88), False, 'import sys\n'), ((1320, 1348), 'undefined.UDFunction.UDFunction', 'UDFunction', (['new_val', 'new_der'], {}), '(new_val, new_der)\n', (1330, 1348), False, 'from undefined.UDFunction import UDFunction\n'), ((3091, 3119), 'undefined.UDFunction.UDFunction', 'UDFunction', (['new_val', 'new_der'], {}), '(new_val, new_der)\n', (3101, 3119), False, 'from undefined.UDFunction import UDFunction\n'), ((5025, 5053), 'undefined.UDFunction.UDFunction', 'UDFunction', (['new_val', 'new_der'], {}), '(new_val, new_der)\n', (5035, 5053), False, 'from undefined.UDFunction import UDFunction\n'), ((8367, 8391), 'undefined.Utils.check_arc', 'check_arc', (['udobject._val'], {}), '(udobject._val)\n', (8376, 8391), False, 'from undefined.Utils import UDPrimitive, check_division_by_zero, check_log, check_pow, check_arc\n'), ((8857, 8885), 'undefined.UDFunction.UDFunction', 'UDFunction', (['new_val', 'new_der'], {}), '(new_val, new_der)\n', (8867, 8885), False, 'from undefined.UDFunction import UDFunction\n'), ((10320, 10344), 'undefined.Utils.check_arc', 'check_arc', (['udobject._val'], {}), '(udobject._val)\n', (10329, 10344), False, 'from undefined.Utils import UDPrimitive, check_division_by_zero, check_log, check_pow, check_arc\n'), ((10808, 10836), 'undefined.UDFunction.UDFunction', 'UDFunction', (['new_val', 'new_der'], {}), '(new_val, new_der)\n', (10818, 10836), False, 'from undefined.UDFunction import UDFunction\n'), ((12712, 12740), 'undefined.UDFunction.UDFunction', 'UDFunction', (['new_val', 'new_der'], {}), '(new_val, new_der)\n', (12722, 12740), False, 'from undefined.UDFunction import UDFunction\n'), ((14109, 14138), 'undefined.Utils.check_pow', 'check_pow', (['udobject._val', '(0.5)'], {}), '(udobject._val, 0.5)\n', (14118, 14138), False, 'from undefined.Utils import UDPrimitive, check_division_by_zero, check_log, check_pow, check_arc\n'), ((14598, 14626), 'undefined.UDFunction.UDFunction', 'UDFunction', (['new_val', 'new_der'], {}), '(new_val, new_der)\n', (14608, 14626), False, 'from undefined.UDFunction import UDFunction\n'), ((16515, 16543), 'undefined.UDFunction.UDFunction', 'UDFunction', (['new_val', 'new_der'], {}), '(new_val, new_der)\n', (16525, 16543), False, 'from undefined.UDFunction import UDFunction\n'), ((18316, 18346), 'undefined.Utils.check_log', 'check_log', (['udobject._val', 'base'], {}), '(udobject._val, base)\n', (18325, 18346), False, 'from undefined.Utils import UDPrimitive, check_division_by_zero, check_log, check_pow, check_arc\n'), ((18859, 18887), 'undefined.UDFunction.UDFunction', 'UDFunction', (['new_val', 'new_der'], {}), '(new_val, new_der)\n', (18869, 18887), False, 'from undefined.UDFunction import UDFunction\n'), ((952, 975), 'math.cos', 'math.cos', (['udobject._val'], {}), '(udobject._val)\n', (960, 975), False, 'import math\n'), ((1737, 1763), 'undefined.GraphGenerator.UDGraph', 'UDGraph', (['new_val', 'new_func'], {}), '(new_val, new_func)\n', (1744, 1763), False, 'from undefined.GraphGenerator import UDGraph\n'), ((2734, 2757), 'math.sin', 'math.sin', (['udobject._val'], {}), '(udobject._val)\n', (2742, 2757), False, 'import math\n'), ((3507, 3533), 'undefined.GraphGenerator.UDGraph', 'UDGraph', (['new_val', 'new_func'], {}), '(new_val, new_func)\n', (3514, 3533), False, 'from undefined.GraphGenerator import UDGraph\n'), ((4584, 4607), 'math.tan', 'math.tan', (['udobject._val'], {}), '(udobject._val)\n', (4592, 4607), False, 'import math\n'), ((5559, 
5585), 'undefined.GraphGenerator.UDGraph', 'UDGraph', (['new_val', 'new_func'], {}), '(new_val, new_func)\n', (5566, 5585), False, 'from undefined.GraphGenerator import UDGraph\n'), ((8466, 8490), 'math.acos', 'math.acos', (['udobject._val'], {}), '(udobject._val)\n', (8475, 8490), False, 'import math\n'), ((8935, 8959), 'undefined.Utils.check_arc', 'check_arc', (['udobject._val'], {}), '(udobject._val)\n', (8944, 8959), False, 'from undefined.Utils import UDPrimitive, check_division_by_zero, check_log, check_pow, check_arc\n'), ((9312, 9338), 'undefined.GraphGenerator.UDGraph', 'UDGraph', (['new_val', 'new_func'], {}), '(new_val, new_func)\n', (9319, 9338), False, 'from undefined.GraphGenerator import UDGraph\n'), ((10419, 10443), 'math.asin', 'math.asin', (['udobject._val'], {}), '(udobject._val)\n', (10428, 10443), False, 'import math\n'), ((10885, 10909), 'undefined.Utils.check_arc', 'check_arc', (['udobject._val'], {}), '(udobject._val)\n', (10894, 10909), False, 'from undefined.Utils import UDPrimitive, check_division_by_zero, check_log, check_pow, check_arc\n'), ((11262, 11288), 'undefined.GraphGenerator.UDGraph', 'UDGraph', (['new_val', 'new_func'], {}), '(new_val, new_func)\n', (11269, 11288), False, 'from undefined.GraphGenerator import UDGraph\n'), ((12335, 12359), 'math.atan', 'math.atan', (['udobject._val'], {}), '(udobject._val)\n', (12344, 12359), False, 'import math\n'), ((13133, 13159), 'undefined.GraphGenerator.UDGraph', 'UDGraph', (['new_val', 'new_func'], {}), '(new_val, new_func)\n', (13140, 13159), False, 'from undefined.GraphGenerator import UDGraph\n'), ((14213, 14237), 'math.sqrt', 'math.sqrt', (['udobject._val'], {}), '(udobject._val)\n', (14222, 14237), False, 'import math\n'), ((14676, 14705), 'undefined.Utils.check_pow', 'check_pow', (['udobject._val', '(0.5)'], {}), '(udobject._val, 0.5)\n', (14685, 14705), False, 'from undefined.Utils import UDPrimitive, check_division_by_zero, check_log, check_pow, check_arc\n'), ((15056, 15082), 'undefined.GraphGenerator.UDGraph', 'UDGraph', (['new_val', 'new_func'], {}), '(new_val, new_func)\n', (15063, 15082), False, 'from undefined.GraphGenerator import UDGraph\n'), ((16158, 16181), 'math.exp', 'math.exp', (['udobject._val'], {}), '(udobject._val)\n', (16166, 16181), False, 'import math\n'), ((16932, 16958), 'undefined.GraphGenerator.UDGraph', 'UDGraph', (['new_val', 'new_func'], {}), '(new_val, new_func)\n', (16939, 16958), False, 'from undefined.GraphGenerator import UDGraph\n'), ((18421, 18450), 'math.log', 'math.log', (['udobject._val', 'base'], {}), '(udobject._val, base)\n', (18429, 18450), False, 'import math\n'), ((18937, 18967), 'undefined.Utils.check_log', 'check_log', (['udobject._val', 'base'], {}), '(udobject._val, base)\n', (18946, 18967), False, 'from undefined.Utils import UDPrimitive, check_division_by_zero, check_log, check_pow, check_arc\n'), ((19338, 19364), 'undefined.GraphGenerator.UDGraph', 'UDGraph', (['new_val', 'new_func'], {}), '(new_val, new_func)\n', (19345, 19364), False, 'from undefined.GraphGenerator import UDGraph\n'), ((1118, 1139), 'numpy.cos', 'np.cos', (['udobject._val'], {}), '(udobject._val)\n', (1124, 1139), True, 'import numpy as np\n'), ((1499, 1522), 'math.cos', 'math.cos', (['udobject._val'], {}), '(udobject._val)\n', (1507, 1522), False, 'import math\n'), ((1888, 1904), 'numpy.cos', 'np.cos', (['udobject'], {}), '(udobject)\n', (1894, 1904), True, 'import numpy as np\n'), ((2780, 2803), 'math.cos', 'math.cos', (['udobject._val'], {}), '(udobject._val)\n', (2788, 2803), False, 
'import math\n'), ((2894, 2915), 'numpy.sin', 'np.sin', (['udobject._val'], {}), '(udobject._val)\n', (2900, 2915), True, 'import numpy as np\n'), ((3269, 3292), 'math.sin', 'math.sin', (['udobject._val'], {}), '(udobject._val)\n', (3277, 3292), False, 'import math\n'), ((3658, 3674), 'numpy.sin', 'np.sin', (['udobject'], {}), '(udobject)\n', (3664, 3674), True, 'import numpy as np\n'), ((4537, 4560), 'math.cos', 'math.cos', (['udobject._val'], {}), '(udobject._val)\n', (4545, 4560), False, 'import math\n'), ((4815, 4836), 'numpy.tan', 'np.tan', (['udobject._val'], {}), '(udobject._val)\n', (4821, 4836), True, 'import numpy as np\n'), ((5263, 5286), 'math.tan', 'math.tan', (['udobject._val'], {}), '(udobject._val)\n', (5271, 5286), False, 'import math\n'), ((5759, 5775), 'numpy.tan', 'np.tan', (['udobject'], {}), '(udobject)\n', (5765, 5775), True, 'import numpy as np\n'), ((8642, 8666), 'numpy.arccos', 'np.arccos', (['udobject._val'], {}), '(udobject._val)\n', (8651, 8666), True, 'import numpy as np\n'), ((9070, 9094), 'math.acos', 'math.acos', (['udobject._val'], {}), '(udobject._val)\n', (9079, 9094), False, 'import math\n'), ((9456, 9475), 'undefined.Utils.check_arc', 'check_arc', (['udobject'], {}), '(udobject)\n', (9465, 9475), False, 'from undefined.Utils import UDPrimitive, check_division_by_zero, check_log, check_pow, check_arc\n'), ((9491, 9510), 'numpy.arccos', 'np.arccos', (['udobject'], {}), '(udobject)\n', (9500, 9510), True, 'import numpy as np\n'), ((10594, 10618), 'numpy.arcsin', 'np.arcsin', (['udobject._val'], {}), '(udobject._val)\n', (10603, 10618), True, 'import numpy as np\n'), ((11020, 11044), 'math.asin', 'math.asin', (['udobject._val'], {}), '(udobject._val)\n', (11029, 11044), False, 'import math\n'), ((11406, 11425), 'undefined.Utils.check_arc', 'check_arc', (['udobject'], {}), '(udobject)\n', (11415, 11425), False, 'from undefined.Utils import UDPrimitive, check_division_by_zero, check_log, check_pow, check_arc\n'), ((11441, 11460), 'numpy.arcsin', 'np.arcsin', (['udobject'], {}), '(udobject)\n', (11450, 11460), True, 'import numpy as np\n'), ((12503, 12527), 'numpy.arctan', 'np.arctan', (['udobject._val'], {}), '(udobject._val)\n', (12512, 12527), True, 'import numpy as np\n'), ((12891, 12915), 'math.atan', 'math.atan', (['udobject._val'], {}), '(udobject._val)\n', (12900, 12915), False, 'import math\n'), ((13284, 13303), 'numpy.arctan', 'np.arctan', (['udobject'], {}), '(udobject)\n', (13293, 13303), True, 'import numpy as np\n'), ((14386, 14408), 'numpy.sqrt', 'np.sqrt', (['udobject._val'], {}), '(udobject._val)\n', (14393, 14408), True, 'import numpy as np\n'), ((14816, 14840), 'math.sqrt', 'math.sqrt', (['udobject._val'], {}), '(udobject._val)\n', (14825, 14840), False, 'import math\n'), ((15200, 15224), 'undefined.Utils.check_pow', 'check_pow', (['udobject', '(0.5)'], {}), '(udobject, 0.5)\n', (15209, 15224), False, 'from undefined.Utils import UDPrimitive, check_division_by_zero, check_log, check_pow, check_arc\n'), ((15240, 15257), 'numpy.sqrt', 'np.sqrt', (['udobject'], {}), '(udobject)\n', (15247, 15257), True, 'import numpy as np\n'), ((16204, 16227), 'math.exp', 'math.exp', (['udobject._val'], {}), '(udobject._val)\n', (16212, 16227), False, 'import math\n'), ((16318, 16339), 'numpy.exp', 'np.exp', (['udobject._val'], {}), '(udobject._val)\n', (16324, 16339), True, 'import numpy as np\n'), ((16694, 16717), 'math.exp', 'math.exp', (['udobject._val'], {}), '(udobject._val)\n', (16702, 16717), False, 'import math\n'), ((17083, 17099), 'numpy.exp', 
'np.exp', (['udobject'], {}), '(udobject)\n', (17089, 17099), True, 'import numpy as np\n'), ((18600, 18621), 'numpy.log', 'np.log', (['udobject._val'], {}), '(udobject._val)\n', (18606, 18621), True, 'import numpy as np\n'), ((19077, 19106), 'math.log', 'math.log', (['udobject._val', 'base'], {}), '(udobject._val, base)\n', (19085, 19106), False, 'import math\n'), ((19521, 19546), 'undefined.Utils.check_log', 'check_log', (['udobject', 'base'], {}), '(udobject, base)\n', (19530, 19546), False, 'from undefined.Utils import UDPrimitive, check_division_by_zero, check_log, check_pow, check_arc\n'), ((1004, 1027), 'math.sin', 'math.sin', (['udobject._val'], {}), '(udobject._val)\n', (1012, 1027), False, 'import math\n'), ((1597, 1618), 'numpy.cos', 'np.cos', (['udobject._val'], {}), '(udobject._val)\n', (1603, 1618), True, 'import numpy as np\n'), ((1966, 1984), 'math.cos', 'math.cos', (['udobject'], {}), '(udobject)\n', (1974, 1984), False, 'import math\n'), ((2938, 2959), 'numpy.cos', 'np.cos', (['udobject._val'], {}), '(udobject._val)\n', (2944, 2959), True, 'import numpy as np\n'), ((3367, 3388), 'numpy.sin', 'np.sin', (['udobject._val'], {}), '(udobject._val)\n', (3373, 3388), True, 'import numpy as np\n'), ((3736, 3754), 'math.sin', 'math.sin', (['udobject'], {}), '(udobject)\n', (3744, 3754), False, 'import math\n'), ((4770, 4791), 'numpy.cos', 'np.cos', (['udobject._val'], {}), '(udobject._val)\n', (4776, 4791), True, 'import numpy as np\n'), ((5216, 5239), 'math.cos', 'math.cos', (['udobject._val'], {}), '(udobject._val)\n', (5224, 5239), False, 'import math\n'), ((5419, 5440), 'numpy.tan', 'np.tan', (['udobject._val'], {}), '(udobject._val)\n', (5425, 5440), True, 'import numpy as np\n'), ((5726, 5742), 'numpy.cos', 'np.cos', (['udobject'], {}), '(udobject)\n', (5732, 5742), True, 'import numpy as np\n'), ((5888, 5906), 'math.tan', 'math.tan', (['udobject'], {}), '(udobject)\n', (5896, 5906), False, 'import math\n'), ((8519, 8552), 'math.sqrt', 'math.sqrt', (['(1 - udobject._val ** 2)'], {}), '(1 - udobject._val ** 2)\n', (8528, 8552), False, 'import math\n'), ((9169, 9193), 'numpy.arccos', 'np.arccos', (['udobject._val'], {}), '(udobject._val)\n', (9178, 9193), True, 'import numpy as np\n'), ((9565, 9584), 'undefined.Utils.check_arc', 'check_arc', (['udobject'], {}), '(udobject)\n', (9574, 9584), False, 'from undefined.Utils import UDPrimitive, check_division_by_zero, check_log, check_pow, check_arc\n'), ((9600, 9619), 'math.acos', 'math.acos', (['udobject'], {}), '(udobject)\n', (9609, 9619), False, 'import math\n'), ((10471, 10504), 'math.sqrt', 'math.sqrt', (['(1 - udobject._val ** 2)'], {}), '(1 - udobject._val ** 2)\n', (10480, 10504), False, 'import math\n'), ((11119, 11143), 'numpy.arcsin', 'np.arcsin', (['udobject._val'], {}), '(udobject._val)\n', (11128, 11143), True, 'import numpy as np\n'), ((11515, 11534), 'undefined.Utils.check_arc', 'check_arc', (['udobject'], {}), '(udobject)\n', (11524, 11534), False, 'from undefined.Utils import UDPrimitive, check_division_by_zero, check_log, check_pow, check_arc\n'), ((11550, 11569), 'math.asin', 'math.asin', (['udobject'], {}), '(udobject)\n', (11559, 11569), False, 'import math\n'), ((12990, 13014), 'numpy.arctan', 'np.arctan', (['udobject._val'], {}), '(udobject._val)\n', (12999, 13014), True, 'import numpy as np\n'), ((13365, 13384), 'math.atan', 'math.atan', (['udobject'], {}), '(udobject)\n', (13374, 13384), False, 'import math\n'), ((14266, 14295), 'math.pow', 'math.pow', (['udobject._val', '(-0.5)'], {}), '(udobject._val, 
-0.5)\n', (14274, 14295), False, 'import math\n'), ((14915, 14937), 'numpy.sqrt', 'np.sqrt', (['udobject._val'], {}), '(udobject._val)\n', (14922, 14937), True, 'import numpy as np\n'), ((15312, 15336), 'undefined.Utils.check_pow', 'check_pow', (['udobject', '(0.5)'], {}), '(udobject, 0.5)\n', (15321, 15336), False, 'from undefined.Utils import UDPrimitive, check_division_by_zero, check_log, check_pow, check_arc\n'), ((15352, 15371), 'math.sqrt', 'math.sqrt', (['udobject'], {}), '(udobject)\n', (15361, 15371), False, 'import math\n'), ((16362, 16383), 'numpy.exp', 'np.exp', (['udobject._val'], {}), '(udobject._val)\n', (16368, 16383), True, 'import numpy as np\n'), ((16792, 16813), 'numpy.exp', 'np.exp', (['udobject._val'], {}), '(udobject._val)\n', (16798, 16813), True, 'import numpy as np\n'), ((17161, 17179), 'math.exp', 'math.exp', (['udobject'], {}), '(udobject)\n', (17169, 17179), False, 'import math\n'), ((18654, 18668), 'math.log', 'math.log', (['base'], {}), '(base)\n', (18662, 18668), False, 'import math\n'), ((19562, 19578), 'numpy.log', 'np.log', (['udobject'], {}), '(udobject)\n', (19568, 19578), True, 'import numpy as np\n'), ((19581, 19595), 'math.log', 'math.log', (['base'], {}), '(base)\n', (19589, 19595), False, 'import math\n'), ((19650, 19675), 'undefined.Utils.check_log', 'check_log', (['udobject', 'base'], {}), '(udobject, base)\n', (19659, 19675), False, 'from undefined.Utils import UDPrimitive, check_division_by_zero, check_log, check_pow, check_arc\n'), ((19691, 19715), 'math.log', 'math.log', (['udobject', 'base'], {}), '(udobject, base)\n', (19699, 19715), False, 'import math\n'), ((1167, 1188), 'numpy.sin', 'np.sin', (['udobject._val'], {}), '(udobject._val)\n', (1173, 1188), True, 'import numpy as np\n'), ((4636, 4659), 'math.cos', 'math.cos', (['udobject._val'], {}), '(udobject._val)\n', (4644, 4659), False, 'import math\n'), ((5374, 5395), 'numpy.cos', 'np.cos', (['udobject._val'], {}), '(udobject._val)\n', (5380, 5395), True, 'import numpy as np\n'), ((5853, 5871), 'math.cos', 'math.cos', (['udobject'], {}), '(udobject)\n', (5861, 5871), False, 'import math\n'), ((8695, 8726), 'numpy.sqrt', 'np.sqrt', (['(1 - udobject._val ** 2)'], {}), '(1 - udobject._val ** 2)\n', (8702, 8726), True, 'import numpy as np\n'), ((10646, 10677), 'numpy.sqrt', 'np.sqrt', (['(1 - udobject._val ** 2)'], {}), '(1 - udobject._val ** 2)\n', (10653, 10677), True, 'import numpy as np\n'), ((14437, 14466), 'numpy.power', 'np.power', (['udobject._val', '(-0.5)'], {}), '(udobject._val, -0.5)\n', (14445, 14466), True, 'import numpy as np\n'), ((18478, 18492), 'math.log', 'math.log', (['base'], {}), '(base)\n', (18486, 18492), False, 'import math\n'), ((19181, 19202), 'numpy.log', 'np.log', (['udobject._val'], {}), '(udobject._val)\n', (19187, 19202), True, 'import numpy as np\n'), ((19205, 19219), 'math.log', 'math.log', (['base'], {}), '(base)\n', (19213, 19219), False, 'import math\n'), ((4865, 4886), 'numpy.cos', 'np.cos', (['udobject._val'], {}), '(udobject._val)\n', (4871, 4886), True, 'import numpy as np\n'), ((18696, 18710), 'math.log', 'math.log', (['base'], {}), '(base)\n', (18704, 18710), False, 'import math\n')]
|
#!/usr/bin/env python
import os
import sys
import numpy as np
from ...grids import RasterField
class BovError(Exception):
pass
class MissingRequiredKeyError(BovError):
def __init__(self, opt):
self.opt = opt
def __str__(self):
return "%s: Missing required key" % self.opt
class BadKeyValueError(BovError):
def __init__(self, key, value):
self.key = key
self.value = value
def __str__(self):
return "%s, %s: Bad value" % (self.key, self.value)
class ReadError(BovError):
def __init__(self, filename):
self.filename = filename
def __str__(self):
return "%s: Unable to read" % self.filename
class FileExists(BovError):
def __init__(self, filename):
self.filename = filename
def __str__(self):
return "%s: Unable to write to file" % self.filename
class BadFileExtension(BovError):
def __init__(self, ext):
self.ext = ext
def __str__(self):
return "%s: Extension should be '.bov' or empty" % self.ext
_BOV_TO_NP_TYPE = {
"BYTE": "uint8",
"SHORT": "int32",
"INT": "int64",
"FLOAT": "float32",
"DOUBLE": "float64",
}
_NP_TO_BOV_TYPE = dict(zip(_BOV_TO_NP_TYPE.values(), _BOV_TO_NP_TYPE.keys()))
_SYS_TO_BOV_ENDIAN = {"little": "LITTLE", "big": "BIG"}
def array_to_str(array):
s = [str(x) for x in array]
return " ".join(s)
def fromfile(filename, allow_singleton=True):
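    """Read a BOV header file and its companion data file.

    The required header keys (DATA_SIZE, BRICK_ORIGIN, BRICK_SIZE, ...) are
    parsed into NumPy arrays, the raw data is loaded with ``np.fromfile`` and
    attached to a RasterField as a point (or zonal) field.  Returns a
    ``(grid, header)`` tuple.
    """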
header = {}
with open(filename, "r") as f:
for line in f:
try:
(data, _) = line.split("#")
except ValueError:
data = line
try:
(key, value) = data.split(":")
header[key.strip()] = value.strip()
except ValueError:
pass
keys_found = set(header.keys())
keys_required = {
"DATA_SIZE",
"DATA_FORMAT",
"DATA_FILE",
"BRICK_ORIGIN",
"BRICK_SIZE",
"VARIABLE",
}
if not keys_required.issubset(keys_found):
missing = ", ".join(keys_required - keys_found)
raise MissingRequiredKeyError(missing)
shape = header["DATA_SIZE"].split()
header["DATA_SIZE"] = np.array([int(i) for i in shape], dtype=np.int64)
origin = header["BRICK_ORIGIN"].split()
header["BRICK_ORIGIN"] = np.array([float(i) for i in origin], dtype=np.float64)
size = header["BRICK_SIZE"].split()
header["BRICK_SIZE"] = np.array([float(i) for i in size], dtype=np.float64)
if not allow_singleton:
not_singleton = header["DATA_SIZE"] > 1
header["DATA_SIZE"] = header["DATA_SIZE"][not_singleton]
header["BRICK_SIZE"] = header["BRICK_SIZE"][not_singleton]
header["BRICK_ORIGIN"] = header["BRICK_ORIGIN"][not_singleton]
type_str = header["DATA_FORMAT"]
try:
data_type = _BOV_TO_NP_TYPE[type_str]
except KeyError:
raise BadKeyValueError("DATA_FORMAT", type_str)
dat_file = header["DATA_FILE"]
if not os.path.isabs(dat_file):
dat_file = os.path.join(os.path.dirname(filename), dat_file)
try:
data = np.fromfile(dat_file, dtype=data_type)
except Exception:
raise
try:
data.shape = header["DATA_SIZE"]
except ValueError:
raise BadKeyValueError(
"DATA_SIZE", "%d != %d" % (np.prod(header["DATA_SIZE"]), data.size)
)
try:
header["TIME"] = float(header["TIME"])
except KeyError:
pass
shape = header["DATA_SIZE"]
origin = header["BRICK_ORIGIN"]
spacing = header["BRICK_SIZE"] / (shape - 1)
grid = RasterField(shape, spacing, origin, indexing="ij")
if "CENTERING" in header and header["CENTERING"] == "zonal":
grid.add_field(header["VARIABLE"], data, centering="zonal")
else:
grid.add_field(header["VARIABLE"], data, centering="point")
return grid, header
def array_tofile(
filename,
array,
name="",
spacing=(1.0, 1.0),
origin=(0.0, 0.0),
no_clobber=False,
options=None,
):
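    """Write *array* to ``<base>.dat`` plus a matching ``<base>.bov`` header.

    The header records the data file, shape, origin, physical size, byte
    order and element type; with ``no_clobber=True`` existing files raise
    ``FileExists`` instead of being overwritten.  Returns the name of the
    ``.bov`` file written.
    """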
options = options or {}
(base, ext) = os.path.splitext(filename)
if len(ext) > 0 and ext != ".bov":
raise BadFileExtension(ext)
spacing = np.array(spacing, dtype=np.float64)
origin = np.array(origin, dtype=np.float64)
shape = np.array(array.shape, dtype=np.int64)
size = shape * spacing
if len(shape) < 3:
shape = np.append(shape, [1] * (3 - len(shape)))
if len(origin) < 3:
origin = np.append(origin, [1.0] * (3 - len(origin)))
if len(size) < 3:
size = np.append(size, [1.0] * (3 - len(size)))
dat_file = "%s.dat" % base
bov_file = "%s.bov" % base
if no_clobber:
if os.path.isfile(bov_file):
raise FileExists(bov_file)
if os.path.isfile(dat_file):
raise FileExists(dat_file)
array.tofile(dat_file)
header = dict(
DATA_FILE=dat_file,
DATA_SIZE=array_to_str(shape),
BRICK_ORIGIN=array_to_str(origin),
BRICK_SIZE=array_to_str(size),
DATA_ENDIAN=_SYS_TO_BOV_ENDIAN[sys.byteorder],
DATA_FORMAT=_NP_TO_BOV_TYPE[str(array.dtype)],
VARIABLE=name,
)
header.update(options)
with open(bov_file, "w") as f:
for item in header.items():
f.write("%s: %s\n" % item)
return bov_file
def tofile(filename, grid, var_name=None, no_clobber=False, options=None):
"""
Write a grid-like object to a BOV file.
Parameters
----------
filename : str
Name of the BOV file to write.
grid : Grid-like
A uniform rectilinear grid.
var_name : str, optional
Name of variable contained within *field* to write.
no_clobber : boolean
        If `True`, raise an error instead of overwriting an existing output file.
options : dict
Additional options to include in the header.
Returns
-------
list
A list of BOV files written.
Notes
-----
The *grid* object requires the following methods be implemented:
* get_shape
* get_origin
* get_spacing
* items
* get_field
"""
try:
shape, spacing, origin = (
grid.get_shape(),
grid.get_spacing(),
grid.get_origin(),
)
except (AttributeError, TypeError):
raise TypeError("'%s' object is not grid-like" % type(grid))
if var_name is None:
names = grid.get_point_fields().keys()
else:
names = [var_name]
if len(names) > 1:
(base, ext) = os.path.splitext(filename)
filenames = ["%s_%s" % (base, name) + ext for name in names]
else:
filenames = [filename]
files_written = []
for (name, filename) in zip(names, filenames):
vals = grid.get_field(name).reshape(shape)
bov_file = array_tofile(
filename,
vals,
name=name,
spacing=spacing,
origin=origin,
no_clobber=no_clobber,
options=options,
)
files_written.append(bov_file)
return files_written
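# Illustrative usage (added sketch, not part of the original module; the file
# name is hypothetical).  ``array_tofile`` writes the raw values to
# ``elevation.dat`` plus an ``elevation.bov`` header that ``fromfile`` above
# can read back:
#
#     import numpy as np
#     data = np.arange(12, dtype="float64").reshape(3, 4)
#     array_tofile("elevation.bov", data, name="elevation", spacing=(1.0, 2.0))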
|
[
"numpy.prod",
"numpy.fromfile",
"os.path.isabs",
"os.path.splitext",
"os.path.isfile",
"numpy.array",
"os.path.dirname"
] |
[((4109, 4135), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (4125, 4135), False, 'import os\n'), ((4226, 4261), 'numpy.array', 'np.array', (['spacing'], {'dtype': 'np.float64'}), '(spacing, dtype=np.float64)\n', (4234, 4261), True, 'import numpy as np\n'), ((4275, 4309), 'numpy.array', 'np.array', (['origin'], {'dtype': 'np.float64'}), '(origin, dtype=np.float64)\n', (4283, 4309), True, 'import numpy as np\n'), ((4322, 4359), 'numpy.array', 'np.array', (['array.shape'], {'dtype': 'np.int64'}), '(array.shape, dtype=np.int64)\n', (4330, 4359), True, 'import numpy as np\n'), ((3016, 3039), 'os.path.isabs', 'os.path.isabs', (['dat_file'], {}), '(dat_file)\n', (3029, 3039), False, 'import os\n'), ((3135, 3173), 'numpy.fromfile', 'np.fromfile', (['dat_file'], {'dtype': 'data_type'}), '(dat_file, dtype=data_type)\n', (3146, 3173), True, 'import numpy as np\n'), ((4726, 4750), 'os.path.isfile', 'os.path.isfile', (['bov_file'], {}), '(bov_file)\n', (4740, 4750), False, 'import os\n'), ((4802, 4826), 'os.path.isfile', 'os.path.isfile', (['dat_file'], {}), '(dat_file)\n', (4816, 4826), False, 'import os\n'), ((6564, 6590), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (6580, 6590), False, 'import os\n'), ((3073, 3098), 'os.path.dirname', 'os.path.dirname', (['filename'], {}), '(filename)\n', (3088, 3098), False, 'import os\n'), ((3355, 3383), 'numpy.prod', 'np.prod', (["header['DATA_SIZE']"], {}), "(header['DATA_SIZE'])\n", (3362, 3383), True, 'import numpy as np\n')]
|
from csv import DictReader
from functools import lru_cache
from itertools import groupby
from pathlib import Path
from typing import TextIO
import click
import h5py
from skelshop.corpus import index_corpus_desc
from skelshop.face.consts import DEFAULT_METRIC
from skelshop.iden.idsegs import ref_arg
from skelshop.utils.click import PathPath
PENALTY_WEIGHT = 1e6
@lru_cache(maxsize=128)
def get_sparse_reader(face_path: str):
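    """Return a cached SparseFaceReader for ``face_path``.

    ``lru_cache`` keeps the backing ``h5py.File`` handles open, so repeated
    prototype lookups against the same faces file reuse a single reader.
    """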
from skelshop.face.io import SparseFaceReader
h5_file = h5py.File(face_path)
face_reader = SparseFaceReader(h5_file)
return face_reader
@click.command()
@ref_arg
@click.argument("protos", type=click.File("r"))
@click.argument("corpus_desc", type=PathPath(exists=True))
@click.argument("assign_out", type=click.File("w"))
@click.option("--thresh", type=float, default=float("inf"))
@click.option("--corpus-base", type=PathPath(exists=True))
def idclus(
ref,
protos: TextIO,
corpus_desc: Path,
assign_out: TextIO,
thresh: float,
corpus_base: Path,
):
"""
Identifies clusters by comparing against a reference and forcing a match
"""
import numpy as np
corpus = index_corpus_desc(corpus_desc, corpus_base)
reader = DictReader(protos)
proto_embeddings = []
proto_group_sizes = []
clus_idxs = []
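    # Collect one embedding per prototype; proto_group_sizes records how many
    # prototypes each cluster contributed (both are passed to ref.assignment
    # below).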
for clus_idx, clus_grp in groupby(reader, lambda row: row["clus_idx"]):
num_protos = 0
for proto in clus_grp:
faces = corpus[int(proto["video_idx"])]["faces"]
face_reader = get_sparse_reader(faces)
proto_embeddings.append(
face_reader[(int(proto["frame_num"]), int(proto["pers_id"]))]["embed"]
)
num_protos += 1
proto_group_sizes.append(num_protos)
clus_idxs.append("c" + clus_idx)
proto_embeddings_np = np.vstack(proto_embeddings)
assign_out.write("label,clus\n")
ref_labels = list(ref.labels())
for ref_idx, clus in ref.assignment(
DEFAULT_METRIC, thresh, proto_embeddings_np, proto_group_sizes
):
assign_out.write("{},{}\n".format(ref_labels[ref_idx], clus_idxs[clus]))
|
[
"skelshop.utils.click.PathPath",
"csv.DictReader",
"itertools.groupby",
"skelshop.face.io.SparseFaceReader",
"click.File",
"h5py.File",
"skelshop.corpus.index_corpus_desc",
"numpy.vstack",
"functools.lru_cache",
"click.command"
] |
[((369, 391), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(128)'}), '(maxsize=128)\n', (378, 391), False, 'from functools import lru_cache\n'), ((587, 602), 'click.command', 'click.command', ([], {}), '()\n', (600, 602), False, 'import click\n'), ((496, 516), 'h5py.File', 'h5py.File', (['face_path'], {}), '(face_path)\n', (505, 516), False, 'import h5py\n'), ((535, 560), 'skelshop.face.io.SparseFaceReader', 'SparseFaceReader', (['h5_file'], {}), '(h5_file)\n', (551, 560), False, 'from skelshop.face.io import SparseFaceReader\n'), ((1153, 1196), 'skelshop.corpus.index_corpus_desc', 'index_corpus_desc', (['corpus_desc', 'corpus_base'], {}), '(corpus_desc, corpus_base)\n', (1170, 1196), False, 'from skelshop.corpus import index_corpus_desc\n'), ((1210, 1228), 'csv.DictReader', 'DictReader', (['protos'], {}), '(protos)\n', (1220, 1228), False, 'from csv import DictReader\n'), ((1331, 1375), 'itertools.groupby', 'groupby', (['reader', "(lambda row: row['clus_idx'])"], {}), "(reader, lambda row: row['clus_idx'])\n", (1338, 1375), False, 'from itertools import groupby\n'), ((1821, 1848), 'numpy.vstack', 'np.vstack', (['proto_embeddings'], {}), '(proto_embeddings)\n', (1830, 1848), True, 'import numpy as np\n'), ((643, 658), 'click.File', 'click.File', (['"""r"""'], {}), "('r')\n", (653, 658), False, 'import click\n'), ((696, 717), 'skelshop.utils.click.PathPath', 'PathPath', ([], {'exists': '(True)'}), '(exists=True)\n', (704, 717), False, 'from skelshop.utils.click import PathPath\n'), ((754, 769), 'click.File', 'click.File', (['"""w"""'], {}), "('w')\n", (764, 769), False, 'import click\n'), ((867, 888), 'skelshop.utils.click.PathPath', 'PathPath', ([], {'exists': '(True)'}), '(exists=True)\n', (875, 888), False, 'from skelshop.utils.click import PathPath\n')]
|
from helper.shapenet.shapenetMapper import desc_to_id
from deformations.FFD import get_template_ffd
from deformations.meshDeformation import get_thresholded_template_mesh
from mayavi import mlab
import numpy as np
from graphicUtils.visualizer.mayaviVisualizer import visualize_mesh, visualize_point_cloud
ds = get_template_ffd("/media/saurabh/e56e40fb-030d-4f7f-9e63-42ed5f7f6c711/preprocessing_new", desc_to_id("pistol"),
edge_length_threshold=None, n_samples=16384)
key = "1f646ff59cabdddcd810dcd63f342aca"
with ds:
b = np.array(ds[key]['b'])
p = np.array(ds[key]['p'])
mesh_dataset = get_thresholded_template_mesh("/media/saurabh/e56e40fb-030d-4f7f-9e63-42ed5f7f6c711/preprocessing_new", desc_to_id("pistol"),
None)
with mesh_dataset:
f = np.array(mesh_dataset[key]['faces'])
v_orignal = np.array(mesh_dataset[key]['vertices'])
# print(b)
# visualize_mesh(v_orignal, f)
# mlab.show()
visualize_mesh(np.matmul(b, p), f)
mlab.show()
visualize_point_cloud(np.matmul(b, p))
mlab.show()
# from deformations.FFD import calculate_ffd
# b, p = calculate_ffd(v_orignal, f)
# visualize_mesh(np.matmul(b, p), f)
# mlab.show()
|
[
"numpy.array",
"numpy.matmul",
"helper.shapenet.shapenetMapper.desc_to_id",
"mayavi.mlab.show"
] |
[((991, 1002), 'mayavi.mlab.show', 'mlab.show', ([], {}), '()\n', (1000, 1002), False, 'from mayavi import mlab\n'), ((1043, 1054), 'mayavi.mlab.show', 'mlab.show', ([], {}), '()\n', (1052, 1054), False, 'from mayavi import mlab\n'), ((404, 424), 'helper.shapenet.shapenetMapper.desc_to_id', 'desc_to_id', (['"""pistol"""'], {}), "('pistol')\n", (414, 424), False, 'from helper.shapenet.shapenetMapper import desc_to_id\n'), ((553, 575), 'numpy.array', 'np.array', (["ds[key]['b']"], {}), "(ds[key]['b'])\n", (561, 575), True, 'import numpy as np\n'), ((584, 606), 'numpy.array', 'np.array', (["ds[key]['p']"], {}), "(ds[key]['p'])\n", (592, 606), True, 'import numpy as np\n'), ((727, 747), 'helper.shapenet.shapenetMapper.desc_to_id', 'desc_to_id', (['"""pistol"""'], {}), "('pistol')\n", (737, 747), False, 'from helper.shapenet.shapenetMapper import desc_to_id\n'), ((805, 841), 'numpy.array', 'np.array', (["mesh_dataset[key]['faces']"], {}), "(mesh_dataset[key]['faces'])\n", (813, 841), True, 'import numpy as np\n'), ((858, 897), 'numpy.array', 'np.array', (["mesh_dataset[key]['vertices']"], {}), "(mesh_dataset[key]['vertices'])\n", (866, 897), True, 'import numpy as np\n'), ((971, 986), 'numpy.matmul', 'np.matmul', (['b', 'p'], {}), '(b, p)\n', (980, 986), True, 'import numpy as np\n'), ((1026, 1041), 'numpy.matmul', 'np.matmul', (['b', 'p'], {}), '(b, p)\n', (1035, 1041), True, 'import numpy as np\n')]
|
import cv2
import selectivesearch
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import numpy as np
#step1
image2="images/test2.png"
# Read the image with cv2
img = cv2.imread(image2,cv2.IMREAD_UNCHANGED)
# Invert: white-background/black-text becomes black-background/white-text
img=255-img
img_lbl, regions =selectivesearch.selective_search(img, scale=500, sigma=0.9, min_size=20)
print('start count===',len(regions))
# Step 2: filter out redundant windows
# 1) First filtering pass
candidates = []
for r in regions:
    # Skip duplicates
if r['rect'] in candidates:
continue
    # Skip regions that are too small or too large
if r['size'] < 200 or r['size']>20000:
continue
x, y, w, h = r['rect']
    # Skip regions whose aspect ratio is too far from square
if w / h > 1.5 or h / w > 2.0:
continue
candidates.append((x,y,w,h))
## ('len(candidates)', 34): 34 windows remain after the first pass
print ('len(candidates)',len(candidates))
# 2) Second filtering pass: for nested windows keep only the enclosing (larger) one
num_array=[]
for i in candidates:
if len(num_array)==0:
num_array.append(i)
else:
content=False
replace=-1
index=0
for j in num_array:
            ## The new window lies inside an existing one: drop it
if i[0]>=j[0] and i[0]+i[2]<=j[0]+j[2] and i[1]>=j[1] and i[1]+i[3]<=j[1]+j[3]:
content=True
break
            ## The new window encloses an existing one: replace the old window
elif i[0]<=j[0] and i[0]+i[2]>=j[0]+j[2] and i[1]<=j[1] and i[1]+i[3]>=j[1]+j[3]:
replace=index
break
index+=1
if not content:
if replace>=0:
num_array[replace]=i
else:
num_array.append(i)
# Number of windows left after the two filtering passes
num_windows = len(num_array)
# 10 windows remain after the second pass
print('len====', num_windows)
print('raw num_array===',num_array)
# Step 3: after the search the windows are ordered top-to-bottom but not left-to-right,
# so sort the top and bottom halves separately and then merge them
L1 = num_array[0:num_windows//2]
L2 = num_array[num_windows//2:]
L1.sort(key=lambda x:x[0]) ## sort the top half
print ('sorted L1==',L1)
L2.sort(key=lambda x:x[0]) ## sort the bottom half
print('sorted L2',L2)
L1.extend(L2)
print("Final selected windows:", L1)
# Step 5: crop each window and convert it to a standard 28x28 image
Width=28
Height=28
# Each row of img_sample holds one flattened window image
img_sample = np.zeros((num_windows, Width*Height))
i = 0
for rect in L1:
x, y, w, h = rect
    # Crop the window from the full image
img_cut = img[y :y+h, x:x +w,:]
    # Pad the cropped patch into a square
if w > h:
real_size=w
else:
real_size=h
top_padding=(real_size - h) // 2
left_padding=(real_size - w)//2
    # Apply the padding
img_cut = cv2.copyMakeBorder(img_cut,top_padding,top_padding,left_padding,left_padding,borderType=cv2.BORDER_REPLICATE)
    # Resize the square patch down to 28x28
img_resize = cv2.resize(img_cut, (Width, Height), interpolation=cv2.INTER_NEAREST)
    # Convert the resized patch to grayscale
gray = cv2.cvtColor(img_resize, cv2.COLOR_BGR2GRAY)
    # Save the generated patch to disk
cv2.imwrite('images/img_'+str(i)+'.png',gray)
    # Flatten the patch and store it in img_sample
img_sample[i, :] = gray.ravel()
i += 1
# Step 6: display the converted data as one long strip image
img_s = np.zeros((Width, Height * img_sample.shape[0]))
print('img_sample.shape===',img_sample.shape)
for i in range(img_sample.shape[0]):
img_s[:, i * Width:Height * (i + 1)] =img_sample[i, :].reshape(Width, Height)
fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(6, 6))
ax.imshow(img_s, cmap='gray')
plt.savefig("images/number.jpg", bbox_inch="tight")
plt.show()
|
[
"matplotlib.pyplot.savefig",
"cv2.copyMakeBorder",
"numpy.zeros",
"cv2.cvtColor",
"selectivesearch.selective_search",
"cv2.resize",
"cv2.imread",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] |
[((175, 215), 'cv2.imread', 'cv2.imread', (['image2', 'cv2.IMREAD_UNCHANGED'], {}), '(image2, cv2.IMREAD_UNCHANGED)\n', (185, 215), False, 'import cv2\n'), ((260, 332), 'selectivesearch.selective_search', 'selectivesearch.selective_search', (['img'], {'scale': '(500)', 'sigma': '(0.9)', 'min_size': '(20)'}), '(img, scale=500, sigma=0.9, min_size=20)\n', (292, 332), False, 'import selectivesearch\n'), ((1907, 1938), 'numpy.zeros', 'np.zeros', (['(len, Width * Height)'], {}), '((len, Width * Height))\n', (1915, 1938), True, 'import numpy as np\n'), ((2684, 2731), 'numpy.zeros', 'np.zeros', (['(Width, Height * img_sample.shape[0])'], {}), '((Width, Height * img_sample.shape[0]))\n', (2692, 2731), True, 'import numpy as np\n'), ((2907, 2953), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'ncols': '(1)', 'nrows': '(1)', 'figsize': '(6, 6)'}), '(ncols=1, nrows=1, figsize=(6, 6))\n', (2919, 2953), True, 'import matplotlib.pyplot as plt\n'), ((2984, 3035), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""images/number.jpg"""'], {'bbox_inch': '"""tight"""'}), "('images/number.jpg', bbox_inch='tight')\n", (2995, 3035), True, 'import matplotlib.pyplot as plt\n'), ((3036, 3046), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3044, 3046), True, 'import matplotlib.pyplot as plt\n'), ((2226, 2344), 'cv2.copyMakeBorder', 'cv2.copyMakeBorder', (['img_cut', 'top_padding', 'top_padding', 'left_padding', 'left_padding'], {'borderType': 'cv2.BORDER_REPLICATE'}), '(img_cut, top_padding, top_padding, left_padding,\n left_padding, borderType=cv2.BORDER_REPLICATE)\n', (2244, 2344), False, 'import cv2\n'), ((2374, 2443), 'cv2.resize', 'cv2.resize', (['img_cut', '(Width, Height)'], {'interpolation': 'cv2.INTER_NEAREST'}), '(img_cut, (Width, Height), interpolation=cv2.INTER_NEAREST)\n', (2384, 2443), False, 'import cv2\n'), ((2472, 2516), 'cv2.cvtColor', 'cv2.cvtColor', (['img_resize', 'cv2.COLOR_BGR2GRAY'], {}), '(img_resize, cv2.COLOR_BGR2GRAY)\n', (2484, 2516), False, 'import cv2\n')]
|
import argparse
import logging
from os import path
import signal
import subprocess
import sys
import time
import cv2
import numpy as np
from picamera.array import PiRGBArray
from picamera import PiCamera
from socketIO_client import SocketIO, BaseNamespace
# enable safe shutdown with ctrl+c
global running
running = True
def signal_handler(signal, frame):
global running
running = False
signal.signal(signal.SIGINT, signal_handler)
parser = argparse.ArgumentParser(description='Do fancy OpenCV stuff')
parser.add_argument('--preview', action='store_true')
args = parser.parse_args()
logging.basicConfig(format='[%(levelname)s|%(asctime)s] %(message)s', level=logging.WARNING, datefmt="%Y-%m-%d %H:%M:%S")
logger = logging.getLogger('magicmirror')
logger.setLevel(logging.INFO)
logger.info('starting socketio...')
io = SocketIO('localhost', 8101)
io2 = SocketIO('localhost', 8104)
io_namespace = io.define(BaseNamespace, '/vision')
io_namespace2 = io2.define(BaseNamespace, '/vision')
resolution = (320, 240)
box_size = (.3, .5)
box_w = int(box_size[0] * resolution[0])
box_h = int(box_size[1] * resolution[1])
box_x = int((resolution[0] - box_w) / 2)
box_y = int((resolution[1] - box_h) / 2)
box_top_left = (box_x, box_y)
box_bot_right = (box_w + box_x, box_h + box_y)
current_dir = path.dirname(__file__)
face_cascade = cv2.CascadeClassifier(path.join(current_dir, 'haarcascade_frontalface_default.xml'))
def mse(imageA, imageB):
# the 'Mean Squared Error' between the two images is the
# sum of the squared difference between the two images;
# NOTE: the two images must have the same dimension
err = np.sum((imageA.astype("float") - imageB.astype("float")) ** 2)
err /= float(imageA.shape[0] * imageA.shape[1])
# return the MSE, the lower the error, the more "similar"
# the two images are
return err
class MotionObject(object):
"""
Super simple tracking of a point over time
"""
def __init__(self):
now = time.time()
self.last_hit = now
self.last_point = None
self.initialized = now
self.average_point = None
def reset(self):
self.last_hit = now
self.last_point = None
self.average_point = None
self.initialized = now
def hit(self, point, now=None):
if now is None:
now = time.time()
self.last_hit = now
self.last_point = point
# running average, weighted towards most recent
x = point[0]
y = point[1]
if self.average_point is None:
self.average_point = (x, y)
else:
avg_x = (self.average_point[0] + x) / 2
avg_y = (self.average_point[1] + y) / 2
self.average_point = (avg_x, avg_y)
def active_for(self, now=None):
if now is None:
now = time.time()
return now - self.initialized
def inactive_for(self):
return time.time() - self.last_hit
def diff_from_mean(self, point=None):
if point is None:
point = self.last_point
if point is None:
return None
return (point[0] - self.average_point[0],
point[1] - self.average_point[1])
class RateLimit(object):
"""
Initialize with a function to be called. Calling this object calls the
function a maximum of once every min_time.
"""
def __init__(self, func, min_time=1):
self.min_time = min_time
self.func = func
self.last_called = 0 # forever ago
def __call__(self, *args, **kwargs):
now = time.time()
if now - self.last_called > self.min_time:
self.func(*args, **kwargs)
self.last_called = now
class WaitLimit(RateLimit):
"""
    Same interface as RateLimit, but last_called is updated on every call (not
    only when the function runs), so the wrapped function fires only after a
    gap of more than min_time since the previous call.
"""
def __call__(self, *args, **kwargs):
now = time.time()
if now - self.last_called > self.min_time:
self.func(*args, **kwargs)
self.last_called = now
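# Illustrative difference between the two limiters (comment-only sketch): in a
# tight loop, RateLimit(func, 1) keeps firing about once per second, while
# WaitLimit(func, 1) fires on the first call and then stays quiet until more
# than a second passes without any call, because every call pushes
# last_called forward.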
def _fpsLogger(t):
logger.debug('fps: {}'.format(1 / t))
fpsLogger = RateLimit(_fpsLogger, 5)
def _triggerMotion(val):
logger.info('motion')
io_namespace.emit('motion', {'val': val})
io_namespace2.emit('motion', {'val': val})
triggerMotion = RateLimit(_triggerMotion, 2)
def _wakeTv():
# if someone looks at this in the middle of the night, turn on and let cron
# turn it off at the next opportunity
subprocess.Popen(' '.join([path.join(current_dir, '..', 'scripts', 'tv.sh'), 'on']), shell=True, stderr=subprocess.STDOUT)
wakeTv = WaitLimit(_wakeTv, 60)
# continous data
first_frame = True
seeing_face = 0
last_seen_face = 0
first_saw_face = 0
last_action = 0
last_fps = 0
last_image = None
# initialize camera
logger.info('initializing camera...')
with PiCamera() as camera:
# give camera time to start up
time.sleep(1)
camera.resolution = resolution
camera.framerate = 30 # max framerate
with PiRGBArray(camera, size=resolution) as stream:
logger.info('starting capture...')
for frame in camera.capture_continuous(stream, format="bgr", use_video_port=True):
now = time.time()
if not running:
break
image = frame.array
if args.preview:
preview = image.copy()
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) # img[y: y + h, x: x + w]
# face detection
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
num_faces = len(faces)
if num_faces:
io_namespace.emit('faces', [{
'x': x / resolution[0],
'y': y / resolution[1],
'w': w / resolution[0],
'h': h / resolution[1]
} for (x, y, w, h) in faces])
io_namespace2.emit('faces', [{
'x': x / resolution[0],
'y': y / resolution[1],
'w': w / resolution[0],
'h': h / resolution[1]
} for (x, y, w, h) in faces])
if args.preview:
for (x, y, w, h) in faces:
cv2.rectangle(preview, (x, y), (x + w, y + h), (255, 0, 0), 2)
# Record some data about the face being seen
last_seen_face = now
if not seeing_face:
first_saw_face = now
if seeing_face != num_faces:
logger.info('{} face{} found'.format(num_faces, '' if num_faces == 1 else 's'))
seeing_face = num_faces
#wakeTv()
else:
# timeout for a face to really be gone
# this accounts for not recognizing a face for a frame or two at a time
if now - last_seen_face > 1:
if seeing_face:
logger.info('face lost')
seeing_face = 0
if first_frame:
fps = None
else:
frame_time = now - last_frame_time
if fps is None:
fps = frame_time
else:
fps = (fps + frame_time) / 2
fpsLogger(frame_time)
            # Motion detection: diff the current frame against the previous one
            # and threshold; skip on the first frame, when last_image is None.
            processedImage = cv2.GaussianBlur(gray, (21, 21), 0)
            if last_image is not None:
                processedImage = cv2.absdiff(last_image, gray)
                processedImage = cv2.threshold(processedImage, 25, 255, cv2.THRESH_BINARY)[1]
                (means, stds) = cv2.meanStdDev(processedImage)
                stdsum = int(np.sum(stds))
                # print('|-{}'.format(''.join(['-'] * int(stdsum))), means, stds, stdsum)
                if stdsum > 2:
                    triggerMotion(stdsum)
if args.preview:
cv2.imshow('Preview', processedImage)
#if not first_frame:
# cv2.imshow('Data', diff_image)
if cv2.waitKey(1) & 0xFF == ord('q'):
running = False
first_frame = False
last_frame_time = now
last_image = gray
fps = 0
# clear the stream for next frame
stream.seek(0)
stream.truncate()
|
[
"logging.getLogger",
"cv2.meanStdDev",
"cv2.rectangle",
"time.sleep",
"cv2.imshow",
"socketIO_client.SocketIO",
"argparse.ArgumentParser",
"cv2.threshold",
"picamera.array.PiRGBArray",
"cv2.waitKey",
"picamera.PiCamera",
"os.path.dirname",
"cv2.cvtColor",
"cv2.GaussianBlur",
"time.time",
"logging.basicConfig",
"signal.signal",
"os.path.join",
"numpy.sum",
"cv2.absdiff"
] |
[((398, 442), 'signal.signal', 'signal.signal', (['signal.SIGINT', 'signal_handler'], {}), '(signal.SIGINT, signal_handler)\n', (411, 442), False, 'import signal\n'), ((453, 513), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Do fancy OpenCV stuff"""'}), "(description='Do fancy OpenCV stuff')\n", (476, 513), False, 'import argparse\n'), ((596, 722), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""[%(levelname)s|%(asctime)s] %(message)s"""', 'level': 'logging.WARNING', 'datefmt': '"""%Y-%m-%d %H:%M:%S"""'}), "(format='[%(levelname)s|%(asctime)s] %(message)s', level\n =logging.WARNING, datefmt='%Y-%m-%d %H:%M:%S')\n", (615, 722), False, 'import logging\n'), ((727, 759), 'logging.getLogger', 'logging.getLogger', (['"""magicmirror"""'], {}), "('magicmirror')\n", (744, 759), False, 'import logging\n'), ((832, 859), 'socketIO_client.SocketIO', 'SocketIO', (['"""localhost"""', '(8101)'], {}), "('localhost', 8101)\n", (840, 859), False, 'from socketIO_client import SocketIO, BaseNamespace\n'), ((866, 893), 'socketIO_client.SocketIO', 'SocketIO', (['"""localhost"""', '(8104)'], {}), "('localhost', 8104)\n", (874, 893), False, 'from socketIO_client import SocketIO, BaseNamespace\n'), ((1299, 1321), 'os.path.dirname', 'path.dirname', (['__file__'], {}), '(__file__)\n', (1311, 1321), False, 'from os import path\n'), ((1359, 1420), 'os.path.join', 'path.join', (['current_dir', '"""haarcascade_frontalface_default.xml"""'], {}), "(current_dir, 'haarcascade_frontalface_default.xml')\n", (1368, 1420), False, 'from os import path\n'), ((4888, 4898), 'picamera.PiCamera', 'PiCamera', ([], {}), '()\n', (4896, 4898), False, 'from picamera import PiCamera\n'), ((4949, 4962), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (4959, 4962), False, 'import time\n'), ((1985, 1996), 'time.time', 'time.time', ([], {}), '()\n', (1994, 1996), False, 'import time\n'), ((3583, 3594), 'time.time', 'time.time', ([], {}), '()\n', (3592, 3594), False, 'import time\n'), ((3966, 3977), 'time.time', 'time.time', ([], {}), '()\n', (3975, 3977), False, 'import time\n'), ((5051, 5086), 'picamera.array.PiRGBArray', 'PiRGBArray', (['camera'], {'size': 'resolution'}), '(camera, size=resolution)\n', (5061, 5086), False, 'from picamera.array import PiRGBArray\n'), ((2346, 2357), 'time.time', 'time.time', ([], {}), '()\n', (2355, 2357), False, 'import time\n'), ((2842, 2853), 'time.time', 'time.time', ([], {}), '()\n', (2851, 2853), False, 'import time\n'), ((2936, 2947), 'time.time', 'time.time', ([], {}), '()\n', (2945, 2947), False, 'import time\n'), ((5250, 5261), 'time.time', 'time.time', ([], {}), '()\n', (5259, 5261), False, 'import time\n'), ((5432, 5471), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (5444, 5471), False, 'import cv2\n'), ((4558, 4606), 'os.path.join', 'path.join', (['current_dir', '""".."""', '"""scripts"""', '"""tv.sh"""'], {}), "(current_dir, '..', 'scripts', 'tv.sh')\n", (4567, 4606), False, 'from os import path\n'), ((7408, 7443), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['gray', '(21, 21)', '(0)'], {}), '(gray, (21, 21), 0)\n', (7424, 7443), False, 'import cv2\n'), ((7477, 7506), 'cv2.absdiff', 'cv2.absdiff', (['last_image', 'gray'], {}), '(last_image, gray)\n', (7488, 7506), False, 'import cv2\n'), ((7633, 7663), 'cv2.meanStdDev', 'cv2.meanStdDev', (['processedImage'], {}), '(processedImage)\n', (7647, 7663), False, 'import cv2\n'), ((7540, 7597), 'cv2.threshold', 'cv2.threshold', 
(['processedImage', '(25)', '(255)', 'cv2.THRESH_BINARY'], {}), '(processedImage, 25, 255, cv2.THRESH_BINARY)\n', (7553, 7597), False, 'import cv2\n'), ((7693, 7705), 'numpy.sum', 'np.sum', (['stds'], {}), '(stds)\n', (7699, 7705), True, 'import numpy as np\n'), ((7925, 7962), 'cv2.imshow', 'cv2.imshow', (['"""Preview"""', 'processedImage'], {}), "('Preview', processedImage)\n", (7935, 7962), False, 'import cv2\n'), ((8076, 8090), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (8087, 8090), False, 'import cv2\n'), ((6294, 6356), 'cv2.rectangle', 'cv2.rectangle', (['preview', '(x, y)', '(x + w, y + h)', '(255, 0, 0)', '(2)'], {}), '(preview, (x, y), (x + w, y + h), (255, 0, 0), 2)\n', (6307, 6356), False, 'import cv2\n')]
|
import tensorflow as tf
import os
import time
import numpy as np
import glob
import matplotlib.pyplot as plt
import PIL
import imageio
import argparse
import dataset as dt
INPUT_SHAPE = (32, 32, 1)
tf.random.set_seed(777)
NORM_LIST = ["interframe_minmax", "est_minmax", "zscore"]
class ConvVAE(tf.keras.Model):
def __init__(self, latent_dim):
super(ConvVAE, self).__init__()
self.latent_dim = latent_dim
self.inference_net = tf.keras.Sequential(
[
tf.keras.layers.InputLayer(input_shape=INPUT_SHAPE),
tf.keras.layers.Conv2D(
filters=32, kernel_size=3, strides=(2, 2), activation='relu'),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.Conv2D(
filters=64, kernel_size=3, strides=(2, 2), activation='relu'),
tf.keras.layers.Flatten(),
# No activation
tf.keras.layers.Dense(latent_dim + latent_dim),
]
)
self.generative_net = tf.keras.Sequential(
[
tf.keras.layers.InputLayer(input_shape=(latent_dim,)),
tf.keras.layers.Dense(units=8*8*32, activation=tf.nn.relu),
tf.keras.layers.Reshape(target_shape=(8, 8, 32)),
tf.keras.layers.Conv2DTranspose(
filters=64,
kernel_size=3,
strides=(2, 2),
padding="SAME",
activation='relu'),
tf.keras.layers.Conv2DTranspose(
filters=32,
kernel_size=3,
strides=(2, 2),
padding="SAME",
activation='relu'),
# No activation
tf.keras.layers.Conv2DTranspose(
filters=1, kernel_size=3, strides=(1, 1), padding="SAME"),
]
)
@tf.function
def sample(self, eps=None):
if eps is None:
eps=tf.random.normal(shape=(100, self.latent_dim))
return self.decode(eps, apply_sigmoid=False)
def encode(self, x):
mean, logvar=tf.split(self.inference_net(
x), num_or_size_splits=2, axis=1)
return mean, logvar
def reparameterize(self, mean, logvar):
eps=tf.random.normal(shape=mean.shape)
return eps * tf.exp(logvar * .5) + mean
def decode(self, z, apply_sigmoid=False):
logits=self.generative_net(z)
if apply_sigmoid:
probs=tf.sigmoid(logits)
return probs
return logits
#END OF CLASS
def log_normal_pdf(sample, mean, logvar, raxis=1):
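    # Log-density of a diagonal Gaussian N(mean, exp(logvar)) evaluated at
    # `sample`, summed over axis `raxis`.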
log2pi=tf.math.log(2. * np.pi)
return tf.reduce_sum(
-.5 * ((sample - mean) ** 2. * \
tf.exp(-logvar) + logvar + log2pi),
axis=raxis)
@tf.function
def compute_loss(model, x):
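    # Single-sample Monte Carlo estimate of the negative ELBO:
    # -E[log p(x|z) + log p(z) - log q(z|x)], with a Bernoulli decoder
    # (sigmoid cross-entropy) and Gaussian prior/approximate posterior.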
mean, logvar=model.encode(x)
z=model.reparameterize(mean, logvar)
x_logit=model.decode(z)
cross_ent=tf.nn.sigmoid_cross_entropy_with_logits(
logits=x_logit, labels=x)
logpx_z=-tf.reduce_sum(cross_ent, axis=[1, 2, 3])
logpz=log_normal_pdf(z, 0., 0.)
logqz_x=log_normal_pdf(z, mean, logvar)
return -tf.reduce_mean(logpx_z + logpz - logqz_x)
@tf.function
def compute_apply_gradients(model, x, optimizer):
with tf.GradientTape() as tape:
loss=compute_loss(model, x)
gradients=tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(
zip(gradients, model.trainable_variables))
def generate_and_save_images(model, epoch, test_input, directory, title):
predictions = model.sample(test_input)
fig = plt.figure(figsize=(4,4))
plt.suptitle(title)
for i in range(predictions.shape[0]):
plt.subplot(4, 4, i+1)
plt.imshow(predictions[i, :, :, 0], cmap='jet')
plt.axis('off')
# tight_layout minimizes the overlap between 2 sub-plots
plt.savefig(os.path.join(directory, 'image_at_epoch_{:04d}.png'.format(epoch)))
plt.close(fig)
def plot_ELBO(train_elbo_log, test_elbo_log, model_dir, prefix="", suffix=""):
plt.plot(np.array(train_elbo_log), ls='-', color='blue')
plt.plot(np.array(test_elbo_log), ls='--', color='blue')
plt.title('model ELBO')
plt.ylabel('ELBO')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.savefig(os.path.join(model_dir, prefix+"model_ELBO"+suffix+".png"))
plt.close()
return
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--output_dir',
type=str,
default="output",
help='Path to the output folder')
parser.add_argument('--tmp_dir',
type=str,
default="tmp",
help='Path to the tmp files folder')
parser.add_argument('--epochs',
type=int,
default=250,
help='How many epochs to train.')
parser.add_argument('--norm',
type=str,
default="interframe_minmax",
help='Normalization method.')
parser.add_argument('--lr',
type=float,
default=1e-4,
help='Learning rate.')
parser.add_argument('--num_examples_to_generate',
type=int,
default=16,
                        help='How many examples to generate in the visualization gif.')
parser.add_argument('--latent_dim',
type=int,
default=100,
                        help='Dimensionality of the latent space.')
parser.add_argument('--prefix',
type=str,
default="",
help='Prefix to identify the files.')
parser.add_argument('--suffix',
type=str,
default="",
                        help='Suffix to identify the files.')
parser.add_argument('--min',
type=float,
default=None,
help='Estimate of min temp.')
parser.add_argument('--max',
type=float,
default=None,
help='Estimate of max temp.')
FLAGS, unparsed = parser.parse_known_args()
def make_sure_path_exists(dir):
if not os.path.exists(dir):
os.mkdir(dir)
make_sure_path_exists(FLAGS.tmp_dir)
make_sure_path_exists(FLAGS.output_dir)
filenames = glob.glob(os.path.join(FLAGS.tmp_dir,'image*.png'))
for filename in filenames:
os.remove(filename)
if FLAGS.norm not in NORM_LIST:
raise ValueError
directory_path, ids = dt.DATASETS["20200131"]["filepath"], dt.DATASETS["20200131"]["ids"]
dataset = dt.Dataloader_RAM(directory_path, ids)
processor = dt.Processor()
data = dataset.load()
data = processor.align_timestamps(data) # align frames ()
data = processor.retime(data, step = 3)
train_images = np.vstack(data[0][0])
test_images = np.vstack(data[0][1])
train_images = train_images.reshape(train_images.shape[0], *INPUT_SHAPE).astype('float32')
test_images = test_images.reshape(test_images.shape[0], *INPUT_SHAPE).astype('float32')
    # normalization
def minmax_norm(images, min = None, max = None):
#interframe normalization, the set is assumed to come from the same recording here!
if not min:
min = images.min()
if not max:
max = images.max()
return (images-min)/(max-min)
def zscore(images, mean, std):
if not mean:
mean = images.mean()
if not std:
std = images.std()
return (images - mean)/std
def normalize_sets(norm : str, train_set, test_set, min = None, max = None):
mean = None
std = None
if (norm == "interframe_minmax") or (norm == "est_minmax"):
return minmax_norm(train_set, min, max), minmax_norm(test_set, min, max)
if norm == "zscore":
if not (mean or std):
tmp_stack = np.vstack([train_images, test_images])
if not mean:
mean = tmp_stack.mean()
if not std:
std = tmp_stack.std()
#mean = 0 for the project.
return zscore(train_set, 0, std)/10, zscore(test_set, 0, std)/10
return None
train_images, test_images = normalize_sets(FLAGS.norm, train_images, test_images, FLAGS.min, FLAGS.max)
TRAIN_BUF = 60000
BATCH_SIZE = 2300
TEST_BUF = 10000
train_dataset = tf.data.Dataset.from_tensor_slices(train_images).shuffle(TRAIN_BUF).batch(BATCH_SIZE)
test_dataset = tf.data.Dataset.from_tensor_slices(test_images).shuffle(TEST_BUF).batch(BATCH_SIZE)
optimizer=tf.keras.optimizers.Adam(FLAGS.lr)
random_vector_for_generation=tf.random.normal(
shape=[FLAGS.num_examples_to_generate, FLAGS.latent_dim])
model=ConvVAE(FLAGS.latent_dim)
gasi_title_template = "Normalization: {} \n LR {} LS {} Epoch {}"
gasi_title = gasi_title_template.format(FLAGS.norm, str(FLAGS.lr), str(FLAGS.latent_dim), str(0))
generate_and_save_images(model, 0, random_vector_for_generation, FLAGS.tmp_dir, title=gasi_title)
train_loss_log = []
test_loss_log = []
for epoch in range(1, FLAGS.epochs + 1):
start_time = time.time()
train_loss = tf.keras.metrics.Mean()
for train_x in train_dataset:
compute_apply_gradients(model, train_x, optimizer)
train_loss(compute_loss(model, train_x))
train_elbo = -train_loss.result()
end_time = time.time()
if epoch % 1 == 0:
loss = tf.keras.metrics.Mean()
for test_x in test_dataset:
loss(compute_loss(model, test_x))
elbo = -loss.result()
print('Epoch: {}, Train set ELBO: {}. Test set ELBO: {}, '
'time elapse for current epoch {}'.format(epoch, train_elbo,
elbo,
end_time - start_time))
gasi_title = gasi_title_template.format(FLAGS.norm, str(FLAGS.lr), str(FLAGS.latent_dim), str(epoch))
generate_and_save_images(
model, epoch, random_vector_for_generation, FLAGS.tmp_dir, title=gasi_title)
if (np.any(np.isnan(train_loss_log)) or np.any(np.isnan(test_loss_log))):
break
train_loss_log.append(train_elbo)
test_loss_log.append(elbo)
plot_ELBO(train_loss_log, test_loss_log, FLAGS.output_dir, FLAGS.prefix, FLAGS.suffix)
anim_file = os.path.join(FLAGS.output_dir, FLAGS.prefix+'convVAE'+FLAGS.suffix+'.gif')
with imageio.get_writer(anim_file, mode='I') as writer:
filenames = glob.glob(os.path.join(FLAGS.tmp_dir,'image*.png'))
filenames = sorted(filenames)
last = -1
for i,filename in enumerate(filenames):
frame = 2*(i**0.5)
if round(frame) > round(last):
last = frame
else:
continue
image = imageio.imread(filename)
writer.append_data(image)
image = imageio.imread(filename)
writer.append_data(image)
|
[
"matplotlib.pyplot.ylabel",
"tensorflow.math.log",
"tensorflow.reduce_sum",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.GradientTape",
"numpy.array",
"dataset.Processor",
"tensorflow.keras.layers.Dense",
"tensorflow.reduce_mean",
"imageio.get_writer",
"os.remove",
"matplotlib.pyplot.imshow",
"tensorflow.random.normal",
"os.path.exists",
"tensorflow.keras.layers.Reshape",
"tensorflow.keras.layers.Conv2D",
"argparse.ArgumentParser",
"tensorflow.data.Dataset.from_tensor_slices",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.close",
"dataset.Dataloader_RAM",
"numpy.vstack",
"os.mkdir",
"matplotlib.pyplot.axis",
"tensorflow.keras.layers.InputLayer",
"tensorflow.keras.metrics.Mean",
"tensorflow.sigmoid",
"numpy.isnan",
"tensorflow.nn.sigmoid_cross_entropy_with_logits",
"matplotlib.pyplot.title",
"imageio.imread",
"tensorflow.keras.layers.Flatten",
"time.time",
"matplotlib.pyplot.suptitle",
"matplotlib.pyplot.legend",
"tensorflow.random.set_seed",
"tensorflow.keras.layers.Conv2DTranspose",
"os.path.join",
"tensorflow.keras.optimizers.Adam",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.subplot",
"tensorflow.exp"
] |
[((201, 224), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['(777)'], {}), '(777)\n', (219, 224), True, 'import tensorflow as tf\n'), ((2581, 2605), 'tensorflow.math.log', 'tf.math.log', (['(2.0 * np.pi)'], {}), '(2.0 * np.pi)\n', (2592, 2605), True, 'import tensorflow as tf\n'), ((2903, 2968), 'tensorflow.nn.sigmoid_cross_entropy_with_logits', 'tf.nn.sigmoid_cross_entropy_with_logits', ([], {'logits': 'x_logit', 'labels': 'x'}), '(logits=x_logit, labels=x)\n', (2942, 2968), True, 'import tensorflow as tf\n'), ((3570, 3596), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(4, 4)'}), '(figsize=(4, 4))\n', (3580, 3596), True, 'import matplotlib.pyplot as plt\n'), ((3598, 3617), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['title'], {}), '(title)\n', (3610, 3617), True, 'import matplotlib.pyplot as plt\n'), ((3908, 3922), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (3917, 3922), True, 'import matplotlib.pyplot as plt\n'), ((4129, 4152), 'matplotlib.pyplot.title', 'plt.title', (['"""model ELBO"""'], {}), "('model ELBO')\n", (4138, 4152), True, 'import matplotlib.pyplot as plt\n'), ((4157, 4175), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""ELBO"""'], {}), "('ELBO')\n", (4167, 4175), True, 'import matplotlib.pyplot as plt\n'), ((4180, 4199), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (4190, 4199), True, 'import matplotlib.pyplot as plt\n'), ((4204, 4257), 'matplotlib.pyplot.legend', 'plt.legend', (["['train', 'validation']"], {'loc': '"""upper left"""'}), "(['train', 'validation'], loc='upper left')\n", (4214, 4257), True, 'import matplotlib.pyplot as plt\n'), ((4338, 4349), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4347, 4349), True, 'import matplotlib.pyplot as plt\n'), ((4406, 4431), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4429, 4431), False, 'import argparse\n'), ((6865, 6903), 'dataset.Dataloader_RAM', 'dt.Dataloader_RAM', (['directory_path', 'ids'], {}), '(directory_path, ids)\n', (6882, 6903), True, 'import dataset as dt\n'), ((6920, 6934), 'dataset.Processor', 'dt.Processor', ([], {}), '()\n', (6932, 6934), True, 'import dataset as dt\n'), ((7087, 7108), 'numpy.vstack', 'np.vstack', (['data[0][0]'], {}), '(data[0][0])\n', (7096, 7108), True, 'import numpy as np\n'), ((7127, 7148), 'numpy.vstack', 'np.vstack', (['data[0][1]'], {}), '(data[0][1])\n', (7136, 7148), True, 'import numpy as np\n'), ((8920, 8954), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', (['FLAGS.lr'], {}), '(FLAGS.lr)\n', (8944, 8954), True, 'import tensorflow as tf\n'), ((8994, 9068), 'tensorflow.random.normal', 'tf.random.normal', ([], {'shape': '[FLAGS.num_examples_to_generate, FLAGS.latent_dim]'}), '(shape=[FLAGS.num_examples_to_generate, FLAGS.latent_dim])\n', (9010, 9068), True, 'import tensorflow as tf\n'), ((10843, 10928), 'os.path.join', 'os.path.join', (['FLAGS.output_dir', "(FLAGS.prefix + 'convVAE' + FLAGS.suffix + '.gif')"], {}), "(FLAGS.output_dir, FLAGS.prefix + 'convVAE' + FLAGS.suffix + '.gif'\n )\n", (10855, 10928), False, 'import os\n'), ((2225, 2259), 'tensorflow.random.normal', 'tf.random.normal', ([], {'shape': 'mean.shape'}), '(shape=mean.shape)\n', (2241, 2259), True, 'import tensorflow as tf\n'), ((2991, 3031), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['cross_ent'], {'axis': '[1, 2, 3]'}), '(cross_ent, axis=[1, 2, 3])\n', (3004, 3031), True, 'import tensorflow as tf\n'), ((3124, 3165), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['(logpx_z + 
logpz - logqz_x)'], {}), '(logpx_z + logpz - logqz_x)\n', (3138, 3165), True, 'import tensorflow as tf\n'), ((3240, 3257), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (3255, 3257), True, 'import tensorflow as tf\n'), ((3665, 3689), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(4)', '(4)', '(i + 1)'], {}), '(4, 4, i + 1)\n', (3676, 3689), True, 'import matplotlib.pyplot as plt\n'), ((3694, 3741), 'matplotlib.pyplot.imshow', 'plt.imshow', (['predictions[i, :, :, 0]'], {'cmap': '"""jet"""'}), "(predictions[i, :, :, 0], cmap='jet')\n", (3704, 3741), True, 'import matplotlib.pyplot as plt\n'), ((3748, 3763), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (3756, 3763), True, 'import matplotlib.pyplot as plt\n'), ((4016, 4040), 'numpy.array', 'np.array', (['train_elbo_log'], {}), '(train_elbo_log)\n', (4024, 4040), True, 'import numpy as np\n'), ((4077, 4100), 'numpy.array', 'np.array', (['test_elbo_log'], {}), '(test_elbo_log)\n', (4085, 4100), True, 'import numpy as np\n'), ((4274, 4338), 'os.path.join', 'os.path.join', (['model_dir', "(prefix + 'model_ELBO' + suffix + '.png')"], {}), "(model_dir, prefix + 'model_ELBO' + suffix + '.png')\n", (4286, 4338), False, 'import os\n'), ((6590, 6631), 'os.path.join', 'os.path.join', (['FLAGS.tmp_dir', '"""image*.png"""'], {}), "(FLAGS.tmp_dir, 'image*.png')\n", (6602, 6631), False, 'import os\n'), ((6671, 6690), 'os.remove', 'os.remove', (['filename'], {}), '(filename)\n', (6680, 6690), False, 'import os\n'), ((9507, 9518), 'time.time', 'time.time', ([], {}), '()\n', (9516, 9518), False, 'import time\n'), ((9540, 9563), 'tensorflow.keras.metrics.Mean', 'tf.keras.metrics.Mean', ([], {}), '()\n', (9561, 9563), True, 'import tensorflow as tf\n'), ((9779, 9790), 'time.time', 'time.time', ([], {}), '()\n', (9788, 9790), False, 'import time\n'), ((10927, 10966), 'imageio.get_writer', 'imageio.get_writer', (['anim_file'], {'mode': '"""I"""'}), "(anim_file, mode='I')\n", (10945, 10966), False, 'import imageio\n'), ((11399, 11423), 'imageio.imread', 'imageio.imread', (['filename'], {}), '(filename)\n', (11413, 11423), False, 'import imageio\n'), ((1918, 1964), 'tensorflow.random.normal', 'tf.random.normal', ([], {'shape': '(100, self.latent_dim)'}), '(shape=(100, self.latent_dim))\n', (1934, 1964), True, 'import tensorflow as tf\n'), ((2437, 2455), 'tensorflow.sigmoid', 'tf.sigmoid', (['logits'], {}), '(logits)\n', (2447, 2455), True, 'import tensorflow as tf\n'), ((6432, 6451), 'os.path.exists', 'os.path.exists', (['dir'], {}), '(dir)\n', (6446, 6451), False, 'import os\n'), ((6465, 6478), 'os.mkdir', 'os.mkdir', (['dir'], {}), '(dir)\n', (6473, 6478), False, 'import os\n'), ((9838, 9861), 'tensorflow.keras.metrics.Mean', 'tf.keras.metrics.Mean', ([], {}), '()\n', (9859, 9861), True, 'import tensorflow as tf\n'), ((11008, 11049), 'os.path.join', 'os.path.join', (['FLAGS.tmp_dir', '"""image*.png"""'], {}), "(FLAGS.tmp_dir, 'image*.png')\n", (11020, 11049), False, 'import os\n'), ((11320, 11344), 'imageio.imread', 'imageio.imread', (['filename'], {}), '(filename)\n', (11334, 11344), False, 'import imageio\n'), ((501, 552), 'tensorflow.keras.layers.InputLayer', 'tf.keras.layers.InputLayer', ([], {'input_shape': 'INPUT_SHAPE'}), '(input_shape=INPUT_SHAPE)\n', (527, 552), True, 'import tensorflow as tf\n'), ((566, 654), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', ([], {'filters': '(32)', 'kernel_size': '(3)', 'strides': '(2, 2)', 'activation': '"""relu"""'}), "(filters=32, kernel_size=3, strides=(2, 
2),\n activation='relu')\n", (588, 654), True, 'import tensorflow as tf\n'), ((681, 717), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (715, 717), True, 'import tensorflow as tf\n'), ((731, 819), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', ([], {'filters': '(64)', 'kernel_size': '(3)', 'strides': '(2, 2)', 'activation': '"""relu"""'}), "(filters=64, kernel_size=3, strides=(2, 2),\n activation='relu')\n", (753, 819), True, 'import tensorflow as tf\n'), ((846, 871), 'tensorflow.keras.layers.Flatten', 'tf.keras.layers.Flatten', ([], {}), '()\n', (869, 871), True, 'import tensorflow as tf\n'), ((913, 959), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(latent_dim + latent_dim)'], {}), '(latent_dim + latent_dim)\n', (934, 959), True, 'import tensorflow as tf\n'), ((1059, 1112), 'tensorflow.keras.layers.InputLayer', 'tf.keras.layers.InputLayer', ([], {'input_shape': '(latent_dim,)'}), '(input_shape=(latent_dim,))\n', (1085, 1112), True, 'import tensorflow as tf\n'), ((1126, 1188), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': '(8 * 8 * 32)', 'activation': 'tf.nn.relu'}), '(units=8 * 8 * 32, activation=tf.nn.relu)\n', (1147, 1188), True, 'import tensorflow as tf\n'), ((1198, 1246), 'tensorflow.keras.layers.Reshape', 'tf.keras.layers.Reshape', ([], {'target_shape': '(8, 8, 32)'}), '(target_shape=(8, 8, 32))\n', (1221, 1246), True, 'import tensorflow as tf\n'), ((1260, 1373), 'tensorflow.keras.layers.Conv2DTranspose', 'tf.keras.layers.Conv2DTranspose', ([], {'filters': '(64)', 'kernel_size': '(3)', 'strides': '(2, 2)', 'padding': '"""SAME"""', 'activation': '"""relu"""'}), "(filters=64, kernel_size=3, strides=(2, 2),\n padding='SAME', activation='relu')\n", (1291, 1373), True, 'import tensorflow as tf\n'), ((1464, 1577), 'tensorflow.keras.layers.Conv2DTranspose', 'tf.keras.layers.Conv2DTranspose', ([], {'filters': '(32)', 'kernel_size': '(3)', 'strides': '(2, 2)', 'padding': '"""SAME"""', 'activation': '"""relu"""'}), "(filters=32, kernel_size=3, strides=(2, 2),\n padding='SAME', activation='relu')\n", (1495, 1577), True, 'import tensorflow as tf\n'), ((1696, 1789), 'tensorflow.keras.layers.Conv2DTranspose', 'tf.keras.layers.Conv2DTranspose', ([], {'filters': '(1)', 'kernel_size': '(3)', 'strides': '(1, 1)', 'padding': '"""SAME"""'}), "(filters=1, kernel_size=3, strides=(1, 1),\n padding='SAME')\n", (1727, 1789), True, 'import tensorflow as tf\n'), ((2281, 2301), 'tensorflow.exp', 'tf.exp', (['(logvar * 0.5)'], {}), '(logvar * 0.5)\n', (2287, 2301), True, 'import tensorflow as tf\n'), ((8196, 8234), 'numpy.vstack', 'np.vstack', (['[train_images, test_images]'], {}), '([train_images, test_images])\n', (8205, 8234), True, 'import numpy as np\n'), ((8716, 8764), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['train_images'], {}), '(train_images)\n', (8750, 8764), True, 'import tensorflow as tf\n'), ((8821, 8868), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['test_images'], {}), '(test_images)\n', (8855, 8868), True, 'import tensorflow as tf\n'), ((10552, 10576), 'numpy.isnan', 'np.isnan', (['train_loss_log'], {}), '(train_loss_log)\n', (10560, 10576), True, 'import numpy as np\n'), ((10588, 10611), 'numpy.isnan', 'np.isnan', (['test_loss_log'], {}), '(test_loss_log)\n', (10596, 10611), True, 'import numpy as np\n'), ((2688, 2703), 'tensorflow.exp', 'tf.exp', (['(-logvar)'], {}), '(-logvar)\n', (2694, 2703), 
True, 'import tensorflow as tf\n')]
|
import numpy
import six
from chainer.backends import cuda
from chainer.backends import intel64
from chainer import function_node
from chainer.utils import type_check
def _cu_conv_sum(y, x, n):
# Convolutional sum
# TODO(beam2d): Use scan computation
rdim = x.size // (x.shape[0] * x.shape[1])
cuda.elementwise(
'raw T x, int32 rdim, int32 N, int32 n_', 'raw T y',
'''
int half_n = n_ / 2;
int offset = i / rdim * N * rdim + i % rdim;
float sum_part = 0;
for (int j = 0; j < N + half_n; ++j) {
if (j < N) {
sum_part += x[offset + j * rdim];
}
if (j >= n_) {
sum_part -= x[offset + (j - n_) * rdim];
}
if (j >= half_n) {
y[offset + (j - half_n) * rdim] = sum_part;
}
}
''', 'lrn_conv_sum')(x, rdim, x.shape[1], n, y,
size=x.shape[0] * rdim)
class LocalResponseNormalization(function_node.FunctionNode):
"""Cross-channel normalization function used in AlexNet."""
_use_ideep = False
def __init__(self, n=5, k=2, alpha=1e-4, beta=.75):
self.n = n
self.k = k
self.alpha = alpha
self.beta = beta
self.scale = None
self.indexes = None
self.unit_scale = None
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 1)
x_type, = in_types
type_check.expect(
x_type.dtype.kind == 'f',
x_type.ndim >= 2,
)
def forward_cpu(self, inputs):
if (intel64.should_use_ideep('>=auto')
and intel64.inputs_all_ready(inputs, (4,))):
self._use_ideep = True
return self.forward_ideep(inputs)
x, = inputs
self.retain_inputs((0,))
self.retain_outputs((0,))
half_n = self.n // 2
x2 = numpy.square(x)
sum_part = x2.copy()
for i in six.moves.range(1, half_n + 1):
sum_part[:, i:] += x2[:, :-i]
sum_part[:, :-i] += x2[:, i:]
self.unit_scale = self.k + self.alpha * sum_part
self.scale = self.unit_scale ** -self.beta
y = x * self.scale
return y,
def forward_ideep(self, inputs):
x, = inputs
self.retain_inputs((0,))
self.retain_outputs((0,))
param = intel64.ideep.localResponseNormalizationParam(
self.n, self.k, self.n * self.alpha, self.beta,
intel64.ideep.localResponseNormalizationParam.lrn_across_channels)
y, indexes = intel64.ideep.localResponseNormalization.Forward(
intel64.ideep.array(x), param)
self.indexes = indexes
return y,
def forward_gpu(self, inputs):
x, = inputs
self.retain_inputs((0,))
self.retain_outputs((0,))
self.y = cuda.cupy.square(x) # temporary
self.scale = cuda.cupy.empty_like(self.y)
_cu_conv_sum(self.scale, self.y, self.n)
cuda.elementwise(
'T x, T k, T alpha, T beta',
'T y, T scale',
'''scale = k + alpha * scale;
y = x * pow(scale, -beta);''',
'lrn_fwd')(x, self.k, self.alpha, self.beta,
self.y, self.scale)
return self.y,
def backward(self, indexes, grad_outputs):
x, = self.get_retained_inputs()
y, = self.get_retained_outputs()
gy, = grad_outputs
f = LocalResponseNormalizationGrad(
self.n, self.k, self.alpha, self.beta, self._use_ideep,
self.scale, self.indexes, self.unit_scale,)
return f.apply((x, y, gy))
class LocalResponseNormalizationGrad(function_node.FunctionNode):
def __init__(self, n, k, alpha, beta, use_ideep,
scale=None, indexes=None, unit_scale=None):
self.n = n
self.k = k
self.alpha = alpha
self.beta = beta
self._use_ideep = use_ideep
self.scale = scale
self.indexes = indexes
self.unit_scale = unit_scale
def forward_cpu(self, inputs):
if self._use_ideep:
return self._backward_ideep(inputs)
x, y, gy = inputs
half_n = self.n // 2
summand = y * gy / self.unit_scale
sum_part = summand.copy()
for i in six.moves.range(1, half_n + 1):
sum_part[:, i:] += summand[:, :-i]
sum_part[:, :-i] += summand[:, i:]
gx = gy * self.scale - 2 * self.alpha * self.beta * x * sum_part
return gx,
def _backward_ideep(self, inputs):
x, y, gy = inputs
param = intel64.ideep.localResponseNormalizationParam(
self.n, self.k, self.n * self.alpha, self.beta,
intel64.ideep.localResponseNormalizationParam.lrn_across_channels
)
gx = intel64.ideep.localResponseNormalization.Backward(
intel64.ideep.array(x),
intel64.ideep.array(gy),
self.indexes,
param)
return gx,
def forward_gpu(self, inputs):
x, y, gy = inputs
summand = cuda.elementwise(
'T scale, T y, T gy', 'T summand',
'summand = y * gy / scale',
'lrn_bwd_summand')(self.scale, y, gy)
gx = cuda.cupy.empty_like(x)
_cu_conv_sum(gx, summand, self.n)
cuda.elementwise(
' T x, T gy, T scale, T beta, T coeff', 'T gx',
'gx = pow(scale, -beta) * gy - coeff * x * gx',
'lrn_bwd')(x, gy, self.scale,
self.beta, 2 * self.alpha * self.beta, gx)
return gx,
def backward(self, indexes, grad_outputs):
# No trivial way to implement double-backward for this function.
raise NotImplementedError
def local_response_normalization(x, n=5, k=2, alpha=1e-4, beta=.75):
"""Local response normalization across neighboring channels.
    This function implements normalization across channels. Let :math:`x` be an
    input image with :math:`N` channels. Then, this function computes an output
    image :math:`y` by the following formula:
.. math::
y_i = {x_i \\over \\left( k + \\
\\alpha \\sum_{j=\\max{1, i - n/2}}^{\\min{N, i + n/2}} \\
x_j^2 \\right)^\\beta}.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
n (int): Normalization window width.
k (float): Smoothing parameter.
alpha (float): Normalizer scaling parameter.
beta (float): Normalizer power parameter.
Returns:
~chainer.Variable: Output variable.
See: Section 3.3 of `ImageNet Classification with Deep Convolutional
Neural Networks <https://www.cs.toronto.edu/~fritz/absps/imagenet.pdf>`_
"""
return LocalResponseNormalization(n, k, alpha, beta).apply((x,))[0]
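# Minimal usage sketch (added for illustration, not part of the original
# module).  The wrapper above is exposed as
# chainer.functions.local_response_normalization:
#
#     import numpy as np
#     import chainer.functions as F
#     x = np.random.rand(2, 16, 8, 8).astype(np.float32)  # (batch, channels, H, W)
#     y = F.local_response_normalization(x, n=5, k=2, alpha=1e-4, beta=0.75)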
|
[
"chainer.backends.intel64.ideep.localResponseNormalizationParam",
"six.moves.range",
"chainer.backends.cuda.cupy.square",
"chainer.backends.intel64.ideep.array",
"chainer.backends.cuda.cupy.empty_like",
"numpy.square",
"chainer.backends.cuda.elementwise",
"chainer.backends.intel64.inputs_all_ready",
"chainer.utils.type_check.expect",
"chainer.backends.intel64.should_use_ideep"
] |
[((312, 887), 'chainer.backends.cuda.elementwise', 'cuda.elementwise', (['"""raw T x, int32 rdim, int32 N, int32 n_"""', '"""raw T y"""', '"""\n int half_n = n_ / 2;\n int offset = i / rdim * N * rdim + i % rdim;\n\n float sum_part = 0;\n for (int j = 0; j < N + half_n; ++j) {\n if (j < N) {\n sum_part += x[offset + j * rdim];\n }\n if (j >= n_) {\n sum_part -= x[offset + (j - n_) * rdim];\n }\n if (j >= half_n) {\n y[offset + (j - half_n) * rdim] = sum_part;\n }\n }\n """', '"""lrn_conv_sum"""'], {}), '(\'raw T x, int32 rdim, int32 N, int32 n_\', \'raw T y\',\n """\n int half_n = n_ / 2;\n int offset = i / rdim * N * rdim + i % rdim;\n\n float sum_part = 0;\n for (int j = 0; j < N + half_n; ++j) {\n if (j < N) {\n sum_part += x[offset + j * rdim];\n }\n if (j >= n_) {\n sum_part -= x[offset + (j - n_) * rdim];\n }\n if (j >= half_n) {\n y[offset + (j - half_n) * rdim] = sum_part;\n }\n }\n """\n , \'lrn_conv_sum\')\n', (328, 887), False, 'from chainer.backends import cuda\n'), ((1491, 1552), 'chainer.utils.type_check.expect', 'type_check.expect', (["(x_type.dtype.kind == 'f')", '(x_type.ndim >= 2)'], {}), "(x_type.dtype.kind == 'f', x_type.ndim >= 2)\n", (1508, 1552), False, 'from chainer.utils import type_check\n'), ((1944, 1959), 'numpy.square', 'numpy.square', (['x'], {}), '(x)\n', (1956, 1959), False, 'import numpy\n'), ((2006, 2036), 'six.moves.range', 'six.moves.range', (['(1)', '(half_n + 1)'], {}), '(1, half_n + 1)\n', (2021, 2036), False, 'import six\n'), ((2417, 2587), 'chainer.backends.intel64.ideep.localResponseNormalizationParam', 'intel64.ideep.localResponseNormalizationParam', (['self.n', 'self.k', '(self.n * self.alpha)', 'self.beta', 'intel64.ideep.localResponseNormalizationParam.lrn_across_channels'], {}), '(self.n, self.k, self.n * self\n .alpha, self.beta, intel64.ideep.localResponseNormalizationParam.\n lrn_across_channels)\n', (2462, 2587), False, 'from chainer.backends import intel64\n'), ((2907, 2926), 'chainer.backends.cuda.cupy.square', 'cuda.cupy.square', (['x'], {}), '(x)\n', (2923, 2926), False, 'from chainer.backends import cuda\n'), ((2961, 2989), 'chainer.backends.cuda.cupy.empty_like', 'cuda.cupy.empty_like', (['self.y'], {}), '(self.y)\n', (2981, 2989), False, 'from chainer.backends import cuda\n'), ((4372, 4402), 'six.moves.range', 'six.moves.range', (['(1)', '(half_n + 1)'], {}), '(1, half_n + 1)\n', (4387, 4402), False, 'import six\n'), ((4674, 4844), 'chainer.backends.intel64.ideep.localResponseNormalizationParam', 'intel64.ideep.localResponseNormalizationParam', (['self.n', 'self.k', '(self.n * self.alpha)', 'self.beta', 'intel64.ideep.localResponseNormalizationParam.lrn_across_channels'], {}), '(self.n, self.k, self.n * self\n .alpha, self.beta, intel64.ideep.localResponseNormalizationParam.\n lrn_across_channels)\n', (4719, 4844), False, 'from chainer.backends import intel64\n'), ((5318, 5341), 'chainer.backends.cuda.cupy.empty_like', 'cuda.cupy.empty_like', (['x'], {}), '(x)\n', (5338, 5341), False, 'from chainer.backends import cuda\n'), ((1636, 1670), 'chainer.backends.intel64.should_use_ideep', 'intel64.should_use_ideep', (['""">=auto"""'], {}), "('>=auto')\n", (1660, 1670), False, 'from chainer.backends import intel64\n'), ((1691, 1729), 'chainer.backends.intel64.inputs_all_ready', 'intel64.inputs_all_ready', (['inputs', '(4,)'], {}), '(inputs, (4,))\n', (1715, 1729), False, 'from chainer.backends import intel64\n'), ((2686, 2708), 'chainer.backends.intel64.ideep.array', 'intel64.ideep.array', (['x'], {}), '(x)\n', (2705, 2708), False, 'from 
chainer.backends import intel64\n'), ((3047, 3203), 'chainer.backends.cuda.elementwise', 'cuda.elementwise', (['"""T x, T k, T alpha, T beta"""', '"""T y, T scale"""', '"""scale = k + alpha * scale;\n y = x * pow(scale, -beta);"""', '"""lrn_fwd"""'], {}), '(\'T x, T k, T alpha, T beta\', \'T y, T scale\',\n """scale = k + alpha * scale;\n y = x * pow(scale, -beta);""",\n \'lrn_fwd\')\n', (3063, 3203), False, 'from chainer.backends import cuda\n'), ((4945, 4967), 'chainer.backends.intel64.ideep.array', 'intel64.ideep.array', (['x'], {}), '(x)\n', (4964, 4967), False, 'from chainer.backends import intel64\n'), ((4981, 5004), 'chainer.backends.intel64.ideep.array', 'intel64.ideep.array', (['gy'], {}), '(gy)\n', (5000, 5004), False, 'from chainer.backends import intel64\n'), ((5150, 5252), 'chainer.backends.cuda.elementwise', 'cuda.elementwise', (['"""T scale, T y, T gy"""', '"""T summand"""', '"""summand = y * gy / scale"""', '"""lrn_bwd_summand"""'], {}), "('T scale, T y, T gy', 'T summand',\n 'summand = y * gy / scale', 'lrn_bwd_summand')\n", (5166, 5252), False, 'from chainer.backends import cuda\n'), ((5392, 5519), 'chainer.backends.cuda.elementwise', 'cuda.elementwise', (['""" T x, T gy, T scale, T beta, T coeff"""', '"""T gx"""', '"""gx = pow(scale, -beta) * gy - coeff * x * gx"""', '"""lrn_bwd"""'], {}), "(' T x, T gy, T scale, T beta, T coeff', 'T gx',\n 'gx = pow(scale, -beta) * gy - coeff * x * gx', 'lrn_bwd')\n", (5408, 5519), False, 'from chainer.backends import cuda\n')]
|
# coding: utf-8
import numpy as np
import torch
import pysptk
import pyworld
import librosa
from sklearn.preprocessing import MinMaxScaler
from nnmnkwii.io import hts
from nnmnkwii.frontend import merlin as fe
from nnmnkwii.postfilters import merlin_post_filter
from nnmnkwii.preprocessing.f0 import interp1d
from nnsvs.io.hts import get_note_indices
from nnsvs.multistream import multi_stream_mlpg, get_static_stream_sizes
from nnsvs.multistream import select_streams, split_streams
def get_windows(num_window=1):
windows = [(0, 0, np.array([1.0]))]
if num_window >= 2:
windows.append((1, 1, np.array([-0.5, 0.0, 0.5])))
if num_window >= 3:
windows.append((1, 1, np.array([1.0, -2.0, 1.0])))
if num_window >= 4:
raise ValueError(f"Not supported num windows: {num_window}")
return windows
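# For reference, the three windows returned by get_windows(3) are the usual
# static/delta/delta-delta windows consumed by the MLPG step in gen_waveform:
#   [(0, 0, [1.0]), (1, 1, [-0.5, 0.0, 0.5]), (1, 1, [1.0, -2.0, 1.0])]
# (arrays shown as plain lists for readability)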
def _midi_to_hz(x, idx, log_f0=False):
z = np.zeros(len(x))
indices = x[:, idx] > 0
z[indices] = librosa.midi_to_hz(x[indices, idx])
if log_f0:
z[indices] = np.log(z[indices])
return z
def _is_silence(l):
is_full_context = "@" in l
if is_full_context:
is_silence = ("-sil" in l or "-pau" in l)
else:
is_silence = (l == "sil" or l == "pau")
return is_silence
def predict_timelag(device, labels, timelag_model, timelag_in_scaler, timelag_out_scaler,
binary_dict, continuous_dict,
pitch_indices=None, log_f0_conditioning=True,
allowed_range=[-20, 20], allowed_range_rest=[-40, 40]):
# round start/end times just in case.
labels.round_()
# Extract note-level labels
note_indices = get_note_indices(labels)
note_labels = labels[note_indices]
# Extract musical/linguistic context
timelag_linguistic_features = fe.linguistic_features(
note_labels, binary_dict, continuous_dict,
add_frame_features=False, subphone_features=None).astype(np.float32)
# Adjust input features if we use log-f0 conditioning
if log_f0_conditioning:
if pitch_indices is None:
raise ValueError("Pitch feature indices must be specified!")
for idx in pitch_indices:
timelag_linguistic_features[:, idx] = interp1d(
_midi_to_hz(timelag_linguistic_features, idx, log_f0_conditioning),
kind="slinear")
# Normalization
timelag_linguistic_features = timelag_in_scaler.transform(timelag_linguistic_features)
if isinstance(timelag_in_scaler, MinMaxScaler):
# clip to feature range
timelag_linguistic_features = np.clip(
timelag_linguistic_features, timelag_in_scaler.feature_range[0],
timelag_in_scaler.feature_range[1])
# Run model
x = torch.from_numpy(timelag_linguistic_features).unsqueeze(0).to(device)
y = timelag_model(x, [x.shape[1]]).squeeze(0).cpu()
# De-normalization and rounding
lag = np.round(timelag_out_scaler.inverse_transform(y.data.numpy()))
# Clip to the allowed range
for idx in range(len(lag)):
if _is_silence(note_labels.contexts[idx]):
lag[idx] = np.clip(lag[idx], allowed_range_rest[0], allowed_range_rest[1])
else:
lag[idx] = np.clip(lag[idx], allowed_range[0], allowed_range[1])
    # convert frames to 100-ns HTS time units (1 frame = 5 ms = 50000 * 100 ns)
lag *= 50000
return lag
def postprocess_duration(labels, pred_durations, lag):
note_indices = get_note_indices(labels)
# append the end of note
note_indices.append(len(labels))
output_labels = hts.HTSLabelFile()
for i in range(1, len(note_indices)):
# Apply time lag
p = labels[note_indices[i-1]:note_indices[i]]
p.start_times = np.minimum(
np.asarray(p.start_times) + lag[i-1].reshape(-1),
np.asarray(p.end_times) - 50000 * len(p))
p.start_times = np.maximum(p.start_times, 0)
if len(output_labels) > 0:
p.start_times = np.maximum(p.start_times, output_labels.start_times[-1] + 50000)
# Compute normalized phoneme durations
d = fe.duration_features(p)
d_hat = pred_durations[note_indices[i-1]:note_indices[i]]
d_norm = d[0] * d_hat / d_hat.sum()
d_norm = np.round(d_norm)
d_norm[d_norm <= 0] = 1
# TODO: better way to adjust?
if d_norm.sum() != d[0]:
d_norm[-1] += d[0] - d_norm.sum()
p.set_durations(d_norm)
if len(output_labels) > 0:
output_labels.end_times[-1] = p.start_times[0]
for n in p:
output_labels.append(n)
return output_labels
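# Worked example of the duration normalization above: if a note spans
# d[0] = 30 frames and the predicted per-phoneme durations are
# d_hat = [2, 4, 6], then d_norm = 30 * d_hat / d_hat.sum() = [5, 10, 15],
# i.e. the predicted durations are rescaled so the phonemes exactly fill the
# note, with any rounding residual pushed onto the last phoneme.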
def predict_duration(device, labels, duration_model, duration_in_scaler, duration_out_scaler,
lag, binary_dict, continuous_dict, pitch_indices=None, log_f0_conditioning=True):
# Extract musical/linguistic features
duration_linguistic_features = fe.linguistic_features(
labels, binary_dict, continuous_dict,
add_frame_features=False, subphone_features=None).astype(np.float32)
if log_f0_conditioning:
for idx in pitch_indices:
duration_linguistic_features[:, idx] = interp1d(
_midi_to_hz(duration_linguistic_features, idx, log_f0_conditioning),
kind="slinear")
# Apply normalization
duration_linguistic_features = duration_in_scaler.transform(duration_linguistic_features)
if isinstance(duration_in_scaler, MinMaxScaler):
# clip to feature range
duration_linguistic_features = np.clip(
duration_linguistic_features, duration_in_scaler.feature_range[0],
duration_in_scaler.feature_range[1])
# Apply model
x = torch.from_numpy(duration_linguistic_features).float().to(device)
x = x.view(1, -1, x.size(-1))
pred_durations = duration_model(x, [x.shape[1]]).squeeze(0).cpu().data.numpy()
# Apply denormalization
pred_durations = duration_out_scaler.inverse_transform(pred_durations)
pred_durations[pred_durations <= 0] = 1
pred_durations = np.round(pred_durations)
return pred_durations
def predict_acoustic(device, labels, acoustic_model, acoustic_in_scaler,
acoustic_out_scaler, binary_dict, continuous_dict,
subphone_features="coarse_coding",
pitch_indices=None, log_f0_conditioning=True):
# Musical/linguistic features
linguistic_features = fe.linguistic_features(labels,
binary_dict, continuous_dict,
add_frame_features=True,
subphone_features=subphone_features)
if log_f0_conditioning:
for idx in pitch_indices:
linguistic_features[:, idx] = interp1d(
_midi_to_hz(linguistic_features, idx, log_f0_conditioning),
kind="slinear")
# Apply normalization
linguistic_features = acoustic_in_scaler.transform(linguistic_features)
if isinstance(acoustic_in_scaler, MinMaxScaler):
# clip to feature range
linguistic_features = np.clip(
linguistic_features, acoustic_in_scaler.feature_range[0],
acoustic_in_scaler.feature_range[1])
# Predict acoustic features
x = torch.from_numpy(linguistic_features).float().to(device)
x = x.view(1, -1, x.size(-1))
pred_acoustic = acoustic_model(x, [x.shape[1]]).squeeze(0).cpu().data.numpy()
# Apply denormalization
pred_acoustic = acoustic_out_scaler.inverse_transform(pred_acoustic)
return pred_acoustic
def gen_waveform(labels, acoustic_features, acoustic_out_scaler,
binary_dict, continuous_dict, stream_sizes, has_dynamic_features,
subphone_features="coarse_coding", log_f0_conditioning=True, pitch_idx=None,
num_windows=3, post_filter=True, sample_rate=48000, frame_period=5,
relative_f0=True):
windows = get_windows(num_windows)
# Apply MLPG if necessary
if np.any(has_dynamic_features):
acoustic_features = multi_stream_mlpg(
acoustic_features, acoustic_out_scaler.var_, windows, stream_sizes,
has_dynamic_features)
static_stream_sizes = get_static_stream_sizes(
stream_sizes, has_dynamic_features, len(windows))
else:
static_stream_sizes = stream_sizes
# Split multi-stream features
mgc, target_f0, vuv, bap = split_streams(acoustic_features, static_stream_sizes)
    # Generate the waveform with the WORLD vocoder
fftlen = pyworld.get_cheaptrick_fft_size(sample_rate)
alpha = pysptk.util.mcepalpha(sample_rate)
if post_filter:
mgc = merlin_post_filter(mgc, alpha)
spectrogram = pysptk.mc2sp(mgc, fftlen=fftlen, alpha=alpha)
aperiodicity = pyworld.decode_aperiodicity(bap.astype(np.float64), sample_rate, fftlen)
# fill aperiodicity with ones for unvoiced regions
aperiodicity[vuv.reshape(-1) < 0.5, :] = 1.0
# WORLD fails catastrophically for out of range aperiodicity
aperiodicity = np.clip(aperiodicity, 0.0, 1.0)
### F0 ###
if relative_f0:
diff_lf0 = target_f0
# need to extract pitch sequence from the musical score
linguistic_features = fe.linguistic_features(labels,
binary_dict, continuous_dict,
add_frame_features=True,
subphone_features=subphone_features)
f0_score = _midi_to_hz(linguistic_features, pitch_idx, False)[:, None]
lf0_score = f0_score.copy()
nonzero_indices = np.nonzero(lf0_score)
lf0_score[nonzero_indices] = np.log(f0_score[nonzero_indices])
lf0_score = interp1d(lf0_score, kind="slinear")
f0 = diff_lf0 + lf0_score
f0[vuv < 0.5] = 0
f0[np.nonzero(f0)] = np.exp(f0[np.nonzero(f0)])
else:
f0 = target_f0
generated_waveform = pyworld.synthesize(f0.flatten().astype(np.float64),
spectrogram.astype(np.float64),
aperiodicity.astype(np.float64),
sample_rate, frame_period)
return generated_waveform
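# A rough sketch of how the functions above fit together (the model, scaler and
# dictionary objects are assumed to be loaded elsewhere, e.g. from a trained
# model directory; variable names are placeholders):
#
#   lag = predict_timelag(device, labels, timelag_model, timelag_in_scaler,
#                         timelag_out_scaler, binary_dict, continuous_dict,
#                         pitch_indices)
#   durations = predict_duration(device, labels, duration_model,
#                                duration_in_scaler, duration_out_scaler, lag,
#                                binary_dict, continuous_dict, pitch_indices)
#   duration_modified_labels = postprocess_duration(labels, durations, lag)
#   acoustic = predict_acoustic(device, duration_modified_labels,
#                               acoustic_model, acoustic_in_scaler,
#                               acoustic_out_scaler, binary_dict,
#                               continuous_dict)
#   wav = gen_waveform(duration_modified_labels, acoustic, acoustic_out_scaler,
#                      binary_dict, continuous_dict, stream_sizes,
#                      has_dynamic_features)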
|
[
"numpy.clip",
"librosa.midi_to_hz",
"pysptk.util.mcepalpha",
"numpy.log",
"pysptk.mc2sp",
"nnmnkwii.io.hts.HTSLabelFile",
"torch.from_numpy",
"numpy.array",
"numpy.asarray",
"nnmnkwii.preprocessing.f0.interp1d",
"numpy.maximum",
"nnmnkwii.frontend.merlin.linguistic_features",
"numpy.round",
"pyworld.get_cheaptrick_fft_size",
"nnsvs.multistream.multi_stream_mlpg",
"numpy.any",
"nnmnkwii.frontend.merlin.duration_features",
"numpy.nonzero",
"nnsvs.multistream.split_streams",
"nnmnkwii.postfilters.merlin_post_filter",
"nnsvs.io.hts.get_note_indices"
] |
[((950, 985), 'librosa.midi_to_hz', 'librosa.midi_to_hz', (['x[indices, idx]'], {}), '(x[indices, idx])\n', (968, 985), False, 'import librosa\n'), ((1622, 1646), 'nnsvs.io.hts.get_note_indices', 'get_note_indices', (['labels'], {}), '(labels)\n', (1638, 1646), False, 'from nnsvs.io.hts import get_note_indices\n'), ((3378, 3402), 'nnsvs.io.hts.get_note_indices', 'get_note_indices', (['labels'], {}), '(labels)\n', (3394, 3402), False, 'from nnsvs.io.hts import get_note_indices\n'), ((3490, 3508), 'nnmnkwii.io.hts.HTSLabelFile', 'hts.HTSLabelFile', ([], {}), '()\n', (3506, 3508), False, 'from nnmnkwii.io import hts\n'), ((5968, 5992), 'numpy.round', 'np.round', (['pred_durations'], {}), '(pred_durations)\n', (5976, 5992), True, 'import numpy as np\n'), ((6313, 6439), 'nnmnkwii.frontend.merlin.linguistic_features', 'fe.linguistic_features', (['labels', 'binary_dict', 'continuous_dict'], {'add_frame_features': '(True)', 'subphone_features': 'subphone_features'}), '(labels, binary_dict, continuous_dict,\n add_frame_features=True, subphone_features=subphone_features)\n', (6335, 6439), True, 'from nnmnkwii.frontend import merlin as fe\n'), ((7908, 7936), 'numpy.any', 'np.any', (['has_dynamic_features'], {}), '(has_dynamic_features)\n', (7914, 7936), True, 'import numpy as np\n'), ((8335, 8388), 'nnsvs.multistream.split_streams', 'split_streams', (['acoustic_features', 'static_stream_sizes'], {}), '(acoustic_features, static_stream_sizes)\n', (8348, 8388), False, 'from nnsvs.multistream import select_streams, split_streams\n'), ((8444, 8488), 'pyworld.get_cheaptrick_fft_size', 'pyworld.get_cheaptrick_fft_size', (['sample_rate'], {}), '(sample_rate)\n', (8475, 8488), False, 'import pyworld\n'), ((8501, 8535), 'pysptk.util.mcepalpha', 'pysptk.util.mcepalpha', (['sample_rate'], {}), '(sample_rate)\n', (8522, 8535), False, 'import pysptk\n'), ((8621, 8666), 'pysptk.mc2sp', 'pysptk.mc2sp', (['mgc'], {'fftlen': 'fftlen', 'alpha': 'alpha'}), '(mgc, fftlen=fftlen, alpha=alpha)\n', (8633, 8666), False, 'import pysptk\n'), ((8948, 8979), 'numpy.clip', 'np.clip', (['aperiodicity', '(0.0)', '(1.0)'], {}), '(aperiodicity, 0.0, 1.0)\n', (8955, 8979), True, 'import numpy as np\n'), ((1022, 1040), 'numpy.log', 'np.log', (['z[indices]'], {}), '(z[indices])\n', (1028, 1040), True, 'import numpy as np\n'), ((2556, 2668), 'numpy.clip', 'np.clip', (['timelag_linguistic_features', 'timelag_in_scaler.feature_range[0]', 'timelag_in_scaler.feature_range[1]'], {}), '(timelag_linguistic_features, timelag_in_scaler.feature_range[0],\n timelag_in_scaler.feature_range[1])\n', (2563, 2668), True, 'import numpy as np\n'), ((3807, 3835), 'numpy.maximum', 'np.maximum', (['p.start_times', '(0)'], {}), '(p.start_times, 0)\n', (3817, 3835), True, 'import numpy as np\n'), ((4024, 4047), 'nnmnkwii.frontend.merlin.duration_features', 'fe.duration_features', (['p'], {}), '(p)\n', (4044, 4047), True, 'from nnmnkwii.frontend import merlin as fe\n'), ((4175, 4191), 'numpy.round', 'np.round', (['d_norm'], {}), '(d_norm)\n', (4183, 4191), True, 'import numpy as np\n'), ((5452, 5567), 'numpy.clip', 'np.clip', (['duration_linguistic_features', 'duration_in_scaler.feature_range[0]', 'duration_in_scaler.feature_range[1]'], {}), '(duration_linguistic_features, duration_in_scaler.feature_range[0],\n duration_in_scaler.feature_range[1])\n', (5459, 5567), True, 'import numpy as np\n'), ((7031, 7137), 'numpy.clip', 'np.clip', (['linguistic_features', 'acoustic_in_scaler.feature_range[0]', 'acoustic_in_scaler.feature_range[1]'], {}), 
'(linguistic_features, acoustic_in_scaler.feature_range[0],\n acoustic_in_scaler.feature_range[1])\n', (7038, 7137), True, 'import numpy as np\n'), ((7966, 8077), 'nnsvs.multistream.multi_stream_mlpg', 'multi_stream_mlpg', (['acoustic_features', 'acoustic_out_scaler.var_', 'windows', 'stream_sizes', 'has_dynamic_features'], {}), '(acoustic_features, acoustic_out_scaler.var_, windows,\n stream_sizes, has_dynamic_features)\n', (7983, 8077), False, 'from nnsvs.multistream import multi_stream_mlpg, get_static_stream_sizes\n'), ((8571, 8601), 'nnmnkwii.postfilters.merlin_post_filter', 'merlin_post_filter', (['mgc', 'alpha'], {}), '(mgc, alpha)\n', (8589, 8601), False, 'from nnmnkwii.postfilters import merlin_post_filter\n'), ((9139, 9265), 'nnmnkwii.frontend.merlin.linguistic_features', 'fe.linguistic_features', (['labels', 'binary_dict', 'continuous_dict'], {'add_frame_features': '(True)', 'subphone_features': 'subphone_features'}), '(labels, binary_dict, continuous_dict,\n add_frame_features=True, subphone_features=subphone_features)\n', (9161, 9265), True, 'from nnmnkwii.frontend import merlin as fe\n'), ((9559, 9580), 'numpy.nonzero', 'np.nonzero', (['lf0_score'], {}), '(lf0_score)\n', (9569, 9580), True, 'import numpy as np\n'), ((9618, 9651), 'numpy.log', 'np.log', (['f0_score[nonzero_indices]'], {}), '(f0_score[nonzero_indices])\n', (9624, 9651), True, 'import numpy as np\n'), ((9672, 9707), 'nnmnkwii.preprocessing.f0.interp1d', 'interp1d', (['lf0_score'], {'kind': '"""slinear"""'}), "(lf0_score, kind='slinear')\n", (9680, 9707), False, 'from nnmnkwii.preprocessing.f0 import interp1d\n'), ((541, 556), 'numpy.array', 'np.array', (['[1.0]'], {}), '([1.0])\n', (549, 556), True, 'import numpy as np\n'), ((1762, 1881), 'nnmnkwii.frontend.merlin.linguistic_features', 'fe.linguistic_features', (['note_labels', 'binary_dict', 'continuous_dict'], {'add_frame_features': '(False)', 'subphone_features': 'None'}), '(note_labels, binary_dict, continuous_dict,\n add_frame_features=False, subphone_features=None)\n', (1784, 1881), True, 'from nnmnkwii.frontend import merlin as fe\n'), ((3090, 3153), 'numpy.clip', 'np.clip', (['lag[idx]', 'allowed_range_rest[0]', 'allowed_range_rest[1]'], {}), '(lag[idx], allowed_range_rest[0], allowed_range_rest[1])\n', (3097, 3153), True, 'import numpy as np\n'), ((3191, 3244), 'numpy.clip', 'np.clip', (['lag[idx]', 'allowed_range[0]', 'allowed_range[1]'], {}), '(lag[idx], allowed_range[0], allowed_range[1])\n', (3198, 3244), True, 'import numpy as np\n'), ((3899, 3963), 'numpy.maximum', 'np.maximum', (['p.start_times', '(output_labels.start_times[-1] + 50000)'], {}), '(p.start_times, output_labels.start_times[-1] + 50000)\n', (3909, 3963), True, 'import numpy as np\n'), ((4815, 4929), 'nnmnkwii.frontend.merlin.linguistic_features', 'fe.linguistic_features', (['labels', 'binary_dict', 'continuous_dict'], {'add_frame_features': '(False)', 'subphone_features': 'None'}), '(labels, binary_dict, continuous_dict,\n add_frame_features=False, subphone_features=None)\n', (4837, 4929), True, 'from nnmnkwii.frontend import merlin as fe\n'), ((9780, 9794), 'numpy.nonzero', 'np.nonzero', (['f0'], {}), '(f0)\n', (9790, 9794), True, 'import numpy as np\n'), ((613, 639), 'numpy.array', 'np.array', (['[-0.5, 0.0, 0.5]'], {}), '([-0.5, 0.0, 0.5])\n', (621, 639), True, 'import numpy as np\n'), ((696, 722), 'numpy.array', 'np.array', (['[1.0, -2.0, 1.0]'], {}), '([1.0, -2.0, 1.0])\n', (704, 722), True, 'import numpy as np\n'), ((3679, 3704), 'numpy.asarray', 'np.asarray', 
(['p.start_times'], {}), '(p.start_times)\n', (3689, 3704), True, 'import numpy as np\n'), ((3741, 3764), 'numpy.asarray', 'np.asarray', (['p.end_times'], {}), '(p.end_times)\n', (3751, 3764), True, 'import numpy as np\n'), ((9808, 9822), 'numpy.nonzero', 'np.nonzero', (['f0'], {}), '(f0)\n', (9818, 9822), True, 'import numpy as np\n'), ((2715, 2760), 'torch.from_numpy', 'torch.from_numpy', (['timelag_linguistic_features'], {}), '(timelag_linguistic_features)\n', (2731, 2760), False, 'import torch\n'), ((5616, 5662), 'torch.from_numpy', 'torch.from_numpy', (['duration_linguistic_features'], {}), '(duration_linguistic_features)\n', (5632, 5662), False, 'import torch\n'), ((7200, 7237), 'torch.from_numpy', 'torch.from_numpy', (['linguistic_features'], {}), '(linguistic_features)\n', (7216, 7237), False, 'import torch\n')]
|
import os
import time
import argparse
import math
from numpy import finfo
import numpy as np
import torch
from distributed import apply_gradient_allreduce
import torch.distributed as dist
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data import DataLoader
from model import Tacotron2
from data_utils import TextMelLoader, TextMelCollate
from loss_function import Tacotron2Loss
from logger import Tacotron2Logger
from hparams import create_hparams
import matplotlib
matplotlib.use("Agg")
import matplotlib.pylab as plt
from plotting_utils import save_figure_to_numpy
from layers import TacotronSTFT
from audio_processing import griffin_lim
import librosa
def plot_spectrogram(spectrogram, out_path=''):
fig, ax = plt.subplots(figsize=(12, 3))
im = ax.imshow(spectrogram, aspect="auto", origin="lower",
interpolation='none')
plt.colorbar(im, ax=ax)
plt.xlabel("Frames")
plt.ylabel("Channels")
plt.tight_layout()
fig.canvas.draw()
fig.savefig(out_path)
# data = save_figure_to_numpy(fig)
plt.close()
# return data
def load_model(hparams):
model = Tacotron2(hparams).cuda()
if hparams.fp16_run:
model.decoder.attention_layer.score_mask_value = finfo('float16').min
if hparams.distributed_run:
model = apply_gradient_allreduce(model)
return model
def warm_start_model(checkpoint_path, model, ignore_layers):
assert os.path.isfile(checkpoint_path)
print("Warm starting model from checkpoint '{}'".format(checkpoint_path))
checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')
model_dict = checkpoint_dict['state_dict']
if len(ignore_layers) > 0:
model_dict = {k: v for k, v in model_dict.items()
if k not in ignore_layers}
dummy_dict = model.state_dict()
dummy_dict.update(model_dict)
model_dict = dummy_dict
model.load_state_dict(model_dict)
return model
def load_checkpoint(checkpoint_path, model, optimizer):
assert os.path.isfile(checkpoint_path)
print("Loading checkpoint '{}'".format(checkpoint_path))
checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')
model.load_state_dict(checkpoint_dict['state_dict'])
optimizer.load_state_dict(checkpoint_dict['optimizer'])
learning_rate = checkpoint_dict['learning_rate']
iteration = checkpoint_dict['iteration']
print("Loaded checkpoint '{}' from iteration {}" .format(
checkpoint_path, iteration))
return model, optimizer, learning_rate, iteration
def spec_to_waveform(taco_stft, mel_outputs_postnet, n_iter=60):
mel_decompress = taco_stft.spectral_de_normalize(mel_outputs_postnet).unsqueeze(0)
mel_decompress = mel_decompress.transpose(1, 2).data.cpu()
spec_from_mel_scaling = 1000
spec_from_mel = torch.mm(mel_decompress[0], taco_stft.mel_basis)
spec_from_mel = spec_from_mel.transpose(0, 1).unsqueeze(0)
spec_from_mel = spec_from_mel * spec_from_mel_scaling
waveform = griffin_lim(torch.autograd.Variable(spec_from_mel[:, :, :-1]), taco_stft.stft_fn, n_iter)
return waveform[0]
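# In short, spec_to_waveform de-normalizes the mel spectrogram, maps it back to
# an approximate linear-frequency magnitude spectrogram through the STFT's mel
# basis, and inverts that with Griffin-Lim; spec_from_mel_scaling and n_iter are
# heuristic quality/runtime knobs. Typical use inside infer() below:
#   audio = spec_to_waveform(taco_stft, mel, n_iter=60)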
def infer(output_directory, checkpoint_path, warm_start, hparams, debug=False):
"""Inference with teaching force
Params
------
output_directory (string): directory to the spectrograms
checkpoint_path(string): checkpoint path
hparams (object): comma separated list of "name=value" pairs.
"""
os.makedirs(output_directory, exist_ok=True)
taco_stft = TacotronSTFT(
hparams.filter_length, hparams.hop_length, hparams.win_length,
sampling_rate=hparams.sampling_rate)
torch.manual_seed(hparams.seed)
torch.cuda.manual_seed(hparams.seed)
model = load_model(hparams)
learning_rate = hparams.learning_rate
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate,
weight_decay=hparams.weight_decay)
if hparams.fp16_run:
from apex import amp
model, optimizer = amp.initialize(
model, optimizer, opt_level='O2')
if hparams.distributed_run:
model = apply_gradient_allreduce(model)
return_file_name = True
trainset = TextMelLoader(hparams.training_files, hparams, return_file_name=return_file_name)
collate_fn = TextMelCollate(hparams.n_frames_per_step, return_file_name=return_file_name)
train_sampler = None
train_loader = DataLoader(trainset, num_workers=1, shuffle=False,
sampler=train_sampler,
batch_size=hparams.batch_size,
pin_memory=False,
collate_fn=collate_fn)
# Load checkpoint if one exists
iteration = 0
epoch_offset = 0
if checkpoint_path is not None:
if warm_start:
model = warm_start_model(
checkpoint_path, model, hparams.ignore_layers)
else:
model, optimizer, _learning_rate, iteration = load_checkpoint(
checkpoint_path, model, optimizer)
if hparams.use_saved_learning_rate:
learning_rate = _learning_rate
iteration += 1 # next iteration is iteration + 1
epoch_offset = max(0, int(iteration / len(train_loader)))
model.eval()
for i, batch in enumerate(train_loader):
x, y = model.parse_batch(batch[:][:-1])
files_name = batch[:][-1]
mel_outputs, mel_outputs_postnet, _, alignments = model(x)
_, _, mel_expected_padded, _, mel_lengths = x
for idx in range(mel_outputs_postnet.size(0)):
name = os.path.basename(files_name[idx]).replace(".wav", '')
mel_padded = mel_outputs_postnet[idx]
mel_length = mel_lengths[idx]
mel = mel_padded[:, :mel_length]
np.save(os.path.join(output_directory, name+'.npy'), mel.detach().cpu().numpy())
if debug:
print("Debug Mode ON: Saving Wave files and Spectrograms Plot in:", output_directory)
                # save Griffin-Lim waveform reconstructions
librosa.output.write_wav(os.path.join(output_directory, name+'.wav'), spec_to_waveform(taco_stft, mel).detach().cpu().numpy(), sr=hparams.sampling_rate)
librosa.output.write_wav(os.path.join(output_directory, name+'_padded.wav'), spec_to_waveform(taco_stft, mel_padded).detach().cpu().numpy(), sr=hparams.sampling_rate)
librosa.output.write_wav(os.path.join(output_directory, name+'_expected_padded.wav'), spec_to_waveform(taco_stft, mel_expected_padded[idx]).detach().cpu().numpy(), sr=hparams.sampling_rate)
# plot figures
                plot_spectrogram(mel.detach().cpu().numpy(), os.path.join(output_directory, name+'.png'))
plot_spectrogram(mel_padded.detach().cpu().numpy(), os.path.join(output_directory, name+'_padded.png'))
plot_spectrogram(mel_expected_padded[idx].detach().cpu().numpy(), os.path.join(output_directory, name+'_expect_padded.png'))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
    parser.add_argument('-o', '--output_directory', type=str,
                        help='directory to save the generated mel spectrograms', default='mels_specs-test/')
parser.add_argument('-c', '--checkpoint_path', type=str, default=None,
required=False, help='checkpoint path')
parser.add_argument('--warm_start', action='store_true',
help='load model weights only, ignore specified layers')
parser.add_argument('--n_gpus', type=int, default=1,
required=False, help='number of gpus')
    parser.add_argument('--debug', action='store_true',
                        required=False, help='activate debug mode (also save wavs and plots)')
parser.add_argument('--hparams', type=str,
required=False, help='comma separated name=value pairs')
args = parser.parse_args()
hparams = create_hparams(args.hparams)
torch.backends.cudnn.enabled = hparams.cudnn_enabled
torch.backends.cudnn.benchmark = hparams.cudnn_benchmark
print("FP16 Run:", hparams.fp16_run)
print("Dynamic Loss Scaling:", hparams.dynamic_loss_scaling)
print("Distributed Run:", hparams.distributed_run)
print("cuDNN Enabled:", hparams.cudnn_enabled)
print("cuDNN Benchmark:", hparams.cudnn_benchmark)
infer(args.output_directory, args.checkpoint_path,
args.warm_start, hparams, args.debug)
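# Example invocation (script name and checkpoint path are placeholders):
#   python infer_teacher_forced.py -o mels_specs-test/ -c /path/to/checkpoint.pt --debug
# This writes one <utterance>.npy mel spectrogram per training utterance into
# the output directory; with --debug it also saves Griffin-Lim waveform
# reconstructions and spectrogram plots next to them.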
|
[
"matplotlib.pylab.subplots",
"apex.amp.initialize",
"argparse.ArgumentParser",
"model.Tacotron2",
"torch.autograd.Variable",
"matplotlib.pylab.close",
"matplotlib.use",
"matplotlib.pylab.xlabel",
"distributed.apply_gradient_allreduce",
"os.path.isfile",
"data_utils.TextMelCollate",
"numpy.finfo",
"layers.TacotronSTFT",
"torch.manual_seed",
"os.makedirs",
"matplotlib.pylab.tight_layout",
"torch.load",
"os.path.join",
"matplotlib.pylab.colorbar",
"torch.mm",
"hparams.create_hparams",
"data_utils.TextMelLoader",
"os.path.basename",
"torch.utils.data.DataLoader",
"torch.cuda.manual_seed",
"matplotlib.pylab.ylabel"
] |
[((500, 521), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (514, 521), False, 'import matplotlib\n'), ((753, 782), 'matplotlib.pylab.subplots', 'plt.subplots', ([], {'figsize': '(12, 3)'}), '(figsize=(12, 3))\n', (765, 782), True, 'import matplotlib.pylab as plt\n'), ((891, 914), 'matplotlib.pylab.colorbar', 'plt.colorbar', (['im'], {'ax': 'ax'}), '(im, ax=ax)\n', (903, 914), True, 'import matplotlib.pylab as plt\n'), ((919, 939), 'matplotlib.pylab.xlabel', 'plt.xlabel', (['"""Frames"""'], {}), "('Frames')\n", (929, 939), True, 'import matplotlib.pylab as plt\n'), ((944, 966), 'matplotlib.pylab.ylabel', 'plt.ylabel', (['"""Channels"""'], {}), "('Channels')\n", (954, 966), True, 'import matplotlib.pylab as plt\n'), ((971, 989), 'matplotlib.pylab.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (987, 989), True, 'import matplotlib.pylab as plt\n'), ((1083, 1094), 'matplotlib.pylab.close', 'plt.close', ([], {}), '()\n', (1092, 1094), True, 'import matplotlib.pylab as plt\n'), ((1454, 1485), 'os.path.isfile', 'os.path.isfile', (['checkpoint_path'], {}), '(checkpoint_path)\n', (1468, 1485), False, 'import os\n'), ((1586, 1633), 'torch.load', 'torch.load', (['checkpoint_path'], {'map_location': '"""cpu"""'}), "(checkpoint_path, map_location='cpu')\n", (1596, 1633), False, 'import torch\n'), ((2053, 2084), 'os.path.isfile', 'os.path.isfile', (['checkpoint_path'], {}), '(checkpoint_path)\n', (2067, 2084), False, 'import os\n'), ((2168, 2215), 'torch.load', 'torch.load', (['checkpoint_path'], {'map_location': '"""cpu"""'}), "(checkpoint_path, map_location='cpu')\n", (2178, 2215), False, 'import torch\n'), ((2853, 2901), 'torch.mm', 'torch.mm', (['mel_decompress[0]', 'taco_stft.mel_basis'], {}), '(mel_decompress[0], taco_stft.mel_basis)\n', (2861, 2901), False, 'import torch\n'), ((3477, 3521), 'os.makedirs', 'os.makedirs', (['output_directory'], {'exist_ok': '(True)'}), '(output_directory, exist_ok=True)\n', (3488, 3521), False, 'import os\n'), ((3539, 3655), 'layers.TacotronSTFT', 'TacotronSTFT', (['hparams.filter_length', 'hparams.hop_length', 'hparams.win_length'], {'sampling_rate': 'hparams.sampling_rate'}), '(hparams.filter_length, hparams.hop_length, hparams.win_length,\n sampling_rate=hparams.sampling_rate)\n', (3551, 3655), False, 'from layers import TacotronSTFT\n'), ((3706, 3737), 'torch.manual_seed', 'torch.manual_seed', (['hparams.seed'], {}), '(hparams.seed)\n', (3723, 3737), False, 'import torch\n'), ((3742, 3778), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['hparams.seed'], {}), '(hparams.seed)\n', (3764, 3778), False, 'import torch\n'), ((4263, 4349), 'data_utils.TextMelLoader', 'TextMelLoader', (['hparams.training_files', 'hparams'], {'return_file_name': 'return_file_name'}), '(hparams.training_files, hparams, return_file_name=\n return_file_name)\n', (4276, 4349), False, 'from data_utils import TextMelLoader, TextMelCollate\n'), ((4362, 4438), 'data_utils.TextMelCollate', 'TextMelCollate', (['hparams.n_frames_per_step'], {'return_file_name': 'return_file_name'}), '(hparams.n_frames_per_step, return_file_name=return_file_name)\n', (4376, 4438), False, 'from data_utils import TextMelLoader, TextMelCollate\n'), ((4487, 4636), 'torch.utils.data.DataLoader', 'DataLoader', (['trainset'], {'num_workers': '(1)', 'shuffle': '(False)', 'sampler': 'train_sampler', 'batch_size': 'hparams.batch_size', 'pin_memory': '(False)', 'collate_fn': 'collate_fn'}), '(trainset, num_workers=1, shuffle=False, sampler=train_sampler,\n batch_size=hparams.batch_size, 
pin_memory=False, collate_fn=collate_fn)\n', (4497, 4636), False, 'from torch.utils.data import DataLoader\n'), ((7175, 7200), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (7198, 7200), False, 'import argparse\n'), ((8056, 8084), 'hparams.create_hparams', 'create_hparams', (['args.hparams'], {}), '(args.hparams)\n', (8070, 8084), False, 'from hparams import create_hparams\n'), ((1330, 1361), 'distributed.apply_gradient_allreduce', 'apply_gradient_allreduce', (['model'], {}), '(model)\n', (1354, 1361), False, 'from distributed import apply_gradient_allreduce\n'), ((3050, 3099), 'torch.autograd.Variable', 'torch.autograd.Variable', (['spec_from_mel[:, :, :-1]'], {}), '(spec_from_mel[:, :, :-1])\n', (3073, 3099), False, 'import torch\n'), ((4075, 4123), 'apex.amp.initialize', 'amp.initialize', (['model', 'optimizer'], {'opt_level': '"""O2"""'}), "(model, optimizer, opt_level='O2')\n", (4089, 4123), False, 'from apex import amp\n'), ((4186, 4217), 'distributed.apply_gradient_allreduce', 'apply_gradient_allreduce', (['model'], {}), '(model)\n', (4210, 4217), False, 'from distributed import apply_gradient_allreduce\n'), ((1152, 1170), 'model.Tacotron2', 'Tacotron2', (['hparams'], {}), '(hparams)\n', (1161, 1170), False, 'from model import Tacotron2\n'), ((1260, 1276), 'numpy.finfo', 'finfo', (['"""float16"""'], {}), "('float16')\n", (1265, 1276), False, 'from numpy import finfo\n'), ((5953, 5998), 'os.path.join', 'os.path.join', (['output_directory', "(name + '.npy')"], {}), "(output_directory, name + '.npy')\n", (5965, 5998), False, 'import os\n'), ((5726, 5759), 'os.path.basename', 'os.path.basename', (['files_name[idx]'], {}), '(files_name[idx])\n', (5742, 5759), False, 'import os\n'), ((6238, 6283), 'os.path.join', 'os.path.join', (['output_directory', "(name + '.wav')"], {}), "(output_directory, name + '.wav')\n", (6250, 6283), False, 'import os\n'), ((6411, 6463), 'os.path.join', 'os.path.join', (['output_directory', "(name + '_padded.wav')"], {}), "(output_directory, name + '_padded.wav')\n", (6423, 6463), False, 'import os\n'), ((6598, 6659), 'os.path.join', 'os.path.join', (['output_directory', "(name + '_expected_padded.wav')"], {}), "(output_directory, name + '_expected_padded.wav')\n", (6610, 6659), False, 'import os\n'), ((6937, 6989), 'os.path.join', 'os.path.join', (['output_directory', "(name + '_padded.png')"], {}), "(output_directory, name + '_padded.png')\n", (6949, 6989), False, 'import os\n'), ((7075, 7134), 'os.path.join', 'os.path.join', (['output_directory', "(name + '_expect_padded.png')"], {}), "(output_directory, name + '_expect_padded.png')\n", (7087, 7134), False, 'import os\n')]
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import cv2
from PIL import Image, ImageEnhance
import random
import box_utils
def random_distort(img):
def random_brightness(img, lower=0.5, upper=1.5):
e = np.random.uniform(lower, upper)
return ImageEnhance.Brightness(img).enhance(e)
def random_contrast(img, lower=0.5, upper=1.5):
e = np.random.uniform(lower, upper)
return ImageEnhance.Contrast(img).enhance(e)
def random_color(img, lower=0.5, upper=1.5):
e = np.random.uniform(lower, upper)
return ImageEnhance.Color(img).enhance(e)
ops = [random_brightness, random_contrast, random_color]
np.random.shuffle(ops)
img = Image.fromarray(img)
img = ops[0](img)
img = ops[1](img)
img = ops[2](img)
img = np.asarray(img)
return img
def random_crop(img,
boxes,
labels,
scores,
scales=[0.3, 1.0],
max_ratio=2.0,
constraints=None,
max_trial=50):
    if len(boxes) == 0:
        # return all four values so callers can unpack consistently
        return img, boxes, labels, scores
if not constraints:
constraints = [(0.1, 1.0), (0.3, 1.0), (0.5, 1.0), (0.7, 1.0),
(0.9, 1.0), (0.0, 1.0)]
img = Image.fromarray(img)
w, h = img.size
crops = [(0, 0, w, h)]
for min_iou, max_iou in constraints:
for _ in range(max_trial):
scale = random.uniform(scales[0], scales[1])
aspect_ratio = random.uniform(max(1 / max_ratio, scale * scale), \
min(max_ratio, 1 / scale / scale))
crop_h = int(h * scale / np.sqrt(aspect_ratio))
crop_w = int(w * scale * np.sqrt(aspect_ratio))
crop_x = random.randrange(w - crop_w)
crop_y = random.randrange(h - crop_h)
crop_box = np.array([[(crop_x + crop_w / 2.0) / w,
(crop_y + crop_h / 2.0) / h,
crop_w / float(w), crop_h / float(h)]])
iou = box_utils.box_iou_xywh(crop_box, boxes)
if min_iou <= iou.min() and max_iou >= iou.max():
crops.append((crop_x, crop_y, crop_w, crop_h))
break
while crops:
crop = crops.pop(np.random.randint(0, len(crops)))
crop_boxes, crop_labels, crop_scores, box_num = \
box_utils.box_crop(boxes, labels, scores, crop, (w, h))
if box_num < 1:
continue
img = img.crop((crop[0], crop[1], crop[0] + crop[2],
crop[1] + crop[3])).resize(img.size, Image.LANCZOS)
img = np.asarray(img)
return img, crop_boxes, crop_labels, crop_scores
img = np.asarray(img)
return img, boxes, labels, scores
def random_flip(img, gtboxes, thresh=0.5):
if random.random() > thresh:
img = img[:, ::-1, :]
gtboxes[:, 0] = 1.0 - gtboxes[:, 0]
return img, gtboxes
def random_interp(img, size, interp=None):
interp_method = [
cv2.INTER_NEAREST,
cv2.INTER_LINEAR,
cv2.INTER_AREA,
cv2.INTER_CUBIC,
cv2.INTER_LANCZOS4,
]
    if interp is None or interp not in interp_method:
interp = interp_method[random.randint(0, len(interp_method) - 1)]
h, w, _ = img.shape
im_scale_x = size / float(w)
im_scale_y = size / float(h)
img = cv2.resize(
img, None, None, fx=im_scale_x, fy=im_scale_y, interpolation=interp)
return img
def random_expand(img,
gtboxes,
max_ratio=4.,
fill=None,
keep_ratio=True,
thresh=0.5):
if random.random() > thresh:
return img, gtboxes
if max_ratio < 1.0:
return img, gtboxes
h, w, c = img.shape
ratio_x = random.uniform(1, max_ratio)
if keep_ratio:
ratio_y = ratio_x
else:
ratio_y = random.uniform(1, max_ratio)
oh = int(h * ratio_y)
ow = int(w * ratio_x)
off_x = random.randint(0, ow - w)
off_y = random.randint(0, oh - h)
out_img = np.zeros((oh, ow, c))
if fill and len(fill) == c:
for i in range(c):
out_img[:, :, i] = fill[i] * 255.0
out_img[off_y:off_y + h, off_x:off_x + w, :] = img
gtboxes[:, 0] = ((gtboxes[:, 0] * w) + off_x) / float(ow)
gtboxes[:, 1] = ((gtboxes[:, 1] * h) + off_y) / float(oh)
gtboxes[:, 2] = gtboxes[:, 2] / ratio_x
gtboxes[:, 3] = gtboxes[:, 3] / ratio_y
    return out_img.astype('uint8'), gtboxes
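# Worked example of the box remapping in random_expand: boxes are stored as
# normalized (cx, cy, w, h). With w = 100, ratio_x = 2 (so ow = 200) and
# off_x = 50, a box centre at cx = 0.5 maps to (0.5 * 100 + 50) / 200 = 0.5,
# i.e. it stays over the same pixels of the original image, while the box
# width shrinks by 1 / ratio_x relative to the enlarged canvas.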
def shuffle_gtbox(gtbox, gtlabel, gtscore):
gt = np.concatenate(
[gtbox, gtlabel[:, np.newaxis], gtscore[:, np.newaxis]], axis=1)
idx = np.arange(gt.shape[0])
np.random.shuffle(idx)
gt = gt[idx, :]
return gt[:, :4], gt[:, 4], gt[:, 5]
def image_mixup(img1, gtboxes1, gtlabels1, gtscores1, img2, gtboxes2, gtlabels2,
gtscores2):
factor = np.random.beta(1.5, 1.5)
factor = max(0.0, min(1.0, factor))
    if factor >= 1.0:
        return img1, gtboxes1, gtlabels1, gtscores1
    if factor <= 0.0:
        return img2, gtboxes2, gtlabels2, gtscores2
gtscores1 = gtscores1 * factor
gtscores2 = gtscores2 * (1.0 - factor)
h = max(img1.shape[0], img2.shape[0])
w = max(img1.shape[1], img2.shape[1])
img = np.zeros((h, w, img1.shape[2]), 'float32')
img[:img1.shape[0], :img1.shape[1], :] = img1.astype('float32') * factor
img[:img2.shape[0], :img2.shape[1], :] += \
img2.astype('float32') * (1.0 - factor)
gtboxes = np.zeros_like(gtboxes1)
gtlabels = np.zeros_like(gtlabels1)
gtscores = np.zeros_like(gtscores1)
gt_valid_mask1 = np.logical_and(gtboxes1[:, 2] > 0, gtboxes1[:, 3] > 0)
gtboxes1 = gtboxes1[gt_valid_mask1]
gtlabels1 = gtlabels1[gt_valid_mask1]
gtscores1 = gtscores1[gt_valid_mask1]
gtboxes1[:, 0] = gtboxes1[:, 0] * img1.shape[1] / w
gtboxes1[:, 1] = gtboxes1[:, 1] * img1.shape[0] / h
gtboxes1[:, 2] = gtboxes1[:, 2] * img1.shape[1] / w
gtboxes1[:, 3] = gtboxes1[:, 3] * img1.shape[0] / h
gt_valid_mask2 = np.logical_and(gtboxes2[:, 2] > 0, gtboxes2[:, 3] > 0)
gtboxes2 = gtboxes2[gt_valid_mask2]
gtlabels2 = gtlabels2[gt_valid_mask2]
gtscores2 = gtscores2[gt_valid_mask2]
gtboxes2[:, 0] = gtboxes2[:, 0] * img2.shape[1] / w
gtboxes2[:, 1] = gtboxes2[:, 1] * img2.shape[0] / h
gtboxes2[:, 2] = gtboxes2[:, 2] * img2.shape[1] / w
gtboxes2[:, 3] = gtboxes2[:, 3] * img2.shape[0] / h
gtboxes_all = np.concatenate((gtboxes1, gtboxes2), axis=0)
gtlabels_all = np.concatenate((gtlabels1, gtlabels2), axis=0)
gtscores_all = np.concatenate((gtscores1, gtscores2), axis=0)
gt_num = min(len(gtboxes), len(gtboxes_all))
gtboxes[:gt_num] = gtboxes_all[:gt_num]
gtlabels[:gt_num] = gtlabels_all[:gt_num]
gtscores[:gt_num] = gtscores_all[:gt_num]
return img.astype('uint8'), gtboxes, gtlabels, gtscores
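# Numeric illustration of the mixup above: with factor = 0.6 the output image
# is 0.6 * img1 + 0.4 * img2 (on a canvas large enough for both), the boxes of
# both images are kept (rescaled to the shared canvas), and their ground-truth
# scores are weighted by 0.6 and 0.4 respectively, so the loss sees both sets
# of targets in proportion to the blend.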
def image_augment(img, gtboxes, gtlabels, gtscores, size, means=None):
img = random_distort(img)
img, gtboxes = random_expand(img, gtboxes, fill=means)
img, gtboxes, gtlabels, gtscores = \
random_crop(img, gtboxes, gtlabels, gtscores)
img = random_interp(img, size)
img, gtboxes = random_flip(img, gtboxes)
gtboxes, gtlabels, gtscores = shuffle_gtbox(gtboxes, gtlabels, gtscores)
return img.astype('float32'), gtboxes.astype('float32'), \
gtlabels.astype('int32'), gtscores.astype('float32')
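# A minimal usage sketch of the full pipeline above (shapes are assumptions:
# img is an HWC uint8 array, gtboxes an (N, 4) float array of normalized
# (cx, cy, w, h) boxes, gtlabels/gtscores length-N arrays; the fill means are
# example values):
#
#   img, boxes, labels, scores = image_augment(
#       img, gtboxes, gtlabels, gtscores, size=608,
#       means=[0.485, 0.456, 0.406])
#
# The returned image is resized to size x size; boxes remain in normalized
# coordinates and are shuffled together with their labels and scores.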
|
[
"numpy.sqrt",
"box_utils.box_crop",
"PIL.ImageEnhance.Contrast",
"box_utils.box_iou_xywh",
"numpy.arange",
"numpy.asarray",
"PIL.ImageEnhance.Color",
"numpy.concatenate",
"random.randint",
"random.uniform",
"numpy.random.beta",
"random.randrange",
"PIL.ImageEnhance.Brightness",
"cv2.resize",
"PIL.Image.fromarray",
"numpy.logical_and",
"numpy.zeros",
"numpy.random.uniform",
"random.random",
"numpy.zeros_like",
"numpy.random.shuffle"
] |
[((1398, 1420), 'numpy.random.shuffle', 'np.random.shuffle', (['ops'], {}), '(ops)\n', (1415, 1420), True, 'import numpy as np\n'), ((1432, 1452), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (1447, 1452), False, 'from PIL import Image, ImageEnhance\n'), ((1529, 1544), 'numpy.asarray', 'np.asarray', (['img'], {}), '(img)\n', (1539, 1544), True, 'import numpy as np\n'), ((1990, 2010), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (2005, 2010), False, 'from PIL import Image, ImageEnhance\n'), ((3455, 3470), 'numpy.asarray', 'np.asarray', (['img'], {}), '(img)\n', (3465, 3470), True, 'import numpy as np\n'), ((4112, 4191), 'cv2.resize', 'cv2.resize', (['img', 'None', 'None'], {'fx': 'im_scale_x', 'fy': 'im_scale_y', 'interpolation': 'interp'}), '(img, None, None, fx=im_scale_x, fy=im_scale_y, interpolation=interp)\n', (4122, 4191), False, 'import cv2\n'), ((4548, 4576), 'random.uniform', 'random.uniform', (['(1)', 'max_ratio'], {}), '(1, max_ratio)\n', (4562, 4576), False, 'import random\n'), ((4743, 4768), 'random.randint', 'random.randint', (['(0)', '(ow - w)'], {}), '(0, ow - w)\n', (4757, 4768), False, 'import random\n'), ((4781, 4806), 'random.randint', 'random.randint', (['(0)', '(oh - h)'], {}), '(0, oh - h)\n', (4795, 4806), False, 'import random\n'), ((4822, 4843), 'numpy.zeros', 'np.zeros', (['(oh, ow, c)'], {}), '((oh, ow, c))\n', (4830, 4843), True, 'import numpy as np\n'), ((5318, 5397), 'numpy.concatenate', 'np.concatenate', (['[gtbox, gtlabel[:, np.newaxis], gtscore[:, np.newaxis]]'], {'axis': '(1)'}), '([gtbox, gtlabel[:, np.newaxis], gtscore[:, np.newaxis]], axis=1)\n', (5332, 5397), True, 'import numpy as np\n'), ((5417, 5439), 'numpy.arange', 'np.arange', (['gt.shape[0]'], {}), '(gt.shape[0])\n', (5426, 5439), True, 'import numpy as np\n'), ((5444, 5466), 'numpy.random.shuffle', 'np.random.shuffle', (['idx'], {}), '(idx)\n', (5461, 5466), True, 'import numpy as np\n'), ((5652, 5676), 'numpy.random.beta', 'np.random.beta', (['(1.5)', '(1.5)'], {}), '(1.5, 1.5)\n', (5666, 5676), True, 'import numpy as np\n'), ((6016, 6058), 'numpy.zeros', 'np.zeros', (['(h, w, img1.shape[2])', '"""float32"""'], {}), "((h, w, img1.shape[2]), 'float32')\n", (6024, 6058), True, 'import numpy as np\n'), ((6250, 6273), 'numpy.zeros_like', 'np.zeros_like', (['gtboxes1'], {}), '(gtboxes1)\n', (6263, 6273), True, 'import numpy as np\n'), ((6289, 6313), 'numpy.zeros_like', 'np.zeros_like', (['gtlabels1'], {}), '(gtlabels1)\n', (6302, 6313), True, 'import numpy as np\n'), ((6329, 6353), 'numpy.zeros_like', 'np.zeros_like', (['gtscores1'], {}), '(gtscores1)\n', (6342, 6353), True, 'import numpy as np\n'), ((6376, 6430), 'numpy.logical_and', 'np.logical_and', (['(gtboxes1[:, 2] > 0)', '(gtboxes1[:, 3] > 0)'], {}), '(gtboxes1[:, 2] > 0, gtboxes1[:, 3] > 0)\n', (6390, 6430), True, 'import numpy as np\n'), ((6801, 6855), 'numpy.logical_and', 'np.logical_and', (['(gtboxes2[:, 2] > 0)', '(gtboxes2[:, 3] > 0)'], {}), '(gtboxes2[:, 2] > 0, gtboxes2[:, 3] > 0)\n', (6815, 6855), True, 'import numpy as np\n'), ((7223, 7267), 'numpy.concatenate', 'np.concatenate', (['(gtboxes1, gtboxes2)'], {'axis': '(0)'}), '((gtboxes1, gtboxes2), axis=0)\n', (7237, 7267), True, 'import numpy as np\n'), ((7287, 7333), 'numpy.concatenate', 'np.concatenate', (['(gtlabels1, gtlabels2)'], {'axis': '(0)'}), '((gtlabels1, gtlabels2), axis=0)\n', (7301, 7333), True, 'import numpy as np\n'), ((7353, 7399), 'numpy.concatenate', 'np.concatenate', (['(gtscores1, gtscores2)'], {'axis': '(0)'}), 
'((gtscores1, gtscores2), axis=0)\n', (7367, 7399), True, 'import numpy as np\n'), ((951, 982), 'numpy.random.uniform', 'np.random.uniform', (['lower', 'upper'], {}), '(lower, upper)\n', (968, 982), True, 'import numpy as np\n'), ((1103, 1134), 'numpy.random.uniform', 'np.random.uniform', (['lower', 'upper'], {}), '(lower, upper)\n', (1120, 1134), True, 'import numpy as np\n'), ((1250, 1281), 'numpy.random.uniform', 'np.random.uniform', (['lower', 'upper'], {}), '(lower, upper)\n', (1267, 1281), True, 'import numpy as np\n'), ((3120, 3175), 'box_utils.box_crop', 'box_utils.box_crop', (['boxes', 'labels', 'scores', 'crop', '(w, h)'], {}), '(boxes, labels, scores, crop, (w, h))\n', (3138, 3175), False, 'import box_utils\n'), ((3372, 3387), 'numpy.asarray', 'np.asarray', (['img'], {}), '(img)\n', (3382, 3387), True, 'import numpy as np\n'), ((3561, 3576), 'random.random', 'random.random', ([], {}), '()\n', (3574, 3576), False, 'import random\n'), ((4402, 4417), 'random.random', 'random.random', ([], {}), '()\n', (4415, 4417), False, 'import random\n'), ((4650, 4678), 'random.uniform', 'random.uniform', (['(1)', 'max_ratio'], {}), '(1, max_ratio)\n', (4664, 4678), False, 'import random\n'), ((2154, 2190), 'random.uniform', 'random.uniform', (['scales[0]', 'scales[1]'], {}), '(scales[0], scales[1])\n', (2168, 2190), False, 'import random\n'), ((2488, 2516), 'random.randrange', 'random.randrange', (['(w - crop_w)'], {}), '(w - crop_w)\n', (2504, 2516), False, 'import random\n'), ((2538, 2566), 'random.randrange', 'random.randrange', (['(h - crop_h)'], {}), '(h - crop_h)\n', (2554, 2566), False, 'import random\n'), ((2786, 2825), 'box_utils.box_iou_xywh', 'box_utils.box_iou_xywh', (['crop_box', 'boxes'], {}), '(crop_box, boxes)\n', (2808, 2825), False, 'import box_utils\n'), ((998, 1026), 'PIL.ImageEnhance.Brightness', 'ImageEnhance.Brightness', (['img'], {}), '(img)\n', (1021, 1026), False, 'from PIL import Image, ImageEnhance\n'), ((1150, 1176), 'PIL.ImageEnhance.Contrast', 'ImageEnhance.Contrast', (['img'], {}), '(img)\n', (1171, 1176), False, 'from PIL import Image, ImageEnhance\n'), ((1297, 1320), 'PIL.ImageEnhance.Color', 'ImageEnhance.Color', (['img'], {}), '(img)\n', (1315, 1320), False, 'from PIL import Image, ImageEnhance\n'), ((2384, 2405), 'numpy.sqrt', 'np.sqrt', (['aspect_ratio'], {}), '(aspect_ratio)\n', (2391, 2405), True, 'import numpy as np\n'), ((2444, 2465), 'numpy.sqrt', 'np.sqrt', (['aspect_ratio'], {}), '(aspect_ratio)\n', (2451, 2465), True, 'import numpy as np\n')]
|
import matplotlib.pyplot as plt
from sklearn.utils.multiclass import unique_labels
import numpy as np
from sklearn import svm
from sklearn.metrics import confusion_matrix
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn.metrics import auc
from sklearn.metrics import f1_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.multiclass import OneVsRestClassifier
import inspect
class Classifier:
def svm(self, X_train, y_train, X_test=None, kernel='rbf', C=1.0, degree=3, class_weight = None):
'''
This function fits an SVM on (X_train, y_train) and returns predictions for X_test.
Possible kernels: ‘linear’, ‘poly’, ‘rbf’, ‘sigmoid’, ‘precomputed’ or a callable. If none is given, ‘rbf’
will be used.
The fit time scales at least quadratically with the number of samples and may be impractical beyond tens of
thousands of samples.
Parameters:
- C: inverse of the regularization strength, smaller values specify stronger regularization
- class_weight: {dict, ‘balanced’}, optional
The “balanced” mode uses the values of y to automatically adjust weights inversely proportional to class frequencies
'''
classifier = svm.SVC(kernel=kernel, C=C, degree=degree, class_weight = class_weight)
# If the user hasn't provided any test data set, we simply predict on the train dataset
if X_test is None:
y_pred = classifier.fit(X_train, y_train).predict(X_train)
# distances from the separating hyperplane
distances = classifier.decision_function(X_train)
else:
y_pred = classifier.fit(X_train, y_train).predict(X_test)
distances = classifier.decision_function(X_test)
return classifier, y_pred, distances
def plot_confusion_matrix(self, y_true, y_pred, classes, normalize=False, title=None, cmap=plt.cm.Blues):
"""
This function plots and returns the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
# in case we have a list instead of a numpy array convert it
classes = np.array(classes)
if not title:
if normalize:
title = 'Normalized confusion matrix'
else:
title = 'Confusion matrix, without normalization'
# Compute confusion matrix
cm = confusion_matrix(y_true, y_pred)
# Only use the labels that appear in the data
classes = classes[unique_labels(y_true, y_pred)]
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
fig, ax = plt.subplots()
im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
ax.figure.colorbar(im, ax=ax)
# We want to show all ticks...
ax.set(xticks=np.arange(cm.shape[1]),
yticks=np.arange(cm.shape[0]),
# ... and label them with the respective list entries
xticklabels=classes, yticklabels=classes,
title=title,
ylabel='True label',
xlabel='Predicted label')
# Rotate the tick labels and set their alignment.
plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
rotation_mode="anchor")
# Loop over data dimensions and create text annotations.
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i in range(cm.shape[0]):
for j in range(cm.shape[1]):
ax.text(j, i, format(cm[i, j], fmt),
ha="center", va="center",
color="white" if cm[i, j] > thresh else "black")
fig.tight_layout()
plt.show()
return cm
def roc(self, y_true, distances):
'''
This function plots the Receiver Operating Characteristic Curve and returns the False Positive Rate,
True Positive Rate and Thresholds used.
:param y_true: true labels
        :param distances: probability estimates, confidence values or a non-thresholded
        decision measure
:return: fpr, tpr and thresholds
'''
fpr, tpr, thresholds = roc_curve(y_true, distances, pos_label=1)
plt.plot(fpr, tpr, label='ROC curve (area = %0.3f)' % auc(fpr, tpr))
plt.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')
plt.xlabel('False Positive Rate (FPR)')
plt.ylabel('True Positive Rate (TPR)')
plt.title('Receiver Operating Characteristic')
plt.legend(loc="lower right")
return fpr, tpr, thresholds
# AUROC
def auroc(self, y_true, distances):
'''
This function calculates the Area Under the Receiver Operating Characteristic Curve
        :param y_true: true labels
        :param distances: target scores; probability estimates, confidence values or a non-thresholded
        decision measure
:return: area under the Receiver Operating Characteristic Curve
'''
return roc_auc_score(y_true, distances)
# Accuracy, F1 score, etc... to do
def metrics(self, y_true, y_pred, average = None):
'''
This function calculates the metrics: accuracy, precision, recall, F1-score
        :param average: the averaging method used for multi-class labels
        :return: a dictionary mapping metric names to their values
'''
res = {}
for metric in [accuracy_score, precision_score, recall_score, f1_score]:
if 'average' in inspect.getfullargspec(metric).args:
res[metric.__name__] = metric(y_true = y_true, y_pred = y_pred, average = average)
else:
res[metric.__name__] = metric(y_true = y_true, y_pred = y_pred)
return res
# multi-class example
def svm_multi(self, X_train, y_train, X_test=None, kernel='rbf', C=1.0, degree=3, class_weight = None):
'''
This function fits an SVM on (X_train, y_train) and returns predictions for X_test.
Possible kernels: ‘linear’, ‘poly’, ‘rbf’, ‘sigmoid’, ‘precomputed’ or a callable. If none is given, ‘rbf’
will be used.
The fit time scales at least quadratically with the number of samples and may be impractical beyond tens of
thousands of samples.
Parameters:
- C: inverse of the regularization strength, smaller values specify stronger regularization
- class_weight: {dict, ‘balanced’}, optional
The “balanced” mode uses the values of y to automatically adjust weights inversely proportional to class frequencies
'''
classifier = OneVsRestClassifier(svm.SVC(kernel=kernel, C=C, degree=degree, class_weight = class_weight)) # wrap into one-vs-rest
# If the user hasn't provided any test data set, we simply predict on the train dataset
if X_test is None:
y_pred = classifier.fit(X_train, y_train).predict(X_train)
# distances from the separating hyperplane
distances = classifier.decision_function(X_train)
else:
y_pred = classifier.fit(X_train, y_train).predict(X_test)
distances = classifier.decision_function(X_test)
return classifier, y_pred, distances
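# Minimal usage example of the Classifier class, using scikit-learn's bundled
# iris data (the dataset choice is illustrative only):
if __name__ == '__main__':
    from sklearn.datasets import load_iris
    from sklearn.model_selection import train_test_split

    X, y = load_iris(return_X_y=True)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

    clf = Classifier()
    model, y_pred, distances = clf.svm_multi(X_train, y_train, X_test, kernel='linear')
    print(clf.metrics(y_test, y_pred, average='macro'))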
|
[
"sklearn.svm.SVC",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"numpy.arange",
"sklearn.metrics.auc",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"inspect.getfullargspec",
"sklearn.metrics.roc_auc_score",
"numpy.array",
"sklearn.metrics.roc_curve",
"matplotlib.pyplot.title",
"sklearn.utils.multiclass.unique_labels",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.legend",
"sklearn.metrics.confusion_matrix"
] |
[((1361, 1430), 'sklearn.svm.SVC', 'svm.SVC', ([], {'kernel': 'kernel', 'C': 'C', 'degree': 'degree', 'class_weight': 'class_weight'}), '(kernel=kernel, C=C, degree=degree, class_weight=class_weight)\n', (1368, 1430), False, 'from sklearn import svm\n'), ((2286, 2303), 'numpy.array', 'np.array', (['classes'], {}), '(classes)\n', (2294, 2303), True, 'import numpy as np\n'), ((2539, 2571), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (2555, 2571), False, 'from sklearn.metrics import confusion_matrix\n'), ((2916, 2930), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2928, 2930), True, 'import matplotlib.pyplot as plt\n'), ((3986, 3996), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3994, 3996), True, 'import matplotlib.pyplot as plt\n'), ((4467, 4508), 'sklearn.metrics.roc_curve', 'roc_curve', (['y_true', 'distances'], {'pos_label': '(1)'}), '(y_true, distances, pos_label=1)\n', (4476, 4508), False, 'from sklearn.metrics import roc_curve\n'), ((4594, 4654), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 1]', '[0, 1]'], {'color': '"""navy"""', 'lw': '(2)', 'linestyle': '"""--"""'}), "([0, 1], [0, 1], color='navy', lw=2, linestyle='--')\n", (4602, 4654), True, 'import matplotlib.pyplot as plt\n'), ((4663, 4702), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""False Positive Rate (FPR)"""'], {}), "('False Positive Rate (FPR)')\n", (4673, 4702), True, 'import matplotlib.pyplot as plt\n'), ((4711, 4749), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True Positive Rate (TPR)"""'], {}), "('True Positive Rate (TPR)')\n", (4721, 4749), True, 'import matplotlib.pyplot as plt\n'), ((4758, 4804), 'matplotlib.pyplot.title', 'plt.title', (['"""Receiver Operating Characteristic"""'], {}), "('Receiver Operating Characteristic')\n", (4767, 4804), True, 'import matplotlib.pyplot as plt\n'), ((4813, 4842), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (4823, 4842), True, 'import matplotlib.pyplot as plt\n'), ((5310, 5342), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['y_true', 'distances'], {}), '(y_true, distances)\n', (5323, 5342), False, 'from sklearn.metrics import roc_auc_score\n'), ((2652, 2681), 'sklearn.utils.multiclass.unique_labels', 'unique_labels', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (2665, 2681), False, 'from sklearn.utils.multiclass import unique_labels\n'), ((6917, 6986), 'sklearn.svm.SVC', 'svm.SVC', ([], {'kernel': 'kernel', 'C': 'C', 'degree': 'degree', 'class_weight': 'class_weight'}), '(kernel=kernel, C=C, degree=degree, class_weight=class_weight)\n', (6924, 6986), False, 'from sklearn import svm\n'), ((3093, 3115), 'numpy.arange', 'np.arange', (['cm.shape[1]'], {}), '(cm.shape[1])\n', (3102, 3115), True, 'import numpy as np\n'), ((3139, 3161), 'numpy.arange', 'np.arange', (['cm.shape[0]'], {}), '(cm.shape[0])\n', (3148, 3161), True, 'import numpy as np\n'), ((4571, 4584), 'sklearn.metrics.auc', 'auc', (['fpr', 'tpr'], {}), '(fpr, tpr)\n', (4574, 4584), False, 'from sklearn.metrics import auc\n'), ((5782, 5812), 'inspect.getfullargspec', 'inspect.getfullargspec', (['metric'], {}), '(metric)\n', (5804, 5812), False, 'import inspect\n')]
|
import sklearn
import copy
import numpy as np
import scipy as sp
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import seaborn as sns
# from viz import viz
from bokeh.plotting import figure, show, output_notebook, output_file, save
from functions import merge_data
from sklearn.model_selection import RandomizedSearchCV
import load_data
import naive_autoreg_baselines
import exponential_modeling
import pmdl_weight
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
very_important_vars = ['PopulationDensityperSqMile2010',
# 'MedicareEnrollment,AgedTot2017',
'PopulationEstimate2018',
'#ICU_beds',
'MedianAge2010',
'Smokers_Percentage',
'DiabetesPercentage',
'HeartDiseaseMortality',
'#Hospitals']
exponential = {'model_type':'exponential'}
shared_exponential = {'model_type':'shared_exponential'}
demographics = {'model_type':'shared_exponential', 'demographic_vars':very_important_vars}
def fit_and_predict(df,
outcome: str='deaths',
method: str='exponential',
mode: str='predict_future',
target_day: np.ndarray=np.array([1]),
output_key: str=None,
demographic_vars=[]):
"""
Trains a method (method) to predict a current number of days ahead (target_day)
Predicts the values of the number of deaths for the final day of test_df and writes to the column
'predicted_deaths_'+method+'_'+str(target_day[-1]) of the test_df
Params
------
df
a df with county level deaths and cases and demographic information
outcome
key for the outcome to predict (the values in this column should have a list for each row)
method
what method to use to do forecasting
target_day
np.array([1,2,..,n]) predicts these number of days ahead (can just be np.array([3])) for example if you just want 3 days ahead)
output_key
key to save the output as
mode:
either 'predict_future' or 'eval_mode'
predict_future is predicting deaths on FUTURE days, so target_day=np.array([1])) means it predicts tomorrow's deaths
eval_mode is for evaluating the performance of the classifier.
target_day=np.array([k])) will predict the current days death count using information from k days ago.
target_day= np.array([1,2,3,...,k]) will predict todays deaths, yesterdays deaths, deaths k-1 days ago using information from k days ago.
Returns
-------
test_df
returns dataframe with added column
"""
assert mode == 'predict_future' or mode == 'eval_mode', 'unknown mode'
if output_key is None:
output_key = f'predicted_{outcome}_{method}_{target_day[-1]}'
if len(demographic_vars) > 0:
output_key += '_demographics'
if method == 'AR':
print('currently deprecated')
raise NotImplementedError
loss, model, best_window = naive_autoreg_baselines.train_and_evaluate_model(train_df,test_df)
return naive_autoreg_baselines.make_predictions(test_df,model,best_window)
elif method == 'exponential':
preds = exponential_modeling.exponential_fit(df[outcome].values,
mode=mode,
target_day=target_day)
df[output_key] = preds
#del test_df['predicted_deaths_exponential']
return df
elif method == 'linear':
preds = exponential_modeling.linear_fit(df[outcome].values,
mode=mode,
target_day=target_day)
df[output_key] = preds
#del test_df['predicted_deaths_exponential']
return df
elif method == 'shared_exponential':
# Fit a poisson GLM with shared parameters across counties. Input to the poisson GLM is demographic_vars and log(previous_days_deaths+1)
cur_day_predictions = exponential_modeling.fit_and_predict_shared_exponential(df,mode,outcome=outcome,demographic_vars=demographic_vars,target_day=target_day)
#if len(demographic_vars) > 0:
# output_key += '_demographics'
# import IPython
# IPython.embed()
df[output_key] = cur_day_predictions
return df
elif method == 'ensemble':
print('please use fit_and_predict_ensemble instead')
else:
print('Unknown method')
raise ValueError
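def _example_fit_and_predict(df):
    """Illustrative sketch only (not called anywhere in this module): one hedged way
    to call fit_and_predict in eval mode, evaluating the shared-exponential model on
    deaths using information from three days earlier. `df` is assumed to be a
    county-level DataFrame of the form described in the docstring above."""
    return fit_and_predict(df,
                           outcome='deaths',
                           method='shared_exponential',
                           mode='eval_mode',
                           target_day=np.array([3]))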
def fit_and_predict_ensemble(df,
target_day: np.ndarray=np.array([1]),
outcome: str='deaths',
methods: list=[exponential, shared_exponential, demographics],
mode: str='predict_future',
output_key: str=None):
"""
Function for ensemble prediction
Input:
df: pd.DataFrame
target_day: array
outcome: str
method: list of dictionary
each dictionary specify the type and parameters of the model
mode: str
output_key: str
Output:
df with ensemble prediction
"""
if output_key is None:
output_key = f'predicted_{outcome}_ensemble_{target_day[-1]}'
predictions = {}
for (i, model) in enumerate(methods):
if 'demographic_vars' in model:
demographic_vars = model['demographic_vars']
else:
demographic_vars = []
predictions[i] = fit_and_predict(df,
outcome=outcome,
method=model['model_type'],
mode=mode,
target_day=target_day,
output_key=f'y_preds_{i}',
demographic_vars=demographic_vars)[f'y_preds_{i}'].values
if mode == 'predict_future':
use_df = df
else:
use_df = exponential_modeling.leave_t_day_out(df, target_day[-1])
weights = pmdl_weight.compute_pmdl_weight(use_df,
methods=methods,
outcome=outcome)
sum_weights = np.zeros(len(use_df))
for model_index in weights:
sum_weights = sum_weights + np.array(weights[model_index])
#weighted_preds = np.zeros((len(use_df), len(target_day)))
weighted_preds = [np.zeros(len(target_day)) for i in range(len(use_df))]
for i in range(len(df)):
for model_index in weights:
weighted_preds[i] += np.array(predictions[model_index][i]) * weights[model_index][i] / sum_weights[i]
df[output_key] = weighted_preds
return df
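def _example_fit_and_predict_ensemble(df):
    """Illustrative sketch only (not called anywhere in this module): combine the
    exponential and demographic models, weighted via pmdl_weight, to forecast deaths
    one to three days into the future. `df` is assumed to be a county-level DataFrame."""
    return fit_and_predict_ensemble(df,
                                     target_day=np.array([1, 2, 3]),
                                     outcome='deaths',
                                     methods=[exponential, demographics],
                                     mode='predict_future')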
def get_forecasts(df,
outcome,
method,
output_key,
target_day=np.array([1]),
demographic_vars=[]
):
"""
This is a tentative interface for extracting cases/deaths forecasts of future days
df: county_level df
outcome: 'cases' or 'deaths'
method: currently only support 'exponential' and 'shared_exponential'
target_day:
output_key
output: df with forecasts in output_key
"""
## not tested yet
if method == 'exponential':
return exponential_modeling.get_exponential_forecasts(df=df,
outcome=outcome,
target_day=target_day,
output_key=output_key)
elif method == 'shared_exponential':
df[output_key] = exponential_modeling.fit_and_predict_shared_exponential(df,
mode='predict_future',
demographic_vars=[],
outcome=outcome)
return df
elif method == 'shared_demographic':
assert len(demographic_vars) > 0
df[output_key] = exponential_modeling.fit_and_predict_shared_exponential(df,
mode='predict_future',
demographic_vars=demographic_vars,
outcome=outcome)
return df
elif method == 'ensemble':
        # fit_and_predict with method='ensemble' only prints a message, so call the
        # ensemble helper defined above directly
        df[output_key] = fit_and_predict_ensemble(df,
                                mode='predict_future',
                                outcome=outcome,
                                target_day=target_day)[f'predicted_{outcome}_{method}_{target_day[-1]}']
return df
else:
print('Unknown method')
raise ValueError
|
[
"naive_autoreg_baselines.train_and_evaluate_model",
"naive_autoreg_baselines.make_predictions",
"exponential_modeling.get_exponential_forecasts",
"pmdl_weight.compute_pmdl_weight",
"numpy.array",
"exponential_modeling.linear_fit",
"exponential_modeling.leave_t_day_out",
"exponential_modeling.fit_and_predict_shared_exponential",
"exponential_modeling.exponential_fit"
] |
[((1423, 1436), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (1431, 1436), True, 'import numpy as np\n'), ((5002, 5015), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (5010, 5015), True, 'import numpy as np\n'), ((6580, 6653), 'pmdl_weight.compute_pmdl_weight', 'pmdl_weight.compute_pmdl_weight', (['use_df'], {'methods': 'methods', 'outcome': 'outcome'}), '(use_df, methods=methods, outcome=outcome)\n', (6611, 6653), False, 'import pmdl_weight\n'), ((7423, 7436), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (7431, 7436), True, 'import numpy as np\n'), ((3256, 3323), 'naive_autoreg_baselines.train_and_evaluate_model', 'naive_autoreg_baselines.train_and_evaluate_model', (['train_df', 'test_df'], {}), '(train_df, test_df)\n', (3304, 3323), False, 'import naive_autoreg_baselines\n'), ((3338, 3407), 'naive_autoreg_baselines.make_predictions', 'naive_autoreg_baselines.make_predictions', (['test_df', 'model', 'best_window'], {}), '(test_df, model, best_window)\n', (3378, 3407), False, 'import naive_autoreg_baselines\n'), ((6491, 6547), 'exponential_modeling.leave_t_day_out', 'exponential_modeling.leave_t_day_out', (['df', 'target_day[-1]'], {}), '(df, target_day[-1])\n', (6527, 6547), False, 'import exponential_modeling\n'), ((7906, 8026), 'exponential_modeling.get_exponential_forecasts', 'exponential_modeling.get_exponential_forecasts', ([], {'df': 'df', 'outcome': 'outcome', 'target_day': 'target_day', 'output_key': 'output_key'}), '(df=df, outcome=outcome,\n target_day=target_day, output_key=output_key)\n', (7952, 8026), False, 'import exponential_modeling\n'), ((3461, 3555), 'exponential_modeling.exponential_fit', 'exponential_modeling.exponential_fit', (['df[outcome].values'], {'mode': 'mode', 'target_day': 'target_day'}), '(df[outcome].values, mode=mode,\n target_day=target_day)\n', (3497, 3555), False, 'import exponential_modeling\n'), ((6856, 6886), 'numpy.array', 'np.array', (['weights[model_index]'], {}), '(weights[model_index])\n', (6864, 6886), True, 'import numpy as np\n'), ((8293, 8418), 'exponential_modeling.fit_and_predict_shared_exponential', 'exponential_modeling.fit_and_predict_shared_exponential', (['df'], {'mode': '"""predict_future"""', 'demographic_vars': '[]', 'outcome': 'outcome'}), "(df, mode=\n 'predict_future', demographic_vars=[], outcome=outcome)\n", (8348, 8418), False, 'import exponential_modeling\n'), ((3835, 3925), 'exponential_modeling.linear_fit', 'exponential_modeling.linear_fit', (['df[outcome].values'], {'mode': 'mode', 'target_day': 'target_day'}), '(df[outcome].values, mode=mode, target_day=\n target_day)\n', (3866, 3925), False, 'import exponential_modeling\n'), ((8785, 8924), 'exponential_modeling.fit_and_predict_shared_exponential', 'exponential_modeling.fit_and_predict_shared_exponential', (['df'], {'mode': '"""predict_future"""', 'demographic_vars': 'demographic_vars', 'outcome': 'outcome'}), "(df, mode=\n 'predict_future', demographic_vars=demographic_vars, outcome=outcome)\n", (8840, 8924), False, 'import exponential_modeling\n'), ((4383, 4528), 'exponential_modeling.fit_and_predict_shared_exponential', 'exponential_modeling.fit_and_predict_shared_exponential', (['df', 'mode'], {'outcome': 'outcome', 'demographic_vars': 'demographic_vars', 'target_day': 'target_day'}), '(df, mode, outcome=\n outcome, demographic_vars=demographic_vars, target_day=target_day)\n', (4438, 4528), False, 'import exponential_modeling\n'), ((7130, 7167), 'numpy.array', 'np.array', (['predictions[model_index][i]'], {}), '(predictions[model_index][i])\n', 
(7138, 7167), True, 'import numpy as np\n')]
|
"""Tests for the design module"""
import numpy as np
import turbigen.compflow as cf
from turbigen import design
# Set up test data
# Ranges of velocity triangle parameters covering the classic Smith chart
phi = np.linspace(0.4, 1.2, 5)
psi = np.linspace(0.8, 2.4, 5)
# "Reasonable" range of reaction (usually close to Lam = 0.5 in gas turbines)
# Known limitation: does not converge for very high reaction Lam > 0.8
Lam = np.linspace(0.3, 0.7, 5)
# Other parameters
Al1 = 10.0
Ma2 = 0.6
ga = 1.33
eta = 0.9
Lam_ref = 0.5
Al_range = np.linspace(-30.0, 30.0, 11)
Vx_rat = (0.9, 1.2)
Ma_low = 0.01
eta_ideal = 1.0
eta_all = (0.7, 0.9)
htr = 0.9
cpTo1 = 1.0e6
Omega = 2.0 * np.pi * 50.0
# Function to initialise a test set of geometries. We only do this once, when
# called by a test function, otherwise retrieve from a module attribute.
geometries = {}
Lam_target = []
Al_target = []
def get_geometries(kind):
if not kind in geometries:
geometries[kind] = []
if kind == "datum":
for phii in phi:
for psii in psi:
for Lami in Lam:
for etai in eta_all:
Lam_target.append(Lami)
geometries[kind].append(design.nondim_stage_from_Lam(phii, psii, Lami, Al1, Ma2, ga, etai, Vx_rat))
elif kind == "incompressible":
for phii in phi:
for psii in psi:
for Al1i in Al_range:
geometries[kind].append(design.nondim_stage_from_Al(phii, psii, (Al1i, Al1i), Ma_low, ga, eta_ideal))
elif kind == "repeating":
for phii in phi:
for psii in psi:
for Al1i in Al_range:
geometries[kind].append(design.nondim_stage_from_Al(phii, psii, (Al1i, Al1i), Ma2, ga, eta))
elif kind == "angles":
for phii in phi:
for psii in psi:
for Al1i in Al_range:
for Al3i in Al_range:
Alnow = (Al1i, Al3i)
Al_target.append(Alnow)
geometries[kind].append(design.nondim_stage_from_Al(phii, psii, Alnow, Ma2, ga, eta))
return geometries[kind]
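# Illustrative only (not used by the tests below): a single nondimensional stage can
# also be generated directly, bypassing the cached parameter sweep above. The argument
# values here are arbitrary points taken from the ranges defined at the top of the file.
def _example_single_geometry():
    return design.nondim_stage_from_Lam(0.8, 1.6, Lam_ref, Al1, Ma2, ga, eta, Vx_rat)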
# Begin test functions
def test_Zweifel():
"""Verify Zweifel pitch-to-chord for low-speed lossless repeating stages."""
for stg in get_geometries('incompressible'):
# Evaluate pitch using built in function
Z = 0.8
s_c_out = np.array(design.pitch_Zweifel(stg, (Z, Z)))
# Evaluate low-speed lossless approximation
Alr = np.radians(stg.Al)
s_c_stator = (
Z
/ 2.0
/ (np.cos(Alr[1]) ** 2.0)
/ (np.tan(Alr[1]) - np.tan(Alr[0]))
)
Alrelr = np.radians(stg.Alrel)
s_c_rotor = (
Z
/ 2.0
/ (np.cos(Alrelr[2]) ** 2.0)
/ np.abs(np.tan(Alrelr[2]) - np.tan(Alrelr[1]))
)
# Check that the two are within a tolerance
assert np.all(
np.abs(s_c_out - np.array((s_c_stator, s_c_rotor))) < 1e-4
)
def test_circulation_coeff():
"""Verify circulation pitch-to-chord for low-speed lossless repeating stages."""
for stg in get_geometries('incompressible'):
# Evaluate pitch using built in function
C0 = 0.65
s_c_out = np.array(design.pitch_circulation(stg, C0))
print(s_c_out)
# Evaluate low-speed lossless approximation
Alr = np.radians(stg.Al[:2])
s_c_stator = (
C0
* design._integrate_length(Alr)
/ np.cos(Alr[1])
/ np.abs(np.tan(Alr[0]) - np.tan(Alr[1]))
)
Alrelr = np.radians(stg.Alrel[1:])
s_c_rotor = (
C0
* design._integrate_length(Alrelr)
/ np.cos(Alrelr[1])
/ np.abs(np.tan(Alrelr[0]) - np.tan(Alrelr[1]))
)
print((s_c_stator, s_c_rotor))
# Check that the two are within a tolerance
assert np.all(
np.abs(s_c_out - np.array((s_c_stator, s_c_rotor))) < 1e-4
)
def test_repeating():
"""Verify analytically some repeating stage velocity triangles."""
for stg in get_geometries('repeating'):
psi_out = 2.0 * (1.0 - stg.Lam - stg.phi * np.tan(np.radians(stg.Al[0])))
assert np.isclose(stg.psi, psi_out)
def test_mass():
"""Check for mass conservation."""
for stg in get_geometries("datum"):
mdot_out = (
cf.mcpTo_APo_from_Ma(stg.Ma, ga)
* stg.Ax_Ax1
* stg.Po_Po1
* np.cos(np.radians(stg.Al))
/ np.sqrt(stg.To_To1)
)
assert np.isclose(*mdot_out)
def test_Lam():
"""Check target reaction is achieved by the yaw angle iteration."""
for stg, Lami in zip(get_geometries("datum"), Lam_target):
assert np.isclose(stg.Lam, Lami)
def test_Vx():
"""Verify that the axial velocity is as required."""
Vx_rat_target = np.insert(Vx_rat, 1, 1)
for stg in get_geometries('datum'):
V_cpTo = cf.V_cpTo_from_Ma(stg.Ma, ga) * np.sqrt(stg.To_To1)
Vx_cpTo = V_cpTo * np.cos(np.radians(stg.Al))
Vx_U = Vx_cpTo / stg.U_sqrt_cpTo1
Vx_rat_out = Vx_U / stg.phi
assert np.all(np.isclose(Vx_rat_out, Vx_rat_target))
def test_euler():
"""Verify that the Euler's work equation is satisfied."""
for stg in get_geometries('datum'):
V_cpTo = cf.V_cpTo_from_Ma(stg.Ma, ga) * np.sqrt(stg.To_To1)
Vt_cpTo = V_cpTo * np.sin(np.radians(stg.Al))
Vt_U = Vt_cpTo / stg.U_sqrt_cpTo1
dVt_U = Vt_U[1] - Vt_U[2]
assert np.all(np.isclose(dVt_U, stg.psi))
def test_loss():
"""Check that polytropic efficiency, loss coeffs and Po are correct."""
for stg in get_geometries('datum'):
# Check efficiency
eta_out = (
np.log(stg.To_To1[-1]) / np.log(stg.Po_Po1[-1]) * ga / (ga - 1.0)
)
assert np.isclose(eta_out, stg.eta)
# Check loss coeffs
# Note compressor definition using inlet dyn head
Po2_Po1 = stg.Po_Po1[1]
Po3_Po2_rel = (
cf.Po_P_from_Ma(stg.Marel[2], ga)
/ cf.Po_P_from_Ma(stg.Marel[1], ga)
* cf.Po_P_from_Ma(stg.Ma[1], ga)
/ cf.Po_P_from_Ma(stg.Ma[2], ga)
* stg.Po_Po1[2]
/ stg.Po_Po1[1]
)
Po1_P1 = cf.Po_P_from_Ma(stg.Ma[0], ga)
Po2_P2_rel = cf.Po_P_from_Ma(stg.Marel[1], ga)
Yp_stator_out = (1.0 - Po2_Po1) / (1.0 - 1.0 / Po1_P1)
assert np.isclose(Yp_stator_out, stg.Yp[0])
Yp_rotor_out = (1.0 - Po3_Po2_rel) / (1.0 - 1.0 / Po2_P2_rel)
assert np.isclose(Yp_rotor_out, stg.Yp[1])
def test_psi():
"""Check that stage loading coefficient is correct."""
for stg in get_geometries('datum'):
psi_out = (1.0 - stg.To_To1[2]) / stg.U_sqrt_cpTo1 ** 2.0
assert np.isclose(stg.psi, psi_out)
def test_Al():
"""Check that inlet and exit yaw angles are as specified."""
for stg, Alnow in zip(get_geometries('angles'),Al_target):
        assert np.all(np.isclose(np.array(stg.Al)[[0, 2]], Alnow))
def test_valid():
"""Check that output data is always physically sensible."""
for stg in get_geometries('datum'):
# No nans or infinities
for xi in stg:
assert np.all(np.isfinite(xi))
# All variables excluding flow angles should be non-negative
for vi, xi in stg._asdict().items():
if vi not in ["Al", "Alrel", "Vt_U", "Vtrel_U"]:
assert np.all(np.array(xi) >= 0.0)
# Flow angles less than 90 degrees
for vi in ["Al", "Alrel"]:
assert np.all(np.abs(getattr(stg, vi)) < 90.0)
# No diverging annuli (for these designs Vx=const)
# assert np.all(np.array(stg.Ax_Ax1) >= 1.0)
def test_annulus():
"""Ensure that annulus lines are created successfully."""
for stg in get_geometries('datum'):
rm, Dr = design.annulus_line(stg, htr, cpTo1, Omega)
# Basic validity checks
assert np.all(rm > 0.0)
assert np.all(Dr > 0.0)
assert np.all(rm > Dr)
# Verify that U/sqrt(cpTo1) is correct
U = Omega * rm
assert np.isclose(stg.U_sqrt_cpTo1, U / np.sqrt(cpTo1))
# Verify hub-to-tip ratio
rt = rm + Dr[1] / 2.0
rh = rm - Dr[1] / 2.0
assert np.isclose(htr, rh / rt)
def test_chord():
"""Verify chord calculation with incompressible cases."""
To1 = 300.0
mu = design.muref * (To1 / design.Tref) ** design.expon
cp = 1150.0
rgas = cp / ga * (ga - 1)
cpTo1_inc = cp * To1
Po1 = 1.0e5
Re = 4e3
tol = Re * 0.001
for stg in get_geometries('incompressible'):
cx = design.chord_from_Re(stg, Re, cpTo1_inc, Po1, rgas)
V2 = cf.V_cpTo_from_Ma(stg.Ma[1], ga) * np.sqrt(cpTo1_inc)
rho2 = Po1 / rgas / To1
Re_out = rho2 * V2 * cx / mu
assert np.abs(Re - Re_out) < tol
def test_free_vortex():
"""Verify that vortex distributions have constant angular momentum."""
for stg in get_geometries('datum'):
# Generate stage with annulus line
rm, Dr = design.annulus_line(stg, htr, cpTo1, Omega)
# Make radius ratios
rh = rm - Dr / 2.0
rc = rm + Dr / 2.0
r_rm = (
np.stack([np.linspace(rhi, rci, 20) for rhi, rci in zip(rh, rc)])
/ rm
)
# Run through the free-vortex functions with no deviation
chi_vane, chi_blade = design.free_vortex(stg, r_rm, (0.0, 0.0))
# Check angular momentum is constant to within tolerance
tol = 1e-10
mom_vane = r_rm[:2, :] * np.tan(np.radians(chi_vane))
assert np.all(np.ptp(mom_vane, axis=1) < tol)
mom_blade = r_rm[2:, :] * (
r_rm[2:, :] / stg.phi + np.tan(np.radians(chi_blade))
)
assert np.all(np.ptp(mom_blade, axis=1) < tol)
def test_deviation():
"""Verify that deviation goes in the correct direction."""
for stg in get_geometries('datum'):
rm, Dr = design.annulus_line(stg, htr, cpTo1, Omega)
# Make radius ratios
rh = rm - Dr / 2.0
rc = rm + Dr / 2.0
r_rm = (
np.stack([np.linspace(rhi, rci, 20) for rhi, rci in zip(rh, rc)])
/ rm
)
# Loop over deviations
dev = [0.0, 1.0]
chi_all = np.stack(
[design.free_vortex(stg, r_rm, (devi, devi)) for devi in dev]
)
chi_vane = chi_all[:, 0, :, :]
chi_blade = chi_all[:, 1, :, :]
# Our sign conventions mean that turning is
# +ve through vane, -ve through rotor
# So more deviation should mean that outlet flow angle
# reduces for vane, increases for rotor
# But we aim to counteract this effect by moving metal
# So with more deviation, the metal angle must
# increase for vane, decrease for blade
assert np.all(
np.isclose(np.diff(chi_vane[:, 1, :], 1, 0), np.diff(dev, 1))
)
assert np.all(
np.isclose(np.diff(chi_blade[:, 1, :], 1, 0), -np.diff(dev, 1))
)
|
[
"numpy.radians",
"numpy.ptp",
"turbigen.design.nondim_stage_from_Lam",
"numpy.sqrt",
"turbigen.compflow.V_cpTo_from_Ma",
"numpy.log",
"turbigen.design.pitch_circulation",
"turbigen.design.pitch_Zweifel",
"numpy.array",
"numpy.isfinite",
"turbigen.compflow.Po_P_from_Ma",
"numpy.diff",
"numpy.linspace",
"turbigen.design._integrate_length",
"turbigen.design.chord_from_Re",
"numpy.abs",
"turbigen.design.free_vortex",
"numpy.cos",
"turbigen.design.nondim_stage_from_Al",
"numpy.insert",
"numpy.isclose",
"numpy.tan",
"turbigen.compflow.mcpTo_APo_from_Ma",
"numpy.all",
"turbigen.design.annulus_line"
] |
[((213, 237), 'numpy.linspace', 'np.linspace', (['(0.4)', '(1.2)', '(5)'], {}), '(0.4, 1.2, 5)\n', (224, 237), True, 'import numpy as np\n'), ((244, 268), 'numpy.linspace', 'np.linspace', (['(0.8)', '(2.4)', '(5)'], {}), '(0.8, 2.4, 5)\n', (255, 268), True, 'import numpy as np\n'), ((425, 449), 'numpy.linspace', 'np.linspace', (['(0.3)', '(0.7)', '(5)'], {}), '(0.3, 0.7, 5)\n', (436, 449), True, 'import numpy as np\n'), ((536, 564), 'numpy.linspace', 'np.linspace', (['(-30.0)', '(30.0)', '(11)'], {}), '(-30.0, 30.0, 11)\n', (547, 564), True, 'import numpy as np\n'), ((5074, 5097), 'numpy.insert', 'np.insert', (['Vx_rat', '(1)', '(1)'], {}), '(Vx_rat, 1, 1)\n', (5083, 5097), True, 'import numpy as np\n'), ((2646, 2664), 'numpy.radians', 'np.radians', (['stg.Al'], {}), '(stg.Al)\n', (2656, 2664), True, 'import numpy as np\n'), ((2833, 2854), 'numpy.radians', 'np.radians', (['stg.Alrel'], {}), '(stg.Alrel)\n', (2843, 2854), True, 'import numpy as np\n'), ((3562, 3584), 'numpy.radians', 'np.radians', (['stg.Al[:2]'], {}), '(stg.Al[:2])\n', (3572, 3584), True, 'import numpy as np\n'), ((3777, 3802), 'numpy.radians', 'np.radians', (['stg.Alrel[1:]'], {}), '(stg.Alrel[1:])\n', (3787, 3802), True, 'import numpy as np\n'), ((4421, 4449), 'numpy.isclose', 'np.isclose', (['stg.psi', 'psi_out'], {}), '(stg.psi, psi_out)\n', (4431, 4449), True, 'import numpy as np\n'), ((4764, 4785), 'numpy.isclose', 'np.isclose', (['*mdot_out'], {}), '(*mdot_out)\n', (4774, 4785), True, 'import numpy as np\n'), ((4954, 4979), 'numpy.isclose', 'np.isclose', (['stg.Lam', 'Lami'], {}), '(stg.Lam, Lami)\n', (4964, 4979), True, 'import numpy as np\n'), ((6056, 6084), 'numpy.isclose', 'np.isclose', (['eta_out', 'stg.eta'], {}), '(eta_out, stg.eta)\n', (6066, 6084), True, 'import numpy as np\n'), ((6495, 6525), 'turbigen.compflow.Po_P_from_Ma', 'cf.Po_P_from_Ma', (['stg.Ma[0]', 'ga'], {}), '(stg.Ma[0], ga)\n', (6510, 6525), True, 'import turbigen.compflow as cf\n'), ((6547, 6580), 'turbigen.compflow.Po_P_from_Ma', 'cf.Po_P_from_Ma', (['stg.Marel[1]', 'ga'], {}), '(stg.Marel[1], ga)\n', (6562, 6580), True, 'import turbigen.compflow as cf\n'), ((6660, 6696), 'numpy.isclose', 'np.isclose', (['Yp_stator_out', 'stg.Yp[0]'], {}), '(Yp_stator_out, stg.Yp[0])\n', (6670, 6696), True, 'import numpy as np\n'), ((6782, 6817), 'numpy.isclose', 'np.isclose', (['Yp_rotor_out', 'stg.Yp[1]'], {}), '(Yp_rotor_out, stg.Yp[1])\n', (6792, 6817), True, 'import numpy as np\n'), ((7016, 7044), 'numpy.isclose', 'np.isclose', (['stg.psi', 'psi_out'], {}), '(stg.psi, psi_out)\n', (7026, 7044), True, 'import numpy as np\n'), ((8101, 8144), 'turbigen.design.annulus_line', 'design.annulus_line', (['stg', 'htr', 'cpTo1', 'Omega'], {}), '(stg, htr, cpTo1, Omega)\n', (8120, 8144), False, 'from turbigen import design\n'), ((8193, 8209), 'numpy.all', 'np.all', (['(rm > 0.0)'], {}), '(rm > 0.0)\n', (8199, 8209), True, 'import numpy as np\n'), ((8225, 8241), 'numpy.all', 'np.all', (['(Dr > 0.0)'], {}), '(Dr > 0.0)\n', (8231, 8241), True, 'import numpy as np\n'), ((8257, 8272), 'numpy.all', 'np.all', (['(rm > Dr)'], {}), '(rm > Dr)\n', (8263, 8272), True, 'import numpy as np\n'), ((8518, 8542), 'numpy.isclose', 'np.isclose', (['htr', '(rh / rt)'], {}), '(htr, rh / rt)\n', (8528, 8542), True, 'import numpy as np\n'), ((8886, 8937), 'turbigen.design.chord_from_Re', 'design.chord_from_Re', (['stg', 'Re', 'cpTo1_inc', 'Po1', 'rgas'], {}), '(stg, Re, cpTo1_inc, Po1, rgas)\n', (8906, 8937), False, 'from turbigen import design\n'), ((9318, 9361), 
'turbigen.design.annulus_line', 'design.annulus_line', (['stg', 'htr', 'cpTo1', 'Omega'], {}), '(stg, htr, cpTo1, Omega)\n', (9337, 9361), False, 'from turbigen import design\n'), ((9665, 9706), 'turbigen.design.free_vortex', 'design.free_vortex', (['stg', 'r_rm', '(0.0, 0.0)'], {}), '(stg, r_rm, (0.0, 0.0))\n', (9683, 9706), False, 'from turbigen import design\n'), ((10220, 10263), 'turbigen.design.annulus_line', 'design.annulus_line', (['stg', 'htr', 'cpTo1', 'Omega'], {}), '(stg, htr, cpTo1, Omega)\n', (10239, 10263), False, 'from turbigen import design\n'), ((2544, 2577), 'turbigen.design.pitch_Zweifel', 'design.pitch_Zweifel', (['stg', '(Z, Z)'], {}), '(stg, (Z, Z))\n', (2564, 2577), False, 'from turbigen import design\n'), ((3437, 3470), 'turbigen.design.pitch_circulation', 'design.pitch_circulation', (['stg', 'C0'], {}), '(stg, C0)\n', (3461, 3470), False, 'from turbigen import design\n'), ((4719, 4738), 'numpy.sqrt', 'np.sqrt', (['stg.To_To1'], {}), '(stg.To_To1)\n', (4726, 4738), True, 'import numpy as np\n'), ((5155, 5184), 'turbigen.compflow.V_cpTo_from_Ma', 'cf.V_cpTo_from_Ma', (['stg.Ma', 'ga'], {}), '(stg.Ma, ga)\n', (5172, 5184), True, 'import turbigen.compflow as cf\n'), ((5187, 5206), 'numpy.sqrt', 'np.sqrt', (['stg.To_To1'], {}), '(stg.To_To1)\n', (5194, 5206), True, 'import numpy as np\n'), ((5361, 5398), 'numpy.isclose', 'np.isclose', (['Vx_rat_out', 'Vx_rat_target'], {}), '(Vx_rat_out, Vx_rat_target)\n', (5371, 5398), True, 'import numpy as np\n'), ((5539, 5568), 'turbigen.compflow.V_cpTo_from_Ma', 'cf.V_cpTo_from_Ma', (['stg.Ma', 'ga'], {}), '(stg.Ma, ga)\n', (5556, 5568), True, 'import turbigen.compflow as cf\n'), ((5571, 5590), 'numpy.sqrt', 'np.sqrt', (['stg.To_To1'], {}), '(stg.To_To1)\n', (5578, 5590), True, 'import numpy as np\n'), ((5743, 5769), 'numpy.isclose', 'np.isclose', (['dVt_U', 'stg.psi'], {}), '(dVt_U, stg.psi)\n', (5753, 5769), True, 'import numpy as np\n'), ((8951, 8983), 'turbigen.compflow.V_cpTo_from_Ma', 'cf.V_cpTo_from_Ma', (['stg.Ma[1]', 'ga'], {}), '(stg.Ma[1], ga)\n', (8968, 8983), True, 'import turbigen.compflow as cf\n'), ((8986, 9004), 'numpy.sqrt', 'np.sqrt', (['cpTo1_inc'], {}), '(cpTo1_inc)\n', (8993, 9004), True, 'import numpy as np\n'), ((9089, 9108), 'numpy.abs', 'np.abs', (['(Re - Re_out)'], {}), '(Re - Re_out)\n', (9095, 9108), True, 'import numpy as np\n'), ((2773, 2787), 'numpy.tan', 'np.tan', (['Alr[1]'], {}), '(Alr[1])\n', (2779, 2787), True, 'import numpy as np\n'), ((2790, 2804), 'numpy.tan', 'np.tan', (['Alr[0]'], {}), '(Alr[0])\n', (2796, 2804), True, 'import numpy as np\n'), ((3681, 3695), 'numpy.cos', 'np.cos', (['Alr[1]'], {}), '(Alr[1])\n', (3687, 3695), True, 'import numpy as np\n'), ((3901, 3918), 'numpy.cos', 'np.cos', (['Alrelr[1]'], {}), '(Alrelr[1])\n', (3907, 3918), True, 'import numpy as np\n'), ((5241, 5259), 'numpy.radians', 'np.radians', (['stg.Al'], {}), '(stg.Al)\n', (5251, 5259), True, 'import numpy as np\n'), ((5625, 5643), 'numpy.radians', 'np.radians', (['stg.Al'], {}), '(stg.Al)\n', (5635, 5643), True, 'import numpy as np\n'), ((7468, 7483), 'numpy.isfinite', 'np.isfinite', (['xi'], {}), '(xi)\n', (7479, 7483), True, 'import numpy as np\n'), ((8392, 8406), 'numpy.sqrt', 'np.sqrt', (['cpTo1'], {}), '(cpTo1)\n', (8399, 8406), True, 'import numpy as np\n'), ((9833, 9853), 'numpy.radians', 'np.radians', (['chi_vane'], {}), '(chi_vane)\n', (9843, 9853), True, 'import numpy as np\n'), ((9877, 9901), 'numpy.ptp', 'np.ptp', (['mom_vane'], {'axis': '(1)'}), '(mom_vane, axis=1)\n', (9883, 9901), True, 'import 
numpy as np\n'), ((10043, 10068), 'numpy.ptp', 'np.ptp', (['mom_blade'], {'axis': '(1)'}), '(mom_blade, axis=1)\n', (10049, 10068), True, 'import numpy as np\n'), ((10568, 10611), 'turbigen.design.free_vortex', 'design.free_vortex', (['stg', 'r_rm', '(devi, devi)'], {}), '(stg, r_rm, (devi, devi))\n', (10586, 10611), False, 'from turbigen import design\n'), ((11140, 11172), 'numpy.diff', 'np.diff', (['chi_vane[:, 1, :]', '(1)', '(0)'], {}), '(chi_vane[:, 1, :], 1, 0)\n', (11147, 11172), True, 'import numpy as np\n'), ((11174, 11189), 'numpy.diff', 'np.diff', (['dev', '(1)'], {}), '(dev, 1)\n', (11181, 11189), True, 'import numpy as np\n'), ((11247, 11280), 'numpy.diff', 'np.diff', (['chi_blade[:, 1, :]', '(1)', '(0)'], {}), '(chi_blade[:, 1, :], 1, 0)\n', (11254, 11280), True, 'import numpy as np\n'), ((2735, 2749), 'numpy.cos', 'np.cos', (['Alr[1]'], {}), '(Alr[1])\n', (2741, 2749), True, 'import numpy as np\n'), ((2924, 2941), 'numpy.cos', 'np.cos', (['Alrelr[2]'], {}), '(Alrelr[2])\n', (2930, 2941), True, 'import numpy as np\n'), ((2971, 2988), 'numpy.tan', 'np.tan', (['Alrelr[2]'], {}), '(Alrelr[2])\n', (2977, 2988), True, 'import numpy as np\n'), ((2991, 3008), 'numpy.tan', 'np.tan', (['Alrelr[1]'], {}), '(Alrelr[1])\n', (2997, 3008), True, 'import numpy as np\n'), ((3637, 3666), 'turbigen.design._integrate_length', 'design._integrate_length', (['Alr'], {}), '(Alr)\n', (3661, 3666), False, 'from turbigen import design\n'), ((3717, 3731), 'numpy.tan', 'np.tan', (['Alr[0]'], {}), '(Alr[0])\n', (3723, 3731), True, 'import numpy as np\n'), ((3734, 3748), 'numpy.tan', 'np.tan', (['Alr[1]'], {}), '(Alr[1])\n', (3740, 3748), True, 'import numpy as np\n'), ((3854, 3886), 'turbigen.design._integrate_length', 'design._integrate_length', (['Alrelr'], {}), '(Alrelr)\n', (3878, 3886), False, 'from turbigen import design\n'), ((3940, 3957), 'numpy.tan', 'np.tan', (['Alrelr[0]'], {}), '(Alrelr[0])\n', (3946, 3957), True, 'import numpy as np\n'), ((3960, 3977), 'numpy.tan', 'np.tan', (['Alrelr[1]'], {}), '(Alrelr[1])\n', (3966, 3977), True, 'import numpy as np\n'), ((4685, 4703), 'numpy.radians', 'np.radians', (['stg.Al'], {}), '(stg.Al)\n', (4695, 4703), True, 'import numpy as np\n'), ((5965, 5987), 'numpy.log', 'np.log', (['stg.To_To1[-1]'], {}), '(stg.To_To1[-1])\n', (5971, 5987), True, 'import numpy as np\n'), ((5990, 6012), 'numpy.log', 'np.log', (['stg.Po_Po1[-1]'], {}), '(stg.Po_Po1[-1])\n', (5996, 6012), True, 'import numpy as np\n'), ((6381, 6411), 'turbigen.compflow.Po_P_from_Ma', 'cf.Po_P_from_Ma', (['stg.Ma[2]', 'ga'], {}), '(stg.Ma[2], ga)\n', (6396, 6411), True, 'import turbigen.compflow as cf\n'), ((7225, 7241), 'numpy.array', 'np.array', (['stg.Al'], {}), '(stg.Al)\n', (7233, 7241), True, 'import numpy as np\n'), ((9485, 9510), 'numpy.linspace', 'np.linspace', (['rhi', 'rci', '(20)'], {}), '(rhi, rci, 20)\n', (9496, 9510), True, 'import numpy as np\n'), ((9988, 10009), 'numpy.radians', 'np.radians', (['chi_blade'], {}), '(chi_blade)\n', (9998, 10009), True, 'import numpy as np\n'), ((10387, 10412), 'numpy.linspace', 'np.linspace', (['rhi', 'rci', '(20)'], {}), '(rhi, rci, 20)\n', (10398, 10412), True, 'import numpy as np\n'), ((11283, 11298), 'numpy.diff', 'np.diff', (['dev', '(1)'], {}), '(dev, 1)\n', (11290, 11298), True, 'import numpy as np\n'), ((3125, 3158), 'numpy.array', 'np.array', (['(s_c_stator, s_c_rotor)'], {}), '((s_c_stator, s_c_rotor))\n', (3133, 3158), True, 'import numpy as np\n'), ((4133, 4166), 'numpy.array', 'np.array', (['(s_c_stator, s_c_rotor)'], {}), 
'((s_c_stator, s_c_rotor))\n', (4141, 4166), True, 'import numpy as np\n'), ((4382, 4403), 'numpy.radians', 'np.radians', (['stg.Al[0]'], {}), '(stg.Al[0])\n', (4392, 4403), True, 'import numpy as np\n'), ((4581, 4613), 'turbigen.compflow.mcpTo_APo_from_Ma', 'cf.mcpTo_APo_from_Ma', (['stg.Ma', 'ga'], {}), '(stg.Ma, ga)\n', (4601, 4613), True, 'import turbigen.compflow as cf\n'), ((6336, 6366), 'turbigen.compflow.Po_P_from_Ma', 'cf.Po_P_from_Ma', (['stg.Ma[1]', 'ga'], {}), '(stg.Ma[1], ga)\n', (6351, 6366), True, 'import turbigen.compflow as cf\n'), ((7690, 7702), 'numpy.array', 'np.array', (['xi'], {}), '(xi)\n', (7698, 7702), True, 'import numpy as np\n'), ((6240, 6273), 'turbigen.compflow.Po_P_from_Ma', 'cf.Po_P_from_Ma', (['stg.Marel[2]', 'ga'], {}), '(stg.Marel[2], ga)\n', (6255, 6273), True, 'import turbigen.compflow as cf\n'), ((6288, 6321), 'turbigen.compflow.Po_P_from_Ma', 'cf.Po_P_from_Ma', (['stg.Marel[1]', 'ga'], {}), '(stg.Marel[1], ga)\n', (6303, 6321), True, 'import turbigen.compflow as cf\n'), ((1251, 1325), 'turbigen.design.nondim_stage_from_Lam', 'design.nondim_stage_from_Lam', (['phii', 'psii', 'Lami', 'Al1', 'Ma2', 'ga', 'etai', 'Vx_rat'], {}), '(phii, psii, Lami, Al1, Ma2, ga, etai, Vx_rat)\n', (1279, 1325), False, 'from turbigen import design\n'), ((1518, 1594), 'turbigen.design.nondim_stage_from_Al', 'design.nondim_stage_from_Al', (['phii', 'psii', '(Al1i, Al1i)', 'Ma_low', 'ga', 'eta_ideal'], {}), '(phii, psii, (Al1i, Al1i), Ma_low, ga, eta_ideal)\n', (1545, 1594), False, 'from turbigen import design\n'), ((1782, 1849), 'turbigen.design.nondim_stage_from_Al', 'design.nondim_stage_from_Al', (['phii', 'psii', '(Al1i, Al1i)', 'Ma2', 'ga', 'eta'], {}), '(phii, psii, (Al1i, Al1i), Ma2, ga, eta)\n', (1809, 1849), False, 'from turbigen import design\n'), ((2185, 2245), 'turbigen.design.nondim_stage_from_Al', 'design.nondim_stage_from_Al', (['phii', 'psii', 'Alnow', 'Ma2', 'ga', 'eta'], {}), '(phii, psii, Alnow, Ma2, ga, eta)\n', (2212, 2245), False, 'from turbigen import design\n')]
|
#!/usr/bin/env python
# coding: utf-8
# #<NAME>
# ## <b> Problem Description </b>
#
# ### This project aims to build a classification model to predict the sentiment of COVID-19 tweets. The tweets were pulled from Twitter and then manually tagged. Using Natural Language Processing, sentiment analysis is performed on the dataset. Additionally, machine learning algorithms are incorporated to evaluate the accuracy score and the classification predictions of the trained model.
#
# ### The following information is used:
# 1. Location
# 2. Tweet At
# 3. Original Tweet
# 4. Label
# ##Importing necessary libraries to build model
# In[63]:
import pandas as pd
import numpy as np
from numpy import percentile
import matplotlib.pyplot as plt
import seaborn as sns
import matplotlib
import tweepy
from textblob import TextBlob
import re # for regular expressions
import pandas as pd
pd.set_option("display.max_colwidth", 200)
import string
import branca.colormap as cm
import requests
import folium
from folium import plugins
from folium.plugins import HeatMap
import branca.colormap
import nltk # for text manipulation
from nltk.stem.porter import *
from nltk.tokenize import TweetTokenizer
from nltk.corpus import stopwords
from nltk import pos_tag, ne_chunk
from nltk.sentiment.vader import SentimentIntensityAnalyzer as sid
from wordcloud import WordCloud
from tqdm import tqdm, notebook
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from tqdm import tqdm
from gensim.models.doc2vec import LabeledSentence
import gensim
from sklearn.linear_model import LogisticRegression
from scipy import stats
from sklearn import metrics
from sklearn.metrics import mean_squared_error,mean_absolute_error, make_scorer,classification_report,confusion_matrix,accuracy_score,roc_auc_score,roc_curve
from sklearn.model_selection import train_test_split, cross_val_score, KFold
from sklearn.metrics import f1_score
# ##Extracting dataset and Reviewing Our Dataset
# In[4]:
df=pd.read_csv("https://raw.githubusercontent.com/gabrielpreda/covid-19-tweets/master/covid19_tweets.csv")
df.head()
# In[5]:
df.info()
# In[6]:
df.shape
# In[7]:
df.columns
# In[8]:
# There are 12220 unique locations from which the tweets came.
df['user_location'].value_counts()
# # Looking For Null Values
# In[9]:
missing_values = pd.DataFrame()
missing_values['column'] = df.columns
missing_values['percent'] = [round(100* df[col].isnull().sum() / len(df), 2) for col in df.columns]
missing_values = missing_values.sort_values('percent')
missing_values = missing_values[missing_values['percent']>0]
plt.figure(figsize=(15, 5))
sns.set(style='whitegrid', color_codes=True)
splot=sns.barplot(x='column', y='percent', data=missing_values)
for p in splot.patches:
splot.annotate(format(p.get_height(), '.2f'), (p.get_x() + p.get_width() / 2., p.get_height()), ha = 'center',
va = 'center', xytext = (0, 9), textcoords = 'offset points')
plt.xlabel("Column_Name", size=14, weight="bold")
plt.ylabel("Percentage", size=14, weight="bold")
plt.title("Percentage of missing values in column",fontweight="bold",size=17)
plt.show()
# ##Heat Map for missing values
# In[10]:
plt.figure(figsize=(17, 5))
sns.heatmap(df.isnull(), cbar=True, yticklabels=False)
plt.xlabel("Column_Name", size=14, weight="bold")
plt.title("Places of missing values in column",fontweight="bold",size=17)
plt.show()
# In[11]:
df.describe()
# In[12]:
sns.heatmap(df.corr())
# ##Unique Values In Each Feature Coulmn
# In[13]:
unique_df = pd.DataFrame()
unique_df['Features'] = df.columns
unique=[]
for i in df.columns:
unique.append(df[i].nunique())
unique_df['Uniques'] = unique
f, ax = plt.subplots(1,1, figsize=(15,7))
splot = sns.barplot(x=unique_df['Features'], y=unique_df['Uniques'], alpha=0.8)
for p in splot.patches:
splot.annotate(format(p.get_height(), '.0f'), (p.get_x() + p.get_width() / 2., p.get_height()), ha = 'center',
va = 'center', xytext = (0, 9), textcoords = 'offset points')
plt.title('Bar plot for number of unique values in each column',weight='bold', size=15)
plt.ylabel('#Unique values', size=12, weight='bold')
plt.xlabel('Features', size=12, weight='bold')
plt.xticks(rotation=90)
plt.show()
# ##Plot Of Top 15 Locations Of Tweet.
# In[14]:
loc_analysis = pd.DataFrame(df['user_location'].value_counts().sort_values(ascending=False))
loc_analysis = loc_analysis.rename(columns={'user_location':'count'})
# In[15]:
import plotly.graph_objects as go
# In[16]:
data = {
"values": loc_analysis['count'][:15],
"labels": loc_analysis.index[:15],
"domain": {"column": 0},
"name": "Location Name",
"hoverinfo":"label+percent+name",
"hole": .4,
"type": "pie"
}
layout = go.Layout(title="<b>Ratio on Location</b>", legend=dict(x=0.1, y=1.1, orientation="h"))
data = [data]
fig = go.Figure(data = data, layout = layout)
fig.update_layout(title_x=0.5)
fig.show()
# ##Detailed Analysis
# In[17]:
# Make a copy of dataframe before making any changes
tweets = df.copy()
# In[18]:
# Convert date columns to datetime data type from object
tweets['date'] = pd.to_datetime(tweets['date'])
tweets['user_created'] = pd.to_datetime(tweets['user_created'])
tweets['date_ext'] = tweets['date'].dt.date
# In[19]:
# Take care of nulls in location and description
tweets.user_location.fillna('Unknown', inplace=True)
tweets.user_description.fillna('Unknown', inplace=True)
tweets.source.fillna('Unknown', inplace=True)
tweets.hashtags.fillna('None', inplace=True)
# In[20]:
# Verify
tweets.info()
# #Data Preprocessing
# **A) Removing @user**
# In[21]:
# write function for removing @user
def remove_pattern(input_txt, pattern):
r = re.findall(pattern, input_txt)
for i in r:
input_txt = re.sub(i,'',input_txt)
return input_txt
# create new column with removed @user
df['clean_text'] = np.vectorize(remove_pattern)(df['text'], r'@[\w]*')
df.head(2)
# ##REMOVED HTTP AND URLS FROM TWEET
# In[22]:
import re
df['clean_text'] = df['clean_text'].apply(lambda x: re.split(r'https://.*', str(x))[0])
df.head(3)
# ##**B) Removing Punctuations, Numbers, and Special Characters**
# In[23]:
# remove special characters, numbers, punctuations
df['clean_text'] = df['clean_text'].str.replace('[^a-zA-Z#]+', ' ', regex=True)
# In[24]:
df.head(5)
# ##**C) Removing Short Words**
# In[25]:
# remove short words
df['clean_text'] = df['clean_text'].apply(lambda x: ' '.join([w for w in x.split() if len(w) > 2]))
df.head(2)
# ##**D) Tokenization**
# In[26]:
# create new variable tokenized tweet
tokenized_tweet = df['clean_text'].apply(lambda x: x.split())
df.head(2)
# ##**E) Stemming**
# In[27]:
from nltk.stem.porter import *
stemmer = PorterStemmer()
# apply stemmer for tokenized_tweet
tokenized_tweet = tokenized_tweet.apply(lambda x: [stemmer.stem(i) for i in x])
df.head(2)
# In[28]:
# join tokens into one sentence
for i in range(len(tokenized_tweet)):
tokenized_tweet[i] = ' '.join(tokenized_tweet[i])
# change df['clean_text'] to tokenized_tweet
# In[29]:
df['clean_text'] = tokenized_tweet
df.head(2)
# ##Story Generation and Visualization from Tweets
# #What are the most common words in the entire dataset?
#
# * What are the most common words in the dataset for negative and positive tweets, respectively?
#
# * How many hashtags are there in a tweet?
#
# * Which trends are associated with my dataset?
#
# * Which trends are associated with either of the sentiments? Are they compatible with the sentiments?
# **Understanding the common words used in the tweets: WordCloud**
# In[30]:
df.head(2)
# In[31]:
# create text from all tweets
all_words = ' '.join([text for text in df['clean_text']])
from wordcloud import WordCloud
wordcloud = WordCloud(width=800, height=500, random_state=21, max_font_size=110).generate(all_words)
plt.figure(figsize=(10, 7))
plt.imshow(wordcloud, interpolation="bilinear")
plt.axis('off')
plt.show()
# #**Extracting Features from Cleaned Tweets**
# ###Removing Stopwords
# In[32]:
nltk.download('stopwords')
# In[33]:
from nltk.corpus import stopwords
stop = stopwords.words('english')
# In[34]:
df['clean_text'] = df['clean_text'].apply(lambda x: ' '.join([word for word in x.split() if word not in stop]))
# In[35]:
df.head(2)
# ##Check and calculate sentiment of tweets
# In[36]:
#creates a function that determines subjectivity and polarity from the textblob package
def getTextSubjectivity(clean_text):
return TextBlob(clean_text).sentiment.subjectivity
def getTextPolarity(clean_text):
return TextBlob(clean_text).sentiment.polarity
#applies these functions to the dataframe
df['Subjectivity'] = df['clean_text'].apply(getTextSubjectivity)
df['Polarity'] = df['clean_text'].apply(getTextPolarity)
#builds a function to calculate and categorize each tweet as Negative, Neutral, and Positive
def getTextAnalysis(a):
if a < 0:
return "Negative"
elif a == 0:
return "Neutral"
else:
return "Positive"
#creates another column called Score and applies the function to the dataframe
df['Score'] = df['Polarity'].apply(getTextAnalysis)
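# In[ ]:
# Quick illustration (not in the original notebook) of the helper functions above:
# a clearly positive phrase gets a positive TextBlob polarity and is labelled accordingly.
print(getTextPolarity("great news"), getTextAnalysis(getTextPolarity("great news")))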
# In[37]:
#visualizes the data through a bar chart
labels = df.groupby('Score').count().index.values
values = df.groupby('Score').size().values
plt.bar(labels, values, color = ['red', 'blue', 'lime'])
plt.title(label = "Sentiment Analysis - 12/17/2020",
fontsize = '15')
#calculates percentage of positive, negative, and neutral tweets
positive = df[df['Score'] == 'Positive']
print(str(positive.shape[0]/(df.shape[0])*100) + " % of positive tweets")
neutral = df[df['Score'] == 'Neutral']
print(str(neutral.shape[0]/(df.shape[0])*100) + " % of neutral tweets")
negative = df[df['Score'] == 'Negative']
print(str(negative.shape[0]/(df.shape[0])*100) + " % of negative tweets")
# In[54]:
# Most trended hashtags
top10_hashtags = tweets.hashtags.str.lower().value_counts().nlargest(10)
# initiate the figure with it's size
fig = plt.figure(figsize = (10,5))
plt.barh(top10_hashtags.index, top10_hashtags.values)
plt.xlabel('# of Tweets')
plt.title("Tweets by hashtags", fontsize=16);
# In[66]:
# We are using Compound score to detect the tweet sentiment which is a metric that calculates the sum of
# all the lexicon ratings which have been normalized between
# -1(most extreme negative) and +1 (most extreme positive)
# positive: (compound score >= 0.05), negative : (compound score <= -0.05), neutral otherwise
get_ipython().system('pip install vaderSentiment')
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
sid = SentimentIntensityAnalyzer()
for index, row in tqdm(tweets.iterrows()): #tqdm
ss = sid.polarity_scores(row['text'])
if ss['compound'] >= 0.05 :
tweets.at[index,'sentiment'] = "Positive"
elif ss['compound'] <= - 0.05 :
tweets.at[index,'sentiment'] = "Negative"
else :
tweets.at[index,'sentiment'] = "Neutral"
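# In[ ]:
# For reference (illustrative only, not part of the original notebook):
# polarity_scores returns a dict of 'neg', 'neu', 'pos' and 'compound' scores for a
# single piece of text, which is what the thresholds above are applied to.
print(sid.polarity_scores("Stay safe and healthy during the pandemic!"))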
# #Tweets Sentiments Distribution plotted graphically after leveraging NLP
# In[67]:
# Show distribution of tweet sentiments
sentiment_dist = tweets.sentiment.value_counts()
plt.pie(sentiment_dist, labels=sentiment_dist.index, explode= (0.1,0,0),
colors=['yellowgreen', 'gold', 'lightcoral'],
autopct='%1.1f%%', shadow=True, startangle=140)
plt.title("Tweets\' Sentiment Distribution \n", fontsize=16, color='Black')
plt.axis('equal')
plt.tight_layout()
plt.show()
# In[68]:
# Function to filter top 10 tweets by sentiment
def top10AccountsBySentiment(sentiment):
df = tweets.query("sentiment==@sentiment")
top10 = df.groupby(by=["user_name"])['sentiment'].count().sort_values(ascending=False)[:10]
return(top10)
# In[69]:
# Top 10 tweets by each sentiment
top10_pos = top10AccountsBySentiment("Positive")
top10_neg = top10AccountsBySentiment("Negative")
top10_neu = top10AccountsBySentiment("Neutral")
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, squeeze=True, figsize=(16,8))
fig.suptitle('Top 10 Twitter Accounts \n', fontsize=20)
ax1.barh(top10_pos.index, top10_pos.values, color='yellowgreen')
ax1.set_title("\n\n Positive Tweets", fontsize=16)
ax2.barh(top10_neg.index, top10_neg.values, color='lightcoral')
ax2.set_title("\n\n Negative Tweets", fontsize=16)
ax3.barh(top10_neu.index, top10_neu.values, color='gold')
ax3.set_title("\n\n Neutral Tweets", fontsize=16);
fig.tight_layout()
fig.show()
# In[70]:
df.head(1)
# In[71]:
new_df=df[['clean_text','Score']]
# ##Splitting Our Dataset into Training and Testing Datasets (For Multiclass Classification)
# In[72]:
from sklearn.model_selection import train_test_split
train,valid = train_test_split(new_df,test_size = 0.2,random_state=0,stratify = new_df.Score.values) #stratification means that the train_test_split method returns training and test subsets that have the same proportions of class labels as the input dataset.
print("train shape : ", train.shape)
print("valid shape : ", valid.shape)
# # Use Of CountVectorizer For Multiclass Classification
# In[73]:
from sklearn.feature_extraction.text import CountVectorizer
from nltk.corpus import stopwords
stop = list(stopwords.words('english'))
vectorizer = CountVectorizer(decode_error = 'replace',stop_words = stop)
X_train = vectorizer.fit_transform(train.clean_text.values)
X_valid = vectorizer.transform(valid.clean_text.values)
y_train = train.Score.values
y_valid = valid.Score.values
print("X_train.shape : ", X_train.shape)
print("X_train.shape : ", X_valid.shape)
print("y_train.shape : ", y_train.shape)
print("y_valid.shape : ", y_valid.shape)
# ## Naive Bayes Classifier for MULTICLASS Classification
# In[74]:
from sklearn.naive_bayes import MultinomialNB
naiveByes_clf = MultinomialNB()
naiveByes_clf.fit(X_train,y_train)
NB_prediction = naiveByes_clf.predict(X_valid)
NB_accuracy = accuracy_score(y_valid,NB_prediction)
print("training accuracy Score : ",naiveByes_clf.score(X_train,y_train))
print("Validation accuracy Score : ",NB_accuracy )
print(classification_report(y_valid, NB_prediction))
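# In[ ]:
# Optional extra (a minimal sketch, not part of the original analysis): visualise the
# validation confusion matrix for the Naive Bayes model, reusing the metrics and
# plotting libraries already imported above.
cm = confusion_matrix(y_valid, NB_prediction, labels=naiveByes_clf.classes_)
plt.figure(figsize=(6, 5))
sns.heatmap(cm, annot=True, fmt='d', cmap='Blues',
            xticklabels=naiveByes_clf.classes_, yticklabels=naiveByes_clf.classes_)
plt.xlabel('Predicted label')
plt.ylabel('True label')
plt.title('Naive Bayes confusion matrix (validation set)')
plt.show()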
# #*Thank you! :)*
|
[
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"nltk.download",
"sklearn.metrics.classification_report",
"pandas.to_datetime",
"matplotlib.pyplot.imshow",
"textblob.TextBlob",
"seaborn.set",
"nltk.corpus.stopwords.words",
"sklearn.feature_extraction.text.CountVectorizer",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.barh",
"pandas.set_option",
"sklearn.naive_bayes.MultinomialNB",
"pandas.DataFrame",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.xticks",
"sklearn.model_selection.train_test_split",
"matplotlib.pyplot.title",
"re.findall",
"re.sub",
"numpy.vectorize",
"sklearn.metrics.accuracy_score",
"nltk.sentiment.vader.SentimentIntensityAnalyzer.polarity_scores",
"matplotlib.pyplot.show",
"vaderSentiment.vaderSentiment.SentimentIntensityAnalyzer",
"matplotlib.pyplot.pie",
"wordcloud.WordCloud",
"plotly.graph_objects.Figure",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.tight_layout",
"seaborn.barplot",
"matplotlib.pyplot.subplots"
] |
[((907, 949), 'pandas.set_option', 'pd.set_option', (['"""display.max_colwidth"""', '(200)'], {}), "('display.max_colwidth', 200)\n", (920, 949), True, 'import pandas as pd\n'), ((2075, 2188), 'pandas.read_csv', 'pd.read_csv', (['"""https://raw.githubusercontent.com/gabrielpreda/covid-19-tweets/master/covid19_tweets.csv"""'], {}), "(\n 'https://raw.githubusercontent.com/gabrielpreda/covid-19-tweets/master/covid19_tweets.csv'\n )\n", (2086, 2188), True, 'import pandas as pd\n'), ((2428, 2442), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (2440, 2442), True, 'import pandas as pd\n'), ((2698, 2725), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 5)'}), '(figsize=(15, 5))\n', (2708, 2725), True, 'import matplotlib.pyplot as plt\n'), ((2726, 2770), 'seaborn.set', 'sns.set', ([], {'style': '"""whitegrid"""', 'color_codes': '(True)'}), "(style='whitegrid', color_codes=True)\n", (2733, 2770), True, 'import seaborn as sns\n'), ((2777, 2834), 'seaborn.barplot', 'sns.barplot', ([], {'x': '"""column"""', 'y': '"""percent"""', 'data': 'missing_values'}), "(x='column', y='percent', data=missing_values)\n", (2788, 2834), True, 'import seaborn as sns\n'), ((3055, 3104), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Column_Name"""'], {'size': '(14)', 'weight': '"""bold"""'}), "('Column_Name', size=14, weight='bold')\n", (3065, 3104), True, 'import matplotlib.pyplot as plt\n'), ((3105, 3153), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Percentage"""'], {'size': '(14)', 'weight': '"""bold"""'}), "('Percentage', size=14, weight='bold')\n", (3115, 3153), True, 'import matplotlib.pyplot as plt\n'), ((3154, 3233), 'matplotlib.pyplot.title', 'plt.title', (['"""Percentage of missing values in column"""'], {'fontweight': '"""bold"""', 'size': '(17)'}), "('Percentage of missing values in column', fontweight='bold', size=17)\n", (3163, 3233), True, 'import matplotlib.pyplot as plt\n'), ((3232, 3242), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3240, 3242), True, 'import matplotlib.pyplot as plt\n'), ((3290, 3317), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(17, 5)'}), '(figsize=(17, 5))\n', (3300, 3317), True, 'import matplotlib.pyplot as plt\n'), ((3373, 3422), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Column_Name"""'], {'size': '(14)', 'weight': '"""bold"""'}), "('Column_Name', size=14, weight='bold')\n", (3383, 3422), True, 'import matplotlib.pyplot as plt\n'), ((3423, 3498), 'matplotlib.pyplot.title', 'plt.title', (['"""Places of missing values in column"""'], {'fontweight': '"""bold"""', 'size': '(17)'}), "('Places of missing values in column', fontweight='bold', size=17)\n", (3432, 3498), True, 'import matplotlib.pyplot as plt\n'), ((3497, 3507), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3505, 3507), True, 'import matplotlib.pyplot as plt\n'), ((3641, 3655), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (3653, 3655), True, 'import pandas as pd\n'), ((3796, 3831), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(15, 7)'}), '(1, 1, figsize=(15, 7))\n', (3808, 3831), True, 'import matplotlib.pyplot as plt\n'), ((3839, 3910), 'seaborn.barplot', 'sns.barplot', ([], {'x': "unique_df['Features']", 'y': "unique_df['Uniques']", 'alpha': '(0.8)'}), "(x=unique_df['Features'], y=unique_df['Uniques'], alpha=0.8)\n", (3850, 3910), True, 'import seaborn as sns\n'), ((4131, 4224), 'matplotlib.pyplot.title', 'plt.title', (['"""Bar plot for number of unique values in each column"""'], {'weight': '"""bold"""', 
'size': '(15)'}), "('Bar plot for number of unique values in each column', weight=\n 'bold', size=15)\n", (4140, 4224), True, 'import matplotlib.pyplot as plt\n'), ((4219, 4271), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""#Unique values"""'], {'size': '(12)', 'weight': '"""bold"""'}), "('#Unique values', size=12, weight='bold')\n", (4229, 4271), True, 'import matplotlib.pyplot as plt\n'), ((4272, 4318), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Features"""'], {'size': '(12)', 'weight': '"""bold"""'}), "('Features', size=12, weight='bold')\n", (4282, 4318), True, 'import matplotlib.pyplot as plt\n'), ((4319, 4342), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(90)'}), '(rotation=90)\n', (4329, 4342), True, 'import matplotlib.pyplot as plt\n'), ((4343, 4353), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4351, 4353), True, 'import matplotlib.pyplot as plt\n'), ((4966, 5001), 'plotly.graph_objects.Figure', 'go.Figure', ([], {'data': 'data', 'layout': 'layout'}), '(data=data, layout=layout)\n', (4975, 5001), True, 'import plotly.graph_objects as go\n'), ((5245, 5275), 'pandas.to_datetime', 'pd.to_datetime', (["tweets['date']"], {}), "(tweets['date'])\n", (5259, 5275), True, 'import pandas as pd\n'), ((5301, 5339), 'pandas.to_datetime', 'pd.to_datetime', (["tweets['user_created']"], {}), "(tweets['user_created'])\n", (5315, 5339), True, 'import pandas as pd\n'), ((7991, 8018), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 7)'}), '(figsize=(10, 7))\n', (8001, 8018), True, 'import matplotlib.pyplot as plt\n'), ((8019, 8066), 'matplotlib.pyplot.imshow', 'plt.imshow', (['wordcloud'], {'interpolation': '"""bilinear"""'}), "(wordcloud, interpolation='bilinear')\n", (8029, 8066), True, 'import matplotlib.pyplot as plt\n'), ((8067, 8082), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (8075, 8082), True, 'import matplotlib.pyplot as plt\n'), ((8083, 8093), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8091, 8093), True, 'import matplotlib.pyplot as plt\n'), ((8180, 8206), 'nltk.download', 'nltk.download', (['"""stopwords"""'], {}), "('stopwords')\n", (8193, 8206), False, 'import nltk\n'), ((8262, 8288), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (8277, 8288), False, 'from nltk.corpus import stopwords\n'), ((9404, 9458), 'matplotlib.pyplot.bar', 'plt.bar', (['labels', 'values'], {'color': "['red', 'blue', 'lime']"}), "(labels, values, color=['red', 'blue', 'lime'])\n", (9411, 9458), True, 'import matplotlib.pyplot as plt\n'), ((9461, 9526), 'matplotlib.pyplot.title', 'plt.title', ([], {'label': '"""Sentiment Analysis - 12/17/2020"""', 'fontsize': '"""15"""'}), "(label='Sentiment Analysis - 12/17/2020', fontsize='15')\n", (9470, 9526), True, 'import matplotlib.pyplot as plt\n'), ((10112, 10139), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (10122, 10139), True, 'import matplotlib.pyplot as plt\n'), ((10141, 10194), 'matplotlib.pyplot.barh', 'plt.barh', (['top10_hashtags.index', 'top10_hashtags.values'], {}), '(top10_hashtags.index, top10_hashtags.values)\n', (10149, 10194), True, 'import matplotlib.pyplot as plt\n'), ((10195, 10220), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""# of Tweets"""'], {}), "('# of Tweets')\n", (10205, 10220), True, 'import matplotlib.pyplot as plt\n'), ((10221, 10265), 'matplotlib.pyplot.title', 'plt.title', (['"""Tweets by hashtags"""'], {'fontsize': '(16)'}), "('Tweets by hashtags', 
fontsize=16)\n", (10230, 10265), True, 'import matplotlib.pyplot as plt\n'), ((10727, 10755), 'vaderSentiment.vaderSentiment.SentimentIntensityAnalyzer', 'SentimentIntensityAnalyzer', ([], {}), '()\n', (10753, 10755), False, 'from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer\n'), ((11259, 11435), 'matplotlib.pyplot.pie', 'plt.pie', (['sentiment_dist'], {'labels': 'sentiment_dist.index', 'explode': '(0.1, 0, 0)', 'colors': "['yellowgreen', 'gold', 'lightcoral']", 'autopct': '"""%1.1f%%"""', 'shadow': '(True)', 'startangle': '(140)'}), "(sentiment_dist, labels=sentiment_dist.index, explode=(0.1, 0, 0),\n colors=['yellowgreen', 'gold', 'lightcoral'], autopct='%1.1f%%', shadow\n =True, startangle=140)\n", (11266, 11435), True, 'import matplotlib.pyplot as plt\n'), ((11442, 11516), 'matplotlib.pyplot.title', 'plt.title', (['"""Tweets\' Sentiment Distribution \n"""'], {'fontsize': '(16)', 'color': '"""Black"""'}), '("Tweets\' Sentiment Distribution \\n", fontsize=16, color=\'Black\')\n', (11451, 11516), True, 'import matplotlib.pyplot as plt\n'), ((11518, 11535), 'matplotlib.pyplot.axis', 'plt.axis', (['"""equal"""'], {}), "('equal')\n", (11526, 11535), True, 'import matplotlib.pyplot as plt\n'), ((11536, 11554), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (11552, 11554), True, 'import matplotlib.pyplot as plt\n'), ((11555, 11565), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11563, 11565), True, 'import matplotlib.pyplot as plt\n'), ((12048, 12097), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(3)'], {'squeeze': '(True)', 'figsize': '(16, 8)'}), '(1, 3, squeeze=True, figsize=(16, 8))\n', (12060, 12097), True, 'import matplotlib.pyplot as plt\n'), ((12775, 12865), 'sklearn.model_selection.train_test_split', 'train_test_split', (['new_df'], {'test_size': '(0.2)', 'random_state': '(0)', 'stratify': 'new_df.Score.values'}), '(new_df, test_size=0.2, random_state=0, stratify=new_df.\n Score.values)\n', (12791, 12865), False, 'from sklearn.model_selection import train_test_split\n'), ((13317, 13373), 'sklearn.feature_extraction.text.CountVectorizer', 'CountVectorizer', ([], {'decode_error': '"""replace"""', 'stop_words': 'stop'}), "(decode_error='replace', stop_words=stop)\n", (13332, 13373), False, 'from sklearn.feature_extraction.text import CountVectorizer\n'), ((13854, 13869), 'sklearn.naive_bayes.MultinomialNB', 'MultinomialNB', ([], {}), '()\n', (13867, 13869), False, 'from sklearn.naive_bayes import MultinomialNB\n'), ((13968, 14006), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_valid', 'NB_prediction'], {}), '(y_valid, NB_prediction)\n', (13982, 14006), False, 'from sklearn.metrics import mean_squared_error, mean_absolute_error, make_scorer, classification_report, confusion_matrix, accuracy_score, roc_auc_score, roc_curve\n'), ((5830, 5860), 're.findall', 're.findall', (['pattern', 'input_txt'], {}), '(pattern, input_txt)\n', (5840, 5860), False, 'import re\n'), ((5999, 6027), 'numpy.vectorize', 'np.vectorize', (['remove_pattern'], {}), '(remove_pattern)\n', (6011, 6027), True, 'import numpy as np\n'), ((10815, 10847), 'nltk.sentiment.vader.SentimentIntensityAnalyzer.polarity_scores', 'sid.polarity_scores', (["row['text']"], {}), "(row['text'])\n", (10834, 10847), True, 'from nltk.sentiment.vader import SentimentIntensityAnalyzer as sid\n'), ((13276, 13302), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (13291, 13302), False, 'from nltk.corpus import stopwords\n'), 
((14139, 14184), 'sklearn.metrics.classification_report', 'classification_report', (['NB_prediction', 'y_valid'], {}), '(NB_prediction, y_valid)\n', (14160, 14184), False, 'from sklearn.metrics import mean_squared_error, mean_absolute_error, make_scorer, classification_report, confusion_matrix, accuracy_score, roc_auc_score, roc_curve\n'), ((5897, 5921), 're.sub', 're.sub', (['i', '""""""', 'input_txt'], {}), "(i, '', input_txt)\n", (5903, 5921), False, 'import re\n'), ((7901, 7969), 'wordcloud.WordCloud', 'WordCloud', ([], {'width': '(800)', 'height': '(500)', 'random_state': '(21)', 'max_font_size': '(110)'}), '(width=800, height=500, random_state=21, max_font_size=110)\n', (7910, 7969), False, 'from wordcloud import WordCloud\n'), ((8598, 8618), 'textblob.TextBlob', 'TextBlob', (['clean_text'], {}), '(clean_text)\n', (8606, 8618), False, 'from textblob import TextBlob\n'), ((8686, 8706), 'textblob.TextBlob', 'TextBlob', (['clean_text'], {}), '(clean_text)\n', (8694, 8706), False, 'from textblob import TextBlob\n')]
|
"""Code to embed a set of sequences in an embedding space using a trained protvec model and an embedding set of sequences. Creates a .csv file of the embedded sequences. Also returns file of sequences which could not be successfully embedded"""
import pickle
import numpy as np
from Bio import SeqIO
import pandas as pd
from sklearn.preprocessing import minmax_scale
import warnings
from random import shuffle
warnings.filterwarnings(action='once')
def seq_3mers(sequence):
"""Takes a sequence to overlapping 3-mers"""
seq_size = len(sequence)
seq_3mers = list() #intialise list
#iterate through sequence to obtain 3mers
for i in range (1,seq_size-1):
seq_3mers.append(sequence[i-1]+sequence[i]+sequence[i+1])
return seq_3mers
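# Added illustration (not part of the original pipeline): a cheap sanity check of the 3-mer
# splitting above -- a length-n sequence yields n-2 overlapping 3-mers.
assert seq_3mers("MKVL") == ["MKV", "KVL"]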
def seq_vector(seq_3mers, vec_dict, d, seq):
"""Converts a list of 3-mers to a vector, vec_dict is the model vectors and d is the number of dimensions in the embedding"""
#intialise empty array
seq_arr = np.zeros((len(seq_3mers), 100))
#populate the array
for i in range (0,len(seq_arr)):
if seq_3mers[i] in vec_dict:
seq_arr[i] = vec_dict.get(seq_3mers[i])
else:
print('\n3-mer not in model!!!!')
print('BAD 3MER: '+seq_3mers[i])
print(seq)
#take the array to a vector
sequence_vec = pd.DataFrame(seq_arr, index = seq_3mers).sum(axis = 0, skipna = True)
return sequence_vec
def normalise_vec(embedded_seqs, d):
"""Normalises embedded sequences in d dimensions. Got this from Phage-Prot https://github.com/mikejhuang/PhageProtVec/blob/master/protvec.py"""
features = []
for i in range(d):
tempvec = [vec[i] for vec in embedded_seqs]
        #print(tempvec)
mean = np.mean(tempvec)
var = np.var(tempvec)
features.append([(vec[i]-mean)/var for vec in embedded_seqs])
    #transpose so that rows correspond to sequences and columns to embedding dimensions
    features = np.array(features).T
return features
def standardise_vec(embedded_seqs,d):
"""Standardisation for embedded sequences in d dimensions"""
    #initialise an empty matrix
    stand_embed = np.zeros((len(embedded_seqs),d))
    #standardise each vector
for i in range(0,len(embedded_seqs)):
x = embedded_seqs[i]
z = (x-np.mean(x))/np.std(x)
stand_embed[i] = z
return stand_embed
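# Added sanity check (illustrative only): each standardised row should end up with zero mean
# and unit standard deviation, since standardise_vec subtracts the mean and divides by the std.
_demo = standardise_vec(np.array([[1.0, 2.0, 3.0]]), 3)
assert abs(_demo.mean()) < 1e-9 and abs(_demo.std() - 1.0) < 1e-9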
def missing_3mers(three_mers, vec_dict):
"""Checks that the 3mers are in the model"""
    missing = [x for x in three_mers if x not in vec_dict]
return missing
def embed_sequences(sequences,vec_dict, d):
"""Embeds sequences in d dimensions using some protvec model vec_dict"""
    #initialise an empty matrix for sequence vectors
    embedded_seqs = np.zeros((len(sequences), d))
    #initialise an empty list for the ids of the embedded sequences
    embedded_seqs_keys = list()
    #initialise a list of dictionaries for sequences with 3-mers which are not in the model
bad_seqs = list()
#get the keys of the sequences
seq_keys = list(sequences.keys()) #list of the sequence ids
#reshuffle the sequence keys
shuffle(seq_keys)
#iterate through the sequence to obtain vectors
for i in range(0, len(sequences)):
#get 3mers in the sequence
three_mers = seq_3mers(str(sequences.get(seq_keys[i]).seq))
#check the 3-mers are in the model (if not exclude these sequences)
missing = missing_3mers(three_mers, vec_dict)
if len(missing) == 0 and len(sequences.get(seq_keys[i]).seq) <= 1024:
#get the vector of the sequence
vec = seq_vector(three_mers, vec_dict, d, sequences.get(seq_keys[i]).seq)
#add vector to array
embedded_seqs[i] = vec
#add corresponding sequence key to the list
embedded_seqs_keys.append(seq_keys[i])
else:
bad_seqs.append({'seq_ID': seq_keys[i], 'sequence':str(sequences.get(seq_keys[i]).seq), 'bad_3mers': missing})
    #drop all-zero rows (where a sequence could not be embedded)
embedded_seqs = embedded_seqs[~np.all(embedded_seqs == 0, axis=1)]
#standardise the vectors
standard_embedding = standardise_vec(embedded_seqs,d)
return standard_embedding, embedded_seqs_keys, bad_seqs
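# Note (added): embed_sequences returns a (n_embedded, d) standardised matrix, the matching
# list of sequence ids, and a list of dicts describing the sequences that were skipped
# (unknown 3-mers or length > 1024).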
#import the pickled vector dictionary
with open('../protvec_models/bacillus_3mervectors.pkl', 'rb') as f:
vec_dict = pickle.load(f)
#load in the real sequences to embed
seqs = SeqIO.index("../sequences/bacillus_embeddingset.fa", 'fasta')
#embed the sequences
print('EMBEDDING SEQUENCES')
embedding,seqs_keys, missing = embed_sequences(seqs, vec_dict, 100)
print('sequences embedded')
#Assemble the embedding in a dataframe indexed by sequence id (empty rows were already dropped above)
embedding_df = pd.DataFrame(embedding)
embedding_df.index = seqs_keys
#save the embedding and missing 3mers so we can evaluate them locally
print('Saving the embedding')
embedding_df.to_csv('../embedded_sequences/bacillus_filtered_embedded.csv', sep = '\t')
(pd.DataFrame(missing)).to_csv('../embedded_sequences/bacillus_filtered_notembedded.csv', sep = '\t')
print('Embedding saved!')
|
[
"numpy.mean",
"random.shuffle",
"pickle.load",
"numpy.array",
"Bio.SeqIO.index",
"numpy.std",
"pandas.DataFrame",
"numpy.all",
"warnings.filterwarnings",
"numpy.var"
] |
[((414, 452), 'warnings.filterwarnings', 'warnings.filterwarnings', ([], {'action': '"""once"""'}), "(action='once')\n", (437, 452), False, 'import warnings\n'), ((4228, 4289), 'Bio.SeqIO.index', 'SeqIO.index', (['"""../sequences/bacillus_embeddingset.fa"""', '"""fasta"""'], {}), "('../sequences/bacillus_embeddingset.fa', 'fasta')\n", (4239, 4289), False, 'from Bio import SeqIO\n'), ((4522, 4545), 'pandas.DataFrame', 'pd.DataFrame', (['embedding'], {}), '(embedding)\n', (4534, 4545), True, 'import pandas as pd\n'), ((2934, 2951), 'random.shuffle', 'shuffle', (['seq_keys'], {}), '(seq_keys)\n', (2941, 2951), False, 'from random import shuffle\n'), ((4120, 4134), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (4131, 4134), False, 'import pickle\n'), ((1664, 1680), 'numpy.mean', 'np.mean', (['tempvec'], {}), '(tempvec)\n', (1671, 1680), True, 'import numpy as np\n'), ((1689, 1704), 'numpy.var', 'np.var', (['tempvec'], {}), '(tempvec)\n', (1695, 1704), True, 'import numpy as np\n'), ((4769, 4790), 'pandas.DataFrame', 'pd.DataFrame', (['missing'], {}), '(missing)\n', (4781, 4790), True, 'import pandas as pd\n'), ((1278, 1316), 'pandas.DataFrame', 'pd.DataFrame', (['seq_arr'], {'index': 'seq_3mers'}), '(seq_arr, index=seq_3mers)\n', (1290, 1316), True, 'import pandas as pd\n'), ((1781, 1799), 'numpy.array', 'np.array', (['features'], {}), '(features)\n', (1789, 1799), True, 'import numpy as np\n'), ((2150, 2159), 'numpy.std', 'np.std', (['x'], {}), '(x)\n', (2156, 2159), True, 'import numpy as np\n'), ((3819, 3853), 'numpy.all', 'np.all', (['(embedded_seqs == 0)'], {'axis': '(1)'}), '(embedded_seqs == 0, axis=1)\n', (3825, 3853), True, 'import numpy as np\n'), ((2138, 2148), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (2145, 2148), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
"""
Code to load an expert policy and generate roll-out data for behavioral cloning.
Example usage:
python dagger_pytorch.py experts/Humanoid-v1.pkl Humanoid-v2 --render \
--num_rollouts 20
Author of this script and included expert policies: <NAME> (<EMAIL>)
"""
import pickle
import tensorflow as tf
import numpy as np
import tf_util
import gym
import load_policy
import matplotlib.pyplot as plt
import random
import torch
import torch.nn as nn
import torch.optim as optim
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
from model import *
def generate_rollout(env, expert_policy_file, max_timesteps, num_rollouts, render, envname):
max_steps = max_timesteps or env.spec.timestep_limit
policy_fn = load_policy.load_policy(expert_policy_file)
with tf.Session() as sess:
tf_util.initialize()
returns = []
observations = []
actions = []
for i in range(num_rollouts):
print('iter', i)
obs = env.reset()
done = False
totalr = 0.
steps = 0
while not done:
action = policy_fn(obs[None,:])
#print(type(action))
observations.append(obs)
actions.append(action)
obs, r, done, _ = env.step(action)
totalr += r
steps += 1
if render:
env.render()
if steps % 100 == 0: print("%i/%i"%(steps, max_steps))
if steps >= max_steps:
break
returns.append(totalr)
print('returns', returns)
print('mean return', np.mean(returns))
print('std of return', np.std(returns))
result_file = open('result/result_%s_dagger.txt' % (envname), "w")
result_file.write("##### before setting #####\n")
result_file.write("mean return: %.4f \n" % np.mean(returns))
result_file.write("std of return: %.4f \n" % np.std(returns))
result_file.close()
return observations, actions
def variablesFromPair(pair, args):
pair[0] = np.reshape(pair[0], (1, -1))
pair[1] = np.reshape(pair[1], (1, -1))
# get the target action index
#target = pair[1].argmax(1)
input_variable = Variable(torch.FloatTensor(pair[0]))
target_variable = Variable(torch.FloatTensor(pair[1]))
#print(target_variable)
return (input_variable, target_variable)
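# Added sanity check (illustrative only; the args argument is not used by variablesFromPair):
_iv, _tv = variablesFromPair([np.zeros(3), np.zeros(2)], None)
assert tuple(_iv.size()) == (1, 3) and tuple(_tv.size()) == (1, 2)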
def makePairs(obs, acts):
pairs = []
for i in range(len(obs)):
pair = []
pair.append(obs[i])
pair.append(acts[i])
pairs.append(pair)
return pairs
def train(input_var, target_var, net, net_optimizer, criterion, args):
loss = 0
net_optimizer.zero_grad()
#print(input_var)
net_output = net(input_var)
loss = criterion(net_output, target_var)
loss.backward()
net_optimizer.step()
    return loss.item()
def trainEpoch(net, pairs, args, test_pairs):
n_epochs = args.epoch
learning_rate = args.lr
iter = 0
net_optimizer = optim.Adam(net.parameters(), lr=learning_rate)
criterion = nn.MSELoss()
plot_losses = []
plot_loss_total = 0
for epoch in range(1, args.epoch+1):
random.shuffle(pairs)
# converting pairs into variable
training_pairs = [variablesFromPair(pair, args) for pair in pairs]
for training_pair in training_pairs:
iter += 1
input_var = training_pair[0]
target_var = training_pair[1]
loss = train(input_var, target_var, net, net_optimizer, criterion, args)
#print(loss)
plot_loss_total += loss
if iter % 500 == 0:
plot_loss_avg = plot_loss_total / 500
plot_losses.append(plot_loss_avg)
plot_loss_total = 0
print("epoch: %d, loss: %.6f, acc on test pairs: %.3f" % (epoch, plot_loss_avg, validate(net, test_pairs, args)))
f = plt.figure()
plt.plot(plot_losses)
plt.ylabel('Loss')
plt.xlabel('Iteration')
f.savefig("result/%s_dagger.pdf" % args.envname, bbox_inches='tight')
def validate(net, pairs, args):
valid_pairs = [variablesFromPair(pair, args) for pair in pairs]
    correct = 0
    for pair in valid_pairs:
        input_var = pair[0]
        target_var = pair[1]
        #print(target_var)
        output = net(input_var)
        #print(output)
        _, target_ind = torch.max(target_var, 1)
        _, output_ind = torch.max(output, 1)
        #print(output_ind)
        if torch.equal(target_ind.data, output_ind.data):
            correct += 1
    return (correct / len(pairs))
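# Note (added): the function below implements the DAgger data-aggregation step -- roll out the
# current learned policy, query the expert on every visited state, and collect the expert-labelled
# states on which the learner's argmax disagrees with the expert's, so they can be appended to
# the training set in main().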
def dagger(env, expert_policy_file, net, max_timesteps, num_dagger_rollouts, render):
max_steps = max_timesteps or env.spec.timestep_limit
policy_fn = load_policy.load_policy(expert_policy_file)
with tf.Session() as sess:
tf_util.initialize()
returns = []
observations = []
actions = []
for i in range(num_dagger_rollouts):
print('iter', i)
obs = env.reset()
done = False
totalr = 0.
steps = 0
while not done:
expected_action = policy_fn(obs[None,:])
action = net(Variable(torch.FloatTensor(obs)))
#print(action)
_, predict_ind = torch.max(action, 0)
#print(predict_ind)
expected_action1 = Variable(torch.FloatTensor(expected_action))
#print(expected_action)
_, target_ind = torch.max(expected_action1, 1)
#print(target_ind)
                if not torch.equal(predict_ind.data, target_ind.data):
observations.append(obs)
actions.append(expected_action)
#print("step %d made a mistake" % steps)
action = action.data.numpy()
action = np.reshape(action, (1,-1))
#print("expected action: ", expected_action)
#print("predicted action: ", action)
obs, r, done, _ = env.step(action)
totalr += r
steps += 1
if render:
env.render()
if steps % 100 == 0: print("%i/%i"%(steps, max_steps))
if steps >= max_steps:
break
returns.append(totalr)
return observations, actions
def test(env, net, max_timesteps, num_rollouts, render):
max_steps = max_timesteps or env.spec.timestep_limit
#policy_fn = load_policy.load_policy(expert_policy_file)
with tf.Session() as sess:
tf_util.initialize()
returns = []
        for i in range(num_rollouts):
observations = []
actions = []
obs = env.reset()
done = False
totalr = 0.
steps = 0
while not done:
action = net(Variable(torch.FloatTensor(obs)))
action = action.data.numpy()
action = np.reshape(action, (1,-1))
#print("expected action: ", expected_action)
#print("predicted action: ", action)
observations.append(obs)
actions.append(action)
obs, r, done, _ = env.step(action)
totalr += r
steps += 1
if render:
env.render()
if steps % 100 == 0: print("%i/%i"%(steps, max_steps))
if steps >= max_steps:
break
returns.append(totalr)
print('returns', returns)
print('mean return', np.mean(returns))
print('std of return', np.std(returns))
return returns
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('expert_policy_file', type=str)
parser.add_argument('envname', type=str)
parser.add_argument('--render', action='store_true')
parser.add_argument("--max_timesteps", type=int)
parser.add_argument('--num_rollouts', type=int, default=5,
help='Number of expert roll outs')
parser.add_argument('--hidden_size', type=int, default=64)
parser.add_argument('--epoch', type=int, default=30)
parser.add_argument('--lr', type=float, default=0.0001)
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--num_dagger_rollouts', type=int, default=5)
parser.add_argument('--num_dagger', type=int, default=5)
args = parser.parse_args()
env = gym.make(args.envname)
obs, acts = generate_rollout(env, args.expert_policy_file, args.max_timesteps, \
args.num_rollouts, args.render, args.envname)
num_pairs = len(obs)
pairs = makePairs(obs, acts)
train_pairs = pairs[:int(0.8 * num_pairs)]
test_pairs = pairs[int(0.8 * num_pairs):]
obs_dim = env.observation_space.shape[0]
act_dim = env.action_space.shape[0]
net = FFNet(obs_dim, act_dim, args.hidden_size)
#print("obs_dim", obs_dim)
#print("act_dim", act_dim)
trainEpoch(net, train_pairs, args, test_pairs)
test(env, net, args.max_timesteps, args.num_rollouts, args.render)
# Use dagger to generate more training data
for i in range(args.num_dagger):
print("#### dagger " + str(i) + " ####")
more_obs, more_acts = dagger(env, args.expert_policy_file, net, args.max_timesteps, args.num_dagger_rollouts, args.render)
more_pairs = makePairs(more_obs, more_acts)
more_train_pairs = more_pairs[:int(0.8 * len(more_obs))]
more_test_pairs = more_pairs[int(0.8 * len(more_obs)):]
train_pairs = train_pairs + more_train_pairs
test_pairs = test_pairs + more_test_pairs
print("number of train pairs: ", len(train_pairs))
print("number of test pairs: ", len(test_pairs))
validate(net, test_pairs, args)
trainEpoch(net, train_pairs, args, test_pairs)
returns = test(env, net, args.max_timesteps, args.num_rollouts, args.render)
print("####### After training #######")
result_file = open('result/result_%s_dagger.txt' % (args.envname), "a")
result_file.write("##### training setting #####\n")
result_file.write("num of rollouts: %d \n" % args.num_rollouts)
result_file.write("num of epochs: %d \n" % args.epoch)
result_file.write("NN hidden size: %d \n" % args.hidden_size)
result_file.write("learning rate: %f \n" % args.lr)
result_file.write("mean return: %.4f \n" % np.mean(returns))
result_file.write("std of return: %.4f \n" % np.std(returns))
result_file.close()
if __name__ == '__main__':
main()
|
[
"numpy.mean",
"numpy.reshape",
"random.shuffle",
"argparse.ArgumentParser",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"tensorflow.Session",
"load_policy.load_policy",
"tf_util.initialize",
"torch.FloatTensor",
"torch.max",
"torch.nn.MSELoss",
"matplotlib.pyplot.figure",
"torch.equal",
"numpy.std",
"gym.make"
] |
[((778, 821), 'load_policy.load_policy', 'load_policy.load_policy', (['expert_policy_file'], {}), '(expert_policy_file)\n', (801, 821), False, 'import load_policy\n'), ((2128, 2156), 'numpy.reshape', 'np.reshape', (['pair[0]', '(1, -1)'], {}), '(pair[0], (1, -1))\n', (2138, 2156), True, 'import numpy as np\n'), ((2168, 2196), 'numpy.reshape', 'np.reshape', (['pair[1]', '(1, -1)'], {}), '(pair[1], (1, -1))\n', (2178, 2196), True, 'import numpy as np\n'), ((3038, 3050), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (3048, 3050), True, 'import torch.nn as nn\n'), ((3748, 3760), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3758, 3760), True, 'import matplotlib.pyplot as plt\n'), ((3762, 3783), 'matplotlib.pyplot.plot', 'plt.plot', (['plot_losses'], {}), '(plot_losses)\n', (3770, 3783), True, 'import matplotlib.pyplot as plt\n'), ((3785, 3803), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss"""'], {}), "('Loss')\n", (3795, 3803), True, 'import matplotlib.pyplot as plt\n'), ((3805, 3828), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Iteration"""'], {}), "('Iteration')\n", (3815, 3828), True, 'import matplotlib.pyplot as plt\n'), ((4504, 4547), 'load_policy.load_policy', 'load_policy.load_policy', (['expert_policy_file'], {}), '(expert_policy_file)\n', (4527, 4547), False, 'import load_policy\n'), ((6788, 6813), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (6811, 6813), False, 'import argparse\n'), ((7622, 7644), 'gym.make', 'gym.make', (['args.envname'], {}), '(args.envname)\n', (7630, 7644), False, 'import gym\n'), ((831, 843), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (841, 843), True, 'import tensorflow as tf\n'), ((861, 881), 'tf_util.initialize', 'tf_util.initialize', ([], {}), '()\n', (879, 881), False, 'import tf_util\n'), ((1704, 1720), 'numpy.mean', 'np.mean', (['returns'], {}), '(returns)\n', (1711, 1720), True, 'import numpy as np\n'), ((1749, 1764), 'numpy.std', 'np.std', (['returns'], {}), '(returns)\n', (1755, 1764), True, 'import numpy as np\n'), ((2284, 2310), 'torch.FloatTensor', 'torch.FloatTensor', (['pair[0]'], {}), '(pair[0])\n', (2301, 2310), False, 'import torch\n'), ((2340, 2366), 'torch.FloatTensor', 'torch.FloatTensor', (['pair[1]'], {}), '(pair[1])\n', (2357, 2366), False, 'import torch\n'), ((3132, 3153), 'random.shuffle', 'random.shuffle', (['pairs'], {}), '(pairs)\n', (3146, 3153), False, 'import random\n'), ((4166, 4186), 'torch.max', 'torch.max', (['output', '(1)'], {}), '(output, 1)\n', (4175, 4186), False, 'import torch\n'), ((4205, 4225), 'torch.max', 'torch.max', (['output', '(1)'], {}), '(output, 1)\n', (4214, 4225), False, 'import torch\n'), ((4253, 4298), 'torch.equal', 'torch.equal', (['target_ind.data', 'output_ind.data'], {}), '(target_ind.data, output_ind.data)\n', (4264, 4298), False, 'import torch\n'), ((4554, 4566), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (4564, 4566), True, 'import tensorflow as tf\n'), ((4578, 4598), 'tf_util.initialize', 'tf_util.initialize', ([], {}), '()\n', (4596, 4598), False, 'import tf_util\n'), ((5917, 5929), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (5927, 5929), True, 'import tensorflow as tf\n'), ((5941, 5961), 'tf_util.initialize', 'tf_util.initialize', ([], {}), '()\n', (5959, 5961), False, 'import tf_util\n'), ((6665, 6681), 'numpy.mean', 'np.mean', (['returns'], {}), '(returns)\n', (6672, 6681), True, 'import numpy as np\n'), ((6707, 6722), 'numpy.std', 'np.std', (['returns'], {}), '(returns)\n', (6713, 6722), True, 'import 
numpy as np\n'), ((1939, 1955), 'numpy.mean', 'np.mean', (['returns'], {}), '(returns)\n', (1946, 1955), True, 'import numpy as np\n'), ((2006, 2021), 'numpy.std', 'np.std', (['returns'], {}), '(returns)\n', (2012, 2021), True, 'import numpy as np\n'), ((9551, 9567), 'numpy.mean', 'np.mean', (['returns'], {}), '(returns)\n', (9558, 9567), True, 'import numpy as np\n'), ((9618, 9633), 'numpy.std', 'np.std', (['returns'], {}), '(returns)\n', (9624, 9633), True, 'import numpy as np\n'), ((4928, 4948), 'torch.max', 'torch.max', (['action', '(0)'], {}), '(action, 0)\n', (4937, 4948), False, 'import torch\n'), ((5089, 5119), 'torch.max', 'torch.max', (['expected_action1', '(1)'], {}), '(expected_action1, 1)\n', (5098, 5119), False, 'import torch\n'), ((5366, 5393), 'numpy.reshape', 'np.reshape', (['action', '(1, -1)'], {}), '(action, (1, -1))\n', (5376, 5393), True, 'import numpy as np\n'), ((6217, 6244), 'numpy.reshape', 'np.reshape', (['action', '(1, -1)'], {}), '(action, (1, -1))\n', (6227, 6244), True, 'import numpy as np\n'), ((5005, 5039), 'torch.FloatTensor', 'torch.FloatTensor', (['expected_action'], {}), '(expected_action)\n', (5022, 5039), False, 'import torch\n'), ((5150, 5196), 'torch.equal', 'torch.equal', (['predict_ind.data', 'target_ind.data'], {}), '(predict_ind.data, target_ind.data)\n', (5161, 5196), False, 'import torch\n'), ((4863, 4885), 'torch.FloatTensor', 'torch.FloatTensor', (['obs'], {}), '(obs)\n', (4880, 4885), False, 'import torch\n'), ((6146, 6168), 'torch.FloatTensor', 'torch.FloatTensor', (['obs'], {}), '(obs)\n', (6163, 6168), False, 'import torch\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#File afsk1200lib.py
#Author <NAME>/M0ZJO
#Date 05/10/2019
#Desc. This is a physical layer decoder for UOSAT-2 AFSK
__author__ = "Jonathan/M0ZJO"
__copyright__ = "Jonathan/M0ZJO 2019"
__credits__ = ["Surrey University"]
__license__ = "MIT"
__version__ = "0.0.1"
__date__ = "04/10/2019"
__maintainer__ = "Jonathan/M0ZJO"
__status__ = "Development"
import numpy as np
from scipy import signal, stats
import matplotlib.pyplot as plt
import wavio
import datetime
import logging
import time
logging.basicConfig(filename='PyAFSK1200.log', level=logging.INFO)
# Print info about the software
def print_header():
# Generated:
# http://patorjk.com/software/taag/#p=display&f=Big&t=PyAFSK1200
name = """
_____ ______ _____ _ ____ ___ ___ ___
| __ \ /\ | ____/ ____| |/ /_ |__ \ / _ \ / _ \
| |__) | _ / \ | |__ | (___ | ' / | | ) | | | | | | |
| ___/ | | | / /\ \ | __| \___ \| < | | / /| | | | | | |
| | | |_| |/ ____ \| | ____) | . \ | |/ /_| |_| | |_| |
|_| \__, /_/ \_\_| |_____/|_|\_\|_|____|\___/ \___/
__/ |
|___/
"""
print(name)
print("### PyRSCW version %s release date %s ###" % (__version__, __date__))
print("### Written by %s. Happy Beeping! ###\n\n" % __author__)
# Use wavio to load the wav file from GQRX
def open_wav_file(filename, resample = None):
log("Opening: %s" % filename)
wav_data = wavio.read(filename)
log("Wavfile loaded. Len:%i, Fs:%iHz" % (wav_data.data.shape[0], wav_data.rate))
# Resample down to internal rate for speed (we don't need massive amounts of bw)
if resample != None:
resampled_wav = resample_wav(wav_data.data[:,0], wav_data.rate, resample)
wav_data.data = resampled_wav[0]
wav_data.rate = resampled_wav[1]
log("Wavfile resampled. Len:%i, Fs:%iHz" % (wav_data.data.shape[0], wav_data.rate))
else:
wav_data.data = wav_data.data[:,0]
return wav_data
# Decimate
def resample_wav(wav, in_fs, out_fs):
N = in_fs/out_fs
if int(N) == N:
# Integer rate
if int(N) != 1:
return signal.decimate(wav, int(N)), out_fs
else:
log("No decimation required (N = 1)")
return wav, in_fs
else:
# Non Integer Rate
log("Non-integer downsampling rates not supported")
return wav, in_fs
# Simple DC removal algorithm
def remove_dc(wav_data):
wav_data.data = wav_data.data - np.mean(wav_data.data)
return wav_data
def butter_bandpass(lowcut, highcut, fs, order):
nyq = 0.5 * fs
low = lowcut / nyq
high = highcut / nyq
b, a = signal.butter(order, [low, high], btype='band')
return b, a
def design_bp(fs, mode = 2, plot = False):
nyq_rate = fs/2
cf = 1800
offset = 1000
order = 50
if mode == 1:
# Help from -->> https://scipy-cookbook.readthedocs.io/items/FIRFilter.html
b, a = butter_bandpass(cf-offset, cf+offset, fs, order)
log("Filter of order %i deisgned (high)" % len(b))
if mode == 2:
f1 = (cf-offset)/nyq_rate
f2 = (cf+offset)/nyq_rate
b = signal.firwin(order, [f1, f2], pass_zero=False)
a = 1.0
if plot:
w, h = signal.freqz(b, a, worN=48000)
        plt.plot((w/np.pi)*nyq_rate, 20 * np.log10(np.absolute(h)), linewidth=2)
        plt.xlabel('Frequency (Hz)')
        plt.ylabel('Gain (dB)')
plt.title('Frequency Response')
#ylim(-0.05, 1.05)
plt.grid(True)
plt.show()
return b, a
def design_lp(fs, mode = 2, plot = False):
nyq_rate = fs/2
cf = 2500
order = 500
cf_d = cf/nyq_rate
b = signal.firwin(order, cf_d)
a = 1.0
if plot:
w, h = signal.freqz(b, a, worN=48000)
        plt.plot((w/np.pi)*nyq_rate, 20 * np.log10(np.absolute(h)), linewidth=2)
        plt.xlabel('Frequency (Hz)')
        plt.ylabel('Gain (dB)')
plt.title('Frequency Response')
#ylim(-0.05, 1.05)
plt.grid(True)
plt.show()
return b, a
def filter_wav(wav_file, fs = 48000):
b, a = design_bp(fs, 2, plot = False)
wav_file.data = signal.lfilter(b, a, wav_file.data)
return wav_file
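# Note (added): fsk_demodulate below is a simple non-coherent AFSK detector -- each bit-period
# block is mixed to baseband with the centre frequency cf, correlated against complex tones at
# +/- f_sep/2 (mark and space), and the tone with the larger correlation magnitude decides the bit.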
def fsk_demodulate(y, f_sep, cf, fs, baud):
block_len = int(fs/baud)
t = np.arange(0, block_len)/fs
space_f = np.exp(-1j * 2 * np.pi * t * -f_sep/2)
mark_f = np.exp(-1j * 2 * np.pi * t * f_sep/2)
cos_carr = np.cos(2 * np.pi * cf * t)
sin_carr = np.sin(2 * np.pi * cf * t)
bitstream = []
for i in range(0, len(y)-block_len, block_len):
block = y[i:i+block_len]
I_sig = sin_carr * block
Q_sig = cos_carr * block
block_cplx = I_sig + 1j * Q_sig
space_b = block_cplx * space_f
mark_b = block_cplx * mark_f
int_val_s = np.abs(np.sum(space_b))
int_val_m = np.abs(np.sum(mark_b))
if int_val_m > int_val_s:
bit = 1
else:
bit = 0
bitstream.append(bit)
return bitstream
def PLL(NRZa, a = 0.74 , fs = 48000, baud = 1200):
ctr_max = 2**20
ctr = 0
ctr_list = []
idx = []
for n in range(1, len(NRZa)-1):
prev = NRZa[n-1]
new = NRZa[n]
if prev != new:
ctr = ctr + a * (ctr_max*baud)/(fs)
else:
ctr = ctr + (ctr_max*baud)/(fs)
if ctr>ctr_max:
idx.append(n)
ctr = 0
ctr_list.append(ctr)
#print(ctr, ctr_max-ctr)
return idx, ctr_list/np.max(ctr_list)
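# Note (added): the PLL above is a counter-based clock recovery loop -- the counter normally
# wraps once per bit period; on NRZ transitions it advances by the factor a < 1 instead, which
# nudges the recorded sampling instants (idx) towards the bit boundaries.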
# Decode bitstream (find ascii chars)
def decode_block(bits):
i = 0
output_str = ""
while True:
if i > len(bits) - 12:
break
if [bits[i], bits[i+1], bits[i+10], bits[i+11]] == [1, 0, 1, 1]:
#print(bits[i:i+12])
data = np.array(bits[i+2:i+9])
data_str = ""
for k in range(0, 7):
data_str = data_str + str(data[6-k])
test_parity = bits[i+9]
#print(return_char(data_str), test_parity, parity_brute_force(int(data_str, 2)))
if test_parity == parity_brute_force(int(data_str, 2)):
output_str = output_str + return_char(data_str)
i = i + 11
else:
i = i + 1
else:
i = i + 1
return output_str
# Calculate Even Parity
def parity_brute_force(x):
bit = 0
num_bits = 0
while x:
bitmask = 1 << bit
bit += 1
if x & bitmask:
num_bits += 1
x &= ~bitmask
return num_bits % 2
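# Added sanity checks (illustrative): 'A' = 0b1000001 has two set bits (even parity -> 0),
# 'C' = 0b1000011 has three set bits (odd parity -> 1).
assert parity_brute_force(0b1000001) == 0
assert parity_brute_force(0b1000011) == 1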
# Get Ascii Char
def return_char(bits):
inv_map = {v: k for k, v in generate_alphabet().items()}
try:
return inv_map[bits]
    except KeyError:
return "."
#def numpy_array_to_str(bits):
#out = ""
#for i in bits:
#if i == 1:
#out = out + "0"
#else:
#out = out + "1"
#return out
def generate_alphabet():
alphabet = {}
alphabet["A"] = "1000001"
alphabet["B"] = "1000010"
alphabet["C"] = "1000011"
alphabet["D"] = "1000100"
alphabet["E"] = "1000101"
alphabet["F"] = "1000110"
alphabet["G"] = "1000111"
alphabet["H"] = "1001000"
alphabet["I"] = "1001001"
alphabet["J"] = "1001010"
alphabet["K"] = "1001011"
alphabet["L"] = "1001100"
alphabet["M"] = "1001101"
alphabet["N"] = "1001110"
alphabet["O"] = "1001111"
alphabet["P"] = "1010000"
alphabet["Q"] = "1010001"
alphabet["R"] = "1010010"
alphabet["S"] = "1010011"
alphabet["T"] = "1010100"
alphabet["U"] = "1010101"
alphabet["V"] = "1010110"
alphabet["W"] = "1010111"
alphabet["X"] = "1011000"
alphabet["Y"] = "1011001"
alphabet["Z"] = "1011010"
alphabet["0"] = "0110000"
alphabet["1"] = "0110001"
alphabet["2"] = "0110010"
alphabet["3"] = "0110011"
alphabet["4"] = "0110100"
alphabet["5"] = "0110101"
alphabet["6"] = "0110110"
alphabet["7"] = "0110111"
alphabet["8"] = "0111000"
alphabet["9"] = "0111001"
alphabet["-"] = "0101101"
#alphabet["CR"] = "0001101"
#alphabet["LF"] = "0001010"
alphabet["\r"] = "0001101"
alphabet["\n"] = "0001010"
alphabet["---Record Seperator---\n"] = "0011110"
#alphabet["RS"] = "0011110"
alphabet["US"] = "0011111"
alphabet["NULL"] = "0000000"
alphabet[" "] = "0100000"
return alphabet
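# Added worked example (illustrative): one asynchronous character frame as decode_block sees
# it -- a stop/idle bit from the previous frame (1), a start bit (0), seven data bits sent LSB
# first, an even parity bit and two stop bits. For 'A' (0b1000001, parity 0) this decodes to "A".
assert decode_block([1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1]) == "A"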
# Plot a series of numpy arrays (debugging...)
def plot_numpy_data(mag):
for i in mag:
plt.plot(i)
plt.show()
return
# Logging function
def log(string):
print(datetime.datetime.now(), string)
log_str = str(datetime.datetime.now()) + "\t" + string
logging.info(log_str)
# Save output data
def output_data(string, work_id):
fname = "pyafsk1200_%s_%i.txt" % (work_id, int(time.time()))
log("### Data written to: %s" % fname)
print(string)
with open(fname, "a+") as f:
f.write(string + "\r\n")
# What if someone tries to run the library file!
if __name__ == "__main__":
# execute only if run as a script
print_header()
print("This is the library file!")
|
[
"matplotlib.pyplot.grid",
"matplotlib.pyplot.ylabel",
"numpy.array",
"numpy.sin",
"logging.info",
"numpy.arange",
"numpy.mean",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.max",
"numpy.exp",
"wavio.read",
"scipy.signal.firwin",
"numpy.cos",
"matplotlib.pyplot.title",
"scipy.signal.freqz",
"time.time",
"matplotlib.pyplot.show",
"logging.basicConfig",
"numpy.absolute",
"scipy.signal.butter",
"datetime.datetime.now",
"scipy.signal.lfilter",
"numpy.sum"
] |
[((562, 628), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': '"""PyAFSK1200.log"""', 'level': 'logging.INFO'}), "(filename='PyAFSK1200.log', level=logging.INFO)\n", (581, 628), False, 'import logging\n'), ((1544, 1564), 'wavio.read', 'wavio.read', (['filename'], {}), '(filename)\n', (1554, 1564), False, 'import wavio\n'), ((2764, 2811), 'scipy.signal.butter', 'signal.butter', (['order', '[low, high]'], {'btype': '"""band"""'}), "(order, [low, high], btype='band')\n", (2777, 2811), False, 'from scipy import signal, stats\n'), ((3828, 3854), 'scipy.signal.firwin', 'signal.firwin', (['order', 'cf_d'], {}), '(order, cf_d)\n', (3841, 3854), False, 'from scipy import signal, stats\n'), ((4314, 4349), 'scipy.signal.lfilter', 'signal.lfilter', (['b', 'a', 'wav_file.data'], {}), '(b, a, wav_file.data)\n', (4328, 4349), False, 'from scipy import signal, stats\n'), ((4507, 4549), 'numpy.exp', 'np.exp', (['(-1.0j * 2 * np.pi * t * -f_sep / 2)'], {}), '(-1.0j * 2 * np.pi * t * -f_sep / 2)\n', (4513, 4549), True, 'import numpy as np\n'), ((4560, 4601), 'numpy.exp', 'np.exp', (['(-1.0j * 2 * np.pi * t * f_sep / 2)'], {}), '(-1.0j * 2 * np.pi * t * f_sep / 2)\n', (4566, 4601), True, 'import numpy as np\n'), ((4619, 4645), 'numpy.cos', 'np.cos', (['(2 * np.pi * cf * t)'], {}), '(2 * np.pi * cf * t)\n', (4625, 4645), True, 'import numpy as np\n'), ((4661, 4687), 'numpy.sin', 'np.sin', (['(2 * np.pi * cf * t)'], {}), '(2 * np.pi * cf * t)\n', (4667, 4687), True, 'import numpy as np\n'), ((8824, 8834), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8832, 8834), True, 'import matplotlib.pyplot as plt\n'), ((8989, 9010), 'logging.info', 'logging.info', (['log_str'], {}), '(log_str)\n', (9001, 9010), False, 'import logging\n'), ((2593, 2615), 'numpy.mean', 'np.mean', (['wav_data.data'], {}), '(wav_data.data)\n', (2600, 2615), True, 'import numpy as np\n'), ((3287, 3334), 'scipy.signal.firwin', 'signal.firwin', (['order', '[f1, f2]'], {'pass_zero': '(False)'}), '(order, [f1, f2], pass_zero=False)\n', (3300, 3334), False, 'from scipy import signal, stats\n'), ((3384, 3414), 'scipy.signal.freqz', 'signal.freqz', (['b', 'a'], {'worN': '(48000)'}), '(b, a, worN=48000)\n', (3396, 3414), False, 'from scipy import signal, stats\n'), ((3502, 3530), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Frequency (Hz)"""'], {}), "('Frequency (Hz)')\n", (3512, 3530), True, 'import matplotlib.pyplot as plt\n'), ((3539, 3557), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Gain"""'], {}), "('Gain')\n", (3549, 3557), True, 'import matplotlib.pyplot as plt\n'), ((3566, 3597), 'matplotlib.pyplot.title', 'plt.title', (['"""Frequency Response"""'], {}), "('Frequency Response')\n", (3575, 3597), True, 'import matplotlib.pyplot as plt\n'), ((3633, 3647), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (3641, 3647), True, 'import matplotlib.pyplot as plt\n'), ((3656, 3666), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3664, 3666), True, 'import matplotlib.pyplot as plt\n'), ((3900, 3930), 'scipy.signal.freqz', 'signal.freqz', (['b', 'a'], {'worN': '(48000)'}), '(b, a, worN=48000)\n', (3912, 3930), False, 'from scipy import signal, stats\n'), ((4018, 4046), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Frequency (Hz)"""'], {}), "('Frequency (Hz)')\n", (4028, 4046), True, 'import matplotlib.pyplot as plt\n'), ((4055, 4073), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Gain"""'], {}), "('Gain')\n", (4065, 4073), True, 'import matplotlib.pyplot as plt\n'), ((4082, 4113), 
'matplotlib.pyplot.title', 'plt.title', (['"""Frequency Response"""'], {}), "('Frequency Response')\n", (4091, 4113), True, 'import matplotlib.pyplot as plt\n'), ((4149, 4163), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (4157, 4163), True, 'import matplotlib.pyplot as plt\n'), ((4172, 4182), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4180, 4182), True, 'import matplotlib.pyplot as plt\n'), ((4466, 4489), 'numpy.arange', 'np.arange', (['(0)', 'block_len'], {}), '(0, block_len)\n', (4475, 4489), True, 'import numpy as np\n'), ((8808, 8819), 'matplotlib.pyplot.plot', 'plt.plot', (['i'], {}), '(i)\n', (8816, 8819), True, 'import matplotlib.pyplot as plt\n'), ((8893, 8916), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (8914, 8916), False, 'import datetime\n'), ((5039, 5054), 'numpy.sum', 'np.sum', (['space_b'], {}), '(space_b)\n', (5045, 5054), True, 'import numpy as np\n'), ((5083, 5097), 'numpy.sum', 'np.sum', (['mark_b'], {}), '(mark_b)\n', (5089, 5097), True, 'import numpy as np\n'), ((5793, 5809), 'numpy.max', 'np.max', (['ctr_list'], {}), '(ctr_list)\n', (5799, 5809), True, 'import numpy as np\n'), ((6107, 6134), 'numpy.array', 'np.array', (['bits[i + 2:i + 9]'], {}), '(bits[i + 2:i + 9])\n', (6115, 6134), True, 'import numpy as np\n'), ((8944, 8967), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (8965, 8967), False, 'import datetime\n'), ((9116, 9127), 'time.time', 'time.time', ([], {}), '()\n', (9125, 9127), False, 'import time\n'), ((3464, 3478), 'numpy.absolute', 'np.absolute', (['h'], {}), '(h)\n', (3475, 3478), True, 'import numpy as np\n'), ((3980, 3994), 'numpy.absolute', 'np.absolute', (['h'], {}), '(h)\n', (3991, 3994), True, 'import numpy as np\n')]
|
"""
Unit and regression test for the get_sequence_identity module of the molsysmt package on molsysmt MolSys molecular
systems.
"""
# Import package, test suite, and other packages as needed
import molsysmt as msm
import numpy as np
import math as math
# Sequence identity between two T4 lysozyme structures
def test_get_sequence_identity_molsysmt_MolSys_1():
molsys = msm.convert(msm.demo['T4 lysozyme L99A']['181l.msmpk'], to_form='molsysmt.MolSys')
molsys_2 = msm.convert(msm.demo['T4 lysozyme L99A']['1l17.msmpk'], to_form='molsysmt.MolSys')
identity, intersection, ref_intersection = msm.topology.get_sequence_identity(molsys, selection='molecule_type=="protein"',
reference_molecular_system=molsys_2, reference_selection='molecule_type=="protein"')
intersection_true = np.array([0, 1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
49, 50, 51, 52, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76,
77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 97, 99, 100, 101, 102, 103, 104,
105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126,
127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148,
149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161])
ref_intersection_true = np.array([0, 1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
51, 52, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 97, 99, 100, 101, 102, 103, 104, 105, 106, 107,
108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129,
130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151,
152, 153, 154, 155, 156, 157, 158, 159, 160, 161])
check_identity = math.isclose(97.53086419, identity)
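    # 97.53086419 is consistent with 158 matched positions out of 162 compared residues (158/162*100).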
check_intersection = np.all(intersection_true == intersection)
    check_intersection_ref = np.all(ref_intersection_true == ref_intersection)
assert check_identity and check_intersection and check_intersection_ref
|
[
"math.isclose",
"molsysmt.topology.get_sequence_identity",
"numpy.array",
"numpy.all",
"molsysmt.convert"
] |
[((364, 451), 'molsysmt.convert', 'msm.convert', (["msm.demo['T4 lysozyme L99A']['181l.msmpk']"], {'to_form': '"""molsysmt.MolSys"""'}), "(msm.demo['T4 lysozyme L99A']['181l.msmpk'], to_form=\n 'molsysmt.MolSys')\n", (375, 451), True, 'import molsysmt as msm\n'), ((462, 549), 'molsysmt.convert', 'msm.convert', (["msm.demo['T4 lysozyme L99A']['1l17.msmpk']"], {'to_form': '"""molsysmt.MolSys"""'}), "(msm.demo['T4 lysozyme L99A']['1l17.msmpk'], to_form=\n 'molsysmt.MolSys')\n", (473, 549), True, 'import molsysmt as msm\n'), ((592, 766), 'molsysmt.topology.get_sequence_identity', 'msm.topology.get_sequence_identity', (['molsys'], {'selection': '"""molecule_type=="protein\\""""', 'reference_molecular_system': 'molsys_2', 'reference_selection': '"""molecule_type=="protein\\""""'}), '(molsys, selection=\n \'molecule_type=="protein"\', reference_molecular_system=molsys_2,\n reference_selection=\'molecule_type=="protein"\')\n', (626, 766), True, 'import molsysmt as msm\n'), ((829, 1564), 'numpy.array', 'np.array', (['[0, 1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,\n 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,\n 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 54, 55, 56, 57, 58,\n 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76,\n 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94,\n 95, 97, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111,\n 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, \n 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, \n 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, \n 154, 155, 156, 157, 158, 159, 160, 161]'], {}), '([0, 1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,\n 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37,\n 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 54, 55, 56,\n 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74,\n 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92,\n 93, 94, 95, 97, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, \n 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, \n 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, \n 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, \n 152, 153, 154, 155, 156, 157, 158, 159, 160, 161])\n', (837, 1564), True, 'import numpy as np\n'), ((1601, 2336), 'numpy.array', 'np.array', (['[0, 1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,\n 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,\n 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 54, 55, 56, 57, 58,\n 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76,\n 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94,\n 95, 97, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111,\n 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, \n 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, \n 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, \n 154, 155, 156, 157, 158, 159, 160, 161]'], {}), '([0, 1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,\n 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37,\n 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 54, 55, 56,\n 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74,\n 
75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92,\n 93, 94, 95, 97, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, \n 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, \n 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, \n 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, \n 152, 153, 154, 155, 156, 157, 158, 159, 160, 161])\n', (1609, 2336), True, 'import numpy as np\n'), ((2366, 2401), 'math.isclose', 'math.isclose', (['(97.53086419)', 'identity'], {}), '(97.53086419, identity)\n', (2378, 2401), True, 'import math as math\n'), ((2427, 2468), 'numpy.all', 'np.all', (['(intersection_true == intersection)'], {}), '(intersection_true == intersection)\n', (2433, 2468), True, 'import numpy as np\n'), ((2498, 2548), 'numpy.all', 'np.all', (['(ref_intersection_true == intersection_true)'], {}), '(ref_intersection_true == intersection_true)\n', (2504, 2548), True, 'import numpy as np\n')]
|
import copy
import logging
import os
import time
from collections import Counter
from statistics import mean
import numpy as np
import pandas as pd
from .fold_fitting_strategy import AbstractFoldFittingStrategy, SequentialLocalFoldFittingStrategy
from ..abstract.abstract_model import AbstractModel
from ...constants import MULTICLASS, REGRESSION, SOFTCLASS, QUANTILE, REFIT_FULL_SUFFIX
from ...utils.exceptions import TimeLimitExceeded
from ...utils.loaders import load_pkl
from ...utils.savers import save_pkl
from ...utils.utils import CVSplitter, _compute_fi_with_stddev
logger = logging.getLogger(__name__)
# TODO: Add metadata object with info like score on each model, train time on each model, etc.
class BaggedEnsembleModel(AbstractModel):
"""
Bagged ensemble meta-model which fits a given model multiple times across different splits of the training data.
For certain child models such as KNN, this may only train a single model and instead rely on the child model to generate out-of-fold predictions.
"""
_oof_filename = 'oof.pkl'
def __init__(self, model_base: AbstractModel, random_state=0, **kwargs):
self.model_base = model_base
self._child_type = type(self.model_base)
self.models = []
self._oof_pred_proba = None
self._oof_pred_model_repeats = None
self._n_repeats = 0 # Number of n_repeats with at least 1 model fit, if kfold=5 and 8 models have been fit, _n_repeats is 2
self._n_repeats_finished = 0 # Number of n_repeats finished, if kfold=5 and 8 models have been fit, _n_repeats_finished is 1
self._k_fold_end = 0 # Number of models fit in current n_repeat (0 if completed), if kfold=5 and 8 models have been fit, _k_fold_end is 3
self._k = None # k models per n_repeat, equivalent to kfold value
self._k_per_n_repeat = [] # k-fold used for each n_repeat. == [5, 10, 3] if first kfold was 5, second was 10, and third was 3
self._random_state = random_state
self.low_memory = True
self._bagged_mode = None
# _child_oof currently is only set to True for KNN models, that are capable of LOO prediction generation to avoid needing bagging.
# TODO: Consider moving `_child_oof` logic to a separate class / refactor OOF logic.
# FIXME: Avoid unnecessary refit during refit_full on `_child_oof=True` models, just re-use the original model.
self._child_oof = False # Whether the OOF preds were taken from a single child model (Assumes child can produce OOF preds without bagging).
self._cv_splitters = [] # Keeps track of the CV splitter used for each bagged repeat.
eval_metric = kwargs.pop('eval_metric', self.model_base.eval_metric)
stopping_metric = kwargs.pop('stopping_metric', self.model_base.stopping_metric) # FIXME: Has to be moved to post-model_base initialization, otherwise could be misaligned.
super().__init__(problem_type=self.model_base.problem_type, eval_metric=eval_metric, stopping_metric=stopping_metric, **kwargs)
def _set_default_params(self):
default_params = {
# 'use_child_oof': False, # [Advanced] Whether to defer to child model for OOF preds and only train a single child.
'save_bag_folds': True,
# 'refit_folds': False, # [Advanced, Experimental] Whether to refit bags immediately to a refit_full model in a single .fit call.
}
for param, val in default_params.items():
self._set_default_param_value(param, val)
super()._set_default_params()
def _get_default_auxiliary_params(self) -> dict:
default_auxiliary_params = super()._get_default_auxiliary_params()
extra_auxiliary_params = dict(
drop_unique=False, # TODO: Get the value from child instead
)
default_auxiliary_params.update(extra_auxiliary_params)
return default_auxiliary_params
def is_valid(self):
return self.is_fit() and (self._n_repeats == self._n_repeats_finished)
def can_infer(self):
return self.is_fit() and self.params.get('save_bag_folds', True)
def is_stratified(self):
if self.problem_type in [REGRESSION, QUANTILE, SOFTCLASS]:
return False
else:
return True
def is_fit(self):
return len(self.models) != 0
def can_fit(self) -> bool:
return not self.is_fit() or self._bagged_mode
def is_valid_oof(self):
return self.is_fit() and (self._child_oof or self._bagged_mode)
def get_oof_pred_proba(self, **kwargs):
# TODO: Require is_valid == True (add option param to ignore is_valid)
return self._oof_pred_proba_func(self._oof_pred_proba, self._oof_pred_model_repeats)
@staticmethod
def _oof_pred_proba_func(oof_pred_proba, oof_pred_model_repeats):
oof_pred_model_repeats_without_0 = np.where(oof_pred_model_repeats == 0, 1, oof_pred_model_repeats)
if oof_pred_proba.ndim == 2:
oof_pred_model_repeats_without_0 = oof_pred_model_repeats_without_0[:, None]
return oof_pred_proba / oof_pred_model_repeats_without_0
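    # Illustration (added comment): the helper above turns summed OOF predictions into per-row
    # averages, e.g. a row covered by 2 bagging repeats with summed proba [1.2, 0.8] becomes
    # [0.6, 0.4]; rows with 0 repeats are divided by 1 and therefore left unscaled.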
def preprocess(self, X, preprocess_nonadaptive=True, model=None, **kwargs):
if preprocess_nonadaptive:
if model is None:
if not self.models:
return X
model = self.models[0]
model = self.load_child(model)
return model.preprocess(X, preprocess_stateful=False)
else:
return X
def _get_cv_splitter(self, n_splits, n_repeats, groups=None):
return CVSplitter(n_splits=n_splits, n_repeats=n_repeats, groups=groups, stratified=self.is_stratified(), random_state=self._random_state)
def _fit(self,
X,
y,
X_val=None,
y_val=None,
k_fold=None,
k_fold_start=0,
k_fold_end=None,
n_repeats=1,
n_repeat_start=0,
groups=None,
**kwargs):
use_child_oof = self.params.get('use_child_oof', False)
if use_child_oof:
if self.is_fit():
# TODO: We may want to throw an exception instead and avoid calling fit more than once
return self
k_fold = 1
k_fold_end = None
groups = None
if k_fold is None and groups is None:
k_fold = 5
if k_fold is not None and k_fold < 1:
k_fold = 1
if k_fold is None or k_fold > 1:
k_fold = self._get_cv_splitter(n_splits=k_fold, n_repeats=n_repeats, groups=groups).n_splits
self._validate_bag_kwargs(
k_fold=k_fold,
k_fold_start=k_fold_start,
k_fold_end=k_fold_end,
n_repeats=n_repeats,
n_repeat_start=n_repeat_start,
groups=groups,
)
if k_fold_end is None:
k_fold_end = k_fold
model_base = self._get_model_base()
model_base.rename(name='')
kwargs['feature_metadata'] = self.feature_metadata
kwargs['num_classes'] = self.num_classes # TODO: maybe don't pass num_classes to children
if self.model_base is not None:
self.save_model_base(self.model_base)
self.model_base = None
if self._oof_pred_proba is None and self.is_fit():
self._load_oof()
save_bag_folds = self.params.get('save_bag_folds', True)
if k_fold == 1:
self._fit_single(X=X, y=y, model_base=model_base, use_child_oof=use_child_oof, **kwargs)
return self
else:
refit_folds = self.params.get('refit_folds', False)
if refit_folds:
save_bag_folds = False
if kwargs.get('time_limit', None) is not None:
fold_start = n_repeat_start * k_fold + k_fold_start
fold_end = (n_repeats - 1) * k_fold + k_fold_end
folds_to_fit = fold_end - fold_start
# Reserve time for final refit model
kwargs['time_limit'] = kwargs['time_limit'] * folds_to_fit / (folds_to_fit + 1.2)
self._fit_folds(X=X, y=y, model_base=model_base, k_fold=k_fold, k_fold_start=k_fold_start, k_fold_end=k_fold_end,
n_repeats=n_repeats, n_repeat_start=n_repeat_start, save_folds=save_bag_folds, groups=groups, **kwargs)
# FIXME: Don't save folds except for refit
# FIXME: Cleanup self
# FIXME: Don't add `_FULL` to name
if refit_folds:
refit_template = self.convert_to_refit_full_template()
refit_template.params['use_child_oof'] = False
kwargs['time_limit'] = None
refit_template.fit(X=X, y=y, k_fold=1, **kwargs)
refit_template._oof_pred_proba = self._oof_pred_proba
refit_template._oof_pred_model_repeats = self._oof_pred_model_repeats
refit_template._child_oof = True
refit_template.fit_time += self.fit_time + self.predict_time
return refit_template
else:
return self
def _validate_bag_kwargs(self, *,
k_fold,
k_fold_start,
k_fold_end,
n_repeats,
n_repeat_start,
groups):
if groups is not None:
if self._n_repeats_finished != 0:
raise AssertionError('Bagged models cannot call fit with `groups` specified when a full k-fold set has already been fit.')
if n_repeats > 1:
raise AssertionError('Cannot perform repeated bagging with `groups` specified.')
return
if k_fold_end is None:
k_fold_end = k_fold
if k_fold is None:
raise ValueError('k_fold cannot be None.')
if k_fold < 1:
raise ValueError(f'k_fold must be equal or greater than 1, value: ({k_fold})')
if n_repeat_start != self._n_repeats_finished:
raise ValueError(f'n_repeat_start must equal self._n_repeats_finished, values: ({n_repeat_start}, {self._n_repeats_finished})')
if n_repeats <= n_repeat_start:
raise ValueError(f'n_repeats must be greater than n_repeat_start, values: ({n_repeats}, {n_repeat_start})')
if k_fold_start != self._k_fold_end:
raise ValueError(f'k_fold_start must equal previous k_fold_end, values: ({k_fold_start}, {self._k_fold_end})')
if k_fold_start >= k_fold_end:
# TODO: Remove this limitation if n_repeats > 1
raise ValueError(f'k_fold_end must be greater than k_fold_start, values: ({k_fold_end}, {k_fold_start})')
if (n_repeats - n_repeat_start) > 1 and k_fold_end != k_fold:
# TODO: Remove this limitation
raise ValueError(f'k_fold_end must equal k_fold when (n_repeats - n_repeat_start) > 1, values: ({k_fold_end}, {k_fold})')
if self._k is not None and self._k != k_fold:
raise ValueError(f'k_fold must equal previously fit k_fold value for the current n_repeat, values: (({k_fold}, {self._k})')
def predict_proba(self, X, normalize=None, **kwargs):
model = self.load_child(self.models[0])
X = self.preprocess(X, model=model, **kwargs)
pred_proba = model.predict_proba(X=X, preprocess_nonadaptive=False, normalize=normalize)
for model in self.models[1:]:
model = self.load_child(model)
pred_proba += model.predict_proba(X=X, preprocess_nonadaptive=False, normalize=normalize)
pred_proba = pred_proba / len(self.models)
return pred_proba
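    # Illustration (added comment): bagged inference is a plain average over the child models,
    # e.g. children predicting [0.9, 0.1], [0.7, 0.3] and [0.8, 0.2] yield [0.8, 0.2].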
def _predict_proba(self, X, normalize=False, **kwargs):
return self.predict_proba(X=X, normalize=normalize, **kwargs)
def score_with_oof(self, y, sample_weight=None):
self._load_oof()
valid_indices = self._oof_pred_model_repeats > 0
y = y[valid_indices]
y_pred_proba = self.get_oof_pred_proba()[valid_indices]
if sample_weight is not None:
sample_weight = sample_weight[valid_indices]
return self.score_with_y_pred_proba(y=y, y_pred_proba=y_pred_proba, sample_weight=sample_weight)
def _fit_single(self, X, y, model_base, use_child_oof, time_limit=None, **kwargs):
if self.is_fit():
raise AssertionError('Model is already fit.')
if self._n_repeats != 0:
raise ValueError(f'n_repeats must equal 0 when fitting a single model with k_fold == 1, value: {self._n_repeats}')
model_base.name = f'{model_base.name}S1F1'
model_base.set_contexts(path_context=self.path + model_base.name + os.path.sep)
time_start_fit = time.time()
model_base.fit(X=X, y=y, time_limit=time_limit, **kwargs)
model_base.fit_time = time.time() - time_start_fit
model_base.predict_time = None
X_len = len(X)
# Check if pred_proba is going to take too long
if time_limit is not None and X_len >= 10000:
max_allowed_time = time_limit * 1.3 # allow some buffer
time_left = max(
max_allowed_time - model_base.fit_time,
time_limit * 0.1, # At least 10% of time_limit
10, # At least 10 seconds
)
# Sample at most 500 rows to estimate prediction time of all rows
# TODO: Consider moving this into end of abstract model fit for all models.
            # Currently this only fixes the problem in bagged mode; without bagging, inference could still be problematic
n_sample = min(500, round(X_len * 0.1))
frac = n_sample / X_len
X_sample = X.sample(n=n_sample)
time_start_predict = time.time()
model_base.predict_proba(X_sample)
time_predict_frac = time.time() - time_start_predict
time_predict_estimate = time_predict_frac / frac
logger.log(15, f'\t{round(time_predict_estimate, 2)}s\t= Estimated out-of-fold prediction time...')
if time_predict_estimate > time_left:
logger.warning(f'\tNot enough time to generate out-of-fold predictions for model. Estimated time required was {round(time_predict_estimate, 2)}s compared to {round(time_left, 2)}s of available time.')
raise TimeLimitExceeded
if use_child_oof:
logger.log(15, '\t`use_child_oof` was specified for this model. It will function similarly to a bagged model, but will only fit one child model.')
time_start_predict = time.time()
if model_base._get_tags().get('valid_oof', False):
self._oof_pred_proba = model_base.get_oof_pred_proba(X=X, y=y)
else:
logger.warning('\tWARNING: `use_child_oof` was specified but child model does not have a dedicated `get_oof_pred_proba` method. This model may have heavily overfit validation scores.')
self._oof_pred_proba = model_base.predict_proba(X=X)
self._child_oof = True
model_base.predict_time = time.time() - time_start_predict
model_base.val_score = model_base.score_with_y_pred_proba(y=y, y_pred_proba=self._oof_pred_proba)
else:
self._oof_pred_proba = model_base.predict_proba(X=X) # TODO: Cheater value, will be overfit to valid set
self._oof_pred_model_repeats = np.ones(shape=len(X), dtype=np.uint8)
self._n_repeats = 1
self._n_repeats_finished = 1
self._k_per_n_repeat = [1]
self._bagged_mode = False
model_base.reduce_memory_size(remove_fit=True, remove_info=False, requires_save=True)
if not self.params.get('save_bag_folds', True):
model_base.model = None
if self.low_memory:
self.save_child(model_base, verbose=False)
self.models = [model_base.name]
else:
self.models = [model_base]
self._add_child_times_to_bag(model=model_base)
def _fit_folds(self,
X,
y,
model_base,
k_fold=None,
k_fold_start=0,
k_fold_end=None,
n_repeats=1,
n_repeat_start=0,
time_limit=None,
sample_weight=None,
save_folds=True,
groups=None,
**kwargs):
fold_fitting_strategy = self.params.get('fold_fitting_strategy', SequentialLocalFoldFittingStrategy)
# TODO: Preprocess data here instead of repeatedly
# FIXME: Raise exception if multiclass/binary and a single val fold contains all instances of a class. (Can happen if custom groups is specified)
time_start = time.time()
if k_fold_start != 0:
cv_splitter = self._cv_splitters[n_repeat_start]
else:
cv_splitter = self._get_cv_splitter(n_splits=k_fold, n_repeats=n_repeats, groups=groups)
if k_fold != cv_splitter.n_splits:
k_fold = cv_splitter.n_splits
if k_fold_end is None:
k_fold_end = k_fold
kfolds = cv_splitter.split(X=X, y=y)
oof_pred_proba, oof_pred_model_repeats = self._construct_empty_oof(X=X, y=y)
models = []
fold_start = n_repeat_start * k_fold + k_fold_start
fold_end = (n_repeats - 1) * k_fold + k_fold_end
folds_to_fit = fold_end - fold_start
# noinspection PyCallingNonCallable
fold_fitting_strategy: AbstractFoldFittingStrategy = fold_fitting_strategy(
self, X, y, sample_weight, time_limit, time_start, models, oof_pred_proba, oof_pred_model_repeats, save_folds=save_folds)
for j in range(n_repeat_start, n_repeats): # For each n_repeat
if j != n_repeat_start or k_fold_start == 0:
self._cv_splitters.append(cv_splitter)
cur_repeat_count = j - n_repeat_start
fold_start_n_repeat = fold_start + cur_repeat_count * k_fold
fold_end_n_repeat = min(fold_start_n_repeat + k_fold, fold_end)
for i in range(fold_start_n_repeat, fold_end_n_repeat): # For each fold
fold_num_in_repeat = i - (j * k_fold) # The fold in the current repeat set (first fold in set = 0)
fold_ctx = dict(
model_name_suffix=f'S{j + 1}F{fold_num_in_repeat + 1}', # S5F3 = 3rd fold of the 5th repeat set
fold=kfolds[i],
is_last_fold=i != (fold_end - 1),
folds_to_fit=folds_to_fit,
folds_finished=i - fold_start,
folds_left=fold_end - i,
)
fold_fitting_strategy.schedule_fold_model_fit(model_base, fold_ctx, kwargs)
if (fold_end_n_repeat != fold_end) or (k_fold == k_fold_end):
self._k_per_n_repeat.append(k_fold)
fold_fitting_strategy.after_all_folds_scheduled()
self.models += models
self._bagged_mode = True
if self._oof_pred_proba is None:
self._oof_pred_proba = oof_pred_proba
self._oof_pred_model_repeats = oof_pred_model_repeats
else:
self._oof_pred_proba += oof_pred_proba
self._oof_pred_model_repeats += oof_pred_model_repeats
self._n_repeats = n_repeats
if k_fold == k_fold_end:
self._k = None
self._k_fold_end = 0
self._n_repeats_finished = self._n_repeats
else:
self._k = k_fold
self._k_fold_end = k_fold_end
self._n_repeats_finished = self._n_repeats - 1
# TODO: Augment to generate OOF after shuffling each column in X (Batching), this is the fastest way.
# TODO: Reduce logging clutter during OOF importance calculation (Currently logs separately for each child)
# Generates OOF predictions from pre-trained bagged models, assuming X and y are in the same row order as used in .fit(X, y)
def compute_feature_importance(self,
X,
y,
features=None,
silent=False,
time_limit=None,
is_oof=False,
**kwargs) -> pd.DataFrame:
if features is None:
# FIXME: use FULL features (children can have different features)
features = self.load_child(model=self.models[0]).features
if not is_oof:
return super().compute_feature_importance(X, y, features=features, time_limit=time_limit, silent=silent, **kwargs)
fi_fold_list = []
model_index = 0
num_children = len(self.models)
if time_limit is not None:
time_limit_per_child = time_limit / num_children
else:
time_limit_per_child = None
if not silent:
logging_message = f'Computing feature importance via permutation shuffling for {len(features)} features using out-of-fold (OOF) data aggregated across {num_children} child models...'
if time_limit is not None:
logging_message = f'{logging_message} Time limit: {time_limit}s...'
logger.log(20, logging_message)
time_start = time.time()
early_stop = False
children_completed = 0
log_final_suffix = ''
for n_repeat, k in enumerate(self._k_per_n_repeat):
if is_oof:
if self._child_oof or not self._bagged_mode:
raise AssertionError('Model trained with no validation data cannot get feature importances on training data, please specify new test data to compute feature importances (model=%s)' % self.name)
kfolds = self._cv_splitters[n_repeat].split(X=X, y=y)
cur_kfolds = kfolds[n_repeat * k:(n_repeat + 1) * k]
else:
cur_kfolds = [(None, list(range(len(X))))] * k
for i, fold in enumerate(cur_kfolds):
_, test_index = fold
model = self.load_child(self.models[model_index + i])
fi_fold = model.compute_feature_importance(X=X.iloc[test_index, :], y=y.iloc[test_index], features=features, time_limit=time_limit_per_child,
silent=silent, log_prefix='\t', importance_as_list=True, **kwargs)
fi_fold_list.append(fi_fold)
children_completed += 1
if time_limit is not None and children_completed != num_children:
time_now = time.time()
time_left = time_limit - (time_now - time_start)
time_child_average = (time_now - time_start) / children_completed
if time_left < (time_child_average * 1.1):
                        log_final_suffix = ' (Early stopping due to lack of time...)'
early_stop = True
break
if early_stop:
break
model_index += k
# TODO: DON'T THROW AWAY SAMPLES! USE LARGER N
fi_list_dict = dict()
for val in fi_fold_list:
val = val['importance'].to_dict() # TODO: Don't throw away stddev information of children
for key in val:
if key not in fi_list_dict:
fi_list_dict[key] = []
fi_list_dict[key] += val[key]
fi_df = _compute_fi_with_stddev(fi_list_dict)
if not silent:
logger.log(20, f'\t{round(time.time() - time_start, 2)}s\t= Actual runtime (Completed {children_completed} of {num_children} children){log_final_suffix}')
return fi_df
def load_child(self, model, verbose=False) -> AbstractModel:
if isinstance(model, str):
child_path = self.create_contexts(self.path + model + os.path.sep)
return self._child_type.load(path=child_path, verbose=verbose)
else:
return model
def save_child(self, model, verbose=False):
child = self.load_child(model)
child.set_contexts(self.path + child.name + os.path.sep)
child.save(verbose=verbose)
# TODO: Multiply epochs/n_iterations by some value (such as 1.1) to account for having more training data than bagged models
def convert_to_refit_full_template(self):
init_args = self._get_init_args()
init_args['hyperparameters']['save_bag_folds'] = True # refit full models must save folds
init_args['model_base'] = self.convert_to_refitfull_template_child()
init_args['name'] = init_args['name'] + REFIT_FULL_SUFFIX
model_full_template = self.__class__(**init_args)
return model_full_template
def convert_to_refitfull_template_child(self):
compressed_params = self._get_compressed_params()
child_compressed = copy.deepcopy(self._get_model_base())
child_compressed.feature_metadata = self.feature_metadata # TODO: Don't pass this here
child_compressed.params = compressed_params
return child_compressed
def _get_init_args(self):
init_args = dict(
model_base=self._get_model_base(),
random_state=self._random_state,
)
init_args.update(super()._get_init_args())
init_args.pop('problem_type')
return init_args
def _get_compressed_params(self, model_params_list=None):
if model_params_list is None:
model_params_list = [
self.load_child(child).get_trained_params()
for child in self.models
]
model_params_compressed = dict()
for param in model_params_list[0].keys():
model_param_vals = [model_params[param] for model_params in model_params_list]
if all(isinstance(val, bool) for val in model_param_vals):
counter = Counter(model_param_vals)
compressed_val = counter.most_common(1)[0][0]
elif all(isinstance(val, int) for val in model_param_vals):
compressed_val = round(mean(model_param_vals))
elif all(isinstance(val, float) for val in model_param_vals):
compressed_val = mean(model_param_vals)
else:
try:
counter = Counter(model_param_vals)
compressed_val = counter.most_common(1)[0][0]
except TypeError:
compressed_val = model_param_vals[0]
model_params_compressed[param] = compressed_val
return model_params_compressed
def _get_compressed_params_trained(self):
model_params_list = [
self.load_child(child).params_trained
for child in self.models
]
return self._get_compressed_params(model_params_list=model_params_list)
def _get_model_base(self):
if self.model_base is None:
return self.load_model_base()
else:
return self.model_base
def _add_child_times_to_bag(self, model):
if self.fit_time is None:
self.fit_time = model.fit_time
else:
self.fit_time += model.fit_time
if self.predict_time is None:
self.predict_time = model.predict_time
else:
self.predict_time += model.predict_time
@classmethod
def load(cls, path: str, reset_paths=True, low_memory=True, load_oof=False, verbose=True):
model = super().load(path=path, reset_paths=reset_paths, verbose=verbose)
if not low_memory:
model.persist_child_models(reset_paths=reset_paths)
if load_oof:
model._load_oof()
return model
@classmethod
def load_oof(cls, path, verbose=True):
try:
oof = load_pkl.load(path=path + 'utils' + os.path.sep + cls._oof_filename, verbose=verbose)
oof_pred_proba = oof['_oof_pred_proba']
oof_pred_model_repeats = oof['_oof_pred_model_repeats']
except FileNotFoundError:
model = cls.load(path=path, reset_paths=True, verbose=verbose)
model._load_oof()
oof_pred_proba = model._oof_pred_proba
oof_pred_model_repeats = model._oof_pred_model_repeats
return cls._oof_pred_proba_func(oof_pred_proba=oof_pred_proba, oof_pred_model_repeats=oof_pred_model_repeats)
def _load_oof(self):
if self._oof_pred_proba is not None:
pass
else:
oof = load_pkl.load(path=self.path + 'utils' + os.path.sep + self._oof_filename)
self._oof_pred_proba = oof['_oof_pred_proba']
self._oof_pred_model_repeats = oof['_oof_pred_model_repeats']
def persist_child_models(self, reset_paths=True):
for i, model_name in enumerate(self.models):
if isinstance(model_name, str):
child_path = self.create_contexts(self.path + model_name + os.path.sep)
child_model = self._child_type.load(path=child_path, reset_paths=reset_paths, verbose=True)
self.models[i] = child_model
def load_model_base(self):
return load_pkl.load(path=self.path + 'utils' + os.path.sep + 'model_template.pkl')
def save_model_base(self, model_base):
save_pkl.save(path=self.path + 'utils' + os.path.sep + 'model_template.pkl', object=model_base)
def save(self, path=None, verbose=True, save_oof=True, save_children=False) -> str:
if path is None:
path = self.path
if save_children:
model_names = []
for child in self.models:
child = self.load_child(child)
child.set_contexts(path + child.name + os.path.sep)
child.save(verbose=False)
model_names.append(child.name)
self.models = model_names
if save_oof and self._oof_pred_proba is not None:
save_pkl.save(path=path + 'utils' + os.path.sep + self._oof_filename, object={
'_oof_pred_proba': self._oof_pred_proba,
'_oof_pred_model_repeats': self._oof_pred_model_repeats,
})
self._oof_pred_proba = None
self._oof_pred_model_repeats = None
return super().save(path=path, verbose=verbose)
# If `remove_fit_stack=True`, variables will be removed that are required to fit more folds and to fit new stacker models which use this model as a base model.
# This includes OOF variables.
def reduce_memory_size(self, remove_fit_stack=False, remove_fit=True, remove_info=False, requires_save=True, reduce_children=False, **kwargs):
super().reduce_memory_size(remove_fit=remove_fit, remove_info=remove_info, requires_save=requires_save, **kwargs)
if remove_fit_stack:
try:
os.remove(self.path + 'utils' + os.path.sep + self._oof_filename)
except FileNotFoundError:
pass
if requires_save:
self._oof_pred_proba = None
self._oof_pred_model_repeats = None
try:
os.remove(self.path + 'utils' + os.path.sep + 'model_template.pkl')
except FileNotFoundError:
pass
if requires_save:
self.model_base = None
try:
os.rmdir(self.path + 'utils')
except OSError:
pass
if reduce_children:
for model in self.models:
model = self.load_child(model)
model.reduce_memory_size(remove_fit=remove_fit, remove_info=remove_info, requires_save=requires_save, **kwargs)
if requires_save and self.low_memory:
self.save_child(model=model)
def _get_model_names(self):
model_names = []
for model in self.models:
if isinstance(model, str):
model_names.append(model)
else:
model_names.append(model.name)
return model_names
def get_info(self):
info = super().get_info()
children_info = self._get_child_info()
child_memory_sizes = [child['memory_size'] for child in children_info.values()]
sum_memory_size_child = sum(child_memory_sizes)
if child_memory_sizes:
max_memory_size_child = max(child_memory_sizes)
else:
max_memory_size_child = 0
if self.low_memory:
max_memory_size = info['memory_size'] + sum_memory_size_child
min_memory_size = info['memory_size'] + max_memory_size_child
else:
max_memory_size = info['memory_size']
min_memory_size = info['memory_size'] - sum_memory_size_child + max_memory_size_child
# Necessary if save_space is used as save_space deletes model_base.
if len(self.models) > 0:
child_model = self.load_child(self.models[0])
else:
child_model = self._get_model_base()
child_hyperparameters = child_model.params
child_ag_args_fit = child_model.params_aux
bagged_info = dict(
child_model_type=self._child_type.__name__,
num_child_models=len(self.models),
child_model_names=self._get_model_names(),
_n_repeats=self._n_repeats,
# _n_repeats_finished=self._n_repeats_finished, # commented out because these are too technical
# _k_fold_end=self._k_fold_end,
# _k=self._k,
_k_per_n_repeat=self._k_per_n_repeat,
_random_state=self._random_state,
low_memory=self.low_memory, # If True, then model will attempt to use at most min_memory_size memory by having at most one child in memory. If False, model will use max_memory_size memory.
bagged_mode=self._bagged_mode,
max_memory_size=max_memory_size, # Memory used when all children are loaded into memory at once.
min_memory_size=min_memory_size, # Memory used when only the largest child is loaded into memory.
child_hyperparameters=child_hyperparameters,
child_hyperparameters_fit=self._get_compressed_params_trained(),
child_ag_args_fit=child_ag_args_fit,
)
info['bagged_info'] = bagged_info
info['children_info'] = children_info
child_features_full = list(set().union(*[child['features'] for child in children_info.values()]))
info['features'] = child_features_full
info['num_features'] = len(child_features_full)
return info
def get_memory_size(self):
models = self.models
self.models = None
memory_size = super().get_memory_size()
self.models = models
return memory_size
def _get_child_info(self):
child_info_dict = dict()
for model in self.models:
if isinstance(model, str):
child_path = self.create_contexts(self.path + model + os.path.sep)
child_info_dict[model] = self._child_type.load_info(child_path)
else:
child_info_dict[model.name] = model.get_info()
return child_info_dict
def _construct_empty_oof(self, X, y):
if self.problem_type == MULTICLASS:
oof_pred_proba = np.zeros(shape=(len(X), len(y.unique())), dtype=np.float32)
elif self.problem_type == SOFTCLASS:
oof_pred_proba = np.zeros(shape=y.shape, dtype=np.float32)
elif self.problem_type == QUANTILE:
oof_pred_proba = np.zeros(shape=(len(X), len(self.quantile_levels)), dtype=np.float32)
else:
oof_pred_proba = np.zeros(shape=len(X), dtype=np.float32)
oof_pred_model_repeats = np.zeros(shape=len(X), dtype=np.uint8)
return oof_pred_proba, oof_pred_model_repeats
def _preprocess_fit_resources(self, silent=False, **kwargs):
"""Pass along to child models to avoid altering up-front"""
return kwargs
# TODO: Currently double disk usage, saving model in HPO and also saving model in bag
# FIXME: with use_bag_holdout=True, the fold-1 scores that are logged are of the inner validation score, not the holdout score.
# Fix this by passing X_val, y_val into this method
def _hyperparameter_tune(self, X, y, k_fold, scheduler_options, preprocess_kwargs=None, groups=None, **kwargs):
if len(self.models) != 0:
raise ValueError('self.models must be empty to call hyperparameter_tune, value: %s' % self.models)
kwargs['feature_metadata'] = self.feature_metadata
kwargs['num_classes'] = self.num_classes # TODO: maybe don't pass num_classes to children
self.model_base.set_contexts(self.path + 'hpo' + os.path.sep)
# TODO: Preprocess data here instead of repeatedly
if preprocess_kwargs is None:
preprocess_kwargs = dict()
use_child_oof = self.params.get('use_child_oof', False)
X = self.preprocess(X=X, preprocess=False, fit=True, **preprocess_kwargs)
if use_child_oof:
k_fold = 1
X_fold = X
y_fold = y
X_val_fold = None
y_val_fold = None
train_index = list(range(len(X)))
test_index = train_index
cv_splitter = None
else:
cv_splitter = self._get_cv_splitter(n_splits=k_fold, n_repeats=1, groups=groups)
if k_fold != cv_splitter.n_splits:
k_fold = cv_splitter.n_splits
kfolds = cv_splitter.split(X=X, y=y)
train_index, test_index = kfolds[0]
X_fold, X_val_fold = X.iloc[train_index, :], X.iloc[test_index, :]
y_fold, y_val_fold = y.iloc[train_index], y.iloc[test_index]
orig_time = scheduler_options[1]['time_out']
if orig_time:
scheduler_options[1]['time_out'] = orig_time * 0.8 # TODO: Scheduler doesn't early stop on final model, this is a safety net. Scheduler should be updated to early stop
hpo_models, hpo_model_performances, hpo_results = self.model_base.hyperparameter_tune(X=X_fold, y=y_fold, X_val=X_val_fold, y_val=y_val_fold, scheduler_options=scheduler_options, **kwargs)
scheduler_options[1]['time_out'] = orig_time
bags = {}
bags_performance = {}
for i, (model_name, model_path) in enumerate(hpo_models.items()):
child: AbstractModel = self._child_type.load(path=model_path)
# TODO: Create new Ensemble Here
bag = copy.deepcopy(self)
bag.rename(f"{bag.name}{os.path.sep}T{i}")
bag.set_contexts(self.path_root + bag.name + os.path.sep)
oof_pred_proba, oof_pred_model_repeats = self._construct_empty_oof(X=X, y=y)
if child._get_tags().get('valid_oof', False):
y_pred_proba = child.get_oof_pred_proba(X=X, y=y)
bag._n_repeats_finished = 1
bag._k_per_n_repeat = [1]
bag._bagged_mode = False
bag._child_oof = True # TODO: Consider a separate tag for refit_folds vs efficient OOF
else:
y_pred_proba = child.predict_proba(X_val_fold)
oof_pred_proba[test_index] += y_pred_proba
oof_pred_model_repeats[test_index] += 1
bag.model_base = None
child.rename('')
child.set_contexts(bag.path + child.name + os.path.sep)
bag.save_model_base(child.convert_to_template())
bag._k = k_fold
bag._k_fold_end = 1
bag._n_repeats = 1
bag._oof_pred_proba = oof_pred_proba
bag._oof_pred_model_repeats = oof_pred_model_repeats
child.rename('S1F1')
child.set_contexts(bag.path + child.name + os.path.sep)
if not self.params.get('save_bag_folds', True):
child.model = None
if bag.low_memory:
bag.save_child(child, verbose=False)
bag.models.append(child.name)
else:
bag.models.append(child)
bag.val_score = child.val_score
bag._add_child_times_to_bag(model=child)
if cv_splitter is not None:
bag._cv_splitters = [cv_splitter]
bag.save()
bags[bag.name] = bag.path
bags_performance[bag.name] = bag.val_score
# TODO: hpo_results likely not correct because no renames
return bags, bags_performance, hpo_results
def _more_tags(self):
return {'valid_oof': True}
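# --- Added illustration (not part of the original source) ---
# The class above accumulates `_oof_pred_proba` as a running sum over folds and
# `_oof_pred_model_repeats` as the per-row count of child models that predicted
# on that row. A minimal sketch, assuming that layout, of how the summed
# probabilities are turned into averaged out-of-fold predictions (the
# `numpy.where` guard mirrors the call recorded in the extracted API list):
def _example_average_oof(oof_pred_proba, oof_pred_model_repeats):
    # Rows that never appeared in a validation fold have a repeat count of 0;
    # substitute 1 to avoid dividing by zero (their predictions stay 0).
    repeats_safe = np.where(oof_pred_model_repeats == 0, 1, oof_pred_model_repeats)
    if oof_pred_proba.ndim == 2:  # multiclass: broadcast over the class axis
        repeats_safe = repeats_safe[:, None]
    return oof_pred_proba / repeats_safe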
|
[
"logging.getLogger",
"statistics.mean",
"numpy.where",
"collections.Counter",
"os.rmdir",
"numpy.zeros",
"copy.deepcopy",
"time.time",
"os.remove"
] |
[((587, 614), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (604, 614), False, 'import logging\n'), ((4896, 4960), 'numpy.where', 'np.where', (['(oof_pred_model_repeats == 0)', '(1)', 'oof_pred_model_repeats'], {}), '(oof_pred_model_repeats == 0, 1, oof_pred_model_repeats)\n', (4904, 4960), True, 'import numpy as np\n'), ((12901, 12912), 'time.time', 'time.time', ([], {}), '()\n', (12910, 12912), False, 'import time\n'), ((16980, 16991), 'time.time', 'time.time', ([], {}), '()\n', (16989, 16991), False, 'import time\n'), ((21570, 21581), 'time.time', 'time.time', ([], {}), '()\n', (21579, 21581), False, 'import time\n'), ((13009, 13020), 'time.time', 'time.time', ([], {}), '()\n', (13018, 13020), False, 'import time\n'), ((13946, 13957), 'time.time', 'time.time', ([], {}), '()\n', (13955, 13957), False, 'import time\n'), ((14769, 14780), 'time.time', 'time.time', ([], {}), '()\n', (14778, 14780), False, 'import time\n'), ((38819, 38838), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (38832, 38838), False, 'import copy\n'), ((14037, 14048), 'time.time', 'time.time', ([], {}), '()\n', (14046, 14048), False, 'import time\n'), ((15284, 15295), 'time.time', 'time.time', ([], {}), '()\n', (15293, 15295), False, 'import time\n'), ((26198, 26223), 'collections.Counter', 'Counter', (['model_param_vals'], {}), '(model_param_vals)\n', (26205, 26223), False, 'from collections import Counter\n'), ((31127, 31192), 'os.remove', 'os.remove', (["(self.path + 'utils' + os.path.sep + self._oof_filename)"], {}), "(self.path + 'utils' + os.path.sep + self._oof_filename)\n", (31136, 31192), False, 'import os\n'), ((31411, 31478), 'os.remove', 'os.remove', (["(self.path + 'utils' + os.path.sep + 'model_template.pkl')"], {}), "(self.path + 'utils' + os.path.sep + 'model_template.pkl')\n", (31420, 31478), False, 'import os\n'), ((31640, 31669), 'os.rmdir', 'os.rmdir', (["(self.path + 'utils')"], {}), "(self.path + 'utils')\n", (31648, 31669), False, 'import os\n'), ((35727, 35768), 'numpy.zeros', 'np.zeros', ([], {'shape': 'y.shape', 'dtype': 'np.float32'}), '(shape=y.shape, dtype=np.float32)\n', (35735, 35768), True, 'import numpy as np\n'), ((22888, 22899), 'time.time', 'time.time', ([], {}), '()\n', (22897, 22899), False, 'import time\n'), ((26397, 26419), 'statistics.mean', 'mean', (['model_param_vals'], {}), '(model_param_vals)\n', (26401, 26419), False, 'from statistics import mean\n'), ((26528, 26550), 'statistics.mean', 'mean', (['model_param_vals'], {}), '(model_param_vals)\n', (26532, 26550), False, 'from statistics import mean\n'), ((26620, 26645), 'collections.Counter', 'Counter', (['model_param_vals'], {}), '(model_param_vals)\n', (26627, 26645), False, 'from collections import Counter\n'), ((23853, 23864), 'time.time', 'time.time', ([], {}), '()\n', (23862, 23864), False, 'import time\n')]
|
"""
<NAME>
University of Manitoba
July 30th, 2019
"""
import numpy as np
###############################################################################
def shuffle_arrays(arrays_list, rand_seed=0, return_seed=False):
"""Shuffle arrays to maintain inter-array ordering
Shuffles each array in the list of arrays, arrays_list, such that
the inter-array order is maintained (i.e., the zeroth element of
    all arrays before shuffling corresponds to the nth element of
    all arrays after shuffling)
    Parameters
    ----------
arrays_list : list
List containing each array that will be shuffled
rand_seed : int
The seed to use for shuffling each array
return_seed : bool
If True, will return the seed used to shuffle the arrays (for
reproducibility)
Returns
-------
shuffled_arrs : list
List containing the shuffled arrays
    rand_seed : int
The seed that was used to shuffle the arrays
"""
shuffled_arrs = [] # Init arr for storing the shuffled arrays
for array in arrays_list: # For each array in the list
np.random.seed(rand_seed) # Set the seed
        if isinstance(array, list):  # If the 'array' is actually a list
# Copy the list into a new var that will be shuffled
shuffled_arr = [ii for ii in array]
        else:  # If the 'array' is a NumPy array
# Make a copy that will be shuffled
shuffled_arr = array * np.ones_like(array)
np.random.shuffle(shuffled_arr) # Shuffle the array
# Append the shuffled array to the list of shuffled arrays
shuffled_arrs.append(shuffled_arr)
if return_seed: # If returning the seed, then do so
return shuffled_arrs, rand_seed
else:
return shuffled_arrs
def normalize_samples(data):
"""Normalizes each sample in data to have unity maximum
Parameters
----------
data : array_like
3D array of the features for each sample (assumes 2D features)
Returns
-------
normalized_data : array_like
Array of the features for each sample, normalized so that the
max value is unity for each sample
"""
# Assert that data must be 3D
assert len(np.shape(data)) == 3, 'Error: data must have 3 dim'
normalized_data = np.ones_like(data) # Init array to return
# For each sample
for sample_idx in range(np.size(data, axis=0)):
# Normalize to have max of unity
normalized_data[sample_idx, :, :] = (data[sample_idx, :, :] /
np.max(data[sample_idx, :, :]))
return normalized_data
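# --- Added usage sketch (not part of the original file) ---
# Tiny demonstration of the two helpers above on synthetic data; the shapes
# and seed are arbitrary and chosen only for illustration.
def _demo_shuffle_and_normalize():
    features = np.random.rand(4, 8, 8)  # 4 samples of 2D features
    labels = np.arange(4)
    # Both arrays are shuffled with the same seed, so features_shuf[i] still
    # corresponds to labels_shuf[i] after shuffling.
    features_shuf, labels_shuf = shuffle_arrays([features, labels], rand_seed=7)
    return normalize_samples(features_shuf), labels_shuf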
|
[
"numpy.ones_like",
"numpy.size",
"numpy.max",
"numpy.random.seed",
"numpy.shape",
"numpy.random.shuffle"
] |
[((2407, 2425), 'numpy.ones_like', 'np.ones_like', (['data'], {}), '(data)\n', (2419, 2425), True, 'import numpy as np\n'), ((1164, 1189), 'numpy.random.seed', 'np.random.seed', (['rand_seed'], {}), '(rand_seed)\n', (1178, 1189), True, 'import numpy as np\n'), ((1558, 1589), 'numpy.random.shuffle', 'np.random.shuffle', (['shuffled_arr'], {}), '(shuffled_arr)\n', (1575, 1589), True, 'import numpy as np\n'), ((2504, 2525), 'numpy.size', 'np.size', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (2511, 2525), True, 'import numpy as np\n'), ((2330, 2344), 'numpy.shape', 'np.shape', (['data'], {}), '(data)\n', (2338, 2344), True, 'import numpy as np\n'), ((2689, 2719), 'numpy.max', 'np.max', (['data[sample_idx, :, :]'], {}), '(data[sample_idx, :, :])\n', (2695, 2719), True, 'import numpy as np\n'), ((1527, 1546), 'numpy.ones_like', 'np.ones_like', (['array'], {}), '(array)\n', (1539, 1546), True, 'import numpy as np\n')]
|
#================================================================
#
# File name : utils.py
# Author : PyLessons
# Created date: 2020-09-27
# Website : https://pylessons.com/
# GitHub : https://github.com/pythonlessons/TensorFlow-2.x-YOLOv3
# Description : additional yolov3 and yolov4 functions
#
#================================================================
from multiprocessing import Process, Queue, Pipe
import cv2
import time
import random
import colorsys
import numpy as np
import tensorflow as tf
from yolov3.configs import *
from yolov3.yolov4 import *
from tensorflow.python.saved_model import tag_constants
def load_yolo_weights(model, weights_file):
tf.keras.backend.clear_session() # used to reset layer names
# load Darknet original weights to TensorFlow model
if YOLO_TYPE == "yolov3":
range1 = 75 if not TRAIN_YOLO_TINY else 13
range2 = [58, 66, 74] if not TRAIN_YOLO_TINY else [9, 12]
if YOLO_TYPE == "yolov4":
range1 = 110 if not TRAIN_YOLO_TINY else 21
range2 = [93, 101, 109] if not TRAIN_YOLO_TINY else [17, 20]
with open(weights_file, 'rb') as wf:
major, minor, revision, seen, _ = np.fromfile(wf, dtype=np.int32, count=5)
j = 0
for i in range(range1):
if i > 0:
conv_layer_name = 'conv2d_%d' %i
else:
conv_layer_name = 'conv2d'
if j > 0:
bn_layer_name = 'batch_normalization_%d' %j
else:
bn_layer_name = 'batch_normalization'
conv_layer = model.get_layer(conv_layer_name)
filters = conv_layer.filters
k_size = conv_layer.kernel_size[0]
in_dim = conv_layer.input_shape[-1]
if i not in range2:
# darknet weights: [beta, gamma, mean, variance]
bn_weights = np.fromfile(wf, dtype=np.float32, count=4 * filters)
# tf weights: [gamma, beta, mean, variance]
bn_weights = bn_weights.reshape((4, filters))[[1, 0, 2, 3]]
bn_layer = model.get_layer(bn_layer_name)
j += 1
else:
conv_bias = np.fromfile(wf, dtype=np.float32, count=filters)
# darknet shape (out_dim, in_dim, height, width)
conv_shape = (filters, in_dim, k_size, k_size)
conv_weights = np.fromfile(wf, dtype=np.float32, count=np.product(conv_shape))
# tf shape (height, width, in_dim, out_dim)
conv_weights = conv_weights.reshape(conv_shape).transpose([2, 3, 1, 0])
if i not in range2:
conv_layer.set_weights([conv_weights])
bn_layer.set_weights(bn_weights)
else:
conv_layer.set_weights([conv_weights, conv_bias])
assert len(wf.read()) == 0, 'failed to read all data'
def Load_Yolo_model():
gpus = tf.config.experimental.list_physical_devices('GPU')
if len(gpus) > 0:
print(f'GPUs {gpus}')
try: tf.config.experimental.set_memory_growth(gpus[0], True)
except RuntimeError: pass
if YOLO_FRAMEWORK == "tf": # TensorFlow detection
if YOLO_TYPE == "yolov4":
Darknet_weights = YOLO_V4_TINY_WEIGHTS if TRAIN_YOLO_TINY else YOLO_V4_WEIGHTS
if YOLO_TYPE == "yolov3":
Darknet_weights = YOLO_V3_TINY_WEIGHTS if TRAIN_YOLO_TINY else YOLO_V3_WEIGHTS
if YOLO_CUSTOM_WEIGHTS == False:
print("Loading Darknet_weights from:", Darknet_weights)
yolo = Create_Yolo(input_size=YOLO_INPUT_SIZE, CLASSES=YOLO_COCO_CLASSES)
load_yolo_weights(yolo, Darknet_weights) # use Darknet weights
else:
print("Loading custom weights from:", YOLO_CUSTOM_WEIGHTS)
yolo = Create_Yolo(input_size=YOLO_INPUT_SIZE, CLASSES=TRAIN_CLASSES)
yolo.load_weights(f"./checkpoints/{TRAIN_MODEL_NAME}") # use custom weights
elif YOLO_FRAMEWORK == "trt": # TensorRT detection
saved_model_loaded = tf.saved_model.load(YOLO_CUSTOM_WEIGHTS, tags=[tag_constants.SERVING])
signature_keys = list(saved_model_loaded.signatures.keys())
yolo = saved_model_loaded.signatures['serving_default']
return yolo
def image_preprocess(image, target_size, gt_boxes=None):
ih, iw = target_size
h, w, _ = image.shape
scale = min(iw/w, ih/h)
nw, nh = int(scale * w), int(scale * h)
image_resized = cv2.resize(image, (nw, nh))
image_paded = np.full(shape=[ih, iw, 3], fill_value=128.0)
dw, dh = (iw - nw) // 2, (ih-nh) // 2
image_paded[dh:nh+dh, dw:nw+dw, :] = image_resized
image_paded = image_paded / 255.
if gt_boxes is None:
return image_paded
else:
gt_boxes[:, [0, 2]] = gt_boxes[:, [0, 2]] * scale + dw
gt_boxes[:, [1, 3]] = gt_boxes[:, [1, 3]] * scale + dh
return image_paded, gt_boxes
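# --- Added usage sketch (not part of the original file) ---
# image_preprocess() letterboxes an image: it resizes while preserving the
# aspect ratio and pads with gray (128) up to the square network input, then
# scales pixel values to [0, 1]. The input size below is illustrative only.
def _demo_image_preprocess():
    dummy_image = np.random.randint(0, 255, size=(480, 640, 3), dtype=np.uint8)
    padded = image_preprocess(dummy_image, [416, 416])
    return padded.shape  # (416, 416, 3)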
def draw_bbox(image, bboxes, CLASSES=YOLO_COCO_CLASSES, show_label=True, show_confidence = True, Text_colors=(255,255,0), rectangle_colors='', tracking=False):
NUM_CLASS = read_class_names(CLASSES)
num_classes = len(NUM_CLASS)
image_h, image_w, _ = image.shape
hsv_tuples = [(1.0 * x / num_classes, 1., 1.) for x in range(num_classes)]
#print("hsv_tuples", hsv_tuples)
colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
colors = list(map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)), colors))
random.seed(0)
random.shuffle(colors)
random.seed(None)
for i, bbox in enumerate(bboxes):
coor = np.array(bbox[:4], dtype=np.int32)
score = bbox[4]
class_ind = int(bbox[5])
bbox_color = rectangle_colors if rectangle_colors != '' else colors[class_ind]
bbox_thick = int(0.6 * (image_h + image_w) / 1000)
if bbox_thick < 1: bbox_thick = 1
fontScale = 0.75 * bbox_thick
(x1, y1), (x2, y2) = (coor[0], coor[1]), (coor[2], coor[3])
# put object rectangle
cv2.rectangle(image, (x1, y1), (x2, y2), bbox_color, bbox_thick*2)
if show_label:
# get text label
score_str = " {:.2f}".format(score) if show_confidence else ""
if tracking: score_str = " "+str(score)
try:
label = "{}".format(NUM_CLASS[class_ind]) + score_str
except KeyError:
print("You received KeyError, this might be that you are trying to use yolo original weights")
print("while using custom classes, if using custom model in configs.py set YOLO_CUSTOM_WEIGHTS = True")
# get text size
(text_width, text_height), baseline = cv2.getTextSize(label, cv2.FONT_HERSHEY_COMPLEX_SMALL,
fontScale, thickness=bbox_thick)
# put filled text rectangle
cv2.rectangle(image, (x1, y1), (x1 + text_width, y1 - text_height - baseline), bbox_color, thickness=cv2.FILLED)
# put text above rectangle
cv2.putText(image, label, (x1, y1-4), cv2.FONT_HERSHEY_COMPLEX_SMALL,
fontScale, Text_colors, bbox_thick, lineType=cv2.LINE_AA)
return image
def bboxes_iou(boxes1, boxes2):
boxes1 = np.array(boxes1)
boxes2 = np.array(boxes2)
boxes1_area = (boxes1[..., 2] - boxes1[..., 0]) * (boxes1[..., 3] - boxes1[..., 1])
boxes2_area = (boxes2[..., 2] - boxes2[..., 0]) * (boxes2[..., 3] - boxes2[..., 1])
left_up = np.maximum(boxes1[..., :2], boxes2[..., :2])
right_down = np.minimum(boxes1[..., 2:], boxes2[..., 2:])
inter_section = np.maximum(right_down - left_up, 0.0)
inter_area = inter_section[..., 0] * inter_section[..., 1]
union_area = boxes1_area + boxes2_area - inter_area
ious = np.maximum(1.0 * inter_area / union_area, np.finfo(np.float32).eps)
return ious
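# --- Added worked example (not part of the original file) ---
# Two unit squares offset by half a box overlap in a 0.5 x 1 region, so the
# IoU is 0.5 / (1 + 1 - 0.5) = 1/3.
def _demo_bboxes_iou():
    box_a = np.array([0.0, 0.0, 1.0, 1.0])
    box_b = np.array([0.5, 0.0, 1.5, 1.0])
    return bboxes_iou(box_a, box_b)  # ~0.333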
def nms(bboxes, iou_threshold, sigma=0.3, method='nms'):
"""
:param bboxes: (xmin, ymin, xmax, ymax, score, class)
Note: soft-nms, https://arxiv.org/pdf/1704.04503.pdf
https://github.com/bharatsingh430/soft-nms
"""
classes_in_img = list(set(bboxes[:, 5]))
best_bboxes = []
for cls in classes_in_img:
cls_mask = (bboxes[:, 5] == cls)
cls_bboxes = bboxes[cls_mask]
# Process 1: Determine whether the number of bounding boxes is greater than 0
while len(cls_bboxes) > 0:
            # Process 2: Select the bounding box with the highest score according to score order A
max_ind = np.argmax(cls_bboxes[:, 4])
best_bbox = cls_bboxes[max_ind]
best_bboxes.append(best_bbox)
cls_bboxes = np.concatenate([cls_bboxes[: max_ind], cls_bboxes[max_ind + 1:]])
            # Process 3: Calculate the IoU between bounding box A and the remaining boxes,
            # and remove those bounding boxes whose IoU value is higher than the threshold
iou = bboxes_iou(best_bbox[np.newaxis, :4], cls_bboxes[:, :4])
weight = np.ones((len(iou),), dtype=np.float32)
assert method in ['nms', 'soft-nms']
if method == 'nms':
iou_mask = iou > iou_threshold
weight[iou_mask] = 0.0
if method == 'soft-nms':
weight = np.exp(-(1.0 * iou ** 2 / sigma))
cls_bboxes[:, 4] = cls_bboxes[:, 4] * weight
score_mask = cls_bboxes[:, 4] > 0.
cls_bboxes = cls_bboxes[score_mask]
return best_bboxes
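# --- Added usage sketch (not part of the original file) ---
# nms() expects rows of (xmin, ymin, xmax, ymax, score, class). Of the two
# heavily overlapping class-0 boxes below, only the higher-scoring one survives
# hard NMS at iou_threshold=0.45.
def _demo_nms():
    boxes = np.array([[0.0, 0.0, 10.0, 10.0, 0.9, 0.0],
                      [1.0, 1.0, 11.0, 11.0, 0.8, 0.0]])
    return nms(boxes, iou_threshold=0.45, method='nms')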
def postprocess_boxes(pred_bbox, original_image, input_size, score_threshold):
valid_scale=[0, np.inf]
pred_bbox = np.array(pred_bbox)
pred_xywh = pred_bbox[:, 0:4]
pred_conf = pred_bbox[:, 4]
pred_prob = pred_bbox[:, 5:]
# 1. (x, y, w, h) --> (xmin, ymin, xmax, ymax)
pred_coor = np.concatenate([pred_xywh[:, :2] - pred_xywh[:, 2:] * 0.5,
pred_xywh[:, :2] + pred_xywh[:, 2:] * 0.5], axis=-1)
# 2. (xmin, ymin, xmax, ymax) -> (xmin_org, ymin_org, xmax_org, ymax_org)
org_h, org_w = original_image.shape[:2]
resize_ratio = min(input_size / org_w, input_size / org_h)
dw = (input_size - resize_ratio * org_w) / 2
dh = (input_size - resize_ratio * org_h) / 2
pred_coor[:, 0::2] = 1.0 * (pred_coor[:, 0::2] - dw) / resize_ratio
pred_coor[:, 1::2] = 1.0 * (pred_coor[:, 1::2] - dh) / resize_ratio
# 3. clip some boxes those are out of range
pred_coor = np.concatenate([np.maximum(pred_coor[:, :2], [0, 0]),
np.minimum(pred_coor[:, 2:], [org_w - 1, org_h - 1])], axis=-1)
invalid_mask = np.logical_or((pred_coor[:, 0] > pred_coor[:, 2]), (pred_coor[:, 1] > pred_coor[:, 3]))
pred_coor[invalid_mask] = 0
# 4. discard some invalid boxes
bboxes_scale = np.sqrt(np.multiply.reduce(pred_coor[:, 2:4] - pred_coor[:, 0:2], axis=-1))
scale_mask = np.logical_and((valid_scale[0] < bboxes_scale), (bboxes_scale < valid_scale[1]))
# 5. discard boxes with low scores
classes = np.argmax(pred_prob, axis=-1)
scores = pred_conf * pred_prob[np.arange(len(pred_coor)), classes]
score_mask = scores > score_threshold
mask = np.logical_and(scale_mask, score_mask)
coors, scores, classes = pred_coor[mask], scores[mask], classes[mask]
return np.concatenate([coors, scores[:, np.newaxis], classes[:, np.newaxis]], axis=-1)
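# --- Added note (not part of the original file) ---
# postprocess_boxes() undoes the letterboxing done by image_preprocess(): the
# predicted (x, y, w, h) boxes are converted to corner form, shifted by the
# padding offsets (dw, dh), rescaled by 1/resize_ratio back into the original
# image, clipped to the image bounds, and finally filtered by box size and by
# score_threshold before being returned as rows of (coords, score, class).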
def detect_image(Yolo, image_path, output_path, input_size=416, show=False, CLASSES=YOLO_COCO_CLASSES, score_threshold=0.3, iou_threshold=0.45, rectangle_colors=''):
original_image = cv2.imread(image_path)
original_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
original_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
image_data = image_preprocess(np.copy(original_image), [input_size, input_size])
image_data = image_data[np.newaxis, ...].astype(np.float32)
if YOLO_FRAMEWORK == "tf":
pred_bbox = Yolo.predict(image_data)
elif YOLO_FRAMEWORK == "trt":
batched_input = tf.constant(image_data)
result = Yolo(batched_input)
pred_bbox = []
for key, value in result.items():
value = value.numpy()
pred_bbox.append(value)
pred_bbox = [tf.reshape(x, (-1, tf.shape(x)[-1])) for x in pred_bbox]
pred_bbox = tf.concat(pred_bbox, axis=0)
bboxes = postprocess_boxes(pred_bbox, original_image, input_size, score_threshold)
bboxes = nms(bboxes, iou_threshold, method='nms')
print(bboxes)
image = draw_bbox(original_image, bboxes, CLASSES=CLASSES, rectangle_colors=rectangle_colors)
# CreateXMLfile("XML_Detections", str(int(time.time())), original_image, bboxes, read_class_names(CLASSES))
if output_path != '': cv2.imwrite(output_path, image)
return bboxes
def Predict_bbox_mp(Frames_data, Predicted_data, Processing_times):
gpus = tf.config.experimental.list_physical_devices('GPU')
if len(gpus) > 0:
try: tf.config.experimental.set_memory_growth(gpus[0], True)
except RuntimeError: print("RuntimeError in tf.config.experimental.list_physical_devices('GPU')")
Yolo = Load_Yolo_model()
times = []
while True:
if Frames_data.qsize()>0:
image_data = Frames_data.get()
t1 = time.time()
Processing_times.put(time.time())
if YOLO_FRAMEWORK == "tf":
pred_bbox = Yolo.predict(image_data)
elif YOLO_FRAMEWORK == "trt":
batched_input = tf.constant(image_data)
result = Yolo(batched_input)
pred_bbox = []
for key, value in result.items():
value = value.numpy()
pred_bbox.append(value)
pred_bbox = [tf.reshape(x, (-1, tf.shape(x)[-1])) for x in pred_bbox]
pred_bbox = tf.concat(pred_bbox, axis=0)
Predicted_data.put(pred_bbox)
def postprocess_mp(Predicted_data, original_frames, Processed_frames, Processing_times, input_size, CLASSES, score_threshold, iou_threshold, rectangle_colors, realtime):
times = []
while True:
if Predicted_data.qsize()>0:
pred_bbox = Predicted_data.get()
if realtime:
while original_frames.qsize() > 1:
original_image = original_frames.get()
else:
original_image = original_frames.get()
bboxes = postprocess_boxes(pred_bbox, original_image, input_size, score_threshold)
bboxes = nms(bboxes, iou_threshold, method='nms')
image = draw_bbox(original_image, bboxes, CLASSES=CLASSES, rectangle_colors=rectangle_colors)
times.append(time.time()-Processing_times.get())
times = times[-20:]
ms = sum(times)/len(times)*1000
fps = 1000 / ms
image = cv2.putText(image, "Time: {:.1f}FPS".format(fps), (0, 30), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 2)
#print("Time: {:.2f}ms, Final FPS: {:.1f}".format(ms, fps))
Processed_frames.put(image)
def Show_Image_mp(Processed_frames, show, Final_frames):
while True:
if Processed_frames.qsize()>0:
image = Processed_frames.get()
Final_frames.put(image)
if show:
cv2.imshow('output', image)
if cv2.waitKey(25) & 0xFF == ord("q"):
cv2.destroyAllWindows()
break
# detect from webcam
def detect_video_realtime_mp(video_path, output_path, input_size=416, show=False, CLASSES=YOLO_COCO_CLASSES, score_threshold=0.3, iou_threshold=0.45, rectangle_colors='', realtime=False):
if realtime:
vid = cv2.VideoCapture(0)
else:
vid = cv2.VideoCapture(video_path)
# by default VideoCapture returns float instead of int
width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = int(vid.get(cv2.CAP_PROP_FPS))
codec = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter(output_path, codec, fps, (width, height)) # output_path must be .mp4
no_of_frames = int(vid.get(cv2.CAP_PROP_FRAME_COUNT))
original_frames = Queue()
Frames_data = Queue()
Predicted_data = Queue()
Processed_frames = Queue()
Processing_times = Queue()
Final_frames = Queue()
p1 = Process(target=Predict_bbox_mp, args=(Frames_data, Predicted_data, Processing_times))
p2 = Process(target=postprocess_mp, args=(Predicted_data, original_frames, Processed_frames, Processing_times, input_size, CLASSES, score_threshold, iou_threshold, rectangle_colors, realtime))
p3 = Process(target=Show_Image_mp, args=(Processed_frames, show, Final_frames))
p1.start()
p2.start()
p3.start()
while True:
ret, img = vid.read()
if not ret:
break
original_image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
original_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
original_frames.put(original_image)
image_data = image_preprocess(np.copy(original_image), [input_size, input_size])
image_data = image_data[np.newaxis, ...].astype(np.float32)
Frames_data.put(image_data)
while True:
if original_frames.qsize() == 0 and Frames_data.qsize() == 0 and Predicted_data.qsize() == 0 and Processed_frames.qsize() == 0 and Processing_times.qsize() == 0 and Final_frames.qsize() == 0:
p1.terminate()
p2.terminate()
p3.terminate()
break
elif Final_frames.qsize()>0:
image = Final_frames.get()
if output_path != '': out.write(image)
cv2.destroyAllWindows()
def detect_video(Yolo, video_path, output_path, input_size=416, show=False, CLASSES=YOLO_COCO_CLASSES, score_threshold=0.3, iou_threshold=0.45, rectangle_colors=''):
times, times_2 = [], []
vid = cv2.VideoCapture(video_path)
# by default VideoCapture returns float instead of int
width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = int(vid.get(cv2.CAP_PROP_FPS))
codec = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter(output_path, codec, fps, (width, height)) # output_path must be .mp4
while True:
_, img = vid.read()
try:
original_image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
original_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
except:
break
image_data = image_preprocess(np.copy(original_image), [input_size, input_size])
image_data = image_data[np.newaxis, ...].astype(np.float32)
t1 = time.time()
if YOLO_FRAMEWORK == "tf":
pred_bbox = Yolo.predict(image_data)
elif YOLO_FRAMEWORK == "trt":
batched_input = tf.constant(image_data)
result = Yolo(batched_input)
pred_bbox = []
for key, value in result.items():
value = value.numpy()
pred_bbox.append(value)
t2 = time.time()
pred_bbox = [tf.reshape(x, (-1, tf.shape(x)[-1])) for x in pred_bbox]
pred_bbox = tf.concat(pred_bbox, axis=0)
bboxes = postprocess_boxes(pred_bbox, original_image, input_size, score_threshold)
bboxes = nms(bboxes, iou_threshold, method='nms')
image = draw_bbox(original_image, bboxes, CLASSES=CLASSES, rectangle_colors=rectangle_colors)
t3 = time.time()
times.append(t2-t1)
times_2.append(t3-t1)
times = times[-20:]
times_2 = times_2[-20:]
ms = sum(times)/len(times)*1000
fps = 1000 / ms
fps2 = 1000 / (sum(times_2)/len(times_2)*1000)
image = cv2.putText(image, "Time: {:.1f}FPS".format(fps), (0, 30), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 2)
# CreateXMLfile("XML_Detections", str(int(time.time())), original_image, bboxes, read_class_names(CLASSES))
print("Time: {:.2f}ms, Detection FPS: {:.1f}, total FPS: {:.1f}".format(ms, fps, fps2))
if output_path != '': out.write(image)
if show:
cv2.imshow('output', image)
if cv2.waitKey(25) & 0xFF == ord("q"):
cv2.destroyAllWindows()
break
cv2.destroyAllWindows()
# detect from webcam
def detect_realtime(Yolo, output_path, input_size=416, show=False, CLASSES=YOLO_COCO_CLASSES, score_threshold=0.3, iou_threshold=0.45, rectangle_colors=''):
times = []
vid = cv2.VideoCapture(0)
print(YOLO_COCO_CLASSES)
# by default VideoCapture returns float instead of int
width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = int(vid.get(cv2.CAP_PROP_FPS))
codec = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter(output_path, codec, fps, (width, height)) # output_path must be .mp4
while True:
_, frame = vid.read()
try:
original_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
original_frame = cv2.cvtColor(original_frame, cv2.COLOR_BGR2RGB)
except:
break
image_data = image_preprocess(np.copy(original_frame), [input_size, input_size])
image_data = image_data[np.newaxis, ...].astype(np.float32)
t1 = time.time()
if YOLO_FRAMEWORK == "tf":
pred_bbox = Yolo.predict(image_data)
elif YOLO_FRAMEWORK == "trt":
batched_input = tf.constant(image_data)
result = Yolo(batched_input)
pred_bbox = []
for key, value in result.items():
value = value.numpy()
pred_bbox.append(value)
t2 = time.time()
pred_bbox = [tf.reshape(x, (-1, tf.shape(x)[-1])) for x in pred_bbox]
pred_bbox = tf.concat(pred_bbox, axis=0)
bboxes = postprocess_boxes(pred_bbox, original_frame, input_size, score_threshold)
bboxes = nms(bboxes, iou_threshold, method='nms')
times.append(t2-t1)
times = times[-20:]
ms = sum(times)/len(times)*1000
fps = 1000 / ms
print("Time: {:.2f}ms, {:.1f} FPS".format(ms, fps))
frame = draw_bbox(original_frame, bboxes, CLASSES=CLASSES, rectangle_colors=rectangle_colors)
# CreateXMLfile("XML_Detections", str(int(time.time())), original_frame, bboxes, read_class_names(CLASSES))
image = cv2.putText(frame, "Time: {:.1f}FPS".format(fps), (0, 30),
cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 2)
if output_path != '': out.write(frame)
cv2.destroyAllWindows()
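# --- Added usage sketch (not part of the original file) ---
# Typical single-image flow using the helpers defined above; the two file paths
# are placeholders, and the remaining settings come from yolov3.configs.
def _demo_detect_image():
    yolo = Load_Yolo_model()
    return detect_image(yolo, "input.jpg", "detection_output.jpg",
                        input_size=YOLO_INPUT_SIZE, show=False,
                        rectangle_colors=(255, 0, 0))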
|
[
"cv2.rectangle",
"numpy.product",
"numpy.fromfile",
"tensorflow.shape",
"numpy.multiply.reduce",
"multiprocessing.Process",
"colorsys.hsv_to_rgb",
"cv2.imshow",
"numpy.array",
"cv2.destroyAllWindows",
"tensorflow.saved_model.load",
"cv2.VideoWriter",
"numpy.exp",
"tensorflow.concat",
"numpy.concatenate",
"cv2.VideoWriter_fourcc",
"numpy.maximum",
"cv2.waitKey",
"random.shuffle",
"numpy.argmax",
"cv2.putText",
"cv2.cvtColor",
"numpy.finfo",
"cv2.resize",
"multiprocessing.Queue",
"cv2.imread",
"time.time",
"cv2.getTextSize",
"numpy.copy",
"cv2.imwrite",
"numpy.minimum",
"numpy.logical_and",
"tensorflow.config.experimental.set_memory_growth",
"numpy.logical_or",
"random.seed",
"tensorflow.constant",
"cv2.VideoCapture",
"numpy.full",
"tensorflow.keras.backend.clear_session",
"tensorflow.config.experimental.list_physical_devices"
] |
[((698, 730), 'tensorflow.keras.backend.clear_session', 'tf.keras.backend.clear_session', ([], {}), '()\n', (728, 730), True, 'import tensorflow as tf\n'), ((2930, 2981), 'tensorflow.config.experimental.list_physical_devices', 'tf.config.experimental.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (2974, 2981), True, 'import tensorflow as tf\n'), ((4481, 4508), 'cv2.resize', 'cv2.resize', (['image', '(nw, nh)'], {}), '(image, (nw, nh))\n', (4491, 4508), False, 'import cv2\n'), ((4528, 4572), 'numpy.full', 'np.full', ([], {'shape': '[ih, iw, 3]', 'fill_value': '(128.0)'}), '(shape=[ih, iw, 3], fill_value=128.0)\n', (4535, 4572), True, 'import numpy as np\n'), ((5494, 5508), 'random.seed', 'random.seed', (['(0)'], {}), '(0)\n', (5505, 5508), False, 'import random\n'), ((5513, 5535), 'random.shuffle', 'random.shuffle', (['colors'], {}), '(colors)\n', (5527, 5535), False, 'import random\n'), ((5540, 5557), 'random.seed', 'random.seed', (['None'], {}), '(None)\n', (5551, 5557), False, 'import random\n'), ((7301, 7317), 'numpy.array', 'np.array', (['boxes1'], {}), '(boxes1)\n', (7309, 7317), True, 'import numpy as np\n'), ((7331, 7347), 'numpy.array', 'np.array', (['boxes2'], {}), '(boxes2)\n', (7339, 7347), True, 'import numpy as np\n'), ((7546, 7590), 'numpy.maximum', 'np.maximum', (['boxes1[..., :2]', 'boxes2[..., :2]'], {}), '(boxes1[..., :2], boxes2[..., :2])\n', (7556, 7590), True, 'import numpy as np\n'), ((7611, 7655), 'numpy.minimum', 'np.minimum', (['boxes1[..., 2:]', 'boxes2[..., 2:]'], {}), '(boxes1[..., 2:], boxes2[..., 2:])\n', (7621, 7655), True, 'import numpy as np\n'), ((7677, 7714), 'numpy.maximum', 'np.maximum', (['(right_down - left_up)', '(0.0)'], {}), '(right_down - left_up, 0.0)\n', (7687, 7714), True, 'import numpy as np\n'), ((9701, 9720), 'numpy.array', 'np.array', (['pred_bbox'], {}), '(pred_bbox)\n', (9709, 9720), True, 'import numpy as np\n'), ((9889, 10004), 'numpy.concatenate', 'np.concatenate', (['[pred_xywh[:, :2] - pred_xywh[:, 2:] * 0.5, pred_xywh[:, :2] + pred_xywh[:,\n 2:] * 0.5]'], {'axis': '(-1)'}), '([pred_xywh[:, :2] - pred_xywh[:, 2:] * 0.5, pred_xywh[:, :2] +\n pred_xywh[:, 2:] * 0.5], axis=-1)\n', (9903, 10004), True, 'import numpy as np\n'), ((10696, 10783), 'numpy.logical_or', 'np.logical_or', (['(pred_coor[:, 0] > pred_coor[:, 2])', '(pred_coor[:, 1] > pred_coor[:, 3])'], {}), '(pred_coor[:, 0] > pred_coor[:, 2], pred_coor[:, 1] >\n pred_coor[:, 3])\n', (10709, 10783), True, 'import numpy as np\n'), ((10965, 11041), 'numpy.logical_and', 'np.logical_and', (['(valid_scale[0] < bboxes_scale)', '(bboxes_scale < valid_scale[1])'], {}), '(valid_scale[0] < bboxes_scale, bboxes_scale < valid_scale[1])\n', (10979, 11041), True, 'import numpy as np\n'), ((11100, 11129), 'numpy.argmax', 'np.argmax', (['pred_prob'], {'axis': '(-1)'}), '(pred_prob, axis=-1)\n', (11109, 11129), True, 'import numpy as np\n'), ((11254, 11292), 'numpy.logical_and', 'np.logical_and', (['scale_mask', 'score_mask'], {}), '(scale_mask, score_mask)\n', (11268, 11292), True, 'import numpy as np\n'), ((11379, 11458), 'numpy.concatenate', 'np.concatenate', (['[coors, scores[:, np.newaxis], classes[:, np.newaxis]]'], {'axis': '(-1)'}), '([coors, scores[:, np.newaxis], classes[:, np.newaxis]], axis=-1)\n', (11393, 11458), True, 'import numpy as np\n'), ((11653, 11675), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (11663, 11675), False, 'import cv2\n'), ((11702, 11749), 'cv2.cvtColor', 'cv2.cvtColor', (['original_image', 'cv2.COLOR_BGR2RGB'], {}), 
'(original_image, cv2.COLOR_BGR2RGB)\n', (11714, 11749), False, 'import cv2\n'), ((11776, 11823), 'cv2.cvtColor', 'cv2.cvtColor', (['original_image', 'cv2.COLOR_BGR2RGB'], {}), '(original_image, cv2.COLOR_BGR2RGB)\n', (11788, 11823), False, 'import cv2\n'), ((12396, 12424), 'tensorflow.concat', 'tf.concat', (['pred_bbox'], {'axis': '(0)'}), '(pred_bbox, axis=0)\n', (12405, 12424), True, 'import tensorflow as tf\n'), ((12955, 13006), 'tensorflow.config.experimental.list_physical_devices', 'tf.config.experimental.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (12999, 13006), True, 'import tensorflow as tf\n'), ((16077, 16108), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'XVID'"], {}), "(*'XVID')\n", (16099, 16108), False, 'import cv2\n'), ((16119, 16176), 'cv2.VideoWriter', 'cv2.VideoWriter', (['output_path', 'codec', 'fps', '(width, height)'], {}), '(output_path, codec, fps, (width, height))\n', (16134, 16176), False, 'import cv2\n'), ((16285, 16292), 'multiprocessing.Queue', 'Queue', ([], {}), '()\n', (16290, 16292), False, 'from multiprocessing import Process, Queue, Pipe\n'), ((16311, 16318), 'multiprocessing.Queue', 'Queue', ([], {}), '()\n', (16316, 16318), False, 'from multiprocessing import Process, Queue, Pipe\n'), ((16340, 16347), 'multiprocessing.Queue', 'Queue', ([], {}), '()\n', (16345, 16347), False, 'from multiprocessing import Process, Queue, Pipe\n'), ((16371, 16378), 'multiprocessing.Queue', 'Queue', ([], {}), '()\n', (16376, 16378), False, 'from multiprocessing import Process, Queue, Pipe\n'), ((16402, 16409), 'multiprocessing.Queue', 'Queue', ([], {}), '()\n', (16407, 16409), False, 'from multiprocessing import Process, Queue, Pipe\n'), ((16429, 16436), 'multiprocessing.Queue', 'Queue', ([], {}), '()\n', (16434, 16436), False, 'from multiprocessing import Process, Queue, Pipe\n'), ((16447, 16536), 'multiprocessing.Process', 'Process', ([], {'target': 'Predict_bbox_mp', 'args': '(Frames_data, Predicted_data, Processing_times)'}), '(target=Predict_bbox_mp, args=(Frames_data, Predicted_data,\n Processing_times))\n', (16454, 16536), False, 'from multiprocessing import Process, Queue, Pipe\n'), ((16542, 16737), 'multiprocessing.Process', 'Process', ([], {'target': 'postprocess_mp', 'args': '(Predicted_data, original_frames, Processed_frames, Processing_times,\n input_size, CLASSES, score_threshold, iou_threshold, rectangle_colors,\n realtime)'}), '(target=postprocess_mp, args=(Predicted_data, original_frames,\n Processed_frames, Processing_times, input_size, CLASSES,\n score_threshold, iou_threshold, rectangle_colors, realtime))\n', (16549, 16737), False, 'from multiprocessing import Process, Queue, Pipe\n'), ((16739, 16813), 'multiprocessing.Process', 'Process', ([], {'target': 'Show_Image_mp', 'args': '(Processed_frames, show, Final_frames)'}), '(target=Show_Image_mp, args=(Processed_frames, show, Final_frames))\n', (16746, 16813), False, 'from multiprocessing import Process, Queue, Pipe\n'), ((17768, 17791), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (17789, 17791), False, 'import cv2\n'), ((17997, 18025), 'cv2.VideoCapture', 'cv2.VideoCapture', (['video_path'], {}), '(video_path)\n', (18013, 18025), False, 'import cv2\n'), ((18243, 18274), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'XVID'"], {}), "(*'XVID')\n", (18265, 18274), False, 'import cv2\n'), ((18285, 18342), 'cv2.VideoWriter', 'cv2.VideoWriter', (['output_path', 'codec', 'fps', '(width, height)'], {}), '(output_path, codec, fps, (width, height))\n', 
(18300, 18342), False, 'import cv2\n'), ((20390, 20413), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (20411, 20413), False, 'import cv2\n'), ((20618, 20637), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (20634, 20637), False, 'import cv2\n'), ((20885, 20916), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'XVID'"], {}), "(*'XVID')\n", (20907, 20916), False, 'import cv2\n'), ((20927, 20984), 'cv2.VideoWriter', 'cv2.VideoWriter', (['output_path', 'codec', 'fps', '(width, height)'], {}), '(output_path, codec, fps, (width, height))\n', (20942, 20984), False, 'import cv2\n'), ((22713, 22736), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (22734, 22736), False, 'import cv2\n'), ((1197, 1237), 'numpy.fromfile', 'np.fromfile', (['wf'], {'dtype': 'np.int32', 'count': '(5)'}), '(wf, dtype=np.int32, count=5)\n', (1208, 1237), True, 'import numpy as np\n'), ((5612, 5646), 'numpy.array', 'np.array', (['bbox[:4]'], {'dtype': 'np.int32'}), '(bbox[:4], dtype=np.int32)\n', (5620, 5646), True, 'import numpy as np\n'), ((6038, 6106), 'cv2.rectangle', 'cv2.rectangle', (['image', '(x1, y1)', '(x2, y2)', 'bbox_color', '(bbox_thick * 2)'], {}), '(image, (x1, y1), (x2, y2), bbox_color, bbox_thick * 2)\n', (6051, 6106), False, 'import cv2\n'), ((10880, 10946), 'numpy.multiply.reduce', 'np.multiply.reduce', (['(pred_coor[:, 2:4] - pred_coor[:, 0:2])'], {'axis': '(-1)'}), '(pred_coor[:, 2:4] - pred_coor[:, 0:2], axis=-1)\n', (10898, 10946), True, 'import numpy as np\n'), ((11859, 11882), 'numpy.copy', 'np.copy', (['original_image'], {}), '(original_image)\n', (11866, 11882), True, 'import numpy as np\n'), ((12824, 12855), 'cv2.imwrite', 'cv2.imwrite', (['output_path', 'image'], {}), '(output_path, image)\n', (12835, 12855), False, 'import cv2\n'), ((15787, 15806), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (15803, 15806), False, 'import cv2\n'), ((15831, 15859), 'cv2.VideoCapture', 'cv2.VideoCapture', (['video_path'], {}), '(video_path)\n', (15847, 15859), False, 'import cv2\n'), ((16970, 17006), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (16982, 17006), False, 'import cv2\n'), ((17032, 17079), 'cv2.cvtColor', 'cv2.cvtColor', (['original_image', 'cv2.COLOR_BGR2RGB'], {}), '(original_image, cv2.COLOR_BGR2RGB)\n', (17044, 17079), False, 'import cv2\n'), ((18778, 18789), 'time.time', 'time.time', ([], {}), '()\n', (18787, 18789), False, 'import time\n'), ((19170, 19181), 'time.time', 'time.time', ([], {}), '()\n', (19179, 19181), False, 'import time\n'), ((19281, 19309), 'tensorflow.concat', 'tf.concat', (['pred_bbox'], {'axis': '(0)'}), '(pred_bbox, axis=0)\n', (19290, 19309), True, 'import tensorflow as tf\n'), ((19577, 19588), 'time.time', 'time.time', ([], {}), '()\n', (19586, 19588), False, 'import time\n'), ((21423, 21434), 'time.time', 'time.time', ([], {}), '()\n', (21432, 21434), False, 'import time\n'), ((21815, 21826), 'time.time', 'time.time', ([], {}), '()\n', (21824, 21826), False, 'import time\n'), ((21926, 21954), 'tensorflow.concat', 'tf.concat', (['pred_bbox'], {'axis': '(0)'}), '(pred_bbox, axis=0)\n', (21935, 21954), True, 'import tensorflow as tf\n'), ((3047, 3102), 'tensorflow.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', (['gpus[0]', '(True)'], {}), '(gpus[0], True)\n', (3087, 3102), True, 'import tensorflow as tf\n'), ((4053, 4123), 'tensorflow.saved_model.load', 'tf.saved_model.load', (['YOLO_CUSTOM_WEIGHTS'], 
{'tags': '[tag_constants.SERVING]'}), '(YOLO_CUSTOM_WEIGHTS, tags=[tag_constants.SERVING])\n', (4072, 4123), True, 'import tensorflow as tf\n'), ((6713, 6805), 'cv2.getTextSize', 'cv2.getTextSize', (['label', 'cv2.FONT_HERSHEY_COMPLEX_SMALL', 'fontScale'], {'thickness': 'bbox_thick'}), '(label, cv2.FONT_HERSHEY_COMPLEX_SMALL, fontScale, thickness\n =bbox_thick)\n', (6728, 6805), False, 'import cv2\n'), ((6919, 7035), 'cv2.rectangle', 'cv2.rectangle', (['image', '(x1, y1)', '(x1 + text_width, y1 - text_height - baseline)', 'bbox_color'], {'thickness': 'cv2.FILLED'}), '(image, (x1, y1), (x1 + text_width, y1 - text_height -\n baseline), bbox_color, thickness=cv2.FILLED)\n', (6932, 7035), False, 'import cv2\n'), ((7084, 7217), 'cv2.putText', 'cv2.putText', (['image', 'label', '(x1, y1 - 4)', 'cv2.FONT_HERSHEY_COMPLEX_SMALL', 'fontScale', 'Text_colors', 'bbox_thick'], {'lineType': 'cv2.LINE_AA'}), '(image, label, (x1, y1 - 4), cv2.FONT_HERSHEY_COMPLEX_SMALL,\n fontScale, Text_colors, bbox_thick, lineType=cv2.LINE_AA)\n', (7095, 7217), False, 'import cv2\n'), ((7902, 7922), 'numpy.finfo', 'np.finfo', (['np.float32'], {}), '(np.float32)\n', (7910, 7922), True, 'import numpy as np\n'), ((8608, 8635), 'numpy.argmax', 'np.argmax', (['cls_bboxes[:, 4]'], {}), '(cls_bboxes[:, 4])\n', (8617, 8635), True, 'import numpy as np\n'), ((8747, 8811), 'numpy.concatenate', 'np.concatenate', (['[cls_bboxes[:max_ind], cls_bboxes[max_ind + 1:]]'], {}), '([cls_bboxes[:max_ind], cls_bboxes[max_ind + 1:]])\n', (8761, 8811), True, 'import numpy as np\n'), ((10543, 10579), 'numpy.maximum', 'np.maximum', (['pred_coor[:, :2]', '[0, 0]'], {}), '(pred_coor[:, :2], [0, 0])\n', (10553, 10579), True, 'import numpy as np\n'), ((10613, 10665), 'numpy.minimum', 'np.minimum', (['pred_coor[:, 2:]', '[org_w - 1, org_h - 1]'], {}), '(pred_coor[:, 2:], [org_w - 1, org_h - 1])\n', (10623, 10665), True, 'import numpy as np\n'), ((12109, 12132), 'tensorflow.constant', 'tf.constant', (['image_data'], {}), '(image_data)\n', (12120, 12132), True, 'import tensorflow as tf\n'), ((13042, 13097), 'tensorflow.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', (['gpus[0]', '(True)'], {}), '(gpus[0], True)\n', (13082, 13097), True, 'import tensorflow as tf\n'), ((13358, 13369), 'time.time', 'time.time', ([], {}), '()\n', (13367, 13369), False, 'import time\n'), ((13926, 13954), 'tensorflow.concat', 'tf.concat', (['pred_bbox'], {'axis': '(0)'}), '(pred_bbox, axis=0)\n', (13935, 13954), True, 'import tensorflow as tf\n'), ((17163, 17186), 'numpy.copy', 'np.copy', (['original_image'], {}), '(original_image)\n', (17170, 17186), True, 'import numpy as np\n'), ((18458, 18494), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (18470, 18494), False, 'import cv2\n'), ((18524, 18571), 'cv2.cvtColor', 'cv2.cvtColor', (['original_image', 'cv2.COLOR_BGR2RGB'], {}), '(original_image, cv2.COLOR_BGR2RGB)\n', (18536, 18571), False, 'import cv2\n'), ((18645, 18668), 'numpy.copy', 'np.copy', (['original_image'], {}), '(original_image)\n', (18652, 18668), True, 'import numpy as np\n'), ((20244, 20271), 'cv2.imshow', 'cv2.imshow', (['"""output"""', 'image'], {}), "('output', image)\n", (20254, 20271), False, 'import cv2\n'), ((21102, 21140), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2RGB'], {}), '(frame, cv2.COLOR_BGR2RGB)\n', (21114, 21140), False, 'import cv2\n'), ((21170, 21217), 'cv2.cvtColor', 'cv2.cvtColor', (['original_frame', 'cv2.COLOR_BGR2RGB'], {}), 
'(original_frame, cv2.COLOR_BGR2RGB)\n', (21182, 21217), False, 'import cv2\n'), ((21290, 21313), 'numpy.copy', 'np.copy', (['original_frame'], {}), '(original_frame)\n', (21297, 21313), True, 'import numpy as np\n'), ((1894, 1946), 'numpy.fromfile', 'np.fromfile', (['wf'], {'dtype': 'np.float32', 'count': '(4 * filters)'}), '(wf, dtype=np.float32, count=4 * filters)\n', (1905, 1946), True, 'import numpy as np\n'), ((2210, 2258), 'numpy.fromfile', 'np.fromfile', (['wf'], {'dtype': 'np.float32', 'count': 'filters'}), '(wf, dtype=np.float32, count=filters)\n', (2221, 2258), True, 'import numpy as np\n'), ((5357, 5380), 'colorsys.hsv_to_rgb', 'colorsys.hsv_to_rgb', (['*x'], {}), '(*x)\n', (5376, 5380), False, 'import colorsys\n'), ((9365, 9398), 'numpy.exp', 'np.exp', (['(-(1.0 * iou ** 2 / sigma))'], {}), '(-(1.0 * iou ** 2 / sigma))\n', (9371, 9398), True, 'import numpy as np\n'), ((13403, 13414), 'time.time', 'time.time', ([], {}), '()\n', (13412, 13414), False, 'import time\n'), ((15393, 15420), 'cv2.imshow', 'cv2.imshow', (['"""output"""', 'image'], {}), "('output', image)\n", (15403, 15420), False, 'import cv2\n'), ((18940, 18963), 'tensorflow.constant', 'tf.constant', (['image_data'], {}), '(image_data)\n', (18951, 18963), True, 'import tensorflow as tf\n'), ((20339, 20362), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (20360, 20362), False, 'import cv2\n'), ((21585, 21608), 'tensorflow.constant', 'tf.constant', (['image_data'], {}), '(image_data)\n', (21596, 21608), True, 'import tensorflow as tf\n'), ((2447, 2469), 'numpy.product', 'np.product', (['conv_shape'], {}), '(conv_shape)\n', (2457, 2469), True, 'import numpy as np\n'), ((12342, 12353), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (12350, 12353), True, 'import tensorflow as tf\n'), ((13583, 13606), 'tensorflow.constant', 'tf.constant', (['image_data'], {}), '(image_data)\n', (13594, 13606), True, 'import tensorflow as tf\n'), ((14780, 14791), 'time.time', 'time.time', ([], {}), '()\n', (14789, 14791), False, 'import time\n'), ((15496, 15519), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (15517, 15519), False, 'import cv2\n'), ((20287, 20302), 'cv2.waitKey', 'cv2.waitKey', (['(25)'], {}), '(25)\n', (20298, 20302), False, 'import cv2\n'), ((15440, 15455), 'cv2.waitKey', 'cv2.waitKey', (['(25)'], {}), '(25)\n', (15451, 15455), False, 'import cv2\n'), ((19223, 19234), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (19231, 19234), True, 'import tensorflow as tf\n'), ((21868, 21879), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (21876, 21879), True, 'import tensorflow as tf\n'), ((13864, 13875), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (13872, 13875), True, 'import tensorflow as tf\n')]
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import itertools
import mxnet as mx
import numpy as np
import pytest
from gluonts.mx.distribution import Binned, BinnedOutput
COMMON_KWARGS = {
"bin_log_probs": mx.nd.array([[0.1, 0.2, 0.1, 0.05, 0.2, 0.1, 0.25]])
.log()
.repeat(axis=0, repeats=2),
"bin_centers": mx.nd.array([[-5, -3, -1.2, -0.5, 0, 0.1, 0.2]]).repeat(
axis=0, repeats=2
),
}
@pytest.fixture
def labels():
return mx.random.uniform(low=-6, high=1, shape=(2,)) # T, N
@pytest.mark.parametrize(
"K,alpha", itertools.product([1000, 10000, 100000], [0.001, 0.01, 0.1])
)
def test_smooth_mask_adds_to_one(K, alpha):
bin_log_probs = mx.nd.log_softmax(mx.nd.ones(K))
bin_centers = mx.nd.arange(K)
dist = Binned(
bin_log_probs=bin_log_probs,
bin_centers=bin_centers,
label_smoothing=0.2,
)
labels = mx.random.uniform(low=0, high=K, shape=(12,)).expand_dims(-1)
mask = dist._get_mask(labels)
smooth_mask = dist._smooth_mask(mx.nd, mask, alpha=mx.nd.array([alpha]))
# check smooth mask adds to one
assert np.allclose(
smooth_mask.asnumpy().sum(axis=-1), np.ones(12), atol=1e-6
)
def test_get_smooth_mask_correct(labels):
dist = Binned(**COMMON_KWARGS, label_smoothing=0.2)
binned = Binned(**COMMON_KWARGS)
labels = labels.expand_dims(-1)
mask = dist._get_mask(labels)
assert np.allclose(mask.asnumpy(), binned._get_mask(labels).asnumpy())
smooth_mask = dist._smooth_mask(mx.nd, mask, alpha=mx.nd.array([0.2]))
# check smooth mask adds to one
assert np.allclose(smooth_mask.asnumpy().sum(axis=-1), np.ones(2))
# check smooth mask peaks same
assert np.allclose(
np.argmax(smooth_mask.asnumpy(), axis=-1),
np.argmax(mask.asnumpy(), axis=-1),
)
# check smooth mask mins correct
assert np.allclose(
smooth_mask.asnumpy().min(axis=-1), np.ones(2) * 0.2 / 7 # alpha / K
)
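# The assertions above are consistent with standard label smoothing (sketch only,
# not taken verbatim from the gluonts implementation):
#   smoothed = (1 - alpha) * one_hot_mask + alpha / K
# so the peak bin keeps the argmax, every other bin receives alpha / K, and each
# row still sums to one.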
def test_loss_correct(labels):
smooth_alpha = Binned(**COMMON_KWARGS, label_smoothing=0.4)
smooth_noalpha = Binned(**COMMON_KWARGS, label_smoothing=0.0)
binned = Binned(**COMMON_KWARGS)
assert np.allclose(
binned.loss(labels).asnumpy(), smooth_noalpha.loss(labels).asnumpy()
)
assert not np.allclose(
binned.loss(labels).asnumpy(), smooth_alpha.loss(labels).asnumpy()
)
@pytest.mark.parametrize("hybridize", [True, False])
def test_output_sets_alpha(labels, hybridize):
binned_output = BinnedOutput(
bin_centers=COMMON_KWARGS["bin_centers"][0], label_smoothing=0.35
)
arg_proj = binned_output.get_args_proj()
if hybridize:
arg_proj.hybridize()
arg_proj.initialize()
assert (
binned_output.distribution(
arg_proj(mx.nd.random.uniform(2, 10))
).label_smoothing
== 0.35
)
|
[
"mxnet.nd.random.uniform",
"gluonts.mx.distribution.Binned",
"numpy.ones",
"mxnet.nd.ones",
"mxnet.random.uniform",
"itertools.product",
"pytest.mark.parametrize",
"mxnet.nd.arange",
"mxnet.nd.array",
"gluonts.mx.distribution.BinnedOutput"
] |
[((2927, 2978), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""hybridize"""', '[True, False]'], {}), "('hybridize', [True, False])\n", (2950, 2978), False, 'import pytest\n'), ((993, 1038), 'mxnet.random.uniform', 'mx.random.uniform', ([], {'low': '(-6)', 'high': '(1)', 'shape': '(2,)'}), '(low=-6, high=1, shape=(2,))\n', (1010, 1038), True, 'import mxnet as mx\n'), ((1268, 1283), 'mxnet.nd.arange', 'mx.nd.arange', (['K'], {}), '(K)\n', (1280, 1283), True, 'import mxnet as mx\n'), ((1296, 1381), 'gluonts.mx.distribution.Binned', 'Binned', ([], {'bin_log_probs': 'bin_log_probs', 'bin_centers': 'bin_centers', 'label_smoothing': '(0.2)'}), '(bin_log_probs=bin_log_probs, bin_centers=bin_centers,\n label_smoothing=0.2)\n', (1302, 1381), False, 'from gluonts.mx.distribution import Binned, BinnedOutput\n'), ((1090, 1150), 'itertools.product', 'itertools.product', (['[1000, 10000, 100000]', '[0.001, 0.01, 0.1]'], {}), '([1000, 10000, 100000], [0.001, 0.01, 0.1])\n', (1107, 1150), False, 'import itertools\n'), ((1785, 1829), 'gluonts.mx.distribution.Binned', 'Binned', ([], {'label_smoothing': '(0.2)'}), '(**COMMON_KWARGS, label_smoothing=0.2)\n', (1791, 1829), False, 'from gluonts.mx.distribution import Binned, BinnedOutput\n'), ((1843, 1866), 'gluonts.mx.distribution.Binned', 'Binned', ([], {}), '(**COMMON_KWARGS)\n', (1849, 1866), False, 'from gluonts.mx.distribution import Binned, BinnedOutput\n'), ((2558, 2602), 'gluonts.mx.distribution.Binned', 'Binned', ([], {'label_smoothing': '(0.4)'}), '(**COMMON_KWARGS, label_smoothing=0.4)\n', (2564, 2602), False, 'from gluonts.mx.distribution import Binned, BinnedOutput\n'), ((2624, 2668), 'gluonts.mx.distribution.Binned', 'Binned', ([], {'label_smoothing': '(0.0)'}), '(**COMMON_KWARGS, label_smoothing=0.0)\n', (2630, 2668), False, 'from gluonts.mx.distribution import Binned, BinnedOutput\n'), ((2682, 2705), 'gluonts.mx.distribution.Binned', 'Binned', ([], {}), '(**COMMON_KWARGS)\n', (2688, 2705), False, 'from gluonts.mx.distribution import Binned, BinnedOutput\n'), ((3046, 3125), 'gluonts.mx.distribution.BinnedOutput', 'BinnedOutput', ([], {'bin_centers': "COMMON_KWARGS['bin_centers'][0]", 'label_smoothing': '(0.35)'}), "(bin_centers=COMMON_KWARGS['bin_centers'][0], label_smoothing=0.35)\n", (3058, 3125), False, 'from gluonts.mx.distribution import Binned, BinnedOutput\n'), ((1235, 1248), 'mxnet.nd.ones', 'mx.nd.ones', (['K'], {}), '(K)\n', (1245, 1248), True, 'import mxnet as mx\n'), ((1701, 1712), 'numpy.ones', 'np.ones', (['(12)'], {}), '(12)\n', (1708, 1712), True, 'import numpy as np\n'), ((2187, 2197), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (2194, 2197), True, 'import numpy as np\n'), ((858, 906), 'mxnet.nd.array', 'mx.nd.array', (['[[-5, -3, -1.2, -0.5, 0, 0.1, 0.2]]'], {}), '([[-5, -3, -1.2, -0.5, 0, 0.1, 0.2]])\n', (869, 906), True, 'import mxnet as mx\n'), ((1423, 1468), 'mxnet.random.uniform', 'mx.random.uniform', ([], {'low': '(0)', 'high': 'K', 'shape': '(12,)'}), '(low=0, high=K, shape=(12,))\n', (1440, 1468), True, 'import mxnet as mx\n'), ((1574, 1594), 'mxnet.nd.array', 'mx.nd.array', (['[alpha]'], {}), '([alpha])\n', (1585, 1594), True, 'import mxnet as mx\n'), ((2071, 2089), 'mxnet.nd.array', 'mx.nd.array', (['[0.2]'], {}), '([0.2])\n', (2082, 2089), True, 'import mxnet as mx\n'), ((2466, 2476), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (2473, 2476), True, 'import numpy as np\n'), ((743, 795), 'mxnet.nd.array', 'mx.nd.array', (['[[0.1, 0.2, 0.1, 0.05, 0.2, 0.1, 0.25]]'], {}), '([[0.1, 0.2, 0.1, 0.05, 
0.2, 0.1, 0.25]])\n', (754, 795), True, 'import mxnet as mx\n'), ((3330, 3357), 'mxnet.nd.random.uniform', 'mx.nd.random.uniform', (['(2)', '(10)'], {}), '(2, 10)\n', (3350, 3357), True, 'import mxnet as mx\n')]
|
import sys, os, subprocess, importlib
import numpy as np
import ctypes
__all__ = ['Potential', 'TTM', 'MBPol']
class Potential:
"""Abstract base class for potential energy surfaces. A single function, evaluate(),
must be implemented which returns the energy and gradients.
Each child class needs to implement any methods to:
    1) return the gradients with the same atom ordering as it received them
2) help with evaluation of the potential by calling a library or python function
"""
def __init__(self, path_to_library=None, name_of_function=None, name_of_module=None, name_of_library=None):
"""
path_to_library (str): absolute path to library or file which contains potential function.
name_of_function (str): name of the function to be called from the imported module.
name_of_module (str): name of a module containing the function to be called.
name_of_library (str): name of a shared library containing the function to be called.
"""
self.path_to_library = os.path.normpath(os.path.join(os.getcwd(), path_to_library))
self.name_of_function = name_of_function
self.name_of_module = name_of_module
self.name_of_library = name_of_library
self.work_dir = os.getcwd()
# these are the actual handles to the library, module, and/or function
self.library = None
self.potential_function = None
if self.name_of_module and self.name_of_library:
print("Please provide either a module name or a library name, but not both.")
print("name_of_module is for a python module we will call.")
print("name_of_library is for a shared a library we will call into.")
sys.exit(1)
# remove the .py suffix from name_of_module if it happens to be there
if self.name_of_module and ".py" in self.name_of_module:
self.name_of_module = self.name_of_module.split(".")[0]
def evaluate(self, coords):
raise NotImplementedError
def initialize_potential(self):
"""
Initializes a potential which is accessed via an absolute path, self.path_to_library, and a function name.
Either a module name or the name of a shared library may be provided for python functions and C/Fortran functions, respectively.
This function is crucial if you want to use a multiprocessing pool, as it needs to be
used as the initializer for the pool.
"""
if self.potential_function is None:
os.chdir(self.path_to_library)
sys.path.insert(0, os.getcwd())
# this branch is for loading a function from a python module
if self.name_of_module:
try:
module = importlib.import_module(self.name_of_module)
self.potential_function = getattr(module, self.name_of_function)
os.chdir(self.work_dir)
except ImportError:
print("Did not find potential module. Make sure you have compiled it and the library can be linked against, including things like libgfortran and libgcc.")
print("If the module is a plain python function, then make sure you are passing the correct absolute path to the file.")
sys.exit(1)
elif self.name_of_library:
try:
self.library = ctypes.cdll.LoadLibrary(self.name_of_library)
self.potential_function = getattr(self.library, self.name_of_function)
os.chdir(self.work_dir)
except AttributeError:
print("Didn't find the function in the provided shared library. Make sure the library path and potential function name are correct.")
sys.exit(1)
else:
print("We need either a library name or a module name. Please provide one.")
sys.exit(1)
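# Hedged usage sketch (not part of the original module): as the docstring of
# initialize_potential() notes, it is meant to be passed as the pool initializer
# when evaluating geometries in parallel, roughly:
#   import multiprocessing as mp
#   pot = TTM('/path/to/ttm/library')  # hypothetical path
#   with mp.Pool(4, initializer=pot.initialize_potential) as pool:
#       results = pool.map(pot.evaluate, list_of_geometries)  # hypothetical data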
class TTM(Potential):
def __init__(self, path_to_library: str, name_of_function="ttm_from_f2py", name_of_module="ttm", model=21):
"""Evaluates the energy and gradients of the TTM family of potentials.
Args:
model (int, optional): The TTM model which will be used. Options are 2, 21, and 3. Defaults to 21.
"""
super().__init__(path_to_library=path_to_library, name_of_function=name_of_function, name_of_module=name_of_module)
self.model = model
self.initialize_potential()
possible_models = [2, 21, 3]
if self.model not in possible_models:
print("The possible TTM versions are 2, 21, or 3. Please choose one of these.")
sys.exit(1)
def evaluate(self, coords):
"""Takes xyz coordinates of water molecules in O H H, O H H order and re-orders to OOHHHH order
then transposes to fortran column-ordered matrix and calls the TTM potential from an f2py module.
Args:
coords (ndarray3d): xyz coordinates of a system which can be evaluated by this potential.
Returns:
energy (float): energy of the system in hartree
forces (ndarray3d): forces of the system in hartree / bohr
"""
# Sadly, we need to re-order the geometry to TTM format which is all oxygens first.
coords = self.ttm_ordering(coords)
os.chdir(self.path_to_library)
gradients, energy = self.potential_function(self.model, np.asarray(coords).T, int(len(coords) / 3))
os.chdir(self.work_dir)
return energy / 627.5, (-self.normal_water_ordering(gradients.T) / 627.5) / 1.88973
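    # The conversion factors used above appear to be 627.5 ~ kcal/mol per hartree
    # (627.509...) and 1.88973 ~ bohr per angstrom, i.e. the kcal/mol and angstrom
    # quantities returned by the TTM library are converted to the hartree and
    # hartree/bohr units promised in the evaluate() docstring.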
def __call__(self, coords):
return self.evaluate(coords)
def __getstate__(self):
d = dict(self.__dict__)
del d['potential_function']
return d
def __setstate__(self, d):
self.__dict__.update(d)
self.__dict__.update({"potential_function": None})
self.initialize_potential()
@staticmethod
def ttm_ordering(coords):
"""Sorts an array of coordinates in OHHOHH format to OOHHHH format.
Args:
coords (ndarray3d): numpy array of coordinates
Returns:
ndarray3d: numpy array of coordinate sorted according to the order TTM wants.
"""
atom_order = []
for i in range(0, coords.shape[0], 3):
atom_order.append(i)
for i in range(0, coords.shape[0], 3):
atom_order.append(i+1)
atom_order.append(i+2)
return coords[atom_order,:]
@staticmethod
def normal_water_ordering(coords):
"""Sorts an array of coordinates in OOHHHH format to OHHOHH format.
Args:
coords (ndarray3d): numpy array of coordinates
Returns:
ndarray3d: numpy array of coordinate sorted in the normal way for water.
"""
atom_order = []
Nw = int(coords.shape[0] / 3)
for i in range(0, Nw, 1):
atom_order.append(i)
atom_order.append(Nw+2*i)
atom_order.append(Nw+2*i+1)
return coords[atom_order,:]
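    # Small worked example of the two orderings above (illustrative only): for two
    # waters, rows O1 H1a H1b O2 H2a H2b are permuted by ttm_ordering via indices
    # [0, 3, 1, 2, 4, 5] into O1 O2 H1a H1b H2a H2b, and normal_water_ordering
    # applied to that OOHHHH array uses indices [0, 2, 3, 1, 4, 5] to restore
    # O1 H1a H1b O2 H2a H2b.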
class MBPol(Potential):
def __init__(self, path_to_library: str, name_of_function="calcpotg_", name_of_library="./libmbpol.so"):
super().__init__(path_to_library=path_to_library, name_of_function=name_of_function, name_of_library=name_of_library)
def initialize_potential(self, num_waters):
super().initialize_potential()
if self.potential_function.argtypes is None:
self.potential_function.restype = None
self.potential_function.argtypes = [
ctypes.POINTER(ctypes.c_int),
np.ctypeslib.ndpointer(ctypes.c_double, flags="C_CONTIGUOUS"),
np.ctypeslib.ndpointer(ctypes.c_double, flags="C_CONTIGUOUS"),
np.ctypeslib.ndpointer(ctypes.c_double, flags="C_CONTIGUOUS")
]
self.num_waters = num_waters
self.c_num_waters = ctypes.byref(ctypes.c_int32(self.num_waters))
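        # Given the argtypes above, the underlying C routine presumably has a
        # signature along the lines of (assumption, not checked against libmbpol):
        #   void calcpotg_(int *nw, double *Vpot, double *coords, double *grads);
        # with coords and grads passed as flat C-contiguous arrays of 9 * nw doubles
        # and Vpot a length-1 output array.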
def evaluate(self, coords):
if isinstance(coords, np.ndarray):
self.initialize_potential(coords.shape[0] // 3)
else:
self.initialize_potential(len(coords) // 9) # N_w X 3 X 3
coords = np.array(coords)
coords=np.ascontiguousarray(coords, dtype=np.float64).flatten()
grads = np.zeros_like(coords)
potential_energy = np.zeros(1)
self.potential_function(self.c_num_waters, potential_energy, coords, grads)
return potential_energy[0] / 627.5, -np.reshape(grads, (3 * self.num_waters, 3)) / 627.5 / 1.88973
def __call__(self, coords):
return self.evaluate(coords)
class Protonated_Water(Potential):
def __init__(self, num_waters: int, library_path: str, do_init=True):
self.num_waters = num_waters
self.library_path = library_path
self.work_dir = os.getcwd()
sys.path.insert(0, self.library_path)
os.chdir(self.library_path)
self.module = importlib.import_module("Protonated_Water")
self.energy_function = getattr(self.module, "get_energy")
self.energy_and_gradient_function = getattr(self.module, "get_energy_and_gradients")
self.init_function = getattr(self.module, "initialize_potential")
if do_init:
self.init_function(self.num_waters)
os.chdir(self.work_dir)
def evaluate(self, coords, get_gradients=True):
        if get_gradients:
            return self.get_energy_and_gradients(coords)
        else:
            return self.get_energy(coords)
def get_energy(self, coords):
"""
Gets potential energy in hartree from coords.
"""
os.chdir(self.library_path)
energy = self.energy_function(coords.T)
os.chdir(self.work_dir)
return energy
def get_energy_and_gradients(self, coords):
"""
Gets potential energy in hartree and gradients in hartree per bohr from coords.
"""
os.chdir(self.library_path)
energy, gradients = self.energy_and_gradient_function(coords.T)
os.chdir(self.work_dir)
gradients = np.reshape(gradients, np.shape(coords))
return energy, gradients
|
[
"sys.path.insert",
"ctypes.c_int32",
"importlib.import_module",
"ctypes.POINTER",
"ctypes.cdll.LoadLibrary",
"numpy.reshape",
"numpy.asarray",
"os.getcwd",
"os.chdir",
"numpy.array",
"numpy.zeros",
"numpy.ctypeslib.ndpointer",
"numpy.ascontiguousarray",
"sys.exit",
"numpy.shape",
"numpy.zeros_like"
] |
[((1302, 1313), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1311, 1313), False, 'import sys, os, subprocess, importlib\n'), ((5504, 5534), 'os.chdir', 'os.chdir', (['self.path_to_library'], {}), '(self.path_to_library)\n', (5512, 5534), False, 'import sys, os, subprocess, importlib\n'), ((5653, 5676), 'os.chdir', 'os.chdir', (['self.work_dir'], {}), '(self.work_dir)\n', (5661, 5676), False, 'import sys, os, subprocess, importlib\n'), ((8590, 8611), 'numpy.zeros_like', 'np.zeros_like', (['coords'], {}), '(coords)\n', (8603, 8611), True, 'import numpy as np\n'), ((8640, 8651), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (8648, 8651), True, 'import numpy as np\n'), ((9140, 9151), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (9149, 9151), False, 'import sys, os, subprocess, importlib\n'), ((9161, 9198), 'sys.path.insert', 'sys.path.insert', (['(0)', 'self.library_path'], {}), '(0, self.library_path)\n', (9176, 9198), False, 'import sys, os, subprocess, importlib\n'), ((9208, 9235), 'os.chdir', 'os.chdir', (['self.library_path'], {}), '(self.library_path)\n', (9216, 9235), False, 'import sys, os, subprocess, importlib\n'), ((9259, 9302), 'importlib.import_module', 'importlib.import_module', (['"""Protonated_Water"""'], {}), "('Protonated_Water')\n", (9282, 9302), False, 'import sys, os, subprocess, importlib\n'), ((9618, 9641), 'os.chdir', 'os.chdir', (['self.work_dir'], {}), '(self.work_dir)\n', (9626, 9641), False, 'import sys, os, subprocess, importlib\n'), ((9968, 9995), 'os.chdir', 'os.chdir', (['self.library_path'], {}), '(self.library_path)\n', (9976, 9995), False, 'import sys, os, subprocess, importlib\n'), ((10054, 10077), 'os.chdir', 'os.chdir', (['self.work_dir'], {}), '(self.work_dir)\n', (10062, 10077), False, 'import sys, os, subprocess, importlib\n'), ((10280, 10307), 'os.chdir', 'os.chdir', (['self.library_path'], {}), '(self.library_path)\n', (10288, 10307), False, 'import sys, os, subprocess, importlib\n'), ((10390, 10413), 'os.chdir', 'os.chdir', (['self.work_dir'], {}), '(self.work_dir)\n', (10398, 10413), False, 'import sys, os, subprocess, importlib\n'), ((1794, 1805), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1802, 1805), False, 'import sys, os, subprocess, importlib\n'), ((2617, 2647), 'os.chdir', 'os.chdir', (['self.path_to_library'], {}), '(self.path_to_library)\n', (2625, 2647), False, 'import sys, os, subprocess, importlib\n'), ((4813, 4824), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4821, 4824), False, 'import sys, os, subprocess, importlib\n'), ((8204, 8235), 'ctypes.c_int32', 'ctypes.c_int32', (['self.num_waters'], {}), '(self.num_waters)\n', (8218, 8235), False, 'import ctypes\n'), ((8483, 8499), 'numpy.array', 'np.array', (['coords'], {}), '(coords)\n', (8491, 8499), True, 'import numpy as np\n'), ((10457, 10473), 'numpy.shape', 'np.shape', (['coords'], {}), '(coords)\n', (10465, 10473), True, 'import numpy as np\n'), ((1102, 1113), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1111, 1113), False, 'import sys, os, subprocess, importlib\n'), ((2680, 2691), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2689, 2691), False, 'import sys, os, subprocess, importlib\n'), ((5600, 5618), 'numpy.asarray', 'np.asarray', (['coords'], {}), '(coords)\n', (5610, 5618), True, 'import numpy as np\n'), ((7840, 7868), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.c_int'], {}), '(ctypes.c_int)\n', (7854, 7868), False, 'import ctypes\n'), ((7887, 7948), 'numpy.ctypeslib.ndpointer', 'np.ctypeslib.ndpointer', (['ctypes.c_double'], {'flags': '"""C_CONTIGUOUS"""'}), 
"(ctypes.c_double, flags='C_CONTIGUOUS')\n", (7909, 7948), True, 'import numpy as np\n'), ((7967, 8028), 'numpy.ctypeslib.ndpointer', 'np.ctypeslib.ndpointer', (['ctypes.c_double'], {'flags': '"""C_CONTIGUOUS"""'}), "(ctypes.c_double, flags='C_CONTIGUOUS')\n", (7989, 8028), True, 'import numpy as np\n'), ((8047, 8108), 'numpy.ctypeslib.ndpointer', 'np.ctypeslib.ndpointer', (['ctypes.c_double'], {'flags': '"""C_CONTIGUOUS"""'}), "(ctypes.c_double, flags='C_CONTIGUOUS')\n", (8069, 8108), True, 'import numpy as np\n'), ((8516, 8562), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['coords'], {'dtype': 'np.float64'}), '(coords, dtype=np.float64)\n', (8536, 8562), True, 'import numpy as np\n'), ((2856, 2900), 'importlib.import_module', 'importlib.import_module', (['self.name_of_module'], {}), '(self.name_of_module)\n', (2879, 2900), False, 'import sys, os, subprocess, importlib\n'), ((3008, 3031), 'os.chdir', 'os.chdir', (['self.work_dir'], {}), '(self.work_dir)\n', (3016, 3031), False, 'import sys, os, subprocess, importlib\n'), ((4060, 4071), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4068, 4071), False, 'import sys, os, subprocess, importlib\n'), ((3409, 3420), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3417, 3420), False, 'import sys, os, subprocess, importlib\n'), ((3519, 3564), 'ctypes.cdll.LoadLibrary', 'ctypes.cdll.LoadLibrary', (['self.name_of_library'], {}), '(self.name_of_library)\n', (3542, 3564), False, 'import ctypes\n'), ((3678, 3701), 'os.chdir', 'os.chdir', (['self.work_dir'], {}), '(self.work_dir)\n', (3686, 3701), False, 'import sys, os, subprocess, importlib\n'), ((8783, 8826), 'numpy.reshape', 'np.reshape', (['grads', '(3 * self.num_waters, 3)'], {}), '(grads, (3 * self.num_waters, 3))\n', (8793, 8826), True, 'import numpy as np\n'), ((3918, 3929), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3926, 3929), False, 'import sys, os, subprocess, importlib\n')]
|
#!/usr/bin/env python
import sys
from cvangysel import argparse_utils, logging_utils
import argparse
import logging
import matplotlib.cm as cm
import matplotlib.markers as markers
import matplotlib.pyplot as plt
import numpy as np
import os
import pylatex.utils
import pyndri
from sklearn.manifold import TSNE
import nvsm
MARKERS = ['o', 's', '<', '>', '^', 'v', 'd', 'p', '*', '8',
'1', '2', '3', '4',
markers.TICKLEFT, markers.TICKRIGHT,
markers.TICKUP, markers.TICKDOWN,
markers.CARETLEFT, markers.CARETRIGHT,
markers.CARETUP, markers.CARETDOWN]
plt.rcParams["figure.figsize"] = (8.0, 4.25)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('model')
parser.add_argument('index', type=argparse_utils.existing_directory_path)
parser.add_argument('--limit',
type=argparse_utils.positive_int,
default=None)
parser.add_argument('--object_classification',
type=argparse_utils.existing_file_path,
nargs='+',
default=None)
parser.add_argument('--filter_unclassified',
action='store_true',
default=False)
parser.add_argument('--l2_normalize',
action='store_true',
default=False)
parser.add_argument('--mode',
choices=('tsne', 'embedding_projector'),
default='tsne')
parser.add_argument('--legend',
action='store_true',
default=False)
parser.add_argument('--tick_labels',
action='store_true',
default=False)
parser.add_argument('--edges',
action='store_true',
default=False)
parser.add_argument('--border',
action='store_true',
default=False)
parser.add_argument('--plot_out',
type=argparse_utils.nonexisting_file_path,
required=True)
args = parser.parse_args()
try:
logging_utils.configure_logging(args)
except IOError:
return -1
# Set matplotlib style.
plt.style.use('bmh')
logging.info('Loading index.')
index = pyndri.Index(args.index)
logging.info('Loading cuNVSM model.')
model_base, epoch_and_ext = args.model.rsplit('_', 1)
epoch = int(epoch_and_ext.split('.')[0])
if not os.path.exists('{}_meta'.format(model_base)):
model_meta_base, batch_idx = model_base.rsplit('_', 1)
else:
model_meta_base = model_base
model = nvsm.load_model(
nvsm.load_meta(model_meta_base),
model_base, epoch,
only_object_embeddings=True)
raw_object_representations = np.copy(model.object_representations)
if args.limit:
raw_object_representations = raw_object_representations[:args.limit, :]
for object_classification in args.object_classification:
root, ext = os.path.splitext(args.plot_out)
plot_out = '{}-{}.{}'.format(
root, os.path.basename(object_classification), ext.lstrip('.'))
if object_classification and args.filter_unclassified:
logging.info('Filtering unclassified.')
with open(object_classification, 'r') as f_objects:
object_ids = [line.strip().split()[0] for line in f_objects]
indices = sorted(model.inv_object_mapping[idx]
for _, idx in index.document_ids(object_ids)
if idx in model.inv_object_mapping)
logging.info('Considering %d out of %d representations.',
len(indices), len(object_ids))
translation_table = {idx: i for i, idx in enumerate(indices)}
object_representations = raw_object_representations[indices]
assert object_representations.shape[0] == \
len(translation_table)
else:
translation_table = None
raise NotImplementedError()
logging.info('Loading object clusters.')
cluster_id_to_product_ids = {}
if object_classification:
with open(object_classification, 'r') as f_objects:
for line in f_objects:
object_id, cluster_id = line.strip().split()
if cluster_id not in cluster_id_to_product_ids:
cluster_id_to_product_ids[cluster_id] = set()
cluster_id_to_product_ids[cluster_id].add(object_id)
for cluster_id in list(cluster_id_to_product_ids.keys()):
object_ids = list(cluster_id_to_product_ids[cluster_id])
cluster_id_to_product_ids[cluster_id] = set(
(model.inv_object_mapping[int_object_id]
if translation_table is None
else translation_table[
model.inv_object_mapping[int_object_id]])
for ext_object_id, int_object_id in
index.document_ids(object_ids)
if int_object_id in model.inv_object_mapping and
(args.limit is None or
(model.inv_object_mapping[int_object_id] <
args.limit)))
else:
raise NotImplementedError()
assert len(cluster_id_to_product_ids) < len(MARKERS)
if args.l2_normalize:
logging.info('L2-normalizing representations.')
object_representations /= np.linalg.norm(
object_representations,
axis=1, keepdims=True)
if args.mode == 'tsne':
logging.info('Running t-SNE.')
twodim_object_representations = \
TSNE(n_components=2, init='pca', random_state=0).\
fit_transform(object_representations)
logging.info('Plotting %s.', twodim_object_representations.shape)
colors = cm.rainbow(
np.linspace(0, 1, len(cluster_id_to_product_ids)))
for idx, cluster_id in enumerate(
sorted(cluster_id_to_product_ids.keys(),
key=lambda cluster_id: len(
cluster_id_to_product_ids[cluster_id]),
reverse=True)):
row_ids = list(cluster_id_to_product_ids[cluster_id])
plt.scatter(
twodim_object_representations[row_ids, 0],
twodim_object_representations[row_ids, 1],
marker=MARKERS[idx],
edgecolors='grey' if args.edges else None,
cmap=plt.cm.Spectral,
color=colors[idx],
alpha=0.3,
label=pylatex.utils.escape_latex(cluster_id))
plt.grid()
plt.tight_layout()
if args.legend:
plt.legend(bbox_to_anchor=(0, -0.15, 1, 0),
loc=2,
ncol=2,
mode='expand',
borderaxespad=0)
if not args.tick_labels:
plt.gca().get_xaxis().set_visible(False)
plt.gca().get_yaxis().set_visible(False)
if not args.border:
# plt.gcf().patch.set_visible(False)
plt.gca().axis('off')
logging.info('Writing %s.', plot_out)
plt.savefig(plot_out,
bbox_inches='tight',
transparent=True,
pad_inches=0,
dpi=200)
elif args.mode == 'embedding_projector':
logging.info('Dumping to TensorFlow embedding projector format.')
with open('{}_vectors.tsv'.format(plot_out), 'w') as f_vectors, \
open('{}_meta.tsv'.format(plot_out), 'w') as f_meta:
f_meta.write('document_id\tclass\n')
def write_rowids(row_ids, cluster_id):
for row_id in row_ids:
f_vectors.write(
'{}\n'.format('\t'.join(
'{:.5f}'.format(x)
for x in object_representations[row_id])))
f_meta.write('{}\t{}\n'.format(
index.ext_document_id(
model.object_mapping[row_id]),
cluster_id))
for cluster_id in cluster_id_to_product_ids.keys():
row_ids = list(cluster_id_to_product_ids[cluster_id])
write_rowids(row_ids, cluster_id)
logging.info('All done!')
if __name__ == '__main__':
sys.exit(main())
|
[
"numpy.copy",
"pyndri.Index",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.savefig",
"argparse.ArgumentParser",
"nvsm.load_meta",
"matplotlib.pyplot.gca",
"os.path.splitext",
"matplotlib.pyplot.style.use",
"sklearn.manifold.TSNE",
"cvangysel.logging_utils.configure_logging",
"os.path.basename",
"matplotlib.pyplot.tight_layout",
"numpy.linalg.norm",
"logging.info",
"matplotlib.pyplot.legend"
] |
[((682, 707), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (705, 707), False, 'import argparse\n'), ((2335, 2355), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""bmh"""'], {}), "('bmh')\n", (2348, 2355), True, 'import matplotlib.pyplot as plt\n'), ((2361, 2391), 'logging.info', 'logging.info', (['"""Loading index."""'], {}), "('Loading index.')\n", (2373, 2391), False, 'import logging\n'), ((2404, 2428), 'pyndri.Index', 'pyndri.Index', (['args.index'], {}), '(args.index)\n', (2416, 2428), False, 'import pyndri\n'), ((2434, 2471), 'logging.info', 'logging.info', (['"""Loading cuNVSM model."""'], {}), "('Loading cuNVSM model.')\n", (2446, 2471), False, 'import logging\n'), ((2912, 2949), 'numpy.copy', 'np.copy', (['model.object_representations'], {}), '(model.object_representations)\n', (2919, 2949), True, 'import numpy as np\n'), ((8988, 9013), 'logging.info', 'logging.info', (['"""All done!"""'], {}), "('All done!')\n", (9000, 9013), False, 'import logging\n'), ((2226, 2263), 'cvangysel.logging_utils.configure_logging', 'logging_utils.configure_logging', (['args'], {}), '(args)\n', (2257, 2263), False, 'from cvangysel import argparse_utils, logging_utils\n'), ((2781, 2812), 'nvsm.load_meta', 'nvsm.load_meta', (['model_meta_base'], {}), '(model_meta_base)\n', (2795, 2812), False, 'import nvsm\n'), ((3132, 3163), 'os.path.splitext', 'os.path.splitext', (['args.plot_out'], {}), '(args.plot_out)\n', (3148, 3163), False, 'import os\n'), ((4244, 4284), 'logging.info', 'logging.info', (['"""Loading object clusters."""'], {}), "('Loading object clusters.')\n", (4256, 4284), False, 'import logging\n'), ((3221, 3260), 'os.path.basename', 'os.path.basename', (['object_classification'], {}), '(object_classification)\n', (3237, 3260), False, 'import os\n'), ((3355, 3394), 'logging.info', 'logging.info', (['"""Filtering unclassified."""'], {}), "('Filtering unclassified.')\n", (3367, 3394), False, 'import logging\n'), ((5712, 5759), 'logging.info', 'logging.info', (['"""L2-normalizing representations."""'], {}), "('L2-normalizing representations.')\n", (5724, 5759), False, 'import logging\n'), ((5799, 5860), 'numpy.linalg.norm', 'np.linalg.norm', (['object_representations'], {'axis': '(1)', 'keepdims': '(True)'}), '(object_representations, axis=1, keepdims=True)\n', (5813, 5860), True, 'import numpy as np\n'), ((5939, 5969), 'logging.info', 'logging.info', (['"""Running t-SNE."""'], {}), "('Running t-SNE.')\n", (5951, 5969), False, 'import logging\n'), ((6151, 6216), 'logging.info', 'logging.info', (['"""Plotting %s."""', 'twodim_object_representations.shape'], {}), "('Plotting %s.', twodim_object_representations.shape)\n", (6163, 6216), False, 'import logging\n'), ((7116, 7126), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (7124, 7126), True, 'import matplotlib.pyplot as plt\n'), ((7140, 7158), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (7156, 7158), True, 'import matplotlib.pyplot as plt\n'), ((7692, 7729), 'logging.info', 'logging.info', (['"""Writing %s."""', 'plot_out'], {}), "('Writing %s.', plot_out)\n", (7704, 7729), False, 'import logging\n'), ((7743, 7830), 'matplotlib.pyplot.savefig', 'plt.savefig', (['plot_out'], {'bbox_inches': '"""tight"""', 'transparent': '(True)', 'pad_inches': '(0)', 'dpi': '(200)'}), "(plot_out, bbox_inches='tight', transparent=True, pad_inches=0,\n dpi=200)\n", (7754, 7830), True, 'import matplotlib.pyplot as plt\n'), ((7204, 7298), 'matplotlib.pyplot.legend', 'plt.legend', ([], 
{'bbox_to_anchor': '(0, -0.15, 1, 0)', 'loc': '(2)', 'ncol': '(2)', 'mode': '"""expand"""', 'borderaxespad': '(0)'}), "(bbox_to_anchor=(0, -0.15, 1, 0), loc=2, ncol=2, mode='expand',\n borderaxespad=0)\n", (7214, 7298), True, 'import matplotlib.pyplot as plt\n'), ((7984, 8049), 'logging.info', 'logging.info', (['"""Dumping to TensorFlow embedding projector format."""'], {}), "('Dumping to TensorFlow embedding projector format.')\n", (7996, 8049), False, 'import logging\n'), ((6033, 6081), 'sklearn.manifold.TSNE', 'TSNE', ([], {'n_components': '(2)', 'init': '"""pca"""', 'random_state': '(0)'}), "(n_components=2, init='pca', random_state=0)\n", (6037, 6081), False, 'from sklearn.manifold import TSNE\n'), ((7657, 7666), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (7664, 7666), True, 'import matplotlib.pyplot as plt\n'), ((7457, 7466), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (7464, 7466), True, 'import matplotlib.pyplot as plt\n'), ((7514, 7523), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (7521, 7523), True, 'import matplotlib.pyplot as plt\n')]
|
# coding: utf-8
# Copyright (c) Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department
# Distributed under the terms of "New BSD License", see the LICENSE file.
from __future__ import print_function
# import os
import sys
import copy
import numpy as np
from collections import OrderedDict, Sequence
__author__ = "<NAME>"
__copyright__ = "Copyright 2019, Max-Planck-Institut für Eisenforschung GmbH - " \
"Computational Materials Design (CM) Department"
__version__ = "1.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "production"
__date__ = "Sep 1, 2017"
class SparseListElement(object):
"""
    Handle a single element of a sparse list
Args:
ind: index
val: value
"""
def __init__(self, ind, val):
self.index = ind
self.value = val
def __str__(self):
return '({}: {})'.format(self.index, self.value)
class SparseList(object):
"""
Object to represent a single sparse list
Internal representation like a dict
External representation like a list
Args:
sparse_list: dict object with {index: val}
default: default value for all elements not given by index in sparse_list
length: length of the list
"""
def __init__(self, sparse_list, default=None, length=None):
if isinstance(sparse_list, dict):
self._dict = sparse_list.copy()
if "_" in self._dict.keys():
default = self._dict["_"]
del self._dict["_"]
if length is None:
raise ValueError('Length must be provided in dict input mode')
self._length = length
elif isinstance(sparse_list, (list, np.ndarray)):
# self._dict = {el: [] for el in set(sparse_list)}
self._dict = {}
for i, el in enumerate(sparse_list):
self._dict[i] = el
self._length = len(sparse_list)
if length is not None:
if length != self._length:
raise ValueError('Incompatible length of new list')
self._default = default
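    # Illustrative example of the dict-in / list-out behaviour described in the
    # class docstring (values are made up):
    #   sl = SparseList({0: 'Fe', 2: 'Ni'}, default='Cu', length=4)
    #   sl.list()  ->  ['Fe', 'Cu', 'Ni', 'Cu']
    #   sl[1]      ->  'Cu'   (missing index falls back to the default)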
def _val_data_type(self):
"""
Returns:
"""
if isinstance(self.values(), dict):
pass
print(self.values())
        data_0 = list(self.values())[0]
if isinstance(data_0, list):
if isinstance(data_0[0], bool):
return "list_bool"
else:
raise ValueError('tags which have as elements lists or tensors are not implemented')
else:
return "scalar"
def to_hdf(self, hdf, key):
"""
Args:
hdf:
key:
Returns:
"""
if len(self.list()) > 0:
# Convert to array and store
hdf[key] = np.array(self.list())
elif len(self.values()) > 0:
print('sparse array: ', key, len(self.values()))
data_type = self._val_data_type()
my_dict = OrderedDict()
my_dict["index"] = self.keys()
            if data_type == "list_bool":
my_dict["values"] = [sum([2 ** i * int(v) for i, v in enumerate(val)]) for val in self.values()]
else:
my_dict["values"] = self.values()
print("values: ", self.values())
hdf[key] = my_dict
def __len__(self):
return self._length
def __copy__(self):
return SparseList(sparse_list=self._dict, default=self._default, length=self._length)
def keys(self):
"""
Returns:
indices of non-sparse elements
"""
return self._dict.keys()
def values(self):
"""
Returns:
values of non-sparse elements
"""
return self._dict.values()
def items(self):
"""
Returns:
index, value pairs of non-sparse elements
"""
return self._dict.items()
def list(self):
"""
convert sparse list into full list
Returns:
list representation
"""
full_list = [self._default for _ in range(self._length)]
for i, val in self._dict.items():
full_list[i] = val
return full_list
def __iter__(self):
if self._default is None:
for i, val in self._dict.items():
yield SparseListElement(i, val)
else:
for i, val in enumerate(self.list()):
yield val
def __getitem__(self, item):
if isinstance(item, (int, np.integer)):
if item in self._dict:
return self._dict[item]
return self._default
if isinstance(item, slice):
ind_list = range(len(self))[item]
elif isinstance(item, (list, tuple, np.ndarray)):
if len(item) == 0:
ind_list = []
else:
if isinstance(item[0], (int, np.integer)):
ind_list = item
elif isinstance(item[0], (bool, np.bool_)):
ind_list = []
for i, bo in enumerate(item):
if bo:
ind_list.append(i)
else:
raise ValueError('Unknown item type: ' + str(type(item)))
sliced_dict = {j: self._dict[ind] for j, ind in enumerate(ind_list) if ind in self._dict}
return self.__class__(sliced_dict, default=self._default, length=len(ind_list))
def __setitem__(self, key, value):
if isinstance(key, (int, np.integer)):
if key > len(self):
raise IndexError
self._dict[key] = value
return
elif isinstance(key, slice):
key = range(len(self))[key]
if max(key) > self._length:
raise IndexError
for i in key:
self._dict[i] = value
def __delitem__(self, key):
# programmed for simplicity, not for performance
ind_list = list(range(len(self)))
if isinstance(key, (list, np.ndarray, tuple)):
indexes = sorted(list(key), reverse=True)
for index in indexes:
del ind_list[index]
else:
del ind_list[key]
new_list = self[ind_list]
self._dict = new_list._dict
self._length = new_list._length
self._default = new_list._default
def __add__(self, other):
if not (isinstance(other, SparseList)):
raise AssertionError()
if not (self._default == other._default):
raise AssertionError()
new_list = self.__copy__()
shifted_dict = {i + self._length: val for i, val in other._dict.items()}
new_list._dict.update(shifted_dict)
new_list._length += len(other)
return new_list
def __mul__(self, other):
if not isinstance(other, (int, np.integer)):
            raise ValueError('Multiplication defined only for SparseList*integers')
overall_list = other * np.arange(len(self)).tolist()
new_dic = dict()
for k in self.keys():
for val in np.argwhere(np.array(overall_list) == k).flatten():
new_dic[val] = self[k]
return self.__class__(new_dic, default=self._default, length=other * len(self))
def __rmul__(self, other):
if isinstance(other, int):
return self * other
def __str__(self):
if self._default is None:
return "[" + " ".join([str(el) for el in self]) + "]"
else:
# return "[" + " ".join([str(el) + os.sep for el in self.list()]) + "]"
return "[" + " ".join([str(el) for el in self.list()]) + "]"
def __repr__(self):
return str(self.list())
def sparse_index(index_list, length, default_val=True):
"""
Args:
index_list:
length:
default_val:
Returns:
"""
new_dict = {i: default_val for i in index_list}
return SparseList(new_dict, length=length)
class SparseArrayElement(object):
"""
Single element of a SparseArray
Args:
**qwargs:
"""
def __init__(self, **qwargs):
self._lists = dict()
if qwargs:
self._lists = qwargs
def __getattr__(self, item):
if item in self._lists.keys():
return self._lists[item]
raise AttributeError('Object has no attribute {} {}'.format(self.__class__, item))
def __str__(self):
out_str = ""
for key, val in self._lists.items():
out_str += '{}: {}'.format(key, val)
return out_str
def __eq__(self, other):
if not (isinstance(other, SparseArrayElement)):
raise AssertionError()
conditions = []
for key in self._lists.keys():
try:
if isinstance(self._lists[key], np.ndarray):
conditions += list(np.equal(self._lists[key], other._lists[key]))
else:
conditions.append(self._lists[key] == other._lists[key])
except KeyError:
conditions.append(False)
return all(conditions)
class SparseArray(object):
"""
    Administrative object that consists of several sparse lists (tags) and full lists that have identical indices and
length
Args:
**qwargs: dictionary containing lists and SparseLists (tags) (must have identical length)
"""
def __init__(self, length=None, **qwargs):
self._lists = dict()
self._length = length
for key in qwargs:
value = qwargs[key]
if self._length is None:
self._length = len(value)
else:
if not len(self) == len(value):
raise ValueError('Inconsistent vector lengths {} {} {}'.format(key, len(self), len(value)))
self._lists[key] = value
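    # Rough usage sketch for the container described in the class docstring
    # (names and values are made up):
    #   sa = SparseArray(symbols=['Fe', 'Fe', 'Cu'], indices=[0, 1, 2])
    #   sa.add_tag(selective_dynamics=False)   # sparse tag with a default value
    #   sa.selective_dynamics[0] = True        # store only the deviating entry
    #   sa[0].symbols                          # -> 'Fe' via SparseArrayElement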
def __setitem__(self, key, value):
# exclude hidden variables (starting with _ from being added to _lists
# if (not hasattr(self, '_lists')) or (key[0] == "_"):
# self.__dict__[key] = value
# return
# el
if isinstance(value, SparseList):
self._lists[key] = value
return
elif isinstance(value, (Sequence, np.ndarray)):
if len(value) == len(self):
self._lists[key] = value
return
else:
raise ValueError(
'Length of array object and new list are inconsistent: {} {} {}'.format(key, len(value), len(self)))
raise ValueError('Unsupported argument: ' + str(type(value)))
def __getattr__(self, item):
# if not (item in ["_lists"]):
# print "item: ", item, hasattr(self, item)
if sys.version_info.major > 2:
if '_lists' in dir(self): # Python 3
if item in self._lists.keys():
return self._lists[item]
else:
if hasattr(self, '_lists'):
if item in self._lists.keys():
return self._lists[item]
return object.__getattribute__(self, item)
# raise AttributeError("%r object has no attribute %r" %(self.__class__, item))
def __delitem__(self, key):
for k in self.keys():
if len(self._lists[k]) == 0:
# ensure ASE compatibility
print('Empty key in SparseList: ', k, key)
continue
# print "del: ", k, key
if isinstance(self._lists[k], np.ndarray):
self._lists[k] = np.delete(self._lists[k], key, axis=0)
self._length = len(self._lists[k])
elif isinstance(self._lists[k], (list, tuple)):
if isinstance(key, (list, np.ndarray, tuple)):
indexes = sorted(list(key), reverse=True)
for index in indexes:
del self._lists[k][index]
else:
del self._lists[k][key]
else:
del self._lists[k][key]
# self._length = len(self._lists[k])
def check_consistency(self):
"""
Returns:
"""
for key, val in self._lists.items():
# for val in self._lists.values():
# print ("consistency: ", key, len(val), len(self))
if not (len(val) == self._length):
raise AssertionError()
def __str__(self):
out_str = "\n"
for key, val in self._lists.items():
out_str += key + " := [" + " ".join([str(el) for el in val]) + "] \n"
return out_str
def __len__(self):
if hasattr(self, '_length'):
return self._length
else:
return 0
def __getitem__(self, item):
new_dict = {}
if isinstance(item, int):
for key, value in self._lists.items():
if value[item] is not None:
new_dict[key] = value[item]
return SparseArrayElement(**new_dict)
elif isinstance(item, (str, np.str, np.str_)):
return self._lists[item]
elif isinstance(item, (list, np.ndarray)):
# print("key(__getitem__) len, type, item[0]: ", len(item), type(item), item[0])
if len(item) == len(self):
if isinstance(item[0], (np.bool_, bool)):
item = np.arange(len(item))[item]
for key, value in self._lists.items():
# print ('key: ', key, type(value))
if isinstance(item, slice):
new_dict[key] = value[item]
else:
if isinstance(value, (list, tuple)):
new_dict[key] = [value[i] for i in item]
else:
if len(value) > 0:
try:
new_dict[key] = value[item]
except IndexError:
print('Index error:: ', key, item, value)
# else:
# new_dict[key] = []
# print ("new_dict: ", new_dict, self.__class__)
return self.__class__(**new_dict)
def keys(self):
"""
Returns:
"""
return self._lists.keys()
def items(self):
"""
Returns:
"""
return self._lists.items()
def __copy__(self):
"""
Returns:
"""
cls = self.__class__
result = cls.__new__(cls)
result.__init__()
for k, v in self.__dict__.items():
if k == '_lists':
result.__dict__[k] = {}
for key, val in self._lists.items():
if isinstance(val, SparseList):
result.__dict__[k][key] = val.__copy__()
elif isinstance(val, list):
result.__dict__[k][key] = val[:]
else:
result.__dict__[k][key] = np.copy(val)
else:
result.__dict__[k] = v
return result
def __add__(self, other):
# print "__add__.new_elements"
# assert(isinstance(other, self.__class__))
new_array = self.__copy__()
for key, val in other.items():
if key not in self.keys():
if isinstance(val, SparseList):
new_array._lists[key] = SparseList({}, default=other._lists[key]._default, length=len(self))
else:
raise ValueError('Incompatible lists (for non-sparse lists keys must be identical (1)' + str(key))
new_length = len(self) + len(other)
for key, val in new_array.items():
# print "key: ", key, val.__class__, isinstance(new_array, SparseList)
if key in other.keys():
if isinstance(new_array._lists[key], np.ndarray):
new_array._lists[key] = np.append(new_array._lists[key], other._lists[key], axis=0)
elif isinstance(new_array._lists[key], (list, SparseList)):
new_array._lists[key] += other._lists[key]
else:
raise ValueError("Type not implemented " + str(type(new_array._lists[key])))
elif isinstance(val, SparseList):
new_array._lists[key]._length = new_length # TODO: default extends to all elements (may be undesired)
else:
print("non-matching key: ", key)
raise ValueError('Incompatible lists (for non-sparse lists keys must be identical (2)')
new_array._length += len(other)
return new_array
def __mul__(self, other):
if not isinstance(other, int):
raise ValueError('Multiplication with SparseMatrix only implemented for integers')
new_array = self.__copy__()
for key, value in self.items():
new_array._lists[key] *= other
new_array._length *= other
return new_array
def __rmul__(self, other):
if isinstance(other, int):
return self * other
def add_tag(self, *args, **qwargs):
for key in args:
self._lists[key] = SparseList({}, length=len(self))
for key, default in qwargs.items():
self._lists[key] = SparseList({}, default=default, length=len(self))
def remove_tag(self, *args, **qwargs):
"""
Args:
*args:
**qwargs:
Returns:
"""
for key in args:
del self._lists[key]
for key, default in qwargs.items():
del self._lists[key]
|
[
"numpy.copy",
"collections.OrderedDict",
"numpy.delete",
"numpy.equal",
"numpy.append",
"numpy.array"
] |
[((3044, 3057), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3055, 3057), False, 'from collections import OrderedDict, Sequence\n'), ((11675, 11713), 'numpy.delete', 'np.delete', (['self._lists[k]', 'key'], {'axis': '(0)'}), '(self._lists[k], key, axis=0)\n', (11684, 11713), True, 'import numpy as np\n'), ((16046, 16105), 'numpy.append', 'np.append', (['new_array._lists[key]', 'other._lists[key]'], {'axis': '(0)'}), '(new_array._lists[key], other._lists[key], axis=0)\n', (16055, 16105), True, 'import numpy as np\n'), ((8978, 9023), 'numpy.equal', 'np.equal', (['self._lists[key]', 'other._lists[key]'], {}), '(self._lists[key], other._lists[key])\n', (8986, 9023), True, 'import numpy as np\n'), ((7208, 7230), 'numpy.array', 'np.array', (['overall_list'], {}), '(overall_list)\n', (7216, 7230), True, 'import numpy as np\n'), ((15099, 15111), 'numpy.copy', 'np.copy', (['val'], {}), '(val)\n', (15106, 15111), True, 'import numpy as np\n')]
|
# The Hazard Library
# Copyright (C) 2012-2018 GEM Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Module :mod:`openquake.hazardlib.correlation` defines correlation models for
spatially-distributed ground-shaking intensities.
"""
import abc
import numpy
class BaseCorrelationModel(metaclass=abc.ABCMeta):
"""
Base class for correlation models for spatially-distributed ground-shaking
intensities.
"""
@abc.abstractmethod
def get_lower_triangle_correlation_matrix(self, sites, imt):
"""
Get lower-triangle matrix as a result of Cholesky-decomposition
of correlation matrix.
The resulting matrix should have zeros on values above
the main diagonal.
The actual implementations of :class:`BaseCorrelationModel` interface
might calculate the matrix considering site collection and IMT (like
:class:`JB2009CorrelationModel` does) or might have it pre-constructed
for a specific site collection and IMT, in which case they will need
to make sure that parameters to this function match parameters that
were used to pre-calculate decomposed correlation matrix.
:param sites:
:class:`~openquake.hazardlib.site.SiteCollection` to create
correlation matrix for.
:param imt:
Intensity measure type object, see :mod:`openquake.hazardlib.imt`.
"""
def apply_correlation(self, sites, imt, residuals):
"""
Apply correlation to randomly sampled residuals.
:param sites:
:class:`~openquake.hazardlib.site.SiteCollection` residuals were
sampled for.
:param imt:
Intensity measure type object, see :mod:`openquake.hazardlib.imt`.
:param residuals:
2d numpy array of sampled residuals, where first dimension
represents sites (the length as ``sites`` parameter) and
second one represents different realizations (samples).
:returns:
Array of the same structure and semantics as ``residuals``
but with correlations applied.
NB: the correlation matrix is cached. It is computed only once
per IMT for the complete site collection and then the portion
corresponding to the sites is multiplied by the residuals.
"""
# intra-event residual for a single relization is a product
# of lower-triangle decomposed correlation matrix and vector
# of N random numbers (where N is equal to number of sites).
# we need to do that multiplication once per realization
# with the same matrix and different vectors.
try:
corma = self.cache[imt]
except KeyError:
corma = self.get_lower_triangle_correlation_matrix(
sites.complete, imt)
self.cache[imt] = corma
if len(sites.complete) == len(sites):
return numpy.dot(corma, residuals)
# it is important to allocate little memory, this is why I am
# accumulating below; if S is the length of the complete sites
# the correlation matrix has shape (S, S) and the residuals (N, s),
# where s is the number of samples
return numpy.sum(corma[sites.sids, sid] * res
for sid, res in zip(sites.sids, residuals))
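# Rough sketch of the relationship used above (not taken from the module itself):
# if C is the site-to-site correlation matrix and L = cholesky(C) its
# lower-triangular factor, then L @ residuals maps independent standard-normal
# samples (one column per realization) onto samples with spatial correlation C.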
class JB2009CorrelationModel(BaseCorrelationModel):
"""
"Correlation model for spatially distributed ground-motion intensities"
by <NAME> and <NAME>. Published in Earthquake Engineering
and Structural Dynamics 2009; 38, pages 1687-1708.
:param vs30_clustering:
Boolean value to indicate whether "Case 1" or "Case 2" from page 1700
should be applied. ``True`` value means that Vs 30 values show or are
expected to show clustering ("Case 2"), ``False`` means otherwise.
"""
def __init__(self, vs30_clustering):
self.vs30_clustering = vs30_clustering
self.cache = {} # imt -> correlation model
def _get_correlation_matrix(self, sites, imt):
return jbcorrelation(sites, imt, self.vs30_clustering)
def get_lower_triangle_correlation_matrix(self, sites, imt):
"""
See :meth:`BaseCorrelationModel.get_lower_triangle_correlation_matrix`.
"""
return numpy.linalg.cholesky(self._get_correlation_matrix(sites, imt))
def jbcorrelation(sites_or_distances, imt, vs30_clustering=False):
"""
Returns the Jayaram-Baker correlation model.
:param sites_or_distances:
        SiteCollection instance or distance matrix
:param imt:
Intensity Measure Type (PGA or SA)
:param vs30_clustering:
        flag, default False
"""
if hasattr(sites_or_distances, 'mesh'):
distances = sites_or_distances.mesh.get_distance_matrix()
else:
distances = sites_or_distances
# formulae are from page 1700
if imt.period < 1:
if not vs30_clustering:
# case 1, eq. (17)
b = 8.5 + 17.2 * imt.period
else:
# case 2, eq. (18)
b = 40.7 - 15.0 * imt.period
else:
# both cases, eq. (19)
b = 22.0 + 3.7 * imt.period
# eq. (20)
return numpy.exp((- 3.0 / b) * distances)
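# Worked numbers for the range parameter b above (illustrative only):
#   SA(0.5 s), no clustering:  b = 8.5 + 17.2 * 0.5 = 17.1
#   SA(0.5 s), clustering:     b = 40.7 - 15.0 * 0.5 = 33.2
#   SA(2.0 s), either case:    b = 22.0 + 3.7 * 2.0 = 29.4
# so the correlation between two sites separated by a distance h (in the units of
# the distance matrix) is exp(-3 * h / b).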
|
[
"numpy.exp",
"numpy.dot"
] |
[((5955, 5986), 'numpy.exp', 'numpy.exp', (['(-3.0 / b * distances)'], {}), '(-3.0 / b * distances)\n', (5964, 5986), False, 'import numpy\n'), ((3573, 3600), 'numpy.dot', 'numpy.dot', (['corma', 'residuals'], {}), '(corma, residuals)\n', (3582, 3600), False, 'import numpy\n')]
|
import ray
import unittest
import numpy as np
from ray.rllib.agents.callbacks import DefaultCallbacks
from ray.rllib.env.multi_agent_env import MultiAgentEnv
from ray.rllib.evaluation.rollout_worker import RolloutWorker
from ray.rllib.examples.env.mock_env import MockEnv3
from ray.rllib.policy import Policy
from ray.rllib.utils import override
NUM_STEPS = 25
NUM_AGENTS = 4
class LastInfoCallback(DefaultCallbacks):
def __init__(self):
super(LastInfoCallback, self).__init__()
self.tc = unittest.TestCase()
self.step = 0
def on_episode_start(
self, worker, base_env, policies, episode, env_index, **kwargs
):
self.step = 0
self._check_last_values(episode)
def on_episode_step(self, worker, base_env, episode, env_index=None, **kwargs):
self.step += 1
self._check_last_values(episode)
def on_episode_end(self, worker, base_env, policies, episode, **kwargs):
self._check_last_values(episode)
def _check_last_values(self, episode):
last_obs = {
k: np.where(v)[0].item() for k, v in episode._agent_to_last_obs.items()
}
last_info = episode._agent_to_last_info
last_done = episode._agent_to_last_done
last_action = episode._agent_to_last_action
last_reward = {k: v[-1] for k, v in episode._agent_reward_history.items()}
if self.step == 0:
for last in [last_obs, last_info, last_done, last_action, last_reward]:
self.tc.assertEqual(last, {})
else:
for agent in last_obs.keys():
index = int(str(agent).replace("agent", ""))
self.tc.assertEqual(last_obs[agent], self.step + index)
self.tc.assertEqual(last_reward[agent], self.step + index)
self.tc.assertEqual(last_done[agent], self.step == NUM_STEPS)
if self.step == 1:
self.tc.assertEqual(last_action[agent], 0)
else:
self.tc.assertEqual(last_action[agent], self.step + index - 1)
self.tc.assertEqual(last_info[agent]["timestep"], self.step + index)
class EchoPolicy(Policy):
@override(Policy)
def compute_actions(
self,
obs_batch,
state_batches=None,
prev_action_batch=None,
prev_reward_batch=None,
episodes=None,
explore=None,
timestep=None,
**kwargs
):
return obs_batch.argmax(axis=1), [], {}
class EpisodeEnv(MultiAgentEnv):
def __init__(self, episode_length, num):
super().__init__()
self.agents = [MockEnv3(episode_length) for _ in range(num)]
self.dones = set()
self.observation_space = self.agents[0].observation_space
self.action_space = self.agents[0].action_space
def reset(self):
self.dones = set()
return {i: a.reset() for i, a in enumerate(self.agents)}
def step(self, action_dict):
obs, rew, done, info = {}, {}, {}, {}
print("ACTIONDICT IN ENV\n", action_dict)
for i, action in action_dict.items():
obs[i], rew[i], done[i], info[i] = self.agents[i].step(action)
obs[i] = obs[i] + i
rew[i] = rew[i] + i
info[i]["timestep"] = info[i]["timestep"] + i
if done[i]:
self.dones.add(i)
done["__all__"] = len(self.dones) == len(self.agents)
return obs, rew, done, info
class TestEpisodeLastValues(unittest.TestCase):
@classmethod
def setUpClass(cls):
ray.init(num_cpus=1)
@classmethod
def tearDownClass(cls):
ray.shutdown()
def test_singleagent_env(self):
ev = RolloutWorker(
env_creator=lambda _: MockEnv3(NUM_STEPS),
policy_spec=EchoPolicy,
callbacks=LastInfoCallback,
)
ev.sample()
def test_multiagent_env(self):
temp_env = EpisodeEnv(NUM_STEPS, NUM_AGENTS)
ev = RolloutWorker(
env_creator=lambda _: EpisodeEnv(NUM_STEPS, NUM_AGENTS),
policy_spec={
str(agent_id): (
EchoPolicy,
temp_env.observation_space,
temp_env.action_space,
{},
)
for agent_id in range(NUM_AGENTS)
},
policy_mapping_fn=lambda aid, eps, **kwargs: str(aid),
callbacks=LastInfoCallback,
)
ev.sample()
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", __file__]))
|
[
"unittest.TestCase",
"ray.shutdown",
"numpy.where",
"pytest.main",
"ray.rllib.utils.override",
"ray.rllib.examples.env.mock_env.MockEnv3",
"ray.init"
] |
[((2200, 2216), 'ray.rllib.utils.override', 'override', (['Policy'], {}), '(Policy)\n', (2208, 2216), False, 'from ray.rllib.utils import override\n'), ((512, 531), 'unittest.TestCase', 'unittest.TestCase', ([], {}), '()\n', (529, 531), False, 'import unittest\n'), ((3575, 3595), 'ray.init', 'ray.init', ([], {'num_cpus': '(1)'}), '(num_cpus=1)\n', (3583, 3595), False, 'import ray\n'), ((3650, 3664), 'ray.shutdown', 'ray.shutdown', ([], {}), '()\n', (3662, 3664), False, 'import ray\n'), ((4579, 4608), 'pytest.main', 'pytest.main', (["['-v', __file__]"], {}), "(['-v', __file__])\n", (4590, 4608), False, 'import pytest\n'), ((2637, 2661), 'ray.rllib.examples.env.mock_env.MockEnv3', 'MockEnv3', (['episode_length'], {}), '(episode_length)\n', (2645, 2661), False, 'from ray.rllib.examples.env.mock_env import MockEnv3\n'), ((3764, 3783), 'ray.rllib.examples.env.mock_env.MockEnv3', 'MockEnv3', (['NUM_STEPS'], {}), '(NUM_STEPS)\n', (3772, 3783), False, 'from ray.rllib.examples.env.mock_env import MockEnv3\n'), ((1070, 1081), 'numpy.where', 'np.where', (['v'], {}), '(v)\n', (1078, 1081), True, 'import numpy as np\n')]
|
import abc
import numpy as np
import copy
import csv
import os
from beluga.liepack import *
# The following math import statements appear to be unused, but they are required on import of the specific
# methods since an eval() is called
from math import sqrt
class Method(object):
"""
    Class containing information on various integration methods. Its primary purpose is not to perform explicit
calculations, but rather to load and store saved schemes.
"""
def __new__(cls, *args, **kwargs):
"""
        Creates a new Method object.
:param name: Name of a method.
:return: Method object.
"""
obj = super(Method, cls).__new__(cls)
obj.name = 'RK4'
obj.data = None
if len(args) > 0:
obj.name = args[0].upper()
return obj
def __init__(self, *args, **kwargs):
self.loadmethods()
self.RKtype = self.data[self.name]['type']
self.RKa = np.array(self.data[self.name]['a'], dtype=np.float64)
self.RKb = np.array(self.data[self.name]['b'], dtype=np.float64)
self.RKbhat = np.array(self.data[self.name]['bhat'], dtype=np.float64)
self.RKc = np.array(self.data[self.name]['c'], dtype=np.float64)
self.RKord = int(self.data[self.name]['order'])
self.RKns = int(self.data[self.name]['n'])
self.variable_step = sum(self.RKbhat) != 0
def loadmethods(self):
path = os.path.dirname(os.path.abspath(__file__))
with open(path + '/methods/RK.csv', mode='r', encoding='utf-8-sig', newline='\n') as RKfile:
reader = csv.reader(RKfile, delimiter=',')
num_methods = 0
data = {}
for row in reader:
if num_methods == 0:
header = row
else:
L = len(header)
name = row[0]
key = [header[1]]
val = [row[1]]
key += [_ for _ in header[2:]]
val += [eval(_) for _ in row[2:]]
data[name] = dict(zip(key, val))
num_methods += 1
self.data = data
def getmethods(self):
return self.data.keys()
class TimeStepper(object):
"""
This class serves as a superclass for various time stepper objects. The purpose of a timestepper is to advance
numerical solutions of ordinary differential equations a single time step per evaluation.
"""
def __new__(cls, *args, **kwargs):
"""
Creates a new TimeStepper object.
:param args:
:param kwargs:
:return:
"""
obj = super(TimeStepper, cls).__new__(cls)
obj.variablestep = False
if len(args) == 0:
obj.coordinate = 'exp'
obj.method = Method('RK4')
return obj
@abc.abstractmethod
def __call__(self, vf, y, t0, dt):
pass
def getcoordinate(self):
return self.coordinate
def getmethod(self):
return self.method
def setcoordinate(self, coordinate):
coordinate = coordinate.lower()
if coordinate == 'exp':
self.coordinate = 'exp'
else:
raise NotImplementedError
def setmethod(self, method):
self.method = Method(method)
class RKMK(TimeStepper):
"""
The Runge-Kutta-Munthe-Kaas time stepper object.
"""
def __call__(self, vf, y, t0, dt):
"""
Advances a numerical solution.
:param vf: Vectorfield object.
:param y: Homogeneous space.
:param t0: Initial time.
:param dt: Time to advance (for fixed-step methods).
        :return: (ylow, yhigh, errest) - a low-order and a high-order estimate of the solution and an error estimate; yhigh and errest are only populated when variable stepping is enabled.
"""
g = group2algebra(y.shape)
Kj = [g(y.shape.shape) for _ in range(self.method.RKns)]
Yr = [copy.copy(y) for _ in range(self.method.RKns)]
if self.method.RKtype == 'explicit':
Kj[0] = vf(t0, y)
for ii in range(self.method.RKns - 1):
U = sum([elem*dt*coeff for elem, coeff in zip(Kj[:ii+1], self.method.RKa[ii+1, :ii+1])])
Yr[ii+1] = Left(exp(U), y)
K = vf(t0 + dt*self.method.RKc[ii+1], Yr[ii+1])
Kj[ii+1] = dexpinv(U, K, order=self.method.RKord-1)
elif self.method.RKtype == 'implicit':
Kjold = copy.copy(Kj)
tol = 1e-15
max_iter = 50
iter = 0
iter_dist = 1 + tol
while (iter_dist > tol) and (iter < max_iter):
iter += 1
for ii in range(self.method.RKns):
U = sum([elem*dt*coeff for elem, coeff in zip(Kjold, self.method.RKa[ii, :])])
K = vf(t0 + dt*self.method.RKc[ii], Left(exp(U), y))
Kj[ii] = dexpinv(U, K, order=self.method.RKord-1)
iter_dist = sum(np.linalg.norm(v1.get_vector() - v2.get_vector()) for v1,v2 in zip(Kj, Kjold))
Kjold = copy.copy(Kj)
Ulow = sum([Kval*dt*coeff for Kval, coeff in zip(Kj, self.method.RKb)])
ylow = Left(exp(Ulow), y)
errest = -1
yhigh = None
if self.variablestep:
if not self.method.variable_step:
raise NotImplementedError(self.method.name + ' does not support variable stepsize.')
Uhigh = sum([Kval*dt*coeff for Kval, coeff in zip(Kj, self.method.RKbhat)])
yhigh = Left(exp(Uhigh), y)
errest = np.linalg.norm(Ulow.get_vector() - Uhigh.get_vector())
return ylow, yhigh, errest
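
# --- Hedged usage sketch (not part of the original module) ---
# A small illustration of how a stored Butcher tableau is pulled out of the
# Method table and how a default RKMK stepper is configured.  It assumes the
# bundled methods/RK.csv file is present next to this module and that 'RK4'
# (the default above) is one of the schemes it defines.
if __name__ == '__main__':
    rk4 = Method('RK4')
    print('stages:', rk4.RKns, 'order:', rk4.RKord)
    print('a =', rk4.RKa)   # stage coefficient matrix
    print('b =', rk4.RKb)   # quadrature weights
    print('c =', rk4.RKc)   # stage abscissae
    stepper = RKMK()        # defaults to exponential coordinates and RK4
    print('coordinate map:', stepper.getcoordinate())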
|
[
"os.path.abspath",
"numpy.array",
"copy.copy",
"csv.reader"
] |
[((968, 1021), 'numpy.array', 'np.array', (["self.data[self.name]['a']"], {'dtype': 'np.float64'}), "(self.data[self.name]['a'], dtype=np.float64)\n", (976, 1021), True, 'import numpy as np\n'), ((1041, 1094), 'numpy.array', 'np.array', (["self.data[self.name]['b']"], {'dtype': 'np.float64'}), "(self.data[self.name]['b'], dtype=np.float64)\n", (1049, 1094), True, 'import numpy as np\n'), ((1117, 1173), 'numpy.array', 'np.array', (["self.data[self.name]['bhat']"], {'dtype': 'np.float64'}), "(self.data[self.name]['bhat'], dtype=np.float64)\n", (1125, 1173), True, 'import numpy as np\n'), ((1193, 1246), 'numpy.array', 'np.array', (["self.data[self.name]['c']"], {'dtype': 'np.float64'}), "(self.data[self.name]['c'], dtype=np.float64)\n", (1201, 1246), True, 'import numpy as np\n'), ((1464, 1489), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (1479, 1489), False, 'import os\n'), ((1613, 1646), 'csv.reader', 'csv.reader', (['RKfile'], {'delimiter': '""","""'}), "(RKfile, delimiter=',')\n", (1623, 1646), False, 'import csv\n'), ((3942, 3954), 'copy.copy', 'copy.copy', (['y'], {}), '(y)\n', (3951, 3954), False, 'import copy\n'), ((4463, 4476), 'copy.copy', 'copy.copy', (['Kj'], {}), '(Kj)\n', (4472, 4476), False, 'import copy\n'), ((5093, 5106), 'copy.copy', 'copy.copy', (['Kj'], {}), '(Kj)\n', (5102, 5106), False, 'import copy\n')]
|
"""
Vortex dynamics
Several initial states are provided: select one with 'vortex_config'
"""
import sys
try:
from param import Param
except:
print("[ERROR] unable to import the param module")
print("[INFO] you likely forgot to set $PYTHONPATH")
print("[INFO] depending on your shell")
print("> source ~/.fluid2d/activate.sh")
print("> source ~/.fluid2d/activate.csh")
print("> source ~/.fluid2d/activate.fish")
sys.exit()
from grid import Grid
from fluid2d import Fluid2d
import numpy as np
import ana_profiles as ap
# If the code immediately stops with
# Traceback (most recent call last):
# File "vortex.py", line 1, in <module>
# from param import Param
# ImportError: No module named param
# it means that you forgot to do
# source activate.sh in your terminal
param = Param('default.xml')
param.modelname = 'euler'
param.expname = 'vortex_00'
# domain and resolution
param.nx = 64*2
param.ny = param.nx
param.Ly = param.Lx
param.npx = 1
param.npy = 1
param.geometry = 'closed'
# time
param.tend = 10
param.cfl = 1.
param.adaptable_dt = True
param.dt = 0.01
param.dtmax = 100
# discretization
param.order = 3
param.timestepping = 'RK3_SSP'
param.exacthistime = True
# output
param.var_to_save = ['vorticity', 'psi', 'tracer']
param.list_diag = 'all'
param.freq_plot = 10
param.freq_his = .2
param.freq_diag = 0.02
# plot
param.freq_plot = 10
param.plot_interactive = True
param.plot_psi = True
param.plot_var = 'vorticity'
param.cax = np.array([-2, 2.])*5
param.colorscheme = 'imposed'
param.generate_mp4 = False
# physics
param.noslip = False
param.diffusion = False
param.additional_tracer = ['tracer']
grid = Grid(param)
param.Kdiff = 5e-2*grid.dx
xr, yr = grid.xr, grid.yr
# it's time to modify the mask and add obstacles if you wish, 0 is land
msk_config = 'none'  # the other possibilities are 'T-wall' or 'bay'
if msk_config == 'bay':
x0, y0, radius = 0.5, 0.35, 0.2
y1 = 0.5
msk2 = ap.vortex(xr, yr, param.Lx, param.Ly,
x0, y0, radius, 'step')
grid.msk[yr < y1] = 0
grid.msk += np.asarray(msk2, dtype=int)
grid.msk[grid.msk < 0] = 0
grid.msk[grid.msk > 1] = 1
grid.msk[0:1, :] = 0
grid.finalize_msk()
elif msk_config == 'T-wall':
i0, j0 = param.nx//2, param.ny//2
di = int(0.25*param.Lx/grid.dx)
grid.msk[:j0, i0] = 0
grid.msk[j0, i0-di:i0+di] = 0
grid.finalize_msk()
else:
# do nothing
pass
f2d = Fluid2d(param, grid)
model = f2d.model
vor = model.var.get('vorticity')
def vortex(param, grid, x0, y0, sigma,
vortex_type, ratio=1):
"""Setup a compact distribution of vorticity
at location x0, y0 vortex, width is sigma, vortex_type controls
the radial vorticity profile, ratio controls the x/y aspect ratio
(for ellipses)
"""
xr, yr = grid.xr, grid.yr
# ratio controls the ellipticity, ratio=1 is a disc
x = np.sqrt((xr-param.Lx*x0)**2+(yr-param.Ly*y0)**2*ratio**2)
y = x.copy()*0.
if vortex_type in ('gaussian', 'cosine', 'step'):
if vortex_type == 'gaussian':
y = np.exp(-x**2/(sigma**2))
elif vortex_type == 'cosine':
y = np.cos(x/sigma*np.pi/2)
y[x > sigma] = 0.
elif vortex_type == 'step':
y[x <= sigma] = 1.
else:
print('this kind of vortex (%s) is not defined' % vortex_type)
return y
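# Note: the 'cosine' and 'step' profiles above are compactly supported (zero
# beyond sigma) while 'gaussian' has infinite tails; the configurations below
# simply add and subtract several such profiles to build dipoles, rings, etc.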
# 2/ set an initial tracer field
vtype = 'gaussian'
# vortex width
sigma = 0.0*param.Lx
vortex_config = 'dipole2'
if vortex_config == 'single':
vtype = 'gaussian'
sigma = 0.03*param.Lx
vor[:] = vortex(param, grid, 0.4, 0.54, sigma,
vtype, ratio=1)
elif vortex_config == 'dipolebay':
vtype = 'gaussian'
sigma = 0.03*param.Lx
y2 = 0.53
vor[:] = vortex(param, grid, 0.15, y2, sigma,
vtype, ratio=1)
vor[:] -= vortex(param, grid, -0.15, y2, sigma,
vtype, ratio=1)
elif vortex_config == 'dipole2':
vtype = 'gaussian'
sigma = 0.05*param.Lx
x0 = 0.7
vor[:] = -vortex(param, grid, x0, 0.42, sigma,
vtype, ratio=1)
vor[:] += vortex(param, grid, x0, 0.58, sigma,
vtype, ratio=1)
elif vortex_config == 'rankine':
vtype = 'step'
ring_sigma = 0.2*param.Lx
ring_amp = 1.
vor[:] = ring_amp * vortex(param, grid, 0.5, 0.5, ring_sigma,
vtype, ratio=1)
# sigma ring, core = 0.2, 0.135 yields a tripole (with step distribution)
# sigma ring, core = 0.2, 0.12 yields a dipole (with step distribution)
core_sigma = 0.173*param.Lx
core_amp = ring_amp*(ring_sigma**2-core_sigma**2.)/core_sigma**2.
vor[:] -= (core_amp+ring_amp)*vortex(param, grid, 0.5, 0.5, core_sigma,
vtype, ratio=1)
elif vortex_config == 'dipole':
vtype = 'gaussian'
sigma = 0.04*param.Lx
vor[:] = vortex(param, grid, 0.3, 0.52, sigma, vtype)
vor[:] -= vortex(param, grid, 0.3, 0.48, sigma, vtype)
elif vortex_config == 'chasing':
sigma = 0.03*param.Lx
vtype = 'step'
vor[:] = vortex(param, grid, 0.3, 0.6, sigma, vtype)
vor[:] -= vortex(param, grid, 0.3, 0.4, sigma, vtype)
vor[:] += vortex(param, grid, 0.1, 0.55, sigma, vtype)
vor[:] -= vortex(param, grid, 0.1, 0.45, sigma, vtype)
elif vortex_config == 'corotating':
sigma = 0.06*param.Lx
dist = 0.25*param.Lx
vtype = 'gaussian'
vor[:] = vortex(param, grid, 0.5, 0.5+dist/2, sigma, vtype)
vor[:] += vortex(param, grid, 0.5, 0.5-dist/2, sigma, vtype)
elif vortex_config == 'collection':
vtype = 'cosine'
x0 = [0.3, 0.4, 0.6, 0.8]
y0 = [0.5, 0.5, 0.5, 0.5]
amplitude = [1, -2, -1, 2]
width = np.array([1, 0.5, 1, 0.5])*0.04*param.Lx
for x, y, a, s in zip(x0, y0, amplitude, width):
vor[:] += a*vortex(param, grid, x, y, s, vtype)
elif vortex_config == 'unequal':
# Melander, Zabusky, McWilliams 1987
# Asymmetric vortex merger in two dimensions: Which vortex is 'victorious'?
s1 = 0.04*param.Lx
a1 = 1.
s2 = 0.1*param.Lx
a2 = 0.2
vtype = 'cosine'
vor[:] = a1*vortex(param, grid, 0.5, 0.6, s1, vtype)
vor[:] += a2*vortex(param, grid, 0.5, 0.4, s2, vtype)
vor[:] = vor*grid.msk
if False:
np.random.seed(1) # this guarantees the results reproducibility
noise = np.random.normal(size=np.shape(yr))*grid.msk
noise -= grid.domain_integration(noise)*grid.msk/grid.area
grid.fill_halo(noise)
noise_amplitude = 1e-3
vor += noise*noise_amplitude
model.set_psi_from_vorticity()
state = model.var.get('tracer')
state[:] = np.round(xr*6) % 2 + np.round(yr*6) % 2
state *= grid.msk
# normalization of the vorticity so that enstrophy == 1.
model.diagnostics(model.var, 0)
enstrophy = model.diags['enstrophy']
# print('enstrophy = %g' % enstrophy)
vor[:] = vor[:] / np.sqrt(enstrophy)
model.set_psi_from_vorticity()
f2d.loop()
|
[
"grid.Grid",
"numpy.shape",
"numpy.sqrt",
"ana_profiles.vortex",
"numpy.asarray",
"fluid2d.Fluid2d",
"numpy.exp",
"numpy.array",
"numpy.random.seed",
"numpy.cos",
"sys.exit",
"param.Param",
"numpy.round"
] |
[((820, 840), 'param.Param', 'Param', (['"""default.xml"""'], {}), "('default.xml')\n", (825, 840), False, 'from param import Param\n'), ((1670, 1681), 'grid.Grid', 'Grid', (['param'], {}), '(param)\n', (1674, 1681), False, 'from grid import Grid\n'), ((2456, 2476), 'fluid2d.Fluid2d', 'Fluid2d', (['param', 'grid'], {}), '(param, grid)\n', (2463, 2476), False, 'from fluid2d import Fluid2d\n'), ((1491, 1510), 'numpy.array', 'np.array', (['[-2, 2.0]'], {}), '([-2, 2.0])\n', (1499, 1510), True, 'import numpy as np\n'), ((1963, 2024), 'ana_profiles.vortex', 'ap.vortex', (['xr', 'yr', 'param.Lx', 'param.Ly', 'x0', 'y0', 'radius', '"""step"""'], {}), "(xr, yr, param.Lx, param.Ly, x0, y0, radius, 'step')\n", (1972, 2024), True, 'import ana_profiles as ap\n'), ((2089, 2116), 'numpy.asarray', 'np.asarray', (['msk2'], {'dtype': 'int'}), '(msk2, dtype=int)\n', (2099, 2116), True, 'import numpy as np\n'), ((2915, 2990), 'numpy.sqrt', 'np.sqrt', (['((xr - param.Lx * x0) ** 2 + (yr - param.Ly * y0) ** 2 * ratio ** 2)'], {}), '((xr - param.Lx * x0) ** 2 + (yr - param.Ly * y0) ** 2 * ratio ** 2)\n', (2922, 2990), True, 'import numpy as np\n'), ((6296, 6313), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (6310, 6313), True, 'import numpy as np\n'), ((6888, 6906), 'numpy.sqrt', 'np.sqrt', (['enstrophy'], {}), '(enstrophy)\n', (6895, 6906), True, 'import numpy as np\n'), ((447, 457), 'sys.exit', 'sys.exit', ([], {}), '()\n', (455, 457), False, 'import sys\n'), ((6645, 6661), 'numpy.round', 'np.round', (['(xr * 6)'], {}), '(xr * 6)\n', (6653, 6661), True, 'import numpy as np\n'), ((6666, 6682), 'numpy.round', 'np.round', (['(yr * 6)'], {}), '(yr * 6)\n', (6674, 6682), True, 'import numpy as np\n'), ((3103, 3131), 'numpy.exp', 'np.exp', (['(-x ** 2 / sigma ** 2)'], {}), '(-x ** 2 / sigma ** 2)\n', (3109, 3131), True, 'import numpy as np\n'), ((3183, 3212), 'numpy.cos', 'np.cos', (['(x / sigma * np.pi / 2)'], {}), '(x / sigma * np.pi / 2)\n', (3189, 3212), True, 'import numpy as np\n'), ((6395, 6407), 'numpy.shape', 'np.shape', (['yr'], {}), '(yr)\n', (6403, 6407), True, 'import numpy as np\n'), ((5745, 5771), 'numpy.array', 'np.array', (['[1, 0.5, 1, 0.5]'], {}), '([1, 0.5, 1, 0.5])\n', (5753, 5771), True, 'import numpy as np\n')]
|
"""import pywt
import numpy as np
import matplotlib.pyplot as plt
t = np.linspace(-1, 1, 200, endpoint=False)
sig = np.cos(2 * np.pi * 7 * t) + np.real(np.exp(-7*(t-0.4)**2)*np.exp(1j*2*np.pi*2*(t-0.4)))
plt.plot(t, sig)
plt.show()
widths = np.arange(1, 31)
cwtmatr, freqs = pywt.cwt(sig, widths, 'mexh')
print(cwtmatr)
print(freqs)
plt.imshow(cwtmatr, extent=[-1, 1, 1, 31], cmap='PRGn', aspect='auto',
vmax=abs(cwtmatr).max(), vmin=-abs(cwtmatr).max()) # doctest: +SKIP
plt.show() # doctest: +SKIP"""
import numpy as np
import matplotlib.pyplot as plt
import pywt
import pywt.data
t = [i for i in range(100)]
t = np.asarray(t)
sig = np.cos(2 * np.pi / 7 * t) + np.cos(2 * np.pi / 50 * t) - np.cos(2 * np.pi / 4 * t) + np.cos(2 * np.pi / 2 * t)
"""
ca, cd = pywt.dwt(sig, "bior3.5")
cd_0 = [0 for _ in range(len(cd))]
ca_0 = [0 for _ in range(len(ca))]
sig_ret = pywt.idwt(ca, cd_0, "bior3.5")
sig_ret2 = pywt.idwt(ca_0, cd, "bior3.5")"""
coeffs = pywt.wavedec(sig, "rbio6.8", level=3)
print(pywt.wavelist('rbio'))
print(len(coeffs))
coeffs[3] = np.asarray([0 for i in range(len(coeffs[3]))])
coeffs[2] = np.asarray([0 for i in range(len(coeffs[2]))])
coeffs[1] = np.asarray([0 for i in range(len(coeffs[1]))])
# coeffs[0] = np.asarray([0 for i in range(len(coeffs[0]))])
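# Zeroing the detail coefficients (coeffs[1:] = cD3, cD2, cD1) while keeping
# the level-3 approximation coeffs[0] acts as a crude low-pass filter, so the
# reconstruction below should mostly retain the slow cos(2*pi/50*t) component
# plotted for comparison.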
sig_ret = pywt.waverec(coeffs, "rbio6.8")
# plt.plot(t, sig)
plt.plot(t, sig_ret)
# plt.plot(t, np.cos(2 * np.pi / 7 * t))
plt.plot(t, np.cos(2 * np.pi / 50 * t))
# plt.plot(t, - np.cos(2 * np.pi / 4 * t))
# plt.plot(t, np.cos(2 * np.pi / 2 * t))
plt.show()
|
[
"pywt.wavelist",
"matplotlib.pyplot.plot",
"numpy.asarray",
"numpy.cos",
"pywt.wavedec",
"pywt.waverec",
"matplotlib.pyplot.show"
] |
[((631, 644), 'numpy.asarray', 'np.asarray', (['t'], {}), '(t)\n', (641, 644), True, 'import numpy as np\n'), ((967, 1004), 'pywt.wavedec', 'pywt.wavedec', (['sig', '"""rbio6.8"""'], {'level': '(3)'}), "(sig, 'rbio6.8', level=3)\n", (979, 1004), False, 'import pywt\n'), ((1301, 1332), 'pywt.waverec', 'pywt.waverec', (['coeffs', '"""rbio6.8"""'], {}), "(coeffs, 'rbio6.8')\n", (1313, 1332), False, 'import pywt\n'), ((1353, 1373), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'sig_ret'], {}), '(t, sig_ret)\n', (1361, 1373), True, 'import matplotlib.pyplot as plt\n'), ((1539, 1549), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1547, 1549), True, 'import matplotlib.pyplot as plt\n'), ((736, 761), 'numpy.cos', 'np.cos', (['(2 * np.pi / 2 * t)'], {}), '(2 * np.pi / 2 * t)\n', (742, 761), True, 'import numpy as np\n'), ((1011, 1032), 'pywt.wavelist', 'pywt.wavelist', (['"""rbio"""'], {}), "('rbio')\n", (1024, 1032), False, 'import pywt\n'), ((1427, 1453), 'numpy.cos', 'np.cos', (['(2 * np.pi / 50 * t)'], {}), '(2 * np.pi / 50 * t)\n', (1433, 1453), True, 'import numpy as np\n'), ((708, 733), 'numpy.cos', 'np.cos', (['(2 * np.pi / 4 * t)'], {}), '(2 * np.pi / 4 * t)\n', (714, 733), True, 'import numpy as np\n'), ((651, 676), 'numpy.cos', 'np.cos', (['(2 * np.pi / 7 * t)'], {}), '(2 * np.pi / 7 * t)\n', (657, 676), True, 'import numpy as np\n'), ((679, 705), 'numpy.cos', 'np.cos', (['(2 * np.pi / 50 * t)'], {}), '(2 * np.pi / 50 * t)\n', (685, 705), True, 'import numpy as np\n')]
|
import numpy as np
class user:
def __init__(self):
self.planned_channel = -1
self.transmission_success = False
print('user creation success')
def choose_channel(self, method, num_channels):
if method == 'uniform':
self.planned_channel = np.random.randint(0, num_channels)
print('planned channel', self.planned_channel)
def transmit(self, occupied_channels):
if self.planned_channel in occupied_channels:
self.transmission_success = False
            print('transmission failed')
else:
occupied_channels.add(self.planned_channel)
self.transmission_success = True
            print('transmission successful')
def reset(self):
self.planned_channel = -1
self.transmission_success = False
class net:
def __init__(self, num_channels, num_users, num_transmissions):
self.num_transmissions = num_transmissions
self.num_channels = num_channels
self.occupied_channels = set()
self.users = [user() for i in range(num_users)]
def reset(self):
self.occupied_channels = set()
for user in self.users:
user.reset()
def step(self):
for i in range(self.num_transmissions):
for user in self.users:
user.choose_channel('uniform', self.num_channels)
user.transmit(self.occupied_channels)
def main():
net1 = net(2, 3, 10)
print('Transmission state for user # 0',
net1.users[0].transmission_success)
net1.step()
print(net1.occupied_channels)
net1.reset()
if __name__ == '__main__':
main()
|
[
"numpy.random.randint"
] |
[((291, 325), 'numpy.random.randint', 'np.random.randint', (['(0)', 'num_channels'], {}), '(0, num_channels)\n', (308, 325), True, 'import numpy as np\n')]
|
import random
import numpy as np
import pynmmso as nmmso
class Swarm:
"""
Represents a swarm in the NMMSO algorithm.
Arguments
---------
id : int
Id used to refer to the swarm
swarm_size : int
Maximum number of particles in the swarm
problem :
Instance of the problem class. Must implement get_bounds and fitness functions.
listener : subclass of nmmso.listeners.BaseListener
Listener object to receive notification of events. Optional.
Attributes
----------
id : int
A unique identification number of this swarm.
mode_location : numpy array
The location of this mode.
mode_value : float
The fitness of the mode location.
number_of_particles : int
Number of particles in the swarm.
history_locations : 2D Numpy array
The current locations of each particle in the swarm.
history_values : 1D Numpy array
The fitness values for current locations of each particle in the swarm.
velocities : 2D Numpy array
Current velocity of each particle in the swarm.
    pbest_locations : 2D Numpy array
        The best location discovered by each particle.
    pbest_values : 1D Numpy array
        The fitness value associated with the best location of each particle in the swarm.
"""
def __init__(self, id, swarm_size, problem, listener=None):
self.id = id
self.swarm_size = swarm_size
self.problem = problem
self.listener = listener
self.mn = np.array(problem.get_bounds()[0])
self.mx = np.array(problem.get_bounds()[1])
self.changed = True
self.converged = False
self.num_dimensions = len(self.mn)
self.mode_location = None # Will be populated later on
self.new_location = None # Will be populated later on
self.mode_value = None # Will be populated later on
# Initialize locations for swarm elements
# current locations of swarm
self.history_locations = np.zeros((self.swarm_size, self.num_dimensions))
# current values of swarm
self.history_values = np.full(self.swarm_size, -np.inf)
# current best locations of swarm
self.pbest_locations = np.zeros((self.swarm_size, self.num_dimensions))
# current best values of swarm
self.pbest_values = np.full(self.swarm_size, -np.inf)
self.velocities = np.zeros((swarm_size, self.num_dimensions))
self.number_of_particles = 1
self.shifted_loc = None # Will be populated later on
self.dist = None # Will be populated later on
def set_initial_location(self):
"""Sets the initial location of a swarm."""
self.changed = True
self.new_location = (np.random.rand(self.num_dimensions) * (self.mx-self.mn)) + self.mn
# random initial velocities of swarm
self.velocities[0, :] = (np.random.rand(self.num_dimensions) * (self.mx-self.mn)) + self.mn
def set_arbitrary_distance(self):
"""Set an arbitrary distance - this is done when we only have one swarm"""
self.dist = np.min(self.mx-self.mn)
def increment(self):
""" Increments the swarm. """
new_location = self.mn - 1
d = self.dist
shifted = False
omega = 0.1
reject = 0
r = random.randrange(self.swarm_size) # select particle at random to move
while np.sum(new_location < self.mn) > 0 or np.sum(new_location > self.mx) > 0:
# if swarm is not yet at capacity, simply add a new particle
if self.number_of_particles < self.swarm_size:
usp = nmmso.Nmmso.uniform_sphere_points(1, self.num_dimensions)[0]
new_location = self.mode_location + usp * (d/2)
else:
# move an existing particle
shifted = True
self.shifted_loc = r
r1 = np.random.rand(self.num_dimensions)
r2 = np.random.rand(self.num_dimensions)
temp_vel = omega * self.velocities[self.shifted_loc, :] + \
2.0 * r1 * \
(self.mode_location - self.history_locations[self.shifted_loc, :]) + \
2.0 * r2 * \
(self.pbest_locations[self.shifted_loc, :] -
self.history_locations[self.shifted_loc, :])
if reject > 20:
# if we keep rejecting then put at extreme any violating design parameters
i_max = np.flatnonzero(
np.asarray(
self.history_locations[self.shifted_loc, :] + temp_vel > self.mx))
i_min = np.flatnonzero(
np.asarray(
self.history_locations[self.shifted_loc, :] + temp_vel < self.mn))
if i_max.size > 0:
temp_vel[i_max] = \
np.random.rand(i_max.size) * \
(self.mx[i_max] - self.history_locations[self.shifted_loc, i_max])
if i_min.size > 0:
temp_vel[i_min] = \
np.random.rand(i_min.size) * \
(self.history_locations[self.shifted_loc, i_min] - self.mn[i_min])
new_location = self.history_locations[self.shifted_loc, :] + temp_vel
reject = reject + 1
if shifted:
self.velocities[self.shifted_loc, :] = temp_vel
else:
# otherwise initialise velocity in sphere based on distance from gbest to next
# closest mode
self.number_of_particles = self.number_of_particles + 1
self.shifted_loc = self.number_of_particles - 1
temp_vel = self.mn - 1
reject = 0
while np.sum(temp_vel < self.mn) > 0 or np.sum(temp_vel > self.mx) > 0:
temp_vel = \
self.mode_location + \
nmmso.Nmmso.uniform_sphere_points(1, self.num_dimensions)[0] * (d / 2)
reject = reject + 1
if reject > 20: # resolve if keep rejecting
temp_vel = np.random.rand(self.num_dimensions)*(self.mx-self.mn) + self.mn
self.velocities[self.shifted_loc, :] = temp_vel
self.new_location = new_location
if self.listener is not None:
if shifted:
self.listener.swarm_moved_particle(self)
else:
self.listener.swarm_added_particle(self)
def initialise_with_uniform_crossover(self, swarm1, swarm2):
"""
Initialise a new swarm with the uniform crossover of the given swarms.
Arguments
---------
swarm1 : Swarm
swarm2 : Swarm
"""
self.new_location, _ = Swarm.uni(swarm1.mode_location, swarm2.mode_location)
self.evaluate_first()
self.changed = True
self.converged = False
def distance_to(self, swarm):
"""
Euclidean distance between this swarm and the given swarm, based on their mode locations.
Returns
-------
float
The distance between the two swarms.
"""
return np.linalg.norm(self.mode_location-swarm.mode_location)
def merge(self, swarm):
"""
Merges the give swarm into this swarm.
Arguments
----------
swarm : Swarm
Swarm to merge into this swarm.
"""
n1 = self.number_of_particles
n2 = swarm.number_of_particles
if n1 + n2 < self.swarm_size:
# simplest solution, where the combined active members of both populations
# are below the total size they can grow to
self.number_of_particles = n1 + n2
self.history_locations[n1:n1 + n2, :] = swarm.history_locations[0:n2, :]
self.history_values[n1:n1 + n2] = swarm.history_values[0:n2]
self.pbest_locations[n1:n1 + n2, :] = swarm.pbest_locations[0:n2, :]
self.pbest_values[n1:n1 + n2] = swarm.pbest_values[0:n2]
self.velocities[n1:n1 + n2, :] = swarm.velocities[0:n2, :]
else:
            # select best out of the combined population, based on current location (rather than pbest)
self.number_of_particles = self.swarm_size
temp_h_loc = \
np.concatenate((self.history_locations[0:n1, :], swarm.history_locations[0:n2, :]))
temp_h_v = \
np.concatenate((self.history_values[0:n1], swarm.history_values[0:n2]))
temp_p_loc = \
np.concatenate((self.pbest_locations[0:n1, :], swarm.pbest_locations[0:n2, :]))
temp_p_v = np.concatenate((self.pbest_values[0:n1], swarm.pbest_values[0:n2]))
temp_vel = np.concatenate((self.velocities[0:n1, :], swarm.velocities[0:n2, :]))
# get the indices of highest values
I = np.argsort(temp_h_v)[len(temp_h_v) - self.swarm_size:]
self.history_locations = temp_h_loc[I, :]
self.history_values = temp_h_v[I]
self.pbest_locations = temp_p_loc[I, :]
self.pbest_values = temp_p_v[I]
self.velocities = temp_vel[I, :]
def initialise_new_swarm_velocities(self):
"""Initialises velocities of a new swarm."""
reject = 0
temp_vel = self.mn - 1
while np.sum(temp_vel < self.mn) > 0 or np.sum(temp_vel > self.mx) > 0:
temp_vel = self.mode_location + \
nmmso.Nmmso.uniform_sphere_points(1, self.num_dimensions)[0] * \
(self.dist / 2)
reject += 1
if reject > 20:
temp_vel = (np.random.rand(self.num_dimensions) * (self.mx-self.mn)) + self.mn
self.velocities[0, :] = temp_vel
def update_location_and_value(self, location, value):
"""
Updates the location and value of this swarm.
Arguments
---------
        location : numpy array
New location of swarm
value : float
New fitness value at swarm location.
"""
previous_location = self.mode_location
previous_value = self.mode_value
self.mode_location = location
self.mode_value = value
if self.listener is not None:
self.listener.swarm_peak_changed(self, previous_location, previous_value)
def evaluate_first(self):
"""
Evaluates the new location. This is the first evaluation so no need to examine
if a shift has occurred
"""
# new location is the only solution thus far in mode, so by definition
# is also the mode estimate, and the only history thus far
y = self.problem.fitness(self.new_location)
if not np.isscalar(y):
raise ValueError("Problem class's fitness method must return a scalar value.")
if self.listener is not None:
self.listener.location_evaluated(self.new_location, y)
self.mode_location = self.new_location # gbest location
self.mode_value = y # gbest value
self.history_locations[0, :] = self.mode_location
self.history_values[0] = y
self.pbest_locations[0, :] = self.mode_location
self.pbest_values[0] = y
def evaluate(self, y):
"""
Takes the value at the new location and updates the swarm statistics and history.
Arguments
---------
y : float
fitness value at the new location.
"""
if y > self.mode_value:
self.update_location_and_value(self.new_location, y)
self.changed = True
self.history_locations[self.shifted_loc, :] = self.new_location
self.history_values[self.shifted_loc] = y
if y > self.pbest_values[self.shifted_loc]:
self.pbest_values[self.shifted_loc] = y
self.pbest_locations[self.shifted_loc, :] = self.new_location
def find_nearest(self, swarms):
"""
Finds the nearest swarm from the given set of swarms.
Returns
-------
swarm
The nearest swarm this this swarm.
"""
best_swarm = None
distance = np.inf
for s in swarms:
if self != s:
d = np.sum((self.mode_location - s.mode_location) ** 2)
if d < distance:
distance = d
best_swarm = s
self.dist = np.sqrt(distance) # track Euc distance to nearest neighbour
return best_swarm, self.dist
@staticmethod
def uni(x1, x2):
"""
Uniform binary crossover.
Arguments
---------
x1 : numpy array of parameters
x2 : numpy array of parameters
Returns:
            (numpy array, numpy array)
                Two new parameter arrays formed by uniform crossover of x1 and x2.
"""
        # uniform crossover: swap a random subset of positions between the two parents
x_c = x1.copy()
x_d = x2.copy()
l = len(x1)
r = np.flatnonzero(np.random.rand(l, 1) > 0.5)
# ensure at least one is swapped
if r.size == 0 or r.size == l:
r = np.random.randint(l)
x_c[r] = x2[r]
x_d[r] = x1[r]
return x_c, x_d
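
# --- Hedged usage sketch (not part of the original module) ---
# A minimal, self-contained demonstration of the static uniform-crossover
# helper above.  The two parent vectors are arbitrary values chosen only for
# illustration; everything else comes straight from Swarm.uni.
if __name__ == '__main__':
    parent_a = np.array([0.0, 0.0, 0.0, 0.0])
    parent_b = np.array([1.0, 1.0, 1.0, 1.0])
    child_a, child_b = Swarm.uni(parent_a, parent_b)
    # Each position holds a value from exactly one parent, and at least one
    # position is guaranteed to have been swapped.
    print(child_a, child_b)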
|
[
"numpy.sqrt",
"numpy.isscalar",
"numpy.random.rand",
"random.randrange",
"pynmmso.Nmmso.uniform_sphere_points",
"numpy.asarray",
"numpy.min",
"numpy.argsort",
"numpy.sum",
"numpy.zeros",
"numpy.random.randint",
"numpy.concatenate",
"numpy.linalg.norm",
"numpy.full"
] |
[((2052, 2100), 'numpy.zeros', 'np.zeros', (['(self.swarm_size, self.num_dimensions)'], {}), '((self.swarm_size, self.num_dimensions))\n', (2060, 2100), True, 'import numpy as np\n'), ((2165, 2198), 'numpy.full', 'np.full', (['self.swarm_size', '(-np.inf)'], {}), '(self.swarm_size, -np.inf)\n', (2172, 2198), True, 'import numpy as np\n'), ((2273, 2321), 'numpy.zeros', 'np.zeros', (['(self.swarm_size, self.num_dimensions)'], {}), '((self.swarm_size, self.num_dimensions))\n', (2281, 2321), True, 'import numpy as np\n'), ((2389, 2422), 'numpy.full', 'np.full', (['self.swarm_size', '(-np.inf)'], {}), '(self.swarm_size, -np.inf)\n', (2396, 2422), True, 'import numpy as np\n'), ((2450, 2493), 'numpy.zeros', 'np.zeros', (['(swarm_size, self.num_dimensions)'], {}), '((swarm_size, self.num_dimensions))\n', (2458, 2493), True, 'import numpy as np\n'), ((3149, 3174), 'numpy.min', 'np.min', (['(self.mx - self.mn)'], {}), '(self.mx - self.mn)\n', (3155, 3174), True, 'import numpy as np\n'), ((3371, 3404), 'random.randrange', 'random.randrange', (['self.swarm_size'], {}), '(self.swarm_size)\n', (3387, 3404), False, 'import random\n'), ((7363, 7419), 'numpy.linalg.norm', 'np.linalg.norm', (['(self.mode_location - swarm.mode_location)'], {}), '(self.mode_location - swarm.mode_location)\n', (7377, 7419), True, 'import numpy as np\n'), ((12664, 12681), 'numpy.sqrt', 'np.sqrt', (['distance'], {}), '(distance)\n', (12671, 12681), True, 'import numpy as np\n'), ((8521, 8609), 'numpy.concatenate', 'np.concatenate', (['(self.history_locations[0:n1, :], swarm.history_locations[0:n2, :])'], {}), '((self.history_locations[0:n1, :], swarm.history_locations[0:\n n2, :]))\n', (8535, 8609), True, 'import numpy as np\n'), ((8646, 8717), 'numpy.concatenate', 'np.concatenate', (['(self.history_values[0:n1], swarm.history_values[0:n2])'], {}), '((self.history_values[0:n1], swarm.history_values[0:n2]))\n', (8660, 8717), True, 'import numpy as np\n'), ((8761, 8840), 'numpy.concatenate', 'np.concatenate', (['(self.pbest_locations[0:n1, :], swarm.pbest_locations[0:n2, :])'], {}), '((self.pbest_locations[0:n1, :], swarm.pbest_locations[0:n2, :]))\n', (8775, 8840), True, 'import numpy as np\n'), ((8864, 8931), 'numpy.concatenate', 'np.concatenate', (['(self.pbest_values[0:n1], swarm.pbest_values[0:n2])'], {}), '((self.pbest_values[0:n1], swarm.pbest_values[0:n2]))\n', (8878, 8931), True, 'import numpy as np\n'), ((8955, 9024), 'numpy.concatenate', 'np.concatenate', (['(self.velocities[0:n1, :], swarm.velocities[0:n2, :])'], {}), '((self.velocities[0:n1, :], swarm.velocities[0:n2, :]))\n', (8969, 9024), True, 'import numpy as np\n'), ((10967, 10981), 'numpy.isscalar', 'np.isscalar', (['y'], {}), '(y)\n', (10978, 10981), True, 'import numpy as np\n'), ((13339, 13359), 'numpy.random.randint', 'np.random.randint', (['l'], {}), '(l)\n', (13356, 13359), True, 'import numpy as np\n'), ((2795, 2830), 'numpy.random.rand', 'np.random.rand', (['self.num_dimensions'], {}), '(self.num_dimensions)\n', (2809, 2830), True, 'import numpy as np\n'), ((2940, 2975), 'numpy.random.rand', 'np.random.rand', (['self.num_dimensions'], {}), '(self.num_dimensions)\n', (2954, 2975), True, 'import numpy as np\n'), ((3458, 3488), 'numpy.sum', 'np.sum', (['(new_location < self.mn)'], {}), '(new_location < self.mn)\n', (3464, 3488), True, 'import numpy as np\n'), ((3496, 3526), 'numpy.sum', 'np.sum', (['(new_location > self.mx)'], {}), '(new_location > self.mx)\n', (3502, 3526), True, 'import numpy as np\n'), ((3963, 3998), 'numpy.random.rand', 
'np.random.rand', (['self.num_dimensions'], {}), '(self.num_dimensions)\n', (3977, 3998), True, 'import numpy as np\n'), ((4020, 4055), 'numpy.random.rand', 'np.random.rand', (['self.num_dimensions'], {}), '(self.num_dimensions)\n', (4034, 4055), True, 'import numpy as np\n'), ((9091, 9111), 'numpy.argsort', 'np.argsort', (['temp_h_v'], {}), '(temp_h_v)\n', (9101, 9111), True, 'import numpy as np\n'), ((9554, 9580), 'numpy.sum', 'np.sum', (['(temp_vel < self.mn)'], {}), '(temp_vel < self.mn)\n', (9560, 9580), True, 'import numpy as np\n'), ((9588, 9614), 'numpy.sum', 'np.sum', (['(temp_vel > self.mx)'], {}), '(temp_vel > self.mx)\n', (9594, 9614), True, 'import numpy as np\n'), ((12490, 12541), 'numpy.sum', 'np.sum', (['((self.mode_location - s.mode_location) ** 2)'], {}), '((self.mode_location - s.mode_location) ** 2)\n', (12496, 12541), True, 'import numpy as np\n'), ((13215, 13235), 'numpy.random.rand', 'np.random.rand', (['l', '(1)'], {}), '(l, 1)\n', (13229, 13235), True, 'import numpy as np\n'), ((3687, 3744), 'pynmmso.Nmmso.uniform_sphere_points', 'nmmso.Nmmso.uniform_sphere_points', (['(1)', 'self.num_dimensions'], {}), '(1, self.num_dimensions)\n', (3720, 3744), True, 'import pynmmso as nmmso\n'), ((5949, 5975), 'numpy.sum', 'np.sum', (['(temp_vel < self.mn)'], {}), '(temp_vel < self.mn)\n', (5955, 5975), True, 'import numpy as np\n'), ((5983, 6009), 'numpy.sum', 'np.sum', (['(temp_vel > self.mx)'], {}), '(temp_vel > self.mx)\n', (5989, 6009), True, 'import numpy as np\n'), ((4652, 4728), 'numpy.asarray', 'np.asarray', (['(self.history_locations[self.shifted_loc, :] + temp_vel > self.mx)'], {}), '(self.history_locations[self.shifted_loc, :] + temp_vel > self.mx)\n', (4662, 4728), True, 'import numpy as np\n'), ((4827, 4903), 'numpy.asarray', 'np.asarray', (['(self.history_locations[self.shifted_loc, :] + temp_vel < self.mn)'], {}), '(self.history_locations[self.shifted_loc, :] + temp_vel < self.mn)\n', (4837, 4903), True, 'import numpy as np\n'), ((9689, 9746), 'pynmmso.Nmmso.uniform_sphere_points', 'nmmso.Nmmso.uniform_sphere_points', (['(1)', 'self.num_dimensions'], {}), '(1, self.num_dimensions)\n', (9722, 9746), True, 'import pynmmso as nmmso\n'), ((9874, 9909), 'numpy.random.rand', 'np.random.rand', (['self.num_dimensions'], {}), '(self.num_dimensions)\n', (9888, 9909), True, 'import numpy as np\n'), ((5045, 5071), 'numpy.random.rand', 'np.random.rand', (['i_max.size'], {}), '(i_max.size)\n', (5059, 5071), True, 'import numpy as np\n'), ((5282, 5308), 'numpy.random.rand', 'np.random.rand', (['i_min.size'], {}), '(i_min.size)\n', (5296, 5308), True, 'import numpy as np\n'), ((6107, 6164), 'pynmmso.Nmmso.uniform_sphere_points', 'nmmso.Nmmso.uniform_sphere_points', (['(1)', 'self.num_dimensions'], {}), '(1, self.num_dimensions)\n', (6140, 6164), True, 'import pynmmso as nmmso\n'), ((6306, 6341), 'numpy.random.rand', 'np.random.rand', (['self.num_dimensions'], {}), '(self.num_dimensions)\n', (6320, 6341), True, 'import numpy as np\n')]
|
# Copyright (c) 2020, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
from coremltools.converters.mil.mil import Builder as mb
from coremltools.converters.mil.testing_utils import (
assert_model_is_valid,
get_op_types_in_program,
apply_pass_and_basic_check,
)
from coremltools.converters.mil.mil.passes.pass_registry import PASS_REGISTRY
import copy
import pytest
import itertools
import numpy as np
@pytest.mark.parametrize("op_type, pos, val", itertools.product(['add', 'mul', 'floor_div', 'pow', 'real_div', 'sub'], ['x', 'y'], [0, 1, [0, 0, 0, 0], [1, 1, 1, 1]]))
def test_elementwise_elimination(op_type, pos, val):
if 'div' in op_type and np.prod(val) == 0:
return
if 'pow' in op_type and (val != 0 or val != 1):
return
test_op = getattr(mb, op_type)
@mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))])
def prog(x):
if pos == "x":
r1 = test_op(x=val, y=x)
else:
r1 = test_op(x=x, y=val)
return mb.relu(x=r1)
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::noop_elimination"
)
original_program = [op_type, "relu"]
new_program = original_program
if op_type in {'add'}:
if val == 0 or val == [0, 0, 0, 0]:
new_program = ["relu"]
elif op_type in {'mul'}:
if val == 1 or val == [1, 1, 1, 1]:
new_program = ["relu"]
elif op_type in {'pow', 'real_div', 'floor_div'}:
if pos == 'y' and (val == 1 or val == [1, 1, 1, 1]):
new_program = ["relu"]
elif op_type in {'sub'}:
if pos == 'y' and (val == 0 or val == [0, 0, 0, 0]):
new_program = ["relu"]
assert get_op_types_in_program(prev_prog) == original_program
assert get_op_types_in_program(prog) == new_program
assert_model_is_valid(
prog,
{"x": (2, 4)},
expected_output_shapes={block.outputs[0].name: (2, 4)},
)
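# The elimination must not fire when removing the elementwise op would change
# the output shape (i.e. the constant operand broadcasts the input), which the
# next test exercises.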
def test_elementwise_broadcast():
@mb.program(input_specs=[mb.TensorSpec(shape=[4])])
def prog(x):
r1 = mb.add(x=x, y=[[0, 0, 0, 0], [0, 0, 0, 0]])
return mb.relu(x=r1)
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::noop_elimination"
)
original_program = ["add", "relu"]
assert get_op_types_in_program(prev_prog) == original_program
assert get_op_types_in_program(prog) == original_program
assert_model_is_valid(
prog,
{"x": [4]},
expected_output_shapes={block.outputs[0].name: (2, 4)},
)
def test_reshape_elimination():
@mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))])
def prog(x):
r1 = mb.reshape(x=x, shape=[1, 8])
r2 = mb.reshape(x=r1, shape=[1, 8])
return mb.relu(x=r1)
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::noop_elimination"
)
assert get_op_types_in_program(prev_prog) == ["reshape", "reshape", "relu"]
assert get_op_types_in_program(prog) == ["reshape", "relu"]
assert_model_is_valid(
prog,
{"x": (2, 4)},
expected_output_shapes={block.outputs[0].name: (1, 8)},
)
def test_oneway_split_elimination():
@mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))])
def prog(x):
r1 = mb.split(x=x, num_splits=1, axis=-1)
return mb.relu(x=r1)
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::noop_elimination"
)
assert get_op_types_in_program(prev_prog) == ["split", "relu"]
assert get_op_types_in_program(prog) == ["relu"]
assert_model_is_valid(
prog,
{"x": (2, 4)},
expected_output_shapes={block.outputs[0].name: (2, 4)},
)
def test_full_split_elimination():
@mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))])
def prog(x):
r1 = mb.split(x=x, split_sizes=[4], axis=-1)
return mb.relu(x=r1)
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::noop_elimination"
)
assert get_op_types_in_program(prev_prog) == ["split", "relu"]
assert get_op_types_in_program(prog) == ["relu"]
assert_model_is_valid(
prog,
{"x": (2, 4)},
expected_output_shapes={block.outputs[0].name: (2, 4)},
)
def test_slicebysize_full_elimination():
@mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))])
def prog(x):
r1 = mb.slice_by_size(x=x, begin=[0, 0], size=[2, 4])
return mb.relu(x=r1)
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::noop_elimination"
)
assert get_op_types_in_program(prev_prog) == ["slice_by_size", "relu"]
assert get_op_types_in_program(prog) == ["relu"]
assert_model_is_valid(
prog,
{"x": (2, 4)},
expected_output_shapes={block.outputs[0].name: (2, 4)},
)
def test_slicebysize_to_end_elimination():
@mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))])
def prog(x):
r1 = mb.slice_by_size(x=x, begin=[0, 0], size=[-1, -1])
return mb.relu(x=r1)
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::noop_elimination"
)
assert get_op_types_in_program(prev_prog) == ["slice_by_size", "relu"]
assert get_op_types_in_program(prog) == ["relu"]
assert_model_is_valid(
prog,
{"x": (2, 4)},
expected_output_shapes={block.outputs[0].name: (2, 4)},
)
def test_slicebyindex_full_elimination():
@mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))])
def prog(x):
r1 = mb.slice_by_index(x=x, begin=[0, 0], end=[2, 4])
return mb.relu(x=r1)
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::noop_elimination"
)
assert get_op_types_in_program(prev_prog) == ["slice_by_index", "relu"]
assert get_op_types_in_program(prog) == ["relu"]
assert_model_is_valid(
prog,
{"x": (2, 4)},
expected_output_shapes={block.outputs[0].name: (2, 4)},
)
@pytest.mark.parametrize("begin_mask, end_mask",
itertools.product(itertools.product([True, False],[True, False]),
itertools.product([True, False],[True, False])))
def test_slicebyindex_mask_elimination(begin_mask, end_mask):
@mb.program(input_specs=[mb.TensorSpec(shape=(4, 4))])
def prog(x):
begin = [1, 1]
end = [1, 1]
for i in range(2):
if not begin_mask[i]:
begin[i] = 0
if not end_mask[i]:
end[i] = 4
r1 = mb.slice_by_index(x=x, begin=begin, end=end, begin_mask=begin_mask, end_mask=end_mask)
return mb.relu(x=r1)
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::noop_elimination"
)
assert get_op_types_in_program(prev_prog) == ["slice_by_index", "relu"]
assert get_op_types_in_program(prog) == ["relu"]
assert_model_is_valid(
prog,
{"x": (4, 4)},
expected_output_shapes={block.outputs[0].name: (4, 4)},
)
def test_pad_elimination():
@mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))])
def prog(x):
r1 = mb.pad(x=x, pad=[0, 0, 0, 0])
return mb.relu(x=r1)
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::noop_elimination"
)
assert get_op_types_in_program(prev_prog) == ["pad", "relu"]
assert get_op_types_in_program(prog) == ["relu"]
assert_model_is_valid(
prog,
{"x": (2, 4)},
expected_output_shapes={block.outputs[0].name: (2, 4)},
)
def test_keep_pad():
@mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))])
def prog(x):
r1 = mb.pad(x=x, pad=[4, 4, 2, 2])
return mb.relu(x=r1)
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::noop_elimination"
)
assert get_op_types_in_program(prev_prog) == ["pad", "relu"]
assert get_op_types_in_program(prog) == ["pad", "relu"]
assert_model_is_valid(
prog,
{"x": (2, 4)},
expected_output_shapes={block.outputs[0].name: (10, 8)},
)
def test_tile_elimination():
@mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))])
def prog(x):
r1 = mb.tile(x=x, reps=[1, 1])
return mb.relu(x=r1)
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::noop_elimination"
)
assert get_op_types_in_program(prev_prog) == ["tile", "relu"]
assert get_op_types_in_program(prog) == ["relu"]
assert_model_is_valid(
prog,
{"x": (2, 4)},
expected_output_shapes={block.outputs[0].name: (2, 4)},
)
def test_keep_tile():
@mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))])
def prog(x):
r1 = mb.tile(x=x, reps=[2, 2])
return mb.relu(x=r1)
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::noop_elimination"
)
assert get_op_types_in_program(prev_prog) == ["tile", "relu"]
assert get_op_types_in_program(prog) == ["tile", "relu"]
assert_model_is_valid(
prog,
{"x": (2, 4)},
expected_output_shapes={block.outputs[0].name: (4, 8)},
)
def test_upsample_nearest_neighbor_elimination():
@mb.program(input_specs=[mb.TensorSpec(shape=(3, 2, 4))])
def prog(x):
r1 = mb.upsample_nearest_neighbor(x=x)
return mb.relu(x=r1)
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::noop_elimination"
)
assert get_op_types_in_program(prev_prog) == ["upsample_nearest_neighbor", "relu"]
assert get_op_types_in_program(prog) == ["relu"]
assert_model_is_valid(
prog,
{"x": (3, 2, 4)},
expected_output_shapes={block.outputs[0].name: (3, 2, 4)},
)
def test_upsample_bilinear_elimination():
@mb.program(input_specs=[mb.TensorSpec(shape=(3, 2, 4))])
def prog(x):
r1 = mb.upsample_bilinear(x=x)
return mb.relu(x=r1)
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::noop_elimination"
)
assert get_op_types_in_program(prev_prog) == ["upsample_bilinear", "relu"]
assert get_op_types_in_program(prog) == ["relu"]
assert_model_is_valid(
prog,
{"x": (3, 2, 4)},
expected_output_shapes={block.outputs[0].name: (3, 2, 4)},
)
def test_resize_bilinear_elimination():
@mb.program(input_specs=[mb.TensorSpec(shape=(3, 2, 4))])
def prog(x):
r1 = mb.resize_bilinear(x=x, target_size_height=2, target_size_width=4)
return mb.relu(x=r1)
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::noop_elimination"
)
assert get_op_types_in_program(prev_prog) == ["resize_bilinear", "relu"]
assert get_op_types_in_program(prog) == ["relu"]
assert_model_is_valid(
prog,
{"x": (3, 2, 4)},
expected_output_shapes={block.outputs[0].name: (3, 2, 4)},
)
def test_crop_elimination():
@mb.program(input_specs=[mb.TensorSpec(shape=(3, 2, 4))])
def prog(x):
r1 = mb.crop(x=x, crop_height=[0, 0], crop_width=[0, 0])
return mb.relu(x=r1)
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::noop_elimination"
)
assert get_op_types_in_program(prev_prog) == ["crop", "relu"]
assert get_op_types_in_program(prog) == ["relu"]
assert_model_is_valid(
prog,
{"x": (3, 2, 4)},
expected_output_shapes={block.outputs[0].name: (3, 2, 4)},
)
def test_linear_elimination():
@mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))])
def prog(x):
r1 = mb.linear_activation(x=x, alpha=1.0, beta=0.0)
return mb.relu(x=r1)
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::noop_elimination"
)
assert get_op_types_in_program(prev_prog) == ["linear_activation", "relu"]
assert get_op_types_in_program(prog) == ["relu"]
assert_model_is_valid(
prog,
{"x": (2, 4)},
expected_output_shapes={block.outputs[0].name: (2, 4)},
)
|
[
"numpy.prod",
"coremltools.converters.mil.testing_utils.apply_pass_and_basic_check",
"coremltools.converters.mil.testing_utils.get_op_types_in_program",
"coremltools.converters.mil.mil.Builder.TensorSpec",
"coremltools.converters.mil.mil.Builder.tile",
"itertools.product",
"coremltools.converters.mil.mil.Builder.slice_by_index",
"coremltools.converters.mil.mil.Builder.relu",
"coremltools.converters.mil.mil.Builder.resize_bilinear",
"coremltools.converters.mil.mil.Builder.pad",
"coremltools.converters.mil.mil.Builder.split",
"coremltools.converters.mil.mil.Builder.reshape",
"coremltools.converters.mil.mil.Builder.add",
"coremltools.converters.mil.testing_utils.assert_model_is_valid",
"coremltools.converters.mil.mil.Builder.crop",
"coremltools.converters.mil.mil.Builder.upsample_bilinear",
"coremltools.converters.mil.mil.Builder.upsample_nearest_neighbor",
"coremltools.converters.mil.mil.Builder.linear_activation",
"coremltools.converters.mil.mil.Builder.slice_by_size"
] |
[((1203, 1263), 'coremltools.converters.mil.testing_utils.apply_pass_and_basic_check', 'apply_pass_and_basic_check', (['prog', '"""common::noop_elimination"""'], {}), "(prog, 'common::noop_elimination')\n", (1229, 1263), False, 'from coremltools.converters.mil.testing_utils import assert_model_is_valid, get_op_types_in_program, apply_pass_and_basic_check\n'), ((1982, 2085), 'coremltools.converters.mil.testing_utils.assert_model_is_valid', 'assert_model_is_valid', (['prog', "{'x': (2, 4)}"], {'expected_output_shapes': '{block.outputs[0].name: (2, 4)}'}), "(prog, {'x': (2, 4)}, expected_output_shapes={block.\n outputs[0].name: (2, 4)})\n", (2003, 2085), False, 'from coremltools.converters.mil.testing_utils import assert_model_is_valid, get_op_types_in_program, apply_pass_and_basic_check\n'), ((610, 735), 'itertools.product', 'itertools.product', (["['add', 'mul', 'floor_div', 'pow', 'real_div', 'sub']", "['x', 'y']", '[0, 1, [0, 0, 0, 0], [1, 1, 1, 1]]'], {}), "(['add', 'mul', 'floor_div', 'pow', 'real_div', 'sub'], [\n 'x', 'y'], [0, 1, [0, 0, 0, 0], [1, 1, 1, 1]])\n", (627, 735), False, 'import itertools\n'), ((2343, 2403), 'coremltools.converters.mil.testing_utils.apply_pass_and_basic_check', 'apply_pass_and_basic_check', (['prog', '"""common::noop_elimination"""'], {}), "(prog, 'common::noop_elimination')\n", (2369, 2403), False, 'from coremltools.converters.mil.testing_utils import assert_model_is_valid, get_op_types_in_program, apply_pass_and_basic_check\n'), ((2589, 2689), 'coremltools.converters.mil.testing_utils.assert_model_is_valid', 'assert_model_is_valid', (['prog', "{'x': [4]}"], {'expected_output_shapes': '{block.outputs[0].name: (2, 4)}'}), "(prog, {'x': [4]}, expected_output_shapes={block.\n outputs[0].name: (2, 4)})\n", (2610, 2689), False, 'from coremltools.converters.mil.testing_utils import assert_model_is_valid, get_op_types_in_program, apply_pass_and_basic_check\n'), ((2977, 3037), 'coremltools.converters.mil.testing_utils.apply_pass_and_basic_check', 'apply_pass_and_basic_check', (['prog', '"""common::noop_elimination"""'], {}), "(prog, 'common::noop_elimination')\n", (3003, 3037), False, 'from coremltools.converters.mil.testing_utils import assert_model_is_valid, get_op_types_in_program, apply_pass_and_basic_check\n'), ((3200, 3303), 'coremltools.converters.mil.testing_utils.assert_model_is_valid', 'assert_model_is_valid', (['prog', "{'x': (2, 4)}"], {'expected_output_shapes': '{block.outputs[0].name: (1, 8)}'}), "(prog, {'x': (2, 4)}, expected_output_shapes={block.\n outputs[0].name: (1, 8)})\n", (3221, 3303), False, 'from coremltools.converters.mil.testing_utils import assert_model_is_valid, get_op_types_in_program, apply_pass_and_basic_check\n'), ((3561, 3621), 'coremltools.converters.mil.testing_utils.apply_pass_and_basic_check', 'apply_pass_and_basic_check', (['prog', '"""common::noop_elimination"""'], {}), "(prog, 'common::noop_elimination')\n", (3587, 3621), False, 'from coremltools.converters.mil.testing_utils import assert_model_is_valid, get_op_types_in_program, apply_pass_and_basic_check\n'), ((3760, 3863), 'coremltools.converters.mil.testing_utils.assert_model_is_valid', 'assert_model_is_valid', (['prog', "{'x': (2, 4)}"], {'expected_output_shapes': '{block.outputs[0].name: (2, 4)}'}), "(prog, {'x': (2, 4)}, expected_output_shapes={block.\n outputs[0].name: (2, 4)})\n", (3781, 3863), False, 'from coremltools.converters.mil.testing_utils import assert_model_is_valid, get_op_types_in_program, apply_pass_and_basic_check\n'), ((4122, 4182), 
'coremltools.converters.mil.testing_utils.apply_pass_and_basic_check', 'apply_pass_and_basic_check', (['prog', '"""common::noop_elimination"""'], {}), "(prog, 'common::noop_elimination')\n", (4148, 4182), False, 'from coremltools.converters.mil.testing_utils import assert_model_is_valid, get_op_types_in_program, apply_pass_and_basic_check\n'), ((4321, 4424), 'coremltools.converters.mil.testing_utils.assert_model_is_valid', 'assert_model_is_valid', (['prog', "{'x': (2, 4)}"], {'expected_output_shapes': '{block.outputs[0].name: (2, 4)}'}), "(prog, {'x': (2, 4)}, expected_output_shapes={block.\n outputs[0].name: (2, 4)})\n", (4342, 4424), False, 'from coremltools.converters.mil.testing_utils import assert_model_is_valid, get_op_types_in_program, apply_pass_and_basic_check\n'), ((4698, 4758), 'coremltools.converters.mil.testing_utils.apply_pass_and_basic_check', 'apply_pass_and_basic_check', (['prog', '"""common::noop_elimination"""'], {}), "(prog, 'common::noop_elimination')\n", (4724, 4758), False, 'from coremltools.converters.mil.testing_utils import assert_model_is_valid, get_op_types_in_program, apply_pass_and_basic_check\n'), ((4905, 5008), 'coremltools.converters.mil.testing_utils.assert_model_is_valid', 'assert_model_is_valid', (['prog', "{'x': (2, 4)}"], {'expected_output_shapes': '{block.outputs[0].name: (2, 4)}'}), "(prog, {'x': (2, 4)}, expected_output_shapes={block.\n outputs[0].name: (2, 4)})\n", (4926, 5008), False, 'from coremltools.converters.mil.testing_utils import assert_model_is_valid, get_op_types_in_program, apply_pass_and_basic_check\n'), ((5286, 5346), 'coremltools.converters.mil.testing_utils.apply_pass_and_basic_check', 'apply_pass_and_basic_check', (['prog', '"""common::noop_elimination"""'], {}), "(prog, 'common::noop_elimination')\n", (5312, 5346), False, 'from coremltools.converters.mil.testing_utils import assert_model_is_valid, get_op_types_in_program, apply_pass_and_basic_check\n'), ((5493, 5596), 'coremltools.converters.mil.testing_utils.assert_model_is_valid', 'assert_model_is_valid', (['prog', "{'x': (2, 4)}"], {'expected_output_shapes': '{block.outputs[0].name: (2, 4)}'}), "(prog, {'x': (2, 4)}, expected_output_shapes={block.\n outputs[0].name: (2, 4)})\n", (5514, 5596), False, 'from coremltools.converters.mil.testing_utils import assert_model_is_valid, get_op_types_in_program, apply_pass_and_basic_check\n'), ((5871, 5931), 'coremltools.converters.mil.testing_utils.apply_pass_and_basic_check', 'apply_pass_and_basic_check', (['prog', '"""common::noop_elimination"""'], {}), "(prog, 'common::noop_elimination')\n", (5897, 5931), False, 'from coremltools.converters.mil.testing_utils import assert_model_is_valid, get_op_types_in_program, apply_pass_and_basic_check\n'), ((6079, 6182), 'coremltools.converters.mil.testing_utils.assert_model_is_valid', 'assert_model_is_valid', (['prog', "{'x': (2, 4)}"], {'expected_output_shapes': '{block.outputs[0].name: (2, 4)}'}), "(prog, {'x': (2, 4)}, expected_output_shapes={block.\n outputs[0].name: (2, 4)})\n", (6100, 6182), False, 'from coremltools.converters.mil.testing_utils import assert_model_is_valid, get_op_types_in_program, apply_pass_and_basic_check\n'), ((6940, 7000), 'coremltools.converters.mil.testing_utils.apply_pass_and_basic_check', 'apply_pass_and_basic_check', (['prog', '"""common::noop_elimination"""'], {}), "(prog, 'common::noop_elimination')\n", (6966, 7000), False, 'from coremltools.converters.mil.testing_utils import assert_model_is_valid, get_op_types_in_program, apply_pass_and_basic_check\n'), ((7148, 
7251), 'coremltools.converters.mil.testing_utils.assert_model_is_valid', 'assert_model_is_valid', (['prog', "{'x': (4, 4)}"], {'expected_output_shapes': '{block.outputs[0].name: (4, 4)}'}), "(prog, {'x': (4, 4)}, expected_output_shapes={block.\n outputs[0].name: (4, 4)})\n", (7169, 7251), False, 'from coremltools.converters.mil.testing_utils import assert_model_is_valid, get_op_types_in_program, apply_pass_and_basic_check\n'), ((7493, 7553), 'coremltools.converters.mil.testing_utils.apply_pass_and_basic_check', 'apply_pass_and_basic_check', (['prog', '"""common::noop_elimination"""'], {}), "(prog, 'common::noop_elimination')\n", (7519, 7553), False, 'from coremltools.converters.mil.testing_utils import assert_model_is_valid, get_op_types_in_program, apply_pass_and_basic_check\n'), ((7690, 7793), 'coremltools.converters.mil.testing_utils.assert_model_is_valid', 'assert_model_is_valid', (['prog', "{'x': (2, 4)}"], {'expected_output_shapes': '{block.outputs[0].name: (2, 4)}'}), "(prog, {'x': (2, 4)}, expected_output_shapes={block.\n outputs[0].name: (2, 4)})\n", (7711, 7793), False, 'from coremltools.converters.mil.testing_utils import assert_model_is_valid, get_op_types_in_program, apply_pass_and_basic_check\n'), ((8028, 8088), 'coremltools.converters.mil.testing_utils.apply_pass_and_basic_check', 'apply_pass_and_basic_check', (['prog', '"""common::noop_elimination"""'], {}), "(prog, 'common::noop_elimination')\n", (8054, 8088), False, 'from coremltools.converters.mil.testing_utils import assert_model_is_valid, get_op_types_in_program, apply_pass_and_basic_check\n'), ((8232, 8336), 'coremltools.converters.mil.testing_utils.assert_model_is_valid', 'assert_model_is_valid', (['prog', "{'x': (2, 4)}"], {'expected_output_shapes': '{block.outputs[0].name: (10, 8)}'}), "(prog, {'x': (2, 4)}, expected_output_shapes={block.\n outputs[0].name: (10, 8)})\n", (8253, 8336), False, 'from coremltools.converters.mil.testing_utils import assert_model_is_valid, get_op_types_in_program, apply_pass_and_basic_check\n'), ((8575, 8635), 'coremltools.converters.mil.testing_utils.apply_pass_and_basic_check', 'apply_pass_and_basic_check', (['prog', '"""common::noop_elimination"""'], {}), "(prog, 'common::noop_elimination')\n", (8601, 8635), False, 'from coremltools.converters.mil.testing_utils import assert_model_is_valid, get_op_types_in_program, apply_pass_and_basic_check\n'), ((8773, 8876), 'coremltools.converters.mil.testing_utils.assert_model_is_valid', 'assert_model_is_valid', (['prog', "{'x': (2, 4)}"], {'expected_output_shapes': '{block.outputs[0].name: (2, 4)}'}), "(prog, {'x': (2, 4)}, expected_output_shapes={block.\n outputs[0].name: (2, 4)})\n", (8794, 8876), False, 'from coremltools.converters.mil.testing_utils import assert_model_is_valid, get_op_types_in_program, apply_pass_and_basic_check\n'), ((9108, 9168), 'coremltools.converters.mil.testing_utils.apply_pass_and_basic_check', 'apply_pass_and_basic_check', (['prog', '"""common::noop_elimination"""'], {}), "(prog, 'common::noop_elimination')\n", (9134, 9168), False, 'from coremltools.converters.mil.testing_utils import assert_model_is_valid, get_op_types_in_program, apply_pass_and_basic_check\n'), ((9314, 9417), 'coremltools.converters.mil.testing_utils.assert_model_is_valid', 'assert_model_is_valid', (['prog', "{'x': (2, 4)}"], {'expected_output_shapes': '{block.outputs[0].name: (4, 8)}'}), "(prog, {'x': (2, 4)}, expected_output_shapes={block.\n outputs[0].name: (4, 8)})\n", (9335, 9417), False, 'from coremltools.converters.mil.testing_utils import 
assert_model_is_valid, get_op_types_in_program, apply_pass_and_basic_check\n'), ((9688, 9748), 'coremltools.converters.mil.testing_utils.apply_pass_and_basic_check', 'apply_pass_and_basic_check', (['prog', '"""common::noop_elimination"""'], {}), "(prog, 'common::noop_elimination')\n", (9714, 9748), False, 'from coremltools.converters.mil.testing_utils import assert_model_is_valid, get_op_types_in_program, apply_pass_and_basic_check\n'), ((9907, 10016), 'coremltools.converters.mil.testing_utils.assert_model_is_valid', 'assert_model_is_valid', (['prog', "{'x': (3, 2, 4)}"], {'expected_output_shapes': '{block.outputs[0].name: (3, 2, 4)}'}), "(prog, {'x': (3, 2, 4)}, expected_output_shapes={block\n .outputs[0].name: (3, 2, 4)})\n", (9928, 10016), False, 'from coremltools.converters.mil.testing_utils import assert_model_is_valid, get_op_types_in_program, apply_pass_and_basic_check\n'), ((10271, 10331), 'coremltools.converters.mil.testing_utils.apply_pass_and_basic_check', 'apply_pass_and_basic_check', (['prog', '"""common::noop_elimination"""'], {}), "(prog, 'common::noop_elimination')\n", (10297, 10331), False, 'from coremltools.converters.mil.testing_utils import assert_model_is_valid, get_op_types_in_program, apply_pass_and_basic_check\n'), ((10482, 10591), 'coremltools.converters.mil.testing_utils.assert_model_is_valid', 'assert_model_is_valid', (['prog', "{'x': (3, 2, 4)}"], {'expected_output_shapes': '{block.outputs[0].name: (3, 2, 4)}'}), "(prog, {'x': (3, 2, 4)}, expected_output_shapes={block\n .outputs[0].name: (3, 2, 4)})\n", (10503, 10591), False, 'from coremltools.converters.mil.testing_utils import assert_model_is_valid, get_op_types_in_program, apply_pass_and_basic_check\n'), ((10885, 10945), 'coremltools.converters.mil.testing_utils.apply_pass_and_basic_check', 'apply_pass_and_basic_check', (['prog', '"""common::noop_elimination"""'], {}), "(prog, 'common::noop_elimination')\n", (10911, 10945), False, 'from coremltools.converters.mil.testing_utils import assert_model_is_valid, get_op_types_in_program, apply_pass_and_basic_check\n'), ((11094, 11203), 'coremltools.converters.mil.testing_utils.assert_model_is_valid', 'assert_model_is_valid', (['prog', "{'x': (3, 2, 4)}"], {'expected_output_shapes': '{block.outputs[0].name: (3, 2, 4)}'}), "(prog, {'x': (3, 2, 4)}, expected_output_shapes={block\n .outputs[0].name: (3, 2, 4)})\n", (11115, 11203), False, 'from coremltools.converters.mil.testing_utils import assert_model_is_valid, get_op_types_in_program, apply_pass_and_basic_check\n'), ((11471, 11531), 'coremltools.converters.mil.testing_utils.apply_pass_and_basic_check', 'apply_pass_and_basic_check', (['prog', '"""common::noop_elimination"""'], {}), "(prog, 'common::noop_elimination')\n", (11497, 11531), False, 'from coremltools.converters.mil.testing_utils import assert_model_is_valid, get_op_types_in_program, apply_pass_and_basic_check\n'), ((11669, 11778), 'coremltools.converters.mil.testing_utils.assert_model_is_valid', 'assert_model_is_valid', (['prog', "{'x': (3, 2, 4)}"], {'expected_output_shapes': '{block.outputs[0].name: (3, 2, 4)}'}), "(prog, {'x': (3, 2, 4)}, expected_output_shapes={block\n .outputs[0].name: (3, 2, 4)})\n", (11690, 11778), False, 'from coremltools.converters.mil.testing_utils import assert_model_is_valid, get_op_types_in_program, apply_pass_and_basic_check\n'), ((12040, 12100), 'coremltools.converters.mil.testing_utils.apply_pass_and_basic_check', 'apply_pass_and_basic_check', (['prog', '"""common::noop_elimination"""'], {}), "(prog, 
'common::noop_elimination')\n", (12066, 12100), False, 'from coremltools.converters.mil.testing_utils import assert_model_is_valid, get_op_types_in_program, apply_pass_and_basic_check\n'), ((12251, 12354), 'coremltools.converters.mil.testing_utils.assert_model_is_valid', 'assert_model_is_valid', (['prog', "{'x': (2, 4)}"], {'expected_output_shapes': '{block.outputs[0].name: (2, 4)}'}), "(prog, {'x': (2, 4)}, expected_output_shapes={block.\n outputs[0].name: (2, 4)})\n", (12272, 12354), False, 'from coremltools.converters.mil.testing_utils import assert_model_is_valid, get_op_types_in_program, apply_pass_and_basic_check\n'), ((1153, 1166), 'coremltools.converters.mil.mil.Builder.relu', 'mb.relu', ([], {'x': 'r1'}), '(x=r1)\n', (1160, 1166), True, 'from coremltools.converters.mil.mil import Builder as mb\n'), ((1867, 1901), 'coremltools.converters.mil.testing_utils.get_op_types_in_program', 'get_op_types_in_program', (['prev_prog'], {}), '(prev_prog)\n', (1890, 1901), False, 'from coremltools.converters.mil.testing_utils import assert_model_is_valid, get_op_types_in_program, apply_pass_and_basic_check\n'), ((1933, 1962), 'coremltools.converters.mil.testing_utils.get_op_types_in_program', 'get_op_types_in_program', (['prog'], {}), '(prog)\n', (1956, 1962), False, 'from coremltools.converters.mil.testing_utils import assert_model_is_valid, get_op_types_in_program, apply_pass_and_basic_check\n'), ((2234, 2277), 'coremltools.converters.mil.mil.Builder.add', 'mb.add', ([], {'x': 'x', 'y': '[[0, 0, 0, 0], [0, 0, 0, 0]]'}), '(x=x, y=[[0, 0, 0, 0], [0, 0, 0, 0]])\n', (2240, 2277), True, 'from coremltools.converters.mil.mil import Builder as mb\n'), ((2293, 2306), 'coremltools.converters.mil.mil.Builder.relu', 'mb.relu', ([], {'x': 'r1'}), '(x=r1)\n', (2300, 2306), True, 'from coremltools.converters.mil.mil import Builder as mb\n'), ((2469, 2503), 'coremltools.converters.mil.testing_utils.get_op_types_in_program', 'get_op_types_in_program', (['prev_prog'], {}), '(prev_prog)\n', (2492, 2503), False, 'from coremltools.converters.mil.testing_utils import assert_model_is_valid, get_op_types_in_program, apply_pass_and_basic_check\n'), ((2535, 2564), 'coremltools.converters.mil.testing_utils.get_op_types_in_program', 'get_op_types_in_program', (['prog'], {}), '(prog)\n', (2558, 2564), False, 'from coremltools.converters.mil.testing_utils import assert_model_is_valid, get_op_types_in_program, apply_pass_and_basic_check\n'), ((2838, 2867), 'coremltools.converters.mil.mil.Builder.reshape', 'mb.reshape', ([], {'x': 'x', 'shape': '[1, 8]'}), '(x=x, shape=[1, 8])\n', (2848, 2867), True, 'from coremltools.converters.mil.mil import Builder as mb\n'), ((2881, 2911), 'coremltools.converters.mil.mil.Builder.reshape', 'mb.reshape', ([], {'x': 'r1', 'shape': '[1, 8]'}), '(x=r1, shape=[1, 8])\n', (2891, 2911), True, 'from coremltools.converters.mil.mil import Builder as mb\n'), ((2927, 2940), 'coremltools.converters.mil.mil.Builder.relu', 'mb.relu', ([], {'x': 'r1'}), '(x=r1)\n', (2934, 2940), True, 'from coremltools.converters.mil.mil import Builder as mb\n'), ((3063, 3097), 'coremltools.converters.mil.testing_utils.get_op_types_in_program', 'get_op_types_in_program', (['prev_prog'], {}), '(prev_prog)\n', (3086, 3097), False, 'from coremltools.converters.mil.testing_utils import assert_model_is_valid, get_op_types_in_program, apply_pass_and_basic_check\n'), ((3143, 3172), 'coremltools.converters.mil.testing_utils.get_op_types_in_program', 'get_op_types_in_program', (['prog'], {}), '(prog)\n', (3166, 3172), False, 'from 
coremltools.converters.mil.testing_utils import assert_model_is_valid, get_op_types_in_program, apply_pass_and_basic_check\n'), ((3458, 3494), 'coremltools.converters.mil.mil.Builder.split', 'mb.split', ([], {'x': 'x', 'num_splits': '(1)', 'axis': '(-1)'}), '(x=x, num_splits=1, axis=-1)\n', (3466, 3494), True, 'from coremltools.converters.mil.mil import Builder as mb\n'), ((3511, 3524), 'coremltools.converters.mil.mil.Builder.relu', 'mb.relu', ([], {'x': 'r1'}), '(x=r1)\n', (3518, 3524), True, 'from coremltools.converters.mil.mil import Builder as mb\n'), ((3647, 3681), 'coremltools.converters.mil.testing_utils.get_op_types_in_program', 'get_op_types_in_program', (['prev_prog'], {}), '(prev_prog)\n', (3670, 3681), False, 'from coremltools.converters.mil.testing_utils import assert_model_is_valid, get_op_types_in_program, apply_pass_and_basic_check\n'), ((3714, 3743), 'coremltools.converters.mil.testing_utils.get_op_types_in_program', 'get_op_types_in_program', (['prog'], {}), '(prog)\n', (3737, 3743), False, 'from coremltools.converters.mil.testing_utils import assert_model_is_valid, get_op_types_in_program, apply_pass_and_basic_check\n'), ((4016, 4055), 'coremltools.converters.mil.mil.Builder.split', 'mb.split', ([], {'x': 'x', 'split_sizes': '[4]', 'axis': '(-1)'}), '(x=x, split_sizes=[4], axis=-1)\n', (4024, 4055), True, 'from coremltools.converters.mil.mil import Builder as mb\n'), ((4072, 4085), 'coremltools.converters.mil.mil.Builder.relu', 'mb.relu', ([], {'x': 'r1'}), '(x=r1)\n', (4079, 4085), True, 'from coremltools.converters.mil.mil import Builder as mb\n'), ((4208, 4242), 'coremltools.converters.mil.testing_utils.get_op_types_in_program', 'get_op_types_in_program', (['prev_prog'], {}), '(prev_prog)\n', (4231, 4242), False, 'from coremltools.converters.mil.testing_utils import assert_model_is_valid, get_op_types_in_program, apply_pass_and_basic_check\n'), ((4275, 4304), 'coremltools.converters.mil.testing_utils.get_op_types_in_program', 'get_op_types_in_program', (['prog'], {}), '(prog)\n', (4298, 4304), False, 'from coremltools.converters.mil.testing_utils import assert_model_is_valid, get_op_types_in_program, apply_pass_and_basic_check\n'), ((4583, 4631), 'coremltools.converters.mil.mil.Builder.slice_by_size', 'mb.slice_by_size', ([], {'x': 'x', 'begin': '[0, 0]', 'size': '[2, 4]'}), '(x=x, begin=[0, 0], size=[2, 4])\n', (4599, 4631), True, 'from coremltools.converters.mil.mil import Builder as mb\n'), ((4648, 4661), 'coremltools.converters.mil.mil.Builder.relu', 'mb.relu', ([], {'x': 'r1'}), '(x=r1)\n', (4655, 4661), True, 'from coremltools.converters.mil.mil import Builder as mb\n'), ((4784, 4818), 'coremltools.converters.mil.testing_utils.get_op_types_in_program', 'get_op_types_in_program', (['prev_prog'], {}), '(prev_prog)\n', (4807, 4818), False, 'from coremltools.converters.mil.testing_utils import assert_model_is_valid, get_op_types_in_program, apply_pass_and_basic_check\n'), ((4859, 4888), 'coremltools.converters.mil.testing_utils.get_op_types_in_program', 'get_op_types_in_program', (['prog'], {}), '(prog)\n', (4882, 4888), False, 'from coremltools.converters.mil.testing_utils import assert_model_is_valid, get_op_types_in_program, apply_pass_and_basic_check\n'), ((5169, 5219), 'coremltools.converters.mil.mil.Builder.slice_by_size', 'mb.slice_by_size', ([], {'x': 'x', 'begin': '[0, 0]', 'size': '[-1, -1]'}), '(x=x, begin=[0, 0], size=[-1, -1])\n', (5185, 5219), True, 'from coremltools.converters.mil.mil import Builder as mb\n'), ((5236, 5249), 
'coremltools.converters.mil.mil.Builder.relu', 'mb.relu', ([], {'x': 'r1'}), '(x=r1)\n', (5243, 5249), True, 'from coremltools.converters.mil.mil import Builder as mb\n'), ((5372, 5406), 'coremltools.converters.mil.testing_utils.get_op_types_in_program', 'get_op_types_in_program', (['prev_prog'], {}), '(prev_prog)\n', (5395, 5406), False, 'from coremltools.converters.mil.testing_utils import assert_model_is_valid, get_op_types_in_program, apply_pass_and_basic_check\n'), ((5447, 5476), 'coremltools.converters.mil.testing_utils.get_op_types_in_program', 'get_op_types_in_program', (['prog'], {}), '(prog)\n', (5470, 5476), False, 'from coremltools.converters.mil.testing_utils import assert_model_is_valid, get_op_types_in_program, apply_pass_and_basic_check\n'), ((5756, 5804), 'coremltools.converters.mil.mil.Builder.slice_by_index', 'mb.slice_by_index', ([], {'x': 'x', 'begin': '[0, 0]', 'end': '[2, 4]'}), '(x=x, begin=[0, 0], end=[2, 4])\n', (5773, 5804), True, 'from coremltools.converters.mil.mil import Builder as mb\n'), ((5821, 5834), 'coremltools.converters.mil.mil.Builder.relu', 'mb.relu', ([], {'x': 'r1'}), '(x=r1)\n', (5828, 5834), True, 'from coremltools.converters.mil.mil import Builder as mb\n'), ((5957, 5991), 'coremltools.converters.mil.testing_utils.get_op_types_in_program', 'get_op_types_in_program', (['prev_prog'], {}), '(prev_prog)\n', (5980, 5991), False, 'from coremltools.converters.mil.testing_utils import assert_model_is_valid, get_op_types_in_program, apply_pass_and_basic_check\n'), ((6033, 6062), 'coremltools.converters.mil.testing_utils.get_op_types_in_program', 'get_op_types_in_program', (['prog'], {}), '(prog)\n', (6056, 6062), False, 'from coremltools.converters.mil.testing_utils import assert_model_is_valid, get_op_types_in_program, apply_pass_and_basic_check\n'), ((6787, 6877), 'coremltools.converters.mil.mil.Builder.slice_by_index', 'mb.slice_by_index', ([], {'x': 'x', 'begin': 'begin', 'end': 'end', 'begin_mask': 'begin_mask', 'end_mask': 'end_mask'}), '(x=x, begin=begin, end=end, begin_mask=begin_mask,\n end_mask=end_mask)\n', (6804, 6877), True, 'from coremltools.converters.mil.mil import Builder as mb\n'), ((6890, 6903), 'coremltools.converters.mil.mil.Builder.relu', 'mb.relu', ([], {'x': 'r1'}), '(x=r1)\n', (6897, 6903), True, 'from coremltools.converters.mil.mil import Builder as mb\n'), ((7026, 7060), 'coremltools.converters.mil.testing_utils.get_op_types_in_program', 'get_op_types_in_program', (['prev_prog'], {}), '(prev_prog)\n', (7049, 7060), False, 'from coremltools.converters.mil.testing_utils import assert_model_is_valid, get_op_types_in_program, apply_pass_and_basic_check\n'), ((7102, 7131), 'coremltools.converters.mil.testing_utils.get_op_types_in_program', 'get_op_types_in_program', (['prog'], {}), '(prog)\n', (7125, 7131), False, 'from coremltools.converters.mil.testing_utils import assert_model_is_valid, get_op_types_in_program, apply_pass_and_basic_check\n'), ((6303, 6350), 'itertools.product', 'itertools.product', (['[True, False]', '[True, False]'], {}), '([True, False], [True, False])\n', (6320, 6350), False, 'import itertools\n'), ((6394, 6441), 'itertools.product', 'itertools.product', (['[True, False]', '[True, False]'], {}), '([True, False], [True, False])\n', (6411, 6441), False, 'import itertools\n'), ((7397, 7426), 'coremltools.converters.mil.mil.Builder.pad', 'mb.pad', ([], {'x': 'x', 'pad': '[0, 0, 0, 0]'}), '(x=x, pad=[0, 0, 0, 0])\n', (7403, 7426), True, 'from coremltools.converters.mil.mil import Builder as mb\n'), ((7443, 7456), 
'coremltools.converters.mil.mil.Builder.relu', 'mb.relu', ([], {'x': 'r1'}), '(x=r1)\n', (7450, 7456), True, 'from coremltools.converters.mil.mil import Builder as mb\n'), ((7579, 7613), 'coremltools.converters.mil.testing_utils.get_op_types_in_program', 'get_op_types_in_program', (['prev_prog'], {}), '(prev_prog)\n', (7602, 7613), False, 'from coremltools.converters.mil.testing_utils import assert_model_is_valid, get_op_types_in_program, apply_pass_and_basic_check\n'), ((7644, 7673), 'coremltools.converters.mil.testing_utils.get_op_types_in_program', 'get_op_types_in_program', (['prog'], {}), '(prog)\n', (7667, 7673), False, 'from coremltools.converters.mil.testing_utils import assert_model_is_valid, get_op_types_in_program, apply_pass_and_basic_check\n'), ((7932, 7961), 'coremltools.converters.mil.mil.Builder.pad', 'mb.pad', ([], {'x': 'x', 'pad': '[4, 4, 2, 2]'}), '(x=x, pad=[4, 4, 2, 2])\n', (7938, 7961), True, 'from coremltools.converters.mil.mil import Builder as mb\n'), ((7978, 7991), 'coremltools.converters.mil.mil.Builder.relu', 'mb.relu', ([], {'x': 'r1'}), '(x=r1)\n', (7985, 7991), True, 'from coremltools.converters.mil.mil import Builder as mb\n'), ((8114, 8148), 'coremltools.converters.mil.testing_utils.get_op_types_in_program', 'get_op_types_in_program', (['prev_prog'], {}), '(prev_prog)\n', (8137, 8148), False, 'from coremltools.converters.mil.testing_utils import assert_model_is_valid, get_op_types_in_program, apply_pass_and_basic_check\n'), ((8179, 8208), 'coremltools.converters.mil.testing_utils.get_op_types_in_program', 'get_op_types_in_program', (['prog'], {}), '(prog)\n', (8202, 8208), False, 'from coremltools.converters.mil.testing_utils import assert_model_is_valid, get_op_types_in_program, apply_pass_and_basic_check\n'), ((8483, 8508), 'coremltools.converters.mil.mil.Builder.tile', 'mb.tile', ([], {'x': 'x', 'reps': '[1, 1]'}), '(x=x, reps=[1, 1])\n', (8490, 8508), True, 'from coremltools.converters.mil.mil import Builder as mb\n'), ((8525, 8538), 'coremltools.converters.mil.mil.Builder.relu', 'mb.relu', ([], {'x': 'r1'}), '(x=r1)\n', (8532, 8538), True, 'from coremltools.converters.mil.mil import Builder as mb\n'), ((8661, 8695), 'coremltools.converters.mil.testing_utils.get_op_types_in_program', 'get_op_types_in_program', (['prev_prog'], {}), '(prev_prog)\n', (8684, 8695), False, 'from coremltools.converters.mil.testing_utils import assert_model_is_valid, get_op_types_in_program, apply_pass_and_basic_check\n'), ((8727, 8756), 'coremltools.converters.mil.testing_utils.get_op_types_in_program', 'get_op_types_in_program', (['prog'], {}), '(prog)\n', (8750, 8756), False, 'from coremltools.converters.mil.testing_utils import assert_model_is_valid, get_op_types_in_program, apply_pass_and_basic_check\n'), ((9016, 9041), 'coremltools.converters.mil.mil.Builder.tile', 'mb.tile', ([], {'x': 'x', 'reps': '[2, 2]'}), '(x=x, reps=[2, 2])\n', (9023, 9041), True, 'from coremltools.converters.mil.mil import Builder as mb\n'), ((9058, 9071), 'coremltools.converters.mil.mil.Builder.relu', 'mb.relu', ([], {'x': 'r1'}), '(x=r1)\n', (9065, 9071), True, 'from coremltools.converters.mil.mil import Builder as mb\n'), ((9194, 9228), 'coremltools.converters.mil.testing_utils.get_op_types_in_program', 'get_op_types_in_program', (['prev_prog'], {}), '(prev_prog)\n', (9217, 9228), False, 'from coremltools.converters.mil.testing_utils import assert_model_is_valid, get_op_types_in_program, apply_pass_and_basic_check\n'), ((9260, 9289), 
'coremltools.converters.mil.testing_utils.get_op_types_in_program', 'get_op_types_in_program', (['prog'], {}), '(prog)\n', (9283, 9289), False, 'from coremltools.converters.mil.testing_utils import assert_model_is_valid, get_op_types_in_program, apply_pass_and_basic_check\n'), ((9588, 9621), 'coremltools.converters.mil.mil.Builder.upsample_nearest_neighbor', 'mb.upsample_nearest_neighbor', ([], {'x': 'x'}), '(x=x)\n', (9616, 9621), True, 'from coremltools.converters.mil.mil import Builder as mb\n'), ((9638, 9651), 'coremltools.converters.mil.mil.Builder.relu', 'mb.relu', ([], {'x': 'r1'}), '(x=r1)\n', (9645, 9651), True, 'from coremltools.converters.mil.mil import Builder as mb\n'), ((9774, 9808), 'coremltools.converters.mil.testing_utils.get_op_types_in_program', 'get_op_types_in_program', (['prev_prog'], {}), '(prev_prog)\n', (9797, 9808), False, 'from coremltools.converters.mil.testing_utils import assert_model_is_valid, get_op_types_in_program, apply_pass_and_basic_check\n'), ((9861, 9890), 'coremltools.converters.mil.testing_utils.get_op_types_in_program', 'get_op_types_in_program', (['prog'], {}), '(prog)\n', (9884, 9890), False, 'from coremltools.converters.mil.testing_utils import assert_model_is_valid, get_op_types_in_program, apply_pass_and_basic_check\n'), ((10179, 10204), 'coremltools.converters.mil.mil.Builder.upsample_bilinear', 'mb.upsample_bilinear', ([], {'x': 'x'}), '(x=x)\n', (10199, 10204), True, 'from coremltools.converters.mil.mil import Builder as mb\n'), ((10221, 10234), 'coremltools.converters.mil.mil.Builder.relu', 'mb.relu', ([], {'x': 'r1'}), '(x=r1)\n', (10228, 10234), True, 'from coremltools.converters.mil.mil import Builder as mb\n'), ((10357, 10391), 'coremltools.converters.mil.testing_utils.get_op_types_in_program', 'get_op_types_in_program', (['prev_prog'], {}), '(prev_prog)\n', (10380, 10391), False, 'from coremltools.converters.mil.testing_utils import assert_model_is_valid, get_op_types_in_program, apply_pass_and_basic_check\n'), ((10436, 10465), 'coremltools.converters.mil.testing_utils.get_op_types_in_program', 'get_op_types_in_program', (['prog'], {}), '(prog)\n', (10459, 10465), False, 'from coremltools.converters.mil.testing_utils import assert_model_is_valid, get_op_types_in_program, apply_pass_and_basic_check\n'), ((10752, 10818), 'coremltools.converters.mil.mil.Builder.resize_bilinear', 'mb.resize_bilinear', ([], {'x': 'x', 'target_size_height': '(2)', 'target_size_width': '(4)'}), '(x=x, target_size_height=2, target_size_width=4)\n', (10770, 10818), True, 'from coremltools.converters.mil.mil import Builder as mb\n'), ((10835, 10848), 'coremltools.converters.mil.mil.Builder.relu', 'mb.relu', ([], {'x': 'r1'}), '(x=r1)\n', (10842, 10848), True, 'from coremltools.converters.mil.mil import Builder as mb\n'), ((10971, 11005), 'coremltools.converters.mil.testing_utils.get_op_types_in_program', 'get_op_types_in_program', (['prev_prog'], {}), '(prev_prog)\n', (10994, 11005), False, 'from coremltools.converters.mil.testing_utils import assert_model_is_valid, get_op_types_in_program, apply_pass_and_basic_check\n'), ((11048, 11077), 'coremltools.converters.mil.testing_utils.get_op_types_in_program', 'get_op_types_in_program', (['prog'], {}), '(prog)\n', (11071, 11077), False, 'from coremltools.converters.mil.testing_utils import assert_model_is_valid, get_op_types_in_program, apply_pass_and_basic_check\n'), ((11353, 11404), 'coremltools.converters.mil.mil.Builder.crop', 'mb.crop', ([], {'x': 'x', 'crop_height': '[0, 0]', 'crop_width': '[0, 0]'}), '(x=x, 
crop_height=[0, 0], crop_width=[0, 0])\n', (11360, 11404), True, 'from coremltools.converters.mil.mil import Builder as mb\n'), ((11421, 11434), 'coremltools.converters.mil.mil.Builder.relu', 'mb.relu', ([], {'x': 'r1'}), '(x=r1)\n', (11428, 11434), True, 'from coremltools.converters.mil.mil import Builder as mb\n'), ((11557, 11591), 'coremltools.converters.mil.testing_utils.get_op_types_in_program', 'get_op_types_in_program', (['prev_prog'], {}), '(prev_prog)\n', (11580, 11591), False, 'from coremltools.converters.mil.testing_utils import assert_model_is_valid, get_op_types_in_program, apply_pass_and_basic_check\n'), ((11623, 11652), 'coremltools.converters.mil.testing_utils.get_op_types_in_program', 'get_op_types_in_program', (['prog'], {}), '(prog)\n', (11646, 11652), False, 'from coremltools.converters.mil.testing_utils import assert_model_is_valid, get_op_types_in_program, apply_pass_and_basic_check\n'), ((11927, 11973), 'coremltools.converters.mil.mil.Builder.linear_activation', 'mb.linear_activation', ([], {'x': 'x', 'alpha': '(1.0)', 'beta': '(0.0)'}), '(x=x, alpha=1.0, beta=0.0)\n', (11947, 11973), True, 'from coremltools.converters.mil.mil import Builder as mb\n'), ((11990, 12003), 'coremltools.converters.mil.mil.Builder.relu', 'mb.relu', ([], {'x': 'r1'}), '(x=r1)\n', (11997, 12003), True, 'from coremltools.converters.mil.mil import Builder as mb\n'), ((12126, 12160), 'coremltools.converters.mil.testing_utils.get_op_types_in_program', 'get_op_types_in_program', (['prev_prog'], {}), '(prev_prog)\n', (12149, 12160), False, 'from coremltools.converters.mil.testing_utils import assert_model_is_valid, get_op_types_in_program, apply_pass_and_basic_check\n'), ((12205, 12234), 'coremltools.converters.mil.testing_utils.get_op_types_in_program', 'get_op_types_in_program', (['prog'], {}), '(prog)\n', (12228, 12234), False, 'from coremltools.converters.mil.testing_utils import assert_model_is_valid, get_op_types_in_program, apply_pass_and_basic_check\n'), ((813, 825), 'numpy.prod', 'np.prod', (['val'], {}), '(val)\n', (820, 825), True, 'import numpy as np\n'), ((980, 1007), 'coremltools.converters.mil.mil.Builder.TensorSpec', 'mb.TensorSpec', ([], {'shape': '(2, 4)'}), '(shape=(2, 4))\n', (993, 1007), True, 'from coremltools.converters.mil.mil import Builder as mb\n'), ((2177, 2201), 'coremltools.converters.mil.mil.Builder.TensorSpec', 'mb.TensorSpec', ([], {'shape': '[4]'}), '(shape=[4])\n', (2190, 2201), True, 'from coremltools.converters.mil.mil import Builder as mb\n'), ((2778, 2805), 'coremltools.converters.mil.mil.Builder.TensorSpec', 'mb.TensorSpec', ([], {'shape': '(2, 4)'}), '(shape=(2, 4))\n', (2791, 2805), True, 'from coremltools.converters.mil.mil import Builder as mb\n'), ((3398, 3425), 'coremltools.converters.mil.mil.Builder.TensorSpec', 'mb.TensorSpec', ([], {'shape': '(2, 4)'}), '(shape=(2, 4))\n', (3411, 3425), True, 'from coremltools.converters.mil.mil import Builder as mb\n'), ((3956, 3983), 'coremltools.converters.mil.mil.Builder.TensorSpec', 'mb.TensorSpec', ([], {'shape': '(2, 4)'}), '(shape=(2, 4))\n', (3969, 3983), True, 'from coremltools.converters.mil.mil import Builder as mb\n'), ((4523, 4550), 'coremltools.converters.mil.mil.Builder.TensorSpec', 'mb.TensorSpec', ([], {'shape': '(2, 4)'}), '(shape=(2, 4))\n', (4536, 4550), True, 'from coremltools.converters.mil.mil import Builder as mb\n'), ((5109, 5136), 'coremltools.converters.mil.mil.Builder.TensorSpec', 'mb.TensorSpec', ([], {'shape': '(2, 4)'}), '(shape=(2, 4))\n', (5122, 5136), True, 'from 
coremltools.converters.mil.mil import Builder as mb\n'), ((5696, 5723), 'coremltools.converters.mil.mil.Builder.TensorSpec', 'mb.TensorSpec', ([], {'shape': '(2, 4)'}), '(shape=(2, 4))\n', (5709, 5723), True, 'from coremltools.converters.mil.mil import Builder as mb\n'), ((6534, 6561), 'coremltools.converters.mil.mil.Builder.TensorSpec', 'mb.TensorSpec', ([], {'shape': '(4, 4)'}), '(shape=(4, 4))\n', (6547, 6561), True, 'from coremltools.converters.mil.mil import Builder as mb\n'), ((7337, 7364), 'coremltools.converters.mil.mil.Builder.TensorSpec', 'mb.TensorSpec', ([], {'shape': '(2, 4)'}), '(shape=(2, 4))\n', (7350, 7364), True, 'from coremltools.converters.mil.mil import Builder as mb\n'), ((7872, 7899), 'coremltools.converters.mil.mil.Builder.TensorSpec', 'mb.TensorSpec', ([], {'shape': '(2, 4)'}), '(shape=(2, 4))\n', (7885, 7899), True, 'from coremltools.converters.mil.mil import Builder as mb\n'), ((8423, 8450), 'coremltools.converters.mil.mil.Builder.TensorSpec', 'mb.TensorSpec', ([], {'shape': '(2, 4)'}), '(shape=(2, 4))\n', (8436, 8450), True, 'from coremltools.converters.mil.mil import Builder as mb\n'), ((8956, 8983), 'coremltools.converters.mil.mil.Builder.TensorSpec', 'mb.TensorSpec', ([], {'shape': '(2, 4)'}), '(shape=(2, 4))\n', (8969, 8983), True, 'from coremltools.converters.mil.mil import Builder as mb\n'), ((9525, 9555), 'coremltools.converters.mil.mil.Builder.TensorSpec', 'mb.TensorSpec', ([], {'shape': '(3, 2, 4)'}), '(shape=(3, 2, 4))\n', (9538, 9555), True, 'from coremltools.converters.mil.mil import Builder as mb\n'), ((10116, 10146), 'coremltools.converters.mil.mil.Builder.TensorSpec', 'mb.TensorSpec', ([], {'shape': '(3, 2, 4)'}), '(shape=(3, 2, 4))\n', (10129, 10146), True, 'from coremltools.converters.mil.mil import Builder as mb\n'), ((10689, 10719), 'coremltools.converters.mil.mil.Builder.TensorSpec', 'mb.TensorSpec', ([], {'shape': '(3, 2, 4)'}), '(shape=(3, 2, 4))\n', (10702, 10719), True, 'from coremltools.converters.mil.mil import Builder as mb\n'), ((11290, 11320), 'coremltools.converters.mil.mil.Builder.TensorSpec', 'mb.TensorSpec', ([], {'shape': '(3, 2, 4)'}), '(shape=(3, 2, 4))\n', (11303, 11320), True, 'from coremltools.converters.mil.mil import Builder as mb\n'), ((11867, 11894), 'coremltools.converters.mil.mil.Builder.TensorSpec', 'mb.TensorSpec', ([], {'shape': '(2, 4)'}), '(shape=(2, 4))\n', (11880, 11894), True, 'from coremltools.converters.mil.mil import Builder as mb\n')]
|
from blaze.expr import symbol
import numpy as np
from datashape import dshape, isscalar
def test_array_dshape():
x = symbol('x', '5 * 3 * float32')
assert x.shape == (5, 3)
assert x.schema == dshape('float32')
assert len(x) == 5
assert x.ndim == 2
def test_element():
x = symbol('x', '5 * 3 * float32')
assert isscalar(x[1, 2].dshape)
assert x[1, 2].dshape == dshape('float32')
assert str(x[1, 2]) == 'x[1, 2]'
x = symbol('x', '5 * float32')
assert isscalar(x[3].dshape)
def test_slice():
x = symbol('x', '5 * 3 * {name: string, amount: float32}')
assert x[2:, 0].dshape == dshape('3 * {name: string, amount: float32}')
assert x[2:].dshape == x[2:, :].dshape
# Make sure that these are hashable
hash(x[:2])
hash(x[0, :2])
assert str(x[1]) == 'x[1]'
assert str(x[:2]) == 'x[:2]'
assert str(x[0, :2]) == 'x[0, :2]'
assert str(x[1:4:2, :2]) == 'x[1:4:2, :2]'
def test_negative_slice():
x = symbol('x', '10 * 10 * int32')
assert x[:5, -3:].shape == (5, 3)
def test_None_slice():
x = symbol('x', '10 * 10 * int32')
assert x[:5, None, -3:].shape == (5, 1, 3)
def test_list_slice():
x = symbol('x', '10 * 10 * int32')
assert x[[1, 2, 3], [4, 5]].shape == (3, 2)
def test_list_slice_string():
x = symbol('x', '10 * 10 * int32')
assert str(x[[1, 2, 3]]) == "x[[1, 2, 3]]"
def test_slice_with_boolean_list():
x = symbol('x', '5 * int32')
expr = x[[True, False, False, True, False]]
assert expr.index == ([0, 3],)
def test_slice_with_numpy_array():
x = symbol('x', '2 * int32')
assert x[np.array([True, False])].isidentical(x[[True, False]])
|
[
"numpy.array",
"datashape.isscalar",
"datashape.dshape",
"blaze.expr.symbol"
] |
[((123, 153), 'blaze.expr.symbol', 'symbol', (['"""x"""', '"""5 * 3 * float32"""'], {}), "('x', '5 * 3 * float32')\n", (129, 153), False, 'from blaze.expr import symbol\n'), ((300, 330), 'blaze.expr.symbol', 'symbol', (['"""x"""', '"""5 * 3 * float32"""'], {}), "('x', '5 * 3 * float32')\n", (306, 330), False, 'from blaze.expr import symbol\n'), ((342, 366), 'datashape.isscalar', 'isscalar', (['x[1, 2].dshape'], {}), '(x[1, 2].dshape)\n', (350, 366), False, 'from datashape import dshape, isscalar\n'), ((461, 487), 'blaze.expr.symbol', 'symbol', (['"""x"""', '"""5 * float32"""'], {}), "('x', '5 * float32')\n", (467, 487), False, 'from blaze.expr import symbol\n'), ((499, 520), 'datashape.isscalar', 'isscalar', (['x[3].dshape'], {}), '(x[3].dshape)\n', (507, 520), False, 'from datashape import dshape, isscalar\n'), ((549, 603), 'blaze.expr.symbol', 'symbol', (['"""x"""', '"""5 * 3 * {name: string, amount: float32}"""'], {}), "('x', '5 * 3 * {name: string, amount: float32}')\n", (555, 603), False, 'from blaze.expr import symbol\n'), ((988, 1018), 'blaze.expr.symbol', 'symbol', (['"""x"""', '"""10 * 10 * int32"""'], {}), "('x', '10 * 10 * int32')\n", (994, 1018), False, 'from blaze.expr import symbol\n'), ((1090, 1120), 'blaze.expr.symbol', 'symbol', (['"""x"""', '"""10 * 10 * int32"""'], {}), "('x', '10 * 10 * int32')\n", (1096, 1120), False, 'from blaze.expr import symbol\n'), ((1201, 1231), 'blaze.expr.symbol', 'symbol', (['"""x"""', '"""10 * 10 * int32"""'], {}), "('x', '10 * 10 * int32')\n", (1207, 1231), False, 'from blaze.expr import symbol\n'), ((1320, 1350), 'blaze.expr.symbol', 'symbol', (['"""x"""', '"""10 * 10 * int32"""'], {}), "('x', '10 * 10 * int32')\n", (1326, 1350), False, 'from blaze.expr import symbol\n'), ((1444, 1468), 'blaze.expr.symbol', 'symbol', (['"""x"""', '"""5 * int32"""'], {}), "('x', '5 * int32')\n", (1450, 1468), False, 'from blaze.expr import symbol\n'), ((1597, 1621), 'blaze.expr.symbol', 'symbol', (['"""x"""', '"""2 * int32"""'], {}), "('x', '2 * int32')\n", (1603, 1621), False, 'from blaze.expr import symbol\n'), ((206, 223), 'datashape.dshape', 'dshape', (['"""float32"""'], {}), "('float32')\n", (212, 223), False, 'from datashape import dshape, isscalar\n'), ((396, 413), 'datashape.dshape', 'dshape', (['"""float32"""'], {}), "('float32')\n", (402, 413), False, 'from datashape import dshape, isscalar\n'), ((634, 679), 'datashape.dshape', 'dshape', (['"""3 * {name: string, amount: float32}"""'], {}), "('3 * {name: string, amount: float32}')\n", (640, 679), False, 'from datashape import dshape, isscalar\n'), ((1635, 1658), 'numpy.array', 'np.array', (['[True, False]'], {}), '([True, False])\n', (1643, 1658), True, 'import numpy as np\n')]
|
# Copyright 2021 MosaicML. All Rights Reserved.
import os
from dataclasses import dataclass
from typing import List, Optional, Tuple
import numpy as np
import torch
import torch.utils.data
import yahp as hp
from PIL import Image
from torchvision import transforms
from torchvision.datasets import ImageFolder
from composer.core.types import Batch, Tensor
from composer.datasets.hparams import DataloaderSpec, DatasetHparams
class PreprocessingFn:
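    """Prefetch-time normalization for uint8 image batches.

    Lazily builds the standard ImageNet channel means and standard deviations (scaled by 255 so they
    apply directly to uint8 pixel values) as (1, 3, 1, 1) tensors on the batch's device, then converts
    the images to float and normalizes them, returning the batch together with its targets.
    """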
def __init__(self) -> None:
self.mean: Optional[Tensor] = None
self.std: Optional[Tensor] = None
def __call__(self, batch: Batch):
xs, ys = batch
assert isinstance(xs, Tensor)
assert isinstance(ys, Tensor)
device = xs.device
if self.mean is None:
self.mean = torch.tensor([0.485 * 255, 0.456 * 255, 0.406 * 255], device=device)
self.mean = self.mean.view(1, 3, 1, 1)
if self.std is None:
self.std = torch.tensor([0.229 * 255, 0.224 * 255, 0.225 * 255], device=device)
self.std = self.std.view(1, 3, 1, 1)
xs = xs.float()
xs = xs.sub_(self.mean).div_(self.std)
return xs, ys
def fast_collate(batch: List[Tuple[Image.Image, Tensor]], memory_format: torch.memory_format = torch.contiguous_format):
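    """Collate a list of (PIL image, target) pairs into a single uint8 NCHW tensor and an int64 target
    tensor, honoring the requested memory format. No float conversion or normalization happens here;
    that is deferred to PreprocessingFn so it can run on whatever device the batch ends up on.
    """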
imgs = [img[0] for img in batch]
targets = torch.tensor([target[1] for target in batch], dtype=torch.int64)
w = imgs[0].size[0]
h = imgs[0].size[1]
tensor = torch.zeros((len(imgs), 3, h, w), dtype=torch.uint8).contiguous(memory_format=memory_format)
for i, img in enumerate(imgs):
nump_array = np.asarray(img, dtype=np.uint8)
if nump_array.ndim < 3:
nump_array = np.expand_dims(nump_array, axis=-1)
nump_array = np.rollaxis(nump_array, 2).copy()
if nump_array.shape[0] != 3:
assert nump_array.shape[0] == 1, "unexpected shape"
nump_array = np.resize(nump_array, (3, h, w))
        assert tuple(tensor.shape)[1:] == nump_array.shape, "shape mismatch"
tensor[i] += torch.from_numpy(nump_array)
return tensor, targets
@dataclass
class ImagenetDatasetHparams(DatasetHparams):
"""Defines an instance of the ImageNet dataset for image classification.
Parameters:
resize_size (int): The resize size to use.
        crop_size (int): The crop size to use.
is_train (bool): Whether to load the training or validation dataset.
datadir (str): Data directory to use.
drop_last (bool): Whether to drop the last samples for the last batch.
shuffle (bool): Whether to shuffle the dataset for each epoch.
"""
resize_size: int = hp.required("resize size")
crop_size: int = hp.required("crop size")
is_train: bool = hp.required("whether to load the training or validation dataset")
datadir: str = hp.required("data directory")
drop_last: bool = hp.optional("Whether to drop the last samples for the last batch", default=True)
shuffle: bool = hp.optional("Whether to shuffle the dataset for each epoch", default=True)
def initialize_object(self) -> DataloaderSpec:
datadir = self.datadir
is_train = self.is_train
if is_train:
# include fixed-size resize before RandomResizedCrop in training only
# if requested (by specifying a size > 0)
train_resize_size = self.resize_size
train_transforms: List[torch.nn.Module] = []
if train_resize_size > 0:
train_transforms.append(transforms.Resize(train_resize_size))
# always include RandomResizedCrop and RandomHorizontalFlip
train_transforms += [
transforms.RandomResizedCrop(self.crop_size, scale=(0.08, 1.0), ratio=(0.75, 4.0 / 3.0)),
transforms.RandomHorizontalFlip()
]
transformation = transforms.Compose(train_transforms)
else:
transformation = transforms.Compose([
transforms.Resize(self.resize_size),
transforms.CenterCrop(self.crop_size),
])
split = "train" if is_train else "val"
return DataloaderSpec(
dataset=ImageFolder(os.path.join(datadir, split), transformation),
drop_last=self.drop_last,
collate_fn=fast_collate,
shuffle=self.shuffle,
prefetch_fn=PreprocessingFn(),
)
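# Illustrative sketch (not from the original module): one way this hparams class might be wired into a
# plain PyTorch DataLoader. The data directory is a placeholder, and we assume DataloaderSpec simply
# exposes its constructor arguments (dataset, drop_last, collate_fn, shuffle, prefetch_fn) as attributes.
if __name__ == "__main__":
    hparams = ImagenetDatasetHparams(
        resize_size=256,
        crop_size=224,
        is_train=True,
        datadir="/path/to/imagenet",  # placeholder path
        drop_last=True,
        shuffle=True,
    )
    spec = hparams.initialize_object()
    loader = torch.utils.data.DataLoader(
        spec.dataset,
        batch_size=256,
        shuffle=spec.shuffle,
        drop_last=spec.drop_last,
        collate_fn=spec.collate_fn,
    )
    for batch in loader:
        images, targets = spec.prefetch_fn(batch)  # normalize the uint8 batch on its current device
        break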
|
[
"torchvision.transforms.CenterCrop",
"torchvision.transforms.RandomResizedCrop",
"yahp.required",
"numpy.asarray",
"numpy.rollaxis",
"torch.from_numpy",
"torchvision.transforms.RandomHorizontalFlip",
"yahp.optional",
"torch.tensor",
"numpy.resize",
"os.path.join",
"numpy.expand_dims",
"torchvision.transforms.Resize",
"torchvision.transforms.Compose"
] |
[((1348, 1412), 'torch.tensor', 'torch.tensor', (['[target[1] for target in batch]'], {'dtype': 'torch.int64'}), '([target[1] for target in batch], dtype=torch.int64)\n', (1360, 1412), False, 'import torch\n'), ((2680, 2706), 'yahp.required', 'hp.required', (['"""resize size"""'], {}), "('resize size')\n", (2691, 2706), True, 'import yahp as hp\n'), ((2728, 2752), 'yahp.required', 'hp.required', (['"""crop size"""'], {}), "('crop size')\n", (2739, 2752), True, 'import yahp as hp\n'), ((2774, 2839), 'yahp.required', 'hp.required', (['"""whether to load the training or validation dataset"""'], {}), "('whether to load the training or validation dataset')\n", (2785, 2839), True, 'import yahp as hp\n'), ((2859, 2888), 'yahp.required', 'hp.required', (['"""data directory"""'], {}), "('data directory')\n", (2870, 2888), True, 'import yahp as hp\n'), ((2911, 2996), 'yahp.optional', 'hp.optional', (['"""Whether to drop the last samples for the last batch"""'], {'default': '(True)'}), "('Whether to drop the last samples for the last batch', default=True\n )\n", (2922, 2996), True, 'import yahp as hp\n'), ((3012, 3086), 'yahp.optional', 'hp.optional', (['"""Whether to shuffle the dataset for each epoch"""'], {'default': '(True)'}), "('Whether to shuffle the dataset for each epoch', default=True)\n", (3023, 3086), True, 'import yahp as hp\n'), ((1623, 1654), 'numpy.asarray', 'np.asarray', (['img'], {'dtype': 'np.uint8'}), '(img, dtype=np.uint8)\n', (1633, 1654), True, 'import numpy as np\n'), ((2063, 2091), 'torch.from_numpy', 'torch.from_numpy', (['nump_array'], {}), '(nump_array)\n', (2079, 2091), False, 'import torch\n'), ((790, 858), 'torch.tensor', 'torch.tensor', (['[0.485 * 255, 0.456 * 255, 0.406 * 255]'], {'device': 'device'}), '([0.485 * 255, 0.456 * 255, 0.406 * 255], device=device)\n', (802, 858), False, 'import torch\n'), ((962, 1030), 'torch.tensor', 'torch.tensor', (['[0.229 * 255, 0.224 * 255, 0.225 * 255]'], {'device': 'device'}), '([0.229 * 255, 0.224 * 255, 0.225 * 255], device=device)\n', (974, 1030), False, 'import torch\n'), ((1712, 1747), 'numpy.expand_dims', 'np.expand_dims', (['nump_array'], {'axis': '(-1)'}), '(nump_array, axis=-1)\n', (1726, 1747), True, 'import numpy as np\n'), ((1930, 1962), 'numpy.resize', 'np.resize', (['nump_array', '(3, h, w)'], {}), '(nump_array, (3, h, w))\n', (1939, 1962), True, 'import numpy as np\n'), ((3888, 3924), 'torchvision.transforms.Compose', 'transforms.Compose', (['train_transforms'], {}), '(train_transforms)\n', (3906, 3924), False, 'from torchvision import transforms\n'), ((1770, 1796), 'numpy.rollaxis', 'np.rollaxis', (['nump_array', '(2)'], {}), '(nump_array, 2)\n', (1781, 1796), True, 'import numpy as np\n'), ((3705, 3797), 'torchvision.transforms.RandomResizedCrop', 'transforms.RandomResizedCrop', (['self.crop_size'], {'scale': '(0.08, 1.0)', 'ratio': '(0.75, 4.0 / 3.0)'}), '(self.crop_size, scale=(0.08, 1.0), ratio=(0.75,\n 4.0 / 3.0))\n', (3733, 3797), False, 'from torchvision import transforms\n'), ((3811, 3844), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (3842, 3844), False, 'from torchvision import transforms\n'), ((3545, 3581), 'torchvision.transforms.Resize', 'transforms.Resize', (['train_resize_size'], {}), '(train_resize_size)\n', (3562, 3581), False, 'from torchvision import transforms\n'), ((4005, 4040), 'torchvision.transforms.Resize', 'transforms.Resize', (['self.resize_size'], {}), '(self.resize_size)\n', (4022, 4040), False, 'from torchvision import 
transforms\n'), ((4058, 4095), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['self.crop_size'], {}), '(self.crop_size)\n', (4079, 4095), False, 'from torchvision import transforms\n'), ((4224, 4252), 'os.path.join', 'os.path.join', (['datadir', 'split'], {}), '(datadir, split)\n', (4236, 4252), False, 'import os\n')]
|
"""
Last edited: January 22 2020
@author: <NAME>
Here we provide unit tests of our main functions in robustPipelineSizing.
"""
from FINE.expansionModules import robustPipelineSizing
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import networkx as nx
def test_robustPipelineDesign():
# write tests for function createNetwork()
# check if returned graph has the edges of the input argument:
def test_createNetwork(distances):
graph, newdistances = robustPipelineSizing.createNetwork(distances)
# check that every arc of the graph is in the original distances matrix
# check that every arc of the graph is in the new distances matrix
# check that in the new distance matrix only one direction either (u,v) or (v,u) is contained
for arcIndex in list(graph.edges):
assert (arcIndex in distances.index or (arcIndex[1], arcIndex[0]) in distances.index)
assert (arcIndex in newdistances.index or (arcIndex[1], arcIndex[0]) in newdistances.index)
assert (not (arcIndex in newdistances.index and (arcIndex[1], arcIndex[0]) in newdistances.index))
# check that every arcIndex of the original distance matrix is in the graph
for arcIndex in distances.index:
assert (arcIndex in graph.edges or (arcIndex[1], arcIndex[0]) in graph.edges)
# check that every arcIndex of new distances matrix is in the graph
for arcIndex in newdistances.index:
assert (arcIndex in graph.edges or (arcIndex[1], arcIndex[0]) in graph.edges)
# check lengths of the graph
for arcIndex in nx.get_edge_attributes(graph, 'length').keys():
if arcIndex in newdistances.index:
assert (newdistances[arcIndex] == nx.get_edge_attributes(graph, 'length')[arcIndex])
else:
assert (newdistances[(arcIndex[1], arcIndex[0])] == nx.get_edge_attributes(graph, 'length')[arcIndex])
return graph, newdistances
# we test the function createSteinerTree
def test_createSteinerTree(graph, distances):
stTree, newdistances = robustPipelineSizing.createSteinerTree(graph, distances, list(graph.nodes))
# check if in newDistances only arcs of the spanning tree are contained
for arc in stTree.edges:
# first check that only one direction of the arc is contained
            assert (not (arc in newdistances.index and (arc[1], arc[0]) in newdistances.index))
            # check that the arc or its reversed arc is contained
assert (arc in newdistances.index or (arc[1], arc[0]) in newdistances.index)
        # check that every arc of the new distance matrix, or its reversed arc, is contained in the graph
for arc in newdistances.index:
assert (arc in graph.edges or (arc[1], arc[0]) in graph.edges)
        # check lengths of the spanning tree edges; note that iterating over the full graph here would fail
        # because arcs outside the tree are no longer part of newdistances
        for arc in nx.get_edge_attributes(stTree, 'length').keys():
            if arc in newdistances.index:
                assert (newdistances[arc] == nx.get_edge_attributes(stTree, 'length')[arc])
            else:
                assert (newdistances[(arc[1], arc[0])] == nx.get_edge_attributes(stTree, 'length')[arc])
return stTree, newdistances
def test_networkRefinement(distances, maxPipeLength, dic_node_minPress, dic_node_maxPress):
G, newdistances, dic_node_minPress, dic_node_maxPress = robustPipelineSizing.networkRefinement(distances,
maxPipeLength,
dic_node_minPress,
dic_node_maxPress)
# check that every arc of the graph is in the distance matrix, respectively its reversed arc
# check that not the arc and its reversed are contained in the distance matrix
for arc in list(G.edges):
assert (arc in newdistances.index or (arc[1], arc[0]) in newdistances.index)
assert (not (arc in newdistances.index and (arc[1], arc[0]) in newdistances.index))
# check that every arc of new distances matrix is in the graph
for arc in newdistances.index:
assert (arc in G.edges or (arc[1], arc[0]) in G.edges)
# check that we have a minimal and maximal pressure for every node of the graph
for node in G.nodes:
assert (node in dic_node_minPress.keys() and node in dic_node_maxPress.keys())
return G, newdistances, dic_node_minPress, dic_node_maxPress
def test_computeSingleSpecialScenario(graph, distances, entries, exits, startNode, endNode, dic_nodes_MinCapacity,
dic_nodes_MaxCapacity, specialScenario=True):
dic_scenario_flow = robustPipelineSizing.computeSingleSpecialScenario(graph, distances, entries, exits, startNode,
endNode, dic_nodes_MinCapacity,
dic_nodes_MaxCapacity, specialScenario)
# check if each arc has a flow
for arc in distances.index:
assert (not (arc in dic_scenario_flow.keys() and (arc[1], arc[0]) in dic_scenario_flow.keys()))
assert (arc in dic_scenario_flow.keys() or (arc[1], arc[0]) in dic_scenario_flow.keys())
return dic_scenario_flow
def test_generateRobustScenarios(injectionWithdrawalRates, graph, distances, dic_node_minPress, dic_node_maxPress):
dic_robustScenarios, entries, exits \
= robustPipelineSizing.generateRobustScenarios(injectionWithdrawalRates, graph, distances,
dic_node_minPress, dic_node_maxPress)
# we compute optimal values for each nodePair and save them in dictionary
dic_nodePair_optvalue = {}
for nodePair in dic_robustScenarios.keys():
            # compute the shortest path between these two nodes
shortestPath = nx.shortest_path(graph, nodePair[0], nodePair[1])
obj = 0.0
for i in range(0, len(shortestPath) - 1):
if (shortestPath[i], shortestPath[i + 1]) in list(dic_robustScenarios[nodePair].keys()):
obj = obj + dic_robustScenarios[nodePair][(shortestPath[i], shortestPath[i + 1])]
else:
obj = obj - dic_robustScenarios[nodePair][(shortestPath[i + 1], shortestPath[i])]
dic_nodePair_optvalue[nodePair] = obj
return dic_robustScenarios, entries, exits, dic_nodePair_optvalue
def checkGraphDistanceMatrixCoincide(graph, distances):
# check that for every arc in the graph either the arc or its reversed arc are contained in the distance matrix
for arcIndex in list(graph.edges):
assert (arcIndex in distances.index or (arcIndex[1], arcIndex[0]) in distances.index)
assert (not (arcIndex in distances.index and (arcIndex[1], arcIndex[0]) in distances.index))
# check that for every arc in the distance matrix either the arc or its reversed are contained in the graph
for arcIndex in distances.index:
assert (arcIndex in list(graph.edges) or (arcIndex[1], arcIndex[0]) in list(graph.edges))
assert (not (arcIndex in list(graph.edges) and (arcIndex[1], arcIndex[0]) in list(graph.edges)))
# check lengths of the graph
for arcIndex in nx.get_edge_attributes(graph, 'length').keys():
if arcIndex in distances.index:
assert (distances[arcIndex] == nx.get_edge_attributes(graph, 'length')[arcIndex])
else:
assert (distances[(arcIndex[1], arcIndex[0])] == nx.get_edge_attributes(graph, 'length')[arcIndex])
return
# ######################################################################################################################
# unit test: createNetwork
# create input data and call tests
data = [1200.0, 1200.0, 1500.0, 750.0, 1500.0, 500.0, 500.0, 600.0, 2000.0]
invalidData = [1200.0, 1200.0, 1500.0, 750.0, 1500.0, 500.0, 450.0, 600.0, 2000.0]
keys = [('w1', 'w2'), ('w2', 'w1'), ('w1', 'w5'), ('w2', 'w3'), ('w2', 'w4'), ('w3', 'w4'), ('w4', 'w3'),
('w3', 'w5'), ('w4', 'w5')]
distances = pd.Series(data, index=keys)
invalidDistances = pd.Series(invalidData, index=keys)
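    # note: invalidData differs from data only in the entry for arc ('w4', 'w3'), which is 450.0 while its
    # reversed arc ('w3', 'w4') keeps length 500.0, i.e. the two directions of the same arc disagree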
# should raise an assertion error because an arc and its reversed arc with different length exist
try:
graph, distances = test_createNetwork(invalidDistances)
except AssertionError as error:
print(error)
# We create the new network
try:
graph, distances = test_createNetwork(distances)
except AssertionError as error:
print(error)
print("Something went wrong in createNetwork; check comment above assertion for error")
# # uncomment this block if you want the graph being plotted
# nx.draw(graph, with_labels=True)
# plt.show()
#######################################################################################################################
# unit test function: createSteinerTree
# create input data for testing and the optimal solution of the minimal spanning tree
data = [1200.0, 1200.0, 1500.0, 750.0, 1500.0, 500.0, 500.0, 600.0, 2000.0]
keys = [('w1', 'w2'), ('w2', 'w1'), ('w1', 'w5'), ('w2', 'w3'), ('w2', 'w4'), ('w3', 'w4'), ('w4', 'w3'),
('w3', 'w5'), ('w4', 'w5')]
distances = pd.Series(data, index=keys)
graph, distances = robustPipelineSizing.createNetwork(distances)
optimalSolution = {('w2', 'w1'): 1200.0, ('w2', 'w3'): 750.0, ('w4', 'w3'): 500.0, ('w3', 'w5'): 600.0}
# test the minimum spanning tree function
stTree, distances = robustPipelineSizing.createSteinerTree(graph, distances, list(graph.nodes))
# check if spanning tree is optimal
for arc in stTree.edges:
if arc in optimalSolution.keys():
assert (optimalSolution[arc] == nx.get_edge_attributes(stTree, 'length')[arc])
elif (arc[1], arc[0]) in optimalSolution.keys():
assert (optimalSolution[(arc[1], arc[0])] == nx.get_edge_attributes(stTree, 'length')[arc])
else:
print("Something went wrong in computation of minimum spanning tree")
raise ()
    # check that the distance matrix and the graph (spanning tree) coincide
checkGraphDistanceMatrixCoincide(stTree, distances)
# # uncomment the following block if you want the minimal spanning tree to be plotted
# nx.draw(stTree, with_labels=True)
# plt.show()
#######################################################################################################################
# unit test function networkRefinement
# create input data and optimal solution
data = [1200.0, 1200.0, 1500.0, 750.0, 1500.0, 500.0, 500.0, 600.0, 2000.0]
keys = [('w1', 'w2'), ('w2', 'w1'), ('w1', 'w5'), ('w2', 'w3'), ('w2', 'w4'), ('w3', 'w4'), ('w4', 'w3'),
('w3', 'w5'), ('w4', 'w5')]
distances = pd.Series(data, index=keys)
graph, distances = robustPipelineSizing.createNetwork(distances)
stTree, distances = robustPipelineSizing.createSteinerTree(graph, distances, list(graph.nodes))
dic_node_maxPress = {'w1': 100, 'w2': 90, 'w3': 100, 'w4': 80, 'w5': 95}
dic_node_minPress = {'w1': 50, 'w2': 45, 'w3': 60, 'w4': 60, 'w5': 50}
maxPipeLength = 500
# optimal solution
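    # note: with maxPipeLength = 500, the arc ('w2', 'w1') of length 1200 is split by two auxiliary nodes
    # (v1_w2_w1, v2_w2_w1), while ('w2', 'w3') of length 750 and ('w3', 'w5') of length 600 each get one;
    # in the expected solution every auxiliary node carries the mean of its end nodes' pressure bounds,
    # e.g. v1_w2_w1: min = (45 + 50) / 2 = 47.5 and max = (90 + 100) / 2 = 95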
dic_node_minPressTruth = {'w1': 50, 'w2': 45, 'w3': 60, 'w4': 60, 'w5': 50, 'v1_w2_w1': 47.5, 'v2_w2_w1': 47.5,
'v1_w2_w3': 52.5, 'v1_w3_w5': 55}
dic_node_maxPressTruth = {'w1': 100, 'w2': 90, 'w3': 100, 'w4': 80, 'w5': 95, 'v1_w2_w1': 95, 'v2_w2_w1': 95,
'v1_w2_w3': 95, 'v1_w3_w5': 97.5}
nodesTruth = list(dic_node_minPressTruth.keys())
edgesTruth = [('w2', 'v1_w2_w1'), ('v1_w2_w1', 'v2_w2_w1'), ('v2_w2_w1', 'w1'), ('w4', 'w3'), ('w2', 'v1_w2_w3'),
('w3', 'v1_w2_w3'), ('w3', 'v1_w3_w5'), ('v1_w3_w5', 'w5')]
# test function networkRefinement
graph, distances, dic_node_minPress, dic_node_maxPress = test_networkRefinement(distances, maxPipeLength,
dic_node_minPress, dic_node_maxPress)
# check solution
assert (sorted(edgesTruth) == sorted(list(graph.edges)))
assert (sorted(nodesTruth) == sorted(list(graph.nodes)))
assert (dic_node_minPress == dic_node_minPressTruth)
assert (dic_node_maxPress == dic_node_maxPressTruth)
# uncomment this block if you want the graph to be plotted
# nx.draw(graph, with_labels=True)
# plt.show()
######################################################################################################################
# unit test function computeSingleSpecialScenario Case: SpecialScenario = true
# create input data and optimal solution
data = [1200.0, 1200.0, 1500.0, 750.0, 1500.0, 500.0, 500.0, 600.0, 2000.0]
keys = [('w1', 'w2'), ('w2', 'w1'), ('w1', 'w5'), ('w2', 'w3'), ('w2', 'w4'), ('w3', 'w4'), ('w4', 'w3'),
('w3', 'w5'), ('w4', 'w5')]
distances = pd.Series(data, index=keys)
graph, distances = robustPipelineSizing.createNetwork(distances)
stTree, distances = robustPipelineSizing.createSteinerTree(graph, distances, list(graph.nodes))
dic_node_maxPress = {'w1': 100, 'w2': 90, 'w3': 100, 'w4': 80, 'w5': 95}
dic_node_minPress = {'w1': 50, 'w2': 45, 'w3': 60, 'w4': 60, 'w5': 50}
maxPipeLength = 500
graph, distances, dic_node_minPress, dic_node_maxPress = test_networkRefinement(distances, maxPipeLength,
dic_node_minPress, dic_node_maxPress)
dic_nodes_minCapacity = {'w1': -2, 'w2': 0, 'w3': -2, 'w4': 0, 'w5': 0}
dic_nodes_maxCapacity = {'w1': 0, 'w2': 1, 'w3': 2, 'w4': 0, 'w5': 4}
startNode = 'w1'
endNode = 'w3'
# create optimal solution
entries = ['w1', 'w3']
exits = ['w2', 'w3', 'w5']
optSol_W1_W2 = {('w2', 'v1_w2_w1'): -2.0, ('v1_w2_w1', 'v2_w2_w1'): -2.0, ('v2_w2_w1', 'w1'): -2.0,
('w2', 'v1_w2_w3'): 2.0, ('v1_w2_w3', 'w3'): 2.0}
# test function computeSingleSpecialScenario
dic_scenario_flow_W1_W3 = test_computeSingleSpecialScenario(graph, distances, entries, exits, startNode,
endNode, dic_nodes_minCapacity, dic_nodes_maxCapacity, True)
# check solution: since flow values on arcs not part of the unique path between start and endNode are not unique,
# we only check the unique flow values on the path between start and endNode
for arc in optSol_W1_W2.keys():
assert (dic_scenario_flow_W1_W3[arc] == optSol_W1_W2[arc])
#######################################################################################################################
# unit test function computeSingleSpecialScenario Case: SpecialScenario = false
# create input data and optimal solution
data = [1200.0, 1200.0, 1500.0, 750.0, 1500.0, 500.0, 500.0, 600.0, 2000.0]
keys = [('w1', 'w2'), ('w2', 'w1'), ('w1', 'w5'), ('w2', 'w3'), ('w2', 'w4'), ('w3', 'w4'), ('w4', 'w3'),
('w3', 'w5'), ('w4', 'w5')]
distances = pd.Series(data, index=keys)
graph, distances = robustPipelineSizing.createNetwork(distances)
stTree, distances = robustPipelineSizing.createSteinerTree(graph, distances, list(graph.nodes))
dic_node_maxPress = {'w1': 100, 'w2': 90, 'w3': 100, 'w4': 80, 'w5': 95}
dic_node_minPress = {'w1': 50, 'w2': 45, 'w3': 60, 'w4': 60, 'w5': 50}
maxPipeLength = 500
graph, distances, dic_node_minPress, dic_node_maxPress = test_networkRefinement(distances, maxPipeLength,
dic_node_minPress, dic_node_maxPress)
dic_nodes_minCapacity = {'w1': -1, 'w2': 0, 'w3': -1, 'w4': 0, 'w5': 2, 'v1_w2_w1': 0, 'v2_w2_w1': 0,
'v1_w2_w3': 0, 'v1_w3_w5': 0}
dic_nodes_maxCapacity = dic_nodes_minCapacity
startNode = 'w1'
endNode = 'w3'
# create optimal solution
entries = []
exits = []
optSol_W1_W2 = {('w2', 'v1_w2_w1'): -1.0, ('v1_w2_w1', 'v2_w2_w1'): -1.0, ('v2_w2_w1', 'w1'): -1.0,
                ('w2', 'v1_w2_w3'): 1.0, ('v1_w2_w3', 'w3'): 1.0, ('w3', 'v1_w3_w5'): 2.0, ('v1_w3_w5', 'w5'): 2.0}
# test function computeSingleSpecialScenario
dic_scenario_flow_W1_W3 = test_computeSingleSpecialScenario(graph, distances, entries, exits, startNode, endNode,
dic_nodes_minCapacity, dic_nodes_maxCapacity, False)
# check solution: since demands are fixed, the solution is unique
for arc in optSol_W1_W2.keys():
assert (dic_scenario_flow_W1_W3[arc] == optSol_W1_W2[arc])
######################################################################################################################
# unit test of function generateRobustScenarios
# create input data and optimal solution (only partially, because the inner function computeSingleSpecialScenario is already tested above)
data = [1200.0, 1200.0, 1500.0, 750.0, 1500.0, 500.0, 500.0, 600.0, 2000.0]
keys = [('w1', 'w2'), ('w2', 'w1'), ('w1', 'w5'), ('w2', 'w3'), ('w2', 'w4'), ('w3', 'w4'), ('w4', 'w3'),
('w3', 'w5'), ('w4', 'w5')]
distances = pd.Series(data, index=keys)
graph, distances = robustPipelineSizing.createNetwork(distances)
stTree, distances = robustPipelineSizing.createSteinerTree(graph, distances, list(graph.nodes))
dic_node_maxPress = {'w1': 100, 'w2': 90, 'w3': 100, 'w4': 80, 'w5': 95}
dic_node_minPress = {'w1': 50, 'w2': 45, 'w3': 60, 'w4': 60, 'w5': 50}
maxPipeLength = 500
graph, distances, dic_node_minPress, dic_node_maxPress = test_networkRefinement(distances, maxPipeLength,
dic_node_minPress, dic_node_maxPress)
injectionRates = {'w1': [-2.0, 0.0, -2.0, -2.0], 'w2': [1.0, 0.0, 0.0, 0.0], 'w3': [1.0, -2.0, 2.0, -2.0],
'w4': [0.0, 0.0, 0.0, 0.0], 'w5': [0.0, 2.0, 0.0, 4.0]}
injectionWithdrawal = pd.DataFrame(data=injectionRates)
# optimal solution
entriesTruth = ['w1', 'w3']
exitsTruth = ['w2', 'w3', 'w5']
# optimal value of each scenario
dic_nodePair_optValueTruth = {('w4', 'w3'): 0.0, ('w4', 'w2'): 2.0, ('w4', 'v1_w2_w1'): 2.0, ('w4', 'v2_w2_w1'): 2.0,
('w4', 'w1'): 2.0, ('w4', 'v1_w2_w3'): 1.0, ('w4', 'v1_w3_w5'): 4.0, ('w4', 'w5'): 8.0,
('w3', 'w4'): 0.0, ('w3', 'w2'): 2.0, ('w3', 'v1_w2_w1'): 2.0, ('w3', 'v2_w2_w1'): 2.0,
('w3', 'w1'): 2.0, ('w3', 'v1_w2_w3'): 1.0, ('w3', 'v1_w3_w5'): 4.0, ('w3', 'w5'): 8.0,
('w2', 'w4'): 4.0, ('w2', 'w3'): 4.0, ('w2', 'v1_w2_w1'): 0.0, ('w2', 'v2_w2_w1'): 0.0,
('w2', 'w1'): 0.0, ('w2', 'v1_w2_w3'): 2.0, ('w2', 'v1_w3_w5'): 8.0, ('w2', 'w5'): 12.0,
('v1_w2_w1', 'w4'): 6.0, ('v1_w2_w1', 'w3'): 6.0, ('v1_w2_w1', 'w2'): 2.0,
('v1_w2_w1', 'v2_w2_w1'): 0.0, ('v1_w2_w1', 'w1'): 0.0, ('v1_w2_w1', 'v1_w2_w3'): 4.0,
('v1_w2_w1', 'v1_w3_w5'): 10.0, ('v1_w2_w1', 'w5'): 14.0, ('v2_w2_w1', 'w4'): 8.0,
('v2_w2_w1', 'w3'): 8.0, ('v2_w2_w1', 'w2'): 4.0, ('v2_w2_w1', 'v1_w2_w1'): 2.0,
('v2_w2_w1', 'w1'): 0.0, ('v2_w2_w1', 'v1_w2_w3'): 6.0, ('v2_w2_w1', 'v1_w3_w5'): 12.0,
('v2_w2_w1', 'w5'): 16.0, ('w1', 'w4'): 10.0, ('w1', 'w3'): 10.0, ('w1', 'w2'): 6.0,
('w1', 'v1_w2_w1'): 4.0, ('w1', 'v2_w2_w1'): 2.0, ('w1', 'v1_w2_w3'): 8.0,
('w1', 'v1_w3_w5'): 14.0, ('w1', 'w5'): 18.0, ('v1_w2_w3', 'w4'): 2.0,
('v1_w2_w3', 'w3'): 2.0, ('v1_w2_w3', 'w2'): 1.0, ('v1_w2_w3', 'v1_w2_w1'): 1.0,
('v1_w2_w3', 'v2_w2_w1'): 1.0, ('v1_w2_w3', 'w1'): 1.0, ('v1_w2_w3', 'v1_w3_w5'): 6.0,
('v1_w2_w3', 'w5'): 10.0, ('v1_w3_w5', 'w4'): 0.0, ('v1_w3_w5', 'w3'): 0.0,
('v1_w3_w5', 'w2'): 2.0, ('v1_w3_w5', 'v1_w2_w1'): 2.0, ('v1_w3_w5', 'v2_w2_w1'): 2.0,
('v1_w3_w5', 'w1'): 2.0, ('v1_w3_w5', 'v1_w2_w3'): 1.0, ('v1_w3_w5', 'w5'): 4.0,
('w5', 'w4'): 0.0, ('w5', 'w3'): 0.0, ('w5', 'w2'): 2.0, ('w5', 'v1_w2_w1'): 2.0,
('w5', 'v2_w2_w1'): 2.0, ('w5', 'w1'): 2.0, ('w5', 'v1_w2_w3'): 1.0,
('w5', 'v1_w3_w5'): 0.0}
nodes = list(dic_node_minPress.keys())
nodePair = []
for startnode in nodes:
for endnode in nodes:
if startnode is not endnode:
nodePair.append((startnode, endnode))
# test function generateRobustScenarios
dic_robustScenarios, entries, exits, dic_nodePair_optValue \
= test_generateRobustScenarios(injectionWithdrawal, graph, distances, dic_node_minPress, dic_node_maxPress)
assert (sorted(entriesTruth) == sorted(entries))
assert (sorted(exitsTruth) == sorted(exits))
assert (sorted(dic_robustScenarios.keys()) == sorted(nodePair))
assert (dic_nodePair_optValue == dic_nodePair_optValueTruth)
######################################################################################################################
# unit test of function computeLargeMergedDiameters
# create input data and optimal solution
dic_diamToMerge_costs = {0.144: 10, 1.500: 20}
# optimal solution
dic_mergedDiamTruth = {0.190009: 20, 1.979262: 40}
dic_reversed_diamsTruth = {0.190009: 0.144, 1.979262: 1.500}
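# presumably two identical pipes of diameter d are merged into one equivalent pipe with doubled cost and
# d_merged = (2 * d**2.5)**0.4 = 2**0.4 * d, e.g. 2**0.4 * 0.144 ~= 0.190009 and 2**0.4 * 1.5 ~= 1.979262;
# the second argument (6) appears to be the rounding precision (number of digits) of the merged diameters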
dic_mergedDiam, dic_reversed_diams = robustPipelineSizing.computeLargeMergedDiameters(dic_diamToMerge_costs, 6)
assert (dic_mergedDiam == dic_mergedDiamTruth)
assert (dic_reversed_diams == dic_reversed_diamsTruth)
#######################################################################################################################
# unit function test of function determinePressureDropCoef
# create input data and optimal solution
data = [1200.0, 1200.0, 1500.0, 750.0, 1500.0, 500.0, 500.0, 600.0, 2000.0]
keys = [('w1', 'w2'), ('w2', 'w1'), ('w1', 'w5'), ('w2', 'w3'), ('w2', 'w4'), ('w3', 'w4'), ('w4', 'w3'),
('w3', 'w5'), ('w4', 'w5')]
distances = pd.Series(data, index=keys)
graph, distances = robustPipelineSizing.createNetwork(distances)
stTree, distances = robustPipelineSizing.createSteinerTree(graph, distances, list(graph.nodes))
dic_node_maxPress = {'w1': 100, 'w2': 90, 'w3': 100, 'w4': 80, 'w5': 95}
dic_node_minPress = {'w1': 50, 'w2': 45, 'w3': 60, 'w4': 60, 'w5': 50}
maxPipeLength = 500
graph, distances, dic_node_minPress, dic_node_maxPress = test_networkRefinement(distances, maxPipeLength,
dic_node_minPress, dic_node_maxPress)
diameters = [0.1063, 1.536]
testscenarios = {('w5', 'w1'): {('w4', 'w3'): 0.0, ('w2', 'v1_w2_w1'): 0.0, ('v1_w2_w1', 'v2_w2_w1'): 0.0,
('v2_w2_w1', 'w1'): 0.0, ('w2', 'v1_w2_w3'): -1.0, ('v1_w2_w3', 'w3'): -1.0,
('w3', 'v1_w3_w5'): 0.0, ('v1_w3_w5', 'w5'): 0.0},
('w2', 'v1_w3_w5'): {('w4', 'w3'): 0.0, ('w2', 'v1_w2_w1'): -2.0,
('v1_w2_w1', 'v2_w2_w1'): -2.0, ('v2_w2_w1', 'w1'): -2.0, ('w2', 'v1_w2_w3'): 2.0,
('v1_w2_w3', 'w3'): 2.0, ('w3', 'v1_w3_w5'): 4.0, ('v1_w3_w5', 'w5'): 4.0}}
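# determinePressureDropCoef is expected to return one pressure-drop coefficient per arc for every
# (diameter, scenario) combination; the sign of a coefficient follows the sign of the arc flow and a
# zero flow yields a zero coefficient, as reflected in dic_pressure_coefTruth below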
# optimal solution
dic_pressure_coefTruth = {(0.1063, ('w5', 'w1')): {('w4', 'w3'): 0, ('w2', 'v1_w2_w1'): 0, ('v1_w2_w1', 'v2_w2_w1'): 0,
('v2_w2_w1', 'w1'): 0, ('w2', 'v1_w2_w3'): -131.38307913282281,
('v1_w2_w3', 'w3'): -131.83249054911866, ('w3', 'v1_w3_w5'): 0,
('v1_w3_w5', 'w5'): 0},
(0.1063, ('w2', 'v1_w3_w5')): {('w4', 'w3'): 0, ('w2', 'v1_w2_w1'): -558.2111338732643,
('v1_w2_w1', 'v2_w2_w1'): -558.5140246655361,
('v2_w2_w1', 'w1'): -559.5025149566926,
('w2', 'v1_w2_w3'): 523.2976028753353,
('v1_w2_w3', 'w3'): 525.1425774408543,
('w3', 'v1_w3_w5'): 1677.5126669798137,
('v1_w3_w5', 'w5'): 1674.151293128955},
(1.536, ('w5', 'w1')): {('w4', 'w3'): 0, ('w2', 'v1_w2_w1'): 0, ('v1_w2_w1', 'v2_w2_w1'): 0,
('v2_w2_w1', 'w1'): 0, ('w2', 'v1_w2_w3'): -0.0001700642888961138,
('v1_w2_w3', 'w3'): -0.00017067718965527387, ('w3', 'v1_w3_w5'): 0,
('v1_w3_w5', 'w5'): 0},
(1.536, ('w2', 'v1_w3_w5')): {('w4', 'w3'): 0, ('w2', 'v1_w2_w1'): -0.0006460078899527932,
('v1_w2_w1', 'v2_w2_w1'): -0.0006463741986205677,
('v2_w2_w1', 'w1'): -0.000647570434768236,
('w2', 'v1_w2_w3'): 0.0006056017622910718,
('v1_w2_w3', 'w3'): 0.0006078348401792327,
('w3', 'v1_w3_w5'): 0.0017698676222416114,
('v1_w3_w5', 'w5'): 0.0017661900309253952}}
dic_pressure_coef = robustPipelineSizing.determinePressureDropCoef(testscenarios, distances, dic_node_minPress,
dic_node_maxPress, diameters)
assert (dic_pressure_coef == dic_pressure_coefTruth)
######################################################################################################################
# unit test of function computeTimeStepFlows
# create input data and optimal solution
data = [1200.0, 1200.0, 1500.0, 750.0, 1500.0, 500.0, 500.0, 600.0, 2000.0]
keys = [('w1', 'w2'), ('w2', 'w1'), ('w1', 'w5'), ('w2', 'w3'), ('w2', 'w4'), ('w3', 'w4'), ('w4', 'w3'),
('w3', 'w5'), ('w4', 'w5')]
distances = pd.Series(data, index=keys)
graph, distances = robustPipelineSizing.createNetwork(distances)
stTree, distances = robustPipelineSizing.createSteinerTree(graph, distances, list(graph.nodes))
dic_node_maxPress = {'w1': 100, 'w2': 90, 'w3': 100, 'w4': 80, 'w5': 95}
dic_node_minPress = {'w1': 50, 'w2': 45, 'w3': 60, 'w4': 60, 'w5': 50}
maxPipeLength = 500
graph, distances, dic_node_minPress, dic_node_maxPress = test_networkRefinement(distances, maxPipeLength,
dic_node_minPress, dic_node_maxPress)
injectionRates = {'w1': [-20.0, -20.0], 'w2': [10.0, 0.0], 'w3': [10.0, -20.0],
'w4': [0.0, 0.0], 'w5': [0.0, 40.0]}
injectionWithdrawal = pd.DataFrame(data=injectionRates)
entries = []
exits = []
# create optimal solution
dic_timeStep_flowsTruth = {0: {('w4', 'w3'): 0.0, ('w2', 'v1_w2_w1'): -20.0, ('v1_w2_w1', 'v2_w2_w1'): -20.0,
('v2_w2_w1', 'w1'): -20.0, ('w2', 'v1_w2_w3'): 10.0, ('v1_w2_w3', 'w3'): 10.0,
('w3', 'v1_w3_w5'): 0.0, ('v1_w3_w5', 'w5'): 0.0},
1: {('w4', 'w3'): 0.0, ('w2', 'v1_w2_w1'): -20.0, ('v1_w2_w1', 'v2_w2_w1'): -20.0,
('v2_w2_w1', 'w1'): -20.0, ('w2', 'v1_w2_w3'): 20.0, ('v1_w2_w3', 'w3'): 20.0,
('w3', 'v1_w3_w5'): 40.0, ('v1_w3_w5', 'w5'): 40.0}}
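# since the refined network is a tree (Steiner tree over all nodes), the arc flows of each time step
# follow uniquely from the nodal injection/withdrawal balances and can be compared directly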
dic_timeStep_flows = robustPipelineSizing.computeTimeStepFlows(injectionWithdrawal, distances, graph, entries, exits)
assert (dic_timeStep_flows == dic_timeStep_flowsTruth)
######################################################################################################################
# unit test of function determineOptimalDiscretePipelineSelection case Robust = True
# create input data and optimal solution
data = [1200.0, 1200.0, 1500.0, 750.0, 1500.0, 500.0, 500.0, 600.0, 2000.0]
keys = [('w1', 'w2'), ('w2', 'w1'), ('w1', 'w5'), ('w2', 'w3'), ('w2', 'w4'), ('w3', 'w4'), ('w4', 'w3'),
('w3', 'w5'), ('w4', 'w5')]
distances = pd.Series(data, index=keys)
graph, distances = robustPipelineSizing.createNetwork(distances)
stTree, distances = robustPipelineSizing.createSteinerTree(graph, distances, list(graph.nodes))
dic_node_maxPress = {'w1': 100, 'w2': 90, 'w3': 100, 'w4': 80, 'w5': 95}
dic_node_minPress = {'w1': 50, 'w2': 45, 'w3': 60, 'w4': 60, 'w5': 50}
maxPipeLength = 500
graph, distances, dic_node_minPress, dic_node_maxPress = test_networkRefinement(distances, maxPipeLength,
dic_node_minPress, dic_node_maxPress)
injectionRates = {'w1': [-20.0, 0.0, -20.0, -20.0], 'w2': [10.0, 0.0, 0.0, 0.0], 'w3': [10.0, -20.0, 20.0, -20.0],
'w4': [0.0, 0.0, 0.0, 0.0], 'w5': [0.0, 20.0, 0.0, 40.0]}
injectionWithdrawal = pd.DataFrame(data=injectionRates)
dic_robustScenarios, entries, exits = robustPipelineSizing.generateRobustScenarios(injectionWithdrawal, graph,
distances, dic_node_minPress,
dic_node_maxPress)
diameters = [0.1063, 1.536]
# for debugging reasons we consider only two special scenarios
dic_robustTestScenarios = {}
dic_robustTestScenarios[('w1', 'w3')] = dic_robustScenarios[('w1', 'w3')]
dic_robustTestScenarios[('w5', 'w1')] = dic_robustScenarios[('w5', 'w1')]
dic_pressure_coef = robustPipelineSizing.determinePressureDropCoef(dic_robustTestScenarios, distances,
dic_node_minPress, dic_node_maxPress, diameters)
dic_diameter_costs = {0.1063: 10, 1.536: 30}
specialScenarionames = [('w1', 'w3'), ('w5', 'w1')]
# optimal solution
dic_arc_diamTruth = {('w4', 'w3'): 0.1063, ('w2', 'v1_w2_w1'): 1.536, ('v1_w2_w1', 'v2_w2_w1'): 1.536,
('v2_w2_w1', 'w1'): 1.536, ('w2', 'v1_w2_w3'): 1.536, ('v1_w2_w3', 'w3'): 1.536,
('w3', 'v1_w3_w5'): 1.536, ('v1_w3_w5', 'w5'): 1.536}
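# with robust=True the selected diameters must keep the pressure bounds feasible for every special scenario
# passed in, which here forces the large diameter (1.536) everywhere except on the flow-free arc ('w4', 'w3')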
dic_arc_diam, dic_scen_node_press = robustPipelineSizing.determineOptimalDiscretePipelineSelection(graph, distances,
dic_pressure_coef, specialScenarionames, dic_node_minPress, dic_node_maxPress, dic_diameter_costs,
robust=True, verbose=2)
assert (sorted(dic_arc_diam) == sorted(dic_arc_diamTruth))
#######################################################################################################################
# unit test of function determineOptimalDiscretePipelineSelection case Robust = False
# create input data and optimal solution
data = [1200.0, 1200.0, 1500.0, 750.0, 1500.0, 500.0, 500.0, 600.0, 2000.0]
keys = [('w1', 'w2'), ('w2', 'w1'), ('w1', 'w5'), ('w2', 'w3'), ('w2', 'w4'), ('w3', 'w4'), ('w4', 'w3'),
('w3', 'w5'), ('w4', 'w5')]
distances = pd.Series(data, index=keys)
graph, distances = robustPipelineSizing.createNetwork(distances)
stTree, distances = robustPipelineSizing.createSteinerTree(graph, distances, list(graph.nodes))
dic_node_maxPress = {'w1': 100, 'w2': 90, 'w3': 100, 'w4': 80, 'w5': 95}
dic_node_minPress = {'w1': 50, 'w2': 45, 'w3': 60, 'w4': 60, 'w5': 50}
maxPipeLength = 500
graph, distances, dic_node_minPress, dic_node_maxPress = test_networkRefinement(distances, maxPipeLength,
dic_node_minPress, dic_node_maxPress)
injectionRates = {'w1': [-5.0, -5.0], 'w2': [3.0, 0.0], 'w3': [2.0, 0.0],
'w4': [0.0, 0.0], 'w5': [0.0, 5.0]}
injectionWithdrawal = pd.DataFrame(data=injectionRates)
entries = []
exits = []
diameters = [0.1063, 1.536]
dic_timeStep_flows = robustPipelineSizing.computeTimeStepFlows(injectionWithdrawal, distances, graph,
entries, exits)
dic_pressure_coef = robustPipelineSizing.determinePressureDropCoef(dic_timeStep_flows, distances,
dic_node_minPress, dic_node_maxPress, diameters)
dic_diameter_costs = {0.1063: 10, 1.536: 30}
specialScenarionames = list(dic_timeStep_flows.keys())
# create optimal solution
dic_arc_diamTruth = {('w4', 'w3'): 0.1063, ('w2', 'v1_w2_w1'): 1.536, ('v1_w2_w1', 'v2_w2_w1'): 1.536,
('v2_w2_w1', 'w1'): 0.1063, ('w2', 'v1_w2_w3'): 1.536, ('v1_w2_w3', 'w3'): 1.536,
('w3', 'v1_w3_w5'): 1.536, ('v1_w3_w5', 'w5'): 0.1063}
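# with robust=False only the given time-step flow scenarios are enforced, so some arcs
# (e.g. ('v2_w2_w1', 'w1') and ('v1_w3_w5', 'w5')) get by with the small diameter 0.1063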
dic_arc_diam, dic_scen_node_press = robustPipelineSizing.determineOptimalDiscretePipelineSelection(graph, distances,
dic_pressure_coef, specialScenarionames, dic_node_minPress, dic_node_maxPress, dic_diameter_costs,
robust=False, verbose=2)
assert (sorted(dic_arc_diam) == sorted(dic_arc_diamTruth))
#######################################################################################################################
# unit test of function computePressureStartnodeArc
# create input data and optimal solution
arc = ('w1', 'w2')
pressureEndNode = 50.0
dic_arc_diam = {arc: 0.1063}
distances = pd.Series(1000.0, index=[arc])
dic_scenario_flows = {arc: 5.0}
pressStartnode = robustPipelineSizing.computePressureStartnodeArc(arc, pressureEndNode, dic_scenario_flows,
dic_arc_diam, distances)
assert (np.round(pressStartnode,3) == 105.59)
# for the reversed arc flow we have the same result because the arc flow direction is handled by computePressureAtNode
dic_scenario_flows = {arc: -5.0}
pressStartnode = robustPipelineSizing.computePressureStartnodeArc(arc, pressureEndNode, dic_scenario_flows,
dic_arc_diam, distances)
assert (np.round(pressStartnode,3) == 105.59)
dic_scenario_flows = {arc: 0.0}
pressStartnode = robustPipelineSizing.computePressureStartnodeArc(arc, pressureEndNode, dic_scenario_flows,
dic_arc_diam, distances)
assert (np.round(pressStartnode,3) == np.round(pressureEndNode,3))
#######################################################################################################################
# unit test of function computePressureEndnodeArc
# create input data and optimal solution
arc = ('w1', 'w2')
pressStartNode = 100.0
dic_arc_diam = {arc: 0.1063}
distances = pd.Series(1000.0, index=[arc])
dic_scenario_flows = {arc: 5.0}
pressureEndNode = robustPipelineSizing.computePressureEndnodeArc(arc, pressStartNode, dic_scenario_flows, dic_arc_diam,
distances)
assert (np.round(pressureEndNode,3) == 37.29)
# results should be the same for reversed arc flow since the flow direction is handled by computePressureAtNode
dic_scenario_flows = {arc: -5.0}
pressureEndNode = robustPipelineSizing.computePressureEndnodeArc(arc, pressStartNode, dic_scenario_flows, dic_arc_diam,
distances)
assert (np.round(pressureEndNode,3) == 37.29)
dic_scenario_flows = {arc: 0.0}
pressureEndNode = robustPipelineSizing.computePressureEndnodeArc(arc, pressStartNode, dic_scenario_flows, dic_arc_diam,
distances)
assert (np.round(pressureEndNode,3) == np.round(pressStartNode,3))
#######################################################################################################################
# unit test of function computePressureAtNode
# create input data and optimal solution
data = [1200.0, 1200.0, 1500.0, 750.0, 1500.0, 500.0, 500.0, 600.0, 2000.0]
keys = [('w1', 'w2'), ('w2', 'w1'), ('w1', 'w5'), ('w2', 'w3'), ('w2', 'w4'), ('w3', 'w4'), ('w4', 'w3'),
('w3', 'w5'), ('w4', 'w5')]
distances = pd.Series(data, index=keys)
graph, distances = robustPipelineSizing.createNetwork(distances)
stTree, distances = robustPipelineSizing.createSteinerTree(graph, distances, list(graph.nodes))
dic_node_maxPress = {'w1': 100, 'w2': 90, 'w3': 100, 'w4': 80, 'w5': 95}
dic_node_minPress = {'w1': 50, 'w2': 45, 'w3': 60, 'w4': 60, 'w5': 50}
maxPipeLength = 500
graph, distances, dic_node_minPress, dic_node_maxPress = test_networkRefinement(distances, maxPipeLength,
dic_node_minPress, dic_node_maxPress)
injectionRates = {'w1': [-5.0, -5.0], 'w2': [3.0, 0.0], 'w3': [2.0, 0.0],
'w4': [0.0, 0.0], 'w5': [0.0, 5.0]}
injectionWithdrawal = pd.DataFrame(data=injectionRates)
entries = []
exits = []
diameters = [0.1063, 1.536]
dic_timeStep_flows = robustPipelineSizing.computeTimeStepFlows(injectionWithdrawal, distances, graph,
entries, exits)
dic_pressure_coef = robustPipelineSizing.determinePressureDropCoef(dic_timeStep_flows, distances,
dic_node_minPress, dic_node_maxPress, diameters)
dic_diameter_costs = {0.1063: 10, 1.536: 30}
specialScenarionames = list(dic_timeStep_flows.keys())
dic_arc_diam, dic_scen_node_press = robustPipelineSizing.determineOptimalDiscretePipelineSelection(graph, distances,
dic_pressure_coef,
specialScenarionames,
dic_node_minPress,
dic_node_maxPress,
dic_diameter_costs,
robust=False)
validation = True
node = 'w1'
upperPressNode = 'w1'
tmp_violation = 0.0
dic_node_Pressure = {}
for nodeindex in graph.nodes:
dic_node_Pressure[nodeindex] = None
dic_timeStep_flow = dic_timeStep_flows[0]
# optimal solution
dic_nodePressTruth = {'w4': 80.6313893875461, 'w3': 80.6313893875461, 'w2': 80.6313969344965,
'v1_w2_w1': 80.63141923180355, 'v2_w2_w1': 80.63144152910475, 'w1': 100,
'v1_w2_w3': 80.63139316102138, 'v1_w3_w5': 80.6313893875461, 'w5': 80.6313893875461}
validation, tmp_violation = robustPipelineSizing.computePressureAtNode(validation, node, upperPressNode, graph,
dic_arc_diam, distances, dic_timeStep_flow,
dic_node_minPress, dic_node_maxPress,
tmp_violation, dic_node_Pressure)
assert (not validation)
assert (tmp_violation == 0.6313893875460934)
assert (dic_node_Pressure == dic_nodePressTruth)
validation = True
node = 'w4'
upperPressNode = 'w4'
tmp_violation = 0.0
dic_node_Pressure = {}
for nodeindex in graph.nodes:
dic_node_Pressure[nodeindex] = None
dic_timeStep_flow = dic_timeStep_flows[0]
# optimal solution
dic_nodePressTruth = {'w4': 80, 'w3': 80.0, 'w2': 80.00000760352876, 'v1_w2_w1': 80.00003006810435,
'v2_w2_w1': 80.00005253267393, 'w1': 99.4853419184688, 'v1_w2_w3': 80.00000380176446,
'v1_w3_w5': 80.0, 'w5': 80.0}
validation, tmp_violation = robustPipelineSizing.computePressureAtNode(validation, node, upperPressNode, graph,
dic_arc_diam, distances, dic_timeStep_flow,
dic_node_minPress, dic_node_maxPress,
tmp_violation, dic_node_Pressure)
assert (validation)
assert (tmp_violation == 0.0)
assert (dic_node_Pressure == dic_nodePressTruth)
#####################################################################################################################
# unit test of function postprocessing
# create input data and optimal solution
data = [1200.0, 1200.0, 1500.0, 750.0, 1500.0, 500.0, 500.0, 600.0, 2000.0]
keys = [('w1', 'w2'), ('w2', 'w1'), ('w1', 'w5'), ('w2', 'w3'), ('w2', 'w4'), ('w3', 'w4'), ('w4', 'w3'),
('w3', 'w5'), ('w4', 'w5')]
distances = pd.Series(data, index=keys)
graph, distances = robustPipelineSizing.createNetwork(distances)
stTree, distances = robustPipelineSizing.createSteinerTree(graph, distances, list(graph.nodes))
dic_node_maxPress = {'w1': 100, 'w2': 90, 'w3': 100, 'w4': 80, 'w5': 95}
dic_node_minPress = {'w1': 50, 'w2': 45, 'w3': 60, 'w4': 60, 'w5': 50}
maxPipeLength = 500
graph, distances, dic_node_minPress, dic_node_maxPress = test_networkRefinement(distances, maxPipeLength,
dic_node_minPress, dic_node_maxPress)
dic_arc_diam = {('w4', 'w3'): 0.1063, ('w2', 'v1_w2_w1'): 1.536, ('v1_w2_w1', 'v2_w2_w1'): 1.536,
('v2_w2_w1', 'w1'): 1.536, ('w2', 'v1_w2_w3'): 1.536, ('v1_w2_w3', 'w3'): 1.536,
('w3', 'v1_w3_w5'): 1.536, ('v1_w3_w5', 'w5'): 1.536}
testScen = {('w2', 'v1_w3_w5'): {('w4', 'w3'): 0.0, ('w2', 'v1_w2_w1'): -200.0, ('v1_w2_w1', 'v2_w2_w1'): -200.0,
('v2_w2_w1', 'w1'): -200.0, ('w2', 'v1_w2_w3'): 200.0, ('v1_w2_w3', 'w3'): 200.0,
('w3', 'v1_w3_w5'): 400.0, ('v1_w3_w5', 'w5'): 400.0}}
# optimal solution
dic_scenPressTruth = {('w2', 'v1_w3_w5'): {'w4': 80, 'w3': 80.0, 'w2': 80.05723929028916, 'v1_w2_w1': 80.08775096325076,
'v2_w2_w1': 80.11825156968065, 'w1': 80.14874112202088,
'v1_w2_w3': 80.02862451899747, 'v1_w3_w5': 79.9087044007704,
'w5': 79.81730934184435}}
dic_scen_PressLevel, dic_scen_MaxViolPress = robustPipelineSizing.postprocessing(graph, distances, dic_arc_diam,
testScen, dic_node_minPress,
dic_node_maxPress)
assert (dic_scenPressTruth == dic_scen_PressLevel)
assert (dic_scen_MaxViolPress[('w2', 'v1_w3_w5')] == 0.0)
# second testcase in which a violation exists
dic_arc_diam = {('w4', 'w3'): 0.1063, ('w2', 'v1_w2_w1'): 1.536, ('v1_w2_w1', 'v2_w2_w1'): 1.536,
('v2_w2_w1', 'w1'): 0.3063, ('w2', 'v1_w2_w3'): 1.536, ('v1_w2_w3', 'w3'): 1.536,
('w3', 'v1_w3_w5'): 1.536, ('v1_w3_w5', 'w5'): 1.536}
# optimal solution
dic_scenPressTruth = {('w2', 'v1_w3_w5'): {'w4': 80, 'w3': 80.0, 'w2': 80.05723929028916, 'v1_w2_w1': 80.08775096325076,
'v2_w2_w1': 80.11825156968065, 'w1': 168.2934447139534,
'v1_w2_w3': 80.02862451899747, 'v1_w3_w5': 79.9087044007704,
'w5': 79.81730934184435}}
dic_scen_MaxViol = {('w2', 'v1_w3_w5'): 68.29344471395339}
dic_scen_PressLevel, dic_scen_MaxViolPress = robustPipelineSizing.postprocessing(graph, distances, dic_arc_diam,
testScen, dic_node_minPress,
dic_node_maxPress)
assert (dic_scen_PressLevel == dic_scenPressTruth)
assert (dic_scen_MaxViolPress == dic_scen_MaxViol)
print("All Unit tests worked as expected")
|
[
"pandas.Series",
"FINE.expansionModules.robustPipelineSizing.computePressureEndnodeArc",
"FINE.expansionModules.robustPipelineSizing.networkRefinement",
"FINE.expansionModules.robustPipelineSizing.computeTimeStepFlows",
"pandas.DataFrame",
"numpy.round",
"FINE.expansionModules.robustPipelineSizing.determineOptimalDiscretePipelineSelection",
"networkx.get_edge_attributes",
"FINE.expansionModules.robustPipelineSizing.createNetwork",
"FINE.expansionModules.robustPipelineSizing.generateRobustScenarios",
"FINE.expansionModules.robustPipelineSizing.computePressureAtNode",
"networkx.shortest_path",
"FINE.expansionModules.robustPipelineSizing.computeSingleSpecialScenario",
"FINE.expansionModules.robustPipelineSizing.computePressureStartnodeArc",
"FINE.expansionModules.robustPipelineSizing.computeLargeMergedDiameters",
"FINE.expansionModules.robustPipelineSizing.determinePressureDropCoef",
"FINE.expansionModules.robustPipelineSizing.postprocessing"
] |
[((8410, 8437), 'pandas.Series', 'pd.Series', (['data'], {'index': 'keys'}), '(data, index=keys)\n', (8419, 8437), True, 'import pandas as pd\n'), ((8461, 8495), 'pandas.Series', 'pd.Series', (['invalidData'], {'index': 'keys'}), '(invalidData, index=keys)\n', (8470, 8495), True, 'import pandas as pd\n'), ((9606, 9633), 'pandas.Series', 'pd.Series', (['data'], {'index': 'keys'}), '(data, index=keys)\n', (9615, 9633), True, 'import pandas as pd\n'), ((9657, 9702), 'FINE.expansionModules.robustPipelineSizing.createNetwork', 'robustPipelineSizing.createNetwork', (['distances'], {}), '(distances)\n', (9691, 9702), False, 'from FINE.expansionModules import robustPipelineSizing\n'), ((11191, 11218), 'pandas.Series', 'pd.Series', (['data'], {'index': 'keys'}), '(data, index=keys)\n', (11200, 11218), True, 'import pandas as pd\n'), ((11242, 11287), 'FINE.expansionModules.robustPipelineSizing.createNetwork', 'robustPipelineSizing.createNetwork', (['distances'], {}), '(distances)\n', (11276, 11287), False, 'from FINE.expansionModules import robustPipelineSizing\n'), ((13335, 13362), 'pandas.Series', 'pd.Series', (['data'], {'index': 'keys'}), '(data, index=keys)\n', (13344, 13362), True, 'import pandas as pd\n'), ((13386, 13431), 'FINE.expansionModules.robustPipelineSizing.createNetwork', 'robustPipelineSizing.createNetwork', (['distances'], {}), '(distances)\n', (13420, 13431), False, 'from FINE.expansionModules import robustPipelineSizing\n'), ((15478, 15505), 'pandas.Series', 'pd.Series', (['data'], {'index': 'keys'}), '(data, index=keys)\n', (15487, 15505), True, 'import pandas as pd\n'), ((15529, 15574), 'FINE.expansionModules.robustPipelineSizing.createNetwork', 'robustPipelineSizing.createNetwork', (['distances'], {}), '(distances)\n', (15563, 15574), False, 'from FINE.expansionModules import robustPipelineSizing\n'), ((17627, 17654), 'pandas.Series', 'pd.Series', (['data'], {'index': 'keys'}), '(data, index=keys)\n', (17636, 17654), True, 'import pandas as pd\n'), ((17678, 17723), 'FINE.expansionModules.robustPipelineSizing.createNetwork', 'robustPipelineSizing.createNetwork', (['distances'], {}), '(distances)\n', (17712, 17723), False, 'from FINE.expansionModules import robustPipelineSizing\n'), ((18445, 18478), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'injectionRates'}), '(data=injectionRates)\n', (18457, 18478), True, 'import pandas as pd\n'), ((22211, 22285), 'FINE.expansionModules.robustPipelineSizing.computeLargeMergedDiameters', 'robustPipelineSizing.computeLargeMergedDiameters', (['dic_diamToMerge_costs', '(6)'], {}), '(dic_diamToMerge_costs, 6)\n', (22259, 22285), False, 'from FINE.expansionModules import robustPipelineSizing\n'), ((22876, 22903), 'pandas.Series', 'pd.Series', (['data'], {'index': 'keys'}), '(data, index=keys)\n', (22885, 22903), True, 'import pandas as pd\n'), ((22927, 22972), 'FINE.expansionModules.robustPipelineSizing.createNetwork', 'robustPipelineSizing.createNetwork', (['distances'], {}), '(distances)\n', (22961, 22972), False, 'from FINE.expansionModules import robustPipelineSizing\n'), ((26530, 26655), 'FINE.expansionModules.robustPipelineSizing.determinePressureDropCoef', 'robustPipelineSizing.determinePressureDropCoef', (['testscenarios', 'distances', 'dic_node_minPress', 'dic_node_maxPress', 'diameters'], {}), '(testscenarios, distances,\n dic_node_minPress, dic_node_maxPress, diameters)\n', (26576, 26655), False, 'from FINE.expansionModules import robustPipelineSizing\n'), ((27242, 27269), 'pandas.Series', 'pd.Series', (['data'], {'index': 
'keys'}), '(data, index=keys)\n', (27251, 27269), True, 'import pandas as pd\n'), ((27293, 27338), 'FINE.expansionModules.robustPipelineSizing.createNetwork', 'robustPipelineSizing.createNetwork', (['distances'], {}), '(distances)\n', (27327, 27338), False, 'from FINE.expansionModules import robustPipelineSizing\n'), ((28014, 28047), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'injectionRates'}), '(data=injectionRates)\n', (28026, 28047), True, 'import pandas as pd\n'), ((28751, 28851), 'FINE.expansionModules.robustPipelineSizing.computeTimeStepFlows', 'robustPipelineSizing.computeTimeStepFlows', (['injectionWithdrawal', 'distances', 'graph', 'entries', 'exits'], {}), '(injectionWithdrawal, distances,\n graph, entries, exits)\n', (28792, 28851), False, 'from FINE.expansionModules import robustPipelineSizing\n'), ((29412, 29439), 'pandas.Series', 'pd.Series', (['data'], {'index': 'keys'}), '(data, index=keys)\n', (29421, 29439), True, 'import pandas as pd\n'), ((29463, 29508), 'FINE.expansionModules.robustPipelineSizing.createNetwork', 'robustPipelineSizing.createNetwork', (['distances'], {}), '(distances)\n', (29497, 29508), False, 'from FINE.expansionModules import robustPipelineSizing\n'), ((30240, 30273), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'injectionRates'}), '(data=injectionRates)\n', (30252, 30273), True, 'import pandas as pd\n'), ((30316, 30441), 'FINE.expansionModules.robustPipelineSizing.generateRobustScenarios', 'robustPipelineSizing.generateRobustScenarios', (['injectionWithdrawal', 'graph', 'distances', 'dic_node_minPress', 'dic_node_maxPress'], {}), '(injectionWithdrawal, graph,\n distances, dic_node_minPress, dic_node_maxPress)\n', (30360, 30441), False, 'from FINE.expansionModules import robustPipelineSizing\n'), ((30917, 31052), 'FINE.expansionModules.robustPipelineSizing.determinePressureDropCoef', 'robustPipelineSizing.determinePressureDropCoef', (['dic_robustTestScenarios', 'distances', 'dic_node_minPress', 'dic_node_maxPress', 'diameters'], {}), '(dic_robustTestScenarios,\n distances, dic_node_minPress, dic_node_maxPress, diameters)\n', (30963, 31052), False, 'from FINE.expansionModules import robustPipelineSizing\n'), ((31577, 31788), 'FINE.expansionModules.robustPipelineSizing.determineOptimalDiscretePipelineSelection', 'robustPipelineSizing.determineOptimalDiscretePipelineSelection', (['graph', 'distances', 'dic_pressure_coef', 'specialScenarionames', 'dic_node_minPress', 'dic_node_maxPress', 'dic_diameter_costs'], {'robust': '(True)', 'verbose': '(2)'}), '(graph,\n distances, dic_pressure_coef, specialScenarionames, dic_node_minPress,\n dic_node_maxPress, dic_diameter_costs, robust=True, verbose=2)\n', (31639, 31788), False, 'from FINE.expansionModules import robustPipelineSizing\n'), ((32367, 32394), 'pandas.Series', 'pd.Series', (['data'], {'index': 'keys'}), '(data, index=keys)\n', (32376, 32394), True, 'import pandas as pd\n'), ((32418, 32463), 'FINE.expansionModules.robustPipelineSizing.createNetwork', 'robustPipelineSizing.createNetwork', (['distances'], {}), '(distances)\n', (32452, 32463), False, 'from FINE.expansionModules import robustPipelineSizing\n'), ((33132, 33165), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'injectionRates'}), '(data=injectionRates)\n', (33144, 33165), True, 'import pandas as pd\n'), ((33255, 33355), 'FINE.expansionModules.robustPipelineSizing.computeTimeStepFlows', 'robustPipelineSizing.computeTimeStepFlows', (['injectionWithdrawal', 'distances', 'graph', 'entries', 'exits'], {}), '(injectionWithdrawal, 
distances,\n graph, entries, exits)\n', (33296, 33355), False, 'from FINE.expansionModules import robustPipelineSizing\n'), ((33440, 33570), 'FINE.expansionModules.robustPipelineSizing.determinePressureDropCoef', 'robustPipelineSizing.determinePressureDropCoef', (['dic_timeStep_flows', 'distances', 'dic_node_minPress', 'dic_node_maxPress', 'diameters'], {}), '(dic_timeStep_flows,\n distances, dic_node_minPress, dic_node_maxPress, diameters)\n', (33486, 33570), False, 'from FINE.expansionModules import robustPipelineSizing\n'), ((34106, 34318), 'FINE.expansionModules.robustPipelineSizing.determineOptimalDiscretePipelineSelection', 'robustPipelineSizing.determineOptimalDiscretePipelineSelection', (['graph', 'distances', 'dic_pressure_coef', 'specialScenarionames', 'dic_node_minPress', 'dic_node_maxPress', 'dic_diameter_costs'], {'robust': '(False)', 'verbose': '(2)'}), '(graph,\n distances, dic_pressure_coef, specialScenarionames, dic_node_minPress,\n dic_node_maxPress, dic_diameter_costs, robust=False, verbose=2)\n', (34168, 34318), False, 'from FINE.expansionModules import robustPipelineSizing\n'), ((34716, 34746), 'pandas.Series', 'pd.Series', (['(1000.0)'], {'index': '[arc]'}), '(1000.0, index=[arc])\n', (34725, 34746), True, 'import pandas as pd\n'), ((34804, 34923), 'FINE.expansionModules.robustPipelineSizing.computePressureStartnodeArc', 'robustPipelineSizing.computePressureStartnodeArc', (['arc', 'pressureEndNode', 'dic_scenario_flows', 'dic_arc_diam', 'distances'], {}), '(arc, pressureEndNode,\n dic_scenario_flows, dic_arc_diam, distances)\n', (34852, 34923), False, 'from FINE.expansionModules import robustPipelineSizing\n'), ((35220, 35339), 'FINE.expansionModules.robustPipelineSizing.computePressureStartnodeArc', 'robustPipelineSizing.computePressureStartnodeArc', (['arc', 'pressureEndNode', 'dic_scenario_flows', 'dic_arc_diam', 'distances'], {}), '(arc, pressureEndNode,\n dic_scenario_flows, dic_arc_diam, distances)\n', (35268, 35339), False, 'from FINE.expansionModules import robustPipelineSizing\n'), ((35512, 35631), 'FINE.expansionModules.robustPipelineSizing.computePressureStartnodeArc', 'robustPipelineSizing.computePressureStartnodeArc', (['arc', 'pressureEndNode', 'dic_scenario_flows', 'dic_arc_diam', 'distances'], {}), '(arc, pressureEndNode,\n dic_scenario_flows, dic_arc_diam, distances)\n', (35560, 35631), False, 'from FINE.expansionModules import robustPipelineSizing\n'), ((36090, 36120), 'pandas.Series', 'pd.Series', (['(1000.0)'], {'index': '[arc]'}), '(1000.0, index=[arc])\n', (36099, 36120), True, 'import pandas as pd\n'), ((36179, 36295), 'FINE.expansionModules.robustPipelineSizing.computePressureEndnodeArc', 'robustPipelineSizing.computePressureEndnodeArc', (['arc', 'pressStartNode', 'dic_scenario_flows', 'dic_arc_diam', 'distances'], {}), '(arc, pressStartNode,\n dic_scenario_flows, dic_arc_diam, distances)\n', (36225, 36295), False, 'from FINE.expansionModules import robustPipelineSizing\n'), ((36585, 36701), 'FINE.expansionModules.robustPipelineSizing.computePressureEndnodeArc', 'robustPipelineSizing.computePressureEndnodeArc', (['arc', 'pressStartNode', 'dic_scenario_flows', 'dic_arc_diam', 'distances'], {}), '(arc, pressStartNode,\n dic_scenario_flows, dic_arc_diam, distances)\n', (36631, 36701), False, 'from FINE.expansionModules import robustPipelineSizing\n'), ((36875, 36991), 'FINE.expansionModules.robustPipelineSizing.computePressureEndnodeArc', 'robustPipelineSizing.computePressureEndnodeArc', (['arc', 'pressStartNode', 'dic_scenario_flows', 
'dic_arc_diam', 'distances'], {}), '(arc, pressStartNode,\n dic_scenario_flows, dic_arc_diam, distances)\n', (36921, 36991), False, 'from FINE.expansionModules import robustPipelineSizing\n'), ((37593, 37620), 'pandas.Series', 'pd.Series', (['data'], {'index': 'keys'}), '(data, index=keys)\n', (37602, 37620), True, 'import pandas as pd\n'), ((37644, 37689), 'FINE.expansionModules.robustPipelineSizing.createNetwork', 'robustPipelineSizing.createNetwork', (['distances'], {}), '(distances)\n', (37678, 37689), False, 'from FINE.expansionModules import robustPipelineSizing\n'), ((38358, 38391), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'injectionRates'}), '(data=injectionRates)\n', (38370, 38391), True, 'import pandas as pd\n'), ((38481, 38581), 'FINE.expansionModules.robustPipelineSizing.computeTimeStepFlows', 'robustPipelineSizing.computeTimeStepFlows', (['injectionWithdrawal', 'distances', 'graph', 'entries', 'exits'], {}), '(injectionWithdrawal, distances,\n graph, entries, exits)\n', (38522, 38581), False, 'from FINE.expansionModules import robustPipelineSizing\n'), ((38666, 38796), 'FINE.expansionModules.robustPipelineSizing.determinePressureDropCoef', 'robustPipelineSizing.determinePressureDropCoef', (['dic_timeStep_flows', 'distances', 'dic_node_minPress', 'dic_node_maxPress', 'diameters'], {}), '(dic_timeStep_flows,\n distances, dic_node_minPress, dic_node_maxPress, diameters)\n', (38712, 38796), False, 'from FINE.expansionModules import robustPipelineSizing\n'), ((39009, 39210), 'FINE.expansionModules.robustPipelineSizing.determineOptimalDiscretePipelineSelection', 'robustPipelineSizing.determineOptimalDiscretePipelineSelection', (['graph', 'distances', 'dic_pressure_coef', 'specialScenarionames', 'dic_node_minPress', 'dic_node_maxPress', 'dic_diameter_costs'], {'robust': '(False)'}), '(graph,\n distances, dic_pressure_coef, specialScenarionames, dic_node_minPress,\n dic_node_maxPress, dic_diameter_costs, robust=False)\n', (39071, 39210), False, 'from FINE.expansionModules import robustPipelineSizing\n'), ((40402, 40609), 'FINE.expansionModules.robustPipelineSizing.computePressureAtNode', 'robustPipelineSizing.computePressureAtNode', (['validation', 'node', 'upperPressNode', 'graph', 'dic_arc_diam', 'distances', 'dic_timeStep_flow', 'dic_node_minPress', 'dic_node_maxPress', 'tmp_violation', 'dic_node_Pressure'], {}), '(validation, node, upperPressNode,\n graph, dic_arc_diam, distances, dic_timeStep_flow, dic_node_minPress,\n dic_node_maxPress, tmp_violation, dic_node_Pressure)\n', (40444, 40609), False, 'from FINE.expansionModules import robustPipelineSizing\n'), ((41512, 41719), 'FINE.expansionModules.robustPipelineSizing.computePressureAtNode', 'robustPipelineSizing.computePressureAtNode', (['validation', 'node', 'upperPressNode', 'graph', 'dic_arc_diam', 'distances', 'dic_timeStep_flow', 'dic_node_minPress', 'dic_node_maxPress', 'tmp_violation', 'dic_node_Pressure'], {}), '(validation, node, upperPressNode,\n graph, dic_arc_diam, distances, dic_timeStep_flow, dic_node_minPress,\n dic_node_maxPress, tmp_violation, dic_node_Pressure)\n', (41554, 41719), False, 'from FINE.expansionModules import robustPipelineSizing\n'), ((42496, 42523), 'pandas.Series', 'pd.Series', (['data'], {'index': 'keys'}), '(data, index=keys)\n', (42505, 42523), True, 'import pandas as pd\n'), ((42547, 42592), 'FINE.expansionModules.robustPipelineSizing.createNetwork', 'robustPipelineSizing.createNetwork', (['distances'], {}), '(distances)\n', (42581, 42592), False, 'from FINE.expansionModules import 
robustPipelineSizing\n'), ((44179, 44298), 'FINE.expansionModules.robustPipelineSizing.postprocessing', 'robustPipelineSizing.postprocessing', (['graph', 'distances', 'dic_arc_diam', 'testScen', 'dic_node_minPress', 'dic_node_maxPress'], {}), '(graph, distances, dic_arc_diam,\n testScen, dic_node_minPress, dic_node_maxPress)\n', (44214, 44298), False, 'from FINE.expansionModules import robustPipelineSizing\n'), ((45445, 45564), 'FINE.expansionModules.robustPipelineSizing.postprocessing', 'robustPipelineSizing.postprocessing', (['graph', 'distances', 'dic_arc_diam', 'testScen', 'dic_node_minPress', 'dic_node_maxPress'], {}), '(graph, distances, dic_arc_diam,\n testScen, dic_node_minPress, dic_node_maxPress)\n', (45480, 45564), False, 'from FINE.expansionModules import robustPipelineSizing\n'), ((495, 540), 'FINE.expansionModules.robustPipelineSizing.createNetwork', 'robustPipelineSizing.createNetwork', (['distances'], {}), '(distances)\n', (529, 540), False, 'from FINE.expansionModules import robustPipelineSizing\n'), ((3392, 3498), 'FINE.expansionModules.robustPipelineSizing.networkRefinement', 'robustPipelineSizing.networkRefinement', (['distances', 'maxPipeLength', 'dic_node_minPress', 'dic_node_maxPress'], {}), '(distances, maxPipeLength,\n dic_node_minPress, dic_node_maxPress)\n', (3430, 3498), False, 'from FINE.expansionModules import robustPipelineSizing\n'), ((4892, 5066), 'FINE.expansionModules.robustPipelineSizing.computeSingleSpecialScenario', 'robustPipelineSizing.computeSingleSpecialScenario', (['graph', 'distances', 'entries', 'exits', 'startNode', 'endNode', 'dic_nodes_MinCapacity', 'dic_nodes_MaxCapacity', 'specialScenario'], {}), '(graph, distances, entries,\n exits, startNode, endNode, dic_nodes_MinCapacity, dic_nodes_MaxCapacity,\n specialScenario)\n', (4941, 5066), False, 'from FINE.expansionModules import robustPipelineSizing\n'), ((5711, 5841), 'FINE.expansionModules.robustPipelineSizing.generateRobustScenarios', 'robustPipelineSizing.generateRobustScenarios', (['injectionWithdrawalRates', 'graph', 'distances', 'dic_node_minPress', 'dic_node_maxPress'], {}), '(injectionWithdrawalRates,\n graph, distances, dic_node_minPress, dic_node_maxPress)\n', (5755, 5841), False, 'from FINE.expansionModules import robustPipelineSizing\n'), ((35001, 35028), 'numpy.round', 'np.round', (['pressStartnode', '(3)'], {}), '(pressStartnode, 3)\n', (35009, 35028), True, 'import numpy as np\n'), ((35416, 35443), 'numpy.round', 'np.round', (['pressStartnode', '(3)'], {}), '(pressStartnode, 3)\n', (35424, 35443), True, 'import numpy as np\n'), ((35708, 35735), 'numpy.round', 'np.round', (['pressStartnode', '(3)'], {}), '(pressStartnode, 3)\n', (35716, 35735), True, 'import numpy as np\n'), ((35738, 35766), 'numpy.round', 'np.round', (['pressureEndNode', '(3)'], {}), '(pressureEndNode, 3)\n', (35746, 35766), True, 'import numpy as np\n'), ((36372, 36400), 'numpy.round', 'np.round', (['pressureEndNode', '(3)'], {}), '(pressureEndNode, 3)\n', (36380, 36400), True, 'import numpy as np\n'), ((36778, 36806), 'numpy.round', 'np.round', (['pressureEndNode', '(3)'], {}), '(pressureEndNode, 3)\n', (36786, 36806), True, 'import numpy as np\n'), ((37068, 37096), 'numpy.round', 'np.round', (['pressureEndNode', '(3)'], {}), '(pressureEndNode, 3)\n', (37076, 37096), True, 'import numpy as np\n'), ((37099, 37126), 'numpy.round', 'np.round', (['pressStartNode', '(3)'], {}), '(pressStartNode, 3)\n', (37107, 37126), True, 'import numpy as np\n'), ((6101, 6150), 'networkx.shortest_path', 'nx.shortest_path', 
(['graph', 'nodePair[0]', 'nodePair[1]'], {}), '(graph, nodePair[0], nodePair[1])\n', (6117, 6150), True, 'import networkx as nx\n'), ((1640, 1679), 'networkx.get_edge_attributes', 'nx.get_edge_attributes', (['graph', '"""length"""'], {}), "(graph, 'length')\n", (1662, 1679), True, 'import networkx as nx\n'), ((2891, 2930), 'networkx.get_edge_attributes', 'nx.get_edge_attributes', (['graph', '"""length"""'], {}), "(graph, 'length')\n", (2913, 2930), True, 'import networkx as nx\n'), ((7541, 7580), 'networkx.get_edge_attributes', 'nx.get_edge_attributes', (['graph', '"""length"""'], {}), "(graph, 'length')\n", (7563, 7580), True, 'import networkx as nx\n'), ((10114, 10154), 'networkx.get_edge_attributes', 'nx.get_edge_attributes', (['stTree', '"""length"""'], {}), "(stTree, 'length')\n", (10136, 10154), True, 'import networkx as nx\n'), ((1785, 1824), 'networkx.get_edge_attributes', 'nx.get_edge_attributes', (['graph', '"""length"""'], {}), "(graph, 'length')\n", (1807, 1824), True, 'import networkx as nx\n'), ((1922, 1961), 'networkx.get_edge_attributes', 'nx.get_edge_attributes', (['graph', '"""length"""'], {}), "(graph, 'length')\n", (1944, 1961), True, 'import networkx as nx\n'), ((3026, 3065), 'networkx.get_edge_attributes', 'nx.get_edge_attributes', (['graph', '"""length"""'], {}), "(graph, 'length')\n", (3048, 3065), True, 'import networkx as nx\n'), ((3135, 3174), 'networkx.get_edge_attributes', 'nx.get_edge_attributes', (['graph', '"""length"""'], {}), "(graph, 'length')\n", (3157, 3174), True, 'import networkx as nx\n'), ((7680, 7719), 'networkx.get_edge_attributes', 'nx.get_edge_attributes', (['graph', '"""length"""'], {}), "(graph, 'length')\n", (7702, 7719), True, 'import networkx as nx\n'), ((7814, 7853), 'networkx.get_edge_attributes', 'nx.get_edge_attributes', (['graph', '"""length"""'], {}), "(graph, 'length')\n", (7836, 7853), True, 'import networkx as nx\n'), ((10275, 10315), 'networkx.get_edge_attributes', 'nx.get_edge_attributes', (['stTree', '"""length"""'], {}), "(stTree, 'length')\n", (10297, 10315), True, 'import networkx as nx\n')]
|
from collections import OrderedDict
import numpy as np
import torch
import torch.optim as optim
from torch import nn as nn
import rlkit.torch.pytorch_util as ptu
from rlkit.core.eval_util import create_stats_ordered_dict
from rlkit.torch.torch_rl_algorithm import TorchTrainer
from rlkit.torch.networks import FlattenMlp_Dropout
from uncertainty_modeling.rl_uncertainty.rank1.r1bnn import Model
from uncertainty_modeling.rl_uncertainty.model import RegNetBase, SWAG, RaPP, get_diffs
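# unc_premodel loads a pre-trained uncertainty estimator (MC dropout, rank-1 BNN, RaPP or SWAG)
# for the given environment from a fixed checkpoint path and returns it on the GPU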
def unc_premodel(env, env_name, model_name):
path = './uncertainty_modeling/rl_uncertainty'
obs_dim = env.observation_space.low.size
action_dim = env.action_space.low.size
input_size = obs_dim + action_dim
model = None
if model_name == 'mc_dropout':
model = FlattenMlp_Dropout( # Check the dropout layer!
input_size=input_size,
output_size=1,
hidden_sizes=[256, 256],
).cuda()
if model_name == 'rank1':
model = Model(x_dim=input_size, h_dim=10, y_dim=1, n=10).cuda()
if model_name == 'rapp':
model = RaPP(input_size).cuda()
if model_name == 'swag':
kwargs = {"dimensions": [200, 50, 50, 50],
"output_dim": 1,
"input_dim": input_size}
args = list()
model = SWAG(RegNetBase, subspace_type="pca", *args, **kwargs,
subspace_kwargs={"max_rank": 10, "pca_rank": 10})
model.cuda()
if model == None:
raise AttributeError
else:
model.load_state_dict(torch.load('{}/{}/model/{}/model_1980.pt'.format(path, model_name, env_name)))
return model
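# uncertainty() scores a batch of (state, action) pairs with the pre-trained model; for RaPP the score is
# the mean squared reconstruction difference across hidden layers (other model types are not handled here)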
def uncertainty(state, action, pre_model, pre_model_name):
with torch.no_grad():
if pre_model_name == 'rapp':
dif = get_diffs(torch.cat([state, action], dim=1), pre_model)
difs = torch.cat([torch.from_numpy(i) for i in dif], dim=-1).cuda()
dif = (difs ** 2).mean(axis=1)
'''
unc = beta / dif # B
unc = unc.unsqueeze(1) # Bx1
# TODO: clipping on uncertainty
# unc_critic = torch.clamp(unc, 0.0, 1.5)
unc_critic = unc
'''
unc_critic = dif
return unc_critic
else:
exit()
class SACTrainer(TorchTrainer):
def __init__(
self,
pre_model,
env_name,
env,
policy,
qf1,
qf2,
target_qf1,
target_qf2,
discount=0.99,
reward_scale=1.0,
policy_lr=1e-3,
qf_lr=1e-3,
optimizer_class=optim.Adam,
soft_target_tau=1e-2,
target_update_period=1,
plotter=None,
render_eval_paths=False,
use_automatic_entropy_tuning=True,
target_entropy=None,
policy_eval_start=0,
beta=1.0,
):
super().__init__()
self.env = env
self.policy = policy
self.qf1 = qf1
self.qf2 = qf2
self.target_qf1 = target_qf1
self.target_qf2 = target_qf2
self.soft_target_tau = soft_target_tau
self.target_update_period = target_update_period
# variables for sac uncertainty
self._current_epoch = 0
self.policy_eval_start = policy_eval_start
self.beta = beta
self.use_automatic_entropy_tuning = use_automatic_entropy_tuning
if self.use_automatic_entropy_tuning:
if target_entropy:
self.target_entropy = target_entropy
else:
self.target_entropy = -np.prod(self.env.action_space.shape).item() # heuristic value from Tuomas
self.log_alpha = ptu.zeros(1, requires_grad=True)
self.alpha_optimizer = optimizer_class(
[self.log_alpha],
lr=policy_lr,
)
self.plotter = plotter
self.render_eval_paths = render_eval_paths
self.qf_criterion = nn.MSELoss()
self.vf_criterion = nn.MSELoss()
self.policy_optimizer = optimizer_class(
self.policy.parameters(),
lr=policy_lr,
)
self.qf1_optimizer = optimizer_class(
self.qf1.parameters(),
lr=qf_lr,
)
self.qf2_optimizer = optimizer_class(
self.qf2.parameters(),
lr=qf_lr,
)
self.discount = discount
self.reward_scale = reward_scale
self.eval_statistics = OrderedDict()
self._n_train_steps_total = 0
self._need_to_update_eval_statistics = True
self.discrete = False
self.pre_model_name = pre_model
self.pre_model = unc_premodel(self.env, env_name, pre_model)
        # Compute dataset-wide extrema used later to normalize observations and actions
self.dataset = env.get_dataset()
all_obs = torch.tensor(self.dataset['observations'])
all_act = torch.tensor(self.dataset['actions'])
self.min_obs = torch.min(all_obs)
self.max_obs = torch.max(all_obs)
self.min_act = torch.min(all_act)
self.max_act = torch.max(all_act)
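    # min-max normalize states/actions with the dataset-wide extrema computed above, presumably to
    # match the scaling used when the uncertainty model was trained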
def normalize_state_action(self, state, action):
state = (state - self.min_obs) / (self.max_obs - self.min_obs)
action = (action - self.min_act) / (self.max_act - self.min_act)
return state, action
def train_from_torch(self, batch):
self._current_epoch += 1
rewards = batch['rewards']
terminals = batch['terminals']
obs = batch['observations']
actions = batch['actions']
next_obs = batch['next_observations']
"""
Policy and Alpha Loss and Beta Uncertainty Loss
"""
new_obs_actions, policy_mean, policy_log_std, log_pi, *_ = self.policy(
obs, reparameterize=True, return_log_prob=True,
)
if self.use_automatic_entropy_tuning:
alpha_loss = -(self.log_alpha * (log_pi + self.target_entropy).detach()).mean()
self.alpha_optimizer.zero_grad()
alpha_loss.backward()
self.alpha_optimizer.step()
alpha = self.log_alpha.exp()
else:
alpha_loss = 0
alpha = 1
q_new_actions = torch.min(
self.qf1(obs, new_obs_actions),
self.qf2(obs, new_obs_actions),
)
# policy uncertainty
norm_obs, norm_new_obs_actions = self.normalize_state_action(obs, new_obs_actions)
policy_unc = uncertainty(norm_obs, norm_new_obs_actions, self.pre_model, self.pre_model_name)[..., None].detach()
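        # penalize the Q-value with beta * estimated uncertainty so the policy is steered away from
        # state-action pairs the pre-trained model flags as out-of-distribution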
policy_loss = (alpha * log_pi - (q_new_actions - self.beta * policy_unc)).mean()
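        # optional behavior-cloning warm start: for the first policy_eval_start epochs the policy is
        # instead trained to maximize the log-probability of the dataset actions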
if self._current_epoch < self.policy_eval_start:
policy_log_prob = self.policy.log_prob(obs, actions)
policy_loss = (alpha * log_pi - policy_log_prob).mean()
"""
QF Loss and Beta Uncertainty Loss
"""
q1_pred = self.qf1(obs, actions)
q2_pred = self.qf2(obs, actions)
# Make sure policy accounts for squashing functions like tanh correctly!
new_next_actions, _, _, new_log_pi, *_ = self.policy(
next_obs, reparameterize=True, return_log_prob=True,
)
target_q_values = torch.min(
self.target_qf1(next_obs, new_next_actions),
self.target_qf2(next_obs, new_next_actions),
) - alpha * new_log_pi
# critic uncertainty
norm_next_obs, norm_new_next_actions = self.normalize_state_action(next_obs, new_next_actions)
critic_unc = uncertainty(norm_next_obs, norm_new_next_actions, self.pre_model, self.pre_model_name)[..., None]
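        # conservative Bellman target: subtract beta * uncertainty of the next state-action pair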
target_q_values = target_q_values - self.beta * critic_unc
q_target = self.reward_scale * rewards + (1. - terminals) * self.discount * target_q_values
qf1_loss = self.qf_criterion(q1_pred, q_target.detach())
qf2_loss = self.qf_criterion(q2_pred, q_target.detach())
"""
Update networks
"""
self.qf1_optimizer.zero_grad()
qf1_loss.backward()
self.qf1_optimizer.step()
self.qf2_optimizer.zero_grad()
qf2_loss.backward()
self.qf2_optimizer.step()
self.policy_optimizer.zero_grad()
policy_loss.backward()
self.policy_optimizer.step()
"""
Soft Updates
"""
if self._n_train_steps_total % self.target_update_period == 0:
ptu.soft_update_from_to(
self.qf1, self.target_qf1, self.soft_target_tau
)
ptu.soft_update_from_to(
self.qf2, self.target_qf2, self.soft_target_tau
)
"""
Save some statistics for eval
"""
if self._need_to_update_eval_statistics:
self._need_to_update_eval_statistics = False
"""
Eval should set this to None.
This way, these statistics are only computed for one batch.
"""
policy_loss = (log_pi - q_new_actions).mean()
self.eval_statistics['QF1 Loss'] = np.mean(ptu.get_numpy(qf1_loss))
self.eval_statistics['QF2 Loss'] = np.mean(ptu.get_numpy(qf2_loss))
self.eval_statistics['Policy Loss'] = np.mean(ptu.get_numpy(
policy_loss
))
self.eval_statistics.update(create_stats_ordered_dict(
'Q1 Predictions',
ptu.get_numpy(q1_pred),
))
self.eval_statistics.update(create_stats_ordered_dict(
'Q2 Predictions',
ptu.get_numpy(q2_pred),
))
self.eval_statistics.update(create_stats_ordered_dict(
'Q Targets',
ptu.get_numpy(q_target),
))
self.eval_statistics.update(create_stats_ordered_dict(
'Log Pis',
ptu.get_numpy(log_pi),
))
self.eval_statistics.update(create_stats_ordered_dict(
'Policy mu',
ptu.get_numpy(policy_mean),
))
self.eval_statistics.update(create_stats_ordered_dict(
'Policy log std',
ptu.get_numpy(policy_log_std),
))
if self.use_automatic_entropy_tuning:
self.eval_statistics['Alpha'] = alpha.item()
self.eval_statistics['Alpha Loss'] = alpha_loss.item()
self._n_train_steps_total += 1
def get_diagnostics(self):
return self.eval_statistics
def end_epoch(self, epoch):
self._need_to_update_eval_statistics = True
@property
def networks(self):
return [
self.policy,
self.qf1,
self.qf2,
self.target_qf1,
self.target_qf2,
]
def get_snapshot(self):
return dict(
policy=self.policy,
qf1=self.qf1,
qf2=self.qf2,
            target_qf1=self.target_qf1,
            target_qf2=self.target_qf2,
)
|
[
"uncertainty_modeling.rl_uncertainty.model.RaPP",
"collections.OrderedDict",
"rlkit.torch.networks.FlattenMlp_Dropout",
"numpy.prod",
"torch.max",
"torch.from_numpy",
"torch.min",
"torch.tensor",
"torch.nn.MSELoss",
"rlkit.torch.pytorch_util.get_numpy",
"uncertainty_modeling.rl_uncertainty.model.SWAG",
"uncertainty_modeling.rl_uncertainty.rank1.r1bnn.Model",
"torch.no_grad",
"rlkit.torch.pytorch_util.soft_update_from_to",
"torch.cat",
"rlkit.torch.pytorch_util.zeros"
] |
[((1308, 1417), 'uncertainty_modeling.rl_uncertainty.model.SWAG', 'SWAG', (['RegNetBase', '*args'], {'subspace_type': '"""pca"""', 'subspace_kwargs': "{'max_rank': 10, 'pca_rank': 10}"}), "(RegNetBase, *args, subspace_type='pca', **kwargs, subspace_kwargs={\n 'max_rank': 10, 'pca_rank': 10})\n", (1312, 1417), False, 'from uncertainty_modeling.rl_uncertainty.model import RegNetBase, SWAG, RaPP, get_diffs\n'), ((1716, 1731), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1729, 1731), False, 'import torch\n'), ((4046, 4058), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (4056, 4058), True, 'from torch import nn as nn\n'), ((4087, 4099), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (4097, 4099), True, 'from torch import nn as nn\n'), ((4556, 4569), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (4567, 4569), False, 'from collections import OrderedDict\n'), ((4913, 4955), 'torch.tensor', 'torch.tensor', (["self.dataset['observations']"], {}), "(self.dataset['observations'])\n", (4925, 4955), False, 'import torch\n'), ((4974, 5011), 'torch.tensor', 'torch.tensor', (["self.dataset['actions']"], {}), "(self.dataset['actions'])\n", (4986, 5011), False, 'import torch\n'), ((5035, 5053), 'torch.min', 'torch.min', (['all_obs'], {}), '(all_obs)\n', (5044, 5053), False, 'import torch\n'), ((5077, 5095), 'torch.max', 'torch.max', (['all_obs'], {}), '(all_obs)\n', (5086, 5095), False, 'import torch\n'), ((5119, 5137), 'torch.min', 'torch.min', (['all_act'], {}), '(all_act)\n', (5128, 5137), False, 'import torch\n'), ((5161, 5179), 'torch.max', 'torch.max', (['all_act'], {}), '(all_act)\n', (5170, 5179), False, 'import torch\n'), ((3771, 3803), 'rlkit.torch.pytorch_util.zeros', 'ptu.zeros', (['(1)'], {'requires_grad': '(True)'}), '(1, requires_grad=True)\n', (3780, 3803), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((8540, 8612), 'rlkit.torch.pytorch_util.soft_update_from_to', 'ptu.soft_update_from_to', (['self.qf1', 'self.target_qf1', 'self.soft_target_tau'], {}), '(self.qf1, self.target_qf1, self.soft_target_tau)\n', (8563, 8612), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((8655, 8727), 'rlkit.torch.pytorch_util.soft_update_from_to', 'ptu.soft_update_from_to', (['self.qf2', 'self.target_qf2', 'self.soft_target_tau'], {}), '(self.qf2, self.target_qf2, self.soft_target_tau)\n', (8678, 8727), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((777, 862), 'rlkit.torch.networks.FlattenMlp_Dropout', 'FlattenMlp_Dropout', ([], {'input_size': 'input_size', 'output_size': '(1)', 'hidden_sizes': '[256, 256]'}), '(input_size=input_size, output_size=1, hidden_sizes=[256,\n 256])\n', (795, 862), False, 'from rlkit.torch.networks import FlattenMlp_Dropout\n'), ((987, 1035), 'uncertainty_modeling.rl_uncertainty.rank1.r1bnn.Model', 'Model', ([], {'x_dim': 'input_size', 'h_dim': '(10)', 'y_dim': '(1)', 'n': '(10)'}), '(x_dim=input_size, h_dim=10, y_dim=1, n=10)\n', (992, 1035), False, 'from uncertainty_modeling.rl_uncertainty.rank1.r1bnn import Model\n'), ((1088, 1104), 'uncertainty_modeling.rl_uncertainty.model.RaPP', 'RaPP', (['input_size'], {}), '(input_size)\n', (1092, 1104), False, 'from uncertainty_modeling.rl_uncertainty.model import RegNetBase, SWAG, RaPP, get_diffs\n'), ((1798, 1831), 'torch.cat', 'torch.cat', (['[state, action]'], {'dim': '(1)'}), '([state, action], dim=1)\n', (1807, 1831), False, 'import torch\n'), ((9187, 9210), 'rlkit.torch.pytorch_util.get_numpy', 'ptu.get_numpy', (['qf1_loss'], {}), '(qf1_loss)\n', (9200, 9210), True, 'import 
rlkit.torch.pytorch_util as ptu\n'), ((9267, 9290), 'rlkit.torch.pytorch_util.get_numpy', 'ptu.get_numpy', (['qf2_loss'], {}), '(qf2_loss)\n', (9280, 9290), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((9350, 9376), 'rlkit.torch.pytorch_util.get_numpy', 'ptu.get_numpy', (['policy_loss'], {}), '(policy_loss)\n', (9363, 9376), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((9525, 9547), 'rlkit.torch.pytorch_util.get_numpy', 'ptu.get_numpy', (['q1_pred'], {}), '(q1_pred)\n', (9538, 9547), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((9681, 9703), 'rlkit.torch.pytorch_util.get_numpy', 'ptu.get_numpy', (['q2_pred'], {}), '(q2_pred)\n', (9694, 9703), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((9832, 9855), 'rlkit.torch.pytorch_util.get_numpy', 'ptu.get_numpy', (['q_target'], {}), '(q_target)\n', (9845, 9855), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((9982, 10003), 'rlkit.torch.pytorch_util.get_numpy', 'ptu.get_numpy', (['log_pi'], {}), '(log_pi)\n', (9995, 10003), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((10132, 10158), 'rlkit.torch.pytorch_util.get_numpy', 'ptu.get_numpy', (['policy_mean'], {}), '(policy_mean)\n', (10145, 10158), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((10292, 10321), 'rlkit.torch.pytorch_util.get_numpy', 'ptu.get_numpy', (['policy_log_std'], {}), '(policy_log_std)\n', (10305, 10321), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((1874, 1893), 'torch.from_numpy', 'torch.from_numpy', (['i'], {}), '(i)\n', (1890, 1893), False, 'import torch\n'), ((3667, 3703), 'numpy.prod', 'np.prod', (['self.env.action_space.shape'], {}), '(self.env.action_space.shape)\n', (3674, 3703), True, 'import numpy as np\n')]
|
"""
Generate samples for a corpus tag and for a submission.
"""
import json
import logging
import numpy as np
from . import db
from . import distribution
from .sample_util import sample_without_replacement
from .counter_utils import normalize
logger = logging.getLogger(__name__)
def sample_document_uniform(corpus_tag, n_samples):
# Get distribution
P = distribution.document_uniform(corpus_tag)
# Get samples
doc_ids = sample_without_replacement(P, n_samples)
with db.CONN:
with db.CONN.cursor() as cur:
cur.execute("""
INSERT INTO sample_batch(distribution_type, corpus_tag, params) VALUES %s RETURNING id
""", [('uniform', corpus_tag, json.dumps({'type':'uniform', 'with_replacement': False}),)])
batch_id, = next(cur)
db.execute_values(cur, """
INSERT INTO document_sample(batch_id, doc_id) VALUES %s
""", [(batch_id, doc_id) for doc_id in doc_ids])
def test_sample_document_uniform():
np.random.seed(42)
tag = 'kbp2016'
db.execute("""TRUNCATE sample_batch CASCADE;
ALTER SEQUENCE sample_batch_id_seq RESTART;
""")
sample_document_uniform(tag, 20)
batches = db.select("""SELECT id, submission_id, distribution_type, corpus_tag, params FROM sample_batch""")
assert len(batches) == 1
batch = batches[0]
assert batch.id == 1
assert batch.submission_id is None
assert batch.distribution_type == "uniform"
assert batch.corpus_tag == "kbp2016"
assert batch.params == {"type":"uniform", "with_replacement": False}
docs = db.select("""SELECT doc_id FROM document_sample WHERE batch_id=%(batch_id)s""", batch_id=batch.id)
assert len(docs) == 20
def sample_document_entity(corpus_tag, n_samples, mention_table='evaluation_mention'):
# Get documents
seed_documents = [(row.doc_id,) for row in db.select("""
SELECT s.doc_id
FROM document_sample s,
document_tag d
WHERE s.doc_id = d.doc_id AND d.tag = %(corpus_tag)s
""", corpus_tag=corpus_tag)]
# Get distribution
P = distribution.document_entity(corpus_tag, seed_documents, mention_table=mention_table)
# Remove seed documents.
for doc_id in seed_documents:
P[doc_id] = 0.
P = normalize(P)
# Get samples
doc_ids = sample_without_replacement(P, n_samples)
with db.CONN:
with db.CONN.cursor() as cur:
cur.execute("""
INSERT INTO sample_batch(distribution_type, corpus_tag, params) VALUES %s RETURNING id
""", [('entity', corpus_tag, json.dumps({'type':'entity', 'with_replacement': False}),)])
batch_id, = next(cur)
db.execute_values(cur, """
INSERT INTO document_sample(batch_id, doc_id) VALUES %s
""", [(batch_id, doc_id) for doc_id in doc_ids])
def test_sample_document_entity():
tag = 'kbp2016'
db.execute("""TRUNCATE sample_batch CASCADE;
ALTER SEQUENCE sample_batch_id_seq RESTART;
""")
sample_document_uniform(tag, 20)
sample_document_entity(tag, 20, mention_table="suggested_mention")
batches = db.select("""SELECT id, submission_id, distribution_type, corpus_tag, params FROM sample_batch""")
assert len(batches) == 2
batch = batches[1]
assert batch.id == 2
assert batch.submission_id is None
assert batch.distribution_type == "entity"
assert batch.corpus_tag == "kbp2016"
assert batch.params == {"type":"entity", "with_replacement": False}
docs = db.select("""SELECT doc_id FROM document_sample WHERE batch_id=%(batch_id)s""", batch_id=batch.id)
assert len(docs) == 20
# TODO: compute sample size
def sample_submission(corpus_tag, submission_id, type_, n_samples):
# Get distribution
logger.info("Computing distributions")
if type_ == "instance":
P = distribution.submission_instance(corpus_tag, submission_id)
elif type_ == "relation":
P = distribution.submission_relation(corpus_tag, submission_id)
elif type_ == "entity":
P = distribution.submission_entity(corpus_tag, submission_id)
elif type_ == "entity_relation":
P = distribution.submission_entity_relation(corpus_tag, submission_id)
else:
raise ValueError("Invalid submission sampling distribution type: {}".format(type_))
# Get samples
logger.info("Drawing samples")
relation_mentions = sample_without_replacement(P[submission_id], n_samples)
logger.info("Loading samples into batch")
with db.CONN:
with db.CONN.cursor() as cur:
cur.execute("""
INSERT INTO sample_batch(submission_id, distribution_type, corpus_tag, params) VALUES %s RETURNING id
""", [(submission_id, type_, corpus_tag, json.dumps({'submission_id':submission_id, 'type':type_, 'with_replacement': False}),)])
batch_id, = next(cur)
db.execute_values(cur, """
INSERT INTO submission_sample(batch_id, submission_id, doc_id, subject, object) VALUES %s
""", [(batch_id, submission_id, doc_id, db.Int4NumericRange(*subject), db.Int4NumericRange(*object_)) for doc_id, subject, object_ in relation_mentions])
return batch_id
def test_sample_submission_instance():
tag = 'kbp2016'
submission_id = 1 # patterns
db.execute("""TRUNCATE sample_batch CASCADE;
ALTER SEQUENCE sample_batch_id_seq RESTART;
""")
sample_submission(tag, submission_id, 'instance', 20)
batches = db.select("""SELECT id, submission_id, distribution_type, corpus_tag, params FROM sample_batch""")
assert len(batches) == 1
batch = batches[0]
assert batch.id == 1
assert batch.submission_id == submission_id
assert batch.distribution_type == "instance"
assert batch.corpus_tag == "kbp2016"
assert batch.params == {"submission_id": submission_id, "type":"instance", "with_replacement": False}
relation_mentions = db.select("""SELECT doc_id, subject, object FROM submission_sample WHERE batch_id=%(batch_id)s AND submission_id=%(submission_id)s""", batch_id=batch.id, submission_id=submission_id)
assert len(relation_mentions) == 20
def test_sample_submission_relation():
tag = 'kbp2016'
submission_id = 1 # patterns
db.execute("""TRUNCATE sample_batch CASCADE;
ALTER SEQUENCE sample_batch_id_seq RESTART;
""")
sample_submission(tag, submission_id, 'relation', 20)
batches = db.select("""SELECT id, submission_id, distribution_type, corpus_tag, params FROM sample_batch""")
assert len(batches) == 1
batch = batches[0]
assert batch.id == 1
assert batch.submission_id == submission_id
assert batch.distribution_type == "relation"
assert batch.corpus_tag == "kbp2016"
assert batch.params == {"submission_id": submission_id, "type":"relation", "with_replacement": False}
relation_mentions = db.select("""SELECT doc_id, subject, object FROM submission_sample WHERE batch_id=%(batch_id)s AND submission_id=%(submission_id)s""", batch_id=batch.id, submission_id=submission_id)
assert len(relation_mentions) == 20
def test_sample_submission_entity():
tag = 'kbp2016'
submission_id = 1 # patterns
db.execute("""TRUNCATE sample_batch CASCADE;
ALTER SEQUENCE sample_batch_id_seq RESTART;
""")
sample_submission(tag, submission_id, 'entity', 20)
batches = db.select("""SELECT id, submission_id, distribution_type, corpus_tag, params FROM sample_batch""")
assert len(batches) == 1
batch = batches[0]
assert batch.id == 1
assert batch.submission_id == submission_id
assert batch.distribution_type == "entity"
assert batch.corpus_tag == "kbp2016"
assert batch.params == {"submission_id": submission_id, "type":"entity", "with_replacement": False}
relation_mentions = db.select("""SELECT doc_id, subject, object FROM submission_sample WHERE batch_id=%(batch_id)s AND submission_id=%(submission_id)s""", batch_id=batch.id, submission_id=submission_id)
assert len(relation_mentions) == 20
|
[
"logging.getLogger",
"json.dumps",
"numpy.random.seed"
] |
[((256, 283), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (273, 283), False, 'import logging\n'), ((1031, 1049), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (1045, 1049), True, 'import numpy as np\n'), ((718, 776), 'json.dumps', 'json.dumps', (["{'type': 'uniform', 'with_replacement': False}"], {}), "({'type': 'uniform', 'with_replacement': False})\n", (728, 776), False, 'import json\n'), ((2656, 2713), 'json.dumps', 'json.dumps', (["{'type': 'entity', 'with_replacement': False}"], {}), "({'type': 'entity', 'with_replacement': False})\n", (2666, 2713), False, 'import json\n'), ((4877, 4967), 'json.dumps', 'json.dumps', (["{'submission_id': submission_id, 'type': type_, 'with_replacement': False}"], {}), "({'submission_id': submission_id, 'type': type_,\n 'with_replacement': False})\n", (4887, 4967), False, 'import json\n')]
|
# Compute multivariate ESS using the multi_ess function from eeyore
# %% Load packages
import numpy as np
import torch
from eeyore.stats import multi_ess
# %% Read chains
chains = torch.as_tensor(np.genfromtxt('chain01.csv', delimiter=','))
# %% Compute multivariate ESS using INSE MC covariance estimation
ess_val = multi_ess(chains)
print('Multivariate ESS using INSE MC covariance estimation: {}'.format(ess_val))
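# For reference, the multivariate ESS of Vats, Flegal & Jones (2019) is
#     mESS = n * (det(Lambda) / det(Sigma)) ** (1 / p),
# where n is the chain length, p the dimension, Lambda the sample covariance and
# Sigma an estimate of the Monte Carlo (asymptotic) covariance. It is assumed that
# eeyore's multi_ess follows this definition with an INSE estimate of Sigma; the
# hypothetical sketch below is illustrative only and uses a crude batch-means Sigma.
def _multi_ess_sketch(samples, n_batches=20):
    x = np.asarray(samples, dtype=float)
    n, p = x.shape
    lam = np.cov(x, rowvar=False)  # sample covariance
    batch_means = np.stack([b.mean(axis=0) for b in np.array_split(x, n_batches)])
    sigma = (n // n_batches) * np.cov(batch_means, rowvar=False)  # batch-means estimate
    return n * (np.linalg.det(lam) / np.linalg.det(sigma)) ** (1.0 / p)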
|
[
"numpy.genfromtxt",
"eeyore.stats.multi_ess"
] |
[((324, 341), 'eeyore.stats.multi_ess', 'multi_ess', (['chains'], {}), '(chains)\n', (333, 341), False, 'from eeyore.stats import multi_ess\n'), ((201, 244), 'numpy.genfromtxt', 'np.genfromtxt', (['"""chain01.csv"""'], {'delimiter': '""","""'}), "('chain01.csv', delimiter=',')\n", (214, 244), True, 'import numpy as np\n')]
|
import numpy as np
import scipy as sp
import scipy.sparse
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.path
import time
plt.ion()
import pybie2d
"""
Demonstrate how to use the pybie2d package to solve an interior Laplace problem
on a complicated domain using a global quadrature.
This example is built entirely from low-level routines, both to show how to use
those routines and to give an idea of what is going on under the hood in the
higher-level routines.
"""
NG = 100
h_max = 0.01
# extract some functions for easy calling
PPB = pybie2d.boundaries.panel_polygon_boundary.panel_polygon_boundary.Panel_Polygon_Boundary
Grid = pybie2d.grid.Grid
PointSet = pybie2d.point_set.PointSet
Laplace_Layer_Form = pybie2d.kernels.high_level.laplace.Laplace_Layer_Form
Laplace_Layer_Singular_Form = pybie2d.kernels.high_level.laplace.Laplace_Layer_Singular_Form
Laplace_Layer_Apply = pybie2d.kernels.high_level.laplace.Laplace_Layer_Apply
Cauchy_Layer_Apply = pybie2d.kernels.high_level.cauchy.Cauchy_Layer_Apply
Find_Near_Points = pybie2d.misc.near_points.find_near_points
Pairing = pybie2d.pairing.Pairing
################################################################################
# define problem
# boundary
boundary = PPB([0,1,1,0], [0,0,1,1], [h_max]*4, [True]*4)
# solution
solution_func = lambda x, y: 2*x + y
bc = solution_func(boundary.x, boundary.y)
def err_plot(up):
# compute the error
errorp = up - solution_func(full_grid.xg[phys], full_grid.yg[phys])
digitsp = -np.log10(np.abs(errorp)+1e-16)
digits = np.zeros_like(full_grid.xg)
digits[phys] = digitsp
mdigits = np.ma.array(digits, mask=ext)
# plot the error as a function of space (only good in interior)
fig, ax = plt.subplots(1,1)
clf = ax.imshow(mdigits[:,::-1].T, extent=[0,1,0,1],
cmap=mpl.cm.viridis_r)
ax.set_aspect('equal')
fig.colorbar(clf)
print('Error: {:0.2e}'.format(np.abs(errorp).max()))
################################################################################
##### solve problem the hard way ###############################################
################################################################################
################################################################################
# find physical region
# (this implements a fast way to tell if points are in or out of the boundary)
# (and of course, for this simple square boundary, we could easily figure out
# something faster, but this illustrates a general purpose routine)
full_grid = Grid([0,1], NG, [0,1], NG, x_endpoints=[False,False], y_endpoints=[False,False])
# this is hiding a lot of stuff!
phys, ext = boundary.find_interior_points(full_grid)
phys = full_grid.reshape(phys)
ext = full_grid.reshape(ext)
################################################################################
# solve for the density
DLP = Laplace_Layer_Singular_Form(boundary, ifdipole=True)
A = -0.5*np.eye(boundary.N) + DLP
AI = np.linalg.inv(A)
tau = AI.dot(bc)
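# Note on the system solved above: the solution is represented as a double-layer
# potential u = D[tau]. Taking the interior limit of a double-layer potential picks
# up a jump of -tau/2 on the boundary, so matching the Dirichlet data bc gives
# (-0.5*I + D) tau = bc, which is exactly the matrix A formed above. Forming the
# explicit inverse is fine for this small demo; for larger problems
# tau = np.linalg.solve(A, bc) would be the more standard way to solve the system.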
################################################################################
# naive evaluation
# generate a target for the physical grid
gridp = Grid([0,1], NG, [0,1], NG, mask=phys, x_endpoints=[False,False], y_endpoints=[False,False])
# evaluate at the target points
u = np.zeros_like(gridp.xg)
up = Laplace_Layer_Apply(boundary, gridp, dipstr=tau)
err_plot(up)
################################################################################
# use the oversampling features
hmax = gridp.xg[1,0] - gridp.xg[0,0]
fbdy, IMAT = boundary.prepare_oversampling(hmax/6.0)
IMAT = sp.sparse.csr_matrix(IMAT)
ftau = IMAT.dot(tau)
up = Laplace_Layer_Apply(fbdy, gridp, dipstr=ftau)
err_plot(up)
|
[
"numpy.abs",
"numpy.eye",
"numpy.ma.array",
"numpy.linalg.inv",
"matplotlib.pyplot.ion",
"scipy.sparse.csr_matrix",
"numpy.zeros_like",
"matplotlib.pyplot.subplots"
] |
[((150, 159), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (157, 159), True, 'import matplotlib.pyplot as plt\n'), ((2983, 2999), 'numpy.linalg.inv', 'np.linalg.inv', (['A'], {}), '(A)\n', (2996, 2999), True, 'import numpy as np\n'), ((3298, 3321), 'numpy.zeros_like', 'np.zeros_like', (['gridp.xg'], {}), '(gridp.xg)\n', (3311, 3321), True, 'import numpy as np\n'), ((3601, 3627), 'scipy.sparse.csr_matrix', 'sp.sparse.csr_matrix', (['IMAT'], {}), '(IMAT)\n', (3621, 3627), True, 'import scipy as sp\n'), ((1592, 1619), 'numpy.zeros_like', 'np.zeros_like', (['full_grid.xg'], {}), '(full_grid.xg)\n', (1605, 1619), True, 'import numpy as np\n'), ((1655, 1684), 'numpy.ma.array', 'np.ma.array', (['digits'], {'mask': 'ext'}), '(digits, mask=ext)\n', (1666, 1684), True, 'import numpy as np\n'), ((1762, 1780), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (1774, 1780), True, 'import matplotlib.pyplot as plt\n'), ((2953, 2971), 'numpy.eye', 'np.eye', (['boundary.N'], {}), '(boundary.N)\n', (2959, 2971), True, 'import numpy as np\n'), ((1560, 1574), 'numpy.abs', 'np.abs', (['errorp'], {}), '(errorp)\n', (1566, 1574), True, 'import numpy as np\n'), ((1944, 1958), 'numpy.abs', 'np.abs', (['errorp'], {}), '(errorp)\n', (1950, 1958), True, 'import numpy as np\n')]
|
import numpy
n = int(input())
matrix = []
for i in range(n):
matrix.append(list(map(float,input().split())))
print(round(numpy.linalg.det(matrix), 2))
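# Example run (input format: n, then n rows of n space-separated values):
#   2
#   1 2
#   3 4
# numpy.linalg.det returns a float very close to -2.0 for this matrix
# (1*4 - 2*3 = -2), so the rounded value printed above is -2.0.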
|
[
"numpy.linalg.det"
] |
[((129, 153), 'numpy.linalg.det', 'numpy.linalg.det', (['matrix'], {}), '(matrix)\n', (145, 153), False, 'import numpy\n')]
|
#
# Author: <NAME>
# Copyright 2015-present, NASA-JPL/Caltech
#
import os
import glob
import logging
import datetime
import numpy as np
import isceobj
import isceobj.Sensor.MultiMode as MultiMode
from isceobj.Planet.Planet import Planet
from isceobj.Alos2Proc.Alos2ProcPublic import runCmd
from isceobj.Alos2Proc.Alos2ProcPublic import getBboxRdr
from isceobj.Alos2Proc.Alos2ProcPublic import getBboxGeo
from isceobj.Alos2Proc.Alos2ProcPublic import modeProcParDict
logger = logging.getLogger('isce.alos2insar.runPreprocessor')
def runPreprocessor(self):
'''Extract images.
'''
catalog = isceobj.Catalog.createCatalog(self._insar.procDoc.name)
#find files
#actually no need to use absolute path any longer, since we are able to find file from vrt now. 27-JAN-2020, CRL.
#denseoffset may still need absolute path when making links
self.referenceDir = os.path.abspath(self.referenceDir)
self.secondaryDir = os.path.abspath(self.secondaryDir)
ledFilesReference = sorted(glob.glob(os.path.join(self.referenceDir, 'LED-ALOS2*-*-*')))
imgFilesReference = sorted(glob.glob(os.path.join(self.referenceDir, 'IMG-{}-ALOS2*-*-*'.format(self.referencePolarization.upper()))))
ledFilesSecondary = sorted(glob.glob(os.path.join(self.secondaryDir, 'LED-ALOS2*-*-*')))
imgFilesSecondary = sorted(glob.glob(os.path.join(self.secondaryDir, 'IMG-{}-ALOS2*-*-*'.format(self.secondaryPolarization.upper()))))
firstFrameReference = ledFilesReference[0].split('-')[-3][-4:]
firstFrameSecondary = ledFilesSecondary[0].split('-')[-3][-4:]
firstFrameImagesReference = sorted(glob.glob(os.path.join(self.referenceDir, 'IMG-{}-ALOS2*{}-*-*'.format(self.referencePolarization.upper(), firstFrameReference))))
firstFrameImagesSecondary = sorted(glob.glob(os.path.join(self.secondaryDir, 'IMG-{}-ALOS2*{}-*-*'.format(self.secondaryPolarization.upper(), firstFrameSecondary))))
    #determine operation mode
referenceMode = os.path.basename(ledFilesReference[0]).split('-')[-1][0:3]
secondaryMode = os.path.basename(ledFilesSecondary[0]).split('-')[-1][0:3]
spotlightModes = ['SBS']
stripmapModes = ['UBS', 'UBD', 'HBS', 'HBD', 'HBQ', 'FBS', 'FBD', 'FBQ']
scansarNominalModes = ['WBS', 'WBD', 'WWS', 'WWD']
scansarWideModes = ['VBS', 'VBD']
scansarModes = ['WBS', 'WBD', 'WWS', 'WWD', 'VBS', 'VBD']
#usable combinations
if (referenceMode in spotlightModes) and (secondaryMode in spotlightModes):
self._insar.modeCombination = 0
elif (referenceMode in stripmapModes) and (secondaryMode in stripmapModes):
self._insar.modeCombination = 1
elif (referenceMode in scansarNominalModes) and (secondaryMode in scansarNominalModes):
self._insar.modeCombination = 21
elif (referenceMode in scansarWideModes) and (secondaryMode in scansarWideModes):
self._insar.modeCombination = 22
elif (referenceMode in scansarNominalModes) and (secondaryMode in stripmapModes):
self._insar.modeCombination = 31
elif (referenceMode in scansarWideModes) and (secondaryMode in stripmapModes):
self._insar.modeCombination = 32
else:
print('\n\nthis mode combination is not possible')
print('note that for ScanSAR-stripmap, ScanSAR must be reference\n\n')
raise Exception('mode combination not supported')
# pixel size from real data processing. azimuth pixel size may change a bit as
# the antenna points to a different swath and therefore uses a different PRF.
# MODE RANGE PIXEL SIZE (LOOKS) AZIMUTH PIXEL SIZE (LOOKS)
# -------------------------------------------------------------------
# SPT [SBS]
# 1.4304222392897463 (2) 0.9351804642158579 (4)
# SM1 [UBS,UBD]
# 1.4304222392897463 (2) 1.8291988125114438 (2)
# SM2 [HBS,HBD,HBQ]
# 2.8608444785794984 (2) 3.0672373839847196 (2)
# SM3 [FBS,FBD,FBQ]
# 4.291266717869248 (2) 3.2462615913656667 (4)
# WD1 [WBS,WBD] [WWS,WWD]
# 8.582533435738496 (1) 2.6053935830031887 (14)
# 8.582533435738496 (1) 2.092362043327227 (14)
# 8.582533435738496 (1) 2.8817632034495717 (14)
# 8.582533435738496 (1) 3.054362492601842 (14)
# 8.582533435738496 (1) 2.4582084463356977 (14)
# WD2 [VBS,VBD]
# 8.582533435738496 (1) 2.9215796012950728 (14)
# 8.582533435738496 (1) 3.088859074497863 (14)
# 8.582533435738496 (1) 2.8792293071133073 (14)
# 8.582533435738496 (1) 3.0592146044234854 (14)
# 8.582533435738496 (1) 2.8818767752199137 (14)
# 8.582533435738496 (1) 3.047038521027477 (14)
# 8.582533435738496 (1) 2.898816222039108 (14)
#determine default number of looks:
self._insar.numberRangeLooks1 = self.numberRangeLooks1
self._insar.numberAzimuthLooks1 = self.numberAzimuthLooks1
self._insar.numberRangeLooks2 = self.numberRangeLooks2
self._insar.numberAzimuthLooks2 = self.numberAzimuthLooks2
#the following two will be automatically determined by runRdrDemOffset.py
self._insar.numberRangeLooksSim = self.numberRangeLooksSim
self._insar.numberAzimuthLooksSim = self.numberAzimuthLooksSim
self._insar.numberRangeLooksIon = self.numberRangeLooksIon
self._insar.numberAzimuthLooksIon = self.numberAzimuthLooksIon
if self._insar.numberRangeLooks1 is None:
self._insar.numberRangeLooks1 = modeProcParDict['ALOS-2'][referenceMode]['numberRangeLooks1']
if self._insar.numberAzimuthLooks1 is None:
self._insar.numberAzimuthLooks1 = modeProcParDict['ALOS-2'][referenceMode]['numberAzimuthLooks1']
if self._insar.numberRangeLooks2 is None:
self._insar.numberRangeLooks2 = modeProcParDict['ALOS-2'][referenceMode]['numberRangeLooks2']
if self._insar.numberAzimuthLooks2 is None:
self._insar.numberAzimuthLooks2 = modeProcParDict['ALOS-2'][referenceMode]['numberAzimuthLooks2']
if self._insar.numberRangeLooksIon is None:
self._insar.numberRangeLooksIon = modeProcParDict['ALOS-2'][referenceMode]['numberRangeLooksIon']
if self._insar.numberAzimuthLooksIon is None:
self._insar.numberAzimuthLooksIon = modeProcParDict['ALOS-2'][referenceMode]['numberAzimuthLooksIon']
#define processing file names
self._insar.referenceDate = os.path.basename(ledFilesReference[0]).split('-')[2]
self._insar.secondaryDate = os.path.basename(ledFilesSecondary[0]).split('-')[2]
self._insar.setFilename(referenceDate=self._insar.referenceDate, secondaryDate=self._insar.secondaryDate, nrlks1=self._insar.numberRangeLooks1, nalks1=self._insar.numberAzimuthLooks1, nrlks2=self._insar.numberRangeLooks2, nalks2=self._insar.numberAzimuthLooks2)
#find frame numbers
if (self._insar.modeCombination == 31) or (self._insar.modeCombination == 32):
if (self.referenceFrames == None) or (self.secondaryFrames == None):
            raise Exception('for ScanSAR-stripmap interferometry, you must set reference and secondary frame numbers')
#if not set, find frames automatically
if self.referenceFrames == None:
self.referenceFrames = []
for led in ledFilesReference:
frameNumber = os.path.basename(led).split('-')[1][-4:]
if frameNumber not in self.referenceFrames:
self.referenceFrames.append(frameNumber)
if self.secondaryFrames == None:
self.secondaryFrames = []
for led in ledFilesSecondary:
frameNumber = os.path.basename(led).split('-')[1][-4:]
if frameNumber not in self.secondaryFrames:
self.secondaryFrames.append(frameNumber)
#sort frames
self.referenceFrames = sorted(self.referenceFrames)
self.secondaryFrames = sorted(self.secondaryFrames)
#check number of frames
if len(self.referenceFrames) != len(self.secondaryFrames):
raise Exception('number of frames in reference dir is not equal to number of frames \
in secondary dir. please set frame number manually')
#find swath numbers (if not ScanSAR-ScanSAR, compute valid swaths)
if (self._insar.modeCombination == 0) or (self._insar.modeCombination == 1):
self.startingSwath = 1
self.endingSwath = 1
if self._insar.modeCombination == 21:
if self.startingSwath == None:
self.startingSwath = 1
if self.endingSwath == None:
self.endingSwath = 5
if self._insar.modeCombination == 22:
if self.startingSwath == None:
self.startingSwath = 1
if self.endingSwath == None:
self.endingSwath = 7
#determine starting and ending swaths for ScanSAR-stripmap, user's settings are overwritten
#use first frame to check overlap
if (self._insar.modeCombination == 31) or (self._insar.modeCombination == 32):
if self._insar.modeCombination == 31:
numberOfSwaths = 5
else:
numberOfSwaths = 7
overlapSubswaths = []
for i in range(numberOfSwaths):
overlapRatio = check_overlap(ledFilesReference[0], firstFrameImagesReference[i], ledFilesSecondary[0], firstFrameImagesSecondary[0])
if overlapRatio > 1.0 / 4.0:
overlapSubswaths.append(i+1)
if overlapSubswaths == []:
raise Exception('There is no overlap area between the ScanSAR-stripmap pair')
self.startingSwath = int(overlapSubswaths[0])
self.endingSwath = int(overlapSubswaths[-1])
#save the valid frames and swaths for future processing
self._insar.referenceFrames = self.referenceFrames
self._insar.secondaryFrames = self.secondaryFrames
self._insar.startingSwath = self.startingSwath
self._insar.endingSwath = self.endingSwath
##################################################
#1. create directories and read data
##################################################
self.reference.configure()
self.secondary.configure()
self.reference.track.configure()
self.secondary.track.configure()
for i, (referenceFrame, secondaryFrame) in enumerate(zip(self._insar.referenceFrames, self._insar.secondaryFrames)):
#frame number starts with 1
frameDir = 'f{}_{}'.format(i+1, referenceFrame)
os.makedirs(frameDir, exist_ok=True)
os.chdir(frameDir)
#attach a frame to reference and secondary
frameObjReference = MultiMode.createFrame()
frameObjSecondary = MultiMode.createFrame()
frameObjReference.configure()
frameObjSecondary.configure()
self.reference.track.frames.append(frameObjReference)
self.secondary.track.frames.append(frameObjSecondary)
#swath number starts with 1
for j in range(self._insar.startingSwath, self._insar.endingSwath+1):
print('processing frame {} swath {}'.format(referenceFrame, j))
swathDir = 's{}'.format(j)
os.makedirs(swathDir, exist_ok=True)
os.chdir(swathDir)
#attach a swath to reference and secondary
swathObjReference = MultiMode.createSwath()
swathObjSecondary = MultiMode.createSwath()
swathObjReference.configure()
swathObjSecondary.configure()
self.reference.track.frames[-1].swaths.append(swathObjReference)
self.secondary.track.frames[-1].swaths.append(swathObjSecondary)
#setup reference
self.reference.leaderFile = sorted(glob.glob(os.path.join(self.referenceDir, 'LED-ALOS2*{}-*-*'.format(referenceFrame))))[0]
if referenceMode in scansarModes:
self.reference.imageFile = sorted(glob.glob(os.path.join(self.referenceDir, 'IMG-{}-ALOS2*{}-*-*-F{}'.format(self.referencePolarization.upper(), referenceFrame, j))))[0]
else:
self.reference.imageFile = sorted(glob.glob(os.path.join(self.referenceDir, 'IMG-{}-ALOS2*{}-*-*'.format(self.referencePolarization.upper(), referenceFrame))))[0]
self.reference.outputFile = self._insar.referenceSlc
self.reference.useVirtualFile = self.useVirtualFile
#read reference
(imageFDR, imageData)=self.reference.readImage()
(leaderFDR, sceneHeaderRecord, platformPositionRecord, facilityRecord)=self.reference.readLeader()
self.reference.setSwath(leaderFDR, sceneHeaderRecord, platformPositionRecord, facilityRecord, imageFDR, imageData)
self.reference.setFrame(leaderFDR, sceneHeaderRecord, platformPositionRecord, facilityRecord, imageFDR, imageData)
self.reference.setTrack(leaderFDR, sceneHeaderRecord, platformPositionRecord, facilityRecord, imageFDR, imageData)
#setup secondary
self.secondary.leaderFile = sorted(glob.glob(os.path.join(self.secondaryDir, 'LED-ALOS2*{}-*-*'.format(secondaryFrame))))[0]
if secondaryMode in scansarModes:
self.secondary.imageFile = sorted(glob.glob(os.path.join(self.secondaryDir, 'IMG-{}-ALOS2*{}-*-*-F{}'.format(self.secondaryPolarization.upper(), secondaryFrame, j))))[0]
else:
self.secondary.imageFile = sorted(glob.glob(os.path.join(self.secondaryDir, 'IMG-{}-ALOS2*{}-*-*'.format(self.secondaryPolarization.upper(), secondaryFrame))))[0]
self.secondary.outputFile = self._insar.secondarySlc
self.secondary.useVirtualFile = self.useVirtualFile
#read secondary
(imageFDR, imageData)=self.secondary.readImage()
(leaderFDR, sceneHeaderRecord, platformPositionRecord, facilityRecord)=self.secondary.readLeader()
self.secondary.setSwath(leaderFDR, sceneHeaderRecord, platformPositionRecord, facilityRecord, imageFDR, imageData)
self.secondary.setFrame(leaderFDR, sceneHeaderRecord, platformPositionRecord, facilityRecord, imageFDR, imageData)
self.secondary.setTrack(leaderFDR, sceneHeaderRecord, platformPositionRecord, facilityRecord, imageFDR, imageData)
os.chdir('../')
self._insar.saveProduct(self.reference.track.frames[-1], self._insar.referenceFrameParameter)
self._insar.saveProduct(self.secondary.track.frames[-1], self._insar.secondaryFrameParameter)
os.chdir('../')
self._insar.saveProduct(self.reference.track, self._insar.referenceTrackParameter)
self._insar.saveProduct(self.secondary.track, self._insar.secondaryTrackParameter)
catalog.printToLog(logger, "runPreprocessor")
self._insar.procDoc.addAllFromCatalog(catalog)
def check_overlap(ldr_m, img_m, ldr_s, img_s):
from isceobj.Constants import SPEED_OF_LIGHT
rangeSamplingRateReference, widthReference, nearRangeReference = read_param_for_checking_overlap(ldr_m, img_m)
rangeSamplingRateSecondary, widthSecondary, nearRangeSecondary = read_param_for_checking_overlap(ldr_s, img_s)
farRangeReference = nearRangeReference + (widthReference-1) * 0.5 * SPEED_OF_LIGHT / rangeSamplingRateReference
farRangeSecondary = nearRangeSecondary + (widthSecondary-1) * 0.5 * SPEED_OF_LIGHT / rangeSamplingRateSecondary
#This should be good enough, although precise image offsets are not used.
if farRangeReference <= nearRangeSecondary:
overlapRatio = 0.0
elif farRangeSecondary <= nearRangeReference:
overlapRatio = 0.0
else:
# 0 1 2 3
ranges = np.array([nearRangeReference, farRangeReference, nearRangeSecondary, farRangeSecondary])
rangesIndex = np.argsort(ranges)
        overlapRatio = (ranges[rangesIndex[2]]-ranges[rangesIndex[1]]) / (farRangeReference-nearRangeReference)
return overlapRatio
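# Worked example for the ratio above (illustrative numbers only): if the reference
# swath covers slant ranges [800 km, 870 km] and the secondary swath covers
# [850 km, 920 km], the sorted ranges are [800, 850, 870, 920] km, the overlap is
# 870 - 850 = 20 km, and overlapRatio = 20 / (870 - 800) ~ 0.29, which passes the
# 1/4 threshold used by the caller above.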
def read_param_for_checking_overlap(leader_file, image_file):
from isceobj.Sensor import xmlPrefix
import isceobj.Sensor.CEOS as CEOS
#read from leader file
fsampConst = { 104: 1.047915957140240E+08,
52: 5.239579785701190E+07,
34: 3.493053190467460E+07,
17: 1.746526595233730E+07 }
fp = open(leader_file,'rb')
leaderFDR = CEOS.CEOSDB(xml=os.path.join(xmlPrefix,'alos2_slc/leader_file.xml'),dataFile=fp)
leaderFDR.parse()
fp.seek(leaderFDR.getEndOfRecordPosition())
sceneHeaderRecord = CEOS.CEOSDB(xml=os.path.join(xmlPrefix,'alos2_slc/scene_record.xml'),dataFile=fp)
sceneHeaderRecord.parse()
fp.seek(sceneHeaderRecord.getEndOfRecordPosition())
fsamplookup = int(sceneHeaderRecord.metadata['Range sampling rate in MHz'])
rangeSamplingRate = fsampConst[fsamplookup]
fp.close()
#print('{}'.format(rangeSamplingRate))
#read from image file
fp = open(image_file, 'rb')
imageFDR = CEOS.CEOSDB(xml=os.path.join(xmlPrefix,'alos2_slc/image_file.xml'), dataFile=fp)
imageFDR.parse()
fp.seek(imageFDR.getEndOfRecordPosition())
imageData = CEOS.CEOSDB(xml=os.path.join(xmlPrefix,'alos2_slc/image_record.xml'), dataFile=fp)
imageData.parseFast()
width = imageFDR.metadata['Number of pixels per line per SAR channel']
near_range = imageData.metadata['Slant range to 1st data sample']
fp.close()
#print('{}'.format(width))
#print('{}'.format(near_range))
return (rangeSamplingRate, width, near_range)
|
[
"logging.getLogger",
"os.makedirs",
"isceobj.Catalog.createCatalog",
"os.path.join",
"isceobj.Sensor.MultiMode.createSwath",
"os.chdir",
"numpy.array",
"numpy.argsort",
"isceobj.Sensor.MultiMode.createFrame",
"os.path.basename",
"os.path.abspath"
] |
[((478, 530), 'logging.getLogger', 'logging.getLogger', (['"""isce.alos2insar.runPreprocessor"""'], {}), "('isce.alos2insar.runPreprocessor')\n", (495, 530), False, 'import logging\n'), ((604, 659), 'isceobj.Catalog.createCatalog', 'isceobj.Catalog.createCatalog', (['self._insar.procDoc.name'], {}), '(self._insar.procDoc.name)\n', (633, 659), False, 'import isceobj\n'), ((884, 918), 'os.path.abspath', 'os.path.abspath', (['self.referenceDir'], {}), '(self.referenceDir)\n', (899, 918), False, 'import os\n'), ((943, 977), 'os.path.abspath', 'os.path.abspath', (['self.secondaryDir'], {}), '(self.secondaryDir)\n', (958, 977), False, 'import os\n'), ((10403, 10439), 'os.makedirs', 'os.makedirs', (['frameDir'], {'exist_ok': '(True)'}), '(frameDir, exist_ok=True)\n', (10414, 10439), False, 'import os\n'), ((10448, 10466), 'os.chdir', 'os.chdir', (['frameDir'], {}), '(frameDir)\n', (10456, 10466), False, 'import os\n'), ((10547, 10570), 'isceobj.Sensor.MultiMode.createFrame', 'MultiMode.createFrame', ([], {}), '()\n', (10568, 10570), True, 'import isceobj.Sensor.MultiMode as MultiMode\n'), ((10599, 10622), 'isceobj.Sensor.MultiMode.createFrame', 'MultiMode.createFrame', ([], {}), '()\n', (10620, 10622), True, 'import isceobj.Sensor.MultiMode as MultiMode\n'), ((14393, 14408), 'os.chdir', 'os.chdir', (['"""../"""'], {}), "('../')\n", (14401, 14408), False, 'import os\n'), ((1020, 1069), 'os.path.join', 'os.path.join', (['self.referenceDir', '"""LED-ALOS2*-*-*"""'], {}), "(self.referenceDir, 'LED-ALOS2*-*-*')\n", (1032, 1069), False, 'import os\n'), ((1253, 1302), 'os.path.join', 'os.path.join', (['self.secondaryDir', '"""LED-ALOS2*-*-*"""'], {}), "(self.secondaryDir, 'LED-ALOS2*-*-*')\n", (1265, 1302), False, 'import os\n'), ((11066, 11102), 'os.makedirs', 'os.makedirs', (['swathDir'], {'exist_ok': '(True)'}), '(swathDir, exist_ok=True)\n', (11077, 11102), False, 'import os\n'), ((11115, 11133), 'os.chdir', 'os.chdir', (['swathDir'], {}), '(swathDir)\n', (11123, 11133), False, 'import os\n'), ((11222, 11245), 'isceobj.Sensor.MultiMode.createSwath', 'MultiMode.createSwath', ([], {}), '()\n', (11243, 11245), True, 'import isceobj.Sensor.MultiMode as MultiMode\n'), ((11278, 11301), 'isceobj.Sensor.MultiMode.createSwath', 'MultiMode.createSwath', ([], {}), '()\n', (11299, 11301), True, 'import isceobj.Sensor.MultiMode as MultiMode\n'), ((14165, 14180), 'os.chdir', 'os.chdir', (['"""../"""'], {}), "('../')\n", (14173, 14180), False, 'import os\n'), ((15590, 15682), 'numpy.array', 'np.array', (['[nearRangeReference, farRangeReference, nearRangeSecondary, farRangeSecondary]'], {}), '([nearRangeReference, farRangeReference, nearRangeSecondary,\n farRangeSecondary])\n', (15598, 15682), True, 'import numpy as np\n'), ((15701, 15719), 'numpy.argsort', 'np.argsort', (['ranges'], {}), '(ranges)\n', (15711, 15719), True, 'import numpy as np\n'), ((16278, 16330), 'os.path.join', 'os.path.join', (['xmlPrefix', '"""alos2_slc/leader_file.xml"""'], {}), "(xmlPrefix, 'alos2_slc/leader_file.xml')\n", (16290, 16330), False, 'import os\n'), ((16453, 16506), 'os.path.join', 'os.path.join', (['xmlPrefix', '"""alos2_slc/scene_record.xml"""'], {}), "(xmlPrefix, 'alos2_slc/scene_record.xml')\n", (16465, 16506), False, 'import os\n'), ((16882, 16933), 'os.path.join', 'os.path.join', (['xmlPrefix', '"""alos2_slc/image_file.xml"""'], {}), "(xmlPrefix, 'alos2_slc/image_file.xml')\n", (16894, 16933), False, 'import os\n'), ((17047, 17100), 'os.path.join', 'os.path.join', (['xmlPrefix', '"""alos2_slc/image_record.xml"""'], {}), 
"(xmlPrefix, 'alos2_slc/image_record.xml')\n", (17059, 17100), False, 'import os\n'), ((6455, 6493), 'os.path.basename', 'os.path.basename', (['ledFilesReference[0]'], {}), '(ledFilesReference[0])\n', (6471, 6493), False, 'import os\n'), ((6540, 6578), 'os.path.basename', 'os.path.basename', (['ledFilesSecondary[0]'], {}), '(ledFilesSecondary[0])\n', (6556, 6578), False, 'import os\n'), ((1970, 2008), 'os.path.basename', 'os.path.basename', (['ledFilesReference[0]'], {}), '(ledFilesReference[0])\n', (1986, 2008), False, 'import os\n'), ((2049, 2087), 'os.path.basename', 'os.path.basename', (['ledFilesSecondary[0]'], {}), '(ledFilesSecondary[0])\n', (2065, 2087), False, 'import os\n'), ((7341, 7362), 'os.path.basename', 'os.path.basename', (['led'], {}), '(led)\n', (7357, 7362), False, 'import os\n'), ((7630, 7651), 'os.path.basename', 'os.path.basename', (['led'], {}), '(led)\n', (7646, 7651), False, 'import os\n')]
|
import sys
sys.path.extend(["./"])
import numpy as np
import torch
from clustorch.kmeans import KMeans
from src.threat.clustering.constrained_poisoning import ConstrainedAdvPoisoningGlobal
from experiments.utilities import ClusteringWrapper3Dto2D, set_seed
X = np.load("comparison/SEEDS/kme_X_org.npy")
Xadv_s = np.load("comparison/SEEDS/kme_X_adv.npy")
X = torch.from_numpy(X).unsqueeze(2)
Xadv_s = torch.from_numpy(Xadv_s).unsqueeze(2)
eps_s = Xadv_s - X
set_seed(2)
h = KMeans(n_clusters=2)
model = ClusteringWrapper3Dto2D(h)
yhat = model.fit_predict(X)
yadv_s = model.fit_predict(Xadv_s)
print("Suspicion Miss:", min((yhat != yadv_s).sum(), (1 - yhat != yadv_s).sum()))
idx_s = torch.nonzero(Xadv_s - X, as_tuple=False)[0, 0]
print("target sample", idx_s)
import pandas as pd
def comparison(T, N=20):
miss_clustered = np.zeros(N)
l2 = np.zeros(N)
linf = np.zeros(N)
l0 = np.zeros(N)
for i in range(N):
Xadv_m, leps, ts_idx, d = T.forward(X, yhat, from_to=[0, 1])
yadv_m = model.fit_predict(Xadv_m)
miss_clustered[i] = min(
(yhat != yadv_m).sum(), (yhat != (1 - yadv_m)).sum()
).item()
l2[i] = (Xadv_m - X).norm(2).item()
l0[i] = (Xadv_m - X).norm(0).item()
linf[i] = (Xadv_m - X).norm(float("inf")).item()
return miss_clustered, l0, l2, linf
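# Note on the miss-clustered count above: with two clusters the predicted labels are
# only defined up to a global swap, so the count is taken as the minimum over the two
# possible alignments (yhat vs. yadv_m and yhat vs. 1 - yadv_m). A permutation-
# invariant score such as adjusted mutual information would generalize this beyond
# two clusters.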
set_seed(4)
constrained = ConstrainedAdvPoisoningGlobal(
delta=(Xadv_s - X).norm(float("inf")),
s=1,
clst_model=model,
lb=1,
G=20,
mutation_rate=0.01,
crossover_rate=0.85,
zero_rate=0.1,
domain_cons=[0, X.max() + 5],
objective="AMI",
mode="guided",
link="centroids",
)
miss, l0, l2, linf = comparison(constrained, N=20)
out = pd.DataFrame(
np.array([l0, l2, linf, miss]).transpose(), columns=["l0", "l2", "linf", "miss"]
)
print(
"Constrained l0:{} l2:{} linf:{} miss: {} std: {}".format(
l0.mean().item(),
l2.mean().item(),
linf.mean().item(),
miss.mean().item(),
miss.std().item(),
)
)
print(
"Constrained std l0:{} l2:{} linf:{}".format(
l0.std().item(), l2.std().item(), linf.std().item(),
)
)
print(
"suspicion: ",
"l0:",
(Xadv_s - X).norm(0).item(),
"l2: ",
(Xadv_s - X).norm(2).item(),
"linf: ",
(Xadv_s - X).norm(float("inf")).item(),
)
print("\n=================================================\n")
set_seed(4)
constrained = ConstrainedAdvPoisoningGlobal(
delta=(Xadv_s - X).norm(float("inf")) / 2,
s=1,
clst_model=model,
lb=1.0,
G=20,
mutation_rate=0.01,
crossover_rate=0.85,
zero_rate=0.1,
domain_cons=[0, X.max() + 5],
objective="AMI",
mode="guided",
link="centroids",
)
miss, l0, l2, linf = comparison(constrained, N=20)
out = pd.DataFrame(
np.array([l0, l2, linf, miss]).transpose(), columns=["l0", "l2", "linf", "miss"]
)
print(
"Constrained/2 l0:{} l2:{} linf:{} miss: {} std: {}".format(
l0.mean().item(),
l2.mean().item(),
linf.mean().item(),
miss.mean().item(),
miss.std().item(),
)
)
print(
"Constrained/2 std l0:{} l2:{} linf:{}".format(
l0.std().item(), l2.std().item(), linf.std().item(),
)
)
print(
"suspicion: ",
"l0:",
(Xadv_s - X).norm(0).item(),
"l2: ",
(Xadv_s - X).norm(2).item(),
"linf",
(Xadv_s - X).norm(float("inf")).item(),
)
|
[
"experiments.utilities.ClusteringWrapper3Dto2D",
"experiments.utilities.set_seed",
"torch.from_numpy",
"torch.nonzero",
"numpy.array",
"numpy.zeros",
"sys.path.extend",
"numpy.load",
"clustorch.kmeans.KMeans"
] |
[((12, 35), 'sys.path.extend', 'sys.path.extend', (["['./']"], {}), "(['./'])\n", (27, 35), False, 'import sys\n'), ((265, 306), 'numpy.load', 'np.load', (['"""comparison/SEEDS/kme_X_org.npy"""'], {}), "('comparison/SEEDS/kme_X_org.npy')\n", (272, 306), True, 'import numpy as np\n'), ((316, 357), 'numpy.load', 'np.load', (['"""comparison/SEEDS/kme_X_adv.npy"""'], {}), "('comparison/SEEDS/kme_X_adv.npy')\n", (323, 357), True, 'import numpy as np\n'), ((462, 473), 'experiments.utilities.set_seed', 'set_seed', (['(2)'], {}), '(2)\n', (470, 473), False, 'from experiments.utilities import ClusteringWrapper3Dto2D, set_seed\n'), ((479, 499), 'clustorch.kmeans.KMeans', 'KMeans', ([], {'n_clusters': '(2)'}), '(n_clusters=2)\n', (485, 499), False, 'from clustorch.kmeans import KMeans\n'), ((508, 534), 'experiments.utilities.ClusteringWrapper3Dto2D', 'ClusteringWrapper3Dto2D', (['h'], {}), '(h)\n', (531, 534), False, 'from experiments.utilities import ClusteringWrapper3Dto2D, set_seed\n'), ((1352, 1363), 'experiments.utilities.set_seed', 'set_seed', (['(4)'], {}), '(4)\n', (1360, 1363), False, 'from experiments.utilities import ClusteringWrapper3Dto2D, set_seed\n'), ((2409, 2420), 'experiments.utilities.set_seed', 'set_seed', (['(4)'], {}), '(4)\n', (2417, 2420), False, 'from experiments.utilities import ClusteringWrapper3Dto2D, set_seed\n'), ((690, 731), 'torch.nonzero', 'torch.nonzero', (['(Xadv_s - X)'], {'as_tuple': '(False)'}), '(Xadv_s - X, as_tuple=False)\n', (703, 731), False, 'import torch\n'), ((837, 848), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (845, 848), True, 'import numpy as np\n'), ((858, 869), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (866, 869), True, 'import numpy as np\n'), ((881, 892), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (889, 892), True, 'import numpy as np\n'), ((902, 913), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (910, 913), True, 'import numpy as np\n'), ((363, 382), 'torch.from_numpy', 'torch.from_numpy', (['X'], {}), '(X)\n', (379, 382), False, 'import torch\n'), ((405, 429), 'torch.from_numpy', 'torch.from_numpy', (['Xadv_s'], {}), '(Xadv_s)\n', (421, 429), False, 'import torch\n'), ((1744, 1774), 'numpy.array', 'np.array', (['[l0, l2, linf, miss]'], {}), '([l0, l2, linf, miss])\n', (1752, 1774), True, 'import numpy as np\n'), ((2807, 2837), 'numpy.array', 'np.array', (['[l0, l2, linf, miss]'], {}), '([l0, l2, linf, miss])\n', (2815, 2837), True, 'import numpy as np\n')]
|
import tensorflow as tf
import numpy as np
from PIL import Image
import os
import glob
import platform
import argparse
from scipy.io import loadmat,savemat
from preprocess_img import align_img
from utils import *
from face_decoder import Face3D
from options import Option
is_windows = True
def parse_args():
desc = "Deep3DFaceReconstruction"
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('--pretrain_weights', type=str, default=None, help='path for pre-trained model')
    parser.add_argument('--use_pb', type=int, default=1, help='use the frozen .pb graph (1) or restore weights from a .ckpt checkpoint (0)')
return parser.parse_args()
def restore_weights(sess,opt):
var_list = tf.trainable_variables()
g_list = tf.global_variables()
# add batch normalization params into trainable variables
bn_moving_vars = [g for g in g_list if 'moving_mean' in g.name]
bn_moving_vars += [g for g in g_list if 'moving_variance' in g.name]
var_list +=bn_moving_vars
# create saver to save and restore weights
saver = tf.train.Saver(var_list = var_list)
saver.restore(sess,opt.pretrain_weights)
"""
Files in the input directory are arranged as
Folder1
Folder1_A0
Folder1_A1
Folder1_A2
....
Folder2
Folder2_A0
Folder2_A1
Folder2_A2
...
Folder3
Folder3_A0
Folder3_A1
Folder3_A2
...
"""
def demo():
# input and output folder
args = parse_args()
image_path = 'input'
save_path = 'output'
if not os.path.exists(save_path):
os.makedirs(save_path)
folder_list = glob.glob(image_path)
# img_list = glob.glob(image_path + '/' + '*.png')
# img_list +=glob.glob(image_path + '/' + '*.jpg')
# read BFM face model
# transfer original BFM model to our model
if not os.path.isfile('./BFM/BFM_model_front.mat'):
transferBFM09()
# read standard landmarks for preprocessing images
lm3D = load_lm3d()
n = 0
# build reconstruction model
with tf.Graph().as_default() as graph,tf.device('/cpu:0'):
opt = Option(is_train=False)
opt.batch_size = 1
opt.pretrain_weights = args.pretrain_weights
FaceReconstructor = Face3D()
images = tf.placeholder(name = 'input_imgs', shape = [opt.batch_size,224,224,3], dtype = tf.float32)
if args.use_pb and os.path.isfile('network/FaceReconModel.pb'):
print('Using pre-trained .pb file.')
graph_def = load_graph('network/FaceReconModel.pb')
tf.import_graph_def(graph_def,name='resnet',input_map={'input_imgs:0': images})
# output coefficients of R-Net (dim = 257)
coeff = graph.get_tensor_by_name('resnet/coeff:0')
else:
print('Using pre-trained .ckpt file: %s'%opt.pretrain_weights)
import networks
coeff = networks.R_Net(images,is_training=False)
# reconstructing faces
FaceReconstructor.Reconstruction_Block(coeff,opt)
face_shape = FaceReconstructor.face_shape_t
face_texture = FaceReconstructor.face_texture
face_color = FaceReconstructor.face_color
landmarks_2d = FaceReconstructor.landmark_p
recon_img = FaceReconstructor.render_imgs
tri = FaceReconstructor.facemodel.face_buf
with tf.Session() as sess:
if not args.use_pb :
restore_weights(sess,opt)
print('reconstructing...')
for folder in folder_list:
img_list = glob.glob(folder+"/*")
os.mkdir(folder.replace("input","output"))
save_path = folder.replace("input","output")
for file in img_list:
n += 1
print(n)
# load images and corresponding 5 facial landmarks
lm_file =file.replace('JPG','txt')
if(not os.path.isfile(lm_file)):
continue
img,lm = load_img(file,lm_file)
# preprocess input image
input_img,lm_new,transform_params = align_img(img,lm,lm3D)
coeff_,face_shape_,face_texture_,face_color_,landmarks_2d_,recon_img_,tri_ = sess.run([coeff,\
face_shape,face_texture,face_color,landmarks_2d,recon_img,tri],feed_dict = {images: input_img})
# reshape outputs
input_img = np.squeeze(input_img)
face_shape_ = np.squeeze(face_shape_, (0))
face_texture_ = np.squeeze(face_texture_, (0))
face_color_ = np.squeeze(face_color_, (0))
landmarks_2d_ = np.squeeze(landmarks_2d_, (0))
if not is_windows:
recon_img_ = np.squeeze(recon_img_, (0))
# save output files
# if not is_windows:
# savemat(os.path.join(save_path,file.split(os.path.sep)[-1].replace('.png','.mat').replace('jpg','mat')),{'cropped_img':input_img[:,:,::-1],'recon_img':recon_img_,'coeff':coeff_,\
# 'face_shape':face_shape_,'face_texture':face_texture_,'face_color':face_color_,'lm_68p':landmarks_2d_,'lm_5p':lm_new})
save_obj(file.replace('.JPG','_mesh.obj'),face_shape_,tri_,np.clip(face_color_,0,255)/255) # 3D reconstruction face (in canonical view)
if __name__ == '__main__':
demo()
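# Background note (assumption based on the usual 3DMM coefficient layout in this
# family of models, not stated in this file): the 257-dimensional coeff vector from
# R-Net is typically split into identity (80), expression (64), texture (80), pose
# angles (3), SH illumination (27) and translation (3) coefficients, which is what
# Face3D.Reconstruction_Block consumes above.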
|
[
"numpy.clip",
"options.Option",
"networks.R_Net",
"os.path.exists",
"tensorflow.Graph",
"argparse.ArgumentParser",
"face_decoder.Face3D",
"tensorflow.placeholder",
"tensorflow.Session",
"tensorflow.trainable_variables",
"preprocess_img.align_img",
"glob.glob",
"tensorflow.device",
"tensorflow.global_variables",
"numpy.squeeze",
"os.path.isfile",
"tensorflow.import_graph_def",
"os.makedirs",
"tensorflow.train.Saver"
] |
[((364, 405), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'desc'}), '(description=desc)\n', (387, 405), False, 'import argparse\n'), ((676, 700), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (698, 700), True, 'import tensorflow as tf\n'), ((711, 732), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (730, 732), True, 'import tensorflow as tf\n'), ((1010, 1043), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'var_list': 'var_list'}), '(var_list=var_list)\n', (1024, 1043), True, 'import tensorflow as tf\n'), ((1489, 1510), 'glob.glob', 'glob.glob', (['image_path'], {}), '(image_path)\n', (1498, 1510), False, 'import glob\n'), ((1422, 1447), 'os.path.exists', 'os.path.exists', (['save_path'], {}), '(save_path)\n', (1436, 1447), False, 'import os\n'), ((1451, 1473), 'os.makedirs', 'os.makedirs', (['save_path'], {}), '(save_path)\n', (1462, 1473), False, 'import os\n'), ((1691, 1734), 'os.path.isfile', 'os.path.isfile', (['"""./BFM/BFM_model_front.mat"""'], {}), "('./BFM/BFM_model_front.mat')\n", (1705, 1734), False, 'import os\n'), ((1904, 1923), 'tensorflow.device', 'tf.device', (['"""/cpu:0"""'], {}), "('/cpu:0')\n", (1913, 1923), True, 'import tensorflow as tf\n'), ((1934, 1956), 'options.Option', 'Option', ([], {'is_train': '(False)'}), '(is_train=False)\n', (1940, 1956), False, 'from options import Option\n'), ((2047, 2055), 'face_decoder.Face3D', 'Face3D', ([], {}), '()\n', (2053, 2055), False, 'from face_decoder import Face3D\n'), ((2067, 2159), 'tensorflow.placeholder', 'tf.placeholder', ([], {'name': '"""input_imgs"""', 'shape': '[opt.batch_size, 224, 224, 3]', 'dtype': 'tf.float32'}), "(name='input_imgs', shape=[opt.batch_size, 224, 224, 3],\n dtype=tf.float32)\n", (2081, 2159), True, 'import tensorflow as tf\n'), ((2181, 2224), 'os.path.isfile', 'os.path.isfile', (['"""network/FaceReconModel.pb"""'], {}), "('network/FaceReconModel.pb')\n", (2195, 2224), False, 'import os\n'), ((2324, 2409), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['graph_def'], {'name': '"""resnet"""', 'input_map': "{'input_imgs:0': images}"}), "(graph_def, name='resnet', input_map={'input_imgs:0':\n images})\n", (2343, 2409), True, 'import tensorflow as tf\n'), ((2609, 2650), 'networks.R_Net', 'networks.R_Net', (['images'], {'is_training': '(False)'}), '(images, is_training=False)\n', (2623, 2650), False, 'import networks\n'), ((3010, 3022), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (3020, 3022), True, 'import tensorflow as tf\n'), ((1871, 1881), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (1879, 1881), True, 'import tensorflow as tf\n'), ((3162, 3186), 'glob.glob', 'glob.glob', (["(folder + '/*')"], {}), "(folder + '/*')\n", (3171, 3186), False, 'import glob\n'), ((3590, 3614), 'preprocess_img.align_img', 'align_img', (['img', 'lm', 'lm3D'], {}), '(img, lm, lm3D)\n', (3599, 3614), False, 'from preprocess_img import align_img\n'), ((3858, 3879), 'numpy.squeeze', 'np.squeeze', (['input_img'], {}), '(input_img)\n', (3868, 3879), True, 'import numpy as np\n'), ((3899, 3925), 'numpy.squeeze', 'np.squeeze', (['face_shape_', '(0)'], {}), '(face_shape_, 0)\n', (3909, 3925), True, 'import numpy as np\n'), ((3949, 3977), 'numpy.squeeze', 'np.squeeze', (['face_texture_', '(0)'], {}), '(face_texture_, 0)\n', (3959, 3977), True, 'import numpy as np\n'), ((3999, 4025), 'numpy.squeeze', 'np.squeeze', (['face_color_', '(0)'], {}), '(face_color_, 0)\n', (4009, 4025), True, 'import numpy as np\n'), ((4049, 
4077), 'numpy.squeeze', 'np.squeeze', (['landmarks_2d_', '(0)'], {}), '(landmarks_2d_, 0)\n', (4059, 4077), True, 'import numpy as np\n'), ((3441, 3464), 'os.path.isfile', 'os.path.isfile', (['lm_file'], {}), '(lm_file)\n', (3455, 3464), False, 'import os\n'), ((4123, 4148), 'numpy.squeeze', 'np.squeeze', (['recon_img_', '(0)'], {}), '(recon_img_, 0)\n', (4133, 4148), True, 'import numpy as np\n'), ((4582, 4610), 'numpy.clip', 'np.clip', (['face_color_', '(0)', '(255)'], {}), '(face_color_, 0, 255)\n', (4589, 4610), True, 'import numpy as np\n')]
|
import abc
from typing import Dict, List, Optional, TypeVar
import numpy as np
from pydantic import BaseModel
from ..constants import ZEROISH, Dtype
class DatasetMetadata(BaseModel):
id: Optional[int]
name: str = ""
dose_units: str = ""
response_units: str = ""
dose_name: str = ""
response_name: str = ""
class Config:
extra = "allow"
def get_name(self):
if self.name:
return self.name
if self.id:
return f"Dataset #{self.id}"
return "BMDS output results"
class DatasetBase(abc.ABC):
# Abstract parent-class for dataset-types.
dtype: Dtype
metadata: DatasetMetadata
DEFAULT_XLABEL = "Dose"
DEFAULT_YLABEL = "Response"
@abc.abstractmethod
def _validate(self):
...
@abc.abstractmethod
def as_dfile(self):
...
@abc.abstractmethod
def plot(self):
...
@abc.abstractmethod
def drop_dose(self):
...
@property
def num_dose_groups(self):
return len(set(self.doses))
def to_dict(self):
return self.serialize().dict()
@property
def dose_linspace(self) -> np.ndarray:
if not hasattr(self, "_dose_linspace"):
self._dose_linspace = np.linspace(np.min(self.doses), np.max(self.doses), 100)
self._dose_linspace[self._dose_linspace == 0] = ZEROISH
return self._dose_linspace
def _get_dose_units_text(self) -> str:
if self.metadata.dose_units:
return f" ({self.metadata.dose_units})"
return ""
def _get_response_units_text(self) -> str:
if self.metadata.response_units:
return f" ({self.metadata.response_units})"
return ""
def _get_dataset_name(self) -> str:
return self.metadata.get_name()
def get_xlabel(self):
label = self.DEFAULT_XLABEL
if self.metadata.dose_name:
label = self.metadata.dose_name
if self.metadata.dose_units:
label += f" ({self.metadata.dose_units})"
return label
def get_ylabel(self):
label = self.DEFAULT_YLABEL
if self.metadata.response_name:
label = self.metadata.response_name
if self.metadata.response_units:
label += f" ({self.metadata.response_units})"
return label
@abc.abstractmethod
def serialize(self) -> "DatasetSchemaBase":
...
def update_record(self, d: dict) -> None:
"""Update data record for a tabular-friendly export"""
d.update(
dataset_name=self.metadata.name,
dataset_dose_name=self.metadata.dose_name,
dataset_dose_units=self.metadata.dose_units,
dataset_response_name=self.metadata.response_name,
dataset_response_units=self.metadata.response_units,
)
DatasetType = TypeVar("DatasetType", bound=DatasetBase)
class DatasetSchemaBase(BaseModel, abc.ABC):
@classmethod
def get_subclass(cls, dtype: Dtype) -> BaseModel:
from .continuous import ContinuousDatasetSchema, ContinuousIndividualDatasetSchema
from .dichotomous import DichotomousCancerDatasetSchema, DichotomousDatasetSchema
_dataset_schema_map: Dict = {
Dtype.CONTINUOUS: ContinuousDatasetSchema,
Dtype.CONTINUOUS_INDIVIDUAL: ContinuousIndividualDatasetSchema,
Dtype.DICHOTOMOUS: DichotomousDatasetSchema,
Dtype.DICHOTOMOUS_CANCER: DichotomousCancerDatasetSchema,
}
try:
return _dataset_schema_map[dtype]
except KeyError:
raise ValueError(f"Unknown dtype: {dtype}")
@abc.abstractmethod
def deserialize(self) -> DatasetBase:
...
class DatasetPlottingSchema(BaseModel):
mean: Optional[List[float]]
ll: List[float]
ul: List[float]
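# Hypothetical usage sketch (not part of this module): a serialized dataset dict can
# be rebuilt by looking up the schema class for its dtype and then deserializing.
# The parse_obj call assumes pydantic's standard model API; concrete field names
# depend on the schema subclasses defined in .continuous / .dichotomous.
#
#   schema_cls = DatasetSchemaBase.get_subclass(Dtype.DICHOTOMOUS)
#   dataset = schema_cls.parse_obj(payload_dict).deserialize()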
|
[
"numpy.max",
"numpy.min",
"typing.TypeVar"
] |
[((2869, 2910), 'typing.TypeVar', 'TypeVar', (['"""DatasetType"""'], {'bound': 'DatasetBase'}), "('DatasetType', bound=DatasetBase)\n", (2876, 2910), False, 'from typing import Dict, List, Optional, TypeVar\n'), ((1276, 1294), 'numpy.min', 'np.min', (['self.doses'], {}), '(self.doses)\n', (1282, 1294), True, 'import numpy as np\n'), ((1296, 1314), 'numpy.max', 'np.max', (['self.doses'], {}), '(self.doses)\n', (1302, 1314), True, 'import numpy as np\n')]
|