code | apis | extract_api
---|---|---|
import numpy as np
from molsysmt import puw
from ..exceptions import *

def digest_box(box):
    return box

def digest_box_lengths_value(box_lengths):
    output = None
    if type(box_lengths) is not np.ndarray:
        box_lengths = np.array(box_lengths)
    shape = box_lengths.shape
    if len(shape)==1:
        if shape[0]==3:
            output = np.expand_dims(box_lengths, axis=0)
        else:
            raise ValueError('box_lengths array does not have the correct shape.')
    elif len(shape)==2:
        if shape[1]==3:
            output = box_lengths
        else:
            raise ValueError('box_lengths array does not have the correct shape.')
    else:
        raise ValueError('box_lengths array does not have the correct shape.')
    return output

def digest_box_lengths(box_lengths):
    output = None
    unit = puw.get_unit(box_lengths)
    box_lengths_value = puw.get_value(box_lengths)
    box_lengths_value = digest_box_lengths_value(box_lengths_value)
    output = box_lengths_value*unit
    return output

def digest_box_angles_value(box_angles):
    output = None
    if type(box_angles) is not np.ndarray:
        box_angles = np.array(box_angles)
    shape = box_angles.shape
    if len(shape)==1:
        if shape[0]==3:
            output = np.expand_dims(box_angles, axis=0)
        else:
            raise ValueError('box_angles array does not have the correct shape.')
    elif len(shape)==2:
        if shape[1]==3:
            output = box_angles
        else:
            raise ValueError('box_angles array does not have the correct shape.')
    else:
        raise ValueError('box_angles array does not have the correct shape.')
    return output

def digest_box_angles(box_angles):
    output = None
    unit = puw.get_unit(box_angles)
    box_angles_value = puw.get_value(box_angles)
    box_angles_value = digest_box_angles_value(box_angles_value)
    output = box_angles_value*unit
    return output
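
# Usage note (illustrative, not part of the original module): digest_box_lengths_value
# promotes a plain (3,) sequence of box lengths to shape (1, 3), i.e. one row per frame:
#     digest_box_lengths_value([2.0, 2.0, 2.0]).shape == (1, 3)
# digest_box_lengths and digest_box_angles do the same while re-attaching the unit
# extracted with puw.get_unit.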
|
[
"numpy.array",
"numpy.expand_dims",
"molsysmt.puw.get_value",
"molsysmt.puw.get_unit"
] |
[((844, 869), 'molsysmt.puw.get_unit', 'puw.get_unit', (['box_lengths'], {}), '(box_lengths)\n', (856, 869), False, 'from molsysmt import puw\n'), ((894, 920), 'molsysmt.puw.get_value', 'puw.get_value', (['box_lengths'], {}), '(box_lengths)\n', (907, 920), False, 'from molsysmt import puw\n'), ((1766, 1790), 'molsysmt.puw.get_unit', 'puw.get_unit', (['box_angles'], {}), '(box_angles)\n', (1778, 1790), False, 'from molsysmt import puw\n'), ((1814, 1839), 'molsysmt.puw.get_value', 'puw.get_value', (['box_angles'], {}), '(box_angles)\n', (1827, 1839), False, 'from molsysmt import puw\n'), ((239, 260), 'numpy.array', 'np.array', (['box_lengths'], {}), '(box_lengths)\n', (247, 260), True, 'import numpy as np\n'), ((1170, 1190), 'numpy.array', 'np.array', (['box_angles'], {}), '(box_angles)\n', (1178, 1190), True, 'import numpy as np\n'), ((360, 395), 'numpy.expand_dims', 'np.expand_dims', (['box_lengths'], {'axis': '(0)'}), '(box_lengths, axis=0)\n', (374, 395), True, 'import numpy as np\n'), ((1289, 1323), 'numpy.expand_dims', 'np.expand_dims', (['box_angles'], {'axis': '(0)'}), '(box_angles, axis=0)\n', (1303, 1323), True, 'import numpy as np\n')]
|
"""Package for loading and running the nuclei and cell segmentation models programmaticly."""
import os
import sys
import cv2
import imageio
import numpy as np
import torch
import torch.nn
import torch.nn.functional as F
from skimage import transform, util
from hpacellseg.constants import (MULTI_CHANNEL_CELL_MODEL_URL,
NUCLEI_MODEL_URL, TWO_CHANNEL_CELL_MODEL_URL)
from hpacellseg.utils import download_with_url
NORMALIZE = {"mean": [124 / 255, 117 / 255, 104 / 255], "std": [1 / (0.0167 * 255)] * 3}
class CellSegmentator(object):
"""Uses pretrained DPN-Unet models to segment cells from images."""
def __init__(
self,
nuclei_model="./nuclei_model.pth",
cell_model="./cell_model.pth",
model_width_height=None,
device="cuda",
multi_channel_model=True,
return_without_scale_restore=False,
scale_factor=0.25,
padding=False
):
if device != "cuda" and device != "cpu" and "cuda" not in device:
raise ValueError(f"{device} is not a valid device (cuda/cpu)")
if device != "cpu":
try:
assert torch.cuda.is_available()
except AssertionError:
print("No GPU found, using CPU.", file=sys.stderr)
device = "cpu"
self.device = device
if isinstance(nuclei_model, str):
if not os.path.exists(nuclei_model):
print(
f"Could not find {nuclei_model}. Downloading it now",
file=sys.stderr,
)
download_with_url(NUCLEI_MODEL_URL, nuclei_model)
nuclei_model = torch.load(
nuclei_model, map_location=torch.device(self.device)
)
if isinstance(nuclei_model, torch.nn.DataParallel) and device == "cpu":
nuclei_model = nuclei_model.module
self.nuclei_model = nuclei_model.to(self.device)
self.multi_channel_model = multi_channel_model
if isinstance(cell_model, str):
if not os.path.exists(cell_model):
print(
f"Could not find {cell_model}. Downloading it now", file=sys.stderr
)
if self.multi_channel_model:
download_with_url(MULTI_CHANNEL_CELL_MODEL_URL, cell_model)
else:
download_with_url(TWO_CHANNEL_CELL_MODEL_URL, cell_model)
cell_model = torch.load(cell_model, map_location=torch.device(self.device))
self.cell_model = cell_model.to(self.device)
self.model_width_height = model_width_height
self.return_without_scale_restore = return_without_scale_restore
self.scale_factor = scale_factor
self.padding = padding
def _image_conversion(self, images):
microtubule_imgs, er_imgs, nuclei_imgs = images
if self.multi_channel_model:
if not isinstance(er_imgs, list):
raise ValueError("Please speicify the image path(s) for er channels!")
else:
if not er_imgs is None:
raise ValueError(
"second channel should be None for two channel model predition!"
)
if not isinstance(microtubule_imgs, list):
raise ValueError("The microtubule images should be a list")
if not isinstance(nuclei_imgs, list):
raise ValueError("The microtubule images should be a list")
if er_imgs:
if not len(microtubule_imgs) == len(er_imgs) == len(nuclei_imgs):
raise ValueError("The lists of images needs to be the same length")
else:
if not len(microtubule_imgs) == len(nuclei_imgs):
raise ValueError("The lists of images needs to be the same length")
if not all(isinstance(item, np.ndarray) for item in microtubule_imgs):
microtubule_imgs = [
os.path.expanduser(item) for _, item in enumerate(microtubule_imgs)
]
nuclei_imgs = [
os.path.expanduser(item) for _, item in enumerate(nuclei_imgs)
]
microtubule_imgs = list(
map(lambda item: imageio.imread(item), microtubule_imgs)
)
nuclei_imgs = list(map(lambda item: imageio.imread(item), nuclei_imgs))
if er_imgs:
er_imgs = [os.path.expanduser(item) for _, item in enumerate(er_imgs)]
er_imgs = list(map(lambda item: imageio.imread(item), er_imgs))
if not er_imgs:
er_imgs = [
np.zeros(item.shape, dtype=item.dtype)
for _, item in enumerate(microtubule_imgs)
]
cell_imgs = list(
map(
lambda item: np.dstack((item[0], item[1], item[2])),
list(zip(microtubule_imgs, er_imgs, nuclei_imgs)),
)
)
return cell_imgs
def _pad(self, image):
rows, cols = image.shape[:2]
self.scaled_shape = rows, cols
img_pad= cv2.copyMakeBorder(
image,
32,
(32 - rows % 32),
32,
(32 - cols % 32),
cv2.BORDER_REFLECT,
)
return img_pad
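
    # Note on the padding arithmetic above (added, explanatory): the top and left borders
    # are a fixed 32 px while the bottom and right borders are (32 - dim % 32), so the
    # padded size is (dim - dim % 32) + 64, which is always a multiple of 32 as the
    # U-Net style downsampling path requires. _restore_scaling later crops the 32 px
    # offset back off.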
    def pred_nuclei(self, images):
        def _preprocess(images):
            if isinstance(images[0], str):
                raise NotImplementedError('Currently the model requires images as numpy arrays, not paths.')
                # images = [imageio.imread(image_path) for image_path in images]
            self.target_shapes = [image.shape for image in images]
            # print(images.shape)
            # resize like in the original implementation with https://scikit-image.org/docs/dev/api/skimage.transform.html#skimage.transform.resize
            if self.model_width_height:
                images = np.array([transform.resize(image, (self.model_width_height, self.model_width_height))
                                   for image in images])
            else:
                images = [transform.rescale(image, self.scale_factor) for image in images]
            if self.padding:
                images = [self._pad(image) for image in images]
            nuc_images = np.array([np.dstack((image[..., 2], image[..., 2], image[..., 2])) if len(image.shape) >= 3
                                   else np.dstack((image, image, image)) for image in images])
            nuc_images = nuc_images.transpose([0, 3, 1, 2])
            # print("nuc", nuc_images.shape)
            return nuc_images

        def _segment_helper(imgs):
            with torch.no_grad():
                mean = torch.as_tensor(NORMALIZE["mean"], device=self.device)
                std = torch.as_tensor(NORMALIZE["std"], device=self.device)
                imgs = torch.tensor(imgs).float()
                imgs = imgs.to(self.device)
                imgs = imgs.sub_(mean[:, None, None]).div_(std[:, None, None])
                imgs = self.nuclei_model(imgs)
                imgs = F.softmax(imgs, dim=1)
                return imgs

        preprocessed_imgs = _preprocess(images)
        predictions = _segment_helper(preprocessed_imgs)
        predictions = predictions.to("cpu").numpy()
        # don't restore scaling yet, just save and scale later ...
        predictions = [self._restore_scaling(util.img_as_ubyte(pred), target_shape)
                       for pred, target_shape in zip(predictions, self.target_shapes)]
        return predictions

    def _restore_scaling(self, n_prediction, target_shape):
        """Restore an image from scaling and padding.

        This method is intended for internal use.
        It takes the output from the nuclei model as input.
        """
        n_prediction = n_prediction.transpose([1, 2, 0])
        if self.padding:
            n_prediction = n_prediction[
                32 : 32 + self.scaled_shape[0], 32 : 32 + self.scaled_shape[1], ...
            ]
        n_prediction[..., 0] = 0
        if not self.return_without_scale_restore:
            n_prediction = cv2.resize(
                n_prediction,
                (target_shape[0], target_shape[1]),
                # try INTER_NEAREST_EXACT
                interpolation=cv2.INTER_NEAREST_EXACT,
            )
        return n_prediction

    def pred_cells(self, images, precombined=False):
        def _preprocess(images):
            self.target_shapes = [image.shape for image in images]
            for image in images:
                if not len(image.shape) == 3:
                    raise ValueError("image should have 3 channels")
            # resize like in the original implementation with https://scikit-image.org/docs/dev/api/skimage.transform.html#skimage.transform.resize
            if self.model_width_height:
                images = np.array([transform.resize(image, (self.model_width_height, self.model_width_height))
                                   for image in images])
            else:
                images = np.array([transform.rescale(image, self.scale_factor, multichannel=True) for image in images])
            if self.padding:
                images = np.array([self._pad(image) for image in images])
            cell_images = images.transpose([0, 3, 1, 2])
            return cell_images

        def _segment_helper(imgs):
            with torch.no_grad():
                mean = torch.as_tensor(NORMALIZE["mean"], device=self.device)
                std = torch.as_tensor(NORMALIZE["std"], device=self.device)
                imgs = torch.tensor(imgs).float()
                imgs = imgs.to(self.device)
                imgs = imgs.sub_(mean[:, None, None]).div_(std[:, None, None])
                imgs = self.cell_model(imgs)
                imgs = F.softmax(imgs, dim=1)
                return imgs

        if not precombined:
            images = self._image_conversion(images)
        preprocessed_imgs = _preprocess(images)
        predictions = _segment_helper(preprocessed_imgs)
        predictions = predictions.to("cpu").numpy()
        predictions = [self._restore_scaling(util.img_as_ubyte(pred), target_shape)
                       for pred, target_shape in zip(predictions, self.target_shapes)]
        return predictions
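

if __name__ == "__main__":
    # Usage sketch (illustrative only; the file names below are hypothetical placeholders
    # and the inputs are assumed to be single-channel numpy arrays).
    # pred_nuclei expects a list of arrays; pred_cells expects the three channel lists
    # [microtubule, er, nuclei] unless precombined=True.
    segmentator = CellSegmentator(
        nuclei_model="./nuclei_model.pth",
        cell_model="./cell_model.pth",
        device="cpu",
        padding=True,
    )
    microtubules = imageio.imread("microtubules.png")  # hypothetical example images
    er = imageio.imread("er.png")
    nuclei = imageio.imread("nuclei.png")
    nuclei_masks = segmentator.pred_nuclei([nuclei])
    cell_masks = segmentator.pred_cells([[microtubules], [er], [nuclei]])
    print(nuclei_masks[0].shape, cell_masks[0].shape)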
|
[
"numpy.dstack",
"skimage.transform.rescale",
"skimage.util.img_as_ubyte",
"imageio.imread",
"os.path.exists",
"cv2.copyMakeBorder",
"numpy.zeros",
"torch.nn.functional.softmax",
"torch.cuda.is_available",
"skimage.transform.resize",
"torch.device",
"torch.as_tensor",
"hpacellseg.utils.download_with_url",
"torch.tensor",
"torch.no_grad",
"os.path.expanduser",
"cv2.resize"
] |
[((5151, 5241), 'cv2.copyMakeBorder', 'cv2.copyMakeBorder', (['image', '(32)', '(32 - rows % 32)', '(32)', '(32 - cols % 32)', 'cv2.BORDER_REFLECT'], {}), '(image, 32, 32 - rows % 32, 32, 32 - cols % 32, cv2.\n BORDER_REFLECT)\n', (5169, 5241), False, 'import cv2\n'), ((8231, 8335), 'cv2.resize', 'cv2.resize', (['n_prediction', '(target_shape[0], target_shape[1])'], {'interpolation': 'cv2.INTER_NEAREST_EXACT'}), '(n_prediction, (target_shape[0], target_shape[1]), interpolation=\n cv2.INTER_NEAREST_EXACT)\n', (8241, 8335), False, 'import cv2\n'), ((1204, 1229), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1227, 1229), False, 'import torch\n'), ((1454, 1482), 'os.path.exists', 'os.path.exists', (['nuclei_model'], {}), '(nuclei_model)\n', (1468, 1482), False, 'import os\n'), ((1652, 1701), 'hpacellseg.utils.download_with_url', 'download_with_url', (['NUCLEI_MODEL_URL', 'nuclei_model'], {}), '(NUCLEI_MODEL_URL, nuclei_model)\n', (1669, 1701), False, 'from hpacellseg.utils import download_with_url\n'), ((2124, 2150), 'os.path.exists', 'os.path.exists', (['cell_model'], {}), '(cell_model)\n', (2138, 2150), False, 'import os\n'), ((4015, 4039), 'os.path.expanduser', 'os.path.expanduser', (['item'], {}), '(item)\n', (4033, 4039), False, 'import os\n'), ((4141, 4165), 'os.path.expanduser', 'os.path.expanduser', (['item'], {}), '(item)\n', (4159, 4165), False, 'import os\n'), ((4683, 4721), 'numpy.zeros', 'np.zeros', (['item.shape'], {'dtype': 'item.dtype'}), '(item.shape, dtype=item.dtype)\n', (4691, 4721), True, 'import numpy as np\n'), ((6782, 6797), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6795, 6797), False, 'import torch\n'), ((6822, 6876), 'torch.as_tensor', 'torch.as_tensor', (["NORMALIZE['mean']"], {'device': 'self.device'}), "(NORMALIZE['mean'], device=self.device)\n", (6837, 6876), False, 'import torch\n'), ((6899, 6952), 'torch.as_tensor', 'torch.as_tensor', (["NORMALIZE['std']"], {'device': 'self.device'}), "(NORMALIZE['std'], device=self.device)\n", (6914, 6952), False, 'import torch\n'), ((7197, 7219), 'torch.nn.functional.softmax', 'F.softmax', (['imgs'], {'dim': '(1)'}), '(imgs, dim=1)\n', (7206, 7219), True, 'import torch.nn.functional as F\n'), ((7512, 7535), 'skimage.util.img_as_ubyte', 'util.img_as_ubyte', (['pred'], {}), '(pred)\n', (7529, 7535), False, 'from skimage import transform, util\n'), ((9498, 9513), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (9511, 9513), False, 'import torch\n'), ((9538, 9592), 'torch.as_tensor', 'torch.as_tensor', (["NORMALIZE['mean']"], {'device': 'self.device'}), "(NORMALIZE['mean'], device=self.device)\n", (9553, 9592), False, 'import torch\n'), ((9615, 9668), 'torch.as_tensor', 'torch.as_tensor', (["NORMALIZE['std']"], {'device': 'self.device'}), "(NORMALIZE['std'], device=self.device)\n", (9630, 9668), False, 'import torch\n'), ((9910, 9932), 'torch.nn.functional.softmax', 'F.softmax', (['imgs'], {'dim': '(1)'}), '(imgs, dim=1)\n', (9919, 9932), True, 'import torch.nn.functional as F\n'), ((10244, 10267), 'skimage.util.img_as_ubyte', 'util.img_as_ubyte', (['pred'], {}), '(pred)\n', (10261, 10267), False, 'from skimage import transform, util\n'), ((1784, 1809), 'torch.device', 'torch.device', (['self.device'], {}), '(self.device)\n', (1796, 1809), False, 'import torch\n'), ((2346, 2405), 'hpacellseg.utils.download_with_url', 'download_with_url', (['MULTI_CHANNEL_CELL_MODEL_URL', 'cell_model'], {}), '(MULTI_CHANNEL_CELL_MODEL_URL, cell_model)\n', (2363, 2405), False, 'from hpacellseg.utils import 
download_with_url\n'), ((2448, 2505), 'hpacellseg.utils.download_with_url', 'download_with_url', (['TWO_CHANNEL_CELL_MODEL_URL', 'cell_model'], {}), '(TWO_CHANNEL_CELL_MODEL_URL, cell_model)\n', (2465, 2505), False, 'from hpacellseg.utils import download_with_url\n'), ((2567, 2592), 'torch.device', 'torch.device', (['self.device'], {}), '(self.device)\n', (2579, 2592), False, 'import torch\n'), ((4478, 4502), 'os.path.expanduser', 'os.path.expanduser', (['item'], {}), '(item)\n', (4496, 4502), False, 'import os\n'), ((4867, 4905), 'numpy.dstack', 'np.dstack', (['(item[0], item[1], item[2])'], {}), '((item[0], item[1], item[2]))\n', (4876, 4905), True, 'import numpy as np\n'), ((6211, 6254), 'skimage.transform.rescale', 'transform.rescale', (['image', 'self.scale_factor'], {}), '(image, self.scale_factor)\n', (6228, 6254), False, 'from skimage import transform, util\n'), ((4289, 4309), 'imageio.imread', 'imageio.imread', (['item'], {}), '(item)\n', (4303, 4309), False, 'import imageio\n'), ((4391, 4411), 'imageio.imread', 'imageio.imread', (['item'], {}), '(item)\n', (4405, 4411), False, 'import imageio\n'), ((6035, 6110), 'skimage.transform.resize', 'transform.resize', (['image', '(self.model_width_height, self.model_width_height)'], {}), '(image, (self.model_width_height, self.model_width_height))\n', (6051, 6110), False, 'from skimage import transform, util\n'), ((6404, 6460), 'numpy.dstack', 'np.dstack', (['(image[..., 2], image[..., 2], image[..., 2])'], {}), '((image[..., 2], image[..., 2], image[..., 2]))\n', (6413, 6460), True, 'import numpy as np\n'), ((6526, 6558), 'numpy.dstack', 'np.dstack', (['(image, image, image)'], {}), '((image, image, image))\n', (6535, 6558), True, 'import numpy as np\n'), ((6976, 6994), 'torch.tensor', 'torch.tensor', (['imgs'], {}), '(imgs)\n', (6988, 6994), False, 'import torch\n'), ((8983, 9058), 'skimage.transform.resize', 'transform.resize', (['image', '(self.model_width_height, self.model_width_height)'], {}), '(image, (self.model_width_height, self.model_width_height))\n', (8999, 9058), False, 'from skimage import transform, util\n'), ((9168, 9230), 'skimage.transform.rescale', 'transform.rescale', (['image', 'self.scale_factor'], {'multichannel': '(True)'}), '(image, self.scale_factor, multichannel=True)\n', (9185, 9230), False, 'from skimage import transform, util\n'), ((9692, 9710), 'torch.tensor', 'torch.tensor', (['imgs'], {}), '(imgs)\n', (9704, 9710), False, 'import torch\n'), ((4586, 4606), 'imageio.imread', 'imageio.imread', (['item'], {}), '(item)\n', (4600, 4606), False, 'import imageio\n')]
|
import sys
sys.path.append("..")
import os
here = os.path.dirname(os.path.realpath(__file__))
import pickle
import tempfile
import numpy as np
import pyrfr.regression
data_set_prefix = '%(here)s/../test_data_sets/diabetes_' % {"here":here}
features = np.loadtxt(data_set_prefix+'features.csv', delimiter=",")
responses = np.loadtxt(data_set_prefix+'responses.csv', delimiter=",")
data = pyrfr.regression.default_data_container(10)
data.import_csv_files(data_set_prefix+'features.csv', data_set_prefix+'responses.csv')
# create an instance of a regression forest using binary splits and the RSS loss
the_forest = pyrfr.regression.binary_rss_forest()
#reset to reseed the rng for the next fit
rng = pyrfr.regression.default_random_engine(42)
# create an instance of a regression forest using binary splits and the RSS loss
the_forest = pyrfr.regression.binary_rss_forest()
the_forest.options.num_trees = 16
# the forest's parameters
the_forest.options.compute_oob_error = True
the_forest.options.do_bootstrapping=True # default: false
the_forest.options.num_data_points_per_tree = (data.num_data_points()//4)*3 # use 3/4 of the data points per tree; 0 would mean the same number as data points
the_forest.options.tree_opts.max_features = data.num_features()//2 # 0 would mean all the features
the_forest.options.tree_opts.min_samples_to_split = 0 # 0 means split until pure
the_forest.options.tree_opts.min_samples_in_leaf = 0 # 0 means no restriction
the_forest.options.tree_opts.max_depth=1024 # 0 means no restriction
the_forest.options.tree_opts.epsilon_purity = 1e-8 # when checking for purity, the data points can differ by this epsilon
the_forest.fit(data, rng)
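# (Added note) with compute_oob_error enabled, fit() also derives an out-of-bag error
# estimate from the bootstrap samples; the pickle round-trip below then checks that a
# serialized and re-loaded forest reproduces exactly the same predictions.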
predictions_1 = [ the_forest.predict(f.tolist()) for f in features]
with tempfile.NamedTemporaryFile(mode='w+b', delete=False) as f:
    fname = f.name
    pickle.dump(the_forest, f)

with open(fname, 'r+b') as fh:
    a_second_forest = pickle.load(fh)
os.remove(fname)
predictions_2 = [ a_second_forest.predict(f.tolist()) for f in features]
if np.allclose(predictions_1, predictions_2):
    print("successfully pickled/unpickled the forest")
else:
    print("something went wrong")
|
[
"sys.path.append",
"tempfile.NamedTemporaryFile",
"os.remove",
"pickle.dump",
"os.path.realpath",
"numpy.allclose",
"pickle.load",
"numpy.loadtxt"
] |
[((11, 32), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (26, 32), False, 'import sys\n'), ((258, 317), 'numpy.loadtxt', 'np.loadtxt', (["(data_set_prefix + 'features.csv')"], {'delimiter': '""","""'}), "(data_set_prefix + 'features.csv', delimiter=',')\n", (268, 317), True, 'import numpy as np\n'), ((329, 389), 'numpy.loadtxt', 'np.loadtxt', (["(data_set_prefix + 'responses.csv')"], {'delimiter': '""","""'}), "(data_set_prefix + 'responses.csv', delimiter=',')\n", (339, 389), True, 'import numpy as np\n'), ((1884, 1900), 'os.remove', 'os.remove', (['fname'], {}), '(fname)\n', (1893, 1900), False, 'import os\n'), ((1982, 2023), 'numpy.allclose', 'np.allclose', (['predictions_1', 'predictions_2'], {}), '(predictions_1, predictions_2)\n', (1993, 2023), True, 'import numpy as np\n'), ((66, 92), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (82, 92), False, 'import os\n'), ((1712, 1765), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'mode': '"""w+b"""', 'delete': '(False)'}), "(mode='w+b', delete=False)\n", (1739, 1765), False, 'import tempfile\n'), ((1789, 1815), 'pickle.dump', 'pickle.dump', (['the_forest', 'f'], {}), '(the_forest, f)\n', (1800, 1815), False, 'import pickle\n'), ((1868, 1883), 'pickle.load', 'pickle.load', (['fh'], {}), '(fh)\n', (1879, 1883), False, 'import pickle\n')]
|
import numpy as np
import os
import torch
import copy
from math import cos, sqrt, pi


def dct(x, y, v, u, n):
    # Normalisation
    def alpha(a):
        if a == 0:
            return sqrt(1.0 / n)
        else:
            return sqrt(2.0 / n)
    return alpha(u) * alpha(v) * cos(((2 * x + 1) * (u * pi)) / (2 * n)) * cos(((2 * y + 1) * (v * pi)) / (2 * n))
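
# (Added, descriptive) dct(x, y, v, u, n) evaluates one entry of the orthonormal 2D DCT-II basis:
#     B_{u,v}(x, y) = alpha(u) * alpha(v) * cos((2x+1) u pi / 2n) * cos((2y+1) v pi / 2n)
# with alpha(0) = sqrt(1/n) and alpha(a) = sqrt(2/n) otherwise, so the basis images built
# below have unit L2 norm and are mutually orthogonal.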
def generate_2d_dct_basis(root_path, image_height, sub_dim=75):
    path = "{}/attacked_images/GeoDA/2d_dct_basis_height_{}_subdim_{}.npy".format(root_path, image_height, sub_dim)
    os.makedirs(os.path.dirname(path), exist_ok=True)
    if os.path.exists(path):
        return np.load(path)

    n = image_height  # Assume square image, so we don't have different xres and yres
    # We can get different frequencies by setting u and v
    # Here, we have a max u and v to loop over and display
    # Feel free to adjust
    maxU = sub_dim
    maxV = sub_dim

    dct_basis = []
    for u in range(0, maxU):
        for v in range(0, maxV):
            basisImg = np.zeros((n, n))
            for y in range(0, n):
                for x in range(0, n):
                    basisImg[y, x] = dct(x, y, v, u, max(n, maxV))
            dct_basis.append(basisImg)
    dct_basis = np.mat(np.reshape(dct_basis, (maxV*maxU, n*n))).transpose()
    np.save(path, dct_basis)
    return dct_basis
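
# Usage note (illustrative, added): the returned matrix has shape (image_height**2, sub_dim**2);
# each column is one flattened low-frequency DCT basis image, which GeoDA uses to draw
# perturbations restricted to that low-frequency subspace, e.g.
#     basis = generate_2d_dct_basis(root_path='.', image_height=224, sub_dim=75)
# (the cache file is written under root_path/attacked_images/GeoDA/, which must be creatable).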
def clip_image_values(x, minv, maxv):
    if not isinstance(minv, torch.Tensor):
        return torch.clamp(x, min=minv, max=maxv)
    return torch.min(torch.max(x, minv), maxv)


def valid_bounds(img, delta=255):
    im = copy.deepcopy(np.asarray(img))
    im = im.astype(int)

    # General valid bounds [0, 255]
    valid_lb = np.zeros_like(im)
    valid_ub = np.full_like(im, 255)

    # Compute the bounds
    lb = im - delta
    ub = im + delta

    # Validate that the bounds are in [0, 255]
    lb = np.maximum(valid_lb, np.minimum(lb, im))
    ub = np.minimum(valid_ub, np.maximum(ub, im))

    # Change types to uint8
    lb = lb.astype(np.uint8)
    ub = ub.astype(np.uint8)

    return lb, ub


def inv_tf(x, mean, std):
    for i in range(len(mean)):
        x[i] = np.multiply(x[i], std[i], dtype=np.float32)
        x[i] = np.add(x[i], mean[i], dtype=np.float32)
    x = np.swapaxes(x, 0, 2)
    x = np.swapaxes(x, 0, 1)
    return x


def inv_tf_pert(r):
    pert = np.sum(np.absolute(r), axis=0)
    pert[pert != 0] = 1
    return pert


def get_label(x):
    s = x.split(' ')
    label = ''
    for l in range(1, len(s)):
        label += s[l] + ' '
    return label


def nnz_pixels(arr):
    return np.count_nonzero(np.sum(np.absolute(arr), axis=0))
|
[
"numpy.absolute",
"numpy.load",
"numpy.maximum",
"numpy.full_like",
"numpy.zeros_like",
"numpy.multiply",
"os.path.dirname",
"os.path.exists",
"numpy.swapaxes",
"math.cos",
"numpy.reshape",
"numpy.add",
"numpy.save",
"numpy.minimum",
"math.sqrt",
"numpy.asarray",
"torch.clamp",
"torch.max",
"numpy.zeros"
] |
[((606, 626), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (620, 626), False, 'import os\n'), ((1305, 1329), 'numpy.save', 'np.save', (['path', 'dct_basis'], {}), '(path, dct_basis)\n', (1312, 1329), True, 'import numpy as np\n'), ((1685, 1702), 'numpy.zeros_like', 'np.zeros_like', (['im'], {}), '(im)\n', (1698, 1702), True, 'import numpy as np\n'), ((1718, 1739), 'numpy.full_like', 'np.full_like', (['im', '(255)'], {}), '(im, 255)\n', (1730, 1739), True, 'import numpy as np\n'), ((2244, 2264), 'numpy.swapaxes', 'np.swapaxes', (['x', '(0)', '(2)'], {}), '(x, 0, 2)\n', (2255, 2264), True, 'import numpy as np\n'), ((2273, 2293), 'numpy.swapaxes', 'np.swapaxes', (['x', '(0)', '(1)'], {}), '(x, 0, 1)\n', (2284, 2293), True, 'import numpy as np\n'), ((324, 361), 'math.cos', 'cos', (['((2 * y + 1) * (v * pi) / (2 * n))'], {}), '((2 * y + 1) * (v * pi) / (2 * n))\n', (327, 361), False, 'from math import cos, sqrt, pi\n'), ((562, 583), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (577, 583), False, 'import os\n'), ((643, 656), 'numpy.load', 'np.load', (['path'], {}), '(path)\n', (650, 656), True, 'import numpy as np\n'), ((1449, 1483), 'torch.clamp', 'torch.clamp', (['x'], {'min': 'minv', 'max': 'maxv'}), '(x, min=minv, max=maxv)\n', (1460, 1483), False, 'import torch\n'), ((1503, 1521), 'torch.max', 'torch.max', (['x', 'minv'], {}), '(x, minv)\n', (1512, 1521), False, 'import torch\n'), ((1589, 1604), 'numpy.asarray', 'np.asarray', (['img'], {}), '(img)\n', (1599, 1604), True, 'import numpy as np\n'), ((1884, 1902), 'numpy.minimum', 'np.minimum', (['lb', 'im'], {}), '(lb, im)\n', (1894, 1902), True, 'import numpy as np\n'), ((1934, 1952), 'numpy.maximum', 'np.maximum', (['ub', 'im'], {}), '(ub, im)\n', (1944, 1952), True, 'import numpy as np\n'), ((2136, 2179), 'numpy.multiply', 'np.multiply', (['x[i]', 'std[i]'], {'dtype': 'np.float32'}), '(x[i], std[i], dtype=np.float32)\n', (2147, 2179), True, 'import numpy as np\n'), ((2195, 2234), 'numpy.add', 'np.add', (['x[i]', 'mean[i]'], {'dtype': 'np.float32'}), '(x[i], mean[i], dtype=np.float32)\n', (2201, 2234), True, 'import numpy as np\n'), ((2349, 2363), 'numpy.absolute', 'np.absolute', (['r'], {}), '(r)\n', (2360, 2363), True, 'import numpy as np\n'), ((187, 200), 'math.sqrt', 'sqrt', (['(1.0 / n)'], {}), '(1.0 / n)\n', (191, 200), False, 'from math import cos, sqrt, pi\n'), ((234, 247), 'math.sqrt', 'sqrt', (['(2.0 / n)'], {}), '(2.0 / n)\n', (238, 247), False, 'from math import cos, sqrt, pi\n'), ((282, 319), 'math.cos', 'cos', (['((2 * x + 1) * (u * pi) / (2 * n))'], {}), '((2 * x + 1) * (u * pi) / (2 * n))\n', (285, 319), False, 'from math import cos, sqrt, pi\n'), ((1030, 1046), 'numpy.zeros', 'np.zeros', (['(n, n)'], {}), '((n, n))\n', (1038, 1046), True, 'import numpy as np\n'), ((2605, 2621), 'numpy.absolute', 'np.absolute', (['arr'], {}), '(arr)\n', (2616, 2621), True, 'import numpy as np\n'), ((1248, 1291), 'numpy.reshape', 'np.reshape', (['dct_basis', '(maxV * maxU, n * n)'], {}), '(dct_basis, (maxV * maxU, n * n))\n', (1258, 1291), True, 'import numpy as np\n')]
|
import math
from typing import Any, Dict, Tuple

import attr
from attr import attrib, attrs
import numpy as np

from nlisim.cell import CellData, CellFields, CellList
from nlisim.coordinates import Point, Voxel
from nlisim.grid import RectangularGrid
from nlisim.modules.phagocyte import (
    PhagocyteCellData,
    PhagocyteModel,
    PhagocyteModuleState,
    PhagocyteStatus,
)
from nlisim.random import rg
from nlisim.state import State
from nlisim.util import TissueType, activation_function


class PneumocyteCellData(PhagocyteCellData):
    PNEUMOCYTE_FIELDS: CellFields = [
        ('status', np.uint8),
        ('status_iteration', np.uint),
        ('tnfa', bool),
    ]

    dtype = np.dtype(
        CellData.FIELDS + PhagocyteCellData.PHAGOCYTE_FIELDS + PNEUMOCYTE_FIELDS, align=True
    )  # type: ignore

    @classmethod
    def create_cell_tuple(
        cls,
        **kwargs,
    ) -> Tuple:
        initializer = {
            'status': kwargs.get('status', PhagocyteStatus.RESTING),
            'status_iteration': kwargs.get('status_iteration', 0),
            'tnfa': kwargs.get('tnfa', False),
        }

        # ensure that these come in the correct order
        return PhagocyteCellData.create_cell_tuple(**kwargs) + tuple(
            [initializer[key] for key, *_ in PneumocyteCellData.PNEUMOCYTE_FIELDS]
        )
@attrs(kw_only=True, frozen=True, repr=False)
class PneumocyteCellList(CellList):
    CellDataClass = PneumocyteCellData


def cell_list_factory(self: 'PneumocyteState') -> PneumocyteCellList:
    return PneumocyteCellList(grid=self.global_state.grid)


@attrs(kw_only=True)
class PneumocyteState(PhagocyteModuleState):
    cells: PneumocyteCellList = attrib(default=attr.Factory(cell_list_factory, takes_self=True))
    max_conidia: int  # units: conidia
    time_to_rest: float  # units: hours
    iter_to_rest: int  # units: steps
    time_to_change_state: float  # units: hours
    iter_to_change_state: int  # units: steps
    # p_il6_qtty: float  # units: mol * cell^-1 * h^-1
    # p_il8_qtty: float  # units: mol * cell^-1 * h^-1
    p_tnf_qtty: float  # units: atto-mol * cell^-1 * h^-1
    pr_p_int: float  # units: probability
    pr_p_int_param: float
class Pneumocyte(PhagocyteModel):
    name = 'pneumocyte'
    StateClass = PneumocyteState

    def initialize(self, state: State):
        pneumocyte: PneumocyteState = state.pneumocyte
        voxel_volume: float = state.voxel_volume
        time_step_size: float = self.time_step
        lung_tissue: np.ndarray = state.lung_tissue

        pneumocyte.max_conidia = self.config.getint('max_conidia')  # units: conidia
        pneumocyte.time_to_rest = self.config.getint('time_to_rest')  # units: hours
        pneumocyte.time_to_change_state = self.config.getint('time_to_change_state')  # units: hours
        pneumocyte.p_tnf_qtty = self.config.getfloat(
            'p_tnf_qtty'
        )  # units: atto-mol * cell^-1 * h^-1
        pneumocyte.pr_p_int_param = self.config.getfloat('pr_p_int_param')

        # computed values
        pneumocyte.iter_to_rest = int(
            pneumocyte.time_to_rest * (60 / self.time_step)
        )  # units: hours * (min/hour) / (min/step) = step
        pneumocyte.iter_to_change_state = int(
            pneumocyte.time_to_change_state * (60 / self.time_step)
        )  # units: hours * (min/hour) / (min/step) = step
        pneumocyte.pr_p_int = -math.expm1(
            -time_step_size / 60 / (voxel_volume * pneumocyte.pr_p_int_param)
        )  # units: probability
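        # (Added, explanatory) -math.expm1(-x) == 1 - exp(-x), so pr_p_int is the
        # probability that at least one interaction event occurs during one time step,
        # assuming a Poisson process with rate 1 / (voxel_volume * pr_p_int_param) per hour
        # (time_step_size / 60 converts the step length from minutes to hours).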
        # initialize cells, placing one per epithelial voxel
        dz_field: np.ndarray = state.grid.delta(axis=0)
        dy_field: np.ndarray = state.grid.delta(axis=1)
        dx_field: np.ndarray = state.grid.delta(axis=2)
        epithelial_voxels = list(zip(*np.where(lung_tissue == TissueType.EPITHELIUM)))
        rg.shuffle(epithelial_voxels)
        for vox_z, vox_y, vox_x in epithelial_voxels[: self.config.getint('count')]:
            # the x,y,z coordinates are in the centers of the grids
            z = state.grid.z[vox_z]
            y = state.grid.y[vox_y]
            x = state.grid.x[vox_x]
            dz = dz_field[vox_z, vox_y, vox_x]
            dy = dy_field[vox_z, vox_y, vox_x]
            dx = dx_field[vox_z, vox_y, vox_x]
            pneumocyte.cells.append(
                PneumocyteCellData.create_cell(
                    point=Point(
                        x=x + rg.uniform(-dx / 2, dx / 2),
                        y=y + rg.uniform(-dy / 2, dy / 2),
                        z=z + rg.uniform(-dz / 2, dz / 2),
                    )
                )
            )

        return state

    def single_step_probabilistic_drift(
        self, state: State, cell: PhagocyteCellData, voxel: Voxel
    ) -> Point:
        # pneumocytes do not move
        pass
    def advance(self, state: State, previous_time: float):
        """Advance the state by a single time step."""
        from nlisim.modules.afumigatus import (
            AfumigatusCellData,
            AfumigatusCellStatus,
            AfumigatusState,
        )

        # from nlisim.modules.il6 import IL6State
        # from nlisim.modules.il8 import IL8State
        from nlisim.modules.tnfa import TNFaState

        pneumocyte: PneumocyteState = state.pneumocyte
        afumigatus: AfumigatusState = state.afumigatus
        # il6: IL6State = getattr(state, 'il6', None)
        # il8: IL8State = getattr(state, 'il8', None)
        tnfa: TNFaState = state.tnfa
        grid: RectangularGrid = state.grid
        voxel_volume: float = state.voxel_volume

        for pneumocyte_cell_index in pneumocyte.cells.alive():
            pneumocyte_cell = pneumocyte.cells[pneumocyte_cell_index]
            pneumocyte_cell_voxel: Voxel = grid.get_voxel(pneumocyte_cell['point'])

            # self update
            if pneumocyte_cell['status'] == PhagocyteStatus.ACTIVE:
                if pneumocyte_cell['status_iteration'] >= pneumocyte.iter_to_rest:
                    pneumocyte_cell['status_iteration'] = 0
                    pneumocyte_cell['status'] = PhagocyteStatus.RESTING
                    pneumocyte_cell['tnfa'] = False
                else:
                    pneumocyte_cell['status_iteration'] += 1

            elif pneumocyte_cell['status'] == PhagocyteStatus.ACTIVATING:
                if pneumocyte_cell['status_iteration'] >= pneumocyte.iter_to_change_state:
                    pneumocyte_cell['status_iteration'] = 0
                    pneumocyte_cell['status'] = PhagocyteStatus.ACTIVE
                else:
                    pneumocyte_cell['status_iteration'] += 1

            # ----------- interactions

            # interact with fungus
            if pneumocyte_cell['status'] not in {
                PhagocyteStatus.APOPTOTIC,
                PhagocyteStatus.NECROTIC,
                PhagocyteStatus.DEAD,
            }:
                local_aspergillus = afumigatus.cells.get_cells_in_voxel(pneumocyte_cell_voxel)
                for aspergillus_index in local_aspergillus:
                    aspergillus_cell: AfumigatusCellData = afumigatus.cells[aspergillus_index]

                    # skip resting conidia
                    if aspergillus_cell['status'] == AfumigatusCellStatus.RESTING_CONIDIA:
                        continue

                    if pneumocyte_cell['status'] != PhagocyteStatus.ACTIVE:
                        if rg.uniform() < pneumocyte.pr_p_int:
                            pneumocyte_cell['status'] = PhagocyteStatus.ACTIVATING
                    else:
                        # TODO: I don't get this, looks like it zeros out the iteration
                        #  when activating
                        pneumocyte_cell['status_iteration'] = 0

            # # secrete IL6
            # if il6 is not None and pneumocyte_cell['status'] == PhagocyteStatus.ACTIVE:
            #     il6.grid[tuple(pneumocyte_cell_voxel)] += pneumocyte.p_il6_qtty
            #
            # # secrete IL8
            # if il8 is not None and pneumocyte_cell['tnfa']:
            #     il8.grid[tuple(pneumocyte_cell_voxel)] += pneumocyte.p_il8_qtty

            # interact with TNFa
            if pneumocyte_cell['status'] == PhagocyteStatus.ACTIVE:
                if (
                    activation_function(
                        x=tnfa.grid[tuple(pneumocyte_cell_voxel)],
                        k_d=tnfa.k_d,
                        h=self.time_step / 60,  # units: (min/step) / (min/hour)
                        volume=voxel_volume,
                        b=1,
                    )
                    < rg.uniform()
                ):
                    pneumocyte_cell['status_iteration'] = 0
                    pneumocyte_cell['tnfa'] = True

                    # secrete TNFa
                    tnfa.grid[tuple(pneumocyte_cell_voxel)] += pneumocyte.p_tnf_qtty

        return state
    def summary_stats(self, state: State) -> Dict[str, Any]:
        pneumocyte: PneumocyteState = state.pneumocyte
        live_pneumocytes = pneumocyte.cells.alive()

        max_index = max(map(int, PhagocyteStatus))
        status_counts = np.bincount(
            np.fromiter(
                (
                    pneumocyte.cells[pneumocyte_cell_index]['status']
                    for pneumocyte_cell_index in live_pneumocytes
                ),
                dtype=np.uint8,
            ),
            minlength=max_index + 1,
        )

        tnfa_active = int(
            np.sum(
                np.fromiter(
                    (
                        pneumocyte.cells[pneumocyte_cell_index]['tnfa']
                        for pneumocyte_cell_index in live_pneumocytes
                    ),
                    dtype=bool,
                )
            )
        )

        return {
            'count': len(pneumocyte.cells.alive()),
            'inactive': int(status_counts[PhagocyteStatus.INACTIVE]),
            'inactivating': int(status_counts[PhagocyteStatus.INACTIVATING]),
            'resting': int(status_counts[PhagocyteStatus.RESTING]),
            'activating': int(status_counts[PhagocyteStatus.ACTIVATING]),
            'active': int(status_counts[PhagocyteStatus.ACTIVE]),
            'apoptotic': int(status_counts[PhagocyteStatus.APOPTOTIC]),
            'necrotic': int(status_counts[PhagocyteStatus.NECROTIC]),
            'interacting': int(status_counts[PhagocyteStatus.INTERACTING]),
            'TNFa active': tnfa_active,
        }

    def visualization_data(self, state: State):
        return 'cells', state.pneumocyte.cells
|
[
"math.expm1",
"attr.attrs",
"attr.Factory",
"numpy.dtype",
"numpy.where",
"numpy.fromiter",
"nlisim.modules.phagocyte.PhagocyteCellData.create_cell_tuple",
"nlisim.random.rg.shuffle",
"nlisim.random.rg.uniform"
] |
[((1350, 1394), 'attr.attrs', 'attrs', ([], {'kw_only': '(True)', 'frozen': '(True)', 'repr': '(False)'}), '(kw_only=True, frozen=True, repr=False)\n', (1355, 1394), False, 'from attr import attrib, attrs\n'), ((1604, 1623), 'attr.attrs', 'attrs', ([], {'kw_only': '(True)'}), '(kw_only=True)\n', (1609, 1623), False, 'from attr import attrib, attrs\n'), ((695, 793), 'numpy.dtype', 'np.dtype', (['(CellData.FIELDS + PhagocyteCellData.PHAGOCYTE_FIELDS + PNEUMOCYTE_FIELDS)'], {'align': '(True)'}), '(CellData.FIELDS + PhagocyteCellData.PHAGOCYTE_FIELDS +\n PNEUMOCYTE_FIELDS, align=True)\n', (703, 793), True, 'import numpy as np\n'), ((3858, 3887), 'nlisim.random.rg.shuffle', 'rg.shuffle', (['epithelial_voxels'], {}), '(epithelial_voxels)\n', (3868, 3887), False, 'from nlisim.random import rg\n'), ((1199, 1244), 'nlisim.modules.phagocyte.PhagocyteCellData.create_cell_tuple', 'PhagocyteCellData.create_cell_tuple', ([], {}), '(**kwargs)\n', (1234, 1244), False, 'from nlisim.modules.phagocyte import PhagocyteCellData, PhagocyteModel, PhagocyteModuleState, PhagocyteStatus\n'), ((1716, 1764), 'attr.Factory', 'attr.Factory', (['cell_list_factory'], {'takes_self': '(True)'}), '(cell_list_factory, takes_self=True)\n', (1728, 1764), False, 'import attr\n'), ((3411, 3488), 'math.expm1', 'math.expm1', (['(-time_step_size / 60 / (voxel_volume * pneumocyte.pr_p_int_param))'], {}), '(-time_step_size / 60 / (voxel_volume * pneumocyte.pr_p_int_param))\n', (3421, 3488), False, 'import math\n'), ((9171, 9301), 'numpy.fromiter', 'np.fromiter', (["(pneumocyte.cells[pneumocyte_cell_index]['status'] for\n pneumocyte_cell_index in live_pneumocytes)"], {'dtype': 'np.uint8'}), "((pneumocyte.cells[pneumocyte_cell_index]['status'] for\n pneumocyte_cell_index in live_pneumocytes), dtype=np.uint8)\n", (9182, 9301), True, 'import numpy as np\n'), ((9515, 9639), 'numpy.fromiter', 'np.fromiter', (["(pneumocyte.cells[pneumocyte_cell_index]['tnfa'] for pneumocyte_cell_index in\n live_pneumocytes)"], {'dtype': 'bool'}), "((pneumocyte.cells[pneumocyte_cell_index]['tnfa'] for\n pneumocyte_cell_index in live_pneumocytes), dtype=bool)\n", (9526, 9639), True, 'import numpy as np\n'), ((3801, 3847), 'numpy.where', 'np.where', (['(lung_tissue == TissueType.EPITHELIUM)'], {}), '(lung_tissue == TissueType.EPITHELIUM)\n', (3809, 3847), True, 'import numpy as np\n'), ((8623, 8635), 'nlisim.random.rg.uniform', 'rg.uniform', ([], {}), '()\n', (8633, 8635), False, 'from nlisim.random import rg\n'), ((7428, 7440), 'nlisim.random.rg.uniform', 'rg.uniform', ([], {}), '()\n', (7438, 7440), False, 'from nlisim.random import rg\n'), ((4438, 4465), 'nlisim.random.rg.uniform', 'rg.uniform', (['(-dx / 2)', '(dx / 2)'], {}), '(-dx / 2, dx / 2)\n', (4448, 4465), False, 'from nlisim.random import rg\n'), ((4497, 4524), 'nlisim.random.rg.uniform', 'rg.uniform', (['(-dy / 2)', '(dy / 2)'], {}), '(-dy / 2, dy / 2)\n', (4507, 4524), False, 'from nlisim.random import rg\n'), ((4556, 4583), 'nlisim.random.rg.uniform', 'rg.uniform', (['(-dz / 2)', '(dz / 2)'], {}), '(-dz / 2, dz / 2)\n', (4566, 4583), False, 'from nlisim.random import rg\n')]
|
from numpy import array, arange, zeros, unique, searchsorted, full, nan
from numpy.linalg import norm  # type: ignore

from pyNastran.utils.numpy_utils import integer_types
from pyNastran.bdf.field_writer_8 import print_card_8, set_blank_if_default
from pyNastran.bdf.field_writer_16 import print_card_16
from pyNastran.bdf.bdf_interface.assign_type import (
    integer, integer_or_blank,
    double_or_blank, integer_double_or_blank, string_or_blank)
from pyNastran.bdf.cards.elements.bars import BAROR
from pyNastran.bdf.field_writer_8 import set_string8_blank_if_default

from pyNastran.dev.bdf_vectorized.cards.elements.element import Element


class CBAR(Element):
    """
    +-------+-----+-----+-----+-----+-----+-----+-----+------+
    | CBAR  | EID | PID | GA  | GB  | X1  | X2  | X3  | OFFT |
    +-------+-----+-----+-----+-----+-----+-----+-----+------+
    |       | PA  | PB  | W1A | W2A | W3A | W1B | W2B | W3B  |
    +-------+-----+-----+-----+-----+-----+-----+-----+------+

    or

    +-------+-----+-----+-----+-----+-----+-----+-----+------+
    | CBAR  | EID | PID | GA  | GB  | G0  |     |     | OFFT |
    +-------+-----+-----+-----+-----+-----+-----+-----+------+
    |       | PA  | PB  | W1A | W2A | W3A | W1B | W2B | W3B  |
    +-------+-----+-----+-----+-----+-----+-----+-----+------+

    +-------+-------+-----+-------+-------+--------+-------+-------+-------+
    | CBAR  |   2   |  39 |   7   |   6   |  105   |       |       |  GGG  |
    +-------+-------+-----+-------+-------+--------+-------+-------+-------+
    |       |       | 513 | 0.0+0 | 0.0+0 |  -9.   | 0.0+0 | 0.0+0 |  -9.  |
    +-------+-------+-----+-------+-------+--------+-------+-------+-------+
    """
    type = 'CBAR'

    def __init__(self, model):
        """
        Defines the CBAR object.

        Parameters
        ----------
        model : BDF
            the BDF object

        """
        Element.__init__(self, model)
    def allocate(self, card_count):
        ncards = card_count[self.type]
        self.n = ncards
        if self.n:
            assert isinstance(ncards, int), ncards
            float_fmt = self.model.float_fmt
            #: Element ID
            self.element_id = zeros(ncards, 'int32')
            #: Property ID
            self.property_id = zeros(ncards, 'int32')
            self.node_ids = zeros((ncards, 2), 'int32')
            self.is_g0 = zeros(ncards, 'bool')
            self.g0 = full(ncards, nan, 'int32')
            self.x = full((ncards, 3), nan, float_fmt)
            self.offt = full(ncards, nan, '|U3')
            self.pin_flags = zeros((ncards, 2), 'int32')
            self.wa = zeros((ncards, 3), float_fmt)
            self.wb = zeros((ncards, 3), float_fmt)
    def add_card(self, card, comment=''):
        i = self.i

        if 0 and self.model.cbaror.n > 0:
            cbaror = self.model.cbaror
            pid_default = cbaror.property_id
            is_g0_default = cbaror.is_g0
            x1_default = cbaror.x[0]
            x2_default = cbaror.x[1]
            x3_default = cbaror.x[2]
            g0_default = cbaror.g0
            offt_default = cbaror.offt
        else:
            pid_default = None
            is_g0_default = None
            x1_default = 0.0
            x2_default = 0.0
            x3_default = 0.0
            g0_default = None
            offt_default = 'GGG'

        eid = integer(card, 1, 'element_id')
        self.element_id[i] = eid
        if pid_default is not None:
            self.property_id[i] = integer_or_blank(card, 2, 'property_id', pid_default)
        else:
            self.property_id[i] = integer_or_blank(card, 2, 'property_id', eid)
        self.node_ids[i] = [integer(card, 3, 'GA'),
                            integer(card, 4, 'GB')]

        #---------------------------------------------------------
        # x / g0
        if g0_default is not None:
            field5 = integer_double_or_blank(card, 5, 'g0_x1', g0_default)
        else:
            field5 = integer_double_or_blank(card, 5, 'g0_x1', x1_default)

        if isinstance(field5, integer_types):
            self.is_g0[i] = True
            self.g0[i] = field5
        elif isinstance(field5, float):
            self.is_g0[i] = False
            x = array([field5,
                       double_or_blank(card, 6, 'x2', x2_default),
                       double_or_blank(card, 7, 'x3', x3_default)], dtype='float64')
            self.x[i, :] = x
            if norm(x) == 0.0:
                msg = 'G0 vector defining plane 1 is not defined on CBAR %s.\n' % eid
                msg += 'G0 = %s\n' % field5
                msg += 'X = %s\n' % x
                msg += '%s' % card
                raise RuntimeError(msg)
        else:
            msg = ('field5 on CBAR (G0/X1) is the wrong type...id=%s field5=%s '
                   'type=%s' % (self.eid, field5, type(field5)))
            raise RuntimeError(msg)

        #---------------------------------------------------------
        # offt
        # bit doesn't exist on the CBAR
        offt = string_or_blank(card, 8, 'offt', offt_default)

        msg = 'invalid offt parameter of CBAR...offt=%s' % offt
        assert offt[0] in ['G', 'B', 'O', 'E'], msg
        assert offt[1] in ['G', 'B', 'O', 'E'], msg
        assert offt[2] in ['G', 'B', 'O', 'E'], msg
        self.offt[i] = offt

        self.pin_flags[i, :] = [integer_or_blank(card, 9, 'pa', 0),
                                integer_or_blank(card, 10, 'pb', 0)]

        self.wa[i, :] = [double_or_blank(card, 11, 'w1a', 0.0),
                         double_or_blank(card, 12, 'w2a', 0.0),
                         double_or_blank(card, 13, 'w3a', 0.0),]

        self.wb[i, :] = [double_or_blank(card, 14, 'w1b', 0.0),
                         double_or_blank(card, 15, 'w2b', 0.0),
                         double_or_blank(card, 16, 'w3b', 0.0),]
        assert len(card) <= 17, 'len(CBAR card) = %i\ncard=%s' % (len(card), card)
        self.i += 1
    def build(self):
        if self.n:
            i = self.element_id.argsort()
            self.element_id = self.element_id[i]
            self.property_id = self.property_id[i]
            self.node_ids = self.node_ids[i, :]

            self.is_g0 = self.is_g0[i]
            self.g0 = self.g0[i]
            self.x = self.x[i, :]

            self.offt = self.offt[i]
            self.pin_flags = self.pin_flags[i, :]
            self.wa = self.wa[i, :]
            self.wb = self.wb[i, :]

            unique_eids = unique(self.element_id)
            if len(unique_eids) != len(self.element_id):
                raise RuntimeError('There are duplicate CBAR IDs...')
            self._cards = []
        else:
            self.element_id = array([], dtype='int32')
            self.property_id = array([], dtype='int32')

    def update(self, maps):
        """
        maps = {
            'node_id' : nid_map,
            'property' : pid_map,
        }
        """
        if self.n:
            eid_map = maps['element']
            nid_map = maps['node']
            pid_map = maps['property']
            for i, (eid, pid, nids) in enumerate(zip(self.element_id, self.property_id,
                                                     self.node_ids)):
                self.element_id[i] = eid_map[eid]
                self.property_id[i] = pid_map[pid]
                self.node_ids[i, 0] = nid_map[nids[0]]
                self.node_ids[i, 1] = nid_map[nids[1]]

    #=========================================================================
    def get_mass_by_element_id(self, grid_cid0=None, total=False):
        """
        mass = rho * A * L + nsm
        """
        if self.n == 0:
            return 0.0
        return [0.0]
        if grid_cid0 is None:
            grid_cid0 = self.model.grid.get_position_by_node_index()
        p1 = grid_cid0[self.node_ids[:, 0]]
        p2 = grid_cid0[self.node_ids[:, 1]]
        L = p2 - p1
        i = self.model.properties_bar.get_index(self.property_id)
        A = self.model.properties_bar.get_Area[i]
        material_id = self.model.properties_bar.material_id[i]

        rho, E, J = self.model.Materials.get_rho_E_J(material_id)
        rho = self.model.Materials.get_rho(self.mid)
        E = self.model.Materials.get_E(self.mid)
        J = self.model.Materials.get_J(self.mid)

        mass = norm(L, axis=1) * A * rho + self.nsm
        if total:
            return mass.sum()
        else:
            return mass

    #=========================================================================
    def write_card(self, bdf_file, size=8, element_ids=None):
        if self.n:
            if element_ids is None:
                i = arange(self.n)
            else:
                i = searchsorted(self.element_id, element_ids)

            for (eid, pid, n, is_g0, g0, x, offt, pin, wa, wb) in zip(
                    self.element_id[i], self.property_id[i], self.node_ids[i],
                    self.is_g0[i], self.g0[i], self.x[i],
                    self.offt[i],
                    self.pin_flags[i], self.wa[i], self.wb[i]):
                pa = set_blank_if_default(pin[0], 0)
                pb = set_blank_if_default(pin[1], 0)

                w1a = set_blank_if_default(wa[0], 0.0)
                w2a = set_blank_if_default(wa[1], 0.0)
                w3a = set_blank_if_default(wa[2], 0.0)
                w1b = set_blank_if_default(wb[0], 0.0)
                w2b = set_blank_if_default(wb[1], 0.0)
                w3b = set_blank_if_default(wb[2], 0.0)
                x1 = g0 if is_g0 else x[0]
                x2 = 0 if is_g0 else x[1]
                x3 = 0 if is_g0 else x[2]
                offt = set_string8_blank_if_default(offt, 'GGG')

                card = ['CBAR', eid, pid, n[0], n[1], x1, x2, x3, offt,
                        pa, pb, w1a, w2a, w3a, w1b, w2b, w3b]
                if size == 8:
                    bdf_file.write(print_card_8(card))
                else:
                    bdf_file.write(print_card_16(card))
    def slice_by_index(self, i):
        i = self._validate_slice(i)
        obj = CBAR(self.model)
        obj.n = len(i)
        #obj._cards = self._cards[i]
        #obj._comments = obj._comments[i]
        #obj.comments = obj.comments[i]
        obj.element_id = self.element_id[i]
        obj.property_id = self.property_id[i]
        obj.node_ids = self.node_ids[i, :]
        obj.is_g0 = self.is_g0[i]
        obj.g0 = self.g0[i]
        obj.x = self.x[i, :]
        obj.offt = self.offt[i]
        obj.pin_flags = self.pin_flags[i]
        obj.wa = self.wa[i]
        obj.wb = self.wb[i]
        return obj

    #def get_stiffness_matrix(self, model, node_ids, index0s, fnorm=1.0):
        #return K, dofs, n_ijv
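
# Workflow note (added, descriptive): this vectorized CBAR container is used in three
# phases -- allocate() reserves numpy arrays sized by the card count, add_card() fills
# one row per CBAR bulk-data entry (handling both the X1/X2/X3 orientation-vector form
# and the G0 reference-node form via is_g0), and build() sorts by element_id and checks
# for duplicates before write_card() emits small- or large-field cards.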
|
[
"numpy.full",
"pyNastran.bdf.field_writer_8.set_blank_if_default",
"pyNastran.bdf.bdf_interface.assign_type.string_or_blank",
"pyNastran.bdf.bdf_interface.assign_type.integer",
"pyNastran.bdf.field_writer_8.print_card_8",
"numpy.unique",
"numpy.zeros",
"numpy.searchsorted",
"pyNastran.bdf.bdf_interface.assign_type.integer_or_blank",
"numpy.array",
"numpy.arange",
"pyNastran.bdf.field_writer_8.set_string8_blank_if_default",
"numpy.linalg.norm",
"pyNastran.dev.bdf_vectorized.cards.elements.element.Element.__init__",
"pyNastran.bdf.bdf_interface.assign_type.integer_double_or_blank",
"pyNastran.bdf.bdf_interface.assign_type.double_or_blank",
"pyNastran.bdf.field_writer_16.print_card_16"
] |
[((1911, 1940), 'pyNastran.dev.bdf_vectorized.cards.elements.element.Element.__init__', 'Element.__init__', (['self', 'model'], {}), '(self, model)\n', (1927, 1940), False, 'from pyNastran.dev.bdf_vectorized.cards.elements.element import Element\n'), ((3391, 3421), 'pyNastran.bdf.bdf_interface.assign_type.integer', 'integer', (['card', '(1)', '"""element_id"""'], {}), "(card, 1, 'element_id')\n", (3398, 3421), False, 'from pyNastran.bdf.bdf_interface.assign_type import integer, integer_or_blank, double_or_blank, integer_double_or_blank, string_or_blank\n'), ((5068, 5114), 'pyNastran.bdf.bdf_interface.assign_type.string_or_blank', 'string_or_blank', (['card', '(8)', '"""offt"""', 'offt_default'], {}), "(card, 8, 'offt', offt_default)\n", (5083, 5114), False, 'from pyNastran.bdf.bdf_interface.assign_type import integer, integer_or_blank, double_or_blank, integer_double_or_blank, string_or_blank\n'), ((2212, 2234), 'numpy.zeros', 'zeros', (['ncards', '"""int32"""'], {}), "(ncards, 'int32')\n", (2217, 2234), False, 'from numpy import array, arange, zeros, unique, searchsorted, full, nan\n'), ((2293, 2315), 'numpy.zeros', 'zeros', (['ncards', '"""int32"""'], {}), "(ncards, 'int32')\n", (2298, 2315), False, 'from numpy import array, arange, zeros, unique, searchsorted, full, nan\n'), ((2344, 2371), 'numpy.zeros', 'zeros', (['(ncards, 2)', '"""int32"""'], {}), "((ncards, 2), 'int32')\n", (2349, 2371), False, 'from numpy import array, arange, zeros, unique, searchsorted, full, nan\n'), ((2397, 2418), 'numpy.zeros', 'zeros', (['ncards', '"""bool"""'], {}), "(ncards, 'bool')\n", (2402, 2418), False, 'from numpy import array, arange, zeros, unique, searchsorted, full, nan\n'), ((2441, 2467), 'numpy.full', 'full', (['ncards', 'nan', '"""int32"""'], {}), "(ncards, nan, 'int32')\n", (2445, 2467), False, 'from numpy import array, arange, zeros, unique, searchsorted, full, nan\n'), ((2489, 2522), 'numpy.full', 'full', (['(ncards, 3)', 'nan', 'float_fmt'], {}), '((ncards, 3), nan, float_fmt)\n', (2493, 2522), False, 'from numpy import array, arange, zeros, unique, searchsorted, full, nan\n'), ((2547, 2571), 'numpy.full', 'full', (['ncards', 'nan', '"""|U3"""'], {}), "(ncards, nan, '|U3')\n", (2551, 2571), False, 'from numpy import array, arange, zeros, unique, searchsorted, full, nan\n'), ((2601, 2628), 'numpy.zeros', 'zeros', (['(ncards, 2)', '"""int32"""'], {}), "((ncards, 2), 'int32')\n", (2606, 2628), False, 'from numpy import array, arange, zeros, unique, searchsorted, full, nan\n'), ((2651, 2680), 'numpy.zeros', 'zeros', (['(ncards, 3)', 'float_fmt'], {}), '((ncards, 3), float_fmt)\n', (2656, 2680), False, 'from numpy import array, arange, zeros, unique, searchsorted, full, nan\n'), ((2703, 2732), 'numpy.zeros', 'zeros', (['(ncards, 3)', 'float_fmt'], {}), '((ncards, 3), float_fmt)\n', (2708, 2732), False, 'from numpy import array, arange, zeros, unique, searchsorted, full, nan\n'), ((3525, 3578), 'pyNastran.bdf.bdf_interface.assign_type.integer_or_blank', 'integer_or_blank', (['card', '(2)', '"""property_id"""', 'pid_default'], {}), "(card, 2, 'property_id', pid_default)\n", (3541, 3578), False, 'from pyNastran.bdf.bdf_interface.assign_type import integer, integer_or_blank, double_or_blank, integer_double_or_blank, string_or_blank\n'), ((3627, 3672), 'pyNastran.bdf.bdf_interface.assign_type.integer_or_blank', 'integer_or_blank', (['card', '(2)', '"""property_id"""', 'eid'], {}), "(card, 2, 'property_id', eid)\n", (3643, 3672), False, 'from pyNastran.bdf.bdf_interface.assign_type import integer, 
integer_or_blank, double_or_blank, integer_double_or_blank, string_or_blank\n'), ((3701, 3723), 'pyNastran.bdf.bdf_interface.assign_type.integer', 'integer', (['card', '(3)', '"""GA"""'], {}), "(card, 3, 'GA')\n", (3708, 3723), False, 'from pyNastran.bdf.bdf_interface.assign_type import integer, integer_or_blank, double_or_blank, integer_double_or_blank, string_or_blank\n'), ((3753, 3775), 'pyNastran.bdf.bdf_interface.assign_type.integer', 'integer', (['card', '(4)', '"""GB"""'], {}), "(card, 4, 'GB')\n", (3760, 3775), False, 'from pyNastran.bdf.bdf_interface.assign_type import integer, integer_or_blank, double_or_blank, integer_double_or_blank, string_or_blank\n'), ((3918, 3971), 'pyNastran.bdf.bdf_interface.assign_type.integer_double_or_blank', 'integer_double_or_blank', (['card', '(5)', '"""g0_x1"""', 'g0_default'], {}), "(card, 5, 'g0_x1', g0_default)\n", (3941, 3971), False, 'from pyNastran.bdf.bdf_interface.assign_type import integer, integer_or_blank, double_or_blank, integer_double_or_blank, string_or_blank\n'), ((4007, 4060), 'pyNastran.bdf.bdf_interface.assign_type.integer_double_or_blank', 'integer_double_or_blank', (['card', '(5)', '"""g0_x1"""', 'x1_default'], {}), "(card, 5, 'g0_x1', x1_default)\n", (4030, 4060), False, 'from pyNastran.bdf.bdf_interface.assign_type import integer, integer_or_blank, double_or_blank, integer_double_or_blank, string_or_blank\n'), ((5398, 5432), 'pyNastran.bdf.bdf_interface.assign_type.integer_or_blank', 'integer_or_blank', (['card', '(9)', '"""pa"""', '(0)'], {}), "(card, 9, 'pa', 0)\n", (5414, 5432), False, 'from pyNastran.bdf.bdf_interface.assign_type import integer, integer_or_blank, double_or_blank, integer_double_or_blank, string_or_blank\n'), ((5466, 5501), 'pyNastran.bdf.bdf_interface.assign_type.integer_or_blank', 'integer_or_blank', (['card', '(10)', '"""pb"""', '(0)'], {}), "(card, 10, 'pb', 0)\n", (5482, 5501), False, 'from pyNastran.bdf.bdf_interface.assign_type import integer, integer_or_blank, double_or_blank, integer_double_or_blank, string_or_blank\n'), ((5529, 5566), 'pyNastran.bdf.bdf_interface.assign_type.double_or_blank', 'double_or_blank', (['card', '(11)', '"""w1a"""', '(0.0)'], {}), "(card, 11, 'w1a', 0.0)\n", (5544, 5566), False, 'from pyNastran.bdf.bdf_interface.assign_type import integer, integer_or_blank, double_or_blank, integer_double_or_blank, string_or_blank\n'), ((5593, 5630), 'pyNastran.bdf.bdf_interface.assign_type.double_or_blank', 'double_or_blank', (['card', '(12)', '"""w2a"""', '(0.0)'], {}), "(card, 12, 'w2a', 0.0)\n", (5608, 5630), False, 'from pyNastran.bdf.bdf_interface.assign_type import integer, integer_or_blank, double_or_blank, integer_double_or_blank, string_or_blank\n'), ((5657, 5694), 'pyNastran.bdf.bdf_interface.assign_type.double_or_blank', 'double_or_blank', (['card', '(13)', '"""w3a"""', '(0.0)'], {}), "(card, 13, 'w3a', 0.0)\n", (5672, 5694), False, 'from pyNastran.bdf.bdf_interface.assign_type import integer, integer_or_blank, double_or_blank, integer_double_or_blank, string_or_blank\n'), ((5723, 5760), 'pyNastran.bdf.bdf_interface.assign_type.double_or_blank', 'double_or_blank', (['card', '(14)', '"""w1b"""', '(0.0)'], {}), "(card, 14, 'w1b', 0.0)\n", (5738, 5760), False, 'from pyNastran.bdf.bdf_interface.assign_type import integer, integer_or_blank, double_or_blank, integer_double_or_blank, string_or_blank\n'), ((5787, 5824), 'pyNastran.bdf.bdf_interface.assign_type.double_or_blank', 'double_or_blank', (['card', '(15)', '"""w2b"""', '(0.0)'], {}), "(card, 15, 'w2b', 0.0)\n", (5802, 5824), 
False, 'from pyNastran.bdf.bdf_interface.assign_type import integer, integer_or_blank, double_or_blank, integer_double_or_blank, string_or_blank\n'), ((5851, 5888), 'pyNastran.bdf.bdf_interface.assign_type.double_or_blank', 'double_or_blank', (['card', '(16)', '"""w3b"""', '(0.0)'], {}), "(card, 16, 'w3b', 0.0)\n", (5866, 5888), False, 'from pyNastran.bdf.bdf_interface.assign_type import integer, integer_or_blank, double_or_blank, integer_double_or_blank, string_or_blank\n'), ((6521, 6544), 'numpy.unique', 'unique', (['self.element_id'], {}), '(self.element_id)\n', (6527, 6544), False, 'from numpy import array, arange, zeros, unique, searchsorted, full, nan\n'), ((6745, 6769), 'numpy.array', 'array', (['[]'], {'dtype': '"""int32"""'}), "([], dtype='int32')\n", (6750, 6769), False, 'from numpy import array, arange, zeros, unique, searchsorted, full, nan\n'), ((6801, 6825), 'numpy.array', 'array', (['[]'], {'dtype': '"""int32"""'}), "([], dtype='int32')\n", (6806, 6825), False, 'from numpy import array, arange, zeros, unique, searchsorted, full, nan\n'), ((8705, 8719), 'numpy.arange', 'arange', (['self.n'], {}), '(self.n)\n', (8711, 8719), False, 'from numpy import array, arange, zeros, unique, searchsorted, full, nan\n'), ((8758, 8804), 'numpy.searchsorted', 'searchsorted', (['self.element_id', 'self.element_id'], {}), '(self.element_id, self.element_id)\n', (8770, 8804), False, 'from numpy import array, arange, zeros, unique, searchsorted, full, nan\n'), ((9118, 9149), 'pyNastran.bdf.field_writer_8.set_blank_if_default', 'set_blank_if_default', (['pin[0]', '(0)'], {}), '(pin[0], 0)\n', (9138, 9149), False, 'from pyNastran.bdf.field_writer_8 import print_card_8, set_blank_if_default\n'), ((9171, 9202), 'pyNastran.bdf.field_writer_8.set_blank_if_default', 'set_blank_if_default', (['pin[1]', '(0)'], {}), '(pin[1], 0)\n', (9191, 9202), False, 'from pyNastran.bdf.field_writer_8 import print_card_8, set_blank_if_default\n'), ((9226, 9258), 'pyNastran.bdf.field_writer_8.set_blank_if_default', 'set_blank_if_default', (['wa[0]', '(0.0)'], {}), '(wa[0], 0.0)\n', (9246, 9258), False, 'from pyNastran.bdf.field_writer_8 import print_card_8, set_blank_if_default\n'), ((9281, 9313), 'pyNastran.bdf.field_writer_8.set_blank_if_default', 'set_blank_if_default', (['wa[1]', '(0.0)'], {}), '(wa[1], 0.0)\n', (9301, 9313), False, 'from pyNastran.bdf.field_writer_8 import print_card_8, set_blank_if_default\n'), ((9336, 9368), 'pyNastran.bdf.field_writer_8.set_blank_if_default', 'set_blank_if_default', (['wa[2]', '(0.0)'], {}), '(wa[2], 0.0)\n', (9356, 9368), False, 'from pyNastran.bdf.field_writer_8 import print_card_8, set_blank_if_default\n'), ((9391, 9423), 'pyNastran.bdf.field_writer_8.set_blank_if_default', 'set_blank_if_default', (['wb[0]', '(0.0)'], {}), '(wb[0], 0.0)\n', (9411, 9423), False, 'from pyNastran.bdf.field_writer_8 import print_card_8, set_blank_if_default\n'), ((9446, 9478), 'pyNastran.bdf.field_writer_8.set_blank_if_default', 'set_blank_if_default', (['wb[1]', '(0.0)'], {}), '(wb[1], 0.0)\n', (9466, 9478), False, 'from pyNastran.bdf.field_writer_8 import print_card_8, set_blank_if_default\n'), ((9501, 9533), 'pyNastran.bdf.field_writer_8.set_blank_if_default', 'set_blank_if_default', (['wb[2]', '(0.0)'], {}), '(wb[2], 0.0)\n', (9521, 9533), False, 'from pyNastran.bdf.field_writer_8 import print_card_8, set_blank_if_default\n'), ((9685, 9726), 'pyNastran.bdf.field_writer_8.set_string8_blank_if_default', 'set_string8_blank_if_default', (['offt', '"""GGG"""'], {}), "(offt, 'GGG')\n", (9713, 
9726), False, 'from pyNastran.bdf.field_writer_8 import set_string8_blank_if_default\n'), ((4474, 4481), 'numpy.linalg.norm', 'norm', (['x'], {}), '(x)\n', (4478, 4481), False, 'from numpy.linalg import norm\n'), ((8365, 8380), 'numpy.linalg.norm', 'norm', (['L'], {'axis': '(1)'}), '(L, axis=1)\n', (8369, 8380), False, 'from numpy.linalg import norm\n'), ((4301, 4343), 'pyNastran.bdf.bdf_interface.assign_type.double_or_blank', 'double_or_blank', (['card', '(6)', '"""x2"""', 'x2_default'], {}), "(card, 6, 'x2', x2_default)\n", (4316, 4343), False, 'from pyNastran.bdf.bdf_interface.assign_type import integer, integer_or_blank, double_or_blank, integer_double_or_blank, string_or_blank\n'), ((4368, 4410), 'pyNastran.bdf.bdf_interface.assign_type.double_or_blank', 'double_or_blank', (['card', '(7)', '"""x3"""', 'x3_default'], {}), "(card, 7, 'x3', x3_default)\n", (4383, 4410), False, 'from pyNastran.bdf.bdf_interface.assign_type import integer, integer_or_blank, double_or_blank, integer_double_or_blank, string_or_blank\n'), ((9926, 9944), 'pyNastran.bdf.field_writer_8.print_card_8', 'print_card_8', (['card'], {}), '(card)\n', (9938, 9944), False, 'from pyNastran.bdf.field_writer_8 import print_card_8, set_blank_if_default\n'), ((10003, 10022), 'pyNastran.bdf.field_writer_16.print_card_16', 'print_card_16', (['card'], {}), '(card)\n', (10016, 10022), False, 'from pyNastran.bdf.field_writer_16 import print_card_16\n')]
|
"""
Environments and wrappers for Sonic training.
"""
import gym
import numpy as np
import gzip
import retro
import os
from baselines.common.atari_wrappers import WarpFrame, FrameStack
# from retro_contest.local import make
import logging
import retro_contest
import pandas as pd
train_states = pd.read_csv('../data/sonic_env/sonic-train.csv')
validation_states = pd.read_csv('../data/sonic_env/sonic-validation.csv')
logger = logging.getLogger(__name__)
def make(game, state, discrete_actions=False, bk2dir=None, max_episode_steps=4000):
"""Make the competition environment."""
print('game:', game, 'state:', state)
use_restricted_actions = retro.ACTIONS_FILTERED
if discrete_actions:
use_restricted_actions = retro.ACTIONS_DISCRETE
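    # prefer the contest scoring scenario; fall back to the game's default scenario if it is unavailable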
try:
env = retro.make(game, state, scenario='contest', use_restricted_actions=use_restricted_actions)
except Exception:
env = retro.make(game, state, use_restricted_actions=use_restricted_actions)
if bk2dir:
env.auto_record(bk2dir)
env = retro_contest.StochasticFrameSkip(env, n=4, stickprob=0.25)
env = gym.wrappers.TimeLimit(env, max_episode_steps=max_episode_steps)
return env
def make_env(stack=True, scale_rew=True):
"""
Create an environment with some standard wrappers.
"""
start_state = train_states.sample().iloc[0]
env = make(game=start_state.game, state=start_state.state, max_episode_steps=600)
env = SonicDiscretizer(env)
# env = AllowBacktracking(env)
env = RandomGameReset(env)
env = EpisodeInfo(env)
if scale_rew:
env = RewardScaler(env)
env = WarpFrame(env)
return env
class SonicDiscretizer(gym.ActionWrapper):
"""
Wrap a gym-retro environment and make it use discrete
actions for the Sonic game.
"""
def __init__(self, env):
super(SonicDiscretizer, self).__init__(env)
buttons = ["B", "A", "MODE", "START", "UP", "DOWN", "LEFT", "RIGHT", "C", "Y", "X", "Z"]
actions = [['LEFT'], ['RIGHT'], ['LEFT', 'DOWN'], ['RIGHT', 'DOWN'], ['DOWN'],
['DOWN', 'B'], ['B']]
self._actions = []
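        # convert each button combination into a 12-element boolean button-press array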
for action in actions:
arr = np.array([False] * 12)
for button in action:
arr[buttons.index(button)] = True
self._actions.append(arr)
self.action_space = gym.spaces.Discrete(len(self._actions))
def action(self, a): # pylint: disable=W0221
return self._actions[a].copy()
class RewardScaler(gym.RewardWrapper):
"""
Bring rewards to a reasonable scale for PPO.
    This is incredibly important and affects performance
drastically.
"""
def reward(self, reward):
return reward * 0.01
class AllowBacktracking(gym.Wrapper):
"""
Use deltas in max(X) as the reward, rather than deltas
in X. This way, agents are not discouraged too heavily
from exploring backwards if there is no way to advance
head-on in the level.
"""
def __init__(self, env):
super(AllowBacktracking, self).__init__(env)
self._cur_x = 0
self._max_x = 0
def reset(self, **kwargs): # pylint: disable=E0202
self._cur_x = 0
self._max_x = 0
return self.env.reset(**kwargs)
def step(self, action): # pylint: disable=E0202
obs, rew, done, info = self.env.step(action)
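        # the environment reward is the per-step change in X; accumulate it to track current progress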
self._cur_x += rew
rew = max(0, self._cur_x - self._max_x)
self._max_x = max(self._max_x, self._cur_x)
return obs, rew, done, info
class RandomGameReset(gym.Wrapper):
def __init__(self, env, state=None):
"""Reset game to a random level."""
super().__init__(env)
self.state = state
def step(self, action):
return self.env.step(action)
def reset(self):
# Reset to a random level (but don't change the game)
try:
game = self.env.unwrapped.gamename
except AttributeError:
logger.warning('no game name')
pass
else:
game_path = retro.get_game_path(game)
# pick a random state that's in the same game
game_states = train_states[train_states.game == game]
# if self.state:
# game_states = game_states[game_states.state.str.contains(self.state)]
# Load
choice = game_states.sample().iloc[0]
state = choice.state + '.state'
            logger.info('resetting env %s to %s %s', self.unwrapped.rank, game, state)
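            # overwrite the emulator's initial savestate so reset() starts from the chosen level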
with gzip.open(os.path.join(game_path, state), 'rb') as fh:
self.env.unwrapped.initial_state = fh.read()
return self.env.reset()
class EpisodeInfo(gym.Wrapper):
"""
Add information about episode end and total final reward
"""
def __init__(self, env):
super(EpisodeInfo, self).__init__(env)
self._ep_len = 0
self._ep_rew_total = 0
def reset(self, **kwargs): # pylint: disable=E0202
self._ep_len = 0
self._ep_rew_total = 0
return self.env.reset(**kwargs)
def step(self, action): # pylint: disable=E0202
obs, rew, done, info = self.env.step(action)
self._ep_len += 1
self._ep_rew_total += rew
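        # when the episode ends, make sure info['episode'] reports the episode length and total reward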
if done:
if "episode" not in info:
info = {"episode": {"l": self._ep_len, "r": self._ep_rew_total}}
elif isinstance(info, dict):
if "l" not in info["episode"]:
info["episode"]["l"] = self._ep_len
if "r" not in info["episode"]:
info["episode"]["r"] = self._ep_rew_total
return obs, rew, done, info
|
[
"pandas.read_csv",
"baselines.common.atari_wrappers.WarpFrame",
"retro.make",
"retro.get_game_path",
"numpy.array",
"retro_contest.StochasticFrameSkip",
"gym.wrappers.TimeLimit",
"os.path.join",
"logging.getLogger"
] |
[((297, 345), 'pandas.read_csv', 'pd.read_csv', (['"""../data/sonic_env/sonic-train.csv"""'], {}), "('../data/sonic_env/sonic-train.csv')\n", (308, 345), True, 'import pandas as pd\n'), ((366, 419), 'pandas.read_csv', 'pd.read_csv', (['"""../data/sonic_env/sonic-validation.csv"""'], {}), "('../data/sonic_env/sonic-validation.csv')\n", (377, 419), True, 'import pandas as pd\n'), ((430, 457), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (447, 457), False, 'import logging\n'), ((1041, 1100), 'retro_contest.StochasticFrameSkip', 'retro_contest.StochasticFrameSkip', (['env'], {'n': '(4)', 'stickprob': '(0.25)'}), '(env, n=4, stickprob=0.25)\n', (1074, 1100), False, 'import retro_contest\n'), ((1111, 1175), 'gym.wrappers.TimeLimit', 'gym.wrappers.TimeLimit', (['env'], {'max_episode_steps': 'max_episode_steps'}), '(env, max_episode_steps=max_episode_steps)\n', (1133, 1175), False, 'import gym\n'), ((1625, 1639), 'baselines.common.atari_wrappers.WarpFrame', 'WarpFrame', (['env'], {}), '(env)\n', (1634, 1639), False, 'from baselines.common.atari_wrappers import WarpFrame, FrameStack\n'), ((786, 881), 'retro.make', 'retro.make', (['game', 'state'], {'scenario': '"""contest"""', 'use_restricted_actions': 'use_restricted_actions'}), "(game, state, scenario='contest', use_restricted_actions=\n use_restricted_actions)\n", (796, 881), False, 'import retro\n'), ((913, 983), 'retro.make', 'retro.make', (['game', 'state'], {'use_restricted_actions': 'use_restricted_actions'}), '(game, state, use_restricted_actions=use_restricted_actions)\n', (923, 983), False, 'import retro\n'), ((2189, 2211), 'numpy.array', 'np.array', (['([False] * 12)'], {}), '([False] * 12)\n', (2197, 2211), True, 'import numpy as np\n'), ((4057, 4082), 'retro.get_game_path', 'retro.get_game_path', (['game'], {}), '(game)\n', (4076, 4082), False, 'import retro\n'), ((4552, 4582), 'os.path.join', 'os.path.join', (['game_path', 'state'], {}), '(game_path, state)\n', (4564, 4582), False, 'import os\n')]
|
# TF2.0 object detection: csv to TFRecord
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import tensorflow as tf
import numpy as np
import random
import cv2
from tqdm import tqdm
import datetime
import os
import time
from detection.models.detectors import faster_rcnn
from bjod_data import ZiptrainDataset, Zipvaluedata
os.environ['CUDA_VISIBLE_DEVICES'] = '0,1'
random.seed(234)
def save_images(image, boxes, filen, label_pre, pth=''):
image = image.numpy()
image = image.astype(np.uint8)
if image.shape[0] == 1:
image = np.squeeze(image, axis=0)
cv2.cvtColor(image, cv2.COLOR_RGB2BGR, image)
n = boxes.shape[0]
if not n:
print("no instances to display ")
for i in range(n):
color = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
if not np.any(boxes[i]):
continue
x1, y1, x2, y2, _ = boxes[i]
y1, x1, y2, x2 = int(y1), int(x1), int(y2), int(x2)
cv2.rectangle(image, (x1, y1), (x2, y2), color, 2, 8, 0)
cv2.putText(image, str(label_pre[i]), (int((x1 + x2) / 2), int((y1 + y2) / 2)), cv2.FONT_HERSHEY_SIMPLEX, 1,
color, 1)
filen = filen[:-4] + '.jpg'
cv2.imwrite(os.path.join(pth, filen), image)
def voc_ap(rec, prec, use_07_metric=False):
"""Compute VOC AP given precision and recall. If use_07_metric is true, uses
the VOC 07 11-point method (default:False).
"""
if use_07_metric:
# 11 point metric
ap = 0.
for t in np.arange(0., 1.1, 0.1):
if np.sum(rec >= t) == 0:
p = 0
else:
p = np.max(prec[rec >= t])
ap = ap + p / 11.
else:
# correct AP calculation
# first append sentinel values at the end
mrec = np.concatenate(([0.], rec, [1.]))
mpre = np.concatenate(([0.], prec, [0.]))
# compute the precision envelope
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
# to calculate area under PR curve, look for points
# where X axis (recall) changes value
i = np.where(mrec[1:] != mrec[:-1])[0]
# and sum (\Delta recall) * prec
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
return ap
def eval_modle(gt_dict, pre__dict, npos, categaries):
result = {}
for key in range(categaries):
b1 = pre__dict[str(key + 1)]
if not b1:
continue
image_ids = [tt[0] for tt in b1]
confidence = np.array([tt[5] for tt in b1])
BB = np.array([tt[1:5] for tt in b1])
sorted_ind = np.argsort(-confidence)
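        # rank this class's detections by descending confidence before matching them to ground truth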
BB = BB[sorted_ind, :]
image_ids = [image_ids[x] for x in sorted_ind]
nd = len(image_ids)
tp = np.zeros(nd)
fp = np.zeros(nd)
for d in range(nd):
R = gt_dict[str(key + 1)][str(image_ids[d])] # ann
bb = BB[d, :].astype(float)
            ovmax = -np.inf  # negative-infinity sentinel: no overlap found yet
BBGT = R['bbox'].astype(float)
if BBGT.size > 0:
# compute overlaps
# intersection
ixmin = np.maximum(BBGT[:, 0], bb[0])
iymin = np.maximum(BBGT[:, 1], bb[1])
ixmax = np.minimum(BBGT[:, 2], bb[2])
iymax = np.minimum(BBGT[:, 3], bb[3])
iw = np.maximum(ixmax - ixmin + 1., 0.)
ih = np.maximum(iymax - iymin + 1., 0.)
inters = iw * ih
# union
uni = ((bb[2] - bb[0] + 1.) * (bb[3] - bb[1] + 1.) +
(BBGT[:, 2] - BBGT[:, 0] + 1.) *
(BBGT[:, 3] - BBGT[:, 1] + 1.) - inters)
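                # intersection-over-union of this detection against every ground-truth box of this class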
overlaps = inters / uni
                ovmax = np.max(overlaps)  # largest overlap
                jmax = np.argmax(overlaps)  # index of the ground-truth box with the largest overlap
            # count true positives and false positives
if ovmax > 0.5:
if not R['det'][jmax]:
tp[d] = 1.
                    R['det'][jmax] = 1  # mark this ground-truth box as already matched
else:
fp[d] = 1.
else:
fp[d] = 1.
# compute precision recall
        fp = np.cumsum(fp)  # np.cumsum() turns per-detection flags into running totals
tp = np.cumsum(tp)
rec = tp / np.maximum(float(npos[str(key + 1)]), np.finfo(np.float64).eps)
prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
ap = voc_ap(rec, prec)
print('key+1: ', str(key + 1))
print(ap)
result[str(key + 1)] = ap
return result
class DistTrainer:
def __init__(self, dis_strategy, ori_model, categaries, nu_devices, maxap=0.0, epoch=[0, 200], trian_dir=''):
self.dist_strategy = dis_strategy
self.model = ori_model
self.num_devices = nu_devices
self.trian_dir = trian_dir
self.epochs = epoch
self.maxap = maxap
self.total_categaries = categaries
self.optimizer = tf.keras.optimizers.SGD(1e-4, momentum=0.9, nesterov=True)
# @tf.function
def train_step(self, batch_imgs, batch_metas, batch_bboxes, batch_labels):
with tf.GradientTape() as tape:
rpn_class_loss, rpn_bbox_loss, rcnn_class_loss, rcnn_bbox_loss = \
self.model((batch_imgs, batch_metas, batch_bboxes, batch_labels), training=True)
loss_value = rpn_class_loss + rpn_bbox_loss + rcnn_class_loss + rcnn_bbox_loss
grads = tape.gradient(loss_value, self.model.trainable_variables)
self.optimizer.apply_gradients(zip(grads, self.model.trainable_variables))
return loss_value, rpn_class_loss, rpn_bbox_loss, rcnn_class_loss, rcnn_bbox_loss
def dist_train_step(self, batch_imgs, batch_metas, batch_bboxes, batch_labels):
per_loss_value, per_rpn_class_loss, per_rpn_bbox_loss, per_rcnn_class_loss, per_rcnn_bbox_loss = self.dist_strategy.run(
self.train_step,
args=(batch_imgs, batch_metas, batch_bboxes, batch_labels))
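        # sum the per-replica losses into single scalars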
loss_value = self.dist_strategy.reduce(tf.distribute.ReduceOp.SUM, per_loss_value, axis=None)
rpn_class_loss = self.dist_strategy.reduce(tf.distribute.ReduceOp.SUM, per_rpn_class_loss, axis=None)
rpn_bbox_loss = self.dist_strategy.reduce(tf.distribute.ReduceOp.SUM, per_rpn_bbox_loss, axis=None)
rcnn_class_loss = self.dist_strategy.reduce(tf.distribute.ReduceOp.SUM, per_rcnn_class_loss, axis=None)
rcnn_bbox_loss = self.dist_strategy.reduce(tf.distribute.ReduceOp.SUM, per_rcnn_bbox_loss, axis=None)
return loss_value, rpn_class_loss, rpn_bbox_loss, rcnn_class_loss, rcnn_bbox_loss
# @tf.function
def test_step(self, batch_imgs, batch_metas, batch_bboxes, batch_labels):
rpn_class_loss, rpn_bbox_loss, rcnn_class_loss, rcnn_bbox_loss = \
self.model((batch_imgs, batch_metas, batch_bboxes, batch_labels), training=True)
loss_value = rpn_class_loss + rpn_bbox_loss + rcnn_class_loss + rcnn_bbox_loss
return loss_value, rpn_class_loss, rpn_bbox_loss, rcnn_class_loss, rcnn_bbox_loss
def dist_test_step(self, batch_imgs, batch_metas, batch_bboxes, batch_labels):
per_loss_value, per_rpn_class_loss, per_rpn_bbox_loss, per_rcnn_class_loss, per_rcnn_bbox_loss = self.dist_strategy.run(
self.test_step,
args=(batch_imgs, batch_metas, batch_bboxes, batch_labels))
loss_value = self.dist_strategy.reduce(tf.distribute.ReduceOp.SUM, per_loss_value, axis=None)
rpn_class_loss = self.dist_strategy.reduce(tf.distribute.ReduceOp.SUM, per_rpn_class_loss, axis=None)
rpn_bbox_loss = self.dist_strategy.reduce(tf.distribute.ReduceOp.SUM, per_rpn_bbox_loss, axis=None)
rcnn_class_loss = self.dist_strategy.reduce(tf.distribute.ReduceOp.SUM, per_rcnn_class_loss, axis=None)
rcnn_bbox_loss = self.dist_strategy.reduce(tf.distribute.ReduceOp.SUM, per_rcnn_bbox_loss, axis=None)
return loss_value, rpn_class_loss, rpn_bbox_loss, rcnn_class_loss, rcnn_bbox_loss
def eval_step(self, val_dts):
gt_bbox_dict = {str(key + 1): {} for key in range(self.total_categaries)}
pre_bbox_dict = {str(key + 1): [] for key in range(self.total_categaries)}
# number of true positive
npos = {str(key + 1): 0 for key in range(self.total_categaries)}
img_ids = 10000
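        # give every validation image a synthetic id so detections can be matched back to its ground truth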
for val_imgs, val_metas, val_bboxes, val_labels, val_file in tqdm(val_dts):
# if random.randint(1, 100) > 11:
# continue
val_labels = tf.squeeze(val_labels, axis=0).numpy()
val_bboxes = tf.squeeze(val_bboxes, 0).numpy().astype(np.int)
val_imgs = tf.squeeze(tf.cast(val_imgs, tf.float32), axis=0)
val_metas = tf.squeeze(tf.cast(val_metas, tf.float32), axis=0)
val_predict_bboxes = []
for key in range(self.total_categaries):
tmp_box = [val_bboxes[indcc] for indcc, cc in enumerate(val_labels) if cc == key + 1]
det = [False] * len(tmp_box)
gt_bbox_dict[str(key + 1)][str(img_ids)] = {'bbox': np.array(tmp_box), 'det': det}
npos[str(key + 1)] += len(tmp_box)
proposals = self.model.simple_test_rpn(val_imgs, val_metas)
res = self.model.simple_test_bboxes(val_imgs, val_metas, proposals)
for pos in range(res['class_ids'].shape[0]):
label_id = int(res['class_ids'][pos])
y1, x1, y2, x2 = [int(num) for num in list(res['rois'][pos])]
tmp_list2 = [img_ids, x1, y1, x2, y2, float(res['scores'][pos])]
val_predict_bboxes.append([x1, y1, x2, y2, float(res['scores'][pos])])
pre_bbox_dict[str(label_id)].append(tmp_list2)
img_ids += 1
return gt_bbox_dict, pre_bbox_dict, npos
def rd_save_images(self, val_dts, img_save_path):
for val_imgs, val_metas, _, _, val_file in tqdm(val_dts):
if random.randint(1, 100) > 10:
continue
val_file = val_file.numpy()[0].decode('utf-8')
val_imgs = tf.squeeze(tf.cast(val_imgs, tf.float32), axis=0)
val_metas = tf.squeeze(tf.cast(val_metas, tf.float32), axis=0)
val_predict_bboxes = []
proposals = self.model.simple_test_rpn(val_imgs, val_metas)
res = self.model.simple_test_bboxes(val_imgs, val_metas, proposals)
for pos in range(res['class_ids'].shape[0]):
y1, x1, y2, x2 = [int(num) for num in list(res['rois'][pos])]
val_predict_bboxes.append([x1, y1, x2, y2, float(res['scores'][pos])])
save_images(val_imgs, np.array(val_predict_bboxes), val_file, res['class_ids'], img_save_path)
def train(self, train_ds, val_ds):
# train model
train_dts = self.dist_strategy.experimental_distribute_dataset(train_ds)
val_dts = self.dist_strategy.experimental_distribute_dataset(val_ds)
log_dir = self.trian_dir + 'log_dir/' + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
file_writer = tf.summary.create_file_writer(log_dir)
index_step = 0
time_start = time.time()
input_flag = False
for epoch in range(self.epochs[0], self.epochs[1]):
loss_history = np.zeros(5)
for (step, inputs) in enumerate(train_dts):
batch_imgs, batch_metas, batch_bboxes, batch_labels, filen = inputs
labels_tmp = tf.cast(tf.fill([1,1000], -1), tf.int32)
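                # skip batches whose labels are all -1 (no valid ground-truth boxes)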
if self.num_devices > 1:
for per_tensor in batch_labels.values:
if tf.equal(per_tensor, labels_tmp).numpy().all():
input_flag = True
print("skip this batch")
break
else:
pass
if input_flag:
input_flag = False
continue
else:
if tf.equal(batch_labels, labels_tmp).numpy().all():
continue
loss_value, rpn_class_loss, rpn_bbox_loss, rcnn_class_loss, rcnn_bbox_loss \
= self.dist_train_step(batch_imgs, batch_metas, batch_bboxes, batch_labels)
loss_history[0] += loss_value
loss_history[1] += rpn_class_loss
loss_history[2] += rpn_bbox_loss
loss_history[3] += rcnn_class_loss
loss_history[4] += rcnn_bbox_loss
if step % 10 == 0:
if step:
loss_history = loss_history / 10
print('time:', round(time.time() - time_start, 2), 'epoch:', epoch, ', step:', step, ', loss:',
loss_history)
time_start = time.time()
with file_writer.as_default():
tf.summary.scalar('total_loss', loss_history[0], step=index_step)
tf.summary.scalar('rpn_class_loss', loss_history[1], step=index_step)
tf.summary.scalar('rpn_bbox_loss', loss_history[2], step=index_step)
tf.summary.scalar('rcnn_class_loss', loss_history[3], step=index_step)
tf.summary.scalar('rcnn_bbox_loss', loss_history[4], step=index_step)
file_writer.flush()
index_step += 1
loss_history = np.zeros(5)
else:
print('epoch:', epoch, ', step:', step, ', loss:', loss_history)
if step % 2000 == 0:
weights_dir = self.trian_dir + 'weights/epoch_' + str(epoch) + '_loss_'
sum_loss = 0
for (val_step, inputs_val) in tqdm(enumerate(val_dts)):
batch_imgs, batch_metas, batch_bboxes, batch_labels, filen = inputs_val
labels_tmp = tf.cast(tf.fill([1, 1000], -1), tf.int32)
if self.num_devices > 1:
for per_tensor in batch_labels.values:
if tf.equal(per_tensor, labels_tmp).numpy().all():
input_flag = True
print("skip this batch")
break
else:
pass
if input_flag:
input_flag = False
continue
else:
if tf.equal(batch_labels, labels_tmp).numpy().all():
continue
loss_value, rpn_class_loss, rpn_bbox_loss, rcnn_class_loss, rcnn_bbox_loss \
= self.dist_test_step(batch_imgs, batch_metas, batch_bboxes, batch_labels)
sum_loss += loss_value
print('sum_loss: ', sum_loss)
if sum_loss > self.maxap:
self.maxap = sum_loss
self.model.save_weights(weights_dir + str(tf.round(sum_loss, 2).numpy()) + '.h5')
if __name__ == '__main__':
PER_GPU_BATCHSIZE = 1
dist_strategy = tf.distribute.MirroredStrategy(
cross_device_ops=tf.distribute.HierarchicalCopyAllReduce()
)
num_devices = dist_strategy.num_replicas_in_sync
print('Number of devices: {}'.format(num_devices))
GLOBAL_BATCHSIZE = num_devices * PER_GPU_BATCHSIZE
with dist_strategy.scope():
if os.name == 'nt':
tf_record_path = 'D:/datasets/bjod/'
train_dir = './train_dir/'
else:
tf_record_path = '../../../../../datasets/bjod/'
train_dir = './train_dir/'
crop_size = [992, 992, 3]
train_datasets = ZiptrainDataset(tf_record_path, 1, 96, crop_size=crop_size,
roi_path='D:/datasets/bjod/roi_test/').prepare(True)
val_train = Zipvaluedata(tf_record_path, crop_size=crop_size).prepare('train_api_97.record')
val_test = Zipvaluedata(tf_record_path, crop_size=crop_size).prepare('val_api_19.record')
one_imgs, one_metas, one_bboxes, one_labels, _ = next(iter(val_train))
one_imgs = tf.expand_dims(tf.cast(one_imgs[0], tf.float32), axis=0)
one_metas = tf.expand_dims(tf.cast(one_metas[0], tf.float32), axis=0)
model = faster_rcnn.FasterRCNN(num_classes=2)
_ = model((one_imgs, one_metas), training=False)
model_ori = faster_rcnn.FasterRCNN(num_classes=81)
_ = model_ori((one_imgs, one_metas), training=False)
model_ori.load_weights('./weights/faster_rcnn_resnet101_fpn_coco2017_map35.h5',
by_name=True)
model.backbone.set_weights(model_ori.backbone.get_weights())
model.neck.set_weights(model_ori.neck.get_weights())
model.rpn_head.set_weights(model_ori.rpn_head.get_weights())
model.roi_align.set_weights(model_ori.roi_align.get_weights())
# print(cc)
model.summary()
def __init__(self, dis_strategy, ori_model, categaries, nu_devices, maxap=0.0, epoch=[0, 200], trian_dir=''):
self.dist_strategy = dis_strategy
self.model = ori_model
self.num_devices = nu_devices
self.trian_dir = trian_dir
self.epochs = epoch
self.maxap = maxap
self.total_categaries = categaries
self.optimizer = tf.keras.optimizers.SGD(1e-4, momentum=0.9, nesterov=True)
trainer = DistTrainer(dis_strategy=dist_strategy,
ori_model=model,
categaries=2,
nu_devices=1,
maxap=0.0,
epoch=[0, 200],
trian_dir=train_dir
)
trainer.train(train_datasets, val_test)
|
[
"numpy.sum",
"numpy.maximum",
"numpy.argmax",
"tensorflow.keras.optimizers.SGD",
"bjod_data.ZiptrainDataset",
"numpy.argsort",
"bjod_data.Zipvaluedata",
"numpy.arange",
"cv2.rectangle",
"os.path.join",
"random.randint",
"cv2.cvtColor",
"numpy.cumsum",
"tensorflow.cast",
"numpy.max",
"random.seed",
"numpy.finfo",
"tensorflow.squeeze",
"tensorflow.distribute.HierarchicalCopyAllReduce",
"datetime.datetime.now",
"tensorflow.equal",
"tqdm.tqdm",
"numpy.minimum",
"tensorflow.summary.scalar",
"tensorflow.round",
"numpy.squeeze",
"numpy.concatenate",
"detection.models.detectors.faster_rcnn.FasterRCNN",
"numpy.zeros",
"time.time",
"numpy.any",
"tensorflow.fill",
"numpy.where",
"numpy.array",
"tensorflow.summary.create_file_writer",
"tensorflow.GradientTape"
] |
[((413, 429), 'random.seed', 'random.seed', (['(234)'], {}), '(234)\n', (424, 429), False, 'import random\n'), ((624, 669), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_RGB2BGR', 'image'], {}), '(image, cv2.COLOR_RGB2BGR, image)\n', (636, 669), False, 'import cv2\n'), ((594, 619), 'numpy.squeeze', 'np.squeeze', (['image'], {'axis': '(0)'}), '(image, axis=0)\n', (604, 619), True, 'import numpy as np\n'), ((1020, 1076), 'cv2.rectangle', 'cv2.rectangle', (['image', '(x1, y1)', '(x2, y2)', 'color', '(2)', '(8)', '(0)'], {}), '(image, (x1, y1), (x2, y2), color, 2, 8, 0)\n', (1033, 1076), False, 'import cv2\n'), ((1272, 1296), 'os.path.join', 'os.path.join', (['pth', 'filen'], {}), '(pth, filen)\n', (1284, 1296), False, 'import os\n'), ((1569, 1593), 'numpy.arange', 'np.arange', (['(0.0)', '(1.1)', '(0.1)'], {}), '(0.0, 1.1, 0.1)\n', (1578, 1593), True, 'import numpy as np\n'), ((1853, 1888), 'numpy.concatenate', 'np.concatenate', (['([0.0], rec, [1.0])'], {}), '(([0.0], rec, [1.0]))\n', (1867, 1888), True, 'import numpy as np\n'), ((1902, 1938), 'numpy.concatenate', 'np.concatenate', (['([0.0], prec, [0.0])'], {}), '(([0.0], prec, [0.0]))\n', (1916, 1938), True, 'import numpy as np\n'), ((2293, 2338), 'numpy.sum', 'np.sum', (['((mrec[i + 1] - mrec[i]) * mpre[i + 1])'], {}), '((mrec[i + 1] - mrec[i]) * mpre[i + 1])\n', (2299, 2338), True, 'import numpy as np\n'), ((2598, 2628), 'numpy.array', 'np.array', (['[tt[5] for tt in b1]'], {}), '([tt[5] for tt in b1])\n', (2606, 2628), True, 'import numpy as np\n'), ((2642, 2674), 'numpy.array', 'np.array', (['[tt[1:5] for tt in b1]'], {}), '([tt[1:5] for tt in b1])\n', (2650, 2674), True, 'import numpy as np\n'), ((2696, 2719), 'numpy.argsort', 'np.argsort', (['(-confidence)'], {}), '(-confidence)\n', (2706, 2719), True, 'import numpy as np\n'), ((2848, 2860), 'numpy.zeros', 'np.zeros', (['nd'], {}), '(nd)\n', (2856, 2860), True, 'import numpy as np\n'), ((2874, 2886), 'numpy.zeros', 'np.zeros', (['nd'], {}), '(nd)\n', (2882, 2886), True, 'import numpy as np\n'), ((4236, 4249), 'numpy.cumsum', 'np.cumsum', (['fp'], {}), '(fp)\n', (4245, 4249), True, 'import numpy as np\n'), ((4283, 4296), 'numpy.cumsum', 'np.cumsum', (['tp'], {}), '(tp)\n', (4292, 4296), True, 'import numpy as np\n'), ((4990, 5050), 'tensorflow.keras.optimizers.SGD', 'tf.keras.optimizers.SGD', (['(0.0001)'], {'momentum': '(0.9)', 'nesterov': '(True)'}), '(0.0001, momentum=0.9, nesterov=True)\n', (5013, 5050), True, 'import tensorflow as tf\n'), ((8437, 8450), 'tqdm.tqdm', 'tqdm', (['val_dts'], {}), '(val_dts)\n', (8441, 8450), False, 'from tqdm import tqdm\n'), ((9951, 9964), 'tqdm.tqdm', 'tqdm', (['val_dts'], {}), '(val_dts)\n', (9955, 9964), False, 'from tqdm import tqdm\n'), ((11099, 11137), 'tensorflow.summary.create_file_writer', 'tf.summary.create_file_writer', (['log_dir'], {}), '(log_dir)\n', (11128, 11137), True, 'import tensorflow as tf\n'), ((11182, 11193), 'time.time', 'time.time', ([], {}), '()\n', (11191, 11193), False, 'import time\n'), ((16604, 16641), 'detection.models.detectors.faster_rcnn.FasterRCNN', 'faster_rcnn.FasterRCNN', ([], {'num_classes': '(2)'}), '(num_classes=2)\n', (16626, 16641), False, 'from detection.models.detectors import faster_rcnn\n'), ((16720, 16758), 'detection.models.detectors.faster_rcnn.FasterRCNN', 'faster_rcnn.FasterRCNN', ([], {'num_classes': '(81)'}), '(num_classes=81)\n', (16742, 16758), False, 'from detection.models.detectors import faster_rcnn\n'), ((789, 811), 'random.randint', 'random.randint', (['(0)', '(255)'], {}), '(0, 
255)\n', (803, 811), False, 'import random\n'), ((813, 835), 'random.randint', 'random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (827, 835), False, 'import random\n'), ((837, 859), 'random.randint', 'random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (851, 859), False, 'import random\n'), ((876, 892), 'numpy.any', 'np.any', (['boxes[i]'], {}), '(boxes[i])\n', (882, 892), True, 'import numpy as np\n'), ((2051, 2083), 'numpy.maximum', 'np.maximum', (['mpre[i - 1]', 'mpre[i]'], {}), '(mpre[i - 1], mpre[i])\n', (2061, 2083), True, 'import numpy as np\n'), ((2203, 2234), 'numpy.where', 'np.where', (['(mrec[1:] != mrec[:-1])'], {}), '(mrec[1:] != mrec[:-1])\n', (2211, 2234), True, 'import numpy as np\n'), ((5161, 5178), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (5176, 5178), True, 'import tensorflow as tf\n'), ((11308, 11319), 'numpy.zeros', 'np.zeros', (['(5)'], {}), '(5)\n', (11316, 11319), True, 'import numpy as np\n'), ((15467, 15508), 'tensorflow.distribute.HierarchicalCopyAllReduce', 'tf.distribute.HierarchicalCopyAllReduce', ([], {}), '()\n', (15506, 15508), True, 'import tensorflow as tf\n'), ((16468, 16500), 'tensorflow.cast', 'tf.cast', (['one_imgs[0]', 'tf.float32'], {}), '(one_imgs[0], tf.float32)\n', (16475, 16500), True, 'import tensorflow as tf\n'), ((16545, 16578), 'tensorflow.cast', 'tf.cast', (['one_metas[0]', 'tf.float32'], {}), '(one_metas[0], tf.float32)\n', (16552, 16578), True, 'import tensorflow as tf\n'), ((17688, 17748), 'tensorflow.keras.optimizers.SGD', 'tf.keras.optimizers.SGD', (['(0.0001)'], {'momentum': '(0.9)', 'nesterov': '(True)'}), '(0.0001, momentum=0.9, nesterov=True)\n', (17711, 17748), True, 'import tensorflow as tf\n'), ((1609, 1625), 'numpy.sum', 'np.sum', (['(rec >= t)'], {}), '(rec >= t)\n', (1615, 1625), True, 'import numpy as np\n'), ((1692, 1714), 'numpy.max', 'np.max', (['prec[rec >= t]'], {}), '(prec[rec >= t])\n', (1698, 1714), True, 'import numpy as np\n'), ((3220, 3249), 'numpy.maximum', 'np.maximum', (['BBGT[:, 0]', 'bb[0]'], {}), '(BBGT[:, 0], bb[0])\n', (3230, 3249), True, 'import numpy as np\n'), ((3274, 3303), 'numpy.maximum', 'np.maximum', (['BBGT[:, 1]', 'bb[1]'], {}), '(BBGT[:, 1], bb[1])\n', (3284, 3303), True, 'import numpy as np\n'), ((3328, 3357), 'numpy.minimum', 'np.minimum', (['BBGT[:, 2]', 'bb[2]'], {}), '(BBGT[:, 2], bb[2])\n', (3338, 3357), True, 'import numpy as np\n'), ((3382, 3411), 'numpy.minimum', 'np.minimum', (['BBGT[:, 3]', 'bb[3]'], {}), '(BBGT[:, 3], bb[3])\n', (3392, 3411), True, 'import numpy as np\n'), ((3433, 3469), 'numpy.maximum', 'np.maximum', (['(ixmax - ixmin + 1.0)', '(0.0)'], {}), '(ixmax - ixmin + 1.0, 0.0)\n', (3443, 3469), True, 'import numpy as np\n'), ((3489, 3525), 'numpy.maximum', 'np.maximum', (['(iymax - iymin + 1.0)', '(0.0)'], {}), '(iymax - iymin + 1.0, 0.0)\n', (3499, 3525), True, 'import numpy as np\n'), ((3835, 3851), 'numpy.max', 'np.max', (['overlaps'], {}), '(overlaps)\n', (3841, 3851), True, 'import numpy as np\n'), ((3883, 3902), 'numpy.argmax', 'np.argmax', (['overlaps'], {}), '(overlaps)\n', (3892, 3902), True, 'import numpy as np\n'), ((8697, 8726), 'tensorflow.cast', 'tf.cast', (['val_imgs', 'tf.float32'], {}), '(val_imgs, tf.float32)\n', (8704, 8726), True, 'import tensorflow as tf\n'), ((8771, 8801), 'tensorflow.cast', 'tf.cast', (['val_metas', 'tf.float32'], {}), '(val_metas, tf.float32)\n', (8778, 8801), True, 'import tensorflow as tf\n'), ((9981, 10003), 'random.randint', 'random.randint', (['(1)', '(100)'], {}), '(1, 100)\n', (9995, 10003), 
False, 'import random\n'), ((10128, 10157), 'tensorflow.cast', 'tf.cast', (['val_imgs', 'tf.float32'], {}), '(val_imgs, tf.float32)\n', (10135, 10157), True, 'import tensorflow as tf\n'), ((10202, 10232), 'tensorflow.cast', 'tf.cast', (['val_metas', 'tf.float32'], {}), '(val_metas, tf.float32)\n', (10209, 10232), True, 'import tensorflow as tf\n'), ((10686, 10714), 'numpy.array', 'np.array', (['val_predict_bboxes'], {}), '(val_predict_bboxes)\n', (10694, 10714), True, 'import numpy as np\n'), ((16000, 16103), 'bjod_data.ZiptrainDataset', 'ZiptrainDataset', (['tf_record_path', '(1)', '(96)'], {'crop_size': 'crop_size', 'roi_path': '"""D:/datasets/bjod/roi_test/"""'}), "(tf_record_path, 1, 96, crop_size=crop_size, roi_path=\n 'D:/datasets/bjod/roi_test/')\n", (16015, 16103), False, 'from bjod_data import ZiptrainDataset, Zipvaluedata\n'), ((16175, 16224), 'bjod_data.Zipvaluedata', 'Zipvaluedata', (['tf_record_path'], {'crop_size': 'crop_size'}), '(tf_record_path, crop_size=crop_size)\n', (16187, 16224), False, 'from bjod_data import ZiptrainDataset, Zipvaluedata\n'), ((16275, 16324), 'bjod_data.Zipvaluedata', 'Zipvaluedata', (['tf_record_path'], {'crop_size': 'crop_size'}), '(tf_record_path, crop_size=crop_size)\n', (16287, 16324), False, 'from bjod_data import ZiptrainDataset, Zipvaluedata\n'), ((4354, 4374), 'numpy.finfo', 'np.finfo', (['np.float64'], {}), '(np.float64)\n', (4362, 4374), True, 'import numpy as np\n'), ((4420, 4440), 'numpy.finfo', 'np.finfo', (['np.float64'], {}), '(np.float64)\n', (4428, 4440), True, 'import numpy as np\n'), ((8550, 8580), 'tensorflow.squeeze', 'tf.squeeze', (['val_labels'], {'axis': '(0)'}), '(val_labels, axis=0)\n', (8560, 8580), True, 'import tensorflow as tf\n'), ((9116, 9133), 'numpy.array', 'np.array', (['tmp_box'], {}), '(tmp_box)\n', (9124, 9133), True, 'import numpy as np\n'), ((11027, 11050), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (11048, 11050), False, 'import datetime\n'), ((11497, 11519), 'tensorflow.fill', 'tf.fill', (['[1, 1000]', '(-1)'], {}), '([1, 1000], -1)\n', (11504, 11519), True, 'import tensorflow as tf\n'), ((12898, 12909), 'time.time', 'time.time', ([], {}), '()\n', (12907, 12909), False, 'import time\n'), ((13574, 13585), 'numpy.zeros', 'np.zeros', (['(5)'], {}), '(5)\n', (13582, 13585), True, 'import numpy as np\n'), ((8614, 8639), 'tensorflow.squeeze', 'tf.squeeze', (['val_bboxes', '(0)'], {}), '(val_bboxes, 0)\n', (8624, 8639), True, 'import tensorflow as tf\n'), ((12993, 13058), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""total_loss"""', 'loss_history[0]'], {'step': 'index_step'}), "('total_loss', loss_history[0], step=index_step)\n", (13010, 13058), True, 'import tensorflow as tf\n'), ((13087, 13156), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""rpn_class_loss"""', 'loss_history[1]'], {'step': 'index_step'}), "('rpn_class_loss', loss_history[1], step=index_step)\n", (13104, 13156), True, 'import tensorflow as tf\n'), ((13185, 13253), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""rpn_bbox_loss"""', 'loss_history[2]'], {'step': 'index_step'}), "('rpn_bbox_loss', loss_history[2], step=index_step)\n", (13202, 13253), True, 'import tensorflow as tf\n'), ((13282, 13352), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""rcnn_class_loss"""', 'loss_history[3]'], {'step': 'index_step'}), "('rcnn_class_loss', loss_history[3], step=index_step)\n", (13299, 13352), True, 'import tensorflow as tf\n'), ((13381, 13450), 'tensorflow.summary.scalar', 'tf.summary.scalar', 
(['"""rcnn_bbox_loss"""', 'loss_history[4]'], {'step': 'index_step'}), "('rcnn_bbox_loss', loss_history[4], step=index_step)\n", (13398, 13450), True, 'import tensorflow as tf\n'), ((14080, 14102), 'tensorflow.fill', 'tf.fill', (['[1, 1000]', '(-1)'], {}), '([1, 1000], -1)\n', (14087, 14102), True, 'import tensorflow as tf\n'), ((12057, 12091), 'tensorflow.equal', 'tf.equal', (['batch_labels', 'labels_tmp'], {}), '(batch_labels, labels_tmp)\n', (12065, 12091), True, 'import tensorflow as tf\n'), ((12742, 12753), 'time.time', 'time.time', ([], {}), '()\n', (12751, 12753), False, 'import time\n'), ((11657, 11689), 'tensorflow.equal', 'tf.equal', (['per_tensor', 'labels_tmp'], {}), '(per_tensor, labels_tmp)\n', (11665, 11689), True, 'import tensorflow as tf\n'), ((14745, 14779), 'tensorflow.equal', 'tf.equal', (['batch_labels', 'labels_tmp'], {}), '(batch_labels, labels_tmp)\n', (14753, 14779), True, 'import tensorflow as tf\n'), ((14265, 14297), 'tensorflow.equal', 'tf.equal', (['per_tensor', 'labels_tmp'], {}), '(per_tensor, labels_tmp)\n', (14273, 14297), True, 'import tensorflow as tf\n'), ((15295, 15316), 'tensorflow.round', 'tf.round', (['sum_loss', '(2)'], {}), '(sum_loss, 2)\n', (15303, 15316), True, 'import tensorflow as tf\n')]
|
import numpy as np
from liegroups.numpy import _base
from liegroups.numpy.so2 import SO2
class SE2(_base.SpecialEuclideanBase):
"""Homogeneous transformation matrix in :math:`SE(2)` using active (alibi) transformations.
.. math::
SE(2) &= \\left\\{ \\mathbf{T}=
\\begin{bmatrix}
\\mathbf{C} & \\mathbf{r} \\\\
\\mathbf{0}^T & 1
\\end{bmatrix} \\in \\mathbb{R}^{3 \\times 3} ~\\middle|~ \\mathbf{C} \\in SO(2), \\mathbf{r} \\in \\mathbb{R}^2 \\right\\} \\\\
\\mathfrak{se}(2) &= \\left\\{ \\boldsymbol{\\Xi} =
\\boldsymbol{\\xi}^\\wedge \\in \\mathbb{R}^{3 \\times 3} ~\\middle|~
\\boldsymbol{\\xi}=
\\begin{bmatrix}
\\boldsymbol{\\rho} \\\\ \\phi
\\end{bmatrix} \\in \\mathbb{R}^3, \\boldsymbol{\\rho} \\in \\mathbb{R}^2, \\phi \in \\mathbb{R} \\right\\}
:cvar ~liegroups.SE2.dim: Dimension of the rotation matrix.
:cvar ~liegroups.SE2.dof: Underlying degrees of freedom (i.e., dimension of the tangent space).
:ivar rot: Storage for the rotation matrix :math:`\mathbf{C}`.
:ivar trans: Storage for the translation vector :math:`\mathbf{r}`.
"""
dim = 3
"""Dimension of the transformation matrix."""
dof = 3
"""Underlying degrees of freedom (i.e., dimension of the tangent space)."""
RotationType = SO2
def adjoint(self):
"""Adjoint matrix of the transformation.
.. math::
\\text{Ad}(\\mathbf{T}) =
\\begin{bmatrix}
\\mathbf{C} & 1^\\wedge \\mathbf{r} \\\\
\\mathbf{0}^T & 1
\\end{bmatrix}
\\in \\mathbb{R}^{3 \\times 3}
"""
rot_part = self.rot.as_matrix()
trans_part = np.array([self.trans[1], -self.trans[0]]).reshape((2, 1))
return np.vstack([np.hstack([rot_part, trans_part]),
[0, 0, 1]])
@classmethod
def exp(cls, xi):
"""Exponential map for :math:`SE(2)`, which computes a transformation from a tangent vector:
.. math::
\\mathbf{T}(\\boldsymbol{\\xi}) =
\\exp(\\boldsymbol{\\xi}^\\wedge) =
\\begin{bmatrix}
\\exp(\\phi ^\\wedge) & \\mathbf{J} \\boldsymbol{\\rho} \\\\
\\mathbf{0} ^ T & 1
\\end{bmatrix}
This is the inverse operation to :meth:`~liegroups.SE2.log`.
"""
if len(xi) != cls.dof:
raise ValueError("xi must have length {}".format(cls.dof))
rho = xi[0:2]
phi = xi[2]
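        # rotation comes from the SO(2) exponential; translation is the left Jacobian applied to rho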
return cls(cls.RotationType.exp(phi),
cls.RotationType.left_jacobian(phi).dot(rho))
@classmethod
def inv_left_jacobian(cls, xi):
""":math:`SE(2)` inverse left Jacobian.
.. math::
\\mathcal{J}^{-1}(\\boldsymbol{\\xi})
"""
raise NotImplementedError
@classmethod
def left_jacobian(cls, xi):
""":math:`SE(2)` left Jacobian.
.. math::
\\mathcal{J}(\\boldsymbol{\\xi})
"""
raise NotImplementedError
def log(self):
"""Logarithmic map for :math:`SE(2)`, which computes a tangent vector from a transformation:
.. math::
\\boldsymbol{\\xi}(\\mathbf{T}) =
\\ln(\\mathbf{T})^\\vee =
\\begin{bmatrix}
\\mathbf{J} ^ {-1} \\mathbf{r} \\\\
\\ln(\\boldsymbol{C}) ^\\vee
\\end{bmatrix}
        This is the inverse operation to :meth:`~liegroups.SE2.exp`.
"""
phi = self.rot.log()
rho = self.RotationType.inv_left_jacobian(phi).dot(self.trans)
return np.hstack([rho, phi])
@classmethod
def odot(cls, p, directional=False):
""":math:`SE(2)` odot operator as defined by Barfoot.
This is the Jacobian of a vector
.. math::
\\mathbf{p} =
\\begin{bmatrix}
sx \\\\ sy \\\\ sz \\\\ s
\\end{bmatrix} =
\\begin{bmatrix}
\\boldsymbol{\\epsilon} \\\\ \\eta
\\end{bmatrix}
with respect to a perturbation in the underlying parameters of :math:`\\mathbf{T}`.
If :math:`\\mathbf{p}` is given in Euclidean coordinates and directional=False, the missing scale value :math:`\\eta` is assumed to be 1 and the Jacobian is 2x3. If directional=True, :math:`\\eta` is assumed to be 0:
.. math::
\\mathbf{p}^\\odot =
\\begin{bmatrix}
\\eta \\mathbf{1} & 1^\\wedge \\boldsymbol{\\epsilon}
\\end{bmatrix}
If :math:`\\mathbf{p}` is given in Homogeneous coordinates, the Jacobian is 3x3:
.. math::
\\mathbf{p}^\\odot =
\\begin{bmatrix}
\\eta \\mathbf{1} & 1^\\wedge \\boldsymbol{\\epsilon} \\\\
\\mathbf{0}^T & 0
\\end{bmatrix}
"""
p = np.atleast_2d(p)
result = np.zeros([p.shape[0], p.shape[1], cls.dof])
if p.shape[1] == cls.dim - 1:
# Assume scale parameter is 1 unless p is a direction
# vector, in which case the scale is 0
if not directional:
result[:, 0:2, 0:2] = np.eye(2)
result[:, 0:2, 2] = cls.RotationType.wedge(1).dot(p.T).T
elif p.shape[1] == cls.dim:
result[:, 0:2, 0:2] = p[:, 2] * np.eye(2)
result[:, 0:2, 2] = cls.RotationType.wedge(1).dot(p[:, 0:2].T).T
else:
raise ValueError("p must have shape ({},), ({},), (N,{}) or (N,{})".format(
cls.dim - 1, cls.dim, cls.dim - 1, cls.dim))
return np.squeeze(result)
@classmethod
def vee(cls, Xi):
""":math:`SE(2)` vee operator as defined by Barfoot.
.. math::
\\boldsymbol{\\xi} = \\boldsymbol{\\Xi} ^\\vee
This is the inverse operation to :meth:`~liegroups.SE2.wedge`.
"""
if Xi.ndim < 3:
Xi = np.expand_dims(Xi, axis=0)
if Xi.shape[1:3] != (cls.dof, cls.dof):
raise ValueError("Xi must have shape ({},{}) or (N,{},{})".format(
cls.dof, cls.dof, cls.dof, cls.dof))
xi = np.empty([Xi.shape[0], cls.dof])
xi[:, 0:2] = Xi[:, 0:2, 2]
xi[:, 2] = cls.RotationType.vee(Xi[:, 0:2, 0:2])
return np.squeeze(xi)
@classmethod
def wedge(cls, xi):
""":math:`SE(2)` wedge operator as defined by Barfoot.
.. math::
\\boldsymbol{\\Xi} =
\\boldsymbol{\\xi} ^\\wedge =
\\begin{bmatrix}
\\phi ^\\wedge & \\boldsymbol{\\rho} \\\\
\\mathbf{0} ^ T & 0
\\end{bmatrix}
This is the inverse operation to :meth:`~liegroups.SE2.vee`.
"""
xi = np.atleast_2d(xi)
if xi.shape[1] != cls.dof:
raise ValueError(
"xi must have shape ({},) or (N,{})".format(cls.dof, cls.dof))
Xi = np.zeros([xi.shape[0], cls.dof, cls.dof])
Xi[:, 0:2, 0:2] = cls.RotationType.wedge(xi[:, 2])
Xi[:, 0:2, 2] = xi[:, 0:2]
return np.squeeze(Xi)
|
[
"numpy.empty",
"numpy.zeros",
"numpy.expand_dims",
"numpy.hstack",
"numpy.array",
"numpy.squeeze",
"numpy.eye",
"numpy.atleast_2d"
] |
[((3705, 3726), 'numpy.hstack', 'np.hstack', (['[rho, phi]'], {}), '([rho, phi])\n', (3714, 3726), True, 'import numpy as np\n'), ((4973, 4989), 'numpy.atleast_2d', 'np.atleast_2d', (['p'], {}), '(p)\n', (4986, 4989), True, 'import numpy as np\n'), ((5007, 5050), 'numpy.zeros', 'np.zeros', (['[p.shape[0], p.shape[1], cls.dof]'], {}), '([p.shape[0], p.shape[1], cls.dof])\n', (5015, 5050), True, 'import numpy as np\n'), ((5705, 5723), 'numpy.squeeze', 'np.squeeze', (['result'], {}), '(result)\n', (5715, 5723), True, 'import numpy as np\n'), ((6250, 6282), 'numpy.empty', 'np.empty', (['[Xi.shape[0], cls.dof]'], {}), '([Xi.shape[0], cls.dof])\n', (6258, 6282), True, 'import numpy as np\n'), ((6390, 6404), 'numpy.squeeze', 'np.squeeze', (['xi'], {}), '(xi)\n', (6400, 6404), True, 'import numpy as np\n'), ((6849, 6866), 'numpy.atleast_2d', 'np.atleast_2d', (['xi'], {}), '(xi)\n', (6862, 6866), True, 'import numpy as np\n'), ((7025, 7066), 'numpy.zeros', 'np.zeros', (['[xi.shape[0], cls.dof, cls.dof]'], {}), '([xi.shape[0], cls.dof, cls.dof])\n', (7033, 7066), True, 'import numpy as np\n'), ((7177, 7191), 'numpy.squeeze', 'np.squeeze', (['Xi'], {}), '(Xi)\n', (7187, 7191), True, 'import numpy as np\n'), ((6028, 6054), 'numpy.expand_dims', 'np.expand_dims', (['Xi'], {'axis': '(0)'}), '(Xi, axis=0)\n', (6042, 6054), True, 'import numpy as np\n'), ((1795, 1836), 'numpy.array', 'np.array', (['[self.trans[1], -self.trans[0]]'], {}), '([self.trans[1], -self.trans[0]])\n', (1803, 1836), True, 'import numpy as np\n'), ((1879, 1912), 'numpy.hstack', 'np.hstack', (['[rot_part, trans_part]'], {}), '([rot_part, trans_part])\n', (1888, 1912), True, 'import numpy as np\n'), ((5277, 5286), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (5283, 5286), True, 'import numpy as np\n'), ((5438, 5447), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (5444, 5447), True, 'import numpy as np\n')]
|
import pickle
import numpy as np
from neupy import algorithms
from neupy.exceptions import NotTrained
from algorithms.memory.data import zero, one, half_one, half_zero
from base import BaseTestCase
from helpers import vectors_for_testing
zero_hint = np.array([[0, 1, 0, 0]])
one_hint = np.array([[1, 0, 0, 0]])
class BAMTestCase(BaseTestCase):
def setUp(self):
super(BAMTestCase, self).setUp()
self.data = np.concatenate([zero, one], axis=0)
self.hints = np.concatenate([zero_hint, one_hint], axis=0)
def test_bam_exceptions(self):
with self.assertRaises(NotTrained):
dbnet = algorithms.DiscreteBAM()
dbnet.predict(np.array([0, 1]))
with self.assertRaises(NotTrained):
dbnet = algorithms.DiscreteBAM()
dbnet.predict_input(np.array([0, 1]))
with self.assertRaises(ValueError):
dbnet = algorithms.DiscreteBAM()
dbnet.weight = np.array([[0, 1], [1, 0]])
dbnet.train(np.array([0, 1, 1]), np.array([0, 1]))
def test_bam_X_validation(self):
dbnet = algorithms.DiscreteBAM()
dbnet.weight = np.array([[0, 1], [1, 0]])
with self.assertRaises(ValueError):
# Invalid discrete input values
dbnet.train(np.array([-1, 1]), np.array([0, 1]))
with self.assertRaises(ValueError):
dbnet.train(np.array([0, 1]), np.array([-1, 1]))
with self.assertRaises(ValueError):
dbnet.energy(np.array([-1, 1]), np.array([0, 1]))
with self.assertRaises(ValueError):
dbnet.energy(np.array([0, 1]), np.array([-1, 1]))
with self.assertRaises(ValueError):
dbnet.predict(np.array([-1, 1]))
def test_discrete_bam_storage(self):
network = algorithms.DiscreteBAM(mode='sync')
network.train(self.data, self.hints)
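        # a round trip through pickle should leave the network's predictions unchanged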
stored_network = pickle.dumps(network)
loaded_network = pickle.loads(stored_network)
network_prediction = network.predict(self.data)
loaded_network_prediction = loaded_network.predict(self.data)
np.testing.assert_array_almost_equal(
loaded_network_prediction[0], network_prediction[0])
np.testing.assert_array_almost_equal(
loaded_network_prediction[1], network_prediction[1])
def test_discrete_bam_sync(self):
bamnet = algorithms.DiscreteBAM(mode='sync')
bamnet.train(self.data, self.hints)
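        # keep copies to verify that training and prediction do not mutate the input arrays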
data_before = self.data.copy()
hints_before = self.hints.copy()
np.testing.assert_array_almost_equal(
bamnet.predict(half_zero)[1],
zero_hint
)
np.testing.assert_array_almost_equal(
bamnet.predict_output(half_one)[1],
one_hint
)
np.testing.assert_array_almost_equal(
bamnet.predict_input(zero_hint)[0],
zero
)
np.testing.assert_array_almost_equal(
bamnet.predict_input(one_hint)[0],
one
)
# Test 1d input array prediction
np.testing.assert_array_almost_equal(
bamnet.predict_input(one_hint.ravel())[0],
one
)
# Test 1d output array input prediction
np.testing.assert_array_almost_equal(
bamnet.predict_output(half_one.ravel())[1],
one_hint
)
# Test multiple input values prediction
input_matrix = np.vstack([one, zero])
output_matrix = np.vstack([one_hint, zero_hint])
output_matrix_before = output_matrix.copy()
input_matrix_before = input_matrix.copy()
np.testing.assert_array_almost_equal(
bamnet.predict_input(output_matrix)[0],
input_matrix
)
np.testing.assert_array_almost_equal(
bamnet.predict(input_matrix)[1],
output_matrix
)
np.testing.assert_array_equal(self.data, data_before)
np.testing.assert_array_equal(self.hints, hints_before)
np.testing.assert_array_equal(output_matrix, output_matrix_before)
np.testing.assert_array_equal(input_matrix, input_matrix_before)
def test_discrete_bam_async(self):
bamnet = algorithms.DiscreteBAM(mode='async', n_times=400)
data_before = self.data.copy()
hints_before = self.hints.copy()
bamnet.train(self.data, self.hints)
input_matrix = np.vstack([one, zero])
output_matrix = np.vstack([one_hint, zero_hint])
output_matrix_before = output_matrix.copy()
input_matrix_before = input_matrix.copy()
np.testing.assert_array_almost_equal(
bamnet.predict_input(output_matrix)[0],
input_matrix
)
np.testing.assert_array_almost_equal(
bamnet.predict_output(input_matrix)[1],
output_matrix
)
np.testing.assert_array_equal(self.data, data_before)
np.testing.assert_array_equal(self.hints, hints_before)
np.testing.assert_array_equal(output_matrix, output_matrix_before)
np.testing.assert_array_equal(input_matrix, input_matrix_before)
def test_bam_argument_in_predict_method(self):
dbnet = algorithms.DiscreteBAM(mode='async', n_times=1)
dbnet.train(self.data, self.hints)
self.assertTrue(np.any(one != dbnet.predict_output(half_one)[0]))
np.testing.assert_array_almost_equal(
one, dbnet.predict_output(half_one, n_times=100)[0])
def test_bam_energy_function(self):
input_vector = np.array([[1, 0, 0, 1, 1, 0, 0]])
output_vector = np.array([[1, 0]])
dbnet = algorithms.DiscreteBAM()
dbnet.train(input_vector, output_vector)
self.assertEqual(-7, dbnet.energy(input_vector, output_vector))
self.assertEqual(0, dbnet.energy(
np.array([[0, 0, 0, 0, 0, 0, 0]]),
np.array([[0, 0]])
))
self.assertEqual(-7, dbnet.energy(
np.array([[0, 1, 1, 0, 0, 1, 1]]),
np.array([[0, 1]])
))
# Test 1d array
self.assertEqual(-7, dbnet.energy(
np.array([0, 1, 1, 0, 0, 1, 1]),
np.array([0, 1])
))
# Test multiple input values energy calculation
np.testing.assert_array_almost_equal(
np.array([-7, 0]),
dbnet.energy(
np.array([
[0, 1, 1, 0, 0, 1, 1],
[0, 0, 0, 0, 0, 0, 0],
]),
np.array([
[0, 1],
[0, 0],
])
)
)
def test_bam_train_different_inputs(self):
self.assertInvalidVectorTrain(
algorithms.DiscreteBAM(),
np.array([1, 0, 0, 1]),
np.array([1, 0]),
is_feature1d=False)
def test_bam_predict_different_inputs(self):
bamnet = algorithms.DiscreteBAM()
data = np.array([[1, 0, 0, 1]])
target = np.array([[1, 0]])
bamnet.train(data, target)
test_vectors = vectors_for_testing(
data.reshape(data.size), is_feature1d=False)
for test_vector in test_vectors:
np.testing.assert_array_almost_equal(
bamnet.predict(test_vector)[1], target)
|
[
"pickle.loads",
"algorithms.memory.data.half_one.ravel",
"numpy.testing.assert_array_equal",
"numpy.array",
"neupy.algorithms.DiscreteBAM",
"numpy.vstack",
"numpy.testing.assert_array_almost_equal",
"numpy.concatenate",
"pickle.dumps"
] |
[((255, 279), 'numpy.array', 'np.array', (['[[0, 1, 0, 0]]'], {}), '([[0, 1, 0, 0]])\n', (263, 279), True, 'import numpy as np\n'), ((291, 315), 'numpy.array', 'np.array', (['[[1, 0, 0, 0]]'], {}), '([[1, 0, 0, 0]])\n', (299, 315), True, 'import numpy as np\n'), ((433, 468), 'numpy.concatenate', 'np.concatenate', (['[zero, one]'], {'axis': '(0)'}), '([zero, one], axis=0)\n', (447, 468), True, 'import numpy as np\n'), ((490, 535), 'numpy.concatenate', 'np.concatenate', (['[zero_hint, one_hint]'], {'axis': '(0)'}), '([zero_hint, one_hint], axis=0)\n', (504, 535), True, 'import numpy as np\n'), ((1106, 1130), 'neupy.algorithms.DiscreteBAM', 'algorithms.DiscreteBAM', ([], {}), '()\n', (1128, 1130), False, 'from neupy import algorithms\n'), ((1154, 1180), 'numpy.array', 'np.array', (['[[0, 1], [1, 0]]'], {}), '([[0, 1], [1, 0]])\n', (1162, 1180), True, 'import numpy as np\n'), ((1801, 1836), 'neupy.algorithms.DiscreteBAM', 'algorithms.DiscreteBAM', ([], {'mode': '"""sync"""'}), "(mode='sync')\n", (1823, 1836), False, 'from neupy import algorithms\n'), ((1908, 1929), 'pickle.dumps', 'pickle.dumps', (['network'], {}), '(network)\n', (1920, 1929), False, 'import pickle\n'), ((1955, 1983), 'pickle.loads', 'pickle.loads', (['stored_network'], {}), '(stored_network)\n', (1967, 1983), False, 'import pickle\n'), ((2120, 2213), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['loaded_network_prediction[0]', 'network_prediction[0]'], {}), '(loaded_network_prediction[0],\n network_prediction[0])\n', (2156, 2213), True, 'import numpy as np\n'), ((2232, 2325), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['loaded_network_prediction[1]', 'network_prediction[1]'], {}), '(loaded_network_prediction[1],\n network_prediction[1])\n', (2268, 2325), True, 'import numpy as np\n'), ((2391, 2426), 'neupy.algorithms.DiscreteBAM', 'algorithms.DiscreteBAM', ([], {'mode': '"""sync"""'}), "(mode='sync')\n", (2413, 2426), False, 'from neupy import algorithms\n'), ((3461, 3483), 'numpy.vstack', 'np.vstack', (['[one, zero]'], {}), '([one, zero])\n', (3470, 3483), True, 'import numpy as np\n'), ((3508, 3540), 'numpy.vstack', 'np.vstack', (['[one_hint, zero_hint]'], {}), '([one_hint, zero_hint])\n', (3517, 3540), True, 'import numpy as np\n'), ((3913, 3966), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['self.data', 'data_before'], {}), '(self.data, data_before)\n', (3942, 3966), True, 'import numpy as np\n'), ((3975, 4030), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['self.hints', 'hints_before'], {}), '(self.hints, hints_before)\n', (4004, 4030), True, 'import numpy as np\n'), ((4039, 4105), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['output_matrix', 'output_matrix_before'], {}), '(output_matrix, output_matrix_before)\n', (4068, 4105), True, 'import numpy as np\n'), ((4114, 4178), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['input_matrix', 'input_matrix_before'], {}), '(input_matrix, input_matrix_before)\n', (4143, 4178), True, 'import numpy as np\n'), ((4236, 4285), 'neupy.algorithms.DiscreteBAM', 'algorithms.DiscreteBAM', ([], {'mode': '"""async"""', 'n_times': '(400)'}), "(mode='async', n_times=400)\n", (4258, 4285), False, 'from neupy import algorithms\n'), ((4434, 4456), 'numpy.vstack', 'np.vstack', (['[one, zero]'], {}), '([one, zero])\n', (4443, 4456), True, 'import numpy as np\n'), ((4481, 4513), 'numpy.vstack', 'np.vstack', (['[one_hint, 
zero_hint]'], {}), '([one_hint, zero_hint])\n', (4490, 4513), True, 'import numpy as np\n'), ((4893, 4946), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['self.data', 'data_before'], {}), '(self.data, data_before)\n', (4922, 4946), True, 'import numpy as np\n'), ((4955, 5010), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['self.hints', 'hints_before'], {}), '(self.hints, hints_before)\n', (4984, 5010), True, 'import numpy as np\n'), ((5019, 5085), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['output_matrix', 'output_matrix_before'], {}), '(output_matrix, output_matrix_before)\n', (5048, 5085), True, 'import numpy as np\n'), ((5094, 5158), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['input_matrix', 'input_matrix_before'], {}), '(input_matrix, input_matrix_before)\n', (5123, 5158), True, 'import numpy as np\n'), ((5227, 5274), 'neupy.algorithms.DiscreteBAM', 'algorithms.DiscreteBAM', ([], {'mode': '"""async"""', 'n_times': '(1)'}), "(mode='async', n_times=1)\n", (5249, 5274), False, 'from neupy import algorithms\n'), ((5568, 5601), 'numpy.array', 'np.array', (['[[1, 0, 0, 1, 1, 0, 0]]'], {}), '([[1, 0, 0, 1, 1, 0, 0]])\n', (5576, 5601), True, 'import numpy as np\n'), ((5626, 5644), 'numpy.array', 'np.array', (['[[1, 0]]'], {}), '([[1, 0]])\n', (5634, 5644), True, 'import numpy as np\n'), ((5661, 5685), 'neupy.algorithms.DiscreteBAM', 'algorithms.DiscreteBAM', ([], {}), '()\n', (5683, 5685), False, 'from neupy import algorithms\n'), ((6933, 6957), 'neupy.algorithms.DiscreteBAM', 'algorithms.DiscreteBAM', ([], {}), '()\n', (6955, 6957), False, 'from neupy import algorithms\n'), ((6974, 6998), 'numpy.array', 'np.array', (['[[1, 0, 0, 1]]'], {}), '([[1, 0, 0, 1]])\n', (6982, 6998), True, 'import numpy as np\n'), ((7016, 7034), 'numpy.array', 'np.array', (['[[1, 0]]'], {}), '([[1, 0]])\n', (7024, 7034), True, 'import numpy as np\n'), ((636, 660), 'neupy.algorithms.DiscreteBAM', 'algorithms.DiscreteBAM', ([], {}), '()\n', (658, 660), False, 'from neupy import algorithms\n'), ((770, 794), 'neupy.algorithms.DiscreteBAM', 'algorithms.DiscreteBAM', ([], {}), '()\n', (792, 794), False, 'from neupy import algorithms\n'), ((910, 934), 'neupy.algorithms.DiscreteBAM', 'algorithms.DiscreteBAM', ([], {}), '()\n', (932, 934), False, 'from neupy import algorithms\n'), ((962, 988), 'numpy.array', 'np.array', (['[[0, 1], [1, 0]]'], {}), '([[0, 1], [1, 0]])\n', (970, 988), True, 'import numpy as np\n'), ((6339, 6356), 'numpy.array', 'np.array', (['[-7, 0]'], {}), '([-7, 0])\n', (6347, 6356), True, 'import numpy as np\n'), ((6742, 6766), 'neupy.algorithms.DiscreteBAM', 'algorithms.DiscreteBAM', ([], {}), '()\n', (6764, 6766), False, 'from neupy import algorithms\n'), ((6780, 6802), 'numpy.array', 'np.array', (['[1, 0, 0, 1]'], {}), '([1, 0, 0, 1])\n', (6788, 6802), True, 'import numpy as np\n'), ((6816, 6832), 'numpy.array', 'np.array', (['[1, 0]'], {}), '([1, 0])\n', (6824, 6832), True, 'import numpy as np\n'), ((687, 703), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (695, 703), True, 'import numpy as np\n'), ((827, 843), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (835, 843), True, 'import numpy as np\n'), ((1013, 1032), 'numpy.array', 'np.array', (['[0, 1, 1]'], {}), '([0, 1, 1])\n', (1021, 1032), True, 'import numpy as np\n'), ((1034, 1050), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (1042, 1050), True, 'import numpy as np\n'), ((1294, 1311), 'numpy.array', 
'np.array', (['[-1, 1]'], {}), '([-1, 1])\n', (1302, 1311), True, 'import numpy as np\n'), ((1313, 1329), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (1321, 1329), True, 'import numpy as np\n'), ((1400, 1416), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (1408, 1416), True, 'import numpy as np\n'), ((1418, 1435), 'numpy.array', 'np.array', (['[-1, 1]'], {}), '([-1, 1])\n', (1426, 1435), True, 'import numpy as np\n'), ((1507, 1524), 'numpy.array', 'np.array', (['[-1, 1]'], {}), '([-1, 1])\n', (1515, 1524), True, 'import numpy as np\n'), ((1526, 1542), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (1534, 1542), True, 'import numpy as np\n'), ((1614, 1630), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (1622, 1630), True, 'import numpy as np\n'), ((1632, 1649), 'numpy.array', 'np.array', (['[-1, 1]'], {}), '([-1, 1])\n', (1640, 1649), True, 'import numpy as np\n'), ((1722, 1739), 'numpy.array', 'np.array', (['[-1, 1]'], {}), '([-1, 1])\n', (1730, 1739), True, 'import numpy as np\n'), ((5862, 5895), 'numpy.array', 'np.array', (['[[0, 0, 0, 0, 0, 0, 0]]'], {}), '([[0, 0, 0, 0, 0, 0, 0]])\n', (5870, 5895), True, 'import numpy as np\n'), ((5909, 5927), 'numpy.array', 'np.array', (['[[0, 0]]'], {}), '([[0, 0]])\n', (5917, 5927), True, 'import numpy as np\n'), ((5994, 6027), 'numpy.array', 'np.array', (['[[0, 1, 1, 0, 0, 1, 1]]'], {}), '([[0, 1, 1, 0, 0, 1, 1]])\n', (6002, 6027), True, 'import numpy as np\n'), ((6041, 6059), 'numpy.array', 'np.array', (['[[0, 1]]'], {}), '([[0, 1]])\n', (6049, 6059), True, 'import numpy as np\n'), ((6151, 6182), 'numpy.array', 'np.array', (['[0, 1, 1, 0, 0, 1, 1]'], {}), '([0, 1, 1, 0, 0, 1, 1])\n', (6159, 6182), True, 'import numpy as np\n'), ((6196, 6212), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (6204, 6212), True, 'import numpy as np\n'), ((6400, 6456), 'numpy.array', 'np.array', (['[[0, 1, 1, 0, 0, 1, 1], [0, 0, 0, 0, 0, 0, 0]]'], {}), '([[0, 1, 1, 0, 0, 1, 1], [0, 0, 0, 0, 0, 0, 0]])\n', (6408, 6456), True, 'import numpy as np\n'), ((6533, 6559), 'numpy.array', 'np.array', (['[[0, 1], [0, 0]]'], {}), '([[0, 1], [0, 0]])\n', (6541, 6559), True, 'import numpy as np\n'), ((3336, 3352), 'algorithms.memory.data.half_one.ravel', 'half_one.ravel', ([], {}), '()\n', (3350, 3352), False, 'from algorithms.memory.data import zero, one, half_one, half_zero\n')]
|
import data.tools.maths as m
import pygame, numpy
class MousePicker:
current_ray = None
RAY_RANGE = 600.0
RECURSION_COUNT = 200
def __init__(self, camera, projection_matrix, display, terrain):
self.camera = camera
self.projection_matrix = projection_matrix
self.display = display
self.terrain = terrain
self.view_matrix = m.Maths().create_view_matrix(camera)
self.current_terrain_point = None
self.count = 0
def get_current_ray(self):
return self.current_ray
def update(self):
self.view_matrix = m.Maths().create_view_matrix(self.camera)
self.current_ray = self.calculate_mouse_ray()
def calculate_mouse_ray(self):
mouse_x, mouse_y = float(pygame.mouse.get_pos()[0]), float(pygame.mouse.get_pos()[1])
normalized_device_coordinates = self.get_normalized_device_coordinates(mouse_x, mouse_y)
clip_coordinates = (normalized_device_coordinates[0], normalized_device_coordinates[1], -1.0, 1.0)
eye_coordinates = self.to_eye_coordinates(clip_coordinates)
world_ray = self.to_world_coordinates(eye_coordinates)
return world_ray
def to_world_coordinates(self, eye_coordinates):
inverted_view_matrix = numpy.linalg.inv(self.view_matrix)
ray_world_coordinates = numpy.dot(inverted_view_matrix, eye_coordinates)
mouse_ray = (-ray_world_coordinates[0], ray_world_coordinates[1], -ray_world_coordinates[2])
return mouse_ray
def to_eye_coordinates(self, clip_coordinates):
inverted_projection_matrix = numpy.linalg.inv(self.projection_matrix)
eye_coordinates = numpy.dot(inverted_projection_matrix, clip_coordinates)
return eye_coordinates[0], eye_coordinates[1], -1.0, 0.0
def get_normalized_device_coordinates(self, mouse_x, mouse_y):
x = (2.0 * mouse_x) / self.display.get_width() - 1.0
y = (2.0 * mouse_y) / self.display.get_height() - 1.0
return (x, y)
def intersect_with_y(self):
a = self.camera.position[0]
b = self.camera.position[1]
c = self.camera.position[2]
alpha = self.current_ray[0]
beta = self.current_ray[1]
gamma = self.current_ray[2]
x = a - (alpha * b) / beta
if self.terrain.height is not None:
y = self.terrain.height
else:
y = 0.0
z = c - (gamma * b) / beta
return (x, y, z)
|
[
"numpy.dot",
"data.tools.maths.Maths",
"numpy.linalg.inv",
"pygame.mouse.get_pos"
] |
[((1302, 1336), 'numpy.linalg.inv', 'numpy.linalg.inv', (['self.view_matrix'], {}), '(self.view_matrix)\n', (1318, 1336), False, 'import pygame, numpy\n'), ((1370, 1418), 'numpy.dot', 'numpy.dot', (['inverted_view_matrix', 'eye_coordinates'], {}), '(inverted_view_matrix, eye_coordinates)\n', (1379, 1418), False, 'import pygame, numpy\n'), ((1640, 1680), 'numpy.linalg.inv', 'numpy.linalg.inv', (['self.projection_matrix'], {}), '(self.projection_matrix)\n', (1656, 1680), False, 'import pygame, numpy\n'), ((1708, 1763), 'numpy.dot', 'numpy.dot', (['inverted_projection_matrix', 'clip_coordinates'], {}), '(inverted_projection_matrix, clip_coordinates)\n', (1717, 1763), False, 'import pygame, numpy\n'), ((395, 404), 'data.tools.maths.Maths', 'm.Maths', ([], {}), '()\n', (402, 404), True, 'import data.tools.maths as m\n'), ((619, 628), 'data.tools.maths.Maths', 'm.Maths', ([], {}), '()\n', (626, 628), True, 'import data.tools.maths as m\n'), ((788, 810), 'pygame.mouse.get_pos', 'pygame.mouse.get_pos', ([], {}), '()\n', (808, 810), False, 'import pygame, numpy\n'), ((822, 844), 'pygame.mouse.get_pos', 'pygame.mouse.get_pos', ([], {}), '()\n', (842, 844), False, 'import pygame, numpy\n')]
|
import os
import subprocess
from subprocess import check_output
import cv2
import numpy as np
class VideoCaptureYUV:
def __init__(self, filename, size):
self.height, self.width = size
self.frame_len = int(self.width * self.height * 3 / 2)
self.f = open(filename, 'rb')
self.shape = (int(self.height*1.5), self.width)
def quantize(self,y_comp,bits,bdepth=8):
y_comp=np.uint8(np.rint(y_comp*((pow(2,bits)-1)/(pow(2,bdepth)-1))))
return y_comp
def quantize_inverse(self,y_comp,bits,bdepth=8):
y_comp=np.uint8(np.rint(y_comp*((pow(2,bdepth)-1)/(pow(2,bits)-1))))
return y_comp
def adjust_luminance(self,y_comp,step):
y_comp=np.clip(y_comp+step,a_min = 2, a_max = 255)
return y_comp
def read_raw(self):
try:
raw = self.f.read(self.frame_len)
yuv = np.frombuffer(raw, dtype=np.uint8)
yuv = yuv.reshape(self.shape)
except Exception as e:
print(str(e))
return False, None
return True, yuv
def read(self,lum_step=0):
ret, yuv = self.read_raw()
if not ret:
return ret, yuv
y=yuv[:1080,:]
uv=yuv[1080:,:]
y=self.quantize(y,6,8)
uv=self.quantize(uv,5,8)
y=self.quantize_inverse(y,6,8)
uv=self.quantize_inverse(uv,5,8)
yuv=np.concatenate((y,uv),axis=0)
yuv_mod = yuv.reshape(self.frame_len,)
bgr = cv2.cvtColor(yuv, cv2.COLOR_YUV2RGB_I420)
rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
        # gray = cv2.cvtColor(yuv, cv2.COLOR_BGR2GRAY)
return ret, rgb
def step1():
# step 1
inputfilepath="./mp4"
outputfilepath="./yuv"
files=os.listdir(inputfilepath)
for name in files:
cmd="ffmpeg -i {0} -c:v rawvideo -pix_fmt yuv420p {1}".format(inputfilepath+"/"+name,outputfilepath+"/"+name[:-3]+"yuv")
check_output(cmd, shell=True).decode()
print(cmd)
#os.remove(inputfilepath+"/"+name)
print("shukar hai!")
print("Step 1 completed")
for name in files:
os.remove(inputfilepath+"/"+name)
def step2():
# step 2
path="./yuv"
files=os.listdir(path)
for name in files:
filename=path+'\\'+name
print(filename)
for lum_step in range(0,1):
size = (1080, 1920)
cap = VideoCaptureYUV(filename, size)
fourcc=cv2.VideoWriter_fourcc(*'MP4V')
fourcc=0x7634706d
fps=int(name[-6:-4])
out=cv2.VideoWriter('./mp4/{0}_6b.mp4'.format(name[:-4]),fourcc,fps,(1920,1080))
while 1:
ret, frame = cap.read(lum_step)
out.write(frame)
if ret:
pass
else:
break
print("step 2 completed")
def step3():
#step3
path = "./yuv"
files = os.listdir(path)
for name in files:
filename=path+'\\'+name
os.remove(filename)
print("step3 completed")
def step4():
#step 4
inputfilepath="./mp4"
outputfilepath="./png"
files=os.listdir(inputfilepath)
for name in files:
try:
cmd="ffmpeg -i {0} -vf fps=0.2 {1}".format(inputfilepath+"/"+name,outputfilepath+"/%06d_"+name[:4]+".png")
check_output(cmd, shell=True).decode()
except:
pass
print("step 3 completed")
if __name__ == "__main__":
step1()
step2()
step3()
step4()
|
[
"os.remove",
"cv2.VideoWriter_fourcc",
"cv2.cvtColor",
"numpy.frombuffer",
"subprocess.check_output",
"numpy.clip",
"os.listdir",
"numpy.concatenate"
] |
[((1505, 1530), 'os.listdir', 'os.listdir', (['inputfilepath'], {}), '(inputfilepath)\n', (1515, 1530), False, 'import os\n'), ((1916, 1932), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (1926, 1932), False, 'import os\n'), ((2454, 2470), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (2464, 2470), False, 'import os\n'), ((2644, 2669), 'os.listdir', 'os.listdir', (['inputfilepath'], {}), '(inputfilepath)\n', (2654, 2669), False, 'import os\n'), ((644, 686), 'numpy.clip', 'np.clip', (['(y_comp + step)'], {'a_min': '(2)', 'a_max': '(255)'}), '(y_comp + step, a_min=2, a_max=255)\n', (651, 686), True, 'import numpy as np\n'), ((1189, 1220), 'numpy.concatenate', 'np.concatenate', (['(y, uv)'], {'axis': '(0)'}), '((y, uv), axis=0)\n', (1203, 1220), True, 'import numpy as np\n'), ((1268, 1309), 'cv2.cvtColor', 'cv2.cvtColor', (['yuv', 'cv2.COLOR_YUV2RGB_I420'], {}), '(yuv, cv2.COLOR_YUV2RGB_I420)\n', (1280, 1309), False, 'import cv2\n'), ((1318, 1354), 'cv2.cvtColor', 'cv2.cvtColor', (['bgr', 'cv2.COLOR_BGR2RGB'], {}), '(bgr, cv2.COLOR_BGR2RGB)\n', (1330, 1354), False, 'import cv2\n'), ((1837, 1874), 'os.remove', 'os.remove', (["(inputfilepath + '/' + name)"], {}), "(inputfilepath + '/' + name)\n", (1846, 1874), False, 'import os\n'), ((2519, 2538), 'os.remove', 'os.remove', (['filename'], {}), '(filename)\n', (2528, 2538), False, 'import os\n'), ((786, 820), 'numpy.frombuffer', 'np.frombuffer', (['raw'], {'dtype': 'np.uint8'}), '(raw, dtype=np.uint8)\n', (799, 820), True, 'import numpy as np\n'), ((2101, 2132), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'MP4V'"], {}), "(*'MP4V')\n", (2123, 2132), False, 'import cv2\n'), ((1677, 1706), 'subprocess.check_output', 'check_output', (['cmd'], {'shell': '(True)'}), '(cmd, shell=True)\n', (1689, 1706), False, 'from subprocess import check_output\n'), ((2811, 2840), 'subprocess.check_output', 'check_output', (['cmd'], {'shell': '(True)'}), '(cmd, shell=True)\n', (2823, 2840), False, 'from subprocess import check_output\n')]
|
# coding: utf-8
# In[1]:
import pandas as pd
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
get_ipython().magic('matplotlib inline')
# In[2]:
data = pd.read_csv("../build/nis_array_.log",delimiter="\n")
t = [7.8 for i in range(498)]
ts = np.arange(0,498,1)
# In[3]:
plt.plot(ts, t, label='first plot')
plt.plot(ts, data, label='second plot')
plt.legend()
# If the curve is way under, we're overestimating the uncertainty in the system; if half of the curve is over, we're underestimating the uncertainty
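# In[4]:
# Added as an illustrative consistency check (not part of the original
# notebook): assuming 7.8 is the chi-square 95% value for a three-dimensional
# measurement and the log holds one NIS value per line, a well tuned filter
# should leave only about 5% of the samples above that line.
frac_above = float((data.values.ravel() > 7.8).mean())
print("fraction of NIS samples above 7.8: {:.3f} (expect roughly 0.05)".format(frac_above))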
|
[
"pandas.read_csv",
"numpy.arange",
"matplotlib.pyplot.plot"
] |
[((179, 233), 'pandas.read_csv', 'pd.read_csv', (['"""../build/nis_array_.log"""'], {'delimiter': '"""\n"""'}), "('../build/nis_array_.log', delimiter='\\n')\n", (190, 233), True, 'import pandas as pd\n'), ((268, 288), 'numpy.arange', 'np.arange', (['(0)', '(498)', '(1)'], {}), '(0, 498, 1)\n', (277, 288), True, 'import numpy as np\n'), ((300, 335), 'matplotlib.pyplot.plot', 'plt.plot', (['ts', 't'], {'label': '"""first plot"""'}), "(ts, t, label='first plot')\n", (308, 335), True, 'import matplotlib.pyplot as plt\n'), ((336, 375), 'matplotlib.pyplot.plot', 'plt.plot', (['ts', 'data'], {'label': '"""second plot"""'}), "(ts, data, label='second plot')\n", (344, 375), True, 'import matplotlib.pyplot as plt\n')]
|
"""
Evaluate the model using Eigen split of KITTI dataset
- prepare gt depth running the script https://github.com/nianticlabs/monodepth2/blob/master/export_gt_depth.py
"""
import argparse
import os
import cv2
import numpy as np
import tensorflow as tf
from tqdm import tqdm
from eval_utils import compute_errors, compute_scale_and_shift
from network import Pydnet
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
class KITTILoader(object):
def __init__(self, params):
self.params = params
self.height = params["height"]
self.width = params["width"]
self.data_list_file = params["data_list_file"]
self.data_path = params["data_path"]
self.num_workers = 4
self.data_list = np.loadtxt(self.data_list_file, dtype=bytes).astype(np.str)
self.default_img_shape = None
def read_and_decode(self, filename_queue):
"""Read jpeg file from file system"""
img0_name = tf.strings.join([self.data_path, "/", filename_queue, ".jpg"])
img0 = tf.image.decode_jpeg(tf.io.read_file(img0_name), channels=3)
img0 = tf.cast(img0, tf.float32)
return img0
def preprocess(self, filename_queue):
"""Prepare single image at testing time"""
img0 = self.read_and_decode(filename_queue)
img0 = tf.image.resize_images(img0, [self.height, self.width], tf.image.ResizeMethod.AREA)
img0.set_shape([self.height, self.width, 3])
img0 = img0 / 255.0
return img0
def create_iterator(self, num_parallel_calls=4):
"""Create iterator"""
data_list = tf.convert_to_tensor(self.data_list, dtype=tf.string)
dataset = tf.data.Dataset.from_tensor_slices(data_list)
dataset = dataset.map(self.preprocess, num_parallel_calls=num_parallel_calls)
dataset = dataset.batch(1)
dataset = dataset.repeat()
iterator = dataset.make_initializable_iterator()
return iterator
def read_test_files(test_file) -> list:
"""Read test files from txt file"""
assert os.path.exists(test_file)
with open(test_file, "r") as f:
lines = f.readlines()
lines = [l.strip() for l in lines]
return lines
def run_inference(opts):
"""Run the model on KITTI"""
network_params = {"height": 320, "width": 640, "is_training": False}
dataset_params = {
"height": 320,
"width": 640,
"data_path": opts.data_path,
"data_list_file": opts.data_list_file,
}
dataset = KITTILoader(dataset_params)
iterator = dataset.create_iterator()
batch_img = iterator.get_next()
network = Pydnet(network_params)
predicted_idepth = network.forward(batch_img)
predicted_idepth = tf.nn.relu(predicted_idepth)
# restore graph
saver = tf.train.Saver()
sess = tf.Session()
sess.run(tf.compat.v1.global_variables_initializer())
sess.run(iterator.initializer)
saver.restore(sess, opts.ckpt)
os.makedirs(opts.dest, exist_ok=True)
test_images = read_test_files(opts.data_list_file)
num_images = len(test_images)
with tqdm(total=num_images) as pbar:
for i in range(num_images):
idepth = sess.run(predicted_idepth)
idepth = np.squeeze(idepth)
min_idepth = idepth.min()
max_idepth = idepth.max()
norm_idepth = (idepth - min_idepth) / (max_idepth - min_idepth)
norm_idepth *= 255.0
target_path = os.path.join(opts.data_path, f"{test_images[i]}.jpg")
target = cv2.imread(target_path)
h, w = target.shape[:2]
norm_idepth = cv2.resize(norm_idepth, (w, h))
img_path = os.path.join(opts.dest, f"{str(i).zfill(4)}.png")
cv2.imwrite(img_path, (norm_idepth * 256.0).astype(np.uint16))
pbar.update(1)
print("Inference done!")
def eval(opts):
"""Compute error metrics."""
errors = []
test_images = read_test_files(opts.data_list_file)
print("=> loading gt data")
gt_depths = np.load(opts.gt_path, fix_imports=True, encoding="latin1", allow_pickle=True)[
"data"
]
print("=> starting evaluation")
with tqdm(total=len(test_images)) as pbar:
for i in range(len(test_images)):
target = gt_depths[i]
pred_path = os.path.join(opts.dest, f"{str(i).zfill(4)}.png")
prediction_idepth = cv2.imread(pred_path, -1) / 256.0
mask = (target > 1e-3) & (target < opts.max_depth)
target_idepth = np.zeros_like(target)
target_idepth[mask == 1] = 1.0 / target[mask == 1]
scale, shift = compute_scale_and_shift(prediction_idepth, target_idepth, mask)
prediction_idepth_aligned = scale * prediction_idepth + shift
disparity_cap = 1.0 / opts.max_depth
prediction_idepth_aligned[prediction_idepth_aligned < disparity_cap] = disparity_cap
prediciton_depth_aligned = 1.0 / prediction_idepth_aligned
prediciton_depth_aligned = prediciton_depth_aligned[mask == 1]
target = target[mask == 1]
errors.append(compute_errors(target, prediciton_depth_aligned))
pbar.update(1)
mean_errors = np.array(errors).mean(0)
labels = ["abs_rel", "sq_rel", "rmse", "rmse_log", "a1", "a2", "a3"]
for i in range(len(labels)):
print(f"{labels[i]}:{mean_errors[i]}")
print("Evaluation done!")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Evaluate depth network on KITTI")
parser.add_argument("--ckpt", type=str, help="path to checkpoint", required=True)
parser.add_argument("--data_path", type=str, help="path to kitti", required=True)
parser.add_argument("--gt_path", type=str, help="path to gt_depths.npz", required=True)
parser.add_argument(
"--data_list_file", type=str, help="path to data list", default="test_kitti.txt"
)
parser.add_argument("--dest", type=str, help="prediction folder", default="kitti")
parser.add_argument("--max_depth", type=float, help="maximum depth value", default=80.0)
opts = parser.parse_args()
run_inference(opts)
eval(opts)
|
[
"numpy.load",
"argparse.ArgumentParser",
"os.path.join",
"eval_utils.compute_scale_and_shift",
"eval_utils.compute_errors",
"tensorflow.compat.v1.global_variables_initializer",
"tensorflow.nn.relu",
"numpy.zeros_like",
"os.path.exists",
"tensorflow.cast",
"numpy.loadtxt",
"tensorflow.io.read_file",
"cv2.resize",
"tensorflow.image.resize_images",
"tqdm.tqdm",
"tensorflow.train.Saver",
"tensorflow.Session",
"network.Pydnet",
"numpy.squeeze",
"os.makedirs",
"tensorflow.convert_to_tensor",
"tensorflow.data.Dataset.from_tensor_slices",
"cv2.imread",
"tensorflow.strings.join",
"numpy.array"
] |
[((2040, 2065), 'os.path.exists', 'os.path.exists', (['test_file'], {}), '(test_file)\n', (2054, 2065), False, 'import os\n'), ((2614, 2636), 'network.Pydnet', 'Pydnet', (['network_params'], {}), '(network_params)\n', (2620, 2636), False, 'from network import Pydnet\n'), ((2710, 2738), 'tensorflow.nn.relu', 'tf.nn.relu', (['predicted_idepth'], {}), '(predicted_idepth)\n', (2720, 2738), True, 'import tensorflow as tf\n'), ((2772, 2788), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (2786, 2788), True, 'import tensorflow as tf\n'), ((2800, 2812), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2810, 2812), True, 'import tensorflow as tf\n'), ((2946, 2983), 'os.makedirs', 'os.makedirs', (['opts.dest'], {'exist_ok': '(True)'}), '(opts.dest, exist_ok=True)\n', (2957, 2983), False, 'import os\n'), ((5467, 5537), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Evaluate depth network on KITTI"""'}), "(description='Evaluate depth network on KITTI')\n", (5490, 5537), False, 'import argparse\n'), ((942, 1004), 'tensorflow.strings.join', 'tf.strings.join', (["[self.data_path, '/', filename_queue, '.jpg']"], {}), "([self.data_path, '/', filename_queue, '.jpg'])\n", (957, 1004), True, 'import tensorflow as tf\n'), ((1096, 1121), 'tensorflow.cast', 'tf.cast', (['img0', 'tf.float32'], {}), '(img0, tf.float32)\n', (1103, 1121), True, 'import tensorflow as tf\n'), ((1303, 1391), 'tensorflow.image.resize_images', 'tf.image.resize_images', (['img0', '[self.height, self.width]', 'tf.image.ResizeMethod.AREA'], {}), '(img0, [self.height, self.width], tf.image.\n ResizeMethod.AREA)\n', (1325, 1391), True, 'import tensorflow as tf\n'), ((1592, 1645), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['self.data_list'], {'dtype': 'tf.string'}), '(self.data_list, dtype=tf.string)\n', (1612, 1645), True, 'import tensorflow as tf\n'), ((1664, 1709), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['data_list'], {}), '(data_list)\n', (1698, 1709), True, 'import tensorflow as tf\n'), ((2826, 2869), 'tensorflow.compat.v1.global_variables_initializer', 'tf.compat.v1.global_variables_initializer', ([], {}), '()\n', (2867, 2869), True, 'import tensorflow as tf\n'), ((3082, 3104), 'tqdm.tqdm', 'tqdm', ([], {'total': 'num_images'}), '(total=num_images)\n', (3086, 3104), False, 'from tqdm import tqdm\n'), ((4018, 4095), 'numpy.load', 'np.load', (['opts.gt_path'], {'fix_imports': '(True)', 'encoding': '"""latin1"""', 'allow_pickle': '(True)'}), "(opts.gt_path, fix_imports=True, encoding='latin1', allow_pickle=True)\n", (4025, 4095), True, 'import numpy as np\n'), ((1041, 1067), 'tensorflow.io.read_file', 'tf.io.read_file', (['img0_name'], {}), '(img0_name)\n', (1056, 1067), True, 'import tensorflow as tf\n'), ((3219, 3237), 'numpy.squeeze', 'np.squeeze', (['idepth'], {}), '(idepth)\n', (3229, 3237), True, 'import numpy as np\n'), ((3450, 3503), 'os.path.join', 'os.path.join', (['opts.data_path', 'f"""{test_images[i]}.jpg"""'], {}), "(opts.data_path, f'{test_images[i]}.jpg')\n", (3462, 3503), False, 'import os\n'), ((3525, 3548), 'cv2.imread', 'cv2.imread', (['target_path'], {}), '(target_path)\n', (3535, 3548), False, 'import cv2\n'), ((3611, 3642), 'cv2.resize', 'cv2.resize', (['norm_idepth', '(w, h)'], {}), '(norm_idepth, (w, h))\n', (3621, 3642), False, 'import cv2\n'), ((4510, 4531), 'numpy.zeros_like', 'np.zeros_like', (['target'], {}), '(target)\n', (4523, 4531), True, 'import numpy as np\n'), ((4622, 4685), 
'eval_utils.compute_scale_and_shift', 'compute_scale_and_shift', (['prediction_idepth', 'target_idepth', 'mask'], {}), '(prediction_idepth, target_idepth, mask)\n', (4645, 4685), False, 'from eval_utils import compute_errors, compute_scale_and_shift\n'), ((5216, 5232), 'numpy.array', 'np.array', (['errors'], {}), '(errors)\n', (5224, 5232), True, 'import numpy as np\n'), ((730, 774), 'numpy.loadtxt', 'np.loadtxt', (['self.data_list_file'], {'dtype': 'bytes'}), '(self.data_list_file, dtype=bytes)\n', (740, 774), True, 'import numpy as np\n'), ((4383, 4408), 'cv2.imread', 'cv2.imread', (['pred_path', '(-1)'], {}), '(pred_path, -1)\n', (4393, 4408), False, 'import cv2\n'), ((5119, 5167), 'eval_utils.compute_errors', 'compute_errors', (['target', 'prediciton_depth_aligned'], {}), '(target, prediciton_depth_aligned)\n', (5133, 5167), False, 'from eval_utils import compute_errors, compute_scale_and_shift\n')]
|
"""
==============
Edge operators
==============
Edge operators are used in image processing within edge detection algorithms.
They are discrete differentiation operators, computing an approximation of the
gradient of the image intensity function.
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage.data import camera
from skimage.filters import roberts, sobel, sobel_h, sobel_v, scharr, \
scharr_h, scharr_v, prewitt, prewitt_v, prewitt_h, farid_v, farid_h
image = camera()
edge_roberts = roberts(image)
edge_sobel = sobel(image)
fig, ax = plt.subplots(ncols=2, sharex=True, sharey=True,
figsize=(8, 4))
ax[0].imshow(edge_roberts, cmap=plt.cm.gray)
ax[0].set_title('Roberts Edge Detection')
ax[1].imshow(edge_sobel, cmap=plt.cm.gray)
ax[1].set_title('Sobel Edge Detection')
for a in ax:
a.axis('off')
plt.tight_layout()
plt.show()
######################################################################
# Different operators compute different finite-difference approximations of
# the gradient. For example, the Scharr filter results in a less rotational
# variance than the Sobel filter that is in turn better than the Prewitt
# filter [1]_ [2]_ [3]_. The difference between the Prewitt and Sobel filters
# and the Scharr filter is illustrated below with an image that is the
# discretization of a rotation-invariant continuous function. The
# discrepancy between the Prewitt and Sobel filters, and the Scharr filter is
# stronger for regions of the image where the direction of the gradient is
# close to diagonal, and for regions with high spatial frequencies. For the
# example image the differences between the filter results are very small and
# the filter results are visually almost indistinguishable.
#
# .. [1] https://en.wikipedia.org/wiki/Sobel_operator#Alternative_operators
#
# .. [2] <NAME>, <NAME>, and <NAME>. Principles of filter design.
# In Handbook of Computer Vision and Applications. Academic Press,
# 1999.
#
# .. [3] https://en.wikipedia.org/wiki/Prewitt_operator
x, y = np.ogrid[:100, :100]
# Rotation-invariant image with different spatial frequencies
img = np.exp(1j * np.hypot(x, y) ** 1.3 / 20.).real
edge_sobel = sobel(img)
edge_scharr = scharr(img)
edge_prewitt = prewitt(img)
diff_scharr_prewitt = edge_scharr - edge_prewitt
diff_scharr_sobel = edge_scharr - edge_sobel
max_diff = np.max(np.maximum(diff_scharr_prewitt, diff_scharr_sobel))
fig, axes = plt.subplots(nrows=2, ncols=2, sharex=True, sharey=True,
figsize=(8, 8))
ax = axes.ravel()
ax[0].imshow(img, cmap=plt.cm.gray)
ax[0].set_title('Original image')
ax[1].imshow(edge_scharr, cmap=plt.cm.gray)
ax[1].set_title('Scharr Edge Detection')
ax[2].imshow(diff_scharr_prewitt, cmap=plt.cm.gray, vmax=max_diff)
ax[2].set_title('Scharr - Prewitt')
ax[3].imshow(diff_scharr_sobel, cmap=plt.cm.gray, vmax=max_diff)
ax[3].set_title('Scharr - Sobel')
for a in ax:
a.axis('off')
plt.tight_layout()
plt.show()
######################################################################
# As in the previous example, here we illustrate the rotational invariance of
# the filters. The top row shows a rotationally invariant image along with the
# angle of its analytical gradient. The other two rows contain the difference
# between the different gradient approximations (Sobel, Prewitt, Scharr &
# Farid) and analytical gradient.
#
# The Farid & Simoncelli derivative filters [4]_, [5]_ are the most
# rotationally invariant, but require a 5x5 kernel, which is computationally
# more intensive than a 3x3 kernel.
#
# .. [4] <NAME>. and <NAME>., "Differentiation of discrete
# multidimensional signals", IEEE Transactions on Image Processing 13(4):
# 496-508, 2004. :DOI:`10.1109/TIP.2004.823819`
#
# .. [5] Wikipedia, "Farid and Simoncelli Derivatives." Available at:
# <https://en.wikipedia.org/wiki/Image_derivatives#Farid_and_Simoncelli_Derivatives>
x, y = np.mgrid[-10:10:255j, -10:10:255j]
img = np.sin(x ** 2 + y ** 2)
imgx = 2 * x * np.cos(x ** 2 + y ** 2)
imgy = 2 * y * np.cos(x ** 2 + y ** 2)
def angle(dx, dy):
return np.mod(np.arctan2(dy, dx), np.pi)
true_angle = angle(imgx, imgy)
angle_farid = angle(farid_h(img), farid_v(img))
angle_sobel = angle(sobel_h(img), sobel_v(img))
angle_scharr = angle(scharr_h(img), scharr_v(img))
angle_prewitt = angle(prewitt_h(img), prewitt_v(img))
def diff_angle(angle_1, angle_2):
return np.minimum(np.pi - np.abs(angle_1 - angle_2),
np.abs(angle_1 - angle_2))
diff_farid = diff_angle(true_angle, angle_farid)
diff_sobel = diff_angle(true_angle, angle_sobel)
diff_scharr = diff_angle(true_angle, angle_scharr)
diff_prewitt = diff_angle(true_angle, angle_prewitt)
fig, axes = plt.subplots(nrows=3, ncols=2, sharex=True, sharey=True,
figsize=(8, 8))
ax = axes.ravel()
ax[0].imshow(img, cmap=plt.cm.gray)
ax[0].set_title('Original image')
ax[1].imshow(true_angle, cmap=plt.cm.hsv)
ax[1].set_title('Analytical gradient angle')
ax[2].imshow(diff_sobel, cmap=plt.cm.inferno, vmin=0, vmax=0.02)
ax[2].set_title('Sobel error')
ax[3].imshow(diff_prewitt, cmap=plt.cm.inferno, vmin=0, vmax=0.02)
ax[3].set_title('Prewitt error')
ax[4].imshow(diff_scharr, cmap=plt.cm.inferno, vmin=0, vmax=0.02)
ax[4].set_title('Scharr error')
cax = ax[5].imshow(diff_farid, cmap=plt.cm.inferno, vmin=0, vmax=0.02)
ax[5].set_title('Farid error')
fig.subplots_adjust(right=0.8)
cbar_ax = fig.add_axes([0.90, 0.10, 0.02, 0.50])
fig.colorbar(cax, cax=cbar_ax, ticks=[0, 0.01, 0.02])
for a in ax:
a.axis('off')
plt.show()
|
[
"numpy.maximum",
"numpy.arctan2",
"numpy.abs",
"skimage.filters.farid_v",
"skimage.filters.scharr_h",
"skimage.filters.sobel_v",
"skimage.filters.sobel",
"numpy.sin",
"matplotlib.pyplot.tight_layout",
"skimage.filters.farid_h",
"skimage.filters.scharr",
"skimage.filters.prewitt_h",
"skimage.filters.sobel_h",
"skimage.filters.prewitt_v",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show",
"numpy.hypot",
"numpy.cos",
"skimage.data.camera",
"skimage.filters.roberts",
"skimage.filters.scharr_v",
"skimage.filters.prewitt"
] |
[((491, 499), 'skimage.data.camera', 'camera', ([], {}), '()\n', (497, 499), False, 'from skimage.data import camera\n'), ((515, 529), 'skimage.filters.roberts', 'roberts', (['image'], {}), '(image)\n', (522, 529), False, 'from skimage.filters import roberts, sobel, sobel_h, sobel_v, scharr, scharr_h, scharr_v, prewitt, prewitt_v, prewitt_h, farid_v, farid_h\n'), ((543, 555), 'skimage.filters.sobel', 'sobel', (['image'], {}), '(image)\n', (548, 555), False, 'from skimage.filters import roberts, sobel, sobel_h, sobel_v, scharr, scharr_h, scharr_v, prewitt, prewitt_v, prewitt_h, farid_v, farid_h\n'), ((567, 630), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'ncols': '(2)', 'sharex': '(True)', 'sharey': '(True)', 'figsize': '(8, 4)'}), '(ncols=2, sharex=True, sharey=True, figsize=(8, 4))\n', (579, 630), True, 'import matplotlib.pyplot as plt\n'), ((859, 877), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (875, 877), True, 'import matplotlib.pyplot as plt\n'), ((878, 888), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (886, 888), True, 'import matplotlib.pyplot as plt\n'), ((2219, 2229), 'skimage.filters.sobel', 'sobel', (['img'], {}), '(img)\n', (2224, 2229), False, 'from skimage.filters import roberts, sobel, sobel_h, sobel_v, scharr, scharr_h, scharr_v, prewitt, prewitt_v, prewitt_h, farid_v, farid_h\n'), ((2244, 2255), 'skimage.filters.scharr', 'scharr', (['img'], {}), '(img)\n', (2250, 2255), False, 'from skimage.filters import roberts, sobel, sobel_h, sobel_v, scharr, scharr_h, scharr_v, prewitt, prewitt_v, prewitt_h, farid_v, farid_h\n'), ((2271, 2283), 'skimage.filters.prewitt', 'prewitt', (['img'], {}), '(img)\n', (2278, 2283), False, 'from skimage.filters import roberts, sobel, sobel_h, sobel_v, scharr, scharr_h, scharr_v, prewitt, prewitt_v, prewitt_h, farid_v, farid_h\n'), ((2462, 2534), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'ncols': '(2)', 'sharex': '(True)', 'sharey': '(True)', 'figsize': '(8, 8)'}), '(nrows=2, ncols=2, sharex=True, sharey=True, figsize=(8, 8))\n', (2474, 2534), True, 'import matplotlib.pyplot as plt\n'), ((2972, 2990), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2988, 2990), True, 'import matplotlib.pyplot as plt\n'), ((2991, 3001), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2999, 3001), True, 'import matplotlib.pyplot as plt\n'), ((4012, 4035), 'numpy.sin', 'np.sin', (['(x ** 2 + y ** 2)'], {}), '(x ** 2 + y ** 2)\n', (4018, 4035), True, 'import numpy as np\n'), ((4775, 4847), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(3)', 'ncols': '(2)', 'sharex': '(True)', 'sharey': '(True)', 'figsize': '(8, 8)'}), '(nrows=3, ncols=2, sharex=True, sharey=True, figsize=(8, 8))\n', (4787, 4847), True, 'import matplotlib.pyplot as plt\n'), ((5618, 5628), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5626, 5628), True, 'import matplotlib.pyplot as plt\n'), ((2397, 2447), 'numpy.maximum', 'np.maximum', (['diff_scharr_prewitt', 'diff_scharr_sobel'], {}), '(diff_scharr_prewitt, diff_scharr_sobel)\n', (2407, 2447), True, 'import numpy as np\n'), ((4052, 4075), 'numpy.cos', 'np.cos', (['(x ** 2 + y ** 2)'], {}), '(x ** 2 + y ** 2)\n', (4058, 4075), True, 'import numpy as np\n'), ((4091, 4114), 'numpy.cos', 'np.cos', (['(x ** 2 + y ** 2)'], {}), '(x ** 2 + y ** 2)\n', (4097, 4114), True, 'import numpy as np\n'), ((4235, 4247), 'skimage.filters.farid_h', 'farid_h', (['img'], {}), '(img)\n', (4242, 4247), False, 'from skimage.filters import 
roberts, sobel, sobel_h, sobel_v, scharr, scharr_h, scharr_v, prewitt, prewitt_v, prewitt_h, farid_v, farid_h\n'), ((4249, 4261), 'skimage.filters.farid_v', 'farid_v', (['img'], {}), '(img)\n', (4256, 4261), False, 'from skimage.filters import roberts, sobel, sobel_h, sobel_v, scharr, scharr_h, scharr_v, prewitt, prewitt_v, prewitt_h, farid_v, farid_h\n'), ((4283, 4295), 'skimage.filters.sobel_h', 'sobel_h', (['img'], {}), '(img)\n', (4290, 4295), False, 'from skimage.filters import roberts, sobel, sobel_h, sobel_v, scharr, scharr_h, scharr_v, prewitt, prewitt_v, prewitt_h, farid_v, farid_h\n'), ((4297, 4309), 'skimage.filters.sobel_v', 'sobel_v', (['img'], {}), '(img)\n', (4304, 4309), False, 'from skimage.filters import roberts, sobel, sobel_h, sobel_v, scharr, scharr_h, scharr_v, prewitt, prewitt_v, prewitt_h, farid_v, farid_h\n'), ((4332, 4345), 'skimage.filters.scharr_h', 'scharr_h', (['img'], {}), '(img)\n', (4340, 4345), False, 'from skimage.filters import roberts, sobel, sobel_h, sobel_v, scharr, scharr_h, scharr_v, prewitt, prewitt_v, prewitt_h, farid_v, farid_h\n'), ((4347, 4360), 'skimage.filters.scharr_v', 'scharr_v', (['img'], {}), '(img)\n', (4355, 4360), False, 'from skimage.filters import roberts, sobel, sobel_h, sobel_v, scharr, scharr_h, scharr_v, prewitt, prewitt_v, prewitt_h, farid_v, farid_h\n'), ((4384, 4398), 'skimage.filters.prewitt_h', 'prewitt_h', (['img'], {}), '(img)\n', (4393, 4398), False, 'from skimage.filters import roberts, sobel, sobel_h, sobel_v, scharr, scharr_h, scharr_v, prewitt, prewitt_v, prewitt_h, farid_v, farid_h\n'), ((4400, 4414), 'skimage.filters.prewitt_v', 'prewitt_v', (['img'], {}), '(img)\n', (4409, 4414), False, 'from skimage.filters import roberts, sobel, sobel_h, sobel_v, scharr, scharr_h, scharr_v, prewitt, prewitt_v, prewitt_h, farid_v, farid_h\n'), ((4154, 4172), 'numpy.arctan2', 'np.arctan2', (['dy', 'dx'], {}), '(dy, dx)\n', (4164, 4172), True, 'import numpy as np\n'), ((4531, 4556), 'numpy.abs', 'np.abs', (['(angle_1 - angle_2)'], {}), '(angle_1 - angle_2)\n', (4537, 4556), True, 'import numpy as np\n'), ((4482, 4507), 'numpy.abs', 'np.abs', (['(angle_1 - angle_2)'], {}), '(angle_1 - angle_2)\n', (4488, 4507), True, 'import numpy as np\n'), ((2171, 2185), 'numpy.hypot', 'np.hypot', (['x', 'y'], {}), '(x, y)\n', (2179, 2185), True, 'import numpy as np\n')]
|
import pandas as pd
import os
import time
import numpy as np
from deriveSummaryDUC import read_simMats, cluster_mat, oracle_per_cluster
import pickle
from collections import defaultdict
from utils import offset_str2list, offset_decreaseSentOffset, insert_string
def find_abstractive_target(predictions_topic_cluster, alignments, topic):
cluster_spans = list(predictions_topic_cluster['docSpanText'].values)
alignments_cluster = alignments[(alignments['topic']==topic) & (alignments['docSpanText'].isin(cluster_spans))]
aligned_summ_span_cands = list(alignments_cluster['summarySpanText'].drop_duplicates().values)
summ_span_cands_score = []
for summ_span in aligned_summ_span_cands:
alignments_cluster_summ_span = alignments_cluster[alignments_cluster['summarySpanText'] == summ_span]
summ_span_cands_score.append(alignments_cluster_summ_span['pred_prob'].sum())
return aligned_summ_span_cands[np.argmax(summ_span_cands_score)]
def add_OIE_special_tok(docSpanOffsets, docSentCharIdx, sent):
# document_tmp = document[:]
span_offsets = offset_str2list(docSpanOffsets)
offsets = offset_decreaseSentOffset(docSentCharIdx, span_offsets)
# assume we have max 2 parts
    for offset in offsets[::-1]:  # [::-1] starts from the end so the remaining offsets won't be shifted
sent = insert_string(sent, offset[1], ' > ')
sent = insert_string(sent, offset[0], ' < ')
return sent
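def _offset_insertion_example():
    """Hypothetical helper added purely for illustration (not used anywhere):
    shows why add_OIE_special_tok walks the offsets from last to first. By
    wrapping the rightmost span before the earlier one, the earlier character
    positions stay valid after the insertions. This sketch inlines the assumed
    behaviour of insert_string, i.e. s[:i] + marker + s[i:]."""
    sent = "the cat sat"
    for start, end in [(8, 11), (0, 3)]:  # offsets processed from the end
        sent = sent[:end] + ' > ' + sent[end:]
        sent = sent[:start] + ' < ' + sent[start:]
    return sent  # ' < the >  cat  < sat > '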
##################################
###### main ##############
##################################
if __name__ == "__main__":
MAX_SENT = 100
DATASETS = ['DUC2004']#['TAC2008','TAC2009','TAC2010']
SET_TYPE = 'test'
CLUSTERING = True
SUMM_LEN = 100
MAX_CLUSTERS = 10
DUC2004_Benchmark = True
FULL_SENT = False
if FULL_SENT:
full_sent_flag = '_full_sent'
else:
full_sent_flag = ''
sys_model = 'roberta'
model_name = 'greedyMaxRouge'
sys_checkpoint = 'checkpoint-1200' # 'checkpoint-180'#'checkpoint-540'#'checkpoint-1020'#'checkpoint-540'#'checkpoint-600' #'checkpoint-1140'#'checkpoint-240'#'checkpoint-180' # 'checkpoint-1080'
sys_folder = 'OIE_TAC2008_TAC2009_2010_highlighter_CDLM_greedyMaxRouge_no_alignment_filter_negative_over_sample_positive_span_classifier_head_fixed'
##DUC2004
if DUC2004_Benchmark:
sys_checkpoint = 'checkpoint-1500' # 'checkpoint-180'#'checkpoint-540'#'checkpoint-1020'#'checkpoint-540'#'checkpoint-600' #'checkpoint-1140'#'checkpoint-240'#'checkpoint-180' # 'checkpoint-1080'
sys_folder = 'OIE_DUC2003_highlighter_CDLM_greedyMaxRouge_no_alignment_filter_negative_over_sample_positive_span_classifier_head_fixed_finetuned_TAC8910'
empty = 0
analysis_list = []
fusion_text = []
fusion_target = []
cluster_metadata = []
##full
full_fixed = 'fixed'
if DATASETS[0] == 'TAC2011':
full_fixed = 'full'
if DUC2004_Benchmark:
if DATASETS[0] == 'DUC2004':
metadata = pd.read_csv(
'./OIE_highlights/{}_{}_CDLM_allAlignments_{}_truncated_metadata.csv'.format(
'_'.join(DATASETS),
SET_TYPE, full_fixed))
else:
metadata = pd.read_csv(
'./OIE_highlights/{}_{}_CDLM_greedyMaxRouge_no_alignment_{}_truncated_metadata.csv'.format(
'_'.join(DATASETS),
SET_TYPE, full_fixed))
else:
metadata = pd.read_csv(
'./OIE_highlights/{}_{}_CDLM_allAlignments_{}_truncated_metadata.csv'.format(
'_'.join(DATASETS),
SET_TYPE,full_fixed))
predictions = pd.read_csv(
'./models/{}/{}/{}_{}_results_None.csv'.format(sys_folder, sys_checkpoint,
SET_TYPE, '_'.join(DATASETS)))
assert (len(predictions) == len(metadata))
metadata.insert(2, "prediction", predictions['prediction'])
predictions = metadata
for SET in DATASETS:
alignments = pd.read_csv(
'./dev{}_checkpoint-2000_negative.csv'.format(SET))
sys_summary_path = './{}_system_summaries/{}/{}_'.format(SET, sys_folder,
sys_checkpoint) + time.strftime(
"%Y%m%d-%H%M%S") + '/'
data_path = './data/{}/'.format(SET)
gold_summary_path = data_path + 'summaries/'
for topic in os.listdir(data_path):
print(topic)
if topic == 'summaries':
continue
if SET.startswith('TAC'):
topic = topic[:-3] + topic[-2:]
summary = ''
predictions_topic = predictions[predictions['topic'] == topic]
if DUC2004_Benchmark:
predictions_topic = predictions_topic[predictions_topic['prediction'] >= 0.4]
else:
predictions_topic = predictions_topic[predictions_topic['prediction'] >= 0.04]
predictions_topic = predictions_topic.sort_values(by=['prediction'], ascending=False)
if len(predictions_topic) == 0:
empty += 1
continue
if CLUSTERING:
simMat = read_simMats(topic, predictions_topic, SET)
cluster_mat(simMat, predictions_topic['simMat_idx'].values, predictions_topic)
oracle_per_cluster(SET, gold_summary_path, topic, predictions_topic, MAX_CLUSTERS)
allowed_clusters = list(
predictions_topic.sort_values(by=['cluster_size', 'inFile_sentIdx'], ascending=[False, True])[
'cluster_idx'].drop_duplicates(keep="first").values)[:MAX_CLUSTERS]
selected_spans = []
summary = ' '
for allowed_cluster_idx in allowed_clusters:
predictions_topic_cluster = predictions_topic[
predictions_topic['cluster_idx'] == allowed_cluster_idx]
predictions_topic_cluster = predictions_topic_cluster.sort_values(by=['prediction'],
ascending=False)
if len(predictions_topic_cluster) > 0:
if FULL_SENT:
predictions_topic_cluster['docSentText_special_tokens'] = predictions_topic_cluster.apply(lambda x: add_OIE_special_tok(x['docSpanOffsets'], x['docSentCharIdx'], x['docSentText']), axis=1)
fusion_text.append(
'<s> ' + ' </s> <s> '.join(
list(predictions_topic_cluster['docSentText_special_tokens'].values)) + ' </s>')
else:
fusion_text.append(
'<s> ' + ' </s> <s> '.join(list(predictions_topic_cluster['docSpanText'].values)) + ' </s>')
fusion_target.append(find_abstractive_target(predictions_topic_cluster, alignments, topic))
cluster_metadata.append([topic, list(predictions_topic_cluster.index)])
assert (predictions['docSpanText'].values[predictions_topic_cluster.index[0]]
== predictions_topic_cluster['docSpanText'].values[0])
if DUC2004_Benchmark:
out_dir = 'fusion_data/DUC2004{}/{}/'.format(full_sent_flag,model_name)
else:
out_dir = 'fusion_data/TAC2011{}/'.format(model_name)
if not os.path.exists(out_dir):
os.makedirs(out_dir)
cluster_metadata_df = pd.DataFrame(cluster_metadata, columns=['topic', 'cluster_indexes'])
cluster_metadata_df.to_csv('{}/cluster_metadata_{}.csv'.format(out_dir,'_'.join(DATASETS)))
if SET_TYPE == 'dev':
SET_TYPE = 'val'
with open('{}/{}.source'.format(out_dir, SET_TYPE), 'w') as f:
f.write('\n'.join(fusion_text).replace('...', ' '))
with open('{}/{}.target'.format(out_dir, SET_TYPE), 'w') as f:
f.write('\n'.join(fusion_target).replace('...', ' '))
|
[
"pandas.DataFrame",
"utils.offset_decreaseSentOffset",
"os.makedirs",
"numpy.argmax",
"deriveSummaryDUC.cluster_mat",
"deriveSummaryDUC.oracle_per_cluster",
"os.path.exists",
"time.strftime",
"utils.insert_string",
"deriveSummaryDUC.read_simMats",
"utils.offset_str2list",
"os.listdir"
] |
[((1114, 1145), 'utils.offset_str2list', 'offset_str2list', (['docSpanOffsets'], {}), '(docSpanOffsets)\n', (1129, 1145), False, 'from utils import offset_str2list, offset_decreaseSentOffset, insert_string\n'), ((1161, 1216), 'utils.offset_decreaseSentOffset', 'offset_decreaseSentOffset', (['docSentCharIdx', 'span_offsets'], {}), '(docSentCharIdx, span_offsets)\n', (1186, 1216), False, 'from utils import offset_str2list, offset_decreaseSentOffset, insert_string\n'), ((7917, 7985), 'pandas.DataFrame', 'pd.DataFrame', (['cluster_metadata'], {'columns': "['topic', 'cluster_indexes']"}), "(cluster_metadata, columns=['topic', 'cluster_indexes'])\n", (7929, 7985), True, 'import pandas as pd\n'), ((958, 990), 'numpy.argmax', 'np.argmax', (['summ_span_cands_score'], {}), '(summ_span_cands_score)\n', (967, 990), True, 'import numpy as np\n'), ((1373, 1410), 'utils.insert_string', 'insert_string', (['sent', 'offset[1]', '""" > """'], {}), "(sent, offset[1], ' > ')\n", (1386, 1410), False, 'from utils import offset_str2list, offset_decreaseSentOffset, insert_string\n'), ((1427, 1464), 'utils.insert_string', 'insert_string', (['sent', 'offset[0]', '""" < """'], {}), "(sent, offset[0], ' < ')\n", (1440, 1464), False, 'from utils import offset_str2list, offset_decreaseSentOffset, insert_string\n'), ((4657, 4678), 'os.listdir', 'os.listdir', (['data_path'], {}), '(data_path)\n', (4667, 4678), False, 'import os\n'), ((7829, 7852), 'os.path.exists', 'os.path.exists', (['out_dir'], {}), '(out_dir)\n', (7843, 7852), False, 'import os\n'), ((7863, 7883), 'os.makedirs', 'os.makedirs', (['out_dir'], {}), '(out_dir)\n', (7874, 7883), False, 'import os\n'), ((4468, 4498), 'time.strftime', 'time.strftime', (['"""%Y%m%d-%H%M%S"""'], {}), "('%Y%m%d-%H%M%S')\n", (4481, 4498), False, 'import time\n'), ((5474, 5517), 'deriveSummaryDUC.read_simMats', 'read_simMats', (['topic', 'predictions_topic', 'SET'], {}), '(topic, predictions_topic, SET)\n', (5486, 5517), False, 'from deriveSummaryDUC import read_simMats, cluster_mat, oracle_per_cluster\n'), ((5535, 5613), 'deriveSummaryDUC.cluster_mat', 'cluster_mat', (['simMat', "predictions_topic['simMat_idx'].values", 'predictions_topic'], {}), "(simMat, predictions_topic['simMat_idx'].values, predictions_topic)\n", (5546, 5613), False, 'from deriveSummaryDUC import read_simMats, cluster_mat, oracle_per_cluster\n'), ((5633, 5719), 'deriveSummaryDUC.oracle_per_cluster', 'oracle_per_cluster', (['SET', 'gold_summary_path', 'topic', 'predictions_topic', 'MAX_CLUSTERS'], {}), '(SET, gold_summary_path, topic, predictions_topic,\n MAX_CLUSTERS)\n', (5651, 5719), False, 'from deriveSummaryDUC import read_simMats, cluster_mat, oracle_per_cluster\n')]
|
import logging
import timeit
import numpy as np
import pandas as pd
from tqdm import tqdm
from unified_model import UnifiedModel
from unified_model.utils import truncate_middle, ITEM_COLUMN, SCORE_COLUMN
log = logging.getLogger(__name__)
UNKNOWN_ITEM = '<UNK>'
# https://en.wikipedia.org/wiki/Evaluation_measures_(information_retrieval)
# http://scikit-learn.org/stable/modules/model_evaluation.html
def f1_score(precision, recall):
return 2 * precision * recall / (precision + recall)
def evaluate_classifier(unified_model, test_data: list, target_predictions: list, k: list = None, per_label=False):
# TODO multithreaded evaluation
k = [1, 5] if k is None else [k] if isinstance(k, int) else k # set default value for k
k.sort()
pred_labels, pred_scores, in_top_k, avg_pred_time = _process_predictions(unified_model,
test_data,
target_predictions, k)
scored_labels = _score_labels(target_predictions, k, pred_labels, in_top_k)
metrics = _calculate_metrics(scored_labels, k)
metrics['avg_prediction_time'] = avg_pred_time
if per_label:
return metrics, scored_labels
else:
return metrics
def _calculate_metrics(scored_labels, k):
metrics = {}
for i in k:
i = str(i)
try:
metrics['micro_precision@k' + i] = scored_labels['true_positives@k' + i].sum() / scored_labels[
'predicted_count@k' + i].sum()
except ZeroDivisionError:
metrics['micro_precision@k' + i] = 0
metrics['micro_recall@k' + i] = scored_labels['true_positives@k' + i].sum() / scored_labels['count'].sum()
try:
metrics['micro_f1@k' + i] = f1_score(metrics['micro_precision@k' + i], metrics['micro_recall@k' + i])
except ZeroDivisionError:
metrics['micro_f1@k' + i] = 0
metrics['macro_precision@k' + i] = scored_labels['precision@k' + i].mean()
metrics['macro_recall@k' + i] = scored_labels['recall@k' + i].mean()
metrics['macro_f1@k' + i] = scored_labels['f1@k' + i].mean()
return metrics
def _score_labels(target_predictions, k, pred_labels, in_top_k):
unique_labels = list(set(target_predictions))
    target_predictions = np.array(target_predictions)  # convert true predictions to a numpy array
columns = ['count'] # tp + fn
for i in k:
i = str(i)
columns.append('predicted_count@k' + i) # tp + fp
columns.append('true_positives@k' + i)
columns.append('precision@k' + i)
columns.append('recall@k' + i)
columns.append('f1@k' + i)
df = pd.DataFrame(0, columns=columns, index=unique_labels)
for label in unique_labels:
df['count'][label] = np.sum(target_predictions == label)
for i in k:
df['predicted_count@k' + str(i)][label] = np.sum(pred_labels[:, :i].flatten() == label)
df['true_positives@k' + str(i)][label] = np.sum(in_top_k[i][target_predictions == label])
for i in k:
i = str(i)
df['precision@k' + i] = df['true_positives@k' + i] / df['predicted_count@k' + i]
df['recall@k' + i] = df['true_positives@k' + i] / df['count']
df['f1@k' + i] = f1_score(df['precision@k' + i], df['recall@k' + i])
df = df.fillna(0)
return df.sort_values(by='count', ascending=False)
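# Worked example added for clarity (illustrative numbers only): with k=1,
# true labels ['a', 'a', 'b'] and top-1 predictions ['a', 'b', 'b'],
# label 'a' gets count=2, predicted_count@k1=1, true_positives@k1=1
# (precision 1.0, recall 0.5), and label 'b' gets count=1, predicted_count@k1=2,
# true_positives@k1=1 (precision 0.5, recall 1.0), so micro precision and
# recall are both 2/3 while the macro averages are both 0.75.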
def _fill_missing_predictions(df: pd.DataFrame, max_k: int) -> pd.DataFrame:
for i in range(max_k - df.shape[0]):
df = df.append({ITEM_COLUMN: UNKNOWN_ITEM,
SCORE_COLUMN: 0}, ignore_index=True)
return df
def _process_predictions(unified_model, test_data, target_predictions, k):
# allow target_predictions to also contain a list of true labels per prediction
    target_predictions = np.array(target_predictions)  # convert true predictions to a numpy array
start_time = timeit.default_timer()
predictions = []
for data in tqdm(test_data, desc="Calculating metrics..."):
try:
prediction_result = unified_model.predict(data, limit=np.amax(k))
if prediction_result.shape[0] < np.amax(k):
log.warning("Model returned " + str(prediction_result.shape[0]) + " predictions, "
+ str(np.amax(k)) + " were expected.")
log.debug("Model data: " + str(data))
prediction_result = _fill_missing_predictions(prediction_result, np.amax(k))
if prediction_result is None:
log.warning("Model returned no prediction (None).")
log.debug("Model data: " + str(data))
# add empty predictions
prediction_result = _fill_missing_predictions(pd.DataFrame(columns=[ITEM_COLUMN, SCORE_COLUMN]),
np.amax(k))
except Exception as ex:
log.warning("Exception during prediction: " + str(ex))
log.debug("Model data: " + str(data))
prediction_result = _fill_missing_predictions(pd.DataFrame(columns=[ITEM_COLUMN, SCORE_COLUMN]), np.amax(k))
predictions.append(prediction_result)
avg_pred_time = ((timeit.default_timer() - start_time) / len(test_data) * 1000)
pred_labels = np.array([prediction[ITEM_COLUMN].tolist() for prediction in predictions])
pred_scores = np.array([prediction[SCORE_COLUMN].tolist() for prediction in predictions])
in_top_k = {}
for i in k:
in_top_k[i] = np.array(
[true_label in k_predictions[:i] for true_label, k_predictions in zip(target_predictions, pred_labels)])
return pred_labels, pred_scores, in_top_k, avg_pred_time
def compare_models(unified_models: list, data_list: list, target_predictions: list, styled=True,
**kwargs) -> pd.DataFrame:
"""
Compare evaluation metrics for the given list of models.
# Arguments
        unified_models (list): List of unified model instances to compare.
        data_list (list): List of data items used for the evaluations.
target_predictions (list): List of true predictions for test data.
styled (boolean): If 'True', a styled DataFrame will be returned (with coloring, etc.)
**kwargs: Provide additional keyword-based parameters.
# Returns
DataFrame that summarizes the metrics of all of the given models.
"""
model_names = []
metrics_per_model = []
for model in unified_models:
print("Calculating metrics for " + str(model))
model_names.append(truncate_middle(str(model), 40))
metrics_per_model.append(model.evaluate(data_list, target_predictions, **kwargs))
## compare evaluation df, also use color to show best and worst values
# add random baseline and combined score
df = pd.DataFrame(metrics_per_model, index=model_names)
# https://pandas.pydata.org/pandas-docs/stable/style.html
if styled:
# return df.style.bar(color='#f0fbff')
return df.style.background_gradient(cmap='BuGn', low=0.1, high=0.8, axis=0)
else:
return df
def test_unified_model(model_instance: UnifiedModel, data=None, conda_environment=False):
"""
Helps to test whether your model instance can be successfully loaded in another python environment.
This method saves the model instance, loads the model file in another python process,
and (optionally) calls `predict()` with the provided test data.
# Arguments
model_instance (UnifiedModel): Unified model instance.
data (string or bytes): Input data to test the model (optional).
conda_environment (bool): If `True`, a clean conda environment will be created for the test (optional).
"""
import sys
import os
import tempfile
import subprocess
import shutil
log.info("Starting model test.")
temp_test_folder = tempfile.mkdtemp()
saved_model_path = model_instance.save(os.path.join(temp_test_folder, "test_model"))
python_runtime = sys.executable
CONDA_ENV = "model-test-env"
if conda_environment:
log.info("Creating clean conda environment.")
try:
log.info(subprocess.check_output("conda create -n " + CONDA_ENV + " python=3.6 cython -y",
stderr=subprocess.STDOUT, shell=True).decode("utf-8"))
log.info("Installing unified model.")
log.info(
subprocess.check_output("/opt/conda/envs/"
+ CONDA_ENV
+ "/bin/pip install --upgrade unified-model",
stderr=subprocess.STDOUT,
shell=True).decode("utf-8"))
python_runtime = "/opt/conda/envs/" + CONDA_ENV + "/bin/python"
except subprocess.CalledProcessError as e:
log.info("Failed to create conda environment: \n" + e.output.decode("utf-8"))
test_command = python_runtime + " " + saved_model_path + ' predict'
if data:
test_command += ' --input-data "' + str(data) + '"'
log.info("Executing " + test_command)
try:
log.info(subprocess.check_output(test_command, stderr=subprocess.STDOUT, shell=True).decode("utf-8"))
log.info("Finished model test successfully!")
except subprocess.CalledProcessError as e:
log.info("Test failed: \n" + e.output.decode("utf-8"))
shutil.rmtree(temp_test_folder)
if conda_environment:
log.info("Removing conda environment.")
subprocess.call("conda remove --name " + CONDA_ENV + " --all -y", shell=True)
|
[
"pandas.DataFrame",
"tqdm.tqdm",
"numpy.sum",
"timeit.default_timer",
"subprocess.check_output",
"numpy.amax",
"tempfile.mkdtemp",
"numpy.array",
"subprocess.call",
"shutil.rmtree",
"os.path.join",
"logging.getLogger"
] |
[((213, 240), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (230, 240), False, 'import logging\n'), ((2368, 2396), 'numpy.array', 'np.array', (['target_predictions'], {}), '(target_predictions)\n', (2376, 2396), True, 'import numpy as np\n'), ((2740, 2793), 'pandas.DataFrame', 'pd.DataFrame', (['(0)'], {'columns': 'columns', 'index': 'unique_labels'}), '(0, columns=columns, index=unique_labels)\n', (2752, 2793), True, 'import pandas as pd\n'), ((3895, 3923), 'numpy.array', 'np.array', (['target_predictions'], {}), '(target_predictions)\n', (3903, 3923), True, 'import numpy as np\n'), ((3981, 4003), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (4001, 4003), False, 'import timeit\n'), ((4042, 4088), 'tqdm.tqdm', 'tqdm', (['test_data'], {'desc': '"""Calculating metrics..."""'}), "(test_data, desc='Calculating metrics...')\n", (4046, 4088), False, 'from tqdm import tqdm\n'), ((6824, 6874), 'pandas.DataFrame', 'pd.DataFrame', (['metrics_per_model'], {'index': 'model_names'}), '(metrics_per_model, index=model_names)\n', (6836, 6874), True, 'import pandas as pd\n'), ((7898, 7916), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (7914, 7916), False, 'import tempfile\n'), ((9473, 9504), 'shutil.rmtree', 'shutil.rmtree', (['temp_test_folder'], {}), '(temp_test_folder)\n', (9486, 9504), False, 'import shutil\n'), ((2855, 2890), 'numpy.sum', 'np.sum', (['(target_predictions == label)'], {}), '(target_predictions == label)\n', (2861, 2890), True, 'import numpy as np\n'), ((7960, 8004), 'os.path.join', 'os.path.join', (['temp_test_folder', '"""test_model"""'], {}), "(temp_test_folder, 'test_model')\n", (7972, 8004), False, 'import os\n'), ((9588, 9665), 'subprocess.call', 'subprocess.call', (["('conda remove --name ' + CONDA_ENV + ' --all -y')"], {'shell': '(True)'}), "('conda remove --name ' + CONDA_ENV + ' --all -y', shell=True)\n", (9603, 9665), False, 'import subprocess\n'), ((3064, 3112), 'numpy.sum', 'np.sum', (['in_top_k[i][target_predictions == label]'], {}), '(in_top_k[i][target_predictions == label])\n', (3070, 3112), True, 'import numpy as np\n'), ((4226, 4236), 'numpy.amax', 'np.amax', (['k'], {}), '(k)\n', (4233, 4236), True, 'import numpy as np\n'), ((5282, 5304), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (5302, 5304), False, 'import timeit\n'), ((4170, 4180), 'numpy.amax', 'np.amax', (['k'], {}), '(k)\n', (4177, 4180), True, 'import numpy as np\n'), ((4539, 4549), 'numpy.amax', 'np.amax', (['k'], {}), '(k)\n', (4546, 4549), True, 'import numpy as np\n'), ((4817, 4866), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': '[ITEM_COLUMN, SCORE_COLUMN]'}), '(columns=[ITEM_COLUMN, SCORE_COLUMN])\n', (4829, 4866), True, 'import pandas as pd\n'), ((4930, 4940), 'numpy.amax', 'np.amax', (['k'], {}), '(k)\n', (4937, 4940), True, 'import numpy as np\n'), ((5149, 5198), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': '[ITEM_COLUMN, SCORE_COLUMN]'}), '(columns=[ITEM_COLUMN, SCORE_COLUMN])\n', (5161, 5198), True, 'import pandas as pd\n'), ((5200, 5210), 'numpy.amax', 'np.amax', (['k'], {}), '(k)\n', (5207, 5210), True, 'import numpy as np\n'), ((9211, 9286), 'subprocess.check_output', 'subprocess.check_output', (['test_command'], {'stderr': 'subprocess.STDOUT', 'shell': '(True)'}), '(test_command, stderr=subprocess.STDOUT, shell=True)\n', (9234, 9286), False, 'import subprocess\n'), ((8191, 8314), 'subprocess.check_output', 'subprocess.check_output', (["('conda create -n ' + CONDA_ENV + ' python=3.6 cython 
-y')"], {'stderr': 'subprocess.STDOUT', 'shell': '(True)'}), "('conda create -n ' + CONDA_ENV +\n ' python=3.6 cython -y', stderr=subprocess.STDOUT, shell=True)\n", (8214, 8314), False, 'import subprocess\n'), ((8461, 8607), 'subprocess.check_output', 'subprocess.check_output', (["('/opt/conda/envs/' + CONDA_ENV + '/bin/pip install --upgrade unified-model')"], {'stderr': 'subprocess.STDOUT', 'shell': '(True)'}), "('/opt/conda/envs/' + CONDA_ENV +\n '/bin/pip install --upgrade unified-model', stderr=subprocess.STDOUT,\n shell=True)\n", (8484, 8607), False, 'import subprocess\n'), ((4371, 4381), 'numpy.amax', 'np.amax', (['k'], {}), '(k)\n', (4378, 4381), True, 'import numpy as np\n')]
|
from luminaire.model.base_model import BaseModel, BaseModelHyperParams
from luminaire.exploration.data_exploration import DataExploration
class WindowDensityHyperParams(BaseModelHyperParams):
"""
Hyperparameter class for Luminaire Window density model.
    :param str freq: The frequency of the time-series. Luminaire supports default configuration for 'S', 'T', '15T',
'H', 'D'. Any other frequency type should be specified as 'custom' and configuration should be set manually.
    :param float max_missing_train_prop: Maximum proportion of missing observations allowed in the training data.
:param bool is_log_transformed: A flag to specify whether to take a log transform of the input data.
If the data contain negatives, is_log_transformed is ignored even though it is set to True.
    :param str baseline_type: A string flag to specify whether to set the baseline as the previous sub-window from
the training data for scoring or to aggregate the overall window as a baseline. Possible values:
- "last_window"
- "aggregated"
    :param str detection_method: A string that selects between two window testing methods. Possible values:
- "kldiv" (KL-divergence). This is recommended to be set for high frequency time series such as 'S', 'T' etc.
- "sign_test" (Wilcoxon sign rank test). This is recommended to be set for low frequency time series such as 'H', 'D' etc.
:param int min_window_length: Minimum size of the scoring window / a stable training sub-window length.
.. Note :: This is not the minimum size of the whole training window which is the combination of stable sub-windows.
:param int max_window_length: Maximum size of the scoring window / a stable training sub-window length.
.. Note :: This is not the maximum size of the whole training window which is the combination of stable sub-windows.
:param int window_length: Size of the scoring window / a stable training sub-window length.
.. Note :: This is not the size of the whole training window which is the combination of stable sub-windows.
    :param str detrend_method: A string that selects between two stationarizing methods. Possible values:
- "ma" (moving average based)
- "diff" (differencing based).
"""
def __init__(self,
freq=None,
max_missing_train_prop=0.1,
is_log_transformed=False,
baseline_type="aggregated",
detection_method=None,
min_window_length=None,
max_window_length=None,
window_length=None,
detrend_method='modeling'
):
super(WindowDensityHyperParams, self).__init__(
model_name="WindowDensityModel",
freq=freq,
max_missing_train_prop=max_missing_train_prop,
is_log_transformed=is_log_transformed,
baseline_type=baseline_type,
detection_method=detection_method,
min_window_length=min_window_length,
max_window_length=max_window_length,
window_length=window_length,
detrend_method=detrend_method
)
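def _example_window_density_config():
    """Illustrative sketch added next to the hyperparameter class (not part of
    the original module; the call pattern is an assumption based on the
    docstrings above, while the .params attribute follows the annotation used
    by WindowDensityModel below): build an hourly configuration that uses the
    Wilcoxon sign test, which is recommended for low frequency series, and
    hand the resulting dictionary to WindowDensityModel."""
    hyper_params = WindowDensityHyperParams(freq='H', detection_method='sign_test').params
    return WindowDensityModel(hyper_params=hyper_params)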
class WindowDensityModel(BaseModel):
"""
This model detects anomalous windows using KL divergence (for high frequency data) and Wilcoxon sign rank test
    (for low frequency data). The default monitoring frequency is set to pandas time frequency type 'T'.
:param dict hyper_params: Hyper parameters for Luminaire window density model.
See :class:`luminaire.model.window_density.WindowDensityHyperParams` for detailed information.
:return: Anomaly probability for the execution window and other related model outputs
:rtype: list[dict]
"""
__version__ = "0.1"
def __init__(self,
hyper_params: WindowDensityHyperParams().params or None,
**kwargs):
# Specifying the minimum and maximum number of training windows
self.min_num_train_windows = 5
self.max_num_train_windows = 10000
self.hyper_params = hyper_params
self.sig_level = 0.001
super(WindowDensityModel, self).__init__(**hyper_params, **kwargs)
def _volume_shift_detection(self, mean_list=None, sd_list=None, probability_threshold=0.5):
"""
This function detects any significant shift in the training data volume using a Bayesian change point detection
technique.
:param list mean_list: The list of means from each training sub-window.
:param list sd_list: The list of standard deviations from each training sub-window.
:param float probability_threshold: Threshold for the probability value to be flagged as a change point.
        :return: Index of the most recent significant data volume shift.
:rtype: int
"""
import numpy as np
from bayesian_changepoint_detection import offline_changepoint_detection as offcd
from functools import partial
# Volume shift detection over the means of the training window
q, p, pcp = offcd.offline_changepoint_detection(
data=np.array(mean_list),
prior_func=partial(offcd.const_prior, l=(len(mean_list) + 1)),
observation_log_likelihood_function=offcd.gaussian_obs_log_likelihood,
truncate=-10)
mask_mean = np.append(0, np.exp(pcp).sum(0)) > probability_threshold
# Volume shift detection over the standard deviations of the training window
change_points = np.array(mask_mean).nonzero()
last_mean_cp = change_points[0][-1] if len(change_points[0]) > 0 else []
q, p, pcp = offcd.offline_changepoint_detection(
data=np.array(sd_list),
prior_func=partial(offcd.const_prior, l=(len(sd_list) + 1)),
observation_log_likelihood_function=offcd.gaussian_obs_log_likelihood,
truncate=-10)
mask_sd = np.append(0, np.exp(pcp).sum(0)) > probability_threshold
change_points = np.array(mask_sd).nonzero()
last_sd_cp = change_points[0][-1] if len(change_points[0]) > 0 else []
# Change point is the maximum obtained from mean list and the standard deviation list
cdate = max(last_mean_cp, last_sd_cp)
return cdate
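    # Illustrative sketch (hypothetical numbers, not part of the library): if the per-sub-window
    # means were [100, 101, 99, 250, 252] and the standard deviations [5, 6, 5, 20, 21], the
    # offline changepoint probability peaks around index 3; once it exceeds the 0.5 threshold the
    # method returns 3, and _training_data_truncation later keeps only the sub-windows from that
    # index onward.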
def _distance_function(self, data=None, called_for=None, baseline=None):
"""
This function finds the distance of the given data from the baseline using KL divergence.
:param list data: The list containing the scoring window (for scoring) / training sub-window (for training).
:param str called_for: A flag to specify whether this function is called for training or scoring.
:param list baseline: A list containing the base line to be compared with the given data.
:return: KL divergence between two time windows.
:rtype: float
"""
import numpy as np
import scipy.stats as stats
float_min = 1e-50
float_max = 1e50
        # If called for training, KL divergence is performed over each pair of consecutive windows to create
# the past anomaly scores
if called_for == "training":
distance = []
for i in range(0, len(data) - 1):
q = stats.kde.gaussian_kde(data[i])
p = stats.kde.gaussian_kde(data[i + 1])
ts_min = min(np.min(data[i]), np.min(data[i + 1]))
ts_max = max(np.max(data[i]), np.max(data[i + 1]))
density_domain = np.linspace(ts_min, ts_max, 1000)
q = q(density_domain)
p = p(density_domain)
# approximating the zero probability regions to avoid divide by zero issue in KL divergence
q[q == 0] = min(np.array(q)[np.array(q) > 0])
p[p == 0] = min(np.array(p)[np.array(p) > 0])
q = np.clip(q, float_min, float_max)
p = np.clip(p, float_min, float_max)
distance.append(stats.entropy(pk=p, qk=q))
        # If called for scoring, KL divergence is performed between the scoring window and the baseline
elif called_for == "scoring":
q = stats.kde.gaussian_kde(baseline)
p = stats.kde.gaussian_kde(data)
ts_min = min(np.min(baseline), np.min(data))
ts_max = max(np.max(baseline), np.max(data))
density_domain = np.linspace(ts_min, ts_max, 1000)
q = q(density_domain)
p = p(density_domain)
q[q == 0] = min(np.array(q)[np.array(q) > 0])
p[p == 0] = min(np.array(p)[np.array(p) > 0])
q = np.clip(q, float_min, float_max)
p = np.clip(p, float_min, float_max)
distance = stats.entropy(pk=p, qk=q)
return distance
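    # Illustrative sketch (hypothetical numbers): two windows drawn from the same distribution give
    # a KL divergence close to 0, while a shifted window gives a clearly larger value, e.g.
    #   base = np.random.normal(0, 1, 500); shifted = np.random.normal(3, 1, 500)
    #   grid = np.linspace(-5, 8, 1000)
    #   stats.entropy(pk=stats.kde.gaussian_kde(shifted)(grid), qk=stats.kde.gaussian_kde(base)(grid))
    # The per-pair training scores returned above are later used in _get_model to fit a gamma distribution.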
def _training_data_truncation(self, sliced_training_data=None):
"""
This function performs the truncation of the training data using the _volume_shift_detection function.
:param list sliced_training_data: The list containing the training data.
:return: Sliced training sample based on the most recent change point
:rtype: list
"""
import numpy as np
# Change point detection is performed over the means and standard deviations of the sub windows
window_means = []
window_sds = []
for ts in sliced_training_data:
window_means.append(np.mean(ts))
window_sds.append(np.std(ts))
change_point = self._volume_shift_detection(mean_list=window_means, sd_list=window_sds)
# Truncating the training data based on the last change point
if change_point:
sliced_training_data_truncated = sliced_training_data[change_point:]
return sliced_training_data_truncated
else:
return sliced_training_data
def _call_training(self, df=None, window_length=None, imputed_metric=None, detrend_method=None,
detection_method=None, freq=None, **kwargs):
"""
This function generates the baseline and training metrics to be used for scoring.
:param pandas.DataFrame df: Input training data frame.
:param int window_length: The length of a training sub-window.
:param str imputed_metric: Column storing the time series values.
:param str detrend_method: Detrend method "modeling" or "diff" for nonstationarity.
:param str detection_method: Detection method "kldiv" or "sign_test".
:param str freq: Data frequency.
:return: Returns past anomaly scores based on training data, baseline and other related metrics.
:rtype: tuple(list, float, float, float, int, list, luminaire.model, float, dict, list)
"""
import pandas as pd
past_anomaly_scores = dict()
gamma_alpha = dict()
gama_loc = dict()
gamma_beta = dict()
detrend_order = dict()
baseline = dict()
agg_data_model = dict()
agg_data = dict()
past_model = kwargs.get('past_model')
training_start = df.first_valid_index()
training_end = df.last_valid_index()
current_training_end = training_end
while (training_end - current_training_end) < pd.Timedelta('1D'):
df_current = df[df.index <= current_training_end]
past_anomaly_scores_current, gamma_alpha_current, gama_loc_current, gamma_beta_current, \
detrend_order_current, baseline_current, agg_data_model_current, \
agg_data_current = self._anomalous_region_detection(input_df=df_current,
window_length=window_length,
value_column=imputed_metric,
called_for="training",
detrend_method=detrend_method,
past_model=past_model,
detection_method=detection_method)
past_anomaly_scores.update({str(current_training_end.time().strftime('%H:%M:%S')): past_anomaly_scores_current})
gamma_alpha.update({str(current_training_end.time().strftime('%H:%M:%S')): float(gamma_alpha_current) if gamma_alpha_current else None})
gama_loc.update({str(current_training_end.time().strftime('%H:%M:%S')): float(gama_loc_current) if gama_loc_current else None})
gamma_beta.update({str(current_training_end.time().strftime('%H:%M:%S')): float(gamma_beta_current) if gamma_beta_current else None})
detrend_order.update({str(current_training_end.time().strftime('%H:%M:%S')): detrend_order_current})
baseline.update({str(current_training_end.time().strftime('%H:%M:%S')): baseline_current})
agg_data_model.update({str(current_training_end.time().strftime('%H:%M:%S')): agg_data_model_current})
agg_data.update({str(current_training_end.time().strftime('%H:%M:%S')): agg_data_current})
if isinstance(freq, str):
freq = pd.Timedelta('1' + freq)
current_training_end = current_training_end - min(pd.Timedelta('30T'), freq * 10)
return past_anomaly_scores, gamma_alpha, gama_loc, gamma_beta, \
detrend_order, baseline, agg_data_model, agg_data, training_start, training_end
def _get_model(self, input_df=None, window_length=None, value_column=None, detrend_method=None, baseline_type=None,
detection_method=None, past_model=None):
"""
This function runs the training process given the input parameters.
:param pandas.DataFrame input_df: Input data containing the training and the scoring data.
:param int window_length: The length of a training sub-window / scoring window.
:param str value_column: Column containing the values.
:param str detrend_method: Selects between "modeling" or "diff" detrend method.
:param str baseline_type: Selects between "aggregated" or "last_window" baseline.
:param str detection_method: Selects between "kldiv" or "sign_test" distance method.
:param luminaire.model.window_density.WindowDensityModel past_model: luminaire.model to append model metadata from past
:return: Returns past anomaly scores based on training data, baseline and other related metrics.
:rtype: tuple(list, float, float, float, int, list, luminaire.model, float)
"""
import numpy as np
import pandas as pd
from itertools import chain
import scipy.stats as st
model_history_truncation_prop = 0.25 # This is the proportion of history to truncate from both sides
        # every time we store the past anomaly scores
de_obj = DataExploration()
sliced_training_data, agg_datetime = de_obj._partition(input_df, window_length, value_column)
# performing the stationarity test
sliced_training_data_cleaned, detrend_order, agg_data_model, agg_data = de_obj._detrender(
training_data_sliced=sliced_training_data,
significance_level=0.05,
detrend_method=detrend_method,
agg_datetime=agg_datetime,
past_model=past_model)
# Obtain the past anomaly scores and the anomaly means and standard deviation if the detection method
# is KL divergence
if detection_method == "kldiv":
past_anomaly_scores = np.array(self._distance_function(data=sliced_training_data_cleaned,
called_for="training"))
if past_model:
model_timestamps = list(past_model._params['PastAnomalyScores'].keys())
training_end = input_df.index[-1]
current_min_timedelta = pd.Timedelta('10D')
for timestamp in model_timestamps:
current_datetime = pd.Timestamp(str(training_end.date()) + ' ' + timestamp)
temp_timedelta = training_end - current_datetime
temp_timedelta = pd.Timedelta('1D') + temp_timedelta if temp_timedelta < pd.Timedelta(
0) else temp_timedelta
if temp_timedelta < current_min_timedelta:
opt_timestamp = timestamp
current_min_timedelta = temp_timedelta
past_anomaly_scores = np.concatenate([past_model._params['PastAnomalyScores'][opt_timestamp][
int(len(past_anomaly_scores) * model_history_truncation_prop):
-int(len(past_anomaly_scores) * model_history_truncation_prop)]
, past_anomaly_scores])
if len(past_anomaly_scores) < 100:
alpha = []
loc = []
beta = []
for i in range(10):
boot_scores = np.random.choice(past_anomaly_scores.tolist(), size=100, replace=True)
alpha_i, loc_i, beta_i = st.gamma.fit(boot_scores)
alpha.append(alpha_i)
loc.append(loc_i)
beta.append(beta_i)
gamma_alpha = np.mean(alpha)
gamma_loc = np.mean(loc)
gamma_beta = np.mean(beta)
else:
gamma_alpha, gamma_loc, gamma_beta = st.gamma.fit(past_anomaly_scores)
else:
past_anomaly_scores, gamma_alpha, gamma_loc, gamma_beta = None, None, None, None
# If aggregated baseline type is specified, we take the whole training window as a baseline, else we
# take the last training sub window from the sliced training data
if baseline_type == "aggregated":
sliced_training_data_cleaned = self._training_data_truncation(
sliced_training_data=sliced_training_data_cleaned)
if detection_method == "kldiv":
baseline = list(chain.from_iterable(sliced_training_data_cleaned))
elif detection_method == "sign_test":
baseline = sliced_training_data_cleaned
elif baseline_type == "last_window":
baseline = sliced_training_data_cleaned[-1]
return past_anomaly_scores, gamma_alpha, gamma_loc, gamma_beta, detrend_order, \
baseline, agg_data_model, agg_data
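    # Sketch of how the gamma fit above is consumed at scoring time (mirrors _get_result below):
    # a new KL score is converted into an anomaly probability through the fitted CDF,
    #   prob_of_anomaly = st.gamma.cdf(new_score, gamma_alpha, gamma_loc, gamma_beta)
    # and the scoring window is flagged anomalous when 1 - prob_of_anomaly < self.sig_level.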
def train(self, data, **kwargs):
"""
Input time series for training.
:param pandas.DataFrame data: Input time series.
:return: Trained model with the training timestamp and a success flag
:rtype: tuple(bool, str, python model object)
>>> data
raw interpolated
index
2017-10-02 00:00:00 118870 118870
2017-10-02 01:00:00 121914 121914
2017-10-02 02:00:00 116097 116097
2017-10-02 03:00:00 94511 94511
2017-10-02 04:00:00 68330 68330
... ... ...
2018-10-10 19:00:00 219908 219908
2018-10-10 20:00:00 219149 219149
2018-10-10 21:00:00 207232 207232
2018-10-10 22:00:00 198741 198741
2018-10-10 23:00:00 213751 213751
>>> hyper_params = WindowDensityHyperParams(freq='H').params
>>> wdm_obj = WindowDensityModel(hyper_params=hyper_params)
>>> success, model = wdm_obj.train(data)
>>> success, model
(True, "2018-10-10 23:00:00", <luminaire.model.window_density.WindowDensityModel object at 0x7fd7c5a34e80>)
"""
import numpy as np
import pandas as pd
freq = pd.Timedelta(self._params['freq']) if self._params['freq'] not in ['S', 'T', '15T', 'H', 'D'] \
else self._params['freq']
if freq in ['S', 'T', '15T', 'H', 'D']:
window_length = self._params['window_length']
else:
min_window_length = self._params['min_window_length']
max_window_length = self._params['max_window_length']
window_length = self._params['window_length']
if not min_window_length or not max_window_length or not window_length:
raise ValueError(
'Training window length with min and max should be specified in case frequency not in the '
'specified list')
is_log_transformed = self._params['is_log_transformed']
detrend_method = self._params['detrend_method']
target_metric = 'raw'
imputed_metric = 'interpolated'
if not self._params['detection_method']:
if freq in ['S', 'T', '15T']:
detection_method = 'kldiv'
elif freq in ['H', 'D']:
detection_method = 'sign_test'
else:
detection_method = 'sign_test' if freq > np.timedelta64(30, 'm') else 'kldiv'
else:
detection_method = self._params['detection_method']
if len(data) == 0:
model = {'ErrorMessage': 'DataFrame length is 0'}
success = False
return success, WindowDensityModel(**model)
# Shift the interpolated value by +1 and get the log. This handles values with 0.
if is_log_transformed:
neg_flag = True if not data[data[target_metric] < 0].empty else False
data[imputed_metric] = data[imputed_metric] if neg_flag else np.log(data[imputed_metric] + 1)
past_anomaly_scores, anomaly_scores_gamma_alpha, anomaly_scores_gamma_loc, anomaly_scores_gamma_beta, \
detrend_order, baseline, agg_data_model, agg_data, \
training_start, training_end = self._call_training(df=data, window_length=window_length,
imputed_metric=imputed_metric,
detrend_method=detrend_method,
detection_method=detection_method,
freq=freq, **kwargs)
success = True
self.hyper_params['is_log_transformed'] = is_log_transformed
self.hyper_params['detection_method'] = detection_method
model = {'TrainingStartDate': str(training_start),
'PastAnomalyScores': past_anomaly_scores,
'AnomalyScoresGammaAlpha': anomaly_scores_gamma_alpha,
'AnomalyScoresGammaLoc': anomaly_scores_gamma_loc,
'AnomalyScoresGammaBeta': anomaly_scores_gamma_beta,
'NonStationarityOrder': detrend_order,
'Baseline': baseline,
'AggregatedDataModel': agg_data_model,
'AggregatedData': agg_data
}
return success, str(training_end), WindowDensityModel(hyper_params=self.hyper_params, **model)
def _call_scoring(self, df=None, target_metric=None, anomaly_scores_gamma_alpha=None, anomaly_scores_gamma_loc=None,
anomaly_scores_gamma_beta=None, baseline=None, detrend_order=None, detrend_method=None,
agg_data_model=None, detection_method=None, attributes=None, agg_data=None):
"""
        This function generates the anomaly flag and probability for the scoring window.
        :param pandas.DataFrame df: Input scoring data frame.
:param str target_metric: Column storing the time series values.
:param float anomaly_scores_gamma_alpha: Gamma fit alpha parameter.
:param float anomaly_scores_gamma_loc: Gamma fit location parameter.
:param float anomaly_scores_gamma_beta: Gamma fit beta parameter.
:param list baseline: A list storing a baseline window used to score the scoring window.
:param int detrend_order: The order of detrending based on MA or differencing method.
:param str detrend_method: Selects between "modeling" or "diff" detrend method.
:param luminaire.model.lad_structural.LADStructuralModel agg_data_model: Prediction model for aggregated data.
:param str detection_method: Selects between "kldiv" or "sign_test" distance method.
:param attributes: Model attributes.
:param agg_data: Aggregated Data per day.
:return: Returns the anomaly flag with the corresponding anomaly probability.
:rtype: tuple(bool, float, dict)
"""
is_anomaly, prob_of_anomaly = self._anomalous_region_detection(input_df=df, value_column=target_metric,
called_for="scoring",
anomaly_scores_gamma_alpha=anomaly_scores_gamma_alpha,
anomaly_scores_gamma_loc=anomaly_scores_gamma_loc,
anomaly_scores_gamma_beta=anomaly_scores_gamma_beta,
baseline=baseline,
detrend_order=detrend_order,
detrend_method=detrend_method,
agg_data_model=agg_data_model,
detection_method=detection_method,
agg_data=agg_data)
return is_anomaly, prob_of_anomaly, attributes
def _get_result(self, input_df=None, detrend_order=None, agg_data_model=None, value_column=None,
detrend_method=None, baseline_type=None, detection_method=None, baseline=None,
anomaly_scores_gamma_alpha=None, anomaly_scores_gamma_loc=None, anomaly_scores_gamma_beta=None,
agg_data=None):
"""
The function scores the scoring window for anomalies based on the training metrics and the baseline
:param pandas.DataFrame input_df: Input data containing the training and the scoring data.
:param int detrend_order: The non-negative order of detrending based on Modeling or differencing method. When
the detrend_order > 0, corresponding detrending need to be performed using the method specified in the model
config.
:param luminaire.model.lad_structural.LADStructuralModel agg_data_model: Prediction model for aggregated data.
:param str value_column: Column containing the values.
:param str detrend_method: Selects between "modeling" or "diff" detrend method.
:param str baseline_type: Selects between "aggregated" or "last_window" baseline.
:param str detection_method: Selects between "kldiv" or "sign_test" distance method.
:param list baseline: A list storing a baseline window used to score the scoring window.
:param float anomaly_scores_gamma_alpha: Gamma fit alpha parameter.
:param float anomaly_scores_gamma_loc: Gamma fit location parameter.
:param float anomaly_scores_gamma_beta: Gamma fit beta parameter.
:param agg_data: Aggregated Data per day.
:return: Returns the anomaly flag with the corresponding anomaly probability.
:rtype: tuple(bool, float)
"""
import numpy as np
import pandas as pd
import copy
import scipy.stats as st
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sklearn.covariance import EmpiricalCovariance, MinCovDet
import collections
import operator
is_anomaly = False
execution_data = input_df[value_column]
adjusted_execution_data = []
prob_of_anomaly = []
len_req_agg_data_model = 42 # Setting a hard threshold to have predictions from aggregated data
# for stationarity adjustment
if detrend_method == 'diff':
# Obtain the execution data and perform the necessary differencing
execution_data = list(execution_data)
adjusted_execution_data = np.diff(execution_data, detrend_order).tolist() if detrend_order > 0 \
else execution_data
elif detrend_method == 'modeling':
idx = input_df.index.normalize()
dates_freq_dist = dict(collections.Counter(idx))
scoring_datetime = str(max(dates_freq_dist.items(), key=operator.itemgetter(1))[0])
execution_data_avg = np.mean(execution_data)
            # If detrending is needed, we scale the scoring data accordingly using the agg_data_model forecast
if detrend_order > 0:
snapshot_len_max = min(len(agg_data), len_req_agg_data_model)
agg_data_trunc = np.array(agg_data)[:, 1][-snapshot_len_max:]
data_adjust_forecast = []
try:
                # Set the data adjustment window for the original data using the prediction and its CILower and
                # CIUpper bounds, keeping the prediction uncertainty of agg_data_model in mind
if agg_data_model and len(agg_data) > len_req_agg_data_model:
score = agg_data_model.score(execution_data_avg, scoring_datetime)
data_adjust_forecast.append(score['Prediction'])
data_adjust_forecast.append(score['CILower'])
data_adjust_forecast.append(score['CIUpper'])
else:
data_adjust_forecast.append(np.median(agg_data_trunc))
data_adjust_forecast.append(np.percentile(agg_data_trunc, 5)) # setting a 2-sigma limit
data_adjust_forecast.append(np.percentile(agg_data_trunc, 95)) # setting a 2-sigma limit
except:
# If the scoring for the agg_data_model fails for some reason, we use the latest agg_data for the
# detrending adjustment
data_adjust_forecast.append(np.median(agg_data_trunc))
data_adjust_forecast.append(np.percentile(agg_data_trunc, 5)) # setting a 2-sigma limit
data_adjust_forecast.append(np.percentile(agg_data_trunc, 95)) # setting a 2-sigma limit
for i in range(3):
if data_adjust_forecast[i] != 0:
adjusted_execution_data.append((execution_data / data_adjust_forecast[i]).tolist())
else:
adjusted_execution_data = list(execution_data)
        # KL divergence based anomaly detection
if detection_method == "kldiv":
if detrend_order > 0:
prob_of_anomaly = []
for i in range(3):
current_anomaly_score = self._distance_function(data=adjusted_execution_data[i],
called_for="scoring", baseline=baseline)
prob_of_anomaly.append(st.gamma.cdf(current_anomaly_score, anomaly_scores_gamma_alpha,
anomaly_scores_gamma_loc, anomaly_scores_gamma_beta))
prob_of_anomaly = np.min(prob_of_anomaly)
else:
current_anomaly_score = self._distance_function(data=adjusted_execution_data,
called_for="scoring", baseline=baseline)
prob_of_anomaly = st.gamma.cdf(current_anomaly_score, anomaly_scores_gamma_alpha,
anomaly_scores_gamma_loc, anomaly_scores_gamma_beta)
if 1 - prob_of_anomaly < self.sig_level:
is_anomaly = True
# Sign test based anomaly detection
elif detection_method == "sign_test":
            # If last window is the baseline, we perform the Wilcoxon signed rank test for means and the Levene
            # test for variance to detect anomalies
if baseline_type == "last_window":
test_stat_wilcoxon, pvalue_wilcoxon = st.wilcoxon(execution_data, baseline)
test_stat_levene, pvalue_levene = st.levene(execution_data, baseline)
if pvalue_wilcoxon < self.sig_level or pvalue_levene < self.sig_level:
is_anomaly = True
prob_of_anomaly = 1 - min(pvalue_wilcoxon, pvalue_levene)
# If aggregated is the baseline, we perform the Wilcoxon sign rank test for means and gamma distribution
# based test for the past standard deviations to detect anomalies
elif baseline_type == "aggregated":
baseline_sds = np.array(baseline).std(1).tolist()
if detrend_order == 0:
                    # creating a 2d list to make it easy to loop through in the following for loop
adjusted_execution_data = [adjusted_execution_data]
for current_adjusted_data in adjusted_execution_data:
baseline_execution_data = copy.copy(baseline)
baseline_execution_data.append(current_adjusted_data)
pca = PCA()
scores = pca.fit_transform(StandardScaler().fit_transform(baseline_execution_data))
robust_cov = MinCovDet().fit(scores[:, :3])
mahalanobis_distance = robust_cov.mahalanobis(scores[:, :3]) # getting the top 3 dimensions
pvalue_mahalanobis = 1 - st.chi2.cdf(mahalanobis_distance[-1],
np.array(baseline_execution_data).shape[1])
gamma_alpha, gamma_loc, gamma_beta = st.gamma.fit(baseline_sds)
pvalue_gamma = 1 - st.gamma.cdf(np.std(current_adjusted_data), gamma_alpha, gamma_loc, gamma_beta)
if pvalue_mahalanobis < self.sig_level or pvalue_gamma < self.sig_level:
is_anomaly = True
prob_of_anomaly.append(1 - min(pvalue_mahalanobis, pvalue_gamma))
prob_of_anomaly = np.min(prob_of_anomaly)
return is_anomaly, prob_of_anomaly
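    # Recap of the decision rules above: for "kldiv", the window is anomalous when the gamma-CDF
    # probability of its KL score leaves less than sig_level (0.001) in the upper tail; for
    # "sign_test" with a "last_window" baseline, the Wilcoxon and Levene p-values play that role,
    # and with an "aggregated" baseline, the Mahalanobis-distance p-value and a gamma test on the
    # window standard deviation do.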
def score(self, data, **kwargs):
"""
Function scores input series for anomalies
:param pandas.DataFrame data: Input time series to score
:return: Output dictionary with scoring summary.
:rtype: dict
>>> data
raw interpolated
index
2018-10-11 00:00:00 204800 204800
2018-10-11 01:00:00 222218 222218
2018-10-11 02:00:00 218903 218903
2018-10-11 03:00:00 190639 190639
2018-10-11 04:00:00 148214 148214
2018-10-11 05:00:00 106358 106358
2018-10-11 06:00:00 70081 70081
2018-10-11 07:00:00 47748 47748
2018-10-11 08:00:00 36837 36837
2018-10-11 09:00:00 33023 33023
2018-10-11 10:00:00 44432 44432
2018-10-11 11:00:00 72773 72773
2018-10-11 12:00:00 115180 115180
2018-10-11 13:00:00 157568 157568
2018-10-11 14:00:00 180174 180174
2018-10-11 15:00:00 190048 190048
2018-10-11 16:00:00 188391 188391
2018-10-11 17:00:00 189233 189233
2018-10-11 18:00:00 191703 191703
2018-10-11 19:00:00 189848 189848
2018-10-11 20:00:00 192685 192685
2018-10-11 21:00:00 196743 196743
2018-10-11 22:00:00 193016 193016
2018-10-11 23:00:00 196441 196441
>>> model
<luminaire.model.window_density.WindowDensityModel object at 0x7fcaab72fdd8>
>>> model.score(data)
{'Success': True, 'ConfLevel': 99.9, 'IsAnomaly': False, 'AnomalyProbability': 0.6963188902776808}
"""
import numpy as np
import pandas as pd
is_log_transformed = self._params['is_log_transformed']
detrend_method = self._params['detrend_method']
target_metric = 'raw'
imputed_metric = 'interpolated'
detection_method = self._params['detection_method']
# We want to make sure the time series does not contain any negatives in case of log transformation
if is_log_transformed:
neg_flag = True if not data[data[target_metric] < 0].empty else False
data[imputed_metric] = data[imputed_metric] if neg_flag else np.log(data[imputed_metric] + 1)
model_timestamps = list(self._params['AnomalyScoresGammaAlpha'].keys())
scoring_start = data.index[0]
current_min_timedelta = pd.Timedelta('10D')
for timestamp in model_timestamps:
current_datetime = pd.Timestamp(str(scoring_start.date()) + ' ' + timestamp)
temp_timedelta = scoring_start - current_datetime
temp_timedelta = pd.Timedelta('1D') + temp_timedelta if temp_timedelta < pd.Timedelta(0) else temp_timedelta
if temp_timedelta < current_min_timedelta:
opt_timestamp = timestamp
current_min_timedelta = temp_timedelta
anomaly_scores_gamma_alpha = self._params['AnomalyScoresGammaAlpha'][opt_timestamp]
anomaly_scores_gamma_loc = self._params['AnomalyScoresGammaLoc'][opt_timestamp]
anomaly_scores_gamma_beta = self._params['AnomalyScoresGammaBeta'][opt_timestamp]
baseline = self._params['Baseline'][opt_timestamp]
detrend_order = self._params['NonStationarityOrder'][opt_timestamp]
agg_data_model = self._params['AggregatedDataModel'][opt_timestamp]
agg_data = self._params['AggregatedData'][opt_timestamp]
is_anomaly, prob_of_anomaly, attributes = self._call_scoring(df=data,
target_metric=target_metric,
anomaly_scores_gamma_alpha=anomaly_scores_gamma_alpha,
anomaly_scores_gamma_loc=anomaly_scores_gamma_loc,
anomaly_scores_gamma_beta=anomaly_scores_gamma_beta,
baseline=baseline,
detrend_order=detrend_order,
detrend_method=detrend_method,
agg_data_model=agg_data_model,
detection_method=detection_method,
agg_data=agg_data)
result = {'Success': True,
'ConfLevel': float(1.0 - self.sig_level) * 100,
'IsAnomaly': is_anomaly,
'AnomalyProbability': float(prob_of_anomaly),
}
return result, data.reset_index().values.tolist()
def _anomalous_region_detection(self, input_df=None, window_length=None,
value_column=None, called_for=None,
anomaly_scores_gamma_alpha=None, anomaly_scores_gamma_loc=None,
anomaly_scores_gamma_beta=None, detrend_order=None, baseline=None,
detrend_method=None, agg_data_model=None, past_model=None, detection_method=None,
agg_data=None):
"""
        This function detects anomalies given a training and a scoring window.
:param pandas.DataFrame input_df: Input data containing the training and the scoring data.
:param int window_length: The length of a training sub-window / scoring window.
:param str value_column: A string identifying the value column from the input dataframe
:param str called_for: A flag to specify whether this function is called for training or scoring.
:param float anomaly_scores_gamma_alpha: Gamma fit alpha parameter.
:param float anomaly_scores_gamma_loc: Gamma fit location parameter.
:param float anomaly_scores_gamma_beta: Gamma fit beta parameter.
:param int detrend_order: Number of differencing for the scoring data. Only required if called for scoring.
:param list baseline: The baseline for the scoring. only required if called for scoring.
:param str detrend_method: Selects between "modeling" or "diff" detrend method.
:param luminaire.model.lad_structural.LADStructuralModel agg_data_model: Prediction model for aggregated data.
:param luminaire.model.window_density.WindowDensityModel past_model: Past stored window density model.
:param str detection_method: Selects between "kldiv" or "sign_test" distance method.
:param agg_data: Aggregated Data per day.
:return: Anomaly flag with the corresponding probability of anomaly.
:rtype: tuple(bool, float)
"""
baseline_type = self._params['baseline_type']
input_df.fillna(0, inplace=True)
# The function can be called for either training or scoring
if called_for == "training":
return self._get_model(input_df=input_df,
window_length=window_length,
value_column=value_column,
detrend_method=detrend_method,
baseline_type=baseline_type,
detection_method=detection_method,
past_model=past_model)
elif called_for == "scoring":
return self._get_result(input_df=input_df,
detrend_order=detrend_order,
agg_data_model=agg_data_model,
value_column=value_column,
detrend_method=detrend_method,
baseline_type=baseline_type,
detection_method=detection_method,
baseline=baseline,
anomaly_scores_gamma_alpha=anomaly_scores_gamma_alpha,
anomaly_scores_gamma_loc=anomaly_scores_gamma_loc,
anomaly_scores_gamma_beta=anomaly_scores_gamma_beta,
agg_data=agg_data)
|
[
"sklearn.preprocessing.StandardScaler",
"numpy.clip",
"scipy.stats.levene",
"numpy.mean",
"numpy.exp",
"numpy.std",
"luminaire.exploration.data_exploration.DataExploration",
"numpy.max",
"numpy.linspace",
"pandas.Timedelta",
"collections.Counter",
"scipy.stats.kde.gaussian_kde",
"scipy.stats.gamma.cdf",
"numpy.median",
"numpy.percentile",
"numpy.min",
"scipy.stats.wilcoxon",
"sklearn.covariance.MinCovDet",
"numpy.log",
"scipy.stats.entropy",
"copy.copy",
"numpy.diff",
"numpy.array",
"numpy.timedelta64",
"sklearn.decomposition.PCA",
"scipy.stats.gamma.fit",
"operator.itemgetter",
"itertools.chain.from_iterable"
] |
[((15154, 15171), 'luminaire.exploration.data_exploration.DataExploration', 'DataExploration', ([], {}), '()\n', (15169, 15171), False, 'from luminaire.exploration.data_exploration import DataExploration\n'), ((37362, 37381), 'pandas.Timedelta', 'pd.Timedelta', (['"""10D"""'], {}), "('10D')\n", (37374, 37381), True, 'import pandas as pd\n'), ((11479, 11497), 'pandas.Timedelta', 'pd.Timedelta', (['"""1D"""'], {}), "('1D')\n", (11491, 11497), True, 'import pandas as pd\n'), ((20150, 20184), 'pandas.Timedelta', 'pd.Timedelta', (["self._params['freq']"], {}), "(self._params['freq'])\n", (20162, 20184), True, 'import pandas as pd\n'), ((5213, 5232), 'numpy.array', 'np.array', (['mean_list'], {}), '(mean_list)\n', (5221, 5232), True, 'import numpy as np\n'), ((5606, 5625), 'numpy.array', 'np.array', (['mask_mean'], {}), '(mask_mean)\n', (5614, 5625), True, 'import numpy as np\n'), ((5792, 5809), 'numpy.array', 'np.array', (['sd_list'], {}), '(sd_list)\n', (5800, 5809), True, 'import numpy as np\n'), ((6094, 6111), 'numpy.array', 'np.array', (['mask_sd'], {}), '(mask_sd)\n', (6102, 6111), True, 'import numpy as np\n'), ((7458, 7489), 'scipy.stats.kde.gaussian_kde', 'stats.kde.gaussian_kde', (['data[i]'], {}), '(data[i])\n', (7480, 7489), True, 'import scipy.stats as stats\n'), ((7510, 7545), 'scipy.stats.kde.gaussian_kde', 'stats.kde.gaussian_kde', (['data[i + 1]'], {}), '(data[i + 1])\n', (7532, 7545), True, 'import scipy.stats as stats\n'), ((7715, 7748), 'numpy.linspace', 'np.linspace', (['ts_min', 'ts_max', '(1000)'], {}), '(ts_min, ts_max, 1000)\n', (7726, 7748), True, 'import numpy as np\n'), ((8079, 8111), 'numpy.clip', 'np.clip', (['q', 'float_min', 'float_max'], {}), '(q, float_min, float_max)\n', (8086, 8111), True, 'import numpy as np\n'), ((8132, 8164), 'numpy.clip', 'np.clip', (['p', 'float_min', 'float_max'], {}), '(p, float_min, float_max)\n', (8139, 8164), True, 'import numpy as np\n'), ((8384, 8416), 'scipy.stats.kde.gaussian_kde', 'stats.kde.gaussian_kde', (['baseline'], {}), '(baseline)\n', (8406, 8416), True, 'import scipy.stats as stats\n'), ((8433, 8461), 'scipy.stats.kde.gaussian_kde', 'stats.kde.gaussian_kde', (['data'], {}), '(data)\n', (8455, 8461), True, 'import scipy.stats as stats\n'), ((8607, 8640), 'numpy.linspace', 'np.linspace', (['ts_min', 'ts_max', '(1000)'], {}), '(ts_min, ts_max, 1000)\n', (8618, 8640), True, 'import numpy as np\n'), ((8843, 8875), 'numpy.clip', 'np.clip', (['q', 'float_min', 'float_max'], {}), '(q, float_min, float_max)\n', (8850, 8875), True, 'import numpy as np\n'), ((8892, 8924), 'numpy.clip', 'np.clip', (['p', 'float_min', 'float_max'], {}), '(p, float_min, float_max)\n', (8899, 8924), True, 'import numpy as np\n'), ((8949, 8974), 'scipy.stats.entropy', 'stats.entropy', ([], {'pk': 'p', 'qk': 'q'}), '(pk=p, qk=q)\n', (8962, 8974), True, 'import scipy.stats as stats\n'), ((9639, 9650), 'numpy.mean', 'np.mean', (['ts'], {}), '(ts)\n', (9646, 9650), True, 'import numpy as np\n'), ((9682, 9692), 'numpy.std', 'np.std', (['ts'], {}), '(ts)\n', (9688, 9692), True, 'import numpy as np\n'), ((13438, 13462), 'pandas.Timedelta', 'pd.Timedelta', (["('1' + freq)"], {}), "('1' + freq)\n", (13450, 13462), True, 'import pandas as pd\n'), ((16203, 16222), 'pandas.Timedelta', 'pd.Timedelta', (['"""10D"""'], {}), "('10D')\n", (16215, 16222), True, 'import pandas as pd\n'), ((17684, 17698), 'numpy.mean', 'np.mean', (['alpha'], {}), '(alpha)\n', (17691, 17698), True, 'import numpy as np\n'), ((17727, 17739), 'numpy.mean', 'np.mean', (['loc'], {}), '(loc)\n', 
(17734, 17739), True, 'import numpy as np\n'), ((17769, 17782), 'numpy.mean', 'np.mean', (['beta'], {}), '(beta)\n', (17776, 17782), True, 'import numpy as np\n'), ((17854, 17887), 'scipy.stats.gamma.fit', 'st.gamma.fit', (['past_anomaly_scores'], {}), '(past_anomaly_scores)\n', (17866, 17887), True, 'import scipy.stats as st\n'), ((21911, 21943), 'numpy.log', 'np.log', (['(data[imputed_metric] + 1)'], {}), '(data[imputed_metric] + 1)\n', (21917, 21943), True, 'import numpy as np\n'), ((29115, 29138), 'numpy.mean', 'np.mean', (['execution_data'], {}), '(execution_data)\n', (29122, 29138), True, 'import numpy as np\n'), ((31863, 31886), 'numpy.min', 'np.min', (['prob_of_anomaly'], {}), '(prob_of_anomaly)\n', (31869, 31886), True, 'import numpy as np\n'), ((32138, 32258), 'scipy.stats.gamma.cdf', 'st.gamma.cdf', (['current_anomaly_score', 'anomaly_scores_gamma_alpha', 'anomaly_scores_gamma_loc', 'anomaly_scores_gamma_beta'], {}), '(current_anomaly_score, anomaly_scores_gamma_alpha,\n anomaly_scores_gamma_loc, anomaly_scores_gamma_beta)\n', (32150, 32258), True, 'import scipy.stats as st\n'), ((37178, 37210), 'numpy.log', 'np.log', (['(data[imputed_metric] + 1)'], {}), '(data[imputed_metric] + 1)\n', (37184, 37210), True, 'import numpy as np\n'), ((7576, 7591), 'numpy.min', 'np.min', (['data[i]'], {}), '(data[i])\n', (7582, 7591), True, 'import numpy as np\n'), ((7593, 7612), 'numpy.min', 'np.min', (['data[i + 1]'], {}), '(data[i + 1])\n', (7599, 7612), True, 'import numpy as np\n'), ((7643, 7658), 'numpy.max', 'np.max', (['data[i]'], {}), '(data[i])\n', (7649, 7658), True, 'import numpy as np\n'), ((7660, 7679), 'numpy.max', 'np.max', (['data[i + 1]'], {}), '(data[i + 1])\n', (7666, 7679), True, 'import numpy as np\n'), ((8198, 8223), 'scipy.stats.entropy', 'stats.entropy', ([], {'pk': 'p', 'qk': 'q'}), '(pk=p, qk=q)\n', (8211, 8223), True, 'import scipy.stats as stats\n'), ((8488, 8504), 'numpy.min', 'np.min', (['baseline'], {}), '(baseline)\n', (8494, 8504), True, 'import numpy as np\n'), ((8506, 8518), 'numpy.min', 'np.min', (['data'], {}), '(data)\n', (8512, 8518), True, 'import numpy as np\n'), ((8545, 8561), 'numpy.max', 'np.max', (['baseline'], {}), '(baseline)\n', (8551, 8561), True, 'import numpy as np\n'), ((8563, 8575), 'numpy.max', 'np.max', (['data'], {}), '(data)\n', (8569, 8575), True, 'import numpy as np\n'), ((13525, 13544), 'pandas.Timedelta', 'pd.Timedelta', (['"""30T"""'], {}), "('30T')\n", (13537, 13544), True, 'import pandas as pd\n'), ((17508, 17533), 'scipy.stats.gamma.fit', 'st.gamma.fit', (['boot_scores'], {}), '(boot_scores)\n', (17520, 17533), True, 'import scipy.stats as st\n'), ((18439, 18488), 'itertools.chain.from_iterable', 'chain.from_iterable', (['sliced_training_data_cleaned'], {}), '(sliced_training_data_cleaned)\n', (18458, 18488), False, 'from itertools import chain\n'), ((28960, 28984), 'collections.Counter', 'collections.Counter', (['idx'], {}), '(idx)\n', (28979, 28984), False, 'import collections\n'), ((32739, 32776), 'scipy.stats.wilcoxon', 'st.wilcoxon', (['execution_data', 'baseline'], {}), '(execution_data, baseline)\n', (32750, 32776), True, 'import scipy.stats as st\n'), ((32827, 32862), 'scipy.stats.levene', 'st.levene', (['execution_data', 'baseline'], {}), '(execution_data, baseline)\n', (32836, 32862), True, 'import scipy.stats as st\n'), ((37661, 37676), 'pandas.Timedelta', 'pd.Timedelta', (['(0)'], {}), '(0)\n', (37673, 37676), True, 'import pandas as pd\n'), ((37605, 37623), 'pandas.Timedelta', 'pd.Timedelta', (['"""1D"""'], {}), 
"('1D')\n", (37617, 37623), True, 'import pandas as pd\n'), ((5452, 5463), 'numpy.exp', 'np.exp', (['pcp'], {}), '(pcp)\n', (5458, 5463), True, 'import numpy as np\n'), ((6025, 6036), 'numpy.exp', 'np.exp', (['pcp'], {}), '(pcp)\n', (6031, 6036), True, 'import numpy as np\n'), ((7966, 7977), 'numpy.array', 'np.array', (['q'], {}), '(q)\n', (7974, 7977), True, 'import numpy as np\n'), ((8028, 8039), 'numpy.array', 'np.array', (['p'], {}), '(p)\n', (8036, 8039), True, 'import numpy as np\n'), ((8738, 8749), 'numpy.array', 'np.array', (['q'], {}), '(q)\n', (8746, 8749), True, 'import numpy as np\n'), ((8796, 8807), 'numpy.array', 'np.array', (['p'], {}), '(p)\n', (8804, 8807), True, 'import numpy as np\n'), ((28730, 28768), 'numpy.diff', 'np.diff', (['execution_data', 'detrend_order'], {}), '(execution_data, detrend_order)\n', (28737, 28768), True, 'import numpy as np\n'), ((31655, 31775), 'scipy.stats.gamma.cdf', 'st.gamma.cdf', (['current_anomaly_score', 'anomaly_scores_gamma_alpha', 'anomaly_scores_gamma_loc', 'anomaly_scores_gamma_beta'], {}), '(current_anomaly_score, anomaly_scores_gamma_alpha,\n anomaly_scores_gamma_loc, anomaly_scores_gamma_beta)\n', (31667, 31775), True, 'import scipy.stats as st\n'), ((34753, 34776), 'numpy.min', 'np.min', (['prob_of_anomaly'], {}), '(prob_of_anomaly)\n', (34759, 34776), True, 'import numpy as np\n'), ((7978, 7989), 'numpy.array', 'np.array', (['q'], {}), '(q)\n', (7986, 7989), True, 'import numpy as np\n'), ((8040, 8051), 'numpy.array', 'np.array', (['p'], {}), '(p)\n', (8048, 8051), True, 'import numpy as np\n'), ((8750, 8761), 'numpy.array', 'np.array', (['q'], {}), '(q)\n', (8758, 8761), True, 'import numpy as np\n'), ((8808, 8819), 'numpy.array', 'np.array', (['p'], {}), '(p)\n', (8816, 8819), True, 'import numpy as np\n'), ((16532, 16547), 'pandas.Timedelta', 'pd.Timedelta', (['(0)'], {}), '(0)\n', (16544, 16547), True, 'import pandas as pd\n'), ((16476, 16494), 'pandas.Timedelta', 'pd.Timedelta', (['"""1D"""'], {}), "('1D')\n", (16488, 16494), True, 'import pandas as pd\n'), ((21345, 21368), 'numpy.timedelta64', 'np.timedelta64', (['(30)', '"""m"""'], {}), "(30, 'm')\n", (21359, 21368), True, 'import numpy as np\n'), ((29394, 29412), 'numpy.array', 'np.array', (['agg_data'], {}), '(agg_data)\n', (29402, 29412), True, 'import numpy as np\n'), ((33697, 33716), 'copy.copy', 'copy.copy', (['baseline'], {}), '(baseline)\n', (33706, 33716), False, 'import copy\n'), ((33817, 33822), 'sklearn.decomposition.PCA', 'PCA', ([], {}), '()\n', (33820, 33822), False, 'from sklearn.decomposition import PCA\n'), ((34352, 34378), 'scipy.stats.gamma.fit', 'st.gamma.fit', (['baseline_sds'], {}), '(baseline_sds)\n', (34364, 34378), True, 'import scipy.stats as st\n'), ((29054, 29076), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (29073, 29076), False, 'import operator\n'), ((30176, 30201), 'numpy.median', 'np.median', (['agg_data_trunc'], {}), '(agg_data_trunc)\n', (30185, 30201), True, 'import numpy as np\n'), ((30255, 30287), 'numpy.percentile', 'np.percentile', (['agg_data_trunc', '(5)'], {}), '(agg_data_trunc, 5)\n', (30268, 30287), True, 'import numpy as np\n'), ((30373, 30406), 'numpy.percentile', 'np.percentile', (['agg_data_trunc', '(95)'], {}), '(agg_data_trunc, 95)\n', (30386, 30406), True, 'import numpy as np\n'), ((30674, 30699), 'numpy.median', 'np.median', (['agg_data_trunc'], {}), '(agg_data_trunc)\n', (30683, 30699), True, 'import numpy as np\n'), ((30749, 30781), 'numpy.percentile', 'np.percentile', (['agg_data_trunc', 
'(5)'], {}), '(agg_data_trunc, 5)\n', (30762, 30781), True, 'import numpy as np\n'), ((30863, 30896), 'numpy.percentile', 'np.percentile', (['agg_data_trunc', '(95)'], {}), '(agg_data_trunc, 95)\n', (30876, 30896), True, 'import numpy as np\n'), ((33960, 33971), 'sklearn.covariance.MinCovDet', 'MinCovDet', ([], {}), '()\n', (33969, 33971), False, 'from sklearn.covariance import EmpiricalCovariance, MinCovDet\n'), ((34431, 34460), 'numpy.std', 'np.std', (['current_adjusted_data'], {}), '(current_adjusted_data)\n', (34437, 34460), True, 'import numpy as np\n'), ((33336, 33354), 'numpy.array', 'np.array', (['baseline'], {}), '(baseline)\n', (33344, 33354), True, 'import numpy as np\n'), ((33870, 33886), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (33884, 33886), False, 'from sklearn.preprocessing import StandardScaler\n'), ((34250, 34283), 'numpy.array', 'np.array', (['baseline_execution_data'], {}), '(baseline_execution_data)\n', (34258, 34283), True, 'import numpy as np\n')]
|
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from exceptions import NoUriProviden
from main import calculate_average_face_encoding
from main import obtain_image_face_encodings
from main import parallelize_face_encodings
class TryTesting(TestCase):
def test_obtain_image_face_encodings_empty_uri(self):
with self.assertRaises(NoUriProviden):
obtain_image_face_encodings("")
def test_obtain_image_face_encodings_uri_not_found(self):
with self.assertRaises(FileNotFoundError):
obtain_image_face_encodings("uri/that/doesnt/exists")
@patch("main.IMAGE_DIRECTORY", "/path/that/doesnt/exist")
def test_parallelize_face_encodings_directory_not_found(self):
        # this is not checked in the application code itself; the underlying
        # face_recognition library raises the exception
with self.assertRaises(FileNotFoundError):
parallelize_face_encodings()
@patch("main.obtain_image_face_encodings", return_value=[])
@patch("os.listdir", return_value=[])
def test_parallelize_face_encodings_empty_directory_encoding_not_called(
self, listdir_mock, obtain_mock
):
        # The function under test has to run for this assertion to be meaningful
        parallelize_face_encodings()
        self.assertFalse(obtain_mock.called)
def test_calculate_average_face_encoding_with_empty_encodings(self):
self.assertIsNone(calculate_average_face_encoding([]))
@patch("main.np.savetxt")
def test_calculate_average_face_encoding_ensure_file_creation_called(
self, mocked_np
):
calculate_average_face_encoding(np.ndarray([1]))
self.assertTrue(mocked_np.called)
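# These tests can be run with the standard library runner, e.g. `python -m unittest discover`
# (the test module / package layout is an assumption here).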
|
[
"main.obtain_image_face_encodings",
"main.parallelize_face_encodings",
"unittest.mock.patch",
"main.calculate_average_face_encoding",
"numpy.ndarray"
] |
[((622, 678), 'unittest.mock.patch', 'patch', (['"""main.IMAGE_DIRECTORY"""', '"""/path/that/doesnt/exist"""'], {}), "('main.IMAGE_DIRECTORY', '/path/that/doesnt/exist')\n", (627, 678), False, 'from unittest.mock import patch\n'), ((968, 1026), 'unittest.mock.patch', 'patch', (['"""main.obtain_image_face_encodings"""'], {'return_value': '[]'}), "('main.obtain_image_face_encodings', return_value=[])\n", (973, 1026), False, 'from unittest.mock import patch\n'), ((1032, 1068), 'unittest.mock.patch', 'patch', (['"""os.listdir"""'], {'return_value': '[]'}), "('os.listdir', return_value=[])\n", (1037, 1068), False, 'from unittest.mock import patch\n'), ((1381, 1405), 'unittest.mock.patch', 'patch', (['"""main.np.savetxt"""'], {}), "('main.np.savetxt')\n", (1386, 1405), False, 'from unittest.mock import patch\n'), ((404, 435), 'main.obtain_image_face_encodings', 'obtain_image_face_encodings', (['""""""'], {}), "('')\n", (431, 435), False, 'from main import obtain_image_face_encodings\n'), ((562, 615), 'main.obtain_image_face_encodings', 'obtain_image_face_encodings', (['"""uri/that/doesnt/exists"""'], {}), "('uri/that/doesnt/exists')\n", (589, 615), False, 'from main import obtain_image_face_encodings\n'), ((933, 961), 'main.parallelize_face_encodings', 'parallelize_face_encodings', ([], {}), '()\n', (959, 961), False, 'from main import parallelize_face_encodings\n'), ((1338, 1373), 'main.calculate_average_face_encoding', 'calculate_average_face_encoding', (['[]'], {}), '([])\n', (1369, 1373), False, 'from main import calculate_average_face_encoding\n'), ((1551, 1566), 'numpy.ndarray', 'np.ndarray', (['[1]'], {}), '([1])\n', (1561, 1566), True, 'import numpy as np\n')]
|
import numpy as np
import cv2
import matplotlib.pyplot as plt
from davg.lanefinding.Prediction import Prediction
def plot_line(img, x, y, color=(255,255,0), thickness=2):
''' Takes an image and two arrays of x and y points similar to matplotlib
and writes the lines onto the image. If the points are floats, they
are rounded and converted to ints to satisfy opencv.
'''
points = np.rint(np.vstack([x,y]).T).astype(int)
#print(points)
cv2.polylines(img, [points], False, color, thickness)
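# Example usage (hypothetical values): the float points are rounded before cv2.polylines is called,
# e.g. np.rint(np.vstack([[10.4], [63]]).T).astype(int) -> [[10, 63]].
#   img = np.zeros((128, 128, 3), dtype='uint8')
#   plot_line(img, [10.4, 20.7, 30.1], [0, 63, 127], color=(0, 255, 255))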
def demonstrate_weighted_average_and_prediction():
# Create a blank array to be used as an image
test_img = np.zeros((128, 128, 3), dtype='uint8')
# Define common y-points
y = np.array([0,31,63,95,127])
# Define an array of x-point arrays
#recent_x = np.array([[40,40,40,40,40]])
#recent_x = np.array([[40,40,40,40,40], [30,35,37,39,40]])
#recent_x = np.array([[40,40,40,40,40], [30,35,37,39,40], [20,30,35,38,40], [10,25,32,37,40]])
#recent_x = np.array([[40,40,40,40,40], [30,35,37,39,40], [20,30,35,38,40], [10,25,32,37,40], [20,30,35,38,40]])
recent_x = np.array([[40,40,40,40,40], [30,35,37,39,40], [20,30,35,38,40], [10,25,32,37,40], [0,20,29,36,40]])
print ("recent_x", recent_x)
# Calculate the softmax weighted averages for the x-points
averages = Prediction.find_weighted_averages(recent_x, window=3)
print("weighted averages", averages)
# Calculate the differences between the each consecutive set of x-points
recent_xdiff = np.diff(recent_x, axis=0)
print ("recent_xdiff", recent_xdiff)
if len(recent_xdiff) != 0:
# Calculate the non-weighted average of the differences for a baseline
recent_xdiff_avg = np.average(recent_xdiff, axis=0)
print ("recent_xdiff_avg", recent_xdiff_avg)
# Calculate the softmax weighted averages for the differences in the x-points
xdiff_weighted_averages = Prediction.find_weighted_averages(recent_xdiff, window=2)
print("xdiff_weighted_averages[-1]:", xdiff_weighted_averages[-1])
# Predict the next line location by applying the last weighted diff to the last x-points
#predicted_x = np.add(xdiff_weighted_averages[-1], recent_x[-1])
predicted_x = Prediction.predict_next_values(recent_x, window=2)
print("predicted:", predicted_x)
# Plot the various lines
for i in range(len(recent_x)):
# Plot a red line for the weighted moving averages
plot_line(test_img, averages[i], y, thickness=1, color=(200,0,0))
# Plot a yellow line for the current points
plot_line(test_img, recent_x[i], y, thickness=1)
# Plot a green line for the predicted next line based on weighted averages of the diffs
plot_line(test_img, predicted_x, y, thickness=1, color=(0,200,0))
plt.imshow(test_img)
plt.show()
# Run the demonstration
demonstrate_weighted_average_and_prediction()
|
[
"matplotlib.pyplot.show",
"cv2.polylines",
"numpy.average",
"davg.lanefinding.Prediction.Prediction.predict_next_values",
"matplotlib.pyplot.imshow",
"numpy.zeros",
"davg.lanefinding.Prediction.Prediction.find_weighted_averages",
"numpy.diff",
"numpy.array",
"numpy.vstack"
] |
[((472, 525), 'cv2.polylines', 'cv2.polylines', (['img', '[points]', '(False)', 'color', 'thickness'], {}), '(img, [points], False, color, thickness)\n', (485, 525), False, 'import cv2\n'), ((644, 682), 'numpy.zeros', 'np.zeros', (['(128, 128, 3)'], {'dtype': '"""uint8"""'}), "((128, 128, 3), dtype='uint8')\n", (652, 682), True, 'import numpy as np\n'), ((721, 751), 'numpy.array', 'np.array', (['[0, 31, 63, 95, 127]'], {}), '([0, 31, 63, 95, 127])\n', (729, 751), True, 'import numpy as np\n'), ((1128, 1251), 'numpy.array', 'np.array', (['[[40, 40, 40, 40, 40], [30, 35, 37, 39, 40], [20, 30, 35, 38, 40], [10, 25,\n 32, 37, 40], [0, 20, 29, 36, 40]]'], {}), '([[40, 40, 40, 40, 40], [30, 35, 37, 39, 40], [20, 30, 35, 38, 40],\n [10, 25, 32, 37, 40], [0, 20, 29, 36, 40]])\n', (1136, 1251), True, 'import numpy as np\n'), ((1340, 1393), 'davg.lanefinding.Prediction.Prediction.find_weighted_averages', 'Prediction.find_weighted_averages', (['recent_x'], {'window': '(3)'}), '(recent_x, window=3)\n', (1373, 1393), False, 'from davg.lanefinding.Prediction import Prediction\n'), ((1532, 1557), 'numpy.diff', 'np.diff', (['recent_x'], {'axis': '(0)'}), '(recent_x, axis=0)\n', (1539, 1557), True, 'import numpy as np\n'), ((2258, 2308), 'davg.lanefinding.Prediction.Prediction.predict_next_values', 'Prediction.predict_next_values', (['recent_x'], {'window': '(2)'}), '(recent_x, window=2)\n', (2288, 2308), False, 'from davg.lanefinding.Prediction import Prediction\n'), ((2822, 2842), 'matplotlib.pyplot.imshow', 'plt.imshow', (['test_img'], {}), '(test_img)\n', (2832, 2842), True, 'import matplotlib.pyplot as plt\n'), ((2847, 2857), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2855, 2857), True, 'import matplotlib.pyplot as plt\n'), ((1737, 1769), 'numpy.average', 'np.average', (['recent_xdiff'], {'axis': '(0)'}), '(recent_xdiff, axis=0)\n', (1747, 1769), True, 'import numpy as np\n'), ((1944, 2001), 'davg.lanefinding.Prediction.Prediction.find_weighted_averages', 'Prediction.find_weighted_averages', (['recent_xdiff'], {'window': '(2)'}), '(recent_xdiff, window=2)\n', (1977, 2001), False, 'from davg.lanefinding.Prediction import Prediction\n'), ((417, 434), 'numpy.vstack', 'np.vstack', (['[x, y]'], {}), '([x, y])\n', (426, 434), True, 'import numpy as np\n')]
|
'''
@brief Leg-Rest Position Recommendation with DecisionTree Regressor
@author <NAME> <<EMAIL>>
@date 2021. 05. 21
'''
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor
import progressbar
'''
Presets & Hyper-parameters
'''
CONFIGURATION_FILE_PATH = "./data/train/data_config.csv"
DATASET_PATH = "./data/train/"
pd.set_option('display.width', 200) # for display width
# FEATURE_LENGTH = 30 # n-dimensional data feature only use
# NUMBER_OF_SAMPLES = 299 # number of augmented data
# FEATURE_MAX_LENGTH = 115 # Maximum feature length
# NUMBER_OF_RANDOM_SELECTION = 5
# MAX_TRAIN_ITERATION = -1 # infinity
'''
1. Load configuration file
'''
data_config = pd.read_csv(CONFIGURATION_FILE_PATH, header=0, index_col=0)
'''
2. data extraction
'''
X = data_config.loc[:, ['user_height', 'user_weight', 'user_age']]
bmr = 66.47+(13.75*X['user_weight'])+(5*X['user_height'])-(6.76*X['user_age'])
bmi = X['user_weight']/(X['user_height']/100*X['user_height']/100)
X["bmr"] = bmr
X["bmi"] = bmi
ys = data_config.loc[:, ['bestfit_angle_standard']]
yr = data_config.loc[:, ['bestfit_angle_relax']]
del X["user_age"]
'''
DecisionTree Regression Model
'''
X_train, X_test, y_train, y_test = train_test_split(X, np.ravel(ys), test_size=0.33, shuffle=True)
print("------ Regression Model Evaluation (@standard) ------")
model_standard = DecisionTreeRegressor(
criterion = "mse",
max_depth=50,
min_samples_leaf=1,
random_state=1).fit(X_train, y_train)
print("* R2 Score with Trainset (@standard) :", model_standard.score(X_train, y_train))
print("* R2 Score with Testset (@standard) :", model_standard.score(X_test, y_test))
print("* Feature Impotances (@standard) :")
for name, value in zip(X_train.columns, model_standard.feature_importances_):
print(' - {0}: {1:.3f}'.format(name, value))
print("------ Regression Model Evaluation (@relax) ------")
model_relax = DecisionTreeRegressor(
criterion = "mse", # mean square error
max_depth=50,
min_samples_leaf=1,
random_state=1).fit(X_train, y_train)
print("* R-squared Score with Trainset (@relax) :", model_relax.score(X_train, y_train))
print("* R-squared Score with Testset (@relax) :", model_relax.score(X_test, y_test))
print("* Feature Impotances (@relax) :")
for name, value in zip(X_train.columns, model_relax.feature_importances_):
print(' - {0}: {1:.3f}'.format(name, value))
'''
Output File Generation
'''
# min_age = 20
# max_age = 80
# ages = np.array([min_age+i for i in range(max_age-min_age+1)])
ages = np.arange(20, 80, step=10)
# min_height = 150
# max_height = 190
# heights = np.array([min_height+i for i in range(max_height-min_height+1)])
heights = np.arange(150, 190, step=10)
# min_weight = 40
# max_weight = 100
# weights = np.array([min_weight+i for i in range(max_weight-min_weight+1)])
weights = np.arange(40, 100, step=10)
print(X.head())
bar = progressbar.ProgressBar(maxval=len(ages)*len(heights)*len(weights), widgets=[progressbar.Bar('=', '[', ']'), ' ', progressbar.Percentage()])
bar.start()
output_standard = pd.DataFrame(columns=['height','weight','legrest'])
output_relax = pd.DataFrame(columns=['height','weight','legrest'])
count = 0
for a in ages:
for h in heights:
for w in weights:
bmr = 66.47+(13.75*w)+(5*h)-(6.76*a)
bmi = w/(h/100*h/100)
pvs = model_standard.predict([[h,w,bmr,bmi]])
pvr = model_relax.predict([[h,w,bmr,bmi]])
output_standard = output_standard.append({'height':h, 'weight':w, 'legrest':pvs[0]}, ignore_index=True)
output_relax = output_relax.append({'height':h, 'weight':w, 'legrest':pvr[0]}, ignore_index=True)
count = count+1
bar.update(count)
bar.finish()
output_standard.to_csv('result_standard.csv', index=False)
output_relax.to_csv('result_relax.csv', index=False)
print("saved results")
|
[
"pandas.DataFrame",
"sklearn.tree.DecisionTreeRegressor",
"numpy.ravel",
"pandas.read_csv",
"progressbar.Bar",
"progressbar.Percentage",
"numpy.arange",
"pandas.set_option"
] |
[((404, 439), 'pandas.set_option', 'pd.set_option', (['"""display.width"""', '(200)'], {}), "('display.width', 200)\n", (417, 439), True, 'import pandas as pd\n'), ((748, 807), 'pandas.read_csv', 'pd.read_csv', (['CONFIGURATION_FILE_PATH'], {'header': '(0)', 'index_col': '(0)'}), '(CONFIGURATION_FILE_PATH, header=0, index_col=0)\n', (759, 807), True, 'import pandas as pd\n'), ((2602, 2628), 'numpy.arange', 'np.arange', (['(20)', '(80)'], {'step': '(10)'}), '(20, 80, step=10)\n', (2611, 2628), True, 'import numpy as np\n'), ((2755, 2783), 'numpy.arange', 'np.arange', (['(150)', '(190)'], {'step': '(10)'}), '(150, 190, step=10)\n', (2764, 2783), True, 'import numpy as np\n'), ((2909, 2936), 'numpy.arange', 'np.arange', (['(40)', '(100)'], {'step': '(10)'}), '(40, 100, step=10)\n', (2918, 2936), True, 'import numpy as np\n'), ((3132, 3185), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['height', 'weight', 'legrest']"}), "(columns=['height', 'weight', 'legrest'])\n", (3144, 3185), True, 'import pandas as pd\n'), ((3199, 3252), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['height', 'weight', 'legrest']"}), "(columns=['height', 'weight', 'legrest'])\n", (3211, 3252), True, 'import pandas as pd\n'), ((1294, 1306), 'numpy.ravel', 'np.ravel', (['ys'], {}), '(ys)\n', (1302, 1306), True, 'import numpy as np\n'), ((1419, 1511), 'sklearn.tree.DecisionTreeRegressor', 'DecisionTreeRegressor', ([], {'criterion': '"""mse"""', 'max_depth': '(50)', 'min_samples_leaf': '(1)', 'random_state': '(1)'}), "(criterion='mse', max_depth=50, min_samples_leaf=1,\n random_state=1)\n", (1440, 1511), False, 'from sklearn.tree import DecisionTreeRegressor\n'), ((1973, 2065), 'sklearn.tree.DecisionTreeRegressor', 'DecisionTreeRegressor', ([], {'criterion': '"""mse"""', 'max_depth': '(50)', 'min_samples_leaf': '(1)', 'random_state': '(1)'}), "(criterion='mse', max_depth=50, min_samples_leaf=1,\n random_state=1)\n", (1994, 2065), False, 'from sklearn.tree import DecisionTreeRegressor\n'), ((3038, 3068), 'progressbar.Bar', 'progressbar.Bar', (['"""="""', '"""["""', '"""]"""'], {}), "('=', '[', ']')\n", (3053, 3068), False, 'import progressbar\n'), ((3075, 3099), 'progressbar.Percentage', 'progressbar.Percentage', ([], {}), '()\n', (3097, 3099), False, 'import progressbar\n')]
|
import os
import os.path as osp
import re
import time
import shutil
import argparse
import subprocess
import multiprocessing
import cv2
import numpy as np
import pandas as pd
from requests_html import HTML
from selenium import webdriver
def check_banner(args):
valid = False
stage_dir = args[0]
banner_dir = args[1]
# Read banners to check
banners = [ cv2.imread(osp.join(banner_dir, banner))
for banner in os.listdir(banner_dir)
if not banner.startswith('.') ]
count = len(banners)
# Check downloaded images one by one
for path in [ osp.join(stage_dir, f) for f in os.listdir(stage_dir) ]:
# Read image
img = cv2.imread(path)
if img is None:
continue
# Match with banner
for banner in banners:
img = cv2.resize(img, (banner.shape[1], banner.shape[0]))
ref = banner.astype('float')
tar = img.astype('float')
# Determine image volume
volume = 1
for v in img.shape:
volume *= v
            # Compute the difference between the two images
diff = np.sum(np.abs(ref-tar)) / volume
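            # Mean absolute pixel difference; values below 10 count as a banner match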
if diff < 10:
count -= 1
# Early stopping
if count <= 0:
valid = True
break
return (osp.basename(stage_dir), valid)
def main(args):
# Read target sellers to check their banner
with open(args['input'], 'r') as f:
sellers = [ line.strip('\n') for line in f.readlines() ]
seller_names = [ osp.basename(seller) for seller in sellers ]
# Instantiate chrome webdriver with default page google.com opened
mobile_emulation = { "deviceName": "iPhone X" }
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument("--headless")
# chrome_options.add_experimental_option("mobileEmulation", mobile_emulation)
driver = webdriver.Chrome(args['driver'], options=chrome_options)
driver.get('http://google.com')
# Load every seller pages
for name, seller in zip(seller_names, sellers):
print(f"Open page '{name}'")
driver.execute_script(f"window.open('about:blank', '{name}');")
driver.switch_to.window(name)
driver.get(seller)
time.sleep(3)
# Parse every opened pages
pattern = r"https://cf.shopee.tw/file/[\d\w]+"
for name in seller_names:
# Create Staging directory for each seller
stage_dir = osp.join(args['stage'], name)
shutil.rmtree(stage_dir, ignore_errors=True)
os.makedirs(stage_dir)
# Extract links of each loaded images
driver.switch_to.window(name)
html = driver.page_source
imgs = re.findall(pattern, html)
# Download each loaded images
print(f"Download images in '{driver.current_url}'")
procs = []
for img in imgs:
cmdline = f'wget -O {osp.join(stage_dir, osp.basename(img))} {img}'
proc = subprocess.Popen(
cmdline,
shell=True,
stderr=subprocess.DEVNULL,
stdout=subprocess.DEVNULL)
procs.append(proc)
# Wait for download completion
for proc in procs:
proc.wait()
proc.terminate()
# Exit the driver
driver.quit()
# Check banners with multiple workers
stages = [
osp.join(args['stage'], seller)
for seller in os.listdir(args['stage'])
if not seller.startswith('.')
]
banners = [ args['banner'] ]*len(stages)
tasks = list(zip(stages, banners))
pool = multiprocessing.Pool(multiprocessing.cpu_count())
results = pool.map(check_banner, tasks)
data = { 'seller': [], 'result': [] }
for result in results:
data['seller'].append(result[0])
data['result'].append(result[1])
df = pd.DataFrame(data, columns=['seller', 'result'])
df.to_csv(args['output'], index=False)
print(f"Export result to {args['output']}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--input", required=True, help="list of urls of target sellers")
parser.add_argument("--output", default="report.txt", help="report file")
parser.add_argument("--banner", default="banner", help="directory containing banners need to check")
parser.add_argument("--stage", default="stage", help="staging directories to hold download images")
parser.add_argument("--driver", default="driver/chromedriver")
args = vars(parser.parse_args())
main(args)
|
[
"pandas.DataFrame",
"subprocess.Popen",
"numpy.abs",
"argparse.ArgumentParser",
"os.makedirs",
"os.path.basename",
"time.sleep",
"cv2.imread",
"re.findall",
"selenium.webdriver.ChromeOptions",
"selenium.webdriver.Chrome",
"shutil.rmtree",
"os.path.join",
"os.listdir",
"cv2.resize",
"multiprocessing.cpu_count"
] |
[((1762, 1787), 'selenium.webdriver.ChromeOptions', 'webdriver.ChromeOptions', ([], {}), '()\n', (1785, 1787), False, 'from selenium import webdriver\n'), ((1929, 1985), 'selenium.webdriver.Chrome', 'webdriver.Chrome', (["args['driver']"], {'options': 'chrome_options'}), "(args['driver'], options=chrome_options)\n", (1945, 1985), False, 'from selenium import webdriver\n'), ((3956, 4004), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': "['seller', 'result']"}), "(data, columns=['seller', 'result'])\n", (3968, 4004), True, 'import pandas as pd\n'), ((4138, 4163), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4161, 4163), False, 'import argparse\n'), ((603, 625), 'os.path.join', 'osp.join', (['stage_dir', 'f'], {}), '(stage_dir, f)\n', (611, 625), True, 'import os.path as osp\n'), ((695, 711), 'cv2.imread', 'cv2.imread', (['path'], {}), '(path)\n', (705, 711), False, 'import cv2\n'), ((1345, 1368), 'os.path.basename', 'osp.basename', (['stage_dir'], {}), '(stage_dir)\n', (1357, 1368), True, 'import os.path as osp\n'), ((2287, 2300), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (2297, 2300), False, 'import time\n'), ((2485, 2514), 'os.path.join', 'osp.join', (["args['stage']", 'name'], {}), "(args['stage'], name)\n", (2493, 2514), True, 'import os.path as osp\n'), ((2523, 2567), 'shutil.rmtree', 'shutil.rmtree', (['stage_dir'], {'ignore_errors': '(True)'}), '(stage_dir, ignore_errors=True)\n', (2536, 2567), False, 'import shutil\n'), ((2576, 2598), 'os.makedirs', 'os.makedirs', (['stage_dir'], {}), '(stage_dir)\n', (2587, 2598), False, 'import os\n'), ((2732, 2757), 're.findall', 're.findall', (['pattern', 'html'], {}), '(pattern, html)\n', (2742, 2757), False, 'import re\n'), ((3477, 3508), 'os.path.join', 'osp.join', (["args['stage']", 'seller'], {}), "(args['stage'], seller)\n", (3485, 3508), True, 'import os.path as osp\n'), ((3721, 3748), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (3746, 3748), False, 'import multiprocessing\n'), ((387, 415), 'os.path.join', 'osp.join', (['banner_dir', 'banner'], {}), '(banner_dir, banner)\n', (395, 415), True, 'import os.path as osp\n'), ((447, 469), 'os.listdir', 'os.listdir', (['banner_dir'], {}), '(banner_dir)\n', (457, 469), False, 'import os\n'), ((635, 656), 'os.listdir', 'os.listdir', (['stage_dir'], {}), '(stage_dir)\n', (645, 656), False, 'import os\n'), ((834, 885), 'cv2.resize', 'cv2.resize', (['img', '(banner.shape[1], banner.shape[0])'], {}), '(img, (banner.shape[1], banner.shape[0]))\n', (844, 885), False, 'import cv2\n'), ((1572, 1592), 'os.path.basename', 'osp.basename', (['seller'], {}), '(seller)\n', (1584, 1592), True, 'import os.path as osp\n'), ((2999, 3095), 'subprocess.Popen', 'subprocess.Popen', (['cmdline'], {'shell': '(True)', 'stderr': 'subprocess.DEVNULL', 'stdout': 'subprocess.DEVNULL'}), '(cmdline, shell=True, stderr=subprocess.DEVNULL, stdout=\n subprocess.DEVNULL)\n', (3015, 3095), False, 'import subprocess\n'), ((3531, 3556), 'os.listdir', 'os.listdir', (["args['stage']"], {}), "(args['stage'])\n", (3541, 3556), False, 'import os\n'), ((1162, 1179), 'numpy.abs', 'np.abs', (['(ref - tar)'], {}), '(ref - tar)\n', (1168, 1179), True, 'import numpy as np\n'), ((2953, 2970), 'os.path.basename', 'osp.basename', (['img'], {}), '(img)\n', (2965, 2970), True, 'import os.path as osp\n')]
|
import argparse
import numpy as np
import struct
from matplotlib import gridspec
import matplotlib.pyplot as plt
from glob import glob
import os
from os.path import join
from natsort import natsorted
from skimage.transform import resize
import re
from tqdm import tqdm
""" Code to process depth/image/pose binaries the ios DepthBundleRecorder app into more useable .npz files.
Usage: python ConvertBinaries.py -d data_folder_with_binaries
Output: a folder data_processed_folder_with_binaries containing the processed depth bundles
"""
def read_header(header):
h = re.sub("\[|\]|\(|\)|\s|\'", "", str(header)) # Strip all delims but <> and commas
h = h.split("<ENDHEADER>")[0] # Snip empty end of header
timestamp = float(h.split("Time:")[1].split(",")[0])
euler_angles = np.array(h.split("EulerAngles:SIMD3<Float>")[1].split(",")[0:3], dtype=np.float32)
world_pose = np.array(h.split("WorldPose:simd_float4x4")[1].split(",")[0:16], dtype=np.float32).reshape((4,4))
intrinsics = np.array(h.split("Intrinsics:Optionalsimd_float3x3")[1].split(",")[0:9], dtype=np.float32).reshape((3,3))
world_to_camera = np.array(h.split("WorldToCamera:Optionalsimd_float4x4")[1].split(",")[0:16], dtype=np.float32).reshape((4,4))
return {'timestamp' : timestamp,
'euler_angles' : euler_angles,
'world_pose' : world_pose.T,
'intrinsics' : intrinsics.T,
'world_to_camera' : world_to_camera.T}
def load_info(info_name):
with open(info_name, mode='rb') as file:
file_content = file.read()
        header = file_content[:1024] # 1024 byte header
return read_header(header)
def load_depth(depth_name):
with open(depth_name, mode='rb') as file:
file_content = file.read()
        header = file_content[:1024] # 1024 byte header
file_content = file_content[1024:]
file_content = struct.unpack('f'* ((len(file_content)) // 4), file_content)
depth = np.reshape(file_content, (192,256))
depth = np.flip(depth.T, 1).astype(np.float32)
return depth, header
def load_conf(conf_name):
with open(conf_name, mode='rb') as file:
file_content = file.read()
file_content = struct.unpack('B'* ((len(file_content))), file_content)
conf = np.reshape(file_content, (192,256))
conf = np.flip(conf.T, 1).astype(np.uint8)
return conf
def load_img(img_name):
with open(img_name, mode='rb') as file:
file_content = file.read()
Y = file_content[:1920*1440]
Y = struct.unpack('B' * ((len(Y))), Y)
Y = np.reshape(Y, (1440,1920))
Y = np.flip(Y.T, 1)
UV = file_content[1920*1440:]
UV = struct.unpack('B' * ((len(UV))), UV)
U,V = UV[0::2], UV[1::2]
U,V = np.reshape(U, (720,960)), np.reshape(V, (720,960))
U,V = np.flip(U.T, 1), np.flip(V.T, 1)
# Re-Center U,V channels
Y,U,V = Y.astype(np.float32), (U.astype(np.float32) - 128), (V.astype(np.float32) - 128)
U,V = resize(U, (1920,1440), order=0), resize(V, (1920,1440), order=0)
# Convert YUV 420 to RGB
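    # Coefficients match BT.709: R = Y + 1.5748*Cr (1/0.6350), B = Y + 1.8556*Cb (1/0.5389); G recovered from the luma relation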
R = Y + (V*1/0.6350)
B = Y + (U*1/0.5389)
G = (Y - 0.2126*R - 0.0722*B)*(1/0.7152)
img = np.stack((R,G,B), axis=-1)
img[img<0] = 0
img[img>255] = 255
img = img.astype(np.uint8)
return img
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-d', default=None, type=str, required=True, help='Data directory')
args = parser.parse_args()
bundle_names = natsorted(glob(join(args.d, "*")))
for bundle_name in bundle_names:
print("Processing {0}.".format(bundle_name.split("/")[-1]))
if "-poses" not in bundle_name:
# Process image + depth bundle
depth_names = natsorted(glob(join(bundle_name, "depth*.bin")))
img_names = natsorted(glob(join(bundle_name, "image*.bin")))
conf_names = natsorted(glob(join(bundle_name, "conf*.bin")))
save_path = bundle_name.replace("data", "data_processed")
os.makedirs(save_path, exist_ok=True)
npz_file = {}
for i, (img_name, depth_name, conf_name) in tqdm(enumerate(zip(img_names, depth_names, conf_names))):
img = load_img(img_name)
depth, header = load_depth(depth_name)
info = read_header(header)
conf = load_conf(conf_name)
if i == 0:
ref_time = info['timestamp']
info['timestamp'] -= ref_time
npz_file["img_{0}".format(i)] = img
npz_file["depth_{0}".format(i)] = depth
npz_file["conf_{0}".format(i)] = conf
npz_file["info_{0}".format(i)] = info
npz_file["num_frames"] = len(img_names)
# Save first frame preview
fig = plt.figure(figsize=(14, 30))
gs = gridspec.GridSpec(1, 3, wspace=0.0, hspace=0.0, width_ratios=[1,1,1.12])
ax1 = plt.subplot(gs[0,0])
ax1.imshow(npz_file['img_0'])
ax1.axis('off')
ax1.set_title("Image")
ax2 = plt.subplot(gs[0,1])
ax2.imshow(npz_file['conf_0'], cmap="gray")
ax2.axis('off')
ax2.set_title("Confidence")
ax3 = plt.subplot(gs[0,2])
d = ax3.imshow(npz_file['depth_0'], cmap="Spectral", vmin=0, vmax=7)
ax3.axis('off')
ax3.set_title("Depth")
fig.colorbar(d, fraction=0.055, label="Depth [m]")
plt.savefig(join(save_path, "frame_first.png"), bbox_inches='tight', pad_inches=0.05, facecolor='white')
plt.close()
# Save last frame preview
fig = plt.figure(figsize=(14, 30))
gs = gridspec.GridSpec(1, 3, wspace=0.0, hspace=0.0, width_ratios=[1,1,1.12])
ax1 = plt.subplot(gs[0,0])
ax1.imshow(npz_file['img_{0}'.format(len(img_names) - 1)])
ax1.axis('off')
ax1.set_title("Image")
ax2 = plt.subplot(gs[0,1])
ax2.imshow(npz_file['conf_{0}'.format(len(img_names) - 1)], cmap="gray")
ax2.axis('off')
ax2.set_title("Confidence")
ax3 = plt.subplot(gs[0,2])
d = ax3.imshow(npz_file['depth_{0}'.format(len(img_names) - 1)], cmap="Spectral", vmin=0, vmax=7)
ax3.axis('off')
ax3.set_title("Depth")
fig.colorbar(d, fraction=0.055, label="Depth [m]")
plt.savefig(join(save_path, "frame_last.png"), bbox_inches='tight', pad_inches=0.05, facecolor='white')
plt.close()
# Save bundle
np.savez(join(save_path, "frame_bundle"), **npz_file)
else:
# Process only poses + info bundle
info_names = natsorted(glob(join(bundle_name, "info*.bin")))
save_path = bundle_name.replace("data", "data_processed")
os.makedirs(save_path, exist_ok=True)
npz_file = {}
for i, info_name in tqdm(enumerate(info_names)):
info = load_info(info_name)
if i == 0:
ref_time = info['timestamp']
info['timestamp'] -= ref_time
npz_file["info_{0}".format(i)] = info
npz_file["num_frames"] = len(info_names)
# Save bundle
np.savez(join(save_path, "info_bundle"), **npz_file)
if __name__ == '__main__':
main()
|
[
"numpy.stack",
"matplotlib.pyplot.subplot",
"numpy.flip",
"argparse.ArgumentParser",
"os.makedirs",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure",
"skimage.transform.resize",
"numpy.reshape",
"matplotlib.gridspec.GridSpec",
"os.path.join"
] |
[((1955, 1991), 'numpy.reshape', 'np.reshape', (['file_content', '(192, 256)'], {}), '(file_content, (192, 256))\n', (1965, 1991), True, 'import numpy as np\n'), ((2260, 2296), 'numpy.reshape', 'np.reshape', (['file_content', '(192, 256)'], {}), '(file_content, (192, 256))\n', (2270, 2296), True, 'import numpy as np\n'), ((2552, 2579), 'numpy.reshape', 'np.reshape', (['Y', '(1440, 1920)'], {}), '(Y, (1440, 1920))\n', (2562, 2579), True, 'import numpy as np\n'), ((2587, 2602), 'numpy.flip', 'np.flip', (['Y.T', '(1)'], {}), '(Y.T, 1)\n', (2594, 2602), True, 'import numpy as np\n'), ((3150, 3178), 'numpy.stack', 'np.stack', (['(R, G, B)'], {'axis': '(-1)'}), '((R, G, B), axis=-1)\n', (3158, 3178), True, 'import numpy as np\n'), ((3291, 3316), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3314, 3316), False, 'import argparse\n'), ((2723, 2748), 'numpy.reshape', 'np.reshape', (['U', '(720, 960)'], {}), '(U, (720, 960))\n', (2733, 2748), True, 'import numpy as np\n'), ((2749, 2774), 'numpy.reshape', 'np.reshape', (['V', '(720, 960)'], {}), '(V, (720, 960))\n', (2759, 2774), True, 'import numpy as np\n'), ((2784, 2799), 'numpy.flip', 'np.flip', (['U.T', '(1)'], {}), '(U.T, 1)\n', (2791, 2799), True, 'import numpy as np\n'), ((2801, 2816), 'numpy.flip', 'np.flip', (['V.T', '(1)'], {}), '(V.T, 1)\n', (2808, 2816), True, 'import numpy as np\n'), ((2949, 2981), 'skimage.transform.resize', 'resize', (['U', '(1920, 1440)'], {'order': '(0)'}), '(U, (1920, 1440), order=0)\n', (2955, 2981), False, 'from skimage.transform import resize\n'), ((2982, 3014), 'skimage.transform.resize', 'resize', (['V', '(1920, 1440)'], {'order': '(0)'}), '(V, (1920, 1440), order=0)\n', (2988, 3014), False, 'from skimage.transform import resize\n'), ((2003, 2022), 'numpy.flip', 'np.flip', (['depth.T', '(1)'], {}), '(depth.T, 1)\n', (2010, 2022), True, 'import numpy as np\n'), ((2307, 2325), 'numpy.flip', 'np.flip', (['conf.T', '(1)'], {}), '(conf.T, 1)\n', (2314, 2325), True, 'import numpy as np\n'), ((3479, 3496), 'os.path.join', 'join', (['args.d', '"""*"""'], {}), "(args.d, '*')\n", (3483, 3496), False, 'from os.path import join\n'), ((4018, 4055), 'os.makedirs', 'os.makedirs', (['save_path'], {'exist_ok': '(True)'}), '(save_path, exist_ok=True)\n', (4029, 4055), False, 'import os\n'), ((4832, 4860), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(14, 30)'}), '(figsize=(14, 30))\n', (4842, 4860), True, 'import matplotlib.pyplot as plt\n'), ((4879, 4953), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(1)', '(3)'], {'wspace': '(0.0)', 'hspace': '(0.0)', 'width_ratios': '[1, 1, 1.12]'}), '(1, 3, wspace=0.0, hspace=0.0, width_ratios=[1, 1, 1.12])\n', (4896, 4953), False, 'from matplotlib import gridspec\n'), ((4970, 4991), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[0, 0]'], {}), '(gs[0, 0])\n', (4981, 4991), True, 'import matplotlib.pyplot as plt\n'), ((5114, 5135), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[0, 1]'], {}), '(gs[0, 1])\n', (5125, 5135), True, 'import matplotlib.pyplot as plt\n'), ((5277, 5298), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[0, 2]'], {}), '(gs[0, 2])\n', (5288, 5298), True, 'import matplotlib.pyplot as plt\n'), ((5634, 5645), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5643, 5645), True, 'import matplotlib.pyplot as plt\n'), ((5703, 5731), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(14, 30)'}), '(figsize=(14, 30))\n', (5713, 5731), True, 'import matplotlib.pyplot as plt\n'), ((5750, 5824), 
'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(1)', '(3)'], {'wspace': '(0.0)', 'hspace': '(0.0)', 'width_ratios': '[1, 1, 1.12]'}), '(1, 3, wspace=0.0, hspace=0.0, width_ratios=[1, 1, 1.12])\n', (5767, 5824), False, 'from matplotlib import gridspec\n'), ((5841, 5862), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[0, 0]'], {}), '(gs[0, 0])\n', (5852, 5862), True, 'import matplotlib.pyplot as plt\n'), ((6014, 6035), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[0, 1]'], {}), '(gs[0, 1])\n', (6025, 6035), True, 'import matplotlib.pyplot as plt\n'), ((6206, 6227), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[0, 2]'], {}), '(gs[0, 2])\n', (6217, 6227), True, 'import matplotlib.pyplot as plt\n'), ((6591, 6602), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (6600, 6602), True, 'import matplotlib.pyplot as plt\n'), ((6934, 6971), 'os.makedirs', 'os.makedirs', (['save_path'], {'exist_ok': '(True)'}), '(save_path, exist_ok=True)\n', (6945, 6971), False, 'import os\n'), ((5529, 5563), 'os.path.join', 'join', (['save_path', '"""frame_first.png"""'], {}), "(save_path, 'frame_first.png')\n", (5533, 5563), False, 'from os.path import join\n'), ((6487, 6520), 'os.path.join', 'join', (['save_path', '"""frame_last.png"""'], {}), "(save_path, 'frame_last.png')\n", (6491, 6520), False, 'from os.path import join\n'), ((6651, 6682), 'os.path.join', 'join', (['save_path', '"""frame_bundle"""'], {}), "(save_path, 'frame_bundle')\n", (6655, 6682), False, 'from os.path import join\n'), ((7396, 7426), 'os.path.join', 'join', (['save_path', '"""info_bundle"""'], {}), "(save_path, 'info_bundle')\n", (7400, 7426), False, 'from os.path import join\n'), ((3755, 3786), 'os.path.join', 'join', (['bundle_name', '"""depth*.bin"""'], {}), "(bundle_name, 'depth*.bin')\n", (3759, 3786), False, 'from os.path import join\n'), ((3828, 3859), 'os.path.join', 'join', (['bundle_name', '"""image*.bin"""'], {}), "(bundle_name, 'image*.bin')\n", (3832, 3859), False, 'from os.path import join\n'), ((3902, 3932), 'os.path.join', 'join', (['bundle_name', '"""conf*.bin"""'], {}), "(bundle_name, 'conf*.bin')\n", (3906, 3932), False, 'from os.path import join\n'), ((6819, 6849), 'os.path.join', 'join', (['bundle_name', '"""info*.bin"""'], {}), "(bundle_name, 'info*.bin')\n", (6823, 6849), False, 'from os.path import join\n')]
|
from root.config.main import rAnk, mAster_rank, cOmm
from screws.freeze.main import FrozenOnly
import matplotlib.pyplot as plt
from matplotlib import cm
import numpy as np
class _3dCSCG_1Trace_Visualize(FrozenOnly):
"""The visualization property/component of standard forms."""
def __init__(self, tf):
self._tf_ = tf
self._freeze_self_()
def __call__(self, **kwargs):
"""When this object is called, we call the default visualizing method: ``tecplot``."""
return self.matplot(**kwargs)
def matplot(self, density=None, i=None,
plot_type='contourf',
colormap='RdBu',
num_color_bar_ticks=5):
"""
:param density:
:param i: Plot which trace elements?
:param plot_type: Plot type?
:param colormap:
:param num_color_bar_ticks:
:return:
"""
if density is None:
if plot_type == 'quiver':
density = 500
elif plot_type == 'contourf':
density = 10000
else:
raise NotImplementedError(f'3dCSCG 1Trace plot type={plot_type} is not implemented.')
else:
pass
mesh = self._tf_.mesh
density = int(np.sqrt(density/mesh.trace.elements.GLOBAL_num)) + 1
xi = eta = sigma = np.linspace(-1, 1, density)
xyz, v = self._tf_.reconstruct(xi, eta, sigma, i=i)
xyz = cOmm.gather(xyz, root=mAster_rank)
v = cOmm.gather(v, root=mAster_rank)
if rAnk != mAster_rank: return
XYZ = list()
Vx = list()
Vy = list()
Vz = list()
for _xyz_, _v_ in zip(xyz, v):
for i in _xyz_:
xyz_i = _xyz_[i]
vx_i, vy_i, vz_i = _v_[i]
XYZ.append(xyz_i)
Vx.append(vx_i)
Vy.append(vy_i)
Vz.append(vz_i)
Vx = np.array(Vx)
Vy = np.array(Vy)
Vz = np.array(Vz)
del xyz, v
if plot_type == 'quiver': # ================= quiver plot =====================================
fig = plt.figure(figsize=(8, 7))
ax = fig.add_subplot(111, projection='3d')
for i, xyz in enumerate(XYZ):
x, y, z = xyz
ax.plot_surface(x, y, z, color=(0.5,0.5,0.5,0.5))
vx = Vx[i]
vy = Vy[i]
vz = Vz[i]
ax.quiver(x, y, z, vx, vy, vz, color='r', linewidth=0.5)
ax.set_xlabel(r'$x$')
ax.set_ylabel(r'$y$')
ax.set_zlabel(r'$z$')
plt.show()
elif plot_type == 'contourf': # ================= contourf plot =====================================
cmap = getattr(cm, colormap)
fig = plt.figure(figsize=(15,6))
# x-component ----------------------------------------------------------
ax = fig.add_subplot(131, projection='3d')
ax.view_init(45, 60)
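            # Normalize Vx to [0, 1] so the facecolors span the full colormap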
MAX = np.max(Vx)
MIN = np.min(Vx)
if MAX == MIN:
MAX += 0.0001
bounds = MAX - MIN
Vx = Vx - MIN
Vx = Vx / bounds
ticks = np.linspace(MAX, MIN, num_color_bar_ticks)
for i, xyz in enumerate(XYZ):
x, y, z = xyz
v = Vx[i]
ax.plot_surface(x, y, z, facecolors=cmap(v))
mappable = cm.ScalarMappable(cmap=cmap)
mappable.set_array(np.array(ticks))
cb = plt.colorbar(mappable, ax=ax, # ticks=np.linspace(0,1,num_ticks),
shrink=1, aspect=20,# extend='min',
orientation='vertical', )
cb.ax.tick_params() # labelsize=13.5)
ax.set_xlabel(r'$x$')
ax.set_ylabel(r'$y$')
ax.set_zlabel(r'$z$')
plt.title('x-component')
# y-component -------------------------------------------------------------
ax = fig.add_subplot(132, projection='3d')
ax.view_init(45, 60)
MAX = np.max(Vy)
MIN = np.min(Vy)
if MAX == MIN:
MAX += 0.0001
bounds = MAX - MIN
Vy = Vy - MIN
Vy = Vy / bounds
ticks = np.linspace(MAX, MIN, num_color_bar_ticks)
for i, xyz in enumerate(XYZ):
x, y, z = xyz
v = Vy[i]
ax.plot_surface(x, y, z, facecolors=cmap(v))
mappable = cm.ScalarMappable(cmap=cmap)
mappable.set_array(np.array(ticks))
cb = plt.colorbar(mappable, ax=ax, # ticks=np.linspace(0,1,num_ticks),
shrink=1, aspect=20, # extend='min',
orientation='vertical', )
cb.ax.tick_params() # labelsize=13.5)
ax.set_xlabel(r'$x$')
ax.set_ylabel(r'$y$')
ax.set_zlabel(r'$z$')
plt.title('y-component')
# z-component -------------------------------------------------------
ax = fig.add_subplot(133, projection='3d')
ax.view_init(45, 60)
MAX = np.max(Vz)
MIN = np.min(Vz)
if MAX == MIN:
MAX += 0.0001
bounds = MAX - MIN
Vz = Vz - MIN
Vz = Vz / bounds
ticks = np.linspace(MAX, MIN, num_color_bar_ticks)
for i, xyz in enumerate(XYZ):
x, y, z = xyz
v = Vz[i]
ax.plot_surface(x, y, z, facecolors=cmap(v))
mappable = cm.ScalarMappable(cmap=cmap)
mappable.set_array(np.array(ticks))
cb = plt.colorbar(mappable, ax=ax, # ticks=np.linspace(0,1,num_ticks),
shrink=1, aspect=20, # extend='min',
orientation='vertical', )
cb.ax.tick_params() # labelsize=13.5)
ax.set_xlabel(r'$x$')
ax.set_ylabel(r'$y$')
ax.set_zlabel(r'$z$')
plt.title('z-component')
plt.show()
else:
raise NotImplementedError(f'3dCSCG 1Trace plot type={plot_type} is not implemented.')
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.cm.ScalarMappable",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.figure",
"numpy.max",
"numpy.array",
"numpy.min",
"numpy.linspace",
"root.config.main.cOmm.gather",
"numpy.sqrt"
] |
[((1384, 1411), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', 'density'], {}), '(-1, 1, density)\n', (1395, 1411), True, 'import numpy as np\n'), ((1486, 1520), 'root.config.main.cOmm.gather', 'cOmm.gather', (['xyz'], {'root': 'mAster_rank'}), '(xyz, root=mAster_rank)\n', (1497, 1520), False, 'from root.config.main import rAnk, mAster_rank, cOmm\n'), ((1533, 1565), 'root.config.main.cOmm.gather', 'cOmm.gather', (['v'], {'root': 'mAster_rank'}), '(v, root=mAster_rank)\n', (1544, 1565), False, 'from root.config.main import rAnk, mAster_rank, cOmm\n'), ((1974, 1986), 'numpy.array', 'np.array', (['Vx'], {}), '(Vx)\n', (1982, 1986), True, 'import numpy as np\n'), ((2000, 2012), 'numpy.array', 'np.array', (['Vy'], {}), '(Vy)\n', (2008, 2012), True, 'import numpy as np\n'), ((2026, 2038), 'numpy.array', 'np.array', (['Vz'], {}), '(Vz)\n', (2034, 2038), True, 'import numpy as np\n'), ((2181, 2207), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 7)'}), '(figsize=(8, 7))\n', (2191, 2207), True, 'import matplotlib.pyplot as plt\n'), ((2672, 2682), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2680, 2682), True, 'import matplotlib.pyplot as plt\n'), ((1304, 1353), 'numpy.sqrt', 'np.sqrt', (['(density / mesh.trace.elements.GLOBAL_num)'], {}), '(density / mesh.trace.elements.GLOBAL_num)\n', (1311, 1353), True, 'import numpy as np\n'), ((2855, 2882), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 6)'}), '(figsize=(15, 6))\n', (2865, 2882), True, 'import matplotlib.pyplot as plt\n'), ((3074, 3084), 'numpy.max', 'np.max', (['Vx'], {}), '(Vx)\n', (3080, 3084), True, 'import numpy as np\n'), ((3103, 3113), 'numpy.min', 'np.min', (['Vx'], {}), '(Vx)\n', (3109, 3113), True, 'import numpy as np\n'), ((3277, 3319), 'numpy.linspace', 'np.linspace', (['MAX', 'MIN', 'num_color_bar_ticks'], {}), '(MAX, MIN, num_color_bar_ticks)\n', (3288, 3319), True, 'import numpy as np\n'), ((3504, 3532), 'matplotlib.cm.ScalarMappable', 'cm.ScalarMappable', ([], {'cmap': 'cmap'}), '(cmap=cmap)\n', (3521, 3532), False, 'from matplotlib import cm\n'), ((3598, 3672), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['mappable'], {'ax': 'ax', 'shrink': '(1)', 'aspect': '(20)', 'orientation': '"""vertical"""'}), "(mappable, ax=ax, shrink=1, aspect=20, orientation='vertical')\n", (3610, 3672), True, 'import matplotlib.pyplot as plt\n'), ((3951, 3975), 'matplotlib.pyplot.title', 'plt.title', (['"""x-component"""'], {}), "('x-component')\n", (3960, 3975), True, 'import matplotlib.pyplot as plt\n'), ((4171, 4181), 'numpy.max', 'np.max', (['Vy'], {}), '(Vy)\n', (4177, 4181), True, 'import numpy as np\n'), ((4200, 4210), 'numpy.min', 'np.min', (['Vy'], {}), '(Vy)\n', (4206, 4210), True, 'import numpy as np\n'), ((4374, 4416), 'numpy.linspace', 'np.linspace', (['MAX', 'MIN', 'num_color_bar_ticks'], {}), '(MAX, MIN, num_color_bar_ticks)\n', (4385, 4416), True, 'import numpy as np\n'), ((4601, 4629), 'matplotlib.cm.ScalarMappable', 'cm.ScalarMappable', ([], {'cmap': 'cmap'}), '(cmap=cmap)\n', (4618, 4629), False, 'from matplotlib import cm\n'), ((4695, 4769), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['mappable'], {'ax': 'ax', 'shrink': '(1)', 'aspect': '(20)', 'orientation': '"""vertical"""'}), "(mappable, ax=ax, shrink=1, aspect=20, orientation='vertical')\n", (4707, 4769), True, 'import matplotlib.pyplot as plt\n'), ((5051, 5075), 'matplotlib.pyplot.title', 'plt.title', (['"""y-component"""'], {}), "('y-component')\n", (5060, 5075), True, 'import matplotlib.pyplot as plt\n'), ((5265, 
5275), 'numpy.max', 'np.max', (['Vz'], {}), '(Vz)\n', (5271, 5275), True, 'import numpy as np\n'), ((5294, 5304), 'numpy.min', 'np.min', (['Vz'], {}), '(Vz)\n', (5300, 5304), True, 'import numpy as np\n'), ((5468, 5510), 'numpy.linspace', 'np.linspace', (['MAX', 'MIN', 'num_color_bar_ticks'], {}), '(MAX, MIN, num_color_bar_ticks)\n', (5479, 5510), True, 'import numpy as np\n'), ((5695, 5723), 'matplotlib.cm.ScalarMappable', 'cm.ScalarMappable', ([], {'cmap': 'cmap'}), '(cmap=cmap)\n', (5712, 5723), False, 'from matplotlib import cm\n'), ((5789, 5863), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['mappable'], {'ax': 'ax', 'shrink': '(1)', 'aspect': '(20)', 'orientation': '"""vertical"""'}), "(mappable, ax=ax, shrink=1, aspect=20, orientation='vertical')\n", (5801, 5863), True, 'import matplotlib.pyplot as plt\n'), ((6145, 6169), 'matplotlib.pyplot.title', 'plt.title', (['"""z-component"""'], {}), "('z-component')\n", (6154, 6169), True, 'import matplotlib.pyplot as plt\n'), ((6183, 6193), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6191, 6193), True, 'import matplotlib.pyplot as plt\n'), ((3564, 3579), 'numpy.array', 'np.array', (['ticks'], {}), '(ticks)\n', (3572, 3579), True, 'import numpy as np\n'), ((4661, 4676), 'numpy.array', 'np.array', (['ticks'], {}), '(ticks)\n', (4669, 4676), True, 'import numpy as np\n'), ((5755, 5770), 'numpy.array', 'np.array', (['ticks'], {}), '(ticks)\n', (5763, 5770), True, 'import numpy as np\n')]
|
import sympy as sy
from sympy.physics import mechanics as mc
import numpy as np
from sympy import sympify, nsimplify
from forward_kinematics import forward
from sympy import Integral, Matrix, pi, pprint
def Inverse_kin(T0_4, T0_3, T0_2, T0_1, X):
    # Builds the geometric Jacobian used for (differential) inverse kinematics
f=T0_4[:3,3]
J_half=f.jacobian(X)
# J_otherhalf=T0_1[:3,2].row_join(T0_2[:3,2].row_join(T0_3[:3,2].row_join(T0_4[:3,2].row_join(T0_5[:3,2]))))
J_otherhalf=T0_1[:3,2].row_join(T0_2[:3,2].row_join(T0_3[:3,2].row_join(T0_4[:3,2])))
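    # Full 6x4 geometric Jacobian: position Jacobian (linear part) stacked on the joint z-axes (angular part), assuming revolute joints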
J=J_half.col_join(J_otherhalf)
J=nsimplify(J,tolerance=1e-3,rational=True)
# print(J)
return J
if __name__ == "__main__" :
R, theta, alpha, a, d, theta1, theta2, theta3, theta4, theta5, d1, d2, d3, d4, d5 = sy.symbols('R, theta, alpha, a, d, theta1, theta2, theta3, theta4, theta5, d1, d2, d3, d4, d5')
pi=np.pi
X = [theta1, theta2, theta3, theta4]
# Solution 0 0 550
X_sub = [0,0,0,0]
T0_4, T0_3, T0_2, T0_1 = forward()
T0_f=T0_4.subs({theta1:X_sub[0],theta2:X_sub[1],theta3:X_sub[2],theta4:X_sub[3], d1:150, d2:0, d3:0, d4:400})
f_x, f_y, f_z = T0_f[0,3], T0_f[1,3], T0_f[2,3]
print(f'Locations : {f_x}, {f_y}, {f_z}')
    print('Location calculated from the input theta values (forward kinematics). Using these location values, we validate the joint angles below.')
J = Inverse_kin(T0_4, T0_3, T0_2, T0_1, X)
J_val=J.subs({theta1:X_sub[0],theta2:X_sub[1],theta3:X_sub[2],theta4:X_sub[3], d1:150, d2:0, d3:0, d4:400})
J_val= nsimplify(J_val,tolerance=1e-3,rational=True)
J_val=np.array(J_val,dtype='float')
# print(f'Jacobian for joint angles: {X_sub}')
# pprint(J_val)
J_inv=np.linalg.pinv(J_val)
J_inv= nsimplify(J_inv,tolerance=1e-3,rational=True)
print("Inverse kinematics Validation")
print(f'Location of end effector {[f_x, f_y, f_z, 0, 0, 0]}')
pos = np.matrix([f_x, f_y, f_z, 0, 0, 0])
# pos = np.matrix([0, 0, -150, 0, 0, 0])
j_a =([email protected])*pi
print('Joint Angles')
print(f'Theta1 : {j_a[0][0].flatten()}')
print(f'Theta2 : {j_a[1][0].flatten()}')
print(f'Theta3 : {j_a[2][0].flatten()}')
print(f'Theta4 : {j_a[3][0].flatten()}')
print(f'Theta5 : [[0]]')
|
[
"sympy.symbols",
"forward_kinematics.forward",
"numpy.matrix",
"numpy.array",
"sympy.nsimplify",
"numpy.linalg.pinv"
] |
[((572, 616), 'sympy.nsimplify', 'nsimplify', (['J'], {'tolerance': '(0.001)', 'rational': '(True)'}), '(J, tolerance=0.001, rational=True)\n', (581, 616), False, 'from sympy import sympify, nsimplify\n'), ((760, 865), 'sympy.symbols', 'sy.symbols', (['"""R, theta, alpha, a, d, theta1, theta2, theta3, theta4, theta5, d1, d2, d3, d4, d5"""'], {}), "(\n 'R, theta, alpha, a, d, theta1, theta2, theta3, theta4, theta5, d1, d2, d3, d4, d5'\n )\n", (770, 865), True, 'import sympy as sy\n'), ((991, 1000), 'forward_kinematics.forward', 'forward', ([], {}), '()\n', (998, 1000), False, 'from forward_kinematics import forward\n'), ((1516, 1564), 'sympy.nsimplify', 'nsimplify', (['J_val'], {'tolerance': '(0.001)', 'rational': '(True)'}), '(J_val, tolerance=0.001, rational=True)\n', (1525, 1564), False, 'from sympy import sympify, nsimplify\n'), ((1572, 1602), 'numpy.array', 'np.array', (['J_val'], {'dtype': '"""float"""'}), "(J_val, dtype='float')\n", (1580, 1602), True, 'import numpy as np\n'), ((1688, 1709), 'numpy.linalg.pinv', 'np.linalg.pinv', (['J_val'], {}), '(J_val)\n', (1702, 1709), True, 'import numpy as np\n'), ((1721, 1769), 'sympy.nsimplify', 'nsimplify', (['J_inv'], {'tolerance': '(0.001)', 'rational': '(True)'}), '(J_inv, tolerance=0.001, rational=True)\n', (1730, 1769), False, 'from sympy import sympify, nsimplify\n'), ((1888, 1923), 'numpy.matrix', 'np.matrix', (['[f_x, f_y, f_z, 0, 0, 0]'], {}), '([f_x, f_y, f_z, 0, 0, 0])\n', (1897, 1923), True, 'import numpy as np\n')]
|
from __future__ import print_function, division
import logging
from time import time
import numpy as np
from ...core.exceptions import IncompatibleAttribute
from ...core.util import Pointer, split_component_view
from ...utils import view_shape, stack_view, color2rgb
from ...clients.image_client import ImageClient
from ...clients.layer_artist import (LayerArtistBase,
ImageLayerBase, SubsetImageLayerBase)
from ginga.util import wcsmod
from ginga.misc import Bunch
wcsmod.use('astropy')
from ginga import AstroImage, BaseImage
class GingaClient(ImageClient):
def __init__(self, data, canvas=None, artist_container=None):
super(GingaClient, self).__init__(data, artist_container)
self._setup_ginga(canvas)
def _setup_ginga(self, canvas):
if canvas is None:
raise ValueError("GingaClient needs a canvas")
self._canvas = canvas
self._wcs = None
self._crosshair_id = '_crosshair'
def _new_rgb_layer(self, layer):
raise NotImplementedError()
def _new_subset_image_layer(self, layer):
return GingaSubsetImageLayer(layer, self._canvas)
def _new_image_layer(self, layer):
return GingaImageLayer(layer, self._canvas)
def _new_scatter_layer(self, layer):
raise NotImplementedError()
def _update_axis_labels(self):
pass
def set_cmap(self, cmap):
self._canvas.set_cmap(cmap)
def show_crosshairs(self, x, y):
self.clear_crosshairs()
c = self._canvas.viewer.getDrawClass('point')(x, y, 6, color='red',
style='plus')
self._canvas.add(c, tag=self._crosshair_id, redraw=True)
def clear_crosshairs(self):
try:
self._canvas.deleteObjectsByTag([self._crosshair_id], redraw=False)
except:
pass
class GingaLayerArtist(LayerArtistBase):
zorder = Pointer('_zorder')
visible = Pointer('_visible')
def __init__(self, layer, canvas):
super(GingaLayerArtist, self).__init__(layer)
self._canvas = canvas
self._visible = True
def redraw(self, whence=0):
self._canvas.redraw(whence=whence)
class GingaImageLayer(GingaLayerArtist, ImageLayerBase):
# unused by Ginga
cmap = None
norm = None
def __init__(self, layer, canvas):
super(GingaImageLayer, self).__init__(layer, canvas)
self._override_image = None
self._tag = "layer%s_%s" % (layer.label, time())
self._img = None # DataImage instance
self._enabled = True
@property
def visible(self):
return self._visible
@visible.setter
def visible(self, value):
if self._visible == value:
return
self._visible = value
if not value:
self.clear()
elif self._img:
self._canvas.set_image(self._img)
@property
def zorder(self):
return self._zorder
@zorder.setter
def zorder(self, value):
self._zorder = value
try:
canvas_img = self._canvas.getObjectByTag('_image')
canvas_img.set_zorder(value)
except KeyError:
# object does not yet exist on canvas
pass
def set_norm(self, **kwargs):
# NOP for ginga
pass
def clear_norm(self):
# NOP for ginga
pass
def override_image(self, image):
"""Temporarily show a different image"""
self._override_image = image
def clear_override(self):
self._override_image = None
def clear(self):
# remove previously added image
try:
self._canvas.deleteObjectsByTag(['_image'], redraw=False)
except:
pass
@property
def enabled(self):
return self._enabled
def update(self, view, transpose=False):
if not self.visible:
return
# update ginga model
comp, view = split_component_view(view)
if self._img is None:
self._img = DataImage(self.layer, comp, view, transpose)
self._canvas.set_image(self._img)
self._img.data = self.layer
self._img.component = comp
self._img.view = view
self._img.transpose = transpose
self._img.override_image = self._override_image
self.redraw()
class GingaSubsetImageLayer(GingaLayerArtist, SubsetImageLayerBase):
def __init__(self, layer, canvas):
super(GingaSubsetImageLayer, self).__init__(layer, canvas)
self._img = None
self._cimg = None
self._tag = "layer%s_%s" % (layer.label, time())
self._enabled = True
@property
def visible(self):
return self._visible
@property
def enabled(self):
return self._enabled
@visible.setter
def visible(self, value):
if value is self._visible:
return
self._visible = value
if not value:
self.clear()
elif self._cimg:
self._canvas.add(self._cimg, tag=self._tag, redraw=True)
@property
def zorder(self):
return self._zorder
@zorder.setter
def zorder(self, value):
self._zorder = value
try:
canvas_img = self._canvas.getObjectByTag(self._tag)
canvas_img.set_zorder(value)
except KeyError:
# object does not yet exist on canvas
pass
def clear(self):
try:
self._canvas.deleteObjectsByTag([self._tag], redraw=True)
except:
pass
def _update_ginga_models(self, view, transpose=False):
subset = self.layer
logging.getLogger(__name__).debug("View into subset %s is %s", self.layer, view)
_, view = split_component_view(view) # discard ComponentID
r, g, b = color2rgb(self.layer.style.color)
if self._img is None:
self._img = SubsetImage(subset, view)
if self._cimg is None:
# SubsetImages can't be added to canvases directly. Need
# to wrap into a ginga canvas type.
Image = self._canvas.getDrawClass('image')
self._cimg = Image(0, 0, self._img, alpha=0.5, flipy=False)
self._img.view = view
self._img.color = (r, g, b)
self._img.transpose = transpose
def _check_enabled(self):
"""
Sync the enabled/disabled status, based on whether
mask is computable
"""
self._enabled = True
try:
# the first pixel
view = tuple(0 for _ in self.layer.data.shape)
self.layer.to_mask(view)
except IncompatibleAttribute as exc:
self._enabled = False
self.disable_invalid_attributes(*exc.args)
return self._enabled
def _ensure_added(self):
""" Add artist to canvas if needed """
try:
self._canvas.getObjectByTag(self._tag)
except KeyError:
self._canvas.add(self._cimg, tag=self._tag, redraw=False)
def update(self, view, transpose=False):
self._check_enabled()
self._update_ginga_models(view, transpose)
if self._enabled and self._visible:
self._ensure_added()
else:
self.clear()
self.redraw(whence=0)
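# Stub assigned over the ginga data-mutation methods below so they cannot copy or overwrite Glue-owned data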
def forbidden(*args):
raise ValueError("Forbidden")
class DataImage(AstroImage.AstroImage):
"""
A Ginga image subclass to interface with Glue Data objects
"""
get_data = _get_data = copy_data = set_data = get_array = transfer = forbidden
def __init__(self, data, component, view, transpose=False,
override_image=None, **kwargs):
"""
Parameters
----------
data : glue.core.data.Data
The data to image
component : glue.core.data.ComponentID
The ComponentID in the data to image
view : numpy-style view
The view into the data to image. Must produce a 2D array
transpose : bool
Whether to transpose the view
override_image : numpy array (optional)
Whether to show override_image instead of the view into the data.
The override image must have the same shape as the 2D view into
the data.
kwargs : dict
Extra kwargs are passed to the superclass
"""
self.transpose = transpose
self.view = view
self.data = data
self.component = component
self.override_image = None
super(DataImage, self).__init__(**kwargs)
@property
def shape(self):
"""
The shape of the 2D view into the data
"""
result = view_shape(self.data.shape, self.view)
if self.transpose:
result = result[::-1]
return result
def _get_fast_data(self):
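        # Coarse preview: subsample every 10th pixel along each axis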
return self._slice((slice(None, None, 10), slice(None, None, 10)))
def _slice(self, view):
"""
Extract a view from the 2D image.
"""
if self.override_image is not None:
return self.override_image[view]
# Combining multiple views: First a 2D slice into an ND array, then
# the requested view from this slice
if self.transpose:
views = [self.view, 'transpose', view]
else:
views = [self.view, view]
view = stack_view(self.data.shape, *views)
return self.data[self.component, view]
class SubsetImage(BaseImage.BaseImage):
"""
A Ginga image subclass to interface with Glue subset objects
"""
get_data = _get_data = copy_data = set_data = get_array = transfer = forbidden
def __init__(self, subset, view, color=(0, 1, 0), transpose=False, **kwargs):
"""
Parameters
----------
subset : glue.core.subset.Subset
The subset to image
view : numpy-style view
The view into the subset to image. Must produce a 2D array
color : tuple of 3 floats in range [0, 1]
The color to image the subset as
transpose : bool
Whether to transpose the view
kwargs : dict
Extra kwargs are passed to the ginga superclass
"""
super(SubsetImage, self).__init__(**kwargs)
self.subset = subset
self.view = view
self.transpose = transpose
self.color = color
self.order = 'RGBA'
@property
def shape(self):
"""
Shape of the 2D view into the subset mask
"""
result = view_shape(self.subset.data.shape, self.view)
if self.transpose:
result = result[::-1]
return tuple(list(result) + [4]) # 4th dim is RGBA channels
def _rgb_from_mask(self, mask):
"""
Turn a boolean mask into a 4-channel RGBA image
"""
r, g, b = self.color
ones = mask * 0 + 255
alpha = mask * 127
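        # Solid subset color everywhere; ~50% alpha where the mask is True, fully transparent elsewhere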
result = np.dstack((ones * r, ones * g, ones * b, alpha)).astype(np.uint8)
return result
def _get_fast_data(self):
return self._slice((slice(None, None, 10), slice(None, None, 10)))
def _slice(self, view):
"""
Extract a view from the 2D subset mask.
"""
# Combining multiple views: First a 2D slice into an ND array, then
# the requested view from this slice
if self.transpose:
views = [self.view, 'transpose', view]
else:
views = [self.view, view]
view = stack_view(self.subset.data.shape, *views)
mask = self.subset.to_mask(view)
return self._rgb_from_mask(mask)
def _set_minmax(self):
# we already know the data bounds
self.minval = 0
self.maxval = 256
self.minval_noinf = self.minval
self.maxval_noinf = self.maxval
def get_scaled_cutout_wdht(self, x1, y1, x2, y2, new_wd, new_ht):
doit = getattr(self, '_doit', False)
self._doit = not doit
# default implementation if downsampling
if doit or new_wd <= (x2 - x1 + 1) or new_ht <= (y2 - y1 + 1):
return super(SubsetImage, self).get_scaled_cutout_wdht(x1, y1, x2, y2, new_wd, new_ht)
# if upsampling, prevent extra to_mask() computation
x1, x2 = np.clip([x1, x2], 0, self.width - 2).astype(np.int)
y1, y2 = np.clip([y1, y2], 0, self.height - 2).astype(np.int)
result = self._slice(np.s_[y1:y2 + 1, x1:x2 + 1])
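        # Nearest-neighbor upsampling: gather with clipped integer index grids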
yi = np.linspace(0, result.shape[0], new_ht).astype(np.int).reshape(-1, 1).clip(0, result.shape[0] - 1)
xi = np.linspace(0, result.shape[1], new_wd).astype(np.int).reshape(1, -1).clip(0, result.shape[1] - 1)
yi, xi = [np.array(a) for a in np.broadcast_arrays(yi, xi)]
result = result[yi, xi]
scale_x = 1.0 * result.shape[1] / (x2 - x1 + 1)
scale_y = 1.0 * result.shape[0] / (y2 - y1 + 1)
return Bunch.Bunch(data=result, scale_x=scale_x, scale_y=scale_y)
|
[
"numpy.dstack",
"ginga.util.wcsmod.use",
"numpy.clip",
"time.time",
"numpy.array",
"ginga.misc.Bunch.Bunch",
"numpy.linspace",
"numpy.broadcast_arrays",
"logging.getLogger"
] |
[((508, 529), 'ginga.util.wcsmod.use', 'wcsmod.use', (['"""astropy"""'], {}), "('astropy')\n", (518, 529), False, 'from ginga.util import wcsmod\n'), ((12980, 13038), 'ginga.misc.Bunch.Bunch', 'Bunch.Bunch', ([], {'data': 'result', 'scale_x': 'scale_x', 'scale_y': 'scale_y'}), '(data=result, scale_x=scale_x, scale_y=scale_y)\n', (12991, 13038), False, 'from ginga.misc import Bunch\n'), ((12769, 12780), 'numpy.array', 'np.array', (['a'], {}), '(a)\n', (12777, 12780), True, 'import numpy as np\n'), ((2535, 2541), 'time.time', 'time', ([], {}), '()\n', (2539, 2541), False, 'from time import time\n'), ((4680, 4686), 'time.time', 'time', ([], {}), '()\n', (4684, 4686), False, 'from time import time\n'), ((5715, 5742), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (5732, 5742), False, 'import logging\n'), ((11013, 11061), 'numpy.dstack', 'np.dstack', (['(ones * r, ones * g, ones * b, alpha)'], {}), '((ones * r, ones * g, ones * b, alpha))\n', (11022, 11061), True, 'import numpy as np\n'), ((12345, 12381), 'numpy.clip', 'np.clip', (['[x1, x2]', '(0)', '(self.width - 2)'], {}), '([x1, x2], 0, self.width - 2)\n', (12352, 12381), True, 'import numpy as np\n'), ((12414, 12451), 'numpy.clip', 'np.clip', (['[y1, y2]', '(0)', '(self.height - 2)'], {}), '([y1, y2], 0, self.height - 2)\n', (12421, 12451), True, 'import numpy as np\n'), ((12790, 12817), 'numpy.broadcast_arrays', 'np.broadcast_arrays', (['yi', 'xi'], {}), '(yi, xi)\n', (12809, 12817), True, 'import numpy as np\n'), ((12540, 12579), 'numpy.linspace', 'np.linspace', (['(0)', 'result.shape[0]', 'new_ht'], {}), '(0, result.shape[0], new_ht)\n', (12551, 12579), True, 'import numpy as np\n'), ((12652, 12691), 'numpy.linspace', 'np.linspace', (['(0)', 'result.shape[1]', 'new_wd'], {}), '(0, result.shape[1], new_wd)\n', (12663, 12691), True, 'import numpy as np\n')]
|
"""
Copyright (c) 2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import pytest
import numpy as np
import tensorflow as tf
from nncf.tensorflow.layers.wrapper import NNCFWrapper
from nncf.tensorflow.layers.custom_objects import NNCF_QUANTIZATION_OPERATIONS
from nncf.tensorflow.quantization.quantizers import TFQuantizerSpec
from nncf.tensorflow.quantization.quantizers import QuantizerConfig
from nncf.tensorflow.quantization.quantizers import Quantizer
from nncf.tensorflow.quantization.utils import apply_overflow_fix_to_layer
from nncf.common.quantization.structs import QuantizationMode
DIM_SPLIT = 1000
EPS = 1e-6
def check_quantized_values_equals(y_train, y_val, eps, range_len, narrow_range):
diff = np.abs(y_val - y_train)
if np.max(diff) > eps:
        # If any point lands really close to the middle of a quant,
        # it can change its quant due to rounding error
outlayers = diff[diff > eps]
quant_len = range_len / (128 - (2 if narrow_range else 1))
assert (np.abs(outlayers - quant_len) < eps).all(), 'Quants are completely different'
        assert False, 'Some values moved to the neighboring quant, possibly because these values land ' \
                      'really close to the middle of the quant. ' \
                      f'Position of values: {np.where(diff > eps)[0].tolist()}'
@pytest.mark.parametrize('bits,low,range_,narrow_range,ref',
[(7, -1, 2, False, -128 / 127),
(7, -2, 2, True, -2)], ids=['full_range', 'narrow_range'])
def test_min_adj(bits, low, range_, narrow_range, ref):
res = Quantizer._min_adj(bits, low, range_, narrow_range).numpy() # pylint: disable=protected-access
assert abs(res - ref) < EPS
def get_weights_for_overflow_issue_test(low, range_len, narrow_range, init_w_as_middle_points):
if init_w_as_middle_points:
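        # 7-bit (half-range) quant step: the range is divided into 127 steps, or 126 when narrow_range is set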
quant_len = range_len / (128 - (2 if narrow_range else 1))
if low > EPS:
# Range greater than zero
mid_points = [(i + 1 / 2) * quant_len for i in range(127)]
elif low + range_len < EPS:
# Range lower than zero
mid_points = [-(i + 1 / 2) * quant_len for i in range(127)]
else:
# Range with zero
min_adj = Quantizer._min_adj(7, low, range_len, narrow_range).numpy() # pylint: disable=protected-access
mid_points = [min_adj + (i + 1 / 2) * quant_len for i in range(127)]
new_w = mid_points * int(np.round(0.5 + DIM_SPLIT / 128))
new_w = tf.reshape(tf.constant(new_w[:DIM_SPLIT], dtype=tf.float32), (1, -1))
else:
new_w = tf.reshape(tf.constant(
np.linspace(low - 0.5, low + range_len + 0.5, DIM_SPLIT),
dtype=tf.float32), (1, -1))
return new_w
@pytest.mark.parametrize('per_ch', [False, True], ids=['per_tensor', 'per_channel'])
@pytest.mark.parametrize('init_w_as_middle_points', [False, True], ids=['', 'middle_points'])
@pytest.mark.parametrize('narrow_range', [False, True], ids=['full_range', 'narrow_range'])
class TestQuantizedWeightsEqualAfterFixApplied:
@pytest.mark.parametrize('signedness_to_force', [True, False], ids=['signed', 'unsigned'])
def test_symmetric_quantized_weights_equal_after_fix_applied(self, per_ch, signedness_to_force,
init_w_as_middle_points, narrow_range):
qconfig = QuantizerConfig(
num_bits=8,
mode=QuantizationMode.SYMMETRIC,
signedness_to_force=signedness_to_force,
per_channel=per_ch)
qspec = TFQuantizerSpec.from_config(
qconfig,
narrow_range=narrow_range,
half_range=True)
op_name = 'quantizer'
weight_attr = 'kernel'
layer = tf.keras.layers.Dense(DIM_SPLIT)
layer = NNCFWrapper(layer)
quantizer_cls = NNCF_QUANTIZATION_OPERATIONS.get(qspec.mode)
quantizer = quantizer_cls(op_name, qspec)
layer.registry_weight_operation(weight_attr, quantizer)
layer.build(1)
# Set layer weights
ref_signed_var = -1 if signedness_to_force else 0
ref_scale = 1
low = ref_scale * ref_signed_var
range_len = (1 - ref_signed_var) * ref_scale
new_w = get_weights_for_overflow_issue_test(low, range_len, narrow_range, init_w_as_middle_points)
layer.get_layer_weight(weight_attr).assign(new_w)
# Check quantizer weights
ops_weights = layer.ops_weights[op_name]
assert (ops_weights['scale_var'].numpy() == ref_scale).all()
assert (ops_weights['signed_var'].numpy() == ref_signed_var).all()
w_int7 = layer(tf.ones((1, 1))).numpy()
if init_w_as_middle_points:
quant_len = range_len / (128 - (2 if narrow_range else 1))
assert (np.abs(np.abs(w_int7 - new_w) - quant_len / 2) < 1e-6).all(), 'Middle points calculated incorrectly'
apply_overflow_fix_to_layer(layer, 'kernel', quantizer)
assert not quantizer._half_range # pylint: disable=protected-access
w_int8 = layer(tf.ones((1, 1))).numpy()
check_quantized_values_equals(w_int7, w_int8, EPS, range_len, narrow_range)
@pytest.mark.parametrize('low,range_len', [(-1, 2), (-5, 4), (3, 2)],
ids=['zero_in_range', 'max_less_than_zero', 'low_greater_than_zero'])
def test_asymmetric_quantized_weights_equal_after_fix_applied(self, low, range_len, per_ch,
init_w_as_middle_points, narrow_range):
qconfig = QuantizerConfig(
num_bits=8,
mode=QuantizationMode.ASYMMETRIC,
per_channel=per_ch)
qspec = TFQuantizerSpec.from_config(
qconfig,
narrow_range=narrow_range,
half_range=True)
op_name = 'quantizer'
weight_attr = 'kernel'
layer = tf.keras.layers.Dense(DIM_SPLIT)
layer = NNCFWrapper(layer)
quantizer_cls = NNCF_QUANTIZATION_OPERATIONS.get(qspec.mode)
quantizer = quantizer_cls(op_name, qspec)
layer.registry_weight_operation(weight_attr, quantizer)
layer.build(1)
# Set layer weights
new_w = get_weights_for_overflow_issue_test(low, range_len, narrow_range, init_w_as_middle_points)
layer.get_layer_weight(weight_attr).assign(new_w)
# Set quantizer weights
if per_ch:
low = tf.repeat(tf.constant([low], dtype=tf.float32), repeats=[DIM_SPLIT])
range_len = tf.repeat(tf.constant([range_len], dtype=tf.float32), repeats=[DIM_SPLIT])
ops_weights = layer.ops_weights[op_name]
ops_weights['input_low_var'].assign(low)
ops_weights['input_range_var'].assign(range_len)
w_int7 = layer(tf.ones((1, 1))).numpy()
if init_w_as_middle_points:
quant_len = range_len / (128 - (2 if narrow_range else 1))
assert (np.abs(np.abs(w_int7 - new_w) - quant_len / 2) < EPS).all(), 'Middle points calculated incorrectly'
apply_overflow_fix_to_layer(layer, 'kernel', quantizer)
assert not quantizer._half_range # pylint: disable=protected-access
w_int8 = layer(tf.ones((1, 1))).numpy()
check_quantized_values_equals(w_int7, w_int8, EPS, range_len, narrow_range)
|
[
"nncf.tensorflow.quantization.quantizers.QuantizerConfig",
"tensorflow.ones",
"numpy.abs",
"nncf.tensorflow.quantization.quantizers.TFQuantizerSpec.from_config",
"tensorflow.keras.layers.Dense",
"nncf.tensorflow.quantization.utils.apply_overflow_fix_to_layer",
"nncf.tensorflow.layers.custom_objects.NNCF_QUANTIZATION_OPERATIONS.get",
"tensorflow.constant",
"numpy.max",
"numpy.where",
"nncf.tensorflow.layers.wrapper.NNCFWrapper",
"numpy.linspace",
"pytest.mark.parametrize",
"numpy.round",
"nncf.tensorflow.quantization.quantizers.Quantizer._min_adj"
] |
[((1862, 2021), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""bits,low,range_,narrow_range,ref"""', '[(7, -1, 2, False, -128 / 127), (7, -2, 2, True, -2)]'], {'ids': "['full_range', 'narrow_range']"}), "('bits,low,range_,narrow_range,ref', [(7, -1, 2, \n False, -128 / 127), (7, -2, 2, True, -2)], ids=['full_range',\n 'narrow_range'])\n", (1885, 2021), False, 'import pytest\n'), ((3321, 3408), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""per_ch"""', '[False, True]'], {'ids': "['per_tensor', 'per_channel']"}), "('per_ch', [False, True], ids=['per_tensor',\n 'per_channel'])\n", (3344, 3408), False, 'import pytest\n'), ((3406, 3502), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""init_w_as_middle_points"""', '[False, True]'], {'ids': "['', 'middle_points']"}), "('init_w_as_middle_points', [False, True], ids=['',\n 'middle_points'])\n", (3429, 3502), False, 'import pytest\n'), ((3500, 3594), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""narrow_range"""', '[False, True]'], {'ids': "['full_range', 'narrow_range']"}), "('narrow_range', [False, True], ids=['full_range',\n 'narrow_range'])\n", (3523, 3594), False, 'import pytest\n'), ((1230, 1253), 'numpy.abs', 'np.abs', (['(y_val - y_train)'], {}), '(y_val - y_train)\n', (1236, 1253), True, 'import numpy as np\n'), ((3644, 3737), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""signedness_to_force"""', '[True, False]'], {'ids': "['signed', 'unsigned']"}), "('signedness_to_force', [True, False], ids=['signed',\n 'unsigned'])\n", (3667, 3737), False, 'import pytest\n'), ((5767, 5910), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""low,range_len"""', '[(-1, 2), (-5, 4), (3, 2)]'], {'ids': "['zero_in_range', 'max_less_than_zero', 'low_greater_than_zero']"}), "('low,range_len', [(-1, 2), (-5, 4), (3, 2)], ids=[\n 'zero_in_range', 'max_less_than_zero', 'low_greater_than_zero'])\n", (5790, 5910), False, 'import pytest\n'), ((1261, 1273), 'numpy.max', 'np.max', (['diff'], {}), '(diff)\n', (1267, 1273), True, 'import numpy as np\n'), ((3957, 4082), 'nncf.tensorflow.quantization.quantizers.QuantizerConfig', 'QuantizerConfig', ([], {'num_bits': '(8)', 'mode': 'QuantizationMode.SYMMETRIC', 'signedness_to_force': 'signedness_to_force', 'per_channel': 'per_ch'}), '(num_bits=8, mode=QuantizationMode.SYMMETRIC,\n signedness_to_force=signedness_to_force, per_channel=per_ch)\n', (3972, 4082), False, 'from nncf.tensorflow.quantization.quantizers import QuantizerConfig\n'), ((4144, 4229), 'nncf.tensorflow.quantization.quantizers.TFQuantizerSpec.from_config', 'TFQuantizerSpec.from_config', (['qconfig'], {'narrow_range': 'narrow_range', 'half_range': '(True)'}), '(qconfig, narrow_range=narrow_range, half_range=True\n )\n', (4171, 4229), False, 'from nncf.tensorflow.quantization.quantizers import TFQuantizerSpec\n'), ((4340, 4372), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['DIM_SPLIT'], {}), '(DIM_SPLIT)\n', (4361, 4372), True, 'import tensorflow as tf\n'), ((4389, 4407), 'nncf.tensorflow.layers.wrapper.NNCFWrapper', 'NNCFWrapper', (['layer'], {}), '(layer)\n', (4400, 4407), False, 'from nncf.tensorflow.layers.wrapper import NNCFWrapper\n'), ((4432, 4476), 'nncf.tensorflow.layers.custom_objects.NNCF_QUANTIZATION_OPERATIONS.get', 'NNCF_QUANTIZATION_OPERATIONS.get', (['qspec.mode'], {}), '(qspec.mode)\n', (4464, 4476), False, 'from nncf.tensorflow.layers.custom_objects import NNCF_QUANTIZATION_OPERATIONS\n'), ((5496, 5551), 
'nncf.tensorflow.quantization.utils.apply_overflow_fix_to_layer', 'apply_overflow_fix_to_layer', (['layer', '"""kernel"""', 'quantizer'], {}), "(layer, 'kernel', quantizer)\n", (5523, 5551), False, 'from nncf.tensorflow.quantization.utils import apply_overflow_fix_to_layer\n'), ((6155, 6241), 'nncf.tensorflow.quantization.quantizers.QuantizerConfig', 'QuantizerConfig', ([], {'num_bits': '(8)', 'mode': 'QuantizationMode.ASYMMETRIC', 'per_channel': 'per_ch'}), '(num_bits=8, mode=QuantizationMode.ASYMMETRIC, per_channel=\n per_ch)\n', (6170, 6241), False, 'from nncf.tensorflow.quantization.quantizers import QuantizerConfig\n'), ((6290, 6375), 'nncf.tensorflow.quantization.quantizers.TFQuantizerSpec.from_config', 'TFQuantizerSpec.from_config', (['qconfig'], {'narrow_range': 'narrow_range', 'half_range': '(True)'}), '(qconfig, narrow_range=narrow_range, half_range=True\n )\n', (6317, 6375), False, 'from nncf.tensorflow.quantization.quantizers import TFQuantizerSpec\n'), ((6486, 6518), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['DIM_SPLIT'], {}), '(DIM_SPLIT)\n', (6507, 6518), True, 'import tensorflow as tf\n'), ((6535, 6553), 'nncf.tensorflow.layers.wrapper.NNCFWrapper', 'NNCFWrapper', (['layer'], {}), '(layer)\n', (6546, 6553), False, 'from nncf.tensorflow.layers.wrapper import NNCFWrapper\n'), ((6578, 6622), 'nncf.tensorflow.layers.custom_objects.NNCF_QUANTIZATION_OPERATIONS.get', 'NNCF_QUANTIZATION_OPERATIONS.get', (['qspec.mode'], {}), '(qspec.mode)\n', (6610, 6622), False, 'from nncf.tensorflow.layers.custom_objects import NNCF_QUANTIZATION_OPERATIONS\n'), ((7633, 7688), 'nncf.tensorflow.quantization.utils.apply_overflow_fix_to_layer', 'apply_overflow_fix_to_layer', (['layer', '"""kernel"""', 'quantizer'], {}), "(layer, 'kernel', quantizer)\n", (7660, 7688), False, 'from nncf.tensorflow.quantization.utils import apply_overflow_fix_to_layer\n'), ((2130, 2181), 'nncf.tensorflow.quantization.quantizers.Quantizer._min_adj', 'Quantizer._min_adj', (['bits', 'low', 'range_', 'narrow_range'], {}), '(bits, low, range_, narrow_range)\n', (2148, 2181), False, 'from nncf.tensorflow.quantization.quantizers import Quantizer\n'), ((3065, 3113), 'tensorflow.constant', 'tf.constant', (['new_w[:DIM_SPLIT]'], {'dtype': 'tf.float32'}), '(new_w[:DIM_SPLIT], dtype=tf.float32)\n', (3076, 3113), True, 'import tensorflow as tf\n'), ((3005, 3036), 'numpy.round', 'np.round', (['(0.5 + DIM_SPLIT / 128)'], {}), '(0.5 + DIM_SPLIT / 128)\n', (3013, 3036), True, 'import numpy as np\n'), ((3194, 3250), 'numpy.linspace', 'np.linspace', (['(low - 0.5)', '(low + range_len + 0.5)', 'DIM_SPLIT'], {}), '(low - 0.5, low + range_len + 0.5, DIM_SPLIT)\n', (3205, 3250), True, 'import numpy as np\n'), ((7034, 7070), 'tensorflow.constant', 'tf.constant', (['[low]'], {'dtype': 'tf.float32'}), '([low], dtype=tf.float32)\n', (7045, 7070), True, 'import tensorflow as tf\n'), ((7127, 7169), 'tensorflow.constant', 'tf.constant', (['[range_len]'], {'dtype': 'tf.float32'}), '([range_len], dtype=tf.float32)\n', (7138, 7169), True, 'import tensorflow as tf\n'), ((1529, 1558), 'numpy.abs', 'np.abs', (['(outlayers - quant_len)'], {}), '(outlayers - quant_len)\n', (1535, 1558), True, 'import numpy as np\n'), ((5234, 5249), 'tensorflow.ones', 'tf.ones', (['(1, 1)'], {}), '((1, 1))\n', (5241, 5249), True, 'import tensorflow as tf\n'), ((5651, 5666), 'tensorflow.ones', 'tf.ones', (['(1, 1)'], {}), '((1, 1))\n', (5658, 5666), True, 'import tensorflow as tf\n'), ((7372, 7387), 'tensorflow.ones', 'tf.ones', (['(1, 1)'], {}), '((1, 
1))\n', (7379, 7387), True, 'import tensorflow as tf\n'), ((7788, 7803), 'tensorflow.ones', 'tf.ones', (['(1, 1)'], {}), '((1, 1))\n', (7795, 7803), True, 'import tensorflow as tf\n'), ((2795, 2846), 'nncf.tensorflow.quantization.quantizers.Quantizer._min_adj', 'Quantizer._min_adj', (['(7)', 'low', 'range_len', 'narrow_range'], {}), '(7, low, range_len, narrow_range)\n', (2813, 2846), False, 'from nncf.tensorflow.quantization.quantizers import Quantizer\n'), ((1824, 1844), 'numpy.where', 'np.where', (['(diff > eps)'], {}), '(diff > eps)\n', (1832, 1844), True, 'import numpy as np\n'), ((5393, 5415), 'numpy.abs', 'np.abs', (['(w_int7 - new_w)'], {}), '(w_int7 - new_w)\n', (5399, 5415), True, 'import numpy as np\n'), ((7531, 7553), 'numpy.abs', 'np.abs', (['(w_int7 - new_w)'], {}), '(w_int7 - new_w)\n', (7537, 7553), True, 'import numpy as np\n')]
|
"""
Script containing various utilities related to data processing and cleaning. Includes tokenization,
text cleaning, feature extractor (token type IDs & attention masks) for BERT, and IMDBDataset.
"""
import logging
import torch
from torch.utils.data import Dataset
import os
import pickle
import re
import numpy as np
from tqdm import trange
from nltk.stem import WordNetLemmatizer
from nltk.corpus import stopwords
# Setup stopwords list & word (noun, adjective, and verb) lemmatizer
stop_words = set(stopwords.words('english'))
lemmatizer = WordNetLemmatizer()
def clean_text(text):
"""Function to clean text using RegEx operations, removal of stopwords, and lemmatization."""
    text = re.sub(r'[^\w\s]', '', text, flags=re.UNICODE)
text = text.lower()
text = [lemmatizer.lemmatize(token) for token in text.split(' ')]
text = [lemmatizer.lemmatize(token, 'v') for token in text]
text = [word for word in text if word not in stop_words]
text = ' '.join(text)
text = text.lstrip().rstrip()
text = re.sub(' +', ' ', text)
return text
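# Hedged illustration (added; not part of the original module): on a short,
# made-up sentence, clean_text should behave roughly as follows:
#   clean_text("Dogs are running!!")
#   -> punctuation stripped, lowercased, noun/verb lemmatized, stopwords removed
#   -> approximately "dog run"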
def tokenize_and_encode(text, tokenizer, apply_cleaning=False, max_tokenization_length=512,
truncation_method='head-only', split_head_density=0.5):
"""
Function to tokenize & encode a given text.
@param (str) text: a sequence of words to be tokenized in raw string format
@param (pytorch_transformers.BertTokenizer) tokenizer: tokenizer with pre-figured mappings
@param (bool) apply_cleaning: whether or not to perform common cleaning operations on texts;
note that enabling only makes sense if language of the task is English (default: False)
@param (int) max_tokenization_length: maximum number of positional embeddings, or the sequence
length of an example that will be fed to BERT model (default: 512)
@param (str) truncation_method: method that will be applied in case the text exceeds
@max_tokenization_length; currently implemented methods include 'head-only', 'tail-only',
and 'head+tail' (default: 'head-only')
@param (float) split_head_density: weight on head when splitting between head and tail, only
applicable if @truncation_method='head+tail' (default: 0.5)
@return (list) input_ids: the encoded integer indexes of the given text; note that
get_data_iterators() function converts this to a Tensor under the hood
"""
if apply_cleaning:
text = clean_text(text=text)
# Tokenize and encode
tokenized_text = tokenizer.tokenize(text)
input_ids = tokenizer.convert_tokens_to_ids(tokenized_text)
    # Subtract 2 ([CLS] and [SEP] tokens) to get the actual text tokenization length
text_tokenization_length = max_tokenization_length - 2
# Truncate sequences with the specified approach
if len(input_ids) > text_tokenization_length:
# i) Head-Only Approach: Keep the first N tokens
if truncation_method == 'head-only':
input_ids = input_ids[:text_tokenization_length]
# ii) Tail-Only Approach: Keep the last N tokens
elif truncation_method == 'tail-only':
input_ids = input_ids[-text_tokenization_length:]
# iii) Head+Tail Approach: Keep the first F tokens and last L tokens where F + L = N
elif truncation_method == 'head+tail':
head_tokenization_length = int(text_tokenization_length * split_head_density)
tail_tokenization_length = text_tokenization_length - head_tokenization_length
input_head_ids = input_ids[:head_tokenization_length]
input_tail_ids = input_ids[-tail_tokenization_length:]
input_ids = input_head_ids + input_tail_ids
# Plug in CLS & SEP special tokens for identification of start & end points of sequences
cls_id = tokenizer.convert_tokens_to_ids('[CLS]')
sep_id = tokenizer.convert_tokens_to_ids('[SEP]')
input_ids = [cls_id] + input_ids + [sep_id]
# Pad sequences & corresponding masks and features
pad_id = tokenizer.convert_tokens_to_ids('[PAD]')
if len(input_ids) < max_tokenization_length:
padding_length = max_tokenization_length - len(input_ids)
input_ids = input_ids + ([pad_id] * padding_length)
# Check if input is in correct length
# assert len(input_ids) == max_tokenization_length
return input_ids
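# --- Added illustration (not part of the original module): a dependency-free
# sketch of the 'head+tail' truncation arithmetic used above; all numbers are
# made up.
def _head_plus_tail_example(max_tokenization_length=10, split_head_density=0.5):
    """Return the token ids kept by the 'head+tail' strategy for a fake 20-token text."""
    input_ids = list(range(20))                               # pretend encoded ids
    text_tokenization_length = max_tokenization_length - 2    # room for [CLS]/[SEP]
    head_len = int(text_tokenization_length * split_head_density)
    tail_len = text_tokenization_length - head_len
    return input_ids[:head_len] + input_ids[-tail_len:]       # [0, 1, 2, 3] + [16, 17, 18, 19]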
def get_features(input_ids, tokenizer, device):
"""
Function to get BERT-related features, and helps to build the total input representation.
@param (Tensor) input_ids: the encoded integer indexes of a batch, with shape: (B, P)
@param (pytorch_transformers.BertTokenizer) tokenizer: tokenizer with pre-figured mappings
@param (torch.device) device: 'cpu' or 'gpu', decides where to store the outputted tensors
@return (Tensor, Tensor) token_type_ids, attention_mask: features describe token type with
a 0 for the first sentence and a 1 for the pair sentence; enable attention on a
particular token with a 1 or disable it with a 0
"""
token_type_ids, attention_mask = [], []
# Iterate over batch
for input_ids_example in input_ids:
# Convert tensor to a 1D list
input_ids_example = input_ids_example.squeeze().tolist()
# Set example to whole input when batch size is 1
if input_ids.shape[0] == 1:
input_ids_example = input_ids.squeeze().tolist()
# Get padding information
padding_token_id = tokenizer.convert_tokens_to_ids('[PAD]')
padding_length = input_ids_example.count(padding_token_id)
text_length = len(input_ids_example) - padding_length
# Get segment IDs -> all 0s for one sentence, which is the case for sequence classification
token_type_ids_example = [0] * len(input_ids_example)
# Get input mask -> 1 for real tokens, 0 for padding tokens
attention_mask_example = ([1] * text_length) + ([0] * padding_length)
# Check if features are in correct length
assert len(token_type_ids_example) == len(input_ids_example)
assert len(attention_mask_example) == len(input_ids_example)
token_type_ids.append(token_type_ids_example)
attention_mask.append(attention_mask_example)
# Convert lists to tensors
token_type_ids = torch.tensor(data=token_type_ids, device=device)
attention_mask = torch.tensor(data=attention_mask, device=device)
return token_type_ids, attention_mask
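# Hedged illustration (added; not part of the original module): for a batch of
# one padded sequence, the features built above follow this 0/1 pattern
# (the token ids below are placeholders, only the [PAD] positions matter):
#   input_ids      = [[cls, tok, tok, sep, pad, pad]]
#   token_type_ids = [[  0,   0,   0,   0,   0,   0]]
#   attention_mask = [[  1,   1,   1,   1,   0,   0]]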
class IMDBDataset(Dataset):
"""
IMDB Dataset for easily iterating over and performing common operations.
@param (str) input_directory: path of directory where the desired data exists
@param (pytorch_transformers.BertTokenizer) tokenizer: tokenizer with pre-figured mappings
@param (bool) apply_cleaning: whether or not to perform common cleaning operations on texts;
note that enabling only makes sense if language of the task is English
@param (int) max_tokenization_length: maximum number of positional embeddings, or the sequence
length of an example that will be fed to BERT model (default: 512)
@param (str) truncation_method: method that will be applied in case the text exceeds
@max_tokenization_length; currently implemented methods include 'head-only', 'tail-only',
and 'head+tail' (default: 'head-only')
@param (float) split_head_density: weight on head when splitting between head and tail, only
applicable if @truncation_method='head+tail' (default: 0.5)
@param (torch.device) device: 'cpu' or 'gpu', decides where to store the data tensors
"""
def __init__(self, input_directory, tokenizer, apply_cleaning, max_tokenization_length,
truncation_method='head-only', split_head_density=0.5, device='cpu'):
        super(IMDBDataset, self).__init__()
self.positive_path = os.path.join(input_directory, 'pos')
self.positive_files = [f for f in os.listdir(self.positive_path)
if os.path.isfile(os.path.join(self.positive_path, f))]
self.num_positive_examples = len(self.positive_files)
self.positive_label = 1
self.negative_path = os.path.join(input_directory, 'neg')
self.negative_files = [f for f in os.listdir(self.negative_path)
if os.path.isfile(os.path.join(self.negative_path, f))]
self.num_negative_examples = len(self.negative_files)
self.negative_label = 0
self.tokenizer = tokenizer
self.apply_cleaning = apply_cleaning
self.max_tokenization_length = max_tokenization_length
self.truncation_method = truncation_method
self.split_head_density = split_head_density
self.device = device
# Pre-tokenize & encode examples
self.pre_tokenize_and_encode_examples()
def pre_tokenize_and_encode_examples(self):
"""
Function to tokenize & encode examples and save the tokenized versions to a separate folder.
This way, we won't have to perform the same tokenization and encoding ops every epoch.
"""
if not os.path.exists(os.path.join(self.positive_path, 'tokenized_and_encoded')):
os.mkdir(os.path.join(self.positive_path, 'tokenized_and_encoded'))
# Clean & tokenize positive reviews
for i in trange(len(self.positive_files), desc='Tokenizing & Encoding Positive Reviews',
leave=True):
file = self.positive_files[i]
with open(os.path.join(self.positive_path, file), mode='r', encoding='utf8') as f:
example = f.read()
example = re.sub(r'<br />', '', example)
example = example.lstrip().rstrip()
example = re.sub(' +', ' ', example)
example = tokenize_and_encode(text=example,
tokenizer=self.tokenizer,
apply_cleaning=self.apply_cleaning,
max_tokenization_length=self.max_tokenization_length,
truncation_method=self.truncation_method,
split_head_density=self.split_head_density)
with open(os.path.join(self.positive_path, 'tokenized_and_encoded', file), mode='wb') as f:
pickle.dump(obj=example, file=f)
else:
logging.warning('Tokenized positive reviews directory already exists!')
if not os.path.exists(os.path.join(self.negative_path, 'tokenized_and_encoded')):
os.mkdir(os.path.join(self.negative_path, 'tokenized_and_encoded'))
# Clean & tokenize negative reviews
for i in trange(len(self.negative_files), desc='Tokenizing & Encoding Negative Reviews',
leave=True):
file = self.negative_files[i]
with open(os.path.join(self.negative_path, file), mode='r', encoding='utf8') as f:
example = f.read()
example = re.sub(r'<br />', '', example)
example = example.lstrip().rstrip()
example = re.sub(' +', ' ', example)
example = tokenize_and_encode(text=example,
tokenizer=self.tokenizer,
apply_cleaning=self.apply_cleaning,
max_tokenization_length=self.max_tokenization_length,
truncation_method=self.truncation_method,
split_head_density=self.split_head_density)
with open(os.path.join(self.negative_path, 'tokenized_and_encoded', file), mode='wb') as f:
pickle.dump(obj=example, file=f)
else:
logging.warning('Tokenized negative reviews directory already exists!')
def __len__(self):
return len(self.positive_files) + len(self.negative_files)
def __getitem__(self, index):
if index < self.num_positive_examples:
file = self.positive_files[index]
label = torch.tensor(data=self.positive_label, dtype=torch.long).to(self.device)
with open(os.path.join(self.positive_path, 'tokenized_and_encoded', file), mode='rb') as f:
example = pickle.load(file=f)
elif index >= self.num_positive_examples:
file = self.negative_files[index-self.num_positive_examples]
label = torch.tensor(data=self.negative_label, dtype=torch.long).to(self.device)
with open(os.path.join(self.negative_path, 'tokenized_and_encoded', file), mode='rb') as f:
example = pickle.load(file=f)
else:
raise ValueError('Out of range index while accessing dataset')
return torch.from_numpy(np.array(example)).long().to(self.device), label
|
[
"pickle.dump",
"nltk.stem.WordNetLemmatizer",
"logging.warning",
"pickle.load",
"numpy.array",
"nltk.corpus.stopwords.words",
"re.sub",
"os.path.join",
"os.listdir",
"torch.tensor"
] |
[((549, 568), 'nltk.stem.WordNetLemmatizer', 'WordNetLemmatizer', ([], {}), '()\n', (566, 568), False, 'from nltk.stem import WordNetLemmatizer\n'), ((508, 534), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (523, 534), False, 'from nltk.corpus import stopwords\n'), ((702, 743), 're.sub', 're.sub', (['"""[^\\\\w\\\\s]"""', '""""""', 'text', 're.UNICODE'], {}), "('[^\\\\w\\\\s]', '', text, re.UNICODE)\n", (708, 743), False, 'import re\n'), ((1033, 1056), 're.sub', 're.sub', (['""" +"""', '""" """', 'text'], {}), "(' +', ' ', text)\n", (1039, 1056), False, 'import re\n'), ((6325, 6373), 'torch.tensor', 'torch.tensor', ([], {'data': 'token_type_ids', 'device': 'device'}), '(data=token_type_ids, device=device)\n', (6337, 6373), False, 'import torch\n'), ((6395, 6443), 'torch.tensor', 'torch.tensor', ([], {'data': 'attention_mask', 'device': 'device'}), '(data=attention_mask, device=device)\n', (6407, 6443), False, 'import torch\n'), ((7888, 7924), 'os.path.join', 'os.path.join', (['input_directory', '"""pos"""'], {}), "(input_directory, 'pos')\n", (7900, 7924), False, 'import os\n'), ((8208, 8244), 'os.path.join', 'os.path.join', (['input_directory', '"""neg"""'], {}), "(input_directory, 'neg')\n", (8220, 8244), False, 'import os\n'), ((10523, 10594), 'logging.warning', 'logging.warning', (['"""Tokenized positive reviews directory already exists!"""'], {}), "('Tokenized positive reviews directory already exists!')\n", (10538, 10594), False, 'import logging\n'), ((11984, 12055), 'logging.warning', 'logging.warning', (['"""Tokenized negative reviews directory already exists!"""'], {}), "('Tokenized negative reviews directory already exists!')\n", (11999, 12055), False, 'import logging\n'), ((7967, 7997), 'os.listdir', 'os.listdir', (['self.positive_path'], {}), '(self.positive_path)\n', (7977, 7997), False, 'import os\n'), ((8287, 8317), 'os.listdir', 'os.listdir', (['self.negative_path'], {}), '(self.negative_path)\n', (8297, 8317), False, 'import os\n'), ((9165, 9222), 'os.path.join', 'os.path.join', (['self.positive_path', '"""tokenized_and_encoded"""'], {}), "(self.positive_path, 'tokenized_and_encoded')\n", (9177, 9222), False, 'import os\n'), ((9246, 9303), 'os.path.join', 'os.path.join', (['self.positive_path', '"""tokenized_and_encoded"""'], {}), "(self.positive_path, 'tokenized_and_encoded')\n", (9258, 9303), False, 'import os\n'), ((9707, 9736), 're.sub', 're.sub', (['"""<br />"""', '""""""', 'example'], {}), "('<br />', '', example)\n", (9713, 9736), False, 'import re\n'), ((9816, 9842), 're.sub', 're.sub', (['""" +"""', '""" """', 'example'], {}), "(' +', ' ', example)\n", (9822, 9842), False, 'import re\n'), ((10626, 10683), 'os.path.join', 'os.path.join', (['self.negative_path', '"""tokenized_and_encoded"""'], {}), "(self.negative_path, 'tokenized_and_encoded')\n", (10638, 10683), False, 'import os\n'), ((10707, 10764), 'os.path.join', 'os.path.join', (['self.negative_path', '"""tokenized_and_encoded"""'], {}), "(self.negative_path, 'tokenized_and_encoded')\n", (10719, 10764), False, 'import os\n'), ((11168, 11197), 're.sub', 're.sub', (['"""<br />"""', '""""""', 'example'], {}), "('<br />', '', example)\n", (11174, 11197), False, 'import re\n'), ((11277, 11303), 're.sub', 're.sub', (['""" +"""', '""" """', 'example'], {}), "(' +', ' ', example)\n", (11283, 11303), False, 'import re\n'), ((12498, 12517), 'pickle.load', 'pickle.load', ([], {'file': 'f'}), '(file=f)\n', (12509, 12517), False, 'import pickle\n'), ((8047, 8082), 
'os.path.join', 'os.path.join', (['self.positive_path', 'f'], {}), '(self.positive_path, f)\n', (8059, 8082), False, 'import os\n'), ((8367, 8402), 'os.path.join', 'os.path.join', (['self.negative_path', 'f'], {}), '(self.negative_path, f)\n', (8379, 8402), False, 'import os\n'), ((10464, 10496), 'pickle.dump', 'pickle.dump', ([], {'obj': 'example', 'file': 'f'}), '(obj=example, file=f)\n', (10475, 10496), False, 'import pickle\n'), ((11925, 11957), 'pickle.dump', 'pickle.dump', ([], {'obj': 'example', 'file': 'f'}), '(obj=example, file=f)\n', (11936, 11957), False, 'import pickle\n'), ((12295, 12351), 'torch.tensor', 'torch.tensor', ([], {'data': 'self.positive_label', 'dtype': 'torch.long'}), '(data=self.positive_label, dtype=torch.long)\n', (12307, 12351), False, 'import torch\n'), ((12390, 12453), 'os.path.join', 'os.path.join', (['self.positive_path', '"""tokenized_and_encoded"""', 'file'], {}), "(self.positive_path, 'tokenized_and_encoded', file)\n", (12402, 12453), False, 'import os\n'), ((12864, 12883), 'pickle.load', 'pickle.load', ([], {'file': 'f'}), '(file=f)\n', (12875, 12883), False, 'import pickle\n'), ((9568, 9606), 'os.path.join', 'os.path.join', (['self.positive_path', 'file'], {}), '(self.positive_path, file)\n', (9580, 9606), False, 'import os\n'), ((10362, 10425), 'os.path.join', 'os.path.join', (['self.positive_path', '"""tokenized_and_encoded"""', 'file'], {}), "(self.positive_path, 'tokenized_and_encoded', file)\n", (10374, 10425), False, 'import os\n'), ((11029, 11067), 'os.path.join', 'os.path.join', (['self.negative_path', 'file'], {}), '(self.negative_path, file)\n', (11041, 11067), False, 'import os\n'), ((11823, 11886), 'os.path.join', 'os.path.join', (['self.negative_path', '"""tokenized_and_encoded"""', 'file'], {}), "(self.negative_path, 'tokenized_and_encoded', file)\n", (11835, 11886), False, 'import os\n'), ((12661, 12717), 'torch.tensor', 'torch.tensor', ([], {'data': 'self.negative_label', 'dtype': 'torch.long'}), '(data=self.negative_label, dtype=torch.long)\n', (12673, 12717), False, 'import torch\n'), ((12756, 12819), 'os.path.join', 'os.path.join', (['self.negative_path', '"""tokenized_and_encoded"""', 'file'], {}), "(self.negative_path, 'tokenized_and_encoded', file)\n", (12768, 12819), False, 'import os\n'), ((13006, 13023), 'numpy.array', 'np.array', (['example'], {}), '(example)\n', (13014, 13023), True, 'import numpy as np\n')]
|
# Modules
import pygame
import numpy as np
import random
from pygame.constants import KEYDOWN
import settings as s
# Initialize pygame
pygame.init()
# screen
screen = pygame.display.set_mode((s.WIDTH,s.HEIGHT))
# Title and Icon
pygame.display.set_caption('TIC TAC TOE')
icon = pygame.image.load('icon.png')
pygame.display.set_icon(icon)
screen.fill(s.BG_COLOR)
# console board
board = np.zeros((3,3))
# Functions
def drawLines(): # Drawing lines function
# horizontal lines
pygame.draw.line(screen, s.LINE_COLOR, (0,s.SQUARE_SIZE), (500,s.SQUARE_SIZE), s.LINE_WIDTH)
pygame.draw.line(screen, s.LINE_COLOR, (0, 332), (500, 332), s.LINE_WIDTH)
# vertical lines
pygame.draw.line(screen, s.LINE_COLOR, (s.SQUARE_SIZE, 0), (s.SQUARE_SIZE, 500), s.LINE_WIDTH)
pygame.draw.line(screen, s.LINE_COLOR, (332, 0), (332, 500), s.LINE_WIDTH)
def playerEquals(x, y, z):
return x!=0 and x==y and y==z
def checkDraw():
emp = 0
for row in range (s.ROWS):
for col in range (s.COLS):
if availableSquare(row, col):
emp += 1
if emp==0:
return 'Draw'
def checkWinner():
winner = None
# check for tie
winner = checkDraw()
# vertical win
for col in range (s.COLS):
if playerEquals(board[0][col], board[1][col], board[2][col]):
winner = board[0][col]
# horizontal win
for row in range (s.ROWS):
if playerEquals(board[row][0], board[row][1], board[row][2]):
winner = board[row][0]
# ascending diagonal win
if playerEquals(board[2][0], board[1][1], board[0][2]):
winner = board[2][0]
# descending diagonal win
if playerEquals(board[0][0], board[1][1], board[2][2]):
winner = board[0][0]
return winner
# functions for drawing winning lines
def vertical_winline(col, winner):
posX = col * s.SQUARE_SIZE + s.SQUARE_SIZE//2 # column is constant
if winner == 1:
color = s.O_COLOR
elif winner == 2:
color = s.X_COLOR
pygame.draw.line(screen, color, (posX, 15), (posX, s.HEIGHT-15), 15)
def horizontal_winline(row, winner):
posY = row * s.SQUARE_SIZE + s.SQUARE_SIZE//2 # row is constant
if winner == 1:
color = s.O_COLOR
else:
color = s.X_COLOR
pygame.draw.line(screen, color, (15, posY), (s.WIDTH-15, posY), 15)
def asc_diagonal_winline(winner):
if winner == 1:
color = s.O_COLOR
else:
color = s.X_COLOR
pygame.draw.line(screen, color, (15, s.HEIGHT-15), (s.WIDTH-15, 15), 15)
def desc_diagonal_winline(winner):
if winner == 1:
color = s.O_COLOR
else:
color = s.X_COLOR
pygame.draw.line(screen, color, (15, 15), (s.WIDTH-15, s.HEIGHT-15), 15)
# function for drawing Os and Xs
def figures():
for row in range(3):
for col in range(3):
if board[row][col] == 1:
pygame.draw.circle(screen, s.O_COLOR, ( int(col * s.SQUARE_SIZE + 83), int(row * s.SQUARE_SIZE + 83)), s.C_RADIUS, s.C_WIDTH)
elif board[row][col] == 2:
pygame.draw.line(screen, s.X_COLOR, (col * s.SQUARE_SIZE + s.SPACE, row * s.SQUARE_SIZE + s.SQUARE_SIZE - s.SPACE ), (col * s.SQUARE_SIZE + s.SQUARE_SIZE - s.SPACE, row * s.SQUARE_SIZE + s.SPACE), s.CROSS_WIDTH)
pygame.draw.line(screen, s.X_COLOR, (col * s.SQUARE_SIZE + s.SPACE, row * s.SQUARE_SIZE + s.SPACE ), (col * s.SQUARE_SIZE + s.SQUARE_SIZE - s.SPACE, row * s.SQUARE_SIZE + s.SQUARE_SIZE - s.SPACE), s.CROSS_WIDTH)
def markSquare(row, col, player):
board[row][col] = player
def availableSquare(row, col):
return board[row][col] == 0
def isBoardFull():
for row in range (3):
for col in range (3):
if board[row][col] == 0:
return False
return True
def restart():
screen.fill(s.BG_COLOR)
drawLines()
player = 1
for row in range (s.ROWS):
for col in range (s.COLS):
board[row][col] = 0
def render():
x = checkWinner()
if x != None and x != 'Draw':
# vertical win
for col in range (s.COLS):
if playerEquals(board[0][col], board[1][col], board[2][col]):
winner = board[0][col]
vertical_winline(col, winner)
# horizontal win
for row in range (s.ROWS):
if playerEquals(board[row][0], board[row][1], board[row][2]):
winner = board[row][0]
horizontal_winline(row, winner)
# ascending diagonal win
if playerEquals(board[2][0], board[1][1], board[0][2]):
winner = board[2][0]
asc_diagonal_winline(winner)
# descending diagonal win
if playerEquals(board[0][0], board[1][1], board[2][2]):
winner = board[0][0]
desc_diagonal_winline(winner)
display(x)
def display(x):
if x == 1:
text = "O WINS!!! Press 'R' to play again!"
drawTexttoScreen (screen, text, 250, 250, 'GREEN')
elif x == 2:
text = "X WINS!!! Press 'R' to play again!"
drawTexttoScreen (screen, text, 250, 250)
elif x == 'Draw':
text = "DRAW!!! Press 'R' to play again!"
drawTexttoScreen (screen, text, 250, 250)
def drawTexttoScreen (screen, text, x, y, color = (250, 0, 0)):
font = pygame.font.SysFont('chalkduster.ttf', 30)
textSurface = font.render(text, True, color)
textRect = textSurface.get_rect()
textRect.centerx = x
textRect.centery = y
screen.blit(textSurface, textRect)
def playerMove(row, col, player):
markSquare(row, col, player)
return
def compMove():
bestScore = float('-inf')
new_r = new_c = None
for row in range(s.ROWS):
for col in range(s.COLS):
if availableSquare(row, col):
markSquare(row, col, 1)
score = minimax(0, float('-inf'), float('inf'), False)
markSquare(row, col, 0)
if score > bestScore:
bestScore = score
new_r, new_c = row, col
markSquare(new_r, new_c, 1)
return
# Minimax function
def minimax(depth, alpha, beta, is_maximizing):
winner = checkWinner()
if winner != None:
return s.score[winner]
if is_maximizing:
bestScore = float('-inf')
for row in range(s.ROWS):
for col in range(s.COLS):
if availableSquare(row, col):
markSquare(row, col, 1)
score = minimax(depth + 1, alpha, beta, False)
markSquare(row, col, 0)
bestScore = max(score, bestScore)
alpha = max(alpha, bestScore) # pruning
if beta <= alpha:
return bestScore
return bestScore
else:
bestScore = float('inf')
for row in range(3):
for col in range(3):
if availableSquare(row, col):
markSquare(row, col, 2)
score = minimax(depth + 1, alpha, beta, True)
markSquare(row, col, 0)
bestScore = min(score, bestScore)
beta = min(beta, bestScore) # pruning
if beta <= alpha:
return bestScore
return bestScore
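# Hedged note (added; not part of the original script): `s.score` in settings.py
# is assumed to map every checkWinner() result to a value from the computer's
# (player 1, the maximizer) point of view, e.g. something like
# {1: 1, 2: -1, 'Draw': 0}. A quick way to exercise the pruned search on a
# nearly finished board:
#   board[:] = np.array([[1, 2, 1],
#                        [2, 2, 1],
#                        [0, 0, 0]])
#   print(minimax(0, float('-inf'), float('inf'), True))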
drawLines()
player = random.choice(s.p) # initializing player
gameOver = False
# game loop
run = True
while run:
for event in pygame.event.get():
if event.type == pygame.QUIT:
run = False
# for comp move
if player == 1 and not gameOver:
compMove()
winner = checkWinner()
if winner != None:
gameOver = True
player = 2
figures()
render()
if event.type == pygame.MOUSEBUTTONDOWN and not gameOver:
mouseX = event.pos[0] # x coordinate
mouseY = event.pos[1] # y coordinate
clicked_row = int(mouseY // s.SQUARE_SIZE)
clicked_col = int(mouseX // s.SQUARE_SIZE)
# for player move
if availableSquare (clicked_row, clicked_col):
if player == 2:
playerMove(clicked_row, clicked_col, 2)
winner = checkWinner()
if winner != None:
gameOver = True
player = 1
figures()
render()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_r:
restart()
gameOver = False # changing gameOver to False for the next game
pygame.display.update()
|
[
"pygame.draw.line",
"pygame.display.set_icon",
"pygame.font.SysFont",
"pygame.event.get",
"pygame.display.set_mode",
"numpy.zeros",
"random.choice",
"pygame.init",
"pygame.display.update",
"pygame.image.load",
"pygame.display.set_caption"
] |
[((136, 149), 'pygame.init', 'pygame.init', ([], {}), '()\n', (147, 149), False, 'import pygame\n'), ((169, 213), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(s.WIDTH, s.HEIGHT)'], {}), '((s.WIDTH, s.HEIGHT))\n', (192, 213), False, 'import pygame\n'), ((230, 271), 'pygame.display.set_caption', 'pygame.display.set_caption', (['"""TIC TAC TOE"""'], {}), "('TIC TAC TOE')\n", (256, 271), False, 'import pygame\n'), ((279, 308), 'pygame.image.load', 'pygame.image.load', (['"""icon.png"""'], {}), "('icon.png')\n", (296, 308), False, 'import pygame\n'), ((309, 338), 'pygame.display.set_icon', 'pygame.display.set_icon', (['icon'], {}), '(icon)\n', (332, 338), False, 'import pygame\n'), ((389, 405), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (397, 405), True, 'import numpy as np\n'), ((7387, 7405), 'random.choice', 'random.choice', (['s.p'], {}), '(s.p)\n', (7400, 7405), False, 'import random\n'), ((489, 588), 'pygame.draw.line', 'pygame.draw.line', (['screen', 's.LINE_COLOR', '(0, s.SQUARE_SIZE)', '(500, s.SQUARE_SIZE)', 's.LINE_WIDTH'], {}), '(screen, s.LINE_COLOR, (0, s.SQUARE_SIZE), (500, s.\n SQUARE_SIZE), s.LINE_WIDTH)\n', (505, 588), False, 'import pygame\n'), ((586, 660), 'pygame.draw.line', 'pygame.draw.line', (['screen', 's.LINE_COLOR', '(0, 332)', '(500, 332)', 's.LINE_WIDTH'], {}), '(screen, s.LINE_COLOR, (0, 332), (500, 332), s.LINE_WIDTH)\n', (602, 660), False, 'import pygame\n'), ((686, 785), 'pygame.draw.line', 'pygame.draw.line', (['screen', 's.LINE_COLOR', '(s.SQUARE_SIZE, 0)', '(s.SQUARE_SIZE, 500)', 's.LINE_WIDTH'], {}), '(screen, s.LINE_COLOR, (s.SQUARE_SIZE, 0), (s.SQUARE_SIZE, \n 500), s.LINE_WIDTH)\n', (702, 785), False, 'import pygame\n'), ((785, 859), 'pygame.draw.line', 'pygame.draw.line', (['screen', 's.LINE_COLOR', '(332, 0)', '(332, 500)', 's.LINE_WIDTH'], {}), '(screen, s.LINE_COLOR, (332, 0), (332, 500), s.LINE_WIDTH)\n', (801, 859), False, 'import pygame\n'), ((2026, 2096), 'pygame.draw.line', 'pygame.draw.line', (['screen', 'color', '(posX, 15)', '(posX, s.HEIGHT - 15)', '(15)'], {}), '(screen, color, (posX, 15), (posX, s.HEIGHT - 15), 15)\n', (2042, 2096), False, 'import pygame\n'), ((2289, 2358), 'pygame.draw.line', 'pygame.draw.line', (['screen', 'color', '(15, posY)', '(s.WIDTH - 15, posY)', '(15)'], {}), '(screen, color, (15, posY), (s.WIDTH - 15, posY), 15)\n', (2305, 2358), False, 'import pygame\n'), ((2479, 2555), 'pygame.draw.line', 'pygame.draw.line', (['screen', 'color', '(15, s.HEIGHT - 15)', '(s.WIDTH - 15, 15)', '(15)'], {}), '(screen, color, (15, s.HEIGHT - 15), (s.WIDTH - 15, 15), 15)\n', (2495, 2555), False, 'import pygame\n'), ((2677, 2753), 'pygame.draw.line', 'pygame.draw.line', (['screen', 'color', '(15, 15)', '(s.WIDTH - 15, s.HEIGHT - 15)', '(15)'], {}), '(screen, color, (15, 15), (s.WIDTH - 15, s.HEIGHT - 15), 15)\n', (2693, 2753), False, 'import pygame\n'), ((5331, 5373), 'pygame.font.SysFont', 'pygame.font.SysFont', (['"""chalkduster.ttf"""', '(30)'], {}), "('chalkduster.ttf', 30)\n", (5350, 5373), False, 'import pygame\n'), ((7497, 7515), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (7513, 7515), False, 'import pygame\n'), ((8717, 8740), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (8738, 8740), False, 'import pygame\n'), ((3089, 3309), 'pygame.draw.line', 'pygame.draw.line', (['screen', 's.X_COLOR', '(col * s.SQUARE_SIZE + s.SPACE, row * s.SQUARE_SIZE + s.SQUARE_SIZE - s.SPACE)', '(col * s.SQUARE_SIZE + s.SQUARE_SIZE - s.SPACE, row * s.SQUARE_SIZE + s.SPACE)', 
's.CROSS_WIDTH'], {}), '(screen, s.X_COLOR, (col * s.SQUARE_SIZE + s.SPACE, row * s\n .SQUARE_SIZE + s.SQUARE_SIZE - s.SPACE), (col * s.SQUARE_SIZE + s.\n SQUARE_SIZE - s.SPACE, row * s.SQUARE_SIZE + s.SPACE), s.CROSS_WIDTH)\n', (3105, 3309), False, 'import pygame\n'), ((3317, 3536), 'pygame.draw.line', 'pygame.draw.line', (['screen', 's.X_COLOR', '(col * s.SQUARE_SIZE + s.SPACE, row * s.SQUARE_SIZE + s.SPACE)', '(col * s.SQUARE_SIZE + s.SQUARE_SIZE - s.SPACE, row * s.SQUARE_SIZE + s.\n SQUARE_SIZE - s.SPACE)', 's.CROSS_WIDTH'], {}), '(screen, s.X_COLOR, (col * s.SQUARE_SIZE + s.SPACE, row * s\n .SQUARE_SIZE + s.SPACE), (col * s.SQUARE_SIZE + s.SQUARE_SIZE - s.SPACE,\n row * s.SQUARE_SIZE + s.SQUARE_SIZE - s.SPACE), s.CROSS_WIDTH)\n', (3333, 3536), False, 'import pygame\n')]
|
"""
A class for converting ``discretize`` meshes to OMF objects
"""
import omf
import numpy as np
import discretize
def ravel_data_array(arr, nx, ny, nz):
"""Ravel's a numpy array into proper order for passing to the OMF
specification from ``discretize``/UBC formats
"""
dim = (nz, ny, nx)
return np.reshape(arr, dim, order="C").ravel(order="F")
def unravel_data_array(arr, nx, ny, nz):
"""Unravel's a numpy array from the OMF specification to
``discretize``/UBC formats - the is the inverse of ``ravel_data_array``
"""
dim = (nz, ny, nx)
return np.reshape(arr, dim, order="F").ravel(order="C")
class InterfaceOMF(object):
def _tensor_mesh_to_omf(mesh, models=None):
"""
Constructs an :class:`omf.VolumeElement` object of this tensor mesh and
the given models as cell data of that grid.
Parameters
----------
mesh : discretize.TensorMesh
The tensor mesh to convert to a :class:`omf.VolumeElement`
models : dict(numpy.ndarray)
            Name(s) and array(s). Must match the number of cells.
"""
if models is None:
models = {}
# Make the geometry
geometry = omf.VolumeGridGeometry()
# Set tensors
tensors = mesh.h
if len(tensors) < 1:
raise RuntimeError(
"Your mesh is empty... fill it out before converting to OMF"
)
elif len(tensors) == 1:
geometry.tensor_u = tensors[0]
geometry.tensor_v = np.array(
[
0.0,
]
)
geometry.tensor_w = np.array(
[
0.0,
]
)
elif len(tensors) == 2:
geometry.tensor_u = tensors[0]
geometry.tensor_v = tensors[1]
geometry.tensor_w = np.array(
[
0.0,
]
)
elif len(tensors) == 3:
geometry.tensor_u = tensors[0]
geometry.tensor_v = tensors[1]
geometry.tensor_w = tensors[2]
else:
raise RuntimeError("This mesh is too high-dimensional for OMF")
# Set rotation axes
geometry.axis_u = mesh.axis_u
geometry.axis_v = mesh.axis_v
geometry.axis_w = mesh.axis_w
# Set the origin
geometry.origin = mesh.origin
# Make sure the geometry is built correctly
geometry.validate()
        # Make the volume element (the OMF object)
omfmesh = omf.VolumeElement(
geometry=geometry,
)
# Add model data arrays onto the cells of the mesh
omfmesh.data = []
for name, arr in models.items():
data = omf.ScalarData(
name=name,
array=ravel_data_array(arr, *mesh.shape_cells),
location="cells",
)
omfmesh.data.append(data)
# Validate to make sure a proper OMF object is returned to the user
omfmesh.validate()
return omfmesh
def _tree_mesh_to_omf(mesh, models=None):
raise NotImplementedError("Not possible until OMF v2 is released.")
def _curvilinear_mesh_to_omf(mesh, models=None):
raise NotImplementedError("Not currently possible.")
def _cyl_mesh_to_omf(mesh, models=None):
raise NotImplementedError("Not currently possible.")
def to_omf(mesh, models=None):
"""Convert this mesh object to it's proper ``omf`` data object with
the given model dictionary as the cell data of that dataset.
Parameters
----------
models : dict(numpy.ndarray)
            Name(s) and array(s). Must match the number of cells.
"""
# TODO: mesh.validate()
converters = {
# TODO: 'tree' : InterfaceOMF._tree_mesh_to_omf,
"tensor": InterfaceOMF._tensor_mesh_to_omf,
# TODO: 'curv' : InterfaceOMF._curvilinear_mesh_to_omf,
# TODO: 'CylindricalMesh' : InterfaceOMF._cyl_mesh_to_omf,
}
key = mesh._meshType.lower()
try:
convert = converters[key]
except KeyError:
raise RuntimeError(
"Mesh type `{}` is not currently supported for OMF conversion.".format(
key
)
)
# Convert the data object
return convert(mesh, models=models)
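    # Hedged usage sketch (added; not part of the original interface). With a
    # small tensor mesh and one made-up cell-centered model, conversion would
    # look roughly like:
    #   mesh = discretize.TensorMesh([np.ones(4), np.ones(5), np.ones(6)])
    #   models = {"value": np.random.rand(np.prod(mesh.shape_cells))}
    #   element = InterfaceOMF.to_omf(mesh, models=models)
    #   mesh2, models2 = InterfaceOMF.from_omf(element)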
@staticmethod
def _omf_volume_to_tensor(element):
"""Convert an :class:`omf.VolumeElement` to :class:`discretize.TensorMesh`"""
geometry = element.geometry
h = [geometry.tensor_u, geometry.tensor_v, geometry.tensor_w]
mesh = discretize.TensorMesh(h)
mesh.axis_u = geometry.axis_u
mesh.axis_v = geometry.axis_v
mesh.axis_w = geometry.axis_w
mesh.origin = geometry.origin
data_dict = {}
for data in element.data:
# NOTE: this is agnostic about data location - i.e. nodes vs cells
data_dict[data.name] = unravel_data_array(
np.array(data.array), *mesh.shape_cells
)
# Return TensorMesh and data dictionary
return mesh, data_dict
@staticmethod
def from_omf(element):
"""Convert an OMF element to it's proper ``discretize`` type.
Automatically determines the output type. Returns both the mesh and a
dictionary of model arrays.
"""
element.validate()
converters = {
omf.VolumeElement.__name__: InterfaceOMF._omf_volume_to_tensor,
}
key = element.__class__.__name__
try:
convert = converters[key]
except KeyError:
raise RuntimeError(
"OMF type `{}` is not currently supported for conversion.".format(key)
)
# Convert the data object
return convert(element)
|
[
"discretize.TensorMesh",
"omf.VolumeElement",
"omf.VolumeGridGeometry",
"numpy.array",
"numpy.reshape"
] |
[((1217, 1241), 'omf.VolumeGridGeometry', 'omf.VolumeGridGeometry', ([], {}), '()\n', (1239, 1241), False, 'import omf\n'), ((2589, 2625), 'omf.VolumeElement', 'omf.VolumeElement', ([], {'geometry': 'geometry'}), '(geometry=geometry)\n', (2606, 2625), False, 'import omf\n'), ((4741, 4765), 'discretize.TensorMesh', 'discretize.TensorMesh', (['h'], {}), '(h)\n', (4762, 4765), False, 'import discretize\n'), ((320, 351), 'numpy.reshape', 'np.reshape', (['arr', 'dim'], {'order': '"""C"""'}), "(arr, dim, order='C')\n", (330, 351), True, 'import numpy as np\n'), ((591, 622), 'numpy.reshape', 'np.reshape', (['arr', 'dim'], {'order': '"""F"""'}), "(arr, dim, order='F')\n", (601, 622), True, 'import numpy as np\n'), ((1548, 1563), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (1556, 1563), True, 'import numpy as np\n'), ((1665, 1680), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (1673, 1680), True, 'import numpy as np\n'), ((5126, 5146), 'numpy.array', 'np.array', (['data.array'], {}), '(data.array)\n', (5134, 5146), True, 'import numpy as np\n'), ((1900, 1915), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (1908, 1915), True, 'import numpy as np\n')]
|
import sys
import numpy as np
def tvDenoising1D(data, lamb):
"""
This function implements a 1-D Total Variation denoising according to <NAME>. (2013) "A direct algorithm for 1-D total variation denoising."
See also: `<NAME>. (2013). A direct algorithm for 1-D total variation denoising. IEEE Signal Processing Letters, 20(11), 1054–1057. doi:10.1109/LSP.2013.2278339 <http://dx.doi.org/10.1109/LSP.2013.2278339>`_
Parameters
----------
data : array
Data to be fit
lamb : float
.. note::
**lamb** must be nonnegative. **lamb = 0** will result in **output = data**.
Returns
-------
fitData: `array`
Examples
--------
>>> import pylab as pl
>>> data = 'testdata.txt'
>>> X = pl.loadtxt(data);
>>> x = X[:,0];
>>> data = X[:,7];
>>>
>>> denoised = tvDenoising1D(data, lamb=200)
>>>
>>> pl.plot(x, data, 'b')
>>> pl.hold(True)
>>> pl.plot(x, denoised, 'r--')
>>> pl.show()
"""
N = len(data)
k = k0 = k_ = kp = 0
vmin = data[0]-lamb
vmax = data[0]+lamb
umin = lamb
umax = -lamb
x = np.zeros(len(data))
while True:
# 2:
if(k == N):
return np.array([vmin+umin])
# Break condition to avoid overflow...
if k+1 >= N:
break
# 3:
if(data[k+1]+umin < vmin-lamb):
for i in range(k0, k_+1):
x[i] = vmin
x[k0] = x[k_] = vmin
k = k0 = k_ = kp = k_+1
vmin = data[k]
vmax = data[k]+(2*lamb)
umin = lamb
umax = -lamb
# 4:
elif(data[k+1]+umax > vmax+lamb):
for i in range(k0, kp+1):
x[i] = vmax
x[k0] = x[k_] = x[kp] = vmax
k = k0 = k_ = kp = kp+1
vmin = data[k]-(2*lamb)
vmax = data[k]
umin = lamb
umax = -lamb
# 5:
else:
k = k+1
umin = umin +data[k] - vmin
umax = umax + data[k] - vmax
# 6:
if(umin >= lamb):
vmin = vmin + ((umin -lamb)/(k-k0+1))
umin = lamb
k_ = k
if(umax <= -lamb):
vmax = vmax+((umax + lamb)/(k-k0+1))
umax = -lamb
kp = k
# 7:
if k < N:
continue
# 8:
if(umin < 0):
for i in range(k0, k_+1):
x[i] = vmin
k = k0 = k_ = k_ + 1
vmin = data[k]
umin = lamb
umax = data[k] + lamb - vmax
continue
# 9:
elif(umax > 0):
for i in range(k0, kp+1):
x[i] = vmax
k = k0 = kp = kp+1
vmax = data[k]
umax = -lamb
umin = data[k]-lamb-vmin
continue
else:
for i in range(k0, N):
x[i] = vmin+(umin/(k-k0+1))
break
return x
def fitGauss(xarray, yarray):
"""
    This function mixes a Linear Model with a Gaussian Model (LMFit).
See also: `Lmfit Documentation <http://cars9.uchicago.edu/software/python/lmfit/>`_
Parameters
----------
xarray : array
X data
yarray : array
Y data
Returns
-------
peak value: `float`
peak position: `float`
min value: `float`
min position: `float`
fwhm: `float`
    fwhm position: `float`
center of mass: `float`
fit_Y: `array`
fit_result: `ModelFit`
Examples
--------
>>> import pylab as pl
>>> data = 'testdata.txt'
>>> X = pl.loadtxt(data);
>>> x = X[:,0];
>>> y = X[:,7];
>>>
>>> pkv, pkp, minv, minp, fwhm, fwhmp, com = fitGauss(x, y)
>>> print("Peak ", pkv, " at ", pkp)
>>> print("Min ", minv, " at ", minp)
>>> print("Fwhm ", fwhm, " at ", fwhmp)
>>> print("COM = ", com)
>>>
"""
from lmfit.models import GaussianModel, LinearModel
y = yarray
x = xarray
gaussMod = GaussianModel()
linMod = LinearModel()
pars = linMod.make_params(intercept=y.min(), slope=0)
pars += linMod.guess(y, x=x)
pars += gaussMod.guess(y, x=x)
mod = gaussMod + linMod
fwhm = 0
fwhm_position = 0
try:
result = mod.fit(y, pars, x=x)
fwhm = result.values['fwhm']
fwhm_position = result.values['center']
except:
result = None
peak_position = xarray[np.argmax(y)]
peak = np.max(y)
minv_position = x[np.argmin(y)]
minv = np.min(y)
COM = (np.multiply(x,y).sum())/y.sum()
return (peak, peak_position, minv, minv_position, fwhm, fwhm_position, COM, result)
if __name__ == '__main__':
import pylab as pl
#file = '/home/ABTLUS/hugo.slepicka/devfiles/workspacePython/FIT_Test/teste'
file = "/home/ABTLUS/hugo.slepicka/SVN/Py4Syn/trunk/lab6_summed.dat"
X = np.loadtxt(file);
x = X[:,0];
y = X[:,1];
#x = np.asarray([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12])
#y = np.asarray([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
#peak, peak_position, minv, minv_position, fwhm, fwhm_position, COM, result = fitGauss(x, y)
#print("COM = ", result)
data = y
denoised = tvDenoising1D(data, lamb=200)
pl.plot(x, data, 'b')
pl.hold(True)
pl.plot(x, denoised, 'r--')
pl.show()
|
[
"pylab.hold",
"lmfit.models.LinearModel",
"pylab.show",
"numpy.multiply",
"numpy.argmax",
"numpy.argmin",
"numpy.max",
"numpy.min",
"numpy.array",
"numpy.loadtxt",
"lmfit.models.GaussianModel",
"pylab.plot"
] |
[((4077, 4092), 'lmfit.models.GaussianModel', 'GaussianModel', ([], {}), '()\n', (4090, 4092), False, 'from lmfit.models import GaussianModel, LinearModel\n'), ((4106, 4119), 'lmfit.models.LinearModel', 'LinearModel', ([], {}), '()\n', (4117, 4119), False, 'from lmfit.models import GaussianModel, LinearModel\n'), ((4535, 4544), 'numpy.max', 'np.max', (['y'], {}), '(y)\n', (4541, 4544), True, 'import numpy as np\n'), ((4593, 4602), 'numpy.min', 'np.min', (['y'], {}), '(y)\n', (4599, 4602), True, 'import numpy as np\n'), ((4950, 4966), 'numpy.loadtxt', 'np.loadtxt', (['file'], {}), '(file)\n', (4960, 4966), True, 'import numpy as np\n'), ((5310, 5331), 'pylab.plot', 'pl.plot', (['x', 'data', '"""b"""'], {}), "(x, data, 'b')\n", (5317, 5331), True, 'import pylab as pl\n'), ((5336, 5349), 'pylab.hold', 'pl.hold', (['(True)'], {}), '(True)\n', (5343, 5349), True, 'import pylab as pl\n'), ((5354, 5381), 'pylab.plot', 'pl.plot', (['x', 'denoised', '"""r--"""'], {}), "(x, denoised, 'r--')\n", (5361, 5381), True, 'import pylab as pl\n'), ((5386, 5395), 'pylab.show', 'pl.show', ([], {}), '()\n', (5393, 5395), True, 'import pylab as pl\n'), ((4510, 4522), 'numpy.argmax', 'np.argmax', (['y'], {}), '(y)\n', (4519, 4522), True, 'import numpy as np\n'), ((4568, 4580), 'numpy.argmin', 'np.argmin', (['y'], {}), '(y)\n', (4577, 4580), True, 'import numpy as np\n'), ((1231, 1254), 'numpy.array', 'np.array', (['[vmin + umin]'], {}), '([vmin + umin])\n', (1239, 1254), True, 'import numpy as np\n'), ((4615, 4632), 'numpy.multiply', 'np.multiply', (['x', 'y'], {}), '(x, y)\n', (4626, 4632), True, 'import numpy as np\n')]
|
"""Functions and utilities used to format the databases."""
import numpy as np
import jax.numpy as jnp
from scipy.integrate import quadrature
import tools21cm as t2c
def apply_uv_coverage(Box_uv, uv_bool):
"""Apply UV coverage to the data.
Args:
Box_uv: data box in Fourier space
uv_bool: mask of measured baselines
Returns:
Box_uv
"""
Box_uv = Box_uv * uv_bool
return Box_uv
def compute_uv_coverage(redshifts, ncells=200, boxsize=300):
"""Computing UV coverage box for SKA antenna configuration.
Args:
redshifts: list of redshifts for which the UV coverage is computed.
        ncells: size of a grid in UV space (in pixels)
boxsize: size of the simulation (in Mpc)
Returns:
uv: UV coverage box
"""
uv = np.empty((ncells, ncells, len(redshifts)))
for i in range(len(redshifts)):
print(i, end=" ")
        uv[..., i], _ = t2c.noise_model.get_uv_map(
            ncells=ncells, z=redshifts[i], boxsize=boxsize
        )
return uv
def noise(seed, redshifts, uv, ncells=200, boxsize=300.0, obs_time=1000, N_ant=512):
"""Computing telescope thermal noise.
Args:
seed: noise seed
redshifts: list of redshifts for each slice of UV
        uv: UV coverage box
ncells: size of a box in real/UV space (in pixels)
boxsize: size of the simulation (in Mpc)
obs_time: total observation time (in hours)
N_ant: number of antennas in the configuration
Returns:
finalBox: noise in UV space
"""
redshifts = np.append(
redshifts, 2 * redshifts[-1] - redshifts[-2]
) # appending the last difference
finalBox = np.empty(uv.shape, dtype=np.complex64)
for i in range(uv.shape[-1]):
depth_mhz = t2c.cosmology.z_to_nu(redshifts[i]) - t2c.cosmology.z_to_nu(
redshifts[i + 1]
)
noise = t2c.noise_model.noise_map(
ncells=ncells,
z=redshifts[i],
depth_mhz=depth_mhz,
obs_time=obs_time,
boxsize=boxsize,
uv_map=uv[..., i],
N_ant=N_ant,
seed=10000 * seed + i,
)
noise = t2c.telescope_functions.jansky_2_kelvin(
noise, redshifts[i], boxsize=boxsize
).astype(np.complex64)
finalBox[..., i] = noise
return finalBox
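# Hedged note (added; not part of the original module): each slice i gets its
# own frequency depth depth_mhz = nu(z_i) - nu(z_{i+1}) (hence the extra
# redshift appended above) and an independent RNG seed 10000 * seed + i; the
# jansky_2_kelvin call then converts the noise map from flux-density (Jansky)
# units to brightness-temperature units before it is stored in the output box.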
def wedge_removal(
OMm,
redshifts,
HII_DIM,
cell_size,
Box_uv,
chunk_length=501,
blackman=True,
):
"""Computing horizon wedge removal. Implements "sliding" procedure
of removing the wedge for every redshift separately.
Args:
OMm: Omega matter
redshifts: list of redshifts in a lightcone
HII_DIM: size of the HII simulation box (see `21cmFASTv3`)
cell_size: size of a cell in Mpc
Box_uv: box in UV space on which wedge removal is to be computed
chunk_length: length of a sliding chunk (in number of z-slices)
        blackman: whether or not to use a Blackman-Harris taper
Returns:
Box_final: wedge-removed box in real space
"""
def one_over_E(z, OMm):
return 1 / np.sqrt(OMm * (1.0 + z) ** 3 + (1 - OMm))
def multiplicative_factor(z, OMm):
return (
1
/ one_over_E(z, OMm)
/ (1 + z)
* quadrature(lambda x: one_over_E(x, OMm), 0, z)[0]
)
MF = jnp.array([multiplicative_factor(z, OMm) for z in redshifts]).astype(
np.float32
)
redshifts = jnp.array(redshifts).astype(np.float32)
k = jnp.fft.fftfreq(HII_DIM, d=cell_size)
k_parallel = jnp.fft.fftfreq(chunk_length, d=cell_size)
delta_k = k_parallel[1] - k_parallel[0]
k_cube = jnp.meshgrid(k, k, k_parallel)
bm = jnp.abs(jnp.fft.fft(jnp.blackman(chunk_length))) ** 2
buffer = delta_k * (jnp.where(bm / jnp.amax(bm) <= 1e-10)[0][0] - 1)
BM = jnp.blackman(chunk_length)[jnp.newaxis, jnp.newaxis, :]
box_shape = Box_uv.shape
Box_final = np.empty(box_shape, dtype=np.float32)
empty_box = jnp.zeros(k_cube[0].shape)
Box_uv = jnp.concatenate(
(empty_box, jnp.array(Box_uv, dtype=jnp.float32), empty_box), axis=2
)
for i in range(chunk_length, box_shape[-1] + chunk_length):
t_box = Box_uv[..., i - chunk_length // 2 : i + chunk_length // 2 + 1]
W = k_cube[2] / (
jnp.sqrt(k_cube[0] ** 2 + k_cube[1] ** 2)
* MF[min(i - chunk_length // 2 - 1, box_shape[-1] - 1)]
+ buffer
)
w = jnp.logical_or(W < -1.0, W > 1.0)
# w = cp.array(W[i + chunk_length - 1])
if blackman == True:
t_box = t_box * BM
Box_final[..., i - chunk_length] = jnp.real(
jnp.fft.ifftn(jnp.fft.fft(t_box, axis=-1) * w)
)[
..., chunk_length // 2
] # taking only middle slice in redshift
return Box_final.astype(np.float32)
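# Hedged note (added; not part of the original module): the mask w above keeps
# only modes with |k_parallel| > k_perp * MF(z) + buffer, i.e. it zeroes the
# foreground "horizon wedge" (plus a small Blackman-derived buffer) before the
# chunk is transformed back to real/image space along all axes.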
def BoxCar3D(data, filter=(4, 4, 4)):
"""Computing BoxCar filter on the input data.
Args:
data: data to filter
filter: filter shape
Returns:
filtered data
"""
if len(data.shape) != 3:
raise AttributeError("data has to be 3D")
if len(filter) != 3:
raise AttributeError("filter has to be 3D")
s = data.shape
Nx, Ny, Nz = filter
return jnp.einsum(
"ijklmn->ikm",
data[: s[0] // Nx * Nx, : s[1] // Ny * Ny, : s[2] // Nz * Nz].reshape(
(s[0] // Nx, Nx, s[1] // Ny, Ny, s[2] // Nz, Nz)
),
) / (Nx * Ny * Nz)
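# Hedged usage sketch (added; not part of the original module): with the
# default (4, 4, 4) filter, an (8, 8, 8) box is reduced to (2, 2, 2) by
# averaging disjoint 4x4x4 blocks, e.g.
#   box = jnp.arange(8 * 8 * 8, dtype=jnp.float32).reshape(8, 8, 8)
#   small = BoxCar3D(box)   # small.shape == (2, 2, 2)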
|
[
"jax.numpy.array",
"jax.numpy.amax",
"jax.numpy.logical_or",
"jax.numpy.fft.fft",
"tools21cm.noise_model.noise_map",
"numpy.empty",
"tools21cm.telescope_functions.jansky_2_kelvin",
"jax.numpy.fft.fftfreq",
"tools21cm.noise_model.get_uv_map",
"numpy.append",
"tools21cm.cosmology.z_to_nu",
"jax.numpy.blackman",
"jax.numpy.zeros",
"jax.numpy.meshgrid",
"jax.numpy.sqrt",
"numpy.sqrt"
] |
[((1580, 1635), 'numpy.append', 'np.append', (['redshifts', '(2 * redshifts[-1] - redshifts[-2])'], {}), '(redshifts, 2 * redshifts[-1] - redshifts[-2])\n', (1589, 1635), True, 'import numpy as np\n'), ((1698, 1736), 'numpy.empty', 'np.empty', (['uv.shape'], {'dtype': 'np.complex64'}), '(uv.shape, dtype=np.complex64)\n', (1706, 1736), True, 'import numpy as np\n'), ((3566, 3603), 'jax.numpy.fft.fftfreq', 'jnp.fft.fftfreq', (['HII_DIM'], {'d': 'cell_size'}), '(HII_DIM, d=cell_size)\n', (3581, 3603), True, 'import jax.numpy as jnp\n'), ((3621, 3663), 'jax.numpy.fft.fftfreq', 'jnp.fft.fftfreq', (['chunk_length'], {'d': 'cell_size'}), '(chunk_length, d=cell_size)\n', (3636, 3663), True, 'import jax.numpy as jnp\n'), ((3721, 3751), 'jax.numpy.meshgrid', 'jnp.meshgrid', (['k', 'k', 'k_parallel'], {}), '(k, k, k_parallel)\n', (3733, 3751), True, 'import jax.numpy as jnp\n'), ((4000, 4037), 'numpy.empty', 'np.empty', (['box_shape'], {'dtype': 'np.float32'}), '(box_shape, dtype=np.float32)\n', (4008, 4037), True, 'import numpy as np\n'), ((4054, 4080), 'jax.numpy.zeros', 'jnp.zeros', (['k_cube[0].shape'], {}), '(k_cube[0].shape)\n', (4063, 4080), True, 'import jax.numpy as jnp\n'), ((936, 1003), 'tools21cm.noise_model.get_uv_map', 't2c.noise_model.get_uv_map', ([], {'ncells': '(200)', 'z': 'redshifts[i]', 'boxsize': '(300)'}), '(ncells=200, z=redshifts[i], boxsize=300)\n', (962, 1003), True, 'import tools21cm as t2c\n'), ((1907, 2085), 'tools21cm.noise_model.noise_map', 't2c.noise_model.noise_map', ([], {'ncells': 'ncells', 'z': 'redshifts[i]', 'depth_mhz': 'depth_mhz', 'obs_time': 'obs_time', 'boxsize': 'boxsize', 'uv_map': 'uv[..., i]', 'N_ant': 'N_ant', 'seed': '(10000 * seed + i)'}), '(ncells=ncells, z=redshifts[i], depth_mhz=\n depth_mhz, obs_time=obs_time, boxsize=boxsize, uv_map=uv[..., i], N_ant\n =N_ant, seed=10000 * seed + i)\n', (1932, 2085), True, 'import tools21cm as t2c\n'), ((3898, 3924), 'jax.numpy.blackman', 'jnp.blackman', (['chunk_length'], {}), '(chunk_length)\n', (3910, 3924), True, 'import jax.numpy as jnp\n'), ((4529, 4562), 'jax.numpy.logical_or', 'jnp.logical_or', (['(W < -1.0)', '(W > 1.0)'], {}), '(W < -1.0, W > 1.0)\n', (4543, 4562), True, 'import jax.numpy as jnp\n'), ((1791, 1826), 'tools21cm.cosmology.z_to_nu', 't2c.cosmology.z_to_nu', (['redshifts[i]'], {}), '(redshifts[i])\n', (1812, 1826), True, 'import tools21cm as t2c\n'), ((1829, 1868), 'tools21cm.cosmology.z_to_nu', 't2c.cosmology.z_to_nu', (['redshifts[i + 1]'], {}), '(redshifts[i + 1])\n', (1850, 1868), True, 'import tools21cm as t2c\n'), ((3154, 3195), 'numpy.sqrt', 'np.sqrt', (['(OMm * (1.0 + z) ** 3 + (1 - OMm))'], {}), '(OMm * (1.0 + z) ** 3 + (1 - OMm))\n', (3161, 3195), True, 'import numpy as np\n'), ((3517, 3537), 'jax.numpy.array', 'jnp.array', (['redshifts'], {}), '(redshifts)\n', (3526, 3537), True, 'import jax.numpy as jnp\n'), ((4131, 4167), 'jax.numpy.array', 'jnp.array', (['Box_uv'], {'dtype': 'jnp.float32'}), '(Box_uv, dtype=jnp.float32)\n', (4140, 4167), True, 'import jax.numpy as jnp\n'), ((2199, 2276), 'tools21cm.telescope_functions.jansky_2_kelvin', 't2c.telescope_functions.jansky_2_kelvin', (['noise', 'redshifts[i]'], {'boxsize': 'boxsize'}), '(noise, redshifts[i], boxsize=boxsize)\n', (2238, 2276), True, 'import tools21cm as t2c\n'), ((3782, 3808), 'jax.numpy.blackman', 'jnp.blackman', (['chunk_length'], {}), '(chunk_length)\n', (3794, 3808), True, 'import jax.numpy as jnp\n'), ((4376, 4417), 'jax.numpy.sqrt', 'jnp.sqrt', (['(k_cube[0] ** 2 + k_cube[1] ** 2)'], {}), '(k_cube[0] ** 2 + 
k_cube[1] ** 2)\n', (4384, 4417), True, 'import jax.numpy as jnp\n'), ((4750, 4777), 'jax.numpy.fft.fft', 'jnp.fft.fft', (['t_box'], {'axis': '(-1)'}), '(t_box, axis=-1)\n', (4761, 4777), True, 'import jax.numpy as jnp\n'), ((3855, 3867), 'jax.numpy.amax', 'jnp.amax', (['bm'], {}), '(bm)\n', (3863, 3867), True, 'import jax.numpy as jnp\n')]
|
# %% [markdown]
# # THE MIND OF A MAGGOT
# %% [markdown]
# ## Imports
import os
import time
import warnings
from itertools import chain
import colorcet as cc
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.transforms as transforms
import numpy as np
import pandas as pd
import seaborn as sns
from anytree import LevelOrderGroupIter, NodeMixin
from mpl_toolkits.mplot3d import Axes3D
from scipy.linalg import orthogonal_procrustes
from scipy.optimize import linear_sum_assignment
from sklearn.exceptions import ConvergenceWarning
from sklearn.metrics import adjusted_rand_score
from sklearn.utils.testing import ignore_warnings
from tqdm import tqdm
import pymaid
from graspy.cluster import GaussianCluster
from graspy.embed import AdjacencySpectralEmbed, LaplacianSpectralEmbed, selectSVD
from graspy.models import DCSBMEstimator, RDPGEstimator, SBMEstimator
from graspy.plot import heatmap, pairplot
from graspy.simulations import rdpg
from graspy.utils import augment_diagonal, binarize, pass_to_ranks
from src.data import load_metagraph
from src.graph import preprocess
from src.hierarchy import signal_flow
from src.io import savecsv, savefig
from src.pymaid import start_instance
from src.traverse import Cascade, TraverseDispatcher, to_transmission_matrix
from src.visualization import (
CLASS_COLOR_DICT,
adjplot,
barplot_text,
gridmap,
matrixplot,
set_axes_equal,
stacked_barplot,
)
warnings.filterwarnings(action="ignore", category=ConvergenceWarning)
FNAME = os.path.basename(__file__)[:-3]
print(FNAME)
rc_dict = {
"axes.spines.right": False,
"axes.spines.top": False,
"axes.formatter.limits": (-3, 3),
"figure.figsize": (6, 3),
"figure.dpi": 100,
}
for key, val in rc_dict.items():
mpl.rcParams[key] = val
context = sns.plotting_context(context="talk", font_scale=1, rc=rc_dict)
sns.set_context(context)
np.random.seed(8888)
def stashfig(name, **kws):
savefig(name, foldername=FNAME, save_on=True, **kws)
def stashcsv(df, name, **kws):
savecsv(df, name)
mg = load_metagraph("G", version="2020-04-01")
mg = preprocess(
mg,
threshold=0,
sym_threshold=False,
remove_pdiff=True,
binarize=False,
weight="weight",
)
meta = mg.meta
# plot where we are cutting out nodes based on degree
degrees = mg.calculate_degrees()
fig, ax = plt.subplots(1, 1, figsize=(5, 2.5))
sns.distplot(np.log10(degrees["Total edgesum"]), ax=ax)
q = np.quantile(degrees["Total edgesum"], 0.05)
ax.axvline(np.log10(q), linestyle="--", color="r")
ax.set_xlabel("log10(total synapses)")
# remove low degree neurons
idx = meta[degrees["Total edgesum"] > q].index
mg = mg.reindex(idx, use_ids=True)
# remove center neurons # FIXME
idx = mg.meta[mg.meta["hemisphere"].isin(["L", "R"])].index
mg = mg.reindex(idx, use_ids=True)
mg = mg.make_lcc()
mg.calculate_degrees(inplace=True)
meta = mg.meta
meta["inds"] = range(len(meta))
adj = mg.adj
# %% [markdown]
# ## Setup for paths
out_groups = [
("dVNC", "dVNC;CN", "dVNC;RG", "dSEZ;dVNC"),
("dSEZ", "dSEZ;CN", "dSEZ;LHN", "dSEZ;dVNC"),
("motor-PaN", "motor-MN", "motor-VAN", "motor-AN"),
("RG", "RG-IPC", "RG-ITP", "RG-CA-LP", "dVNC;RG"),
("dUnk",),
]
out_group_names = ["VNC", "SEZ" "motor", "RG", "dUnk"]
source_groups = [
("sens-ORN",),
("sens-MN",),
("sens-photoRh5", "sens-photoRh6"),
("sens-thermo",),
("sens-vtd",),
("sens-AN",),
]
source_group_names = ["Odor", "MN", "Photo", "Temp", "VTD", "AN"]
class_key = "merge_class"
sg = list(chain.from_iterable(source_groups))
og = list(chain.from_iterable(out_groups))
sg_name = "All"
og_name = "All"
from src.traverse import to_markov_matrix
np.random.seed(888)
max_hops = 10
n_init = 100
p = 0.05
traverse = Cascade
simultaneous = True
transition_probs = to_transmission_matrix(adj, p)
transition_probs = to_markov_matrix(adj)
source_inds = meta[meta[class_key].isin(sg)]["inds"].values
out_inds = meta[meta[class_key].isin(og)]["inds"].values
# %% [markdown]
# ## Run paths
from src.traverse import RandomWalk
n_init = 1000
paths = []
path_lens = []
for s in source_inds:
rw = RandomWalk(
transition_probs, stop_nodes=out_inds, max_hops=10, allow_loops=False
)
for n in range(n_init):
rw.start(s)
paths.append(rw.traversal_)
path_lens.append(len(rw.traversal_))
# %% [markdown]
# ## Look at distribution of path lengths
for p in paths:
path_lens.append(len(p))
sns.distplot(path_lens)
paths_by_len = {i: [] for i in range(1, max_hops + 1)}
for p in paths:
paths_by_len[len(p)].append(p)
# %% [markdown]
# ## Embed for a dissimilarity measure
from src.cluster import get_paired_inds
embedder = AdjacencySpectralEmbed(n_components=None, n_elbows=2)
embed = embedder.fit_transform(pass_to_ranks(adj))
embed = np.concatenate(embed, axis=-1)
lp_inds, rp_inds = get_paired_inds(meta)
R, _, = orthogonal_procrustes(embed[lp_inds], embed[rp_inds])
left_inds = meta[meta["left"]]["inds"]
right_inds = meta[meta["right"]]["inds"]
embed[left_inds] = embed[left_inds] @ R
from sklearn.metrics import pairwise_distances
pdist = pairwise_distances(embed, metric="cosine")
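# Hedged note (added): R is the orthogonal Procrustes rotation estimated from
# the known left/right pairs; applying it to the left-hemisphere embeddings
# makes the cosine distances in `pdist` comparable across hemispheres.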
# %% [markdown]
# ##
subsample = 2 ** 11
paths = paths_by_len[6]
new_paths = []
for p in paths:
if p[-1] in out_inds:
new_paths.append(p)
paths = new_paths
print(len(paths))
if subsample != -1:
inds = np.random.choice(len(paths), size=subsample, replace=False)
new_paths = []
for i, p in enumerate(paths):
if i in inds:
new_paths.append(p)
paths = new_paths
print(len(paths))
# %% [markdown]
# ##
path_len = len(paths[0])
path_dist_mat = np.zeros((len(paths), len(paths)))
for i in range(len(paths)):
for j in range(len(paths)):
p1 = paths[i]
p2 = paths[j]
dist_sum = 0
for t in range(path_len):
dist = pdist[p1[t], p2[t]]
dist_sum += dist
path_dist_mat[i, j] = dist_sum
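# Hedged note (added): path_dist_mat[i, j] sums, over the aligned hops, the
# cosine distances (in the Procrustes-aligned ASE space) between the nodes
# that paths i and j visit at the same step, giving a step-by-step trajectory
# dissimilarity used for the clustering below.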
path_indicator_mat = np.zeros((len(paths), len(adj)), dtype=int)
for i, p in enumerate(paths):
for j, visit in enumerate(p):
path_indicator_mat[i, visit] = j + 1
# %% [markdown]
# ## Cluster and look at distance mat
from scipy.cluster.hierarchy import linkage
from scipy.spatial.distance import squareform
Z = linkage(squareform(path_dist_mat), method="average")
sns.clustermap(
path_dist_mat,
figsize=(20, 20),
row_linkage=Z,
col_linkage=Z,
xticklabels=False,
yticklabels=False,
)
stashfig("clustermap")
# %% [markdown]
# ##
from graspy.embed import ClassicalMDS
from src.visualization import screeplot
cmds = ClassicalMDS(dissimilarity="precomputed", n_components=10)
path_embed = cmds.fit_transform(path_dist_mat)
plt.plot(cmds.singular_values_, "o")
# %% [markdown]
# ##
from graspy.plot import pairplot
n_components = 5
pairplot(path_embed[:, :n_components], alpha=0.1)
# %% [markdown]
# ##
from graspy.cluster import AutoGMMCluster
n_components = 4
agmm = AutoGMMCluster(max_components=20, n_jobs=-2)
pred = agmm.fit_predict(path_embed[:, :n_components])
print(agmm.n_components_)
pairplot(path_embed[:, :n_components], alpha=0.1, labels=pred, palette=cc.glasbey_light)
# %% [markdown]
# ##
color_dict = dict(zip(np.unique(pred), cc.glasbey_light))
fig, ax = plt.subplots(1, 1, figsize=(20, 20))
adjplot(
path_dist_mat,
sort_class=pred,
cmap=None,
center=None,
ax=ax,
gridline_kws=dict(linewidth=0.5, color="grey", linestyle="--"),
ticks=False,
colors=pred,
palette=color_dict,
cbar=False,
)
stashfig("adjplot-GMMoCMDSoPathDist")
# %% [markdown]
# ##
from sklearn.cluster import AgglomerativeClustering
ag = AgglomerativeClustering(n_clusters=60, affinity="precomputed", linkage="average")
pred = ag.fit_predict(path_dist_mat)
fig, ax = plt.subplots(1, 1, figsize=(20, 20))
color_dict = dict(zip(np.unique(pred), cc.glasbey_light))
adjplot(
path_dist_mat,
sort_class=pred,
cmap=None,
center=None,
ax=ax,
gridline_kws=dict(linewidth=0.5, color="grey", linestyle="--"),
ticks=False,
colors=pred,
palette=color_dict,
)
# %% [markdown]
# ##
meta["signal_flow"] = -signal_flow(adj)
fig, ax = plt.subplots(1, 1, figsize=(20, 20))
meta["class2"].fillna(" ", inplace=True)
matrixplot(
path_indicator_mat,
ax=ax,
plot_type="scattermap",
col_sort_class=["class1", "class2"],
col_class_order="signal_flow",
col_ticks=False,
col_meta=meta,
col_colors="merge_class",
col_palette=CLASS_COLOR_DICT,
# col_ticks=False,
row_sort_class=pred,
row_ticks=False,
sizes=(1, 1),
hue="weight",
palette="tab10",
gridline_kws=dict(linewidth=0.3, color="grey", linestyle="--"),
)
# %% [markdown]
# ##
from sklearn.manifold import MDS
n_components = 8
metric = True
mds = MDS(
n_components=n_components,
metric=True,
n_init=16,
n_jobs=-1,
dissimilarity="precomputed",
)
embed = mds.fit_transform(pass_to_ranks(path_dist_mat))
pairplot(embed, alpha=0.1)
# %%
name = "122.1-BDP-silly-model-testing"
load = True
loc = f"maggot_models/notebooks/outs/{name}/csvs/stash-label-meta.csv"
if load:
meta = pd.read_csv(loc, index_col=0)
for col in ["0_pred", "1_pred", "2_pred", "hemisphere"]:
# meta[col] = meta[col].fillna("")
meta[col] = meta[col].astype(str)
meta[col] = meta[col].replace("nan", "")
meta[col] = meta[col].str.replace(".0", "")
# meta[col] = meta[col].astype(int).astype(str)
# meta[col] = meta[col].fillna("")
# vals =
# meta[col] = meta[col].astype(int).astype(str)
# meta[col].fillna("")
meta["lvl0_labels"] = meta["0_pred"]
meta["lvl1_labels"] = meta["0_pred"] + "-" + meta["1_pred"]
meta["lvl2_labels"] = meta["0_pred"] + "-" + meta["1_pred"] + "-" + meta["2_pred"]
meta["lvl0_labels_side"] = meta["lvl0_labels"] + meta["hemisphere"]
meta["lvl1_labels_side"] = meta["lvl1_labels"] + meta["hemisphere"]
meta["lvl2_labels_side"] = meta["lvl2_labels"] + meta["hemisphere"]
# %%
# %% [markdown]
# ##
# %% [markdown]
# ##
# inds = np.random.choice(len(path_dist_mat), replace=False, size=16000)
# sub_path_indicator_mat = path_indicator_mat[inds]
# %% [markdown]
# ##
fig, ax = plt.subplots(1, 1, figsize=(30, 20))
matrixplot(
path_indicator_mat,
ax=ax,
plot_type="scattermap",
col_sort_class=["lvl2_labels"],
col_class_order="signal_flow",
col_meta=meta,
col_colors="merge_class",
col_item_order=["merge_class", "signal_flow"],
col_palette=CLASS_COLOR_DICT,
col_ticks=False,
row_sort_class=pred,
# row_class_order="size",
row_ticks=False,
sizes=(1, 1),
hue="weight",
palette="Set1",
gridline_kws=dict(linewidth=0.3, color="grey", linestyle="--"),
)
stashfig("path-indicator-map")
# %% [markdown]
# ## compute orders
mean_orders = []
for n in range(path_indicator_mat.shape[1]):
nz = np.nonzero(path_indicator_mat[:, n])
mean_order = np.mean(nz)
mean_orders.append(mean_order)
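# np.nonzero returns the row indices of the paths that visit node n; their mean is stored
# as that node's "mean_order" and later used as a column ordering in the plots below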
meta["mean_order"] = mean_orders
# %% [markdown]
# ##
from src.visualization import palplot
fig, axs = plt.subplots(
1, 2, figsize=(30, 20), gridspec_kw=dict(width_ratios=[0.95, 0.02], wspace=0.02)
)
pal = sns.color_palette("Set1", n_colors=7)
pal = pal[:5] + pal[6:]
ax = axs[0]
matrixplot(
path_indicator_mat,
ax=ax,
plot_type="scattermap",
col_sort_class=["lvl2_labels"],
col_class_order="signal_flow",
col_meta=meta,
col_colors="merge_class",
col_item_order=["merge_class", "mean_order"],
col_palette=CLASS_COLOR_DICT,
col_ticks=True,
tick_rot=90,
row_sort_class=pred,
# row_class_order="size",
row_ticks=True,
sizes=(1, 1),
hue="weight",
palette=pal,
gridline_kws=dict(linewidth=0.3, color="grey", linestyle="--"),
)
ax = axs[1]
palplot(pal, cmap="Set1", ax=ax)
ax.set_title("Visit order")
stashfig("path-indicator-map")
|
[
"numpy.random.seed",
"scipy.linalg.orthogonal_procrustes",
"src.io.savefig",
"pandas.read_csv",
"src.traverse.to_transmission_matrix",
"src.cluster.get_paired_inds",
"src.traverse.RandomWalk",
"numpy.mean",
"graspy.cluster.AutoGMMCluster",
"sklearn.manifold.MDS",
"numpy.unique",
"src.graph.preprocess",
"seaborn.clustermap",
"graspy.utils.pass_to_ranks",
"sklearn.cluster.AgglomerativeClustering",
"numpy.log10",
"matplotlib.pyplot.subplots",
"seaborn.set_context",
"seaborn.plotting_context",
"graspy.embed.AdjacencySpectralEmbed",
"os.path.basename",
"sklearn.metrics.pairwise_distances",
"scipy.spatial.distance.squareform",
"graspy.plot.pairplot",
"numpy.concatenate",
"graspy.embed.ClassicalMDS",
"numpy.quantile",
"matplotlib.pyplot.plot",
"warnings.filterwarnings",
"src.visualization.palplot",
"src.hierarchy.signal_flow",
"src.data.load_metagraph",
"numpy.nonzero",
"src.traverse.to_markov_matrix",
"seaborn.distplot",
"seaborn.color_palette",
"src.io.savecsv",
"itertools.chain.from_iterable"
] |
[((1450, 1519), 'warnings.filterwarnings', 'warnings.filterwarnings', ([], {'action': '"""ignore"""', 'category': 'ConvergenceWarning'}), "(action='ignore', category=ConvergenceWarning)\n", (1473, 1519), False, 'import warnings\n'), ((1813, 1875), 'seaborn.plotting_context', 'sns.plotting_context', ([], {'context': '"""talk"""', 'font_scale': '(1)', 'rc': 'rc_dict'}), "(context='talk', font_scale=1, rc=rc_dict)\n", (1833, 1875), True, 'import seaborn as sns\n'), ((1876, 1900), 'seaborn.set_context', 'sns.set_context', (['context'], {}), '(context)\n', (1891, 1900), True, 'import seaborn as sns\n'), ((1902, 1922), 'numpy.random.seed', 'np.random.seed', (['(8888)'], {}), '(8888)\n', (1916, 1922), True, 'import numpy as np\n'), ((2071, 2112), 'src.data.load_metagraph', 'load_metagraph', (['"""G"""'], {'version': '"""2020-04-01"""'}), "('G', version='2020-04-01')\n", (2085, 2112), False, 'from src.data import load_metagraph\n'), ((2118, 2222), 'src.graph.preprocess', 'preprocess', (['mg'], {'threshold': '(0)', 'sym_threshold': '(False)', 'remove_pdiff': '(True)', 'binarize': '(False)', 'weight': '"""weight"""'}), "(mg, threshold=0, sym_threshold=False, remove_pdiff=True,\n binarize=False, weight='weight')\n", (2128, 2222), False, 'from src.graph import preprocess\n'), ((2359, 2395), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(5, 2.5)'}), '(1, 1, figsize=(5, 2.5))\n', (2371, 2395), True, 'import matplotlib.pyplot as plt\n'), ((2456, 2499), 'numpy.quantile', 'np.quantile', (["degrees['Total edgesum']", '(0.05)'], {}), "(degrees['Total edgesum'], 0.05)\n", (2467, 2499), True, 'import numpy as np\n'), ((3694, 3713), 'numpy.random.seed', 'np.random.seed', (['(888)'], {}), '(888)\n', (3708, 3713), True, 'import numpy as np\n'), ((3808, 3838), 'src.traverse.to_transmission_matrix', 'to_transmission_matrix', (['adj', 'p'], {}), '(adj, p)\n', (3830, 3838), False, 'from src.traverse import Cascade, TraverseDispatcher, to_transmission_matrix\n'), ((3858, 3879), 'src.traverse.to_markov_matrix', 'to_markov_matrix', (['adj'], {}), '(adj)\n', (3874, 3879), False, 'from src.traverse import to_markov_matrix\n'), ((4469, 4492), 'seaborn.distplot', 'sns.distplot', (['path_lens'], {}), '(path_lens)\n', (4481, 4492), True, 'import seaborn as sns\n'), ((4710, 4763), 'graspy.embed.AdjacencySpectralEmbed', 'AdjacencySpectralEmbed', ([], {'n_components': 'None', 'n_elbows': '(2)'}), '(n_components=None, n_elbows=2)\n', (4732, 4763), False, 'from graspy.embed import AdjacencySpectralEmbed, LaplacianSpectralEmbed, selectSVD\n'), ((4823, 4853), 'numpy.concatenate', 'np.concatenate', (['embed'], {'axis': '(-1)'}), '(embed, axis=-1)\n', (4837, 4853), True, 'import numpy as np\n'), ((4874, 4895), 'src.cluster.get_paired_inds', 'get_paired_inds', (['meta'], {}), '(meta)\n', (4889, 4895), False, 'from src.cluster import get_paired_inds\n'), ((4904, 4957), 'scipy.linalg.orthogonal_procrustes', 'orthogonal_procrustes', (['embed[lp_inds]', 'embed[rp_inds]'], {}), '(embed[lp_inds], embed[rp_inds])\n', (4925, 4957), False, 'from scipy.linalg import orthogonal_procrustes\n'), ((5136, 5178), 'sklearn.metrics.pairwise_distances', 'pairwise_distances', (['embed'], {'metric': '"""cosine"""'}), "(embed, metric='cosine')\n", (5154, 5178), False, 'from sklearn.metrics import pairwise_distances\n'), ((6351, 6471), 'seaborn.clustermap', 'sns.clustermap', (['path_dist_mat'], {'figsize': '(20, 20)', 'row_linkage': 'Z', 'col_linkage': 'Z', 'xticklabels': '(False)', 'yticklabels': '(False)'}), '(path_dist_mat, 
figsize=(20, 20), row_linkage=Z, col_linkage=\n Z, xticklabels=False, yticklabels=False)\n', (6365, 6471), True, 'import seaborn as sns\n'), ((6624, 6682), 'graspy.embed.ClassicalMDS', 'ClassicalMDS', ([], {'dissimilarity': '"""precomputed"""', 'n_components': '(10)'}), "(dissimilarity='precomputed', n_components=10)\n", (6636, 6682), False, 'from graspy.embed import ClassicalMDS\n'), ((6732, 6768), 'matplotlib.pyplot.plot', 'plt.plot', (['cmds.singular_values_', '"""o"""'], {}), "(cmds.singular_values_, 'o')\n", (6740, 6768), True, 'import matplotlib.pyplot as plt\n'), ((6842, 6891), 'graspy.plot.pairplot', 'pairplot', (['path_embed[:, :n_components]'], {'alpha': '(0.1)'}), '(path_embed[:, :n_components], alpha=0.1)\n', (6850, 6891), False, 'from graspy.plot import pairplot\n'), ((6982, 7026), 'graspy.cluster.AutoGMMCluster', 'AutoGMMCluster', ([], {'max_components': '(20)', 'n_jobs': '(-2)'}), '(max_components=20, n_jobs=-2)\n', (6996, 7026), False, 'from graspy.cluster import AutoGMMCluster\n'), ((7108, 7201), 'graspy.plot.pairplot', 'pairplot', (['path_embed[:, :n_components]'], {'alpha': '(0.1)', 'labels': 'pred', 'palette': 'cc.glasbey_light'}), '(path_embed[:, :n_components], alpha=0.1, labels=pred, palette=cc.\n glasbey_light)\n', (7116, 7201), False, 'from graspy.plot import pairplot\n'), ((7286, 7322), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(20, 20)'}), '(1, 1, figsize=(20, 20))\n', (7298, 7322), True, 'import matplotlib.pyplot as plt\n'), ((7677, 7763), 'sklearn.cluster.AgglomerativeClustering', 'AgglomerativeClustering', ([], {'n_clusters': '(60)', 'affinity': '"""precomputed"""', 'linkage': '"""average"""'}), "(n_clusters=60, affinity='precomputed', linkage=\n 'average')\n", (7700, 7763), False, 'from sklearn.cluster import AgglomerativeClustering\n'), ((7806, 7842), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(20, 20)'}), '(1, 1, figsize=(20, 20))\n', (7818, 7842), True, 'import matplotlib.pyplot as plt\n'), ((8195, 8231), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(20, 20)'}), '(1, 1, figsize=(20, 20))\n', (8207, 8231), True, 'import matplotlib.pyplot as plt\n'), ((8817, 8915), 'sklearn.manifold.MDS', 'MDS', ([], {'n_components': 'n_components', 'metric': '(True)', 'n_init': '(16)', 'n_jobs': '(-1)', 'dissimilarity': '"""precomputed"""'}), "(n_components=n_components, metric=True, n_init=16, n_jobs=-1,\n dissimilarity='precomputed')\n", (8820, 8915), False, 'from sklearn.manifold import MDS\n'), ((8992, 9018), 'graspy.plot.pairplot', 'pairplot', (['embed'], {'alpha': '(0.1)'}), '(embed, alpha=0.1)\n', (9000, 9018), False, 'from graspy.plot import pairplot\n'), ((10204, 10240), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(30, 20)'}), '(1, 1, figsize=(30, 20))\n', (10216, 10240), True, 'import matplotlib.pyplot as plt\n'), ((11195, 11232), 'seaborn.color_palette', 'sns.color_palette', (['"""Set1"""'], {'n_colors': '(7)'}), "('Set1', n_colors=7)\n", (11212, 11232), True, 'import seaborn as sns\n'), ((11795, 11827), 'src.visualization.palplot', 'palplot', (['pal'], {'cmap': '"""Set1"""', 'ax': 'ax'}), "(pal, cmap='Set1', ax=ax)\n", (11802, 11827), False, 'from src.visualization import palplot\n'), ((1529, 1555), 'os.path.basename', 'os.path.basename', (['__file__'], {}), '(__file__)\n', (1545, 1555), False, 'import os\n'), ((1956, 2008), 'src.io.savefig', 'savefig', (['name'], {'foldername': 'FNAME', 'save_on': '(True)'}), '(name, 
foldername=FNAME, save_on=True, **kws)\n', (1963, 2008), False, 'from src.io import savecsv, savefig\n'), ((2046, 2063), 'src.io.savecsv', 'savecsv', (['df', 'name'], {}), '(df, name)\n', (2053, 2063), False, 'from src.io import savecsv, savefig\n'), ((2409, 2443), 'numpy.log10', 'np.log10', (["degrees['Total edgesum']"], {}), "(degrees['Total edgesum'])\n", (2417, 2443), True, 'import numpy as np\n'), ((2511, 2522), 'numpy.log10', 'np.log10', (['q'], {}), '(q)\n', (2519, 2522), True, 'import numpy as np\n'), ((3539, 3573), 'itertools.chain.from_iterable', 'chain.from_iterable', (['source_groups'], {}), '(source_groups)\n', (3558, 3573), False, 'from itertools import chain\n'), ((3585, 3616), 'itertools.chain.from_iterable', 'chain.from_iterable', (['out_groups'], {}), '(out_groups)\n', (3604, 3616), False, 'from itertools import chain\n'), ((4140, 4226), 'src.traverse.RandomWalk', 'RandomWalk', (['transition_probs'], {'stop_nodes': 'out_inds', 'max_hops': '(10)', 'allow_loops': '(False)'}), '(transition_probs, stop_nodes=out_inds, max_hops=10, allow_loops=\n False)\n', (4150, 4226), False, 'from src.traverse import RandomWalk\n'), ((4795, 4813), 'graspy.utils.pass_to_ranks', 'pass_to_ranks', (['adj'], {}), '(adj)\n', (4808, 4813), False, 'from graspy.utils import augment_diagonal, binarize, pass_to_ranks\n'), ((6305, 6330), 'scipy.spatial.distance.squareform', 'squareform', (['path_dist_mat'], {}), '(path_dist_mat)\n', (6315, 6330), False, 'from scipy.spatial.distance import squareform\n'), ((8167, 8183), 'src.hierarchy.signal_flow', 'signal_flow', (['adj'], {}), '(adj)\n', (8178, 8183), False, 'from src.hierarchy import signal_flow\n'), ((8961, 8989), 'graspy.utils.pass_to_ranks', 'pass_to_ranks', (['path_dist_mat'], {}), '(path_dist_mat)\n', (8974, 8989), False, 'from graspy.utils import augment_diagonal, binarize, pass_to_ranks\n'), ((9168, 9197), 'pandas.read_csv', 'pd.read_csv', (['loc'], {'index_col': '(0)'}), '(loc, index_col=0)\n', (9179, 9197), True, 'import pandas as pd\n'), ((10882, 10918), 'numpy.nonzero', 'np.nonzero', (['path_indicator_mat[:, n]'], {}), '(path_indicator_mat[:, n])\n', (10892, 10918), True, 'import numpy as np\n'), ((10936, 10947), 'numpy.mean', 'np.mean', (['nz'], {}), '(nz)\n', (10943, 10947), True, 'import numpy as np\n'), ((7240, 7255), 'numpy.unique', 'np.unique', (['pred'], {}), '(pred)\n', (7249, 7255), True, 'import numpy as np\n'), ((7865, 7880), 'numpy.unique', 'np.unique', (['pred'], {}), '(pred)\n', (7874, 7880), True, 'import numpy as np\n')]
|
import cv2
import rest
import numpy as np
class ChromaKeyServiceImpl(rest.ChromaKeyingService):
def replace(self, src_image_str, bg_image_str) -> bytes:
bg = cv2.imdecode(np.frombuffer(bg_image_str, np.uint8), cv2.IMREAD_COLOR)
img = cv2.imdecode(np.frombuffer(src_image_str, np.uint8), cv2.IMREAD_COLOR)
RED, GREEN, BLUE = (2, 1, 0)
reds = img[:, :, RED]
greens = img[:, :, GREEN]
blues = img[:, :, BLUE]
# z = np.zeros(shape=img.shape, dtype=in
mask = (greens < 70) | (reds > greens) | (blues > greens)
mask = mask.astype("uint8") * 255
# print(mask)
mask_inv = cv2.bitwise_not(mask)
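# mask is 255 on foreground pixels (red or blue dominates green, or green is weak)
# and 0 on the green background; mask_inv selects the background region instead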
# cv2.imshow("Mask", mask)
# cv2.imshow("Mask inv", mask_inv)
# converting mask 2d to 3d
result = cv2.bitwise_and(img, img, mask=mask)
bg = cv2.resize(bg, (1280, 720))
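# the background is resized to a fixed 1280x720, which assumes the source frame has the
# same resolution (cv2.add below requires both images to be the same size)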
bg = cv2.bitwise_and(bg, bg, mask=mask_inv)
res = cv2.add(result, bg)
is_success, im_buf_arr = cv2.imencode(".jpg", res)
return im_buf_arr.tobytes()
# cv2.imshow("Result", res)
# # cv2.imshow("Bg", bg)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
|
[
"cv2.bitwise_not",
"cv2.bitwise_and",
"numpy.frombuffer",
"cv2.imencode",
"cv2.add",
"cv2.resize"
] |
[((665, 686), 'cv2.bitwise_not', 'cv2.bitwise_not', (['mask'], {}), '(mask)\n', (680, 686), False, 'import cv2\n'), ((819, 855), 'cv2.bitwise_and', 'cv2.bitwise_and', (['img', 'img'], {'mask': 'mask'}), '(img, img, mask=mask)\n', (834, 855), False, 'import cv2\n'), ((870, 897), 'cv2.resize', 'cv2.resize', (['bg', '(1280, 720)'], {}), '(bg, (1280, 720))\n', (880, 897), False, 'import cv2\n'), ((911, 949), 'cv2.bitwise_and', 'cv2.bitwise_and', (['bg', 'bg'], {'mask': 'mask_inv'}), '(bg, bg, mask=mask_inv)\n', (926, 949), False, 'import cv2\n'), ((965, 984), 'cv2.add', 'cv2.add', (['result', 'bg'], {}), '(result, bg)\n', (972, 984), False, 'import cv2\n'), ((1019, 1044), 'cv2.imencode', 'cv2.imencode', (['""".jpg"""', 'res'], {}), "('.jpg', res)\n", (1031, 1044), False, 'import cv2\n'), ((186, 223), 'numpy.frombuffer', 'np.frombuffer', (['bg_image_str', 'np.uint8'], {}), '(bg_image_str, np.uint8)\n', (199, 223), True, 'import numpy as np\n'), ((270, 308), 'numpy.frombuffer', 'np.frombuffer', (['src_image_str', 'np.uint8'], {}), '(src_image_str, np.uint8)\n', (283, 308), True, 'import numpy as np\n')]
|
# coding=utf-8
# Copyright 2022 HyperBO Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for linalg.py."""
import copy
from absl.testing import absltest
from hyperbo.basics import linalg
import jax
from jax import random
import jax.numpy as jnp
import jax.scipy.linalg as jspla
import numpy as np
grad = jax.grad
def test_grad(fun, params, index, eps=1e-4, cached_cholesky=False):
key = random.PRNGKey(0)
key, subkey = random.split(key)
vec = random.normal(subkey, params[index].shape)
if index == 0:
vec = 0.5 * jnp.dot(vec.T, vec)
unitvec = vec / jnp.sqrt(jnp.vdot(vec, vec))
else:
unitvec = vec / jnp.sqrt(jnp.vdot(vec, vec))
params_copy = copy.deepcopy(params)
params_copy[index] += eps / 2. * unitvec
if cached_cholesky:
params_copy[2] = jspla.cholesky(params_copy[0], lower=True)
f1 = fun(*params_copy)
params_copy = copy.deepcopy(params)
params_copy[index] -= eps / 2. * unitvec
if cached_cholesky:
params_copy[2] = jspla.cholesky(params_copy[0], lower=True)
f2 = fun(*params_copy)
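# compare the central finite difference (f1 - f2) / eps with the exact directional
# derivative <grad(fun), unitvec>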
exact_grad_prod = jnp.vdot(grad(fun, index)(*params), unitvec)
return {'Numerical': (f1 - f2) / eps, 'Exact': exact_grad_prod}
class LinalgTest(absltest.TestCase):
def test_inverse_spdmatrix_vector_product(self):
np.random.seed(1)
dim = 10
noise = 1e-3
num_replicas = 10
def fun(spd_matrix, x):
return jnp.dot(x, linalg.inverse_spdmatrix_vector_product(spd_matrix, x))
def test_grad_at_index(index):
for _ in range(num_replicas):
matrix = np.random.randn(dim, dim)
spd_matrix = matrix.T.dot(matrix) + noise * np.eye(matrix.shape[0])
x = np.random.randn(dim)
params = [spd_matrix, x]
grads = test_grad(fun, params, index)
numerical_grad = grads['Numerical']
exact_grad = grads['Exact']
self.assertTrue(jnp.allclose(numerical_grad, exact_grad, rtol=1))
test_grad_at_index(0)
test_grad_at_index(1)
def test_inverse_spdmatrix_vector_product_cached_cholesky(self):
"""Tests if the gradient works when the Cholesky factor is given."""
np.random.seed(1)
dim = 10
noise = 1e-3
num_replicas = 10
def fun(spd_matrix, x, cached_cholesky):
return jnp.dot(
x,
linalg.inverse_spdmatrix_vector_product(
spd_matrix, x, cached_cholesky=cached_cholesky))
def test_grad_at_index(index):
for _ in range(num_replicas):
matrix = np.random.randn(dim, dim)
spd_matrix = matrix.T.dot(matrix) + noise * np.eye(matrix.shape[0])
chol_factor = jspla.cholesky(spd_matrix, lower=True)
x = np.random.randn(dim)
params = [spd_matrix, x, chol_factor]
grads = test_grad(fun, params, index, cached_cholesky=True)
numerical_grad = grads['Numerical']
exact_grad = grads['Exact']
print(numerical_grad, exact_grad)
self.assertTrue(jnp.allclose(numerical_grad, exact_grad, rtol=1))
test_grad_at_index(0)
test_grad_at_index(1)
if __name__ == '__main__':
absltest.main()
|
[
"absl.testing.absltest.main",
"copy.deepcopy",
"numpy.random.seed",
"jax.random.normal",
"hyperbo.basics.linalg.inverse_spdmatrix_vector_product",
"jax.numpy.dot",
"jax.scipy.linalg.cholesky",
"numpy.random.randn",
"jax.numpy.vdot",
"jax.random.PRNGKey",
"jax.numpy.allclose",
"numpy.eye",
"jax.random.split"
] |
[((909, 926), 'jax.random.PRNGKey', 'random.PRNGKey', (['(0)'], {}), '(0)\n', (923, 926), False, 'from jax import random\n'), ((943, 960), 'jax.random.split', 'random.split', (['key'], {}), '(key)\n', (955, 960), False, 'from jax import random\n'), ((969, 1011), 'jax.random.normal', 'random.normal', (['subkey', 'params[index].shape'], {}), '(subkey, params[index].shape)\n', (982, 1011), False, 'from jax import random\n'), ((1188, 1209), 'copy.deepcopy', 'copy.deepcopy', (['params'], {}), '(params)\n', (1201, 1209), False, 'import copy\n'), ((1380, 1401), 'copy.deepcopy', 'copy.deepcopy', (['params'], {}), '(params)\n', (1393, 1401), False, 'import copy\n'), ((3564, 3579), 'absl.testing.absltest.main', 'absltest.main', ([], {}), '()\n', (3577, 3579), False, 'from absl.testing import absltest\n'), ((1296, 1338), 'jax.scipy.linalg.cholesky', 'jspla.cholesky', (['params_copy[0]'], {'lower': '(True)'}), '(params_copy[0], lower=True)\n', (1310, 1338), True, 'import jax.scipy.linalg as jspla\n'), ((1488, 1530), 'jax.scipy.linalg.cholesky', 'jspla.cholesky', (['params_copy[0]'], {'lower': '(True)'}), '(params_copy[0], lower=True)\n', (1502, 1530), True, 'import jax.scipy.linalg as jspla\n'), ((1783, 1800), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (1797, 1800), True, 'import numpy as np\n'), ((2619, 2636), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (2633, 2636), True, 'import numpy as np\n'), ((1045, 1064), 'jax.numpy.dot', 'jnp.dot', (['vec.T', 'vec'], {}), '(vec.T, vec)\n', (1052, 1064), True, 'import jax.numpy as jnp\n'), ((1094, 1112), 'jax.numpy.vdot', 'jnp.vdot', (['vec', 'vec'], {}), '(vec, vec)\n', (1102, 1112), True, 'import jax.numpy as jnp\n'), ((1151, 1169), 'jax.numpy.vdot', 'jnp.vdot', (['vec', 'vec'], {}), '(vec, vec)\n', (1159, 1169), True, 'import jax.numpy as jnp\n'), ((1906, 1960), 'hyperbo.basics.linalg.inverse_spdmatrix_vector_product', 'linalg.inverse_spdmatrix_vector_product', (['spd_matrix', 'x'], {}), '(spd_matrix, x)\n', (1945, 1960), False, 'from hyperbo.basics import linalg\n'), ((2051, 2076), 'numpy.random.randn', 'np.random.randn', (['dim', 'dim'], {}), '(dim, dim)\n', (2066, 2076), True, 'import numpy as np\n'), ((2165, 2185), 'numpy.random.randn', 'np.random.randn', (['dim'], {}), '(dim)\n', (2180, 2185), True, 'import numpy as np\n'), ((2780, 2872), 'hyperbo.basics.linalg.inverse_spdmatrix_vector_product', 'linalg.inverse_spdmatrix_vector_product', (['spd_matrix', 'x'], {'cached_cholesky': 'cached_cholesky'}), '(spd_matrix, x, cached_cholesky=\n cached_cholesky)\n', (2819, 2872), False, 'from hyperbo.basics import linalg\n'), ((2973, 2998), 'numpy.random.randn', 'np.random.randn', (['dim', 'dim'], {}), '(dim, dim)\n', (2988, 2998), True, 'import numpy as np\n'), ((3097, 3135), 'jax.scipy.linalg.cholesky', 'jspla.cholesky', (['spd_matrix'], {'lower': '(True)'}), '(spd_matrix, lower=True)\n', (3111, 3135), True, 'import jax.scipy.linalg as jspla\n'), ((3148, 3168), 'numpy.random.randn', 'np.random.randn', (['dim'], {}), '(dim)\n', (3163, 3168), True, 'import numpy as np\n'), ((2370, 2418), 'jax.numpy.allclose', 'jnp.allclose', (['numerical_grad', 'exact_grad'], {'rtol': '(1)'}), '(numerical_grad, exact_grad, rtol=1)\n', (2382, 2418), True, 'import jax.numpy as jnp\n'), ((3430, 3478), 'jax.numpy.allclose', 'jnp.allclose', (['numerical_grad', 'exact_grad'], {'rtol': '(1)'}), '(numerical_grad, exact_grad, rtol=1)\n', (3442, 3478), True, 'import jax.numpy as jnp\n'), ((2129, 2152), 'numpy.eye', 'np.eye', (['matrix.shape[0]'], {}), 
'(matrix.shape[0])\n', (2135, 2152), True, 'import numpy as np\n'), ((3051, 3074), 'numpy.eye', 'np.eye', (['matrix.shape[0]'], {}), '(matrix.shape[0])\n', (3057, 3074), True, 'import numpy as np\n')]
|
"""
For more information on the contents of this module:
- help(plastic.GenotypeMatrix)
- help(clustering.cluster_mutations)
--------
Module that exposes the clustering algorithm presented at
https://github.com/AlgoLab/celluloid
Simple example workflow:
from plastic import clustering
to_cluster = cl.GenotypeMatrix.from_files('to_cluster.txt', mutations_file = 'mutations.txt')
# Reduce the size of the input down to 50 to speed up some complex computation
# (for instance SASC tree inference)
clustered = clustering.cluster_mutations(to_cluster, k = 50)
# Get the clustered mutations as comma separated lists of simple mutations
muts = clustered.mutations()
# Save the matrix and use it for some intensive computation
clustered.to_files('clustered.txt', mutations_file = 'clustered_mutations.txt')
"""
from ._core.genotypematrix import GenotypeMatrix
import numpy as np
from kmodes.kmodes import KModes
from collections import defaultdict
def cluster_mutations(
genotype_matrix,
k,
n_inits=10,
max_iter=100,
verbose=False,
**kwargs):
"""
Clusters the mutations in a genotype matrix by applying kmodes
Parameters:
genotype_matrix(GenotypeMatrix):
A matrix representing the results of single-cell sequencing.
k(int):
The number of clustered mutations in the output matrix.
Note that empty clusters will be discarded after clustering.
n_inits(int):
The number of initializations in the clustering process.
max_iter(int):
The maximum number of iterations in the clustering process.
verbose (bool)
**kwargs:
Additional arguments passed to KModes process.
Returns:
GenotypeMatrix:
The result of the clustering process. Each column in the matrix
will be the centroid of a non-empty cluster, and will be labeled with
a comma-separated list of the labels of the mutations within the cluster.
Cell labels are left unaltered.
"""
if type(k) != int or k < 1:
raise ValueError(f'the number of clusters must be a positive integer, but {k} is not.')
if type(max_iter) != int or max_iter < 1:
raise ValueError(f'the number of iterations must be a positive integer, but {max_iter} is not.')
if type(n_inits) != int or n_inits < 1:
raise ValueError(f'the number of initializations must be a positive integer, but {n_inits} is not.')
return _celluloid(genotype_matrix, k, n_inits, max_iter,verbose,**kwargs)
def _conflict_dissim(a, b, **_):
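# entries equal to 2 are treated as missing genotype calls; a conflict is counted only
# when both values are known (!= 2) and differ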
v = np.vectorize(lambda ai, bi: ai != 2 and bi != 2 and ai != bi)
return np.sum(v(a, b), axis=1)
def _celluloid(
genotype_matrix,
k,
n_inits,
max_iter,
verbose,
**kwargs
):
"""
Clusters the mutations in a genotype matrix by applying kmodes
Parameters:
genotype_matrix(GenotypeMatrix):
A matrix representing the results of single-cell sequencing.
k(int):
The number of clustered mutations in the output matrix.
Note that empty clusters will be discarded after clustering.
n_inits(int):
The number of initializations in the clustering process.
max_iter(int):
The maximum number of iterations in the clustering process.
verbose (bool)
**kwargs:
Additional arguments passed to KModes process.
Returns:
GenotypeMatrix:
The result of the clustering process. Each column in the matrix
will be the centroid of a non-empty cluster, and will be labeled with
a comma-separated list of the labels of the mutations within the cluster.
Cell labels are left unaltered.
"""
mutations_as_points = np.array(genotype_matrix.matrix(), dtype='int').transpose()
mutation_labels = genotype_matrix.mutation_labels
km = KModes(
n_clusters=k,
cat_dissim=_conflict_dissim,
init='huang',
n_init=n_inits,
max_iter=max_iter,
verbose=(1 if verbose else 0),
**kwargs
)
clusters = km.fit_predict(mutations_as_points)
# Each cluster will be labeled with the labels of its components.
clusters_of_mutations = km.labels_
clustered_mutation_labels = defaultdict(list)
for mutation_label, mutation_cluster in zip(mutation_labels, clusters_of_mutations):
clustered_mutation_labels[mutation_cluster].append(mutation_label)
nonempty_clusters = clustered_mutation_labels.keys()
# build the output matrix and the mutation labels as strings
cluster_centroids = km.cluster_centroids_
clustered_mutation_labels_strings = [','.join(clustered_mutation_labels[cluster_id]) for cluster_id in
sorted(nonempty_clusters)]
out_matrix = [cluster_centroids[cluster_id] for cluster_id in sorted(nonempty_clusters)]
# the matrix needs to be transposed back to its original orientation
out_matrix = np.array(out_matrix).transpose()
return GenotypeMatrix(out_matrix, cell_labels=genotype_matrix.cell_labels,
mutation_labels=clustered_mutation_labels_strings)
|
[
"kmodes.kmodes.KModes",
"collections.defaultdict",
"numpy.vectorize",
"numpy.array"
] |
[((2650, 2711), 'numpy.vectorize', 'np.vectorize', (['(lambda ai, bi: ai != 2 and bi != 2 and ai != bi)'], {}), '(lambda ai, bi: ai != 2 and bi != 2 and ai != bi)\n', (2662, 2711), True, 'import numpy as np\n'), ((4007, 4149), 'kmodes.kmodes.KModes', 'KModes', ([], {'n_clusters': 'k', 'cat_dissim': '_conflict_dissim', 'init': '"""huang"""', 'n_init': 'n_inits', 'max_iter': 'max_iter', 'verbose': '(1 if verbose else 0)'}), "(n_clusters=k, cat_dissim=_conflict_dissim, init='huang', n_init=\n n_inits, max_iter=max_iter, verbose=1 if verbose else 0, **kwargs)\n", (4013, 4149), False, 'from kmodes.kmodes import KModes\n'), ((4403, 4420), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (4414, 4420), False, 'from collections import defaultdict\n'), ((5114, 5134), 'numpy.array', 'np.array', (['out_matrix'], {}), '(out_matrix)\n', (5122, 5134), True, 'import numpy as np\n')]
|
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import skimage
from sklearn import svm, metrics, datasets
from sklearn.utils import Bunch
from sklearn.model_selection import GridSearchCV, train_test_split
#import opencv
from skimage.io import imread
from skimage.transform import resize
import time
import sys
start = time.time()
def load_image_files(container_path, dimension=(256, 256, 3)):
"""
Load image files with categories as subfolder names
which behaves like a scikit-learn sample dataset
Parameters
----------
container_path : string or unicode
Path to the main folder holding one subfolder per category
dimension : tuple
size to which images are resized
Returns
-------
Bunch
"""
image_dir = Path(container_path)
folders = [directory for directory in image_dir.iterdir() if directory.is_dir()]
categories = [fo.name for fo in folders]
descr = "A image classification dataset"
images = []
flat_data = []
target = []
for i, direc in enumerate(folders):
for file in direc.iterdir():
img = skimage.io.imread(file)
img_resized = resize(img, dimension, anti_aliasing=True, mode='reflect')
flat_data.append(img_resized.flatten())
images.append(img_resized)
target.append(i)
flat_data = np.array(flat_data)
target = np.array(target)
images = np.array(images)
#print(images)
return Bunch(data=flat_data,
target=target,
target_names=categories,
images=images,
DESCR=descr),folders
image_dataset_train,folders_train = load_image_files("train/")
image_dataset_test,folders_test = load_image_files("test/")
#image_dataset = load_image_files("images/")
X_train = image_dataset_train.data
y_train = image_dataset_train.target
X_test = image_dataset_test.data
y_test = image_dataset_test.target
# image_dataset.data, image_dataset.target, test_size=0.3,random_state=109)
# param_grid = [
# {'C': [1, 10, 100, 1000], 'kernel': ['linear']},
# {'C': [1, 10, 100, 1000], 'gamma': [0.001, 0.0001], 'kernel': ['rbf']},
# ]
#svc = svm.SVC()
clf = svm.SVC()
#clf = GridSearchCV(svc, param_grid)
clf.fit(X_train, y_train)
print(folders_train)
y_pred = clf.predict(X_test)
print(y_pred)
print(y_test)
len_of_y = len(y_pred)
predict_correct_covid = 0
predict_wrong_covid = 0
predict_correct_noncovid = 0
predict_wrong_noncovid = 0
for i in range(len_of_y):
if y_pred[i] == y_test[i] and y_pred[i] == 0:
predict_correct_covid += 1
elif y_pred[i] == y_test[i] and y_pred[i] == 1:
predict_correct_noncovid += 1
elif y_pred[i] != y_test[i] and y_pred[i] == 0:
predict_wrong_covid += 1
elif y_pred[i] != y_test[i] and y_pred[i] == 1:
predict_wrong_noncovid += 1
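# (equivalently, sklearn.metrics.confusion_matrix(y_test, y_pred) yields these four counts)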
print("predict_correct_covid", predict_correct_covid)
print("predict_wrong_covid", predict_wrong_covid)
print("predict_correct_noncovid", predict_correct_noncovid)
print("predict_wrong_noncovid", predict_wrong_noncovid)
print("percen of correct covid", predict_correct_covid/(predict_correct_covid + predict_wrong_covid))
print("percen of correct noncovid", predict_correct_noncovid/(predict_correct_noncovid + predict_wrong_noncovid))
print("precent over all", (predict_correct_covid + predict_correct_noncovid)/len_of_y)
end = time.time()
print("time", end - start)
|
[
"sklearn.utils.Bunch",
"time.time",
"pathlib.Path",
"numpy.array",
"skimage.transform.resize",
"sklearn.svm.SVC",
"skimage.io.imread"
] |
[((360, 371), 'time.time', 'time.time', ([], {}), '()\n', (369, 371), False, 'import time\n'), ((2311, 2320), 'sklearn.svm.SVC', 'svm.SVC', ([], {}), '()\n', (2318, 2320), False, 'from sklearn import svm, metrics, datasets\n'), ((3544, 3555), 'time.time', 'time.time', ([], {}), '()\n', (3553, 3555), False, 'import time\n'), ((834, 854), 'pathlib.Path', 'Path', (['container_path'], {}), '(container_path)\n', (838, 854), False, 'from pathlib import Path\n'), ((1441, 1460), 'numpy.array', 'np.array', (['flat_data'], {}), '(flat_data)\n', (1449, 1460), True, 'import numpy as np\n'), ((1475, 1491), 'numpy.array', 'np.array', (['target'], {}), '(target)\n', (1483, 1491), True, 'import numpy as np\n'), ((1506, 1522), 'numpy.array', 'np.array', (['images'], {}), '(images)\n', (1514, 1522), True, 'import numpy as np\n'), ((1555, 1648), 'sklearn.utils.Bunch', 'Bunch', ([], {'data': 'flat_data', 'target': 'target', 'target_names': 'categories', 'images': 'images', 'DESCR': 'descr'}), '(data=flat_data, target=target, target_names=categories, images=images,\n DESCR=descr)\n', (1560, 1648), False, 'from sklearn.utils import Bunch\n'), ((1187, 1210), 'skimage.io.imread', 'skimage.io.imread', (['file'], {}), '(file)\n', (1204, 1210), False, 'import skimage\n'), ((1240, 1298), 'skimage.transform.resize', 'resize', (['img', 'dimension'], {'anti_aliasing': '(True)', 'mode': '"""reflect"""'}), "(img, dimension, anti_aliasing=True, mode='reflect')\n", (1246, 1298), False, 'from skimage.transform import resize\n')]
|
import io
import math
from textwrap import wrap
from time import strftime, gmtime
import bezier
import matplotlib
import numpy as np
import pandas as pd
import seaborn as sns
from PIL import Image
from matplotlib import pyplot as plt
from ..utils import Log
def graph_bpm(map_obj):
"""
graphs the BPM changes of a map
:param map_obj: a MapStats object
:return: image in io stream
"""
Log.log(f"Graphing BPM for {map_obj.title}")
data = [(i.starttime / map_obj.speed_multiplier,
1000 / i.ms_per_beat * 60 / map_obj.speed_multiplier)
for i in map_obj.beatmap.timingpoints if i.change]
chart_points = list()
for i, j in enumerate(data):
if i != 0:
last = data[i - 1]
chart_points.append((j[0] - .01, last[1]))
chart_points.append(j)
if len(data) - 1 == i:
chart_points.append((map_obj.beatmap.hitobjects[-1].starttime
/ map_obj.speed_multiplier, j[1]))
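# each timing change is duplicated just before the next one so the line plot below draws
# BPM as a step function instead of interpolating between changes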
points = pd.DataFrame(chart_points)
points.columns = ["Time", "BPM"]
col = (38 / 255, 50 / 255, 59 / 255, .9)
sns.set(rc={'axes.facecolor': col,
'text.color': (236 / 255, 239 / 255, 241 / 255),
'figure.facecolor': col,
'savefig.facecolor': col,
'xtick.color': (176 / 255, 190 / 255, 197 / 255),
'ytick.color': (176 / 255, 190 / 255, 197 / 255),
'grid.color': (69 / 255, 90 / 255, 100 / 255),
'axes.labelcolor': (240 / 255, 98 / 255, 150 / 255),
'xtick.bottom': True,
'xtick.direction': 'in',
'figure.figsize': (6, 4),
'savefig.dpi': 100
})
ax = sns.lineplot(x="Time", y="BPM", data=points, color=(240 / 255, 98 / 255, 150 / 255))
length = int(map_obj.total_length) * 1000
m = length / 50
plt.xlim(-m, length + m)
formatter = matplotlib.ticker.FuncFormatter(lambda ms, x: strftime('%M:%S', gmtime(ms // 1000)))
ax.xaxis.set_major_formatter(formatter)
comp = round(max(1, (map_obj.bpm_max - map_obj.bpm_min) / 20), 2)
top = round(map_obj.bpm_max, 2) + comp
bot = max(round(map_obj.bpm_min, 2) - comp, 0)
dist = top - bot
plt.yticks(np.arange(bot, top, dist / 6 - .0001))
plt.ylim(bot, top)
round_num = 0 if dist > 10 else 2
formatter = matplotlib.ticker.FuncFormatter(lambda dig, y:
f"{max(dig - .004, 0.0):.{round_num}f}")
ax.yaxis.set_major_formatter(formatter)
ax.xaxis.grid(False)
width = 85
map_text = "\n".join(wrap(f"{map_obj.title} by {map_obj.artist}", width=width)) + "\n" + \
"\n".join(wrap(f"Mapset by {map_obj.creator}, "
f"Difficulty: {map_obj.version}", width=width))
plt.title(map_text)
plt.box(False)
image = io.BytesIO()
plt.savefig(image, bbox_inches='tight')
image.seek(0)
plt.clf()
plt.close()
return image
def map_strain_graph(map_strains, progress=1., width=399., height=40., max_chunks=100, low_cut=30.):
"""
generates a strain graph for a map
:param map_strains: get_strains object
:param progress: how much of the map the player has finished
:param width: width of image
:param height: height of image
:param max_chunks: maximum number of strain chunks sampled from the map
:param low_cut: adds some padding to the bottom of the graph
:return: an image in a bytesio object
"""
strains, max_strain = map_strains["strains"], map_strains["max_strain"]
strains_chunks = list()
chunk_size = math.ceil(len(strains) / max_chunks)
for i in range(0, len(strains), chunk_size):
strain_part = strains[i:i + chunk_size]
strains_chunks.append(max(strain_part))
x = np.linspace(0, width, num=len(strains_chunks))
y = np.minimum(low_cut,
height * 0.125 + height * .875 - np.array([i / max_strain for i in
strains_chunks]) * height * .875)
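# map each strain chunk to a y value: higher strain -> smaller y (drawn taller once the
# axis is inverted below); np.minimum with low_cut guarantees a minimum height for
# low-strain chunks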
x = np.insert(x, 0, 0)
x = np.insert(x, 0, 0)
x = np.append(x, width)
x = np.append(x, width)
y = np.insert(y, 0, low_cut)
y = np.insert(y, 0, low_cut)
y = np.append(y, low_cut)
y = np.append(y, low_cut)
curves = list()
curves.append(bezier.Curve(np.asfortranarray([[0.0, 0.0], [height, low_cut]]), degree=1))
for i in range(1, len(y) - 1):
node = np.asfortranarray([
[avgpt(x, i - 1), x[i], avgpt(x, i)],
[avgpt(y, i - 1), y[i], avgpt(y, i)]])
curves.append(
bezier.Curve(node, degree=2)
)
curves.append(bezier.Curve(np.asfortranarray([[width, width], [low_cut, height]]), degree=1))
curves.append(bezier.Curve(np.asfortranarray([[width, 0.0], [height, height]]), degree=1))
polygon = bezier.CurvedPolygon(*curves)
_, ax = plt.subplots(figsize=(round(width * 1.30), round(height * 1.30)), dpi=1)
polygon.plot(pts_per_edge=200, color=(240 / 255, 98 / 255, 146 / 255, 1), ax=ax)
plt.xlim(0, width)
plt.ylim(height, 0)
plt.axis('off')
plt.box(False)
image = io.BytesIO()
fig1 = plt.gcf()
fig1.savefig(image, bbox_inches='tight', transparent=True, pad_inches=0, dpi=1)
image.seek(0)
plt.clf()
plt.close()
img = Image.open(image)
data = np.array(img)
for j in data:
for pos, i in enumerate(j):
if pos > len(j) * progress:
j[pos] = i / 1.5
if i[3] != 0:
j[pos][3] = i[3] / 159 * 255
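# pixels past the player's progress are darkened, and the alpha channel is rescaled
# (assuming a 0-159 source range) up to the full 0-255 range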
img = Image.fromarray(data)
image.close()
image = io.BytesIO()
img.save(image, "png")
image.seek(0)
return image
def avgpt(points, index):
"""
get the average between current point and the next one
:param points: list of points
:param index: index
:return: average
"""
return (points[index] + points[index + 1]) / 2.0
|
[
"matplotlib.pyplot.title",
"seaborn.lineplot",
"matplotlib.pyplot.clf",
"textwrap.wrap",
"matplotlib.pyplot.box",
"numpy.arange",
"bezier.Curve",
"pandas.DataFrame",
"matplotlib.pyplot.close",
"bezier.CurvedPolygon",
"numpy.insert",
"numpy.append",
"seaborn.set",
"io.BytesIO",
"matplotlib.pyplot.ylim",
"numpy.asfortranarray",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.xlim",
"time.gmtime",
"matplotlib.pyplot.axis",
"PIL.Image.open",
"numpy.array",
"PIL.Image.fromarray",
"matplotlib.pyplot.savefig"
] |
[((1024, 1050), 'pandas.DataFrame', 'pd.DataFrame', (['chart_points'], {}), '(chart_points)\n', (1036, 1050), True, 'import pandas as pd\n'), ((1138, 1591), 'seaborn.set', 'sns.set', ([], {'rc': "{'axes.facecolor': col, 'text.color': (236 / 255, 239 / 255, 241 / 255),\n 'figure.facecolor': col, 'savefig.facecolor': col, 'xtick.color': (176 /\n 255, 190 / 255, 197 / 255), 'ytick.color': (176 / 255, 190 / 255, 197 /\n 255), 'grid.color': (69 / 255, 90 / 255, 100 / 255), 'axes.labelcolor':\n (240 / 255, 98 / 255, 150 / 255), 'xtick.bottom': True,\n 'xtick.direction': 'in', 'figure.figsize': (6, 4), 'savefig.dpi': 100}"}), "(rc={'axes.facecolor': col, 'text.color': (236 / 255, 239 / 255, 241 /\n 255), 'figure.facecolor': col, 'savefig.facecolor': col, 'xtick.color':\n (176 / 255, 190 / 255, 197 / 255), 'ytick.color': (176 / 255, 190 / 255,\n 197 / 255), 'grid.color': (69 / 255, 90 / 255, 100 / 255),\n 'axes.labelcolor': (240 / 255, 98 / 255, 150 / 255), 'xtick.bottom': \n True, 'xtick.direction': 'in', 'figure.figsize': (6, 4), 'savefig.dpi':\n 100})\n", (1145, 1591), True, 'import seaborn as sns\n'), ((1770, 1859), 'seaborn.lineplot', 'sns.lineplot', ([], {'x': '"""Time"""', 'y': '"""BPM"""', 'data': 'points', 'color': '(240 / 255, 98 / 255, 150 / 255)'}), "(x='Time', y='BPM', data=points, color=(240 / 255, 98 / 255, \n 150 / 255))\n", (1782, 1859), True, 'import seaborn as sns\n'), ((1926, 1950), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-m)', '(length + m)'], {}), '(-m, length + m)\n', (1934, 1950), True, 'from matplotlib import pyplot as plt\n'), ((2343, 2361), 'matplotlib.pyplot.ylim', 'plt.ylim', (['bot', 'top'], {}), '(bot, top)\n', (2351, 2361), True, 'from matplotlib import pyplot as plt\n'), ((2879, 2898), 'matplotlib.pyplot.title', 'plt.title', (['map_text'], {}), '(map_text)\n', (2888, 2898), True, 'from matplotlib import pyplot as plt\n'), ((2904, 2918), 'matplotlib.pyplot.box', 'plt.box', (['(False)'], {}), '(False)\n', (2911, 2918), True, 'from matplotlib import pyplot as plt\n'), ((2932, 2944), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (2942, 2944), False, 'import io\n'), ((2949, 2988), 'matplotlib.pyplot.savefig', 'plt.savefig', (['image'], {'bbox_inches': '"""tight"""'}), "(image, bbox_inches='tight')\n", (2960, 2988), True, 'from matplotlib import pyplot as plt\n'), ((3012, 3021), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (3019, 3021), True, 'from matplotlib import pyplot as plt\n'), ((3026, 3037), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3035, 3037), True, 'from matplotlib import pyplot as plt\n'), ((4112, 4130), 'numpy.insert', 'np.insert', (['x', '(0)', '(0)'], {}), '(x, 0, 0)\n', (4121, 4130), True, 'import numpy as np\n'), ((4139, 4157), 'numpy.insert', 'np.insert', (['x', '(0)', '(0)'], {}), '(x, 0, 0)\n', (4148, 4157), True, 'import numpy as np\n'), ((4166, 4185), 'numpy.append', 'np.append', (['x', 'width'], {}), '(x, width)\n', (4175, 4185), True, 'import numpy as np\n'), ((4194, 4213), 'numpy.append', 'np.append', (['x', 'width'], {}), '(x, width)\n', (4203, 4213), True, 'import numpy as np\n'), ((4222, 4246), 'numpy.insert', 'np.insert', (['y', '(0)', 'low_cut'], {}), '(y, 0, low_cut)\n', (4231, 4246), True, 'import numpy as np\n'), ((4255, 4279), 'numpy.insert', 'np.insert', (['y', '(0)', 'low_cut'], {}), '(y, 0, low_cut)\n', (4264, 4279), True, 'import numpy as np\n'), ((4288, 4309), 'numpy.append', 'np.append', (['y', 'low_cut'], {}), '(y, low_cut)\n', (4297, 4309), True, 'import numpy as np\n'), ((4318, 4339), 'numpy.append', 
'np.append', (['y', 'low_cut'], {}), '(y, low_cut)\n', (4327, 4339), True, 'import numpy as np\n'), ((4906, 4935), 'bezier.CurvedPolygon', 'bezier.CurvedPolygon', (['*curves'], {}), '(*curves)\n', (4926, 4935), False, 'import bezier\n'), ((5111, 5129), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', 'width'], {}), '(0, width)\n', (5119, 5129), True, 'from matplotlib import pyplot as plt\n'), ((5134, 5153), 'matplotlib.pyplot.ylim', 'plt.ylim', (['height', '(0)'], {}), '(height, 0)\n', (5142, 5153), True, 'from matplotlib import pyplot as plt\n'), ((5158, 5173), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (5166, 5173), True, 'from matplotlib import pyplot as plt\n'), ((5178, 5192), 'matplotlib.pyplot.box', 'plt.box', (['(False)'], {}), '(False)\n', (5185, 5192), True, 'from matplotlib import pyplot as plt\n'), ((5206, 5218), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (5216, 5218), False, 'import io\n'), ((5230, 5239), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (5237, 5239), True, 'from matplotlib import pyplot as plt\n'), ((5346, 5355), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (5353, 5355), True, 'from matplotlib import pyplot as plt\n'), ((5360, 5371), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5369, 5371), True, 'from matplotlib import pyplot as plt\n'), ((5383, 5400), 'PIL.Image.open', 'Image.open', (['image'], {}), '(image)\n', (5393, 5400), False, 'from PIL import Image\n'), ((5412, 5425), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (5420, 5425), True, 'import numpy as np\n'), ((5637, 5658), 'PIL.Image.fromarray', 'Image.fromarray', (['data'], {}), '(data)\n', (5652, 5658), False, 'from PIL import Image\n'), ((5689, 5701), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (5699, 5701), False, 'import io\n'), ((2299, 2337), 'numpy.arange', 'np.arange', (['bot', 'top', '(dist / 6 - 0.0001)'], {}), '(bot, top, dist / 6 - 0.0001)\n', (2308, 2337), True, 'import numpy as np\n'), ((2759, 2844), 'textwrap.wrap', 'wrap', (['f"""Mapset by {map_obj.creator}, Difficulty: {map_obj.version}"""'], {'width': 'width'}), "(f'Mapset by {map_obj.creator}, Difficulty: {map_obj.version}', width=width\n )\n", (2763, 2844), False, 'from textwrap import wrap\n'), ((4391, 4441), 'numpy.asfortranarray', 'np.asfortranarray', (['[[0.0, 0.0], [height, low_cut]]'], {}), '([[0.0, 0.0], [height, low_cut]])\n', (4408, 4441), True, 'import numpy as np\n'), ((4660, 4688), 'bezier.Curve', 'bezier.Curve', (['node'], {'degree': '(2)'}), '(node, degree=2)\n', (4672, 4688), False, 'import bezier\n'), ((4730, 4784), 'numpy.asfortranarray', 'np.asfortranarray', (['[[width, width], [low_cut, height]]'], {}), '([[width, width], [low_cut, height]])\n', (4747, 4784), True, 'import numpy as np\n'), ((4828, 4879), 'numpy.asfortranarray', 'np.asfortranarray', (['[[width, 0.0], [height, height]]'], {}), '([[width, 0.0], [height, height]])\n', (4845, 4879), True, 'import numpy as np\n'), ((2032, 2050), 'time.gmtime', 'gmtime', (['(ms // 1000)'], {}), '(ms // 1000)\n', (2038, 2050), False, 'from time import strftime, gmtime\n'), ((2664, 2721), 'textwrap.wrap', 'wrap', (['f"""{map_obj.title} by {map_obj.artist}"""'], {'width': 'width'}), "(f'{map_obj.title} by {map_obj.artist}', width=width)\n", (2668, 2721), False, 'from textwrap import wrap\n'), ((3973, 4025), 'numpy.array', 'np.array', (['[(i / max_strain) for i in strains_chunks]'], {}), '([(i / max_strain) for i in strains_chunks])\n', (3981, 4025), True, 'import numpy as np\n')]
|
import numpy as np
from sklearn.linear_model import LogisticRegression, SGDClassifier
from sklearn.pipeline import Pipeline
from soccer_xg.ml.preprocessing import simple_proc_for_linear_algoritms
def logreg_gridsearch_classifier(
numeric_features,
categoric_features,
learning_rate=0.08,
use_dask=False,
n_iter=100,
scoring='roc_auc',
):
"""
Simple classification pipeline using randomized search to optimize logistic regression hyper-parameters
Parameters
----------
`numeric_features` : The list of numeric features
`categoric_features` : The list of categoric features
`learning_rate` : The learning rate
"""
return _logreg_gridsearch_model(
'classification',
numeric_features,
categoric_features,
learning_rate,
use_dask,
n_iter,
scoring,
)
def logreg_gridsearch_regressor(
numeric_features,
categoric_features,
learning_rate=0.08,
use_dask=False,
n_iter=100,
scoring='roc_auc',
):
"""
Simple regression pipeline using randomized search to optimize logistic regression hyper-parameters
Parameters
----------
`numeric_features` : The list of numeric features
`categoric_features` : The list of categoric features
`learning_rate` : The learning rate
"""
return _logreg_gridsearch_model(
'regression',
numeric_features,
categoric_features,
learning_rate,
use_dask,
n_iter,
scoring,
)
def _logreg_gridsearch_model(
task,
numeric_features,
categoric_features,
learning_rate,
use_dask,
n_iter,
scoring,
):
if learning_rate is None:
param_space = {
'clf__C': np.logspace(-5, 5, 100),
'clf__class_weight': ['balanced', None],
}
model = LogisticRegression(max_iter=10000, fit_intercept=False)
else:
param_space = {
'clf__penalty': ['l1', 'l2'],
'clf__alpha': np.logspace(-5, 5, 100),
'clf__class_weight': ['balanced', None],
}
learning_rate_schedule = (
'constant' if isinstance(learning_rate, float) else learning_rate
)
eta0 = learning_rate if isinstance(learning_rate, float) else 0
model = SGDClassifier(
learning_rate=learning_rate_schedule,
eta0=eta0,
loss='log',
max_iter=10000,
fit_intercept=False,
)
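# a float learning_rate means a constant schedule with eta0 set to that value; otherwise
# the given schedule string is passed straight through to SGDClassifier (with eta0=0)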
pipe = Pipeline(
[
(
'preprocessing',
simple_proc_for_linear_algoritms(
numeric_features, categoric_features
),
),
('clf', model),
]
)
if use_dask:
from dask_ml.model_selection import RandomizedSearchCV
return RandomizedSearchCV(
pipe, param_space, n_iter=n_iter, scoring=scoring, cv=5
)
else:
from sklearn.model_selection import RandomizedSearchCV
return RandomizedSearchCV(
pipe, param_space, n_iter=n_iter, scoring=scoring, cv=5
)
|
[
"sklearn.linear_model.SGDClassifier",
"soccer_xg.ml.preprocessing.simple_proc_for_linear_algoritms",
"numpy.logspace",
"sklearn.model_selection.RandomizedSearchCV",
"sklearn.linear_model.LogisticRegression"
] |
[((1818, 1873), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'max_iter': '(10000)', 'fit_intercept': '(False)'}), '(max_iter=10000, fit_intercept=False)\n', (1836, 1873), False, 'from sklearn.linear_model import LogisticRegression, SGDClassifier\n'), ((2275, 2390), 'sklearn.linear_model.SGDClassifier', 'SGDClassifier', ([], {'learning_rate': 'learning_rate_schedule', 'eta0': 'eta0', 'loss': '"""log"""', 'max_iter': '(10000)', 'fit_intercept': '(False)'}), "(learning_rate=learning_rate_schedule, eta0=eta0, loss='log',\n max_iter=10000, fit_intercept=False)\n", (2288, 2390), False, 'from sklearn.linear_model import LogisticRegression, SGDClassifier\n'), ((2819, 2894), 'sklearn.model_selection.RandomizedSearchCV', 'RandomizedSearchCV', (['pipe', 'param_space'], {'n_iter': 'n_iter', 'scoring': 'scoring', 'cv': '(5)'}), '(pipe, param_space, n_iter=n_iter, scoring=scoring, cv=5)\n', (2837, 2894), False, 'from sklearn.model_selection import RandomizedSearchCV\n'), ((3006, 3081), 'sklearn.model_selection.RandomizedSearchCV', 'RandomizedSearchCV', (['pipe', 'param_space'], {'n_iter': 'n_iter', 'scoring': 'scoring', 'cv': '(5)'}), '(pipe, param_space, n_iter=n_iter, scoring=scoring, cv=5)\n', (3024, 3081), False, 'from sklearn.model_selection import RandomizedSearchCV\n'), ((1714, 1737), 'numpy.logspace', 'np.logspace', (['(-5)', '(5)', '(100)'], {}), '(-5, 5, 100)\n', (1725, 1737), True, 'import numpy as np\n'), ((1976, 1999), 'numpy.logspace', 'np.logspace', (['(-5)', '(5)', '(100)'], {}), '(-5, 5, 100)\n', (1987, 1999), True, 'import numpy as np\n'), ((2553, 2623), 'soccer_xg.ml.preprocessing.simple_proc_for_linear_algoritms', 'simple_proc_for_linear_algoritms', (['numeric_features', 'categoric_features'], {}), '(numeric_features, categoric_features)\n', (2585, 2623), False, 'from soccer_xg.ml.preprocessing import simple_proc_for_linear_algoritms\n')]
|
import math
import random
import warnings
import numpy as np
import scipy.ndimage
import torch
from torch.autograd import Function
from torch.autograd.function import once_differentiable
import torch.backends.cudnn as cudnn
from util.logconf import logging
log = logging.getLogger(__name__)
# log.setLevel(logging.WARN)
# log.setLevel(logging.INFO)
log.setLevel(logging.DEBUG)
def cropToShape(image, new_shape, center_list=None, fill=0.0):
# log.debug([image.shape, new_shape, center_list])
# assert len(image.shape) == 3, repr(image.shape)
if center_list is None:
center_list = [int(image.shape[i] / 2) for i in range(3)]
crop_list = []
for i in range(0, 3):
crop_int = center_list[i]
if image.shape[i] > new_shape[i] and crop_int is not None:
# We can't just do crop_int +/- shape/2 since shape might be odd
# and ints round down.
start_int = crop_int - int(new_shape[i]/2)
end_int = start_int + new_shape[i]
crop_list.append(slice(max(0, start_int), end_int))
else:
crop_list.append(slice(0, image.shape[i]))
# log.debug([image.shape, crop_list])
image = image[tuple(crop_list)]
crop_list = []
for i in range(0, 3):
if image.shape[i] < new_shape[i]:
crop_int = int((new_shape[i] - image.shape[i]) / 2)
crop_list.append(slice(crop_int, crop_int + image.shape[i]))
else:
crop_list.append(slice(0, image.shape[i]))
# log.debug([image.shape, crop_list])
new_image = np.zeros(new_shape, dtype=image.dtype)
new_image[:] = fill
new_image[tuple(crop_list)] = image
return new_image
def zoomToShape(image, new_shape, square=True):
# assert image.shape[-1] in {1, 3, 4}, repr(image.shape)
if square and image.shape[0] != image.shape[1]:
crop_int = min(image.shape[0], image.shape[1])
new_shape = [crop_int, crop_int, image.shape[2]]
image = cropToShape(image, new_shape)
zoom_shape = [new_shape[i] / image.shape[i] for i in range(3)]
with warnings.catch_warnings():
warnings.simplefilter("ignore")
image = scipy.ndimage.interpolation.zoom(
image, zoom_shape,
output=None, order=0, mode='nearest', cval=0.0, prefilter=True)
return image
def randomOffset(image_list, offset_rows=0.125, offset_cols=0.125):
center_list = [int(image_list[0].shape[i] / 2) for i in range(3)]
center_list[0] += int(offset_rows * (random.random() - 0.5) * 2)
center_list[1] += int(offset_cols * (random.random() - 0.5) * 2)
center_list[2] = None
new_list = []
for image in image_list:
new_image = cropToShape(image, image.shape, center_list)
new_list.append(new_image)
return new_list
def randomZoom(image_list, scale=None, scale_min=0.8, scale_max=1.3):
if scale is None:
scale = scale_min + (scale_max - scale_min) * random.random()
new_list = []
for image in image_list:
# assert image.shape[-1] in {1, 3, 4}, repr(image.shape)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
# log.info([image.shape])
zimage = scipy.ndimage.interpolation.zoom(
image, [scale, scale, 1.0],
output=None, order=0, mode='nearest', cval=0.0, prefilter=True)
image = cropToShape(zimage, image.shape)
new_list.append(image)
return new_list
_randomFlip_transform_list = [
# lambda a: np.rot90(a, axes=(0, 1)),
# lambda a: np.flip(a, 0),
lambda a: np.flip(a, 1),
]
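# each bit of transform_bits toggles one transform in the list above; with a single entry
# the random bits drawn in randomFlip are just 0 or 1 (flip along axis 1 or not)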
def randomFlip(image_list, transform_bits=None):
if transform_bits is None:
transform_bits = random.randrange(0, 2 ** len(_randomFlip_transform_list))
new_list = []
for image in image_list:
# assert image.shape[-1] in {1, 3, 4}, repr(image.shape)
for n in range(len(_randomFlip_transform_list)):
if transform_bits & 2**n:
# prhist(image, 'before')
image = _randomFlip_transform_list[n](image)
# prhist(image, 'after ')
new_list.append(image)
return new_list
def randomSpin(image_list, angle=None, range_tup=None, axes=(0, 1)):
if range_tup is None:
range_tup = (0, 360)
if angle is None:
angle = range_tup[0] + (range_tup[1] - range_tup[0]) * random.random()
new_list = []
for image in image_list:
# assert image.shape[-1] in {1, 3, 4}, repr(image.shape)
image = scipy.ndimage.interpolation.rotate(
image, angle, axes=axes, reshape=False,
output=None, order=0, mode='nearest', cval=0.0, prefilter=True)
new_list.append(image)
return new_list
def randomNoise(image_list, noise_min=-0.1, noise_max=0.1):
noise = np.zeros_like(image_list[0])
noise += (noise_max - noise_min) * np.random.random_sample(image_list[0].shape) + noise_min
noise *= 5
noise = scipy.ndimage.filters.gaussian_filter(noise, 3)
# noise += (noise_max - noise_min) * np.random.random_sample(image_hsv.shape) + noise_min
new_list = []
for image_hsv in image_list:
image_hsv = image_hsv + noise
new_list.append(image_hsv)
return new_list
def randomHsvShift(image_list, h=None, s=None, v=None,
h_min=-0.1, h_max=0.1,
s_min=0.5, s_max=2.0,
v_min=0.5, v_max=2.0):
if h is None:
h = h_min + (h_max - h_min) * random.random()
if s is None:
s = s_min + (s_max - s_min) * random.random()
if v is None:
v = v_min + (v_max - v_min) * random.random()
new_list = []
for image_hsv in image_list:
# assert image_hsv.shape[-1] == 3, repr(image_hsv.shape)
image_hsv[:,:,0::3] += h
image_hsv[:,:,1::3] = image_hsv[:,:,1::3] ** s
image_hsv[:,:,2::3] = image_hsv[:,:,2::3] ** v
new_list.append(image_hsv)
return clampHsv(new_list)
def clampHsv(image_list):
new_list = []
for image_hsv in image_list:
image_hsv = image_hsv.clone()
# Hue wraps around
image_hsv[:,:,0][image_hsv[:,:,0] > 1] -= 1
image_hsv[:,:,0][image_hsv[:,:,0] < 0] += 1
# Everything else clamps between 0 and 1
image_hsv[image_hsv > 1] = 1
image_hsv[image_hsv < 0] = 0
new_list.append(image_hsv)
return new_list
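# --- Illustrative usage (not part of the original module; shapes and the
# particular augmentation order are assumptions) -----------------------------
# Each random* helper above takes and returns a *list* of images so that an
# input volume and its label mask receive exactly the same random transform:
#
#   img = np.random.random_sample((64, 64, 3))
#   mask = np.zeros((64, 64, 3))
#   img, mask = randomOffset([img, mask])
#   img, mask = randomFlip([img, mask])
#   img, mask = randomSpin([img, mask])
#   img = randomNoise([img])[0]   # noise only on the image, not the mask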
# def torch_augment(input):
# theta = random.random() * math.pi * 2
# s = math.sin(theta)
# c = math.cos(theta)
# c1 = 1 - c
# axis_vector = torch.rand(3, device='cpu', dtype=torch.float64)
# axis_vector -= 0.5
# axis_vector /= axis_vector.abs().sum()
# l, m, n = axis_vector
#
# matrix = torch.tensor([
# [l*l*c1 + c, m*l*c1 - n*s, n*l*c1 + m*s, 0],
# [l*m*c1 + n*s, m*m*c1 + c, n*m*c1 - l*s, 0],
# [l*n*c1 - m*s, m*n*c1 + l*s, n*n*c1 + c, 0],
# [0, 0, 0, 1],
# ], device=input.device, dtype=torch.float32)
#
# return th_affine3d(input, matrix)
# following from https://github.com/ncullen93/torchsample/blob/master/torchsample/utils.py
# MIT licensed
# def th_affine3d(input, matrix):
# """
# 3D Affine image transform on torch.Tensor
# """
# A = matrix[:3,:3]
# b = matrix[:3,3]
#
# # make a meshgrid of normal coordinates
# coords = th_iterproduct(input.size(-3), input.size(-2), input.size(-1), dtype=torch.float32)
#
# # shift the coordinates so center is the origin
# coords[:,0] = coords[:,0] - (input.size(-3) / 2. - 0.5)
# coords[:,1] = coords[:,1] - (input.size(-2) / 2. - 0.5)
# coords[:,2] = coords[:,2] - (input.size(-1) / 2. - 0.5)
#
# # apply the coordinate transformation
# new_coords = coords.mm(A.t().contiguous()) + b.expand_as(coords)
#
# # shift the coordinates back so origin is origin
# new_coords[:,0] = new_coords[:,0] + (input.size(-3) / 2. - 0.5)
# new_coords[:,1] = new_coords[:,1] + (input.size(-2) / 2. - 0.5)
# new_coords[:,2] = new_coords[:,2] + (input.size(-1) / 2. - 0.5)
#
# # map new coordinates using bilinear interpolation
# input_transformed = th_trilinear_interp3d(input, new_coords)
#
# return input_transformed
#
#
# def th_trilinear_interp3d(input, coords):
# """
# trilinear interpolation of 3D torch.Tensor image
# """
# # take clamp then floor/ceil of x coords
# x = torch.clamp(coords[:,0], 0, input.size(-3)-2)
# x0 = x.floor()
# x1 = x0 + 1
# # take clamp then floor/ceil of y coords
# y = torch.clamp(coords[:,1], 0, input.size(-2)-2)
# y0 = y.floor()
# y1 = y0 + 1
# # take clamp then floor/ceil of z coords
# z = torch.clamp(coords[:,2], 0, input.size(-1)-2)
# z0 = z.floor()
# z1 = z0 + 1
#
# stride = torch.tensor(input.stride()[-3:], dtype=torch.int64, device=input.device)
# x0_ix = x0.mul(stride[0]).long()
# x1_ix = x1.mul(stride[0]).long()
# y0_ix = y0.mul(stride[1]).long()
# y1_ix = y1.mul(stride[1]).long()
# z0_ix = z0.mul(stride[2]).long()
# z1_ix = z1.mul(stride[2]).long()
#
# # input_flat = th_flatten(input)
# input_flat = x.contiguous().view(x[0], x[1], -1)
#
# vals_000 = input_flat[:, :, x0_ix+y0_ix+z0_ix]
# vals_001 = input_flat[:, :, x0_ix+y0_ix+z1_ix]
# vals_010 = input_flat[:, :, x0_ix+y1_ix+z0_ix]
# vals_011 = input_flat[:, :, x0_ix+y1_ix+z1_ix]
# vals_100 = input_flat[:, :, x1_ix+y0_ix+z0_ix]
# vals_101 = input_flat[:, :, x1_ix+y0_ix+z1_ix]
# vals_110 = input_flat[:, :, x1_ix+y1_ix+z0_ix]
# vals_111 = input_flat[:, :, x1_ix+y1_ix+z1_ix]
#
# xd = x - x0
# yd = y - y0
# zd = z - z0
# xm1 = 1 - xd
# ym1 = 1 - yd
# zm1 = 1 - zd
#
# x_mapped = (
# vals_000.mul(xm1).mul(ym1).mul(zm1) +
# vals_001.mul(xm1).mul(ym1).mul(zd) +
# vals_010.mul(xm1).mul(yd).mul(zm1) +
# vals_011.mul(xm1).mul(yd).mul(zd) +
# vals_100.mul(xd).mul(ym1).mul(zm1) +
# vals_101.mul(xd).mul(ym1).mul(zd) +
# vals_110.mul(xd).mul(yd).mul(zm1) +
# vals_111.mul(xd).mul(yd).mul(zd)
# )
#
# return x_mapped.view_as(input)
#
# def th_iterproduct(*args, dtype=None):
# return torch.from_numpy(np.indices(args).reshape((len(args),-1)).T)
#
# def th_flatten(x):
# """Flatten tensor"""
# return x.contiguous().view(x[0], x[1], -1)
|
[
"numpy.zeros_like",
"numpy.flip",
"warnings.simplefilter",
"numpy.random.random_sample",
"numpy.zeros",
"util.logconf.logging.getLogger",
"random.random",
"warnings.catch_warnings"
] |
[((266, 293), 'util.logconf.logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (283, 293), False, 'from util.logconf import logging\n'), ((1568, 1606), 'numpy.zeros', 'np.zeros', (['new_shape'], {'dtype': 'image.dtype'}), '(new_shape, dtype=image.dtype)\n', (1576, 1606), True, 'import numpy as np\n'), ((4847, 4875), 'numpy.zeros_like', 'np.zeros_like', (['image_list[0]'], {}), '(image_list[0])\n', (4860, 4875), True, 'import numpy as np\n'), ((2086, 2111), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (2109, 2111), False, 'import warnings\n'), ((2121, 2152), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (2142, 2152), False, 'import warnings\n'), ((3602, 3615), 'numpy.flip', 'np.flip', (['a', '(1)'], {}), '(a, 1)\n', (3609, 3615), True, 'import numpy as np\n'), ((3092, 3117), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (3115, 3117), False, 'import warnings\n'), ((3131, 3162), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (3152, 3162), False, 'import warnings\n'), ((4915, 4959), 'numpy.random.random_sample', 'np.random.random_sample', (['image_list[0].shape'], {}), '(image_list[0].shape)\n', (4938, 4959), True, 'import numpy as np\n'), ((2949, 2964), 'random.random', 'random.random', ([], {}), '()\n', (2962, 2964), False, 'import random\n'), ((4402, 4417), 'random.random', 'random.random', ([], {}), '()\n', (4415, 4417), False, 'import random\n'), ((5526, 5541), 'random.random', 'random.random', ([], {}), '()\n', (5539, 5541), False, 'import random\n'), ((5598, 5613), 'random.random', 'random.random', ([], {}), '()\n', (5611, 5613), False, 'import random\n'), ((5670, 5685), 'random.random', 'random.random', ([], {}), '()\n', (5683, 5685), False, 'import random\n'), ((2509, 2524), 'random.random', 'random.random', ([], {}), '()\n', (2522, 2524), False, 'import random\n'), ((2578, 2593), 'random.random', 'random.random', ([], {}), '()\n', (2591, 2593), False, 'import random\n')]
|
"""
Licensed under the terms of the BSD-3-Clause license.
Copyright (C) 2019 <NAME>, <EMAIL>
"""
from dataclasses import dataclass
from typing import ClassVar, Generator, Tuple, Union
import numpy as _np
from numpy.lib.stride_tricks import as_strided
from . audio import AudioFile
from . container import Params
from . signal.tools import zero_padding as _zero_padding
from . types import Array, Schema
@dataclass
class LazySegmentParams:
"""Encapsulates segmentation parameters."""
n_perseg: int
n_overlap: int
norm: bool = False
mono: bool = True
expand: bool = True
dtype: str = 'float64'
SEGMENTATION_PARAMS = {
"type": "object",
"properties": {
"n_perseg": {"type": "integer"},
"n_overlap": {"type": "integer"},
"extend": {"anyOf": [{"type": "boolean"}, {"type": "integer"}]},
"pad": {"anyOf": [{"type": "boolean"}, {"type": "integer"}]}
}
}
@dataclass
class SegmentationParams(Params):
"""Parameters for Segmentation."""
_schema: ClassVar[Schema] = SEGMENTATION_PARAMS
n_perseg: int = 512
n_overlap: int = 256
extend: Union[bool, int] = True
pad: Union[bool, int] = True
@dataclass
class Segment:
"""Encapsulates audio segment data and meta data."""
idx: int
start: int
stop: int
center: int
n_frames: int
data: _np.ndarray
class Segments:
"""Segement"""
def __init__(self, params: SegmentationParams, segs: _np.ndarray) -> None:
self._segs = segs
self._params = params
if self._params.extend:
self._offset = 0
else:
self._offset = self._params.n_perseg // 2
@property
def data(self) -> Array:
"""Return the raw segment data array."""
return self._segs
@property
def n_segs(self) -> int:
return self._segs.shape[1]
@property
def n_perseg(self) -> int:
return self._params.n_perseg
@property
def n_overlap(self) -> int:
return self._params.n_overlap
@property
def step(self) -> int:
return self._params.n_perseg - self._params.n_overlap
@property
def params(self) -> SegmentationParams:
"""Parameter set used to compute this instance."""
return self._params
def center(self, seg_idx) -> int:
"""Return the center of segment ``seg_idx`` as frame number
of the original signal.
Args:
            seg_idx: Segment index.
Returns:
Center frame index.
"""
if not (0 <= seg_idx < self.n_segs):
raise IndexError('Requested index out of range.')
return seg_idx * self.step + self._offset
def bounds(self, seg_idx) -> Tuple[int, int]:
"""Return the frame numbers of the lower and upper bound
of segment ``seg_idx``. Lower bound index is inclusive,
upper bound index is exclusive.
Args:
seg_idx: Segment index.
Returns:
Lower and upper bound frame index.
"""
if not (0 <= seg_idx < self.n_segs):
raise IndexError('Requested index out of range.')
lob = self.center(seg_idx) - self._params.n_perseg // 2
upb = lob + self._params.n_perseg
return lob, upb
def get(self, seg_idx) -> Segment:
"""Retrun segment ``seg_idx`` wrapped in an ``Segment`` object.
Args:
seg_idx: Segment index.
Returns:
Segment ``seg_idx``.
"""
return Segment(seg_idx, *self.bounds(seg_idx), self.center(seg_idx),
self._params.n_perseg, self[seg_idx])
def __iter__(self) -> Generator[_np.ndarray, None, None]:
for seg in self._segs.T:
yield _np.expand_dims(seg, 1)
def __getitem__(self, key) -> _np.ndarray:
out = self._segs[:, key]
if out.ndim < 2:
return _np.expand_dims(out, 1)
return out
def __repr__(self) -> str:
return f'Segments(params={self._params!s}, segs={self._segs!s})'
def __str__(self) -> str:
return f'<n_segs: {self.n_segs}, len_seg: {self._params.n_perseg}>'
class Segmentation:
"""Segementation"""
def __init__(self, n_perseg: int, n_overlap: int, extend: bool = True,
pad: bool = True) -> None:
"""Subdivide input array.
Args:
n_perseg: Samples per segment.
n_overlap: Overlap in samples.
extend: Extend a half window at start and end.
pad: Pad extension.
"""
if n_perseg > 0:
self.n_perseg = n_perseg
else:
msg = (f'Argument to ``n_perseg`` must be greater than '
f'zero.\nFound ``n_perseg`` = {n_perseg}.')
raise ValueError(msg)
if 0 < n_overlap < n_perseg:
self.n_overlap = n_overlap
else:
msg = (f'Argument to ``n_overlap`` must be greater than '
                   f'zero and less than ``n_perseg``.\n Found '
f'``n_perseg`` = {self.n_perseg} and ``n_overlap`` '
f' = {n_overlap}.')
raise ValueError(msg)
self._extend = extend
self._pad = pad
self._ext_len = 0
self._pad_len = 0
def transform(self, data: _np.ndarray) -> Segments:
"""Apply segmentation.
Input array must be either one-, or two-dimensional.
If ``data`` is two-dimensional, it must be of shape
(n_elements, 1).
Args:
data: Input array.
Returns:
``Segments`` object.
"""
self._validate_data_shape(data)
self._validate_nps(data.shape[0])
n_frames = data.shape[0]
step = self.n_perseg - self.n_overlap
if self._extend:
self._ext_len = self.n_perseg // 2
if self._pad:
self._pad_len = (-(n_frames-self.n_perseg) % step) % self.n_perseg
data = _np.pad(data.squeeze(), (self._ext_len, self._ext_len+self._pad_len))
new_shape = data.shape[:-1] + ((data.shape[-1] - self.n_overlap) // step, self.n_perseg)
new_strides = data.strides[:-1] + (step * data.strides[-1], data.strides[-1])
segs = as_strided(data, new_shape, new_strides, writeable=False).T
params = SegmentationParams(self.n_perseg, self.n_overlap,
self._extend, self._pad)
return Segments(params, segs)
def _validate_nps(self, n_frames: int) -> None:
if self.n_perseg > n_frames:
            msg = (f'Input data length ({n_frames}) incompatible with '
                   f'parameter ``n_perseg`` = {self.n_perseg}. ``n_perseg`` '
                   f'must be less than or equal to input data length.')
raise ValueError(msg)
def _validate_data_shape(self, data: _np.ndarray) -> None:
if not (0 < data.ndim < 3):
msg = (f'Input array must have one or two dimensions.\n'
f'Found ``data.shape`` = {data.shape}.')
elif data.ndim == 2 and data.shape[1] != 1:
            msg = (f'Two-dimensional input arrays can only have one '
f'column.\nFound ``data.shape``= {data.shape}.')
else:
return None
raise ValueError(msg)
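# Worked example for ``Segmentation`` (values are illustrative, derived from
# the arithmetic in ``transform``): with ``n_perseg=512``, ``n_overlap=256``
# (hop of 256) and the default extension/padding, a 1024-frame signal is
# padded to 1536 frames and sliced into 5 half-overlapping segments:
#
#   cutter = Segmentation(n_perseg=512, n_overlap=256)
#   segs = cutter.transform(_np.zeros(1024))
#   assert segs.n_segs == 5 and segs.data.shape == (512, 5)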
class LazySegments:
"""Read segments from audio file."""
def __init__(self, snd: AudioFile, n_perseg: int, n_overlap: int,
norm: bool = False, mono: bool = True,
expand: bool = True, dtype: str = 'float64') -> None:
"""Compute equal-sized segments.
Args:
snd:
n_perseg: Number of samples per segment.
n_overlap: Size of segment overlap in samples.
norm: Normalize each segment separately.
mono: If ``True`` mixdown all channels.
expand: Start segmentation at -n_perseg//2.
dtype: Dtype of output array.
"""
self._snd = snd
self.n_perseg = n_perseg
self.n_overlap = n_overlap
self.expand = expand
self.n_segs = int(_np.ceil(self._snd.n_frames / n_overlap))
if expand:
self.n_segs += 1
self.offset = -self.n_perseg // 2
else:
self.n_segs -= 1
self.offset = 0
self.step = self.n_perseg - self.n_overlap
self.norm = norm
self.mono = mono
self.dtype = dtype
def compute_bounds(self, seg_idx):
if seg_idx < 0:
raise IndexError('Expected positive integer for ``seg_idx``. '
f'Got {seg_idx}.')
if seg_idx >= self.n_segs:
raise IndexError(f'You requested segment {seg_idx}, but there '
f'are only {self.n_segs} segments.')
start = seg_idx * self.n_overlap + self.offset
return start, start + self.n_perseg
def read_segment(self, seg_idx: int, norm: bool = None,
mono: bool = None, dtype: str = None):
norm = norm or self.norm
mono = mono or self.mono
dtype = dtype or self.dtype
offset = seg_idx * self.n_overlap + self.offset
return self._snd.read(self.n_perseg, offset, norm, mono, dtype)
def loc(self, seg_idx: int, norm: bool = None,
mono: bool = None, dtype: str = None) -> Segment:
"""Locate segment by index.
Args:
seg_idx: Segment index.
norm: If ``True``, normalize each segment separately.
Falls back to ``self.norm``.
mono: If ``True`` mixdown all channels.
Falls back to ``self.mono``.
dtype: Output dtype. Falls back to ``self.dtype``.
Returns:
Segment number ``seg_idx``.
"""
start, stop = self.compute_bounds(seg_idx)
data = self.read_segment(seg_idx, norm, mono, dtype)
return Segment(seg_idx, start, stop, self.n_perseg,
self._snd.fps, data)
def __getitem__(self, key):
return self.loc(key)
def __iter__(self):
for i in range(self.n_segs):
yield self.__getitem__(i)
def iter_data(self):
for i in range(self.n_segs):
yield self._snd.read(self.n_perseg)
def iter_bounds(self):
for i in range(self.n_segs):
yield self.compute_bounds(i)
def _by_samples(x: Array, n_perseg: int) -> Array:
"""Split ``x`` into segments of lenght ``n_perseg`` samples.
This function automatically applies zero padding for inputs that cannot be
split evenly.
Args:
x: One-dimensional input array.
n_perseg: Length of segments in samples.
Returns:
Two-dimensional array of segments.
"""
if not isinstance(n_perseg, int):
        raise TypeError('Param ``n_perseg`` must be of type int.')
    if n_perseg < 1:
        raise ValueError('``n_perseg`` out of range. '
                         'Expected 1 <= n_perseg.')
fit_size = int(_np.ceil(x.size / n_perseg) * n_perseg)
n_ext = fit_size - x.size
x = _zero_padding(x, n_ext)
return x.reshape(-1, n_perseg)
def _by_samples_with_hop(x: Array, n_perseg: int, hop_size: int) -> Array:
"""Split `x` into segments of lenght `n_perseg` samples. Move the
extraction window `hop_size` samples.
This function automatically applies zero padding for inputs that cannot be
split evenly.
Args:
x: One-dimensional input array.
n_perseg: Length of segments in samples.
hop_size: Hop size in samples
Returns:
Two-dimensional array of segments.
"""
if not (isinstance(n_perseg, int) and isinstance(hop_size, int)):
raise TypeError('Params must be of type int.')
if not 1 < n_perseg <= x.size:
raise ValueError('n_perseg out of range. '
'Expected 1 < n_perseg <= len(x).')
if hop_size < 1:
raise ValueError('hop_size out of range. Expected 1 < hop_size.')
n_hops = (x.size - n_perseg) // hop_size + 1
n_segs = n_hops
if (x.size - n_perseg) % hop_size != 0 and n_perseg > hop_size:
n_segs += 1
fit_size = hop_size * n_hops + n_perseg
n_ext = fit_size - x.size
x = _zero_padding(x, n_ext)
out = _np.empty((n_segs, n_perseg), dtype=x.dtype)
for i in range(n_segs):
off = i * hop_size
out[i] = x[off:off+n_perseg]
return out
def by_samples(x: Array, n_perseg: int, hop_size: int = 0) -> Array:
"""Segment the input into n segments of length n_perseg and move the
window `hop_size` samples.
This function automatically applies zero padding for inputs that cannot be
split evenly.
If `hop_size` is less than one, it is reset to `n_perseg`.
Overlap in percent is calculated as ov = hop_size / n_perseg * 100.
Args:
x One-dimensional input array.
n_perseg Length of segments in samples.
hop_size Hop size in samples. If < 1, hop_size = n_perseg.
Returns:
Two-dimensional array of segments.
"""
if hop_size < 1:
return _by_samples(x, n_perseg)
else:
return _by_samples_with_hop(x, n_perseg, hop_size)
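# Example (illustrative numbers): a 10-sample signal segmented with
# ``n_perseg=4`` and ``hop_size=2`` yields four windows starting at samples
# 0, 2, 4 and 6, i.e. an array of shape (4, 4). With ``hop_size=0`` the call
# falls back to non-overlapping segments, zero-padding the signal to 12
# samples and returning an array of shape (3, 4).
#
#   segs = by_samples(_np.arange(10), n_perseg=4, hop_size=2)   # (4, 4)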
def by_ms(x: Array, fps: int, ms_perseg: int, hop_size: int = 0) -> Array:
"""Segment the input into n segments of length ms_perseg and move the
window `hop_size` milliseconds.
This function automatically applies zero padding for inputs that cannot be
split evenly.
If `hop_size` is less than one, it is reset to `n_perseg`.
Overlap in percent is calculated as ov = hop_size / n_perseg * 100.
Args:
x One-dimensional input array.
        fps          Sampling frequency.
        ms_perseg    Length of segments in milliseconds.
hop_size Hop size in milliseconds. If < 1, hop_size = n_perseg.
Returns:
Two-dimensional array of segments.
"""
n_perseg = fps * ms_perseg // 1000
hop_size = fps * hop_size // 1000
return by_samples(x, n_perseg, hop_size)
def by_onsets(x: Array, n_perseg: int, ons_idx: Array, off: int = 0
) -> Array:
"""Split input `x` into len(ons_idx) segments of length `n_perseg`.
    Extraction windows start at `ons_idx[i]` + `off`.
Args:
x One-dimensional input array.
n_perseg Length of segments in samples.
ons_idx One-dimensional array of onset positions.
off Length of offset.
Returns:
Two-dimensional array of shape (len(ons_idx), n_perseg).
"""
n_ons = ons_idx.size
out = _np.empty((n_ons, n_perseg), dtype=x.dtype)
for i, idx in enumerate(ons_idx):
pos = idx + off
if pos < 0:
pos = 0
elif pos >= x.size:
pos = x.size - 1
if pos + n_perseg >= x.size:
buff = x[pos:]
out[i] = _zero_padding(buff, n_perseg-buff.size)
else:
out[i] = x[pos:pos+n_perseg]
return out
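# Example (illustrative values): cut a 512-sample window around each detected
# onset, starting 64 samples before the onset position.
#
#   sig = _np.random.randn(44100)
#   onsets = _np.array([1000, 12000, 30000])
#   windows = by_onsets(sig, 512, onsets, off=-64)   # shape (3, 512)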
|
[
"numpy.ceil",
"numpy.empty",
"numpy.lib.stride_tricks.as_strided",
"numpy.expand_dims"
] |
[((12391, 12435), 'numpy.empty', '_np.empty', (['(n_segs, n_perseg)'], {'dtype': 'x.dtype'}), '((n_segs, n_perseg), dtype=x.dtype)\n', (12400, 12435), True, 'import numpy as _np\n'), ((14728, 14771), 'numpy.empty', '_np.empty', (['(n_ons, n_perseg)'], {'dtype': 'x.dtype'}), '((n_ons, n_perseg), dtype=x.dtype)\n', (14737, 14771), True, 'import numpy as _np\n'), ((3905, 3928), 'numpy.expand_dims', '_np.expand_dims', (['out', '(1)'], {}), '(out, 1)\n', (3920, 3928), True, 'import numpy as _np\n'), ((6256, 6313), 'numpy.lib.stride_tricks.as_strided', 'as_strided', (['data', 'new_shape', 'new_strides'], {'writeable': '(False)'}), '(data, new_shape, new_strides, writeable=False)\n', (6266, 6313), False, 'from numpy.lib.stride_tricks import as_strided\n'), ((8147, 8187), 'numpy.ceil', '_np.ceil', (['(self._snd.n_frames / n_overlap)'], {}), '(self._snd.n_frames / n_overlap)\n', (8155, 8187), True, 'import numpy as _np\n'), ((11108, 11135), 'numpy.ceil', '_np.ceil', (['(x.size / n_perseg)'], {}), '(x.size / n_perseg)\n', (11116, 11135), True, 'import numpy as _np\n'), ((3756, 3779), 'numpy.expand_dims', '_np.expand_dims', (['seg', '(1)'], {}), '(seg, 1)\n', (3771, 3779), True, 'import numpy as _np\n')]
|
from abc import ABC, abstractmethod
from typing import Tuple, Union, Optional, Iterable
import ConfigSpace as CS
import ConfigSpace.hyperparameters as CSH
import numpy as np
from matplotlib import pyplot as plt
from scipy.stats import norm
from pyPDP.surrogate_models import SurrogateModel
from pyPDP.utils.plotting import get_ax, check_and_set_axis
from pyPDP.utils.utils import get_hyperparameters, get_selected_idx, ConfigSpaceHolder
class AcquisitionFunction(ConfigSpaceHolder, ABC):
def __init__(self,
config_space: CS.ConfigurationSpace,
surrogate_model: SurrogateModel,
samples_for_optimization: int = 100,
minimize_objective: bool = True,
seed=None):
super().__init__(config_space, seed=seed)
self.surrogate_model = surrogate_model
self.n_samples_for_optimization = samples_for_optimization
self.minimize_objective = minimize_objective
@abstractmethod
def __call__(self, configuration: CS.Configuration) -> Union[float, np.ndarray]:
pass
def update(self, eta: float):
pass
def get_optimum(self) -> CS.Configuration:
return self._get_optimum_uniform_distribution()[0]
def _get_optimum_uniform_distribution(self) -> Tuple[CS.Configuration, float]:
configs = self.config_space.sample_configuration(self.n_samples_for_optimization)
values = self(configs)
config_value_pairs = [(config, value) for config, value in zip(configs, values)]
return max(config_value_pairs, key=lambda x: x[1])
def convert_configs(self, configuration: Union[CS.Configuration, np.ndarray]):
if isinstance(configuration, CS.Configuration):
x = np.asarray(configuration.get_array())
x = x.reshape([1, -1])
elif isinstance(configuration, list):
x = []
for config in configuration:
if isinstance(config, CS.Configuration):
x.append(config.get_array())
else:
x.append(config.copy())
x = np.asarray(x)
else:
x = configuration.copy()
return x
def plot(self,
color_acquisition="darkgreen",
color_optimum="red",
show_optimum=True,
x_hyperparameters: Optional[Iterable[CSH.Hyperparameter]] = None,
ax: Optional[plt.Axes] = None):
ax = get_ax(ax)
x_hyperparameters = get_hyperparameters(x_hyperparameters, self.config_space)
check_and_set_axis(ax, x_hyperparameters, ylabel="Acquisition")
# Sample configs and get values of acquisition function
configs = self.config_space.sample_configuration(self.n_samples_for_optimization * len(x_hyperparameters))
acquisition_y = np.asarray([self(x) for x in configs]).reshape(-1)
x = np.asarray([[config[hp.name] for hp in x_hyperparameters] for config in configs])
# Get optimum
optimum = self.get_optimum()
# Plot
n_hyperparameters = len(tuple(x_hyperparameters))
if n_hyperparameters == 1: # 1D
# Sort by x axis
order = np.argsort(x, axis=0)[:, 0]
x = x[order, 0]
acquisition_y = acquisition_y[order]
ax.fill_between(x, acquisition_y, color=color_acquisition, alpha=0.3)
ax.plot(x, acquisition_y, color=color_acquisition, label=self.__class__.__name__)
if show_optimum:
ax.plot(list(optimum.values())[0], self(optimum), "*", color=color_optimum, label=f"Optimum ({optimum})",
markersize=15)
elif n_hyperparameters == 2: # 2D
idx = get_selected_idx(x_hyperparameters, self.config_space)
raise NotImplementedError("2D currently not implemented (#TODO)")
else:
raise NotImplementedError(f"Plotting for {n_hyperparameters} dimensions not implemented. "
"Please select a specific hp by setting `x_hyperparemeters`")
class ExpectedImprovement(AcquisitionFunction):
def __init__(
self,
config_space,
surrogate_model: SurrogateModel,
eps: float = 0.0, # Exploration parameter
samples_for_optimization=100,
minimize_objective=True,
seed=None
):
super().__init__(
config_space,
surrogate_model,
samples_for_optimization,
minimize_objective, seed=seed
)
if not minimize_objective:
raise NotImplementedError('EI for maximization')
self.eta = 0
self.exploration = eps
def __call__(self, configuration: Union[CS.Configuration, np.ndarray]) -> Union[float, np.ndarray]:
x = self.convert_configs(configuration)
mean, sigma = self.surrogate_model.predict(x)
Z = (self.eta - mean - self.exploration) / sigma
Phi_Z = norm.cdf(Z)
phi_Z = norm.pdf(Z)
ret = sigma * (Z * Phi_Z + phi_Z)
ret[sigma == 0] = 0
return ret
def update(self, eta: float):
self.eta = eta
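# Hedged sketch of how an acquisition function is typically driven inside a
# Bayesian-optimisation loop. ``my_config_space``, ``my_surrogate``,
# ``objective`` and ``n_iterations`` are placeholders, not part of this
# module; only the constructor, ``update`` and ``get_optimum`` above are used.
#
#   ei = ExpectedImprovement(my_config_space, my_surrogate)
#   incumbent = float("inf")
#   for _ in range(n_iterations):
#       ei.update(eta=incumbent)            # current best observed value
#       candidate = ei.get_optimum()        # config maximising the acquisition
#       incumbent = min(incumbent, objective(candidate))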
class ProbabilityOfImprovement(AcquisitionFunction):
def __init__(self,
config_space: CS.ConfigurationSpace,
surrogate_model: SurrogateModel,
eps: float = 0.1, # Exploration parameter
samples_for_optimization: int = 100,
minimize_objective=True,
seed=None):
super().__init__(config_space, surrogate_model, samples_for_optimization=samples_for_optimization,
minimize_objective=minimize_objective, seed=seed)
self.eta = 0
self.exploration = eps
def __call__(self, configuration: Union[CS.Configuration, np.ndarray]) -> Union[float, np.ndarray]:
x = self.convert_configs(configuration)
mean, sigma = self.surrogate_model.predict(x)
if self.minimize_objective:
temp = (self.eta - mean - self.exploration) / sigma
else:
temp = (mean - self.eta - self.exploration) / sigma
prob_of_improvement = norm.cdf(temp)
prob_of_improvement[sigma == 0] = 0
return prob_of_improvement
def update(self, eta: float):
self.eta = eta
class LowerConfidenceBound(AcquisitionFunction):
"""LCB"""
def __init__(self,
config_space: CS.ConfigurationSpace,
surrogate_model: SurrogateModel,
tau: float = 5,
samples_for_optimization=100,
minimize_objective=True,
seed=None):
super().__init__(config_space, surrogate_model, samples_for_optimization, minimize_objective=minimize_objective,
seed=seed)
self.tau = tau
def __call__(self, configuration: Union[CS.Configuration, np.ndarray]) -> Union[float, np.ndarray]:
x = self.convert_configs(configuration)
mean, sigma = self.surrogate_model.predict(x)
if self.minimize_objective:
return - mean + self.tau * sigma
else:
return mean + self.tau * sigma
|
[
"pyPDP.utils.utils.get_hyperparameters",
"pyPDP.utils.plotting.get_ax",
"numpy.asarray",
"scipy.stats.norm.pdf",
"scipy.stats.norm.cdf",
"numpy.argsort",
"pyPDP.utils.utils.get_selected_idx",
"pyPDP.utils.plotting.check_and_set_axis"
] |
[((2469, 2479), 'pyPDP.utils.plotting.get_ax', 'get_ax', (['ax'], {}), '(ax)\n', (2475, 2479), False, 'from pyPDP.utils.plotting import get_ax, check_and_set_axis\n'), ((2508, 2565), 'pyPDP.utils.utils.get_hyperparameters', 'get_hyperparameters', (['x_hyperparameters', 'self.config_space'], {}), '(x_hyperparameters, self.config_space)\n', (2527, 2565), False, 'from pyPDP.utils.utils import get_hyperparameters, get_selected_idx, ConfigSpaceHolder\n'), ((2574, 2637), 'pyPDP.utils.plotting.check_and_set_axis', 'check_and_set_axis', (['ax', 'x_hyperparameters'], {'ylabel': '"""Acquisition"""'}), "(ax, x_hyperparameters, ylabel='Acquisition')\n", (2592, 2637), False, 'from pyPDP.utils.plotting import get_ax, check_and_set_axis\n'), ((2905, 2990), 'numpy.asarray', 'np.asarray', (['[[config[hp.name] for hp in x_hyperparameters] for config in configs]'], {}), '([[config[hp.name] for hp in x_hyperparameters] for config in\n configs])\n', (2915, 2990), True, 'import numpy as np\n'), ((5016, 5027), 'scipy.stats.norm.cdf', 'norm.cdf', (['Z'], {}), '(Z)\n', (5024, 5027), False, 'from scipy.stats import norm\n'), ((5044, 5055), 'scipy.stats.norm.pdf', 'norm.pdf', (['Z'], {}), '(Z)\n', (5052, 5055), False, 'from scipy.stats import norm\n'), ((6221, 6235), 'scipy.stats.norm.cdf', 'norm.cdf', (['temp'], {}), '(temp)\n', (6229, 6235), False, 'from scipy.stats import norm\n'), ((2120, 2133), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (2130, 2133), True, 'import numpy as np\n'), ((3211, 3232), 'numpy.argsort', 'np.argsort', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (3221, 3232), True, 'import numpy as np\n'), ((3745, 3799), 'pyPDP.utils.utils.get_selected_idx', 'get_selected_idx', (['x_hyperparameters', 'self.config_space'], {}), '(x_hyperparameters, self.config_space)\n', (3761, 3799), False, 'from pyPDP.utils.utils import get_hyperparameters, get_selected_idx, ConfigSpaceHolder\n')]
|
from abc import ABC
from dataclasses import dataclass
from enum import Enum
from logging import getLogger
from typing import List, Tuple, Callable
from beamngpy import Scenario
_logger = getLogger("DriveBuild.SimNode.DBTypes.Criteria")
class KPValue(Enum):
"""
Represents the Kleene-Priest logic.
"""
    TRUE = True
    FALSE = False
UNKNOWN = None
# NOTE Do not underestimate the complexity of the implementation of these logical operators!
def __and__(self, other):
if self == self.FALSE or other == self.FALSE:
return self.FALSE
if self == self.UNKNOWN or other == self.UNKNOWN:
return self.UNKNOWN
return self.TRUE
def __or__(self, other):
if self == self.TRUE or other == self.TRUE:
return self.TRUE
if self == self.UNKNOWN or other == self.UNKNOWN:
return self.UNKNOWN
return self.FALSE
def __neg__(self):
if self == self.TRUE:
return self.FALSE
if self == self.FALSE:
return self.TRUE
return self.UNKNOWN
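# Quick reference for the three-valued semantics above (follows directly from
# __and__, __or__ and __neg__):
#   KPValue.TRUE  & KPValue.UNKNOWN  -> KPValue.UNKNOWN
#   KPValue.FALSE & KPValue.UNKNOWN  -> KPValue.FALSE
#   KPValue.TRUE  | KPValue.UNKNOWN  -> KPValue.TRUE
#   KPValue.FALSE | KPValue.UNKNOWN  -> KPValue.UNKNOWN
#   -KPValue.UNKNOWN                 -> KPValue.UNKNOWN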
class Evaluable(ABC):
from abc import abstractmethod
@abstractmethod
def eval(self) -> KPValue:
"""
Evaluates to KPValue.TRUE only if the condition got triggered.
"""
pass
class UnknownEvaluable(Evaluable):
"""
    A class that can be used to represent an "empty" evaluable, e.g. an empty precondition criterion.
"""
def eval(self) -> KPValue:
return KPValue.UNKNOWN
class Criterion(Evaluable, ABC):
def __init__(self, scenario: Scenario) -> None:
self.scenario = scenario
# State conditions
# FIXME Recognize "any" participant
class StateCondition(Criterion, ABC):
"""
    NOTE: A StateCondition never calls Vehicle::update_vehicle(), which has to be called before every evaluation.
"""
from abc import abstractmethod
from requests import AiRequest
from beamngpy import Vehicle
from typing import Any
from drivebuildclient import static_vars
def __init__(self, scenario: Scenario, participant: str) -> None:
super().__init__(scenario)
# TODO Check existence of participant id
self.participant = participant
self.requests = self._create_requests()
for request in self.requests:
vehicle = self._get_vehicle()
request.add_sensor_to(vehicle)
# Make sure vehicle sensor_cache is not empty
if self._is_simulation_running():
scenario.bng.poll_sensors(vehicle)
def _get_vehicle(self) -> Vehicle:
return self.scenario.get_vehicle(self.participant)
def _poll_request_data(self) -> List[Any]:
request_data = []
for request in self.requests:
request_data.append(request.read_sensor_cache_of(self._get_vehicle(), self.scenario))
return request_data
@static_vars(prefix="criterion_", counter=0)
def _generate_rid(self) -> str:
while True: # Pseudo "do-while"-loop
rid = StateCondition._generate_rid.prefix + str(StateCondition._generate_rid.counter)
if rid in self._get_vehicle().sensors:
StateCondition._generate_rid.counter += 1
else:
break
return rid
def _is_simulation_running(self) -> bool:
return self.scenario.bng is not None
def eval(self) -> KPValue:
if self._is_simulation_running():
return self._eval_impl()
else:
return KPValue.UNKNOWN
@abstractmethod
def _eval_impl(self) -> KPValue:
pass
@abstractmethod
def _create_requests(self) -> List[AiRequest]:
pass
class SCPosition(StateCondition):
from requests import AiRequest
def __init__(self, scenario: Scenario, participant: str, x: float, y: float, tolerance: float):
super().__init__(scenario, participant)
if tolerance < 0:
raise ValueError("The tolerance must be non negative.")
self.x = x
self.y = y
self.tolerance = tolerance
def _create_requests(self) -> List[AiRequest]:
from requests import PositionRequest
return [PositionRequest(self._generate_rid())]
def _eval_impl(self) -> KPValue:
from numpy import array
from numpy.linalg import norm
position = self._poll_request_data()[0]
if position:
x, y = position
return KPValue.TRUE if norm(array((x, y)) - array((self.x, self.y))) <= self.tolerance else KPValue.FALSE
else:
return KPValue.UNKNOWN
class SCArea(StateCondition):
from requests import AiRequest
def __init__(self, scenario: Scenario, participant: str, points: List[Tuple[float, float]]):
from shapely.geometry import Polygon
super().__init__(scenario, participant)
self.polygon = Polygon(points)
def _create_requests(self) -> List[AiRequest]:
from requests import PositionRequest
return [PositionRequest(self._generate_rid())]
def _eval_impl(self) -> KPValue:
from shapely.geometry import Point
position = self._poll_request_data()[0]
if position:
x, y = position
return KPValue.TRUE if self.polygon.contains(Point(x, y)) else KPValue.FALSE
else:
return KPValue.UNKNOWN
class SCLane(StateCondition):
from requests import AiRequest
def __init__(self, scenario: Scenario, participant: str, lane: str):
super().__init__(scenario, participant)
# TODO Check existence of lane id
self.lane = lane
def _create_requests(self) -> List[AiRequest]:
from requests import BoundingBoxRequest
return [BoundingBoxRequest(self._generate_rid())]
def _eval_impl(self) -> KPValue:
from typing import Dict
from shapely.geometry import Polygon
bbox = self._poll_request_data()[0]
def _to_polygon(road_edges: List[Dict[str, float]]) -> Polygon:
points = [p["left"][0:2] for p in road_edges]
right_edge_points = [p["right"][0:2] for p in road_edges]
right_edge_points.reverse()
points.extend(right_edge_points)
return Polygon(shell=points)
if bbox:
if self.lane == "offroad":
is_offroad = KPValue.TRUE
for road in self.scenario.roads:
if road.rid:
edges = self.scenario.bng.get_road_edges(road.rid)
polygon = _to_polygon(edges)
if polygon.intersects(bbox):
is_offroad = KPValue.FALSE
break
else:
_logger.warning("SCLane can not consider roads without ID.")
return is_offroad
else:
for road in self.scenario.roads:
edges = self.scenario.bng.get_road_edges(road.rid)
polygon = _to_polygon(edges)
return KPValue.TRUE if polygon.intersects(bbox) else KPValue.FALSE
else:
return KPValue.UNKNOWN
class SCSpeed(StateCondition):
from requests import AiRequest
def __init__(self, scenario: Scenario, participant: str, speed_limit: float):
super().__init__(scenario, participant)
if speed_limit < 0:
raise ValueError("Speed limits must be non negative.")
self.speed_limit = speed_limit
def _create_requests(self) -> List[AiRequest]:
from requests import SpeedRequest
return [SpeedRequest(self._generate_rid())]
def _eval_impl(self) -> KPValue:
speed = self._poll_request_data()[0]
if speed:
return KPValue.TRUE if speed > self.speed_limit else KPValue.FALSE
else:
return KPValue.UNKNOWN
class SCDamage(StateCondition):
from requests import AiRequest
def __init__(self, scenario: Scenario, participant: str):
super().__init__(scenario, participant)
def _create_requests(self) -> List[AiRequest]:
from requests import DamageRequest
return [DamageRequest(self._generate_rid())]
def _eval_impl(self) -> KPValue:
damage = self._poll_request_data()[0]
if damage:
return KPValue.TRUE if damage else KPValue.FALSE
else:
return KPValue.UNKNOWN
class SCDistance(StateCondition):
from requests import AiRequest
def __init__(self, scenario: Scenario, participant: str, other_participant: str, max_distance: float):
super().__init__(scenario, participant)
if max_distance < 0:
raise ValueError("The maximum allowed distance has to be non negative.")
# TODO Check whether other_participant id exists
self.other_participant = other_participant
self.max_distance = max_distance
def _create_requests(self) -> List[AiRequest]:
from requests import PositionRequest
return [PositionRequest(self._generate_rid())]
def _eval_impl(self) -> KPValue:
from numpy import array
from numpy.linalg import norm
position1 = self._poll_request_data()[0]
# FIXME This circumvents the request mechanism...
other_vehicle = self.scenario.get_vehicle(self.other_participant)
position2 = other_vehicle["pos"] if other_vehicle else None
if position1 and position2:
x1, y1 = position1
x2, y2, _ = position2
return KPValue.FALSE if norm(array((x1, y1)) - array((x2, y2))) > self.max_distance else KPValue.TRUE
else:
return KPValue.UNKNOWN
class SCLight(StateCondition):
from dbtypes.scheme import CarLight
from requests import AiRequest
def __init__(self, scenario: Scenario, participant: str, light: CarLight):
super().__init__(scenario, participant)
self.light = light
def _create_requests(self) -> List[AiRequest]:
from requests import LightRequest
return [LightRequest(self._generate_rid())]
def _eval_impl(self) -> KPValue:
# FIXME Implement light criterion
print(self._poll_request_data()[0])
return KPValue.UNKNOWN
class SCWaypoint(StateCondition):
from requests import AiRequest
def __init__(self, scenario: Scenario, participant: str, waypoint: str):
super().__init__(scenario, participant)
# TODO Check whether waypoint id exists
self.waypoint = waypoint
def _create_requests(self) -> List[AiRequest]:
return []
def _eval_impl(self) -> KPValue:
# FIXME Implement waypoint criterion
return KPValue.UNKNOWN
# Validation constraints
class ValidationConstraint(Criterion, ABC):
from abc import abstractmethod
def __init__(self, scenario: Scenario, inner: Evaluable) -> None:
super().__init__(scenario)
self.inner = inner
def eval(self) -> KPValue:
# FIXME How to distinguish VCs that got ignored from ones that could not be determined?
return self.inner.eval() if self.eval_cond() == KPValue.TRUE else KPValue.UNKNOWN
@abstractmethod
def eval_cond(self) -> KPValue:
pass
class ValidationConstraintSC(ValidationConstraint, ABC):
def __init__(self, scenario: Scenario, inner: Evaluable, sc: StateCondition):
super().__init__(scenario, inner)
self.sc = sc
def eval_cond(self) -> KPValue:
return self.sc.eval()
class VCPosition(ValidationConstraintSC):
def __init__(self, scenario: Scenario, inner: Evaluable, sc: SCPosition):
super().__init__(scenario, inner, sc)
class VCArea(ValidationConstraintSC):
def __init__(self, scenario: Scenario, inner: Evaluable, sc: SCArea):
super().__init__(scenario, inner, sc)
class VCLane(ValidationConstraintSC):
def __init__(self, scenario: Scenario, inner: Evaluable, sc: SCLane):
super().__init__(scenario, inner, sc)
class VCSpeed(ValidationConstraintSC):
def __init__(self, scenario: Scenario, inner: Evaluable, sc: SCSpeed):
super().__init__(scenario, inner, sc)
class VCDamage(ValidationConstraintSC):
def __init__(self, scenario: Scenario, inner: Evaluable, sc: SCDamage):
super().__init__(scenario, inner, sc)
class VCTime(ValidationConstraint):
def __init__(self, scenario: Scenario, inner: Evaluable, from_tick: int, to_tick: int):
# FIXME from_step/to_step inclusive/exclusive?
super().__init__(scenario, inner)
self.from_tick = from_tick
self.to_tick = to_tick
def eval_cond(self) -> KPValue:
from dbtypes.beamngpy import DBBeamNGpy
from warnings import warn
bng = self.scenario.bng
if bng and type(bng) is DBBeamNGpy:
# FIXME from_step/to_step inclusive/exclusive?
return KPValue.TRUE if self.from_tick <= bng.current_tick <= self.to_tick else KPValue.FALSE
else:
warn("The underlying BeamNGpy instance does not provide time information.")
class VCDistance(ValidationConstraintSC):
def __init__(self, scenario: Scenario, inner: Evaluable, sc: SCDistance):
super().__init__(scenario, inner, sc)
class VCTTC(ValidationConstraint):
from beamngpy import Scenario
def __init__(self, scenario: Scenario, inner: Evaluable):
super().__init__(scenario, inner)
def eval_cond(self) -> KPValue:
# TODO Determine collision to which participant/obstacle
# FIXME Position is in center of car vs crash when colliding with its bounding box
return KPValue.UNKNOWN
class VCLight(ValidationConstraintSC):
def __init__(self, scenario: Scenario, inner: Evaluable, sc: SCLight):
super().__init__(scenario, inner, sc)
class VCWaypoint(ValidationConstraintSC):
from beamngpy import Scenario
def __init__(self, scenario: Scenario, inner: Evaluable, sc: SCWaypoint):
super().__init__(scenario, inner, sc)
# Connectives
class Connective(Evaluable, ABC):
pass
class BinaryConnective(Connective, ABC):
def __init__(self, evaluables: List[Evaluable]) -> None:
self.evaluables = evaluables
class And(BinaryConnective):
def eval(self) -> KPValue:
return KPValue.TRUE if all(map(lambda e: e.eval() is KPValue.TRUE, self.evaluables)) else KPValue.FALSE
class Or(BinaryConnective):
def eval(self) -> KPValue:
return KPValue.TRUE if any(map(lambda e: e.eval() is KPValue.TRUE, self.evaluables)) else KPValue.FALSE
class Not(Connective):
def __init__(self, evaluable: Evaluable) -> None:
self.evaluable = evaluable
def eval(self) -> KPValue:
return self.evaluable.eval().__neg__()
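# Hedged composition example: evaluate "the ego car is inside the target area
# and is not speeding". The ``scenario`` object and all parameter values are
# placeholders; only the constructors and ``eval`` defined above are assumed.
#
#   success = And([
#       SCArea(scenario, "ego", [(0, 0), (10, 0), (10, 10), (0, 10)]),
#       Not(SCSpeed(scenario, "ego", speed_limit=13.9)),
#   ])
#   verdict = success.eval()   # KPValue.TRUE, KPValue.FALSE or KPValue.UNKNOWN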
CriteriaFunction = Callable[[Scenario], Evaluable]
# Test case type
@dataclass
class TestCase:
from generator import ScenarioBuilder
name: str
scenario: ScenarioBuilder
precondition_fct: CriteriaFunction
success_fct: CriteriaFunction
failure_fct: CriteriaFunction
stepsPerSecond: int
aiFrequency: int
authors: List[str]
|
[
"shapely.geometry.Point",
"shapely.geometry.Polygon",
"numpy.array",
"warnings.warn",
"drivebuildclient.static_vars",
"logging.getLogger"
] |
[((189, 237), 'logging.getLogger', 'getLogger', (['"""DriveBuild.SimNode.DBTypes.Criteria"""'], {}), "('DriveBuild.SimNode.DBTypes.Criteria')\n", (198, 237), False, 'from logging import getLogger\n'), ((2936, 2979), 'drivebuildclient.static_vars', 'static_vars', ([], {'prefix': '"""criterion_"""', 'counter': '(0)'}), "(prefix='criterion_', counter=0)\n", (2947, 2979), False, 'from drivebuildclient import static_vars\n'), ((4928, 4943), 'shapely.geometry.Polygon', 'Polygon', (['points'], {}), '(points)\n', (4935, 4943), False, 'from shapely.geometry import Polygon\n'), ((6290, 6311), 'shapely.geometry.Polygon', 'Polygon', ([], {'shell': 'points'}), '(shell=points)\n', (6297, 6311), False, 'from shapely.geometry import Polygon\n'), ((13059, 13134), 'warnings.warn', 'warn', (['"""The underlying BeamNGpy instance does not provide time information."""'], {}), "('The underlying BeamNGpy instance does not provide time information.')\n", (13063, 13134), False, 'from warnings import warn\n'), ((5331, 5342), 'shapely.geometry.Point', 'Point', (['x', 'y'], {}), '(x, y)\n', (5336, 5342), False, 'from shapely.geometry import Point\n'), ((4520, 4533), 'numpy.array', 'array', (['(x, y)'], {}), '((x, y))\n', (4525, 4533), False, 'from numpy import array\n'), ((4536, 4559), 'numpy.array', 'array', (['(self.x, self.y)'], {}), '((self.x, self.y))\n', (4541, 4559), False, 'from numpy import array\n'), ((9621, 9636), 'numpy.array', 'array', (['(x1, y1)'], {}), '((x1, y1))\n', (9626, 9636), False, 'from numpy import array\n'), ((9639, 9654), 'numpy.array', 'array', (['(x2, y2)'], {}), '((x2, y2))\n', (9644, 9654), False, 'from numpy import array\n')]
|
from unittest.mock import MagicMock, Mock, patch
import numpy as np
import pytest
from chitra.visualization.metrics import (
cm_accuracy,
detect_multilabel,
plot_confusion_matrix,
)
def test_detect_multilabel():
with pytest.raises(UserWarning):
detect_multilabel({"label1": "this will raise UserWarning"})
assert detect_multilabel([1, 2, 3, 4])
assert not detect_multilabel([0, 1, 1, 0])
def test_cm_accuracy():
x = np.asarray([[1, 2], [1, 2]])
assert cm_accuracy(x) == 0.5
@patch("chitra.visualization.metrics.plt")
def test_plot_confusion_matrix(mock_plt: Mock):
mock_plt.show = MagicMock()
y_pred = [1, 1, 0, 1]
y_true = [0, 1, 0, 1]
assert plot_confusion_matrix(y_pred, y_true) is None
mock_plt.show.assert_called_once()
|
[
"chitra.visualization.metrics.cm_accuracy",
"unittest.mock.MagicMock",
"numpy.asarray",
"chitra.visualization.metrics.plot_confusion_matrix",
"unittest.mock.patch",
"pytest.raises",
"chitra.visualization.metrics.detect_multilabel"
] |
[((524, 565), 'unittest.mock.patch', 'patch', (['"""chitra.visualization.metrics.plt"""'], {}), "('chitra.visualization.metrics.plt')\n", (529, 565), False, 'from unittest.mock import MagicMock, Mock, patch\n'), ((346, 377), 'chitra.visualization.metrics.detect_multilabel', 'detect_multilabel', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (363, 377), False, 'from chitra.visualization.metrics import cm_accuracy, detect_multilabel, plot_confusion_matrix\n'), ((459, 487), 'numpy.asarray', 'np.asarray', (['[[1, 2], [1, 2]]'], {}), '([[1, 2], [1, 2]])\n', (469, 487), True, 'import numpy as np\n'), ((634, 645), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (643, 645), False, 'from unittest.mock import MagicMock, Mock, patch\n'), ((237, 263), 'pytest.raises', 'pytest.raises', (['UserWarning'], {}), '(UserWarning)\n', (250, 263), False, 'import pytest\n'), ((273, 333), 'chitra.visualization.metrics.detect_multilabel', 'detect_multilabel', (["{'label1': 'this will raise UserWarning'}"], {}), "({'label1': 'this will raise UserWarning'})\n", (290, 333), False, 'from chitra.visualization.metrics import cm_accuracy, detect_multilabel, plot_confusion_matrix\n'), ((393, 424), 'chitra.visualization.metrics.detect_multilabel', 'detect_multilabel', (['[0, 1, 1, 0]'], {}), '([0, 1, 1, 0])\n', (410, 424), False, 'from chitra.visualization.metrics import cm_accuracy, detect_multilabel, plot_confusion_matrix\n'), ((499, 513), 'chitra.visualization.metrics.cm_accuracy', 'cm_accuracy', (['x'], {}), '(x)\n', (510, 513), False, 'from chitra.visualization.metrics import cm_accuracy, detect_multilabel, plot_confusion_matrix\n'), ((711, 748), 'chitra.visualization.metrics.plot_confusion_matrix', 'plot_confusion_matrix', (['y_pred', 'y_true'], {}), '(y_pred, y_true)\n', (732, 748), False, 'from chitra.visualization.metrics import cm_accuracy, detect_multilabel, plot_confusion_matrix\n')]
|
# -*- coding: utf-8 -*-
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
POINTS = 1000
if __name__ == '__main__':
gauss1 = (np.random.randn(POINTS), np.random.randn(POINTS)*0.24);
gauss2 = (np.random.randn(POINTS)*0.28, np.random.randn(POINTS));
x1 = np.array(range(POINTS)) * 0.005
y1 = x1 * -2
x2 = x1
y2 = x2 * 1
offset_x1 = -4
offset_y1 = 2
    # zip() returns an iterator in Python 3; convert the point tuples to lists
    # so the coordinate arrays stay indexable for the element-wise updates below.
    cc = list(gauss1)
    dd = list(gauss2)
cc[0] = cc[0] + x1
cc[1] = cc[1] + y1
cc[0] = cc[0] + offset_x1
cc[1] = cc[1] + offset_y1
dd[0] = dd[0] + x2
dd[1] = dd[1] + y2
plt.scatter(cc[0], cc[1], c=u'b')
plt.scatter(dd[0], dd[1], c=u'r')
plt.draw()
plt.show()
|
[
"matplotlib.pyplot.draw",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.show",
"numpy.random.randn"
] |
[((604, 637), 'matplotlib.pyplot.scatter', 'plt.scatter', (['cc[0]', 'cc[1]'], {'c': 'u"""b"""'}), "(cc[0], cc[1], c=u'b')\n", (615, 637), True, 'import matplotlib.pyplot as plt\n'), ((642, 675), 'matplotlib.pyplot.scatter', 'plt.scatter', (['dd[0]', 'dd[1]'], {'c': 'u"""r"""'}), "(dd[0], dd[1], c=u'r')\n", (653, 675), True, 'import matplotlib.pyplot as plt\n'), ((680, 690), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (688, 690), True, 'import matplotlib.pyplot as plt\n'), ((695, 705), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (703, 705), True, 'import matplotlib.pyplot as plt\n'), ((150, 173), 'numpy.random.randn', 'np.random.randn', (['POINTS'], {}), '(POINTS)\n', (165, 173), True, 'import numpy as np\n'), ((250, 273), 'numpy.random.randn', 'np.random.randn', (['POINTS'], {}), '(POINTS)\n', (265, 273), True, 'import numpy as np\n'), ((175, 198), 'numpy.random.randn', 'np.random.randn', (['POINTS'], {}), '(POINTS)\n', (190, 198), True, 'import numpy as np\n'), ((220, 243), 'numpy.random.randn', 'np.random.randn', (['POINTS'], {}), '(POINTS)\n', (235, 243), True, 'import numpy as np\n')]
|
# phaseplots.py - examples of phase portraits
# RMM, 24 July 2011
#
# This file contains examples of phase portraits pulled from "Feedback
# Systems" by <NAME> Murray (Princeton University Press, 2008).
import numpy as np
import matplotlib.pyplot as mpl
from control.phaseplot import phase_plot
from numpy import pi
# Clear out any figures that are present
mpl.close('all')
#
# Inverted pendulum
#
# Define the ODEs for a damped (inverted) pendulum
def invpend_ode(x, t, m=1., l=1., b=0.2, g=1):
return (x[1], -b/m*x[1] + (g*l/m) * np.sin(x[0]))
# Set up the figure the way we want it to look
mpl.figure(); mpl.clf();
mpl.axis([-2*pi, 2*pi, -2.1, 2.1]);
mpl.title('Inverted pendulum')
# Outer trajectories
phase_plot(invpend_ode,
X0 = [ [-2*pi, 1.6], [-2*pi, 0.5], [-1.8, 2.1],
[-1, 2.1], [4.2, 2.1], [5, 2.1],
[2*pi, -1.6], [2*pi, -0.5], [1.8, -2.1],
[1, -2.1], [-4.2, -2.1], [-5, -2.1] ],
T = np.linspace(0, 40, 200),
logtime = (3, 0.7) )
# Separatrices
mpl.hold(True);
phase_plot(invpend_ode, X0 = [[-2.3056, 2.1], [2.3056, -2.1]], T=6, lingrid=0)
mpl.show();
#
# Systems of ODEs: damped oscillator example (simulation + phase portrait)
#
def oscillator_ode(x, t, m=1., b=1, k=1):
return (x[1], -k/m*x[0] - b/m*x[1])
# Generate a vector plot for the damped oscillator
mpl.figure(); mpl.clf();
phase_plot(oscillator_ode, [-1, 1, 10], [-1, 1, 10], 0.15);
mpl.hold(True); mpl.plot([0], [0], '.');
# a=gca; set(a,'FontSize',20); set(a,'DataAspectRatio',[1,1,1]);
mpl.xlabel('x1'); mpl.ylabel('x2');
# Generate a phase plot for the damped oscillator
mpl.figure(); mpl.clf();
mpl.axis([-1, 1, -1, 1]); # set(gca, 'DataAspectRatio', [1, 1, 1]);
phase_plot(oscillator_ode,
X0 = [
[-1, 1], [-0.3, 1], [0, 1], [0.25, 1], [0.5, 1], [0.75, 1], [1, 1],
[1, -1], [0.3, -1], [0, -1], [-0.25, -1], [-0.5, -1], [-0.75, -1], [-1, -1]
], T = np.linspace(0, 8, 80), timepts = [0.25, 0.8, 2, 3])
mpl.hold(True); mpl.plot([0], [0], 'k.'); # 'MarkerSize', AM_data_markersize*3);
# set(gca,'DataAspectRatio',[1,1,1]);
mpl.xlabel('x1'); mpl.ylabel('x2');
mpl.show()
#
# Stability definitions
#
# This set of plots illustrates the various types of equilibrium points.
#
# Saddle point vector field
def saddle_ode(x, t):
return (x[0] - 3*x[1], -3*x[0] + x[1]);
# Asy stable
m = 1; b = 1; k = 1; # default values
mpl.figure(); mpl.clf();
mpl.axis([-1, 1, -1, 1]); # set(gca, 'DataAspectRatio', [1 1 1]);
phase_plot(oscillator_ode,
X0 = [
[-1,1], [-0.3,1], [0,1], [0.25,1], [0.5,1], [0.7,1], [1,1], [1.3,1],
[1,-1], [0.3,-1], [0,-1], [-0.25,-1], [-0.5,-1], [-0.7,-1], [-1,-1],
[-1.3,-1]
], T = np.linspace(0, 10, 100),
timepts = [0.3, 1, 2, 3], parms = (m, b, k));
mpl.hold(True); mpl.plot([0], [0], 'k.'); # 'MarkerSize', AM_data_markersize*3);
# set(gca,'FontSize', 16);
mpl.xlabel('{\itx}_1'); mpl.ylabel('{\itx}_2');
# Saddle
mpl.figure(); mpl.clf();
mpl.axis([-1, 1, -1, 1]); # set(gca, 'DataAspectRatio', [1 1 1]);
phase_plot(saddle_ode, scale = 2, timepts = [0.2, 0.5, 0.8], X0 =
[ [-1, -1], [1, 1],
[-1, -0.95], [-1, -0.9], [-1, -0.8], [-1, -0.6], [-1, -0.4], [-1, -0.2],
[-0.95, -1], [-0.9, -1], [-0.8, -1], [-0.6, -1], [-0.4, -1], [-0.2, -1],
[1, 0.95], [1, 0.9], [1, 0.8], [1, 0.6], [1, 0.4], [1, 0.2],
[0.95, 1], [0.9, 1], [0.8, 1], [0.6, 1], [0.4, 1], [0.2, 1],
[-0.5, -0.45], [-0.45, -0.5], [0.5, 0.45], [0.45, 0.5],
[-0.04, 0.04], [0.04, -0.04] ], T = np.linspace(0, 2, 20));
mpl.hold(True); mpl.plot([0], [0], 'k.'); # 'MarkerSize', AM_data_markersize*3);
# set(gca,'FontSize', 16);
mpl.xlabel('{\itx}_1'); mpl.ylabel('{\itx}_2');
# Stable isL
m = 1; b = 0; k = 1; # zero damping
mpl.figure(); mpl.clf();
mpl.axis([-1, 1, -1, 1]); # set(gca, 'DataAspectRatio', [1 1 1]);
phase_plot(oscillator_ode, timepts =
[pi/6, pi/3, pi/2, 2*pi/3, 5*pi/6, pi, 7*pi/6, 4*pi/3, 9*pi/6, 5*pi/3, 11*pi/6, 2*pi],
X0 = [ [0.2,0], [0.4,0], [0.6,0], [0.8,0], [1,0], [1.2,0], [1.4,0] ],
T = np.linspace(0, 20, 200), parms = (m, b, k));
mpl.hold(True); mpl.plot([0], [0], 'k.') # 'MarkerSize', AM_data_markersize*3);
# set(gca,'FontSize', 16);
mpl.xlabel('{\itx}_1'); mpl.ylabel('{\itx}_2');
mpl.show()
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.close",
"matplotlib.pyplot.hold",
"matplotlib.pyplot.axis",
"control.phaseplot.phase_plot",
"matplotlib.pyplot.figure",
"numpy.sin",
"numpy.linspace",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] |
[((360, 376), 'matplotlib.pyplot.close', 'mpl.close', (['"""all"""'], {}), "('all')\n", (369, 376), True, 'import matplotlib.pyplot as mpl\n'), ((603, 615), 'matplotlib.pyplot.figure', 'mpl.figure', ([], {}), '()\n', (613, 615), True, 'import matplotlib.pyplot as mpl\n'), ((617, 626), 'matplotlib.pyplot.clf', 'mpl.clf', ([], {}), '()\n', (624, 626), True, 'import matplotlib.pyplot as mpl\n'), ((629, 667), 'matplotlib.pyplot.axis', 'mpl.axis', (['[-2 * pi, 2 * pi, -2.1, 2.1]'], {}), '([-2 * pi, 2 * pi, -2.1, 2.1])\n', (637, 667), True, 'import matplotlib.pyplot as mpl\n'), ((665, 694), 'matplotlib.pyplot.title', 'mpl.title', (['"""Inverted pendlum"""'], {}), "('Inverted pendlum')\n", (674, 694), True, 'import matplotlib.pyplot as mpl\n'), ((1014, 1028), 'matplotlib.pyplot.hold', 'mpl.hold', (['(True)'], {}), '(True)\n', (1022, 1028), True, 'import matplotlib.pyplot as mpl\n'), ((1030, 1106), 'control.phaseplot.phase_plot', 'phase_plot', (['invpend_ode'], {'X0': '[[-2.3056, 2.1], [2.3056, -2.1]]', 'T': '(6)', 'lingrid': '(0)'}), '(invpend_ode, X0=[[-2.3056, 2.1], [2.3056, -2.1]], T=6, lingrid=0)\n', (1040, 1106), False, 'from control.phaseplot import phase_plot\n'), ((1109, 1119), 'matplotlib.pyplot.show', 'mpl.show', ([], {}), '()\n', (1117, 1119), True, 'import matplotlib.pyplot as mpl\n'), ((1336, 1348), 'matplotlib.pyplot.figure', 'mpl.figure', ([], {}), '()\n', (1346, 1348), True, 'import matplotlib.pyplot as mpl\n'), ((1350, 1359), 'matplotlib.pyplot.clf', 'mpl.clf', ([], {}), '()\n', (1357, 1359), True, 'import matplotlib.pyplot as mpl\n'), ((1361, 1419), 'control.phaseplot.phase_plot', 'phase_plot', (['oscillator_ode', '[-1, 1, 10]', '[-1, 1, 10]', '(0.15)'], {}), '(oscillator_ode, [-1, 1, 10], [-1, 1, 10], 0.15)\n', (1371, 1419), False, 'from control.phaseplot import phase_plot\n'), ((1421, 1435), 'matplotlib.pyplot.hold', 'mpl.hold', (['(True)'], {}), '(True)\n', (1429, 1435), True, 'import matplotlib.pyplot as mpl\n'), ((1437, 1460), 'matplotlib.pyplot.plot', 'mpl.plot', (['[0]', '[0]', '"""."""'], {}), "([0], [0], '.')\n", (1445, 1460), True, 'import matplotlib.pyplot as mpl\n'), ((1527, 1543), 'matplotlib.pyplot.xlabel', 'mpl.xlabel', (['"""x1"""'], {}), "('x1')\n", (1537, 1543), True, 'import matplotlib.pyplot as mpl\n'), ((1545, 1561), 'matplotlib.pyplot.ylabel', 'mpl.ylabel', (['"""x2"""'], {}), "('x2')\n", (1555, 1561), True, 'import matplotlib.pyplot as mpl\n'), ((1614, 1626), 'matplotlib.pyplot.figure', 'mpl.figure', ([], {}), '()\n', (1624, 1626), True, 'import matplotlib.pyplot as mpl\n'), ((1628, 1637), 'matplotlib.pyplot.clf', 'mpl.clf', ([], {}), '()\n', (1635, 1637), True, 'import matplotlib.pyplot as mpl\n'), ((1640, 1664), 'matplotlib.pyplot.axis', 'mpl.axis', (['[-1, 1, -1, 1]'], {}), '([-1, 1, -1, 1])\n', (1648, 1664), True, 'import matplotlib.pyplot as mpl\n'), ((1959, 1973), 'matplotlib.pyplot.hold', 'mpl.hold', (['(True)'], {}), '(True)\n', (1967, 1973), True, 'import matplotlib.pyplot as mpl\n'), ((1975, 1999), 'matplotlib.pyplot.plot', 'mpl.plot', (['[0]', '[0]', '"""k."""'], {}), "([0], [0], 'k.')\n", (1983, 1999), True, 'import matplotlib.pyplot as mpl\n'), ((2078, 2094), 'matplotlib.pyplot.xlabel', 'mpl.xlabel', (['"""x1"""'], {}), "('x1')\n", (2088, 2094), True, 'import matplotlib.pyplot as mpl\n'), ((2096, 2112), 'matplotlib.pyplot.ylabel', 'mpl.ylabel', (['"""x2"""'], {}), "('x2')\n", (2106, 2112), True, 'import matplotlib.pyplot as mpl\n'), ((2115, 2125), 'matplotlib.pyplot.show', 'mpl.show', ([], {}), '()\n', (2123, 2125), True, 'import 
matplotlib.pyplot as mpl\n'), ((2379, 2391), 'matplotlib.pyplot.figure', 'mpl.figure', ([], {}), '()\n', (2389, 2391), True, 'import matplotlib.pyplot as mpl\n'), ((2393, 2402), 'matplotlib.pyplot.clf', 'mpl.clf', ([], {}), '()\n', (2400, 2402), True, 'import matplotlib.pyplot as mpl\n'), ((2405, 2429), 'matplotlib.pyplot.axis', 'mpl.axis', (['[-1, 1, -1, 1]'], {}), '([-1, 1, -1, 1])\n', (2413, 2429), True, 'import matplotlib.pyplot as mpl\n'), ((2751, 2765), 'matplotlib.pyplot.hold', 'mpl.hold', (['(True)'], {}), '(True)\n', (2759, 2765), True, 'import matplotlib.pyplot as mpl\n'), ((2767, 2791), 'matplotlib.pyplot.plot', 'mpl.plot', (['[0]', '[0]', '"""k."""'], {}), "([0], [0], 'k.')\n", (2775, 2791), True, 'import matplotlib.pyplot as mpl\n'), ((2860, 2883), 'matplotlib.pyplot.xlabel', 'mpl.xlabel', (['"""{\\\\itx}_1"""'], {}), "('{\\\\itx}_1')\n", (2870, 2883), True, 'import matplotlib.pyplot as mpl\n'), ((2884, 2907), 'matplotlib.pyplot.ylabel', 'mpl.ylabel', (['"""{\\\\itx}_2"""'], {}), "('{\\\\itx}_2')\n", (2894, 2907), True, 'import matplotlib.pyplot as mpl\n'), ((2918, 2930), 'matplotlib.pyplot.figure', 'mpl.figure', ([], {}), '()\n', (2928, 2930), True, 'import matplotlib.pyplot as mpl\n'), ((2932, 2941), 'matplotlib.pyplot.clf', 'mpl.clf', ([], {}), '()\n', (2939, 2941), True, 'import matplotlib.pyplot as mpl\n'), ((2943, 2967), 'matplotlib.pyplot.axis', 'mpl.axis', (['[-1, 1, -1, 1]'], {}), '([-1, 1, -1, 1])\n', (2951, 2967), True, 'import matplotlib.pyplot as mpl\n'), ((3507, 3521), 'matplotlib.pyplot.hold', 'mpl.hold', (['(True)'], {}), '(True)\n', (3515, 3521), True, 'import matplotlib.pyplot as mpl\n'), ((3523, 3547), 'matplotlib.pyplot.plot', 'mpl.plot', (['[0]', '[0]', '"""k."""'], {}), "([0], [0], 'k.')\n", (3531, 3547), True, 'import matplotlib.pyplot as mpl\n'), ((3616, 3639), 'matplotlib.pyplot.xlabel', 'mpl.xlabel', (['"""{\\\\itx}_1"""'], {}), "('{\\\\itx}_1')\n", (3626, 3639), True, 'import matplotlib.pyplot as mpl\n'), ((3640, 3663), 'matplotlib.pyplot.ylabel', 'mpl.ylabel', (['"""{\\\\itx}_2"""'], {}), "('{\\\\itx}_2')\n", (3650, 3663), True, 'import matplotlib.pyplot as mpl\n'), ((3716, 3728), 'matplotlib.pyplot.figure', 'mpl.figure', ([], {}), '()\n', (3726, 3728), True, 'import matplotlib.pyplot as mpl\n'), ((3730, 3739), 'matplotlib.pyplot.clf', 'mpl.clf', ([], {}), '()\n', (3737, 3739), True, 'import matplotlib.pyplot as mpl\n'), ((3741, 3765), 'matplotlib.pyplot.axis', 'mpl.axis', (['[-1, 1, -1, 1]'], {}), '([-1, 1, -1, 1])\n', (3749, 3765), True, 'import matplotlib.pyplot as mpl\n'), ((4058, 4072), 'matplotlib.pyplot.hold', 'mpl.hold', (['(True)'], {}), '(True)\n', (4066, 4072), True, 'import matplotlib.pyplot as mpl\n'), ((4074, 4098), 'matplotlib.pyplot.plot', 'mpl.plot', (['[0]', '[0]', '"""k."""'], {}), "([0], [0], 'k.')\n", (4082, 4098), True, 'import matplotlib.pyplot as mpl\n'), ((4166, 4189), 'matplotlib.pyplot.xlabel', 'mpl.xlabel', (['"""{\\\\itx}_1"""'], {}), "('{\\\\itx}_1')\n", (4176, 4189), True, 'import matplotlib.pyplot as mpl\n'), ((4190, 4213), 'matplotlib.pyplot.ylabel', 'mpl.ylabel', (['"""{\\\\itx}_2"""'], {}), "('{\\\\itx}_2')\n", (4200, 4213), True, 'import matplotlib.pyplot as mpl\n'), ((4215, 4225), 'matplotlib.pyplot.show', 'mpl.show', ([], {}), '()\n', (4223, 4225), True, 'import matplotlib.pyplot as mpl\n'), ((948, 971), 'numpy.linspace', 'np.linspace', (['(0)', '(40)', '(200)'], {}), '(0, 40, 200)\n', (959, 971), True, 'import numpy as np\n'), ((1907, 1928), 'numpy.linspace', 'np.linspace', (['(0)', '(8)', '(80)'], {}), '(0, 8, 
80)\n', (1918, 1928), True, 'import numpy as np\n'), ((2677, 2700), 'numpy.linspace', 'np.linspace', (['(0)', '(10)', '(100)'], {}), '(0, 10, 100)\n', (2688, 2700), True, 'import numpy as np\n'), ((3483, 3504), 'numpy.linspace', 'np.linspace', (['(0)', '(2)', '(20)'], {}), '(0, 2, 20)\n', (3494, 3504), True, 'import numpy as np\n'), ((4013, 4036), 'numpy.linspace', 'np.linspace', (['(0)', '(20)', '(200)'], {}), '(0, 20, 200)\n', (4024, 4036), True, 'import numpy as np\n'), ((541, 553), 'numpy.sin', 'np.sin', (['x[0]'], {}), '(x[0])\n', (547, 553), True, 'import numpy as np\n')]
|
#!/usr/bin/python
# ----------------------------------------------------------------------------
# Copyright 2018 Intel
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
import numpy as np
import os
import argparse
import psutil
import time
parser = argparse.ArgumentParser(
description="Benchmark 3D and 2D Convolution Models",add_help=True)
parser.add_argument("--dim_lengthx",
type = int,
default=16,
help="Tensor cube length of side x")
parser.add_argument("--dim_lengthy",
type = int,
default=16,
help="Tensor cube length of side y")
parser.add_argument("--dim_lengthz",
type = int,
default=16,
help="Tensor cube length of side z")
parser.add_argument("--num_channels",
type = int,
default=1,
help="Number of channels")
parser.add_argument("--num_outputs",
type = int,
default=1,
help="Number of outputs")
parser.add_argument("--bz",
type = int,
default=1,
help="Batch size")
parser.add_argument("--lr",
type = float,
default=0.001,
help="Learning rate")
parser.add_argument("--num_datapoints",
type = int,
default=1024,
help="Number of datapoints")
parser.add_argument("--epochs",
type = int,
default=3,
help="Number of epochs")
parser.add_argument("--intraop_threads",
type = int,
default=psutil.cpu_count(logical=False),
help="Number of intraop threads")
parser.add_argument("--interop_threads",
type = int,
default=2,
help="Number of interop threads")
parser.add_argument("--blocktime",
type = int,
default=0,
help="Block time for CPU threads")
parser.add_argument("--print_model",
action="store_true",
default=False,
help="Print the summary of the model layers")
parser.add_argument("--use_upsampling",
action="store_true",
default=False,
help="Use upsampling instead of transposed convolution")
parser.add_argument("--D2",
action="store_true",
default=False,
help="Use 2D model and images instead of 3D.")
parser.add_argument("--single_class_output",
action="store_true",
default=False,
help="Use binary classifier instead of U-Net")
parser.add_argument("--mkl_verbose",
action="store_true",
default=False,
help="Print MKL debug statements.")
parser.add_argument("--trace",
action="store_true",
default=False,
help="Create trace of TensorFlow timeline")
parser.add_argument("--inference",
action="store_true",
default=False,
help="Test inference speed. Default=Test training speed")
args = parser.parse_args()
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2" # Get rid of the AVX, SSE warnings
if args.mkl_verbose:
os.environ["MKL_VERBOSE"] = "1" # Print out messages from MKL operations
os.environ["MKLDNN_VERBOSE"] = "1" # Print out messages from MKL-DNN operations
os.environ["OMP_NUM_THREADS"] = str(args.intraop_threads)
os.environ["KMP_BLOCKTIME"] = str(args.blocktime)
os.environ["KMP_AFFINITY"] = "granularity=thread,compact,1,0"
import tensorflow as tf
from model import *
from tqdm import tqdm
import datetime
print("Started script on {}".format(datetime.datetime.now()))
print("args = {}".format(args))
print("OS: {}".format(os.system("uname -a")))
print("TensorFlow version: {}".format(tf.__version__))
import keras as K
print("Keras API version: {}".format(K.__version__))
if args.D2: # Define shape of the tensors (2D)
dims = (1,2)
tensor_shape = [args.bz,
args.dim_lengthx,
args.dim_lengthy,
args.num_channels]
out_shape = [args.bz,
args.dim_lengthx,
args.dim_lengthy,
args.num_outputs]
else: # Define shape of the tensors (3D)
    dims = (1,2,3)
    tensor_shape = [args.bz,
                    args.dim_lengthx,
                    args.dim_lengthy,
                    args.dim_lengthz,
                    args.num_channels]
    # Shape of the label/output tensor (matches the 2D branch above)
    out_shape = [args.bz,
                 args.dim_lengthx,
                 args.dim_lengthy,
                 args.dim_lengthz,
                 args.num_outputs]
# Optimize CPU threads for TensorFlow
config = tf.ConfigProto(
inter_op_parallelism_threads=args.interop_threads,
intra_op_parallelism_threads=args.intraop_threads)
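# inter_op threads run independent graph ops concurrently, while intra_op threads
# parallelize the work inside a single op (e.g. one large convolution or matmul).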
sess = tf.Session(config=config)
K.backend.set_session(sess)
global_step = tf.Variable(0, name="global_step", trainable=False)
# Define the shape of the input images
# For segmentation models, the label (mask) is the same shape.
img = tf.placeholder(tf.float32, shape=tensor_shape) # Input tensor
if args.single_class_output:
truth = tf.placeholder(tf.float32, shape=(args.bz,args.num_outputs)) # Label tensor
else:
truth = tf.placeholder(tf.float32, shape=tensor_shape) # Label tensor
# Define the model
# Predict the output mask
if not args.inference:
# Set keras learning phase to train
K.backend.set_learning_phase(True)
# Don"t initialize variables on the fly
K.backend.manual_variable_initialization(False)
if args.single_class_output:
if args.D2: # 2D convnet model
predictions = conv2D(img,
print_summary=args.print_model, n_out=args.num_outputs)
else: # 3D convet model
predictions = conv3D(img,
print_summary=args.print_model, n_out=args.num_outputs)
else:
if args.D2: # 2D U-Net model
predictions = unet2D(img,
use_upsampling=args.use_upsampling,
print_summary=args.print_model, n_out=args.num_outputs)
else: # 3D U-Net model
predictions = unet3D(img,
use_upsampling=args.use_upsampling,
print_summary=args.print_model, n_out=args.num_outputs)
# Performance metrics for model
if args.single_class_output:
loss = tf.losses.sigmoid_cross_entropy(truth, predictions)
metric_score = tf.metrics.mean_squared_error(truth, predictions)
else:
loss = dice_coef_loss(truth, predictions, dims) # Loss is the dice between mask and prediction
metric_score = dice_coef(truth, predictions, dims)
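# Dice coefficient: 2*|A ∩ B| / (|A| + |B|) between the predicted and ground-truth masks,
# so the Dice-based loss (pulled in by the wildcard import from model.py) shrinks as the
# predicted segmentation overlaps the label more closely.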
train_op = tf.train.AdamOptimizer(args.lr).minimize(loss, global_step=global_step)
# Just feed completely random data in for the benchmark testing
imgs = np.random.rand(*tensor_shape)
if args.single_class_output:
truths = np.random.rand(args.bz, args.num_outputs)
else:
truths = np.random.rand(*tensor_shape)
# Initialize all variables
init_op = tf.global_variables_initializer()
init_l = tf.local_variables_initializer() # For TensorFlow metrics
sess.run(init_op)
sess.run(init_l)
saver = tf.train.Saver()
save_path = saver.save(sess, "./saved_model/model.ckpt")
print("Model saved in path: %s" % save_path)
# Freeze graph if inference
if args.inference:
K.backend.set_learning_phase(False)
# Set up trace for operations
run_metadata = tf.RunMetadata()
run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
# Same number of samples to process regardless of batch size
# So if we have a larger batch size we can take fewer steps.
total_steps = args.num_datapoints//args.bz
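# e.g. with the defaults (1024 datapoints, batch size 1) this is 1024 steps per epoch;
# a batch size of 4 would cut it to 256 steps.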
print("Using random data.")
if args.inference:
print("Testing inference speed.")
else:
print("Testing training speed.")
start_time = time.time()
for epoch in tqdm(range(args.epochs), desc="Epoch #"):
for i in tqdm(range(total_steps), desc="Step #"):
if args.inference:
feed_dict = {img: imgs}
else:
feed_dict = {img: imgs, truth:truths}
if args.inference:
if args.trace:
history = sess.run([predictions], feed_dict=feed_dict,
options=run_options, run_metadata=run_metadata)
else:
history = sess.run([predictions], feed_dict=feed_dict)
else:
if args.trace:
history, loss_v, metric_v, this_step = \
sess.run([train_op, loss, metric_score, global_step],
feed_dict=feed_dict,
options=run_options, run_metadata=run_metadata)
else:
history, loss_v, metric_v, this_step = \
sess.run([train_op, loss, metric_score, global_step],
feed_dict=feed_dict)
stop_time = time.time()
print("\n\nTotal time = {:,.3f} seconds".format(stop_time - start_time))
print("Total images = {:,}".format(args.epochs*args.num_datapoints))
print("Speed = {:,.3f} images per second".format( \
(args.epochs*args.num_datapoints)/(stop_time - start_time)))
if args.trace:
"""
Save the training timeline
"""
from tensorflow.python.client import timeline
timeline_filename = "./timeline_trace.json"
fetched_timeline = timeline.Timeline(run_metadata.step_stats)
chrome_trace = fetched_timeline.generate_chrome_trace_format()
with open(timeline_filename, "w") as f:
print("Saved Tensorflow trace to: {}".format(timeline_filename))
print("To view the trace:\n(1) Open Chrome browser.\n"
"(2) Go to this url -- chrome://tracing\n"
"(3) Click the load button.\n"
"(4) Load the file {}.".format(timeline_filename))
f.write(chrome_trace)
print("Stopped script on {}".format(datetime.datetime.now()))
|
[
"argparse.ArgumentParser",
"tensorflow.local_variables_initializer",
"tensorflow.ConfigProto",
"tensorflow.Variable",
"tensorflow.RunOptions",
"psutil.cpu_count",
"tensorflow.placeholder",
"datetime.datetime.now",
"tensorflow.train.Saver",
"tensorflow.global_variables_initializer",
"keras.backend.set_session",
"tensorflow.Session",
"os.system",
"tensorflow.losses.sigmoid_cross_entropy",
"keras.backend.manual_variable_initialization",
"time.time",
"tensorflow.python.client.timeline.Timeline",
"keras.backend.set_learning_phase",
"tensorflow.RunMetadata",
"tensorflow.metrics.mean_squared_error",
"numpy.random.rand",
"tensorflow.train.AdamOptimizer"
] |
[((827, 924), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Benchmark 3D and 2D Convolution Models"""', 'add_help': '(True)'}), "(description=\n 'Benchmark 3D and 2D Convolution Models', add_help=True)\n", (850, 924), False, 'import argparse\n'), ((4769, 4889), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'inter_op_parallelism_threads': 'args.interop_threads', 'intra_op_parallelism_threads': 'args.intraop_threads'}), '(inter_op_parallelism_threads=args.interop_threads,\n intra_op_parallelism_threads=args.intraop_threads)\n', (4783, 4889), True, 'import tensorflow as tf\n'), ((4899, 4924), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (4909, 4924), True, 'import tensorflow as tf\n'), ((4925, 4952), 'keras.backend.set_session', 'K.backend.set_session', (['sess'], {}), '(sess)\n', (4946, 4952), True, 'import keras as K\n'), ((4969, 5020), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'name': '"""global_step"""', 'trainable': '(False)'}), "(0, name='global_step', trainable=False)\n", (4980, 5020), True, 'import tensorflow as tf\n'), ((5130, 5176), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': 'tensor_shape'}), '(tf.float32, shape=tensor_shape)\n', (5144, 5176), True, 'import tensorflow as tf\n'), ((6731, 6760), 'numpy.random.rand', 'np.random.rand', (['*tensor_shape'], {}), '(*tensor_shape)\n', (6745, 6760), True, 'import numpy as np\n'), ((6927, 6960), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (6958, 6960), True, 'import tensorflow as tf\n'), ((6970, 7002), 'tensorflow.local_variables_initializer', 'tf.local_variables_initializer', ([], {}), '()\n', (7000, 7002), True, 'import tensorflow as tf\n'), ((7073, 7089), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (7087, 7089), True, 'import tensorflow as tf\n'), ((7323, 7339), 'tensorflow.RunMetadata', 'tf.RunMetadata', ([], {}), '()\n', (7337, 7339), True, 'import tensorflow as tf\n'), ((7354, 7405), 'tensorflow.RunOptions', 'tf.RunOptions', ([], {'trace_level': 'tf.RunOptions.FULL_TRACE'}), '(trace_level=tf.RunOptions.FULL_TRACE)\n', (7367, 7405), True, 'import tensorflow as tf\n'), ((7709, 7720), 'time.time', 'time.time', ([], {}), '()\n', (7718, 7720), False, 'import time\n'), ((8513, 8524), 'time.time', 'time.time', ([], {}), '()\n', (8522, 8524), False, 'import time\n'), ((5231, 5292), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(args.bz, args.num_outputs)'}), '(tf.float32, shape=(args.bz, args.num_outputs))\n', (5245, 5292), True, 'import tensorflow as tf\n'), ((5322, 5368), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': 'tensor_shape'}), '(tf.float32, shape=tensor_shape)\n', (5336, 5368), True, 'import tensorflow as tf\n'), ((5492, 5526), 'keras.backend.set_learning_phase', 'K.backend.set_learning_phase', (['(True)'], {}), '(True)\n', (5520, 5526), True, 'import keras as K\n'), ((5569, 5616), 'keras.backend.manual_variable_initialization', 'K.backend.manual_variable_initialization', (['(False)'], {}), '(False)\n', (5609, 5616), True, 'import keras as K\n'), ((6302, 6353), 'tensorflow.losses.sigmoid_cross_entropy', 'tf.losses.sigmoid_cross_entropy', (['truth', 'predictions'], {}), '(truth, predictions)\n', (6333, 6353), True, 'import tensorflow as tf\n'), ((6370, 6419), 'tensorflow.metrics.mean_squared_error', 'tf.metrics.mean_squared_error', (['truth', 'predictions'], {}), '(truth, predictions)\n', (6399, 6419), 
True, 'import tensorflow as tf\n'), ((6801, 6842), 'numpy.random.rand', 'np.random.rand', (['args.bz', 'args.num_outputs'], {}), '(args.bz, args.num_outputs)\n', (6815, 6842), True, 'import numpy as np\n'), ((6859, 6888), 'numpy.random.rand', 'np.random.rand', (['*tensor_shape'], {}), '(*tensor_shape)\n', (6873, 6888), True, 'import numpy as np\n'), ((7241, 7276), 'keras.backend.set_learning_phase', 'K.backend.set_learning_phase', (['(False)'], {}), '(False)\n', (7269, 7276), True, 'import keras as K\n'), ((8951, 8993), 'tensorflow.python.client.timeline.Timeline', 'timeline.Timeline', (['run_metadata.step_stats'], {}), '(run_metadata.step_stats)\n', (8968, 8993), False, 'from tensorflow.python.client import timeline\n'), ((2124, 2155), 'psutil.cpu_count', 'psutil.cpu_count', ([], {'logical': '(False)'}), '(logical=False)\n', (2140, 2155), False, 'import psutil\n'), ((3938, 3961), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3959, 3961), False, 'import datetime\n'), ((4019, 4040), 'os.system', 'os.system', (['"""uname -a"""'], {}), "('uname -a')\n", (4028, 4040), False, 'import os\n'), ((6587, 6618), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['args.lr'], {}), '(args.lr)\n', (6609, 6618), True, 'import tensorflow as tf\n'), ((9415, 9438), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (9436, 9438), False, 'import datetime\n')]
|
import numpy as np
a = np.array([1,2,3,4,5,6]) #_ rewrite it!
b = np.array([0,1,2,3,4,5]) #_ rewrite it!
|
[
"numpy.array"
] |
[((24, 52), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5, 6]'], {}), '([1, 2, 3, 4, 5, 6])\n', (32, 52), True, 'import numpy as np\n'), ((68, 96), 'numpy.array', 'np.array', (['[0, 1, 2, 3, 4, 5]'], {}), '([0, 1, 2, 3, 4, 5])\n', (76, 96), True, 'import numpy as np\n')]
|
#!/Users/marc/miniconda3/bin/python3
import glfw
from OpenGL.GL import *
from OpenGL.GLU import *
import math
import ctypes
def framebuffer_size_callback(window, width, height):
# make sure the viewport matches the new window dimensions; note that width and
# height will be significantly larger than specified on retina displays.
glViewport(0, 0, width, height)
# process all input: query GLFW whether relevant keys are pressed/released this frame and react accordingly
# ---------------------------------------------------------------------------------------------------------
def processInput(window):
if glfw.get_key(window, glfw.KEY_ESCAPE) == glfw.PRESS:
glfw.set_window_should_close(window, True)
width = 800
height = 600
# Initialize the library
if not glfw.init():
print("Failed to init glfw")
else:
glfw.window_hint(glfw.CONTEXT_VERSION_MAJOR, 3)
glfw.window_hint(glfw.CONTEXT_VERSION_MINOR, 3)
glfw.window_hint(glfw.OPENGL_PROFILE, glfw.OPENGL_CORE_PROFILE)
glfw.window_hint(glfw.OPENGL_FORWARD_COMPAT, GL_TRUE)
window = glfw.create_window(width, height, "LearnOpenGL", None, None)
if not window:
    print("Failed to create GLFW window")
    glfw.terminate()
    # bail out instead of falling through to calls that need a valid window
    raise SystemExit("Failed to create GLFW window")
glfw.make_context_current(window)
glfw.set_framebuffer_size_callback(window, framebuffer_size_callback)
## Load, compile, link shaders
import myshader
shaders = myshader.shader( "shaders/hellocolor.vert", "shaders/hellocolor.frag")
shaders.linkShaders()
# set up vertex data (and buffer(s)) and configure vertex attributes
# ------------------------------------------------------------------
import numpy as np
vertices = np.array([
0.5, 0.5, 0.0, 1.0, 0.0, 0.0, # top right
0.5, -0.5, 0.0, 0.0, 1.0, 0.0, # bottom right
-0.5, -0.5, 0.0, 0.0, 0.0, 1.0, # bottom left
-0.5, 0.5, 0.0, 1.0, 1.0, 1.0 # top left
], dtype=np.float32)
indices = np.array([ # note that we start from 0!
1, 3, 0, # first Triangle
1, 2, 3 # second Triangle
], dtype=np.uint32)
# bind the Vertex Array Object first, then bind and set vertex buffer(s), and then configure vertex attributes(s).
VAO = glGenVertexArrays(1)
glBindVertexArray(VAO)
VBO = glGenBuffers(1)
glBindBuffer(GL_ARRAY_BUFFER, VBO)
glBufferData(GL_ARRAY_BUFFER, vertices, GL_STATIC_DRAW)
EBO = glGenBuffers(1)
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, EBO)
glBufferData(GL_ELEMENT_ARRAY_BUFFER, indices, GL_STATIC_DRAW)
# glBufferData(GL_ELEMENT_ARRAY_BUFFER, indices_buffer, GL_STATIC_DRAW)
# d = glGetBufferSubData( GL_ELEMENT_ARRAY_BUFFER, 0, 6 * 4)
# print(d)
# d = glGetBufferSubData( GL_ARRAY_BUFFER, 0, 12 * 4)
# print(d)
## position of the attrib array, must match the shader
location = 0
glVertexAttribPointer(location, 3, GL_FLOAT, GL_FALSE, 6*4, None) #3 * 4, 0)
glEnableVertexAttribArray(location)
## position of the attrib array, must match the shader
location = 1
glVertexAttribPointer(location, 3, GL_FLOAT, GL_FALSE, 6*4, ctypes.c_void_p(3*4)) #3 * 4, 0)
glEnableVertexAttribArray(location)
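# Each vertex in the interleaved `vertices` array above is 6 floats (x, y, z, r, g, b):
# hence the stride of 6*4 bytes for both attributes, and the 3*4 byte offset that points
# the color attribute (location 1) past the three position floats.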
# note that this is allowed, the call to glVertexAttribPointer registered VBO as the
# vertex attribute's bound vertex buffer object so afterwards we can safely unbind
# glBindBuffer(GL_ARRAY_BUFFER, 0)
# remember: do NOT unbind the EBO while a VAO is active as the bound element buffer object
# IS stored in the VAO; keep the EBO bound.
# NO glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0)
# You can unbind the VAO afterwards so other VAO calls won't accidentally modify this VAO,
# but this rarely happens. Modifying other VAOs requires a call to glBindVertexArray anyways
# so we generally don't unbind VAOs (nor VBOs) when it's not directly necessary.
glBindVertexArray(0)
# uncomment this call to draw in wireframe polygons.
# glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)
# render loop
# -----------
glClearColor(0.9, 0.7, 0.7, 1.0)
shaders.use()
# no need to bind it every time, but we'll do so to keep things a bit more organized
glBindVertexArray(VAO) # seeing as we only have a single VAO there's
while not glfw.window_should_close(window):
# input
processInput(window)
timeValue = glfw.get_time()*1.0
greenValue = (math.sin(timeValue) / 2.0) + 0.5
# print( greenValue )
shaders.setUniform4f( "extraColor", 0.0, greenValue, 0.0, 1.0)
scaleUp = abs( greenValue )
shaders.setUniform1f( "scaleUp", scaleUp)
angle = timeValue
rotation = np.array([
math.cos(angle), - math.sin(angle),
math.sin(angle), math.cos(angle)
], dtype=np.float32)
shaders.setUniformMatrix2fv( "rotation", rotation)
# render
glClear(GL_COLOR_BUFFER_BIT)
# draw our first triangle
# glDrawArrays(GL_TRIANGLES, 0, 6)
glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, None)
# glfw: swap buffers and poll IO events (keys pressed/released, mouse moved etc.)
# -------------------------------------------------------------------------------
glfw.swap_buffers(window)
glfw.poll_events()
glBindVertexArray(0) # no need to unbind it every time
# optional: de-allocate all resources once they've outlived their purpose:
# ------------------------------------------------------------------------
glDeleteVertexArrays(1, [VAO])
glDeleteBuffers(1, [VBO])
glDeleteBuffers(1, [EBO])
# glfw: terminate, clearing all previously allocated GLFW resources.
# ------------------------------------------------------------------
glfw.terminate()
|
[
"glfw.window_hint",
"myshader.shader",
"glfw.poll_events",
"glfw.make_context_current",
"glfw.set_window_should_close",
"glfw.window_should_close",
"glfw.get_time",
"math.sin",
"glfw.init",
"glfw.get_key",
"numpy.array",
"ctypes.c_void_p",
"glfw.set_framebuffer_size_callback",
"math.cos",
"glfw.terminate",
"glfw.swap_buffers",
"glfw.create_window"
] |
[((1086, 1146), 'glfw.create_window', 'glfw.create_window', (['width', 'height', '"""LearnOpenGL"""', 'None', 'None'], {}), "(width, height, 'LearnOpenGL', None, None)\n", (1104, 1146), False, 'import glfw\n'), ((1234, 1267), 'glfw.make_context_current', 'glfw.make_context_current', (['window'], {}), '(window)\n', (1259, 1267), False, 'import glfw\n'), ((1268, 1337), 'glfw.set_framebuffer_size_callback', 'glfw.set_framebuffer_size_callback', (['window', 'framebuffer_size_callback'], {}), '(window, framebuffer_size_callback)\n', (1302, 1337), False, 'import glfw\n'), ((1396, 1465), 'myshader.shader', 'myshader.shader', (['"""shaders/hellocolor.vert"""', '"""shaders/hellocolor.frag"""'], {}), "('shaders/hellocolor.vert', 'shaders/hellocolor.frag')\n", (1411, 1465), False, 'import myshader\n'), ((1660, 1816), 'numpy.array', 'np.array', (['[0.5, 0.5, 0.0, 1.0, 0.0, 0.0, 0.5, -0.5, 0.0, 0.0, 1.0, 0.0, -0.5, -0.5, \n 0.0, 0.0, 0.0, 1.0, -0.5, 0.5, 0.0, 1.0, 1.0, 1.0]'], {'dtype': 'np.float32'}), '([0.5, 0.5, 0.0, 1.0, 0.0, 0.0, 0.5, -0.5, 0.0, 0.0, 1.0, 0.0, -0.5,\n -0.5, 0.0, 0.0, 0.0, 1.0, -0.5, 0.5, 0.0, 1.0, 1.0, 1.0], dtype=np.float32)\n', (1668, 1816), True, 'import numpy as np\n'), ((1943, 1988), 'numpy.array', 'np.array', (['[1, 3, 0, 1, 2, 3]'], {'dtype': 'np.uint32'}), '([1, 3, 0, 1, 2, 3], dtype=np.uint32)\n', (1951, 1988), True, 'import numpy as np\n'), ((5465, 5481), 'glfw.terminate', 'glfw.terminate', ([], {}), '()\n', (5479, 5481), False, 'import glfw\n'), ((794, 805), 'glfw.init', 'glfw.init', ([], {}), '()\n', (803, 805), False, 'import glfw\n'), ((850, 897), 'glfw.window_hint', 'glfw.window_hint', (['glfw.CONTEXT_VERSION_MAJOR', '(3)'], {}), '(glfw.CONTEXT_VERSION_MAJOR, 3)\n', (866, 897), False, 'import glfw\n'), ((902, 949), 'glfw.window_hint', 'glfw.window_hint', (['glfw.CONTEXT_VERSION_MINOR', '(3)'], {}), '(glfw.CONTEXT_VERSION_MINOR, 3)\n', (918, 949), False, 'import glfw\n'), ((954, 1017), 'glfw.window_hint', 'glfw.window_hint', (['glfw.OPENGL_PROFILE', 'glfw.OPENGL_CORE_PROFILE'], {}), '(glfw.OPENGL_PROFILE, glfw.OPENGL_CORE_PROFILE)\n', (970, 1017), False, 'import glfw\n'), ((1022, 1075), 'glfw.window_hint', 'glfw.window_hint', (['glfw.OPENGL_FORWARD_COMPAT', 'GL_TRUE'], {}), '(glfw.OPENGL_FORWARD_COMPAT, GL_TRUE)\n', (1038, 1075), False, 'import glfw\n'), ((1216, 1232), 'glfw.terminate', 'glfw.terminate', ([], {}), '()\n', (1230, 1232), False, 'import glfw\n'), ((3004, 3026), 'ctypes.c_void_p', 'ctypes.c_void_p', (['(3 * 4)'], {}), '(3 * 4)\n', (3019, 3026), False, 'import ctypes\n'), ((4087, 4119), 'glfw.window_should_close', 'glfw.window_should_close', (['window'], {}), '(window)\n', (4111, 4119), False, 'import glfw\n'), ((4987, 5012), 'glfw.swap_buffers', 'glfw.swap_buffers', (['window'], {}), '(window)\n', (5004, 5012), False, 'import glfw\n'), ((5017, 5035), 'glfw.poll_events', 'glfw.poll_events', ([], {}), '()\n', (5033, 5035), False, 'import glfw\n'), ((630, 667), 'glfw.get_key', 'glfw.get_key', (['window', 'glfw.KEY_ESCAPE'], {}), '(window, glfw.KEY_ESCAPE)\n', (642, 667), False, 'import glfw\n'), ((691, 733), 'glfw.set_window_should_close', 'glfw.set_window_should_close', (['window', '(True)'], {}), '(window, True)\n', (719, 733), False, 'import glfw\n'), ((4176, 4191), 'glfw.get_time', 'glfw.get_time', ([], {}), '()\n', (4189, 4191), False, 'import glfw\n'), ((4214, 4233), 'math.sin', 'math.sin', (['timeValue'], {}), '(timeValue)\n', (4222, 4233), False, 'import math\n'), ((4476, 4491), 'math.cos', 'math.cos', (['angle'], {}), '(angle)\n', (4484, 4491), 
False, 'import math\n'), ((4520, 4535), 'math.sin', 'math.sin', (['angle'], {}), '(angle)\n', (4528, 4535), False, 'import math\n'), ((4539, 4554), 'math.cos', 'math.cos', (['angle'], {}), '(angle)\n', (4547, 4554), False, 'import math\n'), ((4495, 4510), 'math.sin', 'math.sin', (['angle'], {}), '(angle)\n', (4503, 4510), False, 'import math\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 14 09:52:17 2018
@author: liaamaral
"""
#------
# Load the main libraries
import os
import csv
import numpy as np
import pandas as pd
import logging
#------
# Data input and output paths:
pathin="/media/DATA/tmp/datasets/subsetDB/rain/" # Path of the rain dataset
pathrain="/media/DATA/tmp/datasets/subsetDB/rain/" # Path of the rain dataset
#pathnorain="/Volumes/lia_595gb/randel/python/dados/subsetDB/norain/" # Path of the non rain dataset
#------
# Create the list of Dataframes, eliminating the files that start with ".":
frames = []
for file in os.listdir(pathin):
if file.startswith(".", 0, len(file)):
name = os.path.splitext(file)[0]
print("File name starts with point: ", name)
else:
logging.debug(file)
df = pd.read_csv(os.path.join(pathin, file), sep=',', decimal='.', encoding="utf8")
df.reset_index(drop=True, inplace=True)
frames.append(df)
logging.debug(frames)
#------
# Concatenation of the monthly Dataframes into the yearly Dataframe:
try:
DB_yrly_rain = pd.concat(frames, sort=False, ignore_index=True, verify_integrity=True)
except ValueError as e:
print("ValueError:", e)
# Repairing the additional column wrongly generated in concatenation:
if np.where(np.isfinite(DB_yrly_rain.iloc[:,34])):
DB_yrly_rain["correto"]=DB_yrly_rain.iloc[:,34]
else:
    # rows where column 34 is NaN
    pos = np.where(np.isnan(DB_yrly_rain.iloc[:, 34]))
    DB_yrly_rain["correto"] = DB_yrly_rain.iloc[:, 33]
#DB_yrly_norain = pd.concat(frames)
#------
# Giving the output file names:
DB_name="BR_yrly_rain.csv"
#DB_yrly_norain="BR_yrly_norain_.csv"
#------
# Saving the new output DB's (rain and no rain):
#DB_yrly_rain.to_csv(os.path.join(pathrain, DB_name),index=False,sep=",",decimal='.')
#print("The file ", DB_yrly_rain ," was genetared!")
DB_yrly_rain.to_csv(os.path.join(pathrain, DB_name),index=False,sep=",",decimal='.')
print("The file ", DB_name ," was genetared!")
|
[
"os.listdir",
"logging.debug",
"numpy.isfinite",
"os.path.splitext",
"os.path.join",
"pandas.concat"
] |
[((630, 648), 'os.listdir', 'os.listdir', (['pathin'], {}), '(pathin)\n', (640, 648), False, 'import os\n'), ((1151, 1222), 'pandas.concat', 'pd.concat', (['frames'], {'sort': '(False)', 'ignore_index': '(True)', 'verify_integrity': '(True)'}), '(frames, sort=False, ignore_index=True, verify_integrity=True)\n', (1160, 1222), True, 'import pandas as pd\n'), ((1368, 1405), 'numpy.isfinite', 'np.isfinite', (['DB_yrly_rain.iloc[:, 34]'], {}), '(DB_yrly_rain.iloc[:, 34])\n', (1379, 1405), True, 'import numpy as np\n'), ((1918, 1949), 'os.path.join', 'os.path.join', (['pathrain', 'DB_name'], {}), '(pathrain, DB_name)\n', (1930, 1949), False, 'import os\n'), ((808, 827), 'logging.debug', 'logging.debug', (['file'], {}), '(file)\n', (821, 827), False, 'import logging\n'), ((1002, 1023), 'logging.debug', 'logging.debug', (['frames'], {}), '(frames)\n', (1015, 1023), False, 'import logging\n'), ((710, 732), 'os.path.splitext', 'os.path.splitext', (['file'], {}), '(file)\n', (726, 732), False, 'import os\n'), ((853, 879), 'os.path.join', 'os.path.join', (['pathin', 'file'], {}), '(pathin, file)\n', (865, 879), False, 'import os\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: <NAME>
@contact: <EMAIL>
@software: PyCharm
@file: cam_sal_to_seed.py
@time: 2020/3/27 1:10
@desc:
"""
import numpy as np
from contrib.tasks.wsss.seeding.e2e_seeding.resolve_loc_cue_conflict import resolve_loc_cue_conflict_by_area_order
__all__ = ["cam_sal_to_seed"]
def cam_sal_to_seed(cam, sal, cls_in_label, cam_thresh, sal_thresh, ignore_label) -> np.ndarray:
"""Get localization cues with method in SEC paper
Perform hard threshold for each foreground class
Args:
cam: (H, W, num_class - 1) cam
sal: (H, W) saliency map
cls_in_label: list of foreground classes
cam_thresh: hard threshold to extract foreground class cues
sal_thresh: hard threshold to extract background class cues
ignore_label: ignore label in class cues
Returns:
(H, W) seed
"""
loc_cue_proposal = np.zeros(shape=(cam.shape[0], cam.shape[1], cam.shape[2] + 1), dtype=np.int) # (H, W, num_class)
for cls_idx in range(1, len(cls_in_label)):
if cls_in_label[cls_idx] == 1:
heat_map = cam[:, :, cls_idx - 1]
loc_cue_proposal[:, :, cls_idx] = heat_map > cam_thresh * np.amax(heat_map)
if cls_in_label[0] == 1:
loc_cue_proposal[:, :, 0] = sal < sal_thresh
# handle conflict seed
seed = resolve_loc_cue_conflict_by_area_order(loc_cue_proposal, ignore_label, train_boat=True)
return seed
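# A minimal usage sketch (illustrative shapes and threshold values only, not taken from
# any experiment): a stack of per-class CAMs, a saliency map, and the image-level labels
# go in, and an (H, W) seed with resolved class cues comes back.
#
#   cam = np.random.rand(64, 64, 20)                   # (H, W, num_class - 1)
#   sal = np.random.rand(64, 64)                       # (H, W)
#   cls_in_label = [1, 1] + [0] * 19                   # background + first foreground present
#   seed = cam_sal_to_seed(cam, sal, cls_in_label,
#                          cam_thresh=0.2, sal_thresh=0.06, ignore_label=255)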
|
[
"numpy.amax",
"numpy.zeros",
"contrib.tasks.wsss.seeding.e2e_seeding.resolve_loc_cue_conflict.resolve_loc_cue_conflict_by_area_order"
] |
[((916, 992), 'numpy.zeros', 'np.zeros', ([], {'shape': '(cam.shape[0], cam.shape[1], cam.shape[2] + 1)', 'dtype': 'np.int'}), '(shape=(cam.shape[0], cam.shape[1], cam.shape[2] + 1), dtype=np.int)\n', (924, 992), True, 'import numpy as np\n'), ((1357, 1448), 'contrib.tasks.wsss.seeding.e2e_seeding.resolve_loc_cue_conflict.resolve_loc_cue_conflict_by_area_order', 'resolve_loc_cue_conflict_by_area_order', (['loc_cue_proposal', 'ignore_label'], {'train_boat': '(True)'}), '(loc_cue_proposal, ignore_label,\n train_boat=True)\n', (1395, 1448), False, 'from contrib.tasks.wsss.seeding.e2e_seeding.resolve_loc_cue_conflict import resolve_loc_cue_conflict_by_area_order\n'), ((1217, 1234), 'numpy.amax', 'np.amax', (['heat_map'], {}), '(heat_map)\n', (1224, 1234), True, 'import numpy as np\n')]
|
from __future__ import division
import librosa, pydub
import numpy as np
from tempfile import TemporaryFile
import pickle, json, os
class mash:
def __init__(self, json_, cached=False):
self.sr = 22050 # new Sampling Rate for the audio files
self.songs = json_
self.Yin = []
self.Yout = []
self.pathIn = []
self.pathOut = []
self.beats = {'in': [], 'out': []}
self.tempo = {'in': 0, 'out': 0}
self._setup()
self._load(cached=cached)
self._extract()
self._segment()
self._speedUp()
out = self._mix()
print("Exporting...")
out.export(out_f="final.mp3", format="mp3")
print("[SUCCESS] Export as `final.mp3`")
def _setup(self):
if not os.path.exists('cache'):
os.makedirs('cache')
def _load(self, cached=True):
for song in self.songs:
if os.path.exists("cache/%s.pkl"%song['name']):
print("\nLoading", song['name'], "from cache")
with open("cache/%s.pkl"%song['name'], 'rb') as f:
if song['mixin']:
print("Yin=", song['name'])
self.Yin = pickle.load(f)
self.pathIn = song['path']
else:
print("Yout=", song['name'])
self.Yout.append(pickle.load(f))
self.pathOut.append(song['path'])
continue
print("\nLoading", song['name'])
y, sr = librosa.load(song['path'], sr=self.sr)
if song['mixin']:
self.Yin = y
self.pathIn = song['path']
else:
self.Yout.append(y)
self.pathOut.append(song['path'])
print("[SUCCESS] Loaded", song['name'])
if cached:
try:
with open('cache/%s.pkl'%song['name'], 'wb') as f:
pickle.dump(y, f)
print("[SUCCESS] Cached", song['name'])
except Exception as e:
print("[FAILED] Caching", song['name'])
print(e)
def _extract(self):
# TODO: Add cosine distance similarity to choose the best mixout
self.Yout = self.Yout[0] # NOTE: considering 1mixin & 1mixout
self.pathOut = self.pathOut[0]
self.tempo['in'], self.beats['in'] = librosa.beat.beat_track(y=self.Yin, sr=self.sr)
self.tempo['out'], self.beats['out'] = librosa.beat.beat_track(y=self.Yout, sr=self.sr)
print("TempoIn=", self.tempo['in'])
print("TempoOut=", self.tempo['out'])
self._OTAC()
self._crossFadeRegion()
def _OTAC(self): # Optimal Tempo Adjustment Coefficient Computation
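        # As implemented below: pick the octave shift c in {-2, ..., 2} that brings
        # 2**c * tempo_in closest to tempo_out, then take the positive root of
        # a*T**2 - (a - b)*Tlow*T - b*Thigh*Tlow = 0 as the target tempo, a weighted
        # compromise between the slower (Tlow) and faster (Thigh) of the two tempos.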
C = [-2, -1, 0, 1, 2]
if self.tempo['in'] == self.tempo['out']:
self.tempo['tgt'] = self.tempo['in']
return
Tin_ = [(2**c)*self.tempo['in'] for c in C]
TinIndex_ = np.argmin(np.absolute(Tin_ - self.tempo['out']))
Copt = C[TinIndex_]
Bopt = (2**Copt)*self.tempo['in']
Tlow = min(Bopt, self.tempo['out'])
Thigh = max(Bopt, self.tempo['out'])
a, b = 0.765, 1
Ttgt = (a-b)*Tlow + np.sqrt( ((a-b)**2)*(Tlow**2) + 4*a*b*Thigh*Tlow )
Ttgt = Ttgt/(2*a)
print("FoptIn=", Ttgt/Bopt)
print("FoptOut=", Ttgt/self.tempo['out'])
print("Ttgt=", Ttgt)
self.tempo['tgt'] = Ttgt
def _crossFadeRegion(self): # Computes the cross fade region for the mixed song
Na = self.beats['in'].shape[0]-1
scores = [self._score(i, Na) for i in range(2, int(Na/4))]
noBeats = np.argmax(scores)+2
inDuration = librosa.get_duration(y=self.Yin, sr=self.sr)
fadeInStart = librosa.frames_to_time(self.beats['in'], sr=self.sr)[-int(noBeats/2)]
fadeIn = inDuration - fadeInStart
fadeOut = librosa.frames_to_time(self.beats['out'], sr=self.sr)[int(noBeats/2)]
print("Best Power Corelation Scores=", np.max(scores))
print("Number of beats in cross fade region=", noBeats)
print("fadeInStart=", fadeInStart)
print("fadeOutEnd=", fadeOut)
print("Cross Fade Time=", fadeIn+fadeOut)
self.crossFade = [fadeInStart*1000, fadeOut*1000] # In milliseconds
def _score(self, T, Na):
cr = 0
for i in range(1, T+1):
cr += self.beats['in'][Na-i+1]*self.beats['out'][i]
return cr/T
def _segment(self):
print("Started Segmentation")
sIn = pydub.AudioSegment.from_file(self.pathIn, format="mp3")
sOut = pydub.AudioSegment.from_file(self.pathOut, format="mp3")
print("[SUCCESS] Segmented audio files")
self.segments = {
'in': [ sIn[:self.crossFade[0]], sIn[self.crossFade[0]:] ],
'out': [ sOut[:self.crossFade[1]], sOut[self.crossFade[1]:] ],
}
del sIn, sOut
def _speedUp(self):
s1 = self.segments['in'][1]
s2 = self.segments['out'][0]
speed1 = self.tempo['tgt']/self.tempo['in']
speed2 = self.tempo['tgt']/self.tempo['out']
print("Playback Speed of in end segment=",speed1,'X')
print("Playback Speed of out start segment=",speed2,'X')
        s1 = s1.speedup(playback_speed=speed1)
        s2 = s2.speedup(playback_speed=speed2)
        # keep the tempo-adjusted segments so _mix() actually uses them
        self.segments['in'][1] = s1
        self.segments['out'][0] = s2
def _mix(self):
xf = self.segments['in'][1].fade(to_gain=-120, start=0, end=float('inf'))
xf *= self.segments['out'][0].fade(from_gain=-120, start=0, end=float('inf'))
out = TemporaryFile()
out.write(self.segments['in'][0]._data)
out.write(xf._data)
out.write(self.segments['out'][1]._data)
out.seek(0)
print("[SUCCESS] Mixed 4 audio segment to 1")
return self.segments['in'][0]._spawn(data=out)
if __name__ == '__main__':
with open('songs.json', 'r') as f:
j = json.loads(f.read())
obj = mash(j, cached=True)
|
[
"numpy.absolute",
"pickle.dump",
"librosa.frames_to_time",
"os.makedirs",
"numpy.argmax",
"os.path.exists",
"tempfile.TemporaryFile",
"numpy.max",
"pickle.load",
"librosa.load",
"librosa.beat.beat_track",
"pydub.AudioSegment.from_file",
"numpy.sqrt",
"librosa.get_duration"
] |
[((2481, 2528), 'librosa.beat.beat_track', 'librosa.beat.beat_track', ([], {'y': 'self.Yin', 'sr': 'self.sr'}), '(y=self.Yin, sr=self.sr)\n', (2504, 2528), False, 'import librosa, pydub\n'), ((2576, 2624), 'librosa.beat.beat_track', 'librosa.beat.beat_track', ([], {'y': 'self.Yout', 'sr': 'self.sr'}), '(y=self.Yout, sr=self.sr)\n', (2599, 2624), False, 'import librosa, pydub\n'), ((3808, 3852), 'librosa.get_duration', 'librosa.get_duration', ([], {'y': 'self.Yin', 'sr': 'self.sr'}), '(y=self.Yin, sr=self.sr)\n', (3828, 3852), False, 'import librosa, pydub\n'), ((4652, 4707), 'pydub.AudioSegment.from_file', 'pydub.AudioSegment.from_file', (['self.pathIn'], {'format': '"""mp3"""'}), "(self.pathIn, format='mp3')\n", (4680, 4707), False, 'import librosa, pydub\n'), ((4723, 4779), 'pydub.AudioSegment.from_file', 'pydub.AudioSegment.from_file', (['self.pathOut'], {'format': '"""mp3"""'}), "(self.pathOut, format='mp3')\n", (4751, 4779), False, 'import librosa, pydub\n'), ((5667, 5682), 'tempfile.TemporaryFile', 'TemporaryFile', ([], {}), '()\n', (5680, 5682), False, 'from tempfile import TemporaryFile\n'), ((790, 813), 'os.path.exists', 'os.path.exists', (['"""cache"""'], {}), "('cache')\n", (804, 813), False, 'import pickle, json, os\n'), ((827, 847), 'os.makedirs', 'os.makedirs', (['"""cache"""'], {}), "('cache')\n", (838, 847), False, 'import pickle, json, os\n'), ((930, 975), 'os.path.exists', 'os.path.exists', (["('cache/%s.pkl' % song['name'])"], {}), "('cache/%s.pkl' % song['name'])\n", (944, 975), False, 'import pickle, json, os\n'), ((1581, 1619), 'librosa.load', 'librosa.load', (["song['path']"], {'sr': 'self.sr'}), "(song['path'], sr=self.sr)\n", (1593, 1619), False, 'import librosa, pydub\n'), ((3075, 3112), 'numpy.absolute', 'np.absolute', (["(Tin_ - self.tempo['out'])"], {}), "(Tin_ - self.tempo['out'])\n", (3086, 3112), True, 'import numpy as np\n'), ((3327, 3387), 'numpy.sqrt', 'np.sqrt', (['((a - b) ** 2 * Tlow ** 2 + 4 * a * b * Thigh * Tlow)'], {}), '((a - b) ** 2 * Tlow ** 2 + 4 * a * b * Thigh * Tlow)\n', (3334, 3387), True, 'import numpy as np\n'), ((3766, 3783), 'numpy.argmax', 'np.argmax', (['scores'], {}), '(scores)\n', (3775, 3783), True, 'import numpy as np\n'), ((3875, 3927), 'librosa.frames_to_time', 'librosa.frames_to_time', (["self.beats['in']"], {'sr': 'self.sr'}), "(self.beats['in'], sr=self.sr)\n", (3897, 3927), False, 'import librosa, pydub\n'), ((4006, 4059), 'librosa.frames_to_time', 'librosa.frames_to_time', (["self.beats['out']"], {'sr': 'self.sr'}), "(self.beats['out'], sr=self.sr)\n", (4028, 4059), False, 'import librosa, pydub\n'), ((4124, 4138), 'numpy.max', 'np.max', (['scores'], {}), '(scores)\n', (4130, 4138), True, 'import numpy as np\n'), ((1230, 1244), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1241, 1244), False, 'import pickle, json, os\n'), ((2018, 2035), 'pickle.dump', 'pickle.dump', (['y', 'f'], {}), '(y, f)\n', (2029, 2035), False, 'import pickle, json, os\n'), ((1416, 1430), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1427, 1430), False, 'import pickle, json, os\n')]
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine.functional as F
from megengine import tensor
def test_cross_entropy_with_logits():
data = tensor([[0, 50], [0, -150]]).astype(np.float32)
label = tensor([1, 0]).astype(np.int32)
loss = F.nn.cross_entropy(data, label)
np.testing.assert_allclose(loss.numpy(), 0.0)
label = tensor([0, 1]).astype(np.int32)
loss = F.nn.cross_entropy(data, label)
np.testing.assert_allclose(loss.numpy(), 100)
label = np.array([1, 0])
loss = F.nn.cross_entropy(data, label)
np.testing.assert_allclose(loss.numpy(), 0.0)
def test_cross_entropy():
def softmax(x):
x = np.exp(x)
x /= x.sum(1, keepdims=True)
return x
def ref(x, y):
return np.mean([-np.log(x[i, y[i]]) for i in range(len(y))])
x = (np.random.rand(5, 10) - 0.5) * 4
y = np.random.randint(10, size=(5,))
for i in range(len(x)):
x[i, y[i]] += np.random.rand() * 2
x = softmax(x)
l_ref = ref(x, y)
l = F.nn.cross_entropy(tensor(x, "float32"), tensor(y, "int32"), with_logits=False)
np.testing.assert_allclose(l.numpy(), l_ref)
def test_cross_entropy_reduction():
logits = np.random.randn(16, 10)
label = np.random.randint(10, size=[16])
logits = tensor(logits, dtype="float32")
label = tensor(label, dtype="int32")
perm = np.random.permutation(16)
logits_perm = tensor(logits[perm], dtype="float32")
label_perm = tensor(label[perm], dtype="int32")
loss = F.nn.cross_entropy(logits, label, reduction="none")
loss_perm = F.nn.cross_entropy(logits_perm, label_perm, reduction="none")
np.testing.assert_allclose(loss.numpy()[perm], loss_perm.numpy())
loss_sum = F.nn.cross_entropy(logits, label, reduction="sum")
np.testing.assert_allclose(loss.numpy().sum(), loss_sum.numpy(), rtol=2e-7)
loss_mean = F.nn.cross_entropy(logits, label, reduction="mean")
np.testing.assert_allclose(loss_mean.numpy(), loss_sum.numpy() / 16)
loss_ls = F.nn.cross_entropy(logits, label, reduction="mean", label_smooth=0.1)
loss_ls_none_reduce = F.nn.cross_entropy(
logits, label, reduction="none", label_smooth=0.1
)
np.testing.assert_allclose(
loss_ls.numpy(), loss_ls_none_reduce.numpy().mean(), rtol=2e-7
)
with pytest.raises(ValueError):
F.nn.cross_entropy(logits, label, reduction="MEAN")
with pytest.raises(ValueError):
F.nn.cross_entropy(logits, label, reduction="max")
|
[
"megengine.functional.nn.cross_entropy",
"megengine.tensor",
"numpy.log",
"numpy.random.randn",
"numpy.random.rand",
"pytest.raises",
"numpy.random.randint",
"numpy.array",
"numpy.exp",
"numpy.random.permutation"
] |
[((627, 658), 'megengine.functional.nn.cross_entropy', 'F.nn.cross_entropy', (['data', 'label'], {}), '(data, label)\n', (645, 658), True, 'import megengine.functional as F\n'), ((764, 795), 'megengine.functional.nn.cross_entropy', 'F.nn.cross_entropy', (['data', 'label'], {}), '(data, label)\n', (782, 795), True, 'import megengine.functional as F\n'), ((859, 875), 'numpy.array', 'np.array', (['[1, 0]'], {}), '([1, 0])\n', (867, 875), True, 'import numpy as np\n'), ((887, 918), 'megengine.functional.nn.cross_entropy', 'F.nn.cross_entropy', (['data', 'label'], {}), '(data, label)\n', (905, 918), True, 'import megengine.functional as F\n'), ((1233, 1265), 'numpy.random.randint', 'np.random.randint', (['(10)'], {'size': '(5,)'}), '(10, size=(5,))\n', (1250, 1265), True, 'import numpy as np\n'), ((1566, 1589), 'numpy.random.randn', 'np.random.randn', (['(16)', '(10)'], {}), '(16, 10)\n', (1581, 1589), True, 'import numpy as np\n'), ((1602, 1634), 'numpy.random.randint', 'np.random.randint', (['(10)'], {'size': '[16]'}), '(10, size=[16])\n', (1619, 1634), True, 'import numpy as np\n'), ((1648, 1679), 'megengine.tensor', 'tensor', (['logits'], {'dtype': '"""float32"""'}), "(logits, dtype='float32')\n", (1654, 1679), False, 'from megengine import tensor\n'), ((1692, 1720), 'megengine.tensor', 'tensor', (['label'], {'dtype': '"""int32"""'}), "(label, dtype='int32')\n", (1698, 1720), False, 'from megengine import tensor\n'), ((1733, 1758), 'numpy.random.permutation', 'np.random.permutation', (['(16)'], {}), '(16)\n', (1754, 1758), True, 'import numpy as np\n'), ((1777, 1814), 'megengine.tensor', 'tensor', (['logits[perm]'], {'dtype': '"""float32"""'}), "(logits[perm], dtype='float32')\n", (1783, 1814), False, 'from megengine import tensor\n'), ((1832, 1866), 'megengine.tensor', 'tensor', (['label[perm]'], {'dtype': '"""int32"""'}), "(label[perm], dtype='int32')\n", (1838, 1866), False, 'from megengine import tensor\n'), ((1879, 1930), 'megengine.functional.nn.cross_entropy', 'F.nn.cross_entropy', (['logits', 'label'], {'reduction': '"""none"""'}), "(logits, label, reduction='none')\n", (1897, 1930), True, 'import megengine.functional as F\n'), ((1947, 2008), 'megengine.functional.nn.cross_entropy', 'F.nn.cross_entropy', (['logits_perm', 'label_perm'], {'reduction': '"""none"""'}), "(logits_perm, label_perm, reduction='none')\n", (1965, 2008), True, 'import megengine.functional as F\n'), ((2095, 2145), 'megengine.functional.nn.cross_entropy', 'F.nn.cross_entropy', (['logits', 'label'], {'reduction': '"""sum"""'}), "(logits, label, reduction='sum')\n", (2113, 2145), True, 'import megengine.functional as F\n'), ((2243, 2294), 'megengine.functional.nn.cross_entropy', 'F.nn.cross_entropy', (['logits', 'label'], {'reduction': '"""mean"""'}), "(logits, label, reduction='mean')\n", (2261, 2294), True, 'import megengine.functional as F\n'), ((2383, 2452), 'megengine.functional.nn.cross_entropy', 'F.nn.cross_entropy', (['logits', 'label'], {'reduction': '"""mean"""', 'label_smooth': '(0.1)'}), "(logits, label, reduction='mean', label_smooth=0.1)\n", (2401, 2452), True, 'import megengine.functional as F\n'), ((2479, 2548), 'megengine.functional.nn.cross_entropy', 'F.nn.cross_entropy', (['logits', 'label'], {'reduction': '"""none"""', 'label_smooth': '(0.1)'}), "(logits, label, reduction='none', label_smooth=0.1)\n", (2497, 2548), True, 'import megengine.functional as F\n'), ((1029, 1038), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (1035, 1038), True, 'import numpy as np\n'), ((1405, 1425), 
'megengine.tensor', 'tensor', (['x', '"""float32"""'], {}), "(x, 'float32')\n", (1411, 1425), False, 'from megengine import tensor\n'), ((1427, 1445), 'megengine.tensor', 'tensor', (['y', '"""int32"""'], {}), "(y, 'int32')\n", (1433, 1445), False, 'from megengine import tensor\n'), ((2682, 2707), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2695, 2707), False, 'import pytest\n'), ((2717, 2768), 'megengine.functional.nn.cross_entropy', 'F.nn.cross_entropy', (['logits', 'label'], {'reduction': '"""MEAN"""'}), "(logits, label, reduction='MEAN')\n", (2735, 2768), True, 'import megengine.functional as F\n'), ((2779, 2804), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2792, 2804), False, 'import pytest\n'), ((2814, 2864), 'megengine.functional.nn.cross_entropy', 'F.nn.cross_entropy', (['logits', 'label'], {'reduction': '"""max"""'}), "(logits, label, reduction='max')\n", (2832, 2864), True, 'import megengine.functional as F\n'), ((524, 552), 'megengine.tensor', 'tensor', (['[[0, 50], [0, -150]]'], {}), '([[0, 50], [0, -150]])\n', (530, 552), False, 'from megengine import tensor\n'), ((584, 598), 'megengine.tensor', 'tensor', (['[1, 0]'], {}), '([1, 0])\n', (590, 598), False, 'from megengine import tensor\n'), ((721, 735), 'megengine.tensor', 'tensor', (['[0, 1]'], {}), '([0, 1])\n', (727, 735), False, 'from megengine import tensor\n'), ((1192, 1213), 'numpy.random.rand', 'np.random.rand', (['(5)', '(10)'], {}), '(5, 10)\n', (1206, 1213), True, 'import numpy as np\n'), ((1316, 1332), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (1330, 1332), True, 'import numpy as np\n'), ((1138, 1156), 'numpy.log', 'np.log', (['x[i, y[i]]'], {}), '(x[i, y[i]])\n', (1144, 1156), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
from cv_bridge import CvBridge, CvBridgeError
import cv2
import rospy
from sensor_msgs.msg import Image
from gazebo_msgs.srv import GetModelState
from tf.transformations import euler_from_quaternion
import numpy as np
import json
import os
data_dir = os.path.abspath( os.path.join(os.path.dirname(__file__), os.pardir)) + "/data/"
def main():
rospy.init_node("satview_streamer")
bridge = CvBridge()
sv_filename = data_dir + "hk_data_sv_mean.json"
sv_data = {}
with open(sv_filename, "rb") as fstream:
sv_data = json.load(fstream)
print(sv_data.keys())
cno_max = np.max([sv_data[key]["cno_max"] for key in sv_data.keys()])
gms = rospy.ServiceProxy("/gazebo/get_model_state", GetModelState)
img_pub = rospy.Publisher("skycam/satview", Image, queue_size=1)
start_time = rospy.get_rostime()
cb_args = [bridge, sv_data, gms, img_pub, start_time, cno_max]
img_sub = rospy.Subscriber("skycam/image_raw", Image, img_sub_cb, cb_args)
def img_sub_cb(data, cb_args):
bridge = cb_args[0]
sv_data = cb_args[1]
gms = cb_args[2]
img_pub = cb_args[3]
start_time = cb_args[4]
cno_max = cb_args[5]
model_pose = gms("laser_0", "world")
model_euler = euler_from_quaternion(
[model_pose.pose.orientation.x,
model_pose.pose.orientation.y,
model_pose.pose.orientation.z,
model_pose.pose.orientation.w,]
)
# ENU (gzb) to NED
heading = np.pi/2 - model_euler[2]
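    # ENU yaw is measured counter-clockwise from east, while a compass-style (NED) heading
    # is measured clockwise from north, hence heading = pi/2 - yaw.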
cv_img = bridge.imgmsg_to_cv2(data, "bgr8")
cv_img = np.array(np.flip(cv_img, axis=0))
img_height = cv_img.shape[0]
img_width = cv_img.shape[1]
img_center = np.array([img_height/2.0, img_width/2.0]) # [250 250]
r_max = np.min(img_center)
green = (0, 255, 0)
red = (0, 0, 255)
blue = (255, 0, 0)
now = rospy.get_rostime()
elapsed = (now-start_time).to_sec()
for sv_id in sv_data.keys():
elev = sv_data[sv_id]["mean"][0]
azim = sv_data[sv_id]["mean"][1]
index = int(elapsed*10 % len(sv_data[sv_id]["cno"]))
cno = sv_data[sv_id]["cno"][index]
# print(sv_id+" cno: ", cno)
# print(sv_id+" color: ", int((cno)/cno_max*255), int((cno_max - cno)/cno_max*255))
r = (90.0 - elev)/90.0 * r_max
theta = np.deg2rad(azim) - np.pi/2 - heading
x = int(r*np.cos(theta) + img_center[0])
y = int(r*np.sin(theta) + img_center[1])
cv2.circle(cv_img, (x, y), 10, (0, int((cno)/cno_max*255), int((cno_max-cno)/cno_max*255)/2), -1)
cv2.circle(cv_img, (x, y), 11, (0, 0, 255), 2)
cv2.putText(cv_img, sv_id, (x-10, y-15), cv2.FONT_HERSHEY_SIMPLEX, 0.3, green, 1)
nesw = ["N", "E", "S", "W"]
for i in range(4):
theta = i*np.pi/2 - np.pi/2 - heading
r = 235
x = int(r*np.cos(theta) + img_center[0])
y = int(r*np.sin(theta) + img_center[1])
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(cv_img, nesw[i], (x,y), font, 0.5, green, 2)
ros_img = bridge.cv2_to_imgmsg(cv_img, "bgr8")
img_pub.publish(ros_img)
# cv2.imshow("skycam", cv_img)
# k = cv2.waitKey(3) & 0xff
if __name__ == "__main__":
main()
try:
rospy.spin()
except KeyboardInterrupt:
print("Shutting down...")
cv2.destroyAllWindows()
|
[
"rospy.Subscriber",
"rospy.ServiceProxy",
"numpy.sin",
"os.path.dirname",
"rospy.init_node",
"cv2.destroyAllWindows",
"cv2.circle",
"numpy.min",
"numpy.cos",
"cv_bridge.CvBridge",
"json.load",
"numpy.flip",
"cv2.putText",
"numpy.deg2rad",
"rospy.get_rostime",
"rospy.Publisher",
"numpy.array",
"tf.transformations.euler_from_quaternion",
"rospy.spin"
] |
[((374, 409), 'rospy.init_node', 'rospy.init_node', (['"""satview_streamer"""'], {}), "('satview_streamer')\n", (389, 409), False, 'import rospy\n'), ((428, 438), 'cv_bridge.CvBridge', 'CvBridge', ([], {}), '()\n', (436, 438), False, 'from cv_bridge import CvBridge, CvBridgeError\n'), ((700, 760), 'rospy.ServiceProxy', 'rospy.ServiceProxy', (['"""/gazebo/get_model_state"""', 'GetModelState'], {}), "('/gazebo/get_model_state', GetModelState)\n", (718, 760), False, 'import rospy\n'), ((775, 829), 'rospy.Publisher', 'rospy.Publisher', (['"""skycam/satview"""', 'Image'], {'queue_size': '(1)'}), "('skycam/satview', Image, queue_size=1)\n", (790, 829), False, 'import rospy\n'), ((847, 866), 'rospy.get_rostime', 'rospy.get_rostime', ([], {}), '()\n', (864, 866), False, 'import rospy\n'), ((948, 1012), 'rospy.Subscriber', 'rospy.Subscriber', (['"""skycam/image_raw"""', 'Image', 'img_sub_cb', 'cb_args'], {}), "('skycam/image_raw', Image, img_sub_cb, cb_args)\n", (964, 1012), False, 'import rospy\n'), ((1258, 1415), 'tf.transformations.euler_from_quaternion', 'euler_from_quaternion', (['[model_pose.pose.orientation.x, model_pose.pose.orientation.y, model_pose.\n pose.orientation.z, model_pose.pose.orientation.w]'], {}), '([model_pose.pose.orientation.x, model_pose.pose.\n orientation.y, model_pose.pose.orientation.z, model_pose.pose.\n orientation.w])\n', (1279, 1415), False, 'from tf.transformations import euler_from_quaternion\n'), ((1694, 1739), 'numpy.array', 'np.array', (['[img_height / 2.0, img_width / 2.0]'], {}), '([img_height / 2.0, img_width / 2.0])\n', (1702, 1739), True, 'import numpy as np\n'), ((1765, 1783), 'numpy.min', 'np.min', (['img_center'], {}), '(img_center)\n', (1771, 1783), True, 'import numpy as np\n'), ((1868, 1887), 'rospy.get_rostime', 'rospy.get_rostime', ([], {}), '()\n', (1885, 1887), False, 'import rospy\n'), ((3342, 3365), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (3363, 3365), False, 'import cv2\n'), ((571, 589), 'json.load', 'json.load', (['fstream'], {}), '(fstream)\n', (580, 589), False, 'import json\n'), ((1582, 1605), 'numpy.flip', 'np.flip', (['cv_img'], {'axis': '(0)'}), '(cv_img, axis=0)\n', (1589, 1605), True, 'import numpy as np\n'), ((2589, 2635), 'cv2.circle', 'cv2.circle', (['cv_img', '(x, y)', '(11)', '(0, 0, 255)', '(2)'], {}), '(cv_img, (x, y), 11, (0, 0, 255), 2)\n', (2599, 2635), False, 'import cv2\n'), ((2644, 2733), 'cv2.putText', 'cv2.putText', (['cv_img', 'sv_id', '(x - 10, y - 15)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.3)', 'green', '(1)'], {}), '(cv_img, sv_id, (x - 10, y - 15), cv2.FONT_HERSHEY_SIMPLEX, 0.3,\n green, 1)\n', (2655, 2733), False, 'import cv2\n'), ((2994, 3051), 'cv2.putText', 'cv2.putText', (['cv_img', 'nesw[i]', '(x, y)', 'font', '(0.5)', 'green', '(2)'], {}), '(cv_img, nesw[i], (x, y), font, 0.5, green, 2)\n', (3005, 3051), False, 'import cv2\n'), ((3260, 3272), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (3270, 3272), False, 'import rospy\n'), ((306, 331), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (321, 331), False, 'import os\n'), ((2338, 2354), 'numpy.deg2rad', 'np.deg2rad', (['azim'], {}), '(azim)\n', (2348, 2354), True, 'import numpy as np\n'), ((2394, 2407), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (2400, 2407), True, 'import numpy as np\n'), ((2443, 2456), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (2449, 2456), True, 'import numpy as np\n'), ((2866, 2879), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (2872, 2879), True, 'import numpy 
as np\n'), ((2915, 2928), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (2921, 2928), True, 'import numpy as np\n')]
|
import csv
import numpy as np
import os
import sklearn
from sklearn import datasets
from sklearn import neighbors, datasets, preprocessing
from sklearn.decomposition import PCA
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, auc
from sklearn.neighbors import KNeighborsClassifier
import math
from sklearn.multioutput import MultiOutputClassifier
'''
3 6
1 4
2 5
'''
def logistic(X_train, y_train, X_test):
output = np.zeros((2705, 147))#2705 or 1967
# X_train, X_test, y_train, y_test = train_test_split(X, Y)
# print(y_train)
# clf.predict_proba(X)
clf = LogisticRegression(random_state=0, solver='saga', n_jobs=-1, max_iter=250)
    # one binary classifier per genre column (output holds 147 genres)
    for k in range(147):
        clf.fit(X_train, y_train[:, k])
        print(k+1, "/147")
y_predict = clf.predict_proba(X_test)
output[:, k] = y_predict[:, 1]
np.savetxt("logistic.csv", output, delimiter=",")
def RandForest(X_train, y_train, X_test):
# X_train, X_test, y_train, y_test = train_test_split(X, Y)
output = np.zeros((2705,147))
for k in range(147):
clf = RandomForestClassifier(n_estimators=100, max_depth=60,
random_state=0)
clf.fit(X_train, y_train[:,k])
print(k, "/75")
y_predict = clf.predict_proba(X_test)
# print(y_predict)
output[:, k] = y_predict[:, 1]
# print(y_test)
# print("\n\n",output)
# fpr, tpr, thresholds = sklearn.metrics.roc_curve(y_test, output)
# roc_auc = auc(fpr, tpr)
# print(roc_auc)
np.savetxt("RandomForest.csv", output, delimiter=",")
print("done")
def KNN(X_train, x_test, y_train, y_test):
knn = KNeighborsClassifier(algorithm='auto', metric='minkowski', metric_params=None, n_jobs=-1,
n_neighbors=147, p=2, weights='distance')
print("poopf")
knn.fit(X_train, y_train)
classifier = MultiOutputClassifier(knn, n_jobs=-1)
classifier.fit(X_train, y_train)
y_predict = (classifier.predict_proba(x_test))
output = np.zeros((1967,147)) #2597
for x in range(1967):
for y in range(147):
output[x][y] = y_predict[y][x][1]
# print(output)
# np.savetxt("sub.csv", output, delimiter=",")
print(classifier.score(output,y_test))
def main():
Trainfiles = 7868
TrainList = np.zeros((7868, 76))
for x in range(Trainfiles):
filename = "/Users/harrymargalotti/MLfinal/MachineLearningFinal/Kaggle_Final/train_feature_files/" + str(
x) + ".npz"
data = np.load(filename)
TrainList[x] = data['summary']
X = TrainList
X = np.nan_to_num(X)
tesetfile = 2705
testList = np.zeros((2705, 76))
for x in range(tesetfile):
filename = "/Users/harrymargalotti/MLfinal/MachineLearningFinal/Kaggle_Final/test_feature_files/" + str(
x) + ".npz"
data = np.load(filename)
testList[x] = data['summary']
xtest = testList
xtest= np.nan_to_num(xtest)
file = '/Users/harrymargalotti/MLfinal/MachineLearningFinal/Kaggle_Final/cal10k_train_data.csv'
y = np.array(list(csv.reader(open(file, "r"), delimiter=","))).astype("float")
# X_train, X_test, y_train, y_test = train_test_split(X, y)
# print("Xtrain: ", X_train.shape)
# print("Xtest: ",X_test.shape)
# print("ytrain: ", y_train.shape)
# print("ytest: ", y_test.shape)
# KNN(X_train, X_test, y_train, y_test)
print("data load done")
# RandForest(X, y, xtest)
pca = PCA()
pca.fit(X)
logistic(X, y, xtest)
main()
'''
Xtrain: (5901, 76)
Xtest: (1967, 76)
ytrain: (5901, 148)
ytest: (1967, 148)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.33, random_state=42)
create a data matrix X_train from the 74-dimensional summary audio feature vectors for each of the 7868 training tracks: done
Load the cal10k_train_data matrix Y_train for the 147 genres and 7868 training tracks
Using scikit-learn, train 147 logistic regression classifiers with one for each genre
Iterate through the list of test npz files and create a data matrix X_test from the 74-dimensional summary audio feature vectors for each of the 2705 test tracks
Predict the probability of each test track and each genre. This should be a 2705-by-147 dimensional matrix called Y_predict
Format Y_predict so that it match the file format that is given in the cal10k_test_random_submission.csv
Upload your submission csv to Kaggle and check out the leaderboard
------------------------------------------------------------------------------------------------------------------------------
The training set is a subset of the data set used to train a model.
x_train is the training data set.
y_train is the set of labels to all the data in x_train.
The test set is a subset of the data set that you use to test your model after the model has gone through initial vetting by the validation set.
x_test is the test data set.
y_test is the set of labels to all the data in x_test.
'''
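# Illustrative sketch, not part of the original pipeline: the commented-out
# roc_curve/auc lines in RandForest hint at checking per-genre performance.
# The hypothetical helper below (defined but never called) shows one way to
# estimate a mean per-label ROC AUC on a held-out split using only standard
# sklearn calls; treat the split size and solver settings as assumptions.
def mean_label_auc(X, Y):
    from sklearn.metrics import roc_auc_score
    X_tr, X_val, Y_tr, Y_val = train_test_split(X, Y, test_size=0.25, random_state=0)
    aucs = []
    for k in range(Y.shape[1]):
        if len(np.unique(Y_val[:, k])) < 2:
            continue  # AUC is undefined when only one class is present
        clf = LogisticRegression(random_state=0, solver='saga', max_iter=250)
        clf.fit(X_tr, Y_tr[:, k])
        aucs.append(roc_auc_score(Y_val[:, k], clf.predict_proba(X_val)[:, 1]))
    return float(np.mean(aucs))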
|
[
"sklearn.ensemble.RandomForestClassifier",
"numpy.load",
"numpy.nan_to_num",
"numpy.savetxt",
"numpy.zeros",
"sklearn.linear_model.LogisticRegression",
"sklearn.neighbors.KNeighborsClassifier",
"sklearn.decomposition.PCA",
"sklearn.multioutput.MultiOutputClassifier"
] |
[((573, 594), 'numpy.zeros', 'np.zeros', (['(2705, 147)'], {}), '((2705, 147))\n', (581, 594), True, 'import numpy as np\n'), ((730, 804), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'random_state': '(0)', 'solver': '"""saga"""', 'n_jobs': '(-1)', 'max_iter': '(250)'}), "(random_state=0, solver='saga', n_jobs=-1, max_iter=250)\n", (748, 804), False, 'from sklearn.linear_model import LogisticRegression\n'), ((986, 1035), 'numpy.savetxt', 'np.savetxt', (['"""logistic.csv"""', 'output'], {'delimiter': '""","""'}), "('logistic.csv', output, delimiter=',')\n", (996, 1035), True, 'import numpy as np\n'), ((1157, 1178), 'numpy.zeros', 'np.zeros', (['(2705, 147)'], {}), '((2705, 147))\n', (1165, 1178), True, 'import numpy as np\n'), ((1679, 1732), 'numpy.savetxt', 'np.savetxt', (['"""RandomForest.csv"""', 'output'], {'delimiter': '""","""'}), "('RandomForest.csv', output, delimiter=',')\n", (1689, 1732), True, 'import numpy as np\n'), ((1805, 1941), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'algorithm': '"""auto"""', 'metric': '"""minkowski"""', 'metric_params': 'None', 'n_jobs': '(-1)', 'n_neighbors': '(147)', 'p': '(2)', 'weights': '"""distance"""'}), "(algorithm='auto', metric='minkowski', metric_params=\n None, n_jobs=-1, n_neighbors=147, p=2, weights='distance')\n", (1825, 1941), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((2028, 2065), 'sklearn.multioutput.MultiOutputClassifier', 'MultiOutputClassifier', (['knn'], {'n_jobs': '(-1)'}), '(knn, n_jobs=-1)\n', (2049, 2065), False, 'from sklearn.multioutput import MultiOutputClassifier\n'), ((2167, 2188), 'numpy.zeros', 'np.zeros', (['(1967, 147)'], {}), '((1967, 147))\n', (2175, 2188), True, 'import numpy as np\n'), ((2461, 2481), 'numpy.zeros', 'np.zeros', (['(7868, 76)'], {}), '((7868, 76))\n', (2469, 2481), True, 'import numpy as np\n'), ((2750, 2766), 'numpy.nan_to_num', 'np.nan_to_num', (['X'], {}), '(X)\n', (2763, 2766), True, 'import numpy as np\n'), ((2804, 2824), 'numpy.zeros', 'np.zeros', (['(2705, 76)'], {}), '((2705, 76))\n', (2812, 2824), True, 'import numpy as np\n'), ((3096, 3116), 'numpy.nan_to_num', 'np.nan_to_num', (['xtest'], {}), '(xtest)\n', (3109, 3116), True, 'import numpy as np\n'), ((3629, 3634), 'sklearn.decomposition.PCA', 'PCA', ([], {}), '()\n', (3632, 3634), False, 'from sklearn.decomposition import PCA\n'), ((1218, 1288), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(100)', 'max_depth': '(60)', 'random_state': '(0)'}), '(n_estimators=100, max_depth=60, random_state=0)\n', (1240, 1288), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((2667, 2684), 'numpy.load', 'np.load', (['filename'], {}), '(filename)\n', (2674, 2684), True, 'import numpy as np\n'), ((3008, 3025), 'numpy.load', 'np.load', (['filename'], {}), '(filename)\n', (3015, 3025), True, 'import numpy as np\n')]
|
# coding: utf-8
# In[ ]:
import numpy
import sys
import glob
import matplotlib.pyplot
def analyze(filename):
data = numpy.loadtxt(fname=filename, delimiter=',')
fig = matplotlib.pyplot.figure(figsize=(10.0,3.0))
axes1 = fig.add_subplot(1,3,1)
axes2 = fig.add_subplot(1,3,2)
axes3 = fig.add_subplot(1,3,3)
axes1.set_ylabel("average")
axes1.plot(numpy.mean(data,axis=0))
axes2.set_ylabel("min")
axes2.plot(numpy.min(data,axis=0))
axes3.set_ylabel("max")
axes3.plot(numpy.max(data,axis=0))
fig.tight_layout()
matplotlib.pyplot.savefig(filename+'_fig.eps')
def detect_problems(f_name):
data = numpy.loadtxt(fname=f_name, delimiter=',')
if numpy.max(data, axis=0)[0] == 0 and numpy.max(data, axis=0)[20] == 20:
print("Suspicous looking maxima")
elif numpy.sum(numpy.min(data,axis=0)) == 0:
print("Suspicous looking minima")
else:
print("OK")
name = sys.argv[1]
filenames = sorted(glob.glob(name+'*.csv'))
for f in filenames:
print(f)
analyze(f)
detect_problems(f)
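# Usage sketch (assumed invocation, matching the sys.argv/glob pattern above):
#     python analyze_inflammation.py inflammation
# would analyze every CSV whose name starts with "inflammation" in the current
# directory and write one <name>_fig.eps figure per file. The script name is
# hypothetical; pass whatever prefix your data files share.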
|
[
"numpy.min",
"numpy.mean",
"numpy.max",
"numpy.loadtxt",
"glob.glob"
] |
[((130, 174), 'numpy.loadtxt', 'numpy.loadtxt', ([], {'fname': 'filename', 'delimiter': '""","""'}), "(fname=filename, delimiter=',')\n", (143, 174), False, 'import numpy\n'), ((667, 709), 'numpy.loadtxt', 'numpy.loadtxt', ([], {'fname': 'f_name', 'delimiter': '""","""'}), "(fname=f_name, delimiter=',')\n", (680, 709), False, 'import numpy\n'), ((999, 1024), 'glob.glob', 'glob.glob', (["(name + '*.csv')"], {}), "(name + '*.csv')\n", (1008, 1024), False, 'import glob\n'), ((385, 409), 'numpy.mean', 'numpy.mean', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (395, 409), False, 'import numpy\n'), ((454, 477), 'numpy.min', 'numpy.min', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (463, 477), False, 'import numpy\n'), ((522, 545), 'numpy.max', 'numpy.max', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (531, 545), False, 'import numpy\n'), ((717, 740), 'numpy.max', 'numpy.max', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (726, 740), False, 'import numpy\n'), ((753, 776), 'numpy.max', 'numpy.max', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (762, 776), False, 'import numpy\n'), ((849, 872), 'numpy.min', 'numpy.min', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (858, 872), False, 'import numpy\n')]
|
import numpy as np
import pytest
import numpy.testing as npt
from lenstronomy.Util import util
import lenstronomy.Util.param_util as param_util
def test_cart2polar():
    # single 2D coordinate transformation
center_x, center_y = 0, 0
x = 1
y = 1
r, phi = param_util.cart2polar(x, y, center_x, center_y)
assert r == np.sqrt(2) #radial part
assert phi == np.arctan(1)
#array of 2d coordinates
x = np.array([1, 2])
y = np.array([1, 1])
r, phi = param_util.cart2polar(x, y, center_x, center_y)
assert r[0] == np.sqrt(2) #radial part
assert phi[0] == np.arctan(1)
def test_polar2cart():
    # single 2D coordinate transformation
center = np.array([0,0])
r = 1
phi = np.pi
x, y = param_util.polar2cart(r, phi, center)
assert x == -1
assert abs(y) < 10e-14
def test_phi_q2_ellipticity():
phi, q = 0, 1
e1,e2 = param_util.phi_q2_ellipticity(phi, q)
assert e1 == 0
assert e2 == 0
phi, q = 1, 1
e1,e2 = param_util.phi_q2_ellipticity(phi, q)
assert e1 == 0
assert e2 == 0
phi, q = 2.,0.95
e1, e2 = param_util.phi_q2_ellipticity(phi, q)
assert e1 == -0.016760092842656733
assert e2 == -0.019405192187382792
phi, q = 0, 0.9
e1, e2 = param_util.phi_q2_ellipticity(phi, q)
assert e1 == 0.05263157894736841
assert e2 == 0
def test_ellipticity2phi_q():
e1, e2 = 0.3,0
phi,q = param_util.ellipticity2phi_q(e1, e2)
assert phi == 0
assert q == 0.53846153846153844
# Works on np arrays as well
e1 = np.array([0.3, 0.9])
e2 = np.array([0.0, 0.9 ])
phi, q = param_util.ellipticity2phi_q(e1, e2)
assert np.allclose(phi, [0.0, 0.39269908], atol=1.e-08)
assert np.allclose(q, [0.53846153, 5.00025001e-05], atol=1.e-08)
def test_ellipticity2phi_q_symmetry():
phi,q = 1.5, 0.8
e1,e2 = param_util.phi_q2_ellipticity(phi, q)
phi_new,q_new = param_util.ellipticity2phi_q(e1, e2)
assert phi == phi_new
assert q == q_new
phi,q = -1.5, 0.8
e1,e2 = param_util.phi_q2_ellipticity(phi, q)
phi_new,q_new = param_util.ellipticity2phi_q(e1, e2)
assert phi == phi_new
assert q == q_new
e1, e2 = 0.1, -0.1
phi, q = param_util.ellipticity2phi_q(e1, e2)
e1_new, e2_new = param_util.phi_q2_ellipticity(phi, q)
npt.assert_almost_equal(e1, e1_new, decimal=10)
npt.assert_almost_equal(e2, e2_new, decimal=10)
e1, e2 = 2.99, -0.0
phi, q = param_util.ellipticity2phi_q(e1, e2)
print(phi, q)
e1_new, e2_new = param_util.phi_q2_ellipticity(phi, q)
phi_new, q_new = param_util.ellipticity2phi_q(e1_new, e2_new)
npt.assert_almost_equal(phi, phi_new, decimal=10)
npt.assert_almost_equal(q, q_new, decimal=10)
#npt.assert_almost_equal(e1, e1_new, decimal=10)
#npt.assert_almost_equal(e2, e2_new, decimal=10)
def test_transform_e1e2():
e1 = 0.01
e2 = 0.
x = 0.
y = 1.
x_, y_ = param_util.transform_e1e2(x, y, e1, e2, center_x=0, center_y=0)
x_new = (1-e1) * x - e2 * y
y_new = -e2 * x + (1 + e1) * y
det = np.sqrt((1 - e1) * (1 + e1) + e2 ** 2)
npt.assert_almost_equal(x_, x_new / det, decimal=5)
npt.assert_almost_equal(y_, y_new / det, decimal=5)
def test_phi_gamma_ellipticity():
phi = -1.
gamma = 0.1
e1, e2 = param_util.shear_polar2cartesian(phi, gamma)
print(e1, e2, 'e1, e2')
phi_out, gamma_out = param_util.shear_cartesian2polar(e1, e2)
assert phi == phi_out
assert gamma == gamma_out
def test_phi_gamma_ellipticity_2():
e1, e2 = -0.04, -0.01
phi, gamma = param_util.shear_cartesian2polar(e1, e2)
e1_out, e2_out = param_util.shear_polar2cartesian(phi, gamma)
npt.assert_almost_equal(e1, e1_out, decimal=10)
npt.assert_almost_equal(e2, e2_out, decimal=10)
def test_displace_eccentricity():
#x, y = np.array([1, 0]), np.array([0, 1])
x, y = util.make_grid(numPix=10, deltapix=1)
e1 = 0.1#.1
e2 = -0#.1
center_x, center_y = 0, 0
x_, y_ = param_util.transform_e1e2(x, y, e1, e2, center_x=center_x, center_y=center_y)
phi_G, q = param_util.ellipticity2phi_q(e1, e2)
x_shift = x - center_x
y_shift = y - center_y
cos_phi = np.cos(phi_G)
sin_phi = np.sin(phi_G)
print(cos_phi, sin_phi)
xt1 = cos_phi * x_shift + sin_phi * y_shift
xt2 = -sin_phi * x_shift + cos_phi * y_shift
xt1 *= np.sqrt(q)
xt2 /= np.sqrt(q)
npt.assert_almost_equal(x_, xt1, decimal=8)
npt.assert_almost_equal(y_, xt2, decimal=8)
x, y = util.make_grid(numPix=10, deltapix=1)
x, y = np.array([1, 0]), np.array([0, 1])
e1 = 0.1#.1#.1
e2 = 0
center_x, center_y = 0, 0
x_, y_ = param_util.transform_e1e2(x, y, e1, e2, center_x=center_x, center_y=center_y)
phi_G, q = param_util.ellipticity2phi_q(e1, e2)
x_shift = x - center_x
y_shift = y - center_y
cos_phi = np.cos(phi_G)
sin_phi = np.sin(phi_G)
print(cos_phi, sin_phi)
xt1 = cos_phi * x_shift + sin_phi * y_shift
xt2 = -sin_phi * x_shift + cos_phi * y_shift
xt1 *= np.sqrt(q)
xt2 /= np.sqrt(q)
npt.assert_almost_equal(x_, xt1, decimal=8)
npt.assert_almost_equal(y_, xt2, decimal=8)
if __name__ == '__main__':
pytest.main()
|
[
"lenstronomy.Util.param_util.cart2polar",
"lenstronomy.Util.param_util.shear_polar2cartesian",
"numpy.testing.assert_almost_equal",
"numpy.allclose",
"lenstronomy.Util.util.make_grid",
"lenstronomy.Util.param_util.shear_cartesian2polar",
"pytest.main",
"numpy.sin",
"numpy.array",
"numpy.cos",
"lenstronomy.Util.param_util.polar2cart",
"numpy.arctan",
"lenstronomy.Util.param_util.transform_e1e2",
"lenstronomy.Util.param_util.ellipticity2phi_q",
"lenstronomy.Util.param_util.phi_q2_ellipticity",
"numpy.sqrt"
] |
[((273, 320), 'lenstronomy.Util.param_util.cart2polar', 'param_util.cart2polar', (['x', 'y', 'center_x', 'center_y'], {}), '(x, y, center_x, center_y)\n', (294, 320), True, 'import lenstronomy.Util.param_util as param_util\n'), ((429, 445), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (437, 445), True, 'import numpy as np\n'), ((454, 470), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (462, 470), True, 'import numpy as np\n'), ((485, 532), 'lenstronomy.Util.param_util.cart2polar', 'param_util.cart2polar', (['x', 'y', 'center_x', 'center_y'], {}), '(x, y, center_x, center_y)\n', (506, 532), True, 'import lenstronomy.Util.param_util as param_util\n'), ((689, 705), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (697, 705), True, 'import numpy as np\n'), ((742, 779), 'lenstronomy.Util.param_util.polar2cart', 'param_util.polar2cart', (['r', 'phi', 'center'], {}), '(r, phi, center)\n', (763, 779), True, 'import lenstronomy.Util.param_util as param_util\n'), ((889, 926), 'lenstronomy.Util.param_util.phi_q2_ellipticity', 'param_util.phi_q2_ellipticity', (['phi', 'q'], {}), '(phi, q)\n', (918, 926), True, 'import lenstronomy.Util.param_util as param_util\n'), ((996, 1033), 'lenstronomy.Util.param_util.phi_q2_ellipticity', 'param_util.phi_q2_ellipticity', (['phi', 'q'], {}), '(phi, q)\n', (1025, 1033), True, 'import lenstronomy.Util.param_util as param_util\n'), ((1107, 1144), 'lenstronomy.Util.param_util.phi_q2_ellipticity', 'param_util.phi_q2_ellipticity', (['phi', 'q'], {}), '(phi, q)\n', (1136, 1144), True, 'import lenstronomy.Util.param_util as param_util\n'), ((1257, 1294), 'lenstronomy.Util.param_util.phi_q2_ellipticity', 'param_util.phi_q2_ellipticity', (['phi', 'q'], {}), '(phi, q)\n', (1286, 1294), True, 'import lenstronomy.Util.param_util as param_util\n'), ((1414, 1450), 'lenstronomy.Util.param_util.ellipticity2phi_q', 'param_util.ellipticity2phi_q', (['e1', 'e2'], {}), '(e1, e2)\n', (1442, 1450), True, 'import lenstronomy.Util.param_util as param_util\n'), ((1550, 1570), 'numpy.array', 'np.array', (['[0.3, 0.9]'], {}), '([0.3, 0.9])\n', (1558, 1570), True, 'import numpy as np\n'), ((1580, 1600), 'numpy.array', 'np.array', (['[0.0, 0.9]'], {}), '([0.0, 0.9])\n', (1588, 1600), True, 'import numpy as np\n'), ((1615, 1651), 'lenstronomy.Util.param_util.ellipticity2phi_q', 'param_util.ellipticity2phi_q', (['e1', 'e2'], {}), '(e1, e2)\n', (1643, 1651), True, 'import lenstronomy.Util.param_util as param_util\n'), ((1663, 1710), 'numpy.allclose', 'np.allclose', (['phi', '[0.0, 0.39269908]'], {'atol': '(1e-08)'}), '(phi, [0.0, 0.39269908], atol=1e-08)\n', (1674, 1710), True, 'import numpy as np\n'), ((1723, 1779), 'numpy.allclose', 'np.allclose', (['q', '[0.53846153, 5.00025001e-05]'], {'atol': '(1e-08)'}), '(q, [0.53846153, 5.00025001e-05], atol=1e-08)\n', (1734, 1779), True, 'import numpy as np\n'), ((1854, 1891), 'lenstronomy.Util.param_util.phi_q2_ellipticity', 'param_util.phi_q2_ellipticity', (['phi', 'q'], {}), '(phi, q)\n', (1883, 1891), True, 'import lenstronomy.Util.param_util as param_util\n'), ((1912, 1948), 'lenstronomy.Util.param_util.ellipticity2phi_q', 'param_util.ellipticity2phi_q', (['e1', 'e2'], {}), '(e1, e2)\n', (1940, 1948), True, 'import lenstronomy.Util.param_util as param_util\n'), ((2032, 2069), 'lenstronomy.Util.param_util.phi_q2_ellipticity', 'param_util.phi_q2_ellipticity', (['phi', 'q'], {}), '(phi, q)\n', (2061, 2069), True, 'import lenstronomy.Util.param_util as param_util\n'), ((2090, 2126), 
'lenstronomy.Util.param_util.ellipticity2phi_q', 'param_util.ellipticity2phi_q', (['e1', 'e2'], {}), '(e1, e2)\n', (2118, 2126), True, 'import lenstronomy.Util.param_util as param_util\n'), ((2212, 2248), 'lenstronomy.Util.param_util.ellipticity2phi_q', 'param_util.ellipticity2phi_q', (['e1', 'e2'], {}), '(e1, e2)\n', (2240, 2248), True, 'import lenstronomy.Util.param_util as param_util\n'), ((2270, 2307), 'lenstronomy.Util.param_util.phi_q2_ellipticity', 'param_util.phi_q2_ellipticity', (['phi', 'q'], {}), '(phi, q)\n', (2299, 2307), True, 'import lenstronomy.Util.param_util as param_util\n'), ((2312, 2359), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['e1', 'e1_new'], {'decimal': '(10)'}), '(e1, e1_new, decimal=10)\n', (2335, 2359), True, 'import numpy.testing as npt\n'), ((2364, 2411), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['e2', 'e2_new'], {'decimal': '(10)'}), '(e2, e2_new, decimal=10)\n', (2387, 2411), True, 'import numpy.testing as npt\n'), ((2450, 2486), 'lenstronomy.Util.param_util.ellipticity2phi_q', 'param_util.ellipticity2phi_q', (['e1', 'e2'], {}), '(e1, e2)\n', (2478, 2486), True, 'import lenstronomy.Util.param_util as param_util\n'), ((2526, 2563), 'lenstronomy.Util.param_util.phi_q2_ellipticity', 'param_util.phi_q2_ellipticity', (['phi', 'q'], {}), '(phi, q)\n', (2555, 2563), True, 'import lenstronomy.Util.param_util as param_util\n'), ((2585, 2629), 'lenstronomy.Util.param_util.ellipticity2phi_q', 'param_util.ellipticity2phi_q', (['e1_new', 'e2_new'], {}), '(e1_new, e2_new)\n', (2613, 2629), True, 'import lenstronomy.Util.param_util as param_util\n'), ((2634, 2683), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['phi', 'phi_new'], {'decimal': '(10)'}), '(phi, phi_new, decimal=10)\n', (2657, 2683), True, 'import numpy.testing as npt\n'), ((2688, 2733), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['q', 'q_new'], {'decimal': '(10)'}), '(q, q_new, decimal=10)\n', (2711, 2733), True, 'import numpy.testing as npt\n'), ((2930, 2993), 'lenstronomy.Util.param_util.transform_e1e2', 'param_util.transform_e1e2', (['x', 'y', 'e1', 'e2'], {'center_x': '(0)', 'center_y': '(0)'}), '(x, y, e1, e2, center_x=0, center_y=0)\n', (2955, 2993), True, 'import lenstronomy.Util.param_util as param_util\n'), ((3071, 3109), 'numpy.sqrt', 'np.sqrt', (['((1 - e1) * (1 + e1) + e2 ** 2)'], {}), '((1 - e1) * (1 + e1) + e2 ** 2)\n', (3078, 3109), True, 'import numpy as np\n'), ((3114, 3165), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['x_', '(x_new / det)'], {'decimal': '(5)'}), '(x_, x_new / det, decimal=5)\n', (3137, 3165), True, 'import numpy.testing as npt\n'), ((3170, 3221), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['y_', '(y_new / det)'], {'decimal': '(5)'}), '(y_, y_new / det, decimal=5)\n', (3193, 3221), True, 'import numpy.testing as npt\n'), ((3301, 3345), 'lenstronomy.Util.param_util.shear_polar2cartesian', 'param_util.shear_polar2cartesian', (['phi', 'gamma'], {}), '(phi, gamma)\n', (3333, 3345), True, 'import lenstronomy.Util.param_util as param_util\n'), ((3399, 3439), 'lenstronomy.Util.param_util.shear_cartesian2polar', 'param_util.shear_cartesian2polar', (['e1', 'e2'], {}), '(e1, e2)\n', (3431, 3439), True, 'import lenstronomy.Util.param_util as param_util\n'), ((3577, 3617), 'lenstronomy.Util.param_util.shear_cartesian2polar', 'param_util.shear_cartesian2polar', (['e1', 'e2'], {}), '(e1, e2)\n', (3609, 3617), True, 'import lenstronomy.Util.param_util as 
param_util\n'), ((3640, 3684), 'lenstronomy.Util.param_util.shear_polar2cartesian', 'param_util.shear_polar2cartesian', (['phi', 'gamma'], {}), '(phi, gamma)\n', (3672, 3684), True, 'import lenstronomy.Util.param_util as param_util\n'), ((3689, 3736), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['e1', 'e1_out'], {'decimal': '(10)'}), '(e1, e1_out, decimal=10)\n', (3712, 3736), True, 'import numpy.testing as npt\n'), ((3741, 3788), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['e2', 'e2_out'], {'decimal': '(10)'}), '(e2, e2_out, decimal=10)\n', (3764, 3788), True, 'import numpy.testing as npt\n'), ((3883, 3920), 'lenstronomy.Util.util.make_grid', 'util.make_grid', ([], {'numPix': '(10)', 'deltapix': '(1)'}), '(numPix=10, deltapix=1)\n', (3897, 3920), False, 'from lenstronomy.Util import util\n'), ((3995, 4072), 'lenstronomy.Util.param_util.transform_e1e2', 'param_util.transform_e1e2', (['x', 'y', 'e1', 'e2'], {'center_x': 'center_x', 'center_y': 'center_y'}), '(x, y, e1, e2, center_x=center_x, center_y=center_y)\n', (4020, 4072), True, 'import lenstronomy.Util.param_util as param_util\n'), ((4089, 4125), 'lenstronomy.Util.param_util.ellipticity2phi_q', 'param_util.ellipticity2phi_q', (['e1', 'e2'], {}), '(e1, e2)\n', (4117, 4125), True, 'import lenstronomy.Util.param_util as param_util\n'), ((4195, 4208), 'numpy.cos', 'np.cos', (['phi_G'], {}), '(phi_G)\n', (4201, 4208), True, 'import numpy as np\n'), ((4223, 4236), 'numpy.sin', 'np.sin', (['phi_G'], {}), '(phi_G)\n', (4229, 4236), True, 'import numpy as np\n'), ((4374, 4384), 'numpy.sqrt', 'np.sqrt', (['q'], {}), '(q)\n', (4381, 4384), True, 'import numpy as np\n'), ((4396, 4406), 'numpy.sqrt', 'np.sqrt', (['q'], {}), '(q)\n', (4403, 4406), True, 'import numpy as np\n'), ((4411, 4454), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['x_', 'xt1'], {'decimal': '(8)'}), '(x_, xt1, decimal=8)\n', (4434, 4454), True, 'import numpy.testing as npt\n'), ((4459, 4502), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['y_', 'xt2'], {'decimal': '(8)'}), '(y_, xt2, decimal=8)\n', (4482, 4502), True, 'import numpy.testing as npt\n'), ((4516, 4553), 'lenstronomy.Util.util.make_grid', 'util.make_grid', ([], {'numPix': '(10)', 'deltapix': '(1)'}), '(numPix=10, deltapix=1)\n', (4530, 4553), False, 'from lenstronomy.Util import util\n'), ((4673, 4750), 'lenstronomy.Util.param_util.transform_e1e2', 'param_util.transform_e1e2', (['x', 'y', 'e1', 'e2'], {'center_x': 'center_x', 'center_y': 'center_y'}), '(x, y, e1, e2, center_x=center_x, center_y=center_y)\n', (4698, 4750), True, 'import lenstronomy.Util.param_util as param_util\n'), ((4767, 4803), 'lenstronomy.Util.param_util.ellipticity2phi_q', 'param_util.ellipticity2phi_q', (['e1', 'e2'], {}), '(e1, e2)\n', (4795, 4803), True, 'import lenstronomy.Util.param_util as param_util\n'), ((4873, 4886), 'numpy.cos', 'np.cos', (['phi_G'], {}), '(phi_G)\n', (4879, 4886), True, 'import numpy as np\n'), ((4901, 4914), 'numpy.sin', 'np.sin', (['phi_G'], {}), '(phi_G)\n', (4907, 4914), True, 'import numpy as np\n'), ((5052, 5062), 'numpy.sqrt', 'np.sqrt', (['q'], {}), '(q)\n', (5059, 5062), True, 'import numpy as np\n'), ((5074, 5084), 'numpy.sqrt', 'np.sqrt', (['q'], {}), '(q)\n', (5081, 5084), True, 'import numpy as np\n'), ((5089, 5132), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['x_', 'xt1'], {'decimal': '(8)'}), '(x_, xt1, decimal=8)\n', (5112, 5132), True, 'import numpy.testing as npt\n'), ((5137, 5180), 
'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['y_', 'xt2'], {'decimal': '(8)'}), '(y_, xt2, decimal=8)\n', (5160, 5180), True, 'import numpy.testing as npt\n'), ((5214, 5227), 'pytest.main', 'pytest.main', ([], {}), '()\n', (5225, 5227), False, 'import pytest\n'), ((337, 347), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (344, 347), True, 'import numpy as np\n'), ((379, 391), 'numpy.arctan', 'np.arctan', (['(1)'], {}), '(1)\n', (388, 391), True, 'import numpy as np\n'), ((552, 562), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (559, 562), True, 'import numpy as np\n'), ((597, 609), 'numpy.arctan', 'np.arctan', (['(1)'], {}), '(1)\n', (606, 609), True, 'import numpy as np\n'), ((4565, 4581), 'numpy.array', 'np.array', (['[1, 0]'], {}), '([1, 0])\n', (4573, 4581), True, 'import numpy as np\n'), ((4583, 4599), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (4591, 4599), True, 'import numpy as np\n')]
|
"""
.. _tut-fnirs-glm-components:
GLM and Design Matrix Parameters
================================
This tutorial describes the various design choices available when analysing
fNIRS data with a GLM approach.
.. sidebar:: Nilearn
If you use MNE-NIRS to conduct a GLM analysis please cite Nilearn.
This package relies on Nilearn for the underlying computation.
Without Nilearn this would not be possible.
For how to accurately cite Nilearn see:
http://nilearn.github.io/authors.html#citing
There are subtle differences between the GLM analysis procedures
available in the different fNIRS software packages (Homer, NIRS-SPM, etc).
This document aims to clarify the features available for GLM analysis
in the MNE-NIRS software, and demonstrate how you can modify the default
analysis parameters to best suit your experiment.
It also endeavours to motivate some of the design choices that were made
when designing this software.
Please raise a GitHub issue if there is an analysis design you would
like to use but can not determine how to do with MNE-NIRS.
The MNE-NIRS GLM analysis framework is entirely based on the Nilearn package.
Their excellent software forms the basis of the analysis described in this tutorial.
As such, you may also wish to read
`their documentation <http://nilearn.github.io>`__
to familiarise yourself with different concepts used in MNE-NIRS.
Specifically this tutorial is heavily based on the following Nilearn examples,
but placed within an fNIRS context.
* `Nilearn: Understanding parameters of the first-level model <http://nilearn.github.io/auto_examples/04_glm_first_level/plot_first_level_details.html>`__.
* `Nilearn: Example of hemodynamic response functions <https://nilearn.github.io/auto_examples/04_glm_first_level/plot_hrf.html>`__.
Accordingly, in this tutorial we will access nilearn functions directly to illustrate
various choices available in your analysis.
However, this is just to illustrate various points. In reality (see all other tutorials),
MNE-NIRS will wrap all required Nilearn functions so you don't need to access them directly.
.. contents:: Page contents
:local:
:depth: 2
"""
# sphinx_gallery_thumbnail_number = 1
# Authors: <NAME> <<EMAIL>>
#
# License: BSD (3-clause)
# Import common libraries
import os
import numpy as np
import mne
# Import MNE-NIRS processing
from mne_nirs.experimental_design import make_first_level_design_matrix, \
longest_inter_annotation_interval, drift_high_pass
# Import Nilearn
from nilearn.glm import first_level
from nilearn.plotting import plot_design_matrix
# Import Plotting Library
import matplotlib.pyplot as plt
import matplotlib as mpl
# %%
# Haemodynamic Response Function
# ---------------------------------------------------------------------
#
# Various Haemodynamic Response Functions (HRFs) are provided for use
# when analysing your data. A summary of these functions in the context
# of fMRI is provided in the Nilearn tutorial
# `Nilearn: Example of hemodynamic response functions. <https://nilearn.github.io/auto_examples/04_glm_first_level/plot_hrf.html>`__.
# This example heavily borrows from that example but expands the description
# within an fNIRS context.
#
# To illustrate underlying concepts we will use Nilearn functions directly,
# but for analysing actual data you should use the MNE-NIRS
# :func:`mne_nirs.experimental_design.make_first_level_design_matrix`
# wrapper.
# %%
# HRF Model Selection
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# .. sidebar:: FIR Models
#
# MNE-NIRS also supports FIR GLM models.
# See :ref:`MNE-NIRS FIR GLM tutorial <tut-fnirs-fir>`.
#
# Two standard HRF models are provided. The SPM and Glover models.
# These differ in their response dynamics.
# Both are plotted on top of each other below for comparison.
# Note that they differ in their peak timing and undershoot.
time_length = 30
glover_timecourse = first_level.glover_hrf(1, oversampling=50, time_length=time_length)
spm_timecourse = first_level.spm_hrf(1, oversampling=50, time_length=time_length)
sample_times = np.linspace(0, time_length, num=len(glover_timecourse))
plt.plot(sample_times, glover_timecourse, label="Glover")
plt.plot(sample_times, spm_timecourse, label="SPM")
plt.xlabel("Time (s)")
plt.ylabel("Amplitude (AU)")
plt.legend()
# %%
# Regressor Computation
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# These functions are not used directly in the GLM analysis.
# Instead they are used as the basis to compute a regressor which is
# utilised in the GLM fit.
# This is done by convolving the HRF model with a boxcar function that
# distills information
# about the experimental design. Specifically the stimulus onset times
# are used to indicate when a response begins, and a duration is used
# to specify the time over which the model should be convolved.
#
# Modifying the duration changes the regressor timecourse. Below we demonstrate
# how this varies for several duration values with the Glover HRF.
# Convenient functions so we don't need to repeat code below
def generate_stim(onset, amplitude, duration, hrf_model, maxtime=30):
# Generate signal with specified duration and onset
frame_times = np.linspace(0, maxtime, 601)
exp_condition = np.array((onset, duration, amplitude)).reshape(3, 1)
stim = np.zeros_like(frame_times)
stim[(frame_times > onset) * (frame_times <= onset + duration)] = amplitude
signal, name = first_level.compute_regressor(
exp_condition, hrf_model, frame_times, con_id="main", oversampling=16
)
return frame_times, stim, signal
def plot_regressor(onset, amplitude, duration, hrf_model):
frame_times, stim, signal = generate_stim(
onset, amplitude, duration, hrf_model)
plt.fill(frame_times, stim, "k", alpha=0.5, label="stimulus")
plt.plot(frame_times, signal.T[0], label="Regressor")
plt.xlabel("Time (s)")
plt.ylabel("Amplitude (AU)")
plt.legend(loc=1)
plt.title(hrf_model)
return None
# Generate an event of 1 second duration that occurs at time zero.
onset, amplitude, duration = 0.0, 1.0, 1.0
hrf_model = "glover"
plot_regressor(onset, amplitude, duration, hrf_model)
# %%
#
# If the duration is increased we see the resulting regressor
# is modified, and the transformation is not a simple scaling.
#
# For a 3 second duration:
duration = 3
plot_regressor(onset, amplitude, duration, hrf_model)
# %%
#
# Or for a 5 second duration:
duration = 5
plot_regressor(onset, amplitude, duration, hrf_model)
# %%
#
# Or for a 15 second duration:
duration = 15
plot_regressor(onset, amplitude, duration, hrf_model)
# %%
#
# We can plot multiple durations together to see how the
# resulting regressor varies as a function of this parameter.
cmap = mpl.cm.viridis
norm = mpl.colors.Normalize(vmin=0, vmax=40)
for n in [1, 3, 5, 10, 15, 20, 25, 30, 35]:
frame_times, stim, signal = generate_stim(
onset, amplitude, n, hrf_model, maxtime=50)
plt.plot(frame_times, signal.T[0], label="Regressor", c=cmap(norm(n)))
plt.xlabel("Time (s)")
plt.ylabel("Amplitude (AU)")
plt.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=cmap))
# %%
# Inclusion in Design matrix
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# .. sidebar:: Derivative and dispersion terms
#
# You can also include derivative and dispersion terms to model
# differences between your data and the model. This is done by simply
# specifying your selected model plus the additional terms.
# For example, ``spm + derivative`` or
# ``glover + derivative + dispersion``.
#
# As mentioned above, we don't directly compute these regressors for
# each condition. Instead the function ``make_first_level_design_matrix``
# conveniently does this for us.
#
# As an example we will import a measurement and generate a
# design matrix for it. We will specify that we wish to use a Glover
# HRF convolved with a 3 second duration.
# See the :ref:`MNE-NIRS fNIRS GLM tutorial <tut-fnirs-hrf>` for more details.
#
# First we import the example data, crop to just the first few minutes,
# and give names to the annotations.
fnirs_data_folder = mne.datasets.fnirs_motor.data_path()
fnirs_raw_dir = os.path.join(fnirs_data_folder, 'Participant-1')
raw_intensity = mne.io.read_raw_nirx(fnirs_raw_dir).load_data().crop(tmax=300)
# raw_intensity.resample(0.7)
raw_intensity.annotations.rename({'1.0': 'Control',
'2.0': 'Tapping/Left',
'3.0': 'Tapping/Right'})
raw_intensity.annotations.delete(raw_intensity.annotations.description == '15.0')
raw_intensity.annotations.set_durations(5)
# %%
#
# Next we generate the design matrix and plot it.
# This representation of the regressor is transposed,
# time goes down the vertical
# axis and is specified in scan number (fMRI hangover) or sample.
# There is no colorbar for this plot, as specified in Nilearn.
#
# We can see that when each event occurs the model value increases before returning to baseline.
# This is the same information as was shown in the time courses above, except displayed differently
# with color representing amplitude.
design_matrix = make_first_level_design_matrix(raw_intensity,
# Ignore drift model for now, see section below
drift_model='polynomial',
drift_order=0,
# Here we specify the HRF and duration
hrf_model='glover',
stim_dur=3.0)
fig, ax1 = plt.subplots(figsize=(10, 6), nrows=1, ncols=1)
fig = plot_design_matrix(design_matrix, ax=ax1)
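# %%
#
# A brief sketch added here (not in the original tutorial): as the sidebar
# above notes, derivative and dispersion terms are requested by appending them
# to the HRF name. The call below reuses the same measurement and only swaps
# the ``hrf_model`` string, so the design matrix gains one extra regressor per
# condition for the temporal derivative.
design_matrix_deriv = make_first_level_design_matrix(raw_intensity,
                                                    drift_model='polynomial',
                                                    drift_order=0,
                                                    hrf_model='glover + derivative',
                                                    stim_dur=3.0)
fig, ax1 = plt.subplots(figsize=(10, 6), nrows=1, ncols=1)
fig = plot_design_matrix(design_matrix_deriv, ax=ax1)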
# %%
#
# As before we can explore the effect of modifying the duration,
# the resulting regressor for each annotation is elongated.
design_matrix = make_first_level_design_matrix(raw_intensity,
# Ignore drift model for now, see section below
drift_model='polynomial',
drift_order=0,
# Here we specify the HRF and duration
hrf_model='glover',
stim_dur=13.0)
fig, ax1 = plt.subplots(figsize=(10, 6), nrows=1, ncols=1)
fig = plot_design_matrix(design_matrix, ax=ax1)
# %%
#
# Depending on your experimental design the resulting responses
# may overlap (for example an event related design).
# This is not an issue, the design matrix can handle overlapping responses.
design_matrix = make_first_level_design_matrix(raw_intensity,
# Ignore drift model for now, see section below
drift_model='polynomial',
drift_order=0,
# Here we specify the HRF and duration
hrf_model='glover',
stim_dur=30.0)
fig, ax1 = plt.subplots(figsize=(10, 6), nrows=1, ncols=1)
fig = plot_design_matrix(design_matrix, ax=ax1)
# %%
# Drift Regressors
# ---------------------------------------------------------------------
#
# Aspects of the measured signal may change over time in a manner
# unrelated to the neural response we wish to measure.
# For example, the measurement room may warm up and result in a steady
# increase in the signal over the measurement duration.
# These signal changes that are unrelated to our feature of interest are
# termed drifts, and can be included in the design matrix and the GLM
# fitting as drift regressors.
#
# In the examples above a single drift regressor was used to model a constant
# offset in the data. This is also termed a zero order polynomial regressor.
# Two types of regressors are provided for in MNE-NIRS thanks to Nilearn.
# Polynomial and cosine drift regressors.
#
# .. note::
#
# Remember that the GLM can fit a negative coefficient,
# so a decreasing drift can be modeled by the increasing drift
# regressor with a negative coefficient.
# %%
# Polynomial Drift Regressors
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# In the example above a polynomial drift regressor is included.
# In this case we can specify the order of the polynomials to be included.
# A zero order polynomial will fit a constant, a first order will fit an
# increasing function, and so on.
# As an example we demonstrate how to include up to a fifth order polynomial.
# You can observe that with increasing polynomial order,
# higher frequency components will be regressed from the signal.
design_matrix = make_first_level_design_matrix(raw_intensity,
drift_model='polynomial',
drift_order=5)
fig, ax1 = plt.subplots(figsize=(10, 6), nrows=1, ncols=1)
fig = plot_design_matrix(design_matrix, ax=ax1)
# %%
# Cosine Drift Regressors
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# An alternative way to specify drift regressors is via the cosine drift model.
# This may be more intuitive as you can specify regressors up to a certain cut off
# frequency. Effectively regressing out frequency components below a limit,
# which may be interpreted as a high pass filter.
# In the example below we demonstrate how to regress our signals up to 0.01 Hz.
# We observe that the function has included 6 drift regressors in the design matrix.
design_matrix = make_first_level_design_matrix(raw_intensity,
drift_model='cosine',
high_pass=0.01)
fig, ax1 = plt.subplots(figsize=(10, 6), nrows=1, ncols=1)
fig = plot_design_matrix(design_matrix, ax=ax1)
# %%
#
# As described above, including additional regressor components will remove
# higher frequency components. So we can increase the high pass cut off and
# this should add more regressors.
design_matrix = make_first_level_design_matrix(raw_intensity,
drift_model='cosine',
high_pass=0.03)
fig, ax1 = plt.subplots(figsize=(10, 6), nrows=1, ncols=1)
fig = plot_design_matrix(design_matrix, ax=ax1)
# %%
# Selecting Drift Regressors
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# The aim of the drift regressors is to remove signal components unrelated
# to the expected neural response. As the expected response can be computed
# based on annotation timing and expected brain responses
# (see :ref:`frequency commentary <tut-fnirs-freq>`)
# the high pass cut off can be set on first principles.
#
# The Nilearn documentation states that
# "The cutoff period (1/high_pass) should be set as the longest period between two trials of the same condition multiplied by 2.
# For instance, if the longest period is 32s, the high_pass frequency shall be 1/64 Hz ~ 0.016 Hz."
# `(reference) <http://nilearn.github.io/auto_examples/04_glm_first_level/plot_first_level_details.html#changing-the-drift-model>`__.
#
# To assist in selecting a high pass value a few convenience functions are included in MNE-NIRS.
# First we can query what the longest ISI is per annotation, but first we must be sure
# to remove annotations we aren't interested in (in this experiment the trigger
# 15 is not of interest).
raw_original = mne.io.read_raw_nirx(fnirs_raw_dir)
raw_original.annotations.delete(raw_original.annotations.description == '15.0')
isis, names = longest_inter_annotation_interval(raw_original)
print(isis)
# %%
#
# We see that the longest period between two trials is 435 seconds. Which multiplied
# by two is 870 seconds. So a high pass value of 1/870 or 0.001 Hz is appropriate.
# We can also use the function
# :func:`mne_nirs.experimental_design.make_first_level_design_matrix`
# to suggest the high pass value. Note, however, that you should not blindly follow
# this function's suggestion, as each experiment is different. Instead use this as
# a sanity check on your own calculations.
print(drift_high_pass(raw_original))
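# %%
#
# As a quick hand check (a sketch added here, not in the original tutorial),
# the rule quoted above can be applied directly to the longest ISI reported
# a few cells earlier; the result should be close to the value printed by
# ``drift_high_pass`` above.
longest_isi = 435  # seconds, read off the printed ISIs above
print(1 / (2 * longest_isi))  # ~0.0011 Hz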
# %%
#
# For example, if all conditions were evoking the same response it may make more
# sense to include them as a single condition when computing the ISI.
# This would be achieved by renaming the triggers.
raw_original.annotations.rename({'2.0': 'Tapping', '3.0': 'Tapping'})
raw_original.annotations.delete(raw_original.annotations.description == '1.0')
isis, names = longest_inter_annotation_interval(raw_original)
print(isis)
print(drift_high_pass(raw_original))
|
[
"matplotlib.pyplot.title",
"nilearn.glm.first_level.spm_hrf",
"os.path.join",
"numpy.zeros_like",
"matplotlib.colors.Normalize",
"nilearn.plotting.plot_design_matrix",
"matplotlib.cm.ScalarMappable",
"nilearn.glm.first_level.compute_regressor",
"nilearn.glm.first_level.glover_hrf",
"numpy.linspace",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.legend",
"mne.io.read_raw_nirx",
"matplotlib.pyplot.ylabel",
"mne_nirs.experimental_design.make_first_level_design_matrix",
"mne_nirs.experimental_design.drift_high_pass",
"mne_nirs.experimental_design.longest_inter_annotation_interval",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.fill",
"mne.datasets.fnirs_motor.data_path",
"numpy.array",
"matplotlib.pyplot.xlabel"
] |
[((3942, 4009), 'nilearn.glm.first_level.glover_hrf', 'first_level.glover_hrf', (['(1)'], {'oversampling': '(50)', 'time_length': 'time_length'}), '(1, oversampling=50, time_length=time_length)\n', (3964, 4009), False, 'from nilearn.glm import first_level\n'), ((4027, 4091), 'nilearn.glm.first_level.spm_hrf', 'first_level.spm_hrf', (['(1)'], {'oversampling': '(50)', 'time_length': 'time_length'}), '(1, oversampling=50, time_length=time_length)\n', (4046, 4091), False, 'from nilearn.glm import first_level\n'), ((4165, 4222), 'matplotlib.pyplot.plot', 'plt.plot', (['sample_times', 'glover_timecourse'], {'label': '"""Glover"""'}), "(sample_times, glover_timecourse, label='Glover')\n", (4173, 4222), True, 'import matplotlib.pyplot as plt\n'), ((4223, 4274), 'matplotlib.pyplot.plot', 'plt.plot', (['sample_times', 'spm_timecourse'], {'label': '"""SPM"""'}), "(sample_times, spm_timecourse, label='SPM')\n", (4231, 4274), True, 'import matplotlib.pyplot as plt\n'), ((4275, 4297), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (s)"""'], {}), "('Time (s)')\n", (4285, 4297), True, 'import matplotlib.pyplot as plt\n'), ((4298, 4326), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Amplitude (AU)"""'], {}), "('Amplitude (AU)')\n", (4308, 4326), True, 'import matplotlib.pyplot as plt\n'), ((4327, 4339), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4337, 4339), True, 'import matplotlib.pyplot as plt\n'), ((6843, 6880), 'matplotlib.colors.Normalize', 'mpl.colors.Normalize', ([], {'vmin': '(0)', 'vmax': '(40)'}), '(vmin=0, vmax=40)\n', (6863, 6880), True, 'import matplotlib as mpl\n'), ((7101, 7123), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (s)"""'], {}), "('Time (s)')\n", (7111, 7123), True, 'import matplotlib.pyplot as plt\n'), ((7124, 7152), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Amplitude (AU)"""'], {}), "('Amplitude (AU)')\n", (7134, 7152), True, 'import matplotlib.pyplot as plt\n'), ((8220, 8256), 'mne.datasets.fnirs_motor.data_path', 'mne.datasets.fnirs_motor.data_path', ([], {}), '()\n', (8254, 8256), False, 'import mne\n'), ((8273, 8321), 'os.path.join', 'os.path.join', (['fnirs_data_folder', '"""Participant-1"""'], {}), "(fnirs_data_folder, 'Participant-1')\n", (8285, 8321), False, 'import os\n'), ((9249, 9373), 'mne_nirs.experimental_design.make_first_level_design_matrix', 'make_first_level_design_matrix', (['raw_intensity'], {'drift_model': '"""polynomial"""', 'drift_order': '(0)', 'hrf_model': '"""glover"""', 'stim_dur': '(3.0)'}), "(raw_intensity, drift_model='polynomial',\n drift_order=0, hrf_model='glover', stim_dur=3.0)\n", (9279, 9373), False, 'from mne_nirs.experimental_design import make_first_level_design_matrix, longest_inter_annotation_interval, drift_high_pass\n'), ((9751, 9798), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 6)', 'nrows': '(1)', 'ncols': '(1)'}), '(figsize=(10, 6), nrows=1, ncols=1)\n', (9763, 9798), True, 'import matplotlib.pyplot as plt\n'), ((9805, 9846), 'nilearn.plotting.plot_design_matrix', 'plot_design_matrix', (['design_matrix'], {'ax': 'ax1'}), '(design_matrix, ax=ax1)\n', (9823, 9846), False, 'from nilearn.plotting import plot_design_matrix\n'), ((9998, 10123), 'mne_nirs.experimental_design.make_first_level_design_matrix', 'make_first_level_design_matrix', (['raw_intensity'], {'drift_model': '"""polynomial"""', 'drift_order': '(0)', 'hrf_model': '"""glover"""', 'stim_dur': '(13.0)'}), "(raw_intensity, drift_model='polynomial',\n drift_order=0, hrf_model='glover', stim_dur=13.0)\n", (10028, 
10123), False, 'from mne_nirs.experimental_design import make_first_level_design_matrix, longest_inter_annotation_interval, drift_high_pass\n'), ((10501, 10548), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 6)', 'nrows': '(1)', 'ncols': '(1)'}), '(figsize=(10, 6), nrows=1, ncols=1)\n', (10513, 10548), True, 'import matplotlib.pyplot as plt\n'), ((10555, 10596), 'nilearn.plotting.plot_design_matrix', 'plot_design_matrix', (['design_matrix'], {'ax': 'ax1'}), '(design_matrix, ax=ax1)\n', (10573, 10596), False, 'from nilearn.plotting import plot_design_matrix\n'), ((10816, 10941), 'mne_nirs.experimental_design.make_first_level_design_matrix', 'make_first_level_design_matrix', (['raw_intensity'], {'drift_model': '"""polynomial"""', 'drift_order': '(0)', 'hrf_model': '"""glover"""', 'stim_dur': '(30.0)'}), "(raw_intensity, drift_model='polynomial',\n drift_order=0, hrf_model='glover', stim_dur=30.0)\n", (10846, 10941), False, 'from mne_nirs.experimental_design import make_first_level_design_matrix, longest_inter_annotation_interval, drift_high_pass\n'), ((11319, 11366), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 6)', 'nrows': '(1)', 'ncols': '(1)'}), '(figsize=(10, 6), nrows=1, ncols=1)\n', (11331, 11366), True, 'import matplotlib.pyplot as plt\n'), ((11373, 11414), 'nilearn.plotting.plot_design_matrix', 'plot_design_matrix', (['design_matrix'], {'ax': 'ax1'}), '(design_matrix, ax=ax1)\n', (11391, 11414), False, 'from nilearn.plotting import plot_design_matrix\n'), ((12970, 13060), 'mne_nirs.experimental_design.make_first_level_design_matrix', 'make_first_level_design_matrix', (['raw_intensity'], {'drift_model': '"""polynomial"""', 'drift_order': '(5)'}), "(raw_intensity, drift_model='polynomial',\n drift_order=5)\n", (13000, 13060), False, 'from mne_nirs.experimental_design import make_first_level_design_matrix, longest_inter_annotation_interval, drift_high_pass\n'), ((13163, 13210), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 6)', 'nrows': '(1)', 'ncols': '(1)'}), '(figsize=(10, 6), nrows=1, ncols=1)\n', (13175, 13210), True, 'import matplotlib.pyplot as plt\n'), ((13217, 13258), 'nilearn.plotting.plot_design_matrix', 'plot_design_matrix', (['design_matrix'], {'ax': 'ax1'}), '(design_matrix, ax=ax1)\n', (13235, 13258), False, 'from nilearn.plotting import plot_design_matrix\n'), ((13837, 13924), 'mne_nirs.experimental_design.make_first_level_design_matrix', 'make_first_level_design_matrix', (['raw_intensity'], {'drift_model': '"""cosine"""', 'high_pass': '(0.01)'}), "(raw_intensity, drift_model='cosine',\n high_pass=0.01)\n", (13867, 13924), False, 'from mne_nirs.experimental_design import make_first_level_design_matrix, longest_inter_annotation_interval, drift_high_pass\n'), ((14027, 14074), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 6)', 'nrows': '(1)', 'ncols': '(1)'}), '(figsize=(10, 6), nrows=1, ncols=1)\n', (14039, 14074), True, 'import matplotlib.pyplot as plt\n'), ((14081, 14122), 'nilearn.plotting.plot_design_matrix', 'plot_design_matrix', (['design_matrix'], {'ax': 'ax1'}), '(design_matrix, ax=ax1)\n', (14099, 14122), False, 'from nilearn.plotting import plot_design_matrix\n'), ((14336, 14423), 'mne_nirs.experimental_design.make_first_level_design_matrix', 'make_first_level_design_matrix', (['raw_intensity'], {'drift_model': '"""cosine"""', 'high_pass': '(0.03)'}), "(raw_intensity, drift_model='cosine',\n high_pass=0.03)\n", (14366, 14423), False, 'from mne_nirs.experimental_design 
import make_first_level_design_matrix, longest_inter_annotation_interval, drift_high_pass\n'), ((14526, 14573), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 6)', 'nrows': '(1)', 'ncols': '(1)'}), '(figsize=(10, 6), nrows=1, ncols=1)\n', (14538, 14573), True, 'import matplotlib.pyplot as plt\n'), ((14580, 14621), 'nilearn.plotting.plot_design_matrix', 'plot_design_matrix', (['design_matrix'], {'ax': 'ax1'}), '(design_matrix, ax=ax1)\n', (14598, 14621), False, 'from nilearn.plotting import plot_design_matrix\n'), ((15763, 15798), 'mne.io.read_raw_nirx', 'mne.io.read_raw_nirx', (['fnirs_raw_dir'], {}), '(fnirs_raw_dir)\n', (15783, 15798), False, 'import mne\n'), ((15894, 15941), 'mne_nirs.experimental_design.longest_inter_annotation_interval', 'longest_inter_annotation_interval', (['raw_original'], {}), '(raw_original)\n', (15927, 15941), False, 'from mne_nirs.experimental_design import make_first_level_design_matrix, longest_inter_annotation_interval, drift_high_pass\n'), ((16853, 16900), 'mne_nirs.experimental_design.longest_inter_annotation_interval', 'longest_inter_annotation_interval', (['raw_original'], {}), '(raw_original)\n', (16886, 16900), False, 'from mne_nirs.experimental_design import make_first_level_design_matrix, longest_inter_annotation_interval, drift_high_pass\n'), ((5256, 5284), 'numpy.linspace', 'np.linspace', (['(0)', 'maxtime', '(601)'], {}), '(0, maxtime, 601)\n', (5267, 5284), True, 'import numpy as np\n'), ((5369, 5395), 'numpy.zeros_like', 'np.zeros_like', (['frame_times'], {}), '(frame_times)\n', (5382, 5395), True, 'import numpy as np\n'), ((5496, 5601), 'nilearn.glm.first_level.compute_regressor', 'first_level.compute_regressor', (['exp_condition', 'hrf_model', 'frame_times'], {'con_id': '"""main"""', 'oversampling': '(16)'}), "(exp_condition, hrf_model, frame_times, con_id\n ='main', oversampling=16)\n", (5525, 5601), False, 'from nilearn.glm import first_level\n'), ((5808, 5869), 'matplotlib.pyplot.fill', 'plt.fill', (['frame_times', 'stim', '"""k"""'], {'alpha': '(0.5)', 'label': '"""stimulus"""'}), "(frame_times, stim, 'k', alpha=0.5, label='stimulus')\n", (5816, 5869), True, 'import matplotlib.pyplot as plt\n'), ((5874, 5927), 'matplotlib.pyplot.plot', 'plt.plot', (['frame_times', 'signal.T[0]'], {'label': '"""Regressor"""'}), "(frame_times, signal.T[0], label='Regressor')\n", (5882, 5927), True, 'import matplotlib.pyplot as plt\n'), ((5932, 5954), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (s)"""'], {}), "('Time (s)')\n", (5942, 5954), True, 'import matplotlib.pyplot as plt\n'), ((5959, 5987), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Amplitude (AU)"""'], {}), "('Amplitude (AU)')\n", (5969, 5987), True, 'import matplotlib.pyplot as plt\n'), ((5992, 6009), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(1)'}), '(loc=1)\n', (6002, 6009), True, 'import matplotlib.pyplot as plt\n'), ((6014, 6034), 'matplotlib.pyplot.title', 'plt.title', (['hrf_model'], {}), '(hrf_model)\n', (6023, 6034), True, 'import matplotlib.pyplot as plt\n'), ((7166, 7209), 'matplotlib.cm.ScalarMappable', 'mpl.cm.ScalarMappable', ([], {'norm': 'norm', 'cmap': 'cmap'}), '(norm=norm, cmap=cmap)\n', (7187, 7209), True, 'import matplotlib as mpl\n'), ((16447, 16476), 'mne_nirs.experimental_design.drift_high_pass', 'drift_high_pass', (['raw_original'], {}), '(raw_original)\n', (16462, 16476), False, 'from mne_nirs.experimental_design import make_first_level_design_matrix, longest_inter_annotation_interval, drift_high_pass\n'), ((16919, 16948), 
'mne_nirs.experimental_design.drift_high_pass', 'drift_high_pass', (['raw_original'], {}), '(raw_original)\n', (16934, 16948), False, 'from mne_nirs.experimental_design import make_first_level_design_matrix, longest_inter_annotation_interval, drift_high_pass\n'), ((5305, 5343), 'numpy.array', 'np.array', (['(onset, duration, amplitude)'], {}), '((onset, duration, amplitude))\n', (5313, 5343), True, 'import numpy as np\n'), ((8338, 8373), 'mne.io.read_raw_nirx', 'mne.io.read_raw_nirx', (['fnirs_raw_dir'], {}), '(fnirs_raw_dir)\n', (8358, 8373), False, 'import mne\n')]
|
# -*- coding: utf-8 -*-
r"""
=================================
Wasserstein unmixing with PyTorch
=================================
In this example we estimate mixing parameters from distributions that minimize
the Wasserstein distance. In other words we suppose that a target
distribution :math:`\mu^t` can be expressed as a weighted sum of source
distributions :math:`\mu^s_k` with the following model:
.. math::
\mu^t = \sum_{k=1}^K w_k\mu^s_k
where :math:`\mathbf{w}` is a vector of size :math:`K` and belongs in the
distribution simplex :math:`\Delta_K`.
In order to estimate this weight vector we propose to optimize the Wasserstein
distance between the model and the observed :math:`\mu^t` with respect to
the vector. This leads to the following optimization problem:
.. math::
\min_{\mathbf{w}\in\Delta_K} \quad W \left(\mu^t,\sum_{k=1}^K w_k\mu^s_k\right)
This minimization is done in this example with a simple projected gradient
descent in PyTorch. We use the automatic backend of POT that allows us to
compute the Wasserstein distance with :any:`ot.emd2` with
differentiable losses.
"""
# Author: <NAME> <<EMAIL>>
#
# License: MIT License
# sphinx_gallery_thumbnail_number = 2
import numpy as np
import matplotlib.pylab as pl
import ot
import torch
##############################################################################
# Generate data
# -------------
#%% Data
nt = 100
nt1 = 10 #
ns1 = 50
ns = 2 * ns1
rng = np.random.RandomState(2)
xt = rng.randn(nt, 2) * 0.2
xt[:nt1, 0] += 1
xt[nt1:, 1] += 1
xs1 = rng.randn(ns1, 2) * 0.2
xs1[:, 0] += 1
xs2 = rng.randn(ns1, 2) * 0.2
xs2[:, 1] += 1
xs = np.concatenate((xs1, xs2))
# Sample reweighting matrix H
H = np.zeros((ns, 2))
H[:ns1, 0] = 1 / ns1
H[ns1:, 1] = 1 / ns1
# each column sums to 1 and has weights only for samples from the
# corresponding source distribution
M = ot.dist(xs, xt)
##############################################################################
# Plot data
# ---------
#%% plot the distributions
pl.figure(1)
pl.scatter(xt[:, 0], xt[:, 1], label='Target $\mu^t$', alpha=0.5)
pl.scatter(xs1[:, 0], xs1[:, 1], label='Source $\mu^s_1$', alpha=0.5)
pl.scatter(xs2[:, 0], xs2[:, 1], label='Source $\mu^s_2$', alpha=0.5)
pl.title('Sources and Target distributions')
pl.legend()
##############################################################################
# Optimization of the model wrt the Wasserstein distance
# ------------------------------------------------------
#%% Weights optimization with gradient descent
# convert numpy arrays to torch tensors
H2 = torch.tensor(H)
M2 = torch.tensor(M)
# weights for the source distributions
w = torch.tensor(ot.unif(2), requires_grad=True)
# uniform weights for target
b = torch.tensor(ot.unif(nt))
lr = 2e-3 # learning rate
niter = 500 # number of iterations
losses = [] # loss along the iterations
# loss for the minimal Wasserstein estimator
def get_loss(w):
a = torch.mv(H2, w) # distribution reweighting
return ot.emd2(a, b, M2) # squared Wasserstein 2
for i in range(niter):
loss = get_loss(w)
losses.append(float(loss))
loss.backward()
with torch.no_grad():
w -= lr * w.grad # gradient step
w[:] = ot.utils.proj_simplex(w) # projection on the simplex
w.grad.zero_()
##############################################################################
# Estimated weights and convergence of the objective
# ---------------------------------------------------
we = w.detach().numpy()
print('Estimated mixture:', we)
pl.figure(2)
pl.semilogy(losses)
pl.grid()
pl.title('Wasserstein distance')
pl.xlabel("Iterations")
##############################################################################
# Plotting the reweighted source distribution
# ------------------------------------------
pl.figure(3)
# compute source weights
ws = H.dot(we)
pl.scatter(xt[:, 0], xt[:, 1], label='Target $\mu^t$', alpha=0.5)
pl.scatter(xs[:, 0], xs[:, 1], color='C3', s=ws * 20 * ns, label='Weighted sources $\sum_{k} w_k\mu^s_k$', alpha=0.5)
pl.title('Target and reweighted source distributions')
pl.legend()
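##############################################################################
# Sanity check on the estimate
# ----------------------------
# This short check is appended here as a sketch and is not part of the
# original example: the target above mixes nt1 = 10 of nt = 100 samples around
# the first mode and the rest around the second, so the estimated mixture
# should land close to [nt1 / nt, 1 - nt1 / nt] = [0.1, 0.9].
print('Expected mixture:', np.array([nt1 / nt, 1 - nt1 / nt]))
print('Estimated mixture:', we)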
|
[
"ot.emd2",
"torch.tensor",
"matplotlib.pylab.scatter",
"matplotlib.pylab.legend",
"ot.unif",
"ot.dist",
"numpy.zeros",
"numpy.random.RandomState",
"torch.mv",
"matplotlib.pylab.xlabel",
"ot.utils.proj_simplex",
"matplotlib.pylab.semilogy",
"matplotlib.pylab.title",
"torch.no_grad",
"matplotlib.pylab.grid",
"numpy.concatenate",
"matplotlib.pylab.figure"
] |
[((1453, 1477), 'numpy.random.RandomState', 'np.random.RandomState', (['(2)'], {}), '(2)\n', (1474, 1477), True, 'import numpy as np\n'), ((1639, 1665), 'numpy.concatenate', 'np.concatenate', (['(xs1, xs2)'], {}), '((xs1, xs2))\n', (1653, 1665), True, 'import numpy as np\n'), ((1701, 1718), 'numpy.zeros', 'np.zeros', (['(ns, 2)'], {}), '((ns, 2))\n', (1709, 1718), True, 'import numpy as np\n'), ((1869, 1884), 'ot.dist', 'ot.dist', (['xs', 'xt'], {}), '(xs, xt)\n', (1876, 1884), False, 'import ot\n'), ((2018, 2030), 'matplotlib.pylab.figure', 'pl.figure', (['(1)'], {}), '(1)\n', (2027, 2030), True, 'import matplotlib.pylab as pl\n'), ((2031, 2097), 'matplotlib.pylab.scatter', 'pl.scatter', (['xt[:, 0]', 'xt[:, 1]'], {'label': '"""Target $\\\\mu^t$"""', 'alpha': '(0.5)'}), "(xt[:, 0], xt[:, 1], label='Target $\\\\mu^t$', alpha=0.5)\n", (2041, 2097), True, 'import matplotlib.pylab as pl\n'), ((2097, 2167), 'matplotlib.pylab.scatter', 'pl.scatter', (['xs1[:, 0]', 'xs1[:, 1]'], {'label': '"""Source $\\\\mu^s_1$"""', 'alpha': '(0.5)'}), "(xs1[:, 0], xs1[:, 1], label='Source $\\\\mu^s_1$', alpha=0.5)\n", (2107, 2167), True, 'import matplotlib.pylab as pl\n'), ((2167, 2237), 'matplotlib.pylab.scatter', 'pl.scatter', (['xs2[:, 0]', 'xs2[:, 1]'], {'label': '"""Source $\\\\mu^s_2$"""', 'alpha': '(0.5)'}), "(xs2[:, 0], xs2[:, 1], label='Source $\\\\mu^s_2$', alpha=0.5)\n", (2177, 2237), True, 'import matplotlib.pylab as pl\n'), ((2237, 2281), 'matplotlib.pylab.title', 'pl.title', (['"""Sources and Target distributions"""'], {}), "('Sources and Target distributions')\n", (2245, 2281), True, 'import matplotlib.pylab as pl\n'), ((2282, 2293), 'matplotlib.pylab.legend', 'pl.legend', ([], {}), '()\n', (2291, 2293), True, 'import matplotlib.pylab as pl\n'), ((2584, 2599), 'torch.tensor', 'torch.tensor', (['H'], {}), '(H)\n', (2596, 2599), False, 'import torch\n'), ((2605, 2620), 'torch.tensor', 'torch.tensor', (['M'], {}), '(M)\n', (2617, 2620), False, 'import torch\n'), ((3551, 3563), 'matplotlib.pylab.figure', 'pl.figure', (['(2)'], {}), '(2)\n', (3560, 3563), True, 'import matplotlib.pylab as pl\n'), ((3564, 3583), 'matplotlib.pylab.semilogy', 'pl.semilogy', (['losses'], {}), '(losses)\n', (3575, 3583), True, 'import matplotlib.pylab as pl\n'), ((3584, 3593), 'matplotlib.pylab.grid', 'pl.grid', ([], {}), '()\n', (3591, 3593), True, 'import matplotlib.pylab as pl\n'), ((3594, 3626), 'matplotlib.pylab.title', 'pl.title', (['"""Wasserstein distance"""'], {}), "('Wasserstein distance')\n", (3602, 3626), True, 'import matplotlib.pylab as pl\n'), ((3627, 3650), 'matplotlib.pylab.xlabel', 'pl.xlabel', (['"""Iterations"""'], {}), "('Iterations')\n", (3636, 3650), True, 'import matplotlib.pylab as pl\n'), ((3822, 3834), 'matplotlib.pylab.figure', 'pl.figure', (['(3)'], {}), '(3)\n', (3831, 3834), True, 'import matplotlib.pylab as pl\n'), ((3877, 3943), 'matplotlib.pylab.scatter', 'pl.scatter', (['xt[:, 0]', 'xt[:, 1]'], {'label': '"""Target $\\\\mu^t$"""', 'alpha': '(0.5)'}), "(xt[:, 0], xt[:, 1], label='Target $\\\\mu^t$', alpha=0.5)\n", (3887, 3943), True, 'import matplotlib.pylab as pl\n'), ((3943, 4067), 'matplotlib.pylab.scatter', 'pl.scatter', (['xs[:, 0]', 'xs[:, 1]'], {'color': '"""C3"""', 's': '(ws * 20 * ns)', 'label': '"""Weighted sources $\\\\sum_{k} w_k\\\\mu^s_k$"""', 'alpha': '(0.5)'}), "(xs[:, 0], xs[:, 1], color='C3', s=ws * 20 * ns, label=\n 'Weighted sources $\\\\sum_{k} w_k\\\\mu^s_k$', alpha=0.5)\n", (3953, 4067), True, 'import matplotlib.pylab as pl\n'), ((4061, 4115), 
'matplotlib.pylab.title', 'pl.title', (['"""Target and reweighted source distributions"""'], {}), "('Target and reweighted source distributions')\n", (4069, 4115), True, 'import matplotlib.pylab as pl\n'), ((4116, 4127), 'matplotlib.pylab.legend', 'pl.legend', ([], {}), '()\n', (4125, 4127), True, 'import matplotlib.pylab as pl\n'), ((2678, 2688), 'ot.unif', 'ot.unif', (['(2)'], {}), '(2)\n', (2685, 2688), False, 'import ot\n'), ((2757, 2768), 'ot.unif', 'ot.unif', (['nt'], {}), '(nt)\n', (2764, 2768), False, 'import ot\n'), ((2948, 2963), 'torch.mv', 'torch.mv', (['H2', 'w'], {}), '(H2, w)\n', (2956, 2963), False, 'import torch\n'), ((3003, 3020), 'ot.emd2', 'ot.emd2', (['a', 'b', 'M2'], {}), '(a, b, M2)\n', (3010, 3020), False, 'import ot\n'), ((3157, 3172), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3170, 3172), False, 'import torch\n'), ((3231, 3255), 'ot.utils.proj_simplex', 'ot.utils.proj_simplex', (['w'], {}), '(w)\n', (3252, 3255), False, 'import ot\n')]
|
"""
GTSAM Copyright 2010-2018, Georgia Tech Research Corporation,
Atlanta, Georgia 30332-0415
All Rights Reserved
Authors: <NAME>, et al. (see THANKS for the full author list)
See LICENSE for the license information
Kinematics of three-link manipulator with GTSAM poses and product of exponential maps.
Author: <NAME>
"""
# pylint: disable=invalid-name, E1101
from __future__ import print_function
import math
import unittest
from functools import reduce
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D # pylint: disable=W0611
import gtsam
import gtsam.utils.plot as gtsam_plot
from gtsam import Pose2
from gtsam.utils.test_case import GtsamTestCase
def vector3(x, y, z):
"""Create 3D double numpy array."""
return np.array([x, y, z], dtype=np.float)
def compose(*poses):
"""Compose all Pose2 transforms given as arguments from left to right."""
return reduce((lambda x, y: x.compose(y)), poses)
def vee(M):
"""Pose2 vee operator."""
return vector3(M[0, 2], M[1, 2], M[1, 0])
def delta(g0, g1):
"""Difference between x,y,,theta components of SE(2) poses."""
return vector3(g1.x() - g0.x(), g1.y() - g0.y(), g1.theta() - g0.theta())
def trajectory(g0, g1, N=20):
""" Create an interpolated trajectory in SE(2), treating x,y, and theta separately.
g0 and g1 are the initial and final pose, respectively.
      N is the number of poses returned; np.linspace includes both endpoints,
      so the trajectory has N-1 intervals between g0 and g1.
"""
e = delta(g0, g1)
return [Pose2(g0.x()+e[0]*t, g0.y()+e[1]*t, g0.theta()+e[2]*t) for t in np.linspace(0, 1, N)]
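# A minimal, hedged usage sketch of trajectory() above (illustrative only; the
# helper below is never called): the returned poses advance linearly in x, y and
# theta from g0 to g1, with both endpoints included.
def _trajectory_example():
    """Illustrative sketch only, not part of the manipulator example."""
    g0 = Pose2(0, 0, 0)
    g1 = Pose2(3, 0, math.radians(90))
    path = trajectory(g0, g1, N=4)
    assert len(path) == 4  # np.linspace(0, 1, N) yields N samples
    return path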
class ThreeLinkArm(object):
"""Three-link arm class."""
def __init__(self):
self.L1 = 3.5
self.L2 = 3.5
self.L3 = 2.5
self.xi1 = vector3(0, 0, 1)
self.xi2 = vector3(self.L1, 0, 1)
self.xi3 = vector3(self.L1+self.L2, 0, 1)
self.sXt0 = Pose2(0, self.L1+self.L2 + self.L3, math.radians(90))
def fk(self, q):
""" Forward kinematics.
Takes numpy array of joint angles, in radians.
"""
sXl1 = Pose2(0, 0, math.radians(90))
l1Zl1 = Pose2(0, 0, q[0])
l1Xl2 = Pose2(self.L1, 0, 0)
l2Zl2 = Pose2(0, 0, q[1])
l2Xl3 = Pose2(self.L2, 0, 0)
l3Zl3 = Pose2(0, 0, q[2])
l3Xt = Pose2(self.L3, 0, 0)
return compose(sXl1, l1Zl1, l1Xl2, l2Zl2, l2Xl3, l3Zl3, l3Xt)
def jacobian(self, q):
""" Calculate manipulator Jacobian.
Takes numpy array of joint angles, in radians.
"""
a = q[0]+q[1]
b = a+q[2]
return np.array([[-self.L1*math.cos(q[0]) - self.L2*math.cos(a)-self.L3*math.cos(b),
-self.L1*math.cos(a)-self.L3*math.cos(b),
- self.L3*math.cos(b)],
[-self.L1*math.sin(q[0]) - self.L2*math.sin(a)-self.L3*math.sin(b),
-self.L1*math.sin(a)-self.L3*math.sin(b),
- self.L3*math.sin(b)],
[1, 1, 1]], np.float)
def poe(self, q):
""" Forward kinematics.
Takes numpy array of joint angles, in radians.
"""
l1Zl1 = Pose2.Expmap(self.xi1 * q[0])
l2Zl2 = Pose2.Expmap(self.xi2 * q[1])
l3Zl3 = Pose2.Expmap(self.xi3 * q[2])
return compose(l1Zl1, l2Zl2, l3Zl3, self.sXt0)
def con(self, q):
""" Forward kinematics, conjugation form.
Takes numpy array of joint angles, in radians.
"""
def expmap(x, y, theta):
"""Implement exponential map via conjugation with axis (x,y)."""
return compose(Pose2(x, y, 0), Pose2(0, 0, theta), Pose2(-x, -y, 0))
l1Zl1 = expmap(0.0, 0.0, q[0])
l2Zl2 = expmap(0.0, self.L1, q[1])
l3Zl3 = expmap(0.0, self.L1+self.L2, q[2])
return compose(l1Zl1, l2Zl2, l3Zl3, self.sXt0)
def ik(self, sTt_desired, e=1e-9):
""" Inverse kinematics.
Takes desired Pose2 of tool T with respect to base S.
            Optional: e, error norm threshold used to stop the iteration
"""
q = np.radians(vector3(30, -30, 45)) # well within workspace
error = vector3(100, 100, 100)
while np.linalg.norm(error) > e:
error = delta(sTt_desired, self.fk(q))
J = self.jacobian(q)
q -= np.dot(np.linalg.pinv(J), error)
# return result in interval [-pi,pi)
return np.remainder(q+math.pi, 2*math.pi)-math.pi
def manipulator_jacobian(self, q):
""" Calculate manipulator Jacobian.
Takes numpy array of joint angles, in radians.
Returns the manipulator Jacobian of differential twists. When multiplied with
a vector of joint velocities, will yield a single differential twist which is
the spatial velocity d(sTt)/dt * inv(sTt) of the end-effector pose.
Just like always, differential twists can be hatted and multiplied with spatial
coordinates of a point to give the spatial velocity of the point.
"""
l1Zl1 = Pose2.Expmap(self.xi1 * q[0])
l2Zl2 = Pose2.Expmap(self.xi2 * q[1])
# l3Zl3 = Pose2.Expmap(self.xi3 * q[2])
p1 = self.xi1
# p1 = Pose2().Adjoint(self.xi1)
sTl1 = l1Zl1
p2 = sTl1.Adjoint(self.xi2)
sTl2 = compose(l1Zl1, l2Zl2)
p3 = sTl2.Adjoint(self.xi3)
differential_twists = [p1, p2, p3]
return np.stack(differential_twists, axis=1)
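    # Hedged usage sketch for manipulator_jacobian() (illustrative only; `arm`
    # and the joint-velocity vector `dq` are assumed, not defined in this file):
    #     J = arm.manipulator_jacobian(q)
    #     twist = np.dot(J, dq)  # spatial velocity d(sTt)/dt * inv(sTt) as a twist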
def plot(self, fignum, q):
""" Plot arm.
Takes figure number, and numpy array of joint angles, in radians.
"""
fig = plt.figure(fignum)
axes = fig.gca()
sXl1 = Pose2(0, 0, math.radians(90))
p1 = sXl1.translation()
gtsam_plot.plot_pose2_on_axes(axes, sXl1)
def plot_line(p, g, color):
q = g.translation()
line = np.append(p[np.newaxis], q[np.newaxis], axis=0)
axes.plot(line[:, 0], line[:, 1], color)
return q
l1Zl1 = Pose2(0, 0, q[0])
l1Xl2 = Pose2(self.L1, 0, 0)
sTl2 = compose(sXl1, l1Zl1, l1Xl2)
p2 = plot_line(p1, sTl2, 'r-')
gtsam_plot.plot_pose2_on_axes(axes, sTl2)
l2Zl2 = Pose2(0, 0, q[1])
l2Xl3 = Pose2(self.L2, 0, 0)
sTl3 = compose(sTl2, l2Zl2, l2Xl3)
p3 = plot_line(p2, sTl3, 'g-')
gtsam_plot.plot_pose2_on_axes(axes, sTl3)
l3Zl3 = Pose2(0, 0, q[2])
l3Xt = Pose2(self.L3, 0, 0)
sTt = compose(sTl3, l3Zl3, l3Xt)
plot_line(p3, sTt, 'b-')
gtsam_plot.plot_pose2_on_axes(axes, sTt)
# Create common example configurations.
Q0 = vector3(0, 0, 0)
Q1 = np.radians(vector3(-30, -45, -90))
Q2 = np.radians(vector3(-90, 90, 0))
class TestPose2SLAMExample(GtsamTestCase):
"""Unit tests for functions used below."""
def setUp(self):
self.arm = ThreeLinkArm()
def assertPose2Equals(self, actual, expected, tol=1e-2):
"""Helper function that prints out actual and expected if not equal."""
equal = actual.equals(expected, tol)
if not equal:
raise self.failureException(
"Poses are not equal:\n{}!={}".format(actual, expected))
def test_fk_arm(self):
"""Make sure forward kinematics is correct for some known test configurations."""
# at rest
expected = Pose2(0, 2*3.5 + 2.5, math.radians(90))
sTt = self.arm.fk(Q0)
self.assertIsInstance(sTt, Pose2)
self.assertPose2Equals(sTt, expected)
# -30, -45, -90
expected = Pose2(5.78, 1.52, math.radians(-75))
sTt = self.arm.fk(Q1)
self.assertPose2Equals(sTt, expected)
def test_jacobian(self):
"""Test Jacobian calculation."""
# at rest
expected = np.array([[-9.5, -6, -2.5], [0, 0, 0], [1, 1, 1]], np.float)
J = self.arm.jacobian(Q0)
np.testing.assert_array_almost_equal(J, expected)
# at -90, 90, 0
expected = np.array([[-6, -6, -2.5], [3.5, 0, 0], [1, 1, 1]], np.float)
J = self.arm.jacobian(Q2)
np.testing.assert_array_almost_equal(J, expected)
def test_con_arm(self):
"""Make sure POE is correct for some known test configurations."""
# at rest
expected = Pose2(0, 2*3.5 + 2.5, math.radians(90))
sTt = self.arm.con(Q0)
self.assertIsInstance(sTt, Pose2)
self.assertPose2Equals(sTt, expected)
# -30, -45, -90
expected = Pose2(5.78, 1.52, math.radians(-75))
sTt = self.arm.con(Q1)
self.assertPose2Equals(sTt, expected)
def test_poe_arm(self):
"""Make sure POE is correct for some known test configurations."""
# at rest
expected = Pose2(0, 2*3.5 + 2.5, math.radians(90))
sTt = self.arm.poe(Q0)
self.assertIsInstance(sTt, Pose2)
self.assertPose2Equals(sTt, expected)
# -30, -45, -90
expected = Pose2(5.78, 1.52, math.radians(-75))
sTt = self.arm.poe(Q1)
self.assertPose2Equals(sTt, expected)
def test_ik(self):
"""Check iterative inverse kinematics function."""
# at rest
actual = self.arm.ik(Pose2(0, 2*3.5 + 2.5, math.radians(90)))
np.testing.assert_array_almost_equal(actual, Q0, decimal=2)
# -30, -45, -90
sTt_desired = Pose2(5.78, 1.52, math.radians(-75))
actual = self.arm.ik(sTt_desired)
self.assertPose2Equals(self.arm.fk(actual), sTt_desired)
np.testing.assert_array_almost_equal(actual, Q1, decimal=2)
def test_manipulator_jacobian(self):
"""Test Jacobian calculation."""
# at rest
expected = np.array([[0, 3.5, 7], [0, 0, 0], [1, 1, 1]], np.float)
J = self.arm.manipulator_jacobian(Q0)
np.testing.assert_array_almost_equal(J, expected)
# at -90, 90, 0
expected = np.array(
[[0, 0, 3.5], [0, -3.5, -3.5], [1, 1, 1]], np.float)
J = self.arm.manipulator_jacobian(Q2)
np.testing.assert_array_almost_equal(J, expected)
def run_example():
""" Use trajectory interpolation and then trajectory tracking a la Murray
to move a 3-link arm on a straight line.
"""
# Create arm
arm = ThreeLinkArm()
# Get initial pose using forward kinematics
q = np.radians(vector3(30, -30, 45))
sTt_initial = arm.fk(q)
# Create interpolated trajectory in task space to desired goal pose
sTt_goal = Pose2(2.4, 4.3, math.radians(0))
poses = trajectory(sTt_initial, sTt_goal, 50)
# Setup figure and plot initial pose
fignum = 0
fig = plt.figure(fignum)
axes = fig.gca()
axes.set_xlim(-5, 5)
axes.set_ylim(0, 10)
gtsam_plot.plot_pose2(fignum, arm.fk(q))
# For all poses in interpolated trajectory, calculate dq to move to next pose.
# We do this by calculating the local Jacobian J and doing dq = inv(J)*delta(sTt, pose).
for pose in poses:
sTt = arm.fk(q)
error = delta(sTt, pose)
J = arm.jacobian(q)
q += np.dot(np.linalg.inv(J), error)
arm.plot(fignum, q)
plt.pause(0.01)
plt.pause(10)
if __name__ == "__main__":
run_example()
unittest.main()
|
[
"unittest.main",
"numpy.stack",
"gtsam.Pose2",
"math.radians",
"numpy.remainder",
"math.sin",
"gtsam.Pose2.Expmap",
"numpy.append",
"matplotlib.pyplot.figure",
"numpy.array",
"numpy.linalg.norm",
"numpy.linspace",
"numpy.linalg.inv",
"math.cos",
"numpy.linalg.pinv",
"numpy.testing.assert_array_almost_equal",
"matplotlib.pyplot.pause",
"gtsam.utils.plot.plot_pose2_on_axes"
] |
[((775, 810), 'numpy.array', 'np.array', (['[x, y, z]'], {'dtype': 'np.float'}), '([x, y, z], dtype=np.float)\n', (783, 810), True, 'import numpy as np\n'), ((10685, 10703), 'matplotlib.pyplot.figure', 'plt.figure', (['fignum'], {}), '(fignum)\n', (10695, 10703), True, 'import matplotlib.pyplot as plt\n'), ((11207, 11220), 'matplotlib.pyplot.pause', 'plt.pause', (['(10)'], {}), '(10)\n', (11216, 11220), True, 'import matplotlib.pyplot as plt\n'), ((11272, 11287), 'unittest.main', 'unittest.main', ([], {}), '()\n', (11285, 11287), False, 'import unittest\n'), ((2140, 2157), 'gtsam.Pose2', 'Pose2', (['(0)', '(0)', 'q[0]'], {}), '(0, 0, q[0])\n', (2145, 2157), False, 'from gtsam import Pose2\n'), ((2174, 2194), 'gtsam.Pose2', 'Pose2', (['self.L1', '(0)', '(0)'], {}), '(self.L1, 0, 0)\n', (2179, 2194), False, 'from gtsam import Pose2\n'), ((2211, 2228), 'gtsam.Pose2', 'Pose2', (['(0)', '(0)', 'q[1]'], {}), '(0, 0, q[1])\n', (2216, 2228), False, 'from gtsam import Pose2\n'), ((2245, 2265), 'gtsam.Pose2', 'Pose2', (['self.L2', '(0)', '(0)'], {}), '(self.L2, 0, 0)\n', (2250, 2265), False, 'from gtsam import Pose2\n'), ((2282, 2299), 'gtsam.Pose2', 'Pose2', (['(0)', '(0)', 'q[2]'], {}), '(0, 0, q[2])\n', (2287, 2299), False, 'from gtsam import Pose2\n'), ((2315, 2335), 'gtsam.Pose2', 'Pose2', (['self.L3', '(0)', '(0)'], {}), '(self.L3, 0, 0)\n', (2320, 2335), False, 'from gtsam import Pose2\n'), ((3201, 3230), 'gtsam.Pose2.Expmap', 'Pose2.Expmap', (['(self.xi1 * q[0])'], {}), '(self.xi1 * q[0])\n', (3213, 3230), False, 'from gtsam import Pose2\n'), ((3247, 3276), 'gtsam.Pose2.Expmap', 'Pose2.Expmap', (['(self.xi2 * q[1])'], {}), '(self.xi2 * q[1])\n', (3259, 3276), False, 'from gtsam import Pose2\n'), ((3293, 3322), 'gtsam.Pose2.Expmap', 'Pose2.Expmap', (['(self.xi3 * q[2])'], {}), '(self.xi3 * q[2])\n', (3305, 3322), False, 'from gtsam import Pose2\n'), ((5115, 5144), 'gtsam.Pose2.Expmap', 'Pose2.Expmap', (['(self.xi1 * q[0])'], {}), '(self.xi1 * q[0])\n', (5127, 5144), False, 'from gtsam import Pose2\n'), ((5161, 5190), 'gtsam.Pose2.Expmap', 'Pose2.Expmap', (['(self.xi2 * q[1])'], {}), '(self.xi2 * q[1])\n', (5173, 5190), False, 'from gtsam import Pose2\n'), ((5494, 5531), 'numpy.stack', 'np.stack', (['differential_twists'], {'axis': '(1)'}), '(differential_twists, axis=1)\n', (5502, 5531), True, 'import numpy as np\n'), ((5690, 5708), 'matplotlib.pyplot.figure', 'plt.figure', (['fignum'], {}), '(fignum)\n', (5700, 5708), True, 'import matplotlib.pyplot as plt\n'), ((5820, 5861), 'gtsam.utils.plot.plot_pose2_on_axes', 'gtsam_plot.plot_pose2_on_axes', (['axes', 'sXl1'], {}), '(axes, sXl1)\n', (5849, 5861), True, 'import gtsam.utils.plot as gtsam_plot\n'), ((6089, 6106), 'gtsam.Pose2', 'Pose2', (['(0)', '(0)', 'q[0]'], {}), '(0, 0, q[0])\n', (6094, 6106), False, 'from gtsam import Pose2\n'), ((6123, 6143), 'gtsam.Pose2', 'Pose2', (['self.L1', '(0)', '(0)'], {}), '(self.L1, 0, 0)\n', (6128, 6143), False, 'from gtsam import Pose2\n'), ((6234, 6275), 'gtsam.utils.plot.plot_pose2_on_axes', 'gtsam_plot.plot_pose2_on_axes', (['axes', 'sTl2'], {}), '(axes, sTl2)\n', (6263, 6275), True, 'import gtsam.utils.plot as gtsam_plot\n'), ((6293, 6310), 'gtsam.Pose2', 'Pose2', (['(0)', '(0)', 'q[1]'], {}), '(0, 0, q[1])\n', (6298, 6310), False, 'from gtsam import Pose2\n'), ((6327, 6347), 'gtsam.Pose2', 'Pose2', (['self.L2', '(0)', '(0)'], {}), '(self.L2, 0, 0)\n', (6332, 6347), False, 'from gtsam import Pose2\n'), ((6438, 6479), 'gtsam.utils.plot.plot_pose2_on_axes', 'gtsam_plot.plot_pose2_on_axes', (['axes', 
'sTl3'], {}), '(axes, sTl3)\n', (6467, 6479), True, 'import gtsam.utils.plot as gtsam_plot\n'), ((6497, 6514), 'gtsam.Pose2', 'Pose2', (['(0)', '(0)', 'q[2]'], {}), '(0, 0, q[2])\n', (6502, 6514), False, 'from gtsam import Pose2\n'), ((6530, 6550), 'gtsam.Pose2', 'Pose2', (['self.L3', '(0)', '(0)'], {}), '(self.L3, 0, 0)\n', (6535, 6550), False, 'from gtsam import Pose2\n'), ((6633, 6673), 'gtsam.utils.plot.plot_pose2_on_axes', 'gtsam_plot.plot_pose2_on_axes', (['axes', 'sTt'], {}), '(axes, sTt)\n', (6662, 6673), True, 'import gtsam.utils.plot as gtsam_plot\n'), ((7864, 7924), 'numpy.array', 'np.array', (['[[-9.5, -6, -2.5], [0, 0, 0], [1, 1, 1]]', 'np.float'], {}), '([[-9.5, -6, -2.5], [0, 0, 0], [1, 1, 1]], np.float)\n', (7872, 7924), True, 'import numpy as np\n'), ((7967, 8016), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['J', 'expected'], {}), '(J, expected)\n', (8003, 8016), True, 'import numpy as np\n'), ((8061, 8121), 'numpy.array', 'np.array', (['[[-6, -6, -2.5], [3.5, 0, 0], [1, 1, 1]]', 'np.float'], {}), '([[-6, -6, -2.5], [3.5, 0, 0], [1, 1, 1]], np.float)\n', (8069, 8121), True, 'import numpy as np\n'), ((8164, 8213), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['J', 'expected'], {}), '(J, expected)\n', (8200, 8213), True, 'import numpy as np\n'), ((9309, 9368), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['actual', 'Q0'], {'decimal': '(2)'}), '(actual, Q0, decimal=2)\n', (9345, 9368), True, 'import numpy as np\n'), ((9568, 9627), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['actual', 'Q1'], {'decimal': '(2)'}), '(actual, Q1, decimal=2)\n', (9604, 9627), True, 'import numpy as np\n'), ((9748, 9803), 'numpy.array', 'np.array', (['[[0, 3.5, 7], [0, 0, 0], [1, 1, 1]]', 'np.float'], {}), '([[0, 3.5, 7], [0, 0, 0], [1, 1, 1]], np.float)\n', (9756, 9803), True, 'import numpy as np\n'), ((9858, 9907), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['J', 'expected'], {}), '(J, expected)\n', (9894, 9907), True, 'import numpy as np\n'), ((9952, 10013), 'numpy.array', 'np.array', (['[[0, 0, 3.5], [0, -3.5, -3.5], [1, 1, 1]]', 'np.float'], {}), '([[0, 0, 3.5], [0, -3.5, -3.5], [1, 1, 1]], np.float)\n', (9960, 10013), True, 'import numpy as np\n'), ((10081, 10130), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['J', 'expected'], {}), '(J, expected)\n', (10117, 10130), True, 'import numpy as np\n'), ((10551, 10566), 'math.radians', 'math.radians', (['(0)'], {}), '(0)\n', (10563, 10566), False, 'import math\n'), ((11186, 11201), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.01)'], {}), '(0.01)\n', (11195, 11201), True, 'import matplotlib.pyplot as plt\n'), ((1577, 1597), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'N'], {}), '(0, 1, N)\n', (1588, 1597), True, 'import numpy as np\n'), ((1936, 1952), 'math.radians', 'math.radians', (['(90)'], {}), '(90)\n', (1948, 1952), False, 'import math\n'), ((2106, 2122), 'math.radians', 'math.radians', (['(90)'], {}), '(90)\n', (2118, 2122), False, 'import math\n'), ((4249, 4270), 'numpy.linalg.norm', 'np.linalg.norm', (['error'], {}), '(error)\n', (4263, 4270), True, 'import numpy as np\n'), ((4471, 4509), 'numpy.remainder', 'np.remainder', (['(q + math.pi)', '(2 * math.pi)'], {}), '(q + math.pi, 2 * math.pi)\n', (4483, 4509), True, 'import numpy as np\n'), ((5762, 5778), 'math.radians', 'math.radians', (['(90)'], 
{}), '(90)\n', (5774, 5778), False, 'import math\n'), ((5950, 5997), 'numpy.append', 'np.append', (['p[np.newaxis]', 'q[np.newaxis]'], {'axis': '(0)'}), '(p[np.newaxis], q[np.newaxis], axis=0)\n', (5959, 5997), True, 'import numpy as np\n'), ((7463, 7479), 'math.radians', 'math.radians', (['(90)'], {}), '(90)\n', (7475, 7479), False, 'import math\n'), ((7661, 7678), 'math.radians', 'math.radians', (['(-75)'], {}), '(-75)\n', (7673, 7678), False, 'import math\n'), ((8377, 8393), 'math.radians', 'math.radians', (['(90)'], {}), '(90)\n', (8389, 8393), False, 'import math\n'), ((8576, 8593), 'math.radians', 'math.radians', (['(-75)'], {}), '(-75)\n', (8588, 8593), False, 'import math\n'), ((8835, 8851), 'math.radians', 'math.radians', (['(90)'], {}), '(90)\n', (8847, 8851), False, 'import math\n'), ((9034, 9051), 'math.radians', 'math.radians', (['(-75)'], {}), '(-75)\n', (9046, 9051), False, 'import math\n'), ((9434, 9451), 'math.radians', 'math.radians', (['(-75)'], {}), '(-75)\n', (9446, 9451), False, 'import math\n'), ((11125, 11141), 'numpy.linalg.inv', 'np.linalg.inv', (['J'], {}), '(J)\n', (11138, 11141), True, 'import numpy as np\n'), ((3659, 3673), 'gtsam.Pose2', 'Pose2', (['x', 'y', '(0)'], {}), '(x, y, 0)\n', (3664, 3673), False, 'from gtsam import Pose2\n'), ((3675, 3693), 'gtsam.Pose2', 'Pose2', (['(0)', '(0)', 'theta'], {}), '(0, 0, theta)\n', (3680, 3693), False, 'from gtsam import Pose2\n'), ((3695, 3711), 'gtsam.Pose2', 'Pose2', (['(-x)', '(-y)', '(0)'], {}), '(-x, -y, 0)\n', (3700, 3711), False, 'from gtsam import Pose2\n'), ((4384, 4401), 'numpy.linalg.pinv', 'np.linalg.pinv', (['J'], {}), '(J)\n', (4398, 4401), True, 'import numpy as np\n'), ((9282, 9298), 'math.radians', 'math.radians', (['(90)'], {}), '(90)\n', (9294, 9298), False, 'import math\n'), ((2787, 2798), 'math.cos', 'math.cos', (['b'], {}), '(b)\n', (2795, 2798), False, 'import math\n'), ((2998, 3009), 'math.sin', 'math.sin', (['b'], {}), '(b)\n', (3006, 3009), False, 'import math\n'), ((2670, 2681), 'math.cos', 'math.cos', (['b'], {}), '(b)\n', (2678, 2681), False, 'import math\n'), ((2718, 2729), 'math.cos', 'math.cos', (['a'], {}), '(a)\n', (2726, 2729), False, 'import math\n'), ((2738, 2749), 'math.cos', 'math.cos', (['b'], {}), '(b)\n', (2746, 2749), False, 'import math\n'), ((2881, 2892), 'math.sin', 'math.sin', (['b'], {}), '(b)\n', (2889, 2892), False, 'import math\n'), ((2929, 2940), 'math.sin', 'math.sin', (['a'], {}), '(a)\n', (2937, 2940), False, 'import math\n'), ((2949, 2960), 'math.sin', 'math.sin', (['b'], {}), '(b)\n', (2957, 2960), False, 'import math\n'), ((2625, 2639), 'math.cos', 'math.cos', (['q[0]'], {}), '(q[0])\n', (2633, 2639), False, 'import math\n'), ((2650, 2661), 'math.cos', 'math.cos', (['a'], {}), '(a)\n', (2658, 2661), False, 'import math\n'), ((2836, 2850), 'math.sin', 'math.sin', (['q[0]'], {}), '(q[0])\n', (2844, 2850), False, 'import math\n'), ((2861, 2872), 'math.sin', 'math.sin', (['a'], {}), '(a)\n', (2869, 2872), False, 'import math\n')]
|
import operator
import os
import re
import sys
import time
from collections import deque
from functools import reduce
import numpy as np
def read_input() -> list[list[int]]:
# Read lines input:
# 2199943210
# 3987894921
# 9856789892
# 8767896789
# 9899965678
    # returns a list of lists of integers [[r1.1, r1.2, r1.3, ...], [r2.1, r2.2, r2.3, ...], ...]
    # plus a border of max values (9) all around for an easier low-point search ;)
data = [[int(elem) for elem in '9' + line.strip() + '9'] for line in sys.stdin]
nr_additions = len(data[0])
data.insert(0, [9 for _ in range(nr_additions)])
data.append([9 for _ in range(nr_additions)])
return data
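# Worked sketch of the padding above (illustrative only): for the 2-line input
#     21
#     39
# read_input() returns the 9-padded grid
#     [[9, 9, 9, 9],
#      [9, 2, 1, 9],
#      [9, 3, 9, 9],
#      [9, 9, 9, 9]]
# so every original cell has four in-bounds neighbours and no bounds checks are needed.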
def is_low_point(height_map: np.array, x_curr: int, y_curr: int) -> bool:
val = height_map[x_curr, y_curr]
hor_elems = [height_map[x_curr, y_curr - 1], height_map[x_curr, y_curr + 1]]
ver_elems = [height_map[x_curr - 1, y_curr], height_map[x_curr + 1, y_curr]]
all_elems = hor_elems + ver_elems
min_elem = min(all_elems)
if val < min_elem:
return True
return False
def find_low_points(height_map: np.array) -> list[(int, int), int]:
candidates = list()
# print("height map shape: {}".format(height_map.shape))
x_dim, y_dim = height_map.shape
for x_curr in range(1, x_dim - 1):
for y_curr in range(1, y_dim - 1):
if is_low_point(height_map, x_curr, y_curr):
# print("low point found: [{}][{}] -> {}".format(x_curr, y_curr, height_map[x_curr, y_curr]))
candidates.append([(x_curr, y_curr), height_map[x_curr, y_curr]])
return candidates
def find_solution_a(low_points: list[(int, int), int]) -> int:
answer = sum([elem[1] for elem in low_points]) + len(low_points)
return answer
def generate_basin(height_map: np.array, x_y: (int, int)) -> list[(int, int)]:
basin = set()
candidates = deque()
candidates.append(x_y)
neigh_offsets = [(0, -1), (1, 0), (0, 1), (-1, 0)]
while len(candidates) > 0:
        # use DFS (depth first search) by treating the deque as a stack (LIFO)
        x_curr, y_curr = candidates.pop()
        # use BFS (breadth first search) by treating the deque as a queue (FIFO)
        # x_curr, y_curr = candidates.popleft()
        # NOTE: both methods work, but BFS turned out to be about 3 times
        #       slower than DFS here (possibly due to the Python implementation)
basin.add((x_curr, y_curr))
for x_off, y_off in neigh_offsets:
candi_x = x_curr + x_off
candi_y = y_curr + y_off
cur_val = height_map[x_curr][y_curr]
candi_val = height_map[candi_x][candi_y]
if candi_val != 9 and candi_val > cur_val and\
(candi_x, candi_y) not in basin:
candidates.append((candi_x, candi_y))
return list(basin)
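# Hedged side note on the deque usage above (illustrative only): pop() takes from
# the right end (stack/LIFO -> DFS) while popleft() takes from the left end
# (queue/FIFO -> BFS), e.g.
#     d = deque([1, 2, 3])
#     d.pop()      # -> 3  (LIFO)
#     d.popleft()  # -> 1  (FIFO)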
def find_solution_b(height_map: np.array, low_points: list[(int, int), int]) -> int:
# print("low points: {}".format(low_points))
basin_sizes = list()
for x_y, _ in low_points:
basin = generate_basin(height_map, x_y)
if len(basin) > 0:
basin_sizes.append(len(basin))
answer = reduce(operator.mul, sorted(basin_sizes, reverse=True)[:3], 1)
return answer
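# Worked example (illustrative only): with basin sizes [3, 9, 14, 9] the three
# largest are 14, 9 and 9, so find_solution_b() returns
#     reduce(operator.mul, [14, 9, 9], 1) == 1134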
def do_main():
prev_time = time.process_time()
print("start reading input...")
data = read_input()
cur_time = time.process_time()
diff = cur_time - prev_time
prev_time = cur_time
print("[{}] took: {} sec.".format(cur_time, diff))
# print("input data: {}".format(data))
print("generate low points...")
height_map = np.array(data)
low_points = find_low_points(height_map)
cur_time = time.process_time()
diff = cur_time - prev_time
prev_time = cur_time
print("[{}] took: {} sec.".format(cur_time, diff))
print("find_solution_a...")
result_a = find_solution_a(low_points)
print("result_a:", result_a)
cur_time = time.process_time()
diff = cur_time - prev_time
prev_time = cur_time
print("[{}] took: {} sec.".format(cur_time, diff))
print("find_solution_b...")
result_b = find_solution_b(height_map, low_points)
print("result_b:", result_b)
cur_time = time.process_time()
diff = cur_time - prev_time
# prev_time = cur_time
print("[{}] took: {} sec.".format(cur_time, diff))
if __name__ == "__main__":
# execute only if run as a script
filename = os.path.basename(__file__)
day_nr = re.search(r"\d+", filename).group()
print("day_nr:", day_nr)
do_main()
|
[
"os.path.basename",
"time.process_time",
"numpy.array",
"re.search",
"collections.deque"
] |
[((1912, 1919), 'collections.deque', 'deque', ([], {}), '()\n', (1917, 1919), False, 'from collections import deque\n'), ((3345, 3364), 'time.process_time', 'time.process_time', ([], {}), '()\n', (3362, 3364), False, 'import time\n'), ((3441, 3460), 'time.process_time', 'time.process_time', ([], {}), '()\n', (3458, 3460), False, 'import time\n'), ((3671, 3685), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (3679, 3685), True, 'import numpy as np\n'), ((3746, 3765), 'time.process_time', 'time.process_time', ([], {}), '()\n', (3763, 3765), False, 'import time\n'), ((4002, 4021), 'time.process_time', 'time.process_time', ([], {}), '()\n', (4019, 4021), False, 'import time\n'), ((4270, 4289), 'time.process_time', 'time.process_time', ([], {}), '()\n', (4287, 4289), False, 'import time\n'), ((4488, 4514), 'os.path.basename', 'os.path.basename', (['__file__'], {}), '(__file__)\n', (4504, 4514), False, 'import os\n'), ((4528, 4555), 're.search', 're.search', (['"""\\\\d+"""', 'filename'], {}), "('\\\\d+', filename)\n", (4537, 4555), False, 'import re\n')]
|
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
def plot_image_frame(image_frame):
"""
    utility for plotting a list of image frames, one figure per frame
:param image_frame: list of images
"""
for ii, image in enumerate(image_frame):
plt.figure()
if isinstance(image, list):
image = image[0]
plt.imshow(image)
plt.title('frame: ' + str(ii))
plt.show()
def plot_trajectories(pose_frame):
"""
    utility for plotting joint trajectories against time-step t
    :param pose_frame: numpy-array, shape (time_step, joint_num, coordinate_dim)
"""
pose_frame = np.array(pose_frame)
timestep, joint_num, dim = pose_frame.shape
joints = ['neck', 'shoulder', 'elbow', 'hand']
plt.figure(figsize=(12, 7))
t = np.arange(timestep)
for ii, mark in enumerate(joints):
plt.subplot(331)
plt.plot(t, pose_frame[:, ii, 0], label=mark)
plt.xlabel('t')
plt.ylabel('x')
plt.subplot(332)
plt.plot(t, pose_frame[:, ii, 1], label=mark)
plt.xlabel('t')
plt.ylabel('y')
if dim > 2:
plt.subplot(333)
plt.plot(t, pose_frame[:, ii, 2], label=mark)
plt.xlabel('t')
plt.ylabel('z')
plt.subplots_adjust(wspace=0.5, hspace=0)
plt.legend(loc=(1, 0.4))
plt.show()
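# Minimal usage sketch for plot_trajectories() (illustrative only): the joint
# labels are hard-coded to ['neck', 'shoulder', 'elbow', 'hand'], so the input is
# expected to provide 4 joints, e.g.
#     pose_frame = np.zeros((50, 4, 3))  # 50 time steps, 4 joints, x/y/z
#     plot_trajectories(pose_frame)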
def plot_trajectory_3d(trajectory):
"""
plot 3d trajectory
:param trajectory: numpy-array, shape of (time_step,3)
"""
xs = trajectory[:, 0]
ys = trajectory[:, 1]
zs = trajectory[:, 2]
fig = plt.figure()
ax = Axes3D(fig)
ax.plot3D(xs, ys, zs=zs, marker='o', color='b')
plt.show()
|
[
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"mpl_toolkits.mplot3d.Axes3D",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"numpy.array",
"numpy.arange",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] |
[((558, 578), 'numpy.array', 'np.array', (['pose_frame'], {}), '(pose_frame)\n', (566, 578), True, 'import numpy as np\n'), ((673, 700), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 7)'}), '(figsize=(12, 7))\n', (683, 700), True, 'import matplotlib.pyplot as plt\n'), ((706, 725), 'numpy.arange', 'np.arange', (['timestep'], {}), '(timestep)\n', (715, 725), True, 'import numpy as np\n'), ((1090, 1131), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'wspace': '(0.5)', 'hspace': '(0)'}), '(wspace=0.5, hspace=0)\n', (1109, 1131), True, 'import matplotlib.pyplot as plt\n'), ((1133, 1157), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(1, 0.4)'}), '(loc=(1, 0.4))\n', (1143, 1157), True, 'import matplotlib.pyplot as plt\n'), ((1159, 1169), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1167, 1169), True, 'import matplotlib.pyplot as plt\n'), ((1370, 1382), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1380, 1382), True, 'import matplotlib.pyplot as plt\n'), ((1389, 1400), 'mpl_toolkits.mplot3d.Axes3D', 'Axes3D', (['fig'], {}), '(fig)\n', (1395, 1400), False, 'from mpl_toolkits.mplot3d import Axes3D\n'), ((1451, 1461), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1459, 1461), True, 'import matplotlib.pyplot as plt\n'), ((247, 259), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (257, 259), True, 'import matplotlib.pyplot as plt\n'), ((312, 329), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {}), '(image)\n', (322, 329), True, 'import matplotlib.pyplot as plt\n'), ((365, 375), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (373, 375), True, 'import matplotlib.pyplot as plt\n'), ((764, 780), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(331)'], {}), '(331)\n', (775, 780), True, 'import matplotlib.pyplot as plt\n'), ((783, 828), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'pose_frame[:, ii, 0]'], {'label': 'mark'}), '(t, pose_frame[:, ii, 0], label=mark)\n', (791, 828), True, 'import matplotlib.pyplot as plt\n'), ((831, 846), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""t"""'], {}), "('t')\n", (841, 846), True, 'import matplotlib.pyplot as plt\n'), ((849, 864), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""x"""'], {}), "('x')\n", (859, 864), True, 'import matplotlib.pyplot as plt\n'), ((867, 883), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(332)'], {}), '(332)\n', (878, 883), True, 'import matplotlib.pyplot as plt\n'), ((886, 931), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'pose_frame[:, ii, 1]'], {'label': 'mark'}), '(t, pose_frame[:, ii, 1], label=mark)\n', (894, 931), True, 'import matplotlib.pyplot as plt\n'), ((934, 949), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""t"""'], {}), "('t')\n", (944, 949), True, 'import matplotlib.pyplot as plt\n'), ((952, 967), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (962, 967), True, 'import matplotlib.pyplot as plt\n'), ((985, 1001), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(333)'], {}), '(333)\n', (996, 1001), True, 'import matplotlib.pyplot as plt\n'), ((1005, 1050), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'pose_frame[:, ii, 2]'], {'label': 'mark'}), '(t, pose_frame[:, ii, 2], label=mark)\n', (1013, 1050), True, 'import matplotlib.pyplot as plt\n'), ((1054, 1069), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""t"""'], {}), "('t')\n", (1064, 1069), True, 'import matplotlib.pyplot as plt\n'), ((1073, 1088), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""z"""'], {}), "('z')\n", (1083, 
1088), True, 'import matplotlib.pyplot as plt\n')]
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import tempfile
import shutil
import numpy as np
import pytest
from datetime import datetime
import os
from urllib.parse import urlparse
import re
from unittest.mock import Mock, patch
from astropy import coordinates
from astropy import units as u
from astroquery.utils.commons import ASTROPY_LT_4_1
from .. import Alma
from .. import _url_list, _test_url_list
# ALMA tests involving staging take too long, leading to travis timeouts
# TODO: make this a configuration item
SKIP_SLOW = True
all_colnames = {'Project code', 'Source name', 'RA', 'Dec', 'Band',
'Frequency resolution', 'Integration', 'Release date',
'Frequency support', 'Velocity resolution', 'Pol products',
'Observation date', 'PI name', 'PWV', 'Member ous id',
'Asdm uid', 'Project title', 'Project type', 'Scan intent',
'Spatial resolution', 'Largest angular scale',
'QA2 Status', 'Group ous id', 'Pub'}
def get_client():
alma = Alma()
# need this to point alma to a different test site
# alma package __init__.py mentions test sites but I don't know how the
# mechanism is supposed to be used
from .. import core
core.ALMA_TAP_PATH = 'obscore'
alma.archive_url = 'https://alma.cadc-ccda.hia-iha.nrc-cnrc.gc.ca/'
return alma
@pytest.mark.remote_data
class TestAlma:
def setup_class(cls):
pass
# new test server
# this server seems not to serve a help page?
# Alma.archive_url = "https://2016-03.asa-test.alma.cl/aq/"
# starting somewhere between Nov 2015 and Jan 2016, the beta server
# stopped serving the actual data, making all staging attempts break
@pytest.fixture()
def temp_dir(self, request):
my_temp_dir = tempfile.mkdtemp()
def fin():
shutil.rmtree(my_temp_dir)
request.addfinalizer(fin)
return my_temp_dir
def test_public(self):
alma = get_client()
results = alma.query(payload=None, public=True, maxrec=100)
assert len(results) == 100
for row in results:
assert row['data_rights'] == 'Public'
results = alma.query(payload=None, public=False, maxrec=100)
assert len(results) == 100
for row in results:
assert row['data_rights'] == 'Proprietary'
def test_SgrAstar(self, temp_dir):
alma = get_client()
alma.cache_location = temp_dir
result_s = alma.query_object('Sgr A*', legacy_columns=True)
assert '2013.1.00857.S' in result_s['Project code']
# "The Brick", g0.253, is in this one
# assert b'2011.0.00217.S' in result_c['Project code'] # missing cycle 1 data
def test_docs_example(self, temp_dir):
alma = get_client()
alma.cache_location = temp_dir
rslt = alma.query(payload=dict(obs_creator_name='*Ginsburg*'))
assert 'ADS/JAO.ALMA#2013.1.00269.S' in rslt['obs_publisher_did']
def test_freq(self):
alma = get_client()
payload = {'frequency': '85..86'}
result = alma.query(payload)
assert len(result) > 0
for row in result:
# returned em_min and em_max are in m
assert row['frequency'] >= 85
assert row['frequency'] <= 100
assert '3' in row['band_list']
@pytest.mark.skipif("SKIP_SLOW",
reason="Extremely slow due to limitations of "
"the implementation")
def test_bands(self):
alma = get_client()
payload = {'band_list': ['5', '7']}
result = alma.query(payload)
assert len(result) > 0
for row in result:
assert ('5' in row['band_list']) or ('7' in row['band_list'])
def test_equivalent_columns(self):
# this test is to ensure that queries using original column names
# return the same results as the ones that use ObsCore names
alma = get_client()
# original
result_orig = alma.query(payload={'project_code': '2011.0.00131.S'},
legacy_columns=True)
result_obscore = alma.query(payload={'proposal_id': '2011.0.00131.S'},
legacy_columns=True)
assert len(result_orig) == len(result_obscore)
for row in result_orig:
assert row['Project code'] == '2011.0.00131.S'
for row in result_obscore:
assert row['Project code'] == '2011.0.00131.S'
def test_alma_source_name(self):
alma = get_client()
payload = {'source_name_alma': 'GRB021004'}
result = alma.query(payload)
assert len(result) > 0
for row in result:
assert 'GRB021004' == row['target_name']
@pytest.mark.skipif("SKIP_SLOW", reason="Known issue")
def test_ra_dec(self):
alma = get_client()
payload = {'ra_dec': '181.0192d -0.01928d'}
result = alma.query(payload)
assert len(result) > 0
@pytest.mark.skipif("SKIP_SLOW")
def test_m83(self, temp_dir, recwarn):
alma = get_client()
alma.cache_location = temp_dir
m83_data = alma.query_object('M83', science=True, legacy_columns=True)
uids = np.unique(m83_data['Member ous id'])
link_list = alma.stage_data(uids)
# On Feb 8, 2016 there were 83 hits. This number should never go down.
# Except it has. On May 18, 2016, there were 47.
assert len(link_list) >= 47
# test re-staging
# (has been replaced with warning)
# with pytest.raises(requests.HTTPError) as ex:
# link_list = alma.stage_data(uids)
# assert ex.value.args[0] == ('Received an error 405: this may indicate you have '
# 'already staged the data. Try downloading the '
# 'file URLs directly with download_files.')
# log.warning doesn't actually make a warning
# link_list = alma.stage_data(uids)
# w = recwarn.pop()
# assert (str(w.message) == ('Error 405 received. If you have previously staged the '
# 'same UIDs, the result returned is probably correct,'
# ' otherwise you may need to create a fresh astroquery.Alma instance.'))
@pytest.mark.skipif("SKIP_SLOW", reason="Known issue")
def test_stage_data(self, temp_dir, recwarn):
alma = get_client()
alma.cache_location = temp_dir
result_s = alma.query_object('Sgr A*', legacy_columns=True)
if ASTROPY_LT_4_1:
assert b'2013.1.00857.S' in result_s['Project code']
assert b'uid://A002/X40d164/X1b3' in result_s['Asdm uid']
assert b'uid://A002/X391d0b/X23d' in result_s['Member ous id']
match_val = b'uid://A002/X40d164/X1b3'
else:
assert '2013.1.00857.S' in result_s['Project code']
assert 'uid://A002/X40d164/X1b3' in result_s['Asdm uid']
assert 'uid://A002/X391d0b/X23d' in result_s['Member ous id']
match_val = 'uid://A002/X40d164/X1b3'
match = result_s['Asdm uid'] == match_val
uid = result_s['Member ous id'][match]
# this is temporary to switch back to ALMA servers
# del alma.dataarchive_url
# alma.archive_url = 'http://almascience.org'
result = alma.stage_data(uid)
found = False
for url in result['URL']:
if 'uid___A002_X40d164_X1b3' in url:
found = True
break
assert found, 'URL to uid___A002_X40d164_X1b3 expected'
def test_stage_data_listall(self, temp_dir, recwarn):
"""
test for expanded capability created in #1683
"""
alma = get_client()
alma.cache_location = temp_dir
uid = 'uid://A001/X12a3/Xe9'
result1 = alma.stage_data(uid, expand_tarfiles=False)
result2 = alma.stage_data(uid, expand_tarfiles=True)
expected_names = [
'2017.1.01185.S_uid___A002_Xd28a9e_X71b8.asdm.sdm.tar',
'2017.1.01185.S_uid___A002_Xd28a9e_X7b4d.asdm.sdm.tar',
'2017.1.01185.S_uid___A002_Xd29c1f_X1f74.asdm.sdm.tar',
'2017.1.01185.S_uid___A002_Xd29c1f_X5cf.asdm.sdm.tar']
expected_names_with_aux = expected_names + \
['2017.1.01185.S_uid___A001_X12a3_Xe9_auxiliary.tar']
for name in expected_names_with_aux:
assert name in result1['name']
for res in result1:
p = re.compile(r'.*(uid__.*)\.asdm.*')
if res['name'] in expected_names:
assert 'application/x-tar' == res['type']
assert res['id'] == p.search(res['name']).group(1)
else:
assert res['type'] in ['application/x-tar', 'application/x-votable+xml;content=datalink', 'text/plain']
assert res['id'] == 'None'
assert 'UNKNOWN' == res['permission']
assert res['mous_uid'] == uid
assert len(result2) > len(result1)
def test_stage_data_json(self, temp_dir, recwarn):
"""
test for json returns
"""
alma = get_client()
alma.cache_location = temp_dir
uid = 'uid://A001/X12a3/Xe9'
# this is temporary to switch back to ALMA servers
# alma.archive_url = 'http://almascience.org'
result = alma.stage_data(uid, return_json=False)
assert len(result) > 0
with pytest.raises(AttributeError):
# this no longer works
alma.stage_data(uid, return_json=True)
def test_data_proprietary(self):
# public
alma = get_client()
assert not alma.is_proprietary('uid://A001/X12a3/Xe9')
IVOA_DATE_FORMAT = "%Y-%m-%dT%H:%M:%S.%f"
now = datetime.utcnow().strftime(IVOA_DATE_FORMAT)[:-3]
query = "select top 1 obs_id from ivoa.obscore where " \
"obs_release_date > '{}'".format(now)
result = alma.query_tap(query)
assert len(result.table) == 1
# proprietary
assert alma.is_proprietary(result.table[0][0])
# non existent
with pytest.raises(AttributeError):
alma.is_proprietary('uid://NON/EXI/STING')
def test_data_info(self, temp_dir):
alma = get_client()
alma.cache_location = temp_dir
uid = 'uid://A001/X12a3/Xe9'
data_info = alma.get_data_info(uid, expand_tarfiles=True)
for file in data_info:
# TODO found files that do not match info.
# assert u.isclose(file['content_length']*u.B,
# alma._HEADER_data_size([file['access_url']])[1]),\
# 'File {} size: datalink and head do not match'.\
# format(file['access_url'])
pass
# compare with tarball version
data_info_tar = alma.get_data_info(uid, expand_tarfiles=False)
assert len(data_info) > len(data_info_tar)
# size is the same - not working because service inconsistencies
# assert sum(data_info['content_length']) == \
# sum(data_info_tar['content_length'])
# check smallest file downloads correctly
file = 'member.uid___A001_X12a3_Xe9.README.txt'
for url in data_info['access_url']:
if file in url:
file_url = url
break
assert file_url
alma.download_files([file_url], temp_dir)
assert os.stat(os.path.join(temp_dir, file)).st_size
# mock downloading an entire program
download_files_mock = Mock()
alma.download_files = download_files_mock
alma.retrieve_data_from_uid([uid])
comparison = download_files_mock.mock_calls[0][1] == data_info_tar[
'access_url']
assert comparison.all()
def test_download_data(self, temp_dir):
# test only fits files from a program
def myrequests(op, file_url, **kwargs):
# this is to avoid downloading the actual files
if op == 'HEAD':
return Mock(headers={'Content-Type': 'fits'})
else:
return file_url.split('/')[-1]
alma = get_client()
alma.cache_location = temp_dir
uid = 'uid://A001/X12a3/Xe9'
data_info = alma.get_data_info(uid, expand_tarfiles=True)
fitsre = re.compile(r'.*\.fits$')
alma._request = Mock(side_effect=myrequests)
urls = [x['access_url'] for x in data_info
if fitsre.match(x['access_url'])]
results = alma.download_files(urls, temp_dir)
alma._request.assert_called()
assert len(results) == len(urls)
# each url triggers 2 calls: HEAD and GET
assert len(urls)*2 == len(alma._request.mock_calls)
def test_download_and_extract(self, temp_dir):
def myrequests(op, file_url, **kwargs):
# this is to avoid downloading the actual files
if op == 'HEAD':
return Mock(headers={'Content-Type': 'fits'})
else:
return file_url.split('/')[-1]
alma = get_client()
alma.cache_location = temp_dir
alma._request = Mock(side_effect=myrequests)
alma._cycle0_tarfile_content_table = {'ID': ''}
uid = 'uid://A001/X12a3/Xe9'
data_info = alma.get_data_info(uid, expand_tarfiles=False)
aux_tar_file = [x for x in data_info['access_url'] if 'auxiliary' in x]
assert 1 == len(aux_tar_file)
# there are no FITS files in the auxiliary file
assert not alma.download_and_extract_files(aux_tar_file)
# download python scripts now
downloaded = alma.download_and_extract_files(aux_tar_file,
regex=r'.*\.py')
assert len(downloaded) > 1
assert len(downloaded)*2 == len(alma._request.mock_calls)
# ASDM files cannot be expanded.
asdm_url = [x for x in data_info['access_url'] if 'asdm' in x][0]
tarfile_handle_mock = Mock()
mock_content_file1 = Mock(path='/tmp/')
# mocking attribute name is trickier and it requires the name to
# be set separately.
mock_content_file1.name = 'foo.py'
mock_content_file2 = Mock(path='/tmp/')
mock_content_file2.name = 'blah.txt'
tarfile_handle_mock.getmembers.return_value = \
[mock_content_file1, mock_content_file2]
tarfile_pkg_mock = Mock()
tarfile_pkg_mock.open.return_value = tarfile_handle_mock
with patch('astroquery.alma.core.tarfile', tarfile_pkg_mock):
with patch('astroquery.alma.core.os.remove') as delete_mock:
downloaded_asdm = alma.download_and_extract_files(
[asdm_url], include_asdm=True, regex=r'.*\.py')
delete_mock.assert_called_once_with(asdm_url.split('/')[-1])
assert downloaded_asdm == [os.path.join(temp_dir, 'foo.py')]
@pytest.mark.skipif("SKIP_SLOW", reason="Known issue")
def test_doc_example(self, temp_dir):
alma = get_client()
alma.cache_location = temp_dir
alma2 = get_client()
alma2.cache_location = temp_dir
m83_data = alma.query_object('M83', legacy_columns=True)
# the order can apparently sometimes change
# These column names change too often to keep testing.
# assert set(m83_data.colnames) == set(all_colnames)
galactic_center = coordinates.SkyCoord(0 * u.deg, 0 * u.deg,
frame='galactic')
gc_data = alma.query_region(galactic_center, 1 * u.deg)
# assert len(gc_data) >= 425 # Feb 8, 2016
assert len(gc_data) >= 50 # Nov 16, 2016
uids = np.unique(m83_data['Member ous id'])
if ASTROPY_LT_4_1:
assert b'uid://A001/X11f/X30' in uids
X30 = (m83_data['Member ous id'] == b'uid://A001/X11f/X30')
X31 = (m83_data['Member ous id'] == b'uid://A002/X3216af/X31')
else:
assert 'uid://A001/X11f/X30' in uids
X30 = (m83_data['Member ous id'] == 'uid://A001/X11f/X30')
X31 = (m83_data['Member ous id'] == 'uid://A002/X3216af/X31')
assert X30.sum() == 4 # Jul 13, 2020
assert X31.sum() == 4 # Jul 13, 2020
mous1 = alma.stage_data('uid://A001/X11f/X30')
totalsize_mous1 = mous1['size'].sum() * u.Unit(mous1['size'].unit)
assert (totalsize_mous1.to(u.B) > 1.9*u.GB)
mous = alma2.stage_data('uid://A002/X3216af/X31')
totalsize_mous = mous['size'].sum() * u.Unit(mous['size'].unit)
# More recent ALMA request responses do not include any information
# about file size, so we have to allow for the possibility that all
# file sizes are replaced with -1
assert (totalsize_mous.to(u.GB).value > 52)
def test_query(self, temp_dir):
alma = get_client()
alma.cache_location = temp_dir
result = alma.query(payload={'start_date': '<11-11-2011'},
public=False, legacy_columns=True, science=True)
# Nov 16, 2016: 159
# Apr 25, 2017: 150
# Jul 2, 2017: 160
# May 9, 2018: 162
# March 18, 2019: 171 (seriously, how do they keep changing history?)
# with SIA2 numbers are different (cardinality?) assert len(result) == 171
test_date = datetime.strptime('11-11-2011', '%d-%m-%Y')
for row in result['Observation date']:
assert test_date > datetime.strptime(row, '%d-%m-%Y'), \
'Unexpected value: {}'.format(row)
# Not in the help - no need to support it.
# result = alma.query(payload={'member_ous_id': 'uid://A001/X11a2/X11'},
# science=True)
# assert len(result) == 1
@pytest.mark.skipif("SKIP_SLOW", reason="ra dec search known issue")
def test_misc(self):
# miscellaneous set of common tests
alma = get_client()
#
# alma.query_region(coordinate=orionkl_coords, radius=4 * u.arcmin,
# public=False, science=False)
result = alma.query_object('M83', public=True, science=True)
assert len(result) > 0
result = alma.query(payload={'pi_name': '*Bally*'}, public=False,
maxrec=10)
assert result
result.write('/tmp/alma-onerow.txt', format='ascii')
for row in result:
assert 'Bally' in row['obs_creator_name']
result = alma.query(payload=dict(project_code='2016.1.00165.S'),
public=False, cache=False)
assert result
for row in result:
assert '2016.1.00165.S' == row['proposal_id']
result = alma.query(payload=dict(project_code='2017.1.01355.L',
source_name_alma='G008.67'),)
assert result
for row in result:
assert '2017.1.01355.L' == row['proposal_id']
assert 'Public' == row['data_rights']
assert 'G008.67' in row['target_name']
result = alma.query_region(
coordinates.SkyCoord('5:35:14.461 -5:21:54.41', frame='fk5',
unit=(u.hour, u.deg)), radius=0.034 * u.deg)
assert result
result = alma.query_region(
coordinates.SkyCoord('5:35:14.461 -5:21:54.41', frame='fk5',
unit=(u.hour, u.deg)), radius=0.034 * u.deg,
payload={'energy.frequency-asu': '215 .. 220'})
result = alma.query(payload=dict(project_code='2012.*',
public_data=True))
assert result
for row in result:
assert '2012.' in row['proposal_id']
assert 'Public' == row['data_rights']
result = alma.query(payload={'frequency': '96 .. 96.5'})
assert result
for row in result:
# TODO not sure how to test this
pass
result = alma.query_object('M83', band_list=[3, 6, 8])
assert result
for row in result:
assert row['band_list'] in ['3', '6', '8']
result = alma.query(payload={'pi_name': '*Ginsburg*',
'band_list': '6'})
assert result
for row in result:
assert '6' == row['band_list']
assert 'ginsburg' in row['obs_creator_name'].lower()
@pytest.mark.skipif("SKIP_SLOW")
def test_user(self):
# miscellaneous set of tests from current users
alma = get_client()
rslt = alma.query({'band_list': [6], 'project_code': '2012.1.*'},
legacy_columns=True)
for row in rslt:
print(row['Project code'])
print(row['Member ous id'])
# As of April 2017, these data are *MISSING FROM THE ARCHIVE*.
# This has been reported, as it is definitely a bug.
@pytest.mark.xfail
@pytest.mark.bigdata
@pytest.mark.skipif("SKIP_SLOW")
def test_cycle1(self, temp_dir):
# About 500 MB
alma = get_client()
alma.cache_location = temp_dir
target = 'NGC4945'
project_code = '2012.1.00912.S'
payload = {'project_code': project_code,
'source_name_alma': target, }
result = alma.query(payload=payload)
assert len(result) == 1
# Need new Alma() instances each time
a1 = alma()
uid_url_table_mous = a1.stage_data(result['Member ous id'])
a2 = alma()
uid_url_table_asdm = a2.stage_data(result['Asdm uid'])
# I believe the fixes as part of #495 have resulted in removal of a
# redundancy in the table creation, so a 1-row table is OK here.
# A 2-row table may not be OK any more, but that's what it used to
# be...
assert len(uid_url_table_asdm) == 1
assert len(uid_url_table_mous) >= 2 # now is len=3 (Nov 17, 2016)
# URL should look like:
# https://almascience.eso.org/dataPortal/requests/anonymous/944120962/ALMA/2012.1.00912.S_uid___A002_X5a9a13_X528_001_of_001.tar/2012.1.00912.S_uid___A002_X5a9a13_X528_001_of_001.tar
# https://almascience.eso.org/rh/requests/anonymous/944222597/2012.1.00912.S_uid___A002_X5a9a13_X528_001_of_001.tar/2012.1.00912.S_uid___A002_X5a9a13_X528_001_of_001.tar
small = uid_url_table_mous['size'] < 1
urls_to_download = uid_url_table_mous[small]['URL']
uri = urlparse(urls_to_download[0])
assert uri.path == ('/dataPortal/requests/anonymous/{0}/ALMA/2012.1.00912.S_uid___A002_X5a9a13_X528_001_of_001.tar/2012.1.00912.S_uid___A002_X5a9a13_X528_001_of_001.tar' # noqa
.format(a1._staging_log['staging_page_id']))
# THIS IS FAIL
# '2012.1.00912.S_uid___A002_X5a9a13_X528_001_of_001.tar'
left = uid_url_table_mous['URL'][0].split("/")[-1]
assert left == '2012.1.00912.S_uid___A002_X5a9a13_X528_001_of_001.tar'
right = uid_url_table_mous['uid'][0]
assert right == 'uid://A002/X5a9a13/X528'
assert left[15:-15] == right.replace(":", "_").replace("/", "_")
data = alma.download_and_extract_files(urls_to_download)
assert len(data) == 6
@pytest.mark.skipif("SKIP_SLOW")
@pytest.mark.skip("Not working anymore")
def test_cycle0(self, temp_dir):
# About 20 MB
alma = get_client()
alma.cache_location = temp_dir
target = 'NGC4945'
project_code = '2011.0.00121.S'
payload = {'project_code': project_code,
'source_name_alma': target, }
result = alma.query(payload=payload, legacy_columns=True)
assert len(result) == 1
alma1 = alma()
alma2 = alma()
uid_url_table_mous = alma1.stage_data(result['Member ous id'])
uid_url_table_asdm = alma2.stage_data(result['Asdm uid'])
assert len(uid_url_table_asdm) == 1
assert len(uid_url_table_mous) == 32
assert uid_url_table_mous[0]['URL'].split("/")[-1] == '2011.0.00121.S_2012-08-16_001_of_002.tar'
assert uid_url_table_mous[0]['uid'] == 'uid://A002/X327408/X246'
small = uid_url_table_mous['size'] < 1
urls_to_download = uid_url_table_mous[small]['URL']
# Check that all URLs show up in the Cycle 0 table
for url in urls_to_download:
tarfile_name = os.path.split(url)[-1]
assert tarfile_name in alma._cycle0_tarfile_content['ID']
data = alma.download_and_extract_files(urls_to_download)
# There are 10 small files, but only 8 unique
assert len(data) == 8
def test_keywords(self, temp_dir):
alma = get_client()
alma.help_tap()
result = alma.query_tap(
"select * from ivoa.obscore where s_resolution <0.1 and "
"science_keyword in ('High-mass star formation', 'Disks around "
"high-mass stars')")
assert len(result) >= 72
# TODO why is it failing
# assert 'Orion_Source_I' in result['target_name']
@pytest.mark.remote_data
def test_project_metadata():
alma = get_client()
metadata = alma.get_project_metadata('2013.1.00269.S')
assert metadata == ['Sgr B2, a high-mass molecular cloud in our Galaxy\'s '
'Central Molecular Zone, is the most extreme site of '
'ongoing star formation in the Local Group in terms '
'of its gas content, temperature, and velocity '
'dispersion. If any cloud in our galaxy is analogous '
'to the typical cloud at the universal peak of star '
'formation at z~2, this is it. We propose a 6\'x6\' '
'mosaic in the 3mm window targeting gas thermometer '
'lines, specifically CH3CN and its isotopologues. We '
'will measure the velocity dispersion and temperature '
'of the molecular gas on all scales (0.02 - 12 pc, '
'0.5" - 5\') within the cloud, which will yield '
'resolved measurements of the Mach number and the '
'sonic scale of the gas. We will assess the relative '
'importance of stellar feedback and turbulence on the '
'star-forming gas, determining how extensive the '
'feedback effects are within an ultradense '
'environment. The observations will provide '
'constraints on the inputs to star formation theories '
'and will determine their applicability in extremely '
'dense, turbulent, and hot regions. Sgr B2 will be '
'used as a testing ground for star formation theories '
'in an environment analogous to high-z starburst '
'clouds in which they must be applied.']
@pytest.mark.remote_data
@pytest.mark.parametrize('dataarchive_url', _test_url_list)
@pytest.mark.skip('Not working for now - Investigating')
def test_staging_postfeb2020(dataarchive_url):
alma = get_client()
tbl = alma.stage_data('uid://A001/X121/X4ba')
assert 'mous_uid' in tbl.colnames
assert '2013.1.00269.S_uid___A002_X9de499_X3d6c.asdm.sdm.tar' in tbl['name']
@pytest.mark.remote_data
@pytest.mark.parametrize('dataarchive_url', _url_list)
@pytest.mark.skip('Not working for now - Investigating')
def test_staging_uptofeb2020(dataarchive_url):
alma = get_client()
tbl = alma.stage_data('uid://A001/X121/X4ba')
assert 'mous_uid' in tbl.colnames
names = [x.split("/")[-1] for x in tbl['URL']]
assert '2013.1.00269.S_uid___A002_X9de499_X3d6c.asdm.sdm.tar' in names
@pytest.mark.remote_data
@pytest.mark.parametrize('dataarchive_url', _test_url_list)
def test_staging_stacking(dataarchive_url):
alma = get_client()
alma.stage_data(['uid://A001/X13d5/X1d', 'uid://A002/X3216af/X31',
'uid://A001/X12a3/X240'])
|
[
"astropy.units.Unit",
"os.path.join",
"shutil.rmtree",
"os.path.split",
"unittest.mock.Mock",
"pytest.fixture",
"urllib.parse.urlparse",
"unittest.mock.patch",
"datetime.datetime.utcnow",
"datetime.datetime.strptime",
"tempfile.mkdtemp",
"pytest.mark.skipif",
"pytest.raises",
"pytest.mark.parametrize",
"pytest.mark.skip",
"astropy.coordinates.SkyCoord",
"numpy.unique",
"re.compile"
] |
[((27292, 27350), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dataarchive_url"""', '_test_url_list'], {}), "('dataarchive_url', _test_url_list)\n", (27315, 27350), False, 'import pytest\n'), ((27352, 27407), 'pytest.mark.skip', 'pytest.mark.skip', (['"""Not working for now - Investigating"""'], {}), "('Not working for now - Investigating')\n", (27368, 27407), False, 'import pytest\n'), ((27679, 27732), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dataarchive_url"""', '_url_list'], {}), "('dataarchive_url', _url_list)\n", (27702, 27732), False, 'import pytest\n'), ((27734, 27789), 'pytest.mark.skip', 'pytest.mark.skip', (['"""Not working for now - Investigating"""'], {}), "('Not working for now - Investigating')\n", (27750, 27789), False, 'import pytest\n'), ((28107, 28165), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dataarchive_url"""', '_test_url_list'], {}), "('dataarchive_url', _test_url_list)\n", (28130, 28165), False, 'import pytest\n'), ((1780, 1796), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (1794, 1796), False, 'import pytest\n'), ((3418, 3520), 'pytest.mark.skipif', 'pytest.mark.skipif', (['"""SKIP_SLOW"""'], {'reason': '"""Extremely slow due to limitations of the implementation"""'}), "('SKIP_SLOW', reason=\n 'Extremely slow due to limitations of the implementation')\n", (3436, 3520), False, 'import pytest\n'), ((4850, 4903), 'pytest.mark.skipif', 'pytest.mark.skipif', (['"""SKIP_SLOW"""'], {'reason': '"""Known issue"""'}), "('SKIP_SLOW', reason='Known issue')\n", (4868, 4903), False, 'import pytest\n'), ((5085, 5116), 'pytest.mark.skipif', 'pytest.mark.skipif', (['"""SKIP_SLOW"""'], {}), "('SKIP_SLOW')\n", (5103, 5116), False, 'import pytest\n'), ((6432, 6485), 'pytest.mark.skipif', 'pytest.mark.skipif', (['"""SKIP_SLOW"""'], {'reason': '"""Known issue"""'}), "('SKIP_SLOW', reason='Known issue')\n", (6450, 6485), False, 'import pytest\n'), ((15115, 15168), 'pytest.mark.skipif', 'pytest.mark.skipif', (['"""SKIP_SLOW"""'], {'reason': '"""Known issue"""'}), "('SKIP_SLOW', reason='Known issue')\n", (15133, 15168), False, 'import pytest\n'), ((17992, 18059), 'pytest.mark.skipif', 'pytest.mark.skipif', (['"""SKIP_SLOW"""'], {'reason': '"""ra dec search known issue"""'}), "('SKIP_SLOW', reason='ra dec search known issue')\n", (18010, 18059), False, 'import pytest\n'), ((20621, 20652), 'pytest.mark.skipif', 'pytest.mark.skipif', (['"""SKIP_SLOW"""'], {}), "('SKIP_SLOW')\n", (20639, 20652), False, 'import pytest\n'), ((21165, 21196), 'pytest.mark.skipif', 'pytest.mark.skipif', (['"""SKIP_SLOW"""'], {}), "('SKIP_SLOW')\n", (21183, 21196), False, 'import pytest\n'), ((23456, 23487), 'pytest.mark.skipif', 'pytest.mark.skipif', (['"""SKIP_SLOW"""'], {}), "('SKIP_SLOW')\n", (23474, 23487), False, 'import pytest\n'), ((23493, 23532), 'pytest.mark.skip', 'pytest.mark.skip', (['"""Not working anymore"""'], {}), "('Not working anymore')\n", (23509, 23532), False, 'import pytest\n'), ((1852, 1870), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (1868, 1870), False, 'import tempfile\n'), ((5322, 5358), 'numpy.unique', 'np.unique', (["m83_data['Member ous id']"], {}), "(m83_data['Member ous id'])\n", (5331, 5358), True, 'import numpy as np\n'), ((11733, 11739), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (11737, 11739), False, 'from unittest.mock import Mock, patch\n'), ((12511, 12535), 're.compile', 're.compile', (['""".*\\\\.fits$"""'], {}), "('.*\\\\.fits$')\n", (12521, 12535), False, 'import re\n'), ((12560, 12588), 
'unittest.mock.Mock', 'Mock', ([], {'side_effect': 'myrequests'}), '(side_effect=myrequests)\n', (12564, 12588), False, 'from unittest.mock import Mock, patch\n'), ((13340, 13368), 'unittest.mock.Mock', 'Mock', ([], {'side_effect': 'myrequests'}), '(side_effect=myrequests)\n', (13344, 13368), False, 'from unittest.mock import Mock, patch\n'), ((14192, 14198), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (14196, 14198), False, 'from unittest.mock import Mock, patch\n'), ((14228, 14246), 'unittest.mock.Mock', 'Mock', ([], {'path': '"""/tmp/"""'}), "(path='/tmp/')\n", (14232, 14246), False, 'from unittest.mock import Mock, patch\n'), ((14421, 14439), 'unittest.mock.Mock', 'Mock', ([], {'path': '"""/tmp/"""'}), "(path='/tmp/')\n", (14425, 14439), False, 'from unittest.mock import Mock, patch\n'), ((14621, 14627), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (14625, 14627), False, 'from unittest.mock import Mock, patch\n'), ((15614, 15674), 'astropy.coordinates.SkyCoord', 'coordinates.SkyCoord', (['(0 * u.deg)', '(0 * u.deg)'], {'frame': '"""galactic"""'}), "(0 * u.deg, 0 * u.deg, frame='galactic')\n", (15634, 15674), False, 'from astropy import coordinates\n'), ((15903, 15939), 'numpy.unique', 'np.unique', (["m83_data['Member ous id']"], {}), "(m83_data['Member ous id'])\n", (15912, 15939), True, 'import numpy as np\n'), ((17564, 17607), 'datetime.datetime.strptime', 'datetime.strptime', (['"""11-11-2011"""', '"""%d-%m-%Y"""'], {}), "('11-11-2011', '%d-%m-%Y')\n", (17581, 17607), False, 'from datetime import datetime\n'), ((22669, 22698), 'urllib.parse.urlparse', 'urlparse', (['urls_to_download[0]'], {}), '(urls_to_download[0])\n', (22677, 22698), False, 'from urllib.parse import urlparse\n'), ((1903, 1929), 'shutil.rmtree', 'shutil.rmtree', (['my_temp_dir'], {}), '(my_temp_dir)\n', (1916, 1929), False, 'import shutil\n'), ((8652, 8686), 're.compile', 're.compile', (['""".*(uid__.*)\\\\.asdm.*"""'], {}), "('.*(uid__.*)\\\\.asdm.*')\n", (8662, 8686), False, 'import re\n'), ((9603, 9632), 'pytest.raises', 'pytest.raises', (['AttributeError'], {}), '(AttributeError)\n', (9616, 9632), False, 'import pytest\n'), ((10289, 10318), 'pytest.raises', 'pytest.raises', (['AttributeError'], {}), '(AttributeError)\n', (10302, 10318), False, 'import pytest\n'), ((14706, 14761), 'unittest.mock.patch', 'patch', (['"""astroquery.alma.core.tarfile"""', 'tarfile_pkg_mock'], {}), "('astroquery.alma.core.tarfile', tarfile_pkg_mock)\n", (14711, 14761), False, 'from unittest.mock import Mock, patch\n'), ((16568, 16594), 'astropy.units.Unit', 'u.Unit', (["mous1['size'].unit"], {}), "(mous1['size'].unit)\n", (16574, 16594), True, 'from astropy import units as u\n'), ((16752, 16777), 'astropy.units.Unit', 'u.Unit', (["mous['size'].unit"], {}), "(mous['size'].unit)\n", (16758, 16777), True, 'from astropy import units as u\n'), ((19312, 19398), 'astropy.coordinates.SkyCoord', 'coordinates.SkyCoord', (['"""5:35:14.461 -5:21:54.41"""'], {'frame': '"""fk5"""', 'unit': '(u.hour, u.deg)'}), "('5:35:14.461 -5:21:54.41', frame='fk5', unit=(u.hour,\n u.deg))\n", (19332, 19398), False, 'from astropy import coordinates\n'), ((19522, 19608), 'astropy.coordinates.SkyCoord', 'coordinates.SkyCoord', (['"""5:35:14.461 -5:21:54.41"""'], {'frame': '"""fk5"""', 'unit': '(u.hour, u.deg)'}), "('5:35:14.461 -5:21:54.41', frame='fk5', unit=(u.hour,\n u.deg))\n", (19542, 19608), False, 'from astropy import coordinates\n'), ((11619, 11647), 'os.path.join', 'os.path.join', (['temp_dir', 'file'], {}), '(temp_dir, file)\n', (11631, 11647), 
False, 'import os\n'), ((12219, 12257), 'unittest.mock.Mock', 'Mock', ([], {'headers': "{'Content-Type': 'fits'}"}), "(headers={'Content-Type': 'fits'})\n", (12223, 12257), False, 'from unittest.mock import Mock, patch\n'), ((13145, 13183), 'unittest.mock.Mock', 'Mock', ([], {'headers': "{'Content-Type': 'fits'}"}), "(headers={'Content-Type': 'fits'})\n", (13149, 13183), False, 'from unittest.mock import Mock, patch\n'), ((14780, 14819), 'unittest.mock.patch', 'patch', (['"""astroquery.alma.core.os.remove"""'], {}), "('astroquery.alma.core.os.remove')\n", (14785, 14819), False, 'from unittest.mock import Mock, patch\n'), ((15075, 15107), 'os.path.join', 'os.path.join', (['temp_dir', '"""foo.py"""'], {}), "(temp_dir, 'foo.py')\n", (15087, 15107), False, 'import os\n'), ((17686, 17720), 'datetime.datetime.strptime', 'datetime.strptime', (['row', '"""%d-%m-%Y"""'], {}), "(row, '%d-%m-%Y')\n", (17703, 17720), False, 'from datetime import datetime\n'), ((24609, 24627), 'os.path.split', 'os.path.split', (['url'], {}), '(url)\n', (24622, 24627), False, 'import os\n'), ((9930, 9947), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (9945, 9947), False, 'from datetime import datetime\n')]
|
# --------------
# Importing header files
import numpy as np
# Path of the file has been stored in variable called 'path'
#New record
new_record=[[50, 9, 4, 1, 0, 0, 40, 0]]
#Code starts here
data=np.genfromtxt(path,delimiter=",",skip_header=1)
##print(data)
census=np.concatenate((data,new_record))
#print(census.shape)
#print(census)
# --------------
#Code starts here
age=census[:,0]
##print(age)
max_age=np.max(age)
##print(max_age)
min_age=np.min(age)
##print(min_age)
age_mean=np.mean(age)
##print(age_mean)
age_std=np.std(age)
##print(age_std)
# --------------
import numpy as np
#Code starts here
##race=census[:,2]
##print(race)
race_0=census[census[:,2]==0]
##print(race_0)
race_1=census[census[:,2]==1]
##print(race_1)
race_2=census[census[:,2]==2]
##print(race_2)
race_3=census[census[:,2]==3]
##print(race_3)
race_4=census[census[:,2]==4]
##print(race_4)
len_0=len(race_0)
print(len_0)
len_1=len(race_1)
len_2=len(race_2)
len_3=len(race_3)
len_4=len(race_4)
print(len_1)
print(len_2)
print(len_3)
print(len_4)
minority=np.array([len_0,len_1,len_2,len_3,len_4])
minority_race=minority.argmin()
print(minority_race)
# --------------
#Code starts here
import numpy as np
senior_citizens=census[census[:,0]>60]
##print(senior_citizens)
working_hours_sum=np.sum(senior_citizens[:,6])
print(working_hours_sum)
senior_citizens_len=len(senior_citizens)
print(senior_citizens_len)
avg_working_hours=working_hours_sum/senior_citizens_len
print(avg_working_hours)
# --------------
#Code starts here
high=census[census[:,1]>10]
low=census[census[:,1]<=10]
##print(high)
##print(low)
avg_pay_high=np.mean(high[:,7])
avg_pay_low=np.mean(low[:,7])
print(avg_pay_high)
print(avg_pay_low)
|
[
"numpy.sum",
"numpy.std",
"numpy.genfromtxt",
"numpy.max",
"numpy.mean",
"numpy.min",
"numpy.array",
"numpy.concatenate"
] |
[((215, 264), 'numpy.genfromtxt', 'np.genfromtxt', (['path'], {'delimiter': '""","""', 'skip_header': '(1)'}), "(path, delimiter=',', skip_header=1)\n", (228, 264), True, 'import numpy as np\n'), ((286, 320), 'numpy.concatenate', 'np.concatenate', (['(data, new_record)'], {}), '((data, new_record))\n', (300, 320), True, 'import numpy as np\n'), ((435, 446), 'numpy.max', 'np.max', (['age'], {}), '(age)\n', (441, 446), True, 'import numpy as np\n'), ((474, 485), 'numpy.min', 'np.min', (['age'], {}), '(age)\n', (480, 485), True, 'import numpy as np\n'), ((514, 526), 'numpy.mean', 'np.mean', (['age'], {}), '(age)\n', (521, 526), True, 'import numpy as np\n'), ((555, 566), 'numpy.std', 'np.std', (['age'], {}), '(age)\n', (561, 566), True, 'import numpy as np\n'), ((1091, 1136), 'numpy.array', 'np.array', (['[len_0, len_1, len_2, len_3, len_4]'], {}), '([len_0, len_1, len_2, len_3, len_4])\n', (1099, 1136), True, 'import numpy as np\n'), ((1330, 1359), 'numpy.sum', 'np.sum', (['senior_citizens[:, 6]'], {}), '(senior_citizens[:, 6])\n', (1336, 1359), True, 'import numpy as np\n'), ((1676, 1695), 'numpy.mean', 'np.mean', (['high[:, 7]'], {}), '(high[:, 7])\n', (1683, 1695), True, 'import numpy as np\n'), ((1708, 1726), 'numpy.mean', 'np.mean', (['low[:, 7]'], {}), '(low[:, 7])\n', (1715, 1726), True, 'import numpy as np\n')]
|
#! /usr/bin/env python3
"""与加载了RNN Classifier导出的Servable的TensorFlow Serving进行通信
"""
import numpy as np
import jieba
import tensorlayer as tl
from grpc.beta import implementations
import predict_pb2
import prediction_service_pb2
from packages import text_regularization as tr
def text_tensor(text, wv):
"""获取文本向量
Args:
text: 待检测文本
wv: 词向量模型
Returns:
[[[ 3.80905056 1.94315064 -0.20703495 -1.31589055 1.9627794
...
2.16935492 2.95426321 -4.71534014 -3.25034237 -11.28901672]]]
"""
text = tr.extractWords(text)
words = jieba.cut(text.strip())
text_sequence = []
for word in words:
try:
text_sequence.append(wv[word])
except KeyError:
text_sequence.append(wv['UNK'])
text_sequence = np.asarray(text_sequence)
sample = text_sequence.reshape(1, len(text_sequence), 200)
return sample
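# Hedged usage sketch (added for illustration; the input string is made up):
# with the word-vector mapping `wv` loaded just below, text_tensor turns a raw
# string into a (1, sequence_length, 200) array that can be attached to the
# serving request, e.g.
#   sample = text_tensor("some text to check", wv)
#   # sample.shape -> (1, number_of_tokens, 200)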
print(" ".join(jieba.cut('分词初始化')))
wv = tl.files.load_npy_to_any(name='../word2vec/output/model_word2vec_200.npy')
host, port = ('localhost', '9000')
channel = implementations.insecure_channel(host, int(port))
stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
request = predict_pb2.PredictRequest()
request.model_spec.name = 'antispam'
|
[
"tensorlayer.files.load_npy_to_any",
"jieba.cut",
"prediction_service_pb2.beta_create_PredictionService_stub",
"predict_pb2.PredictRequest",
"numpy.asarray",
"packages.text_regularization.extractWords"
] |
[((965, 1039), 'tensorlayer.files.load_npy_to_any', 'tl.files.load_npy_to_any', ([], {'name': '"""../word2vec/output/model_word2vec_200.npy"""'}), "(name='../word2vec/output/model_word2vec_200.npy')\n", (989, 1039), True, 'import tensorlayer as tl\n'), ((1143, 1209), 'prediction_service_pb2.beta_create_PredictionService_stub', 'prediction_service_pb2.beta_create_PredictionService_stub', (['channel'], {}), '(channel)\n', (1200, 1209), False, 'import prediction_service_pb2\n'), ((1220, 1248), 'predict_pb2.PredictRequest', 'predict_pb2.PredictRequest', ([], {}), '()\n', (1246, 1248), False, 'import predict_pb2\n'), ((566, 587), 'packages.text_regularization.extractWords', 'tr.extractWords', (['text'], {}), '(text)\n', (581, 587), True, 'from packages import text_regularization as tr\n'), ((815, 840), 'numpy.asarray', 'np.asarray', (['text_sequence'], {}), '(text_sequence)\n', (825, 840), True, 'import numpy as np\n'), ((939, 957), 'jieba.cut', 'jieba.cut', (['"""分词初始化"""'], {}), "('分词初始化')\n", (948, 957), False, 'import jieba\n')]
|
import numpy as np
import tensorflow as tf
def mmd_penalty(sample_qz, sample_pz, pz_scale, kernel='RBF'):
sigma2_p = pz_scale ** 2
n, d = sample_pz.get_shape().as_list()
n = tf.cast(n, tf.int32)
nf = tf.cast(n, tf.float32)
half_size = (n * n - n) / 2
norms_pz = tf.reduce_sum(tf.square(sample_pz), axis=1, keep_dims=True)
dotprods_pz = tf.matmul(sample_pz, sample_pz, transpose_b=True)
distances_pz = norms_pz + tf.transpose(norms_pz) - 2. * dotprods_pz
norms_qz = tf.reduce_sum(tf.square(sample_qz), axis=1, keep_dims=True)
dotprods_qz = tf.matmul(sample_qz, sample_qz, transpose_b=True)
distances_qz = norms_qz + tf.transpose(norms_qz) - 2. * dotprods_qz
dotprods = tf.matmul(sample_qz, sample_pz, transpose_b=True)
distances = norms_qz + tf.transpose(norms_pz) - 2. * dotprods
if kernel == 'RBF':
# Median heuristic for the sigma^2 of Gaussian kernel
'''
sigma2_k = tf.nn.top_k(
tf.reshape(distances, [-1]), half_size).values[half_size - 1]
sigma2_k += tf.nn.top_k(
tf.reshape(distances_qz, [-1]), half_size).values[half_size - 1]
'''
# Maximal heuristic for the sigma^2 of Gaussian kernel
# sigma2_k = tf.nn.top_k(tf.reshape(distances_qz, [-1]), 1).values[0]
# sigma2_k += tf.nn.top_k(tf.reshape(distances, [-1]), 1).values[0]
sigma2_k = d * sigma2_p
res1 = tf.exp( - distances_qz / 2. / sigma2_k)
res1 += tf.exp( - distances_pz / 2. / sigma2_k)
res1 = tf.multiply(res1, 1. - tf.eye(n))
res1 = tf.reduce_sum(res1) / (nf * nf - nf)
res2 = tf.exp( - distances / 2. / sigma2_k)
res2 = tf.reduce_sum(res2) * 2. / (nf * nf)
stat = res1 - res2
elif kernel == 'IMQ':
# k(x, y) = C / (C + ||x - y||^2)
# C = tf.nn.top_k(tf.reshape(distances, [-1]), half_size).values[half_size - 1]
# C += tf.nn.top_k(tf.reshape(distances_qz, [-1]), half_size).values[half_size - 1]
pz_kind = 'normal'
if pz_kind == 'normal':
Cbase = 2. * d * sigma2_p
elif pz_kind == 'sphere':
Cbase = 2.
elif pz_kind == 'uniform':
# E ||x - y||^2 = E[sum (xi - yi)^2]
# = zdim E[(xi - yi)^2]
# = const * zdim
Cbase = d
stat = 0.
for scale in [.1, .2, .5, 1., 2., 5., 10.]:
C = Cbase * scale
res1 = C / (C + distances_qz)
res1 += C / (C + distances_pz)
res1 = tf.multiply(res1, 1. - tf.eye(n))
res1 = tf.reduce_sum(res1) / (nf * nf - nf)
res2 = C / (C + distances)
res2 = tf.reduce_sum(res2) * 2. / (nf * nf)
stat += res1 - res2
else:
assert False
return stat
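# For reference (added comment, derived from the code above): the returned
# statistic is an unbiased sample estimate of MMD^2 between q(z) and p(z),
#   MMD^2 ~= 1/(n(n-1)) * sum_{i != j} [ k(x_i, x_j) + k(y_i, y_j) ]
#            - 2/n^2    * sum_{i, j}     k(x_i, y_j)
# res1 accumulates the two within-sample terms (diagonals removed via 1 - eye(n))
# and res2 the cross term, for either the RBF or the IMQ kernel k.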
def main():
with tf.Session() as sess:
def e(t):
return sess.run(t)
def p(s, t):
print(s, e(t))
n = 10000
d = 64
scale = tf.Variable(1.0, dtype=tf.float32)
sample_qz = scale * tf.random.normal((n, d), dtype=tf.float32)
sample_pz = tf.random.normal((n, d), dtype=tf.float32)
mmd = mmd_penalty(sample_qz, sample_pz, pz_scale=1.0, kernel='IMQ')
e(tf.global_variables_initializer())
for scale_np in np.linspace(-2, +2, 21):
print(scale_np, sess.run(mmd, feed_dict={scale: scale_np}))
if __name__ == "__main__":
main()
|
[
"tensorflow.reduce_sum",
"tensorflow.random.normal",
"tensorflow.global_variables_initializer",
"tensorflow.eye",
"tensorflow.Session",
"tensorflow.transpose",
"tensorflow.cast",
"tensorflow.matmul",
"tensorflow.Variable",
"tensorflow.exp",
"numpy.linspace",
"tensorflow.square"
] |
[((187, 207), 'tensorflow.cast', 'tf.cast', (['n', 'tf.int32'], {}), '(n, tf.int32)\n', (194, 207), True, 'import tensorflow as tf\n'), ((217, 239), 'tensorflow.cast', 'tf.cast', (['n', 'tf.float32'], {}), '(n, tf.float32)\n', (224, 239), True, 'import tensorflow as tf\n'), ((366, 415), 'tensorflow.matmul', 'tf.matmul', (['sample_pz', 'sample_pz'], {'transpose_b': '(True)'}), '(sample_pz, sample_pz, transpose_b=True)\n', (375, 415), True, 'import tensorflow as tf\n'), ((582, 631), 'tensorflow.matmul', 'tf.matmul', (['sample_qz', 'sample_qz'], {'transpose_b': '(True)'}), '(sample_qz, sample_qz, transpose_b=True)\n', (591, 631), True, 'import tensorflow as tf\n'), ((720, 769), 'tensorflow.matmul', 'tf.matmul', (['sample_qz', 'sample_pz'], {'transpose_b': '(True)'}), '(sample_qz, sample_pz, transpose_b=True)\n', (729, 769), True, 'import tensorflow as tf\n'), ((302, 322), 'tensorflow.square', 'tf.square', (['sample_pz'], {}), '(sample_pz)\n', (311, 322), True, 'import tensorflow as tf\n'), ((518, 538), 'tensorflow.square', 'tf.square', (['sample_qz'], {}), '(sample_qz)\n', (527, 538), True, 'import tensorflow as tf\n'), ((1427, 1465), 'tensorflow.exp', 'tf.exp', (['(-distances_qz / 2.0 / sigma2_k)'], {}), '(-distances_qz / 2.0 / sigma2_k)\n', (1433, 1465), True, 'import tensorflow as tf\n'), ((1483, 1521), 'tensorflow.exp', 'tf.exp', (['(-distances_pz / 2.0 / sigma2_k)'], {}), '(-distances_pz / 2.0 / sigma2_k)\n', (1489, 1521), True, 'import tensorflow as tf\n'), ((1639, 1674), 'tensorflow.exp', 'tf.exp', (['(-distances / 2.0 / sigma2_k)'], {}), '(-distances / 2.0 / sigma2_k)\n', (1645, 1674), True, 'import tensorflow as tf\n'), ((2848, 2860), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2858, 2860), True, 'import tensorflow as tf\n'), ((3017, 3051), 'tensorflow.Variable', 'tf.Variable', (['(1.0)'], {'dtype': 'tf.float32'}), '(1.0, dtype=tf.float32)\n', (3028, 3051), True, 'import tensorflow as tf\n'), ((3143, 3185), 'tensorflow.random.normal', 'tf.random.normal', (['(n, d)'], {'dtype': 'tf.float32'}), '((n, d), dtype=tf.float32)\n', (3159, 3185), True, 'import tensorflow as tf\n'), ((3331, 3354), 'numpy.linspace', 'np.linspace', (['(-2)', '(+2)', '(21)'], {}), '(-2, +2, 21)\n', (3342, 3354), True, 'import numpy as np\n'), ((446, 468), 'tensorflow.transpose', 'tf.transpose', (['norms_pz'], {}), '(norms_pz)\n', (458, 468), True, 'import tensorflow as tf\n'), ((662, 684), 'tensorflow.transpose', 'tf.transpose', (['norms_qz'], {}), '(norms_qz)\n', (674, 684), True, 'import tensorflow as tf\n'), ((797, 819), 'tensorflow.transpose', 'tf.transpose', (['norms_pz'], {}), '(norms_pz)\n', (809, 819), True, 'import tensorflow as tf\n'), ((1587, 1606), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['res1'], {}), '(res1)\n', (1600, 1606), True, 'import tensorflow as tf\n'), ((3080, 3122), 'tensorflow.random.normal', 'tf.random.normal', (['(n, d)'], {'dtype': 'tf.float32'}), '((n, d), dtype=tf.float32)\n', (3096, 3122), True, 'import tensorflow as tf\n'), ((3272, 3305), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (3303, 3305), True, 'import tensorflow as tf\n'), ((1561, 1570), 'tensorflow.eye', 'tf.eye', (['n'], {}), '(n)\n', (1567, 1570), True, 'import tensorflow as tf\n'), ((1691, 1710), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['res2'], {}), '(res2)\n', (1704, 1710), True, 'import tensorflow as tf\n'), ((2613, 2632), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['res1'], {}), '(res1)\n', (2626, 2632), True, 'import tensorflow as tf\n'), ((2583, 
2592), 'tensorflow.eye', 'tf.eye', (['n'], {}), '(n)\n', (2589, 2592), True, 'import tensorflow as tf\n'), ((2708, 2727), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['res2'], {}), '(res2)\n', (2721, 2727), True, 'import tensorflow as tf\n')]
|
from pouring_base import Pouring_base
from gym import spaces
from scipy.spatial.transform import Rotation as R
from collections import deque
import math
import numpy as np
import os,sys
FILE_PATH = os.path.abspath(os.path.dirname(__file__))
class Pouring_featured(Pouring_base):
"""A concrete water-pouring gym environment that uses handcrafted features as
observations of the state. Thus, this environment describes a Partially Observable
Markov Decision Process.
Attributes:
max_in_air: Maximum amount of water-particles in the air that is assumed to be
possible. Used for normalization of observations.
"""
def __init__(self,**kwargs):
"""Initialize the water-pouring environment.
Args:
**kwargs: Keyword arguments that are forwarded to the abstract init method
of the base implementation.
"""
self.max_in_air = 40
super(Pouring_featured, self).__init__(**kwargs)
self.action_space = spaces.Box(low=-1,high=1,shape=(3,))
self.observation_space = spaces.Box(low=-1,high=1,shape=(11+(2*self.action_space.shape[0] if self.jerk_punish>0 else 0),))
def _observe(self):
"""Make an observation of the current state by the use of handcrafted features, which
do not describe the full state completely.
Returns:
            An 11- or 17-dimensional numpy array that contains:
1. Bottle Rotation
2. The x-translation of the bottle
3. The y-translation of the bottle
4. This episodes time_step_punish
5. This episodes spill_punish
6. This episodes target_fill_state
7. The number of steps that have been performed since the start of the episode.
8. The fill-level of the glass.
9. The amount of water in the bottle.
10. The amount of water in the air between bottle and glass.
11. The amount of spilled particles.
12-14. If self.jerk_punish > 0, the last performed action.
15-17. If self.jerk_punish > 0, the next to last performed action
All values in the array are normalized to the range -1 to 1.
"""
rotation = R.from_matrix(self.bottle.rotation).as_euler("zyx")[0]
rotation = (rotation-self.min_rotation)/(math.pi-self.min_rotation)
translation_x,translation_y = self.bottle.translation[:2]
translation_x = (translation_x - self.translation_bounds[0][0]) / (self.translation_bounds[0][1]-self.translation_bounds[0][0])
translation_y = (translation_y - self.translation_bounds[1][0]) / (self.translation_bounds[1][1]-self.translation_bounds[1][0])
tsp_obs = ((self.time_step_punish-self.time_step_punish_range[0]) /
(self.time_step_punish_range[1]-self.time_step_punish_range[0]))*2-1
time_obs = (self._step_number/self._max_episode_steps)*2-1
spill_punish_obs = ((self.spill_punish-self.spill_range[0]) /
(self.spill_range[1]-self.spill_range[0]))*2-1
target_fill_obs = ((self.target_fill_state-self.target_fill_range[0]) /
(self.target_fill_range[1]-self.target_fill_range[0]))*2-1
feat_dat = [rotation,translation_x,translation_y,tsp_obs,spill_punish_obs,target_fill_obs,time_obs]
feat_dat.append((self.particle_locations["glass"]/self.max_in_glass)*2-1)
feat_dat.append((self.particle_locations["bottle"]/self.max_particles)*2-1)
feat_dat.append((self.particle_locations["air"]/self.max_in_air)*2-1)
feat_dat.append((self.particle_locations["spilled"]/self.max_spill)*2-1)
if self.jerk_punish>0:
# Extend the observation with the actions from the two last steps.
feat_dat.extend(np.array(self.last_actions)[:-1].flatten())
feat_dat = np.clip(np.array(feat_dat),-1,1)
return feat_dat
|
[
"os.path.dirname",
"numpy.array",
"gym.spaces.Box",
"scipy.spatial.transform.Rotation.from_matrix"
] |
[((215, 240), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (230, 240), False, 'import os, sys\n'), ((1024, 1062), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(-1)', 'high': '(1)', 'shape': '(3,)'}), '(low=-1, high=1, shape=(3,))\n', (1034, 1062), False, 'from gym import spaces\n'), ((1094, 1204), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(-1)', 'high': '(1)', 'shape': '(11 + (2 * self.action_space.shape[0] if self.jerk_punish > 0 else 0),)'}), '(low=-1, high=1, shape=(11 + (2 * self.action_space.shape[0] if \n self.jerk_punish > 0 else 0),))\n', (1104, 1204), False, 'from gym import spaces\n'), ((3984, 4002), 'numpy.array', 'np.array', (['feat_dat'], {}), '(feat_dat)\n', (3992, 4002), True, 'import numpy as np\n'), ((2329, 2364), 'scipy.spatial.transform.Rotation.from_matrix', 'R.from_matrix', (['self.bottle.rotation'], {}), '(self.bottle.rotation)\n', (2342, 2364), True, 'from scipy.spatial.transform import Rotation as R\n'), ((3913, 3940), 'numpy.array', 'np.array', (['self.last_actions'], {}), '(self.last_actions)\n', (3921, 3940), True, 'import numpy as np\n')]
|
'''
Functions to call appropriate constructor functions based on UI data and to link decoder objects in the database
'''
import os
import re
import tempfile
import xmlrpc.client
import pickle
import json
import logging
import numpy as np
from celery import task, chain
from django.http import HttpResponse
from riglib.bmi import extractor, train
from riglib import experiment
@task
def cache_plx(plxfile):
"""
Create cache for plexon file
"""
os.environ['DJANGO_SETTINGS_MODULE'] = 'db.settings'
from .tracker import dbq
from . import namelist
from .tracker import models
from . import dbfunctions as dbfn
from .json_param import Parameters
from .tasktrack import Track
from .tracker.models import TaskEntry, Feature, Sequence, Task, Generator, Subject, DataFile, System, Decoder
from plexon import plexfile
plexfile.openFile(str(plxfile))
@task
def make_bmi(name, clsname, extractorname, entry, cells, channels, binlen, tslice, ssm, pos_key, kin_extractor, zscore):
"""
Create a new Decoder object from training data and save a record to the database
Parameters
----------
name : string
Name assigned to decoder object in the database
clsname : string
BMI algorithm name (passed to bmilist lookup table 'bmis')
extractorname : string
feature extractor algorithm name (passed to bmilist lookup table 'extractors')
entry : models.TaskEntry
Django record of training task
cells : string
Single string containing all the units to be in decoder, matching
format in global regex 'cellname' (used only for spike extractors)
channels : string
Single string containing all the channels to be in decoder; must be a
comma separated list of values with spaces (e.g., "1, 2, 3")
(used only for, e.g., LFP extractors)
binlen : float
Time of spike history to consider
tslice : slice
Task time to use when training the decoder
ssm : string
TODO
pos_key : string
TODO
"""
os.environ['DJANGO_SETTINGS_MODULE'] = 'db.settings'
from .tracker import dbq
from . import namelist
from .tracker import models
from . import dbfunctions as dbfn
from .json_param import Parameters
from .tasktrack import Track
from .tracker.models import TaskEntry, Feature, Sequence, Task, Generator, Subject, DataFile, System, Decoder
cellname = re.compile(r'(\d{1,3})\s*(\w{1})')
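    # Illustrative example (hypothetical input string, added for clarity): a cells
    # argument such as "64a, 64b, 128a" is split by the regex above into
    # [('64', 'a'), ('64', 'b'), ('128', 'a')], which the list comprehension below
    # turns into unit tuples [(64, 1), (64, 2), (128, 1)] since ord('a') - 96 == 1.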
print("make bmi")
extractor_cls = namelist.extractors[extractorname]
print('Training with extractor class:', extractor_cls)
if 'spike' in extractor_cls.feature_type: # e.g., 'spike_counts'
# look at "cells" argument (ignore "channels")
cells = [ (int(c), ord(u) - 96) for c, u in cellname.findall(cells)]
if cells == []:
units = None # use all units by default
# Note: inside training functions (e.g., _train_KFDecoder_manual_control,
# _train_KFDecoder_visual_feedback, etc.), remember to check if units
# variable is None, and if so, set the units from the plx file:
# if units == None:
# units = np.array(plx.units).astype(np.int32)"
else:
unique_cells = []
for c in cells:
if c not in unique_cells:
unique_cells.append(c)
units = np.array(unique_cells).astype(np.int32)
elif ('lfp' in extractor_cls.feature_type) or ('ai_' in extractor_cls.feature_type): # e.g., 'lfp_power'
# look at "channels" argument (ignore "cells")
channels = np.array(channels.split(', ')).astype(np.int32) # convert str to list of numbers
if len(channels) == 0:
channels = [1, 2, 3, 4] # use these channels by default
else:
channels = np.unique(channels)
# units = np.hstack([channels.reshape(-1, 1), np.zeros(channels.reshape(-1, 1).shape, dtype=np.int32)])
units = np.hstack([channels.reshape(-1, 1), np.ones(channels.reshape(-1, 1).shape, dtype=np.int32)])
else:
raise Exception('Unknown extractor class!')
task_update_rate = 60 # NOTE may not be true for all tasks?!
extractor_kwargs = dict()
if extractor_cls == extractor.BinnedSpikeCountsExtractor:
extractor_kwargs['units'] = units
extractor_kwargs['n_subbins'] = max(1, int((1./task_update_rate)/binlen))
elif extractor_cls == extractor.LFPButterBPFPowerExtractor:
extractor_kwargs['channels'] = channels
elif extractor_cls == extractor.LFPMTMPowerExtractor:
extractor_kwargs['channels'] = channels
elif extractor_cls == extractor.AIMTMPowerExtractor:
extractor_kwargs['channels'] = channels
else:
raise Exception("Unknown extractor_cls: %s" % extractor_cls)
database = xmlrpc.client.ServerProxy("http://localhost:8000/RPC2/", allow_none=True)
# list of DataFile objects
datafiles = models.DataFile.objects.filter(entry_id=entry)
# key: a string representing a system name (e.g., 'plexon', 'blackrock', 'task', 'hdf')
# value: a single filename, or a list of filenames if there are more than one for that system
files = dict()
system_names = set(d.system.name for d in datafiles)
for system_name in system_names:
filenames = [d.get_path() for d in datafiles if d.system.name == system_name]
if system_name in ['blackrock', 'blackrock2']:
files[system_name] = filenames # list of (one or more) files
else:
assert(len(filenames) == 1)
files[system_name] = filenames[0] # just one file
training_method = namelist.bmi_algorithms[clsname]
ssm = namelist.bmi_state_space_models[ssm]
kin_extractor_fn = namelist.kin_extractors[kin_extractor]
decoder = training_method(files, extractor_cls, extractor_kwargs, kin_extractor_fn, ssm, units, update_rate=binlen, tslice=tslice, pos_key=pos_key,
zscore=zscore)
decoder.te_id = entry
tf = tempfile.NamedTemporaryFile('wb')
pickle.dump(decoder, tf, 2)
tf.flush()
database.save_bmi(name, int(entry), tf.name)
def cache_and_train(*args, **kwargs):
"""
Cache plexon file (if using plexon system) and train BMI.
"""
os.environ['DJANGO_SETTINGS_MODULE'] = 'db.settings'
from .tracker import dbq
from . import namelist
from .tracker import models
from . import dbfunctions as dbfn
from .json_param import Parameters
from .tasktrack import Track
from .tracker.models import TaskEntry, Feature, Sequence, Task, Generator, Subject, DataFile, System, Decoder
recording_sys = models.KeyValueStore.get('recording_sys', None)
if recording_sys == 'plexon':
print("cache and train")
entry = kwargs['entry']
print(entry)
plxfile = models.DataFile.objects.get(system__name='plexon', entry=entry)
print(plxfile)
if not plxfile.has_cache():
cache = cache_plx.si(plxfile.get_path())
train = make_bmi.si(*args, **kwargs)
chain(cache, train)()
else:
print("calling")
make_bmi.delay(*args, **kwargs)
elif recording_sys == 'blackrock':
make_bmi.delay(*args, **kwargs)
else:
raise Exception('Unknown recording_system!')
def save_new_decoder_from_existing(obj, orig_decoder_record, suffix='_'):
'''
Save a decoder that is created by manipulating the parameters of an older decoder
Parameters
----------
obj: riglib.bmi.Decoder instance
New decoder object to be saved
orig_decoder_record: tracker.models.Decoder instance
Database record of the original decoder
suffix: string, default='_'
The name of the new decoder is created by taking the name of the old decoder and adding the specified suffix
Returns
-------
None
'''
os.environ['DJANGO_SETTINGS_MODULE'] = 'db.settings'
from .tracker import dbq
from . import namelist
from .tracker import models
from . import dbfunctions as dbfn
from .json_param import Parameters
from .tasktrack import Track
from .tracker.models import TaskEntry, Feature, Sequence, Task, Generator, Subject, DataFile, System, Decoder
import riglib.bmi
if not isinstance(obj, riglib.bmi.bmi.Decoder):
raise ValueError("This function is only intended for saving Decoder objects!")
new_decoder_fname = obj.save()
new_decoder_name = orig_decoder_record.name + suffix
training_block_id = orig_decoder_record.entry_id
print("Saving new decoder:", new_decoder_name)
dbq.save_bmi(new_decoder_name, training_block_id, new_decoder_fname)
## Functions to manipulate existing (KF)Decoders. These belong elsewhere
def conv_mm_dec_to_cm(decoder_record):
'''
Convert a mm unit decoder to cm
'''
os.environ['DJANGO_SETTINGS_MODULE'] = 'db.settings'
from .tracker import dbq
from . import namelist
from .tracker import models
from . import dbfunctions as dbfn
from .json_param import Parameters
from .tasktrack import Track
from .tracker.models import TaskEntry, Feature, Sequence, Task, Generator, Subject, DataFile, System, Decoder
decoder_fname = os.path.join('/storage/decoders/', decoder_record.path)
print(decoder_fname)
decoder_name = decoder_record.name
    dec = pickle.load(open(decoder_fname, 'rb'))
from riglib.bmi import train
dec_cm = train.rescale_KFDecoder_units(dec, 10)
new_decoder_basename = os.path.basename(decoder_fname).rstrip('.pkl') + '_cm.pkl'
new_decoder_fname = '/tmp/%s' % new_decoder_basename
    pickle.dump(dec_cm, open(new_decoder_fname, 'wb'))
new_decoder_name = decoder_name + '_cm'
training_block_id = decoder_record.entry_id
print(new_decoder_name)
dbq.save_bmi(new_decoder_name, training_block_id, new_decoder_fname)
def zero_out_SSKF_bias(decoder_record):
os.environ['DJANGO_SETTINGS_MODULE'] = 'db.settings'
from .tracker import dbq
from . import namelist
from .tracker import models
from . import dbfunctions as dbfn
from .json_param import Parameters
from .tasktrack import Track
from .tracker.models import TaskEntry, Feature, Sequence, Task, Generator, Subject, DataFile, System, Decoder
dec = open_decoder_from_record(decoder_record)
dec.filt.C_xpose_Q_inv_C[:,-1] = 0
dec.filt.C_xpose_Q_inv_C[-1,:] = 0
save_new_decoder_from_existing(dec, decoder_record, suffix='_zero_bias')
def conv_kfdecoder_binlen(decoder_record, new_binlen):
os.environ['DJANGO_SETTINGS_MODULE'] = 'db.settings'
from .tracker import dbq
from . import namelist
from .tracker import models
from . import dbfunctions as dbfn
from .json_param import Parameters
from .tasktrack import Track
from .tracker.models import TaskEntry, Feature, Sequence, Task, Generator, Subject, DataFile, System, Decoder
dec = open_decoder_from_record(decoder_record)
dec.change_binlen(new_binlen)
save_new_decoder_from_existing(dec, decoder_record, suffix='_%dHz' % int(1./new_binlen))
def conv_kfdecoder_to_ppfdecoder(decoder_record):
os.environ['DJANGO_SETTINGS_MODULE'] = 'db.settings'
from .tracker import dbq
from . import namelist
from .tracker import models
from . import dbfunctions as dbfn
from .json_param import Parameters
from .tasktrack import Track
from .tracker.models import TaskEntry, Feature, Sequence, Task, Generator, Subject, DataFile, System, Decoder
# Load the decoder
decoder_fname = os.path.join('/storage/decoders/', decoder_record.path)
print(decoder_fname)
decoder_name = decoder_record.name
    dec = pickle.load(open(decoder_fname, 'rb'))
from riglib.bmi import train
dec_ppf = train.convert_KFDecoder_to_PPFDecoder(dec)
new_decoder_basename = os.path.basename(decoder_fname).rstrip('.pkl') + '_ppf.pkl'
new_decoder_fname = '/tmp/%s' % new_decoder_basename
    pickle.dump(dec_ppf, open(new_decoder_fname, 'wb'))
new_decoder_name = decoder_name + '_ppf'
training_block_id = decoder_record.entry_id
print(new_decoder_name)
from .tracker import dbq
dbq.save_bmi(new_decoder_name, training_block_id, new_decoder_fname)
def conv_kfdecoder_to_sskfdecoder(decoder_record):
os.environ['DJANGO_SETTINGS_MODULE'] = 'db.settings'
from .tracker import dbq
from . import namelist
from .tracker import models
from . import dbfunctions as dbfn
from .json_param import Parameters
from .tasktrack import Track
from .tracker.models import TaskEntry, Feature, Sequence, Task, Generator, Subject, DataFile, System, Decoder
dec = open_decoder_from_record(decoder_record)
F, K = dec.filt.get_sskf()
from riglib.bmi import sskfdecoder
filt = sskfdecoder.SteadyStateKalmanFilter(F=F, K=K)
    dec_sskf = sskfdecoder.SSKFDecoder(filt, dec.units, dec.ssm, binlen=dec.binlen)
    save_new_decoder_from_existing(dec_sskf, decoder_record, suffix='_sskf')
def make_kfdecoder_interpolate(decoder_record):
os.environ['DJANGO_SETTINGS_MODULE'] = 'db.settings'
from .tracker import dbq
from . import namelist
from .tracker import models
from . import dbfunctions as dbfn
from .json_param import Parameters
from .tasktrack import Track
from .tracker.models import TaskEntry, Feature, Sequence, Task, Generator, Subject, DataFile, System, Decoder
# Load the decoder
decoder_fname = os.path.join('/storage/decoders/', decoder_record.path)
print(decoder_fname)
decoder_name = decoder_record.name
    dec = pickle.load(open(decoder_fname, 'rb'))
from riglib.bmi import train
dec_ppf = train._interpolate_KFDecoder_state_between_updates(dec)
new_decoder_basename = os.path.basename(decoder_fname).rstrip('.pkl') + '_ppf.pkl'
new_decoder_fname = '/tmp/%s' % new_decoder_basename
    pickle.dump(dec_ppf, open(new_decoder_fname, 'wb'))
new_decoder_name = decoder_name + '_60hz'
training_block_id = decoder_record.entry_id
print(new_decoder_name)
from .tracker import dbq
dbq.save_bmi(new_decoder_name, training_block_id, new_decoder_fname)
|
[
"tempfile.NamedTemporaryFile",
"pickle.dump",
"riglib.bmi.train.rescale_KFDecoder_units",
"riglib.bmi.sskfdecoder.SteadyStateKalmanFilter",
"celery.chain",
"os.path.basename",
"riglib.bmi.sskfdecoder.SSKFDecoder",
"riglib.bmi.train._interpolate_KFDecoder_state_between_updates",
"numpy.array",
"riglib.bmi.train.convert_KFDecoder_to_PPFDecoder",
"os.path.join",
"numpy.unique",
"re.compile"
] |
[((2464, 2500), 're.compile', 're.compile', (['"""(\\\\d{1,3})\\\\s*(\\\\w{1})"""'], {}), "('(\\\\d{1,3})\\\\s*(\\\\w{1})')\n", (2474, 2500), False, 'import re\n'), ((6088, 6121), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', (['"""wb"""'], {}), "('wb')\n", (6115, 6121), False, 'import tempfile\n'), ((6126, 6153), 'pickle.dump', 'pickle.dump', (['decoder', 'tf', '(2)'], {}), '(decoder, tf, 2)\n', (6137, 6153), False, 'import pickle\n'), ((9329, 9384), 'os.path.join', 'os.path.join', (['"""/storage/decoders/"""', 'decoder_record.path'], {}), "('/storage/decoders/', decoder_record.path)\n", (9341, 9384), False, 'import os\n'), ((9538, 9576), 'riglib.bmi.train.rescale_KFDecoder_units', 'train.rescale_KFDecoder_units', (['dec', '(10)'], {}), '(dec, 10)\n', (9567, 9576), False, 'from riglib.bmi import train\n'), ((11654, 11709), 'os.path.join', 'os.path.join', (['"""/storage/decoders/"""', 'decoder_record.path'], {}), "('/storage/decoders/', decoder_record.path)\n", (11666, 11709), False, 'import os\n'), ((11865, 11907), 'riglib.bmi.train.convert_KFDecoder_to_PPFDecoder', 'train.convert_KFDecoder_to_PPFDecoder', (['dec'], {}), '(dec)\n', (11902, 11907), False, 'from riglib.bmi import train\n'), ((12887, 12932), 'riglib.bmi.sskfdecoder.SteadyStateKalmanFilter', 'sskfdecoder.SteadyStateKalmanFilter', ([], {'F': 'F', 'K': 'K'}), '(F=F, K=K)\n', (12922, 12932), False, 'from riglib.bmi import sskfdecoder\n'), ((12948, 13020), 'riglib.bmi.sskfdecoder.SSKFDecoder', 'sskfdecoder.SSKFDecoder', (['filt', 'dec.units', 'dec.ssm'], {'binlen': 'decoder.binlen'}), '(filt, dec.units, dec.ssm, binlen=decoder.binlen)\n', (12971, 13020), False, 'from riglib.bmi import sskfdecoder\n'), ((13544, 13599), 'os.path.join', 'os.path.join', (['"""/storage/decoders/"""', 'decoder_record.path'], {}), "('/storage/decoders/', decoder_record.path)\n", (13556, 13599), False, 'import os\n'), ((13755, 13810), 'riglib.bmi.train._interpolate_KFDecoder_state_between_updates', 'train._interpolate_KFDecoder_state_between_updates', (['dec'], {}), '(dec)\n', (13805, 13810), False, 'from riglib.bmi import train\n'), ((3894, 3913), 'numpy.unique', 'np.unique', (['channels'], {}), '(channels)\n', (3903, 3913), True, 'import numpy as np\n'), ((7149, 7168), 'celery.chain', 'chain', (['cache', 'train'], {}), '(cache, train)\n', (7154, 7168), False, 'from celery import task, chain\n'), ((9605, 9636), 'os.path.basename', 'os.path.basename', (['decoder_fname'], {}), '(decoder_fname)\n', (9621, 9636), False, 'import os\n'), ((11936, 11967), 'os.path.basename', 'os.path.basename', (['decoder_fname'], {}), '(decoder_fname)\n', (11952, 11967), False, 'import os\n'), ((13839, 13870), 'os.path.basename', 'os.path.basename', (['decoder_fname'], {}), '(decoder_fname)\n', (13855, 13870), False, 'import os\n'), ((3451, 3473), 'numpy.array', 'np.array', (['unique_cells'], {}), '(unique_cells)\n', (3459, 3473), True, 'import numpy as np\n')]
|
"""
PREPARE
Before running train, you need to run prepare.py with the respective task.
Example (in the command line):
> cd to root dir
> conda activate nlp
> python src/prepare.py --do_format --task 1
"""
#NOTE: the following is a workaround for AML to load modules
import os, sys; sys.path.append(os.path.dirname(os.path.realpath(__file__)))
import os
import spacy
import pandas as pd
import numpy as np
import string
import re
import argparse
from sklearn.model_selection import StratifiedShuffleSplit
# Custom functions
import sys
sys.path.append('./src')
import helper as he
import data as dt
import custom as cu
logger = he.get_logger(location=__name__)
class Clean():
"""Text preprocessing and cleaning steps
SUPPORTED LANGUAGES
- EN
- DE
- IT
- ES
- FR
- XX (multi - NER only)
SUPPORTED MODULES
- Remove Noise
Remove formatting and other noise that may be contained in emails or
other document types.
- Get Placeholders
Placeholders for common items such as dates, times, urls but also
custom customer IDs.
- Remove Stopwords
Stopwords can be added by adding a language specific stopword file
to /assets. Format: "assets/stopwords_<language>.txt".
- Lemmatize
"""
def __init__(self, task,
download_source=False,
download_train=False,
inference=False):
self.task = task
self.language = cu.params.get('language')
# Load data class
self.dt = dt.Data(task=self.task, inference=inference)
# Download data, if needed
if download_train:
self.dt.download('data_dir', dir = 'data_dir', source = 'datastore')
# Load spacy model
self.nlp = he.load_spacy_model(language=self.language, disable=['ner','parser','tagger'])
# Create stopword list
stopwords_active = []
## Load names
try:
names = self.dt.load('fn_names', dir = 'asset_dir', file_type = 'list')
stopwords_active = stopwords_active + names
except FileNotFoundError as e:
logger.warning(f'[WARNING] No names list loaded: {e}')
## Load stopwords
try:
stopwords = self.dt.load('fn_stopwords', dir = 'asset_dir', file_type = 'list')
stopwords_active = stopwords_active + stopwords
except FileNotFoundError as e:
logger.warning(f'[WARNING] No stopwords list loaded: {e}')
## Add to Spacy stopword list
        logger.warning(f'[INFO] Active stopwords list length: {len(stopwords_active)}')
for w in stopwords_active:
self.nlp.vocab[w.replace('\n','')].is_stop = True
def remove(self, line,
rm_email_formatting=False,
rm_email_header=False,
rm_email_footer=False,
rm_punctuation=False):
"""Remove content from text"""
if not isinstance(line, str):
line = str(line)
# Customer Remove
line = cu.remove(line)
if rm_email_formatting:
line = re.sub(r'<[^>]+>', ' ', line) # Remove HTML tags
line = re.sub(r'^(.*\.eml)', ' ', line) # remove header for system generated emails
if rm_email_header:
#DE/EN
if self.language == 'en' or self.language == 'de':
line = re.sub(r'\b(AW|RE|VON|WG|FWD|FW)(\:| )', '', line, flags=re.I)
#DE
if self.language == 'de':
line = re.sub(r'(Sehr geehrte( Damen und Herren.)?.)|hallo.|guten( tag)?.', '', line, flags=re.I)
if rm_email_footer:
#EN
if self.language == 'en':
line = re.sub(r'\bkind regards.*', '', line, flags=re.I)
#DE
if self.language == 'de':
line = re.sub(r'\b(mit )?(beste|viele|liebe|freundlich\w+)? (gr[u,ü][ß,ss].*)', '', line, flags=re.I)
line = re.sub(r'\b(besten|herzlichen|lieben) dank.*', '', line, flags=re.I)
line = re.sub(r'\bvielen dank für ihr verständnis.*', '', line, flags=re.I)
line = re.sub(r'\bvielen dank im voraus.*', '', line, flags=re.I)
line = re.sub(r'\b(mfg|m\.f\.g) .*','', line, flags=re.I)
line = re.sub(r'\b(lg) .*','',line, flags=re.I)
line = re.sub(r'\b(meinem iPhone gesendet) .*','',line, flags=re.I)
line = re.sub(r'\b(Gesendet mit der (WEB|GMX)) .*','',line, flags=re.I)
line = re.sub(r'\b(Diese E-Mail wurde von Avast) .*','',line, flags=re.I)
# Remove remaining characters
##NOTE: may break other regex
if rm_punctuation:
line = re.sub('['+string.punctuation+']',' ',line)
return line
def get_placeholder(self, line,
rp_generic=False,
rp_custom=False,
rp_num=False):
"""Replace text with type specfic placeholders"""
# Customer placeholders
line = cu.get_placeholder(line)
# Generic placeholder
if rp_generic:
line = re.sub(r' \+[0-9]+', ' ', line) # remove phone numbers
            line = re.sub(r'0x([a-z]|[0-9])+ ',' PER ',line, flags=re.IGNORECASE) # replace 0x... hex tokens with placeholder
line = re.sub(r'[0-9]{2}[\/.,:][0-9]{2}[\/.,:][0-9]{2,4}', ' PDT ', line) # remove dates and time, replace with placeholder
line = re.sub(r'([0-9]{2,3}[\.]){3}[0-9]{1,3}',' PIP ',line) # replace ip with placeholder
line = re.sub(r'[0-9]{1,2}[\/.,:][0-9]{1,2}', ' PTI ', line) # remove only time, replace with placeholder
line = re.sub(r'[\w\.-]+@[\w\.-]+', ' PEM ', line) # remove emails
line = re.sub(r'http[s]?://(?:[a-z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-f][0-9a-f]))+', ' PUR ', line) # Remove links
line = re.sub(r'€|\$|(USD)|(EURO)', ' PMO ', line)
# Placeholders for numerics
if rp_num:
line = re.sub(r' ([0-9]{4,30}) ',' PNL ', line) # placeholder for long stand alone numbers
line = re.sub(r' [0-9]{2,3} ',' PNS ', line) # placeholder for short stand alone numbers
return line
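    # Hedged example for the placeholder pass above (made-up input, not taken from
    # project data): with rp_generic=True a line like
    #   "mail me at foo@bar.com before 12/31/2021"
    # comes back roughly as
    #   "mail me at  PEM  before  PDT "
    # i.e. e-mail addresses and full dates are swapped for the PEM / PDT tokens.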
def tokenize(self, line, lemmatize = False, rm_stopwords = False):
"""Tokenizer for non DL tasks"""
if not isinstance(line, str):
line = str(line)
if lemmatize and rm_stopwords:
line = ' '.join([t.lemma_ for t in self.nlp(line) if not t.is_stop])
elif lemmatize:
line = ' '.join([t.lemma_ for t in self.nlp(line)])
elif rm_stopwords:
line = ' '.join([t.text for t in self.nlp(line) if not t.is_stop])
return line
def transform(self, texts,
to_lower = False,
# Remove
rm_email_formatting = False,
rm_email_header = False,
rm_email_footer = False,
rm_punctuation = False,
# Placeholders
rp_generic = False,
rp_num = False,
# Tokenize
lemmatize = False,
rm_stopwords = False,
return_token = False,
# Whitespace
remove_whitespace = True
):
"""Main run function for cleaning process"""
if isinstance(texts, str):
texts = [texts]
# Convert to series for improved efficiency
df_texts = pd.Series(texts)
# Avoid loading errors
df_texts = df_texts.replace('\t', ' ', regex=True)
# Remove noise
if any((rm_email_formatting, rm_email_header,
rm_email_footer, rm_punctuation)):
df_texts = df_texts.apply(lambda x: self.remove(x,
rm_email_formatting = rm_email_formatting,
rm_email_header = rm_email_header,
rm_email_footer = rm_email_footer,
rm_punctuation = rm_punctuation))
# Replace placeholders
if any((rp_generic, rp_num)):
df_texts = df_texts.apply(lambda x: self.get_placeholder(x,
rp_generic = rp_generic,
rp_num = rp_num))
# Tokenize text
if any((lemmatize, rm_stopwords, return_token)):
df_texts = df_texts.apply(self.tokenize,
lemmatize = lemmatize,
rm_stopwords = rm_stopwords)
# To lower
if to_lower:
df_texts = df_texts.apply(str.lower)
# Remove spacing
if remove_whitespace:
df_texts = df_texts.apply(lambda x: " ".join(x.split()))
# Return Tokens
if return_token:
return [t.split(' ') for t in df_texts.to_list()]
else:
return df_texts.to_list()
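    # Hedged usage sketch (parameter choices are illustrative, mirroring
    # transform_by_task below, not prescribed elsewhere):
    #   cl = Clean(task=1)
    #   cleaned = cl.transform(["RE: hello <b>world</b>"],
    #                          rm_email_formatting=True,
    #                          rm_email_header=True,
    #                          rp_generic=True)
    # which strips the HTML tags and the leading "RE:" before returning the
    # cleaned strings as a list.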
def transform_by_task(self, text):
# CUSTOM FUNCTION
if cu.tasks.get(str(self.task)).get('type') == 'classification':
return self.transform(text,
rm_email_formatting = True,
rm_email_header = True,
rm_email_footer = True,
rp_generic = True)[0]
elif cu.tasks.get(str(self.task)).get('type') == 'multi_classification':
return self.transform(text,
rm_email_formatting = True,
rm_email_header = True,
rm_email_footer = True,
rp_generic = True)[0]
elif cu.tasks.get(str(self.task)).get('type') == 'ner':
return text[0]
elif cu.tasks.get(str(self.task)).get('type') == 'qa':
return self.transform(text,
to_lower = True,
# Remove
rm_email_formatting = True,
rm_email_header = True,
rm_email_footer = True,
rm_punctuation = True,
# Placeholders
rp_generic = True,
rp_num = True,
# Tokenize
lemmatize = True,
rm_stopwords = True,
return_token = True
)[0]
else:
logger.warning('[WARNING] No transform by task found.')
return text[0]
def prepare_classification(task, do_format, train_split, min_cat_occurance,
min_char_length, register_data):
# Get clean object
cl = Clean(task=task, download_source=True)
# Load data
if not os.path.isfile(cl.dt.get_path('fn_prep', dir = 'data_dir')) or do_format:
data = dt.get_dataset(cl, source="cdb")
else:
data = cl.dt.load('fn_prep', dir = 'data_dir')
logger.warning(f'Data Length : {len(data)}')
# Load text & label field
text_raw = cu.load_text(data)
data['label'] = cu.load_label(data, task)
if cu.tasks.get(str(task)).get('type') == 'multi_classification':
data['label'] = data['label'].str.replace(', ', '_').str.replace(' ', '_')
flat_labels = [row['label'].split(',') for index, row in data.iterrows()]
labels_clean = []
for labels in flat_labels:
for label in labels:
labels_clean.append(label)
label_list_raw = pd.DataFrame({'label':labels_clean})
label_list_raw = label_list_raw[label_list_raw.label != '']
label_list_raw = label_list_raw.label.drop_duplicates()
elif cu.tasks.get(str(task)).get('type') == 'classification': # in case of single label classification
label_list_raw = data.label.drop_duplicates()
# Clean text
data['text'] = cl.transform(text_raw,
rm_email_formatting = True,
rm_email_header = True,
rm_email_footer = True,
rp_generic = True)
# Filter by length
data = he.remove_short(data, 'text', min_char_length=min_char_length)
logger.warning(f'Data Length : {len(data)}')
# Remove duplicates
data_red = data.drop_duplicates(subset=['text'])
logger.warning(f'Data Length : {len(data_red)}')
# Min class occurance
if cu.tasks.get(str(task)).get('type') == 'classification':
data_red = data_red[data_red.groupby('label').label.transform('size') > min_cat_occurance]
elif cu.tasks.get(str(task)).get('type') == 'multi_classification':
# Split rows
data_transform = data_red[['id', 'label']].copy()
data_transform['label'] = [row['label'].split(",") for index, row in data_transform.iterrows()] # pipe it to list
data_transform = pd.DataFrame({'index':data_transform.index.repeat(data_transform.label.str.len()), 'label':np.concatenate(data_transform.label.values)}) # explode df
data_transform = data_transform[data_transform.groupby('label').label.transform('size') > min_cat_occurance] # count for min occurance and only keep relevant ones
data_transform = data_transform.groupby(['index'])['label'].apply(lambda x: ','.join(x.astype(str))).reset_index() # re-merge
data_transform = data_transform.set_index('index')
del data_red['label']
data_red = pd.concat([data_red, data_transform], join='inner', axis=1)
logger.warning(f'Data Length : {len(data_red)}')
data_red = data_red.tail(300000).reset_index(drop=True).copy()
#TODO: .tail() temp is for debugging
## There is a memory issue for the EN dataset, due to its size. Needs further investigation.
# Label list
if cu.tasks.get(str(task)).get('type') == 'multi_classification': # 2 = task for multi-label classification
flat_labels = [row['label'].split(',') for index, row in data_red.iterrows()]
labels_clean = []
for labels in flat_labels:
for label in labels:
labels_clean.append(label)
label_list = pd.DataFrame({'label':labels_clean})
label_list = label_list[label_list.label != '']
label_list = label_list.label.drop_duplicates()
elif cu.tasks.get(str(task)).get('type') == 'classification': # in case of single label classification
label_list = data_red.label.drop_duplicates()
logger.warning(f'Excluded labels: {list(set(label_list_raw)-set(label_list))}')
# Split data
strf_split = StratifiedShuffleSplit(n_splits = 1, test_size=(1-train_split), random_state=200)
if cu.tasks.get(str(task)).get('type') == 'classification':
for train_index, test_index in strf_split.split(data_red, data_red['label']):
df_cat_train = data_red.loc[train_index]
df_cat_test = data_red.loc[test_index]
elif cu.tasks.get(str(task)).get('type') == 'multi_classification':
for train_index, test_index in strf_split.split(data_red, pd.DataFrame({'label':[l.split(',')[0] for l in data_red['label']]})['label']):
df_cat_train = data_red.loc[train_index]
df_cat_test = data_red.loc[test_index]
# Save data
cl.dt.save(data_red, fn = 'fn_clean', dir = 'data_dir')
cl.dt.save(df_cat_train[['text','label']], fn = 'fn_train', dir = 'data_dir')
cl.dt.save(df_cat_test[['text','label']], fn = 'fn_test', dir = 'data_dir')
cl.dt.save(label_list, fn = 'fn_label', header=False, dir = 'data_dir')
# Upload data
if register_data:
cl.dt.upload('data_dir', destination='dataset')
def prepare_ner(task, do_format, register_data):
pass
def prepare_qa(task, do_format, min_char_length, register_data):
# Get clean object
cl = Clean(task=task, download_source=True)
# Load data
if not os.path.isfile(cl.dt.get_path('fn_prep', dir = 'data_dir')) or do_format:
data = dt.get_dataset(cl, source="cdb")
else:
data = cl.dt.load('fn_prep', dir = 'data_dir')
logger.warning(f'Data Length : {len(data)}')
# Filter relevant question answer pairs
data = cu.filter_qa(data)
logger.warning(f'Data Length : {len(data)}')
# Load question & answer fields
question, answer = cu.load_qa(data)
# Clean text
data['question_clean'] = cl.transform(question,
to_lower = True,
rm_email_formatting = True,
rm_email_header = True,
rm_email_footer = True,
rm_punctuation = True,
rp_generic = True,
rp_num = True,
lemmatize = True,
rm_stopwords = True
)
data['answer_clean'] = cl.transform(answer,
to_lower = True,
rm_email_formatting = True,
rm_email_header = True,
rm_email_footer = True,
rm_punctuation = True,
rp_generic = True,
rp_num = True,
lemmatize = True,
rm_stopwords = True
)
# For display
data['answer_text_clean'] = cl.transform(answer,
rm_email_formatting = True,
rm_email_header = True,
rm_email_footer = True
)
# Filter by length
data = he.remove_short(data, 'question_clean', min_char_length=min_char_length)
logger.warning(f'Data Length : {len(data)}')
# Remove duplicates
data = data.drop_duplicates(subset=['question_clean'])
logger.warning(f'Data Length : {len(data)}')
data = data.reset_index(drop=True).copy()
# Save data
cl.dt.save(data, fn = 'fn_clean', dir = 'data_dir')
# Upload data
if register_data:
cl.dt.upload('data_dir', destination='dataset')
def main(task=1,
do_format=False,
split=0.9,
min_cat_occurance=300,
min_char_length=20,
register_data=False):
logger.warning(f'Running <PREPARE> for task {task}')
task_type = cu.tasks.get(str(task)).get('type')
if 'classification' == task_type:
prepare_classification(task, do_format, split, min_cat_occurance, min_char_length, register_data)
elif 'multi_classification' == task_type:
prepare_classification(task, do_format, split, min_cat_occurance, min_char_length, register_data)
elif 'ner' == task_type:
prepare_ner(task, do_format, register_data)
elif 'qa' == task_type:
prepare_qa(task, do_format, min_char_length, register_data)
else:
logger.warning('[ERROR] TASK TYPE UNKNOWN. Nothing was processed.')
def run():
"""Run from the command line"""
parser = argparse.ArgumentParser()
parser.add_argument("--task",
default=1,
type=int,
help="Task where: \
-task 1 : classification subcat \
-task 2 : classification cat \
-task 3 : ner \
-task 4 : qa")
parser.add_argument('--do_format',
action='store_true',
help="Avoid reloading and normalizing data")
parser.add_argument("--split",
default=0.9,
type=float,
help="Train test split. Dev split is taken from train set.")
parser.add_argument("--min_cat_occurance",
default=300,
type=int,
help="Min occurance required by category.")
parser.add_argument("--min_char_length",
default=20,
type=int,
help="")
parser.add_argument('--register_data',
action='store_true',
help="")
args = parser.parse_args()
main(args.task, args.do_format, args.split, min_cat_occurance=args.min_cat_occurance,
min_char_length=args.min_char_length, register_data=args.register_data)
if __name__ == '__main__':
run()
|
[
"argparse.ArgumentParser",
"helper.load_spacy_model",
"custom.filter_qa",
"custom.load_qa",
"helper.get_logger",
"sys.path.append",
"pandas.DataFrame",
"re.sub",
"pandas.concat",
"os.path.realpath",
"custom.load_text",
"pandas.Series",
"data.Data",
"helper.remove_short",
"custom.get_placeholder",
"custom.remove",
"numpy.concatenate",
"data.get_dataset",
"sklearn.model_selection.StratifiedShuffleSplit",
"custom.params.get",
"custom.load_label"
] |
[((537, 561), 'sys.path.append', 'sys.path.append', (['"""./src"""'], {}), "('./src')\n", (552, 561), False, 'import sys\n'), ((630, 662), 'helper.get_logger', 'he.get_logger', ([], {'location': '__name__'}), '(location=__name__)\n', (643, 662), True, 'import helper as he\n'), ((11464, 11482), 'custom.load_text', 'cu.load_text', (['data'], {}), '(data)\n', (11476, 11482), True, 'import custom as cu\n'), ((11503, 11528), 'custom.load_label', 'cu.load_label', (['data', 'task'], {}), '(data, task)\n', (11516, 11528), True, 'import custom as cu\n'), ((12553, 12615), 'helper.remove_short', 'he.remove_short', (['data', '"""text"""'], {'min_char_length': 'min_char_length'}), "(data, 'text', min_char_length=min_char_length)\n", (12568, 12615), True, 'import helper as he\n'), ((14973, 15052), 'sklearn.model_selection.StratifiedShuffleSplit', 'StratifiedShuffleSplit', ([], {'n_splits': '(1)', 'test_size': '(1 - train_split)', 'random_state': '(200)'}), '(n_splits=1, test_size=1 - train_split, random_state=200)\n', (14995, 15052), False, 'from sklearn.model_selection import StratifiedShuffleSplit\n'), ((16568, 16586), 'custom.filter_qa', 'cu.filter_qa', (['data'], {}), '(data)\n', (16580, 16586), True, 'import custom as cu\n'), ((16696, 16712), 'custom.load_qa', 'cu.load_qa', (['data'], {}), '(data)\n', (16706, 16712), True, 'import custom as cu\n'), ((17999, 18071), 'helper.remove_short', 'he.remove_short', (['data', '"""question_clean"""'], {'min_char_length': 'min_char_length'}), "(data, 'question_clean', min_char_length=min_char_length)\n", (18014, 18071), True, 'import helper as he\n'), ((19374, 19399), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (19397, 19399), False, 'import argparse\n'), ((316, 342), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (332, 342), False, 'import os\n'), ((1471, 1496), 'custom.params.get', 'cu.params.get', (['"""language"""'], {}), "('language')\n", (1484, 1496), True, 'import custom as cu\n'), ((1550, 1594), 'data.Data', 'dt.Data', ([], {'task': 'self.task', 'inference': 'inference'}), '(task=self.task, inference=inference)\n', (1557, 1594), True, 'import data as dt\n'), ((1786, 1871), 'helper.load_spacy_model', 'he.load_spacy_model', ([], {'language': 'self.language', 'disable': "['ner', 'parser', 'tagger']"}), "(language=self.language, disable=['ner', 'parser', 'tagger']\n )\n", (1805, 1871), True, 'import helper as he\n'), ((3105, 3120), 'custom.remove', 'cu.remove', (['line'], {}), '(line)\n', (3114, 3120), True, 'import custom as cu\n'), ((5141, 5165), 'custom.get_placeholder', 'cu.get_placeholder', (['line'], {}), '(line)\n', (5159, 5165), True, 'import custom as cu\n'), ((7740, 7756), 'pandas.Series', 'pd.Series', (['texts'], {}), '(texts)\n', (7749, 7756), True, 'import pandas as pd\n'), ((11271, 11303), 'data.get_dataset', 'dt.get_dataset', (['cl'], {'source': '"""cdb"""'}), "(cl, source='cdb')\n", (11285, 11303), True, 'import data as dt\n'), ((11927, 11964), 'pandas.DataFrame', 'pd.DataFrame', (["{'label': labels_clean}"], {}), "({'label': labels_clean})\n", (11939, 11964), True, 'import pandas as pd\n'), ((14544, 14581), 'pandas.DataFrame', 'pd.DataFrame', (["{'label': labels_clean}"], {}), "({'label': labels_clean})\n", (14556, 14581), True, 'import pandas as pd\n'), ((16365, 16397), 'data.get_dataset', 'dt.get_dataset', (['cl'], {'source': '"""cdb"""'}), "(cl, source='cdb')\n", (16379, 16397), True, 'import data as dt\n'), ((3173, 3201), 're.sub', 're.sub', (['"""<[^>]+>"""', '""" """', 'line'], 
{}), "('<[^>]+>', ' ', line)\n", (3179, 3201), False, 'import re\n'), ((3241, 3273), 're.sub', 're.sub', (['"""^(.*\\\\.eml)"""', '""" """', 'line'], {}), "('^(.*\\\\.eml)', ' ', line)\n", (3247, 3273), False, 'import re\n'), ((4802, 4851), 're.sub', 're.sub', (["('[' + string.punctuation + ']')", '""" """', 'line'], {}), "('[' + string.punctuation + ']', ' ', line)\n", (4808, 4851), False, 'import re\n'), ((5239, 5270), 're.sub', 're.sub', (['""" \\\\+[0-9]+"""', '""" """', 'line'], {}), "(' \\\\+[0-9]+', ' ', line)\n", (5245, 5270), False, 'import re\n'), ((5313, 5370), 're.sub', 're.sub', (['"""0x([a-z]|[0-9])+ """', '""" PER """', 'line', 're.IGNORECASE'], {}), "('0x([a-z]|[0-9])+ ', ' PER ', line, re.IGNORECASE)\n", (5319, 5370), False, 'import re\n'), ((5400, 5467), 're.sub', 're.sub', (['"""[0-9]{2}[\\\\/.,:][0-9]{2}[\\\\/.,:][0-9]{2,4}"""', '""" PDT """', 'line'], {}), "('[0-9]{2}[\\\\/.,:][0-9]{2}[\\\\/.,:][0-9]{2,4}', ' PDT ', line)\n", (5406, 5467), False, 'import re\n'), ((5536, 5591), 're.sub', 're.sub', (['"""([0-9]{2,3}[\\\\.]){3}[0-9]{1,3}"""', '""" PIP """', 'line'], {}), "('([0-9]{2,3}[\\\\.]){3}[0-9]{1,3}', ' PIP ', line)\n", (5542, 5591), False, 'import re\n'), ((5639, 5692), 're.sub', 're.sub', (['"""[0-9]{1,2}[\\\\/.,:][0-9]{1,2}"""', '""" PTI """', 'line'], {}), "('[0-9]{1,2}[\\\\/.,:][0-9]{1,2}', ' PTI ', line)\n", (5645, 5692), False, 'import re\n'), ((5757, 5803), 're.sub', 're.sub', (['"""[\\\\w\\\\.-]+@[\\\\w\\\\.-]+"""', '""" PEM """', 'line'], {}), "('[\\\\w\\\\.-]+@[\\\\w\\\\.-]+', ' PEM ', line)\n", (5763, 5803), False, 'import re\n'), ((5836, 5946), 're.sub', 're.sub', (['"""http[s]?://(?:[a-z]|[0-9]|[$-_@.&+]|[!*\\\\(\\\\),]|(?:%[0-9a-f][0-9a-f]))+"""', '""" PUR """', 'line'], {}), "(\n 'http[s]?://(?:[a-z]|[0-9]|[$-_@.&+]|[!*\\\\(\\\\),]|(?:%[0-9a-f][0-9a-f]))+'\n , ' PUR ', line)\n", (5842, 5946), False, 'import re\n'), ((5970, 6013), 're.sub', 're.sub', (['"""€|\\\\$|(USD)|(EURO)"""', '""" PMO """', 'line'], {}), "('€|\\\\$|(USD)|(EURO)', ' PMO ', line)\n", (5976, 6013), False, 'import re\n'), ((6097, 6137), 're.sub', 're.sub', (['""" ([0-9]{4,30}) """', '""" PNL """', 'line'], {}), "(' ([0-9]{4,30}) ', ' PNL ', line)\n", (6103, 6137), False, 'import re\n'), ((6200, 6237), 're.sub', 're.sub', (['""" [0-9]{2,3} """', '""" PNS """', 'line'], {}), "(' [0-9]{2,3} ', ' PNS ', line)\n", (6206, 6237), False, 'import re\n'), ((13851, 13910), 'pandas.concat', 'pd.concat', (['[data_red, data_transform]'], {'join': '"""inner"""', 'axis': '(1)'}), "([data_red, data_transform], join='inner', axis=1)\n", (13860, 13910), True, 'import pandas as pd\n'), ((3452, 3515), 're.sub', 're.sub', (['"""\\\\b(AW|RE|VON|WG|FWD|FW)(\\\\:| )"""', '""""""', 'line'], {'flags': 're.I'}), "('\\\\b(AW|RE|VON|WG|FWD|FW)(\\\\:| )', '', line, flags=re.I)\n", (3458, 3515), False, 'import re\n'), ((3592, 3685), 're.sub', 're.sub', (['"""(Sehr geehrte( Damen und Herren.)?.)|hallo.|guten( tag)?."""', '""""""', 'line'], {'flags': 're.I'}), "('(Sehr geehrte( Damen und Herren.)?.)|hallo.|guten( tag)?.', '',\n line, flags=re.I)\n", (3598, 3685), False, 'import re\n'), ((3789, 3838), 're.sub', 're.sub', (['"""\\\\bkind regards.*"""', '""""""', 'line'], {'flags': 're.I'}), "('\\\\bkind regards.*', '', line, flags=re.I)\n", (3795, 3838), False, 'import re\n'), ((3916, 4015), 're.sub', 're.sub', (['"""\\\\b(mit )?(beste|viele|liebe|freundlich\\\\w+)? (gr[u,ü][ß,ss].*)"""', '""""""', 'line'], {'flags': 're.I'}), "('\\\\b(mit )?(beste|viele|liebe|freundlich\\\\w+)? 
(gr[u,ü][ß,ss].*)',\n '', line, flags=re.I)\n", (3922, 4015), False, 'import re\n'), ((4034, 4102), 're.sub', 're.sub', (['"""\\\\b(besten|herzlichen|lieben) dank.*"""', '""""""', 'line'], {'flags': 're.I'}), "('\\\\b(besten|herzlichen|lieben) dank.*', '', line, flags=re.I)\n", (4040, 4102), False, 'import re\n'), ((4126, 4194), 're.sub', 're.sub', (['"""\\\\bvielen dank für ihr verständnis.*"""', '""""""', 'line'], {'flags': 're.I'}), "('\\\\bvielen dank für ihr verständnis.*', '', line, flags=re.I)\n", (4132, 4194), False, 'import re\n'), ((4219, 4277), 're.sub', 're.sub', (['"""\\\\bvielen dank im voraus.*"""', '""""""', 'line'], {'flags': 're.I'}), "('\\\\bvielen dank im voraus.*', '', line, flags=re.I)\n", (4225, 4277), False, 'import re\n'), ((4302, 4355), 're.sub', 're.sub', (['"""\\\\b(mfg|m\\\\.f\\\\.g) .*"""', '""""""', 'line'], {'flags': 're.I'}), "('\\\\b(mfg|m\\\\.f\\\\.g) .*', '', line, flags=re.I)\n", (4308, 4355), False, 'import re\n'), ((4376, 4418), 're.sub', 're.sub', (['"""\\\\b(lg) .*"""', '""""""', 'line'], {'flags': 're.I'}), "('\\\\b(lg) .*', '', line, flags=re.I)\n", (4382, 4418), False, 'import re\n'), ((4440, 4502), 're.sub', 're.sub', (['"""\\\\b(meinem iPhone gesendet) .*"""', '""""""', 'line'], {'flags': 're.I'}), "('\\\\b(meinem iPhone gesendet) .*', '', line, flags=re.I)\n", (4446, 4502), False, 'import re\n'), ((4524, 4590), 're.sub', 're.sub', (['"""\\\\b(Gesendet mit der (WEB|GMX)) .*"""', '""""""', 'line'], {'flags': 're.I'}), "('\\\\b(Gesendet mit der (WEB|GMX)) .*', '', line, flags=re.I)\n", (4530, 4590), False, 'import re\n'), ((4612, 4680), 're.sub', 're.sub', (['"""\\\\b(Diese E-Mail wurde von Avast) .*"""', '""""""', 'line'], {'flags': 're.I'}), "('\\\\b(Diese E-Mail wurde von Avast) .*', '', line, flags=re.I)\n", (4618, 4680), False, 'import re\n'), ((13379, 13422), 'numpy.concatenate', 'np.concatenate', (['data_transform.label.values'], {}), '(data_transform.label.values)\n', (13393, 13422), True, 'import numpy as np\n')]
|
import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import glob
import sys
import os
import pickle
def extract_points(images):
'''
args:
- images: list of strings containing the filenames of the calibration image set
returns:
- mtx: camera calibration matrix
- dist: distortion coefficients
'''
obj = np.zeros((6 * 9, 3), np.float32)
obj[:, :2] = np.mgrid[0:9, 0:6].T.reshape(-1, 2)
obj_points = []
img_points = []
for filename in images:
image = cv2.imread(filename)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
ret, corners = cv2.findChessboardCorners(gray, (9, 6), None)
if ret:
obj_points.append(obj)
img_points.append(corners)
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(obj_points, img_points, gray.shape[::-1], None, None)
dist_pickle = {}
dist_pickle["mtx"] = mtx
dist_pickle["dist"] = dist
pickle.dump(dist_pickle, open("dist_pickle.p", "wb"))
return mtx, dist
def camera_cal(image, mtx, dist, filename=None, save=False):
    '''
    args:
        - image: image array to undistort (ignored when `filename` is given)
        - mtx: camera matrix from `extract_points()`, or loaded from saved file
        - dist: distortion coefficients from `extract_points()`, or loaded from saved file
        - filename: optional filename (paths also accepted); if given, the image is read from disk
        - save: if True, also write the undistorted image to ../undistorted/
    returns:
        - dst: undistorted image
    '''
if filename:
image = mpimg.imread(filename)
# undistort image
dst = cv2.undistort(image, mtx, dist, None, mtx)
# write to new image for checking purposes
if save:
split = filename.split('.')
new_filename = filename.split('.')[-2].split('/')[-1]
cv2.imwrite("../undistorted/{}_undist.{}".format(new_filename, split[-1]), dst)
return dst
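# Hedged example (not part of the original script): how a later stage might reload
# the calibration that extract_points() pickled to "dist_pickle.p" and undistort a
# new image. The function name and paths here are illustrative placeholders.
def undistort_from_pickle(image_path, pickle_path="dist_pickle.p"):
    with open(pickle_path, "rb") as f:
        cal = pickle.load(f)
    image = mpimg.imread(image_path)
    return cv2.undistort(image, cal["mtx"], cal["dist"], None, cal["mtx"])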
if __name__ == "__main__":
if len(sys.argv) > 1:
# preferably a path without a trailing '/'
image_list = glob.glob(sys.argv[1] + "/*")
else:
image_list = glob.glob("../camera_cal/*")
mtx, dist = extract_points(image_list)
os.makedirs("../undistorted/", exist_ok = True)
dst = camera_cal("../camera_cal/calibration1.jpg", mtx, dist)
|
[
"cv2.findChessboardCorners",
"matplotlib.image.imread",
"os.makedirs",
"cv2.cvtColor",
"numpy.zeros",
"cv2.imread",
"cv2.calibrateCamera",
"glob.glob",
"cv2.undistort"
] |
[((398, 430), 'numpy.zeros', 'np.zeros', (['(6 * 9, 3)', 'np.float32'], {}), '((6 * 9, 3), np.float32)\n', (406, 430), True, 'import numpy as np\n'), ((844, 917), 'cv2.calibrateCamera', 'cv2.calibrateCamera', (['obj_points', 'img_points', 'gray.shape[::-1]', 'None', 'None'], {}), '(obj_points, img_points, gray.shape[::-1], None, None)\n', (863, 917), False, 'import cv2\n'), ((1542, 1584), 'cv2.undistort', 'cv2.undistort', (['image', 'mtx', 'dist', 'None', 'mtx'], {}), '(image, mtx, dist, None, mtx)\n', (1555, 1584), False, 'import cv2\n'), ((2116, 2161), 'os.makedirs', 'os.makedirs', (['"""../undistorted/"""'], {'exist_ok': '(True)'}), "('../undistorted/', exist_ok=True)\n", (2127, 2161), False, 'import os\n'), ((571, 591), 'cv2.imread', 'cv2.imread', (['filename'], {}), '(filename)\n', (581, 591), False, 'import cv2\n'), ((607, 646), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (619, 646), False, 'import cv2\n'), ((671, 716), 'cv2.findChessboardCorners', 'cv2.findChessboardCorners', (['gray', '(9, 6)', 'None'], {}), '(gray, (9, 6), None)\n', (696, 716), False, 'import cv2\n'), ((1477, 1499), 'matplotlib.image.imread', 'mpimg.imread', (['filename'], {}), '(filename)\n', (1489, 1499), True, 'import matplotlib.image as mpimg\n'), ((1976, 2005), 'glob.glob', 'glob.glob', (["(sys.argv[1] + '/*')"], {}), "(sys.argv[1] + '/*')\n", (1985, 2005), False, 'import glob\n'), ((2038, 2066), 'glob.glob', 'glob.glob', (['"""../camera_cal/*"""'], {}), "('../camera_cal/*')\n", (2047, 2066), False, 'import glob\n')]
|
"""
DEPRECATED
USE kwcoco.metrics instead!
Faster pure-python versions of sklearn functions that avoid expensive checks
and label rectifications. It is assumed that all labels are consecutive
non-negative integers.
"""
from scipy.sparse import coo_matrix
import numpy as np
def confusion_matrix(y_true, y_pred, n_labels=None, labels=None,
sample_weight=None):
"""
faster version of sklearn confusion matrix that avoids the
expensive checks and label rectification
Runs in about 0.7ms
Returns:
ndarray: matrix where rows represent real and cols represent pred
Example:
>>> y_true = np.array([0, 0, 0, 0, 1, 1, 1, 0, 0, 1])
>>> y_pred = np.array([0, 0, 0, 0, 0, 0, 0, 1, 1, 1])
>>> confusion_matrix(y_true, y_pred, 2)
array([[4, 2],
[3, 1]])
>>> confusion_matrix(y_true, y_pred, 2).ravel()
array([4, 2, 3, 1])
Benchmarks:
import ubelt as ub
y_true = np.random.randint(0, 2, 10000)
y_pred = np.random.randint(0, 2, 10000)
n = 1000
for timer in ub.Timerit(n, bestof=10, label='py-time'):
sample_weight = [1] * len(y_true)
confusion_matrix(y_true, y_pred, 2, sample_weight=sample_weight)
for timer in ub.Timerit(n, bestof=10, label='np-time'):
            sample_weight = np.ones(len(y_true), dtype=int)
confusion_matrix(y_true, y_pred, 2, sample_weight=sample_weight)
"""
if sample_weight is None:
        sample_weight = np.ones(len(y_true), dtype=int)
if n_labels is None:
n_labels = len(labels)
CM = coo_matrix((sample_weight, (y_true, y_pred)),
shape=(n_labels, n_labels),
dtype=np.int64).toarray()
return CM
def global_accuracy_from_confusion(cfsn):
# real is rows, pred is columns
n_ii = np.diag(cfsn)
# sum over pred = columns = axis1
t_i = cfsn.sum(axis=1)
global_acc = n_ii.sum() / t_i.sum()
return global_acc
def class_accuracy_from_confusion(cfsn):
# real is rows, pred is columns
n_ii = np.diag(cfsn)
# sum over pred = columns = axis1
t_i = cfsn.sum(axis=1)
per_class_acc = (n_ii / t_i).mean()
class_acc = np.nan_to_num(per_class_acc).mean()
return class_acc
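# Hedged usage sketch: reuses the confusion matrix from the doctest above
# (rows are real labels, columns are predictions); the helper name is illustrative.
def _example_accuracies():
    cfsn = np.array([[4, 2],
                     [3, 1]])
    print(global_accuracy_from_confusion(cfsn))  # (4 + 1) / (6 + 4) = 0.5
    print(class_accuracy_from_confusion(cfsn))   # mean of [4/6, 1/4] ~= 0.458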
|
[
"numpy.diag",
"scipy.sparse.coo_matrix",
"numpy.nan_to_num"
] |
[((1890, 1903), 'numpy.diag', 'np.diag', (['cfsn'], {}), '(cfsn)\n', (1897, 1903), True, 'import numpy as np\n'), ((2121, 2134), 'numpy.diag', 'np.diag', (['cfsn'], {}), '(cfsn)\n', (2128, 2134), True, 'import numpy as np\n'), ((1645, 1738), 'scipy.sparse.coo_matrix', 'coo_matrix', (['(sample_weight, (y_true, y_pred))'], {'shape': '(n_labels, n_labels)', 'dtype': 'np.int64'}), '((sample_weight, (y_true, y_pred)), shape=(n_labels, n_labels),\n dtype=np.int64)\n', (1655, 1738), False, 'from scipy.sparse import coo_matrix\n'), ((2256, 2284), 'numpy.nan_to_num', 'np.nan_to_num', (['per_class_acc'], {}), '(per_class_acc)\n', (2269, 2284), True, 'import numpy as np\n')]
|
"""
YANK Health Report Notebook formatter
This module handles the figure formatting and processing needed to minimize the code shown in the Health Report
Jupyter Notebook. All data processing and analysis is handled by the main multistate.analyzers package;
this module is mainly concerned with figure formatting.
"""
import os
import yaml
import numpy as np
from scipy import interpolate
from matplotlib import pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
from matplotlib import gridspec
from pymbar import MBAR
import seaborn as sns
from simtk import unit as units
from .. import analyze
kB = units.BOLTZMANN_CONSTANT_kB * units.AVOGADRO_CONSTANT_NA
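# Hedged usage sketch: in the Health Report notebook this class is typically built
# for an experiment and queried for figures. The exact constructor arguments come
# from analyze.ExperimentAnalyzer and are an assumption here, not shown in this file:
#     report = HealthReportData(store_directory)
#     report.general_simulation_data()
#     equilibration_figure = report.generate_equilibration_plots()
#     mixing_figure = report.generate_mixing_plot()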
class HealthReportData(analyze.ExperimentAnalyzer):
"""
Class which houses the data used for the notebook and the generation of all plots including formatting
"""
def general_simulation_data(self):
"""
        General purpose simulation data: the number of iterations, replicas, states, and atoms.
        This prints the data as a formatted table.
"""
general = self.get_general_simulation_data()
iterations = {}
nreplicas = {}
nstates = {}
natoms = {}
for phase_name in self.phase_names:
iterations[phase_name] = general[phase_name]['iterations']
nreplicas[phase_name] = general[phase_name]['nreplicas']
nstates[phase_name] = general[phase_name]['nstates']
natoms[phase_name] = general[phase_name]['natoms']
leniter = max(len('Iterations'), *[len(str(i)) for i in iterations.values()]) + 2
lenreplica = max(len('Replicas'), *[len(str(i)) for i in nreplicas.values()]) + 2
lenstates = max(len('States'), *[len(str(i)) for i in nstates.values()]) + 2
lennatoms = max(len('Num Atoms'), *[len(str(i)) for i in natoms.values()]) + 2
lenleftcol = max(len('Phase'), *[len(phase) for phase in self.phase_names]) + 2
lines = []
headstring = ''
headstring += ('{:^' + '{}'.format(lenleftcol) + '}').format('Phase') + '|'
headstring += ('{:^' + '{}'.format(leniter) + '}').format('Iterations') + '|'
headstring += ('{:^' + '{}'.format(lenreplica) + '}').format('Replicas') + '|'
headstring += ('{:^' + '{}'.format(lenstates) + '}').format('States') + '|'
headstring += ('{:^' + '{}'.format(lennatoms) + '}').format('Num Atoms')
lines.append(headstring)
lenline = len(headstring)
topdiv = '=' * lenline
lines.append(topdiv)
for phase in self.phase_names:
phasestring = ''
phasestring += ('{:^' + '{}'.format(lenleftcol) + '}').format(phase) + '|'
phasestring += ('{:^' + '{}'.format(leniter) + '}').format(iterations[phase]) + '|'
phasestring += ('{:^' + '{}'.format(lenreplica) + '}').format(nreplicas[phase]) + '|'
phasestring += ('{:^' + '{}'.format(lenstates) + '}').format(nstates[phase]) + '|'
phasestring += ('{:^' + '{}'.format(lennatoms) + '}').format(natoms[phase])
lines.append(phasestring)
lines.append('-' * lenline)
for line in lines:
print(line)
def generate_equilibration_plots(self, discard_from_start=1):
"""
Create the equilibration scatter plots showing the trend lines, correlation time,
and number of effective samples
        Parameters
        ----------
        discard_from_start : int, Optional
            Number of initial samples to discard before analysis. Default: 1.
        Returns
-------
equilibration_figure : matplotlib.figure
Figure showing the equilibration between both phases
"""
serial_data = self.get_equilibration_data(discard_from_start=discard_from_start)
# Adjust figure size
plt.rcParams['figure.figsize'] = 20, 6 * self.nphases * 2
plot_grid = gridspec.GridSpec(self.nphases, 1) # Vertical distribution
equilibration_figure = plt.figure()
# Add some space between the figures
equilibration_figure.subplots_adjust(hspace=0.4)
for i, phase_name in enumerate(self.phase_names):
phase_data = serial_data[phase_name]
sub_grid = gridspec.GridSpecFromSubplotSpec(3, 1, subplot_spec=plot_grid[i])
# FIRST SUBPLOT: energy scatter
# Attach subplot to figure
p = equilibration_figure.add_subplot(sub_grid[0])
# Data assignment for plot generation
y = self.u_ns[phase_name]
N = y.size
x = np.arange(N)
# Scatter plot
p.plot(x, y, 'k.')
# Smoothed equilibrium, this is very crude but it works for large data
tck = interpolate.splrep(x, y, k=5, s=N * 1E7)
smoothed = interpolate.splev(x, tck, der=0)
p.plot(x, smoothed, '-r', linewidth=4)
# Nequil line
ylim = p.get_ylim()
p.vlines(self.nequils[phase_name], *ylim, colors='b', linewidth=4)
p.set_ylim(*ylim) # Reset limits in case vlines expanded them
p.set_xlim([0, N])
# Set text
p.set_title(phase_name + " phase", fontsize=20)
p.set_ylabel(r'$\Sigma_n u_n$ in kT', fontsize=20)
# Extra info in text boxes
            subsample_string = 'Subsample Rate: {0:.2f}\nDecorrelated Samples: {1:d}'.format(self.g_ts[phase_name], int(
np.floor(self.Neff_maxs[phase_name])))
if np.mean([0, N]) > self.nequils[phase_name]:
txt_horz = 'right'
txt_xcoord = 0.95
else:
txt_horz = 'left'
txt_xcoord = 0.05
            smooth_index = {'right': -1, 'left': 0}  # endpoint of the smoothed curve on the chosen side
if np.mean(ylim) > smoothed[smooth_index[txt_horz]]:
txt_vert = 'top'
txt_ycoord = 0.95
else:
txt_vert = 'bottom'
txt_ycoord = 0.05
p.text(txt_xcoord, txt_ycoord,
subsample_string,
verticalalignment=txt_vert, horizontalalignment=txt_horz,
transform=p.transAxes,
fontsize=15,
bbox={'alpha': 1.0, 'facecolor': 'white'}
)
# SECOND SUBPLOT: g_t trace
i_t = phase_data['iterations_considered']
g_i = phase_data['subsample_rate_by_iterations_considered']
n_effective_i = phase_data['effective_samples_by_iterations_considered']
x = i_t
g = equilibration_figure.add_subplot(sub_grid[1])
g.plot(x, g_i)
ylim = g.get_ylim()
g.vlines(self.nequils[phase_name], *ylim, colors='b', linewidth=4)
g.set_ylim(*ylim) # Reset limits in case vlines expanded them
g.set_xlim([0, N])
g.set_ylabel(r'Decor. Time', fontsize=20)
            # THIRD SUBPLOT: Neff trace
ne = equilibration_figure.add_subplot(sub_grid[2])
ne.plot(x, n_effective_i)
ylim = ne.get_ylim()
ne.vlines(self.nequils[phase_name], *ylim, colors='b', linewidth=4)
ne.set_ylim(*ylim) # Reset limits in case vlines expanded them
ne.set_xlim([0, N])
ne.set_ylabel(r'Neff samples', fontsize=20)
ne.set_xlabel(r'Iteration', fontsize=20)
return equilibration_figure
def compute_rmsds(self):
        raise NotImplementedError("This function is still a prototype and has segfault issues, please disable for now")
# """Compute the RMSD of the ligand and the receptor by state"""
# if not self._equilibration_run:
# raise RuntimeError("Cannot run RMSD without first running the equilibration. Please run the "
# "corresponding function/cell first!")
# plt.rcParams['figure.figsize'] = 20, 6 * self.nphases * 2
# rmsd_figure, subplots = plt.subplots(2, 1)
# for i, phase_name in enumerate(self.phase_names):
# if phase_name not in self._serialized_data:
# self._serialized_data[phase_name] = {}
# self._serialized_data[phase_name]['rmsd'] = {}
# serial = self._serialized_data[phase_name]['rmsd']
# analyzer = self.analyzers[phase_name]
# reporter = analyzer.reporter
# metadata = reporter.read_dict('metadata')
# topography = mmtools.utils.deserialize(metadata['topography'])
# topology = topography.topology
# test_positions = reporter.read_sampler_states(0, analysis_particles_only=True)[0]
# atoms_analysis = test_positions.positions.shape[0]
# topology = topology.subset(range(atoms_analysis))
# iterations = self.iterations[phase_name]
# positions = np.zeros([iterations, atoms_analysis, 3])
# for j in range(iterations):
# sampler_states = reporter.read_sampler_states(j, analysis_particles_only=True)
# # Deconvolute
# thermo_states = reporter.read_replica_thermodynamic_states(iteration=j)
# sampler = sampler_states[thermo_states[0]]
# positions[j, :, :] = sampler.positions
# trajectory = md.Trajectory(positions, topology)
# rmsd_ligand = md.rmsd(trajectory, trajectory, frame=0, atom_indices=topography.ligand_atoms)
# rmsd_recpetor = md.rmsd(trajectory, trajectory, frame=0, atom_indices=topography.receptor_atoms)
# serial['ligand'] = rmsd_ligand.tolist()
# serial['receptor'] = rmsd_recpetor.tolist()
# p = subplots[i]
# x = range(iterations)
# p.set_title(phase_name + " phase", fontsize=20)
# p.plot(x, rmsd_ligand, label='Ligand RMSD')
# p.plot(x, rmsd_recpetor, label='Receptor RMSD')
# p.legend()
# p.set_xlim([0, iterations])
# ylim = p.get_ylim()
# p.set_ylim([0, ylim[-1]])
# p.set_ylabel(r'RMSD (nm)', fontsize=20)
# p.set_xlabel(r'Iteration', fontsize=20)
# return rmsd_figure
def generate_decorrelation_plots(self, decorrelation_threshold=0.1):
"""
Parameters
----------
decorrelation_threshold : float, Optional
When number of decorrelated samples is less than this percent of the total number of samples, raise a
warning. Default: `0.1`.
Returns
-------
decorrelation_figure : matplotlib.figure
Figure showing the decorrelation pie chart data of how the samples are distributed between equilibration,
correlation, and decorrelation.
"""
if not self._general_run or not self._equilibration_run:
raise RuntimeError("Cannot generate decorrelation data without general simulation data and equilibration "
"data first! Please run the corresponding functions/cells.")
# This will exist because of _equilibration_run
eq_data = self.get_equilibration_data(discard_from_start=self._n_discarded)
# Readjust figure output
plt.rcParams['figure.figsize'] = 20, 8
decorrelation_figure = plt.figure()
decorrelation_figure.subplots_adjust(wspace=0.2)
plotkeys = [100 + (10 * self.nphases) + (i + 1) for i in range(self.nphases)] # Horizontal distribution
for phase_name, plotid in zip(self.phase_names, plotkeys):
serial = eq_data[phase_name]
# Create subplot
p = decorrelation_figure.add_subplot(plotid)
labels = ['Decorrelated', 'Correlated', 'Equilibration']
colors = ['#2c7bb6', '#abd0e0', '#fdae61'] # blue, light blue, and orange
explode = [0, 0, 0.0]
n_iter = self.iterations[phase_name]
decor = serial['count_decorrelated_samples']
eq = serial['count_total_equilibration_samples']
cor = serial['count_correlated_samples']
dat = np.array([decor, cor, eq]) / float(n_iter)
if dat[0] <= decorrelation_threshold:
colors[0] = '#d7191c' # Red for warning
patch, txt, autotxt = p.pie(
dat,
explode=explode,
labels=labels,
colors=colors,
autopct='%1.1f%%',
shadow=True,
startangle=90 + 360 * dat[0] / 2, # put center of decor at top
counterclock=False,
textprops={'fontsize': 14}
)
for tx in txt: # This is the only way I have found to adjust the label font size
tx.set_fontsize(18)
p.axis('equal')
p.set_title(phase_name + " phase", fontsize=20, y=1.05)
# Generate warning if need be
if dat[0] <= decorrelation_threshold:
p.text(
0.5, -0.1,
"Warning! Fewer than {0:.1f}% samples are\nequilibrated and decorelated!".format(
decorrelation_threshold * 100),
verticalalignment='bottom', horizontalalignment='center',
transform=p.transAxes,
fontsize=20,
color='red',
bbox={'alpha': 1.0, 'facecolor': 'white', 'lw': 0, 'pad': 0}
)
return decorrelation_figure
def generate_mixing_plot(self, mixing_cutoff=0.05, mixing_warning_threshold=0.90, cmap_override=None):
"""
Generate the state diffusion mixing map as an image instead of array of number
Parameters
----------
mixing_cutoff : float
Minimal level of mixing percent from state `i` to `j` that will be plotted.
Domain: [0,1]
Default: 0.05.
mixing_warning_threshold : float
Level of mixing where transition from state `i` to `j` generates a warning based on percent of total swaps.
Domain (mixing_cutoff, 1)
Default: `0.90`.
cmap_override : None or string
            If not None, use matplotlib's 'Blues' colormap instead of the custom map defined here, which is
            useful when the custom figure renders too white or you want a different look.
Returns
-------
mixing_figure : matplotlib.figure
Figure showing the state mixing as a color diffusion map instead of grid of numbers
"""
mixing_serial = self.get_mixing_data()
# Set up image
mixing_figure, subplots = plt.subplots(1, 2)
# Create custom cmap goes from white to pure blue, goes red if the threshold is reached
if mixing_cutoff is None:
mixing_cutoff = 0
if mixing_warning_threshold <= mixing_cutoff:
raise ValueError("mixing_warning_threshold must be larger than mixing_cutoff")
if (mixing_warning_threshold > 1 or mixing_cutoff > 1 or
mixing_warning_threshold < 0 or mixing_cutoff < 0):
raise ValueError("mixing_warning_threshold and mixing_cutoff must be between [0,1]")
cdict = {'red': ((0.0, 1.0, 1.0),
(mixing_cutoff, 1.0, 1.0),
(mixing_warning_threshold, 0.0, 0.0),
(mixing_warning_threshold, 1.0, 1.0),
(1.0, 1.0, 1.0)),
'green': ((0.0, 1.0, 1.0),
(mixing_cutoff, 1.0, 1.0),
(mixing_warning_threshold, 0.0, 0.0),
(1.0, 0.0, 0.0)),
'blue': ((0.0, 1.0, 1.0),
(mixing_cutoff, 1.0, 1.0),
(mixing_warning_threshold, 1.0, 1.0),
(mixing_warning_threshold, 0.0, 0.0),
(1.0, 0.0, 0.0))}
if cmap_override is not None:
# Use this cmap instead if your results are too diffuse to see over the white
cmap = plt.get_cmap("Blues")
else:
cmap = LinearSegmentedColormap('BlueWarnRed', cdict)
# Plot a diffusing mixing map for each phase.
for phase_name, subplot in zip(self.phase_names, subplots):
serial = mixing_serial[phase_name]
transition_matrix = serial['transitions']
eigenvalues = serial['eigenvalues']
statistical_inefficiency = serial['stat_inefficiency']
# Without vmin/vmax, the image normalizes the values to mixing_data.max
# which screws up the warning colormap.
# Can also use norm=NoNorm(), but that makes the colorbar manipulation fail.
output_image = subplot.imshow(transition_matrix, aspect='equal',
cmap=cmap, vmin=0, vmax=1)
# Add colorbar.
decimal = 2 # Precision setting
nticks = 11
# The color bar has to be configured independently of the source image
# or it cant be truncated to only show the data. i.e. it would instead
# go 0-1 always.
ubound = np.min([np.around(transition_matrix.max(), decimals=decimal) + 10 ** (-decimal), 1])
lbound = np.max([np.around(transition_matrix.min(), decimals=decimal) - 10 ** (-decimal), 0])
boundslice = np.linspace(lbound, ubound, 256)
cbar = plt.colorbar(output_image, ax=subplot, orientation='vertical',
boundaries=boundslice,
values=boundslice[1:],
format='%.{}f'.format(decimal))
# Update ticks.
ticks = np.linspace(lbound, ubound, nticks)
cbar.set_ticks(ticks)
# Title: Perron eigenvalue, equilibration time and statistical inefficiency.
perron_eigenvalue = eigenvalues[1]
title_txt = (phase_name + ' phase\n'
'Perron eigenvalue: {}\n'
'State equilibration timescale: ~{} iterations\n')
if perron_eigenvalue >= 1:
                title_txt = title_txt.format('1.0', r'$\infty$')
else:
equilibration_timescale = 1.0 / (1.0 - perron_eigenvalue)
title_txt = title_txt.format('{:.5f}', '{:.1f}')
title_txt = title_txt.format(perron_eigenvalue, equilibration_timescale)
title_txt += 'Replica state index statistical inefficiency: {:.3f}'.format(statistical_inefficiency)
subplot.set_title(title_txt, fontsize=20, y=1.05)
# Display Warning.
if np.any(transition_matrix >= mixing_warning_threshold):
subplot.text(
0.5, -0.2,
("Warning!\nThere were states that less than {0:.2f}% swaps!\n"
"Consider adding more states!".format((1 - mixing_warning_threshold) * 100)),
verticalalignment='bottom', horizontalalignment='center',
transform=subplot.transAxes,
fontsize=20,
color='red',
bbox={'alpha': 1.0, 'facecolor': 'white', 'lw': 0, 'pad': 0}
)
return mixing_figure
def generate_replica_mixing_plot(self, phase_stacked_replica_plots=False):
"""
Generate the replica trajectory mixing plots. Show the state of each replica as a function of simulation time
Parameters
----------
phase_stacked_replica_plots : boolean, Default: False
Determine if the phases should be shown side by side, or one on top of the other. If True, the two phases
will be shown with phase 1 on top and phase 2 on bottom.
Returns
-------
replica_figure : matplotlib.figure
Figure showing the replica state trajectories for both phases
"""
# Determine max number of states
max_n_replicas = 0
for i, phase_name in enumerate(self.phase_names):
# Gather state NK
analyzer = self.analyzers[phase_name]
n_replicas = analyzer.reporter.n_replicas
max_n_replicas = max(n_replicas, max_n_replicas)
# Create Parent Gridspec
if phase_stacked_replica_plots:
plot_grid = gridspec.GridSpec(2, 1)
plt.rcParams['figure.figsize'] = 20, max_n_replicas * 6
else:
plot_grid = gridspec.GridSpec(1, 2)
plt.rcParams['figure.figsize'] = 20, max_n_replicas * 3
replica_figure = plt.figure()
for i, phase_name in enumerate(self.phase_names):
# Gather state NK
analyzer = self.analyzers[phase_name]
sampled_energies, _, _, state_kn = analyzer.read_energies()
n_replicas, n_states, n_iterations = sampled_energies.shape
# Create subgrid
sub_grid = gridspec.GridSpecFromSubplotSpec(n_replicas, 1, subplot_spec=plot_grid[i])
# Loop through all states
for replica_index in range(n_replicas):
# Add plot
plot = replica_figure.add_subplot(sub_grid[replica_index])
# Actually plot
plot.plot(state_kn[replica_index, :], 'k.')
# Format plot
plot.set_yticks([])
plot.set_xlim([0, n_iterations])
plot.set_ylim([0, n_states])
if replica_index < n_replicas - 1:
plot.set_xticks([])
plot.set_ylabel('{}'.format(replica_index))
if replica_index == 0: # Title
plot.set_title('{} phase'.format(phase_name), fontsize=20)
self._replica_mixing_run = True
return replica_figure
def generate_free_energy(self):
fe_data = self.get_experiment_free_energy_data()
delta_f = fe_data['free_energy_diff']
delta_h = fe_data['enthalpy_diff']
delta_f_err = fe_data['free_energy_diff_error']
delta_h_err = fe_data['enthalpy_diff_error']
delta_f_unit = fe_data['free_energy_diff_unit']
delta_h_unit = fe_data['enthalpy_diff_unit']
delta_f_err_unit = fe_data['free_energy_diff_error_unit']
delta_h_err_unit = fe_data['enthalpy_diff_error_unit']
# Attempt to guess type of calculation
calculation_type = ''
for phase in self.phase_names:
if 'complex' in phase:
calculation_type = ' of binding'
elif 'solvent1' in phase:
calculation_type = ' of solvation'
print('Free energy{:<13}: {:9.3f} +- {:.3f} kT ({:.3f} +- {:.3f} kcal/mol)'.format(
calculation_type, delta_f, delta_f_err, delta_f_unit / units.kilocalories_per_mole,
delta_f_err_unit / units.kilocalories_per_mole))
for phase in self.phase_names:
delta_f_phase = fe_data[phase]['free_energy_diff']
delta_f_err_phase = fe_data[phase]['free_energy_diff_error']
            delta_f_ssc_phase = fe_data[phase]['free_energy_diff_standard_state_correction']
            print('DeltaG {:<17}: {:9.3f} +- {:.3f} kT'.format(phase, delta_f_phase,
                                                               delta_f_err_phase))
            if delta_f_ssc_phase != 0.0:
                print('DeltaG {:<17}: {:18.3f} kT'.format('standard state correction', delta_f_ssc_phase))
print('')
print('Enthalpy{:<16}: {:9.3f} +- {:.3f} kT ({:.3f} +- {:.3f} kcal/mol)'.format(
calculation_type, delta_h, delta_h_err, delta_h_unit / units.kilocalories_per_mole,
delta_h_err_unit / units.kilocalories_per_mole)
)
def free_energy_trace(self, discard_from_start=1, n_trace=10):
"""
        Trace the free energy by keeping fewer and fewer samples in both the forward and reverse direction
        Parameters
        ----------
        discard_from_start : int, Optional
            Number of initial samples to discard (the first sample is the minimized structure). Default: 1.
        n_trace : int, Optional
            Number of points to compute along the trace. Default: 10.
        Returns
        -------
        free_energy_trace_figure : matplotlib.figure
            Figure showing the forward and reverse free energy traces for each phase and for the combined phases
"""
trace_spacing = 1.0/n_trace
def format_trace_plot(plot: plt.Axes, trace_forward: np.ndarray, trace_reverse: np.ndarray):
x = np.arange(n_trace + 1)[1:] * trace_spacing * 100
plot.errorbar(x, trace_forward[:, 0], yerr=2 * trace_forward[:, 1], ecolor='b',
elinewidth=0, mec='none', mew=0, linestyle='None',
zorder=10)
plot.plot(x, trace_forward[:, 0], 'b-', marker='o', mec='b', mfc='w', label='Forward', zorder=20,)
plot.errorbar(x, trace_reverse[:, 0], yerr=2 * trace_reverse[:, 1], ecolor='r',
elinewidth=0, mec='none', mew=0, linestyle='None',
zorder=10)
plot.plot(x, trace_reverse[:, 0], 'r-', marker='o', mec='r', mfc='w', label='Reverse', zorder=20)
y_fill_upper = [trace_forward[-1, 0] + 2 * trace_forward[-1, 1]] * 2
y_fill_lower = [trace_forward[-1, 0] - 2 * trace_forward[-1, 1]] * 2
xlim = [0, 100]
plot.fill_between(xlim, y_fill_lower, y_fill_upper, color='orchid', zorder=5)
plot.set_xlim(xlim)
plot.legend()
plot.set_xlabel("% Samples Analyzed", fontsize=20)
plot.set_ylabel(r"$\Delta G$ in kcal/mol", fontsize=20)
# Adjust figure size
plt.rcParams['figure.figsize'] = 15, 6 * (self.nphases + 1) * 2
plot_grid = gridspec.GridSpec(self.nphases + 1, 1) # Vertical distribution
free_energy_trace_figure = plt.figure()
# Add some space between the figures
free_energy_trace_figure.subplots_adjust(hspace=0.4)
traces = {}
for i, phase_name in enumerate(self.phase_names):
traces[phase_name] = {}
if phase_name not in self._serialized_data:
self._serialized_data[phase_name] = {}
serial = self._serialized_data[phase_name]
if "free_energy" not in serial:
serial["free_energy"] = {}
serial = serial["free_energy"]
free_energy_trace_f = np.zeros([n_trace, 2], dtype=float)
free_energy_trace_r = np.zeros([n_trace, 2], dtype=float)
p = free_energy_trace_figure.add_subplot(plot_grid[i])
analyzer = self.analyzers[phase_name]
kcal = analyzer.kT / units.kilocalorie_per_mole
# Data crunching to get timeseries
sampled_energies, _, _, states = analyzer.read_energies()
n_replica, n_states, _ = sampled_energies.shape
# Sample at index 0 is actually the minimized structure and NOT from the equilibrium distribution
# This throws off all of the equilibrium data
sampled_energies = sampled_energies[:, :, discard_from_start:]
states = states[:, discard_from_start:]
total_iterations = sampled_energies.shape[-1]
for trace_factor in range(n_trace, 0, -1): # Reverse order tracing
trace_percent = trace_spacing*trace_factor
j = trace_factor - 1 # Indexing
kept_iterations = int(np.ceil(trace_percent*total_iterations))
u_forward = sampled_energies[:, :, :kept_iterations]
s_forward = states[:, :kept_iterations]
u_reverse = sampled_energies[:, :, -1:-kept_iterations-1:-1]
s_reverse = states[:, -1:-kept_iterations - 1:-1]
for energy_sub, state_sub, storage in [
(u_forward, s_forward, free_energy_trace_f), (u_reverse, s_reverse, free_energy_trace_r)]:
u_n = analyzer.get_effective_energy_timeseries(energies=energy_sub,
replica_state_indices=state_sub)
i_t, g_i, n_effective_i = analyze.multistate.get_equilibration_data_per_sample(u_n)
i_max = n_effective_i.argmax()
number_equilibrated = i_t[i_max]
g_t = g_i[i_max]
if not self.use_full_trajectory:
energy_sub = analyze.multistate.utils.remove_unequilibrated_data(energy_sub,
number_equilibrated,
-1)
state_sub = analyze.multistate.utils.remove_unequilibrated_data(state_sub,
number_equilibrated, -1)
energy_sub = analyze.multistate.utils.subsample_data_along_axis(energy_sub, g_t, -1)
state_sub = analyze.multistate.utils.subsample_data_along_axis(state_sub, g_t, -1)
samples_per_state = np.zeros([n_states], dtype=int)
unique_sampled_states, counts = np.unique(state_sub, return_counts=True)
# Assign those counts to the correct range of states
samples_per_state[unique_sampled_states] = counts
mbar = MBAR(energy_sub, samples_per_state)
fe_data = mbar.getFreeEnergyDifferences(compute_uncertainty=True)
# Trap theta_ij output
try:
fe, dfe, _ = fe_data
except ValueError:
fe, dfe = fe_data
ref_i, ref_j = analyzer.reference_states
storage[j, :] = fe[ref_i, ref_j] * kcal, dfe[ref_i, ref_j] * kcal
format_trace_plot(p, free_energy_trace_f, free_energy_trace_r)
p.set_title("{} Phase".format(phase_name.title()), fontsize=20)
traces[phase_name]['f'] = free_energy_trace_f
traces[phase_name]['r'] = free_energy_trace_r
serial['forward'] = free_energy_trace_f.tolist()
serial['reverse'] = free_energy_trace_r.tolist()
# Finally handle last combined plot
combined_trace_f = np.zeros([n_trace, 2], dtype=float)
combined_trace_r = np.zeros([n_trace, 2], dtype=float)
for phase_name in self.phase_names:
phase_f = traces[phase_name]['f']
phase_r = traces[phase_name]['r']
combined_trace_f[:, 0] += phase_f[:, 0]
combined_trace_f[:, 1] = np.sqrt(combined_trace_f[:, 1]**2 + phase_f[:, 1]**2)
combined_trace_r[:, 0] += phase_r[:, 0]
combined_trace_r[:, 1] = np.sqrt(combined_trace_r[:, 1] ** 2 + phase_r[:, 1] ** 2)
p = free_energy_trace_figure.add_subplot(plot_grid[-1])
format_trace_plot(p, combined_trace_f, combined_trace_r)
p.set_title("Combined Phases", fontsize=20)
return free_energy_trace_figure
def restraint_distributions_plot(self):
ENERGIES_IDX = 0
DISTANCES_IDX = 1
# Find the phase that defines the restraint energies and distances.
for phase_name in self.phase_names:
analyzer = self.analyzers[phase_name]
lambda1_data = list(analyzer._get_restraint_energies_distances_at_state(0))
if len(lambda1_data[ENERGIES_IDX]) != 0:
break
# Check if we have a restraint at all.
if len(lambda1_data[ENERGIES_IDX]) == 0:
print('The restraint unbiasing step was not performed for this calculation.')
return
# The restraint distances are not computed if there's no distance cutoff.
lambda0_data = list(analyzer._get_restraint_energies_distances_at_state(-1))
cutoffs = list(analyzer._get_restraint_cutoffs())
xlabels = ['Restraint energies [kT]', 'Restraint distances [Angstrom]']
for data in [lambda1_data, lambda0_data, cutoffs, xlabels]:
if len(lambda1_data[DISTANCES_IDX]) == 0:
del data[DISTANCES_IDX]
elif isinstance(data[DISTANCES_IDX], units.Quantity):
# Convert the distances into the units that will be printed.
data[DISTANCES_IDX] /= units.angstroms
# Plot the lambda=1 and lambda=0 restraints data.
figure, axes = plt.subplots(ncols=len(lambda1_data), figsize=(20, 10))
if len(lambda1_data) == 1:
axes = [axes]
for ax, lambda1, lambda0 in zip(axes, lambda1_data, lambda0_data):
sns.distplot(lambda1, ax=ax, kde=False, label='bound state')
sns.distplot(lambda0, ax=ax, kde=False, label='non-interacting state')
# Plot the cutoffs used for the restraint unbiasing.
for ax, cutoff in zip(axes, cutoffs):
limits = ax.get_ylim()
ax.plot([cutoff for _ in range(100)], np.linspace(limits[0], limits[1]/2, num=100))
# Labels and legend.
for i, (ax, xlabel) in enumerate(zip(axes, xlabels)):
ax.set_xlabel(xlabel)
if i == 0:
ax.set_ylabel('Number of samples')
elif i == 1:
ax.legend(loc='upper right')
return figure
def report_version(self):
current_version = self._serialized_data['yank_version']
print("Rendered with YANK Version {}".format(current_version))
def dump_serial_data(self, path):
"""Dump the serialized data to YAML file"""
true_path, ext = os.path.splitext(path)
if not ext: # empty string check
ext = '.yaml'
true_path += ext
with open(true_path, 'w') as f:
f.write(yaml.dump(self._serialized_data))
|
[
"matplotlib.colors.LinearSegmentedColormap",
"numpy.floor",
"yaml.dump",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.arange",
"pymbar.MBAR",
"numpy.unique",
"numpy.linspace",
"scipy.interpolate.splrep",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.get_cmap",
"numpy.ceil",
"numpy.zeros",
"numpy.any",
"numpy.array",
"os.path.splitext",
"seaborn.distplot",
"scipy.interpolate.splev",
"matplotlib.gridspec.GridSpec",
"matplotlib.gridspec.GridSpecFromSubplotSpec",
"numpy.sqrt"
] |
[((3800, 3834), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['self.nphases', '(1)'], {}), '(self.nphases, 1)\n', (3817, 3834), False, 'from matplotlib import gridspec\n'), ((3891, 3903), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3901, 3903), True, 'from matplotlib import pyplot as plt\n'), ((11248, 11260), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (11258, 11260), True, 'from matplotlib import pyplot as plt\n'), ((14592, 14610), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {}), '(1, 2)\n', (14604, 14610), True, 'from matplotlib import pyplot as plt\n'), ((20617, 20629), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (20627, 20629), True, 'from matplotlib import pyplot as plt\n'), ((25534, 25572), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(self.nphases + 1)', '(1)'], {}), '(self.nphases + 1, 1)\n', (25551, 25572), False, 'from matplotlib import gridspec\n'), ((25633, 25645), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (25643, 25645), True, 'from matplotlib import pyplot as plt\n'), ((30192, 30227), 'numpy.zeros', 'np.zeros', (['[n_trace, 2]'], {'dtype': 'float'}), '([n_trace, 2], dtype=float)\n', (30200, 30227), True, 'import numpy as np\n'), ((30255, 30290), 'numpy.zeros', 'np.zeros', (['[n_trace, 2]'], {'dtype': 'float'}), '([n_trace, 2], dtype=float)\n', (30263, 30290), True, 'import numpy as np\n'), ((33484, 33506), 'os.path.splitext', 'os.path.splitext', (['path'], {}), '(path)\n', (33500, 33506), False, 'import os\n'), ((4136, 4201), 'matplotlib.gridspec.GridSpecFromSubplotSpec', 'gridspec.GridSpecFromSubplotSpec', (['(3)', '(1)'], {'subplot_spec': 'plot_grid[i]'}), '(3, 1, subplot_spec=plot_grid[i])\n', (4168, 4201), False, 'from matplotlib import gridspec\n'), ((4475, 4487), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (4484, 4487), True, 'import numpy as np\n'), ((4647, 4694), 'scipy.interpolate.splrep', 'interpolate.splrep', (['x', 'y'], {'k': '(5)', 's': '(N * 10000000.0)'}), '(x, y, k=5, s=N * 10000000.0)\n', (4665, 4694), False, 'from scipy import interpolate\n'), ((4711, 4743), 'scipy.interpolate.splev', 'interpolate.splev', (['x', 'tck'], {'der': '(0)'}), '(x, tck, der=0)\n', (4728, 4743), False, 'from scipy import interpolate\n'), ((16038, 16059), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""Blues"""'], {}), "('Blues')\n", (16050, 16059), True, 'from matplotlib import pyplot as plt\n'), ((16093, 16138), 'matplotlib.colors.LinearSegmentedColormap', 'LinearSegmentedColormap', (['"""BlueWarnRed"""', 'cdict'], {}), "('BlueWarnRed', cdict)\n", (16116, 16138), False, 'from matplotlib.colors import LinearSegmentedColormap\n'), ((17379, 17411), 'numpy.linspace', 'np.linspace', (['lbound', 'ubound', '(256)'], {}), '(lbound, ubound, 256)\n', (17390, 17411), True, 'import numpy as np\n'), ((17716, 17751), 'numpy.linspace', 'np.linspace', (['lbound', 'ubound', 'nticks'], {}), '(lbound, ubound, nticks)\n', (17727, 17751), True, 'import numpy as np\n'), ((18670, 18723), 'numpy.any', 'np.any', (['(transition_matrix >= mixing_warning_threshold)'], {}), '(transition_matrix >= mixing_warning_threshold)\n', (18676, 18723), True, 'import numpy as np\n'), ((20370, 20393), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(2)', '(1)'], {}), '(2, 1)\n', (20387, 20393), False, 'from matplotlib import gridspec\n'), ((20500, 20523), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(1)', '(2)'], {}), '(1, 2)\n', (20517, 20523), False, 'from matplotlib import 
gridspec\n'), ((20965, 21039), 'matplotlib.gridspec.GridSpecFromSubplotSpec', 'gridspec.GridSpecFromSubplotSpec', (['n_replicas', '(1)'], {'subplot_spec': 'plot_grid[i]'}), '(n_replicas, 1, subplot_spec=plot_grid[i])\n', (20997, 21039), False, 'from matplotlib import gridspec\n'), ((26196, 26231), 'numpy.zeros', 'np.zeros', (['[n_trace, 2]'], {'dtype': 'float'}), '([n_trace, 2], dtype=float)\n', (26204, 26231), True, 'import numpy as np\n'), ((26266, 26301), 'numpy.zeros', 'np.zeros', (['[n_trace, 2]'], {'dtype': 'float'}), '([n_trace, 2], dtype=float)\n', (26274, 26301), True, 'import numpy as np\n'), ((30516, 30573), 'numpy.sqrt', 'np.sqrt', (['(combined_trace_f[:, 1] ** 2 + phase_f[:, 1] ** 2)'], {}), '(combined_trace_f[:, 1] ** 2 + phase_f[:, 1] ** 2)\n', (30523, 30573), True, 'import numpy as np\n'), ((30659, 30716), 'numpy.sqrt', 'np.sqrt', (['(combined_trace_r[:, 1] ** 2 + phase_r[:, 1] ** 2)'], {}), '(combined_trace_r[:, 1] ** 2 + phase_r[:, 1] ** 2)\n', (30666, 30716), True, 'import numpy as np\n'), ((32526, 32586), 'seaborn.distplot', 'sns.distplot', (['lambda1'], {'ax': 'ax', 'kde': '(False)', 'label': '"""bound state"""'}), "(lambda1, ax=ax, kde=False, label='bound state')\n", (32538, 32586), True, 'import seaborn as sns\n'), ((32599, 32669), 'seaborn.distplot', 'sns.distplot', (['lambda0'], {'ax': 'ax', 'kde': '(False)', 'label': '"""non-interacting state"""'}), "(lambda0, ax=ax, kde=False, label='non-interacting state')\n", (32611, 32669), True, 'import seaborn as sns\n'), ((5414, 5429), 'numpy.mean', 'np.mean', (['[0, N]'], {}), '([0, N])\n', (5421, 5429), True, 'import numpy as np\n'), ((5695, 5708), 'numpy.mean', 'np.mean', (['ylim'], {}), '(ylim)\n', (5702, 5708), True, 'import numpy as np\n'), ((12053, 12079), 'numpy.array', 'np.array', (['[decor, cor, eq]'], {}), '([decor, cor, eq])\n', (12061, 12079), True, 'import numpy as np\n'), ((32863, 32909), 'numpy.linspace', 'np.linspace', (['limits[0]', '(limits[1] / 2)'], {'num': '(100)'}), '(limits[0], limits[1] / 2, num=100)\n', (32874, 32909), True, 'import numpy as np\n'), ((33660, 33692), 'yaml.dump', 'yaml.dump', (['self._serialized_data'], {}), '(self._serialized_data)\n', (33669, 33692), False, 'import yaml\n'), ((5360, 5396), 'numpy.floor', 'np.floor', (['self.Neff_maxs[phase_name]'], {}), '(self.Neff_maxs[phase_name])\n', (5368, 5396), True, 'import numpy as np\n'), ((27235, 27276), 'numpy.ceil', 'np.ceil', (['(trace_percent * total_iterations)'], {}), '(trace_percent * total_iterations)\n', (27242, 27276), True, 'import numpy as np\n'), ((28974, 29005), 'numpy.zeros', 'np.zeros', (['[n_states]'], {'dtype': 'int'}), '([n_states], dtype=int)\n', (28982, 29005), True, 'import numpy as np\n'), ((29058, 29098), 'numpy.unique', 'np.unique', (['state_sub'], {'return_counts': '(True)'}), '(state_sub, return_counts=True)\n', (29067, 29098), True, 'import numpy as np\n'), ((29269, 29304), 'pymbar.MBAR', 'MBAR', (['energy_sub', 'samples_per_state'], {}), '(energy_sub, samples_per_state)\n', (29273, 29304), False, 'from pymbar import MBAR\n'), ((24262, 24284), 'numpy.arange', 'np.arange', (['(n_trace + 1)'], {}), '(n_trace + 1)\n', (24271, 24284), True, 'import numpy as np\n')]
|
import math
import numpy as np
from typing import Dict
from typing import List
from typing import Union
from typing import Iterator
from typing import Optional
from .types import *
from .data_types import *
from .normalizers import *
from .distributions import *
from ...misc import *
params_type = Dict[str, Union[DataType, Iterable, "params_type"]]
class ParamsGenerator:
"""
Parameter generator for param searching, see cftool.ml.hpo.base.HPOBase for usage.
Parameters
----------
params : params_type, parameter settings.
Examples
----------
>>> grid = ParamsGenerator({
>>> "a": Any(Choice(values=[1, 2, 3])),
>>> "c": {
>>> "d": Int(Choice(values=[1, 2, 3])),
>>> "e": Float(Choice(values=[1, 2])),
>>> }
>>> })
>>> for param in grid.all():
>>> print(param)
>>> # output : {'a': 1, 'c': {'d': 1, 'e': 1, 'f': 3}}, {'a': 1, 'c': {'d': 1, 'e': 1, 'f': 4}}
>>> # {'a': 1, 'c': {'d': 1, 'e': 2, 'f': 3}}, {'a': 1, 'c': {'d': 1, 'e': 2, 'f': 4}}
>>> # {'a': 1, 'c': {'d': 2, 'e': 1, 'f': 3}}, {'a': 1, 'c': {'d': 2, 'e': 1, 'f': 4}}
>>> # {'a': 1, 'c': {'d': 2, 'e': 2, 'f': 3}}, {'a': 1, 'c': {'d': 2, 'e': 2, 'f': 4}}
>>> # ......
>>> # {'a': 3, 'c': {'d': 3, 'e': 2, 'f': 3}}, {'a': 3, 'c': {'d': 3, 'e': 2, 'f': 4}}
"""
def __init__(
self,
params: params_type,
*,
normalize_method: Optional[str] = None,
normalize_config: Optional[Dict[str, Any]] = None,
):
self._data_types = params
def _data_type_offset(value: DataType) -> int:
if not isinstance(value, Iterable):
return 1
return len(value.values)
self._data_types_nested = Nested(params, offset_fn=_data_type_offset)
if normalize_method is None:
self._normalizers_flattened = None
else:
if normalize_config is None:
normalize_config = {}
def _data_type_normalizer(value: DataType) -> Normalizer:
return Normalizer(normalize_method, value, **normalize_config)
normalizers_nested = self._data_types_nested.apply(_data_type_normalizer)
self._normalizers_flattened = normalizers_nested.flattened
self._all_params_nested = self._all_flattened_data_types = None
self._array_dim = self._all_bounds = None
@property
def params(self) -> params_type:
return self._data_types
@property
def num_params(self) -> number_type:
def _num_params(params):
if isinstance(params, (DataType, Iterable)):
return params.num_params
assert isinstance(params, dict)
num_params = prod(_num_params(v) for v in params.values())
if math.isinf(num_params):
return num_params
return int(num_params)
return _num_params(self._data_types)
@property
def array_dim(self) -> int:
if self._array_dim is None:
self._array_dim = self.flattened2array(
self.flatten_nested(self.pop())
).shape[0]
return self._array_dim
@property
def all_bounds(self) -> np.ndarray:
if self._all_bounds is None:
bounds_list = []
for key in self.sorted_flattened_keys:
if self._normalizers_flattened is None:
normalizer = None
else:
normalizer = self._normalizers_flattened[key]
if normalizer is None:
data_type = self._data_types_nested.get_value_from(key)
if not isinstance(data_type, Iterable):
bounds_list.append(list(data_type.bounds))
else:
bounds_list.extend(list(map(list, data_type.bounds)))
else:
if normalizer.is_iterable:
bounds_list.extend(list(map(list, normalizer.bounds)))
else:
bounds_list.append(list(normalizer.bounds))
self._all_bounds = np.array(bounds_list, np.float32)
return self._all_bounds
@property
def all_flattened_params(self) -> all_flattened_type:
if self._all_params_nested is None:
apply = lambda data_type: data_type.all()
self._all_params_nested = self._data_types_nested.apply(apply)
return self._all_params_nested.flattened
@property
def sorted_flattened_keys(self) -> List[str]:
return self._data_types_nested.sorted_flattened_keys
def pop(self) -> nested_type:
def _pop(src: dict, tgt: dict):
for k, v in src.items():
if isinstance(v, dict):
next_tgt = tgt.setdefault(k, {})
_pop(v, next_tgt)
else:
tgt[k] = v.pop()
return tgt
return _pop(self._data_types, {})
def all(self) -> Iterator[nested_type]:
for flattened_params in Grid(self.all_flattened_params):
yield self._data_types_nested.nest_flattened(flattened_params)
def flatten_nested(self, nested: nested_type) -> nested_type:
return self._data_types_nested.flatten_nested(nested)
def nest_flattened(self, flattened: flattened_type) -> nested_type:
return self._data_types_nested.nest_flattened(flattened)
def flattened2array(self, flattened: flattened_type) -> np.ndarray:
if self._normalizers_flattened is None:
normalized = flattened
else:
normalized = {
k: self._normalizers_flattened[k].normalize(v)
for k, v in flattened.items()
}
return self._data_types_nested.flattened2array(normalized)
def array2flattened(self, array: np.ndarray) -> flattened_type:
normalized = self._data_types_nested.array2flattened(array)
if self._normalizers_flattened is None:
flattened = normalized
else:
flattened = {
k: self._normalizers_flattened[k].recover(v)
for k, v in normalized.items()
}
for key, value in flattened.items():
data_type = self._data_types_nested.get_value_from(key)
flattened[key] = data_type.transform(value)
return flattened
__all__ = ["ParamsGenerator", "params_type"]
|
[
"math.isinf",
"numpy.array"
] |
[((2877, 2899), 'math.isinf', 'math.isinf', (['num_params'], {}), '(num_params)\n', (2887, 2899), False, 'import math\n'), ((4226, 4259), 'numpy.array', 'np.array', (['bounds_list', 'np.float32'], {}), '(bounds_list, np.float32)\n', (4234, 4259), True, 'import numpy as np\n')]
|
"""EM 算法的实现
"""
import copy
import math
import matplotlib.pyplot as plt
import numpy as np
isdebug = True
# Specify the parameters of k Gaussian distributions; here k=2. Note that both Gaussians share the same standard deviation Sigma, with means Mu1 and Mu2 respectively.
def init_data(Sigma, Mu1, Mu2, k, N):
global X
global Mu
global Expectations
X = np.zeros((1, N))
Mu = np.random.random(k)
Expectations = np.zeros((N, k))
for i in range(0, N):
if np.random.random(1) > 0.5:
X[0, i] = np.random.normal(Mu1, Sigma)
else:
X[0, i] = np.random.normal(Mu2, Sigma)
if isdebug:
print("***********")
print("初始观测数据X:")
print(X)
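# For reference, the two EM steps below implement, for a two-component Gaussian
# mixture with shared known variance Sigma**2 and (implicitly) equal priors:
#   E-step:  E[z_ij] = exp(-(x_i - Mu_j)**2 / (2 * Sigma**2))
#                      / sum_l exp(-(x_i - Mu_l)**2 / (2 * Sigma**2))
#   M-step:  Mu_j = sum_i E[z_ij] * x_i / sum_i E[z_ij]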
# EM algorithm, step 1 (E-step): compute E[z_ij]
def e_step(Sigma, k, N):
global Expectations
global Mu
global X
for i in range(0, N):
Denom = 0
Numer = [0.0] * k
for j in range(0, k):
Numer[j] = math.exp((-1 / (2 * (float(Sigma**2)))) * (float(X[0, i] - Mu[j]))**2)
Denom += Numer[j]
for j in range(0, k):
Expectations[i, j] = Numer[j] / Denom
if isdebug:
print("***********")
print("隐藏变量E(Z):")
print(Expectations)
# EM algorithm, step 2 (M-step): find the parameters Mu that maximize the expected complete-data log-likelihood given E[z_ij]
def m_step(k, N):
global Expectations
global X
for j in range(0, k):
Numer = 0
Denom = 0
for i in range(0, N):
Numer += Expectations[i, j] * X[0, i]
Denom += Expectations[i, j]
Mu[j] = Numer / Denom
# Run the algorithm for up to iter_num iterations, or stop once the change drops below Epsilon
def run(Sigma, Mu1, Mu2, k, N, iter_num, Epsilon):
init_data(Sigma, Mu1, Mu2, k, N)
print("初始<u1,u2>:", Mu)
for i in range(iter_num):
Old_Mu = copy.deepcopy(Mu)
e_step(Sigma, k, N)
m_step(k, N)
print(i, Mu)
if sum(abs(Mu - Old_Mu)) < Epsilon:
break
if __name__ == '__main__':
    sigma = 6  # both Gaussian components share this standard deviation
    mu1 = 40  # mean of the first Gaussian, used to generate samples
    mu2 = 20  # mean of the second Gaussian, used to generate samples
    k = 2  # number of Gaussian components
    N = 1000  # number of samples
    iter_num = 1000  # maximum number of iterations
    epsilon = 0.0001  # stop when the change between two iterations is below this
run(sigma, mu1, mu2, k, N, iter_num, epsilon)
plt.hist(X[0, :], 50)
plt.show()
|
[
"copy.deepcopy",
"matplotlib.pyplot.show",
"matplotlib.pyplot.hist",
"numpy.zeros",
"numpy.random.random",
"numpy.random.normal"
] |
[((264, 280), 'numpy.zeros', 'np.zeros', (['(1, N)'], {}), '((1, N))\n', (272, 280), True, 'import numpy as np\n'), ((290, 309), 'numpy.random.random', 'np.random.random', (['k'], {}), '(k)\n', (306, 309), True, 'import numpy as np\n'), ((329, 345), 'numpy.zeros', 'np.zeros', (['(N, k)'], {}), '((N, k))\n', (337, 345), True, 'import numpy as np\n'), ((2064, 2085), 'matplotlib.pyplot.hist', 'plt.hist', (['X[0, :]', '(50)'], {}), '(X[0, :], 50)\n', (2072, 2085), True, 'import matplotlib.pyplot as plt\n'), ((2090, 2100), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2098, 2100), True, 'import matplotlib.pyplot as plt\n'), ((1610, 1627), 'copy.deepcopy', 'copy.deepcopy', (['Mu'], {}), '(Mu)\n', (1623, 1627), False, 'import copy\n'), ((383, 402), 'numpy.random.random', 'np.random.random', (['(1)'], {}), '(1)\n', (399, 402), True, 'import numpy as np\n'), ((432, 460), 'numpy.random.normal', 'np.random.normal', (['Mu1', 'Sigma'], {}), '(Mu1, Sigma)\n', (448, 460), True, 'import numpy as np\n'), ((497, 525), 'numpy.random.normal', 'np.random.normal', (['Mu2', 'Sigma'], {}), '(Mu2, Sigma)\n', (513, 525), True, 'import numpy as np\n')]
|
import os
import h5py
import pytest
import numpy as np
import pandas as pd
import automatic_speech_recognition as asr
@pytest.fixture
def dataset() -> asr.dataset.Features:
file_path = 'test.h5'
reference = pd.DataFrame({
'path': [f'dataset/{i}' for i in range(10)],
'transcript': [f'transcript-{i}' for i in range(10)],
})
with h5py.File(file_path, 'w') as store:
for path in reference.path:
store[path] = np.random.random([20, 10])
with pd.HDFStore(file_path, mode='r+') as store:
store['references'] = reference
return asr.dataset.Features.from_hdf(file_path, batch_size=3)
def test_get_batch(dataset):
batch_audio, transcripts = dataset.get_batch(index=1)
a, b, c = transcripts
assert b == 'transcript-4'
a, b, c = batch_audio
assert b.shape == (20, 10)
# Remove store at the end of tests
os.remove('test.h5')
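# Hedged note (not from the original test): cleanup can instead live in the fixture
# itself via pytest's yield-fixture pattern, so the temporary store is removed even
# when an assertion above fails:
#     @pytest.fixture
#     def dataset() -> asr.dataset.Features:
#         ...  # build test.h5 as above
#         yield asr.dataset.Features.from_hdf(file_path, batch_size=3)
#         os.remove(file_path)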
|
[
"automatic_speech_recognition.dataset.Features.from_hdf",
"os.remove",
"h5py.File",
"pandas.HDFStore",
"numpy.random.random"
] |
[((595, 649), 'automatic_speech_recognition.dataset.Features.from_hdf', 'asr.dataset.Features.from_hdf', (['file_path'], {'batch_size': '(3)'}), '(file_path, batch_size=3)\n', (624, 649), True, 'import automatic_speech_recognition as asr\n'), ((896, 916), 'os.remove', 'os.remove', (['"""test.h5"""'], {}), "('test.h5')\n", (905, 916), False, 'import os\n'), ((364, 389), 'h5py.File', 'h5py.File', (['file_path', '"""w"""'], {}), "(file_path, 'w')\n", (373, 389), False, 'import h5py\n'), ((499, 532), 'pandas.HDFStore', 'pd.HDFStore', (['file_path'], {'mode': '"""r+"""'}), "(file_path, mode='r+')\n", (510, 532), True, 'import pandas as pd\n'), ((462, 488), 'numpy.random.random', 'np.random.random', (['[20, 10]'], {}), '([20, 10])\n', (478, 488), True, 'import numpy as np\n')]
|
"""
This is an implementation of paper
"Attention-based LSTM for Aspect-level Sentiment Classification" with Keras.
Based on dataset from "SemEval 2014 Task 4".
"""
import os
from time import time
# TODO: we need a proper logger here!
import numpy as np
from lxml import etree
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils.np_utils import to_categorical
from keras.layers import Input, Embedding, LSTM, Dense
from keras.layers import RepeatVector, Dot, Concatenate, Reshape
from keras.activations import softmax
from keras.models import Model, load_model
from keras import regularizers, initializers, optimizers
from keras.layers import Lambda
import keras.backend as K
TEXT_KEY = 'text'
TERM_KEY = 'aspect_terms'
CATEGORY_KEY = 'aspect_categories'
I_TEXT, I_ASPECT, I_POLARITY = 0, 1, 2
# Correspond to settings in paper.
EMBEDDING_DIM = 300
ASPECT_EMBEDDING_DIM = 300
HIDDEN_LAYER_SIZE = 300
# Hyper-parameters for training.
L2_REGULARIZATION = 0.001
MOMENTUM = 0.9
LEARNING_RATE = 0.001
MINI_BATCH_SIZE = 25
RANDOM_UNIFORM = .01
POLARITY_TO_INDEX = {
'positive': 0,
'negative': 1,
'neutral': 2,
'conflict': 3
}
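# Hedged sketch of how these constants are typically consumed downstream
# (variable names here are illustrative, not from the original code):
#     polarity_indices = [POLARITY_TO_INDEX[p] for p in ('positive', 'neutral')]
#     y = to_categorical(polarity_indices, num_classes=len(POLARITY_TO_INDEX))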
def extract_data(data_file='Restaurants_Train_v2.xml'):
"""
    Extract training data from the xml file provided by 'SemEval 2014 Task 4'.
    :param data_file: XML file that contains training data.
:return: A list of dictionaries of training data with TEXT_KEY, 'aspect
terms' and 'aspect categories'.
"""
tree = etree.parse(data_file)
sents_root = tree.getroot()
data = []
def get_content(sent):
"""
Get all contents from a single 'sentence node', including TEXT_KEY,
values of 'aspect terms' and 'aspect categories'.
:param sent: a single xml node of sentence.
:type: _Element
:return: A dictionary of contents.
"""
content = {}
        # We assume that there must be a text node here.
content[TEXT_KEY] = sent.xpath(TEXT_KEY)[0].text
terms = sent.xpath('aspectTerms')
if terms:
# As there is only one element of 'aspectTerms'.
# And we only need the first two values, 'aspect' and 'polarity'.
content[TERM_KEY] = list(map(lambda term: term.values()[:2],
terms[0].iterchildren()))
else:
pass
categories = sent.xpath('aspectCategories')
if categories:
content[CATEGORY_KEY] = list(
map(lambda category: category.values(),
categories[0].iterchildren()))
else:
pass
return content
for sent in sents_root.iterchildren():
data.append(get_content(sent))
return data
def check_absent(data):
"""
Checking absent 'aspect terms' or 'aspect categories'.
And check if there is sentence missing both 'terms' and 'categories'.
    :param data: dataset with all contents.
    :type: list of dictionary.
    :return: sentence indices with absent terms and with absent categories,
    their counts, a flag for sentences missing both, and the max sentence length.
    :type: tuple of (list, int, list, int, boolean, int)
"""
exist_both_missing = False
term_absent_indices = []
term_absent_cnt = 0
category_absent_indices = []
category_absent_cnt = 0
max_len = 0
for idx, sent in enumerate(data):
max_len = max(len(sent[TEXT_KEY]), max_len)
term_absent = TERM_KEY not in sent.keys()
category_absent = CATEGORY_KEY not in sent.keys()
if term_absent and category_absent:
exist_both_missing = True
if term_absent:
term_absent_indices.append(idx)
term_absent_cnt += 1
if category_absent:
category_absent_indices.append(idx)
category_absent_cnt += 1
return (term_absent_indices, term_absent_cnt,
category_absent_indices, category_absent_cnt,
exist_both_missing, max_len)
def combine_data(data, mess=True, replace_space=True, replace_space_char='_'):
"""
    If `mess` is True, all data is messed together: each text is combined
    with every aspect related to it, both aspect terms and aspect categories,
    into a single list.
    If `mess` is False, TEXT_KEY and aspect are combined separately for
    'terms' and 'categories', and the two lists are returned as a tuple.
    Also returns the overall max sentence length if `mess` is True, or the
    separate max lengths if `mess` is False.
:param data: all data with TEXT_KEY and lists of 'aspect terms' and
'categories'.
:return: all combined data or combined data with 'aspect terms' and
'categories' separately along with their max length or in all.
"""
term_data, category_data = [], []
term_max_len, category_max_len = 0, 0
# TODO, How do we treat multi-word token as aspect term?
# 1. take whole as one token an replace space with other mask.
# 2. split into multiple tokens and average all embeddings.
# 3. only take one word into consideration.
    # Note that aspect terms can contain spaces, so the tokenizer should not
    # split on space; the whole phrase is taken as one token.
    # There are also other special characters in the phrase, like '-',
    # which should be kept.
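    # e.g. with option 1 (the default below, replace_space_char='_'):
    #   'battery life' -> 'battery_life', so the multi-word term stays a
    #   single token for the aspect tokenizer.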
for sent in data:
text = sent[TEXT_KEY]
is_term_exist = TERM_KEY in sent.keys()
is_category_exist = CATEGORY_KEY in sent.keys()
if is_term_exist:
term_max_len = max(term_max_len, len(sent[TEXT_KEY]))
for term, polarity in sent[TERM_KEY]:
if replace_space:
term = term.replace(' ', replace_space_char)
term_data.append([text, term, polarity])
if is_category_exist:
category_max_len = max(category_max_len, len(sent[TEXT_KEY]))
for category, polarity in sent[CATEGORY_KEY]:
if replace_space:
category = category.replace(' ', replace_space_char)
category_data.append([text, category, polarity])
# print(len(term_data), len(category_data))
if mess:
max_len = max(term_max_len, category_max_len)
term_data.extend(category_data)
return term_data, max_len
else:
return (term_data, term_max_len), (category_data, category_max_len)
def convert_data(data, max_len=None, with_label=True, extra_data=False):
"""
    Convert data tuples of (text, aspect, polarity) into word-index sequences
    for text and aspect, and one-hot vectors for the labels, so they can be
    looked up in the embedding layer.
    Polarity is converted to a class identifier, as defined by default in
    POLARITY_TO_INDEX.
    NOTE: keep in mind to match label, 'text' and 'aspect'!
    :param data: List of data with elements of (text, aspect, polarity).
    :param max_len: Max length to pad the text sequences to.
    :param with_label: Whether it is training data with label or
    test/customized data without label.
    :return: (converted_data, lookups) where converted_data holds the text
    index sequences, aspect index sequences and one-hot labels as numpy
    arrays, and lookups holds the corresponding word-to-index dictionaries.
    :type: tuple of lists.
"""
# Set indicator for 'text', 'aspect' and 'polarity(label)'.
converted_data, lookups = [], []
texts, aspects, labels = [], [], []
# TODO, we should count max length here?!
for d in data:
texts.append(d[I_TEXT])
aspects.append(d[I_ASPECT])
if with_label:
labels.append(d[I_POLARITY])
def convert_to_indices(examples, max_len=None, need_tokenizer=False,
customized_filter='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n'):
"""
Fit and convert word to indices sequences and word index lookup, and if
needed, return tokenizer as well.
:param examples: list of words or sentences.
:param max_len: the max length of indices sequences.
:param need_tokenizer: return tokenizer or not.
:type: boolean
:return: (indices sequence, word index lookup, <tokenizer>)
:type: tuple
"""
tokenizer = Tokenizer(filters=customized_filter)
tokenizer.fit_on_texts(examples)
seqs = tokenizer.texts_to_sequences(examples)
word_idx = tokenizer.word_index
# TODO, do we need to pad, if yes, 'pre' or 'post'?
if max_len:
seqs = pad_sequences(seqs, maxlen=max_len)
if need_tokenizer:
return seqs, word_idx, tokenizer
else:
return seqs, word_idx
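    # Sketch of the expected behaviour (an assumption about Keras' Tokenizer):
    # ['good food', 'bad food'] -> seqs [[2, 1], [3, 1]] with word_index
    # {'food': 1, 'good': 2, 'bad': 3}; indices start at 1 so 0 stays free
    # for padding / mask_zero.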
text_seqs, text_word_idx = convert_to_indices(texts, max_len)
converted_data.append(np.asarray(text_seqs, dtype='int32'))
lookups.append(text_word_idx)
# For aspect term maybe we should not use tokenizer and filter.
aspects_seqs, aspects_idx = convert_to_indices(
aspects,
# TODO, should use less filter.
customized_filter='#$%&/:;<=>?@[\\]^`{|}~\t\n')
converted_data.append(np.asarray(aspects_seqs, dtype='int32'))
lookups.append(aspects_idx)
if with_label:
labels_seqs, labels_idx = convert_to_indices(labels)
# Normalize label sequences as we only need '4' classes and do not need
# extra class for 'other'.
labels_arr = np.asarray(labels_seqs, dtype='int') - 1
labels_one_hot = to_categorical(labels_arr) # aspects_seqs,
# [:, np.newaxis],
converted_data.append(labels_one_hot)
lookups.append(labels_idx)
# print(aspects_seqs)
# # Preprocessing text without max number of words.
# text_tokenizer = Tokenizer()
# text_tokenizer.fit_on_texts(texts)
# text_seqs = text_tokenizer.texts_to_sequences(texts)
# text_word_idx = text_tokenizer.word_index
# # Just get indices of words, and does not categorize it as we won't
# # multiply one-hot vector in practice as it is computation costly.
# # Instead we just lookup with embedding layer.
# text_data = pad_sequences(text_seqs, maxlen=max_len)
#
# # Preprocessing aspects.
# # The same as word in text, it will be lookup in embedding layer.
# aspects_tokenizer = Tokenizer()
# aspects_tokenizer.fit_on_texts(aspects)
# aspects_seqs = aspects_tokenizer.texts_to_sequences(aspects)
# aspects_idx = aspects_tokenizer.word_index
#
# # Processing labels
# # Convert labels from words into indices and then to one-hot categorical
# # indices.
# labels_tokenizer = Tokenizer()
# labels_tokenizer.fit_on_texts(labels)
# labels_seqs = labels_tokenizer.texts_to_sequences(labels)
# labels_idx = labels_tokenizer.
return converted_data, lookups
def load_w2v(idxes, emb_file, save_to_file=None):
"""
    Load a pre-trained embedding and match words in the training data to form
    a small word embedding matrix, with OOV words left as all '0's.
NOTE: Keras tokenizer.word_index start from 1, in order to use '0'
padding in pad_sequence and mask_zero in embedding layer and following
layer.
    :param idxes: the word lookup dictionary of word indices.
:param emb_file: pre-trained embedding file.
:return: word embedding matrix fit for the training data.
"""
# Only need the lookup for 'text'.
idx = idxes[I_TEXT]
# Initial word embedding matrix with all '0's.
    # TODO, here we could set embedding dimension automatically.
emb_matrix = np.zeros((len(idx) + 1, EMBEDDING_DIM))
# Timing it.
start_time = time()
with open(emb_file) as emb:
for line in emb:
pieces = line.strip().split()
word, coef = pieces[0].strip(), pieces[1:]
begin_idx = 0
for elem_idx, elem in enumerate(coef):
# In case there is space in the word,
# continuously test if the string could be interpret as float,
# if yes, it means this piece element is the beginning of the
# coefficient and if no, then append to word as part of the
# token.
try:
# Test if an element in coefficient is an actual
# coefficient of a part of key token.
float(elem)
# Record begin index of actual coefficient.
begin_idx = elem_idx + 1
# Only break when we find the begin index of actual
# coefficient.
break
except Exception as e:
word += elem
# print(e)
# TODO, we could record the trail and error in log.
# print("Filed to load record with word: '{}' and "
# "coefficient: {}".format(word, coef))
# print(word)
coef = np.asarray(pieces[begin_idx:], dtype=np.float32)
if word in idx.keys():
# Lookup the indices(index) of word and set the corresponding
# vector to the one in pre-trained embedding matrix.
emb_matrix[idx[word]] = coef
print('Loaded word embedding matrix within {}'.format(
time() - start_time))
# Save loaded subset of word embedding into files.
if save_to_file:
np.save(save_to_file, emb_matrix)
return emb_matrix
def build_net(data, max_len, w2is, atae=True, extra_outputs=True,
emb_mtrx_file=None, save_to_file=None):
"""
Build ATAE-LSTM mentioned in paper 'Attention-based LSTM for Aspect-level
Sentiment Classification', with uniform randomly initialized aspect
embedding and word embedding subset according training data and given
pre-trained embedding file.
    Applies 'inter' attention before the multi-class classification by
    softmax, which introduces aspect-level attention as part of the encoding
    of the source sentence.
:param data: Indices of training data including (sentences, aspect,
polarity(one-hot label))
    :param max_len: the max sentence length; sequences are padded with '0's
    and the value is needed for the input shape with mini-batches.
:param w2is: Index lookup table of components above.
:param atae: If 'False' then only use 'AE'.
:param extra_outputs: return extra outputs like attention weights,
aspect embeddings or so.
:param emb_mtrx_file: Pre-saved embedding matrix corresponding to
training data and given pre-trained embedding. If 'None' is set,
then reload from embedding file.
    :param save_to_file: File path to save the model; if 'None' is set, the
    model is not saved.
    :return: The constructed Keras model.
"""
# TODO, max length should be fixed.
sents, aspects, labels = data
sents_idx, aspects_idx, _ = w2is
emb_mtrx = np.load(emb_mtrx_file)
# Input of sentences.
sents_tensor_input = Input(shape=(sents.shape[1],), dtype='int32')
# Do not retrain embedding of sentences.
sents_tensor = Embedding(len(sents_idx) + 1,
# EMBEDDING_DIM
emb_mtrx.shape[1],
weights=[emb_mtrx],
input_length=max_len,
trainable=False)(sents_tensor_input)
# Input of aspect
# As we use ATAE-LSTM, aspect embedding need to be concated to each time
# steps in sentences.
# Aspect is a single index of integer.
aspects_tensor_input = Input(shape=(1,), dtype='int32')
# Randomly initialize aspect embedding.
aspects_emb_initializer = initializers.RandomUniform(minval=-RANDOM_UNIFORM,
maxval=RANDOM_UNIFORM)
aspects_emb_layer = Embedding(len(aspects_idx) + 1,
ASPECT_EMBEDDING_DIM,
embeddings_initializer=aspects_emb_initializer,
trainable=True,
name='asp_emb_layer')
# In order to get embedding weights.
# aspects_emb_matrix = Lambda(lambda x: x, name='asp_emb_weight')(
# aspects_emb_layer.weights)
aspects_emb = aspects_emb_layer(aspects_tensor_input)
# Here, before repeat we need reshape aspect_tensor act as 'squeeze' with
# the dimension with '1', say Reshape((10, ), input_shape=(1, 10))(...)
# then got keras tensor with shape of (10,), which will then feed into
# `RepeatVector`.
aspects_tensor = Reshape((ASPECT_EMBEDDING_DIM,))(aspects_emb)
# Repeat aspects tensor in order to correspond to the time step of
# sentences, with shape of (max_len, ASPECT_EMBEDDNING_DIM).
# TODO, could use Timedistributed?
aspects_tensor = RepeatVector(max_len)(aspects_tensor)
lstm_input = Concatenate()([sents_tensor, aspects_tensor])
if atae:
lstm_output = LSTM(HIDDEN_LAYER_SIZE, return_sequences=True)(lstm_input)
# Attention with concatenation of sequential output of LSTM and
# aspect embedding.
attention_input = Concatenate()([lstm_output, aspects_tensor])
attention_score = Dense(EMBEDDING_DIM + ASPECT_EMBEDDING_DIM,
use_bias=False,
name='attention_score_1')(attention_input)
# We need an extra `Dense/Activation` layer here for axis related
# softmax with should be align on time step instead the last axis.
attention_weight = Dense(1, use_bias=False,
name='attention_score_2')(attention_score)
        attention_weight = Lambda(lambda x: softmax(x, axis=1),
                                  name='attention_weights')(attention_weight)
# permuted_weight = Permute((2, 1))(attention_weight)
# attention_represent = Multiply(name='r')([lstm_output, permuted_weight])
# attention_represent = Multiply(name='r')([lstm_output, attention_weight])
attention_represent = Dot(axes=1, name='r')([lstm_output,
attention_weight])
attention_represent = Reshape((EMBEDDING_DIM,))(attention_represent)
last_hidden = Lambda(lambda tensor: tensor[:, -1, :])(lstm_output)
final_represent = Concatenate(name='final_concatenate')([
attention_represent, last_hidden])
final_represent = Dense(EMBEDDING_DIM, activation='tanh',
use_bias=False, name='final_representation')(
final_represent)
model_output = Dense(labels.shape[1],
activation='softmax',
activity_regularizer=regularizers.l2(
L2_REGULARIZATION),
name='ATAE_LSTM_output')(final_represent)
# outs = [model_output]
# if extra_outputs:
# outs.append(attention_weight)
# TODO, get from model outside
# outs.append(aspects_emb_matrix)
# print(outs)
else:
lstm_output = LSTM(HIDDEN_LAYER_SIZE,
return_sequences=False)(lstm_input)
model_output = Dense(labels.shape[1],
activation='softmax',
name='Simple_AE_LSTM_ouptut')(lstm_output)
# outs = [model_output]
model = Model(inputs=[sents_tensor_input,
aspects_tensor_input],
outputs=model_output)
if save_to_file:
model.save(save_to_file)
return model
def train(data, model, model_optimizer=None, metrics=None, valid_ratio=0.1,
epoch=10, mini_batch=25, save_to_file=None):
"""
:param data: Training data in tuples of lists with form of (sentences,
aspect word, polarity).
:param model: Predefined model generated by `build_net`, if None,
then if will be build with default values.
    :param model_optimizer: Optimizer used to train/compile the model. Default
    is Adagrad with a learning rate of '0.001'.
    :param metrics: Metrics of interest, as a list. If not set, the default
    is ['accuracy'].
:return: None
"""
if not model and not data:
print('Please passed in data and model!')
return
if not metrics:
metrics = ['accuracy']
if not model_optimizer:
model_optimizer = optimizers.Adagrad(lr=0.001)
print("Training Model ...")
print(model.summary())
# print('\t\twith data as')
# print('\t\t{}'.format(check_absent(data)))
print('\t\twith hyper-parametes as')
print('\t\t\tMini-Batch : {}'.format(mini_batch))
print('\t\t\tEpoch : {}'.format(epoch))
model.compile(model_optimizer, 'categorical_crossentropy', metrics=metrics)
    model.fit([data[I_TEXT], data[I_ASPECT]], data[I_POLARITY],
              mini_batch, epochs=epoch, validation_split=valid_ratio)
if save_to_file:
model.save(save_to_file)
def train_dev_split(data, ratio=0.8, seed=42):
"""
Function to split train and dev set with given ratio.
:param data: whole dataset.
    :param ratio: fraction of the data used for training.
    :param seed: random seed used for shuffling.
:return: tuple of list of (training, dev), and each of them should be
formed as (sentences, aspect word, polarity)
"""
    np.random.seed(seed)
sents, aspects, labels = data[I_TEXT], data[I_ASPECT], data[I_POLARITY]
idx = np.arange(sents.shape[0])
np.random.shuffle(idx)
sents = sents[idx]
aspects = aspects[idx]
labels = labels[idx]
# Calculate split boundary.
bnd = int(len(idx) * ratio)
train_set = [sents[:bnd], aspects[:bnd], labels[:bnd]]
dev_set = [sents[bnd:], aspects[bnd:], labels[bnd:]]
return train_set, dev_set
def predict(data, lookup, max_len, model=None, save_to_file=None,
extra_output=True):
"""
Predict with given data and model or load model from saved pre-trained
model in file.
:param data: data in tuple or list (sentence, aspect)
    :param lookup: index-to-polarity lookup used to map predictions back to labels.
    :param max_len: length to pad the sequences to.
    :param model: pre-trained model; if not set, it is loaded from file,
    and if the model file is also not set, a ValueError is raised.
    :param save_to_file: file the model was saved to.
:return: prediction
"""
# Omit word index lookups.
converted_data, _ = convert_data(data, max_len, with_label=False)
# print(converted_data)
if not model:
if save_to_file:
model = load_model(save_to_file,
custom_objects={'softmax': softmax})
else:
# TODO, should raise exception?
raise ValueError('Please pass in model instance or '
'the path of file model saved to.')
pred_vec = model.predict([converted_data[I_TEXT],
converted_data[I_ASPECT]])
pred_idx = np.argmax(pred_vec, axis=1)
func_get_label = np.vectorize(lambda p: lookup.get(p))
# print(pred_idx, func_get_label(pred_idx), lookup.get(0))
# Need to add '1' for keras labels start from '0'.
pred = func_get_label(pred_idx + 1)
# if extra_output:
# model.layers
return pred
def get_layer(model, layer_name):
"""
Get layer from model by name or index.
:param layer_name: the name or index of layer.
:return: layer instance extract from model.
"""
if isinstance(layer_name, int):
return model.layers[layer_name]
elif isinstance(layer_name, str):
return model.get_layer(layer_name)
else:
raise ValueError('The layer name should only be `int` or `str`.')
def get_aspect_embeddings(model, layer_name, save_to_file=None):
"""
    Get aspect embeddings from the specific layer with the given name.
    :param model: the pre-trained model; if not set, it is reloaded from the
    saved model file. If that also fails, 'ValueError' will be thrown.
    :param layer_name: the name or index of the embedding layer.
    :param save_to_file: file with the saved pre-trained model; it is loaded when model is 'None'.
    :return: tensor of aspect embeddings.
"""
if not model:
if not save_to_file:
raise ValueError('No model found from parameter or file!')
else:
model = load_model(save_to_file)
# Get embeddings of aspect words.
emb_layer = get_layer(model, layer_name)
return K.eval(emb_layer.embeddings)
def get_attention_weighs(data, att_layer_name, input_layers_names: list,
model=None, save_to_file=None):
"""
Get attention weights(intermediate) from specific layer with given layer
name and input layers.
    :param data: data to compute attention weights for.
    :param model: the pre-trained model; if not set, it is reloaded from the
    saved model file. If that also fails, 'ValueError' will be thrown.
    :param att_layer_name: the name or index of the attention layer.
    :param input_layers_names: the names or indices of all input layers, in order.
    :param save_to_file: file with the saved pre-trained model; it is loaded when model is 'None'.
    :return: tensor of attention weights.
"""
if not model:
if not save_to_file:
raise ValueError('No model found from parameter or file!')
else:
model = load_model(save_to_file,
custom_objects={'softmax': softmax})
# Must be sure input layers are in order.
att_layer = get_layer(model, att_layer_name)
input_layers = []
for layer_name in input_layers_names:
layer = get_layer(model, layer_name)
if layer:
input_layers.append(layer.input)
get_attention_weights = K.function(input_layers, [att_layer.output])
weights = get_attention_weights([data[I_TEXT], data[I_ASPECT]])[0]
# print(weights.shape)
return weights
def plot_attention_weight(weights, focus_len):
"""
Plot attention weights within the focus length.
:param weights: attention weights.
:param focus_len: the length to focus to, usually the length of sentences.
:return: None
"""
# score_file = os.path.join(RAW_DATA_FILE_BASE, 'intermeidate_score')
# np.save(score_file, weights)
# score_input = Input(shape=(term_max_len, 600))
# get_weights = Dense(1, use_bias=False)(score_input)
# get_weights = Activation('softmax', axis=1)(get_weights)
# get_weights = Lambda(lambda x: tf.nn.softmax())
# from keras.activations import softmax
# # # get_weights = Lambda(lambda x: softmax(x, axis=1))(get_weights)
# # # score_model = Model(score_input, get_weights)
# # # print(score_model.summary())
# #
# # score_model.compile(optimizer='adam', loss='categorical_crossentropy')
# weight_result = score_model.predict(weights)
# print(weight_result[0].shape)
# begin_idx = len(converted_data[I_TEXT][0])
# print(begin_idx)
import matplotlib.pyplot as plt
# hist, bins = np.histogram(weight_result[0].reshape((1, -1)))
# We have to remember the length of input sentences in order to align the
# attention weights.
# plt.imshow(weight_result[0][-20:].reshape((1, -1)), cmap="plasma",
# aspect="auto", extent=[0, 20, 0, 1])
    # TODO, Here it is 'pre pad', so it's '-focus_len' for the actual tokens.
attentions = weights.reshape((1, -1))[:, -focus_len:]
print(attentions.shape)
plt.imshow(attentions, cmap='plasma',
aspect='auto', extent=[0, focus_len, 0, 1])
# plt.grid(True)
plt.colorbar()
plt.show()
if __name__ == '__main__':
RAW_DATA_FILE_BASE = '/Users/jiazhen/datasets/SemEval' \
'/SemEval_2014_task4/ABSA_v2'
RES_RAW_DATA_FILE = os.path.join(RAW_DATA_FILE_BASE,
'Restaurants_Train_v2.xml')
LAP_RAW_DATA_FILE = os.path.join(RAW_DATA_FILE_BASE, 'Laptop_Train_v2.xml')
WORD_EMB_BASE = '/Users/jiazhen/datasets'
WORD_EMB_FILE = os.path.join(WORD_EMB_BASE, 'glove.840B.300d.txt')
SAVED_EMB_FILE = os.path.join(RAW_DATA_FILE_BASE, 'glove_res_emb.npy')
SAVED_MDL_FILE = os.path.join(RAW_DATA_FILE_BASE, 'atae_model.keras')
res_data = extract_data(RES_RAW_DATA_FILE)
# print(res_data[7])
check_absent(res_data)
(term_data, term_max_len), _ = combine_data(res_data, mess=False)
# print(term_data[7])
# No padding here according to the paper.
# Need padding for mini-batch.
seqs_data, w2is = convert_data(term_data, max_len=term_max_len)
# emb_matrix = load_w2v(w2is, WORD_EMB_FILE, SAVED_EMB_FILE)
# print(emb_matrix[1])
# print(len(seqs_data))
# print(seqs_data[0].shape, seqs_data[1].shape, seqs_data[2].shape)
# print(seqs_data[1])
# for i, d in enumerate(seqs_data[1]):
# if len(d) > 1:
# print(i, d)
# print(term_data[i][I_ASPECT])
# print('raw data', res_data[92]['aspect_terms'])
# print(type(seqs_data[1][0][0]))
# print(type(seqs_data[2][0][0]))
# print(w2is[0])
# reloaded_emb = np.load(SAVED_EMB_FILE)
# print(reloaded_emb[1])
# Train model.
# model = build_net(seqs_data, term_max_len, w2is,
# atae=True, extra_outputs=True,
# emb_mtrx_file=SAVED_EMB_FILE,
# save_to_file=SAVED_MDL_FILE + '2')
# train(seqs_data, model, epoch=3)
label_lookup = {idx: polarity
for polarity, idx in w2is[I_POLARITY].items()}
# print(label_lookup)
customized_data = [['The food is really delicious but '
'I hate the service', 'food'],
['The food is really delicious but '
                        'I hate the service', 'service'],
                       ['I have to say there is no one who could be faster than '
                        'him, but he needs to take care of his bad motion as '
                        'a bar attendant, which will impact his service.',
                        'service']]
pred = predict(customized_data, label_lookup, term_max_len,
save_to_file=SAVED_MDL_FILE + '2')
print(pred)
# Get attention weights for sentences.
converted_data, _ = convert_data(customized_data,
term_max_len,
with_label=False)
weights = get_attention_weighs(converted_data,
                                   att_layer_name='attention_weights',
input_layers_names=[2, 0],
save_to_file=SAVED_MDL_FILE + '2')
# print(weights[0])
print(len(customized_data[0][I_TEXT].split()))
    focus_len = len(customized_data[0][I_TEXT].split())
plot_attention_weight(weights[0], focus_len=focus_len)
# for weight in weights:
# print(weight.shape)
# TODO, Use gemsim to visualize aspect word embeddings.
|
[
"keras.models.load_model",
"keras.regularizers.l2",
"numpy.load",
"numpy.random.seed",
"numpy.argmax",
"keras.preprocessing.sequence.pad_sequences",
"keras.optimizers.Adagrad",
"keras.models.Model",
"numpy.arange",
"keras.layers.Input",
"keras.activations.softmax",
"keras.layers.Reshape",
"os.path.join",
"keras.initializers.RandomUniform",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.colorbar",
"keras.backend.eval",
"keras.preprocessing.text.Tokenizer",
"keras.utils.np_utils.to_categorical",
"lxml.etree.parse",
"numpy.random.shuffle",
"numpy.save",
"matplotlib.pyplot.show",
"numpy.asarray",
"keras.backend.function",
"keras.layers.Concatenate",
"keras.layers.RepeatVector",
"keras.layers.LSTM",
"keras.layers.Dot",
"time.time",
"keras.layers.Dense",
"keras.layers.Lambda"
] |
[((1528, 1550), 'lxml.etree.parse', 'etree.parse', (['data_file'], {}), '(data_file)\n', (1539, 1550), False, 'from lxml import etree\n'), ((11765, 11771), 'time.time', 'time', ([], {}), '()\n', (11769, 11771), False, 'from time import time\n'), ((15107, 15129), 'numpy.load', 'np.load', (['emb_mtrx_file'], {}), '(emb_mtrx_file)\n', (15114, 15129), True, 'import numpy as np\n'), ((15182, 15227), 'keras.layers.Input', 'Input', ([], {'shape': '(sents.shape[1],)', 'dtype': '"""int32"""'}), "(shape=(sents.shape[1],), dtype='int32')\n", (15187, 15227), False, 'from keras.layers import Input, Embedding, LSTM, Dense\n'), ((15777, 15809), 'keras.layers.Input', 'Input', ([], {'shape': '(1,)', 'dtype': '"""int32"""'}), "(shape=(1,), dtype='int32')\n", (15782, 15809), False, 'from keras.layers import Input, Embedding, LSTM, Dense\n'), ((15884, 15957), 'keras.initializers.RandomUniform', 'initializers.RandomUniform', ([], {'minval': '(-RANDOM_UNIFORM)', 'maxval': 'RANDOM_UNIFORM'}), '(minval=-RANDOM_UNIFORM, maxval=RANDOM_UNIFORM)\n', (15910, 15957), False, 'from keras import regularizers, initializers, optimizers\n'), ((19635, 19713), 'keras.models.Model', 'Model', ([], {'inputs': '[sents_tensor_input, aspects_tensor_input]', 'outputs': 'model_output'}), '(inputs=[sents_tensor_input, aspects_tensor_input], outputs=model_output)\n', (19640, 19713), False, 'from keras.models import Model, load_model\n'), ((21587, 21605), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (21601, 21605), True, 'import numpy as np\n'), ((21692, 21717), 'numpy.arange', 'np.arange', (['sents.shape[0]'], {}), '(sents.shape[0])\n', (21701, 21717), True, 'import numpy as np\n'), ((21722, 21744), 'numpy.random.shuffle', 'np.random.shuffle', (['idx'], {}), '(idx)\n', (21739, 21744), True, 'import numpy as np\n'), ((23197, 23224), 'numpy.argmax', 'np.argmax', (['pred_vec'], {'axis': '(1)'}), '(pred_vec, axis=1)\n', (23206, 23224), True, 'import numpy as np\n'), ((24747, 24775), 'keras.backend.eval', 'K.eval', (['emb_layer.embeddings'], {}), '(emb_layer.embeddings)\n', (24753, 24775), True, 'import keras.backend as K\n'), ((26085, 26129), 'keras.backend.function', 'K.function', (['input_layers', '[att_layer.output]'], {}), '(input_layers, [att_layer.output])\n', (26095, 26129), True, 'import keras.backend as K\n'), ((27798, 27884), 'matplotlib.pyplot.imshow', 'plt.imshow', (['attentions'], {'cmap': '"""plasma"""', 'aspect': '"""auto"""', 'extent': '[0, focus_len, 0, 1]'}), "(attentions, cmap='plasma', aspect='auto', extent=[0, focus_len, \n 0, 1])\n", (27808, 27884), True, 'import matplotlib.pyplot as plt\n'), ((27920, 27934), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (27932, 27934), True, 'import matplotlib.pyplot as plt\n'), ((27939, 27949), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (27947, 27949), True, 'import matplotlib.pyplot as plt\n'), ((28119, 28179), 'os.path.join', 'os.path.join', (['RAW_DATA_FILE_BASE', '"""Restaurants_Train_v2.xml"""'], {}), "(RAW_DATA_FILE_BASE, 'Restaurants_Train_v2.xml')\n", (28131, 28179), False, 'import os\n'), ((28241, 28296), 'os.path.join', 'os.path.join', (['RAW_DATA_FILE_BASE', '"""Laptop_Train_v2.xml"""'], {}), "(RAW_DATA_FILE_BASE, 'Laptop_Train_v2.xml')\n", (28253, 28296), False, 'import os\n'), ((28364, 28414), 'os.path.join', 'os.path.join', (['WORD_EMB_BASE', '"""glove.840B.300d.txt"""'], {}), "(WORD_EMB_BASE, 'glove.840B.300d.txt')\n", (28376, 28414), False, 'import os\n'), ((28437, 28490), 'os.path.join', 'os.path.join', 
(['RAW_DATA_FILE_BASE', '"""glove_res_emb.npy"""'], {}), "(RAW_DATA_FILE_BASE, 'glove_res_emb.npy')\n", (28449, 28490), False, 'import os\n'), ((28512, 28564), 'os.path.join', 'os.path.join', (['RAW_DATA_FILE_BASE', '"""atae_model.keras"""'], {}), "(RAW_DATA_FILE_BASE, 'atae_model.keras')\n", (28524, 28564), False, 'import os\n'), ((8251, 8287), 'keras.preprocessing.text.Tokenizer', 'Tokenizer', ([], {'filters': 'customized_filter'}), '(filters=customized_filter)\n', (8260, 8287), False, 'from keras.preprocessing.text import Tokenizer\n'), ((8772, 8808), 'numpy.asarray', 'np.asarray', (['text_seqs'], {'dtype': '"""int32"""'}), "(text_seqs, dtype='int32')\n", (8782, 8808), True, 'import numpy as np\n'), ((9104, 9143), 'numpy.asarray', 'np.asarray', (['aspects_seqs'], {'dtype': '"""int32"""'}), "(aspects_seqs, dtype='int32')\n", (9114, 9143), True, 'import numpy as np\n'), ((9461, 9487), 'keras.utils.np_utils.to_categorical', 'to_categorical', (['labels_arr'], {}), '(labels_arr)\n', (9475, 9487), False, 'from keras.utils.np_utils import to_categorical\n'), ((13562, 13595), 'numpy.save', 'np.save', (['save_to_file', 'emb_matrix'], {}), '(save_to_file, emb_matrix)\n', (13569, 13595), True, 'import numpy as np\n'), ((16794, 16826), 'keras.layers.Reshape', 'Reshape', (['(ASPECT_EMBEDDING_DIM,)'], {}), '((ASPECT_EMBEDDING_DIM,))\n', (16801, 16826), False, 'from keras.layers import RepeatVector, Dot, Concatenate, Reshape\n'), ((17036, 17057), 'keras.layers.RepeatVector', 'RepeatVector', (['max_len'], {}), '(max_len)\n', (17048, 17057), False, 'from keras.layers import RepeatVector, Dot, Concatenate, Reshape\n'), ((17092, 17105), 'keras.layers.Concatenate', 'Concatenate', ([], {}), '()\n', (17103, 17105), False, 'from keras.layers import RepeatVector, Dot, Concatenate, Reshape\n'), ((20649, 20677), 'keras.optimizers.Adagrad', 'optimizers.Adagrad', ([], {'lr': '(0.001)'}), '(lr=0.001)\n', (20667, 20677), False, 'from keras import regularizers, initializers, optimizers\n'), ((8522, 8557), 'keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['seqs'], {'maxlen': 'max_len'}), '(seqs, maxlen=max_len)\n', (8535, 8557), False, 'from keras.preprocessing.sequence import pad_sequences\n'), ((9395, 9431), 'numpy.asarray', 'np.asarray', (['labels_seqs'], {'dtype': '"""int"""'}), "(labels_seqs, dtype='int')\n", (9405, 9431), True, 'import numpy as np\n'), ((13103, 13151), 'numpy.asarray', 'np.asarray', (['pieces[begin_idx:]'], {'dtype': 'np.float32'}), '(pieces[begin_idx:], dtype=np.float32)\n', (13113, 13151), True, 'import numpy as np\n'), ((17174, 17220), 'keras.layers.LSTM', 'LSTM', (['HIDDEN_LAYER_SIZE'], {'return_sequences': '(True)'}), '(HIDDEN_LAYER_SIZE, return_sequences=True)\n', (17178, 17220), False, 'from keras.layers import Input, Embedding, LSTM, Dense\n'), ((17359, 17372), 'keras.layers.Concatenate', 'Concatenate', ([], {}), '()\n', (17370, 17372), False, 'from keras.layers import RepeatVector, Dot, Concatenate, Reshape\n'), ((17430, 17520), 'keras.layers.Dense', 'Dense', (['(EMBEDDING_DIM + ASPECT_EMBEDDING_DIM)'], {'use_bias': '(False)', 'name': '"""attention_score_1"""'}), "(EMBEDDING_DIM + ASPECT_EMBEDDING_DIM, use_bias=False, name=\n 'attention_score_1')\n", (17435, 17520), False, 'from keras.layers import Input, Embedding, LSTM, Dense\n'), ((17773, 17823), 'keras.layers.Dense', 'Dense', (['(1)'], {'use_bias': '(False)', 'name': '"""attention_score_2"""'}), "(1, use_bias=False, name='attention_score_2')\n", (17778, 17823), False, 'from keras.layers import Input, Embedding, LSTM, 
Dense\n'), ((18255, 18276), 'keras.layers.Dot', 'Dot', ([], {'axes': '(1)', 'name': '"""r"""'}), "(axes=1, name='r')\n", (18258, 18276), False, 'from keras.layers import RepeatVector, Dot, Concatenate, Reshape\n'), ((18393, 18418), 'keras.layers.Reshape', 'Reshape', (['(EMBEDDING_DIM,)'], {}), '((EMBEDDING_DIM,))\n', (18400, 18418), False, 'from keras.layers import RepeatVector, Dot, Concatenate, Reshape\n'), ((18463, 18502), 'keras.layers.Lambda', 'Lambda', (['(lambda tensor: tensor[:, -1, :])'], {}), '(lambda tensor: tensor[:, -1, :])\n', (18469, 18502), False, 'from keras.layers import Lambda\n'), ((18542, 18579), 'keras.layers.Concatenate', 'Concatenate', ([], {'name': '"""final_concatenate"""'}), "(name='final_concatenate')\n", (18553, 18579), False, 'from keras.layers import RepeatVector, Dot, Concatenate, Reshape\n'), ((18655, 18744), 'keras.layers.Dense', 'Dense', (['EMBEDDING_DIM'], {'activation': '"""tanh"""', 'use_bias': '(False)', 'name': '"""final_representation"""'}), "(EMBEDDING_DIM, activation='tanh', use_bias=False, name=\n 'final_representation')\n", (18660, 18744), False, 'from keras.layers import Input, Embedding, LSTM, Dense\n'), ((19334, 19381), 'keras.layers.LSTM', 'LSTM', (['HIDDEN_LAYER_SIZE'], {'return_sequences': '(False)'}), '(HIDDEN_LAYER_SIZE, return_sequences=False)\n', (19338, 19381), False, 'from keras.layers import Input, Embedding, LSTM, Dense\n'), ((19444, 19518), 'keras.layers.Dense', 'Dense', (['labels.shape[1]'], {'activation': '"""softmax"""', 'name': '"""Simple_AE_LSTM_ouptut"""'}), "(labels.shape[1], activation='softmax', name='Simple_AE_LSTM_ouptut')\n", (19449, 19518), False, 'from keras.layers import Input, Embedding, LSTM, Dense\n'), ((22790, 22851), 'keras.models.load_model', 'load_model', (['save_to_file'], {'custom_objects': "{'softmax': softmax}"}), "(save_to_file, custom_objects={'softmax': softmax})\n", (22800, 22851), False, 'from keras.models import Model, load_model\n'), ((24628, 24652), 'keras.models.load_model', 'load_model', (['save_to_file'], {}), '(save_to_file)\n', (24638, 24652), False, 'from keras.models import Model, load_model\n'), ((25694, 25755), 'keras.models.load_model', 'load_model', (['save_to_file'], {'custom_objects': "{'softmax': softmax}"}), "(save_to_file, custom_objects={'softmax': softmax})\n", (25704, 25755), False, 'from keras.models import Model, load_model\n'), ((13455, 13461), 'time.time', 'time', ([], {}), '()\n', (13459, 13461), False, 'from time import time\n'), ((17918, 17936), 'keras.activations.softmax', 'softmax', (['x'], {'axis': '(1)'}), '(x, axis=1)\n', (17925, 17936), False, 'from keras.activations import softmax\n'), ((18949, 18983), 'keras.regularizers.l2', 'regularizers.l2', (['L2_REGULARIZATION'], {}), '(L2_REGULARIZATION)\n', (18964, 18983), False, 'from keras import regularizers, initializers, optimizers\n')]
|
import math
import itertools as itt
import numpy as np
from collections import namedtuple
from datetime import datetime
from scipy.special import gamma
from sklearn.neighbors import BallTree
import random
from pywde.pywt_ext import WaveletTensorProduct
from pywde.common import all_zs_tensor
class dictwithfactory(dict):
def __init__(self, factory):
super(dictwithfactory, self).__init__()
self._factory = factory
def __getitem__(self, key):
if key in self:
return self.get(key)
val = self._factory(key)
self[key] = val
return val
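# Usage sketch (illustrative): d = dictwithfactory(lambda k: k * 2)
# d[3] computes 6 via the factory on first access and memoises it, so a
# later d[3] returns the cached value without calling the factory again.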
class SPWDE(object):
def __init__(self, waves, k=1):
self.wave = WaveletTensorProduct([wave_desc[0] for wave_desc in waves])
self.j0s = [wave_desc[1] for wave_desc in waves]
self.k = k
self.minx = None
self.maxx = None
# target distance
TARGET_NORMED = 'normed'
TARGET_DIFF = 'diff'
# threshold calculation
TH_CLASSIC = 'classic' # Donoho et al
    TH_ADJUSTED = 'adjusted' # Delyon & Juditsky
TH_EMP_STD = 'emp-var' # New
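    # Sketch of the three hard-threshold criteria used in best_c below
    # (see the order1/order2/order4 key functions there): keep beta_{j,q,z} when
    #   classic  : |beta_{j,q,z}| >= C
    #   adjusted : |beta_{j,q,z}| / sqrt(delta_j - j) >= C
    #   emp-var  : |beta_{j,q,z}| / std(beta_{j,q,z}^{(-i)}) >= C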
def best_j(self, xs, mode, stop_on_max=False):
t0 = datetime.now()
assert mode in [self.TARGET_NORMED, self.TARGET_DIFF], 'Wrong mode'
best_j_data = []
balls_info = calc_sqrt_vs(xs, self.k)
self.minx = np.amin(xs, axis=0)
self.maxx = np.amax(xs, axis=0)
omega = calc_omega(xs.shape[0], self.k)
best_b_hat_j = None
best_j = None
for j in range(8):
# In practice, one would stop when maximum is reached, i.e. after first decreasing value of B Hat
g_ring_no_i_xs = []
wave_base_j_00_ZS, wave_base_j_00_ZS_at_xs, wave_dual_j_00_ZS_at_xs = self.calc_funs_at(j, (0, 0), xs)
if mode == self.TARGET_DIFF:
coeff_j_00_ZS = self.calc_coeffs(wave_base_j_00_ZS_at_xs, wave_dual_j_00_ZS_at_xs, j, xs, balls_info, (0, 0))
coeffs = np.array(list(coeff_j_00_ZS.values()))
alphas_norm_2 = (coeffs[:,0] * coeffs[:,1]).sum()
for i, x in enumerate(xs):
coeff_no_i_j_00_ZS = self.calc_coeffs_no_i(wave_base_j_00_ZS_at_xs, wave_dual_j_00_ZS_at_xs, j, xs, i, balls_info, (0, 0))
g_ring_no_i_at_xi = 0.0
norm2 = 0.0
for zs in coeff_no_i_j_00_ZS:
if zs not in wave_base_j_00_ZS_at_xs:
continue
alpha_zs, alpha_d_zs = coeff_no_i_j_00_ZS[zs]
g_ring_no_i_at_xi += alpha_zs * wave_base_j_00_ZS_at_xs[zs][i]
norm2 += alpha_zs * alpha_d_zs
# q_ring_x ^ 2 / norm2 == f_at_x
if norm2 == 0.0:
if g_ring_no_i_at_xi == 0.0:
g_ring_no_i_xs.append(0.0)
else:
                        raise RuntimeError('Got value but no norm')
else:
if mode == self.TARGET_NORMED:
g_ring_no_i_xs.append(g_ring_no_i_at_xi * g_ring_no_i_at_xi / norm2)
else: # mode == self.MODE_DIFF:
g_ring_no_i_xs.append(g_ring_no_i_at_xi * g_ring_no_i_at_xi)
g_ring_no_i_xs = np.array(g_ring_no_i_xs)
if mode == self.TARGET_NORMED:
b_hat_j = omega * (np.sqrt(g_ring_no_i_xs) * balls_info.sqrt_vol_k).sum()
else: # mode == self.MODE_DIFF:
b_hat_j = 2 * omega * (np.sqrt(g_ring_no_i_xs) * balls_info.sqrt_vol_k).sum() - alphas_norm_2
print(mode, j, b_hat_j)
if best_j is None:
best_j = j
best_b_hat_j = b_hat_j
elif b_hat_j > best_b_hat_j:
best_j = j
best_b_hat_j = b_hat_j
elif stop_on_max:
self.the_best_j = best_j
return best_j
if stop_on_max:
continue
# if calculating pdf
name = 'WDE Alphas, dj=%d' % j
if mode == self.TARGET_DIFF:
pdf = self.calc_pdf(wave_base_j_00_ZS, coeff_j_00_ZS, name)
else:
coeff_j_00_ZS = self.calc_coeffs(wave_base_j_00_ZS_at_xs, wave_dual_j_00_ZS_at_xs, j, xs, balls_info, (0, 0))
pdf = self.calc_pdf(wave_base_j_00_ZS, coeff_j_00_ZS, name)
elapsed = (datetime.now() - t0).total_seconds()
best_j_data.append((j, b_hat_j, pdf, elapsed))
best_b_hat = max([info_j[1] for info_j in best_j_data])
best_j = list(filter(lambda info_j: info_j[1] == best_b_hat, best_j_data))[0][0]
self.best_j_data = [
tuple([info_j[0], info_j[0] == best_j, info_j[1], info_j[2], info_j[3]])
for info_j in best_j_data]
def best_c(self, xs, delta_j, opt_target, th_mode):
"""best c - hard thresholding"""
assert delta_j > 0, 'delta_j must be 1 or more'
assert opt_target in [self.TARGET_NORMED, self.TARGET_DIFF], 'Wrong optimisation target'
assert th_mode in [self.TH_CLASSIC, self.TH_ADJUSTED, self.TH_EMP_STD], 'Wrong threshold strategy'
balls_info = calc_sqrt_vs(xs, self.k)
self.minx = np.amin(xs, axis=0)
self.maxx = np.amax(xs, axis=0)
qqs = self.wave.qq
# base funs for levels of interest
dict_triple_J_QQ_ZS__wbase_wbase_at_wdual_at = {}
dict_triple_J_QQ_ZS__wbase_wbase_at_wdual_at[(0, qqs[0])] = self.calc_funs_at(0, qqs[0], xs)
for j, qq in itt.product(range(delta_j), qqs[1:]):
dict_triple_J_QQ_ZS__wbase_wbase_at_wdual_at[(j, qq)] = self.calc_funs_at(j, qq, xs)
# dict_triple_J_QQ_ZS__wbase_wbase_at_wdual_at [ (j, qq) ] => a triple with
# wave_base_0_00_ZS, wave_base_0_00_ZS_at_xs, wave_dual_j_00_ZS_at_xs
# memoise balls
all_balls = []
for i in range(len(xs)):
balls = balls_no_i(balls_info, i)
all_balls.append(balls)
        # rank betas from largest to smallest; we will incrementally calculate
        # the HD_i for each in turn
beta_var = True
all_betas = []
for (j, qq), triple in dict_triple_J_QQ_ZS__wbase_wbase_at_wdual_at.items():
_, wave_base_j_qq_ZS_at_xs, wave_dual_j_qq_ZS_at_xs = triple
if qq == (0, 0):
alphas_dict = self.calc_coeffs(wave_base_j_qq_ZS_at_xs, wave_dual_j_qq_ZS_at_xs, 0, xs, balls_info, (0, 0))
continue
cc = self.calc_coeffs(wave_base_j_qq_ZS_at_xs, wave_dual_j_qq_ZS_at_xs, j, xs, balls_info, qq)
for zs in cc:
coeff_zs, coeff_d_zs = cc[zs]
if coeff_zs == 0.0:
continue
if beta_var:
coeff_i_vals = []
for i, x in enumerate(xs):
coeff_i, coeff_d_i = self.calc_1_coeff_no_i(wave_base_j_qq_ZS_at_xs, wave_dual_j_qq_ZS_at_xs, j,
xs, i, all_balls[i], qq, zs)
coeff_i_vals.append(coeff_i)
# coeff_i_std = np.array(coeff_i_vals).std()
coeff_i_std = (np.array(coeff_i_vals) - coeff_zs).std()
else:
coeff_i_std = 0.
all_betas.append((j, qq, zs, coeff_zs, coeff_d_zs, coeff_i_std))
        # order1 : 1995, Donoho, Johnstone, Kerkyacharian, Picard - Wavelet Shrinkage, Asymptopia
        order1 = lambda tt: math.fabs(tt[3])
        # order2 : 1996, Delyon, Juditsky - On Minimax Wavelet Estimators
        order2 = lambda tt: math.fabs(tt[3]) / math.sqrt(delta_j - tt[0])
# order3 : New things
# order3 = lambda tt: math.fabs(tt[3]) - 4 * tt[5] ## kind of work for low n
# order3 = lambda tt: math.fabs(tt[3]) / (math.fabs(tt[3]) * 0.5 + tt[5]) # ??
# order3 = lambda tt: tt[5]
# order3 = lambda tt: math.fabs(tt[3]) / tt[5] / math.sqrt(delta_j - tt[0])
order4 = lambda tt: math.fabs(tt[3]) / tt[5]
if th_mode == self.TH_CLASSIC:
key_order = order1
subtitle = r"$\left| \beta_{j,q,z} \right| \geq C$"
elif th_mode == self.TH_ADJUSTED:
key_order = order2
subtitle = r"$\left| \beta_{j,q,z} \right| \geq C \sqrt{j + 1}$"
elif th_mode == self.TH_EMP_STD:
key_order = order4
subtitle = r"$\left| \beta_{j,q,z} \right| \geq C \hat{\sigma}\left[\beta_{j,q,z}^{(-i)}\right]$"
else:
raise RuntimeError('Unknown threshold mode')
all_betas = sorted(all_betas, key=key_order, reverse=True)
        # get a baseline for accumulated values by computing alphas and the
        # target HD_i functions
_, wave_base_0_00_ZS_at_xs, wave_dual_0_00_ZS_at_xs = dict_triple_J_QQ_ZS__wbase_wbase_at_wdual_at[(0, (0, 0))]
g_ring_no_i_xs = np.zeros(xs.shape[0])
norm2_xs = np.zeros(xs.shape[0])
for i, x in enumerate(xs):
coeff_no_i_0_00_ZS = self.calc_coeffs_no_i(wave_base_0_00_ZS_at_xs, wave_dual_0_00_ZS_at_xs, 0, xs, i,
balls_info, (0, 0))
for zs in coeff_no_i_0_00_ZS:
if zs not in wave_base_0_00_ZS_at_xs:
continue
alpha_zs, alpha_d_zs = coeff_no_i_0_00_ZS[zs]
g_ring_no_i_xs[i] += alpha_zs * wave_base_0_00_ZS_at_xs[zs][i]
norm2_xs[i] += alpha_zs * alpha_d_zs
## print('g_ring_no_i_xs', g_ring_no_i_xs * g_ring_no_i_xs) << !!! OK !!!
num_alphas = 0
for zs in alphas_dict:
alpha_zs, alpha_d_zs = alphas_dict[zs]
if alpha_zs == 0.0 or alpha_d_zs == 0.0:
continue
num_alphas += 1
omega_nk = calc_omega(xs.shape[0], self.k)
best_c_data = []
best_hat = None
self.best_c_found = None
for cx, beta_info in enumerate(all_betas):
j, qq, zs, coeff , coeff_d, coeff_i_std = beta_info
_, wave_base_j_qq_ZS_at_xs, wave_dual_j_qq_ZS_at_xs = dict_triple_J_QQ_ZS__wbase_wbase_at_wdual_at[(j, qq)]
coeff_i_vals = []
for i, x in enumerate(xs):
coeff_i, coeff_d_i = self.calc_1_coeff_no_i(wave_base_j_qq_ZS_at_xs, wave_dual_j_qq_ZS_at_xs, j, xs, i, all_balls[i], qq, zs)
if zs not in wave_base_j_qq_ZS_at_xs:
continue
g_ring_no_i_xs[i] += coeff_i * wave_base_j_qq_ZS_at_xs[zs][i]
norm2_xs[i] += coeff_i * coeff_d_i
coeff_i_vals.append(coeff_i)
if opt_target == self.TARGET_NORMED:
b_hat_beta = omega_nk * (np.sqrt(g_ring_no_i_xs * g_ring_no_i_xs / norm2_xs) * balls_info.sqrt_vol_k).sum()
else: # mode == self.MODE_DIFF:
b_hat_beta = 2 * omega_nk * (np.sqrt(g_ring_no_i_xs * g_ring_no_i_xs) * balls_info.sqrt_vol_k).sum() - norm2_xs.mean()
best_c_data.append((key_order(beta_info), b_hat_beta, np.array(coeff_i_vals).std(), num_alphas + cx + 1))
# calc best
if len(best_c_data) > 0:
pos_c = np.argmax(np.array([tt[1] for tt in best_c_data]))
print('Best C', best_c_data[pos_c], '@ %d' % pos_c)
name = 'WDE C = %f (%d + %d)' % (best_c_data[pos_c][0], num_alphas, pos_c + 1)
the_betas = all_betas[:pos_c + 1]
else:
name = 'WDE C = None'
the_betas = []
pdf = self.calc_pdf_with_betas(dict_triple_J_QQ_ZS__wbase_wbase_at_wdual_at, alphas_dict, the_betas, name, subtitle)
if len(best_c_data) > 0:
self.best_c_found = (pdf, best_c_data[pos_c])
self.best_c_data = best_c_data
else:
self.best_c_found = (pdf, None)
self.best_c_data = best_c_data
def best_greedy_not_working(self, xs, delta_j, mode):
"best c - greedy optimisation `go`"
assert delta_j > 0, 'delta_j must be 1 or more'
        assert mode in [self.TARGET_NORMED, self.TARGET_DIFF], 'Wrong mode'
random.seed(1)
balls_info = calc_sqrt_vs(xs, self.k)
self.minx = np.amin(xs, axis=0)
self.maxx = np.amax(xs, axis=0)
qqs = self.wave.qq
# base funs for levels of interest
calc_funs_at = lambda key: self.calc_funs_at(key[0], key[1], xs)
dict_triple_J_QQ_ZS__wbase_wbase_at_wdual_at = dictwithfactory(calc_funs_at)
# dict_triple_J_QQ_ZS__wbase_wbase_at_wdual_at [ (j, qq) ] => a triple with
# wave_base_0_00_ZS, wave_base_0_00_ZS_at_xs, wave_dual_j_00_ZS_at_xs
# memoise balls
all_balls = []
for i in range(len(xs)):
balls = balls_no_i(balls_info, i)
all_balls.append(balls)
        # rank betas from largest to smallest; we will incrementally calculate
        # the HD_i for each in turn
triple = dict_triple_J_QQ_ZS__wbase_wbase_at_wdual_at[(0, (0, 0))]
_, wave_base_j_qq_ZS_at_xs, wave_dual_j_qq_ZS_at_xs = triple
alphas_dict = self.calc_coeffs(wave_base_j_qq_ZS_at_xs, wave_dual_j_qq_ZS_at_xs, 0, xs, balls_info, (0, 0))
        # get a baseline for accumulated values by computing alphas and the
        # target HD_i functions
# >> calculate alphas >> same as best_c
_, wave_base_0_00_ZS_at_xs, wave_dual_0_00_ZS_at_xs = dict_triple_J_QQ_ZS__wbase_wbase_at_wdual_at[(0, (0, 0))]
g_ring_no_i_xs = np.zeros(xs.shape[0])
norm2_xs = np.zeros(xs.shape[0])
for i, x in enumerate(xs):
coeff_no_i_0_00_ZS = self.calc_coeffs_no_i(wave_base_0_00_ZS_at_xs, wave_dual_0_00_ZS_at_xs, 0, xs, i,
balls_info, (0, 0))
for zs in coeff_no_i_0_00_ZS:
if zs not in wave_base_0_00_ZS_at_xs:
continue
alpha_zs, alpha_d_zs = coeff_no_i_0_00_ZS[zs]
g_ring_no_i_xs[i] += alpha_zs * wave_base_0_00_ZS_at_xs[zs][i]
norm2_xs[i] += alpha_zs * alpha_d_zs
## print('g_ring_no_i_xs', g_ring_no_i_xs * g_ring_no_i_xs) << !!! OK !!!
def populate_at(new_key, populate_mode):
if populate_mode == 'by_j':
j, _, _ = new_key
if len(curr_betas.keys()) == 0:
# add new level
j = j + 1
print('populate_at - new level', j)
for qq in qqs[1:]:
triple = dict_triple_J_QQ_ZS__wbase_wbase_at_wdual_at[(j, qq)]
_, wave_base_j_qq_ZS_at_xs, wave_dual_j_qq_ZS_at_xs = triple
cc = self.calc_coeffs(wave_base_j_qq_ZS_at_xs, wave_dual_j_qq_ZS_at_xs, j, xs, balls_info, qq)
for zs in cc:
coeff_zs, coeff_d_zs = cc[zs]
if coeff_zs == 0.0:
continue
curr_betas[(j, qq, zs)] = coeff_zs, coeff_d_zs
print('curr_betas #', len(curr_betas))
return
if populate_mode == 'by_near_zs':
raise RuntimeError('by_near_zs not implemented')
raise RuntimeError('populate_mode_wrong')
def beta_factory(key):
j, qq, zs, i = key
coeff_i, coeff_d_i = self.calc_1_coeff_no_i(wave_base_j_qq_ZS_at_xs, wave_dual_j_qq_ZS_at_xs, j, xs, i,
all_balls[i], qq, zs)
return coeff_i, coeff_d_i
betas_no_i_j_qq_zz_i = dictwithfactory(beta_factory)
def g_ring_calc(j, qq, zs):
loc_g_ring_no_i_xs = g_ring_no_i_xs.copy()
loc_norm2_xs = norm2_xs.copy()
coeff_i_vals = []
for i, x in enumerate(xs):
coeff_i, coeff_d_i = betas_no_i_j_qq_zz_i[(j, qq, zs, i)]
if zs not in wave_base_j_qq_ZS_at_xs:
continue
loc_g_ring_no_i_xs[i] += coeff_i * wave_base_j_qq_ZS_at_xs[zs][i]
loc_norm2_xs[i] += coeff_i * coeff_d_i
coeff_i_vals.append(coeff_i)
return loc_g_ring_no_i_xs, loc_norm2_xs, np.array(coeff_i_vals)
ball_std = balls_info.sqrt_vol_k.std()
def get_all_betas():
resp = []
for k, v in curr_betas.items():
j, qq, zs = k
coeff_zs, coeff_d_zs = v
loc_g_ring_no_i_xs, loc_norm2_xs, betas_j_qq_zs_no_i = g_ring_calc(j, qq, zs)
                if mode == self.TARGET_NORMED:
b_hat_beta = omega_nk * (np.sqrt(loc_g_ring_no_i_xs * loc_g_ring_no_i_xs / loc_norm2_xs) * balls_info.sqrt_vol_k).sum()
else: # mode == self.MODE_DIFF:
b_hat_beta = 2 * omega_nk * (np.sqrt(loc_g_ring_no_i_xs * loc_g_ring_no_i_xs) * balls_info.sqrt_vol_k).sum() - loc_norm2_xs.mean()
if len(betas_j_qq_zs_no_i) == 0:
continue
#print(j, qq, zs, b_hat_beta, coeff_zs, 3 * math.sqrt(betas_j_qq_zs_no_i.std()))
correction = 2 * math.sqrt(betas_j_qq_zs_no_i.std()) ##np.abs(loc_g_ring_no_i_xs).std() ## * (j+1) ##* ball_std
b_hat_std = betas_j_qq_zs_no_i.std()
resp.append((j, qq, zs, coeff_zs, coeff_d_zs, b_hat_beta + correction, b_hat_beta, b_hat_std))
return resp
popu_mode = 'by_j'
the_betas = []
omega_nk = calc_omega(xs.shape[0], self.k)
found = True
curr_betas = {}
curr_b_hat_beta = None
# populate w/ j = 0, all QQ
populate_at((-1, None, None), 'by_j')
betas_num = 10
## << BEST !! count number of betas of current level as we know it
## 180 or 90 give very good results
curr_j = 0
used_level = False
while curr_j < 6:
all_betas = get_all_betas()
if len(all_betas) == 0:
populate_at((curr_j, None, None), popu_mode)
curr_j += 1
used_level = False
continue
fkey1 = lambda tt: tt[5]
fkey2 = lambda tt: math.fabs(tt[3])*tt[5]
fkey3 = lambda tt: tt[3]*tt[3]*tt[5]
fkey4 = lambda tt: math.fabs(tt[3])*tt[5]/tt[6]
fkey5 = lambda tt: math.fabs(tt[3]) * tt[5] - tt[6]
fkey6 = lambda tt: tt[5] - tt[6] / (curr_j + 1)
fkey7 = lambda tt: tt[5] / tt[6]
fkey8 = lambda tt: math.fabs(tt[3])/tt[6]
fkey = fkey1
all_betas = sorted(all_betas, key=fkey, reverse=True)
##print(all_betas)
# print(all_betas[0], ':', fkey(all_betas[0]), '..(%d)..' % len(all_betas), all_betas[-1], ':', fkey(all_betas[-1]))
# import seaborn as sns
# import matplotlib.pyplot as plt
# xx = np.array([(tt[3], fkey(tt)) for tt in all_betas])
# ##xx = xx - xx.min()
# sns.scatterplot(xx[:,0], xx[:,1])
# plt.show()
# raise RuntimeError('blah')
## ix = random.choices(list(range(all_betas)), weights=[fkey(tt) for tt in all_betas])
chosen_betas = all_betas[:betas_num]
new_b_hat_beta = max([tt[5] for tt in chosen_betas])
if curr_b_hat_beta is None or new_b_hat_beta > curr_b_hat_beta:
## print('.'*betas_num, end='')
curr_b_hat_beta = min([tt[5] for tt in chosen_betas])
used_level = True
print(all_betas[0], curr_b_hat_beta)
for ix_tuple in chosen_betas:
the_betas.append(ix_tuple)
del curr_betas[ix_tuple[:3]]
## populate_at(ix_tuple[:3], popu_mode)
g_ring_no_i_xs, norm2_xs, _ = g_ring_calc(*ix_tuple[:3])
continue
if not used_level:
break
if curr_j + 1 >= 6:
break
print('\n next level, # betas =', len(the_betas))
for k in list(curr_betas.keys()):
del curr_betas[k]
populate_at((curr_j, None, None), popu_mode)
curr_j += 1
used_level = False
print('')
name = 'WDE greedy = %f' % curr_b_hat_beta
the_betas_p = [tt[:6] for tt in the_betas]
pdf = self.calc_pdf_with_betas(dict_triple_J_QQ_ZS__wbase_wbase_at_wdual_at, alphas_dict, the_betas_p, name)
self.best_c_found = (pdf, curr_b_hat_beta)
self.best_c_data = [(ix, tt[5]) for ix, tt in enumerate(the_betas)]
def best_greedy(self, xs, delta_j, j0, opt_target):
"best c - greedy optimisation `go`"
assert delta_j > 0, 'delta_j must be 1 or more'
assert opt_target in [self.TARGET_NORMED, self.TARGET_DIFF], 'Wrong optimisation target'
balls_info = calc_sqrt_vs(xs, self.k)
self.minx = np.amin(xs, axis=0)
self.maxx = np.amax(xs, axis=0)
qqs = self.wave.qq
# base funs for levels of interest
calc_funs_at = lambda key: self.calc_funs_at(key[0], key[1], xs)
dict_triple_J_QQ_ZS__wbase_wbase_at_wdual_at = dictwithfactory(calc_funs_at)
# dict_triple_J_QQ_ZS__wbase_wbase_at_wdual_at [ (j, qq) ] => a triple with
# wave_base_0_00_ZS, wave_base_0_00_ZS_at_xs, wave_dual_j_00_ZS_at_xs
# memoise balls
all_balls = []
for i in range(len(xs)):
balls = balls_no_i(balls_info, i)
all_balls.append(balls)
        # rank betas from largest to smallest; we will incrementally calculate
        # the HD_i for each in turn
triple = dict_triple_J_QQ_ZS__wbase_wbase_at_wdual_at[(0, (0, 0))]
_, wave_base_j_qq_ZS_at_xs, wave_dual_j_qq_ZS_at_xs = triple
alphas_dict = self.calc_coeffs(wave_base_j_qq_ZS_at_xs, wave_dual_j_qq_ZS_at_xs, 0, xs, balls_info, (0, 0))
        # get a baseline for accumulated values by computing alphas and the
        # target HD_i functions
# >> calculate alphas >> same as best_c
_, wave_base_0_00_ZS_at_xs, wave_dual_0_00_ZS_at_xs = dict_triple_J_QQ_ZS__wbase_wbase_at_wdual_at[(0, (0, 0))]
g_ring_no_i_xs = np.zeros(xs.shape[0])
norm2_xs = np.zeros(xs.shape[0])
for i, x in enumerate(xs):
coeff_no_i_0_00_ZS = self.calc_coeffs_no_i(wave_base_0_00_ZS_at_xs, wave_dual_0_00_ZS_at_xs, 0, xs, i,
balls_info, (0, 0))
for zs in coeff_no_i_0_00_ZS:
if zs not in wave_base_0_00_ZS_at_xs:
continue
alpha_zs, alpha_d_zs = coeff_no_i_0_00_ZS[zs]
g_ring_no_i_xs[i] += alpha_zs * wave_base_0_00_ZS_at_xs[zs][i]
norm2_xs[i] += alpha_zs * alpha_d_zs
## print('g_ring_no_i_xs', g_ring_no_i_xs * g_ring_no_i_xs) << !!! OK !!!
def populate_betas():
for dj in range(delta_j):
j = j0 + dj
print('Calc. betas for level %d' % j)
for qq in qqs[1:]:
triple = dict_triple_J_QQ_ZS__wbase_wbase_at_wdual_at[(j, qq)]
_, wave_base_j_qq_ZS_at_xs, wave_dual_j_qq_ZS_at_xs = triple
cc = self.calc_coeffs(wave_base_j_qq_ZS_at_xs, wave_dual_j_qq_ZS_at_xs, j, xs, balls_info, qq)
for zs in cc:
coeff_zs, coeff_d_zs = cc[zs]
if coeff_zs == 0.0:
continue
curr_betas[(j, qq, zs)] = coeff_zs, coeff_d_zs
print('curr_betas #', len(curr_betas))
def beta_factory(key):
j, qq, zs, i = key
coeff_i, coeff_d_i = self.calc_1_coeff_no_i(wave_base_j_qq_ZS_at_xs, wave_dual_j_qq_ZS_at_xs, j, xs, i,
all_balls[i], qq, zs)
return coeff_i, coeff_d_i
betas_no_i_j_qq_zz_i = dictwithfactory(beta_factory)
def g_ring_calc(j, qq, zs):
loc_g_ring_no_i_xs = g_ring_no_i_xs.copy()
loc_norm2_xs = norm2_xs.copy()
coeff_i_vals = []
for i, x in enumerate(xs):
coeff_i, coeff_d_i = betas_no_i_j_qq_zz_i[(j, qq, zs, i)]
if zs not in wave_base_j_qq_ZS_at_xs:
continue
loc_g_ring_no_i_xs[i] += coeff_i * wave_base_j_qq_ZS_at_xs[zs][i]
loc_norm2_xs[i] += coeff_i * coeff_d_i
coeff_i_vals.append(coeff_i)
return loc_g_ring_no_i_xs, loc_norm2_xs, np.array(coeff_i_vals)
ball_std = balls_info.sqrt_vol_k.std()
def calc_b_hat():
resp = []
for k, v in curr_betas.items():
j, qq, zs = k
coeff_zs, coeff_d_zs = v
loc_g_ring_no_i_xs, loc_norm2_xs, betas_j_qq_zs_no_i = g_ring_calc(j, qq, zs)
if opt_target == self.TARGET_NORMED:
b_hat_beta = omega_nk * (np.sqrt(loc_g_ring_no_i_xs * loc_g_ring_no_i_xs / loc_norm2_xs) * balls_info.sqrt_vol_k).sum()
else: # mode == self.MODE_DIFF:
b_hat_beta = 2 * omega_nk * (np.sqrt(loc_g_ring_no_i_xs * loc_g_ring_no_i_xs) * balls_info.sqrt_vol_k).sum() - loc_norm2_xs.mean()
if len(betas_j_qq_zs_no_i) == 0:
continue
#print(j, qq, zs, b_hat_beta, coeff_zs, 3 * math.sqrt(betas_j_qq_zs_no_i.std()))
## correction = 3 * math.sqrt(betas_j_qq_zs_no_i.std()) ##np.abs(loc_g_ring_no_i_xs).std() ## * (j+1) ##* ball_std
correction = math.fabs(coeff_zs) / betas_j_qq_zs_no_i.std()
b_hat_std = betas_j_qq_zs_no_i.std()
resp.append((j, qq, zs, coeff_zs, coeff_d_zs, b_hat_beta + correction, b_hat_beta, b_hat_std))
return resp
popu_mode = 'by_j'
the_betas = []
omega_nk = calc_omega(xs.shape[0], self.k)
found = True
curr_betas = {}
curr_b_hat_beta = None
# populate w/ j = 0, all QQ
populate_betas()
# betas_ref : position of b_hat that we will use to stop iteration. If we can't improve \hat{B}
# beyond the value at position betas_ref next time, we consider the optimum reached.
betas_ref = 3
while True:
curr_b_hat = calc_b_hat()
if len(curr_b_hat) == 0:
break
fkey1 = lambda tt: tt[5]
fkey = fkey1
curr_b_hat = sorted(curr_b_hat, key=fkey, reverse=True)
new_b_hat_beta = fkey(curr_b_hat[0])
if curr_b_hat_beta is None or new_b_hat_beta > curr_b_hat_beta:
## we use a slightly less optimal value to smooth target a little bit
curr_b_hat_beta = fkey(curr_b_hat[betas_ref - 1])
print(curr_b_hat[0], curr_b_hat_beta)
the_betas.append(curr_b_hat[0])
del curr_betas[curr_b_hat[0][:3]]
g_ring_no_i_xs, norm2_xs, _ = g_ring_calc(*curr_b_hat[0][:3])
continue
else:
break
print('')
name = 'WDE greedy = %f' % curr_b_hat_beta
the_betas_p = [tt[:6] for tt in the_betas]
pdf = self.calc_pdf_with_betas(dict_triple_J_QQ_ZS__wbase_wbase_at_wdual_at, alphas_dict, the_betas_p, name)
self.best_c_found = (pdf, curr_b_hat_beta)
self.best_c_data = [(ix, tt[5]) for ix, tt in enumerate(the_betas)]
def calc_pdf(self, base_fun, alphas, name):
norm2 = 0.0
for zs in alphas:
if zs not in base_fun:
continue
alpha_zs, alpha_d_zs = alphas[zs]
norm2 += alpha_zs * alpha_d_zs
if norm2 == 0.0:
raise RuntimeError('No norm')
def pdf(xs, alphas=alphas, norm2=norm2, base_fun=base_fun):
g_ring_xs = np.zeros(xs.shape[0])
for zs in alphas:
if zs not in base_fun:
continue
alpha_zs, alpha_d_zs = alphas[zs]
g_ring_xs += alpha_zs * base_fun[zs](xs)
# q_ring_x ^ 2 / norm2 == f_at_x
return g_ring_xs * g_ring_xs / norm2
pdf.name = name
return pdf
def calc_pdf_with_betas(self, base_funs_j, alphas, betas, name, subtitle=None):
"Calculate the pdf for given alphas and betas"
norm2 = 0.0
base_fun, _, _ = base_funs_j[(0, (0, 0))]
for zs in alphas:
if zs not in base_fun:
continue
alpha_zs, alpha_d_zs = alphas[zs]
norm2 += alpha_zs * alpha_d_zs
for j, qq, zs, coeff_zs, coeff_d_zs, coeff_std in betas:
base_fun, _, _ = base_funs_j[(j, qq)]
if zs not in base_fun:
continue
norm2 += coeff_zs * coeff_d_zs
if norm2 == 0.0:
raise RuntimeError('No norm')
def pdf(xs, alphas=alphas, betas=betas, norm2=norm2, base_funs_j=base_funs_j):
g_ring_xs = np.zeros(xs.shape[0])
base_fun, _, _ = base_funs_j[(0, (0, 0))]
for zs in alphas:
if zs not in base_fun:
continue
alpha_zs, alpha_d_zs = alphas[zs]
g_ring_xs += alpha_zs * base_fun[zs](xs)
for j, qq, zs, coeff_zs, coeff_d_zs, coeff_std in betas:
base_fun, _, _ = base_funs_j[(j, qq)]
if zs not in base_fun:
continue
g_ring_xs += coeff_zs * base_fun[zs](xs)
# q_ring_x ^ 2 / norm2 == f_at_x
return g_ring_xs * g_ring_xs / norm2
pdf.name = name
pdf.subtitle = subtitle
return pdf
def calc_funs_at(self, j, qq, xs):
"""
:param j: int, resolution level
:param qq: tensor index in R^d
:param xs: data in R^d
:return: (base funs, base @ xs, dual @ xs)
funs[zs] = base-wave _{j,zs}^{(qq)}
base @ xs[zs] = base-wave _{j,zs}^{(qq)}(xs)
dual @ xs[zs] = dual-wave _{j,zs}^{(qq)}(xs)
"""
wave_base_j_qq_ZS, wave_dual_j_qq_ZS = self.calc_funs(j, qq)
base_fun_xs = {}
for zs in wave_base_j_qq_ZS:
base_fun_xs[zs] = wave_base_j_qq_ZS[zs](xs)
dual_fun_xs = {}
for zs in wave_dual_j_qq_ZS:
dual_fun_xs[zs] = wave_dual_j_qq_ZS[zs](xs)
return wave_base_j_qq_ZS, base_fun_xs, dual_fun_xs
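# Illustrative note (assumed call, not from the original code): a typical use is
#   base_funs, base_at_xs, dual_at_xs = self.calc_funs_at(j=0, qq=(0, 0), xs=xs)
# where base_at_xs[zs] and dual_at_xs[zs] hold the base/dual wavelet evaluated at every point in xs.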
def calc_funs(self, j, qq):
"""
:param j: int, resolution level
:param qq: tensor index in R^d
:return: (base funs, dual funs)
funs[zs] = base|dual wave _{j,zs}^{(qq)}
wave_base_j_qq_ZS, wave_dual_j_qq_ZS
"""
jj = [j + j0 for j0 in self.j0s]
jpow2 = np.array([2 ** j for j in jj])
funs = {}
for what in ['dual', 'base']:
zs_min, zs_max = self.wave.z_range(what, (qq, jpow2, None), self.minx, self.maxx)
funs[what] = {}
for zs in itt.product(*all_zs_tensor(zs_min, zs_max)):
funs[what][zs] = self.wave.fun_ix(what, (qq, jpow2, zs))
return funs['base'], funs['dual']
def calc_coeffs(self, wave_base_j_qq_ZS_at_xs, wave_dual_j_qq_ZS_at_xs, j, xs, balls_info, qq):
jj = [j + j0 for j0 in self.j0s]
jpow2 = np.array([2 ** j for j in jj])
zs_min, zs_max = self.wave.z_range('dual', (qq, jpow2, None), self.minx, self.maxx)
omega = calc_omega(xs.shape[0], self.k)
resp = {}
balls = balls_info.sqrt_vol_k
for zs in itt.product(*all_zs_tensor(zs_min, zs_max)):
alpha_zs = omega * (wave_dual_j_qq_ZS_at_xs[zs] * balls).sum()
resp[zs] = (alpha_zs, alpha_zs)
if self.wave.orthogonal:
# we are done
return resp
zs_min, zs_max = self.wave.z_range('base', (qq, jpow2, None), self.minx, self.maxx)
for zs in itt.product(*all_zs_tensor(zs_min, zs_max)):
if zs not in resp:
continue
alpha_d_zs = omega * (wave_base_j_qq_ZS_at_xs[zs] * balls).sum()
resp[zs] = (resp[zs][0], alpha_d_zs)
return resp
def calc_coeffs_no_i(self, wave_base_j_qq_ZS_at_xs, wave_dual_j_qq_ZS_at_xs, j, xs, i, balls_info, qq):
"Calculate alphas (w/ dual) and alpha-duals (w/ base)"
jj = [j + j0 for j0 in self.j0s]
jpow2 = np.array([2 ** j for j in jj])
zs_min, zs_max = self.wave.z_range('dual', (qq, jpow2, None), self.minx, self.maxx)
omega_no_i = calc_omega(xs.shape[0] - 1, self.k)
resp = {}
vol_no_i = balls_no_i(balls_info, i)
for zs in itt.product(*all_zs_tensor(zs_min, zs_max)):
# below, we remove factor for i from sum << this has the biggest impact in performance
# also, alpha_zs was calculated previously, so this can be further optimised w/ calc_coeffs
alpha_zs = omega_no_i * ((wave_dual_j_qq_ZS_at_xs[zs] * vol_no_i).sum() - wave_dual_j_qq_ZS_at_xs[zs][i] * vol_no_i[i])
resp[zs] = (alpha_zs, alpha_zs)
if self.wave.orthogonal:
# we are done
return resp
zs_min, zs_max = self.wave.z_range('base', (qq, jpow2, None), self.minx, self.maxx)
for zs in itt.product(*all_zs_tensor(zs_min, zs_max)):
if zs not in resp:
continue
# below, we remove factor for i from sum << this has the biggest impact in performance
alpha_d_zs = omega_no_i * ((wave_base_j_qq_ZS_at_xs[zs] * vol_no_i).sum() - wave_base_j_qq_ZS_at_xs[zs][i] * vol_no_i[i])
resp[zs] = (resp[zs][0], alpha_d_zs)
return resp
def calc_1_coeff_no_i(self, base_fun_xs, dual_fun_xs, j, xs, i, balls, qq, zs):
omega_no_i = calc_omega(xs.shape[0] - 1, self.k)
if zs in dual_fun_xs:
coeff = omega_no_i * ((dual_fun_xs[zs] * balls).sum() - dual_fun_xs[zs][i] * balls[i])
else:
coeff = 0.0
if self.wave.orthogonal:
# we are done
return coeff, coeff
if zs in base_fun_xs:
coeff_d = omega_no_i * ((base_fun_xs[zs] * balls).sum() - base_fun_xs[zs][i] * balls[i])
else:
coeff_d = 0.0
return coeff, coeff_d
def balls_no_i(balls_info, i):
n = balls_info.nn_indexes.shape[0]
resp = []
for i_prim in range(n):
# note index i is removed at callers site
if i in balls_info.nn_indexes[i_prim, :-1]:
resp.append(balls_info.sqrt_vol_k_plus_1[i_prim])
else:
resp.append(balls_info.sqrt_vol_k[i_prim])
return np.array(resp)
def calc_omega(n, k):
"Bias correction for k-th nearest neighbours sum for sample size n"
return math.sqrt(n - 1) * gamma(k) / gamma(k + 0.5) / n
BallsInfo = namedtuple('BallsInfo', ['sqrt_vol_k', 'sqrt_vol_k_plus_1', 'nn_indexes'])
def calc_sqrt_vs(xs, k):
"Returns BallsInfo object with sqrt of volumes of k-th balls and (k+1)-th balls"
dim = xs.shape[1]
ball_tree = BallTree(xs)
# as xs is both data and query, xs's nearest neighbour would be xs itself, hence the k+2 below
dist, inx = ball_tree.query(xs, k + 2)
k_near_radious = dist[:, -2:]
xs_balls_both = np.power(k_near_radious, dim / 2)
xs_balls = xs_balls_both[:, 0] * sqrt_vunit(dim)
xs_balls2 = xs_balls_both[:, 1] * sqrt_vunit(dim)
return BallsInfo(xs_balls, xs_balls2, inx)
def sqrt_vunit(dim):
"Square root of Volume of unit hypersphere in d dimensions"
return math.sqrt((np.pi ** (dim / 2)) / gamma(dim / 2 + 1))
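# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a quick, hedged check of
# the nearest-neighbour helpers above on synthetic data. The sample size, dimension
# and k below are arbitrary assumptions chosen only for illustration.
if __name__ == '__main__':
    demo_xs = np.random.uniform(size=(200, 2))      # 200 sample points in R^2
    demo_balls = calc_sqrt_vs(demo_xs, 4)             # sqrt volumes of 4th/5th nearest-neighbour balls
    demo_omega = calc_omega(demo_xs.shape[0], 4)      # bias correction for n=200, k=4
    print(demo_balls.sqrt_vol_k.shape, demo_omega, sqrt_vunit(2))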
|
[
"numpy.amin",
"math.fabs",
"math.sqrt",
"numpy.power",
"scipy.special.gamma",
"numpy.zeros",
"numpy.amax",
"sklearn.neighbors.BallTree",
"numpy.array",
"collections.namedtuple",
"random.seed",
"pywde.pywt_ext.WaveletTensorProduct",
"pywde.common.all_zs_tensor",
"datetime.datetime.now",
"numpy.sqrt"
] |
[((35069, 35143), 'collections.namedtuple', 'namedtuple', (['"""BallsInfo"""', "['sqrt_vol_k', 'sqrt_vol_k_plus_1', 'nn_indexes']"], {}), "('BallsInfo', ['sqrt_vol_k', 'sqrt_vol_k_plus_1', 'nn_indexes'])\n", (35079, 35143), False, 'from collections import namedtuple\n'), ((34883, 34897), 'numpy.array', 'np.array', (['resp'], {}), '(resp)\n', (34891, 34897), True, 'import numpy as np\n'), ((35294, 35306), 'sklearn.neighbors.BallTree', 'BallTree', (['xs'], {}), '(xs)\n', (35302, 35306), False, 'from sklearn.neighbors import BallTree\n'), ((35503, 35536), 'numpy.power', 'np.power', (['k_near_radious', '(dim / 2)'], {}), '(k_near_radious, dim / 2)\n', (35511, 35536), True, 'import numpy as np\n'), ((682, 741), 'pywde.pywt_ext.WaveletTensorProduct', 'WaveletTensorProduct', (['[wave_desc[0] for wave_desc in waves]'], {}), '([wave_desc[0] for wave_desc in waves])\n', (702, 741), False, 'from pywde.pywt_ext import WaveletTensorProduct\n'), ((1163, 1177), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1175, 1177), False, 'from datetime import datetime\n'), ((1346, 1365), 'numpy.amin', 'np.amin', (['xs'], {'axis': '(0)'}), '(xs, axis=0)\n', (1353, 1365), True, 'import numpy as np\n'), ((1386, 1405), 'numpy.amax', 'np.amax', (['xs'], {'axis': '(0)'}), '(xs, axis=0)\n', (1393, 1405), True, 'import numpy as np\n'), ((5248, 5267), 'numpy.amin', 'np.amin', (['xs'], {'axis': '(0)'}), '(xs, axis=0)\n', (5255, 5267), True, 'import numpy as np\n'), ((5288, 5307), 'numpy.amax', 'np.amax', (['xs'], {'axis': '(0)'}), '(xs, axis=0)\n', (5295, 5307), True, 'import numpy as np\n'), ((8951, 8972), 'numpy.zeros', 'np.zeros', (['xs.shape[0]'], {}), '(xs.shape[0])\n', (8959, 8972), True, 'import numpy as np\n'), ((8992, 9013), 'numpy.zeros', 'np.zeros', (['xs.shape[0]'], {}), '(xs.shape[0])\n', (9000, 9013), True, 'import numpy as np\n'), ((12164, 12178), 'random.seed', 'random.seed', (['(1)'], {}), '(1)\n', (12175, 12178), False, 'import random\n'), ((12246, 12265), 'numpy.amin', 'np.amin', (['xs'], {'axis': '(0)'}), '(xs, axis=0)\n', (12253, 12265), True, 'import numpy as np\n'), ((12286, 12305), 'numpy.amax', 'np.amax', (['xs'], {'axis': '(0)'}), '(xs, axis=0)\n', (12293, 12305), True, 'import numpy as np\n'), ((13536, 13557), 'numpy.zeros', 'np.zeros', (['xs.shape[0]'], {}), '(xs.shape[0])\n', (13544, 13557), True, 'import numpy as np\n'), ((13577, 13598), 'numpy.zeros', 'np.zeros', (['xs.shape[0]'], {}), '(xs.shape[0])\n', (13585, 13598), True, 'import numpy as np\n'), ((21049, 21068), 'numpy.amin', 'np.amin', (['xs'], {'axis': '(0)'}), '(xs, axis=0)\n', (21056, 21068), True, 'import numpy as np\n'), ((21089, 21108), 'numpy.amax', 'np.amax', (['xs'], {'axis': '(0)'}), '(xs, axis=0)\n', (21096, 21108), True, 'import numpy as np\n'), ((22339, 22360), 'numpy.zeros', 'np.zeros', (['xs.shape[0]'], {}), '(xs.shape[0])\n', (22347, 22360), True, 'import numpy as np\n'), ((22380, 22401), 'numpy.zeros', 'np.zeros', (['xs.shape[0]'], {}), '(xs.shape[0])\n', (22388, 22401), True, 'import numpy as np\n'), ((31021, 31053), 'numpy.array', 'np.array', (['[(2 ** j) for j in jj]'], {}), '([(2 ** j) for j in jj])\n', (31029, 31053), True, 'import numpy as np\n'), ((31571, 31603), 'numpy.array', 'np.array', (['[(2 ** j) for j in jj]'], {}), '([(2 ** j) for j in jj])\n', (31579, 31603), True, 'import numpy as np\n'), ((32649, 32681), 'numpy.array', 'np.array', (['[(2 ** j) for j in jj]'], {}), '([(2 ** j) for j in jj])\n', (32657, 32681), True, 'import numpy as np\n'), ((3277, 3301), 'numpy.array', 'np.array', 
(['g_ring_no_i_xs'], {}), '(g_ring_no_i_xs)\n', (3285, 3301), True, 'import numpy as np\n'), ((7554, 7570), 'math.fabs', 'math.fabs', (['tt[3]'], {}), '(tt[3])\n', (7563, 7570), False, 'import math\n'), ((28089, 28110), 'numpy.zeros', 'np.zeros', (['xs.shape[0]'], {}), '(xs.shape[0])\n', (28097, 28110), True, 'import numpy as np\n'), ((29236, 29257), 'numpy.zeros', 'np.zeros', (['xs.shape[0]'], {}), '(xs.shape[0])\n', (29244, 29257), True, 'import numpy as np\n'), ((35035, 35049), 'scipy.special.gamma', 'gamma', (['(k + 0.5)'], {}), '(k + 0.5)\n', (35040, 35049), False, 'from scipy.special import gamma\n'), ((35822, 35840), 'scipy.special.gamma', 'gamma', (['(dim / 2 + 1)'], {}), '(dim / 2 + 1)\n', (35827, 35840), False, 'from scipy.special import gamma\n'), ((7673, 7689), 'math.fabs', 'math.fabs', (['tt[3]'], {}), '(tt[3])\n', (7682, 7689), False, 'import math\n'), ((7692, 7718), 'math.sqrt', 'math.sqrt', (['(delta_j - tt[0])'], {}), '(delta_j - tt[0])\n', (7701, 7718), False, 'import math\n'), ((8069, 8085), 'math.fabs', 'math.fabs', (['tt[3]'], {}), '(tt[3])\n', (8078, 8085), False, 'import math\n'), ((11248, 11287), 'numpy.array', 'np.array', (['[tt[1] for tt in best_c_data]'], {}), '([tt[1] for tt in best_c_data])\n', (11256, 11287), True, 'import numpy as np\n'), ((16312, 16334), 'numpy.array', 'np.array', (['coeff_i_vals'], {}), '(coeff_i_vals)\n', (16320, 16334), True, 'import numpy as np\n'), ((24740, 24762), 'numpy.array', 'np.array', (['coeff_i_vals'], {}), '(coeff_i_vals)\n', (24748, 24762), True, 'import numpy as np\n'), ((31829, 31858), 'pywde.common.all_zs_tensor', 'all_zs_tensor', (['zs_min', 'zs_max'], {}), '(zs_min, zs_max)\n', (31842, 31858), False, 'from pywde.common import all_zs_tensor\n'), ((32186, 32215), 'pywde.common.all_zs_tensor', 'all_zs_tensor', (['zs_min', 'zs_max'], {}), '(zs_min, zs_max)\n', (32199, 32215), False, 'from pywde.common import all_zs_tensor\n'), ((32923, 32952), 'pywde.common.all_zs_tensor', 'all_zs_tensor', (['zs_min', 'zs_max'], {}), '(zs_min, zs_max)\n', (32936, 32952), False, 'from pywde.common import all_zs_tensor\n'), ((33534, 33563), 'pywde.common.all_zs_tensor', 'all_zs_tensor', (['zs_min', 'zs_max'], {}), '(zs_min, zs_max)\n', (33547, 33563), False, 'from pywde.common import all_zs_tensor\n'), ((35005, 35021), 'math.sqrt', 'math.sqrt', (['(n - 1)'], {}), '(n - 1)\n', (35014, 35021), False, 'import math\n'), ((35024, 35032), 'scipy.special.gamma', 'gamma', (['k'], {}), '(k)\n', (35029, 35032), False, 'from scipy.special import gamma\n'), ((18297, 18313), 'math.fabs', 'math.fabs', (['tt[3]'], {}), '(tt[3])\n', (18306, 18313), False, 'import math\n'), ((18629, 18645), 'math.fabs', 'math.fabs', (['tt[3]'], {}), '(tt[3])\n', (18638, 18645), False, 'import math\n'), ((25800, 25819), 'math.fabs', 'math.fabs', (['coeff_zs'], {}), '(coeff_zs)\n', (25809, 25819), False, 'import math\n'), ((31266, 31295), 'pywde.common.all_zs_tensor', 'all_zs_tensor', (['zs_min', 'zs_max'], {}), '(zs_min, zs_max)\n', (31279, 31295), False, 'from pywde.common import all_zs_tensor\n'), ((4419, 4433), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4431, 4433), False, 'from datetime import datetime\n'), ((18400, 18416), 'math.fabs', 'math.fabs', (['tt[3]'], {}), '(tt[3])\n', (18409, 18416), False, 'import math\n'), ((18460, 18476), 'math.fabs', 'math.fabs', (['tt[3]'], {}), '(tt[3])\n', (18469, 18476), False, 'import math\n'), ((11112, 11134), 'numpy.array', 'np.array', (['coeff_i_vals'], {}), '(coeff_i_vals)\n', (11120, 11134), True, 'import numpy as 
np\n'), ((3380, 3403), 'numpy.sqrt', 'np.sqrt', (['g_ring_no_i_xs'], {}), '(g_ring_no_i_xs)\n', (3387, 3403), True, 'import numpy as np\n'), ((7246, 7268), 'numpy.array', 'np.array', (['coeff_i_vals'], {}), '(coeff_i_vals)\n', (7254, 7268), True, 'import numpy as np\n'), ((10781, 10832), 'numpy.sqrt', 'np.sqrt', (['(g_ring_no_i_xs * g_ring_no_i_xs / norm2_xs)'], {}), '(g_ring_no_i_xs * g_ring_no_i_xs / norm2_xs)\n', (10788, 10832), True, 'import numpy as np\n'), ((3518, 3541), 'numpy.sqrt', 'np.sqrt', (['g_ring_no_i_xs'], {}), '(g_ring_no_i_xs)\n', (3525, 3541), True, 'import numpy as np\n'), ((10955, 10995), 'numpy.sqrt', 'np.sqrt', (['(g_ring_no_i_xs * g_ring_no_i_xs)'], {}), '(g_ring_no_i_xs * g_ring_no_i_xs)\n', (10962, 10995), True, 'import numpy as np\n'), ((16736, 16799), 'numpy.sqrt', 'np.sqrt', (['(loc_g_ring_no_i_xs * loc_g_ring_no_i_xs / loc_norm2_xs)'], {}), '(loc_g_ring_no_i_xs * loc_g_ring_no_i_xs / loc_norm2_xs)\n', (16743, 16799), True, 'import numpy as np\n'), ((25169, 25232), 'numpy.sqrt', 'np.sqrt', (['(loc_g_ring_no_i_xs * loc_g_ring_no_i_xs / loc_norm2_xs)'], {}), '(loc_g_ring_no_i_xs * loc_g_ring_no_i_xs / loc_norm2_xs)\n', (25176, 25232), True, 'import numpy as np\n'), ((16929, 16977), 'numpy.sqrt', 'np.sqrt', (['(loc_g_ring_no_i_xs * loc_g_ring_no_i_xs)'], {}), '(loc_g_ring_no_i_xs * loc_g_ring_no_i_xs)\n', (16936, 16977), True, 'import numpy as np\n'), ((25362, 25410), 'numpy.sqrt', 'np.sqrt', (['(loc_g_ring_no_i_xs * loc_g_ring_no_i_xs)'], {}), '(loc_g_ring_no_i_xs * loc_g_ring_no_i_xs)\n', (25369, 25410), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# <examples/doc_model_savemodel.py>
import numpy as np
from lmfit.model import Model, save_model
def mysine(x, amp, freq, shift):
return amp * np.sin(x*freq + shift)
sinemodel = Model(mysine)
pars = sinemodel.make_params(amp=1, freq=0.25, shift=0)
save_model(sinemodel, 'sinemodel.sav')
# <end examples/doc_model_savemodel.py>
|
[
"lmfit.model.save_model",
"numpy.sin",
"lmfit.model.Model"
] |
[((209, 222), 'lmfit.model.Model', 'Model', (['mysine'], {}), '(mysine)\n', (214, 222), False, 'from lmfit.model import Model, save_model\n'), ((280, 318), 'lmfit.model.save_model', 'save_model', (['sinemodel', '"""sinemodel.sav"""'], {}), "(sinemodel, 'sinemodel.sav')\n", (290, 318), False, 'from lmfit.model import Model, save_model\n'), ((172, 196), 'numpy.sin', 'np.sin', (['(x * freq + shift)'], {}), '(x * freq + shift)\n', (178, 196), True, 'import numpy as np\n')]
|
import numpy as np
import scipy.special as sp
import matplotlib.pyplot as plt
# radius of the observation circle
def NMLA_radius(omega,Rest=1):
# Input: omega--frequency; Rest--estimate of the distance from source to observation point
#
# Output: the radius of the observation circle
poly = [1,0,1,-2.5-0.775*(omega*Rest)**0.5]
rt = np.roots(poly)
rs = np.real(rt[2])**3/omega
return rs
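# Example (illustrative values only, not from the original script):
#   r = NMLA_radius(omega=80, Rest=1)
# returns the radius of the observation circle used by NMLA() below for a
# frequency of 80 and an estimated source distance of 1.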
# NMLA filtering in the fourier space
def BGFiltrage(fu,kr,imp,L,gau,M):
# INPUT: fu: FFT of impedance quantity U
# kr: k*r
# imp: parameter in impedance quantity
# L: truncation level
# gau: parameter in gaussian kernel
# M: number of sample points on the observation circle
#
# OUTPUT: filterd quantity BU
#
# bessel and derivative of bessel
LP = max( L+2, 3)
idx = np.array(list(range(LP)))
Bj = sp.jv(idx, kr) # bessel J_l(kr)
DBj = np.array([0.0]*(LP-1))
DBj[0] = -Bj[1]
DBj[1:] = 0.5*(Bj[:LP-2] - Bj[2:LP]) # derivative of bessel
# gaussian kernel
A = gau/L
G = np.array([0.0]*(L+1))
G[0] = 1.0
idx = np.array(list(range(1,L+1)))
G[1:] = np.exp(-0.5*(A*idx)**2)
G /= 2*np.sum(G) - 1
# filtering operator
Fltr = np.array([0.0 + 0.0*1j]*(L+1))
Fltr[0] = Bj[0]-1j*DBj[0]*imp
Fltr[1:] = (Bj[1:L+1]-1j*DBj[1:L+1]*imp)*(1j**idx)
Fltr = G/Fltr
fb = np.array([0.0 + 0.0*1j]*(M))
fb[0] = Fltr[0]*fu[0] # FU_0
fb[idx] = Fltr[idx]*fu[idx] # FU_{1,...,L}
fb[M-idx] = Fltr[idx]*fu[M-idx] # FU_{-1,...,-L}
return fb
# NMLA to estimate the ray direction
def NMLA(x0,y0,c0,omega,Rest,u,ux,uy):
imp = 0.5 # parameter in impedance quantity
gau = 3.5 # Parameter in Gaussian function
r = NMLA_radius(omega,Rest) # radius of the observation circle
kr = r*omega/c0 # k*r
L = int(round(kr + (kr)**(1.0/3) -2.5)) # truncation level to obtain needed precision
L = max(1,L)
M = 2*(4*L)+1 # number of samples on the observation circle
# Angle discretization on the circle
angl = np.linspace(0,2*np.pi,M+1)
ang = angl[:M]
X = x0 + r*np.cos(ang)
Y = y0 + r*np.sin(ang)
# compute the impedance quantity
Field = u(X, Y, omega)
DUx = ux(X, Y, omega)
DUy = uy(X, Y, omega)
DField = DUx*np.cos(ang) + DUy*np.sin(ang)
U = imp*DField/(1j*omega/c0) + Field
# filtering
fu = np.fft.fft(U)
fbeta = BGFiltrage(fu,kr,imp,L,gau,M)
beta = np.fft.ifft(fbeta)
# estimate the ray angle
sorted_index = sorted(range(len(beta)),key=lambda x:abs(beta[x]), reverse = True)
est_ang = ang[sorted_index[0]]
# plot
plt.plot(ang/np.pi,np.abs(beta))
plt.xlabel(r'$\theta/\pi$')
plt.show()
return est_ang
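# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original script): exercise NMLA on a synthetic
# plane wave with a known propagation angle. Every value below (wave speed, frequency,
# angle, Rest) is an arbitrary assumption for demonstration only.
if __name__ == '__main__':
    c0, omega, a0 = 1.0, 60.0, 0.7
    u = lambda x, y, w: np.exp(1j*(w/c0)*(x*np.cos(a0) + y*np.sin(a0)))   # plane wave
    ux = lambda x, y, w: 1j*(w/c0)*np.cos(a0)*u(x, y, w)                 # d u / d x
    uy = lambda x, y, w: 1j*(w/c0)*np.sin(a0)*u(x, y, w)                 # d u / d y
    est_ang = NMLA(0.0, 0.0, c0, omega, 2.0, u, ux, uy)
    print('estimated ray angle:', est_ang, 'true angle:', a0)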
|
[
"numpy.roots",
"numpy.fft.ifft",
"matplotlib.pyplot.show",
"numpy.abs",
"numpy.sum",
"numpy.fft.fft",
"numpy.sin",
"numpy.array",
"numpy.exp",
"numpy.linspace",
"scipy.special.jv",
"numpy.real",
"numpy.cos",
"matplotlib.pyplot.xlabel"
] |
[((363, 377), 'numpy.roots', 'np.roots', (['poly'], {}), '(poly)\n', (371, 377), True, 'import numpy as np\n'), ((918, 932), 'scipy.special.jv', 'sp.jv', (['idx', 'kr'], {}), '(idx, kr)\n', (923, 932), True, 'import scipy.special as sp\n'), ((961, 987), 'numpy.array', 'np.array', (['([0.0] * (LP - 1))'], {}), '([0.0] * (LP - 1))\n', (969, 987), True, 'import numpy as np\n'), ((1117, 1142), 'numpy.array', 'np.array', (['([0.0] * (L + 1))'], {}), '([0.0] * (L + 1))\n', (1125, 1142), True, 'import numpy as np\n'), ((1206, 1235), 'numpy.exp', 'np.exp', (['(-0.5 * (A * idx) ** 2)'], {}), '(-0.5 * (A * idx) ** 2)\n', (1212, 1235), True, 'import numpy as np\n'), ((1296, 1334), 'numpy.array', 'np.array', (['([0.0 + 0.0 * 1.0j] * (L + 1))'], {}), '([0.0 + 0.0 * 1.0j] * (L + 1))\n', (1304, 1334), True, 'import numpy as np\n'), ((1448, 1480), 'numpy.array', 'np.array', (['([0.0 + 0.0 * 1.0j] * M)'], {}), '([0.0 + 0.0 * 1.0j] * M)\n', (1456, 1480), True, 'import numpy as np\n'), ((2257, 2289), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(M + 1)'], {}), '(0, 2 * np.pi, M + 1)\n', (2268, 2289), True, 'import numpy as np\n'), ((2603, 2616), 'numpy.fft.fft', 'np.fft.fft', (['U'], {}), '(U)\n', (2613, 2616), True, 'import numpy as np\n'), ((2672, 2690), 'numpy.fft.ifft', 'np.fft.ifft', (['fbeta'], {}), '(fbeta)\n', (2683, 2690), True, 'import numpy as np\n'), ((2902, 2930), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\theta/\\\\pi$"""'], {}), "('$\\\\theta/\\\\pi$')\n", (2912, 2930), True, 'import matplotlib.pyplot as plt\n'), ((2934, 2944), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2942, 2944), True, 'import matplotlib.pyplot as plt\n'), ((2884, 2896), 'numpy.abs', 'np.abs', (['beta'], {}), '(beta)\n', (2890, 2896), True, 'import numpy as np\n'), ((387, 401), 'numpy.real', 'np.real', (['rt[2]'], {}), '(rt[2])\n', (394, 401), True, 'import numpy as np\n'), ((1241, 1250), 'numpy.sum', 'np.sum', (['G'], {}), '(G)\n', (1247, 1250), True, 'import numpy as np\n'), ((2322, 2333), 'numpy.cos', 'np.cos', (['ang'], {}), '(ang)\n', (2328, 2333), True, 'import numpy as np\n'), ((2350, 2361), 'numpy.sin', 'np.sin', (['ang'], {}), '(ang)\n', (2356, 2361), True, 'import numpy as np\n'), ((2502, 2513), 'numpy.cos', 'np.cos', (['ang'], {}), '(ang)\n', (2508, 2513), True, 'import numpy as np\n'), ((2520, 2531), 'numpy.sin', 'np.sin', (['ang'], {}), '(ang)\n', (2526, 2531), True, 'import numpy as np\n')]
|
'''
Created on Oct 26, 2015
@author: wirkert
'''
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
def preprocess2(df, nr_samples=None, snr=None, movement_noise_sigma=None,
magnification=None, bands_to_sortout=None):
# first set 0 reflectances to nan
df["reflectances"] = df["reflectances"].replace(to_replace=0.,
value=np.nan)
# remove nan
df.dropna(inplace=True)
# extract nr_samples samples from data
if nr_samples is not None:
df = df.sample(nr_samples)
# get reflectance and oxygenation
X = df.reflectances
if bands_to_sortout is not None and bands_to_sortout.size > 0:
X.drop(X.columns[bands_to_sortout], axis=1, inplace=True)
snr = np.delete(snr, bands_to_sortout)
X = X.values
y = df.layer0[["sao2", "vhb"]]
# do data magnification
if magnification is not None:
X_temp = X
y_temp = y
for i in range(magnification - 1):
X = np.vstack((X, X_temp))
y = pd.concat([y, y_temp])
# add noise to reflectances
camera_noise = 0.
if snr is not None:
sigmas = X / snr
noises = np.random.normal(loc=0., scale=1, size=X.shape)
camera_noise = sigmas*noises
movement_noise = 0.
if movement_noise_sigma is not None:
nr_bands = X.shape[1]
nr_samples = X.shape[0]
# we assume no correlation between neighboring bands
CORRELATION_COEFFICIENT = 0.0
movement_variance = movement_noise_sigma ** 2
movement_variances = np.ones(nr_bands) * movement_variance
movement_covariances = np.ones(nr_bands-1) * CORRELATION_COEFFICIENT * \
movement_variance
movement_covariance_matrix = np.diag(movement_variances) + \
np.diag(movement_covariances, -1) + \
np.diag(movement_covariances, 1)
# percentual sample errors
sample_errors_p = np.random.multivariate_normal(mean=np.zeros(nr_bands),
cov=movement_covariance_matrix,
size=nr_samples)
# errors w.r.t. the curve height.
movement_noise = X * sample_errors_p
X += camera_noise + movement_noise
X = np.clip(X, 0.00001, 1.)
# do normalizations
X = normalize(X)
return X, y
def preprocess(batch, nr_samples=None, snr=None, movement_noise_sigma=None,
magnification=None, bands_to_sortout=None):
X, y = preprocess2(batch, nr_samples, snr, movement_noise_sigma,
magnification, bands_to_sortout)
return X, y["sao2"]
def normalize(X):
# normalize reflectances
normalizer = Normalizer(norm='l1')
X = normalizer.transform(X)
# reflectances to absorption
absorptions = -np.log(X)
X = absorptions
# l2-normalise the absorption spectra
normalizer = Normalizer(norm='l2')
X = normalizer.transform(X)
return X
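# ---------------------------------------------------------------------------
# Hedged illustration (not part of the original module): `normalize` only needs a
# strictly positive 2d reflectance array, so a random matrix is enough to exercise
# the l1-normalise -> -log -> l2-normalise chain above.
if __name__ == '__main__':
    demo_reflectances = np.random.uniform(low=0.05, high=0.95, size=(4, 8))
    print(normalize(demo_reflectances).shape)  # stays (4, 8)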
|
[
"numpy.log",
"numpy.zeros",
"numpy.ones",
"numpy.clip",
"numpy.random.normal",
"numpy.diag",
"sklearn.preprocessing.Normalizer",
"pandas.concat",
"numpy.delete",
"numpy.vstack"
] |
[((2277, 2299), 'numpy.clip', 'np.clip', (['X', '(1e-05)', '(1.0)'], {}), '(X, 1e-05, 1.0)\n', (2284, 2299), True, 'import numpy as np\n'), ((2715, 2736), 'sklearn.preprocessing.Normalizer', 'Normalizer', ([], {'norm': '"""l1"""'}), "(norm='l1')\n", (2725, 2736), False, 'from sklearn.preprocessing import Normalizer\n'), ((2902, 2923), 'sklearn.preprocessing.Normalizer', 'Normalizer', ([], {'norm': '"""l2"""'}), "(norm='l2')\n", (2912, 2923), False, 'from sklearn.preprocessing import Normalizer\n'), ((808, 840), 'numpy.delete', 'np.delete', (['snr', 'bands_to_sortout'], {}), '(snr, bands_to_sortout)\n', (817, 840), True, 'import numpy as np\n'), ((1236, 1284), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0.0)', 'scale': '(1)', 'size': 'X.shape'}), '(loc=0.0, scale=1, size=X.shape)\n', (1252, 1284), True, 'import numpy as np\n'), ((2821, 2830), 'numpy.log', 'np.log', (['X'], {}), '(X)\n', (2827, 2830), True, 'import numpy as np\n'), ((1053, 1075), 'numpy.vstack', 'np.vstack', (['(X, X_temp)'], {}), '((X, X_temp))\n', (1062, 1075), True, 'import numpy as np\n'), ((1092, 1114), 'pandas.concat', 'pd.concat', (['[y, y_temp]'], {}), '([y, y_temp])\n', (1101, 1114), True, 'import pandas as pd\n'), ((1631, 1648), 'numpy.ones', 'np.ones', (['nr_bands'], {}), '(nr_bands)\n', (1638, 1648), True, 'import numpy as np\n'), ((1911, 1943), 'numpy.diag', 'np.diag', (['movement_covariances', '(1)'], {}), '(movement_covariances, 1)\n', (1918, 1943), True, 'import numpy as np\n'), ((1700, 1721), 'numpy.ones', 'np.ones', (['(nr_bands - 1)'], {}), '(nr_bands - 1)\n', (1707, 1721), True, 'import numpy as np\n'), ((1817, 1844), 'numpy.diag', 'np.diag', (['movement_variances'], {}), '(movement_variances)\n', (1824, 1844), True, 'import numpy as np\n'), ((1861, 1894), 'numpy.diag', 'np.diag', (['movement_covariances', '(-1)'], {}), '(movement_covariances, -1)\n', (1868, 1894), True, 'import numpy as np\n'), ((2040, 2058), 'numpy.zeros', 'np.zeros', (['nr_bands'], {}), '(nr_bands)\n', (2048, 2058), True, 'import numpy as np\n')]
|
'''
Functions to go in here (I think!?):
KC: 01/12/2018, ideas-
KC: 19/12/2018, added-
~NuSTAR class
'''
from . import data_handling
import sys
#from os.path import *
import os
from os.path import isfile
import astropy
from astropy.io import fits
import astropy.units as u
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from matplotlib.colors import LogNorm
from pylab import figure, cm
from astropy.coordinates import SkyCoord
import numpy as np
import nustar_pysolar as nustar
from . import filter_with_tmrng ######Kris
from . import custom_map ######Kris
import sunpy.map
from scipy import ndimage
from scipy.optimize import curve_fit
from scipy.ndimage import rotate
import re #for regular expressions
import warnings #suppress astropy warnings
import datetime
from datetime import timedelta
from astropy.io.fits.verify import VerifyWarning
import matplotlib.dates as mdates
import pickle
import subprocess
import pytz
from skimage import restoration
# from . import interp
from scipy import interpolate
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters() # was told to do this by the machine
'''
Alterations:
KC: 22/01/2019 - .
'''
#NuSTAR class for Python
class NustarDo:
np.seterr(divide='ignore', invalid='ignore') #ignore warnings resulting from missing header info
warnings.simplefilter('ignore', VerifyWarning)
warnings.simplefilter('ignore', RuntimeWarning)
warnings.simplefilter('ignore', UserWarning)
def __init__(self, evt_filename='', energy_range=[2.5,79], time_range = None): #set-up parameters
#if a filename is not given then the static functions can still be used
if evt_filename == '':
return
#directory of the file
directory_regex = re.compile(r'\w+/')
directory = directory_regex.findall(evt_filename)
self.evt_directory = '/'+''.join(directory)
#search of the form of stuff (no slashes included), dot, then more stuff
evt_filename_regex = re.compile(r'\w+\.\w+')
name_of_file = evt_filename_regex.findall(evt_filename)[0]
#for a sunpy map object to be made then the file has to be positioned on the Sun
sunpos_regex = re.compile(r'sunpos')
sunpos = sunpos_regex.findall(name_of_file)
if sunpos == []:
raise ValueError('\nThe file must be a \'sunpos\' file, i.e. the observation is converted to appropriate solar coordinates.')
#search for 2 digits, a non-digit, then 2 digits again
fpm_regex = re.compile(r'\d{2}\D\d{2}')
focal_plane_module = fpm_regex.findall(name_of_file)[0][2]
#search for chu followed by however many consecutive digits
chu_regex = re.compile(r'chu\d+')
chu = chu_regex.findall(name_of_file)
if chu != []:
chu_state = chu[0]
else:
chu_state = 'not_split'
# search for a underscore, a non-digit, and an underscore (for the mode the pipeline was run if a chu file is given)
mode_regex = re.compile(r"_\D_")
mode = mode_regex.findall(name_of_file)
self.pipeline_mode = mode[0] if len(mode)>0 else ""
#search for all seperate sub-strings composed of digits, first one in evt_filename is observation id
obs_id_regex = re.compile(r'\d+')
obs_id = obs_id_regex.findall(name_of_file)[0]
self.obs_id = obs_id
#set attributes of the file and parameters used in other functions on the class
self.evt_filename = name_of_file
self.fpm = focal_plane_module
self.time_range = time_range
self.energy_range = energy_range
self.chu_state = chu_state
self.rectangles = None #set so that you don't have to plot a map to get a light curve
# for plot title
self.e_range_str = str(self.energy_range[0])+'-'+str(self.energy_range[1]) if self.energy_range[1]<79 else ">"+str(self.energy_range[0])
self.rel_t = data_handling.getTimeFromFormat("2010/01/01, 00:00:00") # nustar times are measured in seconds from this date
#extract the data within the provided parameters
hdulist = fits.open(evt_filename) #not self.evt_filename as fits.open needs to know the full path to the file
self.evt_data = hdulist[1].data
self.evt_header = hdulist[1].header
hdulist.close()
############*********** this is a hacky fix but will do for now ***********############
# if Python code is used for the sunpos file creation the re-written header keywords aren't saved properly, so...
if (round(self.evt_header['TCDLT13'], 1)!=2.5) or (round(self.evt_header['TCDLT14'], 1)==2.5):
self.evt_header['TCDLT13'] = 2.45810736 # x
self.evt_header['TCDLT14'] = 2.45810736 # y
#check evt_filename matches evt_header info
assert obs_id == self.evt_header['OBS_ID'], 'Observation ID in the .evt filename does not match ID in the .evt header info. {} =/= {}'.format(obs_id, self.evt_header['OBS_ID'])
assert focal_plane_module == self.evt_header['INSTRUME'][-1], 'Focal Plane Module (FPM) in the .evt filename does not match FPM in the .evt header info. {} =/= {}'.format(focal_plane_module, self.evt_header['INSTRUME'][-1])
if self.time_range == None:
#filter away the non grade zero counts and bad pixels
self.cleanevt = filter_with_tmrng.event_filter(self.evt_data, fpm=focal_plane_module,
energy_low=self.energy_range[0],
energy_high=self.energy_range[1])
#start and end time of the NuSTAR observation as datetime objects
self.time_range = [(self.rel_t+ timedelta(seconds=np.min(self.cleanevt['TIME']))).strftime('%Y/%m/%d, %H:%M:%S'),
(self.rel_t + timedelta(seconds=np.max(self.cleanevt['TIME']))).strftime('%Y/%m/%d, %H:%M:%S')]
elif len(self.time_range) == 2:
try:
self.cleanevt = filter_with_tmrng.event_filter(self.evt_data, fpm=focal_plane_module,
energy_low=self.energy_range[0],
energy_high=self.energy_range[1],
tmrng=self.time_range) ######Kris
except TypeError as error:
raise TypeError('\nTimes need to be a string in the form \'%y/%m/%d, %H:%M:%S\', '
'e.g.\'2018/12/25, 12:30:52\'')
else:
raise TypeError('\nCheck that it is only a start time and end time you are giving.')
#if there are no counts in cleanevt
if len(self.cleanevt) == 0:
raise ValueError('\nThere are no counts within these parameters. '
'\nThis may be because no counts were recorded or that the parameters are outwith the '
'scope of NuSTAR and/or the observation.')
# now for the time tick marks...
clevt_duration = np.max(self.cleanevt['TIME'])-np.min(self.cleanevt['TIME'])
if clevt_duration > 3600*0.5:
self.xlocator = mdates.MinuteLocator(byminute=[0, 10, 20, 30, 40, 50], interval = 1)
elif 600 < clevt_duration <= 3600*0.5:
self.xlocator = mdates.MinuteLocator(byminute=[0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55], interval = 1)
elif 240 < clevt_duration <= 600:
self.xlocator = mdates.MinuteLocator(interval = 2)
else:
self.xlocator = mdates.MinuteLocator(interval = 1)
@staticmethod
def shift(evt_data, pix_xshift=None, pix_yshift=None):
if pix_xshift != None:
for X in evt_data:
X['X'] = X['X'] + pix_xshift
if pix_yshift != None:
for Y in evt_data:
Y['Y'] = Y['Y'] + pix_yshift
return evt_data
@staticmethod
def arcsec_to_pixel(*args, **kwargs):
#NuSTAR values: ['crpix1'+0.5,'crpix2','cdelt1']
meta = {'centre_pix_val': [1499.5+0.5, 1500], 'arc_per_pix':[2.45810736], 'length':False}
#change list with kwargs
for key, kwarg in kwargs.items():
meta[key] = kwarg
#convert numbers so that they are easier to work with
indices_for_centre = {'x':meta['centre_pix_val'][0], 'y':meta['centre_pix_val'][1]}
assert 1 <= len(meta['arc_per_pix']) <= 2, '\'arc_per_pix\' needs to have one or two arguments only.'
if len(meta['arc_per_pix']) == 2:
delta_x = meta['arc_per_pix'][0]
delta_y = meta['arc_per_pix'][1]
elif len(meta['arc_per_pix']) == 1:
delta_x = meta['arc_per_pix'][0]
delta_y = meta['arc_per_pix'][0]
# if have an arcsec length and want the length in pixels
pixel_lengths = []
if meta['length'] == True:
for arg in args:
x_length = (arg[0] / delta_x)
y_length = (arg[1] / delta_y)
pixel_lengths.append([int(round(x_length,0)), int(round(y_length,0))])
return pixel_lengths
#input coordinates as [x,y] in arcseconds
pixel_coords = []
for arg in args:
x_index = indices_for_centre['x'] + (arg[0] / delta_x)
y_index = indices_for_centre['y'] + (arg[1] / delta_y)
pixel_coords.append([int(round(x_index,0)), int(round(y_index,0))])
return pixel_coords
@staticmethod
def pixel_to_arcsec(*args, **kwargs):
#NuSTAR values: ['crpix1'+0.5,'crpix2','cdelt1']
meta = {'centre_pix_val': [1499.5+0.5, 1500], 'arc_per_pix':[2.45810736], 'length':False}
#change list with kwargs
for key, kwarg in kwargs.items():
meta[key] = kwarg
#convert numbers so that they are easier to work with
indices_for_centre = {'x':meta['centre_pix_val'][0], 'y':meta['centre_pix_val'][1]}
assert 1 <= len(meta['arc_per_pix']) <= 2, '\'arc_per_pix\' needs to have one or two arguments only.'
if len(meta['arc_per_pix']) == 2:
delta_x = meta['arc_per_pix'][0]
delta_y = meta['arc_per_pix'][1]
elif len(meta['arc_per_pix']) == 1:
delta_x = meta['arc_per_pix'][0]
delta_y = meta['arc_per_pix'][0]
# if have a pixel length and want the length in arcsec
arcsec_lengths = []
if meta['length'] == True:
for arg in args:
x_length = arg[0] * delta_x
y_length = arg[1] * delta_y
arcsec_lengths.append([x_length, y_length])
return arcsec_lengths
#input coordinates as [col,row] in pixels
arcsec_coords = []
for arg in args:
# arg[0] is x pixel position, so column
x_arcsec = (arg[0] - indices_for_centre['x']) * delta_x
# arg[1] is y pixel position, so row
y_arcsec = (arg[1] - indices_for_centre['y']) * delta_y
arcsec_coords.append([x_arcsec, y_arcsec])
return arcsec_coords
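# Illustrative usage of the two converters above (values assumed, not from the original
# code): with the default NuSTAR sunpos metadata, arcsec_to_pixel([0, 0]) returns the
# pixel indices of disk centre (~[1500, 1500]) and pixel_to_arcsec([1500, 1500]) maps
# that pixel back to ~[0, 0] arcsec.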
def nustar_shift_map(self, x_shift_arc, y_shift_arc):
#find shift in pix
shift_pix = self.arcsec_to_pixel([x_shift_arc, y_shift_arc], length=True)
#shift data now
shift_cleanevt = self.shift(self.cleanevt, pix_xshift=shift_pix[0][0], pix_yshift=shift_pix[0][1])
self.cleanevt = shift_cleanevt
@staticmethod
def fov_rotation(evt_data):
""" Returns the average rotation of the NuSTAR FoV from the gradient of the edges between
det0&3 and 1&2.
Parameters
----------
evt_data : numpy record array
The (sunpos) evt data. Must contain the raw X and Y coordinates ("RAWX", "RAWY"), the
solar X and Y coordinates ("X", "Y"), and the detector each count comes from ("det_id",
an integer from 0 to 3).
Returns
-------
A float of the average rotation from "North" in degrees where anticlockwise is positive.
This assumes the rotation is between 90 and -90 degrees.
Examples
--------
fov_rotation(evt_data)
>>> a number
"""
## split the detectors
d0_counts = evt_data[evt_data["det_id"]==0]
d1_counts = evt_data[evt_data["det_id"]==1]
d2_counts = evt_data[evt_data["det_id"]==2]
d3_counts = evt_data[evt_data["det_id"]==3]
## now split up for the coordinates
rawx0, rawy0, solx0, soly0 = d0_counts["RAWX"], d0_counts["RAWY"], d0_counts["X"], d0_counts["Y"]
rawx1, rawy1, solx1, soly1 = d1_counts["RAWX"], d1_counts["RAWY"], d1_counts["X"], d1_counts["Y"]
rawx2, rawy2, solx2, soly2 = d2_counts["RAWX"], d2_counts["RAWY"], d2_counts["X"], d2_counts["Y"]
rawx3, rawy3, solx3, soly3 = d3_counts["RAWX"], d3_counts["RAWY"], d3_counts["X"], d3_counts["Y"]
args = [[rawx0, rawy0, solx0, soly0, 0],
[rawx1, rawy1, solx1, soly1, 1],
[rawx2, rawy2, solx2, soly2, 2],
[rawx3, rawy3, solx3, soly3, 3]]
gradients = 0
for a in args:
rawx, rawy, solx, soly, det = a
# use the pixel edges between det 0&3 and 1&2, use the raw pixel coordinates for this
# orientation from the nustar_swguide.pdf, Figure 3
if det==0:
cols = collectSameXs(rawy, rawx, solx, soly)
m_row_per_col = maxRowInCol(cols)
elif det==1:
cols = collectSameXs(rawx, rawy, solx, soly)
m_row_per_col = maxRowInCol(cols)
elif det==2:
cols = collectSameXs(rawy, rawx, solx, soly)
m_row_per_col = maxRowInCol(cols)
elif det==3:
cols = collectSameXs(rawx, rawy, solx, soly)
m_row_per_col = maxRowInCol(cols)
# working with rawx and y to make sure using correct edge then find the
# corresponding entries in solar coords
aAndY = getXandY(m_row_per_col)
x, y = aAndY[0], aAndY[1]
## do I want to filter some out?
## leave for now
#if det in [0, 1]:
# x = x[y>np.median(y)]
# y = y[y>np.median(y)]
#elif det in [2, 3]:
# x = x[y<np.median(y)]
# y = y[y<np.median(y)]
# fit a straight line to the edge
popt, pcov = curve_fit(straightLine, x, y, p0=[0, np.mean(y)])
gradients += getDegrees(popt[0])
return gradients/len(args)
def nustar_deconv(self, map_array=None, psf_array=None, it=10, OA2source_offset=None, hor2SourceAngle=None, clip=False):
"""Class mathod to take a map (map_array) and a point spread function (psf_array) and deconvolve using
the Richardson-Lucy method with a number of iterations (it).
Parameters
----------
map_array : 2d array
The map of the data. Should be over the field of view. If "None" then the self.nustar_map class
attribute is used.
Default: None
psf_array : file string or 2d array
The PSF you want to use. This can be a string of the fits file for the PSF or a 2d numpy array.
If "None" then several common paths for nu'+self.fpm+'2dpsfen1_20100101v001.fits' are check and
if the file cannot be found the original map is returned. Currently this won't be rescaled if
it is a different resolution to the map data, it will just crash instead.
Default: None
it : int
Number of iterations for the deconvolution.
Default: 10
OA2source_offset : float
Angle subtended between the optical axis (OA), observer, and the X-ray source in arcminutes
(0<=OA2source_offset<8.5 arcminutes), i.e. radial distance to the source from the OA. Chooses
the correct PSF data to use.
Default: None
hor2SourceAngle : float
Angle subtended between horizontal through the optical axis (OA), and the line through the X-ray source and OA in degrees.
Clockwise is positive and anticlockwise is negative. Symmetric reflected in the origin so -90<=hor2SourceAngle<=90.
Default: None
clip : bool
Set values >1 and <-1 to 1 and -1 respectively after each iteration. Unless working with a
normalised image this should be "False" otherwise it's a mess.
Default: False
Returns
-------
A 2d numpy array of the deconvolved map.
Examples
--------
*Use within the class:
NU_SUNPOS_FILE, ITERATIONS = "nustar_filename", 10
nu = NustarDo(NU_SUNPOS_FILE)
nu.deconvolve['apply'] = True
nu.deconvolve['iterations'] = ITERATIONS
nu.nustar_setmap(submap='FoV')
deconv_map = nu.nustar_map.data
*Use without class:
STRING, FPM = "psf_filename", "A" or "B"
nu = NustarDo()
nu.fpm = FPM
nu.nustar_map = Sunpy NuSTAR map
deconv_map = nu.nustar_deconv(psf_array=STRING)
-or-
MAP, ARRAY, FPM = nustar data 2d numpy array, psf 2d numpy array, "A" or "B"
nu = NustarDo()
nu.fpm = FPM
deconv_map = nu.nustar_deconv(map_array=MAP, psf_array=ARRAY)
"""
## for defaults
if type(map_array) == type(None):
map_array = self.nustar_map.data
if type(psf_array) == type(None):
# default is to check for the nu'+self.fpm+'2dpsfen1_20100101v001.fits' PSF file (the one used in Glesener code)
trials = ['/opt/caldb/data/nustar/fpm/bcf/psf/nu'+self.fpm+'2dpsfen1_20100101v001.fits',
'/usr/local/caldb/data/nustar/fpm/bcf/psf/nu'+self.fpm+'2dpsfen1_20100101v001.fits',
'/home/kris/Desktop/link_to_kris_ganymede/old_scratch_kris/data_and_coding_folder/nustar_psfs/nu'+self.fpm+'2dpsfen1_20100101v001.fits',
'/home/kris/Desktop/nustar_psfs/nu'+self.fpm+'2dpsfen1_20100101v001.fits']
if type(OA2source_offset) != type(None):
psf_OA_angles = np.arange(0,9,0.5) # angles of 0 to 8.5 arcmin in 0.5 arcmin increments
index = np.argmin([abs(psfoaangles - OA2source_offset) for psfoaangles in psf_OA_angles]) # find the closest arcmin array
hdr_unit = index+1 # header units 1 to 18 (one for each of the arcmin entries) and 0 arcmin would be hdr_unit=1, hence the +1
# print("using angle: ", hdr_unit)
else:
hdr_unit = 1
#assume we can't find the file
found_psf = False
for t in trials:
# try the files, if one exists use it
if os.path.exists(t):
psfhdu = fits.open(t)
psf_h = psfhdu[hdr_unit].header['CDELT1'] # increment in degrees/pix
psf_array = psfhdu[hdr_unit].data
psfhdu.close()
psf_used = t
found_psf = True
# if we still couldn't find a default PSF then print this, set self.deconvolve['apply'] to False, and just return the original map
if found_psf == False:
print('Could not find PSF file. Please provide the PSF filename or array.')
print('Returning original map.')
self.deconvolve['apply'] = False
self.deconv_settings_info = {'map':None, 'psf_file':None, 'psf_array':None, 'iterations':None}
return map_array
# check same res, at least in 1-D
assert psf_h*3600 == self.nustar_map.meta['CDELT1'], "The resolution in the PSF and the current map are different."
# if you have provided your own psf file use that instead
elif type(psf_array) == str:
psf_used = psf_array
psfhdu = fits.open(psf_array)
psf_h = psfhdu[1].header['CDELT1'] # increment in degrees/pix
psf_array = psfhdu[1].data
psfhdu.close()
# check same res, at least in 1-D
assert psf_h*3600 == self.nustar_map.meta['CDELT1'], "The resolution in the PSF and the current map are different."
else:
psf_used = 'Custom Array. Hopefully some numbers though.'
if type(hor2SourceAngle)!=type(None):
assert -90<=hor2SourceAngle<=90, "Please give \"hor2SourceAngle\" as an angle from horzontal to the source -90<=hor2SourceAngle<=90 where clockwise is positive and anticlockwise is negative"
psf_array = rotate(psf_array, hor2SourceAngle, reshape=True)
# deconvolve
deconvolved_RL = restoration.richardson_lucy(map_array, psf_array, iterations=it, clip=False)
# deconvolution info for later use
self.deconv_settings_info = {'map':map_array, 'psf_file':psf_used, 'psf_array':psf_array, 'iterations':it}
return deconvolved_RL
@staticmethod
def find_boxOfData(array):
'''If there is an array with loads of 0s or nans and a region of numbers then this returns the rows
and columns the block of numbers is encased between'''
array = np.array(array)
array[np.isnan(array)] = 0
# first and last row
dataRows = []
for i,row in enumerate(array):
rSum = np.sum(row)
if rSum > 0:
dataRows.append(i)
between_rows = [dataRows[0], dataRows[-1]]
# first and last column
dataCols = []
for j,col in enumerate(array.T):
cSum = np.sum(col)
if cSum > 0:
dataCols.append(j)
between_cols = [dataCols[0], dataCols[-1]]
return {'rowIndices':between_rows, 'columnIndices':between_cols}
@staticmethod
def create_submap(sunpy_map_obj, lose_off_limb, submap):
if (lose_off_limb == True) and (len(submap) == 0):
#fix really large plot, instead of going from -3600 to 3600 in x and y
bl = SkyCoord(-1200*u.arcsec, -1200*u.arcsec, frame=sunpy_map_obj.coordinate_frame)
tr = SkyCoord(1200*u.arcsec, 1200*u.arcsec, frame=sunpy_map_obj.coordinate_frame)
return sunpy_map_obj.submap(bl,top_right=tr)
elif len(submap) == 4: #Submap to plot?
bottom_left = {'x':submap[0], 'y':submap[1]}
top_right = {'x':submap[2], 'y':submap[3]}
bl = SkyCoord(bottom_left['x']*u.arcsec, bottom_left['y']*u.arcsec, frame=sunpy_map_obj.coordinate_frame)
tr = SkyCoord(top_right['x']*u.arcsec, top_right['y']*u.arcsec, frame=sunpy_map_obj.coordinate_frame)
return sunpy_map_obj.submap(bl,top_right=tr)
if (lose_off_limb == False):
return sunpy_map_obj
else:
raise TypeError('\nCheck the submap coordinates that were given please. It should be a list with four '
'float/int entries in arcseconds in the form [bottom left x, bottom left y, top right x, '
'top right y].')
if (self.deconvolve['apply'] == True) and (self.gaussian_filter['apply'] == True):
print('Caution! Did you mean to set deconvolve AND gaussian blur to True? If so, then the '
'deconvolution will happen first, then the Gaussian filter is applied.')
# might be best to only allow one of these at a time, either deconvolve OR gaussian filter
deconvolve = {'apply':False, 'iterations':10, 'OA2source_offset':None, 'hor2SourceAngle':None, 'clip':False} # set before nustar_setmap to run deconvolution on map
gaussian_filter = {'apply':False, 'sigma':2, 'mode':'nearest'}
sub_lt_zero = np.nan # replace less than zeroes with this value for plotting in a linear scale
own_map = None # if you already have a map that you want a submap of then set this, be careful not to time normalize again though
def nustar_setmap(self, time_norm=True, lose_off_limb=True, limits=None,
submap=None, rebin_factor=1, norm='linear', house_keeping_file=None):
# adapted from Iain's python code
# Map the filtered evt, into one corrected for livetime (so units count/s)
if type(self.own_map) == type(None):
self.nustar_map = custom_map.make_sunpy(self.cleanevt, self.evt_header, norm_map=False)
else:
self.nustar_map = self.own_map
if time_norm == True:
time_norm = input('Caution! Do you mean to time normalize your \'own_map\'? True or False: ').strip().lower() == 'true'
# field of view in arcseconds
FoVlimits = self.find_boxOfData(self.nustar_map.data)
bottom_left = self.pixel_to_arcsec([FoVlimits['columnIndices'][0], FoVlimits['rowIndices'][0]])[0]
top_right = self.pixel_to_arcsec([FoVlimits['columnIndices'][1]+1, FoVlimits['rowIndices'][1]+1])[0] # plus one as index stops one short
self.FoV = [*bottom_left, *top_right]
if limits == None:
limits = []
if submap == None:
submap = []
elif type(submap) == str:
if submap.upper() == 'FOV':
submap = self.FoV
else:
print('The only string input to submap that is supported at the moment is FOV, fov, FoV, etc.')
self.submap = submap
self.time_norm = time_norm
if self.time_norm == True:
self.livetime(hk_filename=house_keeping_file, set_up_plot=False, show_fig=False)
#livetime correction
time_range = [(data_handling.getTimeFromFormat(tm) - self.rel_t).total_seconds() for tm in self.time_range]
indices = ((self.hk_times>=time_range[0]) & (self.hk_times<time_range[1]))
ltimes_in_range = self.hk_livetimes[indices]
livetime = np.average(ltimes_in_range)
lc_cor_nustar_map = self.nustar_map.data / (livetime * (time_range[1] - time_range[0]))
self.nustar_map = sunpy.map.Map(lc_cor_nustar_map, self.nustar_map.meta)
if (self.deconvolve['apply'] == False):
self.nustar_map = self.create_submap(self.nustar_map, lose_off_limb, self.submap)
elif (self.deconvolve['apply'] == True):
# make sure it's over the FoV
self.nustar_map = self.create_submap(self.nustar_map, lose_off_limb, self.FoV)
dconv = self.nustar_deconv(it=self.deconvolve['iterations'], OA2source_offset=self.deconvolve['OA2source_offset'],
hor2SourceAngle=self.deconvolve['hor2SourceAngle'], clip=self.deconvolve['clip'])
# make new map
self.nustar_map = sunpy.map.Map(dconv, self.nustar_map.meta)
# now cut to the shape you want
self.nustar_map = self.create_submap(self.nustar_map, lose_off_limb, self.submap)
if self.gaussian_filter['apply'] == True:
gaussian_width = self.gaussian_filter['sigma']
m = self.gaussian_filter['mode']
#Apply a gaussian blur to the data to bring out the faint feature
dd = ndimage.gaussian_filter(self.nustar_map.data, gaussian_width, mode=m)
if limits == []:
dmin = np.min(dd[np.nonzero(self.nustar_map.data)])#*1e6 factor was here as the lowest value will come (came from dd) from the gaussian
#filter and not the actual lowest count rate hence the factor
dmax = np.max(dd[np.isfinite(self.nustar_map.data)])
elif len(limits) == 2:
if norm == 'lognorm':
if limits[0] <= 0:
dmin = 0.1
dmax=limits[1]
else:
dmin=limits[0]
dmax=limits[1]
elif norm == 'linear':
dmin=limits[0]
dmax=limits[1]
else:
raise TypeError('\nCheck the limits that were given please.')
else:
dd = self.nustar_map.data
if limits == []:
finite_vals = dd[np.isfinite(dd)]
dmin = np.min(finite_vals[np.nonzero(finite_vals)])
dmax = np.max(finite_vals)
elif len(limits) == 2:
if norm == 'lognorm':
if limits[0] <= 0:
dmin = 0.1
dmax=limits[1]
else:
dmin=limits[0]
dmax=limits[1]
elif norm == 'linear':
dmin=limits[0]
dmax=limits[1]
else:
raise TypeError('\nCheck the limits that were given please. It should be a list with two float/int '
'entries')
self.dmin = dmin # make it possible to get min and max normalisation values of the NuSTAR map
self.dmax = dmax
# Tidy up before plotting
dd[dd < dmin]=0
nm = sunpy.map.Map(dd, self.nustar_map.meta)
if rebin_factor != 1:
#can rebin the pixels if we want to further bring out faint features
#set to 1 means no actual rebinning
nx,ny = np.shape(nm.data)
if rebin_factor >= 1/nx and rebin_factor >= 1/ny:
dimensions = u.Quantity([nx*rebin_factor, ny*rebin_factor], u.pixel)
rsn_map = nm.resample(dimensions)
else:
raise TypeError(f'\nRebin factor must be greater than one over the x,y dimensions (1/{nx} and '
f'1/{ny}) so that the rebinning gives one, or more, pixel(s) for the entire image, i.e. can\'t rebin to half a pixel.')
elif rebin_factor == 1:
rsn_map = nm
del nm
if norm == 'linear':
#change all zeros to NaNs so they appear white in the plot otherwise zeros appear as the lowest colour
#on the colourbar
rsn_map_data = rsn_map.data
rsn_map_data[rsn_map_data <= 0] = self.sub_lt_zero
rsn_map = sunpy.map.Map(rsn_map_data, rsn_map.meta)
# Setup the scaling of the map and colour table
rsn_map.plot_settings['norm'] = colors.Normalize(vmin=dmin,vmax=dmax)
rsn_map.plot_settings['cmap'] = cm.get_cmap('Spectral_r')
elif norm == 'lognorm':
#log(0) produces a NaN (-inf) here anyway so appears white
# Setup the scaling of the map and colour table
rsn_map.plot_settings['norm'] = colors.LogNorm(vmin=dmin,vmax=dmax)
rsn_map.plot_settings['cmap'] = cm.get_cmap('Spectral_r')
self.rsn_map = rsn_map
return rsn_map
annotations = {'apply':False, 'text':'Some text', 'position':(0,0), 'color':'black', 'fontsize':12, 'weight':'normal'}
rcParams_default_setup = True
cbar_title = 'Counts'
ax_label_size = 18
@staticmethod
def draw_solar_grid(rsnmap, axes):
rsnmap.draw_limb(color='black',linewidth=1,linestyle='dashed', zorder=0)
# Manually plot a heliographic overlay - hopefully future no_ticks option in draw_grid
overlay = axes.get_coords_overlay('heliographic_stonyhurst')
lon = overlay[0]
lat = overlay[1]
lon.set_ticks_visible(False)
lat.set_ticks_visible(False)
lat.set_ticklabel_visible(False)
lon.set_ticklabel_visible(False)
lon.coord_wrap = 180
lon.set_major_formatter('dd')
overlay.grid(color='grey', linewidth=0.5, linestyle='dashed', zorder=0)
plt_plot_lines = None
@staticmethod
def execute_plt(*arg):
"""
# Example
file = 'file_sunpos.evt'
nu = nustardo.NustarDo(file)
plt.figure(figsize=(10,10))
nu.nustar_setmap(submap="fov")
x,y = [0, 200], [0, 200]
nu.plt_plot_lines = [f'plt.plot({x},{y}, marker="o", ms=10, c="r")']
nu.nustar_plot(show_fig=False)
plt.show()
"""
for a in arg:
exec(a)
def nustar_plot(self, boxes=None, show_fig=True, save_fig=None, usr_title=None, draw_grid=True):
# adapted from Iain's python code
if self.rcParams_default_setup:
matplotlib.rcParams['font.sans-serif'] = "Arial"
matplotlib.rcParams['font.family'] = "sans-serif"
plt.rcParams["figure.figsize"] = (10,8)
plt.rcParams['font.size'] = 18
plt.rcParams['axes.facecolor']='white'
plt.rcParams['savefig.facecolor']='white'
# Start the plot - many things here just to make matplotlib look decent
self.rectangles = boxes
#fig = plt.figure(figsize=(9, 8), frameon=False)
ax = plt.subplot(projection=self.rsn_map, frame_on=False) #rsn_map nustar_submap
self.axes = ax
ax.set_facecolor((1.0, 1.0, 1.0))
self.rsn_map.plot()
# can't plot properly if the grid is drawn first so this allows plt.plot lines to be passed and executed before the grid is drawn
if type(self.plt_plot_lines)!=type(None):
self.execute_plt(*self.plt_plot_lines)
if self.annotations['apply'] == True:
plt.annotate(self.annotations['text'], self.annotations['position'], color=self.annotations['color'], fontsize=self.annotations['fontsize'], weight=self.annotations['weight'])
if draw_grid:
self.draw_solar_grid(self.rsn_map, ax)
# Tweak the titles and labels
title_obsdate = self.rsn_map.date.strftime('%Y-%b-%dT%H:%M:%S.%f')[:-13] #'{:.20}'.format('{:%Y-%b-%d}'.format(self.rsn_map.date))
fpm = 'FPM'+self.fpm
title_obstime_start = self.time_range[0][-8:]
title_obstime_end = self.time_range[1][-8:]
if type(usr_title) == type(None):
if self.chu_state == 'not_split':
ax.set_title('NuSTAR '+self.e_range_str+' keV '+fpm+' '+ title_obsdate+' '+title_obstime_start+' to '+title_obstime_end)
else:
ax.set_title('NuSTAR '+self.e_range_str+' keV '+fpm+' '+self.chu_state+' '+ title_obsdate+' '+title_obstime_start+' to '+title_obstime_end)
else:
ax.set_title(usr_title)
ax.set_ylabel('y [arcsec]', fontsize=self.ax_label_size)
ax.set_xlabel('x [arcsec]', fontsize=self.ax_label_size)
tx, ty = ax.coords
tx.set_major_formatter('s')
ty.set_major_formatter('s')
ax.grid(False)
# Add a colour bar
if self.time_norm == True:
plt.colorbar(fraction=0.035, pad=0.03,label=self.cbar_title+' $s^{-1}$')
else:
plt.colorbar(fraction=0.035, pad=0.03,label=self.cbar_title)
if boxes is not None:
if np.shape(boxes)==(4,):
rect = boxes
bottom_left_rectangle = SkyCoord(rect[0]*u.arcsec, rect[1]*u.arcsec, frame=self.rsn_map.coordinate_frame)
length = rect[2] - rect[0]
height = rect[3] - rect[1]
self.rsn_map.draw_rectangle(bottom_left_rectangle, width=length*u.arcsec, height=height*u.arcsec, color='black')
else:
b = 1
for rect in boxes:
bottom_left_rectangle = SkyCoord(rect[0]*u.arcsec, rect[1]*u.arcsec, frame=self.rsn_map.coordinate_frame)
length = rect[2] - rect[0]
height = rect[3] - rect[1]
self.rsn_map.draw_rectangle(bottom_left_rectangle, width=length*u.arcsec, height=height*u.arcsec, color='black')
for_text = self.arcsec_to_pixel([rect[0]-10,rect[3]+20], centre_pix_val= [self.rsn_map.meta['crpix1']+0.5, self.rsn_map.meta['crpix2']])
plt.text(for_text[0][0], for_text[0][1], 'Box '+str(b), fontsize=10)
b += 1
if save_fig != None:
plt.savefig(save_fig, dpi=300, bbox_inches='tight')
if show_fig == True:
plt.show('all')
def nustar_peek(self):
#just to view the map with all default settings
self.nustar_setmap()
self.nustar_plot()
@staticmethod
def stepped_lc_from_hist(x, y, inc_edges=True):
"""Takes an x and y input, duplicates the x values and y values with the offset as to produce a new x and y which
will produce a stepped graph once all the scatter points are plotted.
Parameters
----------
x : 1-d list/array
This is the original set of x values or, in the case for a histogram, the bin edges.
y : 1-d list/array
This is the original set of y values.
inc_edges : bool
This determines whether the ends should go from their value to zero (True) or stop where they are (False).
Default: True
Returns
-------
New x and y values that, when plotted, will produce a stepped graph. Can be used to represent binning along the x
axis.
"""
if len(x) == len(y)+1: #since histogram gives one more as they are the boundaries of the bins
old_x = x
x = x[:-1]
elif len(x) == len(y):
x = x #not necessary, but more readable just now
else:
raise ValueError('Either the x-axis array is the edge of the bins (len(x) == len(y)+1) or the x-axis is the '
'value for the beginning of each bin (len(x) == len(y)), you haven\'t satisfied either of '
'these.')
new_x = np.array(np.zeros(2*len(x)))
new_y = np.array(np.zeros(2*len(y)))
for i in range(len(x)): #x and y should be the same length to plot anyway
if i == 0: #start with the 1st and 2nd x value having the same y.
new_x[i] = x[i]
new_y[2*i], new_y[2*i+1] = y[i], y[i]
elif i == len(x)-1: #the last new_x should be one beyond the last x as this value for the start of its bin
if len(x) == len(y)+1:
new_x[2*i-1], new_x[2*i], new_x[2*i+1] = x[i], x[i], old_x[-1]
elif len(x) == len(y):
new_x[2*i-1], new_x[2*i], new_x[2*i+1] = x[i], x[i], x[i]+(x[i]-x[i-1])
new_y[2*i] , new_y[2*i+1] = y[i], y[i]
break
else: #else keep the pattern going that two adjacent x's should share a y
new_x[2*i-1], new_x[2*i] = x[i], x[i]
new_y[2*i], new_y[2*i+1] = y[i], y[i]
if inc_edges == True: #create first and last coordinates to have a new_y of zero
new_x = np.insert(new_x, 0, [new_x[0]])
new_x = np.append(new_x,[new_x[-1]])
new_y = np.insert(new_y, 0, [0])
new_y = np.append(new_y,[0])
return new_x, new_y
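# Illustrative usage (assumed data, not from the original code):
#   counts, edges = np.histogram(times, bins=20)
#   step_x, step_y = NustarDo.stepped_lc_from_hist(edges, counts)
#   plt.plot(step_x, step_y)   # draws the binned counts as a stepped light curve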
@staticmethod
def dt_to_md(dt_array):
if type(dt_array) != list:
dt_array = [dt_array]
new_array = np.zeros(len(dt_array))
for c, d in enumerate(dt_array):
plt_date = mdates.date2num(d)
new_array[c] = plt_date
return new_array
@staticmethod
def spatial_filter(evt_data, sub_region_in_pixels):
x = evt_data['X']
y = evt_data['Y']
#find indices within the x and y pixel range
indices = (sub_region_in_pixels[0][0] < x)&(x<= sub_region_in_pixels[1][0]) & \
(sub_region_in_pixels[0][1] < y)&(y <= sub_region_in_pixels[1][1])
evt_data = evt_data[:len(indices)][indices] # [:len(indices)] is a quick fix, doesn't work otherwise if cleanevt is loaded from pickle
return evt_data
@staticmethod
def time_filter(evtdata, tmrng=None):
''' ***** From filter function ***** >4x quicker to just filter with time than with full filter ***** '''
if tmrng is None:
tmrng = [evtdata['TIME'][0], evtdata['TIME'][-1]]
elif tmrng is not None:
tstart = data_handling.getTimeFromFormat(tmrng[0]) #date must be in this format 'yyyy/mm/dd, HH:MM:SS'
tend = data_handling.getTimeFromFormat(tmrng[1])
rel_t = data_handling.getTimeFromFormat("2010/01/01, 00:00:00") #the date NuSTAR times are defined from
tstart_s = (tstart - rel_t).total_seconds() #both dates are converted to number of seconds from 2010-Jan-1
tend_s = (tend - rel_t).total_seconds()
tmrng = [tstart_s, tend_s]
time_filter = ( (evtdata['TIME']>tmrng[0]) & (evtdata['TIME']<tmrng[1]) )
inds = (time_filter).nonzero()
goodinds=inds[0]
return evtdata[goodinds]
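# Illustrative usage sketch (assumed, not from the original module): the tmrng strings follow the
# 'yyyy/mm/dd, HH:MM:SS' format expected by data_handling.getTimeFromFormat above, e.g.
#   flare_evts = NustarDo.time_filter(evtdata, tmrng=['2018/09/10, 16:22:30', '2018/09/10, 16:24:30'])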
@staticmethod
def nustar_file_finder(start_directory='', obs_id='', descriptor='', fpm='', ext=''):
full_filename = None
file_directory = None
file_name = None
#expression for everything that ends in a slash
search_directory_regex = re.compile(r'\w+/')
#find all the folders in the evt directory (they end with a slash)
search_directory = search_directory_regex.findall(start_directory)
# search the folder the evt file is in first
sd = '/'+''.join(search_directory)
for in_dir in os.listdir(sd):
if in_dir == 'nu' + obs_id + fpm + descriptor + ext:
full_filename = os.path.join(sd, in_dir)
file_directory = sd
file_name = in_dir
return full_filename, file_directory, file_name
#don't include the last folder to go back a directory
search_directory = '/'+''.join(search_directory[:-1]) #go back a directory to search for the house keeping file
for _dirpath, _dirnames, _filenames in os.walk(search_directory):
for _file in _filenames:
if _file == 'nu' + obs_id + fpm + descriptor + ext:
full_filename = os.path.join(_dirpath, _file)
file_directory = _dirpath
file_name = _file
return full_filename, file_directory, file_name
return full_filename, file_directory, file_name
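# Illustrative usage sketch (paths and IDs hypothetical): find the matching house-keeping file,
# searching the .evt directory first and then one level up, e.g.
#   hk_path, hk_dir, hk_name = NustarDo.nustar_file_finder(start_directory='/data/nustar/obs/event_cl/',
#                                                          obs_id='80414201001', descriptor='_fpm',
#                                                          fpm='A', ext='.hk')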
def livetime(self, hk_filename=None, set_up_plot=True, show_fig=True):
#file = '/Users/kris/Documents/PhD/data/nustar/nu80414201001A_fpm.hk'
'''
This has to be moved above the time profile function so it is defined to be called
'''
if self.rcParams_default_setup:
matplotlib.rcParams['font.sans-serif'] = "Arial"
matplotlib.rcParams['font.family'] = "sans-serif"
plt.rcParams["figure.figsize"] = (10,6)
plt.rcParams['font.size'] = 18
if hk_filename == None:
hk_filename, self.hk_directory, self.hk_filename = self.nustar_file_finder(start_directory=self.evt_directory, obs_id=self.obs_id, descriptor='_fpm', fpm=self.fpm, ext='.hk')
if hk_filename == None: #if there is still no hk_filename then there won't be one used
print('Unable to find appropriate .hk file.')
self.hk_times = [] # empty so that the later len() checks see no livetime data
self.hk_livetimes = []
return #stops the function here but doesn't stop the code, this is the same as 'return None'
name_of_hk_file_regex = re.compile(r'\w+\.\w+')
name_of_hk_file = name_of_hk_file_regex.findall(hk_filename)[0]
hk_obs_id_regex = re.compile(r'\d+')
hk_obs_id = hk_obs_id_regex.findall(name_of_hk_file)[0]
hk_fpm_regex = re.compile(r'[A-Z]')
hk_fpm = hk_fpm_regex.findall(name_of_hk_file)[0]
#check .evt file and .hk file match
assert self.obs_id == hk_obs_id, 'The observation id from the .evt file and the .hk are different, i.e. {} =/= {}'.format(self.obs_id, hk_obs_id)
assert self.fpm == hk_fpm, 'The FPM from the .evt file and the .hk are different, i.e. {} =/= {}'.format(self.fpm, hk_fpm)
hdulist = fits.open(hk_filename)
self.hk_header = hdulist[1].header
self.hk_data = hdulist[1].data
hdulist.close()
#check .hk filename matches its header info
assert self.hk_header['OBS_ID'] == hk_obs_id, 'The observation id from the .hk file header and the .hk filename are different, i.e. {} =/= {}'.format(self.hk_header['OBS_ID'], hk_obs_id)
assert self.hk_header['INSTRUME'][-1] == hk_fpm, 'The FPM from the .hk header and the .hk filename are different, i.e. {} =/= {}'.format(self.hk_header['INSTRUME'][-1], hk_fpm)
self.hk_times = self.hk_data['time']
self.lvt_times = [(self.rel_t + timedelta(seconds=t)) for t in self.hk_times]
self.hk_livetimes = self.hk_data['livetime']
if set_up_plot:
hktime = self.hk_times - self.hk_times[0]
dt_times = self.lvt_times
lt_start_hhmmss = str((self.rel_t + timedelta(seconds=np.min(self.hk_times))).strftime('%Y/%m/%d, %H:%M:%S'))
fig = plt.figure()
ax = plt.axes()
plt.semilogy(self.dt_to_md(dt_times), self.hk_livetimes, drawstyle='steps-mid')
plt.title('Livetime - '+lt_start_hhmmss[:10]) #get the date in the title
plt.xlabel('Start Time - '+lt_start_hhmmss[12:])
plt.ylabel('Livetime Fraction')
plt.xlim([data_handling.getTimeFromFormat(t) for t in self.time_range])#[dt_times[0], dt_times[-1]])
plt.ylim([0,1])
fmt = mdates.DateFormatter('%H:%M')
ax.xaxis.set_major_formatter(fmt)
ax.xaxis.set_major_locator(self.xlocator) # xlocator was plt.LinearLocator(9)
plt.xticks(rotation=30)
if show_fig == True:
plt.show()
t_bin = {'seconds_per_bin':10, 'method':'approx'}
def light_curve(self, cleanevt=None, hdr=None, sub_reg=None, tstart=None, tend=None,
count_rate=True, house_keeping_file=None, show_fig=True):
if self.rcParams_default_setup:
matplotlib.rcParams['font.sans-serif'] = "Arial"
matplotlib.rcParams['font.family'] = "sans-serif"
plt.rcParams["figure.figsize"] = (10,6)
plt.rcParams['font.size'] = 18
if cleanevt is None: # use "is None": comparing a FITS_rec with == None is element-wise
cleanevt = self.cleanevt
if hdr is None:
hdr = self.evt_header
if sub_reg == 'boxes':
sub_reg = self.rectangles
self.sub_reg_lc = sub_reg
single_lc = True # just start by assuming one light curve, don't worry, this only gets set to False if not
if tstart == None:
tstart = np.min(cleanevt['TIME'])
self.rel_tstart = tstart #already relative to 1/1/2010 and in seconds
else:
tstart = data_handling.getTimeFromFormat(tstart)
self.rel_tstart = (tstart - self.rel_t).total_seconds()
if tend == None:
tend = np.max(cleanevt['TIME'])
self.rel_tend = tend #already relative to 1/1/2010 and in seconds
else:
tend = data_handling.getTimeFromFormat(tend)
self.rel_tend = (tend - self.rel_t).total_seconds()
if count_rate == True:
self.livetime(hk_filename=house_keeping_file, set_up_plot=False, show_fig=False) #run to get times and livetimes
if len(self.hk_times) == 0:
decision = input('No livetimes present. Do you just want to see the counts vs. time instead: ')
if decision in ['Yes', 'yes', 'Y', 'y']:
count_rate = False
else:
print('Will not show plot.')
return
self.lc_livetimes = 0 # just to have it defined
if self.t_bin['method'] == 'approx':
if (type(cleanevt) == astropy.io.fits.fitsrec.FITS_rec) and (sub_reg == None): #data form of NuSTAR
t_bin_conversion = int((self.rel_tend - self.rel_tstart) // self.t_bin['seconds_per_bin']) #get approximately t_bin seconds per bin as start and end of
#data are fixed when the histogram is created
assert t_bin_conversion >= 1, 'Number of bins cannot be <1. Decrease \'t_bin\' value to get more bins.'
counts = np.histogram(cleanevt['TIME'], t_bin_conversion) #gives out bin values and bin edges
self.lc_counts = counts[0]
times = counts[1][:-1]
self.t_bin_edges = counts[1]
start_hhmmss = str((self.rel_t + timedelta(seconds=np.min(times))).strftime('%H:%M:%S'))
start_yyyymmdd = str((self.rel_t + timedelta(seconds=np.min(times))).strftime('%Y/%m/%d'))
elif (type(cleanevt) == astropy.io.fits.fitsrec.FITS_rec) and (sub_reg != None):
#this is to plot the light curve of a sub-region.
print('Inconvenient to approximate the time bins for the light curve of a sub_region.'
'\nChanging to \'exact\'.')
self.t_bin['method'] = 'exact'
else:
raise TypeError('\'astropy.io.fits.fitsrec.FITS_rec\' is the only supported data type at the moment.')
if self.t_bin['method'] == 'exact': #if since if the 'approx' flag is up and also submap!=None then time profile should be made here
t_bin_number = int((self.rel_tend - self.rel_tstart) // self.t_bin['seconds_per_bin']) #get whole number of bins that are t_bin seconds long and
#doesn't include any time at the end that only has data for some of the last range
assert t_bin_number >= 1, 'Number of bins cannot be <1. Decrease \'t_bin\' value to get more bins.'
edge = self.rel_tstart
self.t_bin_edges = np.zeros(t_bin_number+1) #+1 for the last edge
for t in range(len(self.t_bin_edges)):
self.t_bin_edges[t] = edge
edge += self.t_bin['seconds_per_bin']
times = self.t_bin_edges[:-1]
start_hhmmss = str((self.rel_t + timedelta(seconds=np.min(times))).strftime('%H:%M:%S'))
start_yyyymmdd = str((self.rel_t + timedelta(seconds=np.min(times))).strftime('%Y/%m/%d'))
if (type(cleanevt) == astropy.io.fits.fitsrec.FITS_rec) and (sub_reg == None): #data form of NuSTAR
counts = np.histogram(cleanevt['TIME'], self.t_bin_edges) #gives out bin values and bin edges
self.lc_counts = counts[0]
elif (type(cleanevt) == astropy.io.fits.fitsrec.FITS_rec) and (sub_reg != None):
if np.shape(sub_reg) == (4,):
counts = []
pixels = self.arcsec_to_pixel([sub_reg[0],sub_reg[1]], [sub_reg[2],sub_reg[3]])
spatial_evtdata = self.spatial_filter(self.cleanevt, pixels)
for t in range(len(self.t_bin_edges)-1):
# ts = (datetime.datetime(1970, 1, 1) + timedelta(seconds=(float(self.rel_t.strftime("%s"))+self.t_bin_edges[t]))).strftime('%Y/%m/%d, %H:%M:%S')
# te = (datetime.datetime(1970, 1, 1) + timedelta(seconds=(float(self.rel_t.strftime("%s"))+self.t_bin_edges[t+1]))).strftime('%Y/%m/%d, %H:%M:%S')
ts = (self.rel_t + timedelta(seconds=self.t_bin_edges[t])).strftime('%Y/%m/%d, %H:%M:%S')
te = (self.rel_t + timedelta(seconds=self.t_bin_edges[t+1])).strftime('%Y/%m/%d, %H:%M:%S')
sub_cleanevt = self.time_filter(spatial_evtdata, tmrng=[ts, te])
counts.append(len(sub_cleanevt['TIME']))
self.lc_counts = np.array(counts)
elif np.shape(sub_reg)[1] == 4:
all_counts = {}
all_count_rates = {}
for b, sub_r in enumerate(sub_reg, start=1):
counts = []
pixels = self.arcsec_to_pixel([sub_r[0],sub_r[1]], [sub_r[2],sub_r[3]])
spatial_evtdata = self.spatial_filter(self.cleanevt, pixels)
for t in range(len(self.t_bin_edges)-1):
ts = (self.rel_t + timedelta(seconds=self.t_bin_edges[t])).strftime('%Y/%m/%d, %H:%M:%S')
te = (self.rel_t + timedelta(seconds=self.t_bin_edges[t+1])).strftime('%Y/%m/%d, %H:%M:%S')
sub_cleanevt = self.time_filter(spatial_evtdata, tmrng=[ts, te])
counts.append(len(sub_cleanevt['TIME']))
box = ' (Box '+str(b)+')'
all_counts[box] = np.array(counts)
#if make_final_graph == True:
if count_rate == True:
#livetime correction
livetimes = np.zeros(len(self.t_bin_edges)-1)
for t in range(len(self.t_bin_edges)-1):
indices = ((self.hk_times>=self.t_bin_edges[t]) & (self.hk_times<self.t_bin_edges[t+1]))
ltimes_in_range = self.hk_livetimes[indices]
livetimes[t] = np.average(ltimes_in_range)
self.lc_livetimes = livetimes
counts_per_second = np.array(counts) / (livetimes * (times[1]-times[0]))
fig = plt.figure()
ax = plt.axes()
dt_times = [(self.rel_t + timedelta(seconds=t)) for t in times]
plt.plot(*self.stepped_lc_from_hist(self.dt_to_md(dt_times), counts_per_second))
plt.title('NuSTAR FPM'+self.fpm+' '+self.e_range_str+' keV Light Curve - '+start_yyyymmdd + box)
plt.xlim([data_handling.getTimeFromFormat(t) for t in self.time_range])
plt.xlabel('Start Time - '+start_hhmmss)
plt.ylim([0, np.max(counts_per_second[np.isfinite(counts_per_second)])*1.05])
plt.ylabel('Counts $s^{-1}$')
fmt = mdates.DateFormatter('%H:%M')
ax.xaxis.set_major_formatter(fmt)
ax.xaxis.set_major_locator(self.xlocator)
plt.xticks(rotation=30)
#plt.show()
all_count_rates[box] = counts_per_second
else:
fig = plt.figure()
ax = plt.axes()
dt_times = [(self.rel_t + timedelta(seconds=t)) for t in times]
plt.plot(*self.stepped_lc_from_hist(self.dt_to_md(dt_times), counts))
plt.title('NuSTAR FPM'+self.fpm+' '+self.e_range_str+' keV Light Curve - '+start_yyyymmdd + box)
plt.xlim([data_handling.getTimeFromFormat(t) for t in self.time_range])
plt.xlabel('Start Time - '+start_hhmmss)
plt.ylim([0, np.max(counts[np.isfinite(counts)])*1.05])
plt.ylabel('Counts')
fmt = mdates.DateFormatter('%H:%M')
ax.xaxis.set_major_formatter(fmt)
ax.xaxis.set_major_locator(self.xlocator)
plt.xticks(rotation=30)
#plt.show()
self.lc_counts = all_counts
if not all_count_rates: # all_count_rates is a dict, so test for emptiness rather than == []
self.lc_count_rates = None
else:
self.lc_count_rates = all_count_rates
self.lc_times = dt_times
if show_fig:
plt.show()
single_lc = False
else:
raise TypeError('Check the form of the sub-region was given in, e.g. need [bx,by,tx,ty] or [[bx,by,tx,ty], ...].')
else:
raise TypeError('\'astropy.io.fits.fitsrec.FITS_rec\' is the only supported data type at the moment.')
else:
if (self.t_bin['method'] != 'exact') and (self.t_bin['method'] != 'approx'):
raise ValueError('Only options for the time bins is \'approx\' or \'exact\'.')
if single_lc == True: #only in case multiple regions are plotted then they are handled in its own 'for' loop
if count_rate == True:
#livetime correction
livetimes = np.zeros(len(self.t_bin_edges)-1)
for t in range(len(self.t_bin_edges)-1):
indices = ((self.hk_times>=self.t_bin_edges[t]) & (self.hk_times<self.t_bin_edges[t+1]))
ltimes_in_range = self.hk_livetimes[indices]
livetimes[t] = np.average(ltimes_in_range)
self.lc_livetimes = livetimes
counts_per_second = self.lc_counts / (livetimes * (times[1]-times[0]))
fig = plt.figure()
ax = plt.axes()
dt_times = [(self.rel_t + timedelta(seconds=t)) for t in times]
plt.plot(*self.stepped_lc_from_hist(self.dt_to_md(dt_times), counts_per_second))
plt.title('NuSTAR FPM'+self.fpm+' '+self.e_range_str+' keV Light Curve - '+start_yyyymmdd)
plt.xlim([data_handling.getTimeFromFormat(t) for t in self.time_range])
plt.xlabel('Start Time - '+start_hhmmss)
plt.ylim([0, np.max(counts_per_second[np.isfinite(counts_per_second)])*1.05])
plt.ylabel('Counts $s^{-1}$')
fmt = mdates.DateFormatter('%H:%M')
ax.xaxis.set_major_formatter(fmt)
ax.xaxis.set_major_locator(self.xlocator)
plt.xticks(rotation=30)
#plt.show()
self.lc_times = dt_times
self.lc_count_rates = counts_per_second
else:
fig = plt.figure()
ax = plt.axes()
dt_times = [(self.rel_t + timedelta(seconds=t)) for t in times]
plt.plot(*self.stepped_lc_from_hist(self.dt_to_md(dt_times), self.lc_counts))
plt.title('NuSTAR FPM'+self.fpm+' '+self.e_range_str+' keV Light Curve - '+start_yyyymmdd)
plt.xlim([data_handling.getTimeFromFormat(t) for t in self.time_range])
plt.xlabel('Start Time - '+start_hhmmss)
plt.ylim([0, np.max(self.lc_counts[np.isfinite(self.lc_counts)])*1.05])
plt.ylabel('Counts')
fmt = mdates.DateFormatter('%H:%M')
ax.xaxis.set_major_formatter(fmt)
ax.xaxis.set_major_locator(self.xlocator)
plt.xticks(rotation=30)
#plt.show()
self.lc_times = dt_times
self.lc_count_rates = None
if show_fig:
plt.show()
def full_obs_chus(self, start_directory=None, obs_id=None, descriptor='_chu123', ext='.fits' ,show_fig=True):
'''
Adapted from:
https://github.com/ianan/nustar_sac/blob/master/idl/load_nschu.pro
and
https://github.com/NuSTAR/nustar_solar/blob/master/depricated/solar_mosaic_20150429/read_chus.pro
'''
if start_directory == None:
start_directory=self.evt_directory
if obs_id == None:
obs_id=self.obs_id
chu_filename, self.chu_directory, self.chu_filename = self.nustar_file_finder(start_directory=start_directory, obs_id=obs_id, descriptor=descriptor, ext=ext)
#not self.chu_filename as fits.open needs to know the full path to the file
hdulist = fits.open(chu_filename)
data1 = hdulist[1].data
data2 = hdulist[2].data
data3 = hdulist[3].data
hdulist.close()
# easier to work with numpy arrays later
data_c1 = np.array(data1)
data_c2 = np.array(data2)
data_c3 = np.array(data3)
maxres = 20
for chu_num, dat in enumerate([data_c1, data_c2, data_c3]):
chu_bool = ((dat['VALID']==1) &
(dat['RESIDUAL']<maxres) &
(dat['STARSFAIL']<dat['OBJECTS']) &
(dat['CHUQ'][:,3]!=1))
chu_01 = chu_bool*1 # change true/false into 1/0
chu_mask = chu_01* (chu_num+1)**2 # give each chu a unique number that when it is added to another it gives a unique chu combo, like file permissions
if chu_num == 0:
chu_all = chu_mask # after chu 1 file have an array with 1s and 0s
else:
chu_all += chu_mask # after the others (chu2 and chu3) have an array with 1,4,9,5,10,13,14
# last data array in the for loop can give the time, no. of seconds from 1-Jan-2010
chu_time = dat['TIME']
# reassigned values are at 100, etc. so as to not accidentally double sort the values again
# e.g. if a mask value was changed to 10, then if it was accidentally run again it would get sorted into chu state 13 etc.
chu_all[chu_all == 1] = 100 #chu1 # mask value in array is changed to chu state, e.g. mask value=5, chu state is 12, and value 102
chu_all[chu_all == 4] = 101 #chu2
chu_all[chu_all == 5] = 102 #chu12
chu_all[chu_all == 9] = 103 #chu3
chu_all[chu_all == 10] = 104 #chu13
chu_all[chu_all == 13] = 105 #chu23
chu_all[chu_all == 14] = 106 #chu123
chu_time = chu_time[chu_all > 0] # if there is still no chu assignment for that time then remove
chu_all = chu_all[chu_all > 0]
self.chu_all = chu_all
self.chu_reference = {'chu1':100, 'chu2':101, 'chu12':102, 'chu3':103, 'chu13':104, 'chu23':105, 'chu123':106}
tick_labels = ['','1', '2', '12', '3', '13', '23', '123']
self.chu_times = [(self.rel_t + datetime.timedelta(seconds=t)) for t in chu_time]
dt_times = self.chu_times
fig = plt.figure(figsize=(10,5))
ax = plt.axes()
plt.plot(dt_times, chu_all,'x')
plt.title('CHU States of NuSTAR on ' + dt_times[0].strftime('%Y/%m/%d')) #get the date in the title
plt.xlabel('Start Time - ' + dt_times[0].strftime('%H:%M:%S'))
plt.ylabel('NuSTAR CHUs')
plt.xlim([data_handling.getTimeFromFormat(t) for t in self.time_range])
fmt = mdates.DateFormatter('%H:%M')
ax.xaxis.set_major_formatter(fmt)
ax.xaxis.set_major_locator(self.xlocator)
ax.axes.set_yticklabels(tick_labels)
plt.xticks(rotation=30)
if show_fig == True:
plt.show()
lc_3D_params = {'energy_low':1.6, 'energy_high':80, 'time_range':None} # start at 1.6 keV as this is the lowest (yet not trusted) bin for NuSTAR for binning in 0.04 keV steps
def lightcurves_3D(self, all_evt_data=None, energy_increment=0.04, aspect=6):
'''***Under Construction***'''
if all_evt_data == None:
all_evt_data = self.evt_data
if self.lc_3D_params['time_range'] == None:
self.lc_3D_params['time_range'] = self.time_range
cleaned_all_evt = filter_with_tmrng.event_filter(all_evt_data, fpm = self.fpm,
energy_low = self.lc_3D_params['energy_low'],
energy_high = self.lc_3D_params['energy_high'],
tmrng=self.lc_3D_params['time_range'])
energies = np.arange(1.6 , self.lc_3D_params['energy_high'], energy_increment)
no_of_time = 200
times = np.arange(no_of_time) # np.arange(no_of_time, 1) would return an empty array
er_and_tc = []
for e in range(len(energies)-1):
specific_lc_inds = filter_with_tmrng.by_energy(cleaned_all_evt, energies[e], energies[e+1])
specific_lc_data = cleaned_all_evt[specific_lc_inds]
counts = np.histogram(specific_lc_data['TIME'], no_of_time)[0]
er_and_tc.append(counts)
er_and_tc = np.array(er_and_tc)
print(np.max(er_and_tc))
fig = plt.figure(figsize=(6,8))
plt.imshow(er_and_tc, origin='lower', aspect=aspect, vmax=1)
plt.ylim([self.lc_3D_params['energy_low'], self.lc_3D_params['energy_high']])
plt.xlabel('Time')
plt.ylabel('Energy')
plt.show()
## event list for each energy bin (get energy filter function)
## get lightcurve for each energy bin
## Get 2D array for counts for each energy along rows, and time steps along the columns
## 1D array for the energies, 1D array for time steps
## get separate, static method for 3D plot creation, return axis object
## axis limits to 2.5--80 keV (range of NuSTAR that's well calibrated)
def detectors(self, show_fig=True):
self.all_detectors = {}
plt.figure()
ax = plt.axes()
for d in range(4):
# if the detector is the one I want then I want the time of it, else leave it alone
self.all_detectors['det'+str(d)] = [self.cleanevt['TIME'][c] for c,i in enumerate(self.cleanevt['DET_ID']) if i==d]
# get percentage of counts each detector contributed to the full time
self.all_detectors['det'+str(d)+'%'] = len(self.all_detectors['det'+str(d)]) / len(self.cleanevt['TIME']) * 100
dets = np.histogram(self.all_detectors['det'+str(d)], self.t_bin_edges) #gives out bin values and bin edges
dt_times = [(self.rel_t + timedelta(seconds=t)) for t in dets[1]]
plt.plot(*self.stepped_lc_from_hist(self.dt_to_md(dt_times), dets[0]), label='det'+str(d)+': '+'{:.1f}'.format(self.all_detectors['det'+str(d)+'%'])+'%')
plt.legend()
fmt = mdates.DateFormatter('%H:%M')
ax.xaxis.set_major_formatter(fmt)
ax.xaxis.set_major_locator(self.xlocator)
plt.xticks(rotation=30)
plt.title('Detector Contribution '+self.e_range_str+" keV")
plt.ylabel('Counts from detector')
plt.xlabel('Time')
if show_fig:
plt.show()
#return plt.g
def plotChuTimes(self, span=True, axis=None):
# remember to show_fig=False for the plotting methods as to allow alterations of the figures once run
# look for and get the start and end times for each CHU file
chus = ['chu1', 'chu2', 'chu12', 'chu3', 'chu13', 'chu23', 'chu123']
colours = ['k', 'r', 'g', 'c', 'm', 'b', 'y']
chuChanges = {}
axis = {'ax':plt} if axis is None else {'ax':axis}
pipeline_modes = ["_S_", "_N_"]
for c, chu in enumerate(chus):
for pm in pipeline_modes:
chuFile = self.evt_directory+'nu' + self.obs_id + self.fpm + '06_' + chu + pm + 'cl_sunpos.evt'
if isfile(chuFile):
break
if not isfile(chuFile):
continue
hdulist = fits.open(chuFile)
evt_data = hdulist[1].data
hdulist.close()
chuChanges[chu] = [self.rel_t + timedelta(seconds=min(evt_data['time'])),
self.rel_t + timedelta(seconds=max(evt_data['time']))]
# plot a shaded region or just the time boundaries for the chu changes
if span:
axis['ax'].axvspan(*chuChanges[chu], alpha=0.1, color=colours[c])
else:
axis['ax'].axvline(chuChanges[chu][0], color=colours[c])
axis['ax'].axvline(chuChanges[chu][1], color=colours[c])
self.chuChanges = chuChanges
def save(self, save_dir='./', folder_name=None, overwrite=False, **kwargs):
#replace folder of saved data if run twice or just make a new one?
"""
Can I automate the process using dir(nu) since this has every variable created?
Or at least add to a list of attributes to be saved.
Use os module to create appropriate directory structure for saved attributes.
"""
#print(dir(nuA))
'''
Variables/info to save:
***** evt_file_used *****
~evt_directory, evt_filename, evt_data, evt_header #where did the data come from?
~meta data, chu_state, energy range, fpm, obs id
***** house_keeping_file_used *****
~self.hk_directory, self.hk_filename, hk_data, hk_header #what hk file was used?
***** nustar_livetime_data *****
~hk_livetimes, hk_times, livetimes plot
***** nustar_map_data *****
~rsn_map and plot (for plot need to run the nustar_plot() with save enabled) #what does it look like?
~gaussian filter applied, rectangle coordinates
***** nustar_light_curve_data *****
~lc_counts/lc_count_rates, lc_times, lightcurve plot(s)
~rectangle coordinates
New stuff to save:
***** chu function ***** deconvolve settings *****
'''
if self.chu_state != 'not_split' and folder_name is None:
nustar_folder = save_dir + self.obs_id + self.fpm + '_' + self.chu_state + '_nustar_folder'
elif folder_name is not None:
nustar_folder = folder_name
else:
nustar_folder = save_dir + self.obs_id + self.fpm + '_nustar_folder'
# Create target Directory if don't exist
if not os.path.exists(nustar_folder + '/'):
nustar_folder = nustar_folder + '/'
os.mkdir(nustar_folder) #make empty folder
print("Directory " , nustar_folder , " Created.", end='')
# If the folder exists and overwrite is True then replace the first one
elif os.path.exists(nustar_folder + '/') and (overwrite == True):
nustar_folder = nustar_folder + '/'
subprocess.check_output(['rm', '-r', nustar_folder]) #remove evrything in it too
os.mkdir(nustar_folder) #make empty folder
print("Replacing directory " , nustar_folder, end='')
# If the folder exists and overwrite is False then just make another file with an index
elif os.path.exists(nustar_folder + '/') and (overwrite == False):
number_exist = len(np.nonzero(['nustar_folder' in f for f in os.listdir(save_dir)])[0])
nustar_folder = nustar_folder + '(' + str(number_exist) + ')/'
os.mkdir(nustar_folder)
print("Directory " , nustar_folder , " already exists. Creating another.", end='')
self.nustar_folder = nustar_folder
# Now 'nustar_folder' is the folder things will be save into
# Start with evt file information
evt_folder = nustar_folder + 'evt_file_used/'
os.mkdir(evt_folder)
evt_list_to_save = ['evt_directory', 'evt_filename', 'obs_id', 'fpm', 'chu_state', 'energy_range',
'time_range', 'evt_data', 'evt_header', 'cleanevt']
evt_info = list(set(dir(self)) & set(evt_list_to_save))
evt_to_store = {}
for name in evt_info:
evt_to_store[name] = self.__dict__[name]
with open(evt_folder + 'evt_file_info.pickle', 'wb') as evt_save_file:
pickle.dump(evt_to_store, evt_save_file, protocol=pickle.HIGHEST_PROTOCOL)
# hk file information
hk_folder = nustar_folder + 'hk_file_used/'
os.mkdir(hk_folder)
hk_list_to_save = ['hk_directory', 'hk_filename', 'hk_data', 'hk_header']
hk_info = list(set(dir(self)) & set(hk_list_to_save))
hk_to_store = {}
for name in hk_info:
hk_to_store[name] = self.__dict__[name]
with open(hk_folder + 'hk_file_info.pickle', 'wb') as hk_save_file:
pickle.dump(hk_to_store, hk_save_file, protocol=pickle.HIGHEST_PROTOCOL)
# Livetime info
lvt_folder = nustar_folder + 'livetime_data/'
os.mkdir(lvt_folder)
lvt_list_to_save = ['hk_times', 'hk_livetimes']
lvt_info = list(set(dir(self)) & set(lvt_list_to_save))
lvt_to_store = {}
for name in lvt_info:
lvt_to_store[name] = self.__dict__[name]
with open(lvt_folder + 'livetime_data.pickle', 'wb') as lvt_save_file:
pickle.dump(lvt_to_store, lvt_save_file, protocol=pickle.HIGHEST_PROTOCOL)
# Map info
map_folder = nustar_folder + 'map_data/'
os.mkdir(map_folder)
map_list_to_save = ['rsn_map', 'gaussian_filter', 'time_norm', 'rectangles']
map_info = list(set(dir(self)) & set(map_list_to_save))
map_to_store = {}
for name in map_info:
try:
map_to_store[name] = self.__dict__[name]
except KeyError:
map_to_store[name] = NustarDo.__dict__[name]
with open(map_folder + 'map_data.pickle', 'wb') as map_save_file:
pickle.dump(map_to_store, map_save_file, protocol=pickle.HIGHEST_PROTOCOL)
# Light curve info
lc_folder = nustar_folder + 'light_curve_data/'
os.mkdir(lc_folder)
lc_list_to_save = ['lc_times', 'lc_counts', 'lc_count_rates', 'sub_reg_lc', 'lc_livetimes']
lc_info = list(set(dir(self)) & set(lc_list_to_save))
lc_to_store = {}
for name in lc_info:
lc_to_store[name] = self.__dict__[name]
with open(lc_folder + 'light_curve_data.pickle', 'wb') as lc_save_file:
pickle.dump(lc_to_store, lc_save_file, protocol=pickle.HIGHEST_PROTOCOL)
# Can save your own stuff
if len(kwargs) > 0:
own_folder = nustar_folder
with open(own_folder + 'kwargs_data.pickle', 'wb') as own_save_file:
pickle.dump(kwargs, own_save_file, protocol=pickle.HIGHEST_PROTOCOL)
# save the object that can be loaded back in
with open(nustar_folder + nustar_folder[:-1].split('/')[-1] + '.pickle', 'wb') as object_file:
pickle.dump(self.__dict__, object_file, protocol=pickle.HIGHEST_PROTOCOL)
self.object_file = nustar_folder + nustar_folder[:-1].split('/')[-1] + '.pickle'
print(' Now Populated.')
def load(self, object_file=None):
'''Takes the object's namespace from the save() method and loads it back into all its attributes.'''
if not hasattr(self, 'object_file') and object_file is None:
print('\'object_file\' attribute and input to this function are both \'None\', please provide one. \n Note: the input for this method takes priority.')
return
object_file = object_file if (object_file is not None) else self.object_file
with open(object_file, "rb") as input_file:
self.__dict__ = pickle.load(input_file)
def shift(evt_data, pix_xshift=None, pix_yshift=None):
if pix_xshift != None:
for X in evt_data:
X['X'] = X['X'] + pix_xshift
if pix_yshift != None:
for Y in evt_data:
Y['Y'] = Y['Y'] + pix_yshift
return evt_data
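# Illustrative usage sketch (pixel offsets hypothetical): nudge an events list by whole pixels,
# e.g. to line two focal plane modules up by eye before making a map:
#   evt_data = shift(evt_data, pix_xshift=3, pix_yshift=-2)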
def nustars_synth_count(temp_response_dataxy, plasma_temp, plasma_em, source_area, errors=None, Tresp_syserror=0, log_data=False):
"""Takes data for a channel's temperature response, plasma temperature and emission measure and area of source and
returns the expected DN/s per pixel.
*** Check output and make sure your units work ***
Parameters
----------
temp_response_dataxy : dict
The x and y data for the temperature response of the channel of interest, e.g. {'x':[...], 'y':[...]}.
plasma_temp : float
Temperature of the response you want in MK.
plasma_em : float
Volumetric emission measure of the plasma in cm^-3.
(If you have column emission measure, i.e. cm^-5, then set source_area=1.)
source_area : float
Area of the source in cm^2.
errors : dict
A dictionary of dictionaries containing the errors on T and EM, e.g. {'T':{'+':a, '-':b},
'EM':{'+':c, '-':d}}.
Default: None
Tresp_syserror : float
Fractional systematic error on the temperature response, e.g. 20% error on temp_response_dataxy['y'] means Tresp_syserror=0.2
Default: 0
log_data : bool
Do you want the data (x and y) logged (base 10) for the interpolation?
Default: False
Returns
-------
A dictionary of floats that is the synthetic DN/s per pixel for the data given, temperature response,
temperature, and emission measure with units and errors.
"""
# find temperature response at the given plasma temperature in DN cm^5 pix^-1 s^-1
if log_data:
f = interpolate.interp1d(np.log10(temp_response_dataxy['x']), np.log10(temp_response_dataxy['y']))
temp_response = [10**f(np.log10(plasma_temp))]
else:
f = interpolate.interp1d(temp_response_dataxy['x'], temp_response_dataxy['y'])
temp_response = [f(plasma_temp)]
syn_flux = [tr * plasma_em * (1 / source_area) for tr in temp_response]
# For errors
if errors is not None:
min_T, max_T = plasma_temp - errors['T']['-'], plasma_temp + errors['T']['+']
min_EM, max_EM = plasma_em - errors['EM']['-'], plasma_em + errors['EM']['+']
e_response = []
for Ts in [min_T, max_T]:
# find temperature response at the given plasma temperature in DN cm^5 pix^-1 s^-1
r = [f(Ts)]
e_response.append(r[0])
temp_max_response = temp_response_dataxy['x'][np.argmax(temp_response_dataxy['y'])]
# what if there is a bump between central value and error range
if (e_response[0] < temp_response[0]) and (e_response[1] < temp_response[0]):
if min_T < temp_max_response < plasma_temp:
e_response[0] = np.max(temp_response_dataxy['y'])
elif plasma_temp < temp_max_response < max_T:
e_response[1] = np.max(temp_response_dataxy['y'])
min_R, max_R = e_response[0], e_response[1] #R from min_T and R from max_T
# include temperature response error
up_resp = 1 + Tresp_syserror
down_resp = 1 - Tresp_syserror
#flux from min_T(max_EM) and flux from max_T(min_EM)
min_flux, max_flux = min_R * max_EM * (1 / source_area), max_R * min_EM * (1 / source_area)
flux_range = [min_flux, max_flux]
e_response = np.array(e_response)[np.isfinite(e_response)]
flux_range = np.array(flux_range)[np.isfinite(flux_range)]
# max flux could be up_resp more, and min flux could be be down_resp more
f_err = [up_resp*np.max(flux_range) - syn_flux[0], syn_flux[0] - down_resp*np.min(flux_range)]
for n,f in enumerate(f_err):
if f < 0:
f_err[n] = np.max(f_err)
errors = {'syn_flux_err':{'+': f_err[0], '-':f_err[1]},
't_res_err':{'+': abs(up_resp*np.max(e_response) - temp_response[0]), '-':abs(temp_response[0] - down_resp*np.min(e_response))},
't_res_syserr':[Tresp_syserror*100, '%'],
'T_err':{'+': errors['T']['+'], '-':errors['T']['-']},
'EM_err':{'+': errors['EM']['+'], '-':errors['EM']['-']}}
return {'syn_flux':[syn_flux[0],'DN pix^-1 s^-1'], 't_res':[temp_response, 'DN cm^5 pix^-1 s^-1'], 'T':[plasma_temp, 'MK'], 'EM':[plasma_em, 'cm^-3'], 'errors':errors}
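# Illustrative usage sketch (all numbers hypothetical): expected DN/s per pixel for a 5 MK,
# 1e46 cm^-3 source covering ~1e18 cm^2, given a temperature response curve 'tresp':
#   tresp = {'x': temps_in_MK, 'y': response_in_DN_cm5_per_pix_per_s}
#   synth = nustars_synth_count(tresp, plasma_temp=5, plasma_em=1e46, source_area=1e18)
#   print(synth['syn_flux'])  # -> [value, 'DN pix^-1 s^-1']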
def timefilter_evt(file, time_range=None, save_dir=None):
"""Takes a .evt file and filters the events list to a given time range. Only for region selection, do not use directly with spectral fitting software.
Parameters
----------
file : Str
File (or directory/file) of the .evt file to be filtered by time.
time_range : list
A list of length 2 with the start and end date and time. Must be given in a specific format, e.g. time_range=['2018/09/10, 16:22:30', '2018/09/10, 16:24:30'].
Default: None
save_dir : Str
String of the directory for the filtered file to be saved.
Default: None
Returns
-------
Creates a new file file with '_tf' before the file extension (meaning time filtered) and returns the name of the new file.
"""
if time_range == None:
print('No time_range given. Nothing will be done.')
return
file_regex = re.compile(r'.\w+') # form to split up filename string
ext = file_regex.findall(file) # splits up file into all components, directories, filename, extension
if save_dir == None:
new_file_name = ''.join(ext[:-1]) + '_tf' + ext[-1] # '_tf' for time filtered
else:
new_file_name = save_dir + ext[-2] + '_tf' + ext[-1]
hdulist = fits.open(file)
evtdata=hdulist[1].data # data to be filtered
evt_in_time = NustarDo().time_filter(evtdata, tmrng=time_range) # picks events inside time range
hdulist[1].data = evt_in_time # replaces this hdu with the filtered events list
hdulist.writeto(new_file_name, overwrite=True) # saves the edited file, original stays as is
hdulist.close()
return new_file_name
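# Illustrative usage sketch (file name hypothetical): write a copy of an event list trimmed to a
# two-minute window; the new file gets '_tf' before its extension:
#   tf_file = timefilter_evt('nu80414201001A06_cl_sunpos.evt',
#                            time_range=['2018/09/10, 16:22:30', '2018/09/10, 16:24:30'])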
def CheckGrade0ToAllGrades(evtFile, wholeRangeToo=False, saveFig=None, timeRange=None, printOut=False, shortTitle=""):
"""Takes a NuSTAR evt file and compares the grade 0 events to the events of all grades.
Adapted from: https://github.com/ianan/ns_proc_test/blob/main/test_proc_jun20_002.ipynb
Parameters
----------
evtFile : str
The .evt file.
wholeRangeToo : Bool
If you want to plot the whole energy range in a second plot, next to the one ranging from
1.6--10 keV, set this to True.
Default: False
saveFig : str
If you want to save the figure made as a PDF then set this to a string of the save name.
Default: None
timeRange : list, 2 strings
If you only want a certain time range of the total file's spectrum to be plotted, e.g.
["%Y/%m/%d, %H:%M:%S", "%Y/%m/%d, %H:%M:%S"].
Default: None
printOut : Bool
If you want to print out the output nicely(-ish) set this to True.
Default: False
shortTitle : Str
Add a quick title to help keep track of the plots
Default: ""
Returns
-------
Dictionary containing the file name used ["file"], the time range of the file ["fileTimeRange"],
time range you asked it to plot ["timeRangeGivenToPlot"], effective exposure of full file ["eff_exp"],
ontime of full file ["ontime"], and percentage livetime ["lvtime_percent"] of full file given.
"""
# read in the .evt file (grade filtering is applied below)
hdulist = fits.open(evtFile)
evt_data = hdulist[1].data
evt_header = hdulist[1].header
hdulist.close()
# what is the time range of the file before filtering with time if you want
## nustar times are measured in seconds from this date
rel_t = data_handling.getTimeFromFormat("2010/01/01, 00:00:00")
file_start = str((rel_t + timedelta(seconds=np.min(evt_data["time"]))).strftime('%Y/%m/%d, %H:%M:%S'))
file_end = str((rel_t + timedelta(seconds=np.max(evt_data["time"]))).strftime('%Y/%m/%d, %H:%M:%S'))
# filter evt file by time?
if type(timeRange) == list:
if len(timeRange) == 2:
evt_data = NustarDo().time_filter(evt_data, tmrng=timeRange)
# get the data
hist_gradeAll, be_gradeAll = np.histogram(evt_data['pi']*0.04+1.6,bins=np.arange(1.6,79,0.04))
# work out the grade 0 spectra as well
data_grade0 = evt_data['pi'][evt_data['grade']==0]
hist_grade0, be_grade0 = np.histogram(data_grade0*0.04+1.6,bins=np.arange(1.6,79,0.04))
# plotting info
width = 11 if wholeRangeToo else 5
columns = 2 if wholeRangeToo else 1
y_lims_spec = [1e-1, 1.1*np.max(hist_gradeAll)]
ratio = hist_gradeAll/hist_grade0
finite_vals = np.isfinite(ratio)
y_lims_ratio = [0.95, 1.05*np.max(ratio[finite_vals])] if wholeRangeToo else [0.95, 1.05*np.max(ratio[finite_vals][:int((10-1.6)/0.04)])]
axes_made = []
plt.figure(figsize=(width,7))
# define subplots for close look
ax1 = plt.subplot2grid((4, columns), (0, 0), colspan=1, rowspan=3)
axes_made.append(ax1)
ax2 = plt.subplot2grid((4, columns), (3, 0), colspan=1, rowspan=1)
axes_made.append(ax2)
plt.tight_layout()
# axis 1: the plots for all grades and grade 0
ax1.plot(be_gradeAll[:-1], hist_gradeAll, drawstyle="steps-pre", label="Grade All")
ax1.plot(be_grade0[:-1], hist_grade0, drawstyle="steps-pre", label="Grade 0")
ax1.set_yscale("log")
ax1.set_ylim(y_lims_spec)
ax1.set_ylabel("Counts")# s$^{-1}$ keV$^{-1}$")
plt.setp(ax1.get_xticklabels(), visible=False)
ax1.set_xlim([1.6,10])
ax1.set_title("Grade 0 vs All Grades - "+shortTitle)
ax1.legend()
# axis 2: the difference between all grades and grade 0
ax2.plot(be_grade0[:-1], ratio, drawstyle="steps-pre", color='k')
ax2.set_ylabel("All Grades / Grade0")
ax2.set_ylim(y_lims_ratio)
ax2.set_xlim([1.6,10])
ax2.set_xlabel("Energy [keV]")
ax2.grid(axis='y')
# define subplots for whole energy range
if wholeRangeToo:
# define subplots for close look
ax3 = plt.subplot2grid((4, 2), (0, 1), colspan=1, rowspan=3)
axes_made.append(ax3)
ax4 = plt.subplot2grid((4, 2), (3, 1), colspan=1, rowspan=1)
axes_made.append(ax4)
plt.tight_layout()
# axis 1: the plots for all grades and grade 0
ax3.plot(be_gradeAll[:-1], hist_gradeAll, drawstyle="steps-pre", label="Grade All")
ax3.plot(be_grade0[:-1], hist_grade0, drawstyle="steps-pre", label="Grade 0")
ax3.set_yscale("log")
ax3.set_ylim(y_lims_spec)
ax3.set_xscale("log")
plt.setp(ax3.get_xticklabels(), visible=False)
ax3.set_xlim([1.6,79])
ax3.set_title("Same But Whole E-range")
ax3.legend()
# axis 2: the difference between all grades and grade 0
ax4.plot(be_grade0[:-1], ratio, drawstyle="steps-pre", color='k')
ax4.set_ylim(y_lims_ratio)
ax4.set_xscale("log")
ax4.set_xlim([1.6,79])
ax4.set_xlabel("Energy [keV]")
ax4.grid(axis='y')
if type(saveFig) == str:
plt.savefig(saveFig, bbox_inches="tight")
# plt.show()
inform = {"file":evtFile,
"fileTimeRange":[file_start, file_end],
"timeRangeGivenToPlot":timeRange,
"eff_exp":evt_header['livetime'],
"ontime":evt_header['ontime'],
"lvtime_percent":100*evt_header['livetime']/evt_header['ontime']}
if printOut:
for key in inform.keys():
print(key, " : ", inform[key])
return inform, axes_made
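# Illustrative usage sketch (file name hypothetical): compare grade 0 with all-grade counts and
# keep the returned axes for further annotation:
#   info, axes = CheckGrade0ToAllGrades('nu80414201001A06_cl_sunpos.evt', wholeRangeToo=True, printOut=True)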
def CheckGrade0ToAnyGrades(evtFile, grades, wholeRangeToo=False, saveFig=None, timeRange=None, printOut=False, shortTitle="", xlims=None):
"""Takes a NuSTAR evt file and compares the grade 0 events to the events of all grades.
Adapted from: https://github.com/ianan/ns_proc_test/blob/main/test_proc_jun20_002.ipynb
Parameters
----------
evtFile : str
The .evt file.
grades : list of length 1 or 2 list
A list of the lists of grades you want the grade 0 counts to be compared against. E.g. grades=[[1], [0,4]]
means that grade zero will be checked against grade 1 counts and grade 0-4 counts inclusive.
wholeRangeToo : Bool
If you want to plot the whole energy range in a second plot, next to the one ranging from
1.6--10 keV, set this to True.
Default: False
saveFig : str
If you want to save the figure made as a PDF then set this to a string of the save name.
Default: None
timeRange : list, 2 strings
If you only want a certain time range of the total file's spectrum to be plotted, e.g.
["%Y/%m/%d, %H:%M:%S", "%Y/%m/%d, %H:%M:%S"].
Default: None
printOut : Bool
If you want to print out the output nicely(-ish) set this to True.
Default: False
shortTitle : Str
Add a quick title to help keep track of the plots
Default: ""
Returns
-------
Dictionary containing the file name used ["file"], the time range of the file ["fileTimeRange"],
time range you asked it to plot ["timeRangeGivenToPlot"], effective exposure of full file ["eff_exp"],
ontime of full file ["ontime"], percentage livetime ["lvtime_percent"] of full file given, Grade 0
plotting info, and you custom grade info too.
"""
# read in the .evt file (grade filtering is applied below)
hdulist = fits.open(evtFile)
evt_data = hdulist[1].data
evt_header = hdulist[1].header
hdulist.close()
# what is the time range of the file before filtering with time if you want
## nustar times are measured in seconds from this date
rel_t = data_handling.getTimeFromFormat("2010/01/01, 00:00:00")
file_start = str((rel_t + timedelta(seconds=np.min(evt_data["time"]))).strftime('%Y/%m/%d, %H:%M:%S'))
file_end = str((rel_t + timedelta(seconds=np.max(evt_data["time"]))).strftime('%Y/%m/%d, %H:%M:%S'))
# filter evt file by time?
if type(timeRange) == list:
if len(timeRange) == 2:
evt_data = NustarDo().time_filter(evt_data, tmrng=timeRange)
# work out the grade 0 spectra as well
data_grade0 = evt_data['pi'][evt_data['grade']==0]
hist_grade0, be_grade0 = np.histogram(data_grade0*0.04+1.6,bins=np.arange(1.6,79,0.04))
other_grades = {}
ratios = []
max_ratios, min_ratios = [], []
# get the data
for g in grades:
if len(g)==1:
data_grade = evt_data['pi'][evt_data['grade']==g[0]]
g_str = "Grade "+str(g[0])
other_grades[g_str] = np.histogram(data_grade*0.04+1.6,bins=np.arange(1.6,79,0.04))
else:
data_grade = evt_data['pi'][(evt_data['grade']>=g[0]) & (evt_data['grade']<=g[1])]
g_str = "Grade "+str(g[0])+"-"+str(g[1])
other_grades[g_str] = np.histogram(data_grade*0.04+1.6,bins=np.arange(1.6,79,0.04))
ratio = other_grades[g_str][0]/hist_grade0
ratios.append(ratio)
maximum = np.max(ratio[np.isfinite(ratio)]) if wholeRangeToo else np.max(ratio[np.isfinite(ratio)][:int((10-1.6)/0.04)])
minimum = np.min(ratio[np.isfinite(ratio)]) if wholeRangeToo else np.min(ratio[np.isfinite(ratio)][:int((10-1.6)/0.04)])
max_ratios.append(maximum)
min_ratios.append(minimum)
# plotting info
width = 11 if wholeRangeToo else 5
columns = 2 if wholeRangeToo else 1
y_lims_spec = [1e-1, 1.1*np.max(hist_grade0)]
y_lims_ratio = [0.95*np.min(min_ratios), 1.05*np.max(max_ratios)]
axes_made = []
plt.figure(figsize=(width,7))
# define subplots for close look
ax1 = plt.subplot2grid((4, columns), (0, 0), colspan=1, rowspan=3)
axes_made.append(ax1)
ax2 = plt.subplot2grid((4, columns), (3, 0), colspan=1, rowspan=1)
axes_made.append(ax2)
plt.tight_layout()
# axis 1: the plots for all grades and grade 0
for key, r in zip(other_grades.keys(), ratios):
ax1.plot(other_grades[key][1][:-1], other_grades[key][0], drawstyle="steps-pre", label=key)
ax2.plot(other_grades[key][1][:-1], r, drawstyle="steps-pre")
ax1.plot(be_grade0[:-1], hist_grade0, drawstyle="steps-pre", label="Grade 0")
ax1.set_yscale("log")
ax1.set_ylim(y_lims_spec)
ax1.set_ylabel("Counts")# s$^{-1}$ keV$^{-1}$")
plt.setp(ax1.get_xticklabels(), visible=False)
xlims = xlims if type(xlims)!=type(None) else [1.6,10]
ax1.set_xlim(xlims)
ax1.set_title("Grade 0 vs Chosen Grades - "+shortTitle)
ax1.legend()
# axis 2: the difference between all grades and grade 0
# ax2.plot(be_grade0[:-1], ratio, drawstyle="steps-pre", color='k')
ax2.set_ylabel("Chosen Grades / Grade0")
ax2.set_ylim(y_lims_ratio)
ax2.set_xlim(xlims)
ax2.set_xlabel("Energy [keV]")
ax2.grid(axis='y')
# define subplots for whole energy range
if wholeRangeToo:
# define subplots for close look
ax3 = plt.subplot2grid((4, 2), (0, 1), colspan=1, rowspan=3)
axes_made.append(ax3)
ax4 = plt.subplot2grid((4, 2), (3, 1), colspan=1, rowspan=1)
axes_made.append(ax4)
plt.tight_layout()
# axis 1: the plots for all grades and grade 0
for key, r in zip(other_grades.keys(), ratios):
ax3.plot(other_grades[key][1][:-1], other_grades[key][0], drawstyle="steps-pre", label=key)
ax4.plot(other_grades[key][1][:-1], r, drawstyle="steps-pre")
ax3.plot(be_grade0[:-1], hist_grade0, drawstyle="steps-pre", label="Grade 0")
ax3.set_yscale("log")
ax3.set_ylim(y_lims_spec)
ax3.set_xscale("log")
plt.setp(ax3.get_xticklabels(), visible=False)
ax3.set_xlim([1.6,79])
ax3.set_title("Same But Whole E-range")
ax3.legend()
# axis 2: the difference between all grades and grade 0
# ax4.plot(be_grade0[:-1], ratio, drawstyle="steps-pre", color='k')
ax4.set_ylim(y_lims_ratio)
ax4.set_xscale("log")
ax4.set_xlim([1.6,79])
ax4.set_xlabel("Energy [keV]")
ax4.grid(axis='y')
if type(saveFig) == str:
plt.savefig(saveFig, bbox_inches="tight")
# plt.show()
inform = {"file":evtFile,
"fileTimeRange":[file_start, file_end],
"timeRangeGivenToPlot":timeRange,
"eff_exp":evt_header['livetime'],
"ontime":evt_header['ontime'],
"lvtime_percent":100*evt_header['livetime']/evt_header['ontime'],
"Grade 0":[hist_grade0, be_grade0],
**other_grades}
if printOut:
for key in inform.keys():
print(key, " : ", inform[key])
return inform, axes_made
## functions to help find the FoV rotation
def collectSameXs(rawx, rawy, solx, soly):
""" Returns a dictionary where each column is given a unique entry with a list
of the rows that correspond to that one column from the evt file. Also saves the
solar coordinates for that raw coordinate column with the rawx column key+"map2sol".
Parameters
----------
rawx, rawy : lists
Raw coordinates of the evt counts.
solx, soly : lists
Solar coordinates of the sunpos evt counts.
Returns
-------
A dictionary.
Examples
--------
rawx, rawy = [1,2,3,3], [7,8,4,9]
solx, soly = [101, 102, 103, 104], [250, 252, 254, 256]
collectSameXs(rawx, rawy, solx, soly)
>>> {"1":[7], "1map2sol":[101, 250],
"2":[8], "2map2sol":[102, 252],
"3":[4, 9], "3map2sol":[[103, 254], [104, 256]]}
"""
output = {}
for c,xs in enumerate(rawx):
if str(xs) not in output:
output[str(xs)] = [rawy[c]]
output[str(xs)+"map2sol"] = [[solx[c], soly[c]]]
else:
output[str(xs)].append(rawy[c])
output[str(xs)+"map2sol"].append([solx[c], soly[c]])
assert len([solx[c], soly[c]])==2
return output
def minRowInCol(columns):
""" Returns a dictionary where each key is the solar X position of each raw
coordinate chosen (edges between det0&3 and 1&2) with its value being the
solar Y coordinate.
Parameters
----------
columns : dictionary
Information of the raw and solar coordinates of the counts in order to
each other.
Returns
-------
A dictionary.
Examples
--------
cols = {"1":[7], "1map2sol":[101, 250],
"2":[8], "2map2sol":[102, 252],
"3":[4, 9], "3map2sol":[[103, 254], [104, 256]]}
minRowInCol(cols)
>>> {"101":250, "102":252, "103":254}
"""
output_sol = {}
for key in columns.keys():
if "map2sol" not in key:
# find the corresponding solar coords to the minimum rawy
sol_coords = columns[key+"map2sol"][np.argmin(columns[key])]
# now have the solarX key with the solarY as its value
assert len(sol_coords)==2
output_sol[str(sol_coords[0])] = sol_coords[1]
return output_sol
def maxRowInCol(columns):
""" Returns a dictionary where each key is the solar X position of each raw
coordinate chosen (edges between det0&3 and 1&2) with its value being the
solar Y coordinate.
Parameters
----------
columns : dictionary
Information of the raw and solar coordinates of the counts in order to
each other.
Returns
-------
A dictionary.
Examples
--------
cols = {"1":[7], "1map2sol":[101, 250],
"2":[8], "2map2sol":[102, 252],
"3":[4, 9], "3map2sol":[[103, 254], [104, 256]]}
minRowInCol(cols)
>>> {"101":250, "102":252, "104":256}
"""
output_sol = {}
for key in columns.keys():
if "map2sol" not in key:
# find the corresponding solar coords to the maximum rawy
sol_coords = columns[key+"map2sol"][np.argmax(columns[key])]
# now have the solarX key with the solarY as its value
output_sol[str(sol_coords[0])] = sol_coords[1]
return output_sol
def getXandY(colsAndRows):
""" Returns solar X and Y coordinates.
Parameters
----------
colsAndRows : dictionary
Keys as the solar X and values of solar Y coordinates.
Returns
-------
Two numpy arrays.
Examples
--------
colsAndRows = {"101":250, "102":252, "104":256}
getXandY(colsAndRows)
>>> [101, 102, 104], [250, 252, 256]
"""
return np.array([int(c) for c in list(colsAndRows.keys())]), np.array(list(colsAndRows.values()))
def getDegrees(grad):
""" Returns angle of rotation in degrees.
Parameters
----------
grad : float
Gradient.
Returns
-------
Angle in degrees.
Examples
--------
grad = 1
getDegrees(grad)
>>> 45
"""
return np.arctan(grad)*(180/np.pi)
def straightLine(x, m, c):
""" A straight line model.
Parameters
----------
x : numpy list
X positions.
m : float
Gradient.
c : float
Y-intercept.
Returns
-------
Ys for a straight line.
Examples
--------
x, m, c = [1, 2], 0.25, 1
straightLine(x, m, c)
>>> [1.25, 1.5]
"""
return m*x + c
def getAngle_plot(rawx, rawy, solx, soly, det, **kwargs):
""" Returns the rotation of the NuSTAR FoV from the gradient of the edges between
det0&3 and 1&2 for whatever detector(s) you give it.
Parameters
----------
rawx, rawy : lists
Raw coordinates of the evt counts.
solx, soly : lists
Solar coordinates of the sunpos evt counts.
det : int
The detector for the counts (0--3).
**kwargs : Can pass an axis to it.
Returns
-------
A float of the rotation from "North" in degrees where anticlockwise is positive.
This assumes the rotation is between 90 and -90 degrees.
Examples
--------
fig, axs = plt.subplots(2,2, figsize=(14,10))
# get orientation from the nustar_swguide.pdf, Figure 3
gradient0 = getAngle_plot(rawx0, rawy0, solx0, soly0, 0, axes=axs[0][0])
gradient1 = getAngle_plot(rawx1, rawy1, solx1, soly1, 1, axes=axs[0][1])
gradient2 = getAngle_plot(rawx2, rawy2, solx2, soly2, 2, axes=axs[1][1])
gradient3 = getAngle_plot(rawx3, rawy3, solx3, soly3, 3, axes=axs[1][0])
plt.show()
"""
k = {"axes":plt}
for kw in kwargs:
k[kw] = kwargs[kw]
if det==0:
cols = collectSameXs(rawy, rawx, solx, soly)
m_row_per_col = maxRowInCol(cols)
elif det==1:
cols = collectSameXs(rawx, rawy, solx, soly)
m_row_per_col = maxRowInCol(cols)
elif det==2:
cols = collectSameXs(rawy, rawx, solx, soly)
m_row_per_col = maxRowInCol(cols)
elif det==3:
cols = collectSameXs(rawx, rawy, solx, soly)
m_row_per_col = maxRowInCol(cols)
# working with rawx and y to make sure using correct edge then find the
# corresponding entries in solar coords
aAndY = getXandY(m_row_per_col)
x, y = aAndY[0], aAndY[1]
xlim, ylim = [np.min(x)-5, np.max(x)+5], [np.min(y)-5, np.max(y)+5]
#if det in [0, 1]:
# x = x[y>np.median(y)]
# y = y[y>np.median(y)]
#elif det in [2, 3]:
# x = x[y<np.median(y)]
# y = y[y<np.median(y)]
popt, pcov = curve_fit(straightLine, x, y, p0=[0, np.mean(y)])
k["axes"].plot(x, y, '.')
k["axes"].plot(x, straightLine(x, *popt))
if k["axes"] != plt:
k["axes"].set_ylim(ylim)
k["axes"].set_xlim(xlim)
k["axes"].set_ylabel("Solar-Y")
k["axes"].set_xlabel("Solar-X")
else:
k["axes"].ylim(ylim)
k["axes"].xlim(xlim)
k["axes"].ylabel("Solar-Y")
k["axes"].xlabel("Solar-X")
k["axes"].text(np.min(x), (ylim[0]+ylim[1])/2+5, "Grad: "+str(popt[0]))
k["axes"].text(np.min(x), (ylim[0]+ylim[1])/2, "Angle: "+str(np.arctan(popt[0]))+" rad")
k["axes"].text(np.min(x), (ylim[0]+ylim[1])/2-5, "Angle: "+str(np.arctan(popt[0])*(180/np.pi))+" deg")
k["axes"].text(np.max(x)*0.99, ylim[0]*1.001, "DET: "+str(det), fontweight="bold")
return np.arctan(popt[0])*(180/np.pi)
scipy.ndimage import rotate\n'), ((22208, 22223), 'numpy.isnan', 'np.isnan', (['array'], {}), '(array)\n', (22216, 22223), True, 'import numpy as np\n'), ((22339, 22350), 'numpy.sum', 'np.sum', (['row'], {}), '(row)\n', (22345, 22350), True, 'import numpy as np\n'), ((22584, 22595), 'numpy.sum', 'np.sum', (['col'], {}), '(col)\n', (22590, 22595), True, 'import numpy as np\n'), ((23025, 23112), 'astropy.coordinates.SkyCoord', 'SkyCoord', (['(-1200 * u.arcsec)', '(-1200 * u.arcsec)'], {'frame': 'sunpy_map_obj.coordinate_frame'}), '(-1200 * u.arcsec, -1200 * u.arcsec, frame=sunpy_map_obj.\n coordinate_frame)\n', (23033, 23112), False, 'from astropy.coordinates import SkyCoord\n'), ((23121, 23206), 'astropy.coordinates.SkyCoord', 'SkyCoord', (['(1200 * u.arcsec)', '(1200 * u.arcsec)'], {'frame': 'sunpy_map_obj.coordinate_frame'}), '(1200 * u.arcsec, 1200 * u.arcsec, frame=sunpy_map_obj.coordinate_frame\n )\n', (23129, 23206), False, 'from astropy.coordinates import SkyCoord\n'), ((26841, 26868), 'numpy.average', 'np.average', (['ltimes_in_range'], {}), '(ltimes_in_range)\n', (26851, 26868), True, 'import numpy as np\n'), ((28147, 28216), 'scipy.ndimage.gaussian_filter', 'ndimage.gaussian_filter', (['self.nustar_map.data', 'gaussian_width'], {'mode': 'm'}), '(self.nustar_map.data, gaussian_width, mode=m)\n', (28170, 28216), False, 'from scipy import ndimage\n'), ((30288, 30305), 'numpy.shape', 'np.shape', (['nm.data'], {}), '(nm.data)\n', (30296, 30305), True, 'import numpy as np\n'), ((31308, 31346), 'matplotlib.colors.Normalize', 'colors.Normalize', ([], {'vmin': 'dmin', 'vmax': 'dmax'}), '(vmin=dmin, vmax=dmax)\n', (31324, 31346), True, 'import matplotlib.colors as colors\n'), ((31390, 31415), 'pylab.cm.get_cmap', 'cm.get_cmap', (['"""Spectral_r"""'], {}), "('Spectral_r')\n", (31401, 31415), False, 'from pylab import figure, cm\n'), ((34323, 34507), 'matplotlib.pyplot.annotate', 'plt.annotate', (["self.annotations['text']", "self.annotations['position']"], {'color': "self.annotations['color']", 'fontsize': "self.annotations['fontsize']", 'weight': "self.annotations['weight']"}), "(self.annotations['text'], self.annotations['position'], color=\n self.annotations['color'], fontsize=self.annotations['fontsize'],\n weight=self.annotations['weight'])\n", (34335, 34507), True, 'import matplotlib.pyplot as plt\n'), ((35680, 35755), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'fraction': '(0.035)', 'pad': '(0.03)', 'label': "(self.cbar_title + ' $s^{-1}$')"}), "(fraction=0.035, pad=0.03, label=self.cbar_title + ' $s^{-1}$')\n", (35692, 35755), True, 'import matplotlib.pyplot as plt\n'), ((35779, 35840), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'fraction': '(0.035)', 'pad': '(0.03)', 'label': 'self.cbar_title'}), '(fraction=0.035, pad=0.03, label=self.cbar_title)\n', (35791, 35840), True, 'import matplotlib.pyplot as plt\n'), ((37030, 37081), 'matplotlib.pyplot.savefig', 'plt.savefig', (['save_fig'], {'dpi': '(300)', 'bbox_inches': '"""tight"""'}), "(save_fig, dpi=300, bbox_inches='tight')\n", (37041, 37081), True, 'import matplotlib.pyplot as plt\n'), ((37123, 37138), 'matplotlib.pyplot.show', 'plt.show', (['"""all"""'], {}), "('all')\n", (37131, 37138), True, 'import matplotlib.pyplot as plt\n'), ((39819, 39850), 'numpy.insert', 'np.insert', (['new_x', '(0)', '[new_x[0]]'], {}), '(new_x, 0, [new_x[0]])\n', (39828, 39850), True, 'import numpy as np\n'), ((39871, 39900), 'numpy.append', 'np.append', (['new_x', '[new_x[-1]]'], {}), '(new_x, [new_x[-1]])\n', (39880, 39900), True, 
'import numpy as np\n'), ((39920, 39944), 'numpy.insert', 'np.insert', (['new_y', '(0)', '[0]'], {}), '(new_y, 0, [0])\n', (39929, 39944), True, 'import numpy as np\n'), ((39965, 39986), 'numpy.append', 'np.append', (['new_y', '[0]'], {}), '(new_y, [0])\n', (39974, 39986), True, 'import numpy as np\n'), ((40264, 40282), 'matplotlib.dates.date2num', 'mdates.date2num', (['d'], {}), '(d)\n', (40279, 40282), True, 'import matplotlib.dates as mdates\n'), ((46233, 46245), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (46243, 46245), True, 'import matplotlib.pyplot as plt\n'), ((46263, 46273), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (46271, 46273), True, 'import matplotlib.pyplot as plt\n'), ((46378, 46425), 'matplotlib.pyplot.title', 'plt.title', (["('Livetime - ' + lt_start_hhmmss[:10])"], {}), "('Livetime - ' + lt_start_hhmmss[:10])\n", (46387, 46425), True, 'import matplotlib.pyplot as plt\n'), ((46463, 46513), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('Start Time - ' + lt_start_hhmmss[12:])"], {}), "('Start Time - ' + lt_start_hhmmss[12:])\n", (46473, 46513), True, 'import matplotlib.pyplot as plt\n'), ((46524, 46555), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Livetime Fraction"""'], {}), "('Livetime Fraction')\n", (46534, 46555), True, 'import matplotlib.pyplot as plt\n'), ((46681, 46697), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 1]'], {}), '([0, 1])\n', (46689, 46697), True, 'import matplotlib.pyplot as plt\n'), ((46715, 46744), 'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', (['"""%H:%M"""'], {}), "('%H:%M')\n", (46735, 46744), True, 'import matplotlib.dates as mdates\n'), ((46893, 46916), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(30)'}), '(rotation=30)\n', (46903, 46916), True, 'import matplotlib.pyplot as plt\n'), ((47887, 47911), 'numpy.min', 'np.min', (["cleanevt['TIME']"], {}), "(cleanevt['TIME'])\n", (47893, 47911), True, 'import numpy as np\n'), ((48183, 48207), 'numpy.max', 'np.max', (["cleanevt['TIME']"], {}), "(cleanevt['TIME'])\n", (48189, 48207), True, 'import numpy as np\n'), ((51057, 51083), 'numpy.zeros', 'np.zeros', (['(t_bin_number + 1)'], {}), '(t_bin_number + 1)\n', (51065, 51083), True, 'import numpy as np\n'), ((61049, 61059), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (61057, 61059), True, 'import matplotlib.pyplot as plt\n'), ((64802, 64812), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (64810, 64812), True, 'import matplotlib.pyplot as plt\n'), ((66242, 66259), 'numpy.max', 'np.max', (['er_and_tc'], {}), '(er_and_tc)\n', (66248, 66259), True, 'import numpy as np\n'), ((68283, 68293), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (68291, 68293), True, 'import matplotlib.pyplot as plt\n'), ((69137, 69155), 'astropy.io.fits.open', 'fits.open', (['chuFile'], {}), '(chuFile)\n', (69146, 69155), False, 'from astropy.io import fits\n'), ((71609, 71644), 'os.path.exists', 'os.path.exists', (["(nustar_folder + '/')"], {}), "(nustar_folder + '/')\n", (71623, 71644), False, 'import os\n'), ((71706, 71729), 'os.mkdir', 'os.mkdir', (['nustar_folder'], {}), '(nustar_folder)\n', (71714, 71729), False, 'import os\n'), ((73441, 73515), 'pickle.dump', 'pickle.dump', (['evt_to_store', 'evt_save_file'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(evt_to_store, evt_save_file, protocol=pickle.HIGHEST_PROTOCOL)\n', (73452, 73515), False, 'import pickle\n'), ((73977, 74049), 'pickle.dump', 'pickle.dump', (['hk_to_store', 'hk_save_file'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), 
'(hk_to_store, hk_save_file, protocol=pickle.HIGHEST_PROTOCOL)\n', (73988, 74049), False, 'import pickle\n'), ((74490, 74564), 'pickle.dump', 'pickle.dump', (['lvt_to_store', 'lvt_save_file'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(lvt_to_store, lvt_save_file, protocol=pickle.HIGHEST_PROTOCOL)\n', (74501, 74564), False, 'import pickle\n'), ((75135, 75209), 'pickle.dump', 'pickle.dump', (['map_to_store', 'map_save_file'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(map_to_store, map_save_file, protocol=pickle.HIGHEST_PROTOCOL)\n', (75146, 75209), False, 'import pickle\n'), ((75694, 75766), 'pickle.dump', 'pickle.dump', (['lc_to_store', 'lc_save_file'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(lc_to_store, lc_save_file, protocol=pickle.HIGHEST_PROTOCOL)\n', (75705, 75766), False, 'import pickle\n'), ((76216, 76289), 'pickle.dump', 'pickle.dump', (['self.__dict__', 'object_file'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(self.__dict__, object_file, protocol=pickle.HIGHEST_PROTOCOL)\n', (76227, 76289), False, 'import pickle\n'), ((77006, 77029), 'pickle.load', 'pickle.load', (['input_file'], {}), '(input_file)\n', (77017, 77029), False, 'import pickle\n'), ((79045, 79080), 'numpy.log10', 'np.log10', (["temp_response_dataxy['x']"], {}), "(temp_response_dataxy['x'])\n", (79053, 79080), True, 'import numpy as np\n'), ((79082, 79117), 'numpy.log10', 'np.log10', (["temp_response_dataxy['y']"], {}), "(temp_response_dataxy['y'])\n", (79090, 79117), True, 'import numpy as np\n'), ((79903, 79939), 'numpy.argmax', 'np.argmax', (["temp_response_dataxy['y']"], {}), "(temp_response_dataxy['y'])\n", (79912, 79939), True, 'import numpy as np\n'), ((80810, 80830), 'numpy.array', 'np.array', (['e_response'], {}), '(e_response)\n', (80818, 80830), True, 'import numpy as np\n'), ((80831, 80854), 'numpy.isfinite', 'np.isfinite', (['e_response'], {}), '(e_response)\n', (80842, 80854), True, 'import numpy as np\n'), ((80877, 80897), 'numpy.array', 'np.array', (['flux_range'], {}), '(flux_range)\n', (80885, 80897), True, 'import numpy as np\n'), ((80898, 80921), 'numpy.isfinite', 'np.isfinite', (['flux_range'], {}), '(flux_range)\n', (80909, 80921), True, 'import numpy as np\n'), ((86003, 86027), 'numpy.arange', 'np.arange', (['(1.6)', '(79)', '(0.04)'], {}), '(1.6, 79, 0.04)\n', (86012, 86027), True, 'import numpy as np\n'), ((86193, 86217), 'numpy.arange', 'np.arange', (['(1.6)', '(79)', '(0.04)'], {}), '(1.6, 79, 0.04)\n', (86202, 86217), True, 'import numpy as np\n'), ((86354, 86375), 'numpy.max', 'np.max', (['hist_gradeAll'], {}), '(hist_gradeAll)\n', (86360, 86375), True, 'import numpy as np\n'), ((92222, 92246), 'numpy.arange', 'np.arange', (['(1.6)', '(79)', '(0.04)'], {}), '(1.6, 79, 0.04)\n', (92231, 92246), True, 'import numpy as np\n'), ((93390, 93409), 'numpy.max', 'np.max', (['hist_grade0'], {}), '(hist_grade0)\n', (93396, 93409), True, 'import numpy as np\n'), ((93441, 93459), 'numpy.min', 'np.min', (['min_ratios'], {}), '(min_ratios)\n', (93447, 93459), True, 'import numpy as np\n'), ((93466, 93484), 'numpy.max', 'np.max', (['max_ratios'], {}), '(max_ratios)\n', (93472, 93484), True, 'import numpy as np\n'), ((104255, 104264), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (104261, 104264), True, 'import numpy as np\n'), ((7591, 7685), 'matplotlib.dates.MinuteLocator', 'mdates.MinuteLocator', ([], {'byminute': '[0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55]', 'interval': '(1)'}), '(byminute=[0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55\n ], interval=1)\n', (7611, 7685), True, 'import 
matplotlib.dates as mdates\n'), ((19071, 19091), 'numpy.arange', 'np.arange', (['(0)', '(9)', '(0.5)'], {}), '(0, 9, 0.5)\n', (19080, 19091), True, 'import numpy as np\n'), ((19697, 19714), 'os.path.exists', 'os.path.exists', (['t'], {}), '(t)\n', (19711, 19714), False, 'import os\n'), ((20859, 20879), 'astropy.io.fits.open', 'fits.open', (['psf_array'], {}), '(psf_array)\n', (20868, 20879), False, 'from astropy.io import fits\n'), ((23446, 23555), 'astropy.coordinates.SkyCoord', 'SkyCoord', (["(bottom_left['x'] * u.arcsec)", "(bottom_left['y'] * u.arcsec)"], {'frame': 'sunpy_map_obj.coordinate_frame'}), "(bottom_left['x'] * u.arcsec, bottom_left['y'] * u.arcsec, frame=\n sunpy_map_obj.coordinate_frame)\n", (23454, 23555), False, 'from astropy.coordinates import SkyCoord\n'), ((23564, 23669), 'astropy.coordinates.SkyCoord', 'SkyCoord', (["(top_right['x'] * u.arcsec)", "(top_right['y'] * u.arcsec)"], {'frame': 'sunpy_map_obj.coordinate_frame'}), "(top_right['x'] * u.arcsec, top_right['y'] * u.arcsec, frame=\n sunpy_map_obj.coordinate_frame)\n", (23572, 23669), False, 'from astropy.coordinates import SkyCoord\n'), ((29263, 29282), 'numpy.max', 'np.max', (['finite_vals'], {}), '(finite_vals)\n', (29269, 29282), True, 'import numpy as np\n'), ((30397, 30456), 'astropy.units.Quantity', 'u.Quantity', (['[nx * rebin_factor, ny * rebin_factor]', 'u.pixel'], {}), '([nx * rebin_factor, ny * rebin_factor], u.pixel)\n', (30407, 30456), True, 'import astropy.units as u\n'), ((31624, 31660), 'matplotlib.colors.LogNorm', 'colors.LogNorm', ([], {'vmin': 'dmin', 'vmax': 'dmax'}), '(vmin=dmin, vmax=dmax)\n', (31638, 31660), True, 'import matplotlib.colors as colors\n'), ((31705, 31730), 'pylab.cm.get_cmap', 'cm.get_cmap', (['"""Spectral_r"""'], {}), "('Spectral_r')\n", (31716, 31730), False, 'from pylab import figure, cm\n'), ((35898, 35913), 'numpy.shape', 'np.shape', (['boxes'], {}), '(boxes)\n', (35906, 35913), True, 'import numpy as np\n'), ((35990, 36080), 'astropy.coordinates.SkyCoord', 'SkyCoord', (['(rect[0] * u.arcsec)', '(rect[1] * u.arcsec)'], {'frame': 'self.rsn_map.coordinate_frame'}), '(rect[0] * u.arcsec, rect[1] * u.arcsec, frame=self.rsn_map.\n coordinate_frame)\n', (35998, 36080), False, 'from astropy.coordinates import SkyCoord\n'), ((42542, 42566), 'os.path.join', 'os.path.join', (['sd', 'in_dir'], {}), '(sd, in_dir)\n', (42554, 42566), False, 'import os\n'), ((45869, 45889), 'datetime.timedelta', 'timedelta', ([], {'seconds': 't'}), '(seconds=t)\n', (45878, 45889), False, 'from datetime import timedelta\n'), ((46967, 46977), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (46975, 46977), True, 'import matplotlib.pyplot as plt\n'), ((49529, 49577), 'numpy.histogram', 'np.histogram', (["cleanevt['TIME']", 't_bin_conversion'], {}), "(cleanevt['TIME'], t_bin_conversion)\n", (49541, 49577), True, 'import numpy as np\n'), ((51662, 51710), 'numpy.histogram', 'np.histogram', (["cleanevt['TIME']", 'self.t_bin_edges'], {}), "(cleanevt['TIME'], self.t_bin_edges)\n", (51674, 51710), True, 'import numpy as np\n'), ((59007, 59019), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (59017, 59019), True, 'import matplotlib.pyplot as plt\n'), ((59041, 59051), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (59049, 59051), True, 'import matplotlib.pyplot as plt\n'), ((59263, 59367), 'matplotlib.pyplot.title', 'plt.title', (["('NuSTAR FPM' + self.fpm + ' ' + self.e_range_str + ' keV Light Curve - ' +\n start_yyyymmdd)"], {}), "('NuSTAR FPM' + self.fpm + ' ' + self.e_range_str +\n 
' keV Light Curve - ' + start_yyyymmdd)\n", (59272, 59367), True, 'import matplotlib.pyplot as plt\n'), ((59459, 59501), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('Start Time - ' + start_hhmmss)"], {}), "('Start Time - ' + start_hhmmss)\n", (59469, 59501), True, 'import matplotlib.pyplot as plt\n'), ((59627, 59656), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Counts $s^{-1}$"""'], {}), "('Counts $s^{-1}$')\n", (59637, 59656), True, 'import matplotlib.pyplot as plt\n'), ((59696, 59725), 'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', (['"""%H:%M"""'], {}), "('%H:%M')\n", (59716, 59725), True, 'import matplotlib.dates as mdates\n'), ((59850, 59873), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(30)'}), '(rotation=30)\n', (59860, 59873), True, 'import matplotlib.pyplot as plt\n'), ((60056, 60068), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (60066, 60068), True, 'import matplotlib.pyplot as plt\n'), ((60090, 60100), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (60098, 60100), True, 'import matplotlib.pyplot as plt\n'), ((60292, 60396), 'matplotlib.pyplot.title', 'plt.title', (["('NuSTAR FPM' + self.fpm + ' ' + self.e_range_str + ' keV Light Curve - ' +\n start_yyyymmdd)"], {}), "('NuSTAR FPM' + self.fpm + ' ' + self.e_range_str +\n ' keV Light Curve - ' + start_yyyymmdd)\n", (60301, 60396), True, 'import matplotlib.pyplot as plt\n'), ((60504, 60546), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('Start Time - ' + start_hhmmss)"], {}), "('Start Time - ' + start_hhmmss)\n", (60514, 60546), True, 'import matplotlib.pyplot as plt\n'), ((60666, 60686), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Counts"""'], {}), "('Counts')\n", (60676, 60686), True, 'import matplotlib.pyplot as plt\n'), ((60726, 60755), 'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', (['"""%H:%M"""'], {}), "('%H:%M')\n", (60746, 60755), True, 'import matplotlib.dates as mdates\n'), ((60880, 60903), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(30)'}), '(rotation=30)\n', (60890, 60903), True, 'import matplotlib.pyplot as plt\n'), ((64056, 64085), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': 't'}), '(seconds=t)\n', (64074, 64085), False, 'import datetime\n'), ((66097, 66147), 'numpy.histogram', 'np.histogram', (["specific_lc_data['TIME']", 'no_of_time'], {}), "(specific_lc_data['TIME'], no_of_time)\n", (66109, 66147), True, 'import numpy as np\n'), ((69011, 69026), 'os.path.isfile', 'isfile', (['chuFile'], {}), '(chuFile)\n', (69017, 69026), False, 'from os.path import isfile\n'), ((69073, 69088), 'os.path.isfile', 'isfile', (['chuFile'], {}), '(chuFile)\n', (69079, 69088), False, 'from os.path import isfile\n'), ((71925, 71960), 'os.path.exists', 'os.path.exists', (["(nustar_folder + '/')"], {}), "(nustar_folder + '/')\n", (71939, 71960), False, 'import os\n'), ((72046, 72098), 'subprocess.check_output', 'subprocess.check_output', (["['rm', '-r', nustar_folder]"], {}), "(['rm', '-r', nustar_folder])\n", (72069, 72098), False, 'import subprocess\n'), ((72139, 72162), 'os.mkdir', 'os.mkdir', (['nustar_folder'], {}), '(nustar_folder)\n', (72147, 72162), False, 'import os\n'), ((75978, 76046), 'pickle.dump', 'pickle.dump', (['kwargs', 'own_save_file'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(kwargs, own_save_file, protocol=pickle.HIGHEST_PROTOCOL)\n', (75989, 76046), False, 'import pickle\n'), ((80196, 80229), 'numpy.max', 'np.max', (["temp_response_dataxy['y']"], {}), "(temp_response_dataxy['y'])\n", (80202, 80229), True, 
'import numpy as np\n'), ((81194, 81207), 'numpy.max', 'np.max', (['f_err'], {}), '(f_err)\n', (81200, 81207), True, 'import numpy as np\n'), ((86488, 86514), 'numpy.max', 'np.max', (['ratio[fintie_vals]'], {}), '(ratio[fintie_vals])\n', (86494, 86514), True, 'import numpy as np\n'), ((98860, 98883), 'numpy.argmin', 'np.argmin', (['columns[key]'], {}), '(columns[key])\n', (98869, 98883), True, 'import numpy as np\n'), ((99959, 99982), 'numpy.argmax', 'np.argmax', (['columns[key]'], {}), '(columns[key])\n', (99968, 99982), True, 'import numpy as np\n'), ((103265, 103274), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (103271, 103274), True, 'import numpy as np\n'), ((103278, 103287), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (103284, 103287), True, 'import numpy as np\n'), ((103293, 103302), 'numpy.min', 'np.min', (['y'], {}), '(y)\n', (103299, 103302), True, 'import numpy as np\n'), ((103306, 103315), 'numpy.max', 'np.max', (['y'], {}), '(y)\n', (103312, 103315), True, 'import numpy as np\n'), ((103550, 103560), 'numpy.mean', 'np.mean', (['y'], {}), '(y)\n', (103557, 103560), True, 'import numpy as np\n'), ((7753, 7785), 'matplotlib.dates.MinuteLocator', 'mdates.MinuteLocator', ([], {'interval': '(2)'}), '(interval=2)\n', (7773, 7785), True, 'import matplotlib.dates as mdates\n'), ((7831, 7863), 'matplotlib.dates.MinuteLocator', 'mdates.MinuteLocator', ([], {'interval': '(1)'}), '(interval=1)\n', (7851, 7863), True, 'import matplotlib.dates as mdates\n'), ((19745, 19757), 'astropy.io.fits.open', 'fits.open', (['t'], {}), '(t)\n', (19754, 19757), False, 'from astropy.io import fits\n'), ((29155, 29170), 'numpy.isfinite', 'np.isfinite', (['dd'], {}), '(dd)\n', (29166, 29170), True, 'import numpy as np\n'), ((36406, 36496), 'astropy.coordinates.SkyCoord', 'SkyCoord', (['(rect[0] * u.arcsec)', '(rect[1] * u.arcsec)'], {'frame': 'self.rsn_map.coordinate_frame'}), '(rect[0] * u.arcsec, rect[1] * u.arcsec, frame=self.rsn_map.\n coordinate_frame)\n', (36414, 36496), False, 'from astropy.coordinates import SkyCoord\n'), ((43101, 43130), 'os.path.join', 'os.path.join', (['_dirpath', '_file'], {}), '(_dirpath, _file)\n', (43113, 43130), False, 'import os\n'), ((58806, 58833), 'numpy.average', 'np.average', (['ltimes_in_range'], {}), '(ltimes_in_range)\n', (58816, 58833), True, 'import numpy as np\n'), ((67701, 67721), 'datetime.timedelta', 'timedelta', ([], {'seconds': 't'}), '(seconds=t)\n', (67710, 67721), False, 'from datetime import timedelta\n'), ((72370, 72405), 'os.path.exists', 'os.path.exists', (["(nustar_folder + '/')"], {}), "(nustar_folder + '/')\n", (72384, 72405), False, 'import os\n'), ((72620, 72643), 'os.mkdir', 'os.mkdir', (['nustar_folder'], {}), '(nustar_folder)\n', (72628, 72643), False, 'import os\n'), ((79150, 79171), 'numpy.log10', 'np.log10', (['plasma_temp'], {}), '(plasma_temp)\n', (79158, 79171), True, 'import numpy as np\n'), ((80320, 80353), 'numpy.max', 'np.max', (["temp_response_dataxy['y']"], {}), "(temp_response_dataxy['y'])\n", (80326, 80353), True, 'import numpy as np\n'), ((81030, 81048), 'numpy.max', 'np.max', (['flux_range'], {}), '(flux_range)\n', (81036, 81048), True, 'import numpy as np\n'), ((81088, 81106), 'numpy.min', 'np.min', (['flux_range'], {}), '(flux_range)\n', (81094, 81106), True, 'import numpy as np\n'), ((92563, 92587), 'numpy.arange', 'np.arange', (['(1.6)', '(79)', '(0.04)'], {}), '(1.6, 79, 0.04)\n', (92572, 92587), True, 'import numpy as np\n'), ((92821, 92845), 'numpy.arange', 'np.arange', (['(1.6)', '(79)', '(0.04)'], {}), '(1.6, 79, 
0.04)\n', (92830, 92845), True, 'import numpy as np\n'), ((92956, 92974), 'numpy.isfinite', 'np.isfinite', (['ratio'], {}), '(ratio)\n', (92967, 92974), True, 'import numpy as np\n'), ((93085, 93103), 'numpy.isfinite', 'np.isfinite', (['ratio'], {}), '(ratio)\n', (93096, 93103), True, 'import numpy as np\n'), ((104101, 104119), 'numpy.arctan', 'np.arctan', (['popt[0]'], {}), '(popt[0])\n', (104110, 104119), True, 'import numpy as np\n'), ((15131, 15141), 'numpy.mean', 'np.mean', (['y'], {}), '(y)\n', (15138, 15141), True, 'import numpy as np\n'), ((28279, 28311), 'numpy.nonzero', 'np.nonzero', (['self.nustar_map.data'], {}), '(self.nustar_map.data)\n', (28289, 28311), True, 'import numpy as np\n'), ((28510, 28543), 'numpy.isfinite', 'np.isfinite', (['self.nustar_map.data'], {}), '(self.nustar_map.data)\n', (28521, 28543), True, 'import numpy as np\n'), ((29214, 29237), 'numpy.nonzero', 'np.nonzero', (['finite_vals'], {}), '(finite_vals)\n', (29224, 29237), True, 'import numpy as np\n'), ((51923, 51940), 'numpy.shape', 'np.shape', (['sub_reg'], {}), '(sub_reg)\n', (51931, 51940), True, 'import numpy as np\n'), ((53006, 53022), 'numpy.array', 'np.array', (['counts'], {}), '(counts)\n', (53014, 53022), True, 'import numpy as np\n'), ((59111, 59131), 'datetime.timedelta', 'timedelta', ([], {'seconds': 't'}), '(seconds=t)\n', (59120, 59131), False, 'from datetime import timedelta\n'), ((60143, 60163), 'datetime.timedelta', 'timedelta', ([], {'seconds': 't'}), '(seconds=t)\n', (60152, 60163), False, 'from datetime import timedelta\n'), ((93012, 93030), 'numpy.isfinite', 'np.isfinite', (['ratio'], {}), '(ratio)\n', (93023, 93030), True, 'import numpy as np\n'), ((93141, 93159), 'numpy.isfinite', 'np.isfinite', (['ratio'], {}), '(ratio)\n', (93152, 93159), True, 'import numpy as np\n'), ((104196, 104214), 'numpy.arctan', 'np.arctan', (['popt[0]'], {}), '(popt[0])\n', (104205, 104214), True, 'import numpy as np\n'), ((81330, 81348), 'numpy.max', 'np.max', (['e_response'], {}), '(e_response)\n', (81336, 81348), True, 'import numpy as np\n'), ((81407, 81425), 'numpy.min', 'np.min', (['e_response'], {}), '(e_response)\n', (81413, 81425), True, 'import numpy as np\n'), ((85565, 85589), 'numpy.min', 'np.min', (["evt_data['time']"], {}), "(evt_data['time'])\n", (85571, 85589), True, 'import numpy as np\n'), ((85672, 85696), 'numpy.max', 'np.max', (["evt_data['time']"], {}), "(evt_data['time'])\n", (85678, 85696), True, 'import numpy as np\n'), ((91716, 91740), 'numpy.min', 'np.min', (["evt_data['time']"], {}), "(evt_data['time'])\n", (91722, 91740), True, 'import numpy as np\n'), ((91823, 91847), 'numpy.max', 'np.max', (["evt_data['time']"], {}), "(evt_data['time'])\n", (91829, 91847), True, 'import numpy as np\n'), ((53045, 53062), 'numpy.shape', 'np.shape', (['sub_reg'], {}), '(sub_reg)\n', (53053, 53062), True, 'import numpy as np\n'), ((54070, 54086), 'numpy.array', 'np.array', (['counts'], {}), '(counts)\n', (54078, 54086), True, 'import numpy as np\n'), ((57651, 57661), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (57659, 57661), True, 'import matplotlib.pyplot as plt\n'), ((5941, 5970), 'numpy.min', 'np.min', (["self.cleanevt['TIME']"], {}), "(self.cleanevt['TIME'])\n", (5947, 5970), True, 'import numpy as np\n'), ((6067, 6096), 'numpy.max', 'np.max', (["self.cleanevt['TIME']"], {}), "(self.cleanevt['TIME'])\n", (6073, 6096), True, 'import numpy as np\n'), ((46159, 46180), 'numpy.min', 'np.min', (['self.hk_times'], {}), '(self.hk_times)\n', (46165, 46180), True, 'import numpy as np\n'), 
((51370, 51383), 'numpy.min', 'np.min', (['times'], {}), '(times)\n', (51376, 51383), True, 'import numpy as np\n'), ((51473, 51486), 'numpy.min', 'np.min', (['times'], {}), '(times)\n', (51479, 51486), True, 'import numpy as np\n'), ((55011, 55023), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (55021, 55023), True, 'import matplotlib.pyplot as plt\n'), ((55061, 55071), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (55069, 55071), True, 'import matplotlib.pyplot as plt\n'), ((55314, 55424), 'matplotlib.pyplot.title', 'plt.title', (["('NuSTAR FPM' + self.fpm + ' ' + self.e_range_str + ' keV Light Curve - ' +\n start_yyyymmdd + box)"], {}), "('NuSTAR FPM' + self.fpm + ' ' + self.e_range_str +\n ' keV Light Curve - ' + start_yyyymmdd + box)\n", (55323, 55424), True, 'import matplotlib.pyplot as plt\n'), ((55548, 55590), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('Start Time - ' + start_hhmmss)"], {}), "('Start Time - ' + start_hhmmss)\n", (55558, 55590), True, 'import matplotlib.pyplot as plt\n'), ((55764, 55793), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Counts $s^{-1}$"""'], {}), "('Counts $s^{-1}$')\n", (55774, 55793), True, 'import matplotlib.pyplot as plt\n'), ((55833, 55862), 'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', (['"""%H:%M"""'], {}), "('%H:%M')\n", (55853, 55862), True, 'import matplotlib.dates as mdates\n'), ((56035, 56058), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(30)'}), '(rotation=30)\n', (56045, 56058), True, 'import matplotlib.pyplot as plt\n'), ((56244, 56256), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (56254, 56256), True, 'import matplotlib.pyplot as plt\n'), ((56294, 56304), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (56302, 56304), True, 'import matplotlib.pyplot as plt\n'), ((56536, 56646), 'matplotlib.pyplot.title', 'plt.title', (["('NuSTAR FPM' + self.fpm + ' ' + self.e_range_str + ' keV Light Curve - ' +\n start_yyyymmdd + box)"], {}), "('NuSTAR FPM' + self.fpm + ' ' + self.e_range_str +\n ' keV Light Curve - ' + start_yyyymmdd + box)\n", (56545, 56646), True, 'import matplotlib.pyplot as plt\n'), ((56770, 56812), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('Start Time - ' + start_hhmmss)"], {}), "('Start Time - ' + start_hhmmss)\n", (56780, 56812), True, 'import matplotlib.pyplot as plt\n'), ((56964, 56984), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Counts"""'], {}), "('Counts')\n", (56974, 56984), True, 'import matplotlib.pyplot as plt\n'), ((57024, 57053), 'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', (['"""%H:%M"""'], {}), "('%H:%M')\n", (57044, 57053), True, 'import matplotlib.dates as mdates\n'), ((57226, 57249), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(30)'}), '(rotation=30)\n', (57236, 57249), True, 'import matplotlib.pyplot as plt\n'), ((59571, 59601), 'numpy.isfinite', 'np.isfinite', (['counts_per_second'], {}), '(counts_per_second)\n', (59582, 59601), True, 'import numpy as np\n'), ((60613, 60640), 'numpy.isfinite', 'np.isfinite', (['self.lc_counts'], {}), '(self.lc_counts)\n', (60624, 60640), True, 'import numpy as np\n'), ((49825, 49838), 'numpy.min', 'np.min', (['times'], {}), '(times)\n', (49831, 49838), True, 'import numpy as np\n'), ((49932, 49945), 'numpy.min', 'np.min', (['times'], {}), '(times)\n', (49938, 49945), True, 'import numpy as np\n'), ((52609, 52647), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'self.t_bin_edges[t]'}), '(seconds=self.t_bin_edges[t])\n', (52618, 52647), False, 'from 
datetime import timedelta\n'), ((52723, 52765), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'self.t_bin_edges[t + 1]'}), '(seconds=self.t_bin_edges[t + 1])\n', (52732, 52765), False, 'from datetime import timedelta\n'), ((54712, 54739), 'numpy.average', 'np.average', (['ltimes_in_range'], {}), '(ltimes_in_range)\n', (54722, 54739), True, 'import numpy as np\n'), ((54887, 54903), 'numpy.array', 'np.array', (['counts'], {}), '(counts)\n', (54895, 54903), True, 'import numpy as np\n'), ((72506, 72526), 'os.listdir', 'os.listdir', (['save_dir'], {}), '(save_dir)\n', (72516, 72526), False, 'import os\n'), ((55130, 55150), 'datetime.timedelta', 'timedelta', ([], {'seconds': 't'}), '(seconds=t)\n', (55139, 55150), False, 'from datetime import timedelta\n'), ((56363, 56383), 'datetime.timedelta', 'timedelta', ([], {'seconds': 't'}), '(seconds=t)\n', (56372, 56383), False, 'from datetime import timedelta\n'), ((53595, 53633), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'self.t_bin_edges[t]'}), '(seconds=self.t_bin_edges[t])\n', (53604, 53633), False, 'from datetime import timedelta\n'), ((53713, 53755), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'self.t_bin_edges[t + 1]'}), '(seconds=self.t_bin_edges[t + 1])\n', (53722, 53755), False, 'from datetime import timedelta\n'), ((55692, 55722), 'numpy.isfinite', 'np.isfinite', (['counts_per_second'], {}), '(counts_per_second)\n', (55703, 55722), True, 'import numpy as np\n'), ((56903, 56922), 'numpy.isfinite', 'np.isfinite', (['counts'], {}), '(counts)\n', (56914, 56922), True, 'import numpy as np\n')]
|
# Generator functions to generate batches of data.
import numpy as np
import os
import time
import h5py
import matplotlib.pyplot as plt
import collections
from synth.config import config
from synth.utils import utils
def data_gen_SDN(mode = 'Train', sec_mode = 0):
with h5py.File(config.stat_file, mode='r') as stat_file:
max_feat = stat_file["feats_maximus"][()] + 0.001
min_feat = stat_file["feats_minimus"][()] - 0.001
voc_list = [x for x in os.listdir(config.feats_dir) if x.endswith('.hdf5') and x.split('_')[0].upper() in [x for x in config.datasets if x != "DAMP"]]
if "DAMP" in config.datasets:
damp_list = [x for x in os.listdir(config.feats_dir) if x.endswith('.hdf5') and x.split('_')[1] in config.damp_singers]
voc_list = voc_list+damp_list
# if config.SDN_mix:
# back_list = [x for x in os.listdir(config.backing_dir) if x.endswith('.hdf5')]
# voc_list = [x for x in voc_list if x not in ['csd_alto1_NinoDios_14.hdf5', 'jvs_jvs023_raw_song_unique_11.hdf5', 'jvs_jvs024_raw_song_unique_2.hdf5', 'csd_soprano3_NinoDios_18.hdf5', 'csd_tenor1_ElRossinyol_13.hdf5', 'csd_soprano3_NinoDios_5.hdf5', 'csd_tenor3_NinoDios_8.hdf5', 'csd_tenor2_NinoDios_13.hdf5', 'jvs_jvs047_raw_song_unique_4.hdf5', 'jvs_jvs098_raw_song_unique_1.hdf5', 'jvs_jvs023_raw_song_unique_9.hdf5', 'jvs_jvs023_raw_song_unique_14.hdf5', 'csd_soprano2_NinoDios_13.hdf5', 'csd_tenor4_LocusIste_12.hdf5', 'csd_bass4_NinoDios_5.hdf5', 'jvs_jvs014_raw_song_unique_15.hdf5', 'csd_soprano2_NinoDios_2.hdf5', 'csd_bass4_NinoDios_12.hdf5', 'jvs_jvs041_raw_song_unique_14.hdf5', 'csd_alto3_LocusIste_25.hdf5', 'jvs_jvs023_raw_song_unique_16.hdf5', 'jvs_jvs092_raw_song_unique_12.hdf5', 'jvs_jvs074_raw_song_unique_6.hdf5', 'jvs_jvs017_raw_song_unique_2.hdf5']]
train_list = [x for x in voc_list if not x.split('_')[2]=='04'] + voc_list[:int(len(voc_list)*0.9)]
val_list = [x for x in voc_list if x.split('_')[2]=='04']+ voc_list[int(len(voc_list)*0.9):]
max_files_to_process = int(config.batch_size/config.autovc_samples_per_file)
if mode == "Train":
num_batches = config.autovc_batches_per_epoch_train
file_list = train_list
else:
num_batches = config.autovc_batches_per_epoch_val
file_list = val_list
for k in range(num_batches):
feats_targs = []
stfts_targs = []
targets_speakers = []
# if config.SDN_mix:
# back_index = np.random.randint(0,len(back_list))
# back_to_open = back_list[back_index]
# with h5py.File(os.path.join(config.backing_dir,back_to_open), "r") as hdf5_file:
# back = hdf5_file['backing_stft'][()]
# back = np.clip(back, 0.0, 1.0)
for i in range(max_files_to_process):
voc_index = np.random.randint(0,len(file_list))
voc_to_open = file_list[voc_index]
with h5py.File(os.path.join(config.feats_dir,voc_to_open), "r") as hdf5_file:
mel = hdf5_file['feats'][()]
back = hdf5_file['back_stft'][()]
stfts = hdf5_file['stfts'][()]
back = np.clip(back, 0.0, 1.0)
f0 = mel[:,-2]
med = np.median(f0[f0 > 0])
f0[f0==0] = med
mel[:,-2] = f0
speaker_name = voc_to_open.split('_')[1]
speaker_index = config.singers.index(speaker_name)
mel = (mel - min_feat)/(max_feat-min_feat)
stfts = np.clip(stfts, 0.0, 1.0)
assert mel.max()<=1.0 and mel.min()>=0.0, "Error in file {}, max: {}, min: {}".format(voc_to_open, mel.max(), mel.min())
for j in range(config.autovc_samples_per_file):
voc_idx = np.random.randint(0,len(mel)-config.max_phr_len)
feats_targs.append(mel[voc_idx:voc_idx+config.max_phr_len])
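                # augmentation: add a random noise floor and mix in the backing track at a
                # random gain, with the vocal STFT itself randomly attenuated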
noise = np.random.rand(config.max_phr_len,stfts.shape[-1])*np.random.uniform(0.0,config.noise_threshold)
back_gain = np.random.uniform(0.0, config.back_threshold)
stft = stfts[voc_idx:voc_idx+config.max_phr_len]*np.random.uniform(back_gain, 1.0) + noise
back_sample = back[voc_idx:voc_idx+config.max_phr_len]
stft = stft + back_sample * back_gain
# if config.SDN_mix:
# back_idx = np.random.randint(0,len(back)-config.max_phr_len)
# back_sample = back[back_idx:back_idx+config.max_phr_len]
# stft = stft + back_sample * back_gain
stfts_targs.append(stft)
targets_speakers.append(speaker_index)
feats_targs = np.array(feats_targs)
stfts_targs = np.array(stfts_targs)
yield feats_targs, stfts_targs, np.array(targets_speakers)
|
[
"numpy.random.uniform",
"h5py.File",
"numpy.median",
"numpy.clip",
"numpy.array",
"synth.config.config.singers.index",
"numpy.random.rand",
"os.path.join",
"os.listdir"
] |
[((282, 319), 'h5py.File', 'h5py.File', (['config.stat_file'], {'mode': '"""r"""'}), "(config.stat_file, mode='r')\n", (291, 319), False, 'import h5py\n'), ((4725, 4746), 'numpy.array', 'np.array', (['feats_targs'], {}), '(feats_targs)\n', (4733, 4746), True, 'import numpy as np\n'), ((4769, 4790), 'numpy.array', 'np.array', (['stfts_targs'], {}), '(stfts_targs)\n', (4777, 4790), True, 'import numpy as np\n'), ((478, 506), 'os.listdir', 'os.listdir', (['config.feats_dir'], {}), '(config.feats_dir)\n', (488, 506), False, 'import os\n'), ((3194, 3217), 'numpy.clip', 'np.clip', (['back', '(0.0)', '(1.0)'], {}), '(back, 0.0, 1.0)\n', (3201, 3217), True, 'import numpy as np\n'), ((3272, 3293), 'numpy.median', 'np.median', (['f0[f0 > 0]'], {}), '(f0[f0 > 0])\n', (3281, 3293), True, 'import numpy as np\n'), ((3434, 3468), 'synth.config.config.singers.index', 'config.singers.index', (['speaker_name'], {}), '(speaker_name)\n', (3454, 3468), False, 'from synth.config import config\n'), ((3548, 3572), 'numpy.clip', 'np.clip', (['stfts', '(0.0)', '(1.0)'], {}), '(stfts, 0.0, 1.0)\n', (3555, 3572), True, 'import numpy as np\n'), ((672, 700), 'os.listdir', 'os.listdir', (['config.feats_dir'], {}), '(config.feats_dir)\n', (682, 700), False, 'import os\n'), ((4069, 4114), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', 'config.back_threshold'], {}), '(0.0, config.back_threshold)\n', (4086, 4114), True, 'import numpy as np\n'), ((4841, 4867), 'numpy.array', 'np.array', (['targets_speakers'], {}), '(targets_speakers)\n', (4849, 4867), True, 'import numpy as np\n'), ((2951, 2994), 'os.path.join', 'os.path.join', (['config.feats_dir', 'voc_to_open'], {}), '(config.feats_dir, voc_to_open)\n', (2963, 2994), False, 'import os\n'), ((3944, 3995), 'numpy.random.rand', 'np.random.rand', (['config.max_phr_len', 'stfts.shape[-1]'], {}), '(config.max_phr_len, stfts.shape[-1])\n', (3958, 3995), True, 'import numpy as np\n'), ((3995, 4041), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', 'config.noise_threshold'], {}), '(0.0, config.noise_threshold)\n', (4012, 4041), True, 'import numpy as np\n'), ((4180, 4213), 'numpy.random.uniform', 'np.random.uniform', (['back_gain', '(1.0)'], {}), '(back_gain, 1.0)\n', (4197, 4213), True, 'import numpy as np\n')]
|
import numpy as np
import pandas as pd
import scipy.sparse as sp
from sklearn.metrics.pairwise import cosine_similarity
class Evaluator():
def __init__(self, k=10, training_set=None, testing_set=None, book_sim=None, novelty_scores=None):
self.k = k
self.book_sim = book_sim
self.novelty_scores = novelty_scores
if training_set is not None:
self.training_set = training_set
self.num_users = len(self.training_set.user_id.unique())
self.num_books = len(self.training_set.book_id.unique())
if testing_set is not None:
self.testing_set = testing_set
self.testing_idx = {}
for user_id in testing_set.user_id.unique():
self.testing_idx[user_id] = testing_set[testing_set.user_id==user_id].book_id.values
self.result = {}
def _average_precision(self, pred, truth):
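        # average precision at k: add precision-at-rank for every prediction that hits
        # the ground truth, normalised by min(len(truth), k)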
in_arr = np.in1d(pred, truth)
score = 0.0
num_hits = 0.0
for idx, correct in enumerate(in_arr):
if correct:
num_hits += 1
score += num_hits / (idx + 1)
return score / min(len(truth), self.k)
def _novelty_score(self, pred):
        # Recommending the top 10 books by novelty score yields a mean of roughly 10.4,
        # so cap the score at 10.0; it barely changes anything and keeps the score range tidy
return min(self.novelty_scores.loc[pred].novelty_score.mean(), 10.0)
def _diversity_score(self, pred):
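        # intra-list similarity (ILS): mean pairwise similarity over the upper triangle
        # of the recommended books' similarity sub-matrix; diversity = (1 - ILS) * 10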
matrix = self.book_sim.loc[pred, pred].values
ils = matrix[np.triu_indices(len(pred), k=1)].mean()
return (1 - ils) * 10
def _personalization_score(self, preds, user_ids, book_ids):
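        # mark each user's recommended books in a binary users x books matrix, then score
        # personalization as (1 - mean pairwise cosine similarity between users' lists) * 10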
df = pd.DataFrame(
data=np.zeros([len(user_ids), len(book_ids)]),
index=user_ids,
columns=book_ids
)
for user_id in user_ids:
df.loc[user_id, preds[user_id]] = 1
matrix = sp.csr_matrix(df.values)
#calculate similarity for every user's recommendation list
similarity = cosine_similarity(X=matrix, dense_output=False)
        # get indices for the upper-right triangle (excluding the diagonal)
upper_right = np.triu_indices(similarity.shape[0], k=1)
#calculate average similarity
personalization = np.mean(similarity[upper_right])
return (1 - personalization) * 10
def evaluate(self, model):
model.fit(self.training_set)
preds = model.all_recommendation()
user_ids = list(preds.keys())
book_ids = np.unique(np.array(list(preds.values())).flatten())
ap_sum = 0
nov_score_sum = 0
div_score_sum = 0
for user_id in preds.keys():
pred = preds[user_id]
truth = self.testing_idx[user_id]
ap_sum += self._average_precision(pred, truth)
nov_score_sum += self._novelty_score(pred)
div_score_sum += self._diversity_score(pred)
self.result[model.name] = {}
self.result[model.name]['Mean Average Precision'] = "%.2f%%" % (ap_sum / self.num_users * 100)
self.result[model.name]['Coverage'] = "%.2f%%" % (len(book_ids) / self.num_books * 100)
self.result[model.name]['Novelty Score'] = "%.2f" % (nov_score_sum / self.num_users)
self.result[model.name]['Diversity Score'] = "%.2f" % (div_score_sum / self.num_users)
self.result[model.name]['Personalization Score'] = "%.2f" % self._personalization_score(preds, user_ids, book_ids)
def print_result(self):
print(pd.DataFrame(self.result).loc[['Mean Average Precision', 'Coverage', 'Novelty Score', 'Diversity Score', 'Personalization Score']])
|
[
"pandas.DataFrame",
"sklearn.metrics.pairwise.cosine_similarity",
"numpy.triu_indices",
"numpy.mean",
"scipy.sparse.csr_matrix",
"numpy.in1d"
] |
[((925, 945), 'numpy.in1d', 'np.in1d', (['pred', 'truth'], {}), '(pred, truth)\n', (932, 945), True, 'import numpy as np\n'), ((1977, 2001), 'scipy.sparse.csr_matrix', 'sp.csr_matrix', (['df.values'], {}), '(df.values)\n', (1990, 2001), True, 'import scipy.sparse as sp\n'), ((2091, 2138), 'sklearn.metrics.pairwise.cosine_similarity', 'cosine_similarity', ([], {'X': 'matrix', 'dense_output': '(False)'}), '(X=matrix, dense_output=False)\n', (2108, 2138), False, 'from sklearn.metrics.pairwise import cosine_similarity\n'), ((2222, 2263), 'numpy.triu_indices', 'np.triu_indices', (['similarity.shape[0]'], {'k': '(1)'}), '(similarity.shape[0], k=1)\n', (2237, 2263), True, 'import numpy as np\n'), ((2329, 2361), 'numpy.mean', 'np.mean', (['similarity[upper_right]'], {}), '(similarity[upper_right])\n', (2336, 2361), True, 'import numpy as np\n'), ((3604, 3629), 'pandas.DataFrame', 'pd.DataFrame', (['self.result'], {}), '(self.result)\n', (3616, 3629), True, 'import pandas as pd\n')]
|
# Program 19d: Generalized synchronization.
# See Figure 19.8(a).
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint
# Constants
mu = 5.7
sigma = 16
b = 4
r = 45.92
g = 8 # When g=4, there is no synchronization.
tmax = 100
t = np.arange(0.0, tmax, 0.1)
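# Drive system: Rossler (x1-x3). Response and auxiliary systems: two identical Lorenz
# systems (y and z), each driven by x1 with coupling strength g.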
def rossler_lorenz_odes(X,t):
x1, x2, x3, y1, y2, y3, z1, z2, z3 = X
dx1 = -(x2 + x3)
dx2 = x1 + 0.2*x2
dx3 = 0.2 + x3 * (x1 - mu)
dy1 = sigma * (y2 - y1) - g * (y1 - x1)
dy2 = -y1 * y3 + r*y1 - y2
dy3 = y1 * y2 - b*y3
dz1 = sigma * (z2 - z1) - g * (z1 - x1)
dz2 = -z1*z3 + r*z1 - z2
dz3 = z1*z2 - b*z3
return (dx1, dx2, dx3, dy1, dy2, dy3, dz1, dz2, dz3)
y0 = [2, -10, 44, 30, 10, 20, 31, 11, 22]
X = odeint(rossler_lorenz_odes, y0, t, rtol=1e-6)
x1, x2, x3, y1, y2, y3, z1, z2, z3 = X.T  # unpack columns
plt.figure(1)
# Delete first 500 iterates.
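# If the response and auxiliary systems synchronize, the y2 vs z2 points collapse onto the diagonal.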
plt.plot(y2[500:len(y2)], z2[500:len(z2)])
plt.xlabel(r'$y_2$', fontsize=15)
plt.ylabel(r'$z_2$', fontsize=15)
plt.show()
|
[
"matplotlib.pyplot.show",
"scipy.integrate.odeint",
"matplotlib.pyplot.figure",
"numpy.arange",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] |
[((266, 291), 'numpy.arange', 'np.arange', (['(0.0)', 'tmax', '(0.1)'], {}), '(0.0, tmax, 0.1)\n', (275, 291), True, 'import numpy as np\n'), ((740, 786), 'scipy.integrate.odeint', 'odeint', (['rossler_lorenz_odes', 'y0', 't'], {'rtol': '(1e-06)'}), '(rossler_lorenz_odes, y0, t, rtol=1e-06)\n', (746, 786), False, 'from scipy.integrate import odeint\n'), ((846, 859), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (856, 859), True, 'import matplotlib.pyplot as plt\n'), ((932, 964), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$y_2$"""'], {'fontsize': '(15)'}), "('$y_2$', fontsize=15)\n", (942, 964), True, 'import matplotlib.pyplot as plt\n'), ((966, 998), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$z_2$"""'], {'fontsize': '(15)'}), "('$z_2$', fontsize=15)\n", (976, 998), True, 'import matplotlib.pyplot as plt\n'), ((1000, 1010), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1008, 1010), True, 'import matplotlib.pyplot as plt\n')]
|
import numpy as np
from tspdb.src.pindex.predict import get_prediction_range, get_prediction
from tspdb.src.pindex.pindex_managment import TSPI
from tspdb.src.pindex.pindex_utils import index_ts_mapper
import time
import timeit
import pandas as pd
from tspdb.src.hdf_util import read_data
from tspdb.src.tsUtils import randomlyHideValues
from scipy.stats import norm
from sklearn.metrics import r2_score
import tspdb
def r2_var(y,y_h,X):
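	# R^2-style score whose baseline predictor is the variance of X
	# (the variable named `average` is actually E[X^2] - E[X]^2)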
average = np.mean(X**2) - np.mean(X)**2
return 1 - sum((y-y_h)**2)/sum((y-average)**2)
def create_table_data():
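	# generate synthetic and mixture test series (plus variants with values randomly
	# hidden at two rates) and dump them to CSV for the pindex tests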
obs = np.arange(10**5).astype('float')
means = obs
var = np.zeros(obs.shape)
obs_9 = randomlyHideValues(np.array(obs), 0.9)[0]
obs_7 = randomlyHideValues(np.array(obs), 0.7)[0]
print(obs_9)
df = pd.DataFrame(data ={'ts':obs, 'means': means, 'ts_9':obs_9, 'ts_7' : obs_7,'var': var })
df.to_csv('testdata/tables/ts_basic_5.csv',index_label = 'time')
timestamps = pd.date_range('2012-10-01 00:00:00', periods = 10**5, freq='5s')
df.index = timestamps
df.to_csv('testdata/tables/ts_basic_ts_5_5.csv', index_label = 'time')
# real time series variance constant
data = read_data('testdata/MixtureTS2.h5')
obs = data['obs'][:]
means = data['means'][:]
var = np.ones(obs.shape)
obs_9 = randomlyHideValues(np.array(obs), 0.9)[0]
obs_7 = randomlyHideValues(np.array(obs), 0.7)[0]
df = pd.DataFrame(data ={'ts':obs, 'means': means, 'ts_9':obs_9, 'ts_7' : obs_7 ,'var': var })
df.index_label = 'time'
df.to_csv('testdata/tables/MixtureTS2.csv', index_label = 'time')
# real time series variance constant
data = read_data('testdata/MixtureTS.h5')
obs = data['obs'][:]
means = data['means'][:]
var = np.ones(obs.shape)
obs_9 = randomlyHideValues(np.array(obs), 0.9)[0]
obs_7 = randomlyHideValues(np.array(obs), 0.7)[0]
df = pd.DataFrame(data ={'ts':obs, 'means': means, 'ts_9':obs_9, 'ts_7' : obs_7,'var': var })
df.to_csv('testdata/tables/MixtureTS.csv', index_label = 'time')
	# real time series variance harmonics
data = read_data('testdata/MixtureTS_var.h5')
obs = data['obs'][:]
means = data['means'][:]
var = data['var'][:]
obs_9 = randomlyHideValues(np.array(obs), 0.9)[0]
obs_7 = randomlyHideValues(np.array(obs), 0.7)[0]
df = pd.DataFrame(data ={'ts':obs, 'means': means, 'ts_9':obs_9, 'ts_7' : obs_7, 'var': var })
df.to_csv('testdata/tables/MixtureTS_var.csv', index_label = 'time')
def create_tables(interface):
dir_ = tspdb.__path__[0]+'/tests/'
for table in ['mixturets2','ts_basic_5','ts_basic_ts_5_5','mixturets_var']:
df = pd.read_csv(dir_+'testdata/tables/%s.csv'%table, engine = 'python')
if table == 'ts_basic_ts_5_5': df['time'] = df['time'].astype('datetime64[ns]')
interface.create_table(table, df, 'time', include_index = False)
def update_test(interface, init_points = 10**4 , update_points = [1000,100,5000,10000], T = 1000, direct_var = True ,index_name = 'ts_basic_test_pindex'):
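	# create a small sequential table, build a pindex on it via SQL, then bulk-insert
	# batches of new rows to exercise incremental index updates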
df = pd.DataFrame(data ={'ts': np.arange(init_points).astype('float')})
interface.create_table('ts_basic_test', df, 'row_id', index_label='row_id')
time_series_table = ['ts_basic_test','ts', 'row_id']
T0 = 1000
gamma = 0.5
k = 2
k_var = 1
agg_interval = 1.
conn = interface.engine.raw_connection()
cur = conn.cursor()
cur.execute('''SELECT create_pindex('%s','%s','%s','%s', "T" => %s, k => %s, k_var => %s, agg_interval => %s, var_direct => %s)'''%('ts_basic_test','row_id','ts', index_name, T, k,k_var, agg_interval, direct_var))
cur.close()
conn.commit()
conn.close()
for points in update_points:
df = pd.DataFrame(data = {'ts':np.arange(init_points,points+init_points).astype('float')}, index = np.arange(init_points,points+init_points) )
interface.bulk_insert('ts_basic_test', df, index_label='row_id')
init_points += points
print ('successfully updated %s points' %points)
def ts_table_tests(init_points = 10**4 , update_points = [1000,100,5000,10000], T = 1000, direct_var = True ,index_name = 'ts_basic_ts_pindex'):
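	# NOTE: SqlImplementation (a SQL database interface wrapper) is not imported in this
	# file; it is assumed to be available from the surrounding test environment.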
interface = SqlImplementation(driver="postgresql", host="localhost", database="querytime_test",user="aalomar",password="<PASSWORD>")
df = pd.DataFrame(data ={'ts': np.arange(init_points).astype('float')})
timestamps = pd.date_range('2012-10-01 00:00:00', periods = init_points+1, freq='5s')
end = timestamps[-1]
df.index = timestamps[:-1]
interface.create_table('ts_basic_ts', df, 'timestamp', index_label='timestamp')
time_series_table = ['ts_basic_ts','ts', 'timestamp']
T0 = 1000
gamma = 0.5
k = 2
k_var = 1
TSPD = TSPI(_dir = 'C:/Program Files/PostgreSQL/10/data/', agg_interval = 5, T = T,T_var = T, rank = k, rank_var = k_var, col_to_row_ratio = 10, index_name = index_name,gamma = gamma, interface= interface ,time_series_table = time_series_table, direct_var = direct_var )
TSPD.create_index()
interface = SqlImplementation(driver="postgresql", host="localhost", database="querytime_test",user="aalomar",password="<PASSWORD>")
for points in update_points:
df = pd.DataFrame(data = {'ts':np.arange(init_points,points+init_points).astype('float')} )
timestamps = pd.date_range(end, periods = points+1, freq='5s')
end = timestamps[-1]
df.index = timestamps[:-1]
interface.bulk_insert('ts_basic_ts', df, index_label='timestamp')
init_points += points
print ('successfully updated %s points' %points)
def create_pindex_test(interface,table_name, T,T_var, k ,k_var, direct_var,value_column= ['ts'], index_name = None , agg_interval = 1., col_to_row_ratio= 10, time_column = 'row_id'):
T0 = 1000
gamma = 0.5
if index_name is None: index_name = 'pindex'
value_column = ','.join(value_column)
interface.engine.execute('''SELECT create_pindex('%s','%s','{%s}','%s', T => %s,t_var =>%s, k => %s, k_var => %s, agg_interval => %s, var_direct => %s, col_to_row_ratio => %s)'''%(table_name,time_column, value_column, index_name, T, T_var, k,k_var, agg_interval, direct_var, col_to_row_ratio))
|
[
"pandas.DataFrame",
"pandas.date_range",
"pandas.read_csv",
"numpy.zeros",
"numpy.ones",
"numpy.mean",
"tspdb.src.hdf_util.read_data",
"numpy.arange",
"numpy.array",
"tspdb.src.pindex.pindex_managment.TSPI"
] |
[((645, 664), 'numpy.zeros', 'np.zeros', (['obs.shape'], {}), '(obs.shape)\n', (653, 664), True, 'import numpy as np\n'), ((791, 883), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "{'ts': obs, 'means': means, 'ts_9': obs_9, 'ts_7': obs_7, 'var': var}"}), "(data={'ts': obs, 'means': means, 'ts_9': obs_9, 'ts_7': obs_7,\n 'var': var})\n", (803, 883), True, 'import pandas as pd\n'), ((967, 1031), 'pandas.date_range', 'pd.date_range', (['"""2012-10-01 00:00:00"""'], {'periods': '(10 ** 5)', 'freq': '"""5s"""'}), "('2012-10-01 00:00:00', periods=10 ** 5, freq='5s')\n", (980, 1031), True, 'import pandas as pd\n'), ((1182, 1217), 'tspdb.src.hdf_util.read_data', 'read_data', (['"""testdata/MixtureTS2.h5"""'], {}), "('testdata/MixtureTS2.h5')\n", (1191, 1217), False, 'from tspdb.src.hdf_util import read_data\n'), ((1276, 1294), 'numpy.ones', 'np.ones', (['obs.shape'], {}), '(obs.shape)\n', (1283, 1294), True, 'import numpy as np\n'), ((1406, 1498), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "{'ts': obs, 'means': means, 'ts_9': obs_9, 'ts_7': obs_7, 'var': var}"}), "(data={'ts': obs, 'means': means, 'ts_9': obs_9, 'ts_7': obs_7,\n 'var': var})\n", (1418, 1498), True, 'import pandas as pd\n'), ((1643, 1677), 'tspdb.src.hdf_util.read_data', 'read_data', (['"""testdata/MixtureTS.h5"""'], {}), "('testdata/MixtureTS.h5')\n", (1652, 1677), False, 'from tspdb.src.hdf_util import read_data\n'), ((1736, 1754), 'numpy.ones', 'np.ones', (['obs.shape'], {}), '(obs.shape)\n', (1743, 1754), True, 'import numpy as np\n'), ((1866, 1958), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "{'ts': obs, 'means': means, 'ts_9': obs_9, 'ts_7': obs_7, 'var': var}"}), "(data={'ts': obs, 'means': means, 'ts_9': obs_9, 'ts_7': obs_7,\n 'var': var})\n", (1878, 1958), True, 'import pandas as pd\n'), ((2077, 2115), 'tspdb.src.hdf_util.read_data', 'read_data', (['"""testdata/MixtureTS_var.h5"""'], {}), "('testdata/MixtureTS_var.h5')\n", (2086, 2115), False, 'from tspdb.src.hdf_util import read_data\n'), ((2300, 2392), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "{'ts': obs, 'means': means, 'ts_9': obs_9, 'ts_7': obs_7, 'var': var}"}), "(data={'ts': obs, 'means': means, 'ts_9': obs_9, 'ts_7': obs_7,\n 'var': var})\n", (2312, 2392), True, 'import pandas as pd\n'), ((4303, 4375), 'pandas.date_range', 'pd.date_range', (['"""2012-10-01 00:00:00"""'], {'periods': '(init_points + 1)', 'freq': '"""5s"""'}), "('2012-10-01 00:00:00', periods=init_points + 1, freq='5s')\n", (4316, 4375), True, 'import pandas as pd\n'), ((4621, 4876), 'tspdb.src.pindex.pindex_managment.TSPI', 'TSPI', ([], {'_dir': '"""C:/Program Files/PostgreSQL/10/data/"""', 'agg_interval': '(5)', 'T': 'T', 'T_var': 'T', 'rank': 'k', 'rank_var': 'k_var', 'col_to_row_ratio': '(10)', 'index_name': 'index_name', 'gamma': 'gamma', 'interface': 'interface', 'time_series_table': 'time_series_table', 'direct_var': 'direct_var'}), "(_dir='C:/Program Files/PostgreSQL/10/data/', agg_interval=5, T=T,\n T_var=T, rank=k, rank_var=k_var, col_to_row_ratio=10, index_name=\n index_name, gamma=gamma, interface=interface, time_series_table=\n time_series_table, direct_var=direct_var)\n", (4625, 4876), False, 'from tspdb.src.pindex.pindex_managment import TSPI\n'), ((470, 485), 'numpy.mean', 'np.mean', (['(X ** 2)'], {}), '(X ** 2)\n', (477, 485), True, 'import numpy as np\n'), ((2621, 2690), 'pandas.read_csv', 'pd.read_csv', (["(dir_ + 'testdata/tables/%s.csv' % table)"], {'engine': '"""python"""'}), "(dir_ + 'testdata/tables/%s.csv' % table, engine='python')\n", (2632, 
2690), True, 'import pandas as pd\n'), ((5186, 5235), 'pandas.date_range', 'pd.date_range', (['end'], {'periods': '(points + 1)', 'freq': '"""5s"""'}), "(end, periods=points + 1, freq='5s')\n", (5199, 5235), True, 'import pandas as pd\n'), ((486, 496), 'numpy.mean', 'np.mean', (['X'], {}), '(X)\n', (493, 496), True, 'import numpy as np\n'), ((590, 608), 'numpy.arange', 'np.arange', (['(10 ** 5)'], {}), '(10 ** 5)\n', (599, 608), True, 'import numpy as np\n'), ((694, 707), 'numpy.array', 'np.array', (['obs'], {}), '(obs)\n', (702, 707), True, 'import numpy as np\n'), ((746, 759), 'numpy.array', 'np.array', (['obs'], {}), '(obs)\n', (754, 759), True, 'import numpy as np\n'), ((1324, 1337), 'numpy.array', 'np.array', (['obs'], {}), '(obs)\n', (1332, 1337), True, 'import numpy as np\n'), ((1376, 1389), 'numpy.array', 'np.array', (['obs'], {}), '(obs)\n', (1384, 1389), True, 'import numpy as np\n'), ((1784, 1797), 'numpy.array', 'np.array', (['obs'], {}), '(obs)\n', (1792, 1797), True, 'import numpy as np\n'), ((1836, 1849), 'numpy.array', 'np.array', (['obs'], {}), '(obs)\n', (1844, 1849), True, 'import numpy as np\n'), ((2218, 2231), 'numpy.array', 'np.array', (['obs'], {}), '(obs)\n', (2226, 2231), True, 'import numpy as np\n'), ((2270, 2283), 'numpy.array', 'np.array', (['obs'], {}), '(obs)\n', (2278, 2283), True, 'import numpy as np\n'), ((3735, 3779), 'numpy.arange', 'np.arange', (['init_points', '(points + init_points)'], {}), '(init_points, points + init_points)\n', (3744, 3779), True, 'import numpy as np\n'), ((3035, 3057), 'numpy.arange', 'np.arange', (['init_points'], {}), '(init_points)\n', (3044, 3057), True, 'import numpy as np\n'), ((4246, 4268), 'numpy.arange', 'np.arange', (['init_points'], {}), '(init_points)\n', (4255, 4268), True, 'import numpy as np\n'), ((3667, 3711), 'numpy.arange', 'np.arange', (['init_points', '(points + init_points)'], {}), '(init_points, points + init_points)\n', (3676, 3711), True, 'import numpy as np\n'), ((5108, 5152), 'numpy.arange', 'np.arange', (['init_points', '(points + init_points)'], {}), '(init_points, points + init_points)\n', (5117, 5152), True, 'import numpy as np\n')]
|
# 10/4/18
# chenyong
# predict leaf counts using trained model
"""
Make predictions of Leaf counts using trained models
"""
import os.path as op
import sys
import numpy as np
import pandas as pd
import pickle
import matplotlib.pyplot as plt
import matplotlib as mpl
from PIL import Image
from schnablelab.apps.base import ActionDispatcher, OptionParser, glob
from schnablelab.apps.headers import Slurm_header, Slurm_gpu_constraint_header, Slurm_gpu_header
from schnablelab.apps.natsort import natsorted
from glob import glob
from PIL import Image
import cv2
from pathlib import Path
def main():
actions = (
('keras', 'using keras model to make prediction'),
('dpp', 'using dpp model to make prediction'),
)
p = ActionDispatcher(actions)
p.dispatch(globals())
def dpp(args):
"""
%prog model_dir img_dir output_prefix
using your trained dpp model to make predictions.
"""
p = OptionParser(dpp.__doc__)
p.set_slurm_opts(jn=True, gpu=True)
opts, args = p.parse_args(args)
if len(args) == 0:
sys.exit(not p.print_help())
model_dir, img_dir, otp = args
header = Slurm_gpu_constraint_header%(opts.time, opts.memory, otp, otp, otp, opts.gpu) \
if opts.gpu \
else Slurm_gpu_header%(opts.time, opts.memory, otp, otp, otp)
if opts.env:
header += 'ml anaconda \nsource activate %s\n'%opts.env
cmd = "python -m schnablelab.CNN.CNN_LeafCount_Predict %s %s %s.csv\n"%(model_dir, img_dir, otp)
header += cmd
f0 = open('%s.slurm'%otp, 'w')
f0.write(header)
f0.close()
    print('%s.slurm generated, you can submit it to a gpu node now.'%otp)
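    # Usage sketch (hypothetical names, not part of the original script): after
    # running `dpp my_dpp_model/ my_imgs/ run1`, the generated script can be
    # submitted to a GPU node with `sbatch run1.slurm`.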
def keras(args):
"""
%prog model_name img_dir target_size output_prefix
    Use your trained model to make predictions. target_size is the input_shape used
    when you trained your model; a valid target_size example is 224,224,3.
"""
from keras.models import load_model
p = OptionParser(keras.__doc__)
p.set_slurm_opts()
p.add_option('--img_num', default='all',
help='specify how many images used for prediction in the dir')
opts, args = p.parse_args(args)
if len(args) == 0:
sys.exit(not p.print_help())
model, img_dir, ts, otp = args
ts = tuple([int(i) for i in ts.split(',')][:-1])
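    # e.g. (illustrative): a target_size string of '224,224,3' is parsed to the
    # (224, 224) tuple that cv2.resize expects below; the channel count is dropped.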
print(ts)
p = Path(img_dir)
ps = list(p.glob('*.png'))[:int(opts.img_num)] \
if opts.img_num!='all' \
else list(p.glob('*.png'))
imgs = []
fns = []
for i in ps:
print(i.name)
fns.append(i.name)
img = cv2.imread(str(i))
img = cv2.resize(img, ts)
imgs.append(img)
imgs_arr = np.asarray(imgs)
my_model = load_model(model)
pre_prob = my_model.predict(imgs_arr)
df = pd.DataFrame(pre_prob)
clss = df.shape[1]
headers = ['class_%s'%i for i in range(1, clss+1)]
df.columns = headers
df['image'] = fns
headers.insert(0, 'image')
df_final = df[headers]
df_final.to_csv('%s.csv'%otp, sep='\t', index=False)
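    # The resulting <output_prefix>.csv is tab-separated with one row per image:
    # an 'image' column followed by 'class_1'..'class_N' columns holding the
    # predicted probability of each class (illustrative description; the actual
    # values depend on the trained model).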
if __name__ == "__main__":
main()
|
[
"keras.models.load_model",
"pandas.DataFrame",
"numpy.asarray",
"pathlib.Path",
"schnablelab.apps.base.ActionDispatcher",
"schnablelab.apps.base.OptionParser",
"cv2.resize"
] |
[((750, 775), 'schnablelab.apps.base.ActionDispatcher', 'ActionDispatcher', (['actions'], {}), '(actions)\n', (766, 775), False, 'from schnablelab.apps.base import ActionDispatcher, OptionParser, glob\n'), ((938, 963), 'schnablelab.apps.base.OptionParser', 'OptionParser', (['dpp.__doc__'], {}), '(dpp.__doc__)\n', (950, 963), False, 'from schnablelab.apps.base import ActionDispatcher, OptionParser, glob\n'), ((1955, 1982), 'schnablelab.apps.base.OptionParser', 'OptionParser', (['keras.__doc__'], {}), '(keras.__doc__)\n', (1967, 1982), False, 'from schnablelab.apps.base import ActionDispatcher, OptionParser, glob\n'), ((2338, 2351), 'pathlib.Path', 'Path', (['img_dir'], {}), '(img_dir)\n', (2342, 2351), False, 'from pathlib import Path\n'), ((2673, 2689), 'numpy.asarray', 'np.asarray', (['imgs'], {}), '(imgs)\n', (2683, 2689), True, 'import numpy as np\n'), ((2705, 2722), 'keras.models.load_model', 'load_model', (['model'], {}), '(model)\n', (2715, 2722), False, 'from keras.models import load_model\n'), ((2774, 2796), 'pandas.DataFrame', 'pd.DataFrame', (['pre_prob'], {}), '(pre_prob)\n', (2786, 2796), True, 'import pandas as pd\n'), ((2613, 2632), 'cv2.resize', 'cv2.resize', (['img', 'ts'], {}), '(img, ts)\n', (2623, 2632), False, 'import cv2\n')]
|
#!/usr/bin/python
"""
FlowCal Python API example, without using calibration beads data.
This script is divided into two parts. Part one processes data from five cell
samples, and generates plots of each one.
Part two exemplifies how to use the processed cell sample data with
FlowCal's plotting and statistics modules, in order to produce interesting
plots.
For details about the experiment, samples, and instrument used, please
consult readme.txt.
"""
import os
import os.path
import numpy as np
import matplotlib.pyplot as plt
import FlowCal
###
# Definition of constants
###
# Names of the FCS files containing data from cell samples
samples_filenames = ['FCFiles/Data001.fcs',
'FCFiles/Data002.fcs',
'FCFiles/Data003.fcs',
'FCFiles/Data004.fcs',
'FCFiles/Data005.fcs',
]
# IPTG concentration of each cell sample, in micromolar.
iptg = np.array([0, 81, 161, 318, 1000])
# Plots will be generated after gating and transforming cell samples. These
# will be stored in the following folder.
samples_plot_dir = 'plot_samples'
if __name__ == "__main__":
# Check that plot directory exists, create if it does not.
if not os.path.exists(samples_plot_dir):
os.makedirs(samples_plot_dir)
###
# Part 1: Processing cell sample data
###
print("\nProcessing cell samples...")
# We will use the list ``samples`` to store processed, transformed flow
# cytometry data of each sample.
samples = []
# Iterate over cell sample filenames
for sample_id, sample_filename in enumerate(samples_filenames):
# Load flow cytometry data from the corresponding FCS file.
# ``FlowCal.io.FCSData(filename)`` returns an object that represents
# flow cytometry data loaded from file ``filename``.
print("\nLoading file \"{}\"...".format(sample_filename))
sample = FlowCal.io.FCSData(sample_filename)
# Data loaded from an FCS file is in "Channel Units", the raw numbers
# reported from the instrument's detectors. The FCS file also contains
# information to convert these into Relative Fluorescence Intensity
# (RFI) values, commonly referred to as arbitrary fluorescence units
# (a.u.). The function ``FlowCal.transform.to_rfi()`` performs this
# conversion.
print("Performing data transformation...")
sample = FlowCal.transform.to_rfi(sample)
# Gating
# Gating is the process of removing measurements of irrelevant
# particles, while retaining only the population of interest.
print("Performing gating...")
# ``FlowCal.gate.start_end()`` removes the first and last few events.
# Transients in fluidics can make these events slightly different from
# the rest. This may not be necessary in all instruments.
sample_gated = FlowCal.gate.start_end(sample,
num_start=250,
num_end=100)
# ``FlowCal.gate.high_low()`` removes events outside a range specified
# by a ``low`` and a ``high`` value. If these are not specified (as
# shown below), the function removes events outside the channel's range
# of detection.
# Detectors in a flow cytometer have a finite range of detection. If the
# fluorescence of a particle is higher than the upper limit of this
# range, the instrument will incorrectly record it with a value equal to
# this limit. The same happens for fluorescence values lower than the
# lower limit of detection. These saturated events should be removed,
# otherwise statistics may be calculated incorrectly.
# Note that this might not be necessary with newer instruments that
# record data as floating-point numbers (and in fact it might eliminate
# negative events). To see the data type stored in your FCS files, run
        # the following instruction: ``print(sample_gated.data_type)``.
# We will remove saturated events in the forward/side scatter channels,
# and in the fluorescence channel FL1.
sample_gated = FlowCal.gate.high_low(sample_gated,
channels=['FSC','SSC','FL1'])
# ``FlowCal.gate.density2d()`` preserves only the densest population as
# seen in a 2D density diagram of two channels. This helps remove
# particle aggregations and other sparse populations that are not of
# interest (i.e. debris).
# We use the forward and side scatter channels, and preserve 50% of the
# events. Finally, setting ``full_output=True`` instructs the function
# to return two additional outputs. The last one (``gate_contour``) is
# a curve surrounding the gated region, which we will use for plotting
# later.
sample_gated, __, gate_contour = FlowCal.gate.density2d(
data=sample_gated,
channels=['FSC','SSC'],
gate_fraction=0.5,
full_output=True)
# Plot forward/side scatter 2D density plot and 1D fluorescence
# histograms
print("Plotting density plot and histogram...")
# Parameters for the forward/side scatter density plot
density_params = {}
# We use the "scatter" mode, in which individual particles will be
# plotted individually as in a scatter plot, but with a color
# proportional to the particle density around.
density_params['mode'] = 'scatter'
# Parameters for the fluorescence histograms
hist_params = {}
hist_params['xlabel'] = 'FL1 Fluorescence (a.u.)'
# Plot filename
# The figure can be saved in any format supported by matplotlib (svg,
# jpg, etc.) by just changing the extension.
plot_filename = '{}/density_hist_{}.png'.format(
samples_plot_dir,
'S{:03}'.format(sample_id + 1))
# Plot and save
# The function ``FlowCal.plot.density_and_hist()`` plots a combined
# figure with a 2D density plot at the top, and an arbitrary number of
# 1D histograms below. In this case, we will plot the forward/side
# scatter channels in the density plot, and a histogram of the
# fluorescence channel FL1 below.
# Note that we are providing data both before (``sample``) and after
# (``sample_gated``) gating. The 1D histogram will display the ungated
        # dataset with transparency, and the gated dataset in front with a
        # solid color. In addition, we are providing ``gate_contour`` from the
# density gating step, which will be displayed in the density diagram.
# This will result in a convenient representation of the data both
# before and after gating.
FlowCal.plot.density_and_hist(
sample,
sample_gated,
density_channels=['FSC','SSC'],
hist_channels=['FL1'],
gate_contour=gate_contour,
density_params=density_params,
hist_params=hist_params,
savefig=plot_filename)
# Save cell sample object
samples.append(sample_gated)
###
    # Part 2: Examples of how to use processed cell sample data
###
# Histogram of all samples
# Here, we plot the fluorescence histograms of all five samples in the same
# figure, using ``FlowCal.plot.hist1d``. Note how this function can be used
# in the context of accessory matplotlib functions to modify the axes
# limits and labels and add a legend, among others.
plt.figure(figsize=(6,3.5))
FlowCal.plot.hist1d(samples,
channel='FL1',
histtype='step',
bins=128)
plt.ylim([0, 2000])
plt.xlabel('FL1 Fluorescence (a.u.)')
plt.legend(['{} $\mu M$ IPTG'.format(i) for i in iptg],
loc='upper left',
fontsize='small')
plt.tight_layout()
plt.savefig('histograms.png', dpi=200)
plt.close()
# Here we illustrate how to obtain statistics from the fluorescence of each
# sample, and how to use them in a plot.
# The stats module contains functions to calculate different statistics
# such as mean, median, and standard deviation. Here, we calculate the
# geometric mean from channel FL1 of each sample, and plot them against the
# corresponding IPTG concentrations.
samples_fluorescence = [FlowCal.stats.gmean(s, channels='FL1')
for s in samples]
plt.figure(figsize=(5.5, 3.5))
plt.plot(iptg,
samples_fluorescence,
marker='o',
color=(0, 0.4, 0.7))
plt.xlabel('IPTG Concentration ($\mu M$)')
plt.ylabel('FL1 Fluorescence (a.u.)')
plt.tight_layout()
plt.savefig('dose_response.png', dpi=200)
plt.close()
print("\nDone.")
|
[
"FlowCal.gate.start_end",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"FlowCal.io.FCSData",
"matplotlib.pyplot.close",
"os.path.exists",
"FlowCal.stats.gmean",
"matplotlib.pyplot.ylim",
"FlowCal.gate.density2d",
"FlowCal.transform.to_rfi",
"FlowCal.plot.hist1d",
"FlowCal.plot.density_and_hist",
"matplotlib.pyplot.ylabel",
"os.makedirs",
"matplotlib.pyplot.plot",
"numpy.array",
"FlowCal.gate.high_low",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig"
] |
[((949, 982), 'numpy.array', 'np.array', (['[0, 81, 161, 318, 1000]'], {}), '([0, 81, 161, 318, 1000])\n', (957, 982), True, 'import numpy as np\n'), ((7748, 7776), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 3.5)'}), '(figsize=(6, 3.5))\n', (7758, 7776), True, 'import matplotlib.pyplot as plt\n'), ((7780, 7850), 'FlowCal.plot.hist1d', 'FlowCal.plot.hist1d', (['samples'], {'channel': '"""FL1"""', 'histtype': '"""step"""', 'bins': '(128)'}), "(samples, channel='FL1', histtype='step', bins=128)\n", (7799, 7850), False, 'import FlowCal\n'), ((7927, 7946), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 2000]'], {}), '([0, 2000])\n', (7935, 7946), True, 'import matplotlib.pyplot as plt\n'), ((7951, 7988), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""FL1 Fluorescence (a.u.)"""'], {}), "('FL1 Fluorescence (a.u.)')\n", (7961, 7988), True, 'import matplotlib.pyplot as plt\n'), ((8119, 8137), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (8135, 8137), True, 'import matplotlib.pyplot as plt\n'), ((8142, 8180), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""histograms.png"""'], {'dpi': '(200)'}), "('histograms.png', dpi=200)\n", (8153, 8180), True, 'import matplotlib.pyplot as plt\n'), ((8185, 8196), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (8194, 8196), True, 'import matplotlib.pyplot as plt\n'), ((8712, 8742), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5.5, 3.5)'}), '(figsize=(5.5, 3.5))\n', (8722, 8742), True, 'import matplotlib.pyplot as plt\n'), ((8747, 8816), 'matplotlib.pyplot.plot', 'plt.plot', (['iptg', 'samples_fluorescence'], {'marker': '"""o"""', 'color': '(0, 0.4, 0.7)'}), "(iptg, samples_fluorescence, marker='o', color=(0, 0.4, 0.7))\n", (8755, 8816), True, 'import matplotlib.pyplot as plt\n'), ((8860, 8903), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""IPTG Concentration ($\\\\mu M$)"""'], {}), "('IPTG Concentration ($\\\\mu M$)')\n", (8870, 8903), True, 'import matplotlib.pyplot as plt\n'), ((8907, 8944), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""FL1 Fluorescence (a.u.)"""'], {}), "('FL1 Fluorescence (a.u.)')\n", (8917, 8944), True, 'import matplotlib.pyplot as plt\n'), ((8949, 8967), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (8965, 8967), True, 'import matplotlib.pyplot as plt\n'), ((8972, 9013), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""dose_response.png"""'], {'dpi': '(200)'}), "('dose_response.png', dpi=200)\n", (8983, 9013), True, 'import matplotlib.pyplot as plt\n'), ((9018, 9029), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (9027, 9029), True, 'import matplotlib.pyplot as plt\n'), ((1239, 1271), 'os.path.exists', 'os.path.exists', (['samples_plot_dir'], {}), '(samples_plot_dir)\n', (1253, 1271), False, 'import os\n'), ((1281, 1310), 'os.makedirs', 'os.makedirs', (['samples_plot_dir'], {}), '(samples_plot_dir)\n', (1292, 1310), False, 'import os\n'), ((1943, 1978), 'FlowCal.io.FCSData', 'FlowCal.io.FCSData', (['sample_filename'], {}), '(sample_filename)\n', (1961, 1978), False, 'import FlowCal\n'), ((2464, 2496), 'FlowCal.transform.to_rfi', 'FlowCal.transform.to_rfi', (['sample'], {}), '(sample)\n', (2488, 2496), False, 'import FlowCal\n'), ((2942, 3000), 'FlowCal.gate.start_end', 'FlowCal.gate.start_end', (['sample'], {'num_start': '(250)', 'num_end': '(100)'}), '(sample, num_start=250, num_end=100)\n', (2964, 3000), False, 'import FlowCal\n'), ((4265, 4332), 'FlowCal.gate.high_low', 'FlowCal.gate.high_low', (['sample_gated'], 
{'channels': "['FSC', 'SSC', 'FL1']"}), "(sample_gated, channels=['FSC', 'SSC', 'FL1'])\n", (4286, 4332), False, 'import FlowCal\n'), ((5017, 5124), 'FlowCal.gate.density2d', 'FlowCal.gate.density2d', ([], {'data': 'sample_gated', 'channels': "['FSC', 'SSC']", 'gate_fraction': '(0.5)', 'full_output': '(True)'}), "(data=sample_gated, channels=['FSC', 'SSC'],\n gate_fraction=0.5, full_output=True)\n", (5039, 5124), False, 'import FlowCal\n'), ((6959, 7185), 'FlowCal.plot.density_and_hist', 'FlowCal.plot.density_and_hist', (['sample', 'sample_gated'], {'density_channels': "['FSC', 'SSC']", 'hist_channels': "['FL1']", 'gate_contour': 'gate_contour', 'density_params': 'density_params', 'hist_params': 'hist_params', 'savefig': 'plot_filename'}), "(sample, sample_gated, density_channels=['FSC',\n 'SSC'], hist_channels=['FL1'], gate_contour=gate_contour,\n density_params=density_params, hist_params=hist_params, savefig=\n plot_filename)\n", (6988, 7185), False, 'import FlowCal\n'), ((8623, 8661), 'FlowCal.stats.gmean', 'FlowCal.stats.gmean', (['s'], {'channels': '"""FL1"""'}), "(s, channels='FL1')\n", (8642, 8661), False, 'import FlowCal\n')]
|
import pytest
import numpy as np
from sklearn.model_selection import GridSearchCV
from sklego.datasets import load_penguins
from sklearn.pipeline import Pipeline
from sklearn.metrics import make_scorer, accuracy_score
from hulearn.preprocessing import PipeTransformer
from hulearn.outlier import InteractiveOutlierDetector
from hulearn.common import flatten
from tests.conftest import (
select_tests,
general_checks,
classifier_checks,
nonmeta_checks,
)
@pytest.mark.parametrize(
"test_fn",
select_tests(
include=flatten([general_checks, classifier_checks, nonmeta_checks]),
exclude=[
"check_estimators_pickle",
"check_estimator_sparse_data",
"check_estimators_nan_inf",
"check_pipeline_consistency",
"check_complex_data",
"check_fit2d_predict1d",
"check_methods_subset_invariance",
"check_fit1d",
"check_dict_unchanged",
"check_classifier_data_not_an_array",
"check_classifiers_one_label",
"check_classifiers_classes",
"check_classifiers_train",
"check_supervised_y_2d",
"check_supervised_y_no_nan",
"check_estimators_unfitted",
"check_estimators_dtypes",
"check_fit_score_takes_y",
"check_dtype_object",
"check_estimators_empty_data_messages",
],
),
)
def test_estimator_checks(test_fn):
"""
    We're skipping a lot of tests here mainly because this model is "bespoke";
    it is *not* general. Therefore a lot of assumptions are broken.
"""
clf = InteractiveOutlierDetector.from_json(
"tests/test_classification/demo-data.json"
)
test_fn(InteractiveOutlierDetector, clf)
def test_base_predict_usecase():
clf = InteractiveOutlierDetector.from_json(
"tests/test_classification/demo-data.json"
)
df = load_penguins(as_frame=True).dropna()
X, y = df.drop(columns=["species"]), df["species"]
preds = clf.fit(X, y).predict(X)
assert preds.shape[0] == df.shape[0]
def identity(x):
return x
def test_grid_predict():
clf = InteractiveOutlierDetector.from_json(
"tests/test_classification/demo-data.json"
)
pipe = Pipeline(
[
("id", PipeTransformer(identity)),
("mod", clf),
]
)
grid = GridSearchCV(
pipe,
cv=5,
param_grid={},
scoring={"acc": make_scorer(accuracy_score)},
refit="acc",
)
df = load_penguins(as_frame=True).dropna()
X = df.drop(columns=["species", "island", "sex"])
y = (np.random.random(df.shape[0]) < 0.1).astype(int)
preds = grid.fit(X, y).predict(X)
assert preds.shape[0] == df.shape[0]
def test_ignore_bad_data():
"""
    There might be some "bad data" drawn. For example, when you double-click quickly you might
draw a line instead of a poly. Bokeh is "okeh" with it, but our point-in-poly algorithm is not.
"""
data = [
{
"chart_id": "9ec8e755-2",
"x": "bill_length_mm",
"y": "bill_depth_mm",
"polygons": {
"Adelie": {"bill_length_mm": [], "bill_depth_mm": []},
"Gentoo": {"bill_length_mm": [], "bill_depth_mm": []},
"Chinstrap": {"bill_length_mm": [], "bill_depth_mm": []},
},
},
{
"chart_id": "11640372-c",
"x": "flipper_length_mm",
"y": "body_mass_g",
"polygons": {
"Adelie": {
"flipper_length_mm": [[214.43261376806052, 256.2612913545137]],
"body_mass_g": [[3950.9482324534456, 3859.9137496948247]],
},
"Gentoo": {"flipper_length_mm": [], "body_mass_g": []},
"Chinstrap": {"flipper_length_mm": [], "body_mass_g": []},
},
},
]
clf = InteractiveOutlierDetector(json_desc=data)
assert len(list(clf.poly_data)) == 0
|
[
"sklego.datasets.load_penguins",
"sklearn.metrics.make_scorer",
"numpy.random.random",
"hulearn.common.flatten",
"hulearn.outlier.InteractiveOutlierDetector",
"hulearn.outlier.InteractiveOutlierDetector.from_json",
"hulearn.preprocessing.PipeTransformer"
] |
[((1658, 1743), 'hulearn.outlier.InteractiveOutlierDetector.from_json', 'InteractiveOutlierDetector.from_json', (['"""tests/test_classification/demo-data.json"""'], {}), "('tests/test_classification/demo-data.json'\n )\n", (1694, 1743), False, 'from hulearn.outlier import InteractiveOutlierDetector\n'), ((1843, 1928), 'hulearn.outlier.InteractiveOutlierDetector.from_json', 'InteractiveOutlierDetector.from_json', (['"""tests/test_classification/demo-data.json"""'], {}), "('tests/test_classification/demo-data.json'\n )\n", (1879, 1928), False, 'from hulearn.outlier import InteractiveOutlierDetector\n'), ((2189, 2274), 'hulearn.outlier.InteractiveOutlierDetector.from_json', 'InteractiveOutlierDetector.from_json', (['"""tests/test_classification/demo-data.json"""'], {}), "('tests/test_classification/demo-data.json'\n )\n", (2225, 2274), False, 'from hulearn.outlier import InteractiveOutlierDetector\n'), ((3987, 4029), 'hulearn.outlier.InteractiveOutlierDetector', 'InteractiveOutlierDetector', ([], {'json_desc': 'data'}), '(json_desc=data)\n', (4013, 4029), False, 'from hulearn.outlier import InteractiveOutlierDetector\n'), ((549, 609), 'hulearn.common.flatten', 'flatten', (['[general_checks, classifier_checks, nonmeta_checks]'], {}), '([general_checks, classifier_checks, nonmeta_checks])\n', (556, 609), False, 'from hulearn.common import flatten\n'), ((1947, 1975), 'sklego.datasets.load_penguins', 'load_penguins', ([], {'as_frame': '(True)'}), '(as_frame=True)\n', (1960, 1975), False, 'from sklego.datasets import load_penguins\n'), ((2570, 2598), 'sklego.datasets.load_penguins', 'load_penguins', ([], {'as_frame': '(True)'}), '(as_frame=True)\n', (2583, 2598), False, 'from sklego.datasets import load_penguins\n'), ((2334, 2359), 'hulearn.preprocessing.PipeTransformer', 'PipeTransformer', (['identity'], {}), '(identity)\n', (2349, 2359), False, 'from hulearn.preprocessing import PipeTransformer\n'), ((2504, 2531), 'sklearn.metrics.make_scorer', 'make_scorer', (['accuracy_score'], {}), '(accuracy_score)\n', (2515, 2531), False, 'from sklearn.metrics import make_scorer, accuracy_score\n'), ((2671, 2700), 'numpy.random.random', 'np.random.random', (['df.shape[0]'], {}), '(df.shape[0])\n', (2687, 2700), True, 'import numpy as np\n')]
|
from tqdm import tqdm
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import time
import argparse
import math
from lib import utils
from lib.utils import log_string
from model.DSTGNN import DSTGNN
parser = argparse.ArgumentParser()
parser.add_argument('--P', type = int, default = 12,
help = 'history steps')
parser.add_argument('--Q', type = int, default = 12,
help = 'prediction steps')
parser.add_argument('--L', type = int, default = 5,
help = 'number of STAtt Blocks')
parser.add_argument('--K', type = int, default = 8,
help = 'number of attention heads')
parser.add_argument('--d', type = int, default = 8,
help = 'dims of each head attention outputs')
parser.add_argument('--train_ratio', type = float, default = 0.7,
help = 'training set [default : 0.7]')
parser.add_argument('--val_ratio', type = float, default = 0.1,
help = 'validation set [default : 0.1]')
parser.add_argument('--test_ratio', type = float, default = 0.2,
help = 'testing set [default : 0.2]')
parser.add_argument('--batch_size', type = int, default = 16,
help = 'batch size')
parser.add_argument('--max_epoch', type = int, default = 15,
help = 'epoch to run')
# parser.add_argument('--patience', type = int, default = 10,
# help = 'patience for early stop')
parser.add_argument('--learning_rate', type=float, default = 0.001,
help = 'initial learning rate')
# parser.add_argument('--decay_epoch', type=int, default = 5,
# help = 'decay epoch')
parser.add_argument('--traffic_file', default = 'data/METR-LA/metr-la.h5',
help = 'traffic file')
parser.add_argument('--SE_file', default = 'data/METR-LA/SE(METR).txt',
help = 'spatial emebdding file')
parser.add_argument('--model_file', default = 'data/METR-LA/METR',
help = 'save the model to disk')
parser.add_argument('--log_file', default = 'data/METR-LA/log(METR)',
help = 'log file')
args = parser.parse_args()
log = open(args.log_file, 'w')
device = torch.device("cuda:6" if torch.cuda.is_available() else "cpu")
log_string(log, "loading data....")
trainX, trainTE, trainY, valX, valTE, valY, testX, testTE, testY, SE, mean, std = utils.loadData(args)
# adj = np.load('./data/metr_adj.npy')
log_string(log, "loading end....")
def res(model, valX, valTE, valY, mean, std):
    model.eval()  # evaluation mode; this disables dropout
# it = test_iter.get_iterator()
num_val = valX.shape[0]
pred = []
label = []
num_batch = math.ceil(num_val / args.batch_size)
with torch.no_grad():
for batch_idx in range(num_batch):
if isinstance(model, torch.nn.Module):
start_idx = batch_idx * args.batch_size
end_idx = min(num_val, (batch_idx + 1) * args.batch_size)
X = torch.from_numpy(valX[start_idx : end_idx]).float().to(device)
y = valY[start_idx : end_idx]
te = torch.from_numpy(valTE[start_idx : end_idx]).to(device)
y_hat = model(X, te)
pred.append(y_hat.cpu().numpy()*std+mean)
label.append(y)
del X, te, y_hat
pred = np.concatenate(pred, axis = 0)
label = np.concatenate(label, axis = 0)
# print(pred.shape, label.shape)
for i in range(12):
mae, rmse, mape = metric(pred[:,i,:], label[:,i,:])
# if i == 11:
log_string(log,'step %d, mae: %.4f, rmse: %.4f, mape: %.4f' % (i+1, mae, rmse, mape))
# print('step %d, mae: %.4f, rmse: %.4f, mape: %.4f' % (i+1, mae, rmse, mape))
mae, rmse , mape = metric(pred, label)
log_string(log, 'average, mae: %.4f, rmse: %.4f, mape: %.4f' % (mae, rmse, mape))
# print('average, mae: %.4f, rmse: %.4f, mape: %.4f' % (mae, rmse, mape))
return mae
def test(model, valX, valTE, valY, mean, std):
model = torch.load(args.model_file)
mae = res(model, valX, valTE, valY, mean, std)
# print(mae)
# print('test loss %.4f, last val loss %.4f' % (test_loss, test_loss_l))
def _compute_loss(y_true, y_predicted):
# y_true = scaler.inverse_transform(y_true)
# y_predicted = scaler.inverse_transform(y_predicted)
return masked_mae(y_predicted, y_true, 0.0)
def masked_mae(preds, labels, null_val=np.nan):
if np.isnan(null_val):
mask = ~torch.isnan(labels)
else:
mask = (labels!=null_val)
mask = mask.float()
mask /= torch.mean((mask))
mask = torch.where(torch.isnan(mask), torch.zeros_like(mask), mask)
loss = torch.abs(preds-labels)
loss = loss * mask
loss = torch.where(torch.isnan(loss), torch.zeros_like(loss), loss)
return torch.mean(loss)
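# Illustrative behaviour of masked_mae above (not part of the original script):
# with preds = [2., 3., 4.], labels = [2., 0., 5.] and null_val = 0.0, the zero
# label is masked out and the remaining errors are re-weighted, so the result
# equals the mean absolute error over the two valid entries, i.e. 0.5.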
def masked_mae_loss(y_pred, y_true, flag):
mask = (y_true != 0).float()
mask /= mask.mean()
loss = torch.abs(y_pred - y_true)
loss = loss * mask
# trick for nans: https://discuss.pytorch.org/t/how-to-set-nan-in-tensor-to-0/3918/3
loss[loss != loss] = 0
if flag == True:
        loss = loss * mask_l  # NOTE: mask_l is not defined in this script; calling with flag=True would raise a NameError
return loss.mean()
def metric(pred, label):
with np.errstate(divide = 'ignore', invalid = 'ignore'):
mask = np.not_equal(label, 0)
mask = mask.astype(np.float32)
mask /= np.mean(mask)
mae = np.abs(np.subtract(pred, label)).astype(np.float32)
rmse = np.square(mae)
mape = np.divide(mae, label)
mae = np.nan_to_num(mae * mask)
mae = np.mean(mae)
rmse = np.nan_to_num(rmse * mask)
rmse = np.sqrt(np.mean(rmse))
mape = np.nan_to_num(mape * mask)
mape = np.mean(mape)
return mae, rmse, mape
if __name__ == '__main__':
log_string(log, "model constructed begin....")
model = DSTGNN(SE, 1, args.K*args.d, args.K, args.d, args.L).to(device)
log_string(log, "model constructed end....")
log_string(log, "test begin....")
test(model, testX, testTE, testY, mean, std)
log_string(log, "test end....")
|
[
"argparse.ArgumentParser",
"numpy.nan_to_num",
"lib.utils.loadData",
"numpy.isnan",
"numpy.mean",
"torch.no_grad",
"model.DSTGNN.DSTGNN",
"torch.isnan",
"torch.load",
"torch.mean",
"numpy.divide",
"torch.zeros_like",
"math.ceil",
"numpy.square",
"numpy.not_equal",
"torch.cuda.is_available",
"numpy.concatenate",
"torch.from_numpy",
"numpy.subtract",
"numpy.errstate",
"lib.utils.log_string",
"torch.abs"
] |
[((246, 271), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (269, 271), False, 'import argparse\n'), ((2329, 2364), 'lib.utils.log_string', 'log_string', (['log', '"""loading data...."""'], {}), "(log, 'loading data....')\n", (2339, 2364), False, 'from lib.utils import log_string\n'), ((2448, 2468), 'lib.utils.loadData', 'utils.loadData', (['args'], {}), '(args)\n', (2462, 2468), False, 'from lib import utils\n'), ((2512, 2546), 'lib.utils.log_string', 'log_string', (['log', '"""loading end...."""'], {}), "(log, 'loading end....')\n", (2522, 2546), False, 'from lib.utils import log_string\n'), ((2740, 2776), 'math.ceil', 'math.ceil', (['(num_val / args.batch_size)'], {}), '(num_val / args.batch_size)\n', (2749, 2776), False, 'import math\n'), ((3412, 3440), 'numpy.concatenate', 'np.concatenate', (['pred'], {'axis': '(0)'}), '(pred, axis=0)\n', (3426, 3440), True, 'import numpy as np\n'), ((3455, 3484), 'numpy.concatenate', 'np.concatenate', (['label'], {'axis': '(0)'}), '(label, axis=0)\n', (3469, 3484), True, 'import numpy as np\n'), ((3869, 3954), 'lib.utils.log_string', 'log_string', (['log', "('average, mae: %.4f, rmse: %.4f, mape: %.4f' % (mae, rmse, mape))"], {}), "(log, 'average, mae: %.4f, rmse: %.4f, mape: %.4f' % (mae, rmse,\n mape))\n", (3879, 3954), False, 'from lib.utils import log_string\n'), ((4109, 4136), 'torch.load', 'torch.load', (['args.model_file'], {}), '(args.model_file)\n', (4119, 4136), False, 'import torch\n'), ((4545, 4563), 'numpy.isnan', 'np.isnan', (['null_val'], {}), '(null_val)\n', (4553, 4563), True, 'import numpy as np\n'), ((4682, 4698), 'torch.mean', 'torch.mean', (['mask'], {}), '(mask)\n', (4692, 4698), False, 'import torch\n'), ((4784, 4809), 'torch.abs', 'torch.abs', (['(preds - labels)'], {}), '(preds - labels)\n', (4793, 4809), False, 'import torch\n'), ((4914, 4930), 'torch.mean', 'torch.mean', (['loss'], {}), '(loss)\n', (4924, 4930), False, 'import torch\n'), ((5044, 5070), 'torch.abs', 'torch.abs', (['(y_pred - y_true)'], {}), '(y_pred - y_true)\n', (5053, 5070), False, 'import torch\n'), ((5887, 5933), 'lib.utils.log_string', 'log_string', (['log', '"""model constructed begin...."""'], {}), "(log, 'model constructed begin....')\n", (5897, 5933), False, 'from lib.utils import log_string\n'), ((6014, 6058), 'lib.utils.log_string', 'log_string', (['log', '"""model constructed end...."""'], {}), "(log, 'model constructed end....')\n", (6024, 6058), False, 'from lib.utils import log_string\n'), ((6063, 6096), 'lib.utils.log_string', 'log_string', (['log', '"""test begin...."""'], {}), "(log, 'test begin....')\n", (6073, 6096), False, 'from lib.utils import log_string\n'), ((6150, 6181), 'lib.utils.log_string', 'log_string', (['log', '"""test end...."""'], {}), "(log, 'test end....')\n", (6160, 6181), False, 'from lib.utils import log_string\n'), ((2290, 2315), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2313, 2315), False, 'import torch\n'), ((2786, 2801), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2799, 2801), False, 'import torch\n'), ((3640, 3732), 'lib.utils.log_string', 'log_string', (['log', "('step %d, mae: %.4f, rmse: %.4f, mape: %.4f' % (i + 1, mae, rmse, mape))"], {}), "(log, 'step %d, mae: %.4f, rmse: %.4f, mape: %.4f' % (i + 1, mae,\n rmse, mape))\n", (3650, 3732), False, 'from lib.utils import log_string\n'), ((4724, 4741), 'torch.isnan', 'torch.isnan', (['mask'], {}), '(mask)\n', (4735, 4741), False, 'import torch\n'), ((4743, 4765), 'torch.zeros_like', 
'torch.zeros_like', (['mask'], {}), '(mask)\n', (4759, 4765), False, 'import torch\n'), ((4854, 4871), 'torch.isnan', 'torch.isnan', (['loss'], {}), '(loss)\n', (4865, 4871), False, 'import torch\n'), ((4873, 4895), 'torch.zeros_like', 'torch.zeros_like', (['loss'], {}), '(loss)\n', (4889, 4895), False, 'import torch\n'), ((5318, 5364), 'numpy.errstate', 'np.errstate', ([], {'divide': '"""ignore"""', 'invalid': '"""ignore"""'}), "(divide='ignore', invalid='ignore')\n", (5329, 5364), True, 'import numpy as np\n'), ((5385, 5407), 'numpy.not_equal', 'np.not_equal', (['label', '(0)'], {}), '(label, 0)\n', (5397, 5407), True, 'import numpy as np\n'), ((5463, 5476), 'numpy.mean', 'np.mean', (['mask'], {}), '(mask)\n', (5470, 5476), True, 'import numpy as np\n'), ((5558, 5572), 'numpy.square', 'np.square', (['mae'], {}), '(mae)\n', (5567, 5572), True, 'import numpy as np\n'), ((5588, 5609), 'numpy.divide', 'np.divide', (['mae', 'label'], {}), '(mae, label)\n', (5597, 5609), True, 'import numpy as np\n'), ((5624, 5649), 'numpy.nan_to_num', 'np.nan_to_num', (['(mae * mask)'], {}), '(mae * mask)\n', (5637, 5649), True, 'import numpy as np\n'), ((5664, 5676), 'numpy.mean', 'np.mean', (['mae'], {}), '(mae)\n', (5671, 5676), True, 'import numpy as np\n'), ((5692, 5718), 'numpy.nan_to_num', 'np.nan_to_num', (['(rmse * mask)'], {}), '(rmse * mask)\n', (5705, 5718), True, 'import numpy as np\n'), ((5772, 5798), 'numpy.nan_to_num', 'np.nan_to_num', (['(mape * mask)'], {}), '(mape * mask)\n', (5785, 5798), True, 'import numpy as np\n'), ((5814, 5827), 'numpy.mean', 'np.mean', (['mape'], {}), '(mape)\n', (5821, 5827), True, 'import numpy as np\n'), ((4581, 4600), 'torch.isnan', 'torch.isnan', (['labels'], {}), '(labels)\n', (4592, 4600), False, 'import torch\n'), ((5742, 5755), 'numpy.mean', 'np.mean', (['rmse'], {}), '(rmse)\n', (5749, 5755), True, 'import numpy as np\n'), ((5946, 6000), 'model.DSTGNN.DSTGNN', 'DSTGNN', (['SE', '(1)', '(args.K * args.d)', 'args.K', 'args.d', 'args.L'], {}), '(SE, 1, args.K * args.d, args.K, args.d, args.L)\n', (5952, 6000), False, 'from model.DSTGNN import DSTGNN\n'), ((5498, 5522), 'numpy.subtract', 'np.subtract', (['pred', 'label'], {}), '(pred, label)\n', (5509, 5522), True, 'import numpy as np\n'), ((3178, 3220), 'torch.from_numpy', 'torch.from_numpy', (['valTE[start_idx:end_idx]'], {}), '(valTE[start_idx:end_idx])\n', (3194, 3220), False, 'import torch\n'), ((3048, 3089), 'torch.from_numpy', 'torch.from_numpy', (['valX[start_idx:end_idx]'], {}), '(valX[start_idx:end_idx])\n', (3064, 3089), False, 'import torch\n')]
|
import os
import sys
import scipy.io
import scipy.misc
import matplotlib.pyplot as plt
from PIL import Image
from nst_utils import *
from loss_function import *
import numpy as np
import tensorflow as tf
import time
STYLE_LAYERS = [
('conv1_1', 0.2),
('conv2_1', 0.2),
('conv3_1', 0.2),
('conv4_1', 0.2),
('conv5_1', 0.2)
]
content_image = scipy.misc.imread("resources/content.jpg")
generate_config = np.array(content_image).shape
content_image = reshape_and_normalize_image(content_image)
style_image = scipy.misc.imread("resources/style.jpg")
style_image = reshape_and_normalize_image(style_image)
generated_image = generate_noise_image(content_image, 0.6, generate_config)
# plt.imshow(generated_image[0])
# plt.show()
# Reset the graph
tf.reset_default_graph()
# Start interactive session
sess = tf.InteractiveSession()
model = load_vgg_model("resources/imagenet-vgg-verydeep-19.mat", generate_config)
# Assign the content image to be the input of the VGG model.
sess.run(model['input'].assign(content_image))
# Select the output tensor of layer conv4_2
out = model['conv4_2']
# Set a_C to be the hidden layer activation from the layer we have selected
a_C = sess.run(out)
# Set a_G to be the hidden layer activation from same layer. Here, a_G references model['conv4_2']
# and isn't evaluated yet. Later in the code, we'll assign the image G as the model input, so that
# when we run the session, this will be the activations drawn from the appropriate layer, with G as input.
a_G = out
# Compute the content cost
J_content = compute_content_cost(a_C, a_G)
# Assign the input of the model to be the "style" image
sess.run(model['input'].assign(style_image))
# Compute the style cost
J_style = compute_style_cost(sess, model, STYLE_LAYERS)
# Default is 10, 40
J = total_cost(J_content, J_style, 10, 40)
# define optimizer (1 line)
optimizer = tf.train.AdamOptimizer(2.0)
# define train_step (1 line)
train_step = optimizer.minimize(J)
def model_nn(sess, input_image, num_iterations = 200):
# Initialize global variables (you need to run the session on the initializer)
### START CODE HERE ### (1 line)
sess.run(tf.global_variables_initializer())
### END CODE HERE ###
# Run the noisy input image (initial generated image) through the model. Use assign().
### START CODE HERE ### (1 line)
sess.run(model['input'].assign(input_image))
### END CODE HERE ###
for i in range(num_iterations):
# Run the session on the train_step to minimize the total cost
### START CODE HERE ### (1 line)
sess.run(train_step)
### END CODE HERE ###
# Compute the generated image by running the session on the current model['input']
### START CODE HERE ### (1 line)
generated_image = sess.run(model['input'])
### END CODE HERE ###
# Print every 20 iteration.
if i%20 == 0:
Jt, Jc, Js = sess.run([J, J_content, J_style])
print("Iteration " + str(i) + " :")
print(time.asctime(time.localtime(time.time())))
print("total cost = " + str(Jt))
print("content cost = " + str(Jc))
print("style cost = " + str(Js))
# save current generated image in the "/output" directory
save_image("output/" + str(i) + ".png", generated_image)
# save last generated image
save_image('output/generated_image.jpg', generated_image)
return generated_image
print("start:" + time.asctime(time.localtime(time.time())))
model_nn(sess, generated_image)
|
[
"tensorflow.global_variables_initializer",
"tensorflow.reset_default_graph",
"time.time",
"numpy.array",
"tensorflow.InteractiveSession",
"tensorflow.train.AdamOptimizer"
] |
[((765, 789), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (787, 789), True, 'import tensorflow as tf\n'), ((826, 849), 'tensorflow.InteractiveSession', 'tf.InteractiveSession', ([], {}), '()\n', (847, 849), True, 'import tensorflow as tf\n'), ((1887, 1914), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['(2.0)'], {}), '(2.0)\n', (1909, 1914), True, 'import tensorflow as tf\n'), ((423, 446), 'numpy.array', 'np.array', (['content_image'], {}), '(content_image)\n', (431, 446), True, 'import numpy as np\n'), ((2174, 2207), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (2205, 2207), True, 'import tensorflow as tf\n'), ((3575, 3586), 'time.time', 'time.time', ([], {}), '()\n', (3584, 3586), False, 'import time\n'), ((3094, 3105), 'time.time', 'time.time', ([], {}), '()\n', (3103, 3105), False, 'import time\n')]
|
import networkx as nx
import numpy as np
import matplotlib.pyplot as plt
"""
Plots random networks with a varying chance of connections between nodes
for figure 2.3.
"""
node_color = 'red'
node_border_color = 'black'
node_border_width = .6
edge_color = 'black'
N = 10
num_graphs = 6
N_columns = 3
N_rows = 2
P = np.linspace(0.0, 1.0, num=num_graphs)
print(P)
def draw(G, pos, ax):
# Plots a graph.
nodes1 = nx.draw_networkx_nodes(G, pos=pos, node_color=node_color, ax=ax)
nodes1.set_edgecolor(node_border_color)
nodes1.set_linewidth(node_border_width)
nx.draw_networkx_edges(G, pos, edge_color=edge_color, alpha=.8,
ax=ax)
ax.axis('off')
return ax
fig, axs = plt.subplots(N_columns, N_rows)
G = nx.fast_gnp_random_graph(N, P[0], seed=0)
pos = nx.spring_layout(G)
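# Computing the spring layout once and reusing `pos` for every panel keeps the
# node positions fixed across subplots, so only the edge density appears to
# change as P grows.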
c = 0
for i in range(N_columns):
for j in range(N_rows):
G = nx.fast_gnp_random_graph(N, P[c], seed=0)
draw(G, pos, axs[i, j])
axs[i, j].text(0.5, -0.3, "P = " + str(round(P[c], 1)), size=12,
ha="center", transform=axs[i, j].transAxes)
c += 1
plt.subplots_adjust(hspace=0.3)
plt.show()
|
[
"matplotlib.pyplot.show",
"networkx.draw_networkx_edges",
"networkx.fast_gnp_random_graph",
"networkx.spring_layout",
"networkx.draw_networkx_nodes",
"numpy.linspace",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.subplots"
] |
[((322, 359), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)'], {'num': 'num_graphs'}), '(0.0, 1.0, num=num_graphs)\n', (333, 359), True, 'import numpy as np\n'), ((729, 760), 'matplotlib.pyplot.subplots', 'plt.subplots', (['N_columns', 'N_rows'], {}), '(N_columns, N_rows)\n', (741, 760), True, 'import matplotlib.pyplot as plt\n'), ((766, 807), 'networkx.fast_gnp_random_graph', 'nx.fast_gnp_random_graph', (['N', 'P[0]'], {'seed': '(0)'}), '(N, P[0], seed=0)\n', (790, 807), True, 'import networkx as nx\n'), ((814, 833), 'networkx.spring_layout', 'nx.spring_layout', (['G'], {}), '(G)\n', (830, 833), True, 'import networkx as nx\n'), ((1138, 1169), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(0.3)'}), '(hspace=0.3)\n', (1157, 1169), True, 'import matplotlib.pyplot as plt\n'), ((1170, 1180), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1178, 1180), True, 'import matplotlib.pyplot as plt\n'), ((427, 491), 'networkx.draw_networkx_nodes', 'nx.draw_networkx_nodes', (['G'], {'pos': 'pos', 'node_color': 'node_color', 'ax': 'ax'}), '(G, pos=pos, node_color=node_color, ax=ax)\n', (449, 491), True, 'import networkx as nx\n'), ((584, 655), 'networkx.draw_networkx_edges', 'nx.draw_networkx_edges', (['G', 'pos'], {'edge_color': 'edge_color', 'alpha': '(0.8)', 'ax': 'ax'}), '(G, pos, edge_color=edge_color, alpha=0.8, ax=ax)\n', (606, 655), True, 'import networkx as nx\n'), ((908, 949), 'networkx.fast_gnp_random_graph', 'nx.fast_gnp_random_graph', (['N', 'P[c]'], {'seed': '(0)'}), '(N, P[c], seed=0)\n', (932, 949), True, 'import networkx as nx\n')]
|
from copy import copy
import numpy as np
from nipy.core.image.image import Image
class ImageList(object):
''' Class to contain ND image as list of (N-1)D images '''
def __init__(self, images=None):
"""
A lightweight implementation of a list of images.
Parameters
----------
images : iterable
           an iterable and sliceable object whose items are meant to be
           images; this is checked by asserting that each has a
`coordmap` attribute
>>> import numpy as np
>>> from nipy.testing import funcfile
>>> from nipy.core.api import Image, ImageList
>>> from nipy.io.api import load_image
>>> funcim = load_image(funcfile)
>>> ilist = ImageList(funcim)
>>> sublist = ilist[2:5]
Slicing an ImageList returns a new ImageList
>>> isinstance(sublist, ImageList)
True
Indexing an ImageList returns a new Image
>>> newimg = ilist[2]
>>> isinstance(newimg, Image)
True
>>> isinstance(newimg, ImageList)
False
>>> np.asarray(sublist).shape
(3, 2, 20, 20)
>>> np.asarray(newimg).shape
(2, 20, 20)
"""
if images is None:
self.list = []
return
for im in images:
if not hasattr(im, "coordmap"):
raise ValueError("expecting each element of images "
" to have a 'coordmap' attribute")
self.list = images
@classmethod
def from_image(klass, image, axis=-1):
if axis is None:
            raise ValueError('axis must be an array axis number or -1')
imlist = []
coordmap = image.coordmap
data = np.asarray(image)
data = np.rollaxis(data, axis)
imlist = [Image(dataslice, copy(coordmap))
for dataslice in data]
return klass(imlist)
def __setitem__(self, index, value):
"""
self.list[index] = value
"""
self.list[index] = value
def __getitem__(self, index):
"""
self.list[index]
"""
if type(index) is type(1):
return self.list[index]
else:
return ImageList(images=self.list[index])
def __getslice__(self, i, j):
"""
Return another ImageList instance consisting with
images self.list[i:j]
"""
return ImageList(images=self.list[i:j])
def __array__(self):
"""Return data in ndarray. Called through numpy.array.
Examples
--------
>>> import numpy as np
>>> from nipy.testing import funcfile
>>> from nipy.core.api import ImageList
>>> from nipy.io.api import load_image
>>> funcim = load_image(funcfile)
>>> ilist = ImageList(funcim)
>>> np.asarray(ilist).shape
(20, 2, 20, 20)
"""
return np.asarray([np.asarray(im) for im in self.list])
def __iter__(self):
self._iter = iter(self.list)
return self
def next(self):
return self._iter.next()
|
[
"numpy.asarray",
"copy.copy",
"numpy.rollaxis"
] |
[((1776, 1793), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (1786, 1793), True, 'import numpy as np\n'), ((1809, 1832), 'numpy.rollaxis', 'np.rollaxis', (['data', 'axis'], {}), '(data, axis)\n', (1820, 1832), True, 'import numpy as np\n'), ((1868, 1882), 'copy.copy', 'copy', (['coordmap'], {}), '(coordmap)\n', (1872, 1882), False, 'from copy import copy\n'), ((3009, 3023), 'numpy.asarray', 'np.asarray', (['im'], {}), '(im)\n', (3019, 3023), True, 'import numpy as np\n')]
|
import numpy as np
import matplotlib.pyplot as plt
def plot_price_history(hist):
''' plot price history '''
plt.plot(hist, '-')
plt.xlabel("time steps"); plt.ylabel("price")
plt.title("price history")
plt.show()
def plot_price_std(arr):
''' plot std of price history over simulations'''
plt.plot(arr, '-')
plt.xlabel("time steps"); plt.ylabel("std of price")
plt.title("standard deviation of daily prices")
plt.show()
def plot_wealth_dist(total):
total = {"user": total[0], "miner": total[1], "speculator": total[2]}
total = dict(sorted(total.items(), key=lambda x:x[1]))
plt.pie(list(total.values()), labels=list(total.keys()) )
plt.title("wealth distribution post simulation")
plt.show()
def plot_hash_power_prop(prop):
''' plot miner proportion '''
plt.pie(prop)
plt.title("proportions of miner hash power")
plt.show()
price_hist = np.load("price_hist.npy")
hash_power = np.load("hash_power.npy")
wealth_dist = np.load("wealth_dist.npy")
plot_price_history(np.mean(price_hist, axis=0))
plot_price_std(np.std(price_hist, axis=0))
plot_wealth_dist(np.mean(wealth_dist, axis=0))
# # plot_hash_power_prop(np.mean(hash_power, axis=0))
keep = 20
arr = np.zeros(keep)
for hp in hash_power:
arr = np.add(hp[:keep], arr)
arr /= 100
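# (Assumption: 100 is the number of simulation runs stored in hash_power, so
# `arr` ends up as the average hash-power share of the first `keep` miners.)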
plot_hash_power_prop(arr)
|
[
"matplotlib.pyplot.title",
"numpy.load",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.std",
"numpy.add",
"numpy.zeros",
"numpy.mean",
"matplotlib.pyplot.pie",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] |
[((859, 884), 'numpy.load', 'np.load', (['"""price_hist.npy"""'], {}), "('price_hist.npy')\n", (866, 884), True, 'import numpy as np\n'), ((898, 923), 'numpy.load', 'np.load', (['"""hash_power.npy"""'], {}), "('hash_power.npy')\n", (905, 923), True, 'import numpy as np\n'), ((938, 964), 'numpy.load', 'np.load', (['"""wealth_dist.npy"""'], {}), "('wealth_dist.npy')\n", (945, 964), True, 'import numpy as np\n'), ((1175, 1189), 'numpy.zeros', 'np.zeros', (['keep'], {}), '(keep)\n', (1183, 1189), True, 'import numpy as np\n'), ((111, 130), 'matplotlib.pyplot.plot', 'plt.plot', (['hist', '"""-"""'], {}), "(hist, '-')\n", (119, 130), True, 'import matplotlib.pyplot as plt\n'), ((132, 156), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time steps"""'], {}), "('time steps')\n", (142, 156), True, 'import matplotlib.pyplot as plt\n'), ((158, 177), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""price"""'], {}), "('price')\n", (168, 177), True, 'import matplotlib.pyplot as plt\n'), ((179, 205), 'matplotlib.pyplot.title', 'plt.title', (['"""price history"""'], {}), "('price history')\n", (188, 205), True, 'import matplotlib.pyplot as plt\n'), ((207, 217), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (215, 217), True, 'import matplotlib.pyplot as plt\n'), ((296, 314), 'matplotlib.pyplot.plot', 'plt.plot', (['arr', '"""-"""'], {}), "(arr, '-')\n", (304, 314), True, 'import matplotlib.pyplot as plt\n'), ((316, 340), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time steps"""'], {}), "('time steps')\n", (326, 340), True, 'import matplotlib.pyplot as plt\n'), ((342, 368), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""std of price"""'], {}), "('std of price')\n", (352, 368), True, 'import matplotlib.pyplot as plt\n'), ((370, 417), 'matplotlib.pyplot.title', 'plt.title', (['"""standard deviation of daily prices"""'], {}), "('standard deviation of daily prices')\n", (379, 417), True, 'import matplotlib.pyplot as plt\n'), ((419, 429), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (427, 429), True, 'import matplotlib.pyplot as plt\n'), ((647, 695), 'matplotlib.pyplot.title', 'plt.title', (['"""wealth distribution post simulation"""'], {}), "('wealth distribution post simulation')\n", (656, 695), True, 'import matplotlib.pyplot as plt\n'), ((697, 707), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (705, 707), True, 'import matplotlib.pyplot as plt\n'), ((773, 786), 'matplotlib.pyplot.pie', 'plt.pie', (['prop'], {}), '(prop)\n', (780, 786), True, 'import matplotlib.pyplot as plt\n'), ((788, 832), 'matplotlib.pyplot.title', 'plt.title', (['"""proportions of miner hash power"""'], {}), "('proportions of miner hash power')\n", (797, 832), True, 'import matplotlib.pyplot as plt\n'), ((834, 844), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (842, 844), True, 'import matplotlib.pyplot as plt\n'), ((985, 1012), 'numpy.mean', 'np.mean', (['price_hist'], {'axis': '(0)'}), '(price_hist, axis=0)\n', (992, 1012), True, 'import numpy as np\n'), ((1029, 1055), 'numpy.std', 'np.std', (['price_hist'], {'axis': '(0)'}), '(price_hist, axis=0)\n', (1035, 1055), True, 'import numpy as np\n'), ((1074, 1102), 'numpy.mean', 'np.mean', (['wealth_dist'], {'axis': '(0)'}), '(wealth_dist, axis=0)\n', (1081, 1102), True, 'import numpy as np\n'), ((1219, 1241), 'numpy.add', 'np.add', (['hp[:keep]', 'arr'], {}), '(hp[:keep], arr)\n', (1225, 1241), True, 'import numpy as np\n')]
|
#Importing libraries
import argparse
import cv2
from imutils.video import VideoStream #threaded wrapper that provides a smooth webcam stream
from imutils import face_utils, translate, resize
#face_utils : converts dlib shapes to numpy arrays so they can be used further.
#translate : it's going to translate the current position of our eyes to the previous pos.
#resize : for faster computation
import time
import dlib
import numpy as np
#Taking arguments from command line
parser = argparse.ArgumentParser() #you initialize it as such
parser.add_argument("-predictor", required=True, help="path to predictor")
#add_argument tells you what needs to be given as an input; the help string describes it
args = parser.parse_args() #you take the arguments from command line
#Controls
print("Starting Program.")
print("Press 'Esc' to quit")
#Video from webcam
video = VideoStream().start()
time.sleep(1.5) #to halt the code; it will pause for 1.5 sec
#the detector is responsible for detecting the face and the predictor for predicting the 68 points on the face
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(args.predictor) #the 68-point landmark predictor, loaded once outside the main loop
#For taking video
counter = 0
#creating separate eye layer and eye mask to like extract only the eyes from the face and to work with it
eye_layer = np.zeros((300,400,3),dtype = 'uint8')
eye_mask = eye_layer.copy()
eye_mask = cv2.cvtColor(eye_mask , cv2.COLOR_BGR2GRAY) #eyes are white on a black background
#create translated mask to know the mask of the previous pos of all the eyes
translated = np.zeros((300,400,3),dtype = 'uint8')
translated_mask = eye_mask.copy()
#creating the eye list for storing all the positions of eyes in different frames
class EyeList(object) :
def __init__(self,length) :
self.length = length #length will be the total number of pos to be stored
self.eyes = []
def push(self,newCoords) :
if len(self.eyes) < self.length :
self.eyes.append(newCoords)
#when we reach the max limit for the eyelist , we remove the oldest coordinates
else :
self.eyes.pop(0)
self.eyes.append(newCoords)
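# Behaviour sketch (illustrative, not part of the original script): EyeList acts
# as a fixed-length FIFO, e.g.
#   el = EyeList(2); el.push([0, 0]); el.push([1, 1]); el.push([2, 2])
#   el.eyes  ->  [[1, 1], [2, 2]]   # the oldest coordinates were dropped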
eye_list = EyeList(10) #10 coordinates/positions of eyes
#Making Video
img_list = []
out = cv2.VideoWriter('Filter-Eyes.avi',cv2.VideoWriter_fourcc(*'DIVX'), 15, (400,300))
while True :
frame = video.read()
frame = resize(frame,width = 400)
gray = cv2.cvtColor(frame , cv2.COLOR_BGR2GRAY)
rectangle = detector(gray,0)
# fill our masks and frames with 0 (black) on every draw loop
eye_mask.fill(0) #this will only show the exact eye pos on the screen and not its continuous movement
eye_layer.fill(0)
translated.fill(0)
translated_mask.fill(0)
for rect in rectangle :
x,y,w,h = face_utils.rect_to_bb(rect) #gives the coordinates and size
#cv2.rectangle(frame,(x,y),(x+w,y+h),(255,0,0),2)
shape = predictor(gray,rect) #dlib output will be received after predicting
shape = face_utils.shape_to_np(shape)
lefteye = shape[36:42]
righteye = shape[42:48]
# fill our mask in the shape of our eyes
cv2.fillPoly(eye_mask,[lefteye],255)
cv2.fillPoly(eye_mask,[righteye],255)
#take the eyemask and do bitwise AND with the frame
'''What happens is that the bitwise AND will be performed between eyemask and frame , whichever gives the
true or 1 will be shown actually in the eyelayer'''
eye_layer = cv2.bitwise_and(frame,frame,mask = eye_mask)
#Getting the coordinates for the eye at diff position in each frame
ex,ey,ew,eh = cv2.boundingRect(eye_mask)
eye_list.push([ex,ey])
#Accessing the coordinates in the reverse order
for i in reversed(eye_list.eyes) :
translated1 = translate(eye_layer, i[0] -ex ,i[1]-ey) #translate take x and y coords to translate/move from
translated1_mask = translate(eye_mask, i[0]-ex ,i[1]-ey)
            translated_mask = np.maximum(translated_mask, translated1_mask) #adding two 255 masks would overflow uint8, so take the elementwise max instead
#cut out the new translated mask
translated = cv2.bitwise_and(translated,translated,mask=255-translated1_mask)
#paste in the newly translated eye position
translated += translated1
'''for point in shape[36:48] : #we will only extract the eyes points in the entire face
cv2.circle(frame,tuple(point),2,(0,255,0)) #marks the points embracing the detected shape of face'''
#translated_mask will have all the previous eye position so we will black out those ones from the current eye
frame = cv2.bitwise_and(frame,frame,mask = 255-translated_mask)
frame += translated #paste in the translated eye image
cv2.imshow("Eye Glitch",frame)
img_list.append(frame)
key = cv2.waitKey(1) & 0xFF
if key == 27 :
break
#Writing all frames to make a video file
for img in img_list :
out.write(img)
out.release()
video.stop()
cv2.destroyAllWindows()
|
[
"numpy.maximum",
"argparse.ArgumentParser",
"cv2.VideoWriter_fourcc",
"cv2.bitwise_and",
"cv2.fillPoly",
"imutils.face_utils.shape_to_np",
"imutils.translate",
"imutils.resize",
"cv2.imshow",
"dlib.shape_predictor",
"cv2.cvtColor",
"cv2.boundingRect",
"cv2.destroyAllWindows",
"cv2.waitKey",
"time.sleep",
"dlib.get_frontal_face_detector",
"imutils.video.VideoStream",
"numpy.zeros",
"imutils.face_utils.rect_to_bb"
] |
[((482, 507), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (505, 507), False, 'import argparse\n'), ((879, 894), 'time.sleep', 'time.sleep', (['(1.5)'], {}), '(1.5)\n', (889, 894), False, 'import time\n'), ((1069, 1101), 'dlib.get_frontal_face_detector', 'dlib.get_frontal_face_detector', ([], {}), '()\n', (1099, 1101), False, 'import dlib\n'), ((1116, 1152), 'dlib.shape_predictor', 'dlib.shape_predictor', (['args.predictor'], {}), '(args.predictor)\n', (1136, 1152), False, 'import dlib\n'), ((1341, 1379), 'numpy.zeros', 'np.zeros', (['(300, 400, 3)'], {'dtype': '"""uint8"""'}), "((300, 400, 3), dtype='uint8')\n", (1349, 1379), True, 'import numpy as np\n'), ((1421, 1463), 'cv2.cvtColor', 'cv2.cvtColor', (['eye_mask', 'cv2.COLOR_BGR2GRAY'], {}), '(eye_mask, cv2.COLOR_BGR2GRAY)\n', (1433, 1463), False, 'import cv2\n'), ((1598, 1636), 'numpy.zeros', 'np.zeros', (['(300, 400, 3)'], {'dtype': '"""uint8"""'}), "((300, 400, 3), dtype='uint8')\n", (1606, 1636), True, 'import numpy as np\n'), ((5484, 5507), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (5505, 5507), False, 'import cv2\n'), ((2362, 2393), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'DIVX'"], {}), "(*'DIVX')\n", (2384, 2393), False, 'import cv2\n'), ((2474, 2498), 'imutils.resize', 'resize', (['frame'], {'width': '(400)'}), '(frame, width=400)\n', (2480, 2498), False, 'from imutils import face_utils, translate, resize\n'), ((2516, 2555), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2GRAY'], {}), '(frame, cv2.COLOR_BGR2GRAY)\n', (2528, 2555), False, 'import cv2\n'), ((5067, 5124), 'cv2.bitwise_and', 'cv2.bitwise_and', (['frame', 'frame'], {'mask': '(255 - translated_mask)'}), '(frame, frame, mask=255 - translated_mask)\n', (5082, 5124), False, 'import cv2\n'), ((5203, 5234), 'cv2.imshow', 'cv2.imshow', (['"""Eye Glitch"""', 'frame'], {}), "('Eye Glitch', frame)\n", (5213, 5234), False, 'import cv2\n'), ((856, 869), 'imutils.video.VideoStream', 'VideoStream', ([], {}), '()\n', (867, 869), False, 'from imutils.video import VideoStream\n'), ((2939, 2966), 'imutils.face_utils.rect_to_bb', 'face_utils.rect_to_bb', (['rect'], {}), '(rect)\n', (2960, 2966), False, 'from imutils import face_utils, translate, resize\n'), ((3174, 3203), 'imutils.face_utils.shape_to_np', 'face_utils.shape_to_np', (['shape'], {}), '(shape)\n', (3196, 3203), False, 'from imutils import face_utils, translate, resize\n'), ((3356, 3394), 'cv2.fillPoly', 'cv2.fillPoly', (['eye_mask', '[lefteye]', '(255)'], {}), '(eye_mask, [lefteye], 255)\n', (3368, 3394), False, 'import cv2\n'), ((3407, 3446), 'cv2.fillPoly', 'cv2.fillPoly', (['eye_mask', '[righteye]', '(255)'], {}), '(eye_mask, [righteye], 255)\n', (3419, 3446), False, 'import cv2\n'), ((3732, 3776), 'cv2.bitwise_and', 'cv2.bitwise_and', (['frame', 'frame'], {'mask': 'eye_mask'}), '(frame, frame, mask=eye_mask)\n', (3747, 3776), False, 'import cv2\n'), ((3885, 3911), 'cv2.boundingRect', 'cv2.boundingRect', (['eye_mask'], {}), '(eye_mask)\n', (3901, 3911), False, 'import cv2\n'), ((5292, 5306), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (5303, 5306), False, 'import cv2\n'), ((4094, 4136), 'imutils.translate', 'translate', (['eye_layer', '(i[0] - ex)', '(i[1] - ey)'], {}), '(eye_layer, i[0] - ex, i[1] - ey)\n', (4103, 4136), False, 'from imutils import face_utils, translate, resize\n'), ((4226, 4267), 'imutils.translate', 'translate', (['eye_mask', '(i[0] - ex)', '(i[1] - ey)'], {}), '(eye_mask, i[0] - ex, i[1] - ey)\n', (4235, 4267), 
False, 'from imutils import face_utils, translate, resize\n'), ((4300, 4345), 'numpy.maximum', 'np.maximum', (['translated_mask', 'translated1_mask'], {}), '(translated_mask, translated1_mask)\n', (4310, 4345), True, 'import numpy as np\n'), ((4514, 4582), 'cv2.bitwise_and', 'cv2.bitwise_and', (['translated', 'translated'], {'mask': '(255 - translated1_mask)'}), '(translated, translated, mask=255 - translated1_mask)\n', (4529, 4582), False, 'import cv2\n')]
|
import torch
import numpy as np
import random
from transformers import T5Tokenizer, T5ForConditionalGeneration
#Set all seeds to make output deterministic
torch.manual_seed(0)
np.random.seed(0)
random.seed(0)
#Paragraphs for which we want to generate queries
paragraphs = [
"Python is an interpreted, high-level and general-purpose programming language. Python's design philosophy emphasizes code readability with its notable use of significant whitespace. Its language constructs and object-oriented approach aim to help programmers write clear, logical code for small and large-scale projects.",
"Python is dynamically-typed and garbage-collected. It supports multiple programming paradigms, including structured (particularly, procedural), object-oriented and functional programming. Python is often described as a \"batteries included\" language due to its comprehensive standard library.",
"Python was created in the late 1980s, and first released in 1991, by <NAME> as a successor to the ABC programming language. Python 2.0, released in 2000, introduced new features, such as list comprehensions, and a garbage collection system with reference counting, and was discontinued with version 2.7 in 2020. Python 3.0, released in 2008, was a major revision of the language that is not completely backward-compatible and much Python 2 code does not run unmodified on Python 3. With Python 2's end-of-life (and pip having dropped support in 2021), only Python 3.6.x and later are supported, with older versions still supporting e.g. Windows 7 (and old installers not restricted to 64-bit Windows).",
"Python interpreters are supported for mainstream operating systems and available for a few more (and in the past supported many more). A global community of programmers develops and maintains CPython, a free and open-source reference implementation. A non-profit organization, the Python Software Foundation, manages and directs resources for Python and CPython development.",
"As of January 2021, Python ranks third in TIOBE’s index of most popular programming languages, behind C and Java, having previously gained second place and their award for the most popularity gain for 2020.",
"Java is a class-based, object-oriented programming language that is designed to have as few implementation dependencies as possible. It is a general-purpose programming language intended to let application developers write once, run anywhere (WORA), meaning that compiled Java code can run on all platforms that support Java without the need for recompilation. Java applications are typically compiled to bytecode that can run on any Java virtual machine (JVM) regardless of the underlying computer architecture. The syntax of Java is similar to C and C++, but has fewer low-level facilities than either of them. The Java runtime provides dynamic capabilities (such as reflection and runtime code modification) that are typically not available in traditional compiled languages. As of 2019, Java was one of the most popular programming languages in use according to GitHub, particularly for client-server web applications, with a reported 9 million developers.",
"Java was originally developed by <NAME> at Sun Microsystems (which has since been acquired by Oracle) and released in 1995 as a core component of Sun Microsystems' Java platform. The original and reference implementation Java compilers, virtual machines, and class libraries were originally released by Sun under proprietary licenses. As of May 2007, in compliance with the specifications of the Java Community Process, Sun had relicensed most of its Java technologies under the GNU General Public License. Oracle offers its own HotSpot Java Virtual Machine, however the official reference implementation is the OpenJDK JVM which is free open source software and used by most developers and is the default JVM for almost all Linux distributions.",
"As of September 2020, the latest version is Java 15, with Java 11, a currently supported long-term support (LTS) version, released on September 25, 2018. Oracle released the last zero-cost public update for the legacy version Java 8 LTS in January 2019 for commercial use, although it will otherwise still support Java 8 with public updates for personal use indefinitely. Other vendors have begun to offer zero-cost builds of OpenJDK 8 and 11 that are still receiving security and other upgrades.",
"Oracle (and others) highly recommend uninstalling outdated versions of Java because of serious risks due to unresolved security issues. Since Java 9, 10, 12, 13, and 14 are no longer supported, Oracle advises its users to immediately transition to the latest version (currently Java 15) or an LTS release."
]
# For available models for query generation, see: https://huggingface.co/BeIR/
# Here, we use a T5-large model that was trained on the MS MARCO dataset
tokenizer = T5Tokenizer.from_pretrained('BeIR/query-gen-msmarco-t5-large')
model = T5ForConditionalGeneration.from_pretrained('BeIR/query-gen-msmarco-t5-large')
model.eval()
#Select the device
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model.to(device)
#Iterate over the paragraphs and generate some queries for each
with torch.no_grad():
for para in paragraphs:
input_ids = tokenizer.encode(para, return_tensors='pt').to(device)
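        # Sample three candidate queries per paragraph using nucleus (top-p) sampling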
outputs = model.generate(
input_ids=input_ids,
max_length=64,
do_sample=True,
top_p=0.95,
num_return_sequences=3)
print("\nParagraph:")
print(para)
print("\nGenerated Queries:")
for i in range(len(outputs)):
query = tokenizer.decode(outputs[i], skip_special_tokens=True)
print(f'{i + 1}: {query}')
"""
Output of the script:
Paragraph:
Python is an interpreted, high-level and general-purpose programming language. Python's design philosophy emphasizes code readability with its notable use of significant whitespace. Its language constructs and object-oriented approach aim to help programmers write clear, logical code for small and large-scale projects.
Generated Queries:
1: what is python language used for
2: what is python programming
3: what language do i use for scripts
Paragraph:
Python is dynamically-typed and garbage-collected. It supports multiple programming paradigms, including structured (particularly, procedural), object-oriented and functional programming. Python is often described as a "batteries included" language due to its comprehensive standard library.
Generated Queries:
1: what is python language
2: what programming paradigms do python support
3: what programming languages use python
Paragraph:
Python was created in the late 1980s, and first released in 1991, by <NAME> as a successor to the ABC programming language. Python 2.0, released in 2000, introduced new features, such as list comprehensions, and a garbage collection system with reference counting, and was discontinued with version 2.7 in 2020. Python 3.0, released in 2008, was a major revision of the language that is not completely backward-compatible and much Python 2 code does not run unmodified on Python 3. With Python 2's end-of-life (and pip having dropped support in 2021), only Python 3.6.x and later are supported, with older versions still supporting e.g. Windows 7 (and old installers not restricted to 64-bit Windows).
Generated Queries:
1: what year did python start
2: when does the next python update release
3: when did python come out?
Paragraph:
Python interpreters are supported for mainstream operating systems and available for a few more (and in the past supported many more). A global community of programmers develops and maintains CPython, a free and open-source reference implementation. A non-profit organization, the Python Software Foundation, manages and directs resources for Python and CPython development.
Generated Queries:
1: what platform is python available on
2: what is python used for
3: what is python?
Paragraph:
As of January 2021, Python ranks third in TIOBE’s index of most popular programming languages, behind C and Java, having previously gained second place and their award for the most popularity gain for 2020.
Generated Queries:
1: what is the most used programming language in the world
2: what is python language
3: what is the most popular programming language in the world?
Paragraph:
Java is a class-based, object-oriented programming language that is designed to have as few implementation dependencies as possible. It is a general-purpose programming language intended to let application developers write once, run anywhere (WORA), meaning that compiled Java code can run on all platforms that support Java without the need for recompilation. Java applications are typically compiled to bytecode that can run on any Java virtual machine (JVM) regardless of the underlying computer architecture. The syntax of Java is similar to C and C++, but has fewer low-level facilities than either of them. The Java runtime provides dynamic capabilities (such as reflection and runtime code modification) that are typically not available in traditional compiled languages. As of 2019, Java was one of the most popular programming languages in use according to GitHub, particularly for client-server web applications, with a reported 9 million developers.
Generated Queries:
1: java how java works
2: what language is similar to java
3: what is java language
Paragraph:
Java was originally developed by <NAME> at Sun Microsystems (which has since been acquired by Oracle) and released in 1995 as a core component of Sun Microsystems' Java platform. The original and reference implementation Java compilers, virtual machines, and class libraries were originally released by Sun under proprietary licenses. As of May 2007, in compliance with the specifications of the Java Community Process, Sun had relicensed most of its Java technologies under the GNU General Public License. Oracle offers its own HotSpot Java Virtual Machine, however the official reference implementation is the OpenJDK JVM which is free open source software and used by most developers and is the default JVM for almost all Linux distributions.
Generated Queries:
1: what is java created by
2: when was java introduced to linux
3: who developed java?
Paragraph:
As of September 2020, the latest version is Java 15, with Java 11, a currently supported long-term support (LTS) version, released on September 25, 2018. Oracle released the last zero-cost public update for the legacy version Java 8 LTS in January 2019 for commercial use, although it will otherwise still support Java 8 with public updates for personal use indefinitely. Other vendors have begun to offer zero-cost builds of OpenJDK 8 and 11 that are still receiving security and other upgrades.
Generated Queries:
1: what is the latest version of java
2: what is the latest java version
3: what is the latest version of java
Paragraph:
Oracle (and others) highly recommend uninstalling outdated versions of Java because of serious risks due to unresolved security issues. Since Java 9, 10, 12, 13, and 14 are no longer supported, Oracle advises its users to immediately transition to the latest version (currently Java 15) or an LTS release.
Generated Queries:
1: why is oracle not supported
2: what version is oracle used in
3: which java version is obsolete
"""
|
[
"numpy.random.seed",
"torch.manual_seed",
"transformers.T5ForConditionalGeneration.from_pretrained",
"random.seed",
"torch.cuda.is_available",
"transformers.T5Tokenizer.from_pretrained",
"torch.no_grad"
] |
[((156, 176), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (173, 176), False, 'import torch\n'), ((177, 194), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (191, 194), True, 'import numpy as np\n'), ((195, 209), 'random.seed', 'random.seed', (['(0)'], {}), '(0)\n', (206, 209), False, 'import random\n'), ((4876, 4938), 'transformers.T5Tokenizer.from_pretrained', 'T5Tokenizer.from_pretrained', (['"""BeIR/query-gen-msmarco-t5-large"""'], {}), "('BeIR/query-gen-msmarco-t5-large')\n", (4903, 4938), False, 'from transformers import T5Tokenizer, T5ForConditionalGeneration\n'), ((4947, 5024), 'transformers.T5ForConditionalGeneration.from_pretrained', 'T5ForConditionalGeneration.from_pretrained', (['"""BeIR/query-gen-msmarco-t5-large"""'], {}), "('BeIR/query-gen-msmarco-t5-large')\n", (4989, 5024), False, 'from transformers import T5Tokenizer, T5ForConditionalGeneration\n'), ((5077, 5102), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (5100, 5102), False, 'import torch\n'), ((5202, 5217), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5215, 5217), False, 'import torch\n')]
|
from __future__ import print_function
import os
import time
import numpy as np
import sys
import gym
from PIL import Image
from gibson.core.render.profiler import Profiler
from gibson.envs.husky_env import *
from gibson.envs.ant_env import *
from gibson.envs.humanoid_env import *
from gibson.envs.drone_env import *
import pybullet as p
class RandomAgent(object):
"""The world's simplest agent"""
def __init__(self, action_space, is_discrete):
self.action_space = action_space
self.is_discrete = is_discrete
def act(self, observation, reward=None):
if self.is_discrete:
action = np.random.randint(self.action_space.n)
else:
action = np.zeros(self.action_space.shape[0])
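            # with 50% probability, set one randomly chosen action dimension to -1, 0 or 1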
if (np.random.random() < 0.5):
action[np.random.choice(action.shape[0], 1)] = np.random.randint(-1, 2)
return action
def testEnv(Env, config="test_filled.yaml", frame_total=10, is_discrete=False):
print("Currently testing", Env)
config = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..', 'examples', 'configs', 'test', config)
env = Env(config)
obs = env.reset()
agent = RandomAgent(env.action_space, is_discrete)
frame = 0
score = 0
restart_delay = 0
obs = env.reset()
while True:
time.sleep(0.01)
a = agent.act(obs)
obs, r, done, meta = env.step(a)
score += r
frame += 1
if not done and frame < frame_total: continue
env.close()
return
if __name__ == '__main__':
testEnv(HuskyNavigateEnv, "test_semantics.yaml", 10, is_discrete=True)
testEnv(HuskyNavigateEnv, "test_filled.yaml", 10, is_discrete=True)
testEnv(HuskyNavigateEnv, "test_prefilled.yaml", 10, is_discrete=True)
testEnv(HuskyNavigateEnv, "test_depth.yaml", 10, is_discrete=True)
testEnv(HuskyNavigateEnv, "test_normal.yaml", 10, is_discrete=True)
testEnv(HuskyNavigateEnv, "test_three.yaml", 10, is_discrete=True)
testEnv(HuskyNavigateEnv, "test_four.yaml", 10, is_discrete=True)
testEnv(HuskyNavigateEnv, "test_nonviz.yaml", 10, is_discrete=True)
testEnv(HuskyGibsonFlagRunEnv, "test_nonviz.yaml", 10, is_discrete=True)
testEnv(HuskyGibsonFlagRunEnv, "test_depth.yaml", 10, is_discrete=True)
testEnv(AntGibsonFlagRunEnv, "test_nonviz_nondiscrete.yaml", 10, is_discrete=False)
testEnv(AntFlagRunEnv, "test_nonviz_nondiscrete.yaml", 10, is_discrete=False)
testEnv(AntClimbEnv, "test_nonviz_nondiscrete.yaml", 10, is_discrete=False)
testEnv(AntNavigateEnv, "test_nonviz_nondiscrete.yaml", 10, is_discrete=False)
testEnv(AntClimbEnv, "test_four_nondiscrete.yaml", 10, is_discrete=False)
testEnv(HumanoidNavigateEnv, "test_nonviz_nondiscrete.yaml", 10, is_discrete=False)
testEnv(HumanoidGibsonFlagRunEnv, "test_nonviz_nondiscrete.yaml", 10, is_discrete=False)
testEnv(HumanoidNavigateEnv, "test_four_nondiscrete.yaml", 10, is_discrete=False)
testEnv(DroneNavigateEnv, "test_nonviz_nondiscrete.yaml", 100, is_discrete=False)
testEnv(DroneNavigateEnv, "test_four_nondiscrete.yaml", 100, is_discrete=False)
|
[
"numpy.zeros",
"time.sleep",
"numpy.random.random",
"numpy.random.randint",
"numpy.random.choice"
] |
[((1326, 1342), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (1336, 1342), False, 'import time\n'), ((628, 666), 'numpy.random.randint', 'np.random.randint', (['self.action_space.n'], {}), '(self.action_space.n)\n', (645, 666), True, 'import numpy as np\n'), ((702, 738), 'numpy.zeros', 'np.zeros', (['self.action_space.shape[0]'], {}), '(self.action_space.shape[0])\n', (710, 738), True, 'import numpy as np\n'), ((755, 773), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (771, 773), True, 'import numpy as np\n'), ((845, 869), 'numpy.random.randint', 'np.random.randint', (['(-1)', '(2)'], {}), '(-1, 2)\n', (862, 869), True, 'import numpy as np\n'), ((805, 841), 'numpy.random.choice', 'np.random.choice', (['action.shape[0]', '(1)'], {}), '(action.shape[0], 1)\n', (821, 841), True, 'import numpy as np\n')]
|
"""
Copyright 2019 Samsung SDS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import pandas as pd
import re
from brightics.common.utils import check_required_parameters
import itertools
def polynomial_expansion(table, **params):
check_required_parameters(_polynomial_expansion, params, ['table'])
return _polynomial_expansion(table, **params)
def _polynomial_expansion(table, input_cols, hold_cols=False):
out_table = pd.DataFrame()
out_table[input_cols] = table[input_cols]
if hold_cols:
hold_cols = list(set(hold_cols) - set(input_cols))
out_table[hold_cols] = table[hold_cols]
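    # append every pairwise (degree-2) product of the input columns, including squares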
for i in range(len(input_cols)):
for j in range(i, len(input_cols)):
out_table[input_cols[i] + '_' + input_cols[j]] = np.array(table[input_cols[i]]) * np.array(table[input_cols[j]])
return {'out_table' : out_table}
|
[
"pandas.DataFrame",
"numpy.array",
"brightics.common.utils.check_required_parameters"
] |
[((789, 856), 'brightics.common.utils.check_required_parameters', 'check_required_parameters', (['_polynomial_expansion', 'params', "['table']"], {}), "(_polynomial_expansion, params, ['table'])\n", (814, 856), False, 'from brightics.common.utils import check_required_parameters\n'), ((991, 1005), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1003, 1005), True, 'import pandas as pd\n'), ((1319, 1349), 'numpy.array', 'np.array', (['table[input_cols[i]]'], {}), '(table[input_cols[i]])\n', (1327, 1349), True, 'import numpy as np\n'), ((1352, 1382), 'numpy.array', 'np.array', (['table[input_cols[j]]'], {}), '(table[input_cols[j]])\n', (1360, 1382), True, 'import numpy as np\n')]
|
import numpy as np
import matplotlib.pylab as plt
import pandas as pd
import scipy.signal as signal
#Concatenation of the data
data1 = pd.read_csv("transacciones2008.txt",sep = ";",names=['Fecha','Hora','Conversion','Monto'],decimal =",")
data2 = pd.read_csv("transacciones2009.txt",sep = ";",names=['Fecha','Hora','Conversion','Monto'],decimal =",")
data3 = pd.read_csv("transacciones2010.txt",sep = ";",names=['Fecha','Hora','Conversion','Monto'],decimal =",")
a = data1["Fecha"].str.split(" ",expand = True)
b = data1["Hora"].str.split(" ",expand = True)
c = data2["Fecha"].str.split(" ",expand = True)
d = data2["Hora"].str.split(" ",expand = True)
e = data3["Fecha"].str.split(" ",expand = True)
f = data3["Hora"].str.split(" ",expand = True)
n1 = pd.DataFrame({'Fecha': a[0] + " " + b[1],'Conversion':data1["Conversion"],'Monto':data1["Monto"]})
n2 = pd.DataFrame({'Fecha': c[0] + " " + d[1],'Conversion':data2["Conversion"],'Monto':data2["Monto"]})
n3 = pd.DataFrame({'Fecha': e[0] + " " + f[1],'Conversion':data3["Conversion"],'Monto':data3["Monto"]})
data = pd.concat([n1,n2,n3],ignore_index = True)
data["Fecha"] = pd.to_datetime(data["Fecha"],format='%d/%m/%Y %H:%M:%S')
data.to_csv('datos.csv',index = False)
#Plot of the data for the signal
plt.figure(figsize=(15,10))
plt.plot(data["Fecha"],data["Conversion"])
plt.savefig("Señal.png")
#Filters
N1 = 1
Wn1 = 0.1
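#Butterworth low-pass filter (scipy's default btype) of order N1 with normalized cutoff Wn1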
B1, A1 = signal.butter(N1, Wn1)
precio_filtrado1 = signal.filtfilt(B1,A1, data["Conversion"])
N2 = 2
Wn2 = 0.01
B2, A2 = signal.butter(N2, Wn2)
precio_filtrado2 = signal.filtfilt(B2,A2, data["Conversion"])
N3 = 3
Wn3 = 0.01
B3, A3 = signal.butter(N3, Wn3)
precio_filtrado3 = signal.filtfilt(B3,A3, data["Conversion"])
plt.figure(figsize=(10,15))
plt.subplot(3,1,1)
plt.plot(data["Fecha"],data["Conversion"], label = "Original")
plt.plot(data["Fecha"],precio_filtrado1, label = "Filtrado")
plt.xlabel("Fecha")
plt.ylabel("Precio")
plt.legend(loc=0.0)
plt.subplot(3,1,2)
plt.plot(data["Fecha"],data["Conversion"], label = "Original")
plt.plot(data["Fecha"],precio_filtrado2, label = "Filtrado")
plt.xlabel("Fecha")
plt.ylabel("Precio")
plt.legend(loc=0.0)
plt.subplot(3,1,3)
plt.plot(data["Fecha"],data["Conversion"], label = "Original")
plt.plot(data["Fecha"],precio_filtrado3, label = "Filtrado")
plt.xlabel("Fecha")
plt.ylabel("Precio")
plt.legend(loc=0.0)
plt.savefig("Filtros.png")
# Correlations
ruido1 = data["Conversion"]-precio_filtrado1
ruido2 = data["Conversion"]-precio_filtrado2
ruido3 = data["Conversion"]-precio_filtrado3
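#Full autocorrelation of each residual (noise) signal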
corr1=np.correlate(ruido1,ruido1,mode="full")
corr2=np.correlate(ruido2,ruido2,mode="full")
corr3=np.correlate(ruido3,ruido3,mode="full")
plt.figure(figsize=(10,15))
plt.subplot(3,1,1)
plt.plot(np.abs(corr1[len(corr1)//2:]))
plt.subplot(3,1,2)
plt.plot(np.abs(corr2[len(corr2)//2:]))
plt.subplot(3,1,3)
plt.plot(np.abs(corr3[len(corr3)//2:]))
plt.savefig("Correlaciones.png")
|
[
"pandas.DataFrame",
"matplotlib.pylab.savefig",
"matplotlib.pylab.legend",
"scipy.signal.filtfilt",
"matplotlib.pylab.subplot",
"pandas.read_csv",
"matplotlib.pylab.ylabel",
"matplotlib.pylab.plot",
"pandas.to_datetime",
"scipy.signal.butter",
"numpy.correlate",
"matplotlib.pylab.xlabel",
"pandas.concat",
"matplotlib.pylab.figure"
] |
[((141, 251), 'pandas.read_csv', 'pd.read_csv', (['"""transacciones2008.txt"""'], {'sep': '""";"""', 'names': "['Fecha', 'Hora', 'Conversion', 'Monto']", 'decimal': '""","""'}), "('transacciones2008.txt', sep=';', names=['Fecha', 'Hora',\n 'Conversion', 'Monto'], decimal=',')\n", (152, 251), True, 'import pandas as pd\n'), ((253, 363), 'pandas.read_csv', 'pd.read_csv', (['"""transacciones2009.txt"""'], {'sep': '""";"""', 'names': "['Fecha', 'Hora', 'Conversion', 'Monto']", 'decimal': '""","""'}), "('transacciones2009.txt', sep=';', names=['Fecha', 'Hora',\n 'Conversion', 'Monto'], decimal=',')\n", (264, 363), True, 'import pandas as pd\n'), ((365, 475), 'pandas.read_csv', 'pd.read_csv', (['"""transacciones2010.txt"""'], {'sep': '""";"""', 'names': "['Fecha', 'Hora', 'Conversion', 'Monto']", 'decimal': '""","""'}), "('transacciones2010.txt', sep=';', names=['Fecha', 'Hora',\n 'Conversion', 'Monto'], decimal=',')\n", (376, 475), True, 'import pandas as pd\n'), ((764, 870), 'pandas.DataFrame', 'pd.DataFrame', (["{'Fecha': a[0] + ' ' + b[1], 'Conversion': data1['Conversion'], 'Monto':\n data1['Monto']}"], {}), "({'Fecha': a[0] + ' ' + b[1], 'Conversion': data1['Conversion'],\n 'Monto': data1['Monto']})\n", (776, 870), True, 'import pandas as pd\n'), ((868, 974), 'pandas.DataFrame', 'pd.DataFrame', (["{'Fecha': c[0] + ' ' + d[1], 'Conversion': data2['Conversion'], 'Monto':\n data2['Monto']}"], {}), "({'Fecha': c[0] + ' ' + d[1], 'Conversion': data2['Conversion'],\n 'Monto': data2['Monto']})\n", (880, 974), True, 'import pandas as pd\n'), ((972, 1078), 'pandas.DataFrame', 'pd.DataFrame', (["{'Fecha': e[0] + ' ' + f[1], 'Conversion': data3['Conversion'], 'Monto':\n data3['Monto']}"], {}), "({'Fecha': e[0] + ' ' + f[1], 'Conversion': data3['Conversion'],\n 'Monto': data3['Monto']})\n", (984, 1078), True, 'import pandas as pd\n'), ((1081, 1123), 'pandas.concat', 'pd.concat', (['[n1, n2, n3]'], {'ignore_index': '(True)'}), '([n1, n2, n3], ignore_index=True)\n', (1090, 1123), True, 'import pandas as pd\n'), ((1139, 1196), 'pandas.to_datetime', 'pd.to_datetime', (["data['Fecha']"], {'format': '"""%d/%m/%Y %H:%M:%S"""'}), "(data['Fecha'], format='%d/%m/%Y %H:%M:%S')\n", (1153, 1196), True, 'import pandas as pd\n'), ((1272, 1300), 'matplotlib.pylab.figure', 'plt.figure', ([], {'figsize': '(15, 10)'}), '(figsize=(15, 10))\n', (1282, 1300), True, 'import matplotlib.pylab as plt\n'), ((1300, 1343), 'matplotlib.pylab.plot', 'plt.plot', (["data['Fecha']", "data['Conversion']"], {}), "(data['Fecha'], data['Conversion'])\n", (1308, 1343), True, 'import matplotlib.pylab as plt\n'), ((1343, 1367), 'matplotlib.pylab.savefig', 'plt.savefig', (['"""Señal.png"""'], {}), "('Señal.png')\n", (1354, 1367), True, 'import matplotlib.pylab as plt\n'), ((1413, 1435), 'scipy.signal.butter', 'signal.butter', (['N1', 'Wn1'], {}), '(N1, Wn1)\n', (1426, 1435), True, 'import scipy.signal as signal\n'), ((1455, 1498), 'scipy.signal.filtfilt', 'signal.filtfilt', (['B1', 'A1', "data['Conversion']"], {}), "(B1, A1, data['Conversion'])\n", (1470, 1498), True, 'import scipy.signal as signal\n'), ((1533, 1555), 'scipy.signal.butter', 'signal.butter', (['N2', 'Wn2'], {}), '(N2, Wn2)\n', (1546, 1555), True, 'import scipy.signal as signal\n'), ((1575, 1618), 'scipy.signal.filtfilt', 'signal.filtfilt', (['B2', 'A2', "data['Conversion']"], {}), "(B2, A2, data['Conversion'])\n", (1590, 1618), True, 'import scipy.signal as signal\n'), ((1653, 1675), 'scipy.signal.butter', 'signal.butter', (['N3', 'Wn3'], {}), '(N3, Wn3)\n', (1666, 1675), True, 
'import scipy.signal as signal\n'), ((1695, 1738), 'scipy.signal.filtfilt', 'signal.filtfilt', (['B3', 'A3', "data['Conversion']"], {}), "(B3, A3, data['Conversion'])\n", (1710, 1738), True, 'import scipy.signal as signal\n'), ((1739, 1767), 'matplotlib.pylab.figure', 'plt.figure', ([], {'figsize': '(10, 15)'}), '(figsize=(10, 15))\n', (1749, 1767), True, 'import matplotlib.pylab as plt\n'), ((1767, 1787), 'matplotlib.pylab.subplot', 'plt.subplot', (['(3)', '(1)', '(1)'], {}), '(3, 1, 1)\n', (1778, 1787), True, 'import matplotlib.pylab as plt\n'), ((1786, 1847), 'matplotlib.pylab.plot', 'plt.plot', (["data['Fecha']", "data['Conversion']"], {'label': '"""Original"""'}), "(data['Fecha'], data['Conversion'], label='Original')\n", (1794, 1847), True, 'import matplotlib.pylab as plt\n'), ((1849, 1908), 'matplotlib.pylab.plot', 'plt.plot', (["data['Fecha']", 'precio_filtrado1'], {'label': '"""Filtrado"""'}), "(data['Fecha'], precio_filtrado1, label='Filtrado')\n", (1857, 1908), True, 'import matplotlib.pylab as plt\n'), ((1910, 1929), 'matplotlib.pylab.xlabel', 'plt.xlabel', (['"""Fecha"""'], {}), "('Fecha')\n", (1920, 1929), True, 'import matplotlib.pylab as plt\n'), ((1930, 1950), 'matplotlib.pylab.ylabel', 'plt.ylabel', (['"""Precio"""'], {}), "('Precio')\n", (1940, 1950), True, 'import matplotlib.pylab as plt\n'), ((1951, 1970), 'matplotlib.pylab.legend', 'plt.legend', ([], {'loc': '(0.0)'}), '(loc=0.0)\n', (1961, 1970), True, 'import matplotlib.pylab as plt\n'), ((1971, 1991), 'matplotlib.pylab.subplot', 'plt.subplot', (['(3)', '(1)', '(2)'], {}), '(3, 1, 2)\n', (1982, 1991), True, 'import matplotlib.pylab as plt\n'), ((1990, 2051), 'matplotlib.pylab.plot', 'plt.plot', (["data['Fecha']", "data['Conversion']"], {'label': '"""Original"""'}), "(data['Fecha'], data['Conversion'], label='Original')\n", (1998, 2051), True, 'import matplotlib.pylab as plt\n'), ((2053, 2112), 'matplotlib.pylab.plot', 'plt.plot', (["data['Fecha']", 'precio_filtrado2'], {'label': '"""Filtrado"""'}), "(data['Fecha'], precio_filtrado2, label='Filtrado')\n", (2061, 2112), True, 'import matplotlib.pylab as plt\n'), ((2114, 2133), 'matplotlib.pylab.xlabel', 'plt.xlabel', (['"""Fecha"""'], {}), "('Fecha')\n", (2124, 2133), True, 'import matplotlib.pylab as plt\n'), ((2134, 2154), 'matplotlib.pylab.ylabel', 'plt.ylabel', (['"""Precio"""'], {}), "('Precio')\n", (2144, 2154), True, 'import matplotlib.pylab as plt\n'), ((2155, 2174), 'matplotlib.pylab.legend', 'plt.legend', ([], {'loc': '(0.0)'}), '(loc=0.0)\n', (2165, 2174), True, 'import matplotlib.pylab as plt\n'), ((2175, 2195), 'matplotlib.pylab.subplot', 'plt.subplot', (['(3)', '(1)', '(3)'], {}), '(3, 1, 3)\n', (2186, 2195), True, 'import matplotlib.pylab as plt\n'), ((2194, 2255), 'matplotlib.pylab.plot', 'plt.plot', (["data['Fecha']", "data['Conversion']"], {'label': '"""Original"""'}), "(data['Fecha'], data['Conversion'], label='Original')\n", (2202, 2255), True, 'import matplotlib.pylab as plt\n'), ((2257, 2316), 'matplotlib.pylab.plot', 'plt.plot', (["data['Fecha']", 'precio_filtrado3'], {'label': '"""Filtrado"""'}), "(data['Fecha'], precio_filtrado3, label='Filtrado')\n", (2265, 2316), True, 'import matplotlib.pylab as plt\n'), ((2318, 2337), 'matplotlib.pylab.xlabel', 'plt.xlabel', (['"""Fecha"""'], {}), "('Fecha')\n", (2328, 2337), True, 'import matplotlib.pylab as plt\n'), ((2338, 2358), 'matplotlib.pylab.ylabel', 'plt.ylabel', (['"""Precio"""'], {}), "('Precio')\n", (2348, 2358), True, 'import matplotlib.pylab as plt\n'), ((2359, 2378), 
'matplotlib.pylab.legend', 'plt.legend', ([], {'loc': '(0.0)'}), '(loc=0.0)\n', (2369, 2378), True, 'import matplotlib.pylab as plt\n'), ((2379, 2405), 'matplotlib.pylab.savefig', 'plt.savefig', (['"""Filtros.png"""'], {}), "('Filtros.png')\n", (2390, 2405), True, 'import matplotlib.pylab as plt\n'), ((2567, 2608), 'numpy.correlate', 'np.correlate', (['ruido1', 'ruido1'], {'mode': '"""full"""'}), "(ruido1, ruido1, mode='full')\n", (2579, 2608), True, 'import numpy as np\n'), ((2613, 2654), 'numpy.correlate', 'np.correlate', (['ruido2', 'ruido2'], {'mode': '"""full"""'}), "(ruido2, ruido2, mode='full')\n", (2625, 2654), True, 'import numpy as np\n'), ((2659, 2700), 'numpy.correlate', 'np.correlate', (['ruido3', 'ruido3'], {'mode': '"""full"""'}), "(ruido3, ruido3, mode='full')\n", (2671, 2700), True, 'import numpy as np\n'), ((2700, 2728), 'matplotlib.pylab.figure', 'plt.figure', ([], {'figsize': '(10, 15)'}), '(figsize=(10, 15))\n', (2710, 2728), True, 'import matplotlib.pylab as plt\n'), ((2728, 2748), 'matplotlib.pylab.subplot', 'plt.subplot', (['(3)', '(1)', '(1)'], {}), '(3, 1, 1)\n', (2739, 2748), True, 'import matplotlib.pylab as plt\n'), ((2787, 2807), 'matplotlib.pylab.subplot', 'plt.subplot', (['(3)', '(1)', '(2)'], {}), '(3, 1, 2)\n', (2798, 2807), True, 'import matplotlib.pylab as plt\n'), ((2846, 2866), 'matplotlib.pylab.subplot', 'plt.subplot', (['(3)', '(1)', '(3)'], {}), '(3, 1, 3)\n', (2857, 2866), True, 'import matplotlib.pylab as plt\n'), ((2905, 2937), 'matplotlib.pylab.savefig', 'plt.savefig', (['"""Correlaciones.png"""'], {}), "('Correlaciones.png')\n", (2916, 2937), True, 'import matplotlib.pylab as plt\n')]
|
import unittest
import filterdesigner.FIRDesign as FIRDesign
import numpy as np
class TestKaiserord(unittest.TestCase):
def setUp(self):
self.f1 = 0.2
self.f2 = 0.3
self.f3 = 0.4
self.f4 = 0.5
self.f5 = 0.6
self.f6 = 0.7
self.m1 = 1
self.m2 = 0
self.dev1 = 0.05
self.dev2 = 0.01
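        # f1..f6: normalized band-edge frequencies, m1/m2: band magnitudes, dev1/dev2: allowed ripple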
def test_kaiserord_1(self):
        # Test case for lowpass filter
self.assertTrue(np.all(FIRDesign.kaiserord([self.f1, self.f2], [self.m1, self.m2], self.dev2) == (45, 0.25, 3.3953210522614574, 'low')))
def test_kaiserord_2(self):
# Test case for highpass filter
self.assertTrue(np.all(FIRDesign.kaiserord([self.f1, self.f2], [self.m2, self.m1], self.dev1) == (26, 0.25, 1.509869637041394, 'high')))
def test_kaiserord_3(self):
# Test case for bandpass filter
ORD = FIRDesign.kaiserord([self.f1, self.f2, self.f3, self.f4], [self.m2, self.m1, self.m2], self.dev2)
self.assertTrue((ORD[0] == 45) and np.all(ORD[1] == [0.25, 0.45]) and (ORD[2] == 3.3953210522614574) and (ORD[3] == 'bandpass'))
def test_kaiserord_4(self):
# Test case for bandstop filter
ORD = FIRDesign.kaiserord([self.f1, self.f2, self.f3, self.f4], [self.m1, self.m2, self.m1], self.dev2)
self.assertTrue((ORD[0] == 46) and np.all(ORD[1] == [0.25, 0.45]) and (ORD[2] == 3.3953210522614574) and (ORD[3] == 'stop'))
def test_kaiserord_5(self):
# Test case for 'DC-1' filter
ORD = FIRDesign.kaiserord([self.f1, self.f2, self.f3, self.f4, self.f5, self.f6], [self.m1, self.m2, self.m1, self.m2], self.dev2)
self.assertTrue((ORD[0] == 45) and np.all(ORD[1] == [0.25, 0.45, 0.6499999999999999]) and (ORD[2] == 3.3953210522614574) and (ORD[3] == 'DC-1'))
def test_kaiserord_6(self):
# Test case for 'DC-0' filter
ORD = FIRDesign.kaiserord([self.f1, self.f2, self.f3, self.f4, self.f5, self.f6], [self.m2, self.m1, self.m2, self.m1], self.dev2)
self.assertTrue((ORD[0] == 46) and np.all(ORD[1] == [0.25, 0.45, 0.6499999999999999]) and (ORD[2] == 3.3953210522614574) and (ORD[3] == 'DC-0'))
def test_kaiserord_7(self):
# Test case for Exception 1
with self.assertRaises(ValueError):
FIRDesign.kaiserord([self.f1, self.f2], [self.m1, self.m2, self.m1], self.dev2)
def test_kaiserord_8(self):
# Test case for Exception 2
with self.assertRaises(ValueError):
FIRDesign.kaiserord([self.f1, self.f2, self.f3, self.f4], [self.m1, self.m2, self.m1], [self.dev1, self.dev2])
def test_kaiserord_9(self):
# Test case for Exception 3
with self.assertRaises(ValueError):
FIRDesign.kaiserord([self.f1, self.f2, self.f3, self.f4], [self.m1, self.m2, self.m1], [self.dev1, -0.2])
|
[
"filterdesigner.FIRDesign.kaiserord",
"numpy.all"
] |
[((916, 1017), 'filterdesigner.FIRDesign.kaiserord', 'FIRDesign.kaiserord', (['[self.f1, self.f2, self.f3, self.f4]', '[self.m2, self.m1, self.m2]', 'self.dev2'], {}), '([self.f1, self.f2, self.f3, self.f4], [self.m2, self.m1,\n self.m2], self.dev2)\n', (935, 1017), True, 'import filterdesigner.FIRDesign as FIRDesign\n'), ((1251, 1352), 'filterdesigner.FIRDesign.kaiserord', 'FIRDesign.kaiserord', (['[self.f1, self.f2, self.f3, self.f4]', '[self.m1, self.m2, self.m1]', 'self.dev2'], {}), '([self.f1, self.f2, self.f3, self.f4], [self.m1, self.m2,\n self.m1], self.dev2)\n', (1270, 1352), True, 'import filterdesigner.FIRDesign as FIRDesign\n'), ((1580, 1708), 'filterdesigner.FIRDesign.kaiserord', 'FIRDesign.kaiserord', (['[self.f1, self.f2, self.f3, self.f4, self.f5, self.f6]', '[self.m1, self.m2, self.m1, self.m2]', 'self.dev2'], {}), '([self.f1, self.f2, self.f3, self.f4, self.f5, self.f6],\n [self.m1, self.m2, self.m1, self.m2], self.dev2)\n', (1599, 1708), True, 'import filterdesigner.FIRDesign as FIRDesign\n'), ((1956, 2084), 'filterdesigner.FIRDesign.kaiserord', 'FIRDesign.kaiserord', (['[self.f1, self.f2, self.f3, self.f4, self.f5, self.f6]', '[self.m2, self.m1, self.m2, self.m1]', 'self.dev2'], {}), '([self.f1, self.f2, self.f3, self.f4, self.f5, self.f6],\n [self.m2, self.m1, self.m2, self.m1], self.dev2)\n', (1975, 2084), True, 'import filterdesigner.FIRDesign as FIRDesign\n'), ((2373, 2452), 'filterdesigner.FIRDesign.kaiserord', 'FIRDesign.kaiserord', (['[self.f1, self.f2]', '[self.m1, self.m2, self.m1]', 'self.dev2'], {}), '([self.f1, self.f2], [self.m1, self.m2, self.m1], self.dev2)\n', (2392, 2452), True, 'import filterdesigner.FIRDesign as FIRDesign\n'), ((2583, 2697), 'filterdesigner.FIRDesign.kaiserord', 'FIRDesign.kaiserord', (['[self.f1, self.f2, self.f3, self.f4]', '[self.m1, self.m2, self.m1]', '[self.dev1, self.dev2]'], {}), '([self.f1, self.f2, self.f3, self.f4], [self.m1, self.m2,\n self.m1], [self.dev1, self.dev2])\n', (2602, 2697), True, 'import filterdesigner.FIRDesign as FIRDesign\n'), ((2824, 2933), 'filterdesigner.FIRDesign.kaiserord', 'FIRDesign.kaiserord', (['[self.f1, self.f2, self.f3, self.f4]', '[self.m1, self.m2, self.m1]', '[self.dev1, -0.2]'], {}), '([self.f1, self.f2, self.f3, self.f4], [self.m1, self.m2,\n self.m1], [self.dev1, -0.2])\n', (2843, 2933), True, 'import filterdesigner.FIRDesign as FIRDesign\n'), ((1058, 1088), 'numpy.all', 'np.all', (['(ORD[1] == [0.25, 0.45])'], {}), '(ORD[1] == [0.25, 0.45])\n', (1064, 1088), True, 'import numpy as np\n'), ((1393, 1423), 'numpy.all', 'np.all', (['(ORD[1] == [0.25, 0.45])'], {}), '(ORD[1] == [0.25, 0.45])\n', (1399, 1423), True, 'import numpy as np\n'), ((1749, 1799), 'numpy.all', 'np.all', (['(ORD[1] == [0.25, 0.45, 0.6499999999999999])'], {}), '(ORD[1] == [0.25, 0.45, 0.6499999999999999])\n', (1755, 1799), True, 'import numpy as np\n'), ((2125, 2175), 'numpy.all', 'np.all', (['(ORD[1] == [0.25, 0.45, 0.6499999999999999])'], {}), '(ORD[1] == [0.25, 0.45, 0.6499999999999999])\n', (2131, 2175), True, 'import numpy as np\n'), ((489, 559), 'filterdesigner.FIRDesign.kaiserord', 'FIRDesign.kaiserord', (['[self.f1, self.f2]', '[self.m1, self.m2]', 'self.dev2'], {}), '([self.f1, self.f2], [self.m1, self.m2], self.dev2)\n', (508, 559), True, 'import filterdesigner.FIRDesign as FIRDesign\n'), ((711, 781), 'filterdesigner.FIRDesign.kaiserord', 'FIRDesign.kaiserord', (['[self.f1, self.f2]', '[self.m2, self.m1]', 'self.dev1'], {}), '([self.f1, self.f2], [self.m2, self.m1], self.dev1)\n', (730, 781), True, 'import 
filterdesigner.FIRDesign as FIRDesign\n')]
|
#!/usr/bin/env python3
import os
import sys
import socket
import datetime
import argparse
import torch
import numpy as np
from baselines import logger
from baselines.common.vec_env.dummy_vec_env import DummyVecEnv
from baselines.common.vec_env.vec_normalize import VecNormalize
from envs import make_env
from model_tor import ActorCriticNetwork
from storage_tor import ExperienceBuffer
from ppo_tor import VanillaPPO
def main():
# Init
args = parse_args()
env_id = 'Reacher-v2'
nprocess = 1
n_step_per_update = 2500
gamma = 0.99
epsilon = 1e-5
log_interval = 1
use_gae=False; tau=None
tag = '_'.join(['ppo', env_id, args.opt])
log_dir = os.path.join(args.log_dir, make_stamp(tag))
logger.configure(dir=log_dir)
torch.manual_seed(args.seed)
torch.set_num_threads(4)
assert nprocess==1
# assert not using cuda!
# assert not using recurrent net!
envs = [make_env(env_id, seed=args.seed, rank=i, log_dir=log_dir, add_timestep=False) for i in range(nprocess)]
envs = DummyVecEnv(envs)
envs = VecNormalize(envs, ob=True, ret=True, gamma=gamma, epsilon=epsilon, clipob=10., cliprew=10.)
observ_dim = envs.observation_space.shape[0]
action_dim = envs.action_space.shape[0]
assert len(envs.observation_space.shape)==1
assert len(envs.action_space.shape)==1
assert envs.action_space.__class__.__name__ == "Box"
actor_critic_net = ActorCriticNetwork(input_dim=observ_dim,
hidden_dim=64,
actor_output_dim=action_dim,
critic_output_dim=1) # one neuron estimating the value of any state
agent = VanillaPPO(actor_critic_net, optim_id=args.opt, lr=3e-4, clip_eps=0.2,
max_grad_norm=0.5, n_epoch=10, n_minibatch=32, epsilon=epsilon)
experience = ExperienceBuffer(n_step_per_update, nprocess, observ_dim, action_dim)
# Train
observ = envs.reset(); observ = torch.from_numpy(observ).float()
experience.observations[0].copy_(observ)
for update_idx in range(args.n_update):
# Get experience via rollouts for n_step_per_update steps
for step_idx in range(n_step_per_update):
# Sample actions
with torch.no_grad():
action, action_log_prob, pred_state_value = actor_critic_net.act(observ)
# print(action); print(action_log_prob); print(pred_state_value)
# Step
observ, reward, done, info = envs.step(action.squeeze(1).cpu().numpy())
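            # mask is 0 for processes whose episode just terminated, 1 otherwise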
mask = torch.FloatTensor([[0.0] if done_ else [1.0] for done_ in done])
reward = torch.from_numpy(np.expand_dims(np.stack(reward), 1)).float()
observ = torch.from_numpy(observ).float()
observ *= mask
experience.insert(action, action_log_prob, pred_state_value, reward, next_observ=observ, next_mask=mask)
# Update
with torch.no_grad():
pred_next_state_value = actor_critic_net.predict_state_value(observ).detach()
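        # compute discounted returns, bootstrapping from the critic's value estimate of the last observation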
experience.compute_returns(pred_next_state_value, gamma)
loss, value_loss, action_loss, distrib_entropy = agent.update(experience)
experience.after_update()
# Log
if (update_idx % log_interval)==0:
n_step_so_far = (update_idx+1) * nprocess * n_step_per_update
logs = ['update {}/{}'.format(update_idx+1, args.n_update)]
logs += ['loss {:.5f}'.format(loss)]
logs += ['action_loss {:.5f}'.format(action_loss)]
logs += ['value_loss {:.5f}'.format(value_loss)]
logs += ['distrib_entropy {:.5f}'.format(distrib_entropy)]
logs += ['n_step_so_far {}'.format(n_step_so_far)]
logger.log(' | '.join(logs))
def make_stamp(tag):
hostname = socket.gethostname(); hostname = hostname.split('.')[0]
timestamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S-%f")
stamp = '_'.join([tag, hostname, timestamp])
return stamp
def parse_args():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--opt', help='optimizer ID', type=str, default=None, required=True)
parser.add_argument('--n_update', help='number of update', type=int, default=None, required=True)
parser.add_argument('--seed', help='RNG seed', type=int, default=None, required=True)
parser.add_argument('--log_dir', help='root xprmt log dir', type=str, default=None, required=True)
return parser.parse_args()
if __name__ == '__main__':
main()
|
[
"numpy.stack",
"model_tor.ActorCriticNetwork",
"baselines.common.vec_env.dummy_vec_env.DummyVecEnv",
"envs.make_env",
"argparse.ArgumentParser",
"ppo_tor.VanillaPPO",
"storage_tor.ExperienceBuffer",
"baselines.common.vec_env.vec_normalize.VecNormalize",
"torch.manual_seed",
"torch.FloatTensor",
"socket.gethostname",
"torch.set_num_threads",
"baselines.logger.configure",
"torch.no_grad",
"datetime.datetime.now",
"torch.from_numpy"
] |
[((730, 759), 'baselines.logger.configure', 'logger.configure', ([], {'dir': 'log_dir'}), '(dir=log_dir)\n', (746, 759), False, 'from baselines import logger\n'), ((764, 792), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (781, 792), False, 'import torch\n'), ((797, 821), 'torch.set_num_threads', 'torch.set_num_threads', (['(4)'], {}), '(4)\n', (818, 821), False, 'import torch\n'), ((1040, 1057), 'baselines.common.vec_env.dummy_vec_env.DummyVecEnv', 'DummyVecEnv', (['envs'], {}), '(envs)\n', (1051, 1057), False, 'from baselines.common.vec_env.dummy_vec_env import DummyVecEnv\n'), ((1069, 1168), 'baselines.common.vec_env.vec_normalize.VecNormalize', 'VecNormalize', (['envs'], {'ob': '(True)', 'ret': '(True)', 'gamma': 'gamma', 'epsilon': 'epsilon', 'clipob': '(10.0)', 'cliprew': '(10.0)'}), '(envs, ob=True, ret=True, gamma=gamma, epsilon=epsilon, clipob=\n 10.0, cliprew=10.0)\n', (1081, 1168), False, 'from baselines.common.vec_env.vec_normalize import VecNormalize\n'), ((1427, 1537), 'model_tor.ActorCriticNetwork', 'ActorCriticNetwork', ([], {'input_dim': 'observ_dim', 'hidden_dim': '(64)', 'actor_output_dim': 'action_dim', 'critic_output_dim': '(1)'}), '(input_dim=observ_dim, hidden_dim=64, actor_output_dim=\n action_dim, critic_output_dim=1)\n', (1445, 1537), False, 'from model_tor import ActorCriticNetwork\n'), ((1718, 1858), 'ppo_tor.VanillaPPO', 'VanillaPPO', (['actor_critic_net'], {'optim_id': 'args.opt', 'lr': '(0.0003)', 'clip_eps': '(0.2)', 'max_grad_norm': '(0.5)', 'n_epoch': '(10)', 'n_minibatch': '(32)', 'epsilon': 'epsilon'}), '(actor_critic_net, optim_id=args.opt, lr=0.0003, clip_eps=0.2,\n max_grad_norm=0.5, n_epoch=10, n_minibatch=32, epsilon=epsilon)\n', (1728, 1858), False, 'from ppo_tor import VanillaPPO\n'), ((1893, 1962), 'storage_tor.ExperienceBuffer', 'ExperienceBuffer', (['n_step_per_update', 'nprocess', 'observ_dim', 'action_dim'], {}), '(n_step_per_update, nprocess, observ_dim, action_dim)\n', (1909, 1962), False, 'from storage_tor import ExperienceBuffer\n'), ((3861, 3881), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (3879, 3881), False, 'import socket\n'), ((4084, 4163), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), '(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n', (4107, 4163), False, 'import argparse\n'), ((925, 1002), 'envs.make_env', 'make_env', (['env_id'], {'seed': 'args.seed', 'rank': 'i', 'log_dir': 'log_dir', 'add_timestep': '(False)'}), '(env_id, seed=args.seed, rank=i, log_dir=log_dir, add_timestep=False)\n', (933, 1002), False, 'from envs import make_env\n'), ((2012, 2036), 'torch.from_numpy', 'torch.from_numpy', (['observ'], {}), '(observ)\n', (2028, 2036), False, 'import torch\n'), ((2604, 2670), 'torch.FloatTensor', 'torch.FloatTensor', (['[([0.0] if done_ else [1.0]) for done_ in done]'], {}), '([([0.0] if done_ else [1.0]) for done_ in done])\n', (2621, 2670), False, 'import torch\n'), ((2982, 2997), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2995, 2997), False, 'import torch\n'), ((3933, 3956), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3954, 3956), False, 'import datetime\n'), ((2297, 2312), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2310, 2312), False, 'import torch\n'), ((2773, 2797), 'torch.from_numpy', 'torch.from_numpy', (['observ'], {}), '(observ)\n', (2789, 2797), False, 'import torch\n'), ((2722, 2738), 'numpy.stack', 'np.stack', (['reward'], {}), 
'(reward)\n', (2730, 2738), True, 'import numpy as np\n')]
|