code | apis | extract_api
---|---|---
import copy
import numpy as np
import os
import torch
import pickle
from detectron2.data import MetadataCatalog
from detectron2.data import detection_utils as utils
from detectron2.structures import (
BitMasks,
Boxes,
BoxMode,
Instances,
PolygonMasks,
polygons_to_bitmask,
)
import pycocotools.mask as mask_util
from PIL import Image
import torchvision.transforms as transforms
from . import GaussianBlur
__all__ = ["PlaneRCNNMapper"]
# BT.601 RGB->YUV conversion matrix used by the "YUV-BT.601" branch below;
# it is missing from this snippet (defined in detectron2's detection_utils).
_M_RGB2YUV = [[0.299, 0.587, 0.114], [-0.14713, -0.28886, 0.436], [0.615, -0.51499, -0.10001]]
def convert_PIL_to_numpy(image, format):
"""
Convert PIL image to numpy array of target format.
Args:
image (PIL.Image): a PIL image
format (str): the format of output image
Returns:
(np.ndarray): also see `read_image`
"""
if format is not None:
# PIL only supports RGB, so convert to RGB and flip channels over below
conversion_format = format
if format in ["BGR", "YUV-BT.601"]:
conversion_format = "RGB"
image = image.convert(conversion_format)
image = np.asarray(image)
# PIL squeezes out the channel dimension for "L", so make it HWC
if format == "L":
image = np.expand_dims(image, -1)
# handle formats not supported by PIL
elif format == "BGR":
# flip channels if needed
image = image[:, :, ::-1]
elif format == "YUV-BT.601":
image = image / 255.0
image = np.dot(image, np.array(_M_RGB2YUV).T)
return image
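# Illustrative usage sketch (not part of the original file): converting a PIL
# image into a numpy array in BGR channel order with the helper above.
def _convert_PIL_to_numpy_example():
    img = Image.new("RGB", (4, 4), color=(255, 0, 0))
    arr = convert_PIL_to_numpy(img, "BGR")  # HWC uint8 array with channels flipped to BGR
    return arr.shape  # (4, 4, 3)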
def annotations_to_instances(
annos, image_size, mask_format="polygon", max_num_planes=20
):
"""
Create an :class:`Instances` object used by the models,
from instance annotations in the dataset dict.
Args:
annos (list[dict]): a list of annotations, one per instance.
image_size (tuple): height, width
Returns:
        Instances: It will contain the fields "gt_boxes", "gt_classes",
            "gt_masks", and "gt_planes", if they can be obtained from `annos`.
"""
boxes = [
BoxMode.convert(obj["bbox"], BoxMode(obj["bbox_mode"]), BoxMode.XYXY_ABS)
for obj in annos
]
target = Instances(image_size)
boxes = target.gt_boxes = Boxes(boxes)
boxes.clip(image_size)
classes = [obj["category_id"] for obj in annos]
classes = torch.tensor(classes, dtype=torch.int64)
target.gt_classes = classes
if len(annos) and "segmentation" in annos[0]:
segms = [obj["segmentation"] for obj in annos]
if mask_format == "polygon":
masks = PolygonMasks(segms)
else:
assert mask_format == "bitmask", mask_format
masks = []
for segm in segms:
if isinstance(segm, list):
# polygon
masks.append(polygons_to_bitmask(segm, *image_size))
elif isinstance(segm, dict):
# COCO RLE
masks.append(mask_util.decode(segm))
elif isinstance(segm, np.ndarray):
assert (
segm.ndim == 2
), "Expect segmentation of 2 dimensions, got {}.".format(segm.ndim)
# mask array
masks.append(segm)
else:
raise ValueError(
"Cannot convert segmentation of type '{}' to BitMasks!"
"Supported types are: polygons as list[list[float] or ndarray],"
" COCO-style RLE as a dict, or a full-image segmentation mask "
"as a 2D ndarray.".format(type(segm))
)
# torch.from_numpy does not support array with negative stride.
masks = BitMasks(
torch.stack([torch.from_numpy(np.ascontiguousarray(x)) for x in masks])
)
target.gt_masks = masks
if len(annos) and "plane" in annos[0]:
plane = [torch.tensor(obj["plane"]) for obj in annos]
plane_idx = [torch.tensor([i]) for i in range(len(plane))]
target.gt_planes = torch.stack(plane, dim=0)
target.gt_plane_idx = torch.stack(plane_idx, dim=0)
return target
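# Illustrative usage sketch (not part of the original file): building an
# `Instances` object from a single hand-written annotation dict; the field
# values below are made up for illustration.
def _annotations_to_instances_example():
    annos = [{
        "bbox": [10.0, 20.0, 60.0, 80.0],
        "bbox_mode": BoxMode.XYXY_ABS,
        "category_id": 0,
        "plane": [0.0, 0.0, 1.0],
    }]
    target = annotations_to_instances(annos, image_size=(480, 640))
    return target.gt_boxes, target.gt_classes, target.gt_planes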
class PlaneRCNNMapper:
"""
    A callable which takes a dict produced by the detection dataset and applies transformations,
    including image resizing and flipping. The transformation parameters are parsed from the cfg
    file and depend on the is_train condition.
Note that for our existing models, mean/std normalization is done by the model instead of here.
"""
def __init__(self, cfg, is_train=True, dataset_names=None):
self.cfg = cfg
# fmt: off
self.img_format = cfg.INPUT.FORMAT
self.depth_on = cfg.MODEL.DEPTH_ON
self.camera_on = cfg.MODEL.CAMERA_ON
self.load_proposals = cfg.MODEL.LOAD_PROPOSALS
self._eval_gt_box = cfg.TEST.EVAL_GT_BOX
self._augmentation = cfg.DATALOADER.AUGMENTATION
# fmt: on
if self.load_proposals:
raise ValueError("Loading proposals not yet supported")
self.is_train = is_train
assert dataset_names is not None
if self.camera_on:
kmeans_trans_path = cfg.MODEL.CAMERA_HEAD.KMEANS_TRANS_PATH
kmeans_rots_path = cfg.MODEL.CAMERA_HEAD.KMEANS_ROTS_PATH
assert os.path.exists(kmeans_trans_path)
assert os.path.exists(kmeans_rots_path)
with open(kmeans_trans_path, "rb") as f:
self.kmeans_trans = pickle.load(f)
with open(kmeans_rots_path, "rb") as f:
self.kmeans_rots = pickle.load(f)
if self._augmentation:
color_jitter = transforms.ColorJitter(0.8, 0.8, 0.8, 0.2)
augmentation = [
transforms.RandomApply([color_jitter], p=0.2),
transforms.RandomGrayscale(p=0.2),
transforms.RandomApply([GaussianBlur([0.1, 2.0])], p=0.5),
transforms.ToTensor(),
]
self.img_transform = transforms.Compose(augmentation)
def __call__(self, dataset_dict):
"""
Transform the dataset_dict according to the configured transformations.
Args:
dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.
Returns:
dict: a new dict that's going to be processed by the model.
It currently does the following:
1. Read the image from "file_name"
2. Transform the image and annotations
3. Prepare the annotations to :class:`Instances`
"""
dataset_dict = copy.deepcopy(dataset_dict)
for i in range(2):
image = utils.read_image(
dataset_dict[str(i)]["file_name"], format=self.img_format
)
utils.check_image_size(dataset_dict[str(i)], image)
if self.is_train and self._augmentation:
image = Image.fromarray(image)
dataset_dict[str(i)]["image"] = self.img_transform(image) * 255.0
image_shape = dataset_dict[str(i)]["image"].shape[1:]
else:
image_shape = image.shape[:2]
dataset_dict[str(i)]["image"] = torch.as_tensor(
image.transpose(2, 0, 1).astype("float32")
)
# Can use uint8 if it turns out to be slow some day
if self.depth_on:
if "depth_head" in self.cfg.MODEL.FREEZE:
dataset_dict[str(i)]["depth"] = torch.as_tensor(
np.zeros((480, 640)).astype("float32")
)
else:
# load depth map
house, img_id = dataset_dict[str(i)]["image_id"].split("_", 1)
depth_path = os.path.join(
"/Pool1/users/jinlinyi/dataset/mp3d_rpnet_v4_sep20/observations",
house,
img_id + ".pkl",
)
with open(depth_path, "rb") as f:
obs = pickle.load(f)
# This assertion is to check dataset is clean
# assert((obs['color_sensor'][:,:,:3][:,:,::-1].transpose(2, 0, 1)-dataset_dict[str(i)]["image"].numpy()).sum()==0)
depth = obs["depth_sensor"]
dataset_dict[str(i)]["depth"] = torch.as_tensor(
depth.astype("float32")
)
if self.camera_on:
relative_pose = dataset_dict["rel_pose"]
x, y, z = relative_pose["position"]
w, xi, yi, zi = relative_pose["rotation"]
dataset_dict["rel_pose"]["tran_cls"] = torch.LongTensor(
self.xyz2class(x, y, z)
)
dataset_dict["rel_pose"]["rot_cls"] = torch.LongTensor(
self.quat2class(w, xi, yi, zi)
)
if not self.is_train and not self._eval_gt_box:
return dataset_dict
if not self._eval_gt_box:
for i in range(2):
if "annotations" in dataset_dict[str(i)]:
annos = [
self.transform_annotations(obj)
for obj in dataset_dict[str(i)].pop("annotations")
if obj.get("iscrowd", 0) == 0
]
# Should not be empty during training
instances = annotations_to_instances(annos, image_shape)
dataset_dict[str(i)]["instances"] = instances[
instances.gt_boxes.nonempty()
]
else:
for i in range(2):
if "annotations" in dataset_dict[str(i)]:
annos = [
self.transform_annotations(obj)
for obj in dataset_dict[str(i)]["annotations"]
if obj.get("iscrowd", 0) == 0
]
# Should not be empty during training
instances = annotations_to_instances(annos, image_shape)
dataset_dict[str(i)]["instances"] = instances[
instances.gt_boxes.nonempty()
]
return dataset_dict
def transform_annotations(self, annotation, transforms=None, image_size=None):
"""
Apply image transformations to the annotations.
After this method, the box mode will be set to XYXY_ABS.
"""
return annotation
def xyz2class(self, x, y, z):
return self.kmeans_trans.predict([[x, y, z]])
def quat2class(self, w, xi, yi, zi):
return self.kmeans_rots.predict([[w, xi, yi, zi]])
def class2xyz(self, cls):
assert (cls >= 0).all() and (cls < self.kmeans_trans.n_clusters).all()
return self.kmeans_trans.cluster_centers_[cls]
def class2quat(self, cls):
assert (cls >= 0).all() and (cls < self.kmeans_rots.n_clusters).all()
return self.kmeans_rots.cluster_centers_[cls]
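# Illustrative sketch (not part of the original file) of the k-means based
# encode/decode performed by xyz2class / class2xyz above, using a freshly
# fitted model instead of the pickled one; cluster count and data are made up.
def _camera_kmeans_roundtrip_example():
    from sklearn.cluster import KMeans
    xyz = np.random.rand(100, 3)
    kmeans = KMeans(n_clusters=4, n_init=10).fit(xyz)
    cls = kmeans.predict([[0.1, 0.2, 0.3]])  # "tran_cls"-style label
    center = kmeans.cluster_centers_[cls]      # decoded cluster center
    return cls, center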
|
[
"pycocotools.mask.decode",
"pickle.load",
"detectron2.structures.Instances",
"os.path.join",
"detectron2.structures.polygons_to_bitmask",
"detectron2.structures.PolygonMasks",
"os.path.exists",
"torchvision.transforms.Compose",
"copy.deepcopy",
"numpy.asarray",
"detectron2.structures.Boxes",
"torchvision.transforms.RandomApply",
"detectron2.structures.BoxMode",
"torchvision.transforms.ColorJitter",
"torch.stack",
"numpy.zeros",
"numpy.expand_dims",
"torchvision.transforms.RandomGrayscale",
"numpy.array",
"PIL.Image.fromarray",
"numpy.ascontiguousarray",
"torch.tensor",
"torchvision.transforms.ToTensor"
] |
[((1016, 1033), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (1026, 1033), True, 'import numpy as np\n'), ((2086, 2107), 'detectron2.structures.Instances', 'Instances', (['image_size'], {}), '(image_size)\n', (2095, 2107), False, 'from detectron2.structures import BitMasks, Boxes, BoxMode, Instances, PolygonMasks, polygons_to_bitmask\n'), ((2138, 2150), 'detectron2.structures.Boxes', 'Boxes', (['boxes'], {}), '(boxes)\n', (2143, 2150), False, 'from detectron2.structures import BitMasks, Boxes, BoxMode, Instances, PolygonMasks, polygons_to_bitmask\n'), ((2245, 2285), 'torch.tensor', 'torch.tensor', (['classes'], {'dtype': 'torch.int64'}), '(classes, dtype=torch.int64)\n', (2257, 2285), False, 'import torch\n'), ((1141, 1166), 'numpy.expand_dims', 'np.expand_dims', (['image', '(-1)'], {}), '(image, -1)\n', (1155, 1166), True, 'import numpy as np\n'), ((4025, 4050), 'torch.stack', 'torch.stack', (['plane'], {'dim': '(0)'}), '(plane, dim=0)\n', (4036, 4050), False, 'import torch\n'), ((4081, 4110), 'torch.stack', 'torch.stack', (['plane_idx'], {'dim': '(0)'}), '(plane_idx, dim=0)\n', (4092, 4110), False, 'import torch\n'), ((6618, 6645), 'copy.deepcopy', 'copy.deepcopy', (['dataset_dict'], {}), '(dataset_dict)\n', (6631, 6645), False, 'import copy\n'), ((1997, 2022), 'detectron2.structures.BoxMode', 'BoxMode', (["obj['bbox_mode']"], {}), "(obj['bbox_mode'])\n", (2004, 2022), False, 'from detectron2.structures import BitMasks, Boxes, BoxMode, Instances, PolygonMasks, polygons_to_bitmask\n'), ((2481, 2500), 'detectron2.structures.PolygonMasks', 'PolygonMasks', (['segms'], {}), '(segms)\n', (2493, 2500), False, 'from detectron2.structures import BitMasks, Boxes, BoxMode, Instances, PolygonMasks, polygons_to_bitmask\n'), ((3886, 3912), 'torch.tensor', 'torch.tensor', (["obj['plane']"], {}), "(obj['plane'])\n", (3898, 3912), False, 'import torch\n'), ((3952, 3969), 'torch.tensor', 'torch.tensor', (['[i]'], {}), '([i])\n', (3964, 3969), False, 'import torch\n'), ((5309, 5342), 'os.path.exists', 'os.path.exists', (['kmeans_trans_path'], {}), '(kmeans_trans_path)\n', (5323, 5342), False, 'import os\n'), ((5362, 5394), 'os.path.exists', 'os.path.exists', (['kmeans_rots_path'], {}), '(kmeans_rots_path)\n', (5376, 5394), False, 'import os\n'), ((5660, 5702), 'torchvision.transforms.ColorJitter', 'transforms.ColorJitter', (['(0.8)', '(0.8)', '(0.8)', '(0.2)'], {}), '(0.8, 0.8, 0.8, 0.2)\n', (5682, 5702), True, 'import torchvision.transforms as transforms\n'), ((6007, 6039), 'torchvision.transforms.Compose', 'transforms.Compose', (['augmentation'], {}), '(augmentation)\n', (6025, 6039), True, 'import torchvision.transforms as transforms\n'), ((5484, 5498), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (5495, 5498), False, 'import pickle\n'), ((5586, 5600), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (5597, 5600), False, 'import pickle\n'), ((5748, 5793), 'torchvision.transforms.RandomApply', 'transforms.RandomApply', (['[color_jitter]'], {'p': '(0.2)'}), '([color_jitter], p=0.2)\n', (5770, 5793), True, 'import torchvision.transforms as transforms\n'), ((5811, 5844), 'torchvision.transforms.RandomGrayscale', 'transforms.RandomGrayscale', ([], {'p': '(0.2)'}), '(p=0.2)\n', (5837, 5844), True, 'import torchvision.transforms as transforms\n'), ((5937, 5958), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (5956, 5958), True, 'import torchvision.transforms as transforms\n'), ((6940, 6962), 'PIL.Image.fromarray', 'Image.fromarray', (['image'], {}), 
'(image)\n', (6955, 6962), False, 'from PIL import Image\n'), ((7806, 7912), 'os.path.join', 'os.path.join', (['"""/Pool1/users/jinlinyi/dataset/mp3d_rpnet_v4_sep20/observations"""', 'house', "(img_id + '.pkl')"], {}), "('/Pool1/users/jinlinyi/dataset/mp3d_rpnet_v4_sep20/observations',\n house, img_id + '.pkl')\n", (7818, 7912), False, 'import os\n'), ((1397, 1417), 'numpy.array', 'np.array', (['_M_RGB2YUV'], {}), '(_M_RGB2YUV)\n', (1405, 1417), True, 'import numpy as np\n'), ((2732, 2770), 'detectron2.structures.polygons_to_bitmask', 'polygons_to_bitmask', (['segm', '*image_size'], {}), '(segm, *image_size)\n', (2751, 2770), False, 'from detectron2.structures import BitMasks, Boxes, BoxMode, Instances, PolygonMasks, polygons_to_bitmask\n'), ((8088, 8102), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (8099, 8102), False, 'import pickle\n'), ((2881, 2903), 'pycocotools.mask.decode', 'mask_util.decode', (['segm'], {}), '(segm)\n', (2897, 2903), True, 'import pycocotools.mask as mask_util\n'), ((3737, 3760), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['x'], {}), '(x)\n', (3757, 3760), True, 'import numpy as np\n'), ((7570, 7590), 'numpy.zeros', 'np.zeros', (['(480, 640)'], {}), '((480, 640))\n', (7578, 7590), True, 'import numpy as np\n')]
|
from datetime import datetime
from multiprocessing import Array
from queue import Empty, Full
import numpy as np
# try:
from arrayqueues.portable_queue import PortableQueue  # as Queue
# except AttributeError:
#     from multiprocessing import Queue
class ArrayView:
def __init__(self, array, max_bytes, dtype, el_shape, i_item=0):
self.dtype = dtype
self.el_shape = el_shape
self.nbytes_el = self.dtype.itemsize * np.product(self.el_shape)
self.n_items = int(np.floor(max_bytes / self.nbytes_el))
self.total_shape = (self.n_items,) + self.el_shape
self.i_item = i_item
self.view = np.frombuffer(array, dtype, np.product(self.total_shape)).reshape(
self.total_shape
)
def __eq__(self, other):
"""Overrides the default implementation"""
if isinstance(self, other.__class__):
return self.el_shape == other.el_shape and self.dtype == other.dtype
return False
def push(self, element):
self.view[self.i_item, ...] = element
i_inserted = self.i_item
self.i_item = (self.i_item + 1) % self.n_items
# a tuple is returned to maximise performance
return self.dtype, self.el_shape, i_inserted
def pop(self, i_item):
return self.view[i_item, ...]
def fits(self, item):
if isinstance(item, np.ndarray):
return item.dtype == self.dtype and item.shape == self.el_shape
return (
item[0] == self.dtype
and item[1] == self.el_shape
and item[2] < self.n_items
)
class ArrayQueue:
"""A drop-in replacement for the multiprocessing queue, usable
only for numpy arrays, which removes the need for pickling and
should provide higher speeds and lower memory usage
"""
def __init__(self, max_mbytes=10):
self.maxbytes = int(max_mbytes * 1000000)
self.array = Array("c", self.maxbytes)
self.view = None
self.queue = PortableQueue()
self.read_queue = PortableQueue()
self.last_item = 0
def check_full(self):
while True:
try:
self.last_item = self.read_queue.get(timeout=0.00001)
except Empty:
break
if self.view.i_item == self.last_item:
raise Full(
"Queue of length {} full when trying to insert {},"
" last item read was {}".format(
self.view.n_items, self.view.i_item, self.last_item
)
)
def put(self, element):
if self.view is None or not self.view.fits(element):
self.view = ArrayView(
self.array.get_obj(), self.maxbytes, element.dtype, element.shape
)
self.last_item = 0
else:
self.check_full()
qitem = self.view.push(element)
self.queue.put(qitem)
def get(self, **kwargs):
aritem = self.queue.get(**kwargs)
if self.view is None or not self.view.fits(aritem):
self.view = ArrayView(self.array.get_obj(), self.maxbytes, *aritem)
self.read_queue.put(aritem[2])
return self.view.pop(aritem[2])
def clear(self):
"""Empties the queue without the need to read all the existing
elements
:return: nothing
"""
self.view = None
while True:
try:
_ = self.queue.get_nowait()
except Empty:
break
while True:
try:
_ = self.read_queue.get_nowait()
except Empty:
break
self.last_item = 0
def empty(self):
return self.queue.empty()
def qsize(self):
return self.queue.qsize()
class TimestampedArrayQueue(ArrayQueue):
"""A small extension to support timestamps saved alongside arrays"""
def put(self, element, timestamp=None):
if self.view is None or not self.view.fits(element):
self.view = ArrayView(
self.array.get_obj(), self.maxbytes, element.dtype, element.shape
)
else:
self.check_full()
qitem = self.view.push(element)
if timestamp is None:
timestamp = datetime.now()
self.queue.put((timestamp, qitem))
def get(self, **kwargs):
timestamp, aritem = self.queue.get(**kwargs)
if self.view is None or not self.view.fits(aritem):
self.view = ArrayView(self.array.get_obj(), self.maxbytes, *aritem)
self.read_queue.put(aritem[2])
return timestamp, self.view.pop(aritem[2])
class IndexedArrayQueue(ArrayQueue):
    """A small extension to support timestamps and a running frame index saved alongside arrays"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.counter = 0
def put(self, element, timestamp=None):
if self.view is None or not self.view.fits(element):
self.view = ArrayView(
self.array.get_obj(), self.maxbytes, element.dtype, element.shape
)
else:
self.check_full()
qitem = self.view.push(element)
if timestamp is None:
timestamp = datetime.now()
self.queue.put((timestamp, self.counter, qitem))
self.counter += 1
def get(self, **kwargs):
timestamp, index, aritem = self.queue.get(**kwargs)
if self.view is None or not self.view.fits(aritem):
self.view = ArrayView(self.array.get_obj(), self.maxbytes, *aritem)
self.read_queue.put(aritem[2])
return timestamp, index, self.view.pop(aritem[2])
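# Illustrative usage sketch (not part of the original module): passing a numpy
# frame through the shared-memory queue without pickling; shapes and buffer
# size are made up.
def _arrayqueue_example():
    q = ArrayQueue(max_mbytes=1)
    frame = np.zeros((64, 64), dtype=np.uint8)
    q.put(frame)            # copies the array into the shared buffer
    out = q.get(timeout=1)  # returns a view of the stored element
    return out.shape        # (64, 64)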
|
[
"multiprocessing.Array",
"arrayqueues.portable_queue.PortableQueue",
"numpy.floor",
"numpy.product",
"datetime.datetime.now"
] |
[((1929, 1954), 'multiprocessing.Array', 'Array', (['"""c"""', 'self.maxbytes'], {}), "('c', self.maxbytes)\n", (1934, 1954), False, 'from multiprocessing import Array\n'), ((2001, 2016), 'arrayqueues.portable_queue.PortableQueue', 'PortableQueue', ([], {}), '()\n', (2014, 2016), False, 'from arrayqueues.portable_queue import PortableQueue\n'), ((2043, 2058), 'arrayqueues.portable_queue.PortableQueue', 'PortableQueue', ([], {}), '()\n', (2056, 2058), False, 'from arrayqueues.portable_queue import PortableQueue\n'), ((443, 468), 'numpy.product', 'np.product', (['self.el_shape'], {}), '(self.el_shape)\n', (453, 468), True, 'import numpy as np\n'), ((496, 532), 'numpy.floor', 'np.floor', (['(max_bytes / self.nbytes_el)'], {}), '(max_bytes / self.nbytes_el)\n', (504, 532), True, 'import numpy as np\n'), ((4287, 4301), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4299, 4301), False, 'from datetime import datetime\n'), ((5257, 5271), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5269, 5271), False, 'from datetime import datetime\n'), ((670, 698), 'numpy.product', 'np.product', (['self.total_shape'], {}), '(self.total_shape)\n', (680, 698), True, 'import numpy as np\n')]
|
'''
Strategy: over a single trading day, a stock closes higher in roughly 42% of cases,
lower in roughly 46%, and is unchanged the remaining 12% of the time.
Algorithm: portfolio optimization using the PyPortfolioOpt library
(max Sharpe, risk, return, funds remaining)
'''
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import FinanceDataReader as fdr
import datetime
from pykrx import stock
import requests
from pypfopt.efficient_frontier import EfficientFrontier
from pypfopt import risk_models
from pypfopt import expected_returns
from pypfopt.discrete_allocation import DiscreteAllocation, get_latest_prices
# Load all of today's KOSPI & KOSDAQ tickers
today = datetime.datetime.today().strftime("%Y%m%d")
kospi = stock.get_market_fundamental_by_ticker(today, market='KOSPI').index
kosdaq = stock.get_market_fundamental_by_ticker(today, market='KOSDAQ').index
stocks = kospi.append(kosdaq)
def up_down_zero(code):  # returns the up/down/no-change day ratios for the given ticker over the past year
today = datetime.datetime.today().strftime("%Y-%m-%d")
year = today[0:4]
month_day = today[4:]
one_year_ago = str(int(year) - 1) + month_day
data = fdr.DataReader(code, one_year_ago)[['Close']]
data_rtn = data.pct_change()
up = 0
nothing = 0
down = 0
for i, date in enumerate(data.index):
if data_rtn.Close.iloc[i] > 0:
up = up + 1
elif data_rtn.Close.iloc[i] == 0:
nothing = nothing + 1
else:
down = down + 1
total_days = len(data_rtn.index)
return up / total_days, down / total_days, nothing / total_days
def get_up_down_zero_df(stocks):  # given a list of tickers, return a DataFrame of up/down/no-change probabilities
up_list = []
down_list = []
zero_list = []
for i in stocks:
temp = up_down_zero(i)
up_list.append(temp[0])
down_list.append(temp[1])
        zero_list.append(temp[2])
    # build the result DataFrame
    up_down_zero_df = pd.DataFrame()
    up_down_zero_df['ticker'] = stocks  # ticker code
    up_down_zero_df['up_prob'] = up_list  # fraction of days with a positive daily return
    up_down_zero_df['down_prob'] = down_list  # fraction of days with a negative daily return
    up_down_zero_df['no_change_prob'] = zero_list  # fraction of days with a zero daily return
    up_down_zero_df['up_prob_rank'] = up_down_zero_df['up_prob'].rank(ascending=False)
    up_down_zero_df = up_down_zero_df.sort_values(by='up_prob_rank')
return up_down_zero_df
up_down_zero_df = get_up_down_zero_df(stocks)
# the index list of tickers to keep is not defined in this snippet, so a
# top-N selection by up-probability rank is assumed here
idx_list = up_down_zero_df.index[:10]
symbol_udz = []
for i in idx_list:
    symbol_udz.append(up_down_zero_df.loc[i, 'ticker'])
# store the selected high-momentum tickers
assets = np.array(symbol_udz)
start_date = '2018-07-21'
end_date = '2021-07-21'
df = pd.DataFrame()
for ticker in assets:
    df[ticker] = fdr.DataReader(ticker, start_date, end_date)['Close']
df_dropna = df.dropna(axis = 1)
mu = expected_returns.mean_historical_return(df_dropna)
S = risk_models.sample_cov(df_dropna)
ef = EfficientFrontier(mu, S, solver="SCS")
weights = ef.max_sharpe()
cleaned_weights = ef.clean_weights()
print(ef.portfolio_performance(verbose=True))
portfolio_val = 15000000
latest_prices = get_latest_prices(df_dropna)
weights = cleaned_weights
da = DiscreteAllocation(weights, latest_prices, total_portfolio_value=portfolio_val)
allocation, leftover = da.lp_portfolio(verbose=False)
rmse = da._allocation_rmse_error(verbose=False)  # pypfopt's private RMSE helper, used by the print below
print('Discrete Allocaion: ', allocation)
print('Funds Remaining: ', leftover, ' KRW')
discrete_allocation_list = []
for symbol in allocation:
discrete_allocation_list.append(allocation.get(symbol))
portfolio_df = pd.DataFrame(columns = ['company_Ticker', 'Discrete_val_'+str(portfolio_val)])
portfolio_df['company_Ticker'] = allocation
portfolio_df['Discrete_val_'+str(portfolio_val)] = discrete_allocation_list
portfolio_df_sorted = portfolio_df.sort_values('Discrete_val_'+str(portfolio_val), ascending = False)
portfolio_df_sorted = portfolio_df_sorted.reset_index(drop=True)
print('Funds Remaining: ', leftover, ' KRW')
print(ef.portfolio_performance(verbose=True))
print('Allocation has RMSE: {:.3f}'.format(rmse))
|
[
"pandas.DataFrame",
"pypfopt.risk_models.sample_cov",
"pykrx.stock.get_market_fundamental_by_ticker",
"datetime.datetime.today",
"pypfopt.efficient_frontier.EfficientFrontier",
"pypfopt.discrete_allocation.get_latest_prices",
"numpy.array",
"pypfopt.expected_returns.mean_historical_return",
"pypfopt.discrete_allocation.DiscreteAllocation",
"FinanceDataReader.DataReader"
] |
[((2378, 2398), 'numpy.array', 'np.array', (['symbol_udz'], {}), '(symbol_udz)\n', (2386, 2398), True, 'import numpy as np\n'), ((2454, 2468), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (2466, 2468), True, 'import pandas as pd\n'), ((2596, 2646), 'pypfopt.expected_returns.mean_historical_return', 'expected_returns.mean_historical_return', (['df_dropna'], {}), '(df_dropna)\n', (2635, 2646), False, 'from pypfopt import expected_returns\n'), ((2651, 2684), 'pypfopt.risk_models.sample_cov', 'risk_models.sample_cov', (['df_dropna'], {}), '(df_dropna)\n', (2673, 2684), False, 'from pypfopt import risk_models\n'), ((2691, 2729), 'pypfopt.efficient_frontier.EfficientFrontier', 'EfficientFrontier', (['mu', 'S'], {'solver': '"""SCS"""'}), "(mu, S, solver='SCS')\n", (2708, 2729), False, 'from pypfopt.efficient_frontier import EfficientFrontier\n'), ((2881, 2909), 'pypfopt.discrete_allocation.get_latest_prices', 'get_latest_prices', (['df_dropna'], {}), '(df_dropna)\n', (2898, 2909), False, 'from pypfopt.discrete_allocation import DiscreteAllocation, get_latest_prices\n'), ((2941, 3020), 'pypfopt.discrete_allocation.DiscreteAllocation', 'DiscreteAllocation', (['weights', 'latest_prices'], {'total_portfolio_value': 'portfolio_val'}), '(weights, latest_prices, total_portfolio_value=portfolio_val)\n', (2959, 3020), False, 'from pypfopt.discrete_allocation import DiscreteAllocation, get_latest_prices\n'), ((604, 665), 'pykrx.stock.get_market_fundamental_by_ticker', 'stock.get_market_fundamental_by_ticker', (['today'], {'market': '"""KOSPI"""'}), "(today, market='KOSPI')\n", (642, 665), False, 'from pykrx import stock\n'), ((681, 743), 'pykrx.stock.get_market_fundamental_by_ticker', 'stock.get_market_fundamental_by_ticker', (['today'], {'market': '"""KOSDAQ"""'}), "(today, market='KOSDAQ')\n", (719, 743), False, 'from pykrx import stock\n'), ((1789, 1803), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1801, 1803), True, 'import pandas as pd\n'), ((551, 576), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (574, 576), False, 'import datetime\n'), ((1013, 1047), 'FinanceDataReader.DataReader', 'fdr.DataReader', (['code', 'one_year_ago'], {}), '(code, one_year_ago)\n', (1027, 1047), True, 'import FinanceDataReader as fdr\n'), ((2504, 2547), 'FinanceDataReader.DataReader', 'fdr.DataReader', (['stock', 'start_date', 'end_date'], {}), '(stock, start_date, end_date)\n', (2518, 2547), True, 'import FinanceDataReader as fdr\n'), ((856, 881), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (879, 881), False, 'import datetime\n')]
|
# pylint: disable=invalid-name,protected-access
from copy import deepcopy
from unittest import TestCase
import codecs
import gzip
import logging
import os
import shutil
from keras import backend as K
import numpy
from numpy.testing import assert_allclose
from deep_qa.common.checks import log_keras_version_info
from deep_qa.data.instances.instance import TextInstance
from deep_qa.data.tokenizers import tokenizers
from deep_qa.common.params import Params
class DeepQaTestCase(TestCase): # pylint: disable=too-many-public-methods
TEST_DIR = './TMP_TEST/'
TRAIN_FILE = TEST_DIR + 'train_file'
VALIDATION_FILE = TEST_DIR + 'validation_file'
TEST_FILE = TEST_DIR + 'test_file'
TRAIN_BACKGROUND = TEST_DIR + 'train_background'
VALIDATION_BACKGROUND = TEST_DIR + 'validation_background'
SNLI_FILE = TEST_DIR + 'snli_file'
PRETRAINED_VECTORS_FILE = TEST_DIR + 'pretrained_glove_vectors_file'
PRETRAINED_VECTORS_GZIP = TEST_DIR + 'pretrained_glove_vectors_file.gz'
def setUp(self):
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
level=logging.DEBUG)
log_keras_version_info()
os.makedirs(self.TEST_DIR, exist_ok=True)
def tearDown(self):
shutil.rmtree(self.TEST_DIR)
TextInstance.tokenizer = tokenizers["words"](Params({}))
K.clear_session()
def get_model_params(self, additional_arguments=None):
params = Params({})
params['save_models'] = False
params['model_serialization_prefix'] = self.TEST_DIR
params['train_files'] = [self.TRAIN_FILE]
params['validation_files'] = [self.VALIDATION_FILE]
params['embeddings'] = {'words': {'dimension': 6}, 'characters': {'dimension': 2}}
params['encoder'] = {"default": {'type': 'bow'}}
params['num_epochs'] = 1
params['validation_split'] = 0.0
if additional_arguments:
for key, value in additional_arguments.items():
params[key] = deepcopy(value)
return params
def get_model(self, model_class, additional_arguments=None):
params = self.get_model_params(additional_arguments)
return model_class(params)
def ensure_model_trains_and_loads(self, model_class, args: Params):
args['save_models'] = True
# Our loading tests work better if you're not using data generators. Unless you
# specifically request it in your test, we'll avoid using them here, and if you _do_ use
# them, we'll skip some of the stuff below that isn't compatible.
args.setdefault('data_generator', None)
model = self.get_model(model_class, args)
model.train()
# load the model that we serialized
loaded_model = self.get_model(model_class, args)
loaded_model.load_model()
# verify that original model and the loaded model predict the same outputs
if model._uses_data_generators():
# We shuffle the data in the data generator. Instead of making that logic more
# complicated, we'll just pass on the loading tests here. See comment above.
pass
else:
model_predictions = model.model.predict(model.validation_arrays[0])
loaded_model_predictions = loaded_model.model.predict(model.validation_arrays[0])
for model_prediction, loaded_prediction in zip(model_predictions, loaded_model_predictions):
assert_allclose(model_prediction, loaded_prediction)
# We should get the same result if we index the data from the original model and the loaded
# model.
_, indexed_validation_arrays = loaded_model.load_data_arrays(model.validation_files)
if model._uses_data_generators():
# As above, we'll just pass on this.
pass
else:
model_predictions = model.model.predict(model.validation_arrays[0])
loaded_model_predictions = loaded_model.model.predict(indexed_validation_arrays[0])
for model_prediction, loaded_prediction in zip(model_predictions, loaded_model_predictions):
assert_allclose(model_prediction, loaded_prediction)
return model, loaded_model
@staticmethod
def one_hot(index, length):
vector = numpy.zeros(length)
vector[index] = 1
return vector
def write_snli_files(self):
with codecs.open(self.TRAIN_FILE, 'w', 'utf-8') as train_file:
train_file.write('1\ttext 1\thypothesis1\tentails\n')
train_file.write('2\ttext 2\thypothesis2\tcontradicts\n')
train_file.write('3\ttext3\thypothesis3\tentails\n')
train_file.write('4\ttext 4\thypothesis4\tneutral\n')
train_file.write('5\ttext5\thypothesis 5\tentails\n')
train_file.write('6\ttext6\thypothesis6\tcontradicts\n')
with codecs.open(self.VALIDATION_FILE, 'w', 'utf-8') as validation_file:
validation_file.write('1\ttext 1 with extra words\thypothesis1\tentails\n')
validation_file.write('2\ttext 2\tlonger hypothesis 2\tcontradicts\n')
validation_file.write('3\ttext3\thypothesis withreallylongfakeword\tentails\n')
def write_sequence_tagging_files(self):
with codecs.open(self.TRAIN_FILE, 'w', 'utf-8') as train_file:
train_file.write('cats###N\tare###V\tanimals###N\t.###N\n')
train_file.write('dogs###N\tare###V\tanimals###N\t.###N\n')
train_file.write('snakes###N\tare###V\tanimals###N\t.###N\n')
train_file.write('birds###N\tare###V\tanimals###N\t.###N\n')
with codecs.open(self.VALIDATION_FILE, 'w', 'utf-8') as validation_file:
validation_file.write('horses###N\tare###V\tanimals###N\t.###N\n')
validation_file.write('blue###N\tcows###N\tare###V\tanimals###N\t.###N\n')
validation_file.write('monkeys###N\tare###V\tanimals###N\t.###N\n')
validation_file.write('caterpillars###N\tare###V\tanimals###N\t.###N\n')
def write_verb_semantics_files(self):
with codecs.open(self.TRAIN_FILE, 'w', 'utf-8') as train_file:
train_file.write('root####absorb####water\t1,1\t2,2\tMOVE\t-1,-1\t0,0\n')
train_file.write('this####mixture####is####converted####into####sugar####inside####leaf'
'\t2,3\t5,5\tCREATE\t7,7\t-1,-1\n')
train_file.write('lakes####contain####water\t1,1\t2,2\tNONE\t-1,-1\t-1,-1\n')
with codecs.open(self.VALIDATION_FILE, 'w', 'utf-8') as validation_file:
validation_file.write('root####absorb####water\t1,1\t2,2\tMOVE\t-1,-1\t0,0\n')
validation_file.write('this####mixture####is####converted####into####sugar####inside####leaf'
'\t2,3\t5,5\tCREATE\t7,7\t-1,-1\n')
validation_file.write('lakes####contain####water\t1,1\t2,2\tNONE\t-1,-1\t-1,-1\n')
def write_true_false_model_files(self):
with codecs.open(self.VALIDATION_FILE, 'w', 'utf-8') as validation_file:
validation_file.write('1\tq1a1\t0\n')
validation_file.write('2\tq1a2\t1\n')
validation_file.write('3\tq1a3\t0\n')
validation_file.write('4\tq1a4\t0\n')
validation_file.write('5\tq2a1\t0\n')
validation_file.write('6\tq2a2\t0\n')
validation_file.write('7\tq2a3\t1\n')
validation_file.write('8\tq2a4\t0\n')
validation_file.write('9\tq3a1\t0\n')
validation_file.write('10\tq3a2\t0\n')
validation_file.write('11\tq3a3\t0\n')
validation_file.write('12\tq3a4\t1\n')
with codecs.open(self.TRAIN_FILE, 'w', 'utf-8') as train_file:
train_file.write('1\tsentence1\t0\n')
train_file.write('2\tsentence2 word2 word3\t1\n')
train_file.write('3\tsentence3 word2\t0\n')
train_file.write('4\tsentence4\t1\n')
train_file.write('5\tsentence5\t0\n')
train_file.write('6\tsentence6\t0\n')
with codecs.open(self.TEST_FILE, 'w', 'utf-8') as test_file:
test_file.write('1\ttestsentence1\t0\n')
test_file.write('2\ttestsentence2 word2 word3\t1\n')
test_file.write('3\ttestsentence3 word2\t0\n')
test_file.write('4\ttestsentence4\t1\n')
test_file.write('5\ttestsentence5 word4\t0\n')
test_file.write('6\ttestsentence6\t0\n')
def write_additional_true_false_model_files(self):
with codecs.open(self.VALIDATION_FILE, 'w', 'utf-8') as validation_file:
validation_file.write('1\tq4a1\t0\n')
validation_file.write('2\tq4a2\t1\n')
validation_file.write('3\tq4a3\t0\n')
validation_file.write('4\tq4a4\t0\n')
validation_file.write('5\tq5a1\t0\n')
validation_file.write('6\tq5a2\t0\n')
validation_file.write('7\tq5a3\t1\n')
validation_file.write('8\tq5a4\t0\n')
validation_file.write('9\tq6a1\t0\n')
validation_file.write('10\tq6a2\t0\n')
validation_file.write('11\tq6a3\t0\n')
validation_file.write('12\tq6a4\t1\n')
with codecs.open(self.TRAIN_FILE, 'w', 'utf-8') as train_file:
train_file.write('1\tsentence7\t0\n')
train_file.write('2\tsentence8 word4 word5\t1\n')
train_file.write('3\tsentence9 word4\t0\n')
train_file.write('4\tsentence10\t1\n')
train_file.write('5\tsentence11 word3 word2\t0\n')
train_file.write('6\tsentence12\t0\n')
def write_question_answer_files(self):
with codecs.open(self.VALIDATION_FILE, 'w', 'utf-8') as validation_file:
validation_file.write('1\tquestion1\tanswer1###answer2\t0\n')
with codecs.open(self.VALIDATION_BACKGROUND, 'w', 'utf-8') as validation_background:
validation_background.write('1\tvb1\tvb2\n')
with codecs.open(self.TRAIN_FILE, 'w', 'utf-8') as train_file:
train_file.write('1\ta b e i d\tanswer 1###answer2\t0\n')
train_file.write('2\ta b c d\tanswer3###answer4\t1\n')
train_file.write('3\te d w f d s a b\tanswer5###answer6###answer9\t2\n')
train_file.write('4\te fj k w q\tanswer7###answer8\t0\n')
with codecs.open(self.TRAIN_BACKGROUND, 'w', 'utf-8') as train_background:
train_background.write('1\tsb1\tsb2\n')
train_background.write('2\tsb3\n')
train_background.write('3\tsb4\n')
train_background.write('4\tsb5\tsb6\n')
def write_who_did_what_files(self):
with codecs.open(self.VALIDATION_FILE, 'w', 'utf-8') as validation_file:
validation_file.write('1\tHe went to the store to buy goods, because he wanted to.'
'\tHe bought xxxxx\tgoods###store\t0\n')
validation_file.write('1\tShe hiking on the weekend with her friend.'
'\tShe went xxxxx\thiking###friend###weekend###her friend\t0\n')
with codecs.open(self.TRAIN_FILE, 'w', 'utf-8') as train_file:
# document, question, answers
train_file.write('1\tFred hit the ball with the bat.\tHe hit the ball with the xxxxx\tbat###ball\t0\n')
train_file.write('1\tShe walked the dog today.\tThe xxxxx was walked today.\tShe###dog###today\t1\n')
train_file.write('1\tHe kept typing at his desk.\tHe typed at his xxxxx\tdesk###kept\t0\n')
train_file.write('1\tThe pup at the bone but not the biscuit.\tThe pup ate the xxxxx\t'
'bone###biscuit\t0\n')
def write_tuple_inference_files(self):
with codecs.open(self.VALIDATION_FILE, 'w', 'utf-8') as validation_file:
validation_file.write('1\tss<>v f d<>oo o<>c$$$s<>v ff<>o i###ss r<>v<>o e<>o ee\t'
'ss ss<>ve gg<>o sd<>ccs\t0\n')
with codecs.open(self.TRAIN_FILE, 'w', 'utf-8') as train_file:
# document, question, answers
train_file.write('1\tss<>v<>oo o<>c$$$s e<>ff<>o ii i###ss r<>rr<>o e<>o ee\t'
'ss<>ve gg<>o sd<>ccs\t0\n')
train_file.write('2\tsg g<>vg<>oo o<>c$$$s e<>v ff<>o ii i###ss<>v rr<>o e<>o ee'
'###hh kk<>hdj d<>hh\tss ss<>ve gg<>o sd<>ccs\t2\n')
train_file.write('3\ts r<>v f d<>o ss<>c$$$s e<>v ff<>o ss i$$$r<>v ss<>s o e<>o ee\t'
'ss ss<>v g<>o sd<>ccs\t0\n')
train_file.write('4\tty y<>cf fv ss<>s ss<>c$$$rt e<>vv f<>oss i i###ss<>v<>os e<>o ee\t'
'ss ss<>ve gg<>o sd<>ccs\t1\n')
def write_span_prediction_files(self):
with codecs.open(self.VALIDATION_FILE, 'w', 'utf-8') as validation_file:
validation_file.write('1\tquestion 1 with extra words\t'
'passage with answer and a reallylongword\t13,18\n')
with codecs.open(self.TRAIN_FILE, 'w', 'utf-8') as train_file:
train_file.write('1\tquestion 1\tpassage1 with answer1\t14,20\n')
train_file.write('2\tquestion 2\tpassage2 with answer2\t0,8\n')
train_file.write('3\tquestion 3\tpassage3 with answer3\t9,13\n')
train_file.write('4\tquestion 4\tpassage4 with answer4\t14,20\n')
def write_sentence_selection_files(self):
with codecs.open(self.VALIDATION_FILE, 'w', 'utf-8') as validation_file:
validation_file.write('1\tWhere is Paris?\tParis is the capital of France.###It '
'is by the Seine.###It is quite old###this is a '
'very long sentence meant to test that loading '
'and padding works properly in the model.\t1\n')
with codecs.open(self.TRAIN_FILE, 'w', 'utf-8') as train_file:
train_file.write('1\tWho won Super Bowl 50?\tSuper Bowl 50 was in Santa '
'Clara.###The Patriots beat the Broncos.\t1\n')
train_file.write('2\tWhen is Thanksgiving?\tFolk tales tell '
'of the Pilgrims celebrating the holiday.###Many '
'people eat a lot.###It is in November.\t2\n')
train_file.write('3\tWhen were computers invented?\tThe ancient Chinese used '
'abacuses.###Alan Turing cracked Enigma.###It is hard to '
'pinpoint an inventor of the computer.\t2\n')
def write_pretrained_vector_files(self):
# write the file
with codecs.open(self.PRETRAINED_VECTORS_FILE, 'w', 'utf-8') as vector_file:
vector_file.write('word2 0.21 0.57 0.51 0.31\n')
vector_file.write('sentence1 0.81 0.48 0.19 0.47\n')
# compress the file
with open(self.PRETRAINED_VECTORS_FILE, 'rb') as f_in:
with gzip.open(self.PRETRAINED_VECTORS_GZIP, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
def write_sentence_data(self):
with codecs.open(self.TRAIN_FILE, 'w', 'utf-8') as train_file:
train_file.write("This is a sentence for language modelling.\n")
train_file.write("Here's another one for language modelling.\n")
def write_original_snli_data(self):
with codecs.open(self.TRAIN_FILE, 'w', 'utf-8') as train_file:
# pylint: disable=line-too-long
train_file.write("""{"annotator_labels": ["neutral"],"captionID": "3416050480.jpg#4", "gold_label": "neutral", "pairID": "3416050480.jpg#4r1n", "sentence1": "A person on a horse jumps over a broken down airplane.", "sentence1_binary_parse": "( ( ( A person ) ( on ( a horse ) ) ) ( ( jumps ( over ( a ( broken ( down airplane ) ) ) ) ) . ) )", "sentence1_parse": "(ROOT (S (NP (NP (DT A) (NN person)) (PP (IN on) (NP (DT a) (NN horse)))) (VP (VBZ jumps) (PP (IN over) (NP (DT a) (JJ broken) (JJ down) (NN airplane)))) (. .)))", "sentence2": "A person is training his horse for a competition.", "sentence2_binary_parse": "( ( A person ) ( ( is ( ( training ( his horse ) ) ( for ( a competition ) ) ) ) . ) )", "sentence2_parse": "(ROOT (S (NP (DT A) (NN person)) (VP (VBZ is) (VP (VBG training) (NP (PRP$ his) (NN horse)) (PP (IN for) (NP (DT a) (NN competition))))) (. .)))"}\n""")
train_file.write("""{"annotator_labels": ["contradiction"], "captionID": "3416050480.jpg#4", "gold_label": "contradiction", "pairID": "3416050480.jpg#4r1c", "sentence1": "A person on a horse jumps over a broken down airplane.", "sentence1_binary_parse": "( ( ( A person ) ( on ( a horse ) ) ) ( ( jumps ( over ( a ( broken ( down airplane ) ) ) ) ) . ) )", "sentence1_parse": "(ROOT (S (NP (NP (DT A) (NN person)) (PP (IN on) (NP (DT a) (NN horse)))) (VP (VBZ jumps) (PP (IN over) (NP (DT a) (JJ broken) (JJ down) (NN airplane)))) (. .)))", "sentence2": "A person is at a diner, ordering an omelette.", "sentence2_binary_parse": "( ( A person ) ( ( ( ( is ( at ( a diner ) ) ) , ) ( ordering ( an omelette ) ) ) . ) )", "sentence2_parse": "(ROOT (S (NP (DT A) (NN person)) (VP (VBZ is) (PP (IN at) (NP (DT a) (NN diner))) (, ,) (S (VP (VBG ordering) (NP (DT an) (NN omelette))))) (. .)))"}\n""")
train_file.write("""{"annotator_labels": ["entailment"], "captionID": "3416050480.jpg#4", "gold_label": "entailment", "pairID": "3416050480.jpg#4r1e", "sentence1": "A person on a horse jumps over a broken down airplane.", "sentence1_binary_parse": "( ( ( A person ) ( on ( a horse ) ) ) ( ( jumps ( over ( a ( broken ( down airplane ) ) ) ) ) . ) )", "sentence1_parse": "(ROOT (S (NP (NP (DT A) (NN person)) (PP (IN on) (NP (DT a) (NN horse)))) (VP (VBZ jumps) (PP (IN over) (NP (DT a) (JJ broken) (JJ down) (NN airplane)))) (. .)))", "sentence2": "A person is outdoors, on a horse.", "sentence2_binary_parse": "( ( A person ) ( ( ( ( is outdoors ) , ) ( on ( a horse ) ) ) . ) )", "sentence2_parse": "(ROOT (S (NP (DT A) (NN person)) (VP (VBZ is) (ADVP (RB outdoors)) (, ,) (PP (IN on) (NP (DT a) (NN horse)))) (. .)))"}\n""")
# pylint: enable=line-too-long
with codecs.open(self.VALIDATION_FILE, 'w', 'utf-8') as validation_file:
# pylint: disable=line-too-long
validation_file.write("""{"annotator_labels": ["neutral"],"captionID": "3416050480.jpg#4", "gold_label": "neutral", "pairID": "3416050480.jpg#4r1n", "sentence1": "A person on a horse jumps over a broken down airplane.", "sentence1_binary_parse": "( ( ( A person ) ( on ( a horse ) ) ) ( ( jumps ( over ( a ( broken ( down airplane ) ) ) ) ) . ) )", "sentence1_parse": "(ROOT (S (NP (NP (DT A) (NN person)) (PP (IN on) (NP (DT a) (NN horse)))) (VP (VBZ jumps) (PP (IN over) (NP (DT a) (JJ broken) (JJ down) (NN airplane)))) (. .)))", "sentence2": "A person is training his horse for a competition.", "sentence2_binary_parse": "( ( A person ) ( ( is ( ( training ( his horse ) ) ( for ( a competition ) ) ) ) . ) )", "sentence2_parse": "(ROOT (S (NP (DT A) (NN person)) (VP (VBZ is) (VP (VBG training) (NP (PRP$ his) (NN horse)) (PP (IN for) (NP (DT a) (NN competition))))) (. .)))"}\n""")
validation_file.write("""{"annotator_labels": ["contradiction"], "captionID": "3416050480.jpg#4", "gold_label": "contradiction", "pairID": "3416050480.jpg#4r1c", "sentence1": "A person on a horse jumps over a broken down airplane.", "sentence1_binary_parse": "( ( ( A person ) ( on ( a horse ) ) ) ( ( jumps ( over ( a ( broken ( down airplane ) ) ) ) ) . ) )", "sentence1_parse": "(ROOT (S (NP (NP (DT A) (NN person)) (PP (IN on) (NP (DT a) (NN horse)))) (VP (VBZ jumps) (PP (IN over) (NP (DT a) (JJ broken) (JJ down) (NN airplane)))) (. .)))", "sentence2": "A person is at a diner, ordering an omelette.", "sentence2_binary_parse": "( ( A person ) ( ( ( ( is ( at ( a diner ) ) ) , ) ( ordering ( an omelette ) ) ) . ) )", "sentence2_parse": "(ROOT (S (NP (DT A) (NN person)) (VP (VBZ is) (PP (IN at) (NP (DT a) (NN diner))) (, ,) (S (VP (VBG ordering) (NP (DT an) (NN omelette))))) (. .)))"}\n""")
validation_file.write("""{"annotator_labels": ["entailment"], "captionID": "3416050480.jpg#4", "gold_label": "entailment", "pairID": "3416050480.jpg#4r1e", "sentence1": "A person on a horse jumps over a broken down airplane.", "sentence1_binary_parse": "( ( ( A person ) ( on ( a horse ) ) ) ( ( jumps ( over ( a ( broken ( down airplane ) ) ) ) ) . ) )", "sentence1_parse": "(ROOT (S (NP (NP (DT A) (NN person)) (PP (IN on) (NP (DT a) (NN horse)))) (VP (VBZ jumps) (PP (IN over) (NP (DT a) (JJ broken) (JJ down) (NN airplane)))) (. .)))", "sentence2": "A person is outdoors, on a horse.", "sentence2_binary_parse": "( ( A person ) ( ( ( ( is outdoors ) , ) ( on ( a horse ) ) ) . ) )", "sentence2_parse": "(ROOT (S (NP (DT A) (NN person)) (VP (VBZ is) (ADVP (RB outdoors)) (, ,) (PP (IN on) (NP (DT a) (NN horse)))) (. .)))"}\n""")
# pylint: enable=line-too-long
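# Illustrative sketch (not part of the original file): how a concrete test case
# might use the fixtures above; `SomeModelClass` is a hypothetical placeholder
# for a deep_qa model class.
#
# class SomeModelTest(DeepQaTestCase):
#     def test_trains_and_loads(self):
#         self.write_true_false_model_files()
#         self.ensure_model_trains_and_loads(SomeModelClass, Params({'num_epochs': 1}))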
|
[
"copy.deepcopy",
"gzip.open",
"os.makedirs",
"logging.basicConfig",
"codecs.open",
"numpy.testing.assert_allclose",
"numpy.zeros",
"deep_qa.common.params.Params",
"deep_qa.common.checks.log_keras_version_info",
"shutil.rmtree",
"shutil.copyfileobj",
"keras.backend.clear_session"
] |
[((1030, 1143), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s - %(levelname)s - %(name)s - %(message)s"""', 'level': 'logging.DEBUG'}), "(format=\n '%(asctime)s - %(levelname)s - %(name)s - %(message)s', level=logging.DEBUG\n )\n", (1049, 1143), False, 'import logging\n'), ((1170, 1194), 'deep_qa.common.checks.log_keras_version_info', 'log_keras_version_info', ([], {}), '()\n', (1192, 1194), False, 'from deep_qa.common.checks import log_keras_version_info\n'), ((1203, 1244), 'os.makedirs', 'os.makedirs', (['self.TEST_DIR'], {'exist_ok': '(True)'}), '(self.TEST_DIR, exist_ok=True)\n', (1214, 1244), False, 'import os\n'), ((1278, 1306), 'shutil.rmtree', 'shutil.rmtree', (['self.TEST_DIR'], {}), '(self.TEST_DIR)\n', (1291, 1306), False, 'import shutil\n'), ((1380, 1397), 'keras.backend.clear_session', 'K.clear_session', ([], {}), '()\n', (1395, 1397), True, 'from keras import backend as K\n'), ((1475, 1485), 'deep_qa.common.params.Params', 'Params', (['{}'], {}), '({})\n', (1481, 1485), False, 'from deep_qa.common.params import Params\n'), ((4340, 4359), 'numpy.zeros', 'numpy.zeros', (['length'], {}), '(length)\n', (4351, 4359), False, 'import numpy\n'), ((1360, 1370), 'deep_qa.common.params.Params', 'Params', (['{}'], {}), '({})\n', (1366, 1370), False, 'from deep_qa.common.params import Params\n'), ((4454, 4496), 'codecs.open', 'codecs.open', (['self.TRAIN_FILE', '"""w"""', '"""utf-8"""'], {}), "(self.TRAIN_FILE, 'w', 'utf-8')\n", (4465, 4496), False, 'import codecs\n'), ((4927, 4974), 'codecs.open', 'codecs.open', (['self.VALIDATION_FILE', '"""w"""', '"""utf-8"""'], {}), "(self.VALIDATION_FILE, 'w', 'utf-8')\n", (4938, 4974), False, 'import codecs\n'), ((5316, 5358), 'codecs.open', 'codecs.open', (['self.TRAIN_FILE', '"""w"""', '"""utf-8"""'], {}), "(self.TRAIN_FILE, 'w', 'utf-8')\n", (5327, 5358), False, 'import codecs\n'), ((5678, 5725), 'codecs.open', 'codecs.open', (['self.VALIDATION_FILE', '"""w"""', '"""utf-8"""'], {}), "(self.VALIDATION_FILE, 'w', 'utf-8')\n", (5689, 5725), False, 'import codecs\n'), ((6133, 6175), 'codecs.open', 'codecs.open', (['self.TRAIN_FILE', '"""w"""', '"""utf-8"""'], {}), "(self.TRAIN_FILE, 'w', 'utf-8')\n", (6144, 6175), False, 'import codecs\n'), ((6546, 6593), 'codecs.open', 'codecs.open', (['self.VALIDATION_FILE', '"""w"""', '"""utf-8"""'], {}), "(self.VALIDATION_FILE, 'w', 'utf-8')\n", (6557, 6593), False, 'import codecs\n'), ((7034, 7081), 'codecs.open', 'codecs.open', (['self.VALIDATION_FILE', '"""w"""', '"""utf-8"""'], {}), "(self.VALIDATION_FILE, 'w', 'utf-8')\n", (7045, 7081), False, 'import codecs\n'), ((7718, 7760), 'codecs.open', 'codecs.open', (['self.TRAIN_FILE', '"""w"""', '"""utf-8"""'], {}), "(self.TRAIN_FILE, 'w', 'utf-8')\n", (7729, 7760), False, 'import codecs\n'), ((8107, 8148), 'codecs.open', 'codecs.open', (['self.TEST_FILE', '"""w"""', '"""utf-8"""'], {}), "(self.TEST_FILE, 'w', 'utf-8')\n", (8118, 8148), False, 'import codecs\n'), ((8574, 8621), 'codecs.open', 'codecs.open', (['self.VALIDATION_FILE', '"""w"""', '"""utf-8"""'], {}), "(self.VALIDATION_FILE, 'w', 'utf-8')\n", (8585, 8621), False, 'import codecs\n'), ((9258, 9300), 'codecs.open', 'codecs.open', (['self.TRAIN_FILE', '"""w"""', '"""utf-8"""'], {}), "(self.TRAIN_FILE, 'w', 'utf-8')\n", (9269, 9300), False, 'import codecs\n'), ((9706, 9753), 'codecs.open', 'codecs.open', (['self.VALIDATION_FILE', '"""w"""', '"""utf-8"""'], {}), "(self.VALIDATION_FILE, 'w', 'utf-8')\n", (9717, 9753), False, 'import codecs\n'), ((9861, 9914), 'codecs.open', 
'codecs.open', (['self.VALIDATION_BACKGROUND', '"""w"""', '"""utf-8"""'], {}), "(self.VALIDATION_BACKGROUND, 'w', 'utf-8')\n", (9872, 9914), False, 'import codecs\n'), ((10011, 10053), 'codecs.open', 'codecs.open', (['self.TRAIN_FILE', '"""w"""', '"""utf-8"""'], {}), "(self.TRAIN_FILE, 'w', 'utf-8')\n", (10022, 10053), False, 'import codecs\n'), ((10374, 10422), 'codecs.open', 'codecs.open', (['self.TRAIN_BACKGROUND', '"""w"""', '"""utf-8"""'], {}), "(self.TRAIN_BACKGROUND, 'w', 'utf-8')\n", (10385, 10422), False, 'import codecs\n'), ((10696, 10743), 'codecs.open', 'codecs.open', (['self.VALIDATION_FILE', '"""w"""', '"""utf-8"""'], {}), "(self.VALIDATION_FILE, 'w', 'utf-8')\n", (10707, 10743), False, 'import codecs\n'), ((11129, 11171), 'codecs.open', 'codecs.open', (['self.TRAIN_FILE', '"""w"""', '"""utf-8"""'], {}), "(self.TRAIN_FILE, 'w', 'utf-8')\n", (11140, 11171), False, 'import codecs\n'), ((11773, 11820), 'codecs.open', 'codecs.open', (['self.VALIDATION_FILE', '"""w"""', '"""utf-8"""'], {}), "(self.VALIDATION_FILE, 'w', 'utf-8')\n", (11784, 11820), False, 'import codecs\n'), ((12016, 12058), 'codecs.open', 'codecs.open', (['self.TRAIN_FILE', '"""w"""', '"""utf-8"""'], {}), "(self.TRAIN_FILE, 'w', 'utf-8')\n", (12027, 12058), False, 'import codecs\n'), ((12819, 12866), 'codecs.open', 'codecs.open', (['self.VALIDATION_FILE', '"""w"""', '"""utf-8"""'], {}), "(self.VALIDATION_FILE, 'w', 'utf-8')\n", (12830, 12866), False, 'import codecs\n'), ((13056, 13098), 'codecs.open', 'codecs.open', (['self.TRAIN_FILE', '"""w"""', '"""utf-8"""'], {}), "(self.TRAIN_FILE, 'w', 'utf-8')\n", (13067, 13098), False, 'import codecs\n'), ((13483, 13530), 'codecs.open', 'codecs.open', (['self.VALIDATION_FILE', '"""w"""', '"""utf-8"""'], {}), "(self.VALIDATION_FILE, 'w', 'utf-8')\n", (13494, 13530), False, 'import codecs\n'), ((13908, 13950), 'codecs.open', 'codecs.open', (['self.TRAIN_FILE', '"""w"""', '"""utf-8"""'], {}), "(self.TRAIN_FILE, 'w', 'utf-8')\n", (13919, 13950), False, 'import codecs\n'), ((14697, 14752), 'codecs.open', 'codecs.open', (['self.PRETRAINED_VECTORS_FILE', '"""w"""', '"""utf-8"""'], {}), "(self.PRETRAINED_VECTORS_FILE, 'w', 'utf-8')\n", (14708, 14752), False, 'import codecs\n'), ((15156, 15198), 'codecs.open', 'codecs.open', (['self.TRAIN_FILE', '"""w"""', '"""utf-8"""'], {}), "(self.TRAIN_FILE, 'w', 'utf-8')\n", (15167, 15198), False, 'import codecs\n'), ((15422, 15464), 'codecs.open', 'codecs.open', (['self.TRAIN_FILE', '"""w"""', '"""utf-8"""'], {}), "(self.TRAIN_FILE, 'w', 'utf-8')\n", (15433, 15464), False, 'import codecs\n'), ((18223, 18270), 'codecs.open', 'codecs.open', (['self.VALIDATION_FILE', '"""w"""', '"""utf-8"""'], {}), "(self.VALIDATION_FILE, 'w', 'utf-8')\n", (18234, 18270), False, 'import codecs\n'), ((2041, 2056), 'copy.deepcopy', 'deepcopy', (['value'], {}), '(value)\n', (2049, 2056), False, 'from copy import deepcopy\n'), ((3500, 3552), 'numpy.testing.assert_allclose', 'assert_allclose', (['model_prediction', 'loaded_prediction'], {}), '(model_prediction, loaded_prediction)\n', (3515, 3552), False, 'from numpy.testing import assert_allclose\n'), ((4184, 4236), 'numpy.testing.assert_allclose', 'assert_allclose', (['model_prediction', 'loaded_prediction'], {}), '(model_prediction, loaded_prediction)\n', (4199, 4236), False, 'from numpy.testing import assert_allclose\n'), ((15003, 15048), 'gzip.open', 'gzip.open', (['self.PRETRAINED_VECTORS_GZIP', '"""wb"""'], {}), "(self.PRETRAINED_VECTORS_GZIP, 'wb')\n", (15012, 15048), False, 'import gzip\n'), ((15075, 15106), 
'shutil.copyfileobj', 'shutil.copyfileobj', (['f_in', 'f_out'], {}), '(f_in, f_out)\n', (15093, 15106), False, 'import shutil\n')]
|
import torch
import random
import socket
import os
import numpy as np
def get_device(device):
assert device in (
'cpu', 'cuda'), 'device {} should be in (cpu, cuda)'.format(device)
if socket.gethostname() == 'gemini' or not torch.cuda.is_available():
device = 'cpu'
else:
device = 'cuda' if device == 'cuda' else "cpu"
return device
def seed_exp(seed, device='cuda'):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if device == 'cuda':
torch.cuda.manual_seed(seed)
torch.set_num_threads(1)
def update_arguments(model=None, dataset=None, collect=None, sim2real=None):
""" User provides the arguments in a user-friendly way.
This function takes care of converting them to the format used by the repo. """
from bc.settings import MODEL_LOGDIR, DATASET_LOGDIR
def update_model_args(model):
if model is None:
return None
# convert the input_type argument from a string to a tuple
if isinstance(model['input_type'], (tuple, list)):
return
input_type_str2list = {
'rgb': ('rgb', ),
'depth': ('depth', ),
'rgbd': ('depth', 'rgb')
}
assert model['input_type'] in input_type_str2list
model['input_type'] = input_type_str2list[model['input_type']]
        # get the full paths using the user-specified settings
model['model_dir'] = os.path.join(MODEL_LOGDIR, model['name'])
model.pop('name')
return model
def update_dataset_args(dataset):
if dataset is None:
return None
dataset['dataset_dir'] = os.path.join(DATASET_LOGDIR, dataset['name'])
dataset.pop('name')
signal_keys_updated = []
for signal_key in dataset['signal_keys']:
signal_keys_updated.append(('state', signal_key))
dataset['signal_keys'] = signal_keys_updated
return dataset
def update_collect_args(collect):
if collect is None:
return None
collect['collect_dir'] = os.path.join(DATASET_LOGDIR, collect['folder'])
collect.pop('folder')
return collect
def update_sim2real_args(sim2real):
if sim2real is None:
return None
sim2real['mcts_dir'] = os.path.join(MODEL_LOGDIR, sim2real['name'])
sim2real['trainset_dir'] = os.path.join(DATASET_LOGDIR, sim2real['trainset_name'])
sim2real['evalset_dir'] = os.path.join(DATASET_LOGDIR, sim2real['evalset_name'])
sim2real.pop('name')
return sim2real
model = update_model_args(model)
dataset = update_dataset_args(dataset)
collect = update_collect_args(collect)
sim2real = update_sim2real_args(sim2real)
return [args for args in (model, dataset, collect, sim2real) if args is not None]
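# Illustrative sketch (not part of the original file) of the conversion that
# update_arguments performs; the experiment name below is made up.
def _update_arguments_example():
    model, = update_arguments(model={'name': 'example_run', 'input_type': 'rgbd'})
    # model['input_type'] is now ('depth', 'rgb'), model['model_dir'] points to
    # MODEL_LOGDIR/example_run, and the 'name' key has been popped
    return model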
|
[
"numpy.random.seed",
"torch.manual_seed",
"torch.cuda.manual_seed",
"socket.gethostname",
"torch.set_num_threads",
"random.seed",
"torch.cuda.is_available",
"os.path.join"
] |
[((416, 433), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (427, 433), False, 'import random\n'), ((438, 458), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (452, 458), True, 'import numpy as np\n'), ((463, 486), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (480, 486), False, 'import torch\n'), ((553, 577), 'torch.set_num_threads', 'torch.set_num_threads', (['(1)'], {}), '(1)\n', (574, 577), False, 'import torch\n'), ((520, 548), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['seed'], {}), '(seed)\n', (542, 548), False, 'import torch\n'), ((1453, 1494), 'os.path.join', 'os.path.join', (['MODEL_LOGDIR', "model['name']"], {}), "(MODEL_LOGDIR, model['name'])\n", (1465, 1494), False, 'import os\n'), ((1666, 1711), 'os.path.join', 'os.path.join', (['DATASET_LOGDIR', "dataset['name']"], {}), "(DATASET_LOGDIR, dataset['name'])\n", (1678, 1711), False, 'import os\n'), ((2085, 2132), 'os.path.join', 'os.path.join', (['DATASET_LOGDIR', "collect['folder']"], {}), "(DATASET_LOGDIR, collect['folder'])\n", (2097, 2132), False, 'import os\n'), ((2311, 2355), 'os.path.join', 'os.path.join', (['MODEL_LOGDIR', "sim2real['name']"], {}), "(MODEL_LOGDIR, sim2real['name'])\n", (2323, 2355), False, 'import os\n'), ((2391, 2446), 'os.path.join', 'os.path.join', (['DATASET_LOGDIR', "sim2real['trainset_name']"], {}), "(DATASET_LOGDIR, sim2real['trainset_name'])\n", (2403, 2446), False, 'import os\n'), ((2481, 2535), 'os.path.join', 'os.path.join', (['DATASET_LOGDIR', "sim2real['evalset_name']"], {}), "(DATASET_LOGDIR, sim2real['evalset_name'])\n", (2493, 2535), False, 'import os\n'), ((202, 222), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (220, 222), False, 'import socket\n'), ((242, 267), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (265, 267), False, 'import torch\n')]
|
import torch
import pandas as pd
import numpy as np
from os import path
from torch.utils.data import Dataset, sampler
from scipy.ndimage.filters import gaussian_filter
class MRIDataset(Dataset):
    """Dataset of MRI scans organized in a CAPS folder."""
def __init__(self, img_dir, data_file, preprocessing='linear', transform=None):
"""
Args:
img_dir (string): Directory of all the images.
            data_file (string or DataFrame): Path to the train/test split file (TSV), or the split DataFrame itself.
preprocessing (string): Defines the path to the data in CAPS
transform (callable, optional): Optional transform to be applied on a sample.
"""
self.img_dir = img_dir
self.transform = transform
self.diagnosis_code = {'CN': 0, 'AD': 1, 'sMCI': 0, 'pMCI': 1, 'MCI': 1, 'unlabeled': -1}
self.data_path = preprocessing
# Check the format of the tsv file here
if isinstance(data_file, str):
self.df = pd.read_csv(data_file, sep='\t')
elif isinstance(data_file, pd.DataFrame):
self.df = data_file
else:
raise Exception('The argument datafile is not of correct type.')
if ('diagnosis' not in list(self.df.columns.values)) or ('session_id' not in list(self.df.columns.values)) or \
('participant_id' not in list(self.df.columns.values)):
raise Exception("the data file is not in the correct format."
"Columns should include ['participant_id', 'session_id', 'diagnosis']")
self.size = self[0]['image'].numpy().size
def __len__(self):
return len(self.df)
def __getitem__(self, idx):
img_name = self.df.loc[idx, 'participant_id']
img_label = self.df.loc[idx, 'diagnosis']
sess_name = self.df.loc[idx, 'session_id']
# Not in BIDS but in CAPS
if self.data_path == "linear":
image_path = path.join(self.img_dir, 'subjects', img_name, sess_name,
't1', 'preprocessing_dl',
img_name + '_' + sess_name + '_space-MNI_res-1x1x1.pt')
elif self.data_path == "mni":
image_path = path.join(self.img_dir, 'subjects', img_name, sess_name,
't1', 'spm', 'segmentation', 'normalized_space',
img_name + '_' + sess_name + '_space-Ixi549Space_T1w.pt')
else:
raise NotImplementedError("The data path %s is not implemented" % self.data_path)
image = torch.load(image_path)
label = self.diagnosis_code[img_label]
if self.transform:
image = self.transform(image)
sample = {'image': image, 'label': label, 'participant_id': img_name, 'session_id': sess_name,
'image_path': image_path}
return sample
def session_restriction(self, session):
"""
        Generates a new MRIDataset restricted to specific sessions only (mostly used for evaluation on the test set)
        :param session: (str) the session wanted. Must be 'all' or 'ses-MXX'
        :return: (MRIDataset) the dataset restricted to the wanted sessions
"""
from copy import copy
data_output = copy(self)
if session == "all":
return data_output
else:
df_session = self.df[self.df.session_id == session]
df_session.reset_index(drop=True, inplace=True)
data_output.df = df_session
if len(data_output) == 0:
raise Exception("The session %s doesn't exist for any of the subjects in the test data" % session)
return data_output
class GaussianSmoothing(object):
def __init__(self, sigma):
self.sigma = sigma
def __call__(self, sample):
image = sample['image']
np.nan_to_num(image, copy=False)
smoothed_image = gaussian_filter(image, sigma=self.sigma)
sample['image'] = smoothed_image
return sample
class ToTensor(object):
"""Convert image type to Tensor and diagnosis to diagnosis code"""
def __call__(self, image):
np.nan_to_num(image, copy=False)
image = image.astype(float)
return torch.from_numpy(image[np.newaxis, :]).float()
class MinMaxNormalization(object):
"""Normalizes a tensor between 0 and 1"""
def __call__(self, image):
return (image - image.min()) / (image.max() - image.min())
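# Hedged usage sketch (not part of the original code): chaining the three transforms above
# on a small synthetic volume; the shape and the sigma below are illustrative choices only.
def _demo_transforms():
    sample = {'image': np.random.rand(8, 8, 8), 'label': 0}
    smoothed = GaussianSmoothing(sigma=1.0)(sample)['image']
    tensor = MinMaxNormalization()(ToTensor()(smoothed))
    return tensor.shape  # expected: torch.Size([1, 8, 8, 8])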
def load_data(train_val_path, diagnoses_list, split, n_splits=None, baseline=True):
train_df = pd.DataFrame()
valid_df = pd.DataFrame()
if n_splits is None:
train_path = path.join(train_val_path, 'train')
valid_path = path.join(train_val_path, 'validation')
else:
train_path = path.join(train_val_path, 'train_splits-' + str(n_splits),
'split-' + str(split))
valid_path = path.join(train_val_path, 'validation_splits-' + str(n_splits),
'split-' + str(split))
print("Train", train_path)
print("Valid", valid_path)
for diagnosis in diagnoses_list:
if baseline:
train_diagnosis_path = path.join(train_path, diagnosis + '_baseline.tsv')
else:
train_diagnosis_path = path.join(train_path, diagnosis + '.tsv')
valid_diagnosis_path = path.join(valid_path, diagnosis + '_baseline.tsv')
train_diagnosis_df = pd.read_csv(train_diagnosis_path, sep='\t')
valid_diagnosis_df = pd.read_csv(valid_diagnosis_path, sep='\t')
train_df = pd.concat([train_df, train_diagnosis_df])
valid_df = pd.concat([valid_df, valid_diagnosis_df])
train_df.reset_index(inplace=True, drop=True)
valid_df.reset_index(inplace=True, drop=True)
return train_df, valid_df
def load_data_test(test_path, diagnoses_list):
test_df = pd.DataFrame()
for diagnosis in diagnoses_list:
test_diagnosis_path = path.join(test_path, diagnosis + '_baseline.tsv')
test_diagnosis_df = pd.read_csv(test_diagnosis_path, sep='\t')
test_df = pd.concat([test_df, test_diagnosis_df])
test_df.reset_index(inplace=True, drop=True)
return test_df
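# Illustrative file layout assumed by load_data/load_data_test above (hypothetical paths,
# using 'AD' as an example diagnosis):
#   <train_val_path>/train/AD_baseline.tsv                          (baseline=True, n_splits=None)
#   <train_val_path>/train_splits-5/split-0/AD.tsv                  (baseline=False, n_splits=5)
#   <train_val_path>/validation_splits-5/split-0/AD_baseline.tsv
#   <test_path>/AD_baseline.tsv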
|
[
"pandas.DataFrame",
"scipy.ndimage.filters.gaussian_filter",
"numpy.nan_to_num",
"pandas.read_csv",
"torch.load",
"copy.copy",
"os.path.join",
"pandas.concat",
"torch.from_numpy"
] |
[((4592, 4606), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (4604, 4606), True, 'import pandas as pd\n'), ((4622, 4636), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (4634, 4636), True, 'import pandas as pd\n'), ((5913, 5927), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (5925, 5927), True, 'import pandas as pd\n'), ((2569, 2591), 'torch.load', 'torch.load', (['image_path'], {}), '(image_path)\n', (2579, 2591), False, 'import torch\n'), ((3276, 3286), 'copy.copy', 'copy', (['self'], {}), '(self)\n', (3280, 3286), False, 'from copy import copy\n'), ((3876, 3908), 'numpy.nan_to_num', 'np.nan_to_num', (['image'], {'copy': '(False)'}), '(image, copy=False)\n', (3889, 3908), True, 'import numpy as np\n'), ((3934, 3974), 'scipy.ndimage.filters.gaussian_filter', 'gaussian_filter', (['image'], {'sigma': 'self.sigma'}), '(image, sigma=self.sigma)\n', (3949, 3974), False, 'from scipy.ndimage.filters import gaussian_filter\n'), ((4176, 4208), 'numpy.nan_to_num', 'np.nan_to_num', (['image'], {'copy': '(False)'}), '(image, copy=False)\n', (4189, 4208), True, 'import numpy as np\n'), ((4684, 4718), 'os.path.join', 'path.join', (['train_val_path', '"""train"""'], {}), "(train_val_path, 'train')\n", (4693, 4718), False, 'from os import path\n'), ((4740, 4779), 'os.path.join', 'path.join', (['train_val_path', '"""validation"""'], {}), "(train_val_path, 'validation')\n", (4749, 4779), False, 'from os import path\n'), ((5396, 5446), 'os.path.join', 'path.join', (['valid_path', "(diagnosis + '_baseline.tsv')"], {}), "(valid_path, diagnosis + '_baseline.tsv')\n", (5405, 5446), False, 'from os import path\n'), ((5477, 5520), 'pandas.read_csv', 'pd.read_csv', (['train_diagnosis_path'], {'sep': '"""\t"""'}), "(train_diagnosis_path, sep='\\t')\n", (5488, 5520), True, 'import pandas as pd\n'), ((5550, 5593), 'pandas.read_csv', 'pd.read_csv', (['valid_diagnosis_path'], {'sep': '"""\t"""'}), "(valid_diagnosis_path, sep='\\t')\n", (5561, 5593), True, 'import pandas as pd\n'), ((5614, 5655), 'pandas.concat', 'pd.concat', (['[train_df, train_diagnosis_df]'], {}), '([train_df, train_diagnosis_df])\n', (5623, 5655), True, 'import pandas as pd\n'), ((5675, 5716), 'pandas.concat', 'pd.concat', (['[valid_df, valid_diagnosis_df]'], {}), '([valid_df, valid_diagnosis_df])\n', (5684, 5716), True, 'import pandas as pd\n'), ((5997, 6046), 'os.path.join', 'path.join', (['test_path', "(diagnosis + '_baseline.tsv')"], {}), "(test_path, diagnosis + '_baseline.tsv')\n", (6006, 6046), False, 'from os import path\n'), ((6075, 6117), 'pandas.read_csv', 'pd.read_csv', (['test_diagnosis_path'], {'sep': '"""\t"""'}), "(test_diagnosis_path, sep='\\t')\n", (6086, 6117), True, 'import pandas as pd\n'), ((6136, 6175), 'pandas.concat', 'pd.concat', (['[test_df, test_diagnosis_df]'], {}), '([test_df, test_diagnosis_df])\n', (6145, 6175), True, 'import pandas as pd\n'), ((981, 1013), 'pandas.read_csv', 'pd.read_csv', (['data_file'], {'sep': '"""\t"""'}), "(data_file, sep='\\t')\n", (992, 1013), True, 'import pandas as pd\n'), ((1938, 2080), 'os.path.join', 'path.join', (['self.img_dir', '"""subjects"""', 'img_name', 'sess_name', '"""t1"""', '"""preprocessing_dl"""', "(img_name + '_' + sess_name + '_space-MNI_res-1x1x1.pt')"], {}), "(self.img_dir, 'subjects', img_name, sess_name, 't1',\n 'preprocessing_dl', img_name + '_' + sess_name + '_space-MNI_res-1x1x1.pt')\n", (1947, 2080), False, 'from os import path\n'), ((5222, 5272), 'os.path.join', 'path.join', (['train_path', "(diagnosis + '_baseline.tsv')"], {}), 
"(train_path, diagnosis + '_baseline.tsv')\n", (5231, 5272), False, 'from os import path\n'), ((5322, 5363), 'os.path.join', 'path.join', (['train_path', "(diagnosis + '.tsv')"], {}), "(train_path, diagnosis + '.tsv')\n", (5331, 5363), False, 'from os import path\n'), ((2210, 2381), 'os.path.join', 'path.join', (['self.img_dir', '"""subjects"""', 'img_name', 'sess_name', '"""t1"""', '"""spm"""', '"""segmentation"""', '"""normalized_space"""', "(img_name + '_' + sess_name + '_space-Ixi549Space_T1w.pt')"], {}), "(self.img_dir, 'subjects', img_name, sess_name, 't1', 'spm',\n 'segmentation', 'normalized_space', img_name + '_' + sess_name +\n '_space-Ixi549Space_T1w.pt')\n", (2219, 2381), False, 'from os import path\n'), ((4261, 4299), 'torch.from_numpy', 'torch.from_numpy', (['image[np.newaxis, :]'], {}), '(image[np.newaxis, :])\n', (4277, 4299), False, 'import torch\n')]
|
# Copyright 2021 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dataclasses
from typing import List, Callable, Tuple
import cirq
import numpy as np
import pyqsp.phases
import scipy.linalg
from plot_qsp import qsp_plot
from qsp import to_r_z_from_wx
@dataclasses.dataclass
class FixedPointAmplitudeAmplification:
"""Amplitude amplification inputs.
    Based on the inputs of an amplitude amplification problem, it
    creates the fixed-point amplitude amplification circuit and, after the
    proper projection (which depends on whether the number of
    coefficients is even or odd), returns the amplitude.
    On a real quantum computer, we would need to provide all of u, u_inv, a0, b0,
    rotA0=e^(i * phi * (2|a0><a0|-I)) and rotB0=e^(i * phi * (2|b0><b0|-I))
    to the algorithm as black boxes, but in this simulation we can simply
    calculate them from u, a0, b0. Finally, coeffs determines the polynomial
    we'd like to transform <a0|u|b0> with.
Attributes:
u: the unitary to amplify
a0: the goal state
b0: the starting state
coeffs: the coefficients in QSP(R, <0|.|0>) convention
"""
u: cirq.Gate
a0: cirq.STATE_VECTOR_LIKE
b0: cirq.STATE_VECTOR_LIKE
coeffs: List[float]
u_inv: cirq.Gate = None
rot_a0: Callable[[float], cirq.Gate] = None
rot_b0: Callable[[float], cirq.Gate] = None
num_qubits: int = 2
    _amplitude_projector: Callable[[np.ndarray], complex] = None
def __post_init__(self):
self.u_inv = cirq.inverse(self.u)
self.rot_a0 = self._rot_state("a0", self.a0)
self.rot_b0 = self._rot_state("b0", self.b0)
self.num_qubits = cirq.num_qubits(self.u)
self._amplitude_projector = lambda uni: (
self.a0 @ uni @ self.b0
if len(self.coeffs) % 2 == 1
else self.b0 @ uni @ self.b0
)
def _rot_state(
self, name: str, state_vector: np.ndarray
    ) -> Callable[[float], cirq.Gate]:
        """Returns a rotation gate about the given state, parameterized by the angle phi."""
return lambda phi: cirq.MatrixGate(
name=f"{name}[{phi:.2f}]",
matrix=scipy.linalg.expm(
1j * phi * (2 * np.outer(state_vector, state_vector) - np.identity(4))
),
)
def get_circuit(self) -> cirq.Circuit:
qs = cirq.LineQubit.range(self.num_qubits)
# reverse operation order for circuits
# we mandatorily start with U, as this is the U|B0> in Eq (13)
if len(self.coeffs) == 0:
return cirq.Circuit(self.u(*qs))
ops = []
i = 1
for phi in self.coeffs[::-1]:
if i % 2 == 1:
ops += [self.u(*qs)]
ops += [self.rot_a0(phi)(*qs)]
else:
ops += [self.u_inv(*qs)]
ops += [self.rot_b0(phi)(*qs)]
i += 1
return cirq.Circuit(ops)
def run(self) -> float:
return self._amplitude_projector(cirq.unitary(self.get_circuit()))
def __str__(self):
        return f"""FixedPointAmplitudeAmplification:
num qubits: {self.num_qubits},
u: {self.u},
a0: {self.a0},
b0: {self.b0},
{self.get_circuit()}"""
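# Hedged usage sketch (not part of the original experiments): amplify the amplitude of a
# plain CX gate between two illustrative basis states with a length-4 phase sequence;
# all concrete values below are arbitrary choices for demonstration.
def _example_fixed_point_amplification():
    a0 = cirq.to_valid_state_vector(2, 2)   # |10>
    b0 = cirq.to_valid_state_vector(3, 2)   # |11>
    u = cirq.MatrixGate(cirq.unitary(cirq.CX))
    fpaa = FixedPointAmplitudeAmplification(
        u=u, a0=a0, b0=b0, coeffs=to_r_z_from_wx([0, 0, 0, 0])
    )
    return fpaa.run()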
class Experiment:
def __init__(
self,
coeffs: List[float],
n_points: int,
basis_a: int = 2,
basis_b: int = 3,
n_qubits: int = 2,
):
self.coeffs = coeffs
self.basis_a = basis_a
self.basis_b = basis_b
self.n_points = n_points
self.a_s = []
self.fa_s = []
self.a0 = cirq.to_valid_state_vector(basis_a, n_qubits)
self.b0 = cirq.to_valid_state_vector(basis_b, n_qubits)
def _get_u_gate_and_initial_amplitude(
self, p: float, sign: int
) -> Tuple[float, cirq.Gate]:
"""Creates a CNOT-like unitary with a real amplitude."""
u = sign * scipy.linalg.expm(1j * p * cirq.unitary(cirq.CX))
a = u[self.basis_a][self.basis_b]
new_a = a * sign * np.conj(a) / np.abs(a)
return new_a, cirq.MatrixGate(
name="u", matrix=sign * np.conj(a) / np.abs(a) * u
)
def _run_half(self, sign: int):
for p in np.linspace(1e-8, np.pi, self.n_points):
a, u = self._get_u_gate_and_initial_amplitude(p, sign)
fp_amp = self._get_fpamp(u)
self.a_s.append(a)
self.fa_s.append(fp_amp.run())
def _get_fpamp(self, u):
return FixedPointAmplitudeAmplification(u, self.a0, self.b0, self.coeffs)
def run(self) -> Tuple[List[float], List[float]]:
_, sample_fpamp = self._get_u_gate_and_initial_amplitude(0.123, -1)
print(self._get_fpamp(sample_fpamp))
self._run_half(-1)
self._run_half(1)
return self.a_s, self.fa_s
def experiment(
coeffs,
npoints=50,
title=None,
filename="fp_amp.png",
target_fn=None,
target_fn_label: str = None,
):
    """The main function to run the QSP experiments for the cases presented in the paper."""
title = f"Fixed amplitude amplification for {title}"
a_s, f_as = Experiment(coeffs, npoints).run()
qsp_plot(np.real(a_s), f_as, filename, target_fn, target_fn_label, title)
if __name__ == "__main__":
experiment(
title="$T_1$",
coeffs=to_r_z_from_wx([0, 0]),
npoints=10,
filename="fp_amp_t1.png",
target_fn=lambda a_s: a_s,
target_fn_label="$T_1(a)=a$",
)
experiment(
title="$T_2$",
coeffs=to_r_z_from_wx([0, 0, 0]),
npoints=100,
filename="fp_amp_t2.png",
target_fn=lambda a_s: 2 * a_s ** 2 - 1,
target_fn_label="$T_2(a)=2a^2-1$",
)
experiment(
title="$T_3$",
coeffs=to_r_z_from_wx([0, 0, 0, 0]),
npoints=100,
filename="fp_amp_t3.png",
target_fn=lambda a_s: 4 * a_s ** 3 - 3 * a_s,
target_fn_label="$T_3(a)=4 a^3-3 a$",
)
experiment(
title="$T_4$",
coeffs=to_r_z_from_wx([0, 0, 0, 0, 0]),
npoints=100,
filename="fp_amp_t4.png",
target_fn=lambda a_s: 8 * a_s ** 4 - 8 * a_s ** 2 + 1,
target_fn_label="$T_4(a)=8 a^4-8 a^2 +1$",
)
experiment(
title="$T_5$",
coeffs=to_r_z_from_wx([0, 0, 0, 0, 0, 0]),
npoints=100,
filename="fp_amp_t5.png",
target_fn=lambda a_s: 16 * a_s ** 5 - 20 * a_s ** 3 + 5 * a_s,
target_fn_label="$T_5(a)=16 a^5-20 a^3 + 5 a$",
)
# these are the same as in the Martyn et al paper
wx_phis = pyqsp.phases.FPSearch().generate(10, 0.5)
experiment(
title="FPSearch(10,0.5)",
coeffs=to_r_z_from_wx(wx_phis),
npoints=100,
filename="fp_amp_fpsearch_10_0.5.png",
)
|
[
"numpy.conj",
"numpy.outer",
"numpy.abs",
"cirq.inverse",
"cirq.unitary",
"cirq.to_valid_state_vector",
"numpy.identity",
"cirq.num_qubits",
"qsp.to_r_z_from_wx",
"cirq.Circuit",
"numpy.linspace",
"numpy.real",
"cirq.LineQubit.range"
] |
[((2056, 2076), 'cirq.inverse', 'cirq.inverse', (['self.u'], {}), '(self.u)\n', (2068, 2076), False, 'import cirq\n'), ((2209, 2232), 'cirq.num_qubits', 'cirq.num_qubits', (['self.u'], {}), '(self.u)\n', (2224, 2232), False, 'import cirq\n'), ((2865, 2902), 'cirq.LineQubit.range', 'cirq.LineQubit.range', (['self.num_qubits'], {}), '(self.num_qubits)\n', (2885, 2902), False, 'import cirq\n'), ((3424, 3441), 'cirq.Circuit', 'cirq.Circuit', (['ops'], {}), '(ops)\n', (3436, 3441), False, 'import cirq\n'), ((4102, 4147), 'cirq.to_valid_state_vector', 'cirq.to_valid_state_vector', (['basis_a', 'n_qubits'], {}), '(basis_a, n_qubits)\n', (4128, 4147), False, 'import cirq\n'), ((4166, 4211), 'cirq.to_valid_state_vector', 'cirq.to_valid_state_vector', (['basis_b', 'n_qubits'], {}), '(basis_b, n_qubits)\n', (4192, 4211), False, 'import cirq\n'), ((4716, 4756), 'numpy.linspace', 'np.linspace', (['(1e-08)', 'np.pi', 'self.n_points'], {}), '(1e-08, np.pi, self.n_points)\n', (4727, 4756), True, 'import numpy as np\n'), ((5655, 5667), 'numpy.real', 'np.real', (['a_s'], {}), '(a_s)\n', (5662, 5667), True, 'import numpy as np\n'), ((4540, 4549), 'numpy.abs', 'np.abs', (['a'], {}), '(a)\n', (4546, 4549), True, 'import numpy as np\n'), ((5803, 5825), 'qsp.to_r_z_from_wx', 'to_r_z_from_wx', (['[0, 0]'], {}), '([0, 0])\n', (5817, 5825), False, 'from qsp import to_r_z_from_wx\n'), ((6014, 6039), 'qsp.to_r_z_from_wx', 'to_r_z_from_wx', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (6028, 6039), False, 'from qsp import to_r_z_from_wx\n'), ((6247, 6275), 'qsp.to_r_z_from_wx', 'to_r_z_from_wx', (['[0, 0, 0, 0]'], {}), '([0, 0, 0, 0])\n', (6261, 6275), False, 'from qsp import to_r_z_from_wx\n'), ((6492, 6523), 'qsp.to_r_z_from_wx', 'to_r_z_from_wx', (['[0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0])\n', (6506, 6523), False, 'from qsp import to_r_z_from_wx\n'), ((6754, 6788), 'qsp.to_r_z_from_wx', 'to_r_z_from_wx', (['[0, 0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0])\n', (6768, 6788), False, 'from qsp import to_r_z_from_wx\n'), ((7154, 7177), 'qsp.to_r_z_from_wx', 'to_r_z_from_wx', (['wx_phis'], {}), '(wx_phis)\n', (7168, 7177), False, 'from qsp import to_r_z_from_wx\n'), ((4527, 4537), 'numpy.conj', 'np.conj', (['a'], {}), '(a)\n', (4534, 4537), True, 'import numpy as np\n'), ((4435, 4456), 'cirq.unitary', 'cirq.unitary', (['cirq.CX'], {}), '(cirq.CX)\n', (4447, 4456), False, 'import cirq\n'), ((4638, 4647), 'numpy.abs', 'np.abs', (['a'], {}), '(a)\n', (4644, 4647), True, 'import numpy as np\n'), ((2767, 2781), 'numpy.identity', 'np.identity', (['(4)'], {}), '(4)\n', (2778, 2781), True, 'import numpy as np\n'), ((4625, 4635), 'numpy.conj', 'np.conj', (['a'], {}), '(a)\n', (4632, 4635), True, 'import numpy as np\n'), ((2728, 2764), 'numpy.outer', 'np.outer', (['state_vector', 'state_vector'], {}), '(state_vector, state_vector)\n', (2736, 2764), True, 'import numpy as np\n')]
|
from ...isa.inst import *
import numpy as np
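# Reference ("golden") models for the RVV floating-point compare instructions with a
# scalar operand (vmfeq.vf, vmfne.vf, vmflt.vf, vmfle.vf, vmfgt.vf, vmfge.vf): the
# destination mask register ('orig') is bit-unpacked, every active element in
# [vstart, vl) is overwritten with the result of comparing vs2[i] against rs1, and the
# bits are packed back in little-endian order.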
class Vmfeq_vf(Inst):
name = 'vmfeq.vf'
def golden(self):
if 'vs2' in self:
result = np.unpackbits( self['orig'], bitorder='little' )
if 'vstart' in self:
vstart = self['vstart']
else:
vstart = 0
if 'mask' in self:
mask = np.unpackbits(self['mask'], bitorder='little')[vstart: self['vl']]
else:
if self['vl'] >= vstart:
mask = np.ones( self['vl'] - vstart, dtype = np.uint8 )
for no in range(vstart, self['vl']):
if mask[ no - vstart ] == 1:
result[ no ] = self['rs1'] == self['vs2'][no]
result = np.packbits( result, bitorder='little' )
return result
else:
return 0
class Vmfne_vf(Inst):
name = 'vmfne.vf'
def golden(self):
if 'vs2' in self:
result = np.unpackbits( self['orig'], bitorder='little' )
if 'vstart' in self:
vstart = self['vstart']
else:
vstart = 0
if 'mask' in self:
mask = np.unpackbits(self['mask'], bitorder='little')[vstart: self['vl']]
else:
if self['vl'] >= vstart:
mask = np.ones( self['vl'] - vstart, dtype = np.uint8 )
for no in range(vstart, self['vl']):
if mask[ no - vstart ] == 1:
result[ no ] = self['rs1'] != self['vs2'][no]
result = np.packbits( result, bitorder='little' )
return result
else:
return 0
class Vmflt_vf(Inst):
name = 'vmflt.vf'
def golden(self):
if 'vs2' in self:
result = np.unpackbits( self['orig'], bitorder='little' )
if 'vstart' in self:
vstart = self['vstart']
else:
vstart = 0
if 'mask' in self:
mask = np.unpackbits(self['mask'], bitorder='little')[vstart: self['vl']]
else:
if self['vl'] >= vstart:
mask = np.ones( self['vl'] - vstart, dtype = np.uint8 )
for no in range(vstart, self['vl']):
if mask[ no - vstart ] == 1:
result[ no ] = self['vs2'][no] < self['rs1']
result = np.packbits( result, bitorder='little' )
return result
else:
return 0
class Vmfle_vf(Inst):
name = 'vmfle.vf'
def golden(self):
if 'vs2' in self:
result = np.unpackbits( self['orig'], bitorder='little' )
if 'vstart' in self:
vstart = self['vstart']
else:
vstart = 0
if 'mask' in self:
mask = np.unpackbits(self['mask'], bitorder='little')[vstart: self['vl']]
else:
if self['vl'] >= vstart:
mask = np.ones( self['vl'] - vstart, dtype = np.uint8 )
for no in range(vstart, self['vl']):
if mask[ no - vstart ] == 1:
result[ no ] = self['vs2'][no] <= self['rs1']
result = np.packbits( result, bitorder='little' )
return result
else:
return 0
class Vmfgt_vf(Inst):
name = 'vmfgt.vf'
def golden(self):
if 'vs2' in self:
result = np.unpackbits( self['orig'], bitorder='little' )
if 'vstart' in self:
vstart = self['vstart']
else:
vstart = 0
if 'mask' in self:
mask = np.unpackbits(self['mask'], bitorder='little')[vstart: self['vl']]
else:
if self['vl'] >= vstart:
mask = np.ones( self['vl'] - vstart, dtype = np.uint8 )
for no in range(vstart, self['vl']):
if mask[ no - vstart ] == 1:
result[ no ] = self['vs2'][no] > self['rs1']
result = np.packbits( result, bitorder='little' )
return result
else:
return 0
class Vmfge_vf(Inst):
name = 'vmfge.vf'
def golden(self):
if 'vs2' in self:
result = np.unpackbits( self['orig'], bitorder='little' )
if 'vstart' in self:
vstart = self['vstart']
else:
vstart = 0
if 'mask' in self:
mask = np.unpackbits(self['mask'], bitorder='little')[vstart: self['vl']]
else:
if self['vl'] >= vstart:
mask = np.ones( self['vl'] - vstart, dtype = np.uint8 )
for no in range(vstart, self['vl']):
if mask[ no - vstart ] == 1:
result[ no ] = self['vs2'][no] >= self['rs1']
result = np.packbits( result, bitorder='little' )
return result
else:
return 0
|
[
"numpy.packbits",
"numpy.ones",
"numpy.unpackbits"
] |
[((161, 207), 'numpy.unpackbits', 'np.unpackbits', (["self['orig']"], {'bitorder': '"""little"""'}), "(self['orig'], bitorder='little')\n", (174, 207), True, 'import numpy as np\n'), ((782, 820), 'numpy.packbits', 'np.packbits', (['result'], {'bitorder': '"""little"""'}), "(result, bitorder='little')\n", (793, 820), True, 'import numpy as np\n'), ((1003, 1049), 'numpy.unpackbits', 'np.unpackbits', (["self['orig']"], {'bitorder': '"""little"""'}), "(self['orig'], bitorder='little')\n", (1016, 1049), True, 'import numpy as np\n'), ((1624, 1662), 'numpy.packbits', 'np.packbits', (['result'], {'bitorder': '"""little"""'}), "(result, bitorder='little')\n", (1635, 1662), True, 'import numpy as np\n'), ((1844, 1890), 'numpy.unpackbits', 'np.unpackbits', (["self['orig']"], {'bitorder': '"""little"""'}), "(self['orig'], bitorder='little')\n", (1857, 1890), True, 'import numpy as np\n'), ((2465, 2503), 'numpy.packbits', 'np.packbits', (['result'], {'bitorder': '"""little"""'}), "(result, bitorder='little')\n", (2476, 2503), True, 'import numpy as np\n'), ((2686, 2732), 'numpy.unpackbits', 'np.unpackbits', (["self['orig']"], {'bitorder': '"""little"""'}), "(self['orig'], bitorder='little')\n", (2699, 2732), True, 'import numpy as np\n'), ((3308, 3346), 'numpy.packbits', 'np.packbits', (['result'], {'bitorder': '"""little"""'}), "(result, bitorder='little')\n", (3319, 3346), True, 'import numpy as np\n'), ((3530, 3576), 'numpy.unpackbits', 'np.unpackbits', (["self['orig']"], {'bitorder': '"""little"""'}), "(self['orig'], bitorder='little')\n", (3543, 3576), True, 'import numpy as np\n'), ((4151, 4189), 'numpy.packbits', 'np.packbits', (['result'], {'bitorder': '"""little"""'}), "(result, bitorder='little')\n", (4162, 4189), True, 'import numpy as np\n'), ((4373, 4419), 'numpy.unpackbits', 'np.unpackbits', (["self['orig']"], {'bitorder': '"""little"""'}), "(self['orig'], bitorder='little')\n", (4386, 4419), True, 'import numpy as np\n'), ((4995, 5033), 'numpy.packbits', 'np.packbits', (['result'], {'bitorder': '"""little"""'}), "(result, bitorder='little')\n", (5006, 5033), True, 'import numpy as np\n'), ((385, 431), 'numpy.unpackbits', 'np.unpackbits', (["self['mask']"], {'bitorder': '"""little"""'}), "(self['mask'], bitorder='little')\n", (398, 431), True, 'import numpy as np\n'), ((538, 582), 'numpy.ones', 'np.ones', (["(self['vl'] - vstart)"], {'dtype': 'np.uint8'}), "(self['vl'] - vstart, dtype=np.uint8)\n", (545, 582), True, 'import numpy as np\n'), ((1227, 1273), 'numpy.unpackbits', 'np.unpackbits', (["self['mask']"], {'bitorder': '"""little"""'}), "(self['mask'], bitorder='little')\n", (1240, 1273), True, 'import numpy as np\n'), ((1380, 1424), 'numpy.ones', 'np.ones', (["(self['vl'] - vstart)"], {'dtype': 'np.uint8'}), "(self['vl'] - vstart, dtype=np.uint8)\n", (1387, 1424), True, 'import numpy as np\n'), ((2068, 2114), 'numpy.unpackbits', 'np.unpackbits', (["self['mask']"], {'bitorder': '"""little"""'}), "(self['mask'], bitorder='little')\n", (2081, 2114), True, 'import numpy as np\n'), ((2221, 2265), 'numpy.ones', 'np.ones', (["(self['vl'] - vstart)"], {'dtype': 'np.uint8'}), "(self['vl'] - vstart, dtype=np.uint8)\n", (2228, 2265), True, 'import numpy as np\n'), ((2910, 2956), 'numpy.unpackbits', 'np.unpackbits', (["self['mask']"], {'bitorder': '"""little"""'}), "(self['mask'], bitorder='little')\n", (2923, 2956), True, 'import numpy as np\n'), ((3063, 3107), 'numpy.ones', 'np.ones', (["(self['vl'] - vstart)"], {'dtype': 'np.uint8'}), "(self['vl'] - vstart, dtype=np.uint8)\n", (3070, 3107), 
True, 'import numpy as np\n'), ((3754, 3800), 'numpy.unpackbits', 'np.unpackbits', (["self['mask']"], {'bitorder': '"""little"""'}), "(self['mask'], bitorder='little')\n", (3767, 3800), True, 'import numpy as np\n'), ((3907, 3951), 'numpy.ones', 'np.ones', (["(self['vl'] - vstart)"], {'dtype': 'np.uint8'}), "(self['vl'] - vstart, dtype=np.uint8)\n", (3914, 3951), True, 'import numpy as np\n'), ((4597, 4643), 'numpy.unpackbits', 'np.unpackbits', (["self['mask']"], {'bitorder': '"""little"""'}), "(self['mask'], bitorder='little')\n", (4610, 4643), True, 'import numpy as np\n'), ((4750, 4794), 'numpy.ones', 'np.ones', (["(self['vl'] - vstart)"], {'dtype': 'np.uint8'}), "(self['vl'] - vstart, dtype=np.uint8)\n", (4757, 4794), True, 'import numpy as np\n')]
|
from typing import List, Tuple
import numpy as np
from evobench.benchmark import Benchmark
# from evobench.linkage import DependencyStructureMatrix
from evobench.model import Population, Solution
from ..operator import Operator
class RestrictedMixing(Operator):
def __init__(self, benchmark: Benchmark):
super(RestrictedMixing, self).__init__(benchmark)
# def apply(
# self,
# population: Population,
# dsm: DependencyStructureMatrix
# ) -> Population:
def mix(
self,
source: Solution,
ils: List[int], population: Population
) -> Tuple[Solution, np.ndarray]:
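        """Greedy restricted mixing: flips the genes listed in ``ils`` one at a time,
        keeping a flip only if fitness does not drop and the trial genome is not already
        in ``population``; returns the mixed solution and a boolean mask of kept flips.
        """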
assert source.genome.size == self.benchmark.genome_size
if not source.fitness:
source.fitness = self.benchmark.evaluate_solution(source)
trial = Solution(source.genome.copy())
best_fitness = source.fitness
mask = np.zeros(self.benchmark.genome_size, dtype=bool)
for gene_index in ils:
trial.genome[gene_index] = 1 - trial.genome[gene_index]
fitness = self.benchmark.evaluate_solution(trial)
# ! TODO: benchmark min/max
if fitness >= best_fitness and not population.contains(trial):
best_fitness = fitness
mask[gene_index] = True
else:
trial.genome[gene_index] = 1 - trial.genome[gene_index]
trial.fitness = best_fitness
return trial, mask
|
[
"numpy.zeros"
] |
[((911, 959), 'numpy.zeros', 'np.zeros', (['self.benchmark.genome_size'], {'dtype': 'bool'}), '(self.benchmark.genome_size, dtype=bool)\n', (919, 959), True, 'import numpy as np\n')]
|
from unittest import TestCase
import os
import tempfile
import numpy as np
from keras_trans_mask.backend import keras
from keras_trans_mask import CreateMask, RemoveMask, RestoreMask
class TestMasks(TestCase):
def test_over_fit(self):
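        # The graph below exercises all three layers: CreateMask derives a mask from the
        # padding value 9, RemoveMask strips it so Conv1D (which is not mask-aware) can run,
        # and RestoreMask re-attaches the embedding's mask before the mask-consuming LSTM.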
input_layer = keras.layers.Input(shape=(None,))
embed_layer = keras.layers.Embedding(
input_dim=10,
output_dim=15,
)(input_layer)
mask_layer = CreateMask(mask_value=9)(input_layer)
embed_layer = RestoreMask()([embed_layer, mask_layer])
removed_layer = RemoveMask()(embed_layer)
conv_layer = keras.layers.Conv1D(
filters=32,
kernel_size=3,
padding='same',
)(removed_layer)
restored_layer = RestoreMask()([conv_layer, embed_layer])
lstm_layer = keras.layers.LSTM(units=5)(restored_layer)
dense_layer = keras.layers.Dense(units=2, activation='softmax')(lstm_layer)
model = keras.models.Model(inputs=input_layer, outputs=dense_layer)
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy')
model.summary()
x = np.array([
[1, 2, 3, 4, 5, 9, 9, 9],
[6, 7, 8, 9, 9, 9, 9, 9],
] * 1024)
y = np.array([[0], [1]] * 1024)
model_path = os.path.join(tempfile.gettempdir(), 'test_trans_mask_%f.h5' % np.random.random())
model.save(model_path)
model = keras.models.load_model(model_path, custom_objects={
'CreateMask': CreateMask,
'RemoveMask': RemoveMask,
'RestoreMask': RestoreMask,
})
model.fit(x, y, epochs=10)
|
[
"keras_trans_mask.RestoreMask",
"keras_trans_mask.RemoveMask",
"keras_trans_mask.backend.keras.layers.Embedding",
"tempfile.gettempdir",
"keras_trans_mask.backend.keras.layers.Input",
"keras_trans_mask.backend.keras.models.Model",
"keras_trans_mask.CreateMask",
"numpy.array",
"keras_trans_mask.backend.keras.layers.Conv1D",
"numpy.random.random",
"keras_trans_mask.backend.keras.layers.LSTM",
"keras_trans_mask.backend.keras.layers.Dense",
"keras_trans_mask.backend.keras.models.load_model"
] |
[((266, 299), 'keras_trans_mask.backend.keras.layers.Input', 'keras.layers.Input', ([], {'shape': '(None,)'}), '(shape=(None,))\n', (284, 299), False, 'from keras_trans_mask.backend import keras\n'), ((970, 1029), 'keras_trans_mask.backend.keras.models.Model', 'keras.models.Model', ([], {'inputs': 'input_layer', 'outputs': 'dense_layer'}), '(inputs=input_layer, outputs=dense_layer)\n', (988, 1029), False, 'from keras_trans_mask.backend import keras\n'), ((1146, 1215), 'numpy.array', 'np.array', (['([[1, 2, 3, 4, 5, 9, 9, 9], [6, 7, 8, 9, 9, 9, 9, 9]] * 1024)'], {}), '([[1, 2, 3, 4, 5, 9, 9, 9], [6, 7, 8, 9, 9, 9, 9, 9]] * 1024)\n', (1154, 1215), True, 'import numpy as np\n'), ((1263, 1290), 'numpy.array', 'np.array', (['([[0], [1]] * 1024)'], {}), '([[0], [1]] * 1024)\n', (1271, 1290), True, 'import numpy as np\n'), ((1441, 1577), 'keras_trans_mask.backend.keras.models.load_model', 'keras.models.load_model', (['model_path'], {'custom_objects': "{'CreateMask': CreateMask, 'RemoveMask': RemoveMask, 'RestoreMask': RestoreMask\n }"}), "(model_path, custom_objects={'CreateMask':\n CreateMask, 'RemoveMask': RemoveMask, 'RestoreMask': RestoreMask})\n", (1464, 1577), False, 'from keras_trans_mask.backend import keras\n'), ((322, 373), 'keras_trans_mask.backend.keras.layers.Embedding', 'keras.layers.Embedding', ([], {'input_dim': '(10)', 'output_dim': '(15)'}), '(input_dim=10, output_dim=15)\n', (344, 373), False, 'from keras_trans_mask.backend import keras\n'), ((443, 467), 'keras_trans_mask.CreateMask', 'CreateMask', ([], {'mask_value': '(9)'}), '(mask_value=9)\n', (453, 467), False, 'from keras_trans_mask import CreateMask, RemoveMask, RestoreMask\n'), ((503, 516), 'keras_trans_mask.RestoreMask', 'RestoreMask', ([], {}), '()\n', (514, 516), False, 'from keras_trans_mask import CreateMask, RemoveMask, RestoreMask\n'), ((568, 580), 'keras_trans_mask.RemoveMask', 'RemoveMask', ([], {}), '()\n', (578, 580), False, 'from keras_trans_mask import CreateMask, RemoveMask, RestoreMask\n'), ((615, 677), 'keras_trans_mask.backend.keras.layers.Conv1D', 'keras.layers.Conv1D', ([], {'filters': '(32)', 'kernel_size': '(3)', 'padding': '"""same"""'}), "(filters=32, kernel_size=3, padding='same')\n", (634, 677), False, 'from keras_trans_mask.backend import keras\n'), ((765, 778), 'keras_trans_mask.RestoreMask', 'RestoreMask', ([], {}), '()\n', (776, 778), False, 'from keras_trans_mask import CreateMask, RemoveMask, RestoreMask\n'), ((827, 853), 'keras_trans_mask.backend.keras.layers.LSTM', 'keras.layers.LSTM', ([], {'units': '(5)'}), '(units=5)\n', (844, 853), False, 'from keras_trans_mask.backend import keras\n'), ((892, 941), 'keras_trans_mask.backend.keras.layers.Dense', 'keras.layers.Dense', ([], {'units': '(2)', 'activation': '"""softmax"""'}), "(units=2, activation='softmax')\n", (910, 941), False, 'from keras_trans_mask.backend import keras\n'), ((1325, 1346), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (1344, 1346), False, 'import tempfile\n'), ((1374, 1392), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (1390, 1392), True, 'import numpy as np\n')]
|
import numpy as np
from scipy.stats import entropy
from bisect import bisect
from scipy import stats
from scipy.stats import median_absolute_deviation as mad
from sklearn.metrics import r2_score, mean_squared_error
from pyapprox.multivariate_polynomials import conditional_moments_of_polynomial_chaos_expansion as cond_moments
def group_fix(partial_result, func, x, y_true, x_default,
rand, pool_results, file_exist=False):
"""
    Function for comparing results between conditioned and unconditioned QoI.
    Fixes parameters from the least influential group
    based on results from partial sorting.
    Four types of error measures are returned.
Parameters
----------
partial_result : dict,
dictionary of parameter groups, results of partial sort
func : list of function,
function for analysis (analytical formula or model)
x : np.array,
Input with shape of N * D where N is sampling size and
D is the number of parameters
y_true : list,
Function results with all x varying (the raw sampling matrix of x)
x_default : int, float, list,
Default values of x as a scalar or list of scalars
rand : np.ndarray,
Resample index in bootstrap, shape of R * N,
where R is the number of resamples
pool_results : dict,
Index of fixed parameters and the corresponding results
file_exist : bool (default: False),
If true, reads cached partial-ranking results from a file.
Otherwise, calculates results.
Returns
----------
Tuple of:
    dict_return : dict,
        Dictionary of uncertainty measures, each with its uncertain range:
        confidence-interval bounds and widths of the func results due to fixing
        parameters, the coefficient of variation, the conditional mean, and
        Kolmogorov-Smirnov statistics (with p-values) comparing the conditioned
        and unconditioned func results
    pool_results : dict,
        Updated index of fixed parameters and the corresponding results
"""
num_group = len(partial_result) - 1
# store results from fixing parameters in dict
cf_upper = {i: None for i in range(num_group)}
cf_lower, cv, ks, pvalue = dict(cf_upper), dict(cf_upper), dict(cf_upper), dict(cf_upper)
cf_upper_upper, cf_upper_lower, ks_upper, pvalue_upper = dict(cf_upper), dict(cf_upper), dict(cf_upper), dict(cf_upper)
cf_lower_lower, cf_lower_upper, ks_lower, pvalue_lower = dict(cf_upper), dict(cf_upper), dict(cf_upper), dict(cf_upper)
cf_width, cf_width_lower, cf_width_upper, cond_mean = dict(cf_upper), dict(cf_upper), dict(cf_upper), dict(cf_upper)
ind_fix = []
conf_level = [0.025, 0.975]
measures_all = [cf_upper, cf_lower, ks, pvalue, cv,
cf_upper_upper, cf_upper_lower, cf_lower_upper,
cf_lower_lower, ks_lower, ks_upper,
pvalue_lower, pvalue_upper,
cf_width, cf_width_lower,
cf_width_upper, cond_mean]
for i in range(num_group, -1, -1):
if file_exist:
try:
ind_fix.extend(partial_result[str(i)])
except NameError:
ind_fix = partial_result[str(i)]
else:
try:
ind_fix.extend(partial_result[i])
except NameError:
ind_fix = partial_result[i]
ind_fix.sort()
x_temp = x_default[ind_fix]
# check whether results existing
skip_calcul = results_exist(ind_fix, pool_results)
# print(skip_calcul)
if skip_calcul == False:
x_copy = np.copy(x)
x_copy[ind_fix, :] = x_temp
# compare results with insignificant parameters fixed
Nresample = rand.shape[0]
num_func = len(func)
total_resample = num_func * Nresample
pvalue_bt, ks_bt, cf_upper_bt, cf_lower_bt, cf_width_bt, y_true_width = \
np.zeros(total_resample), np.zeros(total_resample), np.zeros(total_resample), np.zeros(total_resample), \
np.zeros(total_resample), np.zeros(total_resample)
## Add the bootstrap of PCE
for jj in range(num_func):
fun = func[jj]
results_fix = fun(x_copy).flatten()
for ii in range(Nresample):
I = rand[ii]
ind_resample = jj * Nresample + ii
[cf_lower_bt[ind_resample], cf_upper_bt[ind_resample], ks_bt[ind_resample], pvalue_bt[ind_resample], y_true_width[ind_resample]] \
= error_measure(I, y_true[jj], results_fix, conf_level)
cf_width_bt = (cf_upper_bt - cf_lower_bt) / y_true_width
# End for
cf_upper[i], cf_lower[i], ks[i], pvalue[i] = cf_upper_bt.mean(), cf_lower_bt.mean(), ks_bt.mean(), pvalue_bt.mean()
cf_upper_lower[i], cf_upper_upper[i] = np.quantile(cf_upper_bt, conf_level)
cf_lower_lower[i], cf_lower_upper[i] = np.quantile(cf_lower_bt, conf_level)
cf_width[i], cf_width_lower[i], cf_width_upper[i] = cf_width_bt.mean(), *np.quantile(cf_width_bt, conf_level)
ks_lower[i], ks_upper[i] = np.quantile(ks_bt, conf_level)
pvalue_lower[i], pvalue_upper[i] = np.quantile(pvalue_bt, conf_level)
cond_mean[i] = results_fix.mean()
if len(ind_fix) == x.shape[0]:
cv[i] = 0
# cond_mean[i] = func(x_temp)[0][0]
else:
mean, variance = cond_moments(fun, x_temp, ind_fix, return_variance=True)
# cond_mean[i] = mean[0]
cv[i] = (np.sqrt(variance) / mean)[0]
# End If
# update pool_results
measure_list = [measure_ele[i] for measure_ele in measures_all]
pool_results = pool_update(ind_fix, measure_list, pool_results)
else:
# map index to calculated values
for ele in range(len(measures_all)):
measures_all[ele][i] = skip_calcul[ele]
# End if
# End for()
names = ['cf_upper', 'cf_lower', 'ks', 'pvalue', 'cv',
'cf_upper_upper', 'cf_upper_lower', 'cf_lower_upper',
'cf_lower_lower', 'ks_lower', 'ks_upper',
'pvalue_lower', 'pvalue_upper',
'cf_width', 'cf_width_lower',
'cf_width_upper', 'cond_mean']
dict_return = dict(zip(names, measures_all))
return dict_return, pool_results
def error_measure(I, y_true, results_fix, conf_level):
"""
Calculate the error measures with a resample dataset.
Parameters:
----------
I : np.array
the random index of each bootstrap
y_true : list,
Function results with all x varying (the raw sampling matrix of x)
    results_fix : list,
        Conditional results with some x fixed
conf_level: list, percentiles used to calculate the confidence intervals
Returns:
----------
List, values of uncertainty measures
"""
y_true_resample = y_true[I]
results_fix_resample = results_fix[I]
cf_lower_temp, cf_upper_temp = np.quantile(results_fix_resample, conf_level)
ks_bt_temp, pvalue_bt_temp = stats.ks_2samp(y_true_resample, results_fix_resample)
y_true_width_temp = np.quantile(y_true_resample, conf_level[1]) - np.quantile(y_true_resample, conf_level[0])
return [cf_lower_temp, cf_upper_temp, ks_bt_temp, pvalue_bt_temp, y_true_width_temp]
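# Illustrative check (synthetic numbers, not from the original study): when the
# unconditioned and conditioned samples are identical, the KS statistic is 0 and the
# p-value is 1, while the confidence bounds coincide with those of y_true.
def _demo_error_measure():
    rng = np.random.RandomState(0)
    y = rng.rand(100)
    I = np.arange(100)
    return error_measure(I, y, y, [0.025, 0.975])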
def uncond_cal(y_true, conf_level, rand):
"""
Calculate the unconditional results
Parameters:
----------
y_true : list,
Function results with all x varying (the raw sampling matrix of x)
conf_level: list, percentiles used to calculate the confidence intervals
rand : np.ndarray,
Resample index in bootstrap, shape of R * N,
where R is the number of resamples
    Returns:
    ----------
    uncond_dict : dict,
        Unconditional confidence-interval bounds ('uncond_cf_low', 'uncond_cf_up')
        and the unconditional mean ('uncond_mean')
"""
# if rand is None:
# y_true_bt = y_true
# elif isinstance(rand, np.ndarray):
# y_true_bt = y_true[rand]
# else:
# AssertionError
y_true_bt = np.zeros(shape=(y_true.shape[0], rand.shape[0], y_true.shape[1]))
# import pdb; pdb.set_trace()
for ii in range(y_true.shape[0]):
y_true_bt[ii] = y_true[ii][rand]
uncond_cf_bt = np.quantile(y_true_bt, conf_level, axis=2)
uncond_cf_low, uncond_cf_up = {}, {}
uncond_cf_low['mean'] = uncond_cf_bt[0].mean()
uncond_cf_low['low'], uncond_cf_low['up'] = np.quantile(uncond_cf_bt[0], conf_level)
uncond_cf_up['mean'] = uncond_cf_bt[1].mean()
uncond_cf_up['low'], uncond_cf_up['up'] = np.quantile(uncond_cf_bt[1], conf_level)
uncond_dict = {
'uncond_cf_low' : uncond_cf_low,
'uncond_cf_up' : uncond_cf_up,
'uncond_mean': y_true_bt.mean()
}
return uncond_dict
def results_exist(parms_fixed, pool_results):
"""
Helper function to determine whether results exist.
Parameters
----------
parms_fixed : list,
Index of parameters to fix
pool_results : dict,
Contains both index of parameters fixed and the corresponding results
Returns
-------
    skip_cal : bool or list,
        False if no cached results exist for parms_fixed, otherwise the cached
        list of measures
"""
if pool_results == {}:
skip_cal = False
elif parms_fixed in pool_results['parms']:
index_measure = pool_results['parms'].index(parms_fixed)
skip_cal = pool_results[f'measures_{index_measure}']
else:
skip_cal = False
return skip_cal
def pool_update(parms_fixed, measure_list, pool_results):
"""Update pool_results with new values.
Parameters
----------
parms_fixed : list,
Index of parameters to fix
measure_list : list,
Measures newly calculated for parameters in parms_fixed
pool_results : dict,
Contains both index of parameters fixed and the corresponding results
Returns
----------
Updated pool_results
"""
try:
pool_results['parms'].append(parms_fixed[:])
except KeyError:
pool_results['parms'] = [parms_fixed[:]]
index_measure = pool_results['parms'].index(parms_fixed)
pool_results[f'measures_{index_measure}'] = measure_list
return pool_results
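# Minimal illustration (assumed values, not from the original source) of the caching
# helpers above: the first call stores the measures for a fixed-parameter set, the
# second call retrieves them instead of recomputing, and an unseen set returns False.
def _demo_pool_cache():
    pool = pool_update([0, 3], list(range(17)), {})
    assert results_exist([0, 3], pool) == list(range(17))
    assert results_exist([1, 2], pool) is False
    return pool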
|
[
"numpy.quantile",
"pyapprox.multivariate_polynomials.conditional_moments_of_polynomial_chaos_expansion",
"numpy.copy",
"numpy.zeros",
"scipy.stats.ks_2samp",
"numpy.sqrt"
] |
[((7295, 7340), 'numpy.quantile', 'np.quantile', (['results_fix_resample', 'conf_level'], {}), '(results_fix_resample, conf_level)\n', (7306, 7340), True, 'import numpy as np\n'), ((7374, 7427), 'scipy.stats.ks_2samp', 'stats.ks_2samp', (['y_true_resample', 'results_fix_resample'], {}), '(y_true_resample, results_fix_resample)\n', (7388, 7427), False, 'from scipy import stats\n'), ((8267, 8332), 'numpy.zeros', 'np.zeros', ([], {'shape': '(y_true.shape[0], rand.shape[0], y_true.shape[1])'}), '(shape=(y_true.shape[0], rand.shape[0], y_true.shape[1]))\n', (8275, 8332), True, 'import numpy as np\n'), ((8465, 8507), 'numpy.quantile', 'np.quantile', (['y_true_bt', 'conf_level'], {'axis': '(2)'}), '(y_true_bt, conf_level, axis=2)\n', (8476, 8507), True, 'import numpy as np\n'), ((8657, 8697), 'numpy.quantile', 'np.quantile', (['uncond_cf_bt[0]', 'conf_level'], {}), '(uncond_cf_bt[0], conf_level)\n', (8668, 8697), True, 'import numpy as np\n'), ((8797, 8837), 'numpy.quantile', 'np.quantile', (['uncond_cf_bt[1]', 'conf_level'], {}), '(uncond_cf_bt[1], conf_level)\n', (8808, 8837), True, 'import numpy as np\n'), ((7452, 7495), 'numpy.quantile', 'np.quantile', (['y_true_resample', 'conf_level[1]'], {}), '(y_true_resample, conf_level[1])\n', (7463, 7495), True, 'import numpy as np\n'), ((7498, 7541), 'numpy.quantile', 'np.quantile', (['y_true_resample', 'conf_level[0]'], {}), '(y_true_resample, conf_level[0])\n', (7509, 7541), True, 'import numpy as np\n'), ((3705, 3715), 'numpy.copy', 'np.copy', (['x'], {}), '(x)\n', (3712, 3715), True, 'import numpy as np\n'), ((5033, 5069), 'numpy.quantile', 'np.quantile', (['cf_upper_bt', 'conf_level'], {}), '(cf_upper_bt, conf_level)\n', (5044, 5069), True, 'import numpy as np\n'), ((5121, 5157), 'numpy.quantile', 'np.quantile', (['cf_lower_bt', 'conf_level'], {}), '(cf_lower_bt, conf_level)\n', (5132, 5157), True, 'import numpy as np\n'), ((5319, 5349), 'numpy.quantile', 'np.quantile', (['ks_bt', 'conf_level'], {}), '(ks_bt, conf_level)\n', (5330, 5349), True, 'import numpy as np\n'), ((5397, 5431), 'numpy.quantile', 'np.quantile', (['pvalue_bt', 'conf_level'], {}), '(pvalue_bt, conf_level)\n', (5408, 5431), True, 'import numpy as np\n'), ((4043, 4067), 'numpy.zeros', 'np.zeros', (['total_resample'], {}), '(total_resample)\n', (4051, 4067), True, 'import numpy as np\n'), ((4069, 4093), 'numpy.zeros', 'np.zeros', (['total_resample'], {}), '(total_resample)\n', (4077, 4093), True, 'import numpy as np\n'), ((4095, 4119), 'numpy.zeros', 'np.zeros', (['total_resample'], {}), '(total_resample)\n', (4103, 4119), True, 'import numpy as np\n'), ((4121, 4145), 'numpy.zeros', 'np.zeros', (['total_resample'], {}), '(total_resample)\n', (4129, 4145), True, 'import numpy as np\n'), ((4161, 4185), 'numpy.zeros', 'np.zeros', (['total_resample'], {}), '(total_resample)\n', (4169, 4185), True, 'import numpy as np\n'), ((4187, 4211), 'numpy.zeros', 'np.zeros', (['total_resample'], {}), '(total_resample)\n', (4195, 4211), True, 'import numpy as np\n'), ((5651, 5707), 'pyapprox.multivariate_polynomials.conditional_moments_of_polynomial_chaos_expansion', 'cond_moments', (['fun', 'x_temp', 'ind_fix'], {'return_variance': '(True)'}), '(fun, x_temp, ind_fix, return_variance=True)\n', (5663, 5707), True, 'from pyapprox.multivariate_polynomials import conditional_moments_of_polynomial_chaos_expansion as cond_moments\n'), ((5243, 5279), 'numpy.quantile', 'np.quantile', (['cf_width_bt', 'conf_level'], {}), '(cf_width_bt, conf_level)\n', (5254, 5279), True, 'import numpy as np\n'), ((5791, 
5808), 'numpy.sqrt', 'np.sqrt', (['variance'], {}), '(variance)\n', (5798, 5808), True, 'import numpy as np\n')]
|
import numpy as np
import gpflow
import matplotlib.pyplot as plt
import matplotlib.pylab as pl
from mpl_toolkits.mplot3d import axes3d, Axes3D
from BoManifolds.Riemannian_utils.sphere_utils import logmap
from BoManifolds.kernel_utils.kernels_sphere_tf import SphereGaussianKernel, SphereLaplaceKernel
from BoManifolds.plot_utils.manifold_plots import plot_sphere
plt.rcParams['text.usetex'] = True # use Latex font for plots
plt.rcParams['text.latex.preamble'] = [r'\usepackage{bm}']
"""
This example shows the use of different kernels for the hypersphere manifold S^n, used for Gaussian process regression.
The tested function corresponds to a Gaussian distribution with a mean defined on the sphere and a covariance defined on
the tangent space of the mean. Training data are generated "far" from the mean. The trained Gaussian process is then
used to determine the value of the function from test data sampled around the mean of the test function.
The kernels used are:
- Manifold-RBF kernel (geometry-aware)
- Laplace kernel (geometry-aware)
- Euclidean kernel (classical geometry-unaware)
This example works with GPflow version = 0.5 (used by GPflowOpt).
Authors: <NAME> and <NAME>, 2019
License: MIT
Contact: <EMAIL>, <EMAIL>
"""
def test_function(x, mu_test_function):
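    """Gaussian-like test function on the sphere: x is mapped to the tangent space of
    mu_test_function with the logarithmic map and evaluated under a fixed covariance,
    i.e. f(x) = N(Log_mu(x); 0, sigma_test_fct)."""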
# Parameters
sigma_test_fct = np.array([[0.6, 0.2, 0], [0.2, 0.3, -0.01], [0, -0.01, 0.2]])
inv_sigma_test_fct = np.linalg.inv(sigma_test_fct)
det_sigma_test_fct = np.linalg.det(sigma_test_fct)
# Function value
x_proj = logmap(x, mu_test_function)
return np.exp(- 0.5 * np.dot(x_proj.T, np.dot(inv_sigma_test_fct, x_proj))) / np.sqrt(
(2 * np.pi) ** dim * det_sigma_test_fct)
def plot_gaussian_process_prediction(figure_handle, mu, test_data, mean_est, mu_test_fct, title):
ax = Axes3D(figure_handle)
# Make the panes transparent
ax.xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
ax.yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
ax.zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
# Make the grid lines transparent
ax.xaxis._axinfo["grid"]['color'] = (1, 1, 1, 0)
ax.yaxis._axinfo["grid"]['color'] = (1, 1, 1, 0)
ax.zaxis._axinfo["grid"]['color'] = (1, 1, 1, 0)
# Remove axis
ax._axis3don = False
# Initial view
# ax.view_init(elev=10, azim=-20.) # (default: elev=30, azim=-60)
ax.view_init(elev=10, azim=30.) # (default: elev=30, azim=-60)
# Plot sphere
plot_sphere(ax, alpha=0.4)
# Plot training data on the manifold
plt_scale_fact = test_function(mu_test_fct, mu_test_fct)[0, 0]
nb_data_test = test_data.shape[0]
for n in range(nb_data_test):
ax.scatter(test_data[n, 0], test_data[n, 1], test_data[n, 2], c=pl.cm.inferno(mean_est[n] / plt_scale_fact))
# Plot mean of Gaussian test function
ax.scatter(mu[0], mu[1], mu[2], c='g', marker='D')
plt.title(title, size=25)
if __name__ == "__main__":
np.random.seed(1234)
# Define the test function mean
mu_test_fct = np.array([1 / np.sqrt(2), 1 / np.sqrt(2), 0])
# Generate random data on the sphere
nb_data = 20
dim = 3
mean = np.array([1, 0, 0])
mean = mean / np.linalg.norm(mean)
fact_cov = 0.1
cov = fact_cov * np.eye(dim)
data = np.random.multivariate_normal(mean, cov, nb_data)
x_man = data / np.linalg.norm(data, axis=1)[:, None]
y_train = np.zeros((nb_data,1))
for n in range(nb_data):
y_train[n] = test_function(x_man[n], mu_test_fct)
# Generate test data on the sphere
nb_data_test = 10
mean_test = mu_test_fct
    mean_test = mean_test / np.linalg.norm(mean_test)
fact_cov = 0.1
cov_test = fact_cov * np.eye(dim)
data = np.random.multivariate_normal(mean_test, cov_test, nb_data_test)
x_man_test = data / np.linalg.norm(data, axis=1)[:, None]
y_test = np.zeros((nb_data_test, 1))
for n in range(nb_data_test):
y_test[n] = test_function(x_man_test[n], mu_test_fct)
# Plot training data - 3D figure
fig = plt.figure(figsize=(5, 5))
plot_gaussian_process_prediction(fig, mu_test_fct, x_man, y_train, mu_test_fct, r'Training data')
# Plot true test data - 3D figure
fig = plt.figure(figsize=(5, 5))
plot_gaussian_process_prediction(fig, mu_test_fct, x_man_test, y_test, mu_test_fct, r'Test data (ground truth)')
# ### Gaussian kernel
# Define the kernel
k_gauss = SphereGaussianKernel(input_dim=dim, active_dims=range(dim), beta_min=7.0, beta=10.0, variance=1.)
# Kernel computation
K1 = k_gauss.compute_K_symm(x_man)
K12 = k_gauss.compute_K(x_man, x_man_test)
K2 = k_gauss.compute_K_symm(x_man_test)
# GPR model
m_gauss = gpflow.gpr.GPR(x_man, y_train, kern=k_gauss, mean_function=None)
# Optimization of the model parameters
m_gauss.optimize()
# Compute posterior samples
# Does not always work due to Cholesky decomposition used in gpflow
# nb_samples_post = 10
# posterior_samples = m.predict_f_samples(y_man_test.T, nb_samples_post)
# Prediction
mean_est_gauss, cov_est_gauss = m_gauss.predict_f_full_cov(x_man_test)
# mean, cov = m.predict_y(x_new) # includes noise variance (seems not to be included in predict_f functions
var_est_gauss = np.diag(cov_est_gauss[0])[:, None]
# Error computation
error_gauss = np.sqrt(np.sum((y_test - mean_est_gauss) ** 2) / nb_data_test)
print('Estimation error (Manifold-RBF kernel) = ', error_gauss)
# Plot test data
fig = plt.figure(figsize=(5, 5))
plot_gaussian_process_prediction(fig, mu_test_fct, x_man_test, mean_est_gauss, mu_test_fct, r'Manifold-RBF kernel')
# ### Laplace kernel
# Define the kernel
k_laplace = SphereLaplaceKernel(input_dim=dim, active_dims=range(dim), beta=10.0, variance=1.)
# Kernel computation
K1 = k_laplace.compute_K_symm(x_man)
K12 = k_laplace.compute_K(x_man, x_man_test)
K2 = k_laplace.compute_K_symm(x_man_test)
# GPR model
m_laplace = gpflow.gpr.GPR(x_man, y_train, kern=k_laplace, mean_function=None)
# Optimization of the model parameters
m_laplace.optimize()
# Compute posterior samples
# Does not always work due to Cholesky decomposition used in gpflow
# nb_samples_post = 10
# posterior_samples = m.predict_f_samples(y_man_test.T, nb_samples_post)
# Prediction
mean_est_laplace, cov_est_laplace = m_laplace.predict_f_full_cov(x_man_test)
# mean, cov = m.predict_y(x_new) # includes noise variance (seems not to be included in predict_f functions
var_est_laplace = np.diag(cov_est_laplace[0])[:, None]
# Error computation
error_laplace = np.sqrt(np.sum((y_test - mean_est_laplace) ** 2) / nb_data_test)
print('Estimation error (Laplace kernel) = ', error_laplace)
# Plot test data
fig = plt.figure(figsize=(5, 5))
plot_gaussian_process_prediction(fig, mu_test_fct, x_man_test, mean_est_laplace, mu_test_fct, r'Laplace kernel')
# ### Euclidean RBF
# Define the kernel
k_eucl = gpflow.kernels.RBF(input_dim=dim, ARD=False)
# Kernel computation
K1 = k_eucl.compute_K_symm(x_man)
K12 = k_eucl.compute_K(x_man, x_man_test)
K2 = k_eucl.compute_K_symm(x_man_test)
# GPR model
m_eucl = gpflow.gpr.GPR(x_man, y_train, kern=k_eucl, mean_function=None)
# Optimization of the model parameters
m_eucl.optimize()
# Compute posterior samples
# Does not always work due to Cholesky decomposition used in gpflow
# nb_samples_post = 10
# posterior_samples = m.predict_f_samples(y_man_test.T, nb_samples_post)
# Prediction
mean_est_eucl, cov_est_eucl = m_eucl.predict_f_full_cov(x_man_test)
# mean, cov = m_eucl.predict_y(x_new) # includes noise variance (seems not to be included in predict_f functions
var_est_eucl = np.diag(cov_est_eucl[0])[:, None]
# Error computation
error_eucl = np.sqrt(np.sum((y_test - mean_est_eucl) ** 2) / nb_data_test)
print('Estimation error (Euclidean-RBF kernel) = ', error_eucl)
# Plot test data
fig = plt.figure(figsize=(5, 5))
plot_gaussian_process_prediction(fig, mu_test_fct, x_man_test, mean_est_eucl, mu_test_fct, r'Euclidean-RBF kernel')
plt.show()
|
[
"matplotlib.pyplot.title",
"numpy.random.seed",
"numpy.sum",
"BoManifolds.Riemannian_utils.sphere_utils.logmap",
"matplotlib.pyplot.figure",
"numpy.linalg.norm",
"gpflow.kernels.RBF",
"numpy.diag",
"numpy.linalg.det",
"matplotlib.pylab.cm.inferno",
"matplotlib.pyplot.show",
"mpl_toolkits.mplot3d.Axes3D",
"numpy.linalg.inv",
"gpflow.gpr.GPR",
"numpy.dot",
"numpy.zeros",
"BoManifolds.plot_utils.manifold_plots.plot_sphere",
"numpy.array",
"numpy.random.multivariate_normal",
"numpy.eye",
"numpy.sqrt"
] |
[((1336, 1397), 'numpy.array', 'np.array', (['[[0.6, 0.2, 0], [0.2, 0.3, -0.01], [0, -0.01, 0.2]]'], {}), '([[0.6, 0.2, 0], [0.2, 0.3, -0.01], [0, -0.01, 0.2]])\n', (1344, 1397), True, 'import numpy as np\n'), ((1423, 1452), 'numpy.linalg.inv', 'np.linalg.inv', (['sigma_test_fct'], {}), '(sigma_test_fct)\n', (1436, 1452), True, 'import numpy as np\n'), ((1478, 1507), 'numpy.linalg.det', 'np.linalg.det', (['sigma_test_fct'], {}), '(sigma_test_fct)\n', (1491, 1507), True, 'import numpy as np\n'), ((1543, 1570), 'BoManifolds.Riemannian_utils.sphere_utils.logmap', 'logmap', (['x', 'mu_test_function'], {}), '(x, mu_test_function)\n', (1549, 1570), False, 'from BoManifolds.Riemannian_utils.sphere_utils import logmap\n'), ((1820, 1841), 'mpl_toolkits.mplot3d.Axes3D', 'Axes3D', (['figure_handle'], {}), '(figure_handle)\n', (1826, 1841), False, 'from mpl_toolkits.mplot3d import axes3d, Axes3D\n'), ((2450, 2476), 'BoManifolds.plot_utils.manifold_plots.plot_sphere', 'plot_sphere', (['ax'], {'alpha': '(0.4)'}), '(ax, alpha=0.4)\n', (2461, 2476), False, 'from BoManifolds.plot_utils.manifold_plots import plot_sphere\n'), ((2878, 2903), 'matplotlib.pyplot.title', 'plt.title', (['title'], {'size': '(25)'}), '(title, size=25)\n', (2887, 2903), True, 'import matplotlib.pyplot as plt\n'), ((2937, 2957), 'numpy.random.seed', 'np.random.seed', (['(1234)'], {}), '(1234)\n', (2951, 2957), True, 'import numpy as np\n'), ((3142, 3161), 'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (3150, 3161), True, 'import numpy as np\n'), ((3265, 3314), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['mean', 'cov', 'nb_data'], {}), '(mean, cov, nb_data)\n', (3294, 3314), True, 'import numpy as np\n'), ((3387, 3409), 'numpy.zeros', 'np.zeros', (['(nb_data, 1)'], {}), '((nb_data, 1))\n', (3395, 3409), True, 'import numpy as np\n'), ((3705, 3769), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['mean_test', 'cov_test', 'nb_data_test'], {}), '(mean_test, cov_test, nb_data_test)\n', (3734, 3769), True, 'import numpy as np\n'), ((3846, 3873), 'numpy.zeros', 'np.zeros', (['(nb_data_test, 1)'], {}), '((nb_data_test, 1))\n', (3854, 3873), True, 'import numpy as np\n'), ((4018, 4044), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 5)'}), '(figsize=(5, 5))\n', (4028, 4044), True, 'import matplotlib.pyplot as plt\n'), ((4196, 4222), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 5)'}), '(figsize=(5, 5))\n', (4206, 4222), True, 'import matplotlib.pyplot as plt\n'), ((4688, 4752), 'gpflow.gpr.GPR', 'gpflow.gpr.GPR', (['x_man', 'y_train'], {'kern': 'k_gauss', 'mean_function': 'None'}), '(x_man, y_train, kern=k_gauss, mean_function=None)\n', (4702, 4752), False, 'import gpflow\n'), ((5491, 5517), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 5)'}), '(figsize=(5, 5))\n', (5501, 5517), True, 'import matplotlib.pyplot as plt\n'), ((5980, 6046), 'gpflow.gpr.GPR', 'gpflow.gpr.GPR', (['x_man', 'y_train'], {'kern': 'k_laplace', 'mean_function': 'None'}), '(x_man, y_train, kern=k_laplace, mean_function=None)\n', (5994, 6046), False, 'import gpflow\n'), ((6798, 6824), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 5)'}), '(figsize=(5, 5))\n', (6808, 6824), True, 'import matplotlib.pyplot as plt\n'), ((7004, 7048), 'gpflow.kernels.RBF', 'gpflow.kernels.RBF', ([], {'input_dim': 'dim', 'ARD': '(False)'}), '(input_dim=dim, ARD=False)\n', (7022, 7048), False, 'import gpflow\n'), ((7230, 7293), 'gpflow.gpr.GPR', 
'gpflow.gpr.GPR', (['x_man', 'y_train'], {'kern': 'k_eucl', 'mean_function': 'None'}), '(x_man, y_train, kern=k_eucl, mean_function=None)\n', (7244, 7293), False, 'import gpflow\n'), ((8029, 8055), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 5)'}), '(figsize=(5, 5))\n', (8039, 8055), True, 'import matplotlib.pyplot as plt\n'), ((8181, 8191), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8189, 8191), True, 'import matplotlib.pyplot as plt\n'), ((1653, 1701), 'numpy.sqrt', 'np.sqrt', (['((2 * np.pi) ** dim * det_sigma_test_fct)'], {}), '((2 * np.pi) ** dim * det_sigma_test_fct)\n', (1660, 1701), True, 'import numpy as np\n'), ((3180, 3200), 'numpy.linalg.norm', 'np.linalg.norm', (['mean'], {}), '(mean)\n', (3194, 3200), True, 'import numpy as np\n'), ((3241, 3252), 'numpy.eye', 'np.eye', (['dim'], {}), '(dim)\n', (3247, 3252), True, 'import numpy as np\n'), ((3615, 3635), 'numpy.linalg.norm', 'np.linalg.norm', (['mean'], {}), '(mean)\n', (3629, 3635), True, 'import numpy as np\n'), ((3681, 3692), 'numpy.eye', 'np.eye', (['dim'], {}), '(dim)\n', (3687, 3692), True, 'import numpy as np\n'), ((5252, 5277), 'numpy.diag', 'np.diag', (['cov_est_gauss[0]'], {}), '(cov_est_gauss[0])\n', (5259, 5277), True, 'import numpy as np\n'), ((6556, 6583), 'numpy.diag', 'np.diag', (['cov_est_laplace[0]'], {}), '(cov_est_laplace[0])\n', (6563, 6583), True, 'import numpy as np\n'), ((7793, 7817), 'numpy.diag', 'np.diag', (['cov_est_eucl[0]'], {}), '(cov_est_eucl[0])\n', (7800, 7817), True, 'import numpy as np\n'), ((3334, 3362), 'numpy.linalg.norm', 'np.linalg.norm', (['data'], {'axis': '(1)'}), '(data, axis=1)\n', (3348, 3362), True, 'import numpy as np\n'), ((3794, 3822), 'numpy.linalg.norm', 'np.linalg.norm', (['data'], {'axis': '(1)'}), '(data, axis=1)\n', (3808, 3822), True, 'import numpy as np\n'), ((5337, 5375), 'numpy.sum', 'np.sum', (['((y_test - mean_est_gauss) ** 2)'], {}), '((y_test - mean_est_gauss) ** 2)\n', (5343, 5375), True, 'import numpy as np\n'), ((6645, 6685), 'numpy.sum', 'np.sum', (['((y_test - mean_est_laplace) ** 2)'], {}), '((y_test - mean_est_laplace) ** 2)\n', (6651, 6685), True, 'import numpy as np\n'), ((7876, 7913), 'numpy.sum', 'np.sum', (['((y_test - mean_est_eucl) ** 2)'], {}), '((y_test - mean_est_eucl) ** 2)\n', (7882, 7913), True, 'import numpy as np\n'), ((2730, 2773), 'matplotlib.pylab.cm.inferno', 'pl.cm.inferno', (['(mean_est[n] / plt_scale_fact)'], {}), '(mean_est[n] / plt_scale_fact)\n', (2743, 2773), True, 'import matplotlib.pylab as pl\n'), ((3027, 3037), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (3034, 3037), True, 'import numpy as np\n'), ((3043, 3053), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (3050, 3053), True, 'import numpy as np\n'), ((1614, 1648), 'numpy.dot', 'np.dot', (['inv_sigma_test_fct', 'x_proj'], {}), '(inv_sigma_test_fct, x_proj)\n', (1620, 1648), True, 'import numpy as np\n')]
|
import os, logging, json, re
import pandas as pd
import numpy as np
from BarSeqPy.translate_R_to_pandas import *
def data_prep_1(data_dir, FEBA_dir, debug_bool=False, meta_ix=7, cfg=None):
""" The first phase of data preparation for the BarSeqR Computations
Args:
data_dir: (str) Path to directory which contains the
following files: 'all.poolcount', 'genes',
'exps', 'pool' - all TSV files.
Optionally contains the following files:
strainusage.barcodes.json - json list
strainusage.genes.json - json list
strainusage.genes12.json - json list
ignore_list.json - json list ( list of str
with sample-index name to ignore )
All these files are changed depending on the input.
FEBA_dir: (str) Path to directory which contains the
following files: 'desc_short_rules'
debug_bool: Whether you'd like to print the dataframes
as a test to the data_dir before running FEBA_Fit
        meta_ix (int): The number of meta column indices in all.poolcount
cfg (python dict): The default and config variables required:
drop_exps (bool): Do we drop the 'Drop' experiments
from the experiments dataframe
already?
okControls (bool): Are we defining controls by
the method where it's written
into the Experiments file?
Returns:
list<exps_df, all_df, genes_df,
strainsUsed_list, genesUsed_list, genesUsed12_list>
exps_df (pandas DataFrame): Must contain cols: (Variable)
all_df (pandas DataFrame): Must contain cols:
genes_df (pandas DataFrame): Must contain cols:
scaffold, begin
strainsUsed_list (py list or None):
genesUsed_list (py list or None):
genesUsed12_list (py list or None):
Description:
Within data_prep1 we perform the following functions:
getDataFrames:
We import the tables genes, all, exps, rules using a dict to say which
data type is in each column. The dataframes we get are called:
genes_df, all_df, exps_df, rules_df
Within exps_df:
            We optionally remove the rows that have 'Drop' set to True (if drop_exps==True).
We strip (remove the spaces from) the values in 'Group',
'Condition_1', 'Condition_2'
We check that the right column names exist in each of the tables.
checkLocusIdEquality:
We check all the locusIds in all_df are also present in genes_df
If debugging we also print the number of unique locusIds in each.
check_exps_df_against_all_df:
We check that the index names in all.poolcount are equivalent to the
'SetName' + '.' + 'Index' in exps
prepare_set_names:
We replace the SetNames from their original version to a simplified standard one,
remove the period in between SetName and Index in all.poolcount columns,
and make the 'names' column in the experiments file and the all.poolcount columns
have the same values. For example, we move column name from Keio_ML9_set2.IT004 to
set2IT004, and rename the values in the Experiments file similarly.
get_special_lists:
We get the lists from the files in data_dir if they are there,
otherwise we return their values as empty lists. The lists we
look for are genesUsed, which should be a list of locusIds
from this genome that we are using, and ignore_list, which is a list
of experiment names to ignore (columns from all.poolcount).
If debug_bool is set to true we print out resultant exps, all, genes to 'tmp' dir
We return the following variables:
'exps_df' (The experiments dataframe)
'all_df' (The barcodes and locations dataframe)
'genes_df' (The total genes dataframe)
'genesUsed_list' (A python list of locusIds that we will use)
'ignore_list' (A python list of experiment names to ignore)
"""
genes_df, all_df, exps_df, rules_df = getDataFrames(data_dir, FEBA_dir,
drop_exps=cfg['drop_exps'],
okControls = cfg['okControls'],
dbg_lvl=0)
# Makes no changes to the variables
checkLocusIdEquality(all_df, genes_df, debug_bool=debug_bool)
# We check that SetNames and Indexes in experiments file match all.poolcount file
check_exps_df_against_all_df(exps_df, all_df, meta_ix)
# We make it so the names are cleaner and create 'names', 'num', 'short' in exps_df
exps_df, all_df, replace_col_d = prepare_set_names(exps_df, all_df, rules_df,
okControls=cfg['okControls'],
meta_ix=meta_ix,
debug_bool=debug_bool)
genesUsed_list, ignore_list = get_special_lists(data_dir, all_df,
replace_col_d, debug_bool=debug_bool)
if debug_bool:
exps_df.to_csv("tmp/py_test1_exps_fp.tsv", sep="\t")
all_df.to_csv("tmp/py_test1_all_fp.tsv", sep="\t")
genes_df.to_csv("tmp/py_test1_genes_fp.tsv", sep="\t")
return [exps_df, all_df, genes_df, genesUsed_list, ignore_list]
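# Illustrative sketch: one way data_prep_1 might be invoked. The directory
# paths and cfg values below are hypothetical placeholders, not part of the
# pipeline itself.
def _example_data_prep_1(data_dir="my_data_dir", FEBA_dir="my_FEBA_dir"):
    cfg = {"drop_exps": False, "okControls": False}
    exps_df, all_df, genes_df, genesUsed_list, ignore_list = data_prep_1(
                                    data_dir, FEBA_dir, debug_bool=False,
                                    meta_ix=7, cfg=cfg)
    return exps_df, all_df, genes_df, genesUsed_list, ignore_list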
def getDataFrames(data_dir, FEBA_dir, drop_exps=False,
okControls=False, dbg_lvl=0):
"""
Args:
data_dir: (str) Path to directory which contains the
following files: 'all.poolcount', 'genes',
'exps' - all TSV files.
Optionally contains the following files:
strainusage.barcodes.json - json list
strainusage.genes.json - json list
strainusage.genes12.json - json list
All these files are changed depending on the input.
FEBA_dir: (str) Path to directory which contains the
following files: 'desc_short_rules'
drop_exps (bool): Should we drop all experiments that have Drop=True
already?
Returns:
genes_df (pandas DataFrame): Contains columns:
locusId, sysName, type, scaffoldId, begin, end, strand, name, desc, GC, nTA
all_df (pandas DataFrame): Contains columns:
barcode, rcbarcode, scaffold, strand, pos, locusId, f, setName1, ..., setNameN
exps_df (pandas DataFrame): Must contains columns:
Index (str)
Date_pool_expt_started (str)
Description (str)
SetName (Str)
Group (str)
Drop (bool)
[Condition_1]
[Condition_2]
rules_df (pandas DataFrame): Contains columns:
V1 (str): Original string to replace
V2 (str): String to replace V1 by
Description:
We import the tables using a dict to say which data type is in each column.
In exps_df:
            We might remove the rows that have 'Drop' set to True (if drop_exps==True).
We remove the spaces from the values in 'Group', 'Condition_1', 'Condition_2'
We check that the right column names exist in each of the tables.
To Do:
Should we strip all of the column names when we import them?
"""
data_files = os.listdir(data_dir)
for x in ["all.poolcount", "genes", "exps", "pool"]:
if x not in data_files:
raise Exception("Input data_dir to RunFEBA must include files:\n"
"all.poolcount, genes, exps, and pool."
" Currently missing: " + x)
all_fp = os.path.join(data_dir, "all.poolcount")
genes_fp = os.path.join(data_dir, "genes")
exps_fp = os.path.join(data_dir, "exps")
short_rules_fp = os.path.join(FEBA_dir, "desc_short_rules.tsv")
# Checking access permissions
for x in [all_fp, genes_fp, exps_fp]:
if not os.access(x, os.R_OK):
raise Exception("To run, program requires read permission to file " + x)
# Read tsv files into dataframes, making sure columns locusId and scaffoldId read as stings
genes_dtypes = {
'locusId': str,
'sysName': str,
'type': int,
'scaffoldId': str,
'begin': int,
'end': int,
'strand': str,
'name': str,
'desc': str,
'GC': float,
'nTA': int
}
genes_df = pd.read_table(genes_fp, dtype=genes_dtypes)
#barcode rcbarcode scaffold strand pos locusId f
all_dtypes = {
'barcode': str,
'rcbarcode': str,
'scaffold': str,
'strand': str,
'pos': int,
'locusId': str,
'f': float
}
all_df = pd.read_table(all_fp, dtype=all_dtypes)
exps_dtypes = {
'SetName': str,
'Index': str,
'Date_pool_expt_started': str,
"Description": str,
"Group": str,
"Drop": str,
"Condition_1": str,
"Condition_2": str,
"control_group": str,
"control_bool": str
}
exps_df = pd.read_table(exps_fp, dtype=exps_dtypes)
# We update the 'Drop' experiments
if 'Drop' in exps_df:
new_drops = []
for ix, value in exps_df['Drop'].items():
if not isinstance(value, str):
if pd.isna(value):
new_drops.append(False)
else:
raise Exception(f"Value in 'Drop' not string: {value}")
elif str(value).strip().upper() == "TRUE":
new_drops.append(True)
elif value.strip().upper() == "FALSE":
new_drops.append(False)
else:
raise Exception(f"Cannot recognize Drop value in row {ix}:"
f" {value}")
exps_df['Drop'] = new_drops
else:
exps_df['Drop'] = [False]*exps_df.shape[0]
"""
if drop_exps:
# Removing Drop rows
exps_df.drop(remove_indeces, axis=0, inplace=True)
"""
# Remove trailing spaces:
for x in ["Group", "Condition_1", "Condition_2", "control_bool"]:
if x in exps_df:
# We take the entire column (pandas Series) and remove the spaces
# from either end
exps_df[x] = exps_df[x].str.strip()
rules_dtypes = {
"V1": str,
"V2": str
}
rules_df = pd.read_table(short_rules_fp, keep_default_na=False, dtype=rules_dtypes)
# Checking genes.GC
for x in ["scaffoldId", "locusId", "sysName", "desc", "begin", "end"]:
if x not in genes_df.columns:
raise Exception(f"Genes table must include header {x}")
# Checking exps table
for x in ["SetName", "Index", "Date_pool_expt_started", "Description"]:
if x not in exps_df.columns:
raise Exception(f"Experiments table must include header {x}")
if okControls:
for x in ["control_group", "control_bool"]:
if x not in exps_df.columns:
raise Exception("If okControls is set To True, then "
f"experiments table must include header {x}")
# Checking all_df
for x in ["scaffold", "locusId", "f", "pos"]:
if x not in all_df.columns:
raise Exception(f"All.PoolCount file must include header {x}")
if dbg_lvl > 1:
print(genes_df)
print(all_df)
print(exps_df)
print(rules_df)
return [genes_df, all_df, exps_df, rules_df]
def checkLocusIdEquality(all_df, genes_df, debug_bool=False):
""" We check all the locusIds in all_df are also present in genes_df
Description:
We check all the locusIds in all_df are also present in genes_df
If debugging we also print the number of unique locusIds
"""
if debug_bool:
logging.debug("Original locusId col")
logging.debug(all_df['locusId'])
# below both are pandas series
unique_all_locusIds = all_df['locusId'].dropna().unique()
unique_genes_locusIds = genes_df['locusId'].dropna().unique()
if debug_bool:
# All
logging.debug("Unique All Locus Ids: ")
logging.debug(unique_all_locusIds)
logging.debug("Number of Unique All Locus Ids: ")
logging.debug(len(unique_all_locusIds))
# Genes
logging.debug("Unique Gene Locus Ids: ")
logging.debug(unique_genes_locusIds)
logging.debug("Number of Unique Gene Locus Ids: ")
logging.debug(len(unique_genes_locusIds))
# Checking if every locusId from all.poolcount also exists in genes
not_found_locusIds = []
for x in unique_all_locusIds:
if x not in unique_genes_locusIds:
not_found_locusIds.append(x)
if len(not_found_locusIds) > 0:
raise Exception("The following locusIds were not found in the genes file."
" (All locusIds from all.poolcount must also be in the genes"
" file.)"
"', '".join(not_found_locusIds))
def check_exps_df_against_all_df(exps_df, all_df, meta_ix):
"""
We make sure that all the experiment names left in the all_df dataframe
are the same as the experiment names in the rows of the experiments
dataframe.
"""
experiment_names_test = [exps_df['SetName'].iat[i] + "." + exps_df['Index'].iat[i] for i in \
range(len(exps_df['SetName']))]
index_names = list(all_df.head())[meta_ix:]
# Number of rows:
if len(index_names) != exps_df.shape[0]:
raise Exception(f"Number of data columns in {all_fp} does not match"
f" number of rows in {exps_fp}\n"
f"{len(index_names)} != {exps_df.shape[0]}")
for i in range(len(index_names)):
if index_names[i] not in experiment_names_test:
raise Exception(f"Column names in {all_fp} do not match names from"
f"{exps_fp} at index {i}")
logging.debug("There are the same experiment names in all_df and exps_df.")
def prepare_set_names(exps_df, all_df, rules_df,
okControls=False, meta_ix=7, debug_bool=False):
"""
Description:
We replace the SetNames from the complicated version to a simpler one,
remove the period in between SetName and Index in all.poolcount columns,
and make the 'names' column in the experiments file and the all.poolcount columns
have the same values. For example, we move column name from Keio_ML9_set2.IT004 to
set2IT004, and rename the values in the Experiments file similarly.
We also add multiple new columns to exps_df:
"num", "short", "name", "t0set"
We also make sure that any experiment with its "Group" being "Time0" has
its short as "Time0" as well.
We initialize the 't0set' column as being the date + the set name (lane).
"""
# Below is a numpy array, not a series
uniqueSetNames_nparray = exps_df['SetName'].unique()
# shortSetNames is numpy ndarray, shortNamesTranslation_d is a dict which contains
# conversions from original names to short names.
shortSetNames, shortNamesTranslation_d = ShortSetNames(uniqueSetNames_nparray)
if debug_bool:
logging.debug("uniqueSetNames:")
logging.debug(uniqueSetNames_nparray)
logging.debug("shortSetNames")
logging.debug(shortSetNames)
logging.debug("Above 2 arrays should be the same length.")
# We concatenate the string of the set name and the index column
# But first we need to find the original location of the set name
    # match_list is a list of indices (int) for each element in the first list
# where it is found in the second list.
match_list = match_ix(list(exps_df['SetName']), list(uniqueSetNames_nparray))
# We apply the match list to shortSetNames_list to recreate the original SetName order
# just with the newly created 'short' setNames.
short_names_srs = shortSetNames[match_list]
if debug_bool:
logging.info("short_names_srs: (shortSetNames[match_list])")
logging.info(short_names_srs)
logging.info("original set Names:")
logging.info(exps_df['SetName'])
logging.info('match_list')
logging.info(match_list)
# If there are 3 unique set names and 100 items in exps_df['SetName'],
# then match_list will contain 100 items with only 3 different values (0, 1, 2)
# expNamesNew ends up being a list<str>
expNamesNew = []
for i in range(len(short_names_srs)):
if not short_names_srs[i] in [None, np.nan]:
expNamesNew.append(short_names_srs[i] + exps_df['Index'][i])
else:
expNamesNew.append(exps_df['Index'][i])
if debug_bool:
logging.info('expNamesNew:')
logging.info(expNamesNew)
exps_df['num'] = range(1, exps_df.shape[0] + 1)
# We replace certain strings with others using the 'rules' table.
exps_df['short'] = applyRules(rules_df, list(exps_df['Description']))
if okControls:
if not "control_bool" in exps_df.columns:
raise Exception("Using manual control label but no column "
"'control_bool' in Experiments file!")
else:
for ix, val in exps_df["control_bool"].iteritems():
if val.strip().upper() == "TRUE":
exps_df["short"].loc[ix] = "Time0"
else:
# Should not be a Time0 short
if exps_df["short"].loc[ix].upper() == "TIME0":
raise Exception("Description of experiment indicates Time0, but"
f" value in control_bool is not 'True', instead '{val}'.")
if debug_bool:
logging.info("exps_df of col 'short':")
logging.info(exps_df['short'])
# We remove the "." in the names of the values. Just SetNameIndex now
replace_col_d = {list(all_df.head())[meta_ix + i]: expNamesNew[i] for i in range(len(expNamesNew))}
if debug_bool:
logging.info('replace_col_d')
logging.info(replace_col_d)
logging.info('original all_df col names:')
logging.info(list(all_df.columns))
all_df = all_df.rename(columns=replace_col_d)
if debug_bool:
logging.info('after replacement all_df col names:')
logging.info(list(all_df.columns))
exps_df['name'] = expNamesNew
# updating short to include Groups with Time0
num_time_zero = 0
for ix, val in exps_df['Group'].items():
if val.strip().upper() == "TIME0":
num_time_zero += 1
exps_df.loc[ix, 'short'] = "Time0"
# Updating column 't0sets' which refers to the date and SetName
exps_df['t0set'] = [exps_df['Date_pool_expt_started'].iat[ix] + " " + \
val for ix, val in exps_df['SetName'].items()]
if okControls:
if not "control_group" in exps_df.columns:
raise Exception("Using manual control label but no column "
"'control_group' in Experiments file!")
else:
for ix, val in exps_df["control_group"].iteritems():
exps_df['t0set'].loc[ix] = val
if debug_bool:
logging.info('exps_df short: ')
logging.info(exps_df['short'])
logging.info('exps_df t0set: ')
logging.info(exps_df['t0set'])
logging.info(f"Total number of time zeros: {num_time_zero}")
return exps_df, all_df, replace_col_d
def ShortSetNames(set_names_nparray, dbg_lvl=0):
""" Using a table with rules, shorten the names of these sets
Args:
set_names_nparray (numpy.ndarray): Array of string, unique set names from exps file
Returns:
set_names_nparray (numpy.ndarray): Edited set Names to be
in the format setX* or testX*
This might convert
[ Keio_ML9_set2, Keio_ML9_set2, Keio_ML9_set2, ..., Keio_ML9_set3, Keio_ML9_set3,..., Keio_ML9_set3]
to
[ set2, set2, set2, ..., set3, set3, ..., set3]
"""
set_names_nparray = np.copy(set_names_nparray)
# Below returns a TRUE/FALSE vector indicating which
    # elements of the character vector contain a match (in other words, a simple name)
simple = [bool(re.search(r"(set|test)[0-9A-Z]+[0-9A-Z0-9]*$", x)) for x in set_names_nparray]
if dbg_lvl > 0:
if len(simple) > 0:
logging.debug("simple names: \n" + ",".join(list([str(x) for x in simple])))
else:
logging.debug("No simple names found.")
# We edit the values of set_names_nparray who are true for simple
# by removing anything before 'set' or 'test'
# We count the number of values that were false
nleft = 0
simple_set_names = []
for i in range(len(simple)):
if simple[i]:
new_set_name = re.sub("^.*(set|test)", "\\1", set_names_nparray[i])
set_names_nparray[i] = new_set_name
simple_set_names.append(new_set_name)
else:
nleft += 1
if dbg_lvl > 0:
logging.debug("fixed set_names:\n" + ",".join(list(set_names_nparray)))
candidates = []
for x in "A.B.C.D.E.F.G.H.I.J.K.L.M.N.O.P.Q.R.S.T.U.V.W.X.Y.Z".split("."):
candidates.append("set" + x)
if dbg_lvl > 0:
logging.debug(candidates)
# get the elements in candidates that are not in set_names_nparray[simple]
candidates = [x for x in candidates if x not in simple_set_names]
if (nleft > len(candidates)):
raise Exception(f"Too many unexpected set names: {nleft}.\n To fix this, contact developer "
"and say to change the number of possible extensions in list candidates (A.B...Z).")
# Get the non-simple values from set_names_nparray
oldComplex = [x for x in set_names_nparray if x not in simple_set_names]
if dbg_lvl > 0:
logging.debug("oldComplex:\n" + ",".join(oldComplex))
cnd_ix = 0
translation_dict = {}
for i in range(len(simple)):
if not simple[i]:
logging.info(f"Set {set_names_nparray[i]} simplified to {candidates[cnd_ix]}")
translation_dict[set_names_nparray[i]] = candidates[cnd_ix]
set_names_nparray[i] = candidates[cnd_ix]
cnd_ix += 1
crnt_unq = list(pd.Series(set_names_nparray).unique())
repeats = []
for x in list(set_names_nparray):
if x in crnt_unq:
crnt_unq.remove(x)
else:
repeats.append(x)
if not (len(repeats) == 0):
raise Exception("Non-unique set names! :\n" + \
", ".join(repeats))
else:
logging.debug("Finished running short set names")
if dbg_lvl > 0:
logging.debug("Final set names list: " + ", ".join(set_names_nparray))
return set_names_nparray, translation_dict
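# Illustrative sketch: ShortSetNames on two hypothetical set names. Names that
# already end in set<X> are trimmed in place; any other name would be mapped to
# the next free candidate (setA, setB, ...) and recorded in the translation dict.
def _example_ShortSetNames():
    names, translation_d = ShortSetNames(np.array(["Keio_ML9_set2", "Keio_ML9_set3"]))
    # names -> ['set2', 'set3']; translation_d -> {} (both inputs were 'simple')
    return names, translation_d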
def get_special_lists(data_dir, all_df, replace_col_d, debug_bool=False):
"""
Args:
replace_col_d: Dict mapping original all_df experiment name to replacement name
data_dir
Returns:
genesUsed_list list<str>: LocusIds of genes to use
ignore_list: List<str> New names for the experiments we want to ignore.
Description: We get the lists from the files in data_dir if they are there.
Otherwise we return their values as empty lists. The lists we
look for are genesUsed, which should be a list of locusIds
from this genome that we are using, and ignore_list, which is a list
of experiment names to ignore (columns from all.poolcount)
"""
genesUsed_list = []
ignore_list = []
# list of locusIds
genesUsed_fp = os.path.join(data_dir, "strainusage.genes.json")
# list of extra ignored experiments
ignore_list_fp = os.path.join(data_dir, "ignore_list.json")
if os.path.isfile(genesUsed_fp) and os.access(genesUsed_fp, os.R_OK):
        genesUsed_list = json.loads(open(genesUsed_fp).read())
logging.info(f"Loaded {len(genesUsed_list)} genes to include in the "
"analysis\n")
if os.path.isfile(ignore_list_fp) and os.access(ignore_list_fp, os.R_OK):
pre_ignore_list = json.loads(open(ignore_list_fp).read())
for x in pre_ignore_list:
if x in replace_col_d:
ignore_list.append(x)
else:
raise Exception(f"Avoid list contains experiment {x} but experiment name"
" not found in all.poolcount."
f" Possible names: {', '.join(list(replace_col_d.keys()))}")
ignore_list = [replace_col_d[x] for x in ignore_list]
return genesUsed_list, ignore_list
def applyRules(rules_df, desc_str_list):
"""
We replace str value in V1 with value in V2
Args:
rules_df: data frame with cols:
V1, V2
desc_str_list: list<str>
Returns:
new_desc_list: list<str>
"""
new_desc_list = []
for j in range(len(desc_str_list)):
new_desc_list.append(desc_str_list[j])
for i in range(0, rules_df.shape[0]):
new_desc_list[-1] = new_desc_list[-1].replace(rules_df["V1"].iloc[i],
rules_df["V2"].iloc[i])
return new_desc_list
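# Illustrative sketch: applyRules with a hypothetical one-row rules table.
# Every occurrence of V1 in each description is replaced by V2.
def _example_applyRules():
    rules_df = pd.DataFrame({"V1": ["Lysogeny Broth"], "V2": ["LB"]})
    return applyRules(rules_df, ["Lysogeny Broth with glucose"])
    # -> ['LB with glucose']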
|
[
"logging.debug",
"numpy.copy",
"logging.info",
"os.path.isfile",
"pandas.Series",
"re.search",
"os.access",
"pandas.read_table",
"pandas.isna",
"os.path.join",
"os.listdir",
"re.sub"
] |
[((8321, 8341), 'os.listdir', 'os.listdir', (['data_dir'], {}), '(data_dir)\n', (8331, 8341), False, 'import os, logging, json, re\n'), ((8648, 8687), 'os.path.join', 'os.path.join', (['data_dir', '"""all.poolcount"""'], {}), "(data_dir, 'all.poolcount')\n", (8660, 8687), False, 'import os, logging, json, re\n'), ((8703, 8734), 'os.path.join', 'os.path.join', (['data_dir', '"""genes"""'], {}), "(data_dir, 'genes')\n", (8715, 8734), False, 'import os, logging, json, re\n'), ((8749, 8779), 'os.path.join', 'os.path.join', (['data_dir', '"""exps"""'], {}), "(data_dir, 'exps')\n", (8761, 8779), False, 'import os, logging, json, re\n'), ((8801, 8847), 'os.path.join', 'os.path.join', (['FEBA_dir', '"""desc_short_rules.tsv"""'], {}), "(FEBA_dir, 'desc_short_rules.tsv')\n", (8813, 8847), False, 'import os, logging, json, re\n'), ((9482, 9525), 'pandas.read_table', 'pd.read_table', (['genes_fp'], {'dtype': 'genes_dtypes'}), '(genes_fp, dtype=genes_dtypes)\n', (9495, 9525), True, 'import pandas as pd\n'), ((9814, 9853), 'pandas.read_table', 'pd.read_table', (['all_fp'], {'dtype': 'all_dtypes'}), '(all_fp, dtype=all_dtypes)\n', (9827, 9853), True, 'import pandas as pd\n'), ((10210, 10251), 'pandas.read_table', 'pd.read_table', (['exps_fp'], {'dtype': 'exps_dtypes'}), '(exps_fp, dtype=exps_dtypes)\n', (10223, 10251), True, 'import pandas as pd\n'), ((11528, 11600), 'pandas.read_table', 'pd.read_table', (['short_rules_fp'], {'keep_default_na': '(False)', 'dtype': 'rules_dtypes'}), '(short_rules_fp, keep_default_na=False, dtype=rules_dtypes)\n', (11541, 11600), True, 'import pandas as pd\n'), ((15136, 15211), 'logging.debug', 'logging.debug', (['"""There are the same experiment names in all_df and exps_df."""'], {}), "('There are the same experiment names in all_df and exps_df.')\n", (15149, 15211), False, 'import os, logging, json, re\n'), ((21318, 21344), 'numpy.copy', 'np.copy', (['set_names_nparray'], {}), '(set_names_nparray)\n', (21325, 21344), True, 'import numpy as np\n'), ((24942, 24990), 'os.path.join', 'os.path.join', (['data_dir', '"""strainusage.genes.json"""'], {}), "(data_dir, 'strainusage.genes.json')\n", (24954, 24990), False, 'import os, logging, json, re\n'), ((25052, 25094), 'os.path.join', 'os.path.join', (['data_dir', '"""ignore_list.json"""'], {}), "(data_dir, 'ignore_list.json')\n", (25064, 25094), False, 'import os, logging, json, re\n'), ((12964, 13001), 'logging.debug', 'logging.debug', (['"""Original locusId col"""'], {}), "('Original locusId col')\n", (12977, 13001), False, 'import os, logging, json, re\n'), ((13010, 13042), 'logging.debug', 'logging.debug', (["all_df['locusId']"], {}), "(all_df['locusId'])\n", (13023, 13042), False, 'import os, logging, json, re\n'), ((13249, 13288), 'logging.debug', 'logging.debug', (['"""Unique All Locus Ids: """'], {}), "('Unique All Locus Ids: ')\n", (13262, 13288), False, 'import os, logging, json, re\n'), ((13297, 13331), 'logging.debug', 'logging.debug', (['unique_all_locusIds'], {}), '(unique_all_locusIds)\n', (13310, 13331), False, 'import os, logging, json, re\n'), ((13340, 13389), 'logging.debug', 'logging.debug', (['"""Number of Unique All Locus Ids: """'], {}), "('Number of Unique All Locus Ids: ')\n", (13353, 13389), False, 'import os, logging, json, re\n'), ((13462, 13502), 'logging.debug', 'logging.debug', (['"""Unique Gene Locus Ids: """'], {}), "('Unique Gene Locus Ids: ')\n", (13475, 13502), False, 'import os, logging, json, re\n'), ((13511, 13547), 'logging.debug', 'logging.debug', (['unique_genes_locusIds'], {}), 
'(unique_genes_locusIds)\n', (13524, 13547), False, 'import os, logging, json, re\n'), ((13556, 13606), 'logging.debug', 'logging.debug', (['"""Number of Unique Gene Locus Ids: """'], {}), "('Number of Unique Gene Locus Ids: ')\n", (13569, 13606), False, 'import os, logging, json, re\n'), ((16443, 16475), 'logging.debug', 'logging.debug', (['"""uniqueSetNames:"""'], {}), "('uniqueSetNames:')\n", (16456, 16475), False, 'import os, logging, json, re\n'), ((16484, 16521), 'logging.debug', 'logging.debug', (['uniqueSetNames_nparray'], {}), '(uniqueSetNames_nparray)\n', (16497, 16521), False, 'import os, logging, json, re\n'), ((16530, 16560), 'logging.debug', 'logging.debug', (['"""shortSetNames"""'], {}), "('shortSetNames')\n", (16543, 16560), False, 'import os, logging, json, re\n'), ((16569, 16597), 'logging.debug', 'logging.debug', (['shortSetNames'], {}), '(shortSetNames)\n', (16582, 16597), False, 'import os, logging, json, re\n'), ((16606, 16664), 'logging.debug', 'logging.debug', (['"""Above 2 arrays should be the same length."""'], {}), "('Above 2 arrays should be the same length.')\n", (16619, 16664), False, 'import os, logging, json, re\n'), ((17241, 17301), 'logging.info', 'logging.info', (['"""short_names_srs: (shortSetNames[match_list])"""'], {}), "('short_names_srs: (shortSetNames[match_list])')\n", (17253, 17301), False, 'import os, logging, json, re\n'), ((17310, 17339), 'logging.info', 'logging.info', (['short_names_srs'], {}), '(short_names_srs)\n', (17322, 17339), False, 'import os, logging, json, re\n'), ((17348, 17383), 'logging.info', 'logging.info', (['"""original set Names:"""'], {}), "('original set Names:')\n", (17360, 17383), False, 'import os, logging, json, re\n'), ((17392, 17424), 'logging.info', 'logging.info', (["exps_df['SetName']"], {}), "(exps_df['SetName'])\n", (17404, 17424), False, 'import os, logging, json, re\n'), ((17433, 17459), 'logging.info', 'logging.info', (['"""match_list"""'], {}), "('match_list')\n", (17445, 17459), False, 'import os, logging, json, re\n'), ((17468, 17492), 'logging.info', 'logging.info', (['match_list'], {}), '(match_list)\n', (17480, 17492), False, 'import os, logging, json, re\n'), ((17989, 18017), 'logging.info', 'logging.info', (['"""expNamesNew:"""'], {}), "('expNamesNew:')\n", (18001, 18017), False, 'import os, logging, json, re\n'), ((18026, 18051), 'logging.info', 'logging.info', (['expNamesNew'], {}), '(expNamesNew)\n', (18038, 18051), False, 'import os, logging, json, re\n'), ((19002, 19041), 'logging.info', 'logging.info', (['"""exps_df of col \'short\':"""'], {}), '("exps_df of col \'short\':")\n', (19014, 19041), False, 'import os, logging, json, re\n'), ((19050, 19080), 'logging.info', 'logging.info', (["exps_df['short']"], {}), "(exps_df['short'])\n", (19062, 19080), False, 'import os, logging, json, re\n'), ((19287, 19316), 'logging.info', 'logging.info', (['"""replace_col_d"""'], {}), "('replace_col_d')\n", (19299, 19316), False, 'import os, logging, json, re\n'), ((19325, 19352), 'logging.info', 'logging.info', (['replace_col_d'], {}), '(replace_col_d)\n', (19337, 19352), False, 'import os, logging, json, re\n'), ((19361, 19403), 'logging.info', 'logging.info', (['"""original all_df col names:"""'], {}), "('original all_df col names:')\n", (19373, 19403), False, 'import os, logging, json, re\n'), ((19524, 19575), 'logging.info', 'logging.info', (['"""after replacement all_df col names:"""'], {}), "('after replacement all_df col names:')\n", (19536, 19575), False, 'import os, logging, json, re\n'), ((20480, 
20511), 'logging.info', 'logging.info', (['"""exps_df short: """'], {}), "('exps_df short: ')\n", (20492, 20511), False, 'import os, logging, json, re\n'), ((20520, 20550), 'logging.info', 'logging.info', (["exps_df['short']"], {}), "(exps_df['short'])\n", (20532, 20550), False, 'import os, logging, json, re\n'), ((20559, 20590), 'logging.info', 'logging.info', (['"""exps_df t0set: """'], {}), "('exps_df t0set: ')\n", (20571, 20590), False, 'import os, logging, json, re\n'), ((20599, 20629), 'logging.info', 'logging.info', (["exps_df['t0set']"], {}), "(exps_df['t0set'])\n", (20611, 20629), False, 'import os, logging, json, re\n'), ((20638, 20698), 'logging.info', 'logging.info', (['f"""Total number of time zeros: {num_time_zero}"""'], {}), "(f'Total number of time zeros: {num_time_zero}')\n", (20650, 20698), False, 'import os, logging, json, re\n'), ((22542, 22567), 'logging.debug', 'logging.debug', (['candidates'], {}), '(candidates)\n', (22555, 22567), False, 'import os, logging, json, re\n'), ((23893, 23942), 'logging.debug', 'logging.debug', (['"""Finished running short set names"""'], {}), "('Finished running short set names')\n", (23906, 23942), False, 'import os, logging, json, re\n'), ((25103, 25131), 'os.path.isfile', 'os.path.isfile', (['genesUsed_fp'], {}), '(genesUsed_fp)\n', (25117, 25131), False, 'import os, logging, json, re\n'), ((25136, 25168), 'os.access', 'os.access', (['genesUsed_fp', 'os.R_OK'], {}), '(genesUsed_fp, os.R_OK)\n', (25145, 25168), False, 'import os, logging, json, re\n'), ((25357, 25387), 'os.path.isfile', 'os.path.isfile', (['ignore_list_fp'], {}), '(ignore_list_fp)\n', (25371, 25387), False, 'import os, logging, json, re\n'), ((25392, 25426), 'os.access', 'os.access', (['ignore_list_fp', 'os.R_OK'], {}), '(ignore_list_fp, os.R_OK)\n', (25401, 25426), False, 'import os, logging, json, re\n'), ((8940, 8961), 'os.access', 'os.access', (['x', 'os.R_OK'], {}), '(x, os.R_OK)\n', (8949, 8961), False, 'import os, logging, json, re\n'), ((21500, 21548), 're.search', 're.search', (['"""(set|test)[0-9A-Z]+[0-9A-Z0-9]*$"""', 'x'], {}), "('(set|test)[0-9A-Z]+[0-9A-Z0-9]*$', x)\n", (21509, 21548), False, 'import os, logging, json, re\n'), ((21743, 21782), 'logging.debug', 'logging.debug', (['"""No simple names found."""'], {}), "('No simple names found.')\n", (21756, 21782), False, 'import os, logging, json, re\n'), ((22082, 22134), 're.sub', 're.sub', (['"""^.*(set|test)"""', '"""\\\\1"""', 'set_names_nparray[i]'], {}), "('^.*(set|test)', '\\\\1', set_names_nparray[i])\n", (22088, 22134), False, 'import os, logging, json, re\n'), ((23291, 23369), 'logging.info', 'logging.info', (['f"""Set {set_names_nparray[i]} simplified to {candidates[cnd_ix]}"""'], {}), "(f'Set {set_names_nparray[i]} simplified to {candidates[cnd_ix]}')\n", (23303, 23369), False, 'import os, logging, json, re\n'), ((10453, 10467), 'pandas.isna', 'pd.isna', (['value'], {}), '(value)\n', (10460, 10467), True, 'import pandas as pd\n'), ((23547, 23575), 'pandas.Series', 'pd.Series', (['set_names_nparray'], {}), '(set_names_nparray)\n', (23556, 23575), True, 'import pandas as pd\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2018 herrlich10
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import print_function, division, absolute_import, unicode_literals
import sys, shlex, time
import subprocess, multiprocessing, ctypes
import numpy as np
__author__ = 'herrlich10 <<EMAIL>>'
__version__ = '0.1.3'
if sys.version_info[0] == 3:
string_types = (str,)
else:
string_types = (basestring,)
def cmd_for_exec(cmd, cmd_kws):
'''
Format cmd appropriately for execution according to whether shell=True.
Split the cmd string into a list, if not shell=True.
Join the cmd list into a string, if shell=True.
Do nothing to callable.
'''
if not callable(cmd):
if 'shell' in cmd_kws and cmd_kws['shell']: # cmd string is required
if not isinstance(cmd, string_types):
cmd = ' '.join(cmd)
else: # cmd list is required
if isinstance(cmd, string_types):
cmd = shlex.split(cmd) # Split by space, preserving quoted substrings
return cmd
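# Illustrative sketch: cmd_for_exec splits a command string into a list when
# shell is not requested, and joins a list into a single string when shell=True.
# The commands are arbitrary examples.
def _example_cmd_for_exec():
    as_list = cmd_for_exec('ls -l "my dir"', {})          # -> ['ls', '-l', 'my dir']
    as_str = cmd_for_exec(['ls', '-l'], {'shell': True})  # -> 'ls -l'
    return as_list, as_str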
def cmd_for_disp(cmd):
'''
Format cmd for printing.
'''
if isinstance(cmd, list):
return ' '.join(cmd)
else:
return cmd
def format_duration(duration, format='standard'):
'''
Format duration (in seconds) in a more human friendly way.
'''
if format == 'short':
units = ['d', 'h', 'm', 's']
elif format == 'long':
units = [' days', ' hours', ' minutes', ' seconds']
else:
units = [' day', ' hr', ' min', ' sec']
values = [int(duration//86400), int(duration%86400//3600), int(duration%3600//60), duration%60]
for K in range(len(values)): # values[K] would be the first non-zero value
if values[K] > 0:
break
formatted = ((('%d' if k<len(values)-1 else '%.3f') % values[k]) + units[k] for k in range(len(values)) if k >= K)
return ' '.join(formatted)
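# Illustrative sketch: format_duration on an arbitrary duration.
def _example_format_duration():
    standard = format_duration(3723.5)               # -> '1 hr 2 min 3.500 sec'
    short = format_duration(3723.5, format='short')  # -> '1h 2m 3.500s'
    return standard, short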
class PooledCaller(object):
'''
Execute multiple command line programs, as well as python callables,
    asynchronously and in parallel across a pool of processes.
'''
def __init__(self, pool_size=None):
if pool_size is None:
self.pool_size = multiprocessing.cpu_count() * 3 // 4
else:
self.pool_size = pool_size
self.ps = []
self.cmd_queue = []
self._n_cmds = 0 # Accumulated counter for generating cmd idx
self._pid2idx = {}
self._return_codes = []
def check_call(self, cmd, *args, **kwargs):
'''
Asynchronous check_call (queued execution, return immediately).
See subprocess.Popen() for more information about the arguments.
Multiple commands can be separated with ";" and executed sequentially
within a single subprocess in linux/mac, only if shell=True.
Python callable can also be executed in parallel via multiprocessing.
Note that only the return code of the child process will be retrieved
later when calling wait(), not the actual return value of the callable.
So the result of the computation needs to be saved in a file.
Parameters
----------
cmd : list, str, or callable
Computation in command line programs is handled with subprocess.
Computation in python callable is handled with multiprocessing.
shell : bool
If provided, must be a keyword argument.
If shell is True, the command will be executed through the shell.
*args, **kwargs :
If cmd is a callable, *args and **kwargs are passed to the callable as its arguments.
If cmd is a list or str, **kwargs are passed to subprocess.Popen().
'''
cmd = cmd_for_exec(cmd, kwargs)
self.cmd_queue.append((self._n_cmds, cmd, args, kwargs))
self._n_cmds += 1
def dispatch(self):
# If there are free slot and more jobs
while len(self.ps) < self.pool_size and len(self.cmd_queue) > 0:
idx, cmd, args, kwargs = self.cmd_queue.pop(0)
print('>> job {0}: {1}'.format(idx, cmd_for_disp(cmd)))
if callable(cmd):
p = multiprocessing.Process(target=cmd, args=args, kwargs=kwargs)
p.start()
else:
p = subprocess.Popen(cmd, **kwargs)
self.ps.append(p)
self._pid2idx[p.pid] = idx
def wait(self):
'''
Wait for all jobs in the queue to finish.
Returns
-------
codes : list
The return code of the child process for each job.
'''
self._start_time = time.time()
while len(self.ps) > 0 or len(self.cmd_queue) > 0:
# Dispatch jobs if possible
self.dispatch()
# Poll workers' state
            for p in list(self.ps): # iterate over a copy, since finished jobs are removed below
if isinstance(p, subprocess.Popen) and p.poll() is not None: # If the process is terminated
self._return_codes.append((self._pid2idx[p.pid], p.returncode))
self.ps.remove(p)
elif isinstance(p, multiprocessing.Process) and not p.is_alive(): # If the process is terminated
self._return_codes.append((self._pid2idx[p.pid], p.exitcode))
self.ps.remove(p)
time.sleep(0.1)
codes = [code for idx, code in sorted(self._return_codes)]
duration = time.time() - self._start_time
print('>> All {0} jobs done in {1}.'.format(self._n_cmds, format_duration(duration)))
if np.any(codes):
            print('returncode: {0}'.format(codes))
else:
print('all returncodes are 0.')
self._n_cmds = 0
self._pid2idx = {}
self._return_codes = []
return codes
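# Illustrative sketch: queue two shell commands and a python callable on a
# PooledCaller, then wait for all of them. The commands are hypothetical
# placeholders (a POSIX shell is assumed for the second one).
def _example_pooled_caller():
    pc = PooledCaller(pool_size=2)
    pc.check_call('echo hello')                      # command handled by subprocess
    pc.check_call('sleep 1; echo done', shell=True)  # executed through the shell
    pc.check_call(print, 'from a child process')     # callable handled by multiprocessing
    return pc.wait()                                 # return codes, in submission order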
class ArrayWrapper(type):
'''
This is the metaclass for classes that wrap an np.ndarray and delegate
non-reimplemented operators (among other magic functions) to the wrapped array.
'''
def __init__(cls, name, bases, dct):
def make_descriptor(name):
return property(lambda self: getattr(self.arr, name))
type.__init__(cls, name, bases, dct)
ignore = 'class mro new init setattr getattr getattribute'
ignore = set('__{0}__'.format(name) for name in ignore.split())
for name in dir(np.ndarray):
if name.startswith('__'):
if name not in ignore and name not in dct:
setattr(cls, name, make_descriptor(name))
class SharedMemoryArray(object, metaclass=ArrayWrapper):
'''
This class can be used as a usual np.ndarray, but its data buffer
is allocated in shared memory (under Cached Files in memory monitor),
and can be passed across processes without any data copy/duplication,
even when write access happens (which is lock-synchronized).
The idea is to allocate memory using multiprocessing.Array, and
access it from current or another process via a numpy.ndarray view,
without actually copying the data.
So it is both convenient and efficient when used with multiprocessing.
This implementation also demonstrates the power of composition + metaclass,
as opposed to the canonical multiple inheritance.
'''
def __init__(self, dtype, shape, initializer=None, lock=True):
self.dtype = np.dtype(dtype)
self.shape = shape
if initializer is None:
# Preallocate memory using multiprocessing is the preferred usage
self.shared_arr = multiprocessing.Array(self.dtype2ctypes[self.dtype], int(np.prod(self.shape)), lock=lock)
else:
self.shared_arr = multiprocessing.Array(self.dtype2ctypes[self.dtype], initializer, lock=lock)
if not lock:
self.arr = np.frombuffer(self.shared_arr, dtype=self.dtype).reshape(self.shape)
else:
self.arr = np.frombuffer(self.shared_arr.get_obj(), dtype=self.dtype).reshape(self.shape)
@classmethod
def zeros(cls, shape, dtype=float, lock=True):
'''
Return a new array of given shape and dtype, filled with zeros.
This is the preferred usage, which avoids holding two copies of the
potentially very large data simultaneously in the memory.
'''
return cls(dtype, shape, lock=lock)
@classmethod
def from_array(cls, arr, lock=True):
'''
Initialize a new shared-memory array with an existing array.
'''
# return cls(arr.dtype, arr.shape, arr.ravel(), lock=lock) # Slow and memory inefficient, why?
a = cls.zeros(arr.shape, dtype=arr.dtype, lock=lock)
a[:] = arr # This is a more efficient way of initialization
return a
def __getattr__(self, attr):
if attr in ['acquire', 'release']:
return getattr(self.shared_arr, attr)
else:
return getattr(self.arr, attr)
def __dir__(self):
return list(self.__dict__.keys()) + ['acquire', 'release'] + dir(self.arr)
# At present, only numerical dtypes are supported.
dtype2ctypes = {
bool: ctypes.c_bool,
int: ctypes.c_long,
float: ctypes.c_double,
np.dtype('bool'): ctypes.c_bool,
np.dtype('int64'): ctypes.c_long,
np.dtype('int32'): ctypes.c_int,
np.dtype('int16'): ctypes.c_short,
np.dtype('int8'): ctypes.c_byte,
np.dtype('uint64'): ctypes.c_ulong,
np.dtype('uint32'): ctypes.c_uint,
np.dtype('uint16'): ctypes.c_ushort,
np.dtype('uint8'): ctypes.c_ubyte,
np.dtype('float64'): ctypes.c_double,
np.dtype('float32'): ctypes.c_float,
}
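# Illustrative sketch: allocate a shared-memory array directly, or copy an
# existing ndarray into one. Shapes and values are arbitrary examples.
def _example_shared_memory_array():
    a = SharedMemoryArray.zeros((2, 3), dtype='float64')  # preferred: allocate, then fill
    a[:] = 1.0                                            # behaves like a normal ndarray
    b = SharedMemoryArray.from_array(np.arange(6, dtype='float64').reshape(2, 3))
    return a, b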
|
[
"subprocess.Popen",
"multiprocessing.Array",
"numpy.frombuffer",
"numpy.dtype",
"shlex.split",
"time.sleep",
"numpy.any",
"time.time",
"multiprocessing.Process",
"numpy.prod",
"multiprocessing.cpu_count"
] |
[((5724, 5735), 'time.time', 'time.time', ([], {}), '()\n', (5733, 5735), False, 'import sys, shlex, time\n'), ((6640, 6653), 'numpy.any', 'np.any', (['codes'], {}), '(codes)\n', (6646, 6653), True, 'import numpy as np\n'), ((8430, 8445), 'numpy.dtype', 'np.dtype', (['dtype'], {}), '(dtype)\n', (8438, 8445), True, 'import numpy as np\n'), ((10272, 10288), 'numpy.dtype', 'np.dtype', (['"""bool"""'], {}), "('bool')\n", (10280, 10288), True, 'import numpy as np\n'), ((10313, 10330), 'numpy.dtype', 'np.dtype', (['"""int64"""'], {}), "('int64')\n", (10321, 10330), True, 'import numpy as np\n'), ((10355, 10372), 'numpy.dtype', 'np.dtype', (['"""int32"""'], {}), "('int32')\n", (10363, 10372), True, 'import numpy as np\n'), ((10396, 10413), 'numpy.dtype', 'np.dtype', (['"""int16"""'], {}), "('int16')\n", (10404, 10413), True, 'import numpy as np\n'), ((10439, 10455), 'numpy.dtype', 'np.dtype', (['"""int8"""'], {}), "('int8')\n", (10447, 10455), True, 'import numpy as np\n'), ((10480, 10498), 'numpy.dtype', 'np.dtype', (['"""uint64"""'], {}), "('uint64')\n", (10488, 10498), True, 'import numpy as np\n'), ((10524, 10542), 'numpy.dtype', 'np.dtype', (['"""uint32"""'], {}), "('uint32')\n", (10532, 10542), True, 'import numpy as np\n'), ((10567, 10585), 'numpy.dtype', 'np.dtype', (['"""uint16"""'], {}), "('uint16')\n", (10575, 10585), True, 'import numpy as np\n'), ((10612, 10629), 'numpy.dtype', 'np.dtype', (['"""uint8"""'], {}), "('uint8')\n", (10620, 10629), True, 'import numpy as np\n'), ((10655, 10674), 'numpy.dtype', 'np.dtype', (['"""float64"""'], {}), "('float64')\n", (10663, 10674), True, 'import numpy as np\n'), ((10701, 10720), 'numpy.dtype', 'np.dtype', (['"""float32"""'], {}), "('float32')\n", (10709, 10720), True, 'import numpy as np\n'), ((6402, 6417), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (6412, 6417), False, 'import sys, shlex, time\n'), ((6504, 6515), 'time.time', 'time.time', ([], {}), '()\n', (6513, 6515), False, 'import sys, shlex, time\n'), ((8747, 8823), 'multiprocessing.Array', 'multiprocessing.Array', (['self.dtype2ctypes[self.dtype]', 'initializer'], {'lock': 'lock'}), '(self.dtype2ctypes[self.dtype], initializer, lock=lock)\n', (8768, 8823), False, 'import subprocess, multiprocessing, ctypes\n'), ((2030, 2046), 'shlex.split', 'shlex.split', (['cmd'], {}), '(cmd)\n', (2041, 2046), False, 'import sys, shlex, time\n'), ((5250, 5311), 'multiprocessing.Process', 'multiprocessing.Process', ([], {'target': 'cmd', 'args': 'args', 'kwargs': 'kwargs'}), '(target=cmd, args=args, kwargs=kwargs)\n', (5273, 5311), False, 'import subprocess, multiprocessing, ctypes\n'), ((5376, 5407), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {}), '(cmd, **kwargs)\n', (5392, 5407), False, 'import subprocess, multiprocessing, ctypes\n'), ((3260, 3287), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (3285, 3287), False, 'import subprocess, multiprocessing, ctypes\n'), ((8670, 8689), 'numpy.prod', 'np.prod', (['self.shape'], {}), '(self.shape)\n', (8677, 8689), True, 'import numpy as np\n'), ((8868, 8916), 'numpy.frombuffer', 'np.frombuffer', (['self.shared_arr'], {'dtype': 'self.dtype'}), '(self.shared_arr, dtype=self.dtype)\n', (8881, 8916), True, 'import numpy as np\n')]
|
import numpy as np
from pommerman import constants
from pommerman.constants import Item
from util.data import calc_dist
def staying_alive_reward(nobs, agent_id):
"""
Return a reward if the agent with the given id is alive.
:param nobs: The game state
:param agent_id: The agent to check
:return: The reward for staying alive
"""
#print(nobs[0]['position'][0])
if agent_id in nobs[0]['alive']:
return 1.0
else:
return 0.0
def go_down_right_reward(nobs, high_pos, agent_num, act):
"""
Return a reward for going to the low or right side of the board
:param nobs: The current observation
    :param high_pos: Tuple of the lowest and right-most positions reached so far
:param agent_num: The id of the agent to check (0-3)
:return: The reward for going down or right
"""
# only give rewards if a new highest point is reached
bomb_bonus = 0
if act[agent_num] == 5:
bomb_bonus = 0.00
if nobs[agent_num]['position'][0] > high_pos[0]:
return 1 + bomb_bonus, (nobs[agent_num]['position'][0], high_pos[1])
elif nobs[agent_num]['position'][1] > high_pos[1]:
return 1 + bomb_bonus, (high_pos[0], nobs[agent_num]['position'][1])
else:
return 0 + bomb_bonus, high_pos
def bomb_reward(nobs, act, agent_ind):
dist = calc_dist(agent_ind, nobs)
rwd = 0.0
if act[agent_ind] == 5:
rwd = 5.0/dist
elif act[agent_ind] == 0:
rwd = 0.0
else:
rwd = 1.0/dist
return rwd
def skynet_reward(obs, act, nobs, fifo, agent_inds, log):
"""
Skynet reward function rewarding enemy deaths, powerup pickups and stepping on blocks not in FIFO
:param obs: previous observation
:param nobs: new observation
:param fifo: 121 (11x11) cell queue
:return:
"""
# calculate rewards for player agents, rest are zero
r = [0.0] * len(obs)
for i in range(len(obs)):
if i not in agent_inds:
continue
log_ind = 0 if i <= 1 else 1
teammate_ind = i + 2 if log_ind == 0 else i - 2
n_enemies_prev = 0
alive_prev = obs[i]['alive']
for e in obs[i]['enemies']:
if e.value in alive_prev:
n_enemies_prev += 1
prev_n_teammate = 1 if obs[i]['teammate'].value in alive_prev else 0
prev_can_kick = obs[i]['can_kick']
prev_n_ammo = obs[i]['ammo']
prev_n_blast = obs[i]['blast_strength']
cur_alive = nobs[i]['alive']
n_enemy_cur = 0
for e in nobs[i]['enemies']:
if e.value in cur_alive:
n_enemy_cur += 1
cur_n_teammate = 1 if nobs[i]['teammate'].value in cur_alive else 0
cur_can_kick = nobs[i]['can_kick']
cur_n_ammo = nobs[i]['ammo']
cur_n_blast = nobs[i]['blast_strength']
cur_position = nobs[i]['position']
if n_enemies_prev - n_enemy_cur > 0:
r[i] += (n_enemies_prev - n_enemy_cur) * 0.5
log[log_ind][0] += (n_enemies_prev - n_enemy_cur) * 0.5
# if prev_n_teammate - cur_n_teammate > 0:
# r[i] -= (prev_n_teammate-cur_n_teammate)*0.5
# log[log_ind][4] -= (prev_n_teammate-cur_n_teammate)*0.5
if not prev_can_kick and cur_can_kick:
r[i] += 0.02
log[log_ind][1] += 0.02
if cur_n_ammo - prev_n_ammo > 0 and obs[i]['board'][cur_position[0]][cur_position[1]] == Item.ExtraBomb.value:
r[i] += 0.01
log[log_ind][1] += 0.01
if cur_n_blast - prev_n_blast > 0:
r[i] += 0.01
log[log_ind][1] += 0.01
if cur_position not in fifo[i]:
r[i] += 0.001
log[log_ind][2] += 0.001
if len(fifo[i]) == 121:
fifo[i].pop()
fifo[i].append(cur_position)
return r
def _get_positions(board, value):
wood_bitmap = np.isin(board, value).astype(np.uint8)
wood_positions = np.where(wood_bitmap == 1)
return list(zip(wood_positions[0], wood_positions[1]))
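# Illustrative sketch: _get_positions on a tiny hypothetical board; the value 2
# is just an arbitrary marker here (real boards use pommerman Item values).
def _example_get_positions():
    board = np.array([[0, 2, 0],
                      [1, 0, 2],
                      [0, 0, 0]])
    return _get_positions(board, 2)   # -> [(0, 1), (1, 2)] (as numpy ints)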
def woods_close_to_bomb_reward(obs, bomb_pos, blast_strength, agent_ids):
'''
:param obs: observation
:param bomb_pos: position bomb is layed
:param blast_strength: current blast strength of the agent
:param agent_ids: agent ids of teammates
:return: reward for laying bombs near wood and enemies
'''
board = obs['board']
wood_positions = _get_positions(board, constants.Item.Wood.value)
rigid_positions = _get_positions(board, constants.Item.Rigid.value)
enemy_ids = [10,11,12,13]
for id in agent_ids:
enemy_ids.remove(id)
enemy_positions =[]
for e in enemy_ids:
enemy_positions += _get_positions(board, e)
woods_in_range = 0.0
enemies_in_range = 0.0
# for every wooden block check if it would be destroyed
left_pos = np.asarray(bomb_pos)
for i in range(1, blast_strength+1):
if left_pos[0] == 0:
break
left_pos = (bomb_pos[0] - i, bomb_pos[1])
if left_pos in rigid_positions:
break
elif left_pos in enemy_positions:
enemies_in_range +=1
break
elif left_pos in wood_positions:
woods_in_range += 1
break
right_pos = np.asarray(bomb_pos)
for i in range(1, blast_strength + 1):
if right_pos[0] == len(board)-1:
break
right_pos = (bomb_pos[0] + i, bomb_pos[1])
if right_pos in rigid_positions:
break
elif right_pos in enemy_positions:
enemies_in_range += 1
break
elif right_pos in wood_positions:
woods_in_range += 1
break
down_pos = np.asarray(bomb_pos)
for i in range(1, blast_strength + 1):
if down_pos[1] == 0:
break
down_pos = (bomb_pos[0], bomb_pos[1] - i)
if down_pos in rigid_positions:
break
elif down_pos in enemy_positions:
enemies_in_range += 1
break
elif down_pos in wood_positions:
woods_in_range += 1
break
up_pos = np.asarray(bomb_pos)
for i in range(1, blast_strength + 1):
if up_pos[1] == len(board)-1:
break
up_pos = (bomb_pos[0], bomb_pos[1] + i)
if up_pos in rigid_positions:
break
elif up_pos in enemy_positions:
enemies_in_range += 1
break
elif up_pos in wood_positions:
woods_in_range += 1
break
# for each wood close to bomb reward x
reward = (0.01 * woods_in_range) + (0.3 * enemies_in_range)
return reward
|
[
"numpy.isin",
"numpy.asarray",
"numpy.where",
"util.data.calc_dist"
] |
[((1392, 1418), 'util.data.calc_dist', 'calc_dist', (['agent_ind', 'nobs'], {}), '(agent_ind, nobs)\n', (1401, 1418), False, 'from util.data import calc_dist\n'), ((4096, 4122), 'numpy.where', 'np.where', (['(wood_bitmap == 1)'], {}), '(wood_bitmap == 1)\n', (4104, 4122), True, 'import numpy as np\n'), ((5017, 5037), 'numpy.asarray', 'np.asarray', (['bomb_pos'], {}), '(bomb_pos)\n', (5027, 5037), True, 'import numpy as np\n'), ((5447, 5467), 'numpy.asarray', 'np.asarray', (['bomb_pos'], {}), '(bomb_pos)\n', (5457, 5467), True, 'import numpy as np\n'), ((5895, 5915), 'numpy.asarray', 'np.asarray', (['bomb_pos'], {}), '(bomb_pos)\n', (5905, 5915), True, 'import numpy as np\n'), ((6325, 6345), 'numpy.asarray', 'np.asarray', (['bomb_pos'], {}), '(bomb_pos)\n', (6335, 6345), True, 'import numpy as np\n'), ((4035, 4056), 'numpy.isin', 'np.isin', (['board', 'value'], {}), '(board, value)\n', (4042, 4056), True, 'import numpy as np\n')]
|
import numpy as np
import pandas as pd
def intersection_cartesian(L1: pd.DataFrame, L2: pd.DataFrame):
"""
Compute cartesian coordinates of intersection points given two list of lines in general form.
General form for a line: Ax+By+C=0
:param L1:
:param L2:
:return:
"""
if not {'A', 'B', 'C'}.issubset(set(L1.columns)) or not {'A', 'B', 'C'}.issubset(set(L2.columns)):
raise ValueError('L1 and L2 should both contains columns A, B and C, which depicts lines in general form')
d = (L1['A'] * L2['B'] - L1['B'] * L2['A'])
dx = L1['B'] * L2['C'] - L1['C'] * L2['B']
dy = L1['C'] * L2['A'] - L1['A'] * L2['C']
x = dx / d
y = dy / d
return list(zip(x.values.tolist(), y.values.tolist()))
def points2line(p1, p2):
"""
    Compute Ax+By+C=0 given a list of points [(x1,y1)] and [(x2,y2)].
Single point is also acceptable.
:param p1: point in tuple or array (x1,y1) or a list of points in tuple or array [(x1_1,y1_1),(x1_2,y1_2),...]
:param p2: point in tuple or array (x2,y2) or a list of points in tuple or array [(x2_1,y2_1),(x2_2,y2_2),...]
:return: pd.DataFrame objects of lines in general form(Ax+By+C=0)
"""
p1 = np.array(p1)
p2 = np.array(p2)
    if p1.dtype == object or p2.dtype == object:
        raise ValueError("p1 and p2 should be matrix alike")
    elif len(p1.shape) == 2 and len(p2.shape) == 2:
        if p1.shape[1] != 2 or p2.shape[1] != 2:
            raise ValueError("p1 and p2 should be matrices with column size of exactly 2")
    elif len(p1.shape) == 1 and len(p1) == 2 and len(p2.shape) == 1 and len(p2) == 2:
        p1 = p1.reshape(-1, 2)
        p2 = p2.reshape(-1, 2)
    else:
        raise ValueError("Invalid p1 and p2")
a = (p1[:, 1] - p2[:, 1])
b = (p2[:, 0] - p1[:, 0])
c = (p1[:, 0] * p2[:, 1] - p2[:, 0] * p1[:, 1])
return pd.DataFrame([a, b, c], index=['A', 'B', 'C']).T
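# Illustrative sketch: build two lines from point pairs and intersect them.
# The points are arbitrary examples.
def _example_intersection():
    L1 = points2line([(0.0, 0.0)], [(1.0, 1.0)])   # the line y = x
    L2 = points2line([(0.0, 1.0)], [(1.0, 0.0)])   # the line y = 1 - x
    return intersection_cartesian(L1, L2)          # -> [(0.5, 0.5)]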
def find_y_on_lines(lines: np.array, x: np.array):
"""
find y of a list of x on a list of lines that in polar form.
:param lines:
:param x:
:return: a list of points, 1th dimension for different x and 2th dimension for different lines
"""
if len(lines) == 0:
return lines
lines = np.array(lines)
    if lines.dtype == object:
raise ValueError("lines should be matrix alike")
elif len(lines.shape) == 1:
if len(lines) == 2:
lines = lines.reshape(-1, 2)
else:
raise ValueError("the length of line vector should 2")
elif len(lines.shape) == 2:
if lines.shape[1] != 2:
raise ValueError("lines should be matrix with column size of exactly 2")
else:
raise ValueError("Invalid lines")
x = np.array(x)
    if x.dtype == object:
raise ValueError("x should be matrix alike")
rho = lines[:, 1].reshape(-1, 1)
phi = lines[:, 0].reshape(-1, 1)
y = (rho - x * np.cos(phi)) / np.sin(phi)
return y
def find_points_on_lines(lines: np.array, x: np.array):
"""
find points of a list of x on a list of lines that in polar form.
:param lines:
:param x:
:return: a list of points, 1th dimension for different x and 2th dimension for different lines
"""
if len(lines) == 0:
return lines
lines = np.array(lines)
if len(lines.shape) == 1:
if len(lines) == 2:
lines = lines.reshape(-1, 2)
x = np.array(x)
y = find_y_on_lines(lines, x)
points = list()
for ix in range(len(x)):
points_on_a_line = np.zeros((len(lines), 2))
points_on_a_line[:, 0] = x[ix]
points_on_a_line[:, 1] = y[:, ix]
points.append(list(map(lambda x: tuple(x), points_on_a_line.tolist())))
return points
def interpolate_pixels_along_line(p1: np.array or tuple, p2: np.array or tuple, width=2):
"""Uses Xiaolin Wu's line algorithm to interpolate all of the pixels along a
straight line, given two points (x0, y0) and (x1, y1)
Wikipedia article containing pseudo code that function was based off of:
http://en.wikipedia.org/wiki/Xiaolin_Wu's_line_algorithm
Given by Rick(https://stackoverflow.com/users/2025958/rick)
on https://stackoverflow.com/questions/24702868/python3-pillow-get-all-pixels-on-a-line.
"""
if type(p1) is np.ndarray and type(p2) is np.ndarray:
(x1, y1) = p1.flatten()
(x2, y2) = p2.flatten()
elif len(p1) == 2 and len(p2) == 2:
(x1, y1) = p1
(x2, y2) = p2
else:
raise TypeError("p1 and p2 must be tuple or ndarray depicting points")
pixels = []
steep = np.abs(y2 - y1) > np.abs(x2 - x1)
# Ensure that the path to be interpolated is shallow and from left to right
if steep:
t = x1
x1 = y1
y1 = t
t = x2
x2 = y2
y2 = t
if x1 > x2:
t = x1
x1 = x2
x2 = t
t = y1
y1 = y2
y2 = t
dx = x2 - x1
dy = y2 - y1
gradient = dy / dx # slope
# Get the first given coordinate and add it to the return list
x_end = np.round(x1)
y_end = y1 + (gradient * (x_end - x1))
xpxl0 = x_end
ypxl0 = np.round(y_end)
if steep:
pixels.extend([(ypxl0, xpxl0), (ypxl0 + 1, xpxl0)])
else:
pixels.extend([(xpxl0, ypxl0), (xpxl0, ypxl0 + 1)])
interpolated_y = y_end + gradient
# Get the second given coordinate to give the main loop a range
x_end = np.round(x2)
y_end = y2 + (gradient * (x_end - x2))
xpxl1 = x_end
ypxl1 = np.round(y_end)
# Loop between the first x coordinate and the second x coordinate, interpolating the y coordinates
for x in np.arange(xpxl0 + 1, xpxl1):
if steep:
pixels.extend([(np.floor(interpolated_y) + i, x) for i in range(1 - width, width + 1)])
else:
pixels.extend([(x, np.floor(interpolated_y) + i) for i in range(1 - width, width + 1)])
interpolated_y += gradient
# Add the second given coordinate to the given list
if steep:
pixels.extend([(ypxl1, xpxl1), (ypxl1 + 1, xpxl1)])
else:
pixels.extend([(xpxl1, ypxl1), (xpxl1, ypxl1 + 1)])
# convert to int
    return list(map(lambda x: tuple(x), np.array(pixels, dtype=int)))
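# Illustrative sketch: interpolate the pixels along an arbitrary segment with a
# one-pixel margin on each side.
def _example_interpolate_pixels():
    return interpolate_pixels_along_line((0, 0), (5, 3), width=1)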
|
[
"pandas.DataFrame",
"numpy.abs",
"numpy.floor",
"numpy.sin",
"numpy.array",
"numpy.arange",
"numpy.cos",
"numpy.round"
] |
[((1209, 1221), 'numpy.array', 'np.array', (['p1'], {}), '(p1)\n', (1217, 1221), True, 'import numpy as np\n'), ((1231, 1243), 'numpy.array', 'np.array', (['p2'], {}), '(p2)\n', (1239, 1243), True, 'import numpy as np\n'), ((2246, 2261), 'numpy.array', 'np.array', (['lines'], {}), '(lines)\n', (2254, 2261), True, 'import numpy as np\n'), ((2744, 2755), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (2752, 2755), True, 'import numpy as np\n'), ((3304, 3319), 'numpy.array', 'np.array', (['lines'], {}), '(lines)\n', (3312, 3319), True, 'import numpy as np\n'), ((3427, 3438), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (3435, 3438), True, 'import numpy as np\n'), ((5098, 5110), 'numpy.round', 'np.round', (['x1'], {}), '(x1)\n', (5106, 5110), True, 'import numpy as np\n'), ((5184, 5199), 'numpy.round', 'np.round', (['y_end'], {}), '(y_end)\n', (5192, 5199), True, 'import numpy as np\n'), ((5464, 5476), 'numpy.round', 'np.round', (['x2'], {}), '(x2)\n', (5472, 5476), True, 'import numpy as np\n'), ((5550, 5565), 'numpy.round', 'np.round', (['y_end'], {}), '(y_end)\n', (5558, 5565), True, 'import numpy as np\n'), ((5683, 5710), 'numpy.arange', 'np.arange', (['(xpxl0 + 1)', 'xpxl1'], {}), '(xpxl0 + 1, xpxl1)\n', (5692, 5710), True, 'import numpy as np\n'), ((1875, 1921), 'pandas.DataFrame', 'pd.DataFrame', (['[a, b, c]'], {'index': "['A', 'B', 'C']"}), "([a, b, c], index=['A', 'B', 'C'])\n", (1887, 1921), True, 'import pandas as pd\n'), ((2946, 2957), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (2952, 2957), True, 'import numpy as np\n'), ((4619, 4634), 'numpy.abs', 'np.abs', (['(y2 - y1)'], {}), '(y2 - y1)\n', (4625, 4634), True, 'import numpy as np\n'), ((4637, 4652), 'numpy.abs', 'np.abs', (['(x2 - x1)'], {}), '(x2 - x1)\n', (4643, 4652), True, 'import numpy as np\n'), ((6244, 6274), 'numpy.array', 'np.array', (['pixels'], {'dtype': 'np.int'}), '(pixels, dtype=np.int)\n', (6252, 6274), True, 'import numpy as np\n'), ((2931, 2942), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (2937, 2942), True, 'import numpy as np\n'), ((5758, 5782), 'numpy.floor', 'np.floor', (['interpolated_y'], {}), '(interpolated_y)\n', (5766, 5782), True, 'import numpy as np\n'), ((5876, 5900), 'numpy.floor', 'np.floor', (['interpolated_y'], {}), '(interpolated_y)\n', (5884, 5900), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
#code adapted from https://github.com/analyticalmindsltd/smote_variants
import numpy as np
import time
import logging
import itertools
from sklearn.neighbors import NearestNeighbors
# setting the _logger format
_logger = logging.getLogger('smote_variants')
_logger.setLevel(logging.DEBUG)
_logger_ch = logging.StreamHandler()
_logger_ch.setFormatter(logging.Formatter(
"%(asctime)s:%(levelname)s:%(message)s"))
_logger.addHandler(_logger_ch)
def mode(data):
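    # most frequent value in data; np.unique returns sorted values, so ties resolve to the smallest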
values, counts = np.unique(data, return_counts=True)
return values[np.where(counts == max(counts))[0][0]]
class StatisticsMixin:
"""
Mixin to compute class statistics and determine minority/majority labels
"""
def class_label_statistics(self, X, y):
"""
determines class sizes and minority and majority labels
Args:
X (np.array): features
y (np.array): target labels
"""
unique, counts = np.unique(y, return_counts=True)
self.class_stats = dict(zip(unique, counts))
self.min_label = unique[0] if counts[0] < counts[1] else unique[1]
self.maj_label = unique[1] if counts[0] < counts[1] else unique[0]
# shorthands
self.min_label = self.min_label
self.maj_label = self.maj_label
def check_enough_min_samples_for_sampling(self, threshold=2):
if self.class_stats[self.min_label] < threshold:
m = ("The number of minority samples (%d) is not enough "
"for sampling")
m = m % self.class_stats[self.min_label]
_logger.warning(self.__class__.__name__ + ": " + m)
return False
return True
class RandomStateMixin:
"""
Mixin to set random state
"""
def set_random_state(self, random_state):
"""
sets the random_state member of the object
Args:
random_state (int/np.random.RandomState/None): the random state
initializer
"""
self._random_state_init = random_state
if random_state is None:
self.random_state = np.random
elif isinstance(random_state, int):
self.random_state = np.random.RandomState(random_state)
elif isinstance(random_state, np.random.RandomState):
self.random_state = random_state
elif random_state is np.random:
self.random_state = random_state
else:
raise ValueError(
"random state cannot be initialized by " + str(random_state))
class ParameterCheckingMixin:
"""
Mixin to check if parameters come from a valid range
"""
def check_in_range(self, x, name, r):
"""
Check if parameter is in range
Args:
x (numeric): the parameter value
name (str): the parameter name
r (list-like(2)): the lower and upper bound of a range
Throws:
ValueError
"""
if x < r[0] or x > r[1]:
m = ("Value for parameter %s outside the range [%f,%f] not"
" allowed: %f")
m = m % (name, r[0], r[1], x)
raise ValueError(self.__class__.__name__ + ": " + m)
def check_out_range(self, x, name, r):
"""
Check if parameter is outside of range
Args:
x (numeric): the parameter value
name (str): the parameter name
r (list-like(2)): the lower and upper bound of a range
Throws:
ValueError
"""
if x >= r[0] and x <= r[1]:
m = "Value for parameter %s in the range [%f,%f] not allowed: %f"
m = m % (name, r[0], r[1], x)
raise ValueError(self.__class__.__name__ + ": " + m)
def check_less_or_equal(self, x, name, val):
"""
Check if parameter is less than or equal to value
Args:
x (numeric): the parameter value
name (str): the parameter name
val (numeric): value to compare to
Throws:
ValueError
"""
if x > val:
m = "Value for parameter %s greater than %f not allowed: %f > %f"
m = m % (name, val, x, val)
raise ValueError(self.__class__.__name__ + ": " + m)
def check_less_or_equal_par(self, x, name_x, y, name_y):
"""
Check if parameter is less than or equal to another parameter
Args:
x (numeric): the parameter value
name_x (str): the parameter name
y (numeric): the other parameter value
name_y (str): the other parameter name
Throws:
ValueError
"""
if x > y:
m = ("Value for parameter %s greater than parameter %s not"
" allowed: %f > %f")
m = m % (name_x, name_y, x, y)
raise ValueError(self.__class__.__name__ + ": " + m)
def check_less(self, x, name, val):
"""
Check if parameter is less than value
Args:
x (numeric): the parameter value
name (str): the parameter name
val (numeric): value to compare to
Throws:
ValueError
"""
if x >= val:
m = ("Value for parameter %s greater than or equal to %f"
" not allowed: %f >= %f")
m = m % (name, val, x, val)
raise ValueError(self.__class__.__name__ + ": " + m)
def check_less_par(self, x, name_x, y, name_y):
"""
Check if parameter is less than another parameter
Args:
x (numeric): the parameter value
name_x (str): the parameter name
y (numeric): the other parameter value
name_y (str): the other parameter name
Throws:
ValueError
"""
if x >= y:
m = ("Value for parameter %s greater than or equal to parameter"
" %s not allowed: %f >= %f")
m = m % (name_x, name_y, x, y)
raise ValueError(self.__class__.__name__ + ": " + m)
def check_greater_or_equal(self, x, name, val):
"""
Check if parameter is greater than or equal to value
Args:
x (numeric): the parameter value
name (str): the parameter name
val (numeric): value to compare to
Throws:
ValueError
"""
if x < val:
m = "Value for parameter %s less than %f is not allowed: %f < %f"
m = m % (name, val, x, val)
raise ValueError(self.__class__.__name__ + ": " + m)
def check_greater_or_equal_par(self, x, name_x, y, name_y):
"""
Check if parameter is less than or equal to another parameter
Args:
x (numeric): the parameter value
name_x (str): the parameter name
y (numeric): the other parameter value
name_y (str): the other parameter name
Throws:
ValueError
"""
if x < y:
m = ("Value for parameter %s less than parameter %s is not"
" allowed: %f < %f")
m = m % (name_x, name_y, x, y)
raise ValueError(self.__class__.__name__ + ": " + m)
def check_greater(self, x, name, val):
"""
Check if parameter is greater than value
Args:
x (numeric): the parameter value
name (str): the parameter name
val (numeric): value to compare to
Throws:
ValueError
"""
if x <= val:
m = ("Value for parameter %s less than or equal to %f not allowed"
" %f < %f")
m = m % (name, val, x, val)
raise ValueError(self.__class__.__name__ + ": " + m)
def check_greater_par(self, x, name_x, y, name_y):
"""
Check if parameter is greater than or equal to another parameter
Args:
x (numeric): the parameter value
name_x (str): the parameter name
y (numeric): the other parameter value
name_y (str): the other parameter name
Throws:
ValueError
"""
if x <= y:
m = ("Value for parameter %s less than or equal to parameter %s"
" not allowed: %f <= %f")
m = m % (name_x, name_y, x, y)
raise ValueError(self.__class__.__name__ + ": " + m)
def check_equal(self, x, name, val):
"""
Check if parameter is equal to value
Args:
x (numeric): the parameter value
name (str): the parameter name
val (numeric): value to compare to
Throws:
ValueError
"""
if x == val:
m = ("Value for parameter %s equal to parameter %f is not allowed:"
" %f == %f")
m = m % (name, val, x, val)
raise ValueError(self.__class__.__name__ + ": " + m)
def check_equal_par(self, x, name_x, y, name_y):
"""
Check if parameter is equal to another parameter
Args:
x (numeric): the parameter value
name_x (str): the parameter name
y (numeric): the other parameter value
name_y (str): the other parameter name
Throws:
ValueError
"""
if x == y:
m = ("Value for parameter %s equal to parameter %s is not "
"allowed: %f == %f")
m = m % (name_x, name_y, x, y)
raise ValueError(self.__class__.__name__ + ": " + m)
def check_isin(self, x, name, li):
"""
Check if parameter is in list
Args:
x (numeric): the parameter value
name (str): the parameter name
li (list): list to check if parameter is in it
Throws:
ValueError
"""
if x not in li:
m = "Value for parameter %s not in list %s is not allowed: %s"
m = m % (name, str(li), str(x))
raise ValueError(self.__class__.__name__ + ": " + m)
def check_n_jobs(self, x, name):
"""
Check n_jobs parameter
Args:
x (int/None): number of jobs
name (str): the parameter name
Throws:
ValueError
"""
if not ((x is None)
or (x is not None and isinstance(x, int) and not x == 0)):
m = "Value for parameter n_jobs is not allowed: %s" % str(x)
raise ValueError(self.__class__.__name__ + ": " + m)
class ParameterCombinationsMixin:
"""
Mixin to generate parameter combinations
"""
@classmethod
def generate_parameter_combinations(cls, dictionary, raw):
"""
        Generates reasonable parameter combinations
        Args:
            dictionary (dict): dictionary of parameter ranges
            raw (bool): if True, return the raw dictionary instead of the
                        expanded list of combinations
"""
if raw:
return dictionary
keys = sorted(list(dictionary.keys()))
values = [dictionary[k] for k in keys]
combinations = [dict(zip(keys, p))
for p in list(itertools.product(*values))]
return combinations
class NoiseFilter(StatisticsMixin,
ParameterCheckingMixin,
ParameterCombinationsMixin):
"""
Parent class of noise filtering methods
"""
def __init__(self):
"""
Constructor
"""
pass
def remove_noise(self, X, y):
"""
Removes noise
Args:
X (np.array): features
y (np.array): target labels
"""
pass
def get_params(self, deep=False):
"""
Return parameters
Returns:
dict: dictionary of parameters
"""
return {}
def set_params(self, **params):
"""
Set parameters
Args:
params (dict): dictionary of parameters
"""
for key, value in params.items():
setattr(self, key, value)
return self
class OverSampling(StatisticsMixin,
ParameterCheckingMixin,
ParameterCombinationsMixin,
RandomStateMixin):
"""
Base class of oversampling methods
"""
categories = []
cat_noise_removal = 'NR'
cat_dim_reduction = 'DR'
cat_uses_classifier = 'Clas'
cat_sample_componentwise = 'SCmp'
cat_sample_ordinary = 'SO'
cat_sample_copy = 'SCpy'
cat_memetic = 'M'
cat_density_estimation = 'DE'
cat_density_based = 'DB'
cat_extensive = 'Ex'
cat_changes_majority = 'CM'
cat_uses_clustering = 'Clus'
cat_borderline = 'BL'
cat_application = 'A'
def __init__(self):
pass
def det_n_to_sample(self, strategy, n_maj, n_min):
"""
Determines the number of samples to generate
Args:
            strategy (str/float): if a float (or int), the fraction of the
                                  difference between the majority and
                                  minority counts to generate, e.g. 0.1
                                  means 10% of the difference; string
                                  strategies (such as 'min2maj') are not
                                  implemented here and raise a ValueError
"""
if isinstance(strategy, float) or isinstance(strategy, int):
return max([0, int((n_maj - n_min)*strategy)])
else:
m = "Value %s for parameter strategy is not supported" % strategy
raise ValueError(self.__class__.__name__ + ": " + m)
def sample_between_points(self, x, y):
"""
Sample randomly along the line between two points.
Args:
x (np.array): point 1
y (np.array): point 2
Returns:
np.array: the new sample
"""
return x + (y - x)*self.random_state.random_sample()
def sample_between_points_componentwise(self, x, y, mask=None):
"""
Sample each dimension separately between the two points.
Args:
x (np.array): point 1
y (np.array): point 2
mask (np.array): array of 0,1s - specifies which dimensions
to sample
Returns:
np.array: the new sample being generated
"""
if mask is None:
return x + (y - x)*self.random_state.random_sample()
else:
return x + (y - x)*self.random_state.random_sample()*mask
def sample_by_jittering(self, x, std):
"""
Sample by jittering.
Args:
x (np.array): base point
std (float): standard deviation
Returns:
np.array: the new sample
"""
return x + (self.random_state.random_sample() - 0.5)*2.0*std
def sample_by_jittering_componentwise(self, x, std):
"""
Sample by jittering componentwise.
Args:
x (np.array): base point
std (np.array): standard deviation
Returns:
np.array: the new sample
"""
return x + (self.random_state.random_sample(len(x))-0.5)*2.0 * std
def sample_by_gaussian_jittering(self, x, std):
"""
Sample by Gaussian jittering
Args:
x (np.array): base point
std (np.array): standard deviation
Returns:
np.array: the new sample
"""
return self.random_state.normal(x, std)
def sample(self, X, y):
"""
        The sampling function, reimplemented in child classes
Args:
X (np.matrix): features
y (np.array): labels
Returns:
np.matrix, np.array: sampled X and y
"""
return X, y
def fit_resample(self, X, y):
"""
Alias of the function "sample" for compatibility with imbalanced-learn
pipelines
"""
return self.sample(X, y)
def sample_with_timing(self, X, y):
begin = time.time()
X_samp, y_samp = self.sample(X, y)
_logger.info(self.__class__.__name__ + ": " +
("runtime: %f" % (time.time() - begin)))
return X_samp, y_samp
def preprocessing_transform(self, X):
"""
Transforms new data according to the possible transformation
implemented by the function "sample".
Args:
X (np.matrix): features
Returns:
np.matrix: transformed features
"""
return X
def get_params(self, deep=False):
"""
Returns the parameters of the object as a dictionary.
Returns:
dict: the parameters of the object
"""
pass
def set_params(self, **params):
"""
Set parameters
Args:
params (dict): dictionary of parameters
"""
for key, value in params.items():
setattr(self, key, value)
return self
def descriptor(self):
"""
Returns:
str: JSON description of the current sampling object
"""
return str((self.__class__.__name__, str(self.get_params())))
def __str__(self):
return self.descriptor()
class FOS_1(OverSampling): #F4_SMOTE(OverSampling):
categories = [OverSampling.cat_sample_ordinary,
OverSampling.cat_extensive]
def __init__(self,
proportion=1.0,
n_neighbors=5,
n_jobs=1,
random_state=None):
super().__init__()
self.check_greater_or_equal(proportion, "proportion", 0)
self.check_greater_or_equal(n_neighbors, "n_neighbors", 1)
self.check_n_jobs(n_jobs, 'n_jobs')
self.proportion = proportion
self.n_neighbors = n_neighbors
self.n_jobs = n_jobs
self.set_random_state(random_state)
@classmethod
def parameter_combinations(cls, raw=False):
parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,
1.0, 1.1,1.5, 2.0],
'n_neighbors': [3, 5, 7]}
return cls.generate_parameter_combinations(parameter_combinations, raw)
def sample(self, X, y,prot_idx, pv_mid_pt, prot_grp, maj_min, nsamp,
pv_max,pv_min):
_logger.info(self.__class__.__name__ + ": " +
"Running sampling via %s" % self.descriptor())
self.class_label_statistics(X, y)
if not self.check_enough_min_samples_for_sampling():
return X.copy(), y.copy()
y = np.squeeze(y)
n_to_sample = nsamp
if maj_min == 0:
X_min = X[y == self.min_label]
y_min = y[y == self.min_label]
prot = X_min[:,prot_idx]
if prot_grp == 0:
X_min = X_min[prot==prot_grp]
y_min = y_min[prot==prot_grp]
if prot_grp == 1:
X_min = X_min[prot==prot_grp]
y_min = y_min[prot==prot_grp]
if maj_min == 1:
X_min = X[y == self.maj_label]
y_min = y[y == self.maj_label]
prot = X_min[:,prot_idx]
if prot_grp == 0:
X_min = X_min[prot==prot_grp]
y_min = y_min[prot==prot_grp]
if prot_grp == 1:
X_min = X_min[prot==prot_grp]
y_min = y_min[prot==prot_grp]
self.min_label = np.copy(self.maj_label)
if n_to_sample == 0:
return X.copy(), y.copy()
# fitting the model
n_neigh = min([len(X_min), self.n_neighbors+1])
nn = NearestNeighbors(n_neighbors=n_neigh, n_jobs=self.n_jobs)
nn.fit(X_min)
dist, ind = nn.kneighbors(X_min)
if n_to_sample == 0:
return X.copy(), y.copy()
# generating samples
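        # SMOTE-style interpolation: pick random base points from the filtered minority
        # set and one of their k nearest neighbours, then sample uniformly along the
        # segment between them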
#np.random.seed(seed=1)
base_indices = self.random_state.choice(list(range(len(X_min))),
n_to_sample)
neighbor_indices = self.random_state.choice(list(range(1, n_neigh)),
n_to_sample)
X_base = X_min[base_indices]
X_neighbor = X_min[ind[base_indices, neighbor_indices]]
samples = X_base + np.multiply(self.random_state.rand(n_to_sample,
1),
X_neighbor - X_base)
return (np.vstack([X, samples]),
np.hstack([y, np.hstack([self.min_label]*n_to_sample)]))
def get_params(self, deep=False):
return {'proportion': self.proportion,
'n_neighbors': self.n_neighbors,
'n_jobs': self.n_jobs,
'random_state': self._random_state_init}
class FOS_2(OverSampling): #F3a_SMOTE(OverSampling):
categories = [OverSampling.cat_sample_ordinary,
OverSampling.cat_extensive]
def __init__(self,
proportion=1.0,
n_neighbors=5,
n_jobs=1,
random_state=None):
super().__init__()
self.check_greater_or_equal(proportion, "proportion", 0)
self.check_greater_or_equal(n_neighbors, "n_neighbors", 1)
self.check_n_jobs(n_jobs, 'n_jobs')
self.proportion = proportion
self.n_neighbors = n_neighbors
self.n_jobs = n_jobs
self.set_random_state(random_state)
@classmethod
def parameter_combinations(cls, raw=False):
parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,
1.0, 1.1,1.5, 2.0],
'n_neighbors': [3, 5, 7]}
return cls.generate_parameter_combinations(parameter_combinations, raw)
def sample(self, X, y,prot_idx, pv_mid_pt, prot_grp, maj_min, nsamp):
_logger.info(self.__class__.__name__ + ": " +
"Running sampling via %s" % self.descriptor())
self.class_label_statistics(X, y)
if not self.check_enough_min_samples_for_sampling():
return X.copy(), y.copy()
n_to_sample = nsamp
if maj_min == 0:
X_min = X[y == self.min_label]
y_min = y[y == self.min_label]
prot = X_min[:,prot_idx]
if prot_grp == 0:
X_min1 = X_min[prot<pv_mid_pt]
y_min1 = y_min[prot<pv_mid_pt]
if prot_grp == 1:
X_min1 = X_min[prot>pv_mid_pt]
y_min1 = y_min[prot>pv_mid_pt]
if maj_min == 1:
X_min = X[y == self.maj_label]
y_min = y[y == self.maj_label]
prot = X_min[:,prot_idx]
if prot_grp == 0:
X_min1 = X_min[prot<pv_mid_pt]
y_min1 = y_min[prot<pv_mid_pt]
if prot_grp == 1:
X_min1 = X_min[prot>pv_mid_pt]
y_min1 = y_min[prot>pv_mid_pt]
self.min_label = np.copy(self.maj_label)
if n_to_sample == 0:
return X.copy(), y.copy()
# fitting the model
n_neigh = min([len(X_min), self.n_neighbors+1])
nn = NearestNeighbors(n_neighbors=n_neigh, n_jobs=self.n_jobs)
nn.fit(X_min)
dist, ind = nn.kneighbors(X_min1)
if n_to_sample == 0:
return X.copy(), y.copy()
# generating samples
np.random.seed(seed=1)
base_indices = self.random_state.choice(list(range(len(X_min1))),
n_to_sample)
neighbor_indices = self.random_state.choice(list(range(1, n_neigh)),
n_to_sample)
X_base = X_min1[base_indices]
X_neighbor = X_min[ind[base_indices, neighbor_indices]]
samples = X_base + np.multiply(self.random_state.rand(n_to_sample,
1),
X_neighbor - X_base)
return (np.vstack([X, samples]),
np.hstack([y, np.hstack([self.min_label]*n_to_sample)]))
def get_params(self, deep=False):
return {'proportion': self.proportion,
'n_neighbors': self.n_neighbors,
'n_jobs': self.n_jobs,
'random_state': self._random_state_init}
|
[
"numpy.random.seed",
"numpy.copy",
"logging.StreamHandler",
"logging.getLogger",
"time.time",
"logging.Formatter",
"numpy.random.RandomState",
"numpy.hstack",
"sklearn.neighbors.NearestNeighbors",
"itertools.product",
"numpy.squeeze",
"numpy.vstack",
"numpy.unique"
] |
[((260, 295), 'logging.getLogger', 'logging.getLogger', (['"""smote_variants"""'], {}), "('smote_variants')\n", (277, 295), False, 'import logging\n'), ((343, 366), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (364, 366), False, 'import logging\n'), ((392, 450), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s:%(levelname)s:%(message)s"""'], {}), "('%(asctime)s:%(levelname)s:%(message)s')\n", (409, 450), False, 'import logging\n'), ((539, 574), 'numpy.unique', 'np.unique', (['data'], {'return_counts': '(True)'}), '(data, return_counts=True)\n', (548, 574), True, 'import numpy as np\n'), ((1017, 1049), 'numpy.unique', 'np.unique', (['y'], {'return_counts': '(True)'}), '(y, return_counts=True)\n', (1026, 1049), True, 'import numpy as np\n'), ((16779, 16790), 'time.time', 'time.time', ([], {}), '()\n', (16788, 16790), False, 'import time\n'), ((19515, 19528), 'numpy.squeeze', 'np.squeeze', (['y'], {}), '(y)\n', (19525, 19528), True, 'import numpy as np\n'), ((20727, 20784), 'sklearn.neighbors.NearestNeighbors', 'NearestNeighbors', ([], {'n_neighbors': 'n_neigh', 'n_jobs': 'self.n_jobs'}), '(n_neighbors=n_neigh, n_jobs=self.n_jobs)\n', (20743, 20784), False, 'from sklearn.neighbors import NearestNeighbors\n'), ((24637, 24694), 'sklearn.neighbors.NearestNeighbors', 'NearestNeighbors', ([], {'n_neighbors': 'n_neigh', 'n_jobs': 'self.n_jobs'}), '(n_neighbors=n_neigh, n_jobs=self.n_jobs)\n', (24653, 24694), False, 'from sklearn.neighbors import NearestNeighbors\n'), ((24891, 24913), 'numpy.random.seed', 'np.random.seed', ([], {'seed': '(1)'}), '(seed=1)\n', (24905, 24913), True, 'import numpy as np\n'), ((20521, 20544), 'numpy.copy', 'np.copy', (['self.maj_label'], {}), '(self.maj_label)\n', (20528, 20544), True, 'import numpy as np\n'), ((21622, 21645), 'numpy.vstack', 'np.vstack', (['[X, samples]'], {}), '([X, samples])\n', (21631, 21645), True, 'import numpy as np\n'), ((24403, 24426), 'numpy.copy', 'np.copy', (['self.maj_label'], {}), '(self.maj_label)\n', (24410, 24426), True, 'import numpy as np\n'), ((25554, 25577), 'numpy.vstack', 'np.vstack', (['[X, samples]'], {}), '([X, samples])\n', (25563, 25577), True, 'import numpy as np\n'), ((2341, 2376), 'numpy.random.RandomState', 'np.random.RandomState', (['random_state'], {}), '(random_state)\n', (2362, 2376), True, 'import numpy as np\n'), ((11605, 11631), 'itertools.product', 'itertools.product', (['*values'], {}), '(*values)\n', (11622, 11631), False, 'import itertools\n'), ((21678, 21719), 'numpy.hstack', 'np.hstack', (['([self.min_label] * n_to_sample)'], {}), '([self.min_label] * n_to_sample)\n', (21687, 21719), True, 'import numpy as np\n'), ((25610, 25651), 'numpy.hstack', 'np.hstack', (['([self.min_label] * n_to_sample)'], {}), '([self.min_label] * n_to_sample)\n', (25619, 25651), True, 'import numpy as np\n'), ((16930, 16941), 'time.time', 'time.time', ([], {}), '()\n', (16939, 16941), False, 'import time\n')]
|
"""
"""
# IMPORT modules. Must have unittest, and probably coast.
import coast
from coast import general_utils
import unittest
import numpy as np
import os.path as path
import xarray as xr
import matplotlib.pyplot as plt
import unit_test_files as files
class test_transect_methods(unittest.TestCase):
def test_determine_extract_transect_indices(self):
nemo_t = coast.Gridded(files.fn_nemo_grid_t_dat, fn_domain=files.fn_nemo_dom, config=files.fn_config_t_grid)
yt, xt, length_of_line = nemo_t.transect_indices([51, -5], [49, -9])
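        # transect_indices maps the two (lat, lon) end points onto model grid (y, x)
        # indices along the line between them (reference values below)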
# Test transect indices
yt_ref = [
164,
163,
162,
162,
161,
160,
159,
158,
157,
156,
156,
155,
154,
153,
152,
152,
151,
150,
149,
148,
147,
146,
146,
145,
144,
143,
142,
142,
141,
140,
139,
138,
137,
136,
136,
135,
134,
]
xt_ref = [
134,
133,
132,
131,
130,
129,
128,
127,
126,
125,
124,
123,
122,
121,
120,
119,
118,
117,
116,
115,
114,
113,
112,
111,
110,
109,
108,
107,
106,
105,
104,
103,
102,
101,
100,
99,
98,
]
length_ref = 37
check1 = xt == xt_ref
check2 = yt == yt_ref
check3 = length_of_line == length_ref
self.assertTrue(check1, msg="check1")
self.assertTrue(check2, msg="check2")
self.assertTrue(check3, msg="check3")
def test_calculate_transport_velocity_and_depth(self):
with self.subTest("Calculate_transports and velocties and depth"):
nemo_t = coast.Gridded(
fn_data=files.fn_nemo_grid_t_dat, fn_domain=files.fn_nemo_dom, config=files.fn_config_t_grid
)
nemo_u = coast.Gridded(
fn_data=files.fn_nemo_grid_u_dat, fn_domain=files.fn_nemo_dom, config=files.fn_config_u_grid
)
nemo_v = coast.Gridded(
fn_data=files.fn_nemo_grid_v_dat, fn_domain=files.fn_nemo_dom, config=files.fn_config_v_grid
)
nemo_f = coast.Gridded(fn_domain=files.fn_nemo_dom, config=files.fn_config_f_grid)
tran_f = coast.TransectF(nemo_f, (54, -15), (56, -12))
tran_f.calc_flow_across_transect(nemo_u, nemo_v)
cksum1 = tran_f.data_cross_tran_flow.normal_velocities.sum(dim=("t_dim", "z_dim", "r_dim")).item()
cksum2 = tran_f.data_cross_tran_flow.normal_transports.sum(dim=("t_dim", "r_dim")).item()
check1 = np.isclose(cksum1, -253.6484375)
check2 = np.isclose(cksum2, -48.67562136873888)
self.assertTrue(check1, msg="check1")
self.assertTrue(check2, msg="check2")
with self.subTest("plot_transect_on_map"):
fig, ax = tran_f.plot_transect_on_map()
ax.set_xlim([-20, 0]) # Problem: nice to make the land appear.
ax.set_ylim([45, 65]) # But can not call plt.show() before adjustments are made...
# fig.tight_layout()
fig.savefig(files.dn_fig + "transect_map.png")
plt.close("all")
with self.subTest("plot_normal_velocity"):
plot_dict = {"fig_size": (5, 3), "title": "Normal velocities"}
fig, ax = tran_f.plot_normal_velocity(time=0, cmap="seismic", plot_info=plot_dict, smoothing_window=2)
fig.tight_layout()
fig.savefig(files.dn_fig + "transect_velocities.png")
plt.close("all")
with self.subTest("plot_depth_integrated_transport"):
plot_dict = {"fig_size": (5, 3), "title": "Transport across AB"}
fig, ax = tran_f.plot_depth_integrated_transport(time=0, plot_info=plot_dict, smoothing_window=2)
fig.tight_layout()
fig.savefig(files.dn_fig + "transect_transport.png")
plt.close("all")
def test_transect_density_and_pressure(self):
nemo_t = coast.Gridded(
fn_data=files.fn_nemo_grid_t_dat, fn_domain=files.fn_nemo_dom, config=files.fn_config_t_grid
)
tran_t = coast.TransectT(nemo_t, (54, -15), (56, -12))
tran_t.construct_pressure()
cksum1 = tran_t.data.density_zlevels.sum(dim=["t_dim", "r_dim", "depth_z_levels"]).compute().item()
cksum2 = tran_t.data.pressure_h_zlevels.sum(dim=["t_dim", "r_dim", "depth_z_levels"]).compute().item()
cksum3 = tran_t.data.pressure_s.sum(dim=["t_dim", "r_dim"]).compute().item()
check1 = np.isclose(cksum1, 23800545.87457855)
check2 = np.isclose(cksum2, 135536478.93335825)
check3 = np.isclose(cksum3, -285918.5625)
self.assertTrue(check1, msg="check1")
self.assertTrue(check2, msg="check2")
self.assertTrue(check3, msg="check3")
def test_cross_transect_geostrophic_flow(self):
nemo_f = coast.Gridded(fn_domain=files.fn_nemo_dom, config=files.fn_config_f_grid)
tran_f = coast.TransectF(nemo_f, (54, -15), (56, -12))
nemo_t = coast.Gridded(
fn_data=files.fn_nemo_grid_t_dat, fn_domain=files.fn_nemo_dom, config=files.fn_config_t_grid
)
tran_f.calc_geostrophic_flow(nemo_t, config_u=files.fn_config_u_grid, config_v=files.fn_config_v_grid)
cksum1 = tran_f.data_cross_tran_flow.normal_velocity_hpg.sum(dim=("t_dim", "depth_z_levels", "r_dim")).item()
cksum2 = tran_f.data_cross_tran_flow.normal_velocity_spg.sum(dim=("t_dim", "r_dim")).item()
cksum3 = tran_f.data_cross_tran_flow.normal_transport_hpg.sum(dim=("t_dim", "r_dim")).item()
cksum4 = tran_f.data_cross_tran_flow.normal_transport_spg.sum(dim=("t_dim", "r_dim")).item()
check1 = np.isclose(cksum1, 84.8632969783)
check2 = np.isclose(cksum2, -5.09718418121)
check3 = np.isclose(cksum3, 115.2587369660)
check4 = np.isclose(cksum4, -106.7897376093)
self.assertTrue(check1, msg="check1")
self.assertTrue(check2, msg="check2")
self.assertTrue(check3, msg="check3")
self.assertTrue(check4, msg="check4")
|
[
"coast.TransectT",
"matplotlib.pyplot.close",
"coast.TransectF",
"numpy.isclose",
"coast.Gridded"
] |
[((377, 481), 'coast.Gridded', 'coast.Gridded', (['files.fn_nemo_grid_t_dat'], {'fn_domain': 'files.fn_nemo_dom', 'config': 'files.fn_config_t_grid'}), '(files.fn_nemo_grid_t_dat, fn_domain=files.fn_nemo_dom, config\n =files.fn_config_t_grid)\n', (390, 481), False, 'import coast\n'), ((4641, 4752), 'coast.Gridded', 'coast.Gridded', ([], {'fn_data': 'files.fn_nemo_grid_t_dat', 'fn_domain': 'files.fn_nemo_dom', 'config': 'files.fn_config_t_grid'}), '(fn_data=files.fn_nemo_grid_t_dat, fn_domain=files.fn_nemo_dom,\n config=files.fn_config_t_grid)\n', (4654, 4752), False, 'import coast\n'), ((4788, 4833), 'coast.TransectT', 'coast.TransectT', (['nemo_t', '(54, -15)', '(56, -12)'], {}), '(nemo_t, (54, -15), (56, -12))\n', (4803, 4833), False, 'import coast\n'), ((5191, 5228), 'numpy.isclose', 'np.isclose', (['cksum1', '(23800545.87457855)'], {}), '(cksum1, 23800545.87457855)\n', (5201, 5228), True, 'import numpy as np\n'), ((5246, 5284), 'numpy.isclose', 'np.isclose', (['cksum2', '(135536478.93335825)'], {}), '(cksum2, 135536478.93335825)\n', (5256, 5284), True, 'import numpy as np\n'), ((5302, 5334), 'numpy.isclose', 'np.isclose', (['cksum3', '(-285918.5625)'], {}), '(cksum3, -285918.5625)\n', (5312, 5334), True, 'import numpy as np\n'), ((5543, 5616), 'coast.Gridded', 'coast.Gridded', ([], {'fn_domain': 'files.fn_nemo_dom', 'config': 'files.fn_config_f_grid'}), '(fn_domain=files.fn_nemo_dom, config=files.fn_config_f_grid)\n', (5556, 5616), False, 'import coast\n'), ((5634, 5679), 'coast.TransectF', 'coast.TransectF', (['nemo_f', '(54, -15)', '(56, -12)'], {}), '(nemo_f, (54, -15), (56, -12))\n', (5649, 5679), False, 'import coast\n'), ((5697, 5808), 'coast.Gridded', 'coast.Gridded', ([], {'fn_data': 'files.fn_nemo_grid_t_dat', 'fn_domain': 'files.fn_nemo_dom', 'config': 'files.fn_config_t_grid'}), '(fn_data=files.fn_nemo_grid_t_dat, fn_domain=files.fn_nemo_dom,\n config=files.fn_config_t_grid)\n', (5710, 5808), False, 'import coast\n'), ((6376, 6409), 'numpy.isclose', 'np.isclose', (['cksum1', '(84.8632969783)'], {}), '(cksum1, 84.8632969783)\n', (6386, 6409), True, 'import numpy as np\n'), ((6427, 6461), 'numpy.isclose', 'np.isclose', (['cksum2', '(-5.09718418121)'], {}), '(cksum2, -5.09718418121)\n', (6437, 6461), True, 'import numpy as np\n'), ((6479, 6512), 'numpy.isclose', 'np.isclose', (['cksum3', '(115.258736966)'], {}), '(cksum3, 115.258736966)\n', (6489, 6512), True, 'import numpy as np\n'), ((6531, 6566), 'numpy.isclose', 'np.isclose', (['cksum4', '(-106.7897376093)'], {}), '(cksum4, -106.7897376093)\n', (6541, 6566), True, 'import numpy as np\n'), ((2326, 2437), 'coast.Gridded', 'coast.Gridded', ([], {'fn_data': 'files.fn_nemo_grid_t_dat', 'fn_domain': 'files.fn_nemo_dom', 'config': 'files.fn_config_t_grid'}), '(fn_data=files.fn_nemo_grid_t_dat, fn_domain=files.fn_nemo_dom,\n config=files.fn_config_t_grid)\n', (2339, 2437), False, 'import coast\n'), ((2485, 2596), 'coast.Gridded', 'coast.Gridded', ([], {'fn_data': 'files.fn_nemo_grid_u_dat', 'fn_domain': 'files.fn_nemo_dom', 'config': 'files.fn_config_u_grid'}), '(fn_data=files.fn_nemo_grid_u_dat, fn_domain=files.fn_nemo_dom,\n config=files.fn_config_u_grid)\n', (2498, 2596), False, 'import coast\n'), ((2644, 2755), 'coast.Gridded', 'coast.Gridded', ([], {'fn_data': 'files.fn_nemo_grid_v_dat', 'fn_domain': 'files.fn_nemo_dom', 'config': 'files.fn_config_v_grid'}), '(fn_data=files.fn_nemo_grid_v_dat, fn_domain=files.fn_nemo_dom,\n config=files.fn_config_v_grid)\n', (2657, 2755), False, 'import coast\n'), ((2803, 2876), 
'coast.Gridded', 'coast.Gridded', ([], {'fn_domain': 'files.fn_nemo_dom', 'config': 'files.fn_config_f_grid'}), '(fn_domain=files.fn_nemo_dom, config=files.fn_config_f_grid)\n', (2816, 2876), False, 'import coast\n'), ((2899, 2944), 'coast.TransectF', 'coast.TransectF', (['nemo_f', '(54, -15)', '(56, -12)'], {}), '(nemo_f, (54, -15), (56, -12))\n', (2914, 2944), False, 'import coast\n'), ((3240, 3272), 'numpy.isclose', 'np.isclose', (['cksum1', '(-253.6484375)'], {}), '(cksum1, -253.6484375)\n', (3250, 3272), True, 'import numpy as np\n'), ((3294, 3332), 'numpy.isclose', 'np.isclose', (['cksum2', '(-48.67562136873888)'], {}), '(cksum2, -48.67562136873888)\n', (3304, 3332), True, 'import numpy as np\n'), ((3813, 3829), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (3822, 3829), True, 'import matplotlib.pyplot as plt\n'), ((4181, 4197), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (4190, 4197), True, 'import matplotlib.pyplot as plt\n'), ((4556, 4572), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (4565, 4572), True, 'import matplotlib.pyplot as plt\n')]
|
import numpy as np
from csv import reader
from decimal import *
def SDM(datalist):
"""
    Successive difference method (SDM): pairs each reading in the second half
    of datalist with the corresponding reading in the first half and returns
    their differences.
    :param datalist: list of numeric strings
    :return: list of pairwise differences (second half minus first half), as strings
"""
length = len(datalist)
resultlist = []
halfLen = int(length/2)
for i in range(0, halfLen):
resultlist.append((Decimal(datalist[i+halfLen]) - Decimal(datalist[i])).to_eng_string())
return resultlist
class DataReader:
def __init__(self, filename):
self.filename = filename
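        # Expected CSV layout (inferred from the indexing below): row 0 holds the result
        # variable name, unit and confidence level P; row 1 holds the result formula and a
        # Y/N flag; rows 4+ hold, per measured variable: name, unit, Ub formula, Ub,
        # SDM flag (Y/N), per-variable formula, then the readings.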
with open(filename, 'rt', encoding='UTF-8') as raw_data:
readers = reader(raw_data, delimiter=',')
overx = list(readers)
data = np.array(overx)
self.data = data
self.resultVar = data[0][1]
self.resultUnit = data[0][3]
self.function = data[1][1]
self.P = float(data[0][5])
if data[1][3] == 'Y':
self.flag = True
elif data[1][3] == 'N':
self.flag = False
else:
raise IOError('Y or N wanted, not ' + data[1][3])
experimentdata = data[4:len(data)]
tempvarlist = []
tempunitlist = []
tempdatalist = []
tempUblist = []
tempSDMflag = []
tempUbFunclist = []
tempfunctionlist = []
for item in experimentdata:
tempvarlist.append(item[0])
tempunitlist.append(item[1])
tempUbFunclist.append(item[2])
temptempdata = []
for j in range(3, len(item)):
if j == 3:
tempUblist.append(item[j])
elif j == 4:
tempSDMflag.append(item[j])
elif j == 5:
tempfunctionlist.append(item[j])
else:
if not item[j] == '':
temptempdata.append(item[j])
tempdatalist.append(temptempdata)
self.varList = tempvarlist
self.unitList = tempunitlist
self.UbList = tempUblist
self.UbFuncList = tempUbFunclist
self.SDMflagList = tempSDMflag
self.TempFunctionList = tempfunctionlist
for i in range(0, len(tempSDMflag)):
if tempSDMflag[i] == 'Y':
tempdatalist[i] = SDM(tempdatalist[i])
self.dataList = tempdatalist
|
[
"numpy.array",
"csv.reader"
] |
[((566, 597), 'csv.reader', 'reader', (['raw_data'], {'delimiter': '""","""'}), "(raw_data, delimiter=',')\n", (572, 597), False, 'from csv import reader\n'), ((653, 668), 'numpy.array', 'np.array', (['overx'], {}), '(overx)\n', (661, 668), True, 'import numpy as np\n')]
|
import pretty_midi
import numpy as np
'''
Note class: represent note, including:
1. the note pitch
2. the note duration
3. downbeat
4. intensity of note sound
'''
class Note:
def __init__(self):
self.pitch = 0
self.length = 0
self.downbeat = False
self.force = 0
'''
Midi2Numpy: tool to convert midi file to numpy list of Note
input_path: the path of the input midi file
track_index: the index of the melody track of midi
output_path: the path to save the numpy array
'''
def Midi2Numpy(input_path, output_path, track_index):
midi_data = pretty_midi.PrettyMIDI(input_path)
notes = midi_data.instruments[track_index].notes
downbeats = midi_data.get_downbeats()
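    # downbeat times are absolute times in seconds; a note is flagged as a downbeat
    # when any downbeat falls within its [start, end) interval (see the loop below)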
dataset = []
for n in notes:
note = Note()
for i in downbeats:
'''
the downbeat locates in this note's duration
we see the note as downbeat
'''
if n.start <= i < n.end:
note.downbeat = True
note.pitch = n.pitch
note.length = n.end - n.start
note.force = n.velocity
dataset.append(note)
np.save(output_path, dataset)
path = 'plag/23_ma este meg.mid'
test = pretty_midi.PrettyMIDI()
midi_data = pretty_midi.PrettyMIDI(path)
# decide the track index
track_index = 0
notes = midi_data.instruments[track_index]
test.instruments.append(notes)
test.write('test.mid')
test.write("newdata" + path[4:])
|
[
"numpy.save",
"pretty_midi.PrettyMIDI"
] |
[((1240, 1264), 'pretty_midi.PrettyMIDI', 'pretty_midi.PrettyMIDI', ([], {}), '()\n', (1262, 1264), False, 'import pretty_midi\n'), ((1277, 1305), 'pretty_midi.PrettyMIDI', 'pretty_midi.PrettyMIDI', (['path'], {}), '(path)\n', (1299, 1305), False, 'import pretty_midi\n'), ((613, 647), 'pretty_midi.PrettyMIDI', 'pretty_midi.PrettyMIDI', (['input_path'], {}), '(input_path)\n', (635, 647), False, 'import pretty_midi\n'), ((1167, 1196), 'numpy.save', 'np.save', (['output_path', 'dataset'], {}), '(output_path, dataset)\n', (1174, 1196), True, 'import numpy as np\n')]
|
"""Utility functions and classes for visualization and logging."""
import os
from datetime import datetime
import cv2
import imageio
import numpy as np
from allenact_plugins.manipulathor_plugin.manipulathor_utils import initialize_arm
from allenact_plugins.manipulathor_plugin.manipulathor_utils import (
reset_environment_and_additional_commands,
transport_wrapper,
)
class LoggerVisualizer:
def __init__(self, exp_name="", log_dir=""):
if log_dir == "":
log_dir = self.__class__.__name__
if exp_name == "":
exp_name = "NoNameExp"
now = datetime.now()
self.exp_name = exp_name
log_dir = os.path.join(
"experiment_output/visualizations",
exp_name,
log_dir + "_" + now.strftime("%m_%d_%Y_%H_%M_%S_%f"),
)
self.log_dir = log_dir
os.makedirs(self.log_dir, exist_ok=True)
self.log_queue = []
self.action_queue = []
self.logger_index = 0
def log(self, environment, action_str):
raise Exception("Not Implemented")
def is_empty(self):
return len(self.log_queue) == 0
def finish_episode_metrics(self, episode_info, task_info, metric_results):
pass
def finish_episode(self, environment, episode_info, task_info):
pass
class TestMetricLogger(LoggerVisualizer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.total_metric_dict = {}
log_file_name = os.path.join(
self.log_dir, "test_metric_{}.txt".format(self.exp_name)
)
self.metric_log_file = open(log_file_name, "w")
def average_dict(self):
result = {}
for (k, v) in self.total_metric_dict.items():
result[k] = sum(v) / len(v)
return result
def finish_episode_metrics(self, episode_info, task_info, metric_results=None):
if metric_results is None:
print("had to reset")
self.log_queue = []
self.action_queue = []
return
for k in metric_results.keys():
if "metric" in k or k in ["ep_length", "reward", "success"]:
self.total_metric_dict.setdefault(k, [])
self.total_metric_dict[k].append(metric_results[k])
print(
"total",
len(self.total_metric_dict["success"]),
"average test metric",
self.average_dict(),
)
# save the task info and all the action queue and results
log_dict = {
"task_info_metrics": metric_results,
"action_sequence": self.action_queue,
"logger_number": self.logger_index,
}
self.logger_index += 1
self.metric_log_file.write(str(log_dict))
self.metric_log_file.write("\n")
print("Logging to", self.metric_log_file.name)
self.log_queue = []
self.action_queue = []
def log(self, environment, action_str):
# We can add agent arm and state location if needed
self.action_queue.append(action_str)
self.log_queue.append(action_str)
class BringObjImageVisualizer(LoggerVisualizer):
def finish_episode(self, environment, episode_info, task_info):
now = datetime.now()
time_to_write = now.strftime("%m_%d_%Y_%H_%M_%S_%f")
time_to_write += "log_ind_{}".format(self.logger_index)
self.logger_index += 1
print("Loggigng", time_to_write, "len", len(self.log_queue))
source_object_id = task_info["source_object_id"]
goal_object_id = task_info["goal_object_id"]
pickup_success = episode_info.object_picked_up
episode_success = episode_info._success
# Put back if you want the images
# for i, img in enumerate(self.log_queue):
# image_dir = os.path.join(self.log_dir, time_to_write + '_seq{}.png'.format(str(i)))
# cv2.imwrite(image_dir, img[:,:,[2,1,0]])
episode_success_offset = "succ" if episode_success else "fail"
pickup_success_offset = "succ" if pickup_success else "fail"
gif_name = (
time_to_write
+ "_from_"
+ source_object_id.split("|")[0]
+ "_to_"
+ goal_object_id.split("|")[0]
+ "_pickup_"
+ pickup_success_offset
+ "_episode_"
+ episode_success_offset
+ ".gif"
)
concat_all_images = np.expand_dims(np.stack(self.log_queue, axis=0), axis=1)
save_image_list_to_gif(concat_all_images, gif_name, self.log_dir)
this_controller = environment.controller
scene = this_controller.last_event.metadata["sceneName"]
reset_environment_and_additional_commands(this_controller, scene)
self.log_start_goal(
environment,
task_info["visualization_source"],
tag="start",
img_adr=os.path.join(self.log_dir, time_to_write),
)
self.log_start_goal(
environment,
task_info["visualization_target"],
tag="goal",
img_adr=os.path.join(self.log_dir, time_to_write),
)
self.log_queue = []
self.action_queue = []
def log(self, environment, action_str):
image_tensor = environment.current_frame
self.action_queue.append(action_str)
self.log_queue.append(image_tensor)
def log_start_goal(self, env, task_info, tag, img_adr):
object_location = task_info["object_location"]
object_id = task_info["object_id"]
agent_state = task_info["agent_pose"]
this_controller = env.controller
# We should not reset here
# for start arm from high up as a cheating, this block is very important. never remove
event1, event2, event3 = initialize_arm(this_controller)
if not (
event1.metadata["lastActionSuccess"]
and event2.metadata["lastActionSuccess"]
and event3.metadata["lastActionSuccess"]
):
print("ERROR: ARM MOVEMENT FAILED in logging! SHOULD NEVER HAPPEN")
event = transport_wrapper(this_controller, object_id, object_location)
if event.metadata["lastActionSuccess"] == False:
print("ERROR: oh no could not transport in logging")
event = this_controller.step(
dict(
action="TeleportFull",
standing=True,
x=agent_state["position"]["x"],
y=agent_state["position"]["y"],
z=agent_state["position"]["z"],
rotation=dict(
x=agent_state["rotation"]["x"],
y=agent_state["rotation"]["y"],
z=agent_state["rotation"]["z"],
),
horizon=agent_state["cameraHorizon"],
)
)
if event.metadata["lastActionSuccess"] == False:
print("ERROR: oh no could not teleport in logging")
image_tensor = this_controller.last_event.frame
image_dir = (
img_adr + "_obj_" + object_id.split("|")[0] + "_pickup_" + tag + ".png"
)
cv2.imwrite(image_dir, image_tensor[:, :, [2, 1, 0]])
# Saving the mask
target_object_id = task_info["object_id"]
all_visible_masks = this_controller.last_event.instance_masks
if target_object_id in all_visible_masks:
mask_frame = all_visible_masks[target_object_id]
else:
mask_frame = np.zeros(env.controller.last_event.frame[:, :, 0].shape)
mask_dir = (
img_adr + "_obj_" + object_id.split("|")[0] + "_pickup_" + tag + "_mask.png"
)
cv2.imwrite(mask_dir, mask_frame.astype(float) * 255.0)
class ImageVisualizer(LoggerVisualizer):
def finish_episode(self, environment, episode_info, task_info):
now = datetime.now()
time_to_write = now.strftime("%m_%d_%Y_%H_%M_%S_%f")
time_to_write += "log_ind_{}".format(self.logger_index)
self.logger_index += 1
print("Loggigng", time_to_write, "len", len(self.log_queue))
object_id = task_info["objectId"]
pickup_success = episode_info.object_picked_up
episode_success = episode_info._success
# Put back if you want the images
# for i, img in enumerate(self.log_queue):
# image_dir = os.path.join(self.log_dir, time_to_write + '_seq{}.png'.format(str(i)))
# cv2.imwrite(image_dir, img[:,:,[2,1,0]])
episode_success_offset = "succ" if episode_success else "fail"
pickup_success_offset = "succ" if pickup_success else "fail"
gif_name = (
time_to_write
+ "_obj_"
+ object_id.split("|")[0]
+ "_pickup_"
+ pickup_success_offset
+ "_episode_"
+ episode_success_offset
+ ".gif"
)
concat_all_images = np.expand_dims(np.stack(self.log_queue, axis=0), axis=1)
save_image_list_to_gif(concat_all_images, gif_name, self.log_dir)
self.log_start_goal(
environment,
task_info["visualization_source"],
tag="start",
img_adr=os.path.join(self.log_dir, time_to_write),
)
self.log_start_goal(
environment,
task_info["visualization_target"],
tag="goal",
img_adr=os.path.join(self.log_dir, time_to_write),
)
self.log_queue = []
self.action_queue = []
def log(self, environment, action_str):
image_tensor = environment.current_frame
self.action_queue.append(action_str)
self.log_queue.append(image_tensor)
def log_start_goal(self, env, task_info, tag, img_adr):
object_location = task_info["object_location"]
object_id = task_info["object_id"]
agent_state = task_info["agent_pose"]
this_controller = env.controller
scene = this_controller.last_event.metadata[
"sceneName"
        ] # maybe we need to reset env actually
reset_environment_and_additional_commands(this_controller, scene)
# for start arm from high up as a cheating, this block is very important. never remove
event1, event2, event3 = initialize_arm(this_controller)
if not (
event1.metadata["lastActionSuccess"]
and event2.metadata["lastActionSuccess"]
and event3.metadata["lastActionSuccess"]
):
print("ERROR: ARM MOVEMENT FAILED in logging! SHOULD NEVER HAPPEN")
event = transport_wrapper(this_controller, object_id, object_location)
if event.metadata["lastActionSuccess"] == False:
print("ERROR: oh no could not transport in logging")
event = this_controller.step(
dict(
action="TeleportFull",
standing=True,
x=agent_state["position"]["x"],
y=agent_state["position"]["y"],
z=agent_state["position"]["z"],
rotation=dict(
x=agent_state["rotation"]["x"],
y=agent_state["rotation"]["y"],
z=agent_state["rotation"]["z"],
),
horizon=agent_state["cameraHorizon"],
)
)
if event.metadata["lastActionSuccess"] == False:
print("ERROR: oh no could not teleport in logging")
image_tensor = this_controller.last_event.frame
image_dir = (
img_adr + "_obj_" + object_id.split("|")[0] + "_pickup_" + tag + ".png"
)
cv2.imwrite(image_dir, image_tensor[:, :, [2, 1, 0]])
def save_image_list_to_gif(image_list, gif_name, gif_dir):
gif_adr = os.path.join(gif_dir, gif_name)
seq_len, cols, w, h, c = image_list.shape
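    # tile the column dimension side by side along the h axis so each GIF frame
    # contains all columns in a single row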
pallet = np.zeros((seq_len, w, h * cols, c))
for col_ind in range(cols):
pallet[:, :, col_ind * h : (col_ind + 1) * h, :] = image_list[:, col_ind]
if not os.path.exists(gif_dir):
os.makedirs(gif_dir)
imageio.mimsave(gif_adr, pallet.astype(np.uint8), format="GIF", duration=1 / 5)
print("Saved result in ", gif_adr)
|
[
"numpy.stack",
"allenact_plugins.manipulathor_plugin.manipulathor_utils.transport_wrapper",
"os.makedirs",
"cv2.imwrite",
"numpy.zeros",
"os.path.exists",
"datetime.datetime.now",
"allenact_plugins.manipulathor_plugin.manipulathor_utils.initialize_arm",
"allenact_plugins.manipulathor_plugin.manipulathor_utils.reset_environment_and_additional_commands",
"os.path.join"
] |
[((11803, 11834), 'os.path.join', 'os.path.join', (['gif_dir', 'gif_name'], {}), '(gif_dir, gif_name)\n', (11815, 11834), False, 'import os\n'), ((11896, 11931), 'numpy.zeros', 'np.zeros', (['(seq_len, w, h * cols, c)'], {}), '((seq_len, w, h * cols, c))\n', (11904, 11931), True, 'import numpy as np\n'), ((602, 616), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (614, 616), False, 'from datetime import datetime\n'), ((867, 907), 'os.makedirs', 'os.makedirs', (['self.log_dir'], {'exist_ok': '(True)'}), '(self.log_dir, exist_ok=True)\n', (878, 907), False, 'import os\n'), ((3265, 3279), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3277, 3279), False, 'from datetime import datetime\n'), ((4723, 4788), 'allenact_plugins.manipulathor_plugin.manipulathor_utils.reset_environment_and_additional_commands', 'reset_environment_and_additional_commands', (['this_controller', 'scene'], {}), '(this_controller, scene)\n', (4764, 4788), False, 'from allenact_plugins.manipulathor_plugin.manipulathor_utils import reset_environment_and_additional_commands, transport_wrapper\n'), ((5838, 5869), 'allenact_plugins.manipulathor_plugin.manipulathor_utils.initialize_arm', 'initialize_arm', (['this_controller'], {}), '(this_controller)\n', (5852, 5869), False, 'from allenact_plugins.manipulathor_plugin.manipulathor_utils import initialize_arm\n'), ((6150, 6212), 'allenact_plugins.manipulathor_plugin.manipulathor_utils.transport_wrapper', 'transport_wrapper', (['this_controller', 'object_id', 'object_location'], {}), '(this_controller, object_id, object_location)\n', (6167, 6212), False, 'from allenact_plugins.manipulathor_plugin.manipulathor_utils import reset_environment_and_additional_commands, transport_wrapper\n'), ((7192, 7245), 'cv2.imwrite', 'cv2.imwrite', (['image_dir', 'image_tensor[:, :, [2, 1, 0]]'], {}), '(image_dir, image_tensor[:, :, [2, 1, 0]])\n', (7203, 7245), False, 'import cv2\n'), ((7909, 7923), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (7921, 7923), False, 'from datetime import datetime\n'), ((10126, 10191), 'allenact_plugins.manipulathor_plugin.manipulathor_utils.reset_environment_and_additional_commands', 'reset_environment_and_additional_commands', (['this_controller', 'scene'], {}), '(this_controller, scene)\n', (10167, 10191), False, 'from allenact_plugins.manipulathor_plugin.manipulathor_utils import reset_environment_and_additional_commands, transport_wrapper\n'), ((10320, 10351), 'allenact_plugins.manipulathor_plugin.manipulathor_utils.initialize_arm', 'initialize_arm', (['this_controller'], {}), '(this_controller)\n', (10334, 10351), False, 'from allenact_plugins.manipulathor_plugin.manipulathor_utils import initialize_arm\n'), ((10632, 10694), 'allenact_plugins.manipulathor_plugin.manipulathor_utils.transport_wrapper', 'transport_wrapper', (['this_controller', 'object_id', 'object_location'], {}), '(this_controller, object_id, object_location)\n', (10649, 10694), False, 'from allenact_plugins.manipulathor_plugin.manipulathor_utils import reset_environment_and_additional_commands, transport_wrapper\n'), ((11674, 11727), 'cv2.imwrite', 'cv2.imwrite', (['image_dir', 'image_tensor[:, :, [2, 1, 0]]'], {}), '(image_dir, image_tensor[:, :, [2, 1, 0]])\n', (11685, 11727), False, 'import cv2\n'), ((12059, 12082), 'os.path.exists', 'os.path.exists', (['gif_dir'], {}), '(gif_dir)\n', (12073, 12082), False, 'import os\n'), ((12092, 12112), 'os.makedirs', 'os.makedirs', (['gif_dir'], {}), '(gif_dir)\n', (12103, 12112), False, 'import os\n'), ((4485, 
4517), 'numpy.stack', 'np.stack', (['self.log_queue'], {'axis': '(0)'}), '(self.log_queue, axis=0)\n', (4493, 4517), True, 'import numpy as np\n'), ((7543, 7599), 'numpy.zeros', 'np.zeros', (['env.controller.last_event.frame[:, :, 0].shape'], {}), '(env.controller.last_event.frame[:, :, 0].shape)\n', (7551, 7599), True, 'import numpy as np\n'), ((8988, 9020), 'numpy.stack', 'np.stack', (['self.log_queue'], {'axis': '(0)'}), '(self.log_queue, axis=0)\n', (8996, 9020), True, 'import numpy as np\n'), ((4935, 4976), 'os.path.join', 'os.path.join', (['self.log_dir', 'time_to_write'], {}), '(self.log_dir, time_to_write)\n', (4947, 4976), False, 'import os\n'), ((5133, 5174), 'os.path.join', 'os.path.join', (['self.log_dir', 'time_to_write'], {}), '(self.log_dir, time_to_write)\n', (5145, 5174), False, 'import os\n'), ((9251, 9292), 'os.path.join', 'os.path.join', (['self.log_dir', 'time_to_write'], {}), '(self.log_dir, time_to_write)\n', (9263, 9292), False, 'import os\n'), ((9449, 9490), 'os.path.join', 'os.path.join', (['self.log_dir', 'time_to_write'], {}), '(self.log_dir, time_to_write)\n', (9461, 9490), False, 'import os\n')]
|
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
plt.switch_backend('agg')
from utils import read_ZINC_smiles, smiles_to_onehot, convert_to_graph
from rdkit import Chem, DataStructs
from rdkit.Chem import AllChem
import sys
import time
# execution) python gcn_logP.py 3 64 256 0.001 gsc
# Default option
num_layer = 3
hidden_dim1 = 64
hidden_dim2 = 256
init_lr = 0.001
using_sc = 'gsc' # 'sc', 'gsc', 'no'
if( len(sys.argv) == 6 ):
# Note that sys.argv[0] is gcn_logP.py
num_layer = int(sys.argv[1])
hidden_dim1 = int(sys.argv[2])
hidden_dim2 = int(sys.argv[3])
init_lr = float(sys.argv[4])
    using_sc = sys.argv[5] # 'sc', 'gsc', 'no'
model_name = 'gcn_logP_' + str(num_layer) + '_' + str(hidden_dim1) + '_' + str(hidden_dim2) + '_' + str(init_lr) + '_' + using_sc
#1. Prepare data - X : fingerprint, Y : logP
# and split to (training:validation:test) set
smi_total, logP_total, tpsa_total = read_ZINC_smiles(50000)
num_train = 30000
num_validation = 10000
num_test = 10000
smi_train = smi_total[0:num_train]
logP_train = logP_total[0:num_train]
smi_validation = smi_total[num_train:(num_train+num_validation)]
logP_validation = logP_total[num_train:(num_train+num_validation)]
smi_test = smi_total[(num_train+num_validation):]
logP_test = logP_total[(num_train+num_validation):]
#2. Construct a neural network
def skip_connection(input_X, new_X, act):
# Skip-connection, H^(l+1)_sc = H^(l) + H^(l+1)
inp_dim = int(input_X.get_shape()[2])
out_dim = int(new_X.get_shape()[2])
if(inp_dim != out_dim):
output_X = act(new_X + tf.layers.dense(input_X, units=out_dim, use_bias=False))
else:
output_X = act(new_X + input_X)
return output_X
def gated_skip_connection(input_X, new_X, act):
# Skip-connection, H^(l+1)_gsc = z*H^(l) + (1-z)*H^(l+1)
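    # the gate z = sigmoid(W1*H^(l) + W2*H^(l+1) + b) is computed elementwise below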
inp_dim = int(input_X.get_shape()[2])
out_dim = int(new_X.get_shape()[2])
def get_gate_coefficient(input_X, new_X, out_dim):
X1 = tf.layers.dense(input_X, units=out_dim, use_bias=True)
X2 = tf.layers.dense(new_X, units=out_dim, use_bias=True)
gate_coefficient = tf.nn.sigmoid(X1 + X2)
return gate_coefficient
if(inp_dim != out_dim):
input_X = tf.layers.dense(input_X, units=out_dim, use_bias=False)
gate_coefficient = get_gate_coefficient(input_X, new_X, out_dim)
output_X = tf.multiply(new_X, gate_coefficient) + tf.multiply(input_X, 1.0-gate_coefficient)
return output_X
def graph_convolution(input_X, input_A, hidden_dim, act, using_sc):
# Graph Convolution, H^(l+1) = A{H^(l)W^(l)+b^(l))
output_X = tf.layers.dense(input_X,
units=hidden_dim,
use_bias=True,
activation=None,
kernel_initializer=tf.contrib.layers.xavier_initializer())
output_X = tf.matmul(input_A, output_X)
if( using_sc == 'sc' ):
output_X = skip_connection(input_X, output_X, act)
elif( using_sc == 'gsc' ):
output_X = gated_skip_connection(input_X, output_X, act)
elif( using_sc == 'no' ):
output_X = act(output_X)
else:
        output_X = gated_skip_connection(input_X, output_X, act)
return output_X
# Readout
def readout(input_X, hidden_dim, act):
# Readout, Z = sum_{v in G} NN(H^(L)_v)
output_Z = tf.layers.dense(input_X,
units=hidden_dim,
use_bias=True,
activation=None,
kernel_initializer=tf.contrib.layers.xavier_initializer())
output_Z = tf.reduce_sum(output_Z, axis=1)
output = act(output_Z)
    return output
num_atoms=50
num_features=58
X = tf.placeholder(tf.float64, shape=[None, num_atoms, num_features])
A = tf.placeholder(tf.float64, shape=[None, num_atoms, num_atoms])
Y = tf.placeholder(tf.float64, shape=[None, ])
is_training = tf.placeholder(tf.bool, shape=())
h = X
# Graph convolution layers
for i in range(num_layer):
h = graph_convolution(h,
A,
hidden_dim1,
tf.nn.relu,
using_sc)
# Readout layer
h = readout(h, hidden_dim2, tf.nn.sigmoid)
# Predictor composed of MLPs(multi-layer perceptron)
h = tf.layers.dense(h,
units=hidden_dim2,
use_bias=True,
activation=tf.nn.relu,
kernel_initializer=tf.contrib.layers.xavier_initializer())
h = tf.layers.dense(h,
units=hidden_dim2,
use_bias=True,
activation=tf.nn.tanh,
kernel_initializer=tf.contrib.layers.xavier_initializer())
Y_pred = tf.layers.dense(h,
units=1,
use_bias=True,
kernel_initializer=tf.contrib.layers.xavier_initializer())
#3. Set a loss function, in this case we will use a MSE-loss (l2-norm)
Y_pred = tf.reshape(Y_pred, shape=[-1,])
Y_pred = tf.cast(Y_pred, tf.float64)
Y = tf.cast(Y, tf.float64)
loss = tf.reduce_mean( (Y_pred - Y)**2 )
#4. Set an optimizer
lr = tf.Variable(0.0, trainable = False) # learning rate
opt = tf.train.AdamOptimizer(lr).minimize(loss) # Note that we use the Adam optimizer in this practice.
#opt = tf.train.GradientDescentOptimizer(lr).minimize(loss)
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)
saver = tf.train.Saver()
#5. Training & validation
batch_size = 100
epoch_size = 100
decay_rate = 0.95
batch_train = int(num_train/batch_size)
batch_validation = int(num_validation/batch_size)
batch_test = int(num_test/batch_size)
total_iter = 0
total_time = 0.0
for t in range(epoch_size):
pred_train = []
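    # exponential learning-rate decay per epoch: lr_t = init_lr * decay_rate**t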
sess.run(tf.assign( lr, init_lr*( decay_rate**t ) ))
st = time.time()
for i in range(batch_train):
total_iter += 1
smi_batch = smi_train[i*batch_size:(i+1)*batch_size]
X_batch, A_batch = convert_to_graph(smi_batch)
Y_batch = logP_train[i*batch_size:(i+1)*batch_size]
_opt, _Y, _loss = sess.run([opt, Y_pred, loss], feed_dict = {X : X_batch, A : A_batch, Y : Y_batch, is_training : True})
pred_train.append(_Y.flatten())
#print("Epoch :", t, "\t batch:", i, "Loss :", _loss, "\t Training")
pred_train = np.concatenate(pred_train, axis=0)
error = (logP_train-pred_train)
mae = np.mean(np.abs(error))
rmse = np.sqrt(np.mean(error**2))
stdv = np.std(error)
print ("MAE :", mae, "RMSE :", rmse, "Std :", stdv, "\t Training, \t Epoch :", t)
pred_validation = []
for i in range(batch_validation):
smi_batch = smi_validation[i*batch_size:(i+1)*batch_size]
X_batch, A_batch = convert_to_graph(smi_batch)
Y_batch = logP_validation[i*batch_size:(i+1)*batch_size]
_Y, _loss = sess.run([Y_pred, loss], feed_dict = {X : X_batch, A : A_batch, Y : Y_batch, is_training : False})
#print("Epoch :", t, "\t batch:", i, "Loss :", _loss, "\t validation")
pred_validation.append(_Y.flatten())
pred_validation = np.concatenate(pred_validation, axis=0)
error = (logP_validation-pred_validation)
mae = np.mean(np.abs(error))
rmse = np.sqrt(np.mean(error**2))
stdv = np.std(error)
et = time.time()
print ("MAE :", mae, "RMSE :", rmse, "Std :", stdv, "\t Validation, \t Epoch :", t, "\t Time per epoch", (et-st))
total_time += (et-st)
### save model
ckpt_path = 'save/'+model_name+'.ckpt'
saver.save(sess, ckpt_path, global_step=total_iter)
#6. Test
pred_test = []
for i in range(batch_test):
smi_batch = smi_test[i*batch_size:(i+1)*batch_size]
X_batch, A_batch = convert_to_graph(smi_batch)
Y_batch = logP_test[i*batch_size:(i+1)*batch_size]
_Y, _loss = sess.run([Y_pred, loss], feed_dict = {X : X_batch, A : A_batch, Y : Y_batch, is_training : False})
pred_test.append(_Y.flatten())
pred_test = np.concatenate(pred_test, axis=0)
error = (logP_test-pred_test)
mae = np.mean(np.abs(error))
rmse = np.sqrt(np.mean(error**2))
stdv = np.std(error)
print ("MSE :", mae, "RMSE :", rmse, "Std :", stdv, "\t Test", "\t Total time :", total_time)
plt.figure()
plt.scatter(logP_test, pred_test, s=3)
plt.xlabel('logP - Truth', fontsize=15)
plt.ylabel('logP - Prediction', fontsize=15)
x = np.arange(-4,6)
plt.plot(x,x,c='black')
plt.tight_layout()
plt.savefig('./figures/'+model_name+'_results.png')
|
[
"tensorflow.contrib.layers.xavier_initializer",
"tensorflow.reduce_sum",
"utils.read_ZINC_smiles",
"numpy.abs",
"tensorflow.reshape",
"tensorflow.train.AdamOptimizer",
"tensorflow.matmul",
"matplotlib.pyplot.figure",
"tensorflow.Variable",
"numpy.arange",
"numpy.mean",
"tensorflow.multiply",
"tensorflow.assign",
"matplotlib.pyplot.tight_layout",
"numpy.std",
"tensorflow.placeholder",
"tensorflow.cast",
"tensorflow.train.Saver",
"tensorflow.global_variables_initializer",
"tensorflow.reduce_mean",
"tensorflow.Session",
"matplotlib.pyplot.ylabel",
"utils.convert_to_graph",
"numpy.concatenate",
"matplotlib.pyplot.switch_backend",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.scatter",
"tensorflow.layers.dense",
"time.time",
"tensorflow.nn.sigmoid",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig"
] |
[((75, 100), 'matplotlib.pyplot.switch_backend', 'plt.switch_backend', (['"""agg"""'], {}), "('agg')\n", (93, 100), True, 'import matplotlib.pyplot as plt\n'), ((955, 978), 'utils.read_ZINC_smiles', 'read_ZINC_smiles', (['(50000)'], {}), '(50000)\n', (971, 978), False, 'from utils import read_ZINC_smiles, smiles_to_onehot, convert_to_graph\n'), ((3789, 3854), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float64'], {'shape': '[None, num_atoms, num_features]'}), '(tf.float64, shape=[None, num_atoms, num_features])\n', (3803, 3854), True, 'import tensorflow as tf\n'), ((3859, 3921), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float64'], {'shape': '[None, num_atoms, num_atoms]'}), '(tf.float64, shape=[None, num_atoms, num_atoms])\n', (3873, 3921), True, 'import tensorflow as tf\n'), ((3926, 3966), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float64'], {'shape': '[None]'}), '(tf.float64, shape=[None])\n', (3940, 3966), True, 'import tensorflow as tf\n'), ((3983, 4016), 'tensorflow.placeholder', 'tf.placeholder', (['tf.bool'], {'shape': '()'}), '(tf.bool, shape=())\n', (3997, 4016), True, 'import tensorflow as tf\n'), ((5081, 5111), 'tensorflow.reshape', 'tf.reshape', (['Y_pred'], {'shape': '[-1]'}), '(Y_pred, shape=[-1])\n', (5091, 5111), True, 'import tensorflow as tf\n'), ((5122, 5149), 'tensorflow.cast', 'tf.cast', (['Y_pred', 'tf.float64'], {}), '(Y_pred, tf.float64)\n', (5129, 5149), True, 'import tensorflow as tf\n'), ((5154, 5176), 'tensorflow.cast', 'tf.cast', (['Y', 'tf.float64'], {}), '(Y, tf.float64)\n', (5161, 5176), True, 'import tensorflow as tf\n'), ((5184, 5217), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['((Y_pred - Y) ** 2)'], {}), '((Y_pred - Y) ** 2)\n', (5198, 5217), True, 'import tensorflow as tf\n'), ((5245, 5278), 'tensorflow.Variable', 'tf.Variable', (['(0.0)'], {'trainable': '(False)'}), '(0.0, trainable=False)\n', (5256, 5278), True, 'import tensorflow as tf\n'), ((5469, 5481), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (5479, 5481), True, 'import tensorflow as tf\n'), ((5489, 5522), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (5520, 5522), True, 'import tensorflow as tf\n'), ((5546, 5562), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (5560, 5562), True, 'import tensorflow as tf\n'), ((8042, 8075), 'numpy.concatenate', 'np.concatenate', (['pred_test'], {'axis': '(0)'}), '(pred_test, axis=0)\n', (8056, 8075), True, 'import numpy as np\n'), ((8176, 8189), 'numpy.std', 'np.std', (['error'], {}), '(error)\n', (8182, 8189), True, 'import numpy as np\n'), ((8286, 8298), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (8296, 8298), True, 'import matplotlib.pyplot as plt\n'), ((8299, 8337), 'matplotlib.pyplot.scatter', 'plt.scatter', (['logP_test', 'pred_test'], {'s': '(3)'}), '(logP_test, pred_test, s=3)\n', (8310, 8337), True, 'import matplotlib.pyplot as plt\n'), ((8338, 8377), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""logP - Truth"""'], {'fontsize': '(15)'}), "('logP - Truth', fontsize=15)\n", (8348, 8377), True, 'import matplotlib.pyplot as plt\n'), ((8378, 8422), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""logP - Prediction"""'], {'fontsize': '(15)'}), "('logP - Prediction', fontsize=15)\n", (8388, 8422), True, 'import matplotlib.pyplot as plt\n'), ((8427, 8443), 'numpy.arange', 'np.arange', (['(-4)', '(6)'], {}), '(-4, 6)\n', (8436, 8443), True, 'import numpy as np\n'), ((8443, 8468), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'x'], {'c': 
'"""black"""'}), "(x, x, c='black')\n", (8451, 8468), True, 'import matplotlib.pyplot as plt\n'), ((8467, 8485), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (8483, 8485), True, 'import matplotlib.pyplot as plt\n'), ((8486, 8541), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('./figures/' + model_name + '_results.png')"], {}), "('./figures/' + model_name + '_results.png')\n", (8497, 8541), True, 'import matplotlib.pyplot as plt\n'), ((2920, 2948), 'tensorflow.matmul', 'tf.matmul', (['input_A', 'output_X'], {}), '(input_A, output_X)\n', (2929, 2948), True, 'import tensorflow as tf\n'), ((3674, 3705), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['output_Z'], {'axis': '(1)'}), '(output_Z, axis=1)\n', (3687, 3705), True, 'import tensorflow as tf\n'), ((5918, 5929), 'time.time', 'time.time', ([], {}), '()\n', (5927, 5929), False, 'import time\n'), ((6426, 6460), 'numpy.concatenate', 'np.concatenate', (['pred_train'], {'axis': '(0)'}), '(pred_train, axis=0)\n', (6440, 6460), True, 'import numpy as np\n'), ((6579, 6592), 'numpy.std', 'np.std', (['error'], {}), '(error)\n', (6585, 6592), True, 'import numpy as np\n'), ((7195, 7234), 'numpy.concatenate', 'np.concatenate', (['pred_validation'], {'axis': '(0)'}), '(pred_validation, axis=0)\n', (7209, 7234), True, 'import numpy as np\n'), ((7363, 7376), 'numpy.std', 'np.std', (['error'], {}), '(error)\n', (7369, 7376), True, 'import numpy as np\n'), ((7387, 7398), 'time.time', 'time.time', ([], {}), '()\n', (7396, 7398), False, 'import time\n'), ((7796, 7823), 'utils.convert_to_graph', 'convert_to_graph', (['smi_batch'], {}), '(smi_batch)\n', (7812, 7823), False, 'from utils import read_ZINC_smiles, smiles_to_onehot, convert_to_graph\n'), ((8120, 8133), 'numpy.abs', 'np.abs', (['error'], {}), '(error)\n', (8126, 8133), True, 'import numpy as np\n'), ((8150, 8169), 'numpy.mean', 'np.mean', (['(error ** 2)'], {}), '(error ** 2)\n', (8157, 8169), True, 'import numpy as np\n'), ((2012, 2066), 'tensorflow.layers.dense', 'tf.layers.dense', (['input_X'], {'units': 'out_dim', 'use_bias': '(True)'}), '(input_X, units=out_dim, use_bias=True)\n', (2027, 2066), True, 'import tensorflow as tf\n'), ((2080, 2132), 'tensorflow.layers.dense', 'tf.layers.dense', (['new_X'], {'units': 'out_dim', 'use_bias': '(True)'}), '(new_X, units=out_dim, use_bias=True)\n', (2095, 2132), True, 'import tensorflow as tf\n'), ((2160, 2182), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['(X1 + X2)'], {}), '(X1 + X2)\n', (2173, 2182), True, 'import tensorflow as tf\n'), ((2263, 2318), 'tensorflow.layers.dense', 'tf.layers.dense', (['input_X'], {'units': 'out_dim', 'use_bias': '(False)'}), '(input_X, units=out_dim, use_bias=False)\n', (2278, 2318), True, 'import tensorflow as tf\n'), ((2404, 2440), 'tensorflow.multiply', 'tf.multiply', (['new_X', 'gate_coefficient'], {}), '(new_X, gate_coefficient)\n', (2415, 2440), True, 'import tensorflow as tf\n'), ((2443, 2487), 'tensorflow.multiply', 'tf.multiply', (['input_X', '(1.0 - gate_coefficient)'], {}), '(input_X, 1.0 - gate_coefficient)\n', (2454, 2487), True, 'import tensorflow as tf\n'), ((4548, 4586), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), '()\n', (4584, 4586), True, 'import tensorflow as tf\n'), ((4771, 4809), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), '()\n', (4807, 4809), True, 'import tensorflow as tf\n'), ((4960, 4998), 'tensorflow.contrib.layers.xavier_initializer', 
'tf.contrib.layers.xavier_initializer', ([], {}), '()\n', (4996, 4998), True, 'import tensorflow as tf\n'), ((5304, 5330), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['lr'], {}), '(lr)\n', (5326, 5330), True, 'import tensorflow as tf\n'), ((5865, 5905), 'tensorflow.assign', 'tf.assign', (['lr', '(init_lr * decay_rate ** t)'], {}), '(lr, init_lr * decay_rate ** t)\n', (5874, 5905), True, 'import tensorflow as tf\n'), ((6075, 6102), 'utils.convert_to_graph', 'convert_to_graph', (['smi_batch'], {}), '(smi_batch)\n', (6091, 6102), False, 'from utils import read_ZINC_smiles, smiles_to_onehot, convert_to_graph\n'), ((6515, 6528), 'numpy.abs', 'np.abs', (['error'], {}), '(error)\n', (6521, 6528), True, 'import numpy as np\n'), ((6549, 6568), 'numpy.mean', 'np.mean', (['(error ** 2)'], {}), '(error ** 2)\n', (6556, 6568), True, 'import numpy as np\n'), ((6836, 6863), 'utils.convert_to_graph', 'convert_to_graph', (['smi_batch'], {}), '(smi_batch)\n', (6852, 6863), False, 'from utils import read_ZINC_smiles, smiles_to_onehot, convert_to_graph\n'), ((7299, 7312), 'numpy.abs', 'np.abs', (['error'], {}), '(error)\n', (7305, 7312), True, 'import numpy as np\n'), ((7333, 7352), 'numpy.mean', 'np.mean', (['(error ** 2)'], {}), '(error ** 2)\n', (7340, 7352), True, 'import numpy as np\n'), ((2865, 2903), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), '()\n', (2901, 2903), True, 'import tensorflow as tf\n'), ((3619, 3657), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), '()\n', (3655, 3657), True, 'import tensorflow as tf\n'), ((1614, 1669), 'tensorflow.layers.dense', 'tf.layers.dense', (['input_X'], {'units': 'out_dim', 'use_bias': '(False)'}), '(input_X, units=out_dim, use_bias=False)\n', (1629, 1669), True, 'import tensorflow as tf\n')]
|
import numpy as np
from ROI_Arrival import ROI_Arrival,ROI_Location
#prefined imports
import sys,time,winsound
import numpy as np
from PyQt5.QtWidgets import (QApplication, QPushButton,QWidget,QGridLayout,
QSizePolicy,QLineEdit,
QMainWindow,QAction,QVBoxLayout
,QDockWidget,QListView,
QAbstractItemView,QLabel,QFileDialog,QTextEdit,
QInputDialog,QSlider,QMdiArea,QMdiSubWindow,
QMessageBox)
from PyQt5.QtGui import QFont
from PyQt5.QtCore import Qt
#import numpy as np
from matplotlib.figure import Figure
from matplotlib.backends.backend_qt5agg import (
FigureCanvas, NavigationToolbar2QT as NavigationToolbar)
class ROI_Viewer(QMainWindow):
done=False
def __init__(self,list_time,list_channel,sync_time,calibration):
super().__init__()
self.num_sync=sync_time.size
self.num_pulses=list_time.size
self.list_time,self.list_channel=list_time,list_channel
self.sync_time,self.calibration=sync_time,calibration
self.sync_delta=sync_time[2]-sync_time[1]
self.lower,self.upper=9.5,10.9
self.font1=QFont()
self.font1.setPointSize(12)
self.size_policy=QSizePolicy.Expanding
self.menu()
self.showMaximized()
self.setWindowTitle('ROI Timing Arrival')
self.geometry()
# self.process()
self.show()
def menu(self):
self.menuFile=self.menuBar().addMenu('&File')
self.save_file=QAction('&Save Spectrum')
self.save_file.triggered.connect(self.save_spectrum)
self.save_file.setShortcut('CTRL+S')
self.save_file.setEnabled(False)
# self.save_roi=QAction('&Save ROI')
# self.save_roi.triggered.connect(self.save_roi_csv)
# self.save_roi.setEnabled(True)
self.menuFile.addActions([self.save_file])
def geometry(self):
r1_label=QLabel(r'Region 1-2 divider: [us]')
r1_label.setFont(self.font1)
r2_label=QLabel(r'Region 2-3 divider: [us]')
r2_label.setFont(self.font1)
self.r_1_slider=QSlider(Qt.Horizontal)
self.r_1_slider.setSizePolicy(self.size_policy,self.size_policy)
self.r_1_slider.setMinimum(0)
self.r_1_slider.setMaximum(self.sync_delta-1)
self.r_1_slider.setSingleStep(1)
self.r_1_slider.setTickInterval(50)
self.r_1_slider.setValue(100)
self.r_1_slider.setTickPosition(QSlider.TicksBelow)
self.r_1_slider.valueChanged.connect(self.update_r_1)
self.r_1_slider.setFont(self.font1)
self.r_2_slider=QSlider(Qt.Horizontal)
self.r_2_slider.setSizePolicy(self.size_policy,self.size_policy)
self.r_2_slider.setMinimum(101)
self.r_2_slider.setMaximum(self.sync_delta)
self.r_2_slider.setSingleStep(1)
self.r_2_slider.setTickInterval(50)
self.r_2_slider.setValue(101)
self.r_2_slider.setTickPosition(QSlider.TicksBelow)
self.r_2_slider.valueChanged.connect(self.update_r_2)
self.r_2_slider.setFont(self.font1)
self.r_1_label=QLabel(self)
self.r_1_label.setSizePolicy(self.size_policy,self.size_policy)
self.r_1_label.setText(str(self.r_1_slider.value()))
self.r_1_label.setFont(self.font1)
self.r_2_label=QLabel(self)
self.r_2_label.setSizePolicy(self.size_policy,self.size_policy)
self.r_2_label.setText(str(self.r_2_slider.value()))
self.r_2_label.setFont(self.font1)
self.processer=QPushButton('Process',self)
self.processer.clicked.connect(self.process)
self.processer.setFont(self.font1)
lower_label=QLabel('Lower ROI: [MeV]',self)
lower_label.setFont(self.font1)
upper_label=QLabel('Upper ROI: [MeV]',self)
upper_label.setFont(self.font1)
self.lower_text=QLineEdit(self)
self.lower_text.setFont(self.font1)
self.lower_text.setText(str(self.lower))
self.upper_text=QLineEdit(self)
self.upper_text.setFont(self.font1)
self.upper_text.setText(str(self.upper))
self.time_plot=QWidget()
self.time_figure=Figure()
self.time_canvas=FigureCanvas(self.time_figure)
self.time_toolbar=NavigationToolbar(self.time_canvas,self)
layout=QVBoxLayout()
layout.addWidget(self.time_toolbar)
layout.addWidget(self.time_canvas)
self.time_plot.setLayout(layout)
self.time_ax=self.time_canvas.figure.subplots()
self.time_ax.set_title('Time')
main_=QWidget()
layout=QGridLayout(self)
layout.addWidget(r1_label,0,0)
layout.addWidget(self.r_1_slider,0,1)
layout.addWidget(self.r_1_label,0,2)
layout.addWidget(lower_label,0,3)
layout.addWidget(self.lower_text,0,4)
layout.addWidget(upper_label,1,3)
layout.addWidget(self.upper_text,1,4)
layout.addWidget(r2_label,1,0)
layout.addWidget(self.r_2_slider,1,1)
layout.addWidget(self.r_2_label,1,2)
layout.addWidget(self.processer,2,0)
layout.addWidget(self.time_plot,3,0,1,5)
main_.setLayout(layout)
self.setCentralWidget(main_)
def update_r_1(self):
self.r_2_slider.setMinimum(self.r_1_slider.value()+1)
self.r_1_label.setText(str(self.r_1_slider.value()))
def update_r_2(self):
self.r_2_label.setText(str(self.r_2_slider.value()))
def process(self):
self.save_file.setEnabled(True)
# self.save_roi.setEnabled(True)
s1=time.time()
delt=(self.sync_time[2]-self.sync_time[1])
self.lower=float(self.lower_text.text())
self.upper=float(self.upper_text.text())
self.arrival,self.height,self.raw=ROI_Arrival(self.sync_time,self.list_time,
self.num_sync,self.list_channel,
self.num_pulses,self.lower,
self.upper,self.calibration)
num_bins=int(delt/4)
bins=np.linspace(0,delt,num_bins)
self.bins=bins
s=len(self.arrival)
self.output=ROI_Location(self.arrival,bins,num_bins,s)
r1,r2,r3=0,0,0
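        # r1/r2/r3 accumulate arrival counts below, between and above the two region dividers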
print('Process ROI Arrivals in {:.3f}s'.format(time.time()-s1))
for i in range(num_bins):
if bins[i]<=self.r_1_slider.value():
r1+=self.output[i]
elif bins[i]>self.r_1_slider.value() and bins[i]<=self.r_2_slider.value():
r2+=self.output[i]
else:
r3+=self.output[i]
self.time_ax.clear()
self.time_ax.plot(bins,self.output,'r*')
self.time_ax.axvline(self.r_1_slider.value(),label='Region 1-2 divider at {:.2f}'.format(self.r_1_slider.value()))
self.time_ax.axvline(self.r_2_slider.value(),label='Region 2-3 divider at {:.2f}'.format(self.r_2_slider.value()))
# self.time_ax.set_yscale('log')
self.time_ax.set_ylabel('Counts',fontsize=18)
self.time_ax.set_xlabel(r'Arrival Time [$\mu s$]',fontsize=18)
self.time_canvas.draw()
self.done=True
self.percentages=[r1/(r1+r2+r3)*100,
r2/(r1+r2+r3)*100,
r3/(r1+r2+r3)*100]
QMessageBox.information(self,
                                    'ROI Percentages','''Region 1:{:.2f}%\nRegion 2:{:.2f}%\nRegion 3:{:.2f}%'''.format(
r1/(r1+r2+r3)*100,
r2/(r1+r2+r3)*100,r3/(r1+r2+r3)*100),
QMessageBox.Ok)
# print('Region 1 total ROI percentage: {:.2f}%'.format(r1/(r1+r2+r3)*100))
# print('Region 2 total ROI percentage: {:.2f}%'.format(r2/(r1+r2+r3)*100))
# print('Region 3 total ROI percentage: {:.2f}%'.format(r3/(r1+r2+r3)*100))
def save_spectrum(self):
name=QFileDialog.getSaveFileName(self,'File Name','',
'Text File (*.txt);;Comma Seperated File (*.csv)')
        if name[0]!='':
f=open(name[0],'w')
f.write('%{:.2f},{:.2f},{:.2f}\n'.format(*self.percentages))
for i in range(len(self.bins)):
f.write('{:.6f},{}\n'.format(self.bins[i],self.output[i]))
f.close()
# def save_roi_csv(self):
# name,ok=QFileDialog.getSaveFileName(self,'Safe File Name','',
# 'Comma Seperated File (*.csv)')
# if ok:
# f=open(name,'w')
# f.write('Pulse_Height(MeV),Time(s)\n')
# print(len(self.height))
# for i in range(len(self.height)):
# f.write('{:.3f},{:.3f}\n'.format(self.height[i],self.raw[i]*1e-6))
# f.close()
# print('All finished')
|
[
"PyQt5.QtWidgets.QLabel",
"ROI_Arrival.ROI_Arrival",
"ROI_Arrival.ROI_Location",
"PyQt5.QtWidgets.QWidget",
"PyQt5.QtWidgets.QGridLayout",
"PyQt5.QtWidgets.QPushButton",
"PyQt5.QtWidgets.QLineEdit",
"PyQt5.QtGui.QFont",
"time.time",
"matplotlib.backends.backend_qt5agg.FigureCanvas",
"matplotlib.figure.Figure",
"PyQt5.QtWidgets.QVBoxLayout",
"PyQt5.QtWidgets.QFileDialog.getSaveFileName",
"matplotlib.backends.backend_qt5agg.NavigationToolbar2QT",
"PyQt5.QtWidgets.QSlider",
"numpy.linspace",
"PyQt5.QtWidgets.QAction"
] |
[((1247, 1254), 'PyQt5.QtGui.QFont', 'QFont', ([], {}), '()\n', (1252, 1254), False, 'from PyQt5.QtGui import QFont\n'), ((1611, 1636), 'PyQt5.QtWidgets.QAction', 'QAction', (['"""&Save Spectrum"""'], {}), "('&Save Spectrum')\n", (1618, 1636), False, 'from PyQt5.QtWidgets import QApplication, QPushButton, QWidget, QGridLayout, QSizePolicy, QLineEdit, QMainWindow, QAction, QVBoxLayout, QDockWidget, QListView, QAbstractItemView, QLabel, QFileDialog, QTextEdit, QInputDialog, QSlider, QMdiArea, QMdiSubWindow, QMessageBox\n'), ((2041, 2075), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""Region 1-2 divider: [us]"""'], {}), "('Region 1-2 divider: [us]')\n", (2047, 2075), False, 'from PyQt5.QtWidgets import QApplication, QPushButton, QWidget, QGridLayout, QSizePolicy, QLineEdit, QMainWindow, QAction, QVBoxLayout, QDockWidget, QListView, QAbstractItemView, QLabel, QFileDialog, QTextEdit, QInputDialog, QSlider, QMdiArea, QMdiSubWindow, QMessageBox\n'), ((2131, 2165), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""Region 2-3 divider: [us]"""'], {}), "('Region 2-3 divider: [us]')\n", (2137, 2165), False, 'from PyQt5.QtWidgets import QApplication, QPushButton, QWidget, QGridLayout, QSizePolicy, QLineEdit, QMainWindow, QAction, QVBoxLayout, QDockWidget, QListView, QAbstractItemView, QLabel, QFileDialog, QTextEdit, QInputDialog, QSlider, QMdiArea, QMdiSubWindow, QMessageBox\n'), ((2237, 2259), 'PyQt5.QtWidgets.QSlider', 'QSlider', (['Qt.Horizontal'], {}), '(Qt.Horizontal)\n', (2244, 2259), False, 'from PyQt5.QtWidgets import QApplication, QPushButton, QWidget, QGridLayout, QSizePolicy, QLineEdit, QMainWindow, QAction, QVBoxLayout, QDockWidget, QListView, QAbstractItemView, QLabel, QFileDialog, QTextEdit, QInputDialog, QSlider, QMdiArea, QMdiSubWindow, QMessageBox\n'), ((2747, 2769), 'PyQt5.QtWidgets.QSlider', 'QSlider', (['Qt.Horizontal'], {}), '(Qt.Horizontal)\n', (2754, 2769), False, 'from PyQt5.QtWidgets import QApplication, QPushButton, QWidget, QGridLayout, QSizePolicy, QLineEdit, QMainWindow, QAction, QVBoxLayout, QDockWidget, QListView, QAbstractItemView, QLabel, QFileDialog, QTextEdit, QInputDialog, QSlider, QMdiArea, QMdiSubWindow, QMessageBox\n'), ((3256, 3268), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['self'], {}), '(self)\n', (3262, 3268), False, 'from PyQt5.QtWidgets import QApplication, QPushButton, QWidget, QGridLayout, QSizePolicy, QLineEdit, QMainWindow, QAction, QVBoxLayout, QDockWidget, QListView, QAbstractItemView, QLabel, QFileDialog, QTextEdit, QInputDialog, QSlider, QMdiArea, QMdiSubWindow, QMessageBox\n'), ((3468, 3480), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['self'], {}), '(self)\n', (3474, 3480), False, 'from PyQt5.QtWidgets import QApplication, QPushButton, QWidget, QGridLayout, QSizePolicy, QLineEdit, QMainWindow, QAction, QVBoxLayout, QDockWidget, QListView, QAbstractItemView, QLabel, QFileDialog, QTextEdit, QInputDialog, QSlider, QMdiArea, QMdiSubWindow, QMessageBox\n'), ((3697, 3725), 'PyQt5.QtWidgets.QPushButton', 'QPushButton', (['"""Process"""', 'self'], {}), "('Process', self)\n", (3708, 3725), False, 'from PyQt5.QtWidgets import QApplication, QPushButton, QWidget, QGridLayout, QSizePolicy, QLineEdit, QMainWindow, QAction, QVBoxLayout, QDockWidget, QListView, QAbstractItemView, QLabel, QFileDialog, QTextEdit, QInputDialog, QSlider, QMdiArea, QMdiSubWindow, QMessageBox\n'), ((3850, 3882), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""Lower ROI: [MeV]"""', 'self'], {}), "('Lower ROI: [MeV]', self)\n", (3856, 3882), False, 'from PyQt5.QtWidgets import QApplication, QPushButton, QWidget, 
QGridLayout, QSizePolicy, QLineEdit, QMainWindow, QAction, QVBoxLayout, QDockWidget, QListView, QAbstractItemView, QLabel, QFileDialog, QTextEdit, QInputDialog, QSlider, QMdiArea, QMdiSubWindow, QMessageBox\n'), ((3942, 3974), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""Upper ROI: [MeV]"""', 'self'], {}), "('Upper ROI: [MeV]', self)\n", (3948, 3974), False, 'from PyQt5.QtWidgets import QApplication, QPushButton, QWidget, QGridLayout, QSizePolicy, QLineEdit, QMainWindow, QAction, QVBoxLayout, QDockWidget, QListView, QAbstractItemView, QLabel, QFileDialog, QTextEdit, QInputDialog, QSlider, QMdiArea, QMdiSubWindow, QMessageBox\n'), ((4047, 4062), 'PyQt5.QtWidgets.QLineEdit', 'QLineEdit', (['self'], {}), '(self)\n', (4056, 4062), False, 'from PyQt5.QtWidgets import QApplication, QPushButton, QWidget, QGridLayout, QSizePolicy, QLineEdit, QMainWindow, QAction, QVBoxLayout, QDockWidget, QListView, QAbstractItemView, QLabel, QFileDialog, QTextEdit, QInputDialog, QSlider, QMdiArea, QMdiSubWindow, QMessageBox\n'), ((4180, 4195), 'PyQt5.QtWidgets.QLineEdit', 'QLineEdit', (['self'], {}), '(self)\n', (4189, 4195), False, 'from PyQt5.QtWidgets import QApplication, QPushButton, QWidget, QGridLayout, QSizePolicy, QLineEdit, QMainWindow, QAction, QVBoxLayout, QDockWidget, QListView, QAbstractItemView, QLabel, QFileDialog, QTextEdit, QInputDialog, QSlider, QMdiArea, QMdiSubWindow, QMessageBox\n'), ((4321, 4330), 'PyQt5.QtWidgets.QWidget', 'QWidget', ([], {}), '()\n', (4328, 4330), False, 'from PyQt5.QtWidgets import QApplication, QPushButton, QWidget, QGridLayout, QSizePolicy, QLineEdit, QMainWindow, QAction, QVBoxLayout, QDockWidget, QListView, QAbstractItemView, QLabel, QFileDialog, QTextEdit, QInputDialog, QSlider, QMdiArea, QMdiSubWindow, QMessageBox\n'), ((4356, 4364), 'matplotlib.figure.Figure', 'Figure', ([], {}), '()\n', (4362, 4364), False, 'from matplotlib.figure import Figure\n'), ((4390, 4420), 'matplotlib.backends.backend_qt5agg.FigureCanvas', 'FigureCanvas', (['self.time_figure'], {}), '(self.time_figure)\n', (4402, 4420), False, 'from matplotlib.backends.backend_qt5agg import FigureCanvas, NavigationToolbar2QT as NavigationToolbar\n'), ((4447, 4488), 'matplotlib.backends.backend_qt5agg.NavigationToolbar2QT', 'NavigationToolbar', (['self.time_canvas', 'self'], {}), '(self.time_canvas, self)\n', (4464, 4488), True, 'from matplotlib.backends.backend_qt5agg import FigureCanvas, NavigationToolbar2QT as NavigationToolbar\n'), ((4503, 4516), 'PyQt5.QtWidgets.QVBoxLayout', 'QVBoxLayout', ([], {}), '()\n', (4514, 4516), False, 'from PyQt5.QtWidgets import QApplication, QPushButton, QWidget, QGridLayout, QSizePolicy, QLineEdit, QMainWindow, QAction, QVBoxLayout, QDockWidget, QListView, QAbstractItemView, QLabel, QFileDialog, QTextEdit, QInputDialog, QSlider, QMdiArea, QMdiSubWindow, QMessageBox\n'), ((4763, 4772), 'PyQt5.QtWidgets.QWidget', 'QWidget', ([], {}), '()\n', (4770, 4772), False, 'from PyQt5.QtWidgets import QApplication, QPushButton, QWidget, QGridLayout, QSizePolicy, QLineEdit, QMainWindow, QAction, QVBoxLayout, QDockWidget, QListView, QAbstractItemView, QLabel, QFileDialog, QTextEdit, QInputDialog, QSlider, QMdiArea, QMdiSubWindow, QMessageBox\n'), ((4788, 4805), 'PyQt5.QtWidgets.QGridLayout', 'QGridLayout', (['self'], {}), '(self)\n', (4799, 4805), False, 'from PyQt5.QtWidgets import QApplication, QPushButton, QWidget, QGridLayout, QSizePolicy, QLineEdit, QMainWindow, QAction, QVBoxLayout, QDockWidget, QListView, QAbstractItemView, QLabel, QFileDialog, QTextEdit, QInputDialog, QSlider, 
QMdiArea, QMdiSubWindow, QMessageBox\n'), ((5783, 5794), 'time.time', 'time.time', ([], {}), '()\n', (5792, 5794), False, 'import sys, time, winsound\n'), ((5986, 6127), 'ROI_Arrival.ROI_Arrival', 'ROI_Arrival', (['self.sync_time', 'self.list_time', 'self.num_sync', 'self.list_channel', 'self.num_pulses', 'self.lower', 'self.upper', 'self.calibration'], {}), '(self.sync_time, self.list_time, self.num_sync, self.\n list_channel, self.num_pulses, self.lower, self.upper, self.calibration)\n', (5997, 6127), False, 'from ROI_Arrival import ROI_Arrival, ROI_Location\n'), ((6296, 6326), 'numpy.linspace', 'np.linspace', (['(0)', 'delt', 'num_bins'], {}), '(0, delt, num_bins)\n', (6307, 6326), True, 'import numpy as np\n'), ((6396, 6441), 'ROI_Arrival.ROI_Location', 'ROI_Location', (['self.arrival', 'bins', 'num_bins', 's'], {}), '(self.arrival, bins, num_bins, s)\n', (6408, 6441), False, 'from ROI_Arrival import ROI_Arrival, ROI_Location\n'), ((8140, 8245), 'PyQt5.QtWidgets.QFileDialog.getSaveFileName', 'QFileDialog.getSaveFileName', (['self', '"""File Name"""', '""""""', '"""Text File (*.txt);;Comma Seperated File (*.csv)"""'], {}), "(self, 'File Name', '',\n 'Text File (*.txt);;Comma Seperated File (*.csv)')\n", (8167, 8245), False, 'from PyQt5.QtWidgets import QApplication, QPushButton, QWidget, QGridLayout, QSizePolicy, QLineEdit, QMainWindow, QAction, QVBoxLayout, QDockWidget, QListView, QAbstractItemView, QLabel, QFileDialog, QTextEdit, QInputDialog, QSlider, QMdiArea, QMdiSubWindow, QMessageBox\n'), ((6517, 6528), 'time.time', 'time.time', ([], {}), '()\n', (6526, 6528), False, 'import sys, time, winsound\n')]
|
from corner import corner
import numpy as np
CORNER_KWARGS = dict(
smooth=0.9,
label_kwargs=dict(fontsize=30),
title_kwargs=dict(fontsize=16),
color="tab:blue",
truth_color="tab:orange",
quantiles=[0.16, 0.84],
levels=(1 - np.exp(-0.5), 1 - np.exp(-2), 1 - np.exp(-9.0 / 2.0)),
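    # contour levels enclosing 1, 2 and 3 sigma of a 2D Gaussian: 1 - exp(-n**2 / 2)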
plot_density=False,
plot_datapoints=False,
fill_contours=True,
max_n_ticks=3,
verbose=False,
use_math_text=True,
)
LABELS = dict(
q=r"$q$",
xeff=r"$\chi_{\rm eff}$",
a_1=r"$a_1$",
a_2=r"$a_2$",
cos_tilt_1=r"$\cos \theta_1$",
cos_tilt_2=r"$\cos \theta_2$",
)
def plot_corner(df, fname="corner.png"):
labels = [LABELS.get(i, i.replace("_", "")) for i in df.columns.values]
fig = corner(df, labels=labels, **CORNER_KWARGS)
fig.savefig(fname)
|
[
"corner.corner",
"numpy.exp"
] |
[((742, 784), 'corner.corner', 'corner', (['df'], {'labels': 'labels'}), '(df, labels=labels, **CORNER_KWARGS)\n', (748, 784), False, 'from corner import corner\n'), ((252, 264), 'numpy.exp', 'np.exp', (['(-0.5)'], {}), '(-0.5)\n', (258, 264), True, 'import numpy as np\n'), ((270, 280), 'numpy.exp', 'np.exp', (['(-2)'], {}), '(-2)\n', (276, 280), True, 'import numpy as np\n'), ((286, 304), 'numpy.exp', 'np.exp', (['(-9.0 / 2.0)'], {}), '(-9.0 / 2.0)\n', (292, 304), True, 'import numpy as np\n')]
|
#Ref: <NAME>
"""
# TTA - should really be called prediction-time augmentation:
# we augment each input image, predict on the augmented copies and average all the predictions
"""
import os
import cv2
from PIL import Image
import numpy as np
from matplotlib import pyplot as plt
import tensorflow as tf
import random
model = tf.keras.models.load_model("mitochondria_load_from_disk_focal_dice_50epochs.hdf5", compile=False)
image_directory = 'data2/test_images/test/'
mask_directory = 'data2/test_masks/test/'
SIZE = 256
image_dataset = []
mask_dataset = []
images = os.listdir(image_directory)
for i, image_name in enumerate(images): #Remember enumerate method adds a counter and returns the enumerate object
if (image_name.split('.')[1] == 'tif'):
#print(image_directory+image_name)
image = cv2.imread(image_directory+image_name)
image = Image.fromarray(image)
image = image.resize((SIZE, SIZE))
image_dataset.append(np.array(image))
#Iterate through all mask images, resize to SIZE x SIZE
#Then save into the mask_dataset numpy array
masks = os.listdir(mask_directory)
for i, image_name in enumerate(masks):
if (image_name.split('.')[1] == 'tif'):
image = cv2.imread(mask_directory+image_name, 0)
image = Image.fromarray(image)
image = image.resize((SIZE, SIZE))
mask_dataset.append(np.array(image))
#
image_dataset = np.array(image_dataset) / 255.
#Do not normalize masks, just rescale to 0 to 1.
mask_dataset = (np.array(mask_dataset)) /255.
#Demonstrate TTA on a single image
n = random.randint(0, mask_dataset.shape[0]-1)
temp_test_img = image_dataset[n,:,:,:]
temp_mask = mask_dataset[n,:,:]
p0 = model.predict(np.expand_dims(temp_test_img, axis=0))[0][:, :, 0]
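# predictions on flipped inputs are flipped back to the original orientation before averaging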
p1 = model.predict(np.expand_dims(np.fliplr(temp_test_img), axis=0))[0][:, :, 0]
p1 = np.fliplr(p1)
p2 = model.predict(np.expand_dims(np.flipud(temp_test_img), axis=0))[0][:, :, 0]
p2 = np.flipud(p2)
p3 = model.predict(np.expand_dims(np.fliplr(np.flipud(temp_test_img)), axis=0))[0][:, :, 0]
p3 = np.fliplr(np.flipud(p3))
thresh = 0.3
p = (((p0 + p1 + p2 + p3) / 4) > thresh).astype(np.uint8)
plt.figure(figsize=(12, 12))
plt.subplot(231)
plt.title('Original mask')
plt.imshow(temp_mask, cmap='gray')
plt.subplot(232)
plt.title('Prediction No Aug')
plt.imshow(p0>thresh, cmap='gray')
plt.subplot(233)
plt.title('Prediction LR')
plt.imshow(p1>thresh, cmap='gray')
plt.subplot(234)
plt.title('Prediction UD')
plt.imshow(p2>thresh, cmap='gray')
plt.subplot(235)
plt.title('Prediction LR and UD')
plt.imshow(p3>thresh, cmap='gray')
plt.subplot(236)
plt.title('Average Prediction')
plt.imshow(p>thresh, cmap='gray')
plt.show()
#Now that we know the transformations are working, let us extend to all predictions
predictions = []
for image in image_dataset:
pred_original = model.predict(np.expand_dims(image, axis=0))[0][:, :, 0]
pred_lr = model.predict(np.expand_dims(np.fliplr(image), axis=0))[0][:, :, 0]
pred_lr = np.fliplr(pred_lr)
pred_ud = model.predict(np.expand_dims(np.flipud(image), axis=0))[0][:, :, 0]
pred_ud = np.flipud(pred_ud)
pred_lr_ud = model.predict(np.expand_dims(np.fliplr(np.flipud(image)), axis=0))[0][:, :, 0]
pred_lr_ud = np.fliplr(np.flipud(pred_lr_ud))
preds = (pred_original + pred_lr + pred_ud + pred_lr_ud) / 4
predictions.append(preds)
predictions = np.array(predictions)
threshold = 0.5
predictions_th = predictions > threshold
import random
test_img_number = random.randint(0, mask_dataset.shape[0]-1)
test_img = image_dataset[test_img_number]
ground_truth=mask_dataset[test_img_number]
#test_img_norm=test_img[:,:,0][:,:,None]
test_img_input=np.expand_dims(test_img, 0)
prediction = predictions_th[test_img_number]
plt.figure(figsize=(16, 8))
plt.subplot(231)
plt.title('Testing Image')
plt.imshow(test_img, cmap='gray')
plt.subplot(232)
plt.title('Testing Label')
plt.imshow(ground_truth, cmap='gray')
plt.subplot(233)
plt.title('Prediction on test image')
plt.imshow(prediction, cmap='gray')
plt.show()
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"tensorflow.keras.models.load_model",
"random.randint",
"matplotlib.pyplot.show",
"matplotlib.pyplot.imshow",
"numpy.flipud",
"numpy.expand_dims",
"numpy.fliplr",
"matplotlib.pyplot.figure",
"cv2.imread",
"numpy.array",
"PIL.Image.fromarray",
"os.listdir"
] |
[((314, 416), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['"""mitochondria_load_from_disk_focal_dice_50epochs.hdf5"""'], {'compile': '(False)'}), "(\n 'mitochondria_load_from_disk_focal_dice_50epochs.hdf5', compile=False)\n", (340, 416), True, 'import tensorflow as tf\n'), ((564, 591), 'os.listdir', 'os.listdir', (['image_directory'], {}), '(image_directory)\n', (574, 591), False, 'import os\n'), ((1122, 1148), 'os.listdir', 'os.listdir', (['mask_directory'], {}), '(mask_directory)\n', (1132, 1148), False, 'import os\n'), ((1602, 1642), 'random.randint', 'random.randint', (['(0)', 'mask_dataset.shape[0]'], {}), '(0, mask_dataset.shape[0])\n', (1616, 1642), False, 'import random\n'), ((1911, 1924), 'numpy.fliplr', 'np.fliplr', (['p1'], {}), '(p1)\n', (1920, 1924), True, 'import numpy as np\n'), ((2012, 2025), 'numpy.flipud', 'np.flipud', (['p2'], {}), '(p2)\n', (2021, 2025), True, 'import numpy as np\n'), ((2223, 2251), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 12)'}), '(figsize=(12, 12))\n', (2233, 2251), True, 'from matplotlib import pyplot as plt\n'), ((2252, 2268), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(231)'], {}), '(231)\n', (2263, 2268), True, 'from matplotlib import pyplot as plt\n'), ((2269, 2295), 'matplotlib.pyplot.title', 'plt.title', (['"""Original mask"""'], {}), "('Original mask')\n", (2278, 2295), True, 'from matplotlib import pyplot as plt\n'), ((2296, 2330), 'matplotlib.pyplot.imshow', 'plt.imshow', (['temp_mask'], {'cmap': '"""gray"""'}), "(temp_mask, cmap='gray')\n", (2306, 2330), True, 'from matplotlib import pyplot as plt\n'), ((2331, 2347), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(232)'], {}), '(232)\n', (2342, 2347), True, 'from matplotlib import pyplot as plt\n'), ((2348, 2378), 'matplotlib.pyplot.title', 'plt.title', (['"""Prediction No Aug"""'], {}), "('Prediction No Aug')\n", (2357, 2378), True, 'from matplotlib import pyplot as plt\n'), ((2379, 2415), 'matplotlib.pyplot.imshow', 'plt.imshow', (['(p0 > thresh)'], {'cmap': '"""gray"""'}), "(p0 > thresh, cmap='gray')\n", (2389, 2415), True, 'from matplotlib import pyplot as plt\n'), ((2414, 2430), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(233)'], {}), '(233)\n', (2425, 2430), True, 'from matplotlib import pyplot as plt\n'), ((2431, 2457), 'matplotlib.pyplot.title', 'plt.title', (['"""Prediction LR"""'], {}), "('Prediction LR')\n", (2440, 2457), True, 'from matplotlib import pyplot as plt\n'), ((2458, 2494), 'matplotlib.pyplot.imshow', 'plt.imshow', (['(p1 > thresh)'], {'cmap': '"""gray"""'}), "(p1 > thresh, cmap='gray')\n", (2468, 2494), True, 'from matplotlib import pyplot as plt\n'), ((2493, 2509), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(234)'], {}), '(234)\n', (2504, 2509), True, 'from matplotlib import pyplot as plt\n'), ((2510, 2536), 'matplotlib.pyplot.title', 'plt.title', (['"""Prediction UD"""'], {}), "('Prediction UD')\n", (2519, 2536), True, 'from matplotlib import pyplot as plt\n'), ((2537, 2573), 'matplotlib.pyplot.imshow', 'plt.imshow', (['(p2 > thresh)'], {'cmap': '"""gray"""'}), "(p2 > thresh, cmap='gray')\n", (2547, 2573), True, 'from matplotlib import pyplot as plt\n'), ((2572, 2588), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(235)'], {}), '(235)\n', (2583, 2588), True, 'from matplotlib import pyplot as plt\n'), ((2589, 2622), 'matplotlib.pyplot.title', 'plt.title', (['"""Prediction LR and UD"""'], {}), "('Prediction LR and UD')\n", (2598, 2622), True, 'from matplotlib import pyplot as plt\n'), ((2623, 2659), 
'matplotlib.pyplot.imshow', 'plt.imshow', (['(p3 > thresh)'], {'cmap': '"""gray"""'}), "(p3 > thresh, cmap='gray')\n", (2633, 2659), True, 'from matplotlib import pyplot as plt\n'), ((2658, 2674), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(236)'], {}), '(236)\n', (2669, 2674), True, 'from matplotlib import pyplot as plt\n'), ((2675, 2706), 'matplotlib.pyplot.title', 'plt.title', (['"""Average Prediction"""'], {}), "('Average Prediction')\n", (2684, 2706), True, 'from matplotlib import pyplot as plt\n'), ((2707, 2742), 'matplotlib.pyplot.imshow', 'plt.imshow', (['(p > thresh)'], {'cmap': '"""gray"""'}), "(p > thresh, cmap='gray')\n", (2717, 2742), True, 'from matplotlib import pyplot as plt\n'), ((2741, 2751), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2749, 2751), True, 'from matplotlib import pyplot as plt\n'), ((3478, 3499), 'numpy.array', 'np.array', (['predictions'], {}), '(predictions)\n', (3486, 3499), True, 'import numpy as np\n'), ((3591, 3635), 'random.randint', 'random.randint', (['(0)', '(mask_dataset.shape[0] - 1)'], {}), '(0, mask_dataset.shape[0] - 1)\n', (3605, 3635), False, 'import random\n'), ((3775, 3802), 'numpy.expand_dims', 'np.expand_dims', (['test_img', '(0)'], {}), '(test_img, 0)\n', (3789, 3802), True, 'import numpy as np\n'), ((3849, 3876), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 8)'}), '(figsize=(16, 8))\n', (3859, 3876), True, 'from matplotlib import pyplot as plt\n'), ((3877, 3893), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(231)'], {}), '(231)\n', (3888, 3893), True, 'from matplotlib import pyplot as plt\n'), ((3894, 3920), 'matplotlib.pyplot.title', 'plt.title', (['"""Testing Image"""'], {}), "('Testing Image')\n", (3903, 3920), True, 'from matplotlib import pyplot as plt\n'), ((3921, 3954), 'matplotlib.pyplot.imshow', 'plt.imshow', (['test_img'], {'cmap': '"""gray"""'}), "(test_img, cmap='gray')\n", (3931, 3954), True, 'from matplotlib import pyplot as plt\n'), ((3955, 3971), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(232)'], {}), '(232)\n', (3966, 3971), True, 'from matplotlib import pyplot as plt\n'), ((3972, 3998), 'matplotlib.pyplot.title', 'plt.title', (['"""Testing Label"""'], {}), "('Testing Label')\n", (3981, 3998), True, 'from matplotlib import pyplot as plt\n'), ((3999, 4036), 'matplotlib.pyplot.imshow', 'plt.imshow', (['ground_truth'], {'cmap': '"""gray"""'}), "(ground_truth, cmap='gray')\n", (4009, 4036), True, 'from matplotlib import pyplot as plt\n'), ((4037, 4053), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(233)'], {}), '(233)\n', (4048, 4053), True, 'from matplotlib import pyplot as plt\n'), ((4054, 4091), 'matplotlib.pyplot.title', 'plt.title', (['"""Prediction on test image"""'], {}), "('Prediction on test image')\n", (4063, 4091), True, 'from matplotlib import pyplot as plt\n'), ((4092, 4127), 'matplotlib.pyplot.imshow', 'plt.imshow', (['prediction'], {'cmap': '"""gray"""'}), "(prediction, cmap='gray')\n", (4102, 4127), True, 'from matplotlib import pyplot as plt\n'), ((4129, 4139), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4137, 4139), True, 'from matplotlib import pyplot as plt\n'), ((1436, 1459), 'numpy.array', 'np.array', (['image_dataset'], {}), '(image_dataset)\n', (1444, 1459), True, 'import numpy as np\n'), ((1532, 1554), 'numpy.array', 'np.array', (['mask_dataset'], {}), '(mask_dataset)\n', (1540, 1554), True, 'import numpy as np\n'), ((2134, 2147), 'numpy.flipud', 'np.flipud', (['p3'], {}), '(p3)\n', (2143, 2147), True, 'import numpy as np\n'), ((3067, 
3085), 'numpy.fliplr', 'np.fliplr', (['pred_lr'], {}), '(pred_lr)\n', (3076, 3085), True, 'import numpy as np\n'), ((3187, 3205), 'numpy.flipud', 'np.flipud', (['pred_ud'], {}), '(pred_ud)\n', (3196, 3205), True, 'import numpy as np\n'), ((813, 853), 'cv2.imread', 'cv2.imread', (['(image_directory + image_name)'], {}), '(image_directory + image_name)\n', (823, 853), False, 'import cv2\n'), ((868, 890), 'PIL.Image.fromarray', 'Image.fromarray', (['image'], {}), '(image)\n', (883, 890), False, 'from PIL import Image\n'), ((1248, 1290), 'cv2.imread', 'cv2.imread', (['(mask_directory + image_name)', '(0)'], {}), '(mask_directory + image_name, 0)\n', (1258, 1290), False, 'import cv2\n'), ((1305, 1327), 'PIL.Image.fromarray', 'Image.fromarray', (['image'], {}), '(image)\n', (1320, 1327), False, 'from PIL import Image\n'), ((3334, 3355), 'numpy.flipud', 'np.flipud', (['pred_lr_ud'], {}), '(pred_lr_ud)\n', (3343, 3355), True, 'import numpy as np\n'), ((963, 978), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (971, 978), True, 'import numpy as np\n'), ((1399, 1414), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (1407, 1414), True, 'import numpy as np\n'), ((1773, 1810), 'numpy.expand_dims', 'np.expand_dims', (['temp_test_img'], {'axis': '(0)'}), '(temp_test_img, axis=0)\n', (1787, 1810), True, 'import numpy as np\n'), ((1859, 1883), 'numpy.fliplr', 'np.fliplr', (['temp_test_img'], {}), '(temp_test_img)\n', (1868, 1883), True, 'import numpy as np\n'), ((1960, 1984), 'numpy.flipud', 'np.flipud', (['temp_test_img'], {}), '(temp_test_img)\n', (1969, 1984), True, 'import numpy as np\n'), ((2923, 2952), 'numpy.expand_dims', 'np.expand_dims', (['image'], {'axis': '(0)'}), '(image, axis=0)\n', (2937, 2952), True, 'import numpy as np\n'), ((2071, 2095), 'numpy.flipud', 'np.flipud', (['temp_test_img'], {}), '(temp_test_img)\n', (2080, 2095), True, 'import numpy as np\n'), ((3014, 3030), 'numpy.fliplr', 'np.fliplr', (['image'], {}), '(image)\n', (3023, 3030), True, 'import numpy as np\n'), ((3134, 3150), 'numpy.flipud', 'np.flipud', (['image'], {}), '(image)\n', (3143, 3150), True, 'import numpy as np\n'), ((3267, 3283), 'numpy.flipud', 'np.flipud', (['image'], {}), '(image)\n', (3276, 3283), True, 'import numpy as np\n')]
|
import pandas as pd
import numpy as np
dataset_name = "Caltech"
relative = "../../../"
df = pd.read_csv(relative + "datasets/" + dataset_name + '/'+ dataset_name + '.csv', sep=";", header=None)
df = df.drop(0, 1)
print(df.describe())
print(df.nunique())
print(df.head())
print(df.shape)
df[11] = pd.Categorical(df[11])
df[11] = df[11].cat.codes
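# column 11 is treated as the categorical class label here; its integer codes are saved separately and used to colour the embedding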
num_cols = df.shape[1]-1
np.savetxt(relative + "datasets/" + dataset_name + '/' + dataset_name + "_prep_encoding2.csv", df.values[:,:num_cols], delimiter=",")
np.savetxt(relative + "datasets/" + dataset_name + '/' + dataset_name + "_labels.csv", df.values[:,num_cols], delimiter=",")
import umap
X_embedded = umap.UMAP().fit_transform(df.values[:,:num_cols])
import matplotlib.pyplot as plt
plt.scatter(X_embedded[:,0], X_embedded[:,1], c = df.values[:,num_cols])
plt.show()
|
[
"matplotlib.pyplot.show",
"pandas.read_csv",
"matplotlib.pyplot.scatter",
"numpy.savetxt",
"umap.UMAP",
"pandas.Categorical"
] |
[((96, 202), 'pandas.read_csv', 'pd.read_csv', (["(relative + 'datasets/' + dataset_name + '/' + dataset_name + '.csv')"], {'sep': '""";"""', 'header': 'None'}), "(relative + 'datasets/' + dataset_name + '/' + dataset_name +\n '.csv', sep=';', header=None)\n", (107, 202), True, 'import pandas as pd\n'), ((306, 328), 'pandas.Categorical', 'pd.Categorical', (['df[11]'], {}), '(df[11])\n', (320, 328), True, 'import pandas as pd\n'), ((383, 521), 'numpy.savetxt', 'np.savetxt', (["(relative + 'datasets/' + dataset_name + '/' + dataset_name +\n '_prep_encoding2.csv')", 'df.values[:, :num_cols]'], {'delimiter': '""","""'}), "(relative + 'datasets/' + dataset_name + '/' + dataset_name +\n '_prep_encoding2.csv', df.values[:, :num_cols], delimiter=',')\n", (393, 521), True, 'import numpy as np\n'), ((517, 646), 'numpy.savetxt', 'np.savetxt', (["(relative + 'datasets/' + dataset_name + '/' + dataset_name + '_labels.csv')", 'df.values[:, num_cols]'], {'delimiter': '""","""'}), "(relative + 'datasets/' + dataset_name + '/' + dataset_name +\n '_labels.csv', df.values[:, num_cols], delimiter=',')\n", (527, 646), True, 'import numpy as np\n'), ((758, 831), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X_embedded[:, 0]', 'X_embedded[:, 1]'], {'c': 'df.values[:, num_cols]'}), '(X_embedded[:, 0], X_embedded[:, 1], c=df.values[:, num_cols])\n', (769, 831), True, 'import matplotlib.pyplot as plt\n'), ((831, 841), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (839, 841), True, 'import matplotlib.pyplot as plt\n'), ((672, 683), 'umap.UMAP', 'umap.UMAP', ([], {}), '()\n', (681, 683), False, 'import umap\n')]
|
import data
import numpy as np
# TODO: split tests 1 test per assert statement
# TODO: move repeating constants out of functions
class TestHAPT:
def test_get_train_data(self):
d = data.HAPT()
assert d._train_attrs is None
d.get_train_data()
assert len(d._train_attrs) > 0
assert len(d.get_train_data()) > 0
def test_get_train_labels(self):
d = data.HAPT()
assert d._train_labels is None
d.get_train_labels()
assert len(d._train_labels) > 0
assert len(d.get_train_labels()) > 0
def test_get_test_data(self):
d = data.HAPT()
assert d._test_attrs is None
d.get_test_data()
assert len(d._test_attrs) > 0
assert len(d.get_test_data()) > 0
def test_get_test_labels(self):
d = data.HAPT()
assert d._test_labels is None
d.get_test_labels()
assert len(d._test_labels) > 0
assert len(d.get_test_labels()) > 0
def test_load_train_data(self):
d = data.HAPT()
assert d._train_attrs is None
assert d._train_labels is None
d.load_train_data()
assert len(d._train_attrs) > 0
assert len(d._train_labels) > 0
assert len(d._train_attrs) == len(d._train_labels)
assert len(d.get_train_data()) == len(d.get_train_labels())
def test_load_test_data(self):
d = data.HAPT()
assert d._test_attrs is None
assert d._test_labels is None
d.load_test_data()
assert len(d._test_attrs) > 0
assert len(d._test_labels) > 0
assert len(d._test_attrs) == len(d._test_labels)
assert len(d.get_test_data()) == len(d.get_test_labels())
def test_load_all_data(self):
d = data.HAPT()
assert d._train_attrs is None
assert d._train_labels is None
assert d._test_attrs is None
assert d._test_labels is None
d.load_all_data()
assert len(d._train_attrs) > 0
assert len(d._train_labels) > 0
assert len(d._test_attrs) > 0
assert len(d._test_labels) > 0
assert len(d._train_attrs) == len(d._train_labels)
assert len(d._test_attrs) == len(d._test_labels)
assert len(d.get_train_data()) == len(d.get_train_labels())
assert len(d.get_test_data()) == len(d.get_test_labels())
def test_get_labels_map(self):
orig_labels = {
1: "WALKING",
2: "WALKING_UPSTAIRS",
3: "WALKING_DOWNSTAIRS",
4: "SITTING",
5: "STANDING",
6: "LAYING",
7: "STAND_TO_SIT",
8: "SIT_TO_STAND",
9: "SIT_TO_LIE",
10: "LIE_TO_SIT",
11: "STAND_TO_LIE",
12: "LIE_TO_STAND"
}
d = data.HAPT()
assert d._labels == {}
d.get_labels_map()
assert d._labels == orig_labels
assert d.get_labels_map() == orig_labels
def test_aggregate_groups(self):
orig_labels = {
1: "WALKING",
2: "WALKING_UPSTAIRS",
3: "WALKING_DOWNSTAIRS",
4: "SITTING",
5: "STANDING",
6: "LAYING",
7: "STAND_TO_SIT",
8: "SIT_TO_STAND",
9: "SIT_TO_LIE",
10: "LIE_TO_SIT",
11: "STAND_TO_LIE",
12: "LIE_TO_STAND"
}
d = data.HAPT()
d._labels = orig_labels
d._test_labels = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
d._train_labels = [12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1]
d.aggregate_groups()
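        # activities 1-3 collapse to group 0 (WALKING), 4-6 to group 1 (STATIC), 7-12 to group 2 (TRANSITION)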
assert np.array_equal(d._aggregated_test_labels, np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 2, 2, 2]))
assert np.array_equal(d._aggregated_train_labels, np.array([2, 2, 2, 2, 2, 2, 1, 1, 1, 0, 0, 0]))
assert d._aggregated2initial_labels == {0: [1, 2, 3], 1: [4, 5, 6], 2: [7, 8, 9, 10, 11, 12]}
def test_get_aggr2initial_labs_map(self):
d = data.HAPT()
d.load_all_data()
d.aggregate_groups()
assert d.get_aggr2initial_labs_map() == {
'WALKING': ['WALKING', 'WALKING_UPSTAIRS', 'WALKING_DOWNSTAIRS'],
'STATIC': ['SITTING', 'STANDING', 'LAYING'],
'TRANSITION': ['STAND_TO_SIT', 'SIT_TO_STAND', 'SIT_TO_LIE', 'LIE_TO_SIT', 'STAND_TO_LIE', 'LIE_TO_STAND']
}
def test_get_aggregated_test_labels(self):
orig_labels = {
1: "WALKING",
2: "WALKING_UPSTAIRS",
3: "WALKING_DOWNSTAIRS",
4: "SITTING",
5: "STANDING",
6: "LAYING",
7: "STAND_TO_SIT",
8: "SIT_TO_STAND",
9: "SIT_TO_LIE",
10: "LIE_TO_SIT",
11: "STAND_TO_LIE",
12: "LIE_TO_STAND"
}
d = data.HAPT()
d._labels = orig_labels
d._test_labels = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
d._train_labels = [12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1]
assert d.get_aggregated_test_labels() == d._test_labels
d.aggregate_groups()
print(d._aggregated_test_labels)
assert np.array_equal(d.get_aggregated_test_labels(), np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 2, 2, 2]))
def test_get_aggregated_train_labels(self):
orig_labels = {
1: "WALKING",
2: "WALKING_UPSTAIRS",
3: "WALKING_DOWNSTAIRS",
4: "SITTING",
5: "STANDING",
6: "LAYING",
7: "STAND_TO_SIT",
8: "SIT_TO_STAND",
9: "SIT_TO_LIE",
10: "LIE_TO_SIT",
11: "STAND_TO_LIE",
12: "LIE_TO_STAND"
}
d = data.HAPT()
d._labels = orig_labels
d._test_labels = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
d._train_labels = [12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1]
assert d.get_aggregated_train_labels() == d._train_labels
d.aggregate_groups()
assert np.array_equal(d.get_aggregated_train_labels(), np.array([2, 2, 2, 2, 2, 2, 1, 1, 1, 0, 0, 0]))
def test_get_aggregated_labels_map(self):
d = data.HAPT()
assert d.get_aggregated_labels_map() == {0: "WALKING", 1: "STATIC", 2: "TRANSITION"}
|
[
"data.HAPT",
"numpy.array"
] |
[((195, 206), 'data.HAPT', 'data.HAPT', ([], {}), '()\n', (204, 206), False, 'import data\n'), ((404, 415), 'data.HAPT', 'data.HAPT', ([], {}), '()\n', (413, 415), False, 'import data\n'), ((616, 627), 'data.HAPT', 'data.HAPT', ([], {}), '()\n', (625, 627), False, 'import data\n'), ((820, 831), 'data.HAPT', 'data.HAPT', ([], {}), '()\n', (829, 831), False, 'import data\n'), ((1030, 1041), 'data.HAPT', 'data.HAPT', ([], {}), '()\n', (1039, 1041), False, 'import data\n'), ((1401, 1412), 'data.HAPT', 'data.HAPT', ([], {}), '()\n', (1410, 1412), False, 'import data\n'), ((1762, 1773), 'data.HAPT', 'data.HAPT', ([], {}), '()\n', (1771, 1773), False, 'import data\n'), ((2800, 2811), 'data.HAPT', 'data.HAPT', ([], {}), '()\n', (2809, 2811), False, 'import data\n'), ((3403, 3414), 'data.HAPT', 'data.HAPT', ([], {}), '()\n', (3412, 3414), False, 'import data\n'), ((3979, 3990), 'data.HAPT', 'data.HAPT', ([], {}), '()\n', (3988, 3990), False, 'import data\n'), ((4814, 4825), 'data.HAPT', 'data.HAPT', ([], {}), '()\n', (4823, 4825), False, 'import data\n'), ((5688, 5699), 'data.HAPT', 'data.HAPT', ([], {}), '()\n', (5697, 5699), False, 'import data\n'), ((6128, 6139), 'data.HAPT', 'data.HAPT', ([], {}), '()\n', (6137, 6139), False, 'import data\n'), ((3664, 3710), 'numpy.array', 'np.array', (['[0, 0, 0, 1, 1, 1, 2, 2, 2, 2, 2, 2]'], {}), '([0, 0, 0, 1, 1, 1, 2, 2, 2, 2, 2, 2])\n', (3672, 3710), True, 'import numpy as np\n'), ((3770, 3816), 'numpy.array', 'np.array', (['[2, 2, 2, 2, 2, 2, 1, 1, 1, 0, 0, 0]'], {}), '([2, 2, 2, 2, 2, 2, 1, 1, 1, 0, 0, 0])\n', (3778, 3816), True, 'import numpy as np\n'), ((5185, 5231), 'numpy.array', 'np.array', (['[0, 0, 0, 1, 1, 1, 2, 2, 2, 2, 2, 2]'], {}), '([0, 0, 0, 1, 1, 1, 2, 2, 2, 2, 2, 2])\n', (5193, 5231), True, 'import numpy as np\n'), ((6021, 6067), 'numpy.array', 'np.array', (['[2, 2, 2, 2, 2, 2, 1, 1, 1, 0, 0, 0]'], {}), '([2, 2, 2, 2, 2, 2, 1, 1, 1, 0, 0, 0])\n', (6029, 6067), True, 'import numpy as np\n')]
|
import numpy as np
from collections import namedtuple
from util import (
vec3d_to_array,
quat_to_array,
array_to_vec3d_pb,
array_to_quat_pb,
)
from radar_data_streamer import RadarData
from data_pb2 import Image
Extrinsic = namedtuple('Extrinsic', ['position', 'attitude'])
class RadarImage(RadarData):
"""
This class is a Python representation of the protobuf Image object for
    convenient downstream operations
"""
def __init__(self, timestamp, frame_id, extrinsic, image_model, image):
self.timestamp = timestamp
self.frame_id = frame_id
self.extrinsic = extrinsic
self.image_model = image_model
self.image = image
@classmethod
def from_proto(cls, image_pb):
timestamp = image_pb.meta.timestamp
frame_id = image_pb.meta.frame_id
extrinsic = Extrinsic(
position=vec3d_to_array(image_pb.meta.position),
attitude=quat_to_array(image_pb.meta.attitude))
image_model = ImageModel(
origin=vec3d_to_array(image_pb.cartesian.model.origin),
di=vec3d_to_array(image_pb.cartesian.model.di),
dj=vec3d_to_array(image_pb.cartesian.model.dj))
# create the image array
image_shape = (image_pb.cartesian.data.cols,
image_pb.cartesian.data.rows)
image_data = np.frombuffer(image_pb.cartesian.data.data,
dtype=np.uint32)
# copy image_data because we do not own the memory
image = np.reshape(image_data.copy(), image_shape)
radar_image = cls(timestamp, frame_id, extrinsic, image_model, image)
return radar_image
def to_proto(self, timestamp, frame_id):
image_pb = Image()
image_pb.meta.timestamp = timestamp
image_pb.meta.frame_id = frame_id
# Setting the type to REAL_32U
image_pb.cartesian.data.type = 5
array_to_vec3d_pb(image_pb.meta.position,
self.extrinsic.position)
array_to_quat_pb(image_pb.meta.attitude,
self.extrinsic.attitude)
array_to_vec3d_pb(image_pb.cartesian.model.origin,
self.image_model.origin)
array_to_vec3d_pb(image_pb.cartesian.model.di,
self.image_model.di)
array_to_vec3d_pb(image_pb.cartesian.model.dj,
self.image_model.dj)
image_pb.cartesian.data.cols, image_pb.cartesian.data.rows = \
self.image.shape
return image_pb
class ImageModel(object):
"""
    ImageModel describes the mapping between world (ECEF) coordinates and image pixel coordinates
"""
def __init__(self, origin, di, dj):
self.di = di
self.dj = dj
self.origin = origin
def global_to_image(self, ecef_point):
radar_to_image = ecef_point - self.origin
i_res = np.linalg.norm(self.di)
j_res = np.linalg.norm(self.dj)
i_dir = self.di/i_res
j_dir = self.dj/j_res
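        # project the offset onto the pixel basis directions and round to integer pixel indices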
i_proj = int(round(radar_to_image.dot(i_dir)/i_res))
j_proj = int(round(radar_to_image.dot(j_dir)/j_res))
pixel_point = (i_proj, j_proj)
return pixel_point
def image_to_global(self, pixel_point):
i_idx = pixel_point[0]
j_idx = pixel_point[1]
ecef_point = self.origin + (i_idx*self.di) + (j_idx*self.dj)
return ecef_point
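# Illustrative usage sketch (hypothetical values, not from the original source):
# model = ImageModel(origin=np.zeros(3), di=np.array([1.0, 0.0, 0.0]), dj=np.array([0.0, 1.0, 0.0]))
# model.global_to_image(np.array([10.0, 5.0, 0.0]))   # -> (10, 5)
# model.image_to_global((10, 5))                       # -> array([10., 5., 0.])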
|
[
"util.array_to_quat_pb",
"util.quat_to_array",
"data_pb2.Image",
"numpy.frombuffer",
"util.vec3d_to_array",
"numpy.linalg.norm",
"collections.namedtuple",
"util.array_to_vec3d_pb"
] |
[((241, 290), 'collections.namedtuple', 'namedtuple', (['"""Extrinsic"""', "['position', 'attitude']"], {}), "('Extrinsic', ['position', 'attitude'])\n", (251, 290), False, 'from collections import namedtuple\n'), ((1371, 1431), 'numpy.frombuffer', 'np.frombuffer', (['image_pb.cartesian.data.data'], {'dtype': 'np.uint32'}), '(image_pb.cartesian.data.data, dtype=np.uint32)\n', (1384, 1431), True, 'import numpy as np\n'), ((1758, 1765), 'data_pb2.Image', 'Image', ([], {}), '()\n', (1763, 1765), False, 'from data_pb2 import Image\n'), ((1941, 2007), 'util.array_to_vec3d_pb', 'array_to_vec3d_pb', (['image_pb.meta.position', 'self.extrinsic.position'], {}), '(image_pb.meta.position, self.extrinsic.position)\n', (1958, 2007), False, 'from util import vec3d_to_array, quat_to_array, array_to_vec3d_pb, array_to_quat_pb\n'), ((2043, 2108), 'util.array_to_quat_pb', 'array_to_quat_pb', (['image_pb.meta.attitude', 'self.extrinsic.attitude'], {}), '(image_pb.meta.attitude, self.extrinsic.attitude)\n', (2059, 2108), False, 'from util import vec3d_to_array, quat_to_array, array_to_vec3d_pb, array_to_quat_pb\n'), ((2143, 2218), 'util.array_to_vec3d_pb', 'array_to_vec3d_pb', (['image_pb.cartesian.model.origin', 'self.image_model.origin'], {}), '(image_pb.cartesian.model.origin, self.image_model.origin)\n', (2160, 2218), False, 'from util import vec3d_to_array, quat_to_array, array_to_vec3d_pb, array_to_quat_pb\n'), ((2254, 2321), 'util.array_to_vec3d_pb', 'array_to_vec3d_pb', (['image_pb.cartesian.model.di', 'self.image_model.di'], {}), '(image_pb.cartesian.model.di, self.image_model.di)\n', (2271, 2321), False, 'from util import vec3d_to_array, quat_to_array, array_to_vec3d_pb, array_to_quat_pb\n'), ((2357, 2424), 'util.array_to_vec3d_pb', 'array_to_vec3d_pb', (['image_pb.cartesian.model.dj', 'self.image_model.dj'], {}), '(image_pb.cartesian.model.dj, self.image_model.dj)\n', (2374, 2424), False, 'from util import vec3d_to_array, quat_to_array, array_to_vec3d_pb, array_to_quat_pb\n'), ((2913, 2936), 'numpy.linalg.norm', 'np.linalg.norm', (['self.di'], {}), '(self.di)\n', (2927, 2936), True, 'import numpy as np\n'), ((2953, 2976), 'numpy.linalg.norm', 'np.linalg.norm', (['self.dj'], {}), '(self.dj)\n', (2967, 2976), True, 'import numpy as np\n'), ((886, 924), 'util.vec3d_to_array', 'vec3d_to_array', (['image_pb.meta.position'], {}), '(image_pb.meta.position)\n', (900, 924), False, 'from util import vec3d_to_array, quat_to_array, array_to_vec3d_pb, array_to_quat_pb\n'), ((947, 984), 'util.quat_to_array', 'quat_to_array', (['image_pb.meta.attitude'], {}), '(image_pb.meta.attitude)\n', (960, 984), False, 'from util import vec3d_to_array, quat_to_array, array_to_vec3d_pb, array_to_quat_pb\n'), ((1040, 1087), 'util.vec3d_to_array', 'vec3d_to_array', (['image_pb.cartesian.model.origin'], {}), '(image_pb.cartesian.model.origin)\n', (1054, 1087), False, 'from util import vec3d_to_array, quat_to_array, array_to_vec3d_pb, array_to_quat_pb\n'), ((1104, 1147), 'util.vec3d_to_array', 'vec3d_to_array', (['image_pb.cartesian.model.di'], {}), '(image_pb.cartesian.model.di)\n', (1118, 1147), False, 'from util import vec3d_to_array, quat_to_array, array_to_vec3d_pb, array_to_quat_pb\n'), ((1164, 1207), 'util.vec3d_to_array', 'vec3d_to_array', (['image_pb.cartesian.model.dj'], {}), '(image_pb.cartesian.model.dj)\n', (1178, 1207), False, 'from util import vec3d_to_array, quat_to_array, array_to_vec3d_pb, array_to_quat_pb\n')]
|
import csv
import cv2
from datetime import datetime
import numpy as np
import matplotlib.pyplot as plt
import sklearn
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
from keras.models import Sequential
from keras.layers import Convolution2D,Flatten,Dense,Lambda
from keras import optimizers
from keras import regularizers
BATCH_SIZE=128
BINS=25
BIN_RANGE=[-1.0,1.0]
EPOCHS=5
LEARNING_RATE = 0.001
LEARNING_RATE_DECAY = 0.0001
L2_REGULARIZATION = 0.001
ANGLE_CORRECTION_FACTOR = 0.20
def load_driving_log(csv_path):
'''
Loads the driving data log(csv).
Returns the line data as a string array.
'''
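    # Illustrative layout of a driving log row (the column names are assumed; only the
    # indices used elsewhere in this script matter): center, left and right image paths
    # in columns 0-2, steering angle in column 3 and speed in column 6, e.g.
    #   IMG/center_01.jpg, IMG/left_01.jpg, IMG/right_01.jpg, 0.1, 0.9, 0.0, 25.3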
samples = []
with open(csv_path) as csvfile:
header_present = csv.Sniffer().has_header(csvfile.read(1024))
csvfile.seek(0) # back to first line
reader = csv.reader(csvfile)
if header_present:
next(reader) # skip the header
for line in reader:
samples.append(line)
return samples
def cleanup_data(samples):
'''
Removes any data with speed = 0.
Returns cleansed data array.
'''
cleansed_samples = []
for sample in samples:
if (float(sample[6]) != 0.0):# don't add zero speed frames
cleansed_samples.append(sample)
return cleansed_samples
def draw_angles_distribution(samples,bins,angle_range):
'''
Draws a bar chart showing the histogram of the passed in data.
    Returns the left edge of each bin (apart from the last one, for which the right edge is
    returned) and the bin values. The number of bin edges is 'bins' + 1.
'''
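    # Illustrative numbers: with bins=4 and angle_range=[-1.0, 1.0], plt.hist returns
    # 4 bin values and 5 edges [-1.0, -0.5, 0.0, 0.5, 1.0]; every edge is the left
    # boundary of its bin except the last, which is the right boundary of the final bin.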
angles = []
for sample in samples:
angle = float(sample[3])
angles.append(angle)
plt.figure(figsize=(14,7))
plt.ylabel('Count');
plt.xlabel('Angle');
bar_height_if_uniform_dist = len(samples)/bins
plt.plot(angle_range,[bar_height_if_uniform_dist,bar_height_if_uniform_dist])
plt.text(angle_range[0],bar_height_if_uniform_dist+50,'Uniform Distribution')
plt.title('Angle Histogram')
bin_values,bin_edges,_=plt.hist(angles,bins=bins,range=angle_range)
plt.show()
return bin_edges,bin_values
def balance_dataset(samples,bin_edges,bin_values,bins):
'''
Removes data where:
(i) angle is = +- 1.0
(ii) the bin size is greater than the average bin size
Returns the balanced array of sample data.
'''
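    # Worked example of the penalty applied below (hypothetical numbers): with 1000
    # samples and 25 bins, avg_bin_size = 40; a bin holding 300 samples gets
    # keep_probability = 1 - (300 + 10*40)/1000 = 0.30, so roughly 70% of its samples
    # are dropped, while bins at or below the average size are kept in full.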
balanced_samples = []
for sample in samples:
angle = float(sample[3])
if (angle == 1.0 or angle == -1.0): # Remove extreme angles
continue
        # Total number of bin edges = number of bins + 1.
        # Each edge is the left-most value of its bin's range, apart from the last edge,
        # which is the right-most value, hence the strict less-than check.
potential_bins = np.where(bin_edges < angle)
# if no bin found
if (len(potential_bins[0]) == 0):
# For catching cases where the angle is exactly -1 or +1
potential_bins = np.where(bin_edges == angle)
if (len(potential_bins[0]) == 0):
raise Exception('No bin match found for angle:{}'.format(angle))
matched_bin_index = np.max(potential_bins)
matched_bin_value = bin_values[matched_bin_index]
avg_bin_size = len(samples)/bins
# Higher the %, the more that bin gets penalized
keep_probability = 1 - ((matched_bin_value + 10*avg_bin_size)/len(samples))
if (matched_bin_value > avg_bin_size):
if (np.random.rand() < keep_probability):
balanced_samples.append(sample)
else:
balanced_samples.append(sample)
return balanced_samples
def generator(samples,data_dir,batch_size=32):
'''
Generates a batch of images and angles.
    Reads in the sample data and, for each record, adds the center, left & right images plus the corresponding angles.
    Keep in mind that the returned batch is 3x the passed-in batch_size because three images are added per record.
The benefit of using a generator is that the entire dataset doesn't need to be processed at the same time,
rather only a subset is processed and fed to the model, which greatly helps when working with constrained memory.
'''
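    # Minimal usage sketch (mirroring the calls made in main() below): the generator is
    # handed straight to Keras, which pulls one batch per training step:
    #   train_gen = generator(train_samples, data_dir, batch_size=BATCH_SIZE)
    #   model.fit_generator(train_gen, steps_per_epoch=len(train_samples)*3/(BATCH_SIZE*3), ...)
    # Each yielded batch holds batch_size*3 images because the left and right camera
    # frames are appended alongside every center frame.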
num_samples = len(samples)
while 1: # Loop forever so the generator never terminates
shuffle(samples)
for offset in range(0,num_samples,batch_size):
batch_samples = samples[offset:offset+batch_size]
images = []
angles = []
for line in batch_samples:
center_angle = float(line[3])
angles.append(center_angle)
left_angle = center_angle + ANGLE_CORRECTION_FACTOR
angles.append(left_angle)
right_angle = center_angle - ANGLE_CORRECTION_FACTOR
angles.append(right_angle)
center_img_path = data_dir + line[0]
center_img = cv2.cvtColor(cv2.imread(center_img_path),cv2.COLOR_BGR2RGB)
# Crop 70 pixels from top and 24 pixels from bottom, output = 66 x 320
center_img = center_img[70:136,:]
# Resize to 66 x 200 as required by nVidia architecture
center_img = cv2.resize(center_img,(200,66),interpolation = cv2.INTER_AREA)
images.append(center_img)
left_img_path = data_dir + line[1]
left_img = cv2.cvtColor(cv2.imread(left_img_path),cv2.COLOR_BGR2RGB)
# Crop 70 pixels from top and 24 pixels from bottom, output = 66 x 320
left_img = left_img[70:136,:]
# Resize to 66 x 200 as required by nVidia architecture
left_img = cv2.resize(left_img,(200,66),interpolation = cv2.INTER_AREA)
images.append(left_img)
right_img_path = data_dir + line[2]
right_img = cv2.cvtColor(cv2.imread(right_img_path),cv2.COLOR_BGR2RGB)
# Crop 70 pixels from top and 24 pixels from bottom, output = 66 x 320
right_img = right_img[70:136,:]
# Resize to 66 x 200 as required by nVidia architecture
right_img = cv2.resize(right_img,(200,66),interpolation = cv2.INTER_AREA)
images.append(right_img)
X_train = np.array(images)
y_train = np.array(angles)
# Return processed images for this batch but remember the value of local variables for next iteration
yield sklearn.utils.shuffle(X_train, y_train)
def nVidiaNet(train_generator,validation_generator,steps_per_epoch,validation_steps,save_model_dir):
'''
    Implements the nVidia CNN architecture (https://devblogs.nvidia.com/parallelforall/deep-learning-self-driving-cars/).
Returns the model history object + also saves the model as 'model.h5' in the current working directory.
'''
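    # Layer-shape sketch (computed by hand, assuming the Keras default 'valid' padding on
    # the 66 x 200 x 3 input): the 5x5/stride-2 convolutions give 31x98x24, 14x47x36 and
    # 5x22x48; the 3x3/stride-1 convolutions give 3x20x64 and 1x18x64; flattening yields
    # 1152 features feeding the 100-50-10-1 dense head. The print statements below report
    # the exact shapes at run time.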
nVidiaModel = Sequential()
nVidiaModel.add(Lambda(lambda x:(x/255.0)-0.5,input_shape=(66,200,3)))
print('Input shape:{}'.format(nVidiaModel.input_shape))
print('Output shape - after normalization:{}'.format(nVidiaModel.output_shape))
nVidiaModel.add(Convolution2D(24,(5,5),strides=(2,2),kernel_initializer='normal',kernel_regularizer=regularizers.l2(L2_REGULARIZATION),activation='elu'))
print('Output shape - after first convolution:{}'.format(nVidiaModel.output_shape))
nVidiaModel.add(Convolution2D(36,(5,5),strides=(2,2),kernel_initializer='normal',kernel_regularizer=regularizers.l2(L2_REGULARIZATION),activation='elu'))
print('Output shape - after second convolution:{}'.format(nVidiaModel.output_shape))
nVidiaModel.add(Convolution2D(48,(5,5),strides=(2,2),kernel_initializer='normal',kernel_regularizer=regularizers.l2(L2_REGULARIZATION),activation='elu'))
print('Output shape - after third convolution:{}'.format(nVidiaModel.output_shape))
nVidiaModel.add(Convolution2D(64,(3,3),strides=(1,1),kernel_initializer='normal',kernel_regularizer=regularizers.l2(L2_REGULARIZATION),activation='elu'))
print('Output shape - after fourth convolution:{}'.format(nVidiaModel.output_shape))
nVidiaModel.add(Convolution2D(64,(3,3),strides=(1,1),kernel_initializer='normal',kernel_regularizer=regularizers.l2(L2_REGULARIZATION),activation='elu'))
print('Output shape - after fifth convolution:{}'.format(nVidiaModel.output_shape))
nVidiaModel.add(Flatten())
print('Output shape - after flattening:{}'.format(nVidiaModel.output_shape))
nVidiaModel.add(Dense(100,kernel_initializer='normal',kernel_regularizer=regularizers.l2(L2_REGULARIZATION),activation='elu'))
print('Output shape - after first dense:{}'.format(nVidiaModel.output_shape))
nVidiaModel.add(Dense(50,kernel_initializer='normal',kernel_regularizer=regularizers.l2(L2_REGULARIZATION),activation='elu'))
print('Output shape - after second dense:{}'.format(nVidiaModel.output_shape))
nVidiaModel.add(Dense(10,kernel_initializer='normal',kernel_regularizer=regularizers.l2(L2_REGULARIZATION),activation='elu'))
print('Output shape - after third dense:{}'.format(nVidiaModel.output_shape))
nVidiaModel.add(Dense(1))
print('Output shape - after fourth dense:{}'.format(nVidiaModel.output_shape))
adam_optzr = optimizers.Adam(lr=LEARNING_RATE,decay=LEARNING_RATE_DECAY)
nVidiaModel.compile(optimizer=adam_optzr,loss='mse',metrics = ['accuracy'])
nVidiaModel_history = nVidiaModel.fit_generator(train_generator,
validation_data=validation_generator,
steps_per_epoch=steps_per_epoch,
validation_steps=validation_steps,
epochs=EPOCHS)
dt = datetime.now()
model_name_prefix = dt.strftime("%y-%m-%d-%H-%M")
nVidiaModel.save(save_model_dir + model_name_prefix + '-model.h5')
# Write out the model params
model_params_file = open(save_model_dir + model_name_prefix + '-model-params.txt', 'w')
model_params_file.write('EPOCHS >>> {}\n'.format(EPOCHS))
model_params_file.write('BATCH SIZE >>> {}\n'.format(BATCH_SIZE))
model_params_file.write('LEARNING RATE >>> {}\n'.format(LEARNING_RATE))
model_params_file.write('LEARNING RATE DECAY >>> {}\n'.format(LEARNING_RATE_DECAY))
model_params_file.write('ANGLE CORRECTION FACTOR >>> {}\n'.format(ANGLE_CORRECTION_FACTOR))
model_params_file.write('BINS >>> {}\n'.format(BINS))
model_params_file.write('BIN RANGE >>> {}\n'.format(BIN_RANGE))
model_params_file.close()
return nVidiaModel_history
def main():
data_dir = 'C:/Users/Admin/Desktop/Behavioral Cloning/driving-data/'
driving_log_filename = 'driving_log.csv'
save_model_dir = './saved-models/'
samples = load_driving_log(data_dir + driving_log_filename)
print('Total samples:{}'.format(len(samples)))
samples = cleanup_data(samples)
print('Total samples after removing zero angles:{}'.format(len(samples)))
bin_edges,bin_values = draw_angles_distribution(samples,BINS,BIN_RANGE)
samples = balance_dataset(samples,bin_edges,bin_values,BINS)
_,_ = draw_angles_distribution(samples,BINS,BIN_RANGE)
train_samples,validation_samples = train_test_split(samples,test_size=0.2)
# Set up the data generators
train_generator = generator(train_samples,data_dir,batch_size=BATCH_SIZE)
validation_generator = generator(validation_samples,data_dir,batch_size=BATCH_SIZE)
# As we are adding the left & right images as well, so need x 3 times
total_samples = len(samples) * 3
actual_batch_size = BATCH_SIZE * 3
len_train = len(train_samples) * 3
len_valid = len(validation_samples) * 3
steps_per_epoch = len_train/actual_batch_size
validation_steps = len_valid/actual_batch_size
print('Total number of images used for training & validation:{}'.format(total_samples))
nVidiaModel_history = nVidiaNet(train_generator,validation_generator,steps_per_epoch,validation_steps,save_model_dir)
plt.plot(nVidiaModel_history.history['loss'])
plt.plot(nVidiaModel_history.history['val_loss'])
plt.title('model mean squared error loss')
plt.ylabel('mean squared error loss')
plt.xlabel('epoch')
plt.legend(['training set', 'validation set'], loc='upper right')
plt.show()
if __name__ == '__main__':
main()
|
[
"matplotlib.pyplot.title",
"keras.regularizers.l2",
"csv.reader",
"sklearn.model_selection.train_test_split",
"csv.Sniffer",
"matplotlib.pyplot.figure",
"keras.layers.Flatten",
"numpy.max",
"datetime.datetime.now",
"cv2.resize",
"matplotlib.pyplot.show",
"matplotlib.pyplot.legend",
"keras.optimizers.Adam",
"matplotlib.pyplot.text",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.hist",
"cv2.imread",
"numpy.where",
"keras.layers.Lambda",
"keras.layers.Dense",
"numpy.array",
"numpy.random.rand",
"keras.models.Sequential",
"sklearn.utils.shuffle",
"matplotlib.pyplot.xlabel"
] |
[((1728, 1755), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(14, 7)'}), '(figsize=(14, 7))\n', (1738, 1755), True, 'import matplotlib.pyplot as plt\n'), ((1759, 1778), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Count"""'], {}), "('Count')\n", (1769, 1778), True, 'import matplotlib.pyplot as plt\n'), ((1784, 1803), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Angle"""'], {}), "('Angle')\n", (1794, 1803), True, 'import matplotlib.pyplot as plt\n'), ((1860, 1939), 'matplotlib.pyplot.plot', 'plt.plot', (['angle_range', '[bar_height_if_uniform_dist, bar_height_if_uniform_dist]'], {}), '(angle_range, [bar_height_if_uniform_dist, bar_height_if_uniform_dist])\n', (1868, 1939), True, 'import matplotlib.pyplot as plt\n'), ((1942, 2027), 'matplotlib.pyplot.text', 'plt.text', (['angle_range[0]', '(bar_height_if_uniform_dist + 50)', '"""Uniform Distribution"""'], {}), "(angle_range[0], bar_height_if_uniform_dist + 50,\n 'Uniform Distribution')\n", (1950, 2027), True, 'import matplotlib.pyplot as plt\n'), ((2024, 2052), 'matplotlib.pyplot.title', 'plt.title', (['"""Angle Histogram"""'], {}), "('Angle Histogram')\n", (2033, 2052), True, 'import matplotlib.pyplot as plt\n'), ((2080, 2126), 'matplotlib.pyplot.hist', 'plt.hist', (['angles'], {'bins': 'bins', 'range': 'angle_range'}), '(angles, bins=bins, range=angle_range)\n', (2088, 2126), True, 'import matplotlib.pyplot as plt\n'), ((2129, 2139), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2137, 2139), True, 'import matplotlib.pyplot as plt\n'), ((7079, 7091), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (7089, 7091), False, 'from keras.models import Sequential\n'), ((9482, 9542), 'keras.optimizers.Adam', 'optimizers.Adam', ([], {'lr': 'LEARNING_RATE', 'decay': 'LEARNING_RATE_DECAY'}), '(lr=LEARNING_RATE, decay=LEARNING_RATE_DECAY)\n', (9497, 9542), False, 'from keras import optimizers\n'), ((10026, 10040), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (10038, 10040), False, 'from datetime import datetime\n'), ((11560, 11600), 'sklearn.model_selection.train_test_split', 'train_test_split', (['samples'], {'test_size': '(0.2)'}), '(samples, test_size=0.2)\n', (11576, 11600), False, 'from sklearn.model_selection import train_test_split\n'), ((12366, 12411), 'matplotlib.pyplot.plot', 'plt.plot', (["nVidiaModel_history.history['loss']"], {}), "(nVidiaModel_history.history['loss'])\n", (12374, 12411), True, 'import matplotlib.pyplot as plt\n'), ((12416, 12465), 'matplotlib.pyplot.plot', 'plt.plot', (["nVidiaModel_history.history['val_loss']"], {}), "(nVidiaModel_history.history['val_loss'])\n", (12424, 12465), True, 'import matplotlib.pyplot as plt\n'), ((12470, 12512), 'matplotlib.pyplot.title', 'plt.title', (['"""model mean squared error loss"""'], {}), "('model mean squared error loss')\n", (12479, 12512), True, 'import matplotlib.pyplot as plt\n'), ((12517, 12554), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""mean squared error loss"""'], {}), "('mean squared error loss')\n", (12527, 12554), True, 'import matplotlib.pyplot as plt\n'), ((12559, 12578), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (12569, 12578), True, 'import matplotlib.pyplot as plt\n'), ((12583, 12648), 'matplotlib.pyplot.legend', 'plt.legend', (["['training set', 'validation set']"], {'loc': '"""upper right"""'}), "(['training set', 'validation set'], loc='upper right')\n", (12593, 12648), True, 'import matplotlib.pyplot as plt\n'), ((12653, 12663), 'matplotlib.pyplot.show', 'plt.show', 
([], {}), '()\n', (12661, 12663), True, 'import matplotlib.pyplot as plt\n'), ((840, 859), 'csv.reader', 'csv.reader', (['csvfile'], {}), '(csvfile)\n', (850, 859), False, 'import csv\n'), ((2812, 2839), 'numpy.where', 'np.where', (['(bin_edges < angle)'], {}), '(bin_edges < angle)\n', (2820, 2839), True, 'import numpy as np\n'), ((3190, 3212), 'numpy.max', 'np.max', (['potential_bins'], {}), '(potential_bins)\n', (3196, 3212), True, 'import numpy as np\n'), ((4385, 4401), 'sklearn.utils.shuffle', 'shuffle', (['samples'], {}), '(samples)\n', (4392, 4401), False, 'from sklearn.utils import shuffle\n'), ((7117, 7176), 'keras.layers.Lambda', 'Lambda', (['(lambda x: x / 255.0 - 0.5)'], {'input_shape': '(66, 200, 3)'}), '(lambda x: x / 255.0 - 0.5, input_shape=(66, 200, 3))\n', (7123, 7176), False, 'from keras.layers import Convolution2D, Flatten, Dense, Lambda\n'), ((8617, 8626), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (8624, 8626), False, 'from keras.layers import Convolution2D, Flatten, Dense, Lambda\n'), ((9367, 9375), 'keras.layers.Dense', 'Dense', (['(1)'], {}), '(1)\n', (9372, 9375), False, 'from keras.layers import Convolution2D, Flatten, Dense, Lambda\n'), ((3006, 3034), 'numpy.where', 'np.where', (['(bin_edges == angle)'], {}), '(bin_edges == angle)\n', (3014, 3034), True, 'import numpy as np\n'), ((6475, 6491), 'numpy.array', 'np.array', (['images'], {}), '(images)\n', (6483, 6491), True, 'import numpy as np\n'), ((6514, 6530), 'numpy.array', 'np.array', (['angles'], {}), '(angles)\n', (6522, 6530), True, 'import numpy as np\n'), ((732, 745), 'csv.Sniffer', 'csv.Sniffer', ([], {}), '()\n', (743, 745), False, 'import csv\n'), ((3517, 3533), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (3531, 3533), True, 'import numpy as np\n'), ((5367, 5430), 'cv2.resize', 'cv2.resize', (['center_img', '(200, 66)'], {'interpolation': 'cv2.INTER_AREA'}), '(center_img, (200, 66), interpolation=cv2.INTER_AREA)\n', (5377, 5430), False, 'import cv2\n'), ((5857, 5918), 'cv2.resize', 'cv2.resize', (['left_img', '(200, 66)'], {'interpolation': 'cv2.INTER_AREA'}), '(left_img, (200, 66), interpolation=cv2.INTER_AREA)\n', (5867, 5918), False, 'import cv2\n'), ((6349, 6411), 'cv2.resize', 'cv2.resize', (['right_img', '(200, 66)'], {'interpolation': 'cv2.INTER_AREA'}), '(right_img, (200, 66), interpolation=cv2.INTER_AREA)\n', (6359, 6411), False, 'import cv2\n'), ((6663, 6702), 'sklearn.utils.shuffle', 'sklearn.utils.shuffle', (['X_train', 'y_train'], {}), '(X_train, y_train)\n', (6684, 6702), False, 'import sklearn\n'), ((7425, 7459), 'keras.regularizers.l2', 'regularizers.l2', (['L2_REGULARIZATION'], {}), '(L2_REGULARIZATION)\n', (7440, 7459), False, 'from keras import regularizers\n'), ((7679, 7713), 'keras.regularizers.l2', 'regularizers.l2', (['L2_REGULARIZATION'], {}), '(L2_REGULARIZATION)\n', (7694, 7713), False, 'from keras import regularizers\n'), ((7935, 7969), 'keras.regularizers.l2', 'regularizers.l2', (['L2_REGULARIZATION'], {}), '(L2_REGULARIZATION)\n', (7950, 7969), False, 'from keras import regularizers\n'), ((8190, 8224), 'keras.regularizers.l2', 'regularizers.l2', (['L2_REGULARIZATION'], {}), '(L2_REGULARIZATION)\n', (8205, 8224), False, 'from keras import regularizers\n'), ((8446, 8480), 'keras.regularizers.l2', 'regularizers.l2', (['L2_REGULARIZATION'], {}), '(L2_REGULARIZATION)\n', (8461, 8480), False, 'from keras import regularizers\n'), ((8786, 8820), 'keras.regularizers.l2', 'regularizers.l2', (['L2_REGULARIZATION'], {}), '(L2_REGULARIZATION)\n', (8801, 8820), 
False, 'from keras import regularizers\n'), ((8998, 9032), 'keras.regularizers.l2', 'regularizers.l2', (['L2_REGULARIZATION'], {}), '(L2_REGULARIZATION)\n', (9013, 9032), False, 'from keras import regularizers\n'), ((9211, 9245), 'keras.regularizers.l2', 'regularizers.l2', (['L2_REGULARIZATION'], {}), '(L2_REGULARIZATION)\n', (9226, 9245), False, 'from keras import regularizers\n'), ((5082, 5109), 'cv2.imread', 'cv2.imread', (['center_img_path'], {}), '(center_img_path)\n', (5092, 5109), False, 'import cv2\n'), ((5580, 5605), 'cv2.imread', 'cv2.imread', (['left_img_path'], {}), '(left_img_path)\n', (5590, 5605), False, 'import cv2\n'), ((6068, 6094), 'cv2.imread', 'cv2.imread', (['right_img_path'], {}), '(right_img_path)\n', (6078, 6094), False, 'import cv2\n')]
|
from keras.models import Sequential, load_model
from keras.callbacks import History, EarlyStopping, Callback
from keras.layers.recurrent import LSTM
from keras.layers import Bidirectional
from keras.losses import mse, binary_crossentropy,cosine
from keras.layers.core import Dense, Activation, Dropout
import numpy as np
import os
from matplotlib import pyplot as plt
from tensorflow import keras
import tensorflow as tf
class LSTM_NETWORK(object):
def __init__(self, input_dim,layers,batch_size=32,l_s=5,l_p=1):
"""input_dim_list must include the original data dimension"""
assert len(layers) >= 2
self.l_s = l_s
self.l_p = l_p
self.batch_size = batch_size
self.loss = 0#zero for mse, 1 for cosine similarity
self.cbs = [History(),EarlyStopping(monitor='val_loss', patience=5, min_delta=0.0003, verbose=0)]
model = Sequential()
model.add((LSTM(layers[0], input_shape=(l_s, input_dim),
return_sequences=True)))
#return_sequences=True)))
model.add(Dropout(0.3))
model.add(LSTM(layers[1], return_sequences=True))#return_sequences=True))
model.add(Dropout(0.3))
model.add(Dense(self.l_p*input_dim))
model.add(Activation("linear"))
# model.add(Dense(activation='linear', units=y_train.shape[2]))
if self.loss == 0:
model.compile(loss='mse', optimizer='adam')
else:
loss_fn = keras.losses.CosineSimilarity()
model.compile(loss=loss_fn, optimizer='adam')
# print("here is model summary")
#print(model.summary())
self.model = model
return
def create_one_layer_model(self,input_dim,layers,batch_size=32,l_s=5,l_p=1):
assert len(layers) >= 2
self.l_s = l_s
self.l_p = l_p
self.batch_size = batch_size
self.cbs = [History(),EarlyStopping(monitor='val_loss', patience=15, min_delta=0.0003, verbose=0)]
model = Sequential()
model.add((LSTM(layers[0], input_shape=(None, input_dim))))
model.add(Dropout(0.3))
model.add(Dense(self.l_p*input_dim))
model.add(Activation("linear"))
# model.add(Dense(activation='linear', units=y_train.shape[2]))
if self.loss == 0:
model.compile(loss='mse', optimizer='adam')
else:
loss_fn = keras.losses.CosineSimilarity()
model.compile(loss=loss_fn, optimizer='adam')
#import tensorflow as tf
#model.compile(loss=tf.keras.losses.CosineSimilarity(), optimizer='adam')
# print("here is model summary")
#print(model.summary())
#print("this is neww model")
self.model = model
return
def fit(self, X,y, epochs=100,validation_split=0.15, verbose=False,model_num=-1):
history = self.model.fit(X, y, batch_size=self.batch_size, epochs=epochs,
validation_split=validation_split, verbose=verbose, callbacks=self.cbs)
#print(history.history.keys())
# "Accuracy"
'''
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
plt.show()
'''
# "Loss"
'''
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
plt.show()
'''
if model_num!=-1:
self.model.save("LSTM_v"+str(model_num)+".h5")
return
def load_model(self,num):
self.model=load_model(os.path.join("", "LSTM_v"+ str(num)+ ".h5"))
return self.model
def predict(self, X_test):
'''
        Uses the trained LSTM model to predict test data arriving in batches
        Args:
            X_test (np array): numpy array of test inputs with dimensions [timesteps, l_s, input dimensions]
Returns:
y_hat (np array): predicted test values for each timestep in y_test
'''
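        # Shape sketch (assuming l_p = 1, the default): for X_test of shape
        # (timesteps, l_s, input_dim), each batch of l_s-step windows is predicted
        # separately and the concatenated output is reshaped at the end of this method
        # to (timesteps, input_dim), i.e. one predicted vector per timestep.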
print("Predicting by Patch")
y_hat = []#np.array([[[]]])
# print("y_hat intially",y_hat.shape)
num_batches = int((X_test.shape[0] - self.l_s) / self.batch_size)
print("number of batches",num_batches)
if num_batches < 0:
            raise ValueError("l_s (%s) too large for stream with length %s." % (self.l_s, X_test.shape[0]))
# simulate data arriving in batches
for i in range(1, num_batches + 2):
#print("Inside the loop")
prior_idx = (i - 1) * self.batch_size
idx = i * self.batch_size
if i == num_batches + 1:
idx = X_test.shape[0] # remaining values won't necessarily equal batch size
X_test_period = X_test[prior_idx:idx]
#print("Predict for batch")
#print("X_test_period",type(X_test_period),len(X_test_period))
y_hat_period = self.model.predict(X_test_period)
#print("y_hat_period out",y_hat_period.shape)
#y_hat_period=np.array(y_hat_period)
#print("y_hat_period after reshape",y_hat_period.shape)
#print("y_hat now",y_hat_period.shape)
if i ==1:
y_hat =y_hat_period
#y_hat_period=np.array(y_hat_period)
#print("y_hat now",y_hat_period.shape)
else:
y_hat = np.append(y_hat, y_hat_period)
#print("y_hat", y_hat.shape)
print("Out of loop, final transformation")
y_hat = y_hat.reshape(X_test.shape[0], X_test.shape[2])
print("y_hat final", y_hat.shape)
# np.save(os.path.join("data", anom['run_id'], "y_hat", anom["chan_id"] + ".npy"), np.array(y_hat))
return y_hat
def predict_all(self, X_test):
'''
        Uses the trained LSTM model to predict all test data in a single call.
        Args:
            X_test (np array): numpy array of test inputs with dimensions [timesteps, l_s, input dimensions]
Returns:
y_hat (np array): predicted test values for each timestep in y_test
'''
#print("Predicting All")
y_hat = self.model.predict(X_test)
#print("y_hat other",y_hat.shape)
return y_hat
|
[
"keras.layers.core.Dense",
"keras.callbacks.History",
"keras.layers.core.Activation",
"tensorflow.keras.losses.CosineSimilarity",
"numpy.append",
"keras.callbacks.EarlyStopping",
"keras.layers.core.Dropout",
"keras.layers.recurrent.LSTM",
"keras.models.Sequential"
] |
[((887, 899), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (897, 899), False, 'from keras.models import Sequential, load_model\n'), ((2012, 2024), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (2022, 2024), False, 'from keras.models import Sequential, load_model\n'), ((784, 793), 'keras.callbacks.History', 'History', ([], {}), '()\n', (791, 793), False, 'from keras.callbacks import History, EarlyStopping, Callback\n'), ((794, 868), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_loss"""', 'patience': '(5)', 'min_delta': '(0.0003)', 'verbose': '(0)'}), "(monitor='val_loss', patience=5, min_delta=0.0003, verbose=0)\n", (807, 868), False, 'from keras.callbacks import History, EarlyStopping, Callback\n'), ((919, 987), 'keras.layers.recurrent.LSTM', 'LSTM', (['layers[0]'], {'input_shape': '(l_s, input_dim)', 'return_sequences': '(True)'}), '(layers[0], input_shape=(l_s, input_dim), return_sequences=True)\n', (923, 987), False, 'from keras.layers.recurrent import LSTM\n'), ((1082, 1094), 'keras.layers.core.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (1089, 1094), False, 'from keras.layers.core import Dense, Activation, Dropout\n'), ((1114, 1152), 'keras.layers.recurrent.LSTM', 'LSTM', (['layers[1]'], {'return_sequences': '(True)'}), '(layers[1], return_sequences=True)\n', (1118, 1152), False, 'from keras.layers.recurrent import LSTM\n'), ((1196, 1208), 'keras.layers.core.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (1203, 1208), False, 'from keras.layers.core import Dense, Activation, Dropout\n'), ((1228, 1255), 'keras.layers.core.Dense', 'Dense', (['(self.l_p * input_dim)'], {}), '(self.l_p * input_dim)\n', (1233, 1255), False, 'from keras.layers.core import Dense, Activation, Dropout\n'), ((1273, 1293), 'keras.layers.core.Activation', 'Activation', (['"""linear"""'], {}), "('linear')\n", (1283, 1293), False, 'from keras.layers.core import Dense, Activation, Dropout\n'), ((1487, 1518), 'tensorflow.keras.losses.CosineSimilarity', 'keras.losses.CosineSimilarity', ([], {}), '()\n', (1516, 1518), False, 'from tensorflow import keras\n'), ((1908, 1917), 'keras.callbacks.History', 'History', ([], {}), '()\n', (1915, 1917), False, 'from keras.callbacks import History, EarlyStopping, Callback\n'), ((1918, 1993), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_loss"""', 'patience': '(15)', 'min_delta': '(0.0003)', 'verbose': '(0)'}), "(monitor='val_loss', patience=15, min_delta=0.0003, verbose=0)\n", (1931, 1993), False, 'from keras.callbacks import History, EarlyStopping, Callback\n'), ((2044, 2090), 'keras.layers.recurrent.LSTM', 'LSTM', (['layers[0]'], {'input_shape': '(None, input_dim)'}), '(layers[0], input_shape=(None, input_dim))\n', (2048, 2090), False, 'from keras.layers.recurrent import LSTM\n'), ((2111, 2123), 'keras.layers.core.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (2118, 2123), False, 'from keras.layers.core import Dense, Activation, Dropout\n'), ((2143, 2170), 'keras.layers.core.Dense', 'Dense', (['(self.l_p * input_dim)'], {}), '(self.l_p * input_dim)\n', (2148, 2170), False, 'from keras.layers.core import Dense, Activation, Dropout\n'), ((2188, 2208), 'keras.layers.core.Activation', 'Activation', (['"""linear"""'], {}), "('linear')\n", (2198, 2208), False, 'from keras.layers.core import Dense, Activation, Dropout\n'), ((2402, 2433), 'tensorflow.keras.losses.CosineSimilarity', 'keras.losses.CosineSimilarity', ([], {}), '()\n', (2431, 2433), False, 'from tensorflow import keras\n'), ((5620, 
5650), 'numpy.append', 'np.append', (['y_hat', 'y_hat_period'], {}), '(y_hat, y_hat_period)\n', (5629, 5650), True, 'import numpy as np\n')]
|
"""Module containing the CLI programs for histoprint."""
import numpy as np
import click
from histoprint import *
import histoprint.formatter as formatter
@click.command()
@click.argument("infile", type=click.Path(exists=True, dir_okay=False, allow_dash=True))
@click.option(
"-b",
"--bins",
type=str,
default="10",
help="Number of bins or space-separated bin edges.",
)
@click.option("-t", "--title", type=str, default="", help="Title of the histogram.")
@click.option(
"--stack/--nostack", type=bool, default=False, help="Stack the histograms."
)
@click.option(
"-s/-S",
"--summary/--nosummary",
type=bool,
default=False,
help="Print summary statistics.",
)
@click.option(
"-l",
"--label",
"labels",
type=str,
multiple=True,
default=("",),
help="Labels for the data, one for each column.",
)
@click.option(
"--symbols",
type=str,
default=formatter.DEFAULT_SYMBOLS,
help="Symbol cycle for multiple histograms. Choices & default: '%s'"
% (formatter.DEFAULT_SYMBOLS,),
)
@click.option(
"--fg-colors",
type=str,
default=formatter.DEFAULT_FG_COLORS,
help="Colour cycle for foreground colours. Default: '%s', Choices: '0rgbcmykwRGBCMYKW'"
% (formatter.DEFAULT_FG_COLORS,),
)
@click.option(
"--bg-colors",
type=str,
default=formatter.DEFAULT_BG_COLORS,
help="Colour cycle for background colours. Default: '%s', Choices: '0rgbcmykwRGBCMYKW'"
% (formatter.DEFAULT_BG_COLORS,),
)
@click.option(
"-f",
"--field",
"fields",
type=str,
multiple=True,
help="Which fields to histogram. Interpretation of the fields depends on "
"the file format. TXT files only support integers for column numbers "
"starting at 0. For CSV files, the fields must be the names of the columns "
"as specified in the first line of the file. When plotting from ROOT files, "
"at least one field must be specified. This can either be the path to a "
"single TH1, or one or more paths to TTree branches.",
)
@click.version_option()
def histoprint(infile, **kwargs):
"""Read INFILE and print a histogram of the contained columns.
INFILE can be '-', in which case the data is read from STDIN.
"""
# Try to interpret file as textfile
try:
_histoprint_txt(infile, **kwargs)
exit(0)
except ValueError:
pass
# Try to interpret file as CSV file
try:
_histoprint_csv(infile, **kwargs)
exit(0)
except ImportError:
click.echo("Cannot try CSV file format. Pandas module not found.", err=True)
except UnicodeDecodeError:
pass
# Try to interpret file as ROOT file
try:
_histoprint_root(infile, **kwargs)
exit(0)
except ImportError:
click.echo("Cannot try ROOT file format. Uproot module not found.", err=True)
click.echo("Could not interpret file format.", err=True)
exit(1)
def _bin_edges(kwargs, data):
"""Get the desired bin edges."""
bins = kwargs.pop("bins", "10")
bins = np.fromiter(bins.split(), dtype=float)
if len(bins) == 1:
bins = int(bins[0])
if isinstance(bins, int):
minval = np.inf
maxval = -np.inf
for d in data:
minval = min(minval, np.nanmin(d))
maxval = max(maxval, np.nanmax(d))
bins = np.linspace(minval, maxval, bins + 1)
return bins
def _histoprint_txt(infile, **kwargs):
"""Interpret file as as simple whitespace separated table."""
# Read the data
data = np.loadtxt(click.open_file(infile), ndmin=2)
data = data.T
# Interpret field numbers
fields = kwargs.pop("fields", [])
if len(fields) > 0:
try:
fields = [int(f) for f in fields]
except ValueError:
click.echo("Fields for a TXT file must be integers.", err=True)
exit(1)
try:
data = data[fields]
except KeyError:
click.echo("Field out of bounds.", err=True)
exit(1)
# Interpret bins
bins = _bin_edges(kwargs, data)
# Create the histogram(s)
hist = [[], bins]
for d in data:
hist[0].append(np.histogram(d, bins=bins)[0])
# Print the histogram
print_hist(hist, **kwargs)
def _histoprint_csv(infile, **kwargs):
"""Interpret file as as CSV file."""
import pandas as pd
# Read the data
data = pd.read_csv(click.open_file(infile))
# Interpret field numbers/names
fields = list(kwargs.pop("fields", []))
if len(fields) > 0:
try:
data = data[fields]
except KeyError:
click.echo("Unknown column name.", err=True)
exit(1)
# Get default columns labels
if kwargs.get("labels", ("",)) == ("",):
kwargs["labels"] = data.columns
# Convert to array
data = data.to_numpy().T
# Interpret bins
bins = _bin_edges(kwargs, data)
# Create the histogram(s)
hist = [[], bins]
for d in data:
hist[0].append(np.histogram(d, bins=bins)[0])
# Print the histogram
print_hist(hist, **kwargs)
def _histoprint_root(infile, **kwargs):
"""Interpret file as as ROOT file."""
import uproot as up
# Open root file
F = up.open(infile)
# Interpret field names
fields = list(kwargs.pop("fields", []))
if len(fields) == 0:
click.echo("Must specify at least on field for ROOT files.", err=True)
click.echo(F.keys())
exit(1)
# Get default columns labels
if kwargs.get("labels", ("",)) == ("",):
kwargs["labels"] = [field.split("/")[-1] for field in fields]
# Read the data
if len(fields) == 1:
        # Possibly a single histogram
try:
hist = F[fields[0]].numpy()
except (AttributeError, KeyError):
pass
else:
kwargs.pop("bins", None) # Get rid of useless parameter
print_hist(hist, **kwargs)
return
data = []
for field in fields:
branch = F
for key in field.split("/"):
try:
branch = branch[key]
except KeyError:
click.echo(
"Could not find key '%s'. Possible values: %s"
% (key, branch.keys())
)
exit(1)
try:
d = np.array(branch.array().flatten())
except ValueError:
click.echo(
"Could not interpret root object '%s'. Possible child branches: %s"
% (key, branch.keys())
)
exit(1)
data.append(d)
# Interpret bins
bins = _bin_edges(kwargs, data)
# Create the histogram(s)
hist = [[], bins]
for d in data:
hist[0].append(np.histogram(d, bins=bins)[0])
# Print the histogram
print_hist(hist, **kwargs)
|
[
"click.version_option",
"click.option",
"click.echo",
"numpy.nanmin",
"click.command",
"click.open_file",
"numpy.histogram",
"numpy.linspace",
"click.Path",
"uproot.open",
"numpy.nanmax"
] |
[((159, 174), 'click.command', 'click.command', ([], {}), '()\n', (172, 174), False, 'import click\n'), ((265, 375), 'click.option', 'click.option', (['"""-b"""', '"""--bins"""'], {'type': 'str', 'default': '"""10"""', 'help': '"""Number of bins or space-separated bin edges."""'}), "('-b', '--bins', type=str, default='10', help=\n 'Number of bins or space-separated bin edges.')\n", (277, 375), False, 'import click\n'), ((395, 483), 'click.option', 'click.option', (['"""-t"""', '"""--title"""'], {'type': 'str', 'default': '""""""', 'help': '"""Title of the histogram."""'}), "('-t', '--title', type=str, default='', help=\n 'Title of the histogram.')\n", (407, 483), False, 'import click\n'), ((480, 574), 'click.option', 'click.option', (['"""--stack/--nostack"""'], {'type': 'bool', 'default': '(False)', 'help': '"""Stack the histograms."""'}), "('--stack/--nostack', type=bool, default=False, help=\n 'Stack the histograms.')\n", (492, 574), False, 'import click\n'), ((577, 687), 'click.option', 'click.option', (['"""-s/-S"""', '"""--summary/--nosummary"""'], {'type': 'bool', 'default': '(False)', 'help': '"""Print summary statistics."""'}), "('-s/-S', '--summary/--nosummary', type=bool, default=False,\n help='Print summary statistics.')\n", (589, 687), False, 'import click\n'), ((708, 842), 'click.option', 'click.option', (['"""-l"""', '"""--label"""', '"""labels"""'], {'type': 'str', 'multiple': '(True)', 'default': "('',)", 'help': '"""Labels for the data, one for each column."""'}), "('-l', '--label', 'labels', type=str, multiple=True, default=(\n '',), help='Labels for the data, one for each column.')\n", (720, 842), False, 'import click\n'), ((870, 1051), 'click.option', 'click.option', (['"""--symbols"""'], {'type': 'str', 'default': 'formatter.DEFAULT_SYMBOLS', 'help': '("Symbol cycle for multiple histograms. Choices & default: \'%s\'" % (\n formatter.DEFAULT_SYMBOLS,))'}), '(\'--symbols\', type=str, default=formatter.DEFAULT_SYMBOLS, help\n ="Symbol cycle for multiple histograms. Choices & default: \'%s\'" % (\n formatter.DEFAULT_SYMBOLS,))\n', (882, 1051), False, 'import click\n'), ((1066, 1276), 'click.option', 'click.option', (['"""--fg-colors"""'], {'type': 'str', 'default': 'formatter.DEFAULT_FG_COLORS', 'help': '("Colour cycle for foreground colours. Default: \'%s\', Choices: \'0rgbcmykwRGBCMYKW\'"\n % (formatter.DEFAULT_FG_COLORS,))'}), '(\'--fg-colors\', type=str, default=formatter.DEFAULT_FG_COLORS,\n help=\n "Colour cycle for foreground colours. Default: \'%s\', Choices: \'0rgbcmykwRGBCMYKW\'"\n % (formatter.DEFAULT_FG_COLORS,))\n', (1078, 1276), False, 'import click\n'), ((1287, 1497), 'click.option', 'click.option', (['"""--bg-colors"""'], {'type': 'str', 'default': 'formatter.DEFAULT_BG_COLORS', 'help': '("Colour cycle for background colours. Default: \'%s\', Choices: \'0rgbcmykwRGBCMYKW\'"\n % (formatter.DEFAULT_BG_COLORS,))'}), '(\'--bg-colors\', type=str, default=formatter.DEFAULT_BG_COLORS,\n help=\n "Colour cycle for background colours. Default: \'%s\', Choices: \'0rgbcmykwRGBCMYKW\'"\n % (formatter.DEFAULT_BG_COLORS,))\n', (1299, 1497), False, 'import click\n'), ((1508, 1997), 'click.option', 'click.option', (['"""-f"""', '"""--field"""', '"""fields"""'], {'type': 'str', 'multiple': '(True)', 'help': '"""Which fields to histogram. Interpretation of the fields depends on the file format. TXT files only support integers for column numbers starting at 0. For CSV files, the fields must be the names of the columns as specified in the first line of the file. 
When plotting from ROOT files, at least one field must be specified. This can either be the path to a single TH1, or one or more paths to TTree branches."""'}), "('-f', '--field', 'fields', type=str, multiple=True, help=\n 'Which fields to histogram. Interpretation of the fields depends on the file format. TXT files only support integers for column numbers starting at 0. For CSV files, the fields must be the names of the columns as specified in the first line of the file. When plotting from ROOT files, at least one field must be specified. This can either be the path to a single TH1, or one or more paths to TTree branches.'\n )\n", (1520, 1997), False, 'import click\n'), ((2051, 2073), 'click.version_option', 'click.version_option', ([], {}), '()\n', (2071, 2073), False, 'import click\n'), ((2880, 2936), 'click.echo', 'click.echo', (['"""Could not interpret file format."""'], {'err': '(True)'}), "('Could not interpret file format.', err=True)\n", (2890, 2936), False, 'import click\n'), ((5267, 5282), 'uproot.open', 'up.open', (['infile'], {}), '(infile)\n', (5274, 5282), True, 'import uproot as up\n'), ((206, 262), 'click.Path', 'click.Path', ([], {'exists': '(True)', 'dir_okay': '(False)', 'allow_dash': '(True)'}), '(exists=True, dir_okay=False, allow_dash=True)\n', (216, 262), False, 'import click\n'), ((3366, 3403), 'numpy.linspace', 'np.linspace', (['minval', 'maxval', '(bins + 1)'], {}), '(minval, maxval, bins + 1)\n', (3377, 3403), True, 'import numpy as np\n'), ((3570, 3593), 'click.open_file', 'click.open_file', (['infile'], {}), '(infile)\n', (3585, 3593), False, 'import click\n'), ((4437, 4460), 'click.open_file', 'click.open_file', (['infile'], {}), '(infile)\n', (4452, 4460), False, 'import click\n'), ((5389, 5459), 'click.echo', 'click.echo', (['"""Must specify at least on field for ROOT files."""'], {'err': '(True)'}), "('Must specify at least on field for ROOT files.', err=True)\n", (5399, 5459), False, 'import click\n'), ((2534, 2610), 'click.echo', 'click.echo', (['"""Cannot try CSV file format. Pandas module not found."""'], {'err': '(True)'}), "('Cannot try CSV file format. Pandas module not found.', err=True)\n", (2544, 2610), False, 'import click\n'), ((2797, 2874), 'click.echo', 'click.echo', (['"""Cannot try ROOT file format. Uproot module not found."""'], {'err': '(True)'}), "('Cannot try ROOT file format. 
Uproot module not found.', err=True)\n", (2807, 2874), False, 'import click\n'), ((3290, 3302), 'numpy.nanmin', 'np.nanmin', (['d'], {}), '(d)\n', (3299, 3302), True, 'import numpy as np\n'), ((3337, 3349), 'numpy.nanmax', 'np.nanmax', (['d'], {}), '(d)\n', (3346, 3349), True, 'import numpy as np\n'), ((3813, 3876), 'click.echo', 'click.echo', (['"""Fields for a TXT file must be integers."""'], {'err': '(True)'}), "('Fields for a TXT file must be integers.', err=True)\n", (3823, 3876), False, 'import click\n'), ((3979, 4023), 'click.echo', 'click.echo', (['"""Field out of bounds."""'], {'err': '(True)'}), "('Field out of bounds.', err=True)\n", (3989, 4023), False, 'import click\n'), ((4197, 4223), 'numpy.histogram', 'np.histogram', (['d'], {'bins': 'bins'}), '(d, bins=bins)\n', (4209, 4223), True, 'import numpy as np\n'), ((4649, 4693), 'click.echo', 'click.echo', (['"""Unknown column name."""'], {'err': '(True)'}), "('Unknown column name.', err=True)\n", (4659, 4693), False, 'import click\n'), ((5039, 5065), 'numpy.histogram', 'np.histogram', (['d'], {'bins': 'bins'}), '(d, bins=bins)\n', (5051, 5065), True, 'import numpy as np\n'), ((6799, 6825), 'numpy.histogram', 'np.histogram', (['d'], {'bins': 'bins'}), '(d, bins=bins)\n', (6811, 6825), True, 'import numpy as np\n')]
|
import unittest
import libpysal
from libpysal.common import pandas, RTOL, ATOL
from esda.geary_local_mv import Geary_Local_MV
import numpy as np
PANDAS_EXTINCT = pandas is None
class Geary_Local_MV_Tester(unittest.TestCase):
def setUp(self):
np.random.seed(100)
self.w = libpysal.io.open(libpysal.examples.get_path("stl.gal")).read()
f = libpysal.io.open(libpysal.examples.get_path("stl_hom.txt"))
self.y1 = np.array(f.by_col['HR8893'])
self.y2 = np.array(f.by_col['HC8488'])
def test_local_geary_mv(self):
lG_mv = Geary_Local_MV(connectivity=self.w).fit([self.y1, self.y2])
print(lG_mv.p_sim[0])
self.assertAlmostEqual(lG_mv.localG[0], 0.4096931479581422)
self.assertAlmostEqual(lG_mv.p_sim[0], 0.211)
suite = unittest.TestSuite()
test_classes = [
Geary_Local_MV_Tester
]
for i in test_classes:
a = unittest.TestLoader().loadTestsFromTestCase(i)
suite.addTest(a)
if __name__ == "__main__":
runner = unittest.TextTestRunner()
runner.run(suite)
|
[
"numpy.random.seed",
"unittest.TextTestRunner",
"unittest.TestSuite",
"esda.geary_local_mv.Geary_Local_MV",
"numpy.array",
"unittest.TestLoader",
"libpysal.examples.get_path"
] |
[((803, 823), 'unittest.TestSuite', 'unittest.TestSuite', ([], {}), '()\n', (821, 823), False, 'import unittest\n'), ((1009, 1034), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {}), '()\n', (1032, 1034), False, 'import unittest\n'), ((256, 275), 'numpy.random.seed', 'np.random.seed', (['(100)'], {}), '(100)\n', (270, 275), True, 'import numpy as np\n'), ((446, 474), 'numpy.array', 'np.array', (["f.by_col['HR8893']"], {}), "(f.by_col['HR8893'])\n", (454, 474), True, 'import numpy as np\n'), ((493, 521), 'numpy.array', 'np.array', (["f.by_col['HC8488']"], {}), "(f.by_col['HC8488'])\n", (501, 521), True, 'import numpy as np\n'), ((385, 426), 'libpysal.examples.get_path', 'libpysal.examples.get_path', (['"""stl_hom.txt"""'], {}), "('stl_hom.txt')\n", (411, 426), False, 'import libpysal\n'), ((900, 921), 'unittest.TestLoader', 'unittest.TestLoader', ([], {}), '()\n', (919, 921), False, 'import unittest\n'), ((574, 609), 'esda.geary_local_mv.Geary_Local_MV', 'Geary_Local_MV', ([], {'connectivity': 'self.w'}), '(connectivity=self.w)\n', (588, 609), False, 'from esda.geary_local_mv import Geary_Local_MV\n'), ((310, 347), 'libpysal.examples.get_path', 'libpysal.examples.get_path', (['"""stl.gal"""'], {}), "('stl.gal')\n", (336, 347), False, 'import libpysal\n')]
|
import pytorch_lightning as pl
import torch
import torch.nn as nn
import numpy as np
import matplotlib.pyplot as plt
from typing import List, Tuple, Dict, Any, Union, Iterable
try:
import genomics_gans
except:
exec(open('__init__.py').read())
import genomics_gans
from genomics_gans.prepare_data.data_modules import TabularDataset
class LitFFNN(pl.LightningModule):
# ----------------------------------
# Initialize constants and NN architecture
# ----------------------------------
def __init__(self, network: nn.Module, train_set: TabularDataset,
val_set: TabularDataset, test_set: TabularDataset):
""" Feed-Forward Neural Network System
Args:
            network (nn.Module): the feed-forward network to train
            train_set, val_set, test_set (TabularDataset): train/validation/test splits
"""
super().__init__()
# TODO: train-val-test splits
self.network = network
# Hard-coded constants
self.loss_fn = nn.NLLLoss()
self.lr = 1e-2
self.N_CLASSES = 3
self.epoch = 0
self.epoch_train_losses = []
self.epoch_val_losses = []
self.best_val_epoch = 0
def forward(self, x):
logits = self.network(x)
return logits
def configure_optimizers(self):
optimizer = torch.optim.Adam(
params = self.parameters(), lr = self.lr)
return optimizer
# ----------------------------------
# Training, validation, and test steps
# ----------------------------------
def training_step(self, batch, batch_idx):
x, y = batch
y = y.flatten().long()
logits = self(x)
loss = self.loss_fn(logits, y)
self.log('train_loss', loss, on_step=True, on_epoch=True,
prog_bar=True)
return loss
def validation_step(self, batch, batch_idx, val=True):
x, y = batch
y = y.flatten().long()
# compute loss
logits = self(x)
loss = self.loss_fn(logits, y)
self.log('val_loss', loss, on_step=True, on_epoch=True,
prog_bar=True) # self.log interacts with TensorBoard
return loss
def test_step(self, batch, batch_idx):
x, y = batch
y = y.flatten().long()
# compute loss
logits = self(x)
loss = self.loss_fn(logits, y)
self.log('test_loss', loss, on_step=True, on_epoch=True,
prog_bar=False)
return loss
def training_epoch_end(self, outputs: List[Any]):
outputs: List[torch.Tensor] = [list(d.values())[0] for d in outputs]
        total = torch.zeros(1, dtype=float).to(self.device)
        for batch_loss in outputs:
            total += batch_loss.to(self.device)
        # Average the accumulated loss over the number of batches in the epoch.
        avg_batch_loss = total / len(outputs)
        self.epoch_train_losses.append({avg_batch_loss[0].item()})
def validation_epoch_end(self, outputs: List[Any]):
        total = torch.zeros(1, dtype=float).to(self.device)
        for batch_loss in outputs:
            total += batch_loss.to(self.device)
        # Average the accumulated loss over the number of batches in the epoch.
        avg_batch_loss = total / len(outputs)
        self.epoch_val_losses.append({avg_batch_loss[0].item()})
# ---------------------------------------------------------------
# Custom training for evolutionary algorithm
# --------------------------------------------------------------
def custom_training_step(self, verbose=False):
self.network.train()
train_loader = self.train_dl
train_loss: float = 0
for idx, batch in enumerate(train_loader):
            self.optimizer.zero_grad() # clears parameter gradient buffers
inputs, targets = batch
# transfer batch data to computation device
inputs, targets = [
tensor.to(self.device) for tensor in [inputs, targets]]
targets = targets.long() # converts dtype to Long
output = self.network(inputs)
loss = self.loss_fn(output, targets.flatten())
loss.backward() # back propagation
self.optimizer.step() # update model weights
train_loss += loss.data.item()
if (idx % 10 == 0) and verbose:
print(f"epoch {self.epoch+1}/{self.n_epochs}, "
+ f"batch {idx}.")
train_loss = train_loss / len(train_loader)
return train_loss
def custom_validation_step(self):
val_loader = self.test_dl
val_loss = 0.0
self.network.eval()
for batch in val_loader:
inputs, targets = batch
inputs, targets = [tensor.to(self.device) for tensor in batch]
targets = targets.long() # converts dtype to Long
output = self.network(inputs)
loss = self.loss_fn(output, targets.flatten())
val_loss += loss.data.item()
val_loss = val_loss / len(val_loader)
return val_loss
def custom_train(self, n_epochs, plot=True, verbose=False, plot_train=False):
train_loader = self.train_dl
val_loader = self.test_dl
device=self.device
self.network.to(self.device)
train_losses, val_losses = [], []
best_val_loss = np.infty
best_val_epoch = 0
early_stopping_buffer = 10
epoch = 0
best_params = None
for epoch in range(n_epochs):
# Training
train_loss = self.custom_training_step()
train_losses.append(train_loss)
# Validation
val_loss = self.custom_validation_step()
val_losses.append(val_loss)
if val_loss < best_val_loss:
best_params = self.network.parameters()
best_val_loss = val_loss
best_val_epoch = epoch
# If validation loss fails to decrease for some number of epochs
# end training
if np.abs(epoch - best_val_epoch) > early_stopping_buffer:
break
print(f"Epoch: {epoch}, Training Loss: {train_loss:.3f}, "
+f"Validation loss: {val_loss:.3f}")
#self.network.parameters = best_params
self.best_val_loss = best_val_loss
self.best_val_epoch = best_val_epoch
if plot:
skip_frames = 3
fig, ax = plt.subplots()
fig.tight_layout()
if plot_train:
ax.plot(np.arange(epoch + 1)[skip_frames:],
train_losses[skip_frames:], '-', label="training set")
ax.plot(np.arange(epoch + 1)[skip_frames:],
val_losses[skip_frames:], '-', label="test set")
ax.set(xlabel="Epoch", ylabel="Loss")
ax.legend()
plt.show()
# ----------------------------------
# Helper functions - Use post-training
# ----------------------------------
def predict(self, x: torch.Tensor) -> torch.Tensor:
self.eval()
x.to(self.device)
logits = self.network(x)
preds = torch.argmax(input = logits, dim=1)
return preds
def accuracy(self, pred: torch.Tensor, target: torch.Tensor):
self.eval()
if isinstance(pred, torch.Tensor) and isinstance(target, torch.Tensor):
pred, target = [t.to(self.device) for t in [pred, target]]
elif isinstance(pred, np.ndarray) and isinstance(target, np.ndarray):
tensors = [torch.Tensor(t).to(self.device) for t in [pred, target]]
pred, target = tensors
else:
raise ValueError("The types of `pred` and `target` must match. "
+ "These can be np.ndarrays or torch.Tensors.")
accuracy = pl.metrics.functional.accuracy(pred, target)
return accuracy
def f1(self, pred: torch.Tensor, target: torch.Tensor):
self.eval()
pred, target = [t.flatten() for t in [pred, target]]
if isinstance(pred, torch.Tensor) and isinstance(target, torch.Tensor):
pred, target = [t.to(self.device) for t in [pred, target]]
elif isinstance(pred, np.ndarray) and isinstance(target, np.ndarray):
tensors = [torch.Tensor(t).to(self.device) for t in [pred, target]]
pred, target = tensors
else:
raise ValueError("The types of `pred` and `target` must match. "
+ "These can be np.ndarrays or torch.Tensors.")
f1 = pl.metrics.functional.f1(
preds = pred, target = target, num_classes = 3, multilabel = True)
return f1
def multiclass_aucroc(self, pred: torch.Tensor, target: torch.Tensor):
self.eval()
if isinstance(pred, torch.Tensor) and isinstance(target, torch.Tensor):
pred, target = [t.to(self.device) for t in [pred, target]]
elif isinstance(pred, np.ndarray) and isinstance(target, np.ndarray):
tensors = [torch.Tensor(t).to(self.device) for t in [pred, target]]
pred, target = tensors
else:
raise ValueError("The types of `pred` and `target` must match. "
+ "These can be np.ndarrays or torch.Tensors.")
auc_roc = pl.metrics.functional.classification.multiclass_auroc(
pred = pred, target = target)
return auc_roc
def plot_losses(self, plot_train=True):
skip_frames = 1
fig, ax = plt.subplots()
fig.tight_layout()
n_epochs = len(self.epoch_val_losses)
self.epoch_train_losses = [s.pop() for s in self.epoch_train_losses]
self.epoch_val_losses = [s.pop() for s in self.epoch_val_losses]
if plot_train:
n_epochs = len(self.epoch_train_losses)
ax.plot(np.arange(n_epochs)[skip_frames:],
self.epoch_train_losses[skip_frames:], label="train")
ax.plot(np.arange(n_epochs)[skip_frames:],
self.epoch_val_losses[1:][skip_frames:], label="val")
ax.set(xlabel="Epoch", ylabel="Loss")
ax.legend()
plt.show()
|
[
"pytorch_lightning.metrics.functional.classification.multiclass_auroc",
"matplotlib.pyplot.show",
"numpy.abs",
"torch.argmax",
"pytorch_lightning.metrics.functional.f1",
"pytorch_lightning.metrics.functional.accuracy",
"torch.nn.NLLLoss",
"torch.Tensor",
"numpy.arange",
"torch.zeros",
"matplotlib.pyplot.subplots"
] |
[((917, 929), 'torch.nn.NLLLoss', 'nn.NLLLoss', ([], {}), '()\n', (927, 929), True, 'import torch.nn as nn\n'), ((7044, 7077), 'torch.argmax', 'torch.argmax', ([], {'input': 'logits', 'dim': '(1)'}), '(input=logits, dim=1)\n', (7056, 7077), False, 'import torch\n'), ((7707, 7751), 'pytorch_lightning.metrics.functional.accuracy', 'pl.metrics.functional.accuracy', (['pred', 'target'], {}), '(pred, target)\n', (7737, 7751), True, 'import pytorch_lightning as pl\n'), ((8430, 8517), 'pytorch_lightning.metrics.functional.f1', 'pl.metrics.functional.f1', ([], {'preds': 'pred', 'target': 'target', 'num_classes': '(3)', 'multilabel': '(True)'}), '(preds=pred, target=target, num_classes=3,\n multilabel=True)\n', (8454, 8517), True, 'import pytorch_lightning as pl\n'), ((9174, 9253), 'pytorch_lightning.metrics.functional.classification.multiclass_auroc', 'pl.metrics.functional.classification.multiclass_auroc', ([], {'pred': 'pred', 'target': 'target'}), '(pred=pred, target=target)\n', (9227, 9253), True, 'import pytorch_lightning as pl\n'), ((9381, 9395), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (9393, 9395), True, 'import matplotlib.pyplot as plt\n'), ((10021, 10031), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10029, 10031), True, 'import matplotlib.pyplot as plt\n'), ((6325, 6339), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (6337, 6339), True, 'import matplotlib.pyplot as plt\n'), ((6746, 6756), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6754, 6756), True, 'import matplotlib.pyplot as plt\n'), ((2565, 2592), 'torch.zeros', 'torch.zeros', (['(1)'], {'dtype': 'float'}), '(1, dtype=float)\n', (2576, 2592), False, 'import torch\n'), ((2893, 2920), 'torch.zeros', 'torch.zeros', (['(1)'], {'dtype': 'float'}), '(1, dtype=float)\n', (2904, 2920), False, 'import torch\n'), ((5902, 5932), 'numpy.abs', 'np.abs', (['(epoch - best_val_epoch)'], {}), '(epoch - best_val_epoch)\n', (5908, 5932), True, 'import numpy as np\n'), ((9841, 9860), 'numpy.arange', 'np.arange', (['n_epochs'], {}), '(n_epochs)\n', (9850, 9860), True, 'import numpy as np\n'), ((6554, 6574), 'numpy.arange', 'np.arange', (['(epoch + 1)'], {}), '(epoch + 1)\n', (6563, 6574), True, 'import numpy as np\n'), ((9715, 9734), 'numpy.arange', 'np.arange', (['n_epochs'], {}), '(n_epochs)\n', (9724, 9734), True, 'import numpy as np\n'), ((6422, 6442), 'numpy.arange', 'np.arange', (['(epoch + 1)'], {}), '(epoch + 1)\n', (6431, 6442), True, 'import numpy as np\n'), ((7440, 7455), 'torch.Tensor', 'torch.Tensor', (['t'], {}), '(t)\n', (7452, 7455), False, 'import torch\n'), ((8170, 8185), 'torch.Tensor', 'torch.Tensor', (['t'], {}), '(t)\n', (8182, 8185), False, 'import torch\n'), ((8909, 8924), 'torch.Tensor', 'torch.Tensor', (['t'], {}), '(t)\n', (8921, 8924), False, 'import torch\n')]
|
import os
import pickle
from collections import defaultdict
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats
def add_capitals(dico):
return {**dico, **{key[0].capitalize() + key[1:]: item for key, item in dico.items()}}
COLORS = {
'causal': 'blue',
'anti': 'red',
'joint': 'green',
'causal_average': 'darkblue',
'anti_average': 'darkred',
'joint_average': 'darkgreen',
'MAP_uniform': 'yellow',
'MAP_source': 'gold',
# guess
'CausalGuessX': 'skyblue',
'CausalGuessY': 'darkcyan',
'AntiGuessX': 'salmon',
'AntiGuessY': 'chocolate',
}
MARKERS = {key: 'o' for key in COLORS}
MARKERS['causal'] = '^'
MARKERS['anti'] = 'v'
COLORS = add_capitals(COLORS)
MARKERS = add_capitals(MARKERS)
def value_at_step(trajectory, nsteps=1000):
"""Return the KL and the integral KL up to nsteps."""
steps = trajectory['steps']
index = np.searchsorted(steps, nsteps) - 1
ans = {}
# ans['end_step'] = steps[index]
for key, item in trajectory.items():
if key.startswith('kl_'):
ans[key[3:]] = item[index].mean()
# ans['endkl_' + key[3:]] = item[index].mean()
# ans['intkl_' + key[3:]] = item[:index].mean()
return ans
def get_best(results, nsteps):
"""Store per model each parameter and kl values
then for each model return the argmax parameters and curves
for kl and integral kl
"""
by_model = {}
# dictionary where each key is a model,
# and each value is a list of this model's hyperparameter
# and outcome at step nsteps
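    # Sketch of the structure built below (model names and numbers are illustrative):
    #   by_model = {'causal': [{'hyperparameters': {...}, 'lr': 1e-2, 'value': 0.31,
    #                           'kl': <array>, 'steps': <array>}, ...], ...}
    # after the selection loop each list is collapsed to its single best entry.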
for exp in results:
trajectory = exp['trajectory']
for model, metric in value_at_step(trajectory, nsteps).items():
if model not in by_model:
by_model[model] = []
toadd = {
'hyperparameters': exp['hyperparameters'],
**exp['hyperparameters'],
'value': metric,
'kl': trajectory['kl_' + model],
'steps': trajectory['steps']
}
if 'scoredist_' + model in trajectory:
toadd['scoredist'] = trajectory['scoredist_' + model]
by_model[model] += [toadd]
# select only the best hyperparameters for this model.
for model, metrics in by_model.items():
dalist = sorted(metrics, key=lambda x: x['value'])
# Ensure that the optimal configuration does not diverge as optimization goes on.
for duh in dalist:
if duh['kl'][0].mean() * 2 > duh['kl'][-1].mean():
break
by_model[model] = duh
# print the outcome
for model, item in by_model.items():
if 'MAP' in model:
print(model, ('\t n0={n0:.0f},'
'\t kl={value:.3f}').format(**item))
else:
print(model, ('\t alpha={scheduler_exponent},'
'\t lr={lr:.1e},'
'\t kl={value:.3f}').format(**item))
return by_model
def curve_plot(bestof, nsteps, figsize, logscale=False, endstep=400, confidence=(5, 95)):
"""Draw mean trajectory plot with percentiles"""
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=figsize)
for model, item in sorted(bestof.items()):
xx = item['steps']
values = item['kl']
# truncate plot for k-invariance
end_id = np.searchsorted(xx, endstep) + 1
xx = xx[:end_id]
values = values[:end_id]
# plot mean and percentile statistics
ax.plot(xx, values.mean(axis=1), label=model,
marker=MARKERS[model], markevery=len(xx) // 6, markeredgewidth=0,
color=COLORS[model], alpha=.9)
ax.fill_between(
xx,
np.percentile(values, confidence[0], axis=1),
np.percentile(values, confidence[1], axis=1),
alpha=.4,
color=COLORS[model]
)
ax.axvline(nsteps, linestyle='--', color='black')
ax.grid(True)
if logscale:
ax.set_yscale('log')
ax.set_ylabel(r'$\mathrm{KL}(\mathbf{p}^*, \mathbf{p}^{(t)})$')
ax.set_xlabel('number of samples t')
ax.legend()
return fig, ax
def scatter_plot(bestof, nsteps, figsize, logscale=False):
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=figsize)
alldist = []
allkl = []
for model, item in sorted(bestof.items()):
if 'scoredist' not in item:
continue
index = min(np.searchsorted(item['steps'], nsteps), len(item['steps']) - 1)
initial_distances = item['scoredist'][0]
end_kl = item['kl'][index]
ax.scatter(
initial_distances,
end_kl,
alpha=.3,
color=COLORS[model],
marker=MARKERS[model],
linewidth=0,
label=model if False else None
)
alldist += list(initial_distances)
allkl += list(end_kl)
# linear regression
slope, intercept, rval, pval, _ = scipy.stats.linregress(alldist, allkl)
x_vals = np.array(ax.get_xlim())
y_vals = intercept + slope * x_vals
ax.plot(
x_vals, y_vals, '--', color='black', alpha=.8,
label=f'y=ax+b, r2={rval ** 2:.2f}'
f',\na={slope:.1e}, b={intercept:.2f}'
)
# look
ax.legend()
ax.grid(True)
if logscale:
ax.set_yscale('log')
ax.set_xscale('log')
ax.set_xlim(min(alldist), max(alldist))
else:
ax.ticklabel_format(axis='both', style='sci', scilimits=(0, 0), useMathText=True)
ax.set_ylabel(r'$\mathrm{KL}(\mathbf{p}^*, \mathbf{p}^{(t)}); T=$' + str(nsteps))
ax.set_xlabel(r'$||\theta^{(0)} - \theta^* ||^2$')
return fig, ax
def two_plots(results, nsteps, plotname, dirname, verbose=False, figsize=(6, 3)):
print(dirname, plotname)
bestof = get_best(results, nsteps)
# remove the models I don't want to compare
# eg remove SGD, MAP. Keep ASGD and rename them to remove average.
selected = {
key[0].capitalize() + key[1:-len('_average')].replace('A', 'X').replace('B', 'Y'): item
for key, item in bestof.items()
if key.endswith('_average')}
for key in ['MAP_uniform', 'MAP_source']:
# selected[key] = bestof[key]
pass
if dirname.startswith('guess'):
selected.pop('Joint', None)
curves, ax1 = curve_plot(selected, nsteps, figsize, logscale=False)
# initstring = 'denseinit' if results[0]["is_init_dense"] else 'sparseinit'
# curves.suptitle(f'Average KL tuned for {nsteps} samples with {confidence} percentiles, '
# f'{initstring}, k={results[0]["k"]}')
scatter, ax2 = scatter_plot(selected, nsteps, figsize,
logscale=(dirname == 'guess_sparseinit'))
if verbose:
for ax in [ax1, ax2]:
info = str(next(iter(selected.values()))['hyperparameters'])
txt = ax.text(0.5, 1, info, ha='center', va='top',
wrap=True, transform=ax.transAxes,
# bbox=dict(boxstyle='square')
)
            txt._get_wrap_line_width = lambda: 400.  # wrap text at 400 screen pixels
# small adjustments for intervention guessing
if dirname.startswith('guess'):
curves.axes[0].set_ylim(0, 1.5)
for fig in [curves, scatter]:
fig.axes[0].set_xlabel('')
fig.axes[0].set_ylabel('')
for style, fig in {'curves': curves, 'scatter': scatter}.items():
for figpath in [
os.path.join('plots', dirname, f'{style}_{plotname}.pdf')]:
print("Saving ", figpath)
os.makedirs(os.path.dirname(figpath), exist_ok=True)
# os.path.join('plots/sweep/png', f'{style}_{plotname}.png')]:
fig.savefig(figpath, bbox_inches='tight')
plt.close(curves)
plt.close(scatter)
print()
def plot_marginal_likelihoods(results, intervention, k, dirname):
exp = results[0]
values = {}
for whom in ['A', 'B']:
values[whom] = exp['loglikelihood' + whom][:100].cumsum(0)
xx = np.arange(1, values[whom].shape[0] + 1)
values[whom] /= xx[:, np.newaxis]
if intervention == 'cause':
right, wrong = 'A', 'B'
else:
right, wrong = 'B', 'A'
plt.plot(values[wrong] - values[right], alpha=.2)
plt.hlines(0, 0, values['B'].shape[0])
plt.grid()
plt.ylim(-1, 1)
figpath = os.path.join('plots', dirname, 'guessing', f'guess_{intervention}_k={k}.pdf')
os.makedirs(os.path.dirname(figpath), exist_ok=True)
plt.savefig(figpath, bbox_inches='tight')
plt.close()
def merge_results(results1, results2, bs=5):
"""Combine results from intervention on cause and effect.
Also report statistics about pooled results.
Pooled records the average over 10 cause and 10 effect interventions
the goal is to have tighter percentile curves
which are representative of the algorithm's performance
"""
combined = []
pooled = []
for e1, e2 in zip(results1, results2):
h1, h2 = e1['hyperparameters'], e2['hyperparameters']
assert h1['lr'] == h2['lr']
t1, t2 = e1['trajectory'], e2['trajectory']
combined_trajs = {'steps': t1['steps']}
pooled_trajs = combined_trajs.copy()
for key in t1.keys():
if key.startswith(('scoredist', 'kl')):
combined_trajs[key] = np.concatenate((t1[key], t2[key]), axis=1)
meantraj = (t1[key] + t2[key]) / 2
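                # Pooling: average each cause/effect pair of runs, then average blocks of bs runs
                # so the percentile bands in the plots are tighter (see the function docstring).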
pooled_trajs[key] = np.array([
meantraj[:, bs * i:bs * (i + 1)].mean(axis=1)
for i in range(meantraj.shape[1] // bs)
]).T
combined += [{'hyperparameters': h1, 'trajectory': combined_trajs}]
pooled += [{'hyperparameters': h2, 'trajectory': pooled_trajs}]
return combined, pooled
def all_plot(guess=False, dense=True,
input_dir='categorical_results', output_dir='camera_ready',
figsize=(3.6, 2.2)):
basefile = '_'.join(['guess' if guess else 'sweep2',
'denseinit' if dense else 'sparseinit'])
print(basefile, '\n---------------------')
prior_string = 'dense' if dense else 'sparse'
for k in [20]: # [10, 20, 50]:
# Optimize hyperparameters for nsteps such that curves are k-invariant
nsteps = k ** 2 // 4
allresults = defaultdict(list)
for intervention in ['cause', 'effect']:
# 'singlecond', 'gmechanism', 'independent', 'geometric', 'weightedgeo']:
plotname = f'{prior_string}_{intervention}_k={k}'
file = f'{basefile}_{intervention}_k={k}.pkl'
filepath = os.path.join(input_dir, file)
print(os.path.abspath(filepath))
if os.path.isfile(filepath):
with open(filepath, 'rb') as fin:
results = pickle.load(fin)
print(1)
two_plots(results, nsteps,
plotname=plotname,
dirname=output_dir,
figsize=figsize)
allresults[intervention] = results
# if guess:
# plot_marginal_likelihoods(results, intervention, k, basefile)
# if not guess and 'cause' in allresults and 'effect' in allresults:
# combined, pooled = merge_results(allresults['cause'], allresults['effect'])
# if len(combined) > 0:
# for key, item in {'combined': combined, 'pooled': pooled}.items():
# two_plots(item, nsteps,
# plotname=f'{prior_string}_{key}_k={k}',
# dirname=output_dir,
# figsize=figsize)
if __name__ == '__main__':
np.set_printoptions(precision=2)
matplotlib.use('pgf')
matplotlib.rcParams['mathtext.fontset'] = 'cm'
matplotlib.rcParams['pdf.fonttype'] = 42
# all_plot(guess=True, dense=True)
# all_plot(guess=True, dense=False)
all_plot(guess=False, dense=True)
all_plot(guess=False, dense=False)
|
[
"collections.defaultdict",
"os.path.isfile",
"pickle.load",
"numpy.arange",
"os.path.join",
"matplotlib.pyplot.hlines",
"os.path.abspath",
"numpy.set_printoptions",
"matplotlib.pyplot.close",
"os.path.dirname",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.ylim",
"numpy.percentile",
"matplotlib.use",
"matplotlib.pyplot.grid",
"numpy.concatenate",
"matplotlib.pyplot.plot",
"numpy.searchsorted",
"matplotlib.pyplot.savefig"
] |
[((3201, 3248), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(1)', 'figsize': 'figsize'}), '(nrows=1, ncols=1, figsize=figsize)\n', (3213, 3248), True, 'import matplotlib.pyplot as plt\n'), ((4291, 4338), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(1)', 'figsize': 'figsize'}), '(nrows=1, ncols=1, figsize=figsize)\n', (4303, 4338), True, 'import matplotlib.pyplot as plt\n'), ((7865, 7882), 'matplotlib.pyplot.close', 'plt.close', (['curves'], {}), '(curves)\n', (7874, 7882), True, 'import matplotlib.pyplot as plt\n'), ((7887, 7905), 'matplotlib.pyplot.close', 'plt.close', (['scatter'], {}), '(scatter)\n', (7896, 7905), True, 'import matplotlib.pyplot as plt\n'), ((8325, 8375), 'matplotlib.pyplot.plot', 'plt.plot', (['(values[wrong] - values[right])'], {'alpha': '(0.2)'}), '(values[wrong] - values[right], alpha=0.2)\n', (8333, 8375), True, 'import matplotlib.pyplot as plt\n'), ((8379, 8417), 'matplotlib.pyplot.hlines', 'plt.hlines', (['(0)', '(0)', "values['B'].shape[0]"], {}), "(0, 0, values['B'].shape[0])\n", (8389, 8417), True, 'import matplotlib.pyplot as plt\n'), ((8422, 8432), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (8430, 8432), True, 'import matplotlib.pyplot as plt\n'), ((8437, 8452), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-1)', '(1)'], {}), '(-1, 1)\n', (8445, 8452), True, 'import matplotlib.pyplot as plt\n'), ((8467, 8544), 'os.path.join', 'os.path.join', (['"""plots"""', 'dirname', '"""guessing"""', 'f"""guess_{intervention}_k={k}.pdf"""'], {}), "('plots', dirname, 'guessing', f'guess_{intervention}_k={k}.pdf')\n", (8479, 8544), False, 'import os\n'), ((8606, 8647), 'matplotlib.pyplot.savefig', 'plt.savefig', (['figpath'], {'bbox_inches': '"""tight"""'}), "(figpath, bbox_inches='tight')\n", (8617, 8647), True, 'import matplotlib.pyplot as plt\n'), ((8652, 8663), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (8661, 8663), True, 'import matplotlib.pyplot as plt\n'), ((11899, 11931), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(2)'}), '(precision=2)\n', (11918, 11931), True, 'import numpy as np\n'), ((11936, 11957), 'matplotlib.use', 'matplotlib.use', (['"""pgf"""'], {}), "('pgf')\n", (11950, 11957), False, 'import matplotlib\n'), ((928, 958), 'numpy.searchsorted', 'np.searchsorted', (['steps', 'nsteps'], {}), '(steps, nsteps)\n', (943, 958), True, 'import numpy as np\n'), ((8131, 8170), 'numpy.arange', 'np.arange', (['(1)', '(values[whom].shape[0] + 1)'], {}), '(1, values[whom].shape[0] + 1)\n', (8140, 8170), True, 'import numpy as np\n'), ((8561, 8585), 'os.path.dirname', 'os.path.dirname', (['figpath'], {}), '(figpath)\n', (8576, 8585), False, 'import os\n'), ((10452, 10469), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (10463, 10469), False, 'from collections import defaultdict\n'), ((3410, 3438), 'numpy.searchsorted', 'np.searchsorted', (['xx', 'endstep'], {}), '(xx, endstep)\n', (3425, 3438), True, 'import numpy as np\n'), ((3784, 3828), 'numpy.percentile', 'np.percentile', (['values', 'confidence[0]'], {'axis': '(1)'}), '(values, confidence[0], axis=1)\n', (3797, 3828), True, 'import numpy as np\n'), ((3842, 3886), 'numpy.percentile', 'np.percentile', (['values', 'confidence[1]'], {'axis': '(1)'}), '(values, confidence[1], axis=1)\n', (3855, 3886), True, 'import numpy as np\n'), ((4495, 4533), 'numpy.searchsorted', 'np.searchsorted', (["item['steps']", 'nsteps'], {}), "(item['steps'], nsteps)\n", (4510, 4533), True, 'import numpy 
as np\n'), ((7569, 7626), 'os.path.join', 'os.path.join', (['"""plots"""', 'dirname', 'f"""{style}_{plotname}.pdf"""'], {}), "('plots', dirname, f'{style}_{plotname}.pdf')\n", (7581, 7626), False, 'import os\n'), ((10748, 10777), 'os.path.join', 'os.path.join', (['input_dir', 'file'], {}), '(input_dir, file)\n', (10760, 10777), False, 'import os\n'), ((10838, 10862), 'os.path.isfile', 'os.path.isfile', (['filepath'], {}), '(filepath)\n', (10852, 10862), False, 'import os\n'), ((7691, 7715), 'os.path.dirname', 'os.path.dirname', (['figpath'], {}), '(figpath)\n', (7706, 7715), False, 'import os\n'), ((9454, 9496), 'numpy.concatenate', 'np.concatenate', (['(t1[key], t2[key])'], {'axis': '(1)'}), '((t1[key], t2[key]), axis=1)\n', (9468, 9496), True, 'import numpy as np\n'), ((10796, 10821), 'os.path.abspath', 'os.path.abspath', (['filepath'], {}), '(filepath)\n', (10811, 10821), False, 'import os\n'), ((10944, 10960), 'pickle.load', 'pickle.load', (['fin'], {}), '(fin)\n', (10955, 10960), False, 'import pickle\n')]
|
# -*- coding: utf-8 -*-
"""
CW, FGSM, and IFGSM Attack CNN
"""
import torch._utils
try:
torch._utils._rebuild_tensor_v2
except AttributeError:
def _rebuild_tensor_v2(storage, storage_offset, size, stride, requires_grad, backward_hooks):
tensor = torch._utils._rebuild_tensor(storage, storage_offset, size, stride)
tensor.requires_grad = requires_grad
tensor._backward_hooks = backward_hooks
return tensor
torch._utils._rebuild_tensor_v2 = _rebuild_tensor_v2
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torchvision
import torchvision.datasets as dset
import torchvision.transforms as transforms
from torch.autograd import Variable
import copy
import math
import numpy as np
import os
import argparse
import torch.utils.data as data
#from utils import *
import numpy.matlib
import matplotlib.pyplot as plt
import pickle
# import cPickle
from collections import OrderedDict
import models.cifar as models
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
parser = argparse.ArgumentParser(description='Fool EnResNet')
ap = parser.add_argument
ap('--method', help='Attack Method', type=str, default="ifgsm") # fgsm, ifgsm, cw, clean
ap('--epsilon', help='Attack Strength', type=float, default=0.031) # May 2
ap('--num-ensembles', '--ne', default=2, type=int, metavar='N')
ap('--noise-coef', '--nc', default=0.1, type=float, metavar='W', help='forward noise (default: 0.1)')
ap('--noise-coef-eval', '--nce', default=0.0, type=float, metavar='W', help='forward noise (default: 0.)')
ap('--arch', '-a', metavar='ARCH', default='resnet20',
choices=model_names,
help='model architecture: ' +
' | '.join(model_names) +
                        ' (default: resnet20)')
ap('--depth', type=int, default=29, help='Model depth.')
ap('--block-name', type=str, default='BasicBlock',
help='the building block for Resnet and Preresnet: BasicBlock, Bottleneck (default: Basicblock for cifar10/cifar100)')
ap('--cardinality', type=int, default=8, help='Model cardinality (group).')
ap('--widen-factor', type=int, default=4, help='Widen factor. 4 -> 64, 8 -> 128, ...')
ap('--growthRate', type=int, default=12, help='Growth rate for DenseNet.')
ap('--compressionRate', type=int, default=2, help='Compression Rate (theta) for DenseNet.')
ap('--drop', '--dropout', default=0, type=float, metavar='Dropout', help='dropout ratio used by some architectures (default: 0)')
ap('--feature_vec', default='x', type=str)
ap('-c', '--checkpoint', default='checkpoint', type=str, metavar='PATH',
help='path to save checkpoint (default: checkpoint)')
ap('-d', '--dataset', default='cifar10', type=str)
ap('--eta', default=1.0, type=float, help='eta in HOResNet')
ap('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
opt = parser.parse_args()
def conv3x3(in_planes, out_planes, stride=1):
"""
3x3 convolution with padding
"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)
if __name__ == '__main__':
"""
Load the trained DNN, and attack the DNN, finally save the adversarial images
"""
# Model
if opt.dataset == 'cifar10':
dataloader = dset.CIFAR10
num_classes = 10
else:
dataloader = dset.CIFAR100
num_classes = 100
print("==> creating model '{}'".format(opt.arch))
if opt.arch.startswith('resnext'):
net = models.__dict__[opt.arch](
cardinality=opt.cardinality,
num_classes=num_classes,
depth=opt.depth,
widen_factor=opt.widen_factor,
dropRate=opt.drop,
)
elif opt.arch.startswith('densenet'):
net = models.__dict__[opt.arch](
num_classes=num_classes,
depth=opt.depth,
growthRate=opt.growthRate,
compressionRate=opt.compressionRate,
dropRate=opt.drop,
)
elif opt.arch.startswith('wrn'):
net = models.__dict__[opt.arch](
num_classes=num_classes,
depth=opt.depth,
widen_factor=opt.widen_factor,
dropRate=opt.drop,
)
elif opt.arch.startswith('resnet'):
net = models.__dict__[opt.arch](
num_classes=num_classes,
depth=opt.depth,
block_name=opt.block_name,
)
elif opt.arch.startswith('preresnet'):
net = models.__dict__[opt.arch](
num_classes=num_classes,
depth=opt.depth,
block_name=opt.block_name,
)
elif opt.arch.startswith('horesnet'):
net = models.__dict__[opt.arch](
num_classes=num_classes,
depth=opt.depth,
eta=opt.eta,
block_name=opt.block_name,
feature_vec=opt.feature_vec
)
elif opt.arch.startswith('hopreresnet'):
net = models.__dict__[opt.arch](
num_classes=num_classes,
depth=opt.depth,
eta=opt.eta,
block_name=opt.block_name,
feature_vec=opt.feature_vec
)
else:
net = models.__dict__[opt.arch](num_classes=num_classes)
# Load the model
print('==> Resuming from checkpoint..')
assert os.path.isfile(opt.checkpoint), 'Error: no checkpoint directory found!'
opt.checkpoint_dir = os.path.dirname(opt.checkpoint)
checkpoint = torch.load(opt.checkpoint)
best_acc = checkpoint['best_acc']
start_epoch = checkpoint['epoch']
net.load_state_dict(checkpoint['state_dict'])
net = net.cuda()
epsilon = opt.epsilon
attack_type = opt.method
# Load the original test data
print('==> Load the clean image')
root = './data'
download = False
kwargs = {'num_workers':1, 'pin_memory':True}
batchsize_test = 1000
if attack_type == 'cw':
batchsize_test = 1
print('Batch size of the test set: ', batchsize_test)
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
test_set = dataloader(root='./data', train=False, download=False, transform=transform_test)
test_loader = data.DataLoader(test_set, batch_size=batchsize_test, shuffle=False, num_workers=1, pin_memory=True)
criterion = nn.CrossEntropyLoss()
#--------------------------------------------------------------------------
# Testing
# images: the original images
# labels: labels of the original images
# images_adv: adversarial image
# labels_pred: the predicted labels of the adversarial images
# noise: the added noise
#--------------------------------------------------------------------------
images, labels, images_adv, labels_pred, noise = [], [], [], [], []
total_fooled = 0; total_correct_classified = 0
if attack_type == 'fgsm':
for batch_idx, (x1, y1_true) in enumerate(test_loader):
#if batch_idx < 2:
x_Test = x1.numpy()
#print x_Test.min(), x_Test.max()
#x_Test = ((x_Test - x_Test.min())/(x_Test.max() - x_Test.min()) - 0.5)*2
#x_Test = (x_Test - x_Test.min() )/(x_Test.max() - x_Test.min())
y_Test = y1_true.numpy()
#x = Variable(torch.cuda.FloatTensor(x_Test.reshape(1, 1, 28, 28)), requires_grad=True)
x = Variable(torch.cuda.FloatTensor(x_Test.reshape(batchsize_test, 3, 32, 32)), requires_grad=True)
y = Variable(torch.cuda.LongTensor(y_Test), requires_grad=False)
# Classification before perturbation
pred_tmp = net(x)
y_pred = np.argmax(pred_tmp.cpu().data.numpy())
loss = criterion(pred_tmp, y)
# Attack
net.zero_grad()
if x.grad is not None:
x.grad.data.fill_(0)
loss.backward()
x_val_min = 0.0
x_val_max = 1.0
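            # FGSM step: x_adv = x + epsilon * sign(grad_x loss), then clamp back to the valid pixel range [x_val_min, x_val_max]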
x.grad.sign_()
x_adversarial = x + epsilon*x.grad
x_adversarial = torch.clamp(x_adversarial, x_val_min, x_val_max)
x_adversarial = x_adversarial.data
# Classify the perturbed data
x_adversarial_tmp = Variable(x_adversarial)
pred_tmp = net(x_adversarial_tmp)
y_pred_adversarial = np.argmax(pred_tmp.cpu().data.numpy(), axis=1)
for i in range(len(x_Test)):
#print y_pred_adversarial
if y_Test[i] == y_pred_adversarial[i]:
#if y_Test == y_pred_adversarial:
total_correct_classified += 1
for i in range(len(x_Test)):
# Save the perturbed data
images.append(x_Test[i, :, :, :]) # Original image
images_adv.append(x_adversarial.cpu().numpy()[i, :, :, :]) # Perturbed image
noise.append(x_adversarial.cpu().numpy()[i, :, :, :]-x_Test[i, :, :, :]) # Noise
labels.append(y_Test[i])
labels_pred.append(y_pred_adversarial[i])
elif attack_type == 'ifgsm':
for batch_idx, (x1, y1_true) in enumerate(test_loader):
#if batch_idx < 100:
#x_Test = (x_Test - x_Test.min())/(x_Test.max()-x_Test.min())
x_Test = ((x1 - x1.min())/(x1.max() - x1.min()) - 0.5)*2
x_Test = x_Test.numpy()
y_Test = y1_true.numpy()
x = Variable(torch.cuda.FloatTensor(x_Test.reshape(batchsize_test, 3, 32, 32)), requires_grad=True)
y = Variable(torch.cuda.LongTensor(y_Test), requires_grad=False)
# Classification before perturbation
pred_tmp = net(x)
y_pred = np.argmax(pred_tmp.cpu().data.numpy())
loss = criterion(pred_tmp, y)
# Attack
alpha = epsilon
#iteration = 10
iteration = 20
x_val_min = 0.; x_val_max = 1.
epsilon1 = 0.031
# Helper function
def where(cond, x, y):
"""
code from :
https://discuss.pytorch.org/t/how-can-i-do-the-operation-the-same-as-np-where/1329/8
"""
cond = cond.float()
return (cond*x) + ((1-cond)*y)
# Random perturbation
#x = x + torch.zeros_like(x).uniform_(-epsilon1, epsilon1) # May 2
x_adv = Variable(x.data, requires_grad=True)
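            # Iterative FGSM: repeat the signed-gradient step and, after each iteration, clip x_adv back
            # into the L-infinity ball of radius epsilon1 around the clean input x, then clamp to [x_val_min, x_val_max]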
for i in range(iteration):
h_adv = net(x_adv)
loss = criterion(h_adv, y)
net.zero_grad()
if x_adv.grad is not None:
x_adv.grad.data.fill_(0)
loss.backward()
x_adv.grad.sign_()
x_adv = x_adv + alpha*x_adv.grad
x_adv = where(x_adv > x+epsilon1, x+epsilon1, x_adv)
x_adv = where(x_adv < x-epsilon1, x-epsilon1, x_adv)
x_adv = torch.clamp(x_adv, x_val_min, x_val_max)
x_adv = Variable(x_adv.data, requires_grad=True)
x_adversarial = x_adv.data
x_adversarial_tmp = Variable(x_adversarial)
pred_tmp = net(x_adversarial_tmp)
loss = criterion(pred_tmp, y)
y_pred_adversarial = np.argmax(pred_tmp.cpu().data.numpy(), axis=1)
#if y_Test == y_pred_adversarial:
# total_correct_classified += 1
for i in range(len(x_Test)):
#print y_pred_adversarial
if y_Test[i] == y_pred_adversarial[i]:
#if y_Test == y_pred_adversarial:
total_correct_classified += 1
for i in range(len(x_Test)):
# Save the perturbed data
images.append(x_Test[i, :, :, :]) # Original image
images_adv.append(x_adversarial.cpu().numpy()[i, :, :, :]) # Perturbed image
noise.append(x_adversarial.cpu().numpy()[i, :, :, :]-x_Test[i, :, :, :]) # Noise
labels.append(y_Test[i])
labels_pred.append(y_pred_adversarial[i])
elif attack_type == 'cw':
for batch_idx, (x1, y1_true) in enumerate(test_loader):
#if batch_idx < 10:
if batch_idx - int(int(batch_idx/50.)*50) == 0:
print(batch_idx)
x_Test = x1.numpy()
y_Test = y1_true.numpy()
x = Variable(torch.cuda.FloatTensor(x_Test.reshape(batchsize_test, 3, 32, 32)), requires_grad=True)
y = Variable(torch.cuda.LongTensor(y_Test), requires_grad=False)
# Classification before perturbation
pred_tmp = net(x)
loss = criterion(pred_tmp, y)
y_pred = np.argmax(pred_tmp.cpu().data.numpy())
# Attack
cwl2_learning_rate = 0.0006#0.01
max_iter = 50
lambdaf = 10.0
kappa = 0.0
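            # C&W L2 objective minimized below with Adam over w:
            #   ||w - x||^2 + lambdaf * max(logit[y_pred] - logit[argmax] + kappa, 0)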
# The input image we will perturb
input = torch.FloatTensor(x_Test.reshape(batchsize_test, 3, 32, 32))
input_var = Variable(input)
# w is the variable we will optimize over. We will also save the best w and loss
w = Variable(input, requires_grad=True)
best_w = input.clone()
best_loss = float('inf')
# Use the Adam optimizer for the minimization
optimizer = optim.Adam([w], lr=cwl2_learning_rate)
# Get the top2 predictions of the model. Get the argmaxes for the objective function
probs = net(input_var.cuda())
probs_data = probs.data.cpu()
top1_idx = torch.max(probs_data, 1)[1]
probs_data[0][top1_idx] = -1 # making the previous top1 the lowest so we get the top2
top2_idx = torch.max(probs_data, 1)[1]
# Set the argmax (but maybe argmax will just equal top2_idx always?)
argmax = top1_idx[0]
if argmax == y_pred:
argmax = top2_idx[0]
# The iteration
for i in range(0, max_iter):
if i > 0:
w.grad.data.fill_(0)
# Zero grad (Only one line needed actually)
net.zero_grad()
optimizer.zero_grad()
# Compute L2 Loss
loss = torch.pow(w - input_var, 2).sum()
# w variable
w_data = w.data
w_in = Variable(w_data, requires_grad=True)
# Compute output
output = net.forward(w_in.cuda()) #second argument is unneeded
# Calculating the (hinge) loss
loss += lambdaf * torch.clamp( output[0][y_pred] - output[0][argmax] + kappa, min=0).cpu()
# Backprop the loss
loss.backward()
# Work on w (Don't think we need this)
w.grad.data.add_(w_in.grad.data)
# Optimizer step
optimizer.step()
# Save the best w and loss
total_loss = loss.data.cpu()[0]
if total_loss < best_loss:
best_loss = total_loss
##best_w = torch.clamp(best_w, 0., 1.) # BW Added Aug 26
best_w = w.data.clone()
# Set final adversarial image as the best-found w
x_adversarial = best_w
##x_adversarial = torch.clamp(x_adversarial, 0., 1.) # BW Added Aug 26
#--------------- Add to introduce the noise
noise_tmp = x_adversarial.cpu().numpy() - x_Test
x_adversarial = x_Test + epsilon * noise_tmp
#---------------
# Classify the perturbed data
x_adversarial_tmp = Variable(torch.cuda.FloatTensor(x_adversarial), requires_grad=False) #Variable(x_adversarial).cuda()
pred_tmp = net(x_adversarial_tmp)
y_pred_adversarial = np.argmax(pred_tmp.cpu().data.numpy()) # axis=1
if y_Test == y_pred_adversarial:
total_correct_classified += 1
# Save the perturbed data
images.append(x_Test) # Original image
images_adv.append(x_adversarial) # Perturbed image
noise.append(x_adversarial-x_Test) # Noise
labels.append(y_Test)
labels_pred.append(y_pred_adversarial)
elif attack_type == 'clean':
for batch_idx, (x1, y1_true) in enumerate(test_loader):
#if batch_idx < 2:
x_Test = x1.numpy()
#print x_Test.min(), x_Test.max()
#x_Test = ((x_Test - x_Test.min())/(x_Test.max() - x_Test.min()) - 0.5)*2
#x_Test = (x_Test - x_Test.min() )/(x_Test.max() - x_Test.min())
y_Test = y1_true.numpy()
#x = Variable(torch.cuda.FloatTensor(x_Test.reshape(1, 1, 28, 28)), requires_grad=True)
#x, y = torch.autograd.Variable(torch.cuda.FloatTensor(x_Test), volatile=True), torch.autograd.Variable(torch.cuda.LongTensor(y_Test))
x = Variable(torch.cuda.FloatTensor(x_Test.reshape(batchsize_test, 3, 32, 32)), requires_grad=True)
y = Variable(torch.cuda.LongTensor(y_Test), requires_grad=False)
# Classification before perturbation
pred_tmp = net(x)
y_pred = np.argmax(pred_tmp.cpu().data.numpy(), axis=1)
for i in range(len(x_Test)):
#print y_pred_adversarial
if y_Test[i] == y_pred[i]:
#if y_Test == y_pred_adversarial:
total_correct_classified += 1
else:
        raise ValueError('Unsupported Attack')
print(opt.checkpoint)
print('Number of correctly classified images: ', total_correct_classified)
# Save data
#with open("Adversarial" + attack_type + str(int(10*epsilon)) + ".pkl", "w") as f:
#with open("Adversarial" + attack_type + str(int(100*epsilon)) + ".pkl", "w") as f:
# adv_data_dict = {"images":images_adv, "labels":labels}
# cPickle.dump(adv_data_dict, f)
images = np.array(images).squeeze()
images_adv = np.array(images_adv).squeeze()
noise = np.array(noise).squeeze()
labels = np.array(labels).squeeze()
labels_pred = np.array(labels_pred).squeeze()
print([images.shape, images_adv.shape, noise.shape, labels.shape, labels_pred.shape])
# with open("fooled_EnResNet5_20_PGD_10iters_" + attack_type + str(int(1000*epsilon)) + ".pkl", "w") as f:
# #with open("fooled_EnResNet5_20_PGD_20iters_" + attack_type + str(int(1000*epsilon)) + ".pkl", "w") as f:
# adv_data_dict = {
# "images" : images,
# "images_adversarial" : images_adv,
# "y_trues" : labels,
# "noises" : noise,
# "y_preds_adversarial" : labels_pred
# }
# pickle.dump(adv_data_dict, f)
|
[
"argparse.ArgumentParser",
"torch.utils.data.DataLoader",
"torch.autograd.Variable",
"torch.load",
"torch.nn.Conv2d",
"os.path.dirname",
"torch.nn.CrossEntropyLoss",
"torch._utils._rebuild_tensor",
"torch.cuda.FloatTensor",
"os.path.isfile",
"torch.clamp",
"numpy.array",
"torch.optim.Adam",
"torch.max",
"torch.cuda.LongTensor",
"torch.pow",
"torchvision.transforms.Normalize",
"torchvision.transforms.ToTensor"
] |
[((1203, 1255), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Fool EnResNet"""'}), "(description='Fool EnResNet')\n", (1226, 1255), False, 'import argparse\n'), ((3100, 3189), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_planes', 'out_planes'], {'kernel_size': '(3)', 'stride': 'stride', 'padding': '(1)', 'bias': '(False)'}), '(in_planes, out_planes, kernel_size=3, stride=stride, padding=1,\n bias=False)\n', (3109, 3189), True, 'import torch.nn as nn\n'), ((5708, 5738), 'os.path.isfile', 'os.path.isfile', (['opt.checkpoint'], {}), '(opt.checkpoint)\n', (5722, 5738), False, 'import os\n'), ((5805, 5836), 'os.path.dirname', 'os.path.dirname', (['opt.checkpoint'], {}), '(opt.checkpoint)\n', (5820, 5836), False, 'import os\n'), ((5854, 5880), 'torch.load', 'torch.load', (['opt.checkpoint'], {}), '(opt.checkpoint)\n', (5864, 5880), False, 'import torch\n'), ((6681, 6784), 'torch.utils.data.DataLoader', 'data.DataLoader', (['test_set'], {'batch_size': 'batchsize_test', 'shuffle': '(False)', 'num_workers': '(1)', 'pin_memory': '(True)'}), '(test_set, batch_size=batchsize_test, shuffle=False,\n num_workers=1, pin_memory=True)\n', (6696, 6784), True, 'import torch.utils.data as data\n'), ((6802, 6823), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (6821, 6823), True, 'import torch.nn as nn\n'), ((262, 329), 'torch._utils._rebuild_tensor', 'torch._utils._rebuild_tensor', (['storage', 'storage_offset', 'size', 'stride'], {}), '(storage, storage_offset, size, stride)\n', (290, 329), False, 'import torch\n'), ((6450, 6471), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (6469, 6471), True, 'import torchvision.transforms as transforms\n'), ((6481, 6552), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.4914, 0.4822, 0.4465)', '(0.2023, 0.1994, 0.201)'], {}), '((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.201))\n', (6501, 6552), True, 'import torchvision.transforms as transforms\n'), ((8565, 8613), 'torch.clamp', 'torch.clamp', (['x_adversarial', 'x_val_min', 'x_val_max'], {}), '(x_adversarial, x_val_min, x_val_max)\n', (8576, 8613), False, 'import torch\n'), ((8748, 8771), 'torch.autograd.Variable', 'Variable', (['x_adversarial'], {}), '(x_adversarial)\n', (8756, 8771), False, 'from torch.autograd import Variable\n'), ((19211, 19227), 'numpy.array', 'np.array', (['images'], {}), '(images)\n', (19219, 19227), True, 'import numpy as np\n'), ((19255, 19275), 'numpy.array', 'np.array', (['images_adv'], {}), '(images_adv)\n', (19263, 19275), True, 'import numpy as np\n'), ((19298, 19313), 'numpy.array', 'np.array', (['noise'], {}), '(noise)\n', (19306, 19313), True, 'import numpy as np\n'), ((19337, 19353), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (19345, 19353), True, 'import numpy as np\n'), ((19382, 19403), 'numpy.array', 'np.array', (['labels_pred'], {}), '(labels_pred)\n', (19390, 19403), True, 'import numpy as np\n'), ((7986, 8015), 'torch.cuda.LongTensor', 'torch.cuda.LongTensor', (['y_Test'], {}), '(y_Test)\n', (8007, 8015), False, 'import torch\n'), ((11002, 11038), 'torch.autograd.Variable', 'Variable', (['x.data'], {'requires_grad': '(True)'}), '(x.data, requires_grad=True)\n', (11010, 11038), False, 'from torch.autograd import Variable\n'), ((11791, 11814), 'torch.autograd.Variable', 'Variable', (['x_adversarial'], {}), '(x_adversarial)\n', (11799, 11814), False, 'from torch.autograd import Variable\n'), ((10104, 10133), 'torch.cuda.LongTensor', 'torch.cuda.LongTensor', (['y_Test'], 
{}), '(y_Test)\n', (10125, 10133), False, 'import torch\n'), ((11584, 11624), 'torch.clamp', 'torch.clamp', (['x_adv', 'x_val_min', 'x_val_max'], {}), '(x_adv, x_val_min, x_val_max)\n', (11595, 11624), False, 'import torch\n'), ((11649, 11689), 'torch.autograd.Variable', 'Variable', (['x_adv.data'], {'requires_grad': '(True)'}), '(x_adv.data, requires_grad=True)\n', (11657, 11689), False, 'from torch.autograd import Variable\n'), ((13795, 13810), 'torch.autograd.Variable', 'Variable', (['input'], {}), '(input)\n', (13803, 13810), False, 'from torch.autograd import Variable\n'), ((13933, 13968), 'torch.autograd.Variable', 'Variable', (['input'], {'requires_grad': '(True)'}), '(input, requires_grad=True)\n', (13941, 13968), False, 'from torch.autograd import Variable\n'), ((14137, 14175), 'torch.optim.Adam', 'optim.Adam', (['[w]'], {'lr': 'cwl2_learning_rate'}), '([w], lr=cwl2_learning_rate)\n', (14147, 14175), True, 'import torch.optim as optim\n'), ((13228, 13257), 'torch.cuda.LongTensor', 'torch.cuda.LongTensor', (['y_Test'], {}), '(y_Test)\n', (13249, 13257), False, 'import torch\n'), ((14406, 14430), 'torch.max', 'torch.max', (['probs_data', '(1)'], {}), '(probs_data, 1)\n', (14415, 14430), False, 'import torch\n'), ((14555, 14579), 'torch.max', 'torch.max', (['probs_data', '(1)'], {}), '(probs_data, 1)\n', (14564, 14579), False, 'import torch\n'), ((15289, 15325), 'torch.autograd.Variable', 'Variable', (['w_data'], {'requires_grad': '(True)'}), '(w_data, requires_grad=True)\n', (15297, 15325), False, 'from torch.autograd import Variable\n'), ((16807, 16844), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['x_adversarial'], {}), '(x_adversarial)\n', (16829, 16844), False, 'import torch\n'), ((18261, 18290), 'torch.cuda.LongTensor', 'torch.cuda.LongTensor', (['y_Test'], {}), '(y_Test)\n', (18282, 18290), False, 'import torch\n'), ((15154, 15181), 'torch.pow', 'torch.pow', (['(w - input_var)', '(2)'], {}), '(w - input_var, 2)\n', (15163, 15181), False, 'import torch\n'), ((15553, 15618), 'torch.clamp', 'torch.clamp', (['(output[0][y_pred] - output[0][argmax] + kappa)'], {'min': '(0)'}), '(output[0][y_pred] - output[0][argmax] + kappa, min=0)\n', (15564, 15618), False, 'import torch\n')]
|
# encoding: utf-8
__author__ = "<NAME>"
# Parts of the code have been taken from https://github.com/facebookresearch/fastMRI
import numpy as np
import pytest
import torch
from tests.collections.reconstruction.fastmri.create_temp_data import create_temp_data
# these are really slow - skip by default
SKIP_INTEGRATIONS = True
def create_input(shape):
"""
Create a random input tensor of the given shape.
Args:
shape: The shape of the input tensor.
Returns:
A random input tensor.
"""
x = np.arange(np.product(shape)).reshape(shape)
x = torch.from_numpy(x).float()
return x
@pytest.fixture(scope="session")
def fastmri_mock_dataset(tmp_path_factory):
"""
Create a mock dataset for testing.
Args:
tmp_path_factory: A temporary path factory.
Returns:
A mock dataset.
"""
path = tmp_path_factory.mktemp("fastmri_data")
return create_temp_data(path)
@pytest.fixture
def skip_integration_tests():
"""
Skip integration tests if the environment variable is set.
Returns:
A boolean indicating whether to skip integration tests.
"""
return SKIP_INTEGRATIONS
@pytest.fixture
def knee_split_lens():
"""
The split lengths for the knee dataset.
Returns:
A dictionary with the split lengths.
"""
return {
"multicoil_train": 34742,
"multicoil_val": 7135,
"multicoil_test": 4092,
"singlecoil_train": 34742,
"singlecoil_val": 7135,
"singlecoil_test": 3903,
}
@pytest.fixture
def brain_split_lens():
"""
The split lengths for the brain dataset.
Returns:
A dictionary with the split lengths.
"""
return {
"multicoil_train": 70748,
"multicoil_val": 21842,
"multicoil_test": 8852,
}
|
[
"numpy.product",
"tests.collections.reconstruction.fastmri.create_temp_data.create_temp_data",
"pytest.fixture",
"torch.from_numpy"
] |
[((632, 663), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (646, 663), False, 'import pytest\n'), ((927, 949), 'tests.collections.reconstruction.fastmri.create_temp_data.create_temp_data', 'create_temp_data', (['path'], {}), '(path)\n', (943, 949), False, 'from tests.collections.reconstruction.fastmri.create_temp_data import create_temp_data\n'), ((587, 606), 'torch.from_numpy', 'torch.from_numpy', (['x'], {}), '(x)\n', (603, 606), False, 'import torch\n'), ((545, 562), 'numpy.product', 'np.product', (['shape'], {}), '(shape)\n', (555, 562), True, 'import numpy as np\n')]
|
# TODOS
#--------------------------------------
# imports
import matplotlib.pyplot as plt
from atalaia.atalaia import Atalaia
import numpy as np
import networkx as nx
class Explore:
"""Explore is used for text exploratory tasks.
"""
def __init__(self, language:str):
"""
Parameters
----------
language : str
The language of the corpus
"""
self.language = language
self.atalaia = self.__start_atalaia()
def __start_atalaia(self):
""" Starts an instance of Atalaia"""
return Atalaia(self.language)
def describe(self, corpus:list):
""" Gets the lengths of the sentences present in the corpus, based on the number of tokens.
Returns the lengths, the shortest value and the longest value and the average sentence size."""
# tokenize sentences
tokenized_sentences = [self.atalaia.tokenize(sentence) for sentence in corpus]
# get the lengths
lengths = [len(sentence) for sentence in tokenized_sentences]
# get the percentiles
a = np.array(lengths)
percentiles = (np.percentile(a,0), np.percentile(a,25), np.percentile(a,50), np.percentile(a,75), np.percentile(a,100))
# get shortest, longest and average sentence size using the percentiles values
shortest = percentiles[0] # 0%
longest = percentiles[4] # 100%
average = percentiles[2] # 50%
return lengths, shortest, longest, average, percentiles
def plot_sentences_size_histogram(self, corpus:list, bins = 30, xlabel = 'Number of tokens', ylabel = 'Frequency'):
""" Plots the tokens distribution """
# get sentences sizes
sentences_sizes, shortest, longest, average, percentiles = self.describe(corpus)
# plot
plt.hist(sentences_sizes, bins = bins)
plt.xlabel(xlabel)
        plt.ylabel(ylabel)
plt.show()
# return sizes, shortest and longest values and average
return sentences_sizes, shortest, longest, average, percentiles
def plot_sentences_size_boxplot(self, corpus:list):
# get sentences sizes
sentences_sizes, shortest, longest, average, percentiles = self.describe(corpus)
# plot boxplot
plt.boxplot(sentences_sizes)
plt.show()
# return sizes, shortest and longest values and average
return sentences_sizes, shortest, longest, average, percentiles
def plot_representative_tokens(self, corpus:list, percentage=0.3):
#create corpus
corpus = self.atalaia.create_corpus(corpus)
# let's lowercase everything first
texts_lower = self.atalaia.lower_remove_white(corpus)
# plot
token_data = self.atalaia.representative_tokens(percentage,
texts_lower,
reverse=False)
token_data = token_data.items()
token_data = list(token_data)[:10]
tokens, counts = zip(*token_data)
# plot
plt.figure(figsize=(20,10))
plt.bar(tokens,
counts,
color='b')
plt.xlabel('Tokens');
plt.ylabel('Counts');
|
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.boxplot",
"matplotlib.pyplot.bar",
"atalaia.atalaia.Atalaia",
"numpy.percentile",
"matplotlib.pyplot.figure",
"numpy.array",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] |
[((585, 607), 'atalaia.atalaia.Atalaia', 'Atalaia', (['self.language'], {}), '(self.language)\n', (592, 607), False, 'from atalaia.atalaia import Atalaia\n'), ((1116, 1133), 'numpy.array', 'np.array', (['lengths'], {}), '(lengths)\n', (1124, 1133), True, 'import numpy as np\n'), ((1854, 1890), 'matplotlib.pyplot.hist', 'plt.hist', (['sentences_sizes'], {'bins': 'bins'}), '(sentences_sizes, bins=bins)\n', (1862, 1890), True, 'import matplotlib.pyplot as plt\n'), ((1901, 1919), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xlabel'], {}), '(xlabel)\n', (1911, 1919), True, 'import matplotlib.pyplot as plt\n'), ((1928, 1946), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['ylabel'], {}), '(ylabel)\n', (1938, 1946), True, 'import matplotlib.pyplot as plt\n'), ((1955, 1965), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1963, 1965), True, 'import matplotlib.pyplot as plt\n'), ((2312, 2340), 'matplotlib.pyplot.boxplot', 'plt.boxplot', (['sentences_sizes'], {}), '(sentences_sizes)\n', (2323, 2340), True, 'import matplotlib.pyplot as plt\n'), ((2349, 2359), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2357, 2359), True, 'import matplotlib.pyplot as plt\n'), ((3145, 3173), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 10)'}), '(figsize=(20, 10))\n', (3155, 3173), True, 'import matplotlib.pyplot as plt\n'), ((3181, 3215), 'matplotlib.pyplot.bar', 'plt.bar', (['tokens', 'counts'], {'color': '"""b"""'}), "(tokens, counts, color='b')\n", (3188, 3215), True, 'import matplotlib.pyplot as plt\n'), ((3258, 3278), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Tokens"""'], {}), "('Tokens')\n", (3268, 3278), True, 'import matplotlib.pyplot as plt\n'), ((3288, 3308), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Counts"""'], {}), "('Counts')\n", (3298, 3308), True, 'import matplotlib.pyplot as plt\n'), ((1157, 1176), 'numpy.percentile', 'np.percentile', (['a', '(0)'], {}), '(a, 0)\n', (1170, 1176), True, 'import numpy as np\n'), ((1177, 1197), 'numpy.percentile', 'np.percentile', (['a', '(25)'], {}), '(a, 25)\n', (1190, 1197), True, 'import numpy as np\n'), ((1198, 1218), 'numpy.percentile', 'np.percentile', (['a', '(50)'], {}), '(a, 50)\n', (1211, 1218), True, 'import numpy as np\n'), ((1219, 1239), 'numpy.percentile', 'np.percentile', (['a', '(75)'], {}), '(a, 75)\n', (1232, 1239), True, 'import numpy as np\n'), ((1240, 1261), 'numpy.percentile', 'np.percentile', (['a', '(100)'], {}), '(a, 100)\n', (1253, 1261), True, 'import numpy as np\n')]
|
'''
gather redshift info across all observations for a given target type; for now from a single tile
'''
#test
#standard python
import sys
import os
import shutil
import unittest
from datetime import datetime
import json
import numpy as np
import fitsio
import glob
import argparse
from astropy.table import Table,join,unique,vstack
from matplotlib import pyplot as plt
parser = argparse.ArgumentParser()
parser.add_argument("--type", help="tracer type to be selected")
parser.add_argument("--tile", help="observed tile to use") #eventually remove this and just gather everything
args = parser.parse_args()
type = args.type
tile = args.tile
if type == 'LRG':
tarbit = 0 #targeting bit
if type == 'QSO':
tarbit = 2
if type == 'ELG':
tarbit = 1
print('gathering type,tile')
print(type,tile)
tp = 'SV1_DESI_TARGET'
print('targeting bit, target program type; CHECK THEY ARE CORRECT!')
print(tarbit,tp)
#location of inputs
coaddir = '/global/cfs/cdirs/desi/spectro/redux/blanc/tiles/'+tile
subsets = [x[0][len(coaddir):].strip('/') for x in os.walk(coaddir)] #something must work better than this, but for now...
#outputs
svdir = '/project/projectdirs/desi/users/ajross/catalogs/SV/'
version = 'test/'
dirout = svdir+'redshift_comps/'+version
outf = dirout +'/'+tile+'_'+type+'zinfo.fits'
if not os.path.exists(svdir+'redshift_comps'):
os.mkdir(svdir+'redshift_comps')
print('made '+svdir+'redshift_comps random directory')
if not os.path.exists(dirout):
os.mkdir(dirout)
print('made '+dirout)
ss = 0 #use to switch from creating to concatenating
for night in subsets:
if len(night) > 0:
print('going through subset '+night)
specs = []
#find out which spectrograph have data
for si in range(0,10):
try:
fl = coaddir+'/'+night+'/zbest-'+str(si)+'-'+str(tile)+'-'+night+'.fits'
#print(fl)
fitsio.read(fl)
specs.append(si)
except:
print('no spectrograph '+str(si)+ ' on subset '+night)
tspec = Table.read(coaddir+'/'+night+'/zbest-'+str(specs[0])+'-'+str(tile)+'-'+night+'.fits',hdu='ZBEST')
tf = Table.read(coaddir+'/'+night+'/coadd-'+str(specs[0])+'-'+str(tile)+'-'+night+'.fits',hdu='FIBERMAP')
for i in range(1,len(specs)):
tn = Table.read(coaddir+'/'+night+'/zbest-'+str(specs[i])+'-'+str(tile)+'-'+night+'.fits',hdu='ZBEST')
tnf = Table.read(coaddir+'/'+night+'/coadd-'+str(specs[i])+'-'+str(tile)+'-'+night+'.fits',hdu='FIBERMAP')
tspec = vstack([tspec,tn])
tf = vstack([tf,tnf])
tspec = join(tspec,tf,keys=['TARGETID'])
wtype = ((tspec[tp] & 2**tarbit) > 0)
print(str(len(tspec))+' total entries '+str(len(tspec[wtype]))+' that are '+type)
tspec = tspec[wtype]
tspec['subset'] = night
if ss == 0:
tspect = tspec
ss = 1
else:
tspect = vstack([tspect,tspec])
print('there are now '+str(len(tspect)) +' entries with '+str(len(np.unique(tspect['TARGETID'])))+' unique target IDs')
tspect.sort('TARGETID')
tspect.write(outf,format='fits', overwrite=True)
|
[
"os.mkdir",
"argparse.ArgumentParser",
"os.walk",
"os.path.exists",
"astropy.table.join",
"astropy.table.vstack",
"fitsio.read",
"numpy.unique"
] |
[((383, 408), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (406, 408), False, 'import argparse\n'), ((1314, 1354), 'os.path.exists', 'os.path.exists', (["(svdir + 'redshift_comps')"], {}), "(svdir + 'redshift_comps')\n", (1328, 1354), False, 'import os\n'), ((1358, 1392), 'os.mkdir', 'os.mkdir', (["(svdir + 'redshift_comps')"], {}), "(svdir + 'redshift_comps')\n", (1366, 1392), False, 'import os\n'), ((1458, 1480), 'os.path.exists', 'os.path.exists', (['dirout'], {}), '(dirout)\n', (1472, 1480), False, 'import os\n'), ((1486, 1502), 'os.mkdir', 'os.mkdir', (['dirout'], {}), '(dirout)\n', (1494, 1502), False, 'import os\n'), ((1057, 1073), 'os.walk', 'os.walk', (['coaddir'], {}), '(coaddir)\n', (1064, 1073), False, 'import os\n'), ((2648, 2682), 'astropy.table.join', 'join', (['tspec', 'tf'], {'keys': "['TARGETID']"}), "(tspec, tf, keys=['TARGETID'])\n", (2652, 2682), False, 'from astropy.table import Table, join, unique, vstack\n'), ((2579, 2598), 'astropy.table.vstack', 'vstack', (['[tspec, tn]'], {}), '([tspec, tn])\n', (2585, 2598), False, 'from astropy.table import Table, join, unique, vstack\n'), ((2615, 2632), 'astropy.table.vstack', 'vstack', (['[tf, tnf]'], {}), '([tf, tnf])\n', (2621, 2632), False, 'from astropy.table import Table, join, unique, vstack\n'), ((2979, 3002), 'astropy.table.vstack', 'vstack', (['[tspect, tspec]'], {}), '([tspect, tspec])\n', (2985, 3002), False, 'from astropy.table import Table, join, unique, vstack\n'), ((1919, 1934), 'fitsio.read', 'fitsio.read', (['fl'], {}), '(fl)\n', (1930, 1934), False, 'import fitsio\n'), ((3076, 3105), 'numpy.unique', 'np.unique', (["tspect['TARGETID']"], {}), "(tspect['TARGETID'])\n", (3085, 3105), True, 'import numpy as np\n')]
|
"""
This module constructs network of streets.
"""
import numpy as np
import json
# Adobe flat UI colour scheme
DARK_BLUE = "#2C3E50"
MEDIUM_BLUE = "#2980B9"
LIGHT_BLUE = "#3498DB"
RED = "#E74C3C"
WHITE = "#ECF0F1"
# Colour parameters
STROKE_COLOUR = DARK_BLUE
STREET_COLOUR = DARK_BLUE
JUNCTION_COLOUR = MEDIUM_BLUE
JUNCTION_TEXT = DARK_BLUE
RESULTS_COLOUR = RED
RESULTS_TEXT = DARK_BLUE
# Dimensions
OFFSET = 50
STREET_WIDTH = 8
STROKE_WIDTH = 2
JUNCTION_WIDTH = 20
MAX_RADIUS = 25
INITIAL_DECIBELS = 120
# Max absorption
MAX_ABSORPTION = 0.1
# Don't plot absorption coefficients (option)
ABSORPTION = False
class Constructor(object):
"""
This class of methods initialises a network object of specified dimensions,
modifies the network using modifying methods, outputs the adjacency matrix
of the network and outputs the visualisation in the svg format.
"""
def __init__(self):
self.__horizontals = None
self.__verticals = None
self.__nodes = None
self.__adjacency = None
self.__modified_adjacency = None
self.__positions = None
self.__stage = 0
def set_grid(self, horizontals, verticals, length):
"""
This setter method sets stage 1 (setting and moving) of the construction.
"""
try:
horizontals = int(horizontals)
verticals = int(verticals)
except ValueError:
raise ValueError("Horizontals and verticals must be integers.")
try:
length = float(length)
except ValueError:
raise ValueError("Length must be a floating point number.")
for quantity in [horizontals, verticals, length]:
if quantity < 0:
raise ValueError(
"Horizontals, verticals and length must be positive numbers.")
self.__horizontals = horizontals
self.__verticals = verticals
self.__nodes = horizontals*verticals
self.__adjacency = self.__create_adjacency()
self.__modified_adjacency = None
self.__positions = self.__create_positions(length)
self.__stage = 1
def unset_grid(self):
"""
This method is used to set the network to the stage 0 (instantiation) of
the construction.
"""
self.__horizontals = None
self.__verticals = None
self.__nodes = None
self.__adjacency = None
self.__modified_adjacency = None
self.__positions = None
self.__stage = 0
def __create_adjacency(self):
"""
This private method returns initial adjacency matrix.
"""
        adjacency = np.zeros((self.__nodes, self.__nodes), dtype=int)
# Normal adjacency matrix for grid network
for i in range(self.__nodes):
for j in range(self.__nodes):
if (j == i+1 and j%self.__verticals != 0) or \
(j == i-1 and i%self.__verticals != 0) or \
j == i+self.__verticals or \
j == i-self.__verticals:
adjacency[i][j] = 1
return adjacency
def __create_positions(self, length):
"""
This private method returns initial positions matrix.
"""
positions = np.zeros((self.__nodes, 2))
for i in range(self.__nodes):
positions[i][0] = i%self.__verticals*length
positions[i][1] = i//self.__verticals*length
return positions
def move_horizontal_line(self, i, length):
"""
This method moves the horizontal line i.
"""
assert self.__stage == 1
if i not in range(self.__horizontals):
raise ValueError("No such horizontal line.")
for node in range(self.__nodes):
if node//self.__verticals == i:
self.__positions[node][1] += length
def move_vertical_line(self, j, length):
"""
This method moves the vertical line j.
"""
assert self.__stage == 1
if j not in range(self.__verticals):
raise ValueError("No such vertical line.")
for node in range(self.__nodes):
if node%self.__verticals == j:
self.__positions[node][0] += length
def delete_connection(self, i, j):
"""
This method deletes the street (i, j).
"""
if self.__stage == 1:
            self.__stage = 2 # set stage to 2 so lines can no longer be moved
assert self.__stage == 2
if i not in range(self.__nodes) or j not in range(self.__nodes):
raise ValueError("Nodes out of range.")
if self.__adjacency[i][j] == 0:
raise ValueError("Junctions are not neighbours.")
self.__adjacency[i][j] = 0
self.__adjacency[j][i] = 0
to_delete = []
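        # If a junction is left with exactly two collinear neighbours, bridge those neighbours
        # directly and mark the junction for deletion; junctions left with no neighbours are deleted too.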
if sum(self.__adjacency[i]) == 2:
connections = []
for k in range(self.__nodes):
if self.__adjacency[i][k] == 1:
connections.append(k)
if (self.__positions[i][0] == self.__positions[connections[0]][0] and \
self.__positions[i][0] == self.__positions[connections[1]][0]) or \
(self.__positions[i][1] == self.__positions[connections[0]][1] and \
self.__positions[i][1] == self.__positions[connections[1]][1]):
self.__adjacency[connections[0]][connections[1]] = 1
self.__adjacency[connections[1]][connections[0]] = 1
to_delete.append(i)
elif sum(self.__adjacency[i]) == 0:
to_delete.append(i)
if sum(self.__adjacency[j]) == 2:
connections = []
for k in range(self.__nodes):
if self.__adjacency[j][k] == 1:
connections.append(k)
if (self.__positions[j][0] == self.__positions[connections[0]][0] and \
self.__positions[j][0] == self.__positions[connections[1]][0]) or \
(self.__positions[j][1] == self.__positions[connections[0]][1] and \
self.__positions[j][1] == self.__positions[connections[1]][1]):
self.__adjacency[connections[0]][connections[1]] = 1
self.__adjacency[connections[1]][connections[0]] = 1
to_delete.append(j)
elif sum(self.__adjacency[j]) == 0:
to_delete.append(j)
if len(to_delete) != 0:
self.__adjacency = np.delete(self.__adjacency, to_delete, axis=0)
self.__adjacency = np.delete(self.__adjacency, to_delete, axis=1)
self.__positions = np.delete(self.__positions, to_delete, axis=0)
self.__nodes = int(self.__nodes - len(to_delete))
def modify_adjacency(self, width, alpha, beta):
"""
This method creates new adjacency matrix with dictionaries of keys
(alpha, beta, street width, street length, orientation) instead of 1s.
"""
if self.__stage == 1 or self.__stage == 2:
self.__stage = 3
assert self.__stage == 3
try:
width = float(width)
alpha = float(alpha)
beta = float(beta)
except ValueError:
raise ValueError("Width and absorption must be floating point numbers.")
if width <= 0:
raise ValueError("Width must be a positive number.")
if alpha < 0 or alpha > 1 or beta < 0 or beta > 1:
raise ValueError("Absorption must be a number between 0 and 1.")
self.__modified_adjacency = self.__adjacency.tolist() # To python structure
positions = self.__positions
for i in range(self.__nodes):
for j in range(i):
if self.__adjacency[i][j] == 1:
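                    # Orientation is stored as a quadrant index 0-3; the opposite direction (j -> i) uses (orientation+2) % 4.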
if positions[i][1] == positions[j][1]:
length = abs(positions[i][0] - positions[j][0]).tolist()
if positions[i][0] < positions[j][0]:
orientation = 0
elif positions[i][0] > positions[j][0]:
orientation = 2
else:
raise ValueError("Points are at the same position.")
elif positions[i][0] == positions[j][0]:
length = abs(positions[i][1] - positions[j][1]).tolist()
if positions[i][1] < positions[j][1]:
orientation = 1
elif positions[i][1] > positions[j][1]:
orientation = 3
else:
raise ValueError("Points are at the same position.")
else:
raise ValueError("Points are not colinear.")
self.__modified_adjacency[i][j] = {
"alpha": alpha,
"beta": beta,
"width": width,
"length": length,
"orientation": orientation}
self.__modified_adjacency[j][i] = {
"alpha": alpha,
"beta": beta,
"width": width,
"length": length,
"orientation": (orientation+2)%4}
def unmodify_adjacency(self):
"""
This method is used to set the stage to stage 2 (deleting) of the
construction.
"""
self.__stage = 2
self.__modified_adjacency = None
def change_width(self, i, j, width):
"""
This method changes the street width of street (i, j).
"""
assert self.__stage == 3
try:
width = float(width)
except ValueError:
raise ValueError("Width must be a floating point number.")
if width <= 0:
raise ValueError("Width must be a positive number.")
if i not in range(self.__nodes) or j not in range(self.__nodes):
raise ValueError("Nodes out of range")
if self.__modified_adjacency[i][j] == 0:
raise ValueError("Junctions are not neighbours.")
self.__modified_adjacency[i][j]["width"] = width
self.__modified_adjacency[j][i]["width"] = width
def change_alpha(self, i, j, alpha):
"""
This method changes the wall absorption of street (i, j).
"""
assert self.__stage == 3
try:
alpha = float(alpha)
except ValueError:
raise ValueError("Absorption must be a floating point number.")
if alpha < 0 or alpha > 1:
raise ValueError("Absorption must be a number between 0 and 1")
if i not in range(self.__nodes) or j not in range(self.__nodes):
raise ValueError("Nodes out of range.")
if self.__modified_adjacency[i][j] == 0:
raise ValueError("Junctions are not neighbours.")
self.__modified_adjacency[i][j]["alpha"] = alpha
self.__modified_adjacency[j][i]["alpha"] = alpha
def change_beta(self, i, j, beta):
"""
This method changes the air absorption of street (i, j).
"""
assert self.__stage == 3
try:
beta = float(beta)
except ValueError:
raise ValueError("Absorption must be a floating point number.")
if beta < 0 or beta > 1:
raise ValueError("Absorption must be a number between 0 and 1")
if i not in range(self.__nodes) or j not in range(self.__nodes):
raise ValueError("Nodes out of range.")
if self.__modified_adjacency[i][j] == 0:
raise ValueError("Junctions are not neighbours.")
self.__modified_adjacency[i][j]["beta"] = beta
self.__modified_adjacency[j][i]["beta"] = beta
def get_horizontals(self):
"""
This getter method returns the number of horizontal streets.
"""
return self.__horizontals
def get_verticals(self):
"""
This getter method returns the number of vertical streets.
"""
return self.__verticals
def get_adjacency(self):
"""
This getter method returns the normal adjacency matrix.
"""
return self.__adjacency
def get_modified_adjacency(self):
"""
This getter method returns the modified adjacency matrix.
"""
return self.__modified_adjacency
def get_positions(self):
"""
This getter method returns the positions matrix.
"""
return self.__positions
def get_stage(self):
"""
This getter method returns current stage index.
"""
return self.__stage
def import_network(self, invalues):
"""
This method is used to import existing network from the invalues
dictionary.
"""
self.__horizontals = invalues["horizontals"]
self.__verticals = invalues["verticals"]
self.__nodes = invalues["nodes"]
self.__adjacency = np.array(invalues["adjacency"])
self.__modified_adjacency = invalues["modified_adjacency"]
self.__positions = np.array(invalues["positions"])
self.__stage = invalues["stage"]
def export_network(self, filename):
"""
This method is used to export currently constructed network to json
format to some file.
"""
data = {
"horizontals": self.__horizontals,
"verticals": self.__verticals,
"nodes": self.__nodes,
"adjacency": self.__adjacency.tolist(),
"modified_adjacency": self.__modified_adjacency,
"positions": self.__positions.tolist(),
"stage": self.__stage
}
with open(filename, "w") as file:
json.dump(data, file)
def draw_network(self, filename, results=False):
"""
        This method writes the given file (e.g. "output.html") with an svg drawing
        of the network and optionally plots the results.
"""
def get_hex_fill(coefficient, max_absorption):
red = hex(int(coefficient/max_absorption*255))
red = red[-2:] if len(red)==4 else "0{0}".format(red[-1])
blue = hex(int((1-coefficient/max_absorption)*255))
blue = blue[-2:] if len(blue)==4 else "0{0}".format(blue[-1])
fill = "#{0}00{1}".format(red, blue)
return fill
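        # Illustrative mapping: a coefficient of 0 gives "#0000ff" (pure blue) and a
        # coefficient equal to max_absorption gives "#ff0000" (pure red), so streets
        # shade from blue to red with increasing absorption.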
def svg_header(width, height):
return "<svg width='{0}' height='{1}'>\n".format(width, height)
def svg_line(x1, y1, x2, y2, fill=STREET_COLOUR, width=STREET_WIDTH):
return "<line x1='{0}' y1='{1}' x2='{2}' y2='{3}' \
style='stroke: {4}; stroke-width: {5}'/>\n".format(x1+OFFSET, y1+OFFSET,
x2+OFFSET, y2+OFFSET,
fill, width)
def svg_square(x, y):
return "<rect x='{0}' y='{1}' width='{2}' height='{2}' \
style='stroke: {3}; stroke-width: {4}; fill: {5}'/>\n".format(x-JUNCTION_WIDTH/2+OFFSET,
y-JUNCTION_WIDTH/2+OFFSET,
JUNCTION_WIDTH,
STROKE_COLOUR,
STROKE_WIDTH,
JUNCTION_COLOUR
)
def svg_circle(x, y, r, fill):
return "<circle cx='{0}' cy='{1}' r='{2}' style='stroke: {3}; \
stroke-width: {4}; fill: {5}'/>\n".format(x+OFFSET,
y+OFFSET,
r,
STROKE_COLOUR,
STROKE_WIDTH,
fill
)
def svg_text(x, y, colour, size, text):
move = (size-15)/4 # adjust text position
return "<text text-anchor='middle' x='{0}' y='{1}' \
style='fill: {2}; font-size: {3}'>{4}</text>\n".format(x+OFFSET,
y+OFFSET+JUNCTION_WIDTH/4 + move,
colour,
size,
text
)
positions = self.__positions
if self.__stage == 3:
adjacency = self.__modified_adjacency
modified = True
else:
adjacency = self.__adjacency
modified = False
with open(filename, "w") as file:
width = positions[self.__nodes-1][0]+2*OFFSET
height = positions[self.__nodes-1][1]+2*OFFSET
file.write(svg_header(width, height))
# Draw walls if modified (with absorption)
if modified and ABSORPTION:
for i in range(self.__nodes):
for j in range(i):
if adjacency[i][j] != 0:
[xi, yi] = positions[i]
[xj, yj] = positions[j]
alpha = adjacency[i][j]["alpha"]
alpha_fill = get_hex_fill(alpha, MAX_ABSORPTION)
width = adjacency[i][j]["width"]
translation = width/2
if xi == xj:
file.write(svg_line(xi-translation, yi,
xj-translation, yj,
alpha_fill, width
))
file.write(svg_line(xi+translation, yi,
xj+translation, yj,
alpha_fill, width
))
elif yi == yj:
file.write(svg_line(xi, yi-translation,
xj, yj-translation,
alpha_fill, width
))
file.write(svg_line(xi, yi+translation,
xj, yj+translation,
alpha_fill, width
))
# Draw streets (with absorption if modified)
for i in range(self.__nodes):
for j in range(i):
if adjacency[i][j] != 0:
[xi, yi] = positions[i]
[xj, yj] = positions[j]
if not modified or not ABSORPTION:
file.write(svg_line(xi, yi, xj, yj))
else:
beta = adjacency[i][j]["beta"]
beta_fill = get_hex_fill(beta, MAX_ABSORPTION)
width = adjacency[i][j]["width"]
file.write(svg_line(xi, yi, xj, yj,
beta_fill, width
))
# Draw junctions (rectangles with numbers)
counter = 0
for position in positions:
file.write(svg_square(position[0], position[1]))
file.write(svg_text(position[0], position[1], JUNCTION_TEXT, 15, counter))
counter += 1
# Draw results
if results:
(X, Y, Z) = results
for i in range(len(Z)):
decibels = 20*np.log10(Z[i]*10**(INITIAL_DECIBELS/20))
if decibels < 0:
continue
# Radius
radius = (decibels/INITIAL_DECIBELS)*MAX_RADIUS
file.write(svg_circle(X[i], Y[i], radius, RESULTS_COLOUR))
if decibels > 30:
file.write(svg_text(X[i], Y[i], RESULTS_TEXT, radius, int(round(decibels))))
file.write("</svg>")
|
[
"json.dump",
"numpy.zeros",
"numpy.array",
"numpy.log10",
"numpy.delete"
] |
[((2673, 2725), 'numpy.zeros', 'np.zeros', (['(self.__nodes, self.__nodes)'], {'dtype': 'np.int'}), '((self.__nodes, self.__nodes), dtype=np.int)\n', (2681, 2725), True, 'import numpy as np\n'), ((3291, 3318), 'numpy.zeros', 'np.zeros', (['(self.__nodes, 2)'], {}), '((self.__nodes, 2))\n', (3299, 3318), True, 'import numpy as np\n'), ((13085, 13116), 'numpy.array', 'np.array', (["invalues['adjacency']"], {}), "(invalues['adjacency'])\n", (13093, 13116), True, 'import numpy as np\n'), ((13211, 13242), 'numpy.array', 'np.array', (["invalues['positions']"], {}), "(invalues['positions'])\n", (13219, 13242), True, 'import numpy as np\n'), ((6475, 6521), 'numpy.delete', 'np.delete', (['self.__adjacency', 'to_delete'], {'axis': '(0)'}), '(self.__adjacency, to_delete, axis=0)\n', (6484, 6521), True, 'import numpy as np\n'), ((6553, 6599), 'numpy.delete', 'np.delete', (['self.__adjacency', 'to_delete'], {'axis': '(1)'}), '(self.__adjacency, to_delete, axis=1)\n', (6562, 6599), True, 'import numpy as np\n'), ((6631, 6677), 'numpy.delete', 'np.delete', (['self.__positions', 'to_delete'], {'axis': '(0)'}), '(self.__positions, to_delete, axis=0)\n', (6640, 6677), True, 'import numpy as np\n'), ((13895, 13916), 'json.dump', 'json.dump', (['data', 'file'], {}), '(data, file)\n', (13904, 13916), False, 'import json\n'), ((19959, 20005), 'numpy.log10', 'np.log10', (['(Z[i] * 10 ** (INITIAL_DECIBELS / 20))'], {}), '(Z[i] * 10 ** (INITIAL_DECIBELS / 20))\n', (19967, 20005), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#This script scores results from each student
#Drawn images are downloaded from a .csv file, converted from string base64 encoding,
#and scored against machine learning models saved to disk
import csv
import os
#import file
import cv2
import re
import base64
import numpy as np
from keras.models import model_from_json
from sklearn.metrics import cohen_kappa_score
from tkinter import *
import tkinter as tk
from tkinter.filedialog import askopenfilename
from tkinter.filedialog import askdirectory
from tkinter import simpledialog
import sys
import os.path
#Specify max size due to large size of Base64 images
#**~MAC/LINUX~**#
#csv.field_size_limit(sys.maxsize)
#**~WINDOWS 64 BIT~**#
csv.field_size_limit(2**30)
#Specify which questions are drawn images. Their associated value is the
#size of the image used in data preprocessing for the machine learning model.
drawn_images ={
"Q1": 64,
"Q2": 128,
"Q3": 64,
"Q4": 64,
"Q7": 128,
"Q8": 128,
"Q9": 128,
"Q17": 128,
"Q18": 64
}
#init variables
filename = ""
filedir = ""
modeldir = ""
prefix = ""
##Retrieve the CSV file to read image data
def getCSVfile():
global filename
global filedir
filename = askopenfilename()
filedir = os.path.abspath(os.path.join(filename, os.pardir))
filedir += "/"
print(filedir)
#Select the directory containing H5 and JSON model files.
def getModelDir():
global modeldir
modeldir = askdirectory()
modeldir += "/"
#Select a prefix to read only specific records starting with the prefix.
def getPrefix():
global prefix
prefix = simpledialog.askstring("input string", "Enter an ID prefix:")
#Run program and create two response CSV files.
def Start():
#for indexing
drawn_images_list = list(drawn_images)
#Load models:
models = []
print("Loading models... This may take a moment")
for key in drawn_images:
json_file_path = modeldir + key + ".json"
weight_file_path = modeldir + key + ".h5"
json_file = open(json_file_path, 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
loaded_model.load_weights(weight_file_path)
loaded_model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
models.append(loaded_model)
print(f"Loaded model {key}...")
print("Done loading models")
#Function to process each individual image
    #Returns the prediction as "1" or "0", 'c' for a corrupted image,
    #or 'f' if the model confidence is below the acceptance threshold.
def process_image(Qnum, uri, partid):
print(f"Processing image: {Qnum}")
#Ensure value exists
if(uri == None): return 0
#Grab value to resize image
size = drawn_images[Qnum]
#create image file as temporary
path = modeldir + "temp.png"
img = open(path, "wb")
img.write(base64.b64decode(uri))
img = cv2.imread(path, 0)
        #Test resizing image. If the URI is corrupted, return 'c'.
try:
img = cv2.resize(img, (size, size))
except:
return 'c'
img_reshape = np.array(img).reshape(-1,size,size,1)
#Run image against model
print("Acc: ")
print (models[drawn_images_list.index(Qnum)].predict(img_reshape))
pred = models[drawn_images_list.index(Qnum)].predict_classes(img_reshape)[0]
#This flips the class as the prediction score is on the opposite entry.
pred = ("1", "0")[pred == 0]
pred_array = models[drawn_images_list.index(Qnum)].predict(img_reshape)
#Remove the image to make room for another
os.remove(modeldir + "temp.png")
eps = .15 #Min. acceptable criterion
if(1-np.amax(pred_array) > eps):
return 'f'
return pred
#Open two files, one for response scores and the other for written
#question responses. Each file name is appended with a prefix if
    #a prefix is given.
data = open(filename, 'r')
responses = open(filedir + 'responses_pref' + prefix + '.csv', 'w')
Wresponses = open(filedir + 'Wresponses_pref' + prefix + '.csv', 'w')
read_data = csv.reader(data, delimiter=',')
write_responses = csv.writer(responses, delimiter=',',
quotechar='"', quoting=csv.QUOTE_ALL)
write_Wresponses = csv.writer(Wresponses, delimiter=',',
quotechar='"', quoting=csv.QUOTE_ALL)
line_count = 0
for row in read_data:
if row[0].startswith(prefix, 0, len(prefix)):
print(row[0])
if line_count == 0:
line_count += 1
write_responses.writerow(['Number','Participant', 'Q1_drawn', 'Q2_drawn',
'Q3_drawn', 'Q4_drawn', 'Q7_drawn', 'Q8_drawn',
'Q9_drawn', 'Q17_drawn', 'Q18_drawn', 'Q5_response',
'Q5_correct_response', 'Q5_accuracy','Q6_response',
'Q6_correct_response', 'Q6_accuracy','Q10_1_response',
'Q10_1_correct_response','Q10_1_accuracy', 'Q10_2_response',
'Q10_2_correct_response', 'Q10_2_accuracy', 'Q11_response',
'Q11_correct_response', 'Q11_accuracy', 'Q12_response',
'Q12_correct_response','Q12_accuracy', 'Q13_response',
'Q13_correct_response', 'Q13_accuracy', 'Q14_1_response',
'Q14_1_correct_response', 'Q14_1_accuracy', 'Q14_2_response',
'Q14_2_correct_response','Q14_2_accuracy', 'Q15_AB_response',
'Q15_AB_correct_response','Q15_AB_accuracy', 'Q15_AD_response',
'Q15_AD_correct_response','Q15_AD_accuracy', 'Q15_BC_response',
'Q15_BC_correct_response','Q15_BC_accuracy', 'Q15_CD_response',
'Q15_CD_correct_response','Q15_CD_accuracy','Q15_BD_response',
'Q15_BD_correct_response','Q15_BD_accuracy', 'Total', 'Date Submitted'])
write_Wresponses.writerow(['Number','Participant','Q2_written', 'Q7_written', 'Q8_written',
'Q9_written', 'Q14_2_written', 'Q17_written', 'Q18_written', 'Date Submitted'])
else:
                #resp used for response scores, respW for written responses
resp = []
respW = []
count = 0
##logic here
#append number and name
resp.append(line_count)
resp.append(row[0])
respW.append(line_count)
respW.append(row[0])
#append drawn images
for x in drawn_images:
y = row[drawn_images_list.index(x) + 2].split(',')
if(len(y) > 1):
resp.append(process_image(x, y[1], row[0]))
else: resp.append("N/A")
#print(row[drawn_images_list.index(x) + 2])
##Q5
resp.append(row[23])
resp.append("A")
resp.append(("0", "1")[row[23] == "A"])
#Q6
resp.append(row[24])
resp.append("A")
resp.append(("0", "1")[row[24] == "A"])
#Q10_1
resp.append(row[15])
resp.append("Josh")
resp.append(("0", "1")["josh" in row[15].lower()])
#Q10_2
resp.append(row[18])
resp.append("josh")
resp.append(("0", "1")["josh" in row[18].lower()])
#Q11
resp.append(row[25])
resp.append("B")
resp.append(("0", "1")[row[25] == "B"])
#Q12
resp.append(row[26])
resp.append("B")
resp.append(("0", "1")[row[26] == "B"])
#Q13
resp.append(row[17])
resp.append("40")
resp.append(("0", "1")["40" in row[19]])
#Q14_1
resp.append(row[18])
resp.append("Josh")
resp.append(("0", "1")["josh" in row[18].lower()])
#Q15
##Refer to re library for digit extraction
resp.append(row[20])
resp.append("7040-7080")
val = re.findall("\d+", row[20])
if(len(val) > 0):
resp.append(("0", "1")[int(val[0]) >= 7040 and int(val[0]) <= 7080])
else: resp.append("0")
#Q16:
resp.append(row[27])
resp.append("yes")
resp.append(("0", "1")[row[27] == "yes"])
resp.append(row[28])
resp.append("yes")
resp.append(("0", "1")[row[28] == "yes"])
resp.append(row[29])
resp.append("yes")
resp.append(("0", "1")[row[29] == "yes"])
resp.append(row[30])
resp.append("no")
resp.append(("0", "1")[row[30] == "no"])
resp.append(row[31])
resp.append("yes")
resp.append(("0", "1")[row[31] == "yes"])
##WRITE ALL THE WRITTEN RESPONSES HERE
respW.append(row[11])
respW.append(row[12])
respW.append(row[13])
respW.append(row[14])
respW.append(row[16])
respW.append(row[19])
respW.append(row[21])
respW.append(row[22])
#Total
sum = 0
for x in resp:
if x == "1":
sum += 1
resp.append(sum)
#Dates
resp.append(row[32])
respW.append(row[32])
#Write rows
write_responses.writerow(resp)
write_Wresponses.writerow(respW)
line_count += 1
print(f"Finished, {line_count} rows read: ")
data.close()
    responses.close()
    Wresponses.close()
##Run GUI
root = tk.Tk()
root.wm_title("Run Participant Data")
selectCsv = tk.Button(root, text='Select CSV file', width=25, command=getCSVfile)
selectCsv.pack()
selectDirectory = tk.Button(root, text='Select model directory', width=25, command=getModelDir)
selectDirectory.pack()
selectPrefix = tk.Button(root, text='Select an ID prefix', width=25, command=getPrefix)
selectPrefix.pack()
startButton = tk.Button(root, text='Start', width=25, command=Start)
startButton.pack()
root.mainloop()
|
[
"os.remove",
"csv.reader",
"csv.writer",
"tkinter.Button",
"csv.field_size_limit",
"tkinter.filedialog.askopenfilename",
"base64.b64decode",
"tkinter.filedialog.askdirectory",
"tkinter.simpledialog.askstring",
"cv2.imread",
"keras.models.model_from_json",
"numpy.array",
"numpy.amax",
"re.findall",
"os.path.join",
"tkinter.Tk",
"cv2.resize"
] |
[((741, 770), 'csv.field_size_limit', 'csv.field_size_limit', (['(2 ** 30)'], {}), '(2 ** 30)\n', (761, 770), False, 'import csv\n'), ((10354, 10361), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (10359, 10361), True, 'import tkinter as tk\n'), ((10412, 10481), 'tkinter.Button', 'tk.Button', (['root'], {'text': '"""Select CSV file"""', 'width': '(25)', 'command': 'getCSVfile'}), "(root, text='Select CSV file', width=25, command=getCSVfile)\n", (10421, 10481), True, 'import tkinter as tk\n'), ((10517, 10594), 'tkinter.Button', 'tk.Button', (['root'], {'text': '"""Select model directory"""', 'width': '(25)', 'command': 'getModelDir'}), "(root, text='Select model directory', width=25, command=getModelDir)\n", (10526, 10594), True, 'import tkinter as tk\n'), ((10633, 10705), 'tkinter.Button', 'tk.Button', (['root'], {'text': '"""Select an ID prefix"""', 'width': '(25)', 'command': 'getPrefix'}), "(root, text='Select an ID prefix', width=25, command=getPrefix)\n", (10642, 10705), True, 'import tkinter as tk\n'), ((10740, 10794), 'tkinter.Button', 'tk.Button', (['root'], {'text': '"""Start"""', 'width': '(25)', 'command': 'Start'}), "(root, text='Start', width=25, command=Start)\n", (10749, 10794), True, 'import tkinter as tk\n'), ((1246, 1263), 'tkinter.filedialog.askopenfilename', 'askopenfilename', ([], {}), '()\n', (1261, 1263), False, 'from tkinter.filedialog import askopenfilename\n'), ((1485, 1499), 'tkinter.filedialog.askdirectory', 'askdirectory', ([], {}), '()\n', (1497, 1499), False, 'from tkinter.filedialog import askdirectory\n'), ((1644, 1705), 'tkinter.simpledialog.askstring', 'simpledialog.askstring', (['"""input string"""', '"""Enter an ID prefix:"""'], {}), "('input string', 'Enter an ID prefix:')\n", (1666, 1705), False, 'from tkinter import simpledialog\n'), ((4218, 4249), 'csv.reader', 'csv.reader', (['data'], {'delimiter': '""","""'}), "(data, delimiter=',')\n", (4228, 4249), False, 'import csv\n'), ((4272, 4346), 'csv.writer', 'csv.writer', (['responses'], {'delimiter': '""","""', 'quotechar': '"""\\""""', 'quoting': 'csv.QUOTE_ALL'}), '(responses, delimiter=\',\', quotechar=\'"\', quoting=csv.QUOTE_ALL)\n', (4282, 4346), False, 'import csv\n'), ((4402, 4477), 'csv.writer', 'csv.writer', (['Wresponses'], {'delimiter': '""","""', 'quotechar': '"""\\""""', 'quoting': 'csv.QUOTE_ALL'}), '(Wresponses, delimiter=\',\', quotechar=\'"\', quoting=csv.QUOTE_ALL)\n', (4412, 4477), False, 'import csv\n'), ((1294, 1327), 'os.path.join', 'os.path.join', (['filename', 'os.pardir'], {}), '(filename, os.pardir)\n', (1306, 1327), False, 'import os\n'), ((2195, 2229), 'keras.models.model_from_json', 'model_from_json', (['loaded_model_json'], {}), '(loaded_model_json)\n', (2210, 2229), False, 'from keras.models import model_from_json\n'), ((2964, 2983), 'cv2.imread', 'cv2.imread', (['path', '(0)'], {}), '(path, 0)\n', (2974, 2983), False, 'import cv2\n'), ((3687, 3719), 'os.remove', 'os.remove', (["(modeldir + 'temp.png')"], {}), "(modeldir + 'temp.png')\n", (3696, 3719), False, 'import os\n'), ((2927, 2948), 'base64.b64decode', 'base64.b64decode', (['uri'], {}), '(uri)\n', (2943, 2948), False, 'import base64\n'), ((3083, 3112), 'cv2.resize', 'cv2.resize', (['img', '(size, size)'], {}), '(img, (size, size))\n', (3093, 3112), False, 'import cv2\n'), ((3175, 3188), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (3183, 3188), True, 'import numpy as np\n'), ((3778, 3797), 'numpy.amax', 'np.amax', (['pred_array'], {}), '(pred_array)\n', (3785, 3797), True, 'import numpy as np\n'), ((8594, 
8621), 're.findall', 're.findall', (['"""\\\\d+"""', 'row[20]'], {}), "('\\\\d+', row[20])\n", (8604, 8621), False, 'import re\n')]
|
from glob import glob
import os
import os.path as op
from shutil import copyfile
from nose.tools import assert_raises
import numpy as np
from numpy.testing import assert_array_almost_equal
import mne
from mne.datasets import testing
from mne.transforms import (Transform, apply_trans, rotation, translation,
scaling)
from mne.coreg import (fit_matched_points, create_default_subject, scale_mri,
_is_mri_subject, scale_labels, scale_source_space,
coregister_fiducials)
from mne.io.constants import FIFF
from mne.utils import _TempDir, run_tests_if_main
from mne.source_space import write_source_spaces
from functools import reduce
def test_coregister_fiducials():
"""Test coreg.coregister_fiducials()"""
# prepare head and MRI fiducials
trans = Transform('head', 'mri',
rotation(.4, .1, 0).dot(translation(.1, -.1, .1)))
coords_orig = np.array([[-0.08061612, -0.02908875, -0.04131077],
[0.00146763, 0.08506715, -0.03483611],
[0.08436285, -0.02850276, -0.04127743]])
coords_trans = apply_trans(trans, coords_orig)
def make_dig(coords, cf):
return ({'coord_frame': cf, 'ident': 1, 'kind': 1, 'r': coords[0]},
{'coord_frame': cf, 'ident': 2, 'kind': 1, 'r': coords[1]},
{'coord_frame': cf, 'ident': 3, 'kind': 1, 'r': coords[2]})
mri_fiducials = make_dig(coords_trans, FIFF.FIFFV_COORD_MRI)
info = {'dig': make_dig(coords_orig, FIFF.FIFFV_COORD_HEAD)}
# test coregister_fiducials()
trans_est = coregister_fiducials(info, mri_fiducials)
assert trans_est.from_str == trans.from_str
assert trans_est.to_str == trans.to_str
assert_array_almost_equal(trans_est['trans'], trans['trans'])
@testing.requires_testing_data
def test_scale_mri():
"""Test creating fsaverage and scaling it."""
# create fsaverage using the testing "fsaverage" instead of the FreeSurfer
# one
tempdir = _TempDir()
fake_home = testing.data_path()
create_default_subject(subjects_dir=tempdir, fs_home=fake_home,
verbose=True)
assert _is_mri_subject('fsaverage', tempdir), "Creating fsaverage failed"
fid_path = op.join(tempdir, 'fsaverage', 'bem', 'fsaverage-fiducials.fif')
os.remove(fid_path)
create_default_subject(update=True, subjects_dir=tempdir,
fs_home=fake_home)
assert op.exists(fid_path), "Updating fsaverage"
# copy MRI file from sample data (shouldn't matter that it's incorrect,
# so here choose a small one)
path_from = op.join(testing.data_path(), 'subjects', 'sample', 'mri',
'T1.mgz')
path_to = op.join(tempdir, 'fsaverage', 'mri', 'orig.mgz')
copyfile(path_from, path_to)
# remove redundant label files
label_temp = op.join(tempdir, 'fsaverage', 'label', '*.label')
label_paths = glob(label_temp)
for label_path in label_paths[1:]:
os.remove(label_path)
# create source space
print('Creating surface source space')
path = op.join(tempdir, 'fsaverage', 'bem', 'fsaverage-%s-src.fif')
src = mne.setup_source_space('fsaverage', 'ico0', subjects_dir=tempdir,
add_dist=False)
write_source_spaces(path % 'ico-0', src)
mri = op.join(tempdir, 'fsaverage', 'mri', 'orig.mgz')
print('Creating volume source space')
vsrc = mne.setup_volume_source_space(
'fsaverage', pos=50, mri=mri, subjects_dir=tempdir,
add_interpolator=False)
write_source_spaces(path % 'vol-50', vsrc)
# scale fsaverage
os.environ['_MNE_FEW_SURFACES'] = 'true'
scale = np.array([1, .2, .8])
scale_mri('fsaverage', 'flachkopf', scale, True, subjects_dir=tempdir,
verbose='debug')
del os.environ['_MNE_FEW_SURFACES']
assert _is_mri_subject('flachkopf', tempdir), "Scaling fsaverage failed"
spath = op.join(tempdir, 'flachkopf', 'bem', 'flachkopf-%s-src.fif')
assert op.exists(spath % 'ico-0'), "Source space ico-0 was not scaled"
assert os.path.isfile(os.path.join(tempdir, 'flachkopf', 'surf',
'lh.sphere.reg'))
vsrc_s = mne.read_source_spaces(spath % 'vol-50')
pt = np.array([0.12, 0.41, -0.22])
assert_array_almost_equal(apply_trans(vsrc_s[0]['src_mri_t'], pt * scale),
apply_trans(vsrc[0]['src_mri_t'], pt))
scale_labels('flachkopf', subjects_dir=tempdir)
# add distances to source space
mne.add_source_space_distances(src)
src.save(path % 'ico-0', overwrite=True)
# scale with distances
os.remove(spath % 'ico-0')
scale_source_space('flachkopf', 'ico-0', subjects_dir=tempdir)
ssrc = mne.read_source_spaces(spath % 'ico-0')
assert ssrc[0]['dist'] is not None
def test_fit_matched_points():
"""Test fit_matched_points: fitting two matching sets of points"""
tgt_pts = np.random.RandomState(42).uniform(size=(6, 3))
# rotation only
trans = rotation(2, 6, 3)
src_pts = apply_trans(trans, tgt_pts)
trans_est = fit_matched_points(src_pts, tgt_pts, translate=False,
out='trans')
est_pts = apply_trans(trans_est, src_pts)
assert_array_almost_equal(tgt_pts, est_pts, 2, "fit_matched_points with "
"rotation")
# rotation & translation
trans = np.dot(translation(2, -6, 3), rotation(2, 6, 3))
src_pts = apply_trans(trans, tgt_pts)
trans_est = fit_matched_points(src_pts, tgt_pts, out='trans')
est_pts = apply_trans(trans_est, src_pts)
assert_array_almost_equal(tgt_pts, est_pts, 2, "fit_matched_points with "
"rotation and translation.")
# rotation & translation & scaling
trans = reduce(np.dot, (translation(2, -6, 3), rotation(1.5, .3, 1.4),
scaling(.5, .5, .5)))
src_pts = apply_trans(trans, tgt_pts)
trans_est = fit_matched_points(src_pts, tgt_pts, scale=1, out='trans')
est_pts = apply_trans(trans_est, src_pts)
assert_array_almost_equal(tgt_pts, est_pts, 2, "fit_matched_points with "
"rotation, translation and scaling.")
# test exceeding tolerance
tgt_pts[0, :] += 20
assert_raises(RuntimeError, fit_matched_points, tgt_pts, src_pts, tol=10)
run_tests_if_main()
|
[
"mne.coreg.fit_matched_points",
"os.remove",
"mne.utils._TempDir",
"mne.utils.run_tests_if_main",
"mne.setup_volume_source_space",
"mne.coreg.coregister_fiducials",
"glob.glob",
"numpy.testing.assert_array_almost_equal",
"os.path.join",
"mne.read_source_spaces",
"mne.source_space.write_source_spaces",
"os.path.exists",
"mne.coreg.scale_source_space",
"numpy.random.RandomState",
"mne.transforms.apply_trans",
"mne.coreg.scale_mri",
"nose.tools.assert_raises",
"shutil.copyfile",
"mne.coreg._is_mri_subject",
"mne.setup_source_space",
"mne.coreg.create_default_subject",
"mne.transforms.scaling",
"mne.datasets.testing.data_path",
"mne.add_source_space_distances",
"mne.transforms.translation",
"numpy.array",
"mne.transforms.rotation",
"mne.coreg.scale_labels"
] |
[((6417, 6436), 'mne.utils.run_tests_if_main', 'run_tests_if_main', ([], {}), '()\n', (6434, 6436), False, 'from mne.utils import _TempDir, run_tests_if_main\n'), ((950, 1084), 'numpy.array', 'np.array', (['[[-0.08061612, -0.02908875, -0.04131077], [0.00146763, 0.08506715, -\n 0.03483611], [0.08436285, -0.02850276, -0.04127743]]'], {}), '([[-0.08061612, -0.02908875, -0.04131077], [0.00146763, 0.08506715,\n -0.03483611], [0.08436285, -0.02850276, -0.04127743]])\n', (958, 1084), True, 'import numpy as np\n'), ((1156, 1187), 'mne.transforms.apply_trans', 'apply_trans', (['trans', 'coords_orig'], {}), '(trans, coords_orig)\n', (1167, 1187), False, 'from mne.transforms import Transform, apply_trans, rotation, translation, scaling\n'), ((1629, 1670), 'mne.coreg.coregister_fiducials', 'coregister_fiducials', (['info', 'mri_fiducials'], {}), '(info, mri_fiducials)\n', (1649, 1670), False, 'from mne.coreg import fit_matched_points, create_default_subject, scale_mri, _is_mri_subject, scale_labels, scale_source_space, coregister_fiducials\n'), ((1767, 1828), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (["trans_est['trans']", "trans['trans']"], {}), "(trans_est['trans'], trans['trans'])\n", (1792, 1828), False, 'from numpy.testing import assert_array_almost_equal\n'), ((2037, 2047), 'mne.utils._TempDir', '_TempDir', ([], {}), '()\n', (2045, 2047), False, 'from mne.utils import _TempDir, run_tests_if_main\n'), ((2064, 2083), 'mne.datasets.testing.data_path', 'testing.data_path', ([], {}), '()\n', (2081, 2083), False, 'from mne.datasets import testing\n'), ((2088, 2165), 'mne.coreg.create_default_subject', 'create_default_subject', ([], {'subjects_dir': 'tempdir', 'fs_home': 'fake_home', 'verbose': '(True)'}), '(subjects_dir=tempdir, fs_home=fake_home, verbose=True)\n', (2110, 2165), False, 'from mne.coreg import fit_matched_points, create_default_subject, scale_mri, _is_mri_subject, scale_labels, scale_source_space, coregister_fiducials\n'), ((2204, 2241), 'mne.coreg._is_mri_subject', '_is_mri_subject', (['"""fsaverage"""', 'tempdir'], {}), "('fsaverage', tempdir)\n", (2219, 2241), False, 'from mne.coreg import fit_matched_points, create_default_subject, scale_mri, _is_mri_subject, scale_labels, scale_source_space, coregister_fiducials\n'), ((2287, 2350), 'os.path.join', 'op.join', (['tempdir', '"""fsaverage"""', '"""bem"""', '"""fsaverage-fiducials.fif"""'], {}), "(tempdir, 'fsaverage', 'bem', 'fsaverage-fiducials.fif')\n", (2294, 2350), True, 'import os.path as op\n'), ((2355, 2374), 'os.remove', 'os.remove', (['fid_path'], {}), '(fid_path)\n', (2364, 2374), False, 'import os\n'), ((2379, 2455), 'mne.coreg.create_default_subject', 'create_default_subject', ([], {'update': '(True)', 'subjects_dir': 'tempdir', 'fs_home': 'fake_home'}), '(update=True, subjects_dir=tempdir, fs_home=fake_home)\n', (2401, 2455), False, 'from mne.coreg import fit_matched_points, create_default_subject, scale_mri, _is_mri_subject, scale_labels, scale_source_space, coregister_fiducials\n'), ((2494, 2513), 'os.path.exists', 'op.exists', (['fid_path'], {}), '(fid_path)\n', (2503, 2513), True, 'import os.path as op\n'), ((2769, 2817), 'os.path.join', 'op.join', (['tempdir', '"""fsaverage"""', '"""mri"""', '"""orig.mgz"""'], {}), "(tempdir, 'fsaverage', 'mri', 'orig.mgz')\n", (2776, 2817), True, 'import os.path as op\n'), ((2822, 2850), 'shutil.copyfile', 'copyfile', (['path_from', 'path_to'], {}), '(path_from, path_to)\n', (2830, 2850), False, 'from shutil import copyfile\n'), ((2904, 2953), 
'os.path.join', 'op.join', (['tempdir', '"""fsaverage"""', '"""label"""', '"""*.label"""'], {}), "(tempdir, 'fsaverage', 'label', '*.label')\n", (2911, 2953), True, 'import os.path as op\n'), ((2972, 2988), 'glob.glob', 'glob', (['label_temp'], {}), '(label_temp)\n', (2976, 2988), False, 'from glob import glob\n'), ((3139, 3199), 'os.path.join', 'op.join', (['tempdir', '"""fsaverage"""', '"""bem"""', '"""fsaverage-%s-src.fif"""'], {}), "(tempdir, 'fsaverage', 'bem', 'fsaverage-%s-src.fif')\n", (3146, 3199), True, 'import os.path as op\n'), ((3210, 3296), 'mne.setup_source_space', 'mne.setup_source_space', (['"""fsaverage"""', '"""ico0"""'], {'subjects_dir': 'tempdir', 'add_dist': '(False)'}), "('fsaverage', 'ico0', subjects_dir=tempdir, add_dist=\n False)\n", (3232, 3296), False, 'import mne\n'), ((3329, 3369), 'mne.source_space.write_source_spaces', 'write_source_spaces', (["(path % 'ico-0')", 'src'], {}), "(path % 'ico-0', src)\n", (3348, 3369), False, 'from mne.source_space import write_source_spaces\n'), ((3380, 3428), 'os.path.join', 'op.join', (['tempdir', '"""fsaverage"""', '"""mri"""', '"""orig.mgz"""'], {}), "(tempdir, 'fsaverage', 'mri', 'orig.mgz')\n", (3387, 3428), True, 'import os.path as op\n'), ((3482, 3592), 'mne.setup_volume_source_space', 'mne.setup_volume_source_space', (['"""fsaverage"""'], {'pos': '(50)', 'mri': 'mri', 'subjects_dir': 'tempdir', 'add_interpolator': '(False)'}), "('fsaverage', pos=50, mri=mri, subjects_dir=\n tempdir, add_interpolator=False)\n", (3511, 3592), False, 'import mne\n'), ((3609, 3651), 'mne.source_space.write_source_spaces', 'write_source_spaces', (["(path % 'vol-50')", 'vsrc'], {}), "(path % 'vol-50', vsrc)\n", (3628, 3651), False, 'from mne.source_space import write_source_spaces\n'), ((3732, 3755), 'numpy.array', 'np.array', (['[1, 0.2, 0.8]'], {}), '([1, 0.2, 0.8])\n', (3740, 3755), True, 'import numpy as np\n'), ((3758, 3849), 'mne.coreg.scale_mri', 'scale_mri', (['"""fsaverage"""', '"""flachkopf"""', 'scale', '(True)'], {'subjects_dir': 'tempdir', 'verbose': '"""debug"""'}), "('fsaverage', 'flachkopf', scale, True, subjects_dir=tempdir,\n verbose='debug')\n", (3767, 3849), False, 'from mne.coreg import fit_matched_points, create_default_subject, scale_mri, _is_mri_subject, scale_labels, scale_source_space, coregister_fiducials\n'), ((3911, 3948), 'mne.coreg._is_mri_subject', '_is_mri_subject', (['"""flachkopf"""', 'tempdir'], {}), "('flachkopf', tempdir)\n", (3926, 3948), False, 'from mne.coreg import fit_matched_points, create_default_subject, scale_mri, _is_mri_subject, scale_labels, scale_source_space, coregister_fiducials\n'), ((3989, 4049), 'os.path.join', 'op.join', (['tempdir', '"""flachkopf"""', '"""bem"""', '"""flachkopf-%s-src.fif"""'], {}), "(tempdir, 'flachkopf', 'bem', 'flachkopf-%s-src.fif')\n", (3996, 4049), True, 'import os.path as op\n'), ((4062, 4088), 'os.path.exists', 'op.exists', (["(spath % 'ico-0')"], {}), "(spath % 'ico-0')\n", (4071, 4088), True, 'import os.path as op\n'), ((4265, 4305), 'mne.read_source_spaces', 'mne.read_source_spaces', (["(spath % 'vol-50')"], {}), "(spath % 'vol-50')\n", (4287, 4305), False, 'import mne\n'), ((4315, 4344), 'numpy.array', 'np.array', (['[0.12, 0.41, -0.22]'], {}), '([0.12, 0.41, -0.22])\n', (4323, 4344), True, 'import numpy as np\n'), ((4497, 4544), 'mne.coreg.scale_labels', 'scale_labels', (['"""flachkopf"""'], {'subjects_dir': 'tempdir'}), "('flachkopf', subjects_dir=tempdir)\n", (4509, 4544), False, 'from mne.coreg import fit_matched_points, create_default_subject, 
scale_mri, _is_mri_subject, scale_labels, scale_source_space, coregister_fiducials\n'), ((4586, 4621), 'mne.add_source_space_distances', 'mne.add_source_space_distances', (['src'], {}), '(src)\n', (4616, 4621), False, 'import mne\n'), ((4699, 4725), 'os.remove', 'os.remove', (["(spath % 'ico-0')"], {}), "(spath % 'ico-0')\n", (4708, 4725), False, 'import os\n'), ((4730, 4792), 'mne.coreg.scale_source_space', 'scale_source_space', (['"""flachkopf"""', '"""ico-0"""'], {'subjects_dir': 'tempdir'}), "('flachkopf', 'ico-0', subjects_dir=tempdir)\n", (4748, 4792), False, 'from mne.coreg import fit_matched_points, create_default_subject, scale_mri, _is_mri_subject, scale_labels, scale_source_space, coregister_fiducials\n'), ((4804, 4843), 'mne.read_source_spaces', 'mne.read_source_spaces', (["(spath % 'ico-0')"], {}), "(spath % 'ico-0')\n", (4826, 4843), False, 'import mne\n'), ((5081, 5098), 'mne.transforms.rotation', 'rotation', (['(2)', '(6)', '(3)'], {}), '(2, 6, 3)\n', (5089, 5098), False, 'from mne.transforms import Transform, apply_trans, rotation, translation, scaling\n'), ((5113, 5140), 'mne.transforms.apply_trans', 'apply_trans', (['trans', 'tgt_pts'], {}), '(trans, tgt_pts)\n', (5124, 5140), False, 'from mne.transforms import Transform, apply_trans, rotation, translation, scaling\n'), ((5157, 5223), 'mne.coreg.fit_matched_points', 'fit_matched_points', (['src_pts', 'tgt_pts'], {'translate': '(False)', 'out': '"""trans"""'}), "(src_pts, tgt_pts, translate=False, out='trans')\n", (5175, 5223), False, 'from mne.coreg import fit_matched_points, create_default_subject, scale_mri, _is_mri_subject, scale_labels, scale_source_space, coregister_fiducials\n'), ((5273, 5304), 'mne.transforms.apply_trans', 'apply_trans', (['trans_est', 'src_pts'], {}), '(trans_est, src_pts)\n', (5284, 5304), False, 'from mne.transforms import Transform, apply_trans, rotation, translation, scaling\n'), ((5309, 5395), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['tgt_pts', 'est_pts', '(2)', '"""fit_matched_points with rotation"""'], {}), "(tgt_pts, est_pts, 2,\n 'fit_matched_points with rotation')\n", (5334, 5395), False, 'from numpy.testing import assert_array_almost_equal\n'), ((5530, 5557), 'mne.transforms.apply_trans', 'apply_trans', (['trans', 'tgt_pts'], {}), '(trans, tgt_pts)\n', (5541, 5557), False, 'from mne.transforms import Transform, apply_trans, rotation, translation, scaling\n'), ((5574, 5623), 'mne.coreg.fit_matched_points', 'fit_matched_points', (['src_pts', 'tgt_pts'], {'out': '"""trans"""'}), "(src_pts, tgt_pts, out='trans')\n", (5592, 5623), False, 'from mne.coreg import fit_matched_points, create_default_subject, scale_mri, _is_mri_subject, scale_labels, scale_source_space, coregister_fiducials\n'), ((5638, 5669), 'mne.transforms.apply_trans', 'apply_trans', (['trans_est', 'src_pts'], {}), '(trans_est, src_pts)\n', (5649, 5669), False, 'from mne.transforms import Transform, apply_trans, rotation, translation, scaling\n'), ((5674, 5777), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['tgt_pts', 'est_pts', '(2)', '"""fit_matched_points with rotation and translation."""'], {}), "(tgt_pts, est_pts, 2,\n 'fit_matched_points with rotation and translation.')\n", (5699, 5777), False, 'from numpy.testing import assert_array_almost_equal\n'), ((5986, 6013), 'mne.transforms.apply_trans', 'apply_trans', (['trans', 'tgt_pts'], {}), '(trans, tgt_pts)\n', (5997, 6013), False, 'from mne.transforms import Transform, apply_trans, rotation, translation, 
scaling\n'), ((6030, 6088), 'mne.coreg.fit_matched_points', 'fit_matched_points', (['src_pts', 'tgt_pts'], {'scale': '(1)', 'out': '"""trans"""'}), "(src_pts, tgt_pts, scale=1, out='trans')\n", (6048, 6088), False, 'from mne.coreg import fit_matched_points, create_default_subject, scale_mri, _is_mri_subject, scale_labels, scale_source_space, coregister_fiducials\n'), ((6103, 6134), 'mne.transforms.apply_trans', 'apply_trans', (['trans_est', 'src_pts'], {}), '(trans_est, src_pts)\n', (6114, 6134), False, 'from mne.transforms import Transform, apply_trans, rotation, translation, scaling\n'), ((6139, 6251), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['tgt_pts', 'est_pts', '(2)', '"""fit_matched_points with rotation, translation and scaling."""'], {}), "(tgt_pts, est_pts, 2,\n 'fit_matched_points with rotation, translation and scaling.')\n", (6164, 6251), False, 'from numpy.testing import assert_array_almost_equal\n'), ((6341, 6414), 'nose.tools.assert_raises', 'assert_raises', (['RuntimeError', 'fit_matched_points', 'tgt_pts', 'src_pts'], {'tol': '(10)'}), '(RuntimeError, fit_matched_points, tgt_pts, src_pts, tol=10)\n', (6354, 6414), False, 'from nose.tools import assert_raises\n'), ((2671, 2690), 'mne.datasets.testing.data_path', 'testing.data_path', ([], {}), '()\n', (2688, 2690), False, 'from mne.datasets import testing\n'), ((3036, 3057), 'os.remove', 'os.remove', (['label_path'], {}), '(label_path)\n', (3045, 3057), False, 'import os\n'), ((4152, 4211), 'os.path.join', 'os.path.join', (['tempdir', '"""flachkopf"""', '"""surf"""', '"""lh.sphere.reg"""'], {}), "(tempdir, 'flachkopf', 'surf', 'lh.sphere.reg')\n", (4164, 4211), False, 'import os\n'), ((4375, 4422), 'mne.transforms.apply_trans', 'apply_trans', (["vsrc_s[0]['src_mri_t']", '(pt * scale)'], {}), "(vsrc_s[0]['src_mri_t'], pt * scale)\n", (4386, 4422), False, 'from mne.transforms import Transform, apply_trans, rotation, translation, scaling\n'), ((4454, 4491), 'mne.transforms.apply_trans', 'apply_trans', (["vsrc[0]['src_mri_t']", 'pt'], {}), "(vsrc[0]['src_mri_t'], pt)\n", (4465, 4491), False, 'from mne.transforms import Transform, apply_trans, rotation, translation, scaling\n'), ((5474, 5495), 'mne.transforms.translation', 'translation', (['(2)', '(-6)', '(3)'], {}), '(2, -6, 3)\n', (5485, 5495), False, 'from mne.transforms import Transform, apply_trans, rotation, translation, scaling\n'), ((5497, 5514), 'mne.transforms.rotation', 'rotation', (['(2)', '(6)', '(3)'], {}), '(2, 6, 3)\n', (5505, 5514), False, 'from mne.transforms import Transform, apply_trans, rotation, translation, scaling\n'), ((905, 932), 'mne.transforms.translation', 'translation', (['(0.1)', '(-0.1)', '(0.1)'], {}), '(0.1, -0.1, 0.1)\n', (916, 932), False, 'from mne.transforms import Transform, apply_trans, rotation, translation, scaling\n'), ((5001, 5026), 'numpy.random.RandomState', 'np.random.RandomState', (['(42)'], {}), '(42)\n', (5022, 5026), True, 'import numpy as np\n'), ((5875, 5896), 'mne.transforms.translation', 'translation', (['(2)', '(-6)', '(3)'], {}), '(2, -6, 3)\n', (5886, 5896), False, 'from mne.transforms import Transform, apply_trans, rotation, translation, scaling\n'), ((5898, 5921), 'mne.transforms.rotation', 'rotation', (['(1.5)', '(0.3)', '(1.4)'], {}), '(1.5, 0.3, 1.4)\n', (5906, 5921), False, 'from mne.transforms import Transform, apply_trans, rotation, translation, scaling\n'), ((5950, 5972), 'mne.transforms.scaling', 'scaling', (['(0.5)', '(0.5)', '(0.5)'], {}), '(0.5, 0.5, 0.5)\n', (5957, 5972), False, 
'from mne.transforms import Transform, apply_trans, rotation, translation, scaling\n'), ((881, 902), 'mne.transforms.rotation', 'rotation', (['(0.4)', '(0.1)', '(0)'], {}), '(0.4, 0.1, 0)\n', (889, 902), False, 'from mne.transforms import Transform, apply_trans, rotation, translation, scaling\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
#########
Reporting
#########
*Created on Thu Jun 8 14:40 2017 by <NAME>*
Tools for creating HTML Reports."""
import time
import base64
import os
import gc
import os.path as op
from string import Template
from io import BytesIO as IO
import pandas as pd
from rdkit.Chem import AllChem as Chem
from rdkit.Chem import Draw
import numpy as np
from PIL import Image, ImageChops
import matplotlib.pyplot as plt
from cellpainting2 import tools as cpt
from cellpainting2 import report_templ as cprt
from cellpainting2 import processing as cpp
cp_config = cpt.load_config("config")
# cp_plates = cpt.load_config("plates")
IPYTHON = cpt.is_interactive_ipython()
if IPYTHON:
from IPython.core.display import HTML
ACT_PROF_PARAMETERS = cp_config["Parameters"]
ACT_CUTOFF_PERC = cp_config["Cutoffs"]["ActCutoffPerc"]
ACT_CUTOFF_PERC_H = cp_config["Cutoffs"]["ActCutoffPercH"]
ACT_CUTOFF_PERC_REF = cp_config["Cutoffs"]["ActCutoffPercRef"]
OVERACT_H = cp_config["Cutoffs"]["OverActH"]
LIMIT_ACTIVITY_H = cp_config["Cutoffs"]["LimitActivityH"]
LIMIT_ACTIVITY_L = cp_config["Cutoffs"]["LimitActivityL"]
LIMIT_CELL_COUNT_H = cp_config["Cutoffs"]["LimitCellCountH"]
LIMIT_CELL_COUNT_L = cp_config["Cutoffs"]["LimitCellCountL"]
LIMIT_SIMILARITY_H = cp_config["Cutoffs"]["LimitSimilarityH"]
LIMIT_SIMILARITY_L = cp_config["Cutoffs"]["LimitSimilarityL"]
PARAMETER_HELP = cp_config["ParameterHelp"]
# get positions of the compartments in the list of parameters
x = 1
XTICKS = [x]
for comp in ["Median_Cytoplasm", "Median_Nuclei"]:
for idx, p in enumerate(ACT_PROF_PARAMETERS[x:], 1):
if p.startswith(comp):
XTICKS.append(idx + x)
x += idx
break
XTICKS.append(len(ACT_PROF_PARAMETERS))
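# XTICKS now holds the index boundaries of the Cells / Cytoplasm / Nuclei parameter
# blocks; heat_mpl below uses them as x-tick positions and compartment label anchors.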
Draw.DrawingOptions.atomLabelFontFace = "DejaVu Sans"
Draw.DrawingOptions.atomLabelFontSize = 18
try:
from misc_tools import apl_tools
AP_TOOLS = True
# Library version
VERSION = apl_tools.get_commit(__file__)
# I use this to keep track of the library versions I use in my project notebooks
print("{:45s} ({})".format(__name__, VERSION))
except ImportError:
AP_TOOLS = False
print("{:45s} ({})".format(__name__, time.strftime(
"%y%m%d-%H:%M", time.localtime(op.getmtime(__file__)))))
try:
# Try to import Avalon so it can be used for generation of 2d coordinates.
from rdkit.Avalon import pyAvalonTools as pyAv
USE_AVALON_2D = True
except ImportError:
print(" * Avalon not available. Using RDKit for 2d coordinate generation.")
USE_AVALON_2D = False
try:
import holoviews as hv
hv.extension("bokeh")
HOLOVIEWS = True
except ImportError:
HOLOVIEWS = False
print("* holoviews could not be import. heat_hv is not available.")
def check_2d_coords(mol, force=False):
"""Check if a mol has 2D coordinates and if not, calculate them."""
if not force:
try:
mol.GetConformer()
except ValueError:
force = True # no 2D coords... calculate them
if force:
if USE_AVALON_2D:
pyAv.Generate2DCoords(mol)
else:
mol.Compute2DCoords()
def mol_from_smiles(smi, calc_2d=True):
mol = Chem.MolFromSmiles(smi)
if not mol:
mol = Chem.MolFromSmiles("*")
else:
if calc_2d:
check_2d_coords(mol)
return mol
def autocrop(im, bgcolor="white"):
if im.mode != "RGB":
im = im.convert("RGB")
bg = Image.new("RGB", im.size, bgcolor)
diff = ImageChops.difference(im, bg)
bbox = diff.getbbox()
if bbox:
return im.crop(bbox)
return None # no contents
def get_value(str_val):
if not str_val:
return ""
try:
val = float(str_val)
if "." not in str_val:
val = int(val)
except ValueError:
val = str_val
return val
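# Illustrative behaviour: get_value("3") -> 3, get_value("2.5") -> 2.5,
# get_value("abc") -> "abc", get_value("") -> "".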
def isnumber(x):
"""Returns True, if x is a number (i.e. can be converted to float)."""
try:
float(x)
return True
except ValueError:
return False
def convert_bool(dict, dkey, true="Yes", false="No", default="n.d."):
if dkey in dict:
if dict[dkey]:
dict[dkey] = true
else:
dict[dkey] = false
else:
dict[dkey] = default
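# e.g. convert_bool(rec, "Toxic") rewrites rec["Toxic"] to "Yes"/"No" for truthy/falsy
# values and to "n.d." when the key is missing.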
def load_image(path, well, channel):
image_fn = "{}/{}_w{}.jpg".format(path, well, channel)
im = Image.open(image_fn)
return im
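# e.g. load_image(image_dir, "H11", 1) opens "<image_dir>/H11_w1.jpg" (well H11, channel 1).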
def b64_mol(mol, size=300):
img_file = IO()
try:
img = autocrop(Draw.MolToImage(mol, size=(size, size)))
except UnicodeEncodeError:
print(Chem.MolToSmiles(mol))
mol = Chem.MolFromSmiles("C")
img = autocrop(Draw.MolToImage(mol, size=(size, size)))
img.save(img_file, format='PNG')
b64 = base64.b64encode(img_file.getvalue())
b64 = b64.decode()
img_file.close()
return b64
def b64_img(im, format="JPEG"):
if isinstance(im, IO):
needs_close = False
img_file = im
else:
needs_close = True
img_file = IO()
im.save(img_file, format=format)
b64 = base64.b64encode(img_file.getvalue())
b64 = b64.decode()
if needs_close:
img_file.close()
return b64
def mol_img_tag(mol, options=None):
tag = """<img {} src="data:image/png;base64,{}" alt="Mol"/>"""
if options is None:
options = ""
img_tag = tag.format(options, b64_mol(mol))
return img_tag
def img_tag(im, format="jpeg", options=None):
tag = """<img {} src="data:image/{};base64,{}" alt="Image"/>"""
if options is None:
options = ""
b = b64_img(im, format=format)
img_tag = tag.format(options, format.lower(), b)
return img_tag
def load_control_images(src_dir):
image_dir = op.join(src_dir, "images")
ctrl_images = {}
for ch in range(1, 6):
im = load_image(image_dir, "H11", ch)
ctrl_images[ch] = img_tag(im, options='style="width: 250px;"')
return ctrl_images
def sanitize_filename(fn):
result = fn.replace(":", "_").replace(",", "_").replace(".", "_")
return result
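# e.g. sanitize_filename("a:b,c.d") -> "a_b_c_d"; colons, commas and dots would break
# the generated file paths.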
def write(text, fn):
with open(fn, "w") as f:
f.write(text)
def write_page(page, title="Report", fn="index.html", templ=cprt.HTML_INTRO):
t = Template(templ + page + cprt.HTML_EXTRO)
result = t.substitute(title=title)
write(result, fn=fn)
def assign_colors(rec):
act_cutoff_high = ACT_CUTOFF_PERC_H
if "Toxic" in rec:
if rec["Toxic"]:
rec["Col_Toxic"] = cprt.COL_RED
else:
rec["Col_Toxic"] = cprt.COL_GREEN
else:
rec["Col_Toxic"] = cprt.COL_WHITE
if "Pure_Flag" in rec:
if rec["Pure_Flag"] == "Ok":
rec["Col_Purity"] = cprt.COL_GREEN
elif rec["Pure_Flag"] == "Warn":
rec["Col_Purity"] = cprt.COL_YELLOW
elif rec["Pure_Flag"] == "Fail":
rec["Col_Purity"] = cprt.COL_RED
else:
rec["Col_Purity"] = cprt.COL_WHITE
else:
rec["Col_Purity"] = cprt.COL_WHITE
if rec["Rel_Cell_Count"] >= LIMIT_CELL_COUNT_H:
rec["Col_Cell_Count"] = cprt.COL_GREEN
elif rec["Rel_Cell_Count"] >= LIMIT_CELL_COUNT_L:
rec["Col_Cell_Count"] = cprt.COL_YELLOW
else:
rec["Col_Cell_Count"] = cprt.COL_RED
if rec["Activity"] > act_cutoff_high:
rec["Col_Act"] = cprt.COL_RED
elif rec["Activity"] >= LIMIT_ACTIVITY_H:
rec["Col_Act"] = cprt.COL_GREEN
elif rec["Activity"] >= LIMIT_ACTIVITY_L:
rec["Col_Act"] = cprt.COL_YELLOW
else:
rec["Col_Act"] = cprt.COL_RED
if rec["Act_Flag"] == "active":
rec["Col_Act_Flag"] = cprt.COL_GREEN
else:
rec["Col_Act_Flag"] = cprt.COL_RED
def remove_colors(rec):
for k in rec.keys():
if k.startswith("Col_"):
rec[k] = cprt.COL_WHITE
def overview_report(df, cutoff=LIMIT_SIMILARITY_L / 100,
highlight=False, mode="cpd"):
"""mode `int` displays similarities not to references but to other internal compounds
(just displays the `Similarity` column)."""
cpp.load_resource("SIM_REFS")
sim_refs = cpp.SIM_REFS
detailed_cpds = []
if isinstance(df, cpp.DataSet):
df = df.data
t = Template(cprt.OVERVIEW_TABLE_HEADER)
if "int" in mode:
tbl_header = t.substitute(sim_entity="to another Test Compound")
else:
tbl_header = t.substitute(sim_entity="to a Reference")
report = [cprt.OVERVIEW_TABLE_INTRO, tbl_header]
row_templ = Template(cprt.OVERVIEW_TABLE_ROW)
idx = 0
for _, rec in df.iterrows():
act_cutoff_low = ACT_CUTOFF_PERC
act_cutoff_high = ACT_CUTOFF_PERC_H
idx += 1
well_id = rec["Well_Id"]
mol = mol_from_smiles(rec.get("Smiles", "*"))
rec["mol_img"] = mol_img_tag(mol)
rec["idx"] = idx
if "Pure_Flag" not in rec:
rec["Pure_Flag"] = "n.d."
rec["Act_Flag"] = "active"
rec["Max_Sim"] = ""
rec["Link"] = ""
rec["Col_Sim"] = cprt.COL_WHITE
has_details = True
if rec["Activity"] < act_cutoff_low:
has_details = False
rec["Act_Flag"] = "inactive"
# print(rec)
# similar references are searched for non-toxic compounds with an activity >= LIMIT_ACTIVITY_L
if rec["Activity"] < LIMIT_ACTIVITY_L or rec["Activity"] > act_cutoff_high or rec["Toxic"] or rec["OverAct"] > OVERACT_H:
similars_determined = False
if rec["OverAct"] > OVERACT_H:
rec["Max_Sim"] = "Overact."
rec["Col_Sim"] = cprt.COL_RED
else:
similars_determined = True
assign_colors(rec)
convert_bool(rec, "Toxic")
if has_details:
detailed_cpds.append(well_id)
details_fn = sanitize_filename(well_id)
plate = rec["Plate"]
rec["Link"] = '<a href="../{}/details/{}.html">Detailed<br>Report</a>'.format(
plate, details_fn)
if similars_determined:
if "int" in mode:
# similar = {"Similarity": [rec["Similarity"]]}
similar = pd.DataFrame(
{"Well_Id": [well_id], "Similarity": [rec["Similarity"]]})
else:
similar = sim_refs[sim_refs["Well_Id"] == well_id].compute()
similar = similar.sort_values("Similarity",
ascending=False).reset_index()
if len(similar) > 0:
max_sim = round(
similar["Similarity"][0] * 100, 1) # first in the list has the highest similarity
rec["Max_Sim"] = max_sim
if max_sim >= LIMIT_SIMILARITY_H:
rec["Col_Sim"] = cprt.COL_GREEN
elif max_sim >= LIMIT_SIMILARITY_L:
rec["Col_Sim"] = cprt.COL_YELLOW
else:
rec["Col_Sim"] = cprt.COL_WHITE
print("ERROR: This should not happen (Max_Sim).")
else:
rec["Max_Sim"] = "< {}".format(LIMIT_SIMILARITY_L)
rec["Col_Sim"] = cprt.COL_RED
if not highlight:
# remove all coloring again:
remove_colors(rec)
report.append(row_templ.substitute(rec))
report.append(cprt.TABLE_EXTRO)
return "\n".join(report), detailed_cpds
def sim_ref_table(similar):
cpp.load_resource("REFERENCES")
df_refs = cpp.REFERENCES
table = [cprt.TABLE_INTRO, cprt.REF_TABLE_HEADER]
templ = Template(cprt.REF_TABLE_ROW)
for idx, rec in similar.iterrows():
rec = rec.to_dict()
ref_id = rec["Ref_Id"]
ref_data = df_refs[df_refs["Well_Id"] == ref_id]
if cpp.is_dask(ref_data):
ref_data = ref_data.compute()
if len(ref_data) == 0:
print(rec)
raise ValueError("BUG: ref_data should not be empty.")
ref_data = ref_data.copy()
ref_data = ref_data.fillna("—")
rec.update(ref_data.to_dict("records")[0])
mol = mol_from_smiles(rec.get("Smiles", "*"))
rec["Sim_Format"] = "{:.1f}".format(rec["Similarity"] * 100)
rec["Tan_Format"] = "{:.1f}".format(rec["Tanimoto"] * 100)
if rec["Tan_Format"] == np.nan:
rec["Tan_Format"] = "—"
rec["mol_img"] = mol_img_tag(mol)
rec["idx"] = idx + 1
link = "../../{}/details/{}.html".format(rec["Plate"],
sanitize_filename(rec["Well_Id"]))
rec["link"] = link
row = templ.substitute(rec)
table.append(row)
table.append(cprt.TABLE_EXTRO)
return "\n".join(table)
def changed_parameters_table(act_prof, val, parameters=ACT_PROF_PARAMETERS):
changed = cpt.parameters_from_act_profile_by_val(
act_prof, val, parameters=parameters)
table = []
templ = Template(cprt.PARM_TABLE_ROW)
for idx, p in enumerate(changed, 1):
p_elmnts = p.split("_")
p_module = p_elmnts[2]
p_name = "_".join(p_elmnts[1:])
rec = {
"idx": idx,
"Parameter": p_name,
"Help_Page": PARAMETER_HELP[p_module]
}
row = templ.substitute(rec)
table.append(row)
return "\n".join(table), changed
def parm_stats(parameters):
result = []
channels = ["_Mito", "_Ph_golgi", "_Syto", "_ER", "Hoechst"]
for ch in channels:
cnt = len([p for p in parameters if ch in p])
result.append(cnt)
return result
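# parm_stats returns one count per channel substring, e.g. [3, 0, 5, 2, 4] would mean
# 3 changed "_Mito" parameters, none for "_Ph_golgi", etc. (illustrative numbers).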
def parm_hist(increased, decreased, hist_cache):
# try to load histogram from cache:
if op.isfile(hist_cache):
result = open(hist_cache).read()
return result
labels = [
"Mito",
"Golgi / Membrane",
"RNA / Nucleoli",
"ER",
"Nuclei"
]
inc_max = max(increased)
dec_max = max(decreased)
max_total = max([inc_max, dec_max])
if max_total == 0:
result = "No compartment-specific parameters were changed."
return result
inc_norm = [v / max_total for v in increased]
dec_norm = [v / max_total for v in decreased]
n_groups = 5
dpi = 96
# plt.rcParams['axes.titlesize'] = 25
plt.style.use("seaborn-white")
plt.style.use("seaborn-pastel")
plt.style.use("seaborn-talk")
plt.rcParams['axes.labelsize'] = 25
plt.rcParams['xtick.labelsize'] = 20
plt.rcParams['ytick.labelsize'] = 20
plt.rcParams['legend.fontsize'] = 20
size = (1500, 1000)
figsize = (size[0] / dpi, size[1] / dpi)
fig, ax = plt.subplots(figsize=figsize)
index = np.arange(n_groups)
bar_width = 0.25
plt.bar(index, inc_norm, bar_width,
color='#94caef',
label='Inc')
plt.bar(index + bar_width, dec_norm, bar_width,
color='#ffdd1a',
label='Dec')
plt.xlabel('Cell Compartment')
plt.ylabel('rel. Occurrence')
plt.xticks(index + bar_width / 2, labels, rotation=45)
plt.legend()
plt.tight_layout()
img_file = IO()
plt.savefig(img_file, bbox_inches='tight', format="jpg")
result = img_tag(img_file, format="jpg", options='style="width: 800px;"')
img_file.close()
# important, otherwise the plots will accumulate and fill up memory:
plt.close()
open(hist_cache, "w").write(result) # cache the histogram
return result
def heat_mpl(df, id_prop="Compound_Id", cmap="bwr",
show=True, colorbar=True, biosim=False, chemsim=False, method="dist_corr",
sort_parm=False, parm_dict=None,
plot_cache=None):
# try to load heatmap from cache:
if plot_cache is not None and op.isfile(plot_cache):
result = open(plot_cache).read()
return result
if "dist" in method.lower():
profile_sim = cpt.profile_sim_dist_corr
else:
profile_sim = cpt.profile_sim_tanimoto
df_len = len(df)
img_size = 15 if show else 17
plt.style.use("seaborn-white")
plt.style.use("seaborn-pastel")
plt.style.use("seaborn-talk")
plt.rcParams['axes.labelsize'] = 25
# plt.rcParams['legend.fontsize'] = 20
plt.rcParams['figure.figsize'] = (img_size, 1.1 + 0.47 * (df_len - 1))
plt.rcParams['axes.labelsize'] = 25
plt.rcParams['ytick.labelsize'] = 20
plt.rcParams['xtick.labelsize'] = 15
fs_text = 18
y_labels = []
fp_list = []
max_val = 3 # using a fixed color range now
min_val = -3
ylabel_templ = "{}{}{}"
ylabel_cs = ""
ylabel_bs = ""
id_prop_list = []
for ctr, (_, rec) in enumerate(df.iterrows()):
if sort_parm:
if ctr == 0:
compartments = ["Median_Cells", "Median_Cytoplasm", "Median_Nuclei"]
parm_list = []
for comp in compartments:
parm_comp = [x for x in ACT_PROF_PARAMETERS if x.startswith(comp)]
val_list = [rec[x] for x in parm_comp]
parm_sorted = [x for _, x in sorted(zip(val_list, parm_comp))]
parm_list.extend(parm_sorted)
else:
parm_list = ACT_PROF_PARAMETERS
fp = [rec[x] for x in ACT_PROF_PARAMETERS]
fp_view = [rec[x] for x in parm_list]
fp_list.append(fp_view)
id_prop_list.append(rec[id_prop])
if chemsim:
if ctr == 0:
mol = mol_from_smiles(rec.get("Smiles", "*"))
if len(mol.GetAtoms()) > 1:
ylabel_cs = "Chem | "
mol_fp = Chem.GetMorganFingerprint(mol, 2) # ECFC4
else: # no Smiles present in the DataFrame
ylabel_cs = ""
chemsim = False
else:
q = rec.get("Smiles", "*")
if len(q) < 2:
ylabel_cs = " | "
else:
sim = cpt.chem_sim(mol_fp, q) * 100
ylabel_cs = "{:3.0f}% | ".format(sim)
if biosim:
if ctr == 0:
prof_ref = fp
ylabel_bs = " Bio | "
else:
sim = profile_sim(prof_ref, fp) * 100
ylabel_bs = "{:3.0f}% | ".format(sim)
ylabel = ylabel_templ.format(ylabel_cs, ylabel_bs, rec[id_prop])
y_labels.append(ylabel)
# m_val = max(fp) # this was the calculation of the color range
# if m_val > max_val:
# max_val = m_val
# m_val = min(fp)
# if m_val < min_val:
# min_val = m_val
if isinstance(parm_dict, dict):
parm_dict["Parameter"] = parm_list
for i in range(len(id_prop_list)):
parm_dict[str(id_prop_list[i])] = fp_list[i].copy()
# calc the colorbar range
max_val = max(abs(min_val), max_val)
# invert y axis:
y_labels = y_labels[::-1]
fp_list = fp_list[::-1]
Z = np.asarray(fp_list)
plt.xticks(XTICKS)
plt.yticks(np.arange(df_len) + 0.5, y_labels)
plt.pcolor(Z, vmin=-max_val, vmax=max_val, cmap=cmap)
plt.text(XTICKS[1] // 2, -1.1, "Cells",
horizontalalignment='center', fontsize=fs_text)
plt.text(XTICKS[1] + ((XTICKS[2] - XTICKS[1]) // 2), -1.1,
"Cytoplasm", horizontalalignment='center', fontsize=fs_text)
plt.text(XTICKS[2] + ((XTICKS[3] - XTICKS[2]) // 2), -1.1,
"Nuclei", horizontalalignment='center', fontsize=fs_text)
if colorbar and len(df) > 3:
plt.colorbar()
plt.tight_layout()
if show:
plt.show()
else:
img_file = IO()
plt.savefig(img_file, bbox_inches='tight', format="jpg")
result = img_tag(img_file, format="jpg",
options='style="width: 900px;"')
img_file.close()
# important, otherwise the plots will accumulate and fill up memory:
plt.clf()
plt.close()
gc.collect()
if plot_cache is not None: # cache the plot
open(plot_cache, "w").write(result)
return result
def heat_hv(df, id_prop="Compound_Id", cmap="bwr", invert_y=False):
if not HOLOVIEWS:
raise ImportError("# holoviews library could not be imported")
df_parm = df[[id_prop] + ACT_PROF_PARAMETERS].copy()
df_len = len(df_parm)
col_bar = False if df_len < 3 else True
values = list(df_parm.drop(id_prop, axis=1).values.flatten())
max_val = max(values)
min_val = min(values)
max_val = max(abs(min_val), max_val)
hm_opts = dict(width=950, height=40 + 30 * df_len, tools=['hover'], invert_yaxis=invert_y,
xrotation=90, labelled=[], toolbar='above', colorbar=col_bar, xaxis=None,
colorbar_opts={"width": 10})
hm_style = {"cmap": cmap}
opts = {'HeatMap': {'plot': hm_opts, "style": hm_style}}
df_heat = cpt.melt(df_parm, id_prop=id_prop)
heatmap = hv.HeatMap(df_heat).redim.range(Value=(-max_val, max_val))
return heatmap(opts)
def show_images(plate_full_name, well):
"""For interactive viewing in the notebook."""
if not IPYTHON:
return
src_dir = op.join(cp_config["Paths"]["SrcPath"], plate_full_name)
ctrl_images = load_control_images(src_dir)
image_dir = op.join(src_dir, "images")
templ_dict = {}
for ch in range(1, 6):
im = load_image(image_dir, well, ch)
templ_dict["Img_{}_Cpd".format(ch)] = img_tag(
im, options='style="width: 250px;"')
templ_dict["Img_{}_Ctrl".format(ch)] = ctrl_images[ch]
tbody_templ = Template(cprt.IMAGES_TABLE)
table = cprt.TABLE_INTRO + \
tbody_templ.substitute(templ_dict) + cprt.HTML_EXTRO
return HTML(table)
def get_data_for_wells(well_ids):
cpp.load_resource("DATASTORE")
data = cpp.DATASTORE
result = data[data["Well_Id"].isin(well_ids)]
if cpp.is_dask(result):
result = result.compute()
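    # a temporary categorical "_sort" column restores the original well_ids order,
    # which the isin() selection above does not preserve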
result["_sort"] = pd.Categorical(
result["Well_Id"], categories=well_ids, ordered=True)
result = result.sort_values("_sort")
result.drop("_sort", axis=1, inplace=False)
return result
def detailed_report(rec, src_dir, ctrl_images):
# print(rec)
cpp.load_resource("SIM_REFS")
sim_refs = cpp.SIM_REFS
date = time.strftime("%d-%m-%Y %H:%M", time.localtime())
image_dir = op.join(src_dir, "images")
well_id = rec["Well_Id"]
# act_prof = [rec[x] for x in ACT_PROF_PARAMETERS]
mol = mol_from_smiles(rec.get("Smiles", "*"))
if "Pure_Flag" not in rec:
rec["Pure_Flag"] = "n.d."
templ_dict = rec.copy()
log2_vals = [(x, rec[x]) for x in ACT_PROF_PARAMETERS]
parm_table = []
for idx, x in enumerate(log2_vals, 1):
parm_table.extend(["<tr><td>", str(idx), "</td>",
# omit the "Median_" head of each parameter
"<td>", x[0][7:], "</td>",
'<td align="right">', "{:.2f}".format(x[1]), "</td></tr>\n"])
templ_dict["Parm_Table"] = "".join(parm_table)
df_heat = pd.DataFrame([rec])
templ_dict["Date"] = date
templ_dict["mol_img"] = mol_img_tag(mol, options='class="cpd_image"')
if templ_dict["Is_Ref"]:
if not isinstance(templ_dict["Trivial_Name"], str) or templ_dict["Trivial_Name"] == "":
templ_dict["Trivial_Name"] = "—"
if not isinstance(templ_dict["Known_Act"], str) or templ_dict["Known_Act"] == "":
templ_dict["Known_Act"] = "—"
t = Template(cprt.DETAILS_REF_ROW)
templ_dict["Reference"] = t.substitute(templ_dict)
else:
templ_dict["Reference"] = ""
well = rec["Metadata_Well"]
for ch in range(1, 6):
im = load_image(image_dir, well, ch)
templ_dict["Img_{}_Cpd".format(ch)] = img_tag(
im, options='style="width: 250px;"')
templ_dict["Img_{}_Ctrl".format(ch)] = ctrl_images[ch]
act_cutoff_high = ACT_CUTOFF_PERC_H
if rec["Rel_Cell_Count"] < LIMIT_CELL_COUNT_L:
templ_dict["Ref_Table"] = "Because of compound toxicity, no similarity was determined."
elif rec["Activity"] < LIMIT_ACTIVITY_L:
templ_dict["Ref_Table"] = "Because of low induction (< {}%), no similarity was determined.".format(LIMIT_ACTIVITY_L)
elif rec["Activity"] > act_cutoff_high:
templ_dict["Ref_Table"] = "Because of high induction (> {}%), no similarity was determined.".format(act_cutoff_high)
elif rec["OverAct"] > OVERACT_H:
templ_dict["Ref_Table"] = "Because of high similarity to the overactivation profile (> {}%), no similarity was determined.".format(OVERACT_H)
else:
similar = sim_refs[sim_refs["Well_Id"] == well_id].compute()
if len(similar) > 0:
similar = similar.sort_values("Similarity",
ascending=False).reset_index().head(5)
ref_tbl = sim_ref_table(similar)
templ_dict["Ref_Table"] = ref_tbl
sim_data = get_data_for_wells(similar["Ref_Id"].values)
df_heat = pd.concat([df_heat, sim_data])
else:
templ_dict["Ref_Table"] = "No similar references found."
cache_path = op.join(cp_config["Dirs"]["DataDir"], "plots", rec["Plate"])
if not op.isdir(cache_path):
os.makedirs(cache_path, exist_ok=True)
hm_fn = sanitize_filename(rec["Well_Id"] + ".txt")
hm_cache = op.join(cache_path, hm_fn)
templ_dict["Heatmap"] = heat_mpl(df_heat, id_prop="Compound_Id", cmap="bwr",
show=False, colorbar=True, plot_cache=hm_cache)
t = Template(cprt.DETAILS_TEMPL)
report = t.substitute(templ_dict)
return report
def full_report(df, src_dir, report_name="report", plate=None,
cutoff=0.6, highlight=False):
report_full_path = op.join(cp_config["Dirs"]["ReportDir"], report_name)
overview_fn = op.join(report_full_path, "index.html")
date = time.strftime("%d-%m-%Y %H:%M", time.localtime())
cpt.create_dirs(op.join(report_full_path, "details"))
if isinstance(df, cpp.DataSet):
df = df.data
print("* creating overview...")
header = "{}\n<h2>Cell Painting Overview Report</h2>\n".format(cprt.LOGO)
title = "Overview"
if plate is not None:
title = plate
header += "<h3>Plate {}</h3>\n".format(plate)
header += "<p>({})</p>\n".format(date)
if highlight:
highlight_legend = cprt.HIGHLIGHT_LEGEND
else:
highlight_legend = ""
overview, detailed_cpds = overview_report(df, cutoff=cutoff, highlight=highlight)
overview = header + overview + highlight_legend
write_page(overview, title=title, fn=overview_fn,
templ=cprt.OVERVIEW_HTML_INTRO)
# print(detailed_cpds)
print("* creating detailed reports...")
print(" * loading control images...")
ctrl_images = load_control_images(src_dir)
print(" * writing individual reports...")
df_detailed = df[df["Well_Id"].isin(detailed_cpds)]
ctr = 0
df_len = len(df_detailed)
for _, rec in df_detailed.iterrows():
ctr += 1
if not IPYTHON and ctr % 10 == 0:
print(" ({:3d}%)\r".format(int(100 * ctr / df_len)), end="")
well_id = rec["Well_Id"]
fn = op.join(report_full_path, "details",
"{}.html".format(sanitize_filename(well_id)))
title = "{} Details".format(well_id)
# similar = detailed_cpds[well_id]
details = detailed_report(rec, src_dir, ctrl_images)
write_page(details, title=title, fn=fn, templ=cprt.DETAILS_HTML_INTRO)
print("* done. ")
if IPYTHON:
return HTML('<a href="{}">{}</a>'.format(overview_fn, "Overview"))
|
[
"PIL.ImageChops.difference",
"cellpainting2.tools.load_config",
"PIL.Image.new",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.bar",
"IPython.core.display.HTML",
"gc.collect",
"os.path.isfile",
"matplotlib.pyplot.style.use",
"numpy.arange",
"cellpainting2.tools.parameters_from_act_profile_by_val",
"os.path.join",
"matplotlib.pyplot.tight_layout",
"rdkit.Chem.Draw.MolToImage",
"pandas.DataFrame",
"cellpainting2.tools.chem_sim",
"cellpainting2.processing.load_resource",
"rdkit.Chem.AllChem.GetMorganFingerprint",
"matplotlib.pyplot.close",
"holoviews.extension",
"matplotlib.pyplot.colorbar",
"holoviews.HeatMap",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.subplots",
"cellpainting2.processing.is_dask",
"time.localtime",
"pandas.concat",
"matplotlib.pyplot.pcolor",
"io.BytesIO",
"matplotlib.pyplot.show",
"cellpainting2.tools.melt",
"matplotlib.pyplot.legend",
"numpy.asarray",
"cellpainting2.tools.is_interactive_ipython",
"matplotlib.pyplot.text",
"rdkit.Avalon.pyAvalonTools.Generate2DCoords",
"matplotlib.pyplot.ylabel",
"rdkit.Chem.AllChem.MolFromSmiles",
"os.makedirs",
"os.path.isdir",
"misc_tools.apl_tools.get_commit",
"rdkit.Chem.AllChem.MolToSmiles",
"PIL.Image.open",
"string.Template",
"os.path.getmtime",
"pandas.Categorical",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig"
] |
[((607, 632), 'cellpainting2.tools.load_config', 'cpt.load_config', (['"""config"""'], {}), "('config')\n", (622, 632), True, 'from cellpainting2 import tools as cpt\n'), ((684, 712), 'cellpainting2.tools.is_interactive_ipython', 'cpt.is_interactive_ipython', ([], {}), '()\n', (710, 712), True, 'from cellpainting2 import tools as cpt\n'), ((1976, 2006), 'misc_tools.apl_tools.get_commit', 'apl_tools.get_commit', (['__file__'], {}), '(__file__)\n', (1996, 2006), False, 'from misc_tools import apl_tools\n'), ((2631, 2652), 'holoviews.extension', 'hv.extension', (['"""bokeh"""'], {}), "('bokeh')\n", (2643, 2652), True, 'import holoviews as hv\n'), ((3230, 3253), 'rdkit.Chem.AllChem.MolFromSmiles', 'Chem.MolFromSmiles', (['smi'], {}), '(smi)\n', (3248, 3253), True, 'from rdkit.Chem import AllChem as Chem\n'), ((3488, 3522), 'PIL.Image.new', 'Image.new', (['"""RGB"""', 'im.size', 'bgcolor'], {}), "('RGB', im.size, bgcolor)\n", (3497, 3522), False, 'from PIL import Image, ImageChops\n'), ((3534, 3563), 'PIL.ImageChops.difference', 'ImageChops.difference', (['im', 'bg'], {}), '(im, bg)\n', (3555, 3563), False, 'from PIL import Image, ImageChops\n'), ((4404, 4424), 'PIL.Image.open', 'Image.open', (['image_fn'], {}), '(image_fn)\n', (4414, 4424), False, 'from PIL import Image, ImageChops\n'), ((4484, 4488), 'io.BytesIO', 'IO', ([], {}), '()\n', (4486, 4488), True, 'from io import BytesIO as IO\n'), ((5757, 5783), 'os.path.join', 'op.join', (['src_dir', '"""images"""'], {}), "(src_dir, 'images')\n", (5764, 5783), True, 'import os.path as op\n'), ((6251, 6291), 'string.Template', 'Template', (['(templ + page + cprt.HTML_EXTRO)'], {}), '(templ + page + cprt.HTML_EXTRO)\n', (6259, 6291), False, 'from string import Template\n'), ((8092, 8121), 'cellpainting2.processing.load_resource', 'cpp.load_resource', (['"""SIM_REFS"""'], {}), "('SIM_REFS')\n", (8109, 8121), True, 'from cellpainting2 import processing as cpp\n'), ((8238, 8274), 'string.Template', 'Template', (['cprt.OVERVIEW_TABLE_HEADER'], {}), '(cprt.OVERVIEW_TABLE_HEADER)\n', (8246, 8274), False, 'from string import Template\n'), ((8512, 8545), 'string.Template', 'Template', (['cprt.OVERVIEW_TABLE_ROW'], {}), '(cprt.OVERVIEW_TABLE_ROW)\n', (8520, 8545), False, 'from string import Template\n'), ((11537, 11568), 'cellpainting2.processing.load_resource', 'cpp.load_resource', (['"""REFERENCES"""'], {}), "('REFERENCES')\n", (11554, 11568), True, 'from cellpainting2 import processing as cpp\n'), ((11664, 11692), 'string.Template', 'Template', (['cprt.REF_TABLE_ROW'], {}), '(cprt.REF_TABLE_ROW)\n', (11672, 11692), False, 'from string import Template\n'), ((12914, 12990), 'cellpainting2.tools.parameters_from_act_profile_by_val', 'cpt.parameters_from_act_profile_by_val', (['act_prof', 'val'], {'parameters': 'parameters'}), '(act_prof, val, parameters=parameters)\n', (12952, 12990), True, 'from cellpainting2 import tools as cpt\n'), ((13027, 13056), 'string.Template', 'Template', (['cprt.PARM_TABLE_ROW'], {}), '(cprt.PARM_TABLE_ROW)\n', (13035, 13056), False, 'from string import Template\n'), ((13765, 13786), 'os.path.isfile', 'op.isfile', (['hist_cache'], {}), '(hist_cache)\n', (13774, 13786), True, 'import os.path as op\n'), ((14363, 14393), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn-white"""'], {}), "('seaborn-white')\n", (14376, 14393), True, 'import matplotlib.pyplot as plt\n'), ((14398, 14429), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn-pastel"""'], {}), "('seaborn-pastel')\n", (14411, 14429), True, 'import 
matplotlib.pyplot as plt\n'), ((14434, 14463), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn-talk"""'], {}), "('seaborn-talk')\n", (14447, 14463), True, 'import matplotlib.pyplot as plt\n'), ((14710, 14739), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (14722, 14739), True, 'import matplotlib.pyplot as plt\n'), ((14752, 14771), 'numpy.arange', 'np.arange', (['n_groups'], {}), '(n_groups)\n', (14761, 14771), True, 'import numpy as np\n'), ((14797, 14862), 'matplotlib.pyplot.bar', 'plt.bar', (['index', 'inc_norm', 'bar_width'], {'color': '"""#94caef"""', 'label': '"""Inc"""'}), "(index, inc_norm, bar_width, color='#94caef', label='Inc')\n", (14804, 14862), True, 'import matplotlib.pyplot as plt\n'), ((14891, 14968), 'matplotlib.pyplot.bar', 'plt.bar', (['(index + bar_width)', 'dec_norm', 'bar_width'], {'color': '"""#ffdd1a"""', 'label': '"""Dec"""'}), "(index + bar_width, dec_norm, bar_width, color='#ffdd1a', label='Dec')\n", (14898, 14968), True, 'import matplotlib.pyplot as plt\n'), ((14998, 15028), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Cell Compartment"""'], {}), "('Cell Compartment')\n", (15008, 15028), True, 'import matplotlib.pyplot as plt\n'), ((15033, 15062), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""rel. Occurrence"""'], {}), "('rel. Occurrence')\n", (15043, 15062), True, 'import matplotlib.pyplot as plt\n'), ((15067, 15121), 'matplotlib.pyplot.xticks', 'plt.xticks', (['(index + bar_width / 2)', 'labels'], {'rotation': '(45)'}), '(index + bar_width / 2, labels, rotation=45)\n', (15077, 15121), True, 'import matplotlib.pyplot as plt\n'), ((15126, 15138), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (15136, 15138), True, 'import matplotlib.pyplot as plt\n'), ((15143, 15161), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (15159, 15161), True, 'import matplotlib.pyplot as plt\n'), ((15177, 15181), 'io.BytesIO', 'IO', ([], {}), '()\n', (15179, 15181), True, 'from io import BytesIO as IO\n'), ((15186, 15242), 'matplotlib.pyplot.savefig', 'plt.savefig', (['img_file'], {'bbox_inches': '"""tight"""', 'format': '"""jpg"""'}), "(img_file, bbox_inches='tight', format='jpg')\n", (15197, 15242), True, 'import matplotlib.pyplot as plt\n'), ((15419, 15430), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (15428, 15430), True, 'import matplotlib.pyplot as plt\n'), ((16086, 16116), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn-white"""'], {}), "('seaborn-white')\n", (16099, 16116), True, 'import matplotlib.pyplot as plt\n'), ((16121, 16152), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn-pastel"""'], {}), "('seaborn-pastel')\n", (16134, 16152), True, 'import matplotlib.pyplot as plt\n'), ((16157, 16186), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn-talk"""'], {}), "('seaborn-talk')\n", (16170, 16186), True, 'import matplotlib.pyplot as plt\n'), ((19039, 19058), 'numpy.asarray', 'np.asarray', (['fp_list'], {}), '(fp_list)\n', (19049, 19058), True, 'import numpy as np\n'), ((19063, 19081), 'matplotlib.pyplot.xticks', 'plt.xticks', (['XTICKS'], {}), '(XTICKS)\n', (19073, 19081), True, 'import matplotlib.pyplot as plt\n'), ((19136, 19189), 'matplotlib.pyplot.pcolor', 'plt.pcolor', (['Z'], {'vmin': '(-max_val)', 'vmax': 'max_val', 'cmap': 'cmap'}), '(Z, vmin=-max_val, vmax=max_val, cmap=cmap)\n', (19146, 19189), True, 'import matplotlib.pyplot as plt\n'), ((19194, 19285), 'matplotlib.pyplot.text', 'plt.text', (['(XTICKS[1] 
// 2)', '(-1.1)', '"""Cells"""'], {'horizontalalignment': '"""center"""', 'fontsize': 'fs_text'}), "(XTICKS[1] // 2, -1.1, 'Cells', horizontalalignment='center',\n fontsize=fs_text)\n", (19202, 19285), True, 'import matplotlib.pyplot as plt\n'), ((19299, 19420), 'matplotlib.pyplot.text', 'plt.text', (['(XTICKS[1] + (XTICKS[2] - XTICKS[1]) // 2)', '(-1.1)', '"""Cytoplasm"""'], {'horizontalalignment': '"""center"""', 'fontsize': 'fs_text'}), "(XTICKS[1] + (XTICKS[2] - XTICKS[1]) // 2, -1.1, 'Cytoplasm',\n horizontalalignment='center', fontsize=fs_text)\n", (19307, 19420), True, 'import matplotlib.pyplot as plt\n'), ((19436, 19554), 'matplotlib.pyplot.text', 'plt.text', (['(XTICKS[2] + (XTICKS[3] - XTICKS[2]) // 2)', '(-1.1)', '"""Nuclei"""'], {'horizontalalignment': '"""center"""', 'fontsize': 'fs_text'}), "(XTICKS[2] + (XTICKS[3] - XTICKS[2]) // 2, -1.1, 'Nuclei',\n horizontalalignment='center', fontsize=fs_text)\n", (19444, 19554), True, 'import matplotlib.pyplot as plt\n'), ((19626, 19644), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (19642, 19644), True, 'import matplotlib.pyplot as plt\n'), ((20957, 20991), 'cellpainting2.tools.melt', 'cpt.melt', (['df_parm'], {'id_prop': 'id_prop'}), '(df_parm, id_prop=id_prop)\n', (20965, 20991), True, 'from cellpainting2 import tools as cpt\n'), ((21233, 21288), 'os.path.join', 'op.join', (["cp_config['Paths']['SrcPath']", 'plate_full_name'], {}), "(cp_config['Paths']['SrcPath'], plate_full_name)\n", (21240, 21288), True, 'import os.path as op\n'), ((21352, 21378), 'os.path.join', 'op.join', (['src_dir', '"""images"""'], {}), "(src_dir, 'images')\n", (21359, 21378), True, 'import os.path as op\n'), ((21656, 21683), 'string.Template', 'Template', (['cprt.IMAGES_TABLE'], {}), '(cprt.IMAGES_TABLE)\n', (21664, 21683), False, 'from string import Template\n'), ((21789, 21800), 'IPython.core.display.HTML', 'HTML', (['table'], {}), '(table)\n', (21793, 21800), False, 'from IPython.core.display import HTML\n'), ((21841, 21871), 'cellpainting2.processing.load_resource', 'cpp.load_resource', (['"""DATASTORE"""'], {}), "('DATASTORE')\n", (21858, 21871), True, 'from cellpainting2 import processing as cpp\n'), ((21954, 21973), 'cellpainting2.processing.is_dask', 'cpp.is_dask', (['result'], {}), '(result)\n', (21965, 21973), True, 'from cellpainting2 import processing as cpp\n'), ((22031, 22099), 'pandas.Categorical', 'pd.Categorical', (["result['Well_Id']"], {'categories': 'well_ids', 'ordered': '(True)'}), "(result['Well_Id'], categories=well_ids, ordered=True)\n", (22045, 22099), True, 'import pandas as pd\n'), ((22287, 22316), 'cellpainting2.processing.load_resource', 'cpp.load_resource', (['"""SIM_REFS"""'], {}), "('SIM_REFS')\n", (22304, 22316), True, 'from cellpainting2 import processing as cpp\n'), ((22422, 22448), 'os.path.join', 'op.join', (['src_dir', '"""images"""'], {}), "(src_dir, 'images')\n", (22429, 22448), True, 'import os.path as op\n'), ((23136, 23155), 'pandas.DataFrame', 'pd.DataFrame', (['[rec]'], {}), '([rec])\n', (23148, 23155), True, 'import pandas as pd\n'), ((25274, 25334), 'os.path.join', 'op.join', (["cp_config['Dirs']['DataDir']", '"""plots"""', "rec['Plate']"], {}), "(cp_config['Dirs']['DataDir'], 'plots', rec['Plate'])\n", (25281, 25334), True, 'import os.path as op\n'), ((25485, 25511), 'os.path.join', 'op.join', (['cache_path', 'hm_fn'], {}), '(cache_path, hm_fn)\n', (25492, 25511), True, 'import os.path as op\n'), ((25687, 25715), 'string.Template', 'Template', (['cprt.DETAILS_TEMPL'], {}), 
'(cprt.DETAILS_TEMPL)\n', (25695, 25715), False, 'from string import Template\n'), ((25906, 25958), 'os.path.join', 'op.join', (["cp_config['Dirs']['ReportDir']", 'report_name'], {}), "(cp_config['Dirs']['ReportDir'], report_name)\n", (25913, 25958), True, 'import os.path as op\n'), ((25977, 26016), 'os.path.join', 'op.join', (['report_full_path', '"""index.html"""'], {}), "(report_full_path, 'index.html')\n", (25984, 26016), True, 'import os.path as op\n'), ((3284, 3307), 'rdkit.Chem.AllChem.MolFromSmiles', 'Chem.MolFromSmiles', (['"""*"""'], {}), "('*')\n", (3302, 3307), True, 'from rdkit.Chem import AllChem as Chem\n'), ((5043, 5047), 'io.BytesIO', 'IO', ([], {}), '()\n', (5045, 5047), True, 'from io import BytesIO as IO\n'), ((11860, 11881), 'cellpainting2.processing.is_dask', 'cpp.is_dask', (['ref_data'], {}), '(ref_data)\n', (11871, 11881), True, 'from cellpainting2 import processing as cpp\n'), ((15803, 15824), 'os.path.isfile', 'op.isfile', (['plot_cache'], {}), '(plot_cache)\n', (15812, 15824), True, 'import os.path as op\n'), ((19607, 19621), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (19619, 19621), True, 'import matplotlib.pyplot as plt\n'), ((19666, 19676), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (19674, 19676), True, 'import matplotlib.pyplot as plt\n'), ((19706, 19710), 'io.BytesIO', 'IO', ([], {}), '()\n', (19708, 19710), True, 'from io import BytesIO as IO\n'), ((19719, 19775), 'matplotlib.pyplot.savefig', 'plt.savefig', (['img_file'], {'bbox_inches': '"""tight"""', 'format': '"""jpg"""'}), "(img_file, bbox_inches='tight', format='jpg')\n", (19730, 19775), True, 'import matplotlib.pyplot as plt\n'), ((19993, 20002), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (20000, 20002), True, 'import matplotlib.pyplot as plt\n'), ((20011, 20022), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (20020, 20022), True, 'import matplotlib.pyplot as plt\n'), ((20031, 20043), 'gc.collect', 'gc.collect', ([], {}), '()\n', (20041, 20043), False, 'import gc\n'), ((22388, 22404), 'time.localtime', 'time.localtime', ([], {}), '()\n', (22402, 22404), False, 'import time\n'), ((23586, 23616), 'string.Template', 'Template', (['cprt.DETAILS_REF_ROW'], {}), '(cprt.DETAILS_REF_ROW)\n', (23594, 23616), False, 'from string import Template\n'), ((25346, 25366), 'os.path.isdir', 'op.isdir', (['cache_path'], {}), '(cache_path)\n', (25354, 25366), True, 'import os.path as op\n'), ((25376, 25414), 'os.makedirs', 'os.makedirs', (['cache_path'], {'exist_ok': '(True)'}), '(cache_path, exist_ok=True)\n', (25387, 25414), False, 'import os\n'), ((26060, 26076), 'time.localtime', 'time.localtime', ([], {}), '()\n', (26074, 26076), False, 'import time\n'), ((26098, 26134), 'os.path.join', 'op.join', (['report_full_path', '"""details"""'], {}), "(report_full_path, 'details')\n", (26105, 26134), True, 'import os.path as op\n'), ((3103, 3129), 'rdkit.Avalon.pyAvalonTools.Generate2DCoords', 'pyAv.Generate2DCoords', (['mol'], {}), '(mol)\n', (3124, 3129), True, 'from rdkit.Avalon import pyAvalonTools as pyAv\n'), ((4521, 4560), 'rdkit.Chem.Draw.MolToImage', 'Draw.MolToImage', (['mol'], {'size': '(size, size)'}), '(mol, size=(size, size))\n', (4536, 4560), False, 'from rdkit.Chem import Draw\n'), ((4644, 4667), 'rdkit.Chem.AllChem.MolFromSmiles', 'Chem.MolFromSmiles', (['"""C"""'], {}), "('C')\n", (4662, 4667), True, 'from rdkit.Chem import AllChem as Chem\n'), ((19097, 19114), 'numpy.arange', 'np.arange', (['df_len'], {}), '(df_len)\n', (19106, 19114), True, 'import 
numpy as np\n'), ((4607, 4628), 'rdkit.Chem.AllChem.MolToSmiles', 'Chem.MolToSmiles', (['mol'], {}), '(mol)\n', (4623, 4628), True, 'from rdkit.Chem import AllChem as Chem\n'), ((4691, 4730), 'rdkit.Chem.Draw.MolToImage', 'Draw.MolToImage', (['mol'], {'size': '(size, size)'}), '(mol, size=(size, size))\n', (4706, 4730), False, 'from rdkit.Chem import Draw\n'), ((21006, 21025), 'holoviews.HeatMap', 'hv.HeatMap', (['df_heat'], {}), '(df_heat)\n', (21016, 21025), True, 'import holoviews as hv\n'), ((10182, 10253), 'pandas.DataFrame', 'pd.DataFrame', (["{'Well_Id': [well_id], 'Similarity': [rec['Similarity']]}"], {}), "({'Well_Id': [well_id], 'Similarity': [rec['Similarity']]})\n", (10194, 10253), True, 'import pandas as pd\n'), ((17676, 17709), 'rdkit.Chem.AllChem.GetMorganFingerprint', 'Chem.GetMorganFingerprint', (['mol', '(2)'], {}), '(mol, 2)\n', (17701, 17709), True, 'from rdkit.Chem import AllChem as Chem\n'), ((2280, 2301), 'os.path.getmtime', 'op.getmtime', (['__file__'], {}), '(__file__)\n', (2291, 2301), True, 'import os.path as op\n'), ((18032, 18055), 'cellpainting2.tools.chem_sim', 'cpt.chem_sim', (['mol_fp', 'q'], {}), '(mol_fp, q)\n', (18044, 18055), True, 'from cellpainting2 import tools as cpt\n'), ((25142, 25172), 'pandas.concat', 'pd.concat', (['[df_heat, sim_data]'], {}), '([df_heat, sim_data])\n', (25151, 25172), True, 'import pandas as pd\n')]
|
import numpy as np
import torch
# https://github.com/sfujim/TD3/blob/ade6260da88864d1ab0ed592588e090d3d97d679/utils.py
class ReplayBuffer(object):
def __init__(self, state_dim, action_dim, max_size=int(1e6)):
self.max_size = max_size
self.ptr = 0
self.size = 0
self.state = np.zeros((max_size, state_dim))
self.action = np.zeros((max_size, action_dim))
self.next_state = np.zeros((max_size, state_dim))
self.reward = np.zeros((max_size, 1))
self.not_done = np.zeros((max_size, 1))
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    def add(self, state, action, next_state, reward, done):
        # store the transition at the current write position
        self.state[self.ptr] = state
        self.action[self.ptr] = action
        self.next_state[self.ptr] = next_state
        self.reward[self.ptr] = reward
        self.not_done[self.ptr] = 1. - done
        # advance the ring-buffer pointer, overwriting the oldest entry once full
        self.ptr = (self.ptr + 1) % self.max_size
        self.size = min(self.size + 1, self.max_size)
def sample(self, batch_size):
ind = np.random.randint(0, self.size, size=batch_size)
return (
torch.from_numpy(self.state[ind]).float().to(self.device),
torch.from_numpy(self.action[ind]).float().to(self.device),
torch.from_numpy(self.next_state[ind]).float().to(self.device),
torch.from_numpy(self.reward[ind]).float().to(self.device),
torch.from_numpy(self.not_done[ind]).float().to(self.device)
)
def sample_np(self, batch_size):
ind = np.random.randint(0, self.size, size=batch_size)
return (
np.float32(self.state[ind]),
np.float32(self.action[ind]),
np.float32(self.next_state[ind]),
np.float32(self.reward[ind]),
np.float32(self.not_done[ind])
)
def save(self, fdir):
np.save(fdir + '/sample-state', self.state[:self.size])
np.save(fdir + '/sample-action', self.action[:self.size])
np.save(fdir + '/sample-nstate', self.next_state[:self.size])
np.save(fdir + '/sample-reward', self.reward[:self.size])
np.save(fdir + '/sample-ndone', self.not_done[:self.size])
def load(self, fdir):
state = np.load(fdir + '/sample-state.npy', allow_pickle=True)
action = np.load(fdir + '/sample-action.npy', allow_pickle=True)
nstate = np.load(fdir + '/sample-nstate.npy', allow_pickle=True)
reward = np.load(fdir + '/sample-reward.npy', allow_pickle=True)
ndone = np.load(fdir + '/sample-ndone.npy', allow_pickle=True)
for s, a, ns, r, nd in zip(state, action, nstate, reward, ndone):
self.add(s, a, ns, r, 1. - nd)
def reset(self):
self.ptr = 0
self.size = 0
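# Added usage sketch (not part of the original file): fill the buffer with a few random
# transitions and draw a batch; the dimensions below are arbitrary assumptions.
if __name__ == "__main__":
    buf = ReplayBuffer(state_dim=3, action_dim=1, max_size=100)
    for _ in range(10):
        s, ns = np.random.randn(3), np.random.randn(3)
        a = np.random.randn(1)
        buf.add(s, a, ns, reward=0.0, done=False)
    states, actions, next_states, rewards, not_dones = buf.sample(batch_size=4)
    print(states.shape, not_dones.shape)  # torch.Size([4, 3]) torch.Size([4, 1])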
|
[
"numpy.load",
"numpy.save",
"numpy.float32",
"numpy.zeros",
"numpy.random.randint",
"torch.cuda.is_available",
"torch.from_numpy"
] |
[((313, 344), 'numpy.zeros', 'np.zeros', (['(max_size, state_dim)'], {}), '((max_size, state_dim))\n', (321, 344), True, 'import numpy as np\n'), ((367, 399), 'numpy.zeros', 'np.zeros', (['(max_size, action_dim)'], {}), '((max_size, action_dim))\n', (375, 399), True, 'import numpy as np\n'), ((426, 457), 'numpy.zeros', 'np.zeros', (['(max_size, state_dim)'], {}), '((max_size, state_dim))\n', (434, 457), True, 'import numpy as np\n'), ((480, 503), 'numpy.zeros', 'np.zeros', (['(max_size, 1)'], {}), '((max_size, 1))\n', (488, 503), True, 'import numpy as np\n'), ((528, 551), 'numpy.zeros', 'np.zeros', (['(max_size, 1)'], {}), '((max_size, 1))\n', (536, 551), True, 'import numpy as np\n'), ((1057, 1105), 'numpy.random.randint', 'np.random.randint', (['(0)', 'self.size'], {'size': 'batch_size'}), '(0, self.size, size=batch_size)\n', (1074, 1105), True, 'import numpy as np\n'), ((1549, 1597), 'numpy.random.randint', 'np.random.randint', (['(0)', 'self.size'], {'size': 'batch_size'}), '(0, self.size, size=batch_size)\n', (1566, 1597), True, 'import numpy as np\n'), ((1874, 1929), 'numpy.save', 'np.save', (["(fdir + '/sample-state')", 'self.state[:self.size]'], {}), "(fdir + '/sample-state', self.state[:self.size])\n", (1881, 1929), True, 'import numpy as np\n'), ((1938, 1995), 'numpy.save', 'np.save', (["(fdir + '/sample-action')", 'self.action[:self.size]'], {}), "(fdir + '/sample-action', self.action[:self.size])\n", (1945, 1995), True, 'import numpy as np\n'), ((2004, 2065), 'numpy.save', 'np.save', (["(fdir + '/sample-nstate')", 'self.next_state[:self.size]'], {}), "(fdir + '/sample-nstate', self.next_state[:self.size])\n", (2011, 2065), True, 'import numpy as np\n'), ((2074, 2131), 'numpy.save', 'np.save', (["(fdir + '/sample-reward')", 'self.reward[:self.size]'], {}), "(fdir + '/sample-reward', self.reward[:self.size])\n", (2081, 2131), True, 'import numpy as np\n'), ((2140, 2198), 'numpy.save', 'np.save', (["(fdir + '/sample-ndone')", 'self.not_done[:self.size]'], {}), "(fdir + '/sample-ndone', self.not_done[:self.size])\n", (2147, 2198), True, 'import numpy as np\n'), ((2242, 2296), 'numpy.load', 'np.load', (["(fdir + '/sample-state.npy')"], {'allow_pickle': '(True)'}), "(fdir + '/sample-state.npy', allow_pickle=True)\n", (2249, 2296), True, 'import numpy as np\n'), ((2314, 2369), 'numpy.load', 'np.load', (["(fdir + '/sample-action.npy')"], {'allow_pickle': '(True)'}), "(fdir + '/sample-action.npy', allow_pickle=True)\n", (2321, 2369), True, 'import numpy as np\n'), ((2387, 2442), 'numpy.load', 'np.load', (["(fdir + '/sample-nstate.npy')"], {'allow_pickle': '(True)'}), "(fdir + '/sample-nstate.npy', allow_pickle=True)\n", (2394, 2442), True, 'import numpy as np\n'), ((2460, 2515), 'numpy.load', 'np.load', (["(fdir + '/sample-reward.npy')"], {'allow_pickle': '(True)'}), "(fdir + '/sample-reward.npy', allow_pickle=True)\n", (2467, 2515), True, 'import numpy as np\n'), ((2532, 2586), 'numpy.load', 'np.load', (["(fdir + '/sample-ndone.npy')"], {'allow_pickle': '(True)'}), "(fdir + '/sample-ndone.npy', allow_pickle=True)\n", (2539, 2586), True, 'import numpy as np\n'), ((1627, 1654), 'numpy.float32', 'np.float32', (['self.state[ind]'], {}), '(self.state[ind])\n', (1637, 1654), True, 'import numpy as np\n'), ((1668, 1696), 'numpy.float32', 'np.float32', (['self.action[ind]'], {}), '(self.action[ind])\n', (1678, 1696), True, 'import numpy as np\n'), ((1710, 1742), 'numpy.float32', 'np.float32', (['self.next_state[ind]'], {}), '(self.next_state[ind])\n', (1720, 1742), True, 'import numpy as 
np\n'), ((1756, 1784), 'numpy.float32', 'np.float32', (['self.reward[ind]'], {}), '(self.reward[ind])\n', (1766, 1784), True, 'import numpy as np\n'), ((1798, 1828), 'numpy.float32', 'np.float32', (['self.not_done[ind]'], {}), '(self.not_done[ind])\n', (1808, 1828), True, 'import numpy as np\n'), ((598, 623), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (621, 623), False, 'import torch\n'), ((1135, 1168), 'torch.from_numpy', 'torch.from_numpy', (['self.state[ind]'], {}), '(self.state[ind])\n', (1151, 1168), False, 'import torch\n'), ((1206, 1240), 'torch.from_numpy', 'torch.from_numpy', (['self.action[ind]'], {}), '(self.action[ind])\n', (1222, 1240), False, 'import torch\n'), ((1278, 1316), 'torch.from_numpy', 'torch.from_numpy', (['self.next_state[ind]'], {}), '(self.next_state[ind])\n', (1294, 1316), False, 'import torch\n'), ((1354, 1388), 'torch.from_numpy', 'torch.from_numpy', (['self.reward[ind]'], {}), '(self.reward[ind])\n', (1370, 1388), False, 'import torch\n'), ((1426, 1462), 'torch.from_numpy', 'torch.from_numpy', (['self.not_done[ind]'], {}), '(self.not_done[ind])\n', (1442, 1462), False, 'import torch\n')]
|
# Lint as: python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for iree.tf.support.tf_utils."""
from absl.testing import parameterized
from iree.tf.support import tf_utils
import numpy as np
import tensorflow as tf
class UtilsTests(tf.test.TestCase, parameterized.TestCase):
@parameterized.named_parameters([('int8_to_i8', np.int8, 'i8'),
('int32_to_i32', np.int32, 'i32'),
('float32_to_f32', np.float32, 'f32'),
('float64_to_f64', np.float64, 'f64')])
def test_to_mlir_type(self, numpy_type, mlir_type):
self.assertEqual(tf_utils.to_mlir_type(numpy_type), mlir_type)
@parameterized.named_parameters([
('single_i32', [np.array([1, 2], dtype=np.int32)], '2xi32=1 2'),
('single_f32', [np.array([1, 2], dtype=np.float32)], '2xf32=1.0 2.0'),
])
def test_save_input_values(self, inputs, inputs_str):
self.assertEqual(tf_utils.save_input_values(inputs), inputs_str)
def test_apply_function(self):
inputs = [1, [2, 3], (4, 5), {'6': 6, '78': [7, 8]}]
expected = [0, [1, 2], (3, 4), {'6': 5, '78': [6, 7]}]
result = tf_utils.apply_function(inputs, lambda x: x - 1)
self.assertEqual(result, expected)
self.assertNotEqual(inputs, expected)
@parameterized.named_parameters([
{
'testcase_name': 'all the same',
'array_c': np.array([0, 1, 2]),
'array_d': np.array(['0', '1', '2']),
'array_e': np.array([0.0, 0.1, 0.2]),
'tar_same': True,
},
{
'testcase_name': 'wrong int',
'array_c': np.array([1, 1, 2]),
'array_d': np.array(['0', '1', '2']),
'array_e': np.array([0.0, 0.1, 0.2]),
'tar_same': False,
},
{
'testcase_name': 'wrong string',
'array_c': np.array([0, 1, 2]),
'array_d': np.array(['a', '1', '2']),
'array_e': np.array([0.0, 0.1, 0.2]),
'tar_same': False,
},
{
'testcase_name': 'wrong float',
'array_c': np.array([0, 1, 2]),
'array_d': np.array(['0', '1', '2']),
'array_e': np.array([1.0, 0.1, 0.2]),
'tar_same': False,
},
])
def test_recursive_check_same(self, array_c, array_d, array_e, tar_same):
# yapf: disable
ref = {
'a': 1,
'b': [
{'c': np.array([0, 1, 2])},
{'d': np.array(['0', '1', '2'])},
{'e': np.array([0.0, 0.1, 0.2])}
],
}
tar = {
'a': 1,
'b': [
{'c': array_c},
{'d': array_d},
{'e': array_e}
],
}
# yapf: enable
same, _ = tf_utils.check_same(ref, tar, rtol=1e-6, atol=1e-6)
self.assertEqual(tar_same, same)
if __name__ == '__main__':
tf.test.main()
|
[
"tensorflow.test.main",
"iree.tf.support.tf_utils.check_same",
"iree.tf.support.tf_utils.to_mlir_type",
"iree.tf.support.tf_utils.apply_function",
"numpy.array",
"absl.testing.parameterized.named_parameters",
"iree.tf.support.tf_utils.save_input_values"
] |
[((823, 1008), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["[('int8_to_i8', np.int8, 'i8'), ('int32_to_i32', np.int32, 'i32'), (\n 'float32_to_f32', np.float32, 'f32'), ('float64_to_f64', np.float64, 'f64')\n ]"], {}), "([('int8_to_i8', np.int8, 'i8'), (\n 'int32_to_i32', np.int32, 'i32'), ('float32_to_f32', np.float32, 'f32'),\n ('float64_to_f64', np.float64, 'f64')])\n", (853, 1008), False, 'from absl.testing import parameterized\n'), ((3363, 3377), 'tensorflow.test.main', 'tf.test.main', ([], {}), '()\n', (3375, 3377), True, 'import tensorflow as tf\n'), ((1704, 1752), 'iree.tf.support.tf_utils.apply_function', 'tf_utils.apply_function', (['inputs', '(lambda x: x - 1)'], {}), '(inputs, lambda x: x - 1)\n', (1727, 1752), False, 'from iree.tf.support import tf_utils\n'), ((3243, 3296), 'iree.tf.support.tf_utils.check_same', 'tf_utils.check_same', (['ref', 'tar'], {'rtol': '(1e-06)', 'atol': '(1e-06)'}), '(ref, tar, rtol=1e-06, atol=1e-06)\n', (3262, 3296), False, 'from iree.tf.support import tf_utils\n'), ((1180, 1213), 'iree.tf.support.tf_utils.to_mlir_type', 'tf_utils.to_mlir_type', (['numpy_type'], {}), '(numpy_type)\n', (1201, 1213), False, 'from iree.tf.support import tf_utils\n'), ((1493, 1527), 'iree.tf.support.tf_utils.save_input_values', 'tf_utils.save_input_values', (['inputs'], {}), '(inputs)\n', (1519, 1527), False, 'from iree.tf.support import tf_utils\n'), ((1943, 1962), 'numpy.array', 'np.array', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (1951, 1962), True, 'import numpy as np\n'), ((1985, 2010), 'numpy.array', 'np.array', (["['0', '1', '2']"], {}), "(['0', '1', '2'])\n", (1993, 2010), True, 'import numpy as np\n'), ((2033, 2058), 'numpy.array', 'np.array', (['[0.0, 0.1, 0.2]'], {}), '([0.0, 0.1, 0.2])\n', (2041, 2058), True, 'import numpy as np\n'), ((2166, 2185), 'numpy.array', 'np.array', (['[1, 1, 2]'], {}), '([1, 1, 2])\n', (2174, 2185), True, 'import numpy as np\n'), ((2208, 2233), 'numpy.array', 'np.array', (["['0', '1', '2']"], {}), "(['0', '1', '2'])\n", (2216, 2233), True, 'import numpy as np\n'), ((2256, 2281), 'numpy.array', 'np.array', (['[0.0, 0.1, 0.2]'], {}), '([0.0, 0.1, 0.2])\n', (2264, 2281), True, 'import numpy as np\n'), ((2393, 2412), 'numpy.array', 'np.array', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (2401, 2412), True, 'import numpy as np\n'), ((2435, 2460), 'numpy.array', 'np.array', (["['a', '1', '2']"], {}), "(['a', '1', '2'])\n", (2443, 2460), True, 'import numpy as np\n'), ((2483, 2508), 'numpy.array', 'np.array', (['[0.0, 0.1, 0.2]'], {}), '([0.0, 0.1, 0.2])\n', (2491, 2508), True, 'import numpy as np\n'), ((2619, 2638), 'numpy.array', 'np.array', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (2627, 2638), True, 'import numpy as np\n'), ((2661, 2686), 'numpy.array', 'np.array', (["['0', '1', '2']"], {}), "(['0', '1', '2'])\n", (2669, 2686), True, 'import numpy as np\n'), ((2709, 2734), 'numpy.array', 'np.array', (['[1.0, 0.1, 0.2]'], {}), '([1.0, 0.1, 0.2])\n', (2717, 2734), True, 'import numpy as np\n'), ((1285, 1317), 'numpy.array', 'np.array', (['[1, 2]'], {'dtype': 'np.int32'}), '([1, 2], dtype=np.int32)\n', (1293, 1317), True, 'import numpy as np\n'), ((1356, 1390), 'numpy.array', 'np.array', (['[1, 2]'], {'dtype': 'np.float32'}), '([1, 2], dtype=np.float32)\n', (1364, 1390), True, 'import numpy as np\n'), ((2937, 2956), 'numpy.array', 'np.array', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (2945, 2956), True, 'import numpy as np\n'), ((2977, 3002), 'numpy.array', 'np.array', (["['0', '1', '2']"], {}), "(['0', '1', 
'2'])\n", (2985, 3002), True, 'import numpy as np\n'), ((3023, 3048), 'numpy.array', 'np.array', (['[0.0, 0.1, 0.2]'], {}), '([0.0, 0.1, 0.2])\n', (3031, 3048), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
"""
Standard BOX 2D module with single joint
"""
import gym_rem2D.morph.module_utility as mu
from gym_rem.utils import Rot
from enum import Enum
import numpy as np
from Controller import m_controller
import random
import math
from gym_rem2D.morph import abstract_module
from gym_rem2D.morph import simple_module as sm
import Box2D as B2D
from Box2D.b2 import (edgeShape, fixtureDef, polygonShape, revoluteJointDef, contactListener)
class Connection(Enum):
"""Available connections for standard 2D module"""
left = (1.,0.,0.)
right = (-1.,0.,0.)
top = (0.,1.0,0.)
class Circular2D(abstract_module.Module):
"""Standard 2D module"""
def __init__(self, theta=0, size=(0.1,0.1, 0.0)):
self.theta = theta % 2 # double check
self.size = np.array(size)
assert self.size.shape == (3,), "Size must be a 3 element vector! : this is a 2D module but takes in a three dimensional size vector for now. Third entry is ignored"
self.connection_axis = np.array([0., 0., 1.])
self.orientation = Rot.from_axis(self.connection_axis,
-self.theta * (np.pi / 2.))
# NOTE: The fudge factor is to avoid colliding with the plane once
# spawned
self.position = np.array([0., self.size[2] / 2. + 0.002, 0.]) # uses only x and y
self._children = {}
self.controller = m_controller.Controller()
# relative scales
self.radius = 0.25
self.angle = math.pi/2
self.type = "CIRCLE"
self.MIN_RADIUS = 0.25
self.MAX_RADIUS = 0.5
self.MIN_ANGLE = math.pi/4
self.MAX_ANGLE = math.pi*2
self.torque = 50
#self.joint = None # needs joint
def limitWH(self):
"""Limit morphology to bounds"""
if self.radius > self.MAX_RADIUS:
self.radius = self.MAX_RADIUS
elif self.radius < self.MIN_RADIUS:
self.radius = self.MIN_RADIUS
if self.angle >self.MAX_ANGLE:
self.angle = self.MAX_ANGLE
elif self.angle < self.MIN_ANGLE:
self.angle = self.MIN_ANGLE
def mutate(self, MORPH_MUTATION_RATE,MUTATION_RATE,MUT_SIGMA):
"""
To mutate the shape and controller stored in the modules.
"""
#return
if random.uniform(0,1) < MORPH_MUTATION_RATE:
self.radius = random.gauss(self.radius, MUT_SIGMA)
if random.uniform(0,1) < MORPH_MUTATION_RATE:
self.angle = random.gauss(self.angle,MUT_SIGMA * math.pi)
self.limitWH()
if self.controller is not None:
self.controller.mutate(MUTATION_RATE,MUT_SIGMA, self.angle)
def setMorph(self,val1, val2, val3):
# values are between -1 and 1
self.radius = val1 + 1.5
# val2 is not used since radius
self.angle = self.MIN_ANGLE +(((val3 + 1.0)*0.5) * (self.MAX_ANGLE-self.MIN_ANGLE))
# limit values
self.limitWH()
def __setitem__(self, key, module):
if not isinstance(key, Connection):
raise TypeError("Key: '{}' is not a Connection type".format(key))
if key in self._children:
raise ModuleAttached()
if key not in self.available:
raise ConnectionObstructed()
# Add module as a child
self._children[key] = module
# Calculate connection point
direction = self.orientation.rotate(np.array(key.value))
position = self.position + (direction * self.size) / 2.
# Update parent pointer of module
module.update(self, position, direction)
def update(self, parent=None, pos=None, direction=None):
# Update own orientation first in case we have been previously
# connected
self.orientation = Rot.from_axis(self.connection_axis,
-self.theta * (np.pi / 2.))
# Update position in case parent is None
self.position = np.array([0., 0., self.size[2] / 2. + 0.002])
# Reset connection in case parent is None
self.connection = None
# Call super to update orientation
super().update(parent, pos, direction)
# If parent is not None we need to update position and connection point
if self.parent is not None:
# Update center position for self
# NOTE: We add a little fudge factor to avoid overlap
self.position = pos + (direction * self.size * 1.01) / 2.
# Calculate connection points for joint
conn = np.array([0., 0., -self.size[2] / 2.])
parent_conn = parent.orientation.T.rotate(pos - parent.position)
self.connection = (parent_conn, conn)
# Update potential children
self.update_children()
def update_children(self):
for conn in self._children:
direction = self.orientation.rotate(np.array(conn.value))
position = self.position + (direction * self.size) / 2.
self._children[conn].update(self, position, direction)
def spawn(self):
orient = self.orientation.as_quat()
cuid = B2D.b2CircleShape
cuid.m_p.Set(self.position)
if (self.parent):
self.joint = B2D.b2RevoluteJoint()
return cuid
def get_global_position_of_connection_site(self,con=None, parent_component = None):
if con is None:
con = Connection.left # get intersection of rectangle from width and height
local_position = [] # 2d array
local_angle = (con.value[0] * (self.angle)) # positive for left, negative for right
# position relative to y directional vector
if parent_component:
local_angle+=parent_component.angle
x = math.cos(local_angle+ math.pi/2)*self.radius
y = math.sin(local_angle+ math.pi/2)*self.radius
local_position.append(x)
local_position.append(y)
if parent_component is None:
return local_position,local_angle
global_position = [local_position[0]+parent_component.position[0],
local_position[1]+parent_component.position[1]]
return global_position, local_angle
def create(self,world,TERRAIN_HEIGHT,module=None,node=None,connection_site=None, p_c=None, module_list=None, position = None):
# get module height and width
if p_c is not None and connection_site is None:
raise("When you want to attach a new component to a parent component, you have to supply",
"a connection_site object with it. This connection_site object defines where to anchor",
"the joint in between to components")
n_radius = self.radius
angle = 0
pos = [7,10,0];
if position is not None:
pos = position
if (p_c is not None):
local_pos_x =math.cos(connection_site.orientation.x+ math.pi/2) * n_radius
local_pos_y =math.sin(connection_site.orientation.x+ math.pi/2) * n_radius
pos[0] = (local_pos_x) + connection_site.position.x
pos[1] = (local_pos_y) + connection_site.position.y
# This module will create one component that will be temporarily stored in ncomponent
new_component = None
# This module will create one joint (if a parent component is present) that will be temporarily stored in njoint
njoint = None
components = []
joints = []
if connection_site:
angle += connection_site.orientation.x
if (pos[1] - n_radius < TERRAIN_HEIGHT): #TODO CHANGE TO TERRAIN_HEIGT OR DO CHECK ELSEWHERE
if node is not None:
node.component = None
return components,joints
else:
fixture = fixtureDef(
shape=B2D.b2CircleShape(radius =n_radius),
density=1,
friction=0.1,
restitution=0.0,
categoryBits=0x0020,
maskBits=0x001
)
new_component = world.CreateDynamicBody(
position=(pos[0],pos[1]),
angle = angle,
fixtures = fixture)
color = [255,255,255]
if node is not None and module_list is not None:
color = world.cmap(node.type/len(module_list))
elif node is not None and module_list is None:
print("Note: cannot assign a color to the module since the 'module_list' is not passed as an argument")
# move to component creator
new_component.color1 = (color[0],color[1],color[2])
new_component.color2 = (color[0],color[1],color[2])
components.append(new_component)
if node is not None:
node.component = [new_component]
if connection_site is not None:
joint = mu.create_joint(world, p_c,new_component,connection_site, angle, self.torque)
joints.append(joint)
return components, joints
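# Added illustration (not part of the original module): where a child attached to the left
# connection of a free-standing Circular2D module would sit, using the geometry defined above.
def _demo_connection_site():
	m = Circular2D()
	local_pos, local_angle = m.get_global_position_of_connection_site(Connection.left)
	print(local_pos, local_angle)  # a point on the circle, offset by self.angle from the local y axis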
|
[
"gym_rem.utils.Rot.from_axis",
"gym_rem2D.morph.module_utility.create_joint",
"random.uniform",
"Box2D.b2RevoluteJoint",
"math.sin",
"numpy.array",
"math.cos",
"Box2D.b2CircleShape",
"random.gauss",
"Controller.m_controller.Controller"
] |
[((770, 784), 'numpy.array', 'np.array', (['size'], {}), '(size)\n', (778, 784), True, 'import numpy as np\n'), ((981, 1006), 'numpy.array', 'np.array', (['[0.0, 0.0, 1.0]'], {}), '([0.0, 0.0, 1.0])\n', (989, 1006), True, 'import numpy as np\n'), ((1025, 1089), 'gym_rem.utils.Rot.from_axis', 'Rot.from_axis', (['self.connection_axis', '(-self.theta * (np.pi / 2.0))'], {}), '(self.connection_axis, -self.theta * (np.pi / 2.0))\n', (1038, 1089), False, 'from gym_rem.utils import Rot\n'), ((1201, 1249), 'numpy.array', 'np.array', (['[0.0, self.size[2] / 2.0 + 0.002, 0.0]'], {}), '([0.0, self.size[2] / 2.0 + 0.002, 0.0])\n', (1209, 1249), True, 'import numpy as np\n'), ((1309, 1334), 'Controller.m_controller.Controller', 'm_controller.Controller', ([], {}), '()\n', (1332, 1334), False, 'from Controller import m_controller\n'), ((3344, 3408), 'gym_rem.utils.Rot.from_axis', 'Rot.from_axis', (['self.connection_axis', '(-self.theta * (np.pi / 2.0))'], {}), '(self.connection_axis, -self.theta * (np.pi / 2.0))\n', (3357, 3408), False, 'from gym_rem.utils import Rot\n'), ((3480, 3528), 'numpy.array', 'np.array', (['[0.0, 0.0, self.size[2] / 2.0 + 0.002]'], {}), '([0.0, 0.0, self.size[2] / 2.0 + 0.002])\n', (3488, 3528), True, 'import numpy as np\n'), ((2065, 2085), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (2079, 2085), False, 'import random\n'), ((2125, 2161), 'random.gauss', 'random.gauss', (['self.radius', 'MUT_SIGMA'], {}), '(self.radius, MUT_SIGMA)\n', (2137, 2161), False, 'import random\n'), ((2167, 2187), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (2181, 2187), False, 'import random\n'), ((2226, 2271), 'random.gauss', 'random.gauss', (['self.angle', '(MUT_SIGMA * math.pi)'], {}), '(self.angle, MUT_SIGMA * math.pi)\n', (2238, 2271), False, 'import random\n'), ((3027, 3046), 'numpy.array', 'np.array', (['key.value'], {}), '(key.value)\n', (3035, 3046), True, 'import numpy as np\n'), ((3985, 4026), 'numpy.array', 'np.array', (['[0.0, 0.0, -self.size[2] / 2.0]'], {}), '([0.0, 0.0, -self.size[2] / 2.0])\n', (3993, 4026), True, 'import numpy as np\n'), ((4576, 4597), 'Box2D.b2RevoluteJoint', 'B2D.b2RevoluteJoint', ([], {}), '()\n', (4595, 4597), True, 'import Box2D as B2D\n'), ((5029, 5064), 'math.cos', 'math.cos', (['(local_angle + math.pi / 2)'], {}), '(local_angle + math.pi / 2)\n', (5037, 5064), False, 'import math\n'), ((5080, 5115), 'math.sin', 'math.sin', (['(local_angle + math.pi / 2)'], {}), '(local_angle + math.pi / 2)\n', (5088, 5115), False, 'import math\n'), ((7637, 7716), 'gym_rem2D.morph.module_utility.create_joint', 'mu.create_joint', (['world', 'p_c', 'new_component', 'connection_site', 'angle', 'self.torque'], {}), '(world, p_c, new_component, connection_site, angle, self.torque)\n', (7652, 7716), True, 'import gym_rem2D.morph.module_utility as mu\n'), ((4286, 4306), 'numpy.array', 'np.array', (['conn.value'], {}), '(conn.value)\n', (4294, 4306), True, 'import numpy as np\n'), ((5990, 6043), 'math.cos', 'math.cos', (['(connection_site.orientation.x + math.pi / 2)'], {}), '(connection_site.orientation.x + math.pi / 2)\n', (5998, 6043), False, 'import math\n'), ((6069, 6122), 'math.sin', 'math.sin', (['(connection_site.orientation.x + math.pi / 2)'], {}), '(connection_site.orientation.x + math.pi / 2)\n', (6077, 6122), False, 'import math\n'), ((6803, 6837), 'Box2D.b2CircleShape', 'B2D.b2CircleShape', ([], {'radius': 'n_radius'}), '(radius=n_radius)\n', (6820, 6837), True, 'import Box2D as B2D\n')]
|
import numpy as np
from multiprocessing import Pool
from ..bbox import bbox_overlaps
# https://zhuanlan.zhihu.com/p/34655990
def calc_PR_curve(pred, label):
    pos = label[label == 1]  # positive samples
    threshold = np.sort(pred)[::-1]  # pred holds the predicted positive-class score of each sample, sorted descending
label = label[pred.argsort()[::-1]]
precision = []
recall = []
tp = 0
fp = 0
    ap = 0  # average precision
for i in range(len(threshold)):
if label[i] == 1:
tp += 1
recall.append(tp / len(pos))
            precision.append(tp / (tp + fp))
            # approximate the area under the PR curve
ap += (recall[i] - recall[i - 1]) * precision[i]
else:
fp += 1
recall.append(tp / len(pos))
precision.append(tp / (tp + fp))
return precision, recall, ap
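# Added sanity check (not part of the original file): calc_PR_curve on a toy example with
# two positives and two negatives whose scores separate them only imperfectly.
def _demo_calc_pr_curve():
    pred = np.array([0.9, 0.8, 0.4, 0.3])
    label = np.array([1, 0, 1, 0])
    precision, recall, ap = calc_PR_curve(pred, label)
    print(precision, recall, ap)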
def tpfp_voc(det_bboxes, gt_bboxes, iou_thr=0.5):
num_dets = det_bboxes.shape[0]
num_gts = gt_bboxes.shape[0]
    # tp and fp are indexed by the number of predictions, not the number of gt boxes
tp = np.zeros(num_dets, dtype=np.float32)
    fp = np.zeros(num_dets, dtype=np.float32)
    # if there are no gt boxes, every prediction is a false positive, so mark fp = 1 for all predicted bboxes
if gt_bboxes.shape[0] == 0:
fp[...] = 1
return tp, fp
if num_dets == 0:
return tp, fp
ious = bbox_overlaps(det_bboxes[:, :4], gt_bboxes).numpy()
    # print(ious)
    # for each predicted box, the iou with its best-matching gt
    ious_max = ious.max(axis=1)
    # for each predicted box, the index of its best-matching gt
    ious_argmax = ious.argmax(axis=1)
    # sort predictions by confidence score, descending
    sort_inds = np.argsort(-det_bboxes[:, -1])
    gt_covered = np.zeros(num_gts, dtype=bool)
    # when several predictions overlap one gt, only the highest-scoring prediction above the iou threshold counts as tp; all others count as fp
    for i in sort_inds:
        # an iou above the threshold counts as a match
if ious_max[i] >= iou_thr:
            matched_gt = ious_argmax[i]
            # each gt bbox is matched at most once, to the highest-scoring prediction, not the highest iou
if not gt_covered[matched_gt]:
gt_covered[matched_gt] = True
tp[i] = 1
else:
fp[i] = 1
else:
fp[i] = 1
return tp, fp
def _average_precision(recalls, precisions, mode='voc2007'):
recalls = recalls[np.newaxis, :]
precisions = precisions[np.newaxis, :]
assert recalls.shape == precisions.shape and recalls.ndim == 2
num_scales = recalls.shape[0]
ap = np.zeros(num_scales, dtype=np.float32)
    if mode == 'voc2012':  # after smoothing, this is the standard PR-curve area method
zeros = np.zeros((num_scales, 1), dtype=recalls.dtype)
ones = np.ones((num_scales, 1), dtype=recalls.dtype)
mrec = np.hstack((zeros, recalls, ones))
        mpre = np.hstack((zeros, precisions, zeros))
        # the vectorized trick below is terse but efficient
        for i in range(mpre.shape[1] - 1, 0, -1):
            mpre[:, i - 1] = np.maximum(mpre[:, i - 1], mpre[:, i])  # within each recall interval, precision becomes its running maximum, i.e. a horizontal line
for i in range(num_scales):
            ind = np.where(mrec[i, 1:] != mrec[i, :-1])[0]  # indices where recall changes, i.e. the x-axis interval boundaries
            ap[i] = np.sum(
                (mrec[i, ind + 1] - mrec[i, ind]) * mpre[i, ind + 1])  # sum of the area of each segment
    elif mode == 'voc2007':  # 11-point interpolation method
for i in range(num_scales):
for thr in np.arange(0, 1 + 1e-3, 0.1):
precs = precisions[i, recalls[i, :] >= thr]
prec = precs.max() if precs.size > 0 else 0
ap[i] += prec
ap /= 11
else:
raise ValueError(
            'Unrecognized mode, only "voc2007" and "voc2012" are supported')
return ap
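# Added sanity check (not part of the original file): a detector whose precision stays at 1.0
# up to full recall should score AP = 1 under both protocols.
def _demo_average_precision():
    recalls = np.array([0.5, 1.0], dtype=np.float32)
    precisions = np.array([1.0, 1.0], dtype=np.float32)
    print(_average_precision(recalls, precisions, mode='voc2007'))  # [1.]
    print(_average_precision(recalls, precisions, mode='voc2012'))  # [1.]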
# code ref from mmdetection
def voc_eval_map(results, annotations, iou_thr=0.5, name='voc2007', nproc=4):
"""
:param results: list[list],ๅคๅฑlistๆฏๆไปฃๅพ็็ผๅท๏ผๅ
ๅฑlistๆฏๆไปฃ็ฑปๅซ็ผๅท๏ผ
ๅ่ฎพไธๅ
ฑ20ไธช็ฑป๏ผๅๅ
ๅฑlist้ฟๅบฆไธบ20๏ผๆฏไธชListๅ
้จๆฏnumpy็ฉ้ต๏ผnx5่กจ็คบๆฏๅผ ๅพ็ๅฏนๅบ็ๆฏไธช็ฑปๅซ็ๆฃๆตbbox๏ผxyxyconfๆ ผๅผ
:param annotations:ๅresultsไธๆ ท
:param iou_thr: ๆฏๅฆ็ฎTP็้ๅผ๏ผvoc้ป่ฎคๆฏ0.5
:param name: ้็จๅชไธ็ง่ฏไผฐๆๆ ๏ผvoc2007ๆฏ11็น๏ผvoc2012ๆฏๆ ๅprๆฒ็บฟ่ฎก็ฎ
:return:
"""
assert len(results) == len(annotations)
    num_imgs = len(results)  # number of images
num_classes = len(results[0]) # positive class num
pool = Pool(nproc)
eval_results = []
for i in range(num_classes):
cls_dets = [img_res[i] for img_res in results]
cls_gts = [img_res[i] for img_res in annotations]
tpfp = pool.starmap(
tpfp_voc,
            zip(cls_dets, cls_gts, [iou_thr for _ in range(num_imgs)]))
        # tp/fp flags for every predicted bbox
        tp, fp = tuple(zip(*tpfp))
        # count the gt bboxes
num_gts = 0
for j, bbox in enumerate(cls_gts):
            num_gts += bbox.shape[0]
        # concatenate the predicted bboxes of all images
cls_dets = np.vstack(cls_dets)
        num_dets = cls_dets.shape[0]  # number of detected bboxes
        # tp and fp have now been computed for every predicted bbox
        # next compute precision and recall; the vectorized style below is fairly advanced
        sort_inds = np.argsort(-cls_dets[:, -1])  # sort by predicted confidence, descending
        # note: this is the fast, vectorized version of calc_PR_curve (see c3_pr_roc.py)
        tp = np.hstack(tp)[sort_inds][None]
fp = np.hstack(fp)[sort_inds][None]
tp = np.cumsum(tp, axis=1)
fp = np.cumsum(fp, axis=1)
eps = np.finfo(np.float32).eps
recalls = tp / np.maximum(num_gts, eps)
precisions = tp / np.maximum((tp + fp), eps)
recalls = recalls[0, :]
precisions = precisions[0, :]
# print('recalls', recalls, 'precisions', precisions)
ap = _average_precision(recalls, precisions, name)[0]
eval_results.append({
'num_gts': num_gts,
'num_dets': num_dets,
'recall': recalls,
'precision': precisions,
'ap': ap
})
pool.close()
aps = []
for cls_result in eval_results:
if cls_result['num_gts'] > 0:
aps.append(cls_result['ap'])
mean_ap = np.array(aps).mean().item() if aps else 0.0
return mean_ap
|
[
"numpy.maximum",
"numpy.sum",
"numpy.zeros",
"numpy.ones",
"numpy.hstack",
"numpy.argsort",
"numpy.sort",
"numpy.cumsum",
"numpy.finfo",
"numpy.where",
"numpy.arange",
"numpy.array",
"multiprocessing.Pool",
"numpy.vstack"
] |
[((934, 970), 'numpy.zeros', 'np.zeros', (['num_dets'], {'dtype': 'np.float32'}), '(num_dets, dtype=np.float32)\n', (942, 970), True, 'import numpy as np\n'), ((980, 1016), 'numpy.zeros', 'np.zeros', (['num_dets'], {'dtype': 'np.float32'}), '(num_dets, dtype=np.float32)\n', (988, 1016), True, 'import numpy as np\n'), ((1421, 1451), 'numpy.argsort', 'np.argsort', (['(-det_bboxes[:, -1])'], {}), '(-det_bboxes[:, -1])\n', (1431, 1451), True, 'import numpy as np\n'), ((1469, 1498), 'numpy.zeros', 'np.zeros', (['num_gts'], {'dtype': 'bool'}), '(num_gts, dtype=bool)\n', (1477, 1498), True, 'import numpy as np\n'), ((2182, 2220), 'numpy.zeros', 'np.zeros', (['num_scales'], {'dtype': 'np.float32'}), '(num_scales, dtype=np.float32)\n', (2190, 2220), True, 'import numpy as np\n'), ((3858, 3869), 'multiprocessing.Pool', 'Pool', (['nproc'], {}), '(nproc)\n', (3862, 3869), False, 'from multiprocessing import Pool\n'), ((210, 223), 'numpy.sort', 'np.sort', (['pred'], {}), '(pred)\n', (217, 223), True, 'import numpy as np\n'), ((2281, 2327), 'numpy.zeros', 'np.zeros', (['(num_scales, 1)'], {'dtype': 'recalls.dtype'}), '((num_scales, 1), dtype=recalls.dtype)\n', (2289, 2327), True, 'import numpy as np\n'), ((2343, 2388), 'numpy.ones', 'np.ones', (['(num_scales, 1)'], {'dtype': 'recalls.dtype'}), '((num_scales, 1), dtype=recalls.dtype)\n', (2350, 2388), True, 'import numpy as np\n'), ((2404, 2437), 'numpy.hstack', 'np.hstack', (['(zeros, recalls, ones)'], {}), '((zeros, recalls, ones))\n', (2413, 2437), True, 'import numpy as np\n'), ((2453, 2490), 'numpy.hstack', 'np.hstack', (['(zeros, precisions, zeros)'], {}), '((zeros, precisions, zeros))\n', (2462, 2490), True, 'import numpy as np\n'), ((4392, 4411), 'numpy.vstack', 'np.vstack', (['cls_dets'], {}), '(cls_dets)\n', (4401, 4411), True, 'import numpy as np\n'), ((4543, 4571), 'numpy.argsort', 'np.argsort', (['(-cls_dets[:, -1])'], {}), '(-cls_dets[:, -1])\n', (4553, 4571), True, 'import numpy as np\n'), ((4746, 4767), 'numpy.cumsum', 'np.cumsum', (['tp'], {'axis': '(1)'}), '(tp, axis=1)\n', (4755, 4767), True, 'import numpy as np\n'), ((4781, 4802), 'numpy.cumsum', 'np.cumsum', (['fp'], {'axis': '(1)'}), '(fp, axis=1)\n', (4790, 4802), True, 'import numpy as np\n'), ((2590, 2628), 'numpy.maximum', 'np.maximum', (['mpre[:, i - 1]', 'mpre[:, i]'], {}), '(mpre[:, i - 1], mpre[:, i])\n', (2600, 2628), True, 'import numpy as np\n'), ((2792, 2852), 'numpy.sum', 'np.sum', (['((mrec[i, ind + 1] - mrec[i, ind]) * mpre[i, ind + 1])'], {}), '((mrec[i, ind + 1] - mrec[i, ind]) * mpre[i, ind + 1])\n', (2798, 2852), True, 'import numpy as np\n'), ((4817, 4837), 'numpy.finfo', 'np.finfo', (['np.float32'], {}), '(np.float32)\n', (4825, 4837), True, 'import numpy as np\n'), ((4865, 4889), 'numpy.maximum', 'np.maximum', (['num_gts', 'eps'], {}), '(num_gts, eps)\n', (4875, 4889), True, 'import numpy as np\n'), ((4916, 4940), 'numpy.maximum', 'np.maximum', (['(tp + fp)', 'eps'], {}), '(tp + fp, eps)\n', (4926, 4940), True, 'import numpy as np\n'), ((2708, 2745), 'numpy.where', 'np.where', (['(mrec[i, 1:] != mrec[i, :-1])'], {}), '(mrec[i, 1:] != mrec[i, :-1])\n', (2716, 2745), True, 'import numpy as np\n'), ((2982, 3010), 'numpy.arange', 'np.arange', (['(0)', '(1 + 0.001)', '(0.1)'], {}), '(0, 1 + 0.001, 0.1)\n', (2991, 3010), True, 'import numpy as np\n'), ((4658, 4671), 'numpy.hstack', 'np.hstack', (['tp'], {}), '(tp)\n', (4667, 4671), True, 'import numpy as np\n'), ((4702, 4715), 'numpy.hstack', 'np.hstack', (['fp'], {}), '(fp)\n', (4711, 4715), True, 'import numpy as 
np\n'), ((5492, 5505), 'numpy.array', 'np.array', (['aps'], {}), '(aps)\n', (5500, 5505), True, 'import numpy as np\n')]
|
"""
B-Complement
Input:
part point clouds: B x P x N x 3
Output:
	R and T: B x P x (3 + 4)
Losses:
	Center L2 Loss, Rotation L2 Loss, Rotation Chamfer-Distance Loss
"""
import torch
from torch import nn
import torch.nn.functional as F
import sys, os
import numpy as np
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(BASE_DIR, '../utils'))
from cd.chamfer import chamfer_distance
from quaternion import qrot
import ipdb
from scipy.optimize import linear_sum_assignment
# PointNet Front-end
class PartPointNet(nn.Module):
def __init__(self, feat_len):
super(PartPointNet, self).__init__()
self.conv1 = nn.Conv1d(3, 64, 1)
self.conv2 = nn.Conv1d(64, 64, 1)
self.conv3 = nn.Conv1d(64, 64, 1)
self.conv4 = nn.Conv1d(64, 128, 1)
#self.conv5 = nn.Conv1d(128, 1024, 1)
self.bn1 = nn.BatchNorm1d(64)
self.bn2 = nn.BatchNorm1d(64)
self.bn3 = nn.BatchNorm1d(64)
self.bn4 = nn.BatchNorm1d(128)
#self.bn5 = nn.BatchNorm1d(1024)
self.mlp1 = nn.Linear(128, feat_len)
self.bn6 = nn.BatchNorm1d(feat_len)
"""
Input: B x N x 3
Output: B x F
"""
def forward(self, x):
x = x.permute(0, 2, 1)
x = torch.relu(self.bn1(self.conv1(x)))
x = torch.relu(self.bn2(self.conv2(x)))
x = torch.relu(self.bn3(self.conv3(x)))
x = torch.relu(self.bn4(self.conv4(x)))
#x = torch.relu(self.bn5(self.conv5(x)))
x = x.max(dim=-1)[0]
x = torch.relu(self.bn6(self.mlp1(x)))
return x
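# Added shape check (not part of the original file): PartPointNet turns B x N x 3 point
# clouds into B x feat_len global feature vectors via shared 1-D convs and max pooling.
def _demo_part_pointnet():
    net = PartPointNet(feat_len=256)
    pts = torch.rand(2, 1024, 3)  # two clouds of 1024 points each
    print(net(pts).shape)  # torch.Size([2, 256])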
# PointNet Back-end
class PoseDecoder(nn.Module):
def __init__(self, feat_len):
super(PoseDecoder, self).__init__()
self.mlp1 = nn.Linear(feat_len, 512)
self.mlp2 = nn.Linear(512, 256)
self.trans = nn.Linear(256, 3)
self.quat = nn.Linear(256, 4)
self.quat.bias.data.zero_()
"""
Input: B x (2F + P + 16)
Output: B x 7
"""
def forward(self, feat):
feat = torch.relu(self.mlp1(feat))
feat = torch.relu(self.mlp2(feat))
trans = torch.tanh(self.trans(feat)) # consider to remove torch.tanh if not using PartNet normalization
quat_bias = feat.new_tensor([[[1.0, 0.0, 0.0, 0.0]]])
quat = self.quat(feat).add(quat_bias)
quat = quat / (1e-12 + quat.pow(2).sum(dim=-1, keepdim=True)).sqrt()
out = torch.cat([trans, quat.squeeze(0)], dim=-1)
return out
class Network(nn.Module):
def __init__(self, conf):
super(Network, self).__init__()
self.conf = conf
self.part_pointnet = PartPointNet(conf.feat_len)
self.pose_decoder = PoseDecoder(2 * conf.feat_len + conf.max_num_part + 16)
"""
Input: B x P x N x 3, B x P, B x P x P, B x 7
Output: B x P x (3 + 4)
"""
def forward(self,seq, part_pcs, part_valids, instance_label, gt_part_pose):
batch_size = part_pcs.shape[0]
num_part = part_pcs.shape[1]
num_point = part_pcs.shape[2]
pred_part_poses = np.zeros((batch_size, num_part, 7))
pred_part_poses = torch.tensor(pred_part_poses).to(self.conf.device)
# generate random_noise
random_noise = np.random.normal(loc=0.0, scale=1.0, size=[batch_size, num_part, 16]).astype(
np.float32) # B x P x 16
random_noise = torch.tensor(random_noise).to(self.conf.device)
for iter in range(num_part):
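            # Assemble parts one at a time in the given sequence: step 0 uses the provided
            # gt_part_pose, later steps predict a pose from the shape assembled so far plus
            # the next part's point features, instance label, and random noise.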
select_ind = seq[:,iter].int().tolist()
batch_ind = [i for i in range(len(select_ind))]
if iter == 0:
cur_pred_pose = gt_part_pose # B x 7
                pred_part_poses = pred_part_poses.float()
pred_part_poses[batch_ind,select_ind,:] = cur_pred_pose
cur_pred_center = cur_pred_pose[:, :3].unsqueeze(1).repeat(1, num_point, 1) # B x N x 3
cur_pred_qrot = cur_pred_pose[:, 3:].unsqueeze(1).repeat(1, num_point, 1) # B x N x 4
cur_part = cur_pred_center + qrot(cur_pred_qrot, part_pcs[batch_ind,select_ind, :, :])# B x N x 3
cur_part = cur_part.unsqueeze(1) # B x 1 x N x 3
                cur_shape = cur_part # B x 1 x N x 3
else:
cur_shape_feat = self.part_pointnet(cur_shape.view(batch_size, -1, 3)) # B x F
cur_part_feat = self.part_pointnet(part_pcs[batch_ind,select_ind, :, :])# B x F
cat_feat = torch.cat([cur_shape_feat, cur_part_feat, instance_label[batch_ind,select_ind, :].contiguous(), random_noise[batch_ind,select_ind, :].contiguous()], dim=-1) # B x (2F + P + 16)
cur_pred_pose = self.pose_decoder(cat_feat) # B x 7
pred_part_poses[batch_ind,select_ind, :] = cur_pred_pose
cur_pred_center = cur_pred_pose[:, :3].unsqueeze(1).repeat(1, num_point, 1) # B x N x 3
cur_pred_qrot = cur_pred_pose[:, 3:].unsqueeze(1).repeat(1, num_point, 1) # B x N x 4
cur_part = cur_pred_center + qrot(cur_pred_qrot, part_pcs[batch_ind,select_ind, :, :]) # B x N x 3
cur_part = cur_part.unsqueeze(1) # B x 1 x N x 3
                cur_shape = torch.cat([cur_shape, cur_part], dim=1) # B x (iter+1) x N x 3
pred_part_poses = pred_part_poses.double() * part_valids.unsqueeze(2).double()
return pred_part_poses.float()
"""
Input: * x N x 3, * x 3, * x 4, * x 3, * x 4,
Output: *, * (two lists)
"""
def linear_assignment(self, pts, centers1, quats1, centers2, quats2):
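        # Match parts between the two pose sets: transform the same part point clouds by
        # (centers1, quats1) and (centers2, quats2), build a pairwise chamfer-distance cost
        # matrix, and solve the matching with the Hungarian algorithm.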
cur_part_cnt = pts.shape[0]
num_point = pts.shape[1]
with torch.no_grad():
cur_quats1 = quats1.unsqueeze(1).repeat(1, num_point, 1)
cur_centers1 = centers1.unsqueeze(1).repeat(1, num_point, 1)
cur_pts1 = qrot(cur_quats1, pts) + cur_centers1
cur_quats2 = quats2.unsqueeze(1).repeat(1, num_point, 1)
cur_centers2 = centers2.unsqueeze(1).repeat(1, num_point, 1)
cur_pts2 = qrot(cur_quats2, pts) + cur_centers2
cur_pts1 = cur_pts1.unsqueeze(1).repeat(1, cur_part_cnt, 1, 1).view(-1, num_point, 3)
cur_pts2 = cur_pts2.unsqueeze(0).repeat(cur_part_cnt, 1, 1, 1).view(-1, num_point, 3)
dist1, dist2 = chamfer_distance(cur_pts1, cur_pts2, transpose=False)
dist_mat = (dist1.mean(1) + dist2.mean(1)).view(cur_part_cnt, cur_part_cnt)
rind, cind = linear_sum_assignment(dist_mat.cpu().numpy())
return rind, cind
"""
Input: B x P x 3, B x P x 3, B x P
Output: B
"""
def get_trans_l2_loss(self, trans1, trans2, valids):
loss_per_data = (trans1 - trans2).pow(2).sum(dim=-1)
loss_per_data = (loss_per_data * valids).sum(1) / valids.sum(1)
return loss_per_data
"""
Input: B x P x N x 3, B x P x 4, B x P x 4, B x P
Output: B
"""
def get_rot_l2_loss(self, pts, quat1, quat2, valids):
batch_size = pts.shape[0]
num_point = pts.shape[2]
pts1 = qrot(quat1.unsqueeze(2).repeat(1, 1, num_point, 1), pts)
pts2 = qrot(quat2.unsqueeze(2).repeat(1, 1, num_point, 1), pts)
loss_per_data = (pts1 - pts2).pow(2).sum(-1).mean(-1)
loss_per_data = (loss_per_data * valids).sum(1) / valids.sum(1)
return loss_per_data
"""
Input: B x P x N x 3, B x P x 4, B x P x 4, B x P
Output: B
"""
def get_rot_cd_loss(self, pts, quat1, quat2, valids, device):
batch_size = pts.shape[0]
num_point = pts.shape[2]
pts1 = qrot(quat1.unsqueeze(2).repeat(1, 1, num_point, 1), pts)
pts2 = qrot(quat2.unsqueeze(2).repeat(1, 1, num_point, 1), pts)
dist1, dist2 = chamfer_distance(pts1.view(-1, num_point, 3), pts2.view(-1, num_point, 3), transpose=False)
loss_per_data = torch.mean(dist1, dim=1) + torch.mean(dist2, dim=1)
loss_per_data = loss_per_data.view(batch_size, -1)
loss_per_data = loss_per_data.to(device)
loss_per_data = (loss_per_data * valids).sum(1) / valids.sum(1)
        return loss_per_data
def get_total_cd_loss(self, pts, quat1, quat2, valids, center1, center2, device):
batch_size = pts.shape[0]
num_part = pts.shape[1]
num_point = pts.shape[2]
center1 = center1.unsqueeze(2).repeat(1,1,num_point,1)
center2 = center2.unsqueeze(2).repeat(1,1,num_point,1)
pts1 = qrot(quat1.unsqueeze(2).repeat(1, 1, num_point, 1), pts) + center1
pts2 = qrot(quat2.unsqueeze(2).repeat(1, 1, num_point, 1), pts) + center2
dist1, dist2 = chamfer_distance(pts1.view(-1, num_point, 3), pts2.view(-1, num_point, 3), transpose=False)
loss_per_data = torch.mean(dist1, dim=1) + torch.mean(dist2, dim=1)
loss_per_data = loss_per_data.view(batch_size, -1)
thre = 0.01
loss_per_data = loss_per_data.to(device)
acc = [[0 for i in range(num_part)]for j in range(batch_size)]
for i in range(batch_size):
for j in range(num_part):
if loss_per_data[i,j] < thre and valids[i,j]:
acc[i][j] = 1
loss_per_data = (loss_per_data * valids).sum(1) / valids.sum(1)
return loss_per_data , acc
def get_shape_cd_loss(self, pts, quat1, quat2, valids, center1, center2, device):
batch_size = pts.shape[0]
num_part = pts.shape[1]
num_point = pts.shape[2]
center1 = center1.unsqueeze(2).repeat(1,1,num_point,1)
center2 = center2.unsqueeze(2).repeat(1,1,num_point,1)
pts1 = qrot(quat1.unsqueeze(2).repeat(1, 1, num_point, 1), pts) + center1
pts2 = qrot(quat2.unsqueeze(2).repeat(1, 1, num_point, 1), pts) + center2
pts1 = pts1.view(batch_size,num_part*num_point,3)
pts2 = pts2.view(batch_size,num_part*num_point,3)
dist1, dist2 = chamfer_distance(pts1, pts2, transpose=False)
valids = valids.unsqueeze(2).repeat(1,1,1000).view(batch_size,-1)
dist1 = dist1 * valids
dist2 = dist2 * valids
loss_per_data = torch.mean(dist1, dim=1) + torch.mean(dist2, dim=1)
loss_per_data = loss_per_data.to(device)
return loss_per_data
def get_sym_point(self, point, x, y, z):
if x:
point[0] = - point[0]
if y:
point[1] = - point[1]
if z:
point[2] = - point[2]
return point.tolist()
def get_possible_point_list(self, point, sym):
sym = torch.tensor([1.0, 1.0, 1.0])
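        # NOTE: the passed-in sym is overridden here, so none of the equal() checks below
        # match and the final else branch (all 8 symmetric points) always runs.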
point_list = []
if sym.equal(torch.tensor([0.0, 0.0, 0.0])):
point_list.append(self.get_sym_point(point, 0, 0, 0))
elif sym.equal(torch.tensor([1.0, 0.0, 0.0])):
point_list.append(self.get_sym_point(point, 0, 0, 0))
point_list.append(self.get_sym_point(point, 1, 0, 0))
elif sym.equal(torch.tensor([0.0, 1.0, 0.0])):
point_list.append(self.get_sym_point(point, 0, 0, 0))
point_list.append(self.get_sym_point(point, 0, 1, 0))
elif sym.equal(torch.tensor([0.0, 0.0, 1.0])):
point_list.append(self.get_sym_point(point, 0, 0, 0))
point_list.append(self.get_sym_point(point, 0, 0, 1))
elif sym.equal(torch.tensor([1.0, 1.0, 0.0])):
point_list.append(self.get_sym_point(point, 0, 0, 0))
point_list.append(self.get_sym_point(point, 1, 0, 0))
point_list.append(self.get_sym_point(point, 0, 1, 0))
point_list.append(self.get_sym_point(point, 1, 1, 0))
elif sym.equal(torch.tensor([1.0, 0.0, 1.0])):
point_list.append(self.get_sym_point(point, 0, 0, 0))
point_list.append(self.get_sym_point(point, 1, 0, 0))
point_list.append(self.get_sym_point(point, 0, 0, 1))
point_list.append(self.get_sym_point(point, 1, 0, 1))
elif sym.equal(torch.tensor([0.0, 1.0, 1.0])):
point_list.append(self.get_sym_point(point, 0, 0, 0))
point_list.append(self.get_sym_point(point, 0, 1, 0))
point_list.append(self.get_sym_point(point, 0, 0, 1))
point_list.append(self.get_sym_point(point, 0, 1, 1))
else:
point_list.append(self.get_sym_point(point, 0, 0, 0))
point_list.append(self.get_sym_point(point, 1, 0, 0))
point_list.append(self.get_sym_point(point, 0, 1, 0))
point_list.append(self.get_sym_point(point, 0, 0, 1))
point_list.append(self.get_sym_point(point, 1, 1, 0))
point_list.append(self.get_sym_point(point, 1, 0, 1))
point_list.append(self.get_sym_point(point, 0, 1, 1))
point_list.append(self.get_sym_point(point, 1, 1, 1))
return point_list
def get_min_l2_dist(self, list1, list2, center1, center2, quat1, quat2):
list1 = torch.tensor(list1) # m x 3
list2 = torch.tensor(list2) # n x 3
len1 = list1.shape[0]
len2 = list2.shape[0]
center1 = center1.unsqueeze(0).repeat(len1, 1)
center2 = center2.unsqueeze(0).repeat(len2, 1)
quat1 = quat1.unsqueeze(0).repeat(len1, 1)
quat2 = quat2.unsqueeze(0).repeat(len2, 1)
list1 = list1.to(self.conf.device)
list2 = list2.to(self.conf.device)
list1 = center1 + qrot(quat1, list1)
list2 = center2 + qrot(quat2, list2)
mat1 = list1.unsqueeze(1).repeat(1, len2, 1)
mat2 = list2.unsqueeze(0).repeat(len1, 1, 1)
mat = (mat1 - mat2) * (mat1 - mat2)
mat = mat.sum(dim=-1)
return mat.min()
"""
Contact point loss metric
Date: 2020/5/22
Input B x P x 3, B x P x 4, B x P x P x 4, B x P x 3
    Output B
"""
def get_contact_point_loss(self, center, quat, contact_points, sym_info):
batch_size = center.shape[0]
num_part = center.shape[1]
contact_point_loss = torch.zeros(batch_size)
total_num = 0
count = 0
for b in range(batch_size):
sum_loss = 0
for i in range(num_part):
for j in range(num_part):
if contact_points[b, i, j, 0]:
contact_point_1 = contact_points[b, i, j, 1:]
contact_point_2 = contact_points[b, j, i, 1:]
sym1 = sym_info[b, i]
sym2 = sym_info[b, j]
point_list_1 = self.get_possible_point_list(contact_point_1, sym1)
point_list_2 = self.get_possible_point_list(contact_point_2, sym2)
dist = self.get_min_l2_dist(point_list_1, point_list_2, center[b, i, :], center[b, j, :],
quat[b, i, :], quat[b, j, :]) # 1
if dist < 0.01:
count += 1
total_num += 1
sum_loss += dist
contact_point_loss[b] = sum_loss
return contact_point_loss, count, total_num
|
[
"torch.mean",
"os.path.abspath",
"cd.chamfer.chamfer_distance",
"torch.nn.BatchNorm1d",
"torch.nn.Conv1d",
"numpy.zeros",
"quaternion.qrot",
"torch.cat",
"numpy.random.normal",
"torch.nn.Linear",
"torch.zeros",
"torch.no_grad",
"os.path.join",
"torch.tensor"
] |
[((355, 380), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (370, 380), False, 'import sys, os\n'), ((398, 432), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""../utils"""'], {}), "(BASE_DIR, '../utils')\n", (410, 432), False, 'import sys, os\n'), ((718, 737), 'torch.nn.Conv1d', 'nn.Conv1d', (['(3)', '(64)', '(1)'], {}), '(3, 64, 1)\n', (727, 737), False, 'from torch import nn\n'), ((759, 779), 'torch.nn.Conv1d', 'nn.Conv1d', (['(64)', '(64)', '(1)'], {}), '(64, 64, 1)\n', (768, 779), False, 'from torch import nn\n'), ((801, 821), 'torch.nn.Conv1d', 'nn.Conv1d', (['(64)', '(64)', '(1)'], {}), '(64, 64, 1)\n', (810, 821), False, 'from torch import nn\n'), ((843, 864), 'torch.nn.Conv1d', 'nn.Conv1d', (['(64)', '(128)', '(1)'], {}), '(64, 128, 1)\n', (852, 864), False, 'from torch import nn\n'), ((931, 949), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(64)'], {}), '(64)\n', (945, 949), False, 'from torch import nn\n'), ((969, 987), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(64)'], {}), '(64)\n', (983, 987), False, 'from torch import nn\n'), ((1007, 1025), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(64)'], {}), '(64)\n', (1021, 1025), False, 'from torch import nn\n'), ((1045, 1064), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(128)'], {}), '(128)\n', (1059, 1064), False, 'from torch import nn\n'), ((1127, 1151), 'torch.nn.Linear', 'nn.Linear', (['(128)', 'feat_len'], {}), '(128, feat_len)\n', (1136, 1151), False, 'from torch import nn\n'), ((1171, 1195), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['feat_len'], {}), '(feat_len)\n', (1185, 1195), False, 'from torch import nn\n'), ((1808, 1832), 'torch.nn.Linear', 'nn.Linear', (['feat_len', '(512)'], {}), '(feat_len, 512)\n', (1817, 1832), False, 'from torch import nn\n'), ((1853, 1872), 'torch.nn.Linear', 'nn.Linear', (['(512)', '(256)'], {}), '(512, 256)\n', (1862, 1872), False, 'from torch import nn\n'), ((1895, 1912), 'torch.nn.Linear', 'nn.Linear', (['(256)', '(3)'], {}), '(256, 3)\n', (1904, 1912), False, 'from torch import nn\n'), ((1934, 1951), 'torch.nn.Linear', 'nn.Linear', (['(256)', '(4)'], {}), '(256, 4)\n', (1943, 1951), False, 'from torch import nn\n'), ((3152, 3187), 'numpy.zeros', 'np.zeros', (['(batch_size, num_part, 7)'], {}), '((batch_size, num_part, 7))\n', (3160, 3187), True, 'import numpy as np\n'), ((10047, 10092), 'cd.chamfer.chamfer_distance', 'chamfer_distance', (['pts1', 'pts2'], {'transpose': '(False)'}), '(pts1, pts2, transpose=False)\n', (10063, 10092), False, 'from cd.chamfer import chamfer_distance\n'), ((10680, 10709), 'torch.tensor', 'torch.tensor', (['[1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0])\n', (10692, 10709), False, 'import torch\n'), ((13035, 13054), 'torch.tensor', 'torch.tensor', (['list1'], {}), '(list1)\n', (13047, 13054), False, 'import torch\n'), ((13080, 13099), 'torch.tensor', 'torch.tensor', (['list2'], {}), '(list2)\n', (13092, 13099), False, 'import torch\n'), ((14099, 14122), 'torch.zeros', 'torch.zeros', (['batch_size'], {}), '(batch_size)\n', (14110, 14122), False, 'import torch\n'), ((5777, 5792), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5790, 5792), False, 'import torch\n'), ((6424, 6477), 'cd.chamfer.chamfer_distance', 'chamfer_distance', (['cur_pts1', 'cur_pts2'], {'transpose': '(False)'}), '(cur_pts1, cur_pts2, transpose=False)\n', (6440, 6477), False, 'from cd.chamfer import chamfer_distance\n'), ((8005, 8029), 'torch.mean', 'torch.mean', (['dist1'], {'dim': '(1)'}), '(dist1, dim=1)\n', (8015, 8029), False, 'import torch\n'), ((8032, 
8056), 'torch.mean', 'torch.mean', (['dist2'], {'dim': '(1)'}), '(dist2, dim=1)\n', (8042, 8056), False, 'import torch\n'), ((8894, 8918), 'torch.mean', 'torch.mean', (['dist1'], {'dim': '(1)'}), '(dist1, dim=1)\n', (8904, 8918), False, 'import torch\n'), ((8921, 8945), 'torch.mean', 'torch.mean', (['dist2'], {'dim': '(1)'}), '(dist2, dim=1)\n', (8931, 8945), False, 'import torch\n'), ((10253, 10277), 'torch.mean', 'torch.mean', (['dist1'], {'dim': '(1)'}), '(dist1, dim=1)\n', (10263, 10277), False, 'import torch\n'), ((10280, 10304), 'torch.mean', 'torch.mean', (['dist2'], {'dim': '(1)'}), '(dist2, dim=1)\n', (10290, 10304), False, 'import torch\n'), ((10755, 10784), 'torch.tensor', 'torch.tensor', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (10767, 10784), False, 'import torch\n'), ((13493, 13511), 'quaternion.qrot', 'qrot', (['quat1', 'list1'], {}), '(quat1, list1)\n', (13497, 13511), False, 'from quaternion import qrot\n'), ((13538, 13556), 'quaternion.qrot', 'qrot', (['quat2', 'list2'], {}), '(quat2, list2)\n', (13542, 13556), False, 'from quaternion import qrot\n'), ((3214, 3243), 'torch.tensor', 'torch.tensor', (['pred_part_poses'], {}), '(pred_part_poses)\n', (3226, 3243), False, 'import torch\n'), ((3320, 3389), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0.0)', 'scale': '(1.0)', 'size': '[batch_size, num_part, 16]'}), '(loc=0.0, scale=1.0, size=[batch_size, num_part, 16])\n', (3336, 3389), True, 'import numpy as np\n'), ((3459, 3485), 'torch.tensor', 'torch.tensor', (['random_noise'], {}), '(random_noise)\n', (3471, 3485), False, 'import torch\n'), ((5308, 5347), 'torch.cat', 'torch.cat', (['[cur_shape, cur_part]'], {'dim': '(1)'}), '([cur_shape, cur_part], dim=1)\n', (5317, 5347), False, 'import torch\n'), ((5960, 5981), 'quaternion.qrot', 'qrot', (['cur_quats1', 'pts'], {}), '(cur_quats1, pts)\n', (5964, 5981), False, 'from quaternion import qrot\n'), ((6163, 6184), 'quaternion.qrot', 'qrot', (['cur_quats2', 'pts'], {}), '(cur_quats2, pts)\n', (6167, 6184), False, 'from quaternion import qrot\n'), ((10876, 10905), 'torch.tensor', 'torch.tensor', (['[1.0, 0.0, 0.0]'], {}), '([1.0, 0.0, 0.0])\n', (10888, 10905), False, 'import torch\n'), ((4121, 4179), 'quaternion.qrot', 'qrot', (['cur_pred_qrot', 'part_pcs[batch_ind, select_ind, :, :]'], {}), '(cur_pred_qrot, part_pcs[batch_ind, select_ind, :, :])\n', (4125, 4179), False, 'from quaternion import qrot\n'), ((5143, 5201), 'quaternion.qrot', 'qrot', (['cur_pred_qrot', 'part_pcs[batch_ind, select_ind, :, :]'], {}), '(cur_pred_qrot, part_pcs[batch_ind, select_ind, :, :])\n', (5147, 5201), False, 'from quaternion import qrot\n'), ((11063, 11092), 'torch.tensor', 'torch.tensor', (['[0.0, 1.0, 0.0]'], {}), '([0.0, 1.0, 0.0])\n', (11075, 11092), False, 'import torch\n'), ((11250, 11279), 'torch.tensor', 'torch.tensor', (['[0.0, 0.0, 1.0]'], {}), '([0.0, 0.0, 1.0])\n', (11262, 11279), False, 'import torch\n'), ((11437, 11466), 'torch.tensor', 'torch.tensor', (['[1.0, 1.0, 0.0]'], {}), '([1.0, 1.0, 0.0])\n', (11449, 11466), False, 'import torch\n'), ((11756, 11785), 'torch.tensor', 'torch.tensor', (['[1.0, 0.0, 1.0]'], {}), '([1.0, 0.0, 1.0])\n', (11768, 11785), False, 'import torch\n'), ((12075, 12104), 'torch.tensor', 'torch.tensor', (['[0.0, 1.0, 1.0]'], {}), '([0.0, 1.0, 1.0])\n', (12087, 12104), False, 'import torch\n')]
|
import unittest
import os
import matplotlib.pyplot as plt
import numpy as np
from lorenz.lorenz import make_dataset, plot_3d
import lorenz.dataset
class TestDataset(unittest.TestCase):
def setUp(self):
self.path = os.path.join(os.path.split(os.path.split(os.path.dirname(__file__))[0])[0], 'data', 'lorenz.h5')
def test_random_iterator_1d(self):
rng = np.random.RandomState(1729)
dataset = lorenz.dataset.Dataset(self.path, view='1d')
for b in dataset.random_iterator(4, 100):
plt.plot(np.linspace(0,1,b.shape[1]), b[:,:,0].T)
plt.show()
#plot_3d(b)
def test_random_iterator_3d(self):
rng = np.random.RandomState(1729)
dataset = lorenz.dataset.Dataset(self.path, view='3d')
for b in dataset.random_iterator(4, 100):
plot_3d(b)
|
[
"matplotlib.pyplot.show",
"lorenz.lorenz.plot_3d",
"os.path.dirname",
"numpy.random.RandomState",
"numpy.linspace"
] |
[((380, 407), 'numpy.random.RandomState', 'np.random.RandomState', (['(1729)'], {}), '(1729)\n', (401, 407), True, 'import numpy as np\n'), ((684, 711), 'numpy.random.RandomState', 'np.random.RandomState', (['(1729)'], {}), '(1729)\n', (705, 711), True, 'import numpy as np\n'), ((595, 605), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (603, 605), True, 'import matplotlib.pyplot as plt\n'), ((837, 847), 'lorenz.lorenz.plot_3d', 'plot_3d', (['b'], {}), '(b)\n', (844, 847), False, 'from lorenz.lorenz import make_dataset, plot_3d\n'), ((542, 571), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'b.shape[1]'], {}), '(0, 1, b.shape[1])\n', (553, 571), True, 'import numpy as np\n'), ((270, 295), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (285, 295), False, 'import os\n')]
|
from utilities import load_stf
import numpy as np
from scipy.spatial.distance import cosine
import time
#vsm = load_stf('glove.840B.300d.sample.txt',300)
#csm = np.load('centroids').item()
#distrib = np.zeros((100000,10))
#oFile = open('f_distrib','w+')
def dot_product(v1,v2):
total = 0
if len(v1) != len(v2):
		raise ValueError("v1 and v2 must have the same length")
for i in range(len(v1)):
total += float(v1[i])*float(v2[i])
return total
def centroid(vsm,w,k):
total = np.zeros(len(vsm.word_vectors[vsm.dictionary[w]]))
for v in vsm.most_similar(w,k+1):
total += vsm.word_vectors[vsm.dictionary[v[0]]]
total /= k
return total
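# lcent_similarity scores w2 against w1 as dot(v1, v2) minus dot(v1, c)**gamma, where c is
# w1's local centroid; the penalty term depends only on w1, not on the candidate word w2.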
def lcent_similarity(w1,w2,vsm,gamma,k,c):
v1 = vsm.word_vectors[vsm.dictionary[w1]]
v2 = vsm.word_vectors[vsm.dictionary[w2]]
v1v2 = dot_product(v1,v2)
v1c = dot_product(v1,c)
v1cg = np.power(v1c,gamma)
return v1v2 - v1cg
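# insert maintains a fixed-length, descending-sorted pair of arrays (values, items): it slots
# (val, vec) in at its rank and drops the last entry. Note that nsims is hard-coded to 300
# columns, so whatever is stored in a row is broadcast to that width.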
def insert(v,sims,vec,val):
nv = np.zeros(len(v))
nsims = np.zeros((len(sims),300))
swap = 0
for i in range(len(v)):
if v[i]<val:
swap = 1
break
if swap == 0:
return (v,sims)
nv[:i] = v[:i]
nsims[:i] = sims[:i]
nv[i] = val
nsims[i] = vec
nv[i+1:] = v[i:len(v)-1]
nsims[i+1:] = sims[i:len(sims)-1]
return (nv,nsims)
def most_similar_lcent(vsm,csm,word,k,gamma):
sims = np.zeros(10)
vecs = np.zeros(10)
c = csm[word]
for i,d_word in enumerate(vsm.dictionary):
sim = lcent_similarity(word,d_word,vsm,gamma,k,c)
(sims,vecs) = insert(vecs,sims,vsm.dictionary[d_word],sim)
ret = []
for i in range(10):
ret.append((sims[i],vecs[i]))
return ret
'''
centroids = {}
for i,j in enumerate(vsm.dictionary):
if i%100 == 0:
print i
centroids[j] = centroid(vsm,j,11)
'''
#c = time.time()
#for j,w in enumerate(vsm.dictionary):
# print j
# print time.time() - c
# c = time.time()
# ms = most_similar_lcent(vsm,csm,w,11,2)
# for k,s in enumerate(ms):
# print s
# i = vsm.dictionary[s]
# distrib[i,k] += 1
#for c in centroids:
# oFile.write(str(c) + u' ')
# for i in centroids[c]:
# oFile.write(str(i) + u' ')
# oFile.write(u'\n')
#np.save(oFile,distrib)
#oFile.close()
|
[
"numpy.power",
"numpy.zeros"
] |
[((788, 808), 'numpy.power', 'np.power', (['v1c', 'gamma'], {}), '(v1c, gamma)\n', (796, 808), True, 'import numpy as np\n'), ((1221, 1233), 'numpy.zeros', 'np.zeros', (['(10)'], {}), '(10)\n', (1229, 1233), True, 'import numpy as np\n'), ((1242, 1254), 'numpy.zeros', 'np.zeros', (['(10)'], {}), '(10)\n', (1250, 1254), True, 'import numpy as np\n')]
|
from data_collection.read_sentinel import pair_imagenames
from utils.set_user_input import set_arguments_pipeline
from utils.raster_helper import read_url_image, read_input_geometry, array2raster
import numpy as np
import rasterio
def compute_ndvi(band_inf, bands=["red", "nir"]):
"""
This function computes the ndvi (normalized difference vegetation index)
from the image resulting of the data catalog search.
"""
input_geometry = read_input_geometry(set_arguments_pipeline()["input_geometry"])
post_fix = "_band_info"
red_band = band_inf[bands[0] + post_fix]
nir_band = band_inf[bands[1] + post_fix]
imagepairs_url_list = pair_imagenames(red_band, nir_band)
ndvi_results = {}
progress_counter = 0
for image_pair in imagepairs_url_list:
band_red_url = [
red_url for red_url in imagepairs_url_list[image_pair] if "B04" in red_url
][0]
band_nir_url = [
nir_url for nir_url in imagepairs_url_list[image_pair] if "B08" in nir_url
][0]
band_red_image = read_url_image(band_red_url, input_geometry).astype(float)
band_nir_image = read_url_image(band_nir_url, input_geometry).astype(float)
ndvi_result = np.empty(band_red_image.shape, dtype=rasterio.float32)
check = np.logical_or(band_red_image > 0, band_nir_image > 0)
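        # Only compute NDVI where at least one band has data; remaining pixels are filled
        # with the nodata value -999.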
ndvi_result = np.where(
check,
(band_nir_image - band_red_image) / (band_nir_image + band_red_image),
-999,
)
array2raster(ndvi_result, input_geometry, band_red_url)
ndvi_results[image_pair] = [ndvi_result]
progress_counter += 1
print(
"{0} of {1} images processed".format(
progress_counter, len(imagepairs_url_list)
)
)
return ndvi_results
|
[
"data_collection.read_sentinel.pair_imagenames",
"utils.raster_helper.array2raster",
"numpy.empty",
"utils.set_user_input.set_arguments_pipeline",
"numpy.where",
"utils.raster_helper.read_url_image",
"numpy.logical_or"
] |
[((663, 698), 'data_collection.read_sentinel.pair_imagenames', 'pair_imagenames', (['red_band', 'nir_band'], {}), '(red_band, nir_band)\n', (678, 698), False, 'from data_collection.read_sentinel import pair_imagenames\n'), ((1229, 1283), 'numpy.empty', 'np.empty', (['band_red_image.shape'], {'dtype': 'rasterio.float32'}), '(band_red_image.shape, dtype=rasterio.float32)\n', (1237, 1283), True, 'import numpy as np\n'), ((1300, 1353), 'numpy.logical_or', 'np.logical_or', (['(band_red_image > 0)', '(band_nir_image > 0)'], {}), '(band_red_image > 0, band_nir_image > 0)\n', (1313, 1353), True, 'import numpy as np\n'), ((1376, 1472), 'numpy.where', 'np.where', (['check', '((band_nir_image - band_red_image) / (band_nir_image + band_red_image))', '(-999)'], {}), '(check, (band_nir_image - band_red_image) / (band_nir_image +\n band_red_image), -999)\n', (1384, 1472), True, 'import numpy as np\n'), ((1524, 1579), 'utils.raster_helper.array2raster', 'array2raster', (['ndvi_result', 'input_geometry', 'band_red_url'], {}), '(ndvi_result, input_geometry, band_red_url)\n', (1536, 1579), False, 'from utils.raster_helper import read_url_image, read_input_geometry, array2raster\n'), ((475, 499), 'utils.set_user_input.set_arguments_pipeline', 'set_arguments_pipeline', ([], {}), '()\n', (497, 499), False, 'from utils.set_user_input import set_arguments_pipeline\n'), ((1064, 1108), 'utils.raster_helper.read_url_image', 'read_url_image', (['band_red_url', 'input_geometry'], {}), '(band_red_url, input_geometry)\n', (1078, 1108), False, 'from utils.raster_helper import read_url_image, read_input_geometry, array2raster\n'), ((1148, 1192), 'utils.raster_helper.read_url_image', 'read_url_image', (['band_nir_url', 'input_geometry'], {}), '(band_nir_url, input_geometry)\n', (1162, 1192), False, 'from utils.raster_helper import read_url_image, read_input_geometry, array2raster\n')]
|
# -*- coding: utf-8 -*-
"""
Created on January 24, 2018
@author: neerbek
"""
# -*- coding: utf-8 -*-
import os
os.chdir("../../taboo-core")
from numpy.random import RandomState # type: ignore
from sklearn.cluster import KMeans # type: ignore
import ai_util
import confusion_matrix
import kmeans_cluster_util as kutil
import similarity.load_trees as load_trees
# import pylab # type: ignore
import matplotlib.pyplot as plt
import importlib
# importlib.reload(kutil)
importlib.reload(confusion_matrix)
# for information type 203
#
#
# run rnn on data (very low emb size)
# OMP_NUM_THREADS=3 ipython3 functionality/train_model.py -- -traintrees ../taboo-jan/functionality/203/data_full_random_cleaned.zip\$test.txt -validtrees ../taboo-jan/functionality/203/data_full_random_cleaned.zip\$dev.txt -testtrees ../taboo-jan/functionality/203/data_full_random_cleaned.zip\$test.txt -nx 50 -nh 20 -lr 0.5 -L1_reg 0 -L2_reg 0 -n_epochs -1 -retain_probability 1 -batch_size 90 -valid_batch_size 300 -glove_path ../code/glove/ -train_report_frequency 445/5 -validation_frequency 445 -file_prefix save_exp164
# Epoch 114. On validation set: Best (110, 1.065507, 77.4675%)
# OMP_NUM_THREADS=2 ipython3 functionality/run_model_verbose.py -- -inputtrees ../taboo-jan/functionality/203/data_full_random_cleaned.zip\$train.txt -nx 50 -nh 20 -L1_reg 0 -L2_reg 0 -retain_probability 1 -batch_size 90 -glove_path ../code/glove/ -inputmodel ../taboo-jan/functionality/logs/exp164_epoch480.zip\$save_exp164_best.txt -output_embeddings -max_tree_count 100 -max_count 100 -max_embedding_count 10000
#
# for realz
# OMP_NUM_THREADS=2 ipython3 functionality/run_model_verbose.py -- -inputtrees ../taboo-jan/functionality/203/data_full_random_cleaned.zip\$train.txt -nx 50 -nh 20 -L1_reg 0 -L2_reg 0 -retain_probability 1 -batch_size 90 -glove_path ../code/glove/ -inputmodel ../taboo-jan/functionality/logs/exp164_epoch480.zip\$save_exp164_best.txt -output_embeddings > train.txt
# OMP_NUM_THREADS=2 ipython3 functionality/run_model_verbose.py -- -inputtrees ../taboo-jan/functionality/203/data_full_random_cleaned.zip\$dev.txt -nx 50 -nh 20 -L1_reg 0 -L2_reg 0 -retain_probability 1 -batch_size 90 -glove_path ../code/glove/ -inputmodel ../taboo-jan/functionality/logs/exp164_epoch480.zip\$save_exp164_best.txt -output_embeddings > dev.txt
#
# zip output_embeddings_exp164_e480.zip train.txt dev.txt
# rm train.txt dev.txt
# mv output_embeddings_exp164_e480.zip output
# don't add to git (for now), we should make a backup
totaltimer = ai_util.Timer("Total time: ")
traintimer = ai_util.Timer("Train time: ")
totaltimer.begin()
inputfileTrain = "output/output_embeddings_exp164_e120.zip$train.txt"
inputfileTrain = "output/output_embeddings_exp164_e480.zip$train.txt"
linesTrainFull = confusion_matrix.read_embeddings(inputfileTrain, max_line_count=-1)
linesTrain = [linesTrainFull[i] for i in range(60000)]
inputfileDev = "output/output_embeddings_exp164_e120.zip$dev.txt"
inputfileDev = "output/output_embeddings_exp164_e480.zip$dev.txt"
linesDev = confusion_matrix.read_embeddings(inputfileDev, max_line_count=-1)
numberOfClusters = 35
randomSeed = 7485
doShow = True
low = 3 # 03
high = 22 # 16 not good
rng = RandomState(randomSeed)
aTrain = confusion_matrix.get_embedding_matrix(linesTrain, normalize=True)
aTrainFull = confusion_matrix.get_embedding_matrix(linesTrainFull, normalize=True)
aDev = confusion_matrix.get_embedding_matrix(linesDev, normalize=True)
kmeans = KMeans(n_clusters=numberOfClusters, random_state=rng).fit(aTrain)
# kmeans = KMeans(n_clusters=numberOfClusters, random_state=rng).fit(aTrainFull)
# kmeans = KMeans(n_clusters=numberOfClusters, random_state=rng).fit(aDev)
# sort_order = kutil.get_cluster_sen_ratios_sort_order(aTrainFull, linesTrainFull, kmeans)
sort_order = kutil.get_cluster_sen_ratios_sort_order(aTrain, linesTrain, kmeans)
show = kutil.SHOW_ALL
if doShow:
# plot
y1 = kutil.get_cluster_sen_ratios(aTrain, linesTrain, kmeans, sort_order)
y2 = kutil.getScaledSizes(aTrain, kmeans, sort_order)
# y1 = kutil.get_cluster_sen_ratios(aTrainFull, linesTrainFull, kmeans, sort_order)
# y2 = getScaledSizes(aTrainFull, kmeans, sort_order)
# y3 = kutil.get_cluster_sen_ratios(aDev, linesDev, kmeans, sort_order)
# y4 = getScaledSizes(aDev, kmeans, sort_order)
x = kutil.getXRange(show, low, high, numberOfClusters)
y1 = [y1[i] for i in x]
y2 = [y2[i] for i in x]
# y3 = [y3[i] for i in x]
# y4 = [y4[i] for i in x]
confusion_matrix.new_graph('Clusters', 'Ratio')
plt.plot(x, y1, 'k-', label='Sensitivity')
plt.plot(x, y2, 'k+', label='Accumulate size')
# plt.plot(x, y3, 'b-', label='Sensitivity Dev')
# plt.plot(x, y4, 'b+', label='Accumulate size Dev')
if show == kutil.SHOW_ALL:
# plt.plot((low, low), (0, 1), 'k-')
plt.plot((high, high), (0, 1), 'k:')
plt.legend()
plt.savefig('ijcai18_plot_sensitive_sorted_203.eps')
# plt.savefig('tmp.eps')
# plt.show() don't call show from an interactive prompt :(
# https://github.com/matplotlib/matplotlib/issues/8505/
clusterIds = sort_order # clusterIds == sort_order, it's just syntactic sugar
(linesC1, aC1) = kutil.get_sentences_from_clusters(clusterIds[:high], linesTrainFull, aTrainFull, kmeans)
(linesC2, aC2) = kutil.get_sentences_from_clusters(clusterIds[high:], linesTrainFull, aTrainFull, kmeans)
(lines2C1, a2C1) = kutil.get_sentences_from_clusters(clusterIds[:high], linesDev, aDev, kmeans)
(lines2C2, a2C2) = kutil.get_sentences_from_clusters(clusterIds[high:], linesDev, aDev, kmeans)
print(len(linesC1), len(linesC2))
print(len(lines2C1), len(lines2C2))
# after some iterations (unknown random seed)
# 78442 45824
# 17034 9966 (27000)
kutil.get_base_accuracy(linesC1, "train C1").report()
kutil.get_base_accuracy(linesC2, "train C2").report()
# if we want to validation score
kutil.get_base_accuracy(lines2C1, "dev C1").report()
kutil.get_base_accuracy(lines2C2, "dev C2").report()
# don't know if these values are updated!
# Accuracy (train C1): 0.9432 (0.6436), f1=0.9179 (24901 1398 49089 3054)
# Accuracy (train C2): 0.9871 (0.0128), f1=0.9935 (45224 579 8 13)
# Accuracy (dev C1): 0.9304 (0.6318), f1=0.9023 (5470 383 10379 802)
# Accuracy (dev C2): 0.9832 (0.0167), f1=0.9915 (9796 163 3 4)
|
[
"confusion_matrix.read_embeddings",
"ai_util.Timer",
"matplotlib.pyplot.plot",
"sklearn.cluster.KMeans",
"matplotlib.pyplot.legend",
"kmeans_cluster_util.get_base_accuracy",
"numpy.random.RandomState",
"kmeans_cluster_util.get_cluster_sen_ratios",
"importlib.reload",
"confusion_matrix.new_graph",
"kmeans_cluster_util.get_cluster_sen_ratios_sort_order",
"kmeans_cluster_util.getScaledSizes",
"kmeans_cluster_util.get_sentences_from_clusters",
"kmeans_cluster_util.getXRange",
"confusion_matrix.get_embedding_matrix",
"os.chdir",
"matplotlib.pyplot.savefig"
] |
[((114, 142), 'os.chdir', 'os.chdir', (['"""../../taboo-core"""'], {}), "('../../taboo-core')\n", (122, 142), False, 'import os\n'), ((473, 507), 'importlib.reload', 'importlib.reload', (['confusion_matrix'], {}), '(confusion_matrix)\n', (489, 507), False, 'import importlib\n'), ((2524, 2553), 'ai_util.Timer', 'ai_util.Timer', (['"""Total time: """'], {}), "('Total time: ')\n", (2537, 2553), False, 'import ai_util\n'), ((2567, 2596), 'ai_util.Timer', 'ai_util.Timer', (['"""Train time: """'], {}), "('Train time: ')\n", (2580, 2596), False, 'import ai_util\n'), ((2774, 2841), 'confusion_matrix.read_embeddings', 'confusion_matrix.read_embeddings', (['inputfileTrain'], {'max_line_count': '(-1)'}), '(inputfileTrain, max_line_count=-1)\n', (2806, 2841), False, 'import confusion_matrix\n'), ((3040, 3105), 'confusion_matrix.read_embeddings', 'confusion_matrix.read_embeddings', (['inputfileDev'], {'max_line_count': '(-1)'}), '(inputfileDev, max_line_count=-1)\n', (3072, 3105), False, 'import confusion_matrix\n'), ((3210, 3233), 'numpy.random.RandomState', 'RandomState', (['randomSeed'], {}), '(randomSeed)\n', (3221, 3233), False, 'from numpy.random import RandomState\n'), ((3243, 3308), 'confusion_matrix.get_embedding_matrix', 'confusion_matrix.get_embedding_matrix', (['linesTrain'], {'normalize': '(True)'}), '(linesTrain, normalize=True)\n', (3280, 3308), False, 'import confusion_matrix\n'), ((3322, 3391), 'confusion_matrix.get_embedding_matrix', 'confusion_matrix.get_embedding_matrix', (['linesTrainFull'], {'normalize': '(True)'}), '(linesTrainFull, normalize=True)\n', (3359, 3391), False, 'import confusion_matrix\n'), ((3399, 3462), 'confusion_matrix.get_embedding_matrix', 'confusion_matrix.get_embedding_matrix', (['linesDev'], {'normalize': '(True)'}), '(linesDev, normalize=True)\n', (3436, 3462), False, 'import confusion_matrix\n'), ((3799, 3866), 'kmeans_cluster_util.get_cluster_sen_ratios_sort_order', 'kutil.get_cluster_sen_ratios_sort_order', (['aTrain', 'linesTrain', 'kmeans'], {}), '(aTrain, linesTrain, kmeans)\n', (3838, 3866), True, 'import kmeans_cluster_util as kutil\n'), ((5202, 5294), 'kmeans_cluster_util.get_sentences_from_clusters', 'kutil.get_sentences_from_clusters', (['clusterIds[:high]', 'linesTrainFull', 'aTrainFull', 'kmeans'], {}), '(clusterIds[:high], linesTrainFull,\n aTrainFull, kmeans)\n', (5235, 5294), True, 'import kmeans_cluster_util as kutil\n'), ((5308, 5400), 'kmeans_cluster_util.get_sentences_from_clusters', 'kutil.get_sentences_from_clusters', (['clusterIds[high:]', 'linesTrainFull', 'aTrainFull', 'kmeans'], {}), '(clusterIds[high:], linesTrainFull,\n aTrainFull, kmeans)\n', (5341, 5400), True, 'import kmeans_cluster_util as kutil\n'), ((5416, 5492), 'kmeans_cluster_util.get_sentences_from_clusters', 'kutil.get_sentences_from_clusters', (['clusterIds[:high]', 'linesDev', 'aDev', 'kmeans'], {}), '(clusterIds[:high], linesDev, aDev, kmeans)\n', (5449, 5492), True, 'import kmeans_cluster_util as kutil\n'), ((5512, 5588), 'kmeans_cluster_util.get_sentences_from_clusters', 'kutil.get_sentences_from_clusters', (['clusterIds[high:]', 'linesDev', 'aDev', 'kmeans'], {}), '(clusterIds[high:], linesDev, aDev, kmeans)\n', (5545, 5588), True, 'import kmeans_cluster_util as kutil\n'), ((3922, 3990), 'kmeans_cluster_util.get_cluster_sen_ratios', 'kutil.get_cluster_sen_ratios', (['aTrain', 'linesTrain', 'kmeans', 'sort_order'], {}), '(aTrain, linesTrain, kmeans, sort_order)\n', (3950, 3990), True, 'import kmeans_cluster_util as kutil\n'), ((4000, 4048), 
'kmeans_cluster_util.getScaledSizes', 'kutil.getScaledSizes', (['aTrain', 'kmeans', 'sort_order'], {}), '(aTrain, kmeans, sort_order)\n', (4020, 4048), True, 'import kmeans_cluster_util as kutil\n'), ((4331, 4381), 'kmeans_cluster_util.getXRange', 'kutil.getXRange', (['show', 'low', 'high', 'numberOfClusters'], {}), '(show, low, high, numberOfClusters)\n', (4346, 4381), True, 'import kmeans_cluster_util as kutil\n'), ((4502, 4549), 'confusion_matrix.new_graph', 'confusion_matrix.new_graph', (['"""Clusters"""', '"""Ratio"""'], {}), "('Clusters', 'Ratio')\n", (4528, 4549), False, 'import confusion_matrix\n'), ((4554, 4596), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y1', '"""k-"""'], {'label': '"""Sensitivity"""'}), "(x, y1, 'k-', label='Sensitivity')\n", (4562, 4596), True, 'import matplotlib.pyplot as plt\n'), ((4601, 4647), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y2', '"""k+"""'], {'label': '"""Accumulate size"""'}), "(x, y2, 'k+', label='Accumulate size')\n", (4609, 4647), True, 'import matplotlib.pyplot as plt\n'), ((4883, 4895), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4893, 4895), True, 'import matplotlib.pyplot as plt\n'), ((4900, 4952), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""ijcai18_plot_sensitive_sorted_203.eps"""'], {}), "('ijcai18_plot_sensitive_sorted_203.eps')\n", (4911, 4952), True, 'import matplotlib.pyplot as plt\n'), ((3473, 3526), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'numberOfClusters', 'random_state': 'rng'}), '(n_clusters=numberOfClusters, random_state=rng)\n', (3479, 3526), False, 'from sklearn.cluster import KMeans\n'), ((4842, 4878), 'matplotlib.pyplot.plot', 'plt.plot', (['(high, high)', '(0, 1)', '"""k:"""'], {}), "((high, high), (0, 1), 'k:')\n", (4850, 4878), True, 'import matplotlib.pyplot as plt\n'), ((5742, 5786), 'kmeans_cluster_util.get_base_accuracy', 'kutil.get_base_accuracy', (['linesC1', '"""train C1"""'], {}), "(linesC1, 'train C1')\n", (5765, 5786), True, 'import kmeans_cluster_util as kutil\n'), ((5796, 5840), 'kmeans_cluster_util.get_base_accuracy', 'kutil.get_base_accuracy', (['linesC2', '"""train C2"""'], {}), "(linesC2, 'train C2')\n", (5819, 5840), True, 'import kmeans_cluster_util as kutil\n'), ((5883, 5926), 'kmeans_cluster_util.get_base_accuracy', 'kutil.get_base_accuracy', (['lines2C1', '"""dev C1"""'], {}), "(lines2C1, 'dev C1')\n", (5906, 5926), True, 'import kmeans_cluster_util as kutil\n'), ((5936, 5979), 'kmeans_cluster_util.get_base_accuracy', 'kutil.get_base_accuracy', (['lines2C2', '"""dev C2"""'], {}), "(lines2C2, 'dev C2')\n", (5959, 5979), True, 'import kmeans_cluster_util as kutil\n')]
|
"""
Tests for package pytools.viz.dendrogram
"""
# noinspection PyPackageRequirements
import hashlib
import logging
from io import StringIO
import numpy as np
# noinspection PyPackageRequirements
import pytest
# noinspection PyPackageRequirements
import scipy.cluster.hierarchy as hc
from pytools.viz.dendrogram import DendrogramDrawer, DendrogramReportStyle, LinkageTree
log = logging.getLogger(__name__)
@pytest.fixture
def linkage_matrix() -> np.ndarray:
"""Create a linkage matrix."""
x = np.array([[i] for i in [2, 8, 0, 4, 1, 9, 9, 0]])
return hc.linkage(x)
@pytest.fixture
def linkage_tree(linkage_matrix: np.ndarray) -> LinkageTree:
"""Create a linkage tree for drawing tests."""
return LinkageTree(
scipy_linkage_matrix=linkage_matrix,
leaf_names=list("ABCDEFGH"),
leaf_weights=[(w + 1) / 36 for w in range(8)],
)
def test_dendrogram_drawer_text(linkage_matrix: np.ndarray) -> None:
checksum_dendrogram_report = "32427095857f0589f68210ad4b2e8210"
leaf_names = list("ABCDEFGH")
leaf_weights = [(w + 1) / 36 for w in range(8)]
with pytest.raises(ValueError) as value_error:
LinkageTree(
scipy_linkage_matrix=linkage_matrix,
leaf_names=leaf_names,
leaf_weights=leaf_weights,
max_distance=1,
)
assert value_error.value.args == (
"arg max_distance=1 must be equal to or greater than the maximum distance "
"(= 4.0) in the linkage tree",
)
linkage_tree = LinkageTree(
scipy_linkage_matrix=linkage_matrix,
leaf_names=leaf_names,
leaf_weights=[(w + 1) / 36 for w in range(8)],
distance_label="distance",
leaf_label="label",
weight_label="weight",
)
with StringIO() as out:
dd = DendrogramDrawer(style=DendrogramReportStyle(out=out))
dd.draw(data=linkage_tree, title="Test")
report_str = str(out.getvalue())
log.debug(f"\n{report_str}")
assert (
hashlib.md5(str(report_str).encode("utf-8")).hexdigest()
) == checksum_dendrogram_report
|
[
"io.StringIO",
"pytools.viz.dendrogram.DendrogramReportStyle",
"scipy.cluster.hierarchy.linkage",
"pytest.raises",
"numpy.array",
"pytools.viz.dendrogram.LinkageTree",
"logging.getLogger"
] |
[((384, 411), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (401, 411), False, 'import logging\n'), ((509, 558), 'numpy.array', 'np.array', (['[[i] for i in [2, 8, 0, 4, 1, 9, 9, 0]]'], {}), '([[i] for i in [2, 8, 0, 4, 1, 9, 9, 0]])\n', (517, 558), True, 'import numpy as np\n'), ((570, 583), 'scipy.cluster.hierarchy.linkage', 'hc.linkage', (['x'], {}), '(x)\n', (580, 583), True, 'import scipy.cluster.hierarchy as hc\n'), ((1116, 1141), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1129, 1141), False, 'import pytest\n'), ((1166, 1284), 'pytools.viz.dendrogram.LinkageTree', 'LinkageTree', ([], {'scipy_linkage_matrix': 'linkage_matrix', 'leaf_names': 'leaf_names', 'leaf_weights': 'leaf_weights', 'max_distance': '(1)'}), '(scipy_linkage_matrix=linkage_matrix, leaf_names=leaf_names,\n leaf_weights=leaf_weights, max_distance=1)\n', (1177, 1284), False, 'from pytools.viz.dendrogram import DendrogramDrawer, DendrogramReportStyle, LinkageTree\n'), ((1782, 1792), 'io.StringIO', 'StringIO', ([], {}), '()\n', (1790, 1792), False, 'from io import StringIO\n'), ((1837, 1867), 'pytools.viz.dendrogram.DendrogramReportStyle', 'DendrogramReportStyle', ([], {'out': 'out'}), '(out=out)\n', (1858, 1867), False, 'from pytools.viz.dendrogram import DendrogramDrawer, DendrogramReportStyle, LinkageTree\n')]
|
#!/usr/bin/python3
import sys
#sys.path.insert(0, "/usr/local/opencv3/lib/python2.7/site-packages/")
import argparse
#import commands
import cv2
import fnmatch
import numpy as np
import os.path
import random
import navpy
sys.path.append('../lib')
import AC3D
import Pose
import ProjectMgr
import SRTM
import transformations
# for all the images in the project image_dir, compute the camera
# poses from the aircraft pose (and camera mounting transform).
# Project the image plane onto an SRTM (DEM) surface for our best
# layout guess (at this point before we do any matching/bundle
# adjustment work.)
parser = argparse.ArgumentParser(description='Set the initial camera poses.')
parser.add_argument('--project', required=True, help='project directory')
parser.add_argument('--texture-resolution', type=int, default=512, help='texture resolution (should be 2**n, so numbers like 256, 512, 1024, etc.')
parser.add_argument('--ground', type=float, help='ground elevation in meters')
parser.add_argument('--sba', action='store_true', help='use sba pose')
args = parser.parse_args()
proj = ProjectMgr.ProjectMgr(args.project)
proj.load_image_info()
ref = proj.ned_reference_lla
# setup SRTM ground interpolator
sss = SRTM.NEDGround( ref, 6000, 6000, 30 )
ac3d_steps = 8
# compute the uv grid for each image and project each point out into
# ned space, then intersect each vector with the srtm ground.
# build our local image list for placing
print(args.sba)
if not args.sba:
image_list = proj.image_list
else:
image_list = []
for image in proj.image_list:
        if image.camera_pose_sba is not None:
#print image.camera_pose_sba
image_list.append(image)
depth = 0.0
camw, camh = proj.cam.get_image_params()
for image in image_list:
print(image.name)
# scale the K matrix if we have scaled the images
scale = float(image.width) / float(camw)
K = proj.cam.get_K(scale)
IK = np.linalg.inv(K)
grid_list = []
u_list = np.linspace(0, image.width, ac3d_steps + 1)
v_list = np.linspace(0, image.height, ac3d_steps + 1)
#print "u_list:", u_list
#print "v_list:", v_list
for v in v_list:
for u in u_list:
grid_list.append( [u, v] )
print('grid_list:', grid_list)
if not args.sba:
proj_list = proj.projectVectors( IK, image.get_body2ned(),
image.get_cam2body(), grid_list )
else:
print(image.get_body2ned_sba())
proj_list = proj.projectVectors( IK, image.get_body2ned_sba(),
image.get_cam2body(), grid_list )
print('proj_list:', proj_list)
if not args.sba:
ned = image.camera_pose['ned']
else:
ned = image.camera_pose_sba['ned']
print('ned', image.camera_pose['ned'], ned)
if args.ground:
pts_ned = proj.intersectVectorsWithGroundPlane(ned,
args.ground, proj_list)
else:
pts_ned = sss.interpolate_vectors(ned, proj_list)
print("pts_3d (ned):\n", pts_ned)
# convert ned to xyz and stash the result for each image
image.grid_list = []
ground_sum = 0
for p in pts_ned:
image.grid_list.append( [p[1], p[0], -(p[2]+depth)] )
ground_sum += -p[2]
depth -= 0.01 # favor last pictures above earlier ones
# call the ac3d generator
AC3D.generate(image_list, src_dir=proj.source_dir,
project_dir=args.project, base_name='direct',
version=1.0, trans=0.1, resolution=args.texture_resolution)
if not args.ground:
print('Avg ground elevation (SRTM):', ground_sum / len(pts_ned))
|
[
"sys.path.append",
"argparse.ArgumentParser",
"numpy.linalg.inv",
"numpy.linspace",
"SRTM.NEDGround",
"AC3D.generate",
"ProjectMgr.ProjectMgr"
] |
[((224, 249), 'sys.path.append', 'sys.path.append', (['"""../lib"""'], {}), "('../lib')\n", (239, 249), False, 'import sys\n'), ((617, 685), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Set the initial camera poses."""'}), "(description='Set the initial camera poses.')\n", (640, 685), False, 'import argparse\n'), ((1094, 1129), 'ProjectMgr.ProjectMgr', 'ProjectMgr.ProjectMgr', (['args.project'], {}), '(args.project)\n', (1115, 1129), False, 'import ProjectMgr\n'), ((1223, 1258), 'SRTM.NEDGround', 'SRTM.NEDGround', (['ref', '(6000)', '(6000)', '(30)'], {}), '(ref, 6000, 6000, 30)\n', (1237, 1258), False, 'import SRTM\n'), ((3435, 3600), 'AC3D.generate', 'AC3D.generate', (['image_list'], {'src_dir': 'proj.source_dir', 'project_dir': 'args.project', 'base_name': '"""direct"""', 'version': '(1.0)', 'trans': '(0.1)', 'resolution': 'args.texture_resolution'}), "(image_list, src_dir=proj.source_dir, project_dir=args.project,\n base_name='direct', version=1.0, trans=0.1, resolution=args.\n texture_resolution)\n", (3448, 3600), False, 'import AC3D\n'), ((1948, 1964), 'numpy.linalg.inv', 'np.linalg.inv', (['K'], {}), '(K)\n', (1961, 1964), True, 'import numpy as np\n'), ((1998, 2041), 'numpy.linspace', 'np.linspace', (['(0)', 'image.width', '(ac3d_steps + 1)'], {}), '(0, image.width, ac3d_steps + 1)\n', (2009, 2041), True, 'import numpy as np\n'), ((2055, 2099), 'numpy.linspace', 'np.linspace', (['(0)', 'image.height', '(ac3d_steps + 1)'], {}), '(0, image.height, ac3d_steps + 1)\n', (2066, 2099), True, 'import numpy as np\n')]
|
import gc
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import torch
import torch.nn as nn
import torchvision
import sys
# To view tensorboard metrics
# tensorboard --logdir=logs --port=6006 --bind_all
from torch.utils.tensorboard import SummaryWriter
from functools import partial
from evolver import CrossoverType, MutationType, InitType, MatrixEvolver, VectorEvolver
from unet import UNet
from dataset_utils import PartitionType
from cuda_utils import maybe_get_cuda_device, clear_cuda
from landcover_dataloader import get_landcover_dataloaders, get_landcover_dataloader
from ignite.contrib.handlers.tensorboard_logger import *
from ignite.engine import Events, create_supervised_trainer, create_supervised_evaluator
from ignite.metrics import Accuracy, Loss, ConfusionMatrix, mIoU
from ignite.handlers import ModelCheckpoint
from ignite.utils import setup_logger
from ignite.engine import Engine
# Define directories for data, logging and model saving.
base_dir = os.getcwd()
dataset_name = "landcover_large"
dataset_dir = os.path.join(base_dir, "data/" + dataset_name)
experiment_name = "dropout_single_point_finetuning_100_children"
model_name = "best_model_9_validation_accuracy=0.8940.pt"
model_path = os.path.join(base_dir, "logs/" + dataset_name + "/" + model_name)
log_dir = os.path.join(base_dir, "logs/" + dataset_name + "_" + experiment_name)
# Create DataLoaders for each partition of Landcover data.
dataloader_params = {
'batch_size': 8,
'shuffle': True,
'num_workers': 6,
'pin_memory': True}
partition_types = [PartitionType.TRAIN, PartitionType.VALIDATION,
PartitionType.FINETUNING, PartitionType.TEST]
data_loaders = get_landcover_dataloaders(dataset_dir,
partition_types,
dataloader_params,
force_create_dataset=False)
train_loader = data_loaders[0]
finetuning_loader = data_loaders[2]
dataloader_params['shuffle'] = False
test_loader = get_landcover_dataloader(dataset_dir, PartitionType.TEST, dataloader_params)
# Get GPU device if available.
device = maybe_get_cuda_device()
# Determine model and training params.
params = {
'max_epochs': 10,
'n_classes': 4,
'in_channels': 4,
'depth': 5,
'learning_rate': 0.001,
'log_steps': 1,
'save_top_n_models': 4,
'num_children': 100
}
clear_cuda()
model = UNet(in_channels = params['in_channels'],
n_classes = params['n_classes'],
depth = params['depth'])
model.load_state_dict(torch.load(model_path))
# Create Trainer or Evaluators
criterion = nn.NLLLoss()
optimizer = torch.optim.Adam(model.parameters(),
lr=params['learning_rate'])
# Determine metrics for evaluation.
metrics = {
"accuracy": Accuracy(),
"loss": Loss(criterion),
"mean_iou": mIoU(ConfusionMatrix(num_classes = params['n_classes'])),
}
for batch in train_loader:
batch_x = batch[0]
_ = model(batch_x)
break
drop_out_layers = model.get_dropout_layers()
del model, batch_x
clear_cuda()
for layer in drop_out_layers:
layer_name = layer.name
size = layer.x_size[1:]
sizes = [size]
clear_cuda()
model = UNet(in_channels = params['in_channels'],
n_classes = params['n_classes'],
depth = params['depth'])
model.load_state_dict(torch.load(model_path))
model.to(device)
criterion = nn.NLLLoss()
optimizer = torch.optim.Adam(model.parameters(),
lr=params['learning_rate'])
num_channels = size[0]
evolver = VectorEvolver(num_channels,
CrossoverType.UNIFORM,
MutationType.FLIP_BIT,
InitType.RANDOM,
flip_bit_prob=0.25,
flip_bit_decay=0.5)
log_dir_test = log_dir + "_" + layer_name
def mask_from_vec(vec, matrix_size):
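        # Expand a binary gene vector into a per-channel mask: gene 0 zeroes the whole
        # channel, gene 1 keeps it unchanged.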
mask = np.ones(matrix_size)
for i in range(len(vec)):
if vec[i] == 0:
mask[i, :, :] = 0
elif vec[i] == 1:
mask[i, :, :] = 1
return mask
def dropout_finetune_step(engine, batch):
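        # Evolutionary search over channel-dropout masks for this layer: each batch scores
        # num_children candidate masks by the NLL loss at a single output pixel, feeds the
        # reciprocal losses back to the evolver, and leaves the best-scoring mask active.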
model.eval()
with torch.no_grad():
batch_x, batch_y = batch
batch_x = batch_x.to(device)
batch_y = batch_y.to(device)
loss = sys.float_info.max
for i in range(params['num_children']):
model.zero_grad()
child_vec = evolver.spawn_child()
child_mask = mask_from_vec(child_vec, size)
model.set_dropout_masks({layer_name: torch.tensor(child_mask, dtype=torch.float32).to(device)})
outputs = model(batch_x)
current_loss = criterion(outputs[:, :, 127:128,127:128], batch_y[:,127:128,127:128]).item()
loss = min(loss, current_loss)
if current_loss == 0.0:
current_loss = sys.float_info.max
else:
current_loss = 1.0 / current_loss
evolver.add_child(child_vec, current_loss)
priority, best_child = evolver.get_best_child()
best_mask = mask_from_vec(best_child, size)
model.set_dropout_masks({layer_name: torch.tensor(best_mask, dtype=torch.float32).to(device)})
return loss
# Create Trainer or Evaluators
trainer = Engine(dropout_finetune_step)
train_evaluator = create_supervised_evaluator(model, metrics=metrics, device=device)
validation_evaluator = create_supervised_evaluator(model, metrics=metrics, device=device)
trainer.logger = setup_logger("Trainer")
train_evaluator.logger = setup_logger("Train Evaluator")
validation_evaluator.logger = setup_logger("Validation Evaluator")
@trainer.on(Events.ITERATION_COMPLETED(every=1))
def report_evolver_stats(engine):
priorities = np.array(evolver.get_generation_priorities())
# Take reciprocal since we needed to store priorities in min heap.
priorities = 1.0 / priorities
tb_logger.writer.add_scalar("training/evolver_count",
priorities.shape[0], engine.state.iteration)
tb_logger.writer.add_scalar("training/evolver_mean",
np.mean(priorities), engine.state.iteration)
tb_logger.writer.add_scalar("training/evolver_std",
np.std(priorities), engine.state.iteration)
evolver.update_parents()
@trainer.on(Events.EPOCH_COMPLETED)
def visualize_validation_predictions(engine):
for i, batch in enumerate(test_loader):
batch_x, batch_y = batch
batch_x = batch_x.to(device)
batch_y = batch_y.to(device)
outputs = model(batch_x)
num_images = batch_x.shape[0]
batch_y_detach = batch_y.detach().cpu().numpy()
batch_x_detach = batch_x.detach().cpu().numpy()
outputs_detach = outputs.detach().cpu().numpy()
for j in range(num_images):
f, ax = plt.subplots(1, 3, figsize=(10, 4))
ax[0].imshow(np.moveaxis(batch_x_detach[j, :, :, :], [0], [2]) / 255.0)
ax[1].imshow((np.array(batch_y_detach[j, :, :])))
ax[2].imshow(np.argmax(np.moveaxis(np.array(outputs_detach[j, :, :, :]), [0],[ 2]), axis=2))
ax[0].set_title("X")
ax[1].set_title("Y")
ax[2].set_title("Predict")
                f.suptitle("Layer: " + layer_name + " Iteration: " + str(engine.state.iteration) + " Image: " + str(j))
plt.show()
if i > 5:
break
break
# Tensorboard Logger setup below based on pytorch ignite example
# https://github.com/pytorch/ignite/blob/master/examples/contrib/mnist/mnist_with_tensorboard_logger.py
@trainer.on(Events.EPOCH_COMPLETED)
def compute_metrics(engine):
"""Callback to compute metrics on the train and validation data."""
train_evaluator.run(finetuning_loader)
validation_evaluator.run(test_loader)
def score_function(engine):
"""Function to determine the metric upon which to compare model."""
return engine.state.metrics["accuracy"]
# Setup Tensor Board Logging
tb_logger = TensorboardLogger(log_dir=log_dir_test)
tb_logger.attach_output_handler(
trainer,
event_name=Events.ITERATION_COMPLETED(every=params['log_steps']),
tag="training",
output_transform=lambda loss: {"batchloss": loss},
metric_names="all",
)
for tag, evaluator in [("training", train_evaluator), ("validation", validation_evaluator)]:
tb_logger.attach_output_handler(
evaluator,
event_name=Events.EPOCH_COMPLETED,
tag=tag,
metric_names="all",
global_step_transform=global_step_from_engine(trainer),
)
tb_logger.attach_opt_params_handler(trainer,
event_name=Events.ITERATION_COMPLETED(every=params['log_steps']),
optimizer=optimizer)
model_checkpoint = ModelCheckpoint(
log_dir_test,
n_saved=params['save_top_n_models'],
filename_prefix="best",
score_function=score_function,
score_name="validation_accuracy",
global_step_transform=global_step_from_engine(trainer),
)
validation_evaluator.add_event_handler(Events.COMPLETED, model_checkpoint, {"model": model})
trainer.run(finetuning_loader, max_epochs=params['max_epochs'])
tb_logger.close()
|
[
"numpy.moveaxis",
"numpy.ones",
"ignite.engine.create_supervised_evaluator",
"torch.nn.NLLLoss",
"numpy.mean",
"landcover_dataloader.get_landcover_dataloaders",
"cuda_utils.maybe_get_cuda_device",
"torch.no_grad",
"cuda_utils.clear_cuda",
"os.path.join",
"numpy.std",
"torch.load",
"evolver.VectorEvolver",
"matplotlib.pyplot.subplots",
"ignite.metrics.ConfusionMatrix",
"matplotlib.pyplot.show",
"ignite.metrics.Accuracy",
"ignite.engine.Events.ITERATION_COMPLETED",
"unet.UNet",
"os.getcwd",
"ignite.engine.Engine",
"ignite.utils.setup_logger",
"numpy.array",
"landcover_dataloader.get_landcover_dataloader",
"ignite.metrics.Loss",
"torch.tensor"
] |
[((1006, 1017), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1015, 1017), False, 'import os\n'), ((1065, 1111), 'os.path.join', 'os.path.join', (['base_dir', "('data/' + dataset_name)"], {}), "(base_dir, 'data/' + dataset_name)\n", (1077, 1111), False, 'import os\n'), ((1249, 1314), 'os.path.join', 'os.path.join', (['base_dir', "('logs/' + dataset_name + '/' + model_name)"], {}), "(base_dir, 'logs/' + dataset_name + '/' + model_name)\n", (1261, 1314), False, 'import os\n'), ((1325, 1395), 'os.path.join', 'os.path.join', (['base_dir', "('logs/' + dataset_name + '_' + experiment_name)"], {}), "(base_dir, 'logs/' + dataset_name + '_' + experiment_name)\n", (1337, 1395), False, 'import os\n'), ((1714, 1820), 'landcover_dataloader.get_landcover_dataloaders', 'get_landcover_dataloaders', (['dataset_dir', 'partition_types', 'dataloader_params'], {'force_create_dataset': '(False)'}), '(dataset_dir, partition_types, dataloader_params,\n force_create_dataset=False)\n', (1739, 1820), False, 'from landcover_dataloader import get_landcover_dataloaders, get_landcover_dataloader\n'), ((2062, 2138), 'landcover_dataloader.get_landcover_dataloader', 'get_landcover_dataloader', (['dataset_dir', 'PartitionType.TEST', 'dataloader_params'], {}), '(dataset_dir, PartitionType.TEST, dataloader_params)\n', (2086, 2138), False, 'from landcover_dataloader import get_landcover_dataloaders, get_landcover_dataloader\n'), ((2181, 2204), 'cuda_utils.maybe_get_cuda_device', 'maybe_get_cuda_device', ([], {}), '()\n', (2202, 2204), False, 'from cuda_utils import maybe_get_cuda_device, clear_cuda\n'), ((2439, 2451), 'cuda_utils.clear_cuda', 'clear_cuda', ([], {}), '()\n', (2449, 2451), False, 'from cuda_utils import maybe_get_cuda_device, clear_cuda\n'), ((2464, 2561), 'unet.UNet', 'UNet', ([], {'in_channels': "params['in_channels']", 'n_classes': "params['n_classes']", 'depth': "params['depth']"}), "(in_channels=params['in_channels'], n_classes=params['n_classes'],\n depth=params['depth'])\n", (2468, 2561), False, 'from unet import UNet\n'), ((2679, 2691), 'torch.nn.NLLLoss', 'nn.NLLLoss', ([], {}), '()\n', (2689, 2691), True, 'import torch.nn as nn\n'), ((3147, 3159), 'cuda_utils.clear_cuda', 'clear_cuda', ([], {}), '()\n', (3157, 3159), False, 'from cuda_utils import maybe_get_cuda_device, clear_cuda\n'), ((2612, 2634), 'torch.load', 'torch.load', (['model_path'], {}), '(model_path)\n', (2622, 2634), False, 'import torch\n'), ((2868, 2878), 'ignite.metrics.Accuracy', 'Accuracy', ([], {}), '()\n', (2876, 2878), False, 'from ignite.metrics import Accuracy, Loss, ConfusionMatrix, mIoU\n'), ((2897, 2912), 'ignite.metrics.Loss', 'Loss', (['criterion'], {}), '(criterion)\n', (2901, 2912), False, 'from ignite.metrics import Accuracy, Loss, ConfusionMatrix, mIoU\n'), ((3270, 3282), 'cuda_utils.clear_cuda', 'clear_cuda', ([], {}), '()\n', (3280, 3282), False, 'from cuda_utils import maybe_get_cuda_device, clear_cuda\n'), ((3299, 3396), 'unet.UNet', 'UNet', ([], {'in_channels': "params['in_channels']", 'n_classes': "params['n_classes']", 'depth': "params['depth']"}), "(in_channels=params['in_channels'], n_classes=params['n_classes'],\n depth=params['depth'])\n", (3303, 3396), False, 'from unet import UNet\n'), ((3521, 3533), 'torch.nn.NLLLoss', 'nn.NLLLoss', ([], {}), '()\n', (3531, 3533), True, 'import torch.nn as nn\n'), ((3695, 3829), 'evolver.VectorEvolver', 'VectorEvolver', (['num_channels', 'CrossoverType.UNIFORM', 'MutationType.FLIP_BIT', 'InitType.RANDOM'], {'flip_bit_prob': '(0.25)', 'flip_bit_decay': '(0.5)'}), 
'(num_channels, CrossoverType.UNIFORM, MutationType.FLIP_BIT,\n InitType.RANDOM, flip_bit_prob=0.25, flip_bit_decay=0.5)\n', (3708, 3829), False, 'from evolver import CrossoverType, MutationType, InitType, MatrixEvolver, VectorEvolver\n'), ((5618, 5647), 'ignite.engine.Engine', 'Engine', (['dropout_finetune_step'], {}), '(dropout_finetune_step)\n', (5624, 5647), False, 'from ignite.engine import Engine\n'), ((5670, 5736), 'ignite.engine.create_supervised_evaluator', 'create_supervised_evaluator', (['model'], {'metrics': 'metrics', 'device': 'device'}), '(model, metrics=metrics, device=device)\n', (5697, 5736), False, 'from ignite.engine import Events, create_supervised_trainer, create_supervised_evaluator\n'), ((5764, 5830), 'ignite.engine.create_supervised_evaluator', 'create_supervised_evaluator', (['model'], {'metrics': 'metrics', 'device': 'device'}), '(model, metrics=metrics, device=device)\n', (5791, 5830), False, 'from ignite.engine import Events, create_supervised_trainer, create_supervised_evaluator\n'), ((5852, 5875), 'ignite.utils.setup_logger', 'setup_logger', (['"""Trainer"""'], {}), "('Trainer')\n", (5864, 5875), False, 'from ignite.utils import setup_logger\n'), ((5905, 5936), 'ignite.utils.setup_logger', 'setup_logger', (['"""Train Evaluator"""'], {}), "('Train Evaluator')\n", (5917, 5936), False, 'from ignite.utils import setup_logger\n'), ((5971, 6007), 'ignite.utils.setup_logger', 'setup_logger', (['"""Validation Evaluator"""'], {}), "('Validation Evaluator')\n", (5983, 6007), False, 'from ignite.utils import setup_logger\n'), ((2939, 2987), 'ignite.metrics.ConfusionMatrix', 'ConfusionMatrix', ([], {'num_classes': "params['n_classes']"}), "(num_classes=params['n_classes'])\n", (2954, 2987), False, 'from ignite.metrics import Accuracy, Loss, ConfusionMatrix, mIoU\n'), ((3459, 3481), 'torch.load', 'torch.load', (['model_path'], {}), '(model_path)\n', (3469, 3481), False, 'import torch\n'), ((4078, 4098), 'numpy.ones', 'np.ones', (['matrix_size'], {}), '(matrix_size)\n', (4085, 4098), True, 'import numpy as np\n'), ((6025, 6060), 'ignite.engine.Events.ITERATION_COMPLETED', 'Events.ITERATION_COMPLETED', ([], {'every': '(1)'}), '(every=1)\n', (6051, 6060), False, 'from ignite.engine import Events, create_supervised_trainer, create_supervised_evaluator\n'), ((4366, 4381), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4379, 4381), False, 'import torch\n'), ((6520, 6539), 'numpy.mean', 'np.mean', (['priorities'], {}), '(priorities)\n', (6527, 6539), True, 'import numpy as np\n'), ((6661, 6679), 'numpy.std', 'np.std', (['priorities'], {}), '(priorities)\n', (6667, 6679), True, 'import numpy as np\n'), ((8732, 8785), 'ignite.engine.Events.ITERATION_COMPLETED', 'Events.ITERATION_COMPLETED', ([], {'every': "params['log_steps']"}), "(every=params['log_steps'])\n", (8758, 8785), False, 'from ignite.engine import Events, create_supervised_trainer, create_supervised_evaluator\n'), ((9346, 9399), 'ignite.engine.Events.ITERATION_COMPLETED', 'Events.ITERATION_COMPLETED', ([], {'every': "params['log_steps']"}), "(every=params['log_steps'])\n", (9372, 9399), False, 'from ignite.engine import Events, create_supervised_trainer, create_supervised_evaluator\n'), ((7349, 7384), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(3)'], {'figsize': '(10, 4)'}), '(1, 3, figsize=(10, 4))\n', (7361, 7384), True, 'import matplotlib.pyplot as plt\n'), ((7906, 7916), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7914, 7916), True, 'import matplotlib.pyplot as plt\n'), ((7503, 
7536), 'numpy.array', 'np.array', (['batch_y_detach[j, :, :]'], {}), '(batch_y_detach[j, :, :])\n', (7511, 7536), True, 'import numpy as np\n'), ((7414, 7463), 'numpy.moveaxis', 'np.moveaxis', (['batch_x_detach[j, :, :, :]', '[0]', '[2]'], {}), '(batch_x_detach[j, :, :, :], [0], [2])\n', (7425, 7463), True, 'import numpy as np\n'), ((5486, 5530), 'torch.tensor', 'torch.tensor', (['best_mask'], {'dtype': 'torch.float32'}), '(best_mask, dtype=torch.float32)\n', (5498, 5530), False, 'import torch\n'), ((7590, 7626), 'numpy.array', 'np.array', (['outputs_detach[j, :, :, :]'], {}), '(outputs_detach[j, :, :, :])\n', (7598, 7626), True, 'import numpy as np\n'), ((4789, 4834), 'torch.tensor', 'torch.tensor', (['child_mask'], {'dtype': 'torch.float32'}), '(child_mask, dtype=torch.float32)\n', (4801, 4834), False, 'import torch\n')]
|
"""original source: https://github.com/chainer/chainerrl/pull/480
MIT License
Copyright (c) Preferred Networks, Inc.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from __future__ import absolute_import
from builtins import *
from future import standard_library
standard_library.install_aliases()
import argparse
from inspect import getsourcefile
import os
import sys
import numpy as np
import chainer
import minerl # noqa: register MineRL envs as Gym envs.
import gym
import chainerrl
from chainerrl import experiments, explorers
from chainerrl.experiments.evaluator import Evaluator
from dqfd import DQfD, PrioritizedDemoReplayBuffer
from q_functions import CNNBranchingQFunction
from env_wrappers import (
BranchedRandomizedAction, BranchedActionWrapper,
MoveAxisWrapper, FrameSkip, FrameStack, ObtainPoVWrapper,
PoVWithCompassAngleWrapper, FullObservationSpaceWrapper)
from expert_converter import choose_top_experts, fill_buffer
class ScaleGradHook(object):
name = 'ScaleGrad'
call_for_each_param = True
timing = 'pre'
def __init__(self, scale):
self.scale = scale
def __call__(self, rule, param):
if getattr(param, 'scale_param', False):
param.grad *= self.scale
def main():
"""Parses arguments and runs the example
"""
parser = argparse.ArgumentParser()
parser.add_argument('--env', type=str, default='MineRLTreechop-v0',
choices=[
'MineRLTreechop-v0',
'MineRLNavigate-v0', 'MineRLNavigateDense-v0', 'MineRLNavigateExtreme-v0', 'MineRLNavigateExtremeDense-v0',
'MineRLObtainIronPickaxe-v0', 'MineRLObtainIronPickaxeDense-v0',
'MineRLObtainDiamond-v0', 'MineRLObtainDiamondDense-v0',
'MineRLNavigateDenseFixed-v0' # for debug use
],
help='MineRL environment identifier')
parser.add_argument('--outdir', type=str, default='results',
help='Directory path to save output files.'
' If it does not exist, it will be created.')
parser.add_argument('--seed', type=int, default=0,
help='Random seed [0, 2 ** 31)')
parser.add_argument('--gpu', type=int, default=-1,
help='GPU to use, set to -1 if no GPU.')
parser.add_argument('--final-exploration-frames',
type=int, default=10**6,
help='Timesteps after which we stop ' +
'annealing exploration rate')
parser.add_argument('--final-epsilon', type=float, default=0.01,
help='Final value of epsilon during training.')
parser.add_argument('--eval-epsilon', type=float, default=0.001,
help='Exploration epsilon used during eval episodes.')
parser.add_argument('--replay-start-size', type=int, default=1000,
help='Minimum replay buffer size before ' +
'performing gradient updates.')
parser.add_argument('--target-update-interval', type=int, default=10**4,
help='Frequency (in timesteps) at which ' +
'the target network is updated.')
parser.add_argument('--update-interval', type=int, default=4,
help='Frequency (in timesteps) of network updates.')
parser.add_argument('--eval-n-runs', type=int, default=10)
parser.add_argument('--no-clip-delta',
dest='clip_delta', action='store_false')
parser.add_argument('--error-max', type=float, default=1.0)
parser.add_argument('--num-step-return', type=int, default=10)
parser.set_defaults(clip_delta=True)
parser.add_argument('--logging-level', type=int, default=20,
help='Logging level. 10:DEBUG, 20:INFO etc.')
parser.add_argument('--logging-filename', type=str, default=None)
parser.add_argument('--monitor', action='store_true', default=False,
                        help='Monitor env. Videos and additional information are saved as output files during evaluation.')
# parser.add_argument('--render', action='store_true', default=False,
# help='Render env states in a GUI window.')
parser.add_argument('--optimizer', type=str, default='rmsprop',
choices=['rmsprop', 'adam'])
parser.add_argument('--lr', type=float, default=2.5e-4, help='Learning rate')
parser.add_argument("--replay-buffer-size", type=int, default=10**6,
help="Size of replay buffer (Excluding demonstrations)")
parser.add_argument("--minibatch-size", type=int, default=32)
parser.add_argument('--batch-accumulator', type=str, default="sum")
parser.add_argument('--demo', action='store_true', default=False)
parser.add_argument('--load', type=str, default=None)
parser.add_argument("--save-demo-trajectories", action="store_true",
default=False)
# DQfD specific parameters for loading and pretraining.
parser.add_argument('--n-experts', type=int, default=10)
parser.add_argument('--expert-demo-path', type=str, default=None)
parser.add_argument('--n-pretrain-steps', type=int, default=750000)
parser.add_argument('--demo-supervised-margin', type=float, default=0.8)
parser.add_argument('--loss-coeff-l2', type=float, default=1e-5)
parser.add_argument('--loss-coeff-nstep', type=float, default=1.0)
parser.add_argument('--loss-coeff-supervised', type=float, default=1.0)
parser.add_argument('--bonus-priority-agent', type=float, default=0.001)
parser.add_argument('--bonus-priority-demo', type=float, default=1.0)
# Action branching architecture
parser.add_argument('--gradient-clipping', action='store_true', default=False)
parser.add_argument('--gradient-rescaling', action='store_true', default=False)
# NoisyNet parameters
parser.add_argument('--use-noisy-net', type=str, default=None,
choices=['before-pretraining', 'after-pretraining'])
parser.add_argument('--noisy-net-sigma', type=float, default=0.5)
# Parameters for state/action handling
parser.add_argument('--frame-stack', type=int, default=None, help='Number of frames stacked (None for disable).')
parser.add_argument('--frame-skip', type=int, default=None, help='Number of frames skipped (None for disable).')
parser.add_argument('--camera-atomic-actions', type=int, default=10)
parser.add_argument('--max-range-of-camera', type=float, default=10.)
parser.add_argument('--use-full-observation', action='store_true', default=False)
args = parser.parse_args()
    assert args.expert_demo_path is not None, \
        "DQfD needs collected expert demonstrations"
import logging
if args.logging_filename is not None:
logging.basicConfig(filename=args.logging_filename, filemode='w',
level=args.logging_level)
else:
logging.basicConfig(level=args.logging_level)
logger = logging.getLogger(__name__)
train_seed = args.seed
test_seed = 2 ** 31 - 1 - args.seed
chainerrl.misc.set_random_seed(args.seed, gpus=(args.gpu,))
args.outdir = experiments.prepare_output_dir(args, args.outdir)
logger.info('Output files are saved in {}'.format(args.outdir))
if args.env == 'MineRLTreechop-v0':
branch_sizes = [9, 16, args.camera_atomic_actions, args.camera_atomic_actions]
elif args.env in ['MineRLNavigate-v0', 'MineRLNavigateDense-v0',
'MineRLNavigateExtreme-v0', 'MineRLNavigateExtremeDense-v0']:
branch_sizes = [9, 16, args.camera_atomic_actions, args.camera_atomic_actions, 2]
elif args.env in ['MineRLObtainIronPickaxe-v0', 'MineRLObtainIronPickaxeDense-v0',
'MineRLObtainDiamond-v0', 'MineRLObtainDiamondDense-v0']:
branch_sizes = [9, 16, args.camera_atomic_actions, args.camera_atomic_actions, 32]
else:
raise Exception("Unknown environment")
def make_env(env, test):
# wrap env: observation...
# NOTE: wrapping order matters!
if args.use_full_observation:
env = FullObservationSpaceWrapper(env)
elif args.env.startswith('MineRLNavigate'):
env = PoVWithCompassAngleWrapper(env)
else:
env = ObtainPoVWrapper(env)
if test and args.monitor:
env = gym.wrappers.Monitor(
env, os.path.join(args.outdir, 'monitor'),
mode='evaluation' if test else 'training', video_callable=lambda episode_id: True)
if args.frame_skip is not None:
env = FrameSkip(env, skip=args.frame_skip)
# convert hwc -> chw as Chainer requires
env = MoveAxisWrapper(env, source=-1, destination=0,
use_tuple=args.use_full_observation)
#env = ScaledFloatFrame(env)
if args.frame_stack is not None:
env = FrameStack(env, args.frame_stack, channel_order='chw',
use_tuple=args.use_full_observation)
# wrap env: action...
env = BranchedActionWrapper(env, branch_sizes, args.camera_atomic_actions, args.max_range_of_camera)
if test:
env = BranchedRandomizedAction(env, branch_sizes, args.eval_epsilon)
env_seed = test_seed if test else train_seed
env.seed(int(env_seed))
return env
core_env = gym.make(args.env)
env = make_env(core_env, test=False)
eval_env = make_env(core_env, test=True)
# Q function
if args.env.startswith('MineRLNavigate'):
if args.use_full_observation:
base_channels = 3 # RGB
else:
base_channels = 4 # RGB + compass
elif args.env.startswith('MineRLObtain'):
base_channels = 3 # RGB
else:
base_channels = 3 # RGB
if args.frame_stack is None:
n_input_channels = base_channels
else:
n_input_channels = base_channels * args.frame_stack
q_func = CNNBranchingQFunction(branch_sizes,
n_input_channels=n_input_channels,
gradient_rescaling=args.gradient_rescaling,
use_tuple=args.use_full_observation)
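    # phi converts a raw observation into float32 network input, scaling PoV
    # pixel values from [0, 255] down to [0, 1].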
def phi(x):
# observation -> NN input
if args.use_full_observation:
pov = np.asarray(x[0], dtype=np.float32)
others = np.asarray(x[1], dtype=np.float32)
return (pov / 255, others)
else:
return np.asarray(x, dtype=np.float32) / 255
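    # The epsilon-greedy explorer draws a random action by sampling one index
    # per action branch (uniformly within each branch's size).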
explorer = explorers.LinearDecayEpsilonGreedy(
1.0, args.final_epsilon,
args.final_exploration_frames,
lambda: np.array([np.random.randint(n) for n in branch_sizes]))
# Draw the computational graph and save it in the output directory.
if args.use_full_observation:
sample_obs = tuple([x[None] for x in env.observation_space.sample()])
else:
sample_obs = env.observation_space.sample()[None]
chainerrl.misc.draw_computational_graph(
[q_func(phi(sample_obs))], os.path.join(args.outdir, 'model'))
if args.optimizer == 'rmsprop':
opt = chainer.optimizers.RMSpropGraves(args.lr, alpha=0.95, momentum=0.0, eps=1e-2)
elif args.optimizer == 'adam':
opt = chainer.optimizers.Adam(args.lr)
if args.use_noisy_net is None:
opt.setup(q_func)
if args.gradient_rescaling:
opt.add_hook(ScaleGradHook(1 / (1 + len(q_func.branch_sizes))))
if args.gradient_clipping:
opt.add_hook(chainer.optimizer_hooks.GradientClipping(10.0))
# calculate corresponding `steps` and `eval_interval` according to frameskip
maximum_frames = 8640000 # = 1440 episodes if we count an episode as 6000 frames.
if args.frame_skip is None:
steps = maximum_frames
eval_interval = 6000 * 100 # (approx.) every 100 episode (counts "1 episode = 6000 steps")
else:
steps = maximum_frames // args.frame_skip
eval_interval = 6000 * 100 // args.frame_skip # (approx.) every 100 episode (counts "1 episode = 6000 steps")
# Anneal beta from beta0 to 1 throughout training
betasteps = steps / args.update_interval
replay_buffer = PrioritizedDemoReplayBuffer(
args.replay_buffer_size, alpha=0.4,
beta0=0.6, betasteps=betasteps,
error_max=args.error_max,
num_steps=args.num_step_return)
# Fill the demo buffer with expert transitions
if not args.demo:
chosen_dirs = choose_top_experts(args.expert_demo_path, args.n_experts,
logger=logger)
fill_buffer(args.env, chosen_dirs, replay_buffer, args.frame_skip,
args.frame_stack, args.camera_atomic_actions,
args.max_range_of_camera, args.use_full_observation,
logger=logger)
logger.info("Demo buffer loaded with {} transitions".format(
len(replay_buffer)))
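    # reward_transform applies a signed log compression, sign(x) * log(1 + |x|),
    # preserving the sign of the reward while damping large magnitudes.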
def reward_transform(x):
return np.sign(x) * np.log(1 + np.abs(x))
if args.use_noisy_net is not None and args.use_noisy_net == 'before-pretraining':
chainerrl.links.to_factorized_noisy(q_func, sigma_scale=args.noisy_net_sigma)
explorer = explorers.Greedy()
opt.setup(q_func)
agent = DQfD(q_func, opt, replay_buffer,
gamma=0.99,
explorer=explorer,
n_pretrain_steps=args.n_pretrain_steps,
demo_supervised_margin=args.demo_supervised_margin,
bonus_priority_agent=args.bonus_priority_agent,
bonus_priority_demo=args.bonus_priority_demo,
loss_coeff_nstep=args.loss_coeff_nstep,
loss_coeff_supervised=args.loss_coeff_supervised,
loss_coeff_l2=args.loss_coeff_l2,
gpu=args.gpu,
replay_start_size=args.replay_start_size,
target_update_interval=args.target_update_interval,
clip_delta=args.clip_delta,
update_interval=args.update_interval,
batch_accumulator=args.batch_accumulator,
phi=phi, reward_transform=reward_transform,
minibatch_size=args.minibatch_size)
if args.use_noisy_net is not None and args.use_noisy_net == 'after-pretraining':
chainerrl.links.to_factorized_noisy(q_func, sigma_scale=args.noisy_net_sigma)
explorer = explorers.Greedy()
if args.optimizer == 'rmsprop':
opt = chainer.optimizers.RMSpropGraves(args.lr, alpha=0.95, momentum=0.0, eps=1e-2)
elif args.optimizer == 'adam':
opt = chainer.optimizers.Adam(args.lr)
opt.setup(q_func)
opt.add_hook(
chainer.optimizer_hooks.WeightDecay(args.loss_coeff_l2))
agent.optimizer = opt
agent.target_model = None
agent.sync_target_network()
if args.load:
agent.load(args.load)
if args.demo:
eval_stats = experiments.eval_performance(
env=eval_env, agent=agent, n_steps=None, n_episodes=args.eval_n_runs)
logger.info('n_runs: {} mean: {} median: {} stdev: {}'.format(
args.eval_n_runs, eval_stats['mean'], eval_stats['median'], eval_stats['stdev']))
else:
agent.pretrain()
evaluator = Evaluator(agent=agent,
n_steps=None,
n_episodes=args.eval_n_runs,
eval_interval=eval_interval,
outdir=args.outdir,
max_episode_len=None,
env=eval_env,
step_offset=0,
save_best_so_far_agent=True,
logger=logger)
# Evaluate the agent BEFORE training begins
evaluator.evaluate_and_update_max_score(t=0, episodes=0)
experiments.train_agent(agent=agent,
env=env,
steps=steps,
outdir=args.outdir,
max_episode_len=None,
step_offset=0,
evaluator=evaluator,
successful_score=None,
step_hooks=[])
env.close()
if __name__ == "__main__":
main()
|
[
"numpy.abs",
"argparse.ArgumentParser",
"chainerrl.explorers.Greedy",
"future.standard_library.install_aliases",
"q_functions.CNNBranchingQFunction",
"numpy.random.randint",
"expert_converter.choose_top_experts",
"expert_converter.fill_buffer",
"os.path.join",
"chainerrl.links.to_factorized_noisy",
"env_wrappers.FullObservationSpaceWrapper",
"chainerrl.misc.set_random_seed",
"env_wrappers.BranchedActionWrapper",
"chainerrl.experiments.prepare_output_dir",
"chainer.optimizer_hooks.WeightDecay",
"env_wrappers.BranchedRandomizedAction",
"env_wrappers.FrameStack",
"dqfd.DQfD",
"chainerrl.experiments.train_agent",
"chainerrl.experiments.evaluator.Evaluator",
"chainer.optimizers.RMSpropGraves",
"chainerrl.experiments.eval_performance",
"env_wrappers.PoVWithCompassAngleWrapper",
"numpy.asarray",
"env_wrappers.MoveAxisWrapper",
"env_wrappers.FrameSkip",
"chainer.optimizers.Adam",
"gym.make",
"logging.basicConfig",
"chainer.optimizer_hooks.GradientClipping",
"numpy.sign",
"env_wrappers.ObtainPoVWrapper",
"dqfd.PrioritizedDemoReplayBuffer",
"logging.getLogger"
] |
[((332, 366), 'future.standard_library.install_aliases', 'standard_library.install_aliases', ([], {}), '()\n', (364, 366), False, 'from future import standard_library\n'), ((1390, 1415), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1413, 1415), False, 'import argparse\n'), ((7221, 7248), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (7238, 7248), False, 'import logging\n'), ((7322, 7381), 'chainerrl.misc.set_random_seed', 'chainerrl.misc.set_random_seed', (['args.seed'], {'gpus': '(args.gpu,)'}), '(args.seed, gpus=(args.gpu,))\n', (7352, 7381), False, 'import chainerrl\n'), ((7401, 7450), 'chainerrl.experiments.prepare_output_dir', 'experiments.prepare_output_dir', (['args', 'args.outdir'], {}), '(args, args.outdir)\n', (7431, 7450), False, 'from chainerrl import experiments, explorers\n'), ((9636, 9654), 'gym.make', 'gym.make', (['args.env'], {}), '(args.env)\n', (9644, 9654), False, 'import gym\n'), ((10222, 10382), 'q_functions.CNNBranchingQFunction', 'CNNBranchingQFunction', (['branch_sizes'], {'n_input_channels': 'n_input_channels', 'gradient_rescaling': 'args.gradient_rescaling', 'use_tuple': 'args.use_full_observation'}), '(branch_sizes, n_input_channels=n_input_channels,\n gradient_rescaling=args.gradient_rescaling, use_tuple=args.\n use_full_observation)\n', (10243, 10382), False, 'from q_functions import CNNBranchingQFunction\n'), ((12435, 12597), 'dqfd.PrioritizedDemoReplayBuffer', 'PrioritizedDemoReplayBuffer', (['args.replay_buffer_size'], {'alpha': '(0.4)', 'beta0': '(0.6)', 'betasteps': 'betasteps', 'error_max': 'args.error_max', 'num_steps': 'args.num_step_return'}), '(args.replay_buffer_size, alpha=0.4, beta0=0.6,\n betasteps=betasteps, error_max=args.error_max, num_steps=args.\n num_step_return)\n', (12462, 12597), False, 'from dqfd import DQfD, PrioritizedDemoReplayBuffer\n'), ((13516, 14225), 'dqfd.DQfD', 'DQfD', (['q_func', 'opt', 'replay_buffer'], {'gamma': '(0.99)', 'explorer': 'explorer', 'n_pretrain_steps': 'args.n_pretrain_steps', 'demo_supervised_margin': 'args.demo_supervised_margin', 'bonus_priority_agent': 'args.bonus_priority_agent', 'bonus_priority_demo': 'args.bonus_priority_demo', 'loss_coeff_nstep': 'args.loss_coeff_nstep', 'loss_coeff_supervised': 'args.loss_coeff_supervised', 'loss_coeff_l2': 'args.loss_coeff_l2', 'gpu': 'args.gpu', 'replay_start_size': 'args.replay_start_size', 'target_update_interval': 'args.target_update_interval', 'clip_delta': 'args.clip_delta', 'update_interval': 'args.update_interval', 'batch_accumulator': 'args.batch_accumulator', 'phi': 'phi', 'reward_transform': 'reward_transform', 'minibatch_size': 'args.minibatch_size'}), '(q_func, opt, replay_buffer, gamma=0.99, explorer=explorer,\n n_pretrain_steps=args.n_pretrain_steps, demo_supervised_margin=args.\n demo_supervised_margin, bonus_priority_agent=args.bonus_priority_agent,\n bonus_priority_demo=args.bonus_priority_demo, loss_coeff_nstep=args.\n loss_coeff_nstep, loss_coeff_supervised=args.loss_coeff_supervised,\n loss_coeff_l2=args.loss_coeff_l2, gpu=args.gpu, replay_start_size=args.\n replay_start_size, target_update_interval=args.target_update_interval,\n clip_delta=args.clip_delta, update_interval=args.update_interval,\n batch_accumulator=args.batch_accumulator, phi=phi, reward_transform=\n reward_transform, minibatch_size=args.minibatch_size)\n', (13520, 14225), False, 'from dqfd import DQfD, PrioritizedDemoReplayBuffer\n'), ((7023, 7119), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': 
'args.logging_filename', 'filemode': '"""w"""', 'level': 'args.logging_level'}), "(filename=args.logging_filename, filemode='w', level=\n args.logging_level)\n", (7042, 7119), False, 'import logging\n'), ((7161, 7206), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'args.logging_level'}), '(level=args.logging_level)\n', (7180, 7206), False, 'import logging\n'), ((8945, 9033), 'env_wrappers.MoveAxisWrapper', 'MoveAxisWrapper', (['env'], {'source': '(-1)', 'destination': '(0)', 'use_tuple': 'args.use_full_observation'}), '(env, source=-1, destination=0, use_tuple=args.\n use_full_observation)\n', (8960, 9033), False, 'from env_wrappers import BranchedRandomizedAction, BranchedActionWrapper, MoveAxisWrapper, FrameSkip, FrameStack, ObtainPoVWrapper, PoVWithCompassAngleWrapper, FullObservationSpaceWrapper\n'), ((9321, 9420), 'env_wrappers.BranchedActionWrapper', 'BranchedActionWrapper', (['env', 'branch_sizes', 'args.camera_atomic_actions', 'args.max_range_of_camera'], {}), '(env, branch_sizes, args.camera_atomic_actions, args.\n max_range_of_camera)\n', (9342, 9420), False, 'from env_wrappers import BranchedRandomizedAction, BranchedActionWrapper, MoveAxisWrapper, FrameSkip, FrameStack, ObtainPoVWrapper, PoVWithCompassAngleWrapper, FullObservationSpaceWrapper\n'), ((11290, 11324), 'os.path.join', 'os.path.join', (['args.outdir', '"""model"""'], {}), "(args.outdir, 'model')\n", (11302, 11324), False, 'import os\n'), ((11377, 11454), 'chainer.optimizers.RMSpropGraves', 'chainer.optimizers.RMSpropGraves', (['args.lr'], {'alpha': '(0.95)', 'momentum': '(0.0)', 'eps': '(0.01)'}), '(args.lr, alpha=0.95, momentum=0.0, eps=0.01)\n', (11409, 11454), False, 'import chainer\n'), ((12718, 12790), 'expert_converter.choose_top_experts', 'choose_top_experts', (['args.expert_demo_path', 'args.n_experts'], {'logger': 'logger'}), '(args.expert_demo_path, args.n_experts, logger=logger)\n', (12736, 12790), False, 'from expert_converter import choose_top_experts, fill_buffer\n'), ((12841, 13031), 'expert_converter.fill_buffer', 'fill_buffer', (['args.env', 'chosen_dirs', 'replay_buffer', 'args.frame_skip', 'args.frame_stack', 'args.camera_atomic_actions', 'args.max_range_of_camera', 'args.use_full_observation'], {'logger': 'logger'}), '(args.env, chosen_dirs, replay_buffer, args.frame_skip, args.\n frame_stack, args.camera_atomic_actions, args.max_range_of_camera, args\n .use_full_observation, logger=logger)\n', (12852, 13031), False, 'from expert_converter import choose_top_experts, fill_buffer\n'), ((13360, 13437), 'chainerrl.links.to_factorized_noisy', 'chainerrl.links.to_factorized_noisy', (['q_func'], {'sigma_scale': 'args.noisy_net_sigma'}), '(q_func, sigma_scale=args.noisy_net_sigma)\n', (13395, 13437), False, 'import chainerrl\n'), ((13457, 13475), 'chainerrl.explorers.Greedy', 'explorers.Greedy', ([], {}), '()\n', (13473, 13475), False, 'from chainerrl import experiments, explorers\n'), ((14569, 14646), 'chainerrl.links.to_factorized_noisy', 'chainerrl.links.to_factorized_noisy', (['q_func'], {'sigma_scale': 'args.noisy_net_sigma'}), '(q_func, sigma_scale=args.noisy_net_sigma)\n', (14604, 14646), False, 'import chainerrl\n'), ((14666, 14684), 'chainerrl.explorers.Greedy', 'explorers.Greedy', ([], {}), '()\n', (14682, 14684), False, 'from chainerrl import experiments, explorers\n'), ((15219, 15321), 'chainerrl.experiments.eval_performance', 'experiments.eval_performance', ([], {'env': 'eval_env', 'agent': 'agent', 'n_steps': 'None', 'n_episodes': 'args.eval_n_runs'}), '(env=eval_env, 
agent=agent, n_steps=None,\n n_episodes=args.eval_n_runs)\n', (15247, 15321), False, 'from chainerrl import experiments, explorers\n'), ((15552, 15769), 'chainerrl.experiments.evaluator.Evaluator', 'Evaluator', ([], {'agent': 'agent', 'n_steps': 'None', 'n_episodes': 'args.eval_n_runs', 'eval_interval': 'eval_interval', 'outdir': 'args.outdir', 'max_episode_len': 'None', 'env': 'eval_env', 'step_offset': '(0)', 'save_best_so_far_agent': '(True)', 'logger': 'logger'}), '(agent=agent, n_steps=None, n_episodes=args.eval_n_runs,\n eval_interval=eval_interval, outdir=args.outdir, max_episode_len=None,\n env=eval_env, step_offset=0, save_best_so_far_agent=True, logger=logger)\n', (15561, 15769), False, 'from chainerrl.experiments.evaluator import Evaluator\n'), ((16159, 16342), 'chainerrl.experiments.train_agent', 'experiments.train_agent', ([], {'agent': 'agent', 'env': 'env', 'steps': 'steps', 'outdir': 'args.outdir', 'max_episode_len': 'None', 'step_offset': '(0)', 'evaluator': 'evaluator', 'successful_score': 'None', 'step_hooks': '[]'}), '(agent=agent, env=env, steps=steps, outdir=args.\n outdir, max_episode_len=None, step_offset=0, evaluator=evaluator,\n successful_score=None, step_hooks=[])\n', (16182, 16342), False, 'from chainerrl import experiments, explorers\n'), ((8365, 8397), 'env_wrappers.FullObservationSpaceWrapper', 'FullObservationSpaceWrapper', (['env'], {}), '(env)\n', (8392, 8397), False, 'from env_wrappers import BranchedRandomizedAction, BranchedActionWrapper, MoveAxisWrapper, FrameSkip, FrameStack, ObtainPoVWrapper, PoVWithCompassAngleWrapper, FullObservationSpaceWrapper\n'), ((8844, 8880), 'env_wrappers.FrameSkip', 'FrameSkip', (['env'], {'skip': 'args.frame_skip'}), '(env, skip=args.frame_skip)\n', (8853, 8880), False, 'from env_wrappers import BranchedRandomizedAction, BranchedActionWrapper, MoveAxisWrapper, FrameSkip, FrameStack, ObtainPoVWrapper, PoVWithCompassAngleWrapper, FullObservationSpaceWrapper\n'), ((9155, 9251), 'env_wrappers.FrameStack', 'FrameStack', (['env', 'args.frame_stack'], {'channel_order': '"""chw"""', 'use_tuple': 'args.use_full_observation'}), "(env, args.frame_stack, channel_order='chw', use_tuple=args.\n use_full_observation)\n", (9165, 9251), False, 'from env_wrappers import BranchedRandomizedAction, BranchedActionWrapper, MoveAxisWrapper, FrameSkip, FrameStack, ObtainPoVWrapper, PoVWithCompassAngleWrapper, FullObservationSpaceWrapper\n'), ((9452, 9514), 'env_wrappers.BranchedRandomizedAction', 'BranchedRandomizedAction', (['env', 'branch_sizes', 'args.eval_epsilon'], {}), '(env, branch_sizes, args.eval_epsilon)\n', (9476, 9514), False, 'from env_wrappers import BranchedRandomizedAction, BranchedActionWrapper, MoveAxisWrapper, FrameSkip, FrameStack, ObtainPoVWrapper, PoVWithCompassAngleWrapper, FullObservationSpaceWrapper\n'), ((10559, 10593), 'numpy.asarray', 'np.asarray', (['x[0]'], {'dtype': 'np.float32'}), '(x[0], dtype=np.float32)\n', (10569, 10593), True, 'import numpy as np\n'), ((10615, 10649), 'numpy.asarray', 'np.asarray', (['x[1]'], {'dtype': 'np.float32'}), '(x[1], dtype=np.float32)\n', (10625, 10649), True, 'import numpy as np\n'), ((11504, 11536), 'chainer.optimizers.Adam', 'chainer.optimizers.Adam', (['args.lr'], {}), '(args.lr)\n', (11527, 11536), False, 'import chainer\n'), ((11756, 11802), 'chainer.optimizer_hooks.GradientClipping', 'chainer.optimizer_hooks.GradientClipping', (['(10.0)'], {}), '(10.0)\n', (11796, 11802), False, 'import chainer\n'), ((13230, 13240), 'numpy.sign', 'np.sign', (['x'], {}), '(x)\n', (13237, 13240), 
True, 'import numpy as np\n'), ((14744, 14821), 'chainer.optimizers.RMSpropGraves', 'chainer.optimizers.RMSpropGraves', (['args.lr'], {'alpha': '(0.95)', 'momentum': '(0.0)', 'eps': '(0.01)'}), '(args.lr, alpha=0.95, momentum=0.0, eps=0.01)\n', (14776, 14821), False, 'import chainer\n'), ((14972, 15027), 'chainer.optimizer_hooks.WeightDecay', 'chainer.optimizer_hooks.WeightDecay', (['args.loss_coeff_l2'], {}), '(args.loss_coeff_l2)\n', (15007, 15027), False, 'import chainer\n'), ((8468, 8499), 'env_wrappers.PoVWithCompassAngleWrapper', 'PoVWithCompassAngleWrapper', (['env'], {}), '(env)\n', (8494, 8499), False, 'from env_wrappers import BranchedRandomizedAction, BranchedActionWrapper, MoveAxisWrapper, FrameSkip, FrameStack, ObtainPoVWrapper, PoVWithCompassAngleWrapper, FullObservationSpaceWrapper\n'), ((8532, 8553), 'env_wrappers.ObtainPoVWrapper', 'ObtainPoVWrapper', (['env'], {}), '(env)\n', (8548, 8553), False, 'from env_wrappers import BranchedRandomizedAction, BranchedActionWrapper, MoveAxisWrapper, FrameSkip, FrameStack, ObtainPoVWrapper, PoVWithCompassAngleWrapper, FullObservationSpaceWrapper\n'), ((8649, 8685), 'os.path.join', 'os.path.join', (['args.outdir', '"""monitor"""'], {}), "(args.outdir, 'monitor')\n", (8661, 8685), False, 'import os\n'), ((10722, 10753), 'numpy.asarray', 'np.asarray', (['x'], {'dtype': 'np.float32'}), '(x, dtype=np.float32)\n', (10732, 10753), True, 'import numpy as np\n'), ((14879, 14911), 'chainer.optimizers.Adam', 'chainer.optimizers.Adam', (['args.lr'], {}), '(args.lr)\n', (14902, 14911), False, 'import chainer\n'), ((10910, 10930), 'numpy.random.randint', 'np.random.randint', (['n'], {}), '(n)\n', (10927, 10930), True, 'import numpy as np\n'), ((13254, 13263), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (13260, 13263), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
import numpy as np
import matching.cr_search_validation_matcher
import utils.data_format_keys as dfk
import sys
from evaluation.link_metrics import LinkMetricsResults
from multiprocessing import Pool
from utils.utils import read_json, save_json
def modify_simple_threshold(dataset, threshold):
for item in dataset:
if item[dfk.DATASET_SCORE] is not None and \
item[dfk.DATASET_SCORE] < threshold:
item[dfk.DATASET_TARGET_TEST][dfk.CR_ITEM_DOI] = None
return dataset
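# find_best scans the (threshold, results) pairs and returns the last (i.e.
# highest) threshold that attains the maximum F1, together with its precision,
# recall and F1.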
def find_best(results):
overall = [r[1].get(dfk.EVAL_F1) for r in results]
index = len(overall) - overall[::-1].index(max(overall)) - 1
return index, results[index][0], results[index][1].get(dfk.EVAL_PREC), \
results[index][1].get(dfk.EVAL_REC), results[index][1].get(dfk.EVAL_F1)
dataset = read_json(sys.argv[1])['dataset']
matcher = matching.cr_search_validation_matcher.Matcher(0.4, 0, [])
with Pool(10) as p:
results = p.map(matcher.match,
[item.get('ref_string') for item in dataset])
for item, target in zip(dataset, results):
item['target_test']['DOI'] = target[0]
item['score'] = target[1]
save_json(dataset, sys.argv[2])
results_valid_threshold = \
[(t, LinkMetricsResults(modify_simple_threshold(dataset, t)))
for t in np.arange(0.0, 1.0, 0.01)]
print(','.join([str(i) for i in find_best(results_valid_threshold)[1:]]))
|
[
"utils.utils.read_json",
"numpy.arange",
"utils.utils.save_json",
"multiprocessing.Pool"
] |
[((1193, 1224), 'utils.utils.save_json', 'save_json', (['dataset', 'sys.argv[2]'], {}), '(dataset, sys.argv[2])\n', (1202, 1224), False, 'from utils.utils import read_json, save_json\n'), ((852, 874), 'utils.utils.read_json', 'read_json', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (861, 874), False, 'from utils.utils import read_json, save_json\n'), ((960, 968), 'multiprocessing.Pool', 'Pool', (['(10)'], {}), '(10)\n', (964, 968), False, 'from multiprocessing import Pool\n'), ((1334, 1359), 'numpy.arange', 'np.arange', (['(0.0)', '(1.0)', '(0.01)'], {}), '(0.0, 1.0, 0.01)\n', (1343, 1359), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
#
# visualize_contingency_tables.py: Visualizes all contingency tables
# obtained by our method in the form of a diagram in the plane.
#
# Input: JSON file with shapelets
#
# Output: A set of points in the plane, each representing one table,
# such that the distance to the origin refers to preferences
# in splitting behaviour.
#
# The output will be written to `stdout`.
import argparse
import json
import sys
import numpy as np
def transform_table(table):
"""
Transforms a contingency table into a point on a two-dimensional
plane, in which the distance to the origin shows the suitability
of a contingency table for separating cases and controls.
"""
# Yes, this ordering is correct. Please refer to our paper for
# more details.
a, b, d, c = table
n1 = a+b
n0 = c+d
return (a-b) / n1, (c-d) / n0
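# Example: the table (a, b, d, c) = (10, 0, 0, 8) separates the two groups
# perfectly and maps to (1.0, 1.0), the point furthest from the origin within
# the unit square.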
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Contingency Table Visualization")
parser.add_argument("input",
metavar = "INPUT",
help = "Input file"
)
parser.add_argument("-f", "--flip",
required = False,
action = "store_true",
help = "If set, flips values in the visualization to ensure that quadrant 3 is not used"
)
parser.add_argument("-p", "--prune",
required = False,
action = "store_true",
help = "If set, prunes duplicates points"
)
arguments = parser.parse_args()
input_file = arguments.input
flip = arguments.flip
prune = arguments.prune
with open(input_file) as f:
data = json.load(f)
shapelets = data["shapelets"]
tables = []
for shapelet in shapelets:
tables.append( shapelet["table"] )
points = []
for table in tables:
x,y = transform_table(table)
if flip and ( (x < 0 and y < 0) or (np.sign(x) != np.sign(y) and -x > y) ):
x,y = -y,-x
points.append( (x,y) )
if prune:
points = set(points)
for x,y in points:
print("{}\t{}".format(x,y))
|
[
"json.load",
"argparse.ArgumentParser",
"numpy.sign"
] |
[((909, 979), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Contingency Table Visualization"""'}), "(description='Contingency Table Visualization')\n", (932, 979), False, 'import argparse\n'), ((1572, 1584), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1581, 1584), False, 'import json\n'), ((1816, 1826), 'numpy.sign', 'np.sign', (['x'], {}), '(x)\n', (1823, 1826), True, 'import numpy as np\n'), ((1830, 1840), 'numpy.sign', 'np.sign', (['y'], {}), '(y)\n', (1837, 1840), True, 'import numpy as np\n')]
|
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # This is a data-parallelized Neural Network # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
############################################################################################################
########################################### IMPORT PACKAGES ################################################
############################################################################################################
# General
import os
import functools
import time
import numpy as np
import pandas as pd
import random
import math
import warnings
# Parallelization
from mpi4py import MPI
##############################################################################################################
########################################## HELPER FUNCTIONS ##################################################
##############################################################################################################
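# Logistic sigmoid 1 / (1 + exp(-x)) and its derivative, computed via the
# identity sigmoid'(x) = sigmoid(x) * (1 - sigmoid(x)).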
def sigmoid(x):
    return 1 / (1 + np.exp(-x))
def sigmoid_der(x):
    return sigmoid(x) * (1 - sigmoid(x))
def train_network(wh,wo,epochs,train_X,train_Y):
for epoch in range(epochs):
# slice data
sliced_inputs = np.asarray(np.split(train_X, comm.size))
sliced_labels = np.asarray(np.split(train_Y, comm.size))
size = int(len(train_X)/comm.size)
        inputs_buf = np.zeros((size, train_X.shape[1]))  # receive buffer matching the per-rank slice of input features
labels_buf = np.zeros(len(train_Y),dtype='i')
# send data to each process
comm.Scatter(sliced_inputs, inputs_buf, root=0)
comm.Scatter(sliced_labels, labels_buf, root=0)
### neural network iterations ###
## feedforward ##
# hidden layer
zh = np.dot(train_X, wh)
ah = sigmoid(zh)
# output layer
zo = np.dot(ah, wo)
ao = sigmoid(zo)
# error calculation
error_out = ((1 / (2*len(train_X))) * (np.power((ao - train_Y), 2)))
## backpropogation ##
# backpropogation from output layer to hidden layer
dcost_dao = ao - train_Y
dao_dzo = sigmoid_der(zo)
dzo_dwo = ah
dcost_wo = np.dot(dzo_dwo.T, (dcost_dao * dao_dzo))
# backpropogate from hidden layer to input layer
dcost_dzo = dcost_dao * dao_dzo
dzo_dah = wo
dcost_dah = np.dot(dcost_dzo , dzo_dah.T)
dah_dzh = sigmoid_der(zh)
dzh_dwh = train_X
dcost_wh = np.dot(dzh_dwh.T, dah_dzh * dcost_dah)
comm.Barrier()
# average error for all processes
error_buf = [0] * comm.size
try:
error_buf = comm.gather(error_out)
error_out = sum(error_buf) / len(error_buf)
except TypeError as e:
pass
# if comm.rank == 0:
# print(f'error at iteration {epoch}: {error_out.sum()}')
# gather gradients of weights for hidden layer from all processes
dcost_wh_buf = np.asarray([np.zeros_like(dcost_wh)] * comm.size)
comm.Gather(dcost_wh, dcost_wh_buf)
comm.Barrier()
dcost_wh = functools.reduce(np.add, dcost_wh_buf) / comm.size # average gradients across all processes
# gather gradients of weights for output layer
dcost_wo_buf = np.asarray([np.zeros_like(dcost_wo)] * comm.size)
comm.Gather(dcost_wo, dcost_wo_buf)
comm.Barrier()
dcost_wo = functools.reduce(np.add, dcost_wo_buf) / comm.size # average gradients across all processes
# update weights
wh -= lr * dcost_wh
wo -= lr * dcost_wo
# send updated weights to processes
comm.Bcast([wh, MPI.DOUBLE])
comm.Bcast([wo, MPI.DOUBLE])
return wh,wo
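# Typical launch (assuming this file is saved as, for example, nn_mpi.py):
#   mpiexec -n 4 python nn_mpi.py
# Gradients are gathered and averaged across all ranks each epoch, and the
# updated weights are broadcast back before the next iteration.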
def predict(theta1,theta2, inputs):
a2 = np.dot(inputs, theta1)
a2 = sigmoid(a2)
a3 = np.dot(a2, theta2)
a3 = pd.Series(sigmoid(a3).reshape(-1))
predictions = np.where(a3 >= 0.5,1,-1)
return pd.Series(predictions)
def accuracy_measures(predictions,actual):
    df = pd.concat([predictions, actual], axis=1)  # concatenate predictions & actual labels into a single dataframe
df.columns = ['predictions','actual']
df['correct'] = np.where(df.predictions == df.actual,1,0)
# true positives
positives = df.loc[df.actual == 1]
true_positives = positives.correct.sum()
# false negatives
false_negatives = (positives.predictions == -1).sum()
    # true negatives
    negatives = df.loc[df.actual == -1]
    true_negatives = negatives.correct.sum()
    # false positives
    false_positives = (negatives.predictions == 1).sum()
# overall accuracy
accuracy = (true_positives + true_negatives)/(true_positives + true_negatives + false_positives + false_negatives)
# precision
precision = true_positives/(true_positives + false_positives)
# recall (sensitivity)
sensitivity = true_positives/(true_positives+false_negatives)
# specificity
specificity = true_negatives/(true_negatives + false_positives)
return accuracy,precision, sensitivity, specificity
############################################################################################################
######################################## EXECUTION & PERFORMANCE ###########################################
############################################################################################################
if __name__ == '__main__':
#suppress warnings
warnings.filterwarnings('ignore')
####################################################
############ DATA IMPORT & FORMATTING ##############
####################################################
model_df = pd.read_csv('blackjack.csv')
X = np.array(model_df[[i for i in model_df.columns if i not in {'correct_action','outcome'}]])
train_X = np.array(model_df[['player_initial_total', 'has_ace', 'dealer_card','count']])
train_Y = np.array(model_df['correct_action']).reshape(-1,1)
####################################################
############### MPI INITIALIZATION #################
####################################################
# Init MPI
comm = MPI.COMM_WORLD
# structure of the 3-layer neural network
hidden_layer_size = 10
output_layer_size = 1
lr = 1 # learning rate
epochs = 50 # iterations
# randomly initialize weights
if comm.rank == 0:
wh = np.random.rand(train_X.shape[1],hidden_layer_size) # weights for hidden layer
wo = np.random.rand(hidden_layer_size, 1) # weights for output layer
else:
wh = np.random.rand(train_X.shape[1],hidden_layer_size)
wo = np.random.rand(hidden_layer_size, 1)
comm.Barrier()
# communicate weight vectors
comm.Bcast([wh, MPI.DOUBLE])
comm.Bcast([wo, MPI.DOUBLE])
#################################################
############ NEURAL NETWORK TRAINING ############
#################################################
if comm.rank == 0:
start = time.time()
wh,wo = train_network(wh,wo,epochs,train_X,train_Y)
if comm.rank == 0:
end = time.time()
train_time = round(end-start,2)
print(f'\nEND OF TRAINING, took {train_time} seconds\n')
# write training time to file for plotting
out_filename = f'nn_train_{comm.size}.txt'
        with open(out_filename, "w") as outfile:
            outfile.write(str(train_time))
################################################
############ PREDICTIONS & RESULTS #############
################################################
# generate predictions
predictions = predict(wh,wo,train_X)
actual = pd.Series(train_Y.reshape(-1))
# compute & display results
accuracy,precision, sensitivity, specificity = accuracy_measures(predictions,actual)
print('PERFORMANCE RESULTS:')
print(f'accuracy: {100*round(accuracy,2)}%')
print(f'precision: {100*round(precision,2)}%')
print(f'sensitivity: {100*round(sensitivity,2)}%')
print(f'specificity: {100*round(specificity,2)}%\n')
|
[
"numpy.zeros_like",
"warnings.filterwarnings",
"pandas.read_csv",
"numpy.power",
"numpy.zeros",
"functools.reduce",
"time.time",
"numpy.split",
"numpy.where",
"numpy.array",
"pandas.Series",
"numpy.exp",
"numpy.random.rand",
"numpy.dot",
"pandas.concat"
] |
[((3562, 3584), 'numpy.dot', 'np.dot', (['inputs', 'theta1'], {}), '(inputs, theta1)\n', (3568, 3584), True, 'import numpy as np\n'), ((3617, 3635), 'numpy.dot', 'np.dot', (['a2', 'theta2'], {}), '(a2, theta2)\n', (3623, 3635), True, 'import numpy as np\n'), ((3702, 3728), 'numpy.where', 'np.where', (['(a3 >= 0.5)', '(1)', '(-1)'], {}), '(a3 >= 0.5, 1, -1)\n', (3710, 3728), True, 'import numpy as np\n'), ((3738, 3760), 'pandas.Series', 'pd.Series', (['predictions'], {}), '(predictions)\n', (3747, 3760), True, 'import pandas as pd\n'), ((3812, 3852), 'pandas.concat', 'pd.concat', (['[predictions, actual]'], {'axis': '(1)'}), '([predictions, actual], axis=1)\n', (3821, 3852), True, 'import pandas as pd\n'), ((3975, 4018), 'numpy.where', 'np.where', (['(df.predictions == df.actual)', '(1)', '(0)'], {}), '(df.predictions == df.actual, 1, 0)\n', (3983, 4018), True, 'import numpy as np\n'), ((5192, 5225), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (5215, 5225), False, 'import warnings\n'), ((5400, 5428), 'pandas.read_csv', 'pd.read_csv', (['"""blackjack.csv"""'], {}), "('blackjack.csv')\n", (5411, 5428), True, 'import pandas as pd\n'), ((5435, 5531), 'numpy.array', 'np.array', (["model_df[[i for i in model_df.columns if i not in {'correct_action',\n 'outcome'}]]"], {}), "(model_df[[i for i in model_df.columns if i not in {\n 'correct_action', 'outcome'}]])\n", (5443, 5531), True, 'import numpy as np\n'), ((5538, 5617), 'numpy.array', 'np.array', (["model_df[['player_initial_total', 'has_ace', 'dealer_card', 'count']]"], {}), "(model_df[['player_initial_total', 'has_ace', 'dealer_card', 'count']])\n", (5546, 5617), True, 'import numpy as np\n'), ((1403, 1438), 'numpy.zeros', 'np.zeros', (['(size, hidden_layer_size)'], {}), '((size, hidden_layer_size))\n', (1411, 1438), True, 'import numpy as np\n'), ((1716, 1735), 'numpy.dot', 'np.dot', (['train_X', 'wh'], {}), '(train_X, wh)\n', (1722, 1735), True, 'import numpy as np\n'), ((1786, 1800), 'numpy.dot', 'np.dot', (['ah', 'wo'], {}), '(ah, wo)\n', (1792, 1800), True, 'import numpy as np\n'), ((2102, 2140), 'numpy.dot', 'np.dot', (['dzo_dwo.T', '(dcost_dao * dao_dzo)'], {}), '(dzo_dwo.T, dcost_dao * dao_dzo)\n', (2108, 2140), True, 'import numpy as np\n'), ((2266, 2294), 'numpy.dot', 'np.dot', (['dcost_dzo', 'dzo_dah.T'], {}), '(dcost_dzo, dzo_dah.T)\n', (2272, 2294), True, 'import numpy as np\n'), ((2364, 2402), 'numpy.dot', 'np.dot', (['dzh_dwh.T', '(dah_dzh * dcost_dah)'], {}), '(dzh_dwh.T, dah_dzh * dcost_dah)\n', (2370, 2402), True, 'import numpy as np\n'), ((6088, 6139), 'numpy.random.rand', 'np.random.rand', (['train_X.shape[1]', 'hidden_layer_size'], {}), '(train_X.shape[1], hidden_layer_size)\n', (6102, 6139), True, 'import numpy as np\n'), ((6175, 6211), 'numpy.random.rand', 'np.random.rand', (['hidden_layer_size', '(1)'], {}), '(hidden_layer_size, 1)\n', (6189, 6211), True, 'import numpy as np\n'), ((6256, 6307), 'numpy.random.rand', 'np.random.rand', (['train_X.shape[1]', 'hidden_layer_size'], {}), '(train_X.shape[1], hidden_layer_size)\n', (6270, 6307), True, 'import numpy as np\n'), ((6316, 6352), 'numpy.random.rand', 'np.random.rand', (['hidden_layer_size', '(1)'], {}), '(hidden_layer_size, 1)\n', (6330, 6352), True, 'import numpy as np\n'), ((6656, 6667), 'time.time', 'time.time', ([], {}), '()\n', (6665, 6667), False, 'import time\n'), ((6755, 6766), 'time.time', 'time.time', ([], {}), '()\n', (6764, 6766), False, 'import time\n'), ((1050, 1060), 'numpy.exp', 'np.exp', (['(-x)'], 
{}), '(-x)\n', (1056, 1060), True, 'import numpy as np\n'), ((1256, 1284), 'numpy.split', 'np.split', (['train_X', 'comm.size'], {}), '(train_X, comm.size)\n', (1264, 1284), True, 'import numpy as np\n'), ((1317, 1345), 'numpy.split', 'np.split', (['train_Y', 'comm.size'], {}), '(train_Y, comm.size)\n', (1325, 1345), True, 'import numpy as np\n'), ((1890, 1915), 'numpy.power', 'np.power', (['(ao - train_Y)', '(2)'], {}), '(ao - train_Y, 2)\n', (1898, 1915), True, 'import numpy as np\n'), ((2944, 2982), 'functools.reduce', 'functools.reduce', (['np.add', 'dcost_wh_buf'], {}), '(np.add, dcost_wh_buf)\n', (2960, 2982), False, 'import functools\n'), ((3231, 3269), 'functools.reduce', 'functools.reduce', (['np.add', 'dcost_wo_buf'], {}), '(np.add, dcost_wo_buf)\n', (3247, 3269), False, 'import functools\n'), ((5629, 5665), 'numpy.array', 'np.array', (["model_df['correct_action']"], {}), "(model_df['correct_action'])\n", (5637, 5665), True, 'import numpy as np\n'), ((2832, 2855), 'numpy.zeros_like', 'np.zeros_like', (['dcost_wh'], {}), '(dcost_wh)\n', (2845, 2855), True, 'import numpy as np\n'), ((3119, 3142), 'numpy.zeros_like', 'np.zeros_like', (['dcost_wo'], {}), '(dcost_wo)\n', (3132, 3142), True, 'import numpy as np\n')]
|
from cgbind.log import logger
from copy import deepcopy
import numpy as np
from cgbind.constants import Constants
from rdkit.Chem import AllChem
from scipy.optimize import minimize, Bounds
from scipy.spatial import distance_matrix
from cgbind import geom
from cgbind.atoms import get_vdw_radii
from cgbind.geom import rotation_matrix
from cgbind.geom import calc_com
from cgbind.utils import copy_func
def cage_subst_repulsion_func(cage, substrate, cage_coords, subst_coords, with_attraction=True):
"""
Determine the energy using two-body atom-atom repulsion derived from noble
gas dimers where
V_rep(r) = exp(- r/b + a)
where a and b are parameters determined by the atom pairs. Parameters are
suitable to generate V_rep in kcal mol-1
:param cage: (Cage object)
:param substrate: (Substrate object)
:param cage_coords: (list(np.ndarray)) Cage coordinates
:param subst_coords: (list(np.ndarray)) Substrate coordinates
:param with_attraction: (bool) do or don't return the energy with a
constant attractive term based on the number of
substrate atoms in the structure
:return: energy: (float) Potential energy (V_rep) in kcal mol-1
"""
dist_mat = distance_matrix(cage_coords, subst_coords)
# Matrix with the pairwise additions of the vdW radii
sum_vdw_radii = np.add.outer(np.array(cage.vdw_radii),
np.array(substrate.vdw_radii))
# Magic numbers derived from fitting potentials to noble gas dimers and
# plotting against the sum of vdw radii
b_mat = 0.083214 * sum_vdw_radii - 0.003768
a_mat = 11.576415 * (0.175541 * sum_vdw_radii + 0.316642)
exponent_mat = -(dist_mat / b_mat) + a_mat
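    # Exponentiating gives a pairwise repulsion that grows rapidly once two
    # atoms approach closer than the sum of their vdW radii.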
energy_mat = np.exp(exponent_mat)
energy = np.sum(energy_mat)
# E is negative for favourable binding but this is a purely repulsive
# function so subtract a number.. which is determined from the best
# classifier for 102 binding affinities (see cgbind paper) 0.4 kcal mol-1
if with_attraction:
return energy - 0.4 * substrate.n_atoms
return energy
def cage_subst_repulsion_and_electrostatic_func(cage, substrate, cage_coords, subst_coords):
"""
Determine the energy of adding a substrate to a cage based on V_rep + V_att
where the attractive term is electrostatic and uses the sum of
q_i q_j / r_ij interaction energies where q_i is the partial atomic charge
on atom i.
:param cage: (Cage object)
:param substrate: (Substrate object)
:param cage_coords: (list(np.ndarray)) Cage coordinates
:param subst_coords: (list(np.ndarray)) Substrate coordinates
:return:
"""
# Calculate the distance matrix in Bohr (a0) so the energies are in au
dist_mat = Constants.ang2a0 * distance_matrix(cage_coords, subst_coords)
# Charges are already in units of e
prod_charge_mat = np.outer(cage.charges, substrate.charges)
# Compute the pairwise iteration energies as V = q1 q2 / r in atomic units
energy_mat = prod_charge_mat / dist_mat
electrostatic_energy = Constants.ha2kcalmol * np.sum(energy_mat)
repulsive_energy = cage_subst_repulsion_func(cage, substrate, cage_coords, subst_coords)
return electrostatic_energy + repulsive_energy
def add_substrate_com(cagesubt):
"""
Add a substrate the centre of a cage defined by its centre of mass (com)
will minimise the energy with respect to rotation of the substrate and the
substrate conformer using cagesubt.energy_func. Will rotate cagesubt.n_init_geom
times and use cagesubt.n_subst_confs number of substrate conformers
:param cagesubt: (CageSubstrateComplex object)
:return: xyzs: (list(list))
"""
logger.info(f'Adding substrate to the cage COM and minimising the energy '
f'with {cagesubt.energy_func.__name__}')
# Minimum energy initialisation and the x parameter array (angles to
# rotate about the x, y, z axes)
min_energy, curr_x = 9999999999.9, np.zeros(3)
# Optimum (minimum energy) conformer
best_coords = None
c, s = cagesubt.cage, cagesubt.substrate
cage_coords = get_centered_cage_coords(c)
c.vdw_radii = [get_vdw_radii(atom) for atom in c.atoms]
if cagesubt.n_subst_confs > 1:
try:
s.gen_confs(n_confs=cagesubt.n_subst_confs)
except (ValueError, RuntimeError):
logger.error('Could not generate substrate conformers')
return None
for i, substrate in enumerate(s.conformers):
subst_coords = get_centered_substrate_coords(substrate)
s.vdw_radii = [get_vdw_radii(atom) for atom in s.atoms]
if s.mol_obj is not None:
s.volume = AllChem.ComputeMolVolume(s.mol_obj, confId=i)
for _ in range(cagesubt.n_init_geom):
            rot_angles = 2.0 * np.pi * np.random.rand(3)  # rand generates in [0, 1], so multiply by 2*pi for a full rotation about each axis
# Minimise the energy with a BFGS minimiser supporting bounds on
# the values (rotation is periodic)
result = minimize(get_energy, x0=np.array(rot_angles),
args=(c, s, cagesubt.energy_func, cage_coords, subst_coords),
method='L-BFGS-B',
bounds=Bounds(lb=0.0, ub=2*np.pi), tol=0.01)
energy = result.fun
logger.info(f'Energy = {energy:.4f}')
if energy < min_energy:
min_energy = energy
best_coords = get_rotated_subst_coords(result.x, subst_coords)
logger.info(f'Min energy = {min_energy:.4f} kcal mol-1')
cagesubt.binding_energy_kcal = min_energy
if best_coords is not None:
s.set_atoms(coords=best_coords)
c.set_atoms(coords=cage_coords)
return c.atoms + s.atoms
else:
return None
def get_centered_cage_coords(cage):
"""Get the cage coordinates that had been translated to the cage centroid"""
cage_coords = cage.get_coords()
centroid = cage.get_centroid()
return np.array([coord - centroid for coord in cage_coords])
def get_centered_substrate_coords(substrate):
"""Get the substrate coordinates that have been translated to its center of mass"""
substrate.centre()
return substrate.get_coords()
def cat_cage_subst_coords(cage, substrate, cage_coords, substrate_coords):
"""
Concatenate some coordinates into a set of xyzs by adding back the atom
labels from the original xyzs
:param cage:
:param substrate:
:param cage_coords:
:param substrate_coords:
:return:
"""
logger.info('Appending substrate coordinates to cage coordinates')
xyzs = [[cage.xyzs[n][0]] + cage_coords[n].tolist() for n in range(len(cage.xyzs))]
cage.substrate_atom_ids = list(range(len(xyzs), len(xyzs) + len(substrate.xyzs)))
xyzs += [[substrate.xyzs[n][0]] + substrate_coords[n].tolist() for n in range(len(substrate.xyzs))]
return xyzs
def get_rotated_subst_coords(x, subst_coords):
"""Get substrate coordinates that have been rotated by x[0] radians in the
x axis etc."""
x_rot, y_rot, z_rot = x
rot_matrix = np.identity(3)
rot_matrix = np.matmul(rot_matrix, rotation_matrix(axis=geom.i, theta=x_rot))
rot_matrix = np.matmul(rot_matrix, rotation_matrix(axis=geom.j, theta=y_rot))
rot_matrix = np.matmul(rot_matrix, rotation_matrix(axis=geom.k, theta=z_rot))
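    # The x-, y- and z-axis rotations are composed by successive matrix
    # multiplication; the combined matrix is then applied to every coordinate.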
return np.array([np.matmul(rot_matrix, coord) for coord in deepcopy(subst_coords)])
def get_energy(x, cage, substrate, energy_func, cage_coords, subst_coords):
"""
Calculate the energy in kcal mol-1 for a particular x, which contains the
rotations in x, y, z cartesian directions
"""
rot_substrate_coords = get_rotated_subst_coords(x, subst_coords)
energy = energy_func(cage, substrate, cage_coords, rot_substrate_coords)
return energy
cage_subst_repulsion_func.__name__ = 'repulsion'
cage_subst_repulsion_and_electrostatic_func.__name__ = 'electrostatic'
cage_subst_repulsion_and_electrostatic_func_est = copy_func(cage_subst_repulsion_and_electrostatic_func)
cage_subst_repulsion_and_electrostatic_func_est.__name__ = 'electrostatic_fast'
energy_funcs = [cage_subst_repulsion_func,
cage_subst_repulsion_and_electrostatic_func,
cage_subst_repulsion_and_electrostatic_func_est]
|
[
"copy.deepcopy",
"numpy.outer",
"numpy.sum",
"cgbind.log.logger.info",
"cgbind.geom.rotation_matrix",
"rdkit.Chem.AllChem.ComputeMolVolume",
"cgbind.utils.copy_func",
"numpy.identity",
"numpy.zeros",
"scipy.spatial.distance_matrix",
"scipy.optimize.Bounds",
"numpy.array",
"numpy.exp",
"numpy.matmul",
"numpy.random.rand",
"cgbind.atoms.get_vdw_radii",
"cgbind.log.logger.error"
] |
[((8114, 8168), 'cgbind.utils.copy_func', 'copy_func', (['cage_subst_repulsion_and_electrostatic_func'], {}), '(cage_subst_repulsion_and_electrostatic_func)\n', (8123, 8168), False, 'from cgbind.utils import copy_func\n'), ((1265, 1307), 'scipy.spatial.distance_matrix', 'distance_matrix', (['cage_coords', 'subst_coords'], {}), '(cage_coords, subst_coords)\n', (1280, 1307), False, 'from scipy.spatial import distance_matrix\n'), ((1787, 1807), 'numpy.exp', 'np.exp', (['exponent_mat'], {}), '(exponent_mat)\n', (1793, 1807), True, 'import numpy as np\n'), ((1821, 1839), 'numpy.sum', 'np.sum', (['energy_mat'], {}), '(energy_mat)\n', (1827, 1839), True, 'import numpy as np\n'), ((2936, 2977), 'numpy.outer', 'np.outer', (['cage.charges', 'substrate.charges'], {}), '(cage.charges, substrate.charges)\n', (2944, 2977), True, 'import numpy as np\n'), ((3770, 3891), 'cgbind.log.logger.info', 'logger.info', (['f"""Adding substrate to the cage COM and minimising the energy with {cagesubt.energy_func.__name__}"""'], {}), "(\n f'Adding substrate to the cage COM and minimising the energy with {cagesubt.energy_func.__name__}'\n )\n", (3781, 3891), False, 'from cgbind.log import logger\n'), ((5606, 5662), 'cgbind.log.logger.info', 'logger.info', (['f"""Min energy = {min_energy:.4f} kcal mol-1"""'], {}), "(f'Min energy = {min_energy:.4f} kcal mol-1')\n", (5617, 5662), False, 'from cgbind.log import logger\n'), ((6090, 6145), 'numpy.array', 'np.array', (['[(coord - centroid) for coord in cage_coords]'], {}), '([(coord - centroid) for coord in cage_coords])\n', (6098, 6145), True, 'import numpy as np\n'), ((6651, 6717), 'cgbind.log.logger.info', 'logger.info', (['"""Appending substrate coordinates to cage coordinates"""'], {}), "('Appending substrate coordinates to cage coordinates')\n", (6662, 6717), False, 'from cgbind.log import logger\n'), ((7209, 7223), 'numpy.identity', 'np.identity', (['(3)'], {}), '(3)\n', (7220, 7223), True, 'import numpy as np\n'), ((1400, 1424), 'numpy.array', 'np.array', (['cage.vdw_radii'], {}), '(cage.vdw_radii)\n', (1408, 1424), True, 'import numpy as np\n'), ((1459, 1488), 'numpy.array', 'np.array', (['substrate.vdw_radii'], {}), '(substrate.vdw_radii)\n', (1467, 1488), True, 'import numpy as np\n'), ((2830, 2872), 'scipy.spatial.distance_matrix', 'distance_matrix', (['cage_coords', 'subst_coords'], {}), '(cage_coords, subst_coords)\n', (2845, 2872), False, 'from scipy.spatial import distance_matrix\n'), ((3152, 3170), 'numpy.sum', 'np.sum', (['energy_mat'], {}), '(energy_mat)\n', (3158, 3170), True, 'import numpy as np\n'), ((4052, 4063), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (4060, 4063), True, 'import numpy as np\n'), ((4240, 4259), 'cgbind.atoms.get_vdw_radii', 'get_vdw_radii', (['atom'], {}), '(atom)\n', (4253, 4259), False, 'from cgbind.atoms import get_vdw_radii\n'), ((7263, 7304), 'cgbind.geom.rotation_matrix', 'rotation_matrix', ([], {'axis': 'geom.i', 'theta': 'x_rot'}), '(axis=geom.i, theta=x_rot)\n', (7278, 7304), False, 'from cgbind.geom import rotation_matrix\n'), ((7345, 7386), 'cgbind.geom.rotation_matrix', 'rotation_matrix', ([], {'axis': 'geom.j', 'theta': 'y_rot'}), '(axis=geom.j, theta=y_rot)\n', (7360, 7386), False, 'from cgbind.geom import rotation_matrix\n'), ((7427, 7468), 'cgbind.geom.rotation_matrix', 'rotation_matrix', ([], {'axis': 'geom.k', 'theta': 'z_rot'}), '(axis=geom.k, theta=z_rot)\n', (7442, 7468), False, 'from cgbind.geom import rotation_matrix\n'), ((4658, 4677), 'cgbind.atoms.get_vdw_radii', 'get_vdw_radii', (['atom'], {}), 
'(atom)\n', (4671, 4677), False, 'from cgbind.atoms import get_vdw_radii\n'), ((4756, 4801), 'rdkit.Chem.AllChem.ComputeMolVolume', 'AllChem.ComputeMolVolume', (['s.mol_obj'], {'confId': 'i'}), '(s.mol_obj, confId=i)\n', (4780, 4801), False, 'from rdkit.Chem import AllChem\n'), ((5411, 5448), 'cgbind.log.logger.info', 'logger.info', (['f"""Energy = {energy:.4f}"""'], {}), "(f'Energy = {energy:.4f}')\n", (5422, 5448), False, 'from cgbind.log import logger\n'), ((7492, 7520), 'numpy.matmul', 'np.matmul', (['rot_matrix', 'coord'], {}), '(rot_matrix, coord)\n', (7501, 7520), True, 'import numpy as np\n'), ((4441, 4496), 'cgbind.log.logger.error', 'logger.error', (['"""Could not generate substrate conformers"""'], {}), "('Could not generate substrate conformers')\n", (4453, 4496), False, 'from cgbind.log import logger\n'), ((4888, 4905), 'numpy.random.rand', 'np.random.rand', (['(3)'], {}), '(3)\n', (4902, 4905), True, 'import numpy as np\n'), ((7534, 7556), 'copy.deepcopy', 'deepcopy', (['subst_coords'], {}), '(subst_coords)\n', (7542, 7556), False, 'from copy import deepcopy\n'), ((5128, 5148), 'numpy.array', 'np.array', (['rot_angles'], {}), '(rot_angles)\n', (5136, 5148), True, 'import numpy as np\n'), ((5328, 5356), 'scipy.optimize.Bounds', 'Bounds', ([], {'lb': '(0.0)', 'ub': '(2 * np.pi)'}), '(lb=0.0, ub=2 * np.pi)\n', (5334, 5356), False, 'from scipy.optimize import minimize, Bounds\n')]
|
from rdkit import Chem
import pandas as pd
import numpy as np
from tqdm import tqdm
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import RobustScaler
tqdm.pandas()
GLOBAL_SCALE = ['partial_charge', 'fukui_neu', 'fukui_elec']
ATOM_SCALE = ['NMR']
def check_chemprop_out(df):
invalid = []
for _,r in df.iterrows():
for c in ['partial_charge', 'fukui_neu', 'fukui_elec', 'NMR', 'bond_order', 'bond_length']:
if np.any(pd.isna(r[c])):
invalid.append(r['smiles'])
break
return invalid
def modify_scaled_df(df, scalers):
for index in df.index:
if "H-" in df.loc[index, "smiles"]:
df.loc[index, "partial_charge"] = np.array([(-1 - scalers['partial_charge'].data_min_[0]) / (scalers['partial_charge'].data_max_[0] - scalers['partial_charge'].data_min_[0])])
df.loc[index, "NMR"] = np.array([(27.7189 - scalers['NMR']["H"].data_min_[0]) / (scalers['NMR']["H"].data_max_[0] - scalers['NMR']["H"].data_min_[0])])
elif "F-" in df.loc[index, "smiles"]:
df.loc[index, "partial_charge"] = np.array([(-1 - scalers['partial_charge'].data_min_[0]) / (scalers['partial_charge'].data_max_[0] - scalers['partial_charge'].data_min_[0])])
df.loc[index, "NMR"] = np.array([(481.6514 - scalers['NMR']["F"].data_min_[0]) / (scalers['NMR']["F"].data_max_[0] - scalers['NMR']["F"].data_min_[0])])
elif "Cl-" in df.loc[index, "smiles"]:
df.loc[index, "partial_charge"] = np.array([(-1 - scalers['partial_charge'].data_min_[0]) / (scalers['partial_charge'].data_max_[0] - scalers['partial_charge'].data_min_[0])])
df.loc[index, "NMR"] = np.array([(1150.4265 - scalers['NMR']["Cl"].data_min_[0]) / (scalers['NMR']["Cl"].data_max_[0] - scalers['NMR']["Cl"].data_min_[0])])
elif "Br-" in df.loc[index, "smiles"]:
df.loc[index, "partial_charge"] = np.array([(-1 - scalers['partial_charge'].data_min_[0]) / (scalers['partial_charge'].data_max_[0] - scalers['partial_charge'].data_min_[0])])
df.loc[index, "NMR"] = np.array([(3126.8978 - scalers['NMR']["Br"].data_min_[0]) / (scalers['NMR']["Br"].data_max_[0] - scalers['NMR']["Br"].data_min_[0])])
return df
def min_max_normalize(df, scalers=None, train_smiles=None):
if train_smiles is not None:
ref_df = df[df.smiles.isin(train_smiles)]
else:
ref_df = df.copy()
if scalers is None:
scalers = get_scaler(ref_df)
for column in GLOBAL_SCALE:
scaler = scalers[column]
df[column] = df[column].apply(lambda x: scaler.transform(x.reshape(-1, 1)).reshape(-1))
def min_max_by_atom(atoms, data, scaler):
data = [scaler[a].transform(np.array([[d]]))[0][0] for a, d in zip(atoms, data)]
return np.array(data)
if ATOM_SCALE:
print('postprocessing atom-wise scaling')
df['atoms'] = df.smiles.apply(lambda x: get_atoms(x))
for column in ATOM_SCALE:
df[column] = df.progress_apply(lambda x: min_max_by_atom(x['atoms'], x[column], scalers[column]), axis=1)
df['bond_order_matrix'] = df.apply(lambda x: bond_to_matrix(x['smiles'], x['bond_order']), axis=1)
df['distance_matrix'] = df.apply(lambda x: bond_to_matrix(x['smiles'], x['bond_length']), axis=1)
df = modify_scaled_df(df, scalers)
df = df[['smiles', 'partial_charge', 'fukui_neu', 'fukui_elec', 'NMR', 'bond_order_matrix', 'distance_matrix']]
df = df.set_index('smiles')
return df, scalers
def get_scaler(df):
scalers = {}
for column in GLOBAL_SCALE:
scaler = MinMaxScaler()
data = np.concatenate(df[column].tolist()).reshape(-1, 1)
scaler.fit(data)
scalers[column] = scaler
if ATOM_SCALE:
atoms = df.smiles.apply(lambda x: get_atoms(x))
atoms = np.concatenate(atoms.tolist())
for column in ATOM_SCALE:
data = np.concatenate(df[column].tolist())
data = pd.DataFrame({'atoms': atoms, 'data': data})
data = data.groupby('atoms').agg({'data': lambda x: list(x)})['data'].apply(lambda x: np.array(x)).to_dict()
scalers[column] = {}
for k, d in data.items():
scaler = MinMaxScaler()
scalers[column][k] = scaler.fit(d.reshape(-1, 1))
return scalers
def bond_to_matrix(smiles, bond_vector):
m = Chem.MolFromSmiles(smiles)
m = Chem.AddHs(m)
bond_matrix = np.zeros([len(m.GetAtoms()), len(m.GetAtoms())])
for i, bp in enumerate(bond_vector):
b = m.GetBondWithIdx(i)
bond_matrix[b.GetBeginAtomIdx(), b.GetEndAtomIdx()] = bond_matrix[b.GetEndAtomIdx(), b.GetBeginAtomIdx()] = bp
return bond_matrix
def get_atoms(smiles):
m = Chem.MolFromSmiles(smiles)
m = Chem.AddHs(m)
atoms = [x.GetSymbol() for x in m.GetAtoms()]
return atoms
def minmax_by_element(r, minmax, target):
target = r[target]
elements = r['atoms']
for i, a in enumerate(elements):
target[i] = (target[i] - minmax[a][0]) / (minmax[a][1] - minmax[a][0] + np.finfo(float).eps)
return target
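# --- illustrative usage (not part of the original pipeline) ------------------
# A minimal sketch of how min_max_normalize() above is expected to be called.
# The descriptor values below are made-up numbers, one per atom of CO (methanol
# after AddHs: C, O, H, H, H, H) and one per bond for bond_order / bond_length.
if __name__ == '__main__':
    demo = pd.DataFrame({
        'smiles': ['CO'],
        'partial_charge': [np.array([-0.10, -0.35, 0.10, 0.10, 0.10, 0.15])],
        'fukui_neu': [np.array([0.20, 0.15, 0.15, 0.15, 0.15, 0.20])],
        'fukui_elec': [np.array([0.25, 0.20, 0.10, 0.10, 0.10, 0.25])],
        'NMR': [np.array([140.0, 260.0, 28.0, 28.0, 28.0, 29.0])],
        'bond_order': [np.array([1.0, 1.0, 1.0, 1.0, 1.0])],
        'bond_length': [np.array([1.43, 1.09, 1.09, 1.09, 0.96])],
    })
    scaled_df, fitted_scalers = min_max_normalize(demo.copy())
    print(scaled_df.head())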
|
[
"pandas.DataFrame",
"sklearn.preprocessing.MinMaxScaler",
"tqdm.tqdm.pandas",
"numpy.finfo",
"numpy.array",
"rdkit.Chem.AddHs",
"pandas.isna",
"rdkit.Chem.MolFromSmiles"
] |
[((179, 192), 'tqdm.tqdm.pandas', 'tqdm.pandas', ([], {}), '()\n', (190, 192), False, 'from tqdm import tqdm\n'), ((4407, 4433), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['smiles'], {}), '(smiles)\n', (4425, 4433), False, 'from rdkit import Chem\n'), ((4443, 4456), 'rdkit.Chem.AddHs', 'Chem.AddHs', (['m'], {}), '(m)\n', (4453, 4456), False, 'from rdkit import Chem\n'), ((4774, 4800), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['smiles'], {}), '(smiles)\n', (4792, 4800), False, 'from rdkit import Chem\n'), ((4810, 4823), 'rdkit.Chem.AddHs', 'Chem.AddHs', (['m'], {}), '(m)\n', (4820, 4823), False, 'from rdkit import Chem\n'), ((2815, 2829), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (2823, 2829), True, 'import numpy as np\n'), ((3620, 3634), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (3632, 3634), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((730, 876), 'numpy.array', 'np.array', (["[(-1 - scalers['partial_charge'].data_min_[0]) / (scalers['partial_charge']\n .data_max_[0] - scalers['partial_charge'].data_min_[0])]"], {}), "([(-1 - scalers['partial_charge'].data_min_[0]) / (scalers[\n 'partial_charge'].data_max_[0] - scalers['partial_charge'].data_min_[0])])\n", (738, 876), True, 'import numpy as np\n'), ((907, 1040), 'numpy.array', 'np.array', (["[(27.7189 - scalers['NMR']['H'].data_min_[0]) / (scalers['NMR']['H'].\n data_max_[0] - scalers['NMR']['H'].data_min_[0])]"], {}), "([(27.7189 - scalers['NMR']['H'].data_min_[0]) / (scalers['NMR'][\n 'H'].data_max_[0] - scalers['NMR']['H'].data_min_[0])])\n", (915, 1040), True, 'import numpy as np\n'), ((3992, 4036), 'pandas.DataFrame', 'pd.DataFrame', (["{'atoms': atoms, 'data': data}"], {}), "({'atoms': atoms, 'data': data})\n", (4004, 4036), True, 'import pandas as pd\n'), ((475, 488), 'pandas.isna', 'pd.isna', (['r[c]'], {}), '(r[c])\n', (482, 488), True, 'import pandas as pd\n'), ((1128, 1274), 'numpy.array', 'np.array', (["[(-1 - scalers['partial_charge'].data_min_[0]) / (scalers['partial_charge']\n .data_max_[0] - scalers['partial_charge'].data_min_[0])]"], {}), "([(-1 - scalers['partial_charge'].data_min_[0]) / (scalers[\n 'partial_charge'].data_max_[0] - scalers['partial_charge'].data_min_[0])])\n", (1136, 1274), True, 'import numpy as np\n'), ((1305, 1439), 'numpy.array', 'np.array', (["[(481.6514 - scalers['NMR']['F'].data_min_[0]) / (scalers['NMR']['F'].\n data_max_[0] - scalers['NMR']['F'].data_min_[0])]"], {}), "([(481.6514 - scalers['NMR']['F'].data_min_[0]) / (scalers['NMR'][\n 'F'].data_max_[0] - scalers['NMR']['F'].data_min_[0])])\n", (1313, 1439), True, 'import numpy as np\n'), ((4255, 4269), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (4267, 4269), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((1528, 1674), 'numpy.array', 'np.array', (["[(-1 - scalers['partial_charge'].data_min_[0]) / (scalers['partial_charge']\n .data_max_[0] - scalers['partial_charge'].data_min_[0])]"], {}), "([(-1 - scalers['partial_charge'].data_min_[0]) / (scalers[\n 'partial_charge'].data_max_[0] - scalers['partial_charge'].data_min_[0])])\n", (1536, 1674), True, 'import numpy as np\n'), ((1705, 1843), 'numpy.array', 'np.array', (["[(1150.4265 - scalers['NMR']['Cl'].data_min_[0]) / (scalers['NMR']['Cl'].\n data_max_[0] - scalers['NMR']['Cl'].data_min_[0])]"], {}), "([(1150.4265 - scalers['NMR']['Cl'].data_min_[0]) / (scalers['NMR']\n ['Cl'].data_max_[0] - scalers['NMR']['Cl'].data_min_[0])])\n", (1713, 1843), True, 'import numpy as np\n'), 
((5103, 5118), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (5111, 5118), True, 'import numpy as np\n'), ((1932, 2078), 'numpy.array', 'np.array', (["[(-1 - scalers['partial_charge'].data_min_[0]) / (scalers['partial_charge']\n .data_max_[0] - scalers['partial_charge'].data_min_[0])]"], {}), "([(-1 - scalers['partial_charge'].data_min_[0]) / (scalers[\n 'partial_charge'].data_max_[0] - scalers['partial_charge'].data_min_[0])])\n", (1940, 2078), True, 'import numpy as np\n'), ((2109, 2247), 'numpy.array', 'np.array', (["[(3126.8978 - scalers['NMR']['Br'].data_min_[0]) / (scalers['NMR']['Br'].\n data_max_[0] - scalers['NMR']['Br'].data_min_[0])]"], {}), "([(3126.8978 - scalers['NMR']['Br'].data_min_[0]) / (scalers['NMR']\n ['Br'].data_max_[0] - scalers['NMR']['Br'].data_min_[0])])\n", (2117, 2247), True, 'import numpy as np\n'), ((2747, 2762), 'numpy.array', 'np.array', (['[[d]]'], {}), '([[d]])\n', (2755, 2762), True, 'import numpy as np\n'), ((4135, 4146), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (4143, 4146), True, 'import numpy as np\n')]
|
import pickle
import numpy as np
import pandas as pd
from numpy import linalg as LA
from scipy import stats
import sys
from sklearn.metrics import mean_squared_error
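# NOTE: eval_forecast() below calls a load_results() helper that is not defined in
# this module.  A minimal stand-in, assuming the results were written with pickle:
def load_results(filename):
    """Load a pickled results dictionary from disk (assumed format)."""
    with open(filename, 'rb') as f:
        return pickle.load(f)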
def compute_rmse(target, prediction):
"""Compute rmse between the ground truth and forecasts
Args:
target: a numpy array with ground truth
forecasts: a numpy array with forecasted values
Returns: rmse between the ground truth and forecasts
"""
return np.sqrt(mean_squared_error(target, prediction))
def compute_cosine(target, prediction):
"""Compute cosine simialrity between the ground truth and forecasts
Args:
target: a numpy array with ground truth
forecasts: a numpy array with forecasted values
Returns: cosine simialrity between the ground truth and forecasts
"""
result = np.dot(target, prediction) / (LA.norm(target) * LA.norm(prediction))
return result
def r_squared(y_true, y_pred, y_mean=None):
"""Compute relative R^2 between the ground truth and forecasts
Args:
target: a numpy array with ground truth
forecasts: a numpy array with forecasted values
Returns: relative R^2 between the ground truth and forecasts
"""
    if y_mean is None:
        # note: this default evaluates to an all-zero baseline (the multiplication by
        # np.mean(y_true) has no effect), i.e. R^2 is computed relative to a zero forecast
        y_mean = np.zeros(y_true.shape[0]) * np.mean(y_true)
rss = np.sum((y_true - y_pred)**2)
tss = np.sum((y_true - y_mean)**2)
rsq = 1 - rss / tss
return rsq
def print_eval_stats(eval_result):
"""Print the mean(se), median(se), 0.25 quantile(se), and 0.75 quantile (se) of the array, where se represents standard deviation
Args:
eval_result: a numpy array with evluation results
"""
print('mean: {:.4f} ({:.4f}) median {:.4f} ({:.4f})'.format(np.mean(eval_result),
stats.sem(eval_result),
np.median(eval_result),
quantile_se(eval_result, p=50)))
print('0.25 quantile: {:.4f} ({:.4f}) 0.75 quantile: {:.4f} ({:.4f})'.format(np.quantile(eval_result, 0.25),
quantile_se(eval_result, p=25),
np.quantile(eval_result, 0.75),
quantile_se(eval_result, p=75)))
def quantile_se(x, p=50):
# compute the standard error for different quantiles
# Source: <NAME>, "Mathematical Statistics". Springer Texts in Statistics, 1999. Page 306: Theorem 5.10
# p: quantile: int between 0-100
# x: data sequence
n = len(x) # number of samples
q = np.percentile(x, p)
density = stats.gaussian_kde(x) # density estimate of x
Fp = density(q).item()
p = p / 100.
sF = np.sqrt(p * (1 - p)) / Fp
se = sF / np.sqrt(n)
return se
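# The estimator above follows the asymptotic result for sample quantiles:
#     se(q_p) = sqrt(p * (1 - p)) / (f(q_p) * sqrt(n))
# where q_p is the p-th quantile, f is the (kernel-estimated) density at q_p and n is
# the sample size.  Illustrative sanity check (not part of the original module): for
# x = np.random.normal(size=10000), quantile_se(x, p=50) should be close to the
# theoretical SE of the median of a standard normal, sqrt(pi / 2) / sqrt(n) ~ 0.0125.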
def eval_forecast(model_name, rootpath, test_years, month_range, rep=False, num_rep=10):
"""Evalute the forecasts on training and test sets
Args:
model_name: a string indicating the name of a model
rootpath: the path where the forecasts are saved
test_years: a list of years in the test set
month_range: a list of months in the test set
        rep: True or False, indicating if the results include repeated runs
        num_rep: the number of repetitions
Returns:
result_train: the forecasting performance (temporal/spatial cosine/r2) on training set
result_test: the forecasting performance (temporal/spatial cosine/r2) on test set
"""
target_train = []
target_test = []
prediction_train = []
prediction_test = []
for year in test_years:
if year == 2020:
month_range = range(1, 7)
elif year == 2017:
month_range = range(7, 13)
else:
month_range = range(1, 13)
for month_id in month_range:
result_temp = load_results(rootpath + 'forecast_results/results_{}_{}_{}.pkl'.format(model_name, year, month_id))
target_train.append(result_temp['target_train'])
target_test.append(result_temp['target_test'])
if rep is True:
prediction_train_temp = np.zeros(result_temp['target_train'].shape)
prediction_test_temp = np.zeros(result_temp['target_test'].shape)
for i in range(num_rep):
prediction_train_temp += result_temp['prediction_train'][i]
prediction_test_temp += result_temp['prediction_test'][i]
prediction_train.append(prediction_train_temp / float(num_rep))
prediction_test.append(prediction_test_temp / float(num_rep))
else:
prediction_train.append(result_temp['prediction_train'])
prediction_test.append(result_temp['prediction_test'])
# test set evaluation
prediction_test = np.concatenate(prediction_test, axis=0)
target_test = np.concatenate(target_test, axis=0)
temporal_cos = np.zeros(prediction_test.shape[0])
spatial_cos = np.zeros(prediction_test.shape[1])
temporal_r2 = np.zeros(prediction_test.shape[0])
spatial_r2 = np.zeros(prediction_test.shape[1])
for i in range(prediction_test.shape[0]):
temporal_cos[i] = compute_cosine(target_test[i, :], prediction_test[i, :])
temporal_r2[i] = r_squared(target_test[i, :], prediction_test[i, :])
for i in range(prediction_test.shape[1]):
spatial_cos[i] = compute_cosine(target_test[:, i], prediction_test[:, i])
spatial_r2[i] = r_squared(target_test[:, i], prediction_test[:, i])
result_test = {}
result_test['temporal_cos'] = temporal_cos
result_test['spatial_cos'] = spatial_cos
result_test['temporal_r2'] = temporal_r2
result_test['spatial_r2'] = spatial_r2
# training set evaluation
prediction_train = np.concatenate(prediction_train, axis=0)
target_train = np.concatenate(target_train, axis=0)
temporal_cos_train = np.zeros(prediction_train.shape[0])
spatial_cos_train = np.zeros(prediction_train.shape[1])
temporal_r2_train = np.zeros(prediction_train.shape[0])
spatial_r2_train = np.zeros(prediction_train.shape[1])
for i in range(prediction_train.shape[0]):
temporal_cos_train[i] = compute_cosine(target_train[i, :], prediction_train[i, :])
temporal_r2_train[i] = r_squared(target_train[i, :], prediction_train[i, :])
for i in range(prediction_train.shape[1]):
spatial_cos_train[i] = compute_cosine(target_train[:, i], prediction_train[:, i])
spatial_r2_train[i] = r_squared(target_train[:, i], prediction_train[:, i])
result_train = {}
result_train['temporal_cos'] = temporal_cos_train
result_train['spatial_cos'] = spatial_cos_train
result_train['temporal_r2'] = temporal_r2_train
result_train['spatial_r2'] = spatial_r2_train
return result_train, result_test
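# --- illustrative usage (model name and paths are hypothetical) ---------------
# eval_forecast() expects pickled dicts saved as
#   <rootpath>/forecast_results/results_<model_name>_<year>_<month_id>.pkl
# with keys 'target_train', 'target_test', 'prediction_train' and 'prediction_test'.
# A typical call might look like:
#
#     result_train, result_test = eval_forecast('encoder_decoder', './',
#                                               test_years=[2018, 2019],
#                                               month_range=range(1, 13))
#     print_eval_stats(result_test['temporal_cos'])
#     print_eval_stats(result_test['spatial_r2'])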
|
[
"numpy.quantile",
"numpy.sum",
"numpy.median",
"numpy.zeros",
"scipy.stats.gaussian_kde",
"numpy.percentile",
"numpy.mean",
"numpy.linalg.norm",
"scipy.stats.sem",
"numpy.dot",
"numpy.concatenate",
"numpy.sqrt"
] |
[((1232, 1262), 'numpy.sum', 'np.sum', (['((y_true - y_pred) ** 2)'], {}), '((y_true - y_pred) ** 2)\n', (1238, 1262), True, 'import numpy as np\n'), ((1271, 1301), 'numpy.sum', 'np.sum', (['((y_true - y_mean) ** 2)'], {}), '((y_true - y_mean) ** 2)\n', (1277, 1301), True, 'import numpy as np\n'), ((2691, 2710), 'numpy.percentile', 'np.percentile', (['x', 'p'], {}), '(x, p)\n', (2704, 2710), True, 'import numpy as np\n'), ((2725, 2746), 'scipy.stats.gaussian_kde', 'stats.gaussian_kde', (['x'], {}), '(x)\n', (2743, 2746), False, 'from scipy import stats\n'), ((4908, 4947), 'numpy.concatenate', 'np.concatenate', (['prediction_test'], {'axis': '(0)'}), '(prediction_test, axis=0)\n', (4922, 4947), True, 'import numpy as np\n'), ((4966, 5001), 'numpy.concatenate', 'np.concatenate', (['target_test'], {'axis': '(0)'}), '(target_test, axis=0)\n', (4980, 5001), True, 'import numpy as np\n'), ((5021, 5055), 'numpy.zeros', 'np.zeros', (['prediction_test.shape[0]'], {}), '(prediction_test.shape[0])\n', (5029, 5055), True, 'import numpy as np\n'), ((5074, 5108), 'numpy.zeros', 'np.zeros', (['prediction_test.shape[1]'], {}), '(prediction_test.shape[1])\n', (5082, 5108), True, 'import numpy as np\n'), ((5127, 5161), 'numpy.zeros', 'np.zeros', (['prediction_test.shape[0]'], {}), '(prediction_test.shape[0])\n', (5135, 5161), True, 'import numpy as np\n'), ((5179, 5213), 'numpy.zeros', 'np.zeros', (['prediction_test.shape[1]'], {}), '(prediction_test.shape[1])\n', (5187, 5213), True, 'import numpy as np\n'), ((5878, 5918), 'numpy.concatenate', 'np.concatenate', (['prediction_train'], {'axis': '(0)'}), '(prediction_train, axis=0)\n', (5892, 5918), True, 'import numpy as np\n'), ((5938, 5974), 'numpy.concatenate', 'np.concatenate', (['target_train'], {'axis': '(0)'}), '(target_train, axis=0)\n', (5952, 5974), True, 'import numpy as np\n'), ((6000, 6035), 'numpy.zeros', 'np.zeros', (['prediction_train.shape[0]'], {}), '(prediction_train.shape[0])\n', (6008, 6035), True, 'import numpy as np\n'), ((6060, 6095), 'numpy.zeros', 'np.zeros', (['prediction_train.shape[1]'], {}), '(prediction_train.shape[1])\n', (6068, 6095), True, 'import numpy as np\n'), ((6120, 6155), 'numpy.zeros', 'np.zeros', (['prediction_train.shape[0]'], {}), '(prediction_train.shape[0])\n', (6128, 6155), True, 'import numpy as np\n'), ((6179, 6214), 'numpy.zeros', 'np.zeros', (['prediction_train.shape[1]'], {}), '(prediction_train.shape[1])\n', (6187, 6214), True, 'import numpy as np\n'), ((759, 785), 'numpy.dot', 'np.dot', (['target', 'prediction'], {}), '(target, prediction)\n', (765, 785), True, 'import numpy as np\n'), ((2825, 2845), 'numpy.sqrt', 'np.sqrt', (['(p * (1 - p))'], {}), '(p * (1 - p))\n', (2832, 2845), True, 'import numpy as np\n'), ((2865, 2875), 'numpy.sqrt', 'np.sqrt', (['n'], {}), '(n)\n', (2872, 2875), True, 'import numpy as np\n'), ((789, 804), 'numpy.linalg.norm', 'LA.norm', (['target'], {}), '(target)\n', (796, 804), True, 'from numpy import linalg as LA\n'), ((807, 826), 'numpy.linalg.norm', 'LA.norm', (['prediction'], {}), '(prediction)\n', (814, 826), True, 'from numpy import linalg as LA\n'), ((1178, 1203), 'numpy.zeros', 'np.zeros', (['y_true.shape[0]'], {}), '(y_true.shape[0])\n', (1186, 1203), True, 'import numpy as np\n'), ((1206, 1221), 'numpy.mean', 'np.mean', (['y_true'], {}), '(y_true)\n', (1213, 1221), True, 'import numpy as np\n'), ((1646, 1666), 'numpy.mean', 'np.mean', (['eval_result'], {}), '(eval_result)\n', (1653, 1666), True, 'import numpy as np\n'), ((1732, 1754), 'scipy.stats.sem', 'stats.sem', 
(['eval_result'], {}), '(eval_result)\n', (1741, 1754), False, 'from scipy import stats\n'), ((1820, 1842), 'numpy.median', 'np.median', (['eval_result'], {}), '(eval_result)\n', (1829, 1842), True, 'import numpy as np\n'), ((2022, 2052), 'numpy.quantile', 'np.quantile', (['eval_result', '(0.25)'], {}), '(eval_result, 0.25)\n', (2033, 2052), True, 'import numpy as np\n'), ((2248, 2278), 'numpy.quantile', 'np.quantile', (['eval_result', '(0.75)'], {}), '(eval_result, 0.75)\n', (2259, 2278), True, 'import numpy as np\n'), ((4215, 4258), 'numpy.zeros', 'np.zeros', (["result_temp['target_train'].shape"], {}), "(result_temp['target_train'].shape)\n", (4223, 4258), True, 'import numpy as np\n'), ((4298, 4340), 'numpy.zeros', 'np.zeros', (["result_temp['target_test'].shape"], {}), "(result_temp['target_test'].shape)\n", (4306, 4340), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 4 11:19:57 2018
@author: rwilson
"""
import pygmsh
import meshio
import numpy as np
import pickle
class utilities():
'''A collection of functions for interacting with the mesh object
'''
def meshOjfromDisk(meshObjectPath='cly.Mesh'):
'''Read the entire mesh object from disk.
Parameters
----------
meshObjectPath : str (default='cly.Mesh')
'''
with open(meshObjectPath, 'rb') as clyMesh_file:
return pickle.load(clyMesh_file)
class mesher():
    '''Mesh generator class that uses pygmsh to drive gmsh.
Parameters
----------
mesh_param : dict
Expected parameters defining the mesh, char_len, height, radius
cell_data : dict
Contains line, tetra, triangle, vertex 'gmsh:physical' and
'gmsh:geometrical'.
cells : dict
        Contains line, tetra, triangle, vertex of the point indices as defined in
``points``.
points : array(float)
Matrix of xyz coords for each point in the mesh domain
Notes
-----
Understanding the mesh structure
    Points are a list of each point or vertex in x,y,z positions.
cell_data['tetra']['gmsh:physical'] : the physical values of each tetra
    * cell['tetra'] : list of lists of each tetrahedron's vertex indices referencing to
the coords inside the points. [points[i1],
points[i2],
points[i3],
points[i4]]
'''
def __init__(self, mesh_param):
self.mesh_param = mesh_param
self.cell_data = None
self.points = None
self.cells = None
self.cell_cent = None
def meshIt(self):
'''Produces the mesh.
'''
self._cylinderMesh()
self._cellCent()
def _cylinderMesh(self):
''' Produce a cylindrical mesh
'''
# The geometry object
geom = pygmsh.opencascade.Geometry()
# Positions
btm_face = [0.0, 0.0, 0.0]
axis = [0.0, 0.0, self.mesh_param['height']]
# create the cylinder with open cascade
geom.add_cylinder(btm_face, axis, self.mesh_param['radius'],
char_length=self.mesh_param['char_len']
)
# Make the mesh
self.points, self.cells, _, self.cell_data, _ = pygmsh.generate_mesh(geom)
def _cellCent(self):
''' Calculate the centre of each tetra.
'''
        # The vertices in cart coords
tetra_verts = [ np.array([self.points[vert[0]], self.points[vert[1]],
self.points[vert[2]], self.points[vert[3]]])
for vert in self.cells['tetra']]
# The centre of tetra in cart coords
self.cell_cent = [np.array([vert[:,0].sum()/4, vert[:,1].sum()/4, vert[:,2].sum()/4])
for vert in tetra_verts]
def saveMesh(self, name):
'''Save the mesh to file.
Parameters
----------
name : str
Name of the mesh file saved to the current directory.
'''
mesh = meshio.Mesh(self.points, self.cells, cell_data=self.cell_data)
meshio.write('%s.vtu' % name, mesh)
# meshio.write('%s.vtu' % name, self.points, self.cells, cell_data=self.cell_data)
# meshio.write('%s.msh4' % name, self.points, self.cells, cell_data=self.cell_data)
# meshio.gmsh_io.write('%s.msh' % name, self.points, self.cells, cell_data=self.cell_data)
def setCellsVal(self, cell_values):
'''Set each cell physical value.
Parameters
----------
cell_values : array/list
physical values of each tetra cell within the mesh domain in order
corresponding to ``points``.
'''
self.cell_data['tetra']['gmsh:physical'] = cell_values
def meshOjtoDisk(self):
'''Save the entire mesh object to disk
'''
with open('cly.Mesh', 'wb') as clyMesh_file:
pickle.dump(self, clyMesh_file)
def meshOjfromDisk(self):
        '''Read the entire mesh object from disk.
        TODO
        ----
        Should likely deprecate this function and simply use the one in the utilities class
'''
with open('cly.Mesh', 'rb') as clyMesh_file:
return pickle.load(clyMesh_file)
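# --- illustrative usage (parameter values are made up) ------------------------
# mesh_param must provide 'char_len', 'height' and 'radius' (see the class docstring):
#
#     params = {'char_len': 0.5, 'height': 10.0, 'radius': 2.5}
#     cyl = mesher(params)
#     cyl.meshIt()                                   # build points, cells and cell centres
#     cyl.setCellsVal(np.ones(len(cyl.cell_cent)))   # one physical value per tetra
#     cyl.saveMesh('cylinder')                       # writes cylinder.vtu
#     cyl.meshOjtoDisk()                             # pickles the object to cly.Mesh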
|
[
"pickle.dump",
"pygmsh.generate_mesh",
"meshio.write",
"pickle.load",
"numpy.array",
"pygmsh.opencascade.Geometry",
"meshio.Mesh"
] |
[((2184, 2213), 'pygmsh.opencascade.Geometry', 'pygmsh.opencascade.Geometry', ([], {}), '()\n', (2211, 2213), False, 'import pygmsh\n'), ((2589, 2615), 'pygmsh.generate_mesh', 'pygmsh.generate_mesh', (['geom'], {}), '(geom)\n', (2609, 2615), False, 'import pygmsh\n'), ((3385, 3447), 'meshio.Mesh', 'meshio.Mesh', (['self.points', 'self.cells'], {'cell_data': 'self.cell_data'}), '(self.points, self.cells, cell_data=self.cell_data)\n', (3396, 3447), False, 'import meshio\n'), ((3457, 3492), 'meshio.write', 'meshio.write', (["('%s.vtu' % name)", 'mesh'], {}), "('%s.vtu' % name, mesh)\n", (3469, 3492), False, 'import meshio\n'), ((549, 574), 'pickle.load', 'pickle.load', (['clyMesh_file'], {}), '(clyMesh_file)\n', (560, 574), False, 'import pickle\n'), ((2768, 2870), 'numpy.array', 'np.array', (['[self.points[vert[0]], self.points[vert[1]], self.points[vert[2]], self.\n points[vert[3]]]'], {}), '([self.points[vert[0]], self.points[vert[1]], self.points[vert[2]],\n self.points[vert[3]]])\n', (2776, 2870), True, 'import numpy as np\n'), ((4276, 4307), 'pickle.dump', 'pickle.dump', (['self', 'clyMesh_file'], {}), '(self, clyMesh_file)\n', (4287, 4307), False, 'import pickle\n'), ((4595, 4620), 'pickle.load', 'pickle.load', (['clyMesh_file'], {}), '(clyMesh_file)\n', (4606, 4620), False, 'import pickle\n')]
|
import itertools
from typing import List, DefaultDict, Tuple
import numpy as np
import pandas as pd
from sklearn.base import clone
from sklearn.metrics import roc_auc_score
# from sklearn.metrics import recall_score, accuracy_score, confusion_matrix
from sklearn.model_selection import KFold
from .categorical_encoders import LeaveOneOutEncoder
class LOOGridSearchCV:
"""
Specially prepared class to do grid search with cross-validation on our loo encoded
DataFrame.
    Scores should be approximately correct, although I have no proof of that :)
"""
def __init__(self,
train_df: pd.DataFrame,
model,
params_grid: DefaultDict,
columns_to_encode: List,
columns_to_drop_from_training: List,
Xs_train: List[pd.DataFrame] = None,
ys_train: List[pd.DataFrame] = None,
Xs_val: List[pd.DataFrame] = None,
ys_val: List[pd.DataFrame] = None,
ohe_emails: bool = True,
mean: int = 1,
std: int = 0.05,
n_folds: int = 5,
encoded_df: pd.DataFrame = pd.DataFrame(),
) -> None:
"""
        :param train_df: training DataFrame (will then be split into train/val n_folds times)
:param model: model to train
:param params_grid: param_grid to search
:param columns_to_encode: categorical columns, which you want to encode using loo
:param columns_to_drop_from_training: columns to drop from training phase
:param ohe_emails: if set to True, performs OHE on emails column
        :param Xs_train: optionally, pre-computed per-fold training DataFrames (leave as None to build them)
:param mean: mean to regularization part of the encoding
:param std: std to regularization part of the encoding
:param n_folds: n_folds to validate
:param encoded_df: if task was done before, just pass here already encoded_df
"
"""
self.processed_train_df = (train_df.copy(deep=True)
.reset_index()
.drop(columns='name'))
self.model = model
self.params_grid = params_grid
self.columns_to_encode = columns_to_encode
self.columns_to_drop_from_training = columns_to_drop_from_training
self.ohe_emails = ohe_emails
self.mean = mean
self.std = std
self.n_folds = n_folds
if not Xs_train:
self.Xs_train, self.ys_train, self.Xs_val, self.ys_val = ([] for i in range(4))
else:
self.Xs_train = Xs_train
self.ys_train = ys_train
self.Xs_val = Xs_val
self.ys_val = ys_val
self.encoded_df_ = encoded_df
# self.best_accuracy_estimator = None
# self.best_recall_estimator = None
self.best_roc_auc_estimator = None
def _ohe_emails(self) -> pd.DataFrame:
"""
internal method for one hot encoding emails column
"""
email_ohe_names = {0: '0_emails',
1: '1_email',
2: '2_emails',
3: '3_emails',
4: '4_emails',
5: '5_emails'}
self.processed_train_df = (pd.concat([self.processed_train_df, pd.get_dummies(
self.processed_train_df['emails'])], axis=1)
.rename(columns=email_ohe_names))
self.columns_to_drop_from_training.append('emails')
return self.processed_train_df
def _prepare_train_val_dfs(self):
"""
        Internal method that prepares the train and validation DataFrames for the given n_folds.
        This makes the grid search and cross-validation much faster: the encoded DataFrame, as
        well as the train and validation DataFrames, only have to be computed once per fold.
"""
if self.ohe_emails:
X = self._ohe_emails()
else:
X = self.processed_train_df
if 'emails' in X.columns:
X['emails'] = X['emails'].astype(int)
y = self.processed_train_df[['target']]
X.drop(columns=self.columns_to_drop_from_training, inplace=True)
"""
to have each sample exactly once in validation set
"""
kf = KFold(n_splits=self.n_folds, shuffle=False, random_state=None)
splits = kf.split(X)
dfs_to_mean = []
for train_index, val_index in splits:
X_train, y_train = X.iloc[train_index], y.iloc[train_index]
X_val, y_val = X.iloc[val_index], y.iloc[val_index]
X_val.drop(columns=['target'], inplace=True)
enc = LeaveOneOutEncoder(train_df=X_train,
test_df=X_val,
columns_to_encode=self.columns_to_encode,
target_column='target',
random_state=42,
mean=self.mean,
std=self.std)
X_train, X_val = enc.fit()
encoded_cols = [col for col in X_train.columns if 'encoded_' in col]
dfs_to_mean.append(X_train[encoded_cols])
train_to_drop = self.columns_to_encode.copy()
train_to_drop.extend(['target'])
X_train.drop(columns=train_to_drop, inplace=True)
test_to_drop = self.columns_to_encode.copy()
X_val.drop(columns=test_to_drop, inplace=True)
self.Xs_train.append(X_train)
self.ys_train.append(y_train)
self.Xs_val.append(X_val)
self.ys_val.append(y_val)
"""
        Here we average the encoded values over the folds, excluding for each sample the fold in
        which it belonged to the validation set rather than the training set. This seems the most
        appropriate way to stay consistent with the cross-validation approach.
"""
for df in dfs_to_mean:
zeros = [0 for col in df.columns]
for index in range(len(self.processed_train_df)):
if index not in df.index:
df.loc[index, :] = zeros
df.sort_index(inplace=True)
mean_df = dfs_to_mean[0].copy(deep=True)
mean_df = mean_df * 0
for num in range(self.n_folds):
mean_df = mean_df + dfs_to_mean[num]
self.encoded_df_ = mean_df.divide(self.n_folds - 1)
def best_roc_auc_estimator_(self, best_roc_auc_estimator):
self.best_roc_auc_estimator = best_roc_auc_estimator
"""
def best_accuracy_estimator_(self, best_accuracy_estimator):
self.best_accuracy_estimator = best_accuracy_estimator
def best_recall_estimator_(self, best_recall_estimator):
self.best_recall_estimator = best_recall_estimator
"""
def grid_search(self) -> Tuple[List, List, List, List]:
"""
        Performs a grid search with cross-validation over params_grid.
        :return: list with each model's mean ROC AUC score across the folds
"""
if self.encoded_df_.empty:
self._prepare_train_val_dfs()
models_roc_auc_scores = []
# models_accuracies, models_recalls, models_parameters, models_cms = ([] for i in range(4))
for p in itertools.product(*self.params_grid.values()):
model_params = self.params_grid.copy()
for counter, key in enumerate(model_params.keys()):
model_params[key] = p[counter]
# models_parameters.append(model_params.items())
clf = clone(self.model)
clf = clf.set_params(**model_params)
cv_roc_auc_scores = []
# cv_accuracies, cv_recalls, cv_cms = ([] for i in range(3))
"""
            fitting and predicting for all folds, then scoring each of them by ROC AUC
"""
for index in range(self.n_folds):
clf.fit(self.Xs_train[index], self.ys_train[index])
predictions = clf.predict(self.Xs_val[index])
cv_roc_auc_scores.append(roc_auc_score(self.ys_val[index], predictions))
# cv_accuracies.append(accuracy_score(self.ys_val[index], predictions))
# cv_recalls.append(recall_score(self.ys_val[index], predictions))
# cv_cms.append(confusion_matrix(self.ys_val[index], predictions))
"""
            final evaluation of scores: each model's score is the mean over all of its fold scores.
            Treat it as informative rather than strict - a source of information about which
            model we should choose.
"""
models_roc_auc_scores.append(np.mean(cv_roc_auc_scores))
# models_accuracies.append(np.mean(cv_accuracies))
# models_recalls.append(np.mean(cv_recalls))
# models_cms.append(np.mean(cv_cms, axis=0))
# if max(models_accuracies) == np.mean(cv_accuracies):
# self.best_accuracy_estimator_(clf)
# if max(models_recalls) == np.mean(cv_recalls):
# self.best_recall_estimator_(clf)
if max(models_roc_auc_scores) == np.mean(cv_roc_auc_scores):
self.best_roc_auc_estimator_(clf)
return models_roc_auc_scores
# return models_accuracies, models_parameters, models_recalls, models_cms
def processed_train(self):
"""
:return: processed train DataFrame with added encoded columns
"""
train = self.processed_train_df.copy(deep=True)
encoded = self.encoded_df_.copy(deep=True)
train = train.drop(columns=self.columns_to_encode+['target'])
processed_train = pd.concat([train, encoded], axis=1)
return processed_train
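# --- illustrative usage (model choice and column names are hypothetical) ------
# train_df is assumed to contain a 'name' index or column, plus 'emails', 'target'
# and the categorical columns to encode, as expected by __init__ above:
#
#     from sklearn.ensemble import RandomForestClassifier
#     search = LOOGridSearchCV(train_df=train_df,
#                              model=RandomForestClassifier(random_state=42),
#                              params_grid={'n_estimators': [100, 300],
#                                           'max_depth': [3, 5]},
#                              columns_to_encode=['country', 'device'],
#                              columns_to_drop_from_training=['index'])
#     roc_auc_scores = search.grid_search()
#     best_model = search.best_roc_auc_estimator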
|
[
"pandas.DataFrame",
"pandas.get_dummies",
"sklearn.model_selection.KFold",
"sklearn.metrics.roc_auc_score",
"numpy.mean",
"pandas.concat",
"sklearn.base.clone"
] |
[((1195, 1209), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1207, 1209), True, 'import pandas as pd\n'), ((4373, 4435), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'self.n_folds', 'shuffle': '(False)', 'random_state': 'None'}), '(n_splits=self.n_folds, shuffle=False, random_state=None)\n', (4378, 4435), False, 'from sklearn.model_selection import KFold\n'), ((9819, 9854), 'pandas.concat', 'pd.concat', (['[train, encoded]'], {'axis': '(1)'}), '([train, encoded], axis=1)\n', (9828, 9854), True, 'import pandas as pd\n'), ((7623, 7640), 'sklearn.base.clone', 'clone', (['self.model'], {}), '(self.model)\n', (7628, 7640), False, 'from sklearn.base import clone\n'), ((8807, 8833), 'numpy.mean', 'np.mean', (['cv_roc_auc_scores'], {}), '(cv_roc_auc_scores)\n', (8814, 8833), True, 'import numpy as np\n'), ((9291, 9317), 'numpy.mean', 'np.mean', (['cv_roc_auc_scores'], {}), '(cv_roc_auc_scores)\n', (9298, 9317), True, 'import numpy as np\n'), ((8172, 8218), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['self.ys_val[index]', 'predictions'], {}), '(self.ys_val[index], predictions)\n', (8185, 8218), False, 'from sklearn.metrics import roc_auc_score\n'), ((3345, 3394), 'pandas.get_dummies', 'pd.get_dummies', (["self.processed_train_df['emails']"], {}), "(self.processed_train_df['emails'])\n", (3359, 3394), True, 'import pandas as pd\n')]
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 8 22:09:19 2017
@author: LinZhang
"""
# These are all the modules we'll be using later. Make sure you can import them
# before proceeding further.
from __future__ import print_function
import numpy as np
import tensorflow as tf
from six.moves import cPickle as pickle
from six.moves import range
def reformat(dataset, labels):
dataset = dataset.reshape(
(-1, image_size, image_size, num_channels)).astype(np.float32)
labels = (np.arange(num_labels) == labels[:, None]).astype(np.float32)
return dataset, labels
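# e.g. with num_labels = 3 and labels = np.array([0, 2]) the broadcasted comparison
# np.arange(3) == labels[:, None] yields [[True, False, False], [False, False, True]],
# i.e. one-hot rows once cast to float32 (illustrative values, not from the data set).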
def accuracy(predictions, labels):
return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1))
/ predictions.shape[0])
# helpers to initialize variables:
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
# basic operations in a CNN:
def conv2d(x, W):
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
image_size = 28
num_labels = 10
num_channels = 1 # grayscale
batch_size = 16
#patch_size = 5 # not really used, finetune your network for fun!
#depth = 16 # not really used, finetune your network for fun!
num_hidden = 1024
pickle_file = 'notMNIST.pickle'
with open(pickle_file, 'rb') as f:
save = pickle.load(f)
train_dataset = save['train_dataset']
train_labels = save['train_labels']
valid_dataset = save['valid_dataset']
valid_labels = save['valid_labels']
test_dataset = save['test_dataset']
test_labels = save['test_labels']
del save # hint to help gc free up memory
print('Training set', train_dataset.shape, train_labels.shape)
print('Validation set', valid_dataset.shape, valid_labels.shape)
print('Test set', test_dataset.shape, test_labels.shape)
train_dataset, train_labels = reformat(train_dataset, train_labels)
valid_dataset, valid_labels = reformat(valid_dataset, valid_labels)
test_dataset, test_labels = reformat(test_dataset, test_labels)
print('Training set', train_dataset.shape, train_labels.shape)
print('Validation set', valid_dataset.shape, valid_labels.shape)
print('Test set', test_dataset.shape, test_labels.shape)
# a small network with two convolutional layers, followed by one fully connected layer
graph = tf.Graph()
with graph.as_default():
# Input data.
tf_train_dataset = tf.placeholder(
tf.float32, shape=(batch_size, image_size, image_size, num_channels))
tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
tf_valid_dataset = tf.constant(valid_dataset)
tf_test_dataset = tf.constant(test_dataset)
keep_prob = tf.placeholder(tf.float32)
# Variables.
layer1_weights = weight_variable([3, 3, num_channels, 8]) # conv kernel
layer1_biases = bias_variable([8])
layer2_weights = weight_variable([3, 3, 8, 16]) # conv kernel
layer2_biases = bias_variable([16])
layer3_weights = weight_variable([image_size // 4 * image_size // 4 * 16, num_hidden])
layer3_biases = bias_variable([num_hidden])
layer4_weights = weight_variable([num_hidden, num_labels])
layer4_biases = bias_variable([num_labels])
# Model.
def model(data,use_dropout = False):
# convolution layer 1
conv1 = conv2d(data, layer1_weights)
hidden1 = tf.nn.relu(conv1 + layer1_biases)
hidden1_pool = max_pool_2x2(hidden1)
# convolution layer 2
conv2 = conv2d(hidden1_pool, layer2_weights)
hidden2 = tf.nn.relu(conv2 + layer2_biases)
hidden2_pool = max_pool_2x2(hidden2)
# full connection layer
shape = hidden2_pool.get_shape().as_list()
reshape = tf.reshape(hidden2_pool, [shape[0], shape[1] * shape[2] * shape[3]])
hidden3 = tf.nn.relu(tf.matmul(reshape, layer3_weights) + layer3_biases)
# dropout
if (use_dropout):
return tf.matmul(tf.nn.dropout(hidden3,keep_prob), layer4_weights) + layer4_biases
else:
return tf.matmul(hidden3, layer4_weights) + layer4_biases
# Training computation.
logits = model(tf_train_dataset,use_dropout = True) # only training uses dropout
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(labels=tf_train_labels, logits=logits))
# learning rate decay
global_step = tf.Variable(0, trainable=False)
starter_learning_rate = 0.1
learning_rate = tf.train.exponential_decay(0.05,
global_step, 100, 0.95, staircase=True)
# Optimizer.
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
# Predictions for the training, validation, and test data.
train_prediction = tf.nn.softmax(logits)
valid_prediction = tf.nn.softmax(model(tf_valid_dataset))
test_prediction = tf.nn.softmax(model(tf_test_dataset))
num_steps = 1001
with tf.Session(graph=graph) as session:
tf.initialize_all_variables().run()
print('Initialized')
for step in range(num_steps):
offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
batch_data = train_dataset[offset:(offset + batch_size), :, :, :]
batch_labels = train_labels[offset:(offset + batch_size), :]
feed_dict = {tf_train_dataset: batch_data, tf_train_labels: batch_labels, keep_prob:0.5}
_, l, predictions = session.run(
[optimizer, loss, train_prediction], feed_dict=feed_dict)
if (step % 100 == 0):
print('Minibatch loss at step %d: %f' % (step, l))
print('Minibatch accuracy: %.1f%%' % accuracy(predictions, batch_labels))
print('Validation accuracy: %.1f%%' % accuracy(
valid_prediction.eval(), valid_labels))
print('Test accuracy: %.1f%%' % accuracy(test_prediction.eval(), test_labels))
|
[
"numpy.argmax",
"tensorflow.reshape",
"tensorflow.matmul",
"tensorflow.Variable",
"numpy.arange",
"tensorflow.nn.conv2d",
"tensorflow.truncated_normal",
"tensorflow.nn.softmax",
"tensorflow.nn.relu",
"six.moves.range",
"tensorflow.nn.softmax_cross_entropy_with_logits",
"tensorflow.placeholder",
"tensorflow.initialize_all_variables",
"tensorflow.Session",
"tensorflow.constant",
"tensorflow.nn.max_pool",
"tensorflow.Graph",
"six.moves.cPickle.load",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.train.exponential_decay",
"tensorflow.nn.dropout"
] |
[((2536, 2546), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (2544, 2546), True, 'import tensorflow as tf\n'), ((844, 882), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['shape'], {'stddev': '(0.1)'}), '(shape, stddev=0.1)\n', (863, 882), True, 'import tensorflow as tf\n'), ((894, 914), 'tensorflow.Variable', 'tf.Variable', (['initial'], {}), '(initial)\n', (905, 914), True, 'import tensorflow as tf\n'), ((957, 986), 'tensorflow.constant', 'tf.constant', (['(0.1)'], {'shape': 'shape'}), '(0.1, shape=shape)\n', (968, 986), True, 'import tensorflow as tf\n'), ((998, 1018), 'tensorflow.Variable', 'tf.Variable', (['initial'], {}), '(initial)\n', (1009, 1018), True, 'import tensorflow as tf\n'), ((1079, 1135), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['x', 'W'], {'strides': '[1, 1, 1, 1]', 'padding': '"""SAME"""'}), "(x, W, strides=[1, 1, 1, 1], padding='SAME')\n", (1091, 1135), True, 'import tensorflow as tf\n'), ((1170, 1245), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['x'], {'ksize': '[1, 2, 2, 1]', 'strides': '[1, 2, 2, 1]', 'padding': '"""SAME"""'}), "(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n", (1184, 1245), True, 'import tensorflow as tf\n'), ((1553, 1567), 'six.moves.cPickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1564, 1567), True, 'from six.moves import cPickle as pickle\n'), ((2614, 2702), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(batch_size, image_size, image_size, num_channels)'}), '(tf.float32, shape=(batch_size, image_size, image_size,\n num_channels))\n', (2628, 2702), True, 'import tensorflow as tf\n'), ((2730, 2788), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(batch_size, num_labels)'}), '(tf.float32, shape=(batch_size, num_labels))\n', (2744, 2788), True, 'import tensorflow as tf\n'), ((2812, 2838), 'tensorflow.constant', 'tf.constant', (['valid_dataset'], {}), '(valid_dataset)\n', (2823, 2838), True, 'import tensorflow as tf\n'), ((2861, 2886), 'tensorflow.constant', 'tf.constant', (['test_dataset'], {}), '(test_dataset)\n', (2872, 2886), True, 'import tensorflow as tf\n'), ((2903, 2929), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {}), '(tf.float32)\n', (2917, 2929), True, 'import tensorflow as tf\n'), ((4616, 4647), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'trainable': '(False)'}), '(0, trainable=False)\n', (4627, 4647), True, 'import tensorflow as tf\n'), ((4700, 4772), 'tensorflow.train.exponential_decay', 'tf.train.exponential_decay', (['(0.05)', 'global_step', '(100)', '(0.95)'], {'staircase': '(True)'}), '(0.05, global_step, 100, 0.95, staircase=True)\n', (4726, 4772), True, 'import tensorflow as tf\n'), ((5005, 5026), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits'], {}), '(logits)\n', (5018, 5026), True, 'import tensorflow as tf\n'), ((5177, 5200), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'graph'}), '(graph=graph)\n', (5187, 5200), True, 'import tensorflow as tf\n'), ((5294, 5310), 'six.moves.range', 'range', (['num_steps'], {}), '(num_steps)\n', (5299, 5310), False, 'from six.moves import range\n'), ((3591, 3624), 'tensorflow.nn.relu', 'tf.nn.relu', (['(conv1 + layer1_biases)'], {}), '(conv1 + layer1_biases)\n', (3601, 3624), True, 'import tensorflow as tf\n'), ((3780, 3813), 'tensorflow.nn.relu', 'tf.nn.relu', (['(conv2 + layer2_biases)'], {}), '(conv2 + layer2_biases)\n', (3790, 3813), True, 'import tensorflow as tf\n'), ((3969, 4037), 'tensorflow.reshape', 'tf.reshape', (['hidden2_pool', '[shape[0], shape[1] * shape[2] * 
shape[3]]'], {}), '(hidden2_pool, [shape[0], shape[1] * shape[2] * shape[3]])\n', (3979, 4037), True, 'import tensorflow as tf\n'), ((4492, 4570), 'tensorflow.nn.softmax_cross_entropy_with_logits', 'tf.nn.softmax_cross_entropy_with_logits', ([], {'labels': 'tf_train_labels', 'logits': 'logits'}), '(labels=tf_train_labels, logits=logits)\n', (4531, 4570), True, 'import tensorflow as tf\n'), ((4854, 4902), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', (['learning_rate'], {}), '(learning_rate)\n', (4887, 4902), True, 'import tensorflow as tf\n'), ((5217, 5246), 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), '()\n', (5244, 5246), True, 'import tensorflow as tf\n'), ((537, 558), 'numpy.arange', 'np.arange', (['num_labels'], {}), '(num_labels)\n', (546, 558), True, 'import numpy as np\n'), ((4067, 4101), 'tensorflow.matmul', 'tf.matmul', (['reshape', 'layer3_weights'], {}), '(reshape, layer3_weights)\n', (4076, 4101), True, 'import tensorflow as tf\n'), ((4291, 4325), 'tensorflow.matmul', 'tf.matmul', (['hidden3', 'layer4_weights'], {}), '(hidden3, layer4_weights)\n', (4300, 4325), True, 'import tensorflow as tf\n'), ((689, 714), 'numpy.argmax', 'np.argmax', (['predictions', '(1)'], {}), '(predictions, 1)\n', (698, 714), True, 'import numpy as np\n'), ((718, 738), 'numpy.argmax', 'np.argmax', (['labels', '(1)'], {}), '(labels, 1)\n', (727, 738), True, 'import numpy as np\n'), ((4192, 4225), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['hidden3', 'keep_prob'], {}), '(hidden3, keep_prob)\n', (4205, 4225), True, 'import tensorflow as tf\n')]
|
import os
import random
from contextlib import contextmanager
from typing import Generator
import numpy as np
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import config, ops
DIGITS = frozenset(str(i) for i in range(10))
@contextmanager
def tensorflow_random_state(seed: int) -> Generator[None, None, None]:
# Save values
origin_gpu_det = os.environ.get("TF_DETERMINISTIC_OPS", None)
orig_random_state = random.getstate()
orig_np_random_state = np.random.get_state()
if context.executing_eagerly():
tf_random_seed = context.global_seed()
else:
tf_random_seed = ops.get_default_graph().seed
determism_enabled = config.is_op_determinism_enabled()
config.enable_op_determinism()
# Set values
os.environ["TF_DETERMINISTIC_OPS"] = "1"
random.seed(seed)
np.random.seed(seed)
tf.random.set_seed(seed)
yield
# Reset values
if origin_gpu_det is not None:
os.environ["TF_DETERMINISTIC_OPS"] = origin_gpu_det
else:
os.environ.pop("TF_DETERMINISTIC_OPS")
random.setstate(orig_random_state)
np.random.set_state(orig_np_random_state)
tf.random.set_seed(tf_random_seed)
if not determism_enabled:
config.disable_op_determinism()
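# --- illustrative usage --------------------------------------------------------
# The context manager above seeds Python's random module, NumPy and TensorFlow,
# enables deterministic TF ops, and restores the previous global state on exit:
#
#     with tensorflow_random_state(42):
#         model.fit(x_train, y_train)   # reproducible run (model and data assumed)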
|
[
"tensorflow.random.set_seed",
"tensorflow.python.framework.config.is_op_determinism_enabled",
"numpy.random.seed",
"numpy.random.get_state",
"tensorflow.python.framework.config.enable_op_determinism",
"tensorflow.python.eager.context.global_seed",
"tensorflow.python.framework.ops.get_default_graph",
"numpy.random.set_state",
"os.environ.get",
"tensorflow.python.eager.context.executing_eagerly",
"random.seed",
"random.setstate",
"os.environ.pop",
"tensorflow.python.framework.config.disable_op_determinism",
"random.getstate"
] |
[((409, 453), 'os.environ.get', 'os.environ.get', (['"""TF_DETERMINISTIC_OPS"""', 'None'], {}), "('TF_DETERMINISTIC_OPS', None)\n", (423, 453), False, 'import os\n'), ((478, 495), 'random.getstate', 'random.getstate', ([], {}), '()\n', (493, 495), False, 'import random\n'), ((523, 544), 'numpy.random.get_state', 'np.random.get_state', ([], {}), '()\n', (542, 544), True, 'import numpy as np\n'), ((552, 579), 'tensorflow.python.eager.context.executing_eagerly', 'context.executing_eagerly', ([], {}), '()\n', (577, 579), False, 'from tensorflow.python.eager import context\n'), ((717, 751), 'tensorflow.python.framework.config.is_op_determinism_enabled', 'config.is_op_determinism_enabled', ([], {}), '()\n', (749, 751), False, 'from tensorflow.python.framework import config, ops\n'), ((756, 786), 'tensorflow.python.framework.config.enable_op_determinism', 'config.enable_op_determinism', ([], {}), '()\n', (784, 786), False, 'from tensorflow.python.framework import config, ops\n'), ((854, 871), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (865, 871), False, 'import random\n'), ((876, 896), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (890, 896), True, 'import numpy as np\n'), ((901, 925), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['seed'], {}), '(seed)\n', (919, 925), True, 'import tensorflow as tf\n'), ((1113, 1147), 'random.setstate', 'random.setstate', (['orig_random_state'], {}), '(orig_random_state)\n', (1128, 1147), False, 'import random\n'), ((1152, 1193), 'numpy.random.set_state', 'np.random.set_state', (['orig_np_random_state'], {}), '(orig_np_random_state)\n', (1171, 1193), True, 'import numpy as np\n'), ((1198, 1232), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['tf_random_seed'], {}), '(tf_random_seed)\n', (1216, 1232), True, 'import tensorflow as tf\n'), ((606, 627), 'tensorflow.python.eager.context.global_seed', 'context.global_seed', ([], {}), '()\n', (625, 627), False, 'from tensorflow.python.eager import context\n'), ((1070, 1108), 'os.environ.pop', 'os.environ.pop', (['"""TF_DETERMINISTIC_OPS"""'], {}), "('TF_DETERMINISTIC_OPS')\n", (1084, 1108), False, 'import os\n'), ((1271, 1302), 'tensorflow.python.framework.config.disable_op_determinism', 'config.disable_op_determinism', ([], {}), '()\n', (1300, 1302), False, 'from tensorflow.python.framework import config, ops\n'), ((663, 686), 'tensorflow.python.framework.ops.get_default_graph', 'ops.get_default_graph', ([], {}), '()\n', (684, 686), False, 'from tensorflow.python.framework import config, ops\n')]
|
"""
Search using NASA CMR
"""
from __future__ import division, unicode_literals, print_function, absolute_import
import json
import logging
import requests
import numpy as np
_logger = logging.getLogger(__name__)
from podpac.core.utils import _get_from_url
CMR_URL = r"https://cmr.earthdata.nasa.gov/search/"
def get_collection_entries(session=None, short_name=None, keyword=None, **kwargs):
"""Uses NASA CMR to retrieve metadata about a collection
Parameters
-----------
    session: :class:`requests.Session`, optional
An authenticated Earthdata login session
short_name: str, optional
The short name of the dataset
keyword: str, optional
Any keyword search parameters
**kwargs: str, optional
Any additional query parameters
Returns
---------
list:
A list of collection metadata dictionaries
Examples:
-----------
    >>> # This makes the following request https://cmr.earthdata.nasa.gov/search/collections.json?short_name=SPL2SMAP_S
>>> get_collection_id(short_name='SPL2SMAP_S')
['C1522341104-NSIDC_ECS']
"""
base_url = CMR_URL + "collections.json?"
if short_name is not None:
kwargs["short_name"] = short_name
if keyword is not None:
kwargs["keyword"] = keyword
query_string = "&".join([k + "=" + v for k, v in kwargs.items()])
# use generic requests session if `session` is not defined
if session is None:
session = requests
pydict = _get_from_url(base_url + query_string, session).json()
entries = pydict["feed"]["entry"]
return entries
def get_collection_id(session=None, short_name=None, keyword=None, **kwargs):
"""Uses NASA CMR to retrieve collection id
Parameters
-----------
session: :class:`requets.Session`, optional
An authenticated Earthdata login session
short_name: str, optional
The short name of the dataset
keyword: str, optional
Any keyword search parameters
**kwargs: str, optional
Any additional query parameters
Returns
---------
list
A list of collection id's (ideally only one)
Examples:
-----------
    >>> # This makes the following request https://cmr.earthdata.nasa.gov/search/collections.json?short_name=SPL2SMAP_S
>>> get_collection_id(short_name='SPL2SMAP_S')
['C1522341104-NSIDC_ECS']
"""
entries = get_collection_entries(session=session, short_name=short_name, keyword=keyword, **kwargs)
if len(entries) > 1:
_logger.warning("Found more than 1 entry for collection_id search")
collection_id = [e["id"] for e in entries]
return collection_id
def search_granule_json(session=None, entry_map=None, **kwargs):
"""Search for specific files from NASA CMR for a particular collection
Parameters
-----------
    session: :class:`requests.Session`, optional
An authenticated Earthdata login session
entry_map: function
A function applied to each individual entry. Could be used to filter out certain data in an entry
**kwargs: dict
Additional query string parameters.
At minimum the provider, provider_id, concept_id, collection_concept_id, short_name, version, or entry_title
need to be provided for a granule search.
Returns
---------
list
Entries for each granule in the collection based on the search terms
"""
base_url = CMR_URL + "granules.json?"
if not np.any(
[
m not in kwargs
for m in [
"provider",
"provider_id",
"concept_id",
"collection_concept_id",
"short_name",
"version",
"entry_title",
]
]
):
raise ValueError(
"Need to provide either"
" provider, provider_id, concept_id, collection_concept_id, short_name, version or entry_title"
" for granule search."
)
if "page_size" not in kwargs:
kwargs["page_size"] = "2000"
if entry_map is None:
entry_map = lambda x: x
query_string = "&".join([k + "=" + str(v) for k, v in kwargs.items()])
if session is None:
session = requests
url = base_url + query_string
if "page_num" not in kwargs:
entries = _get_all_granule_pages(session, url, entry_map)
else:
pydict = _get_from_url(url, session).json()
entries = list(map(entry_map, pydict["feed"]["entry"]))
return entries
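# --- illustrative usage --------------------------------------------------------
# e.g. collect the CMR concept ids of all granules in the SPL2SMAP_S collection
# (short_name taken from the get_collection_id docstring above); this requires
# network access and, for some collections, an authenticated Earthdata session:
#
#     granule_ids = search_granule_json(short_name='SPL2SMAP_S',
#                                       entry_map=lambda e: e['id'])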
def _get_all_granule_pages(session, url, entry_map, max_paging_depth=1000000):
"""Helper function for searching through all pages for a collection.
Parameters
-----------
    session: :class:`requests.Session`, optional
An authenticated Earthdata login session
url: str
URL to website
entry_map: function
Function for mapping the entries to a desired format
    max_paging_depth: int, optional
        Maximum number of granule entries to page through (default 1,000,000)
"""
page_size = int([q for q in url.split("?")[1].split("&") if "page_size" in q][0].split("=")[1])
max_pages = int(max_paging_depth / page_size)
pydict = _get_from_url(url, session).json()
entries = list(map(entry_map, pydict["feed"]["entry"]))
for i in range(1, max_pages):
page_url = url + "&page_num=%d" % (i + 1)
page_entries = _get_from_url(page_url, session).json()["feed"]["entry"]
if not page_entries:
break
entries.extend(list(map(entry_map, page_entries)))
return entries
|
[
"numpy.any",
"podpac.core.utils._get_from_url",
"logging.getLogger"
] |
[((188, 215), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (205, 215), False, 'import logging\n'), ((3485, 3632), 'numpy.any', 'np.any', (["[(m not in kwargs) for m in ['provider', 'provider_id', 'concept_id',\n 'collection_concept_id', 'short_name', 'version', 'entry_title']]"], {}), "([(m not in kwargs) for m in ['provider', 'provider_id', 'concept_id',\n 'collection_concept_id', 'short_name', 'version', 'entry_title']])\n", (3491, 3632), True, 'import numpy as np\n'), ((1500, 1547), 'podpac.core.utils._get_from_url', '_get_from_url', (['(base_url + query_string)', 'session'], {}), '(base_url + query_string, session)\n', (1513, 1547), False, 'from podpac.core.utils import _get_from_url\n'), ((5155, 5182), 'podpac.core.utils._get_from_url', '_get_from_url', (['url', 'session'], {}), '(url, session)\n', (5168, 5182), False, 'from podpac.core.utils import _get_from_url\n'), ((4439, 4466), 'podpac.core.utils._get_from_url', '_get_from_url', (['url', 'session'], {}), '(url, session)\n', (4452, 4466), False, 'from podpac.core.utils import _get_from_url\n'), ((5358, 5390), 'podpac.core.utils._get_from_url', '_get_from_url', (['page_url', 'session'], {}), '(page_url, session)\n', (5371, 5390), False, 'from podpac.core.utils import _get_from_url\n')]
|
#!/usr/bin/env
# -*- coding: utf-8 -*-
# Copyright (C) <NAME> - All Rights Reserved
# Unauthorized copying of this file, via any medium is strictly prohibited
# Proprietary and confidential
# Written by <NAME> <<EMAIL>>, January 2017
import os
import numpy as np
import pandas as pd
import scipy.io as sio
import utils.datasets as utils
# ---------------------------------------------------------------
# data set paths
__data_path = "{}/data/NMR_40wines.mat".format(os.path.split(__file__)[0])
__pickle_path = "{}/cache/nmr_wine.pickle".format(os.path.split(__file__)[0])
# ---------------------------------------------------------------
# TODO: Add docstring with usage examples (see 'uv_fuel' data set)
@utils.load_data_from_pickle(__pickle_path)
def load_nmr_wines():
"""Loads the NMR Wines data set.
Returns:
A Pandas DataFrame with all the data set info.
Examples:
>>> ds = load_nmr_wines()
>>> ds['wine_data'].shape
(40, 8729)
>>> ds['wine_ints'].shape
(22, 1)
"""
# loading matlab data set object
raw_data = sio.loadmat(__data_path)
# validating loaded data
if raw_data is None:
raise Exception('Error while loading 1H-NMR Wines data.')
# getting features labels
features_labels = raw_data['ppm'][0].tolist()
# getting properties labels
props_labels = list(map(lambda x: x[0], raw_data['Label'][0]))
# getting samples data
data = raw_data['X']
# getting properties data
props_data = raw_data['Y']
# creating the wine data set
all_data = np.hstack([data, props_data])
all_labels = range(all_data.shape[0])
all_features = features_labels + props_labels
wine_ds = utils.build_data_set(all_data.tolist(), all_labels, all_features)
# ----------------------
wine_ints_data = raw_data['wine_ints'][0]
wine_ints_ds = pd.DataFrame(wine_ints_data)
# ----------------------
# the final data set
ds = {
'wine_data': wine_ds,
'wine_ints': wine_ints_ds,
}
# returning the final data set
return ds
|
[
"pandas.DataFrame",
"scipy.io.loadmat",
"numpy.hstack",
"utils.datasets.load_data_from_pickle",
"os.path.split"
] |
[((717, 759), 'utils.datasets.load_data_from_pickle', 'utils.load_data_from_pickle', (['__pickle_path'], {}), '(__pickle_path)\n', (744, 759), True, 'import utils.datasets as utils\n'), ((1102, 1126), 'scipy.io.loadmat', 'sio.loadmat', (['__data_path'], {}), '(__data_path)\n', (1113, 1126), True, 'import scipy.io as sio\n'), ((1593, 1622), 'numpy.hstack', 'np.hstack', (['[data, props_data]'], {}), '([data, props_data])\n', (1602, 1622), True, 'import numpy as np\n'), ((1891, 1919), 'pandas.DataFrame', 'pd.DataFrame', (['wine_ints_data'], {}), '(wine_ints_data)\n', (1903, 1919), True, 'import pandas as pd\n'), ((472, 495), 'os.path.split', 'os.path.split', (['__file__'], {}), '(__file__)\n', (485, 495), False, 'import os\n'), ((551, 574), 'os.path.split', 'os.path.split', (['__file__'], {}), '(__file__)\n', (564, 574), False, 'import os\n')]
|
#!/usr/bin/env python3
import numpy as np
import pickle
from PIL import Image
w = pickle.load(open("weights1000.pkl", "rb"))
def Classify(example):
return w.dot(example)
#Seems to get 2, 3, 4 correct...
for i in range(0, 5):
image = Image.open("test_images/{}.jpg".format(i)).convert("L")
x = np.asarray(image.getdata())
x = (255 - x)/255
x = np.r_[x, 1]
y = Classify(x)
print(y)
print("Actual: {} Classification: {}".format(i, np.argmax(y)))
|
[
"numpy.argmax"
] |
[((464, 476), 'numpy.argmax', 'np.argmax', (['y'], {}), '(y)\n', (473, 476), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
#
# Tests for ``sim.py``
# These tests were hand calculated by <NAME>: <EMAIL>
#
from clusim.clustering import Clustering
import clusim.sim as sim
from clusim.dag import DAG
import clusim.clusimelement as clusimelement
from numpy.testing import assert_approx_equal
from numpy import mean
def test_comparison_example():
c1_elm2clu_dict = {0: [0], 1: [1], 2: [1], 3: [0], 4: [2], 5: [1]}
c2_elm2clu_dict = {0: [0], 1: [1], 2: [1], 3: [0], 4: [2], 5: [2]}
c1 = Clustering(elm2clu_dict=c1_elm2clu_dict)
c2 = Clustering(elm2clu_dict=c2_elm2clu_dict)
N11, N10, N01, N00 = sim.count_pairwise_cooccurence(c1, c2)
    assert N11 == 2, "Element co-occurrence count for N11 does not match. %s != %s" % (N11, 2)
    assert N10 == 2, "Element co-occurrence count for N10 does not match. %s != %s" % (N10, 2)
    assert N01 == 1, "Element co-occurrence count for N01 does not match. %s != %s" % (N01, 1)
    assert N00 == 10, "Element co-occurrence count for N00 does not match. %s != %s" % (N00, 10)
known_sim_values = {'jaccard_index': 0.4,
'rand_index': 0.8,
'fowlkes_mallows_index': 0.5773502691896258,
'rogers_tanimoto_index': 2./3.,
'southwood_index': 2./3.,
'czekanowski_index': 0.5714285714285714,
'dice_index': 0.5714285714285714,
'sorensen_index': 0.5714285714285714,
'pearson_correlation': 0.011363636363636364,
'classification_error': 0.16666666666666674,
'purity_index': 0.8333333333333333,
'fmeasure': 0.5714285714285714,
'nmi': 0.7396673768007593,
'vi': 0.792481250360578,
'geometric_accuracy': 0.8333333333333334,
'overlap_quality': 0.0,
'onmi': 0.7449589906475155,
'omega_index': 0.44444444444444453
}
for simfunc in sim.available_similarity_measures:
simvalue = eval('sim.' + simfunc+'(c1, c2)')
assert simvalue == known_sim_values[simfunc], "Similarity Measure %s does not match. %s != %s" % (simfunc, simvalue, known_sim_values[simfunc])
def test_model_example():
c1_elm2clu_dict = {0: [0], 1: [1], 2: [1], 3: [0]}
c2_elm2clu_dict = {0: [0], 1: [1], 2: [1], 3: [1]}
c1 = Clustering(elm2clu_dict=c1_elm2clu_dict)
c2 = Clustering(elm2clu_dict=c2_elm2clu_dict)
known_rand_values = {'perm': 0.5,
'perm1': 0.5,
'num': 0.510204081632653,
'num1': 0.5,
'all': 0.555555555555556,
'all1': 0.5
}
known_mi_values = {'perm': 0.311278124459133,
'perm1': 0.311278124459133,
'num': 0.309927805548467,
'num1': 0.301825892084476,
'all': 0.611635721962606,
'all1': 0.419448541053684
}
for rdm in sim.available_random_models:
exp_rand_value = sim.expected_rand_index(n_elements=c1.n_elements,
n_clusters1=c1.n_clusters,
n_clusters2=c2.n_clusters,
clu_size_seq1=c1.clu_size_seq,
clu_size_seq2=c2.clu_size_seq,
random_model=rdm
)
assert_approx_equal(exp_rand_value, known_rand_values[rdm], 10**(-10), "Expected Rand Index with %s Random Model does not match. %s != %s" % (rdm, exp_rand_value, known_rand_values[rdm]))
exp_mi_value = sim.expected_mi(n_elements=c1.n_elements,
n_clusters1=c1.n_clusters,
n_clusters2=c2.n_clusters,
clu_size_seq1=c1.clu_size_seq,
clu_size_seq2=c2.clu_size_seq,
random_model=rdm,
logbase=2.)
assert_approx_equal(exp_mi_value, known_mi_values[rdm], 10**(-10), "Expected MI with %s Random Model does not match. %s != %s" % (rdm, exp_mi_value, known_mi_values[rdm]) )
def test_elementsim_example():
# taken from Fig 3 of Gates et al (2018) Scientific Reports
# overlapping clustering
c1_elm2clu_dict = {0: [0], 1: [0], 2: [0], 3: [1], 4: [1], 5: [1, 2], 6: [2]}
# hierarchical clustering
c2_elm2clu_dict = {0: [1], 1: [1], 2: [2], 3: [5], 4: [5], 5: [6, 8], 6: [9]}
c2_dag = DAG()
c2_dag.add_edges_from([(0, 1), (0, 2), (3, 4), (4, 5), (4, 6), (3, 7), (7, 8), (7, 9)])
c1 = Clustering(elm2clu_dict=c1_elm2clu_dict)
c2 = Clustering(elm2clu_dict=c2_elm2clu_dict, hier_graph=c2_dag)
known_elsim = [0.92875658, 0.92875658, 0.85751315, 0.25717544, 0.74282456, 0.82083876, 0.80767074]
elsim, ellabels = clusimelement.element_sim_elscore(c1, c2, alpha=0.9, r=1., r2=None, rescale_path_type='max')
for i in range(7):
assert_approx_equal(elsim[i], known_elsim[i], 10**(-10), "Element-centric similarity for element %s does not match. %s != %s" % (i, elsim[i], known_elsim[i]) )
if __name__ == "__main__":
test_comparison_example()
test_model_example()
test_elementsim_example()
|
[
"clusim.sim.expected_mi",
"clusim.sim.expected_rand_index",
"clusim.clusimelement.element_sim_elscore",
"clusim.dag.DAG",
"clusim.clustering.Clustering",
"numpy.testing.assert_approx_equal",
"clusim.sim.count_pairwise_cooccurence"
] |
[((499, 539), 'clusim.clustering.Clustering', 'Clustering', ([], {'elm2clu_dict': 'c1_elm2clu_dict'}), '(elm2clu_dict=c1_elm2clu_dict)\n', (509, 539), False, 'from clusim.clustering import Clustering\n'), ((549, 589), 'clusim.clustering.Clustering', 'Clustering', ([], {'elm2clu_dict': 'c2_elm2clu_dict'}), '(elm2clu_dict=c2_elm2clu_dict)\n', (559, 589), False, 'from clusim.clustering import Clustering\n'), ((616, 654), 'clusim.sim.count_pairwise_cooccurence', 'sim.count_pairwise_cooccurence', (['c1', 'c2'], {}), '(c1, c2)\n', (646, 654), True, 'import clusim.sim as sim\n'), ((2501, 2541), 'clusim.clustering.Clustering', 'Clustering', ([], {'elm2clu_dict': 'c1_elm2clu_dict'}), '(elm2clu_dict=c1_elm2clu_dict)\n', (2511, 2541), False, 'from clusim.clustering import Clustering\n'), ((2551, 2591), 'clusim.clustering.Clustering', 'Clustering', ([], {'elm2clu_dict': 'c2_elm2clu_dict'}), '(elm2clu_dict=c2_elm2clu_dict)\n', (2561, 2591), False, 'from clusim.clustering import Clustering\n'), ((4906, 4911), 'clusim.dag.DAG', 'DAG', ([], {}), '()\n', (4909, 4911), False, 'from clusim.dag import DAG\n'), ((5014, 5054), 'clusim.clustering.Clustering', 'Clustering', ([], {'elm2clu_dict': 'c1_elm2clu_dict'}), '(elm2clu_dict=c1_elm2clu_dict)\n', (5024, 5054), False, 'from clusim.clustering import Clustering\n'), ((5064, 5123), 'clusim.clustering.Clustering', 'Clustering', ([], {'elm2clu_dict': 'c2_elm2clu_dict', 'hier_graph': 'c2_dag'}), '(elm2clu_dict=c2_elm2clu_dict, hier_graph=c2_dag)\n', (5074, 5123), False, 'from clusim.clustering import Clustering\n'), ((5251, 5348), 'clusim.clusimelement.element_sim_elscore', 'clusimelement.element_sim_elscore', (['c1', 'c2'], {'alpha': '(0.9)', 'r': '(1.0)', 'r2': 'None', 'rescale_path_type': '"""max"""'}), "(c1, c2, alpha=0.9, r=1.0, r2=None,\n rescale_path_type='max')\n", (5284, 5348), True, 'import clusim.clusimelement as clusimelement\n'), ((3268, 3460), 'clusim.sim.expected_rand_index', 'sim.expected_rand_index', ([], {'n_elements': 'c1.n_elements', 'n_clusters1': 'c1.n_clusters', 'n_clusters2': 'c2.n_clusters', 'clu_size_seq1': 'c1.clu_size_seq', 'clu_size_seq2': 'c2.clu_size_seq', 'random_model': 'rdm'}), '(n_elements=c1.n_elements, n_clusters1=c1.n_clusters,\n n_clusters2=c2.n_clusters, clu_size_seq1=c1.clu_size_seq, clu_size_seq2\n =c2.clu_size_seq, random_model=rdm)\n', (3291, 3460), True, 'import clusim.sim as sim\n'), ((3755, 3952), 'numpy.testing.assert_approx_equal', 'assert_approx_equal', (['exp_rand_value', 'known_rand_values[rdm]', '(10 ** -10)', "('Expected Rand Index with %s Random Model does not match. %s != %s' % (rdm,\n exp_rand_value, known_rand_values[rdm]))"], {}), "(exp_rand_value, known_rand_values[rdm], 10 ** -10, \n 'Expected Rand Index with %s Random Model does not match. %s != %s' % (\n rdm, exp_rand_value, known_rand_values[rdm]))\n", (3774, 3952), False, 'from numpy.testing import assert_approx_equal\n'), ((3967, 4164), 'clusim.sim.expected_mi', 'sim.expected_mi', ([], {'n_elements': 'c1.n_elements', 'n_clusters1': 'c1.n_clusters', 'n_clusters2': 'c2.n_clusters', 'clu_size_seq1': 'c1.clu_size_seq', 'clu_size_seq2': 'c2.clu_size_seq', 'random_model': 'rdm', 'logbase': '(2.0)'}), '(n_elements=c1.n_elements, n_clusters1=c1.n_clusters,\n n_clusters2=c2.n_clusters, clu_size_seq1=c1.clu_size_seq, clu_size_seq2\n =c2.clu_size_seq, random_model=rdm, logbase=2.0)\n', (3982, 4164), True, 'import clusim.sim as sim\n'), ((4397, 4577), 'numpy.testing.assert_approx_equal', 'assert_approx_equal', (['exp_mi_value', 'known_mi_values[rdm]', '(10 ** -10)', "('Expected MI with %s Random Model does not match. %s != %s' % (rdm,\n exp_mi_value, known_mi_values[rdm]))"], {}), "(exp_mi_value, known_mi_values[rdm], 10 ** -10, \n 'Expected MI with %s Random Model does not match. %s != %s' % (rdm,\n exp_mi_value, known_mi_values[rdm]))\n", (4416, 4577), False, 'from numpy.testing import assert_approx_equal\n'), ((5376, 5543), 'numpy.testing.assert_approx_equal', 'assert_approx_equal', (['elsim[i]', 'known_elsim[i]', '(10 ** -10)', "('Element-centric similarity for element %s does not match. %s != %s' % (i,\n elsim[i], known_elsim[i]))"], {}), "(elsim[i], known_elsim[i], 10 ** -10, \n 'Element-centric similarity for element %s does not match. %s != %s' %\n (i, elsim[i], known_elsim[i]))\n", (5395, 5543), False, 'from numpy.testing import assert_approx_equal\n')]
|
import numpy as np
import scipy as sp
import scipy.spatial
import matplotlib as mpl
import matplotlib.path
from ..kernels.high_level.cauchy import Cauchy_Layer_Apply
from ..point_set import PointSet
def find_interior_points(source, target, boundary_acceptable=False):
"""
    Quickly determine which points in target lie inside vs. outside of source.
"""
# first exclude things outside of bounding box
xmin = source.x.min()
xmax = source.x.max()
ymin = source.y.min()
ymax = source.y.max()
in_bounding_box = np.logical_and.reduce([ target.x > xmin, target.x < xmax,
target.y > ymin, target.y < ymax])
out_bounding_box = np.logical_not(in_bounding_box)
small_targ = PointSet(c=target.c[in_bounding_box])
small_targ.compute_tree()
wn = np.zeros(target.N, dtype=complex)
wn[out_bounding_box] = 0.0
# compute winding number via cauchy sums
wn[in_bounding_box] = Cauchy_Layer_Apply(source, small_targ, \
dipstr=np.ones(source.N)).real
wn = np.abs(wn)
bad = np.logical_or(np.isnan(wn), np.isinf(wn))
good = np.logical_not(bad)
big = np.zeros_like(wn)
big[good] = wn[good] > 1e5
bad = np.logical_or(big, bad)
wn[bad] = 1.0
# get region where that sum was not accurate enough
dist = source.tolerance_to_distance(1e-2)
q = target.find_near_points(source, dist).ravel()
# phys array, good except in near boundary region
wn[q] = 0.0
phys = wn > 0.5
# brute force search
poly = mpl.path.Path(source.get_stacked_boundary(T=False))
xq = target.x[q]
yq = target.y[q]
tq = np.column_stack([xq, yq])
interior = poly.contains_points(tq)
phys[q] = interior
phys[bad] = boundary_acceptable
ext = np.logical_not(phys)
return phys, ext
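# Illustrative usage sketch (added; not part of the original module). Here `boundary`
# stands for a closed-curve object from this package exposing x, y, N, tolerance_to_distance
# and get_stacked_boundary, and `grid` is a plain PointSet of complex query points:
# grid = PointSet(c=np.random.uniform(-2, 2, 1000) + 1j*np.random.uniform(-2, 2, 1000))
# phys, ext = find_interior_points(boundary, grid)
# # phys flags points inside the curve; ext is its complement.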
|
[
"numpy.zeros_like",
"numpy.abs",
"numpy.logical_not",
"numpy.zeros",
"numpy.logical_and.reduce",
"numpy.isnan",
"numpy.isinf",
"numpy.ones",
"numpy.logical_or",
"numpy.column_stack"
] |
[((530, 626), 'numpy.logical_and.reduce', 'np.logical_and.reduce', (['[target.x > xmin, target.x < xmax, target.y > ymin, target.y < ymax]'], {}), '([target.x > xmin, target.x < xmax, target.y > ymin, \n target.y < ymax])\n', (551, 626), True, 'import numpy as np\n'), ((692, 723), 'numpy.logical_not', 'np.logical_not', (['in_bounding_box'], {}), '(in_bounding_box)\n', (706, 723), True, 'import numpy as np\n'), ((818, 851), 'numpy.zeros', 'np.zeros', (['target.N'], {'dtype': 'complex'}), '(target.N, dtype=complex)\n', (826, 851), True, 'import numpy as np\n'), ((1083, 1093), 'numpy.abs', 'np.abs', (['wn'], {}), '(wn)\n', (1089, 1093), True, 'import numpy as np\n'), ((1157, 1176), 'numpy.logical_not', 'np.logical_not', (['bad'], {}), '(bad)\n', (1171, 1176), True, 'import numpy as np\n'), ((1187, 1204), 'numpy.zeros_like', 'np.zeros_like', (['wn'], {}), '(wn)\n', (1200, 1204), True, 'import numpy as np\n'), ((1246, 1269), 'numpy.logical_or', 'np.logical_or', (['big', 'bad'], {}), '(big, bad)\n', (1259, 1269), True, 'import numpy as np\n'), ((1673, 1698), 'numpy.column_stack', 'np.column_stack', (['[xq, yq]'], {}), '([xq, yq])\n', (1688, 1698), True, 'import numpy as np\n'), ((1808, 1828), 'numpy.logical_not', 'np.logical_not', (['phys'], {}), '(phys)\n', (1822, 1828), True, 'import numpy as np\n'), ((1118, 1130), 'numpy.isnan', 'np.isnan', (['wn'], {}), '(wn)\n', (1126, 1130), True, 'import numpy as np\n'), ((1132, 1144), 'numpy.isinf', 'np.isinf', (['wn'], {}), '(wn)\n', (1140, 1144), True, 'import numpy as np\n'), ((1050, 1067), 'numpy.ones', 'np.ones', (['source.N'], {}), '(source.N)\n', (1057, 1067), True, 'import numpy as np\n')]
|
import os.path as osp
import os
import time
from collections import defaultdict
import numpy as np
import tensorflow as tf
import moviepy.editor as mpy
import tqdm
from contextlib import contextmanager
from mpi4py import MPI
import imageio
from baselines import logger
import baselines.common.tf_util as U
from baselines.common import colorize
from baselines.common.mpi_adam import MpiAdam
import dataset
def flatten_lists(listoflists):
return [el for list_ in listoflists for el in list_]
class GlobalTrainer(object):
def __init__(self, name, env, runner, policy, config):
self._name = name
self._env = env.unwrapped
self._runner = runner
self._config = config
self._policy = policy
self._is_chef = (MPI.COMM_WORLD.Get_rank() == 0)
# global step
self.global_step = tf.Variable(0, name='global_step', dtype=tf.int64, trainable=False)
self._update_global_step = tf.assign(self.global_step, self.global_step + 1)
# tensorboard summary
self.summary_name = ['global/length', 'global/reward', 'global/success']
# build loss/optimizers
self._build_distillation()
def _build_distillation(self):
config = self._config
pi = self._policy
self._global_norm = U.function(
[], tf.global_norm([tf.cast(var, tf.float32) for var in pi.get_variables()]))
# policy update
ac = pi.pdtype.sample_placeholder([None])
pol_var_list = [v for v in pi.get_trainable_variables() if 'pol' in v.name]
self._pol_adam = MpiAdam(pol_var_list)
pol_loss = tf.reduce_mean(pi.pd.neglogp(ac))
#pol_loss = tf.reduce_mean(tf.square(pi.pd.sample() - ac))
fetch_dict = {
'loss': pol_loss,
'g': U.flatgrad(pol_loss, pol_var_list,
clip_norm=config.global_max_grad_norm)
}
self._pol_loss = U.function([ac] + pi.ob, fetch_dict)
self.summary_name += ['global/loss', 'global/grad_norm', 'global/global_norm']
# value update
if config.global_vf:
ret = tf.placeholder(dtype=tf.float32, shape=[None], name='return')
vf_var_list = [v for v in pi.get_trainable_variables() if 'vf' in v.name]
self._vf_adam = MpiAdam(vf_var_list)
vf_loss = tf.reduce_mean(tf.square(pi.vpred - ret))
fetch_dict = {
'vf_loss': vf_loss,
'vf_g': U.flatgrad(vf_loss, vf_var_list,
clip_norm=config.global_max_grad_norm)
}
self._vf_loss = U.function([ret] + pi.ob, fetch_dict)
self.summary_name += ['global/vf_loss', 'global/vf_grad_norm']
# initialize and sync
U.initialize()
self._pol_adam.sync()
if config.global_vf:
self._vf_adam.sync()
if config.debug:
            logger.log("[worker: {} global] Init param sum".format(MPI.COMM_WORLD.Get_rank()), self._pol_adam.getflat().sum())
@contextmanager
def timed(self, msg):
if self._is_chef:
print(colorize(msg, color='magenta'))
tstart = time.time()
yield
print(colorize("done in %.3f seconds"%(time.time() - tstart), color='magenta'))
else:
yield
def update(self, step, ob, ac, ret=None):
info = defaultdict(list)
config = self._config
sess = U.get_session()
global_step = sess.run(self.global_step)
sess.run(self._update_global_step)
pi = self._policy
ob_dict = self._env.get_ob_dict(ob)
if self._config.obs_norm == 'learn':
for ob_name in pi.ob_type:
pi.ob_rms[ob_name].update(ob_dict[ob_name])
with self.timed("update global network"):
for _ in range(self._config.global_iters):
# policy network
for (mb_ob, mb_ac) in dataset.iterbatches(
(ob, ac), include_final_partial_batch=False,
batch_size=self._config.global_batch_size):
ob_list = pi.get_ob_list(mb_ob)
fetched = self._pol_loss(mb_ac, *ob_list)
loss, g = fetched['loss'], fetched['g']
self._pol_adam.update(g, self._config.global_stepsize)
info['global/loss'].append(np.mean(loss))
info['global/grad_norm'].append(np.linalg.norm(g))
if config.global_vf:
# value network
for (mb_ob, mb_ret) in dataset.iterbatches(
(ob, ret), include_final_partial_batch=False,
batch_size=self._config.global_batch_size):
ob_list = pi.get_ob_list(mb_ob)
fetched = self._vf_loss(mb_ret, *ob_list)
vf_loss, vf_g = fetched['vf_loss'], fetched['vf_g']
self._vf_adam.update(vf_g, self._config.global_stepsize)
info['global/vf_loss'].append(np.mean(vf_loss))
info['global/vf_grad_norm'].append(np.linalg.norm(vf_g))
for key, value in info.items():
info[key] = np.mean(value)
info['global/global_norm'] = self._global_norm()
return info
def summary(self, it):
info = self.evaluate(it, record=self._config.training_video_record)
# save checkpoint
if it % self._config.ckpt_save_step == 0:
fname = osp.join(self._config.log_dir, '%.5d' % it)
U.save_state(fname)
return info
def evaluate(self, ckpt_num=None, record=False):
config = self._config
ep_lens = []
ep_rets = []
ep_success = []
if record:
record_dir = osp.join(config.log_dir, 'video')
os.makedirs(record_dir, exist_ok=True)
for _ in tqdm.trange(10):
ep_traj = self._runner.rollout(True, True)
ep_lens.append(ep_traj["ep_length"][0])
ep_rets.append(ep_traj["ep_reward"][0])
ep_success.append(ep_traj["ep_success"][0])
logger.log('[{}] Trial #{}: lengths {}, returns {}'.format(
self._name, _, ep_traj["ep_length"][0], ep_traj["ep_reward"][0]))
# Video recording
if record:
visual_obs = ep_traj["visual_ob"]
video_name = '{}{}_{}{}.{}'.format(config.video_prefix or '', self._name,
'' if ckpt_num is None else 'ckpt_{}_'.format(ckpt_num), _, config.video_format)
video_path = osp.join(record_dir, video_name)
if config.video_format == 'mp4':
fps = 60.
def f(t):
frame_length = len(visual_obs)
new_fps = 1./(1./fps + 1./frame_length)
idx = min(int(t*new_fps), frame_length-1)
return visual_obs[idx]
video = mpy.VideoClip(f, duration=len(visual_obs)/fps+2)
video.write_videofile(video_path, fps, verbose=False)
elif config.video_format == 'gif':
imageio.mimsave(video_path, visual_obs, fps=100)
logger.log('[{}] Episode Length: {}'.format(self._name, np.mean(ep_lens)))
logger.log('[{}] Episode Rewards: {}'.format(self._name, np.mean(ep_rets)))
return {'global/length': np.mean(ep_lens),
'global/reward': np.mean(ep_rets),
'global/success': np.mean(ep_success)}
|
[
"baselines.common.mpi_adam.MpiAdam",
"baselines.common.tf_util.get_session",
"baselines.common.tf_util.initialize",
"collections.defaultdict",
"tensorflow.assign",
"tensorflow.Variable",
"numpy.mean",
"numpy.linalg.norm",
"os.path.join",
"imageio.mimsave",
"baselines.common.tf_util.function",
"dataset.iterbatches",
"tensorflow.placeholder",
"tensorflow.cast",
"baselines.common.tf_util.flatgrad",
"baselines.common.tf_util.save_state",
"tqdm.trange",
"mpi4py.MPI.COMM_WORLD.Get_rank",
"baselines.common.colorize",
"os.makedirs",
"time.time",
"tensorflow.square"
] |
[((847, 914), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'name': '"""global_step"""', 'dtype': 'tf.int64', 'trainable': '(False)'}), "(0, name='global_step', dtype=tf.int64, trainable=False)\n", (858, 914), True, 'import tensorflow as tf\n'), ((950, 999), 'tensorflow.assign', 'tf.assign', (['self.global_step', '(self.global_step + 1)'], {}), '(self.global_step, self.global_step + 1)\n', (959, 999), True, 'import tensorflow as tf\n'), ((1587, 1608), 'baselines.common.mpi_adam.MpiAdam', 'MpiAdam', (['pol_var_list'], {}), '(pol_var_list)\n', (1594, 1608), False, 'from baselines.common.mpi_adam import MpiAdam\n'), ((1936, 1972), 'baselines.common.tf_util.function', 'U.function', (['([ac] + pi.ob)', 'fetch_dict'], {}), '([ac] + pi.ob, fetch_dict)\n', (1946, 1972), True, 'import baselines.common.tf_util as U\n'), ((2780, 2794), 'baselines.common.tf_util.initialize', 'U.initialize', ([], {}), '()\n', (2792, 2794), True, 'import baselines.common.tf_util as U\n'), ((3395, 3412), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (3406, 3412), False, 'from collections import defaultdict\n'), ((3458, 3473), 'baselines.common.tf_util.get_session', 'U.get_session', ([], {}), '()\n', (3471, 3473), True, 'import baselines.common.tf_util as U\n'), ((5968, 5983), 'tqdm.trange', 'tqdm.trange', (['(10)'], {}), '(10)\n', (5979, 5983), False, 'import tqdm\n'), ((765, 790), 'mpi4py.MPI.COMM_WORLD.Get_rank', 'MPI.COMM_WORLD.Get_rank', ([], {}), '()\n', (788, 790), False, 'from mpi4py import MPI\n'), ((1799, 1872), 'baselines.common.tf_util.flatgrad', 'U.flatgrad', (['pol_loss', 'pol_var_list'], {'clip_norm': 'config.global_max_grad_norm'}), '(pol_loss, pol_var_list, clip_norm=config.global_max_grad_norm)\n', (1809, 1872), True, 'import baselines.common.tf_util as U\n'), ((2131, 2192), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float32', 'shape': '[None]', 'name': '"""return"""'}), "(dtype=tf.float32, shape=[None], name='return')\n", (2145, 2192), True, 'import tensorflow as tf\n'), ((2307, 2327), 'baselines.common.mpi_adam.MpiAdam', 'MpiAdam', (['vf_var_list'], {}), '(vf_var_list)\n', (2314, 2327), False, 'from baselines.common.mpi_adam import MpiAdam\n'), ((2628, 2665), 'baselines.common.tf_util.function', 'U.function', (['([ret] + pi.ob)', 'fetch_dict'], {}), '([ret] + pi.ob, fetch_dict)\n', (2638, 2665), True, 'import baselines.common.tf_util as U\n'), ((3179, 3190), 'time.time', 'time.time', ([], {}), '()\n', (3188, 3190), False, 'import time\n'), ((5279, 5293), 'numpy.mean', 'np.mean', (['value'], {}), '(value)\n', (5286, 5293), True, 'import numpy as np\n'), ((5572, 5615), 'os.path.join', 'osp.join', (['self._config.log_dir', "('%.5d' % it)"], {}), "(self._config.log_dir, '%.5d' % it)\n", (5580, 5615), True, 'import os.path as osp\n'), ((5628, 5647), 'baselines.common.tf_util.save_state', 'U.save_state', (['fname'], {}), '(fname)\n', (5640, 5647), True, 'import baselines.common.tf_util as U\n'), ((5865, 5898), 'os.path.join', 'osp.join', (['config.log_dir', '"""video"""'], {}), "(config.log_dir, 'video')\n", (5873, 5898), True, 'import os.path as osp\n'), ((5911, 5949), 'os.makedirs', 'os.makedirs', (['record_dir'], {'exist_ok': '(True)'}), '(record_dir, exist_ok=True)\n', (5922, 5949), False, 'import os\n'), ((7526, 7542), 'numpy.mean', 'np.mean', (['ep_lens'], {}), '(ep_lens)\n', (7533, 7542), True, 'import numpy as np\n'), ((7577, 7593), 'numpy.mean', 'np.mean', (['ep_rets'], {}), '(ep_rets)\n', (7584, 7593), True, 'import numpy as np\n'), ((7629, 7648), 
'numpy.mean', 'np.mean', (['ep_success'], {}), '(ep_success)\n', (7636, 7648), True, 'import numpy as np\n'), ((2365, 2390), 'tensorflow.square', 'tf.square', (['(pi.vpred - ret)'], {}), '(pi.vpred - ret)\n', (2374, 2390), True, 'import tensorflow as tf\n'), ((2479, 2550), 'baselines.common.tf_util.flatgrad', 'U.flatgrad', (['vf_loss', 'vf_var_list'], {'clip_norm': 'config.global_max_grad_norm'}), '(vf_loss, vf_var_list, clip_norm=config.global_max_grad_norm)\n', (2489, 2550), True, 'import baselines.common.tf_util as U\n'), ((3126, 3156), 'baselines.common.colorize', 'colorize', (['msg'], {'color': '"""magenta"""'}), "(msg, color='magenta')\n", (3134, 3156), False, 'from baselines.common import colorize\n'), ((3959, 4071), 'dataset.iterbatches', 'dataset.iterbatches', (['(ob, ac)'], {'include_final_partial_batch': '(False)', 'batch_size': 'self._config.global_batch_size'}), '((ob, ac), include_final_partial_batch=False, batch_size\n =self._config.global_batch_size)\n', (3978, 4071), False, 'import dataset\n'), ((6678, 6710), 'os.path.join', 'osp.join', (['record_dir', 'video_name'], {}), '(record_dir, video_name)\n', (6686, 6710), True, 'import os.path as osp\n'), ((7390, 7406), 'numpy.mean', 'np.mean', (['ep_lens'], {}), '(ep_lens)\n', (7397, 7406), True, 'import numpy as np\n'), ((7474, 7490), 'numpy.mean', 'np.mean', (['ep_rets'], {}), '(ep_rets)\n', (7481, 7490), True, 'import numpy as np\n'), ((1345, 1369), 'tensorflow.cast', 'tf.cast', (['var', 'tf.float32'], {}), '(var, tf.float32)\n', (1352, 1369), True, 'import tensorflow as tf\n'), ((2979, 3004), 'mpi4py.MPI.COMM_WORLD.Get_rank', 'MPI.COMM_WORLD.Get_rank', ([], {}), '()\n', (3002, 3004), False, 'from mpi4py import MPI\n'), ((4616, 4728), 'dataset.iterbatches', 'dataset.iterbatches', (['(ob, ret)'], {'include_final_partial_batch': '(False)', 'batch_size': 'self._config.global_batch_size'}), '((ob, ret), include_final_partial_batch=False,\n batch_size=self._config.global_batch_size)\n', (4635, 4728), False, 'import dataset\n'), ((4413, 4426), 'numpy.mean', 'np.mean', (['loss'], {}), '(loss)\n', (4420, 4426), True, 'import numpy as np\n'), ((4480, 4497), 'numpy.linalg.norm', 'np.linalg.norm', (['g'], {}), '(g)\n', (4494, 4497), True, 'import numpy as np\n'), ((7276, 7324), 'imageio.mimsave', 'imageio.mimsave', (['video_path', 'visual_obs'], {'fps': '(100)'}), '(video_path, visual_obs, fps=100)\n', (7291, 7324), False, 'import imageio\n'), ((3260, 3271), 'time.time', 'time.time', ([], {}), '()\n', (3269, 3271), False, 'import time\n'), ((5116, 5132), 'numpy.mean', 'np.mean', (['vf_loss'], {}), '(vf_loss)\n', (5123, 5132), True, 'import numpy as np\n'), ((5193, 5213), 'numpy.linalg.norm', 'np.linalg.norm', (['vf_g'], {}), '(vf_g)\n', (5207, 5213), True, 'import numpy as np\n')]
|
import pymc as pm
import matplotlib.pyplot as plt
import numpy as np
plt.rc('font', family='Malgun Gothic')
lambda_ = pm.Exponential("poisson_param", 1)
data_generator = pm.Poisson("data_generater", lambda_)
data_plus_one = data_generator + 1
print(lambda_.children)
print(data_generator.parents)
# value
print(lambda_.value)
betas = pm.Uniform("betas", 0, 1, size=5)
betas.value
## random
ld1 = pm.Exponential("lambda_1", 1) # prior for the first behavior's rate
ld2 = pm.Exponential("lambda_2", 1) # prior for the second behavior's rate
tau = pm.DiscreteUniform("tau", lower=0, upper=10) # prior for the behavior changepoint
print("init")
print(ld1.value)
print(ld2.value)
print(tau.value)
print(ld1.random(), ld2.random(), tau.random())
print("random call")
print(ld1.value)
print(ld2.value)
print(tau.value)
n_data_points = 5
@pm.deterministic
def lambda_(tau=tau, lambda_1=ld1, lambda_2=ld2):
out = np.zeros(n_data_points)
out[:tau] = lambda_1
out[tau:] = lambda_2
return out
####################################################
#### include the observations in the model ####
figsize = (12.5, 4)
plt.figure(figsize=figsize)
plt.rcParams['savefig.dpi'] = 300
plt.rcParams['figure.dpi'] = 300
samples = [ld1.random() for i in range(20000)]
plt.hist(samples, bins=70, normed=True, histtype="stepfilled")
plt.xlim(0, 8)
plt.show()
# fixed values (observed data)
data = np.array([10, 25, 15, 20, 35])
obs = pm.Poisson("obs", lambda_, value=data, observed=True)
obs.value
##################
##### modeling #####
tau = pm.rdiscrete_uniform(0, 80)
alpha = 1./20.
lambda_1, lambda_2 = pm.rexponential(alpha, 2)
lambda_ = np.r_[lambda_1*np.ones(tau), lambda_2*np.ones(80-tau)]
data = pm.rpoisson(lambda_)
plt.bar(np.arange(80), data, color="#348ABD")
plt.bar(tau-1, data[tau-1], color='r', label='behavior change')
plt.xlabel("time")
plt.ylabel("message")
plt.xlim(0, 80)
plt.legend()
|
[
"pymc.Poisson",
"matplotlib.pyplot.bar",
"numpy.ones",
"matplotlib.pyplot.figure",
"numpy.arange",
"pymc.rexponential",
"pymc.rpoisson",
"matplotlib.pyplot.rc",
"pymc.DiscreteUniform",
"pymc.Exponential",
"matplotlib.pyplot.show",
"pymc.Uniform",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.xlable",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlim",
"pymc.rdiscrete_uniform",
"matplotlib.pyplot.hist",
"numpy.zeros",
"numpy.array"
] |
[((70, 108), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'family': '"""Malgun Gothic"""'}), "('font', family='Malgun Gothic')\n", (76, 108), True, 'import matplotlib.pyplot as plt\n'), ((119, 153), 'pymc.Exponential', 'pm.Exponential', (['"""poisson_param"""', '(1)'], {}), "('poisson_param', 1)\n", (133, 153), True, 'import pymc as pm\n'), ((171, 208), 'pymc.Poisson', 'pm.Poisson', (['"""data_generater"""', 'lambda_'], {}), "('data_generater', lambda_)\n", (181, 208), True, 'import pymc as pm\n'), ((338, 371), 'pymc.Uniform', 'pm.Uniform', (['"""betas"""', '(0)', '(1)'], {'size': '(5)'}), "('betas', 0, 1, size=5)\n", (348, 371), True, 'import pymc as pm\n'), ((401, 430), 'pymc.Exponential', 'pm.Exponential', (['"""lambda_1"""', '(1)'], {}), "('lambda_1', 1)\n", (415, 430), True, 'import pymc as pm\n'), ((455, 484), 'pymc.Exponential', 'pm.Exponential', (['"""lambda_2"""', '(1)'], {}), "('lambda_2', 1)\n", (469, 484), True, 'import pymc as pm\n'), ((510, 554), 'pymc.DiscreteUniform', 'pm.DiscreteUniform', (['"""tau"""'], {'lower': '(0)', 'upper': '(10)'}), "('tau', lower=0, upper=10)\n", (528, 554), True, 'import pymc as pm\n'), ((1041, 1068), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (1051, 1068), True, 'import matplotlib.pyplot as plt\n'), ((1183, 1245), 'matplotlib.pyplot.hist', 'plt.hist', (['samples'], {'bins': '(70)', 'normed': '(True)', 'histtype': '"""stepfilled"""'}), "(samples, bins=70, normed=True, histtype='stepfilled')\n", (1191, 1245), True, 'import matplotlib.pyplot as plt\n'), ((1246, 1260), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(8)'], {}), '(0, 8)\n', (1254, 1260), True, 'import matplotlib.pyplot as plt\n'), ((1261, 1271), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1269, 1271), True, 'import matplotlib.pyplot as plt\n'), ((1289, 1319), 'numpy.array', 'np.array', (['[10, 25, 15, 20, 35]'], {}), '([10, 25, 15, 20, 35])\n', (1297, 1319), True, 'import numpy as np\n'), ((1326, 1379), 'pymc.Poisson', 'pm.Poisson', (['"""obs"""', 'lambda_'], {'value': 'data', 'observed': '(True)'}), "('obs', lambda_, value=data, observed=True)\n", (1336, 1379), True, 'import pymc as pm\n'), ((1433, 1460), 'pymc.rdiscrete_uniform', 'pm.rdiscrete_uniform', (['(0)', '(80)'], {}), '(0, 80)\n', (1453, 1460), True, 'import pymc as pm\n'), ((1497, 1522), 'pymc.rexponential', 'pm.rexponential', (['alpha', '(2)'], {}), '(alpha, 2)\n', (1512, 1522), True, 'import pymc as pm\n'), ((1595, 1615), 'pymc.rpoisson', 'pm.rpoisson', (['lambda_'], {}), '(lambda_)\n', (1606, 1615), True, 'import pymc as pm\n'), ((1662, 1718), 'matplotlib.pyplot.bar', 'plt.bar', (['(tau - 1)', 'data[tau - 1]'], {'color': '"""r"""', 'label': '"""ํ๋๋ณํ"""'}), "(tau - 1, data[tau - 1], color='r', label='ํ๋๋ณํ')\n", (1669, 1718), True, 'import matplotlib.pyplot as plt\n'), ((1715, 1733), 'matplotlib.pyplot.xlable', 'plt.xlable', (['"""time"""'], {}), "('time')\n", (1725, 1733), True, 'import matplotlib.pyplot as plt\n'), ((1734, 1755), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""message"""'], {}), "('message')\n", (1744, 1755), True, 'import matplotlib.pyplot as plt\n'), ((1756, 1771), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(80)'], {}), '(0, 80)\n', (1764, 1771), True, 'import matplotlib.pyplot as plt\n'), ((1772, 1784), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1782, 1784), True, 'import matplotlib.pyplot as plt\n'), ((858, 881), 'numpy.zeros', 'np.zeros', (['n_data_points'], {}), '(n_data_points)\n', (866, 881), True, 
'import numpy as np\n'), ((1624, 1637), 'numpy.arange', 'np.arange', (['(80)'], {}), '(80)\n', (1633, 1637), True, 'import numpy as np\n'), ((1548, 1560), 'numpy.ones', 'np.ones', (['tau'], {}), '(tau)\n', (1555, 1560), True, 'import numpy as np\n'), ((1571, 1588), 'numpy.ones', 'np.ones', (['(80 - tau)'], {}), '(80 - tau)\n', (1578, 1588), True, 'import numpy as np\n')]
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib2tikz.save as tikz_save
import math
def derivative(y, h, n: int=1):
if n == 1:
return lambda x: (y(x + h) - y(x - h)) / (2 * h)
else:
return derivative(derivative(y, h, n - 1), h, 1)
def integral(y, h, a, b):
ret = 0
sgn = 1
if a > b:
sgn = -1
a, b = b, a
if abs(b - a) < h:
h *= abs(b - a)
for i in np.arange(a, b, h):
ret += y(i) * h
return ret * sgn
def fourier(y, h, n, a, b):
L = (b - a) / 2
a_0 = integral(y, h, a, b) / (2 * L)
a_n = [0] * n
b_n = [0] * n
for i in range(1, n + 1):
a_n[i - 1] = (1 / L) * integral(lambda x: y(x) * np.cos(i * np.pi * x / L), h, a, b)
b_n[i - 1] = (1 / L) * integral(lambda x: y(x) * np.sin(i * np.pi * x / L), h, a, b)
return lambda x: fouriereval(x, a_0, a_n, b_n, L)
def fouriereval(x, a_0, a_n, b_n, l):
ret = a_0
for i in range(1, len(a_n) + 1):
ret += a_n[i - 1] * np.cos(i * np.pi * x / l)
ret += b_n[i - 1] * np.sin(i * np.pi * x / l)
return ret
# def f(x):
# if x > 2:
# return f(x - 4)
# if x < -2:
# return f(x + 4)
# return ((x**3) - 4 * x) / 4
# def f(x):
# if x < -1:
# return f(x + 2)
# if x > 1:
# return f(x - 2)
# return -1 if x < 0 else 1
def fx(x, n):
if n == 1:
return np.sin(x)
return fx(np.sin(x) * np.pi / 2, n - 1)
# def f(x):
# return np.cos(np.tan(np.sin(x)))
def sirc(x):
return np.sqrt(1 - x**2)
def f(x):
if x < -2:
return f(x + 4)
if x > 2:
return f(x - 4)
if x < 0:
return -sirc(x + 1)
else:
return sirc(x - 1)
h = 0.001
x = np.arange(-4, 4, 0.01)
# kr = lambda x: derivative(f, h, 2)(x) / ((1 + derivative(f, h)(x)**2)**(3 / 2))
# dkr = derivative(kr, h)
# dy = derivative(f, h)
fr = fourier(f, h, 101, -2, 2)
plt.plot(x, np.vectorize(f)(x))
# plt.plot(x, np.vectorize(kr)(x))
# plt.plot(x, np.vectorize(dkr)(x))
# plt.plot(x, np.vectorize(dy)(x))
plt.plot(x, np.vectorize(fr)(x))
plt.axis([-4, 4, -5, 5])
plt.title("$f(x)$")
plt.grid(True)
tikz_save("PyPlotTesting/Figurer/" + "f" + str(1) + ".tikz", figureheight='\\figureheight', figurewidth='\\figurewidth')
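# Added sanity check (illustrative, not in the original script): the truncated
# Fourier series fr should track f closely away from the junctions at x = 0 and x = +/-2.
for x0 in (-1.5, -0.5, 0.5, 1.5):
    print("x = {:+.1f}  f = {:+.4f}  fourier = {:+.4f}".format(x0, f(x0), fr(x0)))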
|
[
"matplotlib.pyplot.title",
"numpy.vectorize",
"matplotlib.pyplot.axis",
"numpy.sin",
"numpy.arange",
"numpy.cos",
"matplotlib.pyplot.grid",
"numpy.sqrt"
] |
[((1563, 1585), 'numpy.arange', 'np.arange', (['(-4)', '(4)', '(0.01)'], {}), '(-4, 4, 0.01)\n', (1572, 1585), True, 'import numpy as np\n'), ((1926, 1950), 'matplotlib.pyplot.axis', 'plt.axis', (['[-4, 4, -5, 5]'], {}), '([-4, 4, -5, 5])\n', (1934, 1950), True, 'import matplotlib.pyplot as plt\n'), ((1952, 1971), 'matplotlib.pyplot.title', 'plt.title', (['"""$f(x)$"""'], {}), "('$f(x)$')\n", (1961, 1971), True, 'import matplotlib.pyplot as plt\n'), ((1973, 1987), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (1981, 1987), True, 'import matplotlib.pyplot as plt\n'), ((394, 412), 'numpy.arange', 'np.arange', (['a', 'b', 'h'], {}), '(a, b, h)\n', (403, 412), True, 'import numpy as np\n'), ((1396, 1415), 'numpy.sqrt', 'np.sqrt', (['(1 - x ** 2)'], {}), '(1 - x ** 2)\n', (1403, 1415), True, 'import numpy as np\n'), ((1273, 1282), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (1279, 1282), True, 'import numpy as np\n'), ((1766, 1781), 'numpy.vectorize', 'np.vectorize', (['f'], {}), '(f)\n', (1778, 1781), True, 'import numpy as np\n'), ((1904, 1920), 'numpy.vectorize', 'np.vectorize', (['fr'], {}), '(fr)\n', (1916, 1920), True, 'import numpy as np\n'), ((927, 952), 'numpy.cos', 'np.cos', (['(i * np.pi * x / l)'], {}), '(i * np.pi * x / l)\n', (933, 952), True, 'import numpy as np\n'), ((975, 1000), 'numpy.sin', 'np.sin', (['(i * np.pi * x / l)'], {}), '(i * np.pi * x / l)\n', (981, 1000), True, 'import numpy as np\n'), ((1294, 1303), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (1300, 1303), True, 'import numpy as np\n'), ((644, 669), 'numpy.cos', 'np.cos', (['(i * np.pi * x / L)'], {}), '(i * np.pi * x / L)\n', (650, 669), True, 'import numpy as np\n'), ((731, 756), 'numpy.sin', 'np.sin', (['(i * np.pi * x / L)'], {}), '(i * np.pi * x / L)\n', (737, 756), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
'''
Custom theano class to query the search engine.
'''
import numpy as np
import theano
from theano import gof
from theano import tensor
import parameters as prm
import utils
import average_precision
import random
class Search(theano.Op):
__props__ = ()
def __init__(self,options):
self.options = options
self.options['reformulated_queries'] = {}
def make_node(self, x1, x2, x3, x4):
assert hasattr(self, '_props'), "Your version of theano is too old to support __props__."
x1 = tensor.as_tensor_variable(x1)
x2 = tensor.as_tensor_variable(x2)
x3 = tensor.as_tensor_variable(x3)
x4 = tensor.as_tensor_variable(x4)
out = [tensor.fmatrix().type(), tensor.itensor3().type(), tensor.imatrix().type(), tensor.fmatrix().type()]
return theano.Apply(self, [x1, x2, x3, x4], out)
def perform(self, node, inputs, output_storage):
q_m = inputs[0]
D_truth = inputs[1]
n_iter = int(inputs[2])
is_train = int(inputs[3])
#outputs
metrics = np.zeros((len(q_m), len(prm.metrics_map)), np.float32)
if is_train:
max_feedback_docs = prm.max_feedback_docs_train
else:
max_feedback_docs = prm.max_feedback_docs
D_i = -2 * np.ones((len(q_m), max_feedback_docs, prm.max_words_input), np.int32)
D_gt_m = np.zeros((len(q_m), prm.max_candidates), np.float32)
D_id = np.zeros((len(q_m), prm.max_candidates), np.int32)
# no need to retrieve extra terms in the last iteration
if n_iter == prm.n_iterations - 1:
extra_terms = False
else:
extra_terms = True
# allow the search engine to cache queries only in the first iteration.
if n_iter == 0:
save_cache = prm.use_cache
else:
save_cache = False
max_cand = prm.max_candidates
qs = []
for i, q_lst in enumerate(self.options['current_queries']):
q = []
for j, word in enumerate(q_lst):
if q_m[i,j] == 1:
q.append(str(word))
q = ' '.join(q)
if len(q) == 0:
q = 'dummy'
qs.append(q)
# only used to print the reformulated queries.
self.options['reformulated_queries'][n_iter] = qs
# always return one more candidate because one of them might be the input doc.
candss = self.options['engine'].get_candidates(qs, max_cand, prm.max_feedback_docs, save_cache, extra_terms)
for i, cands in enumerate(candss):
D_truth_dic = {}
for d_truth in D_truth[i]:
if d_truth > -1:
D_truth_dic[d_truth] = 0
D_id[i,:len(cands.keys())] = cands.keys()
j = 0
m = 0
cand_ids = []
selected_docs = np.arange(prm.max_feedback_docs)
if is_train:
selected_docs = np.random.choice(selected_docs, size=prm.max_feedback_docs_train, replace=False)
for k, (cand_id, (words_idx, words)) in enumerate(cands.items()):
cand_ids.append(cand_id)
# no need to add candidate words in the last iteration.
if n_iter < prm.n_iterations - 1:
# only add docs selected by sampling (if training).
if k in selected_docs:
words = words[:prm.max_terms_per_doc]
words_idx = words_idx[:prm.max_terms_per_doc]
D_i[i,m,:len(words_idx)] = words_idx
# append empty strings, so the list size becomes <dim>.
words = words + max(0, prm.max_words_input - len(words)) * ['']
# append new words to the list of current queries.
self.options['current_queries'][i] += words
m += 1
if cand_id in D_truth_dic:
D_gt_m[i,j] = 1.
j += 1
cands_set = set(cands.keys())
if qs[i].lower() in self.options['engine'].title_id_map:
input_doc_id = self.options['engine'].title_id_map[qs[i].lower()]
# Remove input doc from returned docs.
# This operation does not raise an error if the element is not there.
cands_set.discard(input_doc_id)
intersec = len(set(D_truth_dic.keys()) & cands_set)
recall = intersec / max(1., float(len(D_truth_dic)))
precision = intersec / max(1., float(prm.max_candidates))
metrics[i,prm.metrics_map['RECALL']] = recall
metrics[i,prm.metrics_map['PRECISION']] = precision
metrics[i,prm.metrics_map['F1']] = 2 * recall * precision / max(0.01, recall + precision)
avg_precision = average_precision.compute(D_truth_dic.keys(), cand_ids)
metrics[i,prm.metrics_map['MAP']] = avg_precision
metrics[i,prm.metrics_map['LOG-GMAP']] = np.log(avg_precision + 1e-5)
output_storage[0][0] = metrics
output_storage[1][0] = D_i
output_storage[2][0] = D_id
output_storage[3][0] = D_gt_m
def grad(self, inputs, output_grads):
return [tensor.zeros_like(ii, dtype=theano.config.floatX) for ii in inputs]
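# Illustrative wiring sketch (added; not part of the original module). Applying the Op to
# symbolic inputs yields the four outputs declared in make_node; `options` stands for the
# configuration dict this module normally receives from elsewhere.
# q_m = tensor.fmatrix('q_m')
# D_truth = tensor.imatrix('D_truth')
# n_iter = tensor.iscalar('n_iter')
# is_train = tensor.iscalar('is_train')
# metrics, D_i, D_id, D_gt_m = Search(options)(q_m, D_truth, n_iter, is_train)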
|
[
"theano.tensor.as_tensor_variable",
"numpy.log",
"theano.tensor.itensor3",
"theano.Apply",
"theano.tensor.imatrix",
"theano.tensor.zeros_like",
"numpy.arange",
"theano.tensor.fmatrix",
"numpy.random.choice"
] |
[((554, 583), 'theano.tensor.as_tensor_variable', 'tensor.as_tensor_variable', (['x1'], {}), '(x1)\n', (579, 583), False, 'from theano import tensor\n'), ((597, 626), 'theano.tensor.as_tensor_variable', 'tensor.as_tensor_variable', (['x2'], {}), '(x2)\n', (622, 626), False, 'from theano import tensor\n'), ((640, 669), 'theano.tensor.as_tensor_variable', 'tensor.as_tensor_variable', (['x3'], {}), '(x3)\n', (665, 669), False, 'from theano import tensor\n'), ((683, 712), 'theano.tensor.as_tensor_variable', 'tensor.as_tensor_variable', (['x4'], {}), '(x4)\n', (708, 712), False, 'from theano import tensor\n'), ((845, 886), 'theano.Apply', 'theano.Apply', (['self', '[x1, x2, x3, x4]', 'out'], {}), '(self, [x1, x2, x3, x4], out)\n', (857, 886), False, 'import theano\n'), ((2934, 2966), 'numpy.arange', 'np.arange', (['prm.max_feedback_docs'], {}), '(prm.max_feedback_docs)\n', (2943, 2966), True, 'import numpy as np\n'), ((5171, 5200), 'numpy.log', 'np.log', (['(avg_precision + 1e-05)'], {}), '(avg_precision + 1e-05)\n', (5177, 5200), True, 'import numpy as np\n'), ((5408, 5457), 'theano.tensor.zeros_like', 'tensor.zeros_like', (['ii'], {'dtype': 'theano.config.floatX'}), '(ii, dtype=theano.config.floatX)\n', (5425, 5457), False, 'from theano import tensor\n'), ((3025, 3110), 'numpy.random.choice', 'np.random.choice', (['selected_docs'], {'size': 'prm.max_feedback_docs_train', 'replace': '(False)'}), '(selected_docs, size=prm.max_feedback_docs_train, replace=False\n )\n', (3041, 3110), True, 'import numpy as np\n'), ((728, 744), 'theano.tensor.fmatrix', 'tensor.fmatrix', ([], {}), '()\n', (742, 744), False, 'from theano import tensor\n'), ((753, 770), 'theano.tensor.itensor3', 'tensor.itensor3', ([], {}), '()\n', (768, 770), False, 'from theano import tensor\n'), ((779, 795), 'theano.tensor.imatrix', 'tensor.imatrix', ([], {}), '()\n', (793, 795), False, 'from theano import tensor\n'), ((804, 820), 'theano.tensor.fmatrix', 'tensor.fmatrix', ([], {}), '()\n', (818, 820), False, 'from theano import tensor\n')]
|
"""Helper methods for Module 2."""
import errno
import glob
import os.path
import pickle
import time
import calendar
import numpy
import pandas
import matplotlib.colors
import matplotlib.pyplot as pyplot
import sklearn.metrics
import sklearn.linear_model
import sklearn.tree
import sklearn.ensemble
from module_4 import roc_curves
from module_4 import performance_diagrams as perf_diagrams
from module_4 import attributes_diagrams as attr_diagrams
# Directories.
MODULE2_DIR_NAME = '.'
SHORT_COURSE_DIR_NAME = '..'
DEFAULT_FEATURE_DIR_NAME = (
'{0:s}/data/track_data_ncar_ams_3km_csv_small'
).format(SHORT_COURSE_DIR_NAME)
# Variable names.
METADATA_COLUMNS = [
'Step_ID', 'Track_ID', 'Ensemble_Name', 'Ensemble_Member', 'Run_Date',
'Valid_Date', 'Forecast_Hour', 'Valid_Hour_UTC'
]
EXTRANEOUS_COLUMNS = [
'Duration', 'Centroid_Lon', 'Centroid_Lat', 'Centroid_X', 'Centroid_Y',
'Storm_Motion_U', 'Storm_Motion_V', 'Matched', 'Max_Hail_Size',
'Num_Matches', 'Shape', 'Location', 'Scale'
]
TARGET_NAME = 'RVORT1_MAX-future_max'
BINARIZED_TARGET_NAME = 'strong_future_rotation_flag'
NUM_VALUES_KEY = 'num_values'
MEAN_VALUE_KEY = 'mean_value'
MEAN_OF_SQUARES_KEY = 'mean_of_squares'
MAE_KEY = 'mean_absolute_error'
MSE_KEY = 'mean_squared_error'
MEAN_BIAS_KEY = 'mean_bias'
MAE_SKILL_SCORE_KEY = 'mae_skill_score'
MSE_SKILL_SCORE_KEY = 'mse_skill_score'
MAX_PEIRCE_SCORE_KEY = 'max_peirce_score'
AUC_KEY = 'area_under_roc_curve'
MAX_CSI_KEY = 'max_csi'
BRIER_SCORE_KEY = 'brier_score'
BRIER_SKILL_SCORE_KEY = 'brier_skill_score'
# Plotting constants.
DEFAULT_FIG_WIDTH_INCHES = 10
DEFAULT_FIG_HEIGHT_INCHES = 10
SMALL_FIG_WIDTH_INCHES = 10
SMALL_FIG_HEIGHT_INCHES = 10
FIGURE_RESOLUTION_DPI = 300
BAR_GRAPH_COLOUR = numpy.array([27, 158, 119], dtype=float) / 255
BAR_GRAPH_EDGE_WIDTH = 2
BAR_GRAPH_FONT_SIZE = 14
BAR_GRAPH_FONT_COLOUR = numpy.array([217, 95, 2], dtype=float) / 255
FONT_SIZE = 20
pyplot.rc('font', size=FONT_SIZE)
pyplot.rc('axes', titlesize=FONT_SIZE)
pyplot.rc('axes', labelsize=FONT_SIZE)
pyplot.rc('xtick', labelsize=FONT_SIZE)
pyplot.rc('ytick', labelsize=FONT_SIZE)
pyplot.rc('legend', fontsize=FONT_SIZE)
pyplot.rc('figure', titlesize=FONT_SIZE)
# Misc constants.
DATE_FORMAT = '%Y%m%d'
DATE_FORMAT_REGEX = '[0-9][0-9][0-9][0-9][0-1][0-9][0-3][0-9]'
RANDOM_SEED = 6695
LAMBDA_TOLERANCE = 1e-10
def time_string_to_unix(time_string, time_format):
"""Converts time from string to Unix format.
Unix format = seconds since 0000 UTC 1 Jan 1970.
:param time_string: Time string.
:param time_format: Format of time string (example: "%Y%m%d" or
"%Y-%m-%d-%H%M%S").
:return: unix_time_sec: Time in Unix format.
"""
return calendar.timegm(time.strptime(time_string, time_format))
def time_unix_to_string(unix_time_sec, time_format):
"""Converts time from Unix format to string.
Unix format = seconds since 0000 UTC 1 Jan 1970.
:param unix_time_sec: Time in Unix format.
:param time_format: Desired format of time string (example: "%Y%m%d" or
"%Y-%m-%d-%H%M%S").
:return: time_string: Time string.
"""
return time.strftime(time_format, time.gmtime(unix_time_sec))
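# Worked example (added for clarity): 0000 UTC 1 Jan 2017 is 1483228800 seconds after the
# Unix epoch, so the two helpers invert each other:
# time_string_to_unix('20170101', DATE_FORMAT) -> 1483228800
# time_unix_to_string(1483228800, DATE_FORMAT) -> '20170101'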
def _remove_future_data(predictor_table):
"""Removes future data from predictors.
:param predictor_table: pandas DataFrame with predictor values. Each row is
one storm object.
:return: predictor_table: Same but with fewer columns.
"""
predictor_names = list(predictor_table)
columns_to_remove = [p for p in predictor_names if 'future' in p]
return predictor_table.drop(columns_to_remove, axis=1, inplace=False)
def _feature_file_name_to_date(csv_file_name):
"""Parses date from name of feature (CSV) file.
:param csv_file_name: Path to input file.
:return: date_string: Date (format "yyyymmdd").
"""
pathless_file_name = os.path.split(csv_file_name)[-1]
date_string = pathless_file_name.replace(
'track_step_NCARSTORM_d01_', ''
).replace('-0000.csv', '')
# Verify.
time_string_to_unix(time_string=date_string, time_format=DATE_FORMAT)
return date_string
def find_many_feature_files(first_date_string, last_date_string,
feature_dir_name=DEFAULT_FEATURE_DIR_NAME):
"""Finds feature files in the given date range.
:param first_date_string: First date ("yyyymmdd") in range.
:param last_date_string: Last date ("yyyymmdd") in range.
:param feature_dir_name: Name of directory with feature (CSV) files.
:return: csv_file_names: 1-D list of paths to feature files.
"""
first_time_unix_sec = time_string_to_unix(
time_string=first_date_string, time_format=DATE_FORMAT)
last_time_unix_sec = time_string_to_unix(
time_string=last_date_string, time_format=DATE_FORMAT)
csv_file_pattern = '{0:s}/track_step_NCARSTORM_d01_{1:s}-0000.csv'.format(
feature_dir_name, DATE_FORMAT_REGEX)
csv_file_names = glob.glob(csv_file_pattern)
csv_file_names.sort()
file_date_strings = [_feature_file_name_to_date(f) for f in csv_file_names]
file_times_unix_sec = numpy.array([
time_string_to_unix(time_string=d, time_format=DATE_FORMAT)
for d in file_date_strings
], dtype=int)
good_indices = numpy.where(numpy.logical_and(
file_times_unix_sec >= first_time_unix_sec,
file_times_unix_sec <= last_time_unix_sec
))[0]
return [csv_file_names[k] for k in good_indices]
def read_feature_file(csv_file_name):
"""Reads features from CSV file.
:param csv_file_name: Path to input file.
:return: metadata_table: pandas DataFrame with metadata. Each row is one
storm object.
:return: predictor_table: pandas DataFrame with predictor values. Each row
is one storm object.
:return: target_table: pandas DataFrame with target values. Each row is one
storm object.
"""
predictor_table = pandas.read_csv(csv_file_name, header=0, sep=',')
predictor_table.drop(EXTRANEOUS_COLUMNS, axis=1, inplace=True)
metadata_table = predictor_table[METADATA_COLUMNS]
predictor_table.drop(METADATA_COLUMNS, axis=1, inplace=True)
target_table = predictor_table[[TARGET_NAME]]
predictor_table.drop([TARGET_NAME], axis=1, inplace=True)
predictor_table = _remove_future_data(predictor_table)
return metadata_table, predictor_table, target_table
def read_many_feature_files(csv_file_names):
"""Reads features from many CSV files.
:param csv_file_names: 1-D list of paths to input files.
:return: metadata_table: See doc for `read_feature_file`.
:return: predictor_table: Same.
:return: target_table: Same.
"""
num_files = len(csv_file_names)
list_of_metadata_tables = [pandas.DataFrame()] * num_files
list_of_predictor_tables = [pandas.DataFrame()] * num_files
list_of_target_tables = [pandas.DataFrame()] * num_files
for i in range(num_files):
print('Reading data from: "{0:s}"...'.format(csv_file_names[i]))
(list_of_metadata_tables[i], list_of_predictor_tables[i],
list_of_target_tables[i]
) = read_feature_file(csv_file_names[i])
if i == 0:
continue
list_of_metadata_tables[i] = list_of_metadata_tables[i].align(
list_of_metadata_tables[0], axis=1
)[0]
list_of_predictor_tables[i] = list_of_predictor_tables[i].align(
list_of_predictor_tables[0], axis=1
)[0]
list_of_target_tables[i] = list_of_target_tables[i].align(
list_of_target_tables[0], axis=1
)[0]
metadata_table = pandas.concat(
list_of_metadata_tables, axis=0, ignore_index=True)
predictor_table = pandas.concat(
list_of_predictor_tables, axis=0, ignore_index=True)
target_table = pandas.concat(
list_of_target_tables, axis=0, ignore_index=True)
return metadata_table, predictor_table, target_table
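# Illustrative pipeline sketch (added; the date range is a hypothetical example):
# csv_file_names = find_many_feature_files('20150101', '20150131')
# metadata_table, predictor_table, target_table = read_many_feature_files(csv_file_names)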
def _update_normalization_params(intermediate_normalization_dict, new_values):
"""Updates normalization params for one predictor.
:param intermediate_normalization_dict: Dictionary with the following keys.
intermediate_normalization_dict['num_values']: Number of values on which
current estimates are based.
intermediate_normalization_dict['mean_value']: Current estimate for mean.
intermediate_normalization_dict['mean_of_squares']: Current mean of squared
values.
:param new_values: numpy array of new values (will be used to update
`intermediate_normalization_dict`).
:return: intermediate_normalization_dict: Same as input but with updated
values.
"""
if MEAN_VALUE_KEY not in intermediate_normalization_dict:
intermediate_normalization_dict = {
NUM_VALUES_KEY: 0,
MEAN_VALUE_KEY: 0.,
MEAN_OF_SQUARES_KEY: 0.
}
these_means = numpy.array([
intermediate_normalization_dict[MEAN_VALUE_KEY], numpy.mean(new_values)
])
these_weights = numpy.array([
intermediate_normalization_dict[NUM_VALUES_KEY], new_values.size
])
intermediate_normalization_dict[MEAN_VALUE_KEY] = numpy.average(
these_means, weights=these_weights)
these_means = numpy.array([
intermediate_normalization_dict[MEAN_OF_SQUARES_KEY],
numpy.mean(new_values ** 2)
])
intermediate_normalization_dict[MEAN_OF_SQUARES_KEY] = numpy.average(
these_means, weights=these_weights)
intermediate_normalization_dict[NUM_VALUES_KEY] += new_values.size
return intermediate_normalization_dict
def _get_standard_deviation(intermediate_normalization_dict):
"""Computes stdev from intermediate normalization params.
:param intermediate_normalization_dict: See doc for
`_update_normalization_params`.
:return: standard_deviation: Standard deviation.
"""
num_values = float(intermediate_normalization_dict[NUM_VALUES_KEY])
multiplier = num_values / (num_values - 1)
return numpy.sqrt(multiplier * (
intermediate_normalization_dict[MEAN_OF_SQUARES_KEY] -
intermediate_normalization_dict[MEAN_VALUE_KEY] ** 2
))
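# Worked check (added for clarity): streaming the batches [1, 2, 3] and [4, 5] through
# _update_normalization_params gives a running mean of 3.0, and _get_standard_deviation
# returns sqrt(2.5) ~= 1.581, matching numpy.std([1, 2, 3, 4, 5], ddof=1) in one pass.
# d = _update_normalization_params({}, numpy.array([1., 2., 3.]))
# d = _update_normalization_params(d, numpy.array([4., 5.]))
# d[MEAN_VALUE_KEY] -> 3.0 ; _get_standard_deviation(d) -> 1.5811...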
def get_normalization_params(csv_file_names):
"""Computes normalization params (mean and stdev) for each predictor.
:param csv_file_names: 1-D list of paths to input files.
:return: normalization_dict: See input doc for `normalize_images`.
"""
predictor_names = None
norm_dict_by_predictor = None
for this_file_name in csv_file_names:
print('Reading data from: "{0:s}"...'.format(this_file_name))
this_predictor_table = read_feature_file(this_file_name)[1]
if predictor_names is None:
predictor_names = list(this_predictor_table)
norm_dict_by_predictor = [{}] * len(predictor_names)
for m in range(len(predictor_names)):
norm_dict_by_predictor[m] = _update_normalization_params(
intermediate_normalization_dict=norm_dict_by_predictor[m],
new_values=this_predictor_table[predictor_names[m]].values
)
print('\n')
normalization_dict = {}
for m in range(len(predictor_names)):
this_mean = norm_dict_by_predictor[m][MEAN_VALUE_KEY]
this_stdev = _get_standard_deviation(norm_dict_by_predictor[m])
normalization_dict[predictor_names[m]] = numpy.array(
[this_mean, this_stdev]
)
message_string = (
'Mean and standard deviation for "{0:s}" = {1:.4f}, {2:.4f}'
).format(predictor_names[m], this_mean, this_stdev)
print(message_string)
return normalization_dict
def normalize_predictors(predictor_table, normalization_dict=None):
"""Normalizes predictors to z-scores.
:param predictor_table: See doc for `read_feature_file`.
:param normalization_dict: Dictionary. Each key is the name of a predictor
value, and the corresponding value is a length-2 numpy array with
[mean, standard deviation]. If `normalization_dict is None`, mean and
standard deviation will be computed for each predictor.
:return: predictor_table: Normalized version of input.
:return: normalization_dict: See doc for input variable. If input was None,
this will be a newly created dictionary. Otherwise, this will be the
same dictionary passed as input.
"""
predictor_names = list(predictor_table)
num_predictors = len(predictor_names)
if normalization_dict is None:
normalization_dict = {}
for m in range(num_predictors):
this_mean = numpy.mean(predictor_table[predictor_names[m]].values)
this_stdev = numpy.std(
predictor_table[predictor_names[m]].values, ddof=1
)
normalization_dict[predictor_names[m]] = numpy.array(
[this_mean, this_stdev]
)
for m in range(num_predictors):
this_mean = normalization_dict[predictor_names[m]][0]
this_stdev = normalization_dict[predictor_names[m]][1]
these_norm_values = (
predictor_table[predictor_names[m]].values - this_mean
) / this_stdev
predictor_table = predictor_table.assign(**{
predictor_names[m]: these_norm_values
})
return predictor_table, normalization_dict
def denormalize_predictors(predictor_table, normalization_dict):
"""Denormalizes predictors from z-scores back to original scales.
:param predictor_table: See doc for `normalize_predictors`.
:param normalization_dict: Same.
:return: predictor_table: Denormalized version of input.
"""
predictor_names = list(predictor_table)
num_predictors = len(predictor_names)
for m in range(num_predictors):
this_mean = normalization_dict[predictor_names[m]][0]
this_stdev = normalization_dict[predictor_names[m]][1]
these_denorm_values = (
this_mean + this_stdev * predictor_table[predictor_names[m]].values
)
predictor_table = predictor_table.assign(**{
predictor_names[m]: these_denorm_values
})
return predictor_table
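# Editor's sketch (not part of the original module): normalize_predictors and
# denormalize_predictors are inverses of each other, so a round trip should
# recover the original table up to floating-point error.
def _demo_normalization_round_trip(predictor_table):
    normalized_table, this_norm_dict = normalize_predictors(
        predictor_table=predictor_table.copy())
    return denormalize_predictors(
        predictor_table=normalized_table, normalization_dict=this_norm_dict)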
def get_binarization_threshold(csv_file_names, percentile_level):
"""Computes binarization threshold for target variable.
Binarization threshold will be [q]th percentile of all target values, where
q = `percentile_level`.
:param csv_file_names: 1-D list of paths to input files.
:param percentile_level: q in the above discussion.
:return: binarization_threshold: Binarization threshold (used to turn each
target value into a yes-or-no label).
"""
max_target_values = numpy.array([])
for this_file_name in csv_file_names:
print('Reading data from: "{0:s}"...'.format(this_file_name))
this_target_table = read_feature_file(this_file_name)[-1]
max_target_values = numpy.concatenate((
max_target_values, this_target_table[TARGET_NAME].values
))
binarization_threshold = numpy.percentile(
max_target_values, percentile_level)
print('\nBinarization threshold for "{0:s}" = {1:.4e}'.format(
TARGET_NAME, binarization_threshold
))
return binarization_threshold
def binarize_target_values(target_values, binarization_threshold):
"""Binarizes target values.
E = number of examples (storm objects)
:param target_values: length-E numpy array of real-number target values.
:param binarization_threshold: Binarization threshold.
:return: target_values: length-E numpy array of binarized target values
(integers in 0...1).
"""
return (target_values >= binarization_threshold).astype(int)
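# Editor's sketch (not part of the original module): typical use of the two
# functions above. `csv_file_names` is the same list of feature-file paths used
# by get_normalization_params; the 90th percentile is an arbitrary example.
def _demo_binarization(csv_file_names, target_values):
    this_threshold = get_binarization_threshold(
        csv_file_names=csv_file_names, percentile_level=90.)
    return binarize_target_values(
        target_values=target_values, binarization_threshold=this_threshold)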
def _lambdas_to_sklearn_inputs(lambda1, lambda2):
"""Converts lambdas to input arguments for scikit-learn.
:param lambda1: L1-regularization weight.
:param lambda2: L2-regularization weight.
:return: alpha: Input arg for scikit-learn model.
:return: l1_ratio: Input arg for scikit-learn model.
"""
return lambda1 + lambda2, lambda1 / (lambda1 + lambda2)
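# Editor's note (not part of the original module): a quick numeric check of the
# conversion above. With lambda1 = 0.3 and lambda2 = 0.1, alpha = 0.4 and
# l1_ratio = 0.3 / 0.4 = 0.75, which hands scikit-learn the same two penalty
# weights.
def _demo_lambda_conversion():
    alpha, l1_ratio = _lambdas_to_sklearn_inputs(lambda1=0.3, lambda2=0.1)
    assert numpy.isclose(alpha, 0.4)
    assert numpy.isclose(l1_ratio, 0.75)
    return alpha, l1_ratio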
def setup_linear_regression(lambda1=0., lambda2=0.):
"""Sets up (but does not train) linear-regression model.
:param lambda1: L1-regularization weight.
:param lambda2: L2-regularization weight.
:return: model_object: Instance of `sklearn.linear_model`.
"""
assert lambda1 >= 0
assert lambda2 >= 0
if lambda1 < LAMBDA_TOLERANCE and lambda2 < LAMBDA_TOLERANCE:
return sklearn.linear_model.LinearRegression(
fit_intercept=True, normalize=False)
if lambda1 < LAMBDA_TOLERANCE:
return sklearn.linear_model.Ridge(
alpha=lambda2, fit_intercept=True, normalize=False,
random_state=RANDOM_SEED)
if lambda2 < LAMBDA_TOLERANCE:
return sklearn.linear_model.Lasso(
alpha=lambda1, fit_intercept=True, normalize=False,
random_state=RANDOM_SEED)
alpha, l1_ratio = _lambdas_to_sklearn_inputs(
lambda1=lambda1, lambda2=lambda2)
return sklearn.linear_model.ElasticNet(
alpha=alpha, l1_ratio=l1_ratio, fit_intercept=True, normalize=False,
random_state=RANDOM_SEED)
def train_linear_regression(model_object, training_predictor_table,
training_target_table):
"""Trains linear-regression model.
:param model_object: Untrained model created by `setup_linear_regression`.
:param training_predictor_table: See doc for `read_feature_file`.
:param training_target_table: Same.
:return: model_object: Trained version of input.
"""
model_object.fit(
X=training_predictor_table.as_matrix(),
y=training_target_table[TARGET_NAME].values
)
return model_object
def _create_directory(directory_name=None, file_name=None):
"""Creates directory (along with parents if necessary).
This method creates directories only when necessary, so you don't have to
worry about it overwriting anything.
:param directory_name: Name of desired directory.
:param file_name: [used only if `directory_name is None`]
Path to desired file. All directories in path will be created.
"""
if directory_name is None:
directory_name = os.path.split(file_name)[0]
try:
os.makedirs(directory_name)
except OSError as this_error:
if this_error.errno == errno.EEXIST and os.path.isdir(directory_name):
pass
else:
raise
def write_model(model_object, pickle_file_name):
"""Writes model to Pickle file.
:param model_object: Trained model (instance of `sklearn.linear_model`, for
example).
:param pickle_file_name: Path to output file.
"""
print('Writing model to: "{0:s}"...'.format(pickle_file_name))
_create_directory(file_name=pickle_file_name)
file_handle = open(pickle_file_name, 'wb')
pickle.dump(model_object, file_handle)
file_handle.close()
def evaluate_regression(
target_values, predicted_target_values, mean_training_target_value,
verbose=True, create_plots=True, dataset_name=None):
"""Evaluates regression model.
E = number of examples
:param target_values: length-E numpy array of actual target values.
:param predicted_target_values: length-E numpy array of predictions.
:param mean_training_target_value: Mean target value in training data.
:param verbose: Boolean flag. If True, will print results to command
window.
:param create_plots: Boolean flag. If True, will create plots.
:param dataset_name: Dataset name (e.g., "validation"). Used only if
`create_plots == True or verbose == True`.
:return: evaluation_dict: Dictionary with the following keys.
evaluation_dict['mean_absolute_error']: Mean absolute error (MAE).
evaluation_dict['mean_squared_error']: Mean squared error (MSE).
evaluation_dict['mean_bias']: Mean bias (signed error).
    evaluation_dict['mae_skill_score']: MAE skill score (fractional improvement
        over climatology; 1 is a perfect forecast, 0 means no better than
        climatology, negative means worse).
    evaluation_dict['mse_skill_score']: MSE skill score (fractional improvement
        over climatology, interpreted the same way).
"""
signed_errors = predicted_target_values - target_values
mean_bias = numpy.mean(signed_errors)
mean_absolute_error = numpy.mean(numpy.absolute(signed_errors))
mean_squared_error = numpy.mean(signed_errors ** 2)
climo_signed_errors = mean_training_target_value - target_values
climo_mae = numpy.mean(numpy.absolute(climo_signed_errors))
climo_mse = numpy.mean(climo_signed_errors ** 2)
mae_skill_score = (climo_mae - mean_absolute_error) / climo_mae
mse_skill_score = (climo_mse - mean_squared_error) / climo_mse
evaluation_dict = {
MAE_KEY: mean_absolute_error,
MSE_KEY: mean_squared_error,
MEAN_BIAS_KEY: mean_bias,
MAE_SKILL_SCORE_KEY: mae_skill_score,
MSE_SKILL_SCORE_KEY: mse_skill_score
}
if verbose or create_plots:
dataset_name = dataset_name[0].upper() + dataset_name[1:]
if verbose:
print('{0:s} MAE (mean absolute error) = {1:.3e} s^-1'.format(
dataset_name, evaluation_dict[MAE_KEY]
))
print('{0:s} MSE (mean squared error) = {1:.3e} s^-2'.format(
dataset_name, evaluation_dict[MSE_KEY]
))
print('{0:s} bias (mean signed error) = {1:.3e} s^-1'.format(
dataset_name, evaluation_dict[MEAN_BIAS_KEY]
))
message_string = (
'{0:s} MAE skill score (improvement over climatology) = {1:.3f}'
).format(dataset_name, evaluation_dict[MAE_SKILL_SCORE_KEY])
print(message_string)
message_string = (
'{0:s} MSE skill score (improvement over climatology) = {1:.3f}'
).format(dataset_name, evaluation_dict[MSE_SKILL_SCORE_KEY])
print(message_string)
if not create_plots:
return evaluation_dict
figure_object, axes_object = pyplot.subplots(
1, 1, figsize=(SMALL_FIG_WIDTH_INCHES, SMALL_FIG_HEIGHT_INCHES)
)
attr_diagrams.plot_regression_relia_curve(
observed_values=target_values, forecast_values=predicted_target_values,
num_bins=20, figure_object=figure_object, axes_object=axes_object)
axes_object.set_xlabel(r'Forecast value (s$^{-1}$)')
axes_object.set_ylabel(r'Conditional mean observation (s$^{-1}$)')
title_string = '{0:s} reliability curve for max future vorticity'.format(
dataset_name)
axes_object.set_title(title_string)
pyplot.show()
return evaluation_dict
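# Editor's sketch (not part of the original module): the skill scores above
# compare the model with climatology (always forecasting the training-mean
# target). For example, model MAE = 2e-4 and climatology MAE = 5e-4 gives
# mae_skill_score = (5e-4 - 2e-4) / 5e-4 = 0.6. The call below uses made-up
# arrays and turns off printing and plotting.
def _demo_evaluate_regression():
    these_targets = numpy.array([1., 2., 3., 4.])
    these_predictions = numpy.array([1.5, 2., 2.5, 4.5])
    return evaluate_regression(
        target_values=these_targets,
        predicted_target_values=these_predictions,
        mean_training_target_value=numpy.mean(these_targets),
        verbose=False, create_plots=False, dataset_name='demo')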
def plot_model_coefficients(model_object, predictor_names):
"""Plots coefficients for linear- or logistic-regression model.
:param model_object: Trained instance of `sklearn.linear_model`.
:param predictor_names: 1-D list of predictor names, in the same order used
to train the model.
"""
coefficients = model_object.coef_
num_dimensions = len(coefficients.shape)
if num_dimensions > 1:
coefficients = coefficients[0, ...]
num_predictors = len(predictor_names)
y_coords = numpy.linspace(
0, num_predictors - 1, num=num_predictors, dtype=float)
_, axes_object = pyplot.subplots(
1, 1, figsize=(DEFAULT_FIG_WIDTH_INCHES, DEFAULT_FIG_HEIGHT_INCHES)
)
axes_object.barh(
y_coords, coefficients, color=BAR_GRAPH_COLOUR,
edgecolor=BAR_GRAPH_COLOUR, linewidth=BAR_GRAPH_EDGE_WIDTH)
pyplot.xlabel('Coefficient')
pyplot.ylabel('Predictor variable')
pyplot.yticks([], [])
x_tick_values, _ = pyplot.xticks()
pyplot.xticks(x_tick_values, rotation=90)
x_min = numpy.percentile(coefficients, 1.)
x_max = numpy.percentile(coefficients, 99.)
pyplot.xlim([x_min, x_max])
for j in range(num_predictors):
axes_object.text(
0, y_coords[j], predictor_names[j], color=BAR_GRAPH_FONT_COLOUR,
horizontalalignment='center', verticalalignment='center',
fontsize=BAR_GRAPH_FONT_SIZE)
def _add_colour_bar(
axes_object, colour_map_object, values_to_colour, min_colour_value,
max_colour_value, colour_norm_object=None,
orientation_string='vertical', extend_min=True, extend_max=True):
"""Adds colour bar to existing axes.
:param axes_object: Existing axes (instance of
`matplotlib.axes._subplots.AxesSubplot`).
:param colour_map_object: Colour scheme (instance of
`matplotlib.pyplot.cm`).
:param values_to_colour: numpy array of values to colour.
:param min_colour_value: Minimum value in colour scheme.
:param max_colour_value: Max value in colour scheme.
:param colour_norm_object: Instance of `matplotlib.colors.BoundaryNorm`,
defining the scale of the colour map. If `colour_norm_object is None`,
will assume that scale is linear.
:param orientation_string: Orientation of colour bar ("vertical" or
"horizontal").
:param extend_min: Boolean flag. If True, the bottom of the colour bar will
have an arrow. If False, it will be a flat line, suggesting that lower
values are not possible.
:param extend_max: Same but for top of colour bar.
:return: colour_bar_object: Colour bar (instance of
`matplotlib.pyplot.colorbar`) created by this method.
"""
if colour_norm_object is None:
colour_norm_object = matplotlib.colors.Normalize(
vmin=min_colour_value, vmax=max_colour_value, clip=False)
scalar_mappable_object = pyplot.cm.ScalarMappable(
cmap=colour_map_object, norm=colour_norm_object)
scalar_mappable_object.set_array(values_to_colour)
if extend_min and extend_max:
extend_string = 'both'
elif extend_min:
extend_string = 'min'
elif extend_max:
extend_string = 'max'
else:
extend_string = 'neither'
if orientation_string == 'horizontal':
padding = 0.075
else:
padding = 0.05
colour_bar_object = pyplot.colorbar(
ax=axes_object, mappable=scalar_mappable_object,
orientation=orientation_string, pad=padding, extend=extend_string,
shrink=0.8)
colour_bar_object.ax.tick_params(labelsize=FONT_SIZE)
return colour_bar_object
def plot_scores_2d(
score_matrix, min_colour_value, max_colour_value, x_tick_labels,
y_tick_labels, colour_map_object=pyplot.cm.plasma):
"""Plots scores on 2-D grid.
M = number of rows in grid
N = number of columns in grid
:param score_matrix: M-by-N numpy array of scores.
:param min_colour_value: Minimum value in colour scheme.
:param max_colour_value: Max value in colour scheme.
:param x_tick_labels: length-N numpy array of tick values.
:param y_tick_labels: length-M numpy array of tick values.
:param colour_map_object: Colour scheme (instance of
`matplotlib.pyplot.cm`).
"""
_, axes_object = pyplot.subplots(
1, 1, figsize=(DEFAULT_FIG_WIDTH_INCHES, DEFAULT_FIG_HEIGHT_INCHES)
)
pyplot.imshow(
score_matrix, cmap=colour_map_object, origin='lower',
vmin=min_colour_value, vmax=max_colour_value)
x_tick_values = numpy.linspace(
0, score_matrix.shape[1] - 1, num=score_matrix.shape[1], dtype=float
)
y_tick_values = numpy.linspace(
0, score_matrix.shape[0] - 1, num=score_matrix.shape[0], dtype=float
)
pyplot.xticks(x_tick_values, x_tick_labels)
pyplot.yticks(y_tick_values, y_tick_labels)
_add_colour_bar(
axes_object=axes_object, colour_map_object=colour_map_object,
values_to_colour=score_matrix, min_colour_value=min_colour_value,
max_colour_value=max_colour_value)
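# Editor's sketch (not part of the original module): plotting a small
# hyperparameter grid, e.g. validation MAE over lambda1 (rows) and lambda2
# (columns). All values here are synthetic.
def _demo_plot_scores_2d():
    lambda1_values = numpy.array([0., 0.01, 0.1])
    lambda2_values = numpy.array([0., 0.01, 0.1])
    score_matrix = numpy.random.uniform(size=(3, 3))
    plot_scores_2d(
        score_matrix=score_matrix,
        min_colour_value=numpy.min(score_matrix),
        max_colour_value=numpy.max(score_matrix),
        x_tick_labels=lambda2_values, y_tick_labels=lambda1_values)
    pyplot.show()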
def setup_logistic_regression(lambda1=0., lambda2=0.):
"""Sets up (but does not train) logistic-regression model.
:param lambda1: L1-regularization weight.
:param lambda2: L2-regularization weight.
:return: model_object: Instance of `sklearn.linear_model.SGDClassifier`.
"""
assert lambda1 >= 0
assert lambda2 >= 0
if lambda1 < LAMBDA_TOLERANCE and lambda2 < LAMBDA_TOLERANCE:
return sklearn.linear_model.SGDClassifier(
loss='log', penalty='none', fit_intercept=True, verbose=0,
random_state=RANDOM_SEED)
if lambda1 < LAMBDA_TOLERANCE:
return sklearn.linear_model.SGDClassifier(
loss='log', penalty='l2', alpha=lambda2, fit_intercept=True,
verbose=0, random_state=RANDOM_SEED)
if lambda2 < LAMBDA_TOLERANCE:
return sklearn.linear_model.SGDClassifier(
loss='log', penalty='l1', alpha=lambda1, fit_intercept=True,
verbose=0, random_state=RANDOM_SEED)
alpha, l1_ratio = _lambdas_to_sklearn_inputs(
lambda1=lambda1, lambda2=lambda2)
return sklearn.linear_model.SGDClassifier(
loss='log', penalty='elasticnet', alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=True, verbose=0, random_state=RANDOM_SEED)
def train_logistic_regression(model_object, training_predictor_table,
training_target_table):
"""Trains logistic-regression model.
:param model_object: Untrained model created by `setup_logistic_regression`.
:param training_predictor_table: See doc for `read_feature_file`.
:param training_target_table: Same.
:return: model_object: Trained version of input.
"""
model_object.fit(
X=training_predictor_table.as_matrix(),
y=training_target_table[BINARIZED_TARGET_NAME].values
)
return model_object
def eval_binary_classifn(
observed_labels, forecast_probabilities, training_event_frequency,
verbose=True, create_plots=True, dataset_name=None):
"""Evaluates binary-classification model.
E = number of examples
:param observed_labels: length-E numpy array of observed labels (integers in
0...1, where 1 means that event occurred).
:param forecast_probabilities: length-E numpy array with forecast
probabilities of event (positive class).
:param training_event_frequency: Frequency of event in training data.
:param verbose: Boolean flag. If True, will print results to command
window.
:param create_plots: Boolean flag. If True, will create plots.
:param dataset_name: Dataset name (e.g., "validation"). Used only if
`create_plots == True or verbose == True`.
"""
pofd_by_threshold, pod_by_threshold = roc_curves.get_points_in_roc_curve(
observed_labels=observed_labels,
forecast_probabilities=forecast_probabilities)
max_peirce_score = numpy.nanmax(pod_by_threshold - pofd_by_threshold)
area_under_roc_curve = sklearn.metrics.auc(
x=pofd_by_threshold, y=pod_by_threshold)
pod_by_threshold, success_ratio_by_threshold = (
perf_diagrams.get_points_in_perf_diagram(
observed_labels=observed_labels,
forecast_probabilities=forecast_probabilities)
)
csi_by_threshold = (
(pod_by_threshold ** -1 + success_ratio_by_threshold ** -1 - 1) ** -1
)
max_csi = numpy.nanmax(csi_by_threshold)
mean_forecast_by_bin, event_freq_by_bin, num_examples_by_bin = (
attr_diagrams.get_points_in_relia_curve(
observed_labels=observed_labels,
forecast_probabilities=forecast_probabilities, num_bins=20)
)
uncertainty = training_event_frequency * (1. - training_event_frequency)
this_numerator = numpy.nansum(
num_examples_by_bin *
(mean_forecast_by_bin - event_freq_by_bin) ** 2
)
reliability = this_numerator / numpy.sum(num_examples_by_bin)
this_numerator = numpy.nansum(
num_examples_by_bin *
(event_freq_by_bin - training_event_frequency) ** 2
)
resolution = this_numerator / numpy.sum(num_examples_by_bin)
brier_score = uncertainty + reliability - resolution
brier_skill_score = (resolution - reliability) / uncertainty
evaluation_dict = {
MAX_PEIRCE_SCORE_KEY: max_peirce_score,
AUC_KEY: area_under_roc_curve,
MAX_CSI_KEY: max_csi,
BRIER_SCORE_KEY: brier_score,
BRIER_SKILL_SCORE_KEY: brier_skill_score
}
if verbose or create_plots:
dataset_name = dataset_name[0].upper() + dataset_name[1:]
if verbose:
print('{0:s} Max Peirce score (POD - POFD) = {1:.3f}'.format(
dataset_name, evaluation_dict[MAX_PEIRCE_SCORE_KEY]
))
print('{0:s} AUC (area under ROC curve) = {1:.3f}'.format(
dataset_name, evaluation_dict[AUC_KEY]
))
print('{0:s} Max CSI (critical success index) = {1:.3f}'.format(
dataset_name, evaluation_dict[MAX_CSI_KEY]
))
print('{0:s} Brier score = {1:.3f}'.format(
dataset_name, evaluation_dict[BRIER_SCORE_KEY]
))
message_string = (
'{0:s} Brier skill score (improvement over climatology) = {1:.3f}'
).format(dataset_name, evaluation_dict[BRIER_SKILL_SCORE_KEY])
print(message_string)
if not create_plots:
return evaluation_dict
_, axes_object = pyplot.subplots(
1, 1, figsize=(SMALL_FIG_WIDTH_INCHES, SMALL_FIG_HEIGHT_INCHES)
)
roc_curves.plot_roc_curve(
observed_labels=observed_labels,
forecast_probabilities=forecast_probabilities,
axes_object=axes_object)
title_string = '{0:s} ROC curve (AUC = {1:.3f})'.format(
dataset_name, evaluation_dict[AUC_KEY]
)
pyplot.title(title_string)
pyplot.show()
_, axes_object = pyplot.subplots(
1, 1, figsize=(SMALL_FIG_WIDTH_INCHES, SMALL_FIG_HEIGHT_INCHES)
)
perf_diagrams.plot_performance_diagram(
observed_labels=observed_labels,
forecast_probabilities=forecast_probabilities,
axes_object=axes_object)
title_string = '{0:s} performance diagram (max CSI = {1:.3f})'.format(
dataset_name, evaluation_dict[MAX_CSI_KEY]
)
pyplot.title(title_string)
pyplot.show()
figure_object, axes_object = pyplot.subplots(
1, 1, figsize=(SMALL_FIG_WIDTH_INCHES, SMALL_FIG_HEIGHT_INCHES)
)
attr_diagrams.plot_attributes_diagram(
observed_labels=observed_labels,
forecast_probabilities=forecast_probabilities, num_bins=20,
figure_object=figure_object, axes_object=axes_object)
title_string = (
'{0:s} attributes diagram (Brier skill score = {1:.3f})'
).format(dataset_name, evaluation_dict[BRIER_SKILL_SCORE_KEY])
axes_object.set_title(title_string)
pyplot.show()
return evaluation_dict
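# Editor's note (not part of the original function): the Brier score above is
# assembled from the standard decomposition BS = uncertainty + reliability -
# resolution, and BSS = (resolution - reliability) / uncertainty. For example,
# with a training event frequency of 0.1, uncertainty = 0.1 * 0.9 = 0.09; a
# model with resolution 0.03 and reliability 0.005 gets
# BS = 0.09 + 0.005 - 0.03 = 0.065 and BSS = 0.025 / 0.09 ~= 0.28.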
def setup_classification_tree(min_examples_at_split=30,
min_examples_at_leaf=30):
"""Sets up (but does not train) decision tree for classification.
:param min_examples_at_split: Minimum number of examples at split node.
:param min_examples_at_leaf: Minimum number of examples at leaf node.
:return: model_object: Instance of `sklearn.tree.DecisionTreeClassifier`.
"""
return sklearn.tree.DecisionTreeClassifier(
criterion='entropy', min_samples_split=min_examples_at_split,
min_samples_leaf=min_examples_at_leaf, random_state=RANDOM_SEED)
def train_classification_tree(model_object, training_predictor_table,
training_target_table):
"""Trains decision tree for classification.
:param model_object: Untrained model created by `setup_classification_tree`.
:param training_predictor_table: See doc for `read_feature_file`.
:param training_target_table: Same.
:return: model_object: Trained version of input.
"""
model_object.fit(
X=training_predictor_table.as_matrix(),
y=training_target_table[BINARIZED_TARGET_NAME].values
)
return model_object
def setup_classification_forest(
max_predictors_per_split, num_trees=100, min_examples_at_split=30,
min_examples_at_leaf=30):
"""Sets up (but does not train) random forest for classification.
:param max_predictors_per_split: Max number of predictors to try at each
split.
:param num_trees: Number of trees.
:param min_examples_at_split: Minimum number of examples at split node.
:param min_examples_at_leaf: Minimum number of examples at leaf node.
:return: model_object: Instance of
`sklearn.ensemble.RandomForestClassifier`.
"""
return sklearn.ensemble.RandomForestClassifier(
n_estimators=num_trees, min_samples_split=min_examples_at_split,
min_samples_leaf=min_examples_at_leaf,
max_features=max_predictors_per_split, bootstrap=True,
random_state=RANDOM_SEED, verbose=2)
def train_classification_forest(model_object, training_predictor_table,
training_target_table):
"""Trains random forest for classification.
:param model_object: Untrained model created by
`setup_classification_forest`.
:param training_predictor_table: See doc for `read_feature_file`.
:param training_target_table: Same.
:return: model_object: Trained version of input.
"""
model_object.fit(
X=training_predictor_table.as_matrix(),
y=training_target_table[BINARIZED_TARGET_NAME].values
)
return model_object
def setup_classification_gbt(
max_predictors_per_split, num_trees=100, learning_rate=0.1,
min_examples_at_split=30, min_examples_at_leaf=30):
"""Sets up (but does not train) gradient-boosted trees for classification.
:param max_predictors_per_split: Max number of predictors to try at each
split.
:param num_trees: Number of trees.
:param learning_rate: Learning rate.
:param min_examples_at_split: Minimum number of examples at split node.
:param min_examples_at_leaf: Minimum number of examples at leaf node.
:return: model_object: Instance of
`sklearn.ensemble.GradientBoostingClassifier`.
"""
return sklearn.ensemble.GradientBoostingClassifier(
loss='exponential', learning_rate=learning_rate, n_estimators=num_trees,
min_samples_split=min_examples_at_split,
min_samples_leaf=min_examples_at_leaf,
max_features=max_predictors_per_split, random_state=RANDOM_SEED,
verbose=2)
def train_classification_gbt(model_object, training_predictor_table,
training_target_table):
"""Trains gradient-boosted trees for classification.
:param model_object: Untrained model created by
`setup_classification_gbt`.
:param training_predictor_table: See doc for `read_feature_file`.
:param training_target_table: Same.
:return: model_object: Trained version of input.
"""
model_object.fit(
X=training_predictor_table.as_matrix(),
y=training_target_table[BINARIZED_TARGET_NAME].values
)
return model_object
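# Editor's sketch (not part of the original module): the setup_* / train_*
# pairs above all follow the same pattern. The tables come from
# read_feature_file (or a concatenation of several files); the hyperparameter
# values below are arbitrary examples.
def _demo_train_forest(training_predictor_table, training_target_table):
    model_object = setup_classification_forest(
        max_predictors_per_split=5, num_trees=100,
        min_examples_at_split=30, min_examples_at_leaf=30)
    return train_classification_forest(
        model_object=model_object,
        training_predictor_table=training_predictor_table,
        training_target_table=training_target_table)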
|
[
"matplotlib.pyplot.title",
"numpy.absolute",
"pickle.dump",
"numpy.sum",
"pandas.read_csv",
"module_4.attributes_diagrams.plot_attributes_diagram",
"numpy.mean",
"module_4.roc_curves.plot_roc_curve",
"glob.glob",
"pandas.DataFrame",
"numpy.std",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.cm.ScalarMappable",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.rc",
"numpy.linspace",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.subplots",
"pandas.concat",
"numpy.nansum",
"numpy.average",
"matplotlib.pyplot.show",
"module_4.attributes_diagrams.get_points_in_relia_curve",
"module_4.roc_curves.get_points_in_roc_curve",
"numpy.percentile",
"matplotlib.pyplot.ylabel",
"numpy.nanmax",
"numpy.concatenate",
"matplotlib.pyplot.xlim",
"numpy.logical_and",
"time.gmtime",
"numpy.array",
"module_4.performance_diagrams.plot_performance_diagram",
"module_4.attributes_diagrams.plot_regression_relia_curve",
"module_4.performance_diagrams.get_points_in_perf_diagram",
"matplotlib.pyplot.xlabel",
"time.strptime",
"numpy.sqrt"
] |
[((1929, 1962), 'matplotlib.pyplot.rc', 'pyplot.rc', (['"""font"""'], {'size': 'FONT_SIZE'}), "('font', size=FONT_SIZE)\n", (1938, 1962), True, 'import matplotlib.pyplot as pyplot\n'), ((1963, 2001), 'matplotlib.pyplot.rc', 'pyplot.rc', (['"""axes"""'], {'titlesize': 'FONT_SIZE'}), "('axes', titlesize=FONT_SIZE)\n", (1972, 2001), True, 'import matplotlib.pyplot as pyplot\n'), ((2002, 2040), 'matplotlib.pyplot.rc', 'pyplot.rc', (['"""axes"""'], {'labelsize': 'FONT_SIZE'}), "('axes', labelsize=FONT_SIZE)\n", (2011, 2040), True, 'import matplotlib.pyplot as pyplot\n'), ((2041, 2080), 'matplotlib.pyplot.rc', 'pyplot.rc', (['"""xtick"""'], {'labelsize': 'FONT_SIZE'}), "('xtick', labelsize=FONT_SIZE)\n", (2050, 2080), True, 'import matplotlib.pyplot as pyplot\n'), ((2081, 2120), 'matplotlib.pyplot.rc', 'pyplot.rc', (['"""ytick"""'], {'labelsize': 'FONT_SIZE'}), "('ytick', labelsize=FONT_SIZE)\n", (2090, 2120), True, 'import matplotlib.pyplot as pyplot\n'), ((2121, 2160), 'matplotlib.pyplot.rc', 'pyplot.rc', (['"""legend"""'], {'fontsize': 'FONT_SIZE'}), "('legend', fontsize=FONT_SIZE)\n", (2130, 2160), True, 'import matplotlib.pyplot as pyplot\n'), ((2161, 2201), 'matplotlib.pyplot.rc', 'pyplot.rc', (['"""figure"""'], {'titlesize': 'FONT_SIZE'}), "('figure', titlesize=FONT_SIZE)\n", (2170, 2201), True, 'import matplotlib.pyplot as pyplot\n'), ((1747, 1787), 'numpy.array', 'numpy.array', (['[27, 158, 119]'], {'dtype': 'float'}), '([27, 158, 119], dtype=float)\n', (1758, 1787), False, 'import numpy\n'), ((1868, 1906), 'numpy.array', 'numpy.array', (['[217, 95, 2]'], {'dtype': 'float'}), '([217, 95, 2], dtype=float)\n', (1879, 1906), False, 'import numpy\n'), ((4972, 4999), 'glob.glob', 'glob.glob', (['csv_file_pattern'], {}), '(csv_file_pattern)\n', (4981, 4999), False, 'import glob\n'), ((5953, 6002), 'pandas.read_csv', 'pandas.read_csv', (['csv_file_name'], {'header': '(0)', 'sep': '""","""'}), "(csv_file_name, header=0, sep=',')\n", (5968, 6002), False, 'import pandas\n'), ((7648, 7713), 'pandas.concat', 'pandas.concat', (['list_of_metadata_tables'], {'axis': '(0)', 'ignore_index': '(True)'}), '(list_of_metadata_tables, axis=0, ignore_index=True)\n', (7661, 7713), False, 'import pandas\n'), ((7745, 7811), 'pandas.concat', 'pandas.concat', (['list_of_predictor_tables'], {'axis': '(0)', 'ignore_index': '(True)'}), '(list_of_predictor_tables, axis=0, ignore_index=True)\n', (7758, 7811), False, 'import pandas\n'), ((7840, 7903), 'pandas.concat', 'pandas.concat', (['list_of_target_tables'], {'axis': '(0)', 'ignore_index': '(True)'}), '(list_of_target_tables, axis=0, ignore_index=True)\n', (7853, 7903), False, 'import pandas\n'), ((9051, 9130), 'numpy.array', 'numpy.array', (['[intermediate_normalization_dict[NUM_VALUES_KEY], new_values.size]'], {}), '([intermediate_normalization_dict[NUM_VALUES_KEY], new_values.size])\n', (9062, 9130), False, 'import numpy\n'), ((9200, 9249), 'numpy.average', 'numpy.average', (['these_means'], {'weights': 'these_weights'}), '(these_means, weights=these_weights)\n', (9213, 9249), False, 'import numpy\n'), ((9457, 9506), 'numpy.average', 'numpy.average', (['these_means'], {'weights': 'these_weights'}), '(these_means, weights=these_weights)\n', (9470, 9506), False, 'import numpy\n'), ((10047, 10190), 'numpy.sqrt', 'numpy.sqrt', (['(multiplier * (intermediate_normalization_dict[MEAN_OF_SQUARES_KEY] - \n intermediate_normalization_dict[MEAN_VALUE_KEY] ** 2))'], {}), '(multiplier * (intermediate_normalization_dict[\n MEAN_OF_SQUARES_KEY] - 
intermediate_normalization_dict[MEAN_VALUE_KEY] **\n 2))\n', (10057, 10190), False, 'import numpy\n'), ((14736, 14751), 'numpy.array', 'numpy.array', (['[]'], {}), '([])\n', (14747, 14751), False, 'import numpy\n'), ((15090, 15143), 'numpy.percentile', 'numpy.percentile', (['max_target_values', 'percentile_level'], {}), '(max_target_values, percentile_level)\n', (15106, 15143), False, 'import numpy\n'), ((18976, 19014), 'pickle.dump', 'pickle.dump', (['model_object', 'file_handle'], {}), '(model_object, file_handle)\n', (18987, 19014), False, 'import pickle\n'), ((20369, 20394), 'numpy.mean', 'numpy.mean', (['signed_errors'], {}), '(signed_errors)\n', (20379, 20394), False, 'import numpy\n'), ((20488, 20518), 'numpy.mean', 'numpy.mean', (['(signed_errors ** 2)'], {}), '(signed_errors ** 2)\n', (20498, 20518), False, 'import numpy\n'), ((20669, 20705), 'numpy.mean', 'numpy.mean', (['(climo_signed_errors ** 2)'], {}), '(climo_signed_errors ** 2)\n', (20679, 20705), False, 'import numpy\n'), ((22091, 22176), 'matplotlib.pyplot.subplots', 'pyplot.subplots', (['(1)', '(1)'], {'figsize': '(SMALL_FIG_WIDTH_INCHES, SMALL_FIG_HEIGHT_INCHES)'}), '(1, 1, figsize=(SMALL_FIG_WIDTH_INCHES, SMALL_FIG_HEIGHT_INCHES)\n )\n', (22106, 22176), True, 'import matplotlib.pyplot as pyplot\n'), ((22191, 22380), 'module_4.attributes_diagrams.plot_regression_relia_curve', 'attr_diagrams.plot_regression_relia_curve', ([], {'observed_values': 'target_values', 'forecast_values': 'predicted_target_values', 'num_bins': '(20)', 'figure_object': 'figure_object', 'axes_object': 'axes_object'}), '(observed_values=target_values,\n forecast_values=predicted_target_values, num_bins=20, figure_object=\n figure_object, axes_object=axes_object)\n', (22232, 22380), True, 'from module_4 import attributes_diagrams as attr_diagrams\n'), ((22663, 22676), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (22674, 22676), True, 'import matplotlib.pyplot as pyplot\n'), ((23234, 23304), 'numpy.linspace', 'numpy.linspace', (['(0)', '(num_predictors - 1)'], {'num': 'num_predictors', 'dtype': 'float'}), '(0, num_predictors - 1, num=num_predictors, dtype=float)\n', (23248, 23304), False, 'import numpy\n'), ((23336, 23424), 'matplotlib.pyplot.subplots', 'pyplot.subplots', (['(1)', '(1)'], {'figsize': '(DEFAULT_FIG_WIDTH_INCHES, DEFAULT_FIG_HEIGHT_INCHES)'}), '(1, 1, figsize=(DEFAULT_FIG_WIDTH_INCHES,\n DEFAULT_FIG_HEIGHT_INCHES))\n', (23351, 23424), True, 'import matplotlib.pyplot as pyplot\n'), ((23587, 23615), 'matplotlib.pyplot.xlabel', 'pyplot.xlabel', (['"""Coefficient"""'], {}), "('Coefficient')\n", (23600, 23615), True, 'import matplotlib.pyplot as pyplot\n'), ((23620, 23655), 'matplotlib.pyplot.ylabel', 'pyplot.ylabel', (['"""Predictor variable"""'], {}), "('Predictor variable')\n", (23633, 23655), True, 'import matplotlib.pyplot as pyplot\n'), ((23661, 23682), 'matplotlib.pyplot.yticks', 'pyplot.yticks', (['[]', '[]'], {}), '([], [])\n', (23674, 23682), True, 'import matplotlib.pyplot as pyplot\n'), ((23706, 23721), 'matplotlib.pyplot.xticks', 'pyplot.xticks', ([], {}), '()\n', (23719, 23721), True, 'import matplotlib.pyplot as pyplot\n'), ((23726, 23767), 'matplotlib.pyplot.xticks', 'pyplot.xticks', (['x_tick_values'], {'rotation': '(90)'}), '(x_tick_values, rotation=90)\n', (23739, 23767), True, 'import matplotlib.pyplot as pyplot\n'), ((23781, 23816), 'numpy.percentile', 'numpy.percentile', (['coefficients', '(1.0)'], {}), '(coefficients, 1.0)\n', (23797, 23816), False, 'import numpy\n'), ((23828, 23864), 'numpy.percentile', 
'numpy.percentile', (['coefficients', '(99.0)'], {}), '(coefficients, 99.0)\n', (23844, 23864), False, 'import numpy\n'), ((23868, 23895), 'matplotlib.pyplot.xlim', 'pyplot.xlim', (['[x_min, x_max]'], {}), '([x_min, x_max])\n', (23879, 23895), True, 'import matplotlib.pyplot as pyplot\n'), ((25648, 25721), 'matplotlib.pyplot.cm.ScalarMappable', 'pyplot.cm.ScalarMappable', ([], {'cmap': 'colour_map_object', 'norm': 'colour_norm_object'}), '(cmap=colour_map_object, norm=colour_norm_object)\n', (25672, 25721), True, 'import matplotlib.pyplot as pyplot\n'), ((26124, 26275), 'matplotlib.pyplot.colorbar', 'pyplot.colorbar', ([], {'ax': 'axes_object', 'mappable': 'scalar_mappable_object', 'orientation': 'orientation_string', 'pad': 'padding', 'extend': 'extend_string', 'shrink': '(0.8)'}), '(ax=axes_object, mappable=scalar_mappable_object,\n orientation=orientation_string, pad=padding, extend=extend_string,\n shrink=0.8)\n', (26139, 26275), True, 'import matplotlib.pyplot as pyplot\n'), ((27055, 27143), 'matplotlib.pyplot.subplots', 'pyplot.subplots', (['(1)', '(1)'], {'figsize': '(DEFAULT_FIG_WIDTH_INCHES, DEFAULT_FIG_HEIGHT_INCHES)'}), '(1, 1, figsize=(DEFAULT_FIG_WIDTH_INCHES,\n DEFAULT_FIG_HEIGHT_INCHES))\n', (27070, 27143), True, 'import matplotlib.pyplot as pyplot\n'), ((27159, 27277), 'matplotlib.pyplot.imshow', 'pyplot.imshow', (['score_matrix'], {'cmap': 'colour_map_object', 'origin': '"""lower"""', 'vmin': 'min_colour_value', 'vmax': 'max_colour_value'}), "(score_matrix, cmap=colour_map_object, origin='lower', vmin=\n min_colour_value, vmax=max_colour_value)\n", (27172, 27277), True, 'import matplotlib.pyplot as pyplot\n'), ((27311, 27399), 'numpy.linspace', 'numpy.linspace', (['(0)', '(score_matrix.shape[1] - 1)'], {'num': 'score_matrix.shape[1]', 'dtype': 'float'}), '(0, score_matrix.shape[1] - 1, num=score_matrix.shape[1],\n dtype=float)\n', (27325, 27399), False, 'import numpy\n'), ((27430, 27518), 'numpy.linspace', 'numpy.linspace', (['(0)', '(score_matrix.shape[0] - 1)'], {'num': 'score_matrix.shape[0]', 'dtype': 'float'}), '(0, score_matrix.shape[0] - 1, num=score_matrix.shape[0],\n dtype=float)\n', (27444, 27518), False, 'import numpy\n'), ((27534, 27577), 'matplotlib.pyplot.xticks', 'pyplot.xticks', (['x_tick_values', 'x_tick_labels'], {}), '(x_tick_values, x_tick_labels)\n', (27547, 27577), True, 'import matplotlib.pyplot as pyplot\n'), ((27582, 27625), 'matplotlib.pyplot.yticks', 'pyplot.yticks', (['y_tick_values', 'y_tick_labels'], {}), '(y_tick_values, y_tick_labels)\n', (27595, 27625), True, 'import matplotlib.pyplot as pyplot\n'), ((30589, 30707), 'module_4.roc_curves.get_points_in_roc_curve', 'roc_curves.get_points_in_roc_curve', ([], {'observed_labels': 'observed_labels', 'forecast_probabilities': 'forecast_probabilities'}), '(observed_labels=observed_labels,\n forecast_probabilities=forecast_probabilities)\n', (30623, 30707), False, 'from module_4 import roc_curves\n'), ((30745, 30795), 'numpy.nanmax', 'numpy.nanmax', (['(pod_by_threshold - pofd_by_threshold)'], {}), '(pod_by_threshold - pofd_by_threshold)\n', (30757, 30795), False, 'import numpy\n'), ((30955, 31079), 'module_4.performance_diagrams.get_points_in_perf_diagram', 'perf_diagrams.get_points_in_perf_diagram', ([], {'observed_labels': 'observed_labels', 'forecast_probabilities': 'forecast_probabilities'}), '(observed_labels=observed_labels,\n forecast_probabilities=forecast_probabilities)\n', (30995, 31079), True, 'from module_4 import performance_diagrams as perf_diagrams\n'), ((31231, 31261), 
'numpy.nanmax', 'numpy.nanmax', (['csi_by_threshold'], {}), '(csi_by_threshold)\n', (31243, 31261), False, 'import numpy\n'), ((31340, 31476), 'module_4.attributes_diagrams.get_points_in_relia_curve', 'attr_diagrams.get_points_in_relia_curve', ([], {'observed_labels': 'observed_labels', 'forecast_probabilities': 'forecast_probabilities', 'num_bins': '(20)'}), '(observed_labels=observed_labels,\n forecast_probabilities=forecast_probabilities, num_bins=20)\n', (31379, 31476), True, 'from module_4 import attributes_diagrams as attr_diagrams\n'), ((31604, 31691), 'numpy.nansum', 'numpy.nansum', (['(num_examples_by_bin * (mean_forecast_by_bin - event_freq_by_bin) ** 2)'], {}), '(num_examples_by_bin * (mean_forecast_by_bin -\n event_freq_by_bin) ** 2)\n', (31616, 31691), False, 'import numpy\n'), ((31798, 31889), 'numpy.nansum', 'numpy.nansum', (['(num_examples_by_bin * (event_freq_by_bin - training_event_frequency) ** 2)'], {}), '(num_examples_by_bin * (event_freq_by_bin -\n training_event_frequency) ** 2)\n', (31810, 31889), False, 'import numpy\n'), ((33269, 33354), 'matplotlib.pyplot.subplots', 'pyplot.subplots', (['(1)', '(1)'], {'figsize': '(SMALL_FIG_WIDTH_INCHES, SMALL_FIG_HEIGHT_INCHES)'}), '(1, 1, figsize=(SMALL_FIG_WIDTH_INCHES, SMALL_FIG_HEIGHT_INCHES)\n )\n', (33284, 33354), True, 'import matplotlib.pyplot as pyplot\n'), ((33369, 33503), 'module_4.roc_curves.plot_roc_curve', 'roc_curves.plot_roc_curve', ([], {'observed_labels': 'observed_labels', 'forecast_probabilities': 'forecast_probabilities', 'axes_object': 'axes_object'}), '(observed_labels=observed_labels,\n forecast_probabilities=forecast_probabilities, axes_object=axes_object)\n', (33394, 33503), False, 'from module_4 import roc_curves\n'), ((33645, 33671), 'matplotlib.pyplot.title', 'pyplot.title', (['title_string'], {}), '(title_string)\n', (33657, 33671), True, 'import matplotlib.pyplot as pyplot\n'), ((33676, 33689), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (33687, 33689), True, 'import matplotlib.pyplot as pyplot\n'), ((33712, 33797), 'matplotlib.pyplot.subplots', 'pyplot.subplots', (['(1)', '(1)'], {'figsize': '(SMALL_FIG_WIDTH_INCHES, SMALL_FIG_HEIGHT_INCHES)'}), '(1, 1, figsize=(SMALL_FIG_WIDTH_INCHES, SMALL_FIG_HEIGHT_INCHES)\n )\n', (33727, 33797), True, 'import matplotlib.pyplot as pyplot\n'), ((33812, 33959), 'module_4.performance_diagrams.plot_performance_diagram', 'perf_diagrams.plot_performance_diagram', ([], {'observed_labels': 'observed_labels', 'forecast_probabilities': 'forecast_probabilities', 'axes_object': 'axes_object'}), '(observed_labels=observed_labels,\n forecast_probabilities=forecast_probabilities, axes_object=axes_object)\n', (33850, 33959), True, 'from module_4 import performance_diagrams as perf_diagrams\n'), ((34119, 34145), 'matplotlib.pyplot.title', 'pyplot.title', (['title_string'], {}), '(title_string)\n', (34131, 34145), True, 'import matplotlib.pyplot as pyplot\n'), ((34150, 34163), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (34161, 34163), True, 'import matplotlib.pyplot as pyplot\n'), ((34198, 34283), 'matplotlib.pyplot.subplots', 'pyplot.subplots', (['(1)', '(1)'], {'figsize': '(SMALL_FIG_WIDTH_INCHES, SMALL_FIG_HEIGHT_INCHES)'}), '(1, 1, figsize=(SMALL_FIG_WIDTH_INCHES, SMALL_FIG_HEIGHT_INCHES)\n )\n', (34213, 34283), True, 'import matplotlib.pyplot as pyplot\n'), ((34298, 34490), 'module_4.attributes_diagrams.plot_attributes_diagram', 'attr_diagrams.plot_attributes_diagram', ([], {'observed_labels': 'observed_labels', 'forecast_probabilities': 
'forecast_probabilities', 'num_bins': '(20)', 'figure_object': 'figure_object', 'axes_object': 'axes_object'}), '(observed_labels=observed_labels,\n forecast_probabilities=forecast_probabilities, num_bins=20,\n figure_object=figure_object, axes_object=axes_object)\n', (34335, 34490), True, 'from module_4 import attributes_diagrams as attr_diagrams\n'), ((34707, 34720), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (34718, 34720), True, 'import matplotlib.pyplot as pyplot\n'), ((2727, 2766), 'time.strptime', 'time.strptime', (['time_string', 'time_format'], {}), '(time_string, time_format)\n', (2740, 2766), False, 'import time\n'), ((3164, 3190), 'time.gmtime', 'time.gmtime', (['unix_time_sec'], {}), '(unix_time_sec)\n', (3175, 3190), False, 'import time\n'), ((11422, 11458), 'numpy.array', 'numpy.array', (['[this_mean, this_stdev]'], {}), '([this_mean, this_stdev])\n', (11433, 11458), False, 'import numpy\n'), ((14960, 15037), 'numpy.concatenate', 'numpy.concatenate', (['(max_target_values, this_target_table[TARGET_NAME].values)'], {}), '((max_target_values, this_target_table[TARGET_NAME].values))\n', (14977, 15037), False, 'import numpy\n'), ((20432, 20461), 'numpy.absolute', 'numpy.absolute', (['signed_errors'], {}), '(signed_errors)\n', (20446, 20461), False, 'import numpy\n'), ((20616, 20651), 'numpy.absolute', 'numpy.absolute', (['climo_signed_errors'], {}), '(climo_signed_errors)\n', (20630, 20651), False, 'import numpy\n'), ((31745, 31775), 'numpy.sum', 'numpy.sum', (['num_examples_by_bin'], {}), '(num_examples_by_bin)\n', (31754, 31775), False, 'import numpy\n'), ((31942, 31972), 'numpy.sum', 'numpy.sum', (['num_examples_by_bin'], {}), '(num_examples_by_bin)\n', (31951, 31972), False, 'import numpy\n'), ((5301, 5410), 'numpy.logical_and', 'numpy.logical_and', (['(file_times_unix_sec >= first_time_unix_sec)', '(file_times_unix_sec <= last_time_unix_sec)'], {}), '(file_times_unix_sec >= first_time_unix_sec, \n file_times_unix_sec <= last_time_unix_sec)\n', (5318, 5410), False, 'import numpy\n'), ((6780, 6798), 'pandas.DataFrame', 'pandas.DataFrame', ([], {}), '()\n', (6796, 6798), False, 'import pandas\n'), ((6844, 6862), 'pandas.DataFrame', 'pandas.DataFrame', ([], {}), '()\n', (6860, 6862), False, 'import pandas\n'), ((6905, 6923), 'pandas.DataFrame', 'pandas.DataFrame', ([], {}), '()\n', (6921, 6923), False, 'import pandas\n'), ((9001, 9023), 'numpy.mean', 'numpy.mean', (['new_values'], {}), '(new_values)\n', (9011, 9023), False, 'import numpy\n'), ((9362, 9389), 'numpy.mean', 'numpy.mean', (['(new_values ** 2)'], {}), '(new_values ** 2)\n', (9372, 9389), False, 'import numpy\n'), ((12661, 12715), 'numpy.mean', 'numpy.mean', (['predictor_table[predictor_names[m]].values'], {}), '(predictor_table[predictor_names[m]].values)\n', (12671, 12715), False, 'import numpy\n'), ((12741, 12802), 'numpy.std', 'numpy.std', (['predictor_table[predictor_names[m]].values'], {'ddof': '(1)'}), '(predictor_table[predictor_names[m]].values, ddof=1)\n', (12750, 12802), False, 'import numpy\n'), ((12887, 12923), 'numpy.array', 'numpy.array', (['[this_mean, this_stdev]'], {}), '([this_mean, this_stdev])\n', (12898, 12923), False, 'import numpy\n')]
|
import cv2
import numpy as np
from pyzbar.pyzbar import decode
#This is the barcode-testing phase: the script validates scanned codes against a list of authorized accounts.
img = cv2.imread('/home/pi/Resources12/frame (1).png')
cap = cv2.VideoCapture(0)
cap.set(3, 640)
cap.set(4, 480)
with open('/home/pi/Resources12/myDataFile.text') as f:
myDataList = f.read().splitlines()
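# Editor's note (not part of the original script): myDataFile.text is assumed to
# hold one authorized barcode value per line, e.g.
#   111111
#   222222
# so membership in myDataList below decides Authorized vs Un-Authorized.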
while True:
success, img = cap.read()
for barcode in decode(img):
myData = barcode.data.decode('utf-8')
print(myData)
if myData in myDataList:
myOutput = 'Authorized'
myColor = (0, 255, 0)
else:
myOutput = 'Un-Authorized'
myColor = (0, 0, 255)
pts = np.array([barcode.polygon], np.int32)
pts = pts.reshape((-1, 1, 2))
cv2.polylines(img, [pts], True, myColor, 5)
pts2 = barcode.rect
cv2.putText(img, myOutput, (pts2[0], pts2[1]), cv2.FONT_HERSHEY_SIMPLEX,
0.9, myColor, 2)
cv2.imshow('Result', img)
cv2.waitKey(1)
|
[
"cv2.putText",
"cv2.polylines",
"cv2.waitKey",
"pyzbar.pyzbar.decode",
"cv2.VideoCapture",
"cv2.imread",
"numpy.array",
"cv2.imshow"
] |
[((165, 213), 'cv2.imread', 'cv2.imread', (['"""/home/pi/Resources12/frame (1).png"""'], {}), "('/home/pi/Resources12/frame (1).png')\n", (175, 213), False, 'import cv2\n'), ((220, 239), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (236, 239), False, 'import cv2\n'), ((432, 443), 'pyzbar.pyzbar.decode', 'decode', (['img'], {}), '(img)\n', (438, 443), False, 'from pyzbar.pyzbar import decode\n'), ((998, 1023), 'cv2.imshow', 'cv2.imshow', (['"""Result"""', 'img'], {}), "('Result', img)\n", (1008, 1023), False, 'import cv2\n'), ((1028, 1042), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (1039, 1042), False, 'import cv2\n'), ((719, 756), 'numpy.array', 'np.array', (['[barcode.polygon]', 'np.int32'], {}), '([barcode.polygon], np.int32)\n', (727, 756), True, 'import numpy as np\n'), ((803, 846), 'cv2.polylines', 'cv2.polylines', (['img', '[pts]', '(True)', 'myColor', '(5)'], {}), '(img, [pts], True, myColor, 5)\n', (816, 846), False, 'import cv2\n'), ((883, 977), 'cv2.putText', 'cv2.putText', (['img', 'myOutput', '(pts2[0], pts2[1])', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.9)', 'myColor', '(2)'], {}), '(img, myOutput, (pts2[0], pts2[1]), cv2.FONT_HERSHEY_SIMPLEX, \n 0.9, myColor, 2)\n', (894, 977), False, 'import cv2\n')]
|
import requests, json, os, pickle
import networkx as nx
import GOSTnets as gn
import matplotlib.pyplot as plt
from matplotlib import gridspec
from time import sleep
import pandas as pd
import geopandas as gpd
import rasterio
from rasterio.windows import Window
from rasterio.plot import *
from rasterio.mask import *
import numpy as np
from shapely.geometry import Point
from shapely.geometry import box
import contextily as ctx
import osmnx as ox
from fiona.crs import from_epsg
import pycrs
import geoviews as gv
import hvplot.pandas
import random
from utility import *
from raster_ops import *
from mapbox import *
def get_origins(places, population_file, window_size = 50, use_pickle = False, do_pickle_result = True, pickle_region_name = ""):
"""
    Extracts origin points from a raster population map.
    places - string naming the region of interest, or a dataframe of administrative boundary polygons
    population_file - raster population file
    window_size - desired final size of each population grid cell
"""
# Immediately return pickled data if requested
if use_pickle == True:
with open (f"../data/interim/origins_{pickle_region_name}.pickle", "rb") as handle:
origins = pickle.load(handle)
print(f"Origins:{origins.shape};")
return origins
#Scan the raster map with big windows
origins=pd.DataFrame()
window=window_size * 2
with rasterio.open(population_file) as src:
for left_x in np.arange(0,src.width,window):
for top_y in np.arange(0,src.height,window):
out=get_pop(population_file,left_x,top_y,window,plot=False)
if out != {}:
origins=origins.append([out])
print("%i/%i\r"%(left_x,src.width),end="")
    #Do a splitting pass. Run this step several times (we ran it four times),
    # until there is a balance between small windows and not-too-large ROIs.
    #Run it as many times as you want to keep splitting the windows:
#for i in range(0,4):
#for pass_num in range(0,split_passes):
# Split pass start
regions_need_splitting = origins[origins['split'] == True]
print(f"{len(regions_need_splitting)} regions need splitting")
olen=len(origins)
for i in np.arange(olen):
print("%i/%i\r"%(i+1,olen),end="")
if origins.iloc[i,origins.columns.get_loc('split')] == True:
origins.iloc[i,origins.columns.get_loc('split')]='done'
s = split(population_file,origins.iloc[i])
origins=origins.append(s,sort=False)
print("done.")
print("We now have %i regions of min size %i, %i will be split in next round"%\
(len(origins),origins['window'].min(),len(origins[origins['split']==True])))
origins=origins[origins['tot_pop']>0]
origins=origins[origins['split']!='done']
print("We have %i regions of size %i, %i with population >0"%
(len(origins),min(origins['window']),len(origins[origins['tot_pop']>0])))
# Split pass end
# Set the geometry of the generated polygons as points - that are grid centroids.
origins=gpd.GeoDataFrame(origins,
crs='epsg:4326',
geometry=[Point(xy) for xy in zip(origins['center_lon'], origins['center_lat'])]
)
# Create a separate geometry column that contains the grid geometry
origins['geo_grid']=origins.apply(
lambda r: box(r.left_lon, r.bottom_lat, r.right_lon, r.top_lat, ccw=False),
axis=1
)
col_to_keep = origins.columns
# Filter Origins with administrative boundary
if (isinstance(places,pd.DataFrame)==True):
bounds = places
else:
bounds = ox.boundaries.gdf_from_places(places)
# Don't clip to full bounds, just bbox. That's why this is commented
tr_origins = gpd.sjoin(origins, bounds, how="inner", op="intersects")
tr_origins = tr_origins[col_to_keep].reset_index(drop=True)
#Outputting Origins
print(f"All origins:{origins.shape}; Relevant Origins:{tr_origins.shape}")
# Pickle generated origins if requested
if do_pickle_result == True:
with open ("../data/interim/origins_{pickle_region_name}.pickle", "wb") as handle:
pickle.dump(tr_origins, handle, protocol=pickle.HIGHEST_PROTOCOL)
print("Pickled origins")
return(tr_origins)
def origins_snap_osm(origins, net_pickle_path):
"""
    Snaps origin points to the nearest node of the road network.
    origins - dataframe of population origin points
    net_pickle_path - path to the pickled road network (networkx graph)
"""
print("Reading pickle")
G = nx.read_gpickle(net_pickle_path)
print("Snapping Origins")
origins = gn.pandana_snap_c(G,
point_gdf = origins,
source_crs = 'epsg:4326',
target_crs = 'epsg:4326',
add_dist_to_node_col = True,
time_it = True)
origins = origins.drop(['x', 'y'], axis=1)
print("Converting NNs to Points")
def get_geo_from_node(NN):
node = G.nodes[NN]
return gpd.points_from_xy([node['x']], [node['y']])[0]
origins['center_geom'] = origins['geometry'].copy()
origins['geometry'] = origins['NN'].apply(get_geo_from_node)
origins['geometry'] = origins['geometry'].set_crs('epsg:4326')
return origins
def get_destinations(places, tags):
"""
    Extracts POI (destination) data from OSM.
    places - string naming the region of interest, or a dataframe of administrative boundary polygons
    tags - amenity tags for destinations; must be queryable in OSM
"""
    #For places inputs that are a data frame
if isinstance(places, pd.DataFrame)==True :
bounds = places.reset_index(drop=True)
boundaries=bounds.loc[0]['geometry']
df = ox.pois_from_polygon(boundaries, tags)
#For places inputs that are a string
else:
destinations_list = []
for p in places:
destinations_list.append(ox.pois_from_place(p, tags))
df = pd.concat(destinations_list)
#Formatting dataframe
df = gpd.GeoDataFrame(df[["osmid", "amenity", "name", "source"]], geometry=df['geometry'])
df = df.set_crs("EPSG:4326")
# Convert Polygons to centroid
df.loc[df.geometry.geom_type != 'Point', "geometry"] = df.loc[df.geometry.geom_type != 'Point', "geometry"].centroid
    #Making sure we have no amenities of None type
df = df.loc[df.amenity.isin(tags['amenity']), :].reset_index(drop=True).copy()
return df
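# Editor's sketch (not part of the original script): a typical `tags` argument
# for get_destinations; the amenity values are illustrative.
def _demo_get_hospitals_and_clinics(places):
    tags = {'amenity': ['hospital', 'clinic']}
    return get_destinations(places, tags)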
def n_closest_geodetic(origins, destinations, n_keep):
"""
    Takes origins and destinations and outputs a new destination list
    containing the n_keep closest destinations to each origin. This reduces the number of Mapbox calls.
    origins - data frame
    destinations - data frame
    n_keep - int, number of nearby destinations to keep per origin
"""
destinations = destinations.to_crs("EPSG:4326")
origins = origins.to_crs("EPSG:4326")
dest_list = []
for i in origins.index:
origin = origins.loc[i, :]
dest_o = destinations.copy()
dest_o['distance_to_or'] = dest_o.distance(origin.geometry)
dest_o['o_index'] = i
dest_o = dest_o.sort_values(by='distance_to_or', axis=0, ascending=True).head(n_keep)
dest_list.append(dest_o)
return pd.concat(dest_list).reset_index(drop=True)
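# Editor's sketch (not part of the original script): keep only the 5 nearest
# facilities per origin before requesting travel times, to limit the number of
# Mapbox calls. `origins_gdf` and `destinations_gdf` stand in for the
# GeoDataFrames produced by get_origins and get_destinations above.
def _demo_nearest_destinations(origins_gdf, destinations_gdf):
    return n_closest_geodetic(origins_gdf, destinations_gdf, n_keep=5)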
def biplot(origins,destinations, mode, t_max,xlim=False):
"""
    Plots a map and a histogram of the places beyond t_max hours from the closest hospital.
    origins - data frame with travel time, distance and population data
    destinations - data frame
    mode - string of travel mode
    t_max - travel time threshold in hours
    xlim - x-axis limit for the histogram
"""
o = origins.copy()
h = destinations.copy()
#o_above = o[(o['t_'+o_type]>t_max) & (o['so_'+o_type]<so_max)]
o_above = o[(o['hrs_to_hosp_or_clinic']>t_max)]
variable="tot_pop"
vmin,vmax=0,10000
fig = plt.figure(figsize=(12, 6))
gs = gridspec.GridSpec(1, 2, width_ratios=[3, 1])
ax1=plt.subplot(gs[0])
ax1.axis("off")
sm = plt.cm.ScalarMappable(cmap='Reds', norm=plt.Normalize(vmin=vmin, vmax=vmax))
sm.set_array([])
fig.colorbar(sm)
o_above['geometry'] = o_above['geo_grid']
o_above.to_crs('epsg:3857').plot(column=variable, cmap='Reds', linewidth=0.8, ax=ax1,edgecolor='0.8')
h.to_crs('epsg:3857').plot( alpha=0.8,color='blue',marker=".",markersize=8,ax=ax1)
ctx.add_basemap(ax1)
ax1.set_title("(Red) population beyond %i h from hospital (Blue)"%t_max)
ax1.set_axis_off()
ax2=plt.subplot(gs[1])
o['hrs_to_hosp_or_clinic'].plot.hist(alpha=0.5,bins=1000,cumulative=True,density=False,log=False,logx=False,weights=o['tot_pop'])
    if xlim:
plt.xlim(xlim)
ax2.ticklabel_format(style='sci')
ax2.axvline(t_max,color="red")
ax2.set_ylabel('People [10s Million]')
ax2.set_xlabel('Distance to closest hospital or clinic:'+' [h]')
modestring="%i people (%.2f%%) > %i h "+ mode+ " hospital"
ax2.set_title(modestring%\
(o_above['tot_pop'].sum(),o_above['tot_pop'].sum()/o['tot_pop'].sum()*100,t_max))
#plt.tight_layout()
plt.show();
|
[
"pickle.dump",
"osmnx.pois_from_polygon",
"geopandas.sjoin",
"matplotlib.pyplot.figure",
"pickle.load",
"numpy.arange",
"osmnx.pois_from_place",
"matplotlib.pyplot.Normalize",
"shapely.geometry.box",
"pandas.DataFrame",
"shapely.geometry.Point",
"contextily.add_basemap",
"GOSTnets.pandana_snap_c",
"geopandas.GeoDataFrame",
"osmnx.boundaries.gdf_from_places",
"pandas.concat",
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplot",
"rasterio.open",
"matplotlib.pyplot.xlim",
"geopandas.points_from_xy",
"networkx.read_gpickle",
"matplotlib.gridspec.GridSpec"
] |
[((1396, 1410), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1408, 1410), True, 'import pandas as pd\n'), ((2279, 2294), 'numpy.arange', 'np.arange', (['olen'], {}), '(olen)\n', (2288, 2294), True, 'import numpy as np\n'), ((3901, 3957), 'geopandas.sjoin', 'gpd.sjoin', (['origins', 'bounds'], {'how': '"""inner"""', 'op': '"""intersects"""'}), "(origins, bounds, how='inner', op='intersects')\n", (3910, 3957), True, 'import geopandas as gpd\n'), ((4712, 4744), 'networkx.read_gpickle', 'nx.read_gpickle', (['net_pickle_path'], {}), '(net_pickle_path)\n', (4727, 4744), True, 'import networkx as nx\n'), ((4790, 4923), 'GOSTnets.pandana_snap_c', 'gn.pandana_snap_c', (['G'], {'point_gdf': 'origins', 'source_crs': '"""epsg:4326"""', 'target_crs': '"""epsg:4326"""', 'add_dist_to_node_col': '(True)', 'time_it': '(True)'}), "(G, point_gdf=origins, source_crs='epsg:4326', target_crs=\n 'epsg:4326', add_dist_to_node_col=True, time_it=True)\n", (4807, 4923), True, 'import GOSTnets as gn\n'), ((6261, 6351), 'geopandas.GeoDataFrame', 'gpd.GeoDataFrame', (["df[['osmid', 'amenity', 'name', 'source']]"], {'geometry': "df['geometry']"}), "(df[['osmid', 'amenity', 'name', 'source']], geometry=df[\n 'geometry'])\n", (6277, 6351), True, 'import geopandas as gpd\n'), ((8241, 8268), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 6)'}), '(figsize=(12, 6))\n', (8251, 8268), True, 'import matplotlib.pyplot as plt\n'), ((8279, 8323), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(1)', '(2)'], {'width_ratios': '[3, 1]'}), '(1, 2, width_ratios=[3, 1])\n', (8296, 8323), False, 'from matplotlib import gridspec\n'), ((8334, 8352), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[0]'], {}), '(gs[0])\n', (8345, 8352), True, 'import matplotlib.pyplot as plt\n'), ((8783, 8803), 'contextily.add_basemap', 'ctx.add_basemap', (['ax1'], {}), '(ax1)\n', (8798, 8803), True, 'import contextily as ctx\n'), ((8913, 8931), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[1]'], {}), '(gs[1])\n', (8924, 8931), True, 'import matplotlib.pyplot as plt\n'), ((9503, 9513), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9511, 9513), True, 'import matplotlib.pyplot as plt\n'), ((1447, 1477), 'rasterio.open', 'rasterio.open', (['population_file'], {}), '(population_file)\n', (1460, 1477), False, 'import rasterio\n'), ((1508, 1539), 'numpy.arange', 'np.arange', (['(0)', 'src.width', 'window'], {}), '(0, src.width, window)\n', (1517, 1539), True, 'import numpy as np\n'), ((3768, 3805), 'osmnx.boundaries.gdf_from_places', 'ox.boundaries.gdf_from_places', (['places'], {}), '(places)\n', (3797, 3805), True, 'import osmnx as ox\n'), ((5960, 5998), 'osmnx.pois_from_polygon', 'ox.pois_from_polygon', (['boundaries', 'tags'], {}), '(boundaries, tags)\n', (5980, 5998), True, 'import osmnx as ox\n'), ((6192, 6220), 'pandas.concat', 'pd.concat', (['destinations_list'], {}), '(destinations_list)\n', (6201, 6220), True, 'import pandas as pd\n'), ((9094, 9108), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xlim'], {}), '(xlim)\n', (9102, 9108), True, 'import matplotlib.pyplot as plt\n'), ((1255, 1274), 'pickle.load', 'pickle.load', (['handle'], {}), '(handle)\n', (1266, 1274), False, 'import requests, json, os, pickle\n'), ((1564, 1596), 'numpy.arange', 'np.arange', (['(0)', 'src.height', 'window'], {}), '(0, src.height, window)\n', (1573, 1596), True, 'import numpy as np\n'), ((3472, 3536), 'shapely.geometry.box', 'box', (['r.left_lon', 'r.bottom_lat', 'r.right_lon', 'r.top_lat'], {'ccw': '(False)'}), '(r.left_lon, 
r.bottom_lat, r.right_lon, r.top_lat, ccw=False)\n', (3475, 3536), False, 'from shapely.geometry import box\n'), ((4317, 4382), 'pickle.dump', 'pickle.dump', (['tr_origins', 'handle'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(tr_origins, handle, protocol=pickle.HIGHEST_PROTOCOL)\n', (4328, 4382), False, 'import requests, json, os, pickle\n'), ((5251, 5295), 'geopandas.points_from_xy', 'gpd.points_from_xy', (["[node['x']]", "[node['y']]"], {}), "([node['x']], [node['y']])\n", (5269, 5295), True, 'import geopandas as gpd\n'), ((7553, 7573), 'pandas.concat', 'pd.concat', (['dest_list'], {}), '(dest_list)\n', (7562, 7573), True, 'import pandas as pd\n'), ((8427, 8462), 'matplotlib.pyplot.Normalize', 'plt.Normalize', ([], {'vmin': 'vmin', 'vmax': 'vmax'}), '(vmin=vmin, vmax=vmax)\n', (8440, 8462), True, 'import matplotlib.pyplot as plt\n'), ((3241, 3250), 'shapely.geometry.Point', 'Point', (['xy'], {}), '(xy)\n', (3246, 3250), False, 'from shapely.geometry import Point\n'), ((6150, 6177), 'osmnx.pois_from_place', 'ox.pois_from_place', (['p', 'tags'], {}), '(p, tags)\n', (6168, 6177), True, 'import osmnx as ox\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 31 09:24:17 2017
@author: gao
Generate mixed speech-and-noise data and store it in mix_data as .pkl files
"""
import librosa as lr
import numpy as np
import os
import pickle
import matplotlib.pyplot as plt
from ams_extract import ams_extractor
resample_rate=16000
nfft = 512
offset = int(nfft/2)
def genAndSave_trainData(speech_fileName):  # label the clean speech
s, fs = lr.load(speech_fileName, sr=None)
s = lr.resample(s, fs, resample_rate) # resample to 16k
s = np.array(s)
s_tf = lr.core.stft(s, n_fft=nfft, hop_length=offset)
s_db = lr.core.amplitude_to_db(np.abs(s_tf))
#s_angle = np.angle(s_tf)
x_label = np.ones(s_db.shape[1]) # initialize x_label to all one
xmean = s_db.mean(axis=0)
for i in range(s_db.shape[1]):
if xmean[i] < -40:
x_label[i] = 0
#xstd = np.std(s_db, axis =0)
#x_data = (s_db-xmean)/(xstd+1e-10) # normalize train data to zero mean and unit std
return x_label#,x_data,s_angle,xmean,xstd
'''
#plot the label & data for a visual check
plt.subplot(211)
plt.plot(xmean)
plt.subplot(212)
plt.plot(x_label)
plt.show()
'''
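# Added illustration (not part of the original script): the labelling rule in
# genAndSave_trainData marks a frame as silence (0) when the mean of its dB
# spectrum falls below -40 dB, and as speech (1) otherwise.  The toy
# spectrogram below is invented purely for this example.
def _example_vad_threshold():
    toy_db = np.array([[-60.0, -10.0, -5.0],
                       [-55.0, -20.0, -15.0]])   # 2 frequency bins x 3 frames
    frame_means = toy_db.mean(axis=0)             # [-57.5, -15.0, -10.0]
    return (frame_means >= -40.0).astype(int)     # [0, 1, 1]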
def gen_vadWav(x_data,x_label,s_angle,xmean,xstd):  # rebuild a waveform that reflects the VAD labels
    for i in range(x_data.shape[1]):
        x_data[:,i] = x_data[:,i] * xstd[i] + xmean[i]  # undo the normalization
    speech_amp = lr.core.db_to_amplitude(x_data)
    for i in range(len(x_label)):
        if x_label[i]==0:
            speech_amp[:,i]=1  # keep the speech frames; flatten (amplitude 1) the frames labelled as non-speech
    speech_tf = speech_amp * np.exp(s_angle*1j)
    speech = lr.core.istft(speech_tf, hop_length = offset)  # add the phase back and inverse-STFT to recover the waveform
    return speech
def Read_wav(fileName):  # not well defined; currently unused
    s, fs = lr.load(fileName, sr=None)
    s = lr.resample(s, fs, resample_rate) # resample to 16k
    s = np.array(s)
    s_tf = lr.core.stft(s, n_fft=nfft, hop_length=offset)  # short-time Fourier transform of the data
    s_db = lr.core.amplitude_to_db(np.abs(s_tf))  # convert the magnitude to dB
    s_angle = np.angle(s_tf)  # record the phase of the transform
    xmean = s_db.mean(axis=0)  # per-frame mean of the dB spectrum
    xstd = np.std(s_db, axis =0)  # per-frame standard deviation
x_data = (s_db-xmean)/(xstd+1e-10) # normalize train data to zero mean and unit std
return x_data,s_angle
def MFCC(y,sr):  # extract 20-dimensional MFCCs
return lr.feature.mfcc(y=y,n_fft=nfft,hop_length=offset,n_mfcc=20)
def Pitch(y,sr):
pitches, magnitudes = lr.core.pitch.piptrack(y=y,n_fft=nfft,hop_length=offset)
# Select out pitches with high energy
pitches = pitches[magnitudes > np.median(magnitudes)]
return lr.core.pitch_tuning(pitches)
def Mix_wav(speechName,noiseName,mix_snr=5,train=True):  # mix the speech with noise at mix_snr dB (default 5 dB)
s,fs=lr.load(speechName,sr=None)
n,fsn=lr.load(noiseName,sr=None)
s=lr.resample(s,fs,resample_rate)
n=lr.resample(n,fsn,resample_rate)
s=np.array(s)
n=np.array(n)
len_s=len(s)
len_n=len(n)
if len_s<=len_n:
n=n[0:len_s]
else:
n_extend_num=int(len_s/len_n)+1
n=n.repeat(n_extend_num)
n=n[0:len_s]
alpha=np.sqrt((s**2).sum()/((n**2).sum()*10**(mix_snr/10)))
mix=s+alpha*n
mix_tf=lr.core.stft(mix,n_fft=nfft,hop_length=offset)
mix_db=lr.core.amplitude_to_db(np.abs(mix_tf))
mfcc=MFCC(mix,sr=fs)
pitch=Pitch(mix,sr=fs)
ams=ams_extractor(mix,sr=fs,win_len=nfft,shift_len=offset,order=1)
if train==True:
return mix_db, mfcc, pitch,ams#mix_db
else:
mix_angle=np.angle(mix_tf)
return mix_db,mix_angle
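# Added illustration (the helper below is not in the original file): Mix_wav
# scales the noise by alpha so that the speech-to-noise power ratio of the
# mixture equals mix_snr dB, i.e. (s**2).sum() / ((alpha*n)**2).sum() == 10**(mix_snr/10).
def _example_snr_gain(s, n, mix_snr=5):
    # identical to the expression used inside Mix_wav above
    return np.sqrt((s ** 2).sum() / ((n ** 2).sum() * 10 ** (mix_snr / 10)))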
def I_Mix_wav(mix_db,mix_angle):  # inverse-transform the noisy speech back to a waveform
mix_amp=lr.core.db_to_amplitude(mix_db)
mix_tf=mix_amp*np.exp(mix_angle*1j)
mix=lr.core.istft(mix_tf,hop_length=offset)
return mix
def label_save(mix_data,mix_label,fn):  # fn is the path where the file is saved
    dic_data={'mix_data':mix_data,
              'mix_label':mix_label}  # pack the data into a dictionary
with open(fn,'wb') as f:
pickle.dump(dic_data,f)
def _create_zero_indexes(speech_fileName):  # find the zero-crossing indexes
s, fs = lr.load(speech_fileName, sr=None)
s = lr.resample(s, fs, resample_rate) # resample to 16k
zero_indexes=[]
zero_crossings=lr.zero_crossings(s)
zero_index=np.nonzero(zero_crossings)[0]
zero_indexes.append(zero_index)
# build the noisy speech signals
filepath="C:/Users/gao/vad/863_IBM_test/"  # path 1
#filepath="D:/863语音数据/863_data/863_IBM_train1/"  # path 2
dirname=os.listdir(filepath)  # list all speech files
filepath1="C:/Users/gao/vad/noise/"
dirnoise=os.listdir(filepath1)
for i in range(0,len(dirname)):
for j in range(0,len(dirnoise)):
noise_fileName=filepath1+dirnoise[j]
            #print(noise_fileName)  # read the noise audio
            speech_fileName=filepath+dirname[i]
            #print(speech_fileName)  # read the clean speech
            mix_label=genAndSave_trainData(speech_fileName)  # label the frames using the clean speech
            #plt.plot(mix_label)
            #plt.show()
            # build the mixed audio at the chosen SNR
#mix_db,mix_angle=Mix_wav(speech_fileName,noise_fileName,mix_snr=5,train=False)
mix_db, mfcc, pitch,ams=Mix_wav(speech_fileName,noise_fileName,mix_snr=0,train=True)
#print(mix_tf.shape,mfcc.shape,pitch.shape,ams.shape)
mix=np.row_stack((mix_db,mfcc,ams))
#print(mix.shape)
            # normalise the mixed features
            mixmean= mix.mean(axis=0)  # mean
            mixstd = np.std(mix, axis =0)  # standard deviation
            mix_data= (mix-mixmean)/(mixstd+1e-10)  # normalised data
print(mix_data.shape)
print(mix_label)
#a=label_save(mix_data,mix_label,'C:/Users/gao/vad/test_data/0dB/factory_test/'+dirname[i].strip('.WAV')+dirnoise[j].strip('.wav')+'0'+'.pkl')
"""
x=I_Mix_wav(mix_db,mix_angle)
lr.output.write_wav('C:/Users/gao/vad/test_data/'+dirname[i].strip('.WAV')+'__5'+dirnoise[j], x, resample_rate)
            # display the noisy speech
plt.plot(x)
plt.show()
"""
|
[
"librosa.zero_crossings",
"pickle.dump",
"numpy.abs",
"numpy.angle",
"numpy.ones",
"librosa.resample",
"numpy.exp",
"librosa.feature.mfcc",
"numpy.std",
"librosa.core.db_to_amplitude",
"librosa.core.pitch_tuning",
"numpy.median",
"librosa.core.pitch.piptrack",
"librosa.load",
"librosa.core.stft",
"os.listdir",
"numpy.nonzero",
"numpy.array",
"numpy.row_stack",
"ams_extract.ams_extractor",
"librosa.core.istft"
] |
[((4283, 4303), 'os.listdir', 'os.listdir', (['filepath'], {}), '(filepath)\n', (4293, 4303), False, 'import os\n'), ((4356, 4377), 'os.listdir', 'os.listdir', (['filepath1'], {}), '(filepath1)\n', (4366, 4377), False, 'import os\n'), ((375, 408), 'librosa.load', 'lr.load', (['speech_fileName'], {'sr': 'None'}), '(speech_fileName, sr=None)\n', (382, 408), True, 'import librosa as lr\n'), ((421, 454), 'librosa.resample', 'lr.resample', (['s', 'fs', 'resample_rate'], {}), '(s, fs, resample_rate)\n', (432, 454), True, 'import librosa as lr\n'), ((485, 496), 'numpy.array', 'np.array', (['s'], {}), '(s)\n', (493, 496), True, 'import numpy as np\n'), ((513, 559), 'librosa.core.stft', 'lr.core.stft', (['s'], {'n_fft': 'nfft', 'hop_length': 'offset'}), '(s, n_fft=nfft, hop_length=offset)\n', (525, 559), True, 'import librosa as lr\n'), ((658, 680), 'numpy.ones', 'np.ones', (['s_db.shape[1]'], {}), '(s_db.shape[1])\n', (665, 680), True, 'import numpy as np\n'), ((1335, 1366), 'librosa.core.db_to_amplitude', 'lr.core.db_to_amplitude', (['x_data'], {}), '(x_data)\n', (1358, 1366), True, 'import librosa as lr\n'), ((1548, 1591), 'librosa.core.istft', 'lr.core.istft', (['speech_tf'], {'hop_length': 'offset'}), '(speech_tf, hop_length=offset)\n', (1561, 1591), True, 'import librosa as lr\n'), ((1684, 1710), 'librosa.load', 'lr.load', (['fileName'], {'sr': 'None'}), '(fileName, sr=None)\n', (1691, 1710), True, 'import librosa as lr\n'), ((1723, 1756), 'librosa.resample', 'lr.resample', (['s', 'fs', 'resample_rate'], {}), '(s, fs, resample_rate)\n', (1734, 1756), True, 'import librosa as lr\n'), ((1787, 1798), 'numpy.array', 'np.array', (['s'], {}), '(s)\n', (1795, 1798), True, 'import numpy as np\n'), ((1815, 1861), 'librosa.core.stft', 'lr.core.stft', (['s'], {'n_fft': 'nfft', 'hop_length': 'offset'}), '(s, n_fft=nfft, hop_length=offset)\n', (1827, 1861), True, 'import librosa as lr\n'), ((1946, 1960), 'numpy.angle', 'np.angle', (['s_tf'], {}), '(s_tf)\n', (1954, 1960), True, 'import numpy as np\n'), ((2031, 2051), 'numpy.std', 'np.std', (['s_db'], {'axis': '(0)'}), '(s_db, axis=0)\n', (2037, 2051), True, 'import numpy as np\n'), ((2218, 2280), 'librosa.feature.mfcc', 'lr.feature.mfcc', ([], {'y': 'y', 'n_fft': 'nfft', 'hop_length': 'offset', 'n_mfcc': '(20)'}), '(y=y, n_fft=nfft, hop_length=offset, n_mfcc=20)\n', (2233, 2280), True, 'import librosa as lr\n'), ((2322, 2380), 'librosa.core.pitch.piptrack', 'lr.core.pitch.piptrack', ([], {'y': 'y', 'n_fft': 'nfft', 'hop_length': 'offset'}), '(y=y, n_fft=nfft, hop_length=offset)\n', (2344, 2380), True, 'import librosa as lr\n'), ((2490, 2519), 'librosa.core.pitch_tuning', 'lr.core.pitch_tuning', (['pitches'], {}), '(pitches)\n', (2510, 2519), True, 'import librosa as lr\n'), ((2603, 2631), 'librosa.load', 'lr.load', (['speechName'], {'sr': 'None'}), '(speechName, sr=None)\n', (2610, 2631), True, 'import librosa as lr\n'), ((2641, 2668), 'librosa.load', 'lr.load', (['noiseName'], {'sr': 'None'}), '(noiseName, sr=None)\n', (2648, 2668), True, 'import librosa as lr\n'), ((2674, 2707), 'librosa.resample', 'lr.resample', (['s', 'fs', 'resample_rate'], {}), '(s, fs, resample_rate)\n', (2685, 2707), True, 'import librosa as lr\n'), ((2712, 2746), 'librosa.resample', 'lr.resample', (['n', 'fsn', 'resample_rate'], {}), '(n, fsn, resample_rate)\n', (2723, 2746), True, 'import librosa as lr\n'), ((2751, 2762), 'numpy.array', 'np.array', (['s'], {}), '(s)\n', (2759, 2762), True, 'import numpy as np\n'), ((2769, 2780), 'numpy.array', 'np.array', (['n'], {}), '(n)\n', 
(2777, 2780), True, 'import numpy as np\n'), ((3056, 3104), 'librosa.core.stft', 'lr.core.stft', (['mix'], {'n_fft': 'nfft', 'hop_length': 'offset'}), '(mix, n_fft=nfft, hop_length=offset)\n', (3068, 3104), True, 'import librosa as lr\n'), ((3230, 3296), 'ams_extract.ams_extractor', 'ams_extractor', (['mix'], {'sr': 'fs', 'win_len': 'nfft', 'shift_len': 'offset', 'order': '(1)'}), '(mix, sr=fs, win_len=nfft, shift_len=offset, order=1)\n', (3243, 3296), False, 'from ams_extract import ams_extractor\n'), ((3516, 3547), 'librosa.core.db_to_amplitude', 'lr.core.db_to_amplitude', (['mix_db'], {}), '(mix_db)\n', (3539, 3547), True, 'import librosa as lr\n'), ((3596, 3636), 'librosa.core.istft', 'lr.core.istft', (['mix_tf'], {'hop_length': 'offset'}), '(mix_tf, hop_length=offset)\n', (3609, 3636), True, 'import librosa as lr\n'), ((3914, 3947), 'librosa.load', 'lr.load', (['speech_fileName'], {'sr': 'None'}), '(speech_fileName, sr=None)\n', (3921, 3947), True, 'import librosa as lr\n'), ((3960, 3993), 'librosa.resample', 'lr.resample', (['s', 'fs', 'resample_rate'], {}), '(s, fs, resample_rate)\n', (3971, 3993), True, 'import librosa as lr\n'), ((4055, 4075), 'librosa.zero_crossings', 'lr.zero_crossings', (['s'], {}), '(s)\n', (4072, 4075), True, 'import librosa as lr\n'), ((595, 607), 'numpy.abs', 'np.abs', (['s_tf'], {}), '(s_tf)\n', (601, 607), True, 'import numpy as np\n'), ((1516, 1538), 'numpy.exp', 'np.exp', (['(s_angle * 1.0j)'], {}), '(s_angle * 1.0j)\n', (1522, 1538), True, 'import numpy as np\n'), ((1910, 1922), 'numpy.abs', 'np.abs', (['s_tf'], {}), '(s_tf)\n', (1916, 1922), True, 'import numpy as np\n'), ((3139, 3153), 'numpy.abs', 'np.abs', (['mix_tf'], {}), '(mix_tf)\n', (3145, 3153), True, 'import numpy as np\n'), ((3392, 3408), 'numpy.angle', 'np.angle', (['mix_tf'], {}), '(mix_tf)\n', (3400, 3408), True, 'import numpy as np\n'), ((3567, 3591), 'numpy.exp', 'np.exp', (['(mix_angle * 1.0j)'], {}), '(mix_angle * 1.0j)\n', (3573, 3591), True, 'import numpy as np\n'), ((3823, 3847), 'pickle.dump', 'pickle.dump', (['dic_data', 'f'], {}), '(dic_data, f)\n', (3834, 3847), False, 'import pickle\n'), ((4091, 4117), 'numpy.nonzero', 'np.nonzero', (['zero_crossings'], {}), '(zero_crossings)\n', (4101, 4117), True, 'import numpy as np\n'), ((5134, 5167), 'numpy.row_stack', 'np.row_stack', (['(mix_db, mfcc, ams)'], {}), '((mix_db, mfcc, ams))\n', (5146, 5167), True, 'import numpy as np\n'), ((5344, 5363), 'numpy.std', 'np.std', (['mix'], {'axis': '(0)'}), '(mix, axis=0)\n', (5350, 5363), True, 'import numpy as np\n'), ((2456, 2477), 'numpy.median', 'np.median', (['magnitudes'], {}), '(magnitudes)\n', (2465, 2477), True, 'import numpy as np\n')]
|
"""Compute stats on the results."""
import arviz as az
from datetime import datetime
import numpy as np
import pandas as pd
from pathlib import Path
from pystan.misc import _summary
from scipy.stats import nbinom
from tqdm.auto import tqdm
from warnings import warn
from .io import extract_samples
def get_rhat(fit) -> float:
"""Get `rhat` for the log-probability of a fit.
This is a measure of the convergence across sampling chains.
Good convergence is indicated by a value near 1.0.
"""
x = _summary(fit, ['lp__'], [])
summary = pd.DataFrame(x['summary'], columns=x['summary_colnames'], index=x['summary_rownames'])
return summary.loc['lp__', 'Rhat']
def get_waic(samples: pd.DataFrame) -> dict:
"""Get the Widely-Used Information Criterion (WAIC) for a fit.
Only use if you don't have arviz (`get_waic_and_loo` is preferred).
Args:
samples (pd.DataFrame): Samples extracted from a fit.
Returns:
dict: WAIC and se of WAIC for these samples
"""
from numpy import log, exp, sum, mean, var, sqrt
# I named the Stan array 'llx'
ll = samples[[c for c in samples if 'llx' in c]]
n_samples, n_obs = ll.shape
# Convert to likelihoods (pray for no numeric precision issues)
like = exp(ll)
# log of the mean (across samples) of the likelihood for each observation
lml = log(mean(like, axis=0))
# Sum (across observations) of lml
lppd = sum(lml)
# Variance (across samples) of the log-likelihood for each observation
vll = var(ll, axis=0)
# Sum (across observations) of the vll
pwaic = sum(vll)
elpdi = lml - vll
waic = 2*(-lppd + pwaic)
    # Standard error of the measure
se = 2*sqrt(n_obs*var(elpdi))
return {'waic': waic, 'se': se}
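# Note (added for clarity): on this scale the value returned above is
# WAIC = -2 * (lppd - p_waic), i.e. a deviance, where lppd is the log
# pointwise predictive density and p_waic (the `pwaic` variable) is the
# effective number of parameters, estimated from the per-observation
# variance of the log-likelihood.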
def get_waic_and_loo(fit) -> dict:
warn("`get_waic_and_loo` is deprecated, use `get_fit_quality` instead.",
DeprecationWarning)
return get_fit_quality(fit)
def get_fit_quality(fit) -> dict:
"""Compute Widely-Available Information Criterion (WAIC) and
Leave One Out (LOO) from a fit instance using Arviz.
Args:
fit: A PyStan4model instance (i.e. a PyStan fit).
Returns:
dict: WAIC and LOO statistics (and se's) for this fit.
"""
result = {}
try:
idata = az.from_pystan(fit, log_likelihood="llx")
except KeyError as e:
warn("'%s' not found; waic and loo will not be computed" % str(e),
stacklevel=2)
result.update({'waic': 0, 'loo': 0})
else:
result.update(dict(az.loo(idata, scale='deviance')))
result.update(dict(az.waic(idata, scale='deviance')))
result.update({'lp__rhat': get_rhat(fit)})
return result
def getllxtensor_singleroi(roi: str, data_path: str, fits_path: str,
models_path: str, model_name: str,
fit_format: int) -> np.array:
"""Recompute a single log-likelihood tensor (n_samples x n_datapoints).
Args:
roi (str): A single ROI, e.g. "US_MI" or "Greece".
data_path (str): Full path to the data directory.
fits_path (str): Full path to the fits directory.
models_path (str): Full path to the models directory.
model_name (str): The model name (without the '.stan' suffix).
fit_format (int): The .csv (0) or .pkl (1) fit format.
Returns:
np.array: The log-likelihood tensor.
"""
csv_path = Path(data_path) / ("covidtimeseries_%s_.csv" % roi)
df = pd.read_csv(csv_path)
t0 = np.where(df["new_cases"].values > 1)[0][0]
y = df[['new_cases', 'new_recover', 'new_deaths']].to_numpy()\
.astype(int)[t0:, :]
# load samples
samples = extract_samples(fits_path, models_path, model_name, roi,
fit_format)
S = np.shape(samples['lambda[0,0]'])[0]
# print(S)
# get number of observations, check against data above
for i in range(1000, 0, -1): # Search for it from latest to earliest
candidate = '%s[%d,0]' % ('lambda', i)
if candidate in samples:
N = i+1 # N observations, add 1 since index starts at 0
break # And move on
print(N) # run using old data
print(len(y))
llx = np.zeros((S, N, 3))
# # conversion from Stan neg_binom2(n_stan | mu,phi)
# to scipy.stats.nbinom(k,n_scipy,p)
# # n_scipy = phi, p = phi/mu, k = n_stan
# t0 = time.time()
for i in range(S):
phi = samples['phi'][i]
for j in range(N):
mu = max(samples['lambda['+str(j)+',0]'][i], 1)
llx[i, j, 0] = np.log(nbinom.pmf(max(y[j, 0], 0), phi, phi/mu))
mu = max(samples['lambda['+str(j)+',1]'][i], 1)
llx[i, j, 1] = np.log(nbinom.pmf(max(y[j, 1], 0), phi, phi/mu))
mu = max(samples['lambda['+str(j)+',2]'][i], 1)
llx[i, j, 2] = np.log(nbinom.pmf(max(y[j, 2], 0), phi, phi/mu))
print(np.sum(llx[i, :, :]))
print(samples['ll_'][i])
print('--')
return llx
def reweighted_stat(stat_vals: np.array, loo_vals: np.array,
loo_se_vals: np.array = None) -> float:
"""Get weighted means of a stat (across models),
where the weights are related to the LOO's of model/
Args:
stat_vals (np.array): Values (across models) of some statistic.
loo_vals (np.array): Values (across models) of LOO.
loo_se_vals (np.array, optional): Values (across models) of se of LOO.
Defaults to None.
Returns:
float: A new average value for the statistic, weighted across models.
"""
# Assume that loo is on a deviance scale (lower is better)
min_loo = min(loo_vals)
weights = np.exp(-0.5*(loo_vals-min_loo))
if loo_se_vals is not None:
weights *= np.exp(-0.5*loo_se_vals)
weights = weights/np.sum(weights)
return np.sum(stat_vals * weights)
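# Added toy example (the numbers are invented): with deviance-scale LOO values
# [100, 102, 110] the unnormalised weights are exp(-0.5 * [0, 2, 10])
# ~= [1.0, 0.37, 0.007], so the best-scoring model dominates the average.
def _example_reweighted_stat():
    stat_vals = np.array([1.0, 2.0, 3.0])
    loo_vals = np.array([100.0, 102.0, 110.0])
    return reweighted_stat(stat_vals, loo_vals)  # ~= 1.28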
def reweighted_stats(raw_table_path: str, save: bool = True,
roi_weight='n_data_pts', extra=None, first=None, dates=None) -> pd.DataFrame:
"""Reweight all statistics (across models) according to the LOO
of each of the models.
Args:
raw_table_path (str): Path to the .csv file containing the statistics
for each model.
save (bool, optional): Whether to save the results. Defaults to True.
Returns:
pd.DataFrame: The reweighted statistics
(i.e. a weighted average across models).
"""
df = pd.read_csv(raw_table_path, index_col=['model', 'roi', 'quantile'])
df = df[~df.index.duplicated(keep='last')]
df.columns.name = 'param'
df = df.stack('param').unstack(['roi', 'quantile', 'param']).T
rois = df.index.get_level_values('roi').unique()
result = pd.Series(index=df.index)
if first is not None:
rois = rois[:first]
for roi in tqdm(rois):
loo = df.loc[(roi, 'mean', 'loo')]
loo_se = df.loc[(roi, 'std', 'loo')]
# An indexer for this ROI
chunk = df.index.get_level_values('roi') == roi
result[chunk] = df[chunk].apply(lambda x:
reweighted_stat(x, loo, loo_se),
axis=1)
result = result.unstack(['param'])
result = result[~result.index.get_level_values('quantile')
.isin(['min', 'max'])] # Remove min and max
if extra is not None:
extra.columns.name = 'param'
result = result.join(extra)
# Add stats for a fixed date
if dates:
if isinstance(dates, str):
dates = [dates]
for date in dates:
result = add_fixed_date(result, date, ['Rt', 'car', 'ifr'])
# Compute global stats
means = result.unstack('roi').loc['mean'].unstack('param')
means = means.drop('AA_Global', errors='ignore')
means = means[sorted(means.columns)]
if roi_weight == 'var':
inv_var = 1/result.unstack('roi').loc['std']**2
weights = inv_var.fillna(0).unstack('param')
global_mean = (means*weights).sum() / weights.sum()
global_var = ((weights*((means - global_mean)**2)).sum()/weights.sum())
elif roi_weight == 'waic':
waic = means['waic']
n_data = means['n_data_pts']
# Assume that waic is on a deviance scale (lower is better)
weights = np.exp(-0.5*waic/n_data)
global_mean = means.mul(weights, axis=0).sum() / weights.sum()
global_var = (((means - global_mean)**2).mul(weights, axis=0)).sum()/weights.sum()
elif roi_weight == 'n_data_pts':
n_data = means['n_data_pts']
# Assume that waic is on a deviance scale (lower is better)
weights = n_data
global_mean = means.mul(weights, axis=0).sum() / weights.sum()
global_var = (((means - global_mean)**2).mul(weights, axis=0)).sum()/weights.sum()
global_sd = global_var**(1/2)
result.loc[('AA_Global', 'mean'), :] = global_mean
result.loc[('AA_Global', 'std'), :] = global_sd
result = result.sort_index()
if save:
path = Path(raw_table_path).parent / 'fit_table_reweighted.csv'
result.to_csv(path)
return result
def days_into_2020(date_str):
date = datetime.strptime(date_str, '%Y-%m-%d')
one_one = datetime.strptime('2020-01-01', '%Y-%m-%d')
return (date - one_one).days
def get_roi_week(date_str, roi_day_one):
days = days_into_2020(date_str)
roi_days = days - roi_day_one
try:
roi_week = int(roi_days/7)
except:
roi_week = 9999
return roi_week
def add_fixed_date(df, date_str, stats):
for roi in df.index:
week = get_roi_week(date_str, df.loc[roi, 't0'])
for stat in stats:
col = '%s (week %d)' % (stat, week)
new_col = '%s (%s)' % (stat, date_str)
if col in df:
df.loc[roi, new_col] = df.loc[roi, col]
else:
df.loc[roi, new_col] = None
return df
|
[
"pandas.DataFrame",
"arviz.from_pystan",
"numpy.sum",
"pandas.read_csv",
"numpy.zeros",
"tqdm.auto.tqdm",
"numpy.shape",
"datetime.datetime.strptime",
"numpy.mean",
"pathlib.Path",
"numpy.exp",
"pandas.Series",
"numpy.where",
"arviz.waic",
"warnings.warn",
"arviz.loo",
"numpy.var",
"pystan.misc._summary"
] |
[((523, 550), 'pystan.misc._summary', '_summary', (['fit', "['lp__']", '[]'], {}), "(fit, ['lp__'], [])\n", (531, 550), False, 'from pystan.misc import _summary\n'), ((565, 656), 'pandas.DataFrame', 'pd.DataFrame', (["x['summary']"], {'columns': "x['summary_colnames']", 'index': "x['summary_rownames']"}), "(x['summary'], columns=x['summary_colnames'], index=x[\n 'summary_rownames'])\n", (577, 656), True, 'import pandas as pd\n'), ((1281, 1288), 'numpy.exp', 'exp', (['ll'], {}), '(ll)\n', (1284, 1288), False, 'from numpy import log, exp, sum, mean, var, sqrt\n'), ((1451, 1459), 'numpy.sum', 'sum', (['lml'], {}), '(lml)\n', (1454, 1459), False, 'from numpy import log, exp, sum, mean, var, sqrt\n'), ((1545, 1560), 'numpy.var', 'var', (['ll'], {'axis': '(0)'}), '(ll, axis=0)\n', (1548, 1560), False, 'from numpy import log, exp, sum, mean, var, sqrt\n'), ((1616, 1624), 'numpy.sum', 'sum', (['vll'], {}), '(vll)\n', (1619, 1624), False, 'from numpy import log, exp, sum, mean, var, sqrt\n'), ((1822, 1918), 'warnings.warn', 'warn', (['"""`get_waic_and_loo` is deprecated, use `get_fit_quality` instead."""', 'DeprecationWarning'], {}), "('`get_waic_and_loo` is deprecated, use `get_fit_quality` instead.',\n DeprecationWarning)\n", (1826, 1918), False, 'from warnings import warn\n'), ((3513, 3534), 'pandas.read_csv', 'pd.read_csv', (['csv_path'], {}), '(csv_path)\n', (3524, 3534), True, 'import pandas as pd\n'), ((4252, 4271), 'numpy.zeros', 'np.zeros', (['(S, N, 3)'], {}), '((S, N, 3))\n', (4260, 4271), True, 'import numpy as np\n'), ((5735, 5770), 'numpy.exp', 'np.exp', (['(-0.5 * (loo_vals - min_loo))'], {}), '(-0.5 * (loo_vals - min_loo))\n', (5741, 5770), True, 'import numpy as np\n'), ((5892, 5919), 'numpy.sum', 'np.sum', (['(stat_vals * weights)'], {}), '(stat_vals * weights)\n', (5898, 5919), True, 'import numpy as np\n'), ((6532, 6599), 'pandas.read_csv', 'pd.read_csv', (['raw_table_path'], {'index_col': "['model', 'roi', 'quantile']"}), "(raw_table_path, index_col=['model', 'roi', 'quantile'])\n", (6543, 6599), True, 'import pandas as pd\n'), ((6810, 6835), 'pandas.Series', 'pd.Series', ([], {'index': 'df.index'}), '(index=df.index)\n', (6819, 6835), True, 'import pandas as pd\n'), ((6905, 6915), 'tqdm.auto.tqdm', 'tqdm', (['rois'], {}), '(rois)\n', (6909, 6915), False, 'from tqdm.auto import tqdm\n'), ((9285, 9324), 'datetime.datetime.strptime', 'datetime.strptime', (['date_str', '"""%Y-%m-%d"""'], {}), "(date_str, '%Y-%m-%d')\n", (9302, 9324), False, 'from datetime import datetime\n'), ((9339, 9382), 'datetime.datetime.strptime', 'datetime.strptime', (['"""2020-01-01"""', '"""%Y-%m-%d"""'], {}), "('2020-01-01', '%Y-%m-%d')\n", (9356, 9382), False, 'from datetime import datetime\n'), ((1381, 1399), 'numpy.mean', 'mean', (['like'], {'axis': '(0)'}), '(like, axis=0)\n', (1385, 1399), False, 'from numpy import log, exp, sum, mean, var, sqrt\n'), ((2309, 2350), 'arviz.from_pystan', 'az.from_pystan', (['fit'], {'log_likelihood': '"""llx"""'}), "(fit, log_likelihood='llx')\n", (2323, 2350), True, 'import arviz as az\n'), ((3452, 3467), 'pathlib.Path', 'Path', (['data_path'], {}), '(data_path)\n', (3456, 3467), False, 'from pathlib import Path\n'), ((3823, 3855), 'numpy.shape', 'np.shape', (["samples['lambda[0,0]']"], {}), "(samples['lambda[0,0]'])\n", (3831, 3855), True, 'import numpy as np\n'), ((5818, 5844), 'numpy.exp', 'np.exp', (['(-0.5 * loo_se_vals)'], {}), '(-0.5 * loo_se_vals)\n', (5824, 5844), True, 'import numpy as np\n'), ((5865, 5880), 'numpy.sum', 'np.sum', (['weights'], {}), 
'(weights)\n', (5871, 5880), True, 'import numpy as np\n'), ((3544, 3580), 'numpy.where', 'np.where', (["(df['new_cases'].values > 1)"], {}), "(df['new_cases'].values > 1)\n", (3552, 3580), True, 'import numpy as np\n'), ((4950, 4970), 'numpy.sum', 'np.sum', (['llx[i, :, :]'], {}), '(llx[i, :, :])\n', (4956, 4970), True, 'import numpy as np\n'), ((8420, 8448), 'numpy.exp', 'np.exp', (['(-0.5 * waic / n_data)'], {}), '(-0.5 * waic / n_data)\n', (8426, 8448), True, 'import numpy as np\n'), ((1733, 1743), 'numpy.var', 'var', (['elpdi'], {}), '(elpdi)\n', (1736, 1743), False, 'from numpy import log, exp, sum, mean, var, sqrt\n'), ((2561, 2592), 'arviz.loo', 'az.loo', (['idata'], {'scale': '"""deviance"""'}), "(idata, scale='deviance')\n", (2567, 2592), True, 'import arviz as az\n'), ((2622, 2654), 'arviz.waic', 'az.waic', (['idata'], {'scale': '"""deviance"""'}), "(idata, scale='deviance')\n", (2629, 2654), True, 'import arviz as az\n'), ((9139, 9159), 'pathlib.Path', 'Path', (['raw_table_path'], {}), '(raw_table_path)\n', (9143, 9159), False, 'from pathlib import Path\n')]
|
import os
import sys
import numpy as np
import qcdb
from ..utils import *
@using("nwchem")
def test_grad():
h2o = qcdb.set_molecule(
"""
O 0.00000000 0.00000000 0.00000000
H 0.00000000 1.93042809 -1.10715266
H 0.00000000 -1.93042809 -1.10715266
units au"""
)
qcdb.set_options(
{
"basis": "sto-3g",
"scf__e_convergence": 1e-6,
#'nwchem_driver__tight': True
}
)
val = qcdb.gradient("nwc-scf")
scf = -74.888142460799
grads = np.array(
[[0.000000, 0.000000, 0.058550], [0.000000, 0.140065, -0.029275], [0.000000, -0.140065, -0.029275]]
)
assert compare_values(scf, qcdb.variable("HF TOTAL ENERGY"), 5, "scf")
assert compare_arrays(grads, qcdb.variable("CURRENT GRADIENT"), 5, "scf grad")
|
[
"qcdb.variable",
"qcdb.set_options",
"numpy.array",
"qcdb.gradient",
"qcdb.set_molecule"
] |
[((123, 337), 'qcdb.set_molecule', 'qcdb.set_molecule', (['"""\n O 0.00000000 0.00000000 0.00000000\n H 0.00000000 1.93042809 -1.10715266\n H 0.00000000 -1.93042809 -1.10715266\n units au"""'], {}), '(\n """\n O 0.00000000 0.00000000 0.00000000\n H 0.00000000 1.93042809 -1.10715266\n H 0.00000000 -1.93042809 -1.10715266\n units au"""\n )\n', (140, 337), False, 'import qcdb\n'), ((347, 413), 'qcdb.set_options', 'qcdb.set_options', (["{'basis': 'sto-3g', 'scf__e_convergence': 1e-06}"], {}), "({'basis': 'sto-3g', 'scf__e_convergence': 1e-06})\n", (363, 413), False, 'import qcdb\n'), ((514, 538), 'qcdb.gradient', 'qcdb.gradient', (['"""nwc-scf"""'], {}), "('nwc-scf')\n", (527, 538), False, 'import qcdb\n'), ((579, 671), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.05855], [0.0, 0.140065, -0.029275], [0.0, -0.140065, -0.029275]]'], {}), '([[0.0, 0.0, 0.05855], [0.0, 0.140065, -0.029275], [0.0, -0.140065,\n -0.029275]])\n', (587, 671), True, 'import numpy as np\n'), ((735, 767), 'qcdb.variable', 'qcdb.variable', (['"""HF TOTAL ENERGY"""'], {}), "('HF TOTAL ENERGY')\n", (748, 767), False, 'import qcdb\n'), ((812, 845), 'qcdb.variable', 'qcdb.variable', (['"""CURRENT GRADIENT"""'], {}), "('CURRENT GRADIENT')\n", (825, 845), False, 'import qcdb\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: fnels
"""
import numpy as np
import maze
import random
import cv2
class EnvCleaner(object):
def __init__(self, N_agent, map_size, seed):
self.map_size = map_size
self.seed = seed
self.occupancy = self.generate_maze(seed)
self.N_agent = N_agent
self.agt_pos_list = []
for i in range(self.N_agent):
self.agt_pos_list.append([1, 1])
def generate_maze(self, seed):
symbols = {
# default symbols
'start': 'S',
'end': 'X',
'wall_v': '|',
'wall_h': '-',
'wall_c': '+',
'head': '#',
'tail': 'o',
'empty': ' '
}
maze_obj = maze.Maze(int((self.map_size - 1) / 2), int((self.map_size - 1) / 2), seed, symbols, 1)
grid_map = maze_obj.to_np()
for i in range(self.map_size):
for j in range(self.map_size):
if grid_map[i][j] == 0:
grid_map[i][j] = 2
return grid_map
def step(self, action_list):
reward = 0
#print(self.occupancy)
for i in range(len(action_list)):
if action_list[i] == 0: # up
if self.occupancy[self.agt_pos_list[i][0] - 1][self.agt_pos_list[i][1]] != 1: # if can move
self.agt_pos_list[i][0] = self.agt_pos_list[i][0] - 1
else: #can't move
return -1
if action_list[i] == 2: # down
if self.occupancy[self.agt_pos_list[i][0] + 1][self.agt_pos_list[i][1]] != 1: # if can move
self.agt_pos_list[i][0] = self.agt_pos_list[i][0] + 1
else: #can't move
return -1
if action_list[i] == 3: # left
if self.occupancy[self.agt_pos_list[i][0]][self.agt_pos_list[i][1] - 1] != 1: # if can move
self.agt_pos_list[i][1] = self.agt_pos_list[i][1] - 1
else: #can't move
return -1
if action_list[i] == 1: # right
if self.occupancy[self.agt_pos_list[i][0]][self.agt_pos_list[i][1] + 1] != 1: # if can move
self.agt_pos_list[i][1] = self.agt_pos_list[i][1] + 1
else: #can't move
return -1
if self.occupancy[self.agt_pos_list[i][0]][self.agt_pos_list[i][1]] == 2: # if the spot is dirty
self.occupancy[self.agt_pos_list[i][0]][self.agt_pos_list[i][1]] = 0
reward = reward + 1
else:
return -.05 #spot not dirty
return reward
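    # Added usage sketch (not part of the original class): step() returns -1
    # when a move is blocked by a wall, +1 for each newly cleaned tile and
    # -0.05 when an agent lands on an already clean tile, e.g.
    #   env = EnvCleaner(N_agent=2, map_size=15, seed=0)
    #   reward = env.step([0, 1])   # agent 0 moves up, agent 1 moves right
    #   done = env.is_room_clean()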
def can_move(self, action_list):
for i in range(len(action_list)):
if action_list[i] == 0: # up
if self.occupancy[self.agt_pos_list[i][0] - 1][self.agt_pos_list[i][1]] != 1: # if can move
return True
else:
return False
if action_list[i] == 2: # down
if self.occupancy[self.agt_pos_list[i][0] + 1][self.agt_pos_list[i][1]] != 1: # if can move
return True
else:
return False
if action_list[i] == 3: # left
if self.occupancy[self.agt_pos_list[i][0]][self.agt_pos_list[i][1] - 1] != 1: # if can move
return True
else:
return False
if action_list[i] == 1: # right
if self.occupancy[self.agt_pos_list[i][0]][self.agt_pos_list[i][1] + 1] != 1: # if can move
return True
else:
return False
def is_room_clean(self):
for row in self.occupancy: #go through all the tiles in room
for column in row:
if column == 2: #tile is dirty
return False
return True
def get_global_obs(self):
obs = np.zeros((self.map_size, self.map_size, 3))
for i in range(self.map_size):
for j in range(self.map_size):
if self.occupancy[i, j] == 0:
obs[i, j, 0] = 1.0
obs[i, j, 1] = 1.0
obs[i, j, 2] = 1.0
if self.occupancy[i, j] == 2:
obs[i, j, 0] = 0.0
obs[i, j, 1] = 1.0
obs[i, j, 2] = 0.0
for i in range(self.N_agent):
obs[self.agt_pos_list[i][0], self.agt_pos_list[i][1], 0] = 1.0
obs[self.agt_pos_list[i][0], self.agt_pos_list[i][1], 1] = 0.0
obs[self.agt_pos_list[i][0], self.agt_pos_list[i][1], 2] = 0.0
return obs
def reset(self):
self.occupancy = self.generate_maze(self.seed)
self.agt_pos_list = []
for i in range(self.N_agent):
self.agt_pos_list.append([1, 1])
return self.occupancy
def render(self):
obs = self.get_global_obs()
enlarge = 5
new_obs = np.ones((self.map_size*enlarge, self.map_size*enlarge, 3))
for i in range(self.map_size):
for j in range(self.map_size):
if obs[i][j][0] == 0.0 and obs[i][j][1] == 0.0 and obs[i][j][2] == 0.0:
cv2.rectangle(new_obs, (i * enlarge, j * enlarge), (i * enlarge + enlarge, j * enlarge + enlarge), (0, 0, 0), -1)
if obs[i][j][0] == 1.0 and obs[i][j][1] == 0.0 and obs[i][j][2] == 0.0:
cv2.rectangle(new_obs, (i * enlarge, j * enlarge), (i * enlarge + enlarge, j * enlarge + enlarge), (0, 0, 255), -1)
if obs[i][j][0] == 0.0 and obs[i][j][1] == 1.0 and obs[i][j][2] == 0.0:
cv2.rectangle(new_obs, (i * enlarge, j * enlarge), (i * enlarge + enlarge, j * enlarge + enlarge), (0, 255, 0), -1)
cv2.imshow('image', new_obs)
cv2.waitKey(10)
def random_action_list(self):
action_list = []
for i in range(1000):
action_list.append(random.randint(0,3))
return action_list
|
[
"random.randint",
"cv2.waitKey",
"numpy.zeros",
"numpy.ones",
"cv2.rectangle",
"cv2.imshow"
] |
[((4161, 4204), 'numpy.zeros', 'np.zeros', (['(self.map_size, self.map_size, 3)'], {}), '((self.map_size, self.map_size, 3))\n', (4169, 4204), True, 'import numpy as np\n'), ((5213, 5275), 'numpy.ones', 'np.ones', (['(self.map_size * enlarge, self.map_size * enlarge, 3)'], {}), '((self.map_size * enlarge, self.map_size * enlarge, 3))\n', (5220, 5275), True, 'import numpy as np\n'), ((6032, 6060), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'new_obs'], {}), "('image', new_obs)\n", (6042, 6060), False, 'import cv2\n'), ((6069, 6084), 'cv2.waitKey', 'cv2.waitKey', (['(10)'], {}), '(10)\n', (6080, 6084), False, 'import cv2\n'), ((6214, 6234), 'random.randint', 'random.randint', (['(0)', '(3)'], {}), '(0, 3)\n', (6228, 6234), False, 'import random\n'), ((5462, 5580), 'cv2.rectangle', 'cv2.rectangle', (['new_obs', '(i * enlarge, j * enlarge)', '(i * enlarge + enlarge, j * enlarge + enlarge)', '(0, 0, 0)', '(-1)'], {}), '(new_obs, (i * enlarge, j * enlarge), (i * enlarge + enlarge, \n j * enlarge + enlarge), (0, 0, 0), -1)\n', (5475, 5580), False, 'import cv2\n'), ((5684, 5804), 'cv2.rectangle', 'cv2.rectangle', (['new_obs', '(i * enlarge, j * enlarge)', '(i * enlarge + enlarge, j * enlarge + enlarge)', '(0, 0, 255)', '(-1)'], {}), '(new_obs, (i * enlarge, j * enlarge), (i * enlarge + enlarge, \n j * enlarge + enlarge), (0, 0, 255), -1)\n', (5697, 5804), False, 'import cv2\n'), ((5908, 6028), 'cv2.rectangle', 'cv2.rectangle', (['new_obs', '(i * enlarge, j * enlarge)', '(i * enlarge + enlarge, j * enlarge + enlarge)', '(0, 255, 0)', '(-1)'], {}), '(new_obs, (i * enlarge, j * enlarge), (i * enlarge + enlarge, \n j * enlarge + enlarge), (0, 255, 0), -1)\n', (5921, 6028), False, 'import cv2\n')]
|
from collections import deque
import random
import numpy as np
import gym
from gym.wrappers import AtariPreprocessing
class Game():
def __init__(self, game_name, start_noop=2, last_n_frames=4, frameskip=4, grayscale_obs=True, scale_obs=False):
self.start_noop = start_noop
self.last_n_frames = last_n_frames
self.frameskip = frameskip
self.buffer = deque([], self.last_n_frames)
self.env = gym.make(game_name)
# Hacks to make environment deterministic and compatible with Atari Preprocessing
self.env.unwrapped.frameskip = 1
if 'NoFrameskip' not in self.env.spec.id:
print('Environment is not Frameskip version.')
self.env.spec.id += '-NoFrameskip'
self.envWrapped = AtariPreprocessing(self.env, frame_skip=self.frameskip, grayscale_obs=grayscale_obs, scale_obs=scale_obs)
self.envWrapped.reset()
self.n_actions = self.env.action_space.n
init_screen = self.get_screen()
# Screen dimension is represented as (CHW) for PyTorch
self.scr_dims = tuple([self.last_n_frames] + list(init_screen.shape))
for _ in range(self.frameskip):
self.buffer.append(init_screen.copy())
#self.start_game()
def start_game(self):
self.buffer.clear()
# Random starting operations to simulate human conditions
noop_action = 0
# In breakout, nothing happens unless first 'Fired'.
if 'Breakout' in self.env.spec.id:
noop_action = 1
for _ in range(random.randint(1, self.start_noop)):
# 0 corresponds to No-Op action
# 1 corresponds to Fire
self.step(noop_action)
# Fill remaining buffer by most recent frame to send a valid input to model
if len(self.buffer) > 0:
last_screen = self.buffer[-1]
else:
last_screen = self.get_screen()
while len(self.buffer) < self.buffer.maxlen:
self.buffer.append(last_screen.copy())
def get_screen(self):
screen = self.envWrapped._get_obs()
return screen
def get_input(self):
# Each element in buffer is a tensor of 84x84 dimensions.
# This function returns tensor of 4x84x84 dimensions.
return np.stack(tuple(self.buffer), axis=0)
def get_n_actions(self):
# return number of actions
return self.n_actions
def reset_env(self):
# reset the gym environment
self.env.reset()
self.start_game()
def get_screen_dims(self):
# return the screen dimensions
return self.scr_dims
def step(self, action):
screen, reward, done, _ = self.envWrapped.step(action)
# # DEBUG
# import matplotlib.pyplot as plt
# plt.imshow(screen)
# plt.plot()
# plt.savefig('tmp_img.png')
# print(action, '\t', reward)
# input()
# # DEBUG
# ALE takes care of the max pooling of the last 2 frames
# Refer: "https://danieltakeshi.github.io/2016/11/25/
# frame-skipping-and-preprocessing-for-deep-q-networks-on-atari-2600-games/"
self.buffer.append(screen)
# reward is clipped between -1 and 1
reward = np.clip(reward, -1.0, 1.0)
return reward, done
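# Added usage sketch (the environment id below is an assumption; any ALE
# "NoFrameskip" id should behave the same way):
#   game = Game('BreakoutNoFrameskip-v4')
#   game.reset_env()
#   state = game.get_input()      # stacked frames, shape (4, 84, 84)
#   reward, done = game.step(1)   # reward is already clipped to [-1, 1]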
"""
Actions in OpenAI Gym ALE
-------------------------
ACTION_MEANING = {
0: "NOOP",
1: "FIRE",
2: "UP",
3: "RIGHT",
4: "LEFT",
5: "DOWN",
6: "UPRIGHT",
7: "UPLEFT",
8: "DOWNRIGHT",
9: "DOWNLEFT",
10: "UPFIRE",
11: "RIGHTFIRE",
12: "LEFTFIRE",
13: "DOWNFIRE",
14: "UPRIGHTFIRE",
15: "UPLEFTFIRE",
16: "DOWNRIGHTFIRE",
17: "DOWNLEFTFIRE",
}
"""
|
[
"random.randint",
"gym.make",
"gym.wrappers.AtariPreprocessing",
"numpy.clip",
"collections.deque"
] |
[((392, 421), 'collections.deque', 'deque', (['[]', 'self.last_n_frames'], {}), '([], self.last_n_frames)\n', (397, 421), False, 'from collections import deque\n'), ((442, 461), 'gym.make', 'gym.make', (['game_name'], {}), '(game_name)\n', (450, 461), False, 'import gym\n'), ((778, 888), 'gym.wrappers.AtariPreprocessing', 'AtariPreprocessing', (['self.env'], {'frame_skip': 'self.frameskip', 'grayscale_obs': 'grayscale_obs', 'scale_obs': 'scale_obs'}), '(self.env, frame_skip=self.frameskip, grayscale_obs=\n grayscale_obs, scale_obs=scale_obs)\n', (796, 888), False, 'from gym.wrappers import AtariPreprocessing\n'), ((3283, 3309), 'numpy.clip', 'np.clip', (['reward', '(-1.0)', '(1.0)'], {}), '(reward, -1.0, 1.0)\n', (3290, 3309), True, 'import numpy as np\n'), ((1568, 1602), 'random.randint', 'random.randint', (['(1)', 'self.start_noop'], {}), '(1, self.start_noop)\n', (1582, 1602), False, 'import random\n')]
|
"""
Build ensemble models from ensemble of RandomForestRegressor models
"""
import sys
import numpy as np
import pandas as pd
import xarray as xr
import datetime as datetime
import sklearn as sk
from sklearn.ensemble import RandomForestRegressor
import glob
# import AC_tools (https://github.com/tsherwen/AC_tools.git)
import AC_tools as AC
# s2s imports
import sparse2spatial.utils as utils
import sparse2spatial.RFRanalysis as RFRanalysis
from sparse2spatial.RFRanalysis import get_core_stats_on_current_models
def build_or_get_models(df=None, testset='Test set (strat. 20%)',
save_model_to_disk=False, read_model_from_disk=True,
target='Iodide', model_names=None,
delete_existing_model_files=False, rm_outliers=True,
model_sub_dir='/TEMP_MODELS/', random_state=42,
rm_LOD_filled_data=False, model_feature_dict=None,
debug=False):
"""
Build (or read from disc) various models (diff. features) to test comparisons
Parameters
-------
df (pd.DataFrame): DataFrame of target and features values for point locations
testset (str): Testset to use, e.g. stratified sampling over quartiles for 20%:80%
save_model_to_disk (bool): Save the models to disc as pickled binaries?
read_model_from_disk (bool): read the models from disc if they are already built?
target (str): Name of the target variable (e.g. iodide)
model_names (list): List of model names to build/read
random_state (int), the seed used by the random number generator
delete_existing_model_files (bool): delete the existing model binaries in folder?
rm_outliers (bool): remove the outliers from the observational dataset
rm_LOD_filled_data (bool): remove the limit of detection (LOD) filled values?
rm_Skagerrak_data (bool): Remove specific data
    (the above argument is an iodide-specific option - remove this)
model_feature_dict (dict): dictionary of features used in each model
model_sub_dir (str): the sub directory in which the models are to be saved/read
debug (bool): run and debug function/output
Returns
-------
(dict)
"""
from sklearn.ensemble import RandomForestRegressor
from sklearn.externals import joblib
import gc
# - Get processed data
if isinstance(df, type(None)):
print('Dictionary of model names and features must be provided!')
sys.exit()
# - Get local variables
# Location to save models
data_root = utils.get_file_locations('data_root')
folder = '{}/{}/models/LIVE/{}/'.format(data_root, target, model_sub_dir)
if debug:
print('Using models from {}'.format(folder))
# Get details on model setups to use
if isinstance(model_feature_dict, type(None)):
print('Dictionary of model names and features must be provided!')
sys.exit()
if isinstance(model_names, type(None)):
model_names = list(sorted(model_feature_dict.keys()))
# Set a hyperparameter settings
hyperparam_dict = utils.get_hyperparameter_dict()
# Setup dictionaries to save model detail into
N_features_used = {}
features_used_dict = {}
oob_scores = {}
models_dict = {}
# Loop model input variable options and build models
if not read_model_from_disk:
for n_model_name, model_name in enumerate(model_names):
print(n_model_name, model_name)
# Get testing features and hyperparameters to build model
features_used = model_feature_dict[model_name]
n_estimators = hyperparam_dict['n_estimators']
oob_score = hyperparam_dict['oob_score']
# Select and split variables in the training and test dataset
train_set_tr = df.loc[df[testset] != True, features_used]
train_set_tr_labels = df.loc[df[testset] != True, target]
            # Build model (setup and fit)
model = RandomForestRegressor(random_state=random_state,
n_estimators=n_estimators,
oob_score=oob_score,
criterion='mse')
# Provide the model with the features (features_used) and
# The labels ( target, train_set_tr_labels)
model.fit(train_set_tr, train_set_tr_labels)
# Save model in temporary folder?
if save_model_to_disk:
# Check if there are any existing files...
pkls_in_dir = glob.glob(folder+'*.pkl')
Npkls = len(pkls_in_dir)
if delete_existing_model_files and (n_model_name == 0):
import os
[os.remove(i) for i in pkls_in_dir]
print('WARNING: deleted existing ({}) pkls'.format(Npkls))
elif(not delete_existing_model_files) and (n_model_name == 0):
assert Npkls == 0, 'WARNING: model files exist!'
else:
pass
# Save models as pickles
model_savename = "my_model_{:0>4}.pkl".format(n_model_name)
try:
joblib.dump(model, folder+model_savename)
except FileNotFoundError:
prt_str = "WARNING: Failed to save file - @ '{}' with name '{}'"
print( prt_str.format(folder+model_savename))
utils.check_or_mk_directory_struture()
# Also keep models online in dictionary
models_dict[model_name] = model
# force local tidy of garbage
gc.collect()
# Loop model and predict for all values
# If time to make models too great, then read-in here and 'rm' from above
for n_model_name, model_name in enumerate(model_names):
# Get testing features and hyperparameters to build model
features_used = model_feature_dict[model_name]
print(n_model_name, model_name, features_used)
# Read models from disk?
if (not save_model_to_disk) and (read_model_from_disk):
model_savename = "my_model_{:0>4}.pkl".format(n_model_name)
model = joblib.load(folder+model_savename)
models_dict[model_name] = model
else:
model = models_dict[model_name]
# Predict target for all observation locations
df[model_name] = model.predict(df[features_used].values)
# Save number of features used too
N_features_used[model_name] = len(features_used)
features_used_dict[model_name] = '+'.join(features_used)
try:
oob_scores[model_name] = model.oob_score_
except:
oob_scores[model_name] = np.NaN
models_dict[model_name] = model
# Return models and predictions in a dictionary structure
RFR_dict = {}
RFR_dict['models_dict'] = models_dict
RFR_dict['model_names'] = model_names
RFR_dict['df'] = df
RFR_dict['features_used_dict'] = features_used_dict
RFR_dict['N_features_used'] = N_features_used
RFR_dict['oob_scores'] = oob_scores
return RFR_dict
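# Added usage sketch (the feature and model names below are invented examples,
# not the project's real configuration):
#   model_feature_dict = {
#       'RFR(TEMP)': ['WOA_TEMP_K'],
#       'RFR(TEMP+SAL)': ['WOA_TEMP_K', 'WOA_Salinity'],
#   }
#   RFR_dict = build_or_get_models(df=df, model_feature_dict=model_feature_dict,
#                                  save_model_to_disk=True,
#                                  read_model_from_disk=False)
#   top_models = get_top_models(RFR_dict=RFR_dict, vars2exclude=['DOC', 'Prod'])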
def get_features_used_by_model(models_list=None, RFR_dict=None):
"""
Get the (set of) features used by a list of models
Parameters
-------
RFR_dict (dict): dictionary of core variables and data
models_list (list): list of model names to get features for
Returns
-------
(list)
"""
# Get dictionary of shared data if not provided
if isinstance(RFR_dict, type(None)):
RFR_dict = build_or_get_models()
# Get models to use (assume top models, if not provided)
if isinstance(models_list, type(None)):
models_list = get_top_models(RFR_dict=RFR_dict, vars2exclude=['DOC', 'Prod'])
# Now plot up in input variables
features_used_dict = RFR_dict['features_used_dict']
vars2use = []
for model_name in models_list:
vars2use += [features_used_dict[model_name].split('+')]
# Remove double ups
vars2use = [j for i in vars2use for j in i]
return list(set(vars2use))
def get_top_models(n=10, stats=None, RFR_dict=None, vars2exclude=None,
exclude_ensemble=True, verbose=True):
"""
retrieve the names of the top models (default=top 10)
Parameters
-------
n (int), the number of top ranked models to return
vars2exclude (list): list of variables to exclude (e.g. DEPTH)
RFR_dict (dict): dictionary of core variables and data
exclude_ensemble (bool): exclude the ensemble prediction from the list
verbose (bool): print out verbose output?
Returns
-------
(list)
"""
# Get stats on models in RFR_dict
if isinstance(RFR_dict, type(None)):
RFR_dict = build_or_get_models()
if isinstance(stats, type(None)):
stats = get_core_stats_on_current_models(
RFR_dict=RFR_dict, verbose=False)
# Don't count the Ensemble in the top ranking models
if exclude_ensemble:
var_ = 'RFR(Ensemble)'
try:
stats = stats.T[[i for i in stats.T.columns if var_ not in i]].T
if verbose:
print('removed {} from list'.format(var_))
except:
if verbose:
print('failed to remove {} from list'.format(var_))
# Return the top model's names
params2inc = stats.T.columns
# Exclude any variables in provided list
if not isinstance(vars2exclude, type(None)):
for var_ in vars2exclude:
params2inc = [i for i in params2inc if var_ not in i]
# Return the updated dataframe's index (model names that are top models)
return list(stats.T[params2inc].T.head(n).index)
def Hyperparameter_Tune4choosen_models(RFR_dict=None, target='Iodide', cv=7,
testset='Test set (strat. 20%)'):
"""
    Driver to tune multiple RFR models
Parameters
-------
testset (str): Testset to use, e.g. stratified sampling over quartiles for 20%:80%
cv (int), number of folds of cross-validation to use
target (str): Name of the target variable (e.g. iodide)
RFR_dict (dict): dictionary of models, data and shared variables
Returns
-------
(None)
"""
from sklearn.externals import joblib
# Get the data for the models
if isinstance(RFR_dict, type(None)):
RFR_dict = build_or_get_models()
# Set models to optimise
models2compare = get_top_models(RFR_dict=RFR_dict, vars2exclude=['DOC', 'Prod'])
# Get variables needed from core dictionary
features_used_dict = RFR_dict['features_used_dict']
models_dict = RFR_dict['models_dict']
# Set folder to use for optimised models
data_root = utils.get_file_locations('data_root')
folder = '{}/{}/models/LIVE/OPTIMISED_MODELS/'.format(data_root, target)
# Loop and save optimised model
# NOTE: this could be speed up by using more cores
for model_name in models2compare:
print('Optimising model: {}'.format(model_name))
# Get model
model = models_dict[model_name]
# get testing features
features_used = features_used_dict[model_name].split('+')
# Tune parameters
BE = Hyperparameter_Tune_model(model=model, use_choosen_model=False,
save_best_estimator=True, model_name=model_name,
RFR_dict=RFR_dict,
features_used=features_used, cv=cv)
# - Test the tuned models against the test set
test_the_tuned_models = False
if test_the_tuned_models:
# Get the core data
df = RFR_dict['df']
# Get the data
test_set = df.loc[df[testset] == True, :]
train_set = df.loc[df[testset] == False, :]
# Test the improvements in the optimised models?
for model_name in models2compare:
# - Get existing model
model = models_dict[model_name]
# Get testing features
features_used = features_used_dict[model_name].split('+')
# - Get the data
# ( Make sure to remove the target )
# train_features = df[features_used].loc[ train_set.index ]
# train_labels = df[[target]].loc[ train_set.index ]
test_features = df[features_used].loc[test_set.index]
test_labels = df[[target]].loc[test_set.index]
# - test the existing model
print(' ---------------- '*3)
print(' ---------------- {}: '.format(model_name))
print(' - Base values: ')
quick_model_evaluation(model, test_features, test_labels)
# - Get optimised model
try:
model_savename = "my_model_{}.pkl".format(model_name)
OPmodel = joblib.load(folder + model_savename)
#
print(' - Optimised values: ')
quick_model_evaluation(OPmodel, test_features, test_labels)
except:
pass
# - Test the tuned models against the training set
# Get the core data
df = RFR_dict['df']
# get the data
test_set = df.loc[df[testset] == True, :]
train_set = df.loc[df[testset] == False, :]
# Test the improvements in the optimised models?
for model_name in models2compare:
# - Get existing model
model = models_dict[model_name]
# get testing features
features_used = features_used_dict[model_name].split('+')
# - Get the data
# ( Making sure to remove the target!!! )
train_features = df[features_used].loc[train_set.index]
train_labels = df[[target]].loc[train_set.index]
# test_features = df[features_used].loc[ test_set.index ]
# test_labels = df[[target]].loc[ test_set.index ]
# - test the existing model
print(' ---------------- '*3)
print(' ---------------- {}: '.format(model_name))
print(' - Base values: ')
quick_model_evaluation(model, train_features, train_labels)
# - Get optimised model
try:
model_savename = "my_model_{}.pkl".format(model_name)
OPmodel = joblib.load(folder + model_savename)
#
print(' - Optimised values: ')
quick_model_evaluation(OPmodel, train_features, train_labels)
except:
pass
def Hyperparameter_Tune_model(use_choosen_model=True, model=None,
RFR_dict=None, df=None, cv=3,
testset='Test set (strat. 20%)', target='Iodide',
features_used=None, model_name=None,
save_best_estimator=True):
"""
Driver to tune hyperparmeters of model
Parameters
-------
testset (str): Testset to use, e.g. stratified sampling over quartiles for 20%:80%
target (str): Name of the target variable (e.g. iodide)
RFR_dict (dict): dictionary of core variables and data
model_name (str): name of model to tune performance of
features_used (list): list of the features within the model_name model
save_best_estimator (bool): save the best performing model offline
model (RandomForestRegressor), Random Forest Regressor model to tune
cv (int), number of folds of cross-validation to use
Returns
-------
(RandomForestRegressor)
"""
from sklearn.externals import joblib
from sklearn.ensemble import RandomForestRegressor
# Get data to test
if isinstance(df, type(None)):
# df = get_dataset_processed4ML()
df = RFR_dict['df']
# Use the model selected from the feature testing
if use_choosen_model:
assert_str = "model name not needed as use_choosen_model selected!"
assert isinstance(model, type(None)), assert_str
# select a single chosen model
mdict = get_choosen_model_from_features_selection()
features_used = mdict['features_used']
model = mdict['model']
model_name = mdict['name']
# - extract training dataset
test_set = df.loc[df[testset] == True, :]
train_set = df.loc[df[testset] == False, :]
# also sub select all vectors for input data
# ( Making sure to remove the target!!! )
train_features = df[features_used].loc[train_set.index]
train_labels = df[[target]].loc[train_set.index]
test_features = df[features_used].loc[test_set.index]
test_labels = df[[target]].loc[test_set.index]
# - Make the base model for comparisons
base_model = RandomForestRegressor(n_estimators=10, random_state=42,
criterion='mse')
base_model.fit(train_features, train_labels)
quick_model_evaluation(base_model, test_features, test_labels)
# - First make an intial explore of the parameter space
rf_random = Use_RS_CV_to_explore_hyperparams(cv=cv,
train_features=train_features,
train_labels=train_labels,
features_used=features_used
)
# Check the performance by Random searching (RandomizedSearchCV)
best_random = rf_random.best_estimator_
best_params_ = rf_random.best_params_
print(rf_random.best_params_)
quick_model_evaluation(best_random, test_features, test_labels)
# - Now do a more focused optimisation
# get the parameters based on the RandomizedSearchCV output
param_grid = define_hyperparameter_options2test(
features_used=features_used, best_params_=best_params_,
param_grid_RandomizedSearchCV=True)
# Use GridSearchCV
grid_search = use_GS_CV_to_tune_Hyperparams(cv=cv,
train_features=train_features,
param_grid=param_grid,
train_labels=train_labels,
features_used=features_used,
)
print(grid_search.best_params_)
# Check the performance of grid seraching searching
BEST_ESTIMATOR = grid_search.best_estimator_
quick_model_evaluation(BEST_ESTIMATOR, test_features, test_labels)
# Save the best estimator now for future use
if save_best_estimator:
data_root = utils.get_file_locations('data_root')
folder = '{}/{}/models/LIVE/OPTIMISED_MODELS/'.format(data_root, target)
model_savename = "my_model_{}.pkl".format(model_name)
joblib.dump(BEST_ESTIMATOR, folder + model_savename)
else:
return BEST_ESTIMATOR
def Use_RS_CV_to_explore_hyperparams(train_features=None,
train_labels=None,
features_used=None,
test_features=None,
test_labels=None,
scoring='neg_mean_squared_error',
cv=3):
"""
Intial test of parameter space using RandomizedSearchCV
Parameters
-------
features_used (list): list of the features used by the model
train_features (list): list of the training features
train_labels (list): list of the training labels
test_features (list): list of the testing features
test_labels (list): list of the testing labels
cv (int), number of folds of cross-validation to use
scoring (str): scoring method to use
"""
from sklearn.model_selection import RandomizedSearchCV
from sklearn.ensemble import RandomForestRegressor
# Number of trees in random forest
n_estimators = [int(x) for x in np.linspace(start=10, stop=1000, num=10)]
# Number of features to consider at every split
# max_features = ['auto', 'sqrt']
max_features = range(1, 30)
if not isinstance(features_used, type(None)):
max_features = [i for i in max_features if
i <= len(features_used)]
# Maximum number of levels in tree
max_depth = [int(x) for x in np.linspace(10, 110, num=11)]
max_depth.append(None)
# Minimum number of samples required to split a node
min_samples_split = [2, 5, 10]
# Minimum number of samples required at each leaf node
min_samples_leaf = [1, 2, 4]
# Method of selecting samples for training each tree
# bootstrap = [True, False]
bootstrap = [True] # Force use of bootstrapping
# Create the random grid
random_grid = {'n_estimators': n_estimators,
'max_features': max_features,
'max_depth': max_depth,
'min_samples_split': min_samples_split,
'min_samples_leaf': min_samples_leaf,
'bootstrap': bootstrap}
# Use the random grid to search for best hyperparameters
# First create the base model to tune
rf = RandomForestRegressor(random_state=42, criterion='mse')
# Random search of parameters, using 3 fold cross validation,
# search across 100 different combinations, and use all available cores
rf_random = RandomizedSearchCV(estimator=rf,
param_distributions=random_grid, n_iter=100, cv=cv,
verbose=2,
random_state=42, n_jobs=-1, scoring=scoring)
# Fit the random search model
rf_random.fit(train_features, train_labels)
return rf_random
def use_GS_CV_to_tune_Hyperparams(param_grid=None,
train_features=None, train_labels=None,
features_used=None, \
scoring='neg_mean_squared_error', cv=3,
):
"""
Refine hyperparameters using (GridSearchCV)
Parameters
-------
features_used (list): list of the features used by the model
train_features (list): list of the training features
train_labels (list): list of the training labels
cv (int), number of folds of cross-validation to use
scoring (str): scoring method to use
"""
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import RandomForestRegressor
# Create a based model
rf = RandomForestRegressor(random_state=42, criterion='mse')
# Instantiate the grid search model
grid_search = GridSearchCV(estimator=rf, param_grid=param_grid,
cv=cv, n_jobs=-1, verbose=2, scoring=scoring)
# Fit the grid search to the data
grid_search.fit(train_features, train_labels)
return grid_search
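# Added sketch of the intended coarse-to-fine tuning pattern (X and y are
# placeholders for the training features and labels):
#   rf_random = Use_RS_CV_to_explore_hyperparams(train_features=X, train_labels=y,
#                                                features_used=features, cv=3)
#   param_grid = define_hyperparameter_options2test(features_used=features,
#                                                   best_params_=rf_random.best_params_)
#   grid_search = use_GS_CV_to_tune_Hyperparams(param_grid=param_grid,
#                                               train_features=X, train_labels=y, cv=3)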
def quick_model_evaluation(model, test_features, test_labels):
"""
Perform a quick model evaluation
"""
from sklearn.metrics import mean_squared_error
predictions = model.predict(test_features)
MSE = mean_squared_error(test_labels, predictions)
RMSE = np.sqrt(MSE)
ME = np.mean(abs(predictions - test_labels.values))
print('Model Performance')
    print('Mean squared error (MSE): {:0.4f} nM'.format(MSE))
print('Mean absolute error (MAE): {:0.4f} nM'.format(ME))
print('RMSE = {:0.2f}'.format(RMSE))
return RMSE
def define_hyperparameter_options2test(features_used=None,
param_grid_RandomizedSearchCV=True,
best_params_=None,
param_grid_intial_guess=True,
):
"""
Define a selction of test groups
Parameters
-------
param_grid_intial_guess (bool): use the parameter grid of guesses
param_grid_RandomizedSearchCV (bool): use the parameter grid obtained
by randomly searching
best_params_ (param_grid), parameter grid of best parameters to use
features_used (list): list of the features used by the model
"""
# - Shared variables in grid
vals2test = {
'n_estimators': [10, 50, 75, 100, 125, 200, 300, 500],
'max_features': [1, 2, 3, 4, 5],
'max_depth': [3, 10, None],
'min_samples_split': [2, 5, 10],
'min_samples_leaf': [1, 3, 10],
'oob_score': [True],
'bootstrap': [True],
}
if param_grid_RandomizedSearchCV:
if not isinstance(best_params_, type(None)):
vals2test_ASSUMED = vals2test.copy()
vals2test = {}
for key in best_params_:
value = best_params_[key]
# 'n_estimators' / trees
if (key == 'n_estimators'):
values = [value+(i*10) for i in range(0, 4)]
values += [value+(i*10) for i in range(-4, 0)]
# only allow values greater than zero
values = [i for i in values if i > 0]
# add sorted values
vals2test[key] = sorted(values)
# max depth
elif (key == 'max_depth'):
                # value is either a number or "None".
if utils.is_number(value):
values = [value+(i*5) for i in range(0, 2)]
values += [value+(i*5) for i in range(-2, 0)]
# only allow values greater than zero
values = [i for i in values if i > 0]
# add sorted values
vals2test[key] = sorted(values)
else: # If None, just use None.
vals2test[key] = [value]
            # 'min_samples_leaf'
elif (key == 'min_samples_leaf'):
if value == 1:
values = range(value, value+3)
else:
values = [value, value+1, value+2]
# add sorted values
vals2test[key] = list(sorted(values))
# 'min_samples_split'
elif (key == 'min_samples_split'):
values = [value, value+1, value+2]
# add sorted values
vals2test[key] = list(sorted(values))
            # Add bootstrap and 'max_features' as received
elif (key == 'bootstrap') or (key == 'max_features'):
vals2test[key] = [value]
            # Check that settings have been initialised for the key
else:
print('No settings setup for {}'.format(key))
sys.exit()
# check all the values in best_params_ are in dict
new_keys = best_params_.keys()
old_keys = vals2test_ASSUMED.keys()
extra_keys = [i for i in old_keys if i not in new_keys]
print('WARNING: adding standard keys for: ', extra_keys)
for key in extra_keys:
vals2test[key] = vals2test_ASSUMED[key]
            # Check all the keys in best_params_ are present in the updated dict
            any_missing = any([i not in vals2test.keys() for i in new_keys])
            assert (not any_missing), 'Missing keys from provided best_params_'
else:
vals2test = {
'n_estimators': [80+(i*10) for i in range(8)],
'max_features': [1, 2, 3, 4, 5],
'max_depth': [90+(i*5) for i in range(5)],
'min_samples_split': [4, 5, 6],
'min_samples_leaf': [1, 2, 3],
'oob_score': [True],
'bootstrap': [True],
}
# Check the number of variations being tested
    def prod(iterable):
        import operator
        from functools import reduce
        return reduce(operator.mul, iterable, 1)
len_of_values = [len(vals2test[i]) for i in vals2test.keys()]
    print('WARNING: # of variations under test = {}'.format(prod(len_of_values)))
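    # For example, the shared grid defined at the top of this function
    # (8 * 5 * 3 * 3 * 3 * 1 * 1 values) corresponds to 1080 combinations.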
    # Make sure 'max_features' isn't set to more features than are actually used
if not isinstance(features_used, type(None)):
max_features = vals2test['max_features']
max_features = [i for i in max_features if
i <= len(features_used)]
vals2test['max_features'] = max_features
# --- Setup a parameter grid for testings
param_grid = [
        # - # of trees ('n_estimators', test=10, 25, 50, 100, 250, 500)
# {
# 'bootstrap': [True],
# 'n_estimators': vals2test['n_estimators'],
# 'oob_score': [True],
# },
        # # - # of features/'variables' ('max_features', test= 2,3,4, None)
# {
# 'bootstrap': [True],
# 'max_features': vals2test['max_features2test'],
# 'oob_score': [True],
# },
# # - both of the above
# {
# 'bootstrap': [True],
# 'n_estimators': vals2test['n_estimators'],
# 'max_features': vals2test['max_features'],
# 'oob_score': [True],
# },
# # - Minimum samples per leaf
# {
# 'bootstrap': [True],
# "min_samples_leaf": vals2test['min_samples_leaf'],
# 'oob_score': [True],
# },
# # - Depth
# {
# 'bootstrap': [True],
# "max_depth": max_depth2test,
# 'oob_score': [True],
# },
# # - Split?
# {
# 'bootstrap': [True],
# "min_samples_split": vals2test['min_samples_split'],
# 'oob_score': [True],
# },
# - all of the above
{
'bootstrap': vals2test['bootstrap'],
'n_estimators': vals2test['n_estimators'],
'max_features': vals2test['max_features'],
"min_samples_split": vals2test['min_samples_split'],
"min_samples_leaf": vals2test['min_samples_leaf'],
"max_depth": vals2test['max_depth'],
'oob_score': vals2test['oob_score'],
},
]
    if param_grid_intial_guess:
        return param_grid
    elif param_grid_RandomizedSearchCV:
        # The grid built above already incorporates the RandomizedSearchCV results
        return param_grid
def mk_predictions_NetCDF_4_many_builds(model2use, res='4x5',
models_dict=None, features_used_dict=None,
RFR_dict=None, target='Iodide',
stats=None, plot2check=False,
rm_Skagerrak_data=False,
debug=False):
"""
Make a NetCDF file of predicted variables for a given resolution
Parameters
-------
model2use (str): name of the model to use
target (str): Name of the target variable (e.g. iodide)
RFR_dict (dict): dictionary of core variables and data
res (str): horizontal resolution of dataset (e.g. 4x5)
features_used_dict (dict): dictionary of feature variables in models
plot2check (bool): make a quick plot to check the prediction
    models_dict (dict): dictionary of RFR models and their names
stats (pd.DataFrame): dataframe of statistics on models in models_dict
rm_Skagerrak_data (bool): Remove specific data
        (the above argument is an iodide-specific option - consider removing it)
debug (bool): print out debugging output?
Returns
-------
(None)
"""
from sklearn.externals import joblib
import gc
import glob
# - local variables
# extract the models...
if isinstance(RFR_dict, type(None)):
RFR_dict = build_or_get_models(
rm_Skagerrak_data=rm_Skagerrak_data
)
# Get the variables required here
if isinstance(features_used_dict, type(None)):
features_used_dict = RFR_dict['features_used_dict']
# Set the extr_str if rm_Skagerrak_data set to True
if rm_Skagerrak_data:
extr_str = '_No_Skagerrak'
else:
extr_str = ''
# Get location to save file and set filename
folder = utils.get_file_locations('data_root') + '/data/'
filename = 'Oi_prj_feature_variables_{}.nc'.format(res)
dsA = xr.open_dataset(folder + filename)
# Get location to save ensemble builds of models
folder_str = '{}/{}/models/LIVE/ENSEMBLE_REPEAT_BUILD{}/'
folder = folder_str.format(folder, target, extr_str)
# - Make a dataset for each model
ds_l = []
# Get list of twenty models built
models_str = folder + '*{}*.pkl'.format(model2use)
builds4model = glob.glob(models_str)
print(builds4model, models_str)
# Print a string to debug the output
db_str = "Found {} saved models for '{} - glob str:{}'"
print(db_str.format(len(builds4model), model2use, models_str))
# Get the numbers for the models in directory
b_modelnames = [i.split('my_model_')[-1][:-3] for i in builds4model]
# Check the number of models selected
ast_str = "There aren't models for {} in {}"
assert len(b_modelnames) > 1, ast_str.format(model2use, folder)
# Now loop by model built for ensemble member and predict values
for n_modelname, b_modelname in enumerate(b_modelnames):
# Load the model
model = joblib.load(builds4model[n_modelname])
        # Get testing features
features_used = features_used_dict[model2use].split('+')
# Make a DataSet of predicted values
ds_l += [mk_da_of_predicted_values(model=model, res=res, dsA=dsA,
modelname=b_modelname,
features_used=features_used)]
# Force local tidy of garbage
gc.collect()
# Combine datasets
ds = xr.merge(ds_l)
# - Also get values for existing parameterisations
if target == 'Iodide':
# Chance et al (2013)
param = u'Chance2014_STTxx2_I'
arr = utils.calc_I_Chance2014_STTxx2_I(dsA['WOA_TEMP'].values)
ds[param] = ds[b_modelname] # use existing array as dummy to fill
ds[param].values = arr
# MacDonald et al (2013)
param = 'MacDonald2014_iodide'
arr = utils.calc_I_MacDonald2014(dsA['WOA_TEMP'].values)
ds[param] = ds[b_modelname] # use existing array as dummy to fill
ds[param].values = arr
# Do a test diagnostic plot?
if plot2check:
for var_ in ds.data_vars:
# Do a quick plot to check
arr = ds[var_].mean(dim='time')
AC.map_plot(arr, res=res)
plt.title(var_)
plt.show()
# Save to NetCDF
save_name = 'Oi_prj_predicted_{}_{}_ENSEMBLE_BUILDS_{}_{}.nc'
ds.to_netcdf(save_name.format(target, res, model2use, extr_str))
def get_model_predictions4obs_point(df=None, model_name='TEMP+DEPTH+SAL',
model=None, features_used=None):
"""
Get model predictions for all observed points
Parameters
-------
df (pd.DataFrame): dataframe containing of target and features
features_used_dict (dict): dictionary of feature variables in models
    model (RandomForestRegressor): Random Forest Regressor model to use
model_name (str): name of the model to use
Returns
-------
(np.array)
"""
# Model name?
if isinstance(model, type(None)):
        print('A model must be provided to get_model_predictions4obs_point')
sys.exit()
# Testing features to use
    if isinstance(features_used, type(None)):
        func_name = 'get_model_predictions4obs_point'
        print("The model's features must be provided to {}".format(func_name))
        sys.exit()
# Now predict for the given testing features
target_predictions = model.predict(df[features_used])
return target_predictions
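# Usage sketch (the feature names below are illustrative only):
#     features_used = ['WOA_TEMP', 'Depth', 'Salinity']
#     predictions = get_model_predictions4obs_point(df=df, model=model,
#                                                   features_used=features_used)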
def mk_test_train_sets(df=None, target='Iodide',
rand_strat=True, features_used=None,
random_state=42, rand_20_80=False,
nsplits=4, verbose=True, debug=False):
"""
Make a test and training dataset for ML algorithms
Parameters
-------
target (str): Name of the target variable (e.g. iodide)
random_state (int), seed value to use as random seed for reproducible analysis
nsplits (int), number of ways to split the data
rand_strat (bool): split the data in a random way using stratified sampling
rand_20_80 (bool): split the data in a random way
df (pd.DataFrame): dataframe containing of target and features
debug (bool): print out debugging output?
verbose (bool): print out verbose output?
Returns
-------
(list)
"""
# - make Test and training set
# to make this approach's output identical at every run
np.random.seed(42)
# - Standard random selection:
if rand_20_80:
from sklearn.model_selection import train_test_split
# Use a standard 20% test set.
train_set, test_set = train_test_split(df, test_size=0.2,
random_state=random_state)
# also sub select all vectors for input data
# ( Making sure to remove the target!!! )
train_set = df[features_used].loc[train_set.index]
test_set = df[features_used].loc[test_set.index]
test_set_targets = df[[target]].loc[test_set.index]
# - Use a random split
if rand_strat:
from sklearn.model_selection import StratifiedShuffleSplit
# Add in "SPLIT_GROUP" metric
SPLITvar = 'SPLIT_GROUP'
        use_ceil_of_log = False  # Only used for the original (AGU) analysis
if use_ceil_of_log:
# Original approach taken for AGU work etc
ceil_ln_limited = np.ceil(np.log(df[target]))
# push bottom end values into lower bin
ceil_ln_limited[ceil_ln_limited <= 2] = 2
# push top end values in higher bin
ceil_ln_limited[ceil_ln_limited >= 5] = 5
df[SPLITvar] = ceil_ln_limited
else:
            # Use deciles and put the bins with high values together
# NOTE: use quartile cut (pd.qcut, not pd.cut)
# df[SPLITvar] = pd.cut(df[target].values,10).codes.astype(int)
# Combine the lesser populated higher 5 bins into the 5th bin
# df.loc[ df[SPLITvar] >= 4, SPLITvar ] = 4
# qcut will split the data into N ("nsplits") bins (e.g. quintiles)
# pd.qcut(df[target].values,5).value_counts()
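            # e.g. pd.qcut(range(8), 4).codes -> [0, 0, 1, 1, 2, 2, 3, 3]
            # i.e. each sample is labelled with its quantile bin (here quartiles)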
df[SPLITvar] = pd.qcut(df[target].values, nsplits).codes
if verbose:
print(df[SPLITvar].value_counts())
# setup the split
split = StratifiedShuffleSplit(n_splits=1, test_size=0.2,
random_state=random_state)
# Now split
for train_index, test_index in split.split(df, df[SPLITvar]):
train_set = df.loc[train_index]
test_set = df.loc[test_index]
test_set_targets = df[[target]].loc[test_index]
# Gotcha for changes in array index
Na = df[~df.index.isin(train_index.tolist() + test_index.tolist())]
        if (Na.shape[0] > 0):
print('WARNING'*20)
print(Na)
# Print out the split of the bins...
if verbose:
dfs = {
'ALL data': df, 'test data': test_set, 'train data': train_set
}
for key_ in dfs.keys():
print('data split in: {}'.format(key_))
print(dfs[key_][SPLITvar].value_counts() / dfs[key_].shape[0])
# Now remove the SPLIT group
for set_ in train_set, test_set:
set_.drop(SPLITvar, axis=1, inplace=True)
return train_set, test_set, test_set_targets
def mk_predictions_for_3D_features(dsA=None, RFR_dict=None, res='4x5',
models_dict=None, features_used_dict=None,
stats=None, folder=None, target='Iodide',
use_updated_predictor_NetCDF=False,
save2NetCDF=False, plot2check=False,
models2compare=[], topmodels=None,
xsave_str='',
add_ensemble2ds=False,
verbose=True, debug=False):
"""
Make a NetCDF file of predicted target from feature variables for a given resolution
Parameters
----------
dsA (xr.Dataset): dataset object with variables to interpolate
RFR_dict (dict): dictionary of core variables and data
res (str): horizontal resolution (e.g. 4x5) of Dataset
save2NetCDF (bool): save interpolated Dataset to as a NetCDF?
features_used_dict (dict): dictionary of feature variables in models
    models_dict (dict): dictionary of RFR models and their names
stats (pd.DataFrame): dataframe of statistics on models in models_dict
folder (str): location of NetCDF file of feature variables
target (str): name of the species being predicted
    models2compare (list): list of models to make spatial predictions for (possible duplicate of topmodels)
topmodels (list): list of models to make spatial predictions for
xsave_str (str): string to include as suffix in filename used for saved NetCDF
add_ensemble2ds (bool): calculate std. dev. and mean for list of topmodels
verbose (bool): print out verbose output?
debug (bool): print out debugging output?
Returns
-------
(xr.Dataset)
"""
# Make sure the core dictionary is provided
assert (type(RFR_dict) ==
dict), 'Core variables must be provided as dict (RFR_dict)'
# Make sure a full list of models was provided
assert (len(models2compare) > 0), 'List of models to must be provided!'
# Inc. all the topmodels in the list of models to compare if they have been provided.
    if isinstance(topmodels, list):
models2compare += topmodels
    # Remove any double ups in the list of models to predict
models2compare = list(set(models2compare))
# Get the variables required here
if isinstance(models_dict, type(None)):
models_dict = RFR_dict['models_dict']
if isinstance(features_used_dict, type(None)):
features_used_dict = RFR_dict['features_used_dict']
# Get location to save file and set filename
if isinstance(folder, type(None)):
folder = utils.get_file_locations('data_root') + '/data/'
if isinstance(dsA, type(None)):
filename = 'Oi_prj_feature_variables_{}.nc'.format(res)
dsA = xr.open_dataset(folder + filename)
# - Make a dataset of predictions for each model
ds_l = []
for modelname in models2compare:
# get model
model = models_dict[modelname]
        # Get testing features
features_used = utils.get_model_features_used_dict(modelname)
# Make a DataSet of predicted values
ds_tmp = utils.mk_da_of_predicted_values(dsA=dsA, model=model, res=res,
modelname=modelname,
features_used=features_used)
# Add attributes to the prediction
ds_tmp = utils.add_attrs2target_ds(ds_tmp, add_global_attrs=False,
varname=modelname)
# Save to list
ds_l += [ds_tmp]
# Combine datasets
ds = xr.merge(ds_l)
# - Also get values for parameterisations
# if target == 'Iodide':
# # Chance et al (2013)
# param = u'Chance2014_STTxx2_I'
# arr = utils.calc_I_Chance2014_STTxx2_I(dsA['WOA_TEMP'].values)
# ds[param] = ds[modelname] # use existing array as dummy to fill
# ds[param].values = arr
# # MacDonald et al (2013)
# param = 'MacDonald2014_iodide'
# arr = utils.calc_I_MacDonald2014(dsA['WOA_TEMP'].values)
# ds[param] = ds[modelname] # use existing array as dummy to fill
# ds[param].values = arr
# Add ensemble to ds too
if add_ensemble2ds:
print('WARNING: Using topmodels for ensemble as calculated here')
var2template = list(ds.data_vars)[0]
ds = RFRanalysis.add_ensemble_avg_std_to_dataset(ds=ds, res=res,
target=target,
RFR_dict=RFR_dict,
topmodels=topmodels,
var2template=var2template,
save2NetCDF=False)
# Add global attributes
ds = utils.add_attrs2target_ds(ds, add_varname_attrs=False)
# Save to NetCDF
if save2NetCDF:
filename = 'Oi_prj_predicted_{}_{}{}.nc'.format(target, res, xsave_str)
ds.to_netcdf(filename)
else:
return ds
|
[
"sklearn.model_selection.GridSearchCV",
"sklearn.externals.joblib.dump",
"os.remove",
"numpy.random.seed",
"sparse2spatial.RFRanalysis.add_ensemble_avg_std_to_dataset",
"sklearn.model_selection.train_test_split",
"gc.collect",
"sparse2spatial.utils.mk_da_of_predicted_values",
"glob.glob",
"AC_tools.map_plot",
"sparse2spatial.utils.is_number",
"sparse2spatial.utils.calc_I_Chance2014_STTxx2_I",
"sparse2spatial.utils.calc_I_MacDonald2014",
"sklearn.model_selection.RandomizedSearchCV",
"xarray.merge",
"sparse2spatial.RFRanalysis.get_core_stats_on_current_models",
"numpy.linspace",
"sparse2spatial.utils.check_or_mk_directory_struture",
"pandas.qcut",
"sparse2spatial.utils.add_attrs2target_ds",
"sklearn.metrics.mean_squared_error",
"sklearn.ensemble.RandomForestRegressor",
"sparse2spatial.utils.get_file_locations",
"sklearn.externals.joblib.load",
"sparse2spatial.utils.get_model_features_used_dict",
"sys.exit",
"sparse2spatial.utils.get_hyperparameter_dict",
"numpy.log",
"xarray.open_dataset",
"sklearn.model_selection.StratifiedShuffleSplit",
"numpy.sqrt"
] |
[((2564, 2601), 'sparse2spatial.utils.get_file_locations', 'utils.get_file_locations', (['"""data_root"""'], {}), "('data_root')\n", (2588, 2601), True, 'import sparse2spatial.utils as utils\n'), ((3096, 3127), 'sparse2spatial.utils.get_hyperparameter_dict', 'utils.get_hyperparameter_dict', ([], {}), '()\n', (3125, 3127), True, 'import sparse2spatial.utils as utils\n'), ((10782, 10819), 'sparse2spatial.utils.get_file_locations', 'utils.get_file_locations', (['"""data_root"""'], {}), "('data_root')\n", (10806, 10819), True, 'import sparse2spatial.utils as utils\n'), ((16775, 16847), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {'n_estimators': '(10)', 'random_state': '(42)', 'criterion': '"""mse"""'}), "(n_estimators=10, random_state=42, criterion='mse')\n", (16796, 16847), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((21233, 21288), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {'random_state': '(42)', 'criterion': '"""mse"""'}), "(random_state=42, criterion='mse')\n", (21254, 21288), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((21447, 21592), 'sklearn.model_selection.RandomizedSearchCV', 'RandomizedSearchCV', ([], {'estimator': 'rf', 'param_distributions': 'random_grid', 'n_iter': '(100)', 'cv': 'cv', 'verbose': '(2)', 'random_state': '(42)', 'n_jobs': '(-1)', 'scoring': 'scoring'}), '(estimator=rf, param_distributions=random_grid, n_iter=\n 100, cv=cv, verbose=2, random_state=42, n_jobs=-1, scoring=scoring)\n', (21465, 21592), False, 'from sklearn.model_selection import RandomizedSearchCV\n'), ((22600, 22655), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {'random_state': '(42)', 'criterion': '"""mse"""'}), "(random_state=42, criterion='mse')\n", (22621, 22655), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((22714, 22814), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', ([], {'estimator': 'rf', 'param_grid': 'param_grid', 'cv': 'cv', 'n_jobs': '(-1)', 'verbose': '(2)', 'scoring': 'scoring'}), '(estimator=rf, param_grid=param_grid, cv=cv, n_jobs=-1, verbose\n =2, scoring=scoring)\n', (22726, 22814), False, 'from sklearn.model_selection import GridSearchCV\n'), ((23178, 23222), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['test_labels', 'predictions'], {}), '(test_labels, predictions)\n', (23196, 23222), False, 'from sklearn.metrics import mean_squared_error\n'), ((23234, 23246), 'numpy.sqrt', 'np.sqrt', (['MSE'], {}), '(MSE)\n', (23241, 23246), True, 'import numpy as np\n'), ((32363, 32397), 'xarray.open_dataset', 'xr.open_dataset', (['(folder + filename)'], {}), '(folder + filename)\n', (32378, 32397), True, 'import xarray as xr\n'), ((32734, 32755), 'glob.glob', 'glob.glob', (['models_str'], {}), '(models_str)\n', (32743, 32755), False, 'import glob\n'), ((33898, 33912), 'xarray.merge', 'xr.merge', (['ds_l'], {}), '(ds_l)\n', (33906, 33912), True, 'import xarray as xr\n'), ((36887, 36905), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (36901, 36905), True, 'import numpy as np\n'), ((43589, 43603), 'xarray.merge', 'xr.merge', (['ds_l'], {}), '(ds_l)\n', (43597, 43603), True, 'import xarray as xr\n'), ((44852, 44906), 'sparse2spatial.utils.add_attrs2target_ds', 'utils.add_attrs2target_ds', (['ds'], {'add_varname_attrs': '(False)'}), '(ds, add_varname_attrs=False)\n', (44877, 44906), True, 'import sparse2spatial.utils as utils\n'), ((2478, 2488), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2486, 2488), False, 
'import sys\n'), ((2921, 2931), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2929, 2931), False, 'import sys\n'), ((8895, 8961), 'sparse2spatial.RFRanalysis.get_core_stats_on_current_models', 'get_core_stats_on_current_models', ([], {'RFR_dict': 'RFR_dict', 'verbose': '(False)'}), '(RFR_dict=RFR_dict, verbose=False)\n', (8927, 8961), False, 'from sparse2spatial.RFRanalysis import get_core_stats_on_current_models\n'), ((18670, 18707), 'sparse2spatial.utils.get_file_locations', 'utils.get_file_locations', (['"""data_root"""'], {}), "('data_root')\n", (18694, 18707), True, 'import sparse2spatial.utils as utils\n'), ((18859, 18911), 'sklearn.externals.joblib.dump', 'joblib.dump', (['BEST_ESTIMATOR', '(folder + model_savename)'], {}), '(BEST_ESTIMATOR, folder + model_savename)\n', (18870, 18911), False, 'from sklearn.externals import joblib\n'), ((32244, 32281), 'sparse2spatial.utils.get_file_locations', 'utils.get_file_locations', (['"""data_root"""'], {}), "('data_root')\n", (32268, 32281), True, 'import sparse2spatial.utils as utils\n'), ((33413, 33451), 'sklearn.externals.joblib.load', 'joblib.load', (['builds4model[n_modelname]'], {}), '(builds4model[n_modelname])\n', (33424, 33451), False, 'from sklearn.externals import joblib\n'), ((33853, 33865), 'gc.collect', 'gc.collect', ([], {}), '()\n', (33863, 33865), False, 'import gc\n'), ((34078, 34134), 'sparse2spatial.utils.calc_I_Chance2014_STTxx2_I', 'utils.calc_I_Chance2014_STTxx2_I', (["dsA['WOA_TEMP'].values"], {}), "(dsA['WOA_TEMP'].values)\n", (34110, 34134), True, 'import sparse2spatial.utils as utils\n'), ((34327, 34377), 'sparse2spatial.utils.calc_I_MacDonald2014', 'utils.calc_I_MacDonald2014', (["dsA['WOA_TEMP'].values"], {}), "(dsA['WOA_TEMP'].values)\n", (34353, 34377), True, 'import sparse2spatial.utils as utils\n'), ((35577, 35587), 'sys.exit', 'sys.exit', ([], {}), '()\n', (35585, 35587), False, 'import sys\n'), ((37090, 37152), 'sklearn.model_selection.train_test_split', 'train_test_split', (['df'], {'test_size': '(0.2)', 'random_state': 'random_state'}), '(df, test_size=0.2, random_state=random_state)\n', (37106, 37152), False, 'from sklearn.model_selection import train_test_split\n'), ((38826, 38902), 'sklearn.model_selection.StratifiedShuffleSplit', 'StratifiedShuffleSplit', ([], {'n_splits': '(1)', 'test_size': '(0.2)', 'random_state': 'random_state'}), '(n_splits=1, test_size=0.2, random_state=random_state)\n', (38848, 38902), False, 'from sklearn.model_selection import StratifiedShuffleSplit\n'), ((42755, 42789), 'xarray.open_dataset', 'xr.open_dataset', (['(folder + filename)'], {}), '(folder + filename)\n', (42770, 42789), True, 'import xarray as xr\n'), ((43009, 43054), 'sparse2spatial.utils.get_model_features_used_dict', 'utils.get_model_features_used_dict', (['modelname'], {}), '(modelname)\n', (43043, 43054), True, 'import sparse2spatial.utils as utils\n'), ((43117, 43234), 'sparse2spatial.utils.mk_da_of_predicted_values', 'utils.mk_da_of_predicted_values', ([], {'dsA': 'dsA', 'model': 'model', 'res': 'res', 'modelname': 'modelname', 'features_used': 'features_used'}), '(dsA=dsA, model=model, res=res, modelname=\n modelname, features_used=features_used)\n', (43148, 43234), True, 'import sparse2spatial.utils as utils\n'), ((43389, 43465), 'sparse2spatial.utils.add_attrs2target_ds', 'utils.add_attrs2target_ds', (['ds_tmp'], {'add_global_attrs': '(False)', 'varname': 'modelname'}), '(ds_tmp, add_global_attrs=False, varname=modelname)\n', (43414, 43465), True, 'import sparse2spatial.utils as utils\n'), ((44369, 44537), 
'sparse2spatial.RFRanalysis.add_ensemble_avg_std_to_dataset', 'RFRanalysis.add_ensemble_avg_std_to_dataset', ([], {'ds': 'ds', 'res': 'res', 'target': 'target', 'RFR_dict': 'RFR_dict', 'topmodels': 'topmodels', 'var2template': 'var2template', 'save2NetCDF': '(False)'}), '(ds=ds, res=res, target=target,\n RFR_dict=RFR_dict, topmodels=topmodels, var2template=var2template,\n save2NetCDF=False)\n', (44412, 44537), True, 'import sparse2spatial.RFRanalysis as RFRanalysis\n'), ((3989, 4106), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {'random_state': 'random_state', 'n_estimators': 'n_estimators', 'oob_score': 'oob_score', 'criterion': '"""mse"""'}), "(random_state=random_state, n_estimators=n_estimators,\n oob_score=oob_score, criterion='mse')\n", (4010, 4106), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((5683, 5695), 'gc.collect', 'gc.collect', ([], {}), '()\n', (5693, 5695), False, 'import gc\n'), ((6244, 6280), 'sklearn.externals.joblib.load', 'joblib.load', (['(folder + model_savename)'], {}), '(folder + model_savename)\n', (6255, 6280), False, 'from sklearn.externals import joblib\n'), ((20025, 20065), 'numpy.linspace', 'np.linspace', ([], {'start': '(10)', 'stop': '(1000)', 'num': '(10)'}), '(start=10, stop=1000, num=10)\n', (20036, 20065), True, 'import numpy as np\n'), ((20410, 20438), 'numpy.linspace', 'np.linspace', (['(10)', '(110)'], {'num': '(11)'}), '(10, 110, num=11)\n', (20421, 20438), True, 'import numpy as np\n'), ((34665, 34690), 'AC_tools.map_plot', 'AC.map_plot', (['arr'], {'res': 'res'}), '(arr, res=res)\n', (34676, 34690), True, 'import AC_tools as AC\n'), ((42592, 42629), 'sparse2spatial.utils.get_file_locations', 'utils.get_file_locations', (['"""data_root"""'], {}), "('data_root')\n", (42616, 42629), True, 'import sparse2spatial.utils as utils\n'), ((4582, 4609), 'glob.glob', 'glob.glob', (["(folder + '*.pkl')"], {}), "(folder + '*.pkl')\n", (4591, 4609), False, 'import glob\n'), ((12887, 12923), 'sklearn.externals.joblib.load', 'joblib.load', (['(folder + model_savename)'], {}), '(folder + model_savename)\n', (12898, 12923), False, 'from sklearn.externals import joblib\n'), ((14377, 14413), 'sklearn.externals.joblib.load', 'joblib.load', (['(folder + model_savename)'], {}), '(folder + model_savename)\n', (14388, 14413), False, 'from sklearn.externals import joblib\n'), ((37848, 37866), 'numpy.log', 'np.log', (['df[target]'], {}), '(df[target])\n', (37854, 37866), True, 'import numpy as np\n'), ((38667, 38702), 'pandas.qcut', 'pd.qcut', (['df[target].values', 'nsplits'], {}), '(df[target].values, nsplits)\n', (38674, 38702), True, 'import pandas as pd\n'), ((5239, 5282), 'sklearn.externals.joblib.dump', 'joblib.dump', (['model', '(folder + model_savename)'], {}), '(model, folder + model_savename)\n', (5250, 5282), False, 'from sklearn.externals import joblib\n'), ((4772, 4784), 'os.remove', 'os.remove', (['i'], {}), '(i)\n', (4781, 4784), False, 'import os\n'), ((5494, 5532), 'sparse2spatial.utils.check_or_mk_directory_struture', 'utils.check_or_mk_directory_struture', ([], {}), '()\n', (5530, 5532), True, 'import sparse2spatial.utils as utils\n'), ((25415, 25437), 'sparse2spatial.utils.is_number', 'utils.is_number', (['value'], {}), '(value)\n', (25430, 25437), True, 'import sparse2spatial.utils as utils\n'), ((26851, 26861), 'sys.exit', 'sys.exit', ([], {}), '()\n', (26859, 26861), False, 'import sys\n')]
|
# coding: utf-8
#
import numpy as np
import json
from socket import *
import select
import pygame
from pygame.locals import *
import sys
HOST = gethostname()
PORT = 1113
BUFSIZE = 1024
ADDR = ("127.0.0.1", PORT)
USER = 'Server'
INTERVAL=0.01
VEROCITY=100
LIFETIME=1000
#Window.fullscreen=True
class DataReceiver:
def init(self):
self.label_data={}
self.t={}
self.udpServSock = socket(AF_INET, SOCK_DGRAM) #IPv4/UDP
self.udpServSock.bind(ADDR)
self.udpServSock.setblocking(0)
enabled_labels = [0,1,2]
labels = {"0":"aaa","1":"bbb","2":"ccc"}
def update(self):
data=None
draw_data={}
try:
while True:
data, addr = self.udpServSock.recvfrom(BUFSIZE)
if data is not None and len(data)>0:
print('...received from and returned to:', addr)
print(len(data))
while len(data)>=6:
idx=int.from_bytes(data[0:1],byteorder='little')
l=int.from_bytes(data[1:2],byteorder='little')
arr=np.frombuffer(data[2:6],dtype=np.float32)
data=data[6:]
y=arr[0]
draw_data[l]=y
print(idx,l,y)
        except:
            # No (more) data waiting on the non-blocking socket
            pass
if len(draw_data)>0:
for l,y in draw_data.items():
                # initialise the history buffer for a new label
label=int(l)
if label not in self.label_data:
self.label_data[label]=np.zeros((100,))
self.label_data[label][:]=np.nan
self.t[label]=0
self.label_data[label][self.t[l]]=y
for l,v in self.label_data.items():
self.t[l]+=1
self.t[l]=self.t[l]%100
self.label_data[l][self.t[l]]=np.nan
def stop(self):
self.udpServSock.close()
color_list=[(255,0,0),(0,255,0),(0,0,255),]
def main():
dr=DataReceiver()
    pygame.init()  # initialise pygame
screen = pygame.display.set_mode((600, 400))
pygame.display.set_caption("Pygame Test")
dr.init()
t=0
dt=10
while(True):
cnt=0
        for event in pygame.event.get():  # handle quit events
if event.type == QUIT:
pygame.quit()
sys.exit()
dr.update()
        screen.fill((0,0,0,))  # fill the background colour (RGB)
for k,v in dr.label_data.items():
cur=dr.t[k]
prev=np.nan
for i in range(len(v)):
y=v[(cur-i)%len(v)]
#if (not np.isnan(y)) and (not np.isnan(prev)):
# pygame.draw.line(screen, (255,255,255), (i*dt,prev), ((i+1)*dt,y),5)
if (not np.isnan(y)):
pygame.draw.circle(screen, color_list[k], (i*dt,int(y)), 3)
cnt+=1
prev=y
        pygame.display.update()  # refresh the display
        pygame.time.wait(30)  # update interval (milliseconds)
t+=1
if __name__ == "__main__":
main()
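# Example sender (hypothetical), matching the 6-byte packets parsed in
# DataReceiver.update(): a 1-byte index, a 1-byte label, then a 4-byte
# little-endian float32 value:
#
#     import struct
#     from socket import socket, AF_INET, SOCK_DGRAM
#     sender = socket(AF_INET, SOCK_DGRAM)
#     packet = struct.pack('<BBf', 0, 1, 123.4)   # idx=0, label=1, value=123.4
#     sender.sendto(packet, ("127.0.0.1", 1113))  # PORT defined above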
|
[
"pygame.quit",
"pygame.event.get",
"pygame.display.set_mode",
"numpy.frombuffer",
"numpy.zeros",
"pygame.init",
"numpy.isnan",
"pygame.time.wait",
"pygame.display.update",
"pygame.display.set_caption",
"sys.exit"
] |
[((1600, 1613), 'pygame.init', 'pygame.init', ([], {}), '()\n', (1611, 1613), False, 'import pygame\n'), ((1630, 1665), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(600, 400)'], {}), '((600, 400))\n', (1653, 1665), False, 'import pygame\n'), ((1667, 1708), 'pygame.display.set_caption', 'pygame.display.set_caption', (['"""Pygame Test"""'], {}), "('Pygame Test')\n", (1693, 1708), False, 'import pygame\n'), ((1769, 1787), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (1785, 1787), False, 'import pygame\n'), ((2274, 2297), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (2295, 2297), False, 'import pygame\n'), ((2307, 2327), 'pygame.time.wait', 'pygame.time.wait', (['(30)'], {}), '(30)\n', (2323, 2327), False, 'import pygame\n'), ((1829, 1842), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (1840, 1842), False, 'import pygame\n'), ((1848, 1858), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1856, 1858), False, 'import sys\n'), ((1242, 1258), 'numpy.zeros', 'np.zeros', (['(100,)'], {}), '((100,))\n', (1250, 1258), True, 'import numpy as np\n'), ((2170, 2181), 'numpy.isnan', 'np.isnan', (['y'], {}), '(y)\n', (2178, 2181), True, 'import numpy as np\n'), ((944, 986), 'numpy.frombuffer', 'np.frombuffer', (['data[2:6]'], {'dtype': 'np.float32'}), '(data[2:6], dtype=np.float32)\n', (957, 986), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
import os,sys
sys.path.append(os.path.join(os.path.dirname(__file__), '../../utility'))
sys.path.append(os.path.join(os.path.dirname(__file__), '../../network'))
import numpy as np
import tensorflow as tf
from agent import Agent
from eager_nn import ActorNet, CriticNet
from optimizer import *
from OU_noise import OrnsteinUhlenbeckProcess
class DDPG(Agent):
def __init__(self, *args, **kwargs):
self.max_action = kwargs.pop('max_action')
super().__init__(*args, **kwargs)
self.noise = OrnsteinUhlenbeckProcess(num_actions=self.n_actions)
self.tau = 0.01
if self.is_categorical:
self.Vmax, self.Vmin = 10.0, -10.0
self.delta_z = tf.lin_space(self.Vmin, self.Vmax, self.critic.N_atoms)
"""
self.delta_z = (self.Vmax - self.Vmin) / (self.q_eval.N_atoms - 1)
self.z_list = tf.constant([self.Vmin + i * self.delta_z for i in range(self.critic.N_atoms)],dtype=tf.float32)
self.z_list_broadcasted = tf.tile(tf.reshape(self.z_list,[1,self.q_eval.N_atoms]), tf.constant([self.n_actions,1]))
"""
def _build_net(self):
self.actor = ActorNet(model=self.model[0], out_dim=self.n_actions, name='ActorNet', opt=self._optimizer, lr=self.lr, trainable=self.trainable, max_action=self.max_action)
self.actor_target = ActorNet(model=self.model[0], out_dim=self.n_actions, name='ActorNet_target', trainable=False, max_action=self.max_action)
self.critic = CriticNet(model=self.model[1], out_dim=1, name='CriticNet', opt=self._optimizer, lr=self.lr, trainable=self.trainable)
self.critic_target = CriticNet(model=self.model[1], out_dim=1, name='CriticNet_target',trainable=False)
@tf.contrib.eager.defun
def inference(self, state):
return self.actor.inference(state)
def choose_action(self, observation):
observation = observation[np.newaxis, :]
action = self.inference(observation) + np.expand_dims(self.noise.generate(),axis=0)
return np.array(action[0])
def test_choose_action(self, observation):
observation = observation[np.newaxis, :]
action = self.inference(observation)
return np.array(action[0])
def update_q_net(self, replay_data, weights):
bs, ba, done, bs_, br, p_idx = replay_data
self.bs = np.array(bs, dtype=np.float32)
bs_ = np.array(bs_, dtype=np.float32)
eval_act_index = np.reshape(ba,(self.batch_size, self.n_actions))
reward = np.reshape(np.array(br, dtype=np.float32),(self.batch_size,1))
done = np.reshape(np.array(done, dtype=np.float32),(self.batch_size,1))
p_idx = np.reshape(p_idx,(self.batch_size,1))
return self._train_body(self.bs, eval_act_index, done, bs_, reward, p_idx, weights)
@tf.contrib.eager.defun
def _train_body(self, bs, eval_act_index, done, bs_, reward, p_idx, weights):
global_step = tf.train.get_or_create_global_step()
# update critic_net
with tf.device(self.device):
with tf.GradientTape() as tape:
                if self.is_categorical:
                    # Categorical (distributional) critic update is not implemented here
                    pass
else:
critic_next, critic_eval = self.critic_target.inference([bs_, self.actor_target.inference(bs_)]), self.critic.inference([bs, eval_act_index])
target_Q = reward + self.discount ** tf.cast(p_idx, tf.float32) * critic_next * (1. - done)
target_Q = tf.stop_gradient(target_Q)
                    # critic loss
error = tf.losses.huber_loss(labels=target_Q, predictions=critic_eval)
td_error = tf.abs(tf.reduce_mean(target_Q - critic_eval, axis=1))
critic_loss = tf.reduce_mean(error * weights, keepdims=True)
self.critic.optimize(critic_loss, global_step, tape)
# update actor_net
with tf.GradientTape() as tape:
actor_eval = tf.cast(self.actor.inference(bs), tf.float32)
actor_loss = -tf.reduce_mean(self.critic.inference([bs, actor_eval]))
self.actor.optimize(actor_loss, global_step, tape)
# check to replace target parameters
self.update_target_net()
return [critic_loss, actor_loss], td_error
def update_target_net(self):
# update critic_target_net
for param, target_param in zip(self.critic.weights, self.critic_target.weights):
target_param.assign(self.tau * param + (1 - self.tau) * target_param)
# update actor_target_net
for param, target_param in zip(self.actor.weights, self.actor_target.weights):
target_param.assign(self.tau * param + (1 - self.tau) * target_param)
return
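# Note: update_target_net above implements the DDPG soft ("Polyak") target update,
# theta_target <- tau * theta + (1 - tau) * theta_target, with tau = 0.01, rather
# than a hard periodic copy of the online network weights.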
class TD3(Agent):
def __init__(self, *args, **kwargs):
self.max_action = kwargs.pop('max_action')
super().__init__(*args, **kwargs)
self.noise = OrnsteinUhlenbeckProcess(num_actions=self.n_actions)
        self.tau = 0.01
        self.policy_freq = 2
        # Counter used for the TD3 delayed policy updates
        self._iteration = 0
def _build_net(self):
self.actor = ActorNet(model=self.model[0], out_dim=self.n_actions, name='ActorNet', opt=self._optimizer, lr=self.lr, trainable=True, max_action=self.max_action)
self.actor_target = ActorNet(model=self.model[0], out_dim=self.n_actions, name='ActorNet_target', trainable=False, max_action=self.max_action)
self.critic1 = CriticNet(model=self.model[1], out_dim=1, name='CriticNet1', opt=self._optimizer, lr=self.lr, trainable=True)
self.critic2 = CriticNet(model=self.model[1], out_dim=1, name='CriticNet2', opt=self._optimizer, lr=self.lr, trainable=True)
self.critic_target1 = CriticNet(model=self.model[1], out_dim=1, name='CriticNet_target1',trainable=False)
self.critic_target2 = CriticNet(model=self.model[1], out_dim=1, name='CriticNet_target2',trainable=False)
@tf.contrib.eager.defun
def inference(self, state):
return self.actor.inference(state)
def choose_action(self, observation):
observation = observation[np.newaxis, :]
action = self.inference(observation) + np.expand_dims(self.noise.generate(),axis=0)
return np.array(action[0])
def test_choose_action(self, observation):
observation = observation[np.newaxis, :]
action = self.inference(observation)
return np.array(action[0])
def update_q_net(self, replay_data, weights):
bs, ba, done, bs_, br, p_idx = replay_data
self.bs = np.array(bs, dtype=np.float32)
bs_ = np.array(bs_, dtype=np.float32)
eval_act_index = np.reshape(ba,(self.batch_size, self.n_actions))
reward = np.reshape(np.array(br, dtype=np.float32),(self.batch_size,1))
done = np.reshape(np.array(done, dtype=np.float32),(self.batch_size,1))
p_idx = np.reshape(p_idx,(self.batch_size,1))
critic_loss1, critic_loss2, td_error = self._train_critic(self.bs, eval_act_index, done, bs_, reward, p_idx, weights)
if self._iteration % self.policy_freq == 0:
self.actor_loss = self._train_actor(self.bs)
self._iteration += 1
return [critic_loss1, critic_loss2, self.actor_loss], td_error
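    # The two methods below implement the three TD3 modifications to DDPG as used
    # in update_q_net above: (1) clipped double-Q learning via tf.minimum over the
    # two target critics, (2) target policy smoothing via the clipped Gaussian noise
    # added to the target action, and (3) delayed policy updates controlled by
    # self.policy_freq.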
@tf.contrib.eager.defun
def _train_critic(self, bs, eval_act_index, done, bs_, reward, p_idx, weights, noise_clip=0.5):
with tf.device(self.device):
global_step = tf.train.get_or_create_global_step()
noise = tf.clip_by_value(tf.random.normal(shape=[self.batch_size,1],mean=0.0, stddev=0.2) , -noise_clip, noise_clip)
next_action = tf.clip_by_value(self.actor_target.inference(bs_) + noise, -self.max_action, self.max_action)
critic_next1, critic_next2 = self.critic_target1.inference([bs_, next_action]), self.critic_target2.inference([bs_, next_action])
critic_next = tf.minimum(critic_next1, critic_next2)
target_Q = reward + self.discount ** tf.cast(p_idx, tf.float32) * critic_next * (1. - done)
target_Q = tf.stop_gradient(target_Q)
# update critic_net1
with tf.GradientTape() as tape:
critic_eval1 = self.critic1.inference([bs, eval_act_index])
error = tf.losses.huber_loss(labels=target_Q, predictions=critic_eval1)
td_error = tf.abs(tf.reduce_mean(target_Q - critic_eval1, axis=1))
critic_loss1 = tf.reduce_mean(error * weights, keepdims=True)
self.critic1.optimize(critic_loss1, global_step, tape)
with tf.GradientTape() as tape:
critic_eval2 = self.critic2.inference([bs, eval_act_index])
critic_loss2 = tf.reduce_mean(tf.losses.huber_loss(labels=target_Q, predictions=critic_eval2) * weights, keepdims=True)
self.critic2.optimize(critic_loss2, global_step, tape)
return critic_loss1, critic_loss2, td_error
@tf.contrib.eager.defun
def _train_actor(self, bs):
with tf.device(self.device):
global_step = tf.train.get_or_create_global_step()
# update actor_net
with tf.GradientTape() as tape:
actor_eval = tf.cast(self.actor.inference(bs), tf.float32)
self.actor_loss = -tf.reduce_mean(self.critic1.inference([bs, actor_eval]))
self.actor.optimize(self.actor_loss, global_step, tape)
# check to replace target parameters
self.update_target_net()
return self.actor_loss
def update_target_net(self):
# update critic_target_net1
for param, target_param in zip(self.critic1.weights, self.critic_target1.weights):
target_param.assign(self.tau * param + (1 - self.tau) * target_param)
# update critic_target_net2
for param, target_param in zip(self.critic2.weights, self.critic_target2.weights):
target_param.assign(self.tau * param + (1 - self.tau) * target_param)
# update actor_target_net
for param, target_param in zip(self.actor.weights, self.actor_target.weights):
target_param.assign(self.tau * param + (1 - self.tau) * target_param)
return
|
[
"tensorflow.random.normal",
"eager_nn.ActorNet",
"eager_nn.CriticNet",
"os.path.dirname",
"OU_noise.OrnsteinUhlenbeckProcess",
"tensorflow.train.get_or_create_global_step",
"tensorflow.device",
"tensorflow.stop_gradient",
"tensorflow.minimum",
"tensorflow.reduce_mean",
"tensorflow.cast",
"tensorflow.losses.huber_loss",
"numpy.array",
"numpy.reshape",
"tensorflow.lin_space",
"tensorflow.GradientTape"
] |
[((67, 92), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (82, 92), False, 'import os, sys\n'), ((141, 166), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (156, 166), False, 'import os, sys\n'), ((539, 591), 'OU_noise.OrnsteinUhlenbeckProcess', 'OrnsteinUhlenbeckProcess', ([], {'num_actions': 'self.n_actions'}), '(num_actions=self.n_actions)\n', (563, 591), False, 'from OU_noise import OrnsteinUhlenbeckProcess\n'), ((1188, 1355), 'eager_nn.ActorNet', 'ActorNet', ([], {'model': 'self.model[0]', 'out_dim': 'self.n_actions', 'name': '"""ActorNet"""', 'opt': 'self._optimizer', 'lr': 'self.lr', 'trainable': 'self.trainable', 'max_action': 'self.max_action'}), "(model=self.model[0], out_dim=self.n_actions, name='ActorNet', opt=\n self._optimizer, lr=self.lr, trainable=self.trainable, max_action=self.\n max_action)\n", (1196, 1355), False, 'from eager_nn import ActorNet, CriticNet\n'), ((1374, 1501), 'eager_nn.ActorNet', 'ActorNet', ([], {'model': 'self.model[0]', 'out_dim': 'self.n_actions', 'name': '"""ActorNet_target"""', 'trainable': '(False)', 'max_action': 'self.max_action'}), "(model=self.model[0], out_dim=self.n_actions, name=\n 'ActorNet_target', trainable=False, max_action=self.max_action)\n", (1382, 1501), False, 'from eager_nn import ActorNet, CriticNet\n'), ((1520, 1643), 'eager_nn.CriticNet', 'CriticNet', ([], {'model': 'self.model[1]', 'out_dim': '(1)', 'name': '"""CriticNet"""', 'opt': 'self._optimizer', 'lr': 'self.lr', 'trainable': 'self.trainable'}), "(model=self.model[1], out_dim=1, name='CriticNet', opt=self.\n _optimizer, lr=self.lr, trainable=self.trainable)\n", (1529, 1643), False, 'from eager_nn import ActorNet, CriticNet\n'), ((1668, 1755), 'eager_nn.CriticNet', 'CriticNet', ([], {'model': 'self.model[1]', 'out_dim': '(1)', 'name': '"""CriticNet_target"""', 'trainable': '(False)'}), "(model=self.model[1], out_dim=1, name='CriticNet_target',\n trainable=False)\n", (1677, 1755), False, 'from eager_nn import ActorNet, CriticNet\n'), ((2054, 2073), 'numpy.array', 'np.array', (['action[0]'], {}), '(action[0])\n', (2062, 2073), True, 'import numpy as np\n'), ((2235, 2254), 'numpy.array', 'np.array', (['action[0]'], {}), '(action[0])\n', (2243, 2254), True, 'import numpy as np\n'), ((2376, 2406), 'numpy.array', 'np.array', (['bs'], {'dtype': 'np.float32'}), '(bs, dtype=np.float32)\n', (2384, 2406), True, 'import numpy as np\n'), ((2421, 2452), 'numpy.array', 'np.array', (['bs_'], {'dtype': 'np.float32'}), '(bs_, dtype=np.float32)\n', (2429, 2452), True, 'import numpy as np\n'), ((2478, 2527), 'numpy.reshape', 'np.reshape', (['ba', '(self.batch_size, self.n_actions)'], {}), '(ba, (self.batch_size, self.n_actions))\n', (2488, 2527), True, 'import numpy as np\n'), ((2703, 2742), 'numpy.reshape', 'np.reshape', (['p_idx', '(self.batch_size, 1)'], {}), '(p_idx, (self.batch_size, 1))\n', (2713, 2742), True, 'import numpy as np\n'), ((2966, 3002), 'tensorflow.train.get_or_create_global_step', 'tf.train.get_or_create_global_step', ([], {}), '()\n', (3000, 3002), True, 'import tensorflow as tf\n'), ((4976, 5028), 'OU_noise.OrnsteinUhlenbeckProcess', 'OrnsteinUhlenbeckProcess', ([], {'num_actions': 'self.n_actions'}), '(num_actions=self.n_actions)\n', (5000, 5028), False, 'from OU_noise import OrnsteinUhlenbeckProcess\n'), ((5130, 5282), 'eager_nn.ActorNet', 'ActorNet', ([], {'model': 'self.model[0]', 'out_dim': 'self.n_actions', 'name': '"""ActorNet"""', 'opt': 'self._optimizer', 'lr': 'self.lr', 'trainable': '(True)', 'max_action': 
'self.max_action'}), "(model=self.model[0], out_dim=self.n_actions, name='ActorNet', opt=\n self._optimizer, lr=self.lr, trainable=True, max_action=self.max_action)\n", (5138, 5282), False, 'from eager_nn import ActorNet, CriticNet\n'), ((5306, 5433), 'eager_nn.ActorNet', 'ActorNet', ([], {'model': 'self.model[0]', 'out_dim': 'self.n_actions', 'name': '"""ActorNet_target"""', 'trainable': '(False)', 'max_action': 'self.max_action'}), "(model=self.model[0], out_dim=self.n_actions, name=\n 'ActorNet_target', trainable=False, max_action=self.max_action)\n", (5314, 5433), False, 'from eager_nn import ActorNet, CriticNet\n'), ((5453, 5567), 'eager_nn.CriticNet', 'CriticNet', ([], {'model': 'self.model[1]', 'out_dim': '(1)', 'name': '"""CriticNet1"""', 'opt': 'self._optimizer', 'lr': 'self.lr', 'trainable': '(True)'}), "(model=self.model[1], out_dim=1, name='CriticNet1', opt=self.\n _optimizer, lr=self.lr, trainable=True)\n", (5462, 5567), False, 'from eager_nn import ActorNet, CriticNet\n'), ((5586, 5700), 'eager_nn.CriticNet', 'CriticNet', ([], {'model': 'self.model[1]', 'out_dim': '(1)', 'name': '"""CriticNet2"""', 'opt': 'self._optimizer', 'lr': 'self.lr', 'trainable': '(True)'}), "(model=self.model[1], out_dim=1, name='CriticNet2', opt=self.\n _optimizer, lr=self.lr, trainable=True)\n", (5595, 5700), False, 'from eager_nn import ActorNet, CriticNet\n'), ((5726, 5814), 'eager_nn.CriticNet', 'CriticNet', ([], {'model': 'self.model[1]', 'out_dim': '(1)', 'name': '"""CriticNet_target1"""', 'trainable': '(False)'}), "(model=self.model[1], out_dim=1, name='CriticNet_target1',\n trainable=False)\n", (5735, 5814), False, 'from eager_nn import ActorNet, CriticNet\n'), ((5840, 5928), 'eager_nn.CriticNet', 'CriticNet', ([], {'model': 'self.model[1]', 'out_dim': '(1)', 'name': '"""CriticNet_target2"""', 'trainable': '(False)'}), "(model=self.model[1], out_dim=1, name='CriticNet_target2',\n trainable=False)\n", (5849, 5928), False, 'from eager_nn import ActorNet, CriticNet\n'), ((6232, 6251), 'numpy.array', 'np.array', (['action[0]'], {}), '(action[0])\n', (6240, 6251), True, 'import numpy as np\n'), ((6409, 6428), 'numpy.array', 'np.array', (['action[0]'], {}), '(action[0])\n', (6417, 6428), True, 'import numpy as np\n'), ((6550, 6580), 'numpy.array', 'np.array', (['bs'], {'dtype': 'np.float32'}), '(bs, dtype=np.float32)\n', (6558, 6580), True, 'import numpy as np\n'), ((6595, 6626), 'numpy.array', 'np.array', (['bs_'], {'dtype': 'np.float32'}), '(bs_, dtype=np.float32)\n', (6603, 6626), True, 'import numpy as np\n'), ((6652, 6701), 'numpy.reshape', 'np.reshape', (['ba', '(self.batch_size, self.n_actions)'], {}), '(ba, (self.batch_size, self.n_actions))\n', (6662, 6701), True, 'import numpy as np\n'), ((6877, 6916), 'numpy.reshape', 'np.reshape', (['p_idx', '(self.batch_size, 1)'], {}), '(p_idx, (self.batch_size, 1))\n', (6887, 6916), True, 'import numpy as np\n'), ((722, 777), 'tensorflow.lin_space', 'tf.lin_space', (['self.Vmin', 'self.Vmax', 'self.critic.N_atoms'], {}), '(self.Vmin, self.Vmax, self.critic.N_atoms)\n', (734, 777), True, 'import tensorflow as tf\n'), ((2555, 2585), 'numpy.array', 'np.array', (['br'], {'dtype': 'np.float32'}), '(br, dtype=np.float32)\n', (2563, 2585), True, 'import numpy as np\n'), ((2633, 2665), 'numpy.array', 'np.array', (['done'], {'dtype': 'np.float32'}), '(done, dtype=np.float32)\n', (2641, 2665), True, 'import numpy as np\n'), ((3045, 3067), 'tensorflow.device', 'tf.device', (['self.device'], {}), '(self.device)\n', (3054, 3067), True, 'import tensorflow as 
tf\n'), ((6729, 6759), 'numpy.array', 'np.array', (['br'], {'dtype': 'np.float32'}), '(br, dtype=np.float32)\n', (6737, 6759), True, 'import numpy as np\n'), ((6807, 6839), 'numpy.array', 'np.array', (['done'], {'dtype': 'np.float32'}), '(done, dtype=np.float32)\n', (6815, 6839), True, 'import numpy as np\n'), ((7393, 7415), 'tensorflow.device', 'tf.device', (['self.device'], {}), '(self.device)\n', (7402, 7415), True, 'import tensorflow as tf\n'), ((7443, 7479), 'tensorflow.train.get_or_create_global_step', 'tf.train.get_or_create_global_step', ([], {}), '()\n', (7477, 7479), True, 'import tensorflow as tf\n'), ((7898, 7936), 'tensorflow.minimum', 'tf.minimum', (['critic_next1', 'critic_next2'], {}), '(critic_next1, critic_next2)\n', (7908, 7936), True, 'import tensorflow as tf\n'), ((8064, 8090), 'tensorflow.stop_gradient', 'tf.stop_gradient', (['target_Q'], {}), '(target_Q)\n', (8080, 8090), True, 'import tensorflow as tf\n'), ((9024, 9046), 'tensorflow.device', 'tf.device', (['self.device'], {}), '(self.device)\n', (9033, 9046), True, 'import tensorflow as tf\n'), ((9074, 9110), 'tensorflow.train.get_or_create_global_step', 'tf.train.get_or_create_global_step', ([], {}), '()\n', (9108, 9110), True, 'import tensorflow as tf\n'), ((3086, 3103), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (3101, 3103), True, 'import tensorflow as tf\n'), ((3939, 3956), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (3954, 3956), True, 'import tensorflow as tf\n'), ((7518, 7584), 'tensorflow.random.normal', 'tf.random.normal', ([], {'shape': '[self.batch_size, 1]', 'mean': '(0.0)', 'stddev': '(0.2)'}), '(shape=[self.batch_size, 1], mean=0.0, stddev=0.2)\n', (7534, 7584), True, 'import tensorflow as tf\n'), ((8142, 8159), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (8157, 8159), True, 'import tensorflow as tf\n'), ((8269, 8332), 'tensorflow.losses.huber_loss', 'tf.losses.huber_loss', ([], {'labels': 'target_Q', 'predictions': 'critic_eval1'}), '(labels=target_Q, predictions=critic_eval1)\n', (8289, 8332), True, 'import tensorflow as tf\n'), ((8447, 8493), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['(error * weights)'], {'keepdims': '(True)'}), '(error * weights, keepdims=True)\n', (8461, 8493), True, 'import tensorflow as tf\n'), ((8579, 8596), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (8594, 8596), True, 'import tensorflow as tf\n'), ((9159, 9176), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (9174, 9176), True, 'import tensorflow as tf\n'), ((3505, 3531), 'tensorflow.stop_gradient', 'tf.stop_gradient', (['target_Q'], {}), '(target_Q)\n', (3521, 3531), True, 'import tensorflow as tf\n'), ((3595, 3657), 'tensorflow.losses.huber_loss', 'tf.losses.huber_loss', ([], {'labels': 'target_Q', 'predictions': 'critic_eval'}), '(labels=target_Q, predictions=critic_eval)\n', (3615, 3657), True, 'import tensorflow as tf\n'), ((3778, 3824), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['(error * weights)'], {'keepdims': '(True)'}), '(error * weights, keepdims=True)\n', (3792, 3824), True, 'import tensorflow as tf\n'), ((8367, 8414), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['(target_Q - critic_eval1)'], {'axis': '(1)'}), '(target_Q - critic_eval1, axis=1)\n', (8381, 8414), True, 'import tensorflow as tf\n'), ((3696, 3742), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['(target_Q - critic_eval)'], {'axis': '(1)'}), '(target_Q - critic_eval, axis=1)\n', (3710, 3742), True, 'import tensorflow as tf\n'), ((8728, 8791), 
'tensorflow.losses.huber_loss', 'tf.losses.huber_loss', ([], {'labels': 'target_Q', 'predictions': 'critic_eval2'}), '(labels=target_Q, predictions=critic_eval2)\n', (8748, 8791), True, 'import tensorflow as tf\n'), ((7986, 8012), 'tensorflow.cast', 'tf.cast', (['p_idx', 'tf.float32'], {}), '(p_idx, tf.float32)\n', (7993, 8012), True, 'import tensorflow as tf\n'), ((3419, 3445), 'tensorflow.cast', 'tf.cast', (['p_idx', 'tf.float32'], {}), '(p_idx, tf.float32)\n', (3426, 3445), True, 'import tensorflow as tf\n')]
|
# Author by CRS-club and wizard
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
import numpy as np
import datetime
import logging
logger = logging.getLogger(__name__)
class MetricsCalculator():
def __init__(self, name, mode):
self.name = name
self.mode = mode # 'train', 'val', 'test'
self.reset()
def reset(self):
logger.info('Resetting {} metrics...'.format(self.mode))
self.aggr_acc1 = 0.0
self.aggr_acc5 = 0.0
self.aggr_loss = 0.0
self.aggr_batch_size = 0
def finalize_metrics(self):
self.avg_acc1 = self.aggr_acc1 / self.aggr_batch_size
self.avg_acc5 = self.aggr_acc5 / self.aggr_batch_size
self.avg_loss = self.aggr_loss / self.aggr_batch_size
def get_computed_metrics(self):
json_stats = {}
json_stats['avg_loss'] = self.avg_loss
json_stats['avg_acc1'] = self.avg_acc1
json_stats['avg_acc5'] = self.avg_acc5
return json_stats
def calculate_metrics(self, loss, softmax, labels):
accuracy1 = compute_topk_accuracy(softmax, labels, top_k=1) * 100.
accuracy5 = compute_topk_accuracy(softmax, labels, top_k=5) * 100.
return accuracy1, accuracy5
def accumulate(self, loss, softmax, labels):
cur_batch_size = softmax.shape[0]
# if returned loss is None for e.g. test, just set loss to be 0.
if loss is None:
cur_loss = 0.
else:
            cur_loss = np.mean(np.array(loss))
self.aggr_batch_size += cur_batch_size
self.aggr_loss += cur_loss * cur_batch_size
accuracy1 = compute_topk_accuracy(softmax, labels, top_k=1) * 100.
accuracy5 = compute_topk_accuracy(softmax, labels, top_k=5) * 100.
self.aggr_acc1 += accuracy1 * cur_batch_size
self.aggr_acc5 += accuracy5 * cur_batch_size
return
# ----------------------------------------------
# other utils
# ----------------------------------------------
def compute_topk_correct_hits(top_k, preds, labels):
    '''Compute the number of correct hits'''
batch_size = preds.shape[0]
top_k_preds = np.zeros((batch_size, top_k), dtype=np.float32)
for i in range(batch_size):
top_k_preds[i, :] = np.argsort(-preds[i, :])[:top_k]
correctness = np.zeros(batch_size, dtype=np.int32)
for i in range(batch_size):
if labels[i] in top_k_preds[i, :].astype(np.int32).tolist():
correctness[i] = 1
correct_hits = sum(correctness)
return correct_hits
def compute_topk_accuracy(softmax, labels, top_k):
computed_metrics = {}
assert labels.shape[0] == softmax.shape[0], "Batch size mismatch."
aggr_batch_size = labels.shape[0]
aggr_top_k_correct_hits = compute_topk_correct_hits(top_k, softmax, labels)
# normalize results
computed_metrics = \
float(aggr_top_k_correct_hits) / aggr_batch_size
return computed_metrics
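# Worked example (hypothetical values): for softmax = [[0.1, 0.7, 0.2]] and
# labels = [2], compute_topk_accuracy returns 0.0 for top_k=1 (the argmax is
# class 1) and 1.0 for top_k=2 (class 2 is among the two highest scores).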
|
[
"numpy.argsort",
"numpy.zeros",
"numpy.array",
"logging.getLogger"
] |
[((243, 270), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (260, 270), False, 'import logging\n'), ((2244, 2291), 'numpy.zeros', 'np.zeros', (['(batch_size, top_k)'], {'dtype': 'np.float32'}), '((batch_size, top_k), dtype=np.float32)\n', (2252, 2291), True, 'import numpy as np\n'), ((2404, 2440), 'numpy.zeros', 'np.zeros', (['batch_size'], {'dtype': 'np.int32'}), '(batch_size, dtype=np.int32)\n', (2412, 2440), True, 'import numpy as np\n'), ((2352, 2376), 'numpy.argsort', 'np.argsort', (['(-preds[i, :])'], {}), '(-preds[i, :])\n', (2362, 2376), True, 'import numpy as np\n'), ((1591, 1605), 'numpy.array', 'np.array', (['loss'], {}), '(loss)\n', (1599, 1605), True, 'import numpy as np\n')]
|
import numpy as np
from scipy.stats.mstats import theilslopes
from scipy.interpolate import CubicSpline
import scipy.stats  # needed for scipy.stats.norm in gaussian_broaden
import matplotlib.pyplot as plt
# define constants
_c = 299792.458 # speed of light in km s^-1
class SpecAnalysis:
'''Analyse astronomy spectra.
'''
def __init__(self, wavelength, flux, flux_err=None):
'''
Parameters
----------
wavelength : List[Real] or 1darray
Input wavelengths. Needs to be monotonically increasing.
flux : List[Real] or 1darray
Input flux profile.
flux_err : List[Real] or 1darray, optional
Input flux error. If None, then set to array of 0.
'''
# if no error, then set error to 0
if flux_err is None:
flux_err = np.full(len(flux), 0)
# change to numpy array
try:
self.wl = np.array(wavelength, dtype=float)
self.flux = np.array(flux, dtype=float)
self.flux_err = np.array(flux_err, dtype=float)
except:
raise ValueError('Could not turn input into numpy arrays.')
# check monotonic increasing wavelength
if not ((self.wl[1:] - self.wl[:-1]) > 0).all():
raise ValueError('Wavelength needs to be strictly increasing')
# check length of all input equal
if not len(self.wl) == len(self.flux) == len(self.flux_err):
raise ValueError(
'wavelength, flux, flux_err are not the same length')
def save(self, wavelength, flux, flux_err=None):
'''Save the values given into the class.
Parameters
----------
wavelength : List[Real] or 1darray
Input wavelengths. Needs to be monotonically increasing.
flux : List[Real] or 1darray
Input flux profile.
flux_err : List[Real] or 1darray, optional
Input flux error. If None, then set to array 0.
'''
# if no error, then set error to 0
if flux_err is None:
flux_err = np.full(len(flux), 0)
self.wl = wavelength
self.flux = flux
self.flux_err = flux_err
def mask_region(self, masks, rm='out'):
'''Mask (remove) a region of the spectrum.
Parameters
----------
masks : list of lists[float]
The regions which you want to mask.
        rm : str, optional
            Whether to remove the points outside ('out', the default) or inside
            ('in') the given regions. Accepted values: 'in' and 'out'.
Returns
-------
wl, flux, flux_err : 3darray
Masked wavelength, flux, and flux error.
'''
# make mask
mask_full = np.zeros(len(self.wl), dtype=bool)
for lower, upper in masks:
mask = (lower <= self.wl) & (self.wl <= upper)
mask_full = mask_full | mask
# flip if masking inside
if rm == 'in':
mask_full = ~mask_full
# apply mask
self.save(
self.wl[mask_full], self.flux[mask_full], self.flux_err[mask_full]
)
return self.wl, self.flux, self.flux_err
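    # Usage sketch (hypothetical values, in the same units as the wavelengths):
    #     spec.mask_region([[500.0, 510.0]])            # keep only 500-510
    #     spec.mask_region([[500.0, 510.0]], rm='in')   # remove 500-510 instead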
def cut(self, center, upper=10, lower=10, domain='wl'):
'''Cuts the wavelength, flux, and flux error and returns the values
between center - lower and center + upper.
Parameters
----------
center : Real
The center of the wavelengths where the cut should be taken, in the
same units as the wavelength.
upper : Positive Real, optional
The amount to go above the center when taking the cut, in the same
            units as the wavelength if domain='wl', or in km/s if domain='vr'.
lower : Positive Real, optional
The amount to go below the center when taking the cut, in the same
            units as the wavelength if domain='wl', or in km/s if domain='vr'.
domain : str, optional
The domain upper and lower is in. Either wl or vr (wavelength,
radial velocity respectively).
Returns
-------
        wl, flux, flux_err : 1darray
Cut wavelength, flux, and flux error.
'''
# convert to wavelength
if domain == 'vr':
lower = vr_to_wl(lower, center=center)
upper = vr_to_wl(upper, center=center)
# cut
low = center - lower
high = center + upper
self.mask_region([[low, high]])
return self.wl, self.flux, self.flux_err
def sigma_clip(self, func, args=(), sigma_cut=3, iterations=1):
'''Clip outliers based on a sigma cut.
Parameters
----------
func : callable ``func(self.wl, self.flux, self.flux_err, *args)``.
The function to fit the spectrum to.
args : tuple, optional
Extra arguments passed to func,
i.e., ``func(self.wl, self.flux, self.flux_err, *args)``.
sigma_cut : float
The tolerance on sigma clip.
iterations : int
The number of times to iterate the sigma clip.
Returns
-------
        wl, flux, flux_err : 1darray
Clipped wavelength, flux, and flux error.
'''
for _ in range(iterations):
flux_fit = func(self.wl, self.flux, self.flux_err, *args)
diff = self.flux - flux_fit
sigma = np.std(diff)
mask = np.abs(diff) < sigma*sigma_cut
self.save(self.wl[mask], self.flux[mask], self.flux_err[mask])
return self.wl, self.flux, self.flux_err
def cont_norm(self, center, mask_step=0.01, sigma_cut=3, iterations=3):
'''Normalise the continuum. Assumes you're normalising a line profile.
1. Do basic normalisation.
2. Sigma clip lines and outliers.
3. Fit theilslope on the clipped spectrum.
4. Remove fit from line.
Only works for a small region with linear continuum.
Parameters
----------
center : float
The center of the line.
mask_step : float
Width/2 of the line.
sigma_cut : float
The tolerance on sigma clip.
iterations : int
The number of times to iterate the sigma clip.
Returns
-------
        wl, flux, flux_err : 1darray
Normalised wavelength, flux, and flux error.
'''
# save original values
wl = self.wl
flux = self.flux
flux_err = self.flux_err
        # first do a rough normalisation with the main line removed
masks = [[center - mask_step, center + mask_step]]
self.mask_region(masks, rm='in')
med = np.median(self.flux)
flux = flux/med
flux_err = flux_err/med
# sigma clip
self.save(wl, flux, flux_err)
median = lambda x,y,z:np.median(y)
self.sigma_clip(median, sigma_cut=sigma_cut, iterations=iterations)
# fit
fit = theilslopes(self.flux, self.wl)
grad = fit[0]
intercept = fit[1]
# remove linear slope
fit = wl*grad + intercept
flux = flux/fit
flux_err = flux_err/fit
self.save(wl, flux, flux_err)
return self.wl, self.flux, self.flux_err
    def gaussian_broaden(self, center, sigma=0, num=None):
'''Only works for synthetic spectra because it uses cubicspline. Might
be unpredictable for synthetic spectra with more than 1 line or gaps.
TODO: investigate behaviour on harder to deal with synthetic spectra.
'''
# convert to velocity space
        vr = wl_to_vr(self.wl, center=center)
cs = CubicSpline(vr, self.flux)
# set steps
if num is None:
num = int((vr[-1] - vr[0]) / np.min(vr[1:] - vr[:-1])) + 1
num *= 2
# set kernel
g_gen = scipy.stats.norm(0, sigma/2.35482) # convert FWHM to sigma
# convolve
tau = np.linspace(vr[0], vr[-1], num)
convolver = np.array([g_gen.pdf(t) for t in tau])
convolver /= np.sum(convolver)
integrand = [cs(vr - t)*convolver[i] for i, t in enumerate(tau)]
flux_conv = np.sum(integrand, axis = 0)
self.flux = flux_conv
return self.wl, self.flux
def polyfit(x, y, x_out=None, deg=1):
'''Fits a polynomial to input data after shifting data to 0.
Parameters
----------
x : List[Real] or 1darray
x values to fit over.
y : List[Real] or 1darray
y values to fit over.
x_out : List[Real] or None
Output x values. If None, then return the fit.
deg : Int, optional
Degree of fitted polynomial. 1 by default.
Returns
-------
y_out or center, fit: 1darray or tuple(float)
Output y values at the input x_out values or x mean, grad, intercept.
'''
# convert to numpy arrays
x = np.array(x)
y = np.array(y)
# fit
center_x = np.mean(x)
fit = np.polyfit(x - center_x, y, deg=deg)
# return fit and center
if x_out is None:
return center_x, fit[0], fit[1]
# return y evaluated at x_out
y_out = np.polyval(fit, x_out - center_x)
return y_out
def cut_wavelength(wavelength, center=670.9659, upper=10, lower=10):
"""Cuts the wavelength returns the values between center - lower and center + upper. Useful for plotting mostly because many functions return a cut line profile but not cut wavelength.
Parameters
----------
wavelength : List[Real] or 1darray
Input wavelengths. Needs to be monotonically increasing.
center : Real, optional
The center of the wavelengths where the cut should be taken, in the same units as the wavelength.
upper : Positive Real, optional
The amount to go above the center when taking the cut, in the same units as the wavelength.
lower : Positive Real, optional
The amount to go below the center when taking the cut, in the same units as the wavelength.
Returns
-------
    wl_cut : 1darray
Cut wavelengths.
"""
wavelength = np.array(wavelength)
low = center - lower
high = center + upper
wl_cut = wavelength[(low <= wavelength) & (high >= wavelength)]
return wl_cut
def wl_to_vr(wl, center=670.9659):
'''Converts wavelengths to radial velocity, works for errors too.
Parameters
----------
wl : float or ndarray
Wavelength to be converted, in nm.
center : float
The wavelength that vr=0 is at.
Returns
-------
vr : float or ndarray
Radial velocity in km/s.
'''
if isinstance(wl, float):
return wl*_c/center
else:
return np.array(wl)*_c/center
def vr_to_wl(vr, center=670.9659):
    '''Converts radial velocity to wavelengths, works for errors too.
Parameters
----------
vr : float or ndarray
Radial velocity to be converted, in km/s.
center : float
The wavelength that vr=0 is at.
Returns
-------
wl : float or ndarray
Wavelengths in nm.
'''
if isinstance(vr, float):
return vr*center/_c
else:
return np.array(vr)*center/_c
def vac_to_air(lam):
'''Convert from vacuum to air wavelengths.
From https://www.astro.uu.se/valdwiki/Air-to-vacuum%20conversion
Parameters
----------
lam : float or ndarray
Wavelengths in vacuum in angstroms.
Returns
-------
air : float or ndarray
Wavelengths in air in angstroms.
'''
s = 1e4/lam
n = 1 + 0.0000834254 + 0.02406147 / (130 - s**2) + 0.00015998 / (38.9 - s**2)
return lam/n
def air_to_vac(lam):
'''Convert from air to vacuum wavelengths.
From https://www.astro.uu.se/valdwiki/Air-to-vacuum%20conversion
Parameters
----------
lam : float or ndarray
Wavelengths in air in angstroms.
Returns
-------
vac : float or ndarray
Wavelengths in vacuum in angstroms.
'''
s = 1e4/lam
n = 1 + 0.00008336624212083 + 0.02408926869968 / (130.1065924522 - s**2) + 0.0001599740894897 / (38.92568793293 - s**2)
return lam*n
def convolve(f, g, n, m):
'''Compute the discrete convolution.
Parameters
----------
f : Function
Function 1.
g : Function
Function 2.
n : 1darray
Shift applied to g.
m : 1darray
Discrete values at which the function is evaluated.
Returns
-------
conv : float
Discrete convolution at n f*g(n).
'''
m_shift = m + n.reshape(n.shape[0], -1) # m+n
return np.sum(f(m)*g(m_shift), axis=1)
def common_range(x_range, shifts):
'''Compute the common range shared after the shifts are applied.
Parameters
----------
x_range : Tuple(Float)
The range over which the functions are defined.
shifts : List[Float]
The shifts to apply to the functions.
Returns
-------
left, right : Float, Float
The shared range (left, right).
'''
min_shift = np.min(shifts)
max_shift = np.max(shifts)
left = max(x_range[0], x_range[0] - min_shift)
right = min(x_range[1], x_range[1] - max_shift)
if left == right:
raise ValueError('Shifts are too extreme given the x_range, there is no overlap.')
return left, right
def cross_correlate(f, g, x_range, shifts, num=10000, plot=False):
'''Compute the cross correlation between two functions. Truncates edges, no
extrapolation.
Parameters
----------
f : Function
Function 1.
g : Function
Function 2.
x_range : tuple(Float)
The common range that f and g are defined over.
i.e. f : [-2, 2] -> R
g : [-1, 3] -> R
Then x_range = (-1, 2)
shifts : 1darray
The shifts to apply to the function g.
num : int
The number of points to sample the function f and g.
plot : bool
Display plots for debugging.
Returns
-------
cc : List[Float]
The cross correlations of the given shifts.
'''
#TODO: x_range could be improved to take into account differences in range
# of f and g, this doesn't happen a lot in practice though
left, right = common_range(x_range, shifts)
m = np.linspace(left, right, num=num)
if plot:
# original functions
x = np.linspace(x_range[0], x_range[1], 1000)
plt.scatter(x, f(x), label='f', s=4, alpha=0.5)
plt.scatter(x, g(x), label='g', s=4, alpha=0.5)
# shift g
plt.scatter(m, g(m+np.max(shifts)), s=4, alpha=0.5,
label=f'g min={np.min(shifts):.2f} shift')
plt.scatter(m, g(m+np.min(shifts)), s=4, alpha=0.5,
label=f'g max={np.max(shifts):.2f} shift')
# common region
for line in [left, right]:
plt.axvline(line, color='black', linestyle='--')
plt.legend()
plt.show()
return convolve(f, g, shifts, m)
def radial_velocity(f, g, x_range, shifts, num=10000, plot=False):
'''Compute the radial velocity from the max cross correlation.
f and g must have continuum centered at 0. f(n) = g(n-rv)
Parameters
----------
f : Function
Function 1.
g : Function
Function 2.
x_range : tuple(Float)
The common range that f and g are defined over.
i.e. f : [-2, 2] -> R
g : [-1, 3] -> R
Then x_range = (-1, 2)
shifts : 1darray
The shifts to apply to the function g.
num : int
The number of points to sample the function f and g.
plot : bool
Display plots for debugging.
Returns
-------
rv : Float
The radial velocity of g with respect to f.
'''
# cast to numpy array
shifts = np.array(shifts)
cc = cross_correlate(f, g, x_range, shifts, num=num, plot=plot)
return shifts[np.argmax(cc)]
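# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): the line centre,
# linewidth, and noise level below are assumed values chosen purely for the
# demonstration.
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    wl_demo = np.linspace(660.0, 680.0, 2000)
    # synthetic absorption line on a flat continuum, plus a little noise
    flux_demo = 1.0 - 0.5 * np.exp(-0.5 * ((wl_demo - 670.9659) / 0.05) ** 2)
    flux_demo = flux_demo + rng.normal(0.0, 0.005, flux_demo.size)
    spec = SpecAnalysis(wl_demo, flux_demo)
    spec.cut(670.9659, upper=2, lower=2)        # keep a 4 nm window
    spec.cont_norm(670.9659, mask_step=0.2)     # remove the linear continuum
    print('points kept:', len(spec.wl))
    print('median normalised flux:', np.median(spec.flux))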
|
[
"matplotlib.pyplot.axvline",
"numpy.sum",
"matplotlib.pyplot.show",
"numpy.abs",
"numpy.polyfit",
"numpy.polyval",
"numpy.median",
"scipy.interpolate.CubicSpline",
"matplotlib.pyplot.legend",
"numpy.argmax",
"numpy.std",
"numpy.min",
"numpy.mean",
"numpy.array",
"numpy.max",
"numpy.linspace",
"scipy.stats.mstats.theilslopes"
] |
[((8851, 8862), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (8859, 8862), True, 'import numpy as np\n'), ((8871, 8882), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (8879, 8882), True, 'import numpy as np\n'), ((8909, 8919), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (8916, 8919), True, 'import numpy as np\n'), ((8930, 8966), 'numpy.polyfit', 'np.polyfit', (['(x - center_x)', 'y'], {'deg': 'deg'}), '(x - center_x, y, deg=deg)\n', (8940, 8966), True, 'import numpy as np\n'), ((9109, 9142), 'numpy.polyval', 'np.polyval', (['fit', '(x_out - center_x)'], {}), '(fit, x_out - center_x)\n', (9119, 9142), True, 'import numpy as np\n'), ((10057, 10077), 'numpy.array', 'np.array', (['wavelength'], {}), '(wavelength)\n', (10065, 10077), True, 'import numpy as np\n'), ((13016, 13030), 'numpy.min', 'np.min', (['shifts'], {}), '(shifts)\n', (13022, 13030), True, 'import numpy as np\n'), ((13047, 13061), 'numpy.max', 'np.max', (['shifts'], {}), '(shifts)\n', (13053, 13061), True, 'import numpy as np\n'), ((14252, 14285), 'numpy.linspace', 'np.linspace', (['left', 'right'], {'num': 'num'}), '(left, right, num=num)\n', (14263, 14285), True, 'import numpy as np\n'), ((15769, 15785), 'numpy.array', 'np.array', (['shifts'], {}), '(shifts)\n', (15777, 15785), True, 'import numpy as np\n'), ((6653, 6673), 'numpy.median', 'np.median', (['self.flux'], {}), '(self.flux)\n', (6662, 6673), True, 'import numpy as np\n'), ((6939, 6970), 'scipy.stats.mstats.theilslopes', 'theilslopes', (['self.flux', 'self.wl'], {}), '(self.flux, self.wl)\n', (6950, 6970), False, 'from scipy.stats.mstats import theilslopes\n'), ((7626, 7652), 'scipy.interpolate.CubicSpline', 'CubicSpline', (['vr', 'self.flux'], {}), '(vr, self.flux)\n', (7637, 7652), False, 'from scipy.interpolate import CubicSpline\n'), ((7916, 7947), 'numpy.linspace', 'np.linspace', (['vr[0]', 'vr[-1]', 'num'], {}), '(vr[0], vr[-1], num)\n', (7927, 7947), True, 'import numpy as np\n'), ((8027, 8044), 'numpy.sum', 'np.sum', (['convolver'], {}), '(convolver)\n', (8033, 8044), True, 'import numpy as np\n'), ((8138, 8163), 'numpy.sum', 'np.sum', (['integrand'], {'axis': '(0)'}), '(integrand, axis=0)\n', (8144, 8163), True, 'import numpy as np\n'), ((14341, 14382), 'numpy.linspace', 'np.linspace', (['x_range[0]', 'x_range[1]', '(1000)'], {}), '(x_range[0], x_range[1], 1000)\n', (14352, 14382), True, 'import numpy as np\n'), ((14887, 14899), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (14897, 14899), True, 'import matplotlib.pyplot as plt\n'), ((14908, 14918), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14916, 14918), True, 'import matplotlib.pyplot as plt\n'), ((15873, 15886), 'numpy.argmax', 'np.argmax', (['cc'], {}), '(cc)\n', (15882, 15886), True, 'import numpy as np\n'), ((875, 908), 'numpy.array', 'np.array', (['wavelength'], {'dtype': 'float'}), '(wavelength, dtype=float)\n', (883, 908), True, 'import numpy as np\n'), ((933, 960), 'numpy.array', 'np.array', (['flux'], {'dtype': 'float'}), '(flux, dtype=float)\n', (941, 960), True, 'import numpy as np\n'), ((989, 1020), 'numpy.array', 'np.array', (['flux_err'], {'dtype': 'float'}), '(flux_err, dtype=float)\n', (997, 1020), True, 'import numpy as np\n'), ((5340, 5352), 'numpy.std', 'np.std', (['diff'], {}), '(diff)\n', (5346, 5352), True, 'import numpy as np\n'), ((6821, 6833), 'numpy.median', 'np.median', (['y'], {}), '(y)\n', (6830, 6833), True, 'import numpy as np\n'), ((14830, 14878), 'matplotlib.pyplot.axvline', 'plt.axvline', (['line'], {'color': '"""black"""', 
'linestyle': '"""--"""'}), "(line, color='black', linestyle='--')\n", (14841, 14878), True, 'import matplotlib.pyplot as plt\n'), ((5372, 5384), 'numpy.abs', 'np.abs', (['diff'], {}), '(diff)\n', (5378, 5384), True, 'import numpy as np\n'), ((10658, 10670), 'numpy.array', 'np.array', (['wl'], {}), '(wl)\n', (10666, 10670), True, 'import numpy as np\n'), ((11129, 11141), 'numpy.array', 'np.array', (['vr'], {}), '(vr)\n', (11137, 11141), True, 'import numpy as np\n'), ((14540, 14554), 'numpy.max', 'np.max', (['shifts'], {}), '(shifts)\n', (14546, 14554), True, 'import numpy as np\n'), ((14663, 14677), 'numpy.min', 'np.min', (['shifts'], {}), '(shifts)\n', (14669, 14677), True, 'import numpy as np\n'), ((7739, 7763), 'numpy.min', 'np.min', (['(vr[1:] - vr[:-1])'], {}), '(vr[1:] - vr[:-1])\n', (7745, 7763), True, 'import numpy as np\n'), ((14608, 14622), 'numpy.min', 'np.min', (['shifts'], {}), '(shifts)\n', (14614, 14622), True, 'import numpy as np\n'), ((14731, 14745), 'numpy.max', 'np.max', (['shifts'], {}), '(shifts)\n', (14737, 14745), True, 'import numpy as np\n')]
|
import numpy as np
#
class Rollout:
def __init__(self):
self.dict_obs = []
self.dict_next_obs = []
self.actions = []
self.rewards = []
self.terminals = []
self.agent_infos = []
self.env_infos = {}
self.path_length = 0
def __len__(self):
return self.path_length
def add_transition(self, obs, action, next_obs, reward, done, env_info, agent_info):
self.dict_obs.append(obs)
self.dict_next_obs.append(next_obs)
self.actions.append(action)
self.rewards.append(reward)
self.terminals.append(done)
if not self.env_infos:
for k, v in env_info.items():
self.env_infos[k] = [v]
else:
for k, v in env_info.items():
self.env_infos[k].append(v)
self.path_length += 1
def to_dict(self):
self.actions = np.array(self.actions)
if len(self.actions.shape) == 1:
self.actions = np.expand_dims(self.actions, 1)
for k, v in self.env_infos.items():
self.env_infos[k] = np.array(v)
self.rewards = np.array(self.rewards)
self.terminals = np.array(self.terminals).reshape(-1, 1)
return dict(
observations=self.dict_obs,
actions=self.actions,
rewards=self.rewards,
next_observations=self.dict_next_obs,
terminals=self.terminals,
agent_infos=self.agent_infos,
env_infos=self.env_infos,
)
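# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original class): the observation keys,
# action shape, and reward values below are illustrative assumptions only.
if __name__ == '__main__':
    rollout = Rollout()
    for t in range(3):
        rollout.add_transition(
            obs={'state': np.zeros(4)},
            action=np.array([0.1, -0.2]),
            next_obs={'state': np.ones(4)},
            reward=1.0,
            done=(t == 2),
            env_info={'step': t},
            agent_info={},
        )
    batch = rollout.to_dict()
    print(len(rollout), batch['actions'].shape, batch['terminals'].shape)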
|
[
"numpy.array",
"numpy.expand_dims"
] |
[((909, 931), 'numpy.array', 'np.array', (['self.actions'], {}), '(self.actions)\n', (917, 931), True, 'import numpy as np\n'), ((1143, 1165), 'numpy.array', 'np.array', (['self.rewards'], {}), '(self.rewards)\n', (1151, 1165), True, 'import numpy as np\n'), ((1000, 1031), 'numpy.expand_dims', 'np.expand_dims', (['self.actions', '(1)'], {}), '(self.actions, 1)\n', (1014, 1031), True, 'import numpy as np\n'), ((1108, 1119), 'numpy.array', 'np.array', (['v'], {}), '(v)\n', (1116, 1119), True, 'import numpy as np\n'), ((1191, 1215), 'numpy.array', 'np.array', (['self.terminals'], {}), '(self.terminals)\n', (1199, 1215), True, 'import numpy as np\n')]
|
print('importing packages...')
import numpy as np
import cv2
import math
import random
import time
import rotate_brush as rb
import gradient
from thready import amap
import os
import threading
canvaslock = threading.Lock()
canvaslock.acquire()
canvaslock.release()
def lockgen(canvas,ym,yp,xm,xp):
# given roi, know which lock.
#
# if left:
# return leftcanvaslock:
# if right:
# return rightcanvaslock:
# if riding:
    # return canvaslock:
pass
def load(filename='flower.jpg'):
print('loading',filename,'...')
global imname,flower,canvas,hist
global rescale,xs_small,ys_small,smallerflower
imname = filename.split('.')[0]
# original image
flower = cv2.imread(filename)
xshape = flower.shape[1]
yshape = flower.shape[0]
rescale = xshape/640
# display rescaling: you'll know when it's larger than your screen
if rescale<1:
rescale=1
xs_small = int(xshape/rescale)
ys_small = int(yshape/rescale)
smallerflower = cv2.resize(flower,dsize=(xs_small,ys_small)).astype('float32')/255
# for preview purpose,
# if image too large
# convert to float32
flower = flower.astype('float32')/255
# canvas initialized
canvas = flower.copy()
canvas[:,:] = 0.8
#clear hist
hist=[]
print(filename,'loaded.')
load()
def rn():
return random.random()
def showimg():
if rescale==1:
smallercanvas = canvas
else:
smallercanvas = cv2.resize(canvas,dsize=(xs_small,ys_small),interpolation=cv2.INTER_NEAREST)
i,j,d = wherediff(smallercanvas,smallerflower)
sd = np.mean(d)
print('mean diff:',sd)
d[i,:]=1.0
d[:,j]=1.0
cv2.imshow('canvas',smallercanvas)
cv2.imshow('flower',smallerflower)
cv2.imshow('diff',d)
cv2.waitKey(1)
cv2.waitKey(1)
def destroy():
cv2.destroyAllWindows()
def positive_sharpen(i,overblur=False,coeff=8.): #no darken to original image
# emphasize the edges
blurred = cv2.blur(i,(5,5))
sharpened = i + (i - blurred) * coeff
if overblur:
return cv2.blur(np.maximum(sharpened,i),(11,11))
return cv2.blur(np.maximum(sharpened,i),(3,3))
def diff(i1,i2,overblur=False):
#calculate the difference of 2 float32 BGR images.
# # use lab
# i1=i1.astype(np.float32)
# i2=i2.astype(np.float32)
# lab1 = cv2.cvtColor(i1,cv2.COLOR_BGR2LAB)
# lab2 = cv2.cvtColor(i2,cv2.COLOR_BGR2LAB)
# d = lab1-lab2
# d = d*d / 10000
# # use rgb
d = (i1-i2)# * [0.2,1.5,1.3]
d = d*d
d = positive_sharpen(np.sum(d,-1),overblur=overblur)
return d
# grayscalize
def wherediff(i1=None,i2=None):
global canvas,flower
if i1 is None:
i1 = canvas
if i2 is None:
i2 = flower
# find out where max difference point is.
d = diff(i1,i2,overblur=True)
i,j = np.unravel_index(d.argmax(),d.shape)
return i,j,d
def get_random_color():
return np.array([rn(),rn(),rn()]).astype('float32')
#danger: default to float64
def limit(x,minimum,maximum):
return min(max(x,minimum),maximum)
# history and replay section
# global history.
hist = []
def record(sth):
hist.append(sth)
# repaint the image from history
def repaint(constraint_angle=False,upscale=1.,batchsize=16):
starttime = time.time()
newcanvas = np.array(canvas).astype('uint8')
# newcanvas = cv2.cvtColor(newcanvas,cv2.COLOR_BGR2BGRA) # fastest format
if upscale!=1.:
newcanvas = cv2.resize(newcanvas,dsize=(int(newcanvas.shape[1]*upscale),int(newcanvas.shape[0]*upscale)))
newcanvas[:,:,:] = int(0.8*255)
def showthis():
showsize = 640
resize_scale = min(showsize/newcanvas.shape[1],1.)
resizedx,resizedy = int(newcanvas.shape[1]*resize_scale),int(newcanvas.shape[0]*resize_scale)
smallercanvas = cv2.resize(newcanvas,dsize=(resizedx,resizedy),interpolation=cv2.INTER_NEAREST)
cv2.imshow('repaint',smallercanvas)
cv2.waitKey(1)
def paintone(histitem):
x,y,radius,srad,angle,cb,cg,cr,brushname = histitem
cb,cg,cr = int(cb*255),int(cg*255),int(cr*255)
# cv2.ellipse(newcanvas,(int(x),int(y)),(radius,srad),angle,0,360,color=(cb,cg,cr),thickness=-1)
b,key = rb.get_brush(brushname)
if constraint_angle:
angle = constraint_angle+rn()*20-10
if upscale!=1:
x,y,radius,srad = x*upscale,y*upscale,radius*upscale,srad*upscale
rb.compose(newcanvas,b,x=x,y=y,rad=radius,srad=srad,angle=angle,color=[cb,cg,cr],useoil=True,lock=canvaslock)
k = 0
batch = []
def runbatch(batch):
from thready import amap # multithreading
return amap(paintone,batch)
lastep = 0
while k<len(hist):
while len(batch)<batchsize and k<len(hist):
batch.append(hist[k])
k+=1
runbatch(batch)
print(k,'painted. one of them:',batch[0])
# show progress:
ep = int(k/(newcanvas.shape[1]*upscale)) # larger image => longer wait per show
if ep >lastep:
showthis()
lastep = ep # show every 32p
batch=[]
print(time.time()-starttime,'s elapsed')
showthis()
return newcanvas
import json
def savehist(filename='hist.json'):
f = open(filename,'w')
json.dump(hist,f)
f.close()
def loadhist(filename='hist.json'):
f = open(filename,'r')
global hist
hist = json.load(f)
# end hist section
def paint_one(x,y,brushname='random',angle=-1.,minrad=10,maxrad=60):
oradius = rn()*rn()*maxrad+minrad
fatness = 1/(1+rn()*rn()*6)
brush,key = rb.get_brush(brushname)
def intrad(orad):
#obtain integer radius and shorter-radius
radius = int(orad)
srad = int(orad*fatness+1)
return radius,srad
radius,srad = intrad(oradius)
#set initial angle
if angle == -1.:
angle = rn()*360
# set initial color
# c = get_random_color()
# sample color from image => converges faster.
c = flower[int(y),int(x),:]
delta = 1e-4
# get copy of square ROI area, to do drawing and calculate error.
def get_roi(newx,newy,newrad):
radius,srad = intrad(newrad)
xshape = flower.shape[1]
yshape = flower.shape[0]
yp = int(min(newy+radius,yshape-1))
ym = int(max(0,newy-radius))
xp = int(min(newx+radius,xshape-1))
xm = int(max(0,newx-radius))
if yp<=ym or xp<=xm:
# if zero w or h
raise NameError('zero roi')
ref = flower[ym:yp,xm:xp]
bef = canvas[ym:yp,xm:xp]
aftr = np.array(bef)
# print(flower.dtype,canvas.dtype,ref.dtype)
return ref,bef,aftr
# paint one stroke with given config and return the error.
def paint_aftr_w(color,angle,nx,ny,nr):
ref,bef,aftr = get_roi(nx,ny,nr)
radius,srad = intrad(nr)
# cv2.circle(aftr,(radius,radius),radius,color=color,thickness=-1)
# cv2.ellipse(aftr,(radius,radius),(radius,srad),angle,0,360,color=color,thickness=-1)
rb.compose(aftr,brush,x=radius,y=radius,rad=radius,srad=srad,angle=angle,color=color,usefloat=True,useoil=False)
# if useoil here set to true: 2x slow down + instability
err_aftr = np.mean(diff(aftr,ref))
return err_aftr
# finally paint the same stroke onto the canvas.
def paint_final_w(color,angle,nr):
radius,srad = intrad(nr)
# cv2.circle(canvas,(x,y), radius, color=color,thickness=-1)
# cv2.ellipse(canvas,(int(x),int(y)),(radius,srad),angle,0,360,color=color,thickness=-1)
rb.compose(canvas,brush,x=x,y=y,rad=radius,srad=srad,angle=angle,color=color,usefloat=True,useoil=True,lock=canvaslock)
# enable oil effects on final paint.
# np.float64 will cause problems
rec = [x,y,radius,srad,angle,color[0],color[1],color[2],brushname]
rec = [float(r) if type(r)==np.float64 or type(r)==np.float32 else r for r in rec]
record(rec)
# log it!
# given err, calculate gradient of parameters wrt to it
def calc_gradient(err):
b,g,r = c[0],c[1],c[2]
cc = b,g,r
err_aftr = paint_aftr_w((b+delta,g,r),angle,x,y,oradius)
gb = err_aftr - err
err_aftr = paint_aftr_w((b,g+delta,r),angle,x,y,oradius)
gg = err_aftr - err
err_aftr = paint_aftr_w((b,g,r+delta),angle,x,y,oradius)
gr = err_aftr - err
err_aftr = paint_aftr_w(cc,(angle+5.)%360,x,y,oradius)
ga = err_aftr - err
err_aftr = paint_aftr_w(cc,angle,x+2,y,oradius)
gx = err_aftr - err
err_aftr = paint_aftr_w(cc,angle,x,y+2,oradius)
gy = err_aftr - err
err_aftr = paint_aftr_w(cc,angle,x,y,oradius+3)
gradius = err_aftr - err
return np.array([gb,gg,gr])/delta,ga/5,gx/2,gy/2,gradius/3,err
# max and min steps for gradient descent
tryfor = 12
mintry = 3
for i in range(tryfor):
try: # might have error
# what is the error at ROI?
ref,bef,aftr = get_roi(x,y,oradius)
orig_err = np.mean(diff(bef,ref))
# do the painting
err = paint_aftr_w(c,angle,x,y,oradius)
# if error decreased:
if err<orig_err and i>=mintry :
paint_final_w(c,angle,oradius)
return True,i
# if not satisfactory
# calculate gradient
grad,anglegrad,gx,gy,gradius,err = calc_gradient(err)
except NameError as e:
print(e)
print('error within calc_gradient')
return False,i
if printgrad: #debug purpose.
if i==0:
print('----------')
print('orig_err',orig_err)
print('ep:{}, err:{:3f}, color:{}, angle:{:2f}, xy:{:2f},{:2f}, radius:{:2f}'.format(i,err,c,angle,x,y,oradius))
# do descend
if i<tryfor-1:
c = c - (grad*.3).clip(max=0.3,min=-0.3)
c = c.clip(max=1.,min=0.)
angle = (angle - limit(anglegrad*100000,-5,5))%360
x = x - limit(gx*1000*radius,-3,3)
y = y - limit(gy*1000*radius,-3,3)
oradius = oradius* (1-limit(gradius*20000,-0.2,.2))
oradius = limit(oradius,7,100)
# print('after desc:x:{:2f},y:{:2f},angle:{:2f},oradius:{:5f}'
# .format(x,y,angle,oradius))
return False,tryfor
def putstrokes(howmany):
def samplepoints():
# sample a lot of points from one error image - save computation cost
point_list = []
y,x,d = wherediff()
phasemap = gradient.get_phase(flower)
# while not enough points:
while len(point_list)<howmany:
# randomly pick one point
yshape,xshape = flower.shape[0:2]
ry,rx = int(rn()*yshape),int(rn()*xshape)
# accept with high probability if error is large
# and vice versa
if d[ry,rx]>0.5*rn():
# get gradient orientation info from phase map
phase = phasemap[ry,rx] # phase should be between [0,2pi)
# choose direction perpendicular to gradient
angle = (phase/math.pi*180+90)%360
# angle = 22.5
point_list.append((ry,rx,angle))
return point_list
def pcasync(tup):
y,x,angle = tup
b,key = rb.get_brush(key='random') # get a random brush
return paint_one(x,y,brushname=key,minrad=10,maxrad=50,angle=angle) #num of epoch
if True:
from thready import amap # multithreading
point_list = samplepoints()
return amap(pcasync,point_list)
else: # single threading test
point_list = samplepoints()
res={}
for idx,item in enumerate(point_list):
print('single threaded mode.',idx)
res[idx] = pcasync(item)
return res
# autosave during canvas painting
dosaveimage = True
# dosaveimage = False
# gradient debug info print
printgrad = False
# printgrad = True
# run the whole thing
def r(epoch=1):
# filename prefix for each run
seed = int(rn()*1000)
print('running...')
st = time.time()
# timing counter for autosave and showimg()
timecounter = 0
showcounter = 0
for i in range(epoch):
loopfor = 1
paranum = 256
# number of stroke tries per batch, sent to thread pool
# smaller number decreases efficiency
succeeded = 0 # how many strokes being placed
ti = time.time()
# average step of gradient descent performed
avgstep=0.
for k in range(loopfor):
res = putstrokes(paranum) # res is a map of results
for r in res:
status,step = res[r]
avgstep += step
succeeded += 1 if status else 0
avgstep/=loopfor*paranum
steptime = time.time()-ti
tottime = time.time()-st
#info out
print('epoch',i,'/',epoch ,'succeeded:',succeeded,'/',loopfor*paranum,'avg step:' ,avgstep,'time:{:.1f}s, total:{:.1f}s'.format(steptime,tottime))
#autosave
timecounter+=steptime
if(timecounter>20):
timecounter=0
if dosaveimage:
print('saving to disk...')
if not os.path.exists('./'+imname):
os.mkdir('./'+imname)
cv2.imwrite(imname+'/{}_{:04d}.png'.format(seed,i),canvas*255)
print('saved.')
# refresh view
showcounter+=steptime
if(showcounter>3):
showcounter=0
showimg()
showimg()
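# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original script): a short painting run.
# It assumes 'flower.jpg' (already loaded above) and the brush images expected
# by rotate_brush are available in the working directory.
if __name__ == '__main__':
    r(epoch=10)             # paint 10 batches of strokes (autosaves every ~20 s)
    savehist('hist.json')   # store the stroke history so repaint() can replay it
    destroy()               # close the OpenCV preview windows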
|
[
"os.mkdir",
"numpy.maximum",
"numpy.sum",
"numpy.mean",
"cv2.imshow",
"rotate_brush.get_brush",
"os.path.exists",
"threading.Lock",
"cv2.destroyAllWindows",
"cv2.resize",
"json.dump",
"cv2.waitKey",
"random.random",
"json.load",
"cv2.blur",
"time.time",
"gradient.get_phase",
"cv2.imread",
"thready.amap",
"numpy.array",
"rotate_brush.compose"
] |
[((207, 223), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (221, 223), False, 'import threading\n'), ((722, 742), 'cv2.imread', 'cv2.imread', (['filename'], {}), '(filename)\n', (732, 742), False, 'import cv2\n'), ((1378, 1393), 'random.random', 'random.random', ([], {}), '()\n', (1391, 1393), False, 'import random\n'), ((1632, 1642), 'numpy.mean', 'np.mean', (['d'], {}), '(d)\n', (1639, 1642), True, 'import numpy as np\n'), ((1706, 1741), 'cv2.imshow', 'cv2.imshow', (['"""canvas"""', 'smallercanvas'], {}), "('canvas', smallercanvas)\n", (1716, 1741), False, 'import cv2\n'), ((1745, 1780), 'cv2.imshow', 'cv2.imshow', (['"""flower"""', 'smallerflower'], {}), "('flower', smallerflower)\n", (1755, 1780), False, 'import cv2\n'), ((1784, 1805), 'cv2.imshow', 'cv2.imshow', (['"""diff"""', 'd'], {}), "('diff', d)\n", (1794, 1805), False, 'import cv2\n'), ((1810, 1824), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (1821, 1824), False, 'import cv2\n'), ((1829, 1843), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (1840, 1843), False, 'import cv2\n'), ((1864, 1887), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1885, 1887), False, 'import cv2\n'), ((2007, 2026), 'cv2.blur', 'cv2.blur', (['i', '(5, 5)'], {}), '(i, (5, 5))\n', (2015, 2026), False, 'import cv2\n'), ((3321, 3332), 'time.time', 'time.time', ([], {}), '()\n', (3330, 3332), False, 'import time\n'), ((5336, 5354), 'json.dump', 'json.dump', (['hist', 'f'], {}), '(hist, f)\n', (5345, 5354), False, 'import json\n'), ((5459, 5471), 'json.load', 'json.load', (['f'], {}), '(f)\n', (5468, 5471), False, 'import json\n'), ((5649, 5672), 'rotate_brush.get_brush', 'rb.get_brush', (['brushname'], {}), '(brushname)\n', (5661, 5672), True, 'import rotate_brush as rb\n'), ((12273, 12284), 'time.time', 'time.time', ([], {}), '()\n', (12282, 12284), False, 'import time\n'), ((1494, 1573), 'cv2.resize', 'cv2.resize', (['canvas'], {'dsize': '(xs_small, ys_small)', 'interpolation': 'cv2.INTER_NEAREST'}), '(canvas, dsize=(xs_small, ys_small), interpolation=cv2.INTER_NEAREST)\n', (1504, 1573), False, 'import cv2\n'), ((2161, 2185), 'numpy.maximum', 'np.maximum', (['sharpened', 'i'], {}), '(sharpened, i)\n', (2171, 2185), True, 'import numpy as np\n'), ((2585, 2598), 'numpy.sum', 'np.sum', (['d', '(-1)'], {}), '(d, -1)\n', (2591, 2598), True, 'import numpy as np\n'), ((3863, 3950), 'cv2.resize', 'cv2.resize', (['newcanvas'], {'dsize': '(resizedx, resizedy)', 'interpolation': 'cv2.INTER_NEAREST'}), '(newcanvas, dsize=(resizedx, resizedy), interpolation=cv2.\n INTER_NEAREST)\n', (3873, 3950), False, 'import cv2\n'), ((3951, 3987), 'cv2.imshow', 'cv2.imshow', (['"""repaint"""', 'smallercanvas'], {}), "('repaint', smallercanvas)\n", (3961, 3987), False, 'import cv2\n'), ((3995, 4009), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (4006, 4009), False, 'import cv2\n'), ((4277, 4300), 'rotate_brush.get_brush', 'rb.get_brush', (['brushname'], {}), '(brushname)\n', (4289, 4300), True, 'import rotate_brush as rb\n'), ((4490, 4614), 'rotate_brush.compose', 'rb.compose', (['newcanvas', 'b'], {'x': 'x', 'y': 'y', 'rad': 'radius', 'srad': 'srad', 'angle': 'angle', 'color': '[cb, cg, cr]', 'useoil': '(True)', 'lock': 'canvaslock'}), '(newcanvas, b, x=x, y=y, rad=radius, srad=srad, angle=angle,\n color=[cb, cg, cr], useoil=True, lock=canvaslock)\n', (4500, 4614), True, 'import rotate_brush as rb\n'), ((4717, 4738), 'thready.amap', 'amap', (['paintone', 'batch'], {}), '(paintone, batch)\n', (4721, 4738), False, 'from thready 
import amap\n'), ((6652, 6665), 'numpy.array', 'np.array', (['bef'], {}), '(bef)\n', (6660, 6665), True, 'import numpy as np\n'), ((7110, 7236), 'rotate_brush.compose', 'rb.compose', (['aftr', 'brush'], {'x': 'radius', 'y': 'radius', 'rad': 'radius', 'srad': 'srad', 'angle': 'angle', 'color': 'color', 'usefloat': '(True)', 'useoil': '(False)'}), '(aftr, brush, x=radius, y=radius, rad=radius, srad=srad, angle=\n angle, color=color, usefloat=True, useoil=False)\n', (7120, 7236), True, 'import rotate_brush as rb\n'), ((7658, 7791), 'rotate_brush.compose', 'rb.compose', (['canvas', 'brush'], {'x': 'x', 'y': 'y', 'rad': 'radius', 'srad': 'srad', 'angle': 'angle', 'color': 'color', 'usefloat': '(True)', 'useoil': '(True)', 'lock': 'canvaslock'}), '(canvas, brush, x=x, y=y, rad=radius, srad=srad, angle=angle,\n color=color, usefloat=True, useoil=True, lock=canvaslock)\n', (7668, 7791), True, 'import rotate_brush as rb\n'), ((10697, 10723), 'gradient.get_phase', 'gradient.get_phase', (['flower'], {}), '(flower)\n', (10715, 10723), False, 'import gradient\n'), ((11483, 11509), 'rotate_brush.get_brush', 'rb.get_brush', ([], {'key': '"""random"""'}), "(key='random')\n", (11495, 11509), True, 'import rotate_brush as rb\n'), ((11736, 11761), 'thready.amap', 'amap', (['pcasync', 'point_list'], {}), '(pcasync, point_list)\n', (11740, 11761), False, 'from thready import amap\n'), ((12622, 12633), 'time.time', 'time.time', ([], {}), '()\n', (12631, 12633), False, 'import time\n'), ((2108, 2132), 'numpy.maximum', 'np.maximum', (['sharpened', 'i'], {}), '(sharpened, i)\n', (2118, 2132), True, 'import numpy as np\n'), ((3350, 3366), 'numpy.array', 'np.array', (['canvas'], {}), '(canvas)\n', (3358, 3366), True, 'import numpy as np\n'), ((5185, 5196), 'time.time', 'time.time', ([], {}), '()\n', (5194, 5196), False, 'import time\n'), ((13002, 13013), 'time.time', 'time.time', ([], {}), '()\n', (13011, 13013), False, 'import time\n'), ((13035, 13046), 'time.time', 'time.time', ([], {}), '()\n', (13044, 13046), False, 'import time\n'), ((1027, 1073), 'cv2.resize', 'cv2.resize', (['flower'], {'dsize': '(xs_small, ys_small)'}), '(flower, dsize=(xs_small, ys_small))\n', (1037, 1073), False, 'import cv2\n'), ((8860, 8882), 'numpy.array', 'np.array', (['[gb, gg, gr]'], {}), '([gb, gg, gr])\n', (8868, 8882), True, 'import numpy as np\n'), ((13422, 13451), 'os.path.exists', 'os.path.exists', (["('./' + imname)"], {}), "('./' + imname)\n", (13436, 13451), False, 'import os\n'), ((13471, 13494), 'os.mkdir', 'os.mkdir', (["('./' + imname)"], {}), "('./' + imname)\n", (13479, 13494), False, 'import os\n')]
|
import numpy as np
from numpy import vectorize
import scipy.optimize as so
@vectorize
def U(c, h, kappa, nu):
if c<=0:
u = -np.inf
elif c>0:
u = np.log(c) - (kappa*h**(1+1/nu))/((1+1/nu))
return u
class rep_ag:
def __init__(self, theta, beta, delta, kappa, nu, kmin, kmax, hmin, hmax, num_node=20, order=3):
self.theta = theta
self.beta = beta
self.delta = delta
self.kappa = kappa
self.nu = nu
self.kmin = kmin
self.kmax = kmax
self.hmin = hmin
self.hmax = hmax
self.num_node = num_node
self.order = order
##### creating the basis functions
func = []
Psi1 = np.vectorize(lambda x: 1)
Psi2 = np.vectorize(lambda x: x)
func.append(Psi1)
func.append(Psi2)
for i in range(2,order):
f = np.vectorize(lambda x, n=i: 2*x*func[n-1](x)-func[n-2](x))
func.append(f)
self.func = func
self.gridk, self.gridk_cheb = self.cheb_node(kmin, kmax, num_node, cheby=0)
PHI = []
for f in self.func:
phi = f(2*(self.gridk-self.kmin)/(self.kmax-self.kmin) -1)
PHI.append(phi)
self.PHI = np.array(PHI).T
def cheb_node(self, a, b, num, cheby=1):
vec = np.arange(0,num)
vec = np.flip(vec, axis=0)
chb = np.cos((vec*np.pi)/(num-1))
points = (a+b)/2 + ((b-a)/2)*chb
if cheby == 0:
vec_unit = 1/2 + (1/2)*chb
return np.array(points), np.array(vec_unit)
else:
return np.array(points)
def update_val(self, Theta_guess, ki, start): #Theta_guess here is just for a specific ki so we also need ki
Kp = lambda c, h: (1-self.delta)*ki + ki**(1-self.theta) *h**self.theta - c
Kp_cheb = lambda c, h: 2*(Kp(c,h)-self.kmin)/(self.kmax-self.kmin) -1 # here the value is function of kp not k so we need to map kp to (0,1) not k
Suma = lambda c, h: sum(Theta_guess[i]*self.func[i](Kp_cheb(c,h)) for i in range(len(self.func)))
VnotM = lambda x: -U(x[0], x[1], self.kappa, self.nu) - self.beta*Suma(x[0],x[1]) # - the objective because I am minimizing when I want to maximize
#non linear constraint
const = ({'type': 'ineq', 'fun': lambda x: ki**(1-self.theta)* x[1]**self.theta -x[0]})#higher or equal to zero
Boundc = (0.01*ki**(1-self.theta), None)
Boundh = (0.001*self.hmin,self.hmax)
Bound = (Boundc, Boundh)
res = so.minimize(VnotM, start, method = 'SLSQP', bounds = Bound, constraints=const)# start should be the solution found previously so we have interest in storing previous solution
        # it should be an inequality not an upper_bound
Value = -res.fun
c_opt = res.x[0]
h_opt = res.x[1]
return Value, c_opt, h_opt
def update_theta(self, Theta_Old, Old_opt):
New_opt = []
V = []
for i in range(len(self.gridk)):
Value, c_opt, h_opt = self.update_val(Theta_Old, self.gridk[i], Old_opt[i,:]) #Old_opt is going to be a matrix containing the previews policy funtions
New_opt.append([c_opt, h_opt])
V.append(Value)
New_opt = np.array(New_opt)
V = np.array(V)
        New_theta = np.linalg.inv(self.PHI.T@self.PHI)@self.PHI.T@V
return New_opt, New_theta
def problem(self, Old_theta = None, Tol = 10**(-6)):
        if Old_theta is None:
Old_theta = np.zeros(len(self.func))
Old_c = (self.kmax/4)**(1-self.theta) *np.ones(len(self.gridk))
Old_h = (self.hmax/4)*np.ones(len(self.gridk))
Old_opt = np.vstack((Old_c,Old_h)).T
err = 1
j = 0
while err>Tol:
New_opt, New_theta = self.update_theta(Old_theta, Old_opt)
err = np.max(np.abs(Old_theta-New_theta))
if j%50 == 0:
print('iteration:', j)
print('error:', err)
Old_theta = New_theta
Old_opt = New_opt
j = j+1
self.New_opt = New_opt
self.New_theta = New_theta
return New_opt, New_theta
def Val_pol_fun(self):
kc = lambda k: 2*(k-self.kmin)/(self.kmax-self.kmin) -1
self.V = np.vectorize(lambda k: sum(self.New_theta[i]*self.func[i](kc(k)) for i in range(len(self.func))))
        self.Theta_c = np.linalg.inv(self.PHI.T@self.PHI)@[email protected]_opt[:,0]
        self.Theta_h = np.linalg.inv(self.PHI.T@self.PHI)@[email protected]_opt[:,1]
self.gc = np.vectorize(lambda k: sum(self.Theta_c[i]*self.func[i](kc(k)) for i in range(len(self.func))))
self.gh = np.vectorize(lambda k: sum(self.Theta_h[i]*self.func[i](kc(k)) for i in range(len(self.func))))
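# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): a small smoke test of
# the solver. Every parameter value below is an illustrative assumption, and
# the loose tolerance is only meant to keep the run short.
if __name__ == '__main__':
    agent = rep_ag(theta=0.36, beta=0.96, delta=0.08, kappa=1.0, nu=1.0,
                   kmin=1.0, kmax=10.0, hmin=0.1, hmax=1.0,
                   num_node=10, order=3)
    policy, theta_hat = agent.problem(Tol=1e-3)
    agent.Val_pol_fun()
    print('V(k=5) =', agent.V(5.0))
    print('c(k=5) =', agent.gc(5.0), 'h(k=5) =', agent.gh(5.0))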
|
[
"scipy.optimize.minimize",
"numpy.vectorize",
"numpy.flip",
"numpy.log",
"numpy.abs",
"numpy.arange",
"numpy.array",
"numpy.cos",
"numpy.linalg.inv",
"numpy.vstack"
] |
[((732, 757), 'numpy.vectorize', 'np.vectorize', (['(lambda x: 1)'], {}), '(lambda x: 1)\n', (744, 757), True, 'import numpy as np\n'), ((774, 799), 'numpy.vectorize', 'np.vectorize', (['(lambda x: x)'], {}), '(lambda x: x)\n', (786, 799), True, 'import numpy as np\n'), ((1378, 1395), 'numpy.arange', 'np.arange', (['(0)', 'num'], {}), '(0, num)\n', (1387, 1395), True, 'import numpy as np\n'), ((1410, 1430), 'numpy.flip', 'np.flip', (['vec'], {'axis': '(0)'}), '(vec, axis=0)\n', (1417, 1430), True, 'import numpy as np\n'), ((1446, 1477), 'numpy.cos', 'np.cos', (['(vec * np.pi / (num - 1))'], {}), '(vec * np.pi / (num - 1))\n', (1452, 1477), True, 'import numpy as np\n'), ((2612, 2686), 'scipy.optimize.minimize', 'so.minimize', (['VnotM', 'start'], {'method': '"""SLSQP"""', 'bounds': 'Bound', 'constraints': 'const'}), "(VnotM, start, method='SLSQP', bounds=Bound, constraints=const)\n", (2623, 2686), True, 'import scipy.optimize as so\n'), ((3350, 3367), 'numpy.array', 'np.array', (['New_opt'], {}), '(New_opt)\n', (3358, 3367), True, 'import numpy as np\n'), ((3381, 3392), 'numpy.array', 'np.array', (['V'], {}), '(V)\n', (3389, 3392), True, 'import numpy as np\n'), ((1271, 1284), 'numpy.array', 'np.array', (['PHI'], {}), '(PHI)\n', (1279, 1284), True, 'import numpy as np\n'), ((1672, 1688), 'numpy.array', 'np.array', (['points'], {}), '(points)\n', (1680, 1688), True, 'import numpy as np\n'), ((179, 188), 'numpy.log', 'np.log', (['c'], {}), '(c)\n', (185, 188), True, 'import numpy as np\n'), ((1600, 1616), 'numpy.array', 'np.array', (['points'], {}), '(points)\n', (1608, 1616), True, 'import numpy as np\n'), ((1618, 1636), 'numpy.array', 'np.array', (['vec_unit'], {}), '(vec_unit)\n', (1626, 1636), True, 'import numpy as np\n'), ((3414, 3450), 'numpy.linalg.inv', 'np.linalg.inv', (['(self.PHI.T @ self.PHI)'], {}), '(self.PHI.T @ self.PHI)\n', (3427, 3450), True, 'import numpy as np\n'), ((3802, 3827), 'numpy.vstack', 'np.vstack', (['(Old_c, Old_h)'], {}), '((Old_c, Old_h))\n', (3811, 3827), True, 'import numpy as np\n'), ((3983, 4012), 'numpy.abs', 'np.abs', (['(Old_theta - New_theta)'], {}), '(Old_theta - New_theta)\n', (3989, 4012), True, 'import numpy as np\n'), ((4557, 4593), 'numpy.linalg.inv', 'np.linalg.inv', (['(self.PHI.T @ self.PHI)'], {}), '(self.PHI.T @ self.PHI)\n', (4570, 4593), True, 'import numpy as np\n'), ((4646, 4682), 'numpy.linalg.inv', 'np.linalg.inv', (['(self.PHI.T @ self.PHI)'], {}), '(self.PHI.T @ self.PHI)\n', (4659, 4682), True, 'import numpy as np\n')]
|
import csv
import os
import sqlite3
import pandas as pd
import numpy as np
from .pybash import get_file_info
def connect_to_db(path):
"""
Interact with a SQLite database
Parameters
----------
path: str
Location of the SQLite database
Returns
-------
conn: Connector
The SQLite connection object
curs: Cursor
The SQLite cursor object
Usage
-----
conn, curs = connect_to_db("data/raw/foo.db")
"""
try:
if os.path.exists(path):
print("Connecting to Existing DB")
conn = sqlite3.connect(path)
else:
print("Initialising new SQLite DB")
conn = sqlite3.connect(path)
curs = conn.cursor()
    except sqlite3.Error:
        print("An error occurred. Please check the file path")
        raise
return conn, curs
def print_table_names(path_to_db):
"""
Print and return the names of tables in a SQLite database
"""
conn, curs = connect_to_db(path_to_db)
result = curs.execute("SELECT name FROM sqlite_master WHERE type='table'").fetchall()
print(result)
return result
def load_file_to_db(path_to_file, path_to_db, table_name, delim):
"""
Load a text file of any size into a SQLite database
Parameters
----------
path_to_file: str
Location of the text file
path_to_db: str
Location of the SQLite db
table_name: str
Name of the table to be created in the database
delim: str
The delimiter for the text file
Returns
-------
None
"""
conn, curs = connect_to_db(path_to_db)
print("The database at {} contains the following tables.".format(path_to_db))
print(curs.execute("SELECT name FROM sqlite_master WHERE type='table'").fetchall())
if os.path.exists(path_to_file):
size_ = get_file_info(path_to_file).get('size')
rows_ = get_file_info(path_to_file).get('rows')
try:
if size_ < 250:
print("{} is a small file. Importing directly.".format(path_to_file))
df_ = pd.read_csv(
path_to_file,
sep=delim,
low_memory=False,
error_bad_lines=False,
quoting=csv.QUOTE_NONE
)
df_.to_sql(
name=table_name,
con=conn,
index=False,
if_exists='append'
)
print("Done.")
else:
print("{} is large. Importing in chunks.".format(path_to_file))
csize = int(np.ceil(rows_/10))
chunks = pd.read_csv(
path_to_file,
sep=delim,
chunksize=csize,
error_bad_lines=False,
low_memory=False,
quoting=csv.QUOTE_NONE
)
for c in chunks:
c.to_sql(
name=table_name,
con=conn,
index=False,
if_exists='append'
)
print("Done")
except:
print("An error occurred while reading the file.")
else:
print("File not found at {}, please check the path".format(path_to_file))
return None
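# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module). Because of the
# relative import above, these helpers are meant to be called from package
# code rather than by running this file directly; the package/module name and
# the file paths below are placeholders, not real ones.
#
#     from yourpackage.db_utils import load_file_to_db, print_table_names
#     load_file_to_db('data/raw/example.csv', 'data/processed/example.db',
#                     table_name='example', delim=',')
#     print_table_names('data/processed/example.db')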
|
[
"pandas.read_csv",
"sqlite3.connect",
"os.path.exists",
"numpy.ceil"
] |
[((1773, 1801), 'os.path.exists', 'os.path.exists', (['path_to_file'], {}), '(path_to_file)\n', (1787, 1801), False, 'import os\n'), ((486, 506), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (500, 506), False, 'import os\n'), ((574, 595), 'sqlite3.connect', 'sqlite3.connect', (['path'], {}), '(path)\n', (589, 595), False, 'import sqlite3\n'), ((677, 698), 'sqlite3.connect', 'sqlite3.connect', (['path'], {}), '(path)\n', (692, 698), False, 'import sqlite3\n'), ((2064, 2170), 'pandas.read_csv', 'pd.read_csv', (['path_to_file'], {'sep': 'delim', 'low_memory': '(False)', 'error_bad_lines': '(False)', 'quoting': 'csv.QUOTE_NONE'}), '(path_to_file, sep=delim, low_memory=False, error_bad_lines=\n False, quoting=csv.QUOTE_NONE)\n', (2075, 2170), True, 'import pandas as pd\n'), ((2679, 2801), 'pandas.read_csv', 'pd.read_csv', (['path_to_file'], {'sep': 'delim', 'chunksize': 'csize', 'error_bad_lines': '(False)', 'low_memory': '(False)', 'quoting': 'csv.QUOTE_NONE'}), '(path_to_file, sep=delim, chunksize=csize, error_bad_lines=False,\n low_memory=False, quoting=csv.QUOTE_NONE)\n', (2690, 2801), True, 'import pandas as pd\n'), ((2635, 2654), 'numpy.ceil', 'np.ceil', (['(rows_ / 10)'], {}), '(rows_ / 10)\n', (2642, 2654), True, 'import numpy as np\n')]
|
"""
script to matrix normalization
"""
from functools import reduce
import math as m
import numpy as np
def minmax_normalization(x, type):
"""
:param x: column of matrix data
:param type: type of normalization
:return: min max normalized column of matrix data
"""
if min(x) == max(x):
return np.ones(x.shape)
if type == 'cost':
return (max(x) - x) / (max(x) - min(x))
return (x - min(x)) / (max(x) - min(x))
def max_normalization(x, type):
"""
:param x: column of matrix data
:param type: type of normalization
:return: max normalized column of matrix data
"""
if type == 'cost':
return 1 - x/max(x)
return x / max(x)
def sum_normalization(x, type):
"""
:param x: column of matrix data
:param type: type of normalization
:return: sum normalized column of matrix data
"""
if type == 'cost':
return (1/x) / sum(1/x)
return x / sum(x)
def vector_normalization(x, type):
"""
:param x: column of matrix data
:param type: type of normalization
:return: vector normalized column of matrix data
"""
if type == 'cost':
return 1 - (x / np.sqrt(sum(x ** 2)))
return x / np.sqrt(sum(x ** 2))
def logaritmic_normalization(x, type):
"""
:param x: column of matrix data
:param type: type of normalization
:return: logarithmic normalized column of matrix data
"""
prod = reduce(lambda a, b: a*b, x)
if type == 'cost':
return (1 - (np.log(x) / m.log(prod))) / (len(x) - 1)
return np.log(x) / m.log(prod)
def normalize(matrix, types, method, precision = 2):
"""
:param matrix: decision matrix
:param types: types of normalization for columns
:param method: method of normalization
:param precision: precision
:return: normalized matrix
"""
if matrix.shape[1] != len(types):
        print('Sizes do not match')
    normalized_matrix = matrix.astype('float')
    for i in range(len(types)):
        # the same call handles every column; the 'cost'/'profit' distinction
        # is handled inside the chosen normalization method
        normalized_matrix[:, i] = np.round(method(matrix[:, i], types[i]), precision)
return normalized_matrix
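# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): normalise a small
# decision matrix with one cost criterion and two benefit criteria. The matrix
# values are illustrative assumptions only.
if __name__ == '__main__':
    matrix = np.array([[250, 16, 12],
                       [200, 16, 8],
                       [300, 32, 16]])
    types = ['cost', 'profit', 'profit']
    print(normalize(matrix, types, minmax_normalization))
    print(normalize(matrix, types, vector_normalization, precision=3))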
|
[
"functools.reduce",
"numpy.log",
"numpy.ones",
"math.log"
] |
[((1453, 1482), 'functools.reduce', 'reduce', (['(lambda a, b: a * b)', 'x'], {}), '(lambda a, b: a * b, x)\n', (1459, 1482), False, 'from functools import reduce\n'), ((328, 344), 'numpy.ones', 'np.ones', (['x.shape'], {}), '(x.shape)\n', (335, 344), True, 'import numpy as np\n'), ((1577, 1586), 'numpy.log', 'np.log', (['x'], {}), '(x)\n', (1583, 1586), True, 'import numpy as np\n'), ((1589, 1600), 'math.log', 'm.log', (['prod'], {}), '(prod)\n', (1594, 1600), True, 'import math as m\n'), ((1525, 1534), 'numpy.log', 'np.log', (['x'], {}), '(x)\n', (1531, 1534), True, 'import numpy as np\n'), ((1537, 1548), 'math.log', 'm.log', (['prod'], {}), '(prod)\n', (1542, 1548), True, 'import math as m\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
data4models.py
# Sentiment Indentification for Roman Urdu
'''
import numpy as np
import pandas as pd
class Data:
# Constructor
def __init__( self, config ):
self.config = config
def split( self, df ):
'''
Split the (entire) data into training data & test data
'''
assert isinstance( df, pd.DataFrame), 'df must be a pandas.DataFrame.'
test_split_ratio = self.config.test_split_ratio
print(f'Data.preprocess.split: test_split_ratio= {test_split_ratio}' )
reviews = df['review']
sentiments = df['sentiment']
n_dataset = df.shape[0]
n_test = int( n_dataset * test_split_ratio ) # 0.7
n_training = n_dataset - n_test # 0.3
        # Use indexing to split the data.
index_data = np.arange( n_dataset )
index_training = np.random.choice( index_data, n_training, replace=False )
index_test = np.delete( index_data, index_training )
data_training_np = reviews.loc[ index_training ].values
data_test_np = reviews.loc[ index_test ].values
labels_training_np = sentiments.loc[ index_training ].values
labels_test_np = sentiments.loc[ index_test ].values
print(f' number of dataset =', n_dataset )
print(f' np.shape(x_train) =', np.shape(data_training_np) )
print(f' np.shape(y_train) =', np.shape(labels_training_np) )
print(f' np.shape(x_test) =', np.shape(data_test_np) )
print(f' np.shape(y_test) =', np.shape(labels_test_np) )
return data_training_np, labels_training_np, data_test_np, labels_test_np
# x_train, y_train, x_test, y_test
# def __init__( self, x, y, config ):
# self.config = config
# self.x = x # shape = (length, dimension)
# self.y = y # shape = (length,)
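    # NOTE: this second split() redefines the split(df) above (Python keeps only
    # the latest definition); it expects self.x and self.y to be set, e.g. via
    # the alternative constructor sketched in the comments above.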
def split( self, split_rate=[0.7, 0.2, 0.1] ):
'''
The default ratio to split the training, evaluation, & test data is 7:2:1.
'''
print( 'split_rate = ', split_rate )
length, dimension = np.shape( self.x )
# Split the (entire) data into training data & test data
n_training = int( length * split_rate[0] ) # 0.7
n_evaluation = int( length * split_rate[1] ) # 0.2
n_test = length - n_training - n_evaluation
        # Use indexing to split the data.
index_data = np.arange( length ) # 13704, [0, length-1]
index_training = np.random.choice( index_data, n_training, replace=False ) # 9592
index_temp = np.delete( index_data, index_training ) # 4112
        # sample positions (not raw index values) so that np.delete removes
        # exactly the evaluation rows and exactly n_test indices remain
        pos_evaluation   = np.random.choice( len(index_temp), n_evaluation, replace=False )
        index_evaluation = index_temp[ pos_evaluation ]                 # 2740
        index_test       = np.delete( index_temp, pos_evaluation )      # n_test
data_training = self.x[ index_training, : ]
data_evaluation = self.x[ index_evaluation, : ]
data_test = self.x[ index_test, : ]
labels_training = self.y[ index_training ]
labels_evaluation = self.y[ index_evaluation ]
labels_test = self.y[ index_test ]
training = [data_training, labels_training]
evaluation = [data_evaluation, labels_evaluation]
test = [data_test, labels_test]
return training, evaluation, test
# #=====================================================================#
# # The above variables don't have the leading self. to improve readability.
# self.length = length # = size, or n_data
# self.dimension = dimension
#
# self.n_training = n_training
# self.n_test = n_test
def load(self, batch_size):
data_length = len( self.data_training )
if data_length >= batch_size:
# Because of replace=False,
# ValueError: Cannot take a larger sample than population when 'replace=False'
index = np.random.choice( data_length, batch_size, replace=False )
data = self.data_training[ index,: ]
labels = self.labels_training[ index ]
self.data_training = np.delete( self.data_training, index, axis=0 )
self.labels_training = np.delete( self.labels_training, index )
done = True
        else: #data_length < batch_size:
            # return the remaining samples, then reset the pool for the next epoch
            data   = self.data_training
            labels = self.labels_training
            self.data_training   = self.x[ self.index_training ]
            self.labels_training = self.y[ self.index_training ]
            done = False
return data, labels, done
# EOF
|
[
"numpy.shape",
"numpy.arange",
"numpy.delete",
"numpy.random.choice"
] |
[((927, 947), 'numpy.arange', 'np.arange', (['n_dataset'], {}), '(n_dataset)\n', (936, 947), True, 'import numpy as np\n'), ((976, 1031), 'numpy.random.choice', 'np.random.choice', (['index_data', 'n_training'], {'replace': '(False)'}), '(index_data, n_training, replace=False)\n', (992, 1031), True, 'import numpy as np\n'), ((1060, 1097), 'numpy.delete', 'np.delete', (['index_data', 'index_training'], {}), '(index_data, index_training)\n', (1069, 1097), True, 'import numpy as np\n'), ((2284, 2300), 'numpy.shape', 'np.shape', (['self.x'], {}), '(self.x)\n', (2292, 2300), True, 'import numpy as np\n'), ((2630, 2647), 'numpy.arange', 'np.arange', (['length'], {}), '(length)\n', (2639, 2647), True, 'import numpy as np\n'), ((2702, 2757), 'numpy.random.choice', 'np.random.choice', (['index_data', 'n_training'], {'replace': '(False)'}), '(index_data, n_training, replace=False)\n', (2718, 2757), True, 'import numpy as np\n'), ((2796, 2833), 'numpy.delete', 'np.delete', (['index_data', 'index_training'], {}), '(index_data, index_training)\n', (2805, 2833), True, 'import numpy as np\n'), ((2882, 2924), 'numpy.random.choice', 'np.random.choice', (['index_temp', 'n_evaluation'], {}), '(index_temp, n_evaluation)\n', (2898, 2924), True, 'import numpy as np\n'), ((2962, 3001), 'numpy.delete', 'np.delete', (['index_temp', 'index_evaluation'], {}), '(index_temp, index_evaluation)\n', (2971, 3001), True, 'import numpy as np\n'), ((1488, 1514), 'numpy.shape', 'np.shape', (['data_training_np'], {}), '(data_training_np)\n', (1496, 1514), True, 'import numpy as np\n'), ((1558, 1586), 'numpy.shape', 'np.shape', (['labels_training_np'], {}), '(labels_training_np)\n', (1566, 1586), True, 'import numpy as np\n'), ((1630, 1652), 'numpy.shape', 'np.shape', (['data_test_np'], {}), '(data_test_np)\n', (1638, 1652), True, 'import numpy as np\n'), ((1696, 1720), 'numpy.shape', 'np.shape', (['labels_test_np'], {}), '(labels_test_np)\n', (1704, 1720), True, 'import numpy as np\n'), ((4261, 4317), 'numpy.random.choice', 'np.random.choice', (['data_length', 'batch_size'], {'replace': '(False)'}), '(data_length, batch_size, replace=False)\n', (4277, 4317), True, 'import numpy as np\n'), ((4475, 4519), 'numpy.delete', 'np.delete', (['self.data_training', 'index'], {'axis': '(0)'}), '(self.data_training, index, axis=0)\n', (4484, 4519), True, 'import numpy as np\n'), ((4558, 4596), 'numpy.delete', 'np.delete', (['self.labels_training', 'index'], {}), '(self.labels_training, index)\n', (4567, 4596), True, 'import numpy as np\n')]
|
# ---------------------------------------------------------------------------- #
# World Cup: Stats scanner
# Ver: 0.01
# ---------------------------------------------------------------------------- #
#
# Code by <NAME>
#
# ---------------------------------------------------------------------------- #
import os
import numpy as np
import pandas as pd
import re
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.firefox.options import Options
from selenium.common.exceptions import TimeoutException, NoSuchElementException, WebDriverException
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from time import sleep
os.chdir("/mnt/aec0936f-d983-44c1-99f5-0f5b36390285/Dropbox/Python/Predictive Analytics FIFA")
'''
browser = webdriver.Firefox()
browser.get("https://www.whoscored.com/Regions/247/Tournaments/36/Seasons/5967/Stages/15737/Show/International-FIFA-World-Cup-2018")
sleep(3)
base_url = 'https://www.whoscored.com'
def get_countries_links(browser):
return [team.get_attribute('href') for team in browser.find_elements_by_xpath('//table[@id="tournament-fixture"]//td[contains(@class,"team")]//a')]
countries_link = set()
countries_link.update(get_countries_links(browser))
browser.find_elements_by_xpath('//table[@id="tournament-fixture"]//td[contains(@class,"team")]//a')[0].get_attribute('href')
# click next page
browser.find_element_by_xpath('//span[contains(@class, "ui-icon-triangle-1-e")]').click()
sleep(1)
countries_link.update(get_countries_links(browser))
# click next page
browser.find_element_by_xpath('//span[contains(@class, "ui-icon-triangle-1-e")]').click()
sleep(1)
countries_link.update(get_countries_links(browser))
#countries_link
player_link = dict()
for country_link in countries_link:
browser.get(country_link)
sleep(1)
team = browser.find_element_by_xpath('//span[@class="team-header-name"]')
player_link[team.text] = dict()
for player in browser.find_elements_by_xpath('//table[@id="top-player-stats-summary-grid"]//tbody//tr//a'):
player_link[team.text][player.text] = player.get_attribute('href')
np.save("Data/player_link.npy", player_link)
'''
def detect_element(browser, element_id, by_what = By.ID):
# Simplify the detection of an element in the browser
element_present = EC.presence_of_element_located((by_what, element_id))
try:
WebDriverWait(browser, 5, poll_frequency = .1).until(element_present)
return True
except TimeoutException as e:
return False
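# Illustrative use of detect_element (the scraping loop below calls WebDriverWait directly
# instead): detect_element(browser, 'top-player-stats-summary-grid') would return True once
# the summary table is present, or False after the 5 s timeout.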
player_link = np.load("Data/player_link.npy").item()
# resume from a previous run if a saved ratings file exists, otherwise start fresh
try:
    rating_dict = np.load("Data/rating_dict.npy").item()
except IOError:
    rating_dict = {team: {} for team in player_link.keys()}
# copy the nested dicts (a plain .copy() would be shallow and pop from rating_dict too,
# breaking the iteration below), then drop NaN ratings so those players are re-scraped
already_loaded = {team: dict(players) for team, players in rating_dict.items()}
for team in rating_dict.keys():
    for player in rating_dict[team]:
        if pd.isnull(rating_dict[team][player]):
            already_loaded[team].pop(player, None)
#caps = DesiredCapabilities().FIREFOX
caps = DesiredCapabilities.CHROME
caps["pageLoadStrategy"] = "none"
browser = webdriver.Chrome(desired_capabilities = caps)#Firefox(capabilities=caps)
for team in player_link.keys():
for player in player_link[team].keys():
if player in already_loaded[team].keys(): continue
while True:
try:
browser.get(player_link[team][player])
wait = WebDriverWait(browser, 20)
wait.until(EC.presence_of_element_located((By.XPATH, '//table[@id="top-player-stats-summary-grid"]')))
browser.execute_script("window.stop();")
try:
rating_dict[team][player] = browser.find_elements_by_xpath('//table[@id="top-player-stats-summary-grid"]//td[@class="rating"]')[-1].text
print(rating_dict[team][player])
break
except IndexError:
try:
iframe = browser.find_element_by_xpath('//iframe')
browser.switch_to_frame(iframe)
browser.find_element_by_xpath('//p[contains(text(), "Access Denied")]')
sleep(5)
except NoSuchElementException:
                        rating_dict[team][player] = np.nan
                        break  # no rating table and no "Access Denied" page: record NaN and move on
except TimeoutException:
sleep(5)
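    # checkpoint the ratings to disk after each team so an interrupted run can resume;
    # NaN entries are removed from already_loaded at start-up and re-scraped next time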
np.save("Data/rating_dict.npy", rating_dict)
rating_dict['Saudi Arabia']
|
[
"selenium.webdriver.support.ui.WebDriverWait",
"selenium.webdriver.support.expected_conditions.presence_of_element_located",
"numpy.load",
"numpy.save",
"pandas.isnull",
"time.sleep",
"selenium.webdriver.Chrome",
"os.chdir"
] |
[((897, 1001), 'os.chdir', 'os.chdir', (['"""/mnt/aec0936f-d983-44c1-99f5-0f5b36390285/Dropbox/Python/Predictive Analytics FIFA"""'], {}), "(\n '/mnt/aec0936f-d983-44c1-99f5-0f5b36390285/Dropbox/Python/Predictive Analytics FIFA'\n )\n", (905, 1001), False, 'import os\n'), ((3265, 3308), 'selenium.webdriver.Chrome', 'webdriver.Chrome', ([], {'desired_capabilities': 'caps'}), '(desired_capabilities=caps)\n', (3281, 3308), False, 'from selenium import webdriver\n'), ((4577, 4621), 'numpy.save', 'np.save', (['"""Data/rating_dict.npy"""', 'rating_dict'], {}), "('Data/rating_dict.npy', rating_dict)\n", (4584, 4621), True, 'import numpy as np\n'), ((2570, 2623), 'selenium.webdriver.support.expected_conditions.presence_of_element_located', 'EC.presence_of_element_located', (['(by_what, element_id)'], {}), '((by_what, element_id))\n', (2600, 2623), True, 'from selenium.webdriver.support import expected_conditions as EC\n'), ((2807, 2838), 'numpy.load', 'np.load', (['"""Data/player_link.npy"""'], {}), "('Data/player_link.npy')\n", (2814, 2838), True, 'import numpy as np\n'), ((3001, 3037), 'pandas.isnull', 'pd.isnull', (['rating_dict[team][player]'], {}), '(rating_dict[team][player])\n', (3010, 3037), True, 'import pandas as pd\n'), ((2642, 2687), 'selenium.webdriver.support.ui.WebDriverWait', 'WebDriverWait', (['browser', '(5)'], {'poll_frequency': '(0.1)'}), '(browser, 5, poll_frequency=0.1)\n', (2655, 2687), False, 'from selenium.webdriver.support.ui import WebDriverWait\n'), ((3594, 3620), 'selenium.webdriver.support.ui.WebDriverWait', 'WebDriverWait', (['browser', '(20)'], {}), '(browser, 20)\n', (3607, 3620), False, 'from selenium.webdriver.support.ui import WebDriverWait\n'), ((3648, 3742), 'selenium.webdriver.support.expected_conditions.presence_of_element_located', 'EC.presence_of_element_located', (['(By.XPATH, \'//table[@id="top-player-stats-summary-grid"]\')'], {}), '((By.XPATH,\n \'//table[@id="top-player-stats-summary-grid"]\'))\n', (3678, 3742), True, 'from selenium.webdriver.support import expected_conditions as EC\n'), ((4555, 4563), 'time.sleep', 'sleep', (['(5)'], {}), '(5)\n', (4560, 4563), False, 'from time import sleep\n'), ((4382, 4390), 'time.sleep', 'sleep', (['(5)'], {}), '(5)\n', (4387, 4390), False, 'from time import sleep\n')]
|
# coding=utf-8
# Copyright (c) DIRECT Contributors
from typing import List, Optional, Tuple, Union
import numpy as np
from scipy.stats import multivariate_normal as normal
def simulate_sensitivity_maps(
shape: Union[List[int], Tuple[int]], num_coils: int, var: float = 1, seed: Optional[int] = None
) -> np.ndarray:
r"""Simulates coil sensitivities using bi-variate or tri-variate gaussian distribution.
Parameters
----------
shape: List[int] or Tuple[int]
(nx, ny) or (nx, ny, nz).
num_coils: int
Number of coils to be simulated.
var: float
Variance.
seed: int or None
If not None, a seed will be used to produce an offset for the gaussian mean :math:`\mu`.
Returns
-------
    sensitivity_map : np.ndarray
Simulated coil sensitivity maps of shape (num_coils, \*shape).
Notes
-----
Sensitivity maps are normalized such that:
.. math::
\sum_{k=1}^{n_c} {S^{k}}^{*}S^{k} = I.
"""
if num_coils == 1:
return np.ones(shape)[None] + 0.0j
# X, Y are switched in np.meshgrid
meshgrid = np.meshgrid(*[np.linspace(-1, 1, n) for n in shape[:2][::-1] + shape[2:]])
indices = np.stack(meshgrid, axis=-1)
sensitivity_map = np.zeros((num_coils, *shape))
# Assume iid
cov = np.zeros(len(shape))
for ii in range(len(shape)):
cov[ii] = var
cov = np.diag(cov)
if seed:
np.random.seed(seed)
offset = np.random.uniform(0, 2 * np.pi, 1)
for coil_idx in range(num_coils):
mu = [
np.cos(coil_idx / num_coils * 2 * np.pi + offset).item(),
np.sin(coil_idx / num_coils * 2 * np.pi + offset).item(),
]
if len(shape) == 3:
mu += [0.0]
sensitivity_map[coil_idx] = normal(mu, cov).pdf(indices)
sensitivity_map = sensitivity_map + 1.0j * sensitivity_map # make complex
# Normalize
sensitivity_map_norm = np.sqrt((np.conj(sensitivity_map) * sensitivity_map).sum(0))[None]
sensitivity_map = sensitivity_map / sensitivity_map_norm
return sensitivity_map
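# A minimal usage sketch (illustrative, not part of the original module): simulate eight
# coils over a 64 x 64 grid and verify the normalization property from the docstring.
# The shape, coil count and seed here are arbitrary choices.
if __name__ == "__main__":
    sens = simulate_sensitivity_maps(shape=[64, 64], num_coils=8, var=1.0, seed=42)
    print(sens.shape)  # (8, 64, 64), complex-valued
    # after normalization, sum_k conj(S_k) * S_k should be ~1 at every pixel
    print(np.allclose((np.conj(sens) * sens).sum(0).real, 1.0))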
|
[
"numpy.stack",
"numpy.random.uniform",
"numpy.conj",
"numpy.random.seed",
"numpy.zeros",
"numpy.ones",
"scipy.stats.multivariate_normal",
"numpy.sin",
"numpy.linspace",
"numpy.cos",
"numpy.diag"
] |
[((1211, 1238), 'numpy.stack', 'np.stack', (['meshgrid'], {'axis': '(-1)'}), '(meshgrid, axis=-1)\n', (1219, 1238), True, 'import numpy as np\n'), ((1262, 1291), 'numpy.zeros', 'np.zeros', (['(num_coils, *shape)'], {}), '((num_coils, *shape))\n', (1270, 1291), True, 'import numpy as np\n'), ((1405, 1417), 'numpy.diag', 'np.diag', (['cov'], {}), '(cov)\n', (1412, 1417), True, 'import numpy as np\n'), ((1473, 1507), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(2 * np.pi)', '(1)'], {}), '(0, 2 * np.pi, 1)\n', (1490, 1507), True, 'import numpy as np\n'), ((1439, 1459), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1453, 1459), True, 'import numpy as np\n'), ((1040, 1054), 'numpy.ones', 'np.ones', (['shape'], {}), '(shape)\n', (1047, 1054), True, 'import numpy as np\n'), ((1136, 1157), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', 'n'], {}), '(-1, 1, n)\n', (1147, 1157), True, 'import numpy as np\n'), ((1799, 1814), 'scipy.stats.multivariate_normal', 'normal', (['mu', 'cov'], {}), '(mu, cov)\n', (1805, 1814), True, 'from scipy.stats import multivariate_normal as normal\n'), ((1573, 1622), 'numpy.cos', 'np.cos', (['(coil_idx / num_coils * 2 * np.pi + offset)'], {}), '(coil_idx / num_coils * 2 * np.pi + offset)\n', (1579, 1622), True, 'import numpy as np\n'), ((1643, 1692), 'numpy.sin', 'np.sin', (['(coil_idx / num_coils * 2 * np.pi + offset)'], {}), '(coil_idx / num_coils * 2 * np.pi + offset)\n', (1649, 1692), True, 'import numpy as np\n'), ((1960, 1984), 'numpy.conj', 'np.conj', (['sensitivity_map'], {}), '(sensitivity_map)\n', (1967, 1984), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 23 13:17:41 2018
@author: laurenwalters
"""
import numpy as np
import matplotlib.pyplot as plt
import random
#For saving/importing data
from numpy import asarray
from numpy import save
from numpy import load
#Created by <NAME>, 2018-2020
#Contributions by <NAME>
#For reactions in aqueous conditions
#find out how much detail you want in your graph
#n=input("Enter the mesh grid detail you want, suggested (30-140): ")
n=30;
#Constants
R=8.31447; #J/(mol*K)
T=298.15; #K
F= 9.648533*10**4; #C/mol, i.e. J/(V*mol)
P=1; #bar, 10^5*Pa
eta=6
nI=10**-eta; #Activity Concentration
#Array showing the composition of Cu:Bi:S
composition=np.array([1,1,1])
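#target Cu:Bi:S molar ratio used when screening species combinations below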
#pH Range and Constants
lowpH = -2;
highpH = 16;
pHrange = int;
pHrange = highpH-lowpH;
pHcount = pHrange/n; #used to iterate through pH range
#Applied Potential Range and Constants
Ulow = -1.5; #V
Uhigh = 1.5; #V
Urange = Uhigh-Ulow; #V
Ucount = Urange/n; #used to iterate through U (energy) range
###############################################################################
######################## DFT CALCULATIONS #####################################
###############################################################################
#Electronic Energies in eV/f.u.
#PBEsol with SOC
Ee_Bi= -5.114928333;
Ee_Bi2O3= -31.163316;
Ee_Bi2O5= -40.1344765;
Ee_Bi2O4=-36.7221975;
Ee_Bi4O7=-68.40888;
#PBEsol
Ee_Cu=-4.3152965;
Ee_CuO=-10.7488868;
Ee_Cu2O=-14.99698;
Ee_CuOH2_s=-25.1916025;
#PBEsol
Ee_O2=-10.281123
Ee_H2=-6.5141508
Ee_S= -4.391811875;
###############################################################################
########### MULTICOMPONENT SPECIES ############################################
#Calculated with PBEsol
Ee_Cu2S=-13.4793116667;
Ee_Cu7S4=-49.8241325;
Ee_CuS=-9.170266;
Ee_CuS2=-13.63935;
Ee_Cu2SO4_3=-101.5166;
Ee_BiCu=-9.31218;
Ee_CuBiO2_2=-42.245475;
Ee_BiS2=-14.6172585;
Ee_Bi2S3=-24.878388;
Ee_Bi2S2O=-27.2327565;
Ee_Bi2SO4_3=-109.35902;
Ee_Bi14OS24=-247.57619;
Ee_Bi2SO2=-29.50652;
Ee_BiSCuO=-21.5022935;
Ee_Cu3BiS3=-32.4713275;
Ee_Cu4Bi4S9=-80.830705;
Ee_Cu4BiS2_5=-90.647798;
Ee_CuBiS2=-19.041996;
###############################################################################
###### Vibrational Energy #####################################################
###############################################################################
#Vibrational Energies in eV/f.u.
#From PBEsol Phonon Calculations
Fvib_O2=-0.272;
F_rot_trans_O2=0.099;
Ftot_O2=Fvib_O2+F_rot_trans_O2;
F_H = .202;
Fvib_S=-0.0091266451372
Fvib_CuO=0.062498987735
Fvib_Cu2O=0.00507624852
Fvib_Cu=-0.007167374680
Fvib_CuOH2_s=0.66653026525
Fvib_Bi=-0.0761976993239
Fvib_Bi2O3=-0.057653546889
Fvib_Bi2O5=0.14677315404
Fvib_Bi2O4=0.12231438709
Fvib_Bi4O7=0.08741679245
Fvib_Cu2S=-0.0050937891364
Fvib_Cu7S4=-0.178002185722
Fvib_CuS=-0.0119849701814
Fvib_CuS2=-0.0033060080158
Fvib_Cu2SO4_3=1.00135494361
Fvib_BiCu=-0.11006963132
Fvib_CuBiO2_2=0.09853363658
Fvib_BiS2=-0.063943629448
Fvib_Bi2S3=-0.1428187610337
Fvib_Bi2S2O=-0.08193190191
Fvib_Bi2SO4_3=0.81266278392
Fvib_Bi14OS24=0.02990373431
Fvib_Bi2SO2=-0.0265520338422
Fvib_BiSCuO=-0.039894146059
Fvib_Cu3BiS3=-0.1661179102334
Fvib_Cu4Bi4S9=-0.3270592722135
Fvib_Cu4BiS2_5=-0.430548296696
Fvib_CuBiS2=-0.08663072302
###############################################################################
### Compounds-Calculate the formation energies ############################
###############################################################################
#Free Energies of Formation in eV/f.u.
dGf_CuO= (Ee_CuO+Fvib_CuO) -(Ee_Cu+Fvib_Cu) - 0.5*(Ee_O2+Ftot_O2);
dGf_Cu2O=(Ee_Cu2O+Fvib_Cu2O) -2.0*(Ee_Cu+Fvib_Cu) - 0.5*(Ee_O2+Ftot_O2);
dGf_CuOH2_s= (Ee_CuOH2_s+Fvib_CuOH2_s) -(Ee_Cu+Fvib_Cu)-(Ee_O2+Ftot_O2)-(Ee_H2+F_H);
dGf_Bi2O3= ((Ee_Bi2O3)+Fvib_Bi2O3) -2.0*(Ee_Bi+Fvib_Bi)-1.5*(Ee_O2-Ftot_O2);
dGf_Bi2O5= ((Ee_Bi2O5)+Fvib_Bi2O5) -2.0*(Ee_Bi+Fvib_Bi)-2.5*(Ee_O2-Ftot_O2);
dGf_Bi2O4= ((Ee_Bi2O4)+Fvib_Bi2O4) -2.0*(Ee_Bi+Fvib_Bi)-2.0*(Ee_O2-Ftot_O2);
dGf_Bi4O7= ((Ee_Bi4O7)+Fvib_Bi4O7) -4.0*(Ee_Bi+Fvib_Bi)-3.5*(Ee_O2-Ftot_O2);
dGf_Cu2S=(Ee_Cu2S+Fvib_Cu2S) -2*(Ee_Cu+Fvib_Cu)-(Ee_S+Fvib_S);
dGf_Cu7S4=(Ee_Cu7S4+Fvib_Cu7S4) -7*(Ee_Cu+Fvib_Cu)-4*(Ee_S+Fvib_S);
dGf_CuS=(Ee_CuS+Fvib_CuS) -(Ee_Cu+Fvib_Cu)-(Ee_S+Fvib_S);
dGf_CuS2=(Ee_CuS2+Fvib_CuS2) -(Ee_Cu+Fvib_Cu)-2*(Ee_S+Fvib_S);
dGf_Cu2SO4_3=(Ee_Cu2SO4_3+Fvib_Cu2SO4_3) -2*(Ee_Cu+Fvib_Cu)-3*(Ee_S+Fvib_S)-6.0*((Ee_O2)-Ftot_O2);
dGf_BiCu=(Ee_BiCu+Fvib_BiCu) -(Ee_Cu+Fvib_Cu)-(Ee_Bi+Fvib_Bi);
dGf_CuBiO2_2=(Ee_CuBiO2_2+Fvib_CuBiO2_2) -(Ee_Cu+Fvib_Cu)-2*(Ee_Bi+Fvib_Bi)-2.0*((Ee_O2)-Ftot_O2);
dGf_BiS2=(Ee_BiS2+Fvib_BiS2) -(Ee_Bi+Fvib_Bi)-2*(Ee_S+Fvib_S);
dGf_Bi2S3=(Ee_Bi2S3+Fvib_Bi2S3) -2*(Ee_Bi+Fvib_Bi)-3*(Ee_S+Fvib_S);
dGf_Bi2S2O=(Ee_Bi2S2O+Fvib_Bi2S2O) -2*(Ee_Bi+Fvib_Bi)-2*(Ee_S+Fvib_S)-0.5*((Ee_O2)-Ftot_O2);
dGf_Bi2SO4_3=(Ee_Bi2SO4_3+Fvib_Bi2SO4_3) -2*(Ee_Bi+Fvib_Bi)-3*(Ee_S+Fvib_S)-6.0*((Ee_O2)-Ftot_O2);
dGf_Bi14OS24=(Ee_Bi14OS24+Fvib_Bi14OS24) -14*(Ee_Bi+Fvib_Bi)-24*(Ee_S+Fvib_S)-0.5*((Ee_O2)-Ftot_O2);
dGf_Bi2SO2=(Ee_Bi2SO2+Fvib_Bi2SO2) -2*(Ee_Bi+Fvib_Bi)-(Ee_S+Fvib_S)-1.0*((Ee_O2)-Ftot_O2);
dGf_BiSCuO=(Ee_BiSCuO+Fvib_BiSCuO) -(Ee_Cu+Fvib_Cu)-(Ee_Bi+Fvib_Bi)-(Ee_S+Fvib_S)-0.5*((Ee_O2)-Ftot_O2);
dGf_Cu3BiS3=(Ee_Cu3BiS3+Fvib_Cu3BiS3) -3*(Ee_Cu+Fvib_Cu)-(Ee_Bi+Fvib_Bi)-3*(Ee_S+Fvib_S);
dGf_Cu4Bi4S9=(Ee_Cu4Bi4S9+Fvib_Cu4Bi4S9) -4*(Ee_Cu+Fvib_Cu)-4*(Ee_Bi+Fvib_Bi)-9*(Ee_S+Fvib_S);
dGf_Cu4BiS2_5=(Ee_Cu4BiS2_5+Fvib_Cu4BiS2_5)-4*(Ee_Cu+Fvib_Cu)-5*(Ee_Bi+Fvib_Bi)-10*(Ee_S+Fvib_S);
dGf_CuBiS2=(Ee_CuBiS2+Fvib_CuBiS2) -(Ee_Cu+Fvib_Cu)-(Ee_Bi+Fvib_Bi)-2*(Ee_S+Fvib_S);
#Set the reference values
dGf_Cu=0.0;
dGf_Bi=0.0;
dGf_S=0.0;
###############################################################################
###############################################################################
###############################################################################
###############################################################################
############## Aqueous Ion Free Energies of Formation #########################
#Free Energies of Formation in eV/f.u.
##Elemental Bismuth Species
dGf_Bi_3Plus= 0.6430898
dGf_BiOH_2Plus= -1.6968378
dGf_BiO_Plus= -1.4977965
##Elemental Copper Species
dGf_Cu1= 0.506502
dGf_Cu2= 0.674092
dGf_CuOH2_minus= -3.4518209
dGf_CuOH3= -5.1197432
dGf_CuOH_Plus= -1.3127387
dGf_CuOH4_2=-6.814302
dGf_CuOH2= -3.2666113
dGf_CuOH = -1.2677578
dGf_Cu2OH2_2plus=-2.942417
dGf_Cu3OH4_2plus=-6.567839
#Elemental Sulphur Species
dGf_H2S=-0.283601
dGf_HS_Minus=0.13053
dGf_S_2Minus=0.9521892
dGf_S2_2Minus=0.8563979
dGf_S3_2Minus=0.7791664
dGf_S4_2Minus=0.7204948
dGf_S5_2Minus=0.6803396
dGf_H2S2O3=-5.6329986
dGf_HS2O3_Minus=-5.6156529
dGf_S2O3_2Minus=-5.515915
dGf_S5O6_2Minus=-9.9087
dGf_S4O6_2Minus=-10.5939
dGf_HS2O4_Minus=-6.13203282
dGf_S2O4_2Minus=-5.9842
dGf_S3O6_2Minus=-9.930382
dGf_H2SO3=-5.580528
dGf_HSO3_Minus=-5.464
dGf_SO3_2Minus=-5.03457
dGf_S2O6_2Minus=-10.02
dGf_H2SO4=-7.6901922
dGf_HSO4_Minus=-7.8029389
dGf_SO4_2Minus=-7.6901922
dGf_S2O8_2Minus=-11.361
dGf_HSO5_Minus= -6.60739025
dGf_S2O5_2Minus= -8.195817793
#Water
dGf_H2O=-2.458;
###############################################################################
###############################################################################
###############################################################################
################################################################################
############# CONVERT from eV to J/mol (multiply by F) ######################
###############################################################################
dGf_Cu= dGf_Cu*F;
dGf_CuO= dGf_CuO*F;
dGf_Cu2O= dGf_Cu2O*F;
dGf_Cu1= dGf_Cu1*F;
dGf_Cu2= dGf_Cu2*F;
dGf_CuOH4_2= dGf_CuOH4_2*F;
dGf_CuOH2_minus= dGf_CuOH2_minus*F;
dGf_CuOH3= dGf_CuOH3*F;
dGf_CuOH_Plus= dGf_CuOH_Plus*F;
dGf_CuOH2= dGf_CuOH2*F;
dGf_CuOH = dGf_CuOH*F;
dGf_Cu2OH2_2plus=dGf_Cu2OH2_2plus*F;
dGf_Cu3OH4_2plus=dGf_Cu3OH4_2plus*F;
dGf_CuOH2_s=dGf_CuOH2_s*F
dGf_Bi= dGf_Bi*F;
dGf_Bi2O3= dGf_Bi2O3*F;
dGf_Bi2O5= dGf_Bi2O5*F;
dGf_Bi2O4=dGf_Bi2O4*F;
dGf_Bi4O7=dGf_Bi4O7*F;
dGf_Bi_3Plus= dGf_Bi_3Plus*F;
dGf_BiOH_2Plus= dGf_BiOH_2Plus*F;
dGf_BiO_Plus= dGf_BiO_Plus*F;
dGf_S= dGf_S*F;
dGf_H2S=dGf_H2S*F;
dGf_HS_Minus=dGf_HS_Minus*F;
dGf_S_2Minus=dGf_S_2Minus*F;
dGf_S2_2Minus=dGf_S2_2Minus*F;
dGf_S3_2Minus=dGf_S3_2Minus*F;
dGf_S4_2Minus=dGf_S4_2Minus*F;
dGf_S5_2Minus=dGf_S5_2Minus*F;
dGf_H2S2O3=dGf_H2S2O3*F;
dGf_HS2O3_Minus=dGf_HS2O3_Minus*F;
dGf_S2O3_2Minus=dGf_S2O3_2Minus*F;
dGf_S5O6_2Minus=dGf_S5O6_2Minus*F;
dGf_S4O6_2Minus=dGf_S4O6_2Minus*F;
dGf_HS2O4_Minus=dGf_HS2O4_Minus*F;
dGf_S2O4_2Minus=dGf_S2O4_2Minus*F;
dGf_S3O6_2Minus=dGf_S3O6_2Minus*F;
dGf_H2SO3=dGf_H2SO3*F;
dGf_HSO3_Minus=dGf_HSO3_Minus*F;
dGf_SO3_2Minus=dGf_SO3_2Minus*F;
dGf_S2O6_2Minus=dGf_S2O6_2Minus*F;
dGf_H2SO4=dGf_H2SO4*F;
dGf_HSO4_Minus=dGf_HSO4_Minus*F;
dGf_SO4_2Minus=dGf_SO4_2Minus*F;
dGf_S2O8_2Minus=dGf_S2O8_2Minus*F;
dGf_HSO5_Minus=dGf_HSO5_Minus*F;
dGf_S2O5_2Minus=dGf_S2O5_2Minus*F;
dGf_Cu2S=dGf_Cu2S*F;
dGf_Cu7S4=dGf_Cu7S4*F;
dGf_CuS=dGf_CuS*F;
dGf_CuS2=dGf_CuS2*F;
dGf_Cu2SO4_3=dGf_Cu2SO4_3*F;
dGf_BiCu=dGf_BiCu*F;
dGf_CuBiO2_2=dGf_CuBiO2_2*F;
dGf_BiS2=dGf_BiS2*F;
dGf_Bi2S3=dGf_Bi2S3*F;
dGf_Bi2S2O=dGf_Bi2S2O*F;
dGf_Bi2SO4_3=dGf_Bi2SO4_3*F;
dGf_Bi14OS24=dGf_Bi14OS24*F;
dGf_Bi2SO2=dGf_Bi2SO2*F;
dGf_BiSCuO=dGf_BiSCuO*F;
dGf_Cu3BiS3=dGf_Cu3BiS3*F;
dGf_Cu4Bi4S9=dGf_Cu4Bi4S9*F;
dGf_Cu4BiS2_5=dGf_Cu4BiS2_5*F;
dGf_CuBiS2=dGf_CuBiS2*F;
dGf_H2O= dGf_H2O*F;
###############################################################################
###############################################################################
###############################################################################
###############################################################################
############### Populate the species matrix ################################
###############################################################################
species=np.zeros((65,8))
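#Column layout of the species matrix (one row per species):
#  0: formation free energy dGf (converted above from eV to J/mol)
#  1: electrons transferred          2: H+ transferred
#  3: number of Cu atoms             4: number of Bi atoms       5: number of S atoms
#  6: number of H2O involved         7: 1 if the species is an aqueous ion, else 0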
######## Formation Energies ###################################################
species[0,0]=0.00;
species[1,0]=dGf_CuO
species[2,0]=dGf_Cu2O
species[3,0]=dGf_Cu1
species[4,0]=dGf_Cu2
species[5,0]=dGf_CuOH4_2
species[6,0]=dGf_CuOH2_minus
species[7,0]=dGf_CuOH3
species[8,0]=dGf_CuOH_Plus
species[9,0]=dGf_CuOH2
species[10,0]=dGf_CuOH
species[11,0]=dGf_Cu2OH2_2plus
species[12,0]=dGf_Cu3OH4_2plus
species[13,0]=dGf_Bi
species[14,0]=dGf_Bi2O3
species[15,0]=dGf_Bi2O5
species[16,0]=dGf_Bi2O4
species[17,0]=dGf_Bi4O7
species[18,0]=dGf_Bi_3Plus
species[19,0]=dGf_BiOH_2Plus
species[20,0]=dGf_BiO_Plus
species[21,0]=dGf_S
species[22,0]=dGf_H2S
species[23,0]=dGf_HS_Minus
species[24,0]=dGf_S_2Minus
species[25,0]=dGf_S2_2Minus
species[26,0]=dGf_S3_2Minus
species[27,0]=dGf_S4_2Minus
species[28,0]=dGf_S5_2Minus
species[29,0]=dGf_H2S2O3
species[30,0]=dGf_HS2O3_Minus
species[31,0]=dGf_S2O3_2Minus
species[32,0]=dGf_S5O6_2Minus
species[33,0]=dGf_S4O6_2Minus
species[34,0]=dGf_HS2O4_Minus
species[35,0]=dGf_S2O4_2Minus
species[36,0]=dGf_S3O6_2Minus
species[37,0]=dGf_H2SO3
species[38,0]=dGf_HSO3_Minus
species[39,0]=dGf_SO3_2Minus
species[40,0]=dGf_S2O6_2Minus
species[41,0]=dGf_H2SO4
species[42,0]=dGf_HSO4_Minus
species[43,0]=dGf_SO4_2Minus
species[44,0]=dGf_S2O8_2Minus
species[45,0]=dGf_HSO5_Minus
species[46,0]=dGf_S2O5_2Minus
species[47,0]=dGf_Cu2S
species[48,0]=dGf_Cu7S4
species[49,0]=dGf_CuS
species[50,0]=dGf_CuS2
species[51,0]=dGf_Cu2SO4_3
species[52,0]=dGf_BiCu
species[53,0]=dGf_CuBiO2_2
species[54,0]=dGf_BiS2
species[55,0]=dGf_Bi2S3
species[56,0]=dGf_Bi2S2O
species[57,0]=dGf_Bi2SO4_3
species[58,0]=dGf_Bi14OS24
species[59,0]=dGf_Bi2SO2
species[60,0]=dGf_CuBiS2
species[61,0]=dGf_Cu4Bi4S9
species[62,0]=dGf_Cu4BiS2_5
species[63,0]=dGf_BiSCuO
species[64,0]=dGf_Cu3BiS3
######## Electron Count #######################################################
#Cu
species[0,1]=0.00;
species[1,1]=2
species[2,1]=2
species[3,1]=1
species[4,1]=2
species[5,1]=2
species[6,1]=1
species[7,1]=2
species[8,1]=2
species[9,1]=2
species[10,1]=1
species[11,1]=4
species[12,1]=6
#Bi
species[13,1]=0
species[14,1]=6
species[15,1]=10
species[16,1]=8
species[17,1]=14
species[18,1]=3
species[19,1]=3
species[20,1]=3
#S
species[21,1]=0
species[22,1]=-2
species[23,1]=-2
species[24,1]=-2
species[25,1]=-2
species[26,1]=-2
species[27,1]=-2
species[28,1]=-2
species[29,1]=4
species[30,1]=4
species[31,1]=4
species[32,1]=10
species[33,1]=10
species[34,1]=6
species[35,1]=6
species[36,1]=10
species[37,1]=4
species[38,1]=4
species[39,1]=4
species[40,1]=10
species[41,1]=6
species[42,1]=6
species[43,1]=6
species[44,1]=14
species[45,1]=8
species[46,1]=8
#CuSOBi
species[47,1]=0
species[48,1]=0
species[49,1]=0
species[50,1]=0
species[51,1]=24
species[52,1]=0
species[53,1]=8
#BiSO
species[54,1]=0
species[55,1]=0
species[56,1]=2
species[57,1]=24
species[58,1]=2
species[59,1]=4
#CuBiS
species[60,1]=0
species[61,1]=0
species[62,1]=0
#BiCuSO
species[63,1]=2
species[64,1]=0
######## Hydrogen H+ Count ####################################################
#Cu
species[0,2]=0
species[1,2]=2
species[2,2]=2
species[3,2]=0
species[4,2]=0
species[5,2]=4
species[6,2]=2
species[7,2]=3
species[8,2]=1
species[9,2]=2
species[10,2]=1
species[11,2]=2
species[12,2]=4
#Bi
species[13,2]=0
species[14,2]=6
species[15,2]=10
species[16,2]=8
species[17,2]=14
species[18,2]=0
species[19,2]=1
species[20,2]=2
#S
species[21,2]=0
species[22,2]=-2
species[23,2]=-1
species[24,2]=0
species[25,2]=0
species[26,2]=0
species[27,2]=0
species[28,2]=0
species[29,2]=6
species[30,2]=5
species[31,2]=4
species[32,2]=12
species[33,2]=12
species[34,2]=6
species[35,2]=8
species[36,2]=12
species[37,2]=4
species[38,2]=5
species[39,2]=6
species[40,2]=12
species[41,2]=6
species[42,2]=7
species[43,2]=8
species[44,2]=16
species[45,2]=9
species[46,2]=10
#CuSBiO
species[47,2]=0
species[48,2]=0
species[49,2]=0
species[50,2]=0
species[51,2]=24
species[52,2]=0
species[53,2]=8
#BiSO
species[54,2]=0
species[55,2]=0
species[56,2]=2
species[57,2]=24
species[58,2]=2
species[59,2]=4
#BiCuS
species[60,2]=0
species[61,2]=0
species[62,2]=0
#BiCuSO
species[63,2]=2
species[64,2]=0
########### Number of Coppers Cu ##############################################
#Cu
species[0,3]=1
species[1,3]=1
species[2,3]=2
species[3,3]=1
species[4,3]=1
species[5,3]=1
species[6,3]=1
species[7,3]=1
species[8,3]=1
species[9,3]=1
species[10,3]=1
species[11,3]=2
species[12,3]=3
#Bismuth and Sulphur
species[13,3]=0
species[14,3]=0
species[15,3]=0
species[16,3]=0
species[17,3]=0
species[18,3]=0
species[19,3]=0
species[20,3]=0
species[21,3]=0
species[22,3]=0
species[23,3]=0
species[24,3]=0
species[25,3]=0
species[26,3]=0
species[27,3]=0
species[28,3]=0
species[29,3]=0
species[30,3]=0
species[31,3]=0
species[32,3]=0
species[33,3]=0
species[34,3]=0
species[35,3]=0
species[36,3]=0
species[37,3]=0
species[38,3]=0
species[39,3]=0
species[40,3]=0
species[41,3]=0
species[42,3]=0
species[43,3]=0
species[44,3]=0
species[45,3]=0
species[46,3]=0
#CuBiSO
species[47,3]=2
species[48,3]=7
species[49,3]=1
species[50,3]=1
species[51,3]=2
species[52,3]=1
species[53,3]=1
#BiSO
species[54,3]=0
species[55,3]=0
species[56,3]=0
species[57,3]=0
species[58,3]=0
species[59,3]=0
#CuBiS
species[60,3]=1
species[61,3]=4
species[62,3]=4
#BiCuSO
species[63,3]=1
species[64,3]=3
########### Number of Bismuths Bi #############################################
#Copper
species[0,4]=0
species[1,4]=0
species[2,4]=0
species[3,4]=0
species[4,4]=0
species[5,4]=0
species[6,4]=0
species[7,4]=0
species[8,4]=0
species[9,4]=0
species[10,4]=0
species[11,4]=0
species[12,4]=0
#Bismuth
species[13,4]=1
species[14,4]=2
species[15,4]=2
species[16,4]=2
species[17,4]=4
species[18,4]=1
species[19,4]=1
species[20,4]=1
#Sulphur
species[21,4]=0
species[22,4]=0
species[23,4]=0
species[24,4]=0
species[25,4]=0
species[26,4]=0
species[27,4]=0
species[28,4]=0
species[29,4]=0
species[30,4]=0
species[31,4]=0
species[32,4]=0
species[33,4]=0
species[34,4]=0
species[35,4]=0
species[36,4]=0
species[37,4]=0
species[38,4]=0
species[39,4]=0
species[40,4]=0
species[41,4]=0
species[42,4]=0
species[43,4]=0
species[44,4]=0
species[45,4]=0
species[46,4]=0
#CuSBiO
species[47,4]=0
species[48,4]=0
species[49,4]=0
species[50,4]=0
species[51,4]=0
species[52,4]=1
species[53,4]=2
#BiSO
species[54,4]=1
species[55,4]=2
species[56,4]=2
species[57,4]=2
species[58,4]=14
species[59,4]=2
#CuBiS
species[60,4]=1
species[61,4]=4
species[62,4]=5
#BiCuSO
species[63,4]=1
species[64,4]=1
########### Number of Sulphurs S #############################################
#Coppers
species[0,5]=0
species[1,5]=0
species[2,5]=0
species[3,5]=0
species[4,5]=0
species[5,5]=0
species[6,5]=0
species[7,5]=0
species[8,5]=0
species[9,5]=0
species[10,5]=0
species[11,5]=0
species[12,5]=0
#Bismuth
species[13,5]=0
species[14,5]=0
species[15,5]=0
species[16,5]=0
species[17,5]=0
species[18,5]=0
species[19,5]=0
species[20,5]=0
#Sulphur
species[21,5]=1
species[22,5]=1
species[23,5]=1
species[24,5]=1
species[25,5]=2
species[26,5]=3
species[27,5]=4
species[28,5]=5
species[29,5]=2
species[30,5]=2
species[31,5]=2
species[32,5]=5
species[33,5]=4
species[34,5]=2
species[35,5]=2
species[36,5]=3
species[37,5]=1
species[38,5]=1
species[39,5]=1
species[40,5]=2
species[41,5]=1
species[42,5]=1
species[43,5]=1
species[44,5]=2
species[45,5]=1
species[46,5]=2
#CuSBiO
species[47,5]=1
species[48,5]=4
species[49,5]=1
species[50,5]=2
species[51,5]=3
species[52,5]=0
species[53,5]=0
#BiSO
species[54,5]=2
species[55,5]=3
species[56,5]=2
species[57,5]=3
species[58,5]=24
species[59,5]=1
#CuBiS
species[60,5]=2
species[61,5]=9
species[62,5]=10
#BiCuSO
species[63,5]=1
species[64,5]=3
######### Number of H2O's #####################################################
#Copper
species[0,6]=0
species[1,6]=1
species[2,6]=1
species[3,6]=0
species[4,6]=0
species[5,6]=4
species[6,6]=2
species[7,6]=3
species[8,6]=1
species[9,6]=2
species[10,6]=1
species[11,6]=2
species[12,6]=4
#Bi
species[13,6]=0
species[14,6]=3
species[15,6]=5
species[16,6]=4
species[17,6]=7
species[18,6]=0
species[19,6]=1
species[20,6]=1
#Sulphur
species[21,6]=0
species[22,6]=0
species[23,6]=0
species[24,6]=0
species[25,6]=0
species[26,6]=0
species[27,6]=0
species[28,6]=0
species[29,6]=3
species[30,6]=3
species[31,6]=3
species[32,6]=6
species[33,6]=6
species[34,6]=4
species[35,6]=4
species[36,6]=6
species[37,6]=3
species[38,6]=3
species[39,6]=3
species[40,6]=6
species[41,6]=4
species[42,6]=4
species[43,6]=4
species[44,6]=8
species[45,6]=5
species[46,6]=5
#CuSBiO
species[47,6]=0
species[48,6]=0
species[49,6]=0
species[50,6]=0
species[51,6]=12
species[52,6]=0
species[53,6]=4
#BiSO
species[54,6]=0
species[55,6]=0
species[56,6]=1
species[57,6]=12
species[58,6]=1
species[59,6]=2
#CuBiS
species[60,6]=0
species[61,6]=0
species[62,6]=0
#BiCuSO
species[63,6]=1
species[64,6]=0
########## Aqueous Ion Flag (1 = dissolved species, 0 = solid) ###############
#Copper
species[0,7]=0
species[1,7]=0
species[2,7]=0
species[3,7]=1
species[4,7]=1
species[5,7]=1
species[6,7]=1
species[7,7]=1
species[8,7]=1
species[9,7]=1
species[10,7]=1
species[11,7]=1
species[12,7]=1
#Bismuth
species[13,7]=0
species[14,7]=0
species[15,7]=0
species[16,7]=0
species[17,7]=0
species[18,7]=1
species[19,7]=1
species[20,7]=1
#Sulphur
species[21,7]=0
species[22,7]=1
species[23,7]=1
species[24,7]=1
species[25,7]=1
species[26,7]=1
species[27,7]=1
species[28,7]=1
species[29,7]=1
species[30,7]=1
species[31,7]=1
species[32,7]=1
species[33,7]=1
species[34,7]=1
species[35,7]=1
species[36,7]=1
species[37,7]=1
species[38,7]=1
species[39,7]=1
species[40,7]=1
species[41,7]=1
species[42,7]=1
species[43,7]=1
species[44,7]=1
species[45,7]=1
species[46,7]=1
#CuSBiO
species[47,7]=0
species[48,7]=0
species[49,7]=0
species[50,7]=0
species[51,7]=0
species[52,7]=0
species[53,7]=0
#BiSO
species[54,7]=0
species[55,7]=0
species[56,7]=0
species[57,7]=0
species[58,7]=0
species[59,7]=0
#CuBiS
species[60,7]=0
species[61,7]=0
species[62,7]=0
#BiCuSO
species[63,7]=0
species[64,7]=0
#Load previously computed species combinations if available (note: they are recomputed below regardless)
try:
combos=load('BiCuOS-speciesCombo.npy')
num=load('BiCuOS-numberSpecies.npy')
combo_num=int(num[0])
except OSError:
print('Cannot Open File')
###############################################################################
#### Determine which species are able to combine at the composition ###########
###############################################################################
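#Approach: for every triplet of species, require that Cu, Bi and S are all represented.
#If each element is supplied by exactly one member, the mixing fractions follow from a
#3x3 linear solve; otherwise a brute-force search over small integer multiples (1-19)
#looks for a combination that matches the target Cu:Bi:S composition.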
t=1
flag=1
f_total=int;
f=np.zeros((3))
combos=np.zeros((45000,9,3))
combo_num=0
combos[combo_num, 0, 0]=-1
combos[combo_num, 0, 1]=-1
combos[combo_num, 0, 2]=-1
for k in range(0, len(species)):
for m in range(0, len(species)):
for p in range(0, len(species)):
#Check to make sure each element is in this combination of species
if((species[k, 3]>0 or species[m, 3] >0 or species[p, 3]) \
and (species[k, 4]>0 or species[m, 4] >0 or species[p, 4]) \
and (species[k, 5]>0 or species[m, 5] >0 or species[p, 5])):
#save species in array
t=1
a = np.array([[species[k, 3],species[m, 3], species[p,3]], \
[species[k, 4],species[m, 4], species[p,4]], \
[species[k, 5],species[m, 5], species[p,5]]])
                #check whether each element is supplied by only one of the three species,
                #in which case the 3x3 composition system can be solved directly
flag=1
if((species[k, 3]==0 and species[m, 3] ==0) or \
(species[m, 3]==0 and species[p, 3] ==0) or \
(species[k, 3]==0 and species[p, 3] ==0)):
if((species[k, 4]==0 and species[m, 4] ==0) or \
(species[m, 4]==0 and species[p, 4] ==0) or \
(species[k, 4]==0 and species[p, 4] ==0)):
if((species[k, 5]==0 and species[m, 5] ==0) or \
(species[m, 5]==0 and species[p, 5] ==0) or \
(species[k, 5]==0 and species[p, 5] ==0)):
flag=0
                    #if so, find the composition through linear algebra.
try:
f=np.linalg.solve(a, composition)
except:
#print('Error: Species '+str(k)+', Species2: '+str(m)+', Species3: '+str(p)+'\n')
t=1
t=0
#If there is at least one multi-element species in this combination
if(flag==1):
#test each linear combination
for h in range(1, 20):
for i in range(1, 20):
for j in range(1, 20):
                            #Is there a linear combination of these species that reproduces
                            #the target Cu:Bi:S composition ratios?
if(((h*a[0,0]+i*a[0,1]+j*a[0,2])/(h*a[1,0]+i*a[1,1]+j*a[1,2]))==composition[0]/composition[1] and \
((h*a[1,0]+i*a[1,1]+j*a[1,2])/(h*a[2,0]+i*a[2,1]+j*a[2,2]))==composition[1]/composition[2] and \
((h*a[0,0]+i*a[0,1]+j*a[0,2])/(h*a[2,0]+i*a[2,1]+j*a[2,2]))==composition[0]/composition[2]):
#save the composition
f[0]=h
f[1]=i
f[2]=j
                                #mark success (note: reassigning h, i, j below does not actually break the loops; the remaining iterations are just redundant)
t=0;
h=40;
i=40;
j=40;
#If there is a linear combination, save the species in the combos array.
if (t==0):
#print(str(combo_num)+': Species1: '+str(k)+', Species2: '+str(m)+'\n')
#Species Number
combos[combo_num, 0, 0]=k
combos[combo_num, 0, 1]=m
combos[combo_num, 0, 2]=p
#Energy
combos[combo_num, 1, 0]=species[k,0]
combos[combo_num, 1, 1]=species[m,0]
combos[combo_num, 1, 2]=species[p,0]
#Electrons
combos[combo_num, 2, 0]=species[k,1]
combos[combo_num, 2, 1]=species[m,1]
combos[combo_num, 2, 2]=species[p,1]
#H+
combos[combo_num, 3, 0]=species[k,2]
combos[combo_num, 3, 1]=species[m,2]
combos[combo_num, 3, 2]=species[p,2]
                        #Number Coppers
combos[combo_num, 4, 0]=species[k,3]
combos[combo_num, 4, 1]=species[m,3]
combos[combo_num, 4, 2]=species[p,3]
#Number Bismuth
combos[combo_num, 5, 0]=species[k,4]
combos[combo_num, 5, 1]=species[m,4]
combos[combo_num, 5, 2]=species[p,4]
                        #Number Sulphur (species column 5)
combos[combo_num, 6, 0]=species[k,5]
combos[combo_num, 6, 1]=species[m,5]
combos[combo_num, 6, 2]=species[p,5]
                        #Number H2O (species column 6; the aqueous-ion flag itself is not copied into combos)
combos[combo_num, 7, 0]=species[k,6]
combos[combo_num, 7, 1]=species[m,6]
combos[combo_num, 7, 2]=species[p,6]
                        #Fraction of each species in the final combination
f_total=f[0]+f[1]+f[2];
combos[combo_num, 8, 0]=f[0]/f_total
combos[combo_num, 8, 1]=f[1]/f_total
combos[combo_num, 8, 2]=f[2]/f_total
combo_num=combo_num+1;
t=1
#print('entered')
else:
                    #no valid combination found for this triplet; leave t flagged as not-found
t=1
save('BiCuOS-speciesCombo.npy', combos)
save('BiCuOS-numberSpecies.npy', asarray([[combo_num]]))
print('The number of species combinations is '+ str(combo_num)+'.\n')
###############################################################################
###############################################################################
###############################################################################
###############################################################################
########### Chemical Potential Mesh Calculations ############################
###############################################################################
#muValues stores, for each (pH, U) grid point, the indices of the three species in the
#lowest-energy combination (columns 0-2) and that combination's chemical potential (column 3)
muValues=np.zeros((n+1,n+1,4))
current_mu=int
current_ele=int
current_H=int
current_H2O=int
current_aquI=int
current_NumEle=int
sort=np.zeros((3,1))
#fill in the grid: at each (pH, U) point, find the minimum-energy species combination
for i in range(0, n+1):
#calculate the energies for each species number
pH=lowpH+(i*pHcount);
for j in range(0,n+1):
U=Ulow+(j*Ucount);
muValues[i,j,0]=-1
muValues[i,j,1]=-1
muValues[i,j,2]=-1
muValues[i,j,3]=100000000
        #Go through all valid species combinations and keep the lowest-energy one
for k in range(0, combo_num):
p=int(combos[k,0,0]);
m=int(combos[k,0,1]);
s=int(combos[k,0,2]);
f1=combos[k,8,0];
f2=combos[k,8,1];
f3=combos[k,8,2];
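            # Each species contributes a Nernst-shifted free energy per formula unit:
            #   mu_i = (dGf_i + R*T*ln(a) - n_e*F*U - n_H*R*T*ln(10)*pH - n_H2O*dGf_H2O) / N_i
            # where the R*T*ln(a) activity term only applies to aqueous ions and N_i is the
            # normalization built below from the Cu/Bi/S stoichiometric coefficients larger than one.
            # The combination's potential is the fraction-weighted sum f1*mu1 + f2*mu2 + f3*mu3.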
#The first species's contribution to the mu
current_eng=species[p,0]
current_ele=F*U*(species[p,1])
current_H=R*T*np.log(10.0)*pH*(species[p,2])
current_H2O=dGf_H2O*(species[p,6])
current_aquI=R*T*np.log(nI)*(species[p,7])
current_NumEle=1
for t in range(3,6):
if(species[p,t]>1):
current_NumEle=current_NumEle*species[p,t];
current_mu=f1*((current_eng+current_aquI-current_ele-current_H-current_H2O)/current_NumEle);
#The second species' contribution to the mu
current_eng=species[m,0];
current_ele=F*U*(species[m,1])
current_H=R*T*np.log(10.0)*pH*(species[m,2])
current_H2O=dGf_H2O*(species[m,6])
current_aquI=R*T*np.log(nI)*(species[m,7])
current_NumEle=1
for t in range(3,6):
if(species[m,t]>1):
current_NumEle=current_NumEle*species[m,t];
current_mu=current_mu+f2*((current_eng+current_aquI-current_ele-current_H-current_H2O)/current_NumEle);
            #The third species' contribution to the mu
current_eng=species[s,0];
current_ele=F*U*(species[s,1])
current_H=R*T*np.log(10.0)*pH*(species[s,2])
current_H2O=dGf_H2O*(species[s,6])
current_aquI=R*T*np.log(nI)*(species[s,7])
current_NumEle=1
for t in range(3,6):
if(species[s,t]>1):
current_NumEle=current_NumEle*species[s,t];
current_mu=current_mu+f3*((current_eng+current_aquI-current_ele-current_H-current_H2O)/current_NumEle);
if(current_mu<muValues[i,j,3]):
sort[0,0]=p
sort[1,0]=m
sort[2,0]=s
a=np.sort(sort[:,0])
muValues[i,j,0]=a[0]
muValues[i,j,1]=a[1]
muValues[i,j,2]=a[2]
muValues[i,j,3]=current_mu
###############################################################################
###############################################################################
###############################################################################
###############################################################################
################### Plot Pourbaix Diagram ###################################
###############################################################################
flag = np.zeros((50,6)) # The first 3 columns store the species ids of a combination, the last 3 store its RGB plotting color
index=0;
fig =plt.figure()
ax=plt.subplot(111)
ax = plt.gca()
ax.set_xlim([lowpH,highpH])
ax.set_ylim([Ulow,Uhigh])
l=0;
index=0;
for i in range(0, n+1):
pH=lowpH+i*pHcount;
for j in range(0,n+1):
U=Ulow+(Ucount*j);
l=0
for k in range(0, len(flag)):
if(flag[k,0]==muValues[i,j,0] and flag[k,1]==muValues[i,j,1] and flag[k,2]==muValues[i,j,2]):
ax.plot(pH,U,'.', color = [flag[k,3],flag[k,4],flag[k,5]],markersize=4)
#break loop, the color is found
k=len(flag)+1
l=1
elif(flag[k,0]==muValues[i,j,0] and flag[k,1]==muValues[i,j,2]and flag[k,2]==muValues[i,j,1]):
ax.plot(pH,U,'.', color = [flag[k,3],flag[k,4],flag[k,5]],markersize=4)
#break loop, the color is found
k=len(flag)+1
l=1
elif(flag[k,0]==muValues[i,j,1] and flag[k,1]==muValues[i,j,2]and flag[k,2]==muValues[i,j,0]):
ax.plot(pH,U,'.', color = [flag[k,3],flag[k,4],flag[k,5]],markersize=4)
#break loop, the color is found
k=len(flag)+1
l=1
elif(flag[k,0]==muValues[i,j,1] and flag[k,1]==muValues[i,j,0]and flag[k,2]==muValues[i,j,2]):
ax.plot(pH,U,'.', color = [flag[k,3],flag[k,4],flag[k,5]],markersize=4)
#break loop, the color is found
k=len(flag)+1
l=1
elif(flag[k,0]==muValues[i,j,2] and flag[k,1]==muValues[i,j,0]and flag[k,2]==muValues[i,j,1]):
ax.plot(pH,U,'.', color = [flag[k,3],flag[k,4],flag[k,5]],markersize=4)
#break loop, the color is found
k=len(flag)+1
l=1
elif(flag[k,0]==muValues[i,j,2] and flag[k,1]==muValues[i,j,1]and flag[k,2]==muValues[i,j,0]):
ax.plot(pH,U,'.', color = [flag[k,3],flag[k,4],flag[k,5]],markersize=4)
#break loop, the color is found
k=len(flag)+1
l=1
if(l==0):
label='M1: '+str(muValues[i,j,0])+', M2: '+str(muValues[i,j,1])+' M3: '+str(muValues[i,j,2])
flag[index,0] = muValues[i,j,0]
flag[index,1] = muValues[i,j,1]
flag[index,2] = muValues[i,j,2]
flag[index,3] = random.random();
flag[index,4] = random.random();
flag[index,5] = random.random();
ax.plot(pH,U,'.', color = [flag[index,3],flag[index,4],flag[index,5]],markersize=4,label=label)
index=index+1;
#####Plot H2O and H2 lines##################################
muH=np.zeros((pHrange+1));
muH2O=np.zeros((pHrange+1));
pHArray=np.zeros((pHrange+1));
for i in range(0, pHrange):
pHArray[i] =lowpH+i;
muH[i]=-0.059*pHArray[i];
muH2O[i]=1.23-0.059*pHArray[i];
pHArray[pHrange] =lowpH+(pHrange);
muH[pHrange]=-0.059*pHArray[pHrange];
muH2O[pHrange]=1.23-0.059*pHArray[pHrange];
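#Dashed reference lines bound the water stability window: hydrogen evolution at
#E = -0.059*pH and oxygen evolution at E = 1.23 - 0.059*pH (0.059 V/pH is R*T*ln(10)/F at 298 K)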
##############################################################
ax.plot(pHArray[:], muH[:],'c--',label='$H_2$',linewidth=1)
ax.plot(pHArray[:], muH2O[:],'b--',label='$H_2O$', linewidth=1)
ax.legend(loc='upper center', bbox_to_anchor=(1.3, 0.9), ncol=1)
plt.ylabel('Electric Potential, E(V)')
plt.xlabel('pH')
plt.title('Bi-Cu-S Pourbaix Diagram, $\eta_{Bi,Cu,S}=10^{-'+str(eta)+'}$, '+str(composition[0])+'Cu:' +str(composition[1])+'Bi:'+str(composition[2])+'S')
###############################################################################
############## Plot with Lines ############################################
###############################################################################
flag = np.zeros((50,6)) # The first 3 columns store the species ids of a combination, the last 3 store its RGB plotting color
index=0;
fig =plt.figure()
ax=plt.subplot(111)
ax = plt.gca()
ax.set_xlim([lowpH,highpH])
ax.set_ylim([Ulow,Uhigh])
#If drawing lines for metastable phases
for i in range(1, n):
#calculate the energies for each species number
pH=lowpH+(i*pHcount);
for j in range(1,n):
U=Ulow+(j*Ucount);
#If drawing lines for metastable phases
if((muValues[i,j,0]!=muValues[i-1,j,0])):
ax.plot(pH,U,'.', color = [0.0,0.0,0.0],markersize=2)
elif(muValues[i,j,1]!=muValues[i-1,j,1]):
ax.plot(pH,U,'.', color = [0.0,0.0,0.0],markersize=2)
elif((muValues[i,j,0]!=muValues[i,j-1,0]) or (muValues[i,j,1]!=muValues[i,j-1,1])):
ax.plot(pH,U,'.', color = [0.0,0.0,0.0],markersize=2)
elif((muValues[i,j,2]!=muValues[i,j-1,2]) or (muValues[i,j,2]!=muValues[i-1,j,2])):
ax.plot(pH,U,'.', color = [0.0,0.0,0.0],markersize=2)
ax.plot(pHArray[:], muH[:],'c--',label='$H_2$',linewidth=1)
ax.plot(pHArray[:], muH2O[:],'b--',label='$H_2O$', linewidth=1)
plt.ylabel('Electric Potential, E(V)')
plt.xlabel('pH')
plt.title('Bi-Cu-S Pourbaix Diagram, $\eta_{Bi,Cu,S}=10^{-'+str(eta)+'}$, '+str(composition[0])+'Cu:' +str(composition[1])+'Bi:'+str(composition[2])+'S')
chartBox=ax.get_position()
ax.set_position([chartBox.x0, chartBox.y0, chartBox.width*1.5, chartBox.height*1.5])
ax.legend(loc='upper center', bbox_to_anchor=(1.3, 0.9), ncol=1)
plt.show()
print('End of Script')
|
[
"matplotlib.pyplot.subplot",
"numpy.load",
"numpy.save",
"matplotlib.pyplot.show",
"numpy.log",
"numpy.asarray",
"numpy.zeros",
"random.random",
"matplotlib.pyplot.figure",
"numpy.sort",
"numpy.array",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"numpy.linalg.solve"
] |
[((734, 753), 'numpy.array', 'np.array', (['[1, 1, 1]'], {}), '([1, 1, 1])\n', (742, 753), True, 'import numpy as np\n'), ((10420, 10437), 'numpy.zeros', 'np.zeros', (['(65, 8)'], {}), '((65, 8))\n', (10428, 10437), True, 'import numpy as np\n'), ((27784, 27811), 'numpy.zeros', 'np.zeros', (['(n + 1, n + 1, 4)'], {}), '((n + 1, n + 1, 4))\n', (27792, 27811), True, 'import numpy as np\n'), ((27908, 27924), 'numpy.zeros', 'np.zeros', (['(3, 1)'], {}), '((3, 1))\n', (27916, 27924), True, 'import numpy as np\n'), ((31114, 31131), 'numpy.zeros', 'np.zeros', (['(50, 6)'], {}), '((50, 6))\n', (31122, 31131), True, 'import numpy as np\n'), ((31223, 31235), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (31233, 31235), True, 'import matplotlib.pyplot as plt\n'), ((31239, 31255), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (31250, 31255), True, 'import matplotlib.pyplot as plt\n'), ((31261, 31270), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (31268, 31270), True, 'import matplotlib.pyplot as plt\n'), ((33846, 33867), 'numpy.zeros', 'np.zeros', (['(pHrange + 1)'], {}), '(pHrange + 1)\n', (33854, 33867), True, 'import numpy as np\n'), ((33875, 33896), 'numpy.zeros', 'np.zeros', (['(pHrange + 1)'], {}), '(pHrange + 1)\n', (33883, 33896), True, 'import numpy as np\n'), ((33906, 33927), 'numpy.zeros', 'np.zeros', (['(pHrange + 1)'], {}), '(pHrange + 1)\n', (33914, 33927), True, 'import numpy as np\n'), ((34443, 34481), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Electric Potential, E(V)"""'], {}), "('Electric Potential, E(V)')\n", (34453, 34481), True, 'import matplotlib.pyplot as plt\n'), ((34482, 34498), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""pH"""'], {}), "('pH')\n", (34492, 34498), True, 'import matplotlib.pyplot as plt\n'), ((34902, 34919), 'numpy.zeros', 'np.zeros', (['(50, 6)'], {}), '((50, 6))\n', (34910, 34919), True, 'import numpy as np\n'), ((35011, 35023), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (35021, 35023), True, 'import matplotlib.pyplot as plt\n'), ((35027, 35043), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (35038, 35043), True, 'import matplotlib.pyplot as plt\n'), ((35049, 35058), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (35056, 35058), True, 'import matplotlib.pyplot as plt\n'), ((36055, 36093), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Electric Potential, E(V)"""'], {}), "('Electric Potential, E(V)')\n", (36065, 36093), True, 'import matplotlib.pyplot as plt\n'), ((36094, 36110), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""pH"""'], {}), "('pH')\n", (36104, 36110), True, 'import matplotlib.pyplot as plt\n'), ((36442, 36452), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (36450, 36452), True, 'import matplotlib.pyplot as plt\n'), ((20454, 20485), 'numpy.load', 'load', (['"""BiCuOS-speciesCombo.npy"""'], {}), "('BiCuOS-speciesCombo.npy')\n", (20458, 20485), False, 'from numpy import load\n'), ((20494, 20526), 'numpy.load', 'load', (['"""BiCuOS-numberSpecies.npy"""'], {}), "('BiCuOS-numberSpecies.npy')\n", (20498, 20526), False, 'from numpy import load\n'), ((20893, 20904), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (20901, 20904), True, 'import numpy as np\n'), ((20918, 20941), 'numpy.zeros', 'np.zeros', (['(45000, 9, 3)'], {}), '((45000, 9, 3))\n', (20926, 20941), True, 'import numpy as np\n'), ((27010, 27049), 'numpy.save', 'save', (['"""BiCuOS-speciesCombo.npy"""', 'combos'], {}), "('BiCuOS-speciesCombo.npy', combos)\n", (27014, 
27049), False, 'from numpy import save\n'), ((27087, 27109), 'numpy.asarray', 'asarray', (['[[combo_num]]'], {}), '([[combo_num]])\n', (27094, 27109), False, 'from numpy import asarray\n'), ((33537, 33552), 'random.random', 'random.random', ([], {}), '()\n', (33550, 33552), False, 'import random\n'), ((33582, 33597), 'random.random', 'random.random', ([], {}), '()\n', (33595, 33597), False, 'import random\n'), ((33627, 33642), 'random.random', 'random.random', ([], {}), '()\n', (33640, 33642), False, 'import random\n'), ((30404, 30423), 'numpy.sort', 'np.sort', (['sort[:, 0]'], {}), '(sort[:, 0])\n', (30411, 30423), True, 'import numpy as np\n'), ((28790, 28800), 'numpy.log', 'np.log', (['nI'], {}), '(nI)\n', (28796, 28800), True, 'import numpy as np\n'), ((29367, 29377), 'numpy.log', 'np.log', (['nI'], {}), '(nI)\n', (29373, 29377), True, 'import numpy as np\n'), ((29954, 29964), 'numpy.log', 'np.log', (['nI'], {}), '(nI)\n', (29960, 29964), True, 'import numpy as np\n'), ((21582, 21741), 'numpy.array', 'np.array', (['[[species[k, 3], species[m, 3], species[p, 3]], [species[k, 4], species[m, \n 4], species[p, 4]], [species[k, 5], species[m, 5], species[p, 5]]]'], {}), '([[species[k, 3], species[m, 3], species[p, 3]], [species[k, 4],\n species[m, 4], species[p, 4]], [species[k, 5], species[m, 5], species[p,\n 5]]])\n', (21590, 21741), True, 'import numpy as np\n'), ((28683, 28695), 'numpy.log', 'np.log', (['(10.0)'], {}), '(10.0)\n', (28689, 28695), True, 'import numpy as np\n'), ((29260, 29272), 'numpy.log', 'np.log', (['(10.0)'], {}), '(10.0)\n', (29266, 29272), True, 'import numpy as np\n'), ((29847, 29859), 'numpy.log', 'np.log', (['(10.0)'], {}), '(10.0)\n', (29853, 29859), True, 'import numpy as np\n'), ((22820, 22851), 'numpy.linalg.solve', 'np.linalg.solve', (['a', 'composition'], {}), '(a, composition)\n', (22835, 22851), True, 'import numpy as np\n')]
|
# test bin, analyze, and plot functions
# imports
import os
from os.path import join
from os import listdir
import matplotlib.pyplot as plt
# imports
import numpy as np
import pandas as pd
from scipy.optimize import curve_fit
import filter
import analyze
from correction import correct
from utils import fit, functions, bin, io, plotting, modify, plot_collections
from utils.plotting import lighten_color
# A note on SciencePlots colors
"""
Blue: #0C5DA5
Green: #00B945
Red: #FF9500
Orange: #FF2C00
Other Colors:
Light Blue: #7BC8F6
Paler Blue: #0343DF
Azure: #069AF3
Dark Green: #054907
"""
sciblue = '#0C5DA5'
scigreen = '#00B945'
scired = '#FF9500'
sciorange = '#FF2C00'
plt.style.use(['science', 'ieee', 'std-colors'])
fig, ax = plt.subplots()
size_x_inches, size_y_inches = fig.get_size_inches()
plt.close(fig)
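# a throwaway figure is created and closed immediately just to record the default figure
# size under the chosen style (size_x_inches, size_y_inches) for later use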
# ----------------------------------------------------------------------------------------------------------------------
# 1. SETUP - BASE DIRECTORY
base_dir = '/Users/mackenzie/Desktop/gdpyt-characterization/experiments/11.02.21-BPE_Pressure_Deflection_20X/analyses/'
# ----------------------------------------------------------------------------------------------------------------------
# 2. SETUP - IDPT
path_idpt = join(base_dir, 'results-04.26.22_idpt')
path_test_coords = join(path_idpt, 'coords/test-coords')
path_calib_coords = join(path_idpt, 'coords/calib-coords')
path_similarity = join(path_idpt, 'similarity')
path_results = join(path_idpt, 'results')
path_figs = join(path_idpt, 'figs')
# ----------------------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------------
# 3. ANALYSIS - READ FILES
method = 'idpt'
microns_per_pixel = 0.8
# ----- 4.1 CORRECT TEST COORDS
correct_test_coords = False
if correct_test_coords:
use_idpt_zf = False
use_spct_zf = False
# ------------------------------------------------------------------------------------------------------------------
if use_idpt_zf:
"""
NOTE: This correction scheme fits a 2D spline to the in-focus particle positions and uses this to set their
z_f = 0 position.
"""
param_zf = 'zf_from_peak_int'
plot_calib_plane = False
plot_calib_spline = False
kx, ky = 2, 2
# step 1. read calibration coords
dfc, dfcpid, dfcpop, dfcstats = io.read_calib_coords(path_calib_coords, method)
# step 2. remove outliers
# 2.1 get z_in-focus mean + standard deviation
zf_c_mean = dfcpid[param_zf].mean()
zf_c_std = dfcpid[param_zf].std()
# 2.2 filter calibration coords
dfcpid = dfcpid[(dfcpid[param_zf] > zf_c_mean - zf_c_std) & (dfcpid[param_zf] < zf_c_mean + zf_c_std)]
# step 3. fit plane
dictc_fit_plane = correct.fit_in_focus_plane(df=dfcpid, param_zf=param_zf, microns_per_pixel=microns_per_pixel)
popt_c = dictc_fit_plane['popt_pixels']
if plot_calib_plane:
fig = plotting.plot_fitted_plane_and_points(df=dfcpid, dict_fit_plane=dictc_fit_plane)
plt.savefig(path_figs + '/idpt-calib-coords_fit-plane_raw.png')
plt.close()
dfict_fit_plane = pd.DataFrame.from_dict(dictc_fit_plane, orient='index', columns=['value'])
dfict_fit_plane.to_excel(path_figs + '/idpt-calib-coords_fit-plane_raw.xlsx')
# step 4. FIT SMOOTH 2D SPLINE AND PLOT RAW POINTS + FITTED SURFACE (NO CORRECTION)
bispl_c, rmse_c = fit.fit_3d_spline(x=dfcpid.x,
y=dfcpid.y,
z=dfcpid[param_zf],
kx=kx,
ky=ky)
if plot_calib_spline:
fig, ax = plotting.scatter_3d_and_spline(dfcpid.x, dfcpid.y, dfcpid[param_zf],
bispl_c,
cmap='RdBu',
grid_resolution=30,
view='multi')
ax.set_xlabel('x (pixels)')
ax.set_ylabel('y (pixels)')
ax.set_zlabel(r'$z_{f} \: (\mu m)$')
plt.suptitle('fit RMSE = {}'.format(np.round(rmse_c, 3)))
plt.savefig(path_figs + '/idpt-calib-coords_fit-spline_kx{}_ky{}.png'.format(kx, ky))
plt.close()
# step 5. read test_coords
dft = io.read_test_coords(path_test_coords)
# step 6. drop unnecessary columns in dft
dft = dft[['frame', 'id', 'z', 'z_true', 'x', 'y', 'cm', 'error']]
# step 7. create a z_corr column by using fitted spline to correct z
dft = correct.correct_z_by_spline(dft, bispl=bispl_c, param_z='z')
dft['z_true_corr'] = dft['z_true'] - dft['z_cal_surf']
# step 8. export corrected test_coords
dft.to_excel(path_results + '/test_coords_corrected_t-calib2_c-calib1.xlsx', index=False)
elif use_spct_zf:
"""
NOTE: No correction is currently performed. The z-coords are well aligned enough in both calibration image sets
to just ignore. This is not necessarily surprising because the calibration images were acquired with the intention
of making the z-coords identical for all calibration image sets (by using the same beginning and ending tick mark
on the fine adjustment knob during image acquisition).
"""
# --------------------------------------------------------------------------------------------------------------
# SETUP - SPCT CALIBRATION IN-FOCUS COORDS
# SPCT analysis of images used for IDPT calibration
path_spct_calib_coords = join(base_dir, 'results-04.26.22_spct_calib1_test-2-3/coords/calib-coords')
path_calib_pid_defocus = join(path_spct_calib_coords, 'calib_spct_pid_defocus_stats_c-calib1_t-calib2.xlsx')
path_calib_spct_stats = join(path_spct_calib_coords, 'calib_spct_stats_c-calib1_t-calib2.xlsx')
path_calib_spct_pop = join(path_spct_calib_coords, 'calib_spct_pop_defocus_stats_c-calib1_t-calib2.xlsx')
# SPCT analysis of images used for IDPT test
path_spct_test_coords = join(base_dir, 'results-04.28.22_spct-calib2_test3/coords/calib-coords')
path_test_pid_defocus = join(path_spct_test_coords, 'calib_spct_pid_defocus_stats_c-calib2_t-calib3.xlsx')
path_test_spct_stats = join(path_spct_test_coords, 'calib_spct_stats_c-calib2_t-calib3.xlsx')
path_test_spct_pop = join(path_spct_test_coords, 'calib_spct_pop_defocus_stats_c-calib2_t-calib3.xlsx')
# --------------------------------------------------------------------------------------------------------------
# --- PART A. READ COORDS USED FOR IDPT CALIBRATION (i.e. 'calib1')
merge_spct_stats = True
param_zf = 'zf_from_peak_int'
plot_calib_plane = True
plot_test_plane = True
kx, ky = 2, 2
# step 1. merge [['x', 'y']] into spct pid defocus stats.
if merge_spct_stats:
# read SPCT calibration coords and merge ['x', 'y'] into pid_defocus_stats
dfcpid = pd.read_excel(path_calib_pid_defocus)
dfcstats = pd.read_excel(path_calib_spct_stats)
dfcpid = modify.merge_calib_pid_defocus_and_correction_coords(path_calib_coords, method, dfs=[dfcstats,
dfcpid])
else:
# read SPCT pid defocus stats that have already been merged
path_calib_pid_defocus = join(path_calib_coords, 'calib_spct_pid_defocus_stats_calib1_xy.xlsx')
dfcpid = pd.read_excel(path_calib_pid_defocus)
# step 2. remove outliers
# 2.1 get z_in-focus mean + standard deviation
zf_c_mean = dfcpid[param_zf].mean()
zf_c_std = dfcpid[param_zf].std()
# 2.2 filter calibration coords
dfcpid = dfcpid[(dfcpid[param_zf] > 34) & (dfcpid[param_zf] < zf_c_mean + zf_c_std / 2)]
dfcpid = dfcpid[dfcpid['x'] > 120]
# step 3. fit plane
dictc_fit_plane = correct.fit_in_focus_plane(df=dfcpid, param_zf=param_zf, microns_per_pixel=microns_per_pixel)
popt_c = dictc_fit_plane['popt_pixels']
if plot_calib_plane:
fig = plotting.plot_fitted_plane_and_points(df=dfcpid, dict_fit_plane=dictc_fit_plane)
plt.savefig(path_figs + '/calibration-coords_fit-plane_raw.png')
plt.close()
dfict_fit_plane = pd.DataFrame.from_dict(dictc_fit_plane, orient='index', columns=['value'])
dfict_fit_plane.to_excel(path_figs + '/calibration-coords_fit-plane_raw.xlsx')
# FIT SMOOTH 2D SPLINE AND PLOT RAW POINTS + FITTED SURFACE (NO CORRECTION)
bispl_c, rmse_c = fit.fit_3d_spline(x=dfcpid.x,
y=dfcpid.y,
z=dfcpid[param_zf],
kx=kx,
ky=ky)
fig, ax = plotting.scatter_3d_and_spline(dfcpid.x, dfcpid.y, dfcpid[param_zf],
bispl_c,
cmap='RdBu',
grid_resolution=30,
view='multi')
ax.set_xlabel('x (pixels)')
ax.set_ylabel('y (pixels)')
ax.set_zlabel(r'$z_{f} \: (\mu m)$')
plt.suptitle('fit RMSE = {}'.format(np.round(rmse_c, 3)))
plt.savefig(path_figs + '/calibration-coords_fit-spline_kx{}_ky{}.png'.format(kx, ky))
plt.close()
# ---
# --- PART B. READ COORDS USED FOR IDPT TEST (i.e. 'calib2')
# step 1. merge [['x', 'y']] into spct pid defocus stats.
if merge_spct_stats:
# read SPCT calibration coords and merge ['x', 'y'] into pid_defocus_stats
dfcpid = pd.read_excel(path_test_pid_defocus)
dfcstats = pd.read_excel(path_test_spct_stats)
dfcpid = modify.merge_calib_pid_defocus_and_correction_coords(path_calib_coords, method, dfs=[dfcstats,
dfcpid])
else:
# read SPCT pid defocus stats that have already been merged
path_calib_pid_defocus = join(path_calib_coords, 'calib_spct_pid_defocus_stats_calib2_xy.xlsx')
dfcpid = pd.read_excel(path_calib_pid_defocus)
# step 2. remove outliers
# 2.1 get z_in-focus mean + standard deviation
zf_c_mean = dfcpid[param_zf].mean()
zf_c_std = dfcpid[param_zf].std()
# 2.2 filter calibration coords
dfcpid = dfcpid[(dfcpid[param_zf] > zf_c_mean - zf_c_std / 2) & (dfcpid[param_zf] < zf_c_mean + zf_c_std / 2)]
# step 3. fit plane
dictc_fit_plane = correct.fit_in_focus_plane(df=dfcpid, param_zf=param_zf, microns_per_pixel=microns_per_pixel)
popt_c = dictc_fit_plane['popt_pixels']
if plot_test_plane:
fig = plotting.plot_fitted_plane_and_points(df=dfcpid, dict_fit_plane=dictc_fit_plane)
plt.savefig(path_figs + '/test-coords_fit-plane_raw.png')
plt.close()
dfict_fit_plane = pd.DataFrame.from_dict(dictc_fit_plane, orient='index', columns=['value'])
dfict_fit_plane.to_excel(path_figs + '/test-coords_fit-plane_raw.xlsx')
# FIT SMOOTH 2D SPLINE AND PLOT RAW POINTS + FITTED SURFACE (NO CORRECTION)
bispl_c, rmse_c = fit.fit_3d_spline(x=dfcpid.x,
y=dfcpid.y,
z=dfcpid[param_zf],
kx=kx,
ky=ky)
fig, ax = plotting.scatter_3d_and_spline(dfcpid.x, dfcpid.y, dfcpid[param_zf],
bispl_c,
cmap='RdBu',
grid_resolution=30,
view='multi')
ax.set_xlabel('x (pixels)')
ax.set_ylabel('y (pixels)')
ax.set_zlabel(r'$z_{f} \: (\mu m)$')
plt.suptitle('fit RMSE = {}'.format(np.round(rmse_c, 3)))
plt.savefig(path_figs + '/test-coords_fit-spline_kx{}_ky{}.png'.format(kx, ky))
plt.close()
# ----------------------------------------------------------------------------------------------------------------------
# 4. PLOT TEST COORDS RMSE-Z
analyze_test_coords = False
save_plots = False
show_plots = False
if analyze_test_coords:
# read test coords
dft = io.read_test_coords(path_test_coords)
# test coords stats
mag_eff = 20.0
area_pixels = 512 ** 2
area_microns = (512 * microns_per_pixel) ** 2
i_num_rows = len(dft)
i_num_pids = len(dft.id.unique())
# ---
# --- STEP 0. drop and rename columns for simplicity
dft = dft.drop(columns=['z', 'z_true'])
dft = dft.rename(columns={'z_corr': 'z', 'z_true_corr': 'z_true'})
# ---
rmse_all_particles = False
rmse_on_off_bpe = False
rmse_compare = False
# format plots
xylim = 37.25
xyticks = [-30, -15, 0, 15, 30]
lbls = ['On', 'Border', 'Off']
markers = ['s', 'd', 'o']
if rmse_all_particles:
# --- STEP 1. CALCULATE RMSE-Z FOR ALL PARTICLES
column_to_bin = 'z_true'
bins_z = 20
round_z_to_decimal = 3
min_cm = 0.5
# 1.1 mean rmse-z
dfrmse_mean = bin.bin_local_rmse_z(dft,
column_to_bin=column_to_bin,
bins=1,
min_cm=min_cm,
z_range=None,
round_to_decimal=round_z_to_decimal,
df_ground_truth=None,
dropna=True,
error_column='error',
)
dfrmse_mean.to_excel(path_results + '/mean-rmse-z_bin=1_no-filters.xlsx')
# 1.2 binned rmse-z
dfrmse = bin.bin_local_rmse_z(dft,
column_to_bin=column_to_bin,
bins=bins_z,
min_cm=min_cm,
z_range=None,
round_to_decimal=round_z_to_decimal,
df_ground_truth=None,
dropna=True,
error_column='error',
)
dfrmse.to_excel(path_results + '/binned-rmse-z_bins={}_no-filters.xlsx'.format(bins_z))
# 1.3 groupby 'bin' rmse-z mean + std
dfrmsem, dfrmsestd = bin.bin_generic(dft,
column_to_bin='bin',
column_to_count='id',
bins=bins_z,
round_to_decimal=round_z_to_decimal,
return_groupby=True)
        # 1.4 plot binned rmse-z
if save_plots or show_plots:
# close all figs
plt.close('all')
# ----------------------- BASIC RMSE-Z PLOTS
# rmse-z: microns
fig, ax = plt.subplots()
ax.plot(dfrmse.index, dfrmse.rmse_z, '-o')
ax.set_xlabel(r'$z_{true} \: (\mu m)$')
ax.set_xlim([-xylim, xylim])
ax.set_xticks(ticks=xyticks, labels=xyticks)
ax.set_ylabel(r'$\sigma_{z} \: (\mu m)$')
plt.tight_layout()
if save_plots:
plt.savefig(path_figs + '/rmse-z_microns.png')
if show_plots:
plt.show()
plt.close()
# ----------------------- Z-MEAN +/- Z-STD PLOTS
# fit line
popt, pcov = curve_fit(functions.line, dfrmse.z_true, dfrmse.z)
z_fit = np.linspace(dfrmse.z_true.min(), dfrmse.z_true.max())
rmse_fit_line = np.sqrt(np.sum((functions.line(dfrmse.z_true, *popt) - dfrmse.z)**2) / len(dfrmse.z))
print(rmse_fit_line)
# binned calibration curve with std-z errorbars (microns) + fit line
fig, ax = plt.subplots()
ax.errorbar(dfrmsem.z_true, dfrmsem.z, yerr=dfrmsestd.z, fmt='o', ms=3, elinewidth=0.5, capsize=1, color=sciblue,
label=r'$\overline{z} \pm \sigma$') #
ax.plot(z_fit, functions.line(z_fit, *popt), linestyle='--', linewidth=1.5, color='black', alpha=0.25,
label=r'$dz/dz_{true} = $' + ' {}'.format(np.round(popt[0], 3)))
ax.set_xlabel(r'$z_{true} \: (\mu m)$')
ax.set_xlim([-xylim, xylim])
ax.set_xticks(ticks=xyticks, labels=xyticks)
ax.set_ylabel(r'$z \: (\mu m)$')
ax.set_ylim([-xylim, xylim])
ax.set_yticks(ticks=xyticks, labels=xyticks)
ax.legend(loc='lower right', handletextpad=0.25, borderaxespad=0.3)
plt.tight_layout()
if save_plots:
plt.savefig(path_figs +
'/calibration_curve_z+std-errobars_fit_line_a{}_b{}_slope-label-blk.png'.format(
np.round(popt[0],
3),
np.round(popt[1],
3))
)
if show_plots:
plt.show()
plt.close()
if rmse_on_off_bpe:
        # --- STEP 0. SPLIT DATAFRAME BY X-POSITION INTO (1) ON BPE, (2) BPE EDGE, AND (3) OFF BPE.
column_to_bin = 'x'
bins_x = [145, 175, 205]
round_x_to_decimal = 0
dfbx = bin.bin_by_list(dft,
column_to_bin=column_to_bin,
bins=bins_x,
round_to_decimal=round_x_to_decimal,
)
df_on = dfbx[dfbx['bin'] == bins_x[0]]
df_edge = dfbx[dfbx['bin'] == bins_x[1]]
df_off = dfbx[dfbx['bin'] == bins_x[2]]
# --- plotting
# --- STEP 1. PLOT CALIBRATION CURVE (Z VS. Z_TRUE) FOR EACH DATAFRAME (ON, EDGE, OFF)
ss = 1
fig, ax = plt.subplots()
ax.scatter(df_off.z_true, df_off.z, s=ss, marker=markers[2], color=sciblue, label=lbls[2])
ax.scatter(df_on.z_true, df_on.z, s=ss, marker=markers[0], color=sciorange, label=lbls[0])
ax.scatter(df_edge.z_true, df_edge.z, s=ss, marker=markers[1], color=scired, label=lbls[1])
ax.set_xlabel(r'$z_{true} \: (\mu m)$')
ax.set_xlim([-xylim, xylim])
ax.set_xticks(ticks=xyticks, labels=xyticks)
ax.set_ylabel(r'$z \: (\mu m)$')
ax.set_ylim([-xylim, xylim])
ax.set_yticks(ticks=xyticks, labels=xyticks)
ax.legend(loc='lower right', markerscale=2.5)
plt.tight_layout()
if save_plots:
plt.savefig(path_figs + '/on-edge-off-bpe_calibration_curve.png')
if show_plots:
plt.show()
plt.close()
# --- STEP 2. FOR EACH DATAFRAME (ON, EDGE, OFF), COMPUTE RMSE-Z AND PLOT
for lbl, dft in zip(lbls, [df_on, df_edge, df_off]):
# --- STEP 1. CALCULATE RMSE-Z FOR ALL PARTICLES
column_to_bin = 'z_true'
bins_z = 20
round_z_to_decimal = 3
min_cm = 0.5
# 1.1 mean rmse-z
dfrmse_mean = bin.bin_local_rmse_z(dft,
column_to_bin=column_to_bin,
bins=1,
min_cm=min_cm,
z_range=None,
round_to_decimal=round_z_to_decimal,
df_ground_truth=None,
dropna=True,
error_column='error',
)
dfrmse_mean.to_excel(path_results + '/{}_mean-rmse-z_bin=1_no-filters.xlsx'.format(lbl))
# 1.2 binned rmse-z
dfrmse = bin.bin_local_rmse_z(dft,
column_to_bin=column_to_bin,
bins=bins_z,
min_cm=min_cm,
z_range=None,
round_to_decimal=round_z_to_decimal,
df_ground_truth=None,
dropna=True,
error_column='error',
)
dfrmse.to_excel(path_results + '/{}_binned-rmse-z_bins={}_no-filters.xlsx'.format(lbl, bins_z))
# 1.3 groupby 'bin' rmse-z mean + std
dfrmsem, dfrmsestd = bin.bin_generic(dft,
column_to_bin='bin',
column_to_count='id',
bins=bins_z,
round_to_decimal=round_z_to_decimal,
return_groupby=True)
# 1.3 plot binned rmse-z
if save_plots or show_plots:
# close all figs
plt.close('all')
# ----------------------- BASIC RMSE-Z PLOTS
# rmse-z: microns
fig, ax = plt.subplots()
ax.plot(dfrmse.index, dfrmse.rmse_z, '-o')
ax.set_xlabel(r'$z_{true} \: (\mu m)$')
ax.set_xlim([-xylim, xylim])
ax.set_xticks(ticks=xyticks, labels=xyticks)
ax.set_ylabel(r'$\sigma_{z} \: (\mu m)$')
plt.tight_layout()
if save_plots:
plt.savefig(path_figs + '/{}_rmse-z_microns.png'.format(lbl))
if show_plots:
plt.show()
plt.close()
# ----------------------- Z-MEAN +/- Z-STD PLOTS
# fit line
popt, pcov = curve_fit(functions.line, dfrmse.z_true, dfrmse.z)
z_fit = np.linspace(dfrmse.z_true.min(), dfrmse.z_true.max())
rmse_fit_line = np.sqrt(np.sum((functions.line(dfrmse.z_true, *popt) - dfrmse.z) ** 2) / len(dfrmse.z))
print(rmse_fit_line)
# binned calibration curve with std-z errorbars (microns) + fit line
fig, ax = plt.subplots()
ax.errorbar(dfrmsem.z_true, dfrmsem.z, yerr=dfrmsestd.z, fmt='o', ms=3, elinewidth=0.5, capsize=1,
color=sciblue,
label=r'$\overline{z} \pm \sigma$') #
ax.plot(z_fit, functions.line(z_fit, *popt), linestyle='--', linewidth=1.5, color='black', alpha=0.25,
label=r'$dz/dz_{true} = $' + ' {}'.format(np.round(popt[0], 3)))
ax.set_xlabel(r'$z_{true} \: (\mu m)$')
ax.set_xlim([-xylim, xylim])
ax.set_xticks(ticks=xyticks, labels=xyticks)
ax.set_ylabel(r'$z \: (\mu m)$')
ax.set_ylim([-xylim, xylim])
ax.set_yticks(ticks=xyticks, labels=xyticks)
ax.legend(loc='lower right', handletextpad=0.25, borderaxespad=0.3)
plt.tight_layout()
if save_plots:
plt.savefig(path_figs +
'/{}_calibration_curve_z+std-errobars_fit_line_a{}_b{}_slope-label-blk.png'.format(
lbl,
np.round(popt[0],
3),
np.round(popt[1],
3))
)
if show_plots:
plt.show()
plt.close()
if rmse_compare:
# 1. read binned rmse-z dataframes from Excel
path_rmse_compare = join(path_results, 'on-edge-off-bpe')
df1 = pd.read_excel(join(path_rmse_compare, '{}_binned-rmse-z_bins=20_no-filters.xlsx'.format(lbls[0])))
df2 = pd.read_excel(join(path_rmse_compare, '{}_binned-rmse-z_bins=20_no-filters.xlsx'.format(lbls[1])))
df3 = pd.read_excel(join(path_rmse_compare, '{}_binned-rmse-z_bins=20_no-filters.xlsx'.format(lbls[2])))
# 1.3 plot binned rmse-z
if save_plots or show_plots:
ms = 4
# ----------------------- BASIC RMSE-Z PLOTS
# rmse-z: microns
fig, ax = plt.subplots()
ax.plot(df3.bin, df3.rmse_z, '-o', ms=ms, label=lbls[2], color=sciblue)
ax.plot(df2.bin, df2.rmse_z, '-o', ms=ms, label=lbls[1], color=scired)
ax.plot(df1.bin, df1.rmse_z, '-o', ms=ms, label=lbls[0], color=sciorange)
ax.set_xlabel(r'$z_{true} \: (\mu m)$')
ax.set_xlim([-xylim, xylim])
ax.set_xticks(ticks=xyticks, labels=xyticks)
ax.set_ylabel(r'$\sigma_{z} \: (\mu m)$')
ax.legend()
plt.tight_layout()
if save_plots:
plt.savefig(path_figs + '/compare-on-edge-off-bpe_rmse-z_microns.png')
if show_plots:
plt.show()
plt.close()
# rmse-z (microns) + c_m
darken_clr = 1.0
alpha_clr = 1.0
fig, [axr, ax] = plt.subplots(nrows=2, sharex=True, gridspec_kw={'height_ratios': [1, 2]})
axr.plot(df3.bin, df3.cm, '-', ms=ms-2, marker=markers[2], color=sciblue)
axr.plot(df2.bin, df2.cm, '-', ms=ms-2, marker=markers[1], color=scired)
axr.plot(df1.bin, df1.cm, '-', ms=ms-2, marker=markers[0], color=sciorange)
axr.set_ylabel(r'$c_{m}$')
ax.plot(df3.bin, df3.rmse_z, '-', ms=ms-0.75, marker=markers[2], color=sciblue, label=lbls[2])
ax.plot(df2.bin, df2.rmse_z, '-', ms=ms-0.75, marker=markers[1], color=scired, label=lbls[1])
ax.plot(df1.bin, df1.rmse_z, '-', ms=ms-0.75, marker=markers[0], color=sciorange, label=lbls[0])
ax.set_xlabel(r'$z_{true} \: (\mu m)$')
ax.set_xlim([-xylim, xylim])
ax.set_xticks(ticks=xyticks, labels=xyticks)
ax.set_ylabel(r'$\sigma_{z} \: (\mu m)$')
ax.legend()
plt.tight_layout()
if save_plots:
plt.savefig(path_figs + '/compare-on-edge-off-bpe_rmse-z_microns_cm.png')
if show_plots:
plt.show()
plt.close()
# ----------------------------------------------------------------------------------------------------------------------
# 5. IDPT VS. SPCT - COMPARE NUMBER OF PARTICLES PER Z
compare_idpt_spct = False
save_plots = False
show_plots = False
if compare_idpt_spct:
# --- 1. IDPT
# read IDPT test coords
dft = io.read_test_coords(path_test_coords)
# test coords stats
mag_eff = 20.0
area_pixels = 512 ** 2
area_microns = (512 * microns_per_pixel) ** 2
i_num_rows = len(dft)
i_num_pids = len(dft.id.unique())
dft = dft.drop(columns=['z', 'z_true'])
dft = dft.rename(columns={'z_corr': 'z', 'z_true_corr': 'z_true'})
# --- 2. SPCT
# 2.1 read SPCT off-bpe test coords
dfs_off = pd.read_excel('/Users/mackenzie/Desktop/gdpyt-characterization/experiments/11.02.21-BPE_Pressure_Deflection_20X/analyses/results-04.26.22_spct_calib1_test-2-3/coords/test-coords/test_coords_t-calib2_c-calib1.xlsx')
dfs_on = pd.read_excel('/Users/mackenzie/Desktop/gdpyt-characterization/experiments/11.02.21-BPE_Pressure_Deflection_20X/analyses/results-04.26.22_spct_stack-id-on-bpe/testcalib2_calcalib1/test_coords_t_20X_ccalib1_tcalib2_c_20X_tcalib2_ccalib1_2022-04-26 20:45:34.334931.xlsx')
# 2.2 correct z by mean z_f from peak_intensity
z_f_mean = 35.1
dfs_off['z'] = dfs_off['z'] - z_f_mean
dfs_off['z_true'] = dfs_off['z_true'] - z_f_mean
dfs_on['z'] = dfs_on['z'] - z_f_mean
dfs_on['z_true'] = dfs_on['z_true'] - z_f_mean
# --- 3. GROUPBY Z_TRUE
dftg = dft.copy()
dftg = dftg.round({'z_true': 0})
dftc = dftg.groupby('z_true').count().reset_index()
dfs_offc = dfs_off.groupby('z_true').count().reset_index()
dfs_onc = dfs_on.groupby('z_true').count().reset_index()
# filter z_true for pretty plotting
zlim = 35
dftc = dftc[(dftc['z_true'] > -zlim) & (dftc['z_true'] < zlim)]
dfs_offc = dfs_offc[(dfs_offc['z_true'] > -zlim) & (dfs_offc['z_true'] < zlim)]
dfs_onc = dfs_onc[(dfs_onc['z_true'] > -zlim) & (dfs_onc['z_true'] < zlim)]
# ---
# --- plotting
# format plots
xylim = 37.25
xyticks = [-30, -15, 0, 15, 30]
ms = 3
# FIGURE 1. PLOT NUMBER OF PARTICLES PER Z_TRUE
fig, ax = plt.subplots()
ax.plot(dftc.z_true, dftc.z, '-o', ms=ms, color=sciblue, label=r'$IDPT$')
ax.plot(dfs_offc.z_true, dfs_offc.z, '-o', ms=ms, color=lighten_color(scigreen, 1.0), label=r'$SPCT_{Low}$')
ax.plot(dfs_onc.z_true, dfs_onc.z, '-o', ms=ms, color=lighten_color(scigreen, 1.2), label=r'$SPCT_{High}$')
ax.set_xlabel(r'$z \: (\mu m)$')
ax.set_xlim([-xylim, xylim])
ax.set_xticks(xyticks)
ax.set_ylabel(r'$N_{p} \: (\#)$')
ax.set_ylim([0, 200])
ax.legend()
plt.tight_layout()
if save_plots:
plt.savefig(path_figs + '/compare-idpt-spct_num-particles.png')
if show_plots:
plt.show()
plt.close()
# ---
# FIGURE 2. PLOT NUMBER OF PARTICLES PER Z_TRUE AND CM
dftm = dftg.groupby('z_true').mean().reset_index()
dfs_offm = dfs_off.groupby('z_true').mean().reset_index()
dfs_onm = dfs_on.groupby('z_true').mean().reset_index()
# filter z_true for pretty plotting
dftm = dftm[(dftm['z_true'] > -zlim) & (dftm['z_true'] < zlim)]
dfs_offm = dfs_offm[(dfs_offm['z_true'] > -zlim) & (dfs_offm['z_true'] < zlim)]
dfs_onm = dfs_onm[(dfs_onm['z_true'] > -zlim) & (dfs_onm['z_true'] < zlim)]
# plot
fig, [axr, ax] = plt.subplots(nrows=2, sharex=True, gridspec_kw={'height_ratios': [1, 2]})
axr.plot(dftm.z_true, dftm.cm, '-o', ms=ms - 1, color=sciblue)
axr.plot(dfs_offm.z_true, dfs_offm.cm, '-o', ms=ms - 1, color=lighten_color(scigreen, 1.0))
axr.plot(dfs_onm.z_true, dfs_onm.cm, '-o', ms=ms - 1, color=lighten_color(scigreen, 1.2))
axr.set_ylabel(r'$c_{m}$')
axr.set_ylim([0.790, 1.01])
axr.set_yticks([0.8, 0.9, 1.0])
ax.plot(dftc.z_true, dftc.z, '-o', ms=ms, color=sciblue, label=r'$IDPT$')
ax.plot(dfs_offc.z_true, dfs_offc.z, '-o', ms=ms, color=lighten_color(scigreen, 1.0), label=r'$SPCT_{Low}$')
ax.plot(dfs_onc.z_true, dfs_onc.z, '-o', ms=ms, color=lighten_color(scigreen, 1.2), label=r'$SPCT_{High}$')
ax.set_xlabel(r'$z \: (\mu m)$')
ax.set_xlim([-xylim, xylim])
ax.set_xticks(xyticks)
ax.set_ylabel(r'$N_{p} \: (\#)$')
ax.set_ylim([0, 185])
ax.set_yticks([0, 50, 100, 150])
ax.legend()
plt.tight_layout()
if save_plots:
plt.savefig(path_figs + '/compare-idpt-spct_num-particles_and_cm.png')
if show_plots:
plt.show()
plt.close()
# ----------------------------------------------------------------------------------------------------------------------
# 6. AVERAGE PARTICLE-TO-PARTICLE SIMILARITY PER-FRAME
plot_average_particle_similarity = False
if plot_average_particle_similarity:
# setup
save_plots = True
xylim = 37.25
xyticks = [-30, -15, 0, 15, 30]
ms = 3
# read dataframe
fp = join(base_dir, 'average-particle-similarity/'
'average_similarity_SPCT_11.02.21-BPE_Pressure_Deflection_20X_c-calib1_t-calib2.xlsx')
dfsim = pd.read_excel(fp)
# plot
fig, ax = plt.subplots()
ax.plot(dfsim.z_corr, dfsim.sim, '-o', ms=ms)
ax.set_xlabel(r'$z \: (\mu m)$')
ax.set_xlim([-xylim, xylim])
ax.set_xticks(xyticks)
ax.set_ylabel(r'$S (p_{i}, p_{N})$')
ax.set_ylim([0.49, 1.01])
plt.tight_layout()
if save_plots:
plt.savefig(path_figs + '/average-particle-to-particle-similarity.png')
plt.show()
plt.close()
j = 1
print("Analysis completed without errors.")
|
[
"utils.plotting.lighten_color",
"utils.functions.line",
"utils.io.read_calib_coords",
"utils.plotting.scatter_3d_and_spline",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.tight_layout",
"os.path.join",
"numpy.round",
"utils.bin.bin_by_list",
"matplotlib.pyplot.close",
"utils.plotting.plot_fitted_plane_and_points",
"correction.correct.fit_in_focus_plane",
"matplotlib.pyplot.subplots",
"utils.fit.fit_3d_spline",
"matplotlib.pyplot.show",
"pandas.DataFrame.from_dict",
"scipy.optimize.curve_fit",
"pandas.read_excel",
"correction.correct.correct_z_by_spline",
"utils.bin.bin_local_rmse_z",
"utils.bin.bin_generic",
"utils.modify.merge_calib_pid_defocus_and_correction_coords",
"utils.io.read_test_coords",
"matplotlib.pyplot.savefig"
] |
[((681, 729), 'matplotlib.pyplot.style.use', 'plt.style.use', (["['science', 'ieee', 'std-colors']"], {}), "(['science', 'ieee', 'std-colors'])\n", (694, 729), True, 'import matplotlib.pyplot as plt\n'), ((740, 754), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (752, 754), True, 'import matplotlib.pyplot as plt\n'), ((808, 822), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (817, 822), True, 'import matplotlib.pyplot as plt\n'), ((1247, 1286), 'os.path.join', 'join', (['base_dir', '"""results-04.26.22_idpt"""'], {}), "(base_dir, 'results-04.26.22_idpt')\n", (1251, 1286), False, 'from os.path import join\n'), ((1306, 1343), 'os.path.join', 'join', (['path_idpt', '"""coords/test-coords"""'], {}), "(path_idpt, 'coords/test-coords')\n", (1310, 1343), False, 'from os.path import join\n'), ((1364, 1402), 'os.path.join', 'join', (['path_idpt', '"""coords/calib-coords"""'], {}), "(path_idpt, 'coords/calib-coords')\n", (1368, 1402), False, 'from os.path import join\n'), ((1421, 1450), 'os.path.join', 'join', (['path_idpt', '"""similarity"""'], {}), "(path_idpt, 'similarity')\n", (1425, 1450), False, 'from os.path import join\n'), ((1466, 1492), 'os.path.join', 'join', (['path_idpt', '"""results"""'], {}), "(path_idpt, 'results')\n", (1470, 1492), False, 'from os.path import join\n'), ((1505, 1528), 'os.path.join', 'join', (['path_idpt', '"""figs"""'], {}), "(path_idpt, 'figs')\n", (1509, 1528), False, 'from os.path import join\n'), ((13148, 13185), 'utils.io.read_test_coords', 'io.read_test_coords', (['path_test_coords'], {}), '(path_test_coords)\n', (13167, 13185), False, 'from utils import fit, functions, bin, io, plotting, modify, plot_collections\n'), ((27923, 27960), 'utils.io.read_test_coords', 'io.read_test_coords', (['path_test_coords'], {}), '(path_test_coords)\n', (27942, 27960), False, 'from utils import fit, functions, bin, io, plotting, modify, plot_collections\n'), ((28335, 28559), 'pandas.read_excel', 'pd.read_excel', (['"""/Users/mackenzie/Desktop/gdpyt-characterization/experiments/11.02.21-BPE_Pressure_Deflection_20X/analyses/results-04.26.22_spct_calib1_test-2-3/coords/test-coords/test_coords_t-calib2_c-calib1.xlsx"""'], {}), "(\n '/Users/mackenzie/Desktop/gdpyt-characterization/experiments/11.02.21-BPE_Pressure_Deflection_20X/analyses/results-04.26.22_spct_calib1_test-2-3/coords/test-coords/test_coords_t-calib2_c-calib1.xlsx'\n )\n", (28348, 28559), True, 'import pandas as pd\n'), ((28563, 28842), 'pandas.read_excel', 'pd.read_excel', (['"""/Users/mackenzie/Desktop/gdpyt-characterization/experiments/11.02.21-BPE_Pressure_Deflection_20X/analyses/results-04.26.22_spct_stack-id-on-bpe/testcalib2_calcalib1/test_coords_t_20X_ccalib1_tcalib2_c_20X_tcalib2_ccalib1_2022-04-26 20:45:34.334931.xlsx"""'], {}), "(\n '/Users/mackenzie/Desktop/gdpyt-characterization/experiments/11.02.21-BPE_Pressure_Deflection_20X/analyses/results-04.26.22_spct_stack-id-on-bpe/testcalib2_calcalib1/test_coords_t_20X_ccalib1_tcalib2_c_20X_tcalib2_ccalib1_2022-04-26 20:45:34.334931.xlsx'\n )\n", (28576, 28842), True, 'import pandas as pd\n'), ((29834, 29848), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (29846, 29848), True, 'import matplotlib.pyplot as plt\n'), ((30336, 30354), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (30352, 30354), True, 'import matplotlib.pyplot as plt\n'), ((30488, 30499), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (30497, 30499), True, 'import matplotlib.pyplot as plt\n'), ((31056, 
31129), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'sharex': '(True)', 'gridspec_kw': "{'height_ratios': [1, 2]}"}), "(nrows=2, sharex=True, gridspec_kw={'height_ratios': [1, 2]})\n", (31068, 31129), True, 'import matplotlib.pyplot as plt\n'), ((32011, 32029), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (32027, 32029), True, 'import matplotlib.pyplot as plt\n'), ((32170, 32181), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (32179, 32181), True, 'import matplotlib.pyplot as plt\n'), ((32571, 32709), 'os.path.join', 'join', (['base_dir', '"""average-particle-similarity/average_similarity_SPCT_11.02.21-BPE_Pressure_Deflection_20X_c-calib1_t-calib2.xlsx"""'], {}), "(base_dir,\n 'average-particle-similarity/average_similarity_SPCT_11.02.21-BPE_Pressure_Deflection_20X_c-calib1_t-calib2.xlsx'\n )\n", (32575, 32709), False, 'from os.path import join\n'), ((32740, 32757), 'pandas.read_excel', 'pd.read_excel', (['fp'], {}), '(fp)\n', (32753, 32757), True, 'import pandas as pd\n'), ((32784, 32798), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (32796, 32798), True, 'import matplotlib.pyplot as plt\n'), ((33024, 33042), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (33040, 33042), True, 'import matplotlib.pyplot as plt\n'), ((33146, 33156), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (33154, 33156), True, 'import matplotlib.pyplot as plt\n'), ((33161, 33172), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (33170, 33172), True, 'import matplotlib.pyplot as plt\n'), ((2505, 2552), 'utils.io.read_calib_coords', 'io.read_calib_coords', (['path_calib_coords', 'method'], {}), '(path_calib_coords, method)\n', (2525, 2552), False, 'from utils import fit, functions, bin, io, plotting, modify, plot_collections\n'), ((2937, 3035), 'correction.correct.fit_in_focus_plane', 'correct.fit_in_focus_plane', ([], {'df': 'dfcpid', 'param_zf': 'param_zf', 'microns_per_pixel': 'microns_per_pixel'}), '(df=dfcpid, param_zf=param_zf, microns_per_pixel=\n microns_per_pixel)\n', (2963, 3035), False, 'from correction import correct\n'), ((3623, 3698), 'utils.fit.fit_3d_spline', 'fit.fit_3d_spline', ([], {'x': 'dfcpid.x', 'y': 'dfcpid.y', 'z': 'dfcpid[param_zf]', 'kx': 'kx', 'ky': 'ky'}), '(x=dfcpid.x, y=dfcpid.y, z=dfcpid[param_zf], kx=kx, ky=ky)\n', (3640, 3698), False, 'from utils import fit, functions, bin, io, plotting, modify, plot_collections\n'), ((4636, 4673), 'utils.io.read_test_coords', 'io.read_test_coords', (['path_test_coords'], {}), '(path_test_coords)\n', (4655, 4673), False, 'from utils import fit, functions, bin, io, plotting, modify, plot_collections\n'), ((4892, 4952), 'correction.correct.correct_z_by_spline', 'correct.correct_z_by_spline', (['dft'], {'bispl': 'bispl_c', 'param_z': '"""z"""'}), "(dft, bispl=bispl_c, param_z='z')\n", (4919, 4952), False, 'from correction import correct\n'), ((14030, 14224), 'utils.bin.bin_local_rmse_z', 'bin.bin_local_rmse_z', (['dft'], {'column_to_bin': 'column_to_bin', 'bins': '(1)', 'min_cm': 'min_cm', 'z_range': 'None', 'round_to_decimal': 'round_z_to_decimal', 'df_ground_truth': 'None', 'dropna': '(True)', 'error_column': '"""error"""'}), "(dft, column_to_bin=column_to_bin, bins=1, min_cm=\n min_cm, z_range=None, round_to_decimal=round_z_to_decimal,\n df_ground_truth=None, dropna=True, error_column='error')\n", (14050, 14224), False, 'from utils import fit, functions, bin, io, plotting, modify, plot_collections\n'), ((14733, 14932), 
'utils.bin.bin_local_rmse_z', 'bin.bin_local_rmse_z', (['dft'], {'column_to_bin': 'column_to_bin', 'bins': 'bins_z', 'min_cm': 'min_cm', 'z_range': 'None', 'round_to_decimal': 'round_z_to_decimal', 'df_ground_truth': 'None', 'dropna': '(True)', 'error_column': '"""error"""'}), "(dft, column_to_bin=column_to_bin, bins=bins_z, min_cm=\n min_cm, z_range=None, round_to_decimal=round_z_to_decimal,\n df_ground_truth=None, dropna=True, error_column='error')\n", (14753, 14932), False, 'from utils import fit, functions, bin, io, plotting, modify, plot_collections\n'), ((15441, 15579), 'utils.bin.bin_generic', 'bin.bin_generic', (['dft'], {'column_to_bin': '"""bin"""', 'column_to_count': '"""id"""', 'bins': 'bins_z', 'round_to_decimal': 'round_z_to_decimal', 'return_groupby': '(True)'}), "(dft, column_to_bin='bin', column_to_count='id', bins=bins_z,\n round_to_decimal=round_z_to_decimal, return_groupby=True)\n", (15456, 15579), False, 'from utils import fit, functions, bin, io, plotting, modify, plot_collections\n'), ((18493, 18596), 'utils.bin.bin_by_list', 'bin.bin_by_list', (['dft'], {'column_to_bin': 'column_to_bin', 'bins': 'bins_x', 'round_to_decimal': 'round_x_to_decimal'}), '(dft, column_to_bin=column_to_bin, bins=bins_x,\n round_to_decimal=round_x_to_decimal)\n', (18508, 18596), False, 'from utils import fit, functions, bin, io, plotting, modify, plot_collections\n'), ((19018, 19032), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (19030, 19032), True, 'import matplotlib.pyplot as plt\n'), ((19664, 19682), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (19680, 19682), True, 'import matplotlib.pyplot as plt\n'), ((19838, 19849), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (19847, 19849), True, 'import matplotlib.pyplot as plt\n'), ((25021, 25058), 'os.path.join', 'join', (['path_results', '"""on-edge-off-bpe"""'], {}), "(path_results, 'on-edge-off-bpe')\n", (25025, 25058), False, 'from os.path import join\n'), ((30382, 30445), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(path_figs + '/compare-idpt-spct_num-particles.png')"], {}), "(path_figs + '/compare-idpt-spct_num-particles.png')\n", (30393, 30445), True, 'import matplotlib.pyplot as plt\n'), ((30473, 30483), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (30481, 30483), True, 'import matplotlib.pyplot as plt\n'), ((32057, 32127), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(path_figs + '/compare-idpt-spct_num-particles_and_cm.png')"], {}), "(path_figs + '/compare-idpt-spct_num-particles_and_cm.png')\n", (32068, 32127), True, 'import matplotlib.pyplot as plt\n'), ((32155, 32165), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (32163, 32165), True, 'import matplotlib.pyplot as plt\n'), ((33070, 33141), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(path_figs + '/average-particle-to-particle-similarity.png')"], {}), "(path_figs + '/average-particle-to-particle-similarity.png')\n", (33081, 33141), True, 'import matplotlib.pyplot as plt\n'), ((3127, 3212), 'utils.plotting.plot_fitted_plane_and_points', 'plotting.plot_fitted_plane_and_points', ([], {'df': 'dfcpid', 'dict_fit_plane': 'dictc_fit_plane'}), '(df=dfcpid, dict_fit_plane=dictc_fit_plane\n )\n', (3164, 3212), False, 'from utils import fit, functions, bin, io, plotting, modify, plot_collections\n'), ((3220, 3283), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(path_figs + '/idpt-calib-coords_fit-plane_raw.png')"], {}), "(path_figs + '/idpt-calib-coords_fit-plane_raw.png')\n", (3231, 3283), True, 'import 
matplotlib.pyplot as plt\n'), ((3296, 3307), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3305, 3307), True, 'import matplotlib.pyplot as plt\n'), ((3339, 3413), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['dictc_fit_plane'], {'orient': '"""index"""', 'columns': "['value']"}), "(dictc_fit_plane, orient='index', columns=['value'])\n", (3361, 3413), True, 'import pandas as pd\n'), ((3928, 4056), 'utils.plotting.scatter_3d_and_spline', 'plotting.scatter_3d_and_spline', (['dfcpid.x', 'dfcpid.y', 'dfcpid[param_zf]', 'bispl_c'], {'cmap': '"""RdBu"""', 'grid_resolution': '(30)', 'view': '"""multi"""'}), "(dfcpid.x, dfcpid.y, dfcpid[param_zf],\n bispl_c, cmap='RdBu', grid_resolution=30, view='multi')\n", (3958, 4056), False, 'from utils import fit, functions, bin, io, plotting, modify, plot_collections\n'), ((4574, 4585), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4583, 4585), True, 'import matplotlib.pyplot as plt\n'), ((5904, 5979), 'os.path.join', 'join', (['base_dir', '"""results-04.26.22_spct_calib1_test-2-3/coords/calib-coords"""'], {}), "(base_dir, 'results-04.26.22_spct_calib1_test-2-3/coords/calib-coords')\n", (5908, 5979), False, 'from os.path import join\n'), ((6013, 6100), 'os.path.join', 'join', (['path_spct_calib_coords', '"""calib_spct_pid_defocus_stats_c-calib1_t-calib2.xlsx"""'], {}), "(path_spct_calib_coords,\n 'calib_spct_pid_defocus_stats_c-calib1_t-calib2.xlsx')\n", (6017, 6100), False, 'from os.path import join\n'), ((6129, 6200), 'os.path.join', 'join', (['path_spct_calib_coords', '"""calib_spct_stats_c-calib1_t-calib2.xlsx"""'], {}), "(path_spct_calib_coords, 'calib_spct_stats_c-calib1_t-calib2.xlsx')\n", (6133, 6200), False, 'from os.path import join\n'), ((6231, 6318), 'os.path.join', 'join', (['path_spct_calib_coords', '"""calib_spct_pop_defocus_stats_c-calib1_t-calib2.xlsx"""'], {}), "(path_spct_calib_coords,\n 'calib_spct_pop_defocus_stats_c-calib1_t-calib2.xlsx')\n", (6235, 6318), False, 'from os.path import join\n'), ((6401, 6473), 'os.path.join', 'join', (['base_dir', '"""results-04.28.22_spct-calib2_test3/coords/calib-coords"""'], {}), "(base_dir, 'results-04.28.22_spct-calib2_test3/coords/calib-coords')\n", (6405, 6473), False, 'from os.path import join\n'), ((6506, 6592), 'os.path.join', 'join', (['path_spct_test_coords', '"""calib_spct_pid_defocus_stats_c-calib2_t-calib3.xlsx"""'], {}), "(path_spct_test_coords,\n 'calib_spct_pid_defocus_stats_c-calib2_t-calib3.xlsx')\n", (6510, 6592), False, 'from os.path import join\n'), ((6620, 6690), 'os.path.join', 'join', (['path_spct_test_coords', '"""calib_spct_stats_c-calib2_t-calib3.xlsx"""'], {}), "(path_spct_test_coords, 'calib_spct_stats_c-calib2_t-calib3.xlsx')\n", (6624, 6690), False, 'from os.path import join\n'), ((6720, 6806), 'os.path.join', 'join', (['path_spct_test_coords', '"""calib_spct_pop_defocus_stats_c-calib2_t-calib3.xlsx"""'], {}), "(path_spct_test_coords,\n 'calib_spct_pop_defocus_stats_c-calib2_t-calib3.xlsx')\n", (6724, 6806), False, 'from os.path import join\n'), ((8357, 8455), 'correction.correct.fit_in_focus_plane', 'correct.fit_in_focus_plane', ([], {'df': 'dfcpid', 'param_zf': 'param_zf', 'microns_per_pixel': 'microns_per_pixel'}), '(df=dfcpid, param_zf=param_zf, microns_per_pixel=\n microns_per_pixel)\n', (8383, 8455), False, 'from correction import correct\n'), ((11256, 11354), 'correction.correct.fit_in_focus_plane', 'correct.fit_in_focus_plane', ([], {'df': 'dfcpid', 'param_zf': 'param_zf', 'microns_per_pixel': 'microns_per_pixel'}), '(df=dfcpid, 
param_zf=param_zf, microns_per_pixel=\n microns_per_pixel)\n', (11282, 11354), False, 'from correction import correct\n'), ((15914, 15930), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (15923, 15930), True, 'import matplotlib.pyplot as plt\n'), ((16042, 16056), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (16054, 16056), True, 'import matplotlib.pyplot as plt\n'), ((16328, 16346), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (16344, 16346), True, 'import matplotlib.pyplot as plt\n'), ((16503, 16514), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (16512, 16514), True, 'import matplotlib.pyplot as plt\n'), ((16626, 16676), 'scipy.optimize.curve_fit', 'curve_fit', (['functions.line', 'dfrmse.z_true', 'dfrmse.z'], {}), '(functions.line, dfrmse.z_true, dfrmse.z)\n', (16635, 16676), False, 'from scipy.optimize import curve_fit\n'), ((17003, 17017), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (17015, 17017), True, 'import matplotlib.pyplot as plt\n'), ((17794, 17812), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (17810, 17812), True, 'import matplotlib.pyplot as plt\n'), ((18275, 18286), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (18284, 18286), True, 'import matplotlib.pyplot as plt\n'), ((19718, 19783), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(path_figs + '/on-edge-off-bpe_calibration_curve.png')"], {}), "(path_figs + '/on-edge-off-bpe_calibration_curve.png')\n", (19729, 19783), True, 'import matplotlib.pyplot as plt\n'), ((19819, 19829), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (19827, 19829), True, 'import matplotlib.pyplot as plt\n'), ((20235, 20429), 'utils.bin.bin_local_rmse_z', 'bin.bin_local_rmse_z', (['dft'], {'column_to_bin': 'column_to_bin', 'bins': '(1)', 'min_cm': 'min_cm', 'z_range': 'None', 'round_to_decimal': 'round_z_to_decimal', 'df_ground_truth': 'None', 'dropna': '(True)', 'error_column': '"""error"""'}), "(dft, column_to_bin=column_to_bin, bins=1, min_cm=\n min_cm, z_range=None, round_to_decimal=round_z_to_decimal,\n df_ground_truth=None, dropna=True, error_column='error')\n", (20255, 20429), False, 'from utils import fit, functions, bin, io, plotting, modify, plot_collections\n'), ((21001, 21200), 'utils.bin.bin_local_rmse_z', 'bin.bin_local_rmse_z', (['dft'], {'column_to_bin': 'column_to_bin', 'bins': 'bins_z', 'min_cm': 'min_cm', 'z_range': 'None', 'round_to_decimal': 'round_z_to_decimal', 'df_ground_truth': 'None', 'dropna': '(True)', 'error_column': '"""error"""'}), "(dft, column_to_bin=column_to_bin, bins=bins_z, min_cm=\n min_cm, z_range=None, round_to_decimal=round_z_to_decimal,\n df_ground_truth=None, dropna=True, error_column='error')\n", (21021, 21200), False, 'from utils import fit, functions, bin, io, plotting, modify, plot_collections\n'), ((21765, 21903), 'utils.bin.bin_generic', 'bin.bin_generic', (['dft'], {'column_to_bin': '"""bin"""', 'column_to_count': '"""id"""', 'bins': 'bins_z', 'round_to_decimal': 'round_z_to_decimal', 'return_groupby': '(True)'}), "(dft, column_to_bin='bin', column_to_count='id', bins=bins_z,\n round_to_decimal=round_z_to_decimal, return_groupby=True)\n", (21780, 21903), False, 'from utils import fit, functions, bin, io, plotting, modify, plot_collections\n'), ((25601, 25615), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (25613, 25615), True, 'import matplotlib.pyplot as plt\n'), ((26112, 26130), 'matplotlib.pyplot.tight_layout', 
'plt.tight_layout', ([], {}), '()\n', (26128, 26130), True, 'import matplotlib.pyplot as plt\n'), ((26311, 26322), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (26320, 26322), True, 'import matplotlib.pyplot as plt\n'), ((26448, 26521), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'sharex': '(True)', 'gridspec_kw': "{'height_ratios': [1, 2]}"}), "(nrows=2, sharex=True, gridspec_kw={'height_ratios': [1, 2]})\n", (26460, 26521), True, 'import matplotlib.pyplot as plt\n'), ((27385, 27403), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (27401, 27403), True, 'import matplotlib.pyplot as plt\n'), ((27587, 27598), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (27596, 27598), True, 'import matplotlib.pyplot as plt\n'), ((29988, 30016), 'utils.plotting.lighten_color', 'lighten_color', (['scigreen', '(1.0)'], {}), '(scigreen, 1.0)\n', (30001, 30016), False, 'from utils.plotting import lighten_color\n'), ((30099, 30127), 'utils.plotting.lighten_color', 'lighten_color', (['scigreen', '(1.2)'], {}), '(scigreen, 1.2)\n', (30112, 30127), False, 'from utils.plotting import lighten_color\n'), ((31264, 31292), 'utils.plotting.lighten_color', 'lighten_color', (['scigreen', '(1.0)'], {}), '(scigreen, 1.0)\n', (31277, 31292), False, 'from utils.plotting import lighten_color\n'), ((31358, 31386), 'utils.plotting.lighten_color', 'lighten_color', (['scigreen', '(1.2)'], {}), '(scigreen, 1.2)\n', (31371, 31386), False, 'from utils.plotting import lighten_color\n'), ((31626, 31654), 'utils.plotting.lighten_color', 'lighten_color', (['scigreen', '(1.0)'], {}), '(scigreen, 1.0)\n', (31639, 31654), False, 'from utils.plotting import lighten_color\n'), ((31737, 31765), 'utils.plotting.lighten_color', 'lighten_color', (['scigreen', '(1.2)'], {}), '(scigreen, 1.2)\n', (31750, 31765), False, 'from utils.plotting import lighten_color\n'), ((7362, 7399), 'pandas.read_excel', 'pd.read_excel', (['path_calib_pid_defocus'], {}), '(path_calib_pid_defocus)\n', (7375, 7399), True, 'import pandas as pd\n'), ((7423, 7459), 'pandas.read_excel', 'pd.read_excel', (['path_calib_spct_stats'], {}), '(path_calib_spct_stats)\n', (7436, 7459), True, 'import pandas as pd\n'), ((7481, 7588), 'utils.modify.merge_calib_pid_defocus_and_correction_coords', 'modify.merge_calib_pid_defocus_and_correction_coords', (['path_calib_coords', 'method'], {'dfs': '[dfcstats, dfcpid]'}), '(path_calib_coords,\n method, dfs=[dfcstats, dfcpid])\n', (7533, 7588), False, 'from utils import fit, functions, bin, io, plotting, modify, plot_collections\n'), ((7814, 7884), 'os.path.join', 'join', (['path_calib_coords', '"""calib_spct_pid_defocus_stats_calib1_xy.xlsx"""'], {}), "(path_calib_coords, 'calib_spct_pid_defocus_stats_calib1_xy.xlsx')\n", (7818, 7884), False, 'from os.path import join\n'), ((7906, 7943), 'pandas.read_excel', 'pd.read_excel', (['path_calib_pid_defocus'], {}), '(path_calib_pid_defocus)\n', (7919, 7943), True, 'import pandas as pd\n'), ((8547, 8632), 'utils.plotting.plot_fitted_plane_and_points', 'plotting.plot_fitted_plane_and_points', ([], {'df': 'dfcpid', 'dict_fit_plane': 'dictc_fit_plane'}), '(df=dfcpid, dict_fit_plane=dictc_fit_plane\n )\n', (8584, 8632), False, 'from utils import fit, functions, bin, io, plotting, modify, plot_collections\n'), ((8640, 8704), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(path_figs + '/calibration-coords_fit-plane_raw.png')"], {}), "(path_figs + '/calibration-coords_fit-plane_raw.png')\n", (8651, 8704), True, 'import 
matplotlib.pyplot as plt\n'), ((8717, 8728), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (8726, 8728), True, 'import matplotlib.pyplot as plt\n'), ((8760, 8834), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['dictc_fit_plane'], {'orient': '"""index"""', 'columns': "['value']"}), "(dictc_fit_plane, orient='index', columns=['value'])\n", (8782, 8834), True, 'import pandas as pd\n'), ((9045, 9120), 'utils.fit.fit_3d_spline', 'fit.fit_3d_spline', ([], {'x': 'dfcpid.x', 'y': 'dfcpid.y', 'z': 'dfcpid[param_zf]', 'kx': 'kx', 'ky': 'ky'}), '(x=dfcpid.x, y=dfcpid.y, z=dfcpid[param_zf], kx=kx, ky=ky)\n', (9062, 9120), False, 'from utils import fit, functions, bin, io, plotting, modify, plot_collections\n'), ((9336, 9464), 'utils.plotting.scatter_3d_and_spline', 'plotting.scatter_3d_and_spline', (['dfcpid.x', 'dfcpid.y', 'dfcpid[param_zf]', 'bispl_c'], {'cmap': '"""RdBu"""', 'grid_resolution': '(30)', 'view': '"""multi"""'}), "(dfcpid.x, dfcpid.y, dfcpid[param_zf],\n bispl_c, cmap='RdBu', grid_resolution=30, view='multi')\n", (9366, 9464), False, 'from utils import fit, functions, bin, io, plotting, modify, plot_collections\n'), ((9983, 9994), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (9992, 9994), True, 'import matplotlib.pyplot as plt\n'), ((10284, 10320), 'pandas.read_excel', 'pd.read_excel', (['path_test_pid_defocus'], {}), '(path_test_pid_defocus)\n', (10297, 10320), True, 'import pandas as pd\n'), ((10344, 10379), 'pandas.read_excel', 'pd.read_excel', (['path_test_spct_stats'], {}), '(path_test_spct_stats)\n', (10357, 10379), True, 'import pandas as pd\n'), ((10401, 10508), 'utils.modify.merge_calib_pid_defocus_and_correction_coords', 'modify.merge_calib_pid_defocus_and_correction_coords', (['path_calib_coords', 'method'], {'dfs': '[dfcstats, dfcpid]'}), '(path_calib_coords,\n method, dfs=[dfcstats, dfcpid])\n', (10453, 10508), False, 'from utils import fit, functions, bin, io, plotting, modify, plot_collections\n'), ((10734, 10804), 'os.path.join', 'join', (['path_calib_coords', '"""calib_spct_pid_defocus_stats_calib2_xy.xlsx"""'], {}), "(path_calib_coords, 'calib_spct_pid_defocus_stats_calib2_xy.xlsx')\n", (10738, 10804), False, 'from os.path import join\n'), ((10826, 10863), 'pandas.read_excel', 'pd.read_excel', (['path_calib_pid_defocus'], {}), '(path_calib_pid_defocus)\n', (10839, 10863), True, 'import pandas as pd\n'), ((11445, 11530), 'utils.plotting.plot_fitted_plane_and_points', 'plotting.plot_fitted_plane_and_points', ([], {'df': 'dfcpid', 'dict_fit_plane': 'dictc_fit_plane'}), '(df=dfcpid, dict_fit_plane=dictc_fit_plane\n )\n', (11482, 11530), False, 'from utils import fit, functions, bin, io, plotting, modify, plot_collections\n'), ((11538, 11595), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(path_figs + '/test-coords_fit-plane_raw.png')"], {}), "(path_figs + '/test-coords_fit-plane_raw.png')\n", (11549, 11595), True, 'import matplotlib.pyplot as plt\n'), ((11608, 11619), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (11617, 11619), True, 'import matplotlib.pyplot as plt\n'), ((11651, 11725), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['dictc_fit_plane'], {'orient': '"""index"""', 'columns': "['value']"}), "(dictc_fit_plane, orient='index', columns=['value'])\n", (11673, 11725), True, 'import pandas as pd\n'), ((11929, 12004), 'utils.fit.fit_3d_spline', 'fit.fit_3d_spline', ([], {'x': 'dfcpid.x', 'y': 'dfcpid.y', 'z': 'dfcpid[param_zf]', 'kx': 'kx', 'ky': 'ky'}), '(x=dfcpid.x, y=dfcpid.y, z=dfcpid[param_zf], 
kx=kx, ky=ky)\n', (11946, 12004), False, 'from utils import fit, functions, bin, io, plotting, modify, plot_collections\n'), ((12220, 12348), 'utils.plotting.scatter_3d_and_spline', 'plotting.scatter_3d_and_spline', (['dfcpid.x', 'dfcpid.y', 'dfcpid[param_zf]', 'bispl_c'], {'cmap': '"""RdBu"""', 'grid_resolution': '(30)', 'view': '"""multi"""'}), "(dfcpid.x, dfcpid.y, dfcpid[param_zf],\n bispl_c, cmap='RdBu', grid_resolution=30, view='multi')\n", (12250, 12348), False, 'from utils import fit, functions, bin, io, plotting, modify, plot_collections\n'), ((12860, 12871), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (12869, 12871), True, 'import matplotlib.pyplot as plt\n'), ((16390, 16436), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(path_figs + '/rmse-z_microns.png')"], {}), "(path_figs + '/rmse-z_microns.png')\n", (16401, 16436), True, 'import matplotlib.pyplot as plt\n'), ((16480, 16490), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (16488, 16490), True, 'import matplotlib.pyplot as plt\n'), ((17235, 17263), 'utils.functions.line', 'functions.line', (['z_fit', '*popt'], {}), '(z_fit, *popt)\n', (17249, 17263), False, 'from utils import fit, functions, bin, io, plotting, modify, plot_collections\n'), ((18252, 18262), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (18260, 18262), True, 'import matplotlib.pyplot as plt\n'), ((22274, 22290), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (22283, 22290), True, 'import matplotlib.pyplot as plt\n'), ((22414, 22428), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (22426, 22428), True, 'import matplotlib.pyplot as plt\n'), ((22724, 22742), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (22740, 22742), True, 'import matplotlib.pyplot as plt\n'), ((22934, 22945), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (22943, 22945), True, 'import matplotlib.pyplot as plt\n'), ((23069, 23119), 'scipy.optimize.curve_fit', 'curve_fit', (['functions.line', 'dfrmse.z_true', 'dfrmse.z'], {}), '(functions.line, dfrmse.z_true, dfrmse.z)\n', (23078, 23119), False, 'from scipy.optimize import curve_fit\n'), ((23468, 23482), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (23480, 23482), True, 'import matplotlib.pyplot as plt\n'), ((24335, 24353), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (24351, 24353), True, 'import matplotlib.pyplot as plt\n'), ((24904, 24915), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (24913, 24915), True, 'import matplotlib.pyplot as plt\n'), ((26174, 26244), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(path_figs + '/compare-on-edge-off-bpe_rmse-z_microns.png')"], {}), "(path_figs + '/compare-on-edge-off-bpe_rmse-z_microns.png')\n", (26185, 26244), True, 'import matplotlib.pyplot as plt\n'), ((26288, 26298), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (26296, 26298), True, 'import matplotlib.pyplot as plt\n'), ((27447, 27520), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(path_figs + '/compare-on-edge-off-bpe_rmse-z_microns_cm.png')"], {}), "(path_figs + '/compare-on-edge-off-bpe_rmse-z_microns_cm.png')\n", (27458, 27520), True, 'import matplotlib.pyplot as plt\n'), ((27564, 27574), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (27572, 27574), True, 'import matplotlib.pyplot as plt\n'), ((4442, 4461), 'numpy.round', 'np.round', (['rmse_c', '(3)'], {}), '(rmse_c, 3)\n', (4450, 4461), True, 'import numpy as np\n'), ((22907, 22917), 
'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (22915, 22917), True, 'import matplotlib.pyplot as plt\n'), ((23740, 23768), 'utils.functions.line', 'functions.line', (['z_fit', '*popt'], {}), '(z_fit, *popt)\n', (23754, 23768), False, 'from utils import fit, functions, bin, io, plotting, modify, plot_collections\n'), ((24877, 24887), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (24885, 24887), True, 'import matplotlib.pyplot as plt\n'), ((9850, 9869), 'numpy.round', 'np.round', (['rmse_c', '(3)'], {}), '(rmse_c, 3)\n', (9858, 9869), True, 'import numpy as np\n'), ((12734, 12753), 'numpy.round', 'np.round', (['rmse_c', '(3)'], {}), '(rmse_c, 3)\n', (12742, 12753), True, 'import numpy as np\n'), ((17385, 17405), 'numpy.round', 'np.round', (['popt[0]', '(3)'], {}), '(popt[0], 3)\n', (17393, 17405), True, 'import numpy as np\n'), ((18021, 18041), 'numpy.round', 'np.round', (['popt[0]', '(3)'], {}), '(popt[0], 3)\n', (18029, 18041), True, 'import numpy as np\n'), ((18116, 18136), 'numpy.round', 'np.round', (['popt[1]', '(3)'], {}), '(popt[1], 3)\n', (18124, 18136), True, 'import numpy as np\n'), ((16796, 16832), 'utils.functions.line', 'functions.line', (['dfrmse.z_true', '*popt'], {}), '(dfrmse.z_true, *popt)\n', (16810, 16832), False, 'from utils import fit, functions, bin, io, plotting, modify, plot_collections\n'), ((23894, 23914), 'numpy.round', 'np.round', (['popt[0]', '(3)'], {}), '(popt[0], 3)\n', (23902, 23914), True, 'import numpy as np\n'), ((24622, 24642), 'numpy.round', 'np.round', (['popt[0]', '(3)'], {}), '(popt[0], 3)\n', (24630, 24642), True, 'import numpy as np\n'), ((24725, 24745), 'numpy.round', 'np.round', (['popt[1]', '(3)'], {}), '(popt[1], 3)\n', (24733, 24745), True, 'import numpy as np\n'), ((23247, 23283), 'utils.functions.line', 'functions.line', (['dfrmse.z_true', '*popt'], {}), '(dfrmse.z_true, *popt)\n', (23261, 23283), False, 'from utils import fit, functions, bin, io, plotting, modify, plot_collections\n')]
|
import pyart
import pydda
from matplotlib import pyplot as plt
import numpy as np
berr_grid = pyart.io.read_grid("berr_Darwin_hires.nc")
cpol_grid = pyart.io.read_grid("cpol_Darwin_hires.nc")
sounding = pyart.io.read_arm_sonde(
"/home/rjackson/data/soundings/twpsondewnpnC3.b1.20060119.231600.custom.cdf")
print(berr_grid.projection)
print(cpol_grid.get_projparams())
u_back = sounding[1].u_wind
v_back = sounding[1].v_wind
z_back = sounding[1].height
#u_init, v_init, w_init = pydda.retrieval.make_constant_wind_field(cpol_grid, wind=(0.0,0.0,0.0), vel_field='VT')
u_init, v_init, w_init = pydda.retrieval.make_wind_field_from_profile(cpol_grid, sounding, vel_field='VT')
#u_init, v_init, w_init = pydda.retrieval.make_test_divergence_field(
# cpol_grid, 30, 9.0, 15e3, 20e3, 5, 0, -20e3, 0)
# Test mass continuity by putting convergence at surface and divergence aloft
berr_grid.fields['DT']['data'] = cpol_grid.fields['DT']['data']
# Step 1 - do iterations with just data
Grids = pydda.retrieval.get_dd_wind_field([berr_grid, cpol_grid], u_init,
v_init, w_init,u_back=u_back,
v_back=v_back, z_back=z_back,
Co=100.0, Cm=1500.0, vel_name='VT',
refl_field='DT', frz=5000.0,
filt_iterations=0,
mask_w_outside_opt=False)
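# Note (added): in get_dd_wind_field, Co weights the radar-observation cost and Cm the
# mass-continuity constraint; frz is the freezing level (m) used in the fall-speed
# relation, filt_iterations=0 skips the low-pass filtering step, and
# mask_w_outside_opt=False leaves w unmasked outside the multi-Doppler coverage region.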
plt.figure(figsize=(8,8))
pydda.vis.plot_horiz_xsection_barbs(Grids, 'DT', level=6,
vel_contours=[1, 4, 10])
plt.interactive(False)
cpol_z = cpol_grid.fields['DT']['data']
# The variables below (cross-section coordinates, retrieved winds, and barb thinning
# factors) are not defined in the original snippet; these are assumed definitions
# needed to make the plotting section runnable.
cpol_x = cpol_grid.point_x['data'] / 1e3   # x coordinate of each grid point (km)
cpol_h = cpol_grid.point_z['data']         # height of each grid point (m)
u = Grids[0].fields['u']                    # retrieved u wind from the pydda output grid
w = Grids[0].fields['w']                    # retrieved w wind from the pydda output grid
barb_density = 8
barb_density_vert = 2
lat_level = 45
plt.figure(figsize=(10,10))
plt.pcolormesh(cpol_x[::,lat_level,::], cpol_h[::,lat_level,::],
cpol_z[::,lat_level,::],
cmap=pyart.graph.cm_colorblind.HomeyerRainbow)
plt.colorbar(label='Z [dBZ]')
plt.barbs(cpol_x[::barb_density_vert,lat_level,::barb_density],
cpol_h[::barb_density_vert,lat_level,::barb_density],
u['data'][::barb_density_vert,lat_level,::barb_density],
w['data'][::barb_density_vert,lat_level,::barb_density])
cs = plt.contour(cpol_x[::,lat_level,::], cpol_h[::,lat_level,::],
w['data'][::,lat_level,::], levels=np.arange(1,20,2),
                 linewidths=16, alpha=0.5)
plt.clabel(cs)
plt.xlabel('X [km]', fontsize=20)
plt.ylabel('Z [m]', fontsize=20)
plt.show()
|
[
"pyart.io.read_arm_sonde",
"matplotlib.pyplot.clabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.interactive",
"pyart.io.read_grid",
"matplotlib.pyplot.barbs",
"matplotlib.pyplot.colorbar",
"pydda.retrieval.get_dd_wind_field",
"matplotlib.pyplot.figure",
"numpy.arange",
"matplotlib.pyplot.pcolormesh",
"pydda.retrieval.make_wind_field_from_profile",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"pydda.vis.plot_horiz_xsection_barbs"
] |
[((95, 137), 'pyart.io.read_grid', 'pyart.io.read_grid', (['"""berr_Darwin_hires.nc"""'], {}), "('berr_Darwin_hires.nc')\n", (113, 137), False, 'import pyart\n'), ((150, 192), 'pyart.io.read_grid', 'pyart.io.read_grid', (['"""cpol_Darwin_hires.nc"""'], {}), "('cpol_Darwin_hires.nc')\n", (168, 192), False, 'import pyart\n'), ((205, 316), 'pyart.io.read_arm_sonde', 'pyart.io.read_arm_sonde', (['"""/home/rjackson/data/soundings/twpsondewnpnC3.b1.20060119.231600.custom.cdf"""'], {}), "(\n '/home/rjackson/data/soundings/twpsondewnpnC3.b1.20060119.231600.custom.cdf'\n )\n", (228, 316), False, 'import pyart\n'), ((597, 683), 'pydda.retrieval.make_wind_field_from_profile', 'pydda.retrieval.make_wind_field_from_profile', (['cpol_grid', 'sounding'], {'vel_field': '"""VT"""'}), "(cpol_grid, sounding, vel_field\n ='VT')\n", (641, 683), False, 'import pydda\n'), ((997, 1246), 'pydda.retrieval.get_dd_wind_field', 'pydda.retrieval.get_dd_wind_field', (['[berr_grid, cpol_grid]', 'u_init', 'v_init', 'w_init'], {'u_back': 'u_back', 'v_back': 'v_back', 'z_back': 'z_back', 'Co': '(100.0)', 'Cm': '(1500.0)', 'vel_name': '"""VT"""', 'refl_field': '"""DT"""', 'frz': '(5000.0)', 'filt_iterations': '(0)', 'mask_w_outside_opt': '(False)'}), "([berr_grid, cpol_grid], u_init, v_init,\n w_init, u_back=u_back, v_back=v_back, z_back=z_back, Co=100.0, Cm=\n 1500.0, vel_name='VT', refl_field='DT', frz=5000.0, filt_iterations=0,\n mask_w_outside_opt=False)\n", (1030, 1246), False, 'import pydda\n'), ((1500, 1526), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (1510, 1526), True, 'from matplotlib import pyplot as plt\n'), ((1526, 1613), 'pydda.vis.plot_horiz_xsection_barbs', 'pydda.vis.plot_horiz_xsection_barbs', (['Grids', '"""DT"""'], {'level': '(6)', 'vel_contours': '[1, 4, 10]'}), "(Grids, 'DT', level=6, vel_contours=[1, \n 4, 10])\n", (1561, 1613), False, 'import pydda\n'), ((1646, 1668), 'matplotlib.pyplot.interactive', 'plt.interactive', (['(False)'], {}), '(False)\n', (1661, 1668), True, 'from matplotlib import pyplot as plt\n'), ((1723, 1751), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (1733, 1751), True, 'from matplotlib import pyplot as plt\n'), ((1751, 1891), 'matplotlib.pyplot.pcolormesh', 'plt.pcolormesh', (['cpol_x[:, lat_level, :]', 'cpol_h[:, lat_level, :]', 'cpol_z[:, lat_level, :]'], {'cmap': 'pyart.graph.cm_colorblind.HomeyerRainbow'}), '(cpol_x[:, lat_level, :], cpol_h[:, lat_level, :], cpol_z[:,\n lat_level, :], cmap=pyart.graph.cm_colorblind.HomeyerRainbow)\n', (1765, 1891), True, 'from matplotlib import pyplot as plt\n'), ((1920, 1949), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'label': '"""Z [dBZ]"""'}), "(label='Z [dBZ]')\n", (1932, 1949), True, 'from matplotlib import pyplot as plt\n'), ((1950, 2204), 'matplotlib.pyplot.barbs', 'plt.barbs', (['cpol_x[::barb_density_vert, lat_level, ::barb_density]', 'cpol_h[::barb_density_vert, lat_level, ::barb_density]', "u['data'][::barb_density_vert, lat_level, ::barb_density]", "w['data'][::barb_density_vert, lat_level, ::barb_density]"], {}), "(cpol_x[::barb_density_vert, lat_level, ::barb_density], cpol_h[::\n barb_density_vert, lat_level, ::barb_density], u['data'][::\n barb_density_vert, lat_level, ::barb_density], w['data'][::\n barb_density_vert, lat_level, ::barb_density])\n", (1959, 2204), True, 'from matplotlib import pyplot as plt\n'), ((2396, 2410), 'matplotlib.pyplot.clabel', 'plt.clabel', (['cs'], {}), '(cs)\n', (2406, 2410), True, 'from 
matplotlib import pyplot as plt\n'), ((2411, 2444), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""X [km]"""'], {'fontsize': '(20)'}), "('X [km]', fontsize=20)\n", (2421, 2444), True, 'from matplotlib import pyplot as plt\n'), ((2445, 2477), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Z [m]"""'], {'fontsize': '(20)'}), "('Z [m]', fontsize=20)\n", (2455, 2477), True, 'from matplotlib import pyplot as plt\n'), ((2478, 2488), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2486, 2488), True, 'from matplotlib import pyplot as plt\n'), ((2334, 2353), 'numpy.arange', 'np.arange', (['(1)', '(20)', '(2)'], {}), '(1, 20, 2)\n', (2343, 2353), True, 'import numpy as np\n')]
|
import numpy as np
# use nanmean from bottleneck if it's installed, otherwise use the numpy one
# bottleneck nanmean is ~2.5x faster
try:
import bottleneck as bn
nanmean = bn.nanmean
except ImportError:
nanmean = np.nanmean
from pytplot import get_data, store_data, options
from ...utilities.tnames import tnames
def mms_eis_spec_combine_sc(
species='proton', data_units='flux', datatype='extof', data_rate='srvy',
level='l2', suffix='',
):
'''
Combines omni-directional energy spectrogram variable from EIS on multiple
MMS spacecraft.
Parameters
----------
datatype: str
        'extof', 'electronenergy', or 'phxtof' (default: 'extof')
data_rate: str
instrument data rate, e.g., 'srvy' or 'brst' (default: 'srvy')
level: str
data level ['l1a','l1b','l2pre','l2' (default)]
data_units: str
desired units for data, e.g., 'flux' or 'cps' (default: 'flux')
suffix: str
suffix of the loaded data; useful for preserving original tplot var
species: str
species for calculation, e.g., proton, oxygen, alpha or electron
(default: 'proton')
Returns:
        Names of the tplot variables created.
'''
## Thoughts for extensions:
    ## - Ensure arguments passed to modules are lowercase
if data_units == 'flux':
units_label = 'Intensity\n[1/cm^2-sr-s-keV]'
elif data_units == 'cps':
units_label = 'CountRate\n[counts/s]'
elif data_units == 'counts':
units_label = 'Counts\n[counts]'
#assert type(datatype) is str
if not isinstance(species, list): species = [species]
if not isinstance(datatype, list): datatype = [datatype]
out_vars = []
for species_id in species:
for dtype in datatype:
# retrieve: omni variables of species to determine # of probes
_species = species_id
if dtype == 'electronenergy':
_species = 'electron'
eis_sc_check = tnames('mms*eis*' + data_rate + '*' + dtype+'*' + _species + '*' + data_units + '*omni'+ suffix)
# process multiple probes
probes = []
for name in eis_sc_check:
probes.append(name[3:4])
if len(probes) > 4:
probes = probes[:-2]
if len(probes) > 1:
probe_string = probes[0] + '-' + probes[-1]
else:
if probes:
probe_string = probes[0]
else:
print('No probes found from eis_sc_check tnames.')
return
allmms_prefix = 'mmsx_epd_eis_' + data_rate + '_' + level + '_' + dtype + '_'
# DETERMINE SPACECRAFT WITH SMALLEST NUMBER OF TIME STEPS TO USE
# AS A REFERENCE SPACECRAFT
omni_vars = tnames('mms?_epd_eis_'+data_rate+'_'+level+'_'+dtype+'_'+_species+'_'+data_units+'_omni'+suffix)
if not omni_vars:
                print('No EIS ' + dtype + ' data loaded!')
return
time_size = np.zeros(len(probes))
energy_size = np.zeros(len(probes))
# Retrieve probe's pitch angle dist for all 6 (omni) telescopes
for p, probe in enumerate(probes):
# note: return from get_data here is (times, data, v)
# according to https://github.com/MAVENSDC/PyTplot/blob/ec87591521e84bae8d81caccaf64fc2a5785186f/pytplot/get_data.py#L66
# note: there are also available 'spec_bins' values
#print(pytplot.data_quants[omni_vars[p]].coords)
#t, data, v = get_data(omni_vars[p])
omni_times, omni_data, omni_energies = get_data(omni_vars[p])
time_size[p] = len(omni_times)
energy_size[p] = len(omni_energies)
reftime_sc_loc = np.argmin(time_size)
ref_sc_time_size = int(min(time_size))
refenergy_sc_loc = np.argmin(energy_size)
ref_sc_energy_size = int(min(energy_size))
prefix = 'mms'+probes[reftime_sc_loc]+'_epd_eis_'+data_rate+'_'+level+'_'+dtype+'_'
# Retrieve specific probe's data based on minimum time/energy
# Note: I did not split these tuples as the namespace is reused, i.e., "_refprobe"
time_refprobe = get_data(omni_vars[reftime_sc_loc])
energy_refprobe = get_data(omni_vars[refenergy_sc_loc])
# time x energy x spacecraft
omni_spec_data = np.empty([len(time_refprobe[0]), len(energy_refprobe[2]), len(probes)])
omni_spec_data[:] = np.nan
# time x energy
omni_spec = np.empty([len(time_refprobe[0]), len(energy_refprobe[2])])
omni_spec[:] = np.nan
energy_data = np.zeros([len(energy_refprobe[2]), len(probes)])
common_energy = np.zeros(len(energy_refprobe[2]))
# Average omni flux over all spacecraft and define common energy grid
for pp in range(len(omni_vars)):
temp_data = get_data(omni_vars[pp])
energy_data[:,pp] = temp_data[2][0:len(common_energy)]
omni_spec_data[0:ref_sc_time_size,:,pp] = temp_data[1][0:ref_sc_time_size,0:len(common_energy)]
for ee in range(len(common_energy)):
common_energy[ee] = nanmean(energy_data[ee,:], axis=0)
# Average omni flux over all spacecraft
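            # (added note: the double loop below is equivalent to nanmean(omni_spec_data, axis=2))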
for tt in range(len(time_refprobe[0])):
for ee in range(len(energy_refprobe[2])):
omni_spec[tt,ee] = nanmean(omni_spec_data[tt,ee,:], axis=0)
# store new tplot variable
omni_spec[np.isnan(omni_spec)] = 0.
new_name = allmms_prefix+_species+'_'+data_units+'_omni'
store_data(new_name, data={'x':time_refprobe[0], 'y':omni_spec, 'v':energy_refprobe[2]})
options(new_name, 'ylog', True)
options(new_name, 'zlog', True)
options(new_name, 'spec', True)
options(new_name, 'Colormap', 'jet')
options(new_name, 'ztitle', units_label)
options(new_name, 'ytitle', ' \\ '.join(['mms'+probe_string, _species.upper(), 'Energy [keV]']))
out_vars.append(new_name)
# Spin-average the data
spin_nums = get_data(prefix+'spin'+suffix)
if spin_nums is None:
print('Error: Could not find EIS spin variable -- now ending procedure.')
return
# find where the spin starts
_, spin_starts = np.unique(spin_nums[1], return_index=True)
spin_sum_flux = np.zeros([len(spin_starts), len(omni_spec[0,:])])
current_start = 0
for spin_idx in range(len(spin_starts)):
spin_sum_flux[spin_idx,:] = nanmean(omni_spec[current_start:spin_starts[spin_idx],:], axis=0)
current_start = spin_starts[spin_idx] + 1
sp = '_spin'
new_name = allmms_prefix+_species+'_'+data_units+'_omni'+sp
store_data(new_name, data={'x':spin_nums[0][spin_starts], 'y':spin_sum_flux, 'v':energy_refprobe[2]})
options(new_name, 'spec', True)
options(new_name, 'zlog', True)
options(new_name, 'ylog', True)
options(new_name, 'spec', True)
out_vars.append(new_name)
return out_vars
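
# ----------------------------------------------------------------------------
# Example usage (a minimal, hypothetical sketch -- not part of the original
# module). It assumes the pyspedas EIS load routine (pyspedas.mms.eis) is used
# to load omni-directional 'extof' proton flux for several probes first; the
# time range and probe list below are placeholders.
if __name__ == '__main__':
    import pyspedas
    for probe in ['1', '2', '3', '4']:
        pyspedas.mms.eis(trange=['2016-10-16', '2016-10-17'], probe=probe,
                         data_rate='srvy', level='l2', datatype='extof')
    combined_vars = mms_eis_spec_combine_sc(species='proton', datatype='extof',
                                             data_rate='srvy', level='l2',
                                             data_units='flux')
    print(combined_vars)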
|
[
"pytplot.store_data",
"numpy.argmin",
"pytplot.get_data",
"numpy.isnan",
"pytplot.options",
"numpy.unique"
] |
[((3932, 3952), 'numpy.argmin', 'np.argmin', (['time_size'], {}), '(time_size)\n', (3941, 3952), True, 'import numpy as np\n'), ((4035, 4057), 'numpy.argmin', 'np.argmin', (['energy_size'], {}), '(energy_size)\n', (4044, 4057), True, 'import numpy as np\n'), ((4408, 4443), 'pytplot.get_data', 'get_data', (['omni_vars[reftime_sc_loc]'], {}), '(omni_vars[reftime_sc_loc])\n', (4416, 4443), False, 'from pytplot import get_data, store_data, options\n'), ((4474, 4511), 'pytplot.get_data', 'get_data', (['omni_vars[refenergy_sc_loc]'], {}), '(omni_vars[refenergy_sc_loc])\n', (4482, 4511), False, 'from pytplot import get_data, store_data, options\n'), ((5873, 5968), 'pytplot.store_data', 'store_data', (['new_name'], {'data': "{'x': time_refprobe[0], 'y': omni_spec, 'v': energy_refprobe[2]}"}), "(new_name, data={'x': time_refprobe[0], 'y': omni_spec, 'v':\n energy_refprobe[2]})\n", (5883, 5968), False, 'from pytplot import get_data, store_data, options\n'), ((5974, 6005), 'pytplot.options', 'options', (['new_name', '"""ylog"""', '(True)'], {}), "(new_name, 'ylog', True)\n", (5981, 6005), False, 'from pytplot import get_data, store_data, options\n'), ((6018, 6049), 'pytplot.options', 'options', (['new_name', '"""zlog"""', '(True)'], {}), "(new_name, 'zlog', True)\n", (6025, 6049), False, 'from pytplot import get_data, store_data, options\n'), ((6062, 6093), 'pytplot.options', 'options', (['new_name', '"""spec"""', '(True)'], {}), "(new_name, 'spec', True)\n", (6069, 6093), False, 'from pytplot import get_data, store_data, options\n'), ((6106, 6142), 'pytplot.options', 'options', (['new_name', '"""Colormap"""', '"""jet"""'], {}), "(new_name, 'Colormap', 'jet')\n", (6113, 6142), False, 'from pytplot import get_data, store_data, options\n'), ((6155, 6195), 'pytplot.options', 'options', (['new_name', '"""ztitle"""', 'units_label'], {}), "(new_name, 'ztitle', units_label)\n", (6162, 6195), False, 'from pytplot import get_data, store_data, options\n'), ((6404, 6438), 'pytplot.get_data', 'get_data', (["(prefix + 'spin' + suffix)"], {}), "(prefix + 'spin' + suffix)\n", (6412, 6438), False, 'from pytplot import get_data, store_data, options\n'), ((6653, 6695), 'numpy.unique', 'np.unique', (['spin_nums[1]'], {'return_index': '(True)'}), '(spin_nums[1], return_index=True)\n', (6662, 6695), True, 'import numpy as np\n'), ((7136, 7244), 'pytplot.store_data', 'store_data', (['new_name'], {'data': "{'x': spin_nums[0][spin_starts], 'y': spin_sum_flux, 'v': energy_refprobe[2]}"}), "(new_name, data={'x': spin_nums[0][spin_starts], 'y':\n spin_sum_flux, 'v': energy_refprobe[2]})\n", (7146, 7244), False, 'from pytplot import get_data, store_data, options\n'), ((7250, 7281), 'pytplot.options', 'options', (['new_name', '"""spec"""', '(True)'], {}), "(new_name, 'spec', True)\n", (7257, 7281), False, 'from pytplot import get_data, store_data, options\n'), ((7294, 7325), 'pytplot.options', 'options', (['new_name', '"""zlog"""', '(True)'], {}), "(new_name, 'zlog', True)\n", (7301, 7325), False, 'from pytplot import get_data, store_data, options\n'), ((7338, 7369), 'pytplot.options', 'options', (['new_name', '"""ylog"""', '(True)'], {}), "(new_name, 'ylog', True)\n", (7345, 7369), False, 'from pytplot import get_data, store_data, options\n'), ((7382, 7413), 'pytplot.options', 'options', (['new_name', '"""spec"""', '(True)'], {}), "(new_name, 'spec', True)\n", (7389, 7413), False, 'from pytplot import get_data, store_data, options\n'), ((3780, 3802), 'pytplot.get_data', 'get_data', (['omni_vars[p]'], {}), '(omni_vars[p])\n', 
(3788, 3802), False, 'from pytplot import get_data, store_data, options\n'), ((5133, 5156), 'pytplot.get_data', 'get_data', (['omni_vars[pp]'], {}), '(omni_vars[pp])\n', (5141, 5156), False, 'from pytplot import get_data, store_data, options\n'), ((5766, 5785), 'numpy.isnan', 'np.isnan', (['omni_spec'], {}), '(omni_spec)\n', (5774, 5785), True, 'import numpy as np\n')]
|
'''
tournament to rank refiners + discriminators for simgan
'''
import numpy as np
import pandas as pd
import torch
def get_graph_ratings(refiners,
discriminators,
validation_data,
device,
starting_rating=1500,
starting_rd=350,
norm_val=173.7178,
n_rounds=3,
matches_per_pairing=5,
samples_per_match=10,
discriminator_win_thresh=0.6):
'''
    TODO: confirm the source for this tournament-style skill-rating scheme;
    it may be https://arxiv.org/abs/1808.04888
Find the best refiner and discriminator from the list of refiners and discriminators using the Tournament Skill Rating Evaluation.
Parameters:
refiners (list(torch.nn)): list of refiners
discriminators (list(torch.nn)): list of discriminators
validation_data (simganData): SimGAN dataset
        device (torch.device or str): device on which the evaluation tensors are created
starting_rating (float): The rating that players were initialized to
        starting_rd (float): The RD (rating deviation) that players were initialized to
norm_val (float): The normalization value used to convert between phi and RD
n_rounds(int): Number of rounds for the tournament
matches_per_pairing(int): The number of matches per refiner/discriminator pairing to determine the overall winner
samples_per_match(int): The number of samples per match to determine the winner of the match
        discriminator_win_thresh (float): The discriminator accuracy at or above which the discriminator is declared the match winner
Returns:
        (refiner_ratings, discriminator_ratings): a tuple of two Pandas DataFrames of rating metadata, one row per refiner (respectively, per discriminator).
        See the illustrative usage sketch at the end of this module for an example call.
'''
n_refiners = len(refiners)
ids = np.arange(n_refiners + len(discriminators))
refiner_ids = ids[:n_refiners]
discriminator_ids = ids[n_refiners:]
ratings = {}
for id in ids:
ratings[id] = {'r': starting_rating, 'RD': starting_rd, 'mu': 0, 'phi': starting_rd/norm_val}
labels_real = torch.zeros(samples_per_match, dtype=torch.float, device=device)
labels_refined = torch.ones(samples_per_match, dtype=torch.float, device=device)
all_real = validation_data.real_raw
all_simulated = validation_data.simulated_raw
for rnd in range(n_rounds):
# instantiate match results
match_results = {}
for id in ids:
match_results[id] = {'opponent_mus': [], 'opponent_phis': [], 'scores': []}
# Perform matches between each pair (R,D)
for id_R, R in zip(refiner_ids, refiners):
for id_D, D in zip(discriminator_ids, discriminators):
                # RODD - open question: why run several matches per pairing instead of a single
                # match with samples_per_match * matches_per_pairing samples? Each match only passes data through the refiner and discriminator once.
for match in range(matches_per_pairing):
real_inds = np.random.choice(np.arange(len(all_real)), samples_per_match, replace=False)
real = torch.tensor(all_real[real_inds], dtype=torch.float, device=device)
sim_inds = np.random.choice(np.arange(len(all_simulated)), samples_per_match, replace=False)
simulated = torch.tensor(all_simulated[sim_inds], dtype=torch.float, device=device)
refined = R(simulated)
# Get discriminator accuracy on real and refined data
d_pred_real = D(real)
acc_real = calc_acc(d_pred_real, labels_real)
d_pred_refined = D(refined)
acc_refined = calc_acc(d_pred_refined, labels_refined)
# Find the average accuracy of the discriminator
avg_acc = (acc_real + acc_refined) / 2.0
# Add this match's results to match_results
match_results[id_D]['opponent_mus'].append(ratings[id_R]['mu'])
match_results[id_R]['opponent_mus'].append(ratings[id_D]['mu'])
match_results[id_D]['opponent_phis'].append(ratings[id_R]['phi'])
match_results[id_R]['opponent_phis'].append(ratings[id_D]['phi'])
if avg_acc >= discriminator_win_thresh: # An accuracy greater than or equal to this threshold is considered a win for the discriminator
# A score of 1 is a win
match_results[id_D]['scores'].append(1)
match_results[id_R]['scores'].append(0)
else:
match_results[id_D]['scores'].append(0)
match_results[id_R]['scores'].append(1)
# Update scores for the refiners and discriminators
new_ratings = ratings.copy()
for id in ids:
results = match_results[id]
glicko_calculations = calculate_new_glicko_scores(ratings[id]['mu'],
ratings[id]['phi'],
np.array(results['opponent_mus']),
np.array(results['opponent_phis']),
np.array(results['scores']),
starting_rating,
norm_val)
new_ratings[id]['mu'], new_ratings[id]['phi'], new_ratings[id]['r'], new_ratings[id]['RD'] = glicko_calculations
ratings = new_ratings
# Get refiner and discriminator with best ratings
ratings_pd = pd.DataFrame(ratings).T
refiner_ratings = ratings_pd.loc[refiner_ids]
discriminator_ratings = ratings_pd.loc[discriminator_ids]
return refiner_ratings, discriminator_ratings
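# Illustrative note (not from the original code): a quick sense of the tournament size.
# With the defaults above -- n_rounds=3, matches_per_pairing=5, samples_per_match=10 --
# and, say, 3 refiners vs 3 discriminators, each round plays 3 * 3 * 5 = 45 matches and
# the whole tournament plays 3 * 45 = 135 matches. Each match draws 10 real and 10
# simulated samples, so roughly 20 * 135 = 2700 samples pass through the networks in total.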
def calc_acc(tensor_output, tensor_labels):
'''
Calculate the percent accuracy of the output, using the labels.
Note that the sigmoid is already calculated as part of the Discriminator Network.
Parameters:
        tensor_output (torch.Tensor): discriminator outputs of shape (M,), each the probability of being class '1'
        tensor_labels (torch.Tensor): true labels of shape (M,), one per sample
Returns:
        acc (torch.Tensor): scalar tensor, the fraction of rounded predictions that match the true labels
'''
    y_pred = torch.round(tensor_output)  # threshold the discriminator probabilities at 0.5
acc = torch.sum(y_pred == tensor_labels.detach()) / len(tensor_labels.detach())
return acc
def calculate_new_glicko_scores(old_mu, old_phi, opponent_mus, opponent_phis, scores, starting_rating, norm_val):
'''
    Glicko-2 references:
    http://www.glicko.net/glicko/glicko2.pdf
    https://en.wikipedia.org/wiki/Glicko_rating_system
Calculate and return the new glicko values for the player using Glicko2 calculation
Parameters:
old_mu (float): The former mu rating
old_phi (float): The former phi rating
opponent_mus (list(float)): The mu ratings of the opponents played
opponent_phis (list(float)): The phi ratings of the opponents played
        scores (list(int)): The scores of the games played, 1 indicating a win, 0 indicating a loss
starting_rating (float): The rating that players were initialized to
norm_val (float): The normalization value used to convert between phi and RD
Returns:
(new_mu, new_phi, new_rating, new_rd) (float, float, float, float): The updated Glicko values for the player
'''
    g = 1.0 / (1 + 3 * opponent_phis**2 / np.pi**2) ** 0.5  # Glicko-2 g(phi): discounts each game by the opponent's rating uncertainty (larger phi -> less weight)
E = 1.0 / (1 + np.exp(-1 * g * (old_mu - opponent_mus))) # Probability of player winning each match
v = np.sum(g**2 * E * (1 - E)) ** -1 # Estimated variance of the player's rating based on game outcomes
delta = v * np.sum(g * (scores - E)) # Estimated improvement in rating
new_phi = 1 / (1/old_phi**2 + 1/v) ** 0.5
new_mu = old_mu + new_phi**2 * np.sum(g * (scores - E))
new_rating = norm_val * new_mu + starting_rating
new_rd = norm_val * new_phi
return new_mu, new_phi, new_rating, new_rd
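if __name__ == '__main__':
    # Hedged usage sketch (not part of the original module): exercises get_graph_ratings
    # end-to-end with tiny stand-in networks and random data. The class and helper names
    # below are illustrative assumptions; only .real_raw / .simulated_raw are attributes
    # the function above actually reads from the dataset object.
    import torch.nn as nn

    class _FakeSimGANData:
        def __init__(self, n_samples=200, n_features=8):
            self.real_raw = np.random.rand(n_samples, n_features).astype(np.float32)
            self.simulated_raw = np.random.rand(n_samples, n_features).astype(np.float32)

    class _TinyRefiner(nn.Module):
        def __init__(self, n_features=8):
            super().__init__()
            self.net = nn.Linear(n_features, n_features)

        def forward(self, x):
            return x + 0.01 * self.net(x)

    class _TinyDiscriminator(nn.Module):
        def __init__(self, n_features=8):
            super().__init__()
            self.net = nn.Sequential(nn.Linear(n_features, 1), nn.Sigmoid())

        def forward(self, x):
            # squeeze so the output shape matches the 1-D label tensors used in calc_acc
            return self.net(x).squeeze(-1)

    device = torch.device('cpu')
    refiners = [_TinyRefiner().to(device) for _ in range(2)]
    discriminators = [_TinyDiscriminator().to(device) for _ in range(2)]
    with torch.no_grad():
        refiner_ratings, discriminator_ratings = get_graph_ratings(
            refiners, discriminators, _FakeSimGANData(), device,
            n_rounds=1, matches_per_pairing=2, samples_per_match=16)
    print(refiner_ratings[['r', 'RD']])
    print(discriminator_ratings[['r', 'RD']])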
|
[
"pandas.DataFrame",
"torch.ones",
"numpy.sum",
"numpy.array",
"numpy.exp",
"torch.zeros",
"torch.round",
"torch.tensor"
] |
[((2232, 2296), 'torch.zeros', 'torch.zeros', (['samples_per_match'], {'dtype': 'torch.float', 'device': 'device'}), '(samples_per_match, dtype=torch.float, device=device)\n', (2243, 2296), False, 'import torch\n'), ((2318, 2381), 'torch.ones', 'torch.ones', (['samples_per_match'], {'dtype': 'torch.float', 'device': 'device'}), '(samples_per_match, dtype=torch.float, device=device)\n', (2328, 2381), False, 'import torch\n'), ((6683, 6709), 'torch.round', 'torch.round', (['tensor_output'], {}), '(tensor_output)\n', (6694, 6709), False, 'import torch\n'), ((5944, 5965), 'pandas.DataFrame', 'pd.DataFrame', (['ratings'], {}), '(ratings)\n', (5956, 5965), True, 'import pandas as pd\n'), ((8078, 8106), 'numpy.sum', 'np.sum', (['(g ** 2 * E * (1 - E))'], {}), '(g ** 2 * E * (1 - E))\n', (8084, 8106), True, 'import numpy as np\n'), ((8194, 8218), 'numpy.sum', 'np.sum', (['(g * (scores - E))'], {}), '(g * (scores - E))\n', (8200, 8218), True, 'import numpy as np\n'), ((7985, 8025), 'numpy.exp', 'np.exp', (['(-1 * g * (old_mu - opponent_mus))'], {}), '(-1 * g * (old_mu - opponent_mus))\n', (7991, 8025), True, 'import numpy as np\n'), ((8334, 8358), 'numpy.sum', 'np.sum', (['(g * (scores - E))'], {}), '(g * (scores - E))\n', (8340, 8358), True, 'import numpy as np\n'), ((5342, 5375), 'numpy.array', 'np.array', (["results['opponent_mus']"], {}), "(results['opponent_mus'])\n", (5350, 5375), True, 'import numpy as np\n'), ((5439, 5473), 'numpy.array', 'np.array', (["results['opponent_phis']"], {}), "(results['opponent_phis'])\n", (5447, 5473), True, 'import numpy as np\n'), ((5537, 5564), 'numpy.array', 'np.array', (["results['scores']"], {}), "(results['scores'])\n", (5545, 5564), True, 'import numpy as np\n'), ((3300, 3367), 'torch.tensor', 'torch.tensor', (['all_real[real_inds]'], {'dtype': 'torch.float', 'device': 'device'}), '(all_real[real_inds], dtype=torch.float, device=device)\n', (3312, 3367), False, 'import torch\n'), ((3513, 3584), 'torch.tensor', 'torch.tensor', (['all_simulated[sim_inds]'], {'dtype': 'torch.float', 'device': 'device'}), '(all_simulated[sim_inds], dtype=torch.float, device=device)\n', (3525, 3584), False, 'import torch\n')]
|