repo_name (string, 6-130 chars) | hexsha (list) | file_path (list) | code (list) | apis (list)
---|---|---|---|---|
vikashachary/SolvedProblems | [
"1edea79222f914beef5ab4393c0e074a79f13a48"
] | [
"Python/checkerboxMatrix.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 31 08:12:19 2019\nCreate checker box\n@author: vikash\n\"\"\"\n\nimport numpy as np\n# Read the variable from STDIN\nn = int(input())\n\nmatrix = np.zeros((n,n), dtype=int)\nmatrix[1::2,::2] = 1\nmatrix[::2,1::2] = 1\nprint(matrix)"
] | [
[
"numpy.zeros"
]
] |
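As a quick aside on the `numpy.zeros` entry in the `apis` cell above, here is a minimal self-contained sketch of the same checkerboard slicing idiom (the `checkerboard` function name is ours, not part of the dataset):

```python
import numpy as np

def checkerboard(n: int) -> np.ndarray:
    """Return an n x n 0/1 checkerboard, mirroring the snippet above."""
    matrix = np.zeros((n, n), dtype=int)  # numpy.zeros, the API recorded in the `apis` cell
    matrix[1::2, ::2] = 1  # odd rows, even columns
    matrix[::2, 1::2] = 1  # even rows, odd columns
    return matrix

print(checkerboard(4))
```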
steven-lang/DAFNe | [
"b13912041a263904cf26ca5f3468c6bc64ce800c"
] | [
"dafne/data/datasets/icdar15.py"
] | [
"from detectron2.data.datasets import register_coco_instances\nfrom dafne.utils.sort_corners import sort_quadrilateral\nfrom detectron2.utils.colormap import colormap\nfrom detectron2.data.datasets.coco import load_coco_json\nfrom detectron2.data import (\n DatasetCatalog,\n MetadataCatalog,\n DatasetMapper,\n transforms as T,\n)\nimport cv2\nimport xml.etree.ElementTree as ET\n\nfrom detectron2.structures import BoxMode, PolygonMasks, RotatedBoxes\nfrom detectron2.data import detection_utils as utils\nimport copy\nimport torch\nimport contextlib\nimport datetime\nimport io\nimport json\nimport logging\nimport numpy as np\nimport os\nimport pycocotools.mask as mask_util\nfrom fvcore.common.file_io import PathManager, file_lock\nfrom fvcore.common.timer import Timer\nfrom PIL import Image\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n\nimport os\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef load_annotation(root_dir, img_id, imageset):\n if imageset == \"val\":\n imageset = \"train\"\n filename = os.path.join(root_dir, \"Annotations\", imageset, \"gt_img_\" + img_id + \".txt\")\n\n boxes, gt_classes = [], []\n with open(filename, \"r\", encoding=\"utf-8-sig\") as f:\n content = f.read()\n objects = content.split(\"\\n\")\n for obj in objects:\n if len(obj) != 0:\n box = obj.split(\",\")[0:8]\n label = 0\n box = [eval(x) for x in box]\n boxes.append(box)\n gt_classes.append(label)\n return {\"boxes\": np.array(boxes, dtype=np.int32), \"gt_classes\": np.array(gt_classes)}\n\n\ndef xywha2xy4(xywha): # a represents the angle(degree), clockwise, a=0 along the X axis\n x, y, w, h, a = xywha\n corner = np.array([[-w / 2, -h / 2], [w / 2, -h / 2], [w / 2, h / 2], [-w / 2, h / 2]])\n # a = np.deg2rad(a)\n transform = np.array([[np.cos(a), -np.sin(a)], [np.sin(a), np.cos(a)]])\n return transform.dot(corner.T).T + [x, y]\n\n\ndef norm_angle(angle, range=[-np.pi / 4, np.pi]):\n return (angle - range[0]) % range[1] + range[0]\n\n\nNAMES = [\"text\"]\n\nlabel2name = dict((label, name) for label, name in enumerate(NAMES))\nname2label = dict((name, label) for label, name in enumerate(NAMES))\n\n\ndef parse_annotation(img_id: str, root: str, image_set: str):\n anno = load_annotation(root_dir=root, img_id=img_id, imageset=image_set)\n\n # Construct image and annotation path\n if image_set == \"val\":\n image_set = \"train\" # val images are in the train folder\n img_path = os.path.join(root, \"images\", image_set, f\"img_{img_id}.jpg\")\n\n # Create new data record for each image\n record = {}\n record[\"file_name\"] = img_path\n record[\"image_id\"] = img_id # Strip starting letter \"P\"\n\n img = cv2.imread(img_path)\n record[\"width\"] = img.shape[1]\n record[\"height\"] = img.shape[0]\n\n # Collect annotations\n objs = []\n num_objects = anno[\"boxes\"].shape[0]\n for i in range(num_objects):\n obj = {}\n obbox = anno[\"boxes\"][i]\n label = 0\n\n bbox = np.array(obbox).reshape(1, -1)\n xmin, xmax = bbox[:, 0::2].min(), bbox[:, 0::2].max()\n ymin, ymax = bbox[:, 1::2].min(), bbox[:, 1::2].max()\n w = np.abs(xmax - xmin)\n h = np.abs(ymax - ymin)\n\n ar = np.maximum(w / (h + 1e-16), h / (w + 1e-16))\n is_valid_box = (w > 2) & (h > 2) & (ar < 30)\n if not is_valid_box:\n continue\n\n area = w * h\n bbox = np.array([xmin, ymin, xmax, ymax])\n\n obj[\"segmentation\"] = obbox.reshape(1, -1).tolist()\n obj[\"category_id\"] = label\n obj[\"bbox\"] = bbox\n obj[\"bbox_mode\"] = BoxMode.XYXY_ABS\n obj[\"difficult\"] = 0\n obj[\"area\"] = area\n objs.append(obj)\n record[\"annotations\"] = objs\n 
return record\n\n\ndef load_icdar15(root, image_set, cfg):\n image_sets = [image_set] if isinstance(image_set, str) else image_set\n dataset_dicts = []\n for image_set in image_sets:\n # Read lines in image set file\n with open(os.path.join(root, \"ImageSets\", f\"{image_set}.txt\")) as f:\n lines = f.read().splitlines()\n\n if cfg.DEBUG.OVERFIT_NUM_IMAGES > 0:\n # Select the first N images\n lines = lines[: cfg.DEBUG.OVERFIT_NUM_IMAGES]\n\n for img_id in lines:\n img_id = img_id.replace(\"gt_img_\", \"\")\n record = parse_annotation(img_id, root, image_set)\n dataset_dicts.append(record)\n\n return dataset_dicts\n\n\ndef register_icdar15_instances(name, split, metadata, image_root, cfg):\n \"\"\"\n Register a dataset in COCO's json annotation format for\n instance detection, instance segmentation and keypoint detection.\n (i.e., Type 1 and 2 in http://cocodataset.org/#format-data.\n `instances*.json` and `person_keypoints*.json` in the dataset).\n\n This is an example of how to register a new dataset.\n You can do something similar to this function, to register new datasets.\n\n Args:\n name (str): the name that identifies a dataset, e.g. \"coco_2014_train\".\n metadata (dict): extra metadata associated with this dataset. You can\n leave it as an empty dict.\n image_root (str or path-like): directory which contains all the images.\n \"\"\"\n assert isinstance(name, str), name\n assert isinstance(image_root, (str, os.PathLike)), image_root\n\n DatasetCatalog.register(\n name,\n lambda: load_icdar15(\n root=metadata[\"root_dir\"],\n image_set=split,\n cfg=cfg,\n ),\n )\n\n # 2. Optionally, add metadata about this dataset,\n # since they might be useful in evaluation, visualization or logging\n MetadataCatalog.get(name).set(image_root=image_root, evaluator_type=\"icdar15\", **metadata)\n\n\ndef _make_datasets_dict():\n datasets_dict = {}\n # Construct datasets dict from currently available datasets\n for split in [\"train\", \"test\", \"val\"]:\n name = f\"icdar15_{split}\"\n img_dir = \"images/train\" if split in [\"train\", \"val\"] else \"images/test\"\n datasets_dict[name] = {\n \"root_dir\": \"icdar-2015\",\n \"img_dir\": img_dir,\n \"ann_file\": f\"ImageSets/{split}.txt\",\n \"split\": split,\n \"is_test\": \"test\" in name,\n }\n\n return datasets_dict\n\n\ndef register_icdar15(cfg):\n \"\"\"Setup method to register the icdar15 dataset.\"\"\"\n datasets_dict = _make_datasets_dict()\n\n # Get the data directory\n data_dir = os.environ[\"DAFNE_DATA_DIR\"]\n colors = colormap(rgb=True, maximum=255)\n for dataset_name, d in datasets_dict.items():\n\n def reg(name):\n register_icdar15_instances(\n name=name,\n metadata={\n \"is_test\": d[\"is_test\"],\n \"root_dir\": os.path.join(data_dir, d[\"root_dir\"]),\n \"thing_colors\": colors,\n },\n image_root=os.path.join(data_dir, d[\"root_dir\"], d[\"img_dir\"]),\n split=d[\"split\"],\n cfg=cfg,\n )\n\n # Register normal version\n reg(dataset_name)\n"
] | [
[
"numpy.array",
"numpy.sin",
"numpy.cos",
"numpy.abs",
"numpy.maximum"
]
] |
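For the `numpy.sin`/`numpy.cos`/`numpy.array` calls recorded above, a minimal sketch of the corner-rotation idiom behind `xywha2xy4` (the `xywha_to_corners` name is ours, and we apply the degree-to-radian conversion that the original keeps commented out):

```python
import numpy as np

def xywha_to_corners(x, y, w, h, a_deg):
    """Rotate the four corners of a w x h box by a_deg degrees around (x, y)."""
    a = np.deg2rad(a_deg)  # assumption: angle given in degrees, per the original docstring
    corners = np.array([[-w / 2, -h / 2], [w / 2, -h / 2],
                        [w / 2, h / 2], [-w / 2, h / 2]])
    rotation = np.array([[np.cos(a), -np.sin(a)],
                         [np.sin(a), np.cos(a)]])
    return rotation.dot(corners.T).T + [x, y]

print(xywha_to_corners(10.0, 5.0, 4.0, 2.0, 90.0))
```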
erickmartinez/pydlcp | [
"611eceeb0816af432e1c06ee171376af2bc13a0e"
] | [
"pydlcp/datastorage.py"
] | [
"\nimport h5py\nimport numpy as np\n\nvcr_type = np.dtype([('V', 'd'), ('C', 'd'), ('R', 'd')])\ntti_type = np.dtype([('time', 'd'), ('temperature', 'd'), ('current', 'd')])\ndlcp_type = np.dtype([('osc_level', 'd'), ('bias', 'd'), ('nominal_bias', 'd'), ('V', 'd'), ('C', 'd'), ('R', 'd')])\n\n\nclass BTSH5Store:\n \"\"\"\n This class provides methods to save and access BTS experimental results to an hdf5 file\n\n Attributes\n ----------\n _filename: str\n The path to the h5 file\n\n Methods\n -------\n _create_h5(self):\n Creates the h5 file and adds the basic structure containing 3 groups\n '/logs'\n A group to store all logs\n '/logs/temperature_and_current'\n A dataset that stores the temperautre and leakage current every x-seconds\n '/bts'\n A group to store al the bias-temperature CV results\n '/bts/time'\n A 1D dataset containing the timedelta at which each CV was taken\n '/bts/voltage'\n A Matrix containing the voltage sweep at each timedelta\n '/bts/capacitance'\n A Matrix containing the capacitance from the CV sweep\n '/bts/resistance'\n A Matrix containing Rs from the CV sweep.\n '/dlcp'\n A group containing al DLCP results\n '/dlcp/time'\n A 1D dataset containing the timedelta at which each DLCP sweep was taken\n '/dlcp/osc_level'\n A Matrix containing the osc_level sweep at each timedelta\n '/dlcp/bias'\n A Matrix containing the voltage sweep at each timedelta\n '/dlcp/nominal_bias'\n A Matrix containing the voltage sweep at each timedelta\n '/dlcp/voltage'\n A Matrix containing the voltage sweep at each timedelta\n '/dlcp/capacitance'\n A Matrix containing the capacitance from the DLCP sweep\n '/dlcp/resistance'\n A Matrix containing Rs from the CV sweep.\n\n create_temperature_log(self):\n Creates the resizable dataset \"/logs/temperature_and_current\" on the h5 file.\n\n log_temperature_current(self, time: float, temperature: float, current: float):\n Appends the temperature and current readings at time 't' to the '/logs/temperature_and_current' dataset\n\n get_bts_cv(self):\n Returns the stress_time, voltage, capacitance and resistance datasets in the /bts group\n\n get_dlcp_data(self):\n Returns the stress_time, osc_level, bias, nominal_bias, voltage, capacitance and resistance datasets in\n the /dlcp group\n\n get_bts_cv_at_time(self, time_idx: int) -> vcr_type:\n Returns a single CV sweep result at an specific time index\n\n get_bts_temperature(self):\n Returns the temperature log\n\n append_cv(self, time: float, cv_data: vcr_type):\n Appends a CV sweep result (vcr_type) to the /bts group\n\n append_dlcp(self, time: float, dlcp_data: dlcp_type):\n Appends a DLCP sweep result (dlcp_type) to the /dlcp group\n\n _append_resizeble_dataset(self, group_name: str, data_set_name: str, data):\n Appends data to a resizable dataset in the h5 file\n\n metadata(self, metadata: dict, group=\"/\"):\n Appends dictionary key values as attributes to the selected group/dataset\n\n create_resizeable_dataset(self, name: str, size: (int, int), group_name: str, dtype=None):\n Creates a resizable dataset on a specific group in the h5 file\n\n create_fixed_dataset(self, name: str, size: (int, int), group_name: str, dtype=None):\n Creates a dataset that cannot be resized on a specific group in the h5 file.\n\n \"\"\"\n\n def __init__(self, filename: str):\n \"\"\"\n Parameters\n ----------\n filename: str\n The name of the h5 file to store data to.\n \"\"\"\n self._filename = filename\n self._create_h5()\n\n def _create_h5(self):\n \"\"\"\n Creates the h5 file and appends the basic groups\n 
\"\"\"\n hf = h5py.File(self._filename, 'w')\n hf.create_group(\"logs\") # Store experimental logs\n hf.create_group(\"bts\") # Store bias temperature stress capacitance data\n hf.create_group(\"dlcp\") # Store DLCP data\n hf.close()\n\n def create_temperature_log(self):\n \"\"\"\n Creates the 'temperature_and_current' dataset on the '/logs' dataset of the h5 file.\n \"\"\"\n self.create_resizeable_dataset(name='temperature_and_current', size=(0,), group_name=\"logs\", dtype=tti_type)\n\n def log_temperature_current(self, time: [float, int], temperature: [float, int], current: float):\n \"\"\"\n Appends the tuple (time, temperature, current) to the 'temperature_and_current' dataset.\n\n Parameters\n ----------\n time: [float, int]\n The timedelta since the beginning of the log\n temperature: [float, int]\n The sample temperature in °C\n current: float\n The leakage current through the device.\n\n \"\"\"\n data = np.array([(time, temperature, current)], dtype=tti_type)\n self._append_resizeble_dataset(group_name=\"/logs\", data_set_name=\"temperature_and_current\", data=data,\n dtype=tti_type)\n\n def get_bts_cv(self) -> dict:\n \"\"\"\n Gets all the CV sweeps stored in the H5 file wrapped in a dictionary.\n\n Returns\n -------\n dict\n A dictionary containing the vector for 'stress_time', and the matrices 'voltage', 'capacitance' and\n 'resistance'\n \"\"\"\n with h5py.File(self._filename, 'r') as hf:\n if '/bts/time' in hf:\n stress_time = np.array(hf.get('/bts/time'))\n voltage = np.array(hf.get('/bts/voltage'))\n capacitance = np.array(hf.get('/bts/capacitance'))\n resistance = np.array(hf.get('/bts/resistance'))\n data = {\n 'stress_time': stress_time,\n 'voltage': voltage,\n 'capacitance': capacitance,\n 'resistance': resistance\n }\n else:\n data = None\n return data\n\n def get_dlcp_data(self) -> dict:\n \"\"\"\n Gets al DLCP sweep data as dictionary.\n\n Returns\n -------\n dict:\n A dictionary containing the vector for 'stress_time', and the matrices 'osc_level', 'bias', 'nominal_bias',\n 'voltage', 'capacitance' and 'resistance'\n \"\"\"\n with h5py.File(self._filename, 'r') as hf:\n if '/dlcp/time' in hf:\n stress_time = np.array(hf.get('/dlcp/time'))\n osc_level = np.array(hf.get('/dlcp/osc_level'))\n bias = np.array(hf.get('/dlcp/bias'))\n nominal_bias = np.array(hf.get('/dlcp/nominal_bias'))\n voltage = np.array(hf.get('/dlcp/voltage'))\n capacitance = np.array(hf.get('/dlcp/capacitance'))\n resistance = np.array(hf.get('/dlcp/resistance'))\n data = {\n 'stress_time': stress_time,\n 'osc_level': osc_level,\n 'bias': bias,\n 'nominal_bias': nominal_bias,\n 'voltage': voltage,\n 'capacitance': capacitance,\n 'resistance': resistance\n }\n else:\n data = None\n return data\n\n def get_bts_cv_at_time(self, time_idx: int):\n \"\"\"\n Returns a single CV sweep at a specific time index.\n\n Parameters\n ----------\n time_idx: int\n The row index corresponding to a specific time of the measurement.\n\n Returns\n -------\n np.ndarray(vcr_type)\n A numpy array of vcr_type\n \"\"\"\n data = self.get_bts_cv()\n if data is None:\n return np.array([], dtype=vcr_type)\n if abs(time_idx) < data['stress_time'].shape[0]:\n n = len(data['voltage'])\n vcr_data: vcr_type = np.empty(n, dtype=vcr_type)\n for i, v, c, r in zip(range(n), data['voltage'][time_idx],\n data['capacitance'][time_idx],\n data['resistance'][time_idx]):\n vcr_data[i] = (v, c, r)\n return vcr_data\n else:\n return np.array([], dtype=vcr_type)\n\n def get_bts_temperature(self):\n \"\"\"\n Returns the whole temperature log\n\n 
Returns\n -------\n np.ndarray(tti_type)\n The numpy array of type tti_type containing all temperature and leakage current logs.\n \"\"\"\n with h5py.File(self._filename, 'r') as hf:\n if '/logs/temperature_and_current' in hf:\n data: tti_type = np.array(hf['/logs/temperature_and_current'], dtype=tti_type)\n else:\n data: tti_type = None\n return data\n\n def append_cv(self, time: [float, int], cv_data: vcr_type):\n \"\"\"\n Appends a CV sweep to the H5 file\n\n Parameters\n ----------\n time: float\n The time delta at which the measurement was taken.\n cv_data: [vcr_type, np.ndarray(vcr_type)\n The CV sweep\n \"\"\"\n self._append_resizeble_dataset(group_name=\"/bts\", data_set_name=\"time\", data=np.array([time]))\n self._append_resizeble_dataset(group_name=\"/bts\", data_set_name=\"voltage\", data=cv_data['V'])\n self._append_resizeble_dataset(group_name=\"/bts\", data_set_name=\"capacitance\", data=cv_data['C'])\n self._append_resizeble_dataset(group_name=\"/bts\", data_set_name=\"resistance\", data=cv_data['R'])\n\n def append_dlcp(self, time: [float, int], dlcp_data: dlcp_type):\n \"\"\"\n Appends a DLCP sweep to the H5 file\n\n Parameters\n ----------\n time: [float, int]\n The time delta at which the DLCP measurement was performed\n dlcp_data: [dlcp_dtype, np.ndarray(dlcp_type)\n The DLCP data\n \"\"\"\n self._append_resizeble_dataset(group_name=\"/dlcp\", data_set_name=\"time\", data=np.array([time]))\n self._append_resizeble_dataset(group_name=\"/dlcp\", data_set_name=\"osc_level\", data=dlcp_data['osc_level'])\n self._append_resizeble_dataset(group_name=\"/dlcp\", data_set_name=\"bias\", data=dlcp_data['bias'])\n self._append_resizeble_dataset(group_name=\"/dlcp\", data_set_name=\"nominal_bias\", data=dlcp_data['nominal_bias'])\n self._append_resizeble_dataset(group_name=\"/dlcp\", data_set_name=\"voltage\", data=dlcp_data['V'])\n self._append_resizeble_dataset(group_name=\"/dlcp\", data_set_name=\"capacitance\", data=dlcp_data['C'])\n self._append_resizeble_dataset(group_name=\"/dlcp\", data_set_name=\"resistance\", data=dlcp_data['R'])\n\n def _append_resizeble_dataset(self, group_name: str, data_set_name: str, data, dtype=None):\n \"\"\"\n Appends data to a resizable dataset in the H5 File\n\n Parameters\n ----------\n group_name: str\n The name of the group to append the dataset to.\n data_set_name: str\n The name of the dataset to append.\n data: np.ndarray\n The data to store in the dataset\n dtype: np.dtype\n The type of data to store\n \"\"\"\n if not isinstance(data, np.ndarray) and not isinstance(data, list):\n data = np.array([data])\n with h5py.File(self._filename, 'a') as hf:\n group = hf.get(group_name)\n if data_set_name not in group:\n group_ds = group.create_dataset(name=data_set_name, shape=(0, data.shape[0]), compression='gzip',\n chunks=True, maxshape=(None, data.shape[0]), dtype=dtype)\n else:\n group_ds = group.get(data_set_name)\n\n n = group_ds.shape[0]\n group_ds.resize(n + 1, axis=0)\n group_ds[-data.shape[0]:] = data\n\n def metadata(self, metadata: dict, group=\"/\"):\n \"\"\"\n Saves a dictionary with the measurement metadata to the specified dataset/group.\n\n Parameters\n ----------\n metadata: dict\n A dictionary with the metadata to save\n group: str\n The dataset/group to save the attribures to.\n \"\"\"\n if not isinstance(metadata, dict):\n raise TypeError('The argument must be of type ')\n with h5py.File(self._filename, 'a') as hf:\n group = hf.get(group) if group != \"/\" else hf\n for key, val in metadata.items():\n group.attrs[key] = 
val\n\n def get_metadata(self, group=\"/\") -> dict:\n \"\"\"\n Returns the attributes of a selected group.\n\n Parameters\n ----------\n group: str\n The group to get the attributes from\n\n Returns\n -------\n dict:\n A dictionary with the attributes of the dataset/group\n \"\"\"\n with h5py.File(self._filename, 'r') as hf:\n metadata = dict(hf.get(group).attrs)\n return metadata\n\n def create_resizeable_dataset(self, name: str, size: (int, int), group_name: str, dtype=None):\n \"\"\"\n Creates a resizable dataset in the group 'group_name'.\n\n Parameters\n ----------\n name: str\n The name of the dataset\n size: (int, int)\n The shape of the dataset\n group_name: str\n The name of the group to save the dataset to\n dtype: np.dtype\n The type of data to be stored\n \"\"\"\n if not isinstance(name, str):\n raise TypeError('Name should be an instance of str')\n with h5py.File(self._filename, 'a') as hf:\n if group_name not in hf:\n hf.create_group(group_name)\n group = hf.get(group_name)\n group.create_dataset(name=name, shape=size, dtype=dtype, compression='gzip', chunks=True, maxshape=(None,))\n\n def create_fixed_dataset(self, name: str, size: (int, int), group_name: str, dtype=None):\n \"\"\"\n Creates a non-resizable dataset in the group 'group_name'.\n\n Parameters\n ----------\n name: str\n The name of the dataset\n size: (int, int)\n The shape of the dataset\n group_name: str\n The name of the group to save the dataset to\n dtype: np.dtype\n The type of the data being store\n \"\"\"\n if not isinstance(name, str):\n raise TypeError('Name should be an instance of str')\n with h5py.File(self._filename, 'a') as hf:\n if group_name not in hf:\n hf.create_group(group_name)\n group = hf.get(group_name)\n group.create_dataset(name=name, shape=size, dtype=dtype, compression='gzip')\n"
] | [
[
"numpy.array",
"numpy.empty",
"numpy.dtype"
]
] |
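The `numpy.dtype`/`numpy.empty`/`numpy.array` entries above all serve structured arrays such as `vcr_type`; a minimal sketch of that pattern (the sample values are invented for illustration):

```python
import numpy as np

# The same structured dtype the module defines for (voltage, capacitance, resistance) rows.
vcr_type = np.dtype([('V', 'd'), ('C', 'd'), ('R', 'd')])

# Fill a record array element by element, as get_bts_cv_at_time does.
n = 3
vcr_data = np.empty(n, dtype=vcr_type)
for i, (v, c, r) in enumerate(zip([0.1, 0.2, 0.3], [1e-9, 2e-9, 3e-9], [50.0, 51.0, 52.0])):
    vcr_data[i] = (v, c, r)

print(vcr_data['V'])  # fields are addressed by name, e.g. cv_data['V'] in append_cv
```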
liuhuaijjin/epnet_pointformer.py | [
"ad9890895f7d91c6687b959ec1edc231a13184ef"
] | [
"lib/net/pointnet2_msg_unet_vgg16.py"
] | [
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom pointnet2_lib.pointnet2.pointnet2_modules import PointnetFPModule, PointnetSAModuleMSG, PointnetSAModuleMSGPointformer\nfrom lib.config import cfg\nfrom torch.nn.functional import grid_sample\n\n\nBatchNorm2d = nn.BatchNorm2d\n\ndef conv3x3(in_planes, out_planes, stride = 1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size = 3, stride = stride,\n padding = 1, bias = False)\n\nclass BasicBlock(nn.Module):\n def __init__(self, inplanes, outplanes, stride = 1): #chin, chout, block_nums, stride\n super(BasicBlock,self).__init__()\n blocks=[nn.Conv2d(inplanes, outplanes, kernel_size=3, stride=1, padding=1, bias=True),nn.BatchNorm2d(outplanes),nn.ReLU(inplace=True)]\n for _ in range(3):\n blocks+=[nn.Conv2d(outplanes, outplanes, kernel_size=3, stride=1, padding=1, bias=True),nn.BatchNorm2d(outplanes),nn.ReLU(inplace=True)]\n blocks.append(conv3x3(outplanes,outplanes,2*stride))\n self.layers = nn.Sequential(*blocks)\n def forward(self, x):\n return self.layers(x)\n\n# class BasicBlock(nn.Module):\n# def __init__(self, inplanes, outplanes, stride = 1):\n# super(BasicBlock, self).__init__()\n# self.conv1 = conv3x3(inplanes, outplanes, stride)\n# self.bn1 = BatchNorm2d(outplanes )\n# self.relu = nn.ReLU(inplace = True)\n# self.conv2 = conv3x3(outplanes, outplanes, 2*stride)\n#\n# def forward(self, x):\n#\n# out = self.conv1(x)\n# out = self.bn1(out)\n# out = self.relu(out)\n#\n# out = self.conv2(out)\n#\n# return out\n\nclass Fusion_Conv(nn.Module):\n def __init__(self, inplanes, outplanes):\n\n super(Fusion_Conv, self).__init__()\n\n self.conv1 = torch.nn.Conv1d(inplanes, outplanes, 1)\n self.bn1 = torch.nn.BatchNorm1d(outplanes)\n\n def forward(self, point_features, img_features):\n #print(point_features.shape, img_features.shape)\n fusion_features = torch.cat([point_features, img_features], dim=1)\n fusion_features = F.relu(self.bn1(self.conv1(fusion_features)))\n\n return fusion_features\n\n\n#================addition attention (add)=======================#\nclass IA_Layer(nn.Module):\n def __init__(self, channels):\n print('##############ADDITION ATTENTION(ADD)#########')\n super(IA_Layer, self).__init__()\n self.ic, self.pc = channels\n rc = self.pc // 4\n self.conv1 = nn.Sequential(nn.Conv1d(self.ic, self.pc, 1),\n nn.BatchNorm1d(self.pc),\n nn.ReLU())\n self.fc1 = nn.Linear(self.ic, rc)\n self.fc2 = nn.Linear(self.pc, rc)\n self.fc3 = nn.Linear(rc, 1)\n\n\n def forward(self, img_feas, point_feas):\n batch = img_feas.size(0)\n img_feas_f = img_feas.transpose(1,2).contiguous().view(-1, self.ic) #BCN->BNC->(BN)C\n point_feas_f = point_feas.transpose(1,2).contiguous().view(-1, self.pc) #BCN->BNC->(BN)C'\n ri = self.fc1(img_feas_f)\n rp = self.fc2(point_feas_f)\n att = F.sigmoid(self.fc3(F.tanh(ri + rp))) #BNx1\n att = att.squeeze(1)\n att = att.view(batch, 1, -1) #B1N\n # print(img_feas.size(), att.size())\n\n img_feas_new = self.conv1(img_feas)\n out = img_feas_new * att\n\n return out\n\nclass Atten_Fusion_Conv(nn.Module):\n def __init__(self, inplanes_I, inplanes_P, outplanes):\n super(Atten_Fusion_Conv, self).__init__()\n\n self.IA_Layer = IA_Layer(channels = [inplanes_I, inplanes_P])\n # self.conv1 = torch.nn.Conv1d(inplanes_P, outplanes, 1)\n self.conv1 = torch.nn.Conv1d(inplanes_P + inplanes_P, outplanes, 1)\n self.bn1 = torch.nn.BatchNorm1d(outplanes)\n\n\n def forward(self, point_features, img_features):\n img_features = self.IA_Layer(img_features, 
point_features)\n #print(\"img_features:\", img_features.shape)\n\n fusion_features = torch.cat([point_features, img_features], dim=1)\n fusion_features = F.relu(self.bn1(self.conv1(fusion_features)))\n\n return fusion_features\n\n\ndef Feature_Gather(feature_map, xy):\n \"\"\"\n :param xy:(B,N,2) normalize to [-1,1]\n :param feature_map:(B,C,H,W)\n :return:\n \"\"\"\n\n # use grid_sample for this.\n # xy(B,N,2)->(B,1,N,2)\n xy = xy.unsqueeze(1)\n\n interpolate_feature = grid_sample(feature_map, xy) # (B,C,1,N)\n\n return interpolate_feature.squeeze(2) # (B,C,N)\n\n\ndef get_model(input_channels = 6, use_xyz = True):\n return Pointnet2MSG(input_channels = input_channels, use_xyz = use_xyz)\n\n\nclass Pointnet2MSG(nn.Module):\n def __init__(self, input_channels = 6, use_xyz = True):\n super().__init__()\n\n self.SA_modules = nn.ModuleList()\n channel_in = input_channels\n\n skip_channel_list = [input_channels]\n for k in range(cfg.RPN.SA_CONFIG.NPOINTS.__len__()):\n mlps = cfg.RPN.SA_CONFIG.MLPS[k].copy()\n channel_out = 0\n for idx in range(mlps.__len__()):\n mlps[idx] = [channel_in] + mlps[idx]\n channel_out += mlps[idx][-1]\n\n if k == 1:\n self.SA_modules.append(\n PointnetSAModuleMSGPointformer(\n npoint = cfg.RPN.SA_CONFIG.NPOINTS[k],\n radii = cfg.RPN.SA_CONFIG.RADIUS[k],\n nsamples = cfg.RPN.SA_CONFIG.NSAMPLE[k],\n mlps = mlps,\n use_xyz = use_xyz,\n bn = cfg.RPN.USE_BN\n )\n )\n else:\n self.SA_modules.append(\n PointnetSAModuleMSG(\n npoint=cfg.RPN.SA_CONFIG.NPOINTS[k],\n radii=cfg.RPN.SA_CONFIG.RADIUS[k],\n nsamples=cfg.RPN.SA_CONFIG.NSAMPLE[k],\n mlps=mlps,\n use_xyz=use_xyz,\n bn=cfg.RPN.USE_BN\n )\n )\n skip_channel_list.append(channel_out)\n channel_in = channel_out\n\n ##################\n if cfg.LI_FUSION.ENABLED:\n self.Img_Block = nn.ModuleList()\n self.Fusion_Conv = nn.ModuleList()\n self.Fusion_Conv1 = nn.ModuleList()\n self.DeConv = nn.ModuleList()\n self.DeConv1 = nn.ModuleList()\n self.image_fusion_conv1 = nn.ModuleList()\n self.image_fusion_bn1 = nn.ModuleList()\n for i in range(-1, -(len(cfg.LI_FUSION.IMG_CHANNELS)-1), -1):\n self.DeConv1.append(nn.ConvTranspose2d(cfg.LI_FUSION.IMG_CHANNELS[i],cfg.LI_FUSION.IMG_CHANNELS[i-1],\n kernel_size=cfg.LI_FUSION.DeConv_Kernels[0],\n stride=cfg.LI_FUSION.DeConv_Kernels[0]))\n for i in range(len(cfg.LI_FUSION.IMG_CHANNELS) - 1):\n self.Img_Block.append(BasicBlock(cfg.LI_FUSION.IMG_CHANNELS[i], cfg.LI_FUSION.IMG_CHANNELS[i+1], stride=1))\n if cfg.LI_FUSION.ADD_Image_Attention:\n self.Fusion_Conv.append(\n Atten_Fusion_Conv(cfg.LI_FUSION.IMG_CHANNELS[i + 1], cfg.LI_FUSION.POINT_CHANNELS[i],\n cfg.LI_FUSION.POINT_CHANNELS[i]))\n else:\n self.Fusion_Conv.append(Fusion_Conv(cfg.LI_FUSION.IMG_CHANNELS[i + 1] + cfg.LI_FUSION.POINT_CHANNELS[i],\n cfg.LI_FUSION.POINT_CHANNELS[i]))\n\n self.DeConv.append(nn.ConvTranspose2d(cfg.LI_FUSION.IMG_CHANNELS[i + 1], cfg.LI_FUSION.DeConv_Reduce[i],\n kernel_size=cfg.LI_FUSION.DeConv_Kernels[i],\n stride=cfg.LI_FUSION.DeConv_Kernels[i]))\n for i in range(len(cfg.LI_FUSION.IMG_CHANNELS)-1):\n if cfg.LI_FUSION.ADD_Image_Attention:\n self.Fusion_Conv1.append(\n Atten_Fusion_Conv(cfg.LI_FUSION.IMG_CHANNELS[-i-1],cfg.LI_FUSION.FA_POINT_CHANNELS[i+1],\n cfg.LI_FUSION.FA_POINT_CHANNELS[i+1]))\n\n self.image_fusion_conv = nn.Conv2d(sum(cfg.LI_FUSION.DeConv_Reduce), cfg.LI_FUSION.IMG_FEATURES_CHANNEL//4, kernel_size = 1)\n for i in range(-1, -(len(cfg.LI_FUSION.IMG_CHANNELS)-1), -1):\n self.image_fusion_conv1.append(nn.Conv2d(cfg.LI_FUSION.IMG_CHANNELS[i],cfg.LI_FUSION.IMG_CHANNELS[i-1],kernel_size=1))\n 
self.image_fusion_bn1.append(torch.nn.BatchNorm2d(cfg.LI_FUSION.IMG_CHANNELS[i-1]))\n self.image_fusion_bn = torch.nn.BatchNorm2d(cfg.LI_FUSION.IMG_FEATURES_CHANNEL//4)\n\n if cfg.LI_FUSION.ADD_Image_Attention:\n self.final_fusion_img_point = Atten_Fusion_Conv(cfg.LI_FUSION.IMG_FEATURES_CHANNEL//4, cfg.LI_FUSION.IMG_FEATURES_CHANNEL, cfg.LI_FUSION.IMG_FEATURES_CHANNEL)\n else:\n self.final_fusion_img_point = Fusion_Conv(cfg.LI_FUSION.IMG_FEATURES_CHANNEL + cfg.LI_FUSION.IMG_FEATURES_CHANNEL//4, cfg.LI_FUSION.IMG_FEATURES_CHANNEL)\n\n\n self.FP_modules = nn.ModuleList()\n\n for k in range(cfg.RPN.FP_MLPS.__len__()):\n pre_channel = cfg.RPN.FP_MLPS[k + 1][-1] if k + 1 < len(cfg.RPN.FP_MLPS) else channel_out\n self.FP_modules.append(\n PointnetFPModule(mlp = [pre_channel + skip_channel_list[k]] + cfg.RPN.FP_MLPS[k])\n )\n\n def _break_up_pc(self, pc):\n xyz = pc[..., 0:3].contiguous()\n features = (\n pc[..., 3:].transpose(1, 2).contiguous()\n if pc.size(-1) > 3 else None\n )\n\n return xyz, features\n\n\n def forward(self, pointcloud: torch.cuda.FloatTensor, image=None, xy=None):\n xyz, features = self._break_up_pc(pointcloud)\n\n l_xyz, l_features = [xyz], [features]\n\n if cfg.LI_FUSION.ENABLED:\n #### normalize xy to [-1,1]\n size_range = [1280.0, 384.0]\n xy[:, :, 0] = xy[:, :, 0] / (size_range[0] - 1.0) * 2.0 - 1.0\n xy[:, :, 1] = xy[:, :, 1] / (size_range[1] - 1.0) * 2.0 - 1.0 # = xy / (size_range - 1.) * 2 - 1.\n l_xy_cor = [xy] #(2,16384,2)\n img = [image]\n\n for i in range(len(self.SA_modules)):\n li_xyz, li_features, li_index = self.SA_modules[i](l_xyz[i], l_features[i])\n\n if cfg.LI_FUSION.ENABLED:\n li_index = li_index.long().unsqueeze(-1).repeat(1,1,2)#(2,4096)-(2,4096,2)\n li_xy_cor = torch.gather(l_xy_cor[i],1,li_index)#(2,4096,2)\n image = self.Img_Block[i](img[i])#(B,64,192,640)\n #print(image.shape)\n img_gather_feature = Feature_Gather(image,li_xy_cor) #, scale= 2**(i+1))#(B,64,4096)\n li_features = self.Fusion_Conv[i](li_features,img_gather_feature)#(B,96,4096)\n l_xy_cor.append(li_xy_cor)\n img.append(image)\n\n l_xyz.append(li_xyz)\n l_features.append(li_features)\n\n\n # for i in range(-1, -(len(self.FP_modules) + 1), -1):\n # l_features[i - 1] = self.FP_modules[i](\n # l_xyz[i - 1], l_xyz[i], l_features[i - 1], l_features[i]\n # )\n\n if cfg.LI_FUSION.ENABLED:\n DeConv = [img[-1]]#(B,512,24,80)\n for i in range(len(cfg.LI_FUSION.IMG_CHANNELS)-2):\n DeConv.append(self.DeConv1[i](DeConv[-1]))\n de_concat=torch.cat((DeConv[i+1],img[-i-2]),dim=1)#(B,512,48,160)\n img_fusion=F.relu(self.image_fusion_bn1[i](self.image_fusion_conv1[i](de_concat)))#(B,256,48,160)\n DeConv[i+1]=img_fusion#(B,256,48,160)\n\n img_fusion_gather_feature1 = []\n for i in range(len(cfg.LI_FUSION.IMG_CHANNELS)-1):\n img_fusion_gather_feature1.append(Feature_Gather(DeConv[i],l_xy_cor[-i-2]))#(B,512,256)\n #print(DeConv[i].shape,img_fusion_gather_feature1[i].shape)\n\n for i in range(-1, -(len(self.FP_modules) + 1), -1):\n l_features[i - 1] = self.FP_modules[i](\n l_xyz[i - 1], l_xyz[i], l_features[i - 1], l_features[i]\n )\n l_features[i-1] = self.Fusion_Conv1[-i-1](l_features[i-1],img_fusion_gather_feature1[-i-1])\n\n # for i in range(1,len(img))\n # DeConv = []\n # for i in range(len(cfg.LI_FUSION.IMG_CHANNELS) - 1):\n # DeConv.append(self.DeConv[i](img[i + 1]))\n\n #de_concat = torch.cat(DeConv,dim=1)#(B,64,384,1280)\n\n #img_fusion = F.relu(self.image_fusion_bn(self.image_fusion_conv(de_concat)))#(B,32,384,1280)\n #img_fusion_gather_feature = Feature_Gather(img_fusion, xy)#(B,32,16384)\n 
#l_features[0] = self.final_fusion_img_point(l_features[0], img_fusion_gather_feature)#(B,128,16384)\n\n return l_xyz[0], l_features[0]\n\n\nclass Pointnet2MSG_returnMiddleStages(Pointnet2MSG):\n def __init__(self, input_channels = 6, use_xyz = True):\n super().__init__(input_channels, use_xyz)\n\n def forward(self, pointcloud: torch.cuda.FloatTensor):\n xyz, features = self._break_up_pc(pointcloud)\n\n l_xyz, l_features = [xyz], [features]\n idxs = []\n for i in range(len(self.SA_modules)):\n li_xyz, li_features, idx = self.SA_modules[i](l_xyz[i], l_features[i])\n l_xyz.append(li_xyz)\n l_features.append(li_features)\n idxs.append(idx)\n\n for i in range(-1, -(len(self.FP_modules) + 1), -1):\n l_features[i - 1] = self.FP_modules[i](\n l_xyz[i - 1], l_xyz[i], l_features[i - 1], l_features[i]\n )\n\n return l_xyz, l_features, idxs\n"
] | [
[
"torch.nn.Linear",
"torch.cat",
"torch.nn.ModuleList",
"torch.nn.Conv1d",
"torch.nn.Sequential",
"torch.gather",
"torch.nn.BatchNorm2d",
"torch.nn.ConvTranspose2d",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.functional.grid_sample",
"torch.nn.BatchNorm1d",
"torch.nn.functional.tanh"
]
] |
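Of the `torch` calls listed above, `torch.nn.functional.grid_sample` is the one driving the `Feature_Gather` helper; a minimal sketch of that point-to-pixel sampling step (the explicit `align_corners` argument is our addition; the original relies on the default):

```python
import torch
from torch.nn.functional import grid_sample

def feature_gather(feature_map: torch.Tensor, xy: torch.Tensor) -> torch.Tensor:
    """Sample a per-point image feature, as Feature_Gather does.

    feature_map: (B, C, H, W); xy: (B, N, 2), coordinates normalized to [-1, 1].
    """
    xy = xy.unsqueeze(1)  # (B, N, 2) -> (B, 1, N, 2), a grid with one row
    feats = grid_sample(feature_map, xy, align_corners=True)  # (B, C, 1, N)
    return feats.squeeze(2)  # (B, C, N)

fmap = torch.randn(2, 64, 48, 160)
pts = torch.rand(2, 4096, 2) * 2.0 - 1.0  # normalized as in the snippet's forward()
print(feature_gather(fmap, pts).shape)  # torch.Size([2, 64, 4096])
```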
wut-biolab-zhangyanping/mfcis | [
"4b3df36760182a1cdcc292e8dcfdc1775de0ae59"
] | [
"models/BaseModel.py"
] | [
 "from tensorflow import keras\nfrom tensorflow.keras.layers import Conv1D, Dropout, Dense, BatchNormalization, Input, Concatenate, MaxPooling2D, Reshape\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras import backend as K\nfrom tensorflow.keras.layers import Layer\nfrom tensorflow.keras import regularizers\nfrom tensorflow.keras.optimizers import RMSprop\nfrom tensorflow.keras.utils import multi_gpu_model\nfrom tensorflow.keras.applications.xception import Xception\nimport os\n## set the id of available gpu e.g. \"0\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"2\"\n\n\n# PD Layer definition, used for the texture and vein PDs\nclass PD_Layer(Layer):\n def __init__(self, **kwargs):\n super(PD_Layer, self).__init__(**kwargs)\n\n def build(self, input_shape):\n self.centers = self.add_weight(name='pd_center',\n shape=(input_shape[1], input_shape[2], input_shape[3]),\n initializer=keras.initializers.RandomNormal(),\n trainable=True)\n self.sharpness = self.add_weight(name='pd_sharpness',\n shape=(input_shape[1], input_shape[2], input_shape[3]),\n initializer=keras.initializers.Constant(4),\n trainable=True)\n super(PD_Layer, self).build(input_shape)\n\n def call(self, x):\n sharpeness = K.pow(self.sharpness, 2)\n x = x - self.centers\n x = K.pow(x, 2)\n y = K.stack([x[:, :, :, i] * sharpeness[:, :, i] for i in range(x.shape[-1])], axis=-1)\n y = K.sum(y, axis=2)\n y = K.exp(-1 * y)\n return y\n\n def compute_output_shape(self, input_shape):\n return (input_shape[0], input_shape[1], 1)\n\n\n# Definition of the PD Layer used for shape\nclass PD_Layer_Shape(Layer):\n def __init__(self, **kwargs):\n super(PD_Layer_Shape, self).__init__(**kwargs)\n\n def build(self, input_shape):\n self.centers = self.add_weight(name='pd_center',\n shape=(input_shape[1], input_shape[2], input_shape[3]),\n initializer=keras.initializers.RandomNormal(),\n trainable=True)\n self.sharpness = self.add_weight(name='pd_sharpness',\n shape=(input_shape[1], input_shape[2], input_shape[3]),\n initializer=keras.initializers.Constant(3),\n trainable=True)\n super(PD_Layer_Shape, self).build(input_shape)\n\n def call(self, x):\n sharpeness = K.pow(self.sharpness, 2)\n x = x - self.centers\n x = K.pow(x, 2)\n y = K.stack([x[:, :, :, i] * sharpeness[:, :, i] for i in range(x.shape[-1])], axis=-1)\n y = K.sum(y, axis=2)\n y = K.exp(-1 * y)\n return y\n\n def compute_output_shape(self, input_shape):\n return (input_shape[0], input_shape[1], 3)\n\n\n# Network part that further extracts the shape PD features\ndef stage_1(pd, direction, N, stage_1_kr=0.1, stage_1_drop_out_1=0.5, stage_1_neuron_num_1=512):\n x = PD_Layer_Shape()(pd)\n x = Conv1D(filters=16, kernel_size=1, strides=1, padding='same', activation='relu',\n kernel_regularizer=regularizers.l2(stage_1_kr), name='stage_1_conv_1_' + str(direction))(x)\n\n x = Conv1D(filters=16, kernel_size=1, strides=1, padding='same', activation='relu',\n kernel_regularizer=regularizers.l2(stage_1_kr), name='stage_1_conv_2_' + str(direction))(x)\n\n x = Reshape((N, 16, 1))(x)\n x = MaxPooling2D(pool_size=(1, 16))(x)\n x = Reshape((N,))(x)\n x = Dropout(stage_1_drop_out_1)(x)\n x = Dense(stage_1_neuron_num_1)(x)\n x = BatchNormalization()(x)\n x = Dense(256, activation='relu')(x)\n x = Dropout(0.5)(x)\n model = Model(inputs=pd, outputs=x)\n return model\n\n\n# Network definition of the part that further extracts the texture PD and vein PD features\ndef stage_2(pd, direction, N, stage_1_kr=0.1, stage_1_drop_out_1=0.5, stage_1_neuron_num_1=512):\n x = PD_Layer()(pd)\n x = Conv1D(filters=16, kernel_size=1, strides=1, padding='same', activation='relu',\n kernel_regularizer=regularizers.l2(stage_1_kr), name='stage_1_conv_1_' + 
str(direction))(x)\n\n x = Conv1D(filters=16, kernel_size=1, strides=1, padding='same', activation='relu',\n kernel_regularizer=regularizers.l2(stage_1_kr), name='stage_1_conv_2_' + str(direction))(x)\n x = Reshape((N, 16, 1))(x)\n x = MaxPooling2D(pool_size=(1, 16))(x)\n x = Reshape((N,))(x)\n x = Dropout(stage_1_drop_out_1)(x)\n x = Dense(stage_1_neuron_num_1)(x)\n x = BatchNormalization()(x)\n x = Dense(256, activation='relu')(x)\n x = Dropout(0.5)(x)\n model = Model(inputs=pd, outputs=x)\n return model\n\n\n# Concatenate the PD features extracted by Slayer with the image features extracted by Xception at the FC layer\ndef Combined_Model(parallels, config):\n # with tf.device('/cpu:0'):\n inputs = [Input(shape=(config['N'][i], 2, 3)) for i in range(config['shape_views'])]\n inputs.extend(Input(shape=(config['N'][j], 2, 1)) for j in range(config['shape_views'], config['views']))\n input_tensor = Input(shape=(config['image_size'][0], config['image_size'][1], 3))\n inputs.append(input_tensor)\n stage_1_outputs = []\n for i in range(config['shape_views']):\n model = stage_1(inputs[i], int(i), int(config['N'][i]), config['stage1_kr'], config['stage1_dropout'],\n int(config['stage1_neuron_num']))\n stage_1_outputs.append(model.output)\n\n for j in range(30, 34):\n model2 = stage_2(inputs[j], int(j), int(config['N'][j]), config['stage1_kr'], config['stage1_dropout'],\n int(config['stage1_neuron_num']))\n stage_1_outputs.append(model2.output)\n\n model_img = Xception(include_top=False,\n weights='imagenet',\n input_tensor=input_tensor,\n input_shape=(config['image_size'][0], config['image_size'][1], 3),\n pooling='max')\n\n stage_1_outputs.append(model_img.output)\n\n x = Concatenate(axis=1, name='concat')(stage_1_outputs)\n x = Dropout(0.5)(x)\n x = Dense(2048)(x)\n x = BatchNormalization()(x)\n x = Dropout(0.5)(x)\n x = Dense(config['classes'], activation='softmax')(x)\n fused_model = Model(inputs=inputs, outputs=x)\n rmsprop = RMSprop(lr=0.001)\n\n if (parallels > 1):\n parallel_model = multi_gpu_model(fused_model, gpus=parallels)\n parallel_model.compile(optimizer=rmsprop,\n loss='categorical_crossentropy',\n metrics=['categorical_accuracy'])\n return parallel_model\n else:\n fused_model.compile(optimizer=rmsprop,\n loss='categorical_crossentropy',\n metrics=['categorical_accuracy'])\n return fused_model\n\n\ndef Xception_Model(parallels, config):\n # with tf.device('/cpu:0'):\n input_tensor = Input(shape=(config['image_size'][0], config['image_size'][1], 3))\n base_model = Xception(include_top=False,\n weights='imagenet',\n input_tensor=input_tensor,\n input_shape=(config['image_size'][0], config['image_size'][1], 3),\n pooling='max')\n x = base_model.output\n print(x.shape)\n x = Dense(2048, activation='relu')(x)\n x = Dropout(0.5)(x)\n y = Dense(config['classes'], activation='softmax')(x)\n model = Model(inputs=base_model.input, outputs=y)\n if parallels > 1:\n parallel_model = multi_gpu_model(model, gpus=parallels)\n parallel_model.compile(optimizer='sgd',\n loss='categorical_crossentropy',\n metrics=['categorical_accuracy'])\n return parallel_model\n else:\n model.compile(optimizer='sgd',\n loss='categorical_crossentropy',\n metrics=['categorical_accuracy'])\n return model"
] | [
[
"tensorflow.keras.backend.sum",
"tensorflow.keras.applications.xception.Xception",
"tensorflow.keras.layers.Input",
"tensorflow.keras.backend.exp",
"tensorflow.keras.models.Model",
"tensorflow.keras.layers.Reshape",
"tensorflow.keras.backend.pow",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.utils.multi_gpu_model",
"tensorflow.keras.initializers.RandomNormal",
"tensorflow.keras.optimizers.RMSprop",
"tensorflow.keras.regularizers.l2",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.keras.layers.Concatenate",
"tensorflow.keras.initializers.Constant"
]
] |
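The Keras APIs above center on custom layers whose trainable weights come from `add_weight` (the two PD layers); a minimal sketch of that pattern with a toy 1-D input (the `GaussianBump` name and shapes are ours, not the model's real PD input):

```python
import numpy as np
from tensorflow import keras
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Layer

class GaussianBump(Layer):
    """Toy layer mirroring PD_Layer: trainable centers/sharpness, Gaussian response."""
    def build(self, input_shape):
        self.centers = self.add_weight(name='center', shape=(input_shape[-1],),
                                       initializer=keras.initializers.RandomNormal(),
                                       trainable=True)
        self.sharpness = self.add_weight(name='sharpness', shape=(input_shape[-1],),
                                         initializer=keras.initializers.Constant(4.0),
                                         trainable=True)
        super().build(input_shape)

    def call(self, x):
        # exp(-sharpness^2 * (x - center)^2), the same Gaussian form as PD_Layer.call
        return K.exp(-K.pow(self.sharpness, 2) * K.pow(x - self.centers, 2))

print(GaussianBump()(np.zeros((2, 5), dtype='float32')).shape)  # (2, 5)
```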
justindlwhite/pymc | [
"9c2ba7afc829fef5799068a0215a53b0f69da7c0"
] | [
"pymc/tests/test_transforms.py"
] | [
"# Copyright 2020 The PyMC Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport aesara\nimport aesara.tensor as at\nimport numpy as np\nimport pytest\n\nfrom aesara.tensor.var import TensorConstant\n\nimport pymc as pm\nimport pymc.distributions.transforms as tr\n\nfrom pymc.aesaraf import floatX, jacobian\nfrom pymc.distributions import joint_logpt\nfrom pymc.tests.checks import close_to, close_to_logical\nfrom pymc.tests.helpers import SeededTest\nfrom pymc.tests.test_distributions import (\n Circ,\n MultiSimplex,\n R,\n Rminusbig,\n Rplusbig,\n Simplex,\n SortedVector,\n Unit,\n UnitSortedVector,\n Vector,\n)\n\n# some transforms (stick breaking) require additon of small slack in order to be numerically\n# stable. The minimal addable slack for float32 is higher thus we need to be less strict\ntol = 1e-7 if aesara.config.floatX == \"float64\" else 1e-6\n\n\ndef check_transform(transform, domain, constructor=at.dscalar, test=0, rv_var=None):\n x = constructor(\"x\")\n x.tag.test_value = test\n if rv_var is None:\n rv_var = x\n rv_inputs = rv_var.owner.inputs if rv_var.owner else []\n # test forward and forward_val\n # FIXME: What's being tested here? That the transformed graph can compile?\n forward_f = aesara.function([x], transform.forward(x, *rv_inputs))\n # test transform identity\n identity_f = aesara.function(\n [x], transform.backward(transform.forward(x, *rv_inputs), *rv_inputs)\n )\n for val in domain.vals:\n close_to(val, identity_f(val), tol)\n\n\ndef check_vector_transform(transform, domain, rv_var=None):\n return check_transform(transform, domain, at.dvector, test=np.array([0, 0]), rv_var=rv_var)\n\n\ndef get_values(transform, domain=R, constructor=at.dscalar, test=0, rv_var=None):\n x = constructor(\"x\")\n x.tag.test_value = test\n if rv_var is None:\n rv_var = x\n rv_inputs = rv_var.owner.inputs if rv_var.owner else []\n f = aesara.function([x], transform.backward(x, *rv_inputs))\n return np.array([f(val) for val in domain.vals])\n\n\ndef check_jacobian_det(\n transform,\n domain,\n constructor=at.dscalar,\n test=0,\n make_comparable=None,\n elemwise=False,\n rv_var=None,\n):\n y = constructor(\"y\")\n y.tag.test_value = test\n\n if rv_var is None:\n rv_var = y\n\n rv_inputs = rv_var.owner.inputs if rv_var.owner else []\n\n x = transform.backward(y, *rv_inputs)\n if make_comparable:\n x = make_comparable(x)\n\n if not elemwise:\n jac = at.log(at.nlinalg.det(jacobian(x, [y])))\n else:\n jac = at.log(at.abs_(at.diag(jacobian(x, [y]))))\n\n # ljd = log jacobian det\n actual_ljd = aesara.function([y], jac)\n\n computed_ljd = aesara.function(\n [y], at.as_tensor_variable(transform.log_jac_det(y, *rv_inputs)), on_unused_input=\"ignore\"\n )\n\n for yval in domain.vals:\n close_to(actual_ljd(yval), computed_ljd(yval), tol)\n\n\ndef test_simplex():\n check_vector_transform(tr.simplex, Simplex(2))\n check_vector_transform(tr.simplex, Simplex(4))\n\n check_transform(tr.simplex, MultiSimplex(3, 2), constructor=at.dmatrix, test=np.zeros((2, 2)))\n\n\ndef 
test_simplex_bounds():\n vals = get_values(tr.simplex, Vector(R, 2), at.dvector, np.array([0, 0]))\n\n close_to(vals.sum(axis=1), 1, tol)\n close_to_logical(vals > 0, True, tol)\n close_to_logical(vals < 1, True, tol)\n\n check_jacobian_det(tr.simplex, Vector(R, 2), at.dvector, np.array([0, 0]), lambda x: x[:-1])\n\n\ndef test_simplex_accuracy():\n val = np.array([-30])\n x = at.dvector(\"x\")\n x.tag.test_value = val\n identity_f = aesara.function([x], tr.simplex.forward(x, tr.simplex.backward(x, x)))\n close_to(val, identity_f(val), tol)\n\n\ndef test_sum_to_1():\n check_vector_transform(tr.sum_to_1, Simplex(2))\n check_vector_transform(tr.sum_to_1, Simplex(4))\n\n check_jacobian_det(tr.sum_to_1, Vector(Unit, 2), at.dvector, np.array([0, 0]), lambda x: x[:-1])\n\n\ndef test_log():\n check_transform(tr.log, Rplusbig)\n\n check_jacobian_det(tr.log, Rplusbig, elemwise=True)\n check_jacobian_det(tr.log, Vector(Rplusbig, 2), at.dvector, [0, 0], elemwise=True)\n\n vals = get_values(tr.log)\n close_to_logical(vals > 0, True, tol)\n\n\ndef test_log_exp_m1():\n check_transform(tr.log_exp_m1, Rplusbig)\n\n check_jacobian_det(tr.log_exp_m1, Rplusbig, elemwise=True)\n check_jacobian_det(tr.log_exp_m1, Vector(Rplusbig, 2), at.dvector, [0, 0], elemwise=True)\n\n vals = get_values(tr.log_exp_m1)\n close_to_logical(vals > 0, True, tol)\n\n\ndef test_logodds():\n check_transform(tr.logodds, Unit)\n\n check_jacobian_det(tr.logodds, Unit, elemwise=True)\n check_jacobian_det(tr.logodds, Vector(Unit, 2), at.dvector, [0.5, 0.5], elemwise=True)\n\n vals = get_values(tr.logodds)\n close_to_logical(vals > 0, True, tol)\n close_to_logical(vals < 1, True, tol)\n\n\ndef test_lowerbound():\n trans = tr.Interval(0.0, None)\n check_transform(trans, Rplusbig)\n\n check_jacobian_det(trans, Rplusbig, elemwise=True)\n check_jacobian_det(trans, Vector(Rplusbig, 2), at.dvector, [0, 0], elemwise=True)\n\n vals = get_values(trans)\n close_to_logical(vals > 0, True, tol)\n\n\ndef test_upperbound():\n trans = tr.Interval(None, 0.0)\n check_transform(trans, Rminusbig)\n\n check_jacobian_det(trans, Rminusbig, elemwise=True)\n check_jacobian_det(trans, Vector(Rminusbig, 2), at.dvector, [-1, -1], elemwise=True)\n\n vals = get_values(trans)\n close_to_logical(vals < 0, True, tol)\n\n\ndef test_interval():\n for a, b in [(-4, 5.5), (0.1, 0.7), (-10, 4.3)]:\n domain = Unit * np.float64(b - a) + np.float64(a)\n\n trans = tr.Interval(a, b)\n check_transform(trans, domain)\n\n check_jacobian_det(trans, domain, elemwise=True)\n\n vals = get_values(trans)\n close_to_logical(vals > a, True, tol)\n close_to_logical(vals < b, True, tol)\n\n\[email protected](\n aesara.config.floatX == \"float32\", reason=\"Test is designed for 64bit precision\"\n)\ndef test_interval_near_boundary():\n lb = -1.0\n ub = 1e-7\n x0 = np.nextafter(ub, lb)\n\n with pm.Model() as model:\n pm.Uniform(\"x\", initval=x0, lower=lb, upper=ub)\n\n log_prob = model.point_logps()\n np.testing.assert_allclose(list(log_prob.values()), np.array([-52.68]))\n\n\ndef test_circular():\n trans = tr.circular\n check_transform(trans, Circ)\n\n check_jacobian_det(trans, Circ)\n\n vals = get_values(trans)\n close_to_logical(vals > -np.pi, True, tol)\n close_to_logical(vals < np.pi, True, tol)\n\n assert isinstance(trans.forward(1, None), TensorConstant)\n\n\ndef test_ordered():\n check_vector_transform(tr.ordered, SortedVector(6))\n\n check_jacobian_det(tr.ordered, Vector(R, 2), at.dvector, np.array([0, 0]), elemwise=False)\n\n vals = get_values(tr.ordered, Vector(R, 3), at.dvector, 
np.zeros(3))\n close_to_logical(np.diff(vals) >= 0, True, tol)\n\n\ndef test_chain_values():\n chain_tranf = tr.Chain([tr.logodds, tr.ordered])\n vals = get_values(chain_tranf, Vector(R, 5), at.dvector, np.zeros(5))\n close_to_logical(np.diff(vals) >= 0, True, tol)\n\n\ndef test_chain_vector_transform():\n chain_tranf = tr.Chain([tr.logodds, tr.ordered])\n check_vector_transform(chain_tranf, UnitSortedVector(3))\n\n\[email protected](reason=\"Fails due to precision issue. Values just close to expected.\")\ndef test_chain_jacob_det():\n chain_tranf = tr.Chain([tr.logodds, tr.ordered])\n check_jacobian_det(chain_tranf, Vector(R, 4), at.dvector, np.zeros(4), elemwise=False)\n\n\nclass TestElementWiseLogp(SeededTest):\n def build_model(self, distfam, params, size, transform, initval=None):\n if initval is not None:\n initval = pm.floatX(initval)\n with pm.Model() as m:\n distfam(\"x\", size=size, transform=transform, initval=initval, **params)\n return m\n\n def check_transform_elementwise_logp(self, model):\n x = model.free_RVs[0]\n x_val_transf = x.tag.value_var\n\n pt = model.compute_initial_point(0)\n test_array_transf = floatX(np.random.randn(*pt[x_val_transf.name].shape))\n transform = x_val_transf.tag.transform\n test_array_untransf = transform.backward(test_array_transf, *x.owner.inputs).eval()\n\n # Create input variable with same dimensionality as untransformed test_array\n x_val_untransf = at.constant(test_array_untransf).type()\n\n jacob_det = transform.log_jac_det(test_array_transf, *x.owner.inputs)\n assert joint_logpt(x, sum=False)[0].ndim == x.ndim == jacob_det.ndim\n\n v1 = joint_logpt(x, x_val_transf, jacobian=False).eval({x_val_transf: test_array_transf})\n v2 = joint_logpt(x, x_val_untransf, transformed=False).eval(\n {x_val_untransf: test_array_untransf}\n )\n close_to(v1, v2, tol)\n\n def check_vectortransform_elementwise_logp(self, model):\n x = model.free_RVs[0]\n x_val_transf = x.tag.value_var\n\n pt = model.compute_initial_point(0)\n test_array_transf = floatX(np.random.randn(*pt[x_val_transf.name].shape))\n transform = x_val_transf.tag.transform\n test_array_untransf = transform.backward(test_array_transf, *x.owner.inputs).eval()\n\n # Create input variable with same dimensionality as untransformed test_array\n x_val_untransf = at.constant(test_array_untransf).type()\n\n jacob_det = transform.log_jac_det(test_array_transf, *x.owner.inputs)\n # Original distribution is univariate\n if x.owner.op.ndim_supp == 0:\n assert joint_logpt(x, sum=False)[0].ndim == x.ndim == (jacob_det.ndim + 1)\n # Original distribution is multivariate\n else:\n assert joint_logpt(x, sum=False)[0].ndim == (x.ndim - 1) == jacob_det.ndim\n\n a = joint_logpt(x, x_val_transf, jacobian=False).eval({x_val_transf: test_array_transf})\n b = joint_logpt(x, x_val_untransf, transformed=False).eval(\n {x_val_untransf: test_array_untransf}\n )\n # Hack to get relative tolerance\n close_to(a, b, np.abs(0.5 * (a + b) * tol))\n\n @pytest.mark.parametrize(\n \"sigma,size\",\n [\n (2.5, 2),\n (5.0, (2, 3)),\n (np.ones(3) * 10.0, (4, 3)),\n ],\n )\n def test_half_normal(self, sigma, size):\n model = self.build_model(pm.HalfNormal, {\"sigma\": sigma}, size=size, transform=tr.log)\n self.check_transform_elementwise_logp(model)\n\n @pytest.mark.parametrize(\"lam,size\", [(2.5, 2), (5.0, (2, 3)), (np.ones(3), (4, 3))])\n def test_exponential(self, lam, size):\n model = self.build_model(pm.Exponential, {\"lam\": lam}, size=size, transform=tr.log)\n self.check_transform_elementwise_logp(model)\n\n 
@pytest.mark.parametrize(\n \"a,b,size\",\n [\n (1.0, 1.0, 2),\n (0.5, 0.5, (2, 3)),\n (np.ones(3), np.ones(3), (4, 3)),\n ],\n )\n def test_beta(self, a, b, size):\n model = self.build_model(pm.Beta, {\"alpha\": a, \"beta\": b}, size=size, transform=tr.logodds)\n self.check_transform_elementwise_logp(model)\n\n @pytest.mark.parametrize(\n \"lower,upper,size\",\n [\n (0.0, 1.0, 2),\n (0.5, 5.5, (2, 3)),\n (pm.floatX(np.zeros(3)), pm.floatX(np.ones(3)), (4, 3)),\n ],\n )\n def test_uniform(self, lower, upper, size):\n def transform_params(*inputs):\n _, _, _, lower, upper = inputs\n lower = at.as_tensor_variable(lower) if lower is not None else None\n upper = at.as_tensor_variable(upper) if upper is not None else None\n return lower, upper\n\n interval = tr.Interval(bounds_fn=transform_params)\n model = self.build_model(\n pm.Uniform, {\"lower\": lower, \"upper\": upper}, size=size, transform=interval\n )\n self.check_transform_elementwise_logp(model)\n\n @pytest.mark.parametrize(\n \"lower, c, upper, size\",\n [\n (0.0, 1.0, 2.0, 2),\n (-10, 0, 200, (2, 3)),\n (np.zeros(3), np.ones(3), np.ones(3), (4, 3)),\n ],\n )\n def test_triangular(self, lower, c, upper, size):\n def transform_params(*inputs):\n _, _, _, lower, _, upper = inputs\n lower = at.as_tensor_variable(lower) if lower is not None else None\n upper = at.as_tensor_variable(upper) if upper is not None else None\n return lower, upper\n\n interval = tr.Interval(bounds_fn=transform_params)\n model = self.build_model(\n pm.Triangular, {\"lower\": lower, \"c\": c, \"upper\": upper}, size=size, transform=interval\n )\n self.check_transform_elementwise_logp(model)\n\n @pytest.mark.parametrize(\n \"mu,kappa,size\", [(0.0, 1.0, 2), (-0.5, 5.5, (2, 3)), (np.zeros(3), np.ones(3), (4, 3))]\n )\n def test_vonmises(self, mu, kappa, size):\n model = self.build_model(\n pm.VonMises, {\"mu\": mu, \"kappa\": kappa}, size=size, transform=tr.circular\n )\n self.check_transform_elementwise_logp(model)\n\n @pytest.mark.parametrize(\n \"a,size\", [(np.ones(2), None), (np.ones((2, 3)) * 0.5, None), (np.ones(3), (4,))]\n )\n def test_dirichlet(self, a, size):\n model = self.build_model(pm.Dirichlet, {\"a\": a}, size=size, transform=tr.simplex)\n self.check_vectortransform_elementwise_logp(model)\n\n def test_normal_ordered(self):\n model = self.build_model(\n pm.Normal,\n {\"mu\": 0.0, \"sigma\": 1.0},\n size=3,\n initval=np.asarray([-1.0, 1.0, 4.0]),\n transform=tr.ordered,\n )\n self.check_vectortransform_elementwise_logp(model)\n\n @pytest.mark.parametrize(\n \"sigma,size\",\n [\n (2.5, (2,)),\n (np.ones(3), (4, 3)),\n ],\n )\n def test_half_normal_ordered(self, sigma, size):\n initval = np.sort(np.abs(np.random.randn(*size)))\n model = self.build_model(\n pm.HalfNormal,\n {\"sigma\": sigma},\n size=size,\n initval=initval,\n transform=tr.Chain([tr.log, tr.ordered]),\n )\n self.check_vectortransform_elementwise_logp(model)\n\n @pytest.mark.parametrize(\"lam,size\", [(2.5, (2,)), (np.ones(3), (4, 3))])\n def test_exponential_ordered(self, lam, size):\n initval = np.sort(np.abs(np.random.randn(*size)))\n model = self.build_model(\n pm.Exponential,\n {\"lam\": lam},\n size=size,\n initval=initval,\n transform=tr.Chain([tr.log, tr.ordered]),\n )\n self.check_vectortransform_elementwise_logp(model)\n\n @pytest.mark.parametrize(\n \"a,b,size\",\n [\n (\n 1.0,\n 1.0,\n (2,),\n ),\n (np.ones(3), np.ones(3), (4, 3)),\n ],\n )\n def test_beta_ordered(self, a, b, size):\n initval = np.sort(np.abs(np.random.rand(*size)))\n model = self.build_model(\n pm.Beta,\n 
{\"alpha\": a, \"beta\": b},\n size=size,\n initval=initval,\n transform=tr.Chain([tr.logodds, tr.ordered]),\n )\n self.check_vectortransform_elementwise_logp(model)\n\n @pytest.mark.parametrize(\n \"lower,upper,size\",\n [(0.0, 1.0, (2,)), (pm.floatX(np.zeros(3)), pm.floatX(np.ones(3)), (4, 3))],\n )\n def test_uniform_ordered(self, lower, upper, size):\n def transform_params(*inputs):\n _, _, _, lower, upper = inputs\n lower = at.as_tensor_variable(lower) if lower is not None else None\n upper = at.as_tensor_variable(upper) if upper is not None else None\n return lower, upper\n\n interval = tr.Interval(bounds_fn=transform_params)\n\n initval = np.sort(np.abs(np.random.rand(*size)))\n model = self.build_model(\n pm.Uniform,\n {\"lower\": lower, \"upper\": upper},\n size=size,\n initval=initval,\n transform=tr.Chain([interval, tr.ordered]),\n )\n self.check_vectortransform_elementwise_logp(model)\n\n @pytest.mark.parametrize(\"mu,kappa,size\", [(0.0, 1.0, (2,)), (np.zeros(3), np.ones(3), (4, 3))])\n def test_vonmises_ordered(self, mu, kappa, size):\n initval = np.sort(np.abs(np.random.rand(*size)))\n model = self.build_model(\n pm.VonMises,\n {\"mu\": mu, \"kappa\": kappa},\n size=size,\n initval=initval,\n transform=tr.Chain([tr.circular, tr.ordered]),\n )\n self.check_vectortransform_elementwise_logp(model)\n\n @pytest.mark.parametrize(\n \"lower,upper,size,transform\",\n [\n (0.0, 1.0, (2,), tr.simplex),\n (0.5, 5.5, (2, 3), tr.simplex),\n (np.zeros(3), np.ones(3), (4, 3), tr.Chain([tr.sum_to_1, tr.logodds])),\n ],\n )\n def test_uniform_other(self, lower, upper, size, transform):\n initval = np.ones(size) / size[-1]\n model = self.build_model(\n pm.Uniform,\n {\"lower\": lower, \"upper\": upper},\n size=size,\n initval=initval,\n transform=transform,\n )\n self.check_vectortransform_elementwise_logp(model)\n\n @pytest.mark.parametrize(\n \"mu,cov,size,shape\",\n [\n (np.zeros(2), np.diag(np.ones(2)), None, (2,)),\n (np.zeros(3), np.diag(np.ones(3)), (4,), (4, 3)),\n ],\n )\n def test_mvnormal_ordered(self, mu, cov, size, shape):\n initval = np.sort(np.random.randn(*shape))\n model = self.build_model(\n pm.MvNormal, {\"mu\": mu, \"cov\": cov}, size=size, initval=initval, transform=tr.ordered\n )\n self.check_vectortransform_elementwise_logp(model)\n\n\ndef test_triangular_transform():\n with pm.Model() as m:\n x = pm.Triangular(\"x\", lower=0, c=1, upper=2)\n\n transform = x.tag.value_var.tag.transform\n assert np.isclose(transform.backward(-np.inf, *x.owner.inputs).eval(), 0)\n assert np.isclose(transform.backward(np.inf, *x.owner.inputs).eval(), 2)\n\n\ndef test_interval_transform_raises():\n with pytest.raises(ValueError, match=\"Lower and upper interval bounds cannot both be None\"):\n tr.Interval(None, None)\n\n with pytest.raises(ValueError, match=\"Interval bounds must be constant values\"):\n tr.Interval(at.constant(5) + 1, None)\n\n assert tr.Interval(at.constant(5), None)\n"
] | [
[
"numpy.nextafter",
"numpy.array",
"numpy.random.rand",
"numpy.asarray",
"numpy.zeros",
"numpy.ones",
"numpy.random.randn",
"numpy.diff",
"numpy.float64",
"numpy.abs"
]
] |
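The tests above repeatedly assert that `transform.backward(transform.forward(x))` is the identity; a numpy-only sketch of that round-trip check for the log-odds transform (helper names are ours, not pymc's):

```python
import numpy as np

def logodds_forward(p):
    return np.log(p) - np.log1p(-p)  # logit: (0, 1) -> R

def logodds_backward(x):
    return 1.0 / (1.0 + np.exp(-x))  # sigmoid: R -> (0, 1)

vals = np.array([0.1, 0.5, 0.9])
roundtrip = logodds_backward(logodds_forward(vals))
assert np.allclose(vals, roundtrip, atol=1e-7)  # same spirit as the `tol` used above
print(roundtrip)
```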
makinacorpus/pandas | [
"3c1185fd03306a65101657c32b58e4f59c3d1376"
] | [
"pandas/tests/test_categorical.py"
] | [
"# pylint: disable=E1101,E1103,W0232\n\nfrom datetime import datetime\nfrom pandas.compat import range, lrange, u\nimport os\nimport pickle\nimport re\nfrom distutils.version import LooseVersion\n\nimport numpy as np\nimport pandas as pd\n\nfrom pandas import Categorical, Index, Series, DataFrame, PeriodIndex, Timestamp\n\nimport pandas.core.common as com\nimport pandas.compat as compat\nimport pandas.util.testing as tm\n\nclass TestCategorical(tm.TestCase):\n _multiprocess_can_split_ = True\n\n def setUp(self):\n self.factor = Categorical.from_array(['a', 'b', 'b', 'a',\n 'a', 'c', 'c', 'c'])\n\n def test_getitem(self):\n self.assertEqual(self.factor[0], 'a')\n self.assertEqual(self.factor[-1], 'c')\n\n subf = self.factor[[0, 1, 2]]\n tm.assert_almost_equal(subf._codes, [0, 1, 1])\n\n subf = self.factor[np.asarray(self.factor) == 'c']\n tm.assert_almost_equal(subf._codes, [2, 2, 2])\n\n def test_constructor_unsortable(self):\n\n # it works!\n arr = np.array([1, 2, 3, datetime.now()], dtype='O')\n factor = Categorical.from_array(arr)\n self.assertFalse(factor.ordered)\n\n def test_constructor(self):\n\n exp_arr = np.array([\"a\", \"b\", \"c\", \"a\", \"b\", \"c\"])\n c1 = Categorical(exp_arr)\n self.assert_numpy_array_equal(c1.__array__(), exp_arr)\n c2 = Categorical(exp_arr, categories=[\"a\",\"b\",\"c\"])\n self.assert_numpy_array_equal(c2.__array__(), exp_arr)\n c2 = Categorical(exp_arr, categories=[\"c\",\"b\",\"a\"])\n self.assert_numpy_array_equal(c2.__array__(), exp_arr)\n\n # categories must be unique\n def f():\n Categorical([1,2], [1,2,2])\n self.assertRaises(ValueError, f)\n def f():\n Categorical([\"a\",\"b\"], [\"a\",\"b\",\"b\"])\n self.assertRaises(ValueError, f)\n def f():\n Categorical([1,2], [1,2,np.nan, np.nan])\n self.assertRaises(ValueError, f)\n\n\n # Categorical as input\n c1 = Categorical([\"a\", \"b\", \"c\", \"a\"])\n c2 = Categorical(c1)\n self.assertTrue(c1.equals(c2))\n\n c1 = Categorical([\"a\", \"b\", \"c\", \"a\"], categories=[\"a\",\"b\",\"c\",\"d\"])\n c2 = Categorical(c1)\n self.assertTrue(c1.equals(c2))\n\n c1 = Categorical([\"a\", \"b\", \"c\", \"a\"], categories=[\"a\",\"c\",\"b\"])\n c2 = Categorical(c1)\n self.assertTrue(c1.equals(c2))\n\n c1 = Categorical([\"a\", \"b\", \"c\", \"a\"], categories=[\"a\",\"c\",\"b\"])\n c2 = Categorical(c1, categories=[\"a\",\"b\",\"c\"])\n self.assert_numpy_array_equal(c1.__array__(), c2.__array__())\n self.assert_numpy_array_equal(c2.categories, np.array([\"a\",\"b\",\"c\"]))\n\n # Series of dtype category\n c1 = Categorical([\"a\", \"b\", \"c\", \"a\"], categories=[\"a\",\"b\",\"c\",\"d\"])\n c2 = Categorical(Series(c1))\n self.assertTrue(c1.equals(c2))\n\n c1 = Categorical([\"a\", \"b\", \"c\", \"a\"], categories=[\"a\",\"c\",\"b\"])\n c2 = Categorical(Series(c1))\n self.assertTrue(c1.equals(c2))\n\n # Series\n c1 = Categorical([\"a\", \"b\", \"c\", \"a\"])\n c2 = Categorical(Series([\"a\", \"b\", \"c\", \"a\"]))\n self.assertTrue(c1.equals(c2))\n\n c1 = Categorical([\"a\", \"b\", \"c\", \"a\"], categories=[\"a\",\"b\",\"c\",\"d\"])\n c2 = Categorical(Series([\"a\", \"b\", \"c\", \"a\"]), categories=[\"a\",\"b\",\"c\",\"d\"])\n self.assertTrue(c1.equals(c2))\n\n # This should result in integer categories, not float!\n cat = pd.Categorical([1,2,3,np.nan], categories=[1,2,3])\n self.assertTrue(com.is_integer_dtype(cat.categories))\n\n # https://github.com/pydata/pandas/issues/3678\n cat = pd.Categorical([np.nan,1, 2, 3])\n self.assertTrue(com.is_integer_dtype(cat.categories))\n\n # this should result in floats\n cat 
= pd.Categorical([np.nan, 1, 2., 3 ])\n self.assertTrue(com.is_float_dtype(cat.categories))\n\n cat = pd.Categorical([np.nan, 1., 2., 3. ])\n self.assertTrue(com.is_float_dtype(cat.categories))\n\n # preserve int as far as possible by converting to object if NaN is in categories\n cat = pd.Categorical([np.nan, 1, 2, 3], categories=[np.nan, 1, 2, 3])\n self.assertTrue(com.is_object_dtype(cat.categories))\n # This doesn't work -> this would probably need some kind of \"remember the original type\"\n # feature to try to cast the array interface result to...\n #vals = np.asarray(cat[cat.notnull()])\n #self.assertTrue(com.is_integer_dtype(vals))\n cat = pd.Categorical([np.nan,\"a\", \"b\", \"c\"], categories=[np.nan,\"a\", \"b\", \"c\"])\n self.assertTrue(com.is_object_dtype(cat.categories))\n # but don't do it for floats\n cat = pd.Categorical([np.nan, 1., 2., 3.], categories=[np.nan, 1., 2., 3.])\n self.assertTrue(com.is_float_dtype(cat.categories))\n\n\n # corner cases\n cat = pd.Categorical([1])\n self.assertTrue(len(cat.categories) == 1)\n self.assertTrue(cat.categories[0] == 1)\n self.assertTrue(len(cat.codes) == 1)\n self.assertTrue(cat.codes[0] == 0)\n\n cat = pd.Categorical([\"a\"])\n self.assertTrue(len(cat.categories) == 1)\n self.assertTrue(cat.categories[0] == \"a\")\n self.assertTrue(len(cat.codes) == 1)\n self.assertTrue(cat.codes[0] == 0)\n\n # Scalars should be converted to lists\n cat = pd.Categorical(1)\n self.assertTrue(len(cat.categories) == 1)\n self.assertTrue(cat.categories[0] == 1)\n self.assertTrue(len(cat.codes) == 1)\n self.assertTrue(cat.codes[0] == 0)\n\n cat = pd.Categorical([1], categories=1)\n self.assertTrue(len(cat.categories) == 1)\n self.assertTrue(cat.categories[0] == 1)\n self.assertTrue(len(cat.codes) == 1)\n self.assertTrue(cat.codes[0] == 0)\n\n # Catch old style constructor usage: two arrays, codes + categories\n # We can only catch two cases:\n # - when the first is an integer dtype and the second is not\n # - when the resulting codes are all -1/NaN\n with tm.assert_produces_warning(RuntimeWarning):\n c_old = Categorical([0,1,2,0,1,2], categories=[\"a\",\"b\",\"c\"])\n\n with tm.assert_produces_warning(RuntimeWarning):\n c_old = Categorical([0,1,2,0,1,2], categories=[3,4,5])\n\n # the next ones are from the old docs, but unfortunately these don't trigger :-(\n with tm.assert_produces_warning(None):\n c_old2 = Categorical([0, 1, 2, 0, 1, 2], [1, 2, 3])\n cat = Categorical([1,2], categories=[1,2,3])\n\n def test_constructor_with_generator(self):\n # This was raising an Error in isnull(single_val).any() because isnull returned a scalar\n # for a generator\n from pandas.compat import range as xrange\n\n exp = Categorical([0,1,2])\n cat = Categorical((x for x in [0,1,2]))\n self.assertTrue(cat.equals(exp))\n cat = Categorical(xrange(3))\n self.assertTrue(cat.equals(exp))\n\n # This uses xrange internally\n from pandas.core.index import MultiIndex\n MultiIndex.from_product([range(5), ['a', 'b', 'c']])\n\n # check that categories accept generators and sequences\n cat = pd.Categorical([0,1,2], categories=(x for x in [0,1,2]))\n self.assertTrue(cat.equals(exp))\n cat = pd.Categorical([0,1,2], categories=xrange(3))\n self.assertTrue(cat.equals(exp))\n\n\n def test_from_codes(self):\n\n # too few categories\n def f():\n Categorical.from_codes([1,2], [1,2])\n self.assertRaises(ValueError, f)\n\n # no int codes\n def f():\n Categorical.from_codes([\"a\"], [1,2])\n self.assertRaises(ValueError, f)\n\n # no unique categories\n def f():\n 
Categorical.from_codes([0,1,2], [\"a\",\"a\",\"b\"])\n self.assertRaises(ValueError, f)\n\n # too negative\n def f():\n Categorical.from_codes([-2,1,2], [\"a\",\"b\",\"c\"])\n self.assertRaises(ValueError, f)\n\n\n exp = Categorical([\"a\",\"b\",\"c\"], ordered=False)\n res = Categorical.from_codes([0,1,2], [\"a\",\"b\",\"c\"])\n self.assertTrue(exp.equals(res))\n\n # Not available in earlier numpy versions\n if hasattr(np.random, \"choice\"):\n codes = np.random.choice([0,1], 5, p=[0.9,0.1])\n pd.Categorical.from_codes(codes, categories=[\"train\", \"test\"])\n\n def test_comparisons(self):\n result = self.factor[self.factor == 'a']\n expected = self.factor[np.asarray(self.factor) == 'a']\n self.assertTrue(result.equals(expected))\n\n result = self.factor[self.factor != 'a']\n expected = self.factor[np.asarray(self.factor) != 'a']\n self.assertTrue(result.equals(expected))\n\n result = self.factor[self.factor < 'c']\n expected = self.factor[np.asarray(self.factor) < 'c']\n self.assertTrue(result.equals(expected))\n\n result = self.factor[self.factor > 'a']\n expected = self.factor[np.asarray(self.factor) > 'a']\n self.assertTrue(result.equals(expected))\n\n result = self.factor[self.factor >= 'b']\n expected = self.factor[np.asarray(self.factor) >= 'b']\n self.assertTrue(result.equals(expected))\n\n result = self.factor[self.factor <= 'b']\n expected = self.factor[np.asarray(self.factor) <= 'b']\n self.assertTrue(result.equals(expected))\n\n n = len(self.factor)\n\n other = self.factor[np.random.permutation(n)]\n result = self.factor == other\n expected = np.asarray(self.factor) == np.asarray(other)\n self.assert_numpy_array_equal(result, expected)\n\n result = self.factor == 'd'\n expected = np.repeat(False, len(self.factor))\n self.assert_numpy_array_equal(result, expected)\n\n # comparisons with categoricals\n cat_rev = pd.Categorical([\"a\",\"b\",\"c\"], categories=[\"c\",\"b\",\"a\"])\n cat_rev_base = pd.Categorical([\"b\",\"b\",\"b\"], categories=[\"c\",\"b\",\"a\"])\n cat = pd.Categorical([\"a\",\"b\",\"c\"])\n cat_base = pd.Categorical([\"b\",\"b\",\"b\"], categories=cat.categories)\n\n # comparisons need to take categories ordering into account\n res_rev = cat_rev > cat_rev_base\n exp_rev = np.array([True, False, False])\n self.assert_numpy_array_equal(res_rev, exp_rev)\n\n res_rev = cat_rev < cat_rev_base\n exp_rev = np.array([False, False, True])\n self.assert_numpy_array_equal(res_rev, exp_rev)\n\n res = cat > cat_base\n exp = np.array([False, False, True])\n self.assert_numpy_array_equal(res, exp)\n\n # Only categoricals with the same categories can be compared\n def f():\n cat > cat_rev\n self.assertRaises(TypeError, f)\n\n cat_rev_base2 = pd.Categorical([\"b\",\"b\",\"b\"], categories=[\"c\",\"b\",\"a\",\"d\"])\n def f():\n cat_rev > cat_rev_base2\n self.assertRaises(TypeError, f)\n\n # Only categoricals with the same ordering information can be compared\n cat_unordered = cat.copy()\n cat_unordered.ordered = False\n self.assertFalse((cat > cat).any())\n def f():\n cat > cat_unordered\n self.assertRaises(TypeError, f)\n\n # comparison (in both directions) with Series will raise\n s = Series([\"b\",\"b\",\"b\"])\n self.assertRaises(TypeError, lambda: cat > s)\n self.assertRaises(TypeError, lambda: cat_rev > s)\n self.assertRaises(TypeError, lambda: s < cat)\n self.assertRaises(TypeError, lambda: s < cat_rev)\n\n # comparison with numpy.array will raise in both directions, but only on newer\n # numpy versions\n a = np.array([\"b\",\"b\",\"b\"])\n self.assertRaises(TypeError, lambda: cat > 
a)\n self.assertRaises(TypeError, lambda: cat_rev > a)\n\n # The following work via '__array_priority__ = 1000'\n # works only on numpy >= 1.7.1 and not on PY3.2\n if LooseVersion(np.__version__) > \"1.7.1\" and not compat.PY3_2:\n self.assertRaises(TypeError, lambda: a < cat)\n self.assertRaises(TypeError, lambda: a < cat_rev)\n\n def test_na_flags_int_categories(self):\n # #1457\n\n categories = lrange(10)\n labels = np.random.randint(0, 10, 20)\n labels[::5] = -1\n\n cat = Categorical(labels, categories, fastpath=True)\n repr(cat)\n\n self.assert_numpy_array_equal(com.isnull(cat), labels == -1)\n\n def test_categories_none(self):\n factor = Categorical(['a', 'b', 'b', 'a',\n 'a', 'c', 'c', 'c'])\n self.assertTrue(factor.equals(self.factor))\n\n def test_describe(self):\n # string type\n desc = self.factor.describe()\n expected = DataFrame.from_dict(dict(counts=[3, 2, 3],\n freqs=[3/8., 2/8., 3/8.],\n categories=['a', 'b', 'c'])\n ).set_index('categories')\n tm.assert_frame_equal(desc, expected)\n\n # check unused categories\n cat = self.factor.copy()\n cat.set_categories([\"a\",\"b\",\"c\",\"d\"], inplace=True)\n desc = cat.describe()\n expected = DataFrame.from_dict(dict(counts=[3, 2, 3, np.nan],\n freqs=[3/8., 2/8., 3/8., np.nan],\n categories=['a', 'b', 'c', 'd'])\n ).set_index('categories')\n tm.assert_frame_equal(desc, expected)\n\n # check an integer one\n desc = Categorical([1,2,3,1,2,3,3,2,1,1,1]).describe()\n expected = DataFrame.from_dict(dict(counts=[5, 3, 3],\n freqs=[5/11., 3/11., 3/11.],\n categories=[1,2,3]\n )\n ).set_index('categories')\n tm.assert_frame_equal(desc, expected)\n\n # https://github.com/pydata/pandas/issues/3678\n # describe should work with NaN\n cat = pd.Categorical([np.nan,1, 2, 2])\n desc = cat.describe()\n expected = DataFrame.from_dict(dict(counts=[1, 2, 1],\n freqs=[1/4., 2/4., 1/4.],\n categories=[1,2,np.nan]\n )\n ).set_index('categories')\n tm.assert_frame_equal(desc, expected)\n\n # having NaN as category and as \"not available\" should also print two NaNs in describe!\n cat = pd.Categorical([np.nan,1, 2, 2])\n cat.set_categories([1,2,np.nan], rename=True, inplace=True)\n desc = cat.describe()\n expected = DataFrame.from_dict(dict(counts=[1, 2, np.nan, 1],\n freqs=[1/4., 2/4., np.nan, 1/4.],\n categories=[1,2,np.nan,np.nan]\n )\n ).set_index('categories')\n tm.assert_frame_equal(desc, expected)\n\n # empty categories show up as NA\n cat = Categorical([\"a\",\"b\",\"b\",\"b\"], categories=['a','b','c'], ordered=True)\n result = cat.describe()\n\n expected = DataFrame([[1,0.25],[3,0.75],[np.nan,np.nan]],\n columns=['counts','freqs'],\n index=Index(['a','b','c'],name='categories'))\n tm.assert_frame_equal(result,expected)\n\n # NA as a category\n cat = pd.Categorical([\"a\",\"c\",\"c\",np.nan], categories=[\"b\",\"a\",\"c\",np.nan] )\n result = cat.describe()\n\n expected = DataFrame([[np.nan, np.nan],[1,0.25],[2,0.5], [1,0.25]],\n columns=['counts','freqs'],\n index=Index(['b','a','c',np.nan],name='categories'))\n tm.assert_frame_equal(result,expected)\n\n\n def test_print(self):\n expected = [\"[a, b, b, a, a, c, c, c]\",\n \"Categories (3, object): [a < b < c]\"]\n expected = \"\\n\".join(expected)\n actual = repr(self.factor)\n self.assertEqual(actual, expected)\n\n def test_big_print(self):\n factor = Categorical([0,1,2,0,1,2]*100, ['a', 'b', 'c'], name='cat', fastpath=True)\n expected = [\"[a, b, c, a, b, ..., b, c, a, b, c]\",\n \"Name: cat, Length: 600\",\n \"Categories (3, object): [a, b, c]\"]\n expected = \"\\n\".join(expected)\n\n 
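# A minimal illustrative sketch (not part of the original test file) of the
# describe() contract asserted above, assuming the 0.15-era API these tests use:
# the result is a frame indexed by category with 'counts' and 'freqs' columns,
# and unused categories show up as NaN rows.
import pandas as pd
cat = pd.Categorical(["a", "b", "b", "b"], categories=["a", "b", "c"], ordered=True)
desc = cat.describe()
# expected shape of desc (the "empty categories show up as NA" case above):
#             counts  freqs
# categories
# a                1   0.25
# b                3   0.75
# c              NaN    NaN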
actual = repr(factor)\n\n self.assertEqual(actual, expected)\n\n def test_empty_print(self):\n factor = Categorical([], [\"a\",\"b\",\"c\"], name=\"cat\")\n expected = (\"[], Name: cat, Categories (3, object): [a < b < c]\")\n # hack because array_repr changed in numpy > 1.6.x\n actual = repr(factor)\n\n self.assertEqual(actual, expected)\n\n factor = Categorical([], [\"a\",\"b\",\"c\"])\n expected = (\"[], Categories (3, object): [a < b < c]\")\n actual = repr(factor)\n\n self.assertEqual(expected, actual)\n\n factor = Categorical([], [])\n expected = (\"[], Categories (0, object): []\")\n self.assertEqual(expected, repr(factor))\n\n def test_periodindex(self):\n idx1 = PeriodIndex(['2014-01', '2014-01', '2014-02', '2014-02',\n '2014-03', '2014-03'], freq='M')\n\n cat1 = Categorical.from_array(idx1)\n str(cat1)\n exp_arr = np.array([0, 0, 1, 1, 2, 2],dtype='int64')\n exp_idx = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')\n self.assert_numpy_array_equal(cat1._codes, exp_arr)\n self.assertTrue(cat1.categories.equals(exp_idx))\n\n idx2 = PeriodIndex(['2014-03', '2014-03', '2014-02', '2014-01',\n '2014-03', '2014-01'], freq='M')\n cat2 = Categorical.from_array(idx2)\n str(cat2)\n exp_arr = np.array([2, 2, 1, 0, 2, 0],dtype='int64')\n exp_idx2 = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')\n self.assert_numpy_array_equal(cat2._codes, exp_arr)\n self.assertTrue(cat2.categories.equals(exp_idx2))\n\n idx3 = PeriodIndex(['2013-12', '2013-11', '2013-10', '2013-09',\n '2013-08', '2013-07', '2013-05'], freq='M')\n cat3 = Categorical.from_array(idx3)\n exp_arr = np.array([6, 5, 4, 3, 2, 1, 0],dtype='int64')\n exp_idx = PeriodIndex(['2013-05', '2013-07', '2013-08', '2013-09',\n '2013-10', '2013-11', '2013-12'], freq='M')\n self.assert_numpy_array_equal(cat3._codes, exp_arr)\n self.assertTrue(cat3.categories.equals(exp_idx))\n\n def test_categories_assigments(self):\n s = pd.Categorical([\"a\",\"b\",\"c\",\"a\"])\n exp = np.array([1,2,3,1])\n s.categories = [1,2,3]\n self.assert_numpy_array_equal(s.__array__(), exp)\n self.assert_numpy_array_equal(s.categories, np.array([1,2,3]))\n # lengthen\n def f():\n s.categories = [1,2,3,4]\n self.assertRaises(ValueError, f)\n # shorten\n def f():\n s.categories = [1,2]\n self.assertRaises(ValueError, f)\n\n def test_set_categories(self):\n cat = Categorical([\"a\",\"b\",\"c\",\"a\"], ordered=True)\n exp_categories = np.array([\"c\",\"b\",\"a\"])\n exp_values = np.array([\"a\",\"b\",\"c\",\"a\"])\n\n res = cat.set_categories([\"c\",\"b\",\"a\"], inplace=True)\n self.assert_numpy_array_equal(cat.categories, exp_categories)\n self.assert_numpy_array_equal(cat.__array__(), exp_values)\n self.assertIsNone(res)\n\n res = cat.set_categories([\"a\",\"b\",\"c\"])\n # cat must be the same as before\n self.assert_numpy_array_equal(cat.categories, exp_categories)\n self.assert_numpy_array_equal(cat.__array__(), exp_values)\n # only res is changed\n exp_categories_back = np.array([\"a\",\"b\",\"c\"])\n self.assert_numpy_array_equal(res.categories, exp_categories_back)\n self.assert_numpy_array_equal(res.__array__(), exp_values)\n\n # not all \"old\" included in \"new\" -> all not included ones are now np.nan\n cat = Categorical([\"a\",\"b\",\"c\",\"a\"], ordered=True)\n res = cat.set_categories([\"a\"])\n self.assert_numpy_array_equal(res.codes, np.array([0,-1,-1,0]))\n\n # still not all \"old\" in \"new\"\n res = cat.set_categories([\"a\",\"b\",\"d\"])\n self.assert_numpy_array_equal(res.codes, np.array([0,1,-1,0]))\n 
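# Illustrative sketch of the recoding rule asserted here (0.15-era API; no names
# beyond those the test itself uses): set_categories() rewrites the integer codes
# so each value keeps its meaning, and a value whose category is dropped from the
# new set becomes code -1, i.e. missing.
import pandas as pd
c = pd.Categorical(["a", "b", "c", "a"], ordered=True)
r = c.set_categories(["a", "b", "d"])
# r.codes -> array([0, 1, -1, 0], dtype=int8): "c" lost its category and is now NaN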
self.assert_numpy_array_equal(res.categories, np.array([\"a\",\"b\",\"d\"]))\n\n # all \"old\" included in \"new\"\n cat = cat.set_categories([\"a\",\"b\",\"c\",\"d\"])\n exp_categories = np.array([\"a\",\"b\",\"c\",\"d\"])\n self.assert_numpy_array_equal(cat.categories, exp_categories)\n\n # internals...\n c = Categorical([1,2,3,4,1], categories=[1,2,3,4])\n self.assert_numpy_array_equal(c._codes, np.array([0,1,2,3,0]))\n self.assert_numpy_array_equal(c.categories , np.array([1,2,3,4] ))\n self.assert_numpy_array_equal(c.get_values(), np.array([1,2,3,4,1] ))\n c = c.set_categories([4,3,2,1]) # all \"pointers\" to '4' must be changed from 3 to 0,...\n self.assert_numpy_array_equal(c._codes, np.array([3,2,1,0,3])) # positions are changed\n self.assert_numpy_array_equal(c.categories, np.array([4,3,2,1])) # categories are now in new order\n self.assert_numpy_array_equal(c.get_values(), np.array([1,2,3,4,1])) # output is the same\n self.assertTrue(c.min(), 4)\n self.assertTrue(c.max(), 1)\n\n def test_rename_categories(self):\n cat = pd.Categorical([\"a\",\"b\",\"c\",\"a\"])\n\n # inplace=False: the old one must not be changed\n res = cat.rename_categories([1,2,3])\n self.assert_numpy_array_equal(res.__array__(), np.array([1,2,3,1]))\n self.assert_numpy_array_equal(res.categories, np.array([1,2,3]))\n self.assert_numpy_array_equal(cat.__array__(), np.array([\"a\",\"b\",\"c\",\"a\"]))\n self.assert_numpy_array_equal(cat.categories, np.array([\"a\",\"b\",\"c\"]))\n res = cat.rename_categories([1,2,3], inplace=True)\n\n # and now inplace\n self.assertIsNone(res)\n self.assert_numpy_array_equal(cat.__array__(), np.array([1,2,3,1]))\n self.assert_numpy_array_equal(cat.categories, np.array([1,2,3]))\n\n # lengthen\n def f():\n cat.rename_categories([1,2,3,4])\n self.assertRaises(ValueError, f)\n # shorten\n def f():\n cat.rename_categories([1,2])\n self.assertRaises(ValueError, f)\n\n def test_reorder_categories(self):\n cat = Categorical([\"a\",\"b\",\"c\",\"a\"], ordered=True)\n old = cat.copy()\n new = Categorical([\"a\",\"b\",\"c\",\"a\"], categories=[\"c\",\"b\",\"a\"], ordered=True)\n\n # first inplace == False\n res = cat.reorder_categories([\"c\",\"b\",\"a\"])\n # cat must be the same as before\n self.assert_categorical_equal(cat, old)\n # only res is changed\n self.assert_categorical_equal(res, new)\n\n # inplace == True\n res = cat.reorder_categories([\"c\",\"b\",\"a\"], inplace=True)\n self.assertIsNone(res)\n self.assert_categorical_equal(cat, new)\n\n # not all \"old\" included in \"new\"\n cat = Categorical([\"a\",\"b\",\"c\",\"a\"], ordered=True)\n def f():\n cat.reorder_categories([\"a\"])\n self.assertRaises(ValueError, f)\n\n # still not all \"old\" in \"new\"\n def f():\n cat.reorder_categories([\"a\",\"b\",\"d\"])\n self.assertRaises(ValueError, f)\n\n # all \"old\" included in \"new\", but too long\n def f():\n cat.reorder_categories([\"a\",\"b\",\"c\",\"d\"])\n self.assertRaises(ValueError, f)\n\n def test_add_categories(self):\n cat = Categorical([\"a\",\"b\",\"c\",\"a\"], ordered=True)\n old = cat.copy()\n new = Categorical([\"a\",\"b\",\"c\",\"a\"], categories=[\"a\",\"b\",\"c\",\"d\"], ordered=True)\n\n # first inplace == False\n res = cat.add_categories(\"d\")\n self.assert_categorical_equal(cat, old)\n self.assert_categorical_equal(res, new)\n\n res = cat.add_categories([\"d\"])\n self.assert_categorical_equal(cat, old)\n self.assert_categorical_equal(res, new)\n\n # inplace == True\n res = cat.add_categories(\"d\", inplace=True)\n self.assert_categorical_equal(cat, 
new)\n self.assertIsNone(res)\n\n # new is in old categories\n def f():\n cat.add_categories([\"d\"])\n self.assertRaises(ValueError, f)\n\n def test_remove_categories(self):\n cat = Categorical([\"a\",\"b\",\"c\",\"a\"], ordered=True)\n old = cat.copy()\n new = Categorical([\"a\",\"b\",np.nan,\"a\"], categories=[\"a\",\"b\"], ordered=True)\n\n # first inplace == False\n res = cat.remove_categories(\"c\")\n self.assert_categorical_equal(cat, old)\n self.assert_categorical_equal(res, new)\n\n res = cat.remove_categories([\"c\"])\n self.assert_categorical_equal(cat, old)\n self.assert_categorical_equal(res, new)\n\n # inplace == True\n res = cat.remove_categories(\"c\", inplace=True)\n self.assert_categorical_equal(cat, new)\n self.assertIsNone(res)\n\n # removal is not in categories\n def f():\n cat.remove_categories([\"c\"])\n self.assertRaises(ValueError, f)\n\n def test_remove_unused_categories(self):\n c = Categorical([\"a\",\"b\",\"c\",\"d\",\"a\"], categories=[\"a\",\"b\",\"c\",\"d\",\"e\"])\n exp_categories_all = np.array([\"a\",\"b\",\"c\",\"d\",\"e\"])\n exp_categories_dropped = np.array([\"a\",\"b\",\"c\",\"d\"])\n\n self.assert_numpy_array_equal(c.categories, exp_categories_all)\n\n res = c.remove_unused_categories()\n self.assert_numpy_array_equal(res.categories, exp_categories_dropped)\n self.assert_numpy_array_equal(c.categories, exp_categories_all)\n\n res = c.remove_unused_categories(inplace=True)\n self.assert_numpy_array_equal(c.categories, exp_categories_dropped)\n self.assertIsNone(res)\n\n\n def test_nan_handling(self):\n\n # Nans are represented as -1 in codes\n c = Categorical([\"a\",\"b\",np.nan,\"a\"])\n self.assert_numpy_array_equal(c.categories , np.array([\"a\",\"b\"]))\n self.assert_numpy_array_equal(c._codes , np.array([0,1,-1,0]))\n c[1] = np.nan\n self.assert_numpy_array_equal(c.categories , np.array([\"a\",\"b\"]))\n self.assert_numpy_array_equal(c._codes , np.array([0,-1,-1,0]))\n\n # If categories have nan included, the code should point to that instead\n c = Categorical([\"a\",\"b\",np.nan,\"a\"], categories=[\"a\",\"b\",np.nan])\n self.assert_numpy_array_equal(c.categories , np.array([\"a\",\"b\",np.nan],dtype=np.object_))\n self.assert_numpy_array_equal(c._codes , np.array([0,1,2,0]))\n c[1] = np.nan\n self.assert_numpy_array_equal(c.categories , np.array([\"a\",\"b\",np.nan],dtype=np.object_))\n self.assert_numpy_array_equal(c._codes , np.array([0,2,2,0]))\n\n # Changing categories should also make the replaced category np.nan\n c = Categorical([\"a\",\"b\",\"c\",\"a\"])\n c.categories = [\"a\",\"b\",np.nan]\n self.assert_numpy_array_equal(c.categories , np.array([\"a\",\"b\",np.nan],dtype=np.object_))\n self.assert_numpy_array_equal(c._codes , np.array([0,1,2,0]))\n\n # Adding nan to categories should make assigned nan point to the category!\n c = Categorical([\"a\",\"b\",np.nan,\"a\"])\n self.assert_numpy_array_equal(c.categories , np.array([\"a\",\"b\"]))\n self.assert_numpy_array_equal(c._codes , np.array([0,1,-1,0]))\n c.set_categories([\"a\",\"b\",np.nan], rename=True, inplace=True)\n self.assert_numpy_array_equal(c.categories , np.array([\"a\",\"b\",np.nan],dtype=np.object_))\n self.assert_numpy_array_equal(c._codes , np.array([0,1,-1,0]))\n c[1] = np.nan\n self.assert_numpy_array_equal(c.categories , np.array([\"a\",\"b\",np.nan],dtype=np.object_))\n self.assert_numpy_array_equal(c._codes , np.array([0,2,-1,0]))\n\n\n def test_isnull(self):\n exp = np.array([False, False, True])\n c = Categorical([\"a\",\"b\",np.nan])\n res = c.isnull()\n 
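# Sketch of the two NaN representations distinguished above (0.15-era behaviour;
# later pandas versions dropped support for NaN as an actual category):
import numpy as np
import pandas as pd
c = pd.Categorical(["a", "b", np.nan, "a"])
# c.codes -> array([0, 1, -1, 0], dtype=int8): missing values are coded as -1 ...
c2 = pd.Categorical(["a", "b", np.nan, "a"], categories=["a", "b", np.nan])
# c2.codes -> array([0, 1, 2, 0], dtype=int8): ... unless NaN is itself a category,
# in which case it gets a regular code like any other category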
self.assert_numpy_array_equal(res, exp)\n\n c = Categorical([\"a\",\"b\",np.nan], categories=[\"a\",\"b\",np.nan])\n res = c.isnull()\n self.assert_numpy_array_equal(res, exp)\n\n # test both nan in categories and as -1\n exp = np.array([True, False, True])\n c = Categorical([\"a\",\"b\",np.nan])\n c.set_categories([\"a\",\"b\",np.nan], rename=True, inplace=True)\n c[0] = np.nan\n res = c.isnull()\n self.assert_numpy_array_equal(res, exp)\n\n def test_codes_immutable(self):\n\n # Codes should be read only\n c = Categorical([\"a\",\"b\",\"c\",\"a\", np.nan])\n exp = np.array([0,1,2,0,-1],dtype='int8')\n self.assert_numpy_array_equal(c.codes, exp)\n\n # Assignments to codes should raise\n def f():\n c.codes = np.array([0,1,2,0,1],dtype='int8')\n self.assertRaises(ValueError, f)\n\n # changes in the codes array should raise\n # np 1.6.1 raises RuntimeError rather than ValueError\n codes= c.codes\n def f():\n codes[4] = 1\n self.assertRaises(ValueError, f)\n\n # But even after getting the codes, the original array should still be writeable!\n c[4] = \"a\"\n exp = np.array([0,1,2,0,0],dtype='int8')\n self.assert_numpy_array_equal(c.codes, exp)\n c._codes[4] = 2\n exp = np.array([0,1,2,0, 2],dtype='int8')\n self.assert_numpy_array_equal(c.codes, exp)\n\n\n def test_min_max(self):\n\n # unordered cats have no min/max\n cat = Categorical([\"a\",\"b\",\"c\",\"d\"], ordered=False)\n self.assertRaises(TypeError, lambda : cat.min())\n self.assertRaises(TypeError, lambda : cat.max())\n cat = Categorical([\"a\",\"b\",\"c\",\"d\"], ordered=True)\n _min = cat.min()\n _max = cat.max()\n self.assertEqual(_min, \"a\")\n self.assertEqual(_max, \"d\")\n cat = Categorical([\"a\",\"b\",\"c\",\"d\"], categories=['d','c','b','a'], ordered=True)\n _min = cat.min()\n _max = cat.max()\n self.assertEqual(_min, \"d\")\n self.assertEqual(_max, \"a\")\n cat = Categorical([np.nan,\"b\",\"c\",np.nan], categories=['d','c','b','a'], ordered=True)\n _min = cat.min()\n _max = cat.max()\n self.assertTrue(np.isnan(_min))\n self.assertEqual(_max, \"b\")\n\n _min = cat.min(numeric_only=True)\n self.assertEqual(_min, \"c\")\n _max = cat.max(numeric_only=True)\n self.assertEqual(_max, \"b\")\n\n cat = Categorical([np.nan,1,2,np.nan], categories=[5,4,3,2,1], ordered=True)\n _min = cat.min()\n _max = cat.max()\n self.assertTrue(np.isnan(_min))\n self.assertEqual(_max, 1)\n\n _min = cat.min(numeric_only=True)\n self.assertEqual(_min, 2)\n _max = cat.max(numeric_only=True)\n self.assertEqual(_max, 1)\n\n def test_unique(self):\n cat = Categorical([\"a\",\"b\",\"c\",\"d\"])\n exp = np.asarray([\"a\",\"b\",\"c\",\"d\"])\n res = cat.unique()\n self.assert_numpy_array_equal(res, exp)\n self.assertEqual(type(res), type(exp))\n\n def test_mode(self):\n s = Categorical([1,1,2,4,5,5,5], categories=[5,4,3,2,1], ordered=True)\n res = s.mode()\n exp = Categorical([5], categories=[5,4,3,2,1], ordered=True)\n self.assertTrue(res.equals(exp))\n s = Categorical([1,1,1,4,5,5,5], categories=[5,4,3,2,1], ordered=True)\n res = s.mode()\n exp = Categorical([5,1], categories=[5,4,3,2,1], ordered=True)\n self.assertTrue(res.equals(exp))\n s = Categorical([1,2,3,4,5], categories=[5,4,3,2,1], ordered=True)\n res = s.mode()\n exp = Categorical([], categories=[5,4,3,2,1], ordered=True)\n self.assertTrue(res.equals(exp))\n # NaN should not become the mode!\n s = Categorical([np.nan,np.nan,np.nan,4,5], categories=[5,4,3,2,1], ordered=True)\n res = s.mode()\n exp = Categorical([], categories=[5,4,3,2,1], ordered=True)\n self.assertTrue(res.equals(exp))\n s = 
Categorical([np.nan,np.nan,np.nan,4,5,4], categories=[5,4,3,2,1], ordered=True)\n res = s.mode()\n exp = Categorical([4], categories=[5,4,3,2,1], ordered=True)\n self.assertTrue(res.equals(exp))\n s = Categorical([np.nan,np.nan,4,5,4], categories=[5,4,3,2,1], ordered=True)\n res = s.mode()\n exp = Categorical([4], categories=[5,4,3,2,1], ordered=True)\n self.assertTrue(res.equals(exp))\n\n\n def test_sort(self):\n\n # unordered cats are not sortable\n cat = Categorical([\"a\",\"b\",\"b\",\"a\"], ordered=False)\n self.assertRaises(TypeError, lambda : cat.sort())\n cat = Categorical([\"a\",\"c\",\"b\",\"d\"], ordered=True)\n\n # order\n res = cat.order()\n exp = np.array([\"a\",\"b\",\"c\",\"d\"],dtype=object)\n self.assert_numpy_array_equal(res.__array__(), exp)\n\n cat = Categorical([\"a\",\"c\",\"b\",\"d\"], categories=[\"a\",\"b\",\"c\",\"d\"], ordered=True)\n res = cat.order()\n exp = np.array([\"a\",\"b\",\"c\",\"d\"],dtype=object)\n self.assert_numpy_array_equal(res.__array__(), exp)\n\n res = cat.order(ascending=False)\n exp = np.array([\"d\",\"c\",\"b\",\"a\"],dtype=object)\n self.assert_numpy_array_equal(res.__array__(), exp)\n\n # sort (inplace order)\n cat1 = cat.copy()\n cat1.sort()\n exp = np.array([\"a\",\"b\",\"c\",\"d\"],dtype=object)\n self.assert_numpy_array_equal(cat1.__array__(), exp)\n\n def test_slicing_directly(self):\n cat = Categorical([\"a\",\"b\",\"c\",\"d\",\"a\",\"b\",\"c\"])\n sliced = cat[3]\n tm.assert_equal(sliced, \"d\")\n sliced = cat[3:5]\n expected = Categorical([\"d\",\"a\"], categories=['a', 'b', 'c', 'd'])\n self.assert_numpy_array_equal(sliced._codes, expected._codes)\n tm.assert_index_equal(sliced.categories, expected.categories)\n\n def test_set_item_nan(self):\n cat = pd.Categorical([1,2,3])\n exp = pd.Categorical([1,np.nan,3], categories=[1,2,3])\n cat[1] = np.nan\n self.assertTrue(cat.equals(exp))\n\n # if nan in categories, the proper code should be set!\n cat = pd.Categorical([1,2,3, np.nan], categories=[1,2,3])\n cat.set_categories([1,2,3, np.nan], rename=True, inplace=True)\n cat[1] = np.nan\n exp = np.array([0,3,2,-1])\n self.assert_numpy_array_equal(cat.codes, exp)\n\n cat = pd.Categorical([1,2,3, np.nan], categories=[1,2,3])\n cat.set_categories([1,2,3, np.nan], rename=True, inplace=True)\n cat[1:3] = np.nan\n exp = np.array([0,3,3,-1])\n self.assert_numpy_array_equal(cat.codes, exp)\n\n cat = pd.Categorical([1,2,3, np.nan], categories=[1,2,3])\n cat.set_categories([1,2,3, np.nan], rename=True, inplace=True)\n cat[1:3] = [np.nan, 1]\n exp = np.array([0,3,0,-1])\n self.assert_numpy_array_equal(cat.codes, exp)\n\n cat = pd.Categorical([1,2,3, np.nan], categories=[1,2,3])\n cat.set_categories([1,2,3, np.nan], rename=True, inplace=True)\n cat[1:3] = [np.nan, np.nan]\n exp = np.array([0,3,3,-1])\n self.assert_numpy_array_equal(cat.codes, exp)\n\n cat = pd.Categorical([1,2, np.nan, 3], categories=[1,2,3])\n cat.set_categories([1,2,3, np.nan], rename=True, inplace=True)\n cat[pd.isnull(cat)] = np.nan\n exp = np.array([0,1,3,2])\n self.assert_numpy_array_equal(cat.codes, exp)\n\n def test_nbytes(self):\n cat = pd.Categorical([1,2,3])\n exp = cat._codes.nbytes + cat._categories.values.nbytes\n self.assertEqual(cat.nbytes, exp)\n\n def test_searchsorted(self):\n\n # See https://github.com/pydata/pandas/issues/8420\n # TODO: implement me...\n cat = pd.Categorical([1,2,3])\n def f():\n cat.searchsorted(3)\n self.assertRaises(NotImplementedError, f)\n\n def test_deprecated_labels(self):\n # TODO: labels is deprecated and should be removed in 0.18 or 
2017, whatever is earlier\n cat = pd.Categorical([1,2,3, np.nan], categories=[1,2,3])\n exp = cat.codes\n with tm.assert_produces_warning(FutureWarning):\n res = cat.labels\n self.assert_numpy_array_equal(res, exp)\n self.assertFalse(LooseVersion(pd.__version__) >= '0.18')\n\n def test_deprecated_levels(self):\n # TODO: levels is deprecated and should be removed in 0.18 or 2017, whatever is earlier\n cat = pd.Categorical([1,2,3, np.nan], categories=[1,2,3])\n exp = cat.categories\n with tm.assert_produces_warning(FutureWarning):\n res = cat.levels\n self.assert_numpy_array_equal(res, exp)\n with tm.assert_produces_warning(FutureWarning):\n res = pd.Categorical([1,2,3, np.nan], levels=[1,2,3])\n self.assert_numpy_array_equal(res.categories, exp)\n\n self.assertFalse(LooseVersion(pd.__version__) >= '0.18')\n\n\nclass TestCategoricalAsBlock(tm.TestCase):\n _multiprocess_can_split_ = True\n\n def setUp(self):\n self.factor = Categorical.from_array(['a', 'b', 'b', 'a',\n 'a', 'c', 'c', 'c'])\n\n df = DataFrame({'value': np.random.randint(0, 10000, 100)})\n labels = [ \"{0} - {1}\".format(i, i + 499) for i in range(0, 10000, 500) ]\n\n df = df.sort(columns=['value'], ascending=True)\n df['value_group'] = pd.cut(df.value, range(0, 10500, 500), right=False, labels=labels)\n self.cat = df\n\n def test_dtypes(self):\n\n dtype = com.CategoricalDtype()\n hash(dtype)\n self.assertTrue(com.is_categorical_dtype(dtype))\n\n s = Series(self.factor,name='A')\n\n # dtypes\n self.assertTrue(com.is_categorical_dtype(s.dtype))\n self.assertTrue(com.is_categorical_dtype(s))\n self.assertFalse(com.is_categorical_dtype(np.dtype('float64')))\n\n # np.dtype doesn't know about our new dtype\n def f():\n np.dtype(dtype)\n self.assertRaises(TypeError, f)\n\n self.assertFalse(dtype == np.str_)\n self.assertFalse(np.str_ == dtype)\n\n # GH8143\n index = ['cat','obj','num']\n cat = pd.Categorical(['a', 'b', 'c'])\n obj = pd.Series(['a', 'b', 'c'])\n num = pd.Series([1, 2, 3])\n df = pd.concat([pd.Series(cat), obj, num], axis=1, keys=index)\n\n result = df.dtypes == 'object'\n expected = Series([False,True,False],index=index)\n tm.assert_series_equal(result, expected)\n\n result = df.dtypes == 'int64'\n expected = Series([False,False,True],index=index)\n tm.assert_series_equal(result, expected)\n\n result = df.dtypes == 'category'\n expected = Series([True,False,False],index=index)\n tm.assert_series_equal(result, expected)\n\n def test_codes_dtypes(self):\n\n # GH 8453\n result = Categorical(['foo','bar','baz'])\n self.assertTrue(result.codes.dtype == 'int8')\n\n result = Categorical(['foo%05d' % i for i in range(400) ])\n self.assertTrue(result.codes.dtype == 'int16')\n\n result = Categorical(['foo%05d' % i for i in range(40000) ])\n self.assertTrue(result.codes.dtype == 'int32')\n\n # adding cats\n result = Categorical(['foo','bar','baz'])\n self.assertTrue(result.codes.dtype == 'int8')\n result = result.add_categories(['foo%05d' % i for i in range(400) ])\n self.assertTrue(result.codes.dtype == 'int16')\n\n # removing cats\n result = result.remove_categories(['foo%05d' % i for i in range(300) ])\n self.assertTrue(result.codes.dtype == 'int8')\n\n def test_basic(self):\n\n # test basic creation / coercion of categoricals\n s = Series(self.factor,name='A')\n self.assertEqual(s.dtype,'category')\n self.assertEqual(len(s),len(self.factor))\n str(s.values)\n str(s)\n\n # in a frame\n df = DataFrame({'A' : self.factor })\n result = df['A']\n tm.assert_series_equal(result,s)\n result = df.iloc[:,0]\n 
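# Sketch of the code-width rule test_codes_dtypes pins down (illustrative): the
# backing codes use the smallest signed integer dtype that can index all
# categories, widening automatically as categories are added or removed.
import pandas as pd
small = pd.Categorical(["foo", "bar", "baz"])              # small.codes.dtype -> int8
big = pd.Categorical(["foo%05d" % i for i in range(400)])  # big.codes.dtype -> int16
# at 40000 categories the dtype widens again, to int32 (see the assertions above)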
tm.assert_series_equal(result,s)\n self.assertEqual(len(df),len(self.factor))\n str(df.values)\n str(df)\n\n df = DataFrame({'A' : s })\n result = df['A']\n tm.assert_series_equal(result,s)\n self.assertEqual(len(df),len(self.factor))\n str(df.values)\n str(df)\n\n # multiples\n df = DataFrame({'A' : s, 'B' : s, 'C' : 1})\n result1 = df['A']\n result2 = df['B']\n tm.assert_series_equal(result1,s)\n tm.assert_series_equal(result2,s)\n self.assertEqual(len(df),len(self.factor))\n str(df.values)\n str(df)\n\n def test_creation_astype(self):\n l = [\"a\",\"b\",\"c\",\"a\"]\n s = pd.Series(l)\n exp = pd.Series(Categorical(l))\n res = s.astype('category')\n tm.assert_series_equal(res, exp)\n\n l = [1,2,3,1]\n s = pd.Series(l)\n exp = pd.Series(Categorical(l))\n res = s.astype('category')\n tm.assert_series_equal(res, exp)\n\n df = pd.DataFrame({\"cats\":[1,2,3,4,5,6], \"vals\":[1,2,3,4,5,6]})\n cats = Categorical([1,2,3,4,5,6])\n exp_df = pd.DataFrame({\"cats\":cats, \"vals\":[1,2,3,4,5,6]})\n df[\"cats\"] = df[\"cats\"].astype(\"category\")\n tm.assert_frame_equal(exp_df, df)\n\n df = pd.DataFrame({\"cats\":['a', 'b', 'b', 'a', 'a', 'd'], \"vals\":[1,2,3,4,5,6]})\n cats = Categorical(['a', 'b', 'b', 'a', 'a', 'd'])\n exp_df = pd.DataFrame({\"cats\":cats, \"vals\":[1,2,3,4,5,6]})\n df[\"cats\"] = df[\"cats\"].astype(\"category\")\n tm.assert_frame_equal(exp_df, df)\n\n def test_construction_series(self):\n\n l = [1,2,3,1]\n exp = Series(l).astype('category')\n res = Series(l,dtype='category')\n tm.assert_series_equal(res, exp)\n\n l = [\"a\",\"b\",\"c\",\"a\"]\n exp = Series(l).astype('category')\n res = Series(l,dtype='category')\n tm.assert_series_equal(res, exp)\n\n # insert into frame with different index\n # GH 8076\n index = pd.date_range('20000101', periods=3)\n expected = Series(Categorical(values=[np.nan,np.nan,np.nan],categories=['a', 'b', 'c']))\n expected.index = index\n\n expected = DataFrame({'x': expected})\n df = DataFrame({'x': Series(['a', 'b', 'c'],dtype='category')}, index=index)\n tm.assert_frame_equal(df, expected)\n\n def test_reindex(self):\n\n index = pd.date_range('20000101', periods=3)\n\n # reindexing to an invalid Categorical\n s = Series(['a', 'b', 'c'],dtype='category')\n result = s.reindex(index)\n expected = Series(Categorical(values=[np.nan,np.nan,np.nan],categories=['a', 'b', 'c']))\n expected.index = index\n tm.assert_series_equal(result, expected)\n\n # partial reindexing\n expected = Series(Categorical(values=['b','c'],categories=['a', 'b', 'c']))\n expected.index = [1,2]\n result = s.reindex([1,2])\n tm.assert_series_equal(result, expected)\n\n expected = Series(Categorical(values=['c',np.nan],categories=['a', 'b', 'c']))\n expected.index = [2,3]\n result = s.reindex([2,3])\n tm.assert_series_equal(result, expected)\n\n\n\n def test_sideeffects_free(self):\n\n # Passing a categorical to a Series and then changing values in either the series or the\n # categorical should not change the values in the other one, IF you specify copy!\n cat = Categorical([\"a\",\"b\",\"c\",\"a\"])\n s = pd.Series(cat, copy=True)\n self.assertFalse(s.cat is cat)\n s.cat.categories = [1,2,3]\n exp_s = np.array([1,2,3,1])\n exp_cat = np.array([\"a\",\"b\",\"c\",\"a\"])\n self.assert_numpy_array_equal(s.__array__(), exp_s)\n self.assert_numpy_array_equal(cat.__array__(), exp_cat)\n\n # setting\n s[0] = 2\n exp_s2 = np.array([2,2,3,1])\n self.assert_numpy_array_equal(s.__array__(), exp_s2)\n self.assert_numpy_array_equal(cat.__array__(), exp_cat)\n\n # however, copy is False by 
default\n # so this WILL change values\n cat = Categorical([\"a\",\"b\",\"c\",\"a\"])\n s = pd.Series(cat)\n self.assertTrue(s.values is cat)\n s.cat.categories = [1,2,3]\n exp_s = np.array([1,2,3,1])\n self.assert_numpy_array_equal(s.__array__(), exp_s)\n self.assert_numpy_array_equal(cat.__array__(), exp_s)\n\n s[0] = 2\n exp_s2 = np.array([2,2,3,1])\n self.assert_numpy_array_equal(s.__array__(), exp_s2)\n self.assert_numpy_array_equal(cat.__array__(), exp_s2)\n\n def test_nan_handling(self):\n\n # Nans are represented as -1 in labels\n s = Series(Categorical([\"a\",\"b\",np.nan,\"a\"]))\n self.assert_numpy_array_equal(s.cat.categories, np.array([\"a\",\"b\"]))\n self.assert_numpy_array_equal(s.values.codes, np.array([0,1,-1,0]))\n\n # If categories have nan included, the label should point to that instead\n s2 = Series(Categorical([\"a\",\"b\",np.nan,\"a\"], categories=[\"a\",\"b\",np.nan]))\n self.assert_numpy_array_equal(s2.cat.categories,\n np.array([\"a\",\"b\",np.nan], dtype=np.object_))\n self.assert_numpy_array_equal(s2.values.codes, np.array([0,1,2,0]))\n\n # Changing categories should also make the replaced category np.nan\n s3 = Series(Categorical([\"a\",\"b\",\"c\",\"a\"]))\n s3.cat.categories = [\"a\",\"b\",np.nan]\n self.assert_numpy_array_equal(s3.cat.categories,\n np.array([\"a\",\"b\",np.nan], dtype=np.object_))\n self.assert_numpy_array_equal(s3.values.codes, np.array([0,1,2,0]))\n\n def test_cat_accessor(self):\n s = Series(Categorical([\"a\",\"b\",np.nan,\"a\"]))\n self.assert_numpy_array_equal(s.cat.categories, np.array([\"a\",\"b\"]))\n self.assertEqual(s.cat.ordered, True)\n exp = Categorical([\"a\",\"b\",np.nan,\"a\"], categories=[\"b\",\"a\"])\n s.cat.set_categories([\"b\", \"a\"], inplace=True)\n self.assertTrue(s.values.equals(exp))\n res = s.cat.set_categories([\"b\", \"a\"])\n self.assertTrue(res.values.equals(exp))\n exp = Categorical([\"a\",\"b\",np.nan,\"a\"], categories=[\"b\",\"a\"])\n s[:] = \"a\"\n s = s.cat.remove_unused_categories()\n self.assert_numpy_array_equal(s.cat.categories, np.array([\"a\"]))\n\n def test_sequence_like(self):\n\n # GH 7839\n # make sure can iterate\n df = DataFrame({\"id\":[1,2,3,4,5,6], \"raw_grade\":['a', 'b', 'b', 'a', 'a', 'e']})\n df['grade'] = Categorical(df['raw_grade'])\n\n # basic sequencing testing\n result = list(df.grade.values)\n expected = np.array(df.grade.values).tolist()\n tm.assert_almost_equal(result,expected)\n\n # iteration\n for t in df.itertuples(index=False):\n str(t)\n\n for row, s in df.iterrows():\n str(s)\n\n for c, col in df.iteritems():\n str(s)\n\n def test_series_delegations(self):\n\n # invalid accessor\n self.assertRaises(TypeError, lambda : Series([1,2,3]).cat)\n tm.assertRaisesRegexp(TypeError,\n r\"Can only use .cat accessor with a 'category' dtype\",\n lambda : Series([1,2,3]).cat)\n self.assertRaises(TypeError, lambda : Series(['a','b','c']).cat)\n self.assertRaises(TypeError, lambda : Series(np.arange(5.)).cat)\n self.assertRaises(TypeError, lambda : Series([Timestamp('20130101')]).cat)\n\n # Series should delegate calls to '.categories', '.codes', '.ordered' and the\n # methods '.set_categories()' 'drop_unused_categories()' to the categorical\n s = Series(Categorical([\"a\",\"b\",\"c\",\"a\"], ordered=True))\n exp_categories = np.array([\"a\",\"b\",\"c\"])\n self.assert_numpy_array_equal(s.cat.categories, exp_categories)\n s.cat.categories = [1,2,3]\n exp_categories = np.array([1,2,3])\n self.assert_numpy_array_equal(s.cat.categories, exp_categories)\n\n exp_codes = 
Series([0,1,2,0],dtype='int8')\n tm.assert_series_equal(s.cat.codes, exp_codes)\n\n self.assertEqual(s.cat.ordered, True)\n s.cat.ordered = False\n self.assertEqual(s.cat.ordered, False)\n\n # reorder\n s = Series(Categorical([\"a\",\"b\",\"c\",\"a\"], ordered=True))\n exp_categories = np.array([\"c\",\"b\",\"a\"])\n exp_values = np.array([\"a\",\"b\",\"c\",\"a\"])\n s = s.cat.set_categories([\"c\",\"b\",\"a\"])\n self.assert_numpy_array_equal(s.cat.categories, exp_categories)\n self.assert_numpy_array_equal(s.values.__array__(), exp_values)\n self.assert_numpy_array_equal(s.__array__(), exp_values)\n\n # remove unused categories\n s = Series(Categorical([\"a\",\"b\",\"b\",\"a\"], categories=[\"a\",\"b\",\"c\"]))\n exp_categories = np.array([\"a\",\"b\"])\n exp_values = np.array([\"a\",\"b\",\"b\",\"a\"])\n s = s.cat.remove_unused_categories()\n self.assert_numpy_array_equal(s.cat.categories, exp_categories)\n self.assert_numpy_array_equal(s.values.__array__(), exp_values)\n self.assert_numpy_array_equal(s.__array__(), exp_values)\n\n # This method is likely to be confused, so test that it raises an error on wrong inputs:\n def f():\n s.set_categories([4,3,2,1])\n self.assertRaises(Exception, f)\n # right: s.cat.set_categories([4,3,2,1])\n\n def test_series_functions_no_warnings(self):\n df = pd.DataFrame({'value': np.random.randint(0, 100, 20)})\n labels = [ \"{0} - {1}\".format(i, i + 9) for i in range(0, 100, 10)]\n with tm.assert_produces_warning(False):\n df['group'] = pd.cut(df.value, range(0, 105, 10), right=False, labels=labels)\n\n def test_assignment_to_dataframe(self):\n # assignment\n df = DataFrame({'value': np.array(np.random.randint(0, 10000, 100),dtype='int32')})\n labels = [ \"{0} - {1}\".format(i, i + 499) for i in range(0, 10000, 500) ]\n\n df = df.sort(columns=['value'], ascending=True)\n s = pd.cut(df.value, range(0, 10500, 500), right=False, labels=labels)\n d = s.values\n df['D'] = d\n str(df)\n\n result = df.dtypes\n expected = Series([np.dtype('int32'), com.CategoricalDtype()],index=['value','D'])\n tm.assert_series_equal(result,expected)\n\n df['E'] = s\n str(df)\n\n result = df.dtypes\n expected = Series([np.dtype('int32'), com.CategoricalDtype(), com.CategoricalDtype()],\n index=['value','D','E'])\n tm.assert_series_equal(result,expected)\n\n result1 = df['D']\n result2 = df['E']\n self.assertTrue(result1._data._block.values.equals(d))\n\n # sorting\n s.name = 'E'\n self.assertTrue(result2.sort_index().equals(s.sort_index()))\n\n cat = pd.Categorical([1,2,3,10], categories=[1,2,3,4,10])\n df = pd.DataFrame(pd.Series(cat))\n\n def test_describe(self):\n\n # Categoricals should not show up together with numerical columns\n result = self.cat.describe()\n self.assertEquals(len(result.columns),1)\n\n\n # In a frame, describe() for the cat should be the same as for string arrays (count, unique,\n # top, freq)\n\n cat = Categorical([\"a\",\"b\",\"b\",\"b\"], categories=['a','b','c'], ordered=True)\n s = Series(cat)\n result = s.describe()\n expected = Series([4,2,\"b\",3],index=['count','unique','top', 'freq'])\n tm.assert_series_equal(result,expected)\n\n cat = pd.Series(pd.Categorical([\"a\",\"b\",\"c\",\"c\"]))\n df3 = pd.DataFrame({\"cat\":cat, \"s\":[\"a\",\"b\",\"c\",\"c\"]})\n res = df3.describe()\n self.assert_numpy_array_equal(res[\"cat\"].values, res[\"s\"].values)\n\n def test_repr(self):\n a = pd.Series(pd.Categorical([1,2,3,4], name=\"a\"))\n exp = u(\"0 1\\n1 2\\n2 3\\n3 4\\n\" +\n \"Name: a, dtype: category\\nCategories (4, int64): [1 < 2 < 3 < 4]\")\n\n 
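# Sketch of the Series.cat delegation exercised by test_series_delegations above
# (illustrative; only calls the tests themselves make): .cat exposes the wrapped
# Categorical's properties and methods, and exists only for the category dtype.
import pandas as pd
s = pd.Series(pd.Categorical(["a", "b", "c", "a"]))
# s.cat.categories -> Index(['a', 'b', 'c'], dtype='object')
# s.cat.codes      -> int8 Series [0, 1, 2, 0]
# pd.Series([1, 2, 3]).cat -> TypeError: "Can only use .cat accessor with a 'category' dtype"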
self.assertEqual(exp, a.__unicode__())\n\n a = pd.Series(pd.Categorical([\"a\",\"b\"] *25, name=\"a\"))\n exp = u(\"\".join([\"%s a\\n%s b\\n\"%(i,i+1) for i in range(0,10,2)]) + \"...\\n\" +\n \"\".join([\"%s a\\n%s b\\n\"%(i,i+1) for i in range(40,50,2)]) +\n \"Name: a, Length: 50, dtype: category\\n\" +\n \"Categories (2, object): [a < b]\")\n self.assertEqual(exp,a._tidy_repr())\n\n levs = list(\"abcdefghijklmnopqrstuvwxyz\")\n a = pd.Series(pd.Categorical([\"a\",\"b\"], name=\"a\", categories=levs))\n exp = u(\"0 a\\n1 b\\n\" +\n \"Name: a, dtype: category\\n\"\n \"Categories (26, object): [a < b < c < d ... w < x < y < z]\")\n self.assertEqual(exp,a.__unicode__())\n\n\n def test_info(self):\n\n # make sure it works\n n = 2500\n df = DataFrame({ 'int64' : np.random.randint(100,size=n) })\n df['category'] = Series(np.array(list('abcdefghij')).take(np.random.randint(0,10,size=n))).astype('category')\n df.isnull()\n df.info()\n\n df2 = df[df['category']=='d']\n df2.info()\n\n def test_groupby_sort(self):\n\n # http://stackoverflow.com/questions/23814368/sorting-pandas-categorical-labels-after-groupby\n # This should result in a properly sorted Series so that the plot\n # has a sorted x axis\n #self.cat.groupby(['value_group'])['value_group'].count().plot(kind='bar')\n\n res = self.cat.groupby(['value_group'])['value_group'].count()\n exp = res[sorted(res.index, key=lambda x: float(x.split()[0]))]\n tm.assert_series_equal(res, exp)\n\n def test_min_max(self):\n # unordered cats have no min/max\n cat = Series(Categorical([\"a\",\"b\",\"c\",\"d\"], ordered=False))\n self.assertRaises(TypeError, lambda : cat.min())\n self.assertRaises(TypeError, lambda : cat.max())\n\n cat = Series(Categorical([\"a\",\"b\",\"c\",\"d\"], ordered=True))\n _min = cat.min()\n _max = cat.max()\n self.assertEqual(_min, \"a\")\n self.assertEqual(_max, \"d\")\n\n cat = Series(Categorical([\"a\",\"b\",\"c\",\"d\"], categories=['d','c','b','a'], ordered=True))\n _min = cat.min()\n _max = cat.max()\n self.assertEqual(_min, \"d\")\n self.assertEqual(_max, \"a\")\n\n cat = Series(Categorical([np.nan,\"b\",\"c\",np.nan], categories=['d','c','b','a'], ordered=True))\n _min = cat.min()\n _max = cat.max()\n self.assertTrue(np.isnan(_min))\n self.assertEqual(_max, \"b\")\n\n cat = Series(Categorical([np.nan,1,2,np.nan], categories=[5,4,3,2,1], ordered=True))\n _min = cat.min()\n _max = cat.max()\n self.assertTrue(np.isnan(_min))\n self.assertEqual(_max, 1)\n\n def test_mode(self):\n s = Series(Categorical([1,1,2,4,5,5,5], categories=[5,4,3,2,1], ordered=True))\n res = s.mode()\n exp = Series(Categorical([5], categories=[5,4,3,2,1], ordered=True))\n tm.assert_series_equal(res, exp)\n s = Series(Categorical([1,1,1,4,5,5,5], categories=[5,4,3,2,1], ordered=True))\n res = s.mode()\n exp = Series(Categorical([5,1], categories=[5,4,3,2,1], ordered=True))\n tm.assert_series_equal(res, exp)\n s = Series(Categorical([1,2,3,4,5], categories=[5,4,3,2,1], ordered=True))\n res = s.mode()\n exp = Series(Categorical([], categories=[5,4,3,2,1], ordered=True))\n tm.assert_series_equal(res, exp)\n\n def test_value_counts(self):\n\n s = pd.Series(pd.Categorical([\"a\",\"b\",\"c\",\"c\",\"c\",\"b\"], categories=[\"c\",\"a\",\"b\",\"d\"]))\n res = s.value_counts(sort=False)\n exp = Series([3,1,2,0], index=[\"c\",\"a\",\"b\",\"d\"])\n tm.assert_series_equal(res, exp)\n res = s.value_counts(sort=True)\n exp = Series([3,2,1,0], index=[\"c\",\"b\",\"a\",\"d\"])\n tm.assert_series_equal(res, exp)\n\n def test_groupby(self):\n\n cats = 
Categorical([\"a\", \"a\", \"a\", \"b\", \"b\", \"b\", \"c\", \"c\", \"c\"], categories=[\"a\",\"b\",\"c\",\"d\"])\n data = DataFrame({\"a\":[1,1,1,2,2,2,3,4,5], \"b\":cats})\n\n expected = DataFrame({ 'a' : Series([1,2,4,np.nan],index=Index(['a','b','c','d'],name='b')) })\n result = data.groupby(\"b\").mean()\n tm.assert_frame_equal(result, expected)\n\n raw_cat1 = Categorical([\"a\",\"a\",\"b\",\"b\"], categories=[\"a\",\"b\",\"z\"])\n raw_cat2 = Categorical([\"c\",\"d\",\"c\",\"d\"], categories=[\"c\",\"d\",\"y\"])\n df = DataFrame({\"A\":raw_cat1,\"B\":raw_cat2, \"values\":[1,2,3,4]})\n\n # single grouper\n gb = df.groupby(\"A\")\n expected = DataFrame({ 'values' : Series([3,7,np.nan],index=Index(['a','b','z'],name='A')) })\n result = gb.sum()\n tm.assert_frame_equal(result, expected)\n\n # multiple groupers\n gb = df.groupby(['A','B'])\n expected = DataFrame({ 'values' : Series([1,2,np.nan,3,4,np.nan,np.nan,np.nan,np.nan],\n index=pd.MultiIndex.from_product([['a','b','z'],['c','d','y']],names=['A','B'])) })\n result = gb.sum()\n tm.assert_frame_equal(result, expected)\n\n # multiple groupers with a non-cat\n df = df.copy()\n df['C'] = ['foo','bar']*2\n gb = df.groupby(['A','B','C'])\n expected = DataFrame({ 'values' :\n Series(np.nan,index=pd.MultiIndex.from_product([['a','b','z'],\n ['c','d','y'],\n ['foo','bar']],\n names=['A','B','C']))\n }).sortlevel()\n expected.iloc[[1,2,7,8],0] = [1,2,3,4]\n result = gb.sum()\n tm.assert_frame_equal(result, expected)\n\n def test_pivot_table(self):\n\n raw_cat1 = Categorical([\"a\",\"a\",\"b\",\"b\"], categories=[\"a\",\"b\",\"z\"])\n raw_cat2 = Categorical([\"c\",\"d\",\"c\",\"d\"], categories=[\"c\",\"d\",\"y\"])\n df = DataFrame({\"A\":raw_cat1,\"B\":raw_cat2, \"values\":[1,2,3,4]})\n result = pd.pivot_table(df, values='values', index=['A', 'B'])\n\n expected = Series([1,2,np.nan,3,4,np.nan,np.nan,np.nan,np.nan],\n index=pd.MultiIndex.from_product([['a','b','z'],['c','d','y']],names=['A','B']),\n name='values')\n tm.assert_series_equal(result, expected)\n\n def test_count(self):\n\n s = Series(Categorical([np.nan,1,2,np.nan], categories=[5,4,3,2,1], ordered=True))\n result = s.count()\n self.assertEqual(result, 2)\n\n def test_sort(self):\n\n # unordered cats are not sortable\n cat = Series(Categorical([\"a\",\"b\",\"b\",\"a\"], ordered=False))\n self.assertRaises(TypeError, lambda : cat.sort())\n\n cat = Series(Categorical([\"a\",\"c\",\"b\",\"d\"], ordered=True))\n\n res = cat.order()\n exp = np.array([\"a\",\"b\",\"c\",\"d\"])\n self.assert_numpy_array_equal(res.__array__(), exp)\n\n cat = Series(Categorical([\"a\",\"c\",\"b\",\"d\"], categories=[\"a\",\"b\",\"c\",\"d\"], ordered=True))\n res = cat.order()\n exp = np.array([\"a\",\"b\",\"c\",\"d\"])\n self.assert_numpy_array_equal(res.__array__(), exp)\n\n res = cat.order(ascending=False)\n exp = np.array([\"d\",\"c\",\"b\",\"a\"])\n self.assert_numpy_array_equal(res.__array__(), exp)\n\n raw_cat1 = Categorical([\"a\",\"b\",\"c\",\"d\"], categories=[\"a\",\"b\",\"c\",\"d\"], ordered=False)\n raw_cat2 = Categorical([\"a\",\"b\",\"c\",\"d\"], categories=[\"d\",\"c\",\"b\",\"a\"])\n s = [\"a\",\"b\",\"c\",\"d\"]\n df = DataFrame({\"unsort\":raw_cat1,\"sort\":raw_cat2, \"string\":s, \"values\":[1,2,3,4]})\n\n # Cats must be sorted in a dataframe\n res = df.sort(columns=[\"string\"], ascending=False)\n exp = np.array([\"d\", \"c\", \"b\", \"a\"])\n self.assert_numpy_array_equal(res[\"sort\"].values.__array__(), exp)\n self.assertEqual(res[\"sort\"].dtype, \"category\")\n\n res = 
df.sort(columns=[\"sort\"], ascending=False)\n exp = df.sort(columns=[\"string\"], ascending=True)\n self.assert_numpy_array_equal(res[\"values\"], exp[\"values\"])\n self.assertEqual(res[\"sort\"].dtype, \"category\")\n self.assertEqual(res[\"unsort\"].dtype, \"category\")\n\n def f():\n df.sort(columns=[\"unsort\"], ascending=False)\n self.assertRaises(TypeError, f)\n\n # multi-columns sort\n # GH 7848\n df = DataFrame({\"id\":[6,5,4,3,2,1], \"raw_grade\":['a', 'b', 'b', 'a', 'a', 'e']})\n df[\"grade\"] = pd.Categorical(df[\"raw_grade\"])\n df['grade'] = df['grade'].cat.set_categories(['b', 'e', 'a'])\n\n # sorts 'grade' according to the order of the categories\n result = df.sort(columns=['grade'])\n expected = df.iloc[[1,2,5,0,3,4]]\n tm.assert_frame_equal(result,expected)\n\n # multi\n result = df.sort(columns=['grade', 'id'])\n expected = df.iloc[[2,1,5,4,3,0]]\n tm.assert_frame_equal(result,expected)\n\n # reverse\n cat = Categorical([\"a\",\"c\",\"c\",\"b\",\"d\"], ordered=True)\n res = cat.order(ascending=False)\n exp_val = np.array([\"d\",\"c\", \"c\", \"b\",\"a\"],dtype=object)\n exp_categories = np.array([\"a\",\"b\",\"c\",\"d\"],dtype=object)\n self.assert_numpy_array_equal(res.__array__(), exp_val)\n self.assert_numpy_array_equal(res.categories, exp_categories)\n\n # some NaN positions\n\n cat = Categorical([\"a\",\"c\",\"b\",\"d\", np.nan], ordered=True)\n res = cat.order(ascending=False, na_position='last')\n exp_val = np.array([\"d\",\"c\",\"b\",\"a\", np.nan],dtype=object)\n exp_categories = np.array([\"a\",\"b\",\"c\",\"d\"],dtype=object)\n self.assert_numpy_array_equal(res.__array__(), exp_val)\n self.assert_numpy_array_equal(res.categories, exp_categories)\n\n cat = Categorical([\"a\",\"c\",\"b\",\"d\", np.nan], ordered=True)\n res = cat.order(ascending=False, na_position='first')\n exp_val = np.array([np.nan, \"d\",\"c\",\"b\",\"a\"],dtype=object)\n exp_categories = np.array([\"a\",\"b\",\"c\",\"d\"],dtype=object)\n self.assert_numpy_array_equal(res.__array__(), exp_val)\n self.assert_numpy_array_equal(res.categories, exp_categories)\n\n cat = Categorical([\"a\",\"c\",\"b\",\"d\", np.nan], ordered=True)\n res = cat.order(ascending=False, na_position='first')\n exp_val = np.array([np.nan, \"d\",\"c\",\"b\",\"a\"],dtype=object)\n exp_categories = np.array([\"a\",\"b\",\"c\",\"d\"],dtype=object)\n self.assert_numpy_array_equal(res.__array__(), exp_val)\n self.assert_numpy_array_equal(res.categories, exp_categories)\n\n cat = Categorical([\"a\",\"c\",\"b\",\"d\", np.nan], ordered=True)\n res = cat.order(ascending=False, na_position='last')\n exp_val = np.array([\"d\",\"c\",\"b\",\"a\",np.nan],dtype=object)\n exp_categories = np.array([\"a\",\"b\",\"c\",\"d\"],dtype=object)\n self.assert_numpy_array_equal(res.__array__(), exp_val)\n self.assert_numpy_array_equal(res.categories, exp_categories)\n\n def test_slicing(self):\n cat = Series(Categorical([1,2,3,4]))\n reversed = cat[::-1]\n exp = np.array([4,3,2,1])\n self.assert_numpy_array_equal(reversed.__array__(), exp)\n\n df = DataFrame({'value': (np.arange(100)+1).astype('int64')})\n df['D'] = pd.cut(df.value, bins=[0,25,50,75,100])\n\n expected = Series([11,'(0, 25]'],index=['value','D'])\n result = df.iloc[10]\n tm.assert_series_equal(result,expected)\n\n expected = DataFrame({'value': np.arange(11,21).astype('int64')},\n index=np.arange(10,20).astype('int64'))\n expected['D'] = pd.cut(expected.value, bins=[0,25,50,75,100])\n result = df.iloc[10:20]\n tm.assert_frame_equal(result,expected)\n\n expected = Series([9,'(0, 
25]'],index=['value','D'])\n result = df.loc[8]\n tm.assert_series_equal(result,expected)\n\n def test_slicing_and_getting_ops(self):\n\n # systematically test the slicing operations:\n # for all slicing ops:\n # - returning a dataframe\n # - returning a column\n # - returning a row\n # - returning a single value\n\n cats = pd.Categorical([\"a\",\"c\",\"b\",\"c\",\"c\",\"c\",\"c\"], categories=[\"a\",\"b\",\"c\"])\n idx = pd.Index([\"h\",\"i\",\"j\",\"k\",\"l\",\"m\",\"n\"])\n values= [1,2,3,4,5,6,7]\n df = pd.DataFrame({\"cats\":cats,\"values\":values}, index=idx)\n\n # the expected values\n cats2 = pd.Categorical([\"b\",\"c\"], categories=[\"a\",\"b\",\"c\"])\n idx2 = pd.Index([\"j\",\"k\"])\n values2= [3,4]\n\n # 2:4,: | \"j\":\"k\",:\n exp_df = pd.DataFrame({\"cats\":cats2,\"values\":values2}, index=idx2)\n\n # :,\"cats\" | :,0\n exp_col = pd.Series(cats,index=idx,name='cats')\n\n # \"j\",: | 2,:\n exp_row = pd.Series([\"b\",3], index=[\"cats\",\"values\"], dtype=\"object\", name=\"j\")\n\n # \"j\",\"cats | 2,0\n exp_val = \"b\"\n\n # iloc\n # frame\n res_df = df.iloc[2:4,:]\n tm.assert_frame_equal(res_df, exp_df)\n self.assertTrue(com.is_categorical_dtype(res_df[\"cats\"]))\n\n # row\n res_row = df.iloc[2,:]\n tm.assert_series_equal(res_row, exp_row)\n tm.assert_isinstance(res_row[\"cats\"], compat.string_types)\n\n # col\n res_col = df.iloc[:,0]\n tm.assert_series_equal(res_col, exp_col)\n self.assertTrue(com.is_categorical_dtype(res_col))\n\n # single value\n res_val = df.iloc[2,0]\n self.assertEqual(res_val, exp_val)\n\n # loc\n # frame\n res_df = df.loc[\"j\":\"k\",:]\n tm.assert_frame_equal(res_df, exp_df)\n self.assertTrue(com.is_categorical_dtype(res_df[\"cats\"]))\n\n # row\n res_row = df.loc[\"j\",:]\n tm.assert_series_equal(res_row, exp_row)\n tm.assert_isinstance(res_row[\"cats\"], compat.string_types)\n\n # col\n res_col = df.loc[:,\"cats\"]\n tm.assert_series_equal(res_col, exp_col)\n self.assertTrue(com.is_categorical_dtype(res_col))\n\n # single value\n res_val = df.loc[\"j\",\"cats\"]\n self.assertEqual(res_val, exp_val)\n\n # ix\n # frame\n #res_df = df.ix[\"j\":\"k\",[0,1]] # doesn't work?\n res_df = df.ix[\"j\":\"k\",:]\n tm.assert_frame_equal(res_df, exp_df)\n self.assertTrue(com.is_categorical_dtype(res_df[\"cats\"]))\n\n # row\n res_row = df.ix[\"j\",:]\n tm.assert_series_equal(res_row, exp_row)\n tm.assert_isinstance(res_row[\"cats\"], compat.string_types)\n\n # col\n res_col = df.ix[:,\"cats\"]\n tm.assert_series_equal(res_col, exp_col)\n self.assertTrue(com.is_categorical_dtype(res_col))\n\n # single value\n res_val = df.ix[\"j\",0]\n self.assertEqual(res_val, exp_val)\n\n # iat\n res_val = df.iat[2,0]\n self.assertEqual(res_val, exp_val)\n\n # at\n res_val = df.at[\"j\",\"cats\"]\n self.assertEqual(res_val, exp_val)\n\n # fancy indexing\n exp_fancy = df.iloc[[2]]\n\n res_fancy = df[df[\"cats\"] == \"b\"]\n tm.assert_frame_equal(res_fancy,exp_fancy)\n res_fancy = df[df[\"values\"] == 3]\n tm.assert_frame_equal(res_fancy,exp_fancy)\n\n # get_value\n res_val = df.get_value(\"j\",\"cats\")\n self.assertEqual(res_val, exp_val)\n\n # i : int, slice, or sequence of integers\n res_row = df.irow(2)\n tm.assert_series_equal(res_row, exp_row)\n tm.assert_isinstance(res_row[\"cats\"], compat.string_types)\n\n res_df = df.irow(slice(2,4))\n tm.assert_frame_equal(res_df, exp_df)\n self.assertTrue(com.is_categorical_dtype(res_df[\"cats\"]))\n\n res_df = df.irow([2,3])\n tm.assert_frame_equal(res_df, exp_df)\n self.assertTrue(com.is_categorical_dtype(res_df[\"cats\"]))\n\n 
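# Sketch of the dtype contract these slicing tests enforce (illustrative): column
# selections keep the 'category' dtype, while a single cell comes back as a plain
# Python scalar rather than a one-element Categorical.
import pandas as pd
df = pd.DataFrame({"cats": pd.Categorical(["a", "c", "b"]), "values": [1, 2, 3]})
col = df.iloc[:, 0]     # Series with dtype 'category'
cell = df.iloc[2, 0]    # the plain string "b"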
res_col = df.icol(0)\n tm.assert_series_equal(res_col, exp_col)\n self.assertTrue(com.is_categorical_dtype(res_col))\n\n res_df = df.icol(slice(0,2))\n tm.assert_frame_equal(res_df, df)\n self.assertTrue(com.is_categorical_dtype(res_df[\"cats\"]))\n\n res_df = df.icol([0,1])\n tm.assert_frame_equal(res_df, df)\n self.assertTrue(com.is_categorical_dtype(res_df[\"cats\"]))\n\n def test_slicing_doc_examples(self):\n\n #GH 7918\n cats = Categorical([\"a\",\"b\",\"b\",\"b\",\"c\",\"c\",\"c\"], categories=[\"a\",\"b\",\"c\"])\n idx = Index([\"h\",\"i\",\"j\",\"k\",\"l\",\"m\",\"n\",])\n values= [1,2,2,2,3,4,5]\n df = DataFrame({\"cats\":cats,\"values\":values}, index=idx)\n\n result = df.iloc[2:4,:]\n expected = DataFrame({\"cats\":Categorical(['b','b'],categories=['a','b','c']),\"values\":[2,2]}, index=['j','k'])\n tm.assert_frame_equal(result, expected)\n\n result = df.iloc[2:4,:].dtypes\n expected = Series(['category','int64'],['cats','values'])\n tm.assert_series_equal(result, expected)\n\n result = df.loc[\"h\":\"j\",\"cats\"]\n expected = Series(Categorical(['a','b','b'],categories=['a','b','c']),index=['h','i','j'])\n tm.assert_series_equal(result, expected)\n\n result = df.ix[\"h\":\"j\",0:1]\n expected = DataFrame({'cats' : Series(Categorical(['a','b','b'],categories=['a','b','c']),index=['h','i','j']) })\n tm.assert_frame_equal(result, expected)\n\n def test_assigning_ops(self):\n\n # systematically test the assigning operations:\n # for all slicing ops:\n # for value in categories and value not in categories:\n # - assign a single value -> exp_single_cats_value\n # - assign a complete row (mixed values) -> exp_single_row\n # - assign multiple rows (mixed values) (-> array) -> exp_multi_row\n # - assign a part of a column with dtype == categorical -> exp_parts_cats_col\n # - assign a part of a column with dtype != categorical -> exp_parts_cats_col\n\n cats = pd.Categorical([\"a\",\"a\",\"a\",\"a\",\"a\",\"a\",\"a\"], categories=[\"a\",\"b\"])\n idx = pd.Index([\"h\",\"i\",\"j\",\"k\",\"l\",\"m\",\"n\"])\n values = [1,1,1,1,1,1,1]\n orig = pd.DataFrame({\"cats\":cats,\"values\":values}, index=idx)\n\n ### the expected values\n # changed single row\n cats1 = pd.Categorical([\"a\",\"a\",\"b\",\"a\",\"a\",\"a\",\"a\"], categories=[\"a\",\"b\"])\n idx1 = pd.Index([\"h\",\"i\",\"j\",\"k\",\"l\",\"m\",\"n\"])\n values1 = [1,1,2,1,1,1,1]\n exp_single_row = pd.DataFrame({\"cats\":cats1,\"values\":values1}, index=idx1)\n\n #changed multiple rows\n cats2 = pd.Categorical([\"a\",\"a\",\"b\",\"b\",\"a\",\"a\",\"a\"], categories=[\"a\",\"b\"])\n idx2 = pd.Index([\"h\",\"i\",\"j\",\"k\",\"l\",\"m\",\"n\"])\n values2 = [1,1,2,2,1,1,1]\n exp_multi_row = pd.DataFrame({\"cats\":cats2,\"values\":values2}, index=idx2)\n\n # changed part of the cats column\n cats3 = pd.Categorical([\"a\",\"a\",\"b\",\"b\",\"a\",\"a\",\"a\"], categories=[\"a\",\"b\"])\n idx3 = pd.Index([\"h\",\"i\",\"j\",\"k\",\"l\",\"m\",\"n\"])\n values3 = [1,1,1,1,1,1,1]\n exp_parts_cats_col = pd.DataFrame({\"cats\":cats3,\"values\":values3}, index=idx3)\n\n # changed single value in cats col\n cats4 = pd.Categorical([\"a\",\"a\",\"b\",\"a\",\"a\",\"a\",\"a\"], categories=[\"a\",\"b\"])\n idx4 = pd.Index([\"h\",\"i\",\"j\",\"k\",\"l\",\"m\",\"n\"])\n values4 = [1,1,1,1,1,1,1]\n exp_single_cats_value = pd.DataFrame({\"cats\":cats4,\"values\":values4}, index=idx4)\n\n #### iloc #####\n ################\n # - assign a single value -> exp_single_cats_value\n df = orig.copy()\n df.iloc[2,0] = \"b\"\n tm.assert_frame_equal(df, 
exp_single_cats_value)\n\n\n df = orig.copy()\n df.iloc[df.index == \"j\",0] = \"b\"\n tm.assert_frame_equal(df, exp_single_cats_value)\n\n\n # - assign a single value not in the current categories set\n def f():\n df = orig.copy()\n df.iloc[2,0] = \"c\"\n self.assertRaises(ValueError, f)\n\n # - assign a complete row (mixed values) -> exp_single_row\n df = orig.copy()\n df.iloc[2,:] = [\"b\",2]\n tm.assert_frame_equal(df, exp_single_row)\n\n # - assign a complete row (mixed values) not in categories set\n def f():\n df = orig.copy()\n df.iloc[2,:] = [\"c\",2]\n self.assertRaises(ValueError, f)\n\n # - assign multiple rows (mixed values) -> exp_multi_row\n df = orig.copy()\n df.iloc[2:4,:] = [[\"b\",2],[\"b\",2]]\n tm.assert_frame_equal(df, exp_multi_row)\n\n def f():\n df = orig.copy()\n df.iloc[2:4,:] = [[\"c\",2],[\"c\",2]]\n self.assertRaises(ValueError, f)\n\n # - assign a part of a column with dtype == categorical -> exp_parts_cats_col\n df = orig.copy()\n df.iloc[2:4,0] = pd.Categorical([\"b\",\"b\"], categories=[\"a\",\"b\"])\n tm.assert_frame_equal(df, exp_parts_cats_col)\n\n with tm.assertRaises(ValueError):\n # different categories -> not sure if this should fail or pass\n df = orig.copy()\n df.iloc[2:4,0] = pd.Categorical([\"b\",\"b\"], categories=[\"a\",\"b\",\"c\"])\n\n with tm.assertRaises(ValueError):\n # different values\n df = orig.copy()\n df.iloc[2:4,0] = pd.Categorical([\"c\",\"c\"], categories=[\"a\",\"b\",\"c\"])\n\n # - assign a part of a column with dtype != categorical -> exp_parts_cats_col\n df = orig.copy()\n df.iloc[2:4,0] = [\"b\",\"b\"]\n tm.assert_frame_equal(df, exp_parts_cats_col)\n\n with tm.assertRaises(ValueError):\n df.iloc[2:4,0] = [\"c\",\"c\"]\n\n #### loc #####\n ################\n # - assign a single value -> exp_single_cats_value\n df = orig.copy()\n df.loc[\"j\",\"cats\"] = \"b\"\n tm.assert_frame_equal(df, exp_single_cats_value)\n\n df = orig.copy()\n df.loc[df.index == \"j\",\"cats\"] = \"b\"\n tm.assert_frame_equal(df, exp_single_cats_value)\n\n # - assign a single value not in the current categories set\n def f():\n df = orig.copy()\n df.loc[\"j\",\"cats\"] = \"c\"\n self.assertRaises(ValueError, f)\n\n # - assign a complete row (mixed values) -> exp_single_row\n df = orig.copy()\n df.loc[\"j\",:] = [\"b\",2]\n tm.assert_frame_equal(df, exp_single_row)\n\n # - assign a complete row (mixed values) not in categories set\n def f():\n df = orig.copy()\n df.loc[\"j\",:] = [\"c\",2]\n self.assertRaises(ValueError, f)\n\n # - assign multiple rows (mixed values) -> exp_multi_row\n df = orig.copy()\n df.loc[\"j\":\"k\",:] = [[\"b\",2],[\"b\",2]]\n tm.assert_frame_equal(df, exp_multi_row)\n\n def f():\n df = orig.copy()\n df.loc[\"j\":\"k\",:] = [[\"c\",2],[\"c\",2]]\n self.assertRaises(ValueError, f)\n\n # - assign a part of a column with dtype == categorical -> exp_parts_cats_col\n df = orig.copy()\n df.loc[\"j\":\"k\",\"cats\"] = pd.Categorical([\"b\",\"b\"], categories=[\"a\",\"b\"])\n tm.assert_frame_equal(df, exp_parts_cats_col)\n\n with tm.assertRaises(ValueError):\n # different categories -> not sure if this should fail or pass\n df = orig.copy()\n df.loc[\"j\":\"k\",\"cats\"] = pd.Categorical([\"b\",\"b\"], categories=[\"a\",\"b\",\"c\"])\n\n with tm.assertRaises(ValueError):\n # different values\n df = orig.copy()\n df.loc[\"j\":\"k\",\"cats\"] = pd.Categorical([\"c\",\"c\"], categories=[\"a\",\"b\",\"c\"])\n\n # - assign a part of a column with dtype != categorical -> exp_parts_cats_col\n df = orig.copy()\n df.loc[\"j\":\"k\",\"cats\"] = 
[\"b\",\"b\"]\n tm.assert_frame_equal(df, exp_parts_cats_col)\n\n with tm.assertRaises(ValueError):\n df.loc[\"j\":\"k\",\"cats\"] = [\"c\",\"c\"]\n\n #### ix #####\n ################\n # - assign a single value -> exp_single_cats_value\n df = orig.copy()\n df.ix[\"j\",0] = \"b\"\n tm.assert_frame_equal(df, exp_single_cats_value)\n\n df = orig.copy()\n df.ix[df.index == \"j\",0] = \"b\"\n tm.assert_frame_equal(df, exp_single_cats_value)\n\n # - assign a single value not in the current categories set\n def f():\n df = orig.copy()\n df.ix[\"j\",0] = \"c\"\n self.assertRaises(ValueError, f)\n\n # - assign a complete row (mixed values) -> exp_single_row\n df = orig.copy()\n df.ix[\"j\",:] = [\"b\",2]\n tm.assert_frame_equal(df, exp_single_row)\n\n # - assign a complete row (mixed values) not in categories set\n def f():\n df = orig.copy()\n df.ix[\"j\",:] = [\"c\",2]\n self.assertRaises(ValueError, f)\n\n # - assign multiple rows (mixed values) -> exp_multi_row\n df = orig.copy()\n df.ix[\"j\":\"k\",:] = [[\"b\",2],[\"b\",2]]\n tm.assert_frame_equal(df, exp_multi_row)\n\n def f():\n df = orig.copy()\n df.ix[\"j\":\"k\",:] = [[\"c\",2],[\"c\",2]]\n self.assertRaises(ValueError, f)\n\n # - assign a part of a column with dtype == categorical -> exp_parts_cats_col\n df = orig.copy()\n df.ix[\"j\":\"k\",0] = pd.Categorical([\"b\",\"b\"], categories=[\"a\",\"b\"])\n tm.assert_frame_equal(df, exp_parts_cats_col)\n\n with tm.assertRaises(ValueError):\n # different categories -> not sure if this should fail or pass\n df = orig.copy()\n df.ix[\"j\":\"k\",0] = pd.Categorical([\"b\",\"b\"], categories=[\"a\",\"b\",\"c\"])\n\n with tm.assertRaises(ValueError):\n # different values\n df = orig.copy()\n df.ix[\"j\":\"k\",0] = pd.Categorical([\"c\",\"c\"], categories=[\"a\",\"b\",\"c\"])\n\n # - assign a part of a column with dtype != categorical -> exp_parts_cats_col\n df = orig.copy()\n df.ix[\"j\":\"k\",0] = [\"b\",\"b\"]\n tm.assert_frame_equal(df, exp_parts_cats_col)\n\n with tm.assertRaises(ValueError):\n df.ix[\"j\":\"k\",0] = [\"c\",\"c\"]\n\n # iat\n df = orig.copy()\n df.iat[2,0] = \"b\"\n tm.assert_frame_equal(df, exp_single_cats_value)\n\n # - assign a single value not in the current categories set\n def f():\n df = orig.copy()\n df.iat[2,0] = \"c\"\n self.assertRaises(ValueError, f)\n\n # at\n # - assign a single value -> exp_single_cats_value\n df = orig.copy()\n df.at[\"j\",\"cats\"] = \"b\"\n tm.assert_frame_equal(df, exp_single_cats_value)\n\n # - assign a single value not in the current categories set\n def f():\n df = orig.copy()\n df.at[\"j\",\"cats\"] = \"c\"\n self.assertRaises(ValueError, f)\n\n # fancy indexing\n catsf = pd.Categorical([\"a\",\"a\",\"c\",\"c\",\"a\",\"a\",\"a\"], categories=[\"a\",\"b\",\"c\"])\n idxf = pd.Index([\"h\",\"i\",\"j\",\"k\",\"l\",\"m\",\"n\"])\n valuesf = [1,1,3,3,1,1,1]\n df = pd.DataFrame({\"cats\":catsf,\"values\":valuesf}, index=idxf)\n\n exp_fancy = exp_multi_row.copy()\n exp_fancy[\"cats\"].cat.set_categories([\"a\",\"b\",\"c\"], inplace=True)\n\n df[df[\"cats\"] == \"c\"] = [\"b\",2]\n tm.assert_frame_equal(df, exp_multi_row)\n\n # set_value\n df = orig.copy()\n df.set_value(\"j\",\"cats\", \"b\")\n tm.assert_frame_equal(df, exp_single_cats_value)\n\n def f():\n df = orig.copy()\n df.set_value(\"j\",\"cats\", \"c\")\n self.assertRaises(ValueError, f)\n\n # Assigning a Category to parts of a int/... 
column uses the values of the Catgorical\n df = pd.DataFrame({\"a\":[1,1,1,1,1], \"b\":[\"a\",\"a\",\"a\",\"a\",\"a\"]})\n exp = pd.DataFrame({\"a\":[1,\"b\",\"b\",1,1], \"b\":[\"a\",\"a\",\"b\",\"b\",\"a\"]})\n df.loc[1:2,\"a\"] = pd.Categorical([\"b\",\"b\"], categories=[\"a\",\"b\"])\n df.loc[2:3,\"b\"] = pd.Categorical([\"b\",\"b\"], categories=[\"a\",\"b\"])\n tm.assert_frame_equal(df, exp)\n\n ######### Series ##########\n orig = Series(pd.Categorical([\"b\",\"b\"], categories=[\"a\",\"b\"]))\n s = orig.copy()\n s[:] = \"a\"\n exp = Series(pd.Categorical([\"a\",\"a\"], categories=[\"a\",\"b\"]))\n tm.assert_series_equal(s, exp)\n\n s = orig.copy()\n s[1] = \"a\"\n exp = Series(pd.Categorical([\"b\",\"a\"], categories=[\"a\",\"b\"]))\n tm.assert_series_equal(s, exp)\n\n s = orig.copy()\n s[s.index > 0] = \"a\"\n exp = Series(pd.Categorical([\"b\",\"a\"], categories=[\"a\",\"b\"]))\n tm.assert_series_equal(s, exp)\n\n s = orig.copy()\n s[[False, True]] = \"a\"\n exp = Series(pd.Categorical([\"b\",\"a\"], categories=[\"a\",\"b\"]))\n tm.assert_series_equal(s, exp)\n\n s = orig.copy()\n s.index = [\"x\", \"y\"]\n s[\"y\"] = \"a\"\n exp = Series(pd.Categorical([\"b\",\"a\"], categories=[\"a\",\"b\"]), index=[\"x\", \"y\"])\n tm.assert_series_equal(s, exp)\n\n # ensure that one can set something to np.nan\n s = Series(Categorical([1,2,3]))\n exp = Series(Categorical([1,np.nan,3]))\n s[1] = np.nan\n tm.assert_series_equal(s, exp)\n\n\n def test_comparisons(self):\n tests_data = [(list(\"abc\"), list(\"cba\"), list(\"bbb\")),\n ([1,2,3], [3,2,1], [2,2,2])]\n for data , reverse, base in tests_data:\n cat_rev = pd.Series(pd.Categorical(data, categories=reverse))\n cat_rev_base = pd.Series(pd.Categorical(base, categories=reverse))\n cat = pd.Series(pd.Categorical(data))\n cat_base = pd.Series(pd.Categorical(base, categories=cat.cat.categories))\n s = Series(base)\n a = np.array(base)\n\n # comparisons need to take categories ordering into account\n res_rev = cat_rev > cat_rev_base\n exp_rev = Series([True, False, False])\n tm.assert_series_equal(res_rev, exp_rev)\n\n res_rev = cat_rev < cat_rev_base\n exp_rev = Series([False, False, True])\n tm.assert_series_equal(res_rev, exp_rev)\n\n res = cat > cat_base\n exp = Series([False, False, True])\n tm.assert_series_equal(res, exp)\n\n # Only categories with same categories can be compared\n def f():\n cat > cat_rev\n self.assertRaises(TypeError, f)\n\n # categorical cannot be compared to Series or numpy array, and also not the other way\n # around\n self.assertRaises(TypeError, lambda: cat > s)\n self.assertRaises(TypeError, lambda: cat_rev > s)\n self.assertRaises(TypeError, lambda: cat > a)\n self.assertRaises(TypeError, lambda: cat_rev > a)\n\n self.assertRaises(TypeError, lambda: s < cat)\n self.assertRaises(TypeError, lambda: s < cat_rev)\n\n self.assertRaises(TypeError, lambda: a < cat)\n self.assertRaises(TypeError, lambda: a < cat_rev)\n\n # Categoricals can be compared to scalar values\n res = cat_rev > base[0]\n tm.assert_series_equal(res, exp)\n\n # And test NaN handling...\n cat = pd.Series(pd.Categorical([\"a\",\"b\",\"c\", np.nan]))\n exp = Series([True, True, True, False])\n res = (cat == cat)\n tm.assert_series_equal(res, exp)\n\n def test_concat(self):\n cat = pd.Categorical([\"a\",\"b\"], categories=[\"a\",\"b\"])\n vals = [1,2]\n df = pd.DataFrame({\"cats\":cat, \"vals\":vals})\n cat2 = pd.Categorical([\"a\",\"b\",\"a\",\"b\"], categories=[\"a\",\"b\"])\n vals2 = [1,2,1,2]\n exp = pd.DataFrame({\"cats\":cat2, \"vals\":vals2}, 
index=pd.Index([0, 1, 0, 1]))\n\n res = pd.concat([df,df])\n tm.assert_frame_equal(exp, res)\n\n # Concat should raise if the two categoricals do not have the same categories\n cat3 = pd.Categorical([\"a\",\"b\"], categories=[\"a\",\"b\",\"c\"])\n vals3 = [1,2]\n df_wrong_categories = pd.DataFrame({\"cats\":cat3, \"vals\":vals3})\n\n def f():\n pd.concat([df,df_wrong_categories])\n self.assertRaises(ValueError, f)\n\n # GH 7864\n # make sure ordering is preserverd\n df = pd.DataFrame({\"id\":[1,2,3,4,5,6], \"raw_grade\":['a', 'b', 'b', 'a', 'a', 'e']})\n df[\"grade\"] = pd.Categorical(df[\"raw_grade\"])\n df['grade'].cat.set_categories(['e', 'a', 'b'])\n\n df1 = df[0:3]\n df2 = df[3:]\n\n self.assert_numpy_array_equal(df['grade'].cat.categories, df1['grade'].cat.categories)\n self.assert_numpy_array_equal(df['grade'].cat.categories, df2['grade'].cat.categories)\n\n dfx = pd.concat([df1, df2])\n dfx['grade'].cat.categories\n self.assert_numpy_array_equal(df['grade'].cat.categories, dfx['grade'].cat.categories)\n\n def test_append(self):\n cat = pd.Categorical([\"a\",\"b\"], categories=[\"a\",\"b\"])\n vals = [1,2]\n df = pd.DataFrame({\"cats\":cat, \"vals\":vals})\n cat2 = pd.Categorical([\"a\",\"b\",\"a\",\"b\"], categories=[\"a\",\"b\"])\n vals2 = [1,2,1,2]\n exp = pd.DataFrame({\"cats\":cat2, \"vals\":vals2}, index=pd.Index([0, 1, 0, 1]))\n\n res = df.append(df)\n tm.assert_frame_equal(exp, res)\n\n # Concat should raise if the two categoricals do not have the same categories\n cat3 = pd.Categorical([\"a\",\"b\"], categories=[\"a\",\"b\",\"c\"])\n vals3 = [1,2]\n df_wrong_categories = pd.DataFrame({\"cats\":cat3, \"vals\":vals3})\n\n def f():\n df.append(df_wrong_categories)\n self.assertRaises(ValueError, f)\n\n def test_na_actions(self):\n\n cat = pd.Categorical([1,2,3,np.nan], categories=[1,2,3])\n vals = [\"a\",\"b\",np.nan,\"d\"]\n df = pd.DataFrame({\"cats\":cat, \"vals\":vals})\n cat2 = pd.Categorical([1,2,3,3], categories=[1,2,3])\n vals2 = [\"a\",\"b\",\"b\",\"d\"]\n df_exp_fill = pd.DataFrame({\"cats\":cat2, \"vals\":vals2})\n cat3 = pd.Categorical([1,2,3], categories=[1,2,3])\n vals3 = [\"a\",\"b\",np.nan]\n df_exp_drop_cats = pd.DataFrame({\"cats\":cat3, \"vals\":vals3})\n cat4 = pd.Categorical([1,2], categories=[1,2,3])\n vals4 = [\"a\",\"b\"]\n df_exp_drop_all = pd.DataFrame({\"cats\":cat4, \"vals\":vals4})\n\n # fillna\n res = df.fillna(value={\"cats\":3, \"vals\":\"b\"})\n tm.assert_frame_equal(res, df_exp_fill)\n\n def f():\n df.fillna(value={\"cats\":4, \"vals\":\"c\"})\n self.assertRaises(ValueError, f)\n\n res = df.fillna(method='pad')\n tm.assert_frame_equal(res, df_exp_fill)\n\n res = df.dropna(subset=[\"cats\"])\n tm.assert_frame_equal(res, df_exp_drop_cats)\n\n res = df.dropna()\n tm.assert_frame_equal(res, df_exp_drop_all)\n\n # make sure that fillna takes both missing values and NA categories into account\n c = Categorical([\"a\",\"b\",np.nan])\n c.set_categories([\"a\",\"b\",np.nan], rename=True, inplace=True)\n c[0] = np.nan\n df = pd.DataFrame({\"cats\":c, \"vals\":[1,2,3]})\n df_exp = pd.DataFrame({\"cats\": Categorical([\"a\",\"b\",\"a\"]), \"vals\": [1,2,3]})\n res = df.fillna(\"a\")\n tm.assert_frame_equal(res, df_exp)\n\n\n def test_astype_to_other(self):\n\n s = self.cat['value_group']\n expected = s\n tm.assert_series_equal(s.astype('category'),expected)\n tm.assert_series_equal(s.astype(com.CategoricalDtype()),expected)\n self.assertRaises(ValueError, lambda : s.astype('float64'))\n\n cat = Series(Categorical(['a', 'b', 'b', 'a', 'a', 'c', 'c', 
'c']))\n exp = Series(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c'])\n tm.assert_series_equal(cat.astype('str'), exp)\n s2 = Series(Categorical.from_array(['1', '2', '3', '4']))\n exp2 = Series([1,2,3,4]).astype(int)\n tm.assert_series_equal(s2.astype('int') , exp2)\n\n # object don't sort correctly, so just compare that we have the same values\n def cmp(a,b):\n tm.assert_almost_equal(np.sort(np.unique(a)),np.sort(np.unique(b)))\n expected = Series(np.array(s.values),name='value_group')\n cmp(s.astype('object'),expected)\n cmp(s.astype(np.object_),expected)\n\n # array conversion\n tm.assert_almost_equal(np.array(s),np.array(s.values))\n\n def test_numeric_like_ops(self):\n\n # numeric ops should not succeed\n for op in ['__add__','__sub__','__mul__','__truediv__']:\n self.assertRaises(TypeError, lambda : getattr(self.cat,op)(self.cat))\n\n # reduction ops should not succeed (unless specifically defined, e.g. min/max)\n s = self.cat['value_group']\n for op in ['kurt','skew','var','std','mean','sum','median']:\n self.assertRaises(TypeError, lambda : getattr(s,op)(numeric_only=False))\n\n # mad technically works because it takes always the numeric data\n\n # numpy ops\n s = pd.Series(pd.Categorical([1,2,3,4]))\n self.assertRaises(TypeError, lambda : np.sum(s))\n\n # numeric ops on a Series\n for op in ['__add__','__sub__','__mul__','__truediv__']:\n self.assertRaises(TypeError, lambda : getattr(s,op)(2))\n\n # invalid ufunc\n self.assertRaises(TypeError, lambda : np.log(s))\n\n def test_cat_tab_completition(self):\n # test the tab completion display\n ok_for_cat = ['categories','codes','ordered','set_categories',\n 'add_categories', 'remove_categories', 'rename_categories',\n 'reorder_categories', 'remove_unused_categories']\n def get_dir(s):\n results = [ r for r in s.cat.__dir__() if not r.startswith('_') ]\n return list(sorted(set(results)))\n\n s = Series(list('aabbcde')).astype('category')\n results = get_dir(s)\n tm.assert_almost_equal(results,list(sorted(set(ok_for_cat))))\n\n def test_pickle_v0_14_1(self):\n cat = pd.Categorical(values=['a', 'b', 'c'],\n levels=['a', 'b', 'c', 'd'],\n name='foobar', ordered=False)\n pickle_path = os.path.join(tm.get_data_path(),\n 'categorical_0_14_1.pickle')\n # This code was executed once on v0.14.1 to generate the pickle:\n #\n # cat = Categorical(labels=np.arange(3), levels=['a', 'b', 'c', 'd'],\n # name='foobar')\n # with open(pickle_path, 'wb') as f: pickle.dump(cat, f)\n #\n self.assert_categorical_equal(cat, pd.read_pickle(pickle_path))\n\n\nif __name__ == '__main__':\n import nose\n nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],\n # '--with-coverage', '--cover-package=pandas.core']\n exit=False)\n"
] | [
[
"pandas.core.common.is_float_dtype",
"numpy.random.choice",
"pandas.compat.u",
"pandas.Timestamp",
"pandas.concat",
"pandas.compat.range",
"pandas.core.common.isnull",
"numpy.dtype",
"pandas.read_pickle",
"pandas.core.common.is_integer_dtype",
"numpy.log",
"pandas.DataFrame",
"pandas.core.common.CategoricalDtype",
"pandas.util.testing.assertRaises",
"pandas.PeriodIndex",
"pandas.util.testing.assert_index_equal",
"pandas.util.testing.assert_isinstance",
"pandas.util.testing.get_data_path",
"pandas.core.common.is_categorical_dtype",
"numpy.random.randint",
"pandas.util.testing.assert_produces_warning",
"numpy.arange",
"pandas.pivot_table",
"pandas.util.testing.assert_equal",
"numpy.array",
"pandas.util.testing.assert_almost_equal",
"pandas.Categorical.from_codes",
"pandas.compat.lrange",
"pandas.MultiIndex.from_product",
"pandas.Categorical.from_array",
"pandas.isnull",
"pandas.Index",
"pandas.core.common.is_object_dtype",
"pandas.cut",
"pandas.util.testing.assert_frame_equal",
"numpy.asarray",
"numpy.isnan",
"numpy.sum",
"pandas.date_range",
"numpy.random.permutation",
"pandas.util.testing.assert_series_equal",
"pandas.Categorical",
"pandas.Series",
"numpy.unique"
]
] |
vishalbelsare/cplvm | [
"d8f715258b2c363beb2d59e95e5b5b9e73b503a7"
] | [
"experiments/simulation_experiments/hypothesis_testing/cplvm_global_bfs_varyp.py"
] | [
"import matplotlib.pyplot as plt\nimport numpy as np\nimport seaborn as sns\nimport pandas as pd\nimport os\nfrom scipy.stats import poisson\nfrom scipy.special import logsumexp\nfrom sklearn.metrics import roc_curve\nfrom sklearn.metrics import roc_auc_score\n\nfrom cplvm import CPLVM\nfrom cplvm import CPLVMLogNormalApprox\n\nimport functools\nimport warnings\nimport tensorflow.compat.v2 as tf\n\n# import os\n# os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\n\nfrom tensorflow_probability import distributions as tfd\nfrom tensorflow_probability import bijectors as tfb\n\nimport matplotlib\n\nfont = {\"size\": 18}\nmatplotlib.rc(\"font\", **font)\n\nwarnings.filterwarnings(\"ignore\")\n\n\nif __name__ == \"__main__\":\n\n p_list = [10, 100, 1000]\n NUM_REPEATS = 50\n results_alternative = np.empty((NUM_REPEATS, len(p_list)))\n results_null = np.empty((NUM_REPEATS, len(p_list)))\n\n for ii in range(NUM_REPEATS):\n\n num_datapoints_x = 100\n num_datapoints_y = 100\n latent_dim_shared = 3\n latent_dim_foreground = 3\n\n bfs_experiment = []\n # bfs_control = []\n bfs_shuffled = []\n\n for jj, data_dim in enumerate(p_list):\n\n cplvm_for_data = CPLVM(\n k_shared=latent_dim_shared, k_foreground=latent_dim_foreground\n )\n\n concrete_clvm_model = functools.partial(\n cplvm_for_data.model,\n data_dim=data_dim,\n num_datapoints_x=num_datapoints_x,\n num_datapoints_y=num_datapoints_y,\n counts_per_cell_X=1,\n counts_per_cell_Y=1,\n is_H0=False,\n offset_term=True,\n )\n\n model = tfd.JointDistributionCoroutineAutoBatched(concrete_clvm_model)\n\n deltax, sf_x, sf_y, s, zx, zy, w, ty, X_sampled, Y_sampled = model.sample()\n # sf_x, sf_y, s, zx, zy, w, ty, X_sampled, Y_sampled = model.sample()\n\n X, Y = X_sampled.numpy(), Y_sampled.numpy()\n\n ## Run H0 and H1 models on data\n cplvm = CPLVM(\n k_shared=latent_dim_shared, k_foreground=latent_dim_foreground\n )\n approx_model_H0 = CPLVMLogNormalApprox(\n X,\n Y,\n latent_dim_shared,\n latent_dim_foreground,\n offset_term=True,\n is_H0=True,\n )\n approx_model_H1 = CPLVMLogNormalApprox(\n X,\n Y,\n latent_dim_shared,\n latent_dim_foreground,\n offset_term=True,\n is_H0=False,\n )\n H1_results = cplvm._fit_model_vi(\n X, Y, approx_model_H1, offset_term=True, is_H0=False\n )\n H0_results = cplvm._fit_model_vi(\n X, Y, approx_model_H0, offset_term=True, is_H0=True\n )\n\n H1_elbo = (\n -1\n * H1_results[\"loss_trace\"][-1].numpy()\n / (num_datapoints_x + num_datapoints_y)\n )\n\n H0_elbo = (\n -1\n * H0_results[\"loss_trace\"][-1].numpy()\n / (num_datapoints_x + num_datapoints_y)\n )\n\n curr_bf = H1_elbo - H0_elbo\n print(\"p: {0: <10} BF treatment: {1: .2f}\".format(data_dim, curr_bf))\n bfs_experiment.append(curr_bf)\n results_alternative[ii, jj] = curr_bf\n\n ### Shuffle background and foreground labels\n\n all_data = np.concatenate([X, Y], axis=1)\n shuffled_idx = np.random.permutation(\n np.arange(num_datapoints_x + num_datapoints_y)\n )\n x_idx = shuffled_idx[:num_datapoints_x]\n y_idx = shuffled_idx[num_datapoints_x:]\n X = all_data[:, x_idx]\n Y = all_data[:, y_idx]\n\n ## Run H0 and H1 models on data\n ## Run H0 and H1 models on data\n cplvm = CPLVM(\n k_shared=latent_dim_shared, k_foreground=latent_dim_foreground\n )\n approx_model_H0 = CPLVMLogNormalApprox(\n X,\n Y,\n latent_dim_shared,\n latent_dim_foreground,\n offset_term=True,\n is_H0=True,\n )\n approx_model_H1 = CPLVMLogNormalApprox(\n X,\n Y,\n latent_dim_shared,\n latent_dim_foreground,\n offset_term=True,\n is_H0=False,\n )\n H1_results = cplvm._fit_model_vi(\n X, Y, approx_model_H1, 
offset_term=True, is_H0=False\n )\n H0_results = cplvm._fit_model_vi(\n X, Y, approx_model_H0, offset_term=True, is_H0=True\n )\n\n H1_elbo = (\n -1\n * H1_results[\"loss_trace\"][-1].numpy()\n / (num_datapoints_x + num_datapoints_y)\n )\n\n H0_elbo = (\n -1\n * H0_results[\"loss_trace\"][-1].numpy()\n / (num_datapoints_x + num_datapoints_y)\n )\n\n curr_bf = H1_elbo - H0_elbo\n print(\"p: {0: <10} BF shuffled: {1: .2f}\".format(data_dim, curr_bf))\n bfs_shuffled.append(curr_bf)\n results_null[ii, jj] = curr_bf\n\n # bfs_control = np.array(bfs_control)[~np.isnan(bfs_control)]\n bfs_experiment = list(np.array(bfs_experiment)[~np.isnan(bfs_experiment)])\n bfs_shuffled = list(np.array(bfs_shuffled)[~np.isnan(bfs_shuffled)])\n # tpr_true, fpr_true, thresholds_true = roc_curve(y_true=np.concatenate([np.zeros(len(bfs_control)), np.ones(len(bfs_experiment))]), y_score=np.concatenate([bfs_control, bfs_experiment]))\n\n # print(np.concatenate([np.zeros(len(bfs_shuffled)), np.ones(len(bfs_experiment))]))\n # print(np.concatenate([bfs_shuffled, bfs_experiment]))\n # tpr_shuffled, fpr_shuffled, thresholds_shuffled = roc_curve(\n # y_true=np.concatenate(\n # [np.zeros(len(bfs_shuffled)), np.ones(len(bfs_experiment))]\n # ),\n # y_score=np.concatenate([bfs_shuffled, bfs_experiment]),\n # )\n\n # np.save(\"../out/cai/bfs_experiment_p{}.npy\".format(data_dim), bfs_experiment)\n # np.save(\"../out/cai/bfs_shuffled_p{}.npy\".format(data_dim), bfs_shuffled)\n\n # auc = roc_auc_score(\n # y_true=np.concatenate(\n # [np.zeros(len(bfs_shuffled)), np.ones(len(bfs_experiment))]\n # ),\n # y_score=np.concatenate([bfs_shuffled, bfs_experiment]),\n # )\n\n results_alternative_df = pd.melt(\n pd.DataFrame(results_alternative[: ii + 1, :], columns=p_list)\n )\n results_alternative_df[\"context\"] = \"Perturbed\"\n results_null_df = pd.melt(\n pd.DataFrame(results_null[: ii + 1, :], columns=p_list)\n )\n results_null_df[\"context\"] = \"Shuffled null\"\n\n results_df = pd.concat([results_alternative_df, results_null_df], axis=0)\n\n results_df.to_csv(\"../out/data_dimension_vs_ebfs.csv\")\n\n plt.figure(figsize=(7, 7))\n g = sns.lineplot(\n data=results_df, x=\"variable\", y=\"value\", hue=\"context\", err_style=\"bars\"\n )\n g.legend_.set_title(None)\n plt.xlabel(\"Data dimension\")\n plt.ylabel(\"EBF\")\n plt.tight_layout()\n plt.savefig(\"../out/data_dimension_vs_ebfs.png\")\n # plt.show()\n plt.close()\n # import ipdb; ipdb.set_trace()\n"
] | [
[
"numpy.concatenate",
"numpy.array",
"numpy.isnan",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig",
"pandas.DataFrame",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure",
"matplotlib.rc",
"numpy.arange",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.ylabel",
"pandas.concat"
]
] |
ihabbou/ml-algos | [
"4a12e64b6d18e88dbb068df2b1aa523522bbc8d7"
] | [
"src/ml_algos/utils.py"
] | [
"import numpy as np\n\n\ndef distance(point1, point2):\n return np.linalg.norm(point1 - point2)\n"
] | [
[
"numpy.linalg.norm"
]
] |
xuchen-ethz/continuous_view_synthesis | [
"c6fc39e5ef4a21b8b33154404c71c4d53625fe8c"
] | [
"demo/demo_base.py"
] | [
"import matplotlib.backends.backend_tkagg as tkagg\nimport matplotlib.pyplot as plt\nfrom matplotlib.widgets import Button\nfrom tkinter.filedialog import askopenfilename\nimport tkinter; tkinter.Tk().withdraw()\n\nfrom models.base_model import BaseModel\nimport numpy as np\nimport cv2\n\nclass Demo():\n def __init__(self, opt):\n self.opt = opt\n self.model = BaseModel(opt)\n self.load_image()\n self.predict()\n self.init_plot()\n\n def init_plot(self):\n\n width = 1024 # pixels\n height = 1024\n margin = 50 # pixels\n dpi = 170. # dots per inch\n figsize = ((width + 10 * margin) / dpi, (height + 2 * margin) / dpi) # inches\n left = 5 * margin / dpi / figsize[0] # axes ratio\n bottom = margin / dpi / figsize[1]\n\n self.fig = plt.figure(figsize=figsize, dpi=dpi)\n self.fig.subplots_adjust(left=left, bottom=bottom, right=1. - left, top=1. - bottom)\n\n plt.axis('off')\n plt.rcParams['keymap.save'] = ''\n # input image\n self.ax_in_img = plt.axes()\n self.ax_in_img.axis('off')\n self.im_input = plt.imshow(self.output, animated=True)\n\n self.ax_next = plt.axes([0.05, 0.1, 0.15, 0.04])\n button_next = Button(self.ax_next, 'Load image', color='lightgray', hovercolor='0.975')\n button_next.on_clicked(self.load_image_pressed)\n\n self.cidpress = self.fig.canvas.mpl_connect('button_press_event', self.on_press)\n self.cidrelease = self.fig.canvas.mpl_connect('button_release_event', self.on_release)\n self.cidmotion = self.fig.canvas.mpl_connect('motion_notify_event', self.on_motion)\n self.cidzoom = self.fig.canvas.mpl_connect('scroll_event', self.on_scroll)\n self.cidkpress = self.fig.canvas.mpl_connect('key_press_event', self.on_key_press)\n self.fig.canvas.toolbar.set_cursor = lambda cursor: None # disable the annoying busy cursor\n self.is_pressed = False\n\n tkagg.defaultcursor = \"crosshair\"\n self.fig.canvas.toolbar.set_cursor(1)\n self.fig.canvas.toolbar.set_cursor = lambda cursor: None # disable the annoying busy cursor\n\n self.timer = self.fig.canvas.new_timer(interval=50, callbacks=[(self.on_timer, [], {})])\n self.timer.start()\n\n plt.show()\n\n def on_scroll(self,event):\n # get the current x and y limits\n cur_xlim = self.ax_in_img.get_xlim()\n cur_ylim = self.ax_in_img.get_ylim()\n cur_xrange = (cur_xlim[1] - cur_xlim[0]) * .5\n cur_yrange = (cur_ylim[1] - cur_ylim[0]) * .5\n if event.button == 'up':\n scale_factor = 1 / 1.05\n elif event.button == 'down':\n scale_factor = 1.05\n else:\n scale_factor = 1\n self.ax_in_img.set_xlim([512 - cur_xrange * scale_factor,\n 512 + cur_xrange * scale_factor])\n self.ax_in_img.set_ylim([512 - cur_yrange * scale_factor,\n 512 + cur_yrange * scale_factor])\n plt.draw() # force re-draw\n\n def on_press(self, event):\n pass\n\n def on_motion(self, event):\n pass\n\n def on_release(self, event):\n pass\n\n def on_key_press(self, event):\n pass\n\n def on_timer(self):\n self.predict()\n self.update_figure()\n\n def load_image_pressed(self,event):\n self.load_image()\n self.predict()\n self.update_figure()\n\n def load_image(self):\n filename = askopenfilename()\n self.data = {}\n self.image = cv2.imread(filename)\n self.pose = np.array([0, 0, 0, 0, 0, 0])\n self.pose_cur = self.pose.astype(np.float)\n self.z = None\n self.predict()\n\n def update_figure(self):\n self.im_input.set_array(self.output)\n self.fig.canvas.draw_idle()\n\n def predict(self):\n self.output = self.model.get_high_res(self.image,self.pose_cur,self.z)"
] | [
[
"matplotlib.widgets.Button",
"numpy.array",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.draw",
"matplotlib.pyplot.show",
"matplotlib.pyplot.axes",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.imshow"
]
] |
edsumpena/SkyStone | [
"dab78c1687547d19171626ec3287ab37082bb4c2"
] | [
"tools/parse_ftc.py"
] | [
"import sys\nimport os\nimport string\nimport math\nfrom math import pi\n\nfrom matplotlib import pyplot as plt\nimport numpy as nm\nfrom datetime import datetime\nimport array\n\nscript_dir=os.path.dirname(os.path.abspath(__file__));\n\nauto_time=[];\nauto_time2=[];\nauto_time_raw=[]; # offset time;\ncreate_time=[];\ncreate_time.append(0);\nauto_x=[]; # X pose in each step;\nauto_y=[];\nauto_h=[];\nauto_x1=[]; # X pose in each step;\nauto_y1=[];\nauto_h1=[];\nlast_time_offset = 0;\nstart_time=datetime.now();\nend_time=start_time;\ninit_time=start_time;\nlast_time=start_time;\nheading_imu=[];\nheading_odom=[];\nimu_time=[];\ndata=[];\ndata_time=[];\ndata_time_str=[]\ndata_x=[]; # xError in follower;\ndata_x_raw=[]; # current Pose\ndata_y=[];\ndata_y_raw=[]\ndata_h=[]; # headingError\ndata_h_rad=[];\ndata_h_raw=[];\ndata_h_raw_rad=[];\ndata_v_err=[];\ndata_v_target=[];\ndata_v_actual=[];\ndata_power=[];\npower_time=[];\nmax_power=0;\nmax_x_err=0;\nmax_y_err=0;\nmax_heading_err=0;\nmax_final_x_err=0;\nmax_final_y_err=0;\nmax_final_heading_err=0;\nmax_v=0;\np_name='unknown';\nmax_power_time = 0;\nprint_summary=0;\nlast_x_err=0;\nlast_y_err=0;\nlast_h_err=0;\nfilepath = sys.argv[1];\narg_c = len(sys.argv);\nif (arg_c>=3):\n print_summary = 1;\n\ndef get_time(t):\n t = t.split(' ')\n #print(t)\n t_s = ' ';\n t_s = t_s.join(t[:2])\n #print(t_s)\n t = datetime.strptime(t_s, '%m-%d %H:%M:%S.%f')\n return t;\n\nwith open(filepath) as fp:\n line = fp.readline()\n #print(line)\n while line:\n line = fp.readline();\n #print(line)\n if (\"StandardTrackingWheelLocalizer: using IMU:\" in line):\n t = line.split(\"StandardTrackingWheelLocalizer\");\n t = get_time(t[0]);\n t_delta = t-start_time;\n imu_time.append(t_delta.total_seconds());\n\n t = line.strip().split('StandardTrackingWheelLocalizer');\n t = t[1].split(' ');\n #print(t)\n #print(t[12], t[15]);\n t1 = float(t[5]);\n t2 = float(t[8]);\n if (t2>pi):\n t2 = (-1.0) * (2*pi - t2);\n #if (t1 < 0):\n # t1 = t1 + 2 * pi;\n #heading_imu.append(math.degrees(t1));\n #heading_odom.append(math.degrees(t2));\n heading_imu.append(t1);\n heading_odom.append(t2);\n\n if ((\"SampleMecanumDriveBase\" in line) or (\"BaseClass\" in line)) and (\"update: x\" in line):\n #print(line)\n t1 = line.split(\"update: x\");\n t2 = t1[1].strip();\n t3 = t2.split(' ');\n t = float(t3[0]);\n data_x_raw.append(t);\n\n curr_time = get_time(t1[0])\n delta = curr_time - start_time;\n data_time.append(delta.total_seconds());\n data_time_str.append(curr_time)\n last_time_offset = delta.total_seconds();\n end_time = curr_time;\n if ((\"SampleMecanumDriveBase\" in line) or (\"BaseClass\" in line)) and (\" y \" in line):\n t1 = line.split(\" y \");\n t2 = t1[1].strip();\n t3 = t2.split(' ');\n t = t3[0];\n data_y_raw.append(float(t));\n #print(\"y: \", t);\n if ((\"SampleMecanumDriveBase\" in line) or (\"BaseClass\" in line)) and (\": heading \" in line):\n t1 = line.split(\" heading \");\n t2 = t1[1].strip();\n t3 = t2.split(' ');\n #print(t3)\n t = t3[0];\n data_h_raw_rad.append(float(t));\n data_h_raw.append(math.degrees(float(t)));\n #print(\"y: \", t);\n\n if ((\"SampleMecanumDriveBase\" in line) or (\"BaseClass\" in line)) and (\"Error\" in line):\n #t = line.strip();\n if (\"xError\" in line):\n t1 = line.split('xError')\n #print(t1)\n t2 = t1[1]\n t = float(t2)\n data_x.append(t)\n last_x_err = t;\n if t > max_x_err:\n max_x_err = t;\n if (\"yError\" in line):\n t1 = line.split('yError')\n t2 = t1[1]\n t = float(t2)\n data_y.append(t)\n last_y_err = t;\n if t > max_y_err:\n 
max_y_err = t;\n if (\"headingError\" in line):\n t1 = line.split('headingError')\n t2 = t1[1]\n data_h_rad.append(float(t2))\n t = math.degrees(float(t2));\n last_h_err = t;\n data_h.append(t)\n if t > max_heading_err:\n max_heading_err = t;\n #####################################\n if (\"DriveVelocityPIDTuner: error 0\" in line):\n #print(t);\n t1 = line.split('error 0');\n data_v_err.append(float(t1[1]))\n if (\"DriveVelocityPIDTuner: targetVelocity\" in line):\n t1 = line.split('targetVelocity')\n data_v_target.append(float(t1[1]))\n if (\"DriveVelocityPIDTuner: velocity 0\" in line):\n t1 = line.split('velocity 0');\n t2 = t1[1];\n data_v_actual.append(float(t2))\n t = float(t2.strip());\n t = abs(t)\n if t > max_v:\n max_v = t;\n #############################################\n if (\"setMotorPowers\" in line) and (\"leftFront\" in line):\n #print(line)\n t = line.split('setMotorPowers');\n t1 = t[1].strip().split(' ');\n #print(t1)\n t2 = t1[1]\n t3 = float(t2)\n data_power.append(t3)\n t_time = get_time(t[0])\n d = t_time - start_time;\n power_time.append(d.total_seconds());\n #print(t3)\n if abs(t3)>abs(max_power):\n max_power=t3;\n max_power_time = t_time;\n t = max_power_time-start_time\n max_power_delta = t.total_seconds()\n ###########################################\n if (\"AutonomousPath: start new step: step\" in line):\n #print(line.rstrip())\n t = line.split('currentPos (');\n t1 = get_time(t[0]);\n last_time = t1;\n auto_time_raw.append(t1);\n t_delta = t1-start_time;\n auto_time2.append(t_delta.total_seconds());\n auto_time.append(last_time_offset);\n t = t[1].split(', ');\n #print(t)\n auto_x.append(float(t[0].rstrip()));\n auto_y.append(float(t[1].rstrip()));\n\n t2 = line.split('errorPos (');\n t3 = t2[1].split(', ');\n #print(t3);\n auto_x1.append(float(t3[0].rstrip()));\n auto_y1.append(float(t3[1].rstrip()));\n\n t=t[2];\n t=t[:-3].strip();\n auto_h.append(float(t));\n\n t = line.split(\"errorPos (\");\n t = (t[1][:-4]);\n t = t.split(', ');\n x = float(t[0]);\n y = float(t[1]);\n z = float(t[2]);\n #print(x, y, z);\n if (abs(x) > abs(max_final_x_err)):\n max_final_x_err = x;\n if (abs(y) > abs(max_final_y_err)):\n max_final_y_err = y;\n if (abs(z) > abs(max_final_heading_err)):\n max_final_heading_err = z;\n\n if (\"AutonomousPath: drive and builder created, initialized with pose\" in line) or (\"AutonomousPath: drive and builder reset, initialized with pose\" in line):\n #print(line.rstrip())\n t = line.split('AutonomousPath');\n t1 = get_time(t[0]);\n t_delta = t1-last_time\n #print(\"drive reset takes: \", t_delta.total_seconds());\n create_time.append(t_delta.total_seconds());\n ###########################################\n if (\"Robocol : received command: CMD_RUN_OP_MODE\" in line):\n t = line.strip().split(' ');\n p_name=t[-1]\n t = line.strip().split('Robotcol')\n init_time = get_time(t[0])\n #print(start_time)\n if (\"received command: CMD_RUN_OP_MODE\" in line):\n t = line.split('CMD_RUN_OP_MODE');\n start_time = get_time(t[0])\n # print(start_time)\n if (\"RobotCore\" in line) and (\"STOP - OPMODE\" in line):\n break;\n\n for i in range(len(data_x)):\n if (i%10==0):\n print(\"time\\t\\t\\ttime offset\\t xErr\\t\\t\\t X \\t\\t yErr\\t\\t \\t\\tY \\t\\t headingErr\\tHeading(degree)\\t\\t headingErr(rad) \\t heading(rad)\");\n print(data_time_str[i], \" \", data_time[i], \" \", data_x[i], \" \", data_x_raw[i], \" \", data_y[i], \" \", data_y_raw[i], \" \", data_h[i], \" \", data_h_raw[i], \" \", data_h_rad[i], \" \", data_h_raw_rad[i]);\n\n 
print(\"-----------------moving steps in autonomous------------------------\");\n for i in range(len(auto_time)):\n if (i==0):\n print(\"time\\t\\t\\ttime offset X\\t\\tY\\theading reset_time duration\");\n print(auto_time_raw[i], \" \", auto_time[i], \" \", auto_x[i], \" \", auto_y[i], \" \", auto_h[i], \" \", create_time[i], \"\\t 0\");\n else:\n print(auto_time_raw[i], \" \", auto_time[i], \" \", auto_x[i], \" \", auto_y[i], \" \", auto_h[i], \" \", create_time[i], \"\\t\", auto_time[i]-auto_time[i-1]);\n\n for i in range(len(data_v_err)):\n if (i%10==0):\n print(\"data_v, data_v_target, data_v_actual\");\n print(data_v_err[i].strip(), \" \", data_v_target[i].strip(), \" \", data_v_actual[i].strip());\n\nfp.close();\n\n\nt = len(data_x);\nif (t!=len(data_y) or t!=len(data_h) or t!=len(data_h_raw)) or (t==0):\n print(\"double check the parsing!!!\", t, \" \", len(data_h_raw), \" \", len(data_h), \" \", len(data_time));\n sys.exit()\nelse:\n print(\"parsing looks good, len: \", t);\n\n#os.system('cat ' + filepath + ' |grep SampleMecanumDriveBase | grep update |grep x');\nprint(\"-----------------moving steps in autonomous------------------------\");\nwith open(filepath) as fp:\n line = fp.readline()\n while line:\n line = fp.readline();\n if ((\"start new step: step\" in line) or (\"pose correction\" in line)):\n print(line.strip())\nfp.close();\n############### better than grep\n\nwith open(filepath) as fp:\n line = fp.readline()\n while line:\n line = fp.readline();\n if (\"IMUBufferReader: IMU gyro time delta\" in line):\n print(line.strip())\nfp.close();\n\nprint(\"===============summary==========================\")\nt = max_power_time.strftime('%H:%M:%S.%f');\nmax_power_time = t[:-3];\nprint(\"max power to wheel: \", max_power, \" timestamp: \", max_power_time, \" timeoffset: \", max_power_delta)\n\nprint(\"max_x_err (inches): \", max_x_err)\nprint(\"max_y_err (inches): \", max_y_err)\nprint(\"max_heading_err (degrees) \", max_heading_err)\nprint(\"max_velocity : \", max_v)\nduration = end_time - start_time;\nprint(\"init time: \", init_time);\nprint(\"start time: \", start_time, \" end time: \", end_time, \" run duration(seconds): \", duration.total_seconds());\nprint(\"\\nDrivetrain parameters:\");\nprint(\"program : \", p_name)\n\nwith open(filepath) as fp:\n line = fp.readline()\n while line:\n line = fp.readline();\n if ((\"DriveConstants\" in line) and (\"maxVel\" in line) and (\"maxAccel\" in line)):\n print(line.strip())\n if ((\"DriveConstants: Strafing paramters\" in line)):\n print(line.strip())\n if ((\"DriveConstants: test distance\" in line)):\n print(line.strip())\n if ((\"DriveConstants\" in line) and (\"PID\" in line)):\n print(line.strip())\n if ((\"DriveConstants: using IMU in localizer?\" in line)):\n print(line.strip())\n if ((\"DriveConstants: debug.ftc.brake\" in line)):\n print(line.strip())\n if ((\"DriveConstants: debug.ftc.resetfollow\" in line)):\n print(line.strip())\n if ((\"DriveConstants: using Odometry\" in line)):\n print(line.strip())\n if ((\"currentPos\" in line) and (\"errorPos\" in line)):\n print(line.strip())\n if ((\"AutonomousPath:\" in line) and (\"xml\" in line)):\n print(line.strip())\n fp.close();\nprint(\"max error: \", max_final_x_err, max_final_y_err, max_final_heading_err);\nprint(\"last error: \", last_x_err, last_y_err, last_h_err);\n#print(\"start time(in miliseconds): \", start_time.timestamp() * 1000, \" end time: \", end_time.timestamp() * 1000);\nprint(filepath);\n\nif print_summary != 0:\n plt.style.use('ggplot')\n 
#plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc='lower left', ncol=3, mode=\"expand\", borderaxespad=0.);\n plt.plot(data_time, data_x, label=\"xError\");\n plt.plot(data_time, data_y, label=\"yError\");\n plt.plot(data_time, data_h, label=\"headingError\");\n plt.scatter(auto_time, [0 for i in range(len(auto_time))], zorder=2); # mark the drive reset;\n plt.xlabel('time(seconds)');\n plt.ylabel('inches for x, y, degrees for heading');\n plt.legend();\n\n plt.figure();\n plt.plot(data_time, nm.add(data_x, data_x_raw), label=\"target X\");\n plt.plot(data_time, data_x_raw, 'g-', label=\"actual X\")\n plt.scatter(auto_time, auto_x, zorder=2)\n plt.scatter(auto_time, nm.add(auto_x, auto_x1), zorder=2)\n plt.xlabel('time (seconds)');\n plt.ylabel('distance(inches)');\n plt.legend();\n plt.figure();\n\n plt.plot(data_time, nm.add(data_y, data_y_raw), label=\"target Y\");\n plt.plot(data_time, data_y_raw, 'g-', label=\"actual Y\")\n plt.scatter(auto_time, auto_y, zorder=2)\n plt.scatter(auto_time, nm.add(auto_y, auto_y1), zorder=2)\n plt.xlabel('time');\n plt.ylabel('inches');\n #plt.ylim([-10, 10])\n plt.legend();\n #plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc='lower left', ncol=2, mode=\"expand\", borderaxespad=0.)\n\n plt.figure();\n plt.plot(data_time, nm.add(data_h_rad, data_h_raw_rad), label=\"target heading\");\n plt.plot(data_time, data_h_raw_rad, label=\"actual heading\")\n plt.xlabel('time');\n plt.ylabel('radius');\n plt.scatter(auto_time, auto_h, zorder=2)\n #plt.ylim([-30, 30])\n plt.legend();\n #plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc='lower left', ncol=2, mode=\"expand\", borderaxespad=0.)\n ##################\n plt.figure();\n plt.plot(power_time, data_power, label='power to wheel');\n plt.scatter(auto_time, [0 for i in range(len(auto_time))], zorder=2)\n plt.xlabel('time(seconds)');\n plt.ylabel('power');\n #####################################################################################################\n plt.figure();\n #plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc='lower left', ncol=2, mode=\"expand\", borderaxespad=0.)plt.plot(nm.add(data_x, data_x_raw), nm.add(data_y, data_y_raw), label=\"target path\");\n plt.plot(data_x_raw, data_y_raw, label=\"actual path\");\n plt.xlabel('X(inches)');\n plt.ylabel('Y(inches)');\n plt.scatter(auto_x, auto_y, zorder=2);\n plt.xlim([-70, 70])\n plt.ylim([-70, 70])\n plt.legend();\n #############################\n if (len(heading_odom)>0):\n plt.figure();\n plt.plot(imu_time, heading_imu, label=\"IMU\");\n plt.plot(imu_time, heading_odom, label='Odom\"');\n #for i in range(len(heading_odom)):\n #print(imu_time[i], heading_imu[i], heading_odom[i]);\n plt.legend();\n plt.xlabel('time(seconds)');\n plt.ylabel('heading(radius)');\n plt.ylim([0, 7.0])\n\n #####################################################################################################\n plt.figure();\n im = plt.imread(script_dir+\"\\\\skystone_field.png\");\n #plt.xlim([-100, 700])\n #plt.ylim([-100, 700])\n #plt.xticks([])\n #plt.yticks([])\n #plt.plot(data_x_raw, data_y_raw, label=\"actual path\");\n new_x = [];\n new_y = [];\n for i in range(len(data_x_raw)):\n new_x.append(300 - data_x_raw[i] * 100/24);\n new_y.append(300 - data_y_raw[i] * 100/24);\n #print(new_x[i], new_y[i]);\n #plt.scatter(new_y[i], 600-new_x[i], zorder=2);\n #plt.plot(new_y[i], 600-new_x[i])\n plt.plot(new_y, new_x)\n implot = plt.imshow(im);\n\n plt.show();\n #plt.waitforbuttonpress(1); input();\n #plt.close('all')\n"
] | [
[
"numpy.add",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.imread",
"matplotlib.pyplot.imshow"
]
] |
aklnk/xaesa | [
"9d2a2b0e69a052a89acc28f206621c107f101a7f"
] | [
"compare.py"
] | [
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Oct 20 11:07:03 2016\r\n\r\n@author: sasha\r\n\"\"\"\r\n\r\nfrom .init import QTVer\r\n\r\nif QTVer == 4:\r\n from PyQt4 import QtGui, QtCore\r\n from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas\r\n from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar\r\n \r\nif QTVer == 5:\r\n from PyQt5 import QtWidgets as QtGui\r\n from PyQt5 import QtCore\r\n from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\r\n from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar\r\n\r\n\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\nclass CompareWindow(QtGui.QDialog):\r\n\r\n def __init__(self):\r\n super(CompareWindow, self).__init__()\r\n \r\n self.exafs = []\r\n self.k = []\r\n\r\n self.r = []\r\n self.fr = []\r\n self.fi = []\r\n\r\n self.bftk = []\r\n self.bftexafs = []\r\n\r\n self.labels = []\r\n\r\n self.energy = []\r\n self.mju = []\r\n self.xes = []\r\n\r\n self.E0 = 0\r\n \r\n self.lines = []\r\n self.lines1 = []\r\n #mode\r\n #0 - exafs\r\n #1 - ft\r\n #2 - bft\r\n #3 - mju\r\n #10 - Xes original\r\n #11 - XES area Normalized\r\n #12 - XES Max Normalized\r\n self.mode = 0\r\n\r\n self.initUI()\r\n\r\n def initUI(self):\r\n \r\n #Figures \r\n self.fig = plt.figure(1, figsize=(15, 6))\r\n self.ax_exafs = self.fig.add_subplot(111)\r\n\r\n self.canv = FigureCanvas(self.fig)\r\n self.tbar = NavigationToolbar(self.canv, self)\r\n \r\n# plt.tight_layout() \r\n \r\n self.btnCancel = QtGui.QPushButton('Exit')\r\n self.btnCancel.clicked.connect(self.cancel)\r\n \r\n lfig = QtGui.QVBoxLayout()\r\n lfig.addWidget(self.tbar)\r\n lfig.addWidget(self.canv)\r\n \r\n lfig.addWidget(self.btnCancel)\r\n \r\n self.setLayout(lfig)\r\n \r\n self.canv.draw()\r\n \r\n #wid.setLayout(lfig)\r\n \r\n def plot(self):\r\n \r\n self.ax_exafs.clear()\r\n self.ax_exafs = self.fig.add_subplot(111)\r\n \r\n \r\n if self.mode == 0: #compare exafs \r\n for i in range(len(self.k)):\r\n l, = self.ax_exafs.plot(self.k[i], self.exafs[i], label = self.labels[i])\r\n self.ax_exafs.set_xlabel('Wavevector k, $\\AA^{-1}$')\r\n self.ax_exafs.set_ylabel('EXAFS, $\\AA^{-2}$')\r\n self.lines.append(l)\r\n \r\n if self.mode == 1: #compare ft \r\n for i in range(len(self.r)):\r\n line1, = self.ax_exafs.plot(self.r[i], self.fr[i], label = self.labels[i])\r\n line2, = self.ax_exafs.plot(self.r[i], self.fi[i])\r\n line2.set_color(line1.get_color())\r\n line2.set_linestyle('dotted')\r\n self.ax_exafs.set_xlabel('Distance R, $\\AA$')\r\n self.ax_exafs.set_ylabel('Fourier transform, $\\AA^{-3}$')\r\n self.lines.append(line1)\r\n self.lines1.append(line2)\r\n \r\n \r\n if self.mode == 2: #compare bft \r\n for i in range(len(self.bftk)):\r\n l, = self.ax_exafs.plot(self.bftk[i], self.bftexafs[i], label = self.labels[i])\r\n self.ax_exafs.set_xlabel('Wavevector k, $\\AA^{-1}$')\r\n self.ax_exafs.set_ylabel('EXAFS, $\\AA^{-2}$')\r\n self.lines.append(l)\r\n \r\n if self.mode == 3: #compare mju \r\n for i in range(len(self.energy)):\r\n l, = self.ax_exafs.plot(self.energy[i], self.mju[i], label = self.labels[i])\r\n self.ax_exafs.set_xlabel('Energy, eV')\r\n self.ax_exafs.set_ylabel('Absorption, a.u.')\r\n self.lines.append(l)\r\n \r\n \r\n if self.mode == 4: #compare xanes \r\n for i in range(len(self.energy)):\r\n l, = self.ax_exafs.plot(self.energy[i], self.mju[i], label = self.labels[i])\r\n self.ax_exafs.set_xlabel('Energy, eV')\r\n self.ax_exafs.set_ylabel('Absorption, 
a.u.')\r\n self.ax_exafs.axhline(y=1, linewidth=0.5, color = 'k', linestyle='--',)\r\n self.ax_exafs.set_xlim([self.E0-75,self.E0+200])\r\n self.lines.append(l)\r\n \r\n if self.mode == 10: # XES original \r\n for i in range(len(self.energy)):\r\n l, = self.ax_exafs.plot(self.energy[i], self.xes[i], label = self.labels[i])\r\n self.ax_exafs.set_xlabel('Energy, eV')\r\n self.ax_exafs.set_ylabel('Intensity, a.u.')\r\n self.lines.append(l)\r\n \r\n if self.mode == 11: # XES area normalized\r\n for i in range(len(self.energy)):\r\n l, = self.ax_exafs.plot(self.energy[i], self.xes[i], label = self.labels[i])\r\n self.ax_exafs.set_xlabel('Energy, eV')\r\n self.ax_exafs.set_ylabel('Area normalized intensity')\r\n self.lines.append(l)\r\n \r\n self.fig.canvas.mpl_connect('pick_event', self.onpick)\r\n \r\n self.fig.tight_layout()\r\n \r\n box = self.ax_exafs.get_position()\r\n self.ax_exafs.set_position([box.x0, box.y0, box.width * 0.7, box.height])\r\n \r\n leg = self.ax_exafs.legend(loc='center left', bbox_to_anchor=(1, 0.5))\r\n \r\n # we will set up a dict mapping legend line to orig line, and enable\r\n # picking on the legend line\r\n self.lined = dict()\r\n for legline, origline in zip(leg.get_lines(), self.lines):\r\n legline.set_picker(5) # 5 pts tolerance\r\n self.lined[legline] = origline\r\n\r\n if self.mode == 1:\r\n self.lined1 = dict()\r\n for legline, origline in zip(leg.get_lines(), self.lines1):\r\n# legline.set_picker(5) # 5 pts tolerance\r\n self.lined1[legline] = origline\r\n \r\n self.canv.draw()\r\n\r\n \r\n def cancel(self):\r\n #do whatever you need with self.roiGroups \r\n self.close()\r\n \r\n def onpick(self, event):\r\n # on the pick event, find the orig line corresponding to the\r\n # legend proxy line, and toggle the visibility\r\n legline = event.artist\r\n origline = self.lined[legline]\r\n vis = not origline.get_visible()\r\n origline.set_visible(vis)\r\n if self.mode == 1:\r\n origline = self.lined1[legline]\r\n origline.set_visible(vis)\r\n \r\n # Change the alpha on the line in the legend so we can see what lines\r\n # have been toggled\r\n if vis:\r\n legline.set_alpha(1.0)\r\n else:\r\n legline.set_alpha(0.2)\r\n self.fig.canvas.draw()\r\n \r\n \r\n \r\n "
] | [
[
"matplotlib.backends.backend_qt5agg.NavigationToolbar2QT",
"matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg",
"matplotlib.pyplot.figure"
]
] |
ymdatta/BagOfVisualWords | [
"e023e84824c2b0e1e56674b6505be6aee78e219f"
] | [
"get_train_data.py"
] | [
"import get_cluster_centres\nimport numpy as np\n\n\ndef get_images_and_labels_1(img_dict):\n img_labels = []\n img_list = []\n for key in img_dict.keys():\n for des in img_dict[key]:\n img_labels.append(key)\n\n # extend label 50 times\n # img_labels.extend(itertools.repeat(key, 50))\n\n img_des = des\n C = get_cluster_centres.get_cluster_centres(img_des, 50)\n\n img_list.append(C)\n\n img_labels_np = np.asarray(img_labels)\n img_list_np = np.asarray(img_list)\n\n (x, m, n) = img_list_np.shape\n\n img_list_np_reshaped = img_list_np.reshape(x, (m, n))\n\n return (img_labels_np, img_list_np_reshaped)\n\n \"\"\"\n Idea is to vshape the numpy array, each time we add a new image's\n descriptor list.\n\n \"\"\"\n\n\ndef get_images_and_labels(img_dict):\n\n img_labels = []\n img_list = []\n\n for key in img_dict.keys():\n for img in img_dict[key]:\n\n img_labels.append(key)\n img_list.append(img)\n\n return (img_list, img_labels)\n"
] | [
[
"numpy.asarray"
]
] |
szachovy/lpdr | [
"ef43c83b07c17c2e2c4dfb2c58f44abe688e424b"
] | [
"lpdr/lpdr.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n__version__ = '0.1'\n__author__ = 'Wiktor Maj'\n\n#------\nimport numpy as np\nimport cv2\n\nfrom keras.models import model_from_json\nfrom pytesseract import image_to_string\n\nimport argparse\nimport os\nimport stat\nimport pkg_resources\n#------\n\n### tesseract utility for Windows\n#pytesseract.pytesseract.tesseract_cmd = 'absolute_path_to_tesseract.exe'\n\n### defaults for argparser (uncomment and change if needed)\n#DEFAULT_CAPTURE=False\n#DEFAULT_IMAGE_PATH='image.png' \nDEFAULT_NET_PATH='lpdr/wpod_net/wpod_net_update1' # see self.__net_path\n\ndef readable(path):\n ''' \n Check if the source path for the captured image or wpod_net is readable.\n \n Args:\n path: relative path to the destination file.\n\n Returns:\n Boolean flag whatever the path is readable or not.\n '''\n st = os.stat(path)\n return bool(st.st_mode & stat.S_IRGRP)\n \nclass ArgParser(type):\n '''\n Parsing utility, which sends the arguments passed from cli to LPD as a class argument.\n '''\n def __new__(cls, name, parents, dct):\n '''\n Create an object with a capture flag and image_path directly.\n \n Args:\n name: is the name of the class to be created\n parents: is the list of the class's parent classes\n dct: is the list of class's attributes (methods, static variables)\n \n Returns:\n Return object as an instance of LPD class.\n \n Raises:\n Exception: If the capture argument not passed correctly\n '''\n parser = argparse.ArgumentParser()\n try:\n parser.add_argument('-c', '--capture', default=DEFAULT_CAPTURE, help=\"Captures image from camera after run\")\n except NameError: \n parser.add_argument('-c', '--capture', default=False, help=\"Captures image from camera after run\")\n try:\n parser.add_argument('-i', '--image_path', default=DEFAULT_IMAGE_PATH, help=\"Defines input/output path for image\")\n except NameError:\n try:\n readable('image.jpg')\n parser.add_argument('-i', '--image_path', default='image.jpg', help=\"Defines input/output path for image\")\n except FileNotFoundError:\n parser.add_argument('-i', '--image_path', default='image.png', help=\"Defines input/output path for image\")\n \n args = parser.parse_args()\n \n try:\n dct['capture'] = int(args.capture)\n except ValueError:\n try:\n dct['capture'] = True if args.capture in ['True', 'T', 'Y', 'Yes'] else False\n except: \n raise Exception('capture argument not passed correctly')\n dct['image_path'] = args.image_path\n \n return super(ArgParser, cls).__new__(cls, name, parents, dct)\n \nclass SparseList(dict):\n '''\n Dict derivative created with the purpose of assurance that \n key point labels are passed correctly before license plate extraction.\n Sparsity is defined as a number of results according to confidence\n during destination points passing.\n '''\n def __init__(self):\n self.labels = {}\n \n def __setitem__(self, idx, val):\n '''\n Every value point is set to the label`s collection \n if only if all of them are not None and have the same\n shapes in a given destination.\n \n Args:\n idx: element index in labels\n val: value to insert into labels in given idx\n \n Raises:\n Exception: Conditions of a given label are not satisfied to be evaluated\n '''\n if (val['pts'].shape == (2, 4)) and (val['tl'].shape == (2, )) and (val['br'].shape == (2, )) and (val['prob'].shape == ()):\n self.labels[idx] = val\n else:\n raise Exception('matrix shapes does not match for the next stages, please change image settings')\n\n def __getitem__(self, idx):\n '''\n Args:\n idx: element index in 
labels\n \n Returns:\n Coordinate inserted into the collection.\n \n Raises:\n IndexError: If omitted coordinates are passed to the next step.\n '''\n try:\n return self.labels[idx]\n except KeyError:\n raise IndexError('uncomplete data, unable to evaluate')\n\n def get(self):\n '''\n Returns:\n Dict of labels filled by setitem from pred\n '''\n return self.labels\n \nclass LPD(metaclass=ArgParser):\n '''\n \n The first part of LPDR\n\n With given settings: \n - Load the image from an existent source or take the photo of the car (by default) or motorcycle.\n - Process the image to the appropriate form.\n - Load the wpod_net model and predict given a matrix.\n - Find the points where the probability of an object is above a given threshold\n - Collect affines from the corresponding point and transform to coordinates\n - Non-Max-Suppression\n - Find T-matrix from transformed label points\n - Process perspective distance to output image\n\n '''\n def __init__(self):\n ##### webcam settings #####\n # camera source\n self.__source = 0\n \n # width of image to be captured (in px)\n self.__width = 480\n \n # height of image to be captured (in px)\n self.__height = 480\n \n ##### storage #####\n # input matrix\n self._X = None\n\n # output matrix\n self._Y = None\n\n # rectangular or squared size of the plate\n self._size = None\n \n ##### detection settings #####\n # destination path to wpod_net files without extensions\n try:\n self.__net_path = DEFAULT_NET_PATH\n except NameError:\n self.__net_path = None\n \n # iou threshold in nms operation\n self.__iou_threshold = 0.1 \n \n # degree of confidence that the detection result is likely to be license plate\n self.__confidence = 0.5\n \n # alpha paramater during normalization\n self.__alpha = 7.75 \n \n # confidence level of whatever the licence plate is one line high or two\n self.__plate_confidence = 1.4\n \n # output size (width, height) for one line high license plate\n self.__one_line = (400, 100) \n \n # output size (width, height) for two lines high license plate\n self.__two_lines = (300, 300) \n \n # loss function denoted the corresponding vertices of a canonical unit square centered at the origin\n self.__loss = np.matrix([[-0.5, -0.5], [0.5, -0.5], [0.5, 0.5], [-0.5, 0.5]]).T\n \n # ----------\n # object detection part execution\n self.detect()\n # ----------\n \n @property\n def source(self):\n return self.__source\n \n @property\n def width(self):\n return self.__width\n \n @property\n def height(self):\n return self.__height\n \n @property\n def Y(self):\n return self._Y\n \n @Y.setter\n def Y(self, val):\n self._Y = val\n \n @property\n def net_path(self):\n try: \n if readable(''.join([self.__net_path, '.h5'])) and readable(''.join([self.__net_path, '.json'])):\n return self.__net_path \n \n else:\n raise Exception('netpath is not readable')\n except FileNotFoundError:\n return\n \n @property\n def iou_threshold(self):\n return self.__iou_threshold\n \n @property\n def confidence(self):\n return self.__confidence\n \n @property\n def alpha(self):\n return self.__alpha\n \n @property\n def plate_confidence(self):\n return self.__plate_confidence\n \n @property\n def one_line(self):\n return self.__one_line\n \n @property\n def two_lines(self):\n return self.__two_lines\n \n @property\n def loss(self):\n return self.__loss\n \n def preprocess_image(self):\n '''\n Turn an image into a matrix.\n \n Raises:\n Exception: the following source is not readable from the program side.\n \n Returns:\n Matrix with specific shape and values 
range\n '''\n \n if not readable(self.image_path):\n raise Exception('image_path is not readable or does not exist')\n\n img = cv2.imread(self.image_path) \n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n img = img / 255\n if img.shape[:2] != (self.width, self.height):\n img = cv2.resize(img, (self.width, self.height), interpolation=cv2.INTER_AREA)\n img = img.reshape((1, img.shape[0], img.shape[1], img.shape[2])) \n return img\n \n def video(self):\n '''\n Catch the frame from a given optical source \n and save it as an image file in the catalog of main file execution.\n \n Raises:\n Exception: If given optical sources cannot be opened.\n '''\n cap = cv2.VideoCapture(self.source)\n \n if not cap.isOpened():\n raise Exception(\"Could not open video device\")\n \n cap.set(cv2.CAP_PROP_FRAME_WIDTH, self.width)\n cap.set(cv2.CAP_PROP_FRAME_HEIGHT, self.height)\n \n ret, frame = cap.read()\n\n cv2.imwrite(self.image_path, frame)\n cap.release()\n \n def load_model(self):\n '''\n Load the wpod_net to \n predict an input matrix source.\n \n Returns:\n wpod_net with pre-trained weights\n '''\n\n json_file = open(pkg_resources.resource_filename(__name__, '/'.join(('wpod_net', 'wpod_net_update1.json'))), 'r') if self.net_path is None else open('{}.json'.format(self.net_path), 'r')\n model = model_from_json(json_file.read(), custom_objects={})\n model.load_weights(pkg_resources.resource_filename(__name__, '/'.join(('wpod_net', 'wpod_net_update1.h5'))) if self.net_path is None else '{}.h5'.format(self.net_path)) \n json_file.close()\n return model\n \n @staticmethod\n def iou(tl1, br1, tl2, br2):\n '''\n \"Intersection over Union\" function implementation\n used by Non-max suppression\n \n Args:\n tl1: top left corner from the first box\n br1: bottom right corner from the first box\n tl2: top left corner from the second box\n br2: bottom right corner from the second box\n \n Returns:\n IOU value which determines the degree of overlapping boxes\n '''\n xi1 = max(tl1[0], tl2[0])\n yi1 = max(tl1[1], tl2[1])\n xi2 = min(br1[0], br2[0])\n yi2 = min(br1[1], br2[1])\n \n inter_area = (xi2 - xi1) * (yi2 - yi1)\n\n box1_area = (br1[1] - tl1[1]) * (br1[0] - tl1[0])\n box2_area = (br2[1] - tl2[1]) * (br2[0] - tl2[0])\n \n union_area = (box1_area + box2_area) - inter_area\n \n iou = inter_area / union_area\n return iou\n\n def nms(self):\n '''\n Filter overlapping boxes and select the most optimal ones from labels.\n \n Returns:\n None\n '''\n for idx, box in self.labels.get().copy().items():\n for i in range(idx+1):\n if self.iou(box['tl'], box['br'], self.labels[i]['tl'], self.labels[i]['br']) > self.iou_threshold:\n self.labels.labels = {k: self.labels[k] for k in range(i+1)} \n return\n \n @staticmethod\n def find_T_matrix(pts, t_pts):\n '''\n Calculate A matrix using processed label and size points \n for SVD. 
\n \n Args:\n pts: the result of the ptsh function\n t_pts: the result of the draw_rectangle function\n \n Returns:\n The last row of SVD unitary matrix V*\n '''\n A = np.zeros((8, 9))\n \n for i in range(0, 4):\n xi = pts[:, i].T\n xil = t_pts[:, i]\n\n A[i*2, 3:6] = -xi\n A[i*2, 6:] = xil[1]*xi\n A[i*2+1, :3] = xi\n A[i*2+1, 6:] = -xil[0]*xi\n \n [U, E, V] = np.linalg.svd(A)\n T = V[-1, :].reshape((3, 3))\n \n return T\n\n def normalization(self, pts, mn, MN):\n '''\n Function required to match the network output resolution\n after scaling and re-centering according to each point (m, n)\n in the feature map.\n \n Args:\n pts: points of the propagated matrix by a loss function\n mn: point cell of the feature map\n MN: merged feature map volume\n \n Returns:\n Normalized matrix\n '''\n return ((pts * self.alpha) + mn) / MN\n \n def probs(self):\n '''\n Returns:\n The Probability that the given object is a license plate \n in specific coordinate.\n '''\n return self.Y[..., 0]\n \n def affines(self):\n '''\n Returns:\n Affines from the specified coordinate.\n '''\n return self.Y[..., 2:]\n \n def draw_rectangle(self):\n '''\n Transform size indicators to get (4, 3) shape\n for T matrix transformer.\n \n Returns:\n Size as a matrix which \n draws the rectangle on the plate.\n '''\n return np.matrix([[0, self.size[0], self.size[0], 0], [0, 0, self.size[1], self.size[1]], [1, 1, 1, 1]], dtype=float)\n \n def plate_coordinates(self):\n '''\n Get the aspect ratio of division between top left and bottom right\n coordinates to know the type of license plate.\n \n Returns:\n Size more likely to be two lines plate or one line plate\n concerning the confidence level\n '''\n size = (self.labels.get()[0]['tl'] - self.labels.get()[0]['br'])[0] / (self.labels.get()[0]['tl'] - self.labels.get()[0]['br'])[1]\n return self.two_lines if size < self.plate_confidence else self.one_line\n \n def ptsh(self, point):\n '''\n Transform label points with the matrix shape \n to get (4, 3) shape for T matrix transformer. 
\n \n Args:\n point: Label point values\n \n Returns:\n The transformable array of label points\n '''\n return np.concatenate((point * np.array(self.X.shape[1::-1]).astype(float).reshape((2, 1)), np.ones((1, 4))))\n\n def collect_labels(self, xx, yy, probs, affines):\n '''\n Creates, propagates, and normalizes affine points as a matrix.\n It determines points, plate edges, and probabilities from the matrix\n and save it as a label for extraction.\n \n Args:\n xx: x coordinate which passes the threshold to be the desired object\n yy: y coordinate which passes the threshold to be the desired object\n probs: probabilities to be an object or not\n affines: features for a given point\n '''\n for idx, val in enumerate(xx):\n x, y = xx[idx], yy[idx]\n \n affine = affines[x, y]\n prob = probs[x, y]\n \n A = affine[[0, 1, 3, 4]].reshape((2, 2))\n A[0, 0] = max(A[0, 0], 0)\n A[1, 1] = max(A[1, 1], 0)\n\n pts = np.array(A*self.loss + affine[[2, 5]].reshape((2, 1)))\n\n MN = (np.array(self.X.shape[1::-1]).astype(float) / 16).reshape((2, 1)) #X\n mn = np.array([float(y) + 0.5, float(x) + 0.5]).reshape((2, 1))\n \n pts_prop = self.normalization(pts, mn, MN)\n \n self.labels[idx] = {'pts': pts_prop, 'tl': np.amin(pts, axis=1), 'br': np.amax(pts, axis=1), 'prob': prob}\n \n def pred(func):\n '''\n Wrapper for detect method\n \n Args:\n Function to be wrapped\n \n Returns:\n Prediction\n '''\n def get_Y(self):\n '''\n Predicts probabilities with affines\n on the X matrix using wpod_net model.\n '''\n if self.capture:\n self.video()\n \n self.X = self.preprocess_image()\n \n model = self.load_model()\n \n self.Y = model.predict(self.X)\n self.Y = np.squeeze(self.Y)\n func(self)\n \n return get_Y\n \n @pred\n def detect(self):\n '''\n LPD function to run.\n \n After prediction, it matches the corresponding points with affines\n and transfers it into the form of labels where each point is represented\n as transformed matrices with key point utilities.\n If the points are processed, it extracts the object from the X matrix.\n '''\n probs = self.probs()\n affines = self.affines()\n\n self.labels = SparseList()\n xx, yy = np.where(probs > self.confidence)\n self.collect_labels(xx, yy, probs, affines)\n self.nms()\n self.extract_plate()\n \n \n def extract_plate(self):\n '''\n With given final labels for matrices points, \n warp perspective from T matrix and append it\n to license plate indicators.\n \n Raises:\n Exception: If no labels are returned from detection steps\n '''\n self.Y = []\n \n \n if self.labels.get():\n self.size = self.plate_coordinates()\n \n for label in self.labels.get().values():\n \n t_ptsh = self.draw_rectangle()\n ptsh = self.ptsh(label['pts'])\n \n T = self.find_T_matrix(ptsh, t_ptsh)\n\n self.Y = cv2.warpPerspective(self.X[0, ...], T, self.size)\n else:\n raise Exception('No plate detected')\n \n\n\nclass LPR(LPD):\n '''\n The second part of LPDR \n \n With given settings:\n - Execute the LPD and acquire the storage for the next steps\n - The process resulting Y matrix to the better-performed form for OCR\n - Configure tesseract\n - Use tesseract to get the strings from the license plate and save it as a program output\n '''\n def __init__(self):\n super(LPR, self).__init__()\n \n ##### settings #####\n # default output value\n self.__output = None \n \n # tesseract configuration setting for OCR Engine mode\n self.__oem = 3\n \n # tesseract configuration setting for running a subset of layout analysis\n self.__psm = 9 if self.size == self.one_line else 12\n \n \n # ---------\n # 
character recognition part execution\n self.recognize()\n # --------\n \n @property\n def oem(self): \n if self.__oem in range(0, 4): \n return self.__oem \n else: \n raise Exception(\"OEM Option not available for tesseract\")\n\n @property\n def psm(self):\n if self.__psm in range(0, 14): \n return self.__psm \n else: \n raise Exception(\"PSM Option not available for tesseract\")\n \n @property\n def output(self):\n return self.__output \n\n @output.setter\n def output(self, text):\n self.__output = ''.join(filter(str.isalnum, text))\n self.__output = ''.join(letter for letter in self.__output if (letter.isupper() or letter.isdigit()))\n \n def tesseract_conf(self):\n '''\n Transform tesseract settings as a\n executable configuration options.\n '''\n return r'--oem {} --psm {}'.format(self.oem, self.psm)\n \n def processing(func):\n '''\n Wrapper for recognizing method\n \n Args:\n Function to be wrapped\n \n Returns:\n Processing function\n '''\n def after_processing(self):\n '''\n Executing steps to be processed before OCR\n '''\n self.Y = self.rescale()\n self.Y = self.denoise()\n self.Y = self.erode()\n \n func(self)\n \n return after_processing\n \n \n def rescale(self):\n '''\n Returns:\n Rescaled and converted output matrix.\n '''\n return (self.Y * 255).astype('uint8')\n\n def denoise(self):\n '''\n Returns:\n Denoised output matrix.\n '''\n return cv2.fastNlMeansDenoisingColored(self.Y, None, 17, 29)\n\n def erode(self):\n '''\n Returns:\n Letter erosion effect on the output matrix.\n '''\n kernel = np.ones((5, 5), np.uint8)\n return cv2.erode(self.Y, kernel, iterations=1)\n \n @processing\n def recognize(self):\n '''\n Do the OCR operation on processed output by tesseract with a given configuration\n and save it as an output string which is the cumulative program result. \n ''' \n self.output = image_to_string(self.Y, config=self.tesseract_conf())\n \n\nif __name__ == '__main__':\n print(LPR().output)\n\n"
] | [
[
"numpy.matrix",
"numpy.array",
"numpy.zeros",
"numpy.ones",
"numpy.where",
"numpy.amax",
"numpy.linalg.svd",
"numpy.amin",
"numpy.squeeze"
]
] |
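The `find_T_matrix` helper in the license-plate detector recorded above estimates a homography with the direct linear transform: four point correspondences fill an 8x9 constraint matrix whose null vector (the last right singular vector of the SVD) is the flattened 3x3 transform. A minimal standalone sketch of the same construction, with hypothetical corner coordinates chosen only so the script runs and checks itself:

```python
import numpy as np

def find_T_matrix(pts, t_pts):
    """DLT: pts and t_pts are (3, 4) homogeneous corner coordinates."""
    A = np.zeros((8, 9))
    for i in range(4):
        xi, xil = pts[:, i], t_pts[:, i]
        A[i * 2, 3:6] = -xi             # row [0, -x, v*x]
        A[i * 2, 6:] = xil[1] * xi
        A[i * 2 + 1, :3] = xi           # row [x, 0, -u*x]
        A[i * 2 + 1, 6:] = -xil[0] * xi
    _, _, V = np.linalg.svd(A)
    return V[-1, :].reshape((3, 3))     # null vector of A, reshaped to 3x3

# Hypothetical check: warp a unit square onto a 400x100 rectangle
# (the one_line output size used by the detector).
src = np.array([[0, 1, 1, 0], [0, 0, 1, 1], [1, 1, 1, 1]], dtype=float)
dst = np.array([[0, 400, 400, 0], [0, 0, 100, 100], [1, 1, 1, 1]], dtype=float)
T = find_T_matrix(src, dst)
warped = T @ src
warped /= warped[2]                     # de-homogenize each column
print(np.allclose(warped[:2], dst[:2]))  # True
```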
PreferredAI/seer | [
"d043f4b28b7b2bb82cb2a72031ac1df1191db97b"
] | [
"local_search_contextualized_opinion.py"
] | [
"import argparse\n\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\n\nfrom explanation_generation import (contextualize_candidate_sentences,\n get_contextualizer, get_preference)\nfrom sentence_pair_model import TfIdfSentencePair\nfrom util import convert_str_to_list, substitute_word\n\nsummary_report = {}\n\n\ndef parse_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"-i\",\n \"--input\",\n type=str,\n default=\"selected.csv\",\n help=\"Selected sentences file path\",\n )\n parser.add_argument(\n \"-c\",\n \"--corpus\",\n type=str,\n default=\"data/toy/train.csv\",\n help=\"Corpus file path (train.csv)\",\n )\n parser.add_argument(\n \"-o\",\n \"--out\",\n type=str,\n default=\"explanations.csv\",\n help=\"Output file path\",\n )\n parser.add_argument(\n \"-p\",\n \"--preference_dir\",\n type=str,\n default=\"data/toy/efm\",\n help=\"EFM/MTER output directory\",\n )\n parser.add_argument(\n \"-m\", \"--contextualizer_path\", type=str, default=\"result/model.params\"\n )\n parser.add_argument(\n \"-k\",\n \"--top_k\",\n type=int,\n default=10,\n help=\"Top k opinions for contextualization\",\n )\n parser.add_argument(\n \"-s\",\n \"--strategy\",\n type=str,\n choices=[\n \"greedy-efm\",\n \"ilp-efm\",\n ],\n default=\"greedy-efm\",\n help=\"Strategy\",\n )\n parser.add_argument(\"--verbose\", action=\"store_true\")\n parser.add_argument(\"--debug\", action=\"store_true\")\n parser.add_argument(\"--debug_path\", type=str, default=\"debug_local_search.pkl\")\n parser.add_argument(\"--debug_size\", type=int, default=100)\n return parser.parse_args()\n\n\ndef compute_representative_cost(sentences, represented_sentences, spm):\n all_sentences = sentences + represented_sentences\n pairs = [\n (i, j)\n for i in range(len(sentences))\n for j in range(len(sentences), len(all_sentences))\n ]\n costs = spm.compute_cost(all_sentences, pairs)\n cost = 0\n for j in range(len(sentences), len(all_sentences)):\n min_cost = min(costs[: len(sentences), j])\n cost += min_cost\n return cost\n\n\ndef local_search_contextualize_opinion(\n user,\n item,\n sentences,\n aspects,\n corpus,\n contextualizer,\n sentence_pair_model,\n top_k=None,\n strategy=\"ilp-efm\",\n verbose=False,\n):\n local_searched_sentences = []\n if len(sentences) == 0:\n return local_searched_sentences\n\n review_idx = \"{}-{}\".format(user, item)\n\n candidates = corpus[(corpus[\"asin\"] == item) & (corpus[\"aspect\"].isin(aspects))]\n candidates = contextualize_candidate_sentences(\n candidates, user, contextualizer, top_k=top_k\n )\n if \"contextualized\" in strategy:\n candidates[\"instance\"] = candidates.apply(\n lambda x: \"{}-{}-{}\".format(x[\"asin\"], x[\"aspect\"], x[\"sentence\"]), axis=1\n )\n else:\n candidates[\"instance\"] = candidates.apply(\n lambda x: \"{}-{}-{}\".format(x[\"asin\"], x[\"aspect\"], x[\"original sentence\"]),\n axis=1,\n )\n candidates[\"sentence\"] = candidates[\"original sentence\"]\n candidates.drop_duplicates(\"instance\", inplace=True)\n candidates = candidates.set_index([\"instance\"])\n aspect_sentences_map = {}\n for aspect, sentence in zip(candidates[\"aspect\"], candidates[\"sentence\"]):\n aspect_sentences = aspect_sentences_map.setdefault(aspect, [])\n if sentence not in sentences:\n aspect_sentences.append(sentence)\n\n solution = {}\n for aspect, sentence in zip(aspects, sentences):\n aspect_sentences = solution.setdefault(aspect, [])\n aspect_sentences.append(sentence)\n\n for aspect, sentence in zip(aspects, sentences):\n represented_sentences = 
aspect_sentences_map.get(aspect)\n if len(represented_sentences) > 0:\n solution_sentences = solution[aspect].copy()\n instance = candidates.loc[\"{}-{}-{}\".format(item, aspect, sentence)]\n predicted_opinions = instance[\"top k opinions\"]\n opinion_position = instance[\"opinion_pos\"]\n\n # do local search here\n best_opinion = sentence.split()[opinion_position] # raw opinion\n new_sentence = sentence\n best_cost = compute_representative_cost(\n solution_sentences, represented_sentences, sentence_pair_model\n )\n solution_sentences.remove(sentence)\n best_idx = -1\n for idx, opinion in enumerate(predicted_opinions):\n new_sentence = substitute_word(sentence, opinion, opinion_position)\n temp_solution_sentences = solution_sentences + [new_sentence]\n cost = compute_representative_cost(\n temp_solution_sentences, represented_sentences, sentence_pair_model\n )\n if cost < best_cost:\n best_idx = idx\n best_cost = cost\n best_opinion = opinion\n sentence = substitute_word(sentence, best_opinion, opinion_position)\n summary_report.setdefault(review_idx, []).append(best_idx)\n local_searched_sentences.append(sentence)\n return local_searched_sentences\n\n\nif __name__ == \"__main__\":\n args = parse_arguments()\n print(\"strategy: %s\" % args.strategy)\n print(\"load input from %s\" % args.input)\n df = pd.read_csv(args.input)\n df = df[df[\"selected sentences\"].notnull()]\n if args.debug:\n if args.debug_size > 0:\n df = df[: args.debug_size]\n df[\"selected sentences\"] = df[\"selected sentences\"].apply(\n lambda x: convert_str_to_list(x)\n )\n\n print(\"load corpus from %s\" % args.corpus)\n corpus = pd.read_csv(args.corpus)\n\n preference = get_preference(args.preference_dir, args.strategy, args.verbose)\n\n contextualizer = get_contextualizer(\n args.contextualizer_path, preference, args.strategy, verbose=args.verbose\n )\n\n sentence_pair_model = TfIdfSentencePair(\n args.verbose,\n )\n\n df[\"backup sentences\"] = df[\"sentences\"]\n\n tqdm.pandas(desc=\"Local search\")\n\n df[\"selected sentences\"] = df.progress_apply(\n lambda row: local_search_contextualize_opinion(\n row[\"reviewerID\"],\n row[\"asin\"],\n row[\"selected sentences\"],\n str(row[\"aspects\"]).split(\",\"),\n corpus,\n contextualizer,\n sentence_pair_model,\n top_k=args.top_k,\n strategy=args.strategy,\n verbose=args.verbose,\n ),\n axis=1,\n )\n\n df[\"sentences\"] = df[\"selected sentences\"].apply(lambda x: \" . \".join(x))\n\n df.to_csv(args.out, index=False)\n if args.debug:\n import pickle\n\n with open(args.debug_path, \"wb\") as f:\n pickle.dump(summary_report, f)\n print(summary_report)\n"
] | [
[
"pandas.read_csv"
]
] |
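`compute_representative_cost` in the seer record above charges each represented sentence the distance to its nearest selected sentence, delegating pairwise costs to a pluggable sentence-pair model. Below is a hypothetical stand-in built on scikit-learn TF-IDF cosine distance; the repository's own `TfIdfSentencePair` may be implemented differently, so treat this as a sketch of the interface rather than the actual model:

```python
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

class TfIdfSentencePair:
    """Assumed interface: cost of a pair = 1 - TF-IDF cosine similarity."""
    def compute_cost(self, sentences, pairs):
        tfidf = TfidfVectorizer().fit_transform(sentences)
        sim = cosine_similarity(tfidf)
        costs = np.ones((len(sentences), len(sentences)))
        for i, j in pairs:
            costs[i, j] = 1.0 - sim[i, j]
        return costs

def compute_representative_cost(sentences, represented, spm):
    # Each represented sentence is "covered" by its cheapest selected sentence.
    all_sents = sentences + represented
    pairs = [(i, j) for i in range(len(sentences))
             for j in range(len(sentences), len(all_sents))]
    costs = spm.compute_cost(all_sents, pairs)
    return sum(costs[:len(sentences), j].min()
               for j in range(len(sentences), len(all_sents)))

spm = TfIdfSentencePair()
print(compute_representative_cost(
    ["the battery lasts long"],
    ["battery life is great", "screen is dim"], spm))
```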
EVS-ATMOS/cmdv-rrm-anl | [
"1d73d2dc2cb3b86de43c817fe340d0b550e2e04b"
] | [
"code/w_pdfs_by_pope_regime_echotop.py"
] | [
"import matplotlib\nmatplotlib.use('Agg')\nimport pyart\nfrom netCDF4 import Dataset\nimport numpy as np\nfrom datetime import datetime, timedelta\nfrom copy import deepcopy\nimport glob\nimport math\nimport dask.array as da\nfrom distributed import Client, LocalCluster\nfrom dask import delayed, compute\nimport time\nimport sys\nfrom scipy import ndimage\n\n# Start a cluster with x workers\ncluster = LocalCluster(n_workers=int(sys.argv[1]))\nclient = Client(cluster)\n\n# Input the range of dates and time wanted for the collection of images\nstart_year = 2005\nstart_day = 1\nstart_month = 11\nstart_hour = 1\nstart_minute = 0\nstart_second = 0\n\nend_year = 2011\nend_month = 5\nend_day = 1\nend_hour = 0\nend_minute = 00\nend_second = 0\n\ndata_path = '/lcrc/group/earthscience/rjackson/multidop_grids/ddop/'\n\n\n# get_radar_times\n# start_year = Start year of animation\n# start_month = Start month of animation\n# start_day = Start day of animation\n# start_hour = Start hour of animation\n# end_year = End year of animation\n# end_month = End month of animation\n# end_day = End day of animation\n# end_minute = End minute of animation\n# minute_interval = Interval in minutes between scans (default is 5)\n# This procedure acquires an array of Radar classes between start_time and end_time \ndef get_dda_times(start_year, start_month, start_day,\n start_hour, start_minute, end_year,\n end_month, end_day, end_hour, \n end_minute, minute_interval=5):\n\n start_time = datetime(start_year,\n start_month,\n start_day,\n start_hour,\n start_minute,\n )\n end_time = datetime(end_year,\n end_month,\n end_day,\n end_hour,\n end_minute,\n )\n\n deltatime = end_time - start_time\n\n if(deltatime.seconds > 0 or deltatime.minute > 0):\n no_days = deltatime.days + 1\n else:\n no_days = deltatime.days\n \n if(start_day != end_day):\n no_days = no_days + 1\n \n days = np.arange(0, no_days, 1)\n print('We are about to load grid files for ' + str(no_days) + ' days')\n \n # Find the list of files for each day\n cur_time = start_time\n \n file_list = []\n time_list = []\n for i in days:\n year_str = \"%04d\" % cur_time.year\n day_str = \"%02d\" % cur_time.day\n month_str = \"%02d\" % cur_time.month\n format_str = (data_path +\n 'cf_compliant_grid' +\n year_str +\n month_str +\n day_str +\n '*.nc')\n \n print('Looking for files with format ' + format_str)\n \n data_list = glob.glob(format_str)\n \n for j in range(0, len(data_list)):\n file_list.append(data_list[j])\n cur_time = cur_time + timedelta(days=1)\n \n # Parse all of the dates and time in the interval and add them to the time list\n past_time = []\n for file_name in file_list:\n date_str = file_name[-15:-3]\n year_str = date_str[0:4]\n month_str = date_str[4:6]\n day_str = date_str[6:8]\n hour_str = date_str[8:10]\n minute_str = date_str[10:12]\n second_str = '00'\n \n cur_time = datetime(int(year_str),\n int(month_str),\n int(day_str),\n int(hour_str),\n int(minute_str),\n 0)\n time_list.append(cur_time)\n \n \n # Sort time list and make sure time are at least xx min apart\n time_list.sort()\n time_list_sorted = deepcopy(time_list)\n \n time_list_final = []\n past_time = []\n \n for times in time_list_sorted: \n \n cur_time = times \n \n if(past_time == []):\n past_time = cur_time\n \n if(cur_time - past_time >= timedelta(minutes=minute_interval)\n and cur_time >= start_time and cur_time <= end_time): \n time_list_final.append(cur_time)\n past_time = cur_time\n \n return time_list_final\n\n# Get a Radar object given a time period in the CPOL dataset\ndef 
get_grid_from_dda(time):\n year_str = \"%04d\" % time.year\n month_str = \"%02d\" % time.month\n day_str = \"%02d\" % time.day\n hour_str = \"%02d\" % time.hour\n minute_str = \"%02d\" % time.minute\n second_str = \"%02d\" % time.second\n file_name_str = (data_path +\n 'cf_compliant_grid' +\n year_str +\n month_str +\n day_str +\n hour_str +\n minute_str + '.nc')\n \n radar = pyart.io.read_grid(file_name_str)\n return radar\n\n# Get beam crossing angle between radars\ndef get_bca(grid):\n berr_origin = [-12960.1,-23091.1]\n x,y = np.meshgrid(grid.x['data'], grid.y['data'])\n a = np.sqrt(np.multiply(x,x)+np.multiply(y,y))\n b = np.sqrt(pow(x-berr_origin[0],2)+pow(y-berr_origin[1],2))\n c = np.sqrt(berr_origin[0]*berr_origin[0]+berr_origin[1]*berr_origin[1])\n theta_1 = np.arccos(x/a)\n theta_2 = np.arccos((x-berr_origin[1])/b)\n return np.arccos((a*a+b*b-c*c)/(2*a*b))\n\ndef get_updrafts(time):\n pyart_grid = get_grid_from_dda(time)\n bca = get_bca(pyart_grid)\n w = pyart_grid.fields['upward_air_velocity']['data']\n z = pyart_grid.fields['reflectivity']['data']\n bca = np.ma.masked_invalid(bca)\n\n for levels in range(0,num_levels-1):\n w_outside_updraft = np.logical_or(w[levels] < 1, w[levels] > 99.0)\n outside_dd_lobes = np.logical_or(bca < math.pi/6, bca > 5*math.pi/6)\n w[levels] = np.ma.masked_where(np.logical_or(w_outside_updraft,\n outside_dd_lobes), w[levels])\n z[levels] = np.ma.masked_where(np.logical_or(w_outside_updraft,\n outside_dd_lobes), z[levels])\n \n grid_z = pyart_grid.point_z['data']\n\n # Set mask to exclude data outside of updrafts\n w_temp = deepcopy(w)\n w_temp[~w_temp.mask] = 1\n w_temp[w_temp.mask] = 0\n w_temp.mask = False\n array_shape = w_temp.shape\n six_connected_structure = [[[0,0,0],\n [0,1,0],\n [0,0,0]],\n [[0,1,0],\n [1,1,1],\n [0,1,0]],\n [[0,0,0],\n [0,1,0],\n [0,0,0]]]\n updrafts, num_updrafts = ndimage.measurements.label(w_temp, \n structure=six_connected_structure)\n\n # Get echo top heights\n echo_top = np.zeros((array_shape[1],array_shape[2]))\n for i in range(0, array_shape[1]):\n for j in range(0, array_shape[2]):\n in_cloud = np.where(np.logical_and(z[:,i,j] > 1,\n z[:,i,j] < 10))\n print(grid_z[in_cloud,i,j])\n if(len(in_cloud[0]) > 0):\n in_cloud = in_cloud[0][-1]\n echo_top[i,j] = grid_z[in_cloud,i,j]\n else:\n echo_top[i,j] = np.nan\n \n # Get statistics in continous regions\n index=np.arange(0, num_updrafts + 1)\n max_z = ndimage.measurements.maximum(grid_z, \n labels=updrafts, \n index=index)\n min_z = ndimage.measurements.minimum(grid_z, \n labels=updrafts,\n index=index)\n \n max_w_individual = []\n level_individual = []\n label_individual = []\n count_individual = []\n echo_top_individual = []\n # Find deep convective cores and get max updraft speeds\n for levels in range(0,num_levels-1):\n label_level = updrafts[levels]\n masked_array = np.ma.zeros(updrafts.shape)\n masked_array.mask = True\n w_temp = w[levels]\n \n for labels in range(1, len(max_z)-1):\n indicies = np.ma.where(label_level == labels) \n \n if(len(indicies[0]) > 0 \n and max_z[labels] >= 6000\n and min_z[labels] <= 1000):\n max_w_individual.append(max(w_temp[indicies]))\n echo_top_individual.append(max(echo_top[indicies]))\n level_individual.append(levels)\n label_individual.append(labels)\n count_individual.append(count)\n \n # Convert to list of individual max w's for each updraft\n max_w_individual = np.array(max_w_individual)\n level_individual = np.array(level_individual)\n echo_top_individual = np.array(echo_top_individual)\n\n # Very large vertical velocities 
aloft\n if(len(max_w_individual) > 0):\n if(np.max(max_w_individual) > 60):\n print('Very large vertical velocity found:')\n print(time)\n max_w_individual = np.array([])\n level_individual = np.array([])\n echo_top_individual = np.array([])\n return_array = np.ma.zeros((len(max_w_individual),3))\n return_array[:,0] = max_w_individual\n return_array[:,1] = level_individual\n return_array[:,2] = echo_top_individual\n return return_array\n \n# Plot the radars from given time.\ntimes = get_dda_times(start_year, start_month, start_day,\n start_hour, start_minute, end_year,\n end_month, end_day, end_hour, \n end_minute, minute_interval=0)\n\nin_netcdf = Dataset('/lcrc/group/earthscience/rjackson/data/Pope_regime.cdf', \n mode='r') \nyear = in_netcdf.variables['year'][:]\nmonth = in_netcdf.variables['month'][:]\nday = in_netcdf.variables['day'][:]\ngroups = in_netcdf.variables['groups'][:]\n\npopedates = []\nfor i in range(0,len(day)):\n popedates.append(datetime(year=int(year[i]),\n month=int(month[i]),\n day=int(day[i])))\n\n# Since grids are uniform, calculate beam crossing angle for first grid and\n# apply to all\nfirst_grid = get_grid_from_dda(times[0])\nbca = get_bca(first_grid) \nnum_levels = 40\nz_levels = np.arange(0.5,0.5*(num_levels+1),0.5)*1000\ncount = 0\npope_regime = int(sys.argv[2])\nmin_height = int(sys.argv[3])*1e3\nmax_height = int(sys.argv[4])*1e3\n\n# Filter out data not in Pope regime\npope_times = []\nfor time in times:\n # Look for date in Pope regime data\n cur_date = datetime(year=time.year, month=time.month, day=time.day)\n inds = np.where([day <= cur_date for day in popedates])\n pope_index = inds[0][-1]\n if(groups[pope_index] == pope_regime):\n print((popedates[pope_index], time))\n pope_times.append(time)\n\nin_netcdf.close()\n\n# Get delayed structure to load files in parallel\nget_file = delayed(get_updrafts)\n\n# Calculate PDF\nmean_w = np.ma.zeros(num_levels)\nmedian_w = np.ma.zeros(num_levels)\nninety_w = np.ma.zeros(num_levels)\nninety_five_w = np.ma.zeros(num_levels)\nninety_nine_w = np.ma.zeros(num_levels)\nmean_z = np.ma.zeros(num_levels)\nmedian_z = np.ma.zeros(num_levels)\nninety_z = np.ma.zeros(num_levels)\nninety_five_z = np.ma.zeros(num_levels)\nninety_nine_z = np.ma.zeros(num_levels)\nbins = np.arange(-10,40,1)\nbins_z = np.arange(0,60,1)\nprint('Doing parallel grid loading...')\nimport time\nt1 = time.time()\nws = []\nfor i in range(0, len(pope_times), int(len(pope_times)/4)):\n ws_temp = [get_file(times) for times in pope_times[i:(i+len(pope_times)/4)]]\n ws_temp = compute(*ws_temp)\n ws.append(ws_temp)\n\nfor arrays in ws:\n array_temp = np.concatenate(arrays)\n print(array_temp.shape) \n\nws = np.concatenate([np.concatenate(arrays) for arrays in ws])\n\nt2 = time.time() - t1\nprint('Total time in s: ' + str(t2))\nprint('Time per scan = ' + str(t2/len(pope_times)))\nlevel_individual = ws[:,1] \nw_individual = ws[:,0]\necho_top_individual = ws[:,2]\necho_min = int(sys.argv[3])*1e3\necho_max = int(sys.argv[4])*1e3\nprint(len(level_individual))\nprint(len(w_individual))\nfor levels in range(0,num_levels): \n heights = np.logical_and(echo_top_individual >= echo_min,\n echo_top_individual <= echo_max)\n w_new = w_individual[np.logical_and(level_individual == levels, heights)]\n if(len(w_new) > 0):\n mean_w[levels] = np.nanmean(w_new) \n median_w[levels] = np.nanpercentile(w_new, 50)\n ninety_w[levels] = np.nanpercentile(w_new, 90)\n ninety_five_w[levels] = np.percentile(w_new, 95)\n ninety_nine_w[levels] = np.percentile(w_new, 99)\n else:\n 
mean_w[levels] = np.nan\n median_w[levels] = np.nan\n ninety_w[levels] = np.nan\n ninety_five_w[levels] = np.nan\n ninety_nine_w[levels] = np.nan\n \nprint('Writing netCDF file...')\n\n# Save to netCDF file\nout_netcdf = Dataset('wpdfregime' + \n str(pope_regime) + \n '_' + \n str(min_height) + \n '_' + \n str(max_height) + \n 'varble.cdf', 'w')\nout_netcdf.createDimension('levels', num_levels)\nmean_file = out_netcdf.createVariable('mean', mean_w.dtype, ('levels',))\nmean_file.long_name = 'Mean w'\nmean_file.units = 'm s-1'\nmean_file[:] = mean_w\n\nmedian_file = out_netcdf.createVariable('median', median_w.dtype, ('levels',))\nmedian_file.long_name = 'median w'\nmedian_file.units = 'm s-1'\nmedian_file[:] = median_w\n\nninety_file = out_netcdf.createVariable('ninety', ninety_w.dtype, ('levels',))\nninety_file.long_name = '90% w'\nninety_file.units = 'm s-1'\nninety_file[:] = ninety_w\n\nn5_file = out_netcdf.createVariable('ninety_five', ninety_five_w.dtype, ('levels',))\nn5_file.long_name = '95W w'\nn5_file.units = 'm s-1'\nn5_file[:] = ninety_five_w\n\nn5_file = out_netcdf.createVariable('ninety_nine', ninety_five_w.dtype, ('levels',))\nn5_file.long_name = '99W w'\nn5_file.units = 'm s-1'\nn5_file[:] = ninety_nine_w\n\nz_file = out_netcdf.createVariable('z', ninety_five_w.dtype, ('levels',))\nz_file.long_name = 'z'\nz_file.units = 'm'\nz_file[:] = z_levels\n \nout_netcdf.close()\n"
] | [
[
"numpy.arccos",
"numpy.multiply",
"numpy.where",
"numpy.nanmean",
"numpy.concatenate",
"numpy.max",
"numpy.nanpercentile",
"numpy.logical_and",
"scipy.ndimage.measurements.label",
"scipy.ndimage.measurements.minimum",
"numpy.arange",
"numpy.sqrt",
"scipy.ndimage.measurements.maximum",
"matplotlib.use",
"numpy.logical_or",
"numpy.array",
"numpy.zeros",
"numpy.percentile",
"numpy.ma.zeros",
"numpy.ma.masked_invalid",
"numpy.ma.where",
"numpy.meshgrid"
]
] |
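The radar script above fans `get_updrafts` out over scan times with `dask.delayed` and gathers results with `compute`; note that its chunked slice `pope_times[i:(i+len(pope_times)/4)]` produces a float index, which raises `TypeError` on Python 3. A minimal sketch of the same fan-out pattern with integer chunk sizes and a toy workload standing in for the grid loading (dask's default threaded scheduler is assumed, so no cluster is required):

```python
import numpy as np
from dask import delayed, compute

def get_updrafts(t):
    # Placeholder for the expensive per-scan computation; returns one
    # (max_w, level, echo_top)-style row per scan time.
    return np.array([[t * 1.0, t % 5, 1000.0 * t]])

get_file = delayed(get_updrafts)   # lazy task factory, as in the script
times = list(range(20))
chunk = max(1, len(times) // 4)    # integer chunk size, not len(times)/4

results = []
for i in range(0, len(times), chunk):
    batch = compute(*[get_file(t) for t in times[i:i + chunk]])
    results.append(np.concatenate(batch))

ws = np.concatenate(results)
print(ws.shape)  # (20, 3)
```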
4PiR2/SO-Pose | [
"a3a61d2c97b1084a4754d6c12e45e16d85809729"
] | [
"core/gdrn_modeling/datasets/ycbv_pbr.py"
] | [
"import hashlib\nimport logging\nimport os\nimport os.path as osp\nimport sys\n\ncur_dir = osp.dirname(osp.abspath(__file__))\nPROJ_ROOT = osp.normpath(osp.join(cur_dir, \"../../..\"))\nsys.path.insert(0, PROJ_ROOT)\nimport time\nfrom collections import OrderedDict\nimport mmcv\nimport numpy as np\nfrom tqdm import tqdm\nfrom transforms3d.quaternions import mat2quat, quat2mat\nimport ref\nfrom detectron2.data import DatasetCatalog, MetadataCatalog\nfrom detectron2.structures import BoxMode\nfrom lib.pysixd import inout, misc\nfrom lib.utils.mask_utils import binary_mask_to_rle, cocosegm2mask\nfrom lib.utils.utils import dprint, iprint, lazy_property\n\n\nlogger = logging.getLogger(__name__)\nDATASETS_ROOT = osp.normpath(osp.join(PROJ_ROOT, \"datasets\"))\n\n\nclass YCBV_PBR_Dataset:\n def __init__(self, data_cfg):\n \"\"\"\n Set with_depth and with_masks default to True,\n and decide whether to load them into dataloader/network later\n with_masks:\n \"\"\"\n self.name = data_cfg[\"name\"]\n self.data_cfg = data_cfg\n\n self.objs = data_cfg[\"objs\"] # selected objects\n\n self.dataset_root = data_cfg.get(\"dataset_root\", osp.join(DATASETS_ROOT, \"BOP_DATASETS/ycbv/train_pbr\"))\n self.xyz_root = data_cfg.get(\"xyz_root\", osp.join(self.dataset_root, \"xyz_crop\"))\n assert osp.exists(self.dataset_root), self.dataset_root\n self.models_root = data_cfg[\"models_root\"] # BOP_DATASETS/ycbv/models\n self.scale_to_meter = data_cfg[\"scale_to_meter\"] # 0.001\n\n self.with_masks = data_cfg[\"with_masks\"]\n self.with_depth = data_cfg[\"with_depth\"]\n\n self.height = data_cfg[\"height\"] # 480\n self.width = data_cfg[\"width\"] # 640\n\n self.cache_dir = data_cfg.get(\"cache_dir\", osp.join(PROJ_ROOT, \".cache\")) # .cache\n self.use_cache = data_cfg.get(\"use_cache\", True)\n self.num_to_load = data_cfg[\"num_to_load\"] # -1\n self.filter_invalid = data_cfg.get(\"filter_invalid\", True)\n ##################################################\n\n # NOTE: careful! 
Only the selected objects\n self.cat_ids = [cat_id for cat_id, obj_name in ref.ycbv.id2obj.items() if obj_name in self.objs]\n # map selected objs to [0, num_objs-1]\n self.cat2label = {v: i for i, v in enumerate(self.cat_ids)} # id_map\n self.label2cat = {label: cat for cat, label in self.cat2label.items()}\n self.obj2label = OrderedDict((obj, obj_id) for obj_id, obj in enumerate(self.objs))\n ##########################################################\n\n self.scenes = [f\"{i:06d}\" for i in range(50)]\n\n def __call__(self):\n \"\"\"Load light-weight instance annotations of all images into a list of\n dicts in Detectron2 format.\n\n Do not load heavy data into memory in this file, since we will\n load the annotations of all images into memory.\n \"\"\"\n # cache the dataset_dicts to avoid loading masks from files\n hashed_file_name = hashlib.md5(\n (\n \"\".join([str(fn) for fn in self.objs])\n + \"dataset_dicts_{}_{}_{}_{}_{}\".format(\n self.name, self.dataset_root, self.with_masks, self.with_depth, __name__\n )\n ).encode(\"utf-8\")\n ).hexdigest()\n cache_path = osp.join(self.cache_dir, \"dataset_dicts_{}_{}.pkl\".format(self.name, hashed_file_name))\n\n if osp.exists(cache_path) and self.use_cache:\n logger.info(\"load cached dataset dicts from {}\".format(cache_path))\n return mmcv.load(cache_path)\n\n t_start = time.perf_counter()\n\n logger.info(\"loading dataset dicts: {}\".format(self.name))\n self.num_instances_without_valid_segmentation = 0\n self.num_instances_without_valid_box = 0\n dataset_dicts = [] # ######################################################\n # it is slow because of loading and converting masks to rle\n for scene in tqdm(self.scenes):\n scene_id = int(scene)\n scene_root = osp.join(self.dataset_root, scene)\n\n gt_dict = mmcv.load(osp.join(scene_root, \"scene_gt.json\"))\n gt_info_dict = mmcv.load(osp.join(scene_root, \"scene_gt_info.json\"))\n cam_dict = mmcv.load(osp.join(scene_root, \"scene_camera.json\"))\n\n for str_im_id in tqdm(gt_dict, postfix=f\"{scene_id}\"):\n int_im_id = int(str_im_id)\n rgb_path = osp.join(scene_root, \"rgb/{:06d}.jpg\").format(int_im_id)\n assert osp.exists(rgb_path), rgb_path\n\n depth_path = osp.join(scene_root, \"depth/{:06d}.png\".format(int_im_id))\n\n scene_im_id = f\"{scene_id}/{int_im_id}\"\n\n K = np.array(cam_dict[str_im_id][\"cam_K\"], dtype=np.float32).reshape(3, 3)\n depth_factor = 1000.0 / cam_dict[str_im_id][\"depth_scale\"] # 10000\n\n record = {\n \"dataset_name\": self.name,\n \"file_name\": osp.relpath(rgb_path, PROJ_ROOT),\n \"depth_file\": osp.relpath(depth_path, PROJ_ROOT),\n \"height\": self.height,\n \"width\": self.width,\n \"image_id\": int_im_id,\n \"scene_im_id\": scene_im_id, # for evaluation\n \"cam\": K,\n \"depth_factor\": depth_factor,\n \"img_type\": \"syn_pbr\", # NOTE: has background\n }\n insts = []\n for anno_i, anno in enumerate(gt_dict[str_im_id]):\n obj_id = anno[\"obj_id\"]\n if obj_id not in self.cat_ids:\n continue\n cur_label = self.cat2label[obj_id] # 0-based label\n R = np.array(anno[\"cam_R_m2c\"], dtype=\"float32\").reshape(3, 3)\n t = np.array(anno[\"cam_t_m2c\"], dtype=\"float32\") / 1000.0\n pose = np.hstack([R, t.reshape(3, 1)])\n quat = mat2quat(R).astype(\"float32\")\n\n proj = (record[\"cam\"] @ t.T).T\n proj = proj[:2] / proj[2]\n\n bbox_visib = gt_info_dict[str_im_id][anno_i][\"bbox_visib\"]\n bbox_obj = gt_info_dict[str_im_id][anno_i][\"bbox_obj\"]\n x1, y1, w, h = bbox_visib\n if self.filter_invalid:\n if h <= 1 or w <= 1:\n self.num_instances_without_valid_box += 1\n 
continue\n\n mask_file = osp.join(scene_root, \"mask/{:06d}_{:06d}.png\".format(int_im_id, anno_i))\n mask_visib_file = osp.join(scene_root, \"mask_visib/{:06d}_{:06d}.png\".format(int_im_id, anno_i))\n assert osp.exists(mask_file), mask_file\n assert osp.exists(mask_visib_file), mask_visib_file\n # load mask visib TODO: load both mask_visib and mask_full\n mask_single = mmcv.imread(mask_visib_file, \"unchanged\")\n area = mask_single.sum()\n if area < 3: # filter out too small or nearly invisible instances\n self.num_instances_without_valid_segmentation += 1\n continue\n\n visib_fract = gt_info_dict[str_im_id][anno_i].get(\"visib_fract\", 1.0)\n\n mask_rle = binary_mask_to_rle(mask_single, compressed=True)\n\n xyz_path = osp.join(self.xyz_root, f\"{scene_id:06d}/{int_im_id:06d}_{anno_i:06d}-xyz.pkl\")\n assert osp.exists(xyz_path), xyz_path\n inst = {\n \"category_id\": cur_label, # 0-based label\n \"bbox\": bbox_visib, # TODO: load both bbox_obj and bbox_visib\n \"bbox_mode\": BoxMode.XYWH_ABS,\n \"pose\": pose,\n \"quat\": quat,\n \"trans\": t,\n \"centroid_2d\": proj, # absolute (cx, cy)\n \"segmentation\": mask_rle,\n \"mask_full_file\": mask_file, # TODO: load as mask_full, rle\n \"visib_fract\": visib_fract,\n \"xyz_path\": xyz_path,\n }\n\n model_info = self.models_info[str(obj_id)]\n inst[\"model_info\"] = model_info\n # TODO: using full mask and full xyz\n for key in [\"bbox3d_and_center\"]:\n inst[key] = self.models[cur_label][key]\n insts.append(inst)\n if len(insts) == 0: # filter im without anno\n continue\n record[\"annotations\"] = insts\n dataset_dicts.append(record)\n\n if self.num_instances_without_valid_segmentation > 0:\n logger.warning(\n \"Filtered out {} instances without valid segmentation. \"\n \"There might be issues in your dataset generation process.\".format(\n self.num_instances_without_valid_segmentation\n )\n )\n if self.num_instances_without_valid_box > 0:\n logger.warning(\n \"Filtered out {} instances without valid box. 
\"\n \"There might be issues in your dataset generation process.\".format(self.num_instances_without_valid_box)\n )\n ##########################################################################\n if self.num_to_load > 0:\n self.num_to_load = min(int(self.num_to_load), len(dataset_dicts))\n dataset_dicts = dataset_dicts[: self.num_to_load]\n logger.info(\"loaded {} dataset dicts, using {}s\".format(len(dataset_dicts), time.perf_counter() - t_start))\n\n mmcv.mkdir_or_exist(osp.dirname(cache_path))\n mmcv.dump(dataset_dicts, cache_path, protocol=4)\n logger.info(\"Dumped dataset_dicts to {}\".format(cache_path))\n return dataset_dicts\n\n @lazy_property\n def models_info(self):\n models_info_path = osp.join(self.models_root, \"models_info.json\")\n assert osp.exists(models_info_path), models_info_path\n models_info = mmcv.load(models_info_path) # key is str(obj_id)\n return models_info\n\n @lazy_property\n def models(self):\n \"\"\"Load models into a list.\"\"\"\n cache_path = osp.join(self.models_root, \"models_{}.pkl\".format(self.name))\n if osp.exists(cache_path) and self.use_cache:\n # dprint(\"{}: load cached object models from {}\".format(self.name, cache_path))\n return mmcv.load(cache_path)\n\n models = []\n for obj_name in self.objs:\n model = inout.load_ply(\n osp.join(self.models_root, f\"obj_{ref.ycbv.obj2id[obj_name]:06d}.ply\"), vertex_scale=self.scale_to_meter\n )\n # NOTE: the bbox3d_and_center is not obtained from centered vertices\n # for BOP models, not a big problem since they had been centered\n model[\"bbox3d_and_center\"] = misc.get_bbox3d_and_center(model[\"pts\"])\n\n models.append(model)\n logger.info(\"cache models to {}\".format(cache_path))\n mmcv.dump(models, cache_path, protocol=4)\n return models\n\n def image_aspect_ratio(self):\n return self.width / self.height # 4/3\n\n\n########### register datasets ############################################################\n\n\ndef get_ycbv_metadata(obj_names, ref_key):\n \"\"\"task specific metadata.\"\"\"\n data_ref = ref.__dict__[ref_key]\n\n cur_sym_infos = {} # label based key\n loaded_models_info = data_ref.get_models_info()\n\n for i, obj_name in enumerate(obj_names):\n obj_id = data_ref.obj2id[obj_name]\n model_info = loaded_models_info[str(obj_id)]\n if \"symmetries_discrete\" in model_info or \"symmetries_continuous\" in model_info:\n sym_transforms = misc.get_symmetry_transformations(model_info, max_sym_disc_step=0.01)\n sym_info = np.array([sym[\"R\"] for sym in sym_transforms], dtype=np.float32)\n else:\n sym_info = None\n cur_sym_infos[i] = sym_info\n\n meta = {\"thing_classes\": obj_names, \"sym_infos\": cur_sym_infos}\n return meta\n\n\nycbv_model_root = \"BOP_DATASETS/ycbv/models/\"\n################################################################################\n\n\nSPLITS_YCBV_PBR = dict(\n ycbv_train_pbr=dict(\n name=\"ycbv_train_pbr\",\n objs=ref.ycbv.objects, # selected objects\n dataset_root=osp.join(DATASETS_ROOT, \"BOP_DATASETS/ycbv/train_pbr\"),\n models_root=osp.join(DATASETS_ROOT, \"BOP_DATASETS/ycbv/models\"),\n xyz_root=osp.join(DATASETS_ROOT, \"BOP_DATASETS/ycbv/train_pbr/xyz_crop\"),\n scale_to_meter=0.001,\n with_masks=True, # (load masks but may not use it)\n with_depth=True, # (load depth path here, but may not use it)\n height=480,\n width=640,\n use_cache=True,\n num_to_load=-1,\n filter_invalid=True,\n ref_key=\"ycbv\",\n )\n)\n\n# single obj splits\nfor obj in ref.ycbv.objects:\n for split in [\"train_pbr\"]:\n name = \"ycbv_{}_{}\".format(obj, split)\n if split in 
[\"train_pbr\"]:\n filter_invalid = True\n elif split in [\"test\"]:\n filter_invalid = False\n else:\n raise ValueError(\"{}\".format(split))\n if name not in SPLITS_YCBV_PBR:\n SPLITS_YCBV_PBR[name] = dict(\n name=name,\n objs=[obj], # only this obj\n dataset_root=osp.join(DATASETS_ROOT, \"BOP_DATASETS/ycbv/train_pbr\"),\n models_root=osp.join(DATASETS_ROOT, \"BOP_DATASETS/ycbv/models\"),\n xyz_root=osp.join(DATASETS_ROOT, \"BOP_DATASETS/ycbv/train_pbr/xyz_crop\"),\n scale_to_meter=0.001,\n with_masks=True, # (load masks but may not use it)\n with_depth=True, # (load depth path here, but may not use it)\n height=480,\n width=640,\n use_cache=True,\n num_to_load=-1,\n filter_invalid=filter_invalid,\n ref_key=\"ycbv\",\n )\n\n\ndef register_with_name_cfg(name, data_cfg=None):\n \"\"\"Assume pre-defined datasets live in `./datasets`.\n\n Args:\n name: datasnet_name,\n data_cfg: if name is in existing SPLITS, use pre-defined data_cfg\n otherwise requires data_cfg\n data_cfg can be set in cfg.DATA_CFG.name\n \"\"\"\n dprint(\"register dataset: {}\".format(name))\n if name in SPLITS_YCBV_PBR:\n used_cfg = SPLITS_YCBV_PBR[name]\n else:\n assert data_cfg is not None, f\"dataset name {name} is not registered\"\n used_cfg = data_cfg\n DatasetCatalog.register(name, YCBV_PBR_Dataset(used_cfg))\n # something like eval_types\n MetadataCatalog.get(name).set(\n id=\"ycbv\", # NOTE: for pvnet to determine module\n ref_key=used_cfg[\"ref_key\"],\n objs=used_cfg[\"objs\"],\n eval_error_types=[\"ad\", \"rete\", \"proj\"],\n evaluator_type=\"bop\",\n **get_ycbv_metadata(obj_names=used_cfg[\"objs\"], ref_key=used_cfg[\"ref_key\"]),\n )\n\n\ndef get_available_datasets():\n return list(SPLITS_YCBV_PBR.keys())\n\n\n#### tests ###############################################\ndef test_vis():\n dset_name = sys.argv[1]\n assert dset_name in DatasetCatalog.list()\n\n meta = MetadataCatalog.get(dset_name)\n dprint(\"MetadataCatalog: \", meta)\n objs = meta.objs\n\n t_start = time.perf_counter()\n dicts = DatasetCatalog.get(dset_name)\n logger.info(\"Done loading {} samples with {:.3f}s.\".format(len(dicts), time.perf_counter() - t_start))\n\n dirname = \"output/{}-data-vis\".format(dset_name)\n os.makedirs(dirname, exist_ok=True)\n for d in dicts:\n img = read_image_cv2(d[\"file_name\"], format=\"BGR\")\n depth = mmcv.imread(d[\"depth_file\"], \"unchanged\") / 10000.0\n\n imH, imW = img.shape[:2]\n annos = d[\"annotations\"]\n masks = [cocosegm2mask(anno[\"segmentation\"], imH, imW) for anno in annos]\n bboxes = [anno[\"bbox\"] for anno in annos]\n bbox_modes = [anno[\"bbox_mode\"] for anno in annos]\n bboxes_xyxy = np.array(\n [BoxMode.convert(box, box_mode, BoxMode.XYXY_ABS) for box, box_mode in zip(bboxes, bbox_modes)]\n )\n kpts_3d_list = [anno[\"bbox3d_and_center\"] for anno in annos]\n quats = [anno[\"quat\"] for anno in annos]\n transes = [anno[\"trans\"] for anno in annos]\n Rs = [quat2mat(quat) for quat in quats]\n # 0-based label\n cat_ids = [anno[\"category_id\"] for anno in annos]\n K = d[\"cam\"]\n kpts_2d = [misc.project_pts(kpt3d, K, R, t) for kpt3d, R, t in zip(kpts_3d_list, Rs, transes)]\n\n labels = [objs[cat_id] for cat_id in cat_ids]\n for _i in range(len(annos)):\n img_vis = vis_image_mask_bbox_cv2(\n img, masks[_i : _i + 1], bboxes=bboxes_xyxy[_i : _i + 1], labels=labels[_i : _i + 1]\n )\n img_vis_kpts2d = misc.draw_projected_box3d(img_vis.copy(), kpts_2d[_i])\n xyz_path = annos[_i][\"xyz_path\"]\n xyz_info = mmcv.load(xyz_path)\n x1, y1, x2, y2 = xyz_info[\"xyxy\"]\n xyz_crop = 
xyz_info[\"xyz_crop\"].astype(np.float32)\n xyz = np.zeros((imH, imW, 3), dtype=np.float32)\n xyz[y1 : y2 + 1, x1 : x2 + 1, :] = xyz_crop\n xyz_show = get_emb_show(xyz)\n xyz_crop_show = get_emb_show(xyz_crop)\n img_xyz = img.copy() / 255.0\n mask_xyz = ((xyz[:, :, 0] != 0) | (xyz[:, :, 1] != 0) | (xyz[:, :, 2] != 0)).astype(\"uint8\")\n fg_idx = np.where(mask_xyz != 0)\n img_xyz[fg_idx[0], fg_idx[1], :] = xyz_show[fg_idx[0], fg_idx[1], :3]\n img_xyz_crop = img_xyz[y1 : y2 + 1, x1 : x2 + 1, :]\n img_vis_crop = img_vis[y1 : y2 + 1, x1 : x2 + 1, :]\n # diff mask\n diff_mask_xyz = np.abs(masks[_i] - mask_xyz)[y1 : y2 + 1, x1 : x2 + 1]\n\n grid_show(\n [\n img[:, :, [2, 1, 0]],\n img_vis[:, :, [2, 1, 0]],\n img_vis_kpts2d[:, :, [2, 1, 0]],\n depth,\n # xyz_show,\n diff_mask_xyz,\n xyz_crop_show,\n img_xyz[:, :, [2, 1, 0]],\n img_xyz_crop[:, :, [2, 1, 0]],\n img_vis_crop,\n ],\n [\n \"img\",\n \"vis_img\",\n \"img_vis_kpts2d\",\n \"depth\",\n \"diff_mask_xyz\",\n \"xyz_crop_show\",\n \"img_xyz\",\n \"img_xyz_crop\",\n \"img_vis_crop\",\n ],\n row=3,\n col=3,\n )\n\n\nif __name__ == \"__main__\":\n \"\"\"Test the dataset loader.\n\n Usage:\n python -m core/datasets/ycbv_pbr.py ycbv_pbr_train\n \"\"\"\n from lib.vis_utils.image import grid_show\n from lib.utils.setup_logger import setup_my_logger\n\n import detectron2.data.datasets # noqa # add pre-defined metadata\n from lib.vis_utils.image import vis_image_mask_bbox_cv2\n from core.utils.utils import get_emb_show\n from core.utils.data_utils import read_image_cv2\n\n print(\"sys.argv:\", sys.argv)\n logger = setup_my_logger(name=\"core\")\n register_with_name_cfg(sys.argv[1])\n print(\"dataset catalog: \", DatasetCatalog.list())\n\n test_vis()\n"
] | [
[
"numpy.where",
"numpy.array",
"numpy.abs",
"numpy.zeros"
]
] |
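The BOP loader above stores each ground-truth pose three ways: a 3x4 `[R|t]` matrix, a quaternion via `transforms3d`, and a projected 2D centroid from the camera intrinsics. A self-contained sketch of that bookkeeping with made-up numbers (the intrinsics and pose below are generic placeholders for illustration, not YCB-V's actual calibration):

```python
import numpy as np
from transforms3d.quaternions import mat2quat, quat2mat

# Made-up intrinsics and ground-truth pose, for illustration only.
K = np.array([[600.0, 0.0, 320.0],
              [0.0, 600.0, 240.0],
              [0.0, 0.0, 1.0]], dtype=np.float32)
R = np.array([[0.0, -1.0, 0.0],           # cam_R_m2c: 90 deg about z
              [1.0, 0.0, 0.0],
              [0.0, 0.0, 1.0]], dtype="float32")
t = np.array([10.0, -20.0, 1000.0], dtype="float32") / 1000.0  # mm -> m

pose = np.hstack([R, t.reshape(3, 1)])    # 3x4 [R|t], as stored per instance
quat = mat2quat(R).astype("float32")      # (w, x, y, z)
assert np.allclose(quat2mat(quat), R, atol=1e-6)  # round trip holds

proj = K @ t                              # project the translation (centroid)
centroid_2d = proj[:2] / proj[2]          # absolute (cx, cy) in pixels
print(pose.shape, quat, centroid_2d)
```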
feliximmohr/master_thesis_software | [
"875348747900da4732558a2d790d6ebe827ffac3"
] | [
"python_model_DNN/auditory_model/utils/load_data_raw.py"
] | [
"\"\"\"\nA python module that provides functions and classes to load data for training.\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nfrom keras.utils import Sequence\n\n\nclass DataGenerator_raw(Sequence):\n \"\"\"\n A class for a data generator for Keras sequential models. Creates\n the training samples in batches on the fly from the non-reduntant\n (raw) database. \n\n Attributes\n ----------\n list_IDs : numpy.ndarray\n A one-dimensional array containing global IDs or sample IDs.\n feature_data : numpy.ndarray\n Two-dimensional array of feature data from the non-redundant\n (raw) database.\n target_data : numpy.ndarray\n Two-dimensional array of target data from the non-redundant\n (raw) database.\n batch_size : int, optional\n Batch size.\n dim : int, optional\n Input dimension or number of features. Defaults to 96.\n shuffle : bool, optional\n Optionally shuffle the data for each epoch.\n n_frames : int, optional\n Number of frames/repititions in feature computation.\n n_angles : int, optional\n Number of angles. Defaults to 360.\n \"\"\"\n def __init__(self, list_IDs, feature_data, target_data, batch_size=32,\n dim=96, shuffle=True, n_frames=100, n_angles=360):\n \"\"\"Initialization.\"\"\"\n self.list_IDs = list_IDs\n self.feature_data = feature_data\n self.target_data = target_data\n self.batch_size = batch_size\n self.dim = dim\n self.shuffle = shuffle\n self.n_subjects = target_data.shape[1]\n self.n_frames = n_frames\n self.n_angles = n_angles\n self.on_epoch_end() #trigger once at beginning\n\n def __len__(self):\n \"\"\"Denotes the number of batches per epoch.\"\"\"\n return int(np.floor(len(self.list_IDs) / self.batch_size))\n\n def __getitem__(self, index):\n \"\"\"Generate one batch of data.\"\"\"\n # Generate indexes of the batch\n indexes = self.indexes[index*self.batch_size:(index+1)\n *self.batch_size]\n\n # Find list of IDs\n list_IDs_temp = [self.list_IDs[k] for k in indexes]\n\n # Generate data\n X, y = self.__data_generation(list_IDs_temp)\n return X, y\n\n def on_epoch_end(self):\n \"\"\"Updates indexes after each epoch.\"\"\"\n self.indexes = np.arange(len(self.list_IDs))\n if self.shuffle == True:\n np.random.shuffle(self.indexes)\n\n def __data_generation(self, list_IDs_temp):\n \"\"\"\n Generates data containing batch_size samples\n X : (n_samples, dim)\n \"\"\"\n # Initialization\n X = np.empty((self.batch_size, self.dim))\n y = np.empty((self.batch_size))\n # Generate data\n for i, ID in enumerate(list_IDs_temp):\n # one feature row for all 20 subjects 0...19 -> f=0\n feature_idx = np.floor(ID/self.n_subjects)\n # one target row for 100 frames for each subject 0...1999 -> t=0\n target_idx = np.floor(ID/(self.n_subjects*self.n_frames))\n subject_idx = ID - feature_idx*self.n_subjects \n \n # Store sample\n X[i,] = self.feature_data[int(feature_idx)]\n\n # Store targets\n y[i] = self.target_data[int(target_idx),int(subject_idx)]\n return X, y\n\n\ndef load_raw_ft_h5(filename, key_f='feature_data', key_t='target_data'):\n \"\"\"\n Load raw feature and target data from single HDF5 file specified by\n filename and key. 
If no keys provided, use default keys.\n \n Parameters\n ----------\n filename : str\n Name of a HDF5 file containing the data.\n key_f : str, optional\n Key identifying the feature data in HDF5 file.\n key_t : str, optional\n Key identifying the target data in HDF5 file.\n \n Returns\n -------\n feature_df : pandas DataFrame object\n DataFrame containing all features.\n target_df : pandas DataFrame object\n DataFrame containing all targets.\n f_column_labels : list of strings\n Column labels of feature DataFrame.\n \"\"\"\n feature_df = pd.read_hdf(filename, key=key_f)\n target_df = pd.read_hdf(filename, key=key_t)\n f_column_labels = feature_df.columns.tolist()\n return feature_df, target_df, f_column_labels\n\n\ndef load_raw_IDs_h5(filename, key_ID='ID_reference_table',\n key_p='position_table', key_c='condition_table',\n key_fp='feature_par'):\n \"\"\"\n Load raw metadata from single HDF5 file specified by filename and\n key. If no keys provided, use default keys.\n \n Parameters\n ----------\n filename : str\n Name of a HDF5 file containing the data.\n key_ID : str, optional\n Key identifying the ID reference table in HDF5 file.\n key_p : str, optional\n Key identifying the position reference table in HDF5 file.\n key_c : str, optional\n Key identifying the condition reference table in HDF5 file.\n key_fp : str, optional\n Key identifying the feature parameter data in HDF5 file.\n \n Returns\n -------\n ID_ref_df : pandas DataFrame object\n DataFrame containing ID reference table.\n pos_table_df : pandas DataFrame object\n DataFrame containing position table.\n cond_table_df : pandas DataFrame object\n DataFrame containing condition table.\n par_df : pandas DataFrame object\n DataFrame containing feature parameter data.\n \"\"\"\n\n ID_ref_df = pd.read_hdf(filename, key=key_ID)#.reset_index(drop=True)\n pos_table_df = pd.read_hdf(filename, key=key_p)\n cond_table_df = pd.read_hdf(filename, key=key_c)\n par_df = pd.read_hdf(filename, key=key_fp)\n return ID_ref_df, pos_table_df, cond_table_df, par_df\n\n\ndef load_raw_all_h5(filename, key_f=None, key_t=None, key_ID=None, key_p=None,\n key_c=None, key_fp=None):\n \"\"\"\n Load complete raw data from single HDF5 file specified by filename\n and key. 
If no keys provided, use default keys.\n \n Parameters\n ----------\n filename : str\n Name of a HDF5 file containing the data.\n key_f : str, optional\n Key identifying the feature data in HDF5 file.\n key_t : str, optional\n Key identifying the target data in HDF5 file.\n key_ID : str, optional\n Key identifying the ID reference table in HDF5 file.\n key_p : str, optional\n Key identifying the position reference table in HDF5 file.\n key_c : str, optional\n Key identifying the condition reference table in HDF5 file.\n key_fp : str, optional\n Key identifying the feature parameter data in HDF5 file.\n \n Returns\n -------\n feature_df : pandas DataFrame object\n DataFrame containing all features.\n target_df : pandas DataFrame object\n DataFrame containing all targets.\n ID_ref_df : pandas DataFrame object\n DataFrame containing ID reference table.\n pos_table_df : pandas DataFrame object\n DataFrame containing position table.\n cond_table_df : pandas DataFrame object\n DataFrame containing condition table.\n par_df : pandas DataFrame object\n DataFrame containing feature parameter data.\n \"\"\"\n\n feature_df, target_df, _ = load_raw_ft_h5(filename)\n ID_ref_df, pos_table_df, cond_table_df, par_df = load_raw_IDs_h5(filename)\n return feature_df, target_df, ID_ref_df, pos_table_df, cond_table_df, par_df"
] | [
[
"pandas.read_hdf",
"numpy.random.shuffle",
"numpy.empty",
"numpy.floor"
]
] |
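`DataGenerator_raw` above decodes a flat global ID into a feature row (shared by all subjects), a target row (shared by all frames of a subject), and a subject column. A tiny runnable sketch of that index arithmetic, using integer `//` and `%` in place of `np.floor` (equivalent for the non-negative IDs used here); the toy sizes are assumptions chosen to make the mapping easy to inspect:

```python
import numpy as np

n_subjects, n_frames = 4, 3
n_target_rows = 2
n_feature_rows = n_target_rows * n_frames       # one feature row per frame

feature_data = np.random.rand(n_feature_rows, 96)     # dim = 96 features
target_data = np.random.rand(n_target_rows, n_subjects)

for ID in [0, 5, 11, 23]:                       # 24 global IDs in total
    feature_idx = ID // n_subjects              # one row serves all subjects
    target_idx = ID // (n_subjects * n_frames)  # one row serves all frames
    subject_idx = ID % n_subjects
    x = feature_data[feature_idx]               # sample, shape (96,)
    y = target_data[target_idx, subject_idx]    # its scalar target
    print(ID, "->", feature_idx, target_idx, subject_idx)
```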
dataiku/dss-plugin-deeplearning-image | [
"d7ae22e171c374b92bdca8f0730e6f529fbea3cb"
] | [
"custom-recipes/deeplearning-image-score-v2/recipe.py"
] | [
"import pandas as pd\nfrom dataiku.customrecipe import get_recipe_config\n\nimport dku_deeplearning_image.utils as utils\nimport dku_deeplearning_image.dku_constants as constants\nfrom dku_deeplearning_image.config_handler import create_dku_config\n\nfrom dku_deeplearning_image.recipes import ScoreRecipe\nfrom dku_deeplearning_image.misc_objects import DkuFileManager\nfrom dku_deeplearning_image.error_handler import raise_plugin_error\n\n\ndef get_input_output():\n file_manager = DkuFileManager()\n image_folder = file_manager.get_input_folder('image_folder')\n model_folder = file_manager.get_input_folder('model_folder')\n output_dataset = file_manager.get_output_dataset('scored_dataset')\n return image_folder, model_folder, output_dataset\n\n\[email protected]_func(txt='output dataset writing')\ndef write_output_dataset(output_dataset, image_folder, classification):\n images_paths = image_folder.list_paths_in_partition()\n output_df = utils.build_prediction_output_df(images_paths, classification)\n output_dataset.write_with_schema(pd.DataFrame(output_df))\n\n\[email protected]_func(txt='recipes')\ndef run():\n recipe_config = get_recipe_config()\n\n config = create_dku_config(recipe_config, constants.GOAL.SCORE)\n image_folder, model_folder, output_dataset = get_input_output()\n recipe = ScoreRecipe(config)\n\n classification = recipe.compute(image_folder, model_folder)\n\n write_output_dataset(output_dataset, image_folder, classification)\n\n\ntry:\n run()\nexcept Exception as err:\n raise_plugin_error(err)\n"
] | [
[
"pandas.DataFrame"
]
] |
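The scoring recipe above delegates the output layout to `utils.build_prediction_output_df`. A hypothetical sketch of what such a helper could look like; the column names and the shape of `classification` are assumptions, since the plugin's `utils` module defines the real layout:

```python
import pandas as pd

def build_prediction_output_df(images_paths, classification):
    # Assumed layout: one row per image with the top label and its score.
    rows = []
    for path, preds in zip(images_paths, classification):
        best = max(preds, key=preds.get)
        rows.append({"images": path,
                     "prediction": best,
                     "probability": preds[best]})
    return pd.DataFrame(rows)

classification = [{"cat": 0.9, "dog": 0.1}, {"cat": 0.2, "dog": 0.8}]
df = build_prediction_output_df(["img/1.jpg", "img/2.jpg"], classification)
print(df)
```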
Wu-Zhe/maskgan-local | [
"446688d9317fea0a5cbb4bd8b1cf227df6679dc7"
] | [
"official/recommendation/ncf_keras_main.py"
] | [
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"NCF framework to train and evaluate the NeuMF model.\n\nThe NeuMF model assembles both MF and MLP models under the NCF framework. Check\n`neumf_model.py` for more details about the models.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\n# pylint: disable=g-bad-import-order\nfrom absl import app as absl_app\nfrom absl import flags\nimport tensorflow as tf\n# pylint: enable=g-bad-import-order\n\nfrom official.datasets import movielens\nfrom official.recommendation import ncf_common\nfrom official.recommendation import neumf_model\nfrom official.recommendation import constants as rconst\nfrom official.utils.logs import logger\nfrom official.utils.logs import mlperf_helper\nfrom official.utils.misc import model_helpers\n\n\nFLAGS = flags.FLAGS\n\n\ndef _keras_loss(y_true, y_pred):\n # Here we are using the exact same loss used by the estimator\n loss = tf.losses.sparse_softmax_cross_entropy(\n labels=tf.cast(y_true, tf.int32),\n logits=y_pred)\n return loss\n\n\ndef _get_metric_fn(params):\n \"\"\"Get the metrix fn used by model compile.\"\"\"\n batch_size = params[\"batch_size\"]\n\n def metric_fn(y_true, y_pred):\n \"\"\"Returns the in_top_k metric.\"\"\"\n softmax_logits = y_pred\n logits = tf.slice(softmax_logits, [0, 1], [batch_size, 1])\n\n # The dup mask should be obtained from input data, but we did not yet find\n # a good way of getting it with keras, so we set it to zeros to neglect the\n # repetition correction\n dup_mask = tf.zeros([batch_size, 1])\n\n cross_entropy, metric_fn, in_top_k, ndcg, metric_weights = (\n neumf_model.compute_eval_loss_and_metrics_helper(\n logits,\n softmax_logits,\n dup_mask,\n params[\"num_neg\"],\n params[\"match_mlperf\"],\n params[\"use_xla_for_gpu\"]))\n\n in_top_k = tf.cond(\n tf.keras.backend.learning_phase(),\n lambda: tf.zeros(shape=in_top_k.shape, dtype=in_top_k.dtype),\n lambda: in_top_k)\n\n return in_top_k\n\n return metric_fn\n\n\ndef _get_train_and_eval_data(producer, params):\n \"\"\"Returns the datasets for training and evalutating.\"\"\"\n\n train_input_fn = producer.make_input_fn(is_training=True)\n train_input_dataset = train_input_fn(params)\n\n def preprocess_eval_input(features):\n labels = tf.zeros_like(features[movielens.USER_COLUMN])\n return features, labels\n\n eval_input_fn = producer.make_input_fn(is_training=False)\n eval_input_dataset = eval_input_fn(params).map(\n lambda features: preprocess_eval_input(features))\n\n return train_input_dataset, eval_input_dataset\n\n\nclass IncrementEpochCallback(tf.keras.callbacks.Callback):\n \"\"\"A callback to increase the requested epoch for the data producer.\n\n The reason why we need this is because we can only buffer a limited amount of\n data. So we keep a moving window to represent the buffer. 
This is to move the\n one of the window's boundaries for each epoch.\n \"\"\"\n\n def __init__(self, producer):\n self._producer = producer\n\n def on_epoch_begin(self, epoch, logs=None):\n self._producer.increment_request_epoch()\n\n\ndef _get_keras_model(params):\n \"\"\"Constructs and returns the model.\"\"\"\n batch_size = params['batch_size']\n\n user_input = tf.keras.layers.Input(\n shape=(),\n batch_size=batch_size,\n name=movielens.USER_COLUMN,\n dtype=rconst.USER_DTYPE)\n\n item_input = tf.keras.layers.Input(\n shape=(),\n batch_size=batch_size,\n name=movielens.ITEM_COLUMN,\n dtype=rconst.ITEM_DTYPE)\n\n base_model = neumf_model.construct_model(user_input, item_input, params)\n base_model_output = base_model.output\n\n zeros = tf.keras.layers.Lambda(\n lambda x: x * 0)(base_model_output)\n\n softmax_logits = tf.keras.layers.concatenate(\n [zeros, base_model_output],\n axis=-1)\n\n keras_model = tf.keras.Model(\n inputs=[user_input, item_input],\n outputs=softmax_logits)\n\n keras_model.summary()\n return keras_model\n\n\ndef run_ncf(_):\n \"\"\"Run NCF training and eval with Keras.\"\"\"\n # TODO(seemuch): Support different train and eval batch sizes\n if FLAGS.eval_batch_size != FLAGS.batch_size:\n tf.logging.warning(\n \"The Keras implementation of NCF currently does not support batch_size \"\n \"!= eval_batch_size ({} vs. {}). Overriding eval_batch_size to match \"\n \"batch_size\".format(FLAGS.eval_batch_size, FLAGS.batch_size)\n )\n FLAGS.eval_batch_size = FLAGS.batch_size\n\n params = ncf_common.parse_flags(FLAGS)\n\n # ncf_common rounds eval_batch_size (this is needed due to a reshape during\n # eval). This carries over that rounding to batch_size as well.\n params['batch_size'] = params['eval_batch_size']\n\n num_users, num_items, num_train_steps, num_eval_steps, producer = (\n ncf_common.get_inputs(params))\n\n params[\"num_users\"], params[\"num_items\"] = num_users, num_items\n producer.start()\n model_helpers.apply_clean(flags.FLAGS)\n\n keras_model = _get_keras_model(params)\n optimizer = ncf_common.get_optimizer(params)\n\n keras_model.compile(\n loss=_keras_loss,\n metrics=[_get_metric_fn(params)],\n optimizer=optimizer)\n\n train_input_dataset, eval_input_dataset = _get_train_and_eval_data(\n producer, params)\n\n keras_model.fit(\n train_input_dataset,\n epochs=FLAGS.train_epochs,\n callbacks=[IncrementEpochCallback(producer)],\n verbose=2)\n\n tf.logging.info(\"Training done. Start evaluating\")\n\n eval_results = keras_model.evaluate(\n eval_input_dataset,\n steps=num_eval_steps,\n verbose=2)\n\n tf.logging.info(\"Keras evaluation is done.\")\n\n return eval_results\n\n\ndef main(_):\n with logger.benchmark_context(FLAGS), \\\n mlperf_helper.LOGGER(FLAGS.output_ml_perf_compliance_logging):\n mlperf_helper.set_ncf_root(os.path.split(os.path.abspath(__file__))[0])\n if FLAGS.tpu:\n raise ValueError(\"NCF in Keras does not support TPU for now\")\n if FLAGS.num_gpus > 1:\n raise ValueError(\"NCF in Keras does not support distribution strategies. \"\n \"Please set num_gpus to 1\")\n run_ncf(FLAGS)\n\n\nif __name__ == \"__main__\":\n tf.logging.set_verbosity(tf.logging.INFO)\n ncf_common.define_ncf_flags()\n absl_app.run(main)\n"
] | [
[
"tensorflow.logging.set_verbosity",
"tensorflow.zeros",
"tensorflow.keras.layers.Lambda",
"tensorflow.keras.layers.Input",
"tensorflow.keras.backend.learning_phase",
"tensorflow.logging.info",
"tensorflow.keras.Model",
"tensorflow.zeros_like",
"tensorflow.slice",
"tensorflow.keras.layers.concatenate",
"tensorflow.cast"
]
] |
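The NCF Keras entry above turns the base model's single logit into two-class softmax logits by concatenating a zeroed copy of the output, which makes sparse softmax cross-entropy behave like a sigmoid binary loss. A minimal NumPy sketch of that equivalence (illustrative values, not from the dataset):

import numpy as np

z = 1.3  # a single logit from the base model (hypothetical value)
two_class = np.array([0.0, z])  # mirrors concatenate([zeros, base_model_output])
softmax_p1 = np.exp(two_class[1]) / np.exp(two_class).sum()
sigmoid = 1.0 / (1.0 + np.exp(-z))
assert np.isclose(softmax_p1, sigmoid)  # softmax([0, z])[1] == sigmoid(z)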
splunk/splunk-mltk-container-docker | [
"6e98e5984d99d7a3318f3e68c224d2a5163b717b"
] | [
"app/model/dnn_regressor.py"
] | [
"#!/usr/bin/env python\n# coding: utf-8\n\n\n \n# In[ ]:\n\n\n# mltkc_import\n# this definition exposes all python module imports that should be available in all subsequent commands\nimport json\nimport datetime\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\nfrom tensorflow import keras\n\n# global constants\nMODEL_DIRECTORY = \"/srv/app/model/data/\"\n\n\n\n\n\n\n\n \n# In[ ]:\n\n\n# mltkc_stage\n# this cell is not executed from MLTK and should only be used for staging data into the notebook environment\ndef stage(name):\n with open(\"data/\"+name+\".csv\", 'r') as f:\n df = pd.read_csv(f)\n with open(\"data/\"+name+\".json\", 'r') as f:\n param = json.load(f)\n return df, param\n\n\n\n\n\n\n\n \n# In[ ]:\n\n\n# mltkc_init\n# initialize the model\n# params: data and parameters\n# returns the model object which will be used as a reference to call fit, apply and summary subsequently\ndef init(df,param):\n X = df[param['feature_variables']]\n print(\"FIT build model with input shape \" + str(X.shape))\n learning_rate = 0.1\n model_name = \"default_linear_regressor\"\n if 'options' in param:\n if 'model_name' in param['options']:\n model_name = param['options']['model_name']\n if 'params' in param['options']:\n if 'learning_rate' in param['options']['params']:\n learning_rate = int(param['options']['params']['learning_rate'])\n\n feature_columns = []\n for feature_name in param['feature_variables']:\n feature_columns.append(tf.feature_column.numeric_column(feature_name, dtype=tf.float32))\n \n model = tf.estimator.DNNRegressor(\n feature_columns=feature_columns,\n hidden_units=[32, 16, 8],\n model_dir=MODEL_DIRECTORY + model_name + \"/\",\n )\n return model\n\n\n\n\n\n\n\n \n# In[ ]:\n\n\n# mltkc_stage_create_model_fit\n# returns a fit info json object\ndef make_input_fn(df, param, n_epochs=None, batch_size=None, shuffle=True):\n def input_fn():\n dataset = tf.data.Dataset.from_tensor_slices((df[param['feature_variables']].to_dict(orient='list'), df[param['target_variables']].values))\n if shuffle:\n dataset = dataset.shuffle(buffer_size=len(df))\n return dataset.repeat(n_epochs).batch(batch_size)\n return input_fn\n\ndef fit(model,df,param):\n returns = {}\n X = df[param['feature_variables']]\n model_epochs = 100\n model_batch_size = 32\n if 'options' in param:\n if 'params' in param['options']:\n if 'epochs' in param['options']['params']:\n model_epochs = int(param['options']['params']['epochs'])\n if 'batch_size' in param['options']['params']:\n model_batch_size = int(param['options']['params']['batch_size'])\n # connect model training to tensorboard\n log_dir=\"/srv/notebooks/logs/fit/\" + datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n # tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)\n # run the training\n input_fn_train = make_input_fn(df,param,model_epochs,model_batch_size)\n model.train(input_fn=input_fn_train, max_steps=model_epochs)\n # memorize parameters\n returns['model_epochs'] = model_epochs\n returns['model_batch_size'] = model_batch_size\n returns['model_loss_acc'] = model.evaluate(input_fn=input_fn_train)\n return returns\n\n\n\n\n\n\n\n \n# In[ ]:\n\n\n# mltkc_stage_create_model_apply\ndef apply(model,df,param):\n X = df[param['feature_variables']]\n model_epochs = 1\n model_batch_size = 32\n if 'options' in param:\n if 'params' in param['options']:\n if 'batch_size' in param['options']['params']:\n model_batch_size = int(param['options']['params']['batch_size'])\n output_fn_train = 
make_input_fn(df,param,model_epochs,model_batch_size)\n y_hat = pd.DataFrame([p['predictions'] for p in list(model.predict(output_fn_train))])\n return y_hat\n\n\n\n\n\n\n\n \n# In[ ]:\n\n\n# save model to name in expected convention \"<algo_name>_<model_name>.h5\"\ndef save(model,name):\n # model.save(MODEL_DIRECTORY + name + \".h5\")\n # serving_input_fn = tf.estimator.export.build_parsing_serving_input_receiver_fn(tf.feature_column.make_parse_example_spec([input_column]))\n # export_path = model.export_saved_model(MODEL_DIRECTORY + name +\"/\", serving_input_fn)\n return model\n\n\n\n\n\n \n# In[ ]:\n\n\n# load model from name in expected convention \"<algo_name>_<model_name>.h5\"\ndef load(name):\n # model = keras.models.load_model(MODEL_DIRECTORY + name + \".h5\")\n model = None # loading is commented out above; return None instead of raising a NameError\n return model\n\n\n\n\n\n \n# In[ ]:\n\n\n# return model summary\ndef summary(model=None):\n returns = {\"version\": {\"tensorflow\": tf.__version__, \"keras\": keras.__version__} }\n if model is not None:\n returns[\"summary\"] = \"DNN regressor\"\n return returns\n\n\n\n\n\n\n\n"
] | [
[
"pandas.read_csv",
"tensorflow.feature_column.numeric_column",
"tensorflow.estimator.DNNRegressor"
]
] |
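The dnn_regressor.py entry above feeds pandas data into tf.estimator.DNNRegressor through an input_fn built on tf.data.Dataset.from_tensor_slices. A condensed, self-contained sketch of that pattern, assuming a TensorFlow version where the tf.estimator API is still available (toy data; note that the original fit() appears to pass its epoch count directly as max_steps, and the learning_rate it parses with int() is never handed to the estimator):

import pandas as pd
import tensorflow as tf

df = pd.DataFrame({"x": [1.0, 2.0, 3.0, 4.0], "y": [2.0, 4.0, 6.0, 8.0]})  # toy frame
feature_columns = [tf.feature_column.numeric_column("x", dtype=tf.float32)]
model = tf.estimator.DNNRegressor(feature_columns=feature_columns, hidden_units=[8, 4])

def input_fn():
    # dict-of-lists features, matching the make_input_fn above
    ds = tf.data.Dataset.from_tensor_slices(
        (df[["x"]].to_dict(orient="list"), df["y"].values))
    return ds.shuffle(len(df)).repeat().batch(2)

model.train(input_fn=input_fn, max_steps=50)
print(model.evaluate(input_fn=input_fn, steps=10))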
johnson880319/Software | [
"045894227f359e0a3a3ec5b7a53f8d1ebc06acdd"
] | [
"catkin_ws/src:/opt/ros/kinetic/lib/python2.7/dist-packages:/home/bala/duckietown/catkin_ws/src:/home/bala/duckietown/catkin_ws/src/lib/python2.7/site-packages/geometry/manifolds/matrix_linear_space.py"
] | [
"# coding=utf-8\nfrom abc import abstractmethod\n\nfrom contracts import check, contract\nfrom geometry.manifolds.differentiable_manifold import DifferentiableManifold\nfrom geometry.utils.numpy_backport import assert_allclose\nimport numpy as np\n\n__all__ = ['MatrixLinearSpace']\n\n\nclass MatrixLinearSpace(DifferentiableManifold):\n\n @contract(dimension='int,>0')\n def __init__(self, dimension, shape):\n ''' Note dimension is the intrinsic dimension. '''\n # TODO: give basis?\n self.shape = shape\n DifferentiableManifold.__init__(self, dimension=dimension)\n\n def zero(self):\n ''' Returns the zero element for this algebra. '''\n return np.zeros(self.shape)\n\n def norm(self, v):\n ''' Return the norm of a vector in the algebra.\n This is used in :py:class:`MatrixLieGroup` to measure\n distances between points in the Lie group.\n '''\n return np.linalg.norm(v, 2)\n\n # Manifolds methods\n def distance(self, a, b):\n return self.norm(a - b)\n\n @contract(bv='belongs_ts')\n def expmap(self, bv):\n base, vel = bv\n return base + vel\n\n @contract(base='belongs', p='belongs', returns='belongs_ts')\n def logmap(self, base, p):\n return base, p - base\n\n @contract(x='array')\n def belongs(self, x):\n if x.shape != self.shape:\n raise ValueError('Expected shape %r, not %r.' %\n (self.shape, x.shape))\n\n # TODO: make contract\n assert np.all(np.isreal(x)), \"Expected real vector\"\n proj = self.project(x)\n assert_allclose(proj, x, atol=1e-8) # XXX: tol\n\n def belongs_ts(self, bv):\n# formatm('bv', bv)\n check('tuple(shape(x),shape(x))', bv, x=self.shape)\n base, vel = bv\n self.belongs(base)\n self.belongs(vel)\n\n @abstractmethod\n def project(self, v): # @UnusedVariable\n ''' Projects a vector onto this Lie Algebra. '''\n\n def project_ts(self, bv):\n base, vel = bv\n return base, self.project(vel)\n"
] | [
[
"numpy.isreal",
"numpy.linalg.norm",
"numpy.zeros"
]
] |
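MatrixLinearSpace above leaves project() abstract; a concrete subclass only has to supply the projection onto its subspace, because in a linear space expmap and logmap reduce to addition and subtraction. A standalone sketch with symmetric matrices as the subspace (a hypothetical analogue, not a class from the repo):

import numpy as np

class SymmetricMatrices:
    """Toy analogue of a MatrixLinearSpace subclass."""
    def __init__(self, n):
        self.shape = (n, n)
    def project(self, v):
        return 0.5 * (v + v.T)  # nearest symmetric matrix in Frobenius norm
    def expmap(self, bv):
        base, vel = bv
        return base + vel  # linear space: geodesics are straight lines
    def logmap(self, base, p):
        return base, p - base

S = SymmetricMatrices(2)
A = np.array([[1.0, 2.0], [0.0, 3.0]])
print(S.project(A))  # [[1., 1.], [1., 3.]]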
zay3d/FinRL | [
"17633b874993f9e126131797dcc481730b8e32ac"
] | [
"finrl/env/env_stocktrading.py"
] | [
"import numpy as np\nimport pandas as pd\nfrom gym.utils import seeding\nimport gym\nfrom gym import spaces\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport pickle\nfrom stable_baselines3.common.vec_env import DummyVecEnv\n#from stable_baselines3.common import logger\n\n\nclass StockTradingEnv(gym.Env):\n \"\"\"A stock trading environment for OpenAI gym\"\"\"\n metadata = {'render.modes': ['human']}\n\n def __init__(self, \n df, \n stock_dim,\n hmax, \n initial_amount,\n buy_cost_pct,\n sell_cost_pct,\n reward_scaling,\n state_space,\n action_space,\n tech_indicator_list,\n turbulence_threshold=None,\n make_plots = False, \n print_verbosity = 10,\n day = 0, \n initial=True,\n previous_state=[],\n model_name = '',\n mode='',\n iteration=''):\n self.day = day\n self.df = df\n self.stock_dim = stock_dim\n self.hmax = hmax\n self.initial_amount = initial_amount\n self.buy_cost_pct = buy_cost_pct\n self.sell_cost_pct = sell_cost_pct\n self.reward_scaling = reward_scaling\n self.state_space = state_space\n self.action_space = action_space\n self.tech_indicator_list = tech_indicator_list\n self.action_space = spaces.Box(low = -1, high = 1,shape = (self.action_space,)) \n self.observation_space = spaces.Box(low=-np.inf, high=np.inf, shape = (self.state_space,))\n self.data = self.df.loc[self.day,:]\n self.terminal = False \n self.make_plots = make_plots\n self.print_verbosity = print_verbosity\n self.turbulence_threshold = turbulence_threshold\n self.initial = initial\n self.previous_state = previous_state\n self.model_name=model_name\n self.mode=mode \n self.iteration=iteration\n # initalize state\n self.state = self._initiate_state()\n \n # initialize reward\n self.reward = 0\n self.turbulence = 0\n self.cost = 0\n self.trades = 0\n self.episode = 0\n # memorize all the total balance change\n self.asset_memory = [self.initial_amount]\n self.rewards_memory = []\n self.actions_memory=[]\n self.date_memory=[self._get_date()]\n #self.reset()\n self._seed()\n \n\n\n def _sell_stock(self, index, action):\n def _do_sell_normal():\n if self.state[index+1]>0: \n # Sell only if the price is > 0 (no missing data in this particular date)\n # perform sell action based on the sign of the action\n if self.state[index+self.stock_dim+1] > 0:\n # Sell only if current asset is > 0\n sell_num_shares = min(abs(action),self.state[index+self.stock_dim+1])\n sell_amount = self.state[index+1] * sell_num_shares * (1- self.sell_cost_pct)\n #update balance\n self.state[0] += sell_amount\n\n self.state[index+self.stock_dim+1] -= sell_num_shares\n self.cost +=self.state[index+1] * sell_num_shares * self.sell_cost_pct\n self.trades+=1\n else:\n sell_num_shares = 0\n else:\n sell_num_shares = 0\n\n return sell_num_shares\n \n # perform sell action based on the sign of the action\n if self.turbulence_threshold is not None:\n if self.turbulence>=self.turbulence_threshold:\n if self.state[index+1]>0: \n # Sell only if the price is > 0 (no missing data in this particular date)\n # if turbulence goes over threshold, just clear out all positions \n if self.state[index+self.stock_dim+1] > 0:\n # Sell only if current asset is > 0\n sell_num_shares = self.state[index+self.stock_dim+1]\n sell_amount = self.state[index+1]*sell_num_shares* (1- self.sell_cost_pct)\n #update balance\n self.state[0] += sell_amount\n self.state[index+self.stock_dim+1] =0\n self.cost += self.state[index+1]*self.state[index+self.stock_dim+1]* \\\n self.sell_cost_pct\n self.trades+=1\n else:\n sell_num_shares = 0\n else:\n 
sell_num_shares = 0\n else:\n sell_num_shares = _do_sell_normal()\n else:\n sell_num_shares = _do_sell_normal()\n\n return sell_num_shares\n\n \n def _buy_stock(self, index, action):\n\n def _do_buy():\n if self.state[index+1]>0: \n #Buy only if the price is > 0 (no missing data in this particular date) \n available_amount = self.state[0] // self.state[index+1]\n # print('available_amount:{}'.format(available_amount))\n \n #update balance\n buy_num_shares = min(available_amount, action)\n buy_amount = self.state[index+1] * buy_num_shares * (1+ self.buy_cost_pct)\n self.state[0] -= buy_amount\n\n self.state[index+self.stock_dim+1] += buy_num_shares\n \n self.cost+=self.state[index+1] * buy_num_shares * self.buy_cost_pct\n self.trades+=1\n else:\n buy_num_shares = 0\n\n return buy_num_shares\n\n # perform buy action based on the sign of the action\n if self.turbulence_threshold is None:\n buy_num_shares = _do_buy()\n else:\n if self.turbulence< self.turbulence_threshold:\n buy_num_shares = _do_buy()\n else:\n buy_num_shares = 0\n pass\n\n return buy_num_shares\n\n def _make_plot(self):\n plt.plot(self.asset_memory,'r')\n plt.savefig('results/account_value_trade_{}.png'.format(self.episode))\n plt.close()\n\n def step(self, actions):\n self.terminal = self.day >= len(self.df.index.unique())-1\n if self.terminal:\n # print(f\"Episode: {self.episode}\")\n if self.make_plots:\n self._make_plot() \n end_total_asset = self.state[0]+ \\\n sum(np.array(self.state[1:(self.stock_dim+1)])*np.array(self.state[(self.stock_dim+1):(self.stock_dim*2+1)]))\n df_total_value = pd.DataFrame(self.asset_memory)\n tot_reward = self.state[0]+sum(np.array(self.state[1:(self.stock_dim+1)])*np.array(self.state[(self.stock_dim+1):(self.stock_dim*2+1)]))- self.initial_amount \n df_total_value.columns = ['account_value']\n df_total_value['date'] = self.date_memory\n df_total_value['daily_return']=df_total_value['account_value'].pct_change(1)\n if df_total_value['daily_return'].std() !=0:\n sharpe = (252**0.5)*df_total_value['daily_return'].mean()/ \\\n df_total_value['daily_return'].std()\n df_rewards = pd.DataFrame(self.rewards_memory)\n df_rewards.columns = ['account_rewards']\n df_rewards['date'] = self.date_memory[:-1]\n if self.episode % self.print_verbosity == 0:\n print(f\"day: {self.day}, episode: {self.episode}\")\n print(f\"begin_total_asset: {self.asset_memory[0]:0.2f}\")\n print(f\"end_total_asset: {end_total_asset:0.2f}\")\n print(f\"total_reward: {tot_reward:0.2f}\")\n print(f\"total_cost: {self.cost:0.2f}\")\n print(f\"total_trades: {self.trades}\")\n if df_total_value['daily_return'].std() != 0:\n print(f\"Sharpe: {sharpe:0.3f}\")\n print(\"=================================\")\n\n if (self.model_name!='') and (self.mode!=''):\n df_actions = self.save_action_memory()\n df_actions.to_csv('results/actions_{}_{}_{}.csv'.format(self.mode,self.model_name, self.iteration))\n df_total_value.to_csv('results/account_value_{}_{}_{}.csv'.format(self.mode,self.model_name, self.iteration),index=False)\n df_rewards.to_csv('results/account_rewards_{}_{}_{}.csv'.format(self.mode,self.model_name, self.iteration),index=False)\n plt.plot(self.asset_memory,'r')\n plt.savefig('results/account_value_{}_{}_{}.png'.format(self.mode,self.model_name, self.iteration),index=False)\n plt.close()\n\n # Add outputs to logger interface\n #logger.record(\"environment/portfolio_value\", end_total_asset)\n #logger.record(\"environment/total_reward\", tot_reward)\n #logger.record(\"environment/total_reward_pct\", (tot_reward / 
(end_total_asset - tot_reward)) * 100)\n #logger.record(\"environment/total_cost\", self.cost)\n #logger.record(\"environment/total_trades\", self.trades)\n\n return self.state, self.reward, self.terminal, {}\n\n else:\n\n actions = actions * self.hmax #actions initially is scaled between 0 to 1\n actions = (actions.astype(int)) #convert into integer because we can't by fraction of shares\n if self.turbulence_threshold is not None:\n if self.turbulence>=self.turbulence_threshold:\n actions=np.array([-self.hmax]*self.stock_dim)\n begin_total_asset = self.state[0]+ \\\n sum(np.array(self.state[1:(self.stock_dim+1)])*np.array(self.state[(self.stock_dim+1):(self.stock_dim*2+1)]))\n #print(\"begin_total_asset:{}\".format(begin_total_asset))\n \n argsort_actions = np.argsort(actions)\n \n sell_index = argsort_actions[:np.where(actions < 0)[0].shape[0]]\n buy_index = argsort_actions[::-1][:np.where(actions > 0)[0].shape[0]]\n\n for index in sell_index:\n # print(f\"Num shares before: {self.state[index+self.stock_dim+1]}\")\n # print(f'take sell action before : {actions[index]}')\n actions[index] = self._sell_stock(index, actions[index]) * (-1)\n # print(f'take sell action after : {actions[index]}')\n # print(f\"Num shares after: {self.state[index+self.stock_dim+1]}\")\n\n for index in buy_index:\n # print('take buy action: {}'.format(actions[index]))\n actions[index] = self._buy_stock(index, actions[index])\n\n self.actions_memory.append(actions)\n\n self.day += 1\n self.data = self.df.loc[self.day,:] \n if self.turbulence_threshold is not None: \n self.turbulence = self.data['turbulence'].values[0]\n self.state = self._update_state()\n \n end_total_asset = self.state[0]+ \\\n sum(np.array(self.state[1:(self.stock_dim+1)])*np.array(self.state[(self.stock_dim+1):(self.stock_dim*2+1)]))\n self.asset_memory.append(end_total_asset)\n self.date_memory.append(self._get_date())\n self.reward = end_total_asset - begin_total_asset \n self.rewards_memory.append(self.reward)\n self.reward = self.reward*self.reward_scaling\n\n return self.state, self.reward, self.terminal, {}\n\n def reset(self): \n #initiate state\n self.state = self._initiate_state()\n \n if self.initial:\n self.asset_memory = [self.initial_amount]\n else:\n previous_total_asset = self.previous_state[0]+ \\\n sum(np.array(self.state[1:(self.stock_dim+1)])*np.array(self.previous_state[(self.stock_dim+1):(self.stock_dim*2+1)]))\n self.asset_memory = [previous_total_asset]\n\n self.day = 0\n self.data = self.df.loc[self.day,:]\n self.turbulence = 0\n self.cost = 0\n self.trades = 0\n self.terminal = False \n # self.iteration=self.iteration\n self.rewards_memory = []\n self.actions_memory=[]\n self.date_memory=[self._get_date()]\n \n self.episode+=1\n\n return self.state\n \n def render(self, mode='human',close=False):\n return self.state\n\n def _initiate_state(self):\n if self.initial:\n # For Initial State\n if len(self.df.tic.unique())>1:\n # for multiple stock\n state = [self.initial_amount] + \\\n self.data.close.values.tolist() + \\\n [0]*self.stock_dim + \\\n sum([self.data[tech].values.tolist() for tech in self.tech_indicator_list ], [])\n else:\n # for single stock\n state = [self.initial_amount] + \\\n [self.data.close] + \\\n [0]*self.stock_dim + \\\n sum([[self.data[tech]] for tech in self.tech_indicator_list ], [])\n else:\n #Using Previous State\n if len(self.df.tic.unique())>1:\n # for multiple stock\n state = [self.previous_state[0]] + \\\n self.data.close.values.tolist() + \\\n 
self.previous_state[(self.stock_dim+1):(self.stock_dim*2+1)] + \\\n sum([self.data[tech].values.tolist() for tech in self.tech_indicator_list ], [])\n else:\n # for single stock\n state = [self.previous_state[0]] + \\\n [self.data.close] + \\\n self.previous_state[(self.stock_dim+1):(self.stock_dim*2+1)] + \\\n sum([[self.data[tech]] for tech in self.tech_indicator_list ], [])\n return state\n\n def _update_state(self):\n if len(self.df.tic.unique())>1:\n # for multiple stock\n state = [self.state[0]] + \\\n self.data.close.values.tolist() + \\\n list(self.state[(self.stock_dim+1):(self.stock_dim*2+1)]) + \\\n sum([self.data[tech].values.tolist() for tech in self.tech_indicator_list ], [])\n\n else:\n # for single stock\n state = [self.state[0]] + \\\n [self.data.close] + \\\n list(self.state[(self.stock_dim+1):(self.stock_dim*2+1)]) + \\\n sum([[self.data[tech]] for tech in self.tech_indicator_list ], [])\n \n return state\n\n def _get_date(self):\n if len(self.df.tic.unique())>1:\n date = self.data.date.unique()[0]\n else:\n date = self.data.date\n return date\n\n def save_asset_memory(self):\n date_list = self.date_memory\n asset_list = self.asset_memory\n #print(len(date_list))\n #print(len(asset_list))\n df_account_value = pd.DataFrame({'date':date_list,'account_value':asset_list})\n return df_account_value\n\n def save_action_memory(self):\n if len(self.df.tic.unique())>1:\n # date and close price length must match actions length\n date_list = self.date_memory[:-1]\n df_date = pd.DataFrame(date_list)\n df_date.columns = ['date']\n \n action_list = self.actions_memory\n df_actions = pd.DataFrame(action_list)\n df_actions.columns = self.data.tic.values\n df_actions.index = df_date.date\n #df_actions = pd.DataFrame({'date':date_list,'actions':action_list})\n else:\n date_list = self.date_memory[:-1]\n action_list = self.actions_memory\n df_actions = pd.DataFrame({'date':date_list,'actions':action_list})\n return df_actions\n\n def _seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n\n\n def get_sb_env(self):\n e = DummyVecEnv([lambda: self])\n obs = e.reset()\n return e, obs\n"
] | [
[
"matplotlib.use",
"numpy.array",
"pandas.DataFrame",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"numpy.where",
"numpy.argsort"
]
] |
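At episode end the StockTradingEnv above reports an annualized Sharpe ratio computed from its account-value memory as (252**0.5) * mean(daily_return) / std(daily_return). The same computation in isolation, on a toy equity curve (hypothetical numbers; separately, the plt.savefig(..., index=False) call in the terminal branch looks like a carry-over from to_csv and may raise a TypeError on recent matplotlib):

import pandas as pd

account_value = pd.Series([1_000_000, 1_010_000, 1_005_000, 1_020_000, 1_030_000])
daily_return = account_value.pct_change(1)  # first entry is NaN and is skipped
sharpe = (252 ** 0.5) * daily_return.mean() / daily_return.std()
print(f"Sharpe: {sharpe:0.3f}")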
silent567/pytorch_geometric | [
"20f839935751f319541c1e0c075f09bb3da2a80a"
] | [
"examples/proteins_mincut_pool.py"
] | [
"import os.path as osp\nfrom math import ceil\n\nimport torch\nimport torch.nn.functional as F\nfrom torch.nn import Linear\nfrom torch_geometric.datasets import TUDataset\nfrom torch_geometric.data import DataLoader\nfrom torch_geometric.nn import GCNConv, DenseGraphConv, dense_mincut_pool\nfrom torch_geometric.utils import to_dense_batch, to_dense_adj\n\npath = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'PROTEINS')\ndataset = TUDataset(path, name='PROTEINS').shuffle()\naverage_nodes = int(dataset.data.x.size(0) / len(dataset))\nn = (len(dataset) + 9) // 10\ntest_dataset = dataset[:n]\nval_dataset = dataset[n:2 * n]\ntrain_dataset = dataset[2 * n:]\ntest_loader = DataLoader(test_dataset, batch_size=20)\nval_loader = DataLoader(val_dataset, batch_size=20)\ntrain_loader = DataLoader(train_dataset, batch_size=20)\n\n\nclass Net(torch.nn.Module):\n def __init__(self, in_channels, out_channels, hidden_channels=32):\n super(Net, self).__init__()\n\n self.conv1 = GCNConv(in_channels, hidden_channels)\n\n num_nodes = ceil(0.5 * average_nodes)\n self.pool1 = Linear(hidden_channels, num_nodes)\n\n self.conv2 = DenseGraphConv(hidden_channels, hidden_channels)\n\n num_nodes = ceil(0.5 * num_nodes)\n self.pool2 = Linear(hidden_channels, num_nodes)\n\n self.conv3 = DenseGraphConv(hidden_channels, hidden_channels)\n\n self.lin1 = Linear(hidden_channels, hidden_channels)\n self.lin2 = Linear(hidden_channels, out_channels)\n\n def forward(self, x, edge_index, batch):\n x = F.relu(self.conv1(x, edge_index))\n\n x, mask = to_dense_batch(x, batch)\n adj = to_dense_adj(edge_index, batch)\n\n s = self.pool1(x)\n x, adj, mc1, o1 = dense_mincut_pool(x, adj, s, mask)\n\n x = F.relu(self.conv2(x, adj))\n s = self.pool2(x)\n\n x, adj, mc2, o2 = dense_mincut_pool(x, adj, s)\n\n x = self.conv3(x, adj)\n\n x = x.mean(dim=1)\n x = F.relu(self.lin1(x))\n x = self.lin2(x)\n return F.log_softmax(x, dim=-1), mc1 + mc2, o1 + o2\n\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nmodel = Net(dataset.num_features, dataset.num_classes).to(device)\noptimizer = torch.optim.Adam(model.parameters(), lr=5e-4, weight_decay=1e-4)\n\n\ndef train(epoch):\n model.train()\n loss_all = 0\n\n for data in train_loader:\n data = data.to(device)\n optimizer.zero_grad()\n output, mc_loss, o_loss = model(data.x, data.edge_index, data.batch)\n loss = F.nll_loss(output, data.y.view(-1)) + mc_loss + o_loss\n loss.backward()\n loss_all += data.y.size(0) * loss.item()\n optimizer.step()\n return loss_all / len(train_dataset)\n\n\ndef test(loader):\n model.eval()\n correct = 0\n\n for data in loader:\n data = data.to(device)\n pred, mc_loss, o_loss = model(data.x, data.edge_index, data.batch)\n loss = F.nll_loss(pred, data.y.view(-1)) + mc_loss + o_loss\n correct += pred.max(dim=1)[1].eq(data.y.view(-1)).sum().item()\n\n return loss, correct / len(loader.dataset)\n\n\nbest_val_acc = test_acc = 0\nbest_val_loss = float('inf')\npatience = start_patience = 50\nfor epoch in range(1, 15000):\n train_loss = train(epoch)\n _, train_acc = test(train_loader)\n val_loss, val_acc = test(val_loader)\n if val_loss < best_val_loss:\n test_loss, test_acc = test(test_loader)\n best_val_acc = val_acc\n patience = start_patience\n else:\n patience -= 1\n if patience == 0:\n break\n print('Epoch: {:03d}, '\n 'Train Loss: {:.3f}, Train Acc: {:.3f}, '\n 'Val Loss: {:.3f}, Val Acc: {:.3f}, '\n 'Test Loss: {:.3f}, Test Acc: {:.3f}'.format(epoch, train_loss,\n train_acc, val_loss,\n val_acc, test_loss,\n test_acc))\n"
] | [
[
"torch.nn.Linear",
"torch.cuda.is_available",
"torch.nn.functional.log_softmax"
]
] |
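The training loop in proteins_mincut_pool.py above gates its patience counter on val_loss < best_val_loss but never updates best_val_loss, so the condition stays true and patience is reset every epoch. A corrected early-stopping skeleton (toy loss sequence, illustrative only):

best_val_loss = float("inf")
patience = start_patience = 3
for epoch, val_loss in enumerate([1.00, 0.90, 0.95, 0.94, 0.93], start=1):
    if val_loss < best_val_loss:
        best_val_loss = val_loss  # the update the original script omits
        patience = start_patience
    else:
        patience -= 1
        if patience == 0:
            print(f"early stop at epoch {epoch}")
            break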
devalab/SCONES | [
"b2b81c0a00c5a82b28c918b33cbdbad804727cfd"
] | [
"SCONES/model/dataset.py"
] | [
"import torch\nimport numpy as np\n\nfrom torch.utils.data import Dataset\nfrom utils.constants import AA_ID_DICT, AA_SASA\n\nclass PreprocessedDataset(Dataset):\n def __init__(self, radius_cutoff, device, logger=None):\n self.logger = logger\n if logger is None:\n import logging\n self.logger = logging\n self.device = device\n self.reset(radius_cutoff)\n\n def reset(self, radius_cutoff):\n self.dataset = []\n self.structure_cache = {}\n self.radius_cutoff = radius_cutoff\n \n self.counts = {\n \"no_target_skipped\" : 0,\n \"insufficient_structure\" : 0,\n \"insufficient_env_structure\" : 0,\n \"no_struct_at_mutation\" : 0,\n \"ddG_out_of_range\" : 0,\n \"proline_mutation\" : 0,\n \"not_enough_neighbors\" : 0,\n \"missing_nonstd_residue\" : 0,\n \"pH_out_of_range_skipped\" : 0\n }\n\n def add_dataset(self, dataset, training_filters):\n for sample in dataset:\n self.add(sample, training_filters)\n\n def preprocess_structure(self, structure):\n primary = list(structure[\"sequence\"])\n seqlen = len(primary)\n\n # [seqlen]\n transformed_primary = np.vectorize(AA_ID_DICT.get)(primary)\n transformed_primary = transformed_primary\n transformed_primary = torch.from_numpy(transformed_primary)\n\n assert(structure[\"distance_map_cb\"].shape[0] == seqlen)\n \n mask = structure[\"structure_mask\"]\n pos_avail = np.count_nonzero(mask)\n if pos_avail / len(mask) < 0.5:\n self.counts[\"insufficient_structure\"] += 1\n return None\n\n sas = torch.from_numpy(structure[\"sas\"])\n sas = sas.float()\n \n processed_struct = {\n \"primary\" : primary,\n \"primary_idx\" : transformed_primary,\n \"sas\" : sas,\n \"mask\" : mask,\n \"dmap_cb\" : structure[\"distance_map_cb\"],\n \"omap_w\" : structure[\"orientation_map_omega\"],\n \"omap_theta\" : structure[\"orientation_map_theta\"],\n \"omap_phi\" : structure[\"orientation_map_phi\"],\n }\n return processed_struct\n\n def add(self, sample, training_filters=False):\n def add_sample(id, dataset_sample, sequence, ref_residue, mutated_residue, primary_rt, primary_props_rt, primary_mt, primary_props_mt, position, neighbors, edge_features, edge_features_rev, target):\n sample = {\n \"id\" : id,\n \"dataset_sample\" : dataset_sample,\n \"sequence\" : sequence,\n \"ref_residue\" : ref_residue,\n \"mutated_residue\" : mutated_residue,\n \"primary_rt\" : primary_rt,\n \"primary_props_rt\" : primary_props_rt,\n \"primary_mt\" : primary_mt,\n \"primary_props_mt\" : primary_props_mt,\n \"position\" : position,\n \"neighbors\" : neighbors,\n \"edge_features\" : edge_features,\n \"edge_features_rev\" : edge_features_rev,\n \"target\" : target,\n }\n\n if self.device:\n for key, value in sample.items():\n if torch.is_tensor(value):\n sample[key] = sample[key].to(self.device)\n\n self.dataset.append(sample) \n\n target_ddG = sample[\"ddG\"]\n if np.isnan(target_ddG):\n self.counts[\"no_target_skipped\"] += 1\n return\n\n if training_filters and np.abs(target_ddG) > 8.0:\n self.counts[\"ddG_out_of_range\"] += 1\n return\n\n pH = sample[\"dataset_sample\"].get(\"pH\", 7.0)\n if training_filters and (2.0 > pH or pH > 12.0):\n self.counts[\"pH_out_of_range_skipped\"] += 1\n return\n\n if 'X' in sample[\"structure\"][\"sequence\"]:\n self.counts[\"missing_nonstd_residue\"] += 1\n return\n\n raw_structure = sample[\"structure\"]\n idx = raw_structure[\"id\"]\n if idx not in self.structure_cache:\n retval = self.preprocess_structure(raw_structure)\n if retval is None:\n return\n self.structure_cache[idx] = retval\n processed_structure = self.structure_cache[idx]\n\n sequence = 
processed_structure[\"primary\"]\n primary = processed_structure[\"primary_idx\"]\n mask = processed_structure[\"mask\"]\n position_c = sample[\"position\"] # central residue position\n\n sas = processed_structure[\"sas\"]\n dmap_cb = processed_structure[\"dmap_cb\"]\n omap_w = processed_structure[\"omap_w\"]\n omap_theta = processed_structure[\"omap_theta\"]\n omap_phi = processed_structure[\"omap_phi\"]\n\n # def is_struct_present(position):\n # if np.isnan(sas[position]):\n # return False\n # if np.any(np.isnan(dmap_cb[position, :])):\n # if mask[position] & 5 != 5:\n # return False\n # elif mask[position] & 7 != 7:\n # return False\n # return True\n\n def is_struct_present(position, sas=True):\n if sas and mask[position] & 16 == 0:\n return False\n if primary[position] == AA_ID_DICT['G']:\n if mask[position] & 5 != 5:\n return False\n elif mask[position] & 7 != 7:\n return False\n return True\n\n if not is_struct_present(position_c):\n self.counts[\"no_struct_at_mutation\"] += 1\n return\n\n # find neighbors using Cb\n cb_dist = dmap_cb[position_c, :]\n neighbors = np.where((cb_dist > 0) & (cb_dist < self.radius_cutoff))[0]\n num_neighbors = len(neighbors)\n assert (num_neighbors > 0)\n\n if training_filters and num_neighbors < 5:\n self.counts[\"not_enough_neighbors\"] += 1\n return\n\n if training_filters and not np.all([is_struct_present(i) for i in neighbors]):\n self.counts[\"insufficient_env_structure\"] += 1\n return\n\n neighbors = neighbors[[is_struct_present(i) for i in neighbors]]\n num_neighbors = len(neighbors)\n \n edge_features = torch.zeros((num_neighbors, 16))\n for i in range(num_neighbors):\n position_r = neighbors[i]\n\n cb_dist = dmap_cb[position_c, position_r]\n w_ang = omap_w[position_c, position_r]\n \n theta_ang12, theta_ang21 = omap_theta[position_c, position_r], omap_theta[position_r, position_c]\n phi_ang12, phi_ang21 = omap_phi[position_c, position_r], omap_phi[position_r, position_c]\n\n means8 = np.asarray([6.2193589e+00, 1.6547333e-01, 2.8232558e-02, 4.9734241e-03, 2.9435262e-01, 1.3330153e-01])\n scales8 = np.asarray([1.0191348, 0.02915691, 0.01040565, 0.00290022, 0.06066265, 0.0461563])\n\n edge_features[i, 0] = cb_dist\n edge_features[i, 1] = 1/cb_dist\n edge_features[i, 2] = (1/cb_dist)**2\n edge_features[i, 3] = (1/cb_dist)**3\n edge_features[i, 4] = np.exp(-cb_dist/5)\n edge_features[i, 5] = np.exp(-cb_dist/3)\n edge_features[i, :6] -= means8\n edge_features[i, :6] /= scales8\n\n if not np.isnan(w_ang):\n edge_features[i, 6] = np.sin(w_ang)\n edge_features[i, 7] = np.cos(w_ang)\n\n if not np.isnan(theta_ang12):\n edge_features[i, 8] = np.sin(theta_ang12)\n edge_features[i, 9] = np.cos(theta_ang12)\n \n if not np.isnan(theta_ang21):\n edge_features[i, 10] = np.sin(theta_ang21)\n edge_features[i, 11] = np.cos(theta_ang21)\n\n if not np.isnan(phi_ang12):\n edge_features[i, 12] = np.sin(phi_ang12)\n edge_features[i, 13] = np.cos(phi_ang12)\n\n if not np.isnan(phi_ang21):\n edge_features[i, 14] = np.sin(phi_ang21)\n edge_features[i, 15] = np.cos(phi_ang21)\n\n edge_features_rev = edge_features.clone()\n for i in range(num_neighbors):\n edge_features_rev[i, 8] = edge_features[i, 10]\n edge_features_rev[i, 9] = edge_features[i, 11]\n edge_features_rev[i, 10] = edge_features[i, 8]\n edge_features_rev[i, 11] = edge_features[i, 9]\n edge_features_rev[i, 12] = edge_features[i, 14]\n edge_features_rev[i, 13] = edge_features[i, 15]\n edge_features_rev[i, 14] = edge_features[i, 12]\n edge_features_rev[i, 15] = edge_features[i, 13]\n\n ref_residue = 
primary[position_c].item()\n mutated_residue = AA_ID_DICT[sample[\"mutated_residue\"]]\n\n primary_rt = primary.clone()\n primary_rt[position_c] = ref_residue\n\n primary_mt = primary.clone()\n primary_mt[position_c] = mutated_residue\n\n target = torch.zeros(1)\n target[0] = target_ddG\n\n primary_props_rt = sas.clone().reshape(1, -1)\n primary_props_mt = sas.clone().reshape(1, -1)\n primary_props_mt[0, position_c] *= AA_SASA[sample[\"mutated_residue\"]]/AA_SASA[sample[\"ref_residue\"]] # heuristic approximation of mutated residue SAS\n add_sample(sample[\"id\"], sample[\"dataset_sample\"], sequence, sample[\"ref_residue\"], sample[\"mutated_residue\"], primary_rt, primary_props_rt, primary_mt, primary_props_mt, position_c, neighbors, edge_features, edge_features_rev, target)\n\n def summary(self):\n self.logger.info(\"Dropped %d samples as they did not have ddG\" % self.counts[\"no_target_skipped\"])\n self.logger.info(\"Dropped %d samples as they have insufficient structure\" % self.counts[\"insufficient_structure\"])\n self.logger.info(\"Dropped %d samples as they have missing neighbor structure\" % self.counts[\"insufficient_env_structure\"])\n self.logger.info(\"Dropped %d samples as their pH during measurement was not in acceptable range\" % self.counts[\"pH_out_of_range_skipped\"])\n self.logger.info(\"Dropped %d samples as the mutation site didn't have local structure information\" % self.counts[\"no_struct_at_mutation\"])\n self.logger.info(\"Dropped %d samples as target was out of range\" % self.counts[\"ddG_out_of_range\"])\n self.logger.info(\"Dropped %d samples as mutation involved proline\" % self.counts[\"proline_mutation\"])\n self.logger.info(\"Dropped %d samples as they had too few neighbors\" % self.counts[\"not_enough_neighbors\"])\n self.logger.info(\"Dropped %d samples as they had missing or non-standard residue\" % self.counts[\"missing_nonstd_residue\"])\n\n # print(\"Average number of neighbors:\", np.mean(neighbor_counts))\n # print(\"Average edge features:\", np.mean(edge_features_pool, axis=0), np.std(edge_features_pool, axis=0))\n stabalizing_samples, neutral_samples, destabalizing_samples = 0, 0, 0\n for sample in self.dataset:\n target_ddG = sample[\"target\"].item()\n if target_ddG > 0.5: stabalizing_samples += 1\n elif target_ddG < -0.5: destabalizing_samples += 1\n else: neutral_samples += 1\n self.logger.info(\"Stabalizing samples: %d, neutral samples: %d, destabalizing samples: %d\" % (stabalizing_samples, neutral_samples, destabalizing_samples))\n\n def __len__(self):\n return len(self.dataset)\n\n def __getitem__(self, idx):\n if torch.is_tensor(idx):\n idx = idx.tolist()\n sample = self.dataset[idx]\n return sample\n\n \n"
] | [
[
"torch.zeros",
"numpy.count_nonzero",
"numpy.isnan",
"numpy.sin",
"numpy.vectorize",
"numpy.asarray",
"torch.is_tensor",
"numpy.exp",
"torch.from_numpy",
"numpy.where",
"numpy.abs",
"numpy.cos"
]
] |
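The edge features in SCONES/model/dataset.py above expand each Cb-Cb distance into six transforms (d, 1/d, 1/d^2, 1/d^3, exp(-d/5), exp(-d/3)) and standardize them with hard-coded means and scales before appending sin/cos of the orientation angles. The distance part in isolation, using the constants from the file and a toy distance value:

import numpy as np

cb_dist = 7.5  # hypothetical Cb-Cb distance in Angstroms
means = np.asarray([6.2193589e+00, 1.6547333e-01, 2.8232558e-02,
                    4.9734241e-03, 2.9435262e-01, 1.3330153e-01])
scales = np.asarray([1.0191348, 0.02915691, 0.01040565,
                     0.00290022, 0.06066265, 0.0461563])
feats = np.array([cb_dist, 1 / cb_dist, (1 / cb_dist) ** 2, (1 / cb_dist) ** 3,
                  np.exp(-cb_dist / 5), np.exp(-cb_dist / 3)])
print((feats - means) / scales)  # standardized edge features 0..5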
LaGauffre/SMCCompoMo | [
"242feab1f1a6f923b682cfb8b033bb9c96317dc3"
] | [
"Python_memoire/smclomo/loss_distribution.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Sep 2 15:42:14 2020\n\n@author: pierr\n\"\"\"\n\nimport scipy.special as sp\nimport math as ma\nimport pandas as pd\nimport numpy as np\nimport scipy.stats as st\nfrom scipy.optimize import minimize\n\n\ndef sim_gam_par(n, k, α, θ):\n \"\"\"\n Sample from a Gamma-Pareto model.\n\n Parameters\n ----------\n n : int \n sample size.\n k : float\n shape parameter of the Gamma distribution.\n α : float\n Tail index of the Pareto distribution.\n\n Returns\n -------\n array\n A sample drawn from the Weibull-Pareto distribution.\n \n Example\n -------\n k, α, θ = 1/2, 1/2, 5 \n X = sim_gam_par(1000, k, α, θ)\n \"\"\"\n β = θ / (k + α)\n r = α*sp.gamma(k)* sp.gammainc(k, θ / β) * np.exp(k+α)*(k+α)**(-k) / \\\n (1+ α*sp.gamma(k) * sp.gammainc(k, θ / β) * np.exp(k+α)*(k+α)**(-k))\n \n gamma_rv = st.gamma(k)\n par_rv = st.pareto(α)\n binom_rv = st.binom(1, r)\n par_rvs = θ * par_rv.rvs(size = n)\n binom_rvs = binom_rv.rvs(size = n)\n gamma_rvs = β * gamma_rv.ppf(sp.gammainc(k, θ / β) *\\\n np.random.uniform(size = n))\n return(binom_rvs * gamma_rvs + (1 - binom_rvs) * par_rvs)\n\n\ndef logp_gam_par(X):\n \"\"\"\n Likelihood function of the Gamma-Pareto model.\n\n Parameters\n ----------\n X : Array \n Insurance losses.\n\n Returns\n -------\n function\n Allows the evaluation of the likelihood in the parameters provided the \n data.\n \n Example\n -------\n k, α, θ = 1/2, 1/2, 5 \n X = sim_gam_par(100, k, α, θ)\n logp = logp_gam_par(X)\n logp(np.array([k, α, θ]))\n costFn = lambda parms: -logp(parms)\n bnds = ((0, None), (0, None), (0, None))\n θ0 = (1, 1, 1)\n minRes = minimize(costFn, θ0,bounds=bnds)\n minRes\n \"\"\"\n def logp(parms):\n k, α, θ = tuple(parms)\n \n if np.all(parms > 0):\n β = θ / (k + α)\n r = α*sp.gamma(k)* sp.gammainc(k,θ / β) * np.exp(k+α)*(k+α)**(-k) / \\\n (1+ α*sp.gamma(k) * sp.gammainc(k, θ / β) * np.exp(k+α)*(k+α)**(-k))\n if β > 0 and r > 0 and r < 1:\n X1 = X[X < θ]\n X2 = X[X >= θ]\n F1 = sp.gammainc(k, θ / β)\n \n return(len(X1) * (np.log(r) - np.log(F1) - np.log(sp.gamma(k)) - \\\n k * np.log(β)) - sum(X1) / β +\\\n (k-1) * sum(np.log(X1)) + len(X2) *(np.log(1-r) +\\\n np.log(α) + α * np.log(θ)) - (α + 1) * sum(np.log(X2))\n )\n else: \n return(-np.inf)\n \n else:\n return(-np.inf)\n return logp\n\n\n\n\ndef logd_gam_par(parms):\n \"\"\"\n density function of the Gamma-Pareto model.\n\n Parameters\n ----------\n parms : ndArray \n particles.\n\n Returns\n -------\n function\n Allows the evaluation of the density functions for multiple parameter\n values.\n \"\"\"\n k, α, θ = parms[:,0], parms[:,1], parms[:,2]\n β = θ / (k + α)\n r = α*sp.gamma(k)* sp.gammainc(k,θ / β) * np.exp(k+α)*(k+α)**(-k) / \\\n (1+ α*sp.gamma(k) * sp.gammainc(k, θ / β) * np.exp(k+α)*(k+α)**(-k))\n F1 = sp.gammainc(k, θ / β)\n def logd(x):\n res = np.zeros(len(α))\n s = np.logical_and(β > 0, r > 0, r < 1)\n s1 = np.logical_and(s, x < θ)\n s2 = np.logical_and(s, x >= θ)\n \n res1 = np.log(r[s1]) - np.log(F1[s1]) - np.log(sp.gamma(k[s1])) - \\\n k[s1] * np.log(β[s1]) - x / β[s1] + (k[s1]-1) * np.log(x)\n\n res2 = (np.log(1-r[s2]) + np.log(α[s2]) + α[s2] * \\\n np.log(θ[s2])) - (α[s2] + 1) * np.log(x)\n \n res[np.where(s1)] = res1\n res[np.where(s2)] = res2\n res[np.where(np.invert(s))] = -np.inf\n return(res)\n return logd\n\n\ndef sim_wei_par(n, k, α, θ):\n \"\"\"\n Sample from a Weibull-Pareto model.\n\n Parameters\n ----------\n n : int \n sample size.\n k : float\n shape parameter of the Weibull distribution.\n α : float\n Tail index of the Pareto 
distribution.\n\n Returns\n -------\n array\n A sample drawn from the Weibull-Pareto distribution.\n \n Example\n -------\n k, α, θ = 1/2, 1/2, 5 \n X = sim_wei_par(1000, k, α, θ)\n \"\"\"\n β = (k / (k + α))**(1 / k) * θ\n r = (α / θ)*(1 - np.exp(-(k + α) / k))\\\n / (α / θ + (k / θ)*np.exp(-(k + α) / k))\n weib_rv = st.weibull_min(k)\n par_rv = st.pareto(α)\n binom_rv = st.binom(1, r)\n par_rvs = θ * par_rv.rvs(size = n)\n binom_rvs = binom_rv.rvs(size = n)\n weib_rvs = β * weib_rv.ppf(weib_rv.cdf(θ / β) *\\\n np.random.uniform(size = n))\n return(binom_rvs * weib_rvs + (1 - binom_rvs) * par_rvs)\n\n\ndef logp_wei_par(X):\n \"\"\"\n Likelihood function of the Weibull-Pareto model.\n\n Parameters\n ----------\n X : Array \n Insurance losses.\n\n Returns\n -------\n function\n Allows the evaluation of the likelihood in the parameters provided the \n data.\n \n Example\n -------\n k, α, θ = 1/2, 1/2, 5 \n X = sim_wei_par(1000, k, α, θ)\n logp = logp_wei_par(X)\n logp(np.array([k, α, θ)])\n costFn = lambda parms: -logp(parms)\n bnds = ((0, None), (0, None), (0, None))\n θ0 = (1, 1, 1)\n minRes = minimize(costFn, θ0,bounds=bnds)\n minRes\n \"\"\"\n # parms = particles.to_numpy()[4]\n def logp(parms):\n k, α, θ = tuple(parms)\n \n if np.all(parms > 0):\n β = (k / (k + α))**(1 / k) * θ\n r = (α / θ)*(1 - np.exp(-(k + α) / k)) / (α / θ + (k / θ) *\\\n np.exp(-(k+α)/k))\n if β > 0 and r > 0 and r < 1:\n X1 = X[X < θ]\n X2 = X[X >= θ]\n F1 = 1 - np.exp(-(θ / β)**k)\n \n return(len(X1) * \\\n ( np.log(r) + np.log(k) - k * np.log(β) ) + \\\n (k-1) * sum(np.log(X1)) - sum( (X1/ β)**k ) -\\\n len(X1) * np.log(F1) + len(X2) *(np.log(1-r) +\\\n np.log(α) + α * np.log(θ)) - (α + 1) * sum(np.log(X2))\n )\n else: \n return(-np.inf)\n \n else:\n return(-np.inf)\n return logp\n\n\n\ndef logd_wei_par(parms):\n \"\"\"\n density function of the Weibull-Pareto model.\n\n Parameters\n ----------\n parms : ndArray \n particles.\n\n Returns\n -------\n function\n Allows the evaluation of the density functions for multiple parameter\n values.\n \"\"\"\n k, α, θ = parms[:,0], parms[:,1], parms[:,2]\n β = (k / (k + α))**(1 / k) * θ\n r = (α / θ)*(1 - np.exp(-(k + α) / k)) / (α / θ + (k / θ) * \\\n np.exp(-(k+α)/k))\n F1 = 1 - np.exp(-(θ / β)**k)\n def logd(x):\n res = np.zeros(len(α))\n s = np.logical_and(β > 0, r > 0, r < 1)\n s1 = np.logical_and(s, x < θ)\n s2 = np.logical_and(s, x >= θ)\n \n \n res1 = (np.log(r[s1]) + np.log(k[s1]) - k[s1] * np.log(β[s1])) + \\\n (k[s1]-1) * np.log(x) - (x/ β[s1]) ** k[s1] - \\\n np.log(F1[s1])\n\n res2 = (np.log(1-r[s2]) + np.log(α[s2]) + α[s2] * \\\n np.log(θ[s2])) - (α[s2] + 1) * np.log(x)\n \n res[np.where(s1)] = res1\n res[np.where(s2)] = res2\n res[np.where(np.invert(s))] = - np.inf\n return(res)\n return logd\n\n\n\n\ndef phi(z):\n \"\"\"\n Cdf of unit normal distribution\n\n Parameters\n ----------\n z : Float\n\n Returns\n -------\n CDF of unit normal distribution\n \"\"\"\n return( 1 / 2 * (1 + sp.erf(z /np.sqrt(2))))\n\ndef sim_lnorm_par(n, σ, α, θ):\n \"\"\"\n Sample from a lognormal-Pareto model.\n\n Parameters\n ----------\n n : int \n sample size.\n σ : float\n shape parameter of the lognormal distribution.\n α : float\n Tail index of the Pareto distribution.\n θ: float\n Threshold parameter\n\n Returns\n -------\n array\n A sample drawn from the lognormal-Pareto distribution.\n \n Example\n -------\n n, σ, α, θ =10, 1/2, 1/2, 5 \n X = sim_lnorm_par(n, σ, α, θ)\n \"\"\"\n μ = np.log(θ) - α * σ**2\n \n r = (α * σ *np.sqrt(2* ma.pi) *phi(α * σ) ) / \\\n (α * σ 
*np.sqrt(2* ma.pi) *phi(α * σ) + np.exp(-(α*σ)**2 / 2)) \n \n lnorm_rv = st.lognorm(s = σ, scale = np.exp(μ))\n \n par_rv = st.pareto(α)\n binom_rv = st.binom(1, r)\n par_rvs = θ * par_rv.rvs(size = n)\n binom_rvs = binom_rv.rvs(size = n)\n lnorm_rvs = lnorm_rv.ppf(lnorm_rv.cdf(θ) *\\\n np.random.uniform(size = n))\n return(binom_rvs * lnorm_rvs + (1 - binom_rvs) * par_rvs)\n\ndef logp_lnorm_par(X):\n \"\"\"\n Likelihood function of the lognormal-Pareto model.\n\n Parameters\n ----------\n X : Array \n Insurance losses.\n\n Returns\n -------\n function\n Allows the evaluation of the likelihood in the parameters provided the \n data.\n \n Example\n -------\n n, σ, α, θ =100, 1/2, 1/2, 5\n X = sim_lnorm_par(n, σ, α, θ)\n logp = logp_lnorm_par(X)\n logp(np.array([σ, α, θ]))\n costFn = lambda parms: -logp(parms)\n bnds = ((0, None), (0, None), (0, None))\n θ0 = (1, 1, 3)\n minRes = minimize(costFn, θ0,bounds=bnds)\n minRes\n \"\"\"\n def logp(parms):\n σ, α, θ = tuple(parms)\n \n if np.all(parms > 0):\n μ = np.log(θ) - α * σ**2\n r = (α * σ *np.sqrt(2* ma.pi) *phi(α * σ) ) / \\\n (α * σ *np.sqrt(2* ma.pi) *phi(α * σ) + np.exp(-(α*σ)**2 / 2))\n if r > 0 and r < 1:\n X1 = X[X < θ]\n X2 = X[X >= θ]\n F1 = phi(α * σ)\n \n return(len(X1) * (np.log(r) - np.log(F1 * σ * np.sqrt(2 * ma.pi)))\\\n - sum(np.log(X1)) - sum((np.log(X1) - μ)**2) / 2 / σ**2 \\\n + len(X2) *(np.log(1-r) + np.log(α) + α * np.log(θ))\\\n - (α + 1) * sum(np.log(X2))\n )\n else: \n return(-np.inf)\n \n else:\n return(-np.inf)\n return logp\n\ndef logd_lnorm_par(parms):\n \"\"\"\n density function of the lognormal-Pareto model.\n\n Parameters\n ----------\n parms : ndArray \n particles.\n\n Returns\n -------\n function\n Allows the evaluation of the density functions for multiple parameter\n values.\n \"\"\"\n σ, α, θ = parms[:,0], parms[:,1], parms[:,2]\n μ = np.log(θ) - α * σ**2\n r = (α * σ * np.sqrt(2* ma.pi) *phi(α * σ) ) / \\\n (α * σ * np.sqrt(2* ma.pi) *phi(α * σ) + np.exp(-(α*σ)**2 / 2))\n F1 = phi(α * σ)\n def logd(x):\n \n s = np.logical_and(r > 0, r < 1)\n s1 = np.logical_and(s, x < θ)\n s2 = np.logical_and(s, x >= θ)\n res = np.zeros(len(r))\n \n res1 = (np.log(r[s1]) - np.log(F1[s1] * σ[s1] * np.sqrt(2 * ma.pi)))\\\n - np.log(x) - (np.log(x) - μ[s1])**2 / 2 / σ[s1]**2\n\n res2 = (np.log(1-r[s2]) + np.log(α[s2]) + α[s2] * np.log(θ[s2])) - (α[s2] + 1) * np.log(x)\n res[np.where(s1)] = res1\n res[np.where(s2)] = res2\n res[np.where(np.invert(s))] = -np.inf\n return(res)\n return logd\n\n\n\ndef logp_gamma(X):\n \"\"\"\n Likelihood function of the exponential model.\n\n Parameters\n ----------\n X : Array \n Insurance losses.\n\n Returns\n -------\n function\n Allows the evaluation of the likelihood in the parameters provided the \n data.\n \n Example\n -------\n α, β = 3, 1 / 3\n gamma_rv = st.gamma(α)\n X = gamma_rv.rvs(size = 100) /β \n logp = logp_gamma(X)\n logp(α, β)\n \"\"\"\n def logp(parms):\n α, β = tuple(parms)\n if np.all(parms > 0) :\n return(len(X) * α * np.log(β) - sum(X) * β + (α-1) * sum(np.log(X))\n -len(X) * np.log(sp.gamma(α)))\n else:\n return(-np.inf)\n return logp\n\n\ndef logp_wrap(X, loss_model):\n \"\"\"\n Set the likelihood function for the chosen model.\n\n Parameters\n ----------\n X : Array \n Insurance losses.\n loss_model: string\n name of the model\n\n Returns\n -------\n function\n Allows the evaluation of the likelihood in the parameters provided the \n data.\n \n Example\n -------\n \"\"\"\n if loss_model == \"wei-par\":\n return(logp_wei_par(X))\n elif loss_model == \"lnorm-par\":\n 
return(logp_lnorm_par(X))\n elif loss_model == \"gam-par\":\n return(logp_gam_par(X))\n elif loss_model == \"gamma\":\n return(logp_gamma(X))\n\ndef logd_wrap(parms, loss_model):\n \"\"\"\n Set the density function for the chosen model.\n\n Parameters\n ----------\n X : Array \n Insurance losses.\n loss_model: string\n name of the model\n\n Returns\n -------\n function\n Allows the evaluation of the likelihood in the parameters provided the \n data.\n \n Example\n -------\n \"\"\"\n if loss_model == \"wei-par\":\n return(logd_wei_par(parms))\n elif loss_model == \"lnorm-par\":\n return(logd_lnorm_par(parms))\n elif loss_model == \"gam-par\":\n return(logd_gam_par(parms))\n\ndef mle_estimate(X, loss_model, parms_names):\n \"\"\"\n Provide the mle for the chosen model.\n\n Parameters\n ----------\n X : Array \n Insurance losses.\n loss_model: string\n name of the model\n parms_names: array\n name of the parameters\n\n Returns\n -------\n DatFrame\n Parameter estimate and the likelihood function\n \n Example\n -------\n k, α, θ = 1/2, 1/2, 5 \n X, loss_model = sim_wei_par(100, k, α, θ), \"wei-par\"\n mle_estimate(X, loss_model)\n\n \"\"\"\n logp = logp_wrap(X, loss_model)\n \n res = pd.DataFrame({parms_names[0]:[],parms_names[1]:[],parms_names[2]:[], 'log_lik':[]})\n for j in range(len(X)):\n θ = np.sort(X)[j]\n def logp_fixed_θ(parms):\n k, α = parms\n return(logp(np.array([k, α, θ])))\n costFn = lambda parms: -logp_fixed_θ(parms)\n bnds = ((0, None), (0, None))\n θ0 = (1, 1)\n try:\n minRes = minimize(costFn, θ0,bounds=bnds)\n except:\n minRes.x = np.array([1,1])\n res = pd.concat([res,\n pd.DataFrame({parms_names[0]:[minRes.x[0]],parms_names[1]:[minRes.x[1]],'θ':[θ],\n 'log_lik':logp(np.append(minRes.x,θ))})])\n \n return(res[res['log_lik'] == res['log_lik'].max()])\n\n\n\ndef cdf_gam_par(parms):\n \"\"\"\n cdf of a Gamma-Pareto model.\n\n Parameters\n ----------\n parms : array \n parameters of the gamma-Pareto model.\n \n Returns\n -------\n function\n CDF of the gamma-Pareto distribution.\n \"\"\"\n k, α, θ = parms\n β = θ / (k + α)\n \n cdf2 = sp.gammainc(k, θ / β)\n r = α*sp.gamma(k)* cdf2 * np.exp(k+α)*(k+α)**(-k) / \\\n (1+ α*sp.gamma(k) * cdf2 * np.exp(k+α)*(k+α)**(-k))\n def cdf_compo(x):\n cdf1 = sp.gammainc(k, x / β)\n res = r * cdf1 / cdf2 * np.sum(x < θ) + \\\n (r + (1-r) * (1 - (θ / x)**(α))) * np.sum(x >= θ) \n \n return(res)\n return(cdf_compo)\n\ndef cdf_lnorm_par(parms):\n \"\"\"\n cdf of a lnorm-Pareto model.\n\n Parameters\n ----------\n parms : array \n parameters of the gamma-Pareto model.\n \n Returns\n -------\n function\n CDF of the lnorm-Pareto distribution.\n \"\"\"\n σ, α, θ = parms\n μ = np.log(θ) - α * σ**2\n r = (α * σ *np.sqrt(2* ma.pi) *phi(α * σ) ) / \\\n (α * σ *np.sqrt(2* ma.pi) *phi(α * σ) + np.exp(-(α*σ)**2 / 2)) \n \n cdf2 = 1/2 + sp.erf( (np.log(θ) - μ) / σ / np.sqrt(2) ) / 2\n def cdf_compo(x):\n cdf1 = 1/2 + sp.erf( (np.log(x) - μ) / σ / np.sqrt(2) ) / 2\n res = r * cdf1 / cdf2 * np.sum(x < θ) + \\\n (r + (1-r) * (1 - (θ / x)**(α))) * np.sum(x >= θ) \n \n return(res)\n \n return(cdf_compo)\n\ndef cdf_wei_par(parms):\n \"\"\"\n cdf of a Weibull-Pareto model.\n\n Parameters\n ----------\n parms : array \n parameters of the gamma-Pareto model.\n \n Returns\n -------\n function\n CDF of the Weibull-Pareto distribution.\n \"\"\"\n k, α, θ = parms\n β = (k / (k + α))**(1 / k) * θ\n r = (α / θ) * (1 - np.exp(-(k + α) / k))\\\n / (α / θ + (k / θ) * np.exp(-(k + α) / k))\n \n cdf2 = 1-np.exp(-(θ / β ) ** k )\n def cdf_compo(x):\n cdf1 = 1-np.exp(-(x / β ) 
** k )\n res = r * cdf1 / cdf2 * np.sum(x < θ) + \\\n (r + (1-r) * (1 - (θ / x)**(α))) * np.sum(x >= θ) \n \n return(res)\n \n return(cdf_compo)\n\ndef cdf_wrap(parms, loss_model):\n \"\"\"\n Set the CD function for the chosen model.\n\n Parameters\n ----------\n parms : Array \n shape, tail and threshold parameter.\n loss_model: string\n name of the model\n\n Returns\n -------\n function\n Allows the evaluation of the CDF \n \n Example\n -------\n \"\"\"\n if loss_model == \"wei-par\":\n return(cdf_wei_par(parms))\n elif loss_model == \"lnorm-par\":\n return(cdf_lnorm_par(parms))\n elif loss_model == \"gam-par\":\n return(cdf_gam_par(parms))\n\n\ndef quantile_compo(p, loss_model, parms, low = 0, up = 1e8, err = 1e-6):\n \"\"\"\n Compute the quantile of order p for the chosen loss model.\n\n Parameters\n ----------\n p: scalar\n order of the quantile \n \n loss_model: string\n name of the model\n parms : Array \n shape, tail and threshold parameter.\n low, up, err: scalars\n Binary search algorithm parameter\n\n Returns\n -------\n function\n Allows the evaluation of the CDF \n \n Example\n -------\n \"\"\"\n cdf_compo = cdf_wrap(parms, loss_model)\n low, up = 0, 1e8\n err = 1e-6\n while up - low > err:\n new = (up + low) / 2\n val = cdf_compo(new)\n if val > p:\n up = new\n else: \n low = new\n return(new)\n\n"
] | [
[
"scipy.special.gammainc",
"scipy.special.gamma",
"numpy.exp",
"numpy.where",
"numpy.invert",
"numpy.sort",
"scipy.stats.binom",
"numpy.log",
"pandas.DataFrame",
"numpy.logical_and",
"numpy.sqrt",
"numpy.append",
"scipy.optimize.minimize",
"scipy.stats.gamma",
"numpy.array",
"scipy.stats.pareto",
"numpy.sum",
"scipy.stats.weibull_min",
"numpy.random.uniform",
"numpy.all"
]
] |
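quantile_compo above inverts a composite CDF by bisection on [0, 1e8] with tolerance 1e-6. The same search applied to a plain Pareto CDF, where scipy's ppf provides an exact reference (a sketch; the pure-Pareto case stands in for the composite models):

from scipy import stats

def quantile_bisect(cdf, p, low=0.0, up=1e8, err=1e-6):
    # binary search identical to quantile_compo's loop
    while up - low > err:
        mid = (up + low) / 2
        if cdf(mid) > p:
            up = mid
        else:
            low = mid
    return (up + low) / 2

par = stats.pareto(2.5)
print(quantile_bisect(par.cdf, 0.99), par.ppf(0.99))  # agree to ~1e-6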
tfboyd/rl-reliability-metrics | [
"f5bbb3ce9c90f1bf599c002e843128ab541630a6"
] | [
"rl_reliability_metrics/evaluation/eval_metrics.py"
] | [
"# coding=utf-8\n# Copyright 2019 The Authors of RL Reliability Metrics.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Evaluate robustness metrics for a given set of training curves or rollouts.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport json\nimport os\n\nfrom absl import logging\nimport gin\nimport numpy as np\n\nfrom rl_reliability_metrics.analysis import io_utils_oss as io_utils\nfrom rl_reliability_metrics.evaluation import data_loading\n# Internal gfile dependencies\n\n\[email protected]\nclass Evaluator(object):\n \"\"\"Class for evaluating metrics.\"\"\"\n\n def __init__(self,\n metrics,\n dependent_variable='Metrics/AverageReturn',\n timepoint_variable=None,\n align_on_global_step=True):\n \"\"\"Initialize Evaluator.\n\n Args:\n metrics: List of instances of robustness metrics to evaluate. See\n :metrics. Per-metric parameters (window_size, etc) may be configured\n with Gin.\n dependent_variable: Name of Tensorboard summary that should be loaded for\n analysis of robustness, e.g. 'Metrics/AverageReturn'.\n timepoint_variable: Name of Tensorboard summary that defines a \"timepoint\"\n (i.e. the independent variable), e.g. 'Metrics/EnvironmentSteps'. Set\n None to simply use the 'steps' value of the dependent_variable summaries\n as the timepoint variable.\n align_on_global_step: see load_input_data\n \"\"\"\n self.metrics = metrics\n self.dependent_variable = dependent_variable\n self.timepoint_variable = timepoint_variable\n self.align_on_global_step = align_on_global_step\n\n @gin.configurable\n def evaluate(self, run_dirs, outfile_prefix='/tmp/robustness_results_'):\n \"\"\"Evaluate robustness metrics on a set of run directories.\n\n Args:\n run_dirs: List of paths to directories containing Tensorboard summaries\n for all the runs of an experiment, one directory per run. Summaries must\n include a scalar or tensor summary that defines the variable to be\n analyzed (the 'dependent_variable'). Optionally they may also have a\n scalar or tensor summary that defines a \"timepoint\" (the\n 'timepoint_variable').\n outfile_prefix: Prefix for JSON output files, where we write results and\n metric parameters.\n\n Returns:\n A dictionary of robustness values {metric_name: metric_value}\n \"\"\"\n curves = data_loading.load_input_data(run_dirs, self.dependent_variable,\n self.timepoint_variable,\n self.align_on_global_step)\n\n results = self.compute_metrics(curves)\n self.write_results(results, outfile_prefix)\n\n return results\n\n def evaluate_with_permutations(\n self,\n run_dirs_1,\n run_dirs_2,\n outfile_prefix='/tmp/robustness_results_permuted',\n n_permutations=1000,\n permutation_start_idx=0,\n random_seed=0):\n \"\"\"Evaluate robustness metrics on runs permuted across two sets.\n\n This method is useful for computing permutation tests to evaluate\n statistical significance on the difference in metric values between two\n sets of runs (e.g. for one algorithm vs another algorithm). 
In particular,\n this method is necessary to run permutation tests for across-run metrics\n (for per-run metrics, we can run permutation tests just by permuting the\n original metrics values or rankings).\n\n We permute the runs across the two sets and divide into two sets of the\n same size as the original two sets. We evaluate the metrics on the\n two permuted sets. This is performed n_permutations times. This provides a\n null distribution that can later be loaded to compute a p-value for a\n permutation test.\n\n Args:\n run_dirs_1: List of paths to directories containing Tensorboard summaries\n for all the runs of an experiment, one directory per run. Summaries must\n include a scalar or tensor summary that defines the variable to be\n analyzed (the 'dependent_variable'). Optionally they may also have a\n scalar or tensor summary that defines a \"timepoint\" (the\n 'timepoint_variable').\n run_dirs_2: Another list of paths.\n outfile_prefix: Prefix for JSON output files, where we write results and\n metric parameters.\n n_permutations: Number of permutations to perform.\n permutation_start_idx: If desired, the indexing of permutations can start\n at any integer. This affects the naming of the output files.\n random_seed: Numpy random seed.\n\n Returns:\n A list of robustness results. Each result is a dictionary of robustness\n values {metric_name: metric_value}\n \"\"\"\n np.random.seed(random_seed)\n\n curves_1 = data_loading.load_input_data(run_dirs_1, self.dependent_variable,\n self.timepoint_variable,\n self.align_on_global_step)\n curves_2 = data_loading.load_input_data(run_dirs_2, self.dependent_variable,\n self.timepoint_variable,\n self.align_on_global_step)\n all_curves = curves_1 + curves_2\n\n all_results = {}\n for i_permutation in range(permutation_start_idx,\n permutation_start_idx + n_permutations):\n logging.info('Permutation %d...', i_permutation)\n curves_permuted = permute_curves(all_curves)\n curves_permuted_1 = curves_permuted[:len(curves_1)]\n curves_permuted_2 = curves_permuted[len(curves_1):]\n\n results_1 = self.compute_metrics(curves_permuted_1)\n results_2 = self.compute_metrics(curves_permuted_2)\n all_results['permutation%d' % i_permutation] = {\n 'curves1': results_1,\n 'curves2': results_2\n }\n\n permutation_end_idx = permutation_start_idx + n_permutations - 1\n outfile_prefix_extended = '%spermutations%dto%d_' % (\n outfile_prefix, permutation_start_idx, permutation_end_idx)\n self.write_results(all_results, outfile_prefix_extended)\n\n return all_results\n\n def evaluate_with_bootstraps(\n self,\n run_dirs,\n outfile_prefix='/tmp/robustness_results_bootstrapped',\n n_bootstraps=1000,\n bootstrap_start_idx=0,\n random_seed=0):\n \"\"\"Evaluate robustness metrics on bootstrapped runs.\n\n I.e. the runs are resampled with replacement.\n\n This method is useful for computing bootstrapped confidence intervals on\n the metric values for a single set of runs (e.g. for a single algorithm).\n In particular, this method is necessary to obtain confidence intervals for\n across-run metrics (for per-run metrics, we can obtain confidence intervals\n just by bootstrapping the original metrics values or rankings).\n\n We bootstrap the runs (resample with replacement) n_bootstraps times, each\n time re-computing the metrics. 
This provides bootstrap distributions on\n the metric values that can later be loaded to compute confidence intervals.\n\n Args:\n run_dirs: List of paths to directories containing Tensorboard summaries\n for all the runs of an experiment, one directory per run. Summaries must\n include a scalar or tensor summary that defines the variable to be\n analyzed (the 'dependent_variable'). Optionally they may also have a\n scalar or tensor summary that defines a \"timepoint\" (the\n 'timepoint_variable').\n outfile_prefix: Prefix for JSON output files, where we write results and\n metric parameters.\n n_bootstraps: Number of bootstraps to perform.\n bootstrap_start_idx: If desired, the indexing of bootstraps can start at\n any integer. This affects the naming of the output files.\n random_seed: Numpy random seed.\n\n Returns:\n A dict of robustness results. Each entry in the dict has the form\n {'bootstrap%%BOOTSTRAP_IDX%%': metric_result_for_this_resampling}.\n Each metric result is a dictionary of metric values\n {metric_name: metric_value}.\n \"\"\"\n np.random.seed(random_seed)\n\n curves = data_loading.load_input_data(run_dirs, self.dependent_variable,\n self.timepoint_variable,\n self.align_on_global_step)\n\n all_results = {}\n for i_boot in range(bootstrap_start_idx,\n bootstrap_start_idx + n_bootstraps):\n logging.info('Bootstrap %d...', i_boot)\n curves_resampled = resample_curves(curves)\n results_resampled = self.compute_metrics(curves_resampled)\n\n all_results['bootstrap%d' % i_boot] = results_resampled\n\n bootstrap_end_idx = bootstrap_start_idx + n_bootstraps - 1\n outfile_prefix_extended = '%sbootstraps%dto%d_' % (\n outfile_prefix, bootstrap_start_idx, bootstrap_end_idx)\n self.write_results(all_results, outfile_prefix_extended)\n\n return all_results\n\n def compute_metrics(self, curves):\n \"\"\"Computes metrics on training curves.\"\"\"\n results = {}\n for metric in self.metrics:\n results[metric.name] = metric(curves)\n return results\n\n def write_metric_params(self, outfile_prefix):\n \"\"\"Write metric parameters to JSON.\"\"\"\n\n # Load the metric params.\n metric_params = get_metric_params(self.metrics)\n\n # Write the metric params.\n io_utils.makedirs(os.path.dirname(outfile_prefix))\n params_path = outfile_prefix + 'metric_params.json'\n with open(params_path, 'w') as outfile:\n json.dump(metric_params, outfile, cls=_NumpyEncoder)\n\n logging.info('Metric params written to: %s', params_path)\n return params_path\n\n @staticmethod\n def write_results(results, outfile_prefix):\n \"\"\"Write results to JSON.\"\"\"\n io_utils.makedirs(os.path.dirname(outfile_prefix))\n\n results_path = outfile_prefix + 'results.json'\n with open(results_path, 'w') as outfile:\n json.dump(results, outfile, cls=_NumpyEncoder)\n\n logging.info('Results written to: %s', results_path)\n return results_path\n\n\ndef get_metric_params(metrics):\n \"\"\"Gets public parameters for a list of metric instances.\n\n Args:\n metrics: A list of metric instances.\n\n Returns:\n Dictionary of metric parameters {metric_name: dict of params for metric}.\n Each entry is also a dictionary {metric_param: param_value}.\n \"\"\"\n metric_params = {}\n for metric in metrics:\n metric_params[metric.name] = {\n str(attr): value\n for attr, value in vars(metric).items()\n if attr[0] != '_'\n }\n return metric_params\n\n\nclass _NumpyEncoder(json.JSONEncoder):\n\n def default(self, obj):\n if isinstance(obj, np.ndarray):\n return obj.tolist()\n return json.JSONEncoder.default(self, obj)\n\n\ndef 
get_run_dirs(summary_path, data_type, selected_runs=None):\n \"\"\"Get the subdirectories corresponding to each run.\"\"\"\n if selected_runs:\n # only get the specified subdirectories\n run_dirs = selected_runs\n else:\n # get all the subdirectories\n run_dirs = io_utils.listdir(summary_path)\n run_dirs = [os.path.join(summary_path, d, data_type) for d in run_dirs]\n\n logging.info('Run directories:')\n for d in run_dirs:\n logging.info(' %s', d)\n\n return run_dirs\n\n\ndef permute_curves(curves):\n \"\"\"Permute a list of curves.\n\n Args:\n curves: A list of curves, e.g. a 2-D Numpy array.\n\n Returns:\n A list of curves, with the same length as the original curves.\n \"\"\"\n indices = np.random.permutation(len(curves))\n return [curves[ind] for ind in indices]\n\n\ndef resample_curves(curves):\n \"\"\"Resample with replacement from a list of curves.\n\n Args:\n curves: A list of curves, e.g. a 2-D Numpy array.\n\n Returns:\n A list of curves, with the same length as the original curves.\n \"\"\"\n n_curves = len(curves)\n indices = np.random.choice(range(n_curves), n_curves)\n return [curves[ind] for ind in indices]\n"
] | [
[
"numpy.random.seed"
]
] |
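Editor's note: the evaluator above builds its permutation null distribution and bootstrap resamples with `permute_curves` / `resample_curves`. A minimal standalone sketch of how those helpers are typically used; the metric ("median final value") and the synthetic curves are invented for illustration and are not from the repo:

```python
import numpy as np

def final_value_metric(curves):
    # Toy across-run metric: median of each curve's last value.
    return np.median([curve[-1] for curve in curves])

def permute_curves(curves):
    indices = np.random.permutation(len(curves))
    return [curves[ind] for ind in indices]

def resample_curves(curves):
    n_curves = len(curves)
    indices = np.random.choice(range(n_curves), n_curves)
    return [curves[ind] for ind in indices]

np.random.seed(0)
runs_a = [np.cumsum(np.random.rand(100)) for _ in range(10)]
runs_b = [np.cumsum(np.random.rand(100) + 0.05) for _ in range(10)]

# Permutation test: pool both sets, reshuffle, re-split, recompute.
observed = final_value_metric(runs_b) - final_value_metric(runs_a)
null = []
for _ in range(1000):
    pooled = permute_curves(runs_a + runs_b)
    null.append(final_value_metric(pooled[10:]) - final_value_metric(pooled[:10]))
p_value = np.mean(np.abs(null) >= abs(observed))

# Bootstrap: resample one set with replacement for a confidence interval.
boot = [final_value_metric(resample_curves(runs_a)) for _ in range(1000)]
ci = np.percentile(boot, [2.5, 97.5])
print(p_value, ci)
```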
kanand77/unet-updated | [
"54a4b7169f280ec78cedd787ffd9404f1c70645d"
] | [
"data_3.py"
] | [
"import numpy as np \nimport os\nfrom tensorflow.python.keras.utils.data_utils import Sequence\nimport skimage.io as io\n\nclass DataGenerator(Sequence):\n def __init__(self, im_IDs, train_path, truth_path, \n train_prefix = 'input', truth_prefix = 'truth',\n N_t = 25, batch_size=4, shuffle=True, \n dim = (256,256), n_channels=2, do_fft = False, load_series=True):\n\n self.im_IDs = im_IDs\n self.N_t = N_t\n self.dim = dim\n self.load_series = load_series\n if load_series:\n self.n_channels = n_channels * N_t\n else:\n self.n_channels = n_channels\n \n self.train_path = train_path\n self.truth_path = truth_path\n self.train_prefix = train_prefix\n self.truth_prefix = truth_prefix \n\n self.do_fft = do_fft\n\n self.list_IDs = []\n for im_ID in (self.im_IDs):\n if load_series:\n self.list_IDs.append('%03d' % (im_ID, ))\n else:\n for j in range(self.N_t):\n self.list_IDs.append('%03d_t%02d' % (im_ID, j))\n\n self.batch_size = batch_size\n self.shuffle = shuffle\n self.on_epoch_end()\n\n print('Initialized with {} total IDs'.format(len(self.list_IDs)))\n\n def __len__(self):\n 'Denotes the number of batches per epoch'\n return int(np.floor(len(self.list_IDs) / self.batch_size))\n\n def __getitem__(self, index):\n 'Generate one batch of data'\n # Generate indexes of the batch\n indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]\n\n # Find list of IDs\n list_IDs_temp = [self.list_IDs[k] for k in indexes]\n\n # Generate data\n if self.load_series:\n X, y = self.__data_generation_series(list_IDs_temp)\n else:\n X, y = self.__data_generation(list_IDs_temp)\n\n return X, y\n\n def on_epoch_end(self):\n 'Updates indexes after each epoch'\n self.indexes = np.arange(len(self.list_IDs))\n if self.shuffle == True:\n np.random.shuffle(self.indexes)\n\n def __data_generation(self, list_IDs_temp):\n 'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels)\n # Initialization\n X = np.empty((self.batch_size, *self.dim, self.n_channels))\n y = np.empty((self.batch_size, *self.dim, self.n_channels))\n\n # Generate data\n for i, ID in enumerate(list_IDs_temp):\n for k in range(2):\n \n save_str = ''\n if k == 0:\n save_str = 'r'\n elif k == 1:\n save_str = 'i'\n\n # Load training data\n f = '%s_%s_%s' % (self.train_prefix, ID, save_str)\n img = io.imread(os.path.join(self.train_path, \"%s.png\" % f), as_gray = True).astype(np.float)\n img = (img / 255.0) - 0.5\n \n # Maybe normalize these images (img)\n X[i,:,:,k] = img\n\n # Loading the truth data\n f = '%s_%s_%s' % (self.truth_prefix, ID, save_str)\n img = io.imread(os.path.join(self.truth_path, \"%s.png\" % f), as_gray = True).astype(np.float)\n img = (img / 255.0) - 0.5\n\n # Maybe normalize these images (img)\n y[i,:,:,k] = img\n\n if self.do_fft:\n for i in range(self.batch_size):\n img = X[i,:,:,0] + 1j * X[i,:,:,1]\n img = np.fft.ifftshift(np.fft.fft2(np.fft.fftshift(img)))\n X[i,:,:,0] = img.real\n X[i,:,:,1] = img.imag\n\n img = y[i,:,:,0] + 1j * y[i,:,:,1]\n img = np.fft.ifftshift(np.fft.fft2(np.fft.fftshift(img)))\n y[i,:,:,0] = img.real\n y[i,:,:,1] = img.imag\n\n return X, y\n\n def __data_generation_series(self, list_IDs_temp):\n 'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels)\n # Initialization\n X = np.empty((self.batch_size, *self.dim, self.n_channels))\n y = np.empty((self.batch_size, *self.dim, self.n_channels))\n\n # Generate data\n for i, ID in enumerate(list_IDs_temp):\n for it in range(self.N_t):\n for k in range(2):\n \n ii = it * 2 + k\n\n save_str = ''\n 
if k == 0:\n save_str = 'r'\n elif k == 1:\n save_str = 'i'\n\n # Load training data\n f = '%s_%s_t%02d_%s' % (self.train_prefix, ID, it, save_str)\n img = io.imread(os.path.join(self.train_path, \"%s.png\" % f), as_gray = True).astype(np.float)\n img = (img / 255.0) - 0.5\n \n # Maybe normalize these images (img)\n X[i,:,:,ii] = img\n\n # Loading the truth data\n f = '%s_%s_t%02d_%s' % (self.truth_prefix, ID, it, save_str)\n img = io.imread(os.path.join(self.truth_path, \"%s.png\" % f), as_gray = True).astype(np.float)\n img = (img / 255.0) - 0.5\n\n # Maybe normalize these images (img)\n y[i,:,:,ii] = img\n\n return X, y"
] | [
[
"numpy.fft.fftshift",
"numpy.empty",
"numpy.random.shuffle"
]
] |
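Editor's note: when `do_fft=True`, the generator above applies the `ifftshift(fft2(fftshift(img)))` sandwich and packs the complex result into two real channels. A self-contained check of that round trip on synthetic data (the repo's actual inputs are PNG pairs, which this sketch does not reproduce):

```python
import numpy as np

# Synthetic complex "image" standing in for the real/imag PNG pair.
img = (np.random.rand(256, 256) - 0.5) + 1j * (np.random.rand(256, 256) - 0.5)

# fftshift centers the image before the transform; ifftshift re-centers
# the spectrum so the DC component sits in the middle of k-space.
kspace = np.fft.ifftshift(np.fft.fft2(np.fft.fftshift(img)))

# Pack into (H, W, 2) real channels, as X[..., 0] / X[..., 1] are filled.
packed = np.stack([kspace.real, kspace.imag], axis=-1)

# Invert with the mirrored sandwich to recover the original image.
recovered = np.fft.ifftshift(
    np.fft.ifft2(np.fft.fftshift(packed[..., 0] + 1j * packed[..., 1])))
print(np.allclose(recovered, img))  # True
```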
jlec/numdifftools | [
"43071da54627f896213cabcea61158d29f4e86b0"
] | [
"numdifftools/extrapolation.py"
] | [
"'''\nCreated on 28. aug. 2015\n\n@author: pab\n'''\nfrom __future__ import division, print_function\nimport numpy as np\nfrom scipy import linalg\nfrom scipy.ndimage.filters import convolve1d\nimport warnings\nEPS = np.finfo(float).eps\n_EPS = EPS\n_TINY = np.finfo(float).tiny\n\n\nclass Dea(object):\n '''\n LIMEXP is the maximum number of elements the\n epsilon table data can contain. The epsilon table\n is stored in the first (LIMEXP+2) entries of EPSTAB.\n\n\n LIST OF MAJOR VARIABLES\n -----------------------\n E0,E1,E2,E3 - DOUBLE PRECISION\n The 4 elements on which the computation of\n a new element in the epsilon table is based.\n NRES - INTEGER\n Number of extrapolation results actually\n generated by the epsilon algorithm in prior\n calls to the routine.\n NEWELM - INTEGER\n Number of elements to be computed in the\n new diagonal of the epsilon table. The\n condensed epsilon table is computed. Only\n those elements needed for the computation of\n the next diagonal are preserved.\n RES - DOUBLE PREISION\n New element in the new diagonal of the\n epsilon table.\n ERROR - DOUBLE PRECISION\n An estimate of the absolute error of RES.\n Routine decides whether RESULT=RES or\n RESULT=SVALUE by comparing ERROR with\n ABSERR from the previous call.\n RES3LA - DOUBLE PREISION\n Vector of DIMENSION 3 containing at most\n the last 3 results.\n '''\n def __init__(self, limexp=3):\n self.limexp = 2 * (limexp // 2) + 1\n self.epstab = np.zeros(limexp+5)\n self.ABSERR = 10.\n self._n = 0\n self._nres = 0\n if (limexp < 3):\n raise ValueError('LIMEXP IS LESS THAN 3')\n\n def _compute_error(self, RES3LA, NRES, RES):\n fact = [6.0, 2.0, 1.0][min(NRES-1, 2)]\n error = fact * np.abs(RES - RES3LA[:NRES]).sum()\n return error\n\n def _shift_table(self, EPSTAB, N, NEWELM, NUM):\n i_0 = 1 if ((NUM // 2) * 2 == NUM - 1) else 0\n i_n = 2 * NEWELM + 2\n EPSTAB[i_0:i_n:2] = EPSTAB[i_0 + 2:i_n + 2:2]\n\n if (NUM != N):\n i_n = NUM - N\n EPSTAB[:N + 1] = EPSTAB[i_n:i_n + N + 1]\n return EPSTAB\n\n def _update_RES3LA(self, RES3LA, RESULT, NRES):\n if NRES > 2:\n RES3LA[:2] = RES3LA[1:]\n RES3LA[2] = RESULT\n else:\n RES3LA[NRES] = RESULT\n\n def __call__(self, SVALUE):\n\n EPSTAB = self.epstab\n RES3LA = EPSTAB[-3:]\n RESULT = SVALUE\n N = self._n\n NRES = self._nres\n EPSTAB[N] = SVALUE\n if (N == 0):\n ABSERR = abs(RESULT)\n elif (N == 1):\n ABSERR = 6.0 * abs(RESULT - EPSTAB[0])\n else:\n ABSERR = self.ABSERR\n EPSTAB[N + 2] = EPSTAB[N]\n NEWELM = N // 2\n NUM = N\n K1 = N\n for I in range(NEWELM):\n E0 = EPSTAB[K1 - 2]\n E1 = EPSTAB[K1 - 1]\n E2 = RES = EPSTAB[K1 + 2]\n DELTA2, DELTA3 = E2 - E1, E1 - E0\n ERR2, ERR3 = abs(DELTA2), abs(DELTA3)\n TOL2 = max(abs(E2), abs(E1)) * _EPS\n TOL3 = max(abs(E1), abs(E0)) * _EPS\n converged = (ERR2 <= TOL2 and ERR3 <= TOL3)\n if converged:\n ABSERR = ERR2 + ERR3\n RESULT = RES\n break\n if (I != 0):\n E3 = EPSTAB[K1]\n DELTA1 = E1 - E3\n ERR1 = abs(DELTA1)\n TOL1 = max(abs(E1), abs(E3)) * _EPS\n converged = (ERR1 <= TOL1 or ERR2 <= TOL2 or\n ERR3 <= TOL3)\n if not converged:\n SS = 1.0 / DELTA1 + 1.0 / DELTA2 - 1.0 / DELTA3\n else:\n converged = (ERR2 <= TOL2 or ERR3 <= TOL3)\n if not converged:\n SS = 1.0 / DELTA2 - 1.0 / DELTA3\n EPSTAB[K1] = E1\n if (converged or abs(SS * E1) <= 1e-04):\n N = 2 * I\n if (NRES == 0):\n ABSERR = ERR2 + ERR3\n RESULT = RES\n else:\n RESULT = RES3LA[min(NRES-1, 2)]\n break\n RES = E1 + 1.0 / SS\n EPSTAB[K1] = RES\n K1 = K1 - 2\n if (NRES == 0):\n ABSERR = ERR2 + abs(RES - E2) + ERR3\n RESULT = RES\n continue\n ERROR = 
self._compute_error(RES3LA, NRES, RES)\n\n if (ERROR > 10.0 * ABSERR):\n continue\n ABSERR = ERROR\n RESULT = RES\n else:\n ERROR = self._compute_error(RES3LA, NRES, RES)\n\n # 50\n if (N == self.limexp - 1):\n N = 2 * (self.limexp // 2) - 1\n EPSTAB = self._shift_table(EPSTAB, N, NEWELM, NUM)\n self._update_RES3LA(RES3LA, RESULT, NRES)\n\n ABSERR = max(ABSERR, 10.0*_EPS * abs(RESULT))\n NRES = NRES + 1\n\n N += 1\n self._n = N\n self._nres = NRES\n # EPSTAB[-3:] = RES3LA\n self.ABSERR = ABSERR\n return RESULT, ABSERR\n\n\ndef test_dea():\n def linfun(i):\n return np.linspace(0, np.pi/2., 2**i+1)\n dea = Dea(limexp=11)\n print('NO. PANELS TRAP. APPROX APPROX W/EA ABSERR')\n for k in np.arange(10):\n x = linfun(k)\n val = np.trapz(np.sin(x), x)\n vale, err = dea(val)\n print('%5d %20.8f %20.8f %20.8f' % (len(x)-1, val, vale, err))\n\n\ndef test_epsal():\n HUGE = 1.E+60\n TINY = 1.E-60\n ZERO = 0.E0\n ONE = 1.E0\n true_vals = [0.78539816, 0.94805945, 0.99945672]\n E = []\n for N, SOFN in enumerate([0.78539816, 0.94805945, 0.98711580]):\n E.append(SOFN)\n if N == 0:\n ESTLIM = SOFN\n else:\n AUX2 = ZERO\n for J in range(N, 0, -1):\n AUX1 = AUX2\n AUX2 = E[J-1]\n DIFF = E[J] - AUX2\n if (abs(DIFF) <= TINY):\n E[J-1] = HUGE\n else:\n E[J-1] = AUX1 + ONE/DIFF\n\n if (N % 2) == 0:\n ESTLIM = E[0]\n else:\n ESTLIM = E[1]\n print(ESTLIM, true_vals[N])\n\n\ndef dea3(v0, v1, v2, symmetric=False):\n \"\"\"\n Extrapolate a slowly convergent sequence\n\n Parameters\n ----------\n v0, v1, v2 : array-like\n 3 values of a convergent sequence to extrapolate\n\n Returns\n -------\n result : array-like\n extrapolated value\n abserr : array-like\n absolute error estimate\n\n Description\n -----------\n DEA3 attempts to extrapolate nonlinearly to a better estimate\n of the sequence's limiting value, thus improving the rate of\n convergence. The routine is based on the epsilon algorithm of\n P. Wynn, see [1]_.\n\n Example\n -------\n # integrate sin(x) from 0 to pi/2\n\n >>> import numpy as np\n >>> import numdifftools as nd\n >>> Ei= np.zeros(3)\n >>> linfun = lambda i : np.linspace(0, np.pi/2., 2**(i+5)+1)\n >>> for k in np.arange(3):\n ... x = linfun(k)\n ... Ei[k] = np.trapz(np.sin(x),x)\n >>> [En, err] = nd.dea3(Ei[0], Ei[1], Ei[2])\n >>> truErr = Ei-1.\n >>> (truErr, err, En)\n (array([ -2.00805680e-04, -5.01999079e-05, -1.25498825e-05]),\n array([ 0.00020081]), array([ 1.]))\n\n See also\n --------\n dea\n\n Reference\n ---------\n .. [1] C. Brezinski (1977)\n \"Acceleration de la convergence en analyse numerique\",\n \"Lecture Notes in Math.\", vol. 
584,\n Springer-Verlag, New York, 1977.\n \"\"\"\n E0, E1, E2 = np.atleast_1d(v0, v1, v2)\n abs, max = np.abs, np.maximum # @ReservedAssignment\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\") # ignore division by zero and overflow\n delta2, delta1 = E2 - E1, E1 - E0\n err2, err1 = abs(delta2), abs(delta1)\n tol2, tol1 = max(abs(E2), abs(E1)) * _EPS, max(abs(E1), abs(E0)) * _EPS\n delta1[err1 < _TINY] = _TINY\n delta2[err2 < _TINY] = _TINY # avoid division by zero and overflow\n ss = 1.0 / delta2 - 1.0 / delta1 + _TINY\n smalle2 = (abs(ss * E1) <= 1.0e-3)\n converged = (err1 <= tol1) & (err2 <= tol2) | smalle2\n result = np.where(converged, E2 * 1.0, E1 + 1.0 / ss)\n abserr = err1 + err2 + np.where(converged, tol2 * 10, abs(result-E2))\n if symmetric and len(result) > 1:\n return result[:-1], abserr[1:]\n return result, abserr\n\n\nclass Richardson(object):\n '''\n Extrapolates as sequence with Richardsons method\n\n Notes\n -----\n Suppose you have series expansion that goes like this\n\n L = f(h) + a0 * h^p_0 + a1 * h^p_1+ a2 * h^p_2 + ...\n\n where p_i = order + step * i and f(h) -> L as h -> 0, but f(0) != L.\n\n If we evaluate the right hand side for different stepsizes h\n we can fit a polynomial to that sequence of approximations.\n This is exactly what this class does.\n\n Example\n -------\n >>> import numpy as np\n >>> import numdifftools as nd\n >>> n = 3\n >>> Ei = np.zeros((n,1))\n >>> h = np.zeros((n,1))\n >>> linfun = lambda i : np.linspace(0, np.pi/2., 2**(i+5)+1)\n >>> for k in np.arange(n):\n ... x = linfun(k)\n ... h[k] = x[1]\n ... Ei[k] = np.trapz(np.sin(x),x)\n >>> En, err, step = nd.Richardson(step=1, order=1)(Ei, h)\n >>> truErr = Ei-1.\n >>> (truErr, err, En)\n (array([[ -2.00805680e-04],\n [ -5.01999079e-05],\n [ -1.25498825e-05]]), array([[ 0.00320501]]), array([[ 1.]]))\n\n '''\n def __init__(self, step_ratio=2.0, step=1, order=1, num_terms=2):\n self.num_terms = num_terms\n self.order = order\n self.step = step\n self.step_ratio = step_ratio\n\n def _r_matrix(self, num_terms):\n step = self.step\n i, j = np.ogrid[0:num_terms+1, 0:num_terms]\n r_mat = np.ones((num_terms + 1, num_terms + 1))\n r_mat[:, 1:] = (1.0 / self.step_ratio) ** (i*(step*j + self.order))\n return r_mat\n\n def _get_richardson_rule(self, sequence_length=None):\n if sequence_length is None:\n sequence_length = self.num_terms + 1\n num_terms = min(self.num_terms, sequence_length - 1)\n if num_terms > 0:\n r_mat = self._r_matrix(num_terms)\n return linalg.pinv(r_mat)[0]\n return np.ones((1,))\n\n def _estimate_error(self, new_sequence, old_sequence, steps, rule):\n m, _n = new_sequence.shape\n\n if m < 2:\n return (np.abs(new_sequence) * EPS + steps) * 10.0\n cov1 = np.sum(rule**2) # 1 spare dof\n fact = np.maximum(12.7062047361747 * np.sqrt(cov1), EPS * 10.)\n err = np.abs(np.diff(new_sequence, axis=0)) * fact\n tol = np.maximum(np.abs(new_sequence[1:]),\n np.abs(new_sequence[:-1])) * EPS * fact\n converged = err <= tol\n abserr = err + np.where(converged, tol * 10,\n abs(new_sequence[:-1]-old_sequence[1:])*fact)\n # abserr = err1 + err2 + np.where(converged, tol2 * 10, abs(result-E2))\n # abserr = s * fact + np.abs(new_sequence) * EPS * 10.0\n return abserr\n\n def extrapolate(self, sequence, steps):\n return self.__call__(sequence, steps)\n\n def __call__(self, sequence, steps):\n ne = sequence.shape[0]\n rule = self._get_richardson_rule(ne)\n nr = rule.size - 1\n m = ne - nr\n new_sequence = convolve1d(sequence, rule[::-1], axis=0, origin=(nr//2))\n abserr = 
self._estimate_error(new_sequence, sequence, steps, rule)\n return new_sequence[:m], abserr[:m], steps[:m]\n\nif __name__ == '__main__':\n pass\n"
] | [
[
"numpy.sin",
"scipy.ndimage.filters.convolve1d",
"numpy.zeros",
"numpy.sum",
"numpy.ones",
"numpy.diff",
"scipy.linalg.pinv",
"numpy.finfo",
"numpy.where",
"numpy.arange",
"numpy.atleast_1d",
"numpy.sqrt",
"numpy.abs",
"numpy.linspace"
]
] |
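Editor's note: the heart of `dea3` above is one application of Wynn's epsilon algorithm (equivalent to Aitken's delta-squared) to three consecutive estimates. A stripped-down scalar rendering of that update, without the tolerance and overflow guards, applied to trapezoid estimates of the integral of sin from 0 to pi/2; illustrative only, not a drop-in replacement:

```python
import numpy as np

def dea3_core(E0, E1, E2):
    # ss = 1/delta2 - 1/delta1; result = E1 + 1/ss, as in dea3 above.
    delta1, delta2 = E1 - E0, E2 - E1
    ss = 1.0 / delta2 - 1.0 / delta1
    result = E1 + 1.0 / ss       # extrapolated limit estimate
    abserr = abs(result - E2)    # crude error proxy
    return result, abserr

estimates = []
for k in range(3):
    x = np.linspace(0, np.pi / 2, 2 ** (k + 5) + 1)
    estimates.append(np.trapz(np.sin(x), x))

result, abserr = dea3_core(*estimates)
# The raw trapezoid error decays like h**2; extrapolation cancels that
# leading term, so the extrapolated error is orders of magnitude smaller.
print(estimates[-1] - 1.0, result - 1.0, abserr)
```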
DragonYong/DCGANFACEGENERATION | [
"7e14f355e0cefb2d61f695ca01e0441d174266ff"
] | [
"models.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# @Time : 2021/4/26-15:57\n# @Author : TuringEmmy\n# @Email : [email protected]\n# @WeChat : superior_god\n# @File : models.py\n# @Project : 00PythonProjects\nimport tensorflow as tf\n\n\ndef lrelu(x, leak=0.2):\n return tf.maximum(x, leak * x)\n\n\ndef sigmoid_cross_entropy_with_logits(x, y):\n return tf.nn.sigmoid_cross_entropy_with_logits(logits=x, labels=y)\n\n\ndef discriminator(image, reuse=None, is_training=None):\n momentum = 0.9\n with tf.variable_scope('discriminator', reuse=reuse):\n h0 = lrelu(tf.layers.conv2d(image, kernel_size=5, filters=64, strides=2, padding='same'))\n\n h1 = tf.layers.conv2d(h0, kernel_size=5, filters=128, strides=2, padding='same')\n h1 = lrelu(tf.contrib.layers.batch_norm(h1, is_training=is_training, decay=momentum))\n\n h2 = tf.layers.conv2d(h1, kernel_size=5, filters=256, strides=2, padding='same')\n h2 = lrelu(tf.contrib.layers.batch_norm(h2, is_training=is_training, decay=momentum))\n\n h3 = tf.layers.conv2d(h2, kernel_size=5, filters=512, strides=2, padding='same')\n h3 = lrelu(tf.contrib.layers.batch_norm(h3, is_training=is_training, decay=momentum))\n\n h4 = tf.contrib.layers.flatten(h3)\n h4 = tf.layers.dense(h4, units=1)\n return tf.nn.sigmoid(h4), h4\n\n\ndef generator(z, is_training=None):\n momentum = 0.9\n with tf.variable_scope('generator', reuse=None):\n d = 4\n h0 = tf.layers.dense(z, units=d * d * 512)\n h0 = tf.reshape(h0, shape=[-1, d, d, 512])\n h0 = tf.nn.relu(tf.contrib.layers.batch_norm(h0, is_training=is_training, decay=momentum))\n\n h1 = tf.layers.conv2d_transpose(h0, kernel_size=5, filters=256, strides=2, padding='same')\n h1 = tf.nn.relu(tf.contrib.layers.batch_norm(h1, is_training=is_training, decay=momentum))\n\n h2 = tf.layers.conv2d_transpose(h1, kernel_size=5, filters=128, strides=2, padding='same')\n h2 = tf.nn.relu(tf.contrib.layers.batch_norm(h2, is_training=is_training, decay=momentum))\n\n h3 = tf.layers.conv2d_transpose(h2, kernel_size=5, filters=64, strides=2, padding='same')\n h3 = tf.nn.relu(tf.contrib.layers.batch_norm(h3, is_training=is_training, decay=momentum))\n\n h4 = tf.layers.conv2d_transpose(h3, kernel_size=5, filters=3, strides=2, padding='same', activation=tf.nn.tanh,\n name='g')\n return h4\n"
] | [
[
"tensorflow.contrib.layers.batch_norm",
"tensorflow.reshape",
"tensorflow.nn.sigmoid_cross_entropy_with_logits",
"tensorflow.variable_scope",
"tensorflow.layers.conv2d",
"tensorflow.layers.dense",
"tensorflow.layers.conv2d_transpose",
"tensorflow.maximum",
"tensorflow.nn.sigmoid",
"tensorflow.contrib.layers.flatten"
]
] |
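Editor's note: the model file above depends on TF1-era APIs (`tf.layers`, `tf.contrib`) that no longer exist in TF2. A small NumPy sketch, for illustration only, of what its two one-liners compute: the leaky ReLU via an elementwise maximum, and the numerically stable expression behind `sigmoid_cross_entropy_with_logits`:

```python
import numpy as np

def lrelu(x, leak=0.2):
    # tf.maximum(x, leak * x): keeps the larger branch elementwise.
    return np.maximum(x, leak * x)

def sigmoid_xent_with_logits(x, z):
    # Stable form: max(x, 0) - x*z + log(1 + exp(-|x|)), which equals
    # -z*log(sigmoid(x)) - (1-z)*log(1 - sigmoid(x)) without overflow.
    return np.maximum(x, 0) - x * z + np.log1p(np.exp(-np.abs(x)))

x = np.array([-3.0, 0.0, 2.5])
print(lrelu(x))  # [-0.6  0.   2.5]

z = np.array([0.0, 1.0, 1.0])
sig = 1 / (1 + np.exp(-x))
naive = -(z * np.log(sig) + (1 - z) * np.log(1 - sig))
print(np.allclose(sigmoid_xent_with_logits(x, z), naive))  # True
```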
XingxingZhang/PyTorch-NLP | [
"b998dbbd943f7a00f67fd94aacbe5e865577da33"
] | [
"examples/awd-lstm-lm/utils.py"
] | [
"from torch.autograd import Variable\n\n\ndef repackage_hidden(h):\n \"\"\"Wraps hidden states in new Variables, to detach them from their history.\"\"\"\n if type(h) == Variable:\n return Variable(h.data)\n else:\n return tuple(repackage_hidden(v) for v in h)\n"
] | [
[
"torch.autograd.Variable"
]
] |
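Editor's note: `repackage_hidden` above predates PyTorch 0.4, where `Variable` was merged into `Tensor`. A modern equivalent (a sketch, not from the repo) detaches tensors instead; the recursion still handles LSTM `(h, c)` tuples:

```python
import torch

def repackage_hidden(h):
    """Detach hidden states from their history so backprop stops here."""
    if isinstance(h, torch.Tensor):
        return h.detach()
    return tuple(repackage_hidden(v) for v in h)

h = (torch.zeros(2, 4, 8, requires_grad=True),
     torch.zeros(2, 4, 8, requires_grad=True))
h = repackage_hidden(h)
print(h[0].requires_grad)  # False: gradients no longer flow into old steps
```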
renatomatz/pdpipe | [
"edcb5553d40e724d02f89dc828301d03155a19ee"
] | [
"pdpipe/nltk_stages.py"
] | [
"\"\"\"PdPipeline stages dependent on the nltk Python library.\n\nPlease note that the nltk Python package must be installed for the stages in\nthis module to work.\n\nWhen attempting to load stages from this module, pdpipe will first attempt to\nimport nltk. If it fails, it will issue a warning, will not import any of the\npipeline stages that make up this module, and continue to load other pipeline\nstages.\n\"\"\"\n\nimport os\nimport importlib\nimport collections\n\nimport nltk\nimport pandas as pd\n\nfrom pdpipe.core import PdPipelineStage\nfrom pdpipe.util import out_of_place_col_insert\nfrom pdpipe.col_generation import MapColVals\nfrom pdpipe.shared import (\n _interpret_columns_param,\n _list_str\n)\n\n\nclass TokenizeWords(MapColVals):\n \"\"\"A pipeline stage that tokenize a sentence into words by whitespaces.\n\n Note: The nltk package must be installed for this pipeline stage to work.\n\n Parameters\n ----------\n columns : str or list-like\n Column names in the DataFrame to be tokenized.\n drop : bool, default True\n If set to True, the source columns are dropped after being tokenized,\n and the resulting tokenized columns retain the names of the source\n columns. Otherwise, tokenized columns gain the suffix '_tok'.\n\n Example\n -------\n >>> import pandas as pd; import pdpipe as pdp;\n >>> df = pd.DataFrame(\n ... [[3.2, \"Kick the baby!\"]], [1], ['freq', 'content'])\n >>> tokenize_stage = pdp.TokenizeWords('content')\n >>> tokenize_stage(df)\n freq content\n 1 3.2 [Kick, the, baby, !]\n \"\"\"\n\n _DEF_TOKENIZE_EXC_MSG = (\"Tokenize stage failed because not all columns \"\n \"{} are present in input dataframe and are of\"\n \" dtype object.\")\n _DEF_TOKENIZE_APP_MSG = \"Tokenizing {}...\"\n\n @staticmethod\n def __check_punkt():\n try:\n nltk.word_tokenize('a a')\n except LookupError: # pragma: no cover\n # try:\n # nltk.data.find('corpora/stopwords')\n # except LookupError: # pragma: no cover\n dpath = os.path.expanduser('~/nltk_data/tokenizers')\n os.makedirs(dpath, exist_ok=True)\n nltk.download('punkt')\n\n def __init__(self, columns, drop=True, **kwargs):\n self.__check_punkt()\n self._columns = _interpret_columns_param(columns)\n col_str = _list_str(self._columns)\n super_kwargs = {\n 'columns': columns,\n 'value_map': nltk.word_tokenize,\n 'drop': drop,\n 'suffix': '_tok',\n 'exmsg': TokenizeWords._DEF_TOKENIZE_EXC_MSG.format(col_str),\n 'appmsg': TokenizeWords._DEF_TOKENIZE_APP_MSG.format(col_str),\n 'desc': \"Tokenize {}\".format(col_str),\n }\n super_kwargs.update(**kwargs)\n super().__init__(**super_kwargs)\n\n def _prec(self, df):\n return super()._prec(df) and all(\n col_type == object for col_type in df.dtypes[self._columns])\n\n\nclass UntokenizeWords(MapColVals):\n \"\"\"A pipeline stage that joins token lists to whitespace-seperated strings.\n\n Note: The nltk package must be installed for this pipeline stage to work.\n\n Parameters\n ----------\n columns : str or list-like\n Column names in the DataFrame to be untokenized.\n drop : bool, default True\n If set to True, the source columns are dropped after being untokenized,\n and the resulting columns retain the names of the source columns.\n Otherwise, untokenized columns gain the suffix '_untok'.\n\n Example\n -------\n >>> import pandas as pd; import pdpipe as pdp;\n >>> data = [[3.2, ['Shake', 'and', 'bake!']]]\n >>> df = pd.DataFrame(data, [1], ['freq', 'content'])\n >>> untokenize_stage = pdp.UntokenizeWords('content')\n >>> untokenize_stage(df)\n freq content\n 1 3.2 Shake and bake!\n \"\"\"\n\n 
_DEF_UNTOKENIZE_EXC_MSG = (\"Unokenize stage failed because not all columns\"\n \" {} are present in input dataframe and are of\"\n \" dtype object.\")\n\n @staticmethod\n def _untokenize_list(token_list):\n return ' '.join(token_list)\n\n def __init__(self, columns, drop=True, **kwargs):\n self._columns = _interpret_columns_param(columns)\n col_str = _list_str(self._columns)\n super_kwargs = {\n 'columns': columns,\n 'value_map': UntokenizeWords._untokenize_list,\n 'drop': drop,\n 'suffix': '_untok',\n 'exmsg': UntokenizeWords._DEF_UNTOKENIZE_EXC_MSG.format(col_str),\n 'appmsg': \"Untokenizing {}\".format(col_str),\n 'desc': \"Untokenize {}\".format(col_str),\n }\n super_kwargs.update(**kwargs)\n super().__init__(**super_kwargs)\n\n def _prec(self, df):\n return super()._prec(df) and all(\n col_type == object for col_type in df.dtypes[self._columns])\n\n\nclass RemoveStopwords(MapColVals):\n \"\"\"A pipeline stage that removes stopwords from a tokenized list.\n\n Note: The nltk package must be installed for this pipeline stage to work.\n\n Parameters\n ----------\n langugae : str or array-like\n If a string is given, interpreted as the language of the stopwords, and\n should then be one of the languages supported by the NLTK Stopwords\n Corpus. If a list is given, it is assumed to be the list of stopwords\n to remove.\n columns : str or list-like\n Column names in the DataFrame from which to remove stopwords.\n drop : bool, default True\n If set to True, the source columns are dropped after stopword removal,\n and the resulting columns retain the names of the source columns.\n Otherwise, resulting columns gain the suffix '_nostop'.\n\n Example\n -------\n >> import pandas as pd; import pdpipe as pdp;\n >> data = [[3.2, ['kick', 'the', 'baby']]]\n >> df = pd.DataFrame(data, [1], ['freq', 'content'])\n >> remove_stopwords = pdp.RemoveStopwords('english', 'content')\n >> remove_stopwords(df)\n freq content\n 1 3.2 [kick, baby]\n \"\"\"\n\n _DEF_STOPWORDS_EXC_MSG = (\"RemoveStopwords stage failed because not all \"\n \"columns {} are present in input dataframe and \"\n \"are of dtype object.\")\n _DEF_STOPWORDS_APP_MSG = \"Removing stopwords from {}...\"\n\n class _StopwordsRemover(object):\n def __init__(self, stopwords_list):\n self.stopwords_list = stopwords_list\n\n def __call__(self, word_list):\n return [w for w in word_list if w not in self.stopwords_list]\n\n @staticmethod\n def __stopwords_by_language(language):\n try:\n from nltk.corpus import stopwords\n return stopwords.words(language)\n except LookupError: # pragma: no cover\n # try:\n # nltk.data.find('corpora/stopwords')\n # except LookupError: # pragma: no cover\n dpath = os.path.expanduser('~/nltk_data/corpora/stopwords')\n os.makedirs(dpath, exist_ok=True)\n nltk.download('stopwords')\n from nltk.corpus import stopwords\n return stopwords.words(language)\n\n def __init__(self, language, columns, drop=True, **kwargs):\n self._language = language\n if isinstance(language, str):\n self._stopwords_list = RemoveStopwords.__stopwords_by_language(\n language)\n elif isinstance(language, collections.Iterable):\n self._stopwords_list = list(language)\n else:\n raise TypeError(\"language parameter should be string or list!\")\n self._stopwords_remover = RemoveStopwords._StopwordsRemover(\n self._stopwords_list)\n self._columns = _interpret_columns_param(columns)\n col_str = _list_str(self._columns)\n super_kwargs = {\n 'columns': columns,\n 'value_map': self._stopwords_remover,\n 'drop': drop,\n 'suffix': '_nostop',\n 'exmsg': 
RemoveStopwords._DEF_STOPWORDS_EXC_MSG.format(col_str),\n 'appmsg': RemoveStopwords._DEF_STOPWORDS_APP_MSG.format(col_str),\n 'desc': \"Remove stopwords from {}\".format(col_str),\n }\n super_kwargs.update(**kwargs)\n super().__init__(**super_kwargs)\n\n def _prec(self, df):\n return super()._prec(df) and all(\n col_type == object for col_type in df.dtypes[self._columns])\n\n\nclass SnowballStem(MapColVals):\n \"\"\"A pipeline stage that stems words in a list using the Snowball stemmer.\n\n Note: The nltk package must be installed for this pipeline stage to work.\n\n Parameters\n ----------\n stemmer_name : str\n The name of the Snowball stemmer to use. Should be one of the Snowball\n stemmers implemented by nltk. E.g. 'EnglishStemmer'.\n columns : str or list-like\n Column names in the DataFrame to stem tokens in.\n drop : bool, default True\n If set to True, the source columns are dropped after stemming, and the\n resulting columns retain the names of the source columns. Otherwise,\n resulting columns gain the suffix '_stem'.\n\n Example\n -------\n >>> import pandas as pd; import pdpipe as pdp;\n >>> data = [[3.2, ['kicking', 'boats']]]\n >>> df = pd.DataFrame(data, [1], ['freq', 'content'])\n >>> remove_stopwords = pdp.SnowballStem('EnglishStemmer', 'content')\n >>> remove_stopwords(df)\n freq content\n 1 3.2 [kick, boat]\n \"\"\"\n\n _DEF_STEM_EXC_MSG = (\"SnowballStem stage failed because not all \"\n \"columns {} are present in input dataframe and \"\n \"are of dtype object.\")\n _DEF_STEM_APP_MSG = \"Stemming tokens in {}...\"\n\n class _TokenListStemmer(object):\n def __init__(self, stemmer):\n self.stemmer = stemmer\n\n def __call__(self, token_list):\n return [self.stemmer.stem(w) for w in token_list]\n\n @staticmethod\n def __stemmer_by_name(stemmer_name):\n snowball_module = importlib.import_module('nltk.stem.snowball')\n stemmer_cls = getattr(snowball_module, stemmer_name)\n return stemmer_cls()\n\n @staticmethod\n def __safe_stemmer_by_name(stemmer_name):\n try:\n return SnowballStem.__stemmer_by_name(stemmer_name)\n except LookupError: # pragma: no cover\n dpath = os.path.expanduser('~/nltk_data/stemmers')\n os.makedirs(dpath, exist_ok=True)\n nltk.download('snowball_data')\n return SnowballStem.__stemmer_by_name(stemmer_name)\n\n def __init__(self, stemmer_name, columns, drop=True, **kwargs):\n self.stemmer_name = stemmer_name\n self.stemmer = SnowballStem.__safe_stemmer_by_name(stemmer_name)\n self.list_stemmer = SnowballStem._TokenListStemmer(self.stemmer)\n self._columns = _interpret_columns_param(columns)\n col_str = _list_str(self._columns)\n super_kwargs = {\n 'columns': columns,\n 'value_map': self.list_stemmer,\n 'drop': drop,\n 'suffix': '_stem',\n 'exmsg': SnowballStem._DEF_STEM_EXC_MSG.format(col_str),\n 'appmsg': SnowballStem._DEF_STEM_APP_MSG.format(col_str),\n 'desc': \"Stem tokens in {}\".format(col_str),\n }\n super_kwargs.update(**kwargs)\n super().__init__(**super_kwargs)\n\n def _prec(self, df):\n return super()._prec(df) and all(\n col_type == object for col_type in df.dtypes[self._columns])\n\n\nclass DropRareTokens(PdPipelineStage):\n \"\"\"A pipeline stage that drop rare tokens from token lists.\n\n Note: The nltk package must be installed for this pipeline stage to work.\n\n Parameters\n ----------\n columns : str or list-like\n Column names in the DataFrame for which to drop rare words.\n threshold : int\n The rarity threshold to use. 
Only tokens appearing more than this\n number of times in a column will remain in token lists in that column.\n drop : bool, default True\n If set to True, the source columns are dropped after being transformed,\n and the resulting columns retain the names of the source columns.\n Otherwise, the new columns gain the suffix '_norare'.\n\n Example\n -------\n >>> import pandas as pd; import pdpipe as pdp;\n >>> data = [[7, ['a', 'a', 'b']], [3, ['b', 'c', 'd']]]\n >>> df = pd.DataFrame(data, columns=['num', 'chars'])\n >>> rare_dropper = pdp.DropRareTokens('chars', 1)\n >>> rare_dropper(df)\n num chars\n 0 7 [a, a, b]\n 1 3 [b]\n \"\"\"\n\n _DEF_RARE_EXC_MSG = (\"DropRareTokens stage failed because not all columns \"\n \"{} were found in input dataframe.\")\n\n def __init__(self, columns, threshold, drop=True, **kwargs):\n self._columns = _interpret_columns_param(columns)\n self._threshold = threshold\n self._drop = drop\n self._rare_removers = {}\n col_str = _list_str(self._columns)\n super_kwargs = {\n 'exmsg': DropRareTokens._DEF_RARE_EXC_MSG.format(col_str),\n 'appmsg': \"Dropping rare tokens from {}...\".format(col_str),\n 'desc': \"Drop rare tokens from {}\".format(col_str)\n }\n super_kwargs.update(**kwargs)\n super().__init__(**super_kwargs)\n\n def _prec(self, df):\n return set(self._columns).issubset(df.columns)\n\n class _RareRemover(object):\n def __init__(self, rare_words):\n self.rare_words = rare_words\n\n def __call__(self, tokens):\n return [w for w in tokens if w not in self.rare_words]\n\n @staticmethod\n def __get_rare_remover(series, threshold):\n token_list = [item for sublist in series for item in sublist]\n freq_dist = nltk.FreqDist(token_list)\n freq_series = pd.DataFrame.from_dict(freq_dist, orient='index')[0]\n rare_words = freq_series[freq_series <= threshold]\n return DropRareTokens._RareRemover(rare_words)\n\n def _fit_transform(self, df, verbose):\n inter_df = df\n for colname in self._columns:\n source_col = df[colname]\n loc = df.columns.get_loc(colname) + 1\n new_name = colname + \"_norare\"\n if self._drop:\n inter_df = inter_df.drop(colname, axis=1)\n new_name = colname\n loc -= 1\n rare_remover = DropRareTokens.__get_rare_remover(\n source_col, self._threshold)\n self._rare_removers[colname] = rare_remover\n inter_df = out_of_place_col_insert(\n df=inter_df,\n series=source_col.map(rare_remover),\n loc=loc,\n column_name=new_name)\n self.is_fitted = True\n return inter_df\n\n def _transform(self, df, verbose):\n inter_df = df\n for colname in self._columns:\n source_col = df[colname]\n loc = df.columns.get_loc(colname) + 1\n new_name = colname + \"_norare\"\n if self._drop:\n inter_df = inter_df.drop(colname, axis=1)\n new_name = colname\n loc -= 1\n rare_remover = self._rare_removers[colname]\n inter_df = out_of_place_col_insert(\n df=inter_df,\n series=source_col.map(rare_remover),\n loc=loc,\n column_name=new_name)\n return inter_df\n"
] | [
[
"pandas.DataFrame.from_dict"
]
] |
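Editor's note: a dependency-free sketch of the frequency logic inside `DropRareTokens` above, with pandas/nltk replaced by `collections.Counter`: tokens whose total count is at most the threshold are dropped from every list. Note that in the original, `w not in self.rare_words` works because membership on a pandas Series tests its index (the words), not its values (the counts).

```python
from collections import Counter

def drop_rare_tokens(token_lists, threshold):
    # Count every token across all lists, mirroring nltk.FreqDist above.
    counts = Counter(tok for tokens in token_lists for tok in tokens)
    rare = {tok for tok, c in counts.items() if c <= threshold}
    return [[tok for tok in tokens if tok not in rare] for tokens in token_lists]

print(drop_rare_tokens([['a', 'a', 'b'], ['b', 'c', 'd']], threshold=1))
# [['a', 'a', 'b'], ['b']]  -- matches the DropRareTokens docstring example
```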
orris27/nus_cs5242_project_group27 | [
"2aa5722e8d9d722ff1a3a37f36cb35c1ece481f8"
] | [
"models/EncoderRNN.py"
] | [
"import torch.nn as nn\r\n\r\n\r\nclass EncoderRNN(nn.Module):\r\n def __init__(self, dim_vid, dim_hidden, input_dropout_p=0.2, rnn_dropout_p=0.5,\r\n n_layers=1, bidirectional=False, rnn_cell='gru'):\r\n \"\"\"\r\n\r\n Args:\r\n hidden_dim (int): dim of hidden state of rnn\r\n input_dropout_p (int): dropout probability for the input sequence\r\n dropout_p (float): dropout probability for the output sequence\r\n n_layers (int): number of rnn layers\r\n rnn_cell (str): type of RNN cell ('LSTM'/'GRU')\r\n \"\"\"\r\n super(EncoderRNN, self).__init__()\r\n self.dim_vid = dim_vid\r\n self.dim_hidden = dim_hidden\r\n self.input_dropout_p = input_dropout_p\r\n self.rnn_dropout_p = rnn_dropout_p\r\n self.n_layers = n_layers\r\n self.bidirectional = bidirectional\r\n self.rnn_cell = rnn_cell\r\n\r\n self.vid2hid = nn.Linear(dim_vid, dim_hidden)\r\n self.input_dropout = nn.Dropout(input_dropout_p)\r\n\r\n if rnn_cell.lower() == 'lstm':\r\n self.rnn_cell = nn.LSTM\r\n elif rnn_cell.lower() == 'gru':\r\n self.rnn_cell = nn.GRU\r\n\r\n self.rnn = self.rnn_cell(dim_hidden, dim_hidden, n_layers, batch_first=True,\r\n bidirectional=bool(bidirectional), dropout=self.rnn_dropout_p)\r\n\r\n self._init_hidden()\r\n\r\n def _init_hidden(self):\r\n nn.init.xavier_normal_(self.vid2hid.weight)\r\n\r\n def forward(self, vid_feats):\r\n \"\"\"\r\n Applies a multi-layer RNN to an input sequence.\r\n Args:\r\n input_var (batch, seq_len): tensor containing the features of the input sequence.\r\n input_lengths (list of int, optional): A list that contains the lengths of sequences\r\n in the mini-batch\r\n Returns: output, hidden\r\n - **output** (batch, seq_len, hidden_size): variable containing the encoded features of the input sequence\r\n - **hidden** (num_layers * num_directions, batch, hidden_size): variable containing the features in the hidden state h\r\n \"\"\"\r\n batch_size, seq_len, dim_vid = vid_feats.size()\r\n vid_feats = self.vid2hid(vid_feats.view(-1, dim_vid))\r\n vid_feats = self.input_dropout(vid_feats)\r\n vid_feats = vid_feats.view(batch_size, seq_len, self.dim_hidden)\r\n self.rnn.flatten_parameters()\r\n output, hidden = self.rnn(vid_feats) # (batch_size, 30, 512)\r\n return output, hidden\r\n"
] | [
[
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.nn.init.xavier_normal_"
]
] |
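Editor's note: a quick usage sketch for the encoder above. The import assumes the repo root is on `PYTHONPATH`; the dimensions (30 frames of 2048-d video features) are assumptions chosen for illustration, not values taken from the repo's config:

```python
import torch
from models.EncoderRNN import EncoderRNN

encoder = EncoderRNN(dim_vid=2048, dim_hidden=512, rnn_cell='gru')
vid_feats = torch.randn(4, 30, 2048)   # (batch, seq_len, dim_vid)
output, hidden = encoder(vid_feats)
# PyTorch warns that rnn_dropout_p has no effect with n_layers=1; harmless.
print(output.shape)   # torch.Size([4, 30, 512])
print(hidden.shape)   # torch.Size([1, 4, 512]) for a unidirectional GRU
```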
Mawiszus/World-GAN | [
"0ad21849e284e18c44e7ffede0eefb764e0ff4bb"
] | [
"minecraft/block2vec/block2vec_dataset.py"
] | [
"import os\nfrom collections import defaultdict\nfrom itertools import product\nfrom typing import Tuple\n\nfrom loguru import logger\nimport numpy as np\nfrom tqdm import tqdm\n\nfrom PyAnvilEditor.pyanvil import World\nfrom torch.utils.data.dataset import Dataset\n\n\nclass Block2VecDataset(Dataset):\n\n def __init__(self, input_world_path: str, coords: Tuple[Tuple[int, int], Tuple[int, int], Tuple[int, int]], cutout_coords: bool, neighbor_radius: int = 1):\n \"\"\"Block dataset with configurable neighborhood radius.\n\n Args:\n input_world_path (str): path to the Minecraft world\n coords (Tuple[Tuple[int, int], Tuple[int, int], Tuple[int, int]]): x, y, z coordinates of extracted region\n neighbor_radius (int): neighbors to retrieve as a context\n \"\"\"\n super().__init__()\n self.input_world_path = input_world_path\n self.world = World(os.path.basename(input_world_path),\n save_location=os.path.abspath(os.path.dirname(input_world_path)), write=False, debug=False)\n self.x_lims, self.y_lims, self.z_lims = coords if cutout_coords else self._read_size()\n padding = 2 * neighbor_radius # one token on each side\n self.x_dim = self.x_lims[1] - self.x_lims[0] - padding\n self.y_dim = self.y_lims[1] - self.y_lims[0] - padding\n self.z_dim = self.z_lims[1] - self.z_lims[0] - padding\n logger.info(\"Cutting {} x {} x {} volume from {}\", self.x_dim,\n self.y_dim, self.z_dim, self.input_world_path)\n self.neighbor_radius = neighbor_radius\n self._read_blocks()\n self._init_discards()\n\n def _init_discards(self):\n t = 0.001\n token_frequencies = list(self.block_frequency.values())\n f = np.array(token_frequencies) / sum(token_frequencies)\n self.discards = 1.0 - (np.sqrt(f / t) + 1) * (t / f)\n\n def _read_size(self):\n regions = os.listdir(self.world.world_folder / 'region')\n arr_regions = np.zeros((len(regions), 2))\n for i, r in enumerate(regions):\n name = r.split(\".\")\n rx = int(name[1])\n rz = int(name[2])\n arr_regions[i] = rx, rz\n igno_border = 0\n x_lims = [int((min(arr_regions[:, 0]) * 32 * 16) + igno_border),\n int((max(arr_regions[:, 0]) * 32 * 16) - igno_border)]\n z_lims = [int((min(arr_regions[:, 1]) * 32 * 16) + igno_border),\n int((max(arr_regions[:, 1]) * 32 * 16) - igno_border)]\n y_lims = [0, 256]\n return x_lims, y_lims, z_lims\n\n def _read_blocks(self):\n self.block_frequency = defaultdict(int)\n coordinates = [(x, y, z) for x, y, z in product(range(self.x_lims[0], self.x_lims[1] + 1),\n range(self.y_lims[0], self.y_lims[1] + 1), range(self.z_lims[0], self.z_lims[1] + 1))]\n logger.info(\"Collecting {} blocks\", len(self))\n for name in tqdm([self._get_block(*coord) for coord in coordinates]):\n self.block_frequency[name] += 1\n logger.info(\n \"Found the following blocks {blocks}\", blocks=dict(self.block_frequency))\n self.block2idx = dict()\n self.idx2block = dict()\n for name, count in self.block_frequency.items():\n block_idx = len(self.block2idx)\n self.block2idx[name] = block_idx\n self.idx2block[block_idx] = name\n\n def __getitem__(self, index):\n coords = self._idx_to_coords(index)\n block = self._get_block(*coords)\n target = self.block2idx[block]\n if np.random.rand() < self.discards[target]:\n return self.__getitem__(np.random.randint(self.__len__()))\n neighbor_blocks = self._get_neighbors(*coords)\n context = np.array([self.block2idx[n] for n in neighbor_blocks])\n return target, context\n\n def _idx_to_coords(self, index):\n z = index % (self.z_dim + 1)\n y = int(((index - z) / (self.z_dim + 1)) % (self.y_dim + 1))\n x = int(((index - z) / (self.z_dim + 
1) - y) / (self.y_dim + 1))\n x += self.x_lims[0] + self.neighbor_radius\n y += self.y_lims[0] + self.neighbor_radius\n z += self.z_lims[0] + self.neighbor_radius\n return x, y, z\n\n def _get_block(self, x, y, z):\n block = self.world.get_block([x, y, z])\n name = block.get_state().name\n return name\n\n def _get_neighbors(self, x, y, z):\n neighbor_coords = [(x + x_diff, y + y_diff, z + z_diff) for x_diff, y_diff, z_diff in product(list(\n range(-self.neighbor_radius, self.neighbor_radius + 1)), repeat=3) if x_diff != 0 or y_diff != 0 or z_diff != 0]\n return [self._get_block(*coord) for coord in neighbor_coords]\n\n def __len__(self):\n return self.x_dim * self.y_dim * self.z_dim\n"
] | [
[
"numpy.array",
"numpy.random.rand",
"numpy.sqrt"
]
] |
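Editor's note: `_init_discards` above uses the word2vec subsampling heuristic: with relative frequency f and threshold t, a sample is discarded with probability 1 - (sqrt(f/t) + 1) * (t/f). A short NumPy illustration with made-up block counts:

```python
import numpy as np

t = 0.001
counts = np.array([900_000, 90_000, 9_000, 900])  # e.g. air >> rare ore
f = counts / counts.sum()
discards = 1.0 - (np.sqrt(f / t) + 1) * (t / f)
print(discards.round(3))
# Very frequent blocks (air) are discarded most of the time; rare blocks
# get a negative value, which np.random.rand() < p never satisfies in
# __getitem__ above, so rare blocks are always kept.
```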
BCV-Uniandes/SMIT | [
"c1084aa5040ac18a48db7679e050c4ce577b8535"
] | [
"models/spectral.py"
] | [
"import torch\nfrom torch import nn\nfrom torch.nn import Parameter\n\n\ndef l2normalize(v, eps=1e-12):\n return v / (v.norm() + eps)\n\n\nclass SpectralNorm(nn.Module):\n def __init__(self, module, name='weight', power_iterations=1):\n super(SpectralNorm, self).__init__()\n self.module = module\n self.name = name\n self.power_iterations = power_iterations\n if not self._made_params():\n self._make_params()\n\n def _update_u_v(self):\n u = getattr(self.module, self.name + \"_u\")\n v = getattr(self.module, self.name + \"_v\")\n w = getattr(self.module, self.name + \"_bar\")\n\n height = w.data.shape[0]\n for _ in range(self.power_iterations):\n v.data = l2normalize(\n torch.mv(torch.t(w.view(height, -1).data), u.data))\n u.data = l2normalize(torch.mv(w.view(height, -1).data, v.data))\n\n sigma = u.dot(w.view(height, -1).mv(v))\n setattr(self.module, self.name, w / sigma.expand_as(w))\n\n def _made_params(self):\n try:\n getattr(self.module, self.name + \"_u\")\n getattr(self.module, self.name + \"_v\")\n getattr(self.module, self.name + \"_bar\")\n return True\n except AttributeError:\n return False\n\n def _make_params(self):\n w = getattr(self.module, self.name)\n\n height = w.data.shape[0]\n width = w.view(height, -1).data.shape[1]\n\n u = Parameter(w.data.new(height).normal_(0, 1), requires_grad=False)\n v = Parameter(w.data.new(width).normal_(0, 1), requires_grad=False)\n u.data = l2normalize(u.data)\n v.data = l2normalize(v.data)\n w_bar = Parameter(w.data)\n\n del self.module._parameters[self.name]\n\n self.module.register_parameter(self.name + \"_u\", u)\n self.module.register_parameter(self.name + \"_v\", v)\n self.module.register_parameter(self.name + \"_bar\", w_bar)\n\n def forward(self, *args):\n self._update_u_v()\n return self.module.forward(*args)\n"
] | [
[
"torch.nn.Parameter"
]
] |
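Editor's note: the `_update_u_v` power iteration above estimates the largest singular value of the reshaped weight; dividing the weight by it bounds the layer's Lipschitz constant near 1. A NumPy sketch checking the iteration against `np.linalg.svd`:

```python
import numpy as np

def l2normalize(v, eps=1e-12):
    return v / (np.linalg.norm(v) + eps)

np.random.seed(0)
w = np.random.randn(64, 128)
u = l2normalize(np.random.randn(64))
# The layer runs one step per forward pass; u and v persist between
# calls, so the estimate keeps improving during training. Here we just
# run 50 steps at once.
for _ in range(50):
    v = l2normalize(w.T @ u)
    u = l2normalize(w @ v)
sigma = u @ w @ v

print(np.allclose(sigma, np.linalg.svd(w, compute_uv=False)[0], rtol=1e-3))  # True
```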
dmw51/reactiondataextractor | [
"f7d2ee9a2a7df17ffcf9b33efee2bcb49dfdcbae"
] | [
"reactiondataextractor/extractors/diagrams.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nDiagrams\n=======\n\nThis module contains a single diagram extraction class.\n\nauthor: Damian Wilary\nemail: [email protected]\n\n\"\"\"\nimport copy\nimport logging\nfrom matplotlib.patches import Rectangle\nimport numpy as np\n\nfrom sklearn.cluster import DBSCAN\nfrom sklearn.preprocessing import MinMaxScaler\nfrom skimage.transform import probabilistic_hough_line\n\n\nfrom ..models import BaseExtractor, Rect, Figure, FigureRoleEnum, Panel\nfrom ..utils.processing import dilate_fragments, erase_elements, isolate_patches, skeletonize_area_ratio, skeletonize\nfrom .. import settings\n\nlog = logging.getLogger('extract.diagrams')\n\n\nclass DiagramExtractor(BaseExtractor):\n \"\"\"Main class for extracting diagrams from chemical reaction schemes\n :param fig: main figure\n :type fig: Figure\n :param arrows: all arrows in the reaction scheme\n :type arrows: list[SolidArrow]\"\"\"\n def __init__(self, fig=None, arrows=None):\n self.fig = fig if fig is not None else settings.main_figure[0]\n self.arrows = arrows if arrows is not None else []\n self._extracted = None\n self.backbones = None\n\n @property\n def extracted(self):\n \"\"\"Returns extracted objects\"\"\"\n return self._extracted\n\n def extract(self):\n \"\"\"Main extraction method\"\"\"\n self.backbones = self.detect_backbones()\n self.fig.kernel_sizes = self._find_optimal_dilation_ksize()\n self._extracted = self.complete_structures()\n return self.extracted\n\n def plot_extracted(self, ax):\n \"\"\"Adds extracted panels onto a canvas of ``ax``\"\"\"\n if not self.extracted:\n pass\n else:\n for panel in self.extracted:\n rect_bbox = Rectangle((panel.left, panel.top), panel.right - panel.left, panel.bottom - panel.top,\n facecolor=(52/255, 0, 103/255), edgecolor=(6/255, 0, 99/255), alpha=0.4)\n ax.add_patch(rect_bbox)\n\n def detect_backbones(self):\n \"\"\"\n Detects carbon backbones based on features such as size, aspect ratio and number of detected single bonds.\n\n Based on an estimated single bond length, each connected component is analysed to find the number of bonds.\n Area and aspect ratio are also used as features. 
Finally, a DBSCAN is performed on the formed, normalised\n dataset.\n :return: connected components classified as structural backbones\n :rtype: list\n \"\"\"\n fig = self.fig\n ccs = fig.connected_components\n ccs = sorted(ccs, key=lambda panel: panel.area, reverse=True)\n cc_lines = []\n for cc in ccs:\n isolated_cc_fig = isolate_patches(fig, [cc])\n isolated_cc_fig = skeletonize(isolated_cc_fig)\n\n num_lines = len(probabilistic_hough_line(isolated_cc_fig.img,\n line_length=fig.single_bond_length, threshold=10, line_gap=0))\n cc_lines.append(num_lines)\n\n cc_lines = np.array(cc_lines).reshape(-1, 1)\n area = np.array([cc.area for cc in ccs]).reshape(-1, 1)\n aspect_ratio = np.array([cc.aspect_ratio for cc in ccs]).reshape(-1, 1)\n mean_area = np.mean(area)\n\n data = np.hstack((cc_lines, area, aspect_ratio))\n data = MinMaxScaler().fit_transform(data)\n\n labels = DBSCAN(eps=0.15, min_samples=20).fit_predict(data)\n\n paired = list(zip(ccs, labels))\n paired = [(cc, label) if cc.area > mean_area else (cc, 0) for cc, label in paired]\n\n backbones = [panel for panel, label in paired if label == -1]\n backbones = set(backbones)\n arrows = set([cc for cc in fig.connected_components if cc.role == FigureRoleEnum.ARROW])\n backbones = list(backbones.difference(arrows))\n\n [setattr(backbone, 'role', FigureRoleEnum.STRUCTUREBACKBONE) for backbone in backbones]\n\n return backbones\n\n def complete_structures(self):\n \"\"\"\n Dilates a figure and uses backbones to find complete chemical structures (backbones + superatoms etc.).\n\n Arrows are first removed to increase accuracy of the process. Figure is dilates around each backbone according\n to density of features around it. The diagrams are derived from the dilated backbones. Roles are assigned\n to the disconnected diagram parts.\n :return:bounding boxes of chemical structures\n :rtype: list\n \"\"\"\n fig = self.fig\n fig_no_arrows = erase_elements(fig, self.arrows)\n dilated_structure_panels, other_ccs = self.find_dilated_structures(fig_no_arrows)\n structure_panels = self._complete_structures(dilated_structure_panels)\n self._assign_backbone_auxiliaries(structure_panels, other_ccs) # Assigns cc roles\n temp = copy.deepcopy(structure_panels)\n # simple filtering to account for multiple backbone parts (disconnected by heteroatom characters)\n # corresponding to the same diagram\n for panel1 in temp:\n for panel2 in temp:\n if panel2.contains(panel1) and panel2 != panel1:\n try:\n structure_panels.remove(panel1)\n except ValueError:\n pass\n\n return list(set(structure_panels))\n\n def find_dilated_structures(self, fig=None):\n \"\"\"\n Finds dilated structures by first dilating the image several times using backbone-specific kernel size.\n\n For each backbone, the figure is dilated using a backbone-specific kernel size. Dilated structure panel is then\n found based on comparison with the original backbone. A crop is made for each structure. 
If there is more than\n one connected component that is fully contained within the crop, it is noted and this information used later\n when the small disconnected ccs are assigned roles (This additional connected component is likely a label).\n :param Figure fig: Analysed figure\n :return: (dilated_structure_panels, other_ccs) pair of collections containing the dilated panels and\n separate ccs present within these dilated panels\n :rtype: tuple of lists\n \"\"\"\n if fig is None:\n fig = self.fig\n dilated_structure_panels = []\n other_ccs = []\n dilated_imgs = {}\n\n for backbone in self.backbones:\n ksize = fig.kernel_sizes[backbone]\n try:\n dilated_temp = dilated_imgs[ksize]\n except KeyError:\n dilated_temp = dilate_fragments(fig, ksize)\n dilated_imgs[ksize] = dilated_temp\n\n dilated_structure_panel = [cc for cc in dilated_temp.connected_components if cc.contains(backbone)][0]\n # Crop around with a small extension to get the connected component correctly\n structure_crop = dilated_structure_panel.create_extended_crop(dilated_temp, extension=5)\n other = [structure_crop.in_main_fig(c) for c in structure_crop.connected_components if\n structure_crop.in_main_fig(c) != dilated_structure_panel]\n other_ccs.extend(other)\n dilated_structure_panels.append(dilated_structure_panel)\n\n return dilated_structure_panels, other_ccs\n\n def _assign_backbone_auxiliaries(self, structure_panels, cno_ccs):\n \"\"\"\n Assigns roles to small disconnected diagram parts.\n\n Takes in the detected structures panels and ccs that are contained inside structure panels but are\n non-overlapping (``cno_ccs``) - including in the dilated figure. Assigns roles to all (small) connected\n components contained within structure panels, and finally resets role for the special ``cno_ccs``. These are\n likely to be labels lying very close to the diagrams themselves.\n :param [Panel,...] structure_panels: iterable of found structure panels\n :param [Panel,...] cno_ccs: contained-non-overlapping cc;ccs that are not parts of diagrams even though\n their panels are situated fully inside panels of chemical diagrams (common with labels).\n :return: None (mutates ''role'' attribute of each relevant connected component)\n \"\"\"\n fig = self.fig\n\n for parent_panel in structure_panels:\n for cc in fig.connected_components:\n if parent_panel.contains(cc): # Set the parent panel for all\n setattr(cc, 'parent_panel', parent_panel)\n if cc.role != FigureRoleEnum.STRUCTUREBACKBONE:\n # Set role for all except backbone which had been set\n setattr(cc, 'role', FigureRoleEnum.STRUCTUREAUXILIARY)\n\n for cc in cno_ccs:\n # ``cno_ccs`` are dilated - find raw ccs in ``fig``\n fig_ccs = [fig_cc for fig_cc in fig.connected_components if cc.contains(fig_cc)]\n\n [setattr(fig_cc, 'role', None) for fig_cc in fig_ccs]\n\n log.debug('Roles of structure auxiliaries have been assigned.')\n\n def _complete_structures(self, dilated_structure_panels):\n \"\"\"Uses ``dilated_structure_panels`` to find all constituent ccs of each chemical structure.\n\n Finds connected components belonging to a chemical structure and creates a large panel out of them. 
This\n effectively normalises panel sizes to be independent of chosen dilation kernel sizes.\n :return [Panel,...]: iterable of Panels bounding complete chemical structures.\n \"\"\"\n\n structure_panels = []\n for dilated_structure in dilated_structure_panels:\n constituent_ccs = [cc for cc in self.fig.connected_components if dilated_structure.contains(cc)]\n parent_structure_panel = Panel.create_megarect(constituent_ccs)\n structure_panels.append(parent_structure_panel)\n return structure_panels\n\n def _find_optimal_dilation_ksize(self):\n \"\"\"\n Use structural backbones to calculate local skeletonised-pixel ratio and find optimal dilation kernel sizes for\n structural segmentation. Each backbone is assigned its own dilation kernel to account for varying skel-pixel\n ratio around different backbones\n :return: kernel sizes appropriate for each backbone\n :rtype: dict\n \"\"\"\n\n backbones = [cc for cc in self.fig.connected_components if cc.role == FigureRoleEnum.STRUCTUREBACKBONE]\n\n kernel_sizes = {}\n for backbone in backbones:\n left, right, top, bottom = backbone\n horz_ext, vert_ext = backbone.width // 2, backbone.height // 2\n crop_rect = Rect(left - horz_ext, right + horz_ext, top - vert_ext, bottom + vert_ext)\n p_ratio = skeletonize_area_ratio(self.fig, crop_rect)\n log.debug(f'found in-crop skel_pixel ratio: {p_ratio}')\n\n if p_ratio >= 0.02:\n kernel_size = 4\n elif 0.01 < p_ratio < 0.02:\n kernel_size = np.ceil(20 - 800 * p_ratio)\n else:\n kernel_size = 12\n kernel_sizes[backbone] = kernel_size\n\n log.debug(f'Structure segmentation kernels:{kernel_sizes.values()}')\n return kernel_sizes\n"
] | [
[
"numpy.array",
"numpy.ceil",
"numpy.mean",
"sklearn.cluster.DBSCAN",
"sklearn.preprocessing.MinMaxScaler",
"numpy.hstack",
"matplotlib.patches.Rectangle"
]
] |
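The kernel-size rule in `_find_optimal_dilation_ksize` above is piecewise but meets continuously at both cut-offs (20 − 800·0.02 = 4 and 20 − 800·0.01 = 12). A minimal standalone sketch for sanity-checking it; `kernel_size_for_ratio` is our name for the extracted rule, not a function in the file, and it casts to `int` where the file keeps the numpy float:

```python
import numpy as np

def kernel_size_for_ratio(p_ratio: float) -> int:
    """Piecewise rule from _find_optimal_dilation_ksize: dense line work
    (high skeletonised-pixel ratio) gets a small dilation kernel, sparse
    drawings a large one, with a linear ramp in between."""
    if p_ratio >= 0.02:
        return 4
    elif 0.01 < p_ratio < 0.02:
        return int(np.ceil(20 - 800 * p_ratio))
    return 12

assert kernel_size_for_ratio(0.03) == 4
assert kernel_size_for_ratio(0.015) == 8    # ceil(20 - 800 * 0.015)
assert kernel_size_for_ratio(0.005) == 12
```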
yuxiang-zhou/DenseDeformableModel | [
"382d9cc2ccee629c64ec873110c3653bcc3a30fe"
] | [
"dAAMs/gridview.py"
] | [
"from menpo.shape import PointCloud, TriMesh\n\nimport numpy as np\n\n\ndef grid_triangulation(shape):\n height, width = shape\n row_to_index = lambda x: x * width\n top_triangles = lambda x: np.concatenate([\n np.arange(row_to_index(x), row_to_index(x) + width - 1)[..., None],\n np.arange(row_to_index(x) + 1, row_to_index(x) + width)[..., None],\n np.arange(row_to_index(x + 1),\n row_to_index(x + 1) + width - 1)[..., None]], axis=1)\n \n # Half edges are opposite directions\n bottom_triangles = lambda x: np.concatenate([\n np.arange(row_to_index(x + 1),\n row_to_index(x + 1) + width - 1)[..., None],\n np.arange(row_to_index(x) + 1, row_to_index(x) + width)[..., None],\n np.arange(row_to_index(x + 1) + 1,\n row_to_index(x + 1) + width)[..., None]], axis=1)\n \n trilist = []\n for k in xrange(height - 1):\n trilist.append(top_triangles(k))\n trilist.append(bottom_triangles(k))\n \n return np.concatenate(trilist)\n\n\ndef zero_flow_grid_pcloud(shape, triangulated=False, mask=None, grid_size=1):\n point_grid = np.meshgrid(range(0, shape[0], grid_size),\n range(0, shape[1], grid_size), indexing='ij')\n point_grid_vec = np.vstack([p.ravel() for p in point_grid]).T\n # point_grid_im = point_grid_vec.reshape(shape + (2,))\n\n if triangulated:\n trilist = grid_triangulation(shape)\n pcloud = TriMesh(point_grid_vec, trilist=trilist)\n else:\n pcloud = PointCloud(point_grid_vec)\n \n if mask is not None:\n return pcloud.from_mask(mask.pixels.ravel())\n else:\n return pcloud"
] | [
[
"numpy.concatenate"
]
] |
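`grid_triangulation` splits every cell of a height×width grid into a top-left and a bottom-right triangle, so a complete grid yields 2·(h−1)·(w−1) triangles (the original's `xrange` is Python 2 only; changed to `range` above). A numpy-only rendition without the menpo dependency, restating the same construction and asserting the expected counts:

```python
import numpy as np

def grid_triangulation(shape):
    height, width = shape
    row = lambda r: r * width
    tris = []
    for r in range(height - 1):
        # Top-left triangle of each cell in row r...
        tris.append(np.stack([np.arange(row(r), row(r) + width - 1),
                              np.arange(row(r) + 1, row(r) + width),
                              np.arange(row(r + 1), row(r + 1) + width - 1)], axis=1))
        # ...and the bottom-right triangle, sharing the diagonal edge.
        tris.append(np.stack([np.arange(row(r + 1), row(r + 1) + width - 1),
                              np.arange(row(r) + 1, row(r) + width),
                              np.arange(row(r + 1) + 1, row(r + 1) + width)], axis=1))
    return np.concatenate(tris)

trilist = grid_triangulation((3, 4))
assert trilist.shape == (2 * (3 - 1) * (4 - 1), 3)   # 12 triangles
assert trilist.max() == 3 * 4 - 1                    # indices stay inside the grid
```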
ans2human/Github-Scraper | [
"4554f76bd70c3ed02db7893cafee3143aeb08d8b"
] | [
"github.py"
] | [
"import time\nimport base64\nimport pandas as pd\nfrom selenium import webdriver\nfrom selenium.common.exceptions import NoSuchElementException\n\n\nurl = \"https://github.com/login\"\ndriver = webdriver.Chrome(executable_path= 'C:/Users/reckonsys/anshuman-work/product-analysis/productanalysis/chromedriver.exe')\ndriver.get(url)\n\ntry:\n try:\n emailid=driver.find_element_by_id(\"login_field\")\n emailid.send_keys(\"ans2human\")\n passw=driver.find_element_by_id(\"password\")\n pwd = base64.b64decode('your encoded password').decode(\"utf-8\")\n passw.send_keys(pwd)\n signin=driver.find_element_by_xpath('//*[@id=\"login\"]/form/div[3]/input[4]')\n signin.click()\n except driver.find_element_by_xpath('//*[@id=\"js-flash-container\"]/div').text:\n print(\"Couldn't login!! Check the credentials\")\n driver.close()\n\n dropdwn = driver.find_element_by_xpath('/html/body/div[1]/header/div[8]/details/summary/img')\n dropdwn.click()\n profile = driver.find_element_by_xpath('/html/body/div[1]/header/div[8]/details/details-menu/a[1]')\n profile.click()\n time.sleep(5)\n\n\n uname = driver.find_element_by_xpath('//h1[@class=\"vcard-names\"]/span').text\n repos = driver.find_element_by_xpath('//*[@id=\"js-pjax-container\"]/div/div[3]/div[2]/nav/a[2]/span').text\n usrnme = driver.find_element_by_xpath('//*[@id=\"js-pjax-container\"]/div/div[1]/div[2]/div[2]/div[2]/h1/span[2]').text\n position = driver.find_element_by_xpath('//*[@id=\"js-pjax-container\"]/div/div[1]/div[2]/div[2]/div[5]/div/div[2]/div').text\n company = driver.find_element_by_xpath('//*[@id=\"js-pjax-container\"]/div/div[1]/div[2]/div[2]/div[5]/div/ul/li[1]/span').text\n city = driver.find_element_by_xpath('//*[@id=\"js-pjax-container\"]/div/div[1]/div[2]/div[2]/div[5]/div/ul/li[2]/span').text\n \n\n datal = list(zip([usrnme], [uname], [repos], [position], [company], [city]))\n dt = pd.DataFrame(datal, columns = ['Username', 'Name', 'Repositories', 'Position', 'Company', 'City'])\n print(dt)\n driver.close()\n\nexcept NoSuchElementException:\n print(\"Couldn't login!! Check the credential\")\n driver.close()\n\n"
] | [
[
"pandas.DataFrame"
]
] |
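The original inner `except driver.find_element_by_xpath(...).text:` (repaired above) hits a common Selenium pitfall: an `except` clause needs an exception class, and probing for an element either returns it or raises. Two idiomatic presence checks against the Selenium 3 style API the file already uses; `element_text_or_none` and `element_present` are our names, a sketch rather than part of the scraper:

```python
from selenium.common.exceptions import NoSuchElementException

def element_text_or_none(driver, xpath):
    """find_element_* raises NoSuchElementException when nothing matches,
    so wrap the probe itself in try/except."""
    try:
        return driver.find_element_by_xpath(xpath).text
    except NoSuchElementException:
        return None

def element_present(driver, xpath):
    """find_elements_* (plural) returns an empty list instead of raising."""
    return len(driver.find_elements_by_xpath(xpath)) > 0
```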
usnistgov/perm_hmm | [
"b57b3cca51d0d91bde438a62f26c0b0123c26aa5"
] | [
"perm_hmm/policies/ignore_transitions.py"
] | [
"\"\"\"For the special case of two states and two outcomes, computes the optimal\npermutations for the related HMM that has transition matrix equal to the\nidentity matrix.\n\nBecause there are only two states, we adopt the convention that the two states\nare called the ``dark`` and ``bright`` states. The ``dark`` state is the one\nsuch that the outcome is more likely to be ``0``, and the ``bright`` state is\nthe one such that the outcome is more likely to be ``1``.\n\nThis module uses the :py:mod:`~adapt_hypo_test` package to compute the optimal\npermutations.\n\"\"\"\nimport torch\nfrom perm_hmm.policies.policy import PermPolicy\nfrom adapt_hypo_test.two_states import no_transitions as nt\n\n\nclass IgnoreTransitions(PermPolicy):\n r\"\"\"Ignoring the transition matrix, computes the optimal permutations for\n the HMM for all possible outcomes.\n\n This method of computing permutations has complexity O(t**2), where t is the\n number of steps.\n\n In addition to the attributes of the base class, instances of this class\n have the following attributes:\n\n ``p``:\n A float, the probability of the dark state giving outcome 1.\n\n ``q``:\n A float, the probability of the bright state giving outcome 0.\n\n ``dtb``:\n The permutation that takes the dark state to the bright state.\n\n ``id``:\n The identity permutation.\n\n ``x``:\n A representation of the log odds of the belief state that we compute\n the permutations at. See the :py:mod:`~adapt_hypo_test` module for more\n details.\n\n ``sigmas``:\n A list indicating whether to apply to nontrivial permutation when\n reaching a particular log odds.\n \"\"\"\n\n def __init__(self, possible_perms, p, q, dark_state, bright_state, save_history=False):\n r\"\"\"Initialization.\n\n This class computes the optimal permutations for the case that the\n transition matrix is trivial, and that there is one bright state and\n one dark state. The \"true\" model may have more states, and a nontrivial\n transition matrix. To make the identification between the two models,\n we need to know which state is to be interpreted as the dark state\n and which as the bright state. The possible perms of the true model are\n needed to identify which corresponds to the dark-bright swap.\n\n :param possible_perms: Possible permutations of the true model.\n :param dark_state: Which state of the true model corresponds to the\n dark state.\n :param bright_state: Similar for bright state.\n :raises ValueError: If the identity or the swap permutations are not\n included as possible permutations.\n \"\"\"\n super().__init__(possible_perms, save_history=save_history)\n self.p = p\n self.q = q\n num_states = possible_perms.shape[-1]\n dtb = torch.nonzero(possible_perms[:, dark_state] == bright_state, as_tuple=False)\n if len(dtb) == 0:\n raise ValueError(\"Need to be able to take dark to bright\")\n self.dtb = possible_perms[dtb[0].item()]\n identity = torch.nonzero(torch.all(possible_perms == torch.arange(num_states), dim=-1), as_tuple=False)\n if len(identity) == 0:\n raise ValueError(\"The identity must be an allowed permutation\")\n self.id = possible_perms[identity[0].item()]\n self.x = None\n self.sigmas = None\n self.step = 0\n\n def reset(self, save_history=False, reset_sigmas=False):\n super().reset(save_history=save_history)\n self.x = None\n self.step = 0\n if reset_sigmas:\n self.sigmas = None\n\n def solve(self, n):\n r\"\"\"Needs to be called before ``calculate_perm``.\n\n Solves for the ideal permutations in the model where we ignore\n transitions. 
Calls\n :py:func:`~adapt_hypo_test.two_states.no_transitions.solve` to do so.\n\n :param n: The number of steps to compute for.\n :return: The expanded value function :math:`\\chi`. See\n :py:mod:`~adapt_hypo_test` for more details.\n \"\"\"\n self.sigmas, chi = nt.solve(self.p, self.q, n)\n return chi\n\n def calculate_perm(self, data: torch.Tensor) -> (torch.Tensor, dict):\n if self.sigmas is None:\n raise RuntimeError(\"Call .solve first with a total number of steps.\")\n if self.x is None:\n self.x = torch.zeros(data.shape + (2,), dtype=int)\n self.x[~data.int().bool(), 0] -= 1\n self.x[data.int().bool(), 1] += 1\n self.step += 1\n if self.step == len(self.sigmas):\n return self.id.expand(data.shape + self.id.shape).clone().detach(), {\"x\": self.x.clone().detach()}\n else:\n self.x, p = nt.evaluate_sigma(self.sigmas[self.step], self.x.numpy())\n self.x = torch.from_numpy(self.x)\n perm = self.id.expand(data.shape + self.id.shape).clone().detach()\n perm[p] = self.dtb\n return perm, {\"x\": self.x.clone().detach()}\n"
] | [
[
"torch.zeros",
"torch.nonzero",
"torch.arange",
"torch.from_numpy"
]
] |
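The constructor above does two index lookups into `possible_perms`: the row that maps the dark state to the bright state (the swap) and the row equal to `torch.arange(n)` (the identity). Traced on a hypothetical two-state permutation set, with illustrative values:

```python
import torch

possible_perms = torch.tensor([[0, 1], [1, 0]])   # identity and the swap
dark_state, bright_state = 0, 1

# Which rows send dark -> bright, and which equal the identity permutation.
dtb_rows = torch.nonzero(possible_perms[:, dark_state] == bright_state, as_tuple=False)
id_rows = torch.nonzero(torch.all(possible_perms == torch.arange(2), dim=-1),
                        as_tuple=False)
dtb = possible_perms[dtb_rows[0].item()]
identity = possible_perms[id_rows[0].item()]
assert dtb.tolist() == [1, 0] and identity.tolist() == [0, 1]
```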
debcaldarola/FedAvg_pytorch | [
"ed4180020f7c0fd2060cd55a4daff79f91634bed"
] | [
"models/cifar10/cnn.py"
] | [
"import torch\nimport torch.nn as nn\nimport numpy as np\nimport os\nfrom PIL import Image\nimport torchvision.transforms as transforms\n\nIMAGE_SIZE = 32\nIMAGES_DIR = os.path.join('..', 'data', 'cifar10', 'data', 'raw', 'img')\n\ntransform_train = transforms.Compose([\n # transforms.RandomCrop(IMAGE_SIZE, padding=4),\n # transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n # transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\n])\n# Normalize the test set same as training set without augmentation\ntransform_test = transforms.Compose([\n transforms.ToTensor(),\n # transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\n])\n\nclass ClientModel(nn.Module):\n def __init__(self, lr, num_classes, device):\n super(ClientModel, self).__init__()\n self.num_classes = num_classes\n self.device = device\n self.lr = lr\n\n self.layer1 = nn.Sequential(\n nn.Conv2d(in_channels=3, out_channels=64, kernel_size=5),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=2)\n )\n\n self.layer2 = nn.Sequential(\n nn.Conv2d(in_channels=64, out_channels=64, kernel_size=5),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=2)\n )\n\n self.classifier = nn.Sequential(\n nn.Linear(64*5*5, 384),\n nn.ReLU(),\n nn.Linear(384, 192),\n nn.ReLU(),\n nn.Linear(192, self.num_classes)\n )\n\n self.size = self.model_size()\n\n\n def forward(self, x):\n x = self.layer1(x)\n x = self.layer2(x)\n x = torch.reshape(x, (x.shape[0], -1))\n x = self.classifier(x)\n return x\n\n def process_x(self, raw_x_batch):\n x_batch = [self._load_image(i) for i in raw_x_batch]\n x_batch = np.array(x_batch)\n return x_batch\n\n def process_y(self, raw_y_batch):\n return np.array(raw_y_batch)\n\n def _load_image(self, img_name):\n if 'test' in img_name:\n name = img_name.split('/')\n img = Image.open(os.path.join(IMAGES_DIR, 'test', name[-1]))\n else:\n img = Image.open(os.path.join(IMAGES_DIR, 'train', img_name))\n if self.training:\n img = transform_train(img)\n else:\n img = transform_test(img)\n img = img.cpu().detach().numpy()\n return img\n\n def model_size(self):\n tot_size = 0\n for param in self.parameters():\n tot_size += param.size()[0]\n return tot_size"
] | [
[
"torch.nn.Linear",
"numpy.array",
"torch.nn.MaxPool2d",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.reshape"
]
] |
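The hard-coded `nn.Linear(64*5*5, 384)` only works because a 32×32 CIFAR image shrinks to 5×5 through the feature stack: 32 → 28 (5×5 conv, no padding) → 14 (2×2 pool) → 10 → 5. A standalone shape check of that arithmetic; note in passing that the file's `model_size` sums only `param.size()[0]` per tensor, so `param.numel()` would be needed to count every weight:

```python
import torch
import torch.nn as nn

# Mirror of layer1/layer2 above: 32 -> 28 -> 14 -> 10 -> 5 spatially.
features = nn.Sequential(
    nn.Conv2d(3, 64, kernel_size=5), nn.ReLU(), nn.MaxPool2d(2),
    nn.Conv2d(64, 64, kernel_size=5), nn.ReLU(), nn.MaxPool2d(2),
)
with torch.no_grad():
    out = features(torch.randn(1, 3, 32, 32))
assert out.shape == (1, 64, 5, 5)    # flattens to the expected 64*5*5
```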
Tariod/security | [
"2b544fdaa27760bd80879e3106cfb39dc986c73f"
] | [
"lab_01/part_04/part_04.py"
] | [
"import math\nfrom functools import partial\n\nimport pandas as pd\nimport random\nimport string\n\nfrom calculate_frequency import calculate_frequency_norm\nfrom ngrams import ngrams\nfrom substitution_cipher import substitution_cipher\n\n# ================ ALPHABET CONF ================\nALPHABET = string.ascii_uppercase\nNGRAMS_SIZE = 3 # NGRAMS_SIZE > 1\nNGRAMS_STAT = {string.ascii_uppercase: [None, '../ngrams-frequency/trigrams_frequency.csv']}\n# =================== GEN CONF ==================\nPOPULATION_SIZE = 200\nNUMBER_OF_GENERATIONS = 600\nCROSSOVER_COEFFICIENT = 0.6\nMUTATION_PROBABILITY = 0.3\n\nencoded = 'EFFPQLEKVTVPCPYFLMVHQLUEWCNVWFYGHYTCETHQEKLPVMSAKSPVPAPVYWMVHQLUSPQLYWLASLFVWPQLMVHQLUPLRPSQLULQESPBLWPCS' \\\n 'VRVWFLHLWFLWPUEWFYOTCMQYSLWOYWYETHQEKLPVMSAKSPVPAPVYWHEPPLUWSGYULEMQTLPPLUGUYOLWDTVSQETHQEKLPVPVSMTLEUPQE' \\\n 'PCYAMEWWYTYWDLUULTCYWPQLSEOLSVOHTLUYAPVWLYGDALSSVWDPQLNLCKCLRQEASPVILSLEUMQBQVMQCYAHUYKEKTCASLFPYFLMVHQLU' \\\n 'PQLHULIVYASHEUEDUEHQBVTTPQLVWFLRYGMYVWMVFLWMLSPVTTBYUNESESADDLSPVYWCYAMEWPUCPYFVIVFLPQLOLSSEDLVWHEUPSKCPQ' \\\n 'LWAOKLUYGMQEUEMPLUSVWENLCEWFEHHTCGULXALWMCEWETCSVSPYLEMQYGPQLOMEWCYAGVWFEBECPYASLQVDQLUYUFLUGULXALWMCSPEP' \\\n 'VSPVMSBVPQPQVSPCHLYGMVHQLUPQLWLRPOEDVMETBYUFBVTTPENLPYPQLWLRPTEKLWZYCKVPTCSTESQPQULLGYAUMEHVPETFWMEHVPETB' \\\n 'ZMEHVPETB'\n\n\ndef parse_ngram_stats(alphabet, ngrams_size):\n stat_filename = NGRAMS_STAT[alphabet][ngrams_size - 2]\n stats = pd.read_csv(stat_filename)\n stats['ngram'] = stats['ngram'].map(lambda ngram: tuple([s for s in ngram]))\n stats['frequency'] = stats['frequency'].map(lambda fr: math.log2(fr))\n stats = stats.set_index('ngram')\n return stats['frequency'].to_dict()\n\n\ndef substitution_score(msg, alphabet, ngram_size):\n eng_frequency = parse_ngram_stats(alphabet, ngram_size)\n\n def substitution_score_bind(substitution):\n decoded_msg = substitution_cipher(msg, [dict(zip(substitution['alphabet'], alphabet))])\n decoded_msg = ngrams(decoded_msg, ngram_size)\n ngrams_frequency = calculate_frequency_norm(decoded_msg)\n\n for ngram in ngrams_frequency:\n if substitution['score'] is None:\n substitution['score'] = 0\n substitution['score'] += eng_frequency.get(ngram, 0) * ngrams_frequency[ngram]\n\n return substitution\n\n return substitution_score_bind\n\n\ndef init_individual(gens):\n return random.sample(gens, len(gens))\n\n\ndef init_population(gens, size):\n return [{'alphabet': init_individual(gens), 'score': None} for _ in range(size)]\n\n\ndef natural_selection(population, fitness):\n return [fitness(individual) if individual['score'] is None else individual for individual in population]\n\n\ndef evolution(population):\n population = sorted(population, key=lambda ind: ind['score'], reverse=True)\n best_size = len(population) // 3\n return population[:best_size], population[best_size:]\n\n\ndef intercourse(gens, crossover_coefficient, x, y):\n chromosome_size = len(gens)\n x_indexes = random.sample(range(chromosome_size), k=int(crossover_coefficient * chromosome_size))\n\n chromosome_x = list(map(lambda val: val[1] if val[0] in x_indexes else None, enumerate(x['alphabet'])))\n chromosome_y = list(\n map(lambda val: None if val[0] in x_indexes or val[1] in chromosome_x else val[1], enumerate(y['alphabet'])))\n\n chromosome = [y_gens if x_gens is None else x_gens for x_gens, y_gens in zip(chromosome_x, chromosome_y)]\n\n def filter_gens(g):\n return g not in chromosome\n\n remaining_gens = list(filter(filter_gens, gens))\n random.shuffle(remaining_gens)\n\n chromosome = list(map(lambda val: 
remaining_gens.pop() if val is None else val, chromosome))\n\n return {'alphabet': chromosome, 'score': None}\n\n\ndef mutation(population, mutation_probability):\n new_population = []\n for ind in population:\n if random.random() < mutation_probability:\n ind['score'] = None\n gen1 = random.randrange(0, len(ind['alphabet']))\n gen2 = random.randrange(0, len(ind['alphabet']))\n ind['alphabet'][gen1], ind['alphabet'][gen2] = ind['alphabet'][gen2], ind['alphabet'][gen1]\n new_population.append(ind)\n return new_population\n\n\ndef main():\n best_keys = []\n calc_score = substitution_score(encoded, ALPHABET, NGRAMS_SIZE)\n intercourse_bind = partial(intercourse, ALPHABET, CROSSOVER_COEFFICIENT)\n # Initial population\n population = init_population(ALPHABET, POPULATION_SIZE)\n for gen in range(NUMBER_OF_GENERATIONS):\n if (gen + 1) % 100 == 0:\n print(\"Generation \" + str(gen + 1))\n if len(best_keys) > 0:\n print(\"Best key: \" + ''.join(best_keys[0]['alphabet']))\n print(\"Score: \" + str(best_keys[0]['score']))\n # Natural selection\n population = natural_selection(population, calc_score)\n best, others = evolution(population)\n\n best_keys = sorted(best_keys + best, key=lambda k: k['score'], reverse=True)\n temp = []\n for index, key in enumerate(best_keys):\n if len(temp) == 5:\n break\n if index == best_keys.index(key):\n temp.append(key.copy())\n best_keys = temp\n\n # Breeding\n parents = random.sample(others, len(others) // 2)\n random.shuffle(parents)\n\n children = []\n for x in best:\n y = parents.pop()\n children.append(intercourse_bind(x, y))\n children.append(intercourse_bind(y, x))\n\n population = best + children\n population += init_population(ALPHABET, POPULATION_SIZE - len(population))\n\n # Mutation\n population = mutation(population, MUTATION_PROBABILITY)\n print(\"=\" * 20 + \" Finish \" + \"=\" * 20)\n best_keys = sorted(best_keys, key=lambda ind: ind['score'], reverse=True)\n for i in range(5):\n key = best_keys[i]\n print(\"Key:\" + str(i + 1))\n print(\"Fitness: \" + str(key['score']))\n print(\"Substitution: \" + ''.join(key['alphabet']))\n decoded_msg = substitution_cipher(encoded, [dict(zip(key['alphabet'], ALPHABET))])\n print(\"Decoded message: \" + decoded_msg)\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"pandas.read_csv"
]
] |
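The fitness in `substitution_score` decodes with a candidate key, counts trigrams, and sums `log2(reference value) × observed frequency`, so keys producing English-looking trigrams score higher. A toy standalone version; the two-entry `REF` table stands in for `trigrams_frequency.csv`, using log2 of assumed raw counts so that matches contribute positively:

```python
import math

REF = {('T', 'H', 'E'): math.log2(181.0), ('A', 'N', 'D'): math.log2(73.0)}

def trigrams(text):
    return [tuple(text[i:i + 3]) for i in range(len(text) - 2)]

def score(text):
    grams = trigrams(text)
    freq = {}
    for g in grams:
        freq[g] = freq.get(g, 0.0) + 1.0 / len(grams)   # normalised frequency
    return sum(REF.get(g, 0.0) * f for g, f in freq.items())

# A decode containing common trigrams beats gibberish containing none.
assert score("THEHAND") > score("QZXWVKJ")
```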
catalinbotean/Licenta | [
"241184a31564ff676fe9637acf9c95539eca880b"
] | [
"network/Resnet.py"
] | [
"\"\"\"\n# Code Adapted from:\n# https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py\n#\n# BSD 3-Clause License\n#\n# Copyright (c) 2017,\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nimport torch.utils.model_zoo as model_zoo\nimport network.mynn as mynn\nfrom network.sync_switchwhiten import SyncSwitchWhiten2d\nfrom network.instance_whitening import InstanceWhitening\n\n\n__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',\n 'resnet152', 'resnet_adapt101']\n\nmodel_urls = {\n 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',\n 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',\n 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',\n 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',\n 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',\n}\n\n\ndef conv3x3(in_planes, out_planes, stride=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n\n\nclass BasicBlock(nn.Module):\n \"\"\"\n Basic Block for Resnet\n \"\"\"\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None, iw=0):\n super(BasicBlock, self).__init__()\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = mynn.Norm2d(planes)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = mynn.Norm2d(planes)\n self.downsample = downsample\n self.stride = stride\n\n self.iw = iw\n if self.iw == 1:\n self.instance_norm_layer = InstanceWhitening(planes * self.expansion)\n self.relu = nn.ReLU(inplace=False)\n elif self.iw == 2:\n self.instance_norm_layer = InstanceWhitening(planes * self.expansion)\n self.relu = nn.ReLU(inplace=False)\n elif self.iw == 3:\n self.instance_norm_layer = nn.InstanceNorm2d(planes * self.expansion, affine=False)\n self.relu = nn.ReLU(inplace=True)\n elif self.iw == 4:\n self.instance_norm_layer = nn.InstanceNorm2d(planes * self.expansion, 
affine=True)\n self.relu = nn.ReLU(inplace=True)\n elif self.iw == 5:\n self.instance_norm_layer = SyncSwitchWhiten2d(planes * self.expansion,\n num_pergroup=16,\n sw_type=2,\n T=5,\n tie_weight=False,\n eps=1e-5,\n momentum=0.99,\n affine=True)\n self.relu = nn.ReLU(inplace=True)\n else:\n self.relu = nn.ReLU(inplace=True)\n\n def forward(self, x_tuple):\n if len(x_tuple) == 2:\n w_arr = x_tuple[1]\n x = x_tuple[0]\n else:\n print(\"error!!!\")\n return\n\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n\n if self.iw >= 1:\n if self.iw == 1 or self.iw == 2:\n out, w = self.instance_norm_layer(out)\n w_arr.append(w)\n else:\n out = self.instance_norm_layer(out)\n\n out = self.relu(out)\n\n return [out, w_arr]\n\n\nclass Bottleneck(nn.Module):\n \"\"\"\n Bottleneck Layer for Resnet\n \"\"\"\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, downsample=None, iw=0):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\n self.bn1 = mynn.Norm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n self.bn2 = mynn.Norm2d(planes)\n self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1, bias=False)\n self.bn3 = mynn.Norm2d(planes * self.expansion)\n self.downsample = downsample\n self.stride = stride\n\n self.iw = iw\n if self.iw == 1:\n self.instance_norm_layer = InstanceWhitening(planes * self.expansion)\n self.relu = nn.ReLU(inplace=False)\n elif self.iw == 2:\n self.instance_norm_layer = InstanceWhitening(planes * self.expansion)\n self.relu = nn.ReLU(inplace=False)\n elif self.iw == 3:\n self.instance_norm_layer = nn.InstanceNorm2d(planes * self.expansion, affine=False)\n self.relu = nn.ReLU(inplace=True)\n elif self.iw == 4:\n self.instance_norm_layer = nn.InstanceNorm2d(planes * self.expansion, affine=True)\n self.relu = nn.ReLU(inplace=True)\n elif self.iw == 5:\n self.instance_norm_layer = SyncSwitchWhiten2d(planes * self.expansion,\n num_pergroup=16,\n sw_type=2,\n T=5,\n tie_weight=False,\n eps=1e-5,\n momentum=0.99,\n affine=True)\n self.relu = nn.ReLU(inplace=True)\n else:\n self.relu = nn.ReLU(inplace=True)\n\n def forward(self, x_tuple):\n if len(x_tuple) == 2:\n w_arr = x_tuple[1]\n x = x_tuple[0]\n else:\n print(\"error!!!\")\n return\n\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n\n if self.iw >= 1:\n if self.iw == 1 or self.iw == 2:\n out, w = self.instance_norm_layer(out)\n w_arr.append(w)\n else:\n out = self.instance_norm_layer(out)\n\n out = self.relu(out)\n\n return [out, w_arr]\n\n\nclass ResNet3X3(nn.Module):\n \"\"\"\n Resnet Global Module for Initialization\n \"\"\"\n\n def __init__(self, block, layers, wt_layer=None, num_classes=1000):\n self.inplanes = 128\n super(ResNet3X3, self).__init__()\n # self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,\n # bias=False)\n # self.bn1 = mynn.Norm2d(64)\n # self.relu = nn.ReLU(inplace=True)\n self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=1,\n bias=False)\n\n if wt_layer[0] == 1:\n self.bn1 = InstanceWhitening(64)\n self.relu1 = nn.ReLU(inplace=False)\n 
elif wt_layer[0] == 2:\n self.bn1 = InstanceWhitening(64)\n self.relu1 = nn.ReLU(inplace=False)\n elif wt_layer[0] == 3:\n self.bn1 = nn.InstanceNorm2d(64, affine=False)\n self.relu1 = nn.ReLU(inplace=True)\n elif wt_layer[0] == 4:\n self.bn1 = nn.InstanceNorm2d(64, affine=True)\n self.relu1 = nn.ReLU(inplace=True)\n elif wt_layer[0] == 5:\n self.bn1 = SyncSwitchWhiten2d(64,\n num_pergroup=16,\n sw_type=2,\n T=5,\n tie_weight=False,\n eps=1e-5,\n momentum=0.99,\n affine=True)\n self.relu1 = nn.ReLU(inplace=True)\n else:\n self.bn1 = mynn.Norm2d(64)\n self.relu1 = nn.ReLU(inplace=True)\n\n self.conv2 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1,\n bias=False)\n if wt_layer[1] == 1:\n self.bn2 = InstanceWhitening(64)\n self.relu2 = nn.ReLU(inplace=False)\n elif wt_layer[1] == 2:\n self.bn2 = InstanceWhitening(64)\n self.relu2 = nn.ReLU(inplace=False)\n elif wt_layer[1] == 3:\n self.bn2 = nn.InstanceNorm2d(64, affine=False)\n self.relu2 = nn.ReLU(inplace=True)\n elif wt_layer[1] == 4:\n self.bn2 = nn.InstanceNorm2d(64, affine=True)\n self.relu2 = nn.ReLU(inplace=True)\n elif wt_layer[1] == 5:\n self.bn2 = SyncSwitchWhiten2d(64,\n num_pergroup=16,\n sw_type=2,\n T=5,\n tie_weight=False,\n eps=1e-5,\n momentum=0.99,\n affine=True)\n self.relu2 = nn.ReLU(inplace=True)\n else:\n self.bn2 = mynn.Norm2d(64)\n self.relu2 = nn.ReLU(inplace=True)\n\n self.conv3 = nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1,\n bias=False)\n if wt_layer[2] == 1:\n self.bn3 = InstanceWhitening(self.inplanes)\n self.relu3 = nn.ReLU(inplace=False)\n elif wt_layer[2] == 2:\n self.bn3 = InstanceWhitening(self.inplanes)\n self.relu3 = nn.ReLU(inplace=False)\n elif wt_layer[2] == 3:\n self.bn3 = nn.InstanceNorm2d(self.inplanes, affine=False)\n self.relu3 = nn.ReLU(inplace=True)\n elif wt_layer[2] == 4:\n self.bn3 = nn.InstanceNorm2d(self.inplanes, affine=True)\n self.relu3 = nn.ReLU(inplace=True)\n elif wt_layer[2] == 5:\n self.bn3 = SyncSwitchWhiten2d(self.inplanes,\n num_pergroup=16,\n sw_type=2,\n T=5,\n tie_weight=False,\n eps=1e-5,\n momentum=0.99,\n affine=True)\n self.relu3 = nn.ReLU(inplace=True)\n else:\n self.bn3 = mynn.Norm2d(self.inplanes)\n self.relu3 = nn.ReLU(inplace=True)\n\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n self.layer1 = self._make_layer(block, 64, layers[0], wt_layer=wt_layer[3])\n self.layer2 = self._make_layer(block, 128, layers[1], stride=2, wt_layer=wt_layer[4])\n self.layer3 = self._make_layer(block, 256, layers[2], stride=2, wt_layer=wt_layer[5])\n self.layer4 = self._make_layer(block, 512, layers[3], stride=2, wt_layer=wt_layer[6])\n self.avgpool = nn.AvgPool2d(7, stride=1)\n self.fc = nn.Linear(512 * block.expansion, num_classes)\n self.wt_layer = wt_layer\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.SyncBatchNorm):\n if m.weight is not None:\n nn.init.constant_(m.weight, 1)\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n\n def _make_layer(self, block, planes, blocks, stride=1, wt_layer=0):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(self.inplanes, planes * block.expansion,\n kernel_size=1, stride=stride, bias=False),\n mynn.Norm2d(planes * block.expansion),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample, iw=0))\n self.inplanes = planes * block.expansion\n for index in range(1, 
blocks):\n layers.append(block(self.inplanes, planes,\n iw=0 if (wt_layer > 0 and index < blocks - 1) else wt_layer))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n w_arr = []\n x_size = x.size() # 800\n\n x = self.conv1(x)\n if self.wt_layer[0] == 1 or self.wt_layer[0] == 2:\n x, w = self.bn1(x)\n w_arr.append(w)\n else:\n x = self.bn1(x)\n x = self.relu1(x)\n x = self.conv2(x)\n if self.wt_layer[1] == 1 or self.wt_layer[1] == 2:\n x, w = self.bn2(x)\n w_arr.append(w)\n else:\n x = self.bn2(x)\n x = self.relu2(x)\n x = self.conv3(x)\n if self.wt_layer[2] == 1 or self.wt_layer[2] == 2:\n x, w = self.bn3(x)\n w_arr.append(w)\n else:\n x = self.bn3(x)\n x = self.relu3(x)\n x = self.maxpool(x)\n\n x_tuple = self.layer1([x, w_arr]) # 400\n low_level = x_tuple[0]\n\n x_tuple = self.layer2(x_tuple) # 100\n x_tuple = self.layer3(x_tuple) # 100\n aux_out = x_tuple[0]\n x_tuple = self.layer4(x_tuple) # 100\n\n x = x_tuple[0]\n w_arr = x_tuple[1]\n #x = self.avgpool(x)\n #x = x.view(x.size(0), -1)\n #x = self.fc(x)\n\n return x\n\nclass ResNet(nn.Module):\n \"\"\"\n Resnet Global Module for Initialization\n \"\"\"\n\n def __init__(self, block, layers, wt_layer=None, num_classes=1000):\n self.inplanes = 64\n # self.inplanes = 128\n super(ResNet, self).__init__()\n self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,\n bias=False)\n if wt_layer[2] == 1:\n self.bn1 = InstanceWhitening(64)\n self.relu = nn.ReLU(inplace=False)\n elif wt_layer[2] == 2:\n self.bn1 = InstanceWhitening(64)\n self.relu = nn.ReLU(inplace=False)\n elif wt_layer[2] == 3:\n self.bn1 = nn.InstanceNorm2d(64, affine=False)\n self.relu = nn.ReLU(inplace=True)\n elif wt_layer[2] == 4:\n self.bn1 = nn.InstanceNorm2d(64, affine=True)\n self.relu = nn.ReLU(inplace=True)\n elif wt_layer[2] == 5:\n self.bn1 = SyncSwitchWhiten2d(self.inplanes,\n num_pergroup=16,\n sw_type=2,\n T=5,\n tie_weight=False,\n eps=1e-5,\n momentum=0.99,\n affine=True)\n self.relu = nn.ReLU(inplace=True)\n else:\n self.bn1 = mynn.Norm2d(64)\n self.relu = nn.ReLU(inplace=True)\n\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n self.layer1 = self._make_layer(block, 64, layers[0], wt_layer=wt_layer[3])\n self.layer2 = self._make_layer(block, 128, layers[1], stride=2, wt_layer=wt_layer[4])\n self.layer3 = self._make_layer(block, 256, layers[2], stride=2, wt_layer=wt_layer[5])\n self.layer4 = self._make_layer(block, 512, layers[3], stride=2, wt_layer=wt_layer[6])\n self.avgpool = nn.AvgPool2d(7, stride=1)\n self.fc = nn.Linear(512 * block.expansion, num_classes)\n self.wt_layer = wt_layer\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.SyncBatchNorm):\n if m.weight is not None:\n nn.init.constant_(m.weight, 1)\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n\n def _make_layer(self, block, planes, blocks, stride=1, wt_layer=0):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(self.inplanes, planes * block.expansion,\n kernel_size=1, stride=stride, bias=False),\n mynn.Norm2d(planes * block.expansion),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample, iw=0))\n self.inplanes = planes * block.expansion\n for index in range(1, blocks):\n layers.append(block(self.inplanes, planes,\n iw=0 if (wt_layer > 0 and index < blocks - 1) else wt_layer))\n return 
nn.Sequential(*layers)\n\n def forward(self, x):\n w_arr = []\n x_size = x.size() # 800\n\n x = self.conv1(x)\n if self.wt_layer[2] == 1 or self.wt_layer[2] == 2:\n x, w = self.bn1(x)\n w_arr.append(w)\n else:\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n x_tuple = self.layer1([x, w_arr]) # 400\n low_level = x_tuple[0]\n\n x_tuple = self.layer2(x_tuple) # 100\n x_tuple = self.layer3(x_tuple) # 100\n aux_out = x_tuple[0]\n x_tuple = self.layer4(x_tuple) # 100\n\n x = x_tuple[0]\n w_arr = x_tuple[1]\n\n #x = self.avgpool(x)\n #x = x.view(x.size(0), -1)\n #x = self.fc(x)\n\n return x\n\n\n\ndef resnet18(pretrained=True, wt_layer=None, **kwargs):\n \"\"\"Constructs a ResNet-18 model.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n if wt_layer is None:\n wt_layer = [0, 0, 0, 0, 0, 0, 0]\n model = ResNet(BasicBlock, [2, 2, 2, 2], wt_layer=wt_layer, **kwargs)\n if pretrained:\n #model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n print(\"########### pretrained ##############\")\n mynn.forgiving_state_restore(model, model_zoo.load_url(model_urls['resnet18']))\n return model\n\n\ndef resnet34(pretrained=True, **kwargs):\n \"\"\"Constructs a ResNet-34 model.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))\n return model\n\n\ndef resnet50(pretrained=True, wt_layer=None, **kwargs):\n \"\"\"Constructs a ResNet-50 model.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n if wt_layer is None:\n wt_layer = [0, 0, 0, 0, 0, 0, 0]\n model = ResNet(Bottleneck, [3, 4, 6, 3], wt_layer=wt_layer, **kwargs)\n if pretrained:\n #model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))\n print(\"########### pretrained ##############\")\n mynn.forgiving_state_restore(model, model_zoo.load_url(model_urls['resnet50']))\n return model\n\n\ndef resnet101(pretrained=True, wt_layer=None, **kwargs):\n \"\"\"Constructs a ResNet-101 model.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n :param pretrained:\n \"\"\"\n if wt_layer is None:\n wt_layer = [0, 0, 0, 0, 0, 0, 0]\n model = ResNet3X3(Bottleneck, [3, 4, 23, 3], wt_layer=wt_layer, **kwargs)\n if pretrained:\n # model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))\n print(\"########### pretrained ##############\")\n # model.load_state_dict(torch.load('./pretrained/resnet101-imagenet.pth', map_location=\"cpu\"))\n mynn.forgiving_state_restore(model, torch.load('./pretrained/resnet101-imagenet.pth', map_location=\"cpu\"))\n return model\n\n\ndef resnet_adapt101(args, pretrained=True, **kwargs):\n \"\"\"Constructs a ResNet-101 model.\n\n Args:\n args: arguments that contain adapt_layer information\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n :param pretrained:\n \"\"\"\n model = ResNet3X3(args, **kwargs)\n if pretrained:\n # model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))\n print(\"########### pretrained ##############\")\n model.load_state_dict(torch.load('./pretrained/resnet_adapt101-imagenet.pth', map_location=\"cpu\"))\n # mynn.forgiving_state_restore(model, torch.load('./pretrained/resnet101-imagenet.pth', map_location=\"cpu\"))\n return model\n\n\ndef resnet152(pretrained=True, **kwargs):\n \"\"\"Constructs a ResNet-152 model.\n\n Args:\n pretrained (bool): If True, returns a 
model pre-trained on ImageNet\n \"\"\"\n model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))\n return model\n"
] | [
[
"torch.nn.Linear",
"torch.nn.MaxPool2d",
"torch.nn.Sequential",
"torch.nn.AvgPool2d",
"torch.nn.init.constant_",
"torch.nn.init.kaiming_normal_",
"torch.utils.model_zoo.load_url",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.InstanceNorm2d",
"torch.load"
]
] |
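The five-way `iw`/`wt_layer` dispatch above is copy-pasted six times across `BasicBlock`, `Bottleneck`, `ResNet3X3`, and `ResNet`. One way it could be factored, assuming the module's own imports (`InstanceWhitening`, `SyncSwitchWhiten2d`, `mynn`) are in scope; `make_norm_relu` is our name, a refactoring sketch rather than part of the file:

```python
import torch.nn as nn

def make_norm_relu(planes, code):
    """Single home for the repeated normalisation dispatch. Codes 1/2 ->
    InstanceWhitening (its forward returns (out, w), so callers unpack),
    3/4 -> InstanceNorm2d, 5 -> SyncSwitchWhiten2d, anything else -> BN."""
    if code in (1, 2):
        return InstanceWhitening(planes), nn.ReLU(inplace=False)
    if code == 3:
        return nn.InstanceNorm2d(planes, affine=False), nn.ReLU(inplace=True)
    if code == 4:
        return nn.InstanceNorm2d(planes, affine=True), nn.ReLU(inplace=True)
    if code == 5:
        norm = SyncSwitchWhiten2d(planes, num_pergroup=16, sw_type=2, T=5,
                                  tie_weight=False, eps=1e-5, momentum=0.99,
                                  affine=True)
        return norm, nn.ReLU(inplace=True)
    return mynn.Norm2d(planes), nn.ReLU(inplace=True)

# e.g. in BasicBlock.__init__:
#   self.instance_norm_layer, self.relu = make_norm_relu(planes * self.expansion, iw)
```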
CEMES-CNRS/pymodaq_femto | [
"3482ff7ad049cb35ae47dd2edf35176d473ee4ed"
] | [
"src/pymodaq_femto/materials.py"
] | [
"from pypret.material import BaseMaterial\nimport numpy as np\n\n\nclass SellmeierF1(BaseMaterial):\n \"\"\" Defines a dispersive material via a specific Sellmeier equation.\n\n This subclass supports materials with a Sellmeier equation of the\n form::\n\n n^2(l) - 1 = c1 + c2 * l^2 / (l2 - c3^2) + ...\n\n This is formula 1 from refractiveindex.info [DispersionFormulas]_.\n \"\"\"\n\n def _func(self, x):\n c = self._coefficients\n x2 = x * x\n n2 = np.full_like(x, 1.0 + c[0])\n for i in range(1, len(c) - 1, 2):\n n2 += c[i] * x2 / (x2 - c[i + 1] * c[i + 1])\n n2[n2 < 0] = 0\n return np.sqrt(n2)\n\n\nclass SellmeierF2(BaseMaterial):\n \"\"\" Defines a dispersive material via a specific Sellmeier equation.\n\n This subclass supports materials with a Sellmeier equation of the\n form::\n\n n^2(l) - 1 = c1 + c2 * l^2 / (l2 - c3) + ...\n\n This is formula 2 from refractiveindex.info [DispersionFormulas]_.\n \"\"\"\n\n def _func(self, x):\n c = self._coefficients\n x2 = x * x\n n2 = np.full_like(x, 1.0 + c[0])\n for i in range(1, c.size - 1, 2):\n n2 += c[i] * x2 / (x2 - c[i + 1])\n n2[n2 < 0] = 0\n return np.sqrt(n2)\n\n\nclass RefractiveIndexDotInfo(BaseMaterial):\n \"\"\" Defines a dispersive material via a specific Sellmeier equation.\n\n This subclass supports materials with a Sellmeier equation of the\n form::\n\n n^2(l) = c1 + c2 * l^(c3) / (l^2 - c4^(c5)) + c6 * l^(c7) / (l^2 - c8^(c9)) + c10 * l^(c11) + ...\n\n This is formula 4 from refractiveindex.info [DispersionFormulas]_.\n \"\"\"\n\n def _func(self, x):\n c = self._coefficients\n x2 = x * x\n n2 = np.full_like(x, c[0])\n\n if len(c) > 1:\n n2 += c[1] * x ** c[2] / (x2 - c[3] ** c[4])\n if len(c) > 5:\n n2 += c[5] * x ** c[6] / (x2 - c[7] ** c[8])\n for i in range(9, len(c) - 1, 2):\n n2 += c[i] * x ** c[i + 1]\n n2[n2 < 0] = 0\n return np.sqrt(n2)\n\n\n# Fused Silica dispersion with extended spectral range\nFS = SellmeierF1(\n coefficients=[\n 0.0000000,\n 0.6961663,\n 0.0684043,\n 0.4079426,\n 0.1162414,\n 0.8974794,\n 9.8961610,\n ],\n freq_range=[1e-7, 6.7e-6],\n name=\"FS\",\n long_name=\"Fused silica (fused quartz) extended range\",\n)\n\n# Air dispersion\nAir = SellmeierF1(\n coefficients=[\n 0.0000000,\n 14926.44e-8,\n 19.36e-6,\n 41807.57e-8,\n 7.434e-3,\n 0.0000000,\n 0.0000000,\n ],\n freq_range=[1e-7, 1e-4],\n name=\"Air\",\n long_name=\"Air at 0 degrees C\",\n)\n\n# BK7\nBK7 = SellmeierF2(\n coefficients=[\n 0.00000000000,\n 1.039612120,\n 0.00600069867,\n 0.231792344,\n 0.02001791440,\n 1.010469450,\n 103.560653,\n ],\n freq_range=[0.3e-6, 2.5e-6],\n name=\"BK7\",\n long_name=\"N-BK7 (SCHOTT)\",\n)\n\n# KDP\nKDP = RefractiveIndexDotInfo(\n coefficients=[2.259276, 13.00522, 2, 400, 1, 0.01008956, 0, 0.0129426, 1],\n freq_range=[0.2138e-6, 1.529e-6],\n name=\"KDP\",\n long_name=\"Potassium dihydrogen phosphate\",\n)\n\n# ADP\nADP = RefractiveIndexDotInfo(\n coefficients=[2.302842, 15.102464, 2, 400, 1, 0.011125165, 0, 0.01325366, 1],\n freq_range=[0.2138e-6, 1.529e-6],\n name=\"ADP\",\n long_name=\"Ammonium dihydrogen phosphate\",\n)\n"
] | [
[
"numpy.sqrt",
"numpy.full_like"
]
] |
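The SellmeierF2 coefficients for BK7 above can be spot-checked against a published value: with wavelength in micrometres (the unit these coefficients are tabulated in; the docstrings' `l2` is shorthand for l^2), the index at the 587.6 nm sodium d-line should come out near 1.5168. A standalone evaluation of the same formula:

```python
import numpy as np

# n^2(l) - 1 = c1 + c2*l^2/(l^2 - c3) + ...   (SellmeierF2, l in micrometres)
c = np.array([0.0, 1.039612120, 0.00600069867, 0.231792344,
              0.02001791440, 1.010469450, 103.560653])

def n_bk7(lam_um):
    l2 = lam_um ** 2
    n2 = 1.0 + c[0]
    for i in range(1, c.size - 1, 2):
        n2 += c[i] * l2 / (l2 - c[i + 1])
    return np.sqrt(n2)

assert abs(n_bk7(0.5876) - 1.5168) < 1e-3   # published N-BK7 d-line index
```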
wotchin/openGauss-server | [
"ebd92e92b0cfd76b121d98e4c57a22d334573159"
] | [
"src/gausskernel/dbmind/sqldiag/src/pre_process.py"
] | [
"\"\"\"\nCopyright (c) 2020 Huawei Technologies Co.,Ltd.\n\nopenGauss is licensed under Mulan PSL v2.\nYou can use this software according to the terms and conditions of the Mulan PSL v2.\nYou may obtain a copy of Mulan PSL v2 at:\n\n http://license.coscl.org.cn/MulanPSL2\n\nTHIS SOFTWARE IS PROVIDED ON AN \"AS IS\" BASIS, WITHOUT WARRANTIES OF ANY KIND,\nEITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,\nMERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.\nSee the Mulan PSL v2 for more details.\n\"\"\"\nimport json\nimport os\nimport stat\nimport re\nimport numpy as np\n\n\nclass Preprocessor(object):\n def __init__(self, alpha=0.5, filepath=None):\n self.symbol_list = ['\\'', '(', ')', ',', ';', '%', '\\n'] # stop list\n self.pattern_date = re.compile(r'\\d{4}-\\d{1,2}-\\d{1,2}')\n self.pattern_float = re.compile(r'^[-+]?\\d+$|^[-+]?\\d+\\.\\d+$')\n self.word_dict = dict()\n self.word_count = 2 # 0 and 1 have special mean in dictionary\n self.max_len = 0\n self.alpha = alpha # ratio for re-training\n self.filepath = filepath\n\n def split_line(self, line):\n \"\"\"\n function: remove stop letter\n :param line: input line string\n :return: output line string\n \"\"\"\n i = 0\n while i < len(line):\n if line[i] in self.symbol_list:\n line = line[:i] + line[i + 1:]\n else:\n i += 1\n return line\n\n def word2id(self, line):\n \"\"\"\n function: transform line to int vector\n :return: line vector\n \"\"\"\n tmp = []\n for i in range(len(line)):\n if line[i] in self.word_dict:\n tmp += [int(self.word_dict[line[i]])]\n else:\n tmp += [self.word_count]\n self.word_dict[line[i]] = self.word_count\n self.word_count += 1\n return tmp\n\n def save(self):\n # remove previous file\n if os.path.exists(self.filepath):\n os.remove(self.filepath)\n json_dict = json.dumps(self.word_dict, indent=4)\n\n with os.fdopen(os.open(self.filepath, os.O_WRONLY | os.O_CREAT, stat.S_IWUSR | stat.S_IRUSR),'w') as f:\n f.write(json_dict)\n\n def load(self):\n with open(self.filepath) as f:\n self.word_dict = json.load(f)\n\n def pre_process(self, data_sr):\n \"\"\"\n function: pre-process for train and test data\n :param data_sr: data series\n :return: dataset, info of dataset\n \"\"\"\n dataset = []\n exe_time = []\n wait_time = []\n lock_time = []\n dataset_tmp = []\n for line in data_sr:\n # line factor: [execution time, lock time, wait time, sql text]\n exe_time += [float(line[0])]\n wait_time += [float(line[1])]\n lock_time += [float(line[2])]\n line[-1] = \" \".join(self.split_line(line[-1]).strip().split())\n dataset_tmp += [self.word2id(line[-1].split())]\n if len(line[-1].split()) > self.max_len:\n self.max_len = len(line[-1].split())\n for line in dataset_tmp:\n line_vector = [[0] * (self.word_count - 1)] * (self.max_len - len(line))\n for i in line:\n line_vector += [[0] * (int(i) - 1) + [1] + [0] * (self.word_count - 1 - int(i))]\n dataset += [line_vector]\n time_vectors = dict()\n time_vectors['exe_time'] = np.array(exe_time)\n time_vectors['wait_time'] = np.array(wait_time)\n time_vectors['lock_time'] = np.array(lock_time)\n\n # word in dict will not appear space, so we can use this key saving word_count\n self.word_dict['word count'] = self.word_count - 1\n # word in dict will not appear space, so we can use this key saving max_len\n self.word_dict['max len'] = self.max_len\n\n self.save()\n\n dataset_np = np.array(dataset).astype('float32')\n return dataset_np, time_vectors\n\n def transform(self, data_sr):\n self.load()\n count = self.word_dict['word count'] # word_count of 
original training data\n self.word_count = len(self.word_dict) - 1\n self.max_len = self.word_dict['max len']\n dataset = []\n for line in data_sr:\n line = \" \".join(self.split_line(line).strip().split())\n line_tmp = self.word2id(line.split())\n line_vector = [[0] * count] * (self.max_len - len(line_tmp))\n for i in line_tmp:\n i = min(i, count - 1)\n line_vector += [[0] * (i - 1) + [1] + [0] * (count - i)]\n line_vector = line_vector[:self.max_len]\n dataset += [line_vector]\n\n if self.word_count > count * (1 + self.alpha):\n print('Ratio of new data has reached your set threshold, suggest re-training!')\n\n self.save()\n\n dataset_np = np.array(dataset).astype('float32')\n return dataset_np\n"
] | [
[
"numpy.array"
]
] |
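`split_line` plus `word2id` amount to a plain incremental vocabulary: strip the stop characters, split on whitespace, and hand out integer ids starting at 2 (0 and 1 are reserved, as the class comment notes). Traced on one hypothetical SQL line:

```python
symbols = {'\'', '(', ')', ',', ';', '%', '\n'}   # the class's stop list
sql = "SELECT name, age FROM users;"
cleaned = ''.join(ch for ch in sql if ch not in symbols)   # same effect as split_line

word_dict, word_count, ids = {}, 2, []
for w in cleaned.split():
    if w not in word_dict:          # first sighting: assign the next free id
        word_dict[w] = word_count
        word_count += 1
    ids.append(word_dict[w])

assert cleaned.split() == ['SELECT', 'name', 'age', 'FROM', 'users']
assert ids == [2, 3, 4, 5, 6]
```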
Ali-AliAli-Ali/abs-lib | [
"d04962adef1908ad2f276c5dd390a0e5cbf21b09"
] | [
"tests/kpTests.py"
] | [
"import unittest\n\nimport numpy as np\n\nfrom abslib.kp import KnowledgePatternManager, DisjunctKnowledgePatternItem, MatrixProducer, QuantKnowledgePatternItem, \\\n ConjunctKnowledgePatternItem\n\n\n# Tests for knowledge pattern part of abslib\nclass KnowledgePatternManagerTest(unittest.TestCase):\n\n def testDisjunctsInconsistent(self):\n arrays = [[[1, 1], [0.1, 0.2], [0.2, 0.4], [0.5, 0.7]]]\n for disjunct_intervals_inconsistent in arrays:\n knowledgePattern = DisjunctKnowledgePatternItem(disjunct_intervals_inconsistent)\n result = KnowledgePatternManager.checkInconsistency(knowledgePattern)\n self.assertTrue(result.inconsistent, \"False negative inconsistency result\")\n self.assertTrue(np.array(result.array).shape == np.array(disjunct_intervals_inconsistent).shape,\n \"Incorrect result array size\")\n for i in range(len(result.array)):\n self.assertTrue(disjunct_intervals_inconsistent[i][0] <= result.array[i][0]\n and result.array[i][1] <= disjunct_intervals_inconsistent[i][1],\n \"Intervals couldn't become larger\")\n\n def testDisjunctsNotInconsistent(self):\n arrays = [[[1, 1], [0.1, 0.2], [0.2, 0.4], [0.7, 0.7]]]\n for disjunct_intervals_inconsistent in arrays:\n knowledgePattern = DisjunctKnowledgePatternItem(disjunct_intervals_inconsistent)\n result = KnowledgePatternManager.checkInconsistency(knowledgePattern)\n self.assertFalse(result.inconsistent, \"False positive inconsistency result\")\n\n def testQuantsInconsistent(self):\n arrays = [[[0.24, 0.25], [0.25, 0.25], [0.25, 0.25], [0.25, 0.25]]]\n for quant_intervals_inconsistent in arrays:\n knowledgePattern = QuantKnowledgePatternItem(quant_intervals_inconsistent)\n result = KnowledgePatternManager.checkInconsistency(knowledgePattern)\n self.assertTrue(result.inconsistent, \"False negative inconsistency result\")\n self.assertTrue(np.array(result.array).shape == np.array(quant_intervals_inconsistent).shape,\n \"Incorrect result array size\")\n for i in range(len(result.array)):\n self.assertTrue(quant_intervals_inconsistent[i][0] <= result.array[i][0]\n and result.array[i][1] <= quant_intervals_inconsistent[i][1],\n \"Intervals couldn't become larger\")\n\n def testQuantsNotInconsistent(self):\n arrays = [[[0.2, 0.3], [0.2, 0.3], [0.2, 0.3], [0.6, 0.7]]]\n for quant_intervals_inconsistent in arrays:\n knowledgePattern = QuantKnowledgePatternItem(quant_intervals_inconsistent)\n result = KnowledgePatternManager.checkInconsistency(knowledgePattern)\n self.assertFalse(result.inconsistent, \"False positive inconsistency result\")\n\n def testConjunctsInconsistent(self):\n arrays = [[[1, 1], [0.6, 0.9], [0.6, 0.9], [0.2, 0.3]]]\n for conjunct_intervals_inconsistent in arrays:\n knowledgePattern = ConjunctKnowledgePatternItem(conjunct_intervals_inconsistent)\n result = KnowledgePatternManager.checkInconsistency(knowledgePattern)\n self.assertTrue(result.inconsistent, \"False negative inconsistency result\")\n self.assertTrue(np.array(result.array).shape == np.array(conjunct_intervals_inconsistent).shape,\n \"Incorrect result array size\")\n for i in range(len(result.array)):\n self.assertTrue(conjunct_intervals_inconsistent[i][0] <= result.array[i][0]\n and result.array[i][1] <= conjunct_intervals_inconsistent[i][1],\n \"Intervals couldn't become larger\")\n\n def testConjunctsNotInconsistent(self):\n arrays = [[[1, 1], [0.1, 0.2], [0.2, 0.4], [0.8, 0.8]]]\n for conjunct_intervals_inconsistent in arrays:\n knowledgePattern = DisjunctKnowledgePatternItem(conjunct_intervals_inconsistent)\n result = 
KnowledgePatternManager.checkInconsistency(knowledgePattern)\n self.assertFalse(result.inconsistent, \"False positive inconsistency result\")\n\n def testDisjunctsToQuantsMatrix(self):\n matrices = [(np.array([[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0],\n [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0, 1.0],\n [0.0, 0.0, 0.0, 0.0, 0.0, -1.0, 0.0, 1.0],\n [-0.0, -0.0, -0.0, -0.0, -1.0, 1.0, 1.0, -1.0],\n [0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, 1.0],\n [-0.0, -0.0, -1.0, 1.0, -0.0, -0.0, 1.0, -1.0],\n [-0.0, -1.0, -0.0, 1.0, -0.0, 1.0, -0.0, -1.0],\n [0.0, 1.0, 1.0, -1.0, 1.0, -1.0, -1.0, 1.0]], dtype=np.double), 3)]\n for matrix, n in matrices:\n generated_matrix = MatrixProducer.getDisjunctsToQuantsMatrix(n)\n for i in range(matrix.shape[0]):\n for j in range(matrix.shape[1]):\n self.assertEqual(matrix[i][j], generated_matrix[i][j], \"Wrong matrix generation algorithm\")\n\n def testConjunctsToQuantsMatrix(self):\n matrices = [(np.array([[1.0, -1.0, -1.0, 1.0, -1.0, 1.0, 1.0, -1.0],\n [0.0, 1.0, -0.0, -1.0, -0.0, -1.0, 0.0, 1.0],\n [0.0, 0.0, 1.0, -1.0, -0.0, -0.0, -1.0, 1.0],\n [0.0, 0.0, 0.0, 1.0, -0.0, -0.0, -0.0, -1.0],\n [0.0, 0.0, 0.0, 0.0, 1.0, -1.0, -1.0, 1.0],\n [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, -0.0, -1.0],\n [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, -1.0],\n [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]], dtype=np.double), 3)]\n for matrix, n in matrices:\n generated_matrix = MatrixProducer.getConjunctsToQuantsMatrix(n)\n for i in range(matrix.shape[0]):\n for j in range(matrix.shape[1]):\n self.assertEqual(matrix[i][j], generated_matrix[i][j], \"Wrong matrix generation algorithm\")\n\n def testQuantsToDisjunctsMatrix(self):\n matrices = [(np.array([[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],\n [0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0],\n [0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0],\n [0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0],\n [0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],\n [0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0],\n [0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],\n [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]], dtype=np.double), 3)]\n for matrix, n in matrices:\n generated_matrix = MatrixProducer.getQuantsToDisjunctsMatrix(n)\n for i in range(matrix.shape[0]):\n for j in range(matrix.shape[1]):\n self.assertEqual(matrix[i][j], generated_matrix[i][j], \"Wrong matrix generation algorithm\")\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] | [
[
"numpy.array"
]
] |
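The disjuncts→quants and quants→disjuncts matrices expected by the two tests are not independent: converting disjunct probabilities to quant (atomic event) probabilities and back must round-trip, i.e. the matrices are mutual inverses. That is a quick numpy check on the very values the tests spell out:

```python
import numpy as np

D2Q = np.array([[1, 0, 0, 0, 0, 0, 0, -1],
                [0, 0, 0, 0, 0, 0, -1, 1],
                [0, 0, 0, 0, 0, -1, 0, 1],
                [0, 0, 0, 0, -1, 1, 1, -1],
                [0, 0, 0, -1, 0, 0, 0, 1],
                [0, 0, -1, 1, 0, 0, 1, -1],
                [0, -1, 0, 1, 0, 1, 0, -1],
                [0, 1, 1, -1, 1, -1, -1, 1]], dtype=np.double)
Q2D = np.array([[1, 1, 1, 1, 1, 1, 1, 1],
                [0, 1, 0, 1, 0, 1, 0, 1],
                [0, 0, 1, 1, 0, 0, 1, 1],
                [0, 1, 1, 1, 0, 1, 1, 1],
                [0, 0, 0, 0, 1, 1, 1, 1],
                [0, 1, 0, 1, 1, 1, 1, 1],
                [0, 0, 1, 1, 1, 1, 1, 1],
                [0, 1, 1, 1, 1, 1, 1, 1]], dtype=np.double)

assert np.allclose(D2Q @ Q2D, np.eye(8))
assert np.allclose(Q2D @ D2Q, np.eye(8))
```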
liketheflower/DL_training | [
"a7123bf846085a698089ab30dd15017f2e857f51"
] | [
"cifar10_image_resize.py"
] | [
"# Large CNN model for the CIFAR-10 Dataset\nimport numpy\nfrom keras.datasets import cifar10\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import Dropout\nfrom keras.layers import Flatten\nfrom keras.constraints import maxnorm\nfrom keras.optimizers import SGD\nfrom keras.layers.convolutional import Conv2D\nfrom keras.layers.convolutional import MaxPooling2D\nfrom keras.utils import np_utils\nfrom keras import backend as K\nK.set_image_dim_ordering('th')\n# fix random seed for reproducibility\nseed = 7\nnumpy.random.seed(seed)\n# load data\n(X_train, y_train), (X_test, y_test) = cifar10.load_data()\n# normalize inputs from 0-255 to 0.0-1.0\nX_train = X_train.astype('float32')\nX_test = X_test.astype('float32')\nX_train = X_train / 255.0\nX_test = X_test / 255.0\n\nprint(X_train.shape)\nprint(X_train[0])\n\n'''\n# one hot encode outputs\ny_train = np_utils.to_categorical(y_train)\ny_test = np_utils.to_categorical(y_test)\nnum_classes = y_test.shape[1]\n# Create the model\nmodel = Sequential()\nmodel.add(Conv2D(32, (3, 3), input_shape=(3, 32, 32), activation='relu', padding='same'))\nmodel.add(Dropout(0.2))\nmodel.add(Conv2D(32, (3, 3), activation='relu', padding='same'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Conv2D(64, (3, 3), activation='relu', padding='same'))\nmodel.add(Dropout(0.2))\nmodel.add(Conv2D(64, (3, 3), activation='relu', padding='same'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Conv2D(128, (3, 3), activation='relu', padding='same'))\nmodel.add(Dropout(0.2))\nmodel.add(Conv2D(128, (3, 3), activation='relu', padding='same'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Flatten())\nmodel.add(Dropout(0.2))\nmodel.add(Dense(1024, activation='relu', kernel_constraint=maxnorm(3)))\nmodel.add(Dropout(0.2))\nmodel.add(Dense(512, activation='relu', kernel_constraint=maxnorm(3)))\nmodel.add(Dropout(0.2))\nmodel.add(Dense(num_classes, activation='softmax'))\n# Compile model\nepochs = 25\nlrate = 0.01\ndecay = lrate/epochs\nsgd = SGD(lr=lrate, momentum=0.9, decay=decay, nesterov=False)\nmodel.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])\nprint(model.summary())\n# Fit the model\nmodel.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=epochs, batch_size=64)\n# Final evaluation of the model\n'''\n"
] | [
[
"numpy.random.seed"
]
] |
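`K.set_image_dim_ordering('th')` is the Keras 1.x spelling; Keras 2 renamed it to `K.set_image_data_format('channels_first')`, which is what the commented-out model's `input_shape=(3, 32, 32)` assumes. The one-hot step that model expects, sketched against the Keras 2 API:

```python
import numpy as np
from keras import backend as K
from keras.utils import np_utils

K.set_image_data_format('channels_first')   # Keras 2 equivalent of 'th'

# cifar10.load_data() returns (n, 1) integer labels; to_categorical
# squeezes the trailing axis and expands to (n, num_classes).
y = np.array([[3], [1], [9]])
y_onehot = np_utils.to_categorical(y, num_classes=10)
assert y_onehot.shape == (3, 10)
```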
wangmengyun1998/HydroDataset | [
"40d461f564aedf8d1c456cdbba6f393f18ad5fcd"
] | [
"test/test_camels.py"
] | [
"import os\nimport pytest\n\nimport numpy as np\n\nimport definitions\nfrom hydrodataset.data.data_camels import Camels\n\n\[email protected]\ndef camels_aus_path():\n return os.path.join(definitions.DATASET_DIR, \"camels\", \"camels_aus\")\n\n\[email protected]\ndef camels_br_path():\n return os.path.join(definitions.DATASET_DIR, \"camels\", \"camels_br\")\n\n\[email protected]\ndef camels_cl_path():\n return os.path.join(definitions.DATASET_DIR, \"camels\", \"camels_cl\")\n\n\[email protected]\ndef camels_gb_path():\n return os.path.join(definitions.DATASET_DIR, \"camels\", \"camels_gb\")\n\n\[email protected]\ndef camels_us_path():\n return os.path.join(definitions.DATASET_DIR, \"camels\", \"camels_us\")\n\n\[email protected]\ndef camels_yr_path():\n return os.path.join(definitions.DATASET_DIR, \"camels\", \"camels_yr\")\n\n\[email protected]\ndef canopex_path():\n return os.path.join(definitions.DATASET_DIR, \"canopex\")\n\n\[email protected]\ndef lamah_ce_path():\n return os.path.join(definitions.DATASET_DIR, \"lamah_ce\")\n\n\[email protected]\ndef aus_region():\n return \"AUS\"\n\n\[email protected]\ndef br_region():\n return \"BR\"\n\n\[email protected]\ndef cl_region():\n return \"CL\"\n\n\[email protected]\ndef gb_region():\n return \"GB\"\n\n\[email protected]\ndef us_region():\n return \"US\"\n\n\[email protected]\ndef yr_region():\n return \"YR\"\n\n\[email protected]\ndef ca_region():\n return \"CA\"\n\n\[email protected]\ndef lamah_ce_region():\n return \"CE\"\n\n\ndef test_download_camels(camels_us_path):\n camels_us = Camels(camels_us_path, download=True)\n assert os.path.isfile(\n os.path.join(camels_us_path, \"basin_set_full_res\", \"HCDN_nhru_final_671.shp\")\n )\n assert os.path.isdir(\n os.path.join(camels_us_path, \"camels_streamflow\", \"camels_streamflow\")\n )\n\n\ndef test_read_camels_streamflow(camels_us_path, us_region):\n camels_us = Camels(camels_us_path, download=False, region=us_region)\n gage_ids = camels_us.read_object_ids()\n flows1 = camels_us.read_target_cols(\n gage_ids[:5], [\"2013-01-01\", \"2018-01-01\"], target_cols=[\"usgsFlow\"]\n )\n print(flows1)\n flows2 = camels_us.read_target_cols(\n gage_ids[:5], [\"2015-01-01\", \"2018-01-01\"], target_cols=[\"usgsFlow\"]\n )\n print(flows2)\n\n\ndef test_read_camels_us(camels_us_path, us_region):\n camels_us = Camels(camels_us_path, download=False, region=us_region)\n gage_ids = camels_us.read_object_ids()\n assert gage_ids.size == 671\n attrs = camels_us.read_constant_cols(\n gage_ids[:5], var_lst=[\"soil_conductivity\", \"elev_mean\", \"geol_1st_class\"]\n )\n np.testing.assert_almost_equal(\n attrs,\n np.array(\n [\n [1.10652248, 250.31, 10.0],\n [2.37500506, 92.68, 0.0],\n [1.28980735, 143.8, 10.0],\n [1.37329168, 247.8, 10.0],\n [2.61515428, 310.38, 7.0],\n ]\n ),\n )\n forcings = camels_us.read_relevant_cols(\n gage_ids[:5], [\"1990-01-01\", \"2010-01-01\"], var_lst=[\"dayl\", \"prcp\", \"srad\"]\n )\n np.testing.assert_array_equal(forcings.shape, np.array([5, 7305, 3]))\n flows = camels_us.read_target_cols(\n gage_ids[:5], [\"1990-01-01\", \"2010-01-01\"], target_cols=[\"usgsFlow\"]\n )\n np.testing.assert_array_equal(flows.shape, np.array([5, 7305, 1]))\n streamflow_types = camels_us.get_target_cols()\n np.testing.assert_array_equal(streamflow_types, np.array([\"usgsFlow\"]))\n focing_types = camels_us.get_relevant_cols()\n np.testing.assert_array_equal(\n focing_types, np.array([\"dayl\", \"prcp\", \"srad\", \"swe\", \"tmax\", \"tmin\", \"vp\"])\n )\n attr_types = camels_us.get_constant_cols()\n 
np.testing.assert_array_equal(\n attr_types[:3], np.array([\"gauge_lat\", \"gauge_lon\", \"elev_mean\"])\n )\n\n\ndef test_download_camels_aus(camels_aus_path):\n camels_aus = Camels(camels_aus_path, download=True, region=\"AUS\")\n assert os.path.isfile(\n os.path.join(\n camels_aus_path,\n \"05_hydrometeorology\",\n \"05_hydrometeorology\",\n \"01_precipitation_timeseries\",\n \"precipitation_AWAP.csv\",\n )\n )\n\n\ndef test_read_camels_aus(camels_aus_path, aus_region):\n camels_aus = Camels(camels_aus_path, download=False, region=aus_region)\n gage_ids = camels_aus.read_object_ids()\n assert gage_ids.size == 222\n attrs = camels_aus.read_constant_cols(\n gage_ids[:5], var_lst=[\"catchment_area\", \"slope_fdc\", \"geol_sec\"]\n )\n np.testing.assert_array_equal(\n attrs,\n np.array(\n [\n [1.25773e04, 3.66793e-01, 6.00000e00],\n [1.13929e04, 3.29998e-01, 6.00000e00],\n [5.65300e02, 1.78540e-02, 6.00000e00],\n [4.58300e02, 5.00234e-01, 6.00000e00],\n [7.73170e03, 3.74751e-01, 1.00000e00],\n ]\n ),\n )\n forcings = camels_aus.read_relevant_cols(\n gage_ids[:5],\n [\"1990-01-01\", \"2010-01-01\"],\n var_lst=[\"precipitation_AWAP\", \"et_morton_actual_SILO\", \"tmin_SILO\"],\n )\n np.testing.assert_array_equal(forcings.shape, np.array([5, 7305, 3]))\n flows = camels_aus.read_target_cols(\n gage_ids[:5],\n [\"1990-01-01\", \"2010-01-01\"],\n target_cols=[\"streamflow_MLd\", \"streamflow_mmd\"],\n )\n np.testing.assert_array_equal(flows.shape, np.array([5, 7305, 2]))\n streamflow_types = camels_aus.get_target_cols()\n np.testing.assert_array_equal(\n streamflow_types,\n np.array(\n [\n \"streamflow_MLd\",\n \"streamflow_MLd_inclInfilled\",\n \"streamflow_mmd\",\n \"streamflow_QualityCodes\",\n ]\n ),\n )\n focing_types = camels_aus.get_relevant_cols()\n np.testing.assert_array_equal(\n np.sort(focing_types),\n np.sort(\n [\n \"precipitation_AWAP\",\n \"precipitation_SILO\",\n \"precipitation_var_AWAP\",\n \"et_morton_actual_SILO\",\n \"et_morton_point_SILO\",\n \"et_morton_wet_SILO\",\n \"et_short_crop_SILO\",\n \"et_tall_crop_SILO\",\n \"evap_morton_lake_SILO\",\n \"evap_pan_SILO\",\n \"evap_syn_SILO\",\n \"solarrad_AWAP\",\n \"tmax_AWAP\",\n \"tmin_AWAP\",\n \"vprp_AWAP\",\n \"mslp_SILO\",\n \"radiation_SILO\",\n \"rh_tmax_SILO\",\n \"rh_tmin_SILO\",\n \"tmax_SILO\",\n \"tmin_SILO\",\n \"vp_deficit_SILO\",\n \"vp_SILO\",\n ]\n ),\n )\n attr_types = camels_aus.get_constant_cols()\n np.testing.assert_array_equal(\n attr_types[:3], np.array([\"station_name\", \"drainage_division\", \"river_region\"])\n )\n\n\ndef test_download_camels_br(camels_br_path):\n camels_br = Camels(camels_br_path, download=True, region=\"BR\")\n assert os.path.isfile(\n os.path.join(\n camels_br_path,\n \"01_CAMELS_BR_attributes\",\n \"01_CAMELS_BR_attributes\",\n \"CAMELS_BR_attributes_description.xlsx\",\n )\n )\n\n\ndef test_read_camels_br(camels_br_path, br_region):\n camels_br = Camels(camels_br_path, download=False, region=br_region)\n gage_ids = camels_br.read_object_ids()\n assert gage_ids.size == 897\n attrs = camels_br.read_constant_cols(\n gage_ids[:5], var_lst=[\"geol_class_1st\", \"p_mean\", \"runoff_ratio\"]\n )\n np.testing.assert_array_equal(\n attrs,\n np.array(\n [\n [8.0, 6.51179, 0.55336],\n [6.0, 5.38941, 0.72594],\n [6.0, 5.70191, 0.759],\n [6.0, 5.19877, 0.39463],\n [8.0, 5.49805, 0.38579],\n ]\n ),\n )\n forcings = camels_br.read_relevant_cols(\n gage_ids[:5],\n [\"1995-01-01\", \"2015-01-01\"],\n var_lst=[\n \"precipitation_chirps\",\n \"evapotransp_gleam\",\n 
\"potential_evapotransp_gleam\",\n \"temperature_min_cpc\",\n ],\n )\n np.testing.assert_array_equal(forcings.shape, np.array([5, 7305, 4]))\n # start from 1995/01/01 end with 2017/04/30\n flows = camels_br.read_target_cols(\n gage_ids[:5],\n [\"1995-01-01\", \"2015-01-01\"],\n target_cols=[\"streamflow_m3s\", \"streamflow_mm_selected_catchments\"],\n )\n np.testing.assert_array_equal(flows.shape, np.array([5, 7305, 2]))\n streamflow_types = camels_br.get_target_cols()\n np.testing.assert_array_equal(\n streamflow_types,\n np.array(\n [\n \"streamflow_m3s\",\n \"streamflow_mm_selected_catchments\",\n \"streamflow_simulated\",\n ]\n ),\n )\n focing_types = camels_br.get_relevant_cols()\n np.testing.assert_array_equal(\n focing_types,\n np.array(\n [\n \"precipitation_chirps\",\n \"precipitation_mswep\",\n \"precipitation_cpc\",\n \"evapotransp_gleam\",\n \"evapotransp_mgb\",\n \"potential_evapotransp_gleam\",\n \"temperature_min_cpc\",\n \"temperature_mean_cpc\",\n \"temperature_max_cpc\",\n ]\n ),\n )\n attr_types = camels_br.get_constant_cols()\n np.testing.assert_array_equal(\n attr_types[:3], np.array([\"p_mean\", \"pet_mean\", \"et_mean\"])\n )\n\n\ndef test_download_camels_cl(camels_cl_path):\n camels_cl = Camels(camels_cl_path, download=True, region=\"CL\")\n assert os.path.isfile(\n os.path.join(\n camels_cl_path, \"1_CAMELScl_attributes\", \"1_CAMELScl_attributes.txt\"\n )\n )\n\n\ndef test_read_camels_cl(camels_cl_path, cl_region):\n camels_cl = Camels(camels_cl_path, download=False, region=cl_region)\n gage_ids = camels_cl.read_object_ids()\n assert gage_ids.size == 516\n attrs = camels_cl.read_constant_cols(\n gage_ids[:5], var_lst=[\"geol_class_1st\", \"crop_frac\"]\n )\n np.testing.assert_almost_equal(\n attrs,\n np.array(\n [\n [9.0, 0.0],\n [9.0, 0.014243],\n [9.0, 0.020827],\n [10.0, 0.1055],\n [10.0, 0.0684],\n ]\n ),\n decimal=4,\n )\n forcings = camels_cl.read_relevant_cols(\n gage_ids[:5],\n [\"1995-01-01\", \"2015-01-01\"],\n var_lst=[\"pet_8d_modis\", \"precip_cr2met\", \"swe\"],\n )\n np.testing.assert_array_equal(forcings.shape, np.array([5, 7305, 3]))\n flows = camels_cl.read_target_cols(\n gage_ids[:5],\n [\"1995-01-01\", \"2015-01-01\"],\n target_cols=[\"streamflow_m3s\", \"streamflow_mm\"],\n )\n np.testing.assert_array_equal(flows.shape, np.array([5, 7305, 2]))\n streamflow_types = camels_cl.get_target_cols()\n np.testing.assert_array_equal(\n streamflow_types, np.array([\"streamflow_m3s\", \"streamflow_mm\"])\n )\n focing_types = camels_cl.get_relevant_cols()\n np.testing.assert_array_equal(\n focing_types,\n np.array(\n [\n \"precip_cr2met\",\n \"precip_chirps\",\n \"precip_mswep\",\n \"precip_tmpa\",\n \"tmin_cr2met\",\n \"tmax_cr2met\",\n \"tmean_cr2met\",\n \"pet_8d_modis\",\n \"pet_hargreaves\",\n \"swe\",\n ]\n ),\n )\n attr_types = camels_cl.get_constant_cols()\n np.testing.assert_array_equal(\n attr_types[:3], np.array([\"gauge_name\", \"gauge_lat\", \"gauge_lon\"])\n )\n\n\ndef test_download_camels_gb(camels_gb_path, gb_region):\n camels_gb = Camels(camels_gb_path, download=True, region=gb_region)\n assert os.path.isfile(\n os.path.join(\n camels_gb_path,\n \"8344e4f3-d2ea-44f5-8afa-86d2987543a9\",\n \"8344e4f3-d2ea-44f5-8afa-86d2987543a9\",\n \"data\",\n \"CAMELS_GB_climatic_attributes.csv\",\n )\n )\n\n\ndef test_read_camels_gb(camels_gb_path, gb_region):\n camels_gb = Camels(camels_gb_path, download=False, region=gb_region)\n gage_ids = camels_gb.read_object_ids()\n assert gage_ids.size == 671\n attrs = camels_gb.read_constant_cols(\n gage_ids[:5], 
var_lst=[\"p_mean\", \"slope_fdc\", \"gauge_name\"]\n )\n np.testing.assert_array_equal(\n attrs,\n np.array(\n [\n [2.29, 1.94, 596.0],\n [2.31, 1.95, 670.0],\n [2.65, 4.01, 647.0],\n [2.31, 1.54, 393.0],\n [2.29, 1.47, 217.0],\n ]\n ),\n )\n forcings = camels_gb.read_relevant_cols(\n gage_ids[:5],\n [\"1995-01-01\", \"2015-01-01\"],\n var_lst=[\"precipitation\", \"pet\", \"temperature\", \"peti\"],\n )\n np.testing.assert_array_equal(forcings.shape, np.array([5, 7305, 4]))\n flows = camels_gb.read_target_cols(\n gage_ids[:5],\n [\"1995-01-01\", \"2015-01-01\"],\n target_cols=[\"discharge_spec\", \"discharge_vol\"],\n )\n np.testing.assert_array_equal(flows.shape, np.array([5, 7305, 2]))\n streamflow_types = camels_gb.get_target_cols()\n np.testing.assert_array_equal(\n streamflow_types, np.array([\"discharge_spec\", \"discharge_vol\"])\n )\n focing_types = camels_gb.get_relevant_cols()\n np.testing.assert_array_equal(\n focing_types,\n np.array(\n [\n \"precipitation\",\n \"pet\",\n \"temperature\",\n \"peti\",\n \"humidity\",\n \"shortwave_rad\",\n \"longwave_rad\",\n \"windspeed\",\n ]\n ),\n )\n attr_types = camels_gb.get_constant_cols()\n np.testing.assert_array_equal(\n attr_types[:3], np.array([\"p_mean\", \"pet_mean\", \"aridity\"])\n )\n\n\ndef test_download_camels_yr(camels_yr_path, yr_region):\n camels_yr = Camels(camels_yr_path, download=True, region=yr_region)\n assert os.path.isfile(\n os.path.join(\n camels_yr_path,\n \"9_Normal_Camels_YR\",\n \"1_Normal_Camels_YR_basin_data\",\n \"0146\",\n \"attributes.json\",\n )\n )\n\n\ndef test_read_camels_yr(camels_yr_path, yr_region):\n camels_yr = Camels(camels_yr_path, download=False, region=yr_region)\n gage_ids = camels_yr.read_object_ids()\n assert gage_ids.size == 102\n attrs = camels_yr.read_constant_cols(\n gage_ids[:5], var_lst=[\"area\", \"barren\", \"bdticm\"]\n )\n np.testing.assert_almost_equal(\n attrs,\n np.array(\n [\n [3.11520000e04, 2.98706264e-03, 3.33904449e03],\n [4.27056000e05, 2.30162622e-02, 1.80570119e03],\n [3.80128000e05, 2.47549979e-02, 1.77874628e03],\n [7.33561000e05, 1.41340180e-02, 2.01143843e03],\n [2.04213000e05, 7.75394506e-03, 1.53321208e03],\n ]\n ),\n decimal=3,\n )\n forcings = camels_yr.read_relevant_cols(\n gage_ids[:5],\n [\"1995-01-01\", \"2015-01-01\"],\n var_lst=[\"pre\", \"evp\", \"gst_mean\", \"prs_mean\"],\n )\n np.testing.assert_array_equal(forcings.shape, np.array([5, 7305, 4]))\n flows = camels_yr.read_target_cols(\n gage_ids[:5], [\"1995-01-01\", \"2015-01-01\"], target_cols=[\"normalized_q\"]\n )\n np.testing.assert_array_equal(flows.shape, np.array([5, 7305, 1]))\n streamflow_types = camels_yr.get_target_cols()\n np.testing.assert_array_equal(streamflow_types, np.array([\"normalized_q\"]))\n focing_types = camels_yr.get_relevant_cols()\n np.testing.assert_array_equal(\n focing_types,\n np.array(\n [\n \"pre\",\n \"evp\",\n \"gst_mean\",\n \"prs_mean\",\n \"tem_mean\",\n \"rhu\",\n \"win_mean\",\n \"gst_min\",\n \"prs_min\",\n \"tem_min\",\n \"gst_max\",\n \"prs_max\",\n \"tem_max\",\n \"ssd\",\n \"win_max\",\n ]\n ),\n )\n attr_types = camels_yr.get_constant_cols()\n np.testing.assert_array_equal(\n attr_types[:3], np.array([\"area\", \"barren\", \"bdticm\"])\n )\n\n\ndef test_download_canopex(canopex_path, ca_region):\n canopex = Camels(canopex_path, download=True, region=ca_region)\n assert os.path.isfile(\n os.path.join(\n canopex_path, \"CANOPEX_NRCAN_ASCII\", \"CANOPEX_NRCAN_ASCII\", \"1.dly\"\n )\n )\n\n\ndef test_read_canopex_data(canopex_path, ca_region):\n canopex = 
Camels(canopex_path, download=False, region=ca_region)\n gage_ids = canopex.read_object_ids()\n assert gage_ids.size == 611\n attrs = canopex.read_constant_cols(\n gage_ids[:5],\n var_lst=[\"Drainage_Area_km2\", \"Land_Use_Grass_frac\", \"Permeability_logk_m2\"],\n )\n np.testing.assert_almost_equal(\n attrs,\n np.array(\n [\n [3.28438700e02, 6.94000000e-02, -1.35251586e01],\n [3.43515600e02, 6.16000000e-02, -1.42367348e01],\n [1.45554950e03, 3.59000000e-02, -1.50071080e01],\n [5.64820300e02, 3.05000000e-02, -1.51002546e01],\n [1.05383090e03, 3.27000000e-02, -1.51999998e01],\n ]\n ),\n decimal=3,\n )\n forcings = canopex.read_relevant_cols(\n gage_ids[:5], [\"1990-01-01\", \"2010-01-01\"], var_lst=[\"prcp\", \"tmax\", \"tmin\"]\n )\n np.testing.assert_array_equal(forcings.shape, np.array([5, 7305, 3]))\n flows = canopex.read_target_cols(\n gage_ids[:5], [\"1990-01-01\", \"2010-01-01\"], target_cols=[\"discharge\"]\n )\n np.testing.assert_array_equal(flows.shape, np.array([5, 7305, 1]))\n streamflow_types = canopex.get_target_cols()\n np.testing.assert_array_equal(streamflow_types, np.array([\"discharge\"]))\n focing_types = canopex.get_relevant_cols()\n np.testing.assert_array_equal(focing_types, np.array([\"prcp\", \"tmax\", \"tmin\"]))\n attr_types = canopex.get_constant_cols()\n np.testing.assert_array_equal(\n attr_types[:3], np.array([\"Source\", \"Name\", \"Official_ID\"])\n )\n\n\ndef test_download_lamah_ce(lamah_ce_path, lamah_ce_region):\n lamah_ce = Camels(lamah_ce_path, download=True, region=lamah_ce_region)\n assert os.path.isfile(\n os.path.join(\n lamah_ce_path,\n \"2_LamaH-CE_daily\",\n \"A_basins_total_upstrm\",\n \"2_timeseries\",\n \"daily\",\n \"ID_882.csv\",\n )\n )\n\n\ndef test_read_lamah_ce(lamah_ce_path, lamah_ce_region):\n lamah_ce = Camels(lamah_ce_path, download=False, region=lamah_ce_region)\n gage_ids = lamah_ce.read_object_ids()\n assert gage_ids.size == 859\n attrs = lamah_ce.read_constant_cols(\n gage_ids[:5], var_lst=[\"area_calc\", \"elev_mean\", \"elev_med\"]\n )\n np.testing.assert_almost_equal(\n attrs,\n np.array(\n [\n [4668.379, 1875.0, 1963.0],\n [102.287, 1775.0, 1827.0],\n [536.299, 1844.0, 1916.0],\n [66.286, 1894.0, 1907.0],\n [72.448, 1774.0, 1796.0],\n ]\n ),\n decimal=3,\n )\n forcings = lamah_ce.read_relevant_cols(\n gage_ids[:5],\n [\"1990-01-01\", \"2010-01-01\"],\n var_lst=[\"2m_temp_max\", \"prec\", \"volsw_4\"],\n )\n np.testing.assert_array_equal(forcings.shape, np.array([5, 7305, 3]))\n flows = lamah_ce.read_target_cols(\n gage_ids[:5], [\"1990-01-01\", \"2010-01-01\"], target_cols=[\"qobs\"]\n )\n np.testing.assert_array_equal(flows.shape, np.array([5, 7305, 1]))\n streamflow_types = lamah_ce.get_target_cols()\n np.testing.assert_array_equal(streamflow_types, np.array([\"qobs\"]))\n focing_types = lamah_ce.get_relevant_cols()\n np.testing.assert_array_equal(\n focing_types,\n np.array(\n [\n \"2m_temp_max\",\n \"2m_temp_mean\",\n \"2m_temp_min\",\n \"2m_dp_temp_max\",\n \"2m_dp_temp_mean\",\n \"2m_dp_temp_min\",\n \"10m_wind_u\",\n \"10m_wind_v\",\n \"fcst_alb\",\n \"lai_high_veg\",\n \"lai_low_veg\",\n \"swe\",\n \"surf_net_solar_rad_max\",\n \"surf_net_solar_rad_mean\",\n \"surf_net_therm_rad_max\",\n \"surf_net_therm_rad_mean\",\n \"surf_press\",\n \"total_et\",\n \"prec\",\n \"volsw_123\",\n \"volsw_4\",\n ]\n ),\n )\n attr_types = lamah_ce.get_constant_cols()\n np.testing.assert_array_equal(\n attr_types,\n np.array(\n [\n \"area_calc\",\n \"elev_mean\",\n \"elev_med\",\n \"elev_std\",\n \"elev_ran\",\n \"slope_mean\",\n 
\"mvert_dist\",\n \"mvert_ang\",\n \"elon_ratio\",\n \"strm_dens\",\n \"p_mean\",\n \"et0_mean\",\n \"eta_mean\",\n \"arid_1\",\n \"arid_2\",\n \"p_season\",\n \"frac_snow\",\n \"hi_prec_fr\",\n \"hi_prec_du\",\n \"hi_prec_ti\",\n \"lo_prec_fr\",\n \"lo_prec_du\",\n \"lo_prec_ti\",\n \"lc_dom\",\n \"agr_fra\",\n \"bare_fra\",\n \"forest_fra\",\n \"glac_fra\",\n \"lake_fra\",\n \"urban_fra\",\n \"lai_max\",\n \"lai_diff\",\n \"ndvi_max\",\n \"ndvi_min\",\n \"gvf_max\",\n \"gvf_diff\",\n \"bedrk_dep\",\n \"root_dep\",\n \"soil_poros\",\n \"soil_condu\",\n \"soil_tawc\",\n \"sand_fra\",\n \"silt_fra\",\n \"clay_fra\",\n \"grav_fra\",\n \"oc_fra\",\n \"gc_dom\",\n \"gc_ig_fra\",\n \"gc_mt_fra\",\n \"gc_pa_fra\",\n \"gc_pb_fra\",\n \"gc_pi_fra\",\n \"gc_py_fra\",\n \"gc_sc_fra\",\n \"gc_sm_fra\",\n \"gc_ss_fra\",\n \"gc_su_fra\",\n \"gc_va_fra\",\n \"gc_vb_fra\",\n \"gc_wb_fra\",\n \"geol_perme\",\n \"geol_poros\",\n ]\n ),\n )\n\n\ndef test_ca_p_mean(canopex_path, ca_region):\n canopex = Camels(canopex_path, download=False, region=ca_region)\n gage_ids = canopex.read_object_ids()\n p_mean = canopex.read_mean_prep(gage_ids[:5])\n np.testing.assert_almost_equal(\n p_mean, np.array([2.91712073, 3.14145935, 3.12958083, 3.09248435, 3.04431583])\n )\n"
] | [
[
"numpy.array",
"numpy.sort"
]
] |
aris-mukherjee/TransUNet-modified | [
"185307b677fd6ee05604213c90e14e028fab476a"
] | [
"BIDMC_test.py"
] | [
"import argparse\nimport logging\nimport os\nimport random\nimport sys\nimport numpy as np\nimport torch\nimport torch.backends.cudnn as cudnn\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\nfrom datasets.dataset_synapse import Synapse_dataset\nfrom utils import test_single_volume\nfrom networks.vit_seg_modeling import VisionTransformer as ViT_seg\nfrom networks.vit_seg_modeling import CONFIGS as CONFIGS_ViT_seg\nimport config.system_paths as sys_config\nimport utils_data\nfrom networks.unet_class import UNET\nimport utils\nfrom sklearn.calibration import CalibrationDisplay\nimport matplotlib.pyplot as plt\nfrom calibration_functions import find_bin_values\nfrom calibration_functions import find_area\nfrom calibration_functions import plot_roc_curve\nfrom sklearn.metrics import roc_curve, auc\nfrom sklearn.metrics import roc_auc_score\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--volume_path', type=str,\n default='/itet-stor/arismu/bmicdatasets-originals/Originals/Challenge_Datasets/Prostate_PROMISE12/TrainingData/', help='root dir for validation volume data') # for acdc volume_path=root_dir\nparser.add_argument('--test_dataset', type=str,\n default='BIDMC', help='experiment_name')\nparser.add_argument('--num_classes', type=int,\n default=3, help='output channel of network')\nparser.add_argument('--max_iterations', type=int,default=6800, help='maximum epoch number to train')\nparser.add_argument('--max_epochs', type=int, default=400, help='maximum epoch number to train')\nparser.add_argument('--batch_size', type=int, default=16,\n help='batch_size per gpu')\nparser.add_argument('--img_size', type=int, default=256, help='input patch size of network input')\n#parser.add_argument('--is_savenii', action=\"store_true\", help='whether to save results during inference')\nparser.add_argument('--is_savenii', type=bool, default=True, help='whether to save results during inference')\n\nparser.add_argument('--n_skip', type=int, default=3, help='using number of skip-connect, default is num')\nparser.add_argument('--vit_name', type=str, default='R50-ViT-B_16', help='select one vit model')\n\nparser.add_argument('--test_save_dir', type=str, default='../predictions', help='saving prediction as nii!')\nparser.add_argument('--deterministic', type=int, default=1, help='whether use deterministic training')\nparser.add_argument('--base_lr', type=float, default=1e-3, help='segmentation network learning rate')\nparser.add_argument('--seed', type=int, default=1234, help='random seed')\nparser.add_argument('--vit_patches_size', type=int, default=16, help='vit_patches_size, default is 16')\nparser.add_argument('--target_resolution', type=float, default=0.625, help='target resolution') \nparser.add_argument('--image_depth_tr', type=int, default=32, help='target resolution') \nparser.add_argument('--image_depth_ts', type=int, default=32, help='target resolution') \nparser.add_argument('--test_cv_fold_num', type = int, default = 1) # 1 / 2 / 3 / 4\nparser.add_argument('--NORMALIZE', type = int, default = 1) # 1 / 0\nargs = parser.parse_args()\n\n\n\ndef inference(args, model, test_save_path=None):\n\n # ============================\n # Load test data\n # ============================ \n \n loaded_test_data = utils_data.load_testing_data(args.test_dataset, #needs to be adapted for different test set\n args.test_cv_fold_num,\n args.img_size,\n args.target_resolution,\n args.image_depth_ts)\n\n\n imts = loaded_test_data[0] #shape (194, 256, 256)\n gtts = 
loaded_test_data[1]\n orig_data_res_x = loaded_test_data[2]\n orig_data_res_y = loaded_test_data[3]\n orig_data_res_z = loaded_test_data[4]\n orig_data_siz_x = loaded_test_data[5]\n orig_data_siz_y = loaded_test_data[6]\n orig_data_siz_z = loaded_test_data[7]\n name_test_subjects = loaded_test_data[8]\n num_test_subjects = loaded_test_data[9]\n ids = loaded_test_data[10]\n\n \n\n model.eval()\n metric_list = 0.0\n pred_list = []\n label_list = []\n fpr = dict()\n tpr = dict()\n roc_auc = dict() \n\n \n for sub_num in range(num_test_subjects):\n\n\n # ============================\n # Group slices belonging to the same patients\n # ============================ \n\n subject_id_start_slice = np.sum(orig_data_siz_z[:sub_num]) #194 at the end of the loop\n subject_id_end_slice = np.sum(orig_data_siz_z[:sub_num+1]) #174 at the end of the loop\n image = imts[:,:, subject_id_start_slice:subject_id_end_slice] \n label = gtts[:,:, subject_id_start_slice:subject_id_end_slice] \n\n image = torch.from_numpy(image)\n label = torch.from_numpy(label)\n image, label = image.cuda(), label.cuda() \n image = image.permute(2, 0, 1)\n label = label.permute(2, 0, 1)\n\n image = torch.rot90(image, 1, [1, 2])\n label = torch.rot90(label, 1, [1, 2])\n\n # ==================================================================\n # setup logging\n # ==================================================================\n logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')\n subject_name = str(name_test_subjects[sub_num])[2:-1]\n logging.info('============================================================')\n logging.info('Subject ' + str(sub_num+1) + ' out of ' + str(num_test_subjects) + ': ' + subject_name)\n\n # ============================\n # Perform the prediction for each test patient individually & calculate dice score and Hausdorff distance\n # ============================ \n\n metric_i, pred_l, label_l = test_single_volume(image, label, model, classes=args.num_classes, dataset = 'BIDMC', optim = 'ADAM', model_type = 'UNWT', seed= '1234', patch_size=[args.img_size, args.img_size],\n test_save_path=test_save_path, case=sub_num, z_spacing=args.z_spacing)\n\n metric_list += np.array(metric_i)\n pred_list.extend(pred_l)\n label_list.extend(label_l)\n logging.info('case %s mean_dice %f mean_hd95 %f' % (sub_num, np.mean(metric_i, axis=0)[0], np.mean(metric_i, axis=0)[1]))\n metric_list = metric_list / num_test_subjects #get mean metrics for every class\n\n # ============================\n # Log the mean performance achieved for each class\n # ============================ \n\n first_bin_frac_pos, second_bin_frac_pos, third_bin_frac_pos, fourth_bin_frac_pos, fifth_bin_frac_pos = find_bin_values(pred_list, label_list)\n find_area(first_bin_frac_pos, second_bin_frac_pos, third_bin_frac_pos, fourth_bin_frac_pos, fifth_bin_frac_pos)\n disp = CalibrationDisplay.from_predictions(label_list, pred_list)\n plt.show()\n plt.savefig(f'/scratch_net/biwidl217_second/arismu/Data_MT/plots/UNWT_BIDMC.png')\n\n fpr, tpr, _ = roc_curve(label_list, pred_list)\n roc_auc = auc(fpr, tpr)\n plot_roc_curve(fpr, tpr, roc_auc, 'ROC_UNWT_BIDMC')\n\n for i in range(0, args.num_classes):\n logging.info('Mean class %d mean_dice %f mean_hd95 %f' % (i, metric_list[i][0], metric_list[i][1]))\n performance = np.mean(metric_list, axis=0)[0]\n mean_hd95 = np.mean(metric_list, axis=0)[1]\n logging.info('Testing performance in best val model: mean_dice : %f mean_hd95 : %f' % (performance, mean_hd95))\n return \"Testing Finished!\"\n\n\nif 
__name__ == \"__main__\":\n\n if not args.deterministic:\n cudnn.benchmark = True\n cudnn.deterministic = False\n else:\n cudnn.benchmark = False\n cudnn.deterministic = True\n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n torch.cuda.manual_seed(args.seed)\n\n dataset_config = {\n 'BIDMC': {\n 'volume_path': '/itet-stor/arismu/bmicdatasets-originals/Originals/Challenge_Datasets/Prostate_PROMISE12/TrainingData/',\n 'num_classes': 3,\n 'z_spacing': 1,\n },\n }\n dataset_name = args.test_dataset\n args.num_classes = dataset_config[dataset_name]['num_classes']\n args.volume_path = dataset_config[dataset_name]['volume_path']\n args.Dataset = dataset_name\n args.z_spacing = dataset_config[dataset_name]['z_spacing']\n args.is_pretrain = True\n\n # ============================\n # Same snapshot path as defined in the train script to access the trained model\n # ============================ \n\n args.exp = 'TU_RUNMC' + str(args.img_size) \n snapshot_path = \"../model/{}/{}\".format(args.exp, 'TU')\n snapshot_path = snapshot_path + '_pretrain' if args.is_pretrain else snapshot_path\n snapshot_path += '_' + args.vit_name\n snapshot_path = snapshot_path + '_skip' + str(args.n_skip)\n snapshot_path = snapshot_path + '_vitpatch' + str(args.vit_patches_size) if args.vit_patches_size!=16 else snapshot_path\n snapshot_path = snapshot_path+'_'+str(args.max_iterations)[0:2]+'k' if args.max_iterations != 6800 else snapshot_path\n snapshot_path = snapshot_path + '_epo' +str(args.max_epochs) if args.max_epochs != 400 else snapshot_path\n if dataset_name == 'ACDC': # using max_epoch instead of iteration to control training duration\n snapshot_path = snapshot_path + '_' + str(args.max_iterations)[0:2] + 'k' if args.max_iterations != 30000 else snapshot_path\n snapshot_path = snapshot_path+'_bs'+str(args.batch_size)\n snapshot_path = snapshot_path + '_lr' + str(args.base_lr) if args.base_lr != 1e-3 else snapshot_path\n snapshot_path = snapshot_path + '_'+str(args.img_size)\n snapshot_path = snapshot_path + '_s'+str(args.seed) if args.seed!=1234 else snapshot_path\n\n config_vit = CONFIGS_ViT_seg[args.vit_name]\n config_vit.n_classes = args.num_classes\n config_vit.n_skip = args.n_skip\n config_vit.patches.size = (args.vit_patches_size, args.vit_patches_size)\n if args.vit_name.find('R50') !=-1:\n config_vit.patches.grid = (int(args.img_size/args.vit_patches_size), int(args.img_size/args.vit_patches_size))\n net = ViT_seg(config_vit, img_size=args.img_size, num_classes=config_vit.n_classes).cuda()\n #net = UNET(in_channels = 3, out_channels = 3, features = [32, 64, 128, 256]).cuda()\n\n snapshot = os.path.join('/scratch_net/biwidl217_second/arismu/Master_Thesis_Codes/project_TransUNet/model/2021/TU_3seeds/', 'REVISED_ADAM_best_val_loss_seed1234.pth')\n #f not os.path.exists(snapshot): snapshot = snapshot.replace('best_model', 'epoch_' + str(args.max_epochs-1))\n\n # ============================\n # Load the trained parameters into the model\n # ============================ \n\n net.load_state_dict(torch.load(snapshot))\n\n # ============================\n # Logging\n # ============================ \n\n snapshot_name = snapshot_path.split('/')[-1]\n log_folder = './test_log/test_log_' + 'TU_BIDMC256'\n os.makedirs(log_folder, exist_ok=True)\n logging.basicConfig(filename=log_folder + '/'+snapshot_name+\".txt\", level=logging.INFO, format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S')\n logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))\n 
logging.info(str(args))\n logging.info(snapshot_name)\n\n # ============================\n # Save the predictions as nii files\n # ============================ \n\n if args.is_savenii:\n args.test_save_dir = '../predictions_2022/'\n test_save_path = os.path.join(args.test_save_dir, 'BIDMC_UNWT_test_seed1234')\n os.makedirs(test_save_path, exist_ok=True)\n else:\n test_save_path = None\n inference(args, net, test_save_path)\n\nprint(\"test.py successfully executed\")\n\n"
] | [
[
"numpy.array",
"torch.cuda.manual_seed",
"numpy.random.seed",
"matplotlib.pyplot.savefig",
"numpy.sum",
"torch.rot90",
"numpy.mean",
"torch.from_numpy",
"torch.manual_seed",
"matplotlib.pyplot.show",
"sklearn.metrics.auc",
"torch.load",
"sklearn.calibration.CalibrationDisplay.from_predictions",
"sklearn.metrics.roc_curve"
]
] |
yuhaitao1994/Biendata_Molecule_Prediction_Challenge_3rd | [
"9f450f15045683d416bb02ad9633c95d9bc0908d"
] | [
"data_loader.py"
] | [
"# coding=utf-8\n\"\"\"\n加载原始数据,并将categorical features转化成NN模型1的one-hot编码,将numerical features标准化\n@author: yuhaitao\n\"\"\"\nimport pandas as pd\nimport os\nimport tqdm\nimport numpy as np\nimport seaborn as sns\nimport json\nimport datetime\nimport multiprocessing\nfrom sklearn.model_selection import KFold\n\n\ndef min_max_norm(x, feature_infos):\n # deep部分进行min-max归一化\n min_value = feature_infos[x.name]['min']\n max_value = feature_infos[x.name]['max']\n out = pd.Series(index=range(x.size))\n index = 0\n for one in x:\n if one == max_value:\n out[index] = 1.0\n elif one == min_value:\n out[index] = 0.0\n else:\n out[index] = (one - min_value) / (max_value - min_value)\n index += 1\n return out\n\n \ndef var_norm(x, feature_infos):\n # 方差归一化\n mean = feature_infos[x.name]['mean']\n std = feature_infos[x.name]['std']\n out = pd.Series(index=range(x.size))\n index = 0\n for one in x:\n out[index] = (one - mean) / std\n index += 1\n return out\n\n\nclass myDataLoader(object):\n \"\"\"\n \"\"\"\n\n def __init__(self, data_path):\n \"\"\"\n 初始化\n \"\"\"\n self.data_path = data_path\n\n def dataset_for_boost(self, train_file, test_file, label_file):\n \"\"\"\n 加载数据集\n \"\"\"\n train_data = pd.read_csv(os.path.join(self.data_path, train_file))\n test_data = pd.read_csv(os.path.join(self.data_path, test_file))\n train_answer = pd.read_csv(os.path.join(self.data_path, label_file))\n\n train_data = train_data.merge(train_answer, on='id', how='left')\n\n # 处理缺失值,该数据集暂时没有\n\n # 去掉数值全部相同的特征\n singleValuesCnt = 0\n for i in train_data.columns:\n if len(train_data[i].unique()) == 1:\n train_data.drop([i], axis=1, inplace=True)\n test_data.drop([i], axis=1, inplace=True)\n singleValuesCnt += 1\n print(\"{}singleValues feathers are cleaned..\".format(singleValuesCnt))\n\n return train_data, test_data\n\n def normalize_to_json(self, train_data):\n \"\"\"\n 将训练集数据规范化后的均值,边界等指标存入json文件\n \"\"\"\n feature_infos = {}\n # 随机采样80%的数据进行统计,模拟5-fold\n train_data = train_data.sample(frac=0.8, replace=False, axis=0)\n # 分开处理deep部分与wide部分\n use_cols = [col for col in train_data.columns if col !=\n 'id' and 'p' not in col]\n print(f'Number of common used features: {len(use_cols)}')\n deep_cols, wide_cols = [], []\n for col in use_cols:\n if train_data[col].dtype == float:\n deep_cols.append(col)\n else:\n wide_cols.append(col)\n\n # 处理deep部分\n def numeric_status(x):\n return pd.Series([x.min(), x.mean(), x.max(), x.std(), ], index=['min', 'ave', 'max', 'std'])\n deep_norm_df = train_data[deep_cols].apply(numeric_status)\n\n for col in deep_cols:\n c_max = min(\n deep_norm_df[col][2], deep_norm_df[col][1] + deep_norm_df[col][3] * 3)\n c_min = max(\n deep_norm_df[col][0], deep_norm_df[col][1] - deep_norm_df[col][3] * 3)\n feature_infos[col] = {'min': c_min, 'max': c_max,\n 'mean': deep_norm_df[col][1], 'std': deep_norm_df[col][3]}\n\n # 处理wide部分\n def categorical_status(x):\n cat_dict = {}\n for one in x:\n if not pd.isnull(one):\n if int(one) in cat_dict:\n cat_dict[int(one)] += 1\n else:\n cat_dict[int(one)] = 1\n\n cat_list = [tup[0] for tup in sorted(\n cat_dict.items(), key=lambda x:x[1], reverse=True)[:min(len(cat_dict), 100)]]\n # if 0 not in cat_list:\n # cat_list.append(0)\n cat_list.sort()\n return pd.Series([cat_list], index=['list'])\n wide_norm_df = train_data[wide_cols].apply(categorical_status)\n\n for col in wide_cols:\n feature_infos[col] = {'list': wide_norm_df[col][0]}\n\n # 处理label(暂时没标准化)\n label_cols = ['p1', 'p2', 'p3', 'p4', 'p5', 'p6']\n label_norm_df = 
train_data[label_cols].apply(numeric_status)\n for col in label_cols:\n l_max = min(\n label_norm_df[col][2], label_norm_df[col][1] + label_norm_df[col][3] * 3)\n l_min = max(\n label_norm_df[col][0], label_norm_df[col][1] - label_norm_df[col][3] * 3)\n feature_infos[col] = {'min': l_min, 'max': l_max,\n 'mean': label_norm_df[col][1], 'std': label_norm_df[col][3]}\n\n with open('./feature_info.json', 'w') as f:\n f.write(json.dumps(feature_infos))\n\n\n def prepare_nn_data(self, data, mode='train', norm_mode='min_max'):\n \"\"\"\n 将数据规范化后存入csv文件\n \"\"\"\n with open('./feature_info.json', 'r') as f:\n feature_infos = json.load(f)\n\n nn_data_file = os.path.join(\n self.data_path, f'nn_{mode}_{norm_mode}.csv')\n\n use_cols, not_use_cols = [], []\n for col in data.columns:\n if col != 'id' and 'p' not in col:\n use_cols.append(col)\n else:\n not_use_cols.append(col)\n\n print(f'Number of common used features: {len(use_cols)}')\n print('*' * 120)\n # 划分wide 与 deep 不同部分的特征\n deep_cols, wide_cols = [], []\n for col in use_cols:\n if data[col].dtype == float:\n deep_cols.append(col)\n else:\n wide_cols.append(col)\n\n # wide部分进行one-hot编码,通过align方法保持所有编码的维度与feature info中存储的信息一致\n start_time = datetime.datetime.now()\n print(f'Number of wide features: {len(wide_cols)}')\n one_hot_list = []\n for col in wide_cols:\n for c in feature_infos[col]['list']:\n one_hot_list.append(f'w{col}_{c}')\n print(f'one hot dimension: {len(one_hot_list)}')\n wide_df = pd.DataFrame(columns=one_hot_list)\n # 生成当前数据集的one hot\n one_hot_df = pd.get_dummies(data[wide_cols].astype(\n str), prefix=['w' + col for col in wide_cols])\n print(f'current one hot dimension: {len(one_hot_df.columns)}')\n # 两个dataframe合并,以feature info中的维度为准\n _, wide_df = wide_df.align(one_hot_df, join='left', axis=1, fill_value=0)\n wide_df = wide_df.astype(np.float32)\n\n end_time = datetime.datetime.now()\n print(f'data processing cost time: {(end_time-start_time)}')\n print(f'wide part dimension: {wide_df.shape}')\n print('*' * 120)\n print(wide_df.columns)\n\n # deep部分\n if norm_mode == 'min_max':\n deep_df = data[deep_cols].apply(min_max_norm, args=(feature_infos,))\n else:\n deep_df = data[deep_cols].apply(var_norm, args=(feature_infos, ))\n print(f'Number of deep features: {len(deep_cols)}')\n print(f'deep part dimension: {deep_df.shape}')\n print('*' * 120)\n\n # 保存到文件\n out_df = pd.DataFrame()\n out_df = out_df.join(wide_df, how='right')\n out_df = out_df.join(deep_df, how='right')\n out_df = out_df.join(data[not_use_cols], how='right')\n out_df.to_csv(nn_data_file, index=False)\n\n\n def data_for_nn(self, train_file, test_file):\n \"\"\"\n 直接读取已经标准化与one hot编码好的csv文件\n \"\"\"\n train_data = pd.read_csv(os.path.join(self.data_path, train_file))\n test_data = pd.read_csv(os.path.join(self.data_path, test_file))\n return train_data, test_data\n\n\n\n\nif __name__ == '__main__':\n # 加载数据\n data_loader = myDataLoader('./data/molecule_open_data')\n train_data, test_data = data_loader.dataset_for_boost(\n train_file='candidate_train.csv', test_file='candidate_test_clean.csv', label_file='train_answer.csv')\n\n # data_loader.normalize_to_json(train_data)\n data_loader.prepare_nn_data(train_data, 'train', 'var') # 训练集\n # data_loader.prepare_nn_data(test_data, 'test', 'var') # test A\n data_loader.prepare_nn_data(test_data, 'new_test', 'var') # test B\n\n"
] | [
[
"pandas.isnull",
"pandas.DataFrame",
"pandas.Series"
]
] |
jacks808/tensorflow-chatbot | [
"f9a53891294989a07c9b1516f685883b9f07856c"
] | [
"utils/data_helper.py"
] | [
"import codecs\nimport logging\nimport os\nimport re\nfrom collections import Counter\n\nimport jieba\nimport numpy as np\n\n\ndef cut_file(hparams):\n \"\"\"\n cut a file from sentence to words\n :param hparams: hparams\n :return: None\n \"\"\"\n src_file_path = hparams.data_path\n target_file_path = src_file_path + hparams.cut_data_postfix\n stopwords_path = hparams.stopwords_path\n\n # load stopwords set\n stopword_set = set()\n with open(stopwords_path, 'r', encoding='utf-8') as stopwords:\n for stopword in stopwords:\n stopword_set.add(stopword.strip('\\n'))\n\n output = open(target_file_path, 'w', encoding='utf-8')\n with open(src_file_path, 'r', encoding='utf-8') as content:\n for texts_num, line in enumerate(content):\n line = line.strip('\\n')\n words = cut_sentence(hparams, line)\n for word in words:\n if word not in stopword_set:\n output.write(word.strip() + ' ')\n output.write('\\n')\n\n if (texts_num + 1) % 1000 == 0:\n logging.info(\"process %d line\" % (texts_num + 1))\n\n logging.info(\"Total cut %d line\" % (texts_num + 1))\n output.close()\n\n\ndef cut_sentence(hparams, sentence):\n \"\"\"\n cut word\n :param hparams:\n :param sentence:\n :return:\n \"\"\"\n jieba_dict_path = hparams.jieba_dict_path\n\n if jieba.get_dict_file().name != hparams.jieba_dict_path:\n jieba.set_dictionary(jieba_dict_path)\n\n words = jieba.lcut(sentence, cut_all=False)\n return words\n\n\ndef tokenizer(hparams, sentence):\n \"\"\"\n 切词工具, 后续替换成jieba分词\n # Example:\n pprint(tokenizer('Hello world?? \"sdfs%@#%'))\n :param sentence: 输入的句子\n :return: 词list\n \"\"\"\n if isinstance(sentence, bytes):\n sentence = sentence.decode(\"utf-8\")\n\n tokens = re.findall(r\"[\\w]+|[^\\s\\w]\", sentence)\n return tokens\n\n\ndef build_vocab(hparams, sentences, is_target=False, max_vocab_size=None):\n \"\"\"\n 生成词典\n\n # Example:\n pprint(build_vocab(all_input_sentences))\n print('\\n')\n pprint(build_vocab(all_target_sentences))\n\n :param hparams: hparams\n :param sentences: 句子(不需要切词)\n :param is_target: 是否为decoder使用\n :param max_vocab_size: 最大词典大小\n :return: 词典(使用词查id), 反查表(使用id查词), 词典大小\n \"\"\"\n # 获取counter\n word_counter = Counter()\n vocab = dict()\n reverse_vocab = dict()\n\n # 遍历sentences, 并进行切词和统计\n for sentence in sentences:\n tokens = tokenizer(hparams, sentence)\n word_counter.update(tokens)\n\n # 确定词典大小\n if max_vocab_size is None:\n max_vocab_size = len(word_counter)\n\n # 如果是解码的句子, 则补充开始符号: <s> 和 补全符号<pad>\n if is_target:\n vocab[hparams.SYMBOLS_START] = 0\n vocab[hparams.SYMBOLS_PAD] = 1\n vocab[hparams.SYMBOLS_UNKNOWN] = 2\n vocab_idx = 3\n for key, value in word_counter.most_common(max_vocab_size):\n vocab[key] = vocab_idx\n vocab_idx += 1\n else:\n vocab[hparams.SYMBOLS_PAD] = 0\n vocab[hparams.SYMBOLS_UNKNOWN] = 1\n vocab_idx = 2\n for key, value in word_counter.most_common(max_vocab_size):\n vocab[key] = vocab_idx\n vocab_idx += 1\n\n # 生成反查表\n for key, value in vocab.items():\n reverse_vocab[value] = key\n\n # 返回: 词典(使用词查id), 反查表(使用id查词), 词典大小\n return vocab, reverse_vocab, max_vocab_size\n\n\ndef init_data(hparams):\n \"\"\"\n init data\n :param hparams:\n :return: a data info dict, contains: enc_vocab, dec_vocab, enc_reverse_vocab, dec_reverse_vocab, input_batches, target_batches\n \"\"\"\n all_input_sentences, all_target_sentences = read_data_from_file(hparams)\n\n # encoder data\n enc_vocab, enc_reverse_vocab, enc_vocab_size = build_vocab(hparams, all_input_sentences)\n\n # decoder data\n dec_vocab, dec_reverse_vocab, dec_vocab_size = build_vocab(hparams, all_target_sentences,\n 
is_target=True)\n\n # update hparam\n hparams.enc_vocab_size = enc_vocab_size\n hparams.dec_vocab_size = dec_vocab_size\n\n # padding batch data\n batch_pad_size = len(all_input_sentences) % hparams.batch_size\n if batch_pad_size > 0:\n all_input_sentences.extend(all_input_sentences[:hparams.batch_size - batch_pad_size])\n all_target_sentences.extend(all_target_sentences[:hparams.batch_size - batch_pad_size])\n\n data_info = {\n 'enc_vocab': enc_vocab,\n 'dec_vocab': dec_vocab,\n 'enc_reverse_vocab': enc_reverse_vocab,\n 'dec_reverse_vocab': dec_reverse_vocab,\n 'input_batches': np.reshape(all_input_sentences, [-1, hparams.batch_size]),\n 'target_batches': np.reshape(all_target_sentences, [-1, hparams.batch_size]),\n }\n return data_info\n\n\ndef read_data_from_file(hparams):\n \"\"\"\n read data from file\n :param hparams: use hparams.train_data_path\n :return:\n \"\"\"\n encoder_data = []\n decoder_data = []\n\n # use cut file\n data_path = hparams.data_path + hparams.cut_data_postfix\n if not os.path.exists(data_path):\n raise Exception(\"cut file not exists, please run `python main.py --mode=cut_data` \")\n\n with codecs.open(data_path) as file:\n for line in file.readlines():\n try:\n question, answer = line.strip().split('|')\n question = question.strip()\n answer = answer.strip()\n except ValueError:\n raise Exception(\"read_data_from_file error while handle line : \", line,\n \"please fix your data and try again\")\n encoder_data.append(question)\n decoder_data.append(answer)\n\n return encoder_data, decoder_data\n"
] | [
[
"numpy.reshape"
]
] |
dmitryvinn/nevergrad | [
"4909e3f694bdb7dbef8d27d70a9ea1f70aca6892"
] | [
"nevergrad/optimization/test_callbacks.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport time\nfrom pathlib import Path\nimport logging\nimport os\nimport numpy as np\nimport nevergrad as ng\nimport nevergrad.common.typing as tp\nfrom . import optimizerlib\nfrom . import callbacks\n\n\n# pylint: disable=unused-argument\ndef _func(x: tp.Any, y: tp.Any, blublu: str, array: tp.Any, multiobjective: bool = False) -> tp.Loss:\n return 12 if not multiobjective else [12, 12]\n\n\ndef test_log_parameters(tmp_path: Path) -> None:\n filepath = tmp_path / \"logs.txt\"\n cases = [0, np.int_(1), np.float_(2.0), np.nan, float(\"inf\"), np.inf]\n instrum = ng.p.Instrumentation(\n ng.ops.mutations.Translation()(ng.p.Array(shape=(1,))),\n ng.p.Scalar(),\n blublu=ng.p.Choice(cases),\n array=ng.p.Array(shape=(3, 2)),\n )\n optimizer = optimizerlib.NoisyOnePlusOne(parametrization=instrum, budget=32)\n optimizer.register_callback(\"tell\", ng.callbacks.ParametersLogger(filepath, append=False))\n optimizer.minimize(_func, verbosity=2)\n # pickling\n logger = callbacks.ParametersLogger(filepath)\n logs = logger.load_flattened()\n assert len(logs) == 32\n assert isinstance(logs[-1][\"1\"], float)\n assert len(logs[-1]) == 32\n logs = logger.load_flattened(max_list_elements=2)\n assert len(logs[-1]) == 28\n # deletion\n logger = callbacks.ParametersLogger(filepath, append=False)\n assert not logger.load()\n\n\ndef test_multiobjective_log_parameters(tmp_path: Path) -> None:\n filepath = tmp_path / \"logs.txt\"\n instrum = ng.p.Instrumentation(\n None, 2.0, blublu=\"blublu\", array=ng.p.Array(shape=(3, 2)), multiobjective=True\n )\n optimizer = optimizerlib.OnePlusOne(parametrization=instrum, budget=2)\n optimizer.register_callback(\"tell\", ng.callbacks.ParametersLogger(filepath, append=False))\n optimizer.minimize(_func, verbosity=2)\n # pickling\n logger = callbacks.ParametersLogger(filepath)\n logs = logger.load_flattened()\n assert len(logs) == 2\n\n\ndef test_chaining_log_parameters(tmp_path: Path) -> None:\n filepath = tmp_path / \"logs.txt\"\n params = ng.p.Instrumentation(\n None, 2.0, blublu=\"blublu\", array=ng.p.Array(shape=(3, 2)), multiobjective=False\n )\n zmethods = [\"CauchyLHSSearch\", \"DE\", \"CMA\"]\n ztmp1 = [ng.optimizers.registry[zmet] for zmet in zmethods]\n optmodel = ng.families.Chaining(ztmp1, [50, 50]) #\n optim = optmodel(parametrization=params, budget=100, num_workers=3)\n logger = ng.callbacks.ParametersLogger(filepath)\n optim.register_callback(\"tell\", logger)\n optim.minimize(_func, verbosity=2)\n # read\n logger = callbacks.ParametersLogger(filepath)\n logs = logger.load_flattened()\n assert len(logs) == 100\n\n\ndef test_dump_callback(tmp_path: Path) -> None:\n filepath = tmp_path / \"pickle.pkl\"\n optimizer = optimizerlib.OnePlusOne(parametrization=2, budget=32)\n optimizer.register_callback(\"tell\", ng.callbacks.OptimizerDump(filepath))\n cand = optimizer.ask()\n assert not filepath.exists()\n optimizer.tell(cand, 0)\n assert filepath.exists()\n\n\ndef test_progressbar_dump(tmp_path: Path) -> None:\n filepath = tmp_path / \"pickle.pkl\"\n optimizer = optimizerlib.OnePlusOne(parametrization=2, budget=32)\n optimizer.register_callback(\"tell\", ng.callbacks.ProgressBar())\n for _ in range(8):\n cand = optimizer.ask()\n optimizer.tell(cand, 0)\n optimizer.dump(filepath)\n # should keep working after dump\n cand = optimizer.ask()\n optimizer.tell(cand, 0)\n # 
and be reloadable\n optimizer = optimizerlib.OnePlusOne.load(filepath)\n for _ in range(12):\n cand = optimizer.ask()\n optimizer.tell(cand, 0)\n\n\nclass _EarlyStoppingTestee:\n def __init__(self) -> None:\n self.num_calls = 0\n\n def __call__(self, *args, **kwds) -> float:\n self.num_calls += 1\n return np.random.rand()\n\n\ndef test_early_stopping() -> None:\n instrum = ng.p.Instrumentation(None, 2.0, blublu=\"blublu\", array=ng.p.Array(shape=(3, 2)))\n func = _EarlyStoppingTestee()\n optimizer = optimizerlib.OnePlusOne(parametrization=instrum, budget=100)\n early_stopping = ng.callbacks.EarlyStopping(lambda opt: opt.num_ask > 3)\n optimizer.register_callback(\"ask\", early_stopping)\n optimizer.register_callback(\"ask\", ng.callbacks.EarlyStopping.timer(100)) # should not get triggered\n optimizer.minimize(func, verbosity=2)\n # num_ask is set at the end of ask, so the callback sees the old value.\n assert func.num_calls == 4\n # below functions are included in the docstring of EarlyStopping\n assert optimizer.current_bests[\"minimum\"].mean < 12\n assert optimizer.recommend().loss < 12 # type: ignore\n\n\ndef test_duration_criterion() -> None:\n optim = optimizerlib.OnePlusOne(2, budget=100)\n crit = ng.callbacks._DurationCriterion(0.01)\n assert not crit(optim)\n assert not crit(optim)\n assert not crit(optim)\n time.sleep(0.01)\n assert crit(optim)\n\n\ndef test_optimization_logger(caplog) -> None:\n instrum = ng.p.Instrumentation(\n None, 2.0, blublu=\"blublu\", array=ng.p.Array(shape=(3, 2)), multiobjective=False\n )\n logging.basicConfig(level=os.environ.get(\"LOGLEVEL\", \"INFO\"))\n logger = logging.getLogger(__name__)\n optimizer = optimizerlib.OnePlusOne(parametrization=instrum, budget=3)\n optimizer.register_callback(\n \"tell\",\n callbacks.OptimizationLogger(\n logger=logger, log_level=logging.INFO, log_interval_tells=10, log_interval_seconds=0.1\n ),\n )\n with caplog.at_level(logging.INFO):\n optimizer.minimize(_func, verbosity=2)\n assert (\n \"After 0, recommendation is Instrumentation(Tuple(None,2.0),Dict(array=Array{(3,2)},blublu=blublu,multiobjective=False))\"\n in caplog.text\n )\n\n\ndef test_optimization_logger_MOO(caplog) -> None:\n instrum = ng.p.Instrumentation(\n None, 2.0, blublu=\"blublu\", array=ng.p.Array(shape=(3, 2)), multiobjective=True\n )\n logging.basicConfig(level=os.environ.get(\"LOGLEVEL\", \"INFO\"))\n logger = logging.getLogger(__name__)\n optimizer = optimizerlib.OnePlusOne(parametrization=instrum, budget=3)\n optimizer.register_callback(\n \"tell\",\n callbacks.OptimizationLogger(\n logger=logger, log_level=logging.INFO, log_interval_tells=10, log_interval_seconds=0.1\n ),\n )\n with caplog.at_level(logging.INFO):\n optimizer.minimize(_func, verbosity=2)\n assert (\n \"After 0, the respective minimum loss for each objective in the pareto front is [12. 12.]\"\n in caplog.text\n )\n"
] | [
[
"numpy.random.rand",
"numpy.float_",
"numpy.int_"
]
] |
kmarathe10/mmf | [
"2e4acaad7ca8eee4319e1205a560eed81733a0be"
] | [
"tests/modules/test_losses.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates.\nimport collections\nimport unittest\nfrom unittest.mock import MagicMock\n\nimport torch\n\nimport mmf.modules.losses as losses\nfrom mmf.common.registry import registry\nfrom mmf.common.sample import SampleList\n\nRETURN_VALUE = torch.tensor(1.0)\n\n\ndef build_loss_side_effect(return_value=RETURN_VALUE):\n def loss_side_effect(item):\n loss_object_mock = MagicMock(return_value=return_value)\n loss_class_mock = MagicMock(return_value=loss_object_mock)\n valid_losses = [\"cross_entropy\", \"multi\"]\n if isinstance(item, collections.abc.MutableMapping):\n if item[\"type\"] not in valid_losses:\n return None\n elif item not in valid_losses:\n return None\n else:\n return loss_class_mock\n\n return loss_side_effect\n\n\nclass TestModuleLosses(unittest.TestCase):\n def setUp(self):\n torch.manual_seed(1234)\n\n def test_mmf_loss(self):\n get_loss_class_mock = MagicMock(side_effect=build_loss_side_effect())\n registry.get_loss_class = get_loss_class_mock\n # Test if MMFLoss accepts empty parameters\n self.assertRaises(ValueError, losses.MMFLoss)\n self.assertTrue(losses.MMFLoss({\"type\": \"cross_entropy\"}).name, \"cross_entropy\")\n self.assertTrue(losses.MMFLoss(\"cross_entropy\").name, \"cross_entropy\")\n self.assertRaises(AssertionError, losses.MMFLoss, [])\n # Multi requires dict\n self.assertRaises(AssertionError, losses.MMFLoss, \"multi\")\n\n cross_entropy = losses.MMFLoss(\"cross_entropy\")\n cross_entropy_from_dict = losses.MMFLoss({\"type\": \"cross_entropy\"})\n sample_list = SampleList()\n sample_list.dataset_type = \"val\"\n sample_list.dataset_name = \"vqa2\"\n\n output = cross_entropy(sample_list, {})\n output_from_dict = cross_entropy_from_dict(sample_list, {})\n\n self.assertEqual(output, {\"val/vqa2/cross_entropy\": torch.tensor(1.0)})\n self.assertEqual(output_from_dict, output)\n\n get_loss_class_mock.side_effect = build_loss_side_effect(1.0)\n output = cross_entropy(sample_list, {})\n\n self.assertEqual(output, {\"val/vqa2/cross_entropy\": torch.tensor(1.0)})\n self.assertEqual(output_from_dict, output)\n\n self.assertTrue(get_loss_class_mock.called)\n self.assertEqual(get_loss_class_mock.call_count, 5)\n\n def test_caption_cross_entropy(self):\n caption_ce_loss = losses.CaptionCrossEntropyLoss()\n\n expected = dict()\n predicted = dict()\n\n # Test complete match\n expected[\"targets\"] = torch.empty((1, 10), dtype=torch.long)\n expected[\"targets\"].fill_(4)\n predicted[\"scores\"] = torch.zeros((1, 10, 10))\n predicted[\"scores\"][:, :, 4] = 100.0\n\n self.assertEqual(caption_ce_loss(expected, predicted).item(), 0.0)\n\n # Test random initialized\n torch.manual_seed(1234)\n expected[\"targets\"] = torch.randint(0, 9491, (5, 10))\n predicted[\"scores\"] = torch.rand((5, 10, 9491))\n\n self.assertAlmostEqual(caption_ce_loss(expected, predicted).item(), 9.2507, 4)\n"
] | [
[
"torch.zeros",
"torch.rand",
"torch.manual_seed",
"torch.randint",
"torch.tensor",
"torch.empty"
]
] |
andyGFHill/fieldosophy | [
"8677048d56b382a45a80383fe8ff84d75a5f9760"
] | [
"examples/SPDE/2D/SPDEFEM2DSphereNonStat.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nThis script demonstrates:\n * Creating an spherical Matérn FEM approximation model in 2 dimensions.\n * Generate samples from this model.\n * Compute covariances.\n\n\nThis file is part of Fieldosophy, a toolkit for random fields.\n\nCopyright (C) 2021 Anders Gunnar Felix Hildeman <[email protected]>\n\nThis Source Code is subject to the terms of the BSD 3-Clause License.\nIf a copy of the license was not distributed with this file, you can obtain one at https://opensource.org/licenses/BSD-3-Clause.\n\n\"\"\"\n\n\n# Import package\nfrom fieldosophy.GRF import FEM\nfrom fieldosophy.GRF import GRF\nfrom fieldosophy import mesh as mesher\n\nfrom matplotlib import pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom mpl_toolkits.basemap import Basemap\nfrom matplotlib import cm\n\nimport numpy as np\nfrom scipy import sparse\nfrom scipy import stats\nfrom scipy import optimize\n\n\n \nprint(\"Running two-dimensional FEM Manifold test case\")\n\n\nplt.figure(1)\nplt.clf()\n#plt.figure(2)\n#plt.clf()\n\n\n# %% Create 2D mesh\n\n# define boundary [in degrees]\nlon = [-180.0, 180.0]\nlat = [-60.0, 60.0]\n\n# Define smallest correlation range [in degrees]\ncorrMin = 50.0\n# Define extension range [in degrees]\nextension = corrMin\n\n# Create data points\ndataGrid = np.meshgrid( \\\n np.linspace(lon[0], lon[1], num = int(np.ceil( np.diff(lon)[0]/extension*1.2 )) ), \\\n np.linspace(lat[0], lat[1], num = int(np.ceil( np.diff(lat)[0]/extension*1.2 )) ) \\\n )\ndataPoints = np.hstack( (dataGrid[0].reshape(-1,1), dataGrid[1].reshape(-1,1)) )\n\n# Mesh\nprint(\"Compute Mesh\")\nmeshPlane = None\n\n# Create spherical mesh\nmeshSphere = mesher.Mesh.meshOnSphere( maxDiam = 2 * np.sin( 2 * extension / 180.0 * np.pi / 2.0 ), maxNumNodes = int(1e4), radius = 1)\n\n\n# Cut away unwanted regions\nmeshSphere = meshSphere.cutOutsideMeshOnSphere( \\\n mesher.geometrical.lonlat2Sphere( dataPoints.transpose() ), \\\n distance = 1.1 * extension / 180.0 * np.pi )\n\n\n# Create refined sphere\nmeshSphere = meshSphere.refine( \\\n maxDiam = 1 * np.sin( extension / 180.0 * np.pi / 2.0 ), \\\n maxNumNodes = meshSphere.N + 10000, \\\n transformation = mesher.geometrical.mapToHypersphere )\n \n# Cut away unwanted regions\nmeshSphere = meshSphere.cutOutsideMeshOnSphere( \\\n mesher.geometrical.lonlat2Sphere( dataPoints.transpose() ), \\\n distance = 1.0 * extension / 180.0 * np.pi ) \n\n# Create refined sphere\nmeshSphere = meshSphere.refine( \\\n maxDiam = 2/5 * np.sin( corrMin / 180.0 * np.pi / 2.0 ), \\\n maxNumNodes = meshSphere.N + 10000, \\\n transformation = mesher.geometrical.mapToHypersphere )\n\n\n\nprint(\"Plot mesh\")\n\nfig = plt.figure(1)\nax = fig.add_subplot(221, projection='3d')\nax.cla()\nax.set_title( \"Mesh\" ) \n\n# Plot mesh\nmeshPlotter = mesher.MeshPlotter(meshSphere)\nedges = meshPlotter.getLines()\nax.plot(edges[0], edges[1], edges[2], color=\"blue\")\nedges = meshPlotter.getBoundaryLines()\nax.plot(edges[0], edges[1], edges[2], color=\"red\")\n\n\ntemp = mesher.geometrical.lonlat2Sphere(dataPoints.transpose())\nax.scatter( temp[0,:], temp[1,:], temp[2,:], color=\"red\" ) \n\n\n\n\n\n# %% Create FEM system\n\nprint(\"Set up FEM system\")\n\n# Define the random field\nnu = 2\nsigma = 1\nsigmaEps = 1e-3\n\n# Get mid points of triangles\ntriPoints = np.mean( meshSphere.nodes[ meshSphere.triangles, : ], axis=1 )\n# Set ranges in longitudal and latitudal directions\nr = np.array([1*corrMin, 3*corrMin]) / 180.0 * np.pi\nr = np.repeat( 
r.reshape((1,-1)), repeats = meshSphere.NT, axis=0)\n# Compute local basis of tangent spaces\nvectors = FEM.tangentVectorsOnSphere( triPoints, northPole = np.array([0.0,0.0,1.0]) )\n\ndef mapFEMParams( params ):\n # Function to map own parameters to FEM parameters\n \n # Compute kappa and H\n logGSqrt, GInv = FEM.orthVectorsToG( vectors, params[\"r\"]/np.sqrt(8*nu) )\n\n return (logGSqrt, GInv)\n\n\nBCDirichlet = np.NaN * np.ones((meshSphere.N))\nBCDirichlet[meshSphere.getBoundary()[\"nodes\"]] = 0\nBCDirichlet = None\nBCRobin = np.ones( (meshSphere.getBoundary()[\"edges\"].shape[0], 2) )\nBCRobin[:, 0] = 0 # Association with constant\nBCRobin[:, 1] = - 1 # Association with solution\n# BCRobin = None\n\n# Create FEM object\nfem = FEM.nonStatFEM( mesh = meshSphere, childParams = { \"r\":r, \"f\":mapFEMParams}, nu = nu, sigma = sigma, BCDirichlet = BCDirichlet, BCRobin = BCRobin )\n\n\n# temp = triPoints\n# ax.scatter( temp[:, 0], temp[:,1], temp[:,2], color=\"green\" ) \n\n\n\n\n\n# %% Sample\n\n# Acquire realizations\nprint(\"Generate realizations\")\n\nM = int(5e3)\n\nZ = fem.generateRandom( M )\n\n\nobsPoints = ( \\\n np.linspace(0.99*lon[0]+0.01*lon[1],0.01*lon[0]+0.99*lon[1], num=80), \\\n np.linspace(0.99*lat[0]+0.01*lat[1],0.01*lat[0]+0.99*lat[1], num=80) \\\n )\nobsPoints = np.meshgrid( obsPoints[0], obsPoints[1] )\nobsPoints3D = mesher.geometrical.lonlat2Sphere( np.stack( (obsPoints[0].flatten(), obsPoints[1].flatten()), axis=0 ) ) * 0.99\nobsPoints3D = np.ascontiguousarray( obsPoints3D.transpose() )\n\n# temp = obsPoints3D\n# ax.scatter( temp[:, 0], temp[:,1], temp[:,2], color=\"green\" ) \n\n# Get observation matrix\nprint(\"Acquire observation matrix\")\nobsMat = fem.mesh.getObsMat( obsPoints3D, embTol = 0.05, centersOfCurvature = np.zeros( (1,3) ) ) \n# obsMat = fem.mesh.getObsMat( triPoints, embTol = 20 )\n# obsMat = fem.mesh.getObsMat( meshSphere.nodes[meshSphere.triangles[:,0], :], embTol = 2/5 * np.sin( corrMin / 180.0 * np.pi / 2.0 ) / 10 )\nZObs = obsMat.tocsr() * Z + stats.norm.rvs( loc = 0, scale = sigmaEps, size = M*obsMat.shape[0] ).reshape((obsMat.shape[0], M))\n\n\n\n\n\n\n\n# %% Plot \n\n\nfig = plt.figure(1)\n# ax = fig.add_subplot(122, projection='3d')\nax = fig.add_subplot(222)\nax.cla()\nax.set_title( \"A realization\" ) \n\n# m = Basemap(projection='ortho',lat_0=0,lon_0=0) \nm = Basemap(projection='moll',lon_0=0,resolution='c')\nm.drawmapboundary(fill_color='aquamarine')\nm.drawcoastlines(linewidth=0.25)\nm.drawcountries(linewidth=0.25)\nm.fillcontinents(color='coral',lake_color='aqua')\nm.drawmeridians(np.arange(0,360,30)) # grid every 30 deg\nm.drawparallels(np.arange(-90,90,30))\n\nx,y = m(obsPoints[0], obsPoints[1])\n\nm.contourf( x, y, ZObs[:,0].reshape( obsPoints[0].shape ) )\n\n\n# lon, lat = mesher.geometrical.sphere2Lonlat( triPoints )\n# x,y = map( lon, lat )\n# ax.tricontourf(lon,lat, ZObs[:,0])\n\n# collec = ax.plot_trisurf( meshSphere.nodes[:,0], meshSphere.nodes[:,1], meshSphere.nodes[:,2], \\\n# triangles = meshSphere.triangles, cmap = cm.jet, shade = False )\n# collec.set_array( ZObs[:,0] )\n# collec.autoscale()\n\n\n\n\n\n\nprint(\"Plot covariances\")\n\n\nfig = plt.figure(1)\nax = fig.add_subplot(223)\nax.cla()\nax.set_title( \"Covariance\" )\n\n\n# Get point to compare covariance with\ncovPoint = np.array( [ [0,0] ] )\ncovPoint3D = mesher.geometrical.lonlat2Sphere( covPoint.transpose() ) * 0.95\ncovPoint3D = np.ascontiguousarray( covPoint3D.transpose() )\ncovObsMat = fem.mesh.getObsMat( covPoint3D, embTol = 0.05, centersOfCurvature = 
np.zeros( (1,3) ) )\n\n\n# Compute SPDE covariance\nruny = fem.multiplyWithCovariance(covObsMat.transpose())\nruny = obsMat.tocsr() * runy\n\n# m = Basemap(projection='ortho',lat_0=0,lon_0=0) \nm = Basemap(projection='moll',lon_0=0,resolution='c')\nm.drawmapboundary(fill_color='aquamarine')\nm.drawcoastlines(linewidth=0.25)\nm.drawcountries(linewidth=0.25)\nm.fillcontinents(color='coral',lake_color='aqua')\nm.drawmeridians(np.arange(0,360,30)) # grid every 30 deg\nm.drawparallels(np.arange(-90,90,30))\n\nm.contourf( x, y, runy.reshape( obsPoints[0].shape ) )\n\n\n\n\n\n\n\n\n\nfig = plt.figure(1)\nax = fig.add_subplot(224)\nax.cla()\nax.set_title( \"Conditional\" )\n\n\n# Get points to condition on\ncondPoints = np.array( [ [0,0], [12,57] ] )\ncondPoints3D = mesher.geometrical.lonlat2Sphere( condPoints.transpose() ) * 0.99\ncondPoints3D = np.ascontiguousarray( condPoints3D.transpose() )\ncondObsMat = fem.mesh.getObsMat( condPoints3D, embTol = 0.05, centersOfCurvature = np.zeros( (1,3) ) )\ncondVal = np.array( [1, -1] )\n\n# Compute conditional distribution\ncondDistr = fem.cond(condVal, condObsMat, sigmaEps)\n# Get conditional mean at observation points\ncondMean = obsMat.tocsr() * condDistr.mu\n\n# m = Basemap(projection='ortho',lat_0=0,lon_0=0) \nm = Basemap(projection='moll',lon_0=0,resolution='c')\nm.drawmapboundary(fill_color='aquamarine')\nm.drawcoastlines(linewidth=0.25)\nm.drawcountries(linewidth=0.25)\nm.fillcontinents(color='coral',lake_color='aqua')\nm.drawmeridians(np.arange(0,360,30)) # grid every 30 deg\nm.drawparallels(np.arange(-90,90,30))\n\nm.contourf( x, y, condMean.reshape( obsPoints[0].shape ) )\n\n\n\n\n# %% Plot marginal standard deviation\n\n\nfig = plt.figure(1)\nax = fig.add_subplot(224)\nax.cla()\nax.set_title( \"Marginal std\" )\n\n# m = Basemap(projection='ortho',lat_0=0,lon_0=0) \nm = Basemap(projection='moll',lon_0=0,resolution='c')\nm.drawmapboundary(fill_color='aquamarine')\nm.drawcoastlines(linewidth=0.25)\nm.drawcountries(linewidth=0.25)\nm.fillcontinents(color='coral',lake_color='aqua')\nm.drawmeridians(np.arange(0,360,30)) # grid every 30 deg\nm.drawparallels(np.arange(-90,90,30))\n\ntemp = m.contourf( x, y, np.std(ZObs,axis=1).reshape(obsPoints[0].shape) )\n\nfig.colorbar(temp, ax=ax, orientation='horizontal')\n\n\n\n\n\n"
] | [
[
"numpy.array",
"numpy.sin",
"scipy.stats.norm.rvs",
"numpy.zeros",
"numpy.ones",
"numpy.mean",
"matplotlib.pyplot.figure",
"numpy.diff",
"numpy.std",
"numpy.arange",
"numpy.sqrt",
"matplotlib.pyplot.clf",
"numpy.linspace",
"numpy.meshgrid"
]
] |
yshuqiao/steelDetect | [
"2424d3706d8d7e351094d2daa2869e70ed450836"
] | [
"mmdet/datasets/pipelines/transforms.py"
] | [
"import inspect\n\nimport mmcv\nimport numpy as np\nfrom numpy import random\n\nfrom mmdet.core import PolygonMasks\nfrom mmdet.core.evaluation.bbox_overlaps import bbox_overlaps\nfrom ..builder import PIPELINES\n\ntry:\n from imagecorruptions import corrupt\nexcept ImportError:\n corrupt = None\n\ntry:\n import albumentations\n from albumentations import Compose\nexcept ImportError:\n albumentations = None\n Compose = None\n\n\[email protected]_module()\nclass Resize(object):\n \"\"\"Resize images & bbox & mask.\n\n This transform resizes the input image to some scale. Bboxes and masks are\n then resized with the same scale factor. If the input dict contains the key\n \"scale\", then the scale in the input dict is used, otherwise the specified\n scale in the init method is used. If the input dict contains the key\n \"scale_factor\" (if MultiScaleFlipAug does not give img_scale but\n scale_factor), the actual scale will be computed by image shape and\n scale_factor.\n\n `img_scale` can either be a tuple (single-scale) or a list of tuple\n (multi-scale). There are 3 multiscale modes:\n\n - ``ratio_range is not None``: randomly sample a ratio from the ratio \\\n range and multiply it with the image scale.\n - ``ratio_range is None`` and ``multiscale_mode == \"range\"``: randomly \\\n sample a scale from the multiscale range.\n - ``ratio_range is None`` and ``multiscale_mode == \"value\"``: randomly \\\n sample a scale from multiple scales.\n\n Args:\n img_scale (tuple or list[tuple]): Images scales for resizing.\n multiscale_mode (str): Either \"range\" or \"value\".\n ratio_range (tuple[float]): (min_ratio, max_ratio)\n keep_ratio (bool): Whether to keep the aspect ratio when resizing the\n image.\n bbox_clip_border (bool, optional): Whether clip the objects outside\n the border of the image. Defaults to True.\n backend (str): Image resize backend, choices are 'cv2' and 'pillow'.\n These two backends generates slightly different results. Defaults\n to 'cv2'.\n override (bool, optional): Whether to override `scale` and\n `scale_factor` so as to call resize twice. Default False. 
If True,\n after the first resizing, the existing `scale` and `scale_factor`\n will be ignored so the second resizing can be allowed.\n This option is a work-around for multiple times of resize in DETR.\n Defaults to False.\n \"\"\"\n\n def __init__(self,\n img_scale=None,\n multiscale_mode='range',\n ratio_range=None,\n keep_ratio=True,\n bbox_clip_border=True,\n backend='cv2',\n override=False):\n if img_scale is None:\n self.img_scale = None\n else:\n if isinstance(img_scale, list):\n self.img_scale = img_scale\n else:\n self.img_scale = [img_scale]\n assert mmcv.is_list_of(self.img_scale, tuple)\n\n if ratio_range is not None:\n # mode 1: given a scale and a range of image ratio\n assert len(self.img_scale) == 1\n else:\n # mode 2: given multiple scales or a range of scales\n assert multiscale_mode in ['value', 'range']\n\n self.backend = backend\n self.multiscale_mode = multiscale_mode\n self.ratio_range = ratio_range\n self.keep_ratio = keep_ratio\n # TODO: refactor the override option in Resize\n self.override = override\n self.bbox_clip_border = bbox_clip_border\n\n @staticmethod\n def random_select(img_scales):\n \"\"\"Randomly select an img_scale from given candidates.\n\n Args:\n img_scales (list[tuple]): Image scales for selection.\n\n Returns:\n (tuple, int): Returns a tuple ``(img_scale, scale_idx)``, \\\n where ``img_scale`` is the selected image scale and \\\n ``scale_idx`` is the selected index in the given candidates.\n \"\"\"\n\n assert mmcv.is_list_of(img_scales, tuple)\n scale_idx = np.random.randint(len(img_scales))\n img_scale = img_scales[scale_idx]\n return img_scale, scale_idx\n\n @staticmethod\n def random_sample(img_scales):\n \"\"\"Randomly sample an img_scale when ``multiscale_mode=='range'``.\n\n Args:\n img_scales (list[tuple]): Image scale range for sampling.\n There must be two tuples in img_scales, which specify the lower\n and upper bound of image scales.\n\n Returns:\n (tuple, None): Returns a tuple ``(img_scale, None)``, where \\\n ``img_scale`` is the sampled scale and None is just a placeholder \\\n to be consistent with :func:`random_select`.\n \"\"\"\n\n assert mmcv.is_list_of(img_scales, tuple) and len(img_scales) == 2\n img_scale_long = [max(s) for s in img_scales]\n img_scale_short = [min(s) for s in img_scales]\n long_edge = np.random.randint(\n min(img_scale_long),\n max(img_scale_long) + 1)\n short_edge = np.random.randint(\n min(img_scale_short),\n max(img_scale_short) + 1)\n img_scale = (long_edge, short_edge)\n return img_scale, None\n\n @staticmethod\n def random_sample_ratio(img_scale, ratio_range):\n \"\"\"Randomly sample an img_scale when ``ratio_range`` is specified.\n\n A ratio will be randomly sampled from the range specified by\n ``ratio_range``.
Then it would be multiplied with ``img_scale`` to\n generate sampled scale.\n\n Args:\n img_scale (tuple): Images scale base to multiply with ratio.\n ratio_range (tuple[float]): The minimum and maximum ratio to scale\n the ``img_scale``.\n\n Returns:\n (tuple, None): Returns a tuple ``(scale, None)``, where \\\n ``scale`` is sampled ratio multiplied with ``img_scale`` and \\\n None is just a placeholder to be consistent with \\\n :func:`random_select`.\n \"\"\"\n\n assert isinstance(img_scale, tuple) and len(img_scale) == 2\n min_ratio, max_ratio = ratio_range\n assert min_ratio <= max_ratio\n ratio = np.random.random_sample() * (max_ratio - min_ratio) + min_ratio\n scale = int(img_scale[0] * ratio), int(img_scale[1] * ratio)\n return scale, None\n\n def _random_scale(self, results):\n \"\"\"Randomly sample an img_scale according to ``ratio_range`` and\n ``multiscale_mode``.\n\n If ``ratio_range`` is specified, a ratio will be sampled and be\n multiplied with ``img_scale``.\n If multiple scales are specified by ``img_scale``, a scale will be\n sampled according to ``multiscale_mode``.\n Otherwise, single scale will be used.\n\n Args:\n results (dict): Result dict from :obj:`dataset`.\n\n Returns:\n dict: Two new keys 'scale` and 'scale_idx` are added into \\\n ``results``, which would be used by subsequent pipelines.\n \"\"\"\n\n if self.ratio_range is not None:\n scale, scale_idx = self.random_sample_ratio(\n self.img_scale[0], self.ratio_range)\n elif len(self.img_scale) == 1:\n scale, scale_idx = self.img_scale[0], 0\n elif self.multiscale_mode == 'range':\n scale, scale_idx = self.random_sample(self.img_scale)\n elif self.multiscale_mode == 'value':\n scale, scale_idx = self.random_select(self.img_scale)\n else:\n raise NotImplementedError\n\n results['scale'] = scale\n results['scale_idx'] = scale_idx\n\n def _resize_img(self, results):\n \"\"\"Resize images with ``results['scale']``.\"\"\"\n for key in results.get('img_fields', ['img']):\n if self.keep_ratio:\n img, scale_factor = mmcv.imrescale(\n results[key],\n results['scale'],\n return_scale=True,\n backend=self.backend)\n # the w_scale and h_scale has minor difference\n # a real fix should be done in the mmcv.imrescale in the future\n new_h, new_w = img.shape[:2]\n h, w = results[key].shape[:2]\n w_scale = new_w / w\n h_scale = new_h / h\n else:\n img, w_scale, h_scale = mmcv.imresize(\n results[key],\n results['scale'],\n return_scale=True,\n backend=self.backend)\n results[key] = img\n\n scale_factor = np.array([w_scale, h_scale, w_scale, h_scale],\n dtype=np.float32)\n results['img_shape'] = img.shape\n # in case that there is no padding\n results['pad_shape'] = img.shape\n results['scale_factor'] = scale_factor\n results['keep_ratio'] = self.keep_ratio\n\n def _resize_bboxes(self, results):\n \"\"\"Resize bounding boxes with ``results['scale_factor']``.\"\"\"\n for key in results.get('bbox_fields', []):\n bboxes = results[key] * results['scale_factor']\n if self.bbox_clip_border:\n img_shape = results['img_shape']\n bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, img_shape[1])\n bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, img_shape[0])\n results[key] = bboxes\n\n def _resize_masks(self, results):\n \"\"\"Resize masks with ``results['scale']``\"\"\"\n for key in results.get('mask_fields', []):\n if results[key] is None:\n continue\n if self.keep_ratio:\n results[key] = results[key].rescale(results['scale'])\n else:\n results[key] = results[key].resize(results['img_shape'][:2])\n\n def _resize_seg(self, results):\n 
\"\"\"Resize semantic segmentation map with ``results['scale']``.\"\"\"\n for key in results.get('seg_fields', []):\n if self.keep_ratio:\n gt_seg = mmcv.imrescale(\n results[key],\n results['scale'],\n interpolation='nearest',\n backend=self.backend)\n else:\n gt_seg = mmcv.imresize(\n results[key],\n results['scale'],\n interpolation='nearest',\n backend=self.backend)\n results['gt_semantic_seg'] = gt_seg\n\n def __call__(self, results):\n \"\"\"Call function to resize images, bounding boxes, masks, semantic\n segmentation map.\n\n Args:\n results (dict): Result dict from loading pipeline.\n\n Returns:\n dict: Resized results, 'img_shape', 'pad_shape', 'scale_factor', \\\n 'keep_ratio' keys are added into result dict.\n \"\"\"\n\n if 'scale' not in results:\n if 'scale_factor' in results:\n img_shape = results['img'].shape[:2]\n scale_factor = results['scale_factor']\n assert isinstance(scale_factor, float)\n results['scale'] = tuple(\n [int(x * scale_factor) for x in img_shape][::-1])\n else:\n self._random_scale(results)\n else:\n if not self.override:\n assert 'scale_factor' not in results, (\n 'scale and scale_factor cannot be both set.')\n else:\n results.pop('scale')\n if 'scale_factor' in results:\n results.pop('scale_factor')\n self._random_scale(results)\n\n self._resize_img(results)\n self._resize_bboxes(results)\n self._resize_masks(results)\n self._resize_seg(results)\n return results\n\n def __repr__(self):\n repr_str = self.__class__.__name__\n repr_str += f'(img_scale={self.img_scale}, '\n repr_str += f'multiscale_mode={self.multiscale_mode}, '\n repr_str += f'ratio_range={self.ratio_range}, '\n repr_str += f'keep_ratio={self.keep_ratio})'\n repr_str += f'bbox_clip_border={self.bbox_clip_border})'\n return repr_str\n\n\[email protected]_module()\nclass RandomFlip(object):\n \"\"\"Flip the image & bbox & mask.\n\n If the input dict contains the key \"flip\", then the flag will be used,\n otherwise it will be randomly decided by a ratio specified in the init\n method.\n\n When random flip is enabled, ``flip_ratio``/``direction`` can either be a\n float/string or tuple of float/string. There are 3 flip modes:\n\n - ``flip_ratio`` is float, ``direction`` is string: the image will be\n ``direction``ly flipped with probability of ``flip_ratio`` .\n E.g., ``flip_ratio=0.5``, ``direction='horizontal'``,\n then image will be horizontally flipped with probability of 0.5.\n - ``flip_ratio`` is float, ``direction`` is list of string: the image wil\n be ``direction[i]``ly flipped with probability of\n ``flip_ratio/len(direction)``.\n E.g., ``flip_ratio=0.5``, ``direction=['horizontal', 'vertical']``,\n then image will be horizontally flipped with probability of 0.25,\n vertically with probability of 0.25.\n - ``flip_ratio`` is list of float, ``direction`` is list of string:\n given ``len(flip_ratio) == len(direction)``, the image wil\n be ``direction[i]``ly flipped with probability of ``flip_ratio[i]``.\n E.g., ``flip_ratio=[0.3, 0.5]``, ``direction=['horizontal',\n 'vertical']``, then image will be horizontally flipped with probability\n of 0.3, vertically with probability of 0.5\n\n Args:\n flip_ratio (float | list[float], optional): The flipping probability.\n Default: None.\n direction(str | list[str], optional): The flipping direction. Options\n are 'horizontal', 'vertical', 'diagonal'. Default: 'horizontal'.\n If input is a list, the length must equal ``flip_ratio``. 
Each\n element in ``flip_ratio`` indicates the flip probability of\n corresponding direction.\n \"\"\"\n\n def __init__(self, flip_ratio=None, direction='horizontal'):\n if isinstance(flip_ratio, list):\n assert mmcv.is_list_of(flip_ratio, float)\n assert 0 <= sum(flip_ratio) <= 1\n elif isinstance(flip_ratio, float):\n assert 0 <= flip_ratio <= 1\n elif flip_ratio is None:\n pass\n else:\n raise ValueError('flip_ratios must be None, float, '\n 'or list of float')\n self.flip_ratio = flip_ratio\n\n valid_directions = ['horizontal', 'vertical', 'diagonal']\n if isinstance(direction, str):\n assert direction in valid_directions\n elif isinstance(direction, list):\n assert mmcv.is_list_of(direction, str)\n assert set(direction).issubset(set(valid_directions))\n else:\n raise ValueError('direction must be either str or list of str')\n self.direction = direction\n\n if isinstance(flip_ratio, list):\n assert len(self.flip_ratio) == len(self.direction)\n\n def bbox_flip(self, bboxes, img_shape, direction):\n \"\"\"Flip bboxes horizontally.\n\n Args:\n bboxes (numpy.ndarray): Bounding boxes, shape (..., 4*k)\n img_shape (tuple[int]): Image shape (height, width)\n direction (str): Flip direction. Options are 'horizontal',\n 'vertical'.\n\n Returns:\n numpy.ndarray: Flipped bounding boxes.\n \"\"\"\n\n assert bboxes.shape[-1] % 4 == 0\n flipped = bboxes.copy()\n if direction == 'horizontal':\n w = img_shape[1]\n flipped[..., 0::4] = w - bboxes[..., 2::4]\n flipped[..., 2::4] = w - bboxes[..., 0::4]\n elif direction == 'vertical':\n h = img_shape[0]\n flipped[..., 1::4] = h - bboxes[..., 3::4]\n flipped[..., 3::4] = h - bboxes[..., 1::4]\n elif direction == 'diagonal':\n w = img_shape[1]\n h = img_shape[0]\n flipped[..., 0::4] = w - bboxes[..., 2::4]\n flipped[..., 1::4] = h - bboxes[..., 3::4]\n flipped[..., 2::4] = w - bboxes[..., 0::4]\n flipped[..., 3::4] = h - bboxes[..., 1::4]\n else:\n raise ValueError(f\"Invalid flipping direction '{direction}'\")\n return flipped\n\n def __call__(self, results):\n \"\"\"Call function to flip bounding boxes, masks, semantic segmentation\n maps.\n\n Args:\n results (dict): Result dict from loading pipeline.\n\n Returns:\n dict: Flipped results, 'flip', 'flip_direction' keys are added \\\n into result dict.\n \"\"\"\n\n if 'flip' not in results:\n if isinstance(self.direction, list):\n # None means non-flip\n direction_list = self.direction + [None]\n else:\n # None means non-flip\n direction_list = [self.direction, None]\n\n if isinstance(self.flip_ratio, list):\n non_flip_ratio = 1 - sum(self.flip_ratio)\n flip_ratio_list = self.flip_ratio + [non_flip_ratio]\n else:\n non_flip_ratio = 1 - self.flip_ratio\n # exclude non-flip\n single_ratio = self.flip_ratio / (len(direction_list) - 1)\n flip_ratio_list = [single_ratio] * (len(direction_list) -\n 1) + [non_flip_ratio]\n\n cur_dir = np.random.choice(direction_list, p=flip_ratio_list)\n\n results['flip'] = cur_dir is not None\n if 'flip_direction' not in results:\n results['flip_direction'] = cur_dir\n if results['flip']:\n # flip image\n for key in results.get('img_fields', ['img']):\n results[key] = mmcv.imflip(\n results[key], direction=results['flip_direction'])\n # flip bboxes\n for key in results.get('bbox_fields', []):\n results[key] = self.bbox_flip(results[key],\n results['img_shape'],\n results['flip_direction'])\n # flip masks\n for key in results.get('mask_fields', []):\n results[key] = results[key].flip(results['flip_direction'])\n\n # flip segs\n for key in results.get('seg_fields', []):\n 
results[key] = mmcv.imflip(\n results[key], direction=results['flip_direction'])\n return results\n\n def __repr__(self):\n return self.__class__.__name__ + f'(flip_ratio={self.flip_ratio})'\n\n\[email protected]_module()\nclass Pad(object):\n \"\"\"Pad the image & mask.\n\n There are two padding modes: (1) pad to a fixed size and (2) pad to the\n minimum size that is divisible by some number.\n Added keys are \"pad_shape\", \"pad_fixed_size\", \"pad_size_divisor\",\n\n Args:\n size (tuple, optional): Fixed padding size.\n size_divisor (int, optional): The divisor of padded size.\n pad_val (float, optional): Padding value, 0 by default.\n \"\"\"\n\n def __init__(self, size=None, size_divisor=None, pad_val=0):\n self.size = size\n self.size_divisor = size_divisor\n self.pad_val = pad_val\n # only one of size and size_divisor should be valid\n assert size is not None or size_divisor is not None\n assert size is None or size_divisor is None\n\n def _pad_img(self, results):\n \"\"\"Pad images according to ``self.size``.\"\"\"\n for key in results.get('img_fields', ['img']):\n if self.size is not None:\n padded_img = mmcv.impad(\n results[key], shape=self.size, pad_val=self.pad_val)\n elif self.size_divisor is not None:\n padded_img = mmcv.impad_to_multiple(\n results[key], self.size_divisor, pad_val=self.pad_val)\n results[key] = padded_img\n results['pad_shape'] = padded_img.shape\n results['pad_fixed_size'] = self.size\n results['pad_size_divisor'] = self.size_divisor\n\n def _pad_masks(self, results):\n \"\"\"Pad masks according to ``results['pad_shape']``.\"\"\"\n pad_shape = results['pad_shape'][:2]\n for key in results.get('mask_fields', []):\n results[key] = results[key].pad(pad_shape, pad_val=self.pad_val)\n\n def _pad_seg(self, results):\n \"\"\"Pad semantic segmentation map according to\n ``results['pad_shape']``.\"\"\"\n for key in results.get('seg_fields', []):\n results[key] = mmcv.impad(\n results[key], shape=results['pad_shape'][:2])\n\n def __call__(self, results):\n \"\"\"Call function to pad images, masks, semantic segmentation maps.\n\n Args:\n results (dict): Result dict from loading pipeline.\n\n Returns:\n dict: Updated result dict.\n \"\"\"\n self._pad_img(results)\n self._pad_masks(results)\n self._pad_seg(results)\n return results\n\n def __repr__(self):\n repr_str = self.__class__.__name__\n repr_str += f'(size={self.size}, '\n repr_str += f'size_divisor={self.size_divisor}, '\n repr_str += f'pad_val={self.pad_val})'\n return repr_str\n\n\[email protected]_module()\nclass Normalize(object):\n \"\"\"Normalize the image.\n\n Added key is \"img_norm_cfg\".\n\n Args:\n mean (sequence): Mean values of 3 channels.\n std (sequence): Std values of 3 channels.\n to_rgb (bool): Whether to convert the image from BGR to RGB,\n default is true.\n \"\"\"\n\n def __init__(self, mean, std, to_rgb=True):\n self.mean = np.array(mean, dtype=np.float32)\n self.std = np.array(std, dtype=np.float32)\n self.to_rgb = to_rgb\n\n def __call__(self, results):\n \"\"\"Call function to normalize images.\n\n Args:\n results (dict): Result dict from loading pipeline.\n\n Returns:\n dict: Normalized results, 'img_norm_cfg' key is added into\n result dict.\n \"\"\"\n for key in results.get('img_fields', ['img']):\n results[key] = mmcv.imnormalize(results[key], self.mean, self.std,\n self.to_rgb)\n results['img_norm_cfg'] = dict(\n mean=self.mean, std=self.std, to_rgb=self.to_rgb)\n return results\n\n def __repr__(self):\n repr_str = self.__class__.__name__\n repr_str += f'(mean={self.mean}, 
std={self.std}, to_rgb={self.to_rgb})'\n return repr_str\n\n\[email protected]_module()\nclass RandomCrop(object):\n \"\"\"Random crop the image & bboxes & masks.\n\n The absolute `crop_size` is sampled based on `crop_type` and `image_size`,\n then the cropped results are generated.\n\n Args:\n crop_size (tuple): The relative ratio or absolute pixels of\n height and width.\n crop_type (str, optional): one of \"relative_range\", \"relative\",\n \"absolute\", \"absolute_range\". \"relative\" randomly crops\n (h * crop_size[0], w * crop_size[1]) part from an input of size\n (h, w). \"relative_range\" uniformly samples relative crop size from\n range [crop_size[0], 1] and [crop_size[1], 1] for height and width\n respectively. \"absolute\" crops from an input with absolute size\n (crop_size[0], crop_size[1]). \"absolute_range\" uniformly samples\n crop_h in range [crop_size[0], min(h, crop_size[1])] and crop_w\n in range [crop_size[0], min(w, crop_size[1])]. Default \"absolute\".\n allow_negative_crop (bool, optional): Whether to allow a crop that does\n not contain any bbox area. Default False.\n bbox_clip_border (bool, optional): Whether clip the objects outside\n the border of the image. Defaults to True.\n\n Note:\n - If the image is smaller than the absolute crop size, return the\n original image.\n - The keys for bboxes, labels and masks must be aligned. That is,\n `gt_bboxes` corresponds to `gt_labels` and `gt_masks`, and\n `gt_bboxes_ignore` corresponds to `gt_labels_ignore` and\n `gt_masks_ignore`.\n - If the crop does not contain any gt-bbox region and\n `allow_negative_crop` is set to False, skip this image.\n \"\"\"\n\n def __init__(self,\n crop_size,\n crop_type='absolute',\n allow_negative_crop=False,\n bbox_clip_border=True):\n if crop_type not in [\n 'relative_range', 'relative', 'absolute', 'absolute_range'\n ]:\n raise ValueError(f'Invalid crop_type {crop_type}.')\n if crop_type in ['absolute', 'absolute_range']:\n assert crop_size[0] > 0 and crop_size[1] > 0\n assert isinstance(crop_size[0], int) and isinstance(\n crop_size[1], int)\n else:\n assert 0 < crop_size[0] <= 1 and 0 < crop_size[1] <= 1\n self.crop_size = crop_size\n self.crop_type = crop_type\n self.allow_negative_crop = allow_negative_crop\n self.bbox_clip_border = bbox_clip_border\n # The key correspondence from bboxes to labels and masks.\n self.bbox2label = {\n 'gt_bboxes': 'gt_labels',\n 'gt_bboxes_ignore': 'gt_labels_ignore'\n }\n self.bbox2mask = {\n 'gt_bboxes': 'gt_masks',\n 'gt_bboxes_ignore': 'gt_masks_ignore'\n }\n\n def _crop_data(self, results, crop_size, allow_negative_crop):\n \"\"\"Function to randomly crop images, bounding boxes, masks, semantic\n segmentation maps.\n\n Args:\n results (dict): Result dict from loading pipeline.\n crop_size (tuple): Expected absolute size after cropping, (h, w).\n allow_negative_crop (bool): Whether to allow a crop that does not\n contain any bbox area. 
Default to False.\n\n Returns:\n dict: Randomly cropped results, 'img_shape' key in result dict is\n updated according to crop size.\n \"\"\"\n assert crop_size[0] > 0 and crop_size[1] > 0\n for key in results.get('img_fields', ['img']):\n img = results[key]\n margin_h = max(img.shape[0] - crop_size[0], 0)\n margin_w = max(img.shape[1] - crop_size[1], 0)\n offset_h = np.random.randint(0, margin_h + 1)\n offset_w = np.random.randint(0, margin_w + 1)\n crop_y1, crop_y2 = offset_h, offset_h + crop_size[0]\n crop_x1, crop_x2 = offset_w, offset_w + crop_size[1]\n\n # crop the image\n img = img[crop_y1:crop_y2, crop_x1:crop_x2, ...]\n img_shape = img.shape\n results[key] = img\n results['img_shape'] = img_shape\n\n # crop bboxes accordingly and clip to the image boundary\n for key in results.get('bbox_fields', []):\n # e.g. gt_bboxes and gt_bboxes_ignore\n bbox_offset = np.array([offset_w, offset_h, offset_w, offset_h],\n dtype=np.float32)\n bboxes = results[key] - bbox_offset\n if self.bbox_clip_border:\n bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, img_shape[1])\n bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, img_shape[0])\n valid_inds = (bboxes[:, 2] > bboxes[:, 0]) & (\n bboxes[:, 3] > bboxes[:, 1])\n # If the crop does not contain any gt-bbox area and\n # allow_negative_crop is False, skip this image.\n if (key == 'gt_bboxes' and not valid_inds.any()\n and not allow_negative_crop):\n return None\n results[key] = bboxes[valid_inds, :]\n # label fields. e.g. gt_labels and gt_labels_ignore\n label_key = self.bbox2label.get(key)\n if label_key in results:\n results[label_key] = results[label_key][valid_inds]\n\n # mask fields, e.g. gt_masks and gt_masks_ignore\n mask_key = self.bbox2mask.get(key)\n if mask_key in results:\n results[mask_key] = results[mask_key][\n valid_inds.nonzero()[0]].crop(\n np.asarray([crop_x1, crop_y1, crop_x2, crop_y2]))\n\n # crop semantic seg\n for key in results.get('seg_fields', []):\n results[key] = results[key][crop_y1:crop_y2, crop_x1:crop_x2]\n\n return results\n\n def _get_crop_size(self, image_size):\n \"\"\"Randomly generates the absolute crop size based on `crop_type` and\n `image_size`.\n\n Args:\n image_size (tuple): (h, w).\n\n Returns:\n crop_size (tuple): (crop_h, crop_w) in absolute pixels.\n \"\"\"\n h, w = image_size\n if self.crop_type == 'absolute':\n return (min(self.crop_size[0], h), min(self.crop_size[1], w))\n elif self.crop_type == 'absolute_range':\n assert self.crop_size[0] <= self.crop_size[1]\n crop_h = np.random.randint(\n min(h, self.crop_size[0]),\n min(h, self.crop_size[1]) + 1)\n crop_w = np.random.randint(\n min(w, self.crop_size[0]),\n min(w, self.crop_size[1]) + 1)\n return crop_h, crop_w\n elif self.crop_type == 'relative':\n crop_h, crop_w = self.crop_size\n return int(h * crop_h + 0.5), int(w * crop_w + 0.5)\n elif self.crop_type == 'relative_range':\n crop_size = np.asarray(self.crop_size, dtype=np.float32)\n crop_h, crop_w = crop_size + np.random.rand(2) * (1 - crop_size)\n return int(h * crop_h + 0.5), int(w * crop_w + 0.5)\n\n def __call__(self, results):\n \"\"\"Call function to randomly crop images, bounding boxes, masks,\n semantic segmentation maps.\n\n Args:\n results (dict): Result dict from loading pipeline.\n\n Returns:\n dict: Randomly cropped results, 'img_shape' key in result dict is\n updated according to crop size.\n \"\"\"\n image_size = results['img'].shape[:2]\n crop_size = self._get_crop_size(image_size)\n results = self._crop_data(results, crop_size, self.allow_negative_crop)\n return results\n\n def 
__repr__(self):\n repr_str = self.__class__.__name__\n repr_str += f'(crop_size={self.crop_size}, '\n repr_str += f'crop_type={self.crop_type}, '\n repr_str += f'allow_negative_crop={self.allow_negative_crop}, '\n repr_str += f'bbox_clip_border={self.bbox_clip_border})'\n return repr_str\n\n\[email protected]_module()\nclass SegRescale(object):\n \"\"\"Rescale semantic segmentation maps.\n\n Args:\n scale_factor (float): The scale factor of the final output.\n backend (str): Image rescale backend, choices are 'cv2' and 'pillow'.\n These two backends generates slightly different results. Defaults\n to 'cv2'.\n \"\"\"\n\n def __init__(self, scale_factor=1, backend='cv2'):\n self.scale_factor = scale_factor\n self.backend = backend\n\n def __call__(self, results):\n \"\"\"Call function to scale the semantic segmentation map.\n\n Args:\n results (dict): Result dict from loading pipeline.\n\n Returns:\n dict: Result dict with semantic segmentation map scaled.\n \"\"\"\n\n for key in results.get('seg_fields', []):\n if self.scale_factor != 1:\n results[key] = mmcv.imrescale(\n results[key],\n self.scale_factor,\n interpolation='nearest',\n backend=self.backend)\n return results\n\n def __repr__(self):\n return self.__class__.__name__ + f'(scale_factor={self.scale_factor})'\n\n\[email protected]_module()\nclass PhotoMetricDistortion(object):\n \"\"\"Apply photometric distortion to image sequentially, every transformation\n is applied with a probability of 0.5. The position of random contrast is in\n second or second to last.\n\n 1. random brightness\n 2. random contrast (mode 0)\n 3. convert color from BGR to HSV\n 4. random saturation\n 5. random hue\n 6. convert color from HSV to BGR\n 7. random contrast (mode 1)\n 8. randomly swap channels\n\n Args:\n brightness_delta (int): delta of brightness.\n contrast_range (tuple): range of contrast.\n saturation_range (tuple): range of saturation.\n hue_delta (int): delta of hue.\n \"\"\"\n\n def __init__(self,\n brightness_delta=32,\n contrast_range=(0.5, 1.5),\n saturation_range=(0.5, 1.5),\n hue_delta=18):\n self.brightness_delta = brightness_delta\n self.contrast_lower, self.contrast_upper = contrast_range\n self.saturation_lower, self.saturation_upper = saturation_range\n self.hue_delta = hue_delta\n\n def __call__(self, results):\n \"\"\"Call function to perform photometric distortion on images.\n\n Args:\n results (dict): Result dict from loading pipeline.\n\n Returns:\n dict: Result dict with images distorted.\n \"\"\"\n\n if 'img_fields' in results:\n assert results['img_fields'] == ['img'], \\\n 'Only single img_fields is allowed'\n img = results['img']\n assert img.dtype == np.float32, \\\n 'PhotoMetricDistortion needs the input image of dtype np.float32,'\\\n ' please set \"to_float32=True\" in \"LoadImageFromFile\" pipeline'\n # random brightness\n if random.randint(2):\n delta = random.uniform(-self.brightness_delta,\n self.brightness_delta)\n img += delta\n\n # mode == 0 --> do random contrast first\n # mode == 1 --> do random contrast last\n mode = random.randint(2)\n if mode == 1:\n if random.randint(2):\n alpha = random.uniform(self.contrast_lower,\n self.contrast_upper)\n img *= alpha\n\n # convert color from BGR to HSV\n img = mmcv.bgr2hsv(img)\n\n # random saturation\n if random.randint(2):\n img[..., 1] *= random.uniform(self.saturation_lower,\n self.saturation_upper)\n\n # random hue\n if random.randint(2):\n img[..., 0] += random.uniform(-self.hue_delta, self.hue_delta)\n img[..., 0][img[..., 0] > 360] -= 360\n img[..., 
0][img[..., 0] < 0] += 360\n\n # convert color from HSV to BGR\n img = mmcv.hsv2bgr(img)\n\n # random contrast\n if mode == 0:\n if random.randint(2):\n alpha = random.uniform(self.contrast_lower,\n self.contrast_upper)\n img *= alpha\n\n # randomly swap channels\n if random.randint(2):\n img = img[..., random.permutation(3)]\n\n results['img'] = img\n return results\n\n def __repr__(self):\n repr_str = self.__class__.__name__\n repr_str += f'(\\nbrightness_delta={self.brightness_delta},\\n'\n repr_str += 'contrast_range='\n repr_str += f'{(self.contrast_lower, self.contrast_upper)},\\n'\n repr_str += 'saturation_range='\n repr_str += f'{(self.saturation_lower, self.saturation_upper)},\\n'\n repr_str += f'hue_delta={self.hue_delta})'\n return repr_str\n\n\[email protected]_module()\nclass Expand(object):\n \"\"\"Random expand the image & bboxes.\n\n Randomly place the original image on a canvas of 'ratio' x original image\n size filled with mean values. The ratio is in the range of ratio_range.\n\n Args:\n mean (tuple): mean value of dataset.\n to_rgb (bool): if need to convert the order of mean to align with RGB.\n ratio_range (tuple): range of expand ratio.\n prob (float): probability of applying this transformation\n \"\"\"\n\n def __init__(self,\n mean=(0, 0, 0),\n to_rgb=True,\n ratio_range=(1, 4),\n seg_ignore_label=None,\n prob=0.5):\n self.to_rgb = to_rgb\n self.ratio_range = ratio_range\n if to_rgb:\n self.mean = mean[::-1]\n else:\n self.mean = mean\n self.min_ratio, self.max_ratio = ratio_range\n self.seg_ignore_label = seg_ignore_label\n self.prob = prob\n\n def __call__(self, results):\n \"\"\"Call function to expand images, bounding boxes.\n\n Args:\n results (dict): Result dict from loading pipeline.\n\n Returns:\n dict: Result dict with images, bounding boxes expanded\n \"\"\"\n\n if random.uniform(0, 1) > self.prob:\n return results\n\n if 'img_fields' in results:\n assert results['img_fields'] == ['img'], \\\n 'Only single img_fields is allowed'\n img = results['img']\n\n h, w, c = img.shape\n ratio = random.uniform(self.min_ratio, self.max_ratio)\n # speedup expand when meets large image\n if np.all(self.mean == self.mean[0]):\n expand_img = np.empty((int(h * ratio), int(w * ratio), c),\n img.dtype)\n expand_img.fill(self.mean[0])\n else:\n expand_img = np.full((int(h * ratio), int(w * ratio), c),\n self.mean,\n dtype=img.dtype)\n left = int(random.uniform(0, w * ratio - w))\n top = int(random.uniform(0, h * ratio - h))\n expand_img[top:top + h, left:left + w] = img\n\n results['img'] = expand_img\n # expand bboxes\n for key in results.get('bbox_fields', []):\n results[key] = results[key] + np.tile(\n (left, top), 2).astype(results[key].dtype)\n\n # expand masks\n for key in results.get('mask_fields', []):\n results[key] = results[key].expand(\n int(h * ratio), int(w * ratio), top, left)\n\n # expand segs\n for key in results.get('seg_fields', []):\n gt_seg = results[key]\n expand_gt_seg = np.full((int(h * ratio), int(w * ratio)),\n self.seg_ignore_label,\n dtype=gt_seg.dtype)\n expand_gt_seg[top:top + h, left:left + w] = gt_seg\n results[key] = expand_gt_seg\n return results\n\n def __repr__(self):\n repr_str = self.__class__.__name__\n repr_str += f'(mean={self.mean}, to_rgb={self.to_rgb}, '\n repr_str += f'ratio_range={self.ratio_range}, '\n repr_str += f'seg_ignore_label={self.seg_ignore_label})'\n return repr_str\n\n\[email protected]_module()\nclass MinIoURandomCrop(object):\n \"\"\"Random crop the image & bboxes, the cropped patches have minimum IoU\n requirement 
with original image & bboxes, the IoU threshold is randomly\n selected from min_ious.\n\n Args:\n min_ious (tuple): minimum IoU threshold for all intersections with\n bounding boxes\n min_crop_size (float): minimum crop's size (i.e. h,w := a*h, a*w,\n where a >= min_crop_size).\n bbox_clip_border (bool, optional): Whether clip the objects outside\n the border of the image. Defaults to True.\n\n Note:\n The keys for bboxes, labels and masks should be paired. That is, \\\n `gt_bboxes` corresponds to `gt_labels` and `gt_masks`, and \\\n `gt_bboxes_ignore` to `gt_labels_ignore` and `gt_masks_ignore`.\n \"\"\"\n\n def __init__(self,\n min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),\n min_crop_size=0.3,\n bbox_clip_border=True):\n # 1: return ori img\n self.min_ious = min_ious\n self.sample_mode = (1, *min_ious, 0)\n self.min_crop_size = min_crop_size\n self.bbox_clip_border = bbox_clip_border\n self.bbox2label = {\n 'gt_bboxes': 'gt_labels',\n 'gt_bboxes_ignore': 'gt_labels_ignore'\n }\n self.bbox2mask = {\n 'gt_bboxes': 'gt_masks',\n 'gt_bboxes_ignore': 'gt_masks_ignore'\n }\n\n def __call__(self, results):\n \"\"\"Call function to crop images and bounding boxes with minimum IoU\n constraint.\n\n Args:\n results (dict): Result dict from loading pipeline.\n\n Returns:\n dict: Result dict with images and bounding boxes cropped, \\\n 'img_shape' key is updated.\n \"\"\"\n\n if 'img_fields' in results:\n assert results['img_fields'] == ['img'], \\\n 'Only single img_fields is allowed'\n img = results['img']\n assert 'bbox_fields' in results\n boxes = [results[key] for key in results['bbox_fields']]\n boxes = np.concatenate(boxes, 0)\n h, w, c = img.shape\n while True:\n mode = random.choice(self.sample_mode)\n self.mode = mode\n if mode == 1:\n return results\n\n min_iou = mode\n for i in range(50):\n new_w = random.uniform(self.min_crop_size * w, w)\n new_h = random.uniform(self.min_crop_size * h, h)\n\n # h / w in [0.5, 2]\n if new_h / new_w < 0.5 or new_h / new_w > 2:\n continue\n\n left = random.uniform(w - new_w)\n top = random.uniform(h - new_h)\n\n patch = np.array(\n (int(left), int(top), int(left + new_w), int(top + new_h)))\n # Line or point crop is not allowed\n if patch[2] == patch[0] or patch[3] == patch[1]:\n continue\n overlaps = bbox_overlaps(\n patch.reshape(-1, 4), boxes.reshape(-1, 4)).reshape(-1)\n if len(overlaps) > 0 and overlaps.min() < min_iou:\n continue\n\n # center of boxes should inside the crop img\n # only adjust boxes and instance masks when the gt is not empty\n if len(overlaps) > 0:\n # adjust boxes\n def is_center_of_bboxes_in_patch(boxes, patch):\n center = (boxes[:, :2] + boxes[:, 2:]) / 2\n mask = ((center[:, 0] > patch[0]) *\n (center[:, 1] > patch[1]) *\n (center[:, 0] < patch[2]) *\n (center[:, 1] < patch[3]))\n return mask\n\n mask = is_center_of_bboxes_in_patch(boxes, patch)\n if not mask.any():\n continue\n for key in results.get('bbox_fields', []):\n boxes = results[key].copy()\n mask = is_center_of_bboxes_in_patch(boxes, patch)\n boxes = boxes[mask]\n if self.bbox_clip_border:\n boxes[:, 2:] = boxes[:, 2:].clip(max=patch[2:])\n boxes[:, :2] = boxes[:, :2].clip(min=patch[:2])\n boxes -= np.tile(patch[:2], 2)\n\n results[key] = boxes\n # labels\n label_key = self.bbox2label.get(key)\n if label_key in results:\n results[label_key] = results[label_key][mask]\n\n # mask fields\n mask_key = self.bbox2mask.get(key)\n if mask_key in results:\n results[mask_key] = results[mask_key][\n mask.nonzero()[0]].crop(patch)\n # adjust the img no matter whether the gt is empty before 
crop\n img = img[patch[1]:patch[3], patch[0]:patch[2]]\n results['img'] = img\n results['img_shape'] = img.shape\n\n # seg fields\n for key in results.get('seg_fields', []):\n results[key] = results[key][patch[1]:patch[3],\n patch[0]:patch[2]]\n return results\n\n def __repr__(self):\n repr_str = self.__class__.__name__\n repr_str += f'(min_ious={self.min_ious}, '\n repr_str += f'min_crop_size={self.min_crop_size}), '\n repr_str += f'bbox_clip_border={self.bbox_clip_border})'\n return repr_str\n\n\[email protected]_module()\nclass Corrupt(object):\n \"\"\"Corruption augmentation.\n\n Corruption transforms implemented based on\n `imagecorruptions <https://github.com/bethgelab/imagecorruptions>`_.\n\n Args:\n corruption (str): Corruption name.\n severity (int, optional): The severity of corruption. Default: 1.\n \"\"\"\n\n def __init__(self, corruption, severity=1):\n self.corruption = corruption\n self.severity = severity\n\n def __call__(self, results):\n \"\"\"Call function to corrupt image.\n\n Args:\n results (dict): Result dict from loading pipeline.\n\n Returns:\n dict: Result dict with images corrupted.\n \"\"\"\n\n if corrupt is None:\n raise RuntimeError('imagecorruptions is not installed')\n if 'img_fields' in results:\n assert results['img_fields'] == ['img'], \\\n 'Only single img_fields is allowed'\n results['img'] = corrupt(\n results['img'].astype(np.uint8),\n corruption_name=self.corruption,\n severity=self.severity)\n return results\n\n def __repr__(self):\n repr_str = self.__class__.__name__\n repr_str += f'(corruption={self.corruption}, '\n repr_str += f'severity={self.severity})'\n return repr_str\n\n\[email protected]_module()\nclass Albu(object):\n \"\"\"Albumentation augmentation.\n\n Adds custom transformations from Albumentations library.\n Please, visit `https://albumentations.readthedocs.io`\n to get more information.\n\n An example of ``transforms`` is as followed:\n\n .. 
code-block::\n\n [\n dict(\n type='ShiftScaleRotate',\n shift_limit=0.0625,\n scale_limit=0.0,\n rotate_limit=0,\n interpolation=1,\n p=0.5),\n dict(\n type='RandomBrightnessContrast',\n brightness_limit=[0.1, 0.3],\n contrast_limit=[0.1, 0.3],\n p=0.2),\n dict(type='ChannelShuffle', p=0.1),\n dict(\n type='OneOf',\n transforms=[\n dict(type='Blur', blur_limit=3, p=1.0),\n dict(type='MedianBlur', blur_limit=3, p=1.0)\n ],\n p=0.1),\n ]\n\n Args:\n transforms (list[dict]): A list of albu transformations\n bbox_params (dict): Bbox_params for albumentation `Compose`\n keymap (dict): Contains {'input key':'albumentation-style key'}\n skip_img_without_anno (bool): Whether to skip the image if no ann left\n after aug\n \"\"\"\n\n def __init__(self,\n transforms,\n bbox_params=None,\n keymap=None,\n update_pad_shape=False,\n skip_img_without_anno=False):\n if Compose is None:\n raise RuntimeError('albumentations is not installed')\n\n self.transforms = transforms\n self.filter_lost_elements = False\n self.update_pad_shape = update_pad_shape\n self.skip_img_without_anno = skip_img_without_anno\n\n # A simple workaround to remove masks without boxes\n if (isinstance(bbox_params, dict) and 'label_fields' in bbox_params\n and 'filter_lost_elements' in bbox_params):\n self.filter_lost_elements = True\n self.origin_label_fields = bbox_params['label_fields']\n bbox_params['label_fields'] = ['idx_mapper']\n del bbox_params['filter_lost_elements']\n\n self.bbox_params = (\n self.albu_builder(bbox_params) if bbox_params else None)\n self.aug = Compose([self.albu_builder(t) for t in self.transforms],\n bbox_params=self.bbox_params)\n\n if not keymap:\n self.keymap_to_albu = {\n 'img': 'image',\n 'gt_masks': 'masks',\n 'gt_bboxes': 'bboxes'\n }\n else:\n self.keymap_to_albu = keymap\n self.keymap_back = {v: k for k, v in self.keymap_to_albu.items()}\n\n def albu_builder(self, cfg):\n \"\"\"Import a module from albumentations.\n\n It inherits some of :func:`build_from_cfg` logic.\n\n Args:\n cfg (dict): Config dict. It should at least contain the key \"type\".\n\n Returns:\n obj: The constructed object.\n \"\"\"\n\n assert isinstance(cfg, dict) and 'type' in cfg\n args = cfg.copy()\n\n obj_type = args.pop('type')\n if mmcv.is_str(obj_type):\n if albumentations is None:\n raise RuntimeError('albumentations is not installed')\n obj_cls = getattr(albumentations, obj_type)\n elif inspect.isclass(obj_type):\n obj_cls = obj_type\n else:\n raise TypeError(\n f'type must be a str or valid type, but got {type(obj_type)}')\n\n if 'transforms' in args:\n args['transforms'] = [\n self.albu_builder(transform)\n for transform in args['transforms']\n ]\n\n return obj_cls(**args)\n\n @staticmethod\n def mapper(d, keymap):\n \"\"\"Dictionary mapper. 
Renames keys according to keymap provided.\n\n Args:\n d (dict): old dict\n keymap (dict): {'old_key':'new_key'}\n Returns:\n dict: new dict.\n \"\"\"\n\n updated_dict = {}\n for k, v in zip(d.keys(), d.values()):\n new_k = keymap.get(k, k)\n updated_dict[new_k] = d[k]\n return updated_dict\n\n def __call__(self, results):\n # dict to albumentations format\n results = self.mapper(results, self.keymap_to_albu)\n # TODO: add bbox_fields\n if 'bboxes' in results:\n # to list of boxes\n if isinstance(results['bboxes'], np.ndarray):\n results['bboxes'] = [x for x in results['bboxes']]\n # add pseudo-field for filtration\n if self.filter_lost_elements:\n results['idx_mapper'] = np.arange(len(results['bboxes']))\n\n # TODO: Support mask structure in albu\n if 'masks' in results:\n if isinstance(results['masks'], PolygonMasks):\n raise NotImplementedError(\n 'Albu only supports BitMap masks now')\n ori_masks = results['masks']\n if albumentations.__version__ < '0.5':\n results['masks'] = results['masks'].masks\n else:\n results['masks'] = [mask for mask in results['masks'].masks]\n\n results = self.aug(**results)\n\n if 'bboxes' in results:\n if isinstance(results['bboxes'], list):\n results['bboxes'] = np.array(\n results['bboxes'], dtype=np.float32)\n results['bboxes'] = results['bboxes'].reshape(-1, 4)\n\n # filter label_fields\n if self.filter_lost_elements:\n\n for label in self.origin_label_fields:\n results[label] = np.array(\n [results[label][i] for i in results['idx_mapper']])\n if 'masks' in results:\n results['masks'] = np.array(\n [results['masks'][i] for i in results['idx_mapper']])\n results['masks'] = ori_masks.__class__(\n results['masks'], results['image'].shape[0],\n results['image'].shape[1])\n\n if (not len(results['idx_mapper'])\n and self.skip_img_without_anno):\n return None\n\n if 'gt_labels' in results:\n if isinstance(results['gt_labels'], list):\n results['gt_labels'] = np.array(results['gt_labels'])\n results['gt_labels'] = results['gt_labels'].astype(np.int64)\n\n # back to the original format\n results = self.mapper(results, self.keymap_back)\n\n # update final shape\n if self.update_pad_shape:\n results['pad_shape'] = results['img'].shape\n\n return results\n\n def __repr__(self):\n repr_str = self.__class__.__name__ + f'(transforms={self.transforms})'\n return repr_str\n\n\[email protected]_module()\nclass RandomCenterCropPad(object):\n \"\"\"Random center crop and random around padding for CornerNet.\n\n This operation generates randomly cropped image from the original image and\n pads it simultaneously. Different from :class:`RandomCrop`, the output\n shape may not equal to ``crop_size`` strictly. We choose a random value\n from ``ratios`` and the output shape could be larger or smaller than\n ``crop_size``. The padding operation is also different from :class:`Pad`,\n here we use around padding instead of right-bottom padding.\n\n The relation between output image (padding image) and original image:\n\n .. code:: text\n\n output image\n\n +----------------------------+\n | padded area |\n +------|----------------------------|----------+\n | | cropped area | |\n | | +---------------+ | |\n | | | . 
center | | | original image\n | | | range | | |\n | | +---------------+ | |\n +------|----------------------------|----------+\n | padded area |\n +----------------------------+\n\n There are 5 main areas in the figure:\n\n - output image: output image of this operation, also called padding\n image in following instruction.\n - original image: input image of this operation.\n - padded area: non-intersect area of output image and original image.\n - cropped area: the overlap of output image and original image.\n - center range: a smaller area where random center chosen from.\n center range is computed by ``border`` and original image's shape\n to avoid our random center is too close to original image's border.\n\n Also this operation act differently in train and test mode, the summary\n pipeline is listed below.\n\n Train pipeline:\n\n 1. Choose a ``random_ratio`` from ``ratios``, the shape of padding image\n will be ``random_ratio * crop_size``.\n 2. Choose a ``random_center`` in center range.\n 3. Generate padding image with center matches the ``random_center``.\n 4. Initialize the padding image with pixel value equals to ``mean``.\n 5. Copy the cropped area to padding image.\n 6. Refine annotations.\n\n Test pipeline:\n\n 1. Compute output shape according to ``test_pad_mode``.\n 2. Generate padding image with center matches the original image\n center.\n 3. Initialize the padding image with pixel value equals to ``mean``.\n 4. Copy the ``cropped area`` to padding image.\n\n Args:\n crop_size (tuple | None): expected size after crop, final size will\n computed according to ratio. Requires (h, w) in train mode, and\n None in test mode.\n ratios (tuple): random select a ratio from tuple and crop image to\n (crop_size[0] * ratio) * (crop_size[1] * ratio).\n Only available in train mode.\n border (int): max distance from center select area to image border.\n Only available in train mode.\n mean (sequence): Mean values of 3 channels.\n std (sequence): Std values of 3 channels.\n to_rgb (bool): Whether to convert the image from BGR to RGB.\n test_mode (bool): whether involve random variables in transform.\n In train mode, crop_size is fixed, center coords and ratio is\n random selected from predefined lists. In test mode, crop_size\n is image's original shape, center coords and ratio is fixed.\n test_pad_mode (tuple): padding method and padding shape value, only\n available in test mode. Default is using 'logical_or' with\n 127 as padding shape value.\n\n - 'logical_or': final_shape = input_shape | padding_shape_value\n - 'size_divisor': final_shape = int(\n ceil(input_shape / padding_shape_value) * padding_shape_value)\n bbox_clip_border (bool, optional): Whether clip the objects outside\n the border of the image. 
Defaults to True.\n \"\"\"\n\n def __init__(self,\n crop_size=None,\n ratios=(0.9, 1.0, 1.1),\n border=128,\n mean=None,\n std=None,\n to_rgb=None,\n test_mode=False,\n test_pad_mode=('logical_or', 127),\n bbox_clip_border=True):\n if test_mode:\n assert crop_size is None, 'crop_size must be None in test mode'\n assert ratios is None, 'ratios must be None in test mode'\n assert border is None, 'border must be None in test mode'\n assert isinstance(test_pad_mode, (list, tuple))\n assert test_pad_mode[0] in ['logical_or', 'size_divisor']\n else:\n assert isinstance(crop_size, (list, tuple))\n assert crop_size[0] > 0 and crop_size[1] > 0, (\n 'crop_size must > 0 in train mode')\n assert isinstance(ratios, (list, tuple))\n assert test_pad_mode is None, (\n 'test_pad_mode must be None in train mode')\n\n self.crop_size = crop_size\n self.ratios = ratios\n self.border = border\n # We do not set default value to mean, std and to_rgb because these\n # hyper-parameters are easy to forget but could affect the performance.\n # Please use the same setting as Normalize for performance assurance.\n assert mean is not None and std is not None and to_rgb is not None\n self.to_rgb = to_rgb\n self.input_mean = mean\n self.input_std = std\n if to_rgb:\n self.mean = mean[::-1]\n self.std = std[::-1]\n else:\n self.mean = mean\n self.std = std\n self.test_mode = test_mode\n self.test_pad_mode = test_pad_mode\n self.bbox_clip_border = bbox_clip_border\n\n def _get_border(self, border, size):\n \"\"\"Get final border for the target size.\n\n This function generates a ``final_border`` according to image's shape.\n The area between ``final_border`` and ``size - final_border`` is the\n ``center range``. We randomly choose center from the ``center range``\n to avoid our random center is too close to original image's border.\n Also ``center range`` should be larger than 0.\n\n Args:\n border (int): The initial border, default is 128.\n size (int): The width or height of original image.\n Returns:\n int: The final border.\n \"\"\"\n k = 2 * border / size\n i = pow(2, np.ceil(np.log2(np.ceil(k))) + (k == int(k)))\n return border // i\n\n def _filter_boxes(self, patch, boxes):\n \"\"\"Check whether the center of each box is in the patch.\n\n Args:\n patch (list[int]): The cropped area, [left, top, right, bottom].\n boxes (numpy array, (N x 4)): Ground truth boxes.\n\n Returns:\n mask (numpy array, (N,)): Each box is inside or outside the patch.\n \"\"\"\n center = (boxes[:, :2] + boxes[:, 2:]) / 2\n mask = (center[:, 0] > patch[0]) * (center[:, 1] > patch[1]) * (\n center[:, 0] < patch[2]) * (\n center[:, 1] < patch[3])\n return mask\n\n def _crop_image_and_paste(self, image, center, size):\n \"\"\"Crop image with a given center and size, then paste the cropped\n image to a blank image with two centers align.\n\n This function is equivalent to generating a blank image with ``size``\n as its shape. Then cover it on the original image with two centers (\n the center of blank image and the random center of original image)\n aligned. The overlap area is paste from the original image and the\n outside area is filled with ``mean pixel``.\n\n Args:\n image (np array, H x W x C): Original image.\n center (list[int]): Target crop center coord.\n size (list[int]): Target crop size. 
[target_h, target_w]\n\n Returns:\n cropped_img (np array, target_h x target_w x C): Cropped image.\n border (np array, 4): The distances of the four borders of\n ``cropped_img`` to the original image area, [top, bottom,\n left, right]\n patch (list[int]): The cropped area, [left, top, right, bottom].\n \"\"\"\n center_y, center_x = center\n target_h, target_w = size\n img_h, img_w, img_c = image.shape\n\n x0 = max(0, center_x - target_w // 2)\n x1 = min(center_x + target_w // 2, img_w)\n y0 = max(0, center_y - target_h // 2)\n y1 = min(center_y + target_h // 2, img_h)\n patch = np.array((int(x0), int(y0), int(x1), int(y1)))\n\n left, right = center_x - x0, x1 - center_x\n top, bottom = center_y - y0, y1 - center_y\n\n cropped_center_y, cropped_center_x = target_h // 2, target_w // 2\n cropped_img = np.zeros((target_h, target_w, img_c), dtype=image.dtype)\n for i in range(img_c):\n cropped_img[:, :, i] += self.mean[i]\n y_slice = slice(cropped_center_y - top, cropped_center_y + bottom)\n x_slice = slice(cropped_center_x - left, cropped_center_x + right)\n cropped_img[y_slice, x_slice, :] = image[y0:y1, x0:x1, :]\n\n border = np.array([\n cropped_center_y - top, cropped_center_y + bottom,\n cropped_center_x - left, cropped_center_x + right\n ],\n dtype=np.float32)\n\n return cropped_img, border, patch\n\n def _train_aug(self, results):\n \"\"\"Random crop and around padding the original image.\n\n Args:\n results (dict): Image information in the augment pipeline.\n\n Returns:\n results (dict): The updated dict.\n \"\"\"\n img = results['img']\n h, w, c = img.shape\n boxes = results['gt_bboxes']\n while True:\n scale = random.choice(self.ratios)\n new_h = int(self.crop_size[0] * scale)\n new_w = int(self.crop_size[1] * scale)\n h_border = self._get_border(self.border, h)\n w_border = self._get_border(self.border, w)\n\n for i in range(50):\n center_x = random.randint(low=w_border, high=w - w_border)\n center_y = random.randint(low=h_border, high=h - h_border)\n\n cropped_img, border, patch = self._crop_image_and_paste(\n img, [center_y, center_x], [new_h, new_w])\n\n mask = self._filter_boxes(patch, boxes)\n # if the image does not have a valid bbox, any crop patch is valid.\n if not mask.any() and len(boxes) > 0:\n continue\n\n results['img'] = cropped_img\n results['img_shape'] = cropped_img.shape\n results['pad_shape'] = cropped_img.shape\n\n x0, y0, x1, y1 = patch\n\n left_w, top_h = center_x - x0, center_y - y0\n cropped_center_x, cropped_center_y = new_w // 2, new_h // 2\n\n # crop bboxes accordingly and clip to the image boundary\n for key in results.get('bbox_fields', []):\n mask = self._filter_boxes(patch, results[key])\n bboxes = results[key][mask]\n bboxes[:, 0:4:2] += cropped_center_x - left_w - x0\n bboxes[:, 1:4:2] += cropped_center_y - top_h - y0\n if self.bbox_clip_border:\n bboxes[:, 0:4:2] = np.clip(bboxes[:, 0:4:2], 0, new_w) # 0:4:2 means indices from 0 to 4 with a step of 2\n bboxes[:, 1:4:2] = np.clip(bboxes[:, 1:4:2], 0, new_h)\n keep = (bboxes[:, 2] > bboxes[:, 0]) & (\n bboxes[:, 3] > bboxes[:, 1])\n bboxes = bboxes[keep]\n results[key] = bboxes\n if key in ['gt_bboxes']:\n if 'gt_labels' in results:\n labels = results['gt_labels'][mask]\n labels = labels[keep]\n results['gt_labels'] = labels\n if 'gt_masks' in results:\n raise NotImplementedError(\n 'RandomCenterCropPad only supports bbox.')\n\n # crop semantic seg\n for key in results.get('seg_fields', []):\n raise NotImplementedError(\n 'RandomCenterCropPad only supports bbox.')\n return results\n\n def _test_aug(self, results):\n \"\"\"Around padding the
original image without cropping.\n\n The padding mode and value are from ``test_pad_mode``.\n\n Args:\n results (dict): Image information in the augment pipeline.\n\n Returns:\n results (dict): The updated dict.\n \"\"\"\n img = results['img']\n h, w, c = img.shape\n results['img_shape'] = img.shape\n if self.test_pad_mode[0] in ['logical_or']:\n target_h = h | self.test_pad_mode[1]\n target_w = w | self.test_pad_mode[1]\n elif self.test_pad_mode[0] in ['size_divisor']:\n divisor = self.test_pad_mode[1]\n target_h = int(np.ceil(h / divisor)) * divisor\n target_w = int(np.ceil(w / divisor)) * divisor\n else:\n raise NotImplementedError(\n 'RandomCenterCropPad only supports two testing pad modes: '\n 'logical_or and size_divisor.')\n\n cropped_img, border, _ = self._crop_image_and_paste(\n img, [h // 2, w // 2], [target_h, target_w])\n results['img'] = cropped_img\n results['pad_shape'] = cropped_img.shape\n results['border'] = border\n return results\n\n def __call__(self, results):\n img = results['img']\n assert img.dtype == np.float32, (\n 'RandomCenterCropPad needs the input image of dtype np.float32,'\n ' please set \"to_float32=True\" in \"LoadImageFromFile\" pipeline')\n h, w, c = img.shape\n assert c == len(self.mean)\n if self.test_mode:\n return self._test_aug(results)\n else:\n return self._train_aug(results)\n\n def __repr__(self):\n repr_str = self.__class__.__name__\n repr_str += f'(crop_size={self.crop_size}, '\n repr_str += f'ratios={self.ratios}, '\n repr_str += f'border={self.border}, '\n repr_str += f'mean={self.input_mean}, '\n repr_str += f'std={self.input_std}, '\n repr_str += f'to_rgb={self.to_rgb}, '\n repr_str += f'test_mode={self.test_mode}, '\n repr_str += f'test_pad_mode={self.test_pad_mode}, '\n repr_str += f'bbox_clip_border={self.bbox_clip_border})'\n return repr_str\n\n\[email protected]_module()\nclass CutOut(object):\n \"\"\"CutOut operation.\n\n Randomly drop some regions of the image used in\n `Cutout <https://arxiv.org/abs/1708.04552>`_.\n\n Args:\n n_holes (int | tuple[int, int]): Number of regions to be dropped.\n If it is given as a list, number of holes will be randomly\n selected from the closed interval [`n_holes[0]`, `n_holes[1]`].\n cutout_shape (tuple[int, int] | list[tuple[int, int]]): The candidate\n shape of dropped regions. It can be `tuple[int, int]` to use a\n fixed cutout shape, or `list[tuple[int, int]]` to randomly choose\n shape from the list.\n cutout_ratio (tuple[float, float] | list[tuple[float, float]]): The\n candidate ratio of dropped regions. It can be `tuple[float, float]`\n to use a fixed ratio or `list[tuple[float, float]]` to randomly\n choose ratio from the list. Please note that `cutout_shape`\n and `cutout_ratio` cannot be both given at the same time.\n fill_in (tuple[float, float, float] | tuple[int, int, int]): The value\n of pixel to fill in the dropped regions.
Default: (0, 0, 0).\n \"\"\"\n\n def __init__(self,\n n_holes,\n cutout_shape=None,\n cutout_ratio=None,\n fill_in=(0, 0, 0)):\n\n assert (cutout_shape is None) ^ (cutout_ratio is None), \\\n 'Either cutout_shape or cutout_ratio should be specified.'\n assert (isinstance(cutout_shape, (list, tuple))\n or isinstance(cutout_ratio, (list, tuple)))\n if isinstance(n_holes, tuple):\n assert len(n_holes) == 2 and 0 <= n_holes[0] < n_holes[1]\n else:\n n_holes = (n_holes, n_holes)\n self.n_holes = n_holes\n self.fill_in = fill_in\n self.with_ratio = cutout_ratio is not None\n self.candidates = cutout_ratio if self.with_ratio else cutout_shape\n if not isinstance(self.candidates, list):\n self.candidates = [self.candidates]\n\n def __call__(self, results):\n \"\"\"Call function to drop some regions of image.\"\"\"\n h, w, c = results['img'].shape\n n_holes = np.random.randint(self.n_holes[0], self.n_holes[1] + 1)\n for _ in range(n_holes):\n x1 = np.random.randint(0, w)\n y1 = np.random.randint(0, h)\n index = np.random.randint(0, len(self.candidates))\n if not self.with_ratio:\n cutout_w, cutout_h = self.candidates[index]\n else:\n cutout_w = int(self.candidates[index][0] * w)\n cutout_h = int(self.candidates[index][1] * h)\n\n x2 = np.clip(x1 + cutout_w, 0, w)\n y2 = np.clip(y1 + cutout_h, 0, h)\n results['img'][y1:y2, x1:x2, :] = self.fill_in\n\n return results\n\n def __repr__(self):\n repr_str = self.__class__.__name__\n repr_str += f'(n_holes={self.n_holes}, '\n repr_str += (f'cutout_ratio={self.candidates}, ' if self.with_ratio\n else f'cutout_shape={self.candidates}, ')\n repr_str += f'fill_in={self.fill_in})'\n return repr_str\n"
] | [
[
"numpy.concatenate",
"numpy.array",
"numpy.ceil",
"numpy.random.choice",
"numpy.asarray",
"numpy.zeros",
"numpy.random.rand",
"numpy.random.permutation",
"numpy.tile",
"numpy.random.random_sample",
"numpy.random.uniform",
"numpy.random.randint",
"numpy.clip",
"numpy.all"
]
] |
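The bbox flip in the transforms.py entry above (RandomFlip.bbox_flip) can be exercised standalone. A minimal sketch of the horizontal case using the same slicing as the source; hflip_bboxes is an illustrative name, not an mmdet API:

import numpy as np

def hflip_bboxes(bboxes, img_width):
    # bboxes: (N, 4*k) array of (x1, y1, x2, y2, ...) in pixel coordinates.
    # Mirror x-coordinates about the image width, swapping x1 and x2.
    flipped = bboxes.copy()
    flipped[..., 0::4] = img_width - bboxes[..., 2::4]
    flipped[..., 2::4] = img_width - bboxes[..., 0::4]
    return flipped

boxes = np.array([[10.0, 20.0, 50.0, 80.0]])
print(hflip_bboxes(boxes, img_width=100))  # -> [[50. 20. 90. 80.]]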
antonybholmes/libgsea | [
"f810f32370be51bec4d1b982098552b0f526b515"
] | [
"libgsea/extgsea.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Dec 13 14:13:10 2018\n\n@author: antony\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport sys\nimport matplotlib\nfrom matplotlib.colors import Normalize\nimport matplotlib.pyplot as plt\nimport matplotlib.transforms as transforms\nimport libplot\nimport matplotlib.gridspec as gridspec\n\n\n# http://arep.med.harvard.edu/N-Regulation/Tolonen2006/GSEA/index.html\n\n\nclass ExtGSEA(object):\n def __init__(self, ranked_gene_list, ranked_score, permutations=1000, w=1):\n self.__w = w\n self.__np = permutations\n \n l = len(ranked_gene_list)\n \n rk = np.concatenate((ranked_gene_list, ranked_gene_list))\n rsc = np.concatenate((ranked_score, -ranked_score), axis=0)\n ix = np.argsort(rsc)[::-1]\n \n print(np.sort(rsc)[::-1])\n \n pn = np.concatenate((np.ones(l), -np.ones(l)), axis=0)\n \n self.__rk = ranked_gene_list\n self.__rs = ranked_score\n \n self.__rkc = rk[ix]\n self.__rsc = rsc[ix]\n self.__pn = pn[ix]\n \n # Defaults if nothing found\n self.__es = -1\n self.__nes = -1\n self.__pv = -1\n self.__ledge = []\n self.__bg = {}\n \n self.__gsn1 = 'n1'\n self.__gsn2 = 'n2'\n \n self.__run = False\n \n def enrichment_score(self, gs1):\n l = len(self.__rk)\n \n hits = np.zeros(l)\n \n for i in range(0, l):\n if self.__rk[i] in gs1:\n hits[i] = 1\n\n # Compute ES\n \n if self.__w != 1:\n score_hit = np.cumsum(np.abs(self.__rs * hits) ** self.__w)\n else:\n score_hit = np.cumsum(np.abs(self.__rs * hits))\n \n score_hit = score_hit / score_hit[-1]\n score_miss = np.cumsum(1 - hits)\n score_miss = score_miss / score_miss[-1]\n \n es_all = score_hit - score_miss\n es = np.max(es_all) + np.min(es_all)\n \n isen = np.zeros(l)\n \n if es < 0:\n ixpk = np.where(es_all == np.min(es_all))[0][0]\n isen[ixpk:] = 1\n ledge = self.__rk[(isen == 1) & (hits == 1)]\n ledge = ledge[::-1]\n else:\n ixpk = np.where(es_all == np.max(es_all))[0][0]\n print(ixpk)\n isen[0:(ixpk + 1)] = 1\n ledge = self.__rk[(isen == 1) & (hits == 1)] \n \n return es, es_all, hits, ledge\n \n def ext_gsea(self, gs1, gs2, name1='Gene set 1', name2='Gene set 2'):\n self.__gs1 = gs1\n self.__gs2 = gs2\n self.__gsn1 = name1\n self.__gsn2 = name2\n \n l = len(self.__rk)\n \n self.__hits1 = np.zeros(l)\n self.__hits2 = np.zeros(l)\n \n for i in range(0, l):\n if self.__rk[i] in gs1:\n self.__hits1[i] = 1\n \n if self.__rk[i] in gs2:\n self.__hits2[i] = 1\n \n \n l = len(self.__rkc)\n \n self.__isgs = np.zeros(l)\n \n for i in range(0, l):\n if (self.__pn[i] > 0 and self.__rkc[i] in gs1) or (self.__pn[i] < 0 and self.__rkc[i] in gs2):\n self.__isgs[i] = 1\n \n \n \n # Compute ES\n \n if self.__w != 1:\n self.__score_hit = np.cumsum(np.abs(self.__rsc * self.__isgs) ** self.__w)\n else:\n self.__score_hit = np.cumsum(np.abs(self.__rsc * self.__isgs))\n \n self.__score_hit = self.__score_hit / self.__score_hit[-1]\n self.__score_miss = np.cumsum(1 - self.__isgs)\n self.__score_miss = self.__score_miss / self.__score_miss[-1]\n \n self.__es_all = self.__score_hit - self.__score_miss\n self.__es = np.max(self.__es_all) + np.min(self.__es_all)\n \n isen = np.zeros(l)\n \n if self.__es < 0:\n ixpk = np.where(self.__es_all == np.min(self.__es_all))[0][0]\n isen[ixpk:] = 1\n self.__ledge = self.__rkc[(isen == 1) & (self.__isgs == 1)]\n self.__ledge = self.__ledge[::-1]\n else:\n ixpk = np.where(self.__es_all == np.max(self.__es_all))[0][0]\n isen[0:(ixpk + 1)] = 1\n self.__ledge = self.__rkc[(isen == 1) & (self.__isgs == 1)]\n \n if self.__np > 0:\n self.__bg['es'] = 
np.zeros(self.__np)\n \n for i in range(0, self.__np):\n self.__bg['isgs'] = self.__isgs[np.random.permutation(l)]; \n \n if self.__w != 1:\n self.__bg['hit'] = np.cumsum((np.abs(self.__rsc * self.__bg['isgs'])) ** self.__w)\n else:\n self.__bg['hit'] = np.cumsum(np.abs(self.__rsc * self.__bg['isgs']))\n \n self.__bg['hit'] = self.__bg['hit'] / self.__bg['hit'][-1]\n self.__bg['miss'] = np.cumsum(1 - self.__bg['isgs']);\n self.__bg['miss'] = self.__bg['miss'] / self.__bg['miss'][-1]\n self.__bg['all'] = self.__bg['hit'] - self.__bg['miss'];\n self.__bg['es'][i] = max(self.__bg['all']) + min(self.__bg['all']);\n\n if self.__es < 0:\n self.__pv = np.sum(self.__bg['es'] <= self.__es) / self.__np\n self.__nes = self.__es / np.abs(np.mean(self.__bg['es'][self.__bg['es'] < 0]))\n else:\n self.__pv = np.sum(self.__bg['es'] >= self.__es) / self.__np\n self.__nes = self.__es / np.abs(np.mean(self.__bg['es'][self.__bg['es'] > 0]))\n else:\n self.__pv = -1\n self.__nes = -1\n \n self.__run = True\n \n return self.__es, self.__nes, self.__pv, self.__ledge\n \n @property\n def bg(self):\n return self.__bg\n \n @property\n def score_hit(self):\n return self.__score_hit\n \n @property\n def isgs(self):\n return self.__isgs\n \n @property\n def es(self):\n return self.__es\n \n @property\n def es_all(self):\n return self.__es_all\n \n @property\n def score_miss(self):\n return self.__score_miss\n \n def plot(self, title=None, out=None):\n \"\"\"\n Replot existing GSEA plot to make it better for publications\n \"\"\"\n \n if not self.__run:\n return\n \n libplot.setup()\n \n # output truetype\n #plt.rcParams.update({'pdf.fonttype':42,'ps.fonttype':42})\n # in most case, we will have mangy plots, so do not display plots\n # It's also convinient to run this script on command line.\n \n fig = libplot.new_base_fig(w=10, h=7)\n \n # GSEA Plots\n gs = gridspec.GridSpec(16, 1)\n \n \n \n x = np.array(list(range(0, len(self.__rk))))\n \n \n es1, es_all1, hits1, ledge1 = self.enrichment_score(self.__gs1)\n es2, es_all2, hits2, ledge2 = self.enrichment_score(self.__gs2)\n \n \n # Ranked Metric Scores Plot\n \n ix = list(range(0, len(x), 100))\n \n print(ix)\n \n x1 = x[ix]\n y1 = self.__rs[ix]\n \n print(hits1)\n \n ax1 = fig.add_subplot(gs[10:])\n ax1.fill_between(x1, y1=y1, y2=0, color='#2c5aa0')\n ax1.set_ylabel(\"Ranked list metric\", fontsize=14)\n \n ax1.text(.05, .9, self.__gsn1, color='black', horizontalalignment='left', verticalalignment='top',\n transform=ax1.transAxes)\n ax1.text(.95, .05, self.__gsn2, color='red', horizontalalignment='right', verticalalignment='bottom',\n transform=ax1.transAxes)\n ax1.spines['top'].set_visible(False)\n ax1.spines['right'].set_visible(False)\n ax1.set_xlim((0, len(x)))\n \n #\n # Hits\n #\n \n # gene hits\n ax2 = fig.add_subplot(gs[8:9], sharex=ax1)\n \n # the x coords of this transformation are data, and the y coord are axes\n trans2 = transforms.blended_transform_factory(ax2.transData, ax2.transAxes)\n ax2.vlines(np.where(hits1 == 1)[0], 0, 1, linewidth=.5, transform=trans2, color ='black')\n libplot.invisible_axes(ax2)\n \n ax3 = fig.add_subplot(gs[9:10], sharex=ax1)\n \n # the x coords of this transformation are data, and the y coord are axes\n trans3 = transforms.blended_transform_factory(ax3.transData, ax3.transAxes)\n ax3.vlines(np.where(hits2 == 1)[0], 0, 1, linewidth=.5,transform=trans3, color ='red')\n libplot.invisible_axes(ax3)\n \n \n #\n # Enrichment score plot\n #\n \n ax4 = fig.add_subplot(gs[:8], sharex=ax1)\n \n # max es\n y2 = np.max(es_all1)\n x1 = 
np.where(es_all1 == y2)[0]\n print(x1, y2)\n ax4.vlines(x1, 0, y2, linewidth=.5, color='grey')\n \n y2 = np.min(es_all2)\n x1 = np.where(es_all2 == y2)[0]\n print(x1, y2)\n ax4.vlines(x1, 0, y2, linewidth=.5, color='grey')\n \n y1 = es_all1\n y2 = es_all2\n \n \n ax4.plot(x, y1, linewidth=3, color ='black')\n ax4.plot(x, y2, linewidth=3, color ='red')\n \n \n \n ax4.tick_params(axis='both', which='both', color='dimgray')\n #ax4.spines['left'].set_color('dimgray')\n ax4.spines['bottom'].set_visible(False) #set_color('dimgray')\n \n # the y coords of this transformation are data, and the x coord are axes\n trans4 = transforms.blended_transform_factory(ax4.transAxes, ax4.transData)\n ax4.hlines(0, 0, 1, linewidth=.5, transform=trans4, color='grey')\n \n \n \n ax4.set_ylabel(\"Enrichment score (ES)\", fontsize=14)\n ax4.set_xlim(min(x), max(x))\n ax4.spines['top'].set_visible(False)\n ax4.spines['right'].set_visible(False)\n ax4.tick_params(axis='both', which='both', bottom='off', top='off', labelbottom='off', right='off')\n ax4.locator_params(axis='y', nbins=5)\n # FuncFormatter need two argment, I don't know why. this lambda function used to format yaxis tick labels.\n ax4.yaxis.set_major_formatter(plt.FuncFormatter(lambda tick_loc,tick_num : '{:.1f}'.format(tick_loc)) )\n \n \n \n if title is not None:\n fig.suptitle(title)\n \n \n\n fig.tight_layout(pad=2) #rect=[o, o, w, w])\n \n if out is not None:\n plt.savefig(out, dpi=600)\n"
] | [
[
"numpy.concatenate",
"numpy.max",
"numpy.zeros",
"matplotlib.pyplot.savefig",
"numpy.sum",
"numpy.ones",
"numpy.random.permutation",
"numpy.min",
"numpy.mean",
"numpy.where",
"numpy.sort",
"numpy.argsort",
"matplotlib.transforms.blended_transform_factory",
"numpy.cumsum",
"numpy.abs",
"matplotlib.gridspec.GridSpec"
]
] |
vigneshbabupj/Project_Vision | [
"dd4eefe99da0a5b4283fcc5b70a95e7657f1e450"
] | [
"plane_decoder/modules.py"
] | [
"\"\"\"\nCopyright (C) 2019 NVIDIA Corporation. All rights reserved.\nLicensed under the CC BY-NC-SA 4.0 license\n(https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).\n\"\"\"\n\nimport torch\nimport torch.nn.functional as F\nimport time\nimport numpy as np\nfrom torch import nn\nimport sys\n\ndef unmoldDetections(config, camera, detections, detection_masks, depth_np, unmold_masks=True, debug=False):\n \"\"\"Reformats the detections of one image from the format of the neural\n network output to a format suitable for use in the rest of the\n application.\n\n detections: [N, (y1, x1, y2, x2, class_id, score)]\n mrcnn_mask: [N, height, width, num_classes]\n image_shape: [height, width, depth] Original size of the image before resizing\n window: [y1, x1, y2, x2] Box in the image where the real image is\n excluding the padding.\n\n Returns:\n boxes: [N, (y1, x1, y2, x2)] Bounding boxes in pixels\n class_ids: [N] Integer class IDs for each bounding box\n scores: [N] Float probability scores of the class_id\n masks: [height, width, num_instances] Instance masks\n \"\"\"\n if config.GLOBAL_MASK:\n masks = detection_masks[torch.arange(len(detection_masks)).cuda().long(), 0, :, :]\n else:\n masks = detection_masks[torch.arange(len(detection_masks)).cuda().long(), detections[:, 4].long(), :, :]\n pass\n\n final_masks = []\n for detectionIndex in range(len(detections)):\n box = detections[detectionIndex][:4].long()\n if (box[2] - box[0]) * (box[3] - box[1]) <= 0:\n continue\n \n mask = masks[detectionIndex]\n mask = mask.unsqueeze(0).unsqueeze(0)\n mask = F.interpolate(mask, size=(box[2] - box[0], box[3] - box[1]), mode='bilinear',align_corners=False)\n mask = mask.squeeze(0).squeeze(0)\n\n final_mask = torch.zeros(config.IMAGE_MAX_DIM, config.IMAGE_MAX_DIM).cuda()\n final_mask[box[0]:box[2], box[1]:box[3]] = mask\n final_masks.append(final_mask)\n continue\n final_masks = torch.stack(final_masks, dim=0)\n \n if config.NUM_PARAMETER_CHANNELS > 0:\n ## We could potentially predict depth and/or normals for each instance (not being used)\n parameters_array = detection_masks[torch.arange(len(detection_masks)).cuda().long(), -config.NUM_PARAMETER_CHANNELS:, :, :]\n final_parameters_array = []\n for detectionIndex in range(len(detections)):\n box = detections[detectionIndex][:4].long()\n if (box[2] - box[0]) * (box[3] - box[1]) <= 0:\n continue\n parameters = F.interpolate(parameters_array[detectionIndex].unsqueeze(0), size=(box[2] - box[0], box[3] - box[1]), mode='bilinear').squeeze(0)\n final_parameters = torch.zeros(config.NUM_PARAMETER_CHANNELS, config.IMAGE_MAX_DIM, config.IMAGE_MAX_DIM).cuda()\n final_parameters[:, box[0]:box[2], box[1]:box[3]] = parameters\n final_parameters_array.append(final_parameters)\n continue\n final_parameters = torch.stack(final_parameters_array, dim=0) \n final_masks = torch.cat([final_masks.unsqueeze(1), final_parameters], dim=1)\n pass\n\n masks = final_masks\n\n if 'normal' in config.ANCHOR_TYPE:\n ## Compute offset based normal prediction and depthmap prediction\n ranges = config.getRanges(camera).transpose(1, 2).transpose(0, 1)\n zeros = torch.zeros(3, (config.IMAGE_MAX_DIM - config.IMAGE_MIN_DIM) // 2, config.IMAGE_MAX_DIM).cuda() \n ranges = torch.cat([zeros, ranges, zeros], dim=1)\n \n if config.NUM_PARAMETER_CHANNELS == 4:\n ## If we predict depthmap and normal map for each instance, we compute normals again (not used)\n masks_cropped = masks[:, 0:1, 80:560]\n mask_sum = masks_cropped.sum(-1).sum(-1)\n plane_normals = (masks[:, 2:5, 80:560] * 
masks_cropped).sum(-1).sum(-1) / mask_sum\n plane_normals = plane_normals / torch.clamp(torch.norm(plane_normals, dim=-1, keepdim=True), min=1e-4)\n XYZ_np_cropped = (ranges * masks[:, 1:2])[:, :, 80:560]\n offsets = ((plane_normals.view(-1, 3, 1, 1) * XYZ_np_cropped).sum(1, keepdim=True) * masks_cropped).sum(-1).sum(-1) / mask_sum\n plane_parameters = plane_normals * offsets.view((-1, 1))\n masks = masks[:, 0] \n else:\n if config.NUM_PARAMETER_CHANNELS > 0:\n ## If we predict depthmap independently for each instance, we use the individual depthmap instead of the global depth map (not used) \n if config.OCCLUSION:\n XYZ_np = ranges * depth_np \n XYZ_np_cropped = XYZ_np[:, 80:560]\n masks_cropped = masks[:, 1, 80:560] \n masks = masks[:, 0]\n else:\n XYZ_np_cropped = (ranges * masks[:, 1:2])[:, :, 80:560]\n masks = masks[:, 0]\n masks_cropped = masks[:, 80:560]\n pass\n else:\n ## We use the global depthmap prediction to compute plane offsets\n XYZ_np = ranges * depth_np \n XYZ_np_cropped = XYZ_np[:, 80:560]\n masks_cropped = masks[:, 80:560] \n pass\n\n if config.FITTING_TYPE % 2 == 1:\n ## We fit all plane parameters using depthmap prediction (not used)\n A = masks_cropped.unsqueeze(1) * XYZ_np_cropped\n b = masks_cropped\n Ab = (A * b.unsqueeze(1)).sum(-1).sum(-1)\n AA = (A.unsqueeze(2) * A.unsqueeze(1)).sum(-1).sum(-1)\n plane_parameters = torch.stack([torch.matmul(torch.inverse(AA[planeIndex]), Ab[planeIndex]) for planeIndex in range(len(AA))], dim=0)\n plane_offsets = torch.norm(plane_parameters, dim=-1, keepdim=True)\n plane_parameters = plane_parameters / torch.clamp(torch.pow(plane_offsets, 2), 1e-4) \n else:\n ## We compute only plane offset using depthmap prediction \n plane_parameters = detections[:, 6:9] \n plane_normals = plane_parameters / torch.clamp(torch.norm(plane_parameters, dim=-1, keepdim=True), 1e-4)\n offsets = ((plane_normals.view(-1, 3, 1, 1) * XYZ_np_cropped).sum(1) * masks_cropped).sum(-1).sum(-1) / torch.clamp(masks_cropped.sum(-1).sum(-1), min=1e-4)\n plane_parameters = plane_normals * offsets.view((-1, 1))\n pass\n pass\n detections = torch.cat([detections[:, :6], plane_parameters], dim=-1)\n pass\n return detections, masks\n\ndef planeXYZModule(ranges, planes, width, height, max_depth=10):\n \"\"\"Compute plane XYZ from plane parameters\n ranges: K^(-1)x\n planes: plane parameters\n \n Returns:\n plane depthmaps\n \"\"\"\n planeOffsets = torch.norm(planes, dim=-1, keepdim=True)\n planeNormals = planes / torch.clamp(planeOffsets, min=1e-4)\n\n normalXYZ = torch.matmul(ranges, planeNormals.transpose(0, 1))\n normalXYZ[normalXYZ == 0] = 1e-4\n planeDepths = planeOffsets.squeeze(-1) / normalXYZ\n planeDepths = torch.clamp(planeDepths, min=0, max=max_depth)\n return planeDepths.unsqueeze(-1) * ranges.unsqueeze(2)\n\ndef planeDepthsModule(ranges, planes, width, height, max_depth=10):\n \"\"\"Compute coordinate maps from plane parameters\n ranges: K^(-1)x\n planes: plane parameters\n \n Returns:\n plane coordinate maps\n \"\"\"\n planeOffsets = torch.norm(planes, dim=-1, keepdim=True)\n planeNormals = planes / torch.clamp(planeOffsets, min=1e-4)\n\n normalXYZ = torch.matmul(ranges, planeNormals.transpose(0, 1))\n normalXYZ[normalXYZ == 0] = 1e-4\n planeDepths = planeOffsets.squeeze(-1) / normalXYZ\n if max_depth > 0:\n planeDepths = torch.clamp(planeDepths, min=0, max=max_depth)\n pass\n return planeDepths\n\ndef warpModuleDepth(config, camera, depth_1, features_2, extrinsics_1, extrinsics_2, width, height):\n \"\"\"Warp one feature map to another view given camera 
pose and depth\"\"\"\n padding = (width - height) // 2\n XYZ_1 = config.getRanges(camera) * depth_1[padding:-padding].unsqueeze(-1)\n warped_features, valid_mask = warpModuleXYZ(config, camera, XYZ_1.unsqueeze(2), features_2, extrinsics_1, extrinsics_2, width, height)\n return warped_features.squeeze(0), valid_mask\n\ndef warpModuleXYZ(config, camera, XYZ_1, features_2, extrinsics_1, extrinsics_2, width, height):\n \"\"\"Warp one feature map to another view given camera pose and XYZ\"\"\"\n XYZ_shape = XYZ_1.shape\n numPlanes = int(XYZ_1.shape[2])\n\n XYZ_1 = XYZ_1.view((-1, 3))\n XYZ_2 = torch.matmul(torch.matmul(torch.cat([XYZ_1, torch.ones((len(XYZ_1), 1)).cuda()], dim=-1), extrinsics_1.inverse().transpose(0, 1)), extrinsics_2.transpose(0, 1))\n validMask = XYZ_2[:, 1] > 1e-4\n U = (XYZ_2[:, 0] / torch.clamp(XYZ_2[:, 1], min=1e-4) * camera[0] + camera[2]) / camera[4] * 2 - 1\n V = (-XYZ_2[:, 2] / torch.clamp(XYZ_2[:, 1], min=1e-4) * camera[1] + camera[3]) / camera[5] * 2 - 1\n\n padding = (width - height) // 2\n grids = torch.stack([U, V], dim=-1)\n\n validMask = (validMask) & (U >= -1) & (U <= 1) & (V >= -1) & (V <= 1)\n warped_features = F.grid_sample(features_2[:, :, padding:-padding], grids.unsqueeze(1).unsqueeze(0))\n numFeatureChannels = int(features_2.shape[1])\n warped_features = warped_features.view((numFeatureChannels, height, width, numPlanes)).transpose(2, 3).transpose(1, 2).transpose(0, 1).contiguous().view((-1, int(features_2.shape[1]), height, width))\n zeros = torch.zeros((numPlanes, numFeatureChannels, (width - height) // 2, width)).cuda()\n warped_features = torch.cat([zeros, warped_features, zeros], dim=2)\n validMask = validMask.view((numPlanes, height, width))\n validMask = torch.cat([zeros[:, 1], validMask.float(), zeros[:, 1]], dim=1)\n return warped_features, validMask\n\n\ndef calcXYZModule(config, camera, detections, masks, depth_np, return_individual=False, debug_type=0):\n \"\"\"Compute a global coordinate map from plane detections\"\"\"\n ranges = config.getRanges(camera)\n ranges_ori = ranges\n zeros = torch.zeros(3, (config.IMAGE_MAX_DIM - config.IMAGE_MIN_DIM) // 2, config.IMAGE_MAX_DIM).cuda() \n ranges = torch.cat([zeros, ranges.transpose(1, 2).transpose(0, 1), zeros], dim=1)\n XYZ_np = ranges * depth_np\n\n if len(detections) == 0:\n detection_mask = torch.zeros((config.IMAGE_MAX_DIM, config.IMAGE_MAX_DIM)).cuda()\n if return_individual:\n return XYZ_np, detection_mask, []\n else:\n return XYZ_np, detection_mask\n pass\n \n plane_parameters = detections[:, 6:9]\n \n XYZ = torch.ones((3, config.IMAGE_MAX_DIM, config.IMAGE_MAX_DIM)).cuda() * 10\n depthMask = torch.zeros((config.IMAGE_MAX_DIM, config.IMAGE_MAX_DIM)).cuda()\n planeXYZ = planeXYZModule(ranges_ori, plane_parameters, width=config.IMAGE_MAX_DIM, height=config.IMAGE_MIN_DIM)\n planeXYZ = planeXYZ.transpose(2, 3).transpose(1, 2).transpose(0, 1)\n zeros = torch.zeros(3, (config.IMAGE_MAX_DIM - config.IMAGE_MIN_DIM) // 2, config.IMAGE_MAX_DIM, int(planeXYZ.shape[-1])).cuda()\n planeXYZ = torch.cat([zeros, planeXYZ, zeros], dim=1)\n\n one_hot = True \n if one_hot:\n for detectionIndex in range(len(detections)):\n mask = masks[detectionIndex]\n with torch.no_grad():\n mask_binary = torch.round(mask)\n pass\n if config.FITTING_TYPE >= 2:\n if (torch.norm(planeXYZ[:, :, :, detectionIndex] - XYZ_np, dim=0) * mask_binary).sum() / torch.clamp(mask_binary.sum(), min=1e-4) > 0.5:\n mask_binary = torch.zeros(mask_binary.shape).cuda()\n pass\n pass\n mask_binary = mask_binary * (planeXYZ[1, :, :, 
detectionIndex] < XYZ[1]).float()\n XYZ = planeXYZ[:, :, :, detectionIndex] * mask_binary + XYZ * (1 - mask_binary)\n depthMask = torch.max(depthMask, mask)\n continue\n XYZ = XYZ * torch.round(depthMask) + XYZ_np * (1 - torch.round(depthMask))\n else:\n background_mask = torch.clamp(1 - masks.sum(0, keepdim=True), min=0)\n all_masks = torch.cat([background_mask, masks], dim=0)\n all_XYZ = torch.cat([XYZ_np.unsqueeze(-1), planeXYZ], dim=-1)\n XYZ = (all_XYZ.transpose(2, 3).transpose(1, 2) * all_masks).sum(1)\n depthMask = torch.ones(depthMask.shape).cuda()\n pass\n\n if debug_type == 2:\n XYZ = XYZ_np\n pass\n\n if return_individual:\n return XYZ, depthMask, planeXYZ.transpose(2, 3).transpose(1, 2).transpose(0, 1)\n return XYZ, depthMask\n\n\n\nclass ConvBlock(torch.nn.Module):\n \"\"\"The block consists of a convolution layer, an optional batch normalization layer, and a ReLU layer\"\"\"\n def __init__(self, in_planes, out_planes, kernel_size=1, stride=1, padding=0, output_padding=0, mode='conv', use_bn=True):\n super(ConvBlock, self).__init__()\n self.use_bn = use_bn\n if mode == 'conv':\n self.conv = torch.nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, bias=not self.use_bn)\n elif mode == 'deconv':\n self.conv = torch.nn.ConvTranspose2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, output_padding=output_padding, bias=not self.use_bn)\n elif mode == 'interpolate':\n self.conv = torch.nn.Sequential(torch.nn.Upsample(scale_factor=stride, mode='nearest'), torch.nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=1, padding=padding, bias=not self.use_bn))\n elif mode == 'conv_3d':\n self.conv = torch.nn.Conv3d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, bias=not self.use_bn)\n elif mode == 'deconv_3d':\n self.conv = torch.nn.ConvTranspose3d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, output_padding=output_padding, bias=not self.use_bn)\n else:\n print('conv mode not supported', mode)\n exit(1)\n pass\n if '3d' not in mode:\n self.bn = torch.nn.BatchNorm2d(out_planes)\n else:\n self.bn = torch.nn.BatchNorm3d(out_planes)\n pass\n self.relu = torch.nn.ReLU(inplace=True)\n return\n \n def forward(self, inp):\n if self.use_bn:\n return self.relu(self.bn(self.conv(inp)))\n else:\n return self.relu(self.conv(inp))\n\nclass LinearBlock(torch.nn.Module):\n \"\"\"The block consists of a linear layer and a ReLU layer\"\"\" \n def __init__(self, in_planes, out_planes):\n super(LinearBlock, self).__init__()\n self.linear = torch.nn.Linear(in_planes, out_planes)\n self.relu = torch.nn.ReLU(inplace=True)\n return\n\n def forward(self, inp):\n return self.relu(self.linear(inp)) \n\n\ndef l2NormLossMask(pred, gt, mask, dim):\n \"\"\"L2 loss with a mask\"\"\"\n return torch.sum(torch.norm(pred - gt, dim=dim) * mask) / torch.clamp(mask.sum(), min=1)\n\ndef l2LossMask(pred, gt, mask):\n \"\"\"MSE with a mask\"\"\" \n return torch.sum(torch.pow(pred - gt, 2) * mask) / torch.clamp(mask.sum(), min=1)\n\ndef l1LossMask(pred, gt, mask):\n \"\"\"L1 loss with a mask\"\"\" \n return torch.sum(torch.abs(pred - gt) * mask) / torch.clamp(mask.sum(), min=1)\n\n\ndef invertDepth(depth, inverse=False):\n \"\"\"Invert depth or not\"\"\"\n if inverse:\n valid_mask = (depth > 1e-4).float()\n depth_inv = 1.0 / torch.clamp(depth, min=1e-4)\n return depth_inv * valid_mask\n else:\n return depth\n\n\nclass PlaneToDepth(torch.nn.Module):\n def __init__(self, normalized_K = 
True, normalized_flow = True, inverse_depth = True, W = 64, H = 48):\n\n super(PlaneToDepth, self).__init__()\n\n self.normalized_K = normalized_K\n self.normalized_flow = normalized_flow\n self.inverse_depth = inverse_depth\n\n with torch.no_grad():\n self.URANGE = ((torch.arange(W).float() + 0.5) / W).cuda().view((1, -1)).repeat(H, 1)\n self.VRANGE = ((torch.arange(H).float() + 0.5) / H).cuda().view((-1, 1)).repeat(1, W)\n self.ONES = torch.ones((H, W)).cuda()\n pass\n \n def forward(self, intrinsics, plane, return_XYZ=False):\n\n \"\"\"\n :param K1: intrinsics of 1st image, 3x3\n :param K2: intrinsics of 2nd image, 3x3\n :param depth: depth map of first image, 1 x height x width\n :param rot: rotation from first to second image, 3\n :param trans: translation from first to second, 3\n :return: normalized flow from 1st image to 2nd image, 2 x height x width\n \"\"\"\n\n with torch.no_grad():\n urange = (self.URANGE * intrinsics[4] - intrinsics[2]) / intrinsics[0]\n vrange = (self.VRANGE * intrinsics[5] - intrinsics[3]) / intrinsics[1]\n ranges = torch.stack([urange,\n self.ONES,\n -vrange], -1)\n pass\n\n planeOffset = torch.norm(plane, dim=-1)\n planeNormal = plane / torch.clamp(planeOffset.unsqueeze(-1), min=1e-4)\n depth = planeOffset / torch.clamp(torch.sum(ranges.unsqueeze(-2) * planeNormal, dim=-1), min=1e-4)\n depth = torch.clamp(depth, min=0, max=10)\n\n if self.inverse_depth:\n depth = invertDepth(depth)\n depth = depth.transpose(1, 2).transpose(0, 1)\n\n if return_XYZ:\n return depth, depth.unsqueeze(-1) * ranges\n return depth \n\nclass PlaneToDepthLayer(torch.nn.Module):\n\n def __init__(self, normalized_K = False, normalized_flow = True, inverse_depth = True):\n\n super(PlaneToDepthLayer, self).__init__()\n\n self.plane_to_depth = PlaneToDepth(normalized_K = normalized_K,\n normalized_flow = normalized_flow,\n inverse_depth = inverse_depth)\n\n def forward(self, intrinsics, plane, mask):\n\n \"\"\"\n :param K1: 3x3 if shared_K is True, otherwise K1 is nx3x3\n :param K2: 3x3 if shared_K is True, otherwise K2 is nx3x3\n :param depth: n x 1 x h x w\n :param rot: n x 3\n :param trans: n x3\n :param shared_K: if True, we share intrinsics for the depth images of the whole batch\n :return: n x 2 x h x w\n \"\"\"\n\n batch_size = plane.size(0)\n\n depths = ()\n for i in range(batch_size):\n\n depth = self.plane_to_depth(intrinsics[i], plane[i], mask[i])\n depths += (depth, )\n depth = torch.stack(depths, 0)\n return depth \n"
] | [
[
"torch.nn.Linear",
"torch.round",
"torch.cat",
"torch.stack",
"torch.nn.BatchNorm2d",
"torch.inverse",
"torch.ones",
"torch.nn.BatchNorm3d",
"torch.norm",
"torch.nn.ConvTranspose2d",
"torch.abs",
"torch.nn.Conv3d",
"torch.nn.ConvTranspose3d",
"torch.zeros",
"torch.max",
"torch.clamp",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.pow",
"torch.arange",
"torch.nn.functional.interpolate",
"torch.no_grad",
"torch.nn.Upsample"
]
] |
yujheli/MachineLearningProjects | [
"fb6b2e7c38545f87d5b9fa13bb9c6dbededd47b1"
] | [
"MatrixFactorization/hw5.py"
] | [
"import argparse\nimport numpy as np\nimport pandas as pd\nimport keras.backend as K\nfrom keras.models import Model, Sequential\nfrom keras.layers import add, Dot, Input, Dense, Lambda, Reshape, Dropout, Embedding, Concatenate\nfrom keras.regularizers import l2\nfrom keras.initializers import Zeros\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint\nfrom keras.engine.topology import Layer\nfrom keras.preprocessing.sequence import pad_sequences\nimport tensorflow as tf\nfrom sklearn.externals import joblib\n\n\ndef parse_args():\n parser = argparse.ArgumentParser('Matrix Factorization.')\n parser.add_argument('--train', required=True)\n parser.add_argument('--test', required=True)\n parser.add_argument('--dim', type=int, default=256)\n parser.add_argument('--dnn', type=int, nargs='*')\n parser.add_argument('--norm', type=int)\n\n\n return parser.parse_args()\n\n\n\n\ndef read_data(trainfile, testfile):\n traindf, testdf = pd.read_csv(trainfile), pd.read_csv(testfile)\n\n traindf['test'] = 0\n testdf['test'] = 1\n\n df = pd.concat([traindf, testdf])\n\n id2user = df['UserID'].unique()\n id2movie = df['MovieID'].unique()\n\n user2id = {k: id for id, k in enumerate(id2user)}\n movie2id = {k: id for id, k in enumerate(id2movie)}\n\n df['UserID'] = df['UserID'].apply(lambda x: user2id[x])\n df['MovieID'] = df['MovieID'].apply(lambda x: movie2id[x])\n\n df_train = df.loc[df['test'] == 0]\n\n return df_train[['UserID', 'MovieID']].values, df_train['Rating'].values, df[['UserID', 'MovieID']].values, user2id, movie2id\n\n\ndef rmse(y_true, y_pred):\n y_pred = K.clip(y_pred, 1.0, 5.0)\n return K.sqrt(K.mean(K.pow(y_true - y_pred, 2)))\n\n\nclass WeightedAvgOverTime(Layer):\n def __init__(self, **kwargs):\n self.supports_masking = True\n super(WeightedAvgOverTime, self).__init__(**kwargs)\n \n def call(self, x, mask=None):\n if mask is not None:\n mask = K.cast(mask, K.floatx())\n mask = K.expand_dims(mask, axis=-1)\n s = K.sum(mask, axis=1)\n if K.equal(s, K.zeros_like(s)) is None:\n return K.mean(x, axis=1)\n else:\n return K.cast(K.sum(x * mask, axis=1) / K.sqrt(s), K.floatx())\n else:\n return K.mean(x, axis=1)\n\n def compute_output_shape(self, input_shape):\n return (input_shape[0], input_shape[-1])\n\n def compute_mask(self, x, mask=None):\n return None\n\n def get_config(self):\n base_config = super(WeightedAvgOverTime, self).get_config()\n return dict(list(base_config.items()))\n\n\ndef build_MF(num_users, num_movies, dim, feedback_u, feedback_m):\n u_input = Input(shape=(1,))\n U = Embedding(num_users, dim, embeddings_regularizer=l2(0.00001))(u_input)\n U = Reshape((dim,))(U)\n U = Dropout(0.1)(U)\n\n m_input = Input(shape=(1,))\n M = Embedding(num_movies, dim, embeddings_regularizer=l2(0.00001))(m_input)\n M = Reshape((dim,))(M)\n M = Dropout(0.1)(M)\n\n \n # F_u = Reshape((feedback_u.shape[1],))(Embedding(num_users, feedback_u.shape[1], trainable=False, weights=[feedback_u])(u_input))\n # F_u = Embedding(num_movies+1, dim, embeddings_initializer=Zeros(), embeddings_regularizer=l2(0.00001), mask_zero=True)(F_u)\n # F_u = Dropout(0.1)(F_u)\n # F_u = WeightedAvgOverTime()(F_u)\n\n # U = add([U, F_u])\n \n # F_m = Reshape((feedback_m.shape[1],))(Embedding(num_movies, feedback_m.shape[1], trainable=False, weights=[feedback_m])(m_input))\n # F_m = Embedding(num_users+1, dim, embeddings_initializer=Zeros(), embeddings_regularizer=l2(0.00001), mask_zero=True)(F_m)\n # F_m = Dropout(0.1)(F_m)\n # F_m = WeightedAvgOverTime()(F_m)\n\n # M = add([M, F_m])\n \n pred = Dot(axes=-1)([U, M])\n 
U_bias = Reshape((1,))(Embedding(num_users, 1, embeddings_regularizer=l2(0.00001))(u_input))\n    M_bias = Reshape((1,))(Embedding(num_movies, 1, embeddings_regularizer=l2(0.00001))(m_input))\n\n    pred = add([pred, U_bias, M_bias])\n    pred = Lambda(lambda x: x + K.constant(3.5817, dtype=K.floatx()))(pred)\n    \n    return Model(inputs=[u_input, m_input], outputs=[pred])\n\n\n\ndef build_DNN(num_users, num_movies, dim, feedback_u, feedback_m, dnn):\n    u_input = Input(shape=(1,))\n    U = Embedding(num_users, dim, embeddings_regularizer=l2(0.00001))(u_input)\n    U = Reshape((dim,))(U)\n    U = Dropout(0.1)(U)\n\n    m_input = Input(shape=(1,))\n    M = Embedding(num_movies, dim, embeddings_regularizer=l2(0.00001))(m_input)\n    M = Reshape((dim,))(M)\n    M = Dropout(0.1)(M)\n\n    \n    pred = Concatenate()([U, M])\n    for units in dnn:\n        pred = Dense(units, activation='relu')(pred)\n        pred = Dropout(0.3)(pred)\n\n    pred = Dense(1, activation='relu')(pred)\n    \n    return Model(inputs=[u_input, m_input], outputs=[pred])\n\n\ndef get_feedback(X, num_users, num_movies):\n    feedback_u = [[] for u in range(num_users)]\n    feedback_m = [[] for i in range(num_movies)]\n\n    for u, m in zip(X[:, 0], X[:, 1]):\n        feedback_u[u].append(m+1)\n        feedback_m[m].append(u+1)\n\n    return feedback_u, feedback_m\n\nargs = parse_args()\n\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth=True\nK.tensorflow_backend.set_session(tf.Session(config=config))\n\nX_train, Y_train, X, user2id, movie2id = read_data(args.train, args.test)\n\nnorm_flag = 0\nif args.norm is not None:\n    Y_train = Y_train-np.mean(Y_train)\n    Y_train = Y_train/np.std(Y_train)\n    print(\"Doing normalizing\")\n    norm_flag = 1\n\n\nnum_users, num_movies = len(user2id), len(movie2id)\n\nfeedback_u, feedback_m = get_feedback(X, num_users, num_movies)\nfeedback_u, feedback_m = pad_sequences(feedback_u), pad_sequences(feedback_m)\n\nnp.save('user2id', user2id)\nnp.save('movie2id', movie2id)\n\nnp.random.seed(5)\nindices = np.random.permutation(len(X_train))\nX_train, Y_train = X_train[indices], Y_train[indices]\n\ndim = args.dim\n\n\n\ncallbacks = []\ncallbacks.append(EarlyStopping(monitor='val_rmse', patience=100))\n\n\nif args.dnn is not None:\n    model = build_DNN(num_users, num_movies, dim, feedback_u, feedback_m, args.dnn)\n    callbacks.append(ModelCheckpoint('model_dnn_'+str(args.dim)+'_'+str(norm_flag)+'.h5', monitor='val_rmse', save_best_only=True))\n    print(\"Building DNN\")\nelse:\n    model = build_MF(num_users, num_movies, dim, feedback_u, feedback_m)\n    callbacks.append(ModelCheckpoint('model_MF_'+str(args.dim)+'_'+str(norm_flag)+'.h5', monitor='val_rmse', save_best_only=True))\n    print(\"Building MF\")\n\nmodel.summary()\n\n\n\nmodel.compile(loss='mse', optimizer='adam', metrics=[rmse])\nhis = model.fit([X_train[:, 0], X_train[:, 1]], Y_train, epochs=50, batch_size=128, validation_split=0.1, callbacks=callbacks) \n\n# joblib.dump(t,'pun_tokenizer.pkl')\n# joblib.dump(his,'his_dim_'+str(args.dim)+'.pkl')\n\nif args.dnn is not None:\n    joblib.dump(his.history,'his_dnn_'+str(args.dim)+'_'+str(norm_flag)+'.pkl')\nelse:\n    joblib.dump(his.history,'his_MF_'+str(args.dim)+'_'+str(norm_flag)+'.pkl')\n\n \n\n\n"
] | [
[
"numpy.random.seed",
"tensorflow.Session",
"numpy.save",
"numpy.mean",
"tensorflow.ConfigProto",
"numpy.std",
"pandas.concat",
"pandas.read_csv"
]
] |
gseba/Missions_to_Mars | [
"a131078004241e4f3c019253dc4dfd17fc7ccf8a"
] | [
"scrape_mars.py"
] | [
"# Import dependencies\nimport requests\nfrom bs4 import BeautifulSoup as bs\nfrom splinter import Browser\nimport time\nimport re\nimport pandas as pd\n\n# Define a function the visits a webpage and returns a beautful soup\ndef get_soup(url):\n \n executable_path = {\"executable_path\": \"chromedriver.exe\"}\n browser = Browser('chrome', **executable_path, headless=True)\n \n # Visit with selenium\n browser.visit(url)\n\n # Wait 1 second to allow the page to load\n time.sleep(3)\n\n # Save the browser's html as text\n html = browser.html\n\n # Convert html text to beautiful soup object\n soup = bs(html, 'html.parser')\n\n # Print the webpage title\n print(soup.title.text +'\\n')\n \n return soup\n\n# Function that scrapes News Title and Summary from the Nasa Mars News site and returns a list of dictionaries\ndef get_news(base_url, args):\n \n url = base_url + args\n \n # Get soup\n soup = get_soup(url)\n\n # Identify the Title and Summary via div.content_title and div.article_teaser_body\n results = soup.find_all('div', class_=\"list_text\")\n\n # news is a list of dictionaries\n news = []\n # Iterate over the results \n for result in results:\n\n # Only Retrieve results if they have a summary text\n try:\n date = result.find('div', class_=\"list_date\").text\n summary = result.find('div', class_=\"article_teaser_body\").text\n headline = result.find('div', class_=\"content_title\").get_text()\n #print(f\"{date} : {headline}\\n{summary}\\n\\n\")\n\n # Save results to a dictionary\n dictionary = {}\n dictionary= {\n 'date' : date,\n 'headline' : headline,\n 'summary' : summary\n }\n\n # Append list with dictionary\n news.append(dictionary)\n\n # If the article is missing content, skip it\n except:\n print(\"Nothing found\")\n\n return news\n\n# Function which scrapes the JPL 'Featured Image' and returns the url to the image\ndef get_featured_img(base_url, args):\n \n url = base_url + args\n \n soup = get_soup(url)\n\n # Identify the featured image url via section.centered_text clearfix main_feature primary_media_feature single\n result = soup.find_all('section', class_=\"centered_text clearfix main_feature primary_media_feature single\")[0]\n\n # Split the string with '(' or ')'\n string = re.split('[(|)]', result.article['style'])[1]\n\n # Strip the single quotes\n string = string[1:-1]\n\n # Combine with base url\n image_url = base_url + string\n\n return image_url\n\n# A function which scrapes Mars weather data from the Mars Weather Twitter Page and returns the latest weather\ndef get_weather(base_url, args):\n \n url = base_url + args\n \n soup = get_soup(url)\n\n # Identify the weather report via tweet -> tweet-text\n results = soup.find_all('div', class_=\"tweet\")\n\n # The latest tweet is in the first result; save in a variable\n mars_weather = results[0].find('p', class_=\"tweet-text\").text\n\n # Remove the link off the end\n mars_weather = mars_weather.split('pic.twitter')[0]\n\n # The first result contains the latest tweet.\n print('Latest weather report:''\\n\\n' + mars_weather + '\\n\\n')\n\n ## Print all tweets for good measure\n #for result in results:\n # print(result.find('p', class_=\"tweet-text\").get_text())\n \n return mars_weather\n\n# Function which scapes Mars facts from space-facts.com/mars and returns a dataframe\ndef get_facts(base_url, args):\n \n url = base_url + args\n \n # Scrape for data tables with pandas\n tables = pd.read_html(url)\n\n fact_table = tables[0].to_html()\n \n # The relevant table is the first table\n return fact_table\n\n# Function which 
scrapes images of the Mars Hemispheres and returns a list of dicntionaries of titles and urls\ndef get_hemis(base_url, args):\n \n url = base_url + args\n\n # Get soup of the main page\n soup = get_soup(url)\n\n # Find urls to high-res images via section -> results-accordian -> div.item, a.href.text \n results = soup.find_all('section', {'id':'results-accordian'})[0].find_all('div', class_='item')\n inter_urls = []\n\n # Iterate over each link to the sub-page\n for result in results:\n try:\n # Find urls via ->a.hef\n inter_urls.append(base_url+result.a['href'])\n except:\n print(\"null\")\n\n # \n hemi_list = []\n for url in inter_urls:\n # Visit each link\n soup = get_soup(url)\n try:\n img_url = soup.find_all('div', class_='downloads')[0].find_all('li')[0].a['href']\n img_title = soup.title.text\n img_title = img_title.split(\" |\")[0]\n dictionary = {\n 'title' : img_title,\n 'img_url' : img_url\n }\n hemi_list.append(dictionary)\n except:\n print(\"null\")\n\n return hemi_list\n\ndef scrape():\n # Get news about Mars\n url = 'https://mars.nasa.gov/news/'\n args = ''\n news = get_news(url,'')\n #print(news)\n\n # Get featured image\n base_url = 'https://www.jpl.nasa.gov'\n args = '/spaceimages/?search=&category=Mars'\n image_url = get_featured_img(base_url, args)\n\n # Get weather information\n url = 'https://twitter.com/marswxreport?lang=en'\n args = ''\n mars_weather = get_weather(url, args)\n\n # Get Mars facts\n url = 'https://space-facts.com/mars'\n args = ''\n fact_table = get_facts(url,args)\n\n # Get images of hemispheres\n base_url = 'https://astrogeology.usgs.gov/'\n args = 'search/results?q=hemisphere+enhanced&k1=target&v1=Mars'\n hemi_list = get_hemis(base_url, args)\n\n # Save scraping results to a dictionary\n scrape_dictionary = {\n 'news' : news[0],\n 'featured_image' : image_url,\n 'mars_weather' : mars_weather,\n 'mars_facts' : fact_table,\n 'hemispheres' : hemi_list\n }\n\n return scrape_dictionary"
] | [
[
"pandas.read_html"
]
] |
annukamat/edu-live | [
"de06f7bad6a395c546ecf85d66e08bb8de2b3255"
] | [
"python-stuff/data-wrangling/labels-maker.py"
] | [
"import pandas as pd\nimport numpy as np\nimport cv2\n\n\ndata_file = 'thinking.csv'\n\n\"\"\"\nlabels must be a list of segments where the time serie class is true \n(i.e. the list of segments where the drawer has \"hands down\")\n\"\"\"\n\nlabels = [(29,96),(122,145),(162,185),(209,220),(244,261),(339,454)]\n\ndf = pd.read_csv('data/unlabeled_csv/'+data_file,index_col=0)\ndf[\"label\"] = False\n\nfor interval in labels:\n start,end = interval\n df.loc[start:end,\"label\"] = True\n\ndf.to_csv('data/raw_labeled/'+data_file)\n\ngood_points = df.loc[df['label'] == True][['x','y']]\npts = good_points.to_numpy().astype(int)\n\nimg = np.zeros((720,1280), dtype=np.uint8)\nimg[pts.T[1],pts.T[0]]=255\nimg = cv2.flip(img, 1)\n\ncv2.imshow('frame', img)\nkey = cv2.waitKey(0)\ncv2.destroyAllWindows()"
] | [
[
"pandas.read_csv",
"numpy.zeros"
]
] |
onetask-ai/sequence-learn | [
"6f0fc2974ba591167ee9778a93e2ad833693ab55"
] | [
"sequencelearn/point_tagger.py"
] | [
"from sklearn.gaussian_process import GaussianProcessClassifier\nfrom sklearn.svm import SVC\nfrom sequencelearn import CONSTANT_OUTSIDE, PointTagger\n\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import AdaBoostClassifier, RandomForestClassifier\n\n\nclass SupportVectorTagger(PointTagger):\n def __init__(self, constant_outside=CONSTANT_OUTSIDE, **kwargs):\n super().__init__(constant_outside)\n self.model = SVC(**kwargs)\n\n\nclass LogisticTagger(PointTagger):\n def __init__(self, constant_outside=CONSTANT_OUTSIDE, **kwargs):\n super().__init__(constant_outside)\n self.model = LogisticRegression(**kwargs)\n\n\nclass NearestNeighborTagger(PointTagger):\n def __init__(self, constant_outside=CONSTANT_OUTSIDE, **kwargs):\n super().__init__(constant_outside)\n self.model = KNeighborsClassifier(**kwargs)\n\n\nclass BayesTagger(PointTagger):\n def __init__(self, constant_outside=CONSTANT_OUTSIDE, **kwargs):\n super().__init__(constant_outside)\n self.model = GaussianNB(**kwargs)\n\n\nclass GaussianTagger(PointTagger):\n def __init__(self, constant_outside=CONSTANT_OUTSIDE, **kwargs):\n super().__init__(constant_outside)\n self.model = GaussianProcessClassifier(**kwargs)\n\n\nclass TreeTagger(PointTagger):\n def __init__(self, constant_outside=CONSTANT_OUTSIDE, **kwargs):\n super().__init__(constant_outside)\n self.model = DecisionTreeClassifier(**kwargs)\n\n\nclass ForestTagger(PointTagger):\n def __init__(self, constant_outside=CONSTANT_OUTSIDE, **kwargs):\n super().__init__(constant_outside)\n self.model = RandomForestClassifier(**kwargs)\n\n\nclass AdaTagger(PointTagger):\n def __init__(self, constant_outside=CONSTANT_OUTSIDE, **kwargs):\n super().__init__(constant_outside)\n self.model = AdaBoostClassifier(**kwargs)\n"
] | [
[
"sklearn.gaussian_process.GaussianProcessClassifier",
"sklearn.ensemble.AdaBoostClassifier",
"sklearn.ensemble.RandomForestClassifier",
"sklearn.neighbors.KNeighborsClassifier",
"sklearn.naive_bayes.GaussianNB",
"sklearn.svm.SVC",
"sklearn.linear_model.LogisticRegression",
"sklearn.tree.DecisionTreeClassifier"
]
] |
g2des/17700_project | [
"ccb7ff64574aa8e649d8a3e743755e61b070d1f9"
] | [
"tests/tf_user.py"
] | [
"import csv, logging, requests, re, numpy as np\nfrom locust import HttpUser, TaskSet, task, constant_pacing ##ADD THIS\nimport json\nimport pyonmttok\nimport tensorflow as tf\n\n\nUSER_CREDENTIALS = None\nsentences = None\nnp.random.seed(17700)\n\nwith open('./data/training/news-commentary-v9.fr-en.en') as file:\n sentences = file.readlines()\nlogging.info(\"Reading sentences completed.\")\n\nclass LoginWithUniqueUsersSteps(TaskSet):\n\n\n tokenizer = pyonmttok.Tokenizer(\"none\", sp_model_path=\"ende/1/assets.extra/wmtende.model\")\n\n regex = re.compile('(?<=\\[)(.*?)(?=\\])')\n ## ADD THIS\n MAX_LENGTH = 10\n ## STOP\n\n\n def pad_batch(self, batch_tokens):\n \"\"\"Pads a batch of tokens.\"\"\"\n lengths = [len(tokens) for tokens in batch_tokens]\n max_length = max(lengths)\n for tokens, length in zip(batch_tokens, lengths):\n if max_length > length:\n tokens += [\"\"] * (max_length - length)\n return batch_tokens, lengths, max_length\n \n def on_start(self):\n self.user_id = np.random.randint(len(USER_CREDENTIALS))\n self.users_requests = USER_CREDENTIALS[self.user_id]\n logging.info(f\"START : Created user with id {self.user_id}. Remaining {len(USER_CREDENTIALS)} users\")\n ## ADD THIS\n self.requests = []\n for i in self.users_requests:\n # sentence = sentences[int(i)%len(self.users_requests)].split()\n sentence = sentences[int(i)%len(sentences)].split()\n if len(sentence) < self.MAX_LENGTH:\n continue\n else:\n self.requests.append(\" \".join(sentence[:self.MAX_LENGTH]))\n ## STOP\n # [sentences[int(i)] for i in self.users_requests]\n logging.info(f\"START : Total Number of requests : {len(self.requests)}\")\n\n @task\n def translate(self):\n if len(self.requests) > 0:\n sentence = [self.requests.pop()]\n batch_input = [self.tokenizer.tokenize(text)[0] for text in sentence]\n batch_tokens, lengths, max_length = self.pad_batch(batch_input)\n batch_size = len(lengths)\n convert_tf = batch_tokens\n request = {\"inputs\": {\"tokens\":convert_tf, \"length\":lengths}}\n response = self.client.post(\"/invocations\", json=request)\n logging.info(f\"TASK : Sent with request ##{sentence}## with ##{response.json()}##\")\n logging.warn(f\"TASK : Number of requests remaining {len(self.requests)}\")\n else:\n self.interrupt()\n\n def on_stop(self):\n logging.info(\"STOP : Stopping client\")\n\n\nclass LoginWithUniqueUsersTest(HttpUser):\n tasks = {LoginWithUniqueUsersSteps}\n host = 'http://ec2-3-132-170-187.us-east-2.compute.amazonaws.com:8080'\n ## ADD THIS\n wait_time = constant_pacing(1.0)\n ## STOP\n # # sock = None\n def __init__(self, *args, **kwargs):\n super(LoginWithUniqueUsersTest, self).__init__( *args, **kwargs)\n logging.info(f\"Logging onto {self.host}\")\n global USER_CREDENTIALS\n if (USER_CREDENTIALS == None):\n with open('./data/users_random_sentences.csv', 'r') as f:\n reader = csv.reader(f)\n USER_CREDENTIALS = list(reader)\n logging.info(\"Reading sentences and user list completed\")"
] | [
[
"numpy.random.seed"
]
] |
yuyay/ASNG-NAS | [
"a13c4828cfa9acc1eebd598dc1f88ee18e152159"
] | [
"inpainting/main_inpainting_int.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport csv\nimport argparse\nimport numpy as np\n\nimport torch\nimport torch.utils\nimport torchvision\nimport torch.backends.cudnn as cudnn\n\nfrom common import utils\nfrom common.utils import RandomPixelMasking, RandomHalfMasking, CenterMasking\nfrom common.eval_test import evaluate\nfrom inpainting_int.cae_model import ProbablisticCAE\nfrom inpainting_int.train import arch_search_valid, train\n\n\ndef load_data(path='../data/', data_name='celebA', img_size=64):\n print('Loading ' + data_name + 'data...')\n train_transform, test_transform = utils.data_transforms(img_size=img_size)\n\n if data_name != 'svhn':\n # The image data should be contained in sub folders (e.g., ../data/celebA/train/image/aaa.png)\n train_data = torchvision.datasets.ImageFolder('{}{}/train'.format(path, data_name), transform=train_transform)\n test_data = torchvision.datasets.ImageFolder('{}{}/test'.format(path, data_name), transform=test_transform)\n else:\n train_data = torchvision.datasets.SVHN(path, split='train', transform=train_transform, download=True)\n test_data = torchvision.datasets.SVHN(path, split='test', transform=test_transform, download=True)\n # extra_data = torchvision.datasets.SVHN(path, split='extra', transform=train_transform, download=True)\n # train_data = torch.utils.data.ConcatDataset([train_data, extra_data])\n\n print('train_data_size: %d, test_data_size: %d' % (len(train_data), len(test_data)))\n return train_data, test_data\n\n\n# Save result data\nclass SaveResult(object):\n def __init__(self, res_file_name='result.csv'):\n self.res_file_name = res_file_name\n # header\n with open(self.res_file_name, 'w') as fp:\n writer = csv.writer(fp, lineterminator='\\n')\n writer.writerow(['exp_index', 'train_time', 'MLE_MSE', 'MLE_PSNR', 'MLE_SSIM', 'det_param', 'max_param',\n 'node_num', 'cat_d', 'cat_valid_d', 'n_cat', 'int_d', 'n_int', 'active_num', 'net_str'])\n\n def save(self, exp_index, model, train_time, res):\n dist = model.asng\n params = np.sum(np.prod(param.size()) for param in model.parameters())\n net_str = model.mle_network_string(sep=' ')\n with open(self.res_file_name, 'a') as fp:\n writer = csv.writer(fp, lineterminator='\\n')\n writer.writerow([exp_index, train_time, res['MLE_MSE'], res['MLE_PSNR'], res['MLE_SSIM'],\n model.get_params_mle(), params, len(model.module_info), dist.d_cat, dist.valid_d_cat,\n dist.n_cat, dist.d_int, dist.n_int, int(model.is_active.sum()), net_str])\n\n\ndef experiment(exp_num=1, start_id=0, data_name='celebA', dataset_path='../data/', corrupt_type='RandomPixel', gpu_id=0,\n init_delta_factor=0.0, batchsize=16, train_ite=200000, retrain_ite=500000, out_dir='./result/'):\n\n if gpu_id >= 0:\n torch.cuda.set_device(gpu_id)\n cudnn.benchmark = True\n cudnn.enabled = True\n\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n\n # Corrupt function\n if corrupt_type == 'RandomPixel':\n corrupt_func = RandomPixelMasking()\n elif corrupt_type == 'RandomHalf':\n corrupt_func = RandomHalfMasking()\n elif corrupt_type == 'Center':\n corrupt_func = CenterMasking()\n else:\n print('Invalid corrupt function type!')\n return\n\n train_res = SaveResult(res_file_name=out_dir + 'train_result.csv')\n retrain_res = SaveResult(res_file_name=out_dir + 'retrain_result.csv')\n with open(out_dir + 'description.txt', 'w') as o:\n o.write('data_name: ' + data_name + '\\n')\n o.write('corrupt_func: ' + corrupt_type + '\\n')\n o.write('batchsize: %d\\n' % batchsize)\n o.write('train_ite: %d\\n' % train_ite)\n 
o.write('retrain_ite: %d\\n' % retrain_ite)\n\n train_data, test_data = load_data(path=dataset_path, data_name=data_name, img_size=64)\n ch_size = train_data[0][0].shape[0]\n\n for n in np.arange(start_id, start_id + exp_num):\n prefix = out_dir + '{:02d}_'.format(n)\n\n print('Architecture Search...')\n nn_model = ProbablisticCAE(in_ch_size=ch_size, out_ch_size=ch_size, row_size=1, col_size=20, level_back=5,\n downsample=True, k_sizes=(1, 3, 5), ch_range=(64, 256), c=None,\n delta_init_factor=init_delta_factor)\n optimizer = torch.optim.SGD(nn_model.parameters(), lr=0.025, momentum=0.9, weight_decay=0., nesterov=False)\n lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, train_ite)\n\n # Training\n period = {'max_ite': train_ite, 'save': train_ite/50, 'verbose_ite': 100}\n n_model, train_time = \\\n arch_search_valid(nn_model, train_data, test_data, corrupt_func, optimizer, lr_scheduler, clip_value=5.,\n batchsize=batchsize, lam=2, valid_rate=0.5, gpu_id=gpu_id, period=period,\n out_model=prefix + 'trained_model.pt', log_file=prefix + 'train_log.csv')\n\n # Testing\n res = evaluate(nn_model, test_data, corrupt_func, gpu_id=gpu_id, batchsize=batchsize,\n img_out_dir=prefix+'trained_model_out_img/')\n\n train_res.save(n, nn_model, train_time, res) # Save result\n\n # Load theta from log file\n #import pandas as pd\n #df = pd.read_csv(prefix + 'train_log.csv')\n #theta = np.array(df.iloc[-1, 14:])\n #nn_model = ProbablisticCAE(in_ch_size=ch_size, out_ch_size=ch_size, row_size=1, col_size=20, level_back=5,\n # downsample=True, k_sizes=(1, 3, 5), ch_nums=(64, 128, 256), skip=(True, False),\n # M=None)\n #nn_model.asng.load_theta_from_log(theta)\n\n print('Retraining...')\n nn_model = ProbablisticCAE(in_ch_size=ch_size, out_ch_size=ch_size, row_size=1, col_size=20, level_back=5,\n downsample=True, k_sizes=(1, 3, 5), ch_range=(64, 256), c=nn_model.asng.mle())\n optimizer = torch.optim.Adam(nn_model.parameters(), lr=0.001, betas=(0.9, 0.999))\n lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[int(retrain_ite*2/5),\n int(retrain_ite*4/5)], gamma=0.1)\n\n # Re-training\n period = {'max_ite': retrain_ite, 'save': retrain_ite/50, 'verbose_ite': 100}\n nn_model, train_time = train(nn_model, train_data, test_data, corrupt_func, optimizer, lr_scheduler,\n clip_value=5., batchsize=batchsize, gpu_id=gpu_id, period=period,\n out_model=prefix + 'retrained_model.pt', log_file=prefix + 'retrain_log.csv')\n\n # Testing\n res = evaluate(nn_model, test_data, corrupt_func, gpu_id=gpu_id, batchsize=batchsize,\n img_out_dir=prefix + 'retrained_model_out_img/')\n\n retrain_res.save(n, nn_model, train_time, res) # Save result\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser(description='ASNG-NAS (Int) for Inpainting')\n parser.add_argument('--exp_id_start', '-s', type=int, default=0, help='Starting index number of experiment')\n parser.add_argument('--exp_num', '-e', type=int, default=1, help='Number of experiments')\n parser.add_argument('--data_path', '-p', default='../data/', help='Data path')\n parser.add_argument('--data_name', '-d', default='celebA', help='Data name (celebA / cars / svhn)')\n parser.add_argument('--corrupt_type', '-c', default='RandomPixel',\n help='Corrupt function (RandomPixel / RandomHalf / Center)')\n parser.add_argument('--gpu_id', '-g', type=int, default=0, help='GPU ID')\n\n parser.add_argument('--init_delta_factor', '-f', type=float, default=0.0, help='Init delta factor')\n parser.add_argument('--batch_size', '-b', 
type=int, default=16, help='Mini-batch size')\n parser.add_argument('--train_ite', '-t', type=int, default=50000,\n help='Maximum number of training iterations (W updates)')\n parser.add_argument('--retrain_ite', '-r', type=int, default=500000,\n help='Maximum number of re-training iterations (W updates)')\n parser.add_argument('--out_dir', '-o', default='./result/', help='Output directory')\n args = parser.parse_args()\n\n start_id = args.exp_id_start\n exp_num = args.exp_num\n data_path = args.data_path\n data_name = args.data_name\n corrupt_type = args.corrupt_type\n gpu_id = args.gpu_id\n init_delta_factor = args.init_delta_factor\n batch_size = args.batch_size\n train_ite = args.train_ite\n retrain_ite = args.retrain_ite\n out_dir = args.out_dir + data_name + '_' + corrupt_type + '/'\n\n experiment(exp_num=exp_num, start_id=start_id, data_name=data_name, dataset_path=data_path,\n corrupt_type=corrupt_type, gpu_id=gpu_id, init_delta_factor=init_delta_factor, batchsize=batch_size,\n train_ite=train_ite, retrain_ite=retrain_ite, out_dir=out_dir)\n"
] | [
[
"torch.optim.lr_scheduler.CosineAnnealingLR",
"torch.cuda.set_device",
"numpy.arange"
]
] |
bezirganyan/Occup-R2N2 | [
"9adf6d0a9cc6f884fc17c80b24e72060dbacf3c1"
] | [
"im2mesh/occupr2n2/models/decoder.py"
] | [
"\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom im2mesh.layers import (\n ResnetBlockFC, CResnetBlockConv1d,\n CBatchNorm1d, CBatchNorm1d_legacy,\n ResnetBlockConv1d\n)\n\n\nclass Decoder(nn.Module):\n ''' Decoder class.\n\n It does not perform any form of normalization.\n\n Args:\n dim (int): input dimension\n z_dim (int): dimension of latent code z\n c_dim (int): dimension of latent conditioned code c\n hidden_size (int): hidden size of Decoder network\n leaky (bool): whether to use leaky ReLUs\n '''\n\n def __init__(self, dim=3, z_dim=128, c_dim=128,\n hidden_size=128, leaky=False):\n super().__init__()\n self.z_dim = z_dim\n self.c_dim = c_dim\n\n # Submodules\n self.fc_p = nn.Linear(dim, hidden_size)\n\n if not z_dim == 0:\n self.fc_z = nn.Linear(z_dim, hidden_size)\n\n if not c_dim == 0:\n self.fc_c = nn.Linear(c_dim, hidden_size)\n\n self.block0 = ResnetBlockFC(hidden_size)\n self.block1 = ResnetBlockFC(hidden_size)\n self.block2 = ResnetBlockFC(hidden_size)\n self.block3 = ResnetBlockFC(hidden_size)\n self.block4 = ResnetBlockFC(hidden_size)\n\n self.fc_out = nn.Linear(hidden_size, 1)\n\n if not leaky:\n self.actvn = F.relu\n else:\n self.actvn = lambda x: F.leaky_relu(x, 0.2)\n\n def forward(self, p, z, c=None, **kwargs):\n batch_size, T, D = p.size()\n\n net = self.fc_p(p)\n\n if self.z_dim != 0:\n net_z = self.fc_z(z).unsqueeze(1)\n net = net + net_z\n\n if self.c_dim != 0:\n net_c = self.fc_c(c).unsqueeze(1)\n net = net + net_c\n\n net = self.block0(net)\n net = self.block1(net)\n net = self.block2(net)\n net = self.block3(net)\n net = self.block4(net)\n\n out = self.fc_out(self.actvn(net))\n out = out.squeeze(-1)\n\n return out\n\n\nclass DecoderCBatchNorm(nn.Module):\n ''' Decoder with conditional batch normalization (CBN) class.\n\n Args:\n dim (int): input dimension\n z_dim (int): dimension of latent code z\n c_dim (int): dimension of latent conditioned code c\n hidden_size (int): hidden size of Decoder network\n leaky (bool): whether to use leaky ReLUs\n legacy (bool): whether to use the legacy structure\n '''\n\n def __init__(self, dim=3, z_dim=128, c_dim=128,\n hidden_size=256, leaky=False, legacy=False, n_classes=1, instance_loss=False):\n super().__init__()\n #print('using sigmoid')\n self.z_dim = z_dim\n if not z_dim == 0:\n self.fc_z = nn.Linear(z_dim, hidden_size)\n\n self.fc_p = nn.Conv1d(dim, hidden_size, 1)\n self.block0 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)\n self.block1 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)\n self.block2 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)\n self.block3 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)\n self.block4 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)\n\n if not legacy:\n self.bn = CBatchNorm1d(c_dim, hidden_size)\n else:\n self.bn = CBatchNorm1d_legacy(c_dim, hidden_size)\n\n self.instance_loss = instance_loss\n\n self.fc_out = nn.Conv1d(hidden_size, n_classes, 1)\n self.fc_vote = None\n\n if self.instance_loss:\n self.fc_vote = nn.Conv1d(hidden_size, 3, 1)\n\n if not leaky:\n self.actvn = F.relu\n else:\n self.actvn = lambda x: F.leaky_relu(x, 0.2)\n\n def forward(self, p, z, c, **kwargs):\n p = p.transpose(1, 2).float()\n #batch_size, D, T = p.size()\n net = self.fc_p(p)\n\n if self.z_dim != 0:\n net_z = self.fc_z(z).unsqueeze(2)\n net = net + net_z\n\n net = self.block0(net, c)\n net = self.block1(net, c)\n net = self.block2(net, c)\n net = self.block3(net, c)\n net = self.block4(net, c)\n\n lgt = self.actvn(self.bn(net, c))\n out = 
self.fc_out(lgt)\n #sigmoid\n #out = F.sigmoid(out)\n out = out.squeeze(1).squeeze(0)\n if self.instance_loss:\n vote = self.fc_vote(lgt)\n vote = vote.squeeze(1).squeeze(0).T\n return out, vote\n\n print(out.shape)\n return out\n\n\nclass DecoderCBatchNorm2(nn.Module):\n ''' Decoder with CBN class 2.\n\n It differs from the previous one in that the number of blocks can be\n chosen.\n\n Args:\n dim (int): input dimension\n z_dim (int): dimension of latent code z\n c_dim (int): dimension of latent conditioned code c\n hidden_size (int): hidden size of Decoder network\n leaky (bool): whether to use leaky ReLUs\n n_blocks (int): number of ResNet blocks\n '''\n\n def __init__(self, dim=3, z_dim=0, c_dim=128,\n hidden_size=256, n_blocks=5):\n super().__init__()\n self.z_dim = z_dim\n if z_dim != 0:\n self.fc_z = nn.Linear(z_dim, c_dim)\n\n self.conv_p = nn.Conv1d(dim, hidden_size, 1)\n self.blocks = nn.ModuleList([\n CResnetBlockConv1d(c_dim, hidden_size) for i in range(n_blocks)\n ])\n\n self.bn = CBatchNorm1d(c_dim, hidden_size)\n self.conv_out = nn.Conv1d(hidden_size, 1, 1)\n self.actvn = nn.ReLU()\n\n def forward(self, p, z, c, **kwargs):\n p = p.transpose(1, 2)\n batch_size, D, T = p.size()\n net = self.conv_p(p)\n\n if self.z_dim != 0:\n c = c + self.fc_z(z)\n\n for block in self.blocks:\n net = block(net, c)\n\n out = self.conv_out(self.actvn(self.bn(net, c)))\n out = out.squeeze(1)\n\n return out\n\n\nclass DecoderCBatchNormNoResnet(nn.Module):\n ''' Decoder CBN with no ResNet blocks class.\n\n Args:\n dim (int): input dimension\n z_dim (int): dimension of latent code z\n c_dim (int): dimension of latent conditioned code c\n hidden_size (int): hidden size of Decoder network\n leaky (bool): whether to use leaky ReLUs\n '''\n\n def __init__(self, dim=3, z_dim=128, c_dim=128,\n hidden_size=256, leaky=False):\n super().__init__()\n self.z_dim = z_dim\n if not z_dim == 0:\n self.fc_z = nn.Linear(z_dim, hidden_size)\n\n self.fc_p = nn.Conv1d(dim, hidden_size, 1)\n self.fc_0 = nn.Conv1d(hidden_size, hidden_size, 1)\n self.fc_1 = nn.Conv1d(hidden_size, hidden_size, 1)\n self.fc_2 = nn.Conv1d(hidden_size, hidden_size, 1)\n self.fc_3 = nn.Conv1d(hidden_size, hidden_size, 1)\n self.fc_4 = nn.Conv1d(hidden_size, hidden_size, 1)\n\n self.bn_0 = CBatchNorm1d(c_dim, hidden_size)\n self.bn_1 = CBatchNorm1d(c_dim, hidden_size)\n self.bn_2 = CBatchNorm1d(c_dim, hidden_size)\n self.bn_3 = CBatchNorm1d(c_dim, hidden_size)\n self.bn_4 = CBatchNorm1d(c_dim, hidden_size)\n self.bn_5 = CBatchNorm1d(c_dim, hidden_size)\n\n self.fc_out = nn.Conv1d(hidden_size, 1, 1)\n\n if not leaky:\n self.actvn = F.relu\n else:\n self.actvn = lambda x: F.leaky_relu(x, 0.2)\n\n def forward(self, p, z, c, **kwargs):\n p = p.transpose(1, 2)\n batch_size, D, T = p.size()\n net = self.fc_p(p)\n\n if self.z_dim != 0:\n net_z = self.fc_z(z).unsqueeze(2)\n net = net + net_z\n\n net = self.actvn(self.bn_0(net, c))\n net = self.fc_0(net)\n net = self.actvn(self.bn_1(net, c))\n net = self.fc_1(net)\n net = self.actvn(self.bn_2(net, c))\n net = self.fc_2(net)\n net = self.actvn(self.bn_3(net, c))\n net = self.fc_3(net)\n net = self.actvn(self.bn_4(net, c))\n net = self.fc_4(net)\n net = self.actvn(self.bn_5(net, c))\n out = self.fc_out(net)\n out = out.squeeze(1)\n\n return out\n\n\nclass DecoderBatchNorm(nn.Module):\n ''' Decoder with batch normalization class.\n\n Args:\n dim (int): input dimension\n z_dim (int): dimension of latent code z\n c_dim (int): dimension of latent conditioned code c\n hidden_size (int): hidden size of Decoder 
network\n leaky (bool): whether to use leaky ReLUs\n '''\n\n def __init__(self, dim=3, z_dim=128, c_dim=128,\n hidden_size=256, leaky=False):\n super().__init__()\n self.z_dim = z_dim\n self.c_dim = c_dim\n\n # Submodules\n if not z_dim == 0:\n self.fc_z = nn.Linear(z_dim, hidden_size)\n\n if self.c_dim != 0:\n self.fc_c = nn.Linear(c_dim, hidden_size)\n self.fc_p = nn.Conv1d(dim, hidden_size, 1)\n self.block0 = ResnetBlockConv1d(hidden_size)\n self.block1 = ResnetBlockConv1d(hidden_size)\n self.block2 = ResnetBlockConv1d(hidden_size)\n self.block3 = ResnetBlockConv1d(hidden_size)\n self.block4 = ResnetBlockConv1d(hidden_size)\n\n self.bn = nn.BatchNorm1d(hidden_size)\n\n self.fc_out = nn.Conv1d(hidden_size, 1, 1)\n\n if not leaky:\n self.actvn = F.relu\n else:\n self.actvn = lambda x: F.leaky_relu(x, 0.2)\n\n def forward(self, p, z, c, **kwargs):\n p = p.transpose(1, 2)\n batch_size, D, T = p.size()\n net = self.fc_p(p)\n\n if self.z_dim != 0:\n net_z = self.fc_z(z).unsqueeze(2)\n net = net + net_z\n\n if self.c_dim != 0:\n net_c = self.fc_c(c).unsqueeze(2)\n net = net + net_c\n\n net = self.block0(net)\n net = self.block1(net)\n net = self.block2(net)\n net = self.block3(net)\n net = self.block4(net)\n\n out = self.fc_out(self.actvn(self.bn(net)))\n out = out.squeeze(1)\n\n return out\n"
] | [
[
"torch.nn.Linear",
"torch.nn.Conv1d",
"torch.nn.ReLU",
"torch.nn.BatchNorm1d",
"torch.nn.functional.leaky_relu"
]
] |
sillwood/voicemos | [
"6088b9c5dfd3dc44febd55e8a8fc02b93de1e9e1"
] | [
"helpers.py"
] | [
"import os\n\nimport numpy as np\nimport scipy\nimport wandb\n\nfrom tabula import Helper\n\n\nclass WandbHelper(Helper):\n def __init__(self, conf):\n\n wandb.init(project=\"voicemos\", entity=\"jiamenggao\", config=conf)\n wandb.run.name = conf[\"exp_name\"]\n\n def iter_end(self, data, metadata):\n wandb.log({\"loss\": data[\"loss\"]})\n\n\nclass LRSchedulerHelper(Helper):\n def __init__(self, scheduler):\n self.scheduler = scheduler\n\n def epoch_end(self, data, metadata):\n self.scheduler.step()\n\n\nclass SWAHelper(Helper):\n def __init__(self, swa_model, model, scheduler, swa_scheduler, swa_start):\n self.scheduler = scheduler\n self.model = model\n self.swa_scheduler = scheduler\n self.swa_model = swa_model\n self.swa_start = swa_start\n\n def epoch_end(self, data, metadata):\n if metadata[\"epoch\"] > self.swa_start:\n self.swa_model.update_parameters(self.model)\n self.swa_scheduler.step()\n else:\n self.scheduler.step()\n\n\nclass SWAGHelper(Helper):\n def __init__(\n self, swa_model, model, scheduler, swa_start, swa_epoch=True, swa_iters=None\n ):\n self.scheduler = scheduler\n self.model = model\n self.swa_model = swa_model\n self.swa_start = swa_start\n self.swa_epoch = swa_epoch\n self.swa_iters = swa_iters\n\n def iter_end(self, data, metadata):\n if (\n self.swa_iters is not None\n and metadata[\"epoch\"] > self.swa_start\n and metadata[\"iters\"] % self.swa_iters == 0\n ):\n self.swa_model.collect_model(self.model)\n\n def epoch_end(self, data, metadata):\n if metadata[\"epoch\"] > self.swa_start:\n if self.swa_epoch:\n self.swa_model.collect_model(self.model)\n else:\n self.scheduler.step()\n\n\nclass SwagSampleHelper(Helper):\n def __init__(self, swag_model):\n self.swag_model = swag_model\n\n def epoch_start(self, data, metadata):\n self.swag_model.sample(0.0)\n\n\nclass SwagEvalHelper(Helper):\n def __init__(self, out_fname):\n\n self.score_dict = {}\n self.out_fname = out_fname\n\n def epoch_start(self, data, metadata):\n self.current_scores = []\n\n def iter_end(self, data, metadata):\n gt_scores = data[\"inputs\"][\"mean_score\"]\n pred_scores = data[\"outputs\"]\n fnames = data[\"inputs\"][\"fnames\"]\n\n for fname, pred_score, gt_score in zip(fnames, pred_scores, gt_scores):\n system = fname.split(\"-\")[0]\n if fname not in self.score_dict:\n self.score_dict[fname] = [\n {\n \"pred_score\": pred_score.cpu().numpy(),\n \"gt_score\": gt_score.cpu().numpy(),\n \"system\": system,\n }\n ]\n else:\n self.score_dict[fname].append(\n {\n \"pred_score\": pred_score.cpu().numpy(),\n \"gt_score\": gt_score.cpu().numpy(),\n \"system\": system,\n }\n )\n\n def compile_scores(self):\n score_dict = {}\n for key, v in self.score_dict.items():\n pred_score = sum([i[\"pred_score\"] for i in v]) / len(v)\n gt_score = sum([i[\"gt_score\"] for i in v]) / len(v)\n system = v[0][\"system\"]\n score_dict[key] = {\n \"pred_score\": pred_score,\n \"gt_score\": gt_score,\n \"system\": system,\n }\n scores = [(v[\"pred_score\"], v[\"gt_score\"]) for k, v in score_dict.items()]\n scores = np.array(scores)\n pred_scores = scores[:, 0]\n gt_scores = scores[:, 1]\n\n sys_dict = {}\n systems = list(set([v[\"system\"] for v in score_dict.values()]))\n for system in systems:\n scores = [\n (v[\"pred_score\"], v[\"gt_score\"])\n for k, v in score_dict.items()\n if v[\"system\"] == system\n ]\n scores = np.array(scores)\n pred_score = np.mean(scores[:, 0])\n gt_score = np.mean(scores[:, 1])\n\n sys_dict[system] = {\n \"pred_score\": pred_score,\n \"gt_score\": gt_score,\n }\n\n scores = 
[(v[\"pred_score\"], v[\"gt_score\"]) for k, v in sys_dict.items()]\n scores = np.array(scores)\n sys_pred_scores = scores[:, 0]\n sys_gt_scores = scores[:, 1]\n\n utt_scores = [\n np.mean((gt_scores - pred_scores) ** 2),\n np.corrcoef(gt_scores, pred_scores)[0][1],\n scipy.stats.kendalltau(gt_scores, pred_scores)[0],\n scipy.stats.spearmanr(gt_scores, pred_scores)[0],\n ]\n sys_scores = [\n np.mean((sys_gt_scores - sys_pred_scores) ** 2),\n np.corrcoef(sys_gt_scores, sys_pred_scores)[0][1],\n scipy.stats.kendalltau(sys_gt_scores, sys_pred_scores)[0],\n scipy.stats.spearmanr(sys_gt_scores, sys_pred_scores)[0],\n ]\n row = \"{:>12} {:>10} {:>10} {:>10} {:>10}\"\n\n utt_scores = [\"{:.4f}\".format(i) for i in utt_scores]\n sys_scores = [\"{:.4f}\".format(i) for i in sys_scores]\n print(row.format(\"\", \"MSE\", \"LCC\", \"KTAU\", \"SRCC\"))\n print(row.format(\"Utterance\", *utt_scores))\n print(row.format(\"System\", *sys_scores))\n\n with open(self.out_fname, \"w\") as f:\n for fname, output in score_dict.items():\n score = output[\"pred_score\"]\n f.write(f\"{fname},{score}\\n\")\n\n\nclass EvalSave(Helper):\n def __init__(self, out_fname):\n self.out_fname = out_fname\n with open(self.out_fname, \"w\") as _:\n pass\n\n def iter_end(self, data, metadata):\n # Need a better writer\n with open(self.out_fname, \"a\") as f:\n for fname, output in zip(data[\"inputs\"][\"fnames\"], data[\"outputs\"]):\n score = output.item()\n f.write(f\"{fname},{score}\\n\")\n\n\nclass FeatSave(Helper):\n def iter_end(self, data, metadata):\n # Need a better writer\n for fname, feat in zip(data[\"inputs\"][\"fnames\"], data[\"feats\"]):\n fname = fname.replace(\".wav\", \".npy\")\n\n feat_path = os.path.join(\"wav2vec_feats\", fname)\n\n np.save(feat_path, feat.cpu().numpy())\n\n\nclass MSEHelper(Helper):\n def __init__(self):\n self.score_dict = {}\n\n def epoch_start(self, data, metadata):\n pass\n\n def iter_end(self, data, metadata):\n gt_scores = data[\"inputs\"][\"mean_score\"]\n pred_scores = data[\"outputs\"]\n fnames = data[\"inputs\"][\"fnames\"]\n\n for fname, pred_score, gt_score in zip(fnames, pred_scores, gt_scores):\n system = fname.split(\"-\")[0]\n self.score_dict[fname] = {\n \"pred_score\": pred_score.cpu().numpy(),\n \"gt_score\": gt_score.cpu().numpy(),\n \"system\": system,\n }\n\n def epoch_end(self, data, metadata):\n scores = [(v[\"pred_score\"], v[\"gt_score\"]) for k, v in self.score_dict.items()]\n scores = np.array(scores)\n pred_scores = scores[:, 0]\n gt_scores = scores[:, 1]\n\n sys_dict = {}\n for system in self._systems:\n scores = [\n (v[\"pred_score\"], v[\"gt_score\"])\n for k, v in self.score_dict.items()\n if v[\"system\"] == system\n ]\n scores = np.array(scores)\n pred_score = np.mean(scores[:, 0])\n gt_score = np.mean(scores[:, 1])\n\n sys_dict[system] = {\n \"pred_score\": pred_score,\n \"gt_score\": gt_score,\n }\n\n scores = [(v[\"pred_score\"], v[\"gt_score\"]) for k, v in sys_dict.items()]\n scores = np.array(scores)\n sys_pred_scores = scores[:, 0]\n sys_gt_scores = scores[:, 1]\n\n utt_scores = [\n np.mean((gt_scores - pred_scores) ** 2),\n np.corrcoef(gt_scores, pred_scores)[0][1],\n scipy.stats.kendalltau(gt_scores, pred_scores)[0],\n scipy.stats.spearmanr(gt_scores, pred_scores)[0],\n ]\n sys_scores = [\n np.mean((sys_gt_scores - sys_pred_scores) ** 2),\n np.corrcoef(sys_gt_scores, sys_pred_scores)[0][1],\n scipy.stats.kendalltau(sys_gt_scores, sys_pred_scores)[0],\n scipy.stats.spearmanr(sys_gt_scores, sys_pred_scores)[0],\n ]\n row = \"{:>12} {:>10} {:>10} 
{:>10} {:>10}\"\n\n utt_scores = [\"{:.4f}\".format(i) for i in utt_scores]\n sys_scores = [\"{:.4f}\".format(i) for i in sys_scores]\n print(row.format(\"\", \"MSE\", \"LCC\", \"KTAU\", \"SRCC\"))\n print(row.format(\"Utterance\", *utt_scores))\n print(row.format(\"System\", *sys_scores))\n if wandb.run is not None:\n wandb.log(\n {\n \"Sys MSE\": float(sys_scores[0]),\n \"Sys SRCC\": float(sys_scores[-1]),\n \"Utt MSE\": float(utt_scores[0]),\n \"Utt SRCC\": float(utt_scores[-1]),\n }\n )\n\n @property\n def _systems(self):\n systems = list(set([v[\"system\"] for v in self.score_dict.values()]))\n\n return systems\n"
] | [
[
"numpy.array",
"scipy.stats.kendalltau",
"scipy.stats.spearmanr",
"numpy.mean",
"numpy.corrcoef"
]
] |
Fabrizio94/ECGClassification | [
"c4c00224cb9fd3fb67823a41581831bb7e112c0a"
] | [
"rpeakdetection/pan_tompkins.py"
] | [
"import os\nimport numpy as np\nfrom rpeakdetection.rpeak_detector import RPeakDetector\nrpd = RPeakDetector()\nevaluation_width = 36\necg_folder = \"../data/ecg/mitdb/\"\npeaks_folder = \"../data/peaks/pan_tompkins/\"\nprecisions = list()\nrecalls = list()\nfor name in os.listdir(peaks_folder):\n peaks = list()\n file = open(peaks_folder + name, \"r\")\n name = name.replace(\".tsv\", \"\")\n for line in file:\n peak = line.replace(\"\\n\", \"\")\n peaks.append(int(peak))\n precision, recall = rpd.evaluate(peaks, ecg_folder + name, evaluation_width )\n precisions.append(precision)\n recalls.append(recall)\nprint(\"av prec\")\nprint(np.mean(precisions))\nprint(\"av recall\")\nprint(np.mean(recalls))"
] | [
[
"numpy.mean"
]
] |
qihongl/keras-resnet50-demo | [
"6f612d89374b69c5f9047dd0bffb779be4c8940a"
] | [
"test_vgg16.py"
] | [
"# reference1: https://github.com/fchollet/deep-learning-models\n# reference2: https://github.com/philipperemy/keras-visualize-activations\nfrom keras.applications.vgg16 import VGG16\nfrom keras.preprocessing import image\nfrom dep.imagenet_utils import preprocess_input, decode_predictions\nfrom dep.read_acts_keras import get_activations\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom os.path import join\n\n# get the model\nmodel = VGG16(include_top=True, weights='imagenet',\n input_tensor=None, input_shape=None,\n pooling=None, classes=1000)\n\n# load an image\nimg_dir = 'imgs'\nimg_name = 'stanford.jpg'\nimg_path = join(img_dir, img_name)\nimg = image.load_img(img_path, target_size=(224, 224))\n# plt.imshow(img)\nx = image.img_to_array(img)\nx = np.expand_dims(x, axis=0)\nx = preprocess_input(x)\n\n# feed the image\npreds = model.predict(x)\npreds_info = decode_predictions(preds)\nfor pred_info in preds_info[0]:\n print(pred_info)\n\n# fetch the activities\nactivations = get_activations(model, x, print_shape_only=True)\n"
] | [
[
"numpy.expand_dims"
]
] |
Tenfleques/margipose | [
"ae0580cb7b3b41c21965cd32e280d2af0e8cf2c3"
] | [
"src/margipose/bin/run_gui.py"
] | [
"#!/usr/bin/env python3\n\nimport matplotlib\n\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk\nimport matplotlib.pyplot as plt\nfrom matplotlib.figure import Figure\nfrom mpl_toolkits.mplot3d import Axes3D\nimport tkinter as tk\nfrom tkinter import ttk\nimport tkinter.font\nimport argparse\nimport torch\nfrom functools import lru_cache\nimport os\nimport numpy as np\nfrom pose3d_utils.coords import ensure_homogeneous, ensure_cartesian\n\nfrom margipose.data.get_dataset import get_dataset\nfrom margipose.data.skeleton import absolute_to_root_relative, \\\n VNect_Common_Skeleton, apply_rigid_alignment, CanonicalSkeletonDesc\nfrom margipose.utils import plot_skeleton_on_axes3d, plot_skeleton_on_axes, seed_all, init_algorithms\nfrom margipose.models import load_model\nfrom margipose.eval import mpjpe, pck\nfrom margipose.data_specs import DataSpecs, ImageSpecs, JointsSpecs\nfrom margipose.cli import Subcommand\n\n\nCPU = torch.device('cpu')\n\n\ndef parse_args(argv):\n \"\"\"Parse command-line arguments.\"\"\"\n\n parser = argparse.ArgumentParser(prog='margipose-gui',\n description='3D human pose browser GUI',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('--model', type=str, metavar='FILE',\n help='path to model file')\n parser.add_argument('--dataset', type=str, metavar='STR', default='mpi3d-test',\n help='dataset name')\n\n args = parser.parse_args(argv[1:])\n\n return args\n\n\n@lru_cache(maxsize=32)\ndef load_example(dataset, example_index):\n example = dataset[example_index]\n input = example['input']\n input_image = dataset.input_to_pil_image(input)\n camera = example['camera_intrinsic']\n transform_opts = example['transform_opts']\n gt_skel = None\n if 'target' in example:\n gt_skel = dict(original=example['original_skel'])\n gt_skel_norm = ensure_homogeneous(example['target'], d=3)\n gt_skel_denorm = dataset.denormalise_with_skeleton_height(gt_skel_norm, camera, transform_opts)\n gt_skel['image_space'] = camera.project_cartesian(gt_skel_denorm)\n gt_skel['camera_space'] = dataset.untransform_skeleton(gt_skel_denorm, transform_opts)\n return dict(\n input=input,\n input_image=input_image,\n camera=camera,\n transform_opts=transform_opts,\n gt_skel=gt_skel,\n )\n\n\n@lru_cache(maxsize=32)\ndef load_and_process_example(dataset, example_index, device, model):\n example = load_example(dataset, example_index)\n if model is None:\n return example\n in_var = example['input'].unsqueeze(0).to(device, torch.float32)\n out_var = model(in_var)\n pred_skel_norm = ensure_homogeneous(out_var.squeeze(0).to(CPU, torch.float64), d=3)\n pred_skel_denorm = dataset.denormalise_with_skeleton_height(\n pred_skel_norm, example['camera'], example['transform_opts'])\n pred_skel_image_space = example['camera'].project_cartesian(pred_skel_denorm)\n pred_skel_camera_space = dataset.untransform_skeleton(pred_skel_denorm, example['transform_opts'])\n return dict(\n pred_skel=dict(\n normalised=pred_skel_norm,\n camera_space=pred_skel_camera_space,\n image_space=pred_skel_image_space,\n ),\n xy_heatmaps=[hm.squeeze(0).to(CPU, torch.float32) for hm in model.xy_heatmaps],\n zy_heatmaps=[hm.squeeze(0).to(CPU, torch.float32) for hm in model.zy_heatmaps],\n xz_heatmaps=[hm.squeeze(0).to(CPU, torch.float32) for hm in model.xz_heatmaps],\n **example\n )\n\n\ndef root_relative(skel):\n return absolute_to_root_relative(\n ensure_cartesian(skel, d=3),\n CanonicalSkeletonDesc.root_joint_id\n )\n\n\nclass MainGUIApp(tk.Tk):\n def __init__(self, dataset, 
device, model):\n super().__init__()\n\n self.dataset = dataset\n self.device = device\n self.model = model\n\n self.wm_title('3D pose estimation')\n self.geometry('1280x800')\n\n matplotlib.rcParams['savefig.format'] = 'svg'\n matplotlib.rcParams['savefig.directory'] = os.curdir\n\n # Variables\n self.var_cur_example = tk.StringVar()\n self.var_pred_visible = tk.IntVar(value=0)\n self.var_gt_visible = tk.IntVar(value=1)\n self.var_mpjpe = tk.StringVar(value='??')\n self.var_pck = tk.StringVar(value='??')\n self.var_aligned = tk.IntVar(value=0)\n self.var_joint = tk.StringVar(value='pelvis')\n\n if self.model is not None:\n self.var_pred_visible.set(1)\n\n global_toolbar = self._make_global_toolbar(self)\n global_toolbar.pack(side=tk.TOP, fill=tk.X)\n\n self.notebook = ttk.Notebook(self)\n self.notebook.pack(side=tk.BOTTOM, fill=tk.BOTH, expand=True, padx=4, pady=4)\n def on_change_tab(event):\n self.update_current_tab()\n self.notebook.bind('<<NotebookTabChanged>>', on_change_tab)\n\n self.tab_update_funcs = [\n self._make_overview_tab(self.notebook),\n self._make_heatmap_tab(self.notebook),\n ]\n\n self.current_example_index = 0\n\n @property\n def current_example_index(self):\n return int(self.var_cur_example.get())\n\n @current_example_index.setter\n def current_example_index(self, value):\n self.var_cur_example.set(str(value))\n self.on_change_example()\n\n @property\n def pred_visible(self):\n return self.var_pred_visible.get() != 0\n\n @property\n def gt_visible(self):\n return self.var_gt_visible.get() != 0 and self.current_example['gt_skel'] is not None\n\n @property\n def is_aligned(self):\n return self.var_aligned.get() != 0\n\n def update_current_tab(self):\n cur_tab_index = self.notebook.index('current')\n\n if self.model is not None and self.current_example['gt_skel']:\n actual = root_relative(self.current_example['pred_skel']['camera_space'])\n expected = root_relative(self.current_example['gt_skel']['original'])\n\n if self.is_aligned:\n actual = apply_rigid_alignment(actual, expected)\n\n included_joints = [\n CanonicalSkeletonDesc.joint_names.index(joint_name)\n for joint_name in VNect_Common_Skeleton\n ]\n self.var_mpjpe.set('{:0.4f}'.format(mpjpe(actual, expected, included_joints)))\n self.var_pck.set('{:0.4f}'.format(pck(actual, expected, included_joints)))\n\n self.tab_update_funcs[cur_tab_index]()\n\n def _make_global_toolbar(self, master):\n toolbar = tk.Frame(master, bd=1, relief=tk.RAISED)\n\n def add_label(text):\n opts = dict(text=text) if isinstance(text, str) else dict(textvariable=text)\n label = tk.Label(toolbar, **opts)\n label.pack(side=tk.LEFT, fill=tk.Y, padx=2, pady=2)\n return label\n\n add_label('Example index:')\n txt_cur_example = tk.Spinbox(\n toolbar, textvariable=self.var_cur_example, command=self.on_change_example,\n wrap=True, from_=0, to=len(self.dataset) - 1, font=tk.font.Font(size=12))\n def on_key_cur_example(event):\n if event.keysym == 'Return':\n self.on_change_example()\n txt_cur_example.bind('<Key>', on_key_cur_example)\n txt_cur_example.pack(side=tk.LEFT, fill=tk.Y, padx=2, pady=2)\n\n if self.model is not None:\n add_label('MPJPE:')\n add_label(self.var_mpjpe)\n add_label('PCK@150mm:')\n add_label(self.var_pck)\n\n chk_aligned = tk.Checkbutton(\n toolbar, text='Procrustes alignment', variable=self.var_aligned,\n command=lambda: self.update_current_tab())\n chk_aligned.pack(side=tk.LEFT, fill=tk.Y, padx=2, pady=2)\n\n return toolbar\n\n def _make_overview_tab(self, notebook: ttk.Notebook):\n tab = tk.Frame(notebook)\n 
notebook.add(tab, text='Overview')\n\n toolbar = tk.Frame(tab, bd=1, relief=tk.RAISED)\n toolbar.pack(side=tk.TOP, fill=tk.X)\n chk_pred_visible = tk.Checkbutton(\n toolbar, text='Show prediction', variable=self.var_pred_visible,\n command=lambda: self.update_current_tab())\n chk_pred_visible.pack(side=tk.LEFT, fill=tk.Y, padx=2, pady=2)\n if self.model is None:\n self.var_pred_visible.set(0)\n chk_pred_visible.configure(state='disabled')\n chk_gt_visible = tk.Checkbutton(\n toolbar, text='Show ground truth', variable=self.var_gt_visible,\n command=lambda: self.update_current_tab())\n if hasattr(self.dataset, 'subset') and self.dataset.subset == 'test':\n self.var_gt_visible.set(0)\n chk_gt_visible.configure(state='disabled')\n chk_gt_visible.pack(side=tk.LEFT, fill=tk.Y, padx=2, pady=2)\n\n fig = Figure()\n fig.subplots_adjust(0.05, 0.10, 0.95, 0.95, 0.05, 0.05)\n canvas = FigureCanvasTkAgg(fig, tab)\n canvas.draw()\n canvas.get_tk_widget().pack(side=tk.BOTTOM, fill=tk.BOTH, expand=True)\n nav_toolbar = NavigationToolbar2Tk(canvas, tab)\n nav_toolbar.update()\n canvas._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=True)\n\n prev_ax1: Axes3D = None\n\n def update_tab():\n fig.clf()\n\n skels = []\n if self.pred_visible:\n skels.append(self.current_example['pred_skel'])\n if self.gt_visible:\n skels.append(self.current_example['gt_skel'])\n\n ax1: Axes3D = fig.add_subplot(1, 2, 1, projection='3d')\n ax2 = fig.add_subplot(1, 2, 2)\n ax2.imshow(self.current_example['input_image'])\n\n ground_truth = root_relative(self.current_example['gt_skel']['original'])\n for i, skel in enumerate(skels):\n alpha = 1 / (3 ** i)\n skel3d = root_relative(skel['camera_space'])\n if self.is_aligned:\n skel3d = apply_rigid_alignment(skel3d, ground_truth)\n plot_skeleton_on_axes3d(skel3d, CanonicalSkeletonDesc,\n ax1, invert=True, alpha=alpha)\n plot_skeleton_on_axes(skel['image_space'], CanonicalSkeletonDesc, ax2, alpha=alpha)\n\n # Preserve 3D axes view\n nonlocal prev_ax1\n if prev_ax1 is not None:\n ax1.view_init(prev_ax1.elev, prev_ax1.azim)\n prev_ax1 = ax1\n\n canvas.draw()\n\n return update_tab\n\n def _make_heatmap_tab(self, notebook: ttk.Notebook):\n tab = tk.Frame(notebook)\n tab_index = len(notebook.tabs())\n notebook.add(tab, text='Heatmaps')\n\n if self.model is None:\n notebook.tab(tab_index, state='disabled')\n\n toolbar = tk.Frame(tab, bd=1, relief=tk.RAISED)\n toolbar.pack(side=tk.TOP, fill=tk.X)\n\n joint_names = list(sorted(self.dataset.skeleton_desc.joint_names))\n\n opt_joint = tk.OptionMenu(\n toolbar, self.var_joint, *joint_names,\n command=lambda event: self.update_current_tab())\n opt_joint.pack(side=tk.LEFT, fill=tk.Y, padx=2, pady=2)\n\n var_image_visible = tk.IntVar(value=1)\n chk_image_visible = tk.Checkbutton(\n toolbar, text='Show image overlay', variable=var_image_visible,\n command=lambda: self.update_current_tab())\n chk_image_visible.pack(side=tk.LEFT, fill=tk.Y, padx=2, pady=2)\n\n var_mean_crosshairs = tk.IntVar(value=1)\n chk_mean_crosshairs = tk.Checkbutton(\n toolbar, text='Show mean', variable=var_mean_crosshairs,\n command=lambda: self.update_current_tab())\n chk_mean_crosshairs.pack(side=tk.LEFT, fill=tk.Y, padx=2, pady=2)\n\n fig = Figure()\n canvas = FigureCanvasTkAgg(fig, tab)\n canvas.draw()\n canvas.get_tk_widget().pack(side=tk.BOTTOM, fill=tk.BOTH, expand=True)\n nav_toolbar = NavigationToolbar2Tk(canvas, tab)\n nav_toolbar.update()\n canvas._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=True)\n\n prev_ax3d: Axes3D = None\n\n def update_tab():\n fig.clf()\n 
joint_index = self.dataset.skeleton_desc.joint_names.index(self.var_joint.get())\n\n cmap = plt.get_cmap('gist_yarg')\n img = self.current_example['input_image']\n hms = [\n (3, self.current_example['xy_heatmaps'][-1][joint_index], ('x', 'y')),\n (1, self.current_example['xz_heatmaps'][-1][joint_index], ('x', 'z')),\n (4, self.current_example['zy_heatmaps'][-1][joint_index], ('z', 'y')),\n ]\n\n for subplot_id, hm, (xlabel, ylabel) in hms:\n ax = fig.add_subplot(2, 2, subplot_id)\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\n extent = [0, hm.size(-1), hm.size(-2), 0]\n ax.imshow(hm, cmap=cmap, extent=extent)\n if subplot_id == 3 and var_image_visible.get() != 0:\n ax.imshow(img, extent=extent, alpha=0.5)\n if var_mean_crosshairs.get() != 0:\n ax.axvline(\n np.average(np.arange(0, hm.size(-1)), weights=np.array(hm.sum(-2))),\n ls='dashed',\n )\n ax.axhline(\n np.average(np.arange(0, hm.size(-2)), weights=np.array(hm.sum(-1))),\n ls='dashed',\n )\n\n size = self.current_example['xy_heatmaps'][-1].size(-1)\n ax: Axes3D = fig.add_subplot(2, 2, 2, projection='3d')\n plot_skeleton_on_axes3d(\n (root_relative(self.current_example['pred_skel']['normalised']) + 1) * 0.5 * size,\n self.dataset.skeleton_desc, ax, invert=True)\n ax.set_xlim(0, size)\n ax.set_ylim(0, size)\n ax.set_zlim(size, 0)\n # Preserve 3D axes view\n nonlocal prev_ax3d\n if prev_ax3d is not None:\n ax.view_init(prev_ax3d.elev, prev_ax3d.azim)\n prev_ax3d = ax\n\n canvas.draw()\n\n return update_tab\n\n def on_change_example(self):\n self.current_example = load_and_process_example(\n self.dataset, self.current_example_index, self.device, self.model)\n\n self.update_current_tab()\n\n\ndef main(argv, common_opts):\n args = parse_args(argv)\n seed_all(12345)\n init_algorithms(deterministic=True)\n torch.set_grad_enabled(False)\n\n device = common_opts['device']\n\n if args.model:\n model = load_model(args.model).to(device).eval()\n data_specs = model.data_specs\n else:\n model = None\n data_specs = DataSpecs(\n ImageSpecs(224, mean=ImageSpecs.IMAGENET_MEAN, stddev=ImageSpecs.IMAGENET_STDDEV),\n JointsSpecs(CanonicalSkeletonDesc, n_dims=3),\n )\n\n dataset = get_dataset(args.dataset, data_specs, use_aug=False)\n\n app = MainGUIApp(dataset, device, model)\n app.mainloop()\n\n\nGUI_Subcommand = Subcommand(name='gui', func=main, help='browse examples and predictions')\n\nif __name__ == '__main__':\n GUI_Subcommand.run()\n"
] | [
[
"torch.device",
"matplotlib.pyplot.get_cmap",
"matplotlib.backends.backend_tkagg.FigureCanvasTkAgg",
"matplotlib.figure.Figure",
"torch.set_grad_enabled",
"matplotlib.backends.backend_tkagg.NavigationToolbar2Tk"
]
] |
davidbailey/dpd | [
"29bce937e34afa2161788a5c4a911e590a388229"
] | [
"dpd/folium/plot_stops.py"
] | [
"from functools import partial\n\nimport folium\nimport geopandas\nfrom matplotlib import pyplot as plt\nimport mplleaflet\nfrom shapely.geometry import Point\n\nfrom dpd.osrm import contour_plot\n\n\ndef _plot_linestring(foliumMap, row):\n folium.PolyLine(\n locations=zip(row[\"geometry\"].xy[1], row[\"geometry\"].xy[0]), color=row[\"color\"]\n ).add_to(foliumMap)\n\n\ndef plot_stops(foliumMap, stops, markercolor=\"red\", with_contour=False, mode=\"walking\"):\n \"\"\"\n Plot a table of stops on a Folium map.\n\n Args:\n foliumMap (folium.folium.Map): the map to plot the stops on\n stops (pandas.DataFrame): the DataFrame that contains the stops to plot\n color (str): marker color for the stops\n with_contour (bool): if the stops should include a contour_plot representing walk times\n \"\"\"\n if with_contour:\n fig, ax = plt.subplots()\n stops.apply(\n lambda row: contour_plot(\n ax,\n Point(float(row[\"stop_lon\"]), float(row[\"stop_lat\"])),\n 0.025,\n 15,\n mode=mode,\n ),\n axis=1,\n )\n geojson = mplleaflet.fig_to_geojson(fig=fig)\n features = geopandas.GeoDataFrame.from_features(geojson[\"features\"])\n features.apply(partial(_plot_linestring, foliumMap), axis=1)\n stops.apply(\n lambda row: folium.Marker(\n [row[\"stop_lat\"], row[\"stop_lon\"]],\n popup=row[\"stop_name\"],\n icon=folium.Icon(color=markercolor),\n ).add_to(foliumMap),\n axis=1,\n )\n"
] | [
[
"matplotlib.pyplot.subplots"
]
] |
francis-mujani/News_and_Trading | [
"fb4c5642fd1f2facd300840b9091d85820d5e5ba"
] | [
"flask/model.py"
] | [
"import requests\nfrom pymongo import MongoClient\nfrom bson import json_util\nimport math\nimport numpy as np\nimport pandas as pd\nimport tensorflow\nfrom sklearn.preprocessing import MinMaxScaler\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, LSTM, Dropout\nimport config\nimport os\nimport joblib\n\n# This file will create a scaler and a LSTM model for each company of Dow Jones.\n\n# MongoDB configuration.\n\nusername = config.mongo_user\npassword = config.mongo_pw\n\nmongobase = config.mongo_db\n\nconnection = MongoClient('mongodb+srv://'+str(username)+':'+str(password)+'@'+str(mongobase)+'.mongodb.net/test?authSource=admin&replicaSet=BaseDB-shard-0&readPreference=primary&appname=MongoDB%20Compass%20Community&ssl=true')\n\nstocks = []\n\n# Function to create the model.\n\ndef get_model(symbol):\n print(symbol)\n db = connection[symbol]\n collection = db.stock\n df = pd.DataFrame(list(collection.find()))\n df._id = pd.to_datetime(df._id, infer_datetime_format=True)\n df = df.sort_values(\"_id\", ascending = True)\n # Create a new dataframe with only the 'Close' column\n data = df.filter(['close'])\n # Converting the dataframe to a numpy array\n dataset = data.values\n # Get /Compute the number of rows to train the model on\n training_data_len = math.ceil(len(dataset)*.8)\n # Scale the all of the data to be values between 0 and 1 \n scaler = MinMaxScaler(feature_range=(0, 1)) \n scaled_data = scaler.fit_transform(dataset)\n if not os.path.exists('model'):\n os.makedirs('model')\n joblib.dump(scaler, 'model/scaler_'+symbol+'.pkl') \n # Create the scaled training data set \n train_data = scaled_data[0:training_data_len , : ]\n # Split the data into x_train and y_train data sets\n x_train=[]\n y_train = []\n for i in range(60,len(train_data)):\n x_train.append(train_data[i-60:i,0])\n y_train.append(train_data[i,0])\n # Convert x_train and y_train to numpy arrays\n x_train, y_train = np.array(x_train), np.array(y_train)\n # Reshape the data into the shape accepted by the LSTM\n x_train = np.reshape(x_train, (x_train.shape[0],x_train.shape[1],1))\n # Build the LSTM network model\n model = Sequential()\n model.add(LSTM(units=50, return_sequences=True,input_shape=(x_train.shape[1],1)))\n model.add(LSTM(units=50, return_sequences=False))\n model.add(Dense(units=25))\n model.add(Dense(units=1))\n # Compile the model\n model.compile(optimizer='adam', loss='mean_squared_error')\n # Train the model\n model.fit(x_train, y_train, batch_size=1, epochs=1)\n # Test data set\n test_data = scaled_data[training_data_len - 60: , : ]\n # Create the x_test and y_test data sets\n x_test = []\n y_test = dataset[training_data_len : , : ]\n # Get all of the rows from the start of test to the rest and all of the columns \n # (in this case it's only column 'close')\n for i in range(60,len(test_data)):\n x_test.append(test_data[i-60:i,0])\n # Convert x_test to a numpy array \n x_test = np.array(x_test)\n # Reshape the data into the shape accepted by the LSTM\n x_test = np.reshape(x_test, (x_test.shape[0],x_test.shape[1],1))\n # Getting the models predicted price values\n predictions = model.predict(x_test) \n predictions = scaler.inverse_transform(predictions)#Undo scaling\n # Calculate/Get the value of RMSE\n rmse=np.sqrt(np.mean(((predictions- y_test)**2)))\n print(\"The RMSE of this model is : \" + str(rmse))\n # We create a directory \"model\" to save our model\n if not os.path.exists('model'):\n os.makedirs('model')\n # We save the model in the directory\n 
model.save(\"model/\"+symbol+\".h5\")\n print(\"The model has been saved.\")\n return model\n\n# Import of Dow Jones 30 companies into dataset.\n\ndj30 = pd.read_excel(\"dj30.xls\")\n\nfor i in connection.list_database_names():\n if i in dj30.ticker.to_list():\n stocks.append(i)\n\nfor symbol in stocks:\n try:\n get_model(symbol)\n except:\n pass"
] | [
[
"pandas.to_datetime",
"numpy.array",
"numpy.reshape",
"pandas.read_excel",
"numpy.mean",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.models.Sequential",
"sklearn.preprocessing.MinMaxScaler",
"tensorflow.keras.layers.LSTM"
]
] |
PPTMiao/mtl-ssl | [
"b61449c3f902414304657de6ec217077e441a6b9"
] | [
"object_detection/meta_architectures/faster_rcnn_meta_arch.py"
] | [
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Faster R-CNN meta-architecture definition.\n\nGeneral tensorflow implementation of Faster R-CNN detection models.\n\nSee Faster R-CNN: Ren, Shaoqing, et al.\n\"Faster R-CNN: Towards real-time object detection with region proposal\nnetworks.\" Advances in neural information processing systems. 2015.\n\nWe allow for two modes: first_stage_only=True and first_stage_only=False. In\nthe former setting, all of the user facing methods (e.g., predict, postprocess,\nloss) can be used as if the model consisted only of the RPN, returning class\nagnostic proposals (these can be thought of as approximate detections with no\nassociated class information). In the latter setting, proposals are computed,\nthen passed through a second stage \"box classifier\" to yield (multi-class)\ndetections.\n\nImplementations of Faster R-CNN models must define a new\nFasterRCNNFeatureExtractor and override three methods: `preprocess`,\n`_extract_proposal_features` (the first stage of the model), and\n`_extract_box_classifier_features` (the second stage of the model). Optionally,\nthe `restore_fn` method can be overridden. See tests for an example.\n\nA few important notes:\n+ Batching conventions: We support batched inference and training where\nall images within a batch have the same resolution. Batch sizes are determined\ndynamically via the shape of the input tensors (rather than being specified\ndirectly as, e.g., a model constructor).\n\nA complication is that due to non-max suppression, we are not guaranteed to get\nthe same number of proposals from the first stage RPN (region proposal network)\nfor each image (though in practice, we should often get the same number of\nproposals). For this reason we pad to a max number of proposals per image\nwithin a batch. This `self.max_num_proposals` property is set to the\n`first_stage_max_proposals` parameter at inference time and the\n`second_stage_batch_size` at training time since we subsample the batch to\nbe sent through the box classifier during training.\n\nFor the second stage of the pipeline, we arrange the proposals for all images\nwithin the batch along a single batch dimension. For example, the input to\n_extract_box_classifier_features is a tensor of shape\n`[total_num_proposals, crop_height, crop_width, depth]` where\ntotal_num_proposals is batch_size * self.max_num_proposals. (And note that per\nthe above comment, a subset of these entries correspond to zero paddings.)\n\n+ Coordinate representations:\nFollowing the API (see model.DetectionModel definition), our outputs after\npostprocessing operations are always normalized boxes however, internally, we\nsometimes convert to absolute --- e.g. for loss computation. 
In particular,\nanchors and proposal_boxes are both represented as absolute coordinates.\n\nTODO: Support TPU implementations and sigmoid loss.\n\"\"\"\nfrom abc import abstractmethod\nfrom copy import deepcopy\nfrom functools import partial\n\nimport re\nimport collections\nimport numpy as np\nimport tensorflow as tf\n\nfrom object_detection.anchor_generators import grid_anchor_generator\nfrom object_detection.core import balanced_positive_negative_sampler as sampler\nfrom object_detection.core import box_list\nfrom object_detection.core import box_list_ops\nfrom object_detection.core import box_predictor\nfrom object_detection.core import mask_predictor\nfrom object_detection.core import losses\nfrom object_detection.core import model\nfrom object_detection.core import post_processing\nfrom object_detection.core import standard_fields as fields\nfrom object_detection.core import target_assigner\nfrom object_detection.utils import ops\nfrom object_detection.utils import shape_utils\nfrom global_utils.custom_utils import log\n\nslim = tf.contrib.slim\n\n\nclass FasterRCNNFeatureExtractor(object):\n \"\"\"Faster R-CNN Feature Extractor definition.\"\"\"\n\n def __init__(self,\n is_training,\n first_stage_features_stride,\n reuse_weights=tf.AUTO_REUSE,\n weight_decay=0.0):\n \"\"\"Constructor.\n\n Args:\n is_training: A boolean indicating whether the training version of the\n computation graph should be constructed.\n first_stage_features_stride: Output stride of extracted RPN feature map.\n reuse_weights: Whether to reuse variables. Default is None.\n weight_decay: float weight decay for feature extractor (default: 0.0).\n \"\"\"\n self._is_training = is_training\n self._first_stage_features_stride = first_stage_features_stride\n self._reuse_weights = reuse_weights\n self._weight_decay = weight_decay\n\n @abstractmethod\n def preprocess(self, resized_inputs):\n \"\"\"Feature-extractor specific preprocessing (minus image resizing).\"\"\"\n pass\n\n def extract_proposal_features(self, preprocessed_inputs, scope):\n \"\"\"Extracts first stage RPN features.\n\n This function is responsible for extracting feature maps from preprocessed\n images. 
These features are used by the region proposal network (RPN) to\n predict proposals.\n\n Args:\n preprocessed_inputs: A [batch, height, width, channels] float tensor\n representing a batch of images.\n scope: A scope name.\n\n Returns:\n rpn_feature_map: A tensor with shape [batch, height, width, depth]\n \"\"\"\n with tf.variable_scope(scope, values=[preprocessed_inputs]):\n return self._extract_proposal_features(preprocessed_inputs, scope)\n\n @abstractmethod\n def _extract_proposal_features(self, preprocessed_inputs, scope):\n \"\"\"Extracts first stage RPN features, to be overridden.\"\"\"\n pass\n\n def extract_box_classifier_features(self, proposal_feature_maps, scope):\n \"\"\"Extracts second stage box classifier features.\n\n Args:\n proposal_feature_maps: A 4-D float tensor with shape\n [batch_size * self.max_num_proposals, crop_height, crop_width, depth]\n representing the feature map cropped to each proposal.\n scope: A scope name.\n\n Returns:\n proposal_classifier_features: A 4-D float tensor with shape\n [batch_size * self.max_num_proposals, height, width, depth]\n representing box classifier features for each proposal.\n \"\"\"\n with tf.variable_scope(scope, values=[proposal_feature_maps]):\n return self._extract_box_classifier_features(proposal_feature_maps, scope)\n\n @abstractmethod\n def _extract_box_classifier_features(self, proposal_feature_maps, scope):\n \"\"\"Extracts second stage box classifier features, to be overridden.\"\"\"\n pass\n\n def restore_from_classification_checkpoint_fn(\n self,\n first_stage_feature_extractor_scope,\n second_stage_feature_extractor_scope):\n \"\"\"Returns a map of variables to load from a foreign checkpoint.\n\n Args:\n first_stage_feature_extractor_scope: A scope name for the first stage\n feature extractor.\n second_stage_feature_extractor_scope: A scope name for the second stage\n feature extractor.\n\n Returns:\n A dict mapping variable names (to load from a checkpoint) to variables in\n the model graph.\n \"\"\"\n variables_to_restore = {}\n for variable in tf.global_variables():\n for scope_name in [first_stage_feature_extractor_scope,\n second_stage_feature_extractor_scope]:\n if variable.op.name.startswith(scope_name):\n var_name = variable.op.name.replace(scope_name + '/', '')\n variables_to_restore[var_name] = variable\n return variables_to_restore\n\n def mtl_restore_from_classification_checkpoint_fn(\n self, scope_name):\n \"\"\"Returns a map of variables to load from a foreign checkpoint.\n\n Returns:\n A dict mapping variable names (to load from a checkpoint) to variables in\n the model graph.\n \"\"\"\n variables_to_restore = {}\n for variable in tf.global_variables():\n if variable.op.name.startswith(scope_name):\n var_name = variable.op.name.replace(scope_name + '/', '')\n variables_to_restore[var_name] = variable\n return variables_to_restore\n\n\nclass FasterRCNNMetaArch(model.DetectionModel):\n \"\"\"Faster R-CNN Meta-architecture definition.\"\"\"\n\n def __init__(self,\n is_training,\n num_classes,\n image_resizer_fn,\n feature_extractor,\n first_stage_only,\n first_stage_anchor_generator,\n first_stage_clip_window,\n first_stage_atrous_rate,\n first_stage_box_predictor_trainable,\n first_stage_box_predictor_arg_scope,\n first_stage_box_predictor_kernel_size,\n first_stage_box_predictor_depth,\n first_stage_minibatch_size,\n first_stage_positive_balance_fraction,\n first_stage_nms_score_threshold,\n first_stage_nms_iou_threshold,\n first_stage_max_proposals,\n first_stage_localization_loss_weight,\n 
first_stage_objectness_loss_weight,\n initial_crop_size,\n maxpool_kernel_size,\n maxpool_stride,\n second_stage_mask_rcnn_box_predictor,\n second_stage_batch_size,\n second_stage_balance_fraction,\n second_stage_non_max_suppression_fn,\n second_stage_score_conversion_fn,\n second_stage_localization_loss_weight,\n second_stage_classification_loss_weight,\n hard_example_miner,\n mtl_refiner_arg_scope,\n mtl=None,\n window_box_predictor=None,\n closeness_box_predictor=None,\n edgemask_predictor=None,\n parallel_iterations=16\n ):\n \"\"\"FasterRCNNMetaArch Constructor.\n\n Args:\n is_training: A boolean indicating whether the training version of the\n computation graph should be constructed.\n num_classes: Number of classes. Note that num_classes *does not*\n include the background category, so if groundtruth labels take values\n in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the\n assigned classification targets can range from {0,... K}).\n image_resizer_fn: A callable for image resizing. This callable always\n takes a rank-3 image tensor (corresponding to a single image) and\n returns a rank-3 image tensor, possibly with new spatial dimensions.\n See builders/image_resizer_builder.py.\n feature_extractor: A FasterRCNNFeatureExtractor object.\n first_stage_only: Whether to construct only the Region Proposal Network\n (RPN) part of the model.\n first_stage_anchor_generator: An anchor_generator.AnchorGenerator object\n (note that currently we only support\n grid_anchor_generator.GridAnchorGenerator objects)\n first_stage_atrous_rate: A single integer indicating the atrous rate for\n the single convolution op which is applied to the `rpn_features_to_crop`\n tensor to obtain a tensor to be used for box prediction. Some feature\n extractors optionally allow for producing feature maps computed at\n denser resolutions. The atrous rate is used to compensate for the\n denser feature maps by using an effectively larger receptive field.\n (This should typically be set to 1).\n first_stage_box_predictor_arg_scope: Slim arg_scope for conv2d,\n separable_conv2d and fully_connected ops for the RPN box predictor.\n first_stage_box_predictor_kernel_size: Kernel size to use for the\n convolution op just prior to RPN box predictions.\n first_stage_box_predictor_depth: Output depth for the convolution op\n just prior to RPN box predictions.\n first_stage_minibatch_size: The \"batch size\" to use for computing the\n objectness and location loss of the region proposal network. This\n \"batch size\" refers to the number of anchors selected as contributing\n to the loss function for any given image within the image batch and is\n only called \"batch_size\" due to terminology from the Faster R-CNN paper.\n first_stage_positive_balance_fraction: Fraction of positive examples\n per image for the RPN. The recommended value for Faster RCNN is 0.5.\n first_stage_nms_score_threshold: Score threshold for non max suppression\n for the Region Proposal Network (RPN). This value is expected to be in\n [0, 1] as it is applied directly after a softmax transformation. 
The\n recommended value for Faster R-CNN is 0.\n first_stage_nms_iou_threshold: The Intersection Over Union (IOU) threshold\n for performing Non-Max Suppression (NMS) on the boxes predicted by the\n Region Proposal Network (RPN).\n first_stage_max_proposals: Maximum number of boxes to retain after\n performing Non-Max Suppression (NMS) on the boxes predicted by the\n Region Proposal Network (RPN).\n first_stage_localization_loss_weight: A float\n first_stage_objectness_loss_weight: A float\n initial_crop_size: A single integer indicating the output size\n (width and height are set to be the same) of the initial bilinear\n interpolation based cropping during ROI pooling.\n maxpool_kernel_size: A single integer indicating the kernel size of the\n max pool op on the cropped feature map during ROI pooling.\n maxpool_stride: A single integer indicating the stride of the max pool\n op on the cropped feature map during ROI pooling.\n second_stage_mask_rcnn_box_predictor: Mask R-CNN box predictor to use for\n the second stage.\n second_stage_batch_size: The batch size used for computing the\n classification and refined location loss of the box classifier. This\n \"batch size\" refers to the number of proposals selected as contributing\n to the loss function for any given image within the image batch and is\n only called \"batch_size\" due to terminology from the Faster R-CNN paper.\n second_stage_balance_fraction: Fraction of positive examples to use\n per image for the box classifier. The recommended value for Faster RCNN\n is 0.25.\n second_stage_non_max_suppression_fn: batch_multiclass_non_max_suppression\n callable that takes `boxes`, `scores`, optional `clip_window` and\n optional (kwarg) `mask` inputs (with all other inputs already set)\n and returns a dictionary containing tensors with keys:\n `detection_boxes`, `detection_scores`, `detection_classes`,\n `num_detections`, and (optionally) `detection_masks`. See\n `post_processing.batch_multiclass_non_max_suppression` for the type and\n shape of these tensors.\n second_stage_score_conversion_fn: Callable elementwise nonlinearity\n (that takes tensors as inputs and returns tensors). 
This is usually\n used to convert logits to probabilities.\n second_stage_localization_loss_weight: A float\n second_stage_classification_loss_weight: A float\n hard_example_miner: A losses.HardExampleMiner object (can be None).\n parallel_iterations: (Optional) The number of iterations allowed to run\n in parallel for calls to tf.map_fn.\n Raises:\n ValueError: If `second_stage_batch_size` > `first_stage_max_proposals`\n ValueError: If first_stage_anchor_generator is not of type\n grid_anchor_generator.GridAnchorGenerator.\n \"\"\"\n super(FasterRCNNMetaArch, self).__init__(num_classes=num_classes)\n\n if second_stage_batch_size > first_stage_max_proposals:\n raise ValueError('second_stage_batch_size should be no greater than '\n 'first_stage_max_proposals.')\n if not isinstance(first_stage_anchor_generator,\n grid_anchor_generator.GridAnchorGenerator):\n raise ValueError('first_stage_anchor_generator must be of type '\n 'grid_anchor_generator.GridAnchorGenerator.')\n\n self._is_training = is_training\n self._image_resizer_fn = image_resizer_fn\n self._feature_extractor = feature_extractor\n self._first_stage_only = first_stage_only\n\n # The first class is reserved as background.\n unmatched_cls_target = tf.constant(\n [1] + self._num_classes * [0], dtype=tf.float32)\n self._proposal_target_assigner = target_assigner.create_target_assigner(\n 'FasterRCNN', 'proposal')\n self._detector_target_assigner = target_assigner.create_target_assigner(\n 'FasterRCNN', 'detection', unmatched_cls_target=unmatched_cls_target)\n # Both proposal and detector target assigners use the same box coder\n self._box_coder = self._proposal_target_assigner.box_coder\n\n # (First stage) Region proposal network parameters\n self._first_stage_anchor_generator = first_stage_anchor_generator\n self._first_stage_clip_window = first_stage_clip_window\n self._first_stage_atrous_rate = first_stage_atrous_rate\n self._first_stage_box_predictor_trainable = \\\n first_stage_box_predictor_trainable\n self._first_stage_box_predictor_arg_scope = (\n first_stage_box_predictor_arg_scope)\n self._first_stage_box_predictor_kernel_size = (\n first_stage_box_predictor_kernel_size)\n self._first_stage_box_predictor_depth = first_stage_box_predictor_depth\n self._first_stage_minibatch_size = first_stage_minibatch_size\n self._first_stage_sampler = sampler.BalancedPositiveNegativeSampler(\n positive_fraction=first_stage_positive_balance_fraction)\n self._first_stage_box_predictor = box_predictor.ConvolutionalBoxPredictor(\n self._is_training and first_stage_box_predictor_trainable,\n num_classes=1,\n conv_hyperparams=self._first_stage_box_predictor_arg_scope,\n min_depth=0, max_depth=0, num_layers_before_predictor=0,\n use_dropout=False, dropout_keep_prob=1.0, kernel_size=1,\n box_code_size=self._box_coder.code_size)\n\n self._mtl_refiner_arg_scope = mtl_refiner_arg_scope\n\n self._first_stage_nms_score_threshold = first_stage_nms_score_threshold\n self._first_stage_nms_iou_threshold = first_stage_nms_iou_threshold\n self._first_stage_max_proposals = first_stage_max_proposals\n\n self._first_stage_localization_loss = (\n losses.WeightedSmoothL1LocalizationLoss(anchorwise_output=True, sigma=3.0))\n self._first_stage_objectness_loss = (\n losses.WeightedSoftmaxClassificationLoss(anchorwise_output=True))\n self._first_stage_loc_loss_weight = first_stage_localization_loss_weight\n self._first_stage_obj_loss_weight = first_stage_objectness_loss_weight\n\n # Per-region cropping parameters\n self._initial_crop_size = initial_crop_size\n 
self._maxpool_kernel_size = maxpool_kernel_size\n self._maxpool_stride = maxpool_stride\n\n self._mask_rcnn_box_predictor = second_stage_mask_rcnn_box_predictor\n\n self._second_stage_batch_size = second_stage_batch_size\n self._second_stage_sampler = sampler.BalancedPositiveNegativeSampler(\n positive_fraction=second_stage_balance_fraction)\n\n self._second_stage_nms_fn = second_stage_non_max_suppression_fn\n self._second_stage_score_conversion_fn = second_stage_score_conversion_fn\n\n self._second_stage_localization_loss = (\n losses.WeightedSmoothL1LocalizationLoss(anchorwise_output=True))\n self._second_stage_classification_loss = (\n losses.WeightedSoftmaxClassificationLoss(anchorwise_output=True))\n self._second_stage_loc_loss_weight = second_stage_localization_loss_weight\n self._second_stage_cls_loss_weight = second_stage_classification_loss_weight\n self._hard_example_miner = hard_example_miner\n self._parallel_iterations = parallel_iterations\n\n # Mullti Tasks parmaeters\n self._mtl = mtl\n self._window_box_predictor = window_box_predictor\n self._closeness_box_predictor = closeness_box_predictor\n self._edgemask_predictor = edgemask_predictor\n\n self._window_class_loss = losses.WeightedSoftmaxClassificationLoss_v2(anchorwise_output=True)\n self._closeness_loss = losses.WeightedSoftmaxClassificationLoss_v2(anchorwise_output=True)\n self._edgemask_loss = losses.WeightedSoftmaxClassificationLoss_v2(anchorwise_output=True)\n\n @property\n def first_stage_feature_extractor_scope(self):\n return 'FirstStageFeatureExtractor'\n\n @property\n def second_stage_feature_extractor_scope(self):\n return 'SecondStageFeatureExtractor'\n\n @property\n def first_stage_box_predictor_scope(self):\n return 'FirstStageBoxPredictor'\n\n @property\n def second_stage_box_predictor_scope(self):\n return 'SecondStageBoxPredictor'\n\n @property\n def window_box_predictor_scope(self):\n return 'WindowBoxPredictor'\n\n @property\n def edgemask_predictor_scope(self):\n return 'EdgeMaskPredictor'\n\n @property\n def closeness_box_predictor_scope(self):\n return 'ClosenessBoxPredictor'\n\n @property\n def mtl_refiner_scope(self):\n return 'MTLClassRefiner'\n\n @property\n def max_num_proposals(self):\n \"\"\"Max number of proposals (to pad to) for each image in the input batch.\n\n At training time, this is set to be the `second_stage_batch_size` if hard\n example miner is not configured, else it is set to\n `first_stage_max_proposals`. 
At inference time, this is always set to\n `first_stage_max_proposals`.\n\n Returns:\n A positive integer.\n \"\"\"\n if self._is_training and not self._hard_example_miner:\n return self._second_stage_batch_size\n return self._first_stage_max_proposals\n\n def preprocess(self, inputs):\n \"\"\"Feature-extractor specific preprocessing.\n\n See base class.\n\n For Faster R-CNN, we perform image resizing in the base class --- each\n class subclassing FasterRCNNMetaArch is responsible for any additional\n preprocessing (e.g., scaling pixel values to be in [-1, 1]).\n\n Args:\n inputs: a [batch, height_in, width_in, channels] float tensor representing\n a batch of images with values between 0 and 255.0.\n\n Returns:\n preprocessed_inputs: a [batch, height_out, width_out, channels] float\n tensor representing a batch of images.\n Raises:\n ValueError: if inputs tensor does not have type tf.float32\n \"\"\"\n if inputs.dtype is not tf.float32:\n raise ValueError('`preprocess` expects a tf.float32 tensor')\n with tf.name_scope('Preprocessor'):\n resized_inputs = tf.map_fn(self._image_resizer_fn,\n elems=inputs,\n dtype=tf.float32,\n parallel_iterations=self._parallel_iterations)\n return self._feature_extractor.preprocess(resized_inputs)\n\n def predict(self, preprocessed_inputs):\n \"\"\"Predicts unpostprocessed tensors from input tensor.\n\n This function takes an input batch of images and runs it through the\n forward pass of the network to yield \"raw\" un-postprocessed predictions.\n If `first_stage_only` is True, this function only returns first stage\n RPN predictions (un-postprocessed). Otherwise it returns both\n first stage RPN predictions as well as second stage box classifier\n predictions.\n\n Other remarks:\n + Anchor pruning vs. clipping: following the recommendation of the Faster\n R-CNN paper, we prune anchors that venture outside the image window at\n training time and clip anchors to the image window at inference time.\n + Proposal padding: as described at the top of the file, proposals are\n padded to self._max_num_proposals and flattened so that proposals from all\n images within the input batch are arranged along the same batch dimension.\n\n Args:\n preprocessed_inputs: a [batch, height, width, channels] float tensor\n representing a batch of images.\n\n Returns:\n prediction_dict: a dictionary holding \"raw\" prediction tensors:\n 1) rpn_box_predictor_features: A 4-D float32 tensor with shape\n [batch_size, height, width, depth] to be used for predicting proposal\n boxes and corresponding objectness scores.\n 2) rpn_features_to_crop: A 4-D float32 tensor with shape\n [batch_size, height, width, depth] representing image features to crop\n using the proposal boxes predicted by the RPN.\n 3) image_shape: a 1-D tensor of shape [4] representing the input\n image shape.\n 4) rpn_box_encodings: 3-D float tensor of shape\n [batch_size, num_anchors, self._box_coder.code_size] containing\n predicted boxes.\n 5) rpn_objectness_predictions_with_background: 3-D float tensor of shape\n [batch_size, num_anchors, 2] containing class\n predictions (logits) for each of the anchors. Note that this\n tensor *includes* background class predictions (at class index 0).\n 6) anchors: A 2-D tensor of shape [num_anchors, 4] representing anchors\n for the first stage RPN (in absolute coordinates). 
Note that\n `num_anchors` can differ depending on whether the model is created in\n training or inference mode.\n\n (and if first_stage_only=False):\n 7) refined_box_encodings: a 3-D tensor with shape\n [total_num_proposals, num_classes, 4] representing predicted\n (final) refined box encodings, where\n total_num_proposals=batch_size*self._max_num_proposals\n 8) class_predictions_with_background: a 3-D tensor with shape\n [total_num_proposals, num_classes + 1] containing class\n predictions (logits) for each of the anchors, where\n total_num_proposals=batch_size*self._max_num_proposals.\n Note that this tensor *includes* background class predictions\n (at class index 0).\n 9) num_proposals: An int32 tensor of shape [batch_size] representing the\n number of proposals generated by the RPN. `num_proposals` allows us\n to keep track of which entries are to be treated as zero paddings and\n which are not since we always pad the number of proposals to be\n `self.max_num_proposals` for each image.\n 10) proposal_boxes: A float32 tensor of shape\n [batch_size, self.max_num_proposals, 4] representing\n decoded proposal bounding boxes (in absolute coordinates).\n 11) mask_predictions: (optional) a 4-D tensor with shape\n [total_num_padded_proposals, num_classes, mask_height, mask_width]\n containing instance mask predictions.\n \"\"\"\n (rpn_box_predictor_features, rpn_features_to_crop, anchors_boxlist,\n image_shape) = self._extract_rpn_feature_maps(preprocessed_inputs)\n (rpn_box_encodings, rpn_objectness_predictions_with_background\n ) = self._predict_rpn_proposals(rpn_box_predictor_features)\n\n # The Faster R-CNN paper recommends pruning anchors that venture outside\n # the image window at training time and clipping at inference time.\n clip_window = tf.to_float(tf.stack([0, 0, image_shape[1], image_shape[2]]))\n\n if self._is_training and not self._first_stage_clip_window:\n (rpn_box_encodings, rpn_objectness_predictions_with_background,\n anchors_boxlist) = self._remove_invalid_anchors_and_predictions(\n rpn_box_encodings, rpn_objectness_predictions_with_background,\n anchors_boxlist, clip_window)\n else:\n anchors_boxlist = box_list_ops.clip_to_window(\n anchors_boxlist, clip_window)\n\n anchors = anchors_boxlist.get()\n prediction_dict = {\n 'rpn_box_predictor_features': rpn_box_predictor_features,\n 'rpn_features_to_crop': rpn_features_to_crop,\n 'image_shape': image_shape,\n 'rpn_box_encodings': rpn_box_encodings,\n 'rpn_objectness_predictions_with_background':\n rpn_objectness_predictions_with_background,\n 'anchors': anchors\n }\n\n if not self._first_stage_only:\n prediction_dict.update(self._predict_second_stage(\n rpn_box_encodings,\n rpn_objectness_predictions_with_background,\n rpn_features_to_crop,\n anchors, image_shape))\n return prediction_dict\n\n def _predict_second_stage(self, rpn_box_encodings,\n rpn_objectness_predictions_with_background,\n rpn_features_to_crop,\n anchors,\n image_shape):\n \"\"\"Predicts the output tensors from second stage of Faster R-CNN.\n\n Args:\n rpn_box_encodings: 4-D float tensor of shape\n [batch_size, num_valid_anchors, self._box_coder.code_size] containing\n predicted boxes.\n rpn_objectness_predictions_with_background: 2-D float tensor of shape\n [batch_size, num_valid_anchors, 2] containing class\n predictions (logits) for each of the anchors. 
Note that this\n tensor *includes* background class predictions (at class index 0).\n rpn_features_to_crop: A 4-D float32 tensor with shape\n [batch_size, height, width, depth] representing image features to crop\n using the proposal boxes predicted by the RPN.\n anchors: 2-D float tensor of shape\n [num_anchors, self._box_coder.code_size].\n image_shape: A 1D int32 tensors of size [4] containing the image shape.\n\n Returns:\n prediction_dict: a dictionary holding \"raw\" prediction tensors:\n 1) refined_box_encodings: a 3-D tensor with shape\n [total_num_proposals, num_classes, 4] representing predicted\n (final) refined box encodings, where\n total_num_proposals=batch_size*self._max_num_proposals\n 2) class_predictions_with_background: a 3-D tensor with shape\n [total_num_proposals, num_classes + 1] containing class\n predictions (logits) for each of the anchors, where\n total_num_proposals=batch_size*self._max_num_proposals.\n Note that this tensor *includes* background class predictions\n (at class index 0).\n 3) num_proposals: An int32 tensor of shape [batch_size] representing the\n number of proposals generated by the RPN. `num_proposals` allows us\n to keep track of which entries are to be treated as zero paddings and\n which are not since we always pad the number of proposals to be\n `self.max_num_proposals` for each image.\n 4) proposal_boxes: A float32 tensor of shape\n [batch_size, self.max_num_proposals, 4] representing\n decoded proposal bounding boxes (in absolute coordinates).\n 5) mask_predictions: (optional) a 4-D tensor with shape\n [total_num_padded_proposals, num_classes, mask_height, mask_width]\n containing instance mask predictions.\n \"\"\"\n mtl = self._mtl\n proposal_boxes_normalized, _, num_proposals = self._postprocess_rpn(\n rpn_box_encodings, rpn_objectness_predictions_with_background,\n anchors, image_shape)\n flatten_proposal_boxes_normalized = \\\n self._flatten_first_two_dimensions(proposal_boxes_normalized)\n\n flattened_proposal_feature_maps = (\n self._compute_second_stage_input_feature_maps(\n rpn_features_to_crop, proposal_boxes_normalized))\n\n if mtl.shared_feature == 'proposal_feature_maps':\n if mtl.stop_gradient_for_aux_tasks:\n mtl_flattened_proposal_feature_maps = tf.identity(flattened_proposal_feature_maps)\n mtl_flattened_proposal_feature_maps = tf.stop_gradient(mtl_flattened_proposal_feature_maps)\n else:\n mtl_flattened_proposal_feature_maps = flattened_proposal_feature_maps\n\n box_classifier_features = (\n self._feature_extractor.extract_box_classifier_features(\n flattened_proposal_feature_maps,\n scope=self.second_stage_feature_extractor_scope))\n\n box_predictions = self._mask_rcnn_box_predictor.predict(\n box_classifier_features,\n num_predictions_per_location=1,\n boxes_normalized=flatten_proposal_boxes_normalized,\n scope=self.second_stage_box_predictor_scope)\n refined_box_encodings = tf.squeeze(\n box_predictions[box_predictor.BOX_ENCODINGS], axis=1)\n class_predictions_with_background = tf.squeeze(box_predictions[\n box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1)\n\n absolute_proposal_boxes = ops.normalized_to_image_coordinates(\n proposal_boxes_normalized, image_shape, self._parallel_iterations)\n\n prediction_dict = {\n 'refined_box_encodings': refined_box_encodings,\n 'class_predictions_with_background': class_predictions_with_background,\n 'num_proposals': num_proposals,\n 'proposal_boxes': absolute_proposal_boxes,\n 'proposal_boxes_normalized': proposal_boxes_normalized\n }\n\n if mtl.shared_feature == 
'classifier_feature_maps':\n if mtl.stop_gradient_for_aux_tasks:\n mtl_feature = tf.identity(box_classifier_features)\n mtl_feature = tf.stop_gradient(mtl_feature)\n else:\n mtl_feature = box_classifier_features\n\n if mtl.closeness:\n if mtl.shared_feature == 'proposal_feature_maps':\n scope = self.closeness_box_predictor_scope\n mtl_feature = (self._feature_extractor.extract_box_classifier_features(\n mtl_flattened_proposal_feature_maps, scope=scope))\n closeness_box_predictions = self._closeness_box_predictor.predict_class(\n mtl_feature, scope=self.closeness_box_predictor_scope)\n closeness_predictions = tf.squeeze(closeness_box_predictions[\n box_predictor.CLASS_PREDICTIONS], axis=1)\n prediction_dict['closeness_predictions'] = closeness_predictions\n\n return prediction_dict\n\n def predict_with_window(self, prediction_dict, window_boxes_normalized=None):\n # expanded gt box\n mtl = self._mtl\n if window_boxes_normalized is None:\n window_boxes_normalized = tf.stack(\n self.window_lists(fields.BoxListFields.boxes))\n rpn_features_to_crop = prediction_dict['rpn_features_to_crop']\n flattened_window_input_feature_maps = (\n self._compute_second_stage_input_feature_maps(\n rpn_features_to_crop, window_boxes_normalized))\n\n if mtl.stop_gradient_for_aux_tasks and mtl.shared_feature == 'proposal_feature_maps':\n flattened_window_input_feature_maps = tf.stop_gradient(flattened_window_input_feature_maps)\n\n if mtl.shared_feature == 'proposal_feature_maps':\n scope = self.window_box_predictor_scope\n else:\n scope = self.second_stage_feature_extractor_scope\n window_box_classifier_features = (\n self._feature_extractor.extract_box_classifier_features(\n flattened_window_input_feature_maps,\n scope=scope))\n\n if mtl.stop_gradient_for_aux_tasks and mtl.shared_feature == 'classifier_feature_maps':\n window_box_classifier_features = tf.stop_gradient(window_box_classifier_features)\n\n window_box_predictions = self._window_box_predictor.predict_class(\n window_box_classifier_features,\n activation_fn=None,\n scope=self.window_box_predictor_scope)\n window_class_predictions = tf.squeeze(window_box_predictions[\n box_predictor.CLASS_PREDICTIONS], axis=1)\n prediction_dict['window_class_predictions'] = \\\n window_class_predictions\n return prediction_dict\n\n def predict_edgemask(self, prediction_dict):\n input_feature = prediction_dict['rpn_features_to_crop']\n edgemask_predictions = self._edgemask_predictor.predict(\n input_feature, scope=self.edgemask_predictor_scope)\n prediction_dict['edgemask_predictions'] = edgemask_predictions[mask_predictor.MASK_PREDICTIONS]\n return prediction_dict\n\n def predict_with_mtl_results(self, prediction_dict):\n mtl = self._mtl\n mtl_prediction_source = []\n prediction_org = prediction_dict['class_predictions_with_background']\n if mtl.stop_gradient_for_prediction_org:\n prediction_org = tf.stop_gradient(prediction_org)\n\n mtl_prediction_source.append(prediction_org)\n\n if mtl.window:\n n_expand_window_for_refine = 4\n\n y_min, x_min, y_max, x_max = tf.split(\n prediction_dict['proposal_boxes_normalized'], num_or_size_splits=4, axis=2)\n\n y_min_list = []\n x_min_list = []\n y_max_list = []\n x_max_list = []\n dx_neg = x_min / n_expand_window_for_refine\n dx_pos = (1 - x_max) / n_expand_window_for_refine\n dy_neg = y_min / n_expand_window_for_refine\n dy_pos = (1 - y_max) / n_expand_window_for_refine\n\n for i in range(n_expand_window_for_refine+1):\n y_min_sub = y_min - dy_neg * i\n x_min_sub = x_min - dx_neg * i\n y_max_sub = y_max + dy_pos * i\n 
x_max_sub = x_max + dx_pos * i\n y_min_list.append(y_min_sub)\n x_min_list.append(x_min_sub)\n y_max_list.append(y_max_sub)\n x_max_list.append(x_max_sub)\n\n expand_window_boxes = tf.squeeze(tf.stack([y_min_list, x_min_list, y_max_list, x_max_list], axis=4), axis=3)\n n_expand, n_batch, n_proposal, _ = expand_window_boxes.get_shape().as_list()\n if n_batch is None:\n n_batch = 1\n\n flatten_expand_window_boxes = tf.reshape(expand_window_boxes, [1, n_expand * n_batch * n_proposal, 4])\n\n window_prediction_dict = dict()\n window_prediction_dict['rpn_features_to_crop'] = prediction_dict['rpn_features_to_crop']\n window_prediction_dict = self.predict_with_window(window_prediction_dict, window_boxes_normalized=flatten_expand_window_boxes)\n expand_window_class_predictions = window_prediction_dict['window_class_predictions']\n expand_window_class_predictions = \\\n tf.reshape(expand_window_class_predictions, [n_expand, n_batch * n_proposal, -1])\n expand_window_class_predictions = \\\n tf.transpose(expand_window_class_predictions, perm=[1,0,2])\n\n prediction_dict['expand_window_class_predictions'] = \\\n expand_window_class_predictions\n\n with tf.variable_scope(self.mtl_refiner_scope, reuse=False):\n if mtl.window:\n net = prediction_dict['expand_window_class_predictions']\n net = tf.reshape(net, [n_proposal, -1]) # (64,5,21) to (64,105)\n mtl_prediction_source.append(net)\n if mtl.closeness:\n net = prediction_dict['closeness_predictions']\n if mtl.global_closeness:\n n_batch = tf.shape(net)[0]\n net = tf.reduce_mean(net, axis=0)\n net = tf.expand_dims(net, 0)\n net = tf.tile(net, [n_batch, 1])\n mtl_prediction_source.append(net)\n\n net = tf.concat(mtl_prediction_source, axis=1)\n with slim.arg_scope(self._mtl_refiner_arg_scope):\n n_features = net.get_shape().as_list()[-1]\n net = tf.stop_gradient(net)\n for i in range(mtl.refine_num_fc_layers):\n net = slim.fully_connected(net, n_features, activation_fn=tf.nn.relu, scope='fc'+str(i+1))\n if mtl.refine_dropout_rate < 1.0:\n net = slim.dropout(net, mtl.refine_dropout_rate, is_training=self._is_training, scope='dropout'+str(i+1))\n mtl_refined_class_predictions_with_background = \\\n slim.fully_connected(net, self.num_classes + 1, scope='fc' + str(mtl.refine_num_fc_layers + 1), activation_fn=None)\n\n if mtl.refine_residue:\n mtl_refined_class_predictions_with_background = mtl_refined_class_predictions_with_background + prediction_org\n\n prediction_dict['mtl_refined_class_predictions_with_background'] = mtl_refined_class_predictions_with_background\n return prediction_dict\n\n def _extract_rpn_feature_maps(self, preprocessed_inputs):\n \"\"\"Extracts RPN features.\n\n This function extracts two feature maps: a feature map to be directly\n fed to a box predictor (to predict location and objectness scores for\n proposals) and a feature map from which to crop regions which will then\n be sent to the second stage box classifier.\n\n Args:\n preprocessed_inputs: a [batch, height, width, channels] image tensor.\n\n Returns:\n rpn_box_predictor_features: A 4-D float32 tensor with shape\n [batch, height, width, depth] to be used for predicting proposal boxes\n and corresponding objectness scores.\n rpn_features_to_crop: A 4-D float32 tensor with shape\n [batch, height, width, depth] representing image features to crop using\n the proposal boxes.\n anchors: A BoxList representing anchors (for the RPN) in\n absolute coordinates.\n image_shape: A 1-D tensor representing the input image shape.\n \"\"\"\n image_shape = tf.shape(preprocessed_inputs)\n 
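# Note: the proposal feature extractor is run once per image here; its output\n # ('rpn_features_to_crop') is reused later for per-proposal cropping in the second stage.\n 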
rpn_features_to_crop = self._feature_extractor.extract_proposal_features(\n preprocessed_inputs, scope=self.first_stage_feature_extractor_scope)\n\n feature_map_shape = tf.shape(rpn_features_to_crop)\n anchors = self._first_stage_anchor_generator.generate(\n [(feature_map_shape[1], feature_map_shape[2])])\n kernel_size = self._first_stage_box_predictor_kernel_size\n with tf.variable_scope(self.first_stage_box_predictor_scope, reuse=tf.AUTO_REUSE):\n with slim.arg_scope(self._first_stage_box_predictor_arg_scope):\n rpn_box_predictor_features = slim.conv2d(\n rpn_features_to_crop,\n self._first_stage_box_predictor_depth,\n kernel_size=[kernel_size, kernel_size],\n rate=self._first_stage_atrous_rate,\n trainable=self._first_stage_box_predictor_trainable,\n )\n return (rpn_box_predictor_features, rpn_features_to_crop,\n anchors, image_shape)\n\n def _predict_rpn_proposals(self, rpn_box_predictor_features):\n \"\"\"Adds box predictors to RPN feature map to predict proposals.\n\n Note resulting tensors will not have been postprocessed.\n\n Args:\n rpn_box_predictor_features: A 4-D float32 tensor with shape\n [batch, height, width, depth] to be used for predicting proposal boxes\n and corresponding objectness scores.\n\n Returns:\n box_encodings: 3-D float tensor of shape\n [batch_size, num_anchors, self._box_coder.code_size] containing\n predicted boxes.\n objectness_predictions_with_background: 3-D float tensor of shape\n [batch_size, num_anchors, 2] containing class\n predictions (logits) for each of the anchors. Note that this\n tensor *includes* background class predictions (at class index 0).\n\n Raises:\n RuntimeError: if the anchor generator generates anchors corresponding to\n multiple feature maps. We currently assume that a single feature map\n is generated for the RPN.\n \"\"\"\n num_anchors_per_location = (\n self._first_stage_anchor_generator.num_anchors_per_location())\n if len(num_anchors_per_location) != 1:\n raise RuntimeError('anchor_generator is expected to generate anchors '\n 'corresponding to a single feature map.')\n box_predictions = self._first_stage_box_predictor.predict(\n rpn_box_predictor_features,\n num_anchors_per_location[0],\n scope=self.first_stage_box_predictor_scope)\n\n box_encodings = box_predictions[box_predictor.BOX_ENCODINGS]\n objectness_predictions_with_background = box_predictions[\n box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND]\n return (tf.squeeze(box_encodings, axis=2),\n objectness_predictions_with_background)\n\n def _remove_invalid_anchors_and_predictions(\n self,\n box_encodings,\n objectness_predictions_with_background,\n anchors_boxlist,\n clip_window):\n \"\"\"Removes anchors that (partially) fall outside an image.\n\n Also removes associated box encodings and objectness predictions.\n\n Args:\n box_encodings: 3-D float tensor of shape\n [batch_size, num_anchors, self._box_coder.code_size] containing\n predicted boxes.\n objectness_predictions_with_background: 3-D float tensor of shape\n [batch_size, num_anchors, 2] containing class\n predictions (logits) for each of the anchors. 
Note that this\n tensor *includes* background class predictions (at class index 0).\n anchors_boxlist: A BoxList representing num_anchors anchors (for the RPN)\n in absolute coordinates.\n clip_window: a 1-D tensor representing the [ymin, xmin, ymax, xmax]\n extent of the window to clip/prune to.\n\n Returns:\n box_encodings: 4-D float tensor of shape\n [batch_size, num_valid_anchors, self._box_coder.code_size] containing\n predicted boxes, where num_valid_anchors <= num_anchors\n objectness_predictions_with_background: 2-D float tensor of shape\n [batch_size, num_valid_anchors, 2] containing class\n predictions (logits) for each of the anchors, where\n num_valid_anchors <= num_anchors. Note that this\n tensor *includes* background class predictions (at class index 0).\n anchors: A BoxList representing num_valid_anchors anchors (for the RPN) in\n absolute coordinates.\n \"\"\"\n pruned_anchors_boxlist, keep_indices = box_list_ops.prune_outside_window(\n anchors_boxlist, clip_window)\n def _batch_gather_kept_indices(predictions_tensor):\n return tf.map_fn(\n partial(tf.gather, indices=keep_indices),\n elems=predictions_tensor,\n dtype=tf.float32,\n parallel_iterations=self._parallel_iterations,\n back_prop=True)\n return (_batch_gather_kept_indices(box_encodings),\n _batch_gather_kept_indices(objectness_predictions_with_background),\n pruned_anchors_boxlist)\n\n def _flatten_first_two_dimensions(self, inputs):\n \"\"\"Flattens `K-d` tensor along batch dimension to be a `(K-1)-d` tensor.\n\n Converts `inputs` with shape [A, B, ..., depth] into a tensor of shape\n [A * B, ..., depth].\n\n Args:\n inputs: A float tensor with shape [A, B, ..., depth]. Note that the first\n two and last dimensions must be statically defined.\n Returns:\n A float tensor with shape [A * B, ..., depth] (where the first and last\n dimension are statically defined.\n \"\"\"\n combined_shape = shape_utils.combined_static_and_dynamic_shape(inputs)\n flattened_shape = tf.stack([combined_shape[0] * combined_shape[1]] +\n combined_shape[2:])\n return tf.reshape(inputs, flattened_shape)\n\n def postprocess(self, prediction_dict):\n \"\"\"Convert prediction tensors to final detections.\n\n This function converts raw predictions tensors to final detection results.\n See base class for output format conventions. Note also that by default,\n scores are to be interpreted as logits, but if a score_converter is used,\n then scores are remapped (and may thus have a different interpretation).\n\n If first_stage_only=True, the returned results represent proposals from the\n first stage RPN and are padded to have self.max_num_proposals for each\n image; otherwise, the results can be interpreted as multiclass detections\n from the full two-stage model and are padded to self._max_detections.\n\n Args:\n prediction_dict: a dictionary holding prediction tensors (see the\n documentation for the predict method. If first_stage_only=True, we\n expect prediction_dict to contain `rpn_box_encodings`,\n `rpn_objectness_predictions_with_background`, `rpn_features_to_crop`,\n `image_shape`, and `anchors` fields. 
Otherwise we expect\n prediction_dict to additionally contain `refined_box_encodings`,\n `class_predictions_with_background`, `num_proposals`,\n `proposal_boxes` and, optionally, `mask_predictions` fields.\n\n Returns:\n detections: a dictionary containing the following fields\n detection_boxes: [batch, max_detection, 4]\n detection_scores: [batch, max_detections]\n detection_classes: [batch, max_detections]\n (this entry is only created if rpn_mode=False)\n num_detections: [batch]\n \"\"\"\n with tf.name_scope('FirstStagePostprocessor'):\n image_shape = prediction_dict['image_shape']\n if self._first_stage_only:\n proposal_boxes, proposal_scores, num_proposals = self._postprocess_rpn(\n prediction_dict['rpn_box_encodings'],\n prediction_dict['rpn_objectness_predictions_with_background'],\n prediction_dict['anchors'],\n image_shape)\n return {\n 'detection_boxes': proposal_boxes,\n 'detection_scores': proposal_scores,\n 'num_detections': num_proposals\n }\n with tf.name_scope('SecondStagePostprocessor'):\n mask_predictions = prediction_dict.get(box_predictor.MASK_PREDICTIONS)\n if self._mtl.refine and 'mtl_refined_class_predictions_with_background' in prediction_dict.keys():\n class_predictions_with_background = prediction_dict['mtl_refined_class_predictions_with_background']\n else:\n class_predictions_with_background = prediction_dict['class_predictions_with_background']\n detections_dict = self._postprocess_box_classifier(\n prediction_dict['refined_box_encodings'],\n class_predictions_with_background,\n prediction_dict['proposal_boxes'],\n prediction_dict['num_proposals'],\n image_shape,\n mask_predictions=mask_predictions)\n return detections_dict\n\n def _postprocess_rpn(self,\n rpn_box_encodings_batch,\n rpn_objectness_predictions_with_background_batch,\n anchors,\n image_shape):\n \"\"\"Converts first stage prediction tensors from the RPN to proposals.\n\n This function decodes the raw RPN predictions, runs non-max suppression\n on the result.\n\n Note that the behavior of this function is slightly modified during\n training --- specifically, we stop the gradient from passing through the\n proposal boxes and we only return a balanced sampled subset of proposals\n with size `second_stage_batch_size`.\n\n Args:\n rpn_box_encodings_batch: A 3-D float32 tensor of shape\n [batch_size, num_anchors, self._box_coder.code_size] containing\n predicted proposal box encodings.\n rpn_objectness_predictions_with_background_batch: A 3-D float tensor of\n shape [batch_size, num_anchors, 2] containing objectness predictions\n (logits) for each of the anchors with 0 corresponding to background\n and 1 corresponding to object.\n anchors: A 2-D tensor of shape [num_anchors, 4] representing anchors\n for the first stage RPN. Note that `num_anchors` can differ depending\n on whether the model is created in training or inference mode.\n image_shape: A 1-D tensor representing the input image shape.\n\n Returns:\n proposal_boxes: A float tensor with shape\n [batch_size, max_num_proposals, 4] representing the (potentially zero\n padded) proposal boxes for all images in the batch. These boxes are\n represented as normalized coordinates.\n proposal_scores: A float tensor with shape\n [batch_size, max_num_proposals] representing the (potentially zero\n padded) proposal objectness scores for all images in the batch.\n num_proposals: A Tensor of type `int32`. 
A 1-D tensor of shape [batch]\n representing the number of proposals predicted for each image in\n the batch.\n \"\"\"\n rpn_box_encodings_batch = tf.expand_dims(rpn_box_encodings_batch, axis=2)\n rpn_encodings_shape = shape_utils.combined_static_and_dynamic_shape(\n rpn_box_encodings_batch)\n tiled_anchor_boxes = tf.tile(\n tf.expand_dims(anchors, 0), [rpn_encodings_shape[0], 1, 1])\n proposal_boxes = self._batch_decode_boxes(rpn_box_encodings_batch,\n tiled_anchor_boxes)\n proposal_boxes = tf.squeeze(proposal_boxes, axis=2)\n rpn_objectness_softmax_without_background = tf.nn.softmax(\n rpn_objectness_predictions_with_background_batch)[:, :, 1]\n clip_window = tf.to_float(tf.stack([0, 0, image_shape[1], image_shape[2]]))\n (proposal_boxes, proposal_scores, _, _,\n num_proposals) = post_processing.batch_multiclass_non_max_suppression(\n tf.expand_dims(proposal_boxes, axis=2),\n tf.expand_dims(rpn_objectness_softmax_without_background,\n axis=2),\n self._first_stage_nms_score_threshold,\n self._first_stage_nms_iou_threshold,\n self._first_stage_max_proposals,\n self._first_stage_max_proposals,\n clip_window=clip_window)\n if self._is_training:\n proposal_boxes = tf.stop_gradient(proposal_boxes)\n if not self._hard_example_miner:\n (groundtruth_boxlists, groundtruth_classes_with_background_list,\n ) = self._format_groundtruth_data(image_shape)\n (proposal_boxes, proposal_scores,\n num_proposals) = self._unpad_proposals_and_sample_box_classifier_batch(\n proposal_boxes, proposal_scores, num_proposals,\n groundtruth_boxlists, groundtruth_classes_with_background_list)\n # normalize proposal boxes\n proposal_boxes_reshaped = tf.reshape(proposal_boxes, [-1, 4])\n normalized_proposal_boxes_reshaped = box_list_ops.to_normalized_coordinates(\n box_list.BoxList(proposal_boxes_reshaped),\n image_shape[1], image_shape[2], check_range=False).get()\n proposal_boxes = tf.reshape(normalized_proposal_boxes_reshaped,\n [-1, proposal_boxes.shape[1].value, 4])\n return proposal_boxes, proposal_scores, num_proposals\n\n def _unpad_proposals_and_sample_box_classifier_batch(\n self,\n proposal_boxes,\n proposal_scores,\n num_proposals,\n groundtruth_boxlists,\n groundtruth_classes_with_background_list):\n \"\"\"Unpads proposals and samples a minibatch for second stage.\n\n Args:\n proposal_boxes: A float tensor with shape\n [batch_size, num_proposals, 4] representing the (potentially zero\n padded) proposal boxes for all images in the batch. These boxes are\n represented as normalized coordinates.\n proposal_scores: A float tensor with shape\n [batch_size, num_proposals] representing the (potentially zero\n padded) proposal objectness scores for all images in the batch.\n num_proposals: A Tensor of type `int32`. A 1-D tensor of shape [batch]\n representing the number of proposals predicted for each image in\n the batch.\n groundtruth_boxlists: A list of BoxLists containing (absolute) coordinates\n of the groundtruth boxes.\n groundtruth_classes_with_background_list: A list of 2-D one-hot\n (or k-hot) tensors of shape [num_boxes, num_classes+1] containing the\n class targets with the 0th index assumed to map to the background class.\n\n Returns:\n proposal_boxes: A float tensor with shape\n [batch_size, second_stage_batch_size, 4] representing the (potentially\n zero padded) proposal boxes for all images in the batch. 
These boxes\n are represented as normalized coordinates.\n proposal_scores: A float tensor with shape\n [batch_size, second_stage_batch_size] representing the (potentially zero\n padded) proposal objectness scores for all images in the batch.\n num_proposals: A Tensor of type `int32`. A 1-D tensor of shape [batch]\n representing the number of proposals predicted for each image in\n the batch.\n \"\"\"\n single_image_proposal_box_sample = []\n single_image_proposal_score_sample = []\n single_image_num_proposals_sample = []\n for (single_image_proposal_boxes,\n single_image_proposal_scores,\n single_image_num_proposals,\n single_image_groundtruth_boxlist,\n single_image_groundtruth_classes_with_background) in zip(\n tf.unstack(proposal_boxes),\n tf.unstack(proposal_scores),\n tf.unstack(num_proposals),\n groundtruth_boxlists,\n groundtruth_classes_with_background_list):\n static_shape = single_image_proposal_boxes.get_shape()\n sliced_static_shape = tf.TensorShape([tf.Dimension(None),\n static_shape.dims[-1]])\n single_image_proposal_boxes = tf.slice(\n single_image_proposal_boxes,\n [0, 0],\n [single_image_num_proposals, -1])\n single_image_proposal_boxes.set_shape(sliced_static_shape)\n\n single_image_proposal_scores = tf.slice(single_image_proposal_scores,\n [0],\n [single_image_num_proposals])\n single_image_boxlist = box_list.BoxList(single_image_proposal_boxes)\n single_image_boxlist.add_field(fields.BoxListFields.scores,\n single_image_proposal_scores)\n sampled_boxlist = self._sample_box_classifier_minibatch(\n single_image_boxlist,\n single_image_groundtruth_boxlist,\n single_image_groundtruth_classes_with_background)\n sampled_padded_boxlist = box_list_ops.pad_or_clip_box_list(\n sampled_boxlist,\n num_boxes=self._second_stage_batch_size)\n single_image_num_proposals_sample.append(tf.minimum(\n sampled_boxlist.num_boxes(),\n self._second_stage_batch_size))\n bb = sampled_padded_boxlist.get()\n single_image_proposal_box_sample.append(bb)\n single_image_proposal_score_sample.append(\n sampled_padded_boxlist.get_field(fields.BoxListFields.scores))\n return (tf.stack(single_image_proposal_box_sample),\n tf.stack(single_image_proposal_score_sample),\n tf.stack(single_image_num_proposals_sample))\n\n def _format_groundtruth_data(self, image_shape, with_background=True):\n \"\"\"Helper function for preparing groundtruth data for target assignment.\n\n In order to be consistent with the model.DetectionModel interface,\n groundtruth boxes are specified in normalized coordinates and classes are\n specified as label indices with no assumed background category. 
To prepare\n for target assignment, we:\n 1) convert boxes to absolute coordinates,\n 2) add a background class at class index 0\n\n Args:\n image_shape: A 1-D int32 tensor of shape [4] representing the shape of the\n input image batch.\n\n Returns:\n groundtruth_boxlists: A list of BoxLists containing (absolute) coordinates\n of the groundtruth boxes.\n groundtruth_classes_with_background_list: A list of 2-D one-hot\n (or k-hot) tensors of shape [num_boxes, num_classes+1] containing the\n class targets with the 0th index assumed to map to the background class.\n \"\"\"\n groundtruth_boxlists = [\n box_list_ops.to_absolute_coordinates(\n box_list.BoxList(boxes), image_shape[1], image_shape[2])\n for boxes in self.groundtruth_lists(fields.BoxListFields.boxes)]\n\n if with_background:\n groundtruth_classes_list = [\n tf.to_float(\n tf.pad(one_hot_encoding, [[0, 0], [1, 0]], mode='CONSTANT'))\n for one_hot_encoding in self.groundtruth_lists(\n fields.BoxListFields.classes)]\n else:\n groundtruth_classes_list = [\n tf.to_float(one_hot_encoding)\n for one_hot_encoding in self.groundtruth_lists(\n fields.BoxListFields.classes)]\n\n groundtruth_closeness_list = self.groundtruth_lists(fields.BoxListFields.closeness)\n groundtruth_ignore_list = self.groundtruth_lists(fields.BoxListFields.ignore)\n for gt_boxlist, gt_ignore, gt_closeness \\\n in zip(groundtruth_boxlists, groundtruth_ignore_list, groundtruth_closeness_list):\n\n if gt_ignore is not None:\n gt_boxlist.add_field(fields.BoxListFields.ignore, gt_ignore)\n if gt_closeness is not None:\n gt_boxlist.add_field(fields.BoxListFields.closeness, gt_closeness)\n\n return groundtruth_boxlists, groundtruth_classes_list\n\n def _sample_box_classifier_minibatch(self,\n proposal_boxlist,\n groundtruth_boxlist,\n groundtruth_classes_with_background):\n \"\"\"Samples a mini-batch of proposals to be sent to the box classifier.\n\n Helper function for self._postprocess_rpn.\n\n Args:\n proposal_boxlist: A BoxList containing K proposal boxes in absolute\n coordinates.\n groundtruth_boxlist: A Boxlist containing N groundtruth object boxes in\n absolute coordinates.\n groundtruth_classes_with_background: A tensor with shape\n `[N, self.num_classes + 1]` representing groundtruth classes. The\n classes are assumed to be k-hot encoded, and include background as the\n zero-th class.\n\n Returns:\n a BoxList contained sampled proposals.\n \"\"\"\n (cls_targets, cls_weights, _, _, _) = self._detector_target_assigner.assign(\n proposal_boxlist, groundtruth_boxlist,\n groundtruth_classes_with_background)\n # Selects all boxes as candidates if none of them is selected according\n # to cls_weights. This could happen as boxes within certain IOU ranges\n # are ignored. If triggered, the selected boxes will still be ignored\n # during loss computation.\n cls_weights += tf.to_float(tf.equal(tf.reduce_sum(cls_weights), 0))\n positive_indicator = tf.greater(tf.argmax(cls_targets, axis=1), 0)\n sampled_indices = self._second_stage_sampler.subsample(\n tf.cast(cls_weights, tf.bool),\n self._second_stage_batch_size,\n positive_indicator)\n return box_list_ops.boolean_mask(proposal_boxlist, sampled_indices)\n\n def _compute_second_stage_input_feature_maps(self, features_to_crop,\n proposal_boxes_normalized):\n \"\"\"Crops to a set of proposals from the feature map for a batch of images.\n\n Helper function for self._postprocess_rpn. 
This function calls\n `tf.image.crop_and_resize` to create the feature map to be passed to the\n second stage box classifier for each proposal.\n\n Args:\n features_to_crop: A float32 tensor with shape\n [batch_size, height, width, depth]\n proposal_boxes_normalized: A float32 tensor with shape [batch_size,\n num_proposals, box_code_size] containing proposal boxes in\n normalized coordinates.\n\n Returns:\n A float32 tensor with shape [K, new_height, new_width, depth].\n \"\"\"\n def get_box_inds(proposals):\n proposals_shape = proposals.get_shape().as_list()\n if any(dim is None for dim in proposals_shape):\n proposals_shape = tf.shape(proposals)\n ones_mat = tf.ones(proposals_shape[:-1], dtype=tf.int32)\n if len(proposals.get_shape().as_list()) > 2:\n multiplier = tf.expand_dims(\n tf.range(start=0, limit=proposals_shape[0]), 1)\n else: # XXX not separated by batch (regard all as batch 0)\n multiplier = tf.constant(0, dtype=tf.int32)\n return tf.reshape(ones_mat * multiplier, [-1])\n\n if len(proposal_boxes_normalized.get_shape().as_list()) > 2:\n flattened_proposal_boxes_normalized = \\\n self._flatten_first_two_dimensions(proposal_boxes_normalized)\n else:\n flattened_proposal_boxes_normalized = proposal_boxes_normalized\n\n cropped_regions = tf.image.crop_and_resize(\n features_to_crop,\n flattened_proposal_boxes_normalized,\n get_box_inds(proposal_boxes_normalized),\n (self._initial_crop_size, self._initial_crop_size))\n return slim.max_pool2d(\n cropped_regions,\n [self._maxpool_kernel_size, self._maxpool_kernel_size],\n stride=self._maxpool_stride)\n\n def _compute_second_stage_input_feature_maps_nopool(self, features_to_crop,\n proposal_boxes_normalized):\n \"\"\"Crops to a set of proposals from the feature map for a batch of images.\n\n Helper function for self._postprocess_rpn. 
This function calls\n `tf.image.crop_and_resize` to create the feature map to be passed to the\n second stage box classifier for each proposal.\n\n Args:\n features_to_crop: A float32 tensor with shape\n [batch_size, height, width, depth]\n proposal_boxes_normalized: A float32 tensor with shape [batch_size,\n num_proposals, box_code_size] containing proposal boxes in\n normalized coordinates.\n\n Returns:\n A float32 tensor with shape [K, new_height, new_width, depth].\n \"\"\"\n def get_box_inds(proposals):\n proposals_shape = proposals.get_shape().as_list()\n if any(dim is None for dim in proposals_shape):\n proposals_shape = tf.shape(proposals)\n ones_mat = tf.ones(proposals_shape[:2], dtype=tf.int32)\n multiplier = tf.expand_dims(\n tf.range(start=0, limit=proposals_shape[0]), 1)\n return tf.reshape(ones_mat * multiplier, [-1])\n\n cropped_regions = tf.image.crop_and_resize(\n features_to_crop,\n self._flatten_first_two_dimensions(proposal_boxes_normalized),\n get_box_inds(proposal_boxes_normalized),\n (self._initial_crop_size, self._initial_crop_size))\n return slim.max_pool2d(\n cropped_regions,\n [self._maxpool_kernel_size, self._maxpool_kernel_size],\n stride=self._maxpool_stride)\n\n def _postprocess_box_classifier(self,\n refined_box_encodings,\n class_predictions_with_background,\n proposal_boxes,\n num_proposals,\n image_shape,\n mask_predictions=None,\n mask_threshold=0.5):\n \"\"\"Converts predictions from the second stage box classifier to detections.\n\n Args:\n refined_box_encodings: a 3-D tensor with shape\n [total_num_padded_proposals, num_classes, 4] representing predicted\n (final) refined box encodings.\n class_predictions_with_background: a 3-D tensor with shape\n [total_num_padded_proposals, num_classes + 1] containing class\n predictions (logits) for each of the proposals. Note that this tensor\n *includes* background class predictions (at class index 0).\n proposal_boxes: [batch_size, self.max_num_proposals, 4] representing\n decoded proposal bounding boxes.\n num_proposals: A Tensor of type `int32`. 
A 1-D tensor of shape [batch]\n representing the number of proposals predicted for each image in\n the batch.\n image_shape: a 1-D tensor representing the input image shape.\n mask_predictions: (optional) a 4-D tensor with shape\n [total_num_padded_proposals, num_classes, mask_height, mask_width]\n containing instance mask predictions.\n mask_threshold: a scalar threshold determining which mask values are\n rounded to 0 or 1.\n\n Returns:\n A dictionary containing:\n `detection_boxes`: [batch, max_detection, 4]\n `detection_scores`: [batch, max_detections]\n `detection_classes`: [batch, max_detections]\n `num_detections`: [batch]\n `detection_masks`:\n (optional) [batch, max_detections, mask_height, mask_width]\n \"\"\"\n refined_box_encodings_batch = tf.reshape(refined_box_encodings,\n [-1, self.max_num_proposals,\n self.num_classes,\n self._box_coder.code_size])\n class_predictions_with_background_batch = tf.reshape(\n class_predictions_with_background,\n [-1, self.max_num_proposals, self.num_classes + 1]\n )\n refined_decoded_boxes_batch = self._batch_decode_boxes(\n refined_box_encodings_batch, proposal_boxes)\n class_predictions_with_background_batch = (\n self._second_stage_score_conversion_fn(\n class_predictions_with_background_batch))\n class_predictions_batch = tf.reshape(\n tf.slice(class_predictions_with_background_batch,\n [0, 0, 1], [-1, -1, -1]),\n [-1, self.max_num_proposals, self.num_classes])\n clip_window = tf.to_float(tf.stack([0, 0, image_shape[1], image_shape[2]]))\n\n mask_predictions_batch = None\n if mask_predictions is not None:\n mask_height = mask_predictions.shape[2].value\n mask_width = mask_predictions.shape[3].value\n mask_predictions_batch = tf.reshape(\n mask_predictions, [-1, self.max_num_proposals,\n self.num_classes, mask_height, mask_width])\n (nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_masks,\n num_detections) = self._second_stage_nms_fn(\n refined_decoded_boxes_batch,\n class_predictions_batch,\n clip_window=clip_window,\n change_coordinate_frame=True,\n num_valid_boxes=num_proposals,\n masks=mask_predictions_batch)\n detections = {'detection_boxes': nmsed_boxes,\n 'detection_scores': nmsed_scores,\n 'detection_classes': nmsed_classes,\n 'num_detections': tf.to_float(num_detections)}\n if nmsed_masks is not None:\n detections['detection_masks'] = nmsed_masks\n if mask_predictions is not None:\n detections['detection_masks'] = tf.to_float(\n tf.greater_equal(detections['detection_masks'], mask_threshold))\n return detections\n\n def _batch_decode_boxes(self, box_encodings, anchor_boxes):\n \"\"\"Decodes box encodings with respect to the anchor boxes.\n\n Args:\n box_encodings: a 4-D tensor with shape\n [batch_size, num_anchors, num_classes, self._box_coder.code_size]\n representing box encodings.\n anchor_boxes: [batch_size, num_anchors, 4] representing\n decoded bounding boxes.\n\n Returns:\n decoded_boxes: a [batch_size, num_anchors, num_classes, 4]\n float tensor representing bounding box 
predictions\n (for each image in batch, proposal and class).\n \"\"\"\n combined_shape = shape_utils.combined_static_and_dynamic_shape(\n box_encodings)\n num_classes = combined_shape[2]\n tiled_anchor_boxes = tf.tile(\n tf.expand_dims(anchor_boxes, 2), [1, 1, num_classes, 1])\n tiled_anchors_boxlist = box_list.BoxList(\n tf.reshape(tiled_anchor_boxes, [-1, 4]))\n decoded_boxes = self._box_coder.decode(\n tf.reshape(box_encodings, [-1, self._box_coder.code_size]),\n tiled_anchors_boxlist)\n return tf.reshape(decoded_boxes.get(),\n tf.stack([combined_shape[0], combined_shape[1],\n num_classes, 4]))\n\n def loss(self, prediction_dict, scope=None):\n \"\"\"Compute scalar loss tensors given prediction tensors.\n\n If first_stage_only=True, only RPN related losses are computed (i.e.,\n `rpn_localization_loss` and `rpn_objectness_loss`). Otherwise all\n losses are computed.\n\n Args:\n prediction_dict: a dictionary holding prediction tensors (see the\n documentation for the predict method. If first_stage_only=True, we\n expect prediction_dict to contain `rpn_box_encodings`,\n `rpn_objectness_predictions_with_background`, `rpn_features_to_crop`,\n `image_shape`, and `anchors` fields. Otherwise we expect\n prediction_dict to additionally contain `refined_box_encodings`,\n `class_predictions_with_background`, `num_proposals`, and\n `proposal_boxes` fields.\n scope: Optional scope name.\n\n Returns:\n a dictionary mapping loss keys (`first_stage_localization_loss`,\n `first_stage_objectness_loss`, 'second_stage_localization_loss',\n 'second_stage_classification_loss') to scalar tensors representing\n corresponding loss values.\n \"\"\"\n mtl = self._mtl\n with tf.name_scope(scope, 'Loss', prediction_dict.values()):\n (groundtruth_boxlists, groundtruth_classes_with_background_list\n ) = self._format_groundtruth_data(prediction_dict['image_shape'])\n loss_dict = self._loss_rpn(\n prediction_dict['rpn_box_encodings'],\n prediction_dict['rpn_objectness_predictions_with_background'],\n prediction_dict['anchors'],\n groundtruth_boxlists,\n groundtruth_classes_with_background_list)\n if not self._first_stage_only:\n if mtl.closeness:\n closeness_predictions = prediction_dict['closeness_predictions']\n else:\n closeness_predictions = None\n\n loss_dict.update(\n self._loss_box_classifier(\n prediction_dict['refined_box_encodings'],\n prediction_dict['class_predictions_with_background'],\n prediction_dict['proposal_boxes'],\n prediction_dict['num_proposals'],\n groundtruth_boxlists,\n groundtruth_classes_with_background_list,\n closeness_predictions=closeness_predictions,\n ))\n\n if mtl.window:\n if not self._first_stage_only:\n window_classes_with_background_list = \\\n self.window_lists(fields.BoxListFields.classes)\n loss_dict.update(\n self._loss_window_class(\n prediction_dict['window_class_predictions'],\n window_classes_with_background_list))\n if mtl.edgemask:\n edgemask_list = self.edgemask_lists(fields.BoxListFields.edgemask)\n loss_dict.update(self._loss_edgemask(prediction_dict['edgemask_predictions'], edgemask_list))\n\n if mtl.refine:\n loss_dict.update(\n self._loss_refined_classifier(\n prediction_dict['refined_box_encodings'],\n prediction_dict['mtl_refined_class_predictions_with_background'],\n prediction_dict['proposal_boxes'],\n prediction_dict['num_proposals'],\n groundtruth_boxlists,\n groundtruth_classes_with_background_list))\n\n summary_dict = dict()\n\n return loss_dict\n\n def _loss_rpn(self,\n rpn_box_encodings,\n rpn_objectness_predictions_with_background,\n anchors,\n 
groundtruth_boxlists,\n groundtruth_classes_with_background_list):\n \"\"\"Computes scalar RPN loss tensors.\n\n Uses self._proposal_target_assigner to obtain regression and classification\n targets for the first stage RPN, samples a \"minibatch\" of anchors to\n participate in the loss computation, and returns the RPN losses.\n\n Args:\n rpn_box_encodings: A 4-D float tensor of shape\n [batch_size, num_anchors, self._box_coder.code_size] containing\n predicted proposal box encodings.\n rpn_objectness_predictions_with_background: A 2-D float tensor of shape\n [batch_size, num_anchors, 2] containing objectness predictions\n (logits) for each of the anchors with 0 corresponding to background\n and 1 corresponding to object.\n anchors: A 2-D tensor of shape [num_anchors, 4] representing anchors\n for the first stage RPN. Note that `num_anchors` can differ depending\n on whether the model is created in training or inference mode.\n groundtruth_boxlists: A list of BoxLists containing coordinates of the\n groundtruth boxes.\n groundtruth_classes_with_background_list: A list of 2-D one-hot\n (or k-hot) tensors of shape [num_boxes, num_classes+1] containing the\n class targets with the 0th index assumed to map to the background class.\n\n Returns:\n a dictionary mapping loss keys (`first_stage_localization_loss`,\n `first_stage_objectness_loss`) to scalar tensors representing\n corresponding loss values.\n \"\"\"\n with tf.name_scope('RPNLoss'):\n (batch_cls_targets, batch_cls_weights, batch_reg_targets,\n batch_reg_weights, _) = target_assigner.batch_assign_targets(\n self._proposal_target_assigner, box_list.BoxList(anchors),\n groundtruth_boxlists, len(groundtruth_boxlists)*[None])\n batch_cls_targets = tf.squeeze(batch_cls_targets, axis=2)\n\n def _minibatch_subsample_fn(inputs):\n cls_targets, cls_weights = inputs\n return self._first_stage_sampler.subsample(\n tf.cast(cls_weights, tf.bool),\n self._first_stage_minibatch_size, tf.cast(cls_targets, tf.bool))\n batch_sampled_indices = tf.to_float(tf.map_fn(\n _minibatch_subsample_fn,\n [batch_cls_targets, batch_cls_weights],\n dtype=tf.bool,\n parallel_iterations=self._parallel_iterations,\n back_prop=True))\n\n # Normalize by number of examples in sampled minibatch\n normalizer = tf.reduce_sum(batch_sampled_indices, axis=1)\n batch_one_hot_targets = tf.one_hot(\n tf.to_int32(batch_cls_targets), depth=2)\n sampled_reg_indices = tf.multiply(batch_sampled_indices,\n batch_reg_weights)\n\n localization_losses = self._first_stage_localization_loss(\n rpn_box_encodings, batch_reg_targets, weights=sampled_reg_indices)\n objectness_losses = self._first_stage_objectness_loss(\n rpn_objectness_predictions_with_background,\n batch_one_hot_targets, weights=batch_sampled_indices)\n localization_loss = tf.reduce_mean(\n tf.reduce_sum(localization_losses, axis=1) / normalizer)\n objectness_loss = tf.reduce_mean(\n tf.reduce_sum(objectness_losses, axis=1) / normalizer)\n loss_dict = {\n 'first_stage_localization_loss':\n self._first_stage_loc_loss_weight * localization_loss,\n 'first_stage_objectness_loss':\n self._first_stage_obj_loss_weight * objectness_loss,\n }\n tf.add_to_collection('main_loss', tf.identity(loss_dict['first_stage_localization_loss'], name='first_stage_localization_loss'))\n tf.add_to_collection('main_loss', tf.identity(loss_dict['first_stage_objectness_loss'], name='first_stage_objectness_loss'))\n return loss_dict\n\n def _loss_box_classifier(self,\n refined_box_encodings,\n class_predictions_with_background,\n proposal_boxes,\n 
num_proposals,\n groundtruth_boxlists,\n groundtruth_classes_with_background_list,\n closeness_predictions=None,\n ):\n \"\"\"Computes scalar box classifier loss tensors.\n\n Uses self._detector_target_assigner to obtain regression and classification\n targets for the second stage box classifier, optionally performs\n hard mining, and returns losses. All losses are computed independently\n for each image and then averaged across the batch.\n\n This function assumes that the proposal boxes in the \"padded\" regions are\n actually zero (and thus should not be matched to).\n\n Args:\n refined_box_encodings: a 3-D tensor with shape\n [total_num_proposals, num_classes, box_coder.code_size] representing\n predicted (final) refined box encodings.\n class_predictions_with_background: a 3-D tensor with shape\n [total_num_proposals, num_classes + 1] containing class\n predictions (logits) for each of the anchors. Note that this tensor\n *includes* background class predictions (at class index 0).\n proposal_boxes: [batch_size, self.max_num_proposals, 4] representing\n decoded proposal bounding boxes.\n num_proposals: A Tensor of type `int32`. A 1-D tensor of shape [batch]\n representing the number of proposals predicted for each image in\n the batch.\n groundtruth_boxlists: a list of BoxLists containing coordinates of the\n groundtruth boxes.\n groundtruth_classes_with_background_list: a list of 2-D one-hot\n (or k-hot) tensors of shape [num_boxes, num_classes + 1] containing the\n class targets with the 0th index assumed to map to the background class.\n\n Returns:\n a dictionary mapping loss keys ('second_stage_localization_loss',\n 'second_stage_classification_loss') to scalar tensors representing\n corresponding loss values.\n \"\"\"\n mtl = self._mtl\n with tf.name_scope('BoxClassifierLoss'):\n paddings_indicator = self._padded_batched_proposals_indicator(\n num_proposals, self.max_num_proposals)\n proposal_boxlists = [\n box_list.BoxList(proposal_boxes_single_image)\n for proposal_boxes_single_image in tf.unstack(proposal_boxes)]\n batch_size = len(proposal_boxlists)\n\n num_proposals_or_one = tf.to_float(tf.expand_dims(\n tf.maximum(num_proposals, tf.ones_like(num_proposals)), 1))\n normalizer = tf.tile(num_proposals_or_one,\n [1, self.max_num_proposals]) * batch_size\n\n (batch_cls_targets_with_background, batch_cls_weights, batch_reg_targets,\n batch_reg_weights, _, batch_closeness_targets) = target_assigner.batch_assign_targets(\n self._detector_target_assigner, proposal_boxlists,\n groundtruth_boxlists, groundtruth_classes_with_background_list, extension=True)\n\n # We only predict refined location encodings for the non background\n # classes, but we now pad it to make it compatible with the class\n # predictions\n flat_cls_targets_with_background = tf.reshape(\n batch_cls_targets_with_background,\n [batch_size * self.max_num_proposals, -1])\n refined_box_encodings_with_background = tf.pad(\n refined_box_encodings, [[0, 0], [1, 0], [0, 0]])\n refined_box_encodings_masked_by_class_targets = tf.boolean_mask(\n refined_box_encodings_with_background,\n tf.greater(flat_cls_targets_with_background, 0))\n reshaped_refined_box_encodings = tf.reshape(\n refined_box_encodings_masked_by_class_targets,\n [batch_size, -1, 4])\n\n second_stage_loc_losses = self._second_stage_localization_loss(\n reshaped_refined_box_encodings,\n batch_reg_targets, weights=batch_reg_weights) / normalizer\n second_stage_cls_losses = self._second_stage_classification_loss(\n class_predictions_with_background,\n 
batch_cls_targets_with_background,\n weights=batch_cls_weights) / normalizer\n second_stage_loc_loss = tf.reduce_sum(\n tf.boolean_mask(second_stage_loc_losses, paddings_indicator))\n second_stage_cls_loss = tf.reduce_sum(\n tf.boolean_mask(second_stage_cls_losses, paddings_indicator))\n\n if self._hard_example_miner:\n (second_stage_loc_loss, second_stage_cls_loss\n ) = self._unpad_proposals_and_apply_hard_mining(\n proposal_boxlists, second_stage_loc_losses,\n second_stage_cls_losses, num_proposals)\n loss_dict = {\n 'second_stage_localization_loss':\n (self._second_stage_loc_loss_weight * second_stage_loc_loss),\n 'second_stage_classification_loss':\n (self._second_stage_cls_loss_weight * second_stage_cls_loss)\n }\n\n tf.add_to_collection('main_loss', tf.identity(loss_dict['second_stage_localization_loss'], name='second_stage_localization_loss'))\n tf.add_to_collection('main_loss', tf.identity(loss_dict['second_stage_classification_loss'], name='second_stage_classification_loss'))\n\n normalizer_reg = tf.expand_dims(tf.maximum(1.0, tf.reduce_sum(batch_reg_weights, axis=1)), axis=1)\n\n ############################ closeness ############################\n if closeness_predictions is not None:\n closeness_predictions = closeness_predictions[:, 1:]\n batch_closeness_targets = batch_closeness_targets[:, :, 1:]\n\n closeness_losses = self._closeness_loss(\n closeness_predictions,\n batch_closeness_targets,\n weights=batch_reg_weights) / normalizer_reg\n\n # if all of gt is 0, ignore that\n norm_without_bg = tf.reduce_sum(batch_closeness_targets, axis=2)\n closeness_losses = closeness_losses * norm_without_bg\n closeness_loss = tf.reduce_sum(closeness_losses)\n\n loss_dict['closeness_classification_loss'] = mtl.closeness_loss_weight * closeness_loss\n\n return loss_dict\n\n def _loss_refined_classifier(self,\n refined_box_encodings,\n class_predictions_with_background,\n proposal_boxes,\n num_proposals,\n groundtruth_boxlists,\n groundtruth_classes_with_background_list):\n\n with tf.name_scope('RefineClassifierLoss'):\n paddings_indicator = self._padded_batched_proposals_indicator(\n num_proposals, self.max_num_proposals)\n proposal_boxlists = [\n box_list.BoxList(proposal_boxes_single_image)\n for proposal_boxes_single_image in tf.unstack(proposal_boxes)]\n batch_size = len(proposal_boxlists)\n\n num_proposals_or_one = tf.to_float(tf.expand_dims(\n tf.maximum(num_proposals, tf.ones_like(num_proposals)), 1))\n normalizer = tf.tile(num_proposals_or_one,\n [1, self.max_num_proposals]) * batch_size\n\n (batch_cls_targets_with_background, batch_cls_weights, batch_reg_targets,\n batch_reg_weights, _) = target_assigner.batch_assign_targets(\n self._detector_target_assigner, proposal_boxlists,\n groundtruth_boxlists, groundtruth_classes_with_background_list)\n\n refined_cls_losses = self._second_stage_classification_loss(\n class_predictions_with_background,\n batch_cls_targets_with_background,\n weights=batch_cls_weights) / normalizer\n refined_cls_loss = tf.reduce_sum(\n tf.boolean_mask(refined_cls_losses, paddings_indicator))\n\n if self._hard_example_miner:\n (_, refined_cls_loss, _\n ) = self._unpad_proposals_and_apply_hard_mining(\n proposal_boxlists, refined_cls_losses,\n refined_cls_losses, num_proposals)\n loss_dict = {\n 'refined_classification_loss':\n (self._mtl.refined_classification_loss_weight * refined_cls_loss)\n }\n return loss_dict\n\n def _loss_window_class(self,\n window_class_predictions_with_background,\n window_class_list):\n with tf.name_scope('WindowClass'):\n window_class 
= tf.stack(window_class_list)\n\n is_empty = tf.equal(tf.size(window_class), 0)\n\n window_class_losses = tf.cond(is_empty,\n lambda: tf.constant(0, tf.float32),\n lambda: self._window_class_loss(window_class_predictions_with_background, window_class))\n\n window_class_loss = tf.reduce_mean(window_class_losses)\n\n loss_dict = {\n 'window_class_loss':\n (self._mtl.window_class_loss_weight * window_class_loss)\n }\n\n return loss_dict\n\n def _loss_edgemask(self, edgemask_predictions, groundtruth_edgemask_list):\n with tf.name_scope('EdgeMask'):\n edgemask_org = tf.stack(groundtruth_edgemask_list) # (B,2,H,W)\n edgemask_org = tf.transpose(edgemask_org, [1,0,2,3]) # (2,B,H,W)\n edgemask_weight = edgemask_org[1] # (B,H,W)\n edgemask_fg = edgemask_org[0] # (B,H,W)\n edgemask_bg = tf.ones_like(edgemask_fg, dtype=tf.float32) - edgemask_fg # (B,H,W)\n edgemask = tf.stack([edgemask_bg, edgemask_fg], axis=0) # (2,B,H,W)\n edgemask = tf.transpose(edgemask, perm=[1, 2, 3, 0]) # (B,H,W,2)\n\n edgemask_predictions_resize = tf.image.resize_images(edgemask_predictions,\n [tf.shape(edgemask)[1], tf.shape(edgemask)[2]])\n\n edgemask_loss_total = self._edgemask_loss(edgemask_predictions_resize, edgemask, weights=edgemask_weight)\n edgemask_loss = tf.reduce_mean(edgemask_loss_total)\n\n loss_dict = {\n 'edgemask_loss':\n (self._mtl.edgemask_loss_weight * edgemask_loss)\n }\n\n return loss_dict\n\n def _padded_batched_proposals_indicator(self,\n num_proposals,\n max_num_proposals):\n \"\"\"Creates indicator matrix of non-pad elements of padded batch proposals.\n\n Args:\n num_proposals: Tensor of type tf.int32 with shape [batch_size].\n max_num_proposals: Maximum number of proposals per image (integer).\n\n Returns:\n A Tensor of type tf.bool with shape [batch_size, max_num_proposals].\n \"\"\"\n batch_size = tf.size(num_proposals)\n tiled_num_proposals = tf.tile(\n tf.expand_dims(num_proposals, 1), [1, max_num_proposals])\n tiled_proposal_index = tf.tile(\n tf.expand_dims(tf.range(max_num_proposals), 0), [batch_size, 1])\n return tf.greater(tiled_num_proposals, tiled_proposal_index)\n\n def _unpad_proposals_and_apply_hard_mining(self,\n proposal_boxlists,\n second_stage_loc_losses,\n second_stage_cls_losses,\n num_proposals):\n \"\"\"Unpads proposals and applies hard mining.\n\n Args:\n proposal_boxlists: A list of `batch_size` BoxLists each representing\n `self.max_num_proposals` representing decoded proposal bounding boxes\n for each image.\n second_stage_loc_losses: A Tensor of type `float32`. A tensor of shape\n `[batch_size, self.max_num_proposals]` representing per-anchor\n second stage localization loss values.\n second_stage_cls_losses: A Tensor of type `float32`. A tensor of shape\n `[batch_size, self.max_num_proposals]` representing per-anchor\n second stage classification loss values.\n num_proposals: A Tensor of type `int32`. 
A 1-D tensor of shape [batch]\n representing the number of proposals predicted for each image in\n the batch.\n\n Returns:\n second_stage_loc_loss: A scalar float32 tensor representing the second\n stage localization loss.\n second_stage_cls_loss: A scalar float32 tensor representing the second\n stage classification loss.\n \"\"\"\n for (proposal_boxlist, single_image_loc_loss, single_image_cls_loss,\n single_image_num_proposals) in zip(\n proposal_boxlists,\n tf.unstack(second_stage_loc_losses),\n tf.unstack(second_stage_cls_losses),\n tf.unstack(num_proposals)):\n proposal_boxlist = box_list.BoxList(\n tf.slice(proposal_boxlist.get(),\n [0, 0], [single_image_num_proposals, -1]))\n single_image_loc_loss = tf.slice(single_image_loc_loss,\n [0], [single_image_num_proposals])\n single_image_cls_loss = tf.slice(single_image_cls_loss,\n [0], [single_image_num_proposals])\n return self._hard_example_miner(\n location_losses=tf.expand_dims(single_image_loc_loss, 0),\n cls_losses=tf.expand_dims(single_image_cls_loss, 0),\n decoded_boxlist_list=[proposal_boxlist])\n\n def restore_map(self,\n from_detection_checkpoint=True,\n restore_box_predictor=False,\n restore_window=False,\n restore_edgemask=False,\n restore_closeness=False,\n restore_mtl_refine=False):\n \"\"\"Returns a map of variables to load from a foreign checkpoint.\n See parent class for details.\n Args:\n from_detection_checkpoint: whether to restore from a full detection\n checkpoint (with compatible variable names) or to restore from a\n classification checkpoint for initialization prior to training.\n restore_box_predictor: Whether to restore the weights of box predictor.\n Returns:\n A dict mapping variable names (to load from a checkpoint) to variables in\n the model graph.\n \"\"\"\n if not from_detection_checkpoint:\n return self._feature_extractor.restore_from_classification_checkpoint_fn(\n self.first_stage_feature_extractor_scope,\n self.second_stage_feature_extractor_scope)\n\n variables_to_restore = {}\n for variable in tf.global_variables():\n var_name = variable.op.name\n skipped = True\n for scope_name in [self.first_stage_feature_extractor_scope,\n self.second_stage_feature_extractor_scope]:\n if var_name.startswith(scope_name):\n log.infov(' Restore [%s]', var_name)\n variables_to_restore[var_name] = variable\n skipped = False\n for scope_name in [self.first_stage_box_predictor_scope,\n self.second_stage_box_predictor_scope]:\n if var_name.startswith(scope_name) and restore_box_predictor:\n log.infov(' Restore [%s]', var_name)\n variables_to_restore[var_name] = variable\n skipped = False\n if restore_window:\n for scope_name in [self.window_box_predictor_scope]:\n if var_name.startswith(scope_name):\n log.infov(' Restore [%s]', var_name)\n variables_to_restore[var_name] = variable\n skipped = False\n if restore_edgemask:\n for scope_name in [self.edgemask_predictor_scope]:\n if var_name.startswith(scope_name):\n log.infov(' Restore [%s]', var_name)\n variables_to_restore[var_name] = variable\n skipped = False\n if restore_closeness:\n for scope_name in [self.closeness_box_predictor_scope]:\n if var_name.startswith(scope_name):\n log.infov(' Restore [%s]', var_name)\n variables_to_restore[var_name] = variable\n skipped = False\n if restore_mtl_refine:\n for scope_name in [self.mtl_refiner_scope]:\n if var_name.startswith(scope_name):\n log.infov(' Restore [%s]', var_name)\n variables_to_restore[var_name] = variable\n skipped = False\n if skipped:\n log.warn(' Skip [%s]', var_name)\n continue\n return 
variables_to_restore\n"
] | [
[
"tensorflow.ones",
"tensorflow.ones_like",
"tensorflow.reshape",
"tensorflow.Dimension",
"tensorflow.stack",
"tensorflow.greater",
"tensorflow.nn.softmax",
"tensorflow.to_float",
"tensorflow.tile",
"tensorflow.identity",
"tensorflow.cast",
"tensorflow.shape",
"tensorflow.concat",
"tensorflow.argmax",
"tensorflow.global_variables",
"tensorflow.transpose",
"tensorflow.constant",
"tensorflow.variable_scope",
"tensorflow.squeeze",
"tensorflow.split",
"tensorflow.pad",
"tensorflow.range",
"tensorflow.expand_dims",
"tensorflow.map_fn",
"tensorflow.name_scope",
"tensorflow.reduce_sum",
"tensorflow.to_int32",
"tensorflow.unstack",
"tensorflow.boolean_mask",
"tensorflow.size",
"tensorflow.multiply",
"tensorflow.greater_equal",
"tensorflow.slice",
"tensorflow.reduce_mean",
"tensorflow.stop_gradient"
]
] |
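Note: a minimal NumPy sketch (not from the repo) of the tiling logic used by _padded_batched_proposals_indicator above; the argument meanings follow its docstring.

import numpy as np

def padded_proposals_indicator(num_proposals, max_num_proposals):
    # row i is True for the first num_proposals[i] of max_num_proposals slots
    batch_size = num_proposals.shape[0]
    tiled_num = np.tile(num_proposals[:, None], (1, max_num_proposals))
    tiled_idx = np.tile(np.arange(max_num_proposals)[None, :], (batch_size, 1))
    return tiled_num > tiled_idx

print(padded_proposals_indicator(np.array([2, 0, 3]), 4))
# [[ True  True False False]
#  [False False False False]
#  [ True  True  True False]]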
kaikai581/t2k-mppc-daq | [
"6b4f7bf04d885e952d9fd653df8f9ca1dd31089e"
] | [
"FEBDAQMULTx2/data_analysis/12_labview_dark_rate/1_plot_dcr_vs_thr_by_stitching.py"
] | [
"#!/usr/bin/env python\n'''\nMake the dark count rate vs. threshold plot by concatenating multiple files.\n'''\n\nimport argparse\nimport matplotlib\nmatplotlib.use('Agg')\nimport os\nimport pandas as pd\nimport seaborn as sns\n\nclass segmented_rate:\n def __init__(self, infpns):\n self.dfs_raw = []\n self.df_rate = pd.DataFrame()\n self.infpns = infpns\n\n # load all files into dataframes\n self.load_dfs(infpns)\n # calculate rates for each threshold and store to df_rate\n self.calculate_rate()\n \n def calculate_rate(self):\n '''\n Calculate rates for each threshold and store to df_rate.\n '''\n for df in self.dfs_raw:\n for thr in df.columns:\n # self.df_rate[thr] = pd.Series(df[thr].iloc[2:].mean())\n self.df_rate = self.df_rate.append({'threshold (mV)': int(thr), 'rate (Hz)': df[thr].iloc[2:].mean()}, ignore_index=True)\n\n def cleanup_col(self, df):\n bad_cols = [colname for colname in df.columns if not colname.isnumeric()]\n df.drop(columns=bad_cols, inplace=True)\n\n def load_dfs(self, infpns):\n for infpn in infpns:\n df = pd.read_csv(infpn, sep='\\t', index_col=False)\n self.cleanup_col(df)\n self.dfs_raw.append(df)\n \n def plot_rate_vs_thr(self):\n '''\n Output a summary plot.\n '''\n if len(self.df_rate.columns) == 0:\n print('No rate data found.')\n return\n \n # prepare the output folder\n out_dir = 'plots'\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n\n ax = sns.scatterplot(data=self.df_rate, x='threshold (mV)', y='rate (Hz)')\n ax.set_yscale('log')\n ax.grid('both')\n fig = ax.get_figure()\n fig.tight_layout()\n fig.savefig(f'{out_dir}/dcr_vs_thr.png')\n fig.clf()\n \n def to_csv(self):\n fns = [os.path.splitext(os.path.basename(s))[0] for s in self.infpns]\n out_dir = 'combined_data'\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n out_fn = os.path.commonprefix(fns)+os.path.commonprefix([s[::-1] for s in fns])[::-1]+'.csv'\n out_pn = os.path.join(out_dir, out_fn)\n \n self.df_rate.to_csv(out_pn, index=False)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('-i', '--input_filenames', type=str, nargs='*', default=['data/20210820_LabVIEW_dark_rate/2021-08-19-151915-B-SK-CH2.txt', 'data/20210820_LabVIEW_dark_rate/2021-08-19-152157-B-SK-CH2.txt'])\n args = parser.parse_args()\n \n my_data = segmented_rate(args.input_filenames)\n my_data.plot_rate_vs_thr()\n my_data.to_csv()\n"
] | [
[
"matplotlib.use",
"pandas.DataFrame",
"pandas.read_csv"
]
] |
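Note: calculate_rate() above relies on DataFrame.append, which pandas removed in 2.0. A sketch of the same accumulation with pd.concat (the threshold/rate values here are made up):

import pandas as pd

df_rate = pd.DataFrame()
for thr, rate in [(10, 1.5e5), (20, 3.2e4)]:  # made-up sample points
    row = pd.DataFrame([{'threshold (mV)': int(thr), 'rate (Hz)': rate}])
    df_rate = pd.concat([df_rate, row], ignore_index=True)
print(df_rate)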
jasondark/cvxpy | [
"bd6f5142effa8cf883d1a0d7fd46c0d906b2fb93"
] | [
"cvxpy/tests/test_dgp.py"
] | [
"import cvxpy\nfrom cvxpy.tests.base_test import BaseTest\nimport numpy as np\n\n\nclass TestDgp(BaseTest):\n def test_product(self):\n x = cvxpy.Variable((), pos=True)\n y = cvxpy.Variable((), pos=True)\n prod = x * y\n self.assertTrue(prod.is_dgp())\n self.assertTrue(prod.is_log_log_convex())\n self.assertTrue(prod.is_log_log_concave())\n\n prod *= prod\n self.assertTrue(prod.is_dgp())\n self.assertTrue(prod.is_log_log_convex())\n self.assertTrue(prod.is_log_log_concave())\n\n prod *= 5.0\n self.assertTrue(prod.is_dgp())\n self.assertTrue(prod.is_log_log_convex())\n self.assertTrue(prod.is_log_log_concave())\n\n prod *= -5.0\n self.assertTrue(not prod.is_dgp())\n self.assertTrue(not prod.is_log_log_convex())\n self.assertTrue(not prod.is_log_log_concave())\n\n def test_product_with_unconstrained_variables_is_not_dgp(self):\n x = cvxpy.Variable()\n y = cvxpy.Variable()\n prod = x * y\n self.assertTrue(not prod.is_dgp())\n self.assertTrue(not prod.is_log_log_convex())\n self.assertTrue(not prod.is_log_log_concave())\n\n z = cvxpy.Variable((), pos=True)\n prod = x * z\n self.assertTrue(not prod.is_dgp())\n self.assertTrue(not prod.is_log_log_convex())\n self.assertTrue(not prod.is_log_log_concave())\n\n def test_division(self):\n x = cvxpy.Variable(pos=True)\n y = cvxpy.Variable(pos=True)\n div = x / y\n\n self.assertTrue(div.is_log_log_affine())\n\n posynomial = 5.0 * x * y + 1.2 * y * y\n div = x / y\n self.assertTrue(div.is_log_log_affine())\n\n div = posynomial / (3.0 * x * y**(-0.1))\n self.assertTrue(div.is_log_log_convex())\n self.assertFalse(div.is_log_log_concave())\n self.assertTrue(div.is_dgp())\n\n div = posynomial / (3.0 * x + y)\n self.assertFalse(div.is_log_log_convex())\n self.assertFalse(div.is_log_log_concave())\n self.assertFalse(div.is_dgp())\n\n def test_add(self):\n x = cvxpy.Variable(pos=True)\n y = cvxpy.Variable(pos=True)\n expr = x + y\n self.assertTrue(expr.is_dgp())\n self.assertTrue(expr.is_log_log_convex())\n self.assertTrue(not expr.is_log_log_concave())\n\n posynomial = 5.0 * x * y + 1.2 * y * y\n self.assertTrue(posynomial.is_dgp())\n self.assertTrue(posynomial.is_log_log_convex())\n\n def test_add_with_unconstrained_variables_is_not_dgp(self):\n x = cvxpy.Variable()\n y = cvxpy.Variable(pos=True)\n expr = x + y\n self.assertTrue(not expr.is_dgp())\n self.assertTrue(not expr.is_log_log_convex())\n self.assertTrue(not expr.is_log_log_concave())\n\n posynomial = 5.0 * x * y + 1.2 * y * y\n self.assertTrue(not posynomial.is_dgp())\n self.assertTrue(not posynomial.is_log_log_convex())\n self.assertTrue(not posynomial.is_log_log_concave())\n\n def test_monomials(self):\n x = cvxpy.Variable(pos=True)\n y = cvxpy.Variable(pos=True)\n z = cvxpy.Variable(pos=True)\n monomial = 5.0 * (x ** 0.1) * y ** (-0.1) * z ** (3)\n self.assertTrue(monomial.is_dgp())\n self.assertTrue(monomial.is_log_log_convex())\n self.assertTrue(monomial.is_log_log_concave())\n\n monomial *= -1.0\n self.assertTrue(not monomial.is_dgp())\n self.assertTrue(not monomial.is_log_log_convex())\n self.assertTrue(not monomial.is_log_log_concave())\n\n def test_maximum(self):\n x = cvxpy.Variable(pos=True)\n y = cvxpy.Variable(pos=True)\n z = cvxpy.Variable(pos=True)\n monomial = 5.0 * (x ** 0.1) * y ** (-0.1) * z ** (3)\n posynomial = 5.0 * x * y + 1.2 * y * y\n another_posynomial = posynomial * posynomial\n expr = cvxpy.maximum(monomial, posynomial, another_posynomial)\n self.assertTrue(expr.is_dgp())\n self.assertTrue(expr.is_log_log_convex())\n self.assertTrue(not expr.is_log_log_concave())\n\n 
expr = posynomial * expr\n self.assertTrue(expr.is_dgp())\n self.assertTrue(expr.is_log_log_convex())\n self.assertTrue(not expr.is_log_log_concave())\n\n expr = posynomial * expr + expr\n self.assertTrue(expr.is_dgp())\n self.assertTrue(expr.is_log_log_convex())\n\n def test_minimum(self):\n x = cvxpy.Variable(pos=True)\n y = cvxpy.Variable(pos=True)\n z = cvxpy.Variable(pos=True)\n monomial = 5.0 * (x ** 0.1) * y ** (-0.1) * z ** (3)\n posynomial = 5.0 * x * y + 1.2 * y * y\n another_posynomial = posynomial * posynomial\n expr = cvxpy.minimum(monomial, 1 / posynomial, 1 / another_posynomial)\n self.assertTrue(expr.is_dgp())\n self.assertTrue(not expr.is_log_log_convex())\n self.assertTrue(expr.is_log_log_concave())\n\n expr = (1 / posynomial) * expr\n self.assertTrue(expr.is_dgp())\n self.assertTrue(not expr.is_log_log_convex())\n self.assertTrue(expr.is_log_log_concave())\n\n expr = expr ** 2\n self.assertTrue(expr.is_dgp())\n self.assertTrue(not expr.is_log_log_convex())\n self.assertTrue(expr.is_log_log_concave())\n\n def test_constant(self):\n x = cvxpy.Constant(1.0)\n self.assertTrue(x.is_dgp())\n self.assertFalse((-1.0*x).is_dgp())\n\n def test_geo_mean(self):\n x = cvxpy.Variable(3, pos=True)\n p = [1, 2, 0.5]\n geo_mean = cvxpy.geo_mean(x, p)\n self.assertTrue(geo_mean.is_dgp())\n self.assertTrue(geo_mean.is_log_log_affine())\n self.assertTrue(geo_mean.is_log_log_convex())\n self.assertTrue(geo_mean.is_log_log_concave())\n\n def test_builtin_sum(self):\n x = cvxpy.Variable(2, pos=True)\n self.assertTrue(sum(x).is_log_log_convex())\n\n def test_gmatmul(self):\n x = cvxpy.Variable(2, pos=True)\n A = cvxpy.Variable((2, 2))\n with self.assertRaises(Exception) as cm:\n cvxpy.gmatmul(A, x)\n self.assertTrue(str(cm.exception) ==\n \"gmatmul(A, X) requires that A be constant.\")\n\n x = cvxpy.Variable(2)\n A = np.ones((4, 2))\n with self.assertRaises(Exception) as cm:\n cvxpy.gmatmul(A, x)\n self.assertTrue(str(cm.exception) ==\n \"gmatmul(A, X) requires that X be positive.\")\n\n x = cvxpy.Variable(3, pos=True)\n A = np.ones((4, 3))\n gmatmul = cvxpy.gmatmul(A, x)\n self.assertTrue(gmatmul.is_dgp())\n self.assertTrue(gmatmul.is_log_log_affine())\n self.assertTrue(gmatmul.is_log_log_convex())\n self.assertTrue(gmatmul.is_log_log_concave())\n self.assertTrue(gmatmul.is_nonneg())\n self.assertTrue(gmatmul.is_incr(0))\n self.assertTrue(cvxpy.gmatmul(-A, x).is_decr(0))\n\n x = cvxpy.Variable((2, 3), pos=True)\n A = np.array([[2., -1.], [0., 3.]])\n gmatmul = cvxpy.gmatmul(A, x)\n self.assertTrue(gmatmul.is_dgp())\n self.assertTrue(gmatmul.is_log_log_affine())\n self.assertTrue(gmatmul.is_log_log_convex())\n self.assertTrue(gmatmul.is_log_log_concave())\n self.assertFalse(gmatmul.is_incr(0))\n self.assertFalse(gmatmul.is_decr(0))\n\n def test_power_sign(self):\n x = cvxpy.Variable(pos=True)\n self.assertTrue((x**1).is_nonneg())\n self.assertFalse((x**1).is_nonpos())\n"
] | [
[
"numpy.array",
"numpy.ones"
]
] |
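Note: a minimal usage sketch (not part of the test file) showing how DGP expressions like those tested above are actually solved; passing gp=True makes cvxpy apply the log-log transform.

import cvxpy

x = cvxpy.Variable(pos=True)
y = cvxpy.Variable(pos=True)
posynomial = 5.0 * x * y + 1.2 * y * y
problem = cvxpy.Problem(cvxpy.Minimize(posynomial), [x * y >= 1.0])
problem.solve(gp=True)  # solved through the log-log (geometric programming) transform
print(problem.value, x.value, y.value)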
aaalgo/paperworks | [
"ea26a86469a878bcd4cb4ed3f5303ec37d21d733"
] | [
"flow/register.py"
] | [
"import sys\nfrom sklearn.linear_model import LinearRegression\nfrom reportlab.lib.units import inch\nfrom skimage import measure\nfrom flow.models import *\nfrom params import *\nimport numpy as np\nimport cv2\n\ndef points2paper (points):\n return [(x * SCAN_PPI/inch, y * SCAN_PPI/inch) for x, y in points]\n\ndef boxes2paper (boxes):\n return [(x * SCAN_PPI / inch,\n y * SCAN_PPI / inch,\n w * SCAN_PPI / inch,\n h * SCAN_PPI / inch)\n for x, y, w, h in boxes]\n\ndef expand (box, shape, l):\n y0, x0, y1, x1 = box\n H, W = shape\n y0 = max(0, y0-l)\n x0 = max(0, x0-l)\n y1 = min(H, y1+l)\n x1 = min(W, x1+l)\n return y0, x0, y1, x1\n\ndef detect_center (patch):\n cv2.imwrite('xxx.png', patch)\n # detect outer circle\n circles = cv2.HoughCircles(patch, cv2.HOUGH_GRADIENT, 1, 1000, param1=50,param2=30, minRadius=30, maxRadius=60)\n if circles is None:\n return None\n outside = circles[0, 0, :]\n #print(\"outside circle\", outside)\n\n # remove outside circle \n H, W = patch.shape\n mask = np.ones((H, W), dtype=np.uint8)\n x, y, r = np.round(outside).astype(\"int\")\n cv2.circle(mask, (x, y), r * 4 //5, 0, -1)\n patch[mask > 0] = 0\n # detect inside circle\n circles = cv2.HoughCircles(patch, cv2.HOUGH_GRADIENT, 1, 1000, param1=50,param2=30, minRadius=0, maxRadius=0)\n if circles is None:\n return None\n inside = circles[0, 0, :]\n #print(\"inside circle\", inside)\n\n '''\n patch = cv2.cvtColor(patch, cv2.COLOR_GRAY2BGR)\n cv2.circle(patch, (x, y), r, (0, 255, 0), 4)\n x, y, r = np.round(inside).astype(\"int\")\n cv2.circle(patch, (x, y), r, (0, 255, 0), 4)\n '''\n\n x1, y1, _ = outside\n x2, y2, _ = inside\n\n return (x1+x2)/2, (y1+y2)/2\n\ndef detect_circles (gray, off):\n #image = cv2.GaussianBlur(gray, (9, 9), 0)\n binary = gray < BLACK_TH\n labels = measure.label(binary, background=0)\n\n #cv2.imwrite('xxx.png', binary * 255)\n #sys.exit(0)\n H, W = binary.shape\n\n centers = []\n X0, Y0 = off\n for box in measure.regionprops(labels):\n if box.area < 1500:\n continue\n #print(box.bbox)\n y0,x0,y1,x1 = expand(box.bbox, binary.shape, 10)\n \n patch = gray[y0:y1, x0:x1]\n\n x, y = detect_center(patch)\n \n #print(\"\\t\", box.area, box.centroid)\n centers.append([X0+x0+x, Y0+y0+y])\n return centers\n\ndef detect_anchors (gray):\n H, W = gray.shape\n if H > W:\n h = H//4\n w = W//7\n else:\n h = H//7\n w = W//4\n\n blocks = [(0, 0),\n (W-w, 0),\n (0, H-h),\n (W-w, H-h)]\n anchors = []\n for i, (x, y) in enumerate(blocks):\n aoi = np.copy(gray[y:(y+h), x:(x+w)])\n cv2.imwrite('block-%d.png' % i , aoi)\n circles = detect_circles(aoi, (x, y))\n\n if H > W: # portrait\n circles.sort(key=lambda a: a[1])\n else: # landscape\n circles.sort(key=lambda a: a[0])\n anchors.append(circles)\n pass\n return anchors\n\ndef rotate_clockwise (image):\n image = cv2.transpose(image)\n return cv2.flip(image, 1)\n\ndef rotate_counterclockwise (image):\n image = cv2.transpose(image)\n return cv2.flip(image, 0)\n\ndef rotate_normalize (image):\n anchors = detect_anchors(image)\n print([len(x) for x in anchors])\n if len(anchors[1]) == 5:\n return rotate_clockwise\n elif len(anchors[2]) == 5:\n return rotate_counterclockwise\n pass\n\n\ndef calibrate (image, layout):\n anchors = detect_anchors(image)\n assert len(anchors[3]) == 5\n X = np.array(sum(anchors, []), dtype=np.float32)\n gs_anchors = points2paper(layout.anchors)\n y1 = np.array([x for x , _ in gs_anchors], dtype=np.float32)\n y2 = np.array([y for _ , y in gs_anchors], dtype=np.float32)\n reg_x = LinearRegression()\n reg_y = LinearRegression()\n 
reg_x.fit(X, y1)\n    print(\"SCORE X:\", reg_x.score(X, y1))\n    reg_y.fit(X, y2)\n    print(\"SCORE Y:\", reg_y.score(X, y2))\n    #print(reg_x.coef_, reg_x.intercept_)\n    #print(reg_y.coef_, reg_y.intercept_)\n    affine = np.zeros((2,3), dtype=np.float32)\n    affine[0, :2] = reg_x.coef_\n    affine[0, 2] = reg_x.intercept_\n    affine[1, :2] = reg_y.coef_\n    affine[1, 2] = reg_y.intercept_\n    return affine\n\ndef bbox1(img):\n    a = np.where(img > 0)\n    bbox = np.min(a[0]), np.max(a[0]), np.min(a[1]), np.max(a[1])\n    return bbox\n\ndef bbox2(img):\n    rows = np.sum(img, axis=1)\n    cols = np.sum(img, axis=0)\n    rmin, rmax = np.where(rows)[0][[0, -1]]\n    cmin, cmax = np.where(cols)[0][[0, -1]]\n    return rmin, cmin, rmax+1, cmax+1\n\ndef crop_margin (image, gray):\n    black = gray < BLACK_TH\n    y0, x0, y1, x1 = expand(bbox2(black), black.shape, 20)\n    return image[y0:y1, x0:x1], gray[y0:y1, x0:x1]\n\ndef normalize (image, image_raw, layout):\n    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n    gray = cv2.normalize(gray, None, 0, 255, cv2.NORM_MINMAX)\n\n    image = image_raw\n\n    image, gray = crop_margin(image, gray)\n\n    rotate = rotate_normalize(gray)\n    image = rotate(image)\n    gray = rotate(gray)\n\n    affine = calibrate(gray, layout)\n\n    W, H = layout.paper_size\n    W = int(round(W * SCAN_PPI / inch))\n    H = int(round(H * SCAN_PPI / inch))\n\n    return cv2.warpAffine(image, affine, (W, H))\n\n\n"
] | [
[
"numpy.max",
"numpy.array",
"numpy.zeros",
"sklearn.linear_model.LinearRegression",
"numpy.round",
"numpy.sum",
"numpy.copy",
"numpy.ones",
"numpy.min",
"numpy.where"
]
] |
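Note: a self-contained sketch (synthetic points, not the repo's anchors) of the idea behind calibrate(): regress paper-space x and y separately on the scanned points, then pack the coefficients into the 2x3 affine matrix that cv2.warpAffine expects.

import numpy as np
from sklearn.linear_model import LinearRegression

scanned = np.array([[10., 12.], [200., 15.], [12., 300.], [205., 310.]], dtype=np.float32)
true_affine = np.array([[1.01, 0.0, 5.0], [0.0, 0.99, -3.0]], dtype=np.float32)
paper = scanned @ true_affine[:, :2].T + true_affine[:, 2]  # synthetic targets

reg_x = LinearRegression().fit(scanned, paper[:, 0])
reg_y = LinearRegression().fit(scanned, paper[:, 1])
affine = np.zeros((2, 3), dtype=np.float32)
affine[0, :2], affine[0, 2] = reg_x.coef_, reg_x.intercept_
affine[1, :2], affine[1, 2] = reg_y.coef_, reg_y.intercept_
print(np.allclose(affine, true_affine, atol=1e-4))  # True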
SyuyaMurakami/RiskQuantLib-Doc | [
"2503befc24c2e422e51f8b9f468c8d8439e11c65"
] | [
"RiskQuantLib/Set/InterestRate/base.py"
] | [
"#!/usr/bin/python\n#coding = utf-8\nimport numpy as np\nimport pandas as pd\nfrom RiskQuantLib.Property.NumberProperty.numberProperty import numberProperty\nfrom RiskQuantLib.Property.InterestRate.interestRate import interestRate\n\nclass setBase:\n\n def setCode(self,codeString):\n self.code = codeString\n\n def setName(self,nameString):\n self.name = nameString\n\n def setInterestRateType(self,interestRateTypeString):\n self.interestRateType = interestRateTypeString\n\n def setSubInterestRateType(self,subInterestRateType):\n self.subInterestRateType = subInterestRateType\n\n def setInterestRate(self,interestRateNum,interestRateDateTimeStamp = pd.Timestamp.now()):\n if not hasattr(self,'__interestRate'):\n self.__interestRate = interestRate(interestRateNum)\n self.interestRate = self.__interestRate.value\n else:\n self.__interestRate.setValue(interestRateNum)\n self.__interestRate.setEffectiveDate(interestRateDateTimeStamp)\n\n def setTenor(self,tenorValue,unitString='Year'):\n if not hasattr(self,'__tenor'):\n self.__tenor = numberProperty(tenorValue, unitString)\n self.tenor = self.__tenor.value\n else:\n self.__tenor.setValue(tenorValue)\n self.__tenor.setUnit(unitString)\n\n def setDayCount(self,dayCountObject):\n self.dayCount = dayCountObject\n\n def setCompounding(self,compoundingObject):\n self.compounding = compoundingObject\n\n def setFrequency(self,frequencyObject):\n self.frequency = frequencyObject\n\n # build module, contents below will be automatically built and replaced, self-defined functions shouldn't be written here\n #-<Begin>\n #-<End>"
] | [
[
"pandas.Timestamp.now"
]
] |
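Note: the hasattr(self, '__interestRate') and hasattr(self, '__tenor') checks above can never be True, because double-underscore attributes assigned inside a class body are name-mangled (stored as _setBase__interestRate and so on), so a fresh property object is built on every call. A minimal demonstration (the Demo class is made up):

class Demo:
    def set_value(self, v):
        if not hasattr(self, '__v'):  # always False: the attribute is stored as _Demo__v
            self.__v = v

d = Demo()
d.set_value(1)
print(hasattr(d, '_Demo__v'), hasattr(d, '__v'))  # True False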
funnylittleman/Multilingual_Text_to_Speech | [
"4fad3dd1b74964cdd6f66d5e8d659d942fd56c57"
] | [
"finetune.py"
] | [
"import os\nimport time\nimport datetime\nimport math\nimport numpy as np\nimport torch\nfrom torch.utils.data import DataLoader\n\nfrom dataset.dataset import TextToSpeechDatasetCollection, TextToSpeechCollate\nfrom params.params import Params as hp\nfrom utils import audio, text\nfrom modules.tacotron2 import Tacotron, TacotronLoss\nfrom utils.logging import Logger\nfrom utils.samplers import RandomImbalancedSampler, PerfectBatchSampler\nfrom utils import lengths_to_mask, to_gpu\n\nfrom tqdm import tqdm\n\n\ndef cos_decay(global_step, decay_steps):\n \"\"\"Cosine decay function\n \n Arguments:\n global_step -- current training step\n decay_steps -- number of decay steps \n \"\"\"\n global_step = min(global_step, decay_steps)\n return 0.5 * (1 + math.cos(math.pi * global_step / decay_steps))\n\n\ndef train(logging_start_epoch, epoch, data, model, criterion, optimizer):\n \"\"\"Main training procedure.\n \n Arguments:\n logging_start_epoch -- number of the first epoch to be logged\n epoch -- current epoch \n data -- DataLoader which can provide batches for an epoch\n model -- model to be trained\n criterion -- instance of loss function to be optimized\n optimizer -- instance of optimizer which will be used for parameter updates\n \"\"\"\n\n model.train() \n\n # initialize counters, etc.\n learning_rate = optimizer.param_groups[0]['lr']\n print('learning_rate', learning_rate)\n cla = 0\n done, start_time = 0, time.time()\n\n # loop through epoch batches\n for i, batch in tqdm(enumerate(data), total=len(data)): \n\n global_step = done + epoch * len(data)\n optimizer.zero_grad() \n\n # parse batch\n batch = list(map(to_gpu, batch))\n src, src_len, trg_mel, trg_lin, trg_len, stop_trg, spkrs, langs = batch\n\n # get teacher forcing ratio\n if hp.constant_teacher_forcing: tf = hp.teacher_forcing\n else: tf = cos_decay(max(global_step - hp.teacher_forcing_start_steps, 0), hp.teacher_forcing_steps)\n\n # run the model\n post_pred, pre_pred, stop_pred, alignment, spkrs_pred, enc_output = model(src, src_len, trg_mel, trg_len, spkrs, langs, tf)\n \n # evaluate loss function\n post_trg = trg_lin if hp.predict_linear else trg_mel\n classifier = model._reversal_classifier if hp.reversal_classifier else None\n loss, batch_losses = criterion(src_len, trg_len, pre_pred, trg_mel, post_pred, post_trg, stop_pred, stop_trg, alignment, \n spkrs, spkrs_pred, enc_output, classifier)\n\n # evaluate adversarial classifier accuracy, if present\n if hp.reversal_classifier:\n input_mask = lengths_to_mask(src_len)\n trg_spkrs = torch.zeros_like(input_mask, dtype=torch.int64) \n for s in range(hp.speaker_number):\n speaker_mask = (spkrs == s)\n trg_spkrs[speaker_mask] = s\n matches = (trg_spkrs == torch.argmax(torch.nn.functional.softmax(spkrs_pred, dim=-1), dim=-1))\n matches[~input_mask] = False\n cla = torch.sum(matches).item() / torch.sum(input_mask).item()\n\n # comptute gradients and make a step\n loss.backward() \n gradient = torch.nn.utils.clip_grad_norm_(model.parameters(), hp.gradient_clipping)\n optimizer.step() \n \n # log training progress\n # if epoch >= logging_start_epoch:\n # Logger.training(global_step, batch_losses, gradient, learning_rate, time.time() - start_time, cla) \n\n # update criterion states (params and decay of the loss and so on ...)\n criterion.update_states()\n\n start_time = time.time()\n done += 1 \n \n\ndef evaluate(epoch, data, model, criterion): \n \"\"\"Main evaluation procedure.\n \n Arguments:\n epoch -- current epoch \n data -- DataLoader which can provide validation batches\n 
model -- model to be evaluated\n criterion -- instance of loss function to measure performance\n \"\"\"\n\n model.eval()\n\n # initialize counters, etc.\n mcd, mcd_count = 0, 0\n cla, cla_count = 0, 0\n eval_losses = {}\n\n # loop through epoch batches\n with torch.no_grad(): \n for i, batch in tqdm(enumerate(data), total=len(data)):\n\n # parse batch\n batch = list(map(to_gpu, batch))\n src, src_len, trg_mel, trg_lin, trg_len, stop_trg, spkrs, langs = batch\n\n # run the model (twice, with and without teacher forcing)\n post_pred, pre_pred, stop_pred, alignment, spkrs_pred, enc_output = model(src, src_len, trg_mel, trg_len, spkrs, langs, 1.0)\n post_pred_0, _, stop_pred_0, alignment_0, _, _ = model(src, src_len, trg_mel, trg_len, spkrs, langs, 0.0)\n stop_pred_probs = torch.sigmoid(stop_pred_0)\n\n # evaluate loss function\n post_trg = trg_lin if hp.predict_linear else trg_mel\n classifier = model._reversal_classifier if hp.reversal_classifier else None\n loss, batch_losses = criterion(src_len, trg_len, pre_pred, trg_mel, post_pred, post_trg, stop_pred, stop_trg, alignment, \n spkrs, spkrs_pred, enc_output, classifier)\n \n # compute mel cepstral distorsion\n for j, (gen, ref, stop) in enumerate(zip(post_pred_0, trg_mel, stop_pred_probs)):\n stop_idxes = np.where(stop.cpu().numpy() > 0.5)[0]\n stop_idx = min(np.min(stop_idxes) + hp.stop_frames, gen.size()[1]) if len(stop_idxes) > 0 else gen.size()[1]\n gen = gen[:, :stop_idx].data.cpu().numpy()\n ref = ref[:, :trg_len[j]].data.cpu().numpy()\n if hp.normalize_spectrogram:\n gen = audio.denormalize_spectrogram(gen, not hp.predict_linear)\n ref = audio.denormalize_spectrogram(ref, True)\n if hp.predict_linear: gen = audio.linear_to_mel(gen)\n mcd = (mcd_count * mcd + audio.mel_cepstral_distorision(gen, ref, 'dtw')) / (mcd_count+1)\n mcd_count += 1\n\n # compute adversarial classifier accuracy\n if hp.reversal_classifier:\n input_mask = lengths_to_mask(src_len)\n trg_spkrs = torch.zeros_like(input_mask, dtype=torch.int64) \n for s in range(hp.speaker_number):\n speaker_mask = (spkrs == s)\n trg_spkrs[speaker_mask] = s\n matches = (trg_spkrs == torch.argmax(torch.nn.functional.softmax(spkrs_pred, dim=-1), dim=-1))\n matches[~input_mask] = False\n cla = (cla_count * cla + torch.sum(matches).item() / torch.sum(input_mask).item()) / (cla_count+1)\n cla_count += 1\n\n # add batch losses to epoch losses\n for k, v in batch_losses.items(): \n eval_losses[k] = v + eval_losses[k] if k in eval_losses else v \n\n # normalize loss per batch\n for k in eval_losses.keys():\n eval_losses[k] /= len(data)\n\n # log evaluation\n # Logger.evaluation(epoch+1, eval_losses, mcd, src_len, trg_len, src, post_trg, post_pred, post_pred_0, stop_pred_probs, stop_trg, alignment_0, cla)\n for key in eval_losses.keys():\n print(f'{key} =\\t{eval_losses[key]}')\n \n return sum(eval_losses.values())\n\n\nclass DataParallelPassthrough(torch.nn.DataParallel):\n \"\"\"Simple wrapper around DataParallel.\"\"\" \n def __getattr__(self, name):\n try:\n return super().__getattr__(name)\n except AttributeError:\n return getattr(self.module, name)\n\n\nif __name__ == '__main__':\n import argparse\n import os\n import re\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--base_directory\", type=str, default=\".\", help=\"Base directory of the project.\")\n parser.add_argument(\"--checkpoint\", type=str, default=None, help=\"Name of the initial checkpoint.\")\n parser.add_argument(\"--checkpoint_root\", type=str, default=\"checkpoints\", help=\"Base directory of 
checkpoints.\")\n parser.add_argument(\"--data_root\", type=str, default=\"data\", help=\"Base directory of datasets.\")\n parser.add_argument(\"--flush_seconds\", type=int, default=60, help=\"How often to flush pending summaries to tensorboard.\")\n parser.add_argument('--hyper_parameters', type=str, default=None, help=\"Name of the hyperparameters file.\")\n parser.add_argument('--logging_start', type=int, default=1, help=\"First epoch to be logged\")\n parser.add_argument('--max_gpus', type=int, default=2, help=\"Maximal number of GPUs of the local machine to use.\")\n parser.add_argument('--loader_workers', type=int, default=2, help=\"Number of subprocesses to use for data loading.\")\n \n \n parser.add_argument('--epochs', type=int, default=50, help=\"Number of epochs.\")\n parser.add_argument('--batch_size', type=int, default=1, help=\"batch_size\")\n parser.add_argument('--checkpoint_each_epochs', type=int, default=5, help=\"checkpoint_each_epochs\")\n \n parser.add_argument('--learning_rate', type=float, default=1e-7, help=\"lr\")\n parser.add_argument('--encoder_lr', type=float, default=1e-7, help=\"lr\")\n parser.add_argument('--decoder_lr', type=float, default=1e-7, help=\"lr\")\n parser.add_argument('--postnet_lr', type=float, default=1e-7, help=\"lr\")\n parser.add_argument('--prenet_lr', type=float, default=1e-7, help=\"lr\")\n parser.add_argument('--embedding_lr', type=float, default=1e-7, help=\"lr\")\n parser.add_argument('--attention_lr', type=float, default=1e-7, help=\"lr\")\n parser.add_argument('--reversal_classifier_lr', type=float, default=1e-7, help=\"lr\")\n \n args = parser.parse_args()\n\n # set up seeds and the target torch device\n np.random.seed(42)\n torch.manual_seed(42)\n torch.backends.cudnn.enabled = True\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\n # prepare directory for checkpoints \n checkpoint_dir = os.path.join(args.base_directory, args.checkpoint_root)\n if not os.path.exists(checkpoint_dir):\n os.makedirs(checkpoint_dir)\n\n # load checkpoint (dict) with saved hyper-parameters (let some of them be overwritten because of fine-tuning)\n if args.checkpoint:\n checkpoint = os.path.join(checkpoint_dir, args.checkpoint)\n checkpoint_state = torch.load(checkpoint, map_location='cpu')\n checkpoint_state['parameters']['unique_speakers'].append('finetuning')\n checkpoint_state['parameters']['speaker_number'] += 1\n hp.load_state_dict(checkpoint_state['parameters']) \n\n# # load hyperparameters\n# if args.hyper_parameters is not None:\n# hp_path = os.path.join(args.base_directory, 'params', f'{args.hyper_parameters}.json')\n# hp.load(hp_path)\n \n # For finetuning\n hp.dataset = 'finetuning'\n hp.batch_size = args.batch_size\n hp.epochs = args.epochs\n hp.checkpoint_each_epochs = args.checkpoint_each_epochs\n \n\n # load dataset\n dataset = TextToSpeechDatasetCollection(os.path.join(args.data_root, hp.dataset), known_unique_speakers=hp.unique_speakers)\n\n if hp.multi_language and hp.balanced_sampling and hp.perfect_sampling and 0:\n dp_devices = args.max_gpus if hp.parallelization and torch.cuda.device_count() > 1 else 1 \n train_sampler = PerfectBatchSampler(dataset.train, hp.languages, hp.batch_size, data_parallel_devices=dp_devices, shuffle=True, drop_last=True)\n train_data = DataLoader(dataset.train, batch_sampler=train_sampler, collate_fn=TextToSpeechCollate(False), num_workers=args.loader_workers)\n eval_sampler = PerfectBatchSampler(dataset.dev, hp.languages, hp.batch_size, 
data_parallel_devices=dp_devices, shuffle=False)\n eval_data = DataLoader(dataset.dev, batch_sampler=eval_sampler, collate_fn=TextToSpeechCollate(False), num_workers=args.loader_workers)\n else:\n sampler = RandomImbalancedSampler(dataset.train) if hp.multi_language and hp.balanced_sampling else None\n train_data = DataLoader(dataset.train, batch_size=hp.batch_size, drop_last=True, shuffle=(not hp.multi_language or not hp.balanced_sampling),\n sampler=sampler, collate_fn=TextToSpeechCollate(True), num_workers=args.loader_workers)\n eval_data = DataLoader(dataset.dev, batch_size=hp.batch_size, drop_last=False, shuffle=False,\n collate_fn=TextToSpeechCollate(True), num_workers=args.loader_workers)\n\n # find out number of unique speakers and languages\n# hp.speaker_number = 0 if not hp.multi_speaker else dataset.train.get_num_speakers() \n # For finetuning\n hp.speaker_number = 92\n\n hp.language_number = 0 if not hp.multi_language else len(hp.languages)\n # save all found speakers to hyper parameters\n# if hp.multi_speaker and not args.checkpoint:\n# hp.unique_speakers = dataset.train.unique_speakers\n\n # acquire dataset-dependent constants, these should probably be the same while going from checkpoint\n if not args.checkpoint:\n # compute per-channel constants for spectrogram normalization\n hp.mel_normalize_mean, hp.mel_normalize_variance = dataset.train.get_normalization_constants(True)\n if hp.predict_linear:\n hp.lin_normalize_mean, hp.lin_normalize_variance = dataset.train.get_normalization_constants(False) \n\n # instantiate model\n if torch.cuda.is_available(): \n model = Tacotron().cuda()\n if hp.parallelization and args.max_gpus > 1 and torch.cuda.device_count() > 1:\n model = DataParallelPassthrough(model, device_ids=list(range(args.max_gpus)))\n else: model = Tacotron()\n\n # instantiate optimizer and scheduler\n# encoder_params = list(model._encoder.parameters())\n# decoder_params = list(model._decoder.parameters()) \n# postnet_params = list(model._postnet.parameters()) \n# prenet_params = list(model._prenet.parameters())\n# embedding_params = list(model._embedding.parameters())\n# attention_params = list(model._attention.parameters())\n# decoder_params += postnet_params + embedding_params + attention_params + prenet_params\n# reversal_classifier_params = []\n# if hp.reversal_classifier:\n# reversal_classifier_params += list(model._reversal_classifier.parameters()) \n \n# hp.learning_rate_encoder = args.encoder_lr\n# hp.learning_rate = args.learning_rate\n\n decoder_params = list(model._decoder.parameters())\n optimizer = torch.optim.Adam(decoder_params, lr=args.learning_rate, weight_decay=hp.weight_decay)\n # optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate, weight_decay=hp.weight_decay)\n # if hp.encoder_optimizer:\n # encoder_params = list(model._encoder.parameters())\n # other_params = list(model._decoder.parameters()) + list(model._postnet.parameters()) + list(model._prenet.parameters()) + \\\n # list(model._embedding.parameters()) + list(model._attention.parameters())\n # if hp.reversal_classifier:\n # other_params += list(model._reversal_classifier.parameters()) \n # optimizer = torch.optim.Adam([\n # {'params': other_params},\n # {'params': encoder_params, 'lr': args.encoder_lr}\n # ], lr=args.learning_rate, weight_decay=hp.weight_decay)\n scheduler = torch.optim.lr_scheduler.StepLR(optimizer, hp.learning_rate_decay_each // len(train_data), gamma=hp.learning_rate_decay)\n criterion = TacotronLoss(hp.guided_attention_steps, 
hp.guided_attention_toleration, hp.guided_attention_gain)\n\n # load model weights and optimizer, scheduler states from checkpoint state dictionary\n initial_epoch = 0\n if args.checkpoint:\n # load model state dict (can be imcomplete if pretraining part of the model)\n model_dict = model.state_dict()\n pretrained_dict = {k: v for k, v in checkpoint_state['model'].items() if k in model_dict}\n model_dict.update(pretrained_dict) \n model.load_state_dict(model_dict)\n # other states from checkpoint -- optimizer, scheduler, loss, epoch\n if 'epoch' in checkpoint_state.keys() and checkpoint_state['epoch'] is not None:\n initial_epoch = checkpoint_state['epoch'] + 1\n if 'optimizer' in checkpoint_state.keys() and checkpoint_state['optimizer'] is not None:\n optimizer.load_state_dict(checkpoint_state['optimizer'])\n if 'scheduler' in checkpoint_state.keys() and checkpoint_state['scheduler'] is not None:\n scheduler.load_state_dict(checkpoint_state['scheduler'])\n if 'criterion' in checkpoint_state.keys() and checkpoint_state['criterion'] is not None:\n criterion.load_state_dict(checkpoint_state['criterion'])\n \n \n # initialize logger\n# log_dir = os.path.join(args.base_directory, \"logs\", f'{hp.version}-{datetime.datetime.now().strftime(\"%Y-%m-%d_%H%M%S\")}')\n# Logger.initialize(log_dir, args.flush_seconds)\n\n # training loop\n\n # For finetuning\n print('Pretraining:')\n print(hp.speaker_number)\n print(hp.unique_speakers)\n print(hp.dataset,\n hp.batch_size,\n hp.epochs,\n hp.checkpoint_each_epochs,\n hp.learning_rate,\n hp.perfect_sampling)\n \n# for param in model.parameters():\n# param.requires_grad = False\n\n best_eval = float('inf')\n for epoch in range(initial_epoch, hp.epochs):\n print(f'Epoch {epoch} training')\n train(args.logging_start, epoch, train_data, model, criterion, optimizer) \n if hp.learning_rate_decay_start - hp.learning_rate_decay_each < epoch * len(train_data):\n scheduler.step()\n print(f'Epoch {epoch} evaluate')\n eval_loss = evaluate(epoch, eval_data, model, criterion) \n if (epoch + 1) % hp.checkpoint_each_epochs == 0:\n # save checkpoint together with hyper-parameters, optimizer and scheduler states\n checkpoint_file = f'{checkpoint_dir}/finetuned'\n state_dict = {\n 'epoch': epoch,\n 'model': model.state_dict(),\n 'optimizer': optimizer.state_dict(),\n 'scheduler': scheduler.state_dict(),\n 'parameters': hp.state_dict(),\n 'criterion': criterion.state_dict()\n }\n torch.save(state_dict, checkpoint_file)\n print('Checkpoint saved')\n"
] | [
[
"torch.sigmoid",
"numpy.random.seed",
"torch.no_grad",
"torch.optim.Adam",
"torch.save",
"numpy.min",
"torch.cuda.device_count",
"torch.manual_seed",
"torch.cuda.is_available",
"torch.load",
"torch.nn.functional.softmax",
"torch.zeros_like",
"torch.sum"
]
] |
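Note: a standalone restatement of the cos_decay schedule from finetune.py, with sample values; it anneals the teacher-forcing ratio from 1.0 to 0.0 over decay_steps and stays at 0.0 afterwards.

import math

def cos_decay(global_step, decay_steps):
    # cosine anneal: 1.0 at step 0, 0.5 halfway, 0.0 at decay_steps
    global_step = min(global_step, decay_steps)
    return 0.5 * (1 + math.cos(math.pi * global_step / decay_steps))

for step in (0, 250, 500, 1000, 2000):
    print(step, round(cos_decay(step, 1000), 3))
# 0 1.0, 250 0.854, 500 0.5, 1000 0.0, 2000 0.0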
jasonplato/RL_for_AutonomousGreenhouse | [
"e814f3dc42a9ae684a1a6198c31dc900a8636d34"
] | [
"A3C.py"
] | [
"import multiprocessing # 多线程模块\nimport threading # 线程模块\nimport queue\nimport tensorflow as tf\nimport numpy as np\nimport gym\nimport os\nimport shutil # 拷贝文件用\nimport matplotlib.pyplot as plt\nfrom FeudalBatchProcessor import FeudalBatchProcessor\nimport policy_utils\nfrom LSTMmodel import SingleStepLSTM\n\nGame = 'CartPole-v0'\nN_workers = multiprocessing.cpu_count() # 独立玩家个体数为cpu数\n# MAX_GLOBAL_EP = 2000 # 中央大脑最大回合数\nMAX_GLOBALE_STEP = 100000 # 中央大脑最大步数\nGLOBAL_NET_SCOPE = 'Global_Net' # 中央大脑的名字\nUPDATE_GLOBALE_ITER = 10 # 中央大脑每N次提升一次\nGAMMA = 0.9 # 衰减度\nLR_A = 0.0001 # Actor网络学习率\nLR_C = 0.001 # Critic 网络学习率\nbeta_start = 0.01\nbeta_end = 0.001\ndecay_steps = 50000\n\nGLOBALE_RUNNING_R = [] # 存储总的reward\n# GLOBALE_EP = 0 # 中央大脑步数\nGLOBALE_STEP = 0 # 中央大脑步数\n\nenv = gym.make(Game) # 定义游戏环境\n\nN_S = env.observation_space.shape[0] # 观测值个数\nN_A = env.action_space.n # 行为值个数\n\n\nclass ACnet(object): # 这个class即可用于生产global net,也可生成 worker net,因为结构相同\n def __init__(self, scope, globalAC=None, global_step=None): # scope 用于确定生成什么网络\n # global GLOBALE_STEP\n # self.global_step = GLOBALE_STEP\n if scope == GLOBAL_NET_SCOPE: # 创建中央大脑\n with tf.variable_scope(scope):\n self.global_step = tf.get_variable(\"global_step\", [], tf.int32,\n initializer=tf.constant_initializer(0, dtype=tf.int32),\n trainable=False)\n self.obs_space = N_S\n self.act_space = N_A\n self.k = 16\n self.g_dim = 256\n self.c = 10\n self.vf_hidden_size = 128 # for value function network\n self.alpha = 0.5 # for build loss\n self.batch_processor = FeudalBatchProcessor(self.c)\n self.build_model() # build feudal policy model\n\n else: # 创建worker两个网络的具体步骤\n with tf.variable_scope(scope): # 这里的scope传入的是worker的名字\n self.global_step = globalAC.global_step\n self.obs_space = N_S\n self.act_space = N_A\n self.k = 16\n self.g_dim = 256\n self.c = 10\n self.vf_hidden_size = 128 # for value function network\n self.alpha = 0.5 # for build loss\n self.batch_processor = FeudalBatchProcessor(self.c)\n self.build_model() # build feudal policy model\n\n with tf.name_scope('local_grad'):\n grads = tf.gradients(self.loss, self.var_list)\n grads, _ = tf.clip_by_global_norm(grads, 40)\n\n with tf.name_scope('sync'): # worker和global的同步过程\n with tf.name_scope('pull'): # 获取global参数,复制到local—net\n self.pull_params_op = tf.group(*[v1.assign(v2)\n for v1, v2 in zip(self.var_list, globalAC.var_list)])\n with tf.name_scope('push'): # 将参数传送到gloabl中去\n self.update_params_op = OPT.apply_gradients(zip(grads, globalAC.var_list))\n # 其中传送的是local—net的actor和critic的参数梯度grads,具体计算在上面定义\n # apply_gradients是tf.train.Optimizer中自带的功能函数,将求得的梯度参数更新到global中\n self.inc_step = self.global_step.assign_add(tf.shape(self.obs)[0])\n self.train_op = tf.group(self.update_params_op, self.inc_step)\n # GLOBALE_STEP += tf.shape(self.obs)[0]\n\n def build_model(self):\n \"\"\"\n Builds the manager and worker models.\n \"\"\"\n with tf.variable_scope('FeUdal'):\n self.build_placeholders()\n self.build_perception()\n self.build_manager()\n self.build_worker()\n self.build_loss()\n self.var_list = tf.get_collection(\n tf.GraphKeys.TRAINABLE_VARIABLES, tf.get_variable_scope().name)\n # for v in self.var_list:\n # print v.name\n\n self.state_in = [self.worker_lstm.state_in[0],\n self.worker_lstm.state_in[1],\n self.manager_lstm.state_in[0],\n self.manager_lstm.state_in[1]\n ]\n self.state_out = [self.worker_lstm.state_out[0],\n self.worker_lstm.state_out[1],\n self.manager_lstm.state_out[0],\n self.manager_lstm.state_out[1]\n ]\n # for v in self.var_list:\n # print v\n\n def 
build_placeholders(self):\n # standard for all policies\n self.obs = tf.placeholder(tf.float32, [None,\n self.obs_space]) # ! self.obs = tf.placeholder(tf.float32, [None] + list(self.obs_space))\n # ! self.obs_space = env.observation_space.shape\n self.r = tf.placeholder(tf.float32, (None,1))\n self.ac = tf.placeholder(tf.float32, (None, self.act_space))\n self.adv = tf.placeholder(tf.float32, [None]) # unused\n\n # specific to FeUdal\n self.prev_g = tf.placeholder(tf.float32, (None, None, self.g_dim))\n self.ri = tf.placeholder(tf.float32, (None,))\n self.s_diff = tf.placeholder(tf.float32, (None, self.g_dim))\n\n def build_perception(self):\n self._obs = tf.expand_dims(self.obs, -1) # !\n self._obs = tf.expand_dims(self._obs, -1) # !\n conv1 = tf.layers.conv2d(inputs=self._obs,\n filters=16,\n kernel_size=[2, 1], # ! kernel_size = [8,8]\n activation=tf.nn.elu,\n strides=1) # ! strides = 4\n conv2 = tf.layers.conv2d(inputs=conv1,\n filters=32,\n kernel_size=[2, 1], # ! kernel_size = [4,4]\n activation=tf.nn.elu,\n strides=1) # ! strides = 2\n\n flattened_filters = policy_utils.flatten(conv2)\n self.z = tf.layers.dense(inputs=flattened_filters,\n units=256,\n activation=tf.nn.elu)\n\n def build_manager(self):\n with tf.variable_scope('manager'):\n # Calculate manager internal state\n self.s = tf.layers.dense(inputs=self.z,\n units=self.g_dim,\n activation=tf.nn.elu)\n\n # Calculate manager output g\n x = tf.expand_dims(self.s, [0])\n self.manager_lstm = SingleStepLSTM(x,\n self.g_dim,\n step_size=tf.shape(self.obs)[:1])\n g_hat = self.manager_lstm.output\n self.g = tf.nn.l2_normalize(g_hat, dim=1)\n\n self.manager_vf = self.build_value(g_hat)\n\n def build_worker(self):\n with tf.variable_scope('worker'):\n num_acts = self.act_space\n\n # Calculate U\n self.worker_lstm = SingleStepLSTM(tf.expand_dims(self.z, [0]),\n size=num_acts * self.k,\n step_size=tf.shape(self.obs)[:1])\n flat_logits = self.worker_lstm.output\n\n self.worker_vf = self.build_value(flat_logits)\n\n U = tf.reshape(flat_logits, [-1, num_acts, self.k])\n\n # Calculate w\n cut_g = tf.stop_gradient(self.g)\n cut_g = tf.expand_dims(cut_g, [1])\n gstack = tf.concat([self.prev_g, cut_g], axis=1)\n\n self.last_c_g = gstack[:, 1:]\n # print self.last_c_g\n gsum = tf.reduce_sum(gstack, axis=1)\n phi = tf.get_variable(\"phi\", (self.g_dim, self.k))\n w = tf.matmul(gsum, phi)\n w = tf.expand_dims(w, [2])\n # Calculate policy and sample\n logits = tf.reshape(tf.matmul(U, w), [-1, num_acts])\n self.pi = tf.nn.softmax(logits)\n self.log_pi = tf.nn.log_softmax(logits)\n self.sample = policy_utils.categorical_sample(\n tf.reshape(logits, [-1, num_acts]), num_acts)[0, :]\n\n def build_value(self, _input):\n with tf.variable_scope('VF'):\n hidden = tf.layers.dense(inputs=_input,\n units=self.vf_hidden_size,\n activation=tf.nn.elu)\n\n w = tf.get_variable(\"weights\", (self.vf_hidden_size, 1))\n return tf.matmul(hidden, w)\n\n def build_loss(self):\n cutoff_vf_manager = tf.reshape(tf.stop_gradient(self.manager_vf), [-1])\n dot = tf.reduce_sum(tf.multiply(self.s_diff, self.g), axis=1)\n gcut = tf.stop_gradient(self.g)\n mag = tf.norm(self.s_diff, axis=1) * tf.norm(gcut, axis=1) + .0001\n dcos = dot / mag\n manager_loss = -tf.reduce_sum((self.r - cutoff_vf_manager) * dcos)\n\n cutoff_vf_worker = tf.reshape(tf.stop_gradient(self.worker_vf), [-1])\n log_p = tf.reduce_sum(self.log_pi * self.ac, [1])\n worker_loss = (self.r + self.alpha * self.ri - cutoff_vf_worker) * log_p\n worker_loss = -tf.reduce_sum(worker_loss, axis=0)\n\n Am = self.r - 
self.manager_vf\n manager_vf_loss = .5 * tf.reduce_sum(tf.square(Am))\n\n Aw = (self.r + self.alpha * self.ri) - self.worker_vf\n worker_vf_loss = .5 * tf.reduce_sum(tf.square(Aw))\n\n entropy = -tf.reduce_sum(self.pi * self.log_pi)\n\n beta = tf.train.polynomial_decay(beta_start, self.global_step,\n end_learning_rate=beta_end,\n decay_steps=decay_steps,\n power=1)\n\n # worker_loss = tf.Print(worker_loss,[manager_loss,worker_loss,manager_vf_loss,worker_vf_loss,entropy])\n self.loss = worker_loss + manager_loss + \\\n worker_vf_loss + manager_vf_loss - \\\n entropy * beta\n\n def update_global(self, feed_dict): # 定义更新global参数函数\n SESS.run([self.update_params_op], feed_dict) # 分别更新actor和critic网络\n\n def pull_global(self): # 定义更新local参数函数\n SESS.run([self.pull_params_op])\n\n def action(self, ob, g, cw, hw, cm, hm): # 定义选择动作函数\n # ob = ob[np.newaxis, :]\n ob = ob.reshape([-1, self.obs_space])\n return SESS.run([self.sample, self.manager_vf, self.g, self.s, self.last_c_g] + self.state_out,\n feed_dict={self.obs: ob, self.state_in[0]: cw, self.state_in[1]: hw, self.state_in[2]: cm,\n self.state_in[3]: hm, self.prev_g: g})\n # return np.random.choice(range(probs.shape[1]), p=probs.ravel()) # 从probs中按概率选取出某一个动作\n\n def value(self, ob, g, cw, hw, cm, hm):\n sess = tf.get_default_session()\n return sess.run(self.manager_vf,\n {self.obs: [ob], self.state_in[0]: cw, self.state_in[1]: hw,\n self.state_in[2]: cm, self.state_in[3]: hm,\n self.prev_g: g})[0]\n\n def get_initial_features(self):\n return np.zeros((1, 1, self.g_dim), np.float32), self.worker_lstm.state_init + self.manager_lstm.state_init\n\n def update_batch(self, batch):\n return self.batch_processor.process_batch(batch)\n\n\nclass Worker(object):\n def __init__(self, name, globalAC): # 传入的name是worker的名字,globalAC是已经建立好的中央大脑GLOBALE—AC\n self.env = gym.make(Game).unwrapped\n self.name = name # worker的名字\n self.global_AC = globalAC\n self.local_AC = ACnet(scope=name, globalAC=globalAC) # 第二个参数当传入的是已经建立好的GLOBALE—AC时创建的是local net\n # 建立worker的AC网络\n self.runner = policy_utils.RunnerThread(self.env, self.local_AC, 20, visualise=0)\n\n def pull_batch_from_queue(self):\n \"\"\"\n self explanatory: take a rollout from the queue of the thread runner.\n \"\"\"\n rollout = self.runner.queue.get(timeout=600.0)\n while not rollout.terminal:\n try:\n rollout.extend(self.runner.queue.get_nowait())\n except queue.Empty:\n break\n return rollout\n\n def start(self, sess, summary_writer):\n self.runner.start_runner(sess, summary_writer)\n\n def work(self): # 定义worker运行的的具体过程\n global GLOBALE_STEP, MAX_GLOBALE_STEP\n # global GLOBALE_RUNNING_R, GLOBALE_EP # 两个全局变量,R是所有worker的总reward,ep是所有worker的总episode\n # total_step = 1 # 本worker的总步数\n # buffer_s, buffer_a, buffer_r = [], [], [] # state,action,reward的缓存\n SESS.run(self.local_AC.pull_params_op)\n self.start(SESS, summary_writer=0)\n global_step = SESS.run(self.global_AC.global_step)\n # print(type(GLOBALE_STEP < MAX_GLOBALE_STEP))\n while not COORD.should_stop() and global_step < MAX_GLOBALE_STEP: # 停止本worker运行的条件\n # 本循环一次是一个回合\n\n # s = self.env.reset() # 初始化环境\n if self.name == 'W_0': # 只有worker0才将动画图像显示\n self.env.render()\n ep_r = 0 # 本回合总的reward\n SESS.run(self.local_AC.pull_params_op)\n rollout = self.pull_batch_from_queue()\n batch = policy_utils.process_rollout(rollout, gamma=.99)\n batch = self.local_AC.update_batch(batch)\n # batch.ri = [item for sublist in batch.ri for item in sublist]\n # returns = [item for sublist in batch.returns for item in sublist]\n # batch._replace(returns=returns)\n # 
print(\"batch.returns.shape:\",batch.returns.shape)\n # print(\"batch.ri.shape:\",batch.ri.le)\n fetches = [self.local_AC.train_op]\n feed_dict = {\n self.local_AC.obs: batch.obs,\n self.global_AC.obs: batch.obs,\n\n self.local_AC.ac: batch.a,\n self.global_AC.ac: batch.a,\n\n self.local_AC.r: batch.returns,\n self.global_AC.r: batch.returns,\n\n self.local_AC.s_diff: batch.s_diff,\n self.global_AC.s_diff: batch.s_diff,\n\n self.local_AC.prev_g: batch.gsum,\n self.global_AC.prev_g: batch.gsum,\n\n self.local_AC.ri: batch.ri,\n self.global_AC.ri: batch.ri\n }\n\n for i in range(len(self.local_AC.state_in)):\n feed_dict[self.local_AC.state_in[i]] = batch.features[i]\n feed_dict[self.global_AC.state_in[i]] = batch.features[i]\n\n fetched = SESS.run(fetches, feed_dict=feed_dict)\n # while True: # 本循环一次是一步\n # if self.name == 'W_0': # 只有worker0才将动画图像显示\n # self.env.render()\n #\n # fetched = self.AC.action(last_state, *last_features) # 将当前状态state传入AC网络选择动作action\n # action, value_, g, s, last_c_g, features = fetched[0], fetched[1], \\\n # fetched[2], fetched[3], \\\n # fetched[4], fetched[5:]\n # a = action.argmax()\n # state, reward, done, info = self.env.step(a) # 行动并获得新的状态和回报等信息\n # rollout.add(last_state,action,reward,value_,g,s,done,last_features)\n #\n # if done: reward = -5 # 如果结束了,reward给一个惩罚数\n #\n # ep_r += reward # 记录本回合总体reward\n # # buffer_s.append(s) # 将当前状态,行动和回报加入缓存\n # # buffer_a.append(a)\n # # buffer_r.append(r)\n # last_state = state\n # last_features = features\n # if total_step % UPDATE_GLOBALE_ITER == 0 or done: # 每iter步完了或者或者到达终点了,进行同步sync操作\n # if done:\n # v_s_ = 0 # 如果结束了,设定对未来的评价值为0\n # else:\n # v_s_ = SESS.run(self.AC.v, feed_dict={self.AC.s: s_[np.newaxis, :]})[\n # 0, 0] # 如果是中间步骤,则用AC网络分析下一个state的v评价\n #\n # buffer_v_target = []\n # for r in buffer_r[::-1]: # 将下一个state的v评价进行一个反向衰减传递得到每一步的v现实\n # v_s_ = r + GAMMA * v_s_\n # buffer_v_target.append(v_s_) # 将每一步的v现实都加入缓存中\n # buffer_v_target.reverse() # 反向后,得到本系列操作每一步的v现实(v-target)\n #\n # buffer_s, buffer_a, buffer_v_target = np.vstack(buffer_s), np.vstack(buffer_a), np.vstack(\n # buffer_v_target)\n #\n # feed_dict = {\n # self.AC.obs: buffer_s, # 本次走过的所有状态,用于计算v估计\n # self.AC.ac: buffer_a, # 本次进行过的所有操作,用于计算a—loss\n # self.AC.v: buffer_v_target # 走过的每一个state的v现实值,用于计算td\n # }\n #\n # self.AC.update_global(feed_dict) # update—global的具体过程在AC类中定义,feed-dict如上\n #\n # buffer_s, buffer_a, buffer_r = [], [], [] # 清空缓存\n #\n # self.AC.pull_global() # 从global—net提取出参数赋值给local—net\n #\n # s = s_ # 跳转到下一个状态\n # total_step += 1 # 本回合总步数加1\n #\n # if done: # 如果本回合结束了\n # if len(GLOBALE_RUNNING_R) == 0: # 如果尚未记录总体running\n # GLOBALE_RUNNING_R.append(ep_r)\n # else:\n # GLOBALE_RUNNING_R.append(0.9 * GLOBALE_RUNNING_R[-1] + 0.1 * ep_r)\n #\n # print(self.name, 'EP:', GLOBALE_EP)\n # GLOBALE_EP += 1 # 加一回合\n # break # 结束本回合\n\n # global_step = SESS.run(self.global_AC.global_step)\n\n\nif __name__ == '__main__':\n SESS = tf.Session()\n\n with tf.device('/cpu:0'):\n OPT = tf.train.AdamOptimizer(1e-4) # 后续主要是使用该optimizer中的apply—gradients操作\n # OPT_C = tf.train.RMSPropOptimizer(LR_C, name='RMSPropC') # 定义critic训练过程\n GLOBAL_AC = ACnet(scope=GLOBAL_NET_SCOPE) # 创建中央大脑GLOBALE_AC,只创建结构(A和C的参数)\n workers = []\n for i in range(N_workers): # N—workers等于cpu数量\n i_name = 'W_%i' % i # worker name\n workers.append(Worker(name=i_name, globalAC=GLOBAL_AC)) # 创建独立的worker\n\n COORD = tf.train.Coordinator() # 多线程\n SESS.run(tf.global_variables_initializer()) # 初始化所有参数\n\n worker_threads = []\n for worker in workers: # 并行过程\n job = lambda: 
worker.work() # worker的工作目标,此处调用Worker类中的work\n t = threading.Thread(target=job) # 每一个线程完成一个worker的工作目标\n t.start() # 启动每一个worker\n worker_threads.append(t) # 每一个worker的工作都加入thread中\n COORD.join(worker_threads) # 合并几个worker,当每一个worker都运行完再继续后面步骤\n\n plt.plot(np.arange(len(GLOBALE_RUNNING_R)), GLOBALE_RUNNING_R) # 绘制reward图像\n plt.xlabel('step')\n plt.ylabel('Total moving reward')\n plt.show()\n"
] | [
[
"tensorflow.get_default_session",
"tensorflow.constant_initializer",
"tensorflow.group",
"tensorflow.matmul",
"tensorflow.reshape",
"tensorflow.gradients",
"tensorflow.nn.softmax",
"tensorflow.global_variables_initializer",
"tensorflow.square",
"tensorflow.shape",
"tensorflow.concat",
"tensorflow.norm",
"tensorflow.variable_scope",
"tensorflow.get_variable_scope",
"tensorflow.layers.dense",
"tensorflow.nn.log_softmax",
"tensorflow.train.AdamOptimizer",
"tensorflow.train.Coordinator",
"tensorflow.expand_dims",
"numpy.zeros",
"tensorflow.Session",
"tensorflow.train.polynomial_decay",
"tensorflow.placeholder",
"tensorflow.layers.conv2d",
"tensorflow.reduce_sum",
"tensorflow.get_variable",
"tensorflow.name_scope",
"matplotlib.pyplot.show",
"tensorflow.clip_by_global_norm",
"tensorflow.multiply",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ylabel",
"tensorflow.device",
"tensorflow.stop_gradient",
"tensorflow.nn.l2_normalize"
]
] |
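Note: a NumPy sketch (not from the repo) of the directional-cosine term in build_loss() above: the manager's advantage is weighted by the cosine between the state transition s_diff and the goal g, with the same 1e-4 guard against a zero norm.

import numpy as np

def dcos(s_diff, g, eps=1e-4):
    dot = np.sum(s_diff * g, axis=1)
    mag = np.linalg.norm(s_diff, axis=1) * np.linalg.norm(g, axis=1) + eps
    return dot / mag

s_diff = np.array([[1.0, 0.0], [1.0, 1.0]])
g = np.array([[1.0, 0.0], [-1.0, 0.0]])
print(dcos(s_diff, g))  # ~[ 1.0 -0.707]: aligned vs. opposing transitions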
scottprahl/ofiber | [
"aaad9aa368d28a0bcc0f34e12377fc3bcad80180"
] | [
"ofiber/cylinder_step.py"
] | [
"# pylint: disable=invalid-name\n# pylint: disable=no-name-in-module\n\"\"\"\nUseful routines for step-index cylindrical waveguides.\n\nSee <https://ofiber.readthedocs.io> for usage examples.\n\nBased on chapter 8 of A. Ghatak, K. Thyagarajan, An Introduction to Fiber\nOptics, Cambridge University Press, 1998\n\nFunctions to calculate and plot modes for step index fibers. Specifically::\n\n LP_mode_value(V, ell, em)\n LP_mode_values(V, ell)\n LP_core_irradiance(V, b, ell)\n LP_clad_irradiance(V, b, ell)\n LP_total_irradiance(V, b, ell)\n LP_radial_field(V, b, ell, r_over_a)\n LP_radial_irradiance(V, b, ell, r_over_a)\n gaussian_envelope_Omega(V)\n gaussian_radial_irradiance(V, r_over_a)\n plot_LP_modes(V, ell)\n\nFunctions to estimate losses::\n\n angular_misalignment_loss_db(n, w, theta, lambda0)\n bending_loss_db(n1, Delta, a, Rc, lambda0)\n longitudinal_misalignment_loss_db(n1, w, D, lambda0)\n transverse_misalignment_loss_db(w1, w2, u)\n\nFunctions to find equivalent core diameters::\n\n MFR(V)\n MFD(V)\n PetermannW(V)\n PetermannW_Approx(V)\n\nAnd finally, a couple of routines to help with waveguide dispersion\ncalculations::\n\n V_d2bV_by_V(V, ell)\n V_d2bV_by_V_Approx(V, ell)\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.optimize import brentq\nfrom scipy.special import jn\nfrom scipy.special import jn_zeros\nfrom scipy.special import kn\n\n__all__ = ('LP_mode_value',\n 'LP_mode_values',\n 'plot_LP_modes',\n 'LP_core_irradiance',\n 'LP_clad_irradiance',\n 'LP_total_irradiance',\n 'LP_radial_field',\n 'LP_radial_irradiance',\n 'gaussian_envelope_Omega',\n 'gaussian_radial_irradiance',\n 'transverse_misalignment_loss_db',\n 'angular_misalignment_loss_db',\n 'longitudinal_misalignment_loss_db',\n 'bending_loss_db',\n 'MFR',\n 'MFD',\n 'PetermannW',\n 'PetermannW_Approx',\n 'V_d2bV_by_V',\n 'V_d2bV_by_V_Approx')\n\n\ndef _LHS_eqn_8_40(b, V, ell):\n \"\"\"\n Calculate the left hand side of the eigenvalue eqn 8.40 in Ghatak.\n\n Also works for ell=0 (but is multiplied by -1 relative to eqn 8.41).\n This is private method that should not be needed outside this module.\n\n Args:\n b: normalized propagation constant [-]\n V: V-parameter for fiber [-]\n ell: desired fiber mode [-]\n Returns:\n LHS of equation 8.40 [-]\n \"\"\"\n U = V * np.sqrt(1 - b)\n return U * jn(ell - 1, U) / jn(ell, U)\n\n\ndef _RHS_eqn_8_40(b, V, ell):\n \"\"\"\n Calculate the right hand side of the eigenvalue eqn 8.40 in Ghatak.\n\n Also works for ell=0 (but is multiplied by -1 relative to eqn 8.41).\n This is private method that should not be needed outside this module.\n\n Args:\n b: normalized propagation constant [-]\n V: V-parameter for fiber [-]\n ell: desired fiber mode [-]\n Returns:\n RHS of equation 8.40 [-]\n \"\"\"\n W = V * np.sqrt(b)\n return -W * kn(ell - 1, W) / kn(ell, W)\n\n\ndef _cyl_mode_eqn(b, *args):\n \"\"\"\n Return the difference of RHS and LHS of 8.40 in Ghatak.\n\n This function is zero when a guided mode exists in the step index fiber.\n This is a private function and should not be needed outside this module.\n\n Args:\n b: normalized propagation constant [-]\n arg[0]: V-parameter for optical fiber [-]\n arg[1]: desired fiber mode [-]\n Returns:\n LHS-RHS of equation 8.40 [-]\n \"\"\"\n V = args[0]\n ell = args[1]\n g1 = _LHS_eqn_8_40(b, V, ell)\n g2 = _RHS_eqn_8_40(b, V, ell)\n return g1 - g2\n\n\ndef LP_mode_value(V, ell, em):\n \"\"\"\n Calculate guided b for mode (ell,em) in a circular step-index fiber.\n\n b is the normalized propagation constant. 
Each guided mode in an optical\n fiber has a specific value of b that depends on the fiber parameter V\n and the mode number.\n\n If no mode exists, a value of None is returned\n\n The LP_lm is specified by the (ell,em) to avoid confusion between the\n number 1 and the letter l.\n\n For cylindrical fibers, em is a positive integer: thus there are modes\n LP_01, LP_02, but not LP_10.\n\n Args:\n V: V-parameter for optical fiber [-]\n ell: primary fiber mode (integer) [-]\n em: secondary fiber mode (integer>0) [-]\n Returns:\n guided normalized propagation constant for mode (ell,em) [-]\n \"\"\"\n if ell < 0:\n ell *= -1 # negative ells are same as positive ones\n\n if em <= 0:\n return None # modes start with 1, e.g., LP_01\n\n if V <= 0:\n return None # V must be positive\n\n abit = 1e-3\n\n # set up bounds for this mode\n jnz = jn_zeros(ell, em)\n lo = max(0, 1 - (jnz[em - 1] / V)**2) + abit\n\n if em == 1:\n hi = 1 - abit\n else:\n hi = 1 - (jnz[em - 2] / V)**2 - abit\n\n if hi < lo:\n return None # no such mode\n\n try:\n b = brentq(_cyl_mode_eqn, lo, hi, args=(V, ell))\n except ValueError: # happens when both hi and lo values have same sign\n return None # therefore no such mode exists\n\n return b\n\n\ndef LP_mode_values(V, ell):\n \"\"\"\n Calculate all guided b for mode ell in a circular step-index fiber.\n\n If there is no such mode, returns an empty array\n\n Note that in the returned array b[0] will correspond to LP_ell,1\n\n Args:\n V: V-parameter for optical fiber [-]\n ell: primary fiber mode (integer) [-]\n Returns:\n array of normalized propagation constant for mode ell [-]\n \"\"\"\n all_b = np.array([])\n for em in range(1, 10):\n b = LP_mode_value(V, ell, em)\n if b is None:\n break\n all_b = np.append(all_b, b)\n\n return all_b\n\n\ndef plot_LP_modes(V, ell):\n \"\"\"\n Produce a plot show possible eigenvalue solutions for step index fiber.\n\n The solutions correspond to places where the curves cross one another. No\n crossing means that there is no guided mode for that mode value.\n\n Args:\n V: V-parameter for optical fiber [-]\n ell: primary fiber mode (integer) [-]\n Returns:\n graph for mode ell [matplotlib.pyplot object]\n \"\"\"\n abit = 1e-5\n pltmin = -2 * V\n pltmax = 2 * V\n\n b = np.linspace(abit, 1 - abit, 251)\n\n g1 = _LHS_eqn_8_40(b, V, ell)\n g2 = _RHS_eqn_8_40(b, V, ell)\n\n # remove points so confusing vertical retrace lines are not shown\n np.place(g1, g1 < pltmin, np.nan)\n np.place(g2, g2 < pltmin, np.nan)\n\n plt.plot([0, 1], [0, 0], ':k')\n plt.plot(b, g1)\n plt.plot(b, g2)\n\n # plot and label all the crossings\n all_b = LP_mode_values(V, ell)\n for i, bb in enumerate(all_b):\n y = _LHS_eqn_8_40(bb, V, ell)\n plt.scatter([bb], [y], s=30)\n plt.annotate(r' LP$_{%d%d}$' % (ell, i + 1), xy=(bb, y), va='top')\n\n plt.title(r'Modes for $\\ell$=%d when V=%.3f' % (ell, V))\n plt.xlabel('b')\n plt.ylim(pltmin, pltmax)\n plt.xlim(0, 1)\n\n return plt\n\n\ndef LP_core_irradiance(V, b, ell):\n \"\"\"\n Calculate the core irradiance for a step-index fiber.\n\n See Ghatak equation 8.56. The returned value is the total\n core power divided by the area of the core.\n\n Args:\n V: V-parameter for fiber [-]\n b: normalized propagation constant [-]\n ell: desired fiber mode [-]\n Returns:\n total core power over core area [-]\n \"\"\"\n U = V * np.sqrt(1 - b)\n return 1 - jn(ell + 1, U) * jn(ell - 1, U) / jn(ell, U)**2\n\n\ndef LP_clad_irradiance(V, b, ell):\n \"\"\"\n Calculate the cladding irradiance for a step-index fiber.\n\n See Ghatak equation 8.57. 
The returned value is the total\n cladding power divided by the area of the core.\n\n Args:\n V: V-parameter for fiber [-]\n b: normalized propagation constant [-]\n ell: desired fiber mode [-]\n Returns:\n total cladding power over core area [-]\n \"\"\"\n W = V * np.sqrt(b)\n return kn(ell + 1, W) * kn(ell - 1, W) / kn(ell, W)**2 - 1\n\n\ndef LP_total_irradiance(V, b, ell):\n \"\"\"\n Calculate the total irradiance for a step-index fiber.\n\n See Ghatak equation 8.58. The returned value is the total\n power (cladding + core) divided by the area of the core.\n\n Args:\n V: V-parameter for fiber [-]\n b: normalized propagation constant [-]\n ell: desired fiber mode [-]\n Returns:\n total power over core area [-]\n \"\"\"\n U = V * np.sqrt(1 - b)\n W = V * np.sqrt(b)\n val = V**2 / U**2 * kn(ell + 1, W)\n val *= kn(ell - 1, W) / kn(ell, W)**2\n return val\n\n\ndef LP_radial_field(V, b, ell, r_over_a):\n \"\"\"\n Calculate the normalized field in a step-index fiber.\n\n Args:\n V: V-parameter for fiber [-]\n b: normalized propagation constant [-]\n ell: desired fiber mode [-]\n r_over_a: (radial position)/(core radius) [-]\n Returns:\n normalized field at point r_over_a [-]\n \"\"\"\n U = V * np.sqrt(1 - b)\n W = V * np.sqrt(b)\n r = abs(r_over_a) # same value for negative radii\n\n A = jn(ell, U * r) / jn(ell, U)\n B = kn(ell, W * r) / kn(ell, W)\n values = np.where(r < 1, A, B)\n return values / np.sqrt(LP_total_irradiance(V, b, ell))\n\n\ndef LP_radial_irradiance(V, b, ell, r_over_a):\n \"\"\"\n Calculate the normalized irradiance in a step-index fiber.\n\n The normalization is done such that\n integral_over_space/(area of core) = 1\n or\n 2*np.trapz(LP(r_over_a)*r_over_a, r_over_a) =1\n\n Args:\n V: V-parameter for fiber [-]\n b: normalized propagation constant [-]\n ell: desired fiber mode [-]\n r_over_a: (radial position)/(core radius) [-]\n Returns:\n normalized irradiance at points r_over_a [-]\n \"\"\"\n field = LP_radial_field(V, b, ell, r_over_a)\n return field**2\n\n\ndef gaussian_envelope_Omega(V):\n \"\"\"\n Calculate the normalized irradiance in a step-index fiber.\n\n The normalization is done assuming\n the Gaussian envelope approximation for the LP_01 mode.\n\n Args:\n V: V-parameter for fiber [-]\n Returns:\n Omega_over_core_radius [-]\n \"\"\"\n b = LP_mode_value(V, 0, 1)\n U = V * np.sqrt(1 - b)\n W = V * np.sqrt(b)\n Omega_over_a = jn(0, U) * V/U * kn(1, W)/kn(0, W)\n return Omega_over_a\n\n\ndef gaussian_radial_irradiance(V, r_over_a):\n \"\"\"\n Calculate the normalized irradiance in a step-index fiber.\n\n The normalization is done assuming\n the Gaussian envelope approximation for the LP_01 mode. 
The result\n is normalized such that\n np.trapz(Gaussian(r_over_a)*r_over_a, r_over_a) = 1/2\n\n Args:\n V: V-parameter for fiber [-]\n r_over_a: (radial position)/(core radius) [-]\n Returns:\n normalized irradiance at points r_over_a [-]\n \"\"\"\n Omega_over_a = gaussian_envelope_Omega(V)\n return 1/Omega_over_a**2 * np.exp(-r_over_a**2/Omega_over_a**2)\n\n\ndef transverse_misalignment_loss_db(w1, w2, u):\n \"\"\"\n Calculate the loss due to transverse fiber misalignment.\n\n See Ghatak eqn 8.69\n\n Args:\n w1: mode field radius of first fiber [m]\n w2: mode field radius of second fiber [m]\n u: transverse misalignment [m]\n Returns:\n transverse misalignment loss in dB [-]\n \"\"\"\n sq = w1**2 + w2**2\n loss = (2 * w1 * w2 / sq)**2 * np.exp(-2 * u**2 / sq)\n return -10 * np.log10(loss)\n\n\ndef angular_misalignment_loss_db(n, w, theta, lambda0):\n \"\"\"\n Calculate the loss due to angular fiber misalignment.\n\n See Ghatak eqn 8.75\n\n Args:\n n: index between fiber ends [-]\n w: mode field radius [m]\n theta: angular misalignment [radians]\n lambda0: wavelength in vacuum [m]\n Returns:\n angular misalignment loss in dB [-]\n \"\"\"\n return 4.34 * (np.pi * w * theta * n / lambda0)**2\n\n\ndef longitudinal_misalignment_loss_db(n1, w, D, lambda0):\n \"\"\"\n Calculate the loss due to longitudinal fiber misalignment.\n\n See Ghatak eqn 8.81\n\n Args:\n n: index between fiber ends [-]\n w: mode field radius [m]\n D: longitudinal fiber separation [m]\n lambda0: wavelength in vacuum [m]\n Returns:\n longitudinal misalignment loss dB [-]\n \"\"\"\n dhat = D * lambda0 / (2 * np.pi * n1 * w**2)\n return 10 * np.log10(1 + dhat**2)\n\n\ndef _bending_loss_db_scalar(n1, Delta, a, Rc, lambda0):\n \"\"\"\n Calculate the bending loss in dB/m.\n\n The bending loss is given by eqn 10.29 in Ghatak. This private method\n only works for scalar values.\n\n Args:\n a: core radius [m]\n n1: core index [-]\n Delta: refractive index difference [-]\n Rc: radius of curvature in [m]\n lambda0: wavelength in vacuum in [m]\n Returns:\n bending loss in dB/m [1/m]\n \"\"\"\n k0 = 2 * np.pi / lambda0\n V = k0 * a * n1 * np.sqrt(2 * Delta)\n b = LP_mode_value(V, 0, 1)\n if b is None:\n return np.nan\n U = V * np.sqrt(1 - b)\n W = V * np.sqrt(b)\n val = 4.343 * np.sqrt(np.pi / 4 / a / Rc)\n val *= (U / V / kn(1, W))**2\n val *= W**-1.5\n val *= np.exp(-2 * W**3 * Rc / 3 / k0**2 / a**3 / n1**2)\n return val\n\n\ndef bending_loss_db(n1, Delta, a, Rc, lambda0):\n \"\"\"\n Calculate the bending loss in dB/m.\n\n This is a convenience method that works when a is an array.\n\n Args:\n a: core radius [m]\n n1: core index [-]\n Delta: refractive index difference [-]\n Rc: radius of curvature in [m]\n lambda0: wavelength in vacuum in [m]\n Returns:\n bending loss in dB/m [1/m]\n \"\"\"\n if np.isscalar(a):\n alpha = _bending_loss_db_scalar(n1, Delta, a, Rc, lambda0)\n else:\n alpha = np.empty_like(a)\n for i, aa in enumerate(a):\n alpha[i] = _bending_loss_db_scalar(n1, Delta, aa, Rc, lambda0)\n return alpha\n\n\ndef MFR(V):\n \"\"\"\n Approximate the mode field radius for a step-index single mode fiber.\n\n The approximation is fairly accurate for V>1. In the multimode range\n (V > 2.405), it applies to the fundamental mode.\n\n D. Marcuse, \"Loss analysis of single-mode fiber splices\", Bell Syst.\n Tech. 
J., 56, 703 (1977)\n\n Args:\n V: V-parameter of the fiber [--]\n Returns:\n approximate mode field radius normalized by the core radius [--]\n \"\"\"\n return 0.65 + 1.619 * V**-1.5 + 2.879 * V**-6\n\n\ndef MFD(V):\n \"\"\"\n Approximate the mode field diameter for a step-index single mode fiber.\n\n See MFR() for details.\n\n Args:\n V: V-parameter of the fiber [--]\n Returns:\n approximate mode field diameter normalized by the core radius [--]\n \"\"\"\n return 2 * MFR(V)\n\n\ndef _PetermannW_scalar(V):\n \"\"\"\n Calculate the Petermann-2 radius for a step-index fiber.\n\n This private method only works when V is a scalar.\n\n Args:\n V: V-parameter of the fiber [--]\n Returns:\n approximate Petermann-2 radius normalized by core radius [--]\n \"\"\"\n b = LP_mode_value(V, 0, 1)\n if b is None:\n return np.nan\n U = V * np.sqrt(1 - b)\n W = V * np.sqrt(b)\n denom = W * jn(0, U)\n return np.sqrt(2) * jn(1, U) / denom\n\n\ndef PetermannW(V):\n \"\"\"\n Calculate the Petermann-2 radius for a step-index fiber.\n\n This is a convenience function that works when V is an array.\n\n Args:\n V: V-parameter of the fiber [--]\n Returns:\n approximate Petermann-2 radius normalized by core radius [--]\n \"\"\"\n if np.isscalar(V):\n wp = _PetermannW_scalar(V)\n else:\n wp = np.empty_like(V)\n for i, VV in enumerate(V):\n wp[i] = _PetermannW_scalar(VV)\n return wp\n\n\ndef PetermannW_Approx(V):\n \"\"\"\n Approximate the Petermann-2 radius for a step-index fiber.\n\n The approximation is valid for single mode fibers (1.5<V<2.5). The result\n is the ratio of the Petermann-2 radius to the core radius.\n\n C. D. Hussey and F. Martinez, “Approximate analytical forms for\n the propagation characteristics of single-mode optical fibres”,\n Electron. Lett. 21, 1103 (1985).\n\n Args:\n V: V-parameter of the fiber [--]\n Returns:\n approximate Petermann-2 radius normalized by core radius [--]\n \"\"\"\n return MFR(V) - 0.016 - 1.567 * V**-7\n\n\ndef _V_d2bV_by_V_scalar(V, ell):\n \"\"\"\n Calculate V*d^2(bV)/dV^2 for mode ell of a step-index fiber.\n\n This private function only works for scalar values of V and ell. It\n finds V*d^2(bV)/dV^2 for mode ell of a step-index fiber using eqn 10.14\n\n Args:\n V: V-parameter of the fiber [--]\n Returns:\n V*d^2(bV)/dV^2 [--]\n \"\"\"\n b = LP_mode_value(V, ell, 1)\n if b is None:\n return 0\n\n U = V * np.sqrt(1 - b)\n W = V * np.sqrt(b)\n\n kappa_ell = kn(ell, W)**2 / kn(ell - 1, W)\n kappa_ell /= kn(ell + 1, W)\n summ = 3 * W**2 - 2 * kappa_ell * (W**2 - U**2)\n val = W * (W**2 + U**2 * kappa_ell) * (kappa_ell - 1)\n val *= (kn(ell - 1, W) + kn(ell + 1, W))\n val /= kn(ell, W)\n summ += val\n return 2 * U**2 * kappa_ell / V**2 / W**2 * summ\n\n\ndef V_d2bV_by_V(V, ell):\n \"\"\"\n Calculate V*d^2(bV)/dV^2 for mode ell of a step-index fiber.\n\n This value is needed to determine the waveguide dispersion. This\n routine is a convenience function that works when V is an array.\n\n Args:\n V: V-parameter of the fiber [--]\n Returns:\n V*d^2(bV)/dV^2 [--]\n \"\"\"\n if np.isscalar(V):\n return _V_d2bV_by_V_scalar(V, ell)\n\n v_by_v = np.empty_like(V)\n for i, VV in enumerate(V):\n v_by_v[i] = _V_d2bV_by_V_scalar(VV, ell)\n\n return v_by_v\n\n\ndef V_d2bV_by_V_Approx(V):\n \"\"\"\n Approximate V*d^2(bV)/dV^2 for single mode fiber.\n\n This value is needed to determine the waveguide dispersion. This\n approximation is for the fundamental mode in the fiber and is good\n to 1% when 1.4<V<2.4. 
Approximation by Marcuse (1979)\n\n Args:\n V: V-parameter of the fiber [--]\n Returns:\n V*d^2(bV)/dV^2 [--]\n \"\"\"\n return 0.080 + 0.549 * (2.834 - V)**2\n"
] | [
[
"matplotlib.pyplot.annotate",
"matplotlib.pyplot.xlim",
"numpy.exp",
"numpy.where",
"scipy.optimize.brentq",
"scipy.special.jn",
"numpy.sqrt",
"numpy.append",
"numpy.log10",
"numpy.empty_like",
"numpy.array",
"scipy.special.jn_zeros",
"scipy.special.kn",
"numpy.place",
"matplotlib.pyplot.title",
"numpy.isscalar",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.scatter",
"numpy.linspace"
]
] |
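A quick usage sketch for the fiber-mode routines in the record above. This is a minimal example, assuming the module is saved locally as fiber_modes.py (a hypothetical name); the function names and signatures come from the source itself:

import fiber_modes as fm

V = 2.2                        # single-mode regime (below the 2.405 cutoff)
b = fm.LP_mode_value(V, 0, 1)  # normalized propagation constant of LP01
print("b(LP01) =", b)
print("LP11 guided?", fm.LP_mode_value(V, 1, 1) is not None)  # False below cutoff
print("mode field radius / core radius =", fm.MFR(V))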
ThibHlln/unifhy | [
"4105932ed7dfec34d428c1f2d2f85ec25ea522ed"
] | [
"tests/tests/test_utils/test_record.py"
] | [
"import numpy as np\nfrom netCDF4 import Dataset\n\nimport unifhy\nfrom ..test_time import get_dummy_output_time_and_bounds\nfrom ..test_component import time_resolutions\n\n# expected raw values for states/transfers/outputs after main run\n# (null initial conditions, no spinup run, 12-day period)\nexp_records_raw = {\n 'same_t': {\n 'surfacelayer': {\n 'state_a': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],\n 'state_b': [2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32],\n 'transfer_i': [6, 10, 20, 40, 56, 84, 132, 172, 236, 340, 428, 564, 780, 964, 1244, 1684],\n 'transfer_j': [8, 12, 15, 42, 57, 69, 153, 201, 240, 495, 642, 762, 1530, 1974, 2337, 4644],\n 'output_x': [5, 4, 27, 38, 46, 126, 170, 205, 456, 599, 715, 1479, 1919, 2278, 4581, 5912]\n },\n 'subsurface': {\n 'state_a': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],\n 'state_b': [2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32],\n 'transfer_k': [2, 3, 28, 41, 51, 133, 179, 216, 469, 614, 732, 1498, 1940, 2301, 4606, 5939],\n 'transfer_m': [3, 11, 17, 29, 51, 69, 99, 149, 191, 257, 363, 453, 591, 809, 995, 1277],\n 'output_x': [0, -1, 22, 33, 41, 121, 165, 200, 451, 594, 710, 1474, 1914, 2273, 4576, 5907]\n },\n 'openwater': {\n 'state_a': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],\n 'transfer_l': [1, 8, 25, 38, 63, 108, 145, 206, 307, 392, 525, 738, 919, 1196, 1633, 2006],\n 'transfer_n': [0, 24, 36, 45, 126, 171, 207, 459, 603, 720, 1485, 1926, 2286, 4590, 5922, 7011],\n 'transfer_o': [3, 11, 15, 18, 45, 60, 72, 156, 204, 243, 498, 645, 765, 1533, 1977, 2340],\n 'output_x': [3, 27, 39, 48, 129, 174, 210, 462, 606, 723, 1488, 1929, 2289, 4593, 5925, 7014],\n 'output_y': [-1, 4, 19, 30, 53, 96, 131, 190, 289, 372, 503, 714, 893, 1168, 1603, 1974]\n }\n },\n 'diff_t': {\n 'surfacelayer': {\n 'state_a': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],\n 'state_b': [2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32],\n 'transfer_i': [6, 9, 13, 16, 20, 23, 47, 50, 54, 57, 104, 107, 111, 114, 201.5, 204.5],\n 'transfer_j': [8, 10, 12, 14, 24, 26, 28, 30, 72, 74, 76, 78, 153, 155, 157, 159],\n 'output_x': [5, 4, 15, 14, 34, 33, 56, 55, 78, 77, 148, 147, 218, 217, 337.5, 336.5]\n },\n 'subsurface': {\n 'state_a': [1, 2, 3, 4],\n 'state_b': [2, 4, 6, 8],\n 'transfer_k': [8, 48, 121, 290.75],\n 'transfer_m': [10, 31.5, 73.25, 142.375],\n 'output_x': [6, 44, 115, 282.75]\n },\n 'openwater': {\n 'state_a': [1, 2, 3, 4, 5, 6, 7, 8],\n 'transfer_l': [1, 2, 23, 24, 68, 69, 153.5, 154.5],\n 'transfer_n': [12, 33, 57, 81, 153, 225, 346.5, 468],\n 'transfer_o': [7, 14, 22, 30, 54, 78, 118.5, 159],\n 'output_x': [15, 36, 60, 84, 156, 228, 349.5, 471],\n 'output_y': [-1, -2, 17, 16, 58, 57, 139.5, 138.5]\n }\n }\n}\n\n\ndef aggregate_raw_record(values, method, slice_):\n length = len(values)\n\n if method == 'sum':\n result = [sum(values[slice_ * i:(slice_ * (i + 1))])\n for i in range(0, length // slice_)]\n elif method == 'mean':\n result = [sum(values[slice_ * i:(slice_ * (i + 1))]) / slice_\n for i in range(0, length // slice_)]\n elif method == 'minimum':\n result = [min(values[slice_ * i:(slice_ * (i + 1))])\n for i in range(0, length // slice_)]\n elif method == 'maximum':\n result = [max(values[slice_ * i:(slice_ * (i + 1))])\n for i in range(0, length // slice_)]\n else: # method == 'point'\n result = [values[slice_ * (i + 1) - 1]\n for i in range(0, length // slice_)]\n\n return np.array(result)\n\n\ndef get_expected_record(time_, component, name, delta, 
method):\n    category = component.category\n\n    # map to default for alias methods\n    method = unifhy._utils.record._methods_map[method]\n\n    # aggregate raw record using method and relevant slices\n    expected_record = aggregate_raw_record(\n        exp_records_raw[time_][category][name],\n        method,\n        delta // component.timedomain.timedelta\n    )\n\n    # get expected temporal dimensions\n    expected_time, expected_bounds = get_dummy_output_time_and_bounds(\n        time_resolutions[category][time_], delta\n    )\n\n    return expected_time, expected_bounds, expected_record\n\n\ndef get_produced_record(component, name, delta, method):\n    rtol, atol = unifhy.rtol(), unifhy.atol()\n\n    # map to default for alias methods\n    method = unifhy._utils.record._methods_map[method]\n\n    # load record from stream file\n    with Dataset(component._record_streams[delta].file, 'r') as f:\n        values = f.variables['_'.join([name, method])][:]\n        time = f.variables['time'][:]\n        bounds = f.variables['time_bounds'][:]\n\n    # check that array is homogeneous (i.e. min = max)\n    axis = tuple(range(1, values.ndim))\n    min_ = np.amin(values, axis=axis)\n    max_ = np.amax(values, axis=axis)\n    # pass tolerances by keyword: numpy's positional order is (rtol, atol)\n    np.testing.assert_allclose(min_, max_, rtol=rtol, atol=atol)\n\n    return time, bounds, min_\n"
] | [
[
"numpy.testing.assert_allclose",
"numpy.array",
"numpy.amax",
"numpy.amin"
]
] |
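The windowed aggregation inside aggregate_raw_record can be checked in isolation. A minimal sketch of its 'mean' branch with made-up values (window slice_ = 4), independent of unifhy:

import numpy as np

values = [1, 2, 3, 4, 5, 6, 7, 8]  # hypothetical raw record
slice_ = 4                         # raw steps per aggregated step
# average each non-overlapping window, as the 'mean' branch does
result = np.array([sum(values[slice_ * i:slice_ * (i + 1)]) / slice_
                   for i in range(len(values) // slice_)])
print(result)  # [2.5 6.5]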
SwamyDev/gym-quickcheck | [
"d271f509c11998c9f210c5d8131906f712553123"
] | [
"tests/test_alternation.py"
] | [
"import numpy as np\nimport pytest\nfrom gym import utils\nfrom more_itertools import last\nfrom pytest import approx\n\nfrom gym_quickcheck.envs.alteration_env import AlternationEnv\nfrom tests.aux import assert_that, follows_contract, assert_obs_eq, unpack_reward, unpack_obs, until_done, \\\n run_example\n\n\[email protected]\ndef env():\n return AlternationEnv()\n\n\[email protected]\ndef obs_shape(env):\n return env.observation_space.shape\n\n\[email protected]\ndef make_observation(obs_shape, make_observation_of):\n def obs_fac(agent_pos):\n return make_observation_of(obs_shape, agent_pos)\n\n return obs_fac\n\n\[email protected]\ndef sample_reset(env, obs_shape):\n def sample_reset_func(n):\n total = np.zeros(obs_shape)\n for _ in range(n):\n total += env.reset()\n return total / n\n\n return sample_reset_func\n\n\[email protected]\ndef alternate_right_left():\n direction = 0\n\n def alternate():\n nonlocal direction\n direction = (direction + 1) % 2\n return direction\n\n return alternate\n\n\[email protected]\ndef make_state_string():\n def state_fac(agent_pos, reward=None):\n s = ['#', '#']\n if reward is None:\n color = 'gray'\n elif reward:\n color = 'green'\n else:\n color = 'red'\n s[agent_pos] = utils.colorize(s[agent_pos], color=color, highlight=True)\n return \"\".join(s)\n\n return state_fac\n\n\ndef test_adherence_to_gym_contract(env, gym_interface, gym_properties):\n assert_that(env, follows_contract(gym_interface, gym_properties))\n\n\ndef test_agent_starts_randomly_left_or_right(sample_reset):\n avg_obs = sample_reset(10000)\n assert left(avg_obs) == approx(0.5, rel=0.1) and right(avg_obs) == approx(0.5, rel=0.1)\n\n\ndef left(obs):\n return obs[0]\n\n\ndef right(obs):\n return obs[1]\n\n\ndef test_alternate_the_agent_position(env, make_observation):\n force_reset(env, left)\n assert_obs_eq(unpack_obs(env.step(go_right())), make_observation(agent_pos=1))\n force_reset(env, right)\n assert_obs_eq(unpack_obs(env.step(go_left())), make_observation(agent_pos=0))\n\n\ndef force_reset(env, pos):\n obs = env.reset()\n while pos(obs) != 1:\n obs = env.reset()\n return obs\n\n\ndef go_right():\n return 0\n\n\ndef go_left():\n return 1\n\n\ndef test_not_alternating_does_not_change_agent_position(env, make_observation):\n force_reset(env, left)\n assert_obs_eq(unpack_obs(env.step(go_left())), make_observation(agent_pos=0))\n force_reset(env, right)\n assert_obs_eq(unpack_obs(env.step(go_right())), make_observation(agent_pos=1))\n\n\ndef test_environment_is_done_after_episode_length_is_reached(env):\n env.reset()\n assert sum(1 for _ in until_done(env, 0)) == env.len_episode\n\n\ndef test_alternating_position_gives_reward(env, alternate_right_left):\n force_reset(env, left)\n total_reward = sum(unpack_reward(t) for t in until_done(env, alternate_right_left))\n assert total_reward == approx(env.reward_range[1], rel=env.reward.std * 6)\n\n\ndef test_keeping_at_the_same_position_causes_penalties(env):\n force_reset(env, left)\n total_penalty = sum(unpack_reward(t) for t in until_done(env, go_left()))\n assert total_penalty == approx(env.reward_range[0], rel=env.penalty.std * 6)\n\n\ndef test_resetting_environment(env):\n env.reset()\n all(_ for _ in until_done(env, go_left()))\n env.reset()\n assert sum(1 for _ in until_done(env, 0)) == env.len_episode\n\n\ndef test_on_average_random_agent_performs_poorly(env, sample_average_reward):\n assert sample_average_reward(env, 100) <= env.reward_range[1] * 0.5\n\n\ndef test_render_writes_current_state_to_stdout(env, make_state_string, 
capstdout):\n force_reset(env, left)\n env.render()\n assert capstdout.read() == \"\\n\" + make_state_string(agent_pos=0) + \"\\n\"\n env.step(0)\n env.render()\n assert capstdout.read() == \"(Right)\\n\" + make_state_string(agent_pos=1, reward=True) + \"\\n\"\n env.step(0)\n env.render()\n assert capstdout.read() == \"(Right)\\n\" + make_state_string(agent_pos=1, reward=False) + \"\\n\"\n env.step(1)\n env.render()\n assert capstdout.read() == \"(Left)\\n\" + make_state_string(agent_pos=0, reward=True) + \"\\n\"\n\n\ndef test_alternation_example(request, capstdout):\n example = request.session.fspath / \"examples/alternation.py\"\n lines = run_example(example)\n assert \"Observation: \" in last(lines)\n"
] | [
[
"numpy.zeros"
]
] |
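A minimal rollout sketch against AlternationEnv, mirroring the alternate-and-accumulate pattern the tests above exercise. It assumes gym-quickcheck is installed and uses the classic 4-tuple gym step signature implied by the test helpers:

from gym_quickcheck.envs.alteration_env import AlternationEnv

env = AlternationEnv()
obs = env.reset()
done, total_reward, action = False, 0.0, 0
while not done:
    action = (action + 1) % 2               # alternate right/left each step
    obs, reward, done, _ = env.step(action)
    total_reward += reward
print("total reward:", total_reward)        # should approach env.reward_range[1]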
wjjmjh/cogent3 | [
"e10f4f933921d52b000096b7c016190a1602add6"
] | [
"src/cogent3/recalculation/calculation.py"
] | [
"#!/usr/bin/env python\n\nimport os\nimport time\nimport warnings\n\nimport numpy\n\nfrom cogent3.maths.optimisers import ParameterOutOfBoundsError, maximise\nfrom cogent3.maths.solve import find_root\n\n\nFloat = numpy.core.numerictypes.sctype2char(float)\n\n\nTRACE_DEFAULT = \"COGENT3_TRACE\" in os.environ\nTRACE_SCALE = 100000\n\n__author__ = \"Peter Maxwell\"\n__copyright__ = \"Copyright 2007-2020, The Cogent Project\"\n__credits__ = [\"Peter Maxwell\", \"Gavin Huttley\", \"Daniel McDonald\"]\n__license__ = \"BSD-3\"\n__version__ = \"2020.6.30a\"\n__maintainer__ = \"Peter Maxwell\"\n__email__ = \"[email protected]\"\n__status__ = \"Production\"\n\n# This is the 'live' layer of the recalculation system\n# Cells and OptPars are held by a Calculator\n# For docstring see definitions.py\n\n\nclass CalculationInterupted(Exception):\n pass\n\n\nclass OptPar(object):\n \"\"\"One parameter, as seen by the optimiser, eg: length of one edge.\n An OptPar reports changes to the ParameterValueSet for its parameter.\n \"\"\"\n\n is_constant = False\n recycled = False\n args = ()\n # Use of __slots__ here and in Cell gives 8% speedup on small calculators.\n __slots__ = [\n \"clients\",\n \"client_ranks\",\n \"name\",\n \"lower\",\n \"default_value\",\n \"upper\",\n \"scope\",\n \"order\",\n \"label\",\n \"consequences\",\n \"rank\",\n ]\n\n def __init__(self, name, scope, bounds):\n self.clients = []\n self.client_ranks = []\n self.name = name\n for (attr, v) in zip([\"lower\", \"default_value\", \"upper\"], bounds):\n setattr(self, attr, float(v))\n\n # controls order in optimiser - group for LF\n self.scope = scope\n self.order = (len(scope), scope and min(scope), name)\n self.label = self.name\n\n def add_client(self, client):\n self.clients.append(client)\n\n def __lt__(self, other):\n # optimisation is more efficient if params for one edge are neighbours\n return self.order < other.order\n\n def __eq__(self, other):\n # optimisation is more efficient if params for one edge are neighbours\n return self.order == other.order\n\n def __ne__(self, other):\n # optimisation is more efficient if params for one edge are neighbours\n return self.order != other.order\n\n def __repr__(self):\n return \"%s(%s)\" % (self.__class__.__name__, self.label)\n\n def get_optimiser_bounds(self):\n lower = self.transform_to_optimiser(self.lower)\n upper = self.transform_to_optimiser(self.upper)\n return (lower, upper)\n\n def transform_from_optimiser(self, value):\n return value\n\n def transform_to_optimiser(self, value):\n return value\n\n\nclass LogOptPar(OptPar):\n # For ratios, optimiser sees log(param value). 
Conversions to/from\n    # optimiser representation are only done by Calculator.change(),\n    # .get_value_array() and .get_bounds_vectors().\n\n    def transform_from_optimiser(self, value):\n        return numpy.exp(value)\n\n    def transform_to_optimiser(self, value):\n        try:\n            return numpy.log(value)\n        except OverflowError:\n            raise OverflowError(\"log(%s)\" % value)\n\n\nclass EvaluatedCell(object):\n    __slots__ = [\n        \"client_ranks\",\n        \"rank\",\n        \"calc\",\n        \"args\",\n        \"is_constant\",\n        \"clients\",\n        \"failure_count\",\n        \"name\",\n        \"arg_ranks\",\n        \"consequences\",\n        \"recycled\",\n        \"default\",\n    ]\n\n    def __init__(self, name, calc, args, recycling=None, default=None):\n        self.name = name\n        self.rank = None\n        self.calc = calc\n        self.default = default\n        self.args = tuple(args)\n\n        self.recycled = recycling\n        if recycling:\n            self.args = (self,) + self.args\n\n        self.is_constant = True\n        for arg in args:\n            arg.add_client(self)\n            if not arg.is_constant:\n                self.is_constant = False\n\n        self.clients = []\n        self.client_ranks = []\n        self.failure_count = 0\n\n    def add_client(self, client):\n        self.clients.append(client)\n\n    def update(self, data):\n        data[self.rank] = self.calc(*[data[arg_rank] for arg_rank in self.arg_ranks])\n\n    def prime(self, data_sets):\n        if self.is_constant:\n            # Just calc once\n            self.update(data_sets[0])\n            for data in data_sets[1:]:\n                data[self.rank] = data_sets[0][self.rank]\n        else:\n            for data in data_sets:\n                self.update(data)\n\n    def report_error(self, detail, data):\n        self.failure_count += 1\n        if self.failure_count <= 5:\n            print(\"%s in calculating %s:\" % (detail.__class__.__name__, self.name))\n        if self.failure_count == 5:\n            print(\"Additional failures of this type will not be reported.\")\n        if self.failure_count < 2:\n            print(\"%s inputs were:\" % len(self.arg_ranks))\n            for (i, arg) in enumerate(self.arg_ranks):\n                print(\"%s: \" % i + repr(data[arg]))\n\n\nclass ConstCell(object):\n    __slots__ = [\"name\", \"scope\", \"value\", \"rank\", \"consequences\", \"clients\"]\n\n    recycled = False\n    is_constant = True\n    args = ()\n\n    def __init__(self, name, value):\n        self.name = name\n        self.clients = []\n        self.value = value\n\n    def add_client(self, client):\n        self.clients.append(client)\n\n\nclass Calculator(object):\n    \"\"\"A complete hierarchical function with N evaluation steps to call\n    for each change of inputs. 
Made by a ParameterController.\"\"\"\n\n def __init__(self, cells, defns, trace=None, with_undo=True):\n if trace is None:\n trace = TRACE_DEFAULT\n self.with_undo = with_undo\n self.results_by_id = defns\n self.opt_pars = []\n other_cells = []\n for cell in cells:\n if isinstance(cell, OptPar):\n self.opt_pars.append(cell)\n else:\n other_cells.append(cell)\n self._cells = self.opt_pars + other_cells\n data_sets = [[0], [0, 1]][self.with_undo]\n self.cell_values = [[None] * len(self._cells) for switch in data_sets]\n self.arg_ranks = [[] for cell in self._cells]\n for (i, cell) in enumerate(self._cells):\n cell.rank = i\n cell.consequences = {}\n if isinstance(cell, OptPar):\n for switch in data_sets:\n self.cell_values[switch][i] = cell.default_value\n elif isinstance(cell, ConstCell):\n for switch in data_sets:\n self.cell_values[switch][i] = cell.value\n elif isinstance(cell, EvaluatedCell):\n cell.arg_ranks = []\n for arg in cell.args:\n if hasattr(arg, \"client_ranks\"):\n arg.client_ranks.append(i)\n self.arg_ranks[i].append(arg.rank)\n cell.arg_ranks.append(arg.rank)\n\n try:\n cell.prime(self.cell_values)\n except KeyboardInterrupt:\n raise\n except Exception as detail:\n print((\"Failed initial calculation of %s\" % cell.name))\n raise\n else:\n raise RuntimeError(\"Unexpected Cell type %s\" % type(cell))\n\n self._switch = 0\n self.recycled_cells = [cell.rank for cell in self._cells if cell.recycled]\n self.spare = [None] * len(self._cells)\n\n for cell in self._cells[::-1]:\n for arg in cell.args:\n arg.consequences[cell.rank] = True\n arg.consequences.update(cell.consequences)\n\n self._programs = {}\n # Just for timings pre-calc these\n for opt_par in self.opt_pars:\n self.cells_changed_by([(opt_par.rank, None)])\n\n self.last_values = self.get_value_array()\n self.last_undo = []\n self.elapsed_time = 0.0\n self.evaluations = 0\n self.set_tracing(trace)\n self.optimised = False\n\n def graphviz(self):\n \"\"\"Returns a string in the 'dot' graph description language used by the\n program 'Graphviz'. 
One box per cell, grouped by Defn.\"\"\"\n\n lines = [\"digraph G {\\n rankdir = LR\\n ranksep = 1\\n\"]\n evs = []\n for cell in self._cells:\n if cell.name not in evs:\n evs.append(cell.name)\n nodes = dict([(name, []) for name in evs])\n edges = []\n for cell in self._cells:\n if hasattr(cell, \"name\"):\n nodes[cell.name].append(cell)\n for arg in cell.args:\n if arg is not cell:\n edges.append(\n '\"%s\":%s -> \"%s\":%s'\n % (arg.name, arg.rank, cell.name, cell.rank)\n )\n for name in evs:\n all_const = True\n some_const = False\n enodes = [name.replace(\"edge\", \"QQQ\")]\n for cell in nodes[name]:\n value = self._get_current_cell_value(cell)\n if isinstance(value, float):\n label = \"%5.2e\" % value\n else:\n label = \"[]\"\n label = \"<%s> %s\" % (cell.rank, label)\n enodes.append(label)\n all_const = all_const and cell.is_constant\n some_const = some_const or cell.is_constant\n enodes = \"|\".join(enodes)\n colour = [\"\", \" fillcolor=gray90, style=filled,\"][some_const]\n colour = [colour, \" fillcolor=gray, style=filled,\"][all_const]\n lines.append(\n '\"%s\" [shape = \"record\",%s label=\"%s\"];' % (name, colour, enodes)\n )\n lines.extend(edges)\n lines.append(\"}\")\n return \"\\n\".join(lines).replace(\"edge\", \"egde\").replace(\"QQQ\", \"edge\")\n\n def optimise(self, **kw):\n x = self.get_value_array()\n bounds = self.get_bounds_vectors()\n maximise(self, x, bounds, **kw)\n self.optimised = True\n\n def set_tracing(self, trace=False):\n \"\"\"With 'trace' true every evaluated is printed. Useful for profiling\n and debugging.\"\"\"\n\n self.trace = trace\n if trace:\n print()\n n_opars = len(self.opt_pars)\n n_cells = len([c for c in self._cells if not c.is_constant])\n print(n_opars, \"OptPars and\", n_cells - n_opars, \"derived values\")\n print(\"OptPars: \", \", \".join([par.name for par in self.opt_pars]))\n print(\"Times in 1/%sths of a second\" % TRACE_SCALE)\n\n groups = []\n groupd = {}\n for cell in self._cells:\n if cell.is_constant or not isinstance(cell, EvaluatedCell):\n continue\n if cell.name not in groupd:\n group = []\n groups.append((cell.name, group))\n groupd[cell.name] = group\n groupd[cell.name].append(cell)\n\n widths = []\n for (name, cells) in groups:\n width = 4 + len(cells)\n widths.append(min(15, width))\n self._cellsGroupedForDisplay = list(zip(groups, widths))\n for ((name, cells), width) in self._cellsGroupedForDisplay:\n print(name[:width].ljust(width), \"|\", end=\" \")\n print()\n for width in widths:\n print(\"-\" * width, \"|\", end=\" \")\n print()\n\n def get_value_array(self):\n \"\"\"This being a caching function, you can ask it for its current\n input! 
Handy for initialising the optimiser.\"\"\"\n values = [\n p.transform_to_optimiser(self._get_current_cell_value(p))\n for p in self.opt_pars\n ]\n return values\n\n # get_bounds_vectors and testoptparvector make up the old LikelihoodFunction\n # interface expected by the optimiser.\n\n def get_bounds_vectors(self):\n \"\"\"2 arrays: minimums, maximums\"\"\"\n lower = numpy.zeros([len(self.opt_pars)], Float)\n upper = numpy.zeros([len(self.opt_pars)], Float)\n for (i, opt_par) in enumerate(self.opt_pars):\n (lb, ub) = opt_par.get_optimiser_bounds()\n lower[i] = lb\n upper[i] = ub\n return (lower, upper)\n\n def fuzz(self, random_series=None, seed=None):\n # Slight randomisation suitable for removing right-on-the-\n # ridge starting points before local optimisation.\n if random_series is None:\n import random\n\n random_series = random.Random()\n if seed is not None:\n random_series.seed(seed)\n X = self.get_value_array()\n for (i, (l, u)) in enumerate(zip(*self.get_bounds_vectors())):\n sign = random_series.choice([-1, +1])\n step = random_series.uniform(+0.05, +0.025)\n X[i] = max(l, min(u, (1.0 + sign * step * X[i])))\n self.testoptparvector(X)\n self.optimised = False\n\n def testoptparvector(self, values):\n \"\"\"AKA self(). Called by optimisers. Returns the output value\n after doing any recalculation required for the new input 'values'\n array\"\"\"\n\n assert len(values) == len(self.opt_pars)\n changes = [\n (i, new)\n for (i, (old, new)) in enumerate(zip(self.last_values, values))\n if old != new\n ]\n return self.change(changes)\n\n __call__ = testoptparvector\n\n def testfunction(self):\n \"\"\"Return the current output value without changing any inputs\"\"\"\n return self._get_current_cell_value(self._cells[-1])\n\n def change(self, changes):\n \"\"\"Returns the output value after applying 'changes', a list of\n (optimisable_parameter_ordinal, new_value) tuples.\"\"\"\n\n t0 = time.time()\n self.evaluations += 1\n\n # If ALL of the changes made in the last step are reversed in this step\n # then it is safe to undo them first, taking advantage of the 1-deep\n # cache.\n if self.with_undo and self.last_undo:\n for (i, v) in self.last_undo:\n if (i, v) not in changes:\n break\n else:\n changes = [ch for ch in changes if ch not in self.last_undo]\n self._switch = not self._switch\n for (i, v) in self.last_undo:\n self.last_values[i] = v\n\n self.last_undo = []\n program = self.cells_changed_by(changes)\n\n if self.with_undo:\n self._switch = not self._switch\n data = self.cell_values[self._switch]\n base = self.cell_values[not self._switch]\n\n # recycle and undo interact in bad ways\n for rank in self.recycled_cells:\n if data[rank] is not base[rank]:\n self.spare[rank] = data[rank]\n data[:] = base[:]\n for cell in program:\n if cell.recycled:\n if data[cell.rank] is base[cell.rank]:\n data[cell.rank] = self.spare[cell.rank]\n assert data[cell.rank] is not base[cell.rank]\n else:\n data = self.cell_values[self._switch]\n\n # Set new OptPar values\n changed_optpars = []\n for (i, v) in changes:\n if i < len(self.opt_pars):\n assert isinstance(v * 1.0, float), v\n changed_optpars.append((i, self.last_values[i]))\n self.last_values[i] = v\n data[i] = self.opt_pars[i].transform_from_optimiser(v)\n else:\n data[i] = v\n\n try:\n if self.trace:\n self.tracing_update(changes, program, data)\n else:\n self.plain_update(program, data)\n\n # if non-optimiser parameter was set then undo is invalid\n if self.last_undo and max(self.last_undo)[0] >= len(self.opt_pars):\n self.last_undo = []\n 
else:\n self.last_undo = changed_optpars\n\n except CalculationInterupted as detail:\n if self.with_undo:\n self._switch = not self._switch\n for (i, v) in changed_optpars:\n self.last_values[i] = v\n self.last_undo = []\n (cell, exception) = detail.args\n raise exception\n\n finally:\n self.elapsed_time += time.time() - t0\n\n return self.cell_values[self._switch][-1]\n\n def cells_changed_by(self, changes):\n # What OptPars have been changed determines cells to update\n change_key = list(dict(changes).keys())\n change_key.sort()\n change_key = tuple(change_key)\n if change_key in self._programs:\n program = self._programs[change_key]\n else:\n # Make a list of the cells to update and cache it.\n consequences = {}\n for i in change_key:\n consequences.update(self._cells[i].consequences)\n self._programs[change_key] = program = [\n cell for cell in self._cells if cell.rank in consequences\n ]\n return program\n\n def plain_update(self, program, data):\n try:\n for cell in program:\n data[cell.rank] = cell.calc(*[data[a] for a in cell.arg_ranks])\n except ParameterOutOfBoundsError as detail:\n # Non-fatal error, just cancel this calculation.\n raise CalculationInterupted(cell, detail)\n except ArithmeticError as detail:\n # Non-fatal but unexpected error. Warn and cancel this calculation.\n cell.report_error(detail, data)\n raise CalculationInterupted(cell, detail)\n\n def tracing_update(self, changes, program, data):\n # Does the same thing as plain_update, but also produces lots of\n # output showing how long each step of the calculation takes.\n # One line per call, '-' for undo, '+' for calculation\n\n exception = None\n elapsed = {}\n for cell in program:\n try:\n t0 = time.time()\n data[cell.rank] = cell.calc(*[data[a] for a in cell.arg_ranks])\n t1 = time.time()\n except (ParameterOutOfBoundsError, ArithmeticError) as exception:\n error_cell = cell\n break\n elapsed[cell.rank] = t1 - t0\n\n tds = []\n for ((name, cells), width) in self._cellsGroupedForDisplay:\n text = \"\".join([\" +\"[cell.rank in elapsed] for cell in cells])\n elap = sum([elapsed.get(cell.rank, 0) for cell in cells])\n if len(text) > width - 4:\n edge_width = min(len(text), (width - 4 - 3)) // 2\n elipsis = [\" \", \"...\"][not not text.strip()]\n text = text[:edge_width] + elipsis + text[-edge_width:]\n tds.append(\"%s%4s\" % (text, int(TRACE_SCALE * elap + 0.5) or \"\"))\n\n par_descs = []\n for (i, v) in changes:\n cell = self._cells[i]\n if isinstance(cell, OptPar):\n par_descs.append(\"%s=%8.6f\" % (cell.name, v))\n else:\n par_descs.append(\"%s=?\" % cell.name)\n par_descs = \", \".join(par_descs)[:22].ljust(22)\n print(\" | \".join(tds + [\"\"]), end=\" \")\n if exception:\n print(\"%15s | %s\" % (\"\", par_descs))\n error_cell.report_error(exception, data)\n raise CalculationInterupted(cell, exception)\n else:\n print(\"%-15s | %s\" % (repr(data[-1])[:15], par_descs))\n\n def measure_evals_per_second(self, time_limit=1.0, wall=True, sa=False):\n # Returns an estimate of the number of evaluations per second\n # an each-optpar-in-turn simulated annealing type optimiser\n # can achive, spending not much more than 'time_limit' doing\n # so. 
'wall'=False causes process time to be used instead of\n # wall time.\n # 'sa' makes it simulated-annealing-like, with frequent backtracks\n if wall:\n now = time.time\n else:\n now = time.clock\n x = self.get_value_array()\n samples = []\n elapsed = 0.0\n rounds_per_sample = 2\n while elapsed < time_limit and len(samples) < 5:\n time.sleep(0.01)\n t0 = now()\n last = []\n for j in range(rounds_per_sample):\n for (i, v) in enumerate(x):\n # Not a real change, but works like one.\n self.change(last + [(i, v)])\n if sa and (i + j) % 2:\n last = [(i, v)]\n else:\n last = []\n # Use one agreed on delta otherwise different cpus will finish the\n # loop at different times causing chaos.\n delta = now() - t0\n if delta < 0.1:\n # time.clock is low res, so need to ensure each sample\n # is long enough to take SOME time.\n rounds_per_sample *= 2\n continue\n else:\n rate = rounds_per_sample * len(x) / delta\n samples.append(rate)\n elapsed += delta\n\n if wall:\n samples.sort()\n return samples[len(samples) // 2]\n else:\n return sum(samples) / len(samples)\n\n def _get_current_cell_value(self, cell):\n return self.cell_values[self._switch][cell.rank]\n\n def get_current_cell_values_for_defn(self, defn):\n cells = self.results_by_id[id(defn)]\n return [self.cell_values[self._switch][cell.rank] for cell in cells]\n\n def __get_bounded_root(self, func, origX, direction, bound, xtol):\n return find_root(\n func,\n origX,\n direction,\n bound,\n xtol=xtol,\n expected_exception=(ParameterOutOfBoundsError, ArithmeticError),\n )\n\n def _get_current_cell_interval(self, opt_par, dropoff, xtol=None):\n # (min, opt, max) tuples for each parameter where f(min) ==\n # f(max) == f(opt)-dropoff. Uses None when a bound is hit.\n # assert self.optimised, \"Call optimise() first\"\n origY = self.testfunction()\n (lower, upper) = opt_par.get_optimiser_bounds()\n opt_value = self._get_current_cell_value(opt_par)\n origX = opt_par.transform_to_optimiser(opt_value)\n\n def func(x):\n Y = self.change([(opt_par.rank, x)])\n return Y - (origY - dropoff)\n\n try:\n lowX = self.__get_bounded_root(func, origX, -1, lower, xtol)\n highX = self.__get_bounded_root(func, origX, +1, upper, xtol)\n finally:\n func(origX)\n\n triple = []\n for x in [lowX, origX, highX]:\n if x is not None:\n x = opt_par.transform_from_optimiser(x)\n triple.append(x)\n return tuple(triple)\n"
] | [
[
"numpy.core.numerictypes.sctype2char",
"numpy.exp",
"numpy.log"
]
] |
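A minimal sketch of the log-space transform that Calculator.change() relies on for ratio parameters, exercising LogOptPar directly; the bounds and scope values here are made up for illustration:

import numpy
from cogent3.recalculation.calculation import LogOptPar

par = LogOptPar('length', ['edge0'], (1e-6, 1.0, 10.0))  # (lower, default, upper)
x = par.transform_to_optimiser(0.5)                      # the optimiser sees log(0.5)
assert numpy.isclose(par.transform_from_optimiser(x), 0.5)
print(par.get_optimiser_bounds())                        # (log(1e-6), log(10.0))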
creatist/XueLangTianchi | [
"7deaf2936045f9f2d44035ec35bd9039f054b2cf"
] | [
"Resnet152/main.py"
] | [
"#!/usr/bin/python\n# -*- coding:utf-8 -*-\n# power by Mr.Li\nfrom Resnet152.utils.config import opt\nimport os\nimport torch as t\nimport Resnet152.models as models\nimport torch.backends.cudnn as cudnn\nfrom Resnet152.data.XueLangDataSet import XueLangDataSet #加载转换后的数据集\nfrom torch.utils.data import DataLoader #数据加载器\nfrom torch.autograd import Variable\nfrom torchnet import meter #仪表 用来显示loss等图形\nfrom Resnet152.utils.visualize import Visualizer #可视化visdom\nfrom tqdm import tqdm #显示进度条\nimport numpy as np\nfrom sklearn.metrics import roc_auc_score\n\n\ndef train(**kwargs):\n\n print(\"开始训练\")\n # 定义一个网络模型对象\n # 通过config文件中模型名称来加载模型\n netWork = getattr(models, opt.model)()\n print('当前使用的模型为'+opt.model)\n\n # 定义可视化对象\n vis = Visualizer(opt.env+opt.model)\n\n # 先将模型加载到内存中,即CPU中\n map_location = lambda storage, loc: storage\n if opt.load_model_path:\n netWork.load_state_dict(t.load(opt.load_model_path, map_location=map_location))\n if opt.use_gpu:\n netWork.cuda()\n\n # step2: 加载数据\n train_data = XueLangDataSet(opt.data_root, train=True)\n #train=False test=False 则为验证集\n val_data=XueLangDataSet(opt.data_root,train=False)\n # 数据集加载器\n train_dataloader = DataLoader(train_data, opt.batch_size, shuffle=True, num_workers=opt.num_workers)\n val_dataloader = DataLoader(val_data, opt.val_batch_size, shuffle=True, num_workers=opt.num_workers)\n # criterion 损失函数和optimizer优化器\n # 分类损失函数使用交叉熵\n criterion = t.nn.CrossEntropyLoss()\n lr = opt.lr\n # 优化器使用Adam\n if opt.fixed_weight:\n # 选择固定部分权重参数\n if opt.model is 'ResNet18_bo' or opt.model is 'ResNet152_bo':\n # ResNet18_bo和ResNet152网络只更新最后的全连接层\n print(opt.model+'网络只更新最后的全连接层')\n optimizer = t.optim.Adam(netWork.model_bo.fc.parameters(), lr=opt.lr, weight_decay=opt.weight_decay)\n if opt.model is 'VGG16_bo' or opt.model is 'VGG19_bo':\n print(opt.model+'网络只更新分类层')\n optimizer = t.optim.Adam(netWork.classifier.parameters(), lr=opt.lr, weight_decay=opt.weight_decay)\n if opt.model is 'DenseNet_bo':\n print(opt.model+'网络只更新最后的全连接层')\n optimizer = t.optim.Adam(netWork.classifier.parameters(), lr=opt.lr, weight_decay=opt.weight_decay)\n else:\n # 更新全部参数(只vgg19做了更改)\n print(opt.model + '网络更新全部参数')\n optimizer = t.optim.Adam(netWork.parameters(), lr=opt.lr, weight_decay=opt.weight_decay)\n\n\n\n # 统计指标meters 仪表 显示损失的图形\n #计算所有数的平均数和标准差,来统计一个epoch中损失的平均值\n loss_meter=meter.AverageValueMeter()\n # 定义初始的loss\n previous_loss = 1e100\n best_val_auc= 0\n for epoch in range(opt.max_epoch):\n # 清空仪表信息\n loss_meter.reset()\n # 迭代数据集加载器\n for ii, (data_origin,label) in enumerate(train_dataloader):\n # 训练模型\n # input_img为模型输入图像\n input_img = Variable(data_origin)\n # label_img为对应标签\n label_img = Variable(label)\n # 将数据转到GPU\n if opt.use_gpu:\n input_img = input_img.cuda()\n label_img = label_img.cuda()\n # 优化器梯度清零\n optimizer.zero_grad()\n # 前向传播,得到网络产生的输出值label_output\n label_output = netWork(input_img)\n\n # 损失为交叉熵\n loss = criterion(label_output, label_img)\n # 反向传播 自动求梯度 loss进行反向传播\n loss.backward()\n # 更新优化器的可学习参数 optimizer优化器进行更新参数\n optimizer.step()\n # 更新仪表 并可视化\n loss_meter.add(loss.data[0])\n # 每print_freq次可视化loss\n if ii % opt.print_freq == opt.print_freq - 1:\n # plot是自定义的方法\n vis.plot('训练集loss', loss_meter.value()[0])\n # 一个epoch之后保存模型\n t.save(netWork,opt.checkpoint_root+opt.model+'.pth')\n print(\"第\"+str(epoch)+\"次epoch完成==============================================\")\n # 当前时刻的一些信息\n vis.log(\"epoch:{epoch},lr:{lr},loss:{loss}\".format(\n epoch=epoch, loss=loss_meter.value()[0], lr=lr))\n\n # 更新学习率 如果损失开始升高,则降低学习率\n if loss_meter.value()[0] > 
previous_loss:\n lr = lr * opt.lr_decay\n # 第二种降低学习率的方法:不会有moment等信息的丢失\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n previous_loss = loss_meter.value()[0]\n\n\n # 在验证集上进行验证,保存在验证集上效果最好的模型\n # 模型调整为验证模式\n netWork.eval()\n predict_label=[]\n real_label=[]\n for ii, (val_data_origin, val_label) in enumerate(val_dataloader):\n # 训练模型\n # input_img为模型输入图像\n val_input_img = Variable(val_data_origin, volatile=True)\n # label_img为对应标签\n val_label_img = val_label\n # 将数据转到GPU\n if opt.use_gpu:\n val_input_img = val_input_img.cuda()\n # 前向传播,得到网络产生的输出值label_output\n val_label_output = netWork(val_input_img)\n # 将结果合并\n val_predict_score = t.nn.functional.softmax(val_label_output,dim=1)[:,1].cpu().data.numpy().flatten()\n val_label_img=val_label_img.numpy().flatten()\n for i in range(len(val_label_img)):\n predict_label.append(val_predict_score[i])\n real_label.append(val_label_img[i])\n # 过完一遍验证集,计算整个验证集上的AUC\n validation_auc_sklearn = roc_auc_score(real_label, predict_label)\n\n # 画出验证集的auc sklearn\n vis.plot('验证集的auc', validation_auc_sklearn)\n # 模型恢复为训练模式\n netWork.train()\n\n # 保存到目前为止 在验证集上的AUC最大的模型\n if best_val_auc < validation_auc_sklearn:\n best_val_auc = validation_auc_sklearn\n print('当前得到最好的验证集的AUC为 %.5f' % best_val_auc)\n netWork.save(netWork,opt.checkpoint_root + 'auc'+str(validation_auc_sklearn)+'.pth')\n print(\"============训练完毕=============\")\n\n\ndef test(**kwargs):\n\n print(\"开始测试\")\n # 定义一个网络模型对象\n # 通过config文件中模型名称来加载模型,并调整为验证模式\n netWork = getattr(models, opt.model)().eval()\n print('当前测试使用的模型为'+opt.model)\n # 先将模型加载到内存中,即CPU中\n map_location = lambda storage, loc: storage\n if opt.load_model_path:\n netWork.load_state_dict(t.load(opt.load_model_path, map_location=map_location))\n\n # 将模型转到GPU\n if opt.use_gpu:\n netWork.cuda()\n # step2: 加载数据\n test_data = XueLangDataSet(opt.test_data_root, test=True)\n test_dataloader=DataLoader(test_data,batch_size=32,shuffle=False,num_workers=opt.num_workers)\n #存放预测结果\n results = []\n # 迭代数据集加载器\n for ii, (test_data_origin,test_img_name) in enumerate(test_dataloader):\n # test_input_img为模型输入图像\n test_input_img = Variable(test_data_origin,volatile=True)\n if opt.use_gpu:\n test_input_img=test_input_img.cuda()\n test_label=netWork(test_input_img)\n # 概率 通过softmax可得概率 一张图得到多个结果 shape:[X,2]\n test_label_score = t.nn.functional.softmax(test_label,dim=1)\n test_label_score_batch=test_label_score[:, 1].cpu().data.numpy()\n test_label_score_batch[test_label_score_batch==1]=0.999999\n test_label_score_batch[test_label_score_batch<0.000001]=0.000001\n for i in range(len(test_img_name)):\n batch_results = [(test_img_name[i],test_label_score_batch[i])]\n results = results + batch_results\n # 将测试结果写入csv文件中\n write_csv(results, opt.result_file)\n print(\"============测试完毕=============\")\n\n\n\ndef write_csv(results,file_name):\n import csv\n #调整为写入模式\n with open(file_name,'w') as f:\n writer=csv.writer(f)\n # 写入标题\n writer.writerow(['filename','probability'])\n #写入元组数据\n writer.writerows(results)\nif __name__ == '__main__':\n train()\n # test()\n\n"
] | [
[
"torch.autograd.Variable",
"torch.save",
"torch.utils.data.DataLoader",
"torch.load",
"torch.nn.functional.softmax",
"sklearn.metrics.roc_auc_score",
"torch.nn.CrossEntropyLoss"
]
] |
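The defect-probability post-processing in test() above (softmax over two logits, keep the positive class, clip exact 0/1 before writing the CSV) reproduced standalone; the logits are made-up values:

import torch as t

logits = t.tensor([[2.0, -1.0], [0.5, 3.0]])  # hypothetical network outputs
scores = t.nn.functional.softmax(logits, dim=1)[:, 1].numpy()
scores[scores == 1] = 0.999999                # clip certain positives
scores[scores < 0.000001] = 0.000001          # clip certain negatives
print(scores)                                 # approx [0.0474 0.9241]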
jeremy24/rnn-classifier | [
"3a79b8b835af7fd2866513b2dcf4b3d812051db2"
] | [
"data_loader.py"
] | [
"from __future__ import print_function\nfrom __future__ import division\nfrom __future__ import absolute_import\n\n\nimport os\nimport math\nimport time\nimport re\nimport json\nimport glob\n# from multiprocessing import Process, RawValue, Lock\n# from multiprocessing.dummy import Pool as ThreadPool\nfrom six.moves import cPickle\nimport numpy as np\n\nfrom decorators import *\nfrom process_real_data import process_ann_files\n\nclass TextLoader(object):\n\tdef __init__(self, data_dir, save_dir, batch_size, seq_length,\n\t\t\t\t encoding='utf-8', todo=1000000,\n\t\t\t\t labeler_fn=None, is_training=False,\n\t\t\t\t read_only=False, max_word_length=None,\n\t\t\t\t using_real_data=False):\n\n\t\tself.data_dir = data_dir\n\t\tself.batch_size = batch_size\n\t\tself.seq_length = seq_length\n\t\tself.encoding = encoding\n\t\tself.labeler_fn = None\n\t\tself.save_dir = save_dir\n\t\tself.pointer = 0\n\t\tself.max_word_length = max_word_length\n\t\tself.using_real_data = using_real_data\n\n\t\t# self.vocab = dict()\n\t\tself.chars = dict()\n\t\tself.tensor = None\n\t\tself.labels = list()\n\t\tself.num_chars = 0\n\t\tself.num_classes = 0\n\t\tself.read_only = read_only\n\t\tself.label_ratio = None\n\n\t\tself._test_batches = None\n\t\tself._train_batches = None\n\n\t\tself.num_batches = 0\n\t\tself.batches = list()\n\t\tself.ratio = None\n\n\t\tself.replace_multiple_spaces = True\n\t\tself.is_training = bool(is_training)\n\n\t\t# make it predictably random\n\t\t# np.random.seed(int(time.time()))\n\t\tnp.random.seed(5)\n\n\t\tif labeler_fn is None and self.is_training:\n\t\t\tprint(\"\\nNO LABELER FUNCTION PROVIDED\\n\")\n\t\t\tself.labeler_fn = lambda l: [str(s).isalpha() for s in l]\n\t\telif not self.is_training:\n\t\t\tprint(\"\\nLabeler function not needed since model is not training\")\n\t\t\t# This will now throw an error if something tries to call it\n\t\t\t# since we should not ever be labeling data if not training\n\t\t\t# at least for now\n\t\t\tself.labeler_fn = None\n\t\telse:\n\t\t\tprint(\"User provided a labeler fn:\", labeler_fn)\n\t\t\tself.labeler_fn = labeler_fn\n\n\t\tinput_dir = \"./inputs\"\n\n\t\tif self.is_training:\n\t\t\tif self.have_saved_data():\n\t\t\t\tself.load_preprocessed()\n\t\t\telse:\n\t\t\t\tself.preprocess(input_dir, todo=todo)\n\t\telse:\n\t\t\tif self.have_saved_data():\n\t\t\t\tself.load_preprocessed()\n\t\t\telse:\n\t\t\t\tprint(\"\\nWe are not training and we have no preprocessed data!\\n\")\n\t\t\t\tassert False\n\n\t\tself.reset_batch_pointer()\n\n\tdef preprocess_helper(self, seq, raw_str):\n\t\t\"\"\"Take in ALL the x data and return {x: list, y: list}\"\"\"\n\t\tstart = time.time()\n\n\t\tprint(\"\\tSeq type: \", type(seq))\n\t\tseq = np.array(seq)\n\t\tord_mapper = np.vectorize(ord)\n\t\tencoded_seq = ord_mapper(seq)\n\t\tself.chars = np.unique(seq)\n\n\t\tprint(\"Extracted out all chars, have: \", len(self.vocab), \" Took: \", time.time() - start)\n\t\tlabels = np.array(self.labeler_fn(raw_str), dtype=np.uint16)\n\t\tencoded_seq = np.array(encoded_seq, dtype=np.uint16)\n\t\tlabels = np.ndarray.flatten(np.array(labels))\n\t\tassert len(encoded_seq) == len(labels), \"Lens don't match {} != {}\".format(len(encoded_seq), len(labels))\n\t\tassert len(self.chars) == len(self.vocab), \"char and vocab lens mismatch\"\n\t\treturn {\"x\": encoded_seq, \"y\": labels}\n\n\t@staticmethod\n\tdef trim_to_max_word_length(data, max_length):\n\t\tassert type(max_length) == int, \"Max word length must be an int\"\n\t\tassert type(data) == str, \"Data passed to 
trim_to_max_word_length must be a string, got: {}\".format(type(data))\n\t\tprint(\"\\nHave a max word length of {:,}\".format(max_length))\n\t\tprint(\"\\tStarting length: {:,}\".format(len(str(data))))\n\t\tlocal_data = str(data).split()\n\t\tkeep = \"\"\n\t\tfor x in local_data:\n\t\t\tif len(str(x)) <= max_length:\n\t\t\t\tkeep += str(x) + \" \"\n\t\tprint(\"\\tNew length: {:,}\".format(len(keep)))\n\t\treturn keep\n\n\tdef real_data_helper(self, dir_path):\n\t\tprint(\"\\tdirpath: [{}] type: {}\".format(dir_path, type(dir_path)))\n\t\text = \".labeled\"\n\t\tfolder = \"labeled_data\"\n\t\tassert os.path.exists(dir_path)\n\t\treplacement = chr(1)\n\t\tprocess_ann_files(dir_path, replace_char=replacement, ext=ext, folder=folder)\n\t\tprint(\"\\tDone processing ann files\")\n\t\tlabeled_dir = os.path.join(dir_path, folder)\n\n\n\t\tseq = \"\"\n\t\tseq_ = \"\"\n\t\tinput_dir = dir_path + \"/**/*.txt\"\n\t\tprint(\"\\tInput files: \", input_dir)\n\t\tfiles = glob.glob(input_dir, recursive=True)\n\t\tassert len(files) > 0, \"Input directory is empty\"\n\t\tprint(\"\\tProcessing {:,} input files\".format(len(files)))\n\t\tfor filename in files:\n\t\t\twith open(filename, \"r\") as fin:\n\t\t\t\tfor line in fin:\n\t\t\t\t\tseq += line\n\t\t\tlabels = filename.split(\"/\")\n\t\t\tlabels[-1] = folder + \"/\" + labels[-1] + ext\n\t\t\tlabels = \"/\".join(labels)\n\n\t\t\twith open(labels, \"r\") as fin:\n\t\t\t\tfor line in fin:\n\t\t\t\t\tseq_ += line\n\n\t\tseq_ = list(seq_)\n\t\tseq = list(seq)\n\t\tfor i in range(len(seq_)):\n\t\t\tseq_[i] = seq_[i] == replacement\n\n\t\tseq = np.array(seq)\n\t\tlabels = np.array(seq_)\n\n\t\tord_mapper = np.vectorize(ord)\n\t\tencoded_seq = ord_mapper(seq)\n\t\tself.chars = np.unique(seq)\n\n\t\tassert len(encoded_seq) == len(labels)\n\t\tassert len(set(labels)) == 2\n\t\t# exit(1)\n\t\treturn {\"x\": encoded_seq, \"y\": labels}\n\n\tdef preprocess(self, input_path, todo=float(\"inf\")):\n\n\n\t\tdata = None\n\t\tstart = time.time()\n\n\t\tprint(\"\\nProcessing files from input path: {}\".format(input_path))\n\n\t\t# get labels for the data\n\t\tif self.using_real_data:\n\t\t\tprint(\"\\tUsing real data\")\n\t\t\tpreprocess_data = self.real_data_helper(input_path)\n\t\telse:\n\t\t\tprint(\"\\tUsing other data\")\n\t\t\tfiles = os.listdir(input_path)\n\t\t\tfiles = [os.path.join(input_path, filename) for filename in files if filename[0] != \".\"]\n\n\t\t\tfor filename in files:\n\t\t\t\tprint(\"\\t{}\".format(filename))\n\t\t\t\tdata = data + \"\\n\\n\" if data is not None else \"\"\n\t\t\t\twith open(filename, \"r\") as f:\n\t\t\t\t\tfor line in f:\n\t\t\t\t\t\tdata += line\n\n\t\t\tprint(\"\\n\")\n\n\t\t\tif self.max_word_length is not None:\n\t\t\t\tdata = self.trim_to_max_word_length(data, self.max_word_length)\n\n\t\t\tself.chars = list()\n\t\t\t# self.vocab = dict()\n\t\t\t# self.vocab_size = 0\n\n\t\t\tmin_percent = .05 # 0.20\n\n\t\t\tif \"MODEL_DATA_MIN_PERCENT\" in os.environ:\n\t\t\t\ttry:\n\t\t\t\t\tpassed_value = float(os.environ[\"MODEL_DATA_MIN_PERCENT\"])\n\t\t\t\t\tif 0.0 < passed_value <= 1.0:\n\t\t\t\t\t\tmin_percent = passed_value\n\t\t\t\t\t\tprint(\"Min percent passed in from env and was changed to: \", min_percent)\n\t\t\t\t\telif 0.0 < passed_value <= 100.0:\n\t\t\t\t\t\tmin_percent = passed_value / 100.0\n\t\t\t\t\t\tprint(\"Min percent passed in from env and was changed to: \", min_percent)\n\t\t\t\t\telse:\n\t\t\t\t\t\tprint(\"\\nInvalid value passed in for min percent, not using: \", passed_value)\n\t\t\t\texcept 
ValueError:\n\t\t\t\t\tprint(\"\\nMin percent passed as env variable is not a valid float, not using it: \",\n\t\t\t\t\t\t os.environ[\"MODEL_DATA_MIN_PERCENT\"], \"\\n\")\n\n\t\t\tif todo < len(data) * min_percent:\n\t\t\t\tprint(\"todo of {:,} is less than {}% of {:,}, changing...\"\n\t\t\t\t\t .format(todo, int(min_percent * 100), len(data)))\n\n\t\t\t\ttodo = len(data) * min_percent\n\t\t\t\ttodo = int(todo)\n\n\t\t\tprint(\"Preprocessing {:,} items from data\".format(todo))\n\t\t\tprint(\"Replacing spaces: {}\".format(self.replace_multiple_spaces))\n\t\t\tprint(\"Trimming data to length of todo\")\n\n\t\t\tif self.replace_multiple_spaces:\n\t\t\t\tprint(\"\\nStripping multiple newlines\")\n\t\t\t\tprint(\"\\tBefore: {:,}\".format(len(data)))\n\t\t\t\t# data = re.sub(r\"[\\n]{3,}\", \"\\n\", data)\n\t\t\t\tdata = re.sub(r\"[\\t]{2}\", \"\\t\", data)\n\t\t\t\tdata = re.sub(r\"[\\t]{2}\", \"\\t\", data)\n\t\t\t\tdata = re.sub(r\"[\\n]{2}\", \"\\n\", data)\n\t\t\t\tprint(\"\\tAfter: {:,}\".format(len(data)))\n\n\t\t\tdata = data[:todo]\n\t\t\t# flatten = lambda l: [item for sublist in l for item in sublist]\n\t\t\tstart = time.time()\n\t\t\tseqs = list(data)\n\n\t\t\t# make sure its flat\n\t\t\tseqs = np.ndarray.flatten(np.array(seqs))\n\n\t\t\tlabel_start = time.time()\n\n\t\t\tprint(\"Starting preprocess {:,} items\"\n\t\t\t\t .format(len(seqs)))\n\n\t\t\tpreprocess_data = self.preprocess_helper(seqs, data)\n\t\t\tprint(\"Labels generated in {:,.3f}\".format(time.time() - label_start))\n\n\t\t# if/else is done so grab the correct data out\n\t\tencoded = preprocess_data[\"x\"]\n\t\tlabels = preprocess_data[\"y\"]\n\n\n\n\t\t# drop any dupes\n\t\tself.chars = list(set(self.chars))\n\n\t\tassert len(self.chars) == len(self.vocab)\n\t\tassert len(encoded) == len(labels)\n\n\t\t# these are what make_batches looks for\n\t\tself.tensor = encoded\n\t\tself.labels = labels\n\n\t\tnum_true_labels = np.sum(self.labels)\n\n\t\tself.ratio = num_true_labels / len(self.labels)\n\n\t\tprint(\"Have {:,} true labeled chars out of {:,} {:.4f}%\"\n\t\t\t .format(num_true_labels, len(self.labels), self.ratio * 100.0))\n\n\t\tself.ratio = 1.0 / self.ratio\n\n\t\tassert len(self.labels) == len(self.tensor)\n\n\t\t# total number of chars in sample\n\t\tself.num_chars = len(self.tensor)\n\t\tself.num_classes = len(set(self.labels))\n\n\t\tprint(\"Processing took {:.3f} vocab size: {}\"\n\t\t\t .format(time.time() - start, self.vocab_size))\n\n\t\tself.save_vocab_data()\n\t\tself.create_batches()\n\n\t@property\n\tdef vocab_size(self):\n\t\t\"\"\"\n\t\t\tA dynamic getter for the vocabulary size, just in case it changes\n\t\t\t:return: len(self.vocab)\n\t\t\"\"\"\n\t\tassert self.vocab is not None, \"Cannot get vocab size, vocab is None\"\n\t\treturn len(self.vocab)\n\n\t@staticmethod\n\t@format_float(precision=3)\n\tdef to_gb(num):\n\t\treturn num / math.pow(2, 30)\n\n\tdef sample_batches(self, size=30):\n\t\tprint(\"\\nSAMPLE\")\n\t\tbatch = self.test_batches[0]\n\n\t\t# batch[batch_num][x|y][index_in_seq]\n\t\tx = batch[0][1][:size]\n\t\ty = np.array(batch[1][1][:size])\n\n\t\tassert len(batch) == 2, \"Batch has more than x, y pairs in it\"\n\t\tprint(\"\\tBatch size: {:,}\".format(len(batch[0])))\n\t\tprint(\"\\tSeq length: {:,}\".format(len(batch[0][0])))\n\t\tprint(\"\\tSample size: {:,}\".format(size))\n\n\t\tassert len(x) == len(y)\n\t\tz = [self.reverse_vocab[idx] for idx in x]\n\t\tprint(\"X: \", \"\".join(z).replace(\"\\n\", \" \"))\n\t\tprint(\"Y: \", \"\".join([str(item) for item in 
y]))\n\t\tprint(\"END SAMPLE\\n\")\n\n\tdef trim_data(self):\n\t\t# chop off the end to make sure we have an even number of items\n\t\tsize = self.seq_length * self.batch_size\n\t\tchop_line = (len(self.tensor) % size)\n\t\tchop_line = len(self.tensor) - chop_line\n\t\tprint(\"size: {:,} len: {:,} chopline: {:,}\".format(size, len(self.tensor), chop_line))\n\t\tself.tensor = self.tensor[:chop_line]\n\t\tself.labels = self.labels[:chop_line]\n\t\tassert len(self.tensor) == len(self.labels)\n\t\tself.num_batches = len(self.tensor) // size\n\n\tdef create_batches(self):\n\t\tself.trim_data()\n\t\tprint(\"Batch size: {:,} have {:,} batches\"\n\t\t\t .format(self.batch_size, self.num_batches))\n\n\t\t# When the data (tensor)is too small,\n\t\t# let's give them a better error message\n\t\tif self.num_batches < 1:\n\t\t\tassert False, \"Not enough data. Make seq_length and batch_size small.\"\n\n\t\tprint(\"Total self.tensor size: \", self.to_gb(self.tensor.nbytes), \"GB\")\n\t\tprint(\"Num Batches to make: \", self.num_batches)\n\n\t\txdata = np.array(self.tensor, dtype=np.uint16)\n\t\tydata = np.array(self.labels, dtype=np.uint16)\n\n\t\tassert len(ydata) == len(xdata), \"Data lengths don't match: {} != {}\".format(len(xdata), len(ydata))\n\n\t\tprint(\"\\nHave {} different labels\".format(self.vocab_size))\n\t\tprint(\"len: {:,}\".format(len(xdata)))\n\n\t\tnum_chunks = int(len(xdata) / (self.seq_length * self.batch_size))\n\n\t\tprint(\"Splitting {} into {} chunks\".format(len(xdata), num_chunks))\n\t\tprint(\"{:,} / {:,} = {:,}\".format(len(xdata), (self.seq_length * self.batch_size), num_chunks))\n\t\tprint(\"seq len: {:,} batch size: {:,}\".format(self.seq_length, self.batch_size))\n\n\t\tx_batches = np.split(xdata, num_chunks)\n\t\tx_batches = [np.split(x, int(len(x) / self.seq_length)) for x in x_batches]\n\n\t\ty_batches = np.split(ydata, num_chunks)\n\t\ty_batches = [np.split(y, int(len(y) / self.seq_length)) for y in y_batches]\n\n\t\tprint(\"\\n{} batches of {} items with {} length strings\\n\".format(\n\t\t\tlen(x_batches),\n\t\t\tlen(x_batches[0]),\n\t\t\tlen(x_batches[0][0])\n\t\t))\n\n\t\t# z = x_batches[0]\n\t\t# print(\"\\nA sample of the data: (## signifies the boundaries between sequences)\\n\")\n\t\t# print(\"{}##{}##{}##{}##{}##\".format(\"\".join([chr(a) for a in z[0]]),\n\t\t# \t\t\"\".join([chr(a) for a in z[1]]),\n\t\t# \t\t\"\".join([chr(a) for a in z[2]]),\n\t\t# \t\t\"\".join([chr(a) for a in z[3]]),\n\t\t# \t\t\"\".join([chr(a) for a in z[4]])).replace(\"\\t\", \"\\t\"))\n\n\t\t# only drop up to a quarter of the batches\n\t\tmax_drop = len(x_batches) / 4\n\n\t\t# drop some sparsely labeled x,y sets to improve the ratio if we can\n\t\tself.batches, sums, dropped = self.drop_sparse(self.batch_size, max_drop, y_batches, x_batches)\n\n\t\tprint(\"\\nSums:\")\n\t\tprint(\"\\tAvg: \", np.mean(sums), \"\\n\\tMin: \", np.min(sums), \"\\n\\tMax: \", np.max(sums))\n\t\tbatch_members = len(y_batches[0][0]) * len(y_batches[0])\n\t\tpercent = [round((np.mean(s) / batch_members) * 100, 3) for s in sums]\n\t\tprint(\"\\nLabel Ratios:\")\n\t\tprint(\"\\n\\tAvg: \", round(np.mean(percent)), \"%\\n\\tMin: \", np.min(percent),\n\t\t\t \"%\\n\\tMax: \", np.max(percent), \"%\\n\\tMedian: \", np.median(percent), \"%\")\n\n\t\t# this save call will init both the train and test batch properties\n\t\t# on the object and will cause the data to be subdivided correctly\n\t\t# after this they return their local copy and will not touch the batches property\n\t\t# so after the call we set 
self.batches to None to prevent it being used\n\t\tself.save_test_train()\n\t\tself.batches = None\n\n\t\tself.sample_batches()\n\n\t\tprint(\"Train Batches: {}\tTest Batches: {}\"\n\t\t\t .format(len(self.train_batches), len(self.test_batches)))\n\n\t\tprint(\"Build batches done...\")\n\n\t\tif not self.have_saved_data():\n\t\t\tprint(\"\\nData failed to save!\\n\")\n\t\t\texit(1)\n\n\t@staticmethod\n\tdef drop_sparse(batch_size, max_drop, y_batches, x_batches):\n\t\t\"\"\"Does not actually drop anything; the drop logic below is commented out\"\"\"\n\t\tprint(\"\\nDropping sparse labeled rows\")\n\t\tdata = list()\n\t\tsums = list()\n\t\tdropped = 0\n\t\tfor i in range(len(y_batches)):\n\t\t\tx, y = x_batches[i], y_batches[i]\n\t\t\ty = np.array(y)\n\t\t\t# if less than 5% are highlighted, make it eligible for dropping\n\t\t\t# if dropped < max_drop and np.sum(np.ndarray.flatten(y)) < batch_size * .05:\n\t\t\t# \tdropped += 1\n\t\t\t# \tcontinue\n\t\t\tsums.append(np.sum(y))\n\t\t\titem = np.array([x, y])\n\t\t\titem.flags.writeable = False\n\t\t\tdata.append(item)\n\t\tprint(\"\\tDropped\", dropped, \"batches\")\n\t\treturn data, sums, dropped\n\n\t@property\n\tdef reverse_vocab(self):\n\t\t\"\"\"Make a reverse lookup dict for the vocabulary characters\"\"\"\n\t\treturn {v: k for k, v in self.vocab.items()}\n\n\t@property\n\tdef test_batches(self):\n\t\t\"\"\"The first time this is called it will split off its chunk of the batches\"\"\"\n\t\tif self._test_batches is None:\n\t\t\tif not self.is_training:\n\t\t\t\tmsg = \"Don't have testing batches from file\"\n\t\t\t\traise Exception(msg)\n\t\t\tsize = int(self.num_batches * 0.20)\n\t\t\tself._test_batches = self.batches[:size]\n\t\treturn self._test_batches if self._test_batches is not None else list()\n\n\t@property\n\tdef train_batches(self):\n\t\t\"\"\"The first time this is called it will split off its chunk of the batches\"\"\"\n\t\tif self._train_batches is None:\n\t\t\tif not self.is_training:\n\t\t\t\tmsg = \"Don't have training batches from file\"\n\t\t\t\traise Exception(msg)\n\t\t\tsize = int(self.num_batches * 0.20)\n\t\t\tself._train_batches = self.batches[size:]\n\t\treturn self._train_batches if self._train_batches is not None else list()\n\n\tdef save_test_train(self):\n\t\t\"\"\"Save train and test data to disk in .npy format\"\"\"\n\t\ttrain_file = os.path.join(self.save_dir, \"train_batches.npy\")\n\t\ttest_file = os.path.join(self.save_dir, \"test_batches.npy\")\n\t\ttry:\n\t\t\tnp.save(train_file, self.train_batches)\n\t\t\tnp.save(test_file, self.test_batches)\n\t\texcept Exception as ex:\n\t\t\tprint(\"Unable to save test/train data: \", ex)\n\t\t\texit(1)\n\n\tdef save_vocab_data(self):\n\t\t\"\"\"Save the vocabulary data to disk\"\"\"\n\t\tvocab_file = os.path.join(self.save_dir, \"vocab.pkl\")\n\t\tif self.read_only:\n\t\t\tprint(\"\\nNot saving vocab, in read only mode\\n\")\n\t\t\treturn\n\t\ttry:\n\t\t\tprint(\"Dumping vocab to file...\")\n\t\t\twith open(vocab_file, 'wb') as fout:\n\t\t\t\tcPickle.dump(self.chars, fout)\n\t\texcept Exception as ex:\n\t\t\tprint(\"Error saving vocab: \", ex)\n\t\t\texit(1)\n\n\tdef have_saved_data(self):\n\t\t\"\"\"Check if we have the appropriate saved data files\"\"\"\n\t\ttrain_file = os.path.join(self.save_dir, \"train_batches.npy\")\n\t\ttest_file = os.path.join(self.save_dir, \"test_batches.npy\")\n\t\tvocab_file = os.path.join(self.save_dir, \"vocab.pkl\")\n\t\tif not os.path.exists(train_file):\n\t\t\tprint(\"Don't have train file:\", train_file)\n\t\t\treturn False\n\t\tif not 
os.path.exists(test_file):\n\t\t\tprint(\"Don't have test file:\", test_file)\n\t\t\treturn False\n\t\tif not os.path.exists(vocab_file):\n\t\t\tprint(\"Don't have vocab file:\", vocab_file)\n\t\t\treturn False\n\t\treturn True\n\n\tdef load_preprocessed_vocab(self):\n\t\t\"\"\"\n\t\t\tLoad in a saved vocab file\n\t\t\"\"\"\n\t\ttry:\n\t\t\tvocab_file = os.path.join(self.save_dir, \"vocab.pkl\")\n\t\t\twith open(vocab_file, 'rb') as f:\n\t\t\t\tself.chars = cPickle.load(f)\n\t\t\t\tassert len(self.chars) > 0, \"Loaded vocabulary is empty!\"\n\t\texcept Exception as ex:\n\t\t\tprint(\"Unable to load preprocessed vocab file: \", ex)\n\t\t\texit(1)\n\n\t@property\n\tdef vocab(self):\n\t\t\"\"\"\n\t\t\tThe vocab map, this is computed every call.\n\t\t\tIt is NOT cached just in case the vocab changes\n\t\t\"\"\"\n\t\tret = {}\n\t\tfor x in self.chars:\n\t\t\tret[x] = ord(x)\n\t\treturn ret\n\n\tdef load_preprocessed(self):\n\t\t\"\"\"\n\t\t\tLoad in preprocessed text data, this will circumvent the normal\n\t\t\tway we initialize the train and test batch split by directly setting the values\n\t\t\"\"\"\n\t\tassert self.have_saved_data(), \"Can't load preprocessed data, files are missing\"\n\n\t\tprint(\"Load preprocessed data:\")\n\t\ttrain_file = os.path.join(self.save_dir, \"train_batches.npy\")\n\t\ttest_file = os.path.join(self.save_dir, \"test_batches.npy\")\n\n\t\tself.load_preprocessed_vocab()\n\n\t\t# self.vocab_size = len(self.chars)\n\n\n\t\tprint(\"\\tloading test and train files...\")\n\t\tself._train_batches = np.load(train_file)\n\t\tself._test_batches = np.load(test_file)\n\t\tprint(\"\\tTest size: \", len(self._test_batches))\n\t\tprint(\"\\tTrain size: \", len(self._train_batches))\n\t\tassert len(self._test_batches) > 0\n\t\tassert len(self._train_batches) > 0\n\t\tself.batches = None\n\t\tself.num_batches = None\n\n\t\twith open(os.path.join(self.save_dir, \"hyper_params.json\"), \"r\") as saved_args:\n\t\t\tsaved = json.load(saved_args)\n\t\t\ttry:\n\t\t\t\tself.num_batches = saved[\"num_batches\"] or None\n\t\t\t\tself.label_ratio = saved[\"label_ratio\"] or None\n\t\t\t\tself.num_classes = saved[\"num_classes\"] or None\n\t\t\t\tself.num_chars = saved[\"num_chars\"] or None\n\t\t\t\tprint(\"\\tRatio: \", self.label_ratio)\n\t\t\t\tprint(\"\\tNum classes: \", self.num_classes)\n\t\t\texcept KeyError as ex:\n\t\t\t\tprint(\"data_loader is missing a saved params key: \", ex)\n\t\t\t\texit(1)\n\n\tdef next_batch(self):\n\t\t\"\"\"DEPRECATED\"\"\"\n\t\t# x, y = self.x_batches[self.pointer], self.y_batches[self.pointer]\n\t\titem = self.train_batches[self.pointer]\n\t\t# make immutable\n\t\tself.pointer += 1\n\t\tif self.pointer == len(self.train_batches):\n\t\t\tself.reset_batch_pointer()\n\t\treturn item[0], item[1]\n\n\tdef next_train_batch(self):\n\t\t\"\"\"Fetch the next training batch\"\"\"\n\t\titem = self.train_batches[self.pointer]\n\t\tself.pointer += 1\n\t\tif self.pointer >= len(self.train_batches):\n\t\t\tself.reset_batch_pointer()\n\t\treturn item[0], item[1]\n\n\tdef next_test_batch(self):\n\t\t\"\"\"Fetch the next test batch\"\"\"\n\t\titem = self.test_batches[self.pointer]\n\t\tself.pointer += 1\n\t\tif self.pointer >= len(self.test_batches):\n\t\t\tself.reset_batch_pointer()\n\t\treturn item[0], item[1]\n\n\tdef reset_batch_pointer(self, quiet=False):\n\t\t\"\"\"Reset the batch pointer\"\"\"\n\t\tif not quiet:\n\t\t\tprint(\"Resetting batch pointer...\")\n\t\tself.pointer = 0\n"
] | [
[
"numpy.max",
"numpy.array",
"numpy.vectorize",
"numpy.random.seed",
"numpy.sum",
"numpy.median",
"numpy.load",
"numpy.min",
"numpy.split",
"numpy.mean",
"numpy.save",
"numpy.unique"
]
] |
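A minimal standalone sketch of the batch-shaping step that create_batches in the loader above performs with nested np.split (array sizes here are illustrative, not from the source file):

import numpy as np

seq_length, batch_size = 4, 2
data = np.arange(24, dtype=np.uint16)          # stand-in for the encoded char tensor
size = seq_length * batch_size
data = data[:len(data) - len(data) % size]     # trim_data(): chop to a multiple of size
num_chunks = len(data) // size
x_batches = np.split(data, num_chunks)                              # one chunk per batch
x_batches = [np.split(x, len(x) // seq_length) for x in x_batches]  # rows of seq_length
print(len(x_batches), len(x_batches[0]), len(x_batches[0][0]))      # 3 2 4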
heasungkim/reinforcementLearning | [
"e6114aa2665294b0b53b5693c9c45007ec445ef1"
] | [
"main2_Agent.py"
] | [
"\nimport tensorflow as tf\nimport numpy as np\nfrom replayBuffer import ReplayBuffer\n\nclass Agent_DDPG(object):\n def __init__(self, action_size, state_size, action_limit,):\n self.memory_size = 10000\n self.replayBuffer = ReplayBuffer(self.memory_size)\n self.sess = tf.Session()\n\n self.discount_factor = 0.9\n self.action_variance = 3\n self.critic_learning_rate = 0.001\n self.actor_learning_rate = 0.002\n self.batch_size = 32\n \n self.action_size, self.state_size, self.action_limit = action_size, state_size, action_limit,\n self.input_state = tf.placeholder(tf.float32, [None, state_size], 's')\n self.input_state_ = tf.placeholder(tf.float32, [None, state_size], 's_')\n self.R = tf.placeholder(tf.float32, [None, 1], 'r')\n\n with tf.variable_scope('Actor'):\n self.a = self.build_actor_network(self.input_state, scope='eval', trainable=True)\n a_ = self.build_actor_network(self.input_state_, scope='tar', trainable=False)\n with tf.variable_scope('Critic'):\n q_eval = self.build_critic_network(self.input_state, self.a, scope='eval', trainable=True)\n q_target = self.build_critic_network(self.input_state_, a_, scope='target', trainable=False)\n\n self.actor_evaluation_params = tf.get_collection(key=tf.GraphKeys.GLOBAL_VARIABLES, scope='Actor/eval')\n self.actor_target_params = tf.get_collection(key=tf.GraphKeys.GLOBAL_VARIABLES, scope='Actor/tar')\n self.critic_evaluation_params = tf.get_collection(key=tf.GraphKeys.GLOBAL_VARIABLES, scope='Critic/eval')\n self.critic_target_params = tf.get_collection(key=tf.GraphKeys.GLOBAL_VARIABLES, scope='Critic/tar')\n\n self.replace = [tf.assign(t, (1 - 0.01 ) * t + 0.01 * e)\n for t, e in zip(self.actor_target_params + self.critic_target_params,\n self.actor_evaluation_params + self.critic_evaluation_params)]\n\n '''\n dJ/dtheta = E[ dQ/dtheta ] \n\n '''\n # Actor Loss 는 Q로부터 내려오는 값을 maximize 하면 된다(논문 참조)\n self.a_loss = tf.reduce_mean(q_eval) # maximize the q\n # Maximize Q 를 해야하므로 learning rate에 '-' 를 붙인다.\n self.atrain = tf.train.AdamOptimizer(-self.actor_learning_rate).minimize(tf.reduce_mean(q_eval),\n var_list=self.actor_evaluation_params)\n\n # self.c_train 을 호출할때 self.a 에 배치의 action을 넣게 된다.\n # Placeholder가 아닌 self.a 에 직접 값을 대입하는 것!\n # s a r s_ 를 이용해서 critic을 업데이트 하는데, 정석으로 구한 y가 트루 라벨, 뉴럴넷에 값을 넣고 나오는 것이 우리의 prediction이다.\n # True Label, y = r(s,u_t(s)) + gamma*Q(s_, u_t(s_))\n q_true = self.R + self.discount_factor * q_target\n\n # Prediction, Q = q_eval\n # 우리가 mseLoss를 구하려면 q_eval을 구해야 하므로 self.input_state에 피딩을 해 주어야 함.\n # 또한 q_true 를 구하기 위해 self.R 과 q_target에 들어갈 self.input_state_ 도 피딩 해주어야 함.\n self.mseloss = tf.losses.mean_squared_error(labels=q_true, predictions=q_eval)\n # 이 부분은 오직 Critic net을 업데이트하기위한 Loss이다. 
때문에 var_list를 Critic evaluation network로 지정해주어야한다.\n self.ctrain = tf.train.AdamOptimizer(self.critic_learning_rate).minimize(self.mseloss, var_list=self.critic_evaluation_params)\n\n\n # 네트워크를 만들고 항상 초기화를 해준다.\n self.sess.run(tf.global_variables_initializer())\n\n self.actor_loss_history = []\n self.critic_loss_history = []\n\n def store_transition(self, s, a, r, s_):\n self.replayBuffer.add(s,a,r,s_)\n\n def choose_action(self, s):\n return np.clip(np.random.normal(self.sess.run(self.a, {self.input_state: s[np.newaxis, :]})[0] , self.action_variance), -2, 2)\n\n def learn(self):\n if self.replayBuffer.count() > self.batch_size:\n self.action_variance *= .9995\n self.sess.run(self.replace)\n\n batch = self.replayBuffer.get_batch(self.batch_size)\n batch_s = np.asarray([x[0] for x in batch])\n batch_a = np.asarray([x[1] for x in batch])\n batch_r = np.asarray([[x[2]] for x in batch])\n batch_s_ = np.asarray([x[3] for x in batch])\n\n actor_loss, _ = self.sess.run([self.a_loss, self.atrain], {self.input_state: batch_s})\n critic_loss, _ = self.sess.run([self.mseloss, self.ctrain], {self.input_state: batch_s, self.a: batch_a, self.R: batch_r, self.input_state_: batch_s_})\n\n self.actor_loss_history.append(actor_loss)\n self.critic_loss_history.append(critic_loss)\n\n def build_actor_network(self, s, scope, trainable):\n actor_hidden_size = 30\n with tf.variable_scope(scope):\n hidden1 = tf.layers.dense(s, actor_hidden_size, activation=tf.nn.relu, name='l1', trainable=trainable)\n a = tf.layers.dense(hidden1, self.action_size, activation=tf.nn.tanh, name='a', trainable=trainable)\n return tf.multiply(a, self.action_limit, name='scaled_a')\n\n def build_critic_network(self, s, a, scope, trainable):\n with tf.variable_scope(scope):\n critic_hidden_size = 30\n hidden1 = tf.layers.dense(s, critic_hidden_size, name='s1', trainable=trainable) \\\n + tf.layers.dense(a, critic_hidden_size, name='a1', trainable=trainable) \\\n + tf.get_variable('b1', [1, critic_hidden_size], trainable=trainable)\n hidden1 = tf.nn.relu(hidden1)\n return tf.layers.dense(hidden1, 1, trainable=trainable)\n\n def plot_loss(self):\n import matplotlib\n import matplotlib.pyplot as plt\n matplotlib.font_manager._rebuild()\n plt.rcParams['font.family'] = 'Times New Roman'\n plt.rcParams.update({'font.size': 25})\n matplotlib.rc('text', usetex=True)\n plt.title('$\\mathit{history}$', fontsize=25)\n ms = 0.1\n me = 1\n line_width = 0.1\n plt.ylabel('Loss')\n plt.xlabel('Training steps')\n\n actor_loss_mean = sum(self.actor_loss_history)/len(self.actor_loss_history)\n self.actor_loss_history /= actor_loss_mean\n critic_loss_mean = sum(self.critic_loss_history)/len(self.critic_loss_history)\n self.critic_loss_history /= critic_loss_mean\n\n plt.plot(np.arange(len(self.actor_loss_history)), self.actor_loss_history, '-p', color='b', markevery=me, label=r'actor loss', lw=line_width,\n markersize=ms)\n plt.plot(np.arange(len(self.critic_loss_history)), self.critic_loss_history, '--^', color='r', markevery=me, label=r'critic loss', lw=line_width, markersize=ms)\n\n plt.grid()\n ax = plt.subplot(111)\n box = ax.get_position()\n ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])\n ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))\n plt.ylim(0, 10)\n plt.show()\n\n def plot_reward(self, reward_history):\n import matplotlib.pyplot as plt\n plt.plot(np.arange(len(reward_history)), reward_history)\n plt.ylabel('Reward')\n plt.xlabel('Episodes')\n plt.grid()\n plt.show()\n\n"
] | [
[
"tensorflow.losses.mean_squared_error",
"tensorflow.global_variables_initializer",
"tensorflow.variable_scope",
"tensorflow.layers.dense",
"tensorflow.get_collection",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.rcParams.update",
"tensorflow.train.AdamOptimizer",
"tensorflow.nn.relu",
"matplotlib.font_manager._rebuild",
"tensorflow.Session",
"matplotlib.pyplot.title",
"matplotlib.rc",
"tensorflow.placeholder",
"tensorflow.get_variable",
"matplotlib.pyplot.show",
"tensorflow.multiply",
"tensorflow.assign",
"numpy.asarray",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.ylabel",
"tensorflow.reduce_mean"
]
] |
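The `self.replace` ops in the agent above implement the soft (Polyak) target update t <- (1 - tau) * t + tau * e with tau = 0.01. A framework-free sketch of the same update rule (the weight values here are toy numbers, not from the file):

import numpy as np

tau = 0.01
target = np.zeros(3)                     # target-network weights
evaluation = np.array([1.0, 2.0, 3.0])   # evaluation-network weights
for _ in range(100):
    target = (1 - tau) * target + tau * evaluation  # what each tf.assign op does per step
print(target)  # slowly approaches the evaluation weights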
Ankur3107/transformers-1 | [
"8e908c8c74f556a82534f4cf1e7a1b4f7b55d24c"
] | [
"tests/test_feature_extraction_speech_to_text.py"
] | [
"# coding=utf-8\n# Copyright 2021 HuggingFace Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport itertools\nimport random\nimport unittest\n\nimport numpy as np\n\nfrom transformers import is_speech_available\nfrom transformers.testing_utils import require_torch, require_torchaudio\n\nfrom .test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin\n\n\nif is_speech_available():\n from transformers import Speech2TextFeatureExtractor\n\nglobal_rng = random.Random()\n\n\ndef floats_list(shape, scale=1.0, rng=None, name=None):\n \"\"\"Creates a random float32 tensor\"\"\"\n if rng is None:\n rng = global_rng\n\n values = []\n for batch_idx in range(shape[0]):\n values.append([])\n for _ in range(shape[1]):\n values[-1].append(rng.random() * scale)\n\n return values\n\n\n@require_torch\n@require_torchaudio\nclass Speech2TextFeatureExtractionTester(unittest.TestCase):\n def __init__(\n self,\n parent,\n batch_size=7,\n min_seq_length=400,\n max_seq_length=2000,\n feature_size=24,\n num_mel_bins=24,\n padding_value=0.0,\n sampling_rate=16_000,\n return_attention_mask=True,\n do_normalize=True,\n ):\n self.parent = parent\n self.batch_size = batch_size\n self.min_seq_length = min_seq_length\n self.max_seq_length = max_seq_length\n self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)\n self.feature_size = feature_size\n self.num_mel_bins = num_mel_bins\n self.padding_value = padding_value\n self.sampling_rate = sampling_rate\n self.return_attention_mask = return_attention_mask\n self.do_normalize = do_normalize\n\n def prepare_feat_extract_dict(self):\n return {\n \"feature_size\": self.feature_size,\n \"num_mel_bins\": self.num_mel_bins,\n \"padding_value\": self.padding_value,\n \"sampling_rate\": self.sampling_rate,\n \"return_attention_mask\": self.return_attention_mask,\n \"do_normalize\": self.do_normalize,\n }\n\n def prepare_inputs_for_common(self, equal_length=False, numpify=False):\n def _flatten(list_of_lists):\n return list(itertools.chain(*list_of_lists))\n\n if equal_length:\n speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]\n else:\n # make sure that inputs increase in size\n speech_inputs = [\n floats_list((x, self.feature_size))\n for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)\n ]\n if numpify:\n speech_inputs = [np.asarray(x) for x in speech_inputs]\n return speech_inputs\n\n\n@require_torch\n@require_torchaudio\nclass Speech2TextFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):\n\n feature_extraction_class = Speech2TextFeatureExtractor if is_speech_available() else None\n\n def setUp(self):\n self.feat_extract_tester = Speech2TextFeatureExtractionTester(self)\n\n def _check_zero_mean_unit_variance(self, input_vector):\n self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))\n self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))\n\n def test_call(self):\n # Tests 
that all calls wrap to encode_plus and batch_encode_plus\n        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())\n        # create three inputs of length 800, 1000, and 1200\n        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]\n        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]\n\n        # Test feature size\n        input_features = feature_extractor(np_speech_inputs, padding=True, return_tensors=\"np\").input_features\n        self.assertTrue(input_features.ndim == 3)\n        self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size)\n\n        # Test not batched input\n        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors=\"np\").input_features\n        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors=\"np\").input_features\n        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))\n\n        # Test batched\n        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors=\"np\").input_features\n        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors=\"np\").input_features\n        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):\n            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))\n\n    def test_cepstral_mean_and_variance_normalization(self):\n        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())\n        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]\n\n        paddings = [\"longest\", \"max_length\", \"do_not_pad\"]\n        max_lengths = [None, 16, None]\n        for max_length, padding in zip(max_lengths, paddings):\n            inputs = feature_extractor(\n                speech_inputs, padding=padding, max_length=max_length, return_attention_mask=True\n            )\n            input_features = inputs.input_features\n            attention_mask = inputs.attention_mask\n            fbank_feat_lengths = [np.sum(x) for x in attention_mask]\n\n            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])\n            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])\n            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])\n\n    def test_cepstral_mean_and_variance_normalization_np(self):\n        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())\n        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]\n\n        paddings = [\"longest\", \"max_length\", \"do_not_pad\"]\n        max_lengths = [None, 16, None]\n        for max_length, padding in zip(max_lengths, paddings):\n            inputs = feature_extractor(\n                speech_inputs, max_length=max_length, padding=padding, return_tensors=\"np\", return_attention_mask=True\n            )\n            input_features = inputs.input_features\n            attention_mask = inputs.attention_mask\n            fbank_feat_lengths = [np.sum(x) for x in attention_mask]\n\n            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])\n            self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6)\n            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])\n            self.assertTrue(input_features[1][fbank_feat_lengths[1] :].sum() < 1e-6)\n            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])\n\n    def test_cepstral_mean_and_variance_normalization_trunc_max_length(self):\n        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())\n        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]\n        inputs = 
feature_extractor(\n            speech_inputs,\n            padding=\"max_length\",\n            max_length=4,\n            truncation=True,\n            return_tensors=\"np\",\n            return_attention_mask=True,\n        )\n        input_features = inputs.input_features\n        attention_mask = inputs.attention_mask\n        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)\n\n        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])\n        self._check_zero_mean_unit_variance(input_features[1])\n        self._check_zero_mean_unit_variance(input_features[2])\n\n    def test_cepstral_mean_and_variance_normalization_trunc_longest(self):\n        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())\n        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]\n        inputs = feature_extractor(\n            speech_inputs,\n            padding=\"longest\",\n            max_length=4,\n            truncation=True,\n            return_tensors=\"np\",\n            return_attention_mask=True,\n        )\n        input_features = inputs.input_features\n        attention_mask = inputs.attention_mask\n        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)\n\n        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])\n        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])\n        self._check_zero_mean_unit_variance(input_features[2])\n\n        # make sure that if max_length < longest -> then pad to max_length\n        self.assertEqual(input_features.shape, (3, 4, 24))\n\n        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]\n        inputs = feature_extractor(\n            speech_inputs,\n            padding=\"longest\",\n            max_length=16,\n            truncation=True,\n            return_tensors=\"np\",\n            return_attention_mask=True,\n        )\n        input_features = inputs.input_features\n        attention_mask = inputs.attention_mask\n        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)\n\n        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])\n        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])\n        self._check_zero_mean_unit_variance(input_features[2])\n\n        # make sure that if max_length > longest -> then pad to longest\n        self.assertEqual(input_features.shape, (3, 6, 24))\n"
] | [
[
"numpy.asarray",
"numpy.sum",
"numpy.mean",
"numpy.allclose",
"numpy.var"
]
] |
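The `_check_zero_mean_unit_variance` helper in the tests above asserts per-feature cepstral mean and variance normalization over the time axis. A standalone numpy sketch of the property being tested (the tolerances mirror the test; the data is synthetic):

import numpy as np

rng = np.random.default_rng(0)
feats = rng.normal(loc=3.0, scale=2.0, size=(800, 24))    # (frames, mel bins)
feats = (feats - feats.mean(axis=0)) / feats.std(axis=0)  # CMVN over frames
assert np.all(np.abs(np.mean(feats, axis=0)) < 1e-3)
assert np.all(np.abs(np.var(feats, axis=0) - 1) < 1e-3)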
mzegar/node-rapids | [
"27c9e2468372df4fae3779d859089b54c8d32c4f"
] | [
"modules/demo/ipc/graph/python/test_data.py"
] | [
"# Copyright (c) 2021, NVIDIA CORPORATION.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport cudf\nimport cugraph\nimport numpy as np\nimport pandas as pd\nimport datetime as dt\nfrom .convert_matrix import from_cudf_edgelist\nfrom .graph_components import (\n annotate_nodes,\n annotate_edges\n)\n\n\ndef make_synthetic_dataset(**kwargs):\n kwargs.update(direct=True)\n df = cudf.DataFrame.from_pandas(pd.DataFrame({\n \"src\": [0, 1, 2, 3],\n \"dst\": [1, 2, 3, 0],\n \"colors\": [1, 1, 2, 2],\n \"bool\": [True, False, True, True],\n \"char\": [\"a\", \"b\", \"c\", \"d\"],\n \"str\": [\"a\", \"b\", \"c\", \"d\"],\n \"ustr\": [u\"a\", u\"b\", u\"c\", u\"d\"],\n \"emoji\": [\"😋\", \"😋😋\", \"😋\", \"😋\"],\n \"int\": [0, 1, 2, 3],\n \"num\": [0.5, 1.5, 2.5, 3.5],\n \"date_str\": [\n \"2018-01-01 00:00:00\",\n \"2018-01-02 00:00:00\",\n \"2018-01-03 00:00:00\",\n \"2018-01-05 00:00:00\",\n ],\n \"date\": [\n dt.datetime(2018, 1, 1),\n dt.datetime(2018, 1, 1),\n dt.datetime(2018, 1, 1),\n dt.datetime(2018, 1, 1),\n ],\n \"time\": [\n pd.Timestamp(\"2018-01-05\"),\n pd.Timestamp(\"2018-01-05\"),\n pd.Timestamp(\"2018-01-05\"),\n pd.Timestamp(\"2018-01-05\"),\n ],\n }))\n return make_and_shape_hypergraph(df, **kwargs)\n\n\ndef make_and_shape_hypergraph(df, **kwargs):\n hyper = cugraph.hypergraph(df, **kwargs)\n del hyper[\"events\"]\n del hyper[\"entities\"]\n SOURCE = kwargs.get(\"SOURCE\", \"src\")\n TARGET = kwargs.get(\"TARGET\", \"dst\")\n NODEID = kwargs.get(\"NODEID\", \"node_id\")\n EVENTID = kwargs.get(\"EVENTID\", \"event_id\")\n CATEGORY = kwargs.get(\"CATEGORY\", \"category\")\n nodes = hyper[\"nodes\"][[NODEID, CATEGORY]]\n edges = hyper[\"edges\"][[SOURCE, TARGET]]\n # Create graph\n graph, nodes, edges = from_cudf_edgelist(edges, SOURCE, TARGET)\n nodes[\"name\"] = nodes[\"node\"]\n # Add vis components \n nodes = annotate_nodes(graph, nodes, edges)\n edges = annotate_edges(graph, nodes, edges)\n return graph, nodes, edges\n"
] | [
[
"pandas.Timestamp"
]
] |
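cugraph.hypergraph, used above, expands each dataframe row into graph edges; with direct=True, column values are linked to each other rather than through a per-row event node. The sketch below is a rough pandas illustration of that direct expansion (cudf mirrors the pandas API here; the node-naming scheme is made up, not cugraph's exact output schema):

import pandas as pd

df = pd.DataFrame({"src": [0, 1], "dst": [1, 2]})
edges = pd.DataFrame({
    "src": "src::" + df["src"].astype(str),  # one node per (column, value) pair
    "dst": "dst::" + df["dst"].astype(str),
})
print(edges)  # two direct edges, one per input row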
QWDs4Z/elgkjeknf25 | [
"15ed98b4b3bb9cf3437b8ed7a8dbdf92e0e56c83"
] | [
"Algorithm_4/Result_Alg_4_part1/Draw_figures_Alg4_1.py"
] | [
"import matplotlib as mpl\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nfrom pylab import cm\r\na,b = np.loadtxt('FindFault_Alg4_firstTest.csv',unpack=True,delimiter=',',skiprows=1);\r\nfig = plt.figure();\r\nplt.scatter(a,b,color= \"green\",marker= \".\", s=0.5);\r\nplt.xlabel('The number of infection-based ciphertexts (N)',fontsize=16)\r\nplt.ylabel('The number of Sboxes',fontsize=16)\r\n\r\n\r\nplt.savefig(\"Alg4_Simulation_part1_1.png\");\r\n\r\nplt.show()\r\n\r\n"
] | [
[
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.figure",
"numpy.loadtxt",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.scatter"
]
] |
JasonMA606/Student_Performance_ML-DL | [
"ec87d3782371f55e9dd73b57a13b856df78ef7b4"
] | [
"scikit-learn_model/train.py"
] | [
"from __future__ import print_function\n\nimport argparse\nimport os\nimport pandas as pd\n\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.externals import joblib\n\n# TODO: Import any additional libraries you need to define a model\n\n\n# Provided model load function\ndef model_fn(model_dir):\n \"\"\"Load model from the model_dir. This is the same model that is saved\n in the main if statement.\n \"\"\"\n print(\"Loading model.\")\n\n # load using joblib\n model = joblib.load(os.path.join(model_dir, \"model.joblib\"))\n print(\"Done loading model.\")\n\n return model\n\n\n# TODO: Complete the main code\nif __name__ == '__main__':\n\n # All of the model parameters and training parameters are sent as arguments\n # when this script is executed, during a training job\n\n # Here we set up an argument parser to easily access the parameters\n parser = argparse.ArgumentParser()\n\n # SageMaker parameters, like the directories for training data and saving models; set automatically\n # Do not need to change\n parser.add_argument('--output-data-dir', type=str,\n default=os.environ['SM_OUTPUT_DATA_DIR'])\n parser.add_argument('--model-dir', type=str,\n default=os.environ['SM_MODEL_DIR'])\n parser.add_argument('--data-dir', type=str,\n default=os.environ['SM_CHANNEL_TRAIN'])\n\n # TODO: Add any additional arguments that you will need to pass into your model\n parser.add_argument('--n-estimators', type=int, default=100)\n parser.add_argument('--output-dim', type=int, default=2)\n\n # args holds all passed-in arguments\n args = parser.parse_args()\n\n # Read in csv training file\n training_dir = args.data_dir\n train_data = pd.read_csv(os.path.join(\n training_dir, \"train.csv\"), header=None, names=None)\n\n # Labels are in the first column\n train_y = train_data.iloc[:, 0:args.output_dim]\n train_x = train_data.iloc[:, args.output_dim:]\n\n ## --- Your code here --- ##\n\n model = RandomForestClassifier(n_estimators=args.n_estimators)\n\n model.fit(train_x, train_y)\n\n ## --- End of your code --- ##\n\n # Save the trained model\n joblib.dump(model, os.path.join(args.model_dir, \"model.joblib\"))\n"
] | [
[
"sklearn.ensemble.RandomForestClassifier"
]
] |
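The training script above reads its data and model directories from SageMaker-injected environment variables, so a local dry run only needs those variables set before the script starts. A hypothetical local invocation (paths are placeholders; the train channel directory must contain train.csv):

import os, subprocess

env = dict(os.environ,
           SM_MODEL_DIR="/tmp/model",
           SM_OUTPUT_DATA_DIR="/tmp/output",
           SM_CHANNEL_TRAIN="/tmp/data")
subprocess.run(["python", "train.py", "--n-estimators", "50"], env=env, check=True)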
vir-k01/graph-pde | [
"f7bcf22d3f3c58b30769edfa57b86727154850d2"
] | [
"multipole-graph-neural-operator/neurips2_MGKN.py"
] | [
"import torch\nimport numpy as np\n\nimport torch.nn.functional as F\nimport torch.nn as nn\n\nfrom torch_geometric.data import Data, DataLoader\nimport matplotlib.pyplot as plt\nfrom utilities import *\nfrom torch_geometric.nn import GCNConv, NNConv\n\nfrom timeit import default_timer\nimport scipy.io\n\ntorch.manual_seed(0)\nnp.random.seed(0)\n\n\n\nclass KernelInduced(torch.nn.Module):\n def __init__(self, width, ker_width, depth, ker_in, points, level, in_width=1, out_width=1):\n super(KernelInduced, self).__init__()\n self.depth = depth\n self.width = width\n self.level = level\n self.points = points\n self.points_total = np.sum(points)\n\n # in\n self.fc_in = torch.nn.Linear(in_width, width)\n # self.fc_in_list = []\n # for l in range(level):\n # self.fc_in_list.append(torch.nn.Linear(in_width, width))\n # self.fc_in_list = torch.nn.ModuleList(self.fc_in_list)\n\n # K12 K23 K34 ...\n self.conv_down_list = []\n for l in range(1, level):\n ker_width_l = ker_width // (2 ** l)\n kernel_l = DenseNet([ker_in, ker_width_l, width ** 2], torch.nn.ReLU)\n self.conv_down_list.append(NNConv(width, width, kernel_l, aggr='mean', root_weight=False, bias=False))\n self.conv_down_list = torch.nn.ModuleList(self.conv_down_list)\n\n # K11 K22 K33\n self.conv_list = []\n for l in range(level):\n ker_width_l = ker_width // (2 ** l)\n kernel_l = DenseNet([ker_in, ker_width_l, ker_width_l, width ** 2], torch.nn.ReLU)\n self.conv_list.append(NNConv(width, width, kernel_l, aggr='mean', root_weight=False, bias=False))\n self.conv_list = torch.nn.ModuleList(self.conv_list)\n\n # K21 K32 K43\n self.conv_up_list = []\n for l in range(1, level):\n ker_width_l = ker_width // (2 ** l)\n kernel_l = DenseNet([ker_in, ker_width_l, width ** 2], torch.nn.ReLU)\n self.conv_up_list.append(NNConv(width, width, kernel_l, aggr='mean', root_weight=False, bias=False))\n self.conv_up_list = torch.nn.ModuleList(self.conv_up_list)\n\n # out\n self.fc_out1 = torch.nn.Linear(width, ker_width)\n self.fc_out2 = torch.nn.Linear(ker_width, 1)\n\n\n def forward(self, data):\n edge_index_down, edge_attr_down, range_down = data.edge_index_down, data.edge_attr_down, data.edge_index_down_range\n edge_index_mid, edge_attr_mid, range_mid = data.edge_index_mid, data.edge_attr_mid, data.edge_index_range\n edge_index_up, edge_attr_up, range_up = data.edge_index_up, data.edge_attr_up, data.edge_index_up_range\n\n x = self.fc_in(data.x)\n\n for t in range(self.depth):\n # if single graph\n l = 0\n x = x + self.conv_list[l](x, edge_index_mid[:, range_mid[l, 0]:range_mid[l, 1]],\n edge_attr_mid[range_mid[l, 0]:range_mid[l, 1], :])\n x = F.relu(x)\n # #downward\n # for l in range(self.level-1):\n # x = x + self.conv_down_list[l](x, edge_index_down[:,range_down[l,0]:range_down[l,1]], edge_attr_down[range_down[l,0]:range_down[l,1],:])\n # x = F.relu(x)\n #\n # #upward\n # for l in reversed(range(self.level)):\n # x = x + self.conv_list[l](x, edge_index_mid[:,range_mid[l,0]:range_mid[l,1]], edge_attr_mid[range_mid[l,0]:range_mid[l,1],:])\n # x = F.relu(x)\n # if l > 0:\n # x = x + self.conv_up_list[l-1](x, edge_index_up[:,range_up[l-1,0]:range_up[l-1,1]], edge_attr_up[range_up[l-1,0]:range_up[l-1,1],:])\n # x = F.relu(x)\n\n\n x = F.relu(self.fc_out1(x[:self.points[0]]))\n x = self.fc_out2(x)\n return x\n\n\n\nTRAIN_PATH = 'data/piececonst_r241_N1024_smooth1.mat'\nTEST_PATH = 'data/piececonst_r241_N1024_smooth2.mat'\n\n\nr = 1\ns = int(((241 - 1)/r) + 1)\nn = s**2\nk = 1\n\n# this is too large\n# m = [6400, 1600, 400, 100, 25]\n# radius_inner = [0.5/16, 
0.5/8, 0.5/4, 0.5/2, 0.5]\n# radius_inter = [0.5/16 * 1.41, 0.5/8* 1.41, 0.5/4* 1.41, 0.5/2* 1.41]\n\nfor case in range(1):\n\n # this is done in experiment 1\n # if case == 0:\n # m = [1600, 400, 100, 25]\n # radius_inner = [ 0.5/8, 0.5/4, 0.5/2, 0.5]\n # radius_inter = [0.5/8* 1.41, 0.5/4* 1.41, 0.5/2* 1.41]\n\n if case == 1:\n m = [1600, 400, 100]\n radius_inner = [ 0.5/8, 0.5/4, 0.5/2]\n radius_inter = [0.5/8* 1.41, 0.5/4* 1.41]\n\n # if case == 0:\n # m = [1600, 400]\n # radius_inner = [ 0.5/8, 0.5/4]\n # radius_inter = [0.5/8* 1.41]\n\n if case == 0:\n m = [25, 25]\n radius_inner = [ 0.5, 0.5/4]\n radius_inter = [0.5/8* 1.41]\n\n level = len(m)\n print('resolution', s)\n\n ntrain = 100\n ntest = 100\n\n # don't change this\n batch_size = 1\n batch_size2 = 1\n\n width = 64\n ker_width = 256\n depth = 4\n edge_features = 6\n node_features = 6\n\n epochs = 200\n learning_rate = 0.1 / ntrain\n scheduler_step = 10\n scheduler_gamma = 0.8\n\n\n\n path = 'neurips1_multigraph_s'+str(s)+'_ntrain'+str(ntrain)+'_kerwidth'+str(ker_width) + '_lenm1' #+ str(len(m))\n path_model = 'model/' + path\n path_train_err = 'results/' + path + 'train.txt'\n path_test_err = 'results/' + path + 'test.txt'\n path_runtime = 'results/' + path + 'time.txt'\n path_image = 'results/' + path\n\n runtime = np.zeros(2,)\n\n t1 = default_timer()\n\n\n reader = MatReader(TRAIN_PATH)\n train_a = reader.read_field('coeff')[:ntrain,::r,::r].reshape(ntrain,-1)\n train_a_smooth = reader.read_field('Kcoeff')[:ntrain,::r,::r].reshape(ntrain,-1)\n train_a_gradx = reader.read_field('Kcoeff_x')[:ntrain,::r,::r].reshape(ntrain,-1)\n train_a_grady = reader.read_field('Kcoeff_y')[:ntrain,::r,::r].reshape(ntrain,-1)\n train_u = reader.read_field('sol')[:ntrain,::r,::r].reshape(ntrain,-1)\n\n reader.load_file(TEST_PATH)\n test_a = reader.read_field('coeff')[:ntest,::r,::r].reshape(ntest,-1)\n test_a_smooth = reader.read_field('Kcoeff')[:ntest,::r,::r].reshape(ntest,-1)\n test_a_gradx = reader.read_field('Kcoeff_x')[:ntest,::r,::r].reshape(ntest,-1)\n test_a_grady = reader.read_field('Kcoeff_y')[:ntest,::r,::r].reshape(ntest,-1)\n test_u = reader.read_field('sol')[:ntest,::r,::r].reshape(ntest,-1)\n\n\n a_normalizer = GaussianNormalizer(train_a)\n train_a = a_normalizer.encode(train_a)\n test_a = a_normalizer.encode(test_a)\n as_normalizer = GaussianNormalizer(train_a_smooth)\n train_a_smooth = as_normalizer.encode(train_a_smooth)\n test_a_smooth = as_normalizer.encode(test_a_smooth)\n agx_normalizer = GaussianNormalizer(train_a_gradx)\n train_a_gradx = agx_normalizer.encode(train_a_gradx)\n test_a_gradx = agx_normalizer.encode(test_a_gradx)\n agy_normalizer = GaussianNormalizer(train_a_grady)\n train_a_grady = agy_normalizer.encode(train_a_grady)\n test_a_grady = agy_normalizer.encode(test_a_grady)\n\n u_normalizer = UnitGaussianNormalizer(train_u)\n train_u = u_normalizer.encode(train_u)\n # test_u = y_normalizer.encode(test_u)\n\n meshgenerator = RandomMultiMeshGenerator([[0, 1], [0, 1]], [s, s], level=level, sample_sizes=m)\n data_train = []\n for j in range(ntrain):\n for i in range(k):\n idx, idx_all = meshgenerator.sample()\n grid, grid_all = meshgenerator.get_grid()\n edge_index, edge_index_down, edge_index_up = meshgenerator.ball_connectivity(radius_inner, radius_inter)\n edge_index_range, edge_index_down_range, edge_index_up_range = meshgenerator.get_edge_index_range()\n edge_attr, edge_attr_down, edge_attr_up = meshgenerator.attributes(theta=train_a[j, :])\n x = torch.cat([grid_all, train_a[j, idx_all].reshape(-1, 1),\n 
train_a_smooth[j, idx_all].reshape(-1, 1),\n train_a_gradx[j, idx_all].reshape(-1, 1),\n train_a_grady[j, idx_all].reshape(-1, 1)\n ], dim=1)\n data_train.append(Data(x=x, y=train_u[j, idx[0]],\n edge_index_mid=edge_index, edge_index_down=edge_index_down,\n edge_index_up=edge_index_up,\n edge_index_range=edge_index_range, edge_index_down_range=edge_index_down_range,\n edge_index_up_range=edge_index_up_range,\n edge_attr_mid=edge_attr, edge_attr_down=edge_attr_down, edge_attr_up=edge_attr_up,\n sample_idx=idx[0]))\n\n print(x.shape)\n print(edge_index_range)\n print(edge_index_down_range)\n print(edge_index_up_range)\n\n print(edge_index.shape, edge_attr.shape)\n print(edge_index_down.shape, edge_attr_down.shape)\n print(edge_index_up.shape, edge_attr_up.shape)\n\n meshgenerator = RandomMultiMeshGenerator([[0, 1], [0, 1]], [s, s], level=level, sample_sizes=m)\n data_test = []\n for j in range(ntest):\n for i in range(k):\n idx, idx_all = meshgenerator.sample()\n grid, grid_all = meshgenerator.get_grid()\n edge_index, edge_index_down, edge_index_up = meshgenerator.ball_connectivity(radius_inner, radius_inter)\n edge_index_range, edge_index_down_range, edge_index_up_range = meshgenerator.get_edge_index_range()\n edge_attr, edge_attr_down, edge_attr_up = meshgenerator.attributes(theta=test_a[j, :])\n x = torch.cat([grid_all, test_a[j, idx_all].reshape(-1, 1),\n test_a_smooth[j, idx_all].reshape(-1, 1),\n test_a_gradx[j, idx_all].reshape(-1, 1),\n test_a_grady[j, idx_all].reshape(-1, 1)\n ], dim=1)\n data_test.append(Data(x=x, y=test_u[j, idx[0]],\n edge_index_mid=edge_index, edge_index_down=edge_index_down,\n edge_index_up=edge_index_up,\n edge_index_range=edge_index_range, edge_index_down_range=edge_index_down_range,\n edge_index_up_range=edge_index_up_range,\n edge_attr_mid=edge_attr, edge_attr_down=edge_attr_down, edge_attr_up=edge_attr_up,\n sample_idx=idx[0]))\n #\n train_loader = DataLoader(data_train, batch_size=batch_size, shuffle=True)\n test_loader = DataLoader(data_test, batch_size=batch_size2, shuffle=False)\n\n t2 = default_timer()\n\n print('preprocessing finished, time used:', t2-t1)\n device = torch.device('cuda')\n\n # print('use pre-train model')\n # model = torch.load('model/multigraph2241_n100')\n\n model = KernelInduced(width=width, ker_width=ker_width, depth=depth, ker_in=edge_features,\n points=m, level=level, in_width=node_features, out_width=1).cuda()\n\n # model = KernelInduced_SUM(width=width, ker_width=ker_width, depth=depth, ker_in=edge_features,\n # points=m, level=level, in_width=node_features, out_width=1).cuda()\n\n\n optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=5e-4)\n scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=scheduler_step, gamma=scheduler_gamma)\n\n myloss = LpLoss(size_average=False)\n u_normalizer.cuda()\n ttrain = np.zeros((epochs, ))\n ttest = np.zeros((epochs,))\n model.train()\n for ep in range(epochs):\n t1 = default_timer()\n train_mse = 0.0\n train_l2 = 0.0\n for batch in train_loader:\n batch = batch.to(device)\n\n optimizer.zero_grad()\n out = model(batch)\n mse = F.mse_loss(out.view(-1, 1), batch.y.view(-1,1))\n # mse.backward()\n\n l2 = myloss(\n u_normalizer.decode(out.view(batch_size, -1), sample_idx=batch.sample_idx.view(batch_size, -1)),\n u_normalizer.decode(batch.y.view(batch_size, -1), sample_idx=batch.sample_idx.view(batch_size, -1)))\n l2.backward()\n\n optimizer.step()\n train_mse += mse.item()\n train_l2 += l2.item()\n\n scheduler.step()\n t2 = default_timer()\n ttrain[ep] = 
train_l2 / (ntrain * k)\n\n print(ep, t2 - t1, train_mse / len(train_loader), train_l2 / (ntrain * k))\n\n runtime[0] = t2 - t1\n\n t1 = default_timer()\n\n model.eval()\n test_l2 = 0.0\n with torch.no_grad():\n for batch in test_loader:\n batch = batch.to(device)\n out = model(batch)\n out = u_normalizer.decode(out.view(batch_size2, -1), sample_idx=batch.sample_idx.view(batch_size2, -1))\n test_l2 += myloss(out, batch.y.view(batch_size2, -1)).item()\n # test_l2 += myloss(out.view(batch_size2,-1), y_normalizer.encode(batch.y.view(batch_size2, -1))).item()\n\n ttest[ep] = test_l2 / ntest\n t2 = default_timer()\n print(ep, t2 - t1, test_l2 / ntest)\n\n runtime[1] = t2 - t1\n\n np.savetxt(path_train_err, ttrain)\n np.savetxt(path_test_err, ttest)\n np.savetxt(path_runtime, runtime)\n torch.save(model, path_model)\n\n"
] | [
[
"torch.nn.Linear",
"torch.device",
"torch.optim.lr_scheduler.StepLR",
"numpy.savetxt",
"numpy.zeros",
"torch.nn.ModuleList",
"numpy.random.seed",
"numpy.sum",
"torch.save",
"torch.no_grad",
"torch.manual_seed",
"torch.nn.functional.relu"
]
] |
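The MGKN script above decays its learning rate with StepLR (gamma = 0.8 every step_size = 10 epochs) starting from 0.1 / ntrain. A small sketch of the effective rate that schedule yields at a given epoch:

ntrain = 100
base_lr = 0.1 / ntrain          # 1e-3, as in the script
step_size, gamma = 10, 0.8
for epoch in (0, 10, 50, 199):
    lr = base_lr * gamma ** (epoch // step_size)
    print(epoch, lr)            # 1e-3, 8e-4, ~3.28e-4, ~1.44e-5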
ypraveen/pandas | [
"50ebb24880d9d516a6dacf9a28117289fb9eae97"
] | [
"pandas/core/arrays/datetimelike.py"
] | [
"from datetime import datetime, timedelta\nimport operator\nfrom typing import Any, Sequence, Type, Union, cast\nimport warnings\n\nimport numpy as np\n\nfrom pandas._libs import NaT, NaTType, Timestamp, algos, iNaT, lib\nfrom pandas._libs.tslibs.c_timestamp import integer_op_not_supported\nfrom pandas._libs.tslibs.period import DIFFERENT_FREQ, IncompatibleFrequency, Period\nfrom pandas._libs.tslibs.timedeltas import Timedelta, delta_to_nanoseconds\nfrom pandas._libs.tslibs.timestamps import RoundTo, round_nsint64\nfrom pandas._typing import DatetimeLikeScalar\nfrom pandas.compat import set_function_name\nfrom pandas.compat.numpy import function as nv\nfrom pandas.errors import AbstractMethodError, NullFrequencyError, PerformanceWarning\nfrom pandas.util._decorators import Appender, Substitution\nfrom pandas.util._validators import validate_fillna_kwargs\n\nfrom pandas.core.dtypes.common import (\n is_categorical_dtype,\n is_datetime64_any_dtype,\n is_datetime64_dtype,\n is_datetime64tz_dtype,\n is_datetime_or_timedelta_dtype,\n is_dtype_equal,\n is_float_dtype,\n is_integer_dtype,\n is_list_like,\n is_object_dtype,\n is_period_dtype,\n is_string_dtype,\n is_timedelta64_dtype,\n is_unsigned_integer_dtype,\n pandas_dtype,\n)\nfrom pandas.core.dtypes.generic import ABCSeries\nfrom pandas.core.dtypes.inference import is_array_like\nfrom pandas.core.dtypes.missing import is_valid_nat_for_dtype, isna\n\nfrom pandas.core import missing, nanops, ops\nfrom pandas.core.algorithms import checked_add_with_arr, take, unique1d, value_counts\nfrom pandas.core.arrays.base import ExtensionArray, ExtensionOpsMixin\nimport pandas.core.common as com\nfrom pandas.core.indexers import check_array_indexer\nfrom pandas.core.ops.common import unpack_zerodim_and_defer\nfrom pandas.core.ops.invalid import invalid_comparison, make_invalid_op\n\nfrom pandas.tseries import frequencies\nfrom pandas.tseries.offsets import DateOffset, Tick\n\n\ndef _datetimelike_array_cmp(cls, op):\n \"\"\"\n Wrap comparison operations to convert Timestamp/Timedelta/Period-like to\n boxed scalars/arrays.\n \"\"\"\n opname = f\"__{op.__name__}__\"\n nat_result = opname == \"__ne__\"\n\n @unpack_zerodim_and_defer(opname)\n def wrapper(self, other):\n\n if isinstance(other, str):\n try:\n # GH#18435 strings get a pass from tzawareness compat\n other = self._scalar_from_string(other)\n except ValueError:\n # failed to parse as Timestamp/Timedelta/Period\n return invalid_comparison(self, other, op)\n\n if isinstance(other, self._recognized_scalars) or other is NaT:\n other = self._scalar_type(other)\n self._check_compatible_with(other)\n\n other_i8 = self._unbox_scalar(other)\n\n result = op(self.view(\"i8\"), other_i8)\n if isna(other):\n result.fill(nat_result)\n\n elif not is_list_like(other):\n return invalid_comparison(self, other, op)\n\n elif len(other) != len(self):\n raise ValueError(\"Lengths must match\")\n\n else:\n if isinstance(other, list):\n # TODO: could use pd.Index to do inference?\n other = np.array(other)\n\n if not isinstance(other, (np.ndarray, type(self))):\n return invalid_comparison(self, other, op)\n\n if is_object_dtype(other):\n # We have to use comp_method_OBJECT_ARRAY instead of numpy\n # comparison otherwise it would fail to raise when\n # comparing tz-aware and tz-naive\n with np.errstate(all=\"ignore\"):\n result = ops.comp_method_OBJECT_ARRAY(\n op, self.astype(object), other\n )\n o_mask = isna(other)\n\n elif not type(self)._is_recognized_dtype(other.dtype):\n return invalid_comparison(self, other, op)\n\n 
else:\n                # For PeriodDType this casting is unnecessary\n                other = type(self)._from_sequence(other)\n                self._check_compatible_with(other)\n\n                result = op(self.view(\"i8\"), other.view(\"i8\"))\n                o_mask = other._isnan\n\n            if o_mask.any():\n                result[o_mask] = nat_result\n\n        if self._hasnans:\n            result[self._isnan] = nat_result\n\n        return result\n\n    return set_function_name(wrapper, opname, cls)\n\n\nclass AttributesMixin:\n    _data: np.ndarray\n\n    @classmethod\n    def _simple_new(cls, values, **kwargs):\n        raise AbstractMethodError(cls)\n\n    @property\n    def _scalar_type(self) -> Type[DatetimeLikeScalar]:\n        \"\"\"The scalar associated with this datelike\n\n        * PeriodArray : Period\n        * DatetimeArray : Timestamp\n        * TimedeltaArray : Timedelta\n        \"\"\"\n        raise AbstractMethodError(self)\n\n    def _scalar_from_string(\n        self, value: str\n    ) -> Union[Period, Timestamp, Timedelta, NaTType]:\n        \"\"\"\n        Construct a scalar type from a string.\n\n        Parameters\n        ----------\n        value : str\n\n        Returns\n        -------\n        Period, Timestamp, or Timedelta, or NaT\n            Whatever the type of ``self._scalar_type`` is.\n\n        Notes\n        -----\n        This should call ``self._check_compatible_with`` before\n        unboxing the result.\n        \"\"\"\n        raise AbstractMethodError(self)\n\n    def _unbox_scalar(self, value: Union[Period, Timestamp, Timedelta, NaTType]) -> int:\n        \"\"\"\n        Unbox the integer value of a scalar `value`.\n\n        Parameters\n        ----------\n        value : Union[Period, Timestamp, Timedelta]\n\n        Returns\n        -------\n        int\n\n        Examples\n        --------\n        >>> self._unbox_scalar(Timedelta('10s')) # DOCTEST: +SKIP\n        10000000000\n        \"\"\"\n        raise AbstractMethodError(self)\n\n    def _check_compatible_with(\n        self, other: Union[Period, Timestamp, Timedelta, NaTType], setitem: bool = False\n    ) -> None:\n        \"\"\"\n        Verify that `self` and `other` are compatible.\n\n        * DatetimeArray verifies that the timezones (if any) match\n        * PeriodArray verifies that the freq matches\n        * Timedelta has no verification\n\n        In each case, NaT is considered compatible.\n\n        Parameters\n        ----------\n        other\n        setitem : bool, default False\n            For __setitem__ we may have stricter compatibility restrictions than\n            for comparisons.\n\n        Raises\n        ------\n        Exception\n        \"\"\"\n        raise AbstractMethodError(self)\n\n\nclass DatelikeOps:\n    \"\"\"\n    Common ops for DatetimeIndex/PeriodIndex, but not TimedeltaIndex.\n    \"\"\"\n\n    @Substitution(\n        URL=\"https://docs.python.org/3/library/datetime.html\"\n        \"#strftime-and-strptime-behavior\"\n    )\n    def strftime(self, date_format):\n        \"\"\"\n        Convert to Index using specified date_format.\n\n        Return an Index of formatted strings specified by date_format, which\n        supports the same string format as the python standard library. Details\n        of the string format can be found in `python string format\n        doc <%(URL)s>`__.\n\n        Parameters\n        ----------\n        date_format : str\n            Date format string (e.g. \"%%Y-%%m-%%d\").\n\n        Returns\n        -------\n        ndarray\n            NumPy ndarray of formatted strings.\n\n        See Also\n        --------\n        to_datetime : Convert the given argument to datetime.\n        DatetimeIndex.normalize : Return DatetimeIndex with times to midnight.\n        DatetimeIndex.round : Round the DatetimeIndex to the specified freq.\n        DatetimeIndex.floor : Floor the DatetimeIndex to the specified freq.\n\n        Examples\n        --------\n        >>> rng = pd.date_range(pd.Timestamp(\"2018-03-10 09:00\"),\n        ...                     
periods=3, freq='s')\n >>> rng.strftime('%%B %%d, %%Y, %%r')\n Index(['March 10, 2018, 09:00:00 AM', 'March 10, 2018, 09:00:01 AM',\n 'March 10, 2018, 09:00:02 AM'],\n dtype='object')\n \"\"\"\n result = self._format_native_types(date_format=date_format, na_rep=np.nan)\n return result.astype(object)\n\n\nclass TimelikeOps:\n \"\"\"\n Common ops for TimedeltaIndex/DatetimeIndex, but not PeriodIndex.\n \"\"\"\n\n _round_doc = \"\"\"\n Perform {op} operation on the data to the specified `freq`.\n\n Parameters\n ----------\n freq : str or Offset\n The frequency level to {op} the index to. Must be a fixed\n frequency like 'S' (second) not 'ME' (month end). See\n :ref:`frequency aliases <timeseries.offset_aliases>` for\n a list of possible `freq` values.\n ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'\n Only relevant for DatetimeIndex:\n\n - 'infer' will attempt to infer fall dst-transition hours based on\n order\n - bool-ndarray where True signifies a DST time, False designates\n a non-DST time (note that this flag is only applicable for\n ambiguous times)\n - 'NaT' will return NaT where there are ambiguous times\n - 'raise' will raise an AmbiguousTimeError if there are ambiguous\n times.\n\n .. versionadded:: 0.24.0\n\n nonexistent : 'shift_forward', 'shift_backward', 'NaT', timedelta, \\\ndefault 'raise'\n A nonexistent time does not exist in a particular timezone\n where clocks moved forward due to DST.\n\n - 'shift_forward' will shift the nonexistent time forward to the\n closest existing time\n - 'shift_backward' will shift the nonexistent time backward to the\n closest existing time\n - 'NaT' will return NaT where there are nonexistent times\n - timedelta objects will shift nonexistent times by the timedelta\n - 'raise' will raise an NonExistentTimeError if there are\n nonexistent times.\n\n .. 
versionadded:: 0.24.0\n\n Returns\n -------\n DatetimeIndex, TimedeltaIndex, or Series\n Index of the same type for a DatetimeIndex or TimedeltaIndex,\n or a Series with the same index for a Series.\n\n Raises\n ------\n ValueError if the `freq` cannot be converted.\n\n Examples\n --------\n **DatetimeIndex**\n\n >>> rng = pd.date_range('1/1/2018 11:59:00', periods=3, freq='min')\n >>> rng\n DatetimeIndex(['2018-01-01 11:59:00', '2018-01-01 12:00:00',\n '2018-01-01 12:01:00'],\n dtype='datetime64[ns]', freq='T')\n \"\"\"\n\n _round_example = \"\"\">>> rng.round('H')\n DatetimeIndex(['2018-01-01 12:00:00', '2018-01-01 12:00:00',\n '2018-01-01 12:00:00'],\n dtype='datetime64[ns]', freq=None)\n\n **Series**\n\n >>> pd.Series(rng).dt.round(\"H\")\n 0 2018-01-01 12:00:00\n 1 2018-01-01 12:00:00\n 2 2018-01-01 12:00:00\n dtype: datetime64[ns]\n \"\"\"\n\n _floor_example = \"\"\">>> rng.floor('H')\n DatetimeIndex(['2018-01-01 11:00:00', '2018-01-01 12:00:00',\n '2018-01-01 12:00:00'],\n dtype='datetime64[ns]', freq=None)\n\n **Series**\n\n >>> pd.Series(rng).dt.floor(\"H\")\n 0 2018-01-01 11:00:00\n 1 2018-01-01 12:00:00\n 2 2018-01-01 12:00:00\n dtype: datetime64[ns]\n \"\"\"\n\n _ceil_example = \"\"\">>> rng.ceil('H')\n DatetimeIndex(['2018-01-01 12:00:00', '2018-01-01 12:00:00',\n '2018-01-01 13:00:00'],\n dtype='datetime64[ns]', freq=None)\n\n **Series**\n\n >>> pd.Series(rng).dt.ceil(\"H\")\n 0 2018-01-01 12:00:00\n 1 2018-01-01 12:00:00\n 2 2018-01-01 13:00:00\n dtype: datetime64[ns]\n \"\"\"\n\n def _round(self, freq, mode, ambiguous, nonexistent):\n # round the local times\n if is_datetime64tz_dtype(self):\n # operate on naive timestamps, then convert back to aware\n naive = self.tz_localize(None)\n result = naive._round(freq, mode, ambiguous, nonexistent)\n aware = result.tz_localize(\n self.tz, ambiguous=ambiguous, nonexistent=nonexistent\n )\n return aware\n\n values = self.view(\"i8\")\n result = round_nsint64(values, mode, freq)\n result = self._maybe_mask_results(result, fill_value=NaT)\n return self._simple_new(result, dtype=self.dtype)\n\n @Appender((_round_doc + _round_example).format(op=\"round\"))\n def round(self, freq, ambiguous=\"raise\", nonexistent=\"raise\"):\n return self._round(freq, RoundTo.NEAREST_HALF_EVEN, ambiguous, nonexistent)\n\n @Appender((_round_doc + _floor_example).format(op=\"floor\"))\n def floor(self, freq, ambiguous=\"raise\", nonexistent=\"raise\"):\n return self._round(freq, RoundTo.MINUS_INFTY, ambiguous, nonexistent)\n\n @Appender((_round_doc + _ceil_example).format(op=\"ceil\"))\n def ceil(self, freq, ambiguous=\"raise\", nonexistent=\"raise\"):\n return self._round(freq, RoundTo.PLUS_INFTY, ambiguous, nonexistent)\n\n\nclass DatetimeLikeArrayMixin(ExtensionOpsMixin, AttributesMixin, ExtensionArray):\n \"\"\"\n Shared Base/Mixin class for DatetimeArray, TimedeltaArray, PeriodArray\n\n Assumes that __new__/__init__ defines:\n _data\n _freq\n\n and that the inheriting class has methods:\n _generate_range\n \"\"\"\n\n @property\n def ndim(self) -> int:\n return self._data.ndim\n\n @property\n def shape(self):\n return self._data.shape\n\n def reshape(self, *args, **kwargs):\n # Note: we drop any freq\n data = self._data.reshape(*args, **kwargs)\n return type(self)(data, dtype=self.dtype)\n\n def ravel(self, *args, **kwargs):\n # Note: we drop any freq\n data = self._data.ravel(*args, **kwargs)\n return type(self)(data, dtype=self.dtype)\n\n @property\n def _box_func(self):\n \"\"\"\n box function to get object from internal representation\n \"\"\"\n 
raise AbstractMethodError(self)\n\n def _box_values(self, values):\n \"\"\"\n apply box func to passed values\n \"\"\"\n return lib.map_infer(values, self._box_func)\n\n def __iter__(self):\n return (self._box_func(v) for v in self.asi8)\n\n @property\n def asi8(self) -> np.ndarray:\n \"\"\"\n Integer representation of the values.\n\n Returns\n -------\n ndarray\n An ndarray with int64 dtype.\n \"\"\"\n # do not cache or you'll create a memory leak\n return self._data.view(\"i8\")\n\n @property\n def _ndarray_values(self):\n return self._data\n\n # ----------------------------------------------------------------\n # Rendering Methods\n\n def _format_native_types(self, na_rep=\"NaT\", date_format=None):\n \"\"\"\n Helper method for astype when converting to strings.\n\n Returns\n -------\n ndarray[str]\n \"\"\"\n raise AbstractMethodError(self)\n\n def _formatter(self, boxed=False):\n # TODO: Remove Datetime & DatetimeTZ formatters.\n return \"'{}'\".format\n\n # ----------------------------------------------------------------\n # Array-Like / EA-Interface Methods\n\n @property\n def nbytes(self):\n return self._data.nbytes\n\n def __array__(self, dtype=None) -> np.ndarray:\n # used for Timedelta/DatetimeArray, overwritten by PeriodArray\n if is_object_dtype(dtype):\n return np.array(list(self), dtype=object)\n return self._data\n\n @property\n def size(self) -> int:\n \"\"\"The number of elements in this array.\"\"\"\n return np.prod(self.shape)\n\n def __len__(self) -> int:\n return len(self._data)\n\n def __getitem__(self, key):\n \"\"\"\n This getitem defers to the underlying array, which by-definition can\n only handle list-likes, slices, and integer scalars\n \"\"\"\n\n is_int = lib.is_integer(key)\n if lib.is_scalar(key) and not is_int:\n raise IndexError(\n \"only integers, slices (`:`), ellipsis (`...`), \"\n \"numpy.newaxis (`None`) and integer or boolean \"\n \"arrays are valid indices\"\n )\n\n getitem = self._data.__getitem__\n if is_int:\n val = getitem(key)\n if lib.is_scalar(val):\n # i.e. self.ndim == 1\n return self._box_func(val)\n return type(self)(val, dtype=self.dtype)\n\n if com.is_bool_indexer(key):\n # first convert to boolean, because check_array_indexer doesn't\n # allow object dtype\n key = np.asarray(key, dtype=bool)\n key = check_array_indexer(self, key)\n if key.all():\n key = slice(0, None, None)\n else:\n key = lib.maybe_booleans_to_slice(key.view(np.uint8))\n elif isinstance(key, list) and len(key) == 1 and isinstance(key[0], slice):\n # see https://github.com/pandas-dev/pandas/issues/31299, need to allow\n # this for now (would otherwise raise in check_array_indexer)\n pass\n else:\n key = check_array_indexer(self, key)\n\n is_period = is_period_dtype(self)\n if is_period:\n freq = self.freq\n else:\n freq = None\n if isinstance(key, slice):\n if self.freq is not None and key.step is not None:\n freq = key.step * self.freq\n else:\n freq = self.freq\n elif key is Ellipsis:\n # GH#21282 indexing with Ellipsis is similar to a full slice,\n # should preserve `freq` attribute\n freq = self.freq\n\n result = getitem(key)\n if result.ndim > 1:\n # To support MPL which performs slicing with 2 dim\n # even though it only has 1 dim by definition\n return result\n\n return self._simple_new(result, dtype=self.dtype, freq=freq)\n\n def __setitem__(\n self,\n key: Union[int, Sequence[int], Sequence[bool], slice],\n value: Union[NaTType, Any, Sequence[Any]],\n ) -> None:\n # I'm fudging the types a bit here. \"Any\" above really depends\n # on type(self). 
For PeriodArray, it's Period (or stuff coercible\n # to a period in from_sequence). For DatetimeArray, it's Timestamp...\n # I don't know if mypy can do that, possibly with Generics.\n # https://mypy.readthedocs.io/en/latest/generics.html\n if lib.is_scalar(value) and not isna(value):\n value = com.maybe_box_datetimelike(value)\n\n if is_list_like(value):\n is_slice = isinstance(key, slice)\n\n if lib.is_scalar(key):\n raise ValueError(\"setting an array element with a sequence.\")\n\n if not is_slice:\n key = cast(Sequence, key)\n if len(key) != len(value) and not com.is_bool_indexer(key):\n msg = (\n f\"shape mismatch: value array of length '{len(key)}' \"\n \"does not match indexing result of length \"\n f\"'{len(value)}'.\"\n )\n raise ValueError(msg)\n elif not len(key):\n return\n\n value = type(self)._from_sequence(value, dtype=self.dtype)\n self._check_compatible_with(value, setitem=True)\n value = value.asi8\n elif isinstance(value, self._scalar_type):\n self._check_compatible_with(value, setitem=True)\n value = self._unbox_scalar(value)\n elif is_valid_nat_for_dtype(value, self.dtype):\n value = iNaT\n else:\n msg = (\n f\"'value' should be a '{self._scalar_type.__name__}', 'NaT', \"\n f\"or array of those. Got '{type(value).__name__}' instead.\"\n )\n raise TypeError(msg)\n\n key = check_array_indexer(self, key)\n self._data[key] = value\n self._maybe_clear_freq()\n\n def _maybe_clear_freq(self):\n # inplace operations like __setitem__ may invalidate the freq of\n # DatetimeArray and TimedeltaArray\n pass\n\n def astype(self, dtype, copy=True):\n # Some notes on cases we don't have to handle here in the base class:\n # 1. PeriodArray.astype handles period -> period\n # 2. DatetimeArray.astype handles conversion between tz.\n # 3. DatetimeArray.astype handles datetime -> period\n from pandas import Categorical\n\n dtype = pandas_dtype(dtype)\n\n if is_object_dtype(dtype):\n return self._box_values(self.asi8)\n elif is_string_dtype(dtype) and not is_categorical_dtype(dtype):\n return self._format_native_types()\n elif is_integer_dtype(dtype):\n # we deliberately ignore int32 vs. int64 here.\n # See https://github.com/pandas-dev/pandas/issues/24381 for more.\n values = self.asi8\n\n if is_unsigned_integer_dtype(dtype):\n # Again, we ignore int32 vs. 
int64\n values = values.view(\"uint64\")\n\n if copy:\n values = values.copy()\n return values\n elif (\n is_datetime_or_timedelta_dtype(dtype)\n and not is_dtype_equal(self.dtype, dtype)\n ) or is_float_dtype(dtype):\n # disallow conversion between datetime/timedelta,\n # and conversions for any datetimelike to float\n msg = f\"Cannot cast {type(self).__name__} to dtype {dtype}\"\n raise TypeError(msg)\n elif is_categorical_dtype(dtype):\n return Categorical(self, dtype=dtype)\n else:\n return np.asarray(self, dtype=dtype)\n\n def view(self, dtype=None):\n if dtype is None or dtype is self.dtype:\n return type(self)(self._data, dtype=self.dtype)\n return self._data.view(dtype=dtype)\n\n # ------------------------------------------------------------------\n # ExtensionArray Interface\n\n def unique(self):\n result = unique1d(self.asi8)\n return type(self)(result, dtype=self.dtype)\n\n def _validate_fill_value(self, fill_value):\n \"\"\"\n If a fill_value is passed to `take` convert it to an i8 representation,\n raising ValueError if this is not possible.\n\n Parameters\n ----------\n fill_value : object\n\n Returns\n -------\n fill_value : np.int64\n\n Raises\n ------\n ValueError\n \"\"\"\n if isna(fill_value):\n fill_value = iNaT\n elif isinstance(fill_value, self._recognized_scalars):\n self._check_compatible_with(fill_value)\n fill_value = self._scalar_type(fill_value)\n fill_value = self._unbox_scalar(fill_value)\n else:\n raise ValueError(\n f\"'fill_value' should be a {self._scalar_type}. Got '{fill_value}'.\"\n )\n return fill_value\n\n def take(self, indices, allow_fill=False, fill_value=None):\n if allow_fill:\n fill_value = self._validate_fill_value(fill_value)\n\n new_values = take(\n self.asi8, indices, allow_fill=allow_fill, fill_value=fill_value\n )\n\n return type(self)(new_values, dtype=self.dtype)\n\n @classmethod\n def _concat_same_type(cls, to_concat):\n\n # do not pass tz to set because tzlocal cannot be hashed\n dtypes = {str(x.dtype) for x in to_concat}\n if len(dtypes) != 1:\n raise ValueError(\"to_concat must have the same dtype (tz)\", dtypes)\n\n obj = to_concat[0]\n dtype = obj.dtype\n\n values = np.concatenate([x.asi8 for x in to_concat])\n\n if is_period_dtype(to_concat[0].dtype):\n new_freq = obj.freq\n else:\n # GH 3232: If the concat result is evenly spaced, we can retain the\n # original frequency\n new_freq = None\n to_concat = [x for x in to_concat if len(x)]\n\n if obj.freq is not None and all(x.freq == obj.freq for x in to_concat):\n pairs = zip(to_concat[:-1], to_concat[1:])\n if all(pair[0][-1] + obj.freq == pair[1][0] for pair in pairs):\n new_freq = obj.freq\n\n return cls._simple_new(values, dtype=dtype, freq=new_freq)\n\n def copy(self):\n values = self.asi8.copy()\n return type(self)._simple_new(values, dtype=self.dtype, freq=self.freq)\n\n def _values_for_factorize(self):\n return self.asi8, iNaT\n\n @classmethod\n def _from_factorized(cls, values, original):\n return cls(values, dtype=original.dtype)\n\n def _values_for_argsort(self):\n return self._data\n\n # ------------------------------------------------------------------\n # Additional array methods\n # These are not part of the EA API, but we implement them because\n # pandas assumes they're there.\n\n def searchsorted(self, value, side=\"left\", sorter=None):\n \"\"\"\n Find indices where elements should be inserted to maintain order.\n\n Find the indices into a sorted array `self` such that, if the\n corresponding elements in `value` were inserted before the indices,\n the order of 
`self` would be preserved.\n\n Parameters\n ----------\n value : array_like\n Values to insert into `self`.\n side : {'left', 'right'}, optional\n If 'left', the index of the first suitable location found is given.\n If 'right', return the last such index. If there is no suitable\n index, return either 0 or N (where N is the length of `self`).\n sorter : 1-D array_like, optional\n Optional array of integer indices that sort `self` into ascending\n order. They are typically the result of ``np.argsort``.\n\n Returns\n -------\n indices : array of ints\n Array of insertion points with the same shape as `value`.\n \"\"\"\n if isinstance(value, str):\n try:\n value = self._scalar_from_string(value)\n except ValueError:\n raise TypeError(\"searchsorted requires compatible dtype or scalar\")\n\n elif is_valid_nat_for_dtype(value, self.dtype):\n value = NaT\n\n elif isinstance(value, self._recognized_scalars):\n value = self._scalar_type(value)\n\n elif isinstance(value, np.ndarray):\n if not type(self)._is_recognized_dtype(value):\n raise TypeError(\n \"searchsorted requires compatible dtype or scalar, \"\n f\"not {type(value).__name__}\"\n )\n value = type(self)(value)\n self._check_compatible_with(value)\n\n if not (isinstance(value, (self._scalar_type, type(self))) or (value is NaT)):\n raise TypeError(f\"Unexpected type for 'value': {type(value)}\")\n\n if isinstance(value, type(self)):\n self._check_compatible_with(value)\n value = value.asi8\n else:\n value = self._unbox_scalar(value)\n\n # TODO: Use datetime64 semantics for sorting, xref GH#29844\n return self.asi8.searchsorted(value, side=side, sorter=sorter)\n\n def repeat(self, repeats, *args, **kwargs):\n \"\"\"\n Repeat elements of an array.\n\n See Also\n --------\n numpy.ndarray.repeat\n \"\"\"\n nv.validate_repeat(args, kwargs)\n values = self._data.repeat(repeats)\n return type(self)(values.view(\"i8\"), dtype=self.dtype)\n\n def value_counts(self, dropna=False):\n \"\"\"\n Return a Series containing counts of unique values.\n\n Parameters\n ----------\n dropna : bool, default True\n Don't include counts of NaT values.\n\n Returns\n -------\n Series\n \"\"\"\n from pandas import Series, Index\n\n if dropna:\n values = self[~self.isna()]._data\n else:\n values = self._data\n\n cls = type(self)\n\n result = value_counts(values, sort=False, dropna=dropna)\n index = Index(\n cls(result.index.view(\"i8\"), dtype=self.dtype), name=result.index.name\n )\n return Series(result.values, index=index, name=result.name)\n\n def map(self, mapper):\n # TODO(GH-23179): Add ExtensionArray.map\n # Need to figure out if we want ExtensionArray.map first.\n # If so, then we can refactor IndexOpsMixin._map_values to\n # a standalone function and call from here..\n # Else, just rewrite _map_infer_values to do the right thing.\n from pandas import Index\n\n return Index(self).map(mapper).array\n\n # ------------------------------------------------------------------\n # Null Handling\n\n def isna(self):\n return self._isnan\n\n @property # NB: override with cache_readonly in immutable subclasses\n def _isnan(self):\n \"\"\"\n return if each value is nan\n \"\"\"\n return self.asi8 == iNaT\n\n @property # NB: override with cache_readonly in immutable subclasses\n def _hasnans(self):\n \"\"\"\n return if I have any nans; enables various perf speedups\n \"\"\"\n return bool(self._isnan.any())\n\n def _maybe_mask_results(self, result, fill_value=iNaT, convert=None):\n \"\"\"\n Parameters\n ----------\n result : a ndarray\n fill_value : object, default iNaT\n 
convert : str, dtype or None\n\n        Returns\n        -------\n        result : ndarray with values replaced by the fill_value\n\n        mask the result if needed, convert to the provided dtype if it is not\n        None\n\n        This is an internal routine.\n        \"\"\"\n\n        if self._hasnans:\n            if convert:\n                result = result.astype(convert)\n            if fill_value is None:\n                fill_value = np.nan\n            result[self._isnan] = fill_value\n        return result\n\n    def fillna(self, value=None, method=None, limit=None):\n        # TODO(GH-20300): remove this\n        # Just overriding to ensure that we avoid an astype(object).\n        # Either 20300 or a `_values_for_fillna` would avoid this duplication.\n        if isinstance(value, ABCSeries):\n            value = value.array\n\n        value, method = validate_fillna_kwargs(value, method)\n\n        mask = self.isna()\n\n        if is_array_like(value):\n            if len(value) != len(self):\n                raise ValueError(\n                    f\"Length of 'value' does not match. Got ({len(value)}) \"\n                    f\" expected {len(self)}\"\n                )\n            value = value[mask]\n\n        if mask.any():\n            if method is not None:\n                if method == \"pad\":\n                    func = missing.pad_1d\n                else:\n                    func = missing.backfill_1d\n\n                values = self._data\n                if not is_period_dtype(self):\n                    # For PeriodArray self._data is i8, which gets copied\n                    # by `func`. Otherwise we need to make a copy manually\n                    # to avoid modifying `self` in-place.\n                    values = values.copy()\n\n                new_values = func(values, limit=limit, mask=mask)\n                if is_datetime64tz_dtype(self):\n                    # we need to pass int64 values to the constructor to avoid\n                    # re-localizing incorrectly\n                    new_values = new_values.view(\"i8\")\n                new_values = type(self)(new_values, dtype=self.dtype)\n            else:\n                # fill with value\n                new_values = self.copy()\n                new_values[mask] = value\n        else:\n            new_values = self.copy()\n        return new_values\n\n    # ------------------------------------------------------------------\n    # Frequency Properties/Methods\n\n    @property\n    def freq(self):\n        \"\"\"\n        Return the frequency object if it is set, otherwise None.\n        \"\"\"\n        return self._freq\n\n    @freq.setter\n    def freq(self, value):\n        if value is not None:\n            value = frequencies.to_offset(value)\n            self._validate_frequency(self, value)\n\n        self._freq = value\n\n    @property\n    def freqstr(self):\n        \"\"\"\n        Return the frequency object as a string if it is set, otherwise None.\n        \"\"\"\n        if self.freq is None:\n            return None\n        return self.freq.freqstr\n\n    @property  # NB: override with cache_readonly in immutable subclasses\n    def inferred_freq(self):\n        \"\"\"\n        Tries to return a string representing a frequency guess,\n        generated by infer_freq.
Returns None if it can't autodetect the\n frequency.\n \"\"\"\n if self.ndim != 1:\n return None\n try:\n return frequencies.infer_freq(self)\n except ValueError:\n return None\n\n @property # NB: override with cache_readonly in immutable subclasses\n def _resolution(self):\n return frequencies.Resolution.get_reso_from_freq(self.freqstr)\n\n @property # NB: override with cache_readonly in immutable subclasses\n def resolution(self):\n \"\"\"\n Returns day, hour, minute, second, millisecond or microsecond\n \"\"\"\n return frequencies.Resolution.get_str(self._resolution)\n\n @classmethod\n def _validate_frequency(cls, index, freq, **kwargs):\n \"\"\"\n Validate that a frequency is compatible with the values of a given\n Datetime Array/Index or Timedelta Array/Index\n\n Parameters\n ----------\n index : DatetimeIndex or TimedeltaIndex\n The index on which to determine if the given frequency is valid\n freq : DateOffset\n The frequency to validate\n \"\"\"\n if is_period_dtype(cls):\n # Frequency validation is not meaningful for Period Array/Index\n return None\n\n inferred = index.inferred_freq\n if index.size == 0 or inferred == freq.freqstr:\n return None\n\n try:\n on_freq = cls._generate_range(\n start=index[0], end=None, periods=len(index), freq=freq, **kwargs\n )\n if not np.array_equal(index.asi8, on_freq.asi8):\n raise ValueError\n except ValueError as e:\n if \"non-fixed\" in str(e):\n # non-fixed frequencies are not meaningful for timedelta64;\n # we retain that error message\n raise e\n # GH#11587 the main way this is reached is if the `np.array_equal`\n # check above is False. This can also be reached if index[0]\n # is `NaT`, in which case the call to `cls._generate_range` will\n # raise a ValueError, which we re-raise with a more targeted\n # message.\n raise ValueError(\n f\"Inferred frequency {inferred} from passed values \"\n f\"does not conform to passed frequency {freq.freqstr}\"\n )\n\n # monotonicity/uniqueness properties are called via frequencies.infer_freq,\n # see GH#23789\n\n @property\n def _is_monotonic_increasing(self):\n return algos.is_monotonic(self.asi8, timelike=True)[0]\n\n @property\n def _is_monotonic_decreasing(self):\n return algos.is_monotonic(self.asi8, timelike=True)[1]\n\n @property\n def _is_unique(self):\n return len(unique1d(self.asi8)) == len(self)\n\n # ------------------------------------------------------------------\n # Arithmetic Methods\n _create_comparison_method = classmethod(_datetimelike_array_cmp)\n\n # pow is invalid for all three subclasses; TimedeltaArray will override\n # the multiplication and division ops\n __pow__ = make_invalid_op(\"__pow__\")\n __rpow__ = make_invalid_op(\"__rpow__\")\n __mul__ = make_invalid_op(\"__mul__\")\n __rmul__ = make_invalid_op(\"__rmul__\")\n __truediv__ = make_invalid_op(\"__truediv__\")\n __rtruediv__ = make_invalid_op(\"__rtruediv__\")\n __floordiv__ = make_invalid_op(\"__floordiv__\")\n __rfloordiv__ = make_invalid_op(\"__rfloordiv__\")\n __mod__ = make_invalid_op(\"__mod__\")\n __rmod__ = make_invalid_op(\"__rmod__\")\n __divmod__ = make_invalid_op(\"__divmod__\")\n __rdivmod__ = make_invalid_op(\"__rdivmod__\")\n\n def _add_datetimelike_scalar(self, other):\n # Overridden by TimedeltaArray\n raise TypeError(f\"cannot add {type(self).__name__} and {type(other).__name__}\")\n\n _add_datetime_arraylike = _add_datetimelike_scalar\n\n def _sub_datetimelike_scalar(self, other):\n # Overridden by DatetimeArray\n assert other is not NaT\n raise TypeError(f\"cannot subtract a datelike from a 
{type(self).__name__}\")\n\n _sub_datetime_arraylike = _sub_datetimelike_scalar\n\n def _sub_period(self, other):\n # Overridden by PeriodArray\n raise TypeError(f\"cannot subtract Period from a {type(self).__name__}\")\n\n def _add_offset(self, offset):\n raise AbstractMethodError(self)\n\n def _add_delta(self, other):\n \"\"\"\n Add a timedelta-like, Tick or TimedeltaIndex-like object\n to self, yielding an int64 numpy array\n\n Parameters\n ----------\n delta : {timedelta, np.timedelta64, Tick,\n TimedeltaIndex, ndarray[timedelta64]}\n\n Returns\n -------\n result : ndarray[int64]\n\n Notes\n -----\n The result's name is set outside of _add_delta by the calling\n method (__add__ or __sub__), if necessary (i.e. for Indexes).\n \"\"\"\n if isinstance(other, (Tick, timedelta, np.timedelta64)):\n new_values = self._add_timedeltalike_scalar(other)\n elif is_timedelta64_dtype(other):\n # ndarray[timedelta64] or TimedeltaArray/index\n new_values = self._add_delta_tdi(other)\n\n return new_values\n\n def _add_timedeltalike_scalar(self, other):\n \"\"\"\n Add a delta of a timedeltalike\n return the i8 result view\n \"\"\"\n if isna(other):\n # i.e np.timedelta64(\"NaT\"), not recognized by delta_to_nanoseconds\n new_values = np.empty(self.shape, dtype=\"i8\")\n new_values[:] = iNaT\n return new_values\n\n inc = delta_to_nanoseconds(other)\n new_values = checked_add_with_arr(self.asi8, inc, arr_mask=self._isnan).view(\n \"i8\"\n )\n new_values = self._maybe_mask_results(new_values)\n return new_values.view(\"i8\")\n\n def _add_delta_tdi(self, other):\n \"\"\"\n Add a delta of a TimedeltaIndex\n return the i8 result view\n \"\"\"\n if len(self) != len(other):\n raise ValueError(\"cannot add indices of unequal length\")\n\n if isinstance(other, np.ndarray):\n # ndarray[timedelta64]; wrap in TimedeltaIndex for op\n from pandas.core.arrays import TimedeltaArray\n\n other = TimedeltaArray._from_sequence(other)\n\n self_i8 = self.asi8\n other_i8 = other.asi8\n new_values = checked_add_with_arr(\n self_i8, other_i8, arr_mask=self._isnan, b_mask=other._isnan\n )\n if self._hasnans or other._hasnans:\n mask = (self._isnan) | (other._isnan)\n new_values[mask] = iNaT\n return new_values.view(\"i8\")\n\n def _add_nat(self):\n \"\"\"\n Add pd.NaT to self\n \"\"\"\n if is_period_dtype(self):\n raise TypeError(\n f\"Cannot add {type(self).__name__} and {type(NaT).__name__}\"\n )\n\n # GH#19124 pd.NaT is treated like a timedelta for both timedelta\n # and datetime dtypes\n result = np.zeros(self.shape, dtype=np.int64)\n result.fill(iNaT)\n return type(self)(result, dtype=self.dtype, freq=None)\n\n def _sub_nat(self):\n \"\"\"\n Subtract pd.NaT from self\n \"\"\"\n # GH#19124 Timedelta - datetime is not in general well-defined.\n # We make an exception for pd.NaT, which in this case quacks\n # like a timedelta.\n # For datetime64 dtypes by convention we treat NaT as a datetime, so\n # this subtraction returns a timedelta64 dtype.\n # For period dtype, timedelta64 is a close-enough return dtype.\n result = np.zeros(self.shape, dtype=np.int64)\n result.fill(iNaT)\n return result.view(\"timedelta64[ns]\")\n\n def _sub_period_array(self, other):\n \"\"\"\n Subtract a Period Array/Index from self. This is only valid if self\n is itself a Period Array/Index, raises otherwise. 
Both objects must\n have the same frequency.\n\n Parameters\n ----------\n other : PeriodIndex or PeriodArray\n\n Returns\n -------\n result : np.ndarray[object]\n Array of DateOffset objects; nulls represented by NaT.\n \"\"\"\n if not is_period_dtype(self):\n raise TypeError(\n f\"cannot subtract {other.dtype}-dtype from {type(self).__name__}\"\n )\n\n if self.freq != other.freq:\n msg = DIFFERENT_FREQ.format(\n cls=type(self).__name__, own_freq=self.freqstr, other_freq=other.freqstr\n )\n raise IncompatibleFrequency(msg)\n\n new_values = checked_add_with_arr(\n self.asi8, -other.asi8, arr_mask=self._isnan, b_mask=other._isnan\n )\n\n new_values = np.array([self.freq.base * x for x in new_values])\n if self._hasnans or other._hasnans:\n mask = (self._isnan) | (other._isnan)\n new_values[mask] = NaT\n return new_values\n\n def _addsub_object_array(self, other: np.ndarray, op):\n \"\"\"\n Add or subtract array-like of DateOffset objects\n\n Parameters\n ----------\n other : np.ndarray[object]\n op : {operator.add, operator.sub}\n\n Returns\n -------\n result : same class as self\n \"\"\"\n assert op in [operator.add, operator.sub]\n if len(other) == 1:\n return op(self, other[0])\n\n warnings.warn(\n \"Adding/subtracting array of DateOffsets to \"\n f\"{type(self).__name__} not vectorized\",\n PerformanceWarning,\n )\n\n # For EA self.astype('O') returns a numpy array, not an Index\n left = self.astype(\"O\")\n\n res_values = op(left, np.array(other))\n kwargs = {}\n if not is_period_dtype(self):\n kwargs[\"freq\"] = \"infer\"\n try:\n res = type(self)._from_sequence(res_values, **kwargs)\n except ValueError:\n # e.g. we've passed a Timestamp to TimedeltaArray\n res = res_values\n return res\n\n def _time_shift(self, periods, freq=None):\n \"\"\"\n Shift each value by `periods`.\n\n Note this is different from ExtensionArray.shift, which\n shifts the *position* of each element, padding the end with\n missing values.\n\n Parameters\n ----------\n periods : int\n Number of periods to shift by.\n freq : pandas.DateOffset, pandas.Timedelta, or str\n Frequency increment to shift by.\n \"\"\"\n if freq is not None and freq != self.freq:\n if isinstance(freq, str):\n freq = frequencies.to_offset(freq)\n offset = periods * freq\n result = self + offset\n return result\n\n if periods == 0:\n # immutable so OK\n return self.copy()\n\n if self.freq is None:\n raise NullFrequencyError(\"Cannot shift with no freq\")\n\n start = self[0] + periods * self.freq\n end = self[-1] + periods * self.freq\n\n # Note: in the DatetimeTZ case, _generate_range will infer the\n # appropriate timezone from `start` and `end`, so tz does not need\n # to be passed explicitly.\n return self._generate_range(start=start, end=end, periods=None, freq=self.freq)\n\n @unpack_zerodim_and_defer(\"__add__\")\n def __add__(self, other):\n\n # scalar others\n if other is NaT:\n result = self._add_nat()\n elif isinstance(other, (Tick, timedelta, np.timedelta64)):\n result = self._add_delta(other)\n elif isinstance(other, DateOffset):\n # specifically _not_ a Tick\n result = self._add_offset(other)\n elif isinstance(other, (datetime, np.datetime64)):\n result = self._add_datetimelike_scalar(other)\n elif lib.is_integer(other):\n # This check must come after the check for np.timedelta64\n # as is_integer returns True for these\n if not is_period_dtype(self):\n raise integer_op_not_supported(self)\n result = self._time_shift(other)\n\n # array-like others\n elif is_timedelta64_dtype(other):\n # TimedeltaIndex, ndarray[timedelta64]\n 
result = self._add_delta(other)\n elif is_object_dtype(other):\n # e.g. Array/Index of DateOffset objects\n result = self._addsub_object_array(other, operator.add)\n elif is_datetime64_dtype(other) or is_datetime64tz_dtype(other):\n # DatetimeIndex, ndarray[datetime64]\n return self._add_datetime_arraylike(other)\n elif is_integer_dtype(other):\n if not is_period_dtype(self):\n raise integer_op_not_supported(self)\n result = self._addsub_int_array(other, operator.add)\n else:\n # Includes Categorical, other ExtensionArrays\n # For PeriodDtype, if self is a TimedeltaArray and other is a\n # PeriodArray with a timedelta-like (i.e. Tick) freq, this\n # operation is valid. Defer to the PeriodArray implementation.\n # In remaining cases, this will end up raising TypeError.\n return NotImplemented\n\n if is_timedelta64_dtype(result) and isinstance(result, np.ndarray):\n from pandas.core.arrays import TimedeltaArray\n\n return TimedeltaArray(result)\n return result\n\n def __radd__(self, other):\n # alias for __add__\n return self.__add__(other)\n\n @unpack_zerodim_and_defer(\"__sub__\")\n def __sub__(self, other):\n\n # scalar others\n if other is NaT:\n result = self._sub_nat()\n elif isinstance(other, (Tick, timedelta, np.timedelta64)):\n result = self._add_delta(-other)\n elif isinstance(other, DateOffset):\n # specifically _not_ a Tick\n result = self._add_offset(-other)\n elif isinstance(other, (datetime, np.datetime64)):\n result = self._sub_datetimelike_scalar(other)\n elif lib.is_integer(other):\n # This check must come after the check for np.timedelta64\n # as is_integer returns True for these\n if not is_period_dtype(self):\n raise integer_op_not_supported(self)\n result = self._time_shift(-other)\n\n elif isinstance(other, Period):\n result = self._sub_period(other)\n\n # array-like others\n elif is_timedelta64_dtype(other):\n # TimedeltaIndex, ndarray[timedelta64]\n result = self._add_delta(-other)\n elif is_object_dtype(other):\n # e.g. Array/Index of DateOffset objects\n result = self._addsub_object_array(other, operator.sub)\n elif is_datetime64_dtype(other) or is_datetime64tz_dtype(other):\n # DatetimeIndex, ndarray[datetime64]\n result = self._sub_datetime_arraylike(other)\n elif is_period_dtype(other):\n # PeriodIndex\n result = self._sub_period_array(other)\n elif is_integer_dtype(other):\n if not is_period_dtype(self):\n raise integer_op_not_supported(self)\n result = self._addsub_int_array(other, operator.sub)\n else:\n # Includes ExtensionArrays, float_dtype\n return NotImplemented\n\n if is_timedelta64_dtype(result) and isinstance(result, np.ndarray):\n from pandas.core.arrays import TimedeltaArray\n\n return TimedeltaArray(result)\n return result\n\n def __rsub__(self, other):\n if is_datetime64_any_dtype(other) and is_timedelta64_dtype(self.dtype):\n # ndarray[datetime64] cannot be subtracted from self, so\n # we need to wrap in DatetimeArray/Index and flip the operation\n if lib.is_scalar(other):\n # i.e. 
np.datetime64 object\n return Timestamp(other) - self\n if not isinstance(other, DatetimeLikeArrayMixin):\n # Avoid down-casting DatetimeIndex\n from pandas.core.arrays import DatetimeArray\n\n other = DatetimeArray(other)\n return other - self\n elif (\n is_datetime64_any_dtype(self.dtype)\n and hasattr(other, \"dtype\")\n and not is_datetime64_any_dtype(other.dtype)\n ):\n # GH#19959 datetime - datetime is well-defined as timedelta,\n # but any other type - datetime is not well-defined.\n raise TypeError(\n f\"cannot subtract {type(self).__name__} from {type(other).__name__}\"\n )\n elif is_period_dtype(self.dtype) and is_timedelta64_dtype(other):\n # TODO: Can we simplify/generalize these cases at all?\n raise TypeError(f\"cannot subtract {type(self).__name__} from {other.dtype}\")\n elif is_timedelta64_dtype(self.dtype):\n if lib.is_integer(other) or is_integer_dtype(other):\n # need to subtract before negating, since that flips freq\n # -self flips self.freq, messing up results\n return -(self - other)\n\n return (-self) + other\n\n return -(self - other)\n\n def __iadd__(self, other): # type: ignore\n result = self + other\n self[:] = result[:]\n\n if not is_period_dtype(self):\n # restore freq, which is invalidated by setitem\n self._freq = result._freq\n return self\n\n def __isub__(self, other): # type: ignore\n result = self - other\n self[:] = result[:]\n\n if not is_period_dtype(self):\n # restore freq, which is invalidated by setitem\n self._freq = result._freq\n return self\n\n # --------------------------------------------------------------\n # Reductions\n\n def _reduce(self, name, axis=0, skipna=True, **kwargs):\n op = getattr(self, name, None)\n if op:\n return op(skipna=skipna, **kwargs)\n else:\n return super()._reduce(name, skipna, **kwargs)\n\n def min(self, axis=None, skipna=True, *args, **kwargs):\n \"\"\"\n Return the minimum value of the Array or minimum along\n an axis.\n\n See Also\n --------\n numpy.ndarray.min\n Index.min : Return the minimum value in an Index.\n Series.min : Return the minimum value in a Series.\n \"\"\"\n nv.validate_min(args, kwargs)\n nv.validate_minmax_axis(axis)\n\n result = nanops.nanmin(self.asi8, skipna=skipna, mask=self.isna())\n if isna(result):\n # Period._from_ordinal does not handle np.nan gracefully\n return NaT\n return self._box_func(result)\n\n def max(self, axis=None, skipna=True, *args, **kwargs):\n \"\"\"\n Return the maximum value of the Array or maximum along\n an axis.\n\n See Also\n --------\n numpy.ndarray.max\n Index.max : Return the maximum value in an Index.\n Series.max : Return the maximum value in a Series.\n \"\"\"\n # TODO: skipna is broken with max.\n # See https://github.com/pandas-dev/pandas/issues/24265\n nv.validate_max(args, kwargs)\n nv.validate_minmax_axis(axis)\n\n mask = self.isna()\n if skipna:\n values = self[~mask].asi8\n elif mask.any():\n return NaT\n else:\n values = self.asi8\n\n if not len(values):\n # short-circuit for empty max / min\n return NaT\n\n result = nanops.nanmax(values, skipna=skipna)\n # Don't have to worry about NA `result`, since no NA went in.\n return self._box_func(result)\n\n def mean(self, skipna=True):\n \"\"\"\n Return the mean value of the Array.\n\n .. 
versionadded:: 0.25.0\n\n Parameters\n ----------\n skipna : bool, default True\n Whether to ignore any NaT elements.\n\n Returns\n -------\n scalar\n Timestamp or Timedelta.\n\n See Also\n --------\n numpy.ndarray.mean : Returns the average of array elements along a given axis.\n Series.mean : Return the mean value in a Series.\n\n Notes\n -----\n mean is only defined for Datetime and Timedelta dtypes, not for Period.\n \"\"\"\n if is_period_dtype(self):\n # See discussion in GH#24757\n raise TypeError(\n f\"mean is not implemented for {type(self).__name__} since the \"\n \"meaning is ambiguous. An alternative is \"\n \"obj.to_timestamp(how='start').mean()\"\n )\n\n mask = self.isna()\n if skipna:\n values = self[~mask]\n elif mask.any():\n return NaT\n else:\n values = self\n\n if not len(values):\n # short-circuit for empty max / min\n return NaT\n\n result = nanops.nanmean(values.view(\"i8\"), skipna=skipna)\n # Don't have to worry about NA `result`, since no NA went in.\n return self._box_func(result)\n\n\nDatetimeLikeArrayMixin._add_comparison_ops()\n\n# -------------------------------------------------------------------\n# Shared Constructor Helpers\n\n\ndef validate_periods(periods):\n \"\"\"\n If a `periods` argument is passed to the Datetime/Timedelta Array/Index\n constructor, cast it to an integer.\n\n Parameters\n ----------\n periods : None, float, int\n\n Returns\n -------\n periods : None or int\n\n Raises\n ------\n TypeError\n if periods is None, float, or int\n \"\"\"\n if periods is not None:\n if lib.is_float(periods):\n periods = int(periods)\n elif not lib.is_integer(periods):\n raise TypeError(f\"periods must be a number, got {periods}\")\n return periods\n\n\ndef validate_endpoints(closed):\n \"\"\"\n Check that the `closed` argument is among [None, \"left\", \"right\"]\n\n Parameters\n ----------\n closed : {None, \"left\", \"right\"}\n\n Returns\n -------\n left_closed : bool\n right_closed : bool\n\n Raises\n ------\n ValueError : if argument is not among valid values\n \"\"\"\n left_closed = False\n right_closed = False\n\n if closed is None:\n left_closed = True\n right_closed = True\n elif closed == \"left\":\n left_closed = True\n elif closed == \"right\":\n right_closed = True\n else:\n raise ValueError(\"Closed has to be either 'left', 'right' or None\")\n\n return left_closed, right_closed\n\n\ndef validate_inferred_freq(freq, inferred_freq, freq_infer):\n \"\"\"\n If the user passes a freq and another freq is inferred from passed data,\n require that they match.\n\n Parameters\n ----------\n freq : DateOffset or None\n inferred_freq : DateOffset or None\n freq_infer : bool\n\n Returns\n -------\n freq : DateOffset or None\n freq_infer : bool\n\n Notes\n -----\n We assume at this point that `maybe_infer_freq` has been called, so\n `freq` is either a DateOffset object or None.\n \"\"\"\n if inferred_freq is not None:\n if freq is not None and freq != inferred_freq:\n raise ValueError(\n f\"Inferred frequency {inferred_freq} from passed \"\n \"values does not conform to passed frequency \"\n f\"{freq.freqstr}\"\n )\n elif freq is None:\n freq = inferred_freq\n freq_infer = False\n\n return freq, freq_infer\n\n\ndef maybe_infer_freq(freq):\n \"\"\"\n Comparing a DateOffset to the string \"infer\" raises, so we need to\n be careful about comparisons. 
Make a dummy variable `freq_infer` to\n signify the case where the given freq is \"infer\" and set freq to None\n to avoid comparison trouble later on.\n\n Parameters\n ----------\n freq : {DateOffset, None, str}\n\n Returns\n -------\n freq : {DateOffset, None}\n freq_infer : bool\n \"\"\"\n freq_infer = False\n if not isinstance(freq, DateOffset):\n # if a passed freq is None, don't infer automatically\n if freq != \"infer\":\n freq = frequencies.to_offset(freq)\n else:\n freq_infer = True\n freq = None\n return freq, freq_infer\n"
] | [
[
"pandas.core.dtypes.common.is_string_dtype",
"numpy.array_equal",
"pandas.core.dtypes.common.is_unsigned_integer_dtype",
"pandas.core.algorithms.value_counts",
"pandas.core.ops.common.unpack_zerodim_and_defer",
"pandas._libs.tslibs.timedeltas.delta_to_nanoseconds",
"pandas.core.ops.invalid.invalid_comparison",
"pandas.compat.numpy.function.validate_max",
"pandas.core.dtypes.common.is_datetime64_any_dtype",
"pandas._libs.tslibs.period.IncompatibleFrequency",
"pandas._libs.lib.is_float",
"pandas.core.dtypes.common.is_float_dtype",
"pandas.core.dtypes.common.is_datetime64_dtype",
"pandas._libs.lib.is_scalar",
"pandas._libs.lib.is_integer",
"pandas.core.dtypes.missing.isna",
"numpy.concatenate",
"pandas.core.algorithms.take",
"numpy.empty",
"pandas._libs.tslibs.timestamps.round_nsint64",
"pandas.core.dtypes.common.is_datetime64tz_dtype",
"pandas.core.nanops.nanmax",
"pandas.core.dtypes.inference.is_array_like",
"pandas.core.dtypes.common.pandas_dtype",
"pandas.compat.numpy.function.validate_minmax_axis",
"numpy.prod",
"pandas._libs.tslibs.c_timestamp.integer_op_not_supported",
"pandas.core.common.is_bool_indexer",
"pandas.core.dtypes.common.is_object_dtype",
"pandas.compat.numpy.function.validate_repeat",
"pandas.core.dtypes.missing.is_valid_nat_for_dtype",
"pandas.core.arrays.TimedeltaArray",
"pandas.core.indexers.check_array_indexer",
"pandas.util._validators.validate_fillna_kwargs",
"pandas.core.dtypes.common.is_integer_dtype",
"pandas.util._decorators.Substitution",
"pandas.core.dtypes.common.is_dtype_equal",
"pandas.errors.AbstractMethodError",
"numpy.array",
"pandas.core.arrays.TimedeltaArray._from_sequence",
"numpy.zeros",
"pandas.core.dtypes.common.is_period_dtype",
"pandas._libs.Timestamp",
"pandas.tseries.frequencies.Resolution.get_str",
"pandas.core.algorithms.checked_add_with_arr",
"pandas.core.algorithms.unique1d",
"pandas.errors.NullFrequencyError",
"pandas.core.common.maybe_box_datetimelike",
"pandas.tseries.frequencies.Resolution.get_reso_from_freq",
"pandas.core.dtypes.common.is_list_like",
"pandas.tseries.frequencies.infer_freq",
"pandas.core.ops.invalid.make_invalid_op",
"pandas.compat.numpy.function.validate_min",
"pandas._libs.lib.map_infer",
"pandas._libs.algos.is_monotonic",
"pandas.tseries.frequencies.to_offset",
"pandas.Index",
"numpy.asarray",
"numpy.errstate",
"pandas.core.arrays.DatetimeArray",
"pandas.Categorical",
"pandas.compat.set_function_name",
"pandas.core.dtypes.common.is_datetime_or_timedelta_dtype",
"pandas.Series",
"pandas.core.dtypes.common.is_timedelta64_dtype",
"pandas.core.dtypes.common.is_categorical_dtype"
]
] |
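A quick check of the rounding semantics documented in the pandas source above; this is a minimal sketch that sticks to the public APIs whose docstrings appear in that code cell (pd.date_range plus the round/floor/ceil methods), not the private _round/round_nsint64 internals:

import pandas as pd

# Three stamps straddling noon, as in the docstring examples above.
rng = pd.date_range('1/1/2018 11:59:00', periods=3, freq='min')

# round() uses nearest-half-even, floor() rounds toward -inf, ceil() toward
# +inf (RoundTo.NEAREST_HALF_EVEN / MINUS_INFTY / PLUS_INFTY in the source).
print(rng.round('H'))   # 12:00, 12:00, 12:00
print(rng.floor('H'))   # 11:00, 12:00, 12:00
print(rng.ceil('H'))    # 12:00, 12:00, 13:00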
nihilesh/sharpner | [
"bb74a20cebb5de284b50b1b81dc5b4b7fca34b7c"
] | [
"Python/matplotlib/tutorials_python/intermediate/tight_layout_guide.py"
] | [
"\"\"\"\n==================\nTight Layout guide\n==================\n\nHow to use tight-layout to fit plots within your figure cleanly.\n\n*tight_layout* automatically adjusts subplot params so that the\nsubplot(s) fits in to the figure area. This is an experimental\nfeature and may not work for some cases. It only checks the extents\nof ticklabels, axis labels, and titles.\n\nAn alternative to *tight_layout* is :doc:`constrained_layout\n</tutorials/intermediate/constrainedlayout_guide>`.\n\n\nSimple Example\n==============\n\nIn matplotlib, the location of axes (including subplots) are specified in\nnormalized figure coordinates. It can happen that your axis labels or\ntitles (or sometimes even ticklabels) go outside the figure area, and are thus\nclipped.\n\n\"\"\"\n\n# sphinx_gallery_thumbnail_number = 7\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nplt.rcParams['savefig.facecolor'] = \"0.8\"\n\n\ndef example_plot(ax, fontsize=12):\n ax.plot([1, 2])\n\n ax.locator_params(nbins=3)\n ax.set_xlabel('x-label', fontsize=fontsize)\n ax.set_ylabel('y-label', fontsize=fontsize)\n ax.set_title('Title', fontsize=fontsize)\n\nplt.close('all')\nfig, ax = plt.subplots()\nexample_plot(ax, fontsize=24)\n\n###############################################################################\n# To prevent this, the location of axes needs to be adjusted. For\n# subplots, this can be done by adjusting the subplot params\n# (:ref:`howto-subplots-adjust`). Matplotlib v1.1 introduced\n# `.Figure.tight_layout` that does this automatically for you.\n\nfig, ax = plt.subplots()\nexample_plot(ax, fontsize=24)\nplt.tight_layout()\n\n###############################################################################\n# Note that :func:`matplotlib.pyplot.tight_layout` will only adjust the\n# subplot params when it is called. In order to perform this adjustment each\n# time the figure is redrawn, you can call ``fig.set_tight_layout(True)``, or,\n# equivalently, set :rc:`figure.autolayout` to ``True``.\n#\n# When you have multiple subplots, often you see labels of different\n# axes overlapping each other.\n\nplt.close('all')\n\nfig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(nrows=2, ncols=2)\nexample_plot(ax1)\nexample_plot(ax2)\nexample_plot(ax3)\nexample_plot(ax4)\n\n###############################################################################\n# :func:`~matplotlib.pyplot.tight_layout` will also adjust spacing between\n# subplots to minimize the overlaps.\n\nfig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(nrows=2, ncols=2)\nexample_plot(ax1)\nexample_plot(ax2)\nexample_plot(ax3)\nexample_plot(ax4)\nplt.tight_layout()\n\n###############################################################################\n# :func:`~matplotlib.pyplot.tight_layout` can take keyword arguments of\n# *pad*, *w_pad* and *h_pad*. These control the extra padding around the\n# figure border and between subplots. The pads are specified in fraction\n# of fontsize.\n\nfig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(nrows=2, ncols=2)\nexample_plot(ax1)\nexample_plot(ax2)\nexample_plot(ax3)\nexample_plot(ax4)\nplt.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0)\n\n###############################################################################\n# :func:`~matplotlib.pyplot.tight_layout` will work even if the sizes of\n# subplots are different as far as their grid specification is\n# compatible. 
In the example below, *ax1* and *ax2* are subplots of a 2x2\n# grid, while *ax3* is of a 1x2 grid.\n\nplt.close('all')\nfig = plt.figure()\n\nax1 = plt.subplot(221)\nax2 = plt.subplot(223)\nax3 = plt.subplot(122)\n\nexample_plot(ax1)\nexample_plot(ax2)\nexample_plot(ax3)\n\nplt.tight_layout()\n\n###############################################################################\n# It works with subplots created with\n# :func:`~matplotlib.pyplot.subplot2grid`. In general, subplots created\n# from the gridspec (:doc:`/tutorials/intermediate/gridspec`) will work.\n\nplt.close('all')\nfig = plt.figure()\n\nax1 = plt.subplot2grid((3, 3), (0, 0))\nax2 = plt.subplot2grid((3, 3), (0, 1), colspan=2)\nax3 = plt.subplot2grid((3, 3), (1, 0), colspan=2, rowspan=2)\nax4 = plt.subplot2grid((3, 3), (1, 2), rowspan=2)\n\nexample_plot(ax1)\nexample_plot(ax2)\nexample_plot(ax3)\nexample_plot(ax4)\n\nplt.tight_layout()\n\n###############################################################################\n# Although not thoroughly tested, it seems to work for subplots with\n# aspect != \"auto\" (e.g., axes with images).\n\narr = np.arange(100).reshape((10, 10))\n\nplt.close('all')\nfig = plt.figure(figsize=(5, 4))\n\nax = plt.subplot(111)\nim = ax.imshow(arr, interpolation=\"none\")\n\nplt.tight_layout()\n\n###############################################################################\n# Caveats\n# =======\n#\n# * `~matplotlib.pyplot.tight_layout` considers all artists on the axes by\n# default. To remove an artist from the layout calculation you can call\n# `.Artist.set_in_layout`.\n#\n# * ``tight_layout`` assumes that the extra space needed for artists is\n# independent of the original location of axes. This is often true, but there\n# are rare cases where it is not.\n#\n# * ``pad=0`` can clip some texts by a few pixels. This may be a bug or\n# a limitation of the current algorithm and it is not clear why it\n# happens. Meanwhile, use of pad larger than 0.3 is recommended.\n#\n# Use with GridSpec\n# =================\n#\n# GridSpec has its own `.GridSpec.tight_layout` method (the pyplot api\n# `.pyplot.tight_layout` also works).\n\nimport matplotlib.gridspec as gridspec\n\nplt.close('all')\nfig = plt.figure()\n\ngs1 = gridspec.GridSpec(2, 1)\nax1 = fig.add_subplot(gs1[0])\nax2 = fig.add_subplot(gs1[1])\n\nexample_plot(ax1)\nexample_plot(ax2)\n\ngs1.tight_layout(fig)\n\n###############################################################################\n# You may provide an optional *rect* parameter, which specifies the bounding\n# box that the subplots will be fit inside. 
The coordinates must be in\n# normalized figure coordinates and the default is (0, 0, 1, 1).\n\nfig = plt.figure()\n\ngs1 = gridspec.GridSpec(2, 1)\nax1 = fig.add_subplot(gs1[0])\nax2 = fig.add_subplot(gs1[1])\n\nexample_plot(ax1)\nexample_plot(ax2)\n\ngs1.tight_layout(fig, rect=[0, 0, 0.5, 1])\n\n###############################################################################\n# For example, this can be used for a figure with multiple gridspecs.\n\nfig = plt.figure()\n\ngs1 = gridspec.GridSpec(2, 1)\nax1 = fig.add_subplot(gs1[0])\nax2 = fig.add_subplot(gs1[1])\n\nexample_plot(ax1)\nexample_plot(ax2)\n\ngs1.tight_layout(fig, rect=[0, 0, 0.5, 1])\n\ngs2 = gridspec.GridSpec(3, 1)\n\nfor ss in gs2:\n    ax = fig.add_subplot(ss)\n    example_plot(ax)\n    ax.set_title(\"\")\n    ax.set_xlabel(\"\")\n\nax.set_xlabel(\"x-label\", fontsize=12)\n\ngs2.tight_layout(fig, rect=[0.5, 0, 1, 1], h_pad=0.5)\n\n# We may try to match the top and bottom of two grids ::\ntop = min(gs1.top, gs2.top)\nbottom = max(gs1.bottom, gs2.bottom)\n\ngs1.update(top=top, bottom=bottom)\ngs2.update(top=top, bottom=bottom)\nplt.show()\n\n###############################################################################\n# While this should be mostly good enough, adjusting top and bottom may\n# require adjustment of hspace also. To update hspace & vspace, we call\n# `.GridSpec.tight_layout` again with updated rect argument. Note that the\n# rect argument specifies the area including the ticklabels, etc. Thus, we\n# will increase the bottom (which is 0 for the normal case) by the difference\n# between the *bottom* from above and the bottom of each gridspec. Same thing\n# for the top.\n\nfig = plt.gcf()\n\ngs1 = gridspec.GridSpec(2, 1)\nax1 = fig.add_subplot(gs1[0])\nax2 = fig.add_subplot(gs1[1])\n\nexample_plot(ax1)\nexample_plot(ax2)\n\ngs1.tight_layout(fig, rect=[0, 0, 0.5, 1])\n\ngs2 = gridspec.GridSpec(3, 1)\n\nfor ss in gs2:\n    ax = fig.add_subplot(ss)\n    example_plot(ax)\n    ax.set_title(\"\")\n    ax.set_xlabel(\"\")\n\nax.set_xlabel(\"x-label\", fontsize=12)\n\ngs2.tight_layout(fig, rect=[0.5, 0, 1, 1], h_pad=0.5)\n\ntop = min(gs1.top, gs2.top)\nbottom = max(gs1.bottom, gs2.bottom)\n\ngs1.update(top=top, bottom=bottom)\ngs2.update(top=top, bottom=bottom)\n\ntop = min(gs1.top, gs2.top)\nbottom = max(gs1.bottom, gs2.bottom)\n\ngs1.tight_layout(fig, rect=[None, 0 + (bottom-gs1.bottom),\n                            0.5, 1 - (gs1.top-top)])\ngs2.tight_layout(fig, rect=[0.5, 0 + (bottom-gs2.bottom),\n                            None, 1 - (gs2.top-top)],\n                 h_pad=0.5)\n\n###############################################################################\n# Legends and Annotations\n# =======================\n#\n# Pre Matplotlib 2.2, legends and annotations were excluded from the bounding\n# box calculations that decide the layout. Subsequently these artists were\n# added to the calculation, but sometimes it is undesirable to include them.\n# For instance in this case it might be good to have the axes shrink a bit\n# to make room for the legend:\n\nfig, ax = plt.subplots(figsize=(4, 3))\nlines = ax.plot(range(10), label='A simple plot')\nax.legend(bbox_to_anchor=(0.7, 0.5), loc='center left',)\nfig.tight_layout()\nplt.show()\n\n###############################################################################\n# However, sometimes this is not desired (quite often when using\n# ``fig.savefig('outname.png', bbox_inches='tight')``).
In order to\n# remove the legend from the bounding box calculation, we simply set its\n# bounding ``leg.set_in_layout(False)`` and the legend will be ignored.\n\nfig, ax = plt.subplots(figsize=(4, 3))\nlines = ax.plot(range(10), label='B simple plot')\nleg = ax.legend(bbox_to_anchor=(0.7, 0.5), loc='center left',)\nleg.set_in_layout(False)\nfig.tight_layout()\nplt.show()\n\n###############################################################################\n# Use with AxesGrid1\n# ==================\n#\n# While limited, :mod:`mpl_toolkits.axes_grid1` is also supported.\n\nfrom mpl_toolkits.axes_grid1 import Grid\n\nplt.close('all')\nfig = plt.figure()\ngrid = Grid(fig, rect=111, nrows_ncols=(2, 2),\n axes_pad=0.25, label_mode='L',\n )\n\nfor ax in grid:\n example_plot(ax)\nax.title.set_visible(False)\n\nplt.tight_layout()\n\n###############################################################################\n# Colorbar\n# ========\n#\n# If you create a colorbar with `.Figure.colorbar`, the created colorbar is\n# drawn in a Subplot as long as the parent axes is also a Subplot, so\n# `.Figure.tight_layout` will work.\n\nplt.close('all')\narr = np.arange(100).reshape((10, 10))\nfig = plt.figure(figsize=(4, 4))\nim = plt.imshow(arr, interpolation=\"none\")\n\nplt.colorbar(im)\n\nplt.tight_layout()\n\n###############################################################################\n# Another option is to use AxesGrid1 toolkit to\n# explicitly create an axes for colorbar.\n\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\n\nplt.close('all')\narr = np.arange(100).reshape((10, 10))\nfig = plt.figure(figsize=(4, 4))\nim = plt.imshow(arr, interpolation=\"none\")\n\ndivider = make_axes_locatable(plt.gca())\ncax = divider.append_axes(\"right\", \"5%\", pad=\"3%\")\nplt.colorbar(im, cax=cax)\n\nplt.tight_layout()\n"
] | [
[
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.close",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.figure",
"numpy.arange",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.subplot2grid",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.show",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.imshow",
"matplotlib.gridspec.GridSpec",
"matplotlib.pyplot.subplot"
]
] |
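The tutorial in the row above closes with the legend caveat; here is a minimal runnable sketch, using only calls shown in that tutorial (Axes.legend, Legend.set_in_layout, Figure.tight_layout), of excluding an outside-the-axes legend from the layout computation:

import matplotlib.pyplot as plt

fig, ax = plt.subplots(figsize=(4, 3))
ax.plot(range(10), label='A simple plot')
# Park the legend outside the axes, then drop it from the tight_layout
# bounding-box calculation so the axes are not shrunk to make room for it.
leg = ax.legend(bbox_to_anchor=(0.7, 0.5), loc='center left')
leg.set_in_layout(False)
fig.tight_layout()
# Saving with bbox_inches='tight' still expands the canvas to include it.
fig.savefig('legend_outside.png', bbox_inches='tight')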
KonradUdoHannes/konstanz-open-data-api | [
"cfd2cc0c3a054600195069655882e7cc450ead61"
] | [
"opencity/fetch_dataset_list.py"
] | [
"import os\nimport sys\nimport time\nimport math\nimport requests\nimport pandas as pd\nfrom tqdm import tqdm\nfrom bs4 import BeautifulSoup\nfrom colorama import init, Fore, Back, Style\n\ninit() # colorama\n\n# from .config import Config as cf\n\n\nclass DataSetUrlFetcher:\n \"\"\"\n\tHandles the fetching and storing of all available datasets\n\t\"\"\"\n def __init__(self, cf, interactive = True):\n self.cf = cf\n self._interactive = interactive\n self.current_list = self.read_curr_packages()\n\n def read_curr_packages(self):\n try:\n data_frame = pd.read_csv(self.cf.CURRENT_PACKAGE_LIST_FILE)\n except Exception as e:\n if self._interactive:\n print(\n f\"{Fore.RED}There is no file with name {self.cf.CURRENT_PACKAGE_LIST_FILENAME} in the directory: {self.cf.PKG_FOLDER}{Style.RESET_ALL}\"\n )\n inp = input(\n \"The file is needed, do you wish to proceed (and let it be created)? [y/N]\\n> \"\n )\n if inp.lower() == \"n\":\n print(f\"{Fore.RED}> EXITING\")\n sys.exit(0)\n return 0\n resp = self.fetch()\n if isinstance(resp, int):\n print(f\"Error: status_code = {resp}\")\n return False\n\n data_frame = self._parse_data(resp)\n self._store(data_frame)\n \n return data_frame\n\n def fetch(self):\n \"\"\"\n\t\tbasic fetch method for the self.cf.CURRENT_PACKAGE_LIST_URL\n\n\t\tPARAMETERS:\n\t\t-----------\n\t\tNone\n\n\t\tRETURNS:\n\t\t-----------\n\t\tJson: current packages (success)\n\t\tInt: Status code (error)\n\t\t\"\"\"\n response = requests.get(self.cf.CURRENT_PACKAGE_LIST_URL) # , header =\n if response.status_code == 200:\n return response.json()\n return response.status_code\n \n def _get_names(self):\n try:\n # os.path.join(self.cf.CWD, 'names.csv')\n names = pd.read_csv(self.cf.GH_NAMES_FILE_URL, sep=\";\")\n except Exception as e:\n print(\n f\"{Fore.RED}An error occured while trying to read the names to id file: {Style.RESET_ALL}\\n {e}\"\n )\n # names.to_csv(self.cf.NAMES_FILENAME, index=False)\n return names\n \n \n \n def _store(self, data_frame: pd.DataFrame) -> bool:\n \"\"\"\n\t\twrites dataframe to file\n\n\t\tPARAMETERS:\n\t\t-----------\n\t\tdata_frame: DataFrame\n\t\t\tthe respective DataFrame to store\n\n\t\tRETURNS:\n\t\t-----------\n\t\tsucess: Boolean\n\t\t\tindicates wether the storing was successfull\n\t\t\"\"\"\n if not isinstance(data_frame, pd.DataFrame):\n print(f\"Expected DataFrame, got {type(data_frame)}\")\n return False\n\n try:\n \n #name_list = self._get_names()\n\n # name_list = pd.read_csv(names_file, sep=';')\n #merged_list = pd.merge(data_frame, name_list, how='left', on='id')\n #if merged_list['name'].isnull().values.any():\n # idx = merged_list.index[merged_list['name'].isnull()].tolist()\n # print(idx)\n data_frame.to_csv(self.cf.CURRENT_PACKAGE_LIST_FILE,\n encoding='utf-8',\n index=False)\n return True\n except Exception as writing_file_error:\n print(writing_file_error)\n return False\n\n def _parse_data(self, data):\n \"\"\"\n\t\tparse data from json into DataFrame\n\n\t\tPARAMETERS:\n\t\t-----------\n\t\tdata: string (json)\n\t\t\tjson string fetched for a resource\n\n\t\tRETURNS:\n\t\t-----------\n\t\t\tDataFrame with all info\n\t\t\"\"\"\n if not \"success\" in data:\n return False\n\n results = data[\"result\"][0]\n \n out = list()\n for item in tqdm(results):\n tags = []\n try:\n for tag_item in item[\"tags\"]:\n tags.append(tag_item[\"name\"])\n out.append({\n \"id\":\n item[\"id\"],\n \"title\":\n item[\"title\"],\n \"source\":\n item[\"url\"],\n \"url\":\n \"https://offenedaten-konstanz.de/api/3/action/package_show?id=\"\n + 
item[\"id\"],\n \"created\":\n item[\"metadata_created\"],\n \"modified\":\n item[\"metadata_modified\"],\n \"notes\":\n item[\"notes\"],\n #BeautifulSoup(item[\"notes\"], \"lxml\").text,\n \"tags\":\n tags\n })\n except:\n item_name = item[\"name\"]\n print(item[\"name\"] + \" item has not all information needed, hence omitted.\"\n ) # concerns unvollständige data sets on OpenData website\n \n data_frame = pd.DataFrame.from_dict(out)\n name_list = self._get_names()\n\n # name_list = pd.read_csv(names_file, sep=';')\n merged_list = pd.merge(data_frame, name_list, how='left', on='id')\n if merged_list['name'].isnull().values.any():\n idx = merged_list.index[merged_list['name'].isnull()].tolist()\n for i in idx:\n merged_list['name'][i] = merged_list['title'][i]\n \n return merged_list\n\n def update(self):\n \"\"\"\n\t\tupdate method which handles the fetching, parsing\n\t\tand storing of the info\n\n\t\tPARAMETERS:\n\t\t-----------\n\t\tNone\n\n\t\tRETURNS:\n\t\t-----------\n\t\tsuccess: Boolean\n\t\t\twether the operation was successfull\n\t\t\"\"\"\n resp = self.fetch()\n if isinstance(resp, int):\n print(f\"Error: status_code = {resp}\")\n return False\n\n data_frame = self._parse_data(resp)\n # check if names are missing !!!!\n #print(data_frame)\n\n #store_status = self._store(\"st\")\n store_status = self._store(data_frame)\n if not store_status:\n print(\"Error while storing data\")\n return False\n\n return True\n\n\n# dsuf = DataSetUrlFetcher()\n# s = dsuf.update()\n"
] | [
[
"pandas.DataFrame.from_dict",
"pandas.read_csv",
"pandas.merge"
]
] |
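Driving the DataSetUrlFetcher above needs a config object that this row does not include, so the sketch below is hedged: every FakeConfig attribute name is inferred from how the fetcher reads it, and the package-list URL is only a guess at the CKAN endpoint (the source itself shows just the package_show base URL):

class FakeConfig:
    # Assumed stand-in for opencity.config.Config, which is not part of this row.
    CURRENT_PACKAGE_LIST_URL = (
        "https://offenedaten-konstanz.de/api/3/action/"
        "current_package_list_with_resources")  # guessed CKAN endpoint
    CURRENT_PACKAGE_LIST_FILE = "current_packages.csv"
    CURRENT_PACKAGE_LIST_FILENAME = "current_packages.csv"
    PKG_FOLDER = "."
    GH_NAMES_FILE_URL = "names.csv"  # placeholder for the id-to-name mapping

# fetcher = DataSetUrlFetcher(FakeConfig(), interactive=False)
# fetcher.update()   # fetch() -> _parse_data() -> _store() round trip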
riven314/Stylised-Controllable-Image-Captioning | [
"50fee52a02e98603cda0cb4b082950c565b96a0f"
] | [
"src/decoder.py"
] | [
"import os\n\nimport torch\nimport torch.nn as nn\n\nfrom src.dropouts import WeightDropout, InputDropout, RNNDropout, EmbeddingDropout\n\n\nclass Attention(nn.Module):\n \"\"\"\n Attention Network.\n \"\"\"\n\n def __init__(self, encoder_dim, decoder_dim, attention_dim):\n \"\"\"\n :param encoder_dim: feature size of encoded images\n :param decoder_dim: size of decoder's RNN\n :param attention_dim: size of the attention network\n \"\"\"\n super(Attention, self).__init__()\n self.encoder_att = nn.Linear(encoder_dim, attention_dim) # linear layer to transform encoded image\n self.decoder_att = nn.Linear(decoder_dim, attention_dim) # linear layer to transform decoder's output\n self.full_att = nn.Linear(attention_dim, 1) # linear layer to calculate values to be softmax-ed\n self.relu = nn.ReLU()\n self.softmax = nn.Softmax(dim=1) # softmax layer to calculate weights\n\n def forward(self, encoder_out, decoder_hidden):\n \"\"\"\n Forward propagation.\n\n :param encoder_out: encoded images, a tensor of dimension (batch_size, num_pixels, encoder_dim)\n :param decoder_hidden: previous decoder output, a tensor of dimension (batch_size, decoder_dim)\n :return: attention weighted encoding, weights\n \"\"\"\n att1 = self.encoder_att(encoder_out) # (batch_size, num_pixels, attention_dim)\n att2 = self.decoder_att(decoder_hidden) # (batch_size, attention_dim)\n\n att = self.full_att(self.relu(att1 + att2.unsqueeze(1))).squeeze(2) # (batch_size, num_pixels)\n alpha = self.softmax(att) # (batch_size, num_pixels)\n attention_weighted_encoding = (encoder_out * alpha.unsqueeze(2)).sum(dim=1) # (batch_size, encoder_dim)\n\n return attention_weighted_encoding, alpha\n\n\nclass DecoderWithAttention(nn.Module):\n \"\"\" Decoder without regularization \"\"\"\n def __init__(self, attention_dim, embed_dim, decoder_dim, vocab_size, \n encoder_dim = 2048, style_dim = 128, dropout = 0.5):\n \"\"\"\n :param attention_dim: size of attention network\n :param embed_dim: embedding size\n :param decoder_dim: size of decoder's RNN\n :param vocab_size: size of vocabulary\n :param encoder_dim: feature size of encoded images\n :param dropout: dropout\n \"\"\"\n super(DecoderWithAttention, self).__init__()\n\n self.encoder_dim = encoder_dim\n self.attention_dim = attention_dim\n self.embed_dim = embed_dim\n self.decoder_dim = decoder_dim\n self.vocab_size = vocab_size\n self.dropout = dropout\n self.length_class_embed_dim = style_dim\n\n self.attention = Attention(encoder_dim, decoder_dim, attention_dim) # attention network\n\n self.dropout = nn.Dropout(p=self.dropout)\n self.decode_step = nn.LSTMCell(embed_dim + encoder_dim + style_dim + style_dim, \n decoder_dim, bias=True) # decoding LSTMCell\n self.init_h = nn.Linear(encoder_dim, decoder_dim) # linear layer to find initial hidden state of LSTMCell\n self.init_c = nn.Linear(encoder_dim, decoder_dim) # linear layer to find initial cell state of LSTMCell\n self.f_beta = nn.Linear(decoder_dim, encoder_dim) # linear layer to create a sigmoid-activated gate\n self.sigmoid = nn.Sigmoid()\n\n # embedding layers\n self.embedding = nn.Embedding(vocab_size, embed_dim) # embedding layer\n self.length_class_embedding = nn.Embedding(3, style_dim)\n self.is_emoji_embedding = nn.Embedding(2, style_dim)\n\n self.fc = nn.Linear(decoder_dim, vocab_size) # linear layer to find scores over vocabulary\n self.init_weights() # initialize some layers with the uniform distribution\n\n def init_weights(self):\n \"\"\"\n Initializes some parameters with values from the uniform distribution, for easier 
convergence.\n \"\"\"\n self._init_embedding()\n self._init_fc()\n\n def _init_embedding(self):\n self.embedding.weight.data.uniform_(-0.1, 0.1)\n self.length_class_embedding.weight.data.uniform_(-0.1, 0.1)\n self.is_emoji_embedding.weight.data.uniform_(-0.1, 0.1)\n\n def _init_fc(self):\n self.fc.bias.data.fill_(0)\n self.fc.weight.data.uniform_(-0.1, 0.1)\n\n def load_pretrained_embeddings(self, embeddings):\n \"\"\"\n Loads embedding layer with pre-trained embeddings.\n\n :param embeddings: pre-trained embeddings\n \"\"\"\n weight = torch.from_numpy(embeddings).float()\n self.embedding.weight = nn.Parameter(weight)\n self.embedding.weight.requires_grad = False\n\n def fine_tune_embeddings(self, fine_tune=True):\n \"\"\"\n Allow fine-tuning of embedding layer? (Only makes sense to not-allow if using pre-trained embeddings).\n\n :param fine_tune: Allow?\n \"\"\"\n for p in self.embedding.parameters():\n p.requires_grad = fine_tune\n\n def init_hidden_state(self, encoder_out):\n \"\"\"\n Creates the initial hidden and cell states for the decoder's LSTM based on the encoded images.\n\n :param encoder_out: encoded images, a tensor of dimension (batch_size, num_pixels, encoder_dim)\n :return: hidden state, cell state\n \"\"\"\n mean_encoder_out = encoder_out.mean(dim = 1)\n h = self.init_h(mean_encoder_out) # (batch_size, decoder_dim)\n c = self.init_c(mean_encoder_out)\n return h, c\n\n def forward(self, encoder_out, encoded_captions, caption_lengths, length_class, is_emoji):\n \"\"\"\n Forward propagation.\n\n :param encoder_out: encoded images, a tensor of dimension (batch_size, enc_image_size, enc_image_size, encoder_dim)\n :param encoded_captions: encoded captions, a tensor of dimension (batch_size, max_caption_length)\n :param caption_lengths: caption lengths, a tensor of dimension (batch_size, 1)\n :param length_class: a Long tensor of dim (batch_size, 1)\n :param is_emoji: a Long tensor of dim (batch_size, 1)\n :return: scores for vocabulary, sorted encoded captions, decode lengths, weights, sort indices\n \"\"\"\n\n batch_size = encoder_out.size(0)\n encoder_dim = encoder_out.size(-1)\n vocab_size = self.vocab_size\n\n # Flatten image\n encoder_out = encoder_out.view(batch_size, -1, encoder_dim) # (batch_size, num_pixels, encoder_dim)\n num_pixels = encoder_out.size(1)\n\n # Sort input data by decreasing lengths; why? 
apparent below\n        caption_lengths, sort_ind = caption_lengths.squeeze(1).sort(dim=0, descending=True)\n        encoder_out = encoder_out[sort_ind]\n        encoded_captions = encoded_captions[sort_ind]\n        length_class = length_class[sort_ind]\n        is_emoji = is_emoji[sort_ind]\n\n        # Embedding\n        embeddings = self.embedding(encoded_captions)  # (batch_size, max_caption_length, embed_dim)\n\n        # style embedding\n        length_class = length_class.squeeze()\n        is_emoji = is_emoji.squeeze()\n        len_class_embedding = self.length_class_embedding(length_class)\n        is_emoji_embedding = self.is_emoji_embedding(is_emoji)\n\n        # Initialize LSTM state\n        h, c = self.init_hidden_state(encoder_out)  # (batch_size, decoder_dim)\n\n        # We won't decode at the <end> position, since we've finished generating as soon as we generate <end>\n        # So, decoding lengths are actual lengths - 1\n        decode_lengths = (caption_lengths - 1).tolist()\n\n        # Create tensors to hold word prediction scores and alphas\n        predictions = torch.zeros(batch_size, max(decode_lengths), vocab_size).to(encoder_out.device)\n        alphas = torch.zeros(batch_size, max(decode_lengths), num_pixels).to(encoder_out.device)\n\n        # At each time-step, decode by\n        # attention-weighing the encoder's output based on the decoder's previous hidden state output\n        # then generate a new word in the decoder with the previous word and the attention weighted encoding\n\n        for t in range(max(decode_lengths)):\n            \n            batch_size_t = sum([l > t for l in decode_lengths])\n            attention_weighted_encoding, alpha = self.attention(encoder_out[:batch_size_t],\n                                                                h[:batch_size_t])\n            gate = self.sigmoid(self.f_beta(h[:batch_size_t]))  # gating scalar, (batch_size_t, encoder_dim)\n            attention_weighted_encoding = gate * attention_weighted_encoding\n\n            # concat with word embedding, image-attention encoding, style embedding\n            cat_embeddings = torch.cat([\n                embeddings[:batch_size_t, t, :], len_class_embedding[:batch_size_t], \n                is_emoji_embedding[:batch_size_t], attention_weighted_encoding\n            ], dim=1)\n\n            h, c = self.decode_step(cat_embeddings, (h[:batch_size_t], c[:batch_size_t]))  # (batch_size_t, decoder_dim)\n            preds = self.fc(self.dropout(h))  # (batch_size_t, vocab_size)\n            predictions[:batch_size_t, t, :] = preds\n            alphas[:batch_size_t, t, :] = alpha\n\n        return predictions, encoded_captions, decode_lengths, alphas, sort_ind\n\n\nclass RegularizedDecoderWithAttention(nn.Module):\n    \"\"\" Decoder with regularization (embedding, weight, input and output dropout) \"\"\"\n\n    def __init__(self, attention_dim, embed_dim, decoder_dim, vocab_size, \n                 encoder_dim = 2048, style_dim = 128, \n                 embed_p = 0.1, weight_p = 0.5, input_p = 0.6, output_p = 0.3):\n        \"\"\"\n        :param attention_dim: size of attention network\n        :param embed_dim: embedding size\n        :param decoder_dim: size of decoder's RNN\n        :param vocab_size: size of vocabulary\n        :param encoder_dim: feature size of encoded images\n        :param embed_p: embedding dropout\n        :param weight_p: hidden-to-hidden weight matrix dropout\n        :param input_p: input dropout\n        :param output_p: output (pre-classifier) dropout\n        \"\"\"\n        super(RegularizedDecoderWithAttention, self).__init__()\n\n        self.encoder_dim = encoder_dim\n        self.attention_dim = attention_dim\n        self.embed_dim = embed_dim\n        self.decoder_dim = decoder_dim\n        self.vocab_size = vocab_size\n        self.length_class_embed_dim = style_dim\n\n        self.attention = Attention(encoder_dim, decoder_dim, attention_dim)  # attention network\n\n        decode_step = nn.LSTMCell(embed_dim + encoder_dim + style_dim + style_dim, decoder_dim, bias=True)\n        self.decode_step_dp = WeightDropout(decode_step, weight_p, layer_names = ['weight_hh'])\n        self.init_h = 
nn.Linear(encoder_dim, decoder_dim) # linear layer to find initial hidden state of LSTMCell\n self.init_c = nn.Linear(encoder_dim, decoder_dim) # linear layer to find initial cell state of LSTMCell\n self.f_beta = nn.Linear(decoder_dim, encoder_dim) # linear layer to create a sigmoid-activated gate\n self.sigmoid = nn.Sigmoid()\n\n # embedding layers\n self.embedding = nn.Embedding(vocab_size, embed_dim) # embedding layer\n self.embedding_dp = EmbeddingDropout(self.embedding, embed_p)\n self.length_class_embedding = nn.Embedding(3, style_dim)\n self.is_emoji_embedding = nn.Embedding(2, style_dim)\n\n self.fc = nn.Linear(decoder_dim, vocab_size) # linear layer to find scores over vocabulary\n self.init_weights() # initialize some layers with the uniform distribution\n\n # dropout layers\n self.input_dp = RNNDropout(p = input_p)\n self.output_dp = RNNDropout(p = output_p)\n\n\n def init_weights(self):\n \"\"\"\n Initializes some parameters with values from the uniform distribution, for easier convergence.\n \"\"\"\n self._init_embedding()\n self._init_fc()\n\n def _init_embedding(self):\n self.embedding.weight.data.uniform_(-0.1, 0.1)\n self.length_class_embedding.weight.data.uniform_(-0.1, 0.1)\n self.is_emoji_embedding.weight.data.uniform_(-0.1, 0.1)\n\n def _init_fc(self):\n self.fc.bias.data.fill_(0)\n self.fc.weight.data.uniform_(-0.1, 0.1)\n\n def load_pretrained_embeddings(self, embeddings):\n \"\"\"\n Loads embedding layer with pre-trained embeddings.\n\n :param embeddings: pre-trained embeddings\n \"\"\"\n weight = torch.from_numpy(embeddings).float()\n self.embedding.weight = nn.Parameter(weight)\n self.embedding.weight.requires_grad = False\n\n def fine_tune_embeddings(self, fine_tune = True):\n \"\"\"\n Allow fine-tuning of embedding layer? (Only makes sense to not-allow if using pre-trained embeddings).\n\n :param fine_tune: Allow?\n \"\"\"\n for p in self.embedding.parameters():\n p.requires_grad = fine_tune\n\n def init_hidden_state(self, encoder_out):\n \"\"\"\n Creates the initial hidden and cell states for the decoder's LSTM based on the encoded images.\n\n :param encoder_out: encoded images, a tensor of dimension (batch_size, num_pixels, encoder_dim)\n :return: hidden state, cell state\n \"\"\"\n mean_encoder_out = encoder_out.mean(dim = 1)\n h = self.init_h(mean_encoder_out) # (batch_size, decoder_dim)\n c = self.init_c(mean_encoder_out)\n return h, c\n\n def forward(self, encoder_out, encoded_captions, caption_lengths, length_class, is_emoji):\n \"\"\"\n Forward propagation.\n\n :param encoder_out: encoded images, a tensor of dimension (batch_size, enc_image_size, enc_image_size, encoder_dim)\n :param encoded_captions: encoded captions, a tensor of dimension (batch_size, max_caption_length)\n :param caption_lengths: caption lengths, a tensor of dimension (batch_size, 1)\n :param length_class: a Long tensor of dim (batch_size, 1)\n :param is_emoji: a Long tensor of dim (batch_size, 1)\n :return: scores for vocabulary, sorted encoded captions, decode lengths, weights, sort indices\n \"\"\"\n batch_size = encoder_out.size(0)\n encoder_dim = encoder_out.size(-1)\n vocab_size = self.vocab_size\n\n # Flatten image\n encoder_out = encoder_out.view(batch_size, -1, encoder_dim) # (batch_size, num_pixels, encoder_dim)\n num_pixels = encoder_out.size(1)\n\n # Sort input data by decreasing lengths; why? 
apparent below\n        caption_lengths, sort_ind = caption_lengths.squeeze(1).sort(dim = 0, descending=True)\n        encoder_out = encoder_out[sort_ind]\n        encoded_captions = encoded_captions[sort_ind]\n        length_class = length_class[sort_ind]\n        is_emoji = is_emoji[sort_ind]\n\n        # Embedding\n        embeddings = self.embedding_dp(encoded_captions)  # (batch_size, max_caption_length, embed_dim)\n\n        # style embedding\n        length_class = length_class.squeeze()\n        is_emoji = is_emoji.squeeze()\n        len_class_embedding = self.length_class_embedding(length_class)\n        is_emoji_embedding = self.is_emoji_embedding(is_emoji)\n\n        # Initialize LSTM state\n        h, c = self.init_hidden_state(encoder_out)  # (batch_size, decoder_dim)\n\n        # We won't decode at the <end> position, since we've finished generating as soon as we generate <end>\n        # So, decoding lengths are actual lengths - 1\n        decode_lengths = (caption_lengths - 1).tolist()\n\n        # Create tensors to hold word prediction scores and alphas\n        predictions = torch.zeros(batch_size, max(decode_lengths), vocab_size).to(encoder_out.device)\n        alphas = torch.zeros(batch_size, max(decode_lengths), num_pixels).to(encoder_out.device)\n\n        # At each time-step, decode by\n        # attention-weighing the encoder's output based on the decoder's previous hidden state output\n        # then generate a new word in the decoder with the previous word and the attention weighted encoding\n\n        for t in range(max(decode_lengths)):\n            \n            batch_size_t = sum([l > t for l in decode_lengths])\n            attention_weighted_encoding, alpha = self.attention(encoder_out[:batch_size_t],\n                                                                h[:batch_size_t])\n            gate = self.sigmoid(self.f_beta(h[:batch_size_t]))  # gating scalar, (batch_size_t, encoder_dim)\n            attention_weighted_encoding = gate * attention_weighted_encoding\n\n            # concat with word embedding, image-attention encoding, style embedding\n            \n            reset_input_mask = True if t == 0 else False\n            style_embeddings = torch.cat([\n                embeddings[:batch_size_t, t, :], \n                len_class_embedding[:batch_size_t], \n                is_emoji_embedding[:batch_size_t]\n            ], dim = 1)\n            dp_style_embeddings = self.input_dp(style_embeddings, reset_mask = reset_input_mask)\n            cat_embeddings = torch.cat([dp_style_embeddings, attention_weighted_encoding], dim = 1)\n            \n            reset_decoder_mask = True if t == 0 else False\n            # (batch_size_t, decoder_dim)\n            h, c = self.decode_step_dp(cat_embeddings, \n                                       (h[:batch_size_t], c[:batch_size_t]), \n                                       reset_mask = reset_decoder_mask) \n            \n            reset_output_mask = True if t == 0 else False\n            preds = self.fc(self.output_dp(h, reset_mask = reset_output_mask))  # (batch_size_t, vocab_size)\n            predictions[:batch_size_t, t, :] = preds\n            alphas[:batch_size_t, t, :] = alpha\n\n        return predictions, encoded_captions, decode_lengths, alphas, sort_ind"
] | [
[
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.cat",
"torch.nn.LSTMCell",
"torch.nn.Softmax",
"torch.nn.Sigmoid",
"torch.nn.Parameter",
"torch.nn.ReLU",
"torch.from_numpy",
"torch.nn.Embedding"
]
] |
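Note on the entry above: the Attention module is additive (Bahdanau-style) attention. Encoder pixels and the decoder state are projected into a shared attention space, scored through a ReLU and a final linear layer, softmaxed over pixels, and used to form a weighted sum of the encoder output. A minimal self-contained sketch of that computation; the tensor shapes here are illustrative assumptions, not values taken from any training setup:

```python
# Minimal sketch of the additive attention scoring used in the entry above,
# run on random tensors. Shapes are assumptions chosen for illustration.
import torch
import torch.nn as nn

batch, num_pixels, enc_dim, dec_dim, att_dim = 4, 196, 2048, 512, 256
encoder_out = torch.randn(batch, num_pixels, enc_dim)
decoder_hidden = torch.randn(batch, dec_dim)

enc_att = nn.Linear(enc_dim, att_dim)
dec_att = nn.Linear(dec_dim, att_dim)
full_att = nn.Linear(att_dim, 1)

# score each pixel against the current decoder state, softmax over pixels
att = full_att(torch.relu(enc_att(encoder_out) + dec_att(decoder_hidden).unsqueeze(1))).squeeze(2)
alpha = torch.softmax(att, dim=1)                        # (batch, num_pixels)
context = (encoder_out * alpha.unsqueeze(2)).sum(dim=1)  # (batch, enc_dim)
assert context.shape == (batch, enc_dim)
```

The sort-by-caption-length trick in both forward passes exists so that at timestep t only the still-active prefix of the batch (the first `batch_size_t` rows) is pushed through this computation.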
YeoLab/flotilla | [
"31da64567e59003c2b9c03fc8f4eb27ee62e299c"
] | [
"flotilla/visualize/ipython_interact.py"
] | [
"\"\"\"\nNamed `ipython_interact.py` rather than just `interact.py` to differentiate\nbetween IPython interactive visualizations vs D3 interactive visualizations.\n\"\"\"\n\nfrom __future__ import absolute_import, division, print_function\nfrom __future__ import unicode_literals\n\nimport os\n# import sys\nimport warnings\n\nimport ipywidgets\nimport matplotlib.pyplot as plt\n\nfrom .network import NetworkerViz\nfrom ..util import natural_sort, link_to_list\n\n\ndefault_classifier = 'ExtraTreesClassifier'\ndefault_regressor = 'ExtraTreesRegressor'\n\n\ndef _print_locals(locals_iteritems):\n print(\"locals:\")\n for k, v in locals_iteritems:\n if k == 'self':\n continue\n print(k, \":\", v)\n\n\ndef get_feature_subsets_options(study, data_types):\n \"\"\"Given a study and list of data types, get the relevant feature\n subsets\n \"\"\"\n feature_subsets = ['custom']\n if 'expression' in data_types:\n try:\n feature_subsets.extend(study.expression.feature_subsets.keys())\n except AttributeError:\n pass\n if 'splicing' in data_types:\n try:\n feature_subsets.extend(study.splicing.feature_subsets.keys())\n except AttributeError:\n pass\n # Cast to \"set\" to get rid of duplicates, then back to list because you\n # can't sort a set, then back to list after sorting because you get\n # an iterator... yeah ....\n feature_subsets = list(natural_sort(list(set(feature_subsets))))\n # Make sure \"variant\" is first because all datasets have that\n # first remove 'variant' if it is there, then add it at the front\n try:\n feature_subsets.pop(feature_subsets.index('variant'))\n except ValueError:\n pass\n feature_subsets.insert(0, 'variant')\n return feature_subsets\n\n\ndef interactive_pca(study,\n data_types=('expression', 'splicing'),\n sample_subsets=None,\n feature_subsets=None,\n color_samples_by=None,\n featurewise=False,\n x_pc=(1, 10),\n y_pc=(1, 10),\n show_point_labels=False,\n list_link='', plot_violins=False,\n scale_by_variance=True,\n savefile='figures/last.pca.pdf'):\n \"\"\"\n interactive_pca\n \"\"\"\n\n # study.plot_study_sample_legend()\n if feature_subsets is None:\n feature_subsets = get_feature_subsets_options(study, data_types)\n if sample_subsets is None:\n sample_subsets = study.default_sample_subsets\n color_samples_by = study.metadata.data.columns.tolist()\n\n def do_interact(data_type='expression',\n sample_subset=study.default_sample_subsets,\n feature_subset=study.default_feature_subsets,\n featurewise=False,\n list_link='',\n x_pc=1,\n y_pc=2,\n plot_violins=False,\n show_point_labels=False,\n color_samples_by=study.metadata.phenotype_col,\n bokeh=False,\n scale_by_variance=True,\n most_variant_features=False,\n std_multiplier=(0, 5.0)):\n _print_locals(locals().iteritems())\n if feature_subset != \"custom\" and list_link != \"\":\n raise ValueError(\n \"Set feature_subset to \\\"custom\\\" if you use list_link\")\n if feature_subset == \"custom\" and list_link == \"\":\n raise ValueError(\"Use a list_link if feature_subset is \\\"custom\\\"\")\n if feature_subset == 'custom':\n feature_subset = link_to_list(list_link)\n elif feature_subset not in study.default_feature_subsets[data_type]:\n warnings.warn(\"This feature_subset ('{}') is not available in \"\n \"this data type ('{}'). 
Falling back on all \"\n \"features.\".format(feature_subset, data_type))\n return study.plot_pca(sample_subset=sample_subset,\n data_type=data_type,\n featurewise=featurewise,\n x_pc=x_pc,\n y_pc=y_pc,\n show_point_labels=show_point_labels,\n feature_subset=feature_subset,\n plot_violins=plot_violins,\n color_samples_by=color_samples_by,\n bokeh=bokeh,\n std_multiplier=std_multiplier,\n scale_by_variance=scale_by_variance,\n most_variant_features=most_variant_features)\n\n gui = ipywidgets.interact(do_interact,\n data_type=data_types,\n sample_subset=sample_subsets,\n feature_subset=feature_subsets + ['custom'],\n featurewise=featurewise,\n x_pc=x_pc,\n y_pc=y_pc,\n show_point_labels=show_point_labels,\n list_link=list_link, plot_violins=plot_violins,\n color_samples_by=color_samples_by,\n scale_by_variance=scale_by_variance)\n\n def save(w):\n # Make the directory if it's not already there\n filename, extension = os.path.splitext(savefilename_widget.value)\n extension = extension[1:]\n study.maybe_make_directory(savefilename_widget.value)\n gui.widget.result.fig_reduced.savefig(savefilename_widget.value,\n format=extension)\n # add \"violins\" after provided filename, before extension\n violins_file = '{}.{}'.format(\"_\".join([filename, 'violins']),\n extension)\n try:\n gui.widget.result.fig_violins.savefig(\n violins_file, format=extension.lstrip('.'))\n except AttributeError:\n pass\n\n html_widget = ipywidgets.HTML(value=\"<hr>\")\n savefilename_widget = ipywidgets.Text(description=\"file name\",\n value=\"last_pca.pdf\")\n savebutton_widget = ipywidgets.Button(description=\"save figure below\")\n gui.widget.children = list(gui.widget.children) + [html_widget,\n savefilename_widget,\n savebutton_widget]\n savebutton_widget.on_click(save)\n return gui\n\n\ndef interactive_graph(study, data_types=('expression', 'splicing'),\n sample_subsets=None,\n feature_subsets=None,\n featurewise=False,\n cov_std_cut=(0.1, 3),\n degree_cut=(0, 10),\n n_pcs=(2, 100),\n draw_labels=False,\n feature_of_interest=\"RBFOX2\",\n weight_fun=None,\n use_pc_1=True, use_pc_2=True, use_pc_3=True,\n use_pc_4=True):\n\n # TODO not sure why nested functions are required for this\n def do_interact(data_type='expression',\n sample_subset=study.default_sample_subsets,\n feature_subset=study.default_feature_subsets,\n weight_fun=NetworkerViz.weight_funs,\n featurewise=False,\n use_pc_1=True, use_pc_2=True, use_pc_3=True,\n use_pc_4=True, degree_cut=1,\n cov_std_cut=1.8, n_pcs=5,\n feature_of_interest=\"RBFOX2\",\n draw_labels=False):\n _print_locals(locals().iteritems())\n if data_type == 'expression':\n assert (feature_subset in\n study.expression.feature_subsets.keys())\n if data_type == 'splicing':\n assert (feature_subset in\n study.splicing.feature_subsets.keys())\n study.plot_graph(data_type=data_type,\n sample_subset=sample_subset,\n feature_subset=feature_subset,\n featurewise=featurewise, draw_labels=draw_labels,\n degree_cut=degree_cut, cov_std_cut=cov_std_cut,\n n_pcs=n_pcs,\n feature_of_interest=feature_of_interest,\n use_pc_1=use_pc_1, use_pc_2=use_pc_2,\n use_pc_3=use_pc_3,\n use_pc_4=use_pc_4,\n weight_function=weight_fun)\n\n if feature_subsets is None:\n feature_subsets = get_feature_subsets_options(study, data_types)\n if sample_subsets is None:\n sample_subsets = study.default_sample_subsets\n if weight_fun is None:\n weight_fun = NetworkerViz.weight_funs\n gui = ipywidgets.interact(do_interact,\n data_type=data_types,\n sample_subset=sample_subsets,\n feature_subset=feature_subsets,\n 
featurewise=featurewise,\n cov_std_cut=cov_std_cut,\n degree_cut=degree_cut,\n n_pcs=n_pcs,\n draw_labels=draw_labels,\n weight_fun=weight_fun,\n feature_of_interest=feature_of_interest,\n use_pc_1=use_pc_1, use_pc_2=use_pc_2,\n use_pc_3=use_pc_3, use_pc_4=use_pc_4)\n\n def save(w):\n # Make the directory if it's not already there\n filename, extension = os.path.splitext(savefilename_widget.value)\n extension = extension[1:]\n study.maybe_make_directory(savefilename_widget.value)\n plt.gcf().savefig(savefilename_widget.value,\n format=extension.lstrip('.'))\n\n html_widget = ipywidgets.HTML(value=\"<hr>\")\n savefilename_widget = ipywidgets.Text(description='file name',\n value=\"last_graph.pdf\")\n savebutton_widget = ipywidgets.Button(description=\"save figure below\")\n savebutton_widget.on_click(save)\n gui.widget.children = list(gui.widget.children) + [html_widget,\n savefilename_widget,\n savebutton_widget]\n return gui\n\n\ndef interactive_classifier(study, data_types=('expression', 'splicing'),\n sample_subsets=None,\n feature_subsets=None,\n categorical_variables=None,\n predictor_types=None,\n score_coefficient=(0.1, 20),\n draw_labels=False):\n\n def do_interact(data_type,\n sample_subset,\n feature_subset,\n predictor_type=default_classifier,\n categorical_variable='outlier',\n score_coefficient=2,\n plot_violins=False,\n show_point_labels=False):\n _print_locals(locals().iteritems())\n study.plot_classifier(trait=categorical_variable,\n feature_subset=feature_subset,\n sample_subset=sample_subset,\n predictor_name=predictor_type,\n score_coefficient=score_coefficient,\n data_type=data_type,\n plot_violins=plot_violins,\n show_point_labels=show_point_labels)\n\n if feature_subsets is None:\n feature_subsets = get_feature_subsets_options(study, data_types)\n feature_subsets.insert(0, 'variant')\n if sample_subsets is None:\n sample_subsets = study.default_sample_subsets\n if categorical_variables is None:\n categorical_variables = [i for i in study.default_sample_subsets\n if\n not i.startswith(\"~\") and i != 'all_samples']\n if predictor_types is None:\n predictor_types = \\\n study.predictor_config_manager.predictor_configs.keys()\n # study.plot_study_sample_legend()\n gui = ipywidgets.interact(do_interact,\n data_type=data_types,\n sample_subset=sample_subsets,\n feature_subset=feature_subsets,\n categorical_variable=categorical_variables,\n score_coefficient=score_coefficient,\n draw_labels=draw_labels,\n predictor_type=predictor_types)\n\n def save(w):\n # Make the directory if it's not already there\n filename, extension = os.path.splitext(savefilename_widget.value)\n extension = extension[1:]\n study.maybe_make_directory(savefilename_widget.value)\n gui.widget.result.fig_reduced.savefig(savefilename_widget.value,\n format=extension)\n # add \"violins\" after provided filename, before extension\n violins_file = '{}.{}'.format(\"_\".join([filename, 'violins']),\n extension)\n try:\n gui.widget.result.fig_violins.savefig(\n violins_file, format=extension.lstrip('.'))\n except AttributeError:\n pass\n\n html_widget = ipywidgets.HTML(value=\"<hr>\")\n savefilename_widget = ipywidgets.Text(description='file name',\n value=\"last_classifier.pdf\")\n savebutton_widget = ipywidgets.Button(description=\"save figure below\")\n savebutton_widget.on_click(save)\n gui.widget.children = list(gui.widget.children) + [html_widget,\n savefilename_widget,\n savebutton_widget]\n return gui\n\n\ndef interactive_clustermap(study):\n\n def do_interact(data_type='expression',\n 
sample_subset=study.default_sample_subsets,\n feature_subset=study.default_feature_subset,\n metric='euclidean',\n method='median',\n list_link='',\n scale_fig_by_data=True,\n fig_width='', fig_height=''):\n _print_locals(locals().iteritems())\n if feature_subset != \"custom\" and list_link != \"\":\n raise ValueError(\n \"set feature_subset to \\\"custom\\\" to use list_link\")\n if feature_subset == \"custom\" and list_link == \"\":\n raise ValueError(\"use a custom list name please\")\n if feature_subset == 'custom':\n feature_subset = list_link\n elif feature_subset not in study.default_feature_subsets[data_type]:\n warnings.warn(\"This feature_subset ('{}') is not available in \"\n \"this data type ('{}'). Falling back on all \"\n \"features.\".format(feature_subset, data_type))\n return study.plot_clustermap(\n sample_subset=sample_subset, feature_subset=feature_subset,\n data_type=data_type, metric=metric, method=method,\n scale_fig_by_data=scale_fig_by_data)\n\n feature_subsets = get_feature_subsets_options(study,\n ['expression', 'splicing'])\n method = ('average', 'weighted', 'single', 'complete', 'ward')\n metric = ('euclidean', 'seuclidean', 'sqeuclidean', 'chebyshev',\n 'cosine', 'cityblock', 'mahalonobis', 'minowski', 'jaccard')\n gui = ipywidgets.interact(do_interact,\n data_type=('expression', 'splicing'),\n sample_subset=study.default_sample_subsets,\n feature_subset=feature_subsets,\n metric=metric,\n method=method)\n\n def save(w):\n filename, extension = os.path.splitext(savefilename_widget.value)\n extension = extension[1:]\n study.maybe_make_directory(savefilename_widget.value)\n gui.widget.result.savefig(savefilename_widget.value,\n format=extension.lstrip('.'))\n\n html_widget = ipywidgets.HTML(value=\"<hr>\")\n savefilename_widget = ipywidgets.Text(description='file name',\n value=\"last_clustermap.pdf\")\n savebutton_widget = ipywidgets.Button(description=\"save figure below\")\n savebutton_widget.on_click(save)\n gui.widget.children = list(gui.widget.children) + [html_widget,\n savefilename_widget,\n savebutton_widget]\n return gui\n\n\ndef interactive_correlations(study):\n\n def do_interact(data_type='expression',\n sample_subset=study.default_sample_subsets,\n feature_subset=study.default_feature_subset,\n metric='euclidean', method='average',\n list_link='',\n scale_fig_by_data=True,\n fig_width='', fig_height='', featurewise=False):\n _print_locals(locals().iteritems())\n if feature_subset != \"custom\" and list_link != \"\":\n raise ValueError(\n \"set feature_subset to \\\"custom\\\" to use list_link\")\n if feature_subset == \"custom\" and list_link == \"\":\n raise ValueError(\"use a custom list name please\")\n if feature_subset == 'custom':\n feature_subset = list_link\n elif feature_subset not in study.default_feature_subsets[data_type]:\n warnings.warn(\"This feature_subset ('{}') is not available in \"\n \"this data type ('{}'). 
Falling back on all \"\n \"features.\".format(feature_subset, data_type))\n return study.plot_correlations(\n sample_subset=sample_subset, feature_subset=feature_subset,\n data_type=data_type, scale_fig_by_data=scale_fig_by_data,\n method=method, metric=metric, featurewise=featurewise)\n\n feature_subsets = get_feature_subsets_options(study,\n ['expression', 'splicing'])\n method = ('average', 'weighted', 'single', 'complete', 'ward')\n metric = ('euclidean', 'seuclidean', 'sqeuclidean', 'chebyshev',\n 'cosine', 'cityblock', 'mahalonobis', 'minowski', 'jaccard')\n gui = ipywidgets.interact(do_interact,\n data_type=('expression', 'splicing'),\n sample_subset=study.default_sample_subsets,\n feature_subset=feature_subsets,\n metric=metric,\n method=method,\n featurewise=False)\n\n def save(w):\n filename, extension = os.path.splitext(savefilename_widget.value)\n extension = extension[1:]\n study.maybe_make_directory(savefilename_widget.value)\n gui.widget.result.savefig(savefilename_widget.value,\n format=extension.lstrip('.'))\n\n html_widget = ipywidgets.HTML(value=\"<hr>\")\n savefilename_widget = ipywidgets.Text(description='file name',\n value=\"last_correlations.pdf\")\n savebutton_widget = ipywidgets.Button(description=\"save figure below\")\n savebutton_widget.on_click(save)\n gui.widget.children = list(gui.widget.children) + [html_widget,\n savefilename_widget,\n savebutton_widget]\n return gui\n"
] | [
[
"matplotlib.pyplot.gcf"
]
] |
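Note on the entry above: every interactive_* function wires the same save pattern. It builds a gui with ipywidgets.interact, appends an HTML divider, a filename Text box, and a Button whose on_click callback saves the figure held in gui.widget.result. A stripped-down sketch of that wiring, assuming a Jupyter session; `make_plot` is a hypothetical stand-in for study.plot_pca and friends:

```python
# Stripped-down sketch of the interact-plus-save-button pattern used by the
# interactive_* functions above. Meant to run inside a Jupyter notebook.
import os

import ipywidgets
import matplotlib.pyplot as plt


def make_plot(n=10):
    fig, ax = plt.subplots()
    ax.plot(range(n))
    return fig  # interact stores this in gui.widget.result


gui = ipywidgets.interact(make_plot, n=(2, 50))

savefilename_widget = ipywidgets.Text(description="file name", value="last.pdf")
savebutton_widget = ipywidgets.Button(description="save figure below")


def save(w):
    # derive the output format from the typed extension, as the original does
    filename, extension = os.path.splitext(savefilename_widget.value)
    gui.widget.result.savefig(savefilename_widget.value, format=extension.lstrip("."))


savebutton_widget.on_click(save)
gui.widget.children = list(gui.widget.children) + [savefilename_widget, savebutton_widget]
```

Capturing the Text widget in the callback's closure is what lets a single save handler serve whatever filename and extension the user types, exactly as in the original functions.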
b1nhm1nh/jesse-ctf | [
"98e519ba6a08af5dd8dd5bae09617a6056f3b5e4"
] | [
"tests/test_helpers.py"
] | [
"import os\n\nimport arrow\nimport numpy as np\nimport pytest\n\nimport jesse.helpers as jh\n\n\ndef test_app_currency():\n from jesse.routes import router\n from jesse.enums import exchanges, timeframes\n router.initiate(\n [{'exchange': exchanges.BITFINEX, 'symbol': 'ETH-USD', 'timeframe': timeframes.HOUR_3, 'strategy': 'Test19'}])\n assert jh.app_currency() == 'USD'\n\n\ndef test_app_mode():\n assert jh.app_mode() == 'backtest'\n\n\ndef test_arrow_to_timestamp():\n arrow_time = arrow.get('2015-08-01')\n assert jh.arrow_to_timestamp(arrow_time) == 1438387200000\n\n\ndef test_base_asset():\n assert jh.base_asset('BTC-USDT') == 'BTC'\n assert jh.base_asset('BTC-USD') == 'BTC'\n assert jh.base_asset('DEFI-USDT') == 'DEFI'\n assert jh.base_asset('DEFI-USD') == 'DEFI'\n\n\ndef test_binary_search():\n arr = [0, 11, 22, 33, 44, 54, 55]\n\n assert jh.binary_search(arr, 22) == 2\n assert jh.binary_search(arr, 222) == -1\n\n\ndef test_clean_orderbook_list():\n assert np.array_equal(\n jh.clean_orderbook_list([\n ['10', '11'],\n ['11', '14'],\n ['12', '13'],\n ['13', '133'],\n ['14', '12'],\n ]),\n [\n [10, 11],\n [11, 14],\n [12, 13],\n [13, 133],\n [14, 12],\n ]\n )\n\n\ndef test_color():\n msg_text = 'msg'\n msg_color = 'black'\n assert jh.color(msg_text, msg_color) == '\\x1b[30mmsg\\x1b[0m'\n\n\ndef test_convert_number():\n old_max = 119\n old_min = 40\n new_max = 4.0\n new_min = 0.5\n old_value = 41\n\n assert jh.convert_number(old_max, old_min, new_max, new_min, old_value) == 0.5443037974683544\n\n\ndef test_dashless_symbol():\n assert jh.dashless_symbol('BTC-USD') == 'BTCUSD'\n assert jh.dashless_symbol('BTC-USDT') == 'BTCUSDT'\n assert jh.dashless_symbol('1INCH-USDT') == '1INCHUSDT'\n assert jh.dashless_symbol('SC-USDT') == 'SCUSDT'\n\n # make sure that it works even if it's already dashless\n assert jh.dashless_symbol('BTCUSDT') == 'BTCUSDT'\n\n\ndef test_dashy_symbol():\n assert jh.dashy_symbol('BTCUSD') == 'BTC-USD'\n assert jh.dashy_symbol('BTCUSDT') == 'BTC-USDT'\n assert jh.dashy_symbol('BTC-USDT') == 'BTC-USDT'\n\n\ndef test_date_diff_in_days():\n date_1 = arrow.get('2015-12-23 18:40:48', 'YYYY-MM-DD HH:mm:ss')\n date_2 = arrow.get('2017-11-15 13:18:20', 'YYYY-MM-DD HH:mm:ss')\n diff = jh.date_diff_in_days(date_1, date_2)\n assert diff == 692\n\n\ndef test_date_to_timestamp():\n assert jh.date_to_timestamp('2015-08-01') == 1438387200000\n\n\ndef test_dna_to_hp():\n strategy_hp = [\n {'name': 'hp1', 'type': float, 'min': 0.01, 'max': 1.0, 'default': 0.09},\n {'name': 'hp2', 'type': int, 'min': 1, 'max': 10, 'default': 2},\n ]\n dna = \".6\"\n assert jh.dna_to_hp(strategy_hp, dna) == {'hp1': 0.08518987341772151, 'hp2': 3}\n\n\ndef test_dump_exception():\n # uses database, which is not existing during testing\n pass\n\n\ndef test_estimate_average_price():\n assert jh.estimate_average_price(100, 7200, 0, 0) == 7200\n\n with pytest.raises(TypeError):\n jh.estimate_average_price(100, 7200, 0, None)\n jh.estimate_average_price(100, 7200, None, 0)\n jh.estimate_average_price(100, None, 0, 0)\n jh.estimate_average_price(None, 7200, 0, 0)\n\n\ndef test_estimate_PNL():\n # profit\n assert jh.estimate_PNL(2, 50, 60, 'long') == 20\n assert jh.estimate_PNL(2, 60, 50, 'short') == 20\n\n # loss\n assert jh.estimate_PNL(2, 50, 60, 'short') == -20\n assert jh.estimate_PNL(2, 60, 50, 'long') == -20\n\n # profit with fee\n assert jh.estimate_PNL(1, 10, 20, 'long', 0.002) == 9.94\n # loss with fee\n assert jh.estimate_PNL(1, 10, 20, 'short', 0.002) == -10.06\n\n with pytest.raises(TypeError):\n 
jh.estimate_PNL(1, 200, 220, 1)\n jh.estimate_PNL(1, 200, 'invalid_input', 'short')\n jh.estimate_PNL(1, 'invalid_input', 220, 'short')\n jh.estimate_PNL('invalid_input', 200, 220, 'short')\n\n\ndef test_estimate_PNL_percentage():\n # profit\n assert jh.estimate_PNL_percentage(1, 200, 220, 'long') == 10\n assert jh.estimate_PNL_percentage(1, 200, 180, 'short') == 10\n\n # loss\n assert jh.estimate_PNL_percentage(1, 200, 180, 'long') == -10\n assert jh.estimate_PNL_percentage(1, 200, 220, 'short') == -10\n\n with pytest.raises(TypeError):\n jh.estimate_PNL_percentage(1, 200, 220, 1)\n jh.estimate_PNL_percentage(1, 200, 'invalid_input', 'short')\n jh.estimate_PNL_percentage(1, 'invalid_input', 220, 'short')\n jh.estimate_PNL_percentage('invalid_input', 200, 220, 'short')\n\n\ndef test_file_exists():\n assert jh.file_exists('tests/test_helpers.py') is True\n\n\ndef test_floor_with_precision():\n assert jh.floor_with_precision(1.123) == 1\n assert jh.floor_with_precision(1.123, 1) == 1.1\n assert jh.floor_with_precision(1.123, 2) == 1.12\n assert jh.floor_with_precision(1.123, 3) == 1.123\n assert jh.floor_with_precision(1.123, 4) == 1.123\n\n\ndef test_format_currency():\n assert jh.format_currency(100_000_000) == '100,000,000'\n assert jh.format_currency(100_000_000.23) == '100,000,000.23'\n\n\ndef test_generate_unique_id():\n assert jh.is_valid_uuid(jh.generate_unique_id()) is True\n assert jh.is_valid_uuid('asdfasdfasdfasfsadfsd') is False\n\n\ndef test_get_candle_source():\n candle = np.array(([1575547200000, 146.51, 147.03, 149.02, 146.51, 64788.46651],\n [1553817660000, 4092.56783507, 4092.5, 4092.56783507, 4092.5, 9.0847059]))\n close = jh.get_candle_source(candle, source_type=\"close\")\n assert close[-1] == 4092.5\n high = jh.get_candle_source(candle, source_type=\"high\")\n assert high[-1] == 4092.56783507\n low = jh.get_candle_source(candle, source_type=\"low\")\n assert low[-1] == 4092.5\n open = jh.get_candle_source(candle, source_type=\"open\")\n assert open[-1] == 4092.56783507\n volume = jh.get_candle_source(candle, source_type=\"volume\")\n assert volume[-1] == 9.0847059\n hl2 = jh.get_candle_source(candle, source_type=\"hl2\")\n assert hl2[-1] == 4092.533917535\n hlc3 = jh.get_candle_source(candle, source_type=\"hlc3\")\n assert hlc3[-1] == 4092.52261169\n ohlc4 = jh.get_candle_source(candle, source_type=\"ohlc4\")\n assert ohlc4[-1] == 4092.533917535\n\n\ndef test_get_config(monkeypatch):\n # assert when config does NOT exist (must return passed default)\n assert jh.get_config('aaaaaaa', 2020) == 2020\n # assert when config does exist\n assert jh.get_config('env.logging.order_submission', 2020) is True\n # assert env is took\n monkeypatch.setenv(\"ENV_DATABASES_POSTGRES_HOST\", \"db\")\n assert jh.get_config('env.databases.postgres_host', 'default') == 'db'\n monkeypatch.delenv(\"ENV_DATABASES_POSTGRES_HOST\")\n # assert env is took with space\n monkeypatch.setenv(\"ENV_EXCHANGES_BINANCE_FUTURES_SETTLEMENT_CURRENCY\", 'BUSD')\n assert jh.get_config('env.exchanges.Binance Futures.settlement_currency', 'USDT') == 'BUSD'\n monkeypatch.delenv(\"ENV_EXCHANGES_BINANCE_FUTURES_SETTLEMENT_CURRENCY\")\n\n\ndef test_get_strategy_class():\n from jesse.strategies import Strategy\n assert issubclass(jh.get_strategy_class(\"Test01\"), Strategy)\n\n\ndef test_insecure_hash():\n assert jh.insecure_hash(\"test\") == \"098f6bcd4621d373cade4e832627b4f6\"\n\n\ndef test_insert_list():\n my_list = [0, 1, 2, 3]\n\n assert jh.insert_list(2, 22, my_list) == [0, 1, 22, 2, 3]\n assert 
jh.insert_list(0, 22, my_list) == [22, 0, 1, 2, 3]\n assert jh.insert_list(-1, 22, my_list) == [0, 1, 2, 3, 22]\n\n # assert list is untouched\n assert my_list == [0, 1, 2, 3]\n\n\ndef test_is_backtesting():\n assert jh.is_backtesting() is True\n\n\ndef test_is_collecting_data():\n assert jh.is_collecting_data() is False\n\n\ndef test_is_debuggable():\n debug_item = 'order_submission'\n assert jh.is_debuggable(debug_item) is False\n\n\ndef test_is_debugging():\n assert jh.is_debugging() is False\n\n\ndef test_is_importing_candles():\n assert jh.is_importing_candles() is False\n\n\ndef test_is_live():\n assert jh.is_live() is False\n\n\ndef test_is_livetrading():\n assert jh.is_livetrading() is False\n\n\ndef test_is_optimizing():\n assert jh.is_optimizing() is False\n\n\ndef test_is_paper_trading():\n assert jh.is_paper_trading() is False\n\n\ndef test_is_test_driving():\n assert jh.is_test_driving() is False\n\n\ndef test_is_unit_testing():\n assert jh.is_unit_testing() is True\n\n\ndef test_key():\n exchange = \"Exchange\"\n symbol = \"BTC-USD\"\n timeframe = \"6h\"\n assert jh.key(exchange, symbol) == \"Exchange-BTC-USD\"\n assert jh.key(exchange, symbol, timeframe) == \"Exchange-BTC-USD-6h\"\n\n\ndef test_max_timeframe():\n assert jh.max_timeframe(['1m', '3m']) == '3m'\n assert jh.max_timeframe(['3m', '5m']) == '5m'\n assert jh.max_timeframe(['15m', '5m']) == '15m'\n assert jh.max_timeframe(['30m', '15m']) == '30m'\n assert jh.max_timeframe(['30m', '1h']) == '1h'\n assert jh.max_timeframe(['1h', '2h']) == '2h'\n assert jh.max_timeframe(['2h', '3h']) == '3h'\n assert jh.max_timeframe(['4h', '3h']) == '4h'\n assert jh.max_timeframe(['6h', '4h']) == '6h'\n assert jh.max_timeframe(['8h', '4h']) == '8h'\n assert jh.max_timeframe(['6h', '1D']) == '1D'\n\n\ndef test_normalize():\n assert jh.normalize(10, 0, 20) == 0.5\n assert jh.normalize(20, 0, 20) == 1\n assert jh.normalize(0, 0, 20) == 0\n\n\ndef test_now_to_timestamp():\n from jesse.store import store\n assert jh.now_to_timestamp() == store.app.time\n\n\ndef test_np_ffill():\n arr = np.array([0, 1, np.nan, np.nan])\n res = jh.np_ffill(arr)\n expected = np.array([0, 1, 1, 1])\n\n np.equal(res, expected)\n\n\ndef test_np_shift():\n arr = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9])\n res = jh.np_shift(arr, -3)\n expected = np.array([4, 5, 6, 7, 8, 9, 0, 0, 0])\n\n np.equal(res, expected)\n\n\ndef test_opposite_side():\n assert jh.opposite_side('buy') == 'sell'\n assert jh.opposite_side('sell') == 'buy'\n\n\ndef test_opposite_type():\n assert jh.opposite_type('long') == 'short'\n assert jh.opposite_type('short') == 'long'\n\n\ndef test_orderbook_insertion_index_search():\n ascending_arr = [\n [10, 232],\n [11, 33232],\n [12, 233],\n [33, 21323],\n [44, 23123],\n [55, 2321],\n [66, 23213]\n ]\n\n assert jh.orderbook_insertion_index_search(ascending_arr, [7, 2]) == (False, 0)\n assert jh.orderbook_insertion_index_search(ascending_arr, [2, 2]) == (False, 0)\n assert jh.orderbook_insertion_index_search(ascending_arr, [32, 2]) == (False, 3)\n assert jh.orderbook_insertion_index_search(ascending_arr, [34, 2]) == (False, 4)\n assert jh.orderbook_insertion_index_search(ascending_arr, [1, 2]) == (False, 0)\n assert jh.orderbook_insertion_index_search(ascending_arr, [66, 2]) == (True, 6)\n assert jh.orderbook_insertion_index_search(ascending_arr, [77, 2]) == (False, 7)\n\n descending_arr = [\n [66, 232],\n [55, 33232],\n [44, 233],\n [33, 21323],\n [2, 23123],\n ]\n\n assert jh.orderbook_insertion_index_search(descending_arr, [77, 2], ascending=False) == 
(False, 0)\n assert jh.orderbook_insertion_index_search(descending_arr, [2, 2], ascending=False) == (True, 4)\n assert jh.orderbook_insertion_index_search(descending_arr, [65, 2], ascending=False) == (False, 1)\n assert jh.orderbook_insertion_index_search(descending_arr, [1, 2], ascending=False) == (False, 5)\n\n\ndef test_orderbook_trim_price():\n # bids\n assert jh.orderbook_trim_price(101.12, False, .1) == 101.1\n assert jh.orderbook_trim_price(101.1, False, .1) == 101.1\n\n assert jh.orderbook_trim_price(10.12, False, .01) == 10.12\n assert jh.orderbook_trim_price(10.1, False, .01) == 10.1\n assert jh.orderbook_trim_price(10.122, False, .01) == 10.12\n assert jh.orderbook_trim_price(1.1223, False, .001) == 1.122\n\n # asks\n assert jh.orderbook_trim_price(101.12, True, .1) == 101.2\n assert jh.orderbook_trim_price(101.1, True, .1) == 101.1\n assert jh.orderbook_trim_price(10.12, True, .01) == 10.12\n assert jh.orderbook_trim_price(10.122, True, .01) == 10.13\n assert jh.orderbook_trim_price(1.1223, True, .001) == 1.123\n\n\ndef test_prepare_qty():\n assert jh.prepare_qty(10, 'sell') == -10\n assert jh.prepare_qty(-10, 'buy') == 10\n assert jh.prepare_qty(0, 'close') == 0.0\n\n with pytest.raises(ValueError):\n jh.prepare_qty(-10, 'invalid_input')\n\n\ndef test_python_version():\n import sys\n assert jh.python_version() == sys.version_info[:2]\n\n\ndef test_quote_asset():\n assert jh.quote_asset('BTC-USDT') == 'USDT'\n assert jh.quote_asset('DEFI-USDT') == 'USDT'\n assert jh.quote_asset('DEFI-EUR') == 'EUR'\n\n\ndef test_random_str():\n assert len(jh.random_str(10)) == 10\n\n\ndef test_readable_duration():\n assert jh.readable_duration(604312) == \"6 days, 23 hours\"\n\n\ndef test_relative_to_absolute():\n from pathlib import Path\n assert jh.relative_to_absolute(\"tests/test_helpers.py\") == str(Path(__file__).absolute())\n\n\ndef test_round_price_for_live_mode():\n np.testing.assert_equal(\n jh.round_price_for_live_mode(np.array([0.0003209123456, 0.0004209123456]), 7),\n np.array([0.0003209, 0.0004209])\n )\n\n\ndef test_round_qty_for_live_mode():\n np.testing.assert_equal(\n jh.round_qty_for_live_mode(np.array([100.3209123456, 100.4299123456]), 2),\n np.array([100.32, 100.42])\n )\n\n np.testing.assert_equal(\n jh.round_qty_for_live_mode(np.array([0]), 1),\n np.array([0.1])\n )\n\n np.testing.assert_equal(\n jh.round_qty_for_live_mode(np.array([0]), 2),\n np.array([0.01])\n )\n\n np.testing.assert_equal(\n jh.round_qty_for_live_mode(np.array([0]), 3),\n np.array([0.001])\n )\n\n\ndef test_round_decimals_down():\n assert jh.round_decimals_down(100.329, 2) == 100.32\n\n\ndef test_secure_hash():\n assert jh.secure_hash('test') == \"9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08\"\n\n\ndef test_should_execute_silently():\n assert jh.should_execute_silently() is True\n\n\ndef test_side_to_type():\n assert jh.side_to_type(\"buy\") == \"long\"\n assert jh.side_to_type(\"sell\") == \"short\"\n\n # make sure title case works as well\n assert jh.side_to_type(\"Buy\") == \"long\"\n assert jh.side_to_type(\"Sell\") == \"short\"\n\n\ndef test_string_after_character():\n assert jh.string_after_character('btcusdt@bookTicker', '@') == 'bookTicker'\n assert jh.string_after_character('9000|24628', '|') == '24628'\n\n\ndef test_style():\n assert jh.style('test', 'bold') == \"\\x1b[1mtest\\x1b[0m\"\n assert jh.style('test', 'u') == \"\\x1b[4mtest\\x1b[0m\"\n\n\ndef test_terminate_app():\n # uses database, which is not existing during testing\n pass\n\n\ndef 
test_timeframe_to_one_minutes():\n assert jh.timeframe_to_one_minutes('1m') == 1\n assert jh.timeframe_to_one_minutes('3m') == 3\n assert jh.timeframe_to_one_minutes('5m') == 5\n assert jh.timeframe_to_one_minutes('15m') == 15\n assert jh.timeframe_to_one_minutes('30m') == 30\n assert jh.timeframe_to_one_minutes('1h') == 60\n assert jh.timeframe_to_one_minutes('2h') == 60 * 2\n assert jh.timeframe_to_one_minutes('3h') == 60 * 3\n assert jh.timeframe_to_one_minutes('4h') == 60 * 4\n assert jh.timeframe_to_one_minutes('6h') == 60 * 6\n assert jh.timeframe_to_one_minutes('8h') == 60 * 8\n assert jh.timeframe_to_one_minutes('1D') == 60 * 24\n\n\ndef test_timestamp_to_arrow():\n arrow_time = arrow.get('2015-08-01')\n assert jh.timestamp_to_arrow(1438387200000) == arrow_time\n\n\ndef test_timestamp_to_date():\n assert jh.timestamp_to_date(1438387200000) == \"2015-08-01\"\n\n\ndef test_timestamp_to_time():\n assert jh.timestamp_to_time(1558770180000) == '2019-05-25T07:43:00+00:00'\n\n\ndef test_today_to_timestamp():\n assert jh.today_to_timestamp() == arrow.utcnow().floor('day').int_timestamp * 1000\n\n\ndef test_type_to_side():\n assert jh.type_to_side('long') == 'buy'\n assert jh.type_to_side('short') == 'sell'\n\n # validate that if sent any other string, it will raise ValueError\n with pytest.raises(ValueError):\n jh.type_to_side('invalid')\n\n\ndef test_unique_list():\n a = [\n ('Binance', 'BTC', '1m'),\n ('Binance', 'BTC', '5m'),\n ('Binance', 'BTC', '15m'),\n ('Binance', 'BTC', '5m'),\n ('Binance', 'BTC', '1m'),\n ('Binance', 'BTC', '15m'),\n ]\n\n expected = [\n ('Binance', 'BTC', '1m'),\n ('Binance', 'BTC', '5m'),\n ('Binance', 'BTC', '15m'),\n ]\n\n assert jh.unique_list(a) == expected\n\n\ndef test_closing_side():\n assert jh.closing_side('Long') == 'sell'\n assert jh.closing_side('Short') == 'buy'\n\n\ndef test_merge_dicts():\n client = {\n 'extra': {\n 'name': 'Saleh',\n 'new_key': 12\n },\n 'age': 28\n }\n\n server = {\n 'extra': {\n 'name': 'Ocean',\n 'water': 100\n },\n }\n\n expected_result = {'age': 28, 'extra': {'name': 'Ocean', 'water': 100, 'new_key': 12}}\n\n assert expected_result == jh.merge_dicts(client, server)\n\n\ndef test_get_pid():\n assert os.getpid() == jh.get_pid()\n\n\ndef test_convert_to_env_name():\n assert jh.convert_to_env_name('Testnet Binance Futures') == 'TESTNET_BINANCE_FUTURES'\n assert jh.convert_to_env_name('Testnet Binance') == 'TESTNET_BINANCE'\n\n\ndef test_str_or_none():\n assert jh.str_or_none('test') == 'test'\n assert jh.str_or_none(None) is None\n assert jh.str_or_none('') is ''\n assert jh.str_or_none(3009004354) == '3009004354'\n assert jh.str_or_none(b'3009004354') == '3009004354'\n\n\ndef test_float_or_none():\n assert jh.float_or_none(1.23) == 1.23\n assert jh.float_or_none(1) == 1.0\n assert jh.float_or_none(None) is None\n assert jh.float_or_none('') is None\n assert jh.float_or_none(b'1.23') == 1.23\n assert jh.float_or_none('1.23') == 1.23\n\n\ndef test_get_class_name():\n class TestClass:\n pass\n\n assert jh.get_class_name(TestClass) == 'TestClass'\n\n # if string is passed, it will return the string\n assert jh.get_class_name('TestClass') == 'TestClass'\n"
] | [
[
"numpy.equal",
"numpy.array"
]
] |
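One caveat worth flagging in the tests above: inside a single `with pytest.raises(TypeError):` block, execution stops at the first call that raises, so in test_estimate_PNL and test_estimate_PNL_percentage only the first invalid call is actually exercised. A sketch of the idiomatic alternative, parametrizing over the same invalid inputs so each one is checked independently (same jesse.helpers import as the tests above):

```python
# Sketch: check each invalid input separately instead of stacking several
# calls inside one pytest.raises block, where only the first call ever runs.
import pytest

import jesse.helpers as jh


@pytest.mark.parametrize("qty, entry, exit_, side", [
    (1, 200, 220, 1),
    (1, 200, 'invalid_input', 'short'),
    (1, 'invalid_input', 220, 'short'),
    ('invalid_input', 200, 220, 'short'),
])
def test_estimate_PNL_invalid_inputs(qty, entry, exit_, side):
    with pytest.raises(TypeError):
        jh.estimate_PNL(qty, entry, exit_, side)
```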
Treadco/sharp-tick | [
"14cac0c707c4c6322f5b984e74b8ca96e4e332d4"
] | [
"tick_syn.py"
] | [
"#!/usr/bin/python\n# (c) 2017 Treadco software.\n# this defaults to python 2 on my machine\n\nimport numpy as np\nimport sys,os\nfrom PIL import Image\nfrom PIL import ImageChops\nfrom pylab import *\n\nimport kernel\n\ndef normalize( a ):\n# written generally\n# a tuple resolves to an index\n# just need to find the right tuple\n lindx = []\n for i in range(0, a.ndim):\n lindx.append(0)\n div = 1.0/a[tuple(lindx)]\n return a.__mul__(div)\n\ndef generate_psf( a):\n# first get the autocorrelation function\n acf = normalize(np.fft.ifftn( np.absolute( np.fft.fftn(rescale(a,1.0))).__ipow__(2)))\n lindx = []\n for i in range(0, a.ndim):\n lindx.append(0)\n volume = a.size\n acf = rescale(np.real(acf),1.0/volume)\n acf[tuple(lindx)] = 0.0\n return np.real(acf)\n\ndef jacobi_step( a, n):\n# peform n steps of jacobi sharpening on a\n aft = np.fft.fftn(a)\n psf = np.fft.fftn(generate_psf(a))\n b = a.__mul__(1.0) # make a copy\n for i in range(0,n):\n delta = np.real(np.fft.ifftn( np.multiply(aft,psf)))\n# b = np.add( b, np.subtract(a,delta)) \n b = np.subtract(a,delta)\n aft = np.fft.fftn(b)\n return np.real(b)\n\ndef jacobi_step_with_kernel( a, kern, n):\n# peform n steps of jacobi sharpening on a\n# for i in range(0,10):\n# print(i,a[i,0],kern[i,0]);\n# sys.stdout.flush()\n aft = np.fft.fftn(a)\n sys.stdout.flush()\n psf = np.fft.fftn(kern)\n b = a.__mul__(1.0) # make a copy\n for i in range(0,n):\n delta = np.real(np.fft.ifftn( np.multiply(aft,psf)))\n# b = np.add( b, np.subtract(a,delta)) \n b = np.subtract(a,delta)\n aft = np.fft.fftn(b)\n return np.real(b)\n\ndef rescale(a, upper):\n amax = a.max()\n amin = a.min()\n amax -= amin\n return (a.__sub__(amin)).__mul__(upper/amax)\n# b = a.__sub__(amin)\n# c = b.__mul__(upper/amax)\n# return c\n\ndef main():\n try:\n image = Image.open(sys.argv[1])\n except IOError:\n print(\"Could not open the input \\nUsage tick_jpg inputfile.\")\n sys.exit()\n\n r,g,b = image.split()\n rr = np.real(np.array(r))\n gr = np.real(np.array(g))\n br = np.real(np.array(b))\n# too big kern = kernel.gaussian(rr, 30.0)\n# kern = kernel.gaussian(rr, 20.0)\n kern = kernel.gaussian(rr, 10.0)\n kern[0,0] = 0.0\n rp = jacobi_step_with_kernel(rr,kern,5) \n gp = jacobi_step_with_kernel(gr,kern,5) \n bp = jacobi_step_with_kernel(br,kern,5) \n rn = Image.fromarray(np.uint8(rescale(rp,255.0)))\n gn = Image.fromarray(np.uint8(rescale(gp,255.0)))\n bn = Image.fromarray(np.uint8(rescale(bp,255.0)))\n inew = Image.merge(\"RGB\",(rn,gn,bn))\n inew.save('after.jpg')\n ix = ImageChops.subtract(inew, image,0.1)\n ix.save('difference.jpg')\nmain()\n"
] | [
[
"numpy.array",
"numpy.fft.fftn",
"numpy.real",
"numpy.multiply",
"numpy.subtract"
]
] |
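Note on the entry above: jacobi_step sharpens by repeatedly subtracting a blurred copy of the signal, with the blur applied as a product in Fourier space, i.e. b <- a - IFFT(FFT(b) * FFT(kern)). A toy 1-D sketch of that same update, assuming a small hand-made smoothing kernel in place of the autocorrelation-derived PSF; all values are illustrative:

```python
# Toy 1-D version of the Fourier-space Jacobi update from the entry above:
# b <- a - IFFT(FFT(b) * FFT(kern)), iterated n times.
import numpy as np

a = np.zeros(32)
a[10:14] = 1.0                # a small plateau standing in for a blurred edge

kern = np.zeros(32)
kern[1] = kern[-1] = 0.25     # tiny symmetric smoothing kernel; kern[0] == 0,
                              # matching the acf[0] = 0 convention above

aft = np.fft.fft(a)
kft = np.fft.fft(kern)
b = a.copy()
for _ in range(5):
    delta = np.real(np.fft.ifft(aft * kft))  # circular convolution via FFT
    b = a - delta
    aft = np.fft.fft(b)

print(np.round(np.real(b), 3))
```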
201419/Optimizer-PyTorch | [
"5db2164fef8d419d4a1486c923f6835f54f0b091"
] | [
"optimistic.py"
] | [
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import state_ops\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.training import optimizer\nimport tensorflow as tf\n\n\n# Adapted from https://raw.githubusercontent.com/openai/iaf/master/tf_utils/adamax.py\n\nclass OptimisticMirrorDescentOptimizer(optimizer.Optimizer):\n def __init__(self, learning_rate=0.001, use_locking=False, name=\"OMD\"):\n super(OptimisticMirrorDescentOptimizer, self).__init__(use_locking,\n name)\n self._lr = learning_rate\n # Tensor versions of the constructor arguments, created in _prepare().\n self._lr_t = None\n\n def _prepare(self):\n self._lr_t = ops.convert_to_tensor(self._lr, name=\"learning_rate\")\n\n def _create_slots(self, var_list):\n # Create slots for the first and second moments.\n for v in var_list:\n self._zeros_slot(v, \"g\", self._name)\n\n def _apply_dense(self, grad, var):\n lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)\n\n g_t = grad\n g_t_1 = self.get_slot(var, \"g\")\n g_t = g_t_1.assign(g_t)\n\n var_update = state_ops.assign_sub(var,\n 2. * lr_t * g_t - lr_t * g_t_1) # Adam would be lr_t * g_t\n return control_flow_ops.group(*[var_update, g_t])\n\n def _apply_sparse(self, grad, var):\n raise NotImplementedError(\"Sparse gradient updates are not supported.\")\n\n\nclass OptimisticAdamOptimizer(optimizer.Optimizer):\n def __init__(self, learning_rate=0.001, beta1=0.9, beta2=0.999,\n epsilon=1e-8,\n use_locking=False, name=\"Adamirror\"):\n\n super(OptimisticAdamOptimizer, self).__init__(use_locking, name)\n self._lr = learning_rate\n self._beta1 = beta1\n self._beta2 = beta2\n\n # Tensor versions of the constructor arguments, created in _prepare().\n self._lr_t = None\n self._beta1_t = None\n self._beta2_t = None\n\n def _prepare(self):\n self._lr_t = ops.convert_to_tensor(self._lr, name=\"learning_rate\")\n self._beta1_t = ops.convert_to_tensor(self._beta1, name=\"beta1\")\n self._beta2_t = ops.convert_to_tensor(self._beta2, name=\"beta2\")\n\n def _create_slots(self, var_list):\n # Create slots for the first and second moments.\n for v in var_list:\n self._zeros_slot(v, \"m\", self._name)\n self._zeros_slot(v, \"v\", self._name)\n self._zeros_slot(v, \"g\", self._name)\n\n def _apply_dense(self, grad, var):\n lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)\n beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)\n beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)\n if var.dtype.base_dtype == tf.float16:\n eps = 1e-7 # Can't use 1e-8 due to underflow -- not sure if it makes a big difference.\n else:\n eps = 1e-8\n\n v = self.get_slot(var, \"v\")\n v_t = v.assign(beta2_t * v + (1. - beta2_t) * tf.square(grad))\n m = self.get_slot(var, \"m\")\n m_t = m.assign(beta1_t * m + (1. - beta1_t) * grad)\n v_t_hat = tf.div(v_t, 1. - beta2_t)\n m_t_hat = tf.div(m_t, 1. - beta1_t)\n\n g_t = tf.div(m_t_hat, tf.sqrt(v_t_hat) + eps)\n g_t_1 = self.get_slot(var, \"g\")\n g_t = g_t_1.assign(g_t)\n\n var_update = state_ops.assign_sub(var,\n 2. 
* lr_t * g_t - lr_t * g_t_1) # Adam would be lr_t * g_t\n return control_flow_ops.group(*[var_update, m_t, v_t, g_t])\n\n def _apply_sparse(self, grad, var):\n raise NotImplementedError(\"Sparse gradient updates are not supported.\")\n\nclass RegularizeGradientDescentOptimizer(optimizer.Optimizer):\n def __init__(self, learning_rate=0.001, lambd=0.5, use_locking=False, name=\"RGD\"):\n super(RegularizeGradientDescentOptimizer, self).__init__(use_locking,\n name)\n self._lr = learning_rate\n self._lambda = lambd\n # Tensor versions of the constructor arguments, created in _prepare().\n self._lr_t = None\n self._lambda_t = None\n\n def _prepare(self):\n self._lr_t = ops.convert_to_tensor(self._lr, name=\"learning_rate\")\n self._lambda_t = ops.convert_to_tensor(self._lambda, name=\"lambda\")\n\n def _apply_dense(self, grad, var):\n lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)\n lambda_t = math_ops.cast(self._lambda_t, var.dtype.base_dtype)\n\n g_t = grad\n var_update = state_ops.assign_sub(var,\n lr_t * (g_t - lambda_t * var) )\n return control_flow_ops.group(*[var_update])\n\n def _apply_sparse(self, grad, var):\n raise NotImplementedError(\"Sparse gradient updates are not supported.\")\n"
] | [
[
"tensorflow.python.ops.math_ops.cast",
"tensorflow.python.ops.control_flow_ops.group",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.sqrt",
"tensorflow.div",
"tensorflow.python.ops.state_ops.assign_sub",
"tensorflow.square"
]
] |
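Note on the entry above: the dense update in OptimisticMirrorDescentOptimizer is assign_sub(var, 2*lr*g_t - lr*g_{t-1}), that is, an ordinary gradient step plus a correction that partially undoes the previous gradient, which is the "optimistic" part. A NumPy sketch of the same rule on a toy quadratic; the learning rate and starting point are arbitrary illustration values:

```python
# NumPy sketch of the optimistic mirror descent update implemented above:
# w <- w - (2 * lr * g_t - lr * g_prev), with g_prev initialised to zero,
# matching the zero-filled "g" slot created by _create_slots.
import numpy as np


def grad(w):
    return 2.0 * w          # gradient of f(w) = w**2


lr = 0.1
w = np.array(5.0)
g_prev = np.zeros_like(w)   # the "g" slot starts at zero

for step in range(50):
    g_t = grad(w)
    w = w - (2.0 * lr * g_t - lr * g_prev)
    g_prev = g_t            # the slot keeps the previous gradient

print(float(w))             # converges toward the minimiser at 0
```

OptimisticAdamOptimizer applies the identical two-term pattern, with g_t replaced by the bias-corrected Adam direction m_hat / (sqrt(v_hat) + eps).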
long-long-float/py-videocore | [
"f2a0ef174a936f7a6e11a9e24f34fb555acb84c7"
] | [
"tests/test_vpm.py"
] | [
"'Test of VPM read and write'\n\nimport numpy as np\n\nfrom videocore.assembler import qpu\nfrom videocore.driver import Driver\n\n#=================================== 32 bit ===================================\n\n@qpu\ndef horizontal_32bit_read(asm):\n mov(ra0, uniform)\n ldi(rb0, 4*16*16)\n for i in range(4):\n setup_dma_load(Y=16*i, mode='32bit horizontal', nrows=16)\n start_dma_load(ra0)\n iadd(ra0, ra0, rb0)\n wait_dma_load()\n\n setup_vpm_read(Y=16, nrows=16)\n setup_vpm_write(Y=0)\n for i in range(16):\n mov(r0, vpm)\n mov(vpm, r0)\n\n setup_dma_store(mode='32bit horizontal', nrows=16)\n start_dma_store(uniform)\n wait_dma_store()\n exit()\n\ndef test_horizontal_32bit_read():\n with Driver() as drv:\n X = drv.alloc((64, 16), dtype='uint32')\n X[:] = np.arange(64*16).reshape(64, 16).astype('uint32')\n Y = drv.alloc((16, 16), dtype='uint32')\n\n drv.execute(\n n_threads=1,\n program=drv.program(horizontal_32bit_read),\n uniforms=[X.address, Y.address]\n )\n\n assert np.all(X[16:32] == Y)\n\n@qpu\ndef horizontal_32bit_write(asm):\n mov(ra0, uniform)\n ldi(rb0, 4*16*16)\n for i in range(4):\n setup_dma_load(Y=16*i, mode='32bit horizontal', nrows=16)\n start_dma_load(ra0)\n iadd(ra0, ra0, rb0)\n wait_dma_load()\n\n setup_vpm_read(Y=0, nrows=16)\n setup_vpm_write(Y=16)\n for i in range(16):\n mov(r0, vpm)\n mov(vpm, r0)\n\n setup_dma_store(Y=16, mode='32bit horizontal', nrows=16)\n start_dma_store(uniform)\n wait_dma_store()\n exit()\n\ndef test_horizontal_32bit_write():\n with Driver() as drv:\n X = drv.alloc((64, 16), dtype='uint32')\n X[:] = np.arange(64*16).reshape(64, 16).astype('uint32')\n Y = drv.alloc((16, 16), dtype='uint32')\n\n drv.execute(\n n_threads=1,\n program=drv.program(horizontal_32bit_write),\n uniforms=[X.address, Y.address]\n )\n\n\n assert np.all(X[:16] == Y)\n\n@qpu\ndef vertical_32bit_read(asm):\n mov(ra0, uniform)\n ldi(rb0, 4*16*16)\n for i in range(4):\n setup_dma_load(Y=16*i, mode='32bit horizontal', nrows=16)\n start_dma_load(ra0)\n iadd(ra0, ra0, rb0)\n wait_dma_load()\n\n setup_vpm_read(nrows=16, Y=16, X=0, mode='32bit vertical')\n setup_vpm_write(Y=0, X=0, mode='32bit vertical')\n for i in range(16):\n mov(r0, vpm)\n mov(vpm, r0)\n\n setup_dma_store(mode='32bit horizontal', nrows=16)\n start_dma_store(uniform)\n wait_dma_store()\n exit()\n\ndef test_vertical_32bit_read():\n with Driver() as drv:\n X = drv.alloc((64, 16), dtype='uint32')\n X[:] = np.arange(64*16).reshape(64, 16).astype('uint32')\n Y = drv.alloc((16, 16), dtype='uint32')\n\n drv.execute(\n n_threads=1,\n program=drv.program(vertical_32bit_read),\n uniforms=[X.address, Y.address]\n )\n\n assert np.all(X[16:32] == Y)\n\n@qpu\ndef vertical_32bit_write(asm):\n mov(ra0, uniform)\n ldi(rb0, 4*16*16)\n for i in range(4):\n setup_dma_load(Y=16*i, mode='32bit horizontal', nrows=16)\n start_dma_load(ra0)\n iadd(ra0, ra0, rb0)\n wait_dma_load()\n\n setup_vpm_read(nrows=16, Y=0, X=0, mode='32bit vertical')\n setup_vpm_write(Y=16, X=0, mode='32bit vertical')\n for i in range(16):\n mov(r0, vpm)\n mov(vpm, r0)\n\n setup_dma_store(Y=16, mode='32bit horizontal', nrows=16)\n start_dma_store(uniform)\n wait_dma_store()\n exit()\n\ndef test_vertical_32bit_write():\n with Driver() as drv:\n X = drv.alloc((64, 16), dtype='uint32')\n X[:] = np.arange(64*16).reshape(64, 16).astype('uint32')\n Y = drv.alloc((16, 16), dtype='uint32')\n\n drv.execute(\n n_threads=1,\n program=drv.program(vertical_32bit_write),\n uniforms=[X.address, Y.address]\n )\n\n assert np.all(X[:16] == Y)\n"
] | [
[
"numpy.all",
"numpy.arange"
]
] |
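Note on the entry above: the assertions encode the VPM block geometry. Four DMA loads of 16 rows fill the 64-row VPM with X, the QPU copies 16 rows between Y offsets, and a single DMA store brings 16 rows back, so the read tests must see X[16:32] and the write tests X[:16]. A host-side NumPy sketch of that bookkeeping, with the VPM modelled as a plain array and no GPU involved:

```python
# Host-side sketch of the block layout the tests above assert on.
import numpy as np

X = np.arange(64 * 16, dtype='uint32').reshape(64, 16)

# four DMA loads of 16 rows each amount to copying all 64 rows of X
vpm = X.copy()

# horizontal_32bit_read: read 16 rows at Y=16, write them at Y=0, store Y=0
vpm[0:16] = vpm[16:32]
Y = vpm[0:16].copy()
assert np.all(X[16:32] == Y)

# horizontal_32bit_write: reload, read rows at Y=0, write at Y=16, store Y=16
vpm = X.copy()
vpm[16:32] = vpm[0:16]
Y = vpm[16:32].copy()
assert np.all(X[:16] == Y)
```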
lee1043/assessed-cloud-fbks | [
"34829616644365ccab34488592a58288d7f068e5"
] | [
"code/zelinka_analysis.py"
] | [
"import MV2 as MV\nimport numpy as np\nimport MV2 as MV\n\n###########################################################################\ndef map_SWkern_to_lon(Ksw,albcsmap):\n \"\"\"\n Map each location's clear-sky surface albedo to the correct albedo bin\n \"\"\"\n\n albcsmap=MV.masked_greater(albcsmap,1.0)\n albcsmap=MV.masked_less(albcsmap,0.0)\n from scipy.interpolate import interp1d\n # Ksw is size 12,7,7,lats,3\n # albcsmap is size A,lats,lons\n albcs=np.arange(0.0,1.5,0.5) \n A=albcsmap.shape[0]\n TT=Ksw.shape[1]\n PP=Ksw.shape[2]\n lenlat=Ksw.shape[3]\n lenlon=albcsmap.shape[2]\n SWkernel_map = MV.zeros((A,TT,PP,lenlat,lenlon))\n SWkernel_map = MV.masked_where(SWkernel_map==0,SWkernel_map)\n \n for M in range(A):\n MM=M\n while MM>11:\n MM=MM-12\n for LA in range(lenlat):\n alon=albcsmap[M,LA,:] \n # interp1d can't handle mask but it can deal with NaN (?)\n try:\n alon2=MV.where(alon.mask,np.nan,alon) \n except:\n alon2=alon\n if np.ma.count(alon2)>1: # at least 1 unmasked value\n if len(np.where(Ksw[MM,:,:,LA,:]>0))==0:\n SWkernel_map[M,:,:,LA,:] = 0\n else:\n f = interp1d(albcs,Ksw[MM,:,:,LA,:],axis=2)\n ynew = f(alon2.data)\n ynew=MV.masked_where(alon2.mask,ynew)\n SWkernel_map[M,:,:,LA,:] = ynew\n else:\n continue\n\n\n return SWkernel_map\n\n###########################################################################\ndef KT_decomposition_general(c1,c2,Klw,Ksw):\n \"\"\"\n this function takes in a (month,TAU,CTP,lat,lon) matrix and performs the \n decomposition of Zelinka et al 2013 doi:10.1175/JCLI-D-12-00555.1\n \"\"\"\n\n \n # To help with broadcasting, move month axis to the end so that TAU,CTP are first\n c1 = np.array(np.moveaxis(c1,0,-1))\n c2 = np.array(np.moveaxis(c2,0,-1))\n Klw = np.moveaxis(Klw,0,-1)\n Ksw = np.moveaxis(Ksw,0,-1)\n \n sum_c=np.ma.sum(np.ma.sum(c1,0),0) # Eq. B2\n dc = c2-c1 \n sum_dc=np.ma.sum(np.ma.sum(dc,0),0)\n dc_prop = c1*(sum_dc/sum_c)\n dc_star = dc - dc_prop # Eq. B1\n\n # LW components\n Klw0 = np.ma.sum(np.ma.sum(Klw*c1/sum_c,0),0) # Eq. B4\n Klw_prime = Klw - Klw0 # Eq. B3\n B7a = np.ma.sum(c1/sum_c,1,keepdims=True) # need to keep this as [TAU,1,...]\n Klw_p_prime = np.ma.sum(Klw_prime*B7a,0) # Eq. B7\n Klw_t_prime = np.ma.sum(Klw_prime*np.ma.sum(c1/sum_c,0),1) # Eq. B8 \n Klw_resid_prime = Klw_prime - np.expand_dims(Klw_p_prime,0) - np.expand_dims(Klw_t_prime,1) # Eq. B9\n dRlw_true = np.ma.sum(np.ma.sum(Klw*dc,1),0) # LW total\n dRlw_prop = Klw0*sum_dc # LW amount component\n dRlw_dctp = np.ma.sum(Klw_p_prime*np.ma.sum(dc_star,0),0) # LW altitude component\n dRlw_dtau = np.ma.sum(Klw_t_prime*np.ma.sum(dc_star,1),0) # LW optical depth component\n dRlw_resid = np.ma.sum(np.ma.sum(Klw_resid_prime*dc_star,1),0) # LW residual\n dRlw_sum = dRlw_prop + dRlw_dctp + dRlw_dtau + dRlw_resid # sum of LW components -- should equal LW total\n\n # SW components\n Ksw0 = np.ma.sum(np.ma.sum(Ksw*c1/sum_c,0),0) # Eq. B4\n Ksw_prime = Ksw - Ksw0 # Eq. B3\n B7a = np.ma.sum(c1/sum_c,1,keepdims=True) # need to keep this as [TAU,1,...]\n Ksw_p_prime = np.ma.sum(Ksw_prime*B7a,0) # Eq. B7\n Ksw_t_prime = np.ma.sum(Ksw_prime*np.ma.sum(c1/sum_c,0),1) # Eq. B8 \n Ksw_resid_prime = Ksw_prime - np.expand_dims(Ksw_p_prime,0) - np.expand_dims(Ksw_t_prime,1) # Eq. 
B9 \n dRsw_true = np.ma.sum(np.ma.sum(Ksw*dc,1),0) # SW total\n dRsw_prop = Ksw0*sum_dc # SW amount component\n dRsw_dctp = np.ma.sum(Ksw_p_prime*np.ma.sum(dc_star,0),0) # SW altitude component\n dRsw_dtau = np.ma.sum(Ksw_t_prime*np.ma.sum(dc_star,1),0) # SW optical depth component\n dRsw_resid = np.ma.sum(np.ma.sum(Ksw_resid_prime*dc_star,1),0) # SW residual\n dRsw_sum = dRsw_prop + dRsw_dctp + dRsw_dtau + dRsw_resid # sum of SW components -- should equal SW total\n\n # Set SW fields to zero where the sun is down\n RR = Ksw0.mask\n dRsw_true = MV.where(RR,0,dRsw_true)\n dRsw_prop = MV.where(RR,0,dRsw_prop)\n dRsw_dctp = MV.where(RR,0,dRsw_dctp)\n dRsw_dtau = MV.where(RR,0,dRsw_dtau)\n dRsw_resid = MV.where(RR,0,dRsw_resid)\n\n # Move month axis back to the beginning \n dRlw_true = MV.array(np.moveaxis(dRlw_true,-1,0))\n dRlw_prop = MV.array(np.moveaxis(dRlw_prop,-1,0))\n dRlw_dctp = MV.array(np.moveaxis(dRlw_dctp,-1,0))\n dRlw_dtau = MV.array(np.moveaxis(dRlw_dtau,-1,0))\n dRlw_resid = MV.array(np.moveaxis(dRlw_resid,-1,0))\n dRsw_true = MV.array(np.moveaxis(dRsw_true,-1,0))\n dRsw_prop = MV.array(np.moveaxis(dRsw_prop,-1,0))\n dRsw_dctp = MV.array(np.moveaxis(dRsw_dctp,-1,0))\n dRsw_dtau = MV.array(np.moveaxis(dRsw_dtau,-1,0))\n dRsw_resid = MV.array(np.moveaxis(dRsw_resid,-1,0))\n dc_star = MV.array(np.moveaxis(dc_star,-1,0))\n dc_prop = MV.array(np.moveaxis(dc_prop,-1,0))\n \n return (dRlw_true,dRlw_prop,dRlw_dctp,dRlw_dtau,dRlw_resid,dRsw_true,dRsw_prop,dRsw_dctp,dRsw_dtau,dRsw_resid,dc_star,dc_prop)\n"
] | [
[
"scipy.interpolate.interp1d",
"numpy.ma.sum",
"numpy.where",
"numpy.arange",
"numpy.moveaxis",
"numpy.ma.count",
"numpy.expand_dims"
]
] |
zxxia/RL-CC | [
"d3d3be0097d69ee07b06363ad531cf2479029d74"
] | [
"src/simulator/network_simulator/pcc/aurora/aurora_old.py"
] | [
"###########################################################################################\n# Implementation of Implicit Quantile Networks (IQN)\n# Author for codes: sungyubkim, Chu Kun([email protected])\n# Paper: https://arxiv.org/abs/1806.06923v1\n# Reference: https://github.com/sungyubkim/Deep_RL_with_pytorch\n###########################################################################################\n\nimport csv\nimport logging\nimport multiprocessing as mp\nimport os\nfrom syslog import LOG_SYSLOG\nimport time\nimport types\nfrom typing import List, Tuple, Union\nimport warnings\nwarnings.simplefilter(action='ignore', category=FutureWarning)\n\nfrom mpi4py.MPI import COMM_WORLD\n\nimport gym\nimport numpy as np\nimport tensorflow as tf\nimport tqdm\ntf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)\n\nfrom stable_baselines import PPO1, logger\nfrom stable_baselines.common.callbacks import BaseCallback\nfrom stable_baselines.common.policies import FeedForwardPolicy\nfrom stable_baselines.common.schedules import LinearSchedule\n\nfrom simulator.network_simulator.pcc.aurora import aurora_environment\nfrom simulator.network_simulator.pcc.aurora.schedulers import Scheduler, TestScheduler\nfrom simulator.network_simulator.constants import BITS_PER_BYTE, BYTES_PER_PACKET\nfrom simulator.trace import generate_trace, Trace, generate_traces\nfrom simulator.network_simulator.pcc.aurora.replay_memory import ReplayBuffer, PrioritizedReplayBuffer\nfrom common.utils import set_tf_loglevel, pcc_aurora_reward\nfrom plot_scripts.plot_packet_log import plot\nfrom plot_scripts.plot_time_series import plot as plot_simulation_log\n\n\nif type(tf.contrib) != types.ModuleType: # if it is LazyLoader\n tf.contrib._warning = None\n\nset_tf_loglevel(logging.FATAL)\n\nimport gym\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\n\n\nimport random\nimport os\nimport pickle\nimport time\nfrom collections import deque\nimport matplotlib.pyplot as plt\n\n# Parameters\nimport argparse\n\n\n'''DQN settings'''\n# target policy sync interval\nTARGET_REPLACE_ITER = 1\n# simulator steps for start learning\nLEARN_START = int(1e+3)\n# (prioritized) experience replay memory size\nMEMORY_CAPACITY = int(1e+5)\n# simulator steps for learning interval\nLEARN_FREQ = 1\n# quantile numbers for IQN\nN_QUANT = 8\nN_ACTION = 32\n# quantiles\nQUANTS = np.linspace(0.0, 1.0, N_QUANT + 1)[1:]\n\n'''Environment Settings'''\n# gamma for MDP\nGAMMA = 0.99\n\n\n'''Training settings'''\n# mini-batch size\nBATCH_SIZE = 32\n# learning rage\nLR = 2e-6\n\n\n'''Save&Load Settings'''\n\n# check save/load\nSAVE = True\nLOAD = False\n# paths for predction net, target net, result log\nPRED_PATH = './model/iqn_pred_net_risk.pkl'\nTARGET_PATH = './model/iqn_target_net_risk.pkl'\n\n\nACTION_MAP = [-1.0, -0.9, -0.8, -0.7, -0.6,\n -0.5, -0.45, -0.4, -0.35, \n -0.3, -0.25, -0.2, -0.15,\n -0.1, -0.05, -0.01,\n 0.01, 0.05, 0.1,\n 0.15, 0.2, 0.25, 0.3,\n 0.35, 0.4, 0.45, 0.5,\n 0.6, 0.7, 0.8, 0.9, 1.0,]\n\n'''\ndef calculate_huber_loss(td_errors, k=1.0):\n \"\"\"\n Calculate huber loss element-wisely depending on kappa k.\n \"\"\"\n loss = torch.where(td_errors.abs() <= k, 0.5 * td_errors.pow(2), k * (td_errors.abs() - 0.5 * k))\n #assert loss.shape == (td_errors.shape[0], 8, 8), \"huber loss has wrong shape\"\n return loss\n'''\n\nclass NoisyLinear(nn.Module):\n def __init__(self, in_features, out_features, sigma=1):\n super(NoisyLinear, self).__init__()\n\n # Learnable parameters.\n self.mu_W = 
nn.Parameter(\n torch.FloatTensor(out_features, in_features))\n self.sigma_W = nn.Parameter(\n torch.FloatTensor(out_features, in_features))\n self.mu_bias = nn.Parameter(torch.FloatTensor(out_features))\n self.sigma_bias = nn.Parameter(torch.FloatTensor(out_features))\n\n # Factorized noise parameters.\n self.register_buffer('eps_p', torch.FloatTensor(in_features))\n self.register_buffer('eps_q', torch.FloatTensor(out_features))\n\n self.in_features = in_features\n self.out_features = out_features\n self.sigma = sigma\n\n self.reset()\n self.sample()\n\n def reset(self):\n bound = 1 / np.sqrt(self.in_features)\n self.mu_W.data.uniform_(-bound, bound)\n self.mu_bias.data.uniform_(-bound, bound)\n self.sigma_W.data.fill_(self.sigma / np.sqrt(self.in_features))\n self.sigma_bias.data.fill_(self.sigma / np.sqrt(self.out_features))\n\n def f(self, x):\n return x.normal_().sign().mul(x.abs().sqrt())\n\n def sample(self):\n self.eps_p.copy_(self.f(self.eps_p))\n self.eps_q.copy_(self.f(self.eps_q))\n\n def forward(self, x):\n if self.training:\n weight = self.mu_W + self.sigma_W * self.eps_q.ger(self.eps_p)\n bias = self.mu_bias + self.sigma_bias * self.eps_q.clone()\n else:\n weight = self.mu_W\n bias = self.mu_bias\n\n return F.linear(x, weight, bias)\n\nclass ConvNet(nn.Module):\n def __init__(self, alpha = 1):\n super(ConvNet, self).__init__()\n\n # Noisy\n linear = NoisyLinear\n\n self.phi = linear(N_QUANT, 30)\n self.fc = linear(30, 64)\n self.fc_m = linear(64, 64)\n \n # action value distribution\n self.fc_q = linear(64, N_ACTION)\n self.alpha = alpha\n \n def forward(self, x):\n batch_size = x.shape[0]\n\n # Rand Initlialization\n taus = torch.rand(batch_size, N_QUANT)\n\n # Risk\n taus = taus * self.alpha\n \n i_pi = np.pi * torch.arange(start=1, end=N_QUANT+1).view(1, 1, N_QUANT)\n\n # Calculate cos(i * \\pi * \\tau).\n cosines = torch.cos(\n taus.view(batch_size, N_QUANT, 1) * i_pi\n ).view(batch_size * N_QUANT, N_QUANT)\n\n # Calculate embeddings of taus.\n # phi_j(tau) = RELU(sum(cos(π*i*τ)*w_ij + b_j))\n rand_feat = F.relu(self.phi(cosines).view(batch_size, N_QUANT, 30))\n\n #logger.log(rand_feat.shape)\n x = x.view(x.size(0), -1).unsqueeze(1) # (m, 1, 30)\n #logger.log(x)\n # Zτ(x,a) ≈ f(ψ(x) @ φ(τ))a @表示按元素相乘\n x = x * rand_feat # (m, N_QUANT, 30)\n #logger.log(x.shape)\n x = F.relu(self.fc_m(F.relu(self.fc(x)))) # (m, N_QUANT, 64)\n #logger.log(x.shape)\n\n # note that output of IQN is quantile values of value distribution\n action_value = self.fc_q(x).transpose(1, 2) # (m, N_ACTIONS, N_QUANT)\n\n return action_value, taus\n\n def set_train(self):\n for m in self.modules():\n if isinstance(m, NoisyLinear):\n m.training = True\n \n def set_test(self):\n for m in self.modules():\n if isinstance(m, NoisyLinear):\n m.training = False\n\n def sample_noise(self):\n for m in self.modules():\n if isinstance(m, NoisyLinear):\n m.sample()\n\n def save(self, PATH):\n torch.save(self.state_dict(),PATH)\n\n def load(self, PATH):\n self.load_state_dict(torch.load(PATH))\n\nclass DQN(object):\n def __init__(self):\n self.pred_net, self.target_net = ConvNet(), ConvNet()\n # sync evac target\n self.update_target(self.target_net, self.pred_net, 1.0)\n \n # simulator step counter\n self.memory_counter = 0\n # target network step counter\n self.learn_step_counter = 0\n\n self.optimizer = torch.optim.Adam(self.pred_net.parameters(), lr=LR)\n\n #self.replay_buffer = ReplayBuffer(MEMORY_CAPACITY)\n # replay\n self.replay_buffer = PrioritizedReplayBuffer(MEMORY_CAPACITY, 0.6)\n self.beta_schedule = 
LinearSchedule(2e+4, initial_p=0.4, final_p=1.0)\n \n # Update target network\n def update_target(self, target, pred, update_rate):\n # update target network parameters using predcition network\n for target_param, pred_param in zip(target.parameters(), pred.parameters()):\n target_param.data.copy_((1.0 - update_rate) \\\n * target_param.data + update_rate*pred_param.data)\n \n def set_train(self):\n self.pred_net.set_train()\n self.target_net.set_train()\n \n def set_test(self):\n self.pred_net.set_test()\n self.target_net.set_test()\n\n def save_model(self):\n # save prediction network and target network\n self.pred_net.save(PRED_PATH)\n self.target_net.save(TARGET_PATH)\n\n def load_model(self):\n # load prediction network and target network\n self.pred_net.load('./model/iqn_pred_net.pkl')\n self.target_net.load('./model/iqn_target_net.pkl')\n \n def load_model_risk(self):\n self.pred_net.load('./model/iqn_pred_net_risk.pkl')\n self.target_net.load('./model/iqn_target_net_risk.pkl')\n\n def choose_action(self, x, EPSILON):\n \t# x:state\n x = torch.FloatTensor(x)\n x = torch.reshape(x, (1, 30))\n\n # epsilon-greedy\n if np.random.uniform() >= EPSILON:\n # greedy case\n #logger.log(x)\n action_value, tau = self.pred_net(x) \t# (N_ENVS, N_ACTIONS, N_QUANT)\n\n #logger.log(\"Value: \", action_value)\n # logger.log(\"Tau: \", tau)\n\n # Min\n action_value = action_value.mean(dim=2)\n #action_value, _ = torch.min(action_value, dim=2)\n action = torch.argmax(action_value, dim=1).data.cpu().numpy()\n else:\n # random exploration case\n action = np.random.randint(0, N_ACTION)\n return int(action)\n\n def store_transition(self, s, a, r, s_, done):\n self.memory_counter += 1\n self.replay_buffer.add(s, a, r, s_, float(done))\n\n '''\n def learn(self):\n self.learn_step_counter += 1\n\n # target parameter update\n if self.learn_step_counter % TARGET_REPLACE_ITER == 0:\n self.update_target(self.target_net, self.pred_net, 1e-3)\n \n # Noisy\n self.pred_net.sample_noise()\n self.target_net.sample_noise()\n\n # b_s, b_a, b_r, b_s_, b_d = self.replay_buffer.sample(BATCH_SIZE) \n # b_w, b_idxes = np.ones_like(b_r), None\n # replay\n experience = self.replay_buffer.sample(BATCH_SIZE, beta=self.beta_schedule.value(self.learn_step_counter))\n (states, actions, rewards, next_states, dones, idx, weights) = experience\n \n states = torch.FloatTensor(states)\n next_states = torch.FloatTensor(np.float32(next_states))\n actions = torch.LongTensor(actions).unsqueeze(1)\n rewards = torch.FloatTensor(rewards).unsqueeze(1) \n dones = torch.FloatTensor(dones).unsqueeze(1)\n weights = torch.FloatTensor(weights).unsqueeze(1)\n\n Q_targets_next, _ = self.target_net(next_states)\n Q_targets_next = Q_targets_next.detach() #(batch, num_tau, actions)\n q_t_n = Q_targets_next.mean(dim=1)\n # calculate log-pi \n logsum = torch.logsumexp(\\\n (Q_targets_next - Q_targets_next.max(2)[0].unsqueeze(-1))/self.entropy_tau, 2).unsqueeze(-1) #logsum trick\n tau_log_pi_next = Q_targets_next - Q_targets_next.max(2)[0].unsqueeze(-1) - self.entropy_tau*logsum\n \n pi_target = F.softmax(q_t_n/self.entropy_tau, dim=1).unsqueeze(1)\n\n Q_target = (self.GAMMA**self.n_step * (pi_target * (Q_targets_next-tau_log_pi_next)*(1 - dones.unsqueeze(-1))).sum(2)).unsqueeze(1)\n\n q_k_target = self.qnetwork_target.get_qvalues(states).detach()\n v_k_target = q_k_target.max(1)[0].unsqueeze(-1) # (8,8,1)\n tau_log_pik = q_k_target - v_k_target - self.entropy_tau*torch.logsumexp(\\\n (q_k_target - v_k_target)/self.entropy_tau, 1).unsqueeze(-1)\n\n 
munchausen_addon = tau_log_pik.gather(1, actions) #.unsqueeze(-1).expand(self.BATCH_SIZE, self.N, 1)\n \n # calc munchausen reward:\n munchausen_reward = (rewards + self.alpha*torch.clamp(munchausen_addon, min=self.lo, max=0)).unsqueeze(-1)\n # Compute Q targets for current states \n Q_targets = munchausen_reward + Q_target\n # Get expected Q values from local model\n q_k, taus = self.qnetwork_local(states, self.N)\n Q_expected = q_k.gather(2, actions.unsqueeze(-1).expand(self.BATCH_SIZE, self.N, 1))\n\n # Quantile Huber loss\n td_error = Q_targets - Q_expected\n huber_l = calculate_huber_loss(td_error, 1.0)\n quantil_l = abs(taus -(td_error.detach() < 0).float()) * huber_l / 1.0\n \n loss = quantil_l.sum(dim=1).mean(dim=1, keepdim=True)* weights # , keepdim=True if per weights get multipl\n loss = loss.mean()\n \n # backprop loss\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n # replay\n td_error = td_error.sum(dim=1).mean(dim=1,keepdim=True)\n self.replay_buffer.update_priorities(idx, abs(td_error.data.cpu().numpy()) + 1e-6)\n\n return loss\n '''\n\n def learn(self):\n self.learn_step_counter += 1\n\n # target parameter update\n if self.learn_step_counter % TARGET_REPLACE_ITER == 0:\n self.update_target(self.target_net, self.pred_net, 1e-3)\n \n # Noisy\n self.pred_net.sample_noise()\n self.target_net.sample_noise()\n\n # b_s, b_a, b_r, b_s_, b_d = self.replay_buffer.sample(BATCH_SIZE) \n # b_w, b_idxes = np.ones_like(b_r), None\n # replay\n experience = self.replay_buffer.sample(BATCH_SIZE, beta=self.beta_schedule.value(self.learn_step_counter))\n (b_s, b_a, b_r, b_s_, b_d, b_w, b_idxes) = experience\n \n b_s = torch.FloatTensor(b_s)\n b_a = torch.LongTensor(b_a)\n b_r = torch.FloatTensor(b_r)\n b_s_ = torch.FloatTensor(b_s_)\n b_d = torch.FloatTensor(b_d)\n\n # action value distribution prediction\n q_eval, q_eval_tau = self.pred_net(b_s) \t# (m, N_ACTIONS, N_QUANT), (N_QUANT, 1)\n mb_size = q_eval.size(0)\n # squeeze去掉第一维\n # torch.stack函数是将矩阵进行叠加,默认dim=0,即将[]中的n个矩阵变成n维\n # index_select函数是进行索引查找。\n q_eval = torch.stack([q_eval[i].index_select(0, b_a[i]) for i in range(mb_size)]).squeeze(1) \n # (m, N_QUANT)\n # 在q_eval第二维后面加一个维度\n q_eval = q_eval.unsqueeze(2) \t\t\t\t# (m, N_QUANT, 1)\n # note that dim 1 is for present quantile, dim 2 is for next quantile\n \n # get next state value\n q_next, q_next_tau = self.target_net(b_s_) \t\t\t\t# (m, N_ACTIONS, N_QUANT), (N_QUANT, 1)\n\n # Min\n best_actions = q_next.mean(dim=2).argmax(dim=1) \t\t# (m)\n #action_value, _ = torch.min(q_next, dim=2)\n #best_actions = action_value.argmax(dim = 1)\n\n q_next = torch.stack([q_next[i].index_select(0, best_actions[i]) for i in range(mb_size)]).squeeze(1)\n # q_nest: (m, N_QUANT)\n # q_target = R + gamma * (1 - terminate) * q_next\n q_target = b_r.unsqueeze(1) + GAMMA * (1. 
-b_d.unsqueeze(1)) * q_next \n # q_target: (m, N_QUANT)\n # detach表示该Variable不更新参数\n q_target = q_target.unsqueeze(1).detach() # (m , 1, N_QUANT)\n\n # quantile Huber loss\n u = q_target.detach() - q_eval \t\t# (m, N_QUANT, N_QUANT)\n #tau = q_eval_tau.unsqueeze(0) \t\t# (1, N_QUANT, 1)\n # note that tau is for present quantile\n # w = |tau - delta(u<0)|\n\n weight = torch.abs(q_eval_tau[..., None] - u.le(0.).float()) # (m, N_QUANT, N_QUANT)\n loss = F.smooth_l1_loss(q_eval, q_target.detach(), reduction='none')\n # (m, N_QUANT, N_QUANT)\n loss = torch.mean(weight * loss, dim=1).mean(dim=1)\n \n # calculate importance weighted loss\n b_w = torch.Tensor(b_w)\n loss = torch.mean(b_w * loss)\n \n # backprop loss\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n # replay\n u = u.sum(dim=1).mean(dim=1,keepdim=True)\n self.replay_buffer.update_priorities(b_idxes, abs(u.data.cpu().numpy()) + 1e-6)\n\n return loss\n\ndef Test(config_file):\n traces = generate_traces(config_file, 20, duration=30)\n traces = generate_traces(config_file, 100, duration=30)\n\n distri = [0 for i in range(N_ACTION)]\n\n iqn = DQN()\n #iqn.load_model()\n #iqn.set_test()\n\n iqn_risk = DQN()\n iqn_risk.load_model_risk()\n iqn_risk.set_test()\n \n rewards = [[],[]]\n dqns = [iqn, iqn_risk]\n\n\n for i in range(1, 2):\n for trace in traces:\n # logger.log(trace.bandwidths)\n test_scheduler = TestScheduler(trace)\n env = gym.make('AuroraEnv-v0', trace_scheduler=test_scheduler)\n\n done = False\n s = np.array(env.reset())\n\n while not done:\n a = dqns[i].choose_action(s, 0)\n s, r, done, infos = env.step(ACTION_MAP[int(a)])\n distri[a] += 1\n\n rewards[i].append(r)\n\n '''\n sender_mi = env.senders[0].history.back() #get_run_data()\n throughput = sender_mi.get(\"recv rate\") # bits/sec\n send_rate = sender_mi.get(\"send rate\") # bits/sec\n latency = sender_mi.get(\"avg latency\")\n loss = sender_mi.get(\"loss ratio\")\n send_ratio = sender_mi.get('send ratio')\n\n \n logger.log(\"Thp: \", throughput,\n \" | Send Rate: \", send_rate,\n \" | Action: \", ACTION_MAP[int(a)],\n \" | Send Raio: \", send_ratio,\n \" | Latency: \", latency,\n \" | Loss: \", loss,\n \" | Real Reward: \", r)\n '''\n\n rewards[i].sort()\n \n for ratio in [0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.95, 0.99]:\n logger.log(\"Ratio: \", ratio)\n # logger.log(\"IQN: \", rewards[0][int(ratio * len(rewards[0]))])\n logger.log(\"IQN: \", rewards[1][int(ratio * len(rewards[1]))])\n \n for i in range(N_ACTION):\n logger.log(\"Action \", i, \" : \", distri[i])\n\n\ndef Validation(traces, dqn: DQN):\n dqn.set_test()\n rewards = []\n\n for trace in traces:\n test_scheduler = TestScheduler(trace)\n env = gym.make('AuroraEnv-v0', trace_scheduler=test_scheduler)\n\n done = False\n s = np.array(env.reset())\n\n while not done:\n a = dqn.choose_action(s, 0)\n s, r, done, infos = env.step(ACTION_MAP[int(a)])\n\n rewards.append(r)\n \n return sum(rewards) / len(rewards)\n\n\nclass Aurora():\n cc_name = 'aurora'\n def __init__(self, seed: int, log_dir: str, timesteps_per_actorbatch: int,\n pretrained_model_path: str = \"\", gamma: float = 0.99,\n tensorboard_log=None, record_pkt_log: bool = False):\n self.record_pkt_log = record_pkt_log\n self.comm = COMM_WORLD\n self.seed = seed\n self.log_dir = log_dir\n self.pretrained_model_path = pretrained_model_path\n self.steps_trained = 0\n self.model = DQN()\n\n def train(self, config_file: str, total_timesteps: int,\n train_scheduler: Scheduler,\n tb_log_name: str = \"\", # 
training_traces: List[Trace] = [],\n validation_traces: List[Trace] = [],\n # real_trace_prob: float = 0\n ):\n\n env = gym.make('AuroraEnv-v0', trace_scheduler=train_scheduler)\n env.seed(self.seed)\n\n dqn = DQN()\n dqn.set_train()\n\n test_reward = -250\n\n validation_traces = []\n for i in range(20):\n validation_traces.append(Trace.load_from_file(\"./validation/\" + str(i)))\n\n # model load with check\n if LOAD and os.path.isfile(PRED_PATH) and os.path.isfile(TARGET_PATH):\n dqn.load_model()\n logger.log('Load complete!')\n else:\n result = []\n logger.log('Initialize results!')\n\n logger.log('Collecting experience...')\n\n # check learning time\n start_time = time.time()\n number = 0\n loss = []\n\n EPSILON = 1.0\n # Total simulation step\n STEP_NUM = int(1e+5)\n # save frequency\n SAVE_FREQ = int(2e+1)\n\n for step in range(1, STEP_NUM+1):\n done = False\n s = np.array(env.reset())\n\n while not done:\n # Noisy\n a = dqn.choose_action(s, 0)\n\n # take action and get next state\n s_, r, done, infos = env.step(ACTION_MAP[int(a)])\n s_ = np.array(s_)\n\n # clip rewards for numerical stability\n # clip_r = np.sign(r)\n\n # annealing the epsilon(exploration strategy)\n if number <= int(1e+4):\n EPSILON -= 0.9/1e+4\n elif number <= int(2e+4):\n EPSILON -= 0.09/1e+4\n \n number += 1\n\n # store the transition\n dqn.store_transition(s, a, r, s_, done)\n\n # if memory fill 50K and mod 4 = 0(for speed issue), learn pred net\n if (LEARN_START <= dqn.memory_counter) and (dqn.memory_counter % LEARN_FREQ == 0):\n loss.append(dqn.learn().item())\n \n s = s_\n\n # logger.log log and save\n if step % SAVE_FREQ == 0:\n time_interval = round(time.time() - start_time, 2)\n\n # logger.log log\n logger.log('Used Step: ', dqn.memory_counter,\n '| Used Trace: ', step,\n '| Used Time:', time_interval,\n '| Loss:', round(sum(loss) / len(loss), 3))\n\n loss = []\n validation_reward = Validation(validation_traces, dqn)\n\n if step > 900 and validation_reward > test_reward:\n test_reward = validation_reward\n dqn.save_model()\n logger.log(\"Save model\")\n\n # logger.log log\n logger.log('Mean ep 100 return: ', validation_reward)\n dqn.set_train()\n\n logger.log(\"The training is done!\")"
] | [
[
"torch.rand",
"numpy.array",
"torch.argmax",
"torch.arange",
"torch.FloatTensor",
"tensorflow.compat.v1.logging.set_verbosity",
"torch.nn.functional.linear",
"numpy.random.uniform",
"torch.LongTensor",
"numpy.random.randint",
"numpy.sqrt",
"torch.load",
"numpy.linspace",
"torch.Tensor",
"torch.mean",
"torch.reshape"
]
] |
milomacphail/machine_learning_models | [
"d38dcb0ae593163c98754eb3dfa6cf149c968ae1"
] | [
"Multilinear Regression/backward_multilinear_regression.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun May 24 11:50:59 2020\n\n@author: milom\n\"\"\"\n# Importing the libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n# Importing the dataset\ndataset = pd.read_csv(\"50_Startups.csv\")\nX = dataset.iloc[:,:-1].values\nY = dataset.iloc[:, 4].values\n\n#Encoding Catgorical Data\n#Independent Variable\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\nlabelEncoder_x = LabelEncoder()\nX[:, 3] = labelEncoder_x.fit_transform(X[:, 3])\noneHotEncoder = OneHotEncoder(categorical_features = [3])\nX = oneHotEncoder.fit_transform(X).toarray()\n\n#Avoiding DVT\nX = X[:, 1:]\n\n# Splitting the dataset into the Training set and Test set\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.2, random_state = 0)\n\n# Feature Scaling\n\"\"\"from sklearn.preprocessing import StandardScaler\nsc_X = StandardScaler()\nX_train = sc_X.fit_transform(X_train)\nX_test = sc_X.transform(X_test)\"\"\"\n\n#Fitting multiple linear regression to the Training set\nfrom sklearn.linear_model import LinearRegression\nregressor = LinearRegression()\nregressor.fit(X_train, Y_train)\n\n#Run model\ny_pred = regressor.predict(X_test)\n\n#Import backward elimination model\nimport statsmodels.api as sm\nX = np.append(arr = np.ones((50, 1)).astype(int), values = X, axis = 1)\nX_opt = X[:,[0, 1, 2, 3, 4, 5]]\nregressor_OLS = sm.OLS(endog = Y, exog=X_opt).fit()\nregressor_OLS.summary()\nX_opt = X[:,[0, 1, 3, 4, 5]]\nregressor_OLS = sm.OLS(endog = Y, exog=X_opt).fit()\nregressor_OLS.summary()\nX_opt = X[:,[0, 3, 4, 5]]\nregressor_OLS = sm.OLS(endog = Y, exog=X_opt).fit()\nregressor_OLS.summary()\nX_opt = X[:,[0, 3, 5]]\nregressor_OLS = sm.OLS(endog = Y, exog=X_opt).fit()\nregressor_OLS.summary()\nX_opt = X[:,[0, 3]]\nregressor_OLS = sm.OLS(endog = Y, exog=X_opt).fit()\nregressor_OLS.summary()\n\n"
] | [
[
"sklearn.preprocessing.LabelEncoder",
"sklearn.linear_model.LinearRegression",
"numpy.ones",
"sklearn.model_selection.train_test_split",
"pandas.read_csv",
"sklearn.preprocessing.OneHotEncoder"
]
] |