repo_name | hexsha | file_path | code | apis
---|---|---|---|---
RexBarker/Deep-Flow
|
[
"6310007009d2bfe150f1e4b29c7588f720c4bba2"
] |
[
"dataset/FlowInitial.py"
] |
[
"import torch\nimport os\nimport random\nimport cv2\nimport cvbase as cvb\nimport numpy as np\nimport torch.utils.data as data\nimport utils.image as im\nimport utils.region_fill as rf\n\n\nclass FlowSeq(data.Dataset):\n\n def __init__(self, config, isTest=False):\n super(FlowSeq, self).__init__()\n self.config = config\n self.data_items = []\n self.size = self.config.IMAGE_SHAPE\n self.res_size = self.config.RES_SHAPE\n self.isTest = isTest\n self.data_list = config.EVAL_LIST if isTest else config.TRAIN_LIST\n with open(self.data_list, 'r') as f:\n for line in f:\n line = line.strip()\n line = line.strip(' ')\n line_split = line.split(' ')\n\n flow_dir = line_split[0:11]\n if self.config.DATA_ROOT is not None:\n flow_dir = [os.path.join(self.config.DATA_ROOT, x) for x in flow_dir]\n if self.config.get_mask:\n mask_dir = line_split[11:22]\n if not self.config.FIX_MASK:\n mask_dir = [os.path.join(self.config.MASK_ROOT, x) for x in mask_dir]\n else:\n mask_dir = [os.path.join(self.config.MASK_ROOT) for x in mask_dir]\n video_class_no = int(line_split[-1])\n if not self.isTest:\n self.data_items.append((flow_dir, video_class_no))\n else:\n output_dirs = line_split[-2]\n if self.config.get_mask:\n self.data_items.append((flow_dir, video_class_no, mask_dir, output_dirs))\n else:\n self.data_items.append((flow_dir, video_class_no, output_dirs))\n\n def __len__(self):\n return len(self.data_items)\n\n def __getitem__(self, idx):\n\n flow_dir = self.data_items[idx][0]\n video_class_no = self.data_items[idx][1]\n if self.config.get_mask:\n mask_dir = self.data_items[idx][2]\n if self.isTest:\n output_dirs = self.data_items[idx][-1]\n flow_set = []\n mask_set = []\n flow_mask_cat_set = []\n flow_masked_set = []\n\n if self.config.MASK_MODE == 'bbox':\n tmp_bbox = im.random_bbox(self.config)\n tmp_mask = im.bbox2mask(self.config, tmp_bbox)\n tmp_mask = tmp_mask[0, 0, :, :]\n fix_mask = np.expand_dims(tmp_mask, axis=2)\n elif self.config.MASK_MODE == 'mid-bbox':\n tmp_mask = im.mid_bbox_mask(self.config)\n tmp_mask = tmp_mask[0, 0, :, :]\n fix_mask = np.expand_dims(tmp_mask, axis=2)\n\n for i in range(11):\n tmp_flow = cvb.read_flow(flow_dir[i])\n if self.config.get_mask:\n #tmp_mask = cv2.imread(mask_dir[i],\n # cv2.IMREAD_UNCHANGED)\n tmp_mask = cv2.imread(mask_dir[i])\n tmp_mask = self._mask_tf(tmp_mask)\n else:\n if self.config.FIX_MASK:\n tmp_mask = fix_mask.copy()\n else:\n tmp_bbox = im.random_bbox(self.config)\n tmp_mask = im.bbox2mask(self.config, tmp_bbox)\n tmp_mask = tmp_mask[0, 0, :, :]\n tmp_mask = np.expand_dims(tmp_mask, axis=2)\n tmp_flow = self._flow_tf(tmp_flow)\n tmp_flow_masked = tmp_flow * (1. 
- tmp_mask)\n\n if self.config.INITIAL_HOLE:\n tmp_flow_resized = cv2.resize(tmp_flow, (self.size[1] // 2, self.size[0] // 2))\n tmp_mask_resized = cv2.resize(tmp_mask, (self.size[1] // 2, self.size[0] // 2), cv2.INTER_NEAREST)\n tmp_flow_masked_small = tmp_flow_resized\n tmp_flow_masked_small[:, :, 0] = rf.regionfill(tmp_flow_resized[:, :, 0], tmp_mask_resized)\n tmp_flow_masked_small[:, :, 1] = rf.regionfill(tmp_flow_resized[:, :, 1], tmp_mask_resized)\n\n tmp_flow_masked = tmp_flow_masked + \\\n tmp_mask * cv2.resize(tmp_flow_masked_small, (self.size[1], self.size[0]))\n\n flow_masked_set.append(tmp_flow_masked)\n flow_set.append(tmp_flow)\n mask_set.append(tmp_mask)\n mask_set.append(tmp_mask)\n tmp_flow_mask_cat = np.concatenate((tmp_flow_masked, tmp_mask), axis=2)\n flow_mask_cat_set.append(tmp_flow_mask_cat)\n\n flow_mask_cat = np.concatenate(flow_mask_cat_set, axis=2)\n flow_masked = np.concatenate(flow_masked_set, axis=2)\n gt_flow = np.concatenate(flow_set, axis=2)\n mask = np.concatenate(mask_set, axis=2)\n\n flow_mask_cat = torch.from_numpy(flow_mask_cat).permute(2, 0, 1).contiguous().float()\n flow_masked = torch.from_numpy(flow_masked).permute(2, 0, 1).contiguous().float()\n gt_flow = torch.from_numpy(gt_flow).permute(2, 0, 1).contiguous().float()\n mask = torch.from_numpy(mask).permute(2, 0, 1).contiguous().float()\n\n if self.isTest:\n return flow_mask_cat, flow_masked, gt_flow, mask, output_dirs\n\n return flow_mask_cat, flow_masked, gt_flow, mask\n\n def _img_tf(self, img):\n img = cv2.resize(img, (self.size[1], self.size[0]))\n img = img / 127.5 - 1\n\n return img\n\n def _mask_tf(self, mask):\n mask = cv2.resize(mask, (self.size[1], self.size[0]),\n interpolation=cv2.INTER_NEAREST)\n if self.config.enlarge_mask:\n enlarge_kernel = np.ones((self.config.enlarge_kernel, self.config.enlarge_kernel),\n np.uint8)\n tmp_mask = cv2.dilate(mask[:, :, 0], enlarge_kernel, iterations=1)\n mask[(tmp_mask > 0), :] = 255\n\n mask = mask[:,:,0]\n mask = np.expand_dims(mask, axis=2)\n mask = mask / 255\n\n return mask\n\n def _flow_tf(self, flow):\n origin_shape = flow.shape\n flow = cv2.resize(flow, (self.res_size[1], self.res_size[0]))\n flow[:, :, 0] = flow[:, :, 0].clip(-1. * origin_shape[1], origin_shape[1]) / origin_shape[1] * self.res_size[1]\n flow[:, :, 1] = flow[:, :, 1].clip(-1. * origin_shape[0], origin_shape[0]) / origin_shape[0] * self.res_size[0]\n\n return flow\n"
] |
[
[
"numpy.concatenate",
"numpy.expand_dims",
"torch.from_numpy",
"numpy.ones"
]
] |
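
A minimal, self-contained sketch of the per-item tensor assembly the `FlowSeq` code above performs, showing where each listed API (`numpy.ones`, `numpy.expand_dims`, `numpy.concatenate`, `torch.from_numpy`) is used: per-frame flow and mask are concatenated channel-wise, stacked across 11 frames, and converted to a channels-first float tensor. Shapes are illustrative stand-ins, not values from the repo's config.

```python
import numpy as np
import torch

H, W, n_frames = 64, 64, 11
cat_set = []
for _ in range(n_frames):
    flow = np.zeros((H, W, 2), dtype=np.float32)                      # stand-in for cvb.read_flow(...)
    mask = np.expand_dims(np.ones((H, W), dtype=np.float32), axis=2)  # (H, W, 1), as in the mask handling
    masked_flow = flow * (1. - mask)                                  # hole out the masked region
    cat_set.append(np.concatenate((masked_flow, mask), axis=2))

flow_mask_cat = np.concatenate(cat_set, axis=2)            # (H, W, 33)
tensor = torch.from_numpy(flow_mask_cat).permute(2, 0, 1)  # channels-first: (33, H, W)
tensor = tensor.contiguous().float()
print(tensor.shape)  # torch.Size([33, 64, 64])
```
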
aibodygym/GAST-Net-3DPoseEstimation
|
[
"97a364affe5cd4f68fab030e0210187333fff25e"
] |
[
"lib/pose/hrnet/pose_estimation/gen_kpts.py"
] |
[
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport sys\nimport os\nimport os.path as osp\nimport argparse\nimport time\nimport numpy as np\nfrom tqdm import tqdm\nimport json\nimport torch\nimport torch.backends.cudnn as cudnn\nimport cv2\n\nimport _init_paths\nfrom _init_paths import get_path\nfrom utils.utilitys import plot_keypoint, PreProcess, write, load_json\nfrom config import cfg, update_config\nfrom utils.transforms import *\nfrom utils.inference import get_final_preds\nimport models\nsys.path.pop(0)\n\npre_dir, cur_dir, chk_root, data_root, lib_root, output_root = get_path(__file__)\ncfg_dir = pre_dir + '/experiments/coco/hrnet/'\nmodel_dir = chk_root + 'hrnet/pose_coco/'\n\n# Loading human detector model\nsys.path.insert(0, lib_root)\nfrom detector import load_model as yolo_model\nfrom detector import yolo_human_det as yolo_det\nfrom track.sort import Sort\nsys.path.pop(0)\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='Train keypoints network')\n # general\n parser.add_argument('--cfg', type=str, default=cfg_dir + 'w48_384x288_adam_lr1e-3.yaml',\n help='experiment configure file name')\n parser.add_argument('opts', nargs=argparse.REMAINDER, default=None,\n help=\"Modify config options using the command-line\")\n parser.add_argument('--modelDir', type=str, default=model_dir + 'pose_hrnet_w48_384x288.pth',\n help='The model directory')\n parser.add_argument('--det-dim', type=int, default=416,\n help='The input dimension of the detected image')\n parser.add_argument('--thred-score', type=float, default=0.70,\n help='The threshold of object Confidence')\n parser.add_argument('-a', '--animation', action='store_true',\n help='output animation')\n parser.add_argument('-np', '--num-person', type=int, default=1,\n help='The maximum number of estimated poses')\n parser.add_argument(\"-v\", \"--video\", type=str, default='camera',\n help=\"input video file name\")\n args = parser.parse_args()\n\n return args\n\n\ndef reset_config(args):\n update_config(cfg, args)\n\n # cudnn related setting\n cudnn.benchmark = cfg.CUDNN.BENCHMARK\n torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC\n torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED\n\n\n# load model\ndef model_load(config):\n print('Loading HRNet model ...')\n # lib/models/pose_hrnet.py:get_pose_net\n model = eval('models.' + config.MODEL.NAME + '.get_pose_net')(config, is_train=False)\n if torch.cuda.is_available():\n model = model.cuda()\n\n state_dict = torch.load(config.OUTPUT_DIR)\n from collections import OrderedDict\n new_state_dict = OrderedDict()\n for k, v in state_dict.items():\n name = k # remove module.\n # print(name,'\\t')\n new_state_dict[name] = v\n model.load_state_dict(new_state_dict)\n model.eval()\n print('HRNet network successfully loaded')\n return model\n\n\ndef load_default_model():\n args = parse_args()\n reset_config(args)\n\n print('Loading HRNet model ...')\n # lib/models/pose_hrnet.py:get_pose_net\n model = eval('models.' 
+ cfg.MODEL.NAME + '.get_pose_net')(cfg, is_train=False)\n if torch.cuda.is_available():\n model = model.cuda()\n\n state_dict = torch.load(cfg.OUTPUT_DIR)\n from collections import OrderedDict\n new_state_dict = OrderedDict()\n for k, v in state_dict.items():\n name = k # remove module.\n # print(name,'\\t')\n new_state_dict[name] = v\n model.load_state_dict(new_state_dict)\n model.eval()\n print('HRNet network successfully loaded')\n return model\n\n\ndef gen_img_kpts(image, human_model, pose_model, human_sort, det_dim=416, num_peroson=2):\n \"\"\"\n :param image: Input image matrix instead of image path\n :param human_model: The YOLOv3 model\n :param pose_model: The HRNet model\n :param human_sort: Input initialized sort tracker\n :param det_dim: The input dimension of YOLOv3. [160, 320, 416]\n :param num_peroson: The number of tracked people\n\n :return:\n kpts: (M, N, 2)\n scores: (M, N, 1)\n bboxs_track: (x1, y1, x2, y2, ID)\n human_sort: Updated human_sort\n \"\"\"\n\n args = parse_args()\n reset_config(args)\n\n thred_score = args.thred_score\n\n bboxs, bbox_scores = yolo_det(image, human_model, reso=det_dim, confidence=thred_score)\n\n if bboxs is None or not bboxs.any():\n return None, None, None\n\n # Using Sort to track people\n # people_track: Num_bbox × [x1, y1, x2, y2, ID]\n people_track = human_sort.update(bboxs)\n\n # Track the first two people in the video and remove the ID\n if people_track.shape[0] == 1:\n bboxs_track = people_track[-1].reshape(1, 5)\n else:\n people_track_ = people_track[-num_peroson:].reshape(num_peroson, 5)\n bboxs_track = people_track_[::-1]\n\n with torch.no_grad():\n # bbox is coordinate location\n inputs, origin_img, center, scale = PreProcess(image, bboxs_track, cfg, num_peroson)\n inputs = inputs[:, [2, 1, 0]]\n\n if torch.cuda.is_available():\n inputs = inputs.cuda()\n output = pose_model(inputs)\n\n # compute coordinate\n preds, maxvals = get_final_preds(cfg, output.clone().cpu().numpy(), np.asarray(center), np.asarray(scale))\n\n kpts = np.zeros((num_peroson, 17, 2), dtype=np.float32)\n scores = np.zeros((num_peroson, 17, 1), dtype=np.float32)\n for i, kpt in enumerate(preds):\n kpts[i] = kpt\n for i, score in enumerate(maxvals):\n scores[i] = score\n\n human_indexes = []\n for i in range(len(bboxs_track)):\n human_indexes.append(bboxs_track[i, -1])\n\n return kpts, scores, human_indexes\n\n\ndef gen_video_kpts(video, det_dim=416, num_peroson=1, gen_output=False):\n # Updating configuration\n args = parse_args()\n reset_config(args)\n\n cap = cv2.VideoCapture(video)\n assert cap.isOpened(), 'Cannot capture source'\n\n # Loading detector and pose model, initialize sort for track\n human_model = yolo_model(inp_dim=det_dim)\n pose_model = model_load(cfg)\n people_sort = Sort()\n\n video_length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))\n # video_length = 1000\n\n # collect keypoints coordinate\n print('Generating 2D pose ...')\n\n kpts_result = []\n scores_result = []\n for i in tqdm(range(video_length)):\n ret, frame = cap.read()\n if not ret:\n continue\n # start = time.time()\n try:\n bboxs, scores = yolo_det(frame, human_model, reso=det_dim, confidence=args.thred_score)\n\n if bboxs is None or not bboxs.any():\n print('No person detected!')\n # print('FPS of the video is {:5.2f}'.format(1 / (time.time() - start)))\n continue\n\n # Using Sort to track people\n people_track = people_sort.update(bboxs)\n\n # Track the first two people in the video and remove the ID\n if people_track.shape[0] == 1:\n people_track_ = people_track[-1, 
:-1].reshape(1, 4)\n elif people_track.shape[0] >= 2:\n people_track_ = people_track[-num_peroson:, :-1].reshape(num_peroson, 4)\n people_track_ = people_track_[::-1]\n else:\n continue\n\n track_bboxs = []\n for bbox in people_track_:\n bbox = [round(i, 2) for i in list(bbox)]\n track_bboxs.append(bbox)\n\n except Exception as e:\n print(e)\n exit(0)\n continue\n\n with torch.no_grad():\n # bbox is coordinate location\n inputs, origin_img, center, scale = PreProcess(frame, track_bboxs, cfg, num_peroson)\n inputs = inputs[:, [2, 1, 0]]\n\n if torch.cuda.is_available():\n inputs = inputs.cuda()\n output = pose_model(inputs)\n\n # compute coordinate\n preds, maxvals = get_final_preds(cfg, output.clone().cpu().numpy(), np.asarray(center), np.asarray(scale))\n\n if gen_output:\n kpts = np.zeros((num_peroson, 17, 2), dtype=np.float32)\n scores = np.zeros((num_peroson, 17), dtype=np.float32)\n for i, kpt in enumerate(preds):\n kpts[i] = kpt\n\n for i, score in enumerate(maxvals):\n scores[i] = score.squeeze()\n\n kpts_result.append(kpts)\n scores_result.append(scores)\n\n else:\n index_bboxs = [bbox + [i] for i, bbox in enumerate(track_bboxs)]\n list(map(lambda x: write(x, frame), index_bboxs))\n plot_keypoint(frame, preds, maxvals, 0.3)\n\n # print('FPS of the video is {:5.2f}'.format(1 / (time.time() - start)))\n cv2.imshow('frame', frame)\n key = cv2.waitKey(1)\n if key & 0xFF == ord('q'):\n break\n\n if gen_output:\n keypoints = np.array(kpts_result)\n scores = np.array(scores_result)\n\n keypoints = keypoints.transpose(1, 0, 2, 3) # (T, M, N, 2) --> (M, T, N, 2)\n scores = scores.transpose(1, 0, 2) # (T, M, N) --> (M, T, N)\n return keypoints, scores\n\n\ndef generate_ntu_kpts_json(video_path, kpts_file):\n args = parse_args()\n reset_config(args)\n\n # Loading detector and pose model, initialize sort for track\n human_model = yolo_model()\n pose_model = model_load(cfg)\n people_sort = Sort()\n\n with torch.no_grad():\n cap = cv2.VideoCapture(video_path)\n video_length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))\n\n # collect keypoints information\n kpts_info = dict()\n data = []\n\n for i in tqdm(range(video_length)):\n frame_info = {'frame_index': i + 1}\n\n ret, frame = cap.read()\n try:\n bboxs, scores = yolo_det(frame, human_model, confidence=args.thred_score)\n\n if bboxs is None or not bboxs.any():\n print('No person detected!')\n continue\n # Using Sort to track people\n people_track = people_sort.update(bboxs)\n\n # Track the first two people in the video and remove the ID\n if people_track.shape[0] == 1:\n people_track_ = people_track[-1, :-1].reshape(1, 4)\n elif people_track.shape[0] >= 2:\n people_track_ = people_track[-2:, :-1].reshape(2, 4)\n people_track_ = people_track_[::-1]\n else:\n skeleton = {'skeleton': [{'pose': [], 'score': [], 'bbox': []}]}\n frame_info.update(skeleton)\n data.append(frame_info)\n\n continue\n\n track_bboxs = []\n for bbox in people_track_:\n bbox = [round(i, 3) for i in list(bbox)]\n track_bboxs.append(bbox)\n\n except Exception as e:\n print(e)\n continue\n\n # bbox is coordinate location\n inputs, origin_img, center, scale = PreProcess(frame, bboxs, cfg, args.num_person)\n inputs = inputs[:, [2, 1, 0]]\n if torch.cuda.is_available():\n inputs = inputs.cuda()\n output = pose_model(inputs.cuda())\n # compute coordinate\n preds, maxvals = get_final_preds(cfg, output.clone().cpu().numpy(), np.asarray(center),\n np.asarray(scale))\n\n skeleton = []\n for num, bbox in enumerate(track_bboxs):\n pose = preds[num].tolist()\n score = maxvals[num].tolist()\n 
pose = round_list(pose)\n score = round_list(score)\n\n one_skeleton = {'pose': pose,\n 'score': score,\n 'bbox': bbox}\n skeleton.append(one_skeleton)\n\n frame_info.update({'skeleton': skeleton})\n data.append(frame_info)\n\n kpts_info.update({'data': data})\n with open(kpts_file, 'w') as fw:\n json.dump(kpts_info, fw)\n print('Finishing!')\n\n\ndef round_list(input_list, decimals=3):\n dim = len(input_list)\n\n for i in range(dim):\n for j in range(len(input_list[i])):\n input_list[i][j] = round(input_list[i][j], decimals)\n\n return input_list"
] |
[
[
"torch.load",
"numpy.asarray",
"torch.no_grad",
"torch.cuda.is_available",
"numpy.array",
"numpy.zeros"
]
] |
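
The listed numpy/torch calls cluster in `gen_video_kpts` above, which accumulates per-frame HRNet outputs and then re-orders them so each person's track is contiguous. A self-contained sketch of that accumulation pattern, with synthetic zero arrays standing in for the model's `preds`/`maxvals`:

```python
import numpy as np

num_frames, num_person = 5, 2
kpts_result, scores_result = [], []
for _ in range(num_frames):
    kpts = np.zeros((num_person, 17, 2), dtype=np.float32)  # stand-in for HRNet preds
    scores = np.zeros((num_person, 17), dtype=np.float32)   # stand-in for maxvals
    kpts_result.append(kpts)
    scores_result.append(scores)

keypoints = np.array(kpts_result).transpose(1, 0, 2, 3)  # (T, M, 17, 2) -> (M, T, 17, 2)
scores = np.array(scores_result).transpose(1, 0, 2)      # (T, M, 17)    -> (M, T, 17)
print(keypoints.shape, scores.shape)  # (2, 5, 17, 2) (2, 5, 17)
```
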
lyschoening/tfx
|
[
"ff87a97db07642e57e2c84cf50682dc5996f99a4",
"ff87a97db07642e57e2c84cf50682dc5996f99a4"
] |
[
"tfx/components/base/base_driver.py",
"tfx/experimental/pipeline_testing/pipeline_recorder_utils.py"
] |
[
"# Lint as: python2, python3\n# Copyright 2019 Google LLC. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Abstract TFX driver class.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nfrom typing import Any, Dict, List, Text\n\nimport absl\nimport tensorflow as tf\n\nfrom tfx import types\nfrom tfx.orchestration import data_types\nfrom tfx.orchestration import metadata\nfrom tfx.types import artifact_utils\n\n\ndef _generate_output_uri(base_output_dir: Text,\n name: Text,\n execution_id: int,\n is_single_artifact: bool = True,\n index: int = 0) -> Text:\n \"\"\"Generate uri for output artifact.\"\"\"\n if is_single_artifact:\n # TODO(b/145680633): Consider differentiating different types of uris.\n return os.path.join(base_output_dir, name, str(execution_id))\n\n return os.path.join(base_output_dir, name, str(execution_id), str(index))\n\n\ndef _prepare_output_paths(artifact: types.Artifact):\n \"\"\"Create output directories for output artifact.\"\"\"\n if tf.io.gfile.exists(artifact.uri):\n msg = 'Output artifact uri %s already exists' % artifact.uri\n absl.logging.warning(msg)\n # TODO(b/158689199): We currently simply return as a short-term workaround\n # to unblock execution retires. 
A comprehensive solution to guarantee\n # idempotent executions is needed.\n return\n\n # TODO(zhitaoli): Consider refactoring this out into something\n # which can handle permission bits.\n absl.logging.debug('Creating output artifact uri %s as directory',\n artifact.uri)\n tf.io.gfile.makedirs(artifact.uri)\n # TODO(b/147242148): Avoid special-casing the \"split_names\" property.\n if artifact.type.PROPERTIES and 'split_names' in artifact.type.PROPERTIES:\n split_names = artifact_utils.decode_split_names(artifact.split_names)\n for split in split_names:\n split_dir = os.path.join(artifact.uri, split)\n absl.logging.debug('Creating output split %s as directory', split_dir)\n tf.io.gfile.makedirs(split_dir)\n\n\nclass BaseDriver(object):\n \"\"\"BaseDriver is the base class of all custom drivers.\n\n This can also be used as the default driver of a component if no custom logic\n is needed.\n\n Attributes:\n _metadata_handler: An instance of Metadata.\n \"\"\"\n\n def __init__(self, metadata_handler: metadata.Metadata):\n self._metadata_handler = metadata_handler\n\n def verify_input_artifacts(\n self, artifacts_dict: Dict[Text, List[types.Artifact]]) -> None:\n \"\"\"Verify that all artifacts have existing uri.\n\n Args:\n artifacts_dict: key -> types.Artifact for inputs.\n\n Raises:\n RuntimeError: if any input as an empty or non-existing uri.\n \"\"\"\n for single_artifacts_list in artifacts_dict.values():\n for artifact in single_artifacts_list:\n if not artifact.uri:\n raise RuntimeError('Artifact %s does not have uri' % artifact)\n if not tf.io.gfile.exists(artifact.uri):\n raise RuntimeError('Artifact uri %s is missing' % artifact.uri)\n\n def _log_properties(self, input_dict: Dict[Text, List[types.Artifact]],\n output_dict: Dict[Text, List[types.Artifact]],\n exec_properties: Dict[Text, Any]):\n \"\"\"Log inputs, outputs, and executor properties in a standard format.\"\"\"\n absl.logging.debug('Starting %s driver.', self.__class__.__name__)\n absl.logging.debug('Inputs for %s are: %s', self.__class__.__name__,\n input_dict)\n absl.logging.debug('Execution properties for %s are: %s',\n self.__class__.__name__, exec_properties)\n absl.logging.debug('Outputs for %s are: %s', self.__class__.__name__,\n output_dict)\n\n def resolve_input_artifacts(\n self,\n input_dict: Dict[Text, types.Channel],\n exec_properties: Dict[Text, Any], # pylint: disable=unused-argument\n driver_args: data_types.DriverArgs,\n pipeline_info: data_types.PipelineInfo,\n ) -> Dict[Text, List[types.Artifact]]:\n \"\"\"Resolve input artifacts from metadata.\n\n Subclasses might override this function for customized artifact properties\n resolution logic. 
However please note that this function is supposed to be\n called in normal cases (except head of the pipeline) since it handles\n artifact info passing from upstream components.\n\n Args:\n input_dict: key -> Channel mapping for inputs generated in logical\n pipeline.\n exec_properties: Dict of other execution properties, e.g., configs.\n driver_args: An instance of data_types.DriverArgs with driver\n configuration properties.\n pipeline_info: An instance of data_types.PipelineInfo, holding pipeline\n related properties including component_type and component_id.\n\n Returns:\n Final artifacts that will be used in execution.\n\n Raises:\n ValueError: if in interactive mode, the given input channels have not been\n resolved.\n \"\"\"\n result = {}\n for name, input_channel in input_dict.items():\n if driver_args.interactive_resolution:\n artifacts = list(input_channel.get())\n for artifact in artifacts:\n # Note: when not initialized, artifact.uri is '' and artifact.id is 0.\n if not artifact.uri or not artifact.id:\n raise ValueError((\n 'Unresolved input channel %r for input %r was passed in '\n 'interactive mode. When running in interactive mode, upstream '\n 'components must first be run with '\n '`interactive_context.run(component)` before their outputs can '\n 'be used in downstream components.') % (artifact, name))\n result[name] = artifacts\n else:\n result[name] = self._metadata_handler.search_artifacts(\n artifact_name=input_channel.output_key,\n pipeline_info=pipeline_info,\n producer_component_id=input_channel.producer_component_id)\n # TODO(ccy): add this code path to interactive resolution.\n for artifact in result[name]:\n if isinstance(artifact, types.ValueArtifact):\n # Resolve the content of file into value field for value artifacts.\n _ = artifact.read()\n return result\n\n def resolve_exec_properties(\n self,\n exec_properties: Dict[Text, Any],\n pipeline_info: data_types.PipelineInfo, # pylint: disable=unused-argument\n component_info: data_types.ComponentInfo, # pylint: disable=unused-argument\n ) -> Dict[Text, Any]:\n \"\"\"Resolve execution properties.\n\n Subclasses might override this function for customized execution properties\n resolution logic.\n\n Args:\n exec_properties: Original execution properties passed in.\n pipeline_info: An instance of data_types.PipelineInfo, holding pipeline\n related properties including pipeline_name, pipeline_root and run_id\n component_info: An instance of data_types.ComponentInfo, holding component\n related properties including component_type and component_id.\n\n Returns:\n Final execution properties that will be used in execution.\n \"\"\"\n return exec_properties\n\n def _prepare_output_artifacts(\n self,\n input_artifacts: Dict[Text, List[types.Artifact]],\n output_dict: Dict[Text, types.Channel],\n exec_properties: Dict[Text, Any],\n execution_id: int,\n pipeline_info: data_types.PipelineInfo,\n component_info: data_types.ComponentInfo,\n ) -> Dict[Text, List[types.Artifact]]:\n \"\"\"Prepare output artifacts by assigning uris to each artifact.\"\"\"\n del exec_properties\n\n base_output_dir = os.path.join(pipeline_info.pipeline_root,\n component_info.component_id)\n\n result = {}\n for name, channel in output_dict.items():\n if channel.matching_channel_name:\n # Decides the artifact count for output Channel at runtime based on the\n # artifact count in specified input Channel.\n count = len(input_artifacts[channel.matching_channel_name])\n output_list = [channel.type() for _ in range(count)]\n else:\n # TODO(b/161490287): 
use `[channel.type()]` explicitly.\n output_list = list(channel.get())\n\n is_single_artifact = len(output_list) == 1\n for i, artifact in enumerate(output_list):\n artifact.uri = _generate_output_uri(base_output_dir, name, execution_id,\n is_single_artifact, i)\n _prepare_output_paths(artifact)\n\n result[name] = output_list\n\n return result\n\n def pre_execution(\n self,\n input_dict: Dict[Text, types.Channel],\n output_dict: Dict[Text, types.Channel],\n exec_properties: Dict[Text, Any],\n driver_args: data_types.DriverArgs,\n pipeline_info: data_types.PipelineInfo,\n component_info: data_types.ComponentInfo,\n ) -> data_types.ExecutionDecision:\n \"\"\"Handle pre-execution logic.\n\n There are four steps:\n 1. Fetches input artifacts from metadata and checks whether uri exists.\n 2. Registers execution.\n 3. Decides whether a new execution is needed.\n 4a. If (3), prepare output artifacts.\n 4b. If not (3), fetch cached output artifacts.\n\n Args:\n input_dict: key -> Channel for inputs.\n output_dict: key -> Channel for outputs. Uris of the outputs are not\n assigned.\n exec_properties: Dict of other execution properties.\n driver_args: An instance of data_types.DriverArgs class.\n pipeline_info: An instance of data_types.PipelineInfo, holding pipeline\n related properties including pipeline_name, pipeline_root and run_id\n component_info: An instance of data_types.ComponentInfo, holding component\n related properties including component_type and component_id.\n\n Returns:\n data_types.ExecutionDecision object.\n\n Raises:\n RuntimeError: if any input as an empty uri.\n \"\"\"\n # Step 1. Fetch inputs from metadata.\n exec_properties = self.resolve_exec_properties(exec_properties,\n pipeline_info,\n component_info)\n input_artifacts = self.resolve_input_artifacts(input_dict, exec_properties,\n driver_args, pipeline_info)\n self.verify_input_artifacts(artifacts_dict=input_artifacts)\n absl.logging.debug('Resolved input artifacts are: %s', input_artifacts)\n # Step 2. Register execution in metadata.\n contexts = self._metadata_handler.register_pipeline_contexts_if_not_exists(\n pipeline_info)\n execution = self._metadata_handler.register_execution(\n input_artifacts=input_artifacts,\n exec_properties=exec_properties,\n pipeline_info=pipeline_info,\n component_info=component_info,\n contexts=contexts)\n use_cached_results = False\n output_artifacts = None\n\n if driver_args.enable_cache:\n # Step 3. Decide whether a new execution is needed.\n output_artifacts = self._metadata_handler.get_cached_outputs(\n input_artifacts=input_artifacts,\n exec_properties=exec_properties,\n pipeline_info=pipeline_info,\n component_info=component_info)\n if output_artifacts is not None:\n # If cache should be used, updates execution to reflect that. Note that\n # with this update, publisher should / will be skipped.\n self._metadata_handler.update_execution(\n execution=execution,\n component_info=component_info,\n output_artifacts=output_artifacts,\n execution_state=metadata.EXECUTION_STATE_CACHED,\n contexts=contexts)\n use_cached_results = True\n else:\n absl.logging.debug('Cached results not found, move on to new execution')\n # Step 4a. New execution is needed. 
Prepare output artifacts.\n output_artifacts = self._prepare_output_artifacts(\n input_artifacts=input_artifacts,\n output_dict=output_dict,\n exec_properties=exec_properties,\n execution_id=execution.id,\n pipeline_info=pipeline_info,\n component_info=component_info)\n absl.logging.debug(\n 'Output artifacts skeleton for the upcoming execution are: %s',\n output_artifacts)\n # Updates the execution to reflect refreshed output artifacts and\n # execution properties.\n self._metadata_handler.update_execution(\n execution=execution,\n component_info=component_info,\n output_artifacts=output_artifacts,\n exec_properties=exec_properties,\n contexts=contexts)\n absl.logging.debug(\n 'Execution properties for the upcoming execution are: %s',\n exec_properties)\n\n return data_types.ExecutionDecision(input_artifacts, output_artifacts,\n exec_properties, execution.id,\n use_cached_results)\n",
"# Lint as: python3\n# Copyright 2020 Google LLC. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Recording pipeline from MLMD metadata.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport os\nfrom typing import Iterable, List, Mapping, Optional, Text, Tuple\n\nfrom absl import logging\nimport tensorflow as tf\nfrom tfx.orchestration import metadata\nfrom tfx.utils import io_utils\n\nfrom ml_metadata.proto import metadata_store_pb2\n\n\ndef _get_paths(metadata_connection: metadata.Metadata,\n executions: List[metadata_store_pb2.Execution],\n output_dir: Text) -> Iterable[Tuple[Text, Text]]:\n \"\"\"Yields tuple with source and destination artifact uris.\n\n The destination artifact uris are located in the output_dir. The source\n artifact uris are retrieved using execution ids. Artifact index is used\n for saving multiple output artifacts with same key.\n\n Args:\n metadata_connection: Instance of metadata.Metadata for I/O to MLMD.\n executions: List of executions of a pipeline run.\n output_dir: Directory path where the pipeline outputs should be recorded.\n\n Yields:\n Iterable over tuples of source uri and destination uri.\n\n Raises:\n ValueError if artifact key and index are not recorded in MLMD event.\n \"\"\"\n for execution in executions:\n component_id = execution.properties[\n metadata._EXECUTION_TYPE_KEY_COMPONENT_ID].string_value # pylint: disable=protected-access\n # ResolverNode is ignored because it doesn't have a executor that can be\n # replaced with stub.\n if component_id.startswith('ResolverNode'):\n continue\n eid = [execution.id]\n events = metadata_connection.store.get_events_by_execution_ids(eid)\n output_events = [\n x for x in events if x.type == metadata_store_pb2.Event.OUTPUT\n ]\n for event in output_events:\n steps = event.path.steps\n if not steps or not steps[0].HasField('key'):\n raise ValueError('Artifact key is not recorded in the MLMD.')\n name = steps[0].key\n artifacts = metadata_connection.store.get_artifacts_by_id(\n [event.artifact_id])\n for artifact in artifacts:\n src_uri = artifact.uri\n if len(steps) < 2 or not steps[1].HasField('index'):\n raise ValueError('Artifact index is not recorded in the MLMD.')\n artifact_index = steps[1].index\n dest_uri = os.path.join(output_dir, component_id, name,\n str(artifact_index))\n yield (src_uri, dest_uri)\n\n\ndef _get_execution_dict(\n metadata_connection: metadata.Metadata\n) -> Mapping[Text, List[metadata_store_pb2.Execution]]:\n \"\"\"Returns a dictionary holding list of executions for all run_id in MLMD.\n\n Args:\n metadata_connection: Instance of metadata.Metadata for I/O to MLMD.\n\n Returns:\n A dictionary that holds list of executions for a run_id.\n \"\"\"\n execution_dict = collections.defaultdict(list)\n for execution in metadata_connection.store.get_executions():\n execution_run_id = execution.properties['run_id'].string_value\n 
execution_dict[execution_run_id].append(execution)\n return execution_dict\n\n\ndef _get_latest_executions(\n metadata_connection: metadata.Metadata,\n pipeline_name: Text) -> List[metadata_store_pb2.Execution]:\n \"\"\"Fetches executions associated with the latest context.\n\n Args:\n metadata_connection: Instance of metadata.Metadata for I/O to MLMD.\n pipeline_name: Name of the pipeline to rerieve the latest executions for.\n\n Returns:\n List of executions for the latest run of a pipeline with the given\n pipeline_name.\n \"\"\"\n pipeline_run_contexts = [\n c for c in metadata_connection.store.get_contexts_by_type(\n metadata._CONTEXT_TYPE_PIPELINE_RUN) # pylint: disable=protected-access\n if c.properties['pipeline_name'].string_value == pipeline_name\n ]\n latest_context = max(\n pipeline_run_contexts, key=lambda c: c.last_update_time_since_epoch)\n return metadata_connection.store.get_executions_by_context(latest_context.id)\n\n\ndef record_pipeline(output_dir: Text, metadata_db_uri: Optional[Text],\n host: Optional[Text], port: Optional[int],\n pipeline_name: Optional[Text],\n run_id: Optional[Text]) -> None:\n \"\"\"Record pipeline run with run_id to output_dir.\n\n For the beam pipeline, metadata_db_uri is required. For KFP pipeline,\n host and port should be specified. If run_id is not specified, then\n pipeline_name ought to be specified in order to fetch the latest execution\n for the specified pipeline.\n\n Args:\n output_dir: Directory path where the pipeline outputs should be recorded.\n metadata_db_uri: Uri to metadata db.\n host: Hostname of the metadata grpc server\n port: Port number of the metadata grpc server.\n pipeline_name: Pipeline name, which is required if run_id isn't specified.\n run_id: Pipeline execution run_id.\n\n Raises:\n ValueError: In cases of invalid arguments:\n - metadata_db_uri is None or host and/or port is None.\n - run_id is None and pipeline_name is None.\n FileNotFoundError: if the source artifact uri does not already exist.\n \"\"\"\n if host is not None and port is not None:\n metadata_config = metadata_store_pb2.MetadataStoreClientConfig(\n host=host, port=port)\n elif metadata_db_uri is not None:\n metadata_config = metadata.sqlite_metadata_connection_config(\n metadata_db_uri)\n else:\n raise ValueError('For KFP, host and port are required. '\n 'For beam pipeline, metadata_db_uri is required.')\n\n with metadata.Metadata(metadata_config) as metadata_connection:\n if run_id is None:\n if pipeline_name is None:\n raise ValueError('If the run_id is not specified,'\n ' pipeline_name should be specified')\n # fetch executions of the most recently updated execution context.\n executions = _get_latest_executions(metadata_connection, pipeline_name)\n else:\n execution_dict = _get_execution_dict(metadata_connection)\n if run_id in execution_dict:\n executions = execution_dict[run_id]\n else:\n raise ValueError(\n 'run_id {} is not recorded in the MLMD metadata'.format(run_id))\n\n for src_uri, dest_uri in _get_paths(metadata_connection, executions,\n output_dir):\n if not tf.io.gfile.exists(src_uri):\n raise FileNotFoundError('{} does not exist'.format(src_uri))\n io_utils.copy_dir(src_uri, dest_uri)\n logging.info('Pipeline Recorded at %s', output_dir)\n"
] |
[
[
"tensorflow.io.gfile.exists",
"tensorflow.io.gfile.makedirs"
],
[
"tensorflow.io.gfile.exists"
]
] |
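
Both listed `tf.io.gfile` calls come from `_prepare_output_paths` in `base_driver.py`, which guards directory creation behind an existence check. A minimal sketch of the output-uri convention `_generate_output_uri` encodes (`<base_output_dir>/<name>/<execution_id>[/<index>]`), using an illustrative pipeline root:

```python
import os
import tensorflow as tf

def output_uri(base, name, execution_id, is_single_artifact=True, index=0):
    # mirrors _generate_output_uri: single artifacts omit the index segment
    uri = os.path.join(base, name, str(execution_id))
    return uri if is_single_artifact else os.path.join(uri, str(index))

uri = output_uri('/tmp/pipeline_root/MyComponent', 'examples', 7)
if not tf.io.gfile.exists(uri):  # same existence guard _prepare_output_paths uses
    tf.io.gfile.makedirs(uri)
print(uri)  # /tmp/pipeline_root/MyComponent/examples/7
```
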
aidandunlop/traffic_light_recognition
|
[
"497efe7fe678b88db5331ba446fc45c240276e3f"
] |
[
"traffic_lights/lib/Evaluator.py"
] |
[
"###########################################################################################\n# #\n# Evaluator class: Implements the most popular metrics for object detection #\n# #\n# Developed by: Rafael Padilla ([email protected]) #\n# SMT - Signal Multimedia and Telecommunications Lab #\n# COPPE - Universidade Federal do Rio de Janeiro #\n# Last modification: Oct 9th 2018 #\n###########################################################################################\n# flake8: noqa\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom collections import Counter\nimport sys\nimport os\nfrom .utils import MethodAveragePrecision, BBType, BBFormat\n\n\n# from BoundingBox import *\n# from BoundingBoxes import *\n\n\nclass Evaluator:\n def GetPascalVOCMetrics(\n self,\n boundingboxes,\n IOUThreshold=0.5,\n method=MethodAveragePrecision.EveryPointInterpolation,\n ):\n \"\"\"Get the metrics used by the VOC Pascal 2012 challenge.\n Get\n Args:\n boundingboxes: Object of the class BoundingBoxes representing ground truth and detected\n bounding boxes;\n IOUThreshold: IOU threshold indicating which detections will be considered TP or FP\n (default value = 0.5);\n method (default = EveryPointInterpolation): It can be calculated as the implementation\n in the official PASCAL VOC toolkit (EveryPointInterpolation), or applying the 11-point\n interpolatio as described in the paper \"The PASCAL Visual Object Classes(VOC) Challenge\"\n or EveryPointInterpolation\" (ElevenPointInterpolation);\n Returns:\n A list of dictionaries. Each dictionary contains information and metrics of each class.\n The keys of each dictionary are:\n dict['class']: class representing the current dictionary;\n dict['precision']: array with the precision values;\n dict['recall']: array with the recall values;\n dict['AP']: average precision;\n dict['interpolated precision']: interpolated precision values;\n dict['interpolated recall']: interpolated recall values;\n dict['total positives']: total number of ground truth positives;\n dict['total TP']: total number of True Positive detections;\n dict['total FP']: total number of False Negative detections;\n \"\"\"\n ret = (\n []\n ) # list containing metrics (precision, recall, average precision) of each class\n # List with all ground truths (Ex: [imageName,class,confidence=1, (bb coordinates XYX2Y2)])\n groundTruths = []\n # List with all detections (Ex: [imageName,class,confidence,(bb coordinates XYX2Y2)])\n detections = []\n # Get all classes\n classes = []\n # Loop through all bounding boxes and separate them into GTs and detections\n for bb in boundingboxes.getBoundingBoxes():\n # [imageName, class, confidence, (bb coordinates XYX2Y2)]\n if bb.getBBType() == BBType.GroundTruth:\n groundTruths.append(\n [\n bb.getImageName(),\n bb.getClassId(),\n 1,\n bb.getAbsoluteBoundingBox(BBFormat.XYX2Y2),\n ]\n )\n else:\n detections.append(\n [\n bb.getImageName(),\n bb.getClassId(),\n bb.getConfidence(),\n bb.getAbsoluteBoundingBox(BBFormat.XYX2Y2),\n ]\n )\n # get class\n if bb.getClassId() not in classes:\n classes.append(bb.getClassId())\n classes = sorted(classes)\n # Precision x Recall is obtained individually by each class\n # Loop through by classes\n for c in classes:\n # Get only detection of class c\n dects = []\n [dects.append(d) for d in detections if d[1] == c]\n # Get only ground truths of class c\n gts = []\n [gts.append(g) for g in groundTruths if g[1] == c]\n npos = len(gts)\n # sort detections by decreasing confidence\n dects = sorted(dects, key=lambda conf: conf[2], 
reverse=True)\n TP = np.zeros(len(dects))\n FP = np.zeros(len(dects))\n # create dictionary with amount of gts for each image\n det = Counter([cc[0] for cc in gts])\n for key, val in det.items():\n det[key] = np.zeros(val)\n # print(\"Evaluating class: %s (%d detections)\" % (str(c), len(dects)))\n # Loop through detections\n for d in range(len(dects)):\n # print('dect %s => %s' % (dects[d][0], dects[d][3],))\n # Find ground truth image\n gt = [gt for gt in gts if gt[0] == dects[d][0]]\n iouMax = sys.float_info.min\n for j in range(len(gt)):\n # print('Ground truth gt => %s' % (gt[j][3],))\n iou = Evaluator.iou(dects[d][3], gt[j][3])\n if iou > iouMax:\n iouMax = iou\n jmax = j\n # Assign detection as true positive/don't care/false positive\n if iouMax >= IOUThreshold:\n if det[dects[d][0]][jmax] == 0:\n TP[d] = 1 # count as true positive\n det[dects[d][0]][jmax] = 1 # flag as already 'seen'\n # print(\"TP\")\n else:\n FP[d] = 1 # count as false positive\n # print(\"FP\")\n # - A detected \"cat\" is overlaped with a GT \"cat\" with IOU >= IOUThreshold.\n else:\n FP[d] = 1 # count as false positive\n # print(\"FP\")\n # compute precision, recall and average precision\n acc_FP = np.cumsum(FP)\n acc_TP = np.cumsum(TP)\n rec = acc_TP / npos\n prec = np.divide(acc_TP, (acc_FP + acc_TP))\n # Depending on the method, call the right implementation\n if method == MethodAveragePrecision.EveryPointInterpolation:\n [ap, mpre, mrec, ii] = Evaluator.CalculateAveragePrecision(rec, prec)\n else:\n [ap, mpre, mrec, _] = Evaluator.ElevenPointInterpolatedAP(rec, prec)\n # add class result in the dictionary to be returned\n r = {\n \"class\": c,\n \"precision\": prec,\n \"recall\": rec,\n \"AP\": ap,\n \"interpolated precision\": mpre,\n \"interpolated recall\": mrec,\n \"total positives\": npos,\n \"total TP\": np.sum(TP),\n \"total FP\": np.sum(FP),\n }\n ret.append(r)\n return ret\n\n def PlotPrecisionRecallCurve(\n self,\n boundingBoxes,\n IOUThreshold=0.5,\n method=MethodAveragePrecision.EveryPointInterpolation,\n showAP=False,\n showInterpolatedPrecision=False,\n savePath=None,\n showGraphic=True,\n ):\n \"\"\"PlotPrecisionRecallCurve\n Plot the Precision x Recall curve for a given class.\n Args:\n boundingBoxes: Object of the class BoundingBoxes representing ground truth and detected\n bounding boxes;\n IOUThreshold (optional): IOU threshold indicating which detections will be considered\n TP or FP (default value = 0.5);\n method (default = EveryPointInterpolation): It can be calculated as the implementation\n in the official PASCAL VOC toolkit (EveryPointInterpolation), or applying the 11-point\n interpolatio as described in the paper \"The PASCAL Visual Object Classes(VOC) Challenge\"\n or EveryPointInterpolation\" (ElevenPointInterpolation).\n showAP (optional): if True, the average precision value will be shown in the title of\n the graph (default = False);\n showInterpolatedPrecision (optional): if True, it will show in the plot the interpolated\n precision (default = False);\n savePath (optional): if informed, the plot will be saved as an image in this path\n (ex: /home/mywork/ap.png) (default = None);\n showGraphic (optional): if True, the plot will be shown (default = True)\n Returns:\n A list of dictionaries. 
Each dictionary contains information and metrics of each class.\n The keys of each dictionary are:\n dict['class']: class representing the current dictionary;\n dict['precision']: array with the precision values;\n dict['recall']: array with the recall values;\n dict['AP']: average precision;\n dict['interpolated precision']: interpolated precision values;\n dict['interpolated recall']: interpolated recall values;\n dict['total positives']: total number of ground truth positives;\n dict['total TP']: total number of True Positive detections;\n dict['total FP']: total number of False Negative detections;\n \"\"\"\n results = self.GetPascalVOCMetrics(boundingBoxes, IOUThreshold, method)\n result = None\n # Each resut represents a class\n for result in results:\n if result is None:\n raise IOError(\"Error: Class %d could not be found.\" % classId)\n\n classId = result[\"class\"]\n precision = result[\"precision\"]\n recall = result[\"recall\"]\n average_precision = result[\"AP\"]\n mpre = result[\"interpolated precision\"]\n mrec = result[\"interpolated recall\"]\n npos = result[\"total positives\"]\n total_tp = result[\"total TP\"]\n total_fp = result[\"total FP\"]\n\n plt.close()\n if showInterpolatedPrecision:\n if method == MethodAveragePrecision.EveryPointInterpolation:\n plt.plot(\n mrec, mpre, \"--r\", label=\"Interpolated precision (every point)\"\n )\n elif method == MethodAveragePrecision.ElevenPointInterpolation:\n # Uncomment the line below if you want to plot the area\n # plt.plot(mrec, mpre, 'or', label='11-point interpolated precision')\n # Remove duplicates, getting only the highest precision of each recall value\n nrec = []\n nprec = []\n for idx in range(len(mrec)):\n r = mrec[idx]\n if r not in nrec:\n idxEq = np.argwhere(mrec == r)\n nrec.append(r)\n nprec.append(max([mpre[int(id)] for id in idxEq]))\n plt.plot(nrec, nprec, \"or\", label=\"11-point interpolated precision\")\n plt.plot(recall, precision, label=\"Precision\")\n plt.xlabel(\"recall\")\n plt.ylabel(\"precision\")\n if showAP:\n ap_str = \"{0:.2f}%\".format(average_precision * 100)\n # ap_str = \"{0:.4f}%\".format(average_precision * 100)\n plt.title(\n \"Precision x Recall curve \\nClass: %s, AP: %s\"\n % (str(classId), ap_str)\n )\n else:\n plt.title(\"Precision x Recall curve \\nClass: %s\" % str(classId))\n plt.legend(shadow=True)\n plt.grid()\n ############################################################\n # Uncomment the following block to create plot with points #\n ############################################################\n # plt.plot(recall, precision, 'bo')\n # labels = ['R', 'Y', 'J', 'A', 'U', 'C', 'M', 'F', 'D', 'B', 'H', 'P', 'E', 'X', 'N', 'T',\n # 'K', 'Q', 'V', 'I', 'L', 'S', 'G', 'O']\n # dicPosition = {}\n # dicPosition['left_zero'] = (-30,0)\n # dicPosition['left_zero_slight'] = (-30,-10)\n # dicPosition['right_zero'] = (30,0)\n # dicPosition['left_up'] = (-30,20)\n # dicPosition['left_down'] = (-30,-25)\n # dicPosition['right_up'] = (20,20)\n # dicPosition['right_down'] = (20,-20)\n # dicPosition['up_zero'] = (0,30)\n # dicPosition['up_right'] = (0,30)\n # dicPosition['left_zero_long'] = (-60,-2)\n # dicPosition['down_zero'] = (-2,-30)\n # vecPositions = [\n # dicPosition['left_down'],\n # dicPosition['left_zero'],\n # dicPosition['right_zero'],\n # dicPosition['right_zero'], #'R', 'Y', 'J', 'A',\n # dicPosition['left_up'],\n # dicPosition['left_up'],\n # dicPosition['right_up'],\n # dicPosition['left_up'], # 'U', 'C', 'M', 'F',\n # dicPosition['left_zero'],\n # dicPosition['right_up'],\n 
# dicPosition['right_down'],\n # dicPosition['down_zero'], #'D', 'B', 'H', 'P'\n # dicPosition['left_up'],\n # dicPosition['up_zero'],\n # dicPosition['right_up'],\n # dicPosition['left_up'], # 'E', 'X', 'N', 'T',\n # dicPosition['left_zero'],\n # dicPosition['right_zero'],\n # dicPosition['left_zero_long'],\n # dicPosition['left_zero_slight'], # 'K', 'Q', 'V', 'I',\n # dicPosition['right_down'],\n # dicPosition['left_down'],\n # dicPosition['right_up'],\n # dicPosition['down_zero']\n # ] # 'L', 'S', 'G', 'O'\n # for idx in range(len(labels)):\n # box = dict(boxstyle='round,pad=.5',facecolor='yellow',alpha=0.5)\n # plt.annotate(labels[idx],\n # xy=(recall[idx],precision[idx]), xycoords='data',\n # xytext=vecPositions[idx], textcoords='offset points',\n # arrowprops=dict(arrowstyle=\"->\", connectionstyle=\"arc3\"),\n # bbox=box)\n if savePath is not None:\n plt.savefig(os.path.join(savePath, classId + \".png\"))\n if showGraphic is True:\n plt.show()\n # plt.waitforbuttonpress()\n plt.pause(0.05)\n return results\n\n @staticmethod\n def CalculateAveragePrecision(rec, prec):\n mrec = []\n mrec.append(0)\n [mrec.append(e) for e in rec]\n mrec.append(1)\n mpre = []\n mpre.append(0)\n [mpre.append(e) for e in prec]\n mpre.append(0)\n for i in range(len(mpre) - 1, 0, -1):\n mpre[i - 1] = max(mpre[i - 1], mpre[i])\n ii = []\n for i in range(len(mrec) - 1):\n if mrec[1:][i] != mrec[0:-1][i]:\n ii.append(i + 1)\n ap = 0\n for i in ii:\n ap = ap + np.sum((mrec[i] - mrec[i - 1]) * mpre[i])\n # return [ap, mpre[1:len(mpre)-1], mrec[1:len(mpre)-1], ii]\n return [ap, mpre[0 : len(mpre) - 1], mrec[0 : len(mpre) - 1], ii]\n\n @staticmethod\n # 11-point interpolated average precision\n def ElevenPointInterpolatedAP(rec, prec):\n # def CalculateAveragePrecision2(rec, prec):\n mrec = []\n # mrec.append(0)\n [mrec.append(e) for e in rec]\n # mrec.append(1)\n mpre = []\n # mpre.append(0)\n [mpre.append(e) for e in prec]\n # mpre.append(0)\n recallValues = np.linspace(0, 1, 11)\n recallValues = list(recallValues[::-1])\n rhoInterp = []\n recallValid = []\n # For each recallValues (0, 0.1, 0.2, ... 
, 1)\n for r in recallValues:\n # Obtain all recall values higher or equal than r\n argGreaterRecalls = np.argwhere(mrec[:] >= r)\n pmax = 0\n # If there are recalls above r\n if argGreaterRecalls.size != 0:\n pmax = max(mpre[argGreaterRecalls.min() :])\n recallValid.append(r)\n rhoInterp.append(pmax)\n # By definition AP = sum(max(precision whose recall is above r))/11\n ap = sum(rhoInterp) / 11\n # Generating values for the plot\n rvals = []\n rvals.append(recallValid[0])\n [rvals.append(e) for e in recallValid]\n rvals.append(0)\n pvals = []\n pvals.append(0)\n [pvals.append(e) for e in rhoInterp]\n pvals.append(0)\n # rhoInterp = rhoInterp[::-1]\n cc = []\n for i in range(len(rvals)):\n p = (rvals[i], pvals[i - 1])\n if p not in cc:\n cc.append(p)\n p = (rvals[i], pvals[i])\n if p not in cc:\n cc.append(p)\n recallValues = [i[0] for i in cc]\n rhoInterp = [i[1] for i in cc]\n return [ap, rhoInterp, recallValues, None]\n\n # For each detections, calculate IOU with reference\n @staticmethod\n def _getAllIOUs(reference, detections):\n ret = []\n bbReference = reference.getAbsoluteBoundingBox(BBFormat.XYX2Y2)\n # img = np.zeros((200,200,3), np.uint8)\n for d in detections:\n bb = d.getAbsoluteBoundingBox(BBFormat.XYX2Y2)\n iou = Evaluator.iou(bbReference, bb)\n # Show blank image with the bounding boxes\n # img = add_bb_into_image(img, d, color=(255,0,0), thickness=2, label=None)\n # img = add_bb_into_image(img, reference, color=(0,255,0), thickness=2, label=None)\n ret.append((iou, reference, d)) # iou, reference, detection\n # cv2.imshow(\"comparing\",img)\n # cv2.waitKey(0)\n # cv2.destroyWindow(\"comparing\")\n # sort by iou (from highest to lowest)\n return sorted(ret, key=lambda i: i[0], reverse=True)\n\n @staticmethod\n def iou(boxA, boxB):\n # if boxes dont intersect\n if Evaluator._boxesIntersect(boxA, boxB) is False:\n return 0\n interArea = Evaluator._getIntersectionArea(boxA, boxB)\n union = Evaluator._getUnionAreas(boxA, boxB, interArea=interArea)\n # intersection over union\n iou = interArea / union\n assert iou >= 0\n return iou\n\n # boxA = (Ax1,Ay1,Ax2,Ay2)\n # boxB = (Bx1,By1,Bx2,By2)\n @staticmethod\n def _boxesIntersect(boxA, boxB):\n if boxA[0] > boxB[2]:\n return False # boxA is right of boxB\n if boxB[0] > boxA[2]:\n return False # boxA is left of boxB\n if boxA[3] < boxB[1]:\n return False # boxA is above boxB\n if boxA[1] > boxB[3]:\n return False # boxA is below boxB\n return True\n\n @staticmethod\n def _getIntersectionArea(boxA, boxB):\n xA = max(boxA[0], boxB[0])\n yA = max(boxA[1], boxB[1])\n xB = min(boxA[2], boxB[2])\n yB = min(boxA[3], boxB[3])\n # intersection area\n return (xB - xA + 1) * (yB - yA + 1)\n\n @staticmethod\n def _getUnionAreas(boxA, boxB, interArea=None):\n area_A = Evaluator._getArea(boxA)\n area_B = Evaluator._getArea(boxB)\n if interArea is None:\n interArea = Evaluator._getIntersectionArea(boxA, boxB)\n return float(area_A + area_B - interArea)\n\n @staticmethod\n def _getArea(box):\n return (box[2] - box[0] + 1) * (box[3] - box[1] + 1)\n"
] |
[
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.pause",
"numpy.linspace",
"numpy.cumsum",
"numpy.argwhere",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"numpy.zeros",
"numpy.sum",
"numpy.divide",
"matplotlib.pyplot.ylabel"
]
] |
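
`Evaluator.iou` above uses an inclusive (+1) pixel convention in its area arithmetic (`_getIntersectionArea`, `_getArea`). A tiny self-contained check of that arithmetic on two illustrative XYX2Y2 boxes:

```python
boxA = (0, 0, 9, 9)    # 10x10 ground-truth box (XYX2Y2)
boxB = (5, 5, 14, 14)  # 10x10 detection overlapping a 5x5 corner

def area(box):
    # inclusive pixel area, as in Evaluator._getArea
    return (box[2] - box[0] + 1) * (box[3] - box[1] + 1)

xA, yA = max(boxA[0], boxB[0]), max(boxA[1], boxB[1])
xB, yB = min(boxA[2], boxB[2]), min(boxA[3], boxB[3])
inter = (xB - xA + 1) * (yB - yA + 1)                 # 5 * 5 = 25
iou = inter / float(area(boxA) + area(boxB) - inter)  # 25 / 175
print(round(iou, 4))  # 0.1429
```
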
hanqiu-hq/GFNet
|
[
"3dfc282831fbfbe25ad01d28e86d7d99056ea2cb"
] |
[
"infer.py"
] |
[
"import argparse\nimport datetime\nimport numpy as np\nimport time\nimport torch\nimport torch.nn as nn\nimport torch.backends.cudnn as cudnn\nimport json\n\nfrom pathlib import Path\n\nfrom timm.data import Mixup\nfrom timm.models import create_model\nfrom timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy\nfrom timm.scheduler import create_scheduler\nfrom timm.optim import create_optimizer\nfrom timm.utils import NativeScaler, get_state_dict, ModelEma\n\nfrom datasets import build_dataset\nfrom engine import train_one_epoch, evaluate\nfrom losses import DistillationLoss\nfrom samplers import RASampler\nimport utils\nfrom functools import partial\n\nfrom gfnet import GFNet, GFNetPyramid, _cfg\n\ndef get_args_parser():\n parser = argparse.ArgumentParser('GFNet evaluation script', add_help=False)\n parser.add_argument('--batch-size', default=128, type=int)\n parser.add_argument('--arch', default='deit_small', type=str, help='Name of model to train')\n parser.add_argument('--input-size', default=224, type=int, help='images input size')\n parser.add_argument('--data-path', default='/datasets01/imagenet_full_size/061417/', type=str,\n help='dataset path')\n parser.add_argument('--data-set', default='IMNET', choices=['CIFAR', 'IMNET', 'INAT', 'INAT19'],\n type=str, help='Image Net dataset path')\n parser.add_argument('--inat-category', default='name',\n choices=['kingdom', 'phylum', 'class', 'order', 'supercategory', 'family', 'genus', 'name'],\n type=str, help='semantic granularity')\n parser.add_argument('--seed', default=0, type=int)\n parser.add_argument('--model-path', default='', help='resume from checkpoint')\n parser.add_argument('--num_workers', default=10, type=int)\n parser.add_argument('--pin-mem', action='store_true',\n help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')\n parser.add_argument('--no-pin-mem', action='store_false', dest='pin_mem',\n help='')\n parser.set_defaults(pin_mem=True)\n return parser\n\n\ndef main(args):\n\n cudnn.benchmark = True\n dataset_val, _ = build_dataset(is_train=False, args=args)\n\n data_loader_val = torch.utils.data.DataLoader(\n dataset_val,\n batch_size=128,\n num_workers=args.num_workers,\n pin_memory=args.pin_mem,\n drop_last=False\n )\n\n if args.arch == 'gfnet-xs':\n model = GFNet(\n img_size=args.input_size, \n patch_size=16, embed_dim=384, depth=12, mlp_ratio=4,\n norm_layer=partial(nn.LayerNorm, eps=1e-6)\n )\n elif args.arch == 'gfnet-ti':\n model = GFNet(\n img_size=args.input_size, \n patch_size=16, embed_dim=256, depth=12, mlp_ratio=4,\n norm_layer=partial(nn.LayerNorm, eps=1e-6)\n )\n elif args.arch == 'gfnet-s':\n model = GFNet(\n img_size=args.input_size, \n patch_size=16, embed_dim=384, depth=19, mlp_ratio=4,\n norm_layer=partial(nn.LayerNorm, eps=1e-6)\n )\n elif args.arch == 'gfnet-b':\n model = GFNet(\n img_size=args.input_size, \n patch_size=16, embed_dim=512, depth=19, mlp_ratio=4,\n norm_layer=partial(nn.LayerNorm, eps=1e-6)\n )\n elif args.arch == 'gfnet-h-ti':\n model = GFNetPyramid(\n img_size=args.input_size, \n patch_size=4, embed_dim=[64, 128, 256, 512], depth=[3, 3, 10, 3],\n mlp_ratio=[4, 4, 4, 4],\n norm_layer=partial(nn.LayerNorm, eps=1e-6), drop_path_rate=0.1,\n )\n elif args.arch == 'gfnet-h-s':\n model = GFNetPyramid(\n img_size=args.input_size, \n patch_size=4, embed_dim=[96, 192, 384, 768], depth=[3, 3, 10, 3],\n mlp_ratio=[4, 4, 4, 4],\n norm_layer=partial(nn.LayerNorm, eps=1e-6), drop_path_rate=0.2, init_values=1e-5\n )\n elif args.arch == 'gfnet-h-b':\n 
model = GFNetPyramid(\n img_size=args.input_size, \n patch_size=4, embed_dim=[96, 192, 384, 768], depth=[3, 3, 27, 3],\n mlp_ratio=[4, 4, 4, 4],\n norm_layer=partial(nn.LayerNorm, eps=1e-6), drop_path_rate=0.4, init_values=1e-6\n )\n else:\n raise NotImplementedError\n\n model_path = args.model_path\n model.default_cfg = _cfg()\n\n checkpoint = torch.load(model_path, map_location=\"cpu\")\n model.load_state_dict(checkpoint[\"model\"])\n\n print('## model has been successfully loaded')\n\n model = model.cuda()\n\n n_parameters = sum(p.numel() for p in model.parameters())\n print('number of params:', n_parameters)\n\n criterion = torch.nn.CrossEntropyLoss().cuda()\n validate(data_loader_val, model, criterion)\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n def __init__(self, name, fmt=':f'):\n self.name = name\n self.fmt = fmt\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n def __str__(self):\n fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'\n return fmtstr.format(**self.__dict__)\n\nclass ProgressMeter(object):\n def __init__(self, num_batches, meters, prefix=\"\"):\n self.batch_fmtstr = self._get_batch_fmtstr(num_batches)\n self.meters = meters\n self.prefix = prefix\n\n def display(self, batch):\n entries = [self.prefix + self.batch_fmtstr.format(batch)]\n entries += [str(meter) for meter in self.meters]\n print('\\t'.join(entries))\n\n def _get_batch_fmtstr(self, num_batches):\n num_digits = len(str(num_batches // 1))\n fmt = '{:' + str(num_digits) + 'd}'\n return '[' + fmt + '/' + fmt.format(num_batches) + ']'\n\ndef accuracy(output, target, topk=(1,)):\n \"\"\"Computes the accuracy over the k top predictions for the specified values of k\"\"\"\n with torch.no_grad():\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res\n\ndef validate(val_loader, model, criterion):\n batch_time = AverageMeter('Time', ':6.3f')\n losses = AverageMeter('Loss', ':.4e')\n top1 = AverageMeter('Acc@1', ':6.2f')\n top5 = AverageMeter('Acc@5', ':6.2f')\n model.eval()\n\n progress = ProgressMeter(\n len(val_loader),\n [batch_time, losses, top1, top5],\n prefix='Test: ')\n\n\n with torch.no_grad():\n end = time.time()\n for i, (images, target) in enumerate(val_loader):\n images = images.cuda()\n target = target.cuda()\n\n # compute output\n output = model(images)\n loss = criterion(output, target)\n\n # measure accuracy and record loss\n acc1, acc5 = accuracy(output, target, topk=(1, 5))\n losses.update(loss.item(), images.size(0))\n top1.update(acc1[0], images.size(0))\n top5.update(acc5[0], images.size(0))\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % 20 == 0:\n progress.display(i)\n\n # TODO: this should also be done with the ProgressMeter\n print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'\n .format(top1=top1, top5=top5))\n\n return top1.avg\n \n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser('GFNet evaluation script', parents=[get_args_parser()])\n args = parser.parse_args()\n main(args)\n"
] |
[
[
"torch.no_grad",
"torch.nn.CrossEntropyLoss",
"torch.utils.data.DataLoader",
"torch.load"
]
] |
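The API index above names the four torch entry points the evaluation script revolves around: torch.load to restore the checkpoint, torch.utils.data.DataLoader to batch the validation set, torch.no_grad to disable autograd, and torch.nn.CrossEntropyLoss as the eval criterion. A quick, self-contained illustration of that pattern follows (a sketch only: the toy linear model, synthetic TensorDataset, and /tmp checkpoint path are stand-in assumptions for GFNet, ImageNet, and args.model_path):

import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset

model = nn.Linear(16, 10)  # stand-in for the GFNet model
# Round-trip a checkpoint the same way the script does: save {'model': state_dict} and reload it.
torch.save({'model': model.state_dict()}, '/tmp/toy_ckpt.pth')
checkpoint = torch.load('/tmp/toy_ckpt.pth', map_location='cpu')
model.load_state_dict(checkpoint['model'])
model.eval()

# Synthetic stand-in for the ImageNet validation split.
dataset = TensorDataset(torch.randn(64, 16), torch.randint(0, 10, (64,)))
loader = DataLoader(dataset, batch_size=8, num_workers=0, pin_memory=False, drop_last=False)

criterion = nn.CrossEntropyLoss()
loss_sum, correct, total = 0.0, 0, 0
with torch.no_grad():  # evaluation only: no gradients, no autograd graph
    for images, target in loader:
        output = model(images)
        loss_sum += criterion(output, target).item() * images.size(0)
        correct += (output.argmax(dim=1) == target).sum().item()
        total += images.size(0)
print('loss %.4f  acc@1 %.2f%%' % (loss_sum / total, 100.0 * correct / total))

Note that the script above hard-codes batch_size=128 in its DataLoader rather than using its own --batch-size argument; the sketch parameterizes it normally.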
skywalk163/parl_ball
|
[
"b1529c2429a1dc9a0805e04acc4719a1b3ad678e"
] |
[
"agent.py"
] |
[
"# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n#-*- coding: utf-8 -*-\n\nimport numpy as np\nimport paddle.fluid as fluid\nimport parl\nfrom parl import layers\n\n\nclass Agent(parl.Agent):\n def __init__(self, algorithm, obs_dim, act_dim):\n self.obs_dim = obs_dim\n self.act_dim = act_dim\n super(Agent, self).__init__(algorithm)\n\n def build_program(self):\n self.pred_program = fluid.Program()\n self.learn_program = fluid.Program()\n\n with fluid.program_guard(self.pred_program): # 搭建计算图用于 预测动作,定义输入输出变量\n obs = layers.data(\n name='obs', shape=[self.obs_dim], dtype='float32')\n self.act_prob = self.alg.predict(obs)\n\n with fluid.program_guard(\n self.learn_program): # 搭建计算图用于 更新policy网络,定义输入输出变量\n obs = layers.data(\n name='obs', shape=[self.obs_dim], dtype='float32')\n act = layers.data(name='act', shape=[1], dtype='int64')\n reward = layers.data(name='reward', shape=[], dtype='float32')\n self.cost = self.alg.learn(obs, act, reward)\n\n def sample(self, obs):\n obs = np.expand_dims(obs, axis=0) # 增加一维维度\n act_prob = self.fluid_executor.run(\n self.pred_program,\n feed={'obs': obs.astype('float32')},\n fetch_list=[self.act_prob])[0]\n act_prob = np.squeeze(act_prob, axis=0) # 减少一维维度\n act = np.random.choice(range(self.act_dim), p=act_prob) # 根据动作概率选取动作\n return act\n\n def predict(self, obs):\n obs = np.expand_dims(obs, axis=0)\n act_prob = self.fluid_executor.run(\n self.pred_program,\n feed={'obs': obs.astype('float32')},\n fetch_list=[self.act_prob])[0]\n act_prob = np.squeeze(act_prob, axis=0)\n act = np.argmax(act_prob) # 根据动作概率选择概率最高的动作\n return act\n\n def learn(self, obs, act, reward):\n act = np.expand_dims(act, axis=-1)\n feed = {\n 'obs': obs.astype('float32'),\n 'act': act.astype('int64'),\n 'reward': reward.astype('float32')\n }\n #int 64 to float TypeError: Cannot cast ufunc subtract output from dtype('float64') to dtype('int64') with casting rule 'same_kind'\n cost = self.fluid_executor.run(\n self.learn_program, feed=feed, fetch_list=[self.cost])[0]\n return cost\n"
] |
[
[
"numpy.squeeze",
"numpy.expand_dims",
"numpy.argmax"
]
] |
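The numpy calls indexed for agent.py are all batch-dimension bookkeeping around the fluid executor: expand_dims adds the leading batch axis before feeding, squeeze strips it from the fetched probabilities, and argmax (or numpy.random.choice in sample()) turns the probability row into an action. A minimal sketch of that round trip, with a hard-coded probability row standing in for the executor output:

import numpy as np

act_dim = 3
obs = np.array([0.1, -0.4, 0.7], dtype='float32')

batched = np.expand_dims(obs, axis=0)  # shape (3,) -> (1, 3), as in sample()/predict()

# Pretend the executor returned this (1, act_dim) row of action probabilities.
act_prob = np.squeeze(np.array([[0.2, 0.5, 0.3]]), axis=0)  # shape (1, 3) -> (3,)

sampled = np.random.choice(range(act_dim), p=act_prob)  # exploration, as in sample()
greedy = np.argmax(act_prob)  # deterministic, as in predict()
print(batched.shape, sampled, greedy)

The split mirrors the usual policy-gradient convention: sample() explores during training rollouts, while predict() exploits at evaluation time.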
l532857663/spark
|
[
"481f0792944d9a77f0fe8b5e2596da1d600b9d0a"
] |
[
"python/pyspark/sql/tests.py"
] |
[
"# -*- encoding: utf-8 -*-\n#\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"\nUnit tests for pyspark.sql; additional tests are implemented as doctests in\nindividual modules.\n\"\"\"\nimport os\nimport sys\nimport subprocess\nimport pydoc\nimport shutil\nimport tempfile\nimport pickle\nimport functools\nimport time\nimport datetime\nimport array\nimport ctypes\nimport py4j\n\ntry:\n import xmlrunner\nexcept ImportError:\n xmlrunner = None\n\nif sys.version_info[:2] <= (2, 6):\n try:\n import unittest2 as unittest\n except ImportError:\n sys.stderr.write('Please install unittest2 to test with Python 2.6 or earlier')\n sys.exit(1)\nelse:\n import unittest\n\n_have_pandas = False\ntry:\n import pandas\n _have_pandas = True\nexcept:\n # No Pandas, but that's okay, we'll skip those tests\n pass\n\nfrom pyspark import SparkContext\nfrom pyspark.sql import SparkSession, SQLContext, HiveContext, Column, Row\nfrom pyspark.sql.types import *\nfrom pyspark.sql.types import UserDefinedType, _infer_type, _make_type_verifier\nfrom pyspark.sql.types import _array_signed_int_typecode_ctype_mappings, _array_type_mappings\nfrom pyspark.sql.types import _array_unsigned_int_typecode_ctype_mappings\nfrom pyspark.tests import QuietTest, ReusedPySparkTestCase, SparkSubmitTests\nfrom pyspark.sql.functions import UserDefinedFunction, sha2, lit\nfrom pyspark.sql.window import Window\nfrom pyspark.sql.utils import AnalysisException, ParseException, IllegalArgumentException\n\n\n_have_arrow = False\ntry:\n import pyarrow\n _have_arrow = True\nexcept:\n # No Arrow, but that's okay, we'll skip those tests\n pass\n\n\nclass UTCOffsetTimezone(datetime.tzinfo):\n \"\"\"\n Specifies timezone in UTC offset\n \"\"\"\n\n def __init__(self, offset=0):\n self.ZERO = datetime.timedelta(hours=offset)\n\n def utcoffset(self, dt):\n return self.ZERO\n\n def dst(self, dt):\n return self.ZERO\n\n\nclass ExamplePointUDT(UserDefinedType):\n \"\"\"\n User-defined type (UDT) for ExamplePoint.\n \"\"\"\n\n @classmethod\n def sqlType(self):\n return ArrayType(DoubleType(), False)\n\n @classmethod\n def module(cls):\n return 'pyspark.sql.tests'\n\n @classmethod\n def scalaUDT(cls):\n return 'org.apache.spark.sql.test.ExamplePointUDT'\n\n def serialize(self, obj):\n return [obj.x, obj.y]\n\n def deserialize(self, datum):\n return ExamplePoint(datum[0], datum[1])\n\n\nclass ExamplePoint:\n \"\"\"\n An example class to demonstrate UDT in Scala, Java, and Python.\n \"\"\"\n\n __UDT__ = ExamplePointUDT()\n\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\n def __repr__(self):\n return \"ExamplePoint(%s,%s)\" % (self.x, self.y)\n\n def __str__(self):\n return \"(%s,%s)\" % (self.x, self.y)\n\n def __eq__(self, other):\n return isinstance(other, self.__class__) and \\\n other.x == self.x and 
other.y == self.y\n\n\nclass PythonOnlyUDT(UserDefinedType):\n \"\"\"\n User-defined type (UDT) for ExamplePoint.\n \"\"\"\n\n @classmethod\n def sqlType(self):\n return ArrayType(DoubleType(), False)\n\n @classmethod\n def module(cls):\n return '__main__'\n\n def serialize(self, obj):\n return [obj.x, obj.y]\n\n def deserialize(self, datum):\n return PythonOnlyPoint(datum[0], datum[1])\n\n @staticmethod\n def foo():\n pass\n\n @property\n def props(self):\n return {}\n\n\nclass PythonOnlyPoint(ExamplePoint):\n \"\"\"\n An example class to demonstrate UDT in only Python\n \"\"\"\n __UDT__ = PythonOnlyUDT()\n\n\nclass MyObject(object):\n def __init__(self, key, value):\n self.key = key\n self.value = value\n\n\nclass DataTypeTests(unittest.TestCase):\n # regression test for SPARK-6055\n def test_data_type_eq(self):\n lt = LongType()\n lt2 = pickle.loads(pickle.dumps(LongType()))\n self.assertEqual(lt, lt2)\n\n # regression test for SPARK-7978\n def test_decimal_type(self):\n t1 = DecimalType()\n t2 = DecimalType(10, 2)\n self.assertTrue(t2 is not t1)\n self.assertNotEqual(t1, t2)\n t3 = DecimalType(8)\n self.assertNotEqual(t2, t3)\n\n # regression test for SPARK-10392\n def test_datetype_equal_zero(self):\n dt = DateType()\n self.assertEqual(dt.fromInternal(0), datetime.date(1970, 1, 1))\n\n # regression test for SPARK-17035\n def test_timestamp_microsecond(self):\n tst = TimestampType()\n self.assertEqual(tst.toInternal(datetime.datetime.max) % 1000000, 999999)\n\n def test_empty_row(self):\n row = Row()\n self.assertEqual(len(row), 0)\n\n\nclass SQLTests(ReusedPySparkTestCase):\n\n @classmethod\n def setUpClass(cls):\n ReusedPySparkTestCase.setUpClass()\n cls.tempdir = tempfile.NamedTemporaryFile(delete=False)\n os.unlink(cls.tempdir.name)\n cls.spark = SparkSession(cls.sc)\n cls.testData = [Row(key=i, value=str(i)) for i in range(100)]\n cls.df = cls.spark.createDataFrame(cls.testData)\n\n @classmethod\n def tearDownClass(cls):\n ReusedPySparkTestCase.tearDownClass()\n cls.spark.stop()\n shutil.rmtree(cls.tempdir.name, ignore_errors=True)\n\n def test_sqlcontext_reuses_sparksession(self):\n sqlContext1 = SQLContext(self.sc)\n sqlContext2 = SQLContext(self.sc)\n self.assertTrue(sqlContext1.sparkSession is sqlContext2.sparkSession)\n\n def tearDown(self):\n super(SQLTests, self).tearDown()\n\n # tear down test_bucketed_write state\n self.spark.sql(\"DROP TABLE IF EXISTS pyspark_bucket\")\n\n def test_row_should_be_read_only(self):\n row = Row(a=1, b=2)\n self.assertEqual(1, row.a)\n\n def foo():\n row.a = 3\n self.assertRaises(Exception, foo)\n\n row2 = self.spark.range(10).first()\n self.assertEqual(0, row2.id)\n\n def foo2():\n row2.id = 2\n self.assertRaises(Exception, foo2)\n\n def test_range(self):\n self.assertEqual(self.spark.range(1, 1).count(), 0)\n self.assertEqual(self.spark.range(1, 0, -1).count(), 1)\n self.assertEqual(self.spark.range(0, 1 << 40, 1 << 39).count(), 2)\n self.assertEqual(self.spark.range(-2).count(), 0)\n self.assertEqual(self.spark.range(3).count(), 3)\n\n def test_duplicated_column_names(self):\n df = self.spark.createDataFrame([(1, 2)], [\"c\", \"c\"])\n row = df.select('*').first()\n self.assertEqual(1, row[0])\n self.assertEqual(2, row[1])\n self.assertEqual(\"Row(c=1, c=2)\", str(row))\n # Cannot access columns\n self.assertRaises(AnalysisException, lambda: df.select(df[0]).first())\n self.assertRaises(AnalysisException, lambda: df.select(df.c).first())\n self.assertRaises(AnalysisException, lambda: df.select(df[\"c\"]).first())\n\n def 
test_column_name_encoding(self):\n \"\"\"Ensure that created columns has `str` type consistently.\"\"\"\n columns = self.spark.createDataFrame([('Alice', 1)], ['name', u'age']).columns\n self.assertEqual(columns, ['name', 'age'])\n self.assertTrue(isinstance(columns[0], str))\n self.assertTrue(isinstance(columns[1], str))\n\n def test_explode(self):\n from pyspark.sql.functions import explode, explode_outer, posexplode_outer\n d = [\n Row(a=1, intlist=[1, 2, 3], mapfield={\"a\": \"b\"}),\n Row(a=1, intlist=[], mapfield={}),\n Row(a=1, intlist=None, mapfield=None),\n ]\n rdd = self.sc.parallelize(d)\n data = self.spark.createDataFrame(rdd)\n\n result = data.select(explode(data.intlist).alias(\"a\")).select(\"a\").collect()\n self.assertEqual(result[0][0], 1)\n self.assertEqual(result[1][0], 2)\n self.assertEqual(result[2][0], 3)\n\n result = data.select(explode(data.mapfield).alias(\"a\", \"b\")).select(\"a\", \"b\").collect()\n self.assertEqual(result[0][0], \"a\")\n self.assertEqual(result[0][1], \"b\")\n\n result = [tuple(x) for x in data.select(posexplode_outer(\"intlist\")).collect()]\n self.assertEqual(result, [(0, 1), (1, 2), (2, 3), (None, None), (None, None)])\n\n result = [tuple(x) for x in data.select(posexplode_outer(\"mapfield\")).collect()]\n self.assertEqual(result, [(0, 'a', 'b'), (None, None, None), (None, None, None)])\n\n result = [x[0] for x in data.select(explode_outer(\"intlist\")).collect()]\n self.assertEqual(result, [1, 2, 3, None, None])\n\n result = [tuple(x) for x in data.select(explode_outer(\"mapfield\")).collect()]\n self.assertEqual(result, [('a', 'b'), (None, None), (None, None)])\n\n def test_and_in_expression(self):\n self.assertEqual(4, self.df.filter((self.df.key <= 10) & (self.df.value <= \"2\")).count())\n self.assertRaises(ValueError, lambda: (self.df.key <= 10) and (self.df.value <= \"2\"))\n self.assertEqual(14, self.df.filter((self.df.key <= 3) | (self.df.value < \"2\")).count())\n self.assertRaises(ValueError, lambda: self.df.key <= 3 or self.df.value < \"2\")\n self.assertEqual(99, self.df.filter(~(self.df.key == 1)).count())\n self.assertRaises(ValueError, lambda: not self.df.key == 1)\n\n def test_udf_with_callable(self):\n d = [Row(number=i, squared=i**2) for i in range(10)]\n rdd = self.sc.parallelize(d)\n data = self.spark.createDataFrame(rdd)\n\n class PlusFour:\n def __call__(self, col):\n if col is not None:\n return col + 4\n\n call = PlusFour()\n pudf = UserDefinedFunction(call, LongType())\n res = data.select(pudf(data['number']).alias('plus_four'))\n self.assertEqual(res.agg({'plus_four': 'sum'}).collect()[0][0], 85)\n\n def test_udf_with_partial_function(self):\n d = [Row(number=i, squared=i**2) for i in range(10)]\n rdd = self.sc.parallelize(d)\n data = self.spark.createDataFrame(rdd)\n\n def some_func(col, param):\n if col is not None:\n return col + param\n\n pfunc = functools.partial(some_func, param=4)\n pudf = UserDefinedFunction(pfunc, LongType())\n res = data.select(pudf(data['number']).alias('plus_four'))\n self.assertEqual(res.agg({'plus_four': 'sum'}).collect()[0][0], 85)\n\n def test_udf(self):\n self.spark.catalog.registerFunction(\"twoArgs\", lambda x, y: len(x) + y, IntegerType())\n [row] = self.spark.sql(\"SELECT twoArgs('test', 1)\").collect()\n self.assertEqual(row[0], 5)\n\n def test_udf2(self):\n self.spark.catalog.registerFunction(\"strlen\", lambda string: len(string), IntegerType())\n self.spark.createDataFrame(self.sc.parallelize([Row(a=\"test\")]))\\\n .createOrReplaceTempView(\"test\")\n [res] = 
self.spark.sql(\"SELECT strlen(a) FROM test WHERE strlen(a) > 1\").collect()\n self.assertEqual(4, res[0])\n\n def test_chained_udf(self):\n self.spark.catalog.registerFunction(\"double\", lambda x: x + x, IntegerType())\n [row] = self.spark.sql(\"SELECT double(1)\").collect()\n self.assertEqual(row[0], 2)\n [row] = self.spark.sql(\"SELECT double(double(1))\").collect()\n self.assertEqual(row[0], 4)\n [row] = self.spark.sql(\"SELECT double(double(1) + 1)\").collect()\n self.assertEqual(row[0], 6)\n\n def test_single_udf_with_repeated_argument(self):\n # regression test for SPARK-20685\n self.spark.catalog.registerFunction(\"add\", lambda x, y: x + y, IntegerType())\n row = self.spark.sql(\"SELECT add(1, 1)\").first()\n self.assertEqual(tuple(row), (2, ))\n\n def test_multiple_udfs(self):\n self.spark.catalog.registerFunction(\"double\", lambda x: x * 2, IntegerType())\n [row] = self.spark.sql(\"SELECT double(1), double(2)\").collect()\n self.assertEqual(tuple(row), (2, 4))\n [row] = self.spark.sql(\"SELECT double(double(1)), double(double(2) + 2)\").collect()\n self.assertEqual(tuple(row), (4, 12))\n self.spark.catalog.registerFunction(\"add\", lambda x, y: x + y, IntegerType())\n [row] = self.spark.sql(\"SELECT double(add(1, 2)), add(double(2), 1)\").collect()\n self.assertEqual(tuple(row), (6, 5))\n\n def test_udf_in_filter_on_top_of_outer_join(self):\n from pyspark.sql.functions import udf\n left = self.spark.createDataFrame([Row(a=1)])\n right = self.spark.createDataFrame([Row(a=1)])\n df = left.join(right, on='a', how='left_outer')\n df = df.withColumn('b', udf(lambda x: 'x')(df.a))\n self.assertEqual(df.filter('b = \"x\"').collect(), [Row(a=1, b='x')])\n\n def test_udf_in_filter_on_top_of_join(self):\n # regression test for SPARK-18589\n from pyspark.sql.functions import udf\n left = self.spark.createDataFrame([Row(a=1)])\n right = self.spark.createDataFrame([Row(b=1)])\n f = udf(lambda a, b: a == b, BooleanType())\n df = left.crossJoin(right).filter(f(\"a\", \"b\"))\n self.assertEqual(df.collect(), [Row(a=1, b=1)])\n\n def test_udf_without_arguments(self):\n self.spark.catalog.registerFunction(\"foo\", lambda: \"bar\")\n [row] = self.spark.sql(\"SELECT foo()\").collect()\n self.assertEqual(row[0], \"bar\")\n\n def test_udf_with_array_type(self):\n d = [Row(l=list(range(3)), d={\"key\": list(range(5))})]\n rdd = self.sc.parallelize(d)\n self.spark.createDataFrame(rdd).createOrReplaceTempView(\"test\")\n self.spark.catalog.registerFunction(\"copylist\", lambda l: list(l), ArrayType(IntegerType()))\n self.spark.catalog.registerFunction(\"maplen\", lambda d: len(d), IntegerType())\n [(l1, l2)] = self.spark.sql(\"select copylist(l), maplen(d) from test\").collect()\n self.assertEqual(list(range(3)), l1)\n self.assertEqual(1, l2)\n\n def test_broadcast_in_udf(self):\n bar = {\"a\": \"aa\", \"b\": \"bb\", \"c\": \"abc\"}\n foo = self.sc.broadcast(bar)\n self.spark.catalog.registerFunction(\"MYUDF\", lambda x: foo.value[x] if x else '')\n [res] = self.spark.sql(\"SELECT MYUDF('c')\").collect()\n self.assertEqual(\"abc\", res[0])\n [res] = self.spark.sql(\"SELECT MYUDF('')\").collect()\n self.assertEqual(\"\", res[0])\n\n def test_udf_with_filter_function(self):\n df = self.spark.createDataFrame([(1, \"1\"), (2, \"2\"), (1, \"2\"), (1, \"2\")], [\"key\", \"value\"])\n from pyspark.sql.functions import udf, col\n from pyspark.sql.types import BooleanType\n\n my_filter = udf(lambda a: a < 2, BooleanType())\n sel = df.select(col(\"key\"), col(\"value\")).filter((my_filter(col(\"key\"))) & 
(df.value < \"2\"))\n self.assertEqual(sel.collect(), [Row(key=1, value='1')])\n\n def test_udf_with_aggregate_function(self):\n df = self.spark.createDataFrame([(1, \"1\"), (2, \"2\"), (1, \"2\"), (1, \"2\")], [\"key\", \"value\"])\n from pyspark.sql.functions import udf, col, sum\n from pyspark.sql.types import BooleanType\n\n my_filter = udf(lambda a: a == 1, BooleanType())\n sel = df.select(col(\"key\")).distinct().filter(my_filter(col(\"key\")))\n self.assertEqual(sel.collect(), [Row(key=1)])\n\n my_copy = udf(lambda x: x, IntegerType())\n my_add = udf(lambda a, b: int(a + b), IntegerType())\n my_strlen = udf(lambda x: len(x), IntegerType())\n sel = df.groupBy(my_copy(col(\"key\")).alias(\"k\"))\\\n .agg(sum(my_strlen(col(\"value\"))).alias(\"s\"))\\\n .select(my_add(col(\"k\"), col(\"s\")).alias(\"t\"))\n self.assertEqual(sel.collect(), [Row(t=4), Row(t=3)])\n\n def test_udf_in_generate(self):\n from pyspark.sql.functions import udf, explode\n df = self.spark.range(5)\n f = udf(lambda x: list(range(x)), ArrayType(LongType()))\n row = df.select(explode(f(*df))).groupBy().sum().first()\n self.assertEqual(row[0], 10)\n\n df = self.spark.range(3)\n res = df.select(\"id\", explode(f(df.id))).collect()\n self.assertEqual(res[0][0], 1)\n self.assertEqual(res[0][1], 0)\n self.assertEqual(res[1][0], 2)\n self.assertEqual(res[1][1], 0)\n self.assertEqual(res[2][0], 2)\n self.assertEqual(res[2][1], 1)\n\n range_udf = udf(lambda value: list(range(value - 1, value + 1)), ArrayType(IntegerType()))\n res = df.select(\"id\", explode(range_udf(df.id))).collect()\n self.assertEqual(res[0][0], 0)\n self.assertEqual(res[0][1], -1)\n self.assertEqual(res[1][0], 0)\n self.assertEqual(res[1][1], 0)\n self.assertEqual(res[2][0], 1)\n self.assertEqual(res[2][1], 0)\n self.assertEqual(res[3][0], 1)\n self.assertEqual(res[3][1], 1)\n\n def test_udf_with_order_by_and_limit(self):\n from pyspark.sql.functions import udf\n my_copy = udf(lambda x: x, IntegerType())\n df = self.spark.range(10).orderBy(\"id\")\n res = df.select(df.id, my_copy(df.id).alias(\"copy\")).limit(1)\n res.explain(True)\n self.assertEqual(res.collect(), [Row(id=0, copy=0)])\n\n def test_udf_registration_returns_udf(self):\n df = self.spark.range(10)\n add_three = self.spark.udf.register(\"add_three\", lambda x: x + 3, IntegerType())\n\n self.assertListEqual(\n df.selectExpr(\"add_three(id) AS plus_three\").collect(),\n df.select(add_three(\"id\").alias(\"plus_three\")).collect()\n )\n\n def test_non_existed_udf(self):\n spark = self.spark\n self.assertRaisesRegexp(AnalysisException, \"Can not load class non_existed_udf\",\n lambda: spark.udf.registerJavaFunction(\"udf1\", \"non_existed_udf\"))\n\n def test_non_existed_udaf(self):\n spark = self.spark\n self.assertRaisesRegexp(AnalysisException, \"Can not load class non_existed_udaf\",\n lambda: spark.udf.registerJavaUDAF(\"udaf1\", \"non_existed_udaf\"))\n\n def test_multiLine_json(self):\n people1 = self.spark.read.json(\"python/test_support/sql/people.json\")\n people_array = self.spark.read.json(\"python/test_support/sql/people_array.json\",\n multiLine=True)\n self.assertEqual(people1.collect(), people_array.collect())\n\n def test_multiline_csv(self):\n ages_newlines = self.spark.read.csv(\n \"python/test_support/sql/ages_newlines.csv\", multiLine=True)\n expected = [Row(_c0=u'Joe', _c1=u'20', _c2=u'Hi,\\nI am Jeo'),\n Row(_c0=u'Tom', _c1=u'30', _c2=u'My name is Tom'),\n Row(_c0=u'Hyukjin', _c1=u'25', _c2=u'I am Hyukjin\\n\\nI love Spark!')]\n self.assertEqual(ages_newlines.collect(), 
expected)\n\n def test_ignorewhitespace_csv(self):\n tmpPath = tempfile.mkdtemp()\n shutil.rmtree(tmpPath)\n self.spark.createDataFrame([[\" a\", \"b \", \" c \"]]).write.csv(\n tmpPath,\n ignoreLeadingWhiteSpace=False,\n ignoreTrailingWhiteSpace=False)\n\n expected = [Row(value=u' a,b , c ')]\n readback = self.spark.read.text(tmpPath)\n self.assertEqual(readback.collect(), expected)\n shutil.rmtree(tmpPath)\n\n def test_read_multiple_orc_file(self):\n df = self.spark.read.orc([\"python/test_support/sql/orc_partitioned/b=0/c=0\",\n \"python/test_support/sql/orc_partitioned/b=1/c=1\"])\n self.assertEqual(2, df.count())\n\n def test_udf_with_input_file_name(self):\n from pyspark.sql.functions import udf, input_file_name\n from pyspark.sql.types import StringType\n sourceFile = udf(lambda path: path, StringType())\n filePath = \"python/test_support/sql/people1.json\"\n row = self.spark.read.json(filePath).select(sourceFile(input_file_name())).first()\n self.assertTrue(row[0].find(\"people1.json\") != -1)\n\n def test_udf_with_input_file_name_for_hadooprdd(self):\n from pyspark.sql.functions import udf, input_file_name\n from pyspark.sql.types import StringType\n\n def filename(path):\n return path\n\n sameText = udf(filename, StringType())\n\n rdd = self.sc.textFile('python/test_support/sql/people.json')\n df = self.spark.read.json(rdd).select(input_file_name().alias('file'))\n row = df.select(sameText(df['file'])).first()\n self.assertTrue(row[0].find(\"people.json\") != -1)\n\n rdd2 = self.sc.newAPIHadoopFile(\n 'python/test_support/sql/people.json',\n 'org.apache.hadoop.mapreduce.lib.input.TextInputFormat',\n 'org.apache.hadoop.io.LongWritable',\n 'org.apache.hadoop.io.Text')\n\n df2 = self.spark.read.json(rdd2).select(input_file_name().alias('file'))\n row2 = df2.select(sameText(df2['file'])).first()\n self.assertTrue(row2[0].find(\"people.json\") != -1)\n\n def test_udf_defers_judf_initalization(self):\n # This is separate of UDFInitializationTests\n # to avoid context initialization\n # when udf is called\n\n from pyspark.sql.functions import UserDefinedFunction\n\n f = UserDefinedFunction(lambda x: x, StringType())\n\n self.assertIsNone(\n f._judf_placeholder,\n \"judf should not be initialized before the first call.\"\n )\n\n self.assertIsInstance(f(\"foo\"), Column, \"UDF call should return a Column.\")\n\n self.assertIsNotNone(\n f._judf_placeholder,\n \"judf should be initialized after UDF has been called.\"\n )\n\n def test_udf_with_string_return_type(self):\n from pyspark.sql.functions import UserDefinedFunction\n\n add_one = UserDefinedFunction(lambda x: x + 1, \"integer\")\n make_pair = UserDefinedFunction(lambda x: (-x, x), \"struct<x:integer,y:integer>\")\n make_array = UserDefinedFunction(\n lambda x: [float(x) for x in range(x, x + 3)], \"array<double>\")\n\n expected = (2, Row(x=-1, y=1), [1.0, 2.0, 3.0])\n actual = (self.spark.range(1, 2).toDF(\"x\")\n .select(add_one(\"x\"), make_pair(\"x\"), make_array(\"x\"))\n .first())\n\n self.assertTupleEqual(expected, actual)\n\n def test_udf_shouldnt_accept_noncallable_object(self):\n from pyspark.sql.functions import UserDefinedFunction\n from pyspark.sql.types import StringType\n\n non_callable = None\n self.assertRaises(TypeError, UserDefinedFunction, non_callable, StringType())\n\n def test_udf_with_decorator(self):\n from pyspark.sql.functions import lit, udf\n from pyspark.sql.types import IntegerType, DoubleType\n\n @udf(IntegerType())\n def add_one(x):\n if x is not None:\n return x + 1\n\n @udf(returnType=DoubleType())\n 
def add_two(x):\n if x is not None:\n return float(x + 2)\n\n @udf\n def to_upper(x):\n if x is not None:\n return x.upper()\n\n @udf()\n def to_lower(x):\n if x is not None:\n return x.lower()\n\n @udf\n def substr(x, start, end):\n if x is not None:\n return x[start:end]\n\n @udf(\"long\")\n def trunc(x):\n return int(x)\n\n @udf(returnType=\"double\")\n def as_double(x):\n return float(x)\n\n df = (\n self.spark\n .createDataFrame(\n [(1, \"Foo\", \"foobar\", 3.0)], (\"one\", \"Foo\", \"foobar\", \"float\"))\n .select(\n add_one(\"one\"), add_two(\"one\"),\n to_upper(\"Foo\"), to_lower(\"Foo\"),\n substr(\"foobar\", lit(0), lit(3)),\n trunc(\"float\"), as_double(\"one\")))\n\n self.assertListEqual(\n [tpe for _, tpe in df.dtypes],\n [\"int\", \"double\", \"string\", \"string\", \"string\", \"bigint\", \"double\"]\n )\n\n self.assertListEqual(\n list(df.first()),\n [2, 3.0, \"FOO\", \"foo\", \"foo\", 3, 1.0]\n )\n\n def test_udf_wrapper(self):\n from pyspark.sql.functions import udf\n from pyspark.sql.types import IntegerType\n\n def f(x):\n \"\"\"Identity\"\"\"\n return x\n\n return_type = IntegerType()\n f_ = udf(f, return_type)\n\n self.assertTrue(f.__doc__ in f_.__doc__)\n self.assertEqual(f, f_.func)\n self.assertEqual(return_type, f_.returnType)\n\n class F(object):\n \"\"\"Identity\"\"\"\n def __call__(self, x):\n return x\n\n f = F()\n return_type = IntegerType()\n f_ = udf(f, return_type)\n\n self.assertTrue(f.__doc__ in f_.__doc__)\n self.assertEqual(f, f_.func)\n self.assertEqual(return_type, f_.returnType)\n\n f = functools.partial(f, x=1)\n return_type = IntegerType()\n f_ = udf(f, return_type)\n\n self.assertTrue(f.__doc__ in f_.__doc__)\n self.assertEqual(f, f_.func)\n self.assertEqual(return_type, f_.returnType)\n\n def test_basic_functions(self):\n rdd = self.sc.parallelize(['{\"foo\":\"bar\"}', '{\"foo\":\"baz\"}'])\n df = self.spark.read.json(rdd)\n df.count()\n df.collect()\n df.schema\n\n # cache and checkpoint\n self.assertFalse(df.is_cached)\n df.persist()\n df.unpersist(True)\n df.cache()\n self.assertTrue(df.is_cached)\n self.assertEqual(2, df.count())\n\n df.createOrReplaceTempView(\"temp\")\n df = self.spark.sql(\"select foo from temp\")\n df.count()\n df.collect()\n\n def test_apply_schema_to_row(self):\n df = self.spark.read.json(self.sc.parallelize([\"\"\"{\"a\":2}\"\"\"]))\n df2 = self.spark.createDataFrame(df.rdd.map(lambda x: x), df.schema)\n self.assertEqual(df.collect(), df2.collect())\n\n rdd = self.sc.parallelize(range(10)).map(lambda x: Row(a=x))\n df3 = self.spark.createDataFrame(rdd, df.schema)\n self.assertEqual(10, df3.count())\n\n def test_infer_schema_to_local(self):\n input = [{\"a\": 1}, {\"b\": \"coffee\"}]\n rdd = self.sc.parallelize(input)\n df = self.spark.createDataFrame(input)\n df2 = self.spark.createDataFrame(rdd, samplingRatio=1.0)\n self.assertEqual(df.schema, df2.schema)\n\n rdd = self.sc.parallelize(range(10)).map(lambda x: Row(a=x, b=None))\n df3 = self.spark.createDataFrame(rdd, df.schema)\n self.assertEqual(10, df3.count())\n\n def test_apply_schema_to_dict_and_rows(self):\n schema = StructType().add(\"b\", StringType()).add(\"a\", IntegerType())\n input = [{\"a\": 1}, {\"b\": \"coffee\"}]\n rdd = self.sc.parallelize(input)\n for verify in [False, True]:\n df = self.spark.createDataFrame(input, schema, verifySchema=verify)\n df2 = self.spark.createDataFrame(rdd, schema, verifySchema=verify)\n self.assertEqual(df.schema, df2.schema)\n\n rdd = self.sc.parallelize(range(10)).map(lambda x: Row(a=x, b=None))\n df3 = 
self.spark.createDataFrame(rdd, schema, verifySchema=verify)\n self.assertEqual(10, df3.count())\n input = [Row(a=x, b=str(x)) for x in range(10)]\n df4 = self.spark.createDataFrame(input, schema, verifySchema=verify)\n self.assertEqual(10, df4.count())\n\n def test_create_dataframe_schema_mismatch(self):\n input = [Row(a=1)]\n rdd = self.sc.parallelize(range(3)).map(lambda i: Row(a=i))\n schema = StructType([StructField(\"a\", IntegerType()), StructField(\"b\", StringType())])\n df = self.spark.createDataFrame(rdd, schema)\n self.assertRaises(Exception, lambda: df.show())\n\n def test_serialize_nested_array_and_map(self):\n d = [Row(l=[Row(a=1, b='s')], d={\"key\": Row(c=1.0, d=\"2\")})]\n rdd = self.sc.parallelize(d)\n df = self.spark.createDataFrame(rdd)\n row = df.head()\n self.assertEqual(1, len(row.l))\n self.assertEqual(1, row.l[0].a)\n self.assertEqual(\"2\", row.d[\"key\"].d)\n\n l = df.rdd.map(lambda x: x.l).first()\n self.assertEqual(1, len(l))\n self.assertEqual('s', l[0].b)\n\n d = df.rdd.map(lambda x: x.d).first()\n self.assertEqual(1, len(d))\n self.assertEqual(1.0, d[\"key\"].c)\n\n row = df.rdd.map(lambda x: x.d[\"key\"]).first()\n self.assertEqual(1.0, row.c)\n self.assertEqual(\"2\", row.d)\n\n def test_infer_schema(self):\n d = [Row(l=[], d={}, s=None),\n Row(l=[Row(a=1, b='s')], d={\"key\": Row(c=1.0, d=\"2\")}, s=\"\")]\n rdd = self.sc.parallelize(d)\n df = self.spark.createDataFrame(rdd)\n self.assertEqual([], df.rdd.map(lambda r: r.l).first())\n self.assertEqual([None, \"\"], df.rdd.map(lambda r: r.s).collect())\n df.createOrReplaceTempView(\"test\")\n result = self.spark.sql(\"SELECT l[0].a from test where d['key'].d = '2'\")\n self.assertEqual(1, result.head()[0])\n\n df2 = self.spark.createDataFrame(rdd, samplingRatio=1.0)\n self.assertEqual(df.schema, df2.schema)\n self.assertEqual({}, df2.rdd.map(lambda r: r.d).first())\n self.assertEqual([None, \"\"], df2.rdd.map(lambda r: r.s).collect())\n df2.createOrReplaceTempView(\"test2\")\n result = self.spark.sql(\"SELECT l[0].a from test2 where d['key'].d = '2'\")\n self.assertEqual(1, result.head()[0])\n\n def test_infer_nested_schema(self):\n NestedRow = Row(\"f1\", \"f2\")\n nestedRdd1 = self.sc.parallelize([NestedRow([1, 2], {\"row1\": 1.0}),\n NestedRow([2, 3], {\"row2\": 2.0})])\n df = self.spark.createDataFrame(nestedRdd1)\n self.assertEqual(Row(f1=[1, 2], f2={u'row1': 1.0}), df.collect()[0])\n\n nestedRdd2 = self.sc.parallelize([NestedRow([[1, 2], [2, 3]], [1, 2]),\n NestedRow([[2, 3], [3, 4]], [2, 3])])\n df = self.spark.createDataFrame(nestedRdd2)\n self.assertEqual(Row(f1=[[1, 2], [2, 3]], f2=[1, 2]), df.collect()[0])\n\n from collections import namedtuple\n CustomRow = namedtuple('CustomRow', 'field1 field2')\n rdd = self.sc.parallelize([CustomRow(field1=1, field2=\"row1\"),\n CustomRow(field1=2, field2=\"row2\"),\n CustomRow(field1=3, field2=\"row3\")])\n df = self.spark.createDataFrame(rdd)\n self.assertEqual(Row(field1=1, field2=u'row1'), df.first())\n\n def test_create_dataframe_from_objects(self):\n data = [MyObject(1, \"1\"), MyObject(2, \"2\")]\n df = self.spark.createDataFrame(data)\n self.assertEqual(df.dtypes, [(\"key\", \"bigint\"), (\"value\", \"string\")])\n self.assertEqual(df.first(), Row(key=1, value=\"1\"))\n\n def test_select_null_literal(self):\n df = self.spark.sql(\"select null as col\")\n self.assertEqual(Row(col=None), df.first())\n\n def test_apply_schema(self):\n from datetime import date, datetime\n rdd = self.sc.parallelize([(127, -128, -32768, 32767, 2147483647, 1.0,\n 
date(2010, 1, 1), datetime(2010, 1, 1, 1, 1, 1),\n {\"a\": 1}, (2,), [1, 2, 3], None)])\n schema = StructType([\n StructField(\"byte1\", ByteType(), False),\n StructField(\"byte2\", ByteType(), False),\n StructField(\"short1\", ShortType(), False),\n StructField(\"short2\", ShortType(), False),\n StructField(\"int1\", IntegerType(), False),\n StructField(\"float1\", FloatType(), False),\n StructField(\"date1\", DateType(), False),\n StructField(\"time1\", TimestampType(), False),\n StructField(\"map1\", MapType(StringType(), IntegerType(), False), False),\n StructField(\"struct1\", StructType([StructField(\"b\", ShortType(), False)]), False),\n StructField(\"list1\", ArrayType(ByteType(), False), False),\n StructField(\"null1\", DoubleType(), True)])\n df = self.spark.createDataFrame(rdd, schema)\n results = df.rdd.map(lambda x: (x.byte1, x.byte2, x.short1, x.short2, x.int1, x.float1,\n x.date1, x.time1, x.map1[\"a\"], x.struct1.b, x.list1, x.null1))\n r = (127, -128, -32768, 32767, 2147483647, 1.0, date(2010, 1, 1),\n datetime(2010, 1, 1, 1, 1, 1), 1, 2, [1, 2, 3], None)\n self.assertEqual(r, results.first())\n\n df.createOrReplaceTempView(\"table2\")\n r = self.spark.sql(\"SELECT byte1 - 1 AS byte1, byte2 + 1 AS byte2, \" +\n \"short1 + 1 AS short1, short2 - 1 AS short2, int1 - 1 AS int1, \" +\n \"float1 + 1.5 as float1 FROM table2\").first()\n\n self.assertEqual((126, -127, -32767, 32766, 2147483646, 2.5), tuple(r))\n\n from pyspark.sql.types import _parse_schema_abstract, _infer_schema_type\n rdd = self.sc.parallelize([(127, -32768, 1.0, datetime(2010, 1, 1, 1, 1, 1),\n {\"a\": 1}, (2,), [1, 2, 3])])\n abstract = \"byte1 short1 float1 time1 map1{} struct1(b) list1[]\"\n schema = _parse_schema_abstract(abstract)\n typedSchema = _infer_schema_type(rdd.first(), schema)\n df = self.spark.createDataFrame(rdd, typedSchema)\n r = (127, -32768, 1.0, datetime(2010, 1, 1, 1, 1, 1), {\"a\": 1}, Row(b=2), [1, 2, 3])\n self.assertEqual(r, tuple(df.first()))\n\n def test_struct_in_map(self):\n d = [Row(m={Row(i=1): Row(s=\"\")})]\n df = self.sc.parallelize(d).toDF()\n k, v = list(df.head().m.items())[0]\n self.assertEqual(1, k.i)\n self.assertEqual(\"\", v.s)\n\n def test_convert_row_to_dict(self):\n row = Row(l=[Row(a=1, b='s')], d={\"key\": Row(c=1.0, d=\"2\")})\n self.assertEqual(1, row.asDict()['l'][0].a)\n df = self.sc.parallelize([row]).toDF()\n df.createOrReplaceTempView(\"test\")\n row = self.spark.sql(\"select l, d from test\").head()\n self.assertEqual(1, row.asDict()[\"l\"][0].a)\n self.assertEqual(1.0, row.asDict()['d']['key'].c)\n\n def test_udt(self):\n from pyspark.sql.types import _parse_datatype_json_string, _infer_type, _make_type_verifier\n from pyspark.sql.tests import ExamplePointUDT, ExamplePoint\n\n def check_datatype(datatype):\n pickled = pickle.loads(pickle.dumps(datatype))\n assert datatype == pickled\n scala_datatype = self.spark._jsparkSession.parseDataType(datatype.json())\n python_datatype = _parse_datatype_json_string(scala_datatype.json())\n assert datatype == python_datatype\n\n check_datatype(ExamplePointUDT())\n structtype_with_udt = StructType([StructField(\"label\", DoubleType(), False),\n StructField(\"point\", ExamplePointUDT(), False)])\n check_datatype(structtype_with_udt)\n p = ExamplePoint(1.0, 2.0)\n self.assertEqual(_infer_type(p), ExamplePointUDT())\n _make_type_verifier(ExamplePointUDT())(ExamplePoint(1.0, 2.0))\n self.assertRaises(ValueError, lambda: _make_type_verifier(ExamplePointUDT())([1.0, 2.0]))\n\n check_datatype(PythonOnlyUDT())\n 
structtype_with_udt = StructType([StructField(\"label\", DoubleType(), False),\n StructField(\"point\", PythonOnlyUDT(), False)])\n check_datatype(structtype_with_udt)\n p = PythonOnlyPoint(1.0, 2.0)\n self.assertEqual(_infer_type(p), PythonOnlyUDT())\n _make_type_verifier(PythonOnlyUDT())(PythonOnlyPoint(1.0, 2.0))\n self.assertRaises(\n ValueError,\n lambda: _make_type_verifier(PythonOnlyUDT())([1.0, 2.0]))\n\n def test_simple_udt_in_df(self):\n schema = StructType().add(\"key\", LongType()).add(\"val\", PythonOnlyUDT())\n df = self.spark.createDataFrame(\n [(i % 3, PythonOnlyPoint(float(i), float(i))) for i in range(10)],\n schema=schema)\n df.show()\n\n def test_nested_udt_in_df(self):\n schema = StructType().add(\"key\", LongType()).add(\"val\", ArrayType(PythonOnlyUDT()))\n df = self.spark.createDataFrame(\n [(i % 3, [PythonOnlyPoint(float(i), float(i))]) for i in range(10)],\n schema=schema)\n df.collect()\n\n schema = StructType().add(\"key\", LongType()).add(\"val\",\n MapType(LongType(), PythonOnlyUDT()))\n df = self.spark.createDataFrame(\n [(i % 3, {i % 3: PythonOnlyPoint(float(i + 1), float(i + 1))}) for i in range(10)],\n schema=schema)\n df.collect()\n\n def test_complex_nested_udt_in_df(self):\n from pyspark.sql.functions import udf\n\n schema = StructType().add(\"key\", LongType()).add(\"val\", PythonOnlyUDT())\n df = self.spark.createDataFrame(\n [(i % 3, PythonOnlyPoint(float(i), float(i))) for i in range(10)],\n schema=schema)\n df.collect()\n\n gd = df.groupby(\"key\").agg({\"val\": \"collect_list\"})\n gd.collect()\n udf = udf(lambda k, v: [(k, v[0])], ArrayType(df.schema))\n gd.select(udf(*gd)).collect()\n\n def test_udt_with_none(self):\n df = self.spark.range(0, 10, 1, 1)\n\n def myudf(x):\n if x > 0:\n return PythonOnlyPoint(float(x), float(x))\n\n self.spark.catalog.registerFunction(\"udf\", myudf, PythonOnlyUDT())\n rows = [r[0] for r in df.selectExpr(\"udf(id)\").take(2)]\n self.assertEqual(rows, [None, PythonOnlyPoint(1, 1)])\n\n def test_infer_schema_with_udt(self):\n from pyspark.sql.tests import ExamplePoint, ExamplePointUDT\n row = Row(label=1.0, point=ExamplePoint(1.0, 2.0))\n df = self.spark.createDataFrame([row])\n schema = df.schema\n field = [f for f in schema.fields if f.name == \"point\"][0]\n self.assertEqual(type(field.dataType), ExamplePointUDT)\n df.createOrReplaceTempView(\"labeled_point\")\n point = self.spark.sql(\"SELECT point FROM labeled_point\").head().point\n self.assertEqual(point, ExamplePoint(1.0, 2.0))\n\n row = Row(label=1.0, point=PythonOnlyPoint(1.0, 2.0))\n df = self.spark.createDataFrame([row])\n schema = df.schema\n field = [f for f in schema.fields if f.name == \"point\"][0]\n self.assertEqual(type(field.dataType), PythonOnlyUDT)\n df.createOrReplaceTempView(\"labeled_point\")\n point = self.spark.sql(\"SELECT point FROM labeled_point\").head().point\n self.assertEqual(point, PythonOnlyPoint(1.0, 2.0))\n\n def test_apply_schema_with_udt(self):\n from pyspark.sql.tests import ExamplePoint, ExamplePointUDT\n row = (1.0, ExamplePoint(1.0, 2.0))\n schema = StructType([StructField(\"label\", DoubleType(), False),\n StructField(\"point\", ExamplePointUDT(), False)])\n df = self.spark.createDataFrame([row], schema)\n point = df.head().point\n self.assertEqual(point, ExamplePoint(1.0, 2.0))\n\n row = (1.0, PythonOnlyPoint(1.0, 2.0))\n schema = StructType([StructField(\"label\", DoubleType(), False),\n StructField(\"point\", PythonOnlyUDT(), False)])\n df = self.spark.createDataFrame([row], schema)\n point = df.head().point\n 
self.assertEqual(point, PythonOnlyPoint(1.0, 2.0))\n\n def test_udf_with_udt(self):\n from pyspark.sql.tests import ExamplePoint, ExamplePointUDT\n row = Row(label=1.0, point=ExamplePoint(1.0, 2.0))\n df = self.spark.createDataFrame([row])\n self.assertEqual(1.0, df.rdd.map(lambda r: r.point.x).first())\n udf = UserDefinedFunction(lambda p: p.y, DoubleType())\n self.assertEqual(2.0, df.select(udf(df.point)).first()[0])\n udf2 = UserDefinedFunction(lambda p: ExamplePoint(p.x + 1, p.y + 1), ExamplePointUDT())\n self.assertEqual(ExamplePoint(2.0, 3.0), df.select(udf2(df.point)).first()[0])\n\n row = Row(label=1.0, point=PythonOnlyPoint(1.0, 2.0))\n df = self.spark.createDataFrame([row])\n self.assertEqual(1.0, df.rdd.map(lambda r: r.point.x).first())\n udf = UserDefinedFunction(lambda p: p.y, DoubleType())\n self.assertEqual(2.0, df.select(udf(df.point)).first()[0])\n udf2 = UserDefinedFunction(lambda p: PythonOnlyPoint(p.x + 1, p.y + 1), PythonOnlyUDT())\n self.assertEqual(PythonOnlyPoint(2.0, 3.0), df.select(udf2(df.point)).first()[0])\n\n def test_parquet_with_udt(self):\n from pyspark.sql.tests import ExamplePoint, ExamplePointUDT\n row = Row(label=1.0, point=ExamplePoint(1.0, 2.0))\n df0 = self.spark.createDataFrame([row])\n output_dir = os.path.join(self.tempdir.name, \"labeled_point\")\n df0.write.parquet(output_dir)\n df1 = self.spark.read.parquet(output_dir)\n point = df1.head().point\n self.assertEqual(point, ExamplePoint(1.0, 2.0))\n\n row = Row(label=1.0, point=PythonOnlyPoint(1.0, 2.0))\n df0 = self.spark.createDataFrame([row])\n df0.write.parquet(output_dir, mode='overwrite')\n df1 = self.spark.read.parquet(output_dir)\n point = df1.head().point\n self.assertEqual(point, PythonOnlyPoint(1.0, 2.0))\n\n def test_union_with_udt(self):\n from pyspark.sql.tests import ExamplePoint, ExamplePointUDT\n row1 = (1.0, ExamplePoint(1.0, 2.0))\n row2 = (2.0, ExamplePoint(3.0, 4.0))\n schema = StructType([StructField(\"label\", DoubleType(), False),\n StructField(\"point\", ExamplePointUDT(), False)])\n df1 = self.spark.createDataFrame([row1], schema)\n df2 = self.spark.createDataFrame([row2], schema)\n\n result = df1.union(df2).orderBy(\"label\").collect()\n self.assertEqual(\n result,\n [\n Row(label=1.0, point=ExamplePoint(1.0, 2.0)),\n Row(label=2.0, point=ExamplePoint(3.0, 4.0))\n ]\n )\n\n def test_column_operators(self):\n ci = self.df.key\n cs = self.df.value\n c = ci == cs\n self.assertTrue(isinstance((- ci - 1 - 2) % 3 * 2.5 / 3.5, Column))\n rcc = (1 + ci), (1 - ci), (1 * ci), (1 / ci), (1 % ci), (1 ** ci), (ci ** 1)\n self.assertTrue(all(isinstance(c, Column) for c in rcc))\n cb = [ci == 5, ci != 0, ci > 3, ci < 4, ci >= 0, ci <= 7]\n self.assertTrue(all(isinstance(c, Column) for c in cb))\n cbool = (ci & ci), (ci | ci), (~ci)\n self.assertTrue(all(isinstance(c, Column) for c in cbool))\n css = cs.contains('a'), cs.like('a'), cs.rlike('a'), cs.asc(), cs.desc(),\\\n cs.startswith('a'), cs.endswith('a'), ci.eqNullSafe(cs)\n self.assertTrue(all(isinstance(c, Column) for c in css))\n self.assertTrue(isinstance(ci.cast(LongType()), Column))\n self.assertRaisesRegexp(ValueError,\n \"Cannot apply 'in' operator against a column\",\n lambda: 1 in cs)\n\n def test_column_getitem(self):\n from pyspark.sql.functions import col\n\n self.assertIsInstance(col(\"foo\")[1:3], Column)\n self.assertIsInstance(col(\"foo\")[0], Column)\n self.assertIsInstance(col(\"foo\")[\"bar\"], Column)\n self.assertRaises(ValueError, lambda: col(\"foo\")[0:10:2])\n\n def test_column_select(self):\n df = self.df\n 
self.assertEqual(self.testData, df.select(\"*\").collect())\n self.assertEqual(self.testData, df.select(df.key, df.value).collect())\n self.assertEqual([Row(value='1')], df.where(df.key == 1).select(df.value).collect())\n\n def test_freqItems(self):\n vals = [Row(a=1, b=-2.0) if i % 2 == 0 else Row(a=i, b=i * 1.0) for i in range(100)]\n df = self.sc.parallelize(vals).toDF()\n items = df.stat.freqItems((\"a\", \"b\"), 0.4).collect()[0]\n self.assertTrue(1 in items[0])\n self.assertTrue(-2.0 in items[1])\n\n def test_aggregator(self):\n df = self.df\n g = df.groupBy()\n self.assertEqual([99, 100], sorted(g.agg({'key': 'max', 'value': 'count'}).collect()[0]))\n self.assertEqual([Row(**{\"AVG(key#0)\": 49.5})], g.mean().collect())\n\n from pyspark.sql import functions\n self.assertEqual((0, u'99'),\n tuple(g.agg(functions.first(df.key), functions.last(df.value)).first()))\n self.assertTrue(95 < g.agg(functions.approxCountDistinct(df.key)).first()[0])\n self.assertEqual(100, g.agg(functions.countDistinct(df.value)).first()[0])\n\n def test_first_last_ignorenulls(self):\n from pyspark.sql import functions\n df = self.spark.range(0, 100)\n df2 = df.select(functions.when(df.id % 3 == 0, None).otherwise(df.id).alias(\"id\"))\n df3 = df2.select(functions.first(df2.id, False).alias('a'),\n functions.first(df2.id, True).alias('b'),\n functions.last(df2.id, False).alias('c'),\n functions.last(df2.id, True).alias('d'))\n self.assertEqual([Row(a=None, b=1, c=None, d=98)], df3.collect())\n\n def test_approxQuantile(self):\n df = self.sc.parallelize([Row(a=i, b=i+10) for i in range(10)]).toDF()\n aq = df.stat.approxQuantile(\"a\", [0.1, 0.5, 0.9], 0.1)\n self.assertTrue(isinstance(aq, list))\n self.assertEqual(len(aq), 3)\n self.assertTrue(all(isinstance(q, float) for q in aq))\n aqs = df.stat.approxQuantile([\"a\", \"b\"], [0.1, 0.5, 0.9], 0.1)\n self.assertTrue(isinstance(aqs, list))\n self.assertEqual(len(aqs), 2)\n self.assertTrue(isinstance(aqs[0], list))\n self.assertEqual(len(aqs[0]), 3)\n self.assertTrue(all(isinstance(q, float) for q in aqs[0]))\n self.assertTrue(isinstance(aqs[1], list))\n self.assertEqual(len(aqs[1]), 3)\n self.assertTrue(all(isinstance(q, float) for q in aqs[1]))\n aqt = df.stat.approxQuantile((\"a\", \"b\"), [0.1, 0.5, 0.9], 0.1)\n self.assertTrue(isinstance(aqt, list))\n self.assertEqual(len(aqt), 2)\n self.assertTrue(isinstance(aqt[0], list))\n self.assertEqual(len(aqt[0]), 3)\n self.assertTrue(all(isinstance(q, float) for q in aqt[0]))\n self.assertTrue(isinstance(aqt[1], list))\n self.assertEqual(len(aqt[1]), 3)\n self.assertTrue(all(isinstance(q, float) for q in aqt[1]))\n self.assertRaises(ValueError, lambda: df.stat.approxQuantile(123, [0.1, 0.9], 0.1))\n self.assertRaises(ValueError, lambda: df.stat.approxQuantile((\"a\", 123), [0.1, 0.9], 0.1))\n self.assertRaises(ValueError, lambda: df.stat.approxQuantile([\"a\", 123], [0.1, 0.9], 0.1))\n\n def test_corr(self):\n import math\n df = self.sc.parallelize([Row(a=i, b=math.sqrt(i)) for i in range(10)]).toDF()\n corr = df.stat.corr(\"a\", \"b\")\n self.assertTrue(abs(corr - 0.95734012) < 1e-6)\n\n def test_cov(self):\n df = self.sc.parallelize([Row(a=i, b=2 * i) for i in range(10)]).toDF()\n cov = df.stat.cov(\"a\", \"b\")\n self.assertTrue(abs(cov - 55.0 / 3) < 1e-6)\n\n def test_crosstab(self):\n df = self.sc.parallelize([Row(a=i % 3, b=i % 2) for i in range(1, 7)]).toDF()\n ct = df.stat.crosstab(\"a\", \"b\").collect()\n ct = sorted(ct, key=lambda x: x[0])\n for i, row in enumerate(ct):\n self.assertEqual(row[0], 
str(i))\n self.assertTrue(row[1], 1)\n self.assertTrue(row[2], 1)\n\n def test_math_functions(self):\n df = self.sc.parallelize([Row(a=i, b=2 * i) for i in range(10)]).toDF()\n from pyspark.sql import functions\n import math\n\n def get_values(l):\n return [j[0] for j in l]\n\n def assert_close(a, b):\n c = get_values(b)\n diff = [abs(v - c[k]) < 1e-6 for k, v in enumerate(a)]\n return sum(diff) == len(a)\n assert_close([math.cos(i) for i in range(10)],\n df.select(functions.cos(df.a)).collect())\n assert_close([math.cos(i) for i in range(10)],\n df.select(functions.cos(\"a\")).collect())\n assert_close([math.sin(i) for i in range(10)],\n df.select(functions.sin(df.a)).collect())\n assert_close([math.sin(i) for i in range(10)],\n df.select(functions.sin(df['a'])).collect())\n assert_close([math.pow(i, 2 * i) for i in range(10)],\n df.select(functions.pow(df.a, df.b)).collect())\n assert_close([math.pow(i, 2) for i in range(10)],\n df.select(functions.pow(df.a, 2)).collect())\n assert_close([math.pow(i, 2) for i in range(10)],\n df.select(functions.pow(df.a, 2.0)).collect())\n assert_close([math.hypot(i, 2 * i) for i in range(10)],\n df.select(functions.hypot(df.a, df.b)).collect())\n\n def test_rand_functions(self):\n df = self.df\n from pyspark.sql import functions\n rnd = df.select('key', functions.rand()).collect()\n for row in rnd:\n assert row[1] >= 0.0 and row[1] <= 1.0, \"got: %s\" % row[1]\n rndn = df.select('key', functions.randn(5)).collect()\n for row in rndn:\n assert row[1] >= -4.0 and row[1] <= 4.0, \"got: %s\" % row[1]\n\n # If the specified seed is 0, we should use it.\n # https://issues.apache.org/jira/browse/SPARK-9691\n rnd1 = df.select('key', functions.rand(0)).collect()\n rnd2 = df.select('key', functions.rand(0)).collect()\n self.assertEqual(sorted(rnd1), sorted(rnd2))\n\n rndn1 = df.select('key', functions.randn(0)).collect()\n rndn2 = df.select('key', functions.randn(0)).collect()\n self.assertEqual(sorted(rndn1), sorted(rndn2))\n\n def test_array_contains_function(self):\n from pyspark.sql.functions import array_contains\n\n df = self.spark.createDataFrame([([\"1\", \"2\", \"3\"],), ([],)], ['data'])\n actual = df.select(array_contains(df.data, 1).alias('b')).collect()\n # The value argument can be implicitly castable to the element's type of the array.\n self.assertEqual([Row(b=True), Row(b=False)], actual)\n\n def test_between_function(self):\n df = self.sc.parallelize([\n Row(a=1, b=2, c=3),\n Row(a=2, b=1, c=3),\n Row(a=4, b=1, c=4)]).toDF()\n self.assertEqual([Row(a=2, b=1, c=3), Row(a=4, b=1, c=4)],\n df.filter(df.a.between(df.b, df.c)).collect())\n\n def test_struct_type(self):\n from pyspark.sql.types import StructType, StringType, StructField\n struct1 = StructType().add(\"f1\", StringType(), True).add(\"f2\", StringType(), True, None)\n struct2 = StructType([StructField(\"f1\", StringType(), True),\n StructField(\"f2\", StringType(), True, None)])\n self.assertEqual(struct1, struct2)\n\n struct1 = StructType().add(\"f1\", StringType(), True).add(\"f2\", StringType(), True, None)\n struct2 = StructType([StructField(\"f1\", StringType(), True)])\n self.assertNotEqual(struct1, struct2)\n\n struct1 = (StructType().add(StructField(\"f1\", StringType(), True))\n .add(StructField(\"f2\", StringType(), True, None)))\n struct2 = StructType([StructField(\"f1\", StringType(), True),\n StructField(\"f2\", StringType(), True, None)])\n self.assertEqual(struct1, struct2)\n\n struct1 = (StructType().add(StructField(\"f1\", StringType(), True))\n .add(StructField(\"f2\", 
StringType(), True, None)))\n struct2 = StructType([StructField(\"f1\", StringType(), True)])\n self.assertNotEqual(struct1, struct2)\n\n # Catch exception raised during improper construction\n with self.assertRaises(ValueError):\n struct1 = StructType().add(\"name\")\n\n struct1 = StructType().add(\"f1\", StringType(), True).add(\"f2\", StringType(), True, None)\n for field in struct1:\n self.assertIsInstance(field, StructField)\n\n struct1 = StructType().add(\"f1\", StringType(), True).add(\"f2\", StringType(), True, None)\n self.assertEqual(len(struct1), 2)\n\n struct1 = StructType().add(\"f1\", StringType(), True).add(\"f2\", StringType(), True, None)\n self.assertIs(struct1[\"f1\"], struct1.fields[0])\n self.assertIs(struct1[0], struct1.fields[0])\n self.assertEqual(struct1[0:1], StructType(struct1.fields[0:1]))\n with self.assertRaises(KeyError):\n not_a_field = struct1[\"f9\"]\n with self.assertRaises(IndexError):\n not_a_field = struct1[9]\n with self.assertRaises(TypeError):\n not_a_field = struct1[9.9]\n\n def test_parse_datatype_string(self):\n from pyspark.sql.types import _all_atomic_types, _parse_datatype_string\n for k, t in _all_atomic_types.items():\n if t != NullType:\n self.assertEqual(t(), _parse_datatype_string(k))\n self.assertEqual(IntegerType(), _parse_datatype_string(\"int\"))\n self.assertEqual(DecimalType(1, 1), _parse_datatype_string(\"decimal(1 ,1)\"))\n self.assertEqual(DecimalType(10, 1), _parse_datatype_string(\"decimal( 10,1 )\"))\n self.assertEqual(DecimalType(11, 1), _parse_datatype_string(\"decimal(11,1)\"))\n self.assertEqual(\n ArrayType(IntegerType()),\n _parse_datatype_string(\"array<int >\"))\n self.assertEqual(\n MapType(IntegerType(), DoubleType()),\n _parse_datatype_string(\"map< int, double >\"))\n self.assertEqual(\n StructType([StructField(\"a\", IntegerType()), StructField(\"c\", DoubleType())]),\n _parse_datatype_string(\"struct<a:int, c:double >\"))\n self.assertEqual(\n StructType([StructField(\"a\", IntegerType()), StructField(\"c\", DoubleType())]),\n _parse_datatype_string(\"a:int, c:double\"))\n self.assertEqual(\n StructType([StructField(\"a\", IntegerType()), StructField(\"c\", DoubleType())]),\n _parse_datatype_string(\"a INT, c DOUBLE\"))\n\n def test_metadata_null(self):\n from pyspark.sql.types import StructType, StringType, StructField\n schema = StructType([StructField(\"f1\", StringType(), True, None),\n StructField(\"f2\", StringType(), True, {'a': None})])\n rdd = self.sc.parallelize([[\"a\", \"b\"], [\"c\", \"d\"]])\n self.spark.createDataFrame(rdd, schema)\n\n def test_save_and_load(self):\n df = self.df\n tmpPath = tempfile.mkdtemp()\n shutil.rmtree(tmpPath)\n df.write.json(tmpPath)\n actual = self.spark.read.json(tmpPath)\n self.assertEqual(sorted(df.collect()), sorted(actual.collect()))\n\n schema = StructType([StructField(\"value\", StringType(), True)])\n actual = self.spark.read.json(tmpPath, schema)\n self.assertEqual(sorted(df.select(\"value\").collect()), sorted(actual.collect()))\n\n df.write.json(tmpPath, \"overwrite\")\n actual = self.spark.read.json(tmpPath)\n self.assertEqual(sorted(df.collect()), sorted(actual.collect()))\n\n df.write.save(format=\"json\", mode=\"overwrite\", path=tmpPath,\n noUse=\"this options will not be used in save.\")\n actual = self.spark.read.load(format=\"json\", path=tmpPath,\n noUse=\"this options will not be used in load.\")\n self.assertEqual(sorted(df.collect()), sorted(actual.collect()))\n\n defaultDataSourceName = self.spark.conf.get(\"spark.sql.sources.default\",\n 
\"org.apache.spark.sql.parquet\")\n self.spark.sql(\"SET spark.sql.sources.default=org.apache.spark.sql.json\")\n actual = self.spark.read.load(path=tmpPath)\n self.assertEqual(sorted(df.collect()), sorted(actual.collect()))\n self.spark.sql(\"SET spark.sql.sources.default=\" + defaultDataSourceName)\n\n csvpath = os.path.join(tempfile.mkdtemp(), 'data')\n df.write.option('quote', None).format('csv').save(csvpath)\n\n shutil.rmtree(tmpPath)\n\n def test_save_and_load_builder(self):\n df = self.df\n tmpPath = tempfile.mkdtemp()\n shutil.rmtree(tmpPath)\n df.write.json(tmpPath)\n actual = self.spark.read.json(tmpPath)\n self.assertEqual(sorted(df.collect()), sorted(actual.collect()))\n\n schema = StructType([StructField(\"value\", StringType(), True)])\n actual = self.spark.read.json(tmpPath, schema)\n self.assertEqual(sorted(df.select(\"value\").collect()), sorted(actual.collect()))\n\n df.write.mode(\"overwrite\").json(tmpPath)\n actual = self.spark.read.json(tmpPath)\n self.assertEqual(sorted(df.collect()), sorted(actual.collect()))\n\n df.write.mode(\"overwrite\").options(noUse=\"this options will not be used in save.\")\\\n .option(\"noUse\", \"this option will not be used in save.\")\\\n .format(\"json\").save(path=tmpPath)\n actual =\\\n self.spark.read.format(\"json\")\\\n .load(path=tmpPath, noUse=\"this options will not be used in load.\")\n self.assertEqual(sorted(df.collect()), sorted(actual.collect()))\n\n defaultDataSourceName = self.spark.conf.get(\"spark.sql.sources.default\",\n \"org.apache.spark.sql.parquet\")\n self.spark.sql(\"SET spark.sql.sources.default=org.apache.spark.sql.json\")\n actual = self.spark.read.load(path=tmpPath)\n self.assertEqual(sorted(df.collect()), sorted(actual.collect()))\n self.spark.sql(\"SET spark.sql.sources.default=\" + defaultDataSourceName)\n\n shutil.rmtree(tmpPath)\n\n def test_stream_trigger(self):\n df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')\n\n # Should take at least one arg\n try:\n df.writeStream.trigger()\n except ValueError:\n pass\n\n # Should not take multiple args\n try:\n df.writeStream.trigger(once=True, processingTime='5 seconds')\n except ValueError:\n pass\n\n # Should take only keyword args\n try:\n df.writeStream.trigger('5 seconds')\n self.fail(\"Should have thrown an exception\")\n except TypeError:\n pass\n\n def test_stream_read_options(self):\n schema = StructType([StructField(\"data\", StringType(), False)])\n df = self.spark.readStream\\\n .format('text')\\\n .option('path', 'python/test_support/sql/streaming')\\\n .schema(schema)\\\n .load()\n self.assertTrue(df.isStreaming)\n self.assertEqual(df.schema.simpleString(), \"struct<data:string>\")\n\n def test_stream_read_options_overwrite(self):\n bad_schema = StructType([StructField(\"test\", IntegerType(), False)])\n schema = StructType([StructField(\"data\", StringType(), False)])\n df = self.spark.readStream.format('csv').option('path', 'python/test_support/sql/fake') \\\n .schema(bad_schema)\\\n .load(path='python/test_support/sql/streaming', schema=schema, format='text')\n self.assertTrue(df.isStreaming)\n self.assertEqual(df.schema.simpleString(), \"struct<data:string>\")\n\n def test_stream_save_options(self):\n df = self.spark.readStream.format('text').load('python/test_support/sql/streaming') \\\n .withColumn('id', lit(1))\n for q in self.spark._wrapped.streams.active:\n q.stop()\n tmpPath = tempfile.mkdtemp()\n shutil.rmtree(tmpPath)\n self.assertTrue(df.isStreaming)\n out = os.path.join(tmpPath, 'out')\n chk = 
os.path.join(tmpPath, 'chk')\n q = df.writeStream.option('checkpointLocation', chk).queryName('this_query') \\\n .format('parquet').partitionBy('id').outputMode('append').option('path', out).start()\n try:\n self.assertEqual(q.name, 'this_query')\n self.assertTrue(q.isActive)\n q.processAllAvailable()\n output_files = []\n for _, _, files in os.walk(out):\n output_files.extend([f for f in files if not f.startswith('.')])\n self.assertTrue(len(output_files) > 0)\n self.assertTrue(len(os.listdir(chk)) > 0)\n finally:\n q.stop()\n shutil.rmtree(tmpPath)\n\n def test_stream_save_options_overwrite(self):\n df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')\n for q in self.spark._wrapped.streams.active:\n q.stop()\n tmpPath = tempfile.mkdtemp()\n shutil.rmtree(tmpPath)\n self.assertTrue(df.isStreaming)\n out = os.path.join(tmpPath, 'out')\n chk = os.path.join(tmpPath, 'chk')\n fake1 = os.path.join(tmpPath, 'fake1')\n fake2 = os.path.join(tmpPath, 'fake2')\n q = df.writeStream.option('checkpointLocation', fake1)\\\n .format('memory').option('path', fake2) \\\n .queryName('fake_query').outputMode('append') \\\n .start(path=out, format='parquet', queryName='this_query', checkpointLocation=chk)\n\n try:\n self.assertEqual(q.name, 'this_query')\n self.assertTrue(q.isActive)\n q.processAllAvailable()\n output_files = []\n for _, _, files in os.walk(out):\n output_files.extend([f for f in files if not f.startswith('.')])\n self.assertTrue(len(output_files) > 0)\n self.assertTrue(len(os.listdir(chk)) > 0)\n self.assertFalse(os.path.isdir(fake1)) # should not have been created\n self.assertFalse(os.path.isdir(fake2)) # should not have been created\n finally:\n q.stop()\n shutil.rmtree(tmpPath)\n\n def test_stream_status_and_progress(self):\n df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')\n for q in self.spark._wrapped.streams.active:\n q.stop()\n tmpPath = tempfile.mkdtemp()\n shutil.rmtree(tmpPath)\n self.assertTrue(df.isStreaming)\n out = os.path.join(tmpPath, 'out')\n chk = os.path.join(tmpPath, 'chk')\n\n def func(x):\n time.sleep(1)\n return x\n\n from pyspark.sql.functions import col, udf\n sleep_udf = udf(func)\n\n # Use \"sleep_udf\" to delay the progress update so that we can test `lastProgress` when there\n # were no updates.\n q = df.select(sleep_udf(col(\"value\")).alias('value')).writeStream \\\n .start(path=out, format='parquet', queryName='this_query', checkpointLocation=chk)\n try:\n # \"lastProgress\" will return None in most cases. However, as it may be flaky when\n # Jenkins is very slow, we don't assert it. 
If there is something wrong, \"lastProgress\"\n # may throw error with a high chance and make this test flaky, so we should still be\n # able to detect broken codes.\n q.lastProgress\n\n q.processAllAvailable()\n lastProgress = q.lastProgress\n recentProgress = q.recentProgress\n status = q.status\n self.assertEqual(lastProgress['name'], q.name)\n self.assertEqual(lastProgress['id'], q.id)\n self.assertTrue(any(p == lastProgress for p in recentProgress))\n self.assertTrue(\n \"message\" in status and\n \"isDataAvailable\" in status and\n \"isTriggerActive\" in status)\n finally:\n q.stop()\n shutil.rmtree(tmpPath)\n\n def test_stream_await_termination(self):\n df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')\n for q in self.spark._wrapped.streams.active:\n q.stop()\n tmpPath = tempfile.mkdtemp()\n shutil.rmtree(tmpPath)\n self.assertTrue(df.isStreaming)\n out = os.path.join(tmpPath, 'out')\n chk = os.path.join(tmpPath, 'chk')\n q = df.writeStream\\\n .start(path=out, format='parquet', queryName='this_query', checkpointLocation=chk)\n try:\n self.assertTrue(q.isActive)\n try:\n q.awaitTermination(\"hello\")\n self.fail(\"Expected a value exception\")\n except ValueError:\n pass\n now = time.time()\n # test should take at least 2 seconds\n res = q.awaitTermination(2.6)\n duration = time.time() - now\n self.assertTrue(duration >= 2)\n self.assertFalse(res)\n finally:\n q.stop()\n shutil.rmtree(tmpPath)\n\n def test_stream_exception(self):\n sdf = self.spark.readStream.format('text').load('python/test_support/sql/streaming')\n sq = sdf.writeStream.format('memory').queryName('query_explain').start()\n try:\n sq.processAllAvailable()\n self.assertEqual(sq.exception(), None)\n finally:\n sq.stop()\n\n from pyspark.sql.functions import col, udf\n from pyspark.sql.utils import StreamingQueryException\n bad_udf = udf(lambda x: 1 / 0)\n sq = sdf.select(bad_udf(col(\"value\")))\\\n .writeStream\\\n .format('memory')\\\n .queryName('this_query')\\\n .start()\n try:\n # Process some data to fail the query\n sq.processAllAvailable()\n self.fail(\"bad udf should fail the query\")\n except StreamingQueryException as e:\n # This is expected\n self.assertTrue(\"ZeroDivisionError\" in e.desc)\n finally:\n sq.stop()\n self.assertTrue(type(sq.exception()) is StreamingQueryException)\n self.assertTrue(\"ZeroDivisionError\" in sq.exception().desc)\n\n def test_query_manager_await_termination(self):\n df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')\n for q in self.spark._wrapped.streams.active:\n q.stop()\n tmpPath = tempfile.mkdtemp()\n shutil.rmtree(tmpPath)\n self.assertTrue(df.isStreaming)\n out = os.path.join(tmpPath, 'out')\n chk = os.path.join(tmpPath, 'chk')\n q = df.writeStream\\\n .start(path=out, format='parquet', queryName='this_query', checkpointLocation=chk)\n try:\n self.assertTrue(q.isActive)\n try:\n self.spark._wrapped.streams.awaitAnyTermination(\"hello\")\n self.fail(\"Expected a value exception\")\n except ValueError:\n pass\n now = time.time()\n # test should take at least 2 seconds\n res = self.spark._wrapped.streams.awaitAnyTermination(2.6)\n duration = time.time() - now\n self.assertTrue(duration >= 2)\n self.assertFalse(res)\n finally:\n q.stop()\n shutil.rmtree(tmpPath)\n\n def test_help_command(self):\n # Regression test for SPARK-5464\n rdd = self.sc.parallelize(['{\"foo\":\"bar\"}', '{\"foo\":\"baz\"}'])\n df = self.spark.read.json(rdd)\n # render_doc() reproduces the help() exception without printing output\n 
pydoc.render_doc(df)\n pydoc.render_doc(df.foo)\n pydoc.render_doc(df.take(1))\n\n def test_access_column(self):\n df = self.df\n self.assertTrue(isinstance(df.key, Column))\n self.assertTrue(isinstance(df['key'], Column))\n self.assertTrue(isinstance(df[0], Column))\n self.assertRaises(IndexError, lambda: df[2])\n self.assertRaises(AnalysisException, lambda: df[\"bad_key\"])\n self.assertRaises(TypeError, lambda: df[{}])\n\n def test_column_name_with_non_ascii(self):\n if sys.version >= '3':\n columnName = \"数量\"\n self.assertTrue(isinstance(columnName, str))\n else:\n columnName = unicode(\"数量\", \"utf-8\")\n self.assertTrue(isinstance(columnName, unicode))\n schema = StructType([StructField(columnName, LongType(), True)])\n df = self.spark.createDataFrame([(1,)], schema)\n self.assertEqual(schema, df.schema)\n self.assertEqual(\"DataFrame[数量: bigint]\", str(df))\n self.assertEqual([(\"数量\", 'bigint')], df.dtypes)\n self.assertEqual(1, df.select(\"数量\").first()[0])\n self.assertEqual(1, df.select(df[\"数量\"]).first()[0])\n\n def test_access_nested_types(self):\n df = self.sc.parallelize([Row(l=[1], r=Row(a=1, b=\"b\"), d={\"k\": \"v\"})]).toDF()\n self.assertEqual(1, df.select(df.l[0]).first()[0])\n self.assertEqual(1, df.select(df.l.getItem(0)).first()[0])\n self.assertEqual(1, df.select(df.r.a).first()[0])\n self.assertEqual(\"b\", df.select(df.r.getField(\"b\")).first()[0])\n self.assertEqual(\"v\", df.select(df.d[\"k\"]).first()[0])\n self.assertEqual(\"v\", df.select(df.d.getItem(\"k\")).first()[0])\n\n def test_field_accessor(self):\n df = self.sc.parallelize([Row(l=[1], r=Row(a=1, b=\"b\"), d={\"k\": \"v\"})]).toDF()\n self.assertEqual(1, df.select(df.l[0]).first()[0])\n self.assertEqual(1, df.select(df.r[\"a\"]).first()[0])\n self.assertEqual(1, df.select(df[\"r.a\"]).first()[0])\n self.assertEqual(\"b\", df.select(df.r[\"b\"]).first()[0])\n self.assertEqual(\"b\", df.select(df[\"r.b\"]).first()[0])\n self.assertEqual(\"v\", df.select(df.d[\"k\"]).first()[0])\n\n def test_infer_long_type(self):\n longrow = [Row(f1='a', f2=100000000000000)]\n df = self.sc.parallelize(longrow).toDF()\n self.assertEqual(df.schema.fields[1].dataType, LongType())\n\n # this saving as Parquet caused issues as well.\n output_dir = os.path.join(self.tempdir.name, \"infer_long_type\")\n df.write.parquet(output_dir)\n df1 = self.spark.read.parquet(output_dir)\n self.assertEqual('a', df1.first().f1)\n self.assertEqual(100000000000000, df1.first().f2)\n\n self.assertEqual(_infer_type(1), LongType())\n self.assertEqual(_infer_type(2**10), LongType())\n self.assertEqual(_infer_type(2**20), LongType())\n self.assertEqual(_infer_type(2**31 - 1), LongType())\n self.assertEqual(_infer_type(2**31), LongType())\n self.assertEqual(_infer_type(2**61), LongType())\n self.assertEqual(_infer_type(2**71), LongType())\n\n def test_filter_with_datetime(self):\n time = datetime.datetime(2015, 4, 17, 23, 1, 2, 3000)\n date = time.date()\n row = Row(date=date, time=time)\n df = self.spark.createDataFrame([row])\n self.assertEqual(1, df.filter(df.date == date).count())\n self.assertEqual(1, df.filter(df.time == time).count())\n self.assertEqual(0, df.filter(df.date > date).count())\n self.assertEqual(0, df.filter(df.time > time).count())\n\n def test_filter_with_datetime_timezone(self):\n dt1 = datetime.datetime(2015, 4, 17, 23, 1, 2, 3000, tzinfo=UTCOffsetTimezone(0))\n dt2 = datetime.datetime(2015, 4, 17, 23, 1, 2, 3000, tzinfo=UTCOffsetTimezone(1))\n row = Row(date=dt1)\n df = self.spark.createDataFrame([row])\n 
self.assertEqual(0, df.filter(df.date == dt2).count())\n self.assertEqual(1, df.filter(df.date > dt2).count())\n self.assertEqual(0, df.filter(df.date < dt2).count())\n\n def test_time_with_timezone(self):\n day = datetime.date.today()\n now = datetime.datetime.now()\n ts = time.mktime(now.timetuple())\n # class in __main__ is not serializable\n from pyspark.sql.tests import UTCOffsetTimezone\n utc = UTCOffsetTimezone()\n utcnow = datetime.datetime.utcfromtimestamp(ts) # without microseconds\n # add microseconds to utcnow (keeping year,month,day,hour,minute,second)\n utcnow = datetime.datetime(*(utcnow.timetuple()[:6] + (now.microsecond, utc)))\n df = self.spark.createDataFrame([(day, now, utcnow)])\n day1, now1, utcnow1 = df.first()\n self.assertEqual(day1, day)\n self.assertEqual(now, now1)\n self.assertEqual(now, utcnow1)\n\n # regression test for SPARK-19561\n def test_datetime_at_epoch(self):\n epoch = datetime.datetime.fromtimestamp(0)\n df = self.spark.createDataFrame([Row(date=epoch)])\n first = df.select('date', lit(epoch).alias('lit_date')).first()\n self.assertEqual(first['date'], epoch)\n self.assertEqual(first['lit_date'], epoch)\n\n def test_decimal(self):\n from decimal import Decimal\n schema = StructType([StructField(\"decimal\", DecimalType(10, 5))])\n df = self.spark.createDataFrame([(Decimal(\"3.14159\"),)], schema)\n row = df.select(df.decimal + 1).first()\n self.assertEqual(row[0], Decimal(\"4.14159\"))\n tmpPath = tempfile.mkdtemp()\n shutil.rmtree(tmpPath)\n df.write.parquet(tmpPath)\n df2 = self.spark.read.parquet(tmpPath)\n row = df2.first()\n self.assertEqual(row[0], Decimal(\"3.14159\"))\n\n def test_dropna(self):\n schema = StructType([\n StructField(\"name\", StringType(), True),\n StructField(\"age\", IntegerType(), True),\n StructField(\"height\", DoubleType(), True)])\n\n # shouldn't drop a non-null row\n self.assertEqual(self.spark.createDataFrame(\n [(u'Alice', 50, 80.1)], schema).dropna().count(),\n 1)\n\n # dropping rows with a single null value\n self.assertEqual(self.spark.createDataFrame(\n [(u'Alice', None, 80.1)], schema).dropna().count(),\n 0)\n self.assertEqual(self.spark.createDataFrame(\n [(u'Alice', None, 80.1)], schema).dropna(how='any').count(),\n 0)\n\n # if how = 'all', only drop rows if all values are null\n self.assertEqual(self.spark.createDataFrame(\n [(u'Alice', None, 80.1)], schema).dropna(how='all').count(),\n 1)\n self.assertEqual(self.spark.createDataFrame(\n [(None, None, None)], schema).dropna(how='all').count(),\n 0)\n\n # how and subset\n self.assertEqual(self.spark.createDataFrame(\n [(u'Alice', 50, None)], schema).dropna(how='any', subset=['name', 'age']).count(),\n 1)\n self.assertEqual(self.spark.createDataFrame(\n [(u'Alice', None, None)], schema).dropna(how='any', subset=['name', 'age']).count(),\n 0)\n\n # threshold\n self.assertEqual(self.spark.createDataFrame(\n [(u'Alice', None, 80.1)], schema).dropna(thresh=2).count(),\n 1)\n self.assertEqual(self.spark.createDataFrame(\n [(u'Alice', None, None)], schema).dropna(thresh=2).count(),\n 0)\n\n # threshold and subset\n self.assertEqual(self.spark.createDataFrame(\n [(u'Alice', 50, None)], schema).dropna(thresh=2, subset=['name', 'age']).count(),\n 1)\n self.assertEqual(self.spark.createDataFrame(\n [(u'Alice', None, 180.9)], schema).dropna(thresh=2, subset=['name', 'age']).count(),\n 0)\n\n # thresh should take precedence over how\n self.assertEqual(self.spark.createDataFrame(\n [(u'Alice', 50, None)], schema).dropna(\n how='any', thresh=2, subset=['name', 
'age']).count(),\n 1)\n\n def test_fillna(self):\n schema = StructType([\n StructField(\"name\", StringType(), True),\n StructField(\"age\", IntegerType(), True),\n StructField(\"height\", DoubleType(), True),\n StructField(\"spy\", BooleanType(), True)])\n\n # fillna shouldn't change non-null values\n row = self.spark.createDataFrame([(u'Alice', 10, 80.1, True)], schema).fillna(50).first()\n self.assertEqual(row.age, 10)\n\n # fillna with int\n row = self.spark.createDataFrame([(u'Alice', None, None, None)], schema).fillna(50).first()\n self.assertEqual(row.age, 50)\n self.assertEqual(row.height, 50.0)\n\n # fillna with double\n row = self.spark.createDataFrame(\n [(u'Alice', None, None, None)], schema).fillna(50.1).first()\n self.assertEqual(row.age, 50)\n self.assertEqual(row.height, 50.1)\n\n # fillna with bool\n row = self.spark.createDataFrame(\n [(u'Alice', None, None, None)], schema).fillna(True).first()\n self.assertEqual(row.age, None)\n self.assertEqual(row.spy, True)\n\n # fillna with string\n row = self.spark.createDataFrame([(None, None, None, None)], schema).fillna(\"hello\").first()\n self.assertEqual(row.name, u\"hello\")\n self.assertEqual(row.age, None)\n\n # fillna with subset specified for numeric cols\n row = self.spark.createDataFrame(\n [(None, None, None, None)], schema).fillna(50, subset=['name', 'age']).first()\n self.assertEqual(row.name, None)\n self.assertEqual(row.age, 50)\n self.assertEqual(row.height, None)\n self.assertEqual(row.spy, None)\n\n # fillna with subset specified for string cols\n row = self.spark.createDataFrame(\n [(None, None, None, None)], schema).fillna(\"haha\", subset=['name', 'age']).first()\n self.assertEqual(row.name, \"haha\")\n self.assertEqual(row.age, None)\n self.assertEqual(row.height, None)\n self.assertEqual(row.spy, None)\n\n # fillna with subset specified for bool cols\n row = self.spark.createDataFrame(\n [(None, None, None, None)], schema).fillna(True, subset=['name', 'spy']).first()\n self.assertEqual(row.name, None)\n self.assertEqual(row.age, None)\n self.assertEqual(row.height, None)\n self.assertEqual(row.spy, True)\n\n # fillna with dictionary for boolean types\n row = self.spark.createDataFrame([Row(a=None), Row(a=True)]).fillna({\"a\": True}).first()\n self.assertEqual(row.a, True)\n\n def test_bitwise_operations(self):\n from pyspark.sql import functions\n row = Row(a=170, b=75)\n df = self.spark.createDataFrame([row])\n result = df.select(df.a.bitwiseAND(df.b)).collect()[0].asDict()\n self.assertEqual(170 & 75, result['(a & b)'])\n result = df.select(df.a.bitwiseOR(df.b)).collect()[0].asDict()\n self.assertEqual(170 | 75, result['(a | b)'])\n result = df.select(df.a.bitwiseXOR(df.b)).collect()[0].asDict()\n self.assertEqual(170 ^ 75, result['(a ^ b)'])\n result = df.select(functions.bitwiseNOT(df.b)).collect()[0].asDict()\n self.assertEqual(~75, result['~b'])\n\n def test_expr(self):\n from pyspark.sql import functions\n row = Row(a=\"length string\", b=75)\n df = self.spark.createDataFrame([row])\n result = df.select(functions.expr(\"length(a)\")).collect()[0].asDict()\n self.assertEqual(13, result[\"length(a)\"])\n\n def test_replace(self):\n schema = StructType([\n StructField(\"name\", StringType(), True),\n StructField(\"age\", IntegerType(), True),\n StructField(\"height\", DoubleType(), True)])\n\n # replace with int\n row = self.spark.createDataFrame([(u'Alice', 10, 10.0)], schema).replace(10, 20).first()\n self.assertEqual(row.age, 20)\n self.assertEqual(row.height, 20.0)\n\n # replace with double\n row = 
self.spark.createDataFrame(\n [(u'Alice', 80, 80.0)], schema).replace(80.0, 82.1).first()\n self.assertEqual(row.age, 82)\n self.assertEqual(row.height, 82.1)\n\n # replace with string\n row = self.spark.createDataFrame(\n [(u'Alice', 10, 80.1)], schema).replace(u'Alice', u'Ann').first()\n self.assertEqual(row.name, u\"Ann\")\n self.assertEqual(row.age, 10)\n\n # replace with subset specified by a string of a column name w/ actual change\n row = self.spark.createDataFrame(\n [(u'Alice', 10, 80.1)], schema).replace(10, 20, subset='age').first()\n self.assertEqual(row.age, 20)\n\n # replace with subset specified by a string of a column name w/o actual change\n row = self.spark.createDataFrame(\n [(u'Alice', 10, 80.1)], schema).replace(10, 20, subset='height').first()\n self.assertEqual(row.age, 10)\n\n # replace with subset specified with one column replaced, another column not in subset\n # stays unchanged.\n row = self.spark.createDataFrame(\n [(u'Alice', 10, 10.0)], schema).replace(10, 20, subset=['name', 'age']).first()\n self.assertEqual(row.name, u'Alice')\n self.assertEqual(row.age, 20)\n self.assertEqual(row.height, 10.0)\n\n # replace with subset specified but no column will be replaced\n row = self.spark.createDataFrame(\n [(u'Alice', 10, None)], schema).replace(10, 20, subset=['name', 'height']).first()\n self.assertEqual(row.name, u'Alice')\n self.assertEqual(row.age, 10)\n self.assertEqual(row.height, None)\n\n # replace with lists\n row = self.spark.createDataFrame(\n [(u'Alice', 10, 80.1)], schema).replace([u'Alice'], [u'Ann']).first()\n self.assertTupleEqual(row, (u'Ann', 10, 80.1))\n\n # replace with dict\n row = self.spark.createDataFrame(\n [(u'Alice', 10, 80.1)], schema).replace({10: 11}).first()\n self.assertTupleEqual(row, (u'Alice', 11, 80.1))\n\n # test backward compatibility with dummy value\n dummy_value = 1\n row = self.spark.createDataFrame(\n [(u'Alice', 10, 80.1)], schema).replace({'Alice': 'Bob'}, dummy_value).first()\n self.assertTupleEqual(row, (u'Bob', 10, 80.1))\n\n # test dict with mixed numerics\n row = self.spark.createDataFrame(\n [(u'Alice', 10, 80.1)], schema).replace({10: -10, 80.1: 90.5}).first()\n self.assertTupleEqual(row, (u'Alice', -10, 90.5))\n\n # replace with tuples\n row = self.spark.createDataFrame(\n [(u'Alice', 10, 80.1)], schema).replace((u'Alice', ), (u'Bob', )).first()\n self.assertTupleEqual(row, (u'Bob', 10, 80.1))\n\n # replace multiple columns\n row = self.spark.createDataFrame(\n [(u'Alice', 10, 80.0)], schema).replace((10, 80.0), (20, 90)).first()\n self.assertTupleEqual(row, (u'Alice', 20, 90.0))\n\n # test for mixed numerics\n row = self.spark.createDataFrame(\n [(u'Alice', 10, 80.0)], schema).replace((10, 80), (20, 90.5)).first()\n self.assertTupleEqual(row, (u'Alice', 20, 90.5))\n\n row = self.spark.createDataFrame(\n [(u'Alice', 10, 80.0)], schema).replace({10: 20, 80: 90.5}).first()\n self.assertTupleEqual(row, (u'Alice', 20, 90.5))\n\n # replace with boolean\n row = (self\n .spark.createDataFrame([(u'Alice', 10, 80.0)], schema)\n .selectExpr(\"name = 'Bob'\", 'age <= 15')\n .replace(False, True).first())\n self.assertTupleEqual(row, (True, True))\n\n # should fail if subset is not list, tuple or None\n with self.assertRaises(ValueError):\n self.spark.createDataFrame(\n [(u'Alice', 10, 80.1)], schema).replace({10: 11}, subset=1).first()\n\n # should fail if to_replace and value have different length\n with self.assertRaises(ValueError):\n self.spark.createDataFrame(\n [(u'Alice', 10, 80.1)], schema).replace([\"Alice\", 
\"Bob\"], [\"Eve\"]).first()\n\n # should fail if when received unexpected type\n with self.assertRaises(ValueError):\n from datetime import datetime\n self.spark.createDataFrame(\n [(u'Alice', 10, 80.1)], schema).replace(datetime.now(), datetime.now()).first()\n\n # should fail if provided mixed type replacements\n with self.assertRaises(ValueError):\n self.spark.createDataFrame(\n [(u'Alice', 10, 80.1)], schema).replace([\"Alice\", 10], [\"Eve\", 20]).first()\n\n with self.assertRaises(ValueError):\n self.spark.createDataFrame(\n [(u'Alice', 10, 80.1)], schema).replace({u\"Alice\": u\"Bob\", 10: 20}).first()\n\n def test_capture_analysis_exception(self):\n self.assertRaises(AnalysisException, lambda: self.spark.sql(\"select abc\"))\n self.assertRaises(AnalysisException, lambda: self.df.selectExpr(\"a + b\"))\n\n def test_capture_parse_exception(self):\n self.assertRaises(ParseException, lambda: self.spark.sql(\"abc\"))\n\n def test_capture_illegalargument_exception(self):\n self.assertRaisesRegexp(IllegalArgumentException, \"Setting negative mapred.reduce.tasks\",\n lambda: self.spark.sql(\"SET mapred.reduce.tasks=-1\"))\n df = self.spark.createDataFrame([(1, 2)], [\"a\", \"b\"])\n self.assertRaisesRegexp(IllegalArgumentException, \"1024 is not in the permitted values\",\n lambda: df.select(sha2(df.a, 1024)).collect())\n try:\n df.select(sha2(df.a, 1024)).collect()\n except IllegalArgumentException as e:\n self.assertRegexpMatches(e.desc, \"1024 is not in the permitted values\")\n self.assertRegexpMatches(e.stackTrace,\n \"org.apache.spark.sql.functions\")\n\n def test_with_column_with_existing_name(self):\n keys = self.df.withColumn(\"key\", self.df.key).select(\"key\").collect()\n self.assertEqual([r.key for r in keys], list(range(100)))\n\n # regression test for SPARK-10417\n def test_column_iterator(self):\n\n def foo():\n for x in self.df.key:\n break\n\n self.assertRaises(TypeError, foo)\n\n # add test for SPARK-10577 (test broadcast join hint)\n def test_functions_broadcast(self):\n from pyspark.sql.functions import broadcast\n\n df1 = self.spark.createDataFrame([(1, \"1\"), (2, \"2\")], (\"key\", \"value\"))\n df2 = self.spark.createDataFrame([(1, \"1\"), (2, \"2\")], (\"key\", \"value\"))\n\n # equijoin - should be converted into broadcast join\n plan1 = df1.join(broadcast(df2), \"key\")._jdf.queryExecution().executedPlan()\n self.assertEqual(1, plan1.toString().count(\"BroadcastHashJoin\"))\n\n # no join key -- should not be a broadcast join\n plan2 = df1.crossJoin(broadcast(df2))._jdf.queryExecution().executedPlan()\n self.assertEqual(0, plan2.toString().count(\"BroadcastHashJoin\"))\n\n # planner should not crash without a join\n broadcast(df1)._jdf.queryExecution().executedPlan()\n\n def test_generic_hints(self):\n from pyspark.sql import DataFrame\n\n df1 = self.spark.range(10e10).toDF(\"id\")\n df2 = self.spark.range(10e10).toDF(\"id\")\n\n self.assertIsInstance(df1.hint(\"broadcast\"), DataFrame)\n self.assertIsInstance(df1.hint(\"broadcast\", []), DataFrame)\n\n # Dummy rules\n self.assertIsInstance(df1.hint(\"broadcast\", \"foo\", \"bar\"), DataFrame)\n self.assertIsInstance(df1.hint(\"broadcast\", [\"foo\", \"bar\"]), DataFrame)\n\n plan = df1.join(df2.hint(\"broadcast\"), \"id\")._jdf.queryExecution().executedPlan()\n self.assertEqual(1, plan.toString().count(\"BroadcastHashJoin\"))\n\n def test_toDF_with_schema_string(self):\n data = [Row(key=i, value=str(i)) for i in range(100)]\n rdd = self.sc.parallelize(data, 5)\n\n df = rdd.toDF(\"key: int, value: string\")\n 
self.assertEqual(df.schema.simpleString(), \"struct<key:int,value:string>\")\n self.assertEqual(df.collect(), data)\n\n # different but compatible field types can be used.\n df = rdd.toDF(\"key: string, value: string\")\n self.assertEqual(df.schema.simpleString(), \"struct<key:string,value:string>\")\n self.assertEqual(df.collect(), [Row(key=str(i), value=str(i)) for i in range(100)])\n\n # field names can differ.\n df = rdd.toDF(\" a: int, b: string \")\n self.assertEqual(df.schema.simpleString(), \"struct<a:int,b:string>\")\n self.assertEqual(df.collect(), data)\n\n # number of fields must match.\n self.assertRaisesRegexp(Exception, \"Length of object\",\n lambda: rdd.toDF(\"key: int\").collect())\n\n # field types mismatch will cause exception at runtime.\n self.assertRaisesRegexp(Exception, \"FloatType can not accept\",\n lambda: rdd.toDF(\"key: float, value: string\").collect())\n\n # flat schema values will be wrapped into row.\n df = rdd.map(lambda row: row.key).toDF(\"int\")\n self.assertEqual(df.schema.simpleString(), \"struct<value:int>\")\n self.assertEqual(df.collect(), [Row(key=i) for i in range(100)])\n\n # users can use DataType directly instead of data type string.\n df = rdd.map(lambda row: row.key).toDF(IntegerType())\n self.assertEqual(df.schema.simpleString(), \"struct<value:int>\")\n self.assertEqual(df.collect(), [Row(key=i) for i in range(100)])\n\n def test_join_without_on(self):\n df1 = self.spark.range(1).toDF(\"a\")\n df2 = self.spark.range(1).toDF(\"b\")\n\n try:\n self.spark.conf.set(\"spark.sql.crossJoin.enabled\", \"false\")\n self.assertRaises(AnalysisException, lambda: df1.join(df2, how=\"inner\").collect())\n\n self.spark.conf.set(\"spark.sql.crossJoin.enabled\", \"true\")\n actual = df1.join(df2, how=\"inner\").collect()\n expected = [Row(a=0, b=0)]\n self.assertEqual(actual, expected)\n finally:\n # We should unset this. 
Otherwise, other tests are affected.\n self.spark.conf.unset(\"spark.sql.crossJoin.enabled\")\n\n # Regression test for invalid join methods when on is None, Spark-14761\n def test_invalid_join_method(self):\n df1 = self.spark.createDataFrame([(\"Alice\", 5), (\"Bob\", 8)], [\"name\", \"age\"])\n df2 = self.spark.createDataFrame([(\"Alice\", 80), (\"Bob\", 90)], [\"name\", \"height\"])\n self.assertRaises(IllegalArgumentException, lambda: df1.join(df2, how=\"invalid-join-type\"))\n\n # Cartesian products require cross join syntax\n def test_require_cross(self):\n from pyspark.sql.functions import broadcast\n\n df1 = self.spark.createDataFrame([(1, \"1\")], (\"key\", \"value\"))\n df2 = self.spark.createDataFrame([(1, \"1\")], (\"key\", \"value\"))\n\n # joins without conditions require cross join syntax\n self.assertRaises(AnalysisException, lambda: df1.join(df2).collect())\n\n # works with crossJoin\n self.assertEqual(1, df1.crossJoin(df2).count())\n\n def test_conf(self):\n spark = self.spark\n spark.conf.set(\"bogo\", \"sipeo\")\n self.assertEqual(spark.conf.get(\"bogo\"), \"sipeo\")\n spark.conf.set(\"bogo\", \"ta\")\n self.assertEqual(spark.conf.get(\"bogo\"), \"ta\")\n self.assertEqual(spark.conf.get(\"bogo\", \"not.read\"), \"ta\")\n self.assertEqual(spark.conf.get(\"not.set\", \"ta\"), \"ta\")\n self.assertRaisesRegexp(Exception, \"not.set\", lambda: spark.conf.get(\"not.set\"))\n spark.conf.unset(\"bogo\")\n self.assertEqual(spark.conf.get(\"bogo\", \"colombia\"), \"colombia\")\n\n def test_current_database(self):\n spark = self.spark\n spark.catalog._reset()\n self.assertEquals(spark.catalog.currentDatabase(), \"default\")\n spark.sql(\"CREATE DATABASE some_db\")\n spark.catalog.setCurrentDatabase(\"some_db\")\n self.assertEquals(spark.catalog.currentDatabase(), \"some_db\")\n self.assertRaisesRegexp(\n AnalysisException,\n \"does_not_exist\",\n lambda: spark.catalog.setCurrentDatabase(\"does_not_exist\"))\n\n def test_list_databases(self):\n spark = self.spark\n spark.catalog._reset()\n databases = [db.name for db in spark.catalog.listDatabases()]\n self.assertEquals(databases, [\"default\"])\n spark.sql(\"CREATE DATABASE some_db\")\n databases = [db.name for db in spark.catalog.listDatabases()]\n self.assertEquals(sorted(databases), [\"default\", \"some_db\"])\n\n def test_list_tables(self):\n from pyspark.sql.catalog import Table\n spark = self.spark\n spark.catalog._reset()\n spark.sql(\"CREATE DATABASE some_db\")\n self.assertEquals(spark.catalog.listTables(), [])\n self.assertEquals(spark.catalog.listTables(\"some_db\"), [])\n spark.createDataFrame([(1, 1)]).createOrReplaceTempView(\"temp_tab\")\n spark.sql(\"CREATE TABLE tab1 (name STRING, age INT) USING parquet\")\n spark.sql(\"CREATE TABLE some_db.tab2 (name STRING, age INT) USING parquet\")\n tables = sorted(spark.catalog.listTables(), key=lambda t: t.name)\n tablesDefault = sorted(spark.catalog.listTables(\"default\"), key=lambda t: t.name)\n tablesSomeDb = sorted(spark.catalog.listTables(\"some_db\"), key=lambda t: t.name)\n self.assertEquals(tables, tablesDefault)\n self.assertEquals(len(tables), 2)\n self.assertEquals(len(tablesSomeDb), 2)\n self.assertEquals(tables[0], Table(\n name=\"tab1\",\n database=\"default\",\n description=None,\n tableType=\"MANAGED\",\n isTemporary=False))\n self.assertEquals(tables[1], Table(\n name=\"temp_tab\",\n database=None,\n description=None,\n tableType=\"TEMPORARY\",\n isTemporary=True))\n self.assertEquals(tablesSomeDb[0], Table(\n name=\"tab2\",\n database=\"some_db\",\n 
description=None,\n tableType=\"MANAGED\",\n isTemporary=False))\n self.assertEquals(tablesSomeDb[1], Table(\n name=\"temp_tab\",\n database=None,\n description=None,\n tableType=\"TEMPORARY\",\n isTemporary=True))\n self.assertRaisesRegexp(\n AnalysisException,\n \"does_not_exist\",\n lambda: spark.catalog.listTables(\"does_not_exist\"))\n\n def test_list_functions(self):\n from pyspark.sql.catalog import Function\n spark = self.spark\n spark.catalog._reset()\n spark.sql(\"CREATE DATABASE some_db\")\n functions = dict((f.name, f) for f in spark.catalog.listFunctions())\n functionsDefault = dict((f.name, f) for f in spark.catalog.listFunctions(\"default\"))\n self.assertTrue(len(functions) > 200)\n self.assertTrue(\"+\" in functions)\n self.assertTrue(\"like\" in functions)\n self.assertTrue(\"month\" in functions)\n self.assertTrue(\"to_date\" in functions)\n self.assertTrue(\"to_timestamp\" in functions)\n self.assertTrue(\"to_unix_timestamp\" in functions)\n self.assertTrue(\"current_database\" in functions)\n self.assertEquals(functions[\"+\"], Function(\n name=\"+\",\n description=None,\n className=\"org.apache.spark.sql.catalyst.expressions.Add\",\n isTemporary=True))\n self.assertEquals(functions, functionsDefault)\n spark.catalog.registerFunction(\"temp_func\", lambda x: str(x))\n spark.sql(\"CREATE FUNCTION func1 AS 'org.apache.spark.data.bricks'\")\n spark.sql(\"CREATE FUNCTION some_db.func2 AS 'org.apache.spark.data.bricks'\")\n newFunctions = dict((f.name, f) for f in spark.catalog.listFunctions())\n newFunctionsSomeDb = dict((f.name, f) for f in spark.catalog.listFunctions(\"some_db\"))\n self.assertTrue(set(functions).issubset(set(newFunctions)))\n self.assertTrue(set(functions).issubset(set(newFunctionsSomeDb)))\n self.assertTrue(\"temp_func\" in newFunctions)\n self.assertTrue(\"func1\" in newFunctions)\n self.assertTrue(\"func2\" not in newFunctions)\n self.assertTrue(\"temp_func\" in newFunctionsSomeDb)\n self.assertTrue(\"func1\" not in newFunctionsSomeDb)\n self.assertTrue(\"func2\" in newFunctionsSomeDb)\n self.assertRaisesRegexp(\n AnalysisException,\n \"does_not_exist\",\n lambda: spark.catalog.listFunctions(\"does_not_exist\"))\n\n def test_list_columns(self):\n from pyspark.sql.catalog import Column\n spark = self.spark\n spark.catalog._reset()\n spark.sql(\"CREATE DATABASE some_db\")\n spark.sql(\"CREATE TABLE tab1 (name STRING, age INT) USING parquet\")\n spark.sql(\"CREATE TABLE some_db.tab2 (nickname STRING, tolerance FLOAT) USING parquet\")\n columns = sorted(spark.catalog.listColumns(\"tab1\"), key=lambda c: c.name)\n columnsDefault = sorted(spark.catalog.listColumns(\"tab1\", \"default\"), key=lambda c: c.name)\n self.assertEquals(columns, columnsDefault)\n self.assertEquals(len(columns), 2)\n self.assertEquals(columns[0], Column(\n name=\"age\",\n description=None,\n dataType=\"int\",\n nullable=True,\n isPartition=False,\n isBucket=False))\n self.assertEquals(columns[1], Column(\n name=\"name\",\n description=None,\n dataType=\"string\",\n nullable=True,\n isPartition=False,\n isBucket=False))\n columns2 = sorted(spark.catalog.listColumns(\"tab2\", \"some_db\"), key=lambda c: c.name)\n self.assertEquals(len(columns2), 2)\n self.assertEquals(columns2[0], Column(\n name=\"nickname\",\n description=None,\n dataType=\"string\",\n nullable=True,\n isPartition=False,\n isBucket=False))\n self.assertEquals(columns2[1], Column(\n name=\"tolerance\",\n description=None,\n dataType=\"float\",\n nullable=True,\n isPartition=False,\n isBucket=False))\n 
self.assertRaisesRegexp(\n AnalysisException,\n \"tab2\",\n lambda: spark.catalog.listColumns(\"tab2\"))\n self.assertRaisesRegexp(\n AnalysisException,\n \"does_not_exist\",\n lambda: spark.catalog.listColumns(\"does_not_exist\"))\n\n def test_cache(self):\n spark = self.spark\n spark.createDataFrame([(2, 2), (3, 3)]).createOrReplaceTempView(\"tab1\")\n spark.createDataFrame([(2, 2), (3, 3)]).createOrReplaceTempView(\"tab2\")\n self.assertFalse(spark.catalog.isCached(\"tab1\"))\n self.assertFalse(spark.catalog.isCached(\"tab2\"))\n spark.catalog.cacheTable(\"tab1\")\n self.assertTrue(spark.catalog.isCached(\"tab1\"))\n self.assertFalse(spark.catalog.isCached(\"tab2\"))\n spark.catalog.cacheTable(\"tab2\")\n spark.catalog.uncacheTable(\"tab1\")\n self.assertFalse(spark.catalog.isCached(\"tab1\"))\n self.assertTrue(spark.catalog.isCached(\"tab2\"))\n spark.catalog.clearCache()\n self.assertFalse(spark.catalog.isCached(\"tab1\"))\n self.assertFalse(spark.catalog.isCached(\"tab2\"))\n self.assertRaisesRegexp(\n AnalysisException,\n \"does_not_exist\",\n lambda: spark.catalog.isCached(\"does_not_exist\"))\n self.assertRaisesRegexp(\n AnalysisException,\n \"does_not_exist\",\n lambda: spark.catalog.cacheTable(\"does_not_exist\"))\n self.assertRaisesRegexp(\n AnalysisException,\n \"does_not_exist\",\n lambda: spark.catalog.uncacheTable(\"does_not_exist\"))\n\n def test_read_text_file_list(self):\n df = self.spark.read.text(['python/test_support/sql/text-test.txt',\n 'python/test_support/sql/text-test.txt'])\n count = df.count()\n self.assertEquals(count, 4)\n\n def test_BinaryType_serialization(self):\n # Pyrolite version <= 4.9 could not serialize BinaryType with Python3 SPARK-17808\n schema = StructType([StructField('mybytes', BinaryType())])\n data = [[bytearray(b'here is my data')],\n [bytearray(b'and here is some more')]]\n df = self.spark.createDataFrame(data, schema=schema)\n df.collect()\n\n # test for SPARK-16542\n def test_array_types(self):\n # This test need to make sure that the Scala type selected is at least\n # as large as the python's types. This is necessary because python's\n # array types depend on C implementation on the machine. 
Therefore there\n # is no machine independent correspondence between python's array types\n # and Scala types.\n # See: https://docs.python.org/2/library/array.html\n\n def assertCollectSuccess(typecode, value):\n row = Row(myarray=array.array(typecode, [value]))\n df = self.spark.createDataFrame([row])\n self.assertEqual(df.first()[\"myarray\"][0], value)\n\n # supported string types\n #\n # String types in python's array are \"u\" for Py_UNICODE and \"c\" for char.\n # \"u\" will be removed in python 4, and \"c\" is not supported in python 3.\n supported_string_types = []\n if sys.version_info[0] < 4:\n supported_string_types += ['u']\n # test unicode\n assertCollectSuccess('u', u'a')\n if sys.version_info[0] < 3:\n supported_string_types += ['c']\n # test string\n assertCollectSuccess('c', 'a')\n\n # supported float and double\n #\n # Test max, min, and precision for float and double, assuming IEEE 754\n # floating-point format.\n supported_fractional_types = ['f', 'd']\n assertCollectSuccess('f', ctypes.c_float(1e+38).value)\n assertCollectSuccess('f', ctypes.c_float(1e-38).value)\n assertCollectSuccess('f', ctypes.c_float(1.123456).value)\n assertCollectSuccess('d', sys.float_info.max)\n assertCollectSuccess('d', sys.float_info.min)\n assertCollectSuccess('d', sys.float_info.epsilon)\n\n # supported signed int types\n #\n # The size of C types changes with implementation, we need to make sure\n # that there is no overflow error on the platform running this test.\n supported_signed_int_types = list(\n set(_array_signed_int_typecode_ctype_mappings.keys())\n .intersection(set(_array_type_mappings.keys())))\n for t in supported_signed_int_types:\n ctype = _array_signed_int_typecode_ctype_mappings[t]\n max_val = 2 ** (ctypes.sizeof(ctype) * 8 - 1)\n assertCollectSuccess(t, max_val - 1)\n assertCollectSuccess(t, -max_val)\n\n # supported unsigned int types\n #\n # JVM does not have unsigned types. We need to be very careful to make\n # sure that there is no overflow error.\n supported_unsigned_int_types = list(\n set(_array_unsigned_int_typecode_ctype_mappings.keys())\n .intersection(set(_array_type_mappings.keys())))\n for t in supported_unsigned_int_types:\n ctype = _array_unsigned_int_typecode_ctype_mappings[t]\n assertCollectSuccess(t, 2 ** (ctypes.sizeof(ctype) * 8) - 1)\n\n # all supported types\n #\n # Make sure the types tested above:\n # 1. are all supported types\n # 2. 
cover all supported types\n supported_types = (supported_string_types +\n supported_fractional_types +\n supported_signed_int_types +\n supported_unsigned_int_types)\n self.assertEqual(set(supported_types), set(_array_type_mappings.keys()))\n\n # all unsupported types\n #\n # Keys in _array_type_mappings is a complete list of all supported types,\n # and types not in _array_type_mappings are considered unsupported.\n # `array.typecodes` are not supported in python 2.\n if sys.version_info[0] < 3:\n all_types = set(['c', 'b', 'B', 'u', 'h', 'H', 'i', 'I', 'l', 'L', 'f', 'd'])\n else:\n all_types = set(array.typecodes)\n unsupported_types = all_types - set(supported_types)\n # test unsupported types\n for t in unsupported_types:\n with self.assertRaises(TypeError):\n a = array.array(t)\n self.spark.createDataFrame([Row(myarray=a)]).collect()\n\n def test_bucketed_write(self):\n data = [\n (1, \"foo\", 3.0), (2, \"foo\", 5.0),\n (3, \"bar\", -1.0), (4, \"bar\", 6.0),\n ]\n df = self.spark.createDataFrame(data, [\"x\", \"y\", \"z\"])\n\n def count_bucketed_cols(names, table=\"pyspark_bucket\"):\n \"\"\"Given a sequence of column names and a table name\n query the catalog and return number o columns which are\n used for bucketing\n \"\"\"\n cols = self.spark.catalog.listColumns(table)\n num = len([c for c in cols if c.name in names and c.isBucket])\n return num\n\n # Test write with one bucketing column\n df.write.bucketBy(3, \"x\").mode(\"overwrite\").saveAsTable(\"pyspark_bucket\")\n self.assertEqual(count_bucketed_cols([\"x\"]), 1)\n self.assertSetEqual(set(data), set(self.spark.table(\"pyspark_bucket\").collect()))\n\n # Test write two bucketing columns\n df.write.bucketBy(3, \"x\", \"y\").mode(\"overwrite\").saveAsTable(\"pyspark_bucket\")\n self.assertEqual(count_bucketed_cols([\"x\", \"y\"]), 2)\n self.assertSetEqual(set(data), set(self.spark.table(\"pyspark_bucket\").collect()))\n\n # Test write with bucket and sort\n df.write.bucketBy(2, \"x\").sortBy(\"z\").mode(\"overwrite\").saveAsTable(\"pyspark_bucket\")\n self.assertEqual(count_bucketed_cols([\"x\"]), 1)\n self.assertSetEqual(set(data), set(self.spark.table(\"pyspark_bucket\").collect()))\n\n # Test write with a list of columns\n df.write.bucketBy(3, [\"x\", \"y\"]).mode(\"overwrite\").saveAsTable(\"pyspark_bucket\")\n self.assertEqual(count_bucketed_cols([\"x\", \"y\"]), 2)\n self.assertSetEqual(set(data), set(self.spark.table(\"pyspark_bucket\").collect()))\n\n # Test write with bucket and sort with a list of columns\n (df.write.bucketBy(2, \"x\")\n .sortBy([\"y\", \"z\"])\n .mode(\"overwrite\").saveAsTable(\"pyspark_bucket\"))\n self.assertSetEqual(set(data), set(self.spark.table(\"pyspark_bucket\").collect()))\n\n # Test write with bucket and sort with multiple columns\n (df.write.bucketBy(2, \"x\")\n .sortBy(\"y\", \"z\")\n .mode(\"overwrite\").saveAsTable(\"pyspark_bucket\"))\n self.assertSetEqual(set(data), set(self.spark.table(\"pyspark_bucket\").collect()))\n\n @unittest.skipIf(not _have_pandas, \"Pandas not installed\")\n def test_to_pandas(self):\n import numpy as np\n schema = StructType().add(\"a\", IntegerType()).add(\"b\", StringType())\\\n .add(\"c\", BooleanType()).add(\"d\", FloatType())\n data = [\n (1, \"foo\", True, 3.0), (2, \"foo\", True, 5.0),\n (3, \"bar\", False, -1.0), (4, \"bar\", False, 6.0),\n ]\n df = self.spark.createDataFrame(data, schema)\n types = df.toPandas().dtypes\n self.assertEquals(types[0], np.int32)\n self.assertEquals(types[1], np.object)\n self.assertEquals(types[2], np.bool)\n 
self.assertEquals(types[3], np.float32)\n\n def test_create_dataframe_from_array_of_long(self):\n import array\n data = [Row(longarray=array.array('l', [-9223372036854775808, 0, 9223372036854775807]))]\n df = self.spark.createDataFrame(data)\n self.assertEqual(df.first(), Row(longarray=[-9223372036854775808, 0, 9223372036854775807]))\n\n\nclass HiveSparkSubmitTests(SparkSubmitTests):\n\n def test_hivecontext(self):\n # This test checks that HiveContext is using Hive metastore (SPARK-16224).\n # It sets a metastore url and checks if there is a derby dir created by\n # Hive metastore. If this derby dir exists, HiveContext is using\n # Hive metastore.\n metastore_path = os.path.join(tempfile.mkdtemp(), \"spark16224_metastore_db\")\n metastore_URL = \"jdbc:derby:;databaseName=\" + metastore_path + \";create=true\"\n hive_site_dir = os.path.join(self.programDir, \"conf\")\n hive_site_file = self.createTempFile(\"hive-site.xml\", (\"\"\"\n |<configuration>\n | <property>\n | <name>javax.jdo.option.ConnectionURL</name>\n | <value>%s</value>\n | </property>\n |</configuration>\n \"\"\" % metastore_URL).lstrip(), \"conf\")\n script = self.createTempFile(\"test.py\", \"\"\"\n |import os\n |\n |from pyspark.conf import SparkConf\n |from pyspark.context import SparkContext\n |from pyspark.sql import HiveContext\n |\n |conf = SparkConf()\n |sc = SparkContext(conf=conf)\n |hive_context = HiveContext(sc)\n |print(hive_context.sql(\"show databases\").collect())\n \"\"\")\n proc = subprocess.Popen(\n [self.sparkSubmit, \"--master\", \"local-cluster[1,1,1024]\",\n \"--driver-class-path\", hive_site_dir, script],\n stdout=subprocess.PIPE)\n out, err = proc.communicate()\n self.assertEqual(0, proc.returncode)\n self.assertIn(\"default\", out.decode('utf-8'))\n self.assertTrue(os.path.exists(metastore_path))\n\n\nclass SQLTests2(ReusedPySparkTestCase):\n\n @classmethod\n def setUpClass(cls):\n ReusedPySparkTestCase.setUpClass()\n cls.spark = SparkSession(cls.sc)\n\n @classmethod\n def tearDownClass(cls):\n ReusedPySparkTestCase.tearDownClass()\n cls.spark.stop()\n\n # We can't include this test into SQLTests because we will stop class's SparkContext and cause\n # other tests failed.\n def test_sparksession_with_stopped_sparkcontext(self):\n self.sc.stop()\n sc = SparkContext('local[4]', self.sc.appName)\n spark = SparkSession.builder.getOrCreate()\n df = spark.createDataFrame([(1, 2)], [\"c\", \"c\"])\n df.collect()\n\n\nclass UDFInitializationTests(unittest.TestCase):\n def tearDown(self):\n if SparkSession._instantiatedSession is not None:\n SparkSession._instantiatedSession.stop()\n\n if SparkContext._active_spark_context is not None:\n SparkContext._active_spark_context.stop()\n\n def test_udf_init_shouldnt_initalize_context(self):\n from pyspark.sql.functions import UserDefinedFunction\n\n UserDefinedFunction(lambda x: x, StringType())\n\n self.assertIsNone(\n SparkContext._active_spark_context,\n \"SparkContext shouldn't be initialized when UserDefinedFunction is created.\"\n )\n self.assertIsNone(\n SparkSession._instantiatedSession,\n \"SparkSession shouldn't be initialized when UserDefinedFunction is created.\"\n )\n\n\nclass HiveContextSQLTests(ReusedPySparkTestCase):\n\n @classmethod\n def setUpClass(cls):\n ReusedPySparkTestCase.setUpClass()\n cls.tempdir = tempfile.NamedTemporaryFile(delete=False)\n try:\n cls.sc._jvm.org.apache.hadoop.hive.conf.HiveConf()\n except py4j.protocol.Py4JError:\n cls.tearDownClass()\n raise unittest.SkipTest(\"Hive is not available\")\n except TypeError:\n 
cls.tearDownClass()\n raise unittest.SkipTest(\"Hive is not available\")\n os.unlink(cls.tempdir.name)\n cls.spark = HiveContext._createForTesting(cls.sc)\n cls.testData = [Row(key=i, value=str(i)) for i in range(100)]\n cls.df = cls.sc.parallelize(cls.testData).toDF()\n\n @classmethod\n def tearDownClass(cls):\n ReusedPySparkTestCase.tearDownClass()\n shutil.rmtree(cls.tempdir.name, ignore_errors=True)\n\n def test_save_and_load_table(self):\n df = self.df\n tmpPath = tempfile.mkdtemp()\n shutil.rmtree(tmpPath)\n df.write.saveAsTable(\"savedJsonTable\", \"json\", \"append\", path=tmpPath)\n actual = self.spark.createExternalTable(\"externalJsonTable\", tmpPath, \"json\")\n self.assertEqual(sorted(df.collect()),\n sorted(self.spark.sql(\"SELECT * FROM savedJsonTable\").collect()))\n self.assertEqual(sorted(df.collect()),\n sorted(self.spark.sql(\"SELECT * FROM externalJsonTable\").collect()))\n self.assertEqual(sorted(df.collect()), sorted(actual.collect()))\n self.spark.sql(\"DROP TABLE externalJsonTable\")\n\n df.write.saveAsTable(\"savedJsonTable\", \"json\", \"overwrite\", path=tmpPath)\n schema = StructType([StructField(\"value\", StringType(), True)])\n actual = self.spark.createExternalTable(\"externalJsonTable\", source=\"json\",\n schema=schema, path=tmpPath,\n noUse=\"this options will not be used\")\n self.assertEqual(sorted(df.collect()),\n sorted(self.spark.sql(\"SELECT * FROM savedJsonTable\").collect()))\n self.assertEqual(sorted(df.select(\"value\").collect()),\n sorted(self.spark.sql(\"SELECT * FROM externalJsonTable\").collect()))\n self.assertEqual(sorted(df.select(\"value\").collect()), sorted(actual.collect()))\n self.spark.sql(\"DROP TABLE savedJsonTable\")\n self.spark.sql(\"DROP TABLE externalJsonTable\")\n\n defaultDataSourceName = self.spark.getConf(\"spark.sql.sources.default\",\n \"org.apache.spark.sql.parquet\")\n self.spark.sql(\"SET spark.sql.sources.default=org.apache.spark.sql.json\")\n df.write.saveAsTable(\"savedJsonTable\", path=tmpPath, mode=\"overwrite\")\n actual = self.spark.createExternalTable(\"externalJsonTable\", path=tmpPath)\n self.assertEqual(sorted(df.collect()),\n sorted(self.spark.sql(\"SELECT * FROM savedJsonTable\").collect()))\n self.assertEqual(sorted(df.collect()),\n sorted(self.spark.sql(\"SELECT * FROM externalJsonTable\").collect()))\n self.assertEqual(sorted(df.collect()), sorted(actual.collect()))\n self.spark.sql(\"DROP TABLE savedJsonTable\")\n self.spark.sql(\"DROP TABLE externalJsonTable\")\n self.spark.sql(\"SET spark.sql.sources.default=\" + defaultDataSourceName)\n\n shutil.rmtree(tmpPath)\n\n def test_window_functions(self):\n df = self.spark.createDataFrame([(1, \"1\"), (2, \"2\"), (1, \"2\"), (1, \"2\")], [\"key\", \"value\"])\n w = Window.partitionBy(\"value\").orderBy(\"key\")\n from pyspark.sql import functions as F\n sel = df.select(df.value, df.key,\n F.max(\"key\").over(w.rowsBetween(0, 1)),\n F.min(\"key\").over(w.rowsBetween(0, 1)),\n F.count(\"key\").over(w.rowsBetween(float('-inf'), float('inf'))),\n F.row_number().over(w),\n F.rank().over(w),\n F.dense_rank().over(w),\n F.ntile(2).over(w))\n rs = sorted(sel.collect())\n expected = [\n (\"1\", 1, 1, 1, 1, 1, 1, 1, 1),\n (\"2\", 1, 1, 1, 3, 1, 1, 1, 1),\n (\"2\", 1, 2, 1, 3, 2, 1, 1, 1),\n (\"2\", 2, 2, 2, 3, 3, 3, 2, 2)\n ]\n for r, ex in zip(rs, expected):\n self.assertEqual(tuple(r), ex[:len(r)])\n\n def test_window_functions_without_partitionBy(self):\n df = self.spark.createDataFrame([(1, \"1\"), (2, \"2\"), (1, \"2\"), (1, \"2\")], [\"key\", \"value\"])\n 
w = Window.orderBy(\"key\", df.value)\n from pyspark.sql import functions as F\n sel = df.select(df.value, df.key,\n F.max(\"key\").over(w.rowsBetween(0, 1)),\n F.min(\"key\").over(w.rowsBetween(0, 1)),\n F.count(\"key\").over(w.rowsBetween(float('-inf'), float('inf'))),\n F.row_number().over(w),\n F.rank().over(w),\n F.dense_rank().over(w),\n F.ntile(2).over(w))\n rs = sorted(sel.collect())\n expected = [\n (\"1\", 1, 1, 1, 4, 1, 1, 1, 1),\n (\"2\", 1, 1, 1, 4, 2, 2, 2, 1),\n (\"2\", 1, 2, 1, 4, 3, 2, 2, 2),\n (\"2\", 2, 2, 2, 4, 4, 4, 3, 2)\n ]\n for r, ex in zip(rs, expected):\n self.assertEqual(tuple(r), ex[:len(r)])\n\n def test_window_functions_cumulative_sum(self):\n df = self.spark.createDataFrame([(\"one\", 1), (\"two\", 2)], [\"key\", \"value\"])\n from pyspark.sql import functions as F\n\n # Test cumulative sum\n sel = df.select(\n df.key,\n F.sum(df.value).over(Window.rowsBetween(Window.unboundedPreceding, 0)))\n rs = sorted(sel.collect())\n expected = [(\"one\", 1), (\"two\", 3)]\n for r, ex in zip(rs, expected):\n self.assertEqual(tuple(r), ex[:len(r)])\n\n # Test boundary values less than JVM's Long.MinValue and make sure we don't overflow\n sel = df.select(\n df.key,\n F.sum(df.value).over(Window.rowsBetween(Window.unboundedPreceding - 1, 0)))\n rs = sorted(sel.collect())\n expected = [(\"one\", 1), (\"two\", 3)]\n for r, ex in zip(rs, expected):\n self.assertEqual(tuple(r), ex[:len(r)])\n\n # Test boundary values greater than JVM's Long.MaxValue and make sure we don't overflow\n frame_end = Window.unboundedFollowing + 1\n sel = df.select(\n df.key,\n F.sum(df.value).over(Window.rowsBetween(Window.currentRow, frame_end)))\n rs = sorted(sel.collect())\n expected = [(\"one\", 3), (\"two\", 2)]\n for r, ex in zip(rs, expected):\n self.assertEqual(tuple(r), ex[:len(r)])\n\n def test_collect_functions(self):\n df = self.spark.createDataFrame([(1, \"1\"), (2, \"2\"), (1, \"2\"), (1, \"2\")], [\"key\", \"value\"])\n from pyspark.sql import functions\n\n self.assertEqual(\n sorted(df.select(functions.collect_set(df.key).alias('r')).collect()[0].r),\n [1, 2])\n self.assertEqual(\n sorted(df.select(functions.collect_list(df.key).alias('r')).collect()[0].r),\n [1, 1, 1, 2])\n self.assertEqual(\n sorted(df.select(functions.collect_set(df.value).alias('r')).collect()[0].r),\n [\"1\", \"2\"])\n self.assertEqual(\n sorted(df.select(functions.collect_list(df.value).alias('r')).collect()[0].r),\n [\"1\", \"2\", \"2\", \"2\"])\n\n def test_limit_and_take(self):\n df = self.spark.range(1, 1000, numPartitions=10)\n\n def assert_runs_only_one_job_stage_and_task(job_group_name, f):\n tracker = self.sc.statusTracker()\n self.sc.setJobGroup(job_group_name, description=\"\")\n f()\n jobs = tracker.getJobIdsForGroup(job_group_name)\n self.assertEqual(1, len(jobs))\n stages = tracker.getJobInfo(jobs[0]).stageIds\n self.assertEqual(1, len(stages))\n self.assertEqual(1, tracker.getStageInfo(stages[0]).numTasks)\n\n # Regression test for SPARK-10731: take should delegate to Scala implementation\n assert_runs_only_one_job_stage_and_task(\"take\", lambda: df.take(1))\n # Regression test for SPARK-17514: limit(n).collect() should the perform same as take(n)\n assert_runs_only_one_job_stage_and_task(\"collect_limit\", lambda: df.limit(1).collect())\n\n def test_datetime_functions(self):\n from pyspark.sql import functions\n from datetime import date, datetime\n df = self.spark.range(1).selectExpr(\"'2017-01-22' as dateCol\")\n parse_result = df.select(functions.to_date(functions.col(\"dateCol\"))).first()\n 
self.assertEquals(date(2017, 1, 22), parse_result['to_date(`dateCol`)'])\n\n @unittest.skipIf(sys.version_info < (3, 3), \"Unittest < 3.3 doesn't support mocking\")\n def test_unbounded_frames(self):\n from unittest.mock import patch\n from pyspark.sql import functions as F\n from pyspark.sql import window\n import importlib\n\n df = self.spark.range(0, 3)\n\n def rows_frame_match():\n return \"ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING\" in df.select(\n F.count(\"*\").over(window.Window.rowsBetween(-sys.maxsize, sys.maxsize))\n ).columns[0]\n\n def range_frame_match():\n return \"RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING\" in df.select(\n F.count(\"*\").over(window.Window.rangeBetween(-sys.maxsize, sys.maxsize))\n ).columns[0]\n\n with patch(\"sys.maxsize\", 2 ** 31 - 1):\n importlib.reload(window)\n self.assertTrue(rows_frame_match())\n self.assertTrue(range_frame_match())\n\n with patch(\"sys.maxsize\", 2 ** 63 - 1):\n importlib.reload(window)\n self.assertTrue(rows_frame_match())\n self.assertTrue(range_frame_match())\n\n with patch(\"sys.maxsize\", 2 ** 127 - 1):\n importlib.reload(window)\n self.assertTrue(rows_frame_match())\n self.assertTrue(range_frame_match())\n\n importlib.reload(window)\n\n\nclass DataTypeVerificationTests(unittest.TestCase):\n\n def test_verify_type_exception_msg(self):\n self.assertRaisesRegexp(\n ValueError,\n \"test_name\",\n lambda: _make_type_verifier(StringType(), nullable=False, name=\"test_name\")(None))\n\n schema = StructType([StructField('a', StructType([StructField('b', IntegerType())]))])\n self.assertRaisesRegexp(\n TypeError,\n \"field b in field a\",\n lambda: _make_type_verifier(schema)([[\"data\"]]))\n\n def test_verify_type_ok_nullable(self):\n obj = None\n types = [IntegerType(), FloatType(), StringType(), StructType([])]\n for data_type in types:\n try:\n _make_type_verifier(data_type, nullable=True)(obj)\n except Exception:\n self.fail(\"verify_type(%s, %s, nullable=True)\" % (obj, data_type))\n\n def test_verify_type_not_nullable(self):\n import array\n import datetime\n import decimal\n\n schema = StructType([\n StructField('s', StringType(), nullable=False),\n StructField('i', IntegerType(), nullable=True)])\n\n class MyObj:\n def __init__(self, **kwargs):\n for k, v in kwargs.items():\n setattr(self, k, v)\n\n # obj, data_type\n success_spec = [\n # String\n (\"\", StringType()),\n (u\"\", StringType()),\n (1, StringType()),\n (1.0, StringType()),\n ([], StringType()),\n ({}, StringType()),\n\n # UDT\n (ExamplePoint(1.0, 2.0), ExamplePointUDT()),\n\n # Boolean\n (True, BooleanType()),\n\n # Byte\n (-(2**7), ByteType()),\n (2**7 - 1, ByteType()),\n\n # Short\n (-(2**15), ShortType()),\n (2**15 - 1, ShortType()),\n\n # Integer\n (-(2**31), IntegerType()),\n (2**31 - 1, IntegerType()),\n\n # Long\n (2**64, LongType()),\n\n # Float & Double\n (1.0, FloatType()),\n (1.0, DoubleType()),\n\n # Decimal\n (decimal.Decimal(\"1.0\"), DecimalType()),\n\n # Binary\n (bytearray([1, 2]), BinaryType()),\n\n # Date/Timestamp\n (datetime.date(2000, 1, 2), DateType()),\n (datetime.datetime(2000, 1, 2, 3, 4), DateType()),\n (datetime.datetime(2000, 1, 2, 3, 4), TimestampType()),\n\n # Array\n ([], ArrayType(IntegerType())),\n ([\"1\", None], ArrayType(StringType(), containsNull=True)),\n ([1, 2], ArrayType(IntegerType())),\n ((1, 2), ArrayType(IntegerType())),\n (array.array('h', [1, 2]), ArrayType(IntegerType())),\n\n # Map\n ({}, MapType(StringType(), IntegerType())),\n ({\"a\": 1}, MapType(StringType(), IntegerType())),\n 
({\"a\": None}, MapType(StringType(), IntegerType(), valueContainsNull=True)),\n\n # Struct\n ({\"s\": \"a\", \"i\": 1}, schema),\n ({\"s\": \"a\", \"i\": None}, schema),\n ({\"s\": \"a\"}, schema),\n ({\"s\": \"a\", \"f\": 1.0}, schema),\n (Row(s=\"a\", i=1), schema),\n (Row(s=\"a\", i=None), schema),\n (Row(s=\"a\", i=1, f=1.0), schema),\n ([\"a\", 1], schema),\n ([\"a\", None], schema),\n ((\"a\", 1), schema),\n (MyObj(s=\"a\", i=1), schema),\n (MyObj(s=\"a\", i=None), schema),\n (MyObj(s=\"a\"), schema),\n ]\n\n # obj, data_type, exception class\n failure_spec = [\n # String (match anything but None)\n (None, StringType(), ValueError),\n\n # UDT\n (ExamplePoint(1.0, 2.0), PythonOnlyUDT(), ValueError),\n\n # Boolean\n (1, BooleanType(), TypeError),\n (\"True\", BooleanType(), TypeError),\n ([1], BooleanType(), TypeError),\n\n # Byte\n (-(2**7) - 1, ByteType(), ValueError),\n (2**7, ByteType(), ValueError),\n (\"1\", ByteType(), TypeError),\n (1.0, ByteType(), TypeError),\n\n # Short\n (-(2**15) - 1, ShortType(), ValueError),\n (2**15, ShortType(), ValueError),\n\n # Integer\n (-(2**31) - 1, IntegerType(), ValueError),\n (2**31, IntegerType(), ValueError),\n\n # Float & Double\n (1, FloatType(), TypeError),\n (1, DoubleType(), TypeError),\n\n # Decimal\n (1.0, DecimalType(), TypeError),\n (1, DecimalType(), TypeError),\n (\"1.0\", DecimalType(), TypeError),\n\n # Binary\n (1, BinaryType(), TypeError),\n\n # Date/Timestamp\n (\"2000-01-02\", DateType(), TypeError),\n (946811040, TimestampType(), TypeError),\n\n # Array\n ([\"1\", None], ArrayType(StringType(), containsNull=False), ValueError),\n ([1, \"2\"], ArrayType(IntegerType()), TypeError),\n\n # Map\n ({\"a\": 1}, MapType(IntegerType(), IntegerType()), TypeError),\n ({\"a\": \"1\"}, MapType(StringType(), IntegerType()), TypeError),\n ({\"a\": None}, MapType(StringType(), IntegerType(), valueContainsNull=False),\n ValueError),\n\n # Struct\n ({\"s\": \"a\", \"i\": \"1\"}, schema, TypeError),\n (Row(s=\"a\"), schema, ValueError), # Row can't have missing field\n (Row(s=\"a\", i=\"1\"), schema, TypeError),\n ([\"a\"], schema, ValueError),\n ([\"a\", \"1\"], schema, TypeError),\n (MyObj(s=\"a\", i=\"1\"), schema, TypeError),\n (MyObj(s=None, i=\"1\"), schema, ValueError),\n ]\n\n # Check success cases\n for obj, data_type in success_spec:\n try:\n _make_type_verifier(data_type, nullable=False)(obj)\n except Exception:\n self.fail(\"verify_type(%s, %s, nullable=False)\" % (obj, data_type))\n\n # Check failure cases\n for obj, data_type, exp in failure_spec:\n msg = \"verify_type(%s, %s, nullable=False) == %s\" % (obj, data_type, exp)\n with self.assertRaises(exp, msg=msg):\n _make_type_verifier(data_type, nullable=False)(obj)\n\n\[email protected](not _have_arrow, \"Arrow not installed\")\nclass ArrowTests(ReusedPySparkTestCase):\n\n @classmethod\n def setUpClass(cls):\n ReusedPySparkTestCase.setUpClass()\n cls.spark = SparkSession(cls.sc)\n cls.spark.conf.set(\"spark.sql.execution.arrow.enable\", \"true\")\n cls.schema = StructType([\n StructField(\"1_str_t\", StringType(), True),\n StructField(\"2_int_t\", IntegerType(), True),\n StructField(\"3_long_t\", LongType(), True),\n StructField(\"4_float_t\", FloatType(), True),\n StructField(\"5_double_t\", DoubleType(), True)])\n cls.data = [(\"a\", 1, 10, 0.2, 2.0),\n (\"b\", 2, 20, 0.4, 4.0),\n (\"c\", 3, 30, 0.8, 6.0)]\n\n def assertFramesEqual(self, df_with_arrow, df_without):\n msg = (\"DataFrame from Arrow is not equal\" +\n (\"\\n\\nWith Arrow:\\n%s\\n%s\" % (df_with_arrow, 
df_with_arrow.dtypes)) +\n (\"\\n\\nWithout:\\n%s\\n%s\" % (df_without, df_without.dtypes)))\n self.assertTrue(df_without.equals(df_with_arrow), msg=msg)\n\n def test_unsupported_datatype(self):\n schema = StructType([StructField(\"array\", ArrayType(IntegerType(), False), True)])\n df = self.spark.createDataFrame([([1, 2, 3],)], schema=schema)\n with QuietTest(self.sc):\n self.assertRaises(Exception, lambda: df.toPandas())\n\n def test_null_conversion(self):\n df_null = self.spark.createDataFrame([tuple([None for _ in range(len(self.data[0]))])] +\n self.data)\n pdf = df_null.toPandas()\n null_counts = pdf.isnull().sum().tolist()\n self.assertTrue(all([c == 1 for c in null_counts]))\n\n def test_toPandas_arrow_toggle(self):\n df = self.spark.createDataFrame(self.data, schema=self.schema)\n self.spark.conf.set(\"spark.sql.execution.arrow.enable\", \"false\")\n pdf = df.toPandas()\n self.spark.conf.set(\"spark.sql.execution.arrow.enable\", \"true\")\n pdf_arrow = df.toPandas()\n self.assertFramesEqual(pdf_arrow, pdf)\n\n def test_pandas_round_trip(self):\n import pandas as pd\n import numpy as np\n data_dict = {}\n for j, name in enumerate(self.schema.names):\n data_dict[name] = [self.data[i][j] for i in range(len(self.data))]\n # need to convert these to numpy types first\n data_dict[\"2_int_t\"] = np.int32(data_dict[\"2_int_t\"])\n data_dict[\"4_float_t\"] = np.float32(data_dict[\"4_float_t\"])\n pdf = pd.DataFrame(data=data_dict)\n df = self.spark.createDataFrame(self.data, schema=self.schema)\n pdf_arrow = df.toPandas()\n self.assertFramesEqual(pdf_arrow, pdf)\n\n def test_filtered_frame(self):\n df = self.spark.range(3).toDF(\"i\")\n pdf = df.filter(\"i < 0\").toPandas()\n self.assertEqual(len(pdf.columns), 1)\n self.assertEqual(pdf.columns[0], \"i\")\n self.assertTrue(pdf.empty)\n\n\nif __name__ == \"__main__\":\n from pyspark.sql.tests import *\n if xmlrunner:\n unittest.main(testRunner=xmlrunner.XMLTestRunner(output='target/test-reports'))\n else:\n unittest.main()\n"
] |
[
[
"numpy.int32",
"numpy.float32",
"pandas.DataFrame"
]
] |
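Illustrative sketch (not part of the dataset record above): the record's pyspark test file builds a pandas frame with explicit numpy widths before comparing against Arrow output. This standalone snippet reproduces just that numpy/pandas pattern; the column names and rows are copied from the record, and no Spark installation is assumed.

import numpy as np
import pandas as pd

data = [("a", 1, 10, 0.2, 2.0), ("b", 2, 20, 0.4, 4.0), ("c", 3, 30, 0.8, 6.0)]
names = ["1_str_t", "2_int_t", "3_long_t", "4_float_t", "5_double_t"]
data_dict = {name: [row[j] for row in data] for j, name in enumerate(names)}
# Cast to the numpy widths the test expects Arrow to produce.
data_dict["2_int_t"] = np.int32(data_dict["2_int_t"])
data_dict["4_float_t"] = np.float32(data_dict["4_float_t"])
pdf = pd.DataFrame(data=data_dict)
print(pdf.dtypes)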
yfur/dl-chainer
|
[
"c1917710c80fd6b3dc4cded81700b92bbc349302"
] |
[
"1_shinno/chapter4_iris3.py"
] |
[
"import numpy as np\nimport chainer\nfrom chainer import cuda, Function, gradient_check, Variable\nfrom chainer import optimizers, serializers, utils\nfrom chainer import Link, Chain, ChainList\nimport chainer.functions as F\nimport chainer.links as L\nfrom sklearn import datasets\nimport time\n\n''' 1. Data preparation and settings '''\n# Load iris dataset from scikit-learn\niris = datasets.load_iris()\nX = iris.data.astype(np.float32)\nY = iris.target.astype(np.int32)\nN = Y.size\n\nindex = np.arange(N)\n# train data with odd index\nxtrain = X[index[index % 2 != 0], :]\nytrain = Y[index[index % 2 != 0]]\n# test data with even index\nxtest = X[index[index % 2 == 0],:]\nyans = Y[index[index % 2 == 0]]\n\n''' 2. Definition of a model with Chain class '''\nclass IrisChain(Chain):\n def __init__(self):\n super(IrisChain, self).__init__(\n l1=L.Linear(4,6),\n l2=L.Linear(6,3),\n )\n\n def __call__(self,x,y):\n return F.softmax_cross_entropy(self.fwd(x), y) # softmax cross entropy\n\n def fwd(self,x):\n h1 = F.sigmoid(self.l1(x))\n h2 = self.l2(h1)\n return h2\n\n\n''' 3. Initialization '''\nmodel = IrisChain()\noptimizer = optimizers.SGD()\noptimizer.setup(model)\n\n''' 4. Learning '''\n# batch\nn = 75\nbs = 25\nfor j in range(5000):\n accum_loss = None\n sffindex = np.random.permutation(n)\n for i in range(0, n, bs):\n x = Variable(xtrain[sffindex[i:(i + bs) if (i + bs) < n else n]])\n y = Variable(ytrain[sffindex[i:(i + bs) if (i + bs) < n else n]])\n model.zerograds()\n loss = model(x,y)\n accum_loss = loss if accum_loss is None else accum_loss + loss\n loss.backward()\n optimizer.update()\n\n''' 5. Testing '''\nxt = Variable(xtest, volatile='on')\nyy = model.fwd(xt)\n\nans = yy.data\nnrow, ncol = ans.shape\nok = 0\nfor i in range(nrow):\n cls = np.argmax(ans[i,:])\n print(ans[i,:], cls)\n if cls == yans[i]:\n ok += 1\n\nprint('{0:d} / {1:d} = {2:f}'.format(ok, nrow, ok/nrow))\nprint(ans[0])\n"
] |
[
[
"numpy.arange",
"numpy.random.permutation",
"sklearn.datasets.load_iris",
"numpy.argmax"
]
] |
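Illustrative sketch (not part of the dataset record above): the chainer script splits iris by odd/even index and shuffles minibatch indices each epoch with numpy.random.permutation. This is the same pattern on its own, assuming only scikit-learn and numpy; the bincount line at the end is an added toy use of numpy.argmax.

import numpy as np
from sklearn import datasets

iris = datasets.load_iris()
X = iris.data.astype(np.float32)
Y = iris.target.astype(np.int32)
index = np.arange(Y.size)
xtrain, ytrain = X[index % 2 != 0], Y[index % 2 != 0]   # odd indices -> train
xtest, ytest = X[index % 2 == 0], Y[index % 2 == 0]     # even indices -> test

n, bs = len(xtrain), 25
for epoch in range(2):
    order = np.random.permutation(n)     # fresh shuffle each epoch
    for i in range(0, n, bs):
        batch = order[i:i + bs]
        # ... forward/backward on xtrain[batch], ytrain[batch] ...

print(np.argmax(np.bincount(ytest)))     # majority class in the test split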
RunsStudio/CAV-exclusive-lane-by-VISSIM
|
[
"a06844384c37468582e8d23463bd7c37d68b7533"
] |
[
"source/result_analysis/result_analysis/overall_drawpic.py"
] |
[
"import numpy as np\nfrom matplotlib import rcParams\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n\ndef ANOVA_cal_by_stat(data_value):\n da = pd.DataFrame(data_value)\n\n da.columns.name = '场景'\n df1 = da.melt().dropna()\n from statsmodels.formula.api import ols\n from statsmodels.stats.anova import anova_lm\n\n model = ols('value ~ C(场景)', df1).fit()\n print(anova_lm(model))\ndef convert_dict(original_dict,i,j):\n key_list=[]\n value_list=[]\n for key in original_dict:\n key_list.append(key)\n value_list.append(original_dict[key])\n key_list[i],key_list[j]=key_list[j],key_list[i]\n value_list[i],value_list[j]=value_list[j],value_list[i]\n new_dict={}\n i=0\n for item in key_list:\n new_dict[item]=value_list[i]\n i+=1\n return new_dict\n\ndef overall_evaluation(pt):\n UNIQUE_FONT_SITE = 12\n plt.rcParams['font.sans-serif'] = ['Microsoft YaHei']\n plt.rcParams['axes.unicode_minus'] = False\n file_name=('C:\\\\Users\\\\run\\\\Desktop\\\\毕设仿真\\\\travel_time_result.txt')\n file_object=open(file_name,'r')\n file_context = file_object.read().splitlines()\n file_name=('C:\\\\Users\\\\run\\\\Desktop\\\\毕设仿真\\\\travel_time_result_2.txt')\n save_dir=('C:\\\\Users\\\\run\\\\Desktop\\\\毕设\\\\结果-图\\\\')\n file_object=open(file_name,'r')\n file_context += file_object.read().splitlines()\n # file_name=('C:\\\\Users\\\\run\\\\Desktop\\\\毕设仿真\\\\travel_time_result_platExam.txt')\n # file_object=open(file_name,'r')\n # file_context += file_object.read().splitlines()\n result_dict_type_NVeh={}\n result_dict_type_TravelTm={}\n manage_lane_type = ['无专用道','内侧硬隔离','内侧软隔离','外侧硬隔离','外侧软隔离','无专用道-hCAV']\n for line in file_context:\n ls=line.split(',')\n ml_type=ls[0]\n penetrate_ratio=ls[1]\n peak_type=ls[2]\n simu_times=ls[3]\n evaluation_times=ls[4]\n veh_type=ls[5]\n Nveh=ls[6]\n travelTm=ls[7]\n if travelTm==None:\n continue\n\n key=ml_type+'_'+peak_type+'_'+penetrate_ratio\n\n if int(penetrate_ratio)<=40 and int(penetrate_ratio)>=5and int(evaluation_times)<6900 and veh_type=='All' and peak_type==pt :\n if key not in result_dict_type_NVeh:\n result_dict_type_NVeh[key]=[]\n result_dict_type_NVeh[key].append(int(Nveh))\n else:\n result_dict_type_NVeh[key].append(int(Nveh))\n if key not in result_dict_type_TravelTm:\n result_dict_type_TravelTm[key]=[]\n result_dict_type_TravelTm[key].append(float(travelTm))\n else:\n result_dict_type_TravelTm[key].append(float(travelTm))\n overall_dict_NVeh={}\n overall_dict_TravelTm={}\n overall_dict_NVeh_err={}\n overall_dict_TravelTm_err={}\n for key in result_dict_type_NVeh:\n print(key,',', result_dict_type_NVeh[key])\n # print(len(result_dict_type_NVeh[key]))\n # print(np.mean(result_dict_type_NVeh[key]),np.std(result_dict_type_NVeh[key]))\n if key.split('_')[0] not in overall_dict_NVeh:\n overall_dict_NVeh[key.split('_')[0]]=[]\n overall_dict_NVeh_err[key.split('_')[0]]=[]\n overall_dict_NVeh[key.split('_')[0]].append(np.mean(result_dict_type_NVeh[key]))\n overall_dict_NVeh_err[key.split('_')[0]].append(np.std(result_dict_type_NVeh[key]))\n else:\n overall_dict_NVeh[key.split('_')[0]].append(np.mean(result_dict_type_NVeh[key]))\n overall_dict_NVeh_err[key.split('_')[0]].append(np.std(result_dict_type_NVeh[key]))\n\n # print(len(result_dict_type_NVeh[key]))\n ANOVA_cal_by_stat(result_dict_type_NVeh)\n for key in result_dict_type_TravelTm:\n\n print(key,',',result_dict_type_TravelTm[key])\n # print(len(result_dict_type_TravelTm[key]))\n # print(np.mean(result_dict_type_TravelTm[key]),np.std(result_dict_type_TravelTm[key]))\n if key.split('_')[0] not in 
overall_dict_TravelTm:\n overall_dict_TravelTm[key.split('_')[0]]=[]\n overall_dict_TravelTm_err[key.split('_')[0]]=[]\n overall_dict_TravelTm[key.split('_')[0]].append(np.mean(result_dict_type_TravelTm[key]))\n overall_dict_TravelTm_err[key.split('_')[0]].append(np.std(result_dict_type_TravelTm[key]))\n else:\n overall_dict_TravelTm[key.split('_')[0]].append(np.mean(result_dict_type_TravelTm[key]))\n overall_dict_TravelTm_err[key.split('_')[0]].append(np.std(result_dict_type_TravelTm[key]))\n line_types = ['o-', '<--', 'v:','s:', '*--']\n i=0\n for t in overall_dict_NVeh:\n Xs=[5,10,15,20,30,40]\n plt.errorbar(x=Xs, y=overall_dict_NVeh[t], yerr=overall_dict_NVeh_err[t], fmt=line_types[i],markersize = 4, elinewidth=1.5, capsize=3)\n i+=1\n # print(overall_dict_NVeh[t])\n\n plt.legend(manage_lane_type,fontsize=UNIQUE_FONT_SITE-2)\n plt.xlabel('渗透率(%)',fontsize=UNIQUE_FONT_SITE)\n plt.ylabel('通行车辆流率(辆/10分钟)',fontsize=UNIQUE_FONT_SITE)\n # plt.ylim(400,540)\n plt.ylim(400,550)\n plt.xticks(fontsize=UNIQUE_FONT_SITE)\n plt.yticks(fontsize=UNIQUE_FONT_SITE)\n plt.savefig(save_dir+pt+'_NVEH.png',dpi=300,bbox_inches='tight',pad_inches=0.05)\n plt.show()\n i=0\n # print(overall_dict_TravelTm)\n overall_dict_TravelTm=convert_dict(overall_dict_TravelTm,1,2)\n for t in overall_dict_TravelTm:\n Xs=[5,10,15,20,30,40]\n plt.errorbar(x=Xs, y=overall_dict_TravelTm[t], yerr=overall_dict_TravelTm_err[t],fmt=line_types[i],markersize = 4, elinewidth=1.5, capsize=3)\n i+=1\n print(overall_dict_TravelTm[t])\n plt.legend(manage_lane_type,fontsize=UNIQUE_FONT_SITE-2)\n\n plt.xlabel('渗透率(%)',fontsize=UNIQUE_FONT_SITE)\n plt.ylabel('通行时间(s)',fontsize=UNIQUE_FONT_SITE)\n plt.xticks(fontsize=UNIQUE_FONT_SITE)\n plt.yticks(fontsize=UNIQUE_FONT_SITE)\n # plt.ylim(280,340)\n plt.savefig(save_dir+pt+'_TravelTm.png',dpi=300,bbox_inches='tight',pad_inches=0.05)\n plt.show()\nif __name__=='__main__':\n overall_evaluation('sub-peak')"
] |
[
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.ylim",
"pandas.DataFrame",
"matplotlib.pyplot.savefig",
"numpy.std",
"numpy.mean",
"matplotlib.pyplot.errorbar",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] |
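Illustrative sketch (not part of the dataset record above): the VISSIM analysis script plots per-scenario means with standard-deviation error bars via matplotlib.pyplot.errorbar. The snippet below shows that plotting pattern on synthetic data; the scenario names and numbers are invented placeholders.

import numpy as np
import matplotlib.pyplot as plt

xs = [5, 10, 15, 20, 30, 40]                 # penetration rates (%)
rng = np.random.default_rng(0)
scenarios = {"A": rng.normal(500, 10, (6, 8)),
             "B": rng.normal(480, 12, (6, 8))}
line_types = ["o-", "<--"]
for fmt, (name, runs) in zip(line_types, scenarios.items()):
    # Mean +/- std across the 8 simulated runs at each penetration rate.
    plt.errorbar(x=xs, y=runs.mean(axis=1), yerr=runs.std(axis=1),
                 fmt=fmt, markersize=4, elinewidth=1.5, capsize=3, label=name)
plt.legend()
plt.xlabel("penetration rate (%)")
plt.ylabel("flow rate")
plt.savefig("errorbar_demo.png", dpi=300, bbox_inches="tight")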
amiralansary/BrainSurfaceTK
|
[
"17e3ef5e1c5d6e1a75293fbe031977ec3fbe0fef",
"17e3ef5e1c5d6e1a75293fbe031977ec3fbe0fef"
] |
[
"models/volume3d/main/train_validate.py",
"scripts/classification/PointNet/run_pointnet_classification.py"
] |
[
"import os\nimport SimpleITK as sitk\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport torch\nimport torch.nn.functional as F\nfrom sklearn.model_selection import train_test_split\nfrom torch import nn\nfrom torch.nn import Module, Conv3d, ConvTranspose3d, Linear, ReLU, Sequential, Linear, Flatten, L1Loss, BatchNorm3d, \\\n Dropout, BatchNorm1d\nfrom torch.optim import Adam, lr_scheduler\nfrom torch.utils.data import Dataset, DataLoader\nfrom ..utils.utils import plot_preds\nfrom ..utils.models import ImageSegmentationDataset, Part3, resample_image, PrintTensor\nimport os.path as osp\nPATH_TO_VOLUME3D = osp.join(osp.dirname(osp.realpath(__file__)), '..') + '/'\n\ndef save_graphs_train(fn, num_epochs, training_loss, val_loss_epoch5):\n '''\n Saves all the necessary graphs\n :param fn: path to folder where to save\n :param num_epochs: epoch list\n :param training_loss: loss list\n :param test_loss_epoch5: test loss list\n :param writer: tensorboard writer\n :return:\n '''\n\n path = osp.join(osp.dirname(osp.realpath(__file__)), '..', f'{fn}/')\n\n\n plt.plot([epoch for epoch in range(num_epochs)], training_loss, color='b', label='Train')\n plt.plot([5 * i for i in range(len(val_loss_epoch5))], val_loss_epoch5, color='r', label='Val')\n plt.title(\"Loss\")\n plt.xlabel(\"Number of Epochs\")\n plt.ylabel(\"Loss\")\n plt.ylim(0, 5)\n plt.xlim(-5, num_epochs + 5)\n plt.legend()\n plt.savefig(path + f'graph.png')\n\n plt.close()\n\n\ndef save_to_log(model, params, fn, final_MAE, num_epochs, batch_size, lr, feats, gamma, smoothen, edgen, dropout_p, img_spacing, img_size, scheduler_freq):\n '''\n Save all the information about the run to log\n '''\n\n print(f\"Average Loss on whole val set: {final_MAE}\")\n\n result = f\"\"\"\n ########################################################################\n \n ***** Score = {final_MAE} *****\n\n 2. Number of epochs:\n num_epochs = {num_epochs}\n\n Batch size during training\n batch_size = {batch_size}\n\n Learning rate for optimizers\n lr = {lr}\n\n Size of feature amplifier\n Feature Amplifier: {feats}\n\n\n Gamma (using sched)\n Gamma: {gamma}\n Frequency of step: {scheduler_freq}\n\n 7. Image spacing and size\n img_spacing = {img_spacing}\n img_size = {img_size}\n\n Smooth:\n smoothen = {smoothen}\n\n Edgen:\n edgen = {edgen}\n\n Amount of dropout:\n dropout_p = {dropout_p}\n\n Total number of parameters is: {params}\n\n Model:\n {model.__str__()}\n ########################################################################\n \"\"\"\n\n with open(f'{fn}/log.txt', 'a+') as log:\n log.write('\\n')\n log.write(result)\n log.write('\\n')\n torch.save(model, f'{fn}/model.pth')\n\n path = osp.join(fn, '../')\n with open(path + 'all_log.txt', 'a+') as log:\n log.write('\\n')\n log.write(f'SUBJECT #{fn[-1]}: Validation = {final_MAE}, ')\n\n\ndef train_validate(lr, feats, num_epochs, gamma, batch_size, dropout_p, dataset_train, dataset_val, fn, number_here, scheduler_freq, writer):\n '''\n Main train-val loop. Train on training data and evaluate on validation data.\n\n :param lr: learning rate\n :param feats: feature amplifier (multiplier of the number of parameters in the CNN)\n :param num_epochs:\n :param gamma: scheduler gamma\n :param batch_size\n :param dropout_p: dropout proba\n :param dataset_train\n :param dataset_val\n :param fn: saving folder\n :param scheduler_freq\n :param writer: tensorboard\n :return: model, params, final_MAE\n '''\n\n # 1. 
Display GPU Settings:\n cuda_dev = '0' # GPU device 0 (can be changed if multiple GPUs are available)\n use_cuda = torch.cuda.is_available()\n device = torch.device(\"cuda:\" + cuda_dev if use_cuda else \"cpu\")\n print('Device: ' + str(device))\n if use_cuda:\n print('GPU: ' + str(torch.cuda.get_device_name(int(cuda_dev))))\n\n # 2. Define loss function\n loss_function = L1Loss()\n\n # 3. Print parameters\n print(f\"Learning Rate: {lr} and Feature Amplifier: {feats}, Num_epochs: {num_epochs}, Gamma: {gamma}\")\n\n # 4. Define collector lists\n folds_val_scores = []\n training_loss = []\n val_loss_epoch5 = []\n i_fold_val_scores = []\n\n # 5. Create data loaders\n train_loader = DataLoader(dataset_train, batch_size=batch_size)\n val_loader = DataLoader(dataset_val, batch_size=batch_size)\n\n # 6. Define a model\n model = Part3(feats, dropout_p).to(device=device)\n\n # 7. Print parameters\n params = sum(p.numel() for p in model.parameters() if p.requires_grad)\n print(f\"Total Params: {params}\")\n\n # 8. Create an optimizer + LR scheduler\n optimizer = Adam(model.parameters(), lr, weight_decay=0.005)\n scheduler = lr_scheduler.StepLR(optimizer, step_size=1, gamma=gamma, last_epoch=-1)\n\n # 9. Proceed to train\n for epoch in range(num_epochs):\n model.train()\n epoch_loss = []\n for batch_data, batch_labels in train_loader:\n batch_labels = batch_labels.to(device=device)\n batch_data = batch_data.to(device=device) # move to device, e.g. GPU\n batch_preds = model(batch_data)\n loss = loss_function(batch_preds, batch_labels)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n epoch_loss.append(loss.item())\n\n\n training_MAE = np.mean(epoch_loss)\n training_loss.append(training_MAE)\n\n writer.add_scalar('MAE Loss/train', training_MAE, epoch)\n\n\n if epoch % scheduler_freq == 0:\n scheduler.step()\n\n # 10. Validate every N epochs\n if (epoch % 5 == 0):\n val_loss = []\n model.eval()\n pred_ages = []\n actual_ages = []\n with torch.no_grad():\n for batch_data, batch_labels in val_loader:\n batch_data = batch_data.to(device=device) # move to device, e.g. GPU\n batch_labels = batch_labels.to(device=device)\n batch_preds = model(batch_data)\n\n pred_ages.append([batch_preds[i].item() for i in range(len(batch_preds))])\n actual_ages.append([batch_labels[i].item() for i in range(len(batch_labels))])\n\n loss = loss_function(batch_preds, batch_labels)\n val_loss.append(loss.item())\n\n mean_val_error5 = np.mean(val_loss)\n val_loss_epoch5.append(mean_val_error5)\n\n plot_preds(pred_ages, actual_ages, writer, epoch, test=False)\n\n print(f\"Epoch: {epoch}:: Learning Rate: {scheduler.get_lr()[0]}\")\n print(\n f\"{number_here}:: Maxiumum Age Error: {np.round(np.max(epoch_loss))} Average Age Error: {training_MAE}, MAE Validation: {mean_val_error5}\")\n\n writer.add_scalar('Max Age Error/validate', np.round(np.max(epoch_loss)), epoch)\n writer.add_scalar('MAE Loss/validate', mean_val_error5, epoch)\n\n\n # 11. Validate the last time\n model.eval()\n pred_ages = []\n actual_ages = []\n with torch.no_grad():\n for batch_data, batch_labels in val_loader:\n batch_data = batch_data.to(device=device) # move to device, e.g. 
GPU\n batch_labels = batch_labels.to(device=device)\n batch_preds = model(batch_data)\n\n pred_ages.append([batch_preds[i].item() for i in range(len(batch_preds))])\n actual_ages.append([batch_labels[i].item() for i in range(len(batch_labels))])\n\n loss = loss_function(batch_preds, batch_labels)\n i_fold_val_scores.append(loss.item())\n\n plot_preds(pred_ages, actual_ages, writer, epoch, test=False)\n\n # 12. Summarise the results\n mean_fold_score = np.mean(i_fold_val_scores)\n val_loss_epoch5.append(mean_fold_score)\n print(f\"Mean Age Error: {mean_fold_score}\")\n\n folds_val_scores.append(mean_fold_score)\n\n final_MAE = np.mean(folds_val_scores)\n\n save_graphs_train(fn, num_epochs, training_loss, val_loss_epoch5)\n\n return model, params, final_MAE",
"import os.path as osp\n\nPATH_TO_ROOT = osp.join(osp.dirname(osp.realpath(__file__)), '..', '..')\nimport sys\n\nsys.path.append(PATH_TO_ROOT)\n\nimport os\nimport time\nimport pickle\nimport csv\n\nimport torch\nfrom torch.optim.lr_scheduler import StepLR\nfrom torch.utils.tensorboard import SummaryWriter\n\nfrom models.pointnet.src.models.pointnet2_classification import Net\nfrom models.pointnet.main.pointnet2_classification import train, test_classification\nfrom models.pointnet.src.utils import get_data_path, data\n\nPATH_TO_ROOT = osp.join(osp.dirname(osp.realpath(__file__)), '..', '..') + '/'\nPATH_TO_POINTNET = osp.join(osp.dirname(osp.realpath(__file__)), '..', '..', '..', 'models', 'pointnet') + '/'\n\nif __name__ == '__main__':\n\n PATH_TO_ROOT = osp.join(osp.dirname(osp.realpath(__file__)), '..') + '/'\n\n num_workers = 2\n local_features = []\n global_features = []\n\n #################################################\n ########### EXPERIMENT DESCRIPTION ##############\n #################################################\n recording = False\n REPROCESS = False\n\n data_nativeness = 'native'\n data_compression = \"10k\"\n data_type = 'pial'\n hemisphere = 'both'\n\n comment = 'comment'\n # additional_comment = ''\n\n #################################################\n ############ EXPERIMENT DESCRIPTION #############\n #################################################\n\n # 1. Model Parameters\n ################################################\n lr = 0.001\n batch_size = 2\n gamma = 0.9875\n scheduler_step_size = 2\n target_class = 'gender'\n task = 'classification'\n numb_epochs = 1\n number_of_points = 10000\n\n ################################################\n\n ########## INDICES FOR DATA SPLIT #############\n with open(PATH_TO_POINTNET + 'src/names.pk', 'rb') as f:\n indices = pickle.load(f)\n ###############################################\n\n data_folder, files_ending = get_data_path(data_nativeness, data_compression, data_type, hemisphere=hemisphere)\n\n train_dataset, test_dataset, validation_dataset, train_loader, test_loader, val_loader, num_labels = data(\n data_folder,\n files_ending,\n data_type,\n target_class,\n task,\n REPROCESS,\n local_features,\n global_features,\n indices,\n batch_size,\n num_workers=2,\n data_nativeness=data_nativeness,\n data_compression=data_compression,\n hemisphere=hemisphere\n )\n\n if len(local_features) > 0:\n numb_local_features = train_dataset[0].x.size(1)\n else:\n numb_local_features = 0\n numb_global_features = len(global_features)\n\n # 7. 
Create the model\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n model = Net(numb_local_features, numb_global_features).to(device)\n optimizer = torch.optim.Adam(model.parameters(), lr=lr)\n scheduler = StepLR(optimizer, step_size=scheduler_step_size, gamma=gamma)\n\n print(f'number of param: {sum(p.numel() for p in model.parameters() if p.requires_grad)}')\n\n #################################################\n ############# EXPERIMENT LOGGING ################\n #################################################\n writer = None\n results_folder = None\n if recording:\n\n # Tensorboard writer.\n writer = SummaryWriter(log_dir='runs/' + task + '/' + comment, comment=comment)\n\n results_folder = 'runs/' + task + '/' + comment + '/results'\n model_dir = 'runs/' + task + '/' + comment + '/models'\n\n if not osp.exists(results_folder):\n os.makedirs(results_folder)\n\n if not osp.exists(model_dir):\n os.makedirs(model_dir)\n\n with open(results_folder + '/configuration.txt', 'w', newline='') as config_file:\n config_file.write('Learning rate - ' + str(lr) + '\\n')\n config_file.write('Batch size - ' + str(batch_size) + '\\n')\n config_file.write('Local features - ' + str(local_features) + '\\n')\n config_file.write('Global feature - ' + str(global_features) + '\\n')\n config_file.write('Number of points - ' + str(number_of_points) + '\\n')\n config_file.write('Data res - ' + data_compression + '\\n')\n config_file.write('Data type - ' + data_type + '\\n')\n config_file.write('Data nativeness - ' + data_nativeness + '\\n')\n # config_file.write('Additional comments - With rotate transforms' + '\\n')\n\n with open(results_folder + '/results.csv', 'w', newline='') as results_file:\n result_writer = csv.writer(results_file, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)\n result_writer.writerow(['Patient ID', 'Session ID', 'Prediction', 'Label', 'Error'])\n\n #################################################\n #################################################\n\n best_val_acc = 0.0\n\n # MAIN TRAINING LOOP\n for epoch in range(1, numb_epochs + 1):\n start = time.time()\n train(model, train_loader, epoch, device,\n optimizer, scheduler, writer)\n\n val_acc = test_classification(model, val_loader,\n indices['Val'], device,\n recording, results_folder,\n epoch=epoch)\n\n if recording:\n writer.add_scalar('Acc/val', val_acc, epoch)\n\n end = time.time()\n print('Time: ' + str(end - start))\n\n if val_acc > best_val_acc:\n best_val_acc = val_acc\n torch.save(model.state_dict(), model_dir + '/model_best.pt')\n print('Saving Model'.center(60, '-'))\n writer.add_scalar('Time/epoch', end - start, epoch)\n\n test_classification(model, test_loader, indices['Test'], device, recording, results_folder, val=False)\n\n if recording:\n # save the last model\n torch.save(model.state_dict(), model_dir + '/model_last.pt')\n\n # Eval best model on test\n model.load_state_dict(torch.load(model_dir + '/model_best.pt'))\n\n with open(results_folder + '/results.csv', 'a', newline='') as results_file:\n result_writer = csv.writer(results_file, delimiter=',', quoting=csv.QUOTE_MINIMAL)\n result_writer.writerow(['Best model!'])\n\n test_classification(model, test_loader, indices['Test'], device, recording, results_folder, val=False)\n"
] |
[
[
"matplotlib.pyplot.legend",
"torch.optim.lr_scheduler.StepLR",
"matplotlib.pyplot.title",
"matplotlib.pyplot.ylim",
"torch.utils.data.DataLoader",
"matplotlib.pyplot.savefig",
"numpy.max",
"matplotlib.pyplot.xlim",
"numpy.mean",
"torch.save",
"matplotlib.pyplot.close",
"torch.cuda.is_available",
"torch.device",
"matplotlib.pyplot.xlabel",
"torch.no_grad",
"torch.nn.L1Loss",
"matplotlib.pyplot.ylabel"
],
[
"torch.load",
"torch.utils.tensorboard.SummaryWriter",
"torch.cuda.is_available",
"torch.optim.lr_scheduler.StepLR"
]
] |
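Illustrative sketch (not part of the dataset record above): both files in this record share the same torch skeleton, Adam plus a StepLR scheduler, a train loop, and a no-grad validation pass. A minimal, self-contained version of that loop on toy tensors; the model, data, and hyperparameters are placeholders.

import torch
from torch import nn
from torch.optim import Adam, lr_scheduler
from torch.utils.data import DataLoader, TensorDataset

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = nn.Linear(8, 1).to(device)
loss_fn = nn.L1Loss()
optimizer = Adam(model.parameters(), lr=1e-3, weight_decay=0.005)
scheduler = lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.9)
loader = DataLoader(TensorDataset(torch.randn(32, 8), torch.randn(32, 1)), batch_size=8)

for epoch in range(4):
    model.train()
    for xb, yb in loader:
        xb, yb = xb.to(device), yb.to(device)
        loss = loss_fn(model(xb), yb)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    scheduler.step()
    if epoch % 2 == 0:                      # validate every N epochs
        model.eval()
        with torch.no_grad():
            val = sum(loss_fn(model(xb.to(device)), yb.to(device)).item()
                      for xb, yb in loader) / len(loader)
        print(epoch, val)
torch.save(model.state_dict(), "model_last.pt")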
SRatna/Behavioral-Cloning-CNN
|
[
"5da816d43a5a631f74d6740dd6fcc8a2fb221a9a"
] |
[
"model.py"
] |
[
"import os\nimport csv\nimport cv2\nimport numpy as np\nfrom math import ceil\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.utils import shuffle\nfrom keras.models import Sequential\nfrom keras.layers.core import Dense, Activation, Flatten, Dropout\nfrom keras.layers.convolutional import Conv2D\nfrom keras.layers.pooling import MaxPooling2D\nfrom keras.layers import Cropping2D, Lambda\n\nsamples = []\nwith open('./data/driving_log.csv') as csvfile:\n reader = csv.reader(csvfile)\n next(reader)\n for line in reader:\n samples.append(line)\n\ntrain_samples, validation_samples = train_test_split(samples, test_size=0.2)\n\ndef generator(samples, batch_size=32):\n num_samples = len(samples)\n while 1: # Loop forever so the generator never terminates\n shuffle(samples)\n for offset in range(0, num_samples, batch_size):\n batch_samples = samples[offset:offset+batch_size]\n\n images = []\n angles = []\n for batch_sample in batch_samples:\n center_image = cv2.imread('./data/IMG/'+batch_sample[0].split('/')[-1])\n left_image = cv2.imread('./data/IMG/'+batch_sample[1].split('/')[-1])\n right_image = cv2.imread('./data/IMG/'+batch_sample[2].split('/')[-1])\n center_angle = float(batch_sample[3])\n images.append(center_image)\n angles.append(center_angle)\n images.append(left_image)\n angles.append(center_angle+0.25)\n images.append(right_image)\n angles.append(center_angle-0.25)\n images.append(cv2.flip(center_image, 1))\n angles.append(center_angle*-1)\n\n X_train = np.array(images)\n y_train = np.array(angles)\n yield shuffle(X_train, y_train)\n\n# Set our batch size\nbatch_size = 64\n\n# compile and train the model using the generator function\ntrain_generator = generator(train_samples, batch_size=batch_size)\nvalidation_generator = generator(validation_samples, batch_size=batch_size)\n\nmodel = Sequential()\n# Preprocess incoming data, centered around zero with small standard deviation \nmodel.add(Lambda(lambda x: x/127.5 - 1., input_shape=(160,320,3)))\nmodel.add(Cropping2D(cropping=((74,20), (0,0))))\nmodel.add(Conv2D(24, (5, 5), strides=(2, 2)))\nmodel.add(Activation('relu'))\nmodel.add(Conv2D(36, (5, 5), strides=(2, 2)))\nmodel.add(Activation('relu'))\nmodel.add(Conv2D(48, (5, 5), strides=(2, 2)))\nmodel.add(Activation('relu'))\nmodel.add(Conv2D(64, (3, 3), strides=(1, 1)))\nmodel.add(Activation('relu'))\nmodel.add(Conv2D(64, (3, 3), strides=(1, 1)))\nmodel.add(Activation('relu'))\nmodel.add(Flatten())\nmodel.add(Dense(1164))\nmodel.add(Activation('relu'))\nmodel.add(Dense(100))\nmodel.add(Activation('relu'))\nmodel.add(Dense(50))\nmodel.add(Activation('relu'))\nmodel.add(Dense(10))\nmodel.add(Activation('relu'))\nmodel.add(Dense(1))\n\nmodel.compile(loss='mse', optimizer='adam')\nmodel.fit_generator(train_generator,\n steps_per_epoch=ceil(len(train_samples)/batch_size),\n validation_data=validation_generator,\n validation_steps=ceil(len(validation_samples)/batch_size),\n epochs=5, verbose=1)\n\nmodel.save('model.h5')"
] |
[
[
"sklearn.utils.shuffle",
"numpy.array",
"sklearn.model_selection.train_test_split"
]
] |
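Illustrative sketch (not part of the dataset record above): the behavioral-cloning script feeds Keras from an endless generator that reshuffles samples each pass. The same generator pattern on synthetic arrays, so no image files are needed; the fake steering angles are placeholders.

import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle

samples = list(range(100))
train_samples, val_samples = train_test_split(samples, test_size=0.2)

def generator(samples, batch_size=32):
    num_samples = len(samples)
    while True:                              # loop forever so fit can draw batches
        samples = shuffle(samples)
        for offset in range(0, num_samples, batch_size):
            batch = samples[offset:offset + batch_size]
            X = np.array(batch, dtype=np.float32)
            y = X * 0.1                      # stand-in for steering angles
            yield shuffle(X, y)              # shuffle X and y together

X, y = next(generator(train_samples))
print(X.shape, y.shape)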
RuiBai1999/myrepo
|
[
"ea8d618995a51079c79a6291af2ca02b01b846ea"
] |
[
"core/model/meta/anil.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\n@inproceedings{DBLP:conf/iclr/RaghuRBV20,\n author = {Aniruddh Raghu and\n Maithra Raghu and\n Samy Bengio and\n Oriol Vinyals},\n title = {Rapid Learning or Feature Reuse? Towards Understanding the Effectiveness\n of {MAML}},\n booktitle = {8th International Conference on Learning Representations, {ICLR} 2020,\n Addis Ababa, Ethiopia, April 26-30, 2020},\n year = {2020},\n url = {https://openreview.net/forum?id=rkgMkCEtPB}\n}\nhttps://arxiv.org/abs/1909.09157\n\"\"\"\nimport torch\nfrom torch import nn\n\nfrom core.utils import accuracy\nfrom .meta_model import MetaModel\nfrom ..backbone.utils import convert_maml_module\n\n\nclass ANILLayer(nn.Module):\n def __init__(self, feat_dim, hid_dim, way_num):\n super(ANILLayer, self).__init__()\n self.layers = nn.Sequential(\n # nn.Linear(feat_dim, hid_dim),\n nn.Linear(feat_dim, way_num)\n )\n\n def forward(self, x):\n return self.layers(x)\n\n\nclass ANIL(MetaModel):\n def __init__(self, inner_param, feat_dim, hid_dim=640, **kwargs):\n super(ANIL, self).__init__(**kwargs)\n self.feat_dim = feat_dim\n self.loss_func = nn.CrossEntropyLoss()\n self.classifier = ANILLayer(feat_dim=feat_dim, hid_dim=hid_dim, way_num=self.way_num)\n self.inner_param = inner_param\n\n convert_maml_module(self.classifier)\n\n def set_forward(self, batch):\n image, global_target = batch\n image = image.to(self.device)\n\n feat = self.emb_func(image)\n support_feat, query_feat, support_target, query_target = self.split_by_episode(feat, mode=1)\n episode_size = support_feat.size(0)\n\n output_list = []\n for i in range(episode_size):\n self.set_forward_adaptation(support_feat[i], support_target[i])\n output = self.classifier(query_feat[i])\n output_list.append(output)\n\n output = torch.cat(output_list, dim=0)\n acc = accuracy(output.squeeze(), query_target.reshape(-1))\n return output, acc\n\n def set_forward_loss(self, batch):\n image, global_target = batch\n image = image.to(self.device)\n\n feat = self.emb_func(image)\n support_feat, query_feat, support_target, query_target = self.split_by_episode(feat, mode=1)\n episode_size = support_feat.size(0)\n\n output_list = []\n for i in range(episode_size):\n self.set_forward_adaptation(support_feat[i], support_target[i])\n output = self.classifier(query_feat[i])\n output_list.append(output)\n\n output = torch.cat(output_list, dim=0)\n loss = self.loss_func(output, query_target.reshape(-1))\n acc = accuracy(output.squeeze(), query_target.reshape(-1))\n return output, acc, loss\n\n def set_forward_adaptation(self, support_feat, support_target):\n lr = self.inner_param[\"lr\"]\n fast_parameters = list(self.classifier.parameters())\n for parameter in self.classifier.parameters():\n parameter.fast = None\n\n self.emb_func.train()\n self.classifier.train()\n\n for i in range(self.inner_param[\"iter\"]):\n output = self.classifier(support_feat)\n loss = self.loss_func(output, support_target)\n grad = torch.autograd.grad(loss, fast_parameters, create_graph=True)\n fast_parameters = []\n\n for k, weight in enumerate(self.classifier.parameters()):\n if weight.fast is None:\n weight.fast = weight - lr * grad[k]\n else:\n weight.fast = weight.fast - lr * grad[k]\n fast_parameters.append(weight.fast)\n"
] |
[
[
"torch.nn.Linear",
"torch.nn.CrossEntropyLoss",
"torch.autograd.grad",
"torch.cat"
]
] |
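Illustrative sketch (not part of the dataset record above): the ANIL inner loop computes gradients with torch.autograd.grad and builds updated "fast" weights by hand, keeping the graph so the outer loop can differentiate through the adaptation. Here is one such step on plain parameters; the sizes are placeholders.

import torch
from torch import nn

torch.manual_seed(0)
classifier = nn.Linear(4, 3)
loss_func = nn.CrossEntropyLoss()
feat = torch.randn(8, 4)
target = torch.randint(0, 3, (8,))
lr = 0.01

params = list(classifier.parameters())
loss = loss_func(classifier(feat), target)
# create_graph=True keeps the adaptation step differentiable.
grads = torch.autograd.grad(loss, params, create_graph=True)
fast_weights = [p - lr * g for p, g in zip(params, grads)]
print([w.shape for w in fast_weights])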
gantech/wind-energy
|
[
"e9b14dcbf41f9c74bad9dc8593cc683071d6c6ea"
] |
[
"Pedersen_N07/compare_cases.py"
] |
[
"# coding: utf-8\nimport load_data, sys\nsys.path.insert(1, '../utilities')\nimport windspectra\nimport matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_pdf import PdfPages\n\nl_cases = {\n 'amrwind-ksgs': load_data.AMRWindStats('AmrWindKsgs'),\n 'naluwind-smag': load_data.NaluWindStats('NaluWindRun01'),\n 'naluwind-ksgs': load_data.NaluWindStats('NaluWindRun02'),\n 'Pederson:2014': load_data.PedersonData('pedersen2014_data')\n }\n\nwith PdfPages('nalu_amr_comparison_n07.pdf') as pfpgs:\n plt.style.use('singleColumn')\n fig = plt.figure()\n for l,c in l_cases.items():\n plt.plot(c.hvelmag, c.z, label=l)\n plt.ylim()\n plt.xlabel(r'$< | \\vec{u}_{\\textrm{horiz}} | >$')\n plt.ylabel('z')\n plt.legend(loc=0)\n plt.ylim(0,750)\n plt.grid()\n plt.tight_layout()\n pfpgs.savefig()\n plt.close(fig)\n\n fig = plt.figure()\n for l,c in l_cases.items():\n plt.plot(c.T, c.z, label=l)\n plt.xlabel(r'$<T>$')\n plt.ylabel('z')\n plt.legend(loc=0)\n plt.ylim(0,1000)\n plt.grid()\n plt.tight_layout()\n pfpgs.savefig()\n plt.close(fig)\n\n plt.style.use('singleColumn.square')\n deltaX=3000.0/288 # Grid spacing\n for i, z in enumerate(l_cases['naluwind-ksgs'].ps_data['z']):\n f, suu, svv, sww = l_cases['naluwind-ksgs'].point_spectra(z)\n utau = l_cases['naluwind-ksgs'].istats['ustar']\n hvelmag = l_cases['naluwind-ksgs'].interp_hvelmag(z)\n f_a, suu_a, svv_a, sww_a = l_cases['amrwind-ksgs'].point_spectra(z)\n utau_a = float(l_cases['amrwind-ksgs'].istats['ustar'])\n fmax = 0.6*hvelmag/(8*deltaX)\n \n fig,ax = plt.subplots()\n lineu, = ax.loglog(f, windspectra.getKaimal(f, z, hvelmag ),label='Kaimal - u')\n ax.loglog(f, f * suu/(utau * utau) , '--', label='naluwind-ksgs - u')\n ax.loglog(f_a, f_a * suu_a/(utau_a * utau_a) , '-.', label='amr-wind-ksgs - u')\n plt.vlines(fmax, 5e-4, 20,lw=3, linestyle='-.')\n plt.xlim(1e-4,2)\n plt.ylim(1e-3,15)\n plt.grid()\n plt.xlabel('Frequency $f$ [Hz]')\n plt.ylabel('$f\\cdot S_{u}/u_{\\\\tau}^2$ [-]')\n plt.title('Avg spectra [Neutral N07, z={:.1f}m]'.format(z))\n plt.legend(loc='upper right')\n plt.tight_layout()\n pfpgs.savefig()\n plt.close(fig)\n \n fig,ax = plt.subplots()\n linev, = ax.loglog(f, windspectra.getKaimal(f, z, hvelmag, params=windspectra.vKaimalconst),label='Kaimal - v')\n ax.loglog(f, f * svv/(utau * utau) , '--', label='naluwind-ksgs - v')\n ax.loglog(f_a, f_a * svv_a/(utau_a * utau_a) , '-.', label='amr-wind-ksgs - v')\n plt.vlines(fmax, 5e-4, 20,lw=3, linestyle='-.')\n plt.xlim(1e-4,2)\n plt.ylim(1e-3,15)\n plt.grid()\n plt.xlabel('Frequency $f$ [Hz]')\n plt.ylabel('$f\\cdot S_{v}/u_{\\\\tau}^2$ [-]')\n plt.title('Avg spectra [Neutral N07, z={:.1f}m]'.format(z))\n plt.legend(loc='upper right')\n plt.tight_layout()\n pfpgs.savefig()\n plt.close(fig)\n \n fig,ax = plt.subplots()\n linew, = ax.loglog(f, windspectra.getKaimal(f, z, hvelmag, params=windspectra.wKaimalconst),label='Kaimal - w')\n ax.loglog(f, f * sww/(utau * utau) , '--', label='naluwind-ksgs - w')\n ax.loglog(f_a, f_a * sww_a/(utau_a * utau_a) , '-.', label='amr-wind-ksgs - w')\n plt.vlines(fmax, 5e-4, 20,lw=3, linestyle='-.')\n plt.xlim(1e-4,2)\n plt.ylim(1e-3,15)\n plt.grid()\n plt.xlabel('Frequency $f$ [Hz]')\n plt.ylabel('$f\\cdot S_{w}/u_{\\\\tau}^2$ [-]')\n plt.title('Avg spectra [Neutral N07, z={:.1f}m]'.format(z))\n plt.legend(loc='upper right')\n plt.tight_layout()\n pfpgs.savefig()\n plt.close(fig)\n"
] |
[
[
"matplotlib.backends.backend_pdf.PdfPages",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.close",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.vlines",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.figure"
]
] |
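Illustrative sketch (not part of the dataset record above): the comparison script writes one figure per page through a PdfPages context manager, closing each figure after saving. The same multi-page pattern with synthetic profiles; the curves are invented placeholders.

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages

z = np.linspace(0, 1000, 50)
with PdfPages('profiles.pdf') as pfpgs:
    for label, profile in [('u', np.log1p(z)), ('T', 300 + 0.01 * z)]:
        fig = plt.figure()
        plt.plot(profile, z, label=label)
        plt.xlabel(label)
        plt.ylabel('z')
        plt.legend(loc=0)
        plt.grid()
        plt.tight_layout()
        pfpgs.savefig()      # appends the current figure as a new PDF page
        plt.close(fig)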
XG293/SupConLoss
|
[
"04d069a6d5bc37ac4df80c94327bc6f7da891589"
] |
[
"main_multi_supcon.py"
] |
[
"from __future__ import print_function\n\nimport os\nimport sys\nimport argparse\nimport time\nimport math\n\nimport tensorboard_logger as tb_logger\nimport torch\nimport torch.backends.cudnn as cudnn\nfrom torchvision import transforms, datasets\n\nfrom util import TwoCropTransform, AverageMeter\nfrom util import adjust_learning_rate, warmup_learning_rate\nfrom util import set_optimizer, save_model\nfrom networks.resnet_big import SupConResNet\nfrom networks.wide_resnet import SupConWideResNet\nfrom multi_losses import MultiSupConLoss\nfrom datasets.dataset import SimpleDataset\ntry:\n import apex\n from apex import amp, optimizers\nexcept ImportError:\n pass\n\n\ndef parse_option():\n parser = argparse.ArgumentParser('argument for training')\n\n parser.add_argument('--print_freq', type=int, default=10,\n help='print frequency')\n parser.add_argument('--save_freq', type=int, default=50,\n help='save frequency')\n parser.add_argument('--batch_size', type=int, default=256,\n help='batch_size')\n parser.add_argument('--num_workers', type=int, default=12,\n help='num of workers to use')\n parser.add_argument('--epochs', type=int, default=1000,\n help='number of training epochs')\n\n # optimization\n parser.add_argument('--learning_rate', type=float, default=0.05,\n help='learning rate')\n parser.add_argument('--lr_decay_epochs', type=str, default='700,800,900',\n help='where to decay lr, can be a list')\n parser.add_argument('--lr_decay_rate', type=float, default=0.1,\n help='decay rate for learning rate')\n parser.add_argument('--weight_decay', type=float, default=1e-4,\n help='weight decay')\n parser.add_argument('--momentum', type=float, default=0.9,\n help='momentum')\n\n # model dataset\n parser.add_argument('--model', type=str, default='resnet50')\n parser.add_argument('--dataset', type=str, default='cifar10',\n choices=['cifar10', 'cifar100', 'SD198-20', 'path'], help='dataset')\n parser.add_argument('--mean', type=str, help='mean of dataset in path in form of str tuple')\n parser.add_argument('--std', type=str, help='std of dataset in path in form of str tuple')\n parser.add_argument('--data_folder', type=str, default=None, help='path to custom dataset')\n parser.add_argument('--size', type=int, default=224, help='parameter for RandomResizedCrop')\n\n # method\n parser.add_argument('--method', type=str, default='MultiSupCon',\n choices=['MultiSupCon'], help='choose method')\n\n # temperature\n parser.add_argument('--temp', type=float, default=0.07,\n help='temperature for loss function')\n\n # other setting\n parser.add_argument('--cosine', action='store_true',\n help='using cosine annealing')\n parser.add_argument('--syncBN', action='store_true',\n help='using synchronized batch normalization')\n parser.add_argument('--warm', action='store_true',\n help='warm-up for large batch training')\n parser.add_argument('--trial', type=str, default='0',\n help='id for recording multiple runs')\n\n opt = parser.parse_args()\n\n # check if dataset is path that passed required arguments\n if opt.dataset == 'path':\n assert opt.data_folder is not None \\\n and opt.mean is not None \\\n and opt.std is not None\n\n # set the path according to the environment\n if opt.data_folder is None:\n opt.data_folder = './datasets/'\n opt.model_path = './save/SupCon/{}_models'.format(opt.dataset)\n opt.tb_path = './save/SupCon/{}_tensorboard'.format(opt.dataset)\n\n iterations = opt.lr_decay_epochs.split(',')\n opt.lr_decay_epochs = list([])\n for it in iterations:\n opt.lr_decay_epochs.append(int(it))\n\n 
opt.model_name = '{}_{}_{}_lr_{}_decay_{}_bsz_{}_temp_{}_trial_{}'.\\\n format(opt.method, opt.dataset, opt.model, opt.learning_rate,\n opt.weight_decay, opt.batch_size, opt.temp, opt.trial)\n\n if opt.cosine:\n opt.model_name = '{}_cosine'.format(opt.model_name)\n\n # warm-up for large-batch training,\n if opt.batch_size > 256:\n opt.warm = True\n if opt.warm:\n opt.model_name = '{}_warm'.format(opt.model_name)\n opt.warmup_from = 0.01\n opt.warm_epochs = 10\n if opt.cosine:\n eta_min = opt.learning_rate * (opt.lr_decay_rate ** 3)\n opt.warmup_to = eta_min + (opt.learning_rate - eta_min) * (\n 1 + math.cos(math.pi * opt.warm_epochs / opt.epochs)) / 2\n else:\n opt.warmup_to = opt.learning_rate\n\n opt.tb_folder = os.path.join(opt.tb_path, opt.model_name)\n if not os.path.isdir(opt.tb_folder):\n os.makedirs(opt.tb_folder)\n\n opt.save_folder = os.path.join(opt.model_path, opt.model_name)\n if not os.path.isdir(opt.save_folder):\n os.makedirs(opt.save_folder)\n\n return opt\n\n\ndef set_loader(opt):\n # construct data loader\n if opt.dataset == 'cifar10':\n mean = (0.4914, 0.4822, 0.4465)\n std = (0.2023, 0.1994, 0.2010)\n elif opt.dataset == 'cifar100':\n mean = (0.5071, 0.4867, 0.4408)\n std = (0.2675, 0.2565, 0.2761)\n elif opt.dataset == 'SD198-20':\n mean= (0.485, 0.456, 0.406) \n std=(0.229, 0.224, 0.225)\n elif opt.dataset == 'path':\n mean = eval(opt.mean)\n std = eval(opt.std)\n else:\n raise ValueError('dataset not supported: {}'.format(opt.dataset))\n normalize = transforms.Normalize(mean=mean, std=std)\n # ['RandomResizedCrop', 'ImageJitter', 'RandomRotation', 'RandomHorizontalFlip', 'ToTensor', 'Normalize']\n train_transform = transforms.Compose([\n transforms.RandomResizedCrop(size=opt.size, scale=(0.8, 1.2)),\n transforms.RandomRotation(degrees=30),\n transforms.RandomHorizontalFlip(),\n transforms.RandomApply([\n transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)\n ], p=0.8),\n transforms.RandomGrayscale(p=0.2),\n transforms.ToTensor(),\n normalize,\n ])\n\n if opt.dataset == 'cifar10':\n train_dataset = datasets.CIFAR10(root=opt.data_folder,\n transform=TwoCropTransform(train_transform),\n download=True)\n elif opt.dataset == 'cifar100':\n train_dataset = datasets.CIFAR100(root=opt.data_folder,\n transform=TwoCropTransform(train_transform),\n download=True)\n elif opt.dataset == 'SD198-20':\n ## opt.data_folder = '/home/slidm/SkinLesionData/SD-198-20/base.json'\n train_dataset = SimpleDataset(data_file=opt.data_folder,\n transform=TwoCropTransform(train_transform))\n elif opt.dataset == 'path':\n train_dataset = datasets.ImageFolder(root=opt.data_folder,\n transform=TwoCropTransform(train_transform))\n else:\n raise ValueError(opt.dataset)\n\n train_sampler = None\n train_loader = torch.utils.data.DataLoader(\n train_dataset, batch_size=opt.batch_size, shuffle=(train_sampler is None),\n num_workers=opt.num_workers, pin_memory=True, sampler=train_sampler)\n\n return train_loader\n\n\ndef set_model(opt):\n model = SupConWideResNet(name='wrn28_10', head='mlp', feat_dim=256)\n criterion = MultiSupConLoss(temperature=opt.temp)\n\n # enable synchronized Batch Normalization\n if opt.syncBN:\n model = apex.parallel.convert_syncbn_model(model)\n\n if torch.cuda.is_available():\n if torch.cuda.device_count() > 1:\n model.encoder = torch.nn.DataParallel(model.encoder)\n model = model.cuda()\n criterion = criterion.cuda()\n cudnn.benchmark = True\n\n return model, criterion\n\n\ndef train(train_loader, model, criterion, optimizer, epoch, opt):\n \"\"\"one epoch training\"\"\"\n 
model.train()\n\n batch_time = AverageMeter()\n data_time = AverageMeter()\n losses = AverageMeter()\n\n end = time.time()\n for idx, (images, labels) in enumerate(train_loader):\n data_time.update(time.time() - end)\n \n images = torch.cat([images[0], images[1]], dim=0) # cat 2 [16, 3, 224, 224] to 1 [32, 3, 224, 224]\n \n if torch.cuda.is_available():\n images = images.cuda(non_blocking=True)\n labels = labels.cuda(non_blocking=True)\n bsz = labels.shape[0]\n\n # warm-up learning rate\n warmup_learning_rate(opt, epoch, idx, len(train_loader), optimizer)\n\n # compute loss\n features = model(images)\n #print(\"Features size:\", features.size()) Features size: torch.Size([32, 128])\n f1, f2 = torch.split(features, [bsz, bsz], dim=0)\n features = torch.cat([f1.unsqueeze(1), f2.unsqueeze(1)], dim=1)\n \n if opt.method == 'MultiSupCon':\n loss = criterion(features, labels)\n else:\n raise ValueError('contrastive method not supported: {}'.\n format(opt.method))\n\n # update metric\n losses.update(loss.item(), bsz)\n\n # SGD\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n # print info\n if (idx + 1) % opt.print_freq == 0:\n print('Train: [{0}][{1}/{2}]\\t'\n 'BT {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'DT {data_time.val:.3f} ({data_time.avg:.3f})\\t'\n 'loss {loss.val:.3f} ({loss.avg:.3f})'.format(\n epoch, idx + 1, len(train_loader), batch_time=batch_time,\n data_time=data_time, loss=losses))\n sys.stdout.flush()\n\n return losses.avg\n\n\ndef main():\n opt = parse_option()\n\n\n # build data loader\n train_loader = set_loader(opt)\n\n # build model and criterion\n model, criterion = set_model(opt)\n\n # build optimizer\n optimizer = set_optimizer(opt, model)\n\n # tensorboard\n logger = tb_logger.Logger(logdir=opt.tb_folder, flush_secs=2)\n\n # training routine\n for epoch in range(1, opt.epochs + 1):\n adjust_learning_rate(opt, optimizer, epoch)\n\n # train for one epoch\n time1 = time.time()\n loss = train(train_loader, model, criterion, optimizer, epoch, opt)\n time2 = time.time()\n print('epoch {}, total time {:.2f}'.format(epoch, time2 - time1))\n\n # tensorboard logger\n logger.log_value('loss', loss, epoch)\n logger.log_value('learning_rate', optimizer.param_groups[0]['lr'], epoch)\n\n if epoch % opt.save_freq == 0:\n save_file = os.path.join(\n opt.save_folder, 'ckpt_epoch_{epoch}.pth'.format(epoch=epoch))\n save_model(model, optimizer, opt, epoch, save_file)\n\n # save the last model\n save_file = os.path.join(\n opt.save_folder, 'last.pth')\n save_model(model, optimizer, opt, opt.epochs, save_file)\n\n\nif __name__ == '__main__':\n main()\n torch.cuda.empty_cache()\n\n\n# python main_supcon.py --dataset SD198-20 --data_folder /home/slidm/SkinLesionData/SD-198-20/base.json --batch_size 1024 --learning_rate 0.5 --temp 0.1 --cosine\n# CUDA_VISIBLE_DEVICES=1,2,3,4 python main_supcon.py --dataset SD198-20 --data_folder /home/slidm/SkinLesionData/SD-198-20/base.json --batch_size 32 --learning_rate 0.06 --temp 0.07 --cosine\n# CUDA_VISIBLE_DEVICES=2,3 python main_multi_supcon.py --dataset SD198-20 --data_folder /home/slidm/SkinLesionData/SD-198-20/base.json --batch_size 16 --learning_rate 0.06 --temp 0.07 --cosine --syncBN"
] |
[
[
"torch.cat",
"torch.utils.data.DataLoader",
"torch.cuda.empty_cache",
"torch.nn.DataParallel",
"torch.cuda.is_available",
"torch.split",
"torch.cuda.device_count"
]
] |
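Illustrative sketch (not part of the dataset record above): the SupCon training step concatenates both crops for a single forward pass, then splits the features back out and stacks them into a (batch, n_views, feat_dim) tensor for the contrastive loss. The tensor plumbing in isolation; the random features stand in for model(images).

import torch

bsz, feat_dim = 16, 128
view1 = torch.randn(bsz, 3, 32, 32)
view2 = torch.randn(bsz, 3, 32, 32)
images = torch.cat([view1, view2], dim=0)        # (2*bsz, 3, 32, 32)
print(images.shape)
features = torch.randn(2 * bsz, feat_dim)        # stand-in for model(images)
f1, f2 = torch.split(features, [bsz, bsz], dim=0)
features = torch.cat([f1.unsqueeze(1), f2.unsqueeze(1)], dim=1)
print(features.shape)                            # torch.Size([16, 2, 128])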
learnerzhang/trade
|
[
"762ae0eb52562c86da11876b8a3c2660b19b0f7d"
] |
[
"api_modules/server/pipeline/syncup_data.py"
] |
[
"# -*- encoding: UTF-8 -*-\nfrom api_modules.server.utils import stock_utils\nfrom api_modules.server.utils.config import ConfigUtils\nfrom multiprocessing import Pool\nimport baostock as bs\nimport pandas as pd\nimport logging\nimport os\n\nlogging.basicConfig(format='%(asctime)s %(message)s', filename='sequoia.log')\nlogging.getLogger().setLevel(logging.INFO)\n\n\"\"\"\n 同步stock数据\n\"\"\"\ndef get_all_stock_names():\n # 登陆系统 ####\n lg = bs.login()\n # 显示登陆返回信息\n print('login respond error_code:' + lg.error_code + ', error_msg:' + lg.error_msg)\n dt = stock_utils.get_recently_trade_date()\n dt = '2020-08-03'\n k_rs = bs.query_all_stock(day=dt)\n print(k_rs)\n data_list = []\n while (k_rs.error_code == '0') & k_rs.next():\n # 获取一条记录,将记录合并在一起\n data_list.append(k_rs.get_row_data())\n result = pd.DataFrame(data_list, columns=k_rs.fields)\n print(result.tail())\n result.to_csv(ConfigUtils.get_stock(\"STOCK_NAME\"), index=False)\n print(\"init all stock names\")\n bs.logout()\n\n\ndef get_all_stock_industries():\n lg = bs.login()\n print('login respond error_code:'+lg.error_code)\n print('login respond error_msg:'+lg.error_msg)\n\n # 获取行业分类数据\n rs = bs.query_stock_industry(date='2020-08-01')\n # rs = bs.query_stock_basic(code_name=\"浦发银行\")\n print('query_stock_industry error_code:'+rs.error_code)\n print('query_stock_industry respond error_msg:'+rs.error_msg)\n\n # 打印结果集\n industry_list = []\n while (rs.error_code == '0') & rs.next():\n # 获取一条记录,将记录合并在一起\n industry_list.append(rs.get_row_data())\n result = pd.DataFrame(industry_list, columns=rs.fields)\n # 结果集输出到csv文件\n result.to_csv(ConfigUtils.get_stock(\"STOCK_INDUSTRY\"), index=False)\n print(result)\n # 登出系统\n bs.logout()\n\n\ndef loading_stock(rows, st, et):\n for index, row in rows:\n code = row['code']\n name = row['code_name']\n k_rs = bs.query_history_k_data_plus(code, ConfigUtils.get_stock(\"STOCK_FIELDS\"), start_date=st, end_date=et)\n data_list = []\n while (k_rs.error_code == '0') & k_rs.next():\n # 获取一条记录,将记录合并在一起\n data_list.append(k_rs.get_row_data())\n result = pd.DataFrame(data_list, columns=k_rs.fields)\n print(result.tail())\n if not os.path.exists(ConfigUtils.get_stock(\"DATA_DIR\")):\n os.makedirs(ConfigUtils.get_stock(\"DATA_DIR\"))\n result.to_csv(os.path.join(ConfigUtils.get_stock(\"DATA_DIR\"), code+\"_\"+name+\".csv\"), index=False)\n print(\"Downloading :\" + code + \" , name :\" + name)\n\n\ndef update_trades():\n try:\n et = stock_utils.get_recently_trade_date()\n st = ConfigUtils.get_stock(\"START_DATE\")\n # 登陆系统 ####\n lg = bs.login()\n # 显示登陆返回信息\n print('login respond error_code:' + lg.error_code + ', error_msg:' + lg.error_msg)\n\n pd_names = pd.read_csv(ConfigUtils.get_stock(\"STOCK_NAME\"))\n data = list(pd_names.iterrows())\n\n # multi processing\n number_kernel = 1\n size = int((len(data) + number_kernel -1) / number_kernel)\n p = Pool(number_kernel)\n for i in range(number_kernel):\n \n start = size * i\n _end = size * (i+1)\n end = len(data) if _end > len(data) else _end\n\n p.apply_async(loading_stock, args=(data[start: end], st, et))\n \n p.close()\n p.join()\n print('all subprocesses done.')\n bs.logout()\n except IOError as e:\n print(\"Update Data Error \", e)\n\n\nif __name__ == '__main__':\n get_all_stock_names()\n # get_all_stock_industries()\n # update_trades()\n"
] |
[
[
"pandas.DataFrame"
]
] |
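Illustrative sketch (not part of the dataset record above): the sync script drains a baostock result cursor into a row list, then builds one pandas DataFrame and writes CSV. The same accumulate-then-frame pattern with a plain iterator standing in for the cursor; the field names and rows are placeholders.

import pandas as pd

fields = ["code", "date", "close"]
rows = iter([["sh.600000", "2020-08-03", "10.1"],
             ["sh.600000", "2020-08-04", "10.3"]])
data_list = []
for row in rows:          # stands in for `while rs.next(): rs.get_row_data()`
    data_list.append(row)
result = pd.DataFrame(data_list, columns=fields)
result.to_csv("stock.csv", index=False)
print(result.tail())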
Yard1/tune-sklearn
|
[
"2b9eecc6fb28963b9b4b80ceb89c138e86fb21c7"
] |
[
"tests/test_randomizedsearch.py"
] |
[
"from tune_sklearn import TuneSearchCV\nimport numpy as np\nfrom numpy.testing import assert_array_equal\nfrom sklearn.datasets import make_classification\nfrom scipy.stats import expon\nfrom sklearn.svm import SVC\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn import datasets\nfrom skopt.space.space import Real\nfrom ray.tune.schedulers import MedianStoppingRule\nimport unittest\nimport os\n\n\nclass RandomizedSearchTest(unittest.TestCase):\n def test_random_search_cv_results(self):\n # Make a dataset with a lot of noise to get various kind of prediction\n # errors across CV folds and parameter settings\n X, y = make_classification(\n n_samples=50, n_features=50, n_informative=3, random_state=0)\n\n # scipy.stats dists now supports `seed` but we still support scipy 0.12\n # which doesn't support the seed. Hence the assertions in the test for\n # random_search alone should not depend on randomization.\n n_splits = 3\n n_search_iter = 30\n params = dict(C=expon(scale=10), gamma=expon(scale=0.1))\n random_search = TuneSearchCV(\n SVC(),\n n_iter=n_search_iter,\n cv=n_splits,\n param_distributions=params,\n return_train_score=True,\n n_jobs=2)\n random_search.fit(X, y)\n\n param_keys = (\"param_C\", \"param_gamma\")\n score_keys = (\n \"mean_test_score\",\n \"mean_train_score\",\n \"rank_test_score\",\n \"rank_train_score\",\n \"split0_test_score\",\n \"split1_test_score\",\n \"split2_test_score\",\n \"split0_train_score\",\n \"split1_train_score\",\n \"split2_train_score\",\n \"std_test_score\",\n \"std_train_score\",\n \"time_total_s\",\n )\n n_cand = n_search_iter\n\n def test_check_cv_results_array_types(cv_results, param_keys,\n score_keys):\n # Check if the search `cv_results`'s array are of correct types\n self.assertTrue(\n all(\n isinstance(cv_results[param], np.ma.MaskedArray)\n for param in param_keys))\n self.assertTrue(\n all(cv_results[key].dtype == object for key in param_keys))\n self.assertFalse(\n any(\n isinstance(cv_results[key], np.ma.MaskedArray)\n for key in score_keys))\n self.assertTrue(\n all(cv_results[key].dtype == np.float64 for key in score_keys\n if not key.startswith(\"rank\")))\n self.assertEquals(cv_results[\"rank_test_score\"].dtype, np.int32)\n\n def test_check_cv_results_keys(cv_results, param_keys, score_keys,\n n_cand):\n # Test the search.cv_results_ contains all the required results\n assert_array_equal(\n sorted(cv_results.keys()),\n sorted(param_keys + score_keys + (\"params\", )))\n self.assertTrue(\n all(cv_results[key].shape == (n_cand, )\n for key in param_keys + score_keys))\n\n cv_results = random_search.cv_results_\n # Check results structure\n test_check_cv_results_array_types(cv_results, param_keys, score_keys)\n test_check_cv_results_keys(cv_results, param_keys, score_keys, n_cand)\n # For random_search, all the param array vals should be unmasked\n self.assertFalse(\n any(cv_results[\"param_C\"].mask)\n or any(cv_results[\"param_gamma\"].mask))\n\n def test_local_dir(self):\n digits = datasets.load_digits()\n x = digits.data\n y = digits.target\n\n clf = SGDClassifier()\n parameter_grid = {\n \"alpha\": Real(1e-4, 1e-1, 1),\n \"epsilon\": Real(0.01, 0.1)\n }\n\n scheduler = MedianStoppingRule(grace_period=10.0)\n\n tune_search = TuneSearchCV(\n clf,\n parameter_grid,\n early_stopping=scheduler,\n max_iters=10,\n local_dir=\"./test-result\")\n tune_search.fit(x, y)\n\n self.assertTrue(len(os.listdir(\"./test-result\")) != 0)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] |
[
[
"sklearn.datasets.make_classification",
"scipy.stats.expon",
"sklearn.datasets.load_digits",
"sklearn.svm.SVC",
"sklearn.linear_model.SGDClassifier"
]
] |
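Illustrative sketch (not part of the dataset record above): the test passes frozen scipy distributions as the parameter space, so each search iteration samples a configuration. Drawing one such sample directly shows what the search receives; tune_sklearn itself is not required here.

from scipy.stats import expon

params = dict(C=expon(scale=10), gamma=expon(scale=0.1))
# One sampled hyperparameter configuration from the frozen distributions.
sample = {k: v.rvs(random_state=0) for k, v in params.items()}
print(sample)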
Lactozilla/corrscope
|
[
"9d7da86019b20c9aef75d10fcafd028d4b782b46"
] |
[
"corrscope/wave.py"
] |
[
"import copy\nimport enum\nfrom typing import Union, List\n\nimport numpy as np\n\nimport corrscope.utils.scipy.wavfile as wavfile\nfrom corrscope.config import CorrError, TypedEnumDump\n\nFLOAT = np.single\n\n# Depends on FLOAT\nfrom corrscope.utils.windows import rightpad\n\n\[email protected]\nclass Flatten(str, TypedEnumDump):\n \"\"\" How to flatten a stereo signal. (Channels beyond first 2 are ignored.)\n\n Flatten(0) == Flatten.Stereo == Flatten['Stereo']\n \"\"\"\n\n # Keep both channels.\n Stereo = \"stereo\"\n\n # Mono\n Mono = \"1\" # NOT publicly exposed\n\n # Take sum or difference.\n SumAvg = \"1 1\"\n DiffAvg = \"1, -1\"\n\n def __str__(self):\n return self.value\n\n # Both our app and GUI treat:\n # - Flatten.SumAvg -> \"sum of all channels\"\n # - \"1 1\" -> \"assert nchan == 2, left + right\".\n # - \"1 0\" -> \"assert nchan == 2, left\".\n def __eq__(self, other):\n return self is other\n\n def __hash__(self):\n return hash(self.value)\n\n modes: List[\"Flatten\"]\n\n\nassert \"1\" == str(Flatten.Mono)\nassert not \"1\" == Flatten.Mono\nassert not Flatten.Mono == \"1\"\n\nFlattenOrStr = Union[Flatten, str]\n\n\ndef calc_flatten_matrix(flatten: FlattenOrStr, stereo_nchan: int) -> np.ndarray:\n \"\"\" Raises CorrError on invalid input.\n\n If flatten is Flatten.Stereo, returns shape=(nchan,nchan) identity matrix.\n - (N,nchan) @ (nchan,nchan) = (N,nchan).\n\n Otherwise, returns shape=(nchan) flattening matrix.\n - (N,nchan) @ (nchan) = (N)\n\n https://docs.scipy.org/doc/numpy/reference/generated/numpy.matmul.html#numpy.matmul\n '''\n If the second argument is 1-D,\n it is promoted to a matrix by appending a 1 to its dimensions.\n After matrix multiplication the appended 1 is removed.\"\n '''\n \"\"\"\n\n if flatten is Flatten.Stereo:\n # 2D identity (results in 2-dim data)\n flatten_matrix = np.eye(stereo_nchan, dtype=FLOAT)\n\n # 1D (results in 1-dim data)\n elif flatten is Flatten.SumAvg:\n flatten_matrix = np.ones(stereo_nchan, dtype=FLOAT) / stereo_nchan\n\n elif flatten is Flatten.DiffAvg:\n flatten_matrix = calc_flatten_matrix(str(flatten), stereo_nchan)\n flatten_matrix = rightpad(flatten_matrix, stereo_nchan, 0)\n\n else:\n words = flatten.replace(\",\", \" \").split()\n try:\n flatten_matrix = np.array([FLOAT(word) for word in words])\n except ValueError as e:\n raise CorrError(\"Invalid stereo flattening matrix\") from e\n\n flatten_abs_sum = np.sum(np.abs(flatten_matrix))\n if flatten_abs_sum == 0:\n raise CorrError(\"Stereo flattening matrix must have nonzero elements\")\n\n flatten_matrix /= flatten_abs_sum\n\n assert flatten_matrix.dtype == FLOAT, flatten_matrix.dtype\n return flatten_matrix\n\n\n_rejected_modes = {Flatten.Mono}\nFlatten.modes = [f for f in Flatten.__members__.values() if f not in _rejected_modes]\n\n\nclass Wave:\n smp_s: int\n data: np.ndarray\n\n _flatten: FlattenOrStr\n flatten_matrix: np.ndarray\n\n @property\n def flatten(self) -> Flatten:\n \"\"\"\n If data is stereo:\n - flatten can be Stereo (2D) or Sum/Diff(Avg) (1D).\n\n If data is mono:\n - flatten can be Stereo (2D) or Mono (1D).\n - If flatten != Stereo, set flatten = Mono.\n \"\"\"\n return self._flatten\n\n @flatten.setter\n def flatten(self, flatten: FlattenOrStr) -> None:\n # Reject invalid modes (including Mono).\n if flatten in _rejected_modes:\n # Flatten.Mono not in Flatten.modes.\n raise CorrError(\n f\"Wave {self.wave_path} has invalid flatten mode {flatten} \"\n f\"not a numeric string, nor in {Flatten.modes}\"\n )\n\n # If self.is_mono, converts all non-Stereo modes 
to Mono.\n self._flatten = flatten\n if self.is_mono and flatten != Flatten.Stereo:\n self._flatten = Flatten.Mono\n\n self.flatten_matrix = calc_flatten_matrix(self._flatten, self.stereo_nchan)\n\n def __init__(\n self,\n wave_path: str,\n amplification: float = 1.0,\n flatten: FlattenOrStr = Flatten.SumAvg,\n ):\n self.wave_path = wave_path\n self.amplification = amplification\n self.offset = 0\n\n # self.data: 2-D array of shape (nsamp, nchan)\n self.smp_s, self.data = wavfile.read(wave_path, mmap=True)\n\n assert self.data.ndim in [1, 2]\n self.is_mono = self.data.ndim == 1\n self.return_channels = False\n\n # Cast self.data to stereo (nsamp, nchan)\n if self.is_mono:\n self.data.shape = (-1, 1)\n\n self.nsamp, self.stereo_nchan = self.data.shape\n\n # Depends on self.stereo_nchan\n self.flatten = flatten\n\n # Calculate scaling factor.\n dtype = self.data.dtype\n\n def is_type(parent: type) -> bool:\n return np.issubdtype(dtype, parent)\n\n # Numpy types: https://docs.scipy.org/doc/numpy/reference/arrays.scalars.html\n if is_type(np.integer):\n max_int = np.iinfo(dtype).max + 1\n assert max_int & (max_int - 1) == 0 # power of 2\n\n if is_type(np.unsignedinteger):\n self.center = max_int // 2\n self.max_val = max_int // 2\n\n elif is_type(np.signedinteger):\n self.center = 0\n self.max_val = max_int\n\n elif is_type(np.floating):\n self.center = 0\n self.max_val = 1\n\n else:\n raise CorrError(f\"unexpected wavfile dtype {dtype}\")\n\n def with_flatten(self, flatten: FlattenOrStr, return_channels: bool) -> \"Wave\":\n new = copy.copy(self)\n new.flatten = flatten\n new.return_channels = return_channels\n return new\n\n def with_offset(self, offset: float):\n \"\"\"offset is applied *after* amplification,\n and corresponds directly to the output signal.\"\"\"\n\n new = copy.copy(self)\n new.offset = offset\n return new\n\n def __getitem__(self, index: Union[int, slice]) -> np.ndarray:\n \"\"\" Copies self.data[item], converted to a FLOAT within range [-1, 1). \"\"\"\n # subok=False converts data from memmap (slow) to ndarray (faster).\n data: np.ndarray = self.data[index].astype(FLOAT, subok=False, copy=True)\n\n # Flatten stereo to mono.\n data = data @ self.flatten_matrix\n\n data -= self.center\n data *= self.amplification / self.max_val\n data += self.offset\n\n if self.return_channels and len(data.shape) == 1:\n data = data.reshape(-1, 1)\n return data\n\n def _get(self, begin: int, end: int, subsampling: int) -> np.ndarray:\n \"\"\" Copies self.data[begin:end] with zero-padding. 
\"\"\"\n if 0 <= begin and end <= self.nsamp:\n return self[begin:end:subsampling]\n\n region_len = end - begin\n\n def constrain(idx: int) -> int:\n delta = 0\n if idx < 0:\n delta = 0 - idx # delta > 0\n assert idx + delta == 0\n\n if idx > self.nsamp:\n delta = self.nsamp - idx # delta < 0\n assert idx + delta == self.nsamp\n\n return delta\n\n begin_index = constrain(begin)\n end_index = region_len + constrain(end)\n del end\n data = self[begin + begin_index : begin + end_index : subsampling]\n\n # Compute subsampled output ranges\n out_len = region_len // subsampling\n out_begin = begin_index // subsampling\n out_end = out_begin + len(data)\n # len(data) == ceil((end_index - begin_index) / subsampling)\n\n out = np.zeros((out_len, *data.shape[1:]), dtype=FLOAT)\n\n out[out_begin:out_end] = data\n\n return out\n\n def get_around(self, sample: int, return_nsamp: int, stride: int) -> np.ndarray:\n \"\"\" Returns `return_nsamp` samples, centered around `sample`,\n sampled with spacing `stride`.\n result[N//2] == self[sample].\n See designNotes.md and CorrelationTrigger docstring.\n\n Copies self.data[...]. \"\"\"\n\n begin = sample - (return_nsamp // 2) * stride\n end = begin + return_nsamp * stride\n return self._get(begin, end, stride)\n\n def get_s(self) -> float:\n \"\"\"\n :return: time (seconds)\n \"\"\"\n return self.nsamp / self.smp_s\n"
] |
[
[
"numpy.abs",
"numpy.eye",
"numpy.issubdtype",
"numpy.ones",
"numpy.iinfo",
"numpy.zeros"
]
] |
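
A minimal sketch of the zero-padded, strided windowing idea behind `Wave._get()` and `get_around()` above, rewritten as a standalone function over a plain 1-D ndarray (the function body and the test values here are illustrative, not part of the original file):

    import numpy as np

    def get_around(data, sample, return_nsamp, stride):
        """Return `return_nsamp` samples centered on `sample`, spaced `stride`
        apart, zero-padding wherever the window falls outside `data`."""
        begin = sample - (return_nsamp // 2) * stride
        out = np.zeros(return_nsamp, dtype=np.float64)
        for i in range(return_nsamp):
            idx = begin + i * stride
            if 0 <= idx < len(data):
                out[i] = data[idx]
        return out

    wave = np.arange(10, dtype=np.float64)
    print(get_around(wave, sample=1, return_nsamp=6, stride=1))
    # -> [0. 0. 0. 1. 2. 3.]; out[return_nsamp // 2] == wave[sample]
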
joergdietrich/NFW
|
[
"58b0ff6b5382461e6053e12c75d35543dd3f8b13"
] |
[
"NFW/tests/test_mass_concentration.py"
] |
[
"import numpy as np\nfrom numpy.testing import (TestCase, assert_array_equal, assert_equal,\n assert_almost_equal, assert_array_almost_equal,\n assert_raises)\nfrom numpy.testing.decorators import knownfailureif\n\nimport astropy.cosmology\nfrom astropy import units as u\ntry:\n from astropy.tests.helper import assert_quantity_allclose\nexcept ImportError:\n # Monkey patching failing travis test for numpy-1.8\n def assert_quantity_allclose(x, y):\n x = x.to(y.unit)\n np.testing.assert_allclose(x.value, y.value)\n\n\nfrom NFW import mass_concentration\nfrom NFW.nfw import NFW\n\n\nclass TestMc(TestCase):\n @classmethod\n def setup_class(cls):\n cls._cosmo = astropy.cosmology.FlatLambdaCDM(70, 0.3, Tcmb0=0)\n astropy.cosmology.default_cosmology.set(cls._cosmo)\n\n def test_duffy_concentration(self):\n m200 = 1e13, 5e13, 1e14, 1e15\n zl = 1, 0.5, 1, 0.3\n result = (3.71065258,\n 3.71071859,\n 3.05809022,\n 3.08589409)\n c = mass_concentration.duffy_concentration(m200, zl, self._cosmo)\n assert_almost_equal(c, result)\n assert(isinstance(c, np.ndarray))\n # Assure results stay the same\n m200 = u.Quantity(m200, u.solMass)\n c = mass_concentration.duffy_concentration(m200, zl, self._cosmo)\n assert_almost_equal(c, result)\n c = mass_concentration.duffy_concentration(m200[0], zl[0],\n self._cosmo)\n assert(isinstance(c, float))\n\n def test_dolag_concentration(self):\n m200 = 1e13, 5e13, 1e14, 1e15\n zl = 1, 0.5, 1, 0.3\n result = (6.28910161,\n 7.11594213,\n 4.97265823,\n 6.04888398)\n c = mass_concentration.dolag_concentration(m200, zl, self._cosmo)\n assert_almost_equal(c, result)\n assert(isinstance(c, np.ndarray))\n # Assure results stay the same\n m200 = u.Quantity(m200, u.solMass)\n c = mass_concentration.dolag_concentration(m200, zl, self._cosmo)\n assert_almost_equal(c, result)\n c = mass_concentration.dolag_concentration(m200[0], zl[0],\n self._cosmo)\n assert(isinstance(c, float))\n\n def _mdelta_to_mdelta_via_m200(self, m_in, func, overdensity_in,\n overdensity_out, z):\n m200 = mass_concentration.mdelta_to_m200(m_in, func, overdensity_in,\n (z, self._cosmo))\n nfw = NFW(m200, func(m200, z, self._cosmo), z)\n m_out = nfw.mass_Delta(overdensity_out)\n return m_out\n\n def test_mdelta_to_mdelta(self):\n func = mass_concentration.duffy_concentration\n # Consistency\n z = 0.3\n m_in = u.Quantity(5e14, u.solMass)\n mdelta = mass_concentration.mdelta_to_mdelta(5e14, func, 500, 200,\n (z, self._cosmo))\n c = func(mdelta, z, self._cosmo)\n nfw = NFW(mdelta, c, z)\n m_out = nfw.mass_Delta(500)\n assert_quantity_allclose(m_in, m_out)\n\n mdelta1 = mass_concentration.mdelta_to_mdelta(m_in, func, 200, 500,\n (z, self._cosmo))\n nfw = NFW(m_in, func(m_in, z, self._cosmo), z)\n mdelta2 = nfw.mass_Delta(500)\n assert_quantity_allclose(mdelta1, mdelta2)\n # common cases:\n m_in = u.Quantity(1e14, u.solMass)\n z = 0\n mdelta1 = mass_concentration.mdelta_to_mdelta(m_in, func, 2500, 500,\n (z, self._cosmo))\n mdelta2 = self._mdelta_to_mdelta_via_m200(m_in, func, 2500, 500, z)\n assert_quantity_allclose(mdelta1, mdelta2)\n\n # Test some extreme cases\n # first almost equal input and output overdensities\n m_in = u.Quantity(1e14, u.solMass)\n z = 1\n m200 = mass_concentration.mdelta_to_mdelta(m_in, func, 199, 200,\n (z, self._cosmo))\n m_out = mass_concentration.mdelta_to_mdelta(m200, func, 200, 199,\n (z, self._cosmo))\n assert_quantity_allclose(m_in, m_out)\n\n # identical input/output overdensity\n mdelta = mass_concentration.mdelta_to_mdelta(1e14, func, 200, 200,\n (1, self._cosmo))\n 
assert_equal(mdelta.value, 1e14)\n\n # Large overdensity_in, small overdensity_out\n m_in = 1e15\n z = 0\n mdelta1 = mass_concentration.mdelta_to_mdelta(m_in, func, 2500, 50,\n (z, self._cosmo))\n mdelta2 = self._mdelta_to_mdelta_via_m200(m_in, func, 2500, 50, z)\n assert_quantity_allclose(mdelta1, mdelta2)\n\n # Small overdensity_in, large overdensity_out, small halo mass\n m_in = 1e9\n z = 1\n mdelta1 = mass_concentration.mdelta_to_mdelta(m_in, func, 50, 2500,\n (z, self._cosmo))\n mdelta2 = self._mdelta_to_mdelta_via_m200(m_in, func, 50, 2500, z)\n assert_quantity_allclose(mdelta1, mdelta2)\n\n def test_mdelta_to_m200(self):\n m_in = u.Quantity(2e14, u.solMass)\n z = 0.2\n func = mass_concentration.duffy_concentration\n delta_in = 450\n # consistency with mdelta_to_mdelta\n md1 = mass_concentration.mdelta_to_m200(m_in, func, delta_in,\n (z, self._cosmo))\n md2 = mass_concentration.mdelta_to_mdelta(m_in, func,\n delta_in, 200,\n (z, self._cosmo))\n assert_quantity_allclose(md1, md2)\n # consistency with mass_Delta in NFW\n nfw = NFW(md1, func(md1, z, self._cosmo), z)\n m_out = nfw.mass_Delta(450)\n assert_quantity_allclose(m_in, m_out)\n\n def test_m200_to_mdelta(self):\n m_in = u.Quantity(4e14, u.solMass)\n z = 0.45\n func = mass_concentration.duffy_concentration\n mdelta = mass_concentration.m200_to_mdelta(m_in, func, 500,\n (z, self._cosmo))\n nfw = NFW(m_in, func(m_in, z, self._cosmo), z)\n m500 = nfw.mass_Delta(500)\n assert_quantity_allclose(mdelta, m500)\n"
] |
[
[
"numpy.testing.assert_almost_equal",
"numpy.testing.assert_allclose",
"numpy.testing.assert_equal"
]
] |
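
The try/except import at the top of this test file is a small compatibility shim; here is a minimal sketch of the same fallback, assuming astropy and numpy are installed (the quantities compared are made up):

    import numpy as np
    from astropy import units as u

    def assert_quantity_allclose(x, y):
        # Fallback used when astropy.tests.helper does not provide this
        # helper: convert to a common unit, then compare the raw values.
        x = x.to(y.unit)
        np.testing.assert_allclose(x.value, y.value)

    assert_quantity_allclose(u.Quantity(1.0, u.km), u.Quantity(1000.0, u.m))  # passes
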
ytyaru/Python.Pyxel.Reversi.20200419000000
|
[
"25b715943ad39c20cd6b5e9ee124a2195919a735"
] |
[
"src/game.py"
] |
[
"#!/usr/bin/env python3\n# coding: utf8\nimport os, enum, random, numpy, pyxel\nfrom abc import ABCMeta, abstractmethod\n\nclass App:\n def __init__(self):\n self.__window = Window()\n globals()['Window'] = self.__window\n self.__scene = SceneManager()\n pyxel.run(self.update, self.draw)\n def update(self):\n self.__scene.update()\n def draw(self):\n self.__scene.draw()\n\nclass Window:\n def __init__(self):\n pyxel.init(self.Width, self.Height, border_width=self.BorderWidth, caption=self.Caption, fps=60)\n @property\n def Width(self): return 128\n @property\n def Height(self): return 128\n @property\n def Caption(self): return \"Ping Pong\"\n @property\n def BorderWidth(self): return 0\n def update(self): pass\n def draw(self): pyxel.cls(0)\n\nclass SceneType(enum.IntEnum):\n Start = 0\n Play = 1\n Score = 2\n\nclass SceneManager:\n def __init__(self):\n self.__scenes = [StartScene(), PlayScene(), ScoreScene()]\n self.__now = SceneType.Start\n def init(self, *args, **kwargs):\n pass\n def update(self):\n next_scene = self.__scenes[self.__now].update()\n if isinstance(next_scene, SceneType):\n self.__now = next_scene\n self.__scenes[self.__now].init()\n elif isinstance(next_scene, tuple) and isinstance(next_scene[0], SceneType):\n self.__now = next_scene[0]\n if 2 <= len(next_scene): self.__scenes[self.__now].init(*next_scene[1])\n elif 3 <= len(next_scene): self.__scenes[self.__now].init(*next_scene[1], **next_scene[2])\n else: self.__scenes[self.__now].init()\n def draw(self):\n self.__scenes[self.__now].draw()\n\nclass Scene(metaclass=ABCMeta):\n @abstractmethod\n def init(self, *args, **kwargs): pass\n @abstractmethod\n def update(self): pass\n @abstractmethod\n def draw(self): pass\n\nclass StartScene(Scene):\n def __init__(self): pass\n def init(self, *args, **kwargs): pass\n def update(self):\n if pyxel.btn(pyxel.KEY_SPACE):\n return SceneType.Play\n def draw(self):\n pyxel.cls(0)\n pyxel.text(Window.Width // 2 - (4*16/2), Window.Height // 2 - (8*2), 'Push SPACE key !', 7)\n\nclass ScoreScene(Scene):\n def __init__(self):\n pass\n def init(self, *args, **kwargs):\n self.__board = Board()\n self.__stone = args[0]\n self.__stones = self.__stone.Stones\n self.__white = numpy.count_nonzero(self.__stones == StoneType.White)\n self.__black = numpy.count_nonzero(self.__stones == StoneType.Black)\n def update(self):\n if pyxel.btn(pyxel.KEY_R):\n return SceneType.Play\n def draw(self):\n pyxel.cls(0)\n self.__board.draw()\n self.__stone.draw()\n x = (Window.Width // 2) - ((8+8*3)//2)\n y = Window.Height // 2 - (8*2//2)\n pyxel.rect(x, y, 16+4, 16, 4)\n pyxel.circ(x+4, y+4, 4, 7)\n pyxel.text(x+4+8, y+2, str(self.__white), 7)\n pyxel.circ(x+4, y+4+8, 4, 0)\n pyxel.text(x+4+8, y+2+8, str(self.__black), 7)\n pyxel.text((Window.Width // 2) - (4*10//2), y+2+8+8, 'Push R key', 8)\n\nclass PlayScene(Scene):\n def __init__(self):\n self.init()\n def init(self, *args, **kwargs):\n self.__board = Board()\n self.__stone = Stone()\n self.__setter = StoneSetter()\n self.__setter.calc_candidates(self.__stone.Stones)\n# for c in cand: print(c)\n def update(self):\n if self.__setter.is_gameover(self.__stone.Stones):\n print('GameOver!!!!!!!!')\n return SceneType.Score, [self.__stone]\n if self.__setter.update(): # クリック時(石を置いたとき)\n # 自石を置いて敵石をめくる\n self.__stone.set(self.__setter.MousePos[0], self.__setter.MousePos[1], self.__setter.Stone)\n self.__setter.get_reverse_stones(self.__stone.Stones, self.__setter.MousePos[0], self.__setter.MousePos[1])\n for r in self.__setter.Reverses:\n 
self.__stone.Stones[r[1]][r[0]] = self.__setter.Stone\n # 次のターン\n self.__setter.next_turn()\n self.__setter.calc_candidates(self.__stone.Stones)\n while 0 == len(self.__setter.Candidates):\n self.__setter.next_turn()\n self.__setter.calc_candidates(self.__stone.Stones)\n if 2 <= self.__setter.PassCount: return SceneType.Score, [self.__stone]\n\n def draw(self):\n pyxel.cls(0)\n self.__board.draw()\n self.__stone.draw()\n self.__setter.draw()\n \nclass Board:\n TileSize = 16\n TileNum = 8\n Color = 11\n def __init__(self):\n self.__tile_size = 16\n self.__tile_num = 8\n def update(self):\n pass\n def draw(self):\n pyxel.rect(0, 0, Board.TileSize * Board.TileNum, Board.TileSize * Board.TileNum, 11)\n for x in range(Board.TileNum):\n pyxel.line(Board.TileSize * x, 0, Board.TileSize * x, Board.TileSize * Board.TileNum, 5)\n for y in range(Board.TileNum):\n pyxel.line(0, Board.TileSize * y, Board.TileSize * Board.TileNum, Board.TileSize * y, 5)\n\n# https://stackoverflow.com/questions/38773832/is-it-possible-to-add-a-value-named-none-to-enum-type\nStoneType = enum.IntEnum(\"StoneType\", \"None White Black\")\n#class StoneType(enum.IntEnum):\n# None = 0\n# White = 1\n# Black = 1\n#StoneType = enum.IntEnum('StoneType', {'None':0, 'White':1, 'Black':2})\nclass Stone:\n def __init__(self):\n self.__r = Board.TileSize // 2\n self.init()\n def init(self, *args, **kwargs):\n self.__stones = numpy.zeros((Board.TileNum, Board.TileNum))\n self.__stones[3][3] = StoneType.White\n self.__stones[3][4] = StoneType.Black\n self.__stones[4][3] = StoneType.Black\n self.__stones[4][4] = StoneType.White\n @property\n def R(self): return self.__r\n @property\n def Stones(self): return self.__stones\n def set(self, x, y, stone):\n if not isinstance(stone, StoneType): raise Exception('stones should be StoneType.')\n self.__stones[y][x] = stone\n print('set!', x, y, stone)\n def update(self):\n pass\n def draw(self):\n for y in range(Board.TileNum):\n for x in range(Board.TileNum):\n if self.__stones[y][x] == 0: continue\n pyxel.circ(x * Board.TileSize + self.R, y * Board.TileSize + self.R, self.R, self.__get_color(self.__stones[y][x]))\n def __get_color(self, stone):\n if stone == StoneType.White: return 7\n elif stone == StoneType.Black: return 0\n\n\nclass StoneSetter:\n def __init__(self):\n pyxel.mouse(True)\n self.__stone = StoneType.White\n self.__candidates = [] # 石を置けるマスの候補(マス座標)\n self.__reverses = [] # めくる敵石(マス座標)\n self.__pass_count = 0\n self.__mouse_pos = [0, 0]\n self.__flash_wait = 60\n @property\n def Candidates(self): return self.__candidates\n @property\n def MousePos(self): return self.__mouse_pos \n @property\n def Stone(self): return self.__stone\n @property\n def Reverses(self): return self.__reverses\n @property\n def PassCount(self): return self.__pass_count\n def next_turn(self):\n self.__stone = StoneType.Black if self.__stone == StoneType.White else StoneType.White\n def calc_candidates(self, stones):\n self.__candidates.clear()\n for y in range(Board.TileNum):\n for x in range(Board.TileNum):\n if 0 != stones[y][x]: continue\n for a in self.get_adjacents():\n if self.in_board(x, y, a):\n# print(x, y, a)\n if self.is_enemy_stone(stones[(y+a[1])][(x+a[0])]): # 隣が敵石である\n if self.exist_my_stone(stones, x, y, a): # 敵石をはさんだ位置に自石がある\n self.__candidates.append((x, y))\n print('cand', self.__stone, self.__candidates)\n # 候補がなければパス。両者パスなら終局\n if 0 == len(self.__candidates): self.__pass_count += 1\n else: self.__pass_count = 0\n return self.__candidates\n def get_adjacents(self): # 
隣接マスのうち敵石があるマスの方向を取得する[[0, 1, 2][3, 4, 5][6, 7, 8]]\n return ((-1, -1), (0, -1), (1, -1), (-1, 0), (1, 0), (-1, 1), (0, 1), (1, 1))\n def in_board(self, x, y, a, i=1):\n if (x + a[0]*i) < 0: return False\n if (Board.TileNum- 1) < (x + a[0]*i): return False\n if (y + a[1]*i) < 0: return False\n if (Board.TileNum - 1) < (y + a[1]*i): return False\n return True\n def is_enemy_stone(self, stone):\n if stone == 0: return False\n elif stone == self.__stone: return False\n else: return True\n def exist_my_stone(self, stones, x, y, a): # 2つ以上先に自石があるか(敵石をはさんだ位置に自石があるか)\n for i in range(2, Board.TileNum):\n if self.in_board(x, y, a, i):\n if self.__stone == stones[y+(a[1]*i)][x+(a[0]*i)]: return True\n elif 0 == stones[y+(a[1]*i)][x+(a[0]*i)]: return False\n return False\n def draw(self):\n for c in self.Candidates:\n pyxel.circ(c[0] * Board.TileSize + Board.TileSize//2, c[1] * Board.TileSize + Board.TileSize//2, Board.TileSize//2, self.__get_color(c))\n\n def __get_color(self, c):\n if StoneType.White == self.__stone:\n if self.is_enter_mouse(c): return 10\n else: return 7 if self.__flash_wait // 2 - 1 < pyxel.frame_count % self.__flash_wait else Board.Color\n else:\n if self.is_enter_mouse(c): return 5\n else: return 0 if self.__flash_wait // 2 - 1 < pyxel.frame_count % self.__flash_wait else Board.Color\n\n def is_enter_mouse(self, c):\n return (c[0] == self.__mouse_pos[0] and c[1] == self.__mouse_pos[1])\n \n def update(self):\n self.__mouse_pos[0] = pyxel.mouse_x // Board.TileSize\n self.__mouse_pos[1] = pyxel.mouse_y // Board.TileSize\n if pyxel.btnr(pyxel.MOUSE_LEFT_BUTTON):\n for c in self.Candidates:\n if c[0] == self.__mouse_pos[0] and \\\n c[1] == self.__mouse_pos[1]:\n return True\n return False\n\n def get_reverse_stones(self, stones, x, y):\n self.__reverses.clear()\n for a in self.get_adjacents(): # 隣接マス\n if self.in_board(x, y, a): # 隣接マスのうちボード内にあるマス\n if self.is_enemy_stone(stones[(y+a[1])][(x+a[0])]): # 隣が敵石である\n if self.exist_my_stone(stones, x, y, a): # 敵石をはさんだ位置に自石がある\n # 設置した自石との間にある敵石をリバース対象としてリストアップする\n self.__reverses.extend(self.__get_reverse_stones(stones, x, y, a))\n print(self.__get_reverse_stones(stones, x, y, a))\n print('reverse', self.__reverses)\n def __get_reverse_stones(self, stones, x, y, a):\n targets = []\n for i in range(1, Board.TileNum): # 2つ以上先に自石があるか(敵石をはさんだ位置に自石があるか)\n targets.append((x+(a[0]*i), y+(a[1]*i)))\n if self.__stone == stones[y+(a[1]*i)][x+(a[0]*i)]:\n targets.pop()\n return targets\n elif 0 == stones[y+(a[1]*i)][x+(a[0]*i)]: return []\n return []\n\n def is_gameover(self, stones):\n # すべてのマスが石で埋まった\n if numpy.count_nonzero(stones) == (Board.TileNum ** 2):\n print('すべてのマスが石で埋まった')\n return True\n # 両者ともに挟める石がない\n if 2 <= self.__pass_count:\n print('両者ともに挟める石がない')\n return True\n return False\n\nApp()\n"
] |
[
[
"numpy.zeros",
"numpy.count_nonzero"
]
] |
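
A minimal sketch of the stone counting done in `ScoreScene.init()` and `StoneSetter.is_gameover()` above, on a bare numpy board without the pyxel dependency (board values follow the `StoneType` enum: 0 empty, 1 white, 2 black):

    import numpy as np

    board = np.zeros((8, 8), dtype=int)
    board[3, 3] = board[4, 4] = 1     # white
    board[3, 4] = board[4, 3] = 2     # black

    white = np.count_nonzero(board == 1)
    black = np.count_nonzero(board == 2)
    filled = np.count_nonzero(board)  # game over when this reaches 8 ** 2
    print(white, black, filled)       # -> 2 2 4
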
alex-ip/metadata_sync
|
[
"a6b9de7e1fe1d3bae8669ec41cd09407e3e0afbc"
] |
[
"metadata_sync/update_acdd_metadata.py"
] |
[
"'''\nUtility to update ACDD global attributes in NetCDF files using metadata sourced from GeoNetwork\nCreated on Apr 7, 2016\n\n@author: Alex Ip, Geoscience Australia\n'''\nimport os\nimport netCDF4\nimport logging\nimport yaml\nimport numpy as np\nimport argparse\n\nfrom geophys_utils import NetCDFGridUtils, NetCDFLineUtils, get_spatial_ref_from_crs\nfrom metadata_sync.metadata import XMLMetadata\nfrom geophys_utils import DataStats\nfrom metadata_json import write_json_metadata\nfrom _metadata_sync_utils import get_xml_from_uuid, find_files\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO) # Initial logging level for this module\n\n#GA_GEONETWORK_URL = 'https://internal.ecat.ga.gov.au/geonetwork/srv/eng' # internally-visible eCat CSW\nGA_GEONETWORK_URL = 'http://ecat.ga.gov.au/geonetwork/srv/eng' # internally-visible eCat CSW\nDECIMAL_PLACES = 12 # Number of decimal places to which geometry values should be rounded\n\n# YAML file containing mapping from XML to ACDD expressed as a list of <acdd_attribute_name>:<xpath> tuples\n# Note: List may contain tuples with duplicate <acdd_attribute_name> values which are evaluated as a searchlist\nDEFAULT_MAPPING_FILE = 'ga_xml2acdd_mapping.yaml' \n\ndef update_nc_metadata(netcdf_path, xml2nc_mapping, do_stats=False, xml_path=None):\n '''\n Function to import all available metadata and set attributes in NetCDF file.\n Should be overridden in subclasses for each specific format but called first to perform initialisations\n '''\n assert os.path.exists(netcdf_path), 'NetCDF file %s does not exist.' % netcdf_path\n \n try:\n netcdf_dataset = netCDF4.Dataset(netcdf_path, mode='r+')\n except Exception as e:\n logger.error('Unable to open NetCDF file %s: %s',\n (netcdf_path, e.message))\n raise\n\n uuid = netcdf_dataset.uuid # This will fail if no uuid attribute found\n \n xml_metadata = XMLMetadata(xml_path)\n if not xml_path: # Need to read XML from catalogue\n xml_metadata.read_string(get_xml_from_uuid(GA_GEONETWORK_URL, uuid))\n \n set_netcdf_metadata_attributes(netcdf_dataset, xml_metadata, xml2nc_mapping, do_stats=do_stats)\n \n netcdf_dataset.close()\n\n write_json_metadata(uuid, os.path.dirname(netcdf_path))\n logger.info('Finished updating ACDD metadata in netCDF file %s' % netcdf_path)\n\ndef set_netcdf_metadata_attributes(netcdf_dataset, xml_metadata, xml2nc_mapping, to_crs='EPSG:4326', do_stats=False):\n '''\n Function to set all NetCDF metadata attributes using xml2nc_mapping to map from XML metadata to ACDD attributes\n Parameter:\n to_crs: EPSG or WKT for spatial metadata\n do_stats: Boolean flag indicating whether minmax stats should be determined (slow)\n '''\n\n try:\n netcdf_utils = NetCDFGridUtils(netcdf_dataset)\n except:\n netcdf_utils = NetCDFLineUtils(netcdf_dataset)\n \n wgs84_bbox = np.array(netcdf_utils.wgs84_bbox)\n xmin = min(wgs84_bbox[:, 0])\n ymin = min(wgs84_bbox[:, 1])\n xmax = max(wgs84_bbox[:, 0])\n ymax = max(wgs84_bbox[:, 1])\n \n\n attribute_dict = dict(zip(['geospatial_lon_min', 'geospatial_lat_min', 'geospatial_lon_max', 'geospatial_lat_max'],\n [xmin, ymin, xmax, ymax]\n )\n )\n try:\n attribute_dict['geospatial_lon_resolution'] = netcdf_utils.nominal_pixel_degrees[0]\n attribute_dict['geospatial_lat_resolution'] = netcdf_utils.nominal_pixel_degrees[1]\n attribute_dict['geospatial_lon_units'] = 'degrees'\n attribute_dict['geospatial_lat_units'] = 'degrees'\n except:\n pass\n\n convex_hull = netcdf_utils.get_convex_hull(to_crs)\n attribute_dict['geospatial_bounds'] = 'POLYGON((' + ', '.join([' 
'.join(\n ['%.4f' % ordinate for ordinate in coordinates]) for coordinates in convex_hull]) + '))'\n\n attribute_dict['geospatial_bounds_crs'] = get_spatial_ref_from_crs(to_crs).ExportToPrettyWkt()\n\n for key, value in attribute_dict.items():\n setattr(netcdf_dataset, key, value)\n\n # Set attributes defined in self.METADATA_MAPPING\n # Scan list in reverse to give priority to earlier entries\n #TODO: Improve this coding - it's a bit crap\n keys_read = []\n for key, metadata_path in xml2nc_mapping:\n # Skip any keys already read\n if key in keys_read:\n continue\n\n value = xml_metadata.get_metadata(metadata_path.split('/'))\n if value is not None:\n logger.debug('Setting %s to %s', key, value)\n # TODO: Check whether hierarchical metadata required\n setattr(netcdf_dataset, key, value)\n keys_read.append(key)\n else:\n logger.warning(\n 'WARNING: Metadata path %s not found', metadata_path)\n\n unread_keys = sorted(\n list(set([item[0] for item in xml2nc_mapping]) - set(keys_read)))\n if unread_keys:\n logger.warning(\n 'WARNING: No value found for metadata attribute(s) %s' % ', '.join(unread_keys))\n\n # Ensure only one DOI is stored - could be multiple, comma-separated\n # entries\n if hasattr(netcdf_dataset, 'doi'):\n url_list = [url.strip()\n for url in netcdf_dataset.doi.split(',')]\n doi_list = [url for url in url_list if url.startswith(\n 'http://dx.doi.org/')]\n if len(url_list) > 1: # If more than one URL in list\n try: # Give preference to proper DOI URL\n url = doi_list[0] # Use first (preferably only) DOI URL\n except:\n url = url_list[0] # Just use first URL if no DOI found\n url = url.replace('&', '&')\n netcdf_dataset.doi = url\n\n # Set metadata_link to NCI metadata URL\n netcdf_dataset.metadata_link = 'https://pid.nci.org.au/dataset/%s' % netcdf_dataset.uuid\n\n netcdf_dataset.Conventions = 'CF-1.6, ACDD-1.3'\n\n if do_stats:\n datastats = DataStats(netcdf_dataset=netcdf_dataset,\n netcdf_path=None, \n max_bytes=netcdf_utils.max_bytes)\n datastats.data_variable.actual_range = np.array(\n [datastats.value('min'), datastats.value('max')], dtype='float32')\n\n # Remove old fields - remove this later\n if hasattr(netcdf_dataset, 'id'):\n del netcdf_dataset.id\n if hasattr(netcdf_dataset, 'ga_uuid'):\n del netcdf_dataset.ga_uuid\n if hasattr(netcdf_dataset, 'keywords_vocabulary'):\n del netcdf_dataset.keywords_vocabulary\n \n netcdf_dataset.sync()\n \n\ndef main():\n # Define command line arguments\n parser = argparse.ArgumentParser()\n \n parser.add_argument(\"-n\", \"--netcdf_dir\", help=\"NetCDF root directory\", type=str, required=True)\n parser.add_argument(\"-f\", \"--file_template\", help='NetCDF filename template (default=\"*.nc\")', type=str, default=\"*.nc\")\n parser.add_argument(\"-m\", \"--mapping_file\", help=\"XML to ACDD mapping configuration file path\", type=str)\n parser.add_argument(\"-x\", \"--xml_dir\", help=\"XML directory for input files (optional)\", type=str)\n \n args = parser.parse_args()\n \n xml2nc_mapping_path = args.mapping_file or os.path.join(os.path.dirname(__file__), 'config', DEFAULT_MAPPING_FILE)\n \n xml2nc_mapping_file = open(xml2nc_mapping_path)\n xml2nc_mapping = yaml.load(xml2nc_mapping_file)\n xml2nc_mapping_file.close()\n logger.debug('xml2nc_mapping = %s' % xml2nc_mapping)\n\n \n for nc_path in find_files(args.netcdf_dir, args.file_template):\n logger.info('Updating ACDD metadata in netCDF file %s' % nc_path)\n \n if args.xml_dir:\n xml_path = os.path.abspath(os.path.join(args.xml_dir, os.path.splitext(os.path.basename(nc_path))[0] 
+ '.xml'))\n else:\n xml_path = None\n\n try:\n update_nc_metadata(nc_path, xml2nc_mapping, do_stats=True, xml_path=xml_path)\n except Exception as e:\n logger.error('Metadata update failed: %s' % e.message)\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"numpy.array"
]
] |
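
A minimal sketch of how `set_netcdf_metadata_attributes()` above derives the ACDD lat/lon bounds from the WGS84 bounding box; the corner coordinates are made up and `netcdf_utils.wgs84_bbox` is replaced by a literal array:

    import numpy as np

    # Stand-in for np.array(netcdf_utils.wgs84_bbox): corners as (lon, lat) pairs.
    wgs84_bbox = np.array([[110.0, -45.0], [155.0, -45.0],
                           [155.0, -10.0], [110.0, -10.0]])

    attribute_dict = dict(zip(
        ['geospatial_lon_min', 'geospatial_lat_min',
         'geospatial_lon_max', 'geospatial_lat_max'],
        [wgs84_bbox[:, 0].min(), wgs84_bbox[:, 1].min(),
         wgs84_bbox[:, 0].max(), wgs84_bbox[:, 1].max()],
    ))
    print(attribute_dict)  # lon_min=110.0, lat_min=-45.0, lon_max=155.0, lat_max=-10.0
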
ekmungi/ml_courses
|
[
"b7c0cfc0bcde0c319def0704afc22ca98799e8af"
] |
[
"object_detector_ssd/data/coco.py"
] |
[
"from .config import HOME\nimport os\nimport os.path as osp\nimport sys\nimport torch\nimport torch.utils.data as data\nimport torchvision.transforms as transforms\nimport cv2\nimport numpy as np\n\nCOCO_ROOT = osp.join(HOME, 'data/coco/')\nIMAGES = 'images'\nANNOTATIONS = 'annotations'\nCOCO_API = 'PythonAPI'\nINSTANCES_SET = 'instances_{}.json'\nCOCO_CLASSES = ('person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',\n 'train', 'truck', 'boat', 'traffic light', 'fire', 'hydrant',\n 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog',\n 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra',\n 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie',\n 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',\n 'kite', 'baseball bat', 'baseball glove', 'skateboard',\n 'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup',\n 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',\n 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',\n 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed',\n 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',\n 'keyboard', 'cell phone', 'microwave oven', 'toaster', 'sink',\n 'refrigerator', 'book', 'clock', 'vase', 'scissors',\n 'teddy bear', 'hair drier', 'toothbrush')\n\n\ndef get_label_map(label_file):\n label_map = {}\n labels = open(label_file, 'r')\n for line in labels:\n ids = line.split(',')\n label_map[int(ids[0])] = int(ids[1])\n return label_map\n\n\nclass COCOAnnotationTransform(object):\n \"\"\"Transforms a COCO annotation into a Tensor of bbox coords and label index\n Initilized with a dictionary lookup of classnames to indexes\n \"\"\"\n\n def __init__(self):\n self.label_map = get_label_map(osp.join(COCO_ROOT, 'coco_labels.txt'))\n\n def __call__(self, target, width, height):\n \"\"\"\n Args:\n target (dict): COCO target json annotation as a python dict\n height (int): height\n width (int): width\n Returns:\n a list containing lists of bounding boxes [bbox coords, class idx]\n \"\"\"\n scale = np.array([width, height, width, height])\n res = []\n for obj in target:\n if 'bbox' in obj:\n bbox = obj['bbox']\n bbox[2] += bbox[0]\n bbox[3] += bbox[1]\n label_idx = self.label_map[obj['category_id']] - 1\n final_box = list(np.array(bbox)/scale)\n final_box.append(label_idx)\n res += [final_box] # [xmin, ymin, xmax, ymax, label_idx]\n else:\n print(\"no bbox problem!\")\n\n return res # [[xmin, ymin, xmax, ymax, label_idx], ... 
]\n\n\nclass COCODetection(data.Dataset):\n \"\"\"`MS Coco Detection <http://mscoco.org/dataset/#detections-challenge2016>`_ Dataset.\n Args:\n root (string): Root directory where images are downloaded to.\n set_name (string): Name of the specific set of COCO images.\n transform (callable, optional): A function/transform that augments the\n raw images`\n target_transform (callable, optional): A function/transform that takes\n in the target (bbox) and transforms it.\n \"\"\"\n\n def __init__(self, root, image_set='trainval35k', transform=None,\n target_transform=COCOAnnotationTransform(), dataset_name='MS COCO'):\n sys.path.append(osp.join(root, COCO_API))\n from pycocotools.coco import COCO\n self.root = osp.join(root, IMAGES, image_set)\n self.coco = COCO(osp.join(root, ANNOTATIONS,\n INSTANCES_SET.format(image_set)))\n self.ids = list(self.coco.imgToAnns.keys())\n self.transform = transform\n self.target_transform = target_transform\n self.name = dataset_name\n\n def __getitem__(self, index):\n \"\"\"\n Args:\n index (int): Index\n Returns:\n tuple: Tuple (image, target).\n target is the object returned by ``coco.loadAnns``.\n \"\"\"\n im, gt, h, w = self.pull_item(index)\n return im, gt\n\n def __len__(self):\n return len(self.ids)\n\n def pull_item(self, index):\n \"\"\"\n Args:\n index (int): Index\n Returns:\n tuple: Tuple (image, target, height, width).\n target is the object returned by ``coco.loadAnns``.\n \"\"\"\n img_id = self.ids[index]\n target = self.coco.imgToAnns[img_id]\n ann_ids = self.coco.getAnnIds(imgIds=img_id)\n\n target = self.coco.loadAnns(ann_ids)\n path = osp.join(self.root, self.coco.loadImgs(img_id)[0]['file_name'])\n assert osp.exists(path), 'Image path does not exist: {}'.format(path)\n img = cv2.imread(osp.join(self.root, path))\n height, width, _ = img.shape\n if self.target_transform is not None:\n target = self.target_transform(target, width, height)\n if self.transform is not None:\n target = np.array(target)\n img, boxes, labels = self.transform(img, target[:, :4],\n target[:, 4])\n # to rgb\n img = img[:, :, (2, 1, 0)]\n\n target = np.hstack((boxes, np.expand_dims(labels, axis=1)))\n return torch.from_numpy(img).permute(2, 0, 1), target, height, width\n\n def pull_image(self, index):\n '''Returns the original image object at index in PIL form\n\n Note: not using self.__getitem__(), as any transformations passed in\n could mess up this functionality.\n\n Argument:\n index (int): index of img to show\n Return:\n cv2 img\n '''\n img_id = self.ids[index]\n path = self.coco.loadImgs(img_id)[0]['file_name']\n return cv2.imread(osp.join(self.root, path), cv2.IMREAD_COLOR)\n\n def pull_anno(self, index):\n '''Returns the original annotation of image at index\n\n Note: not using self.__getitem__(), as any transformations passed in\n could mess up this functionality.\n\n Argument:\n index (int): index of img to get annotation of\n Return:\n list: [img_id, [(label, bbox coords),...]]\n eg: ('001718', [('dog', (96, 13, 438, 332))])\n '''\n img_id = self.ids[index]\n ann_ids = self.coco.getAnnIds(imgIds=img_id)\n return self.coco.loadAnns(ann_ids)\n\n def __repr__(self):\n fmt_str = 'Dataset ' + self.__class__.__name__ + '\\n'\n fmt_str += ' Number of datapoints: {}\\n'.format(self.__len__())\n fmt_str += ' Root Location: {}\\n'.format(self.root)\n tmp = ' Transforms (if any): '\n fmt_str += '{0}{1}\\n'.format(\n tmp, self.transform.__repr__().replace('\\n', '\\n' + ' ' * len(tmp)))\n tmp = ' Target Transforms (if any): '\n fmt_str += '{0}{1}'.format(\n tmp, 
self.target_transform.__repr__().replace('\\n', '\\n' + ' ' * len(tmp)))\n return fmt_str\n"
] |
[
[
"numpy.array",
"numpy.expand_dims",
"torch.from_numpy"
]
] |
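
A minimal sketch of the bbox arithmetic inside `COCOAnnotationTransform.__call__` above: COCO annotations store `[xmin, ymin, width, height]`, which the transform converts to corner coordinates scaled to [0, 1] (the image size and box below are made up):

    import numpy as np

    width, height = 640, 480
    bbox = [100.0, 120.0, 200.0, 60.0]   # COCO-style [x, y, w, h]
    bbox[2] += bbox[0]                   # xmax = x + w
    bbox[3] += bbox[1]                   # ymax = y + h
    scale = np.array([width, height, width, height])
    print(list(np.array(bbox) / scale))  # -> [0.15625, 0.25, 0.46875, 0.375]
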
aninda-github/DeepLearning-OpenCV
|
[
"cd4726fd0a1df35d18695380407bcc9739235edb"
] |
[
"classification/classification.py"
] |
[
"import tensorflow as tf\nfrom sklearn.datasets import load_breast_cancer\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# We will be using GPU to train our model. I have a Nvidia GTX 1650 card with compute capability of 7.5 and 896 CUDA\n# cores. Hence let's find how the device is referenced.\nif tf.test.gpu_device_name():\n print('Default GPU Device:{}'.format(tf.test.gpu_device_name()))\nelse:\n print('Please install GPU version of TF')\n\n# We will load the breast cancer data from the sklearn datasets Line 2\ndata = load_breast_cancer()\n\n# Let's see how the data type looks like and let's see what features are there.\nprint(type(data))\nprint(data.keys())\nprint(data.data.shape)\nprint(data.target)\nprint(data.target_names)\nprint(data.target.shape)\nprint(data.feature_names)\n\n# Since we also want to evaluate the model hence we will split the dataset into Training and Validation datasets\nX_train, X_test, Y_train, Y_test = train_test_split(data.data, data.target, test_size=0.33)\nN, D = X_train.shape\nprint(N, D)\n\n# We need to normalize the data so that 1 big value feature does not overshadow the other features.\n# todo: But why is Normalisation needed????\nscaler = StandardScaler()\nX_train = scaler.fit_transform(X_train)\nX_test = scaler.transform(X_test)\n\n# Now it's time to define our model\n# We need 1 Input layer and 1 Dense layer and for the Dense layer we need an activation function\n# Since we need to bring some non linearity in the activation sigmoid seems perfect match.\n# todo: But why bring non-linearity????\n\nmodel = tf.keras.models.Sequential([\n tf.keras.layers.Input(shape=(D,)),\n tf.keras.layers.Dense(1, activation='sigmoid')\n])\n\n# Now we need to setup what parameters we want the model to train with.\n# todo: What is ADAM? What is Binary cross entropy?\nmodel.compile(\n optimizer='adam',\n loss='binary_crossentropy',\n metrics=['accuracy']\n)\n\n# Now it's time to solve the geometry problem. Let's play Hide and Seek\n# Let's find that non linear curve which will somewhat fit with the existing training data\nr = model.fit(X_train, Y_train, validation_data=(X_test, Y_test), epochs=1000)\n\n\nprint(\"Train Score:\", model.evaluate(X_train, Y_train))\nprint(\"Train Score:\", model.evaluate(X_test, Y_test))\n\nplt.plot(r.history['loss'], label='loss')\nplt.plot(r.history['val_loss'], label='val_loss')\nplt.legend()\n\nplt.plot(r.history['accuracy'], label='acc')\nplt.plot(r.history['val_accuracy'], label='val_acc')\nplt.legend()\nplt.show()\n\n# Make predictions\nP = model.predict(X_test)\nprint(P) # they are outputs of the sigmoid, interpreted as probabilities p(y = 1 | x)\n\n# Round to get the actual predictions\n# Note: has to be flattened since the targets are size (N,) while the predictions are size (N,1)\n\nP = np.round(P).flatten()\nprint(P)\n\n# Calculate the accuracy, compare it to evaluate() output\nprint(\"Manually calculated accuracy:\", np.mean(P == Y_test))\nprint(\"Evaluate output:\", model.evaluate(X_test, Y_test))"
] |
[
[
"matplotlib.pyplot.legend",
"sklearn.datasets.load_breast_cancer",
"tensorflow.test.gpu_device_name",
"tensorflow.keras.layers.Dense",
"sklearn.model_selection.train_test_split",
"matplotlib.pyplot.plot",
"numpy.round",
"numpy.mean",
"sklearn.preprocessing.StandardScaler",
"matplotlib.pyplot.show",
"tensorflow.keras.layers.Input"
]
] |
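
A minimal sketch of the manual accuracy check at the end of the script above: the sigmoid outputs are probabilities p(y = 1 | x) of shape (N, 1), so they are rounded at 0.5 and flattened to match the (N,)-shaped targets before averaging (the arrays here are made up):

    import numpy as np

    P = np.array([[0.91], [0.12], [0.55], [0.43]])  # model.predict() output, (N, 1)
    Y_test = np.array([1, 0, 0, 1])                 # targets, (N,)

    preds = np.round(P).flatten()                   # (N, 1) -> (N,)
    print("Manually calculated accuracy:", np.mean(preds == Y_test))  # -> 0.5
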
alt113/flow
|
[
"28224d05acd4b03cdb78105d142619e4a01d3d5f",
"28224d05acd4b03cdb78105d142619e4a01d3d5f"
] |
[
"flow/visualize/visualizer_rllib.py",
"flow/scenarios/bottleneck.py"
] |
[
"\"\"\"Visualizer for rllib experiments.\n\nAttributes\n----------\nEXAMPLE_USAGE : str\n Example call to the function, which is\n ::\n\n python ./visualizer_rllib.py /tmp/ray/result_dir 1\n\nparser : ArgumentParser\n Command-line argument parser\n\"\"\"\n\nimport argparse\nfrom datetime import datetime\nimport gym\nimport numpy as np\nimport os\nimport sys\nimport time\n\nimport ray\ntry:\n from ray.rllib.agents.agent import get_agent_class\nexcept ImportError:\n from ray.rllib.agents.registry import get_agent_class\nfrom ray.tune.registry import register_env\n\nfrom flow.core.util import emission_to_csv\nfrom flow.utils.registry import make_create_env\nfrom flow.utils.rllib import get_flow_params\nfrom flow.utils.rllib import get_rllib_config\nfrom flow.utils.rllib import get_rllib_pkl\n\n\nEXAMPLE_USAGE = \"\"\"\nexample usage:\n python ./visualizer_rllib.py /ray_results/experiment_dir/result_dir 1\n\nHere the arguments are:\n1 - the path to the simulation results\n2 - the number of the checkpoint\n\"\"\"\n\n\ndef visualizer_rllib(args):\n \"\"\"Visualizer for RLlib experiments.\n\n This function takes args (see function create_parser below for\n more detailed information on what information can be fed to this\n visualizer), and renders the experiment associated with it.\n \"\"\"\n result_dir = args.result_dir if args.result_dir[-1] != '/' \\\n else args.result_dir[:-1]\n\n config = get_rllib_config(result_dir)\n # TODO(ev) backwards compatibility hack\n try:\n pkl = get_rllib_pkl(result_dir)\n except Exception:\n pass\n\n # check if we have a multiagent scenario but in a\n # backwards compatible way\n if config.get('multiagent', {}).get('policy_graphs', {}):\n multiagent = True\n config['multiagent'] = pkl['multiagent']\n else:\n multiagent = False\n\n # Run on only one cpu for rendering purposes\n config['num_workers'] = 0\n\n flow_params = get_flow_params(config)\n\n # hack for old pkl files\n # TODO(ev) remove eventually\n sim_params = flow_params['sim']\n setattr(sim_params, 'num_clients', 1)\n\n # Determine agent and checkpoint\n config_run = config['env_config']['run'] if 'run' in config['env_config'] \\\n else None\n if args.run and config_run:\n if args.run != config_run:\n print('visualizer_rllib.py: error: run argument '\n + '\\'{}\\' passed in '.format(args.run)\n + 'differs from the one stored in params.json '\n + '\\'{}\\''.format(config_run))\n sys.exit(1)\n if args.run:\n agent_cls = get_agent_class(args.run)\n elif config_run:\n agent_cls = get_agent_class(config_run)\n else:\n print('visualizer_rllib.py: error: could not find flow parameter '\n '\\'run\\' in params.json, '\n 'add argument --run to provide the algorithm or model used '\n 'to train the results\\n e.g. 
'\n 'python ./visualizer_rllib.py /tmp/ray/result_dir 1 --run PPO')\n sys.exit(1)\n\n sim_params.restart_instance = False\n dir_path = os.path.dirname(os.path.realpath(__file__))\n emission_path = '{0}/test_time_rollout/'.format(dir_path)\n sim_params.emission_path = emission_path if args.gen_emission else None\n\n # pick your rendering mode\n if args.render_mode == 'sumo_web3d':\n sim_params.num_clients = 2\n sim_params.render = False\n elif args.render_mode == 'drgb':\n sim_params.render = 'drgb'\n sim_params.pxpm = 4\n elif args.render_mode == 'sumo_gui':\n sim_params.render = True\n elif args.render_mode == 'no_render':\n sim_params.render = False\n if args.save_render:\n sim_params.render = 'drgb'\n sim_params.pxpm = 4\n sim_params.save_render = True\n\n # Create and register a gym+rllib env\n create_env, env_name = make_create_env(\n params=flow_params, version=0)\n register_env(env_name, create_env)\n\n # check if the environment is a single or multiagent environment, and\n # get the right address accordingly\n # single_agent_envs = [env for env in dir(flow.envs)\n # if not env.startswith('__')]\n\n # if flow_params['env_name'] in single_agent_envs:\n # env_loc = 'flow.envs'\n # else:\n # env_loc = 'flow.multiagent_envs'\n\n # Start the environment with the gui turned on and a path for the\n # emission file\n env_params = flow_params['env']\n env_params.restart_instance = False\n if args.evaluate:\n env_params.evaluate = True\n\n # lower the horizon if testing\n if args.horizon:\n config['horizon'] = args.horizon\n env_params.horizon = args.horizon\n\n # create the agent that will be used to compute the actions\n agent = agent_cls(env=env_name, config=config)\n checkpoint = result_dir + '/checkpoint_' + args.checkpoint_num\n checkpoint = checkpoint + '/checkpoint-' + args.checkpoint_num\n agent.restore(checkpoint)\n\n if hasattr(agent, \"local_evaluator\") and \\\n os.environ.get(\"TEST_FLAG\") != 'True':\n env = agent.local_evaluator.env\n else:\n env = gym.make(env_name)\n\n if multiagent:\n rets = {}\n # map the agent id to its policy\n policy_map_fn = config['multiagent']['policy_mapping_fn'].func\n for key in config['multiagent']['policy_graphs'].keys():\n rets[key] = []\n else:\n rets = []\n\n if config['model']['use_lstm']:\n use_lstm = True\n if multiagent:\n state_init = {}\n # map the agent id to its policy\n policy_map_fn = config['multiagent']['policy_mapping_fn'].func\n size = config['model']['lstm_cell_size']\n for key in config['multiagent']['policy_graphs'].keys():\n state_init[key] = [np.zeros(size, np.float32),\n np.zeros(size, np.float32)\n ]\n else:\n state_init = [\n np.zeros(config['model']['lstm_cell_size'], np.float32),\n np.zeros(config['model']['lstm_cell_size'], np.float32)\n ]\n else:\n use_lstm = False\n\n env.restart_simulation(\n sim_params=sim_params, render=sim_params.render)\n\n final_outflows = []\n mean_speed = []\n for i in range(args.num_rollouts):\n vel = []\n state = env.reset()\n if multiagent:\n ret = {key: [0] for key in rets.keys()}\n else:\n ret = 0\n for _ in range(env_params.horizon):\n vehicles = env.unwrapped.k.vehicle\n vel.append(np.mean(vehicles.get_speed(vehicles.get_ids())))\n if multiagent:\n action = {}\n for agent_id in state.keys():\n if use_lstm:\n action[agent_id], state_init[agent_id], logits = \\\n agent.compute_action(\n state[agent_id], state=state_init[agent_id],\n policy_id=policy_map_fn(agent_id))\n else:\n action[agent_id] = agent.compute_action(\n state[agent_id], policy_id=policy_map_fn(agent_id))\n else:\n action 
= agent.compute_action(state)\n state, reward, done, _ = env.step(action)\n if multiagent:\n for actor, rew in reward.items():\n ret[policy_map_fn(actor)][0] += rew\n else:\n ret += reward\n if multiagent and done['__all__']:\n break\n if not multiagent and done:\n break\n\n if multiagent:\n for key in rets.keys():\n rets[key].append(ret[key])\n else:\n rets.append(ret)\n outflow = vehicles.get_outflow_rate(500)\n final_outflows.append(outflow)\n mean_speed.append(np.mean(vel))\n if multiagent:\n for agent_id, rew in rets.items():\n print('Round {}, Return: {} for agent {}'.format(\n i, ret, agent_id))\n else:\n print('Round {}, Return: {}'.format(i, ret))\n if multiagent:\n for agent_id, rew in rets.items():\n print('Average, std return: {}, {} for agent {}'.format(\n np.mean(rew), np.std(rew), agent_id))\n else:\n print('Average, std return: {}, {}'.format(\n np.mean(rets), np.std(rets)))\n print('Average, std speed: {}, {}'.format(\n np.mean(mean_speed), np.std(mean_speed)))\n print('Average, std outflow: {}, {}'.format(\n np.mean(final_outflows), np.std(final_outflows)))\n\n # terminate the environment\n env.unwrapped.terminate()\n\n # if prompted, convert the emission file into a csv file\n if args.gen_emission:\n time.sleep(0.1)\n\n dir_path = os.path.dirname(os.path.realpath(__file__))\n emission_filename = '{0}-emission.xml'.format(env.scenario.name)\n\n emission_path = \\\n '{0}/test_time_rollout/{1}'.format(dir_path, emission_filename)\n\n emission_to_csv(emission_path)\n\n # if we wanted to save the render, here we create the movie\n if args.save_render:\n dirs = os.listdir(os.path.expanduser('~')+'/flow_rendering')\n dirs.sort(key=lambda date: datetime.strptime(date, \"%Y-%m-%d-%H%M%S\"))\n recent_dir = dirs[-1]\n # create the movie\n movie_dir = os.path.expanduser('~') + '/flow_rendering/' + recent_dir\n save_dir = os.path.expanduser('~') + '/flow_movies'\n if not os.path.exists(save_dir):\n os.mkdir(save_dir)\n os_cmd = \"cd \" + movie_dir + \" && ffmpeg -i frame_%06d.png\"\n os_cmd += \" -pix_fmt yuv420p \" + dirs[-1] + \".mp4\"\n os_cmd += \"&& cp \" + dirs[-1] + \".mp4 \" + save_dir + \"/\"\n os.system(os_cmd)\n\n\ndef create_parser():\n \"\"\"Create the parser to capture CLI arguments.\"\"\"\n parser = argparse.ArgumentParser(\n formatter_class=argparse.RawDescriptionHelpFormatter,\n description='[Flow] Evaluates a reinforcement learning agent '\n 'given a checkpoint.',\n epilog=EXAMPLE_USAGE)\n\n # required input parameters\n parser.add_argument(\n 'result_dir', type=str, help='Directory containing results')\n parser.add_argument('checkpoint_num', type=str, help='Checkpoint number.')\n\n # optional input parameters\n parser.add_argument(\n '--run',\n type=str,\n help='The algorithm or model to train. This may refer to '\n 'the name of a built-on algorithm (e.g. RLLib\\'s DQN '\n 'or PPO), or a user-defined trainable function or '\n 'class registered in the tune registry. '\n 'Required for results trained with flow-0.2.0 and before.')\n parser.add_argument(\n '--num-rollouts',\n type=int,\n default=1,\n help='The number of rollouts to visualize.')\n parser.add_argument(\n '--gen_emission',\n action='store_true',\n help='Specifies whether to generate an emission file from the '\n 'simulation')\n parser.add_argument(\n '--evaluate',\n action='store_true',\n help='Specifies whether to use the \\'evaluate\\' reward '\n 'for the environment.')\n parser.add_argument(\n '--render_mode',\n type=str,\n default='sumo_gui',\n help='Pick the render mode. 
Options include sumo_web3d, '\n 'drgb, sumo_gui and no_render')\n parser.add_argument(\n '--save_render',\n action='store_true',\n help='saves the render to a file')\n parser.add_argument(\n '--horizon',\n type=int,\n help='Specifies the horizon.')\n return parser\n\n\nif __name__ == '__main__':\n parser = create_parser()\n args = parser.parse_args()\n ray.init(num_cpus=1)\n visualizer_rllib(args)\n",
"\"\"\"Contains the bottleneck scenario class.\"\"\"\n\nfrom flow.core.params import InitialConfig\nfrom flow.core.params import TrafficLightParams\nfrom flow.scenarios.base_scenario import Scenario\nimport numpy as np\n\nADDITIONAL_NET_PARAMS = {\n # the factor multiplying number of lanes.\n \"scaling\": 1,\n # edge speed limit\n 'speed_limit': 23\n}\n\n\nclass BottleneckScenario(Scenario):\n \"\"\"Scenario class for bottleneck simulations.\n\n This network acts as a scalable representation of the Bay Bridge. It\n consists of a two-stage lane-drop bottleneck where 4n lanes reduce to 2n\n and then to n, where n is the scaling value. The length of the bottleneck\n is fixed.\n\n Requires from net_params:\n\n * **scaling** : the factor multiplying number of lanes\n * **speed_limit** : edge speed limit\n\n In order for right-of-way dynamics to take place at the intersection,\n set *no_internal_links* in net_params to False.\n\n Usage\n -----\n >>> from flow.core.params import NetParams\n >>> from flow.core.params import VehicleParams\n >>> from flow.core.params import InitialConfig\n >>> from flow.scenarios import BottleneckScenario\n >>>\n >>> scenario = BottleneckScenario(\n >>> name='bottleneck',\n >>> vehicles=VehicleParams(),\n >>> net_params=NetParams(\n >>> additional_params={\n >>> 'scaling': 1,\n >>> 'speed_limit': 1,\n >>> },\n >>> no_internal_links=False # we want junctions\n >>> )\n >>> )\n \"\"\"\n\n def __init__(self,\n name,\n vehicles,\n net_params,\n initial_config=InitialConfig(),\n traffic_lights=TrafficLightParams()):\n \"\"\"Instantiate the scenario class.\"\"\"\n for p in ADDITIONAL_NET_PARAMS.keys():\n if p not in net_params.additional_params:\n raise KeyError('Network parameter \"{}\" not supplied'.format(p))\n\n super().__init__(name, vehicles, net_params, initial_config,\n traffic_lights)\n\n def specify_nodes(self, net_params):\n \"\"\"See parent class.\"\"\"\n nodes = [\n {\n \"id\": \"1\",\n \"x\": 0,\n \"y\": 0\n }, # pre-toll\n {\n \"id\": \"2\",\n \"x\": 100,\n \"y\": 0\n }, # toll\n {\n \"id\": \"3\",\n \"x\": 410,\n \"y\": 0\n }, # light\n {\n \"id\": \"4\",\n \"x\": 550,\n \"y\": 0,\n \"type\": \"zipper\",\n \"radius\": 20\n }, # merge1\n {\n \"id\": \"5\",\n \"x\": 830,\n \"y\": 0,\n \"type\": \"zipper\",\n \"radius\": 20\n }, # merge2\n {\n \"id\": \"6\",\n \"x\": 985,\n \"y\": 0\n },\n # fake nodes used for visualization\n {\n \"id\": \"fake1\",\n \"x\": 0,\n \"y\": 1\n },\n {\n \"id\": \"fake2\",\n \"x\": 0,\n \"y\": 2\n }\n ] # post-merge2\n return nodes\n\n def specify_edges(self, net_params):\n \"\"\"See parent class.\"\"\"\n scaling = net_params.additional_params.get(\"scaling\", 1)\n speed = net_params.additional_params['speed_limit']\n assert (isinstance(scaling, int)), \"Scaling must be an int\"\n\n edges = [\n {\n \"id\": \"1\",\n \"from\": \"1\",\n \"to\": \"2\",\n \"length\": 100,\n \"spreadType\": \"center\",\n \"numLanes\": 4 * scaling,\n \"speed\": speed\n },\n {\n \"id\": \"2\",\n \"from\": \"2\",\n \"to\": \"3\",\n \"length\": 310,\n \"spreadType\": \"center\",\n \"numLanes\": 4 * scaling,\n \"speed\": speed\n },\n {\n \"id\": \"3\",\n \"from\": \"3\",\n \"to\": \"4\",\n \"length\": 140,\n \"spreadType\": \"center\",\n \"numLanes\": 4 * scaling,\n \"speed\": speed\n },\n {\n \"id\": \"4\",\n \"from\": \"4\",\n \"to\": \"5\",\n \"length\": 280,\n \"spreadType\": \"center\",\n \"numLanes\": 2 * scaling,\n \"speed\": speed\n },\n {\n \"id\": \"5\",\n \"from\": \"5\",\n \"to\": \"6\",\n \"length\": 155,\n \"spreadType\": \"center\",\n 
\"numLanes\": scaling,\n \"speed\": speed\n },\n # fake edge used for visualization\n {\n \"id\": \"fake_edge\",\n \"from\": \"fake1\",\n \"to\": \"fake2\",\n \"length\": 1,\n \"spreadType\": \"center\",\n \"numLanes\": scaling,\n \"speed\": speed\n }\n ]\n\n return edges\n\n def specify_connections(self, net_params):\n \"\"\"See parent class.\"\"\"\n scaling = net_params.additional_params.get(\"scaling\", 1)\n conn_dic = {}\n conn = []\n for i in range(4 * scaling):\n conn += [{\n \"from\": \"3\",\n \"to\": \"4\",\n \"fromLane\": i,\n \"toLane\": int(np.floor(i / 2))\n }]\n conn_dic[\"4\"] = conn\n conn = []\n for i in range(2 * scaling):\n conn += [{\n \"from\": \"4\",\n \"to\": \"5\",\n \"fromLane\": i,\n \"toLane\": int(np.floor(i / 2))\n }]\n conn_dic[\"5\"] = conn\n return conn_dic\n\n def specify_centroids(self, net_params):\n \"\"\"See parent class.\"\"\"\n centroids = []\n centroids += [{\n \"id\": \"1\",\n \"from\": None,\n \"to\": \"1\",\n \"x\": -30,\n \"y\": 0,\n }]\n centroids += [{\n \"id\": \"1\",\n \"from\": \"5\",\n \"to\": None,\n \"x\": 985 + 30,\n \"y\": 0,\n }]\n return centroids\n\n def specify_routes(self, net_params):\n \"\"\"See parent class.\"\"\"\n rts = {\n \"1\": [\"1\", \"2\", \"3\", \"4\", \"5\"],\n \"2\": [\"2\", \"3\", \"4\", \"5\"],\n \"3\": [\"3\", \"4\", \"5\"],\n \"4\": [\"4\", \"5\"],\n \"5\": [\"5\"]\n }\n\n return rts\n\n def specify_edge_starts(self):\n \"\"\"See parent class.\"\"\"\n return [(\"1\", 0), (\"2\", 100), (\"3\", 405), (\"4\", 425), (\"5\", 580)]\n\n def get_bottleneck_lanes(self, lane):\n \"\"\"Return the reduced number of lanes.\"\"\"\n return [int(lane / 2), int(lane / 4)]\n"
] |
[
[
"numpy.std",
"numpy.mean",
"numpy.zeros"
],
[
"numpy.floor"
]
] |
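
A minimal sketch of the lane-drop mapping used by `BottleneckScenario.specify_connections()` above: every pair of incoming lanes zips onto a single outgoing lane via `floor(i / 2)`:

    import numpy as np

    scaling = 1
    conn = [{"from": "3", "to": "4", "fromLane": i, "toLane": int(np.floor(i / 2))}
            for i in range(4 * scaling)]
    print([(c["fromLane"], c["toLane"]) for c in conn])
    # -> [(0, 0), (1, 0), (2, 1), (3, 1)]: 4n incoming lanes reduce to 2n
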
kaijennissen/gluon-ts
|
[
"754fdd4184e2c19b8d667eb97d5ae20d486e3cd3"
] |
[
"src/gluonts/model/rotbaum/_predictor.py"
] |
[
"# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\").\n# You may not use this file except in compliance with the License.\n# A copy of the License is located at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# or in the \"license\" file accompanying this file. This file is distributed\n# on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n# express or implied. See the License for the specific language governing\n# permissions and limitations under the License.\n\n# Standard library imports\nfrom typing import Iterator, List, Optional\nfrom pathlib import Path\nimport json\n\n\n# Third-party imports\nimport numpy as np\nimport pandas as pd\nfrom itertools import chain\nimport concurrent.futures\nimport logging\n\n# First-party imports\nimport gluonts\nfrom gluonts.core.component import validated, equals\nfrom gluonts.core import fqname_for\nfrom gluonts.core.serde import dump_json, load_json\nfrom gluonts.dataset.common import Dataset\nfrom gluonts.model.forecast import Forecast\nfrom gluonts.model.forecast_generator import log_once\nfrom gluonts.model.predictor import RepresentablePredictor\nfrom gluonts.support.pandas import forecast_start\nfrom gluonts.dataset.loader import DataBatch\n\n# Relative imports\nfrom ._preprocess import PreprocessOnlyLagFeatures, Cardinality\nfrom ._model import QRX, QuantileReg, QRF\n\nlogger = logging.getLogger(__name__)\n\n\nclass RotbaumForecast(Forecast):\n \"\"\"\n Implements the quantile function in Forecast for TreePredictor,\n as well as a new estimate_dists function for estimating a sampling of the\n conditional distribution of the value of each of the steps in the\n forecast horizon (independently).\n \"\"\"\n\n @validated()\n def __init__(\n self,\n models: List,\n featurized_data: List,\n start_date: pd.Timestamp,\n freq,\n prediction_length: int,\n ):\n self.models = models\n self.featurized_data = featurized_data\n self.start_date = start_date\n self.freq = freq\n self.prediction_length = prediction_length\n self.item_id = None\n self.lead_time = None\n\n def quantile(self, q: float) -> np.array:\n \"\"\"\n Returns np.array, where the i^th entry is the estimate of the q\n quantile of the conditional distribution of the value of the i^th\n step in the forecast horizon.\n \"\"\"\n assert 0 <= q <= 1\n return np.array(\n list(\n chain.from_iterable(\n model.predict(self.featurized_data, q)\n for model in self.models\n )\n )\n )\n\n def estimate_dists(self) -> np.array:\n \"\"\"\n Returns np.array, where the i^th entry is an estimated sampling from\n the conditional distribution of the value of the i^th step in the\n forecast horizon.\n \"\"\"\n return np.array(\n list(\n chain.from_iterable(\n model.estimate_dist(self.featurized_data)\n for model in self.models\n )\n )\n )\n\n\nclass TreePredictor(RepresentablePredictor):\n \"\"\"\n A predictor that uses a QRX model for each of the steps in the forecast\n horizon. (In other words, there's a total of prediction_length many\n models being trained. In particular, this predictor does not learn a\n multivariate distribution.) 
The list of these models is saved under\n self.model_list.\n \"\"\"\n\n @validated()\n def __init__(\n self,\n freq: str,\n prediction_length: int,\n n_ignore_last: int = 0,\n lead_time: int = 0,\n max_n_datapts: int = 1000000,\n clump_size: int = 100, # Used only for \"QRX\" method.\n context_length: Optional[int] = None,\n use_feat_static_real: bool = False,\n use_feat_dynamic_real: bool = False,\n use_feat_dynamic_cat: bool = False,\n cardinality: Cardinality = \"auto\",\n one_hot_encode: bool = False,\n model_params: Optional[dict] = None,\n max_workers: Optional[int] = None,\n method: str = \"QRX\",\n quantiles=None, # Used only for \"QuantileRegression\" method.\n ) -> None:\n assert method in [\n \"QRX\",\n \"QuantileRegression\",\n \"QRF\",\n ], \"method has to be either 'QRX', 'QuantileRegression', or 'QRF'\"\n self.method = method\n self.lead_time = lead_time\n self.context_length = (\n context_length if context_length is not None else prediction_length\n )\n self.preprocess_object = PreprocessOnlyLagFeatures(\n self.context_length,\n forecast_horizon=prediction_length,\n stratify_targets=False,\n n_ignore_last=n_ignore_last,\n max_n_datapts=max_n_datapts,\n use_feat_static_real=use_feat_static_real,\n use_feat_dynamic_real=use_feat_dynamic_real,\n use_feat_dynamic_cat=use_feat_dynamic_cat,\n cardinality=cardinality,\n one_hot_encode=one_hot_encode,\n )\n\n assert (\n context_length is None or context_length > 0\n ), \"The value of `context_length` should be > 0\"\n assert (\n prediction_length > 0\n or use_feat_dynamic_cat\n or use_feat_dynamic_real\n or use_feat_static_real\n or cardinality\n != \"ignore\" # TODO: Figure out how to include 'auto' with no feat_static_cat in this check\n ), (\n \"The value of `prediction_length` should be > 0 or there should be features for model training and \"\n \"prediction \"\n )\n\n self.model_params = model_params if model_params else {}\n self.prediction_length = prediction_length\n self.freq = freq\n self.max_workers = max_workers\n self.clump_size = clump_size\n self.quantiles = quantiles\n self.model_list = None\n\n logger.info(\n \"If using the Evaluator class with a TreePredictor, set num_workers=0.\"\n )\n\n def train(self, training_data):\n assert training_data\n assert self.freq is not None\n if next(iter(training_data))[\"start\"].freq is not None:\n assert self.freq == next(iter(training_data))[\"start\"].freq\n self.preprocess_object.preprocess_from_list(\n ts_list=list(training_data), change_internal_variables=True\n )\n feature_data, target_data = (\n self.preprocess_object.feature_data,\n self.preprocess_object.target_data,\n )\n n_models = self.prediction_length\n logging.info(f\"Length of forecast horizon: {n_models}\")\n if self.method == \"QuantileRegression\":\n self.model_list = [\n QuantileReg(params=self.model_params, quantiles=self.quantiles)\n for _ in range(n_models)\n ]\n elif self.method == \"QRF\":\n self.model_list = [\n QRF(params=self.model_params) for _ in range(n_models)\n ]\n elif self.method == \"QRX\":\n self.model_list = [\n QRX(\n xgboost_params=self.model_params,\n clump_size=self.clump_size,\n )\n for _ in range(n_models)\n ]\n with concurrent.futures.ThreadPoolExecutor(\n max_workers=self.max_workers\n ) as executor:\n for n_step, model in enumerate(self.model_list):\n logger.info(\n f\"Training model for step no. 
{n_step + 1} in the forecast\"\n f\" horizon\"\n )\n executor.submit(\n model.fit, feature_data, np.array(target_data)[:, n_step]\n )\n\n return self\n\n def predict(\n self, dataset: Dataset, num_samples: Optional[int] = None\n ) -> Iterator[Forecast]:\n \"\"\"\n Returns a dictionary taking each quantile to a list of floats,\n which are the predictions for that quantile as you run over\n (time_steps, time_series) lexicographically. So: first it would give\n the quantile prediction for the first time step for all time series,\n then the second time step for all time series, and so forth.\n \"\"\"\n context_length = self.preprocess_object.context_window_size\n\n if num_samples:\n log_once(\n \"Forecast is not sample based. Ignoring parameter `num_samples` from predict method.\"\n )\n\n for ts in dataset:\n featurized_data = self.preprocess_object.make_features(\n ts, starting_index=len(ts[\"target\"]) - context_length\n )\n yield RotbaumForecast(\n self.model_list,\n [featurized_data],\n start_date=forecast_start(ts),\n prediction_length=self.prediction_length,\n freq=self.freq,\n )\n"
] |
[
[
"numpy.array"
]
] |
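
A minimal sketch of how `RotbaumForecast.quantile()` above stitches one prediction per forecast step into a single array; the per-step models here are stand-in lambdas rather than real QRX/QuantileReg objects:

    import numpy as np
    from itertools import chain

    models = [lambda X, q, step=step: [step + q] for step in range(3)]  # fake per-step models
    featurized_data = [[0.0]]  # placeholder feature rows

    q = 0.5
    forecast = np.array(list(chain.from_iterable(
        model(featurized_data, q) for model in models)))
    print(forecast)  # -> [0.5 1.5 2.5], one value per step in the horizon
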
robin-oval/compas
|
[
"e4dc751e95648c5ffb9449f239f3879d39f19887"
] |
[
"src/compas/geometry/_primitives/curve.py"
] |
[
"from __future__ import print_function\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nfrom math import factorial\n\nfrom compas.geometry.basic import scale_vector\nfrom compas.geometry.basic import normalize_vector\nfrom compas.geometry.basic import add_vectors\nfrom compas.geometry.basic import subtract_vectors\n\nfrom compas.geometry._primitives import Point\nfrom compas.geometry._primitives import Vector\n\n\n__all__ = ['Bezier']\n\n\ndef binomial(n, k):\n \"\"\"Returns the binomial coefficient of the :math:`x^k` term in the\n polynomial expansion of the binmoial power :math:`(1 + x)^n` [wikipedia2017j]_.\n\n Notes\n -----\n Arranging binomial coefficients into rows for successive values of `n`,\n and in which `k` ranges from 0 to `n`, gives a triangular array known as\n Pascal's triangle.\n\n Parameters\n ----------\n n : int\n The number of terms.\n k : int\n The index of the coefficient.\n\n Returns\n -------\n int\n The coefficient.\n\n \"\"\"\n return factorial(n) / float(factorial(k) * factorial(n - k))\n\n\ndef bernstein(n, k, t):\n \"\"\"k:sup:`th` of `n` + 1 Bernstein basis polynomials of degree `n`. A\n weighted linear combination of these basis polynomials is called a Bernstein\n polynomial [wikipedia2017k]_.\n\n Notes\n -----\n When constructing Bezier curves, the weights are simply the coordinates\n of the control points of the curve.\n\n Parameters\n ----------\n n : int\n The degree of the polynomial.\n k : int\n The number of the basis polynomial.\n t : float\n The variable.\n\n Returns\n -------\n float\n The value of the Bernstein basis polynomial at `t`.\n\n \"\"\"\n if k < 0:\n return 0\n if k > n:\n return 0\n return binomial(n, k) * t ** k * (1 - t) ** (n - k)\n\n\nclass BezierException(Exception):\n pass\n\n\nclass Bezier(object):\n \"\"\"A Bezier curve.\n\n A Bezier curve of degree `n` is a linear combination of `n` + 1 Bernstein\n basis polynomials of degree `n`.\n\n Parameters\n ----------\n points : sequence\n A sequence of control points, represented by their location in 3D space.\n\n Attributes\n ----------\n points : list\n The control points.\n degree : int\n The degree of the curve.\n\n \"\"\"\n def __init__(self, points):\n self._points = []\n self.points = points\n\n @property\n def points(self):\n \"\"\"The control points.\n\n Parameters\n ----------\n points : sequence\n A sequence of control point locations in 3d space.\n\n Returns\n -------\n list\n A list of ``Point`` objects.\n\n \"\"\"\n return self._points\n\n @points.setter\n def points(self, points):\n if points:\n self._points = [Point(*point) for point in points]\n\n @property\n def degree(self):\n \"\"\"The degree of the curve.\"\"\"\n return len(self.points) - 1\n\n def compute_point(self, t):\n \"\"\"Compute a point on the curve.\n\n Parameters\n ----------\n t : float\n The value of the curve parameter. 
Must be between 0 and 1.\n\n Returns\n -------\n Point\n the corresponding point on the curve.\n\n \"\"\"\n n = self.degree\n point = Point(0, 0, 0)\n for i, p in enumerate(self.points):\n b = bernstein(n, i, t)\n point += p * b\n return point\n\n def compute_tangent(self, t):\n n = self.degree\n v = Vector(0, 0, 0)\n for i, p in enumerate(self.points):\n a = bernstein(n - 1, i - 1, t)\n b = bernstein(n - 1, i, t)\n c = n * (a - b)\n v += p * c\n v.unitize()\n return v\n\n def compute_locus(self, resolution=100):\n \"\"\"Compute the locus of all points on the curve.\n\n Parameters\n ----------\n resolution : int\n The number of intervals at which a point on the\n curve should be computed. Defaults to 100.\n\n Returns\n -------\n list\n\n \"\"\"\n locus = []\n divisor = float(resolution - 1)\n for i in range(resolution):\n t = i / divisor\n locus.append(self.compute_point(t))\n return locus\n\n def draw(self, params=None):\n import matplotlib.pyplot as plt\n locus = self.compute_locus()\n x, y, _ = zip(*locus)\n plt.plot(x, y, '-b')\n x, y, _ = zip(* self.points)\n plt.plot(x, y, 'ro')\n if params is not None:\n for t in params:\n p0 = self.compute_point(t)\n v = self.compute_tangent(t)\n p1 = p0 + v\n plt.plot([p0[0], p1[0]], [p0[1], p1[1]], '-k')\n plt.plot([p0[0]], [p0[1]], 'ok')\n ax = plt.gca()\n ax.set_aspect('equal')\n plt.show()\n\n\n# ==============================================================================\n# Main\n# ==============================================================================\n\nif __name__ == '__main__':\n\n curve = Bezier([[0, 0, 0], [1, -1, 0], [2, +1, 0], [3, 0, 0]])\n curve.draw(params=[0.1, 0.2, 0.3, 0.4, 0.5])\n"
] |
[
[
"matplotlib.pyplot.plot",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.show"
]
] |
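The `binomial`/`bernstein` helpers in the record above define a Bezier point as a Bernstein-weighted sum of control points. A self-contained sketch of the same evaluation, independent of compas (the demo control points are copied from the snippet's __main__ block):

from math import factorial

def binomial(n, k):
    # C(n, k), the coefficient of x^k in (1 + x)^n
    return factorial(n) // (factorial(k) * factorial(n - k))

def bezier_point(points, t):
    # B(t) = sum_k C(n, k) * t^k * (1 - t)^(n - k) * P_k
    n = len(points) - 1
    out = [0.0, 0.0, 0.0]
    for k, p in enumerate(points):
        b = binomial(n, k) * t ** k * (1 - t) ** (n - k)
        out = [c + b * pc for c, pc in zip(out, p)]
    return out

print(bezier_point([[0, 0, 0], [1, -1, 0], [2, 1, 0], [3, 0, 0]], 0.5))  # [1.5, 0.0, 0.0]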
mbollmann/perceptron
|
[
"05e00af80491bed67859f09a71f15a3f0cf4b51b"
] |
[
"test/helper_classes.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"Contains helper classes, such as feature extractors, that are used in the tests.\n\"\"\"\n\nimport numpy as np\nfrom mmb_perceptron.feature_extractor import FeatureExtractor\nfrom mmb_perceptron.feature_extractor.generator import GenerativeExtractor\n\nclass BinaryFeatureExtractor(FeatureExtractor):\n _binary_featureset = ('bias', 'lhs_true', 'rhs_true')\n\n def _init_independent(self, dataset):\n self._label_mapper.extend(self._binary_featureset)\n _init_sequenced = _init_independent\n\n def _get_independent(self, x):\n features = {'bias': 1.0}\n if x.startswith(\"True\"):\n features['lhs_true'] = 1.0\n if x.endswith(\"True\"):\n features['rhs_true'] = 1.0\n return features\n\n def _get_sequenced(self, seq, pos, history=None):\n return self._get_independent(seq[pos])\n\n\nclass BinaryFeatureGenerator(BinaryFeatureExtractor, GenerativeExtractor):\n def _generate_independent(self, x, truth=None):\n a = 1 if x.startswith(\"True\") else 0\n b = 1 if x.endswith(\"True\") else 0\n f_false = {\n 'bias && false': 1.0,\n 'lhs_true && false': 1.0 if a == 1 else 0.0,\n 'rhs_true && false': 1.0 if b == 1 else 0.0\n }\n f_true = {\n 'bias && true': 1.0,\n 'lhs_true && true': 1.0 if a == 1 else 0.0,\n 'rhs_true && true': 1.0 if b == 1 else 0.0\n }\n if truth == 'True':\n return ([f_true, f_false], ['True', 'False'])\n else:\n return ([f_false, f_true], ['False', 'True'])\n\n\nclass CharacterLengthGenerator(GenerativeExtractor):\n def _generate_independent(self, x, truth=None):\n features, labels = [], []\n for d in range(1, 5):\n label = '{0}x{1}'.format(d, x[0])\n feature = {'inputlength==d': 1.0 if len(x) == d else 0.0,\n 'length_{0}'.format(d): 1.0,\n 'bias_{0}'.format(label): 1.0}\n insert_pos = 0 if label == truth else len(features)\n features.insert(insert_pos, feature)\n labels.insert(insert_pos, label)\n return (features, labels)\n\n\nclass ContextualFeatureExtractor(FeatureExtractor):\n _left_context_size = 1\n _right_context_size = 1\n\n def _get_sequenced(self, seq, pos, history=None):\n features = {'bias': 1.0}\n features['this==' + seq[pos]] = 1.0\n features['prev==' + seq[pos - 1]] = 1.0\n features['next==' + seq[pos + 1]] = 1.0\n features['prev_guess==' + history[pos - 1]] = 1.0\n return features\n\n\nclass ContextualFeatureGenerator(ContextualFeatureExtractor, GenerativeExtractor):\n _all_labels = [\"ONE\", \"TWO\", \"TWELVE\", \"ZERO\"]\n\n def _generate_sequenced(self, seq, pos, history=None, truth=None):\n features, labels = [], []\n old_feats = self._get_sequenced(seq, pos, history=history)\n # Combinatorial feature explosion -- i.e., we mimic what the\n # CombinatorialPerceptron does internally:\n for label in self._all_labels:\n feats = {'{0} && label={1}'.format(f, label): 1.0 for f in old_feats}\n insert_pos = 0 if label == truth else len(features)\n features.insert(insert_pos, feats)\n labels.insert(insert_pos, label)\n return (features, labels)\n\n\nclass NumberFeatureGenerator(GenerativeExtractor):\n def _init_independent(self, dataset):\n self._label_mapper.extend(range(6))\n\n def _generate_independent(self, x, truth=None):\n (a, b) = x\n f_false = {\n 'a && false': a,\n 'b && false': b,\n 'bias && false': 1.0\n }\n f_true = {\n 'a && true': a,\n 'b && true': b,\n 'bias && true': 1.0\n }\n if truth == 1:\n return ([f_true, f_false], [1, 0])\n else:\n return ([f_false, f_true], [0, 1])\n\n def _generate_vector_independent(self, x, truth=None, grow=True):\n (a, b) = x\n f_false = np.array([a,b,0,0,0,1])\n f_true = np.array([0,0,a,b,1,0])\n if 
truth == 1:\n return (np.array([f_true, f_false]), [1, 0])\n else:\n return (np.array([f_false, f_true]), [0, 1])\n\n\nclass ThreeClassesFeatureExtractor(FeatureExtractor):\n _featureset = ('bias', 'a', 'b', 'c', 'd')\n\n def _init_independent(self, dataset):\n self._label_mapper.extend(self._featureset)\n _init_sequenced = _init_independent\n\n def _get_independent(self, x):\n features = {'bias': 1.0}\n for feat in self._featureset:\n if feat in x:\n features[feat] = 1.0\n return features\n\n def _get_sequenced(self, seq, pos, history=None):\n return self._get_independent(seq[pos])\n\n"
] |
[
[
"numpy.array"
]
] |
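For reference, the feature dictionaries produced by `BinaryFeatureExtractor._get_independent` above are easy to check by hand; a standalone re-implementation for illustration (it does not use the mmb_perceptron package):

def get_independent(x):
    features = {'bias': 1.0}
    if x.startswith("True"):
        features['lhs_true'] = 1.0
    if x.endswith("True"):
        features['rhs_true'] = 1.0
    return features

print(get_independent("True and False"))   # {'bias': 1.0, 'lhs_true': 1.0}
print(get_independent("False and True"))   # {'bias': 1.0, 'rhs_true': 1.0}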
lh-wang/ACC
|
[
"3387a2cd585bd9dee581ab53d64cb0e47d870bbd"
] |
[
"run_dataset.py"
] |
[
"import tensorflow as tf\nimport scipy.misc\nimport model\nimport cv2\nfrom subprocess import call\n\nsess = tf.InteractiveSession()\nsaver = tf.train.Saver()\nsaver.restore(sess, \"save/model.ckpt\")\n\nimg = cv2.imread('steering_wheel_image.jpg',0)\nrows,cols = img.shape\n\nsmoothed_angle = 0\n\ni = 0\nwhile(cv2.waitKey(10) != ord('q')):\n full_image = scipy.misc.imread(\"driving_dataset/\" + str(i) + \".jpg\", mode=\"RGB\")\n image = scipy.misc.imresize(full_image[-150:], [66, 200]) / 255.0\n degrees = model.y.eval(feed_dict={model.x: [image], model.keep_prob: 1.0})[0][0] * 180.0 / scipy.pi\n call(\"clear\")\n print(\"Predicted steering angle: \" + str(degrees) + \" degrees\")\n cv2.imshow(\"frame\", cv2.cvtColor(full_image, cv2.COLOR_RGB2BGR))\n #make smooth angle transitions by turning the steering wheel based on the difference of the current angle\n #and the predicted angle\n smoothed_angle += 0.2 * pow(abs((degrees - smoothed_angle)), 2.0 / 3.0) * (degrees - smoothed_angle) / abs(degrees - smoothed_angle)\n M = cv2.getRotationMatrix2D((cols/2,rows/2),-smoothed_angle,1)\n dst = cv2.warpAffine(img,M,(cols,rows))\n cv2.imshow(\"steering wheel\", dst)\n i += 1\n\ncv2.destroyAllWindows()\n"
] |
[
[
"tensorflow.train.Saver",
"tensorflow.InteractiveSession"
]
] |
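The smoothing line in run_dataset.py above moves the displayed wheel part of the way toward each prediction, with a step that grows sublinearly in the error (|delta|^(2/3)) so large jumps are damped. A sketch of the update in isolation (the 0.2 rate is copied from the script; the zero-delta guard mirrors the fix above):

def smooth(angle, degrees, rate=0.2):
    delta = degrees - angle
    if delta == 0.0:
        return angle
    # step = rate * |delta|^(2/3), applied in the direction of the error
    return angle + rate * abs(delta) ** (2.0 / 3.0) * (1.0 if delta > 0 else -1.0)

angle = 0.0
for _ in range(5):
    angle = smooth(angle, 30.0)   # creeps toward the 30-degree prediction
print(angle)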
Mutefish0/graduation-project
|
[
"b6c47e946a6ed2fe25389881828e15b5e522eeb5"
] |
[
"preprocess.py"
] |
[
"#coding=utf8\n\nimport numpy as np\nfrom PIL import Image, ImageDraw\nimport sys\nsys.path.append('tools')\nfrom tool import binImg2vectors, img2binImg, random_rgb\nfrom sklearn.cluster import KMeans\n\nim = Image.open('./res/B02_E36518_0.jpg')\n\nwidth, height = im.size\n\ngrey = im.convert('L')\n\nvectors = []\n\nfor w in xrange(width):\n for h in xrange(height):\n px = grey.getpixel((w, h))\n if(px > 100):\n vectors.append([w, h])\n\nkm = KMeans(n_clusters=7, n_init=12)\nkm.fit(vectors)\nmeans = km.cluster_centers_\n\ndraw = ImageDraw.Draw(grey)\n\n\"\"\"\nfor mean in means:\n draw.ellipse([mean[0]-1,mean[1]-1,mean[0]+1,mean[1]+1], fill='skyblue')\n\"\"\"\n\nmeans = sorted(means, cmp= lambda x,y: int(x[0]-y[0]))\n\nx1 = int(means[0][0])\nx2 = int(means[1][0])\nlineWidth = 2\nminSum = float('inf')\nsump = 0\nminX = 0\n\nfor x in range(x2)[x1:]:\n sump = 0\n for w in range(x + lineWidth + 1)[x:]:\n for h in xrange(height):\n sump += grey.getpixel((w, h))\n if sump < minSum:\n minSum = sump\n minX = x\n\ndraw.line(((minX,0), (minX,height)), 'red')\n\ngrey.show()\n"
] |
[
[
"sklearn.cluster.KMeans"
]
] |
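The clustering step above groups bright-pixel coordinates with scikit-learn's KMeans; a minimal runnable sketch of the same call on synthetic coordinates (the two blob positions are arbitrary test values):

import numpy as np
from sklearn.cluster import KMeans

rng = np.random.default_rng(0)
pts = np.vstack([rng.normal((10.0, 10.0), 1.0, (50, 2)),
                 rng.normal((40.0, 12.0), 1.0, (50, 2))])

km = KMeans(n_clusters=2, n_init=10).fit(pts)
centers = sorted(km.cluster_centers_.tolist())   # sort by x, as the script does
print(centers)   # roughly [[10, 10], [40, 12]]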
raghavaro/strategize
|
[
"d8ec85b487fe5512689755cdc48c3ad7f76cff12"
] |
[
"src/strategize/analysis.py"
] |
[
"import numpy as np\n\ndef find_pareto_optimal_outcomes(game):\n # Assuming 2p game with 2 choices each \n num_rows = 4\n num_columns = 2\n # change shape from 2,2,2 to 4,2 \n utilities_row = np.reshape(game.u, (num_rows, num_columns))\n # add index to each utility\n numbers = np.vstack(np.arange(num_rows))\n utilities_row = np.concatenate((numbers, utilities_row), axis=1)\n \n # sort by player 1's utilities\n sorted_utilities_row = utilities_row[np.argsort(utilities_row[:,1])]\n \n dominated_indices = []\n\n for i in range(0, num_rows-1):\n original_index, p1_utility, p2_utility = sorted_utilities_row[i]\n for j in range(i+1, num_rows):\n if (sorted_utilities_row[j][2] >= p2_utility\n and sorted_utilities_row[j][1] > p1_utility)\\\n or (sorted_utilities_row[j][2] > p2_utility\n and sorted_utilities_row[j][1] >= p1_utility):\n dominated_indices.append(original_index)\n break\n \n # create an array for pareto efficiency that stores 1 for pareto efficient outcomes and 0 for pareto dominated outcomes\n pareto_efficiency_array = np.vstack(np.array([0 if x in dominated_indices else 1 for x in np.arange(num_rows)]))\n utilities = np.concatenate((utilities_row, pareto_efficiency_array), axis=1).reshape(2,2,4)\n\n return utilities"
] |
[
[
"numpy.reshape",
"numpy.arange",
"numpy.argsort",
"numpy.concatenate"
]
] |
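The domination test in find_pareto_optimal_outcomes above marks an outcome as dominated when another outcome is at least as good for both players and strictly better for at least one. A hedged numeric check of that rule with prisoner's-dilemma-style payoffs (the game values are invented):

import numpy as np

u = np.array([[[3, 3], [0, 5]],
              [[5, 0], [1, 1]]])     # u[i][j] = (p1 payoff, p2 payoff)
rows = u.reshape(4, 2)
dominated = [
    any((rows[j] >= rows[k]).all() and (rows[j] > rows[k]).any()
        for j in range(4) if j != k)
    for k in range(4)
]
print(dominated)   # only (1, 1) is Pareto dominated: [False, False, False, True]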
onlyrichbrain/pyprobml
|
[
"cc98c88fd6334d65fe5bc7975c4f27ed9fcf21e8"
] |
[
"scripts/pcaDemo2d.py"
] |
[
"import superimport\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.decomposition import PCA\nimport os\nimport pyprobml_utils as pml\n\nnp.random.seed(42)\n\n# Generate Data\nn = 9\nvar = 3\ncorr = .5\n\ncov_mat = [[var, corr * var], [corr * var, var]]\nX = np.random.multivariate_normal([0, 0], cov_mat, n)\n\n# Project Data onto PCA line\nsubspace_dim = 1\npca = PCA(subspace_dim)\nX_reconstr = pca.inverse_transform(pca.fit_transform(X))\n\n# Create figure and save.\nfig, ax = plt.subplots(figsize=(5, 5))\n\n# Plot raw data\nax.scatter(X[:, 0], X[:, 1], marker='o', facecolor='none', edgecolor='red')\nX_mean = np.mean(X, axis=0)\nax.scatter(X_mean[0], X_mean[1], facecolor='red')\n\n# Plot PCA line\nlow_point = X_mean - 10 * pca.components_.reshape(-1)\nhigh_point = X_mean + 10 * pca.components_.reshape(-1)\nax.plot([low_point[0], high_point[0]], [low_point[1], high_point[1]], color='magenta')\nax.set_ylim(-5, 5)\nax.set_xlim(-5, 5)\n\n# Plot projected points\nax.scatter(X_reconstr[:, 0], X_reconstr[:, 1], marker='x')\n\n# Plot projection lines\nfor (xi1, xi2), (xi1_rec, xi2_rec) in zip(X, X_reconstr):\n ax.plot([xi1, xi1_rec], [xi2, xi2_rec], color='blue')\n\n\npml.savefig(\"pcaDemo2dProjection.pdf\")\n\nplt.show()"
] |
[
[
"numpy.random.seed",
"numpy.random.multivariate_normal",
"matplotlib.pyplot.subplots",
"numpy.mean",
"matplotlib.pyplot.show",
"sklearn.decomposition.PCA"
]
] |
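pcaDemo2d.py reconstructs points with PCA's fit_transform/inverse_transform round trip; for a non-whitened sklearn PCA this is just centre, project onto the components, and un-centre. A sketch verifying the equivalence by hand:

import numpy as np
from sklearn.decomposition import PCA

rng = np.random.default_rng(42)
X = rng.multivariate_normal([0, 0], [[3.0, 1.5], [1.5, 3.0]], 9)

pca = PCA(1).fit(X)
X_rec = pca.inverse_transform(pca.transform(X))

W = pca.components_                           # shape (1, 2)
X_hand = (X - pca.mean_) @ W.T @ W + pca.mean_
print(np.allclose(X_rec, X_hand))             # True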
gasx-tesla/openpilot
|
[
"994cc307186146ee65bc41af7744d67a502142d3"
] |
[
"selfdrive/controls/lib/longitudinal_planner.py"
] |
[
"#!/usr/bin/env python3\nimport math\nimport numpy as np\nfrom common.numpy_fast import interp\n\nimport cereal.messaging as messaging\nfrom common.realtime import DT_MDL\nfrom selfdrive.modeld.constants import T_IDXS\nfrom selfdrive.config import Conversions as CV\nfrom selfdrive.controls.lib.longcontrol import LongCtrlState\nfrom selfdrive.controls.lib.longitudinal_mpc_lib.long_mpc import LongitudinalMpc\nfrom selfdrive.controls.lib.longitudinal_mpc_lib.long_mpc import T_IDXS as T_IDXS_MPC\nfrom selfdrive.controls.lib.drive_helpers import V_CRUISE_MAX, CONTROL_N\nfrom selfdrive.swaglog import cloudlog\n\nLON_MPC_STEP = 0.2 # first step is 0.2s\nAWARENESS_DECEL = -0.2 # car smoothly decel at .2m/s^2 when user is distracted\nA_CRUISE_MIN = -1.2\nA_CRUISE_MAX_VALS = [1.2, 1.2, 0.8, 0.6]\nA_CRUISE_MAX_BP = [0., 15., 25., 40.]\n\n# Lookup table for turns\n_A_TOTAL_MAX_V = [1.7, 3.2]\n_A_TOTAL_MAX_BP = [20., 40.]\n\n\ndef get_max_accel(v_ego):\n return interp(v_ego, A_CRUISE_MAX_BP, A_CRUISE_MAX_VALS)\n\n\ndef limit_accel_in_turns(v_ego, angle_steers, a_target, CP):\n \"\"\"\n This function returns a limited long acceleration allowed, depending on the existing lateral acceleration\n this should avoid accelerating when losing the target in turns\n \"\"\"\n\n a_total_max = interp(v_ego, _A_TOTAL_MAX_BP, _A_TOTAL_MAX_V)\n a_y = v_ego ** 2 * angle_steers * CV.DEG_TO_RAD / (CP.steerRatio * CP.wheelbase)\n a_x_allowed = math.sqrt(max(a_total_max ** 2 - a_y ** 2, 0.))\n\n return [a_target[0], min(a_target[1], a_x_allowed)]\n\n\nclass Planner:\n def __init__(self, CP, init_v=0.0, init_a=0.0):\n self.CP = CP\n self.mpc = LongitudinalMpc()\n\n self.fcw = False\n\n self.v_desired = init_v\n self.a_desired = init_a\n self.alpha = np.exp(-DT_MDL / 2.0)\n\n self.v_desired_trajectory = np.zeros(CONTROL_N)\n self.a_desired_trajectory = np.zeros(CONTROL_N)\n self.j_desired_trajectory = np.zeros(CONTROL_N)\n\n def update(self, sm):\n v_ego = sm['carState'].vEgo\n a_ego = sm['carState'].aEgo\n\n v_cruise_kph = sm['controlsState'].vCruise\n v_cruise_kph = min(v_cruise_kph, V_CRUISE_MAX)\n v_cruise = v_cruise_kph * CV.KPH_TO_MS\n\n long_control_state = sm['controlsState'].longControlState\n force_slow_decel = sm['controlsState'].forceDecel\n\n prev_accel_constraint = True\n enabled = (long_control_state == LongCtrlState.pid) or (long_control_state == LongCtrlState.stopping)\n if not enabled or sm['carState'].gasPressed:\n self.v_desired = v_ego\n self.a_desired = a_ego\n # Smoothly changing between accel trajectory is only relevant when OP is driving\n prev_accel_constraint = False\n\n # Prevent divergence, smooth in current v_ego\n self.v_desired = self.alpha * self.v_desired + (1 - self.alpha) * v_ego\n self.v_desired = max(0.0, self.v_desired)\n\n accel_limits = [A_CRUISE_MIN, get_max_accel(v_ego)]\n accel_limits_turns = limit_accel_in_turns(v_ego, sm['carState'].steeringAngleDeg, accel_limits, self.CP)\n if force_slow_decel:\n # if required so, force a smooth deceleration\n accel_limits_turns[1] = min(accel_limits_turns[1], AWARENESS_DECEL)\n accel_limits_turns[0] = min(accel_limits_turns[0], accel_limits_turns[1])\n # clip limits, cannot init MPC outside of bounds\n accel_limits_turns[0] = min(accel_limits_turns[0], self.a_desired + 0.05)\n accel_limits_turns[1] = max(accel_limits_turns[1], self.a_desired - 0.05)\n self.mpc.set_accel_limits(accel_limits_turns[0], accel_limits_turns[1])\n self.mpc.set_cur_state(self.v_desired, self.a_desired)\n self.mpc.update(sm['carState'], sm['radarState'], v_cruise, 
prev_accel_constraint=prev_accel_constraint)\n self.v_desired_trajectory = np.interp(T_IDXS[:CONTROL_N], T_IDXS_MPC, self.mpc.v_solution)\n self.a_desired_trajectory = np.interp(T_IDXS[:CONTROL_N], T_IDXS_MPC, self.mpc.a_solution)\n self.j_desired_trajectory = np.interp(T_IDXS[:CONTROL_N], T_IDXS_MPC[:-1], self.mpc.j_solution)\n\n # TODO counter is only needed because radar is glitchy, remove once radar is gone\n self.fcw = self.mpc.crash_cnt > 5\n if self.fcw:\n cloudlog.info(\"FCW triggered\")\n\n # Interpolate 0.05 seconds and save as starting point for next iteration\n a_prev = self.a_desired\n self.a_desired = float(interp(DT_MDL, T_IDXS[:CONTROL_N], self.a_desired_trajectory))\n self.v_desired = self.v_desired + DT_MDL * (self.a_desired + a_prev) / 2.0\n\n def publish(self, sm, pm):\n plan_send = messaging.new_message('longitudinalPlan')\n\n plan_send.valid = sm.all_alive_and_valid(service_list=['carState', 'controlsState'])\n\n longitudinalPlan = plan_send.longitudinalPlan\n longitudinalPlan.modelMonoTime = sm.logMonoTime['modelV2']\n longitudinalPlan.processingDelay = (plan_send.logMonoTime / 1e9) - sm.logMonoTime['modelV2']\n\n longitudinalPlan.speeds = [float(x) for x in self.v_desired_trajectory]\n longitudinalPlan.accels = [float(x) for x in self.a_desired_trajectory]\n longitudinalPlan.jerks = [float(x) for x in self.j_desired_trajectory]\n\n longitudinalPlan.hasLead = sm['radarState'].leadOne.status\n longitudinalPlan.longitudinalPlanSource = self.mpc.source\n longitudinalPlan.fcw = self.fcw\n\n pm.send('longitudinalPlan', plan_send)\n"
] |
[
[
"numpy.exp",
"numpy.zeros",
"numpy.interp"
]
] |
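limit_accel_in_turns in the planner above budgets total acceleration between lateral and longitudinal components, a_x <= sqrt(a_total_max^2 - a_y^2). A standalone sketch with the lookup table copied from the file (np.interp stands in for openpilot's interp; steer_ratio and wheelbase are assumed example values, not any real car's):

import math
import numpy as np

_A_TOTAL_MAX_V = [1.7, 3.2]
_A_TOTAL_MAX_BP = [20., 40.]
DEG_TO_RAD = math.pi / 180.0

def max_long_accel(v_ego, angle_steers, steer_ratio=15.0, wheelbase=2.7):
    a_total_max = np.interp(v_ego, _A_TOTAL_MAX_BP, _A_TOTAL_MAX_V)
    a_y = v_ego ** 2 * angle_steers * DEG_TO_RAD / (steer_ratio * wheelbase)
    return math.sqrt(max(a_total_max ** 2 - a_y ** 2, 0.0))

print(max_long_accel(30.0, 0.0))    # straight road: the full budget is available
print(max_long_accel(30.0, 10.0))   # turning: lateral accel eats into the budget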
physimals/quantiphyse-perfsim
|
[
"6cd209e9f9d51b52ffbd2a0b148cec6a0a64f0ca"
] |
[
"quantiphyse_datasim/struc_models.py"
] |
[
"\"\"\"\nData simulation Quantiphyse plugin\n\nStructural models, i.e. classes which return lists of different\nstructures and the corresponding partial volume maps\n\nAuthor: Martin Craig <[email protected]>\nCopyright (c) 2016-2017 University of Oxford, Martin Craig\n\"\"\"\n\nfrom __future__ import division, unicode_literals, absolute_import, print_function\n\nimport time\n\nimport numpy as np\n\nfrom quantiphyse.data import NumpyData, DataGrid, ImageVolumeManagement\nfrom quantiphyse.utils import QpException, get_plugins\nfrom quantiphyse.processes import Process\n\nfrom .model import Model, Parameter\n\ndef get_struc_models():\n ret = {}\n for cls in get_plugins(\"datasim-struc-models\"):\n ret[cls.NAME] = cls\n return ret\n\nclass StructureModel(Model):\n \"\"\"\n Base class for a structure model\n \n The ``structures`` property must be defined as a sequence of Parameter objects\n \"\"\"\n\n @property\n def structures(self):\n raise NotImplementedError()\n\n def get_simulated_data(self, data_model, param_values, output_param_maps=False):\n \"\"\"\n Generate simulated data for a given data model and parameter values\n\n :param data_model: DataModel which implements the ``timeseries`` method\n :param param_values: Mapping from structure name to sequence of parameter values\n :output_param_maps: If True, also output QpData containing maps of the \n parameter values used to generate simulated data in each voxel\n\n @return If ``output_param_maps`` is ``False``, QpData containing simulated timeseries data\n If ``output_param_maps`` is ``True``, tuple of simulated timeseries data and mapping\n of param name to parameter value map\n \"\"\"\n raise NotImplementedError()\n\nclass PartialVolumeStructureModel(Model):\n \"\"\"\n Structure model which defines the structure as a set of partial volume maps\n \"\"\"\n\n @property\n def structure_maps(self):\n \"\"\"\n :return: Mapping from name to a QpData instance containing partial volume\n maps (range 0-1) for each known structure\n \"\"\"\n raise NotImplementedError()\n\n def resamp(self, qpdata):\n \"\"\"\n Resample a map according to resampling options\n \"\"\"\n resamp_options = dict(self.options.get(\"resampling\", {}))\n if resamp_options:\n resamp_processes = get_plugins(\"processes\", \"ResampleProcess\")\n if len(resamp_processes) != 1:\n raise QpException(\"Can't identify Resampling process\")\n\n ivm = ImageVolumeManagement()\n ivm.add(qpdata)\n process = resamp_processes[0](ivm)\n resamp_options.update({\n \"data\" : qpdata.name,\n \"output-name\" : \"output_res\"\n })\n process.execute(resamp_options)\n while process.status == Process.RUNNING:\n time.sleep(1)\n\n if process.status == Process.FAILED:\n raise process.exception\n\n # FIXME hack\n process._complete()\n return ivm.data[\"output_res\"]\n else:\n return qpdata\n\n def get_simulated_data(self, data_model, param_values, output_param_maps=False):\n \"\"\"\n Generic implementation to generate test data from a set of partial volume maps\n \"\"\"\n if len(self.structure_maps) == 0:\n raise QpException(\"No structures defined\")\n\n # First check the PV maps - they should all be in the range 0-1 and not sum to\n # more than 1 in any voxel\n sum_map = None\n for name, pv_map in self.structure_maps.items():\n pv_data = pv_map.raw()\n if not np.all(pv_data >= 0):\n raise QpException(\"Partial volume map contained negative values: %s\" % name)\n if not np.all(pv_data <= 1):\n raise QpException(\"Partial volume map contained values > 1: %s\" % name)\n if sum_map is None:\n sum_map = 
np.zeros(pv_data.shape, dtype=np.float32)\n sum_map += pv_data\n\n if sum_map is not None and not np.all(sum_map <= 1):\n raise QpException(\"Partial volume maps sum to > 1 in at least one voxel\")\n\n output_data = None \n for name, pv_map in self.structure_maps.items():\n struc_values = param_values[name]\n\n # Check that there is exactly one parameter value per structure\n single_values = {}\n for k, v in struc_values.items():\n if isinstance(v, (float, int)):\n v = [v]\n if len(v) != 1:\n raise QpException(\"This structure model cannot handle multiple parameter values in a single structure\")\n single_values[k] = v[0]\n\n timeseries = data_model.get_timeseries(single_values, pv_map.grid.shape)\n if output_data is None:\n output_data = np.zeros(list(pv_map.grid.shape) + [timeseries.shape[-1]], dtype=np.float32)\n output_grid = pv_map.grid\n if output_param_maps:\n param_maps = {}\n for param in struc_values:\n param_maps[param] = np.zeros(pv_map.grid.shape, dtype=np.float32)\n\n struc_data = pv_map.raw()[..., np.newaxis] * timeseries\n output_data += struc_data\n\n if output_param_maps:\n for param, value in single_values.items():\n param_maps[param] += pv_map.raw() * value\n\n sim_data = NumpyData(output_data, grid=output_grid, name=\"sim_data\")\n\n if output_param_maps:\n for param in struc_values: \n param_maps[param] = NumpyData(param_maps[param], grid=output_grid, name=param)\n return sim_data, param_maps\n else:\n return sim_data\n\nclass UserPvModel(PartialVolumeStructureModel):\n \"\"\"\n Structural model where user supplies partial volume maps\n\n Three default tissue types are defined: Grey matter (GM), White matter (WM) and\n CSF. A partial volume map can be specified for each of these (but they are not\n all compulsary)\n\n The model also supports additional user-defined structures which come in three types:\n\n - Additional partial volume maps. These are simply added to the existing default \n structures. The total sum in any voxel must not exceed 1\n - Embeddings. These are added to the existing default maps, which are scaled to\n 1 - the embedding partial volume. For example where the embedding partial volume\n is 1 other structures have zero partial volume. Where the embedding partial volume\n is 0.8, all other structures are scaled by multiplying by 0.2.\n - Activation mask. These are ROIs that cause a specific tissue type to replaced by\n a different set of parameter values within the ROI. For example a GM activation\n mask will replace the 'usual' GM parameters with another set of parameters within\n the mask. This mask can be used to simulate activation of a particular region, e.g.\n derived from a brain atlas.\n \"\"\"\n NAME = \"user\"\n\n def __init__(self, ivm):\n PartialVolumeStructureModel.__init__(self, ivm, \"User specified partial volume maps\", title=\"Structure maps\")\n self.default_strucs = [\n Parameter(\"gm\", \"Grey matter\"),\n Parameter(\"wm\", \"White matter\"),\n Parameter(\"csf\", \"CSF\"),\n ]\n\n @property\n def structures(self):\n ret = [struc for struc in self.default_strucs if struc.name in self.options[\"pvmaps\"]]\n if self.options[\"additional\"] is not None:\n for struc in self.options[\"additional\"].values():\n ret.append(Parameter(**struc))\n return ret\n\n @property\n def structure_maps(self):\n pvmaps = self.options[\"pvmaps\"]\n try:\n ret = {}\n total_pv = None\n grid = None\n # First get the PV maps from the default structures. 
We also calculate the\n # sum of PVs in the maps as it may be needed when we insert embedded user-defined\n # structures. The first default structure map defines the grid FIXME should there\n # be an independent choice of grid?\n for struc in self.default_strucs:\n if struc.name in pvmaps:\n if grid is None:\n grid = self._ivm.data[pvmaps[struc.name]].grid\n ret[struc.name] = self._ivm.data[pvmaps[struc.name]].resample(grid)\n if total_pv is None:\n total_pv = np.zeros(grid.shape, dtype=np.float32)\n total_pv += ret[struc.name].raw()\n\n # Now deal with any user-defined additional structures. We handle these differently\n # depending on whether they are embeddings, activation masks or additional PV\n if self.options[\"additional\"] is not None:\n for struc in self.options.get(\"additional\", {}).values():\n if grid is None:\n grid = self._ivm.data[struc[\"pvmap\"]].grid\n qpdata = self._ivm.data[struc[\"pvmap\"]].resample(grid)\n struc_type = struc.get(\"struc_type\", \"\")\n if struc_type == \"embed\":\n self._add_embedding(struc, qpdata, ret)\n elif struc_type == \"act\":\n self._add_activation_mask(struc, qpdata, ret)\n elif struc_type == \"add\":\n # Just use data directly\n ret[struc[\"name\"]] = qpdata\n else:\n raise QpException(\"Unknown additional structure type: %s\" % struc_type)\n\n # Resample PV maps according to specification\n # Note that resampling/embedding/smoothing etc can lead to situations where the sum of the partial volumes\n # in the resampled maps is >1 in some voxels, even where this was not true in the\n # original maps. We need to detect this and rescale the affected voxels\n sum_maps = None\n for name in list(ret.keys()):\n resampled = self.resamp(ret[name])\n if sum_maps is None:\n sum_maps = np.zeros(resampled.raw().shape, dtype=np.float32)\n sum_maps += resampled.raw()\n ret[name] = resampled\n\n if sum_maps is not None and not np.all(sum_maps <= 1):\n # Resampling has messed up the PV sum a bit - rescale to fix this but only in affected voxels\n self.debug(\"Max PV in resampled maps is %f: rescaling in %i voxels\" % (np.max(sum_maps), np.count_nonzero(sum_maps > 1)))\n for name in list(ret.keys()):\n pv_map = ret[name].raw()\n pv_map[sum_maps > 1] /= sum_maps[sum_maps > 1]\n\n return ret\n except KeyError:\n raise\n\n def _add_embedding(self, struc, qpdata, strucs):\n \"\"\"\n Add an embedded structure\n\n For an embedding, the PV map is inserted into the output unchanged and existing\n structures PVs are downweighted by the PV of the embedding (e.g. if the embedding\n has a PV of 0.5 is a voxel then existing structures in that voxel are downweighted\n by a factor of 2). 
This will ensure that total PV does not exceed 1\n\n :param struc: Structure dictionary\n :param qpdata: Embedding PV map defined on same grid as existing data\n :param strucs: Dictionary of structure name to PV map for existing structures.\n Will be updated to include embedded structure.\n \"\"\"\n pv = qpdata.raw()\n # Valid name for QpData - structure name may contain spaces etc.\n qpdata_name = self._ivm.suggest_name(struc[\"name\"])\n if \"region\" in struc:\n # If we are using only a single region of an ROI, zero out all other regions\n pv[pv != struc[\"region\"]] = 0\n if qpdata.roi:\n # For any ROI map, voxels inside the ROI should have a PV of 1\n pv[pv > 0] = 1\n pv = pv.astype(np.float32)\n if \"sigma\" in struc:\n # Perform Gaussian smoothing\n pv = self._smooth_pv(NumpyData(pv, grid=qpdata.grid, name=qpdata_name), struc[\"sigma\"])\n reweighting = 1-pv\n for name, existing_map in strucs.items():\n new_map = NumpyData(existing_map.raw() * reweighting, grid=qpdata.grid, name=existing_map.name)\n strucs[name] = new_map\n strucs[struc[\"name\"]] = NumpyData(pv, grid=qpdata.grid, name=qpdata_name)\n\n def _smooth_pv(self, qpdata, sigma):\n \"\"\"\n Do Gaussian smoothing on a partial volume map\n\n Typically when the map is a discrete ROI and we want to smoothly blend it into\n the other tissue PV maps\n\n :param qpdata: PV map\n :param sigma: Gaussian kernel std.dev in mm\n :return: Smoothed PV map as Numpy array\n \"\"\"\n smooth_processes = get_plugins(\"processes\", \"SmoothingProcess\")\n if len(smooth_processes) != 1:\n raise QpException(\"Can't identify smoothing process\")\n\n ivm = ImageVolumeManagement()\n ivm.add(qpdata)\n process = smooth_processes[0](ivm)\n smooth_options = {\n \"data\" : qpdata.name,\n \"sigma\" : sigma,\n \"output-name\" : \"output_smooth\",\n }\n process.execute(smooth_options)\n while process.status == Process.RUNNING:\n time.sleep(1)\n\n if process.status == Process.FAILED:\n raise process.exception\n\n # FIXME hack\n process._complete()\n return ivm.data[\"output_smooth\"].raw()\n\n def _add_activation_mask(self, struc, qpdata, strucs):\n \"\"\"\n Add an activation mask\n\n This is a binary mask which splits a parent structure (e.g. GM) into two separate\n structures, inside and outside the mask, which can have different parameter properties\n\n :param struc: Structure dictionary. 
Must define the parent structure.\n :param qpdata: Activation mask map defined on same grid as existing data\n :param strucs: Dictionary of structure name to PV map for existing structures.\n Will be updated to include split parent structure.\n \"\"\"\n # Activation mask - replace parent structure\n parent_struc = struc.get(\"parent_struc\", None)\n qpdata_name = self._ivm.suggest_name(struc[\"name\"])\n if parent_struc is None:\n raise QpException(\"Parent structure not defined for activation mask: %s\" % struc[\"name\"])\n elif parent_struc not in strucs:\n raise QpException(\"Parent structure '%s' not found in structures list for activation mask: %s\" % (parent_struc, struc[\"name\"]))\n activation_mask = np.copy(qpdata.raw())\n if \"region\" in struc:\n # If a specific region is given, isolate it\n activation_mask[activation_mask != struc[\"region\"]] = 0\n\n if qpdata.roi:\n # If mask is an ROI, make it take values 0 and 1\n activation_mask[activation_mask <= 0] = 1\n activation_mask[activation_mask > 0] = 1\n\n # Activation structure takes over parent structure within the mask.\n # We do this by multiplication so the activation mask can in principle\n # be probabilistic\n activation_mask = activation_mask.astype(np.float32)\n parent_qpdata = strucs[parent_struc]\n parent_data = np.copy(parent_qpdata.raw())\n activation_data = np.copy(parent_qpdata.raw())\n activation_data *= activation_mask\n parent_data *= (1-activation_mask)\n strucs[parent_struc] = NumpyData(parent_data, grid=parent_qpdata.grid, name=parent_qpdata.name)\n strucs[struc[\"name\"]] = NumpyData(activation_data, grid=parent_qpdata.grid, name=qpdata_name)\n\nclass FastStructureModel(PartialVolumeStructureModel):\n \"\"\"\n Structural model which derives partial volume maps from a FAST segmentation\n \"\"\"\n NAME = \"fast\"\n\n def __init__(self, ivm):\n StructureModel.__init__(self, ivm, \"Partial volume maps from a FAST segmentation\")\n \n @property\n def structures(self):\n return [\n Parameter(\"gm\", \"Grey matter\"),\n Parameter(\"wm\", \"White matter\"),\n Parameter(\"csf\", \"CSF\"),\n ]\n\n @property\n def structure_maps(self):\n processes = get_plugins(\"processes\", \"FastProcess\")\n if len(processes) != 1:\n raise QpException(\"Can't identify Fast process\")\n \n struc = self.options.get(\"struc\", None)\n if struc not in self._ivm.data:\n raise QpException(\"Structural image not loaded: %s\" % struc)\n \n qpdata = self._ivm.data[struc]\n ivm = ImageVolumeManagement()\n ivm.add(qpdata)\n process = processes[0](ivm)\n fast_options = {\n \"data\" : qpdata.name,\n \"class\" : 3,\n \"type\" : self.options[\"type\"],\n \"output-pve\" : True,\n \"output-pveseg\" : False,\n }\n process.execute(fast_options)\n while process.status == Process.RUNNING:\n time.sleep(1)\n\n if process.status == Process.FAILED:\n raise process.exception\n\n # FIXME hack\n process._complete()\n\n return {\n \"gm\" : ivm.data[\"%s_pve_1\" % qpdata.name],\n \"wm\" : ivm.data[\"%s_pve_2\" % qpdata.name],\n \"csf\" : ivm.data[\"%s_pve_0\" % qpdata.name],\n }\n\ndef fslimage_to_qpdata(img, name=None, vol=None, region=None):\n \"\"\" Convert fsl.data.Image to QpData \"\"\"\n if not name: name = img.name\n if vol is not None:\n data = img.data[..., vol]\n else:\n data = img.data\n if region is not None:\n data = (data == region).astype(np.int)\n return NumpyData(data, grid=DataGrid(img.shape[:3], img.voxToWorldMat), name=name)\n\nclass FslStdStructureModel(PartialVolumeStructureModel):\n \"\"\"\n Structural model using standard FSL structural 
data\n\n FIXME not functional at present - not clear that FSL supplies relevant\n segmentation data out of the box!\n \"\"\"\n NAME = \"fsl\"\n\n ATLAS_PREFIXES = [\n \"MNI\",\n ]\n\n def __init__(self, ivm):\n StructureModel.__init__(self, ivm, \"FSL MNI standard data\")\n from fsl.data.atlases import AtlasRegistry\n self._registry = AtlasRegistry()\n self._registry.rescanAtlases()\n atlas_names = []\n self._atlases = {}\n for atlas in sorted(self._registry.listAtlases(), key=lambda x: x.name):\n for prefix in self.ATLAS_PREFIXES:\n if atlas.name.startswith(prefix):\n for pixdim in atlas.pixdims:\n name = atlas.name + \" %.2gx%.2gx%.2g mm\" % pixdim\n self._atlases[name] = (atlas, pixdim)\n\n @property\n def structures(self):\n atlas, pixdims = self._atlases[self.options[\"atlas\"]]\n structures = []\n for label in atlas.labels:\n structures.append(Parameter(label.name, label.name))\n return structures\n\n @property\n def structure_maps(self):\n atlas, pixdims = self._atlases[self.options[\"atlas\"]]\n structure_maps = {}\n atlas_map = self._registry.loadAtlas(atlas.atlasID, loadSummary=False, resolution=pixdims[0])\n for idx, label in enumerate(atlas.labels):\n structure_maps[label.name] = fslimage_to_qpdata(atlas_map, vol=idx, name=label.name)\n return structure_maps\n\nclass CheckerboardModel(StructureModel):\n \"\"\"\n Model which builds a checkerboard for up to 3 varying parameters\n\n This model differs from the usual 'partial volume' type models in that\n it expects multiple values for up to 3 parameters. It builds a 1/2/3D\n grid in which each varying parameter changes along a given dimension.\n There can only be a single 'structure' defined whose name is arbitrary.\n \"\"\"\n NAME = \"checkerboard\"\n\n def __init__(self, ivm):\n StructureModel.__init__(self, ivm, \"Checkerboard\")\n\n @property\n def structures(self):\n return [\n Parameter(\"data\", \"Sequence of test values\"),\n ]\n\n def get_simulated_data(self, data_model, param_values, output_param_maps=False):\n if len(param_values) != 1:\n raise QpException(\"Can only have a single structure in the checkerboard model\")\n param_values = list(param_values.values())[0]\n\n param_values_list = {}\n varying_params = []\n for param, values in param_values.items():\n if isinstance(values, (int, float)):\n values = [values]\n if len(values) > 1:\n varying_params.append(param)\n param_values_list[param] = values\n\n num_varying_params = len(varying_params)\n if num_varying_params > 3:\n raise QpException(\"At most 3 parameters can vary\")\n elif num_varying_params == 0:\n # Make it a square for simplicity\n num_varying_params = 2\n\n voxels_per_patch = self.options.get(\"voxels-per-patch\", 100)\n side_length = int(round(voxels_per_patch ** (1.0 / float(num_varying_params))))\n patch_dims = [side_length] * num_varying_params\n while len(patch_dims) < 3:\n patch_dims += [1,]\n\n repeats = [[0], [0], [0]]\n checkerboard_dims = []\n for idx, param in enumerate(varying_params):\n num_values = len(param_values_list[param])\n repeats[idx] = range(num_values)\n checkerboard_dims.append(patch_dims[idx] * num_values)\n for idx in range(len(varying_params), 3):\n checkerboard_dims.append(patch_dims[idx])\n\n output_data = None\n import itertools\n for indexes in itertools.product(*repeats):\n patch_values = dict(param_values)\n for idx, param in enumerate(varying_params):\n patch_values[param] = patch_values[param][indexes[idx]]\n \n timeseries = data_model.get_timeseries(patch_values)\n if output_data is None:\n output_data = 
np.zeros(list(checkerboard_dims) + [len(timeseries)], dtype=np.float32)\n if output_param_maps:\n param_maps = {}\n for param in param_values:\n param_maps[param] = np.zeros(checkerboard_dims, dtype=np.float32)\n\n slices = []\n for dim_idx, patch_idx in enumerate(indexes):\n dim_length = patch_dims[dim_idx]\n slices.append(slice(patch_idx*dim_length, (patch_idx+1)*dim_length))\n output_data[slices] = timeseries\n if output_param_maps:\n for param, value in patch_values.items():\n param_maps[param][slices] = value\n\n grid = DataGrid(checkerboard_dims, np.identity(4))\n sim_data = NumpyData(output_data, grid=grid, name=\"sim_data\")\n\n if output_param_maps:\n for param in param_values: \n param_maps[param] = NumpyData(param_maps[param], grid=grid, name=param)\n return sim_data, param_maps\n else:\n return sim_data\n"
] |
[
[
"numpy.all",
"numpy.max",
"numpy.identity",
"numpy.count_nonzero",
"numpy.zeros"
]
] |
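The core of PartialVolumeStructureModel.get_simulated_data above is a partial-volume-weighted sum of per-structure timeseries. A toy numpy sketch of that mixing (grid size, PV values and signals are invented):

import numpy as np

shape, n_t = (4, 4, 4), 10
pv_gm = np.full(shape, 0.6, dtype=np.float32)   # toy PV maps, summing to <= 1 per voxel
pv_wm = np.full(shape, 0.3, dtype=np.float32)
ts_gm = np.linspace(0.0, 1.0, n_t)              # per-structure timeseries
ts_wm = np.linspace(1.0, 0.0, n_t)

# each voxel's signal is the PV-weighted sum of the structure signals
data = pv_gm[..., np.newaxis] * ts_gm + pv_wm[..., np.newaxis] * ts_wm
print(data.shape)   # (4, 4, 4, 10)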
xuhuan/rasa
|
[
"19fb70a05e3637efb9512a942c56cd12ad2c738e"
] |
[
"rasa/core/policies/unexpected_intent_policy.py"
] |
[
"import logging\nfrom rasa.core.featurizers.precomputation import MessageContainerForCoreFeaturization\nimport numpy as np\nimport tensorflow as tf\nfrom pathlib import Path\nfrom typing import Any, List, Optional, Text, Dict, Type, TYPE_CHECKING\n\nfrom rasa.engine.graph import ExecutionContext\nfrom rasa.engine.storage.resource import Resource\nfrom rasa.engine.storage.storage import ModelStorage\nfrom rasa.shared.nlu.training_data.features import Features\nfrom rasa.shared.core.domain import Domain\nfrom rasa.shared.core.trackers import DialogueStateTracker\nfrom rasa.shared.core.constants import SLOTS, ACTIVE_LOOP, ACTION_UNLIKELY_INTENT_NAME\nfrom rasa.shared.core.events import UserUttered, ActionExecuted\nfrom rasa.shared.nlu.constants import (\n INTENT,\n TEXT,\n ENTITIES,\n ACTION_NAME,\n SPLIT_ENTITIES_BY_COMMA,\n SPLIT_ENTITIES_BY_COMMA_DEFAULT_VALUE,\n)\nfrom rasa.nlu.extractors.extractor import EntityTagSpec\nfrom rasa.core.featurizers.tracker_featurizers import (\n TrackerFeaturizer2 as TrackerFeaturizer,\n)\nfrom rasa.core.featurizers.tracker_featurizers import (\n IntentMaxHistoryTrackerFeaturizer2 as IntentMaxHistoryTrackerFeaturizer,\n)\nfrom rasa.core.featurizers.single_state_featurizer import (\n IntentTokenizerSingleStateFeaturizer2 as IntentTokenizerSingleStateFeaturizer,\n)\nfrom rasa.shared.core.generator import TrackerWithCachedStates\nfrom rasa.core.constants import DIALOGUE, POLICY_MAX_HISTORY\nfrom rasa.core.policies.policy import PolicyPrediction\nfrom rasa.core.policies.ted_policy import (\n LABEL_KEY,\n LABEL_SUB_KEY,\n TEDPolicyGraphComponent as TEDPolicy,\n TED,\n SEQUENCE_LENGTH,\n SEQUENCE,\n PREDICTION_FEATURES,\n)\nfrom rasa.core.policies._unexpected_intent_policy import UnexpecTEDIntentPolicy\nfrom rasa.utils import train_utils\nfrom rasa.utils.tensorflow.models import RasaModel\nfrom rasa.utils.tensorflow.constants import (\n LABEL,\n DENSE_DIMENSION,\n ENCODING_DIMENSION,\n UNIDIRECTIONAL_ENCODER,\n TRANSFORMER_SIZE,\n NUM_TRANSFORMER_LAYERS,\n NUM_HEADS,\n BATCH_SIZES,\n BATCH_STRATEGY,\n EPOCHS,\n RANDOM_SEED,\n RANKING_LENGTH,\n LOSS_TYPE,\n SIMILARITY_TYPE,\n NUM_NEG,\n EVAL_NUM_EXAMPLES,\n EVAL_NUM_EPOCHS,\n REGULARIZATION_CONSTANT,\n SCALE_LOSS,\n EMBEDDING_DIMENSION,\n DROP_RATE_DIALOGUE,\n DROP_RATE_LABEL,\n DROP_RATE,\n DROP_RATE_ATTENTION,\n CONNECTION_DENSITY,\n KEY_RELATIVE_ATTENTION,\n VALUE_RELATIVE_ATTENTION,\n MAX_RELATIVE_POSITION,\n INNER,\n BALANCED,\n TENSORBOARD_LOG_DIR,\n TENSORBOARD_LOG_LEVEL,\n CHECKPOINT_MODEL,\n FEATURIZERS,\n ENTITY_RECOGNITION,\n IGNORE_INTENTS_LIST,\n BILOU_FLAG,\n LEARNING_RATE,\n CROSS_ENTROPY,\n SPARSE_INPUT_DROPOUT,\n DENSE_INPUT_DROPOUT,\n MASKED_LM,\n HIDDEN_LAYERS_SIZES,\n CONCAT_DIMENSION,\n TOLERANCE,\n LABEL_PAD_ID,\n POSITIVE_SCORES_KEY,\n NEGATIVE_SCORES_KEY,\n RANKING_KEY,\n SCORE_KEY,\n THRESHOLD_KEY,\n SEVERITY_KEY,\n QUERY_INTENT_KEY,\n NAME,\n)\nfrom rasa.utils.tensorflow import layers\nfrom rasa.utils.tensorflow.model_data import (\n RasaModelData,\n FeatureArray,\n Data,\n)\n\nimport rasa.utils.io as io_utils\nfrom rasa.core.exceptions import RasaCoreException\nfrom rasa.shared.utils import common\n\nif TYPE_CHECKING:\n from typing_extensions import TypedDict\n\n RankingCandidateMetadata = TypedDict(\n \"RankingCandidateMetadata\",\n {\n NAME: Text,\n SCORE_KEY: float,\n THRESHOLD_KEY: Optional[float],\n SEVERITY_KEY: Optional[float],\n },\n )\n\n UnexpecTEDIntentPolicyMetadata = TypedDict(\n \"UnexpecTEDIntentPolicyMetadata\",\n {QUERY_INTENT_KEY: Text, RANKING_KEY: 
List[\"RankingCandidateMetadata\"]},\n )\n\nlogger = logging.getLogger(__name__)\n\n# TODO: This is a workaround around until we have all components migrated to\n# `GraphComponent`.\nUnexpecTEDIntentPolicy = UnexpecTEDIntentPolicy\n\n\nclass UnexpecTEDIntentPolicyGraphComponent(TEDPolicy):\n \"\"\"`UnexpecTEDIntentPolicy` has the same model architecture as `TEDPolicy`.\n\n The difference is at a task level.\n Instead of predicting the next probable action, this policy\n predicts whether the last predicted intent is a likely intent\n according to the training stories and conversation context.\n \"\"\"\n\n @staticmethod\n def get_default_config() -> Dict[Text, Any]:\n \"\"\"Returns the default config (see parent class for full docstring).\"\"\"\n return {\n # ## Architecture of the used neural network\n # Hidden layer sizes for layers before the embedding layers for user message\n # and labels.\n # The number of hidden layers is equal to the length\n # of the corresponding list.\n HIDDEN_LAYERS_SIZES: {TEXT: []},\n # Dense dimension to use for sparse features.\n DENSE_DIMENSION: {\n TEXT: 128,\n INTENT: 20,\n ACTION_NAME: 20,\n ENTITIES: 20,\n SLOTS: 20,\n ACTIVE_LOOP: 20,\n f\"{LABEL}_{INTENT}\": 20,\n },\n # Default dimension to use for concatenating sequence and sentence features.\n CONCAT_DIMENSION: {TEXT: 128},\n # Dimension size of embedding vectors before\n # the dialogue transformer encoder.\n ENCODING_DIMENSION: 50,\n # Number of units in transformer encoders\n TRANSFORMER_SIZE: {TEXT: 128, DIALOGUE: 128,},\n # Number of layers in transformer encoders\n NUM_TRANSFORMER_LAYERS: {TEXT: 1, DIALOGUE: 1,},\n # Number of attention heads in transformer\n NUM_HEADS: 4,\n # If 'True' use key relative embeddings in attention\n KEY_RELATIVE_ATTENTION: False,\n # If 'True' use value relative embeddings in attention\n VALUE_RELATIVE_ATTENTION: False,\n # Max position for relative embeddings. Only in effect\n # if key- or value relative attention are turned on\n MAX_RELATIVE_POSITION: 5,\n # Use a unidirectional or bidirectional encoder\n # for `text`, `action_text`, and `label_action_text`.\n UNIDIRECTIONAL_ENCODER: False,\n # ## Training parameters\n # Initial and final batch sizes:\n # Batch size will be linearly increased for each epoch.\n BATCH_SIZES: [64, 256],\n # Strategy used when creating batches.\n # Can be either 'sequence' or 'balanced'.\n BATCH_STRATEGY: BALANCED,\n # Number of epochs to train\n EPOCHS: 1,\n # Set random seed to any 'int' to get reproducible results\n RANDOM_SEED: None,\n # Initial learning rate for the optimizer\n LEARNING_RATE: 0.001,\n # ## Parameters for embeddings\n # Dimension size of embedding vectors\n EMBEDDING_DIMENSION: 20,\n # The number of incorrect labels. The algorithm will minimize\n # their similarity to the user input during training.\n NUM_NEG: 20,\n # Number of intents to store in ranking key of predicted action metadata.\n # Set this to `0` to include all intents.\n RANKING_LENGTH: 10,\n # If 'True' scale loss inverse proportionally to the confidence\n # of the correct prediction\n SCALE_LOSS: True,\n # ## Regularization parameters\n # The scale of regularization\n REGULARIZATION_CONSTANT: 0.001,\n # Dropout rate for embedding layers of dialogue features.\n DROP_RATE_DIALOGUE: 0.1,\n # Dropout rate for embedding layers of utterance level features.\n DROP_RATE: 0.0,\n # Dropout rate for embedding layers of label, e.g. 
action, features.\n DROP_RATE_LABEL: 0.0,\n # Dropout rate for attention.\n DROP_RATE_ATTENTION: 0.0,\n # Fraction of trainable weights in internal layers.\n CONNECTION_DENSITY: 0.2,\n # If 'True' apply dropout to sparse input tensors\n SPARSE_INPUT_DROPOUT: True,\n # If 'True' apply dropout to dense input tensors\n DENSE_INPUT_DROPOUT: True,\n # If 'True' random tokens of the input message will be masked.\n # Since there is no related loss term used inside TED, the masking\n # effectively becomes just input dropout applied to the text of user\n # utterances.\n MASKED_LM: False,\n # ## Evaluation parameters\n # How often calculate validation accuracy.\n # Small values may hurt performance, e.g. model accuracy.\n EVAL_NUM_EPOCHS: 20,\n # How many examples to use for hold out validation set\n # Large values may hurt performance, e.g. model accuracy.\n EVAL_NUM_EXAMPLES: 0,\n # If you want to use tensorboard to visualize training and validation\n # metrics, set this option to a valid output directory.\n TENSORBOARD_LOG_DIR: None,\n # Define when training metrics for tensorboard should be logged.\n # Either after every epoch or for every training step.\n # Valid values: 'epoch' and 'batch'\n TENSORBOARD_LOG_LEVEL: \"epoch\",\n # Perform model checkpointing\n CHECKPOINT_MODEL: False,\n # Specify what features to use as sequence and sentence features.\n # By default all features in the pipeline are used.\n FEATURIZERS: [],\n # List of intents to ignore for `action_unlikely_intent` prediction.\n IGNORE_INTENTS_LIST: [],\n # Tolerance for prediction of `action_unlikely_intent`.\n # For each intent, the tolerance is the percentage of\n # negative training instances (trackers for which\n # the corresponding intent is not the correct label) that\n # would be ignored by `UnexpecTEDIntentPolicy`. This is converted\n # into a similarity threshold by identifying the similarity\n # score for the (1 - tolerance) percentile of negative\n # examples. Any tracker with a similarity score below this\n # threshold will trigger an `action_unlikely_intent`.\n # Higher values of `tolerance` means the policy is more\n # \"tolerant\" to surprising paths in conversations and\n # hence will result in lesser number of `action_unlikely_intent`\n # triggers. Acceptable values are between 0.0 and 1.0 (inclusive).\n TOLERANCE: 0.0,\n # Split entities by comma, this makes sense e.g. 
for a list of\n # ingredients in a recipe, but it doesn't make sense for the parts of\n # an address\n SPLIT_ENTITIES_BY_COMMA: SPLIT_ENTITIES_BY_COMMA_DEFAULT_VALUE,\n # Type of similarity measure to use, either 'auto' or 'cosine' or 'inner'.\n SIMILARITY_TYPE: INNER,\n # If set to true, entities are predicted in user utterances.\n ENTITY_RECOGNITION: False,\n # 'BILOU_flag' determines whether to use BILOU tagging or not.\n # If set to 'True' labelling is more rigorous, however more\n # examples per entity are required.\n # Rule of thumb: you should have more than 100 examples per entity.\n BILOU_FLAG: False,\n # The type of the loss function, either 'cross_entropy' or 'margin'.\n LOSS_TYPE: CROSS_ENTROPY,\n }\n\n def __init__(\n self,\n config: Dict[Text, Any],\n model_storage: ModelStorage,\n resource: Resource,\n execution_context: ExecutionContext,\n model: Optional[RasaModel] = None,\n featurizer: Optional[TrackerFeaturizer] = None,\n fake_features: Optional[Dict[Text, List[Features]]] = None,\n entity_tag_specs: Optional[List[EntityTagSpec]] = None,\n label_quantiles: Optional[Dict[int, List[float]]] = None,\n ):\n \"\"\"Declares instance variables with default values.\"\"\"\n # Set all invalid / non configurable parameters\n config[ENTITY_RECOGNITION] = False\n config[BILOU_FLAG] = False\n config[SIMILARITY_TYPE] = INNER\n config[LOSS_TYPE] = CROSS_ENTROPY\n self.config = config\n\n super().__init__(\n self.config,\n model_storage,\n resource,\n execution_context,\n model,\n featurizer,\n fake_features,\n entity_tag_specs,\n )\n\n self.label_quantiles = label_quantiles or {}\n self.label_thresholds = (\n self._pick_thresholds(self.label_quantiles, self.config[TOLERANCE])\n if self.label_quantiles\n else {}\n )\n self.ignore_intent_list = self.config[IGNORE_INTENTS_LIST]\n\n common.mark_as_experimental_feature(\"UnexpecTED Intent Policy\")\n\n def _standard_featurizer(self) -> TrackerFeaturizer:\n return IntentMaxHistoryTrackerFeaturizer(\n IntentTokenizerSingleStateFeaturizer(),\n max_history=self.config.get(POLICY_MAX_HISTORY),\n )\n\n @staticmethod\n def model_class() -> Type[\"IntentTED\"]:\n \"\"\"Gets the class of the model architecture to be used by the policy.\n\n Returns:\n Required class.\n \"\"\"\n return IntentTED\n\n def _auto_update_configuration(self) -> None:\n self.config = train_utils.update_evaluation_parameters(self.config)\n self.config = train_utils.update_deprecated_sparsity_to_density(self.config)\n\n @classmethod\n def _metadata_filename(cls) -> Optional[Text]:\n return \"unexpected_intent_policy\"\n\n def _assemble_label_data(\n self, attribute_data: Data, domain: Domain\n ) -> RasaModelData:\n \"\"\"Constructs data regarding labels to be fed to the model.\n\n The resultant model data should contain the keys `label_intent`, `label`.\n `label_intent` will contain the sequence, sentence and mask features\n for all intent labels and `label` will contain the numerical label ids.\n\n Args:\n attribute_data: Feature data for all intent labels.\n domain: Domain of the assistant.\n\n Returns:\n Features of labels ready to be fed to the model.\n \"\"\"\n label_data = RasaModelData()\n label_data.add_data(attribute_data, key_prefix=f\"{LABEL_KEY}_\")\n label_data.add_lengths(\n f\"{LABEL}_{INTENT}\", SEQUENCE_LENGTH, f\"{LABEL}_{INTENT}\", SEQUENCE,\n )\n label_ids = np.arange(len(domain.intents))\n label_data.add_features(\n LABEL_KEY,\n LABEL_SUB_KEY,\n [FeatureArray(np.expand_dims(label_ids, -1), number_of_dimensions=2)],\n )\n return label_data\n\n 
@staticmethod\n def _prepare_data_for_prediction(model_data: RasaModelData) -> RasaModelData:\n \"\"\"Transforms training model data to data usable for making model predictions.\n\n Transformation involves filtering out all features which\n are not useful at prediction time. This is important\n because the prediction signature will not contain these\n attributes and hence prediction will break.\n\n Args:\n model_data: Data used during model training.\n\n Returns:\n Transformed data usable for making predictions.\n \"\"\"\n filtered_data: Dict[Text, Dict[Text, Any]] = {\n key: features\n for key, features in model_data.data.items()\n if key in PREDICTION_FEATURES\n }\n return RasaModelData(data=filtered_data)\n\n def compute_label_quantiles_post_training(\n self, model_data: RasaModelData, label_ids: np.ndarray\n ) -> None:\n \"\"\"Computes quantile scores for prediction of `action_unlikely_intent`.\n\n Multiple quantiles are computed for each label\n so that an appropriate threshold can be picked at\n inference time according to the `tolerance` value specified.\n\n Args:\n model_data: Data used for training the model.\n label_ids: Numerical IDs of labels for each data point used during training.\n \"\"\"\n # `model_data` contains data attributes like `label` which were\n # used during training. These attributes are not present in\n # the `predict_data_signature`. Prediction through the model\n # will break if `model_data` is passed as it is through the model.\n # Hence, we first filter out the attributes inside `model_data`\n # to keep only those which should be present during prediction.\n model_prediction_data = self._prepare_data_for_prediction(model_data)\n prediction_scores = self.model.run_bulk_inference(model_prediction_data)\n label_id_scores = self._collect_label_id_grouped_scores(\n prediction_scores, label_ids\n )\n # For each label id, compute multiple quantile scores.\n # These quantile scores can be looked up during inference\n # to select a specific threshold according to the `tolerance`\n # value specified in the configuration.\n self.label_quantiles = self._compute_label_quantiles(label_id_scores)\n\n @staticmethod\n def _get_trackers_for_training(\n trackers: List[TrackerWithCachedStates],\n ) -> List[TrackerWithCachedStates]:\n \"\"\"Filters out the list of trackers which should not be used for training.\n\n `UnexpecTEDIntentPolicy` cannot be trained on trackers with:\n 1. `UserUttered` events with no intent.\n 2. 
`ActionExecuted` events with no action_name.\n\n Trackers with such events are filtered out.\n\n Args:\n trackers: All trackers available for training.\n\n Returns:\n Trackers which should be used for training.\n \"\"\"\n trackers_for_training = []\n for tracker in trackers:\n tracker_compatible = True\n for event in tracker.applied_events():\n if (isinstance(event, UserUttered) and event.intent_name is None) or (\n isinstance(event, ActionExecuted) and event.action_name is None\n ):\n tracker_compatible = False\n break\n if tracker_compatible:\n trackers_for_training.append(tracker)\n return trackers_for_training\n\n def run_training(\n self, model_data: RasaModelData, label_ids: Optional[np.ndarray] = None\n ) -> None:\n \"\"\"Feeds the featurized training data to the model.\n\n Args:\n model_data: Featurized training data.\n label_ids: Label ids corresponding to the data points in `model_data`.\n\n Raises:\n `RasaCoreException` if `label_ids` is None as it's needed for\n running post training procedures.\n \"\"\"\n if label_ids is None:\n raise RasaCoreException(\n f\"Incorrect usage of `run_training` \"\n f\"method of `{self.__class__.__name__}`.\"\n f\"`label_ids` cannot be left to `None`.\"\n )\n super().run_training(model_data, label_ids)\n self.compute_label_quantiles_post_training(model_data, label_ids)\n\n def _collect_action_metadata(\n self, domain: Domain, similarities: np.array, query_intent: Text\n ) -> \"UnexpecTEDIntentPolicyMetadata\":\n \"\"\"Collects metadata to be attached to the predicted action.\n\n Metadata schema looks like this:\n\n {\n \"query_intent\": <metadata of intent that was queried>,\n \"ranking\": <sorted list of metadata corresponding to all intents\n (truncated by `ranking_length` parameter)\n It also includes the `query_intent`.\n Sorting is based on predicted similarities.>\n }\n\n Each metadata dictionary looks like this:\n\n {\n \"name\": <name of intent>,\n \"score\": <predicted similarity score>,\n \"threshold\": <threshold used for intent>,\n \"severity\": <numerical difference between threshold and score>\n }\n\n Args:\n domain: Domain of the assistant.\n similarities: Predicted similarities for each intent.\n query_intent: Name of intent queried in this round of inference.\n\n Returns:\n Metadata to be attached.\n \"\"\"\n query_intent_index = domain.intents.index(query_intent)\n\n def _compile_metadata_for_label(\n label_name: Text, similarity_score: float, threshold: Optional[float],\n ) -> \"RankingCandidateMetadata\":\n severity = threshold - similarity_score if threshold else None\n return {\n NAME: label_name,\n SCORE_KEY: similarity_score,\n THRESHOLD_KEY: threshold,\n SEVERITY_KEY: severity,\n }\n\n metadata: \"UnexpecTEDIntentPolicyMetadata\" = {\n QUERY_INTENT_KEY: _compile_metadata_for_label(\n query_intent,\n similarities[0][domain.intents.index(query_intent)],\n self.label_thresholds.get(query_intent_index),\n )\n }\n\n # Ranking in descending order of predicted similarities\n sorted_similarities = sorted(\n [(index, similarity) for index, similarity in enumerate(similarities[0])],\n key=lambda x: -x[1],\n )\n\n if self.config[RANKING_LENGTH] > 0:\n sorted_similarities = sorted_similarities[: self.config[RANKING_LENGTH]]\n\n metadata[RANKING_KEY] = [\n _compile_metadata_for_label(\n domain.intents[intent_index],\n similarity,\n self.label_thresholds.get(intent_index),\n )\n for intent_index, similarity in sorted_similarities\n ]\n\n return metadata\n\n def predict_action_probabilities(\n self,\n tracker: DialogueStateTracker,\n 
domain: Domain,\n precomputations: Optional[MessageContainerForCoreFeaturization],\n **kwargs: Any,\n ) -> PolicyPrediction:\n \"\"\"Predicts the next action the bot should take after seeing the tracker.\n\n Args:\n tracker: Tracker containing past conversation events.\n domain: Domain of the assistant.\n precomputations: Contains precomputed features and attributes.\n\n Returns:\n The policy's prediction (e.g. the probabilities for the actions).\n \"\"\"\n if self.model is None:\n return self._prediction(self._default_predictions(domain))\n\n # Prediction through the policy is skipped if:\n # 1. If the tracker does not contain any event of type `UserUttered`\n # till now.\n # 2. There is at least one event of type `ActionExecuted`\n # after the last `UserUttered` event.\n if self._should_skip_prediction(tracker):\n logger.debug(\n f\"Skipping predictions for {self.__class__.__name__} \"\n f\"as either there is no event of type `UserUttered` or \"\n f\"there is an event of type `ActionExecuted` after \"\n f\"the last `UserUttered`.\"\n )\n return self._prediction(self._default_predictions(domain))\n\n # create model data from tracker\n tracker_state_features = self._featurize_for_prediction(\n tracker, domain, precomputations\n )\n\n model_data = self._create_model_data(tracker_state_features)\n output = self.model.run_inference(model_data)\n\n # take the last prediction in the sequence\n all_similarities: np.ndarray = output[\"similarities\"]\n sequence_similarities = all_similarities[:, -1, :]\n\n # Check for unlikely intent\n query_intent = tracker.get_last_event_for(UserUttered).intent_name\n is_unlikely_intent = self._check_unlikely_intent(\n domain, sequence_similarities, query_intent\n )\n\n confidences = list(np.zeros(domain.num_actions))\n\n if is_unlikely_intent:\n confidences[domain.index_for_action(ACTION_UNLIKELY_INTENT_NAME)] = 1.0\n\n return self._prediction(\n confidences,\n action_metadata=self._collect_action_metadata(\n domain, sequence_similarities, query_intent\n ),\n )\n\n @staticmethod\n def _should_skip_prediction(tracker: DialogueStateTracker) -> bool:\n \"\"\"Checks if the policy should skip making a prediction.\n\n A prediction can be skipped if:\n 1. There is no event of type `UserUttered` in the tracker.\n 2. There is an event of type `ActionExecuted` after the last\n `UserUttered` event. 
This is to prevent the dialogue manager\n from getting stuck in a prediction loop.\n For example, if the last `ActionExecuted` event\n contained `action_unlikely_intent` predicted by\n `UnexpecTEDIntentPolicy` and\n if `UnexpecTEDIntentPolicy` runs inference\n on the same tracker, it will predict `action_unlikely_intent`\n again which would make the dialogue manager get stuck in a\n prediction loop.\n\n Returns:\n Whether prediction should be skipped.\n \"\"\"\n applied_events = tracker.applied_events()\n\n for event in reversed(applied_events):\n if isinstance(event, ActionExecuted):\n return True\n elif isinstance(event, UserUttered):\n return False\n # No event of type `ActionExecuted` and `UserUttered` means\n # that there is nothing for `UnexpecTEDIntentPolicy` to predict on.\n return True\n\n def _should_check_for_intent(self, intent: Text, domain: Domain) -> bool:\n \"\"\"Checks if the intent should raise `action_unlikely_intent`.\n\n Args:\n intent: Intent to be queried.\n domain: Domain of the assistant.\n\n Returns:\n Whether intent should raise `action_unlikely_intent` or not.\n \"\"\"\n if domain.intents.index(intent) not in self.label_thresholds:\n # This means the intent was never present in a story\n logger.debug(\n f\"Query intent index {domain.intents.index(intent)} not \"\n f\"found in label thresholds - {self.label_thresholds}. \"\n f\"Check for `{ACTION_UNLIKELY_INTENT_NAME}` prediction will be skipped.\"\n )\n return False\n if intent in self.config[IGNORE_INTENTS_LIST]:\n logger.debug(\n f\"Query intent `{intent}` found in \"\n f\"`{IGNORE_INTENTS_LIST}={self.config[IGNORE_INTENTS_LIST]}`. \"\n f\"Check for `{ACTION_UNLIKELY_INTENT_NAME}` prediction will be skipped.\"\n )\n return False\n\n return True\n\n def _check_unlikely_intent(\n self, domain: Domain, similarities: np.array, query_intent: Text\n ) -> bool:\n \"\"\"Checks if the query intent is probable according to model's predictions.\n\n If the similarity prediction for the intent\n is lower than the threshold calculated for that\n intent during training, the corresponding user\n intent is unlikely.\n\n Args:\n domain: Domain of the assistant.\n similarities: Predicted similarities for all intents.\n query_intent: Intent to be queried.\n\n Returns:\n Whether query intent is likely or not.\n \"\"\"\n logger.debug(f\"Querying for intent `{query_intent}`.\")\n\n if not self._should_check_for_intent(query_intent, domain):\n return False\n\n predicted_intent_scores = {\n index: similarities[0][index] for index, intent in enumerate(domain.intents)\n }\n sorted_intent_scores = sorted(\n [\n (domain.intents[label_index], score)\n for label_index, score in predicted_intent_scores.items()\n ],\n key=lambda x: x[1],\n )\n query_intent_id = domain.intents.index(query_intent)\n query_intent_similarity = similarities[0][query_intent_id]\n highest_likely_intent_id = domain.intents.index(sorted_intent_scores[-1][0])\n\n logger.debug(\n f\"Score for intent `{query_intent}` is \"\n f\"`{query_intent_similarity}`, while \"\n f\"threshold is `{self.label_thresholds[query_intent_id]}`.\"\n )\n logger.debug(\n f\"Top 5 intents (in ascending order) that \"\n f\"are likely here are: `{sorted_intent_scores[-5:]}`.\"\n )\n\n # If score for query intent is below threshold and\n # the query intent is not the top likely intent\n if (\n query_intent_similarity < self.label_thresholds[query_intent_id]\n and query_intent_id != highest_likely_intent_id\n ):\n logger.debug(\n f\"Intent `{query_intent}-{query_intent_id}` unlikely to occur 
here.\"\n )\n return True\n\n return False\n\n @staticmethod\n def _collect_label_id_grouped_scores(\n output_scores: Dict[Text, np.ndarray], label_ids: np.ndarray,\n ) -> Dict[int, Dict[Text, List[float]]]:\n \"\"\"Collects similarities predicted for each label id.\n\n For each `label_id`, we collect similarity scores across\n all trackers and categorize them into two buckets:\n 1. Similarity scores when `label_id` is the correct label.\n 2. Similarity scores when `label_id` is the wrong label.\n\n Args:\n output_scores: Model's predictions for each data point.\n label_ids: Numerical IDs of labels for each data point.\n\n Returns:\n Both buckets of similarity scores grouped by each unique label id.\n \"\"\"\n unique_label_ids = np.unique(label_ids).tolist()\n if LABEL_PAD_ID in unique_label_ids:\n unique_label_ids.remove(LABEL_PAD_ID)\n\n label_id_scores = {\n label_id: {POSITIVE_SCORES_KEY: [], NEGATIVE_SCORES_KEY: []}\n for label_id in unique_label_ids\n }\n\n for index, all_pos_labels in enumerate(label_ids):\n\n for candidate_label_id in unique_label_ids:\n if candidate_label_id in all_pos_labels:\n label_id_scores[candidate_label_id][POSITIVE_SCORES_KEY].append(\n output_scores[\"similarities\"][index, 0, candidate_label_id]\n )\n else:\n label_id_scores[candidate_label_id][NEGATIVE_SCORES_KEY].append(\n output_scores[\"similarities\"][index, 0, candidate_label_id]\n )\n\n return label_id_scores\n\n @staticmethod\n def _compute_label_quantiles(\n label_id_scores: Dict[int, Dict[Text, List[float]]]\n ) -> Dict[int, List[float]]:\n \"\"\"Computes multiple quantiles for each label id.\n\n The quantiles are computed over the negative scores\n collected for each label id. However, no quantile score\n can be greater than the minimum positive score collected\n for the corresponding label id.\n\n Args:\n label_id_scores: Scores collected for each label id\n over positive and negative trackers.\n\n Returns:\n Computed quantiles for each label id.\n \"\"\"\n label_quantiles = {}\n\n quantile_indices = [\n 1 - tolerance_value / 100.0 for tolerance_value in range(0, 100, 5)\n ]\n for label_id, prediction_scores in label_id_scores.items():\n positive_scores, negative_scores = (\n prediction_scores[POSITIVE_SCORES_KEY],\n prediction_scores[NEGATIVE_SCORES_KEY],\n )\n minimum_positive_score = min(positive_scores)\n if negative_scores:\n quantile_values = np.quantile(\n negative_scores, quantile_indices, interpolation=\"lower\"\n )\n label_quantiles[label_id] = [\n min(minimum_positive_score, value) for value in quantile_values\n ]\n else:\n label_quantiles[label_id] = [minimum_positive_score] * len(\n quantile_indices\n )\n\n return label_quantiles\n\n @staticmethod\n def _pick_thresholds(\n label_quantiles: Dict[int, List[float]], tolerance: float\n ) -> Dict[int, float]:\n \"\"\"Computes a threshold for each label id.\n\n Uses tolerance which is the percentage of negative\n trackers for which predicted score should be equal\n to or above the threshold.\n\n Args:\n label_quantiles: Quantiles computed for each label id\n tolerance: Specified tolerance value from the configuration.\n\n Returns:\n Computed thresholds\n \"\"\"\n label_thresholds = {}\n for label_id in label_quantiles:\n num_thresholds = len(label_quantiles[label_id])\n label_thresholds[label_id] = label_quantiles[label_id][\n min(int(tolerance * num_thresholds), num_thresholds - 1)\n ]\n return label_thresholds\n\n def persist_model_utilities(self, model_path: Path) -> None:\n \"\"\"Persists model's utility attributes like model 
weights, etc.\n\n Args:\n model_path: Path where model is to be persisted\n \"\"\"\n super().persist_model_utilities(model_path)\n io_utils.pickle_dump(\n model_path / f\"{self._metadata_filename()}.label_quantiles.pkl\",\n self.label_quantiles,\n )\n\n @classmethod\n def _load_model_utilities(cls, model_path: Path) -> Dict[Text, Any]:\n \"\"\"Loads model's utility attributes.\n\n Args:\n model_path: Path where model is to be persisted.\n \"\"\"\n model_utilties = super()._load_model_utilities(model_path)\n label_quantiles = io_utils.pickle_load(\n model_path / f\"{cls._metadata_filename()}.label_quantiles.pkl\"\n )\n model_utilties.update({\"label_quantiles\": label_quantiles})\n return model_utilties\n\n @classmethod\n def _update_loaded_params(cls, meta: Dict[Text, Any]) -> Dict[Text, Any]:\n meta = train_utils.override_defaults(cls.get_default_config(), meta)\n return meta\n\n @classmethod\n def _load_policy_with_model(\n cls,\n config: Dict[Text, Any],\n model_storage: ModelStorage,\n resource: Resource,\n execution_context: ExecutionContext,\n featurizer: TrackerFeaturizer,\n model: \"IntentTED\",\n model_utilities: Dict[Text, Any],\n ) -> \"UnexpecTEDIntentPolicyGraphComponent\":\n return cls(\n config,\n model_storage,\n resource,\n execution_context,\n model=model,\n featurizer=featurizer,\n fake_features=model_utilities[\"fake_features\"],\n entity_tag_specs=model_utilities[\"entity_tag_specs\"],\n label_quantiles=model_utilities[\"label_quantiles\"],\n )\n\n\nclass IntentTED(TED):\n \"\"\"Follows TED's model architecture from https://arxiv.org/abs/1910.00486.\n\n However, it has been re-purposed to predict multiple\n labels (intents) instead of a single label (action).\n \"\"\"\n\n def _prepare_dot_product_loss(\n self, name: Text, scale_loss: bool, prefix: Text = \"loss\",\n ) -> None:\n self._tf_layers[f\"{prefix}.{name}\"] = self.dot_product_loss_layer(\n self.config[NUM_NEG],\n scale_loss,\n similarity_type=self.config[SIMILARITY_TYPE],\n )\n\n @property\n def dot_product_loss_layer(self) -> tf.keras.layers.Layer:\n \"\"\"Returns the dot-product loss layer to use.\n\n Multiple intents can be valid simultaneously, so `IntentTED` uses the\n `MultiLabelDotProductLoss`.\n\n Returns:\n The loss layer that is used by `_prepare_dot_product_loss`.\n \"\"\"\n return layers.MultiLabelDotProductLoss\n\n @staticmethod\n def _get_labels_embed(\n label_ids: tf.Tensor, all_labels_embed: tf.Tensor\n ) -> tf.Tensor:\n # instead of processing labels again, gather embeddings from\n # all_labels_embed using label ids\n\n indices = tf.cast(label_ids[:, :, 0], tf.int32)\n\n # Find padding indices. 
They should have a value equal to `LABEL_PAD_ID`\n padding_indices = tf.where(tf.equal(indices, LABEL_PAD_ID))\n\n # Create a tensor of values with sign opposite to `LABEL_PAD_ID` which\n # will serve as updates to original `indices`\n updates_to_indices = (\n tf.ones((tf.shape(padding_indices)[0]), dtype=tf.int32) * -1 * LABEL_PAD_ID\n )\n\n # Add the updates tensor to indices with padding.\n # So, effectively all indices with `LABEL_PAD_ID=-1`\n # become 0 because updates contain 1s.\n # This is fine because we don't change the original non-padding label\n # indices but only make the padding indices 'compatible'\n # for the `tf.gather` op below.\n indices_to_gather = tf.cast(\n tf.tensor_scatter_nd_add(indices, padding_indices, updates_to_indices),\n tf.int32,\n )\n\n labels_embed = tf.gather(all_labels_embed, indices_to_gather)\n\n return labels_embed\n\n def run_bulk_inference(self, model_data: RasaModelData) -> Dict[Text, np.ndarray]:\n \"\"\"Computes model's predictions for input data.\n\n Args:\n model_data: Data to be passed as input\n\n Returns:\n Predictions for the input data.\n \"\"\"\n self._training = False\n\n batch_size = (\n self.config[BATCH_SIZES]\n if isinstance(self.config[BATCH_SIZES], int)\n else self.config[BATCH_SIZES][0]\n )\n\n return self.run_inference(\n model_data, batch_size=batch_size, output_keys_expected=[\"similarities\"]\n )\n"
] |
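An aside on the `tf.tensor_scatter_nd_add` trick in `IntentTED._get_labels_embed` above: label-id rows are padded with `LABEL_PAD_ID = -1`, which would be an out-of-range index for `tf.gather`, so the padding positions are first shifted to index 0. A minimal sketch with toy shapes (the constants here are illustrative, not taken from the source):

import tensorflow as tf

LABEL_PAD_ID = -1
indices = tf.constant([[2, 0, -1], [1, -1, -1]], dtype=tf.int32)

# Coordinates of every padding entry.
padding_positions = tf.where(tf.equal(indices, LABEL_PAD_ID))

# Adding -LABEL_PAD_ID (= 1) turns each -1 into the valid index 0.
updates = tf.ones((tf.shape(padding_positions)[0],), dtype=tf.int32) * -LABEL_PAD_ID
safe_indices = tf.tensor_scatter_nd_add(indices, padding_positions, updates)
# safe_indices == [[2, 0, 0], [1, 0, 0]]

all_labels_embed = tf.random.normal((5, 4))  # (num_labels, embed_dim), toy values
labels_embed = tf.gather(all_labels_embed, safe_indices)  # shape (2, 3, 4)

The rows gathered at former padding slots are placeholders; the non-padding label indices are untouched.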
[
[
"numpy.expand_dims",
"numpy.unique",
"tensorflow.shape",
"tensorflow.tensor_scatter_nd_add",
"tensorflow.equal",
"tensorflow.cast",
"numpy.quantile",
"tensorflow.gather",
"numpy.zeros"
]
] |
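The quantile-and-tolerance logic in `_compute_label_quantiles` and `_pick_thresholds` above condenses to a few lines. A toy walk-through with made-up similarity scores (NumPy's `method=` spelling is the renamed `interpolation=` argument used in the source):

import numpy as np

positive_scores = [0.62, 0.71, 0.80]        # similarity when the intent was correct
negative_scores = [0.10, 0.25, 0.40, 0.55]  # similarity when it was not

# Quantiles at 100%, 95%, ..., 5%, as in range(0, 100, 5) above.
quantile_indices = [1 - t / 100.0 for t in range(0, 100, 5)]
quantile_values = np.quantile(negative_scores, quantile_indices, method="lower")

# No threshold candidate may exceed the smallest positive score.
label_quantiles = [min(min(positive_scores), v) for v in quantile_values]

# tolerance = 0.1 picks the quantile above which roughly 10% of
# negative-tracker scores fall.
tolerance = 0.1
threshold = label_quantiles[min(int(tolerance * len(label_quantiles)),
                                len(label_quantiles) - 1)]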
hongcheq/WADA
|
[
"56ce48fc26872851b615ecfba7d1aa678faa5a4c"
] |
[
"sim2_TOPO_vs_CTR_ENS_HCforcing_h2_Inteference_Modi_test/Modi_plus_macro_and_micro/Code/Q_related/15_g_term123_Amazon_mean_sfc_top_vertical_integral_time_series.py"
] |
[
"'''\nFunction: using output files under /DFS-L/DATA/pritchard/hongcheq/OLD/scratch/\nhongcheq/HCforcing_sim2_WADA_CTR_TOPO_ENSEMBLE_post-processing_h2_tapes_New_Modifications/MSE_decomp_Andes_Amazon\nMSE.nc LSE.nc DSE.nc\nDate: 2019/06/17\n'''\n\nimport numpy as np\nimport xarray as xr\nimport matplotlib.pyplot as plt\n\ndata_path = '/DFS-L/DATA/pritchard/hongcheq/OLD/scratch/hongcheq/\\\nHCforcing_sim2_WADA_CTR_TOPO_ENSEMBLE_post-processing_h2_tapes_New_Modifications/Qadv/'\nfile_names = ['var_vert_adv']\nname_strings = ['[-Omega*(dq/dp)]_ctr-topo','-Omega_topo*(dq/dp)_ctr-topo',\\\n'-Omega_ctr-topo*(dq/dp)_topo','-Omega_ctr-topo*(dq/dp)_ctr-topo']\n\nQadv_acc_file = 'Qt_PTEQ_Qadv'\n\ndata_vars = np.zeros((4,96)) # 4 vars x 96 hours\n\ncases = ['CTR_TOPO']\n\nfor i_case in range(len(cases)):\n for i in range(len(file_names)):\n ds = xr.open_dataset(data_path+file_names[i]+'.MF.nc', decode_times=False)\n data_vars[0,:] = ds['Amazon_mean_'+cases[i_case]]\n data_vars[1,:] = ds['Amazon_mean_term1']\n data_vars[2,:] = ds['Amazon_mean_term2']\n data_vars[3,:] = ds['Amazon_mean_term3']\n print(data_vars[i,:])\n print('==')\n\n # Plot the time series\n #fig = plt.figure()\n plt.subplot(1,1,i_case+1)\n x = np.arange(1,97,1)\n for i in range(len(data_vars[:,0])):\n plt.plot(x, data_vars[i,:], label = name_strings[i])\n\n plt.xticks(np.arange(0,101,10))\n #plt.ylim([-2.0, 5.0])\n plt.xlabel('time, hr')\n plt.ylabel('g/kg/hr')\n plt.title(cases[i_case]+', Amazon avg, sfc_top')\n plt.grid(True)\n\nplt.axhline(y=0, linewidth=1.5, color='k')\n#plt.legend(loc = 'best')\n#plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.)\nplt.legend(loc='lower right')\nplt.tight_layout()\nplt.show()\n#plt.savefig('./Term123_CTR_TOPO_Amazon_mean_Omega_dq_dp_decomp.png',dpi=500)\n\n"
] |
[
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.axhline",
"matplotlib.pyplot.title",
"numpy.arange",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"numpy.zeros",
"matplotlib.pyplot.ylabel"
]
] |
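Reading the `name_strings` in the script above as vertical velocity (Omega) and dq/dp differences between the CTR and TOPO runs, the three plotted terms are the standard decomposition of a product difference, Delta(A*B) = A_topo*DeltaB + DeltaA*B_topo + DeltaA*DeltaB with Delta = CTR - TOPO. A NumPy check with random stand-in profiles (this mapping of terms is inferred from the variable names, not stated in the source):

import numpy as np

rng = np.random.default_rng(0)
omega_ctr, omega_topo = rng.normal(size=96), rng.normal(size=96)
dqdp_ctr, dqdp_topo = rng.normal(size=96), rng.normal(size=96)
d_omega = omega_ctr - omega_topo
d_dqdp = dqdp_ctr - dqdp_topo

total = -(omega_ctr * dqdp_ctr) + (omega_topo * dqdp_topo)  # [-Omega*(dq/dp)]_ctr-topo
term1 = -omega_topo * d_dqdp   # -Omega_topo*(dq/dp)_ctr-topo
term2 = -d_omega * dqdp_topo   # -Omega_ctr-topo*(dq/dp)_topo
term3 = -d_omega * d_dqdp      # -Omega_ctr-topo*(dq/dp)_ctr-topo
assert np.allclose(total, term1 + term2 + term3)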
flohilde/eta_rmdp
|
[
"3ef9dad0dbbe4f8626dfe7d3a1e6a03b992e9ad1"
] |
[
"simulation.py"
] |
[
"import numpy as np\nimport parameters as param\nfrom generator import customer_generator\nfrom etd import get_all_restaurants_etd\nfrom dispatcher import Dispatcher, FakeDispatcher\nfrom pathos.multiprocessing import ProcessingPool as Pool\n\n\ndef simulate(state_dict, spatial_distrib, nodes=None):\n r\"\"\"\n Full online simulation of the delivery process.\n\n Arguments\n ==========\n state_dict (Dict): Dictionary containing all relevant information for the simulation.\n spatial_distrib (callable): Spatial distribution to sample customer locations from.\n nodes (int): Number of nodes to simulate on in parallel.\n\n Returns\n ========\n Returns a list of simulated arrival times for a given (customer, restaurant) Tuple.\n\n \"\"\"\n # start simulation\n if nodes is None or nodes == 0:\n atd_list = []\n for i in range(param.n_simulations):\n # load copy of state\n t = state_dict[\"t\"]\n restaurants = [restaurant.full_deepcopy() for restaurant in state_dict[\"restaurants\"]]\n restaurant_dict = {}\n for restaurant in restaurants:\n restaurant_dict[restaurant.name] = restaurant\n customers = [customer.full_deepcopy() for customer in state_dict[\"customers\"]]\n customer_dict = {}\n for customer in customers:\n customer_dict[customer.name] = customer\n customer_sim = customer_dict[state_dict[\"customer\"]]\n # Repopulate vehicles\n vehicles = [vehicle.full_deepcopy(restaurant_dict=restaurant_dict,\n customer_dict=customer_dict)\n for vehicle in state_dict[\"vehicles\"]]\n dispatcher = Dispatcher(vehicles=vehicles, restaurants=restaurants)\n # create customers\n n_lunch = int(max(0, np.random.normal(loc=param.n_lunch_mu, scale=param.n_lunch_sigma)))\n n_dinner = int(max(0, np.random.normal(loc=param.n_dinner_mu, scale=param.n_dinner_sigma)))\n lunch = np.random.normal(loc=param.t_lunch_mu, scale=param.t_lunch_sigma, size=n_lunch)\n dinner = np.random.normal(loc=param.t_dinner_mu, scale=param.t_dinner_sigma, size=n_dinner)\n order_times = np.sort(np.hstack((lunch, dinner)))\n order_times = order_times[(t-1 < order_times) & (order_times < 1439)].tolist()\n # start loop\n while customer_sim.status == 0:\n # determine number of orders\n n_customer = 0\n while len(order_times) > 0 and ((t - 1) < order_times[0] < t):\n n_customer += 1\n order_times.pop(0)\n # generate customers accordingly\n new_customers = customer_generator(n_customer=n_customer,\n restaurants=restaurants,\n dispatcher=dispatcher,\n current_time=t,\n spatial_dist=spatial_distrib)\n # customers place orders\n for customer in new_customers:\n # calculate etd for every customer-restaurant tuple\n etd_dict = get_all_restaurants_etd(customer=customer, dispatcher=dispatcher,\n restaurants=customer.favorite_restaurants, current_time=t)\n # place an order and update routes accordingly\n customer.status = customer.order_restaurant(etd_dict=etd_dict, current_time=t)\n\n # Complete Actions\n # Restaurant actions\n for restaurant in restaurants:\n restaurant.check_queue(t)\n # Vehicle actions\n for vehicle in vehicles:\n vehicle.act(current_time=t)\n t += 1\n atd_list.append(customer_sim.atd)\n return atd_list\n\n if nodes is not None:\n def simulation_loop(days, seeds):\n np.random.seed(int(seeds))\n atd_list = []\n for d in days:\n # load copy of state\n t = state_dict[\"t\"]\n restaurants = [restaurant.full_deepcopy() for restaurant in state_dict[\"restaurants\"]]\n restaurant_dict = {}\n for restaurant in restaurants:\n restaurant_dict[restaurant.name] = restaurant\n customers = [customer.full_deepcopy() for customer in 
state_dict[\"customers\"]]\n customer_dict = {}\n for customer in customers:\n customer_dict[customer.name] = customer\n customer_sim = customer_dict[state_dict[\"customer\"]]\n # Repopulate vehicles\n vehicles = [vehicle.full_deepcopy(restaurant_dict=restaurant_dict,\n customer_dict=customer_dict)\n for vehicle in state_dict[\"vehicles\"]]\n dispatcher = Dispatcher(vehicles=vehicles, restaurants=restaurants)\n # create customers\n n_lunch = int(max(0, np.random.normal(loc=param.n_lunch_mu, scale=param.n_lunch_sigma)))\n n_dinner = int(max(0, np.random.normal(loc=param.n_dinner_mu, scale=param.n_dinner_sigma)))\n lunch = np.random.normal(loc=param.t_lunch_mu, scale=param.t_lunch_sigma, size=n_lunch)\n dinner = np.random.normal(loc=param.t_dinner_mu, scale=param.t_dinner_sigma, size=n_dinner)\n order_times = np.sort(np.hstack((lunch, dinner)))\n order_times = order_times[(t - 1 < order_times) & (order_times < 1439)].tolist()\n # start loop\n while customer_sim.status == 0:\n # determine number of orders\n n_customer = 0\n while len(order_times) > 0 and ((t - 1) < order_times[0] < t):\n n_customer += 1\n order_times.pop(0)\n # generate customers accordingly\n new_customers = customer_generator(n_customer=n_customer,\n restaurants=restaurants,\n dispatcher=dispatcher,\n current_time=t,\n spatial_dist=spatial_distrib)\n # customers place orders\n for customer in new_customers:\n # calculate etd for every customer-restaurant tuple\n etd_dict = get_all_restaurants_etd(customer=customer, dispatcher=dispatcher,\n restaurants=customer.favorite_restaurants, current_time=t)\n # place an order and update routes accordingly\n customer.status = customer.order_restaurant(etd_dict=etd_dict, current_time=t)\n\n \"\"\"Complete Actions\"\"\"\n # Restaurant actions\n for restaurant in restaurants:\n restaurant.check_queue(t)\n # Vehicle actions\n for vehicle in vehicles:\n vehicle.act(current_time=t)\n t += 1\n atd_list.append(customer_sim.atd)\n return atd_list\n\n pool = Pool(nodes=nodes)\n n = int(param.n_simulations / nodes)\n chunks = [range(k, k + n) for k in range(nodes)]\n\n # set seed\n ini_seed = int(state_dict[\"customer\"][2:])\n seed_param = list(range(ini_seed * 11111, ini_seed * 11111 + nodes))\n atd_list = pool.map(simulation_loop, chunks, seed_param)\n atd_list = [item for sublist in atd_list for item in sublist]\n return atd_list\n\n\ndef approx_simulate(state_dict, model, spatial_distribution):\n r\"\"\"\n Approximate full online simulation of the delivery process using a DNN.\n\n\n Arguments\n ==========\n state_dict (Dict): Dictionary containing all relevant information for the simulation.\n model (torch.nn): DeepInsertion model.\n spatial_distrib (callable): Spatial distribution to sample customer locations from.\n\n Returns\n ========\n Returns a list of simulated arrival times for a given (customer, restaurant) Tuple.\n\n \"\"\"\n # start simulation\n atd_list = []\n for i in range(param.n_approx_simulations):\n # load copy of state\n t = state_dict[\"t\"]\n restaurants = [restaurant.full_deepcopy() for restaurant in state_dict[\"restaurants\"]]\n restaurant_dict = {}\n for restaurant in restaurants:\n restaurant_dict[restaurant.name] = restaurant\n customers = [customer.full_deepcopy() for customer in state_dict[\"customers\"]]\n customer_dict = {}\n for customer in customers:\n customer_dict[customer.name] = customer\n customer_sim = customer_dict[state_dict[\"customer\"]]\n # Repopulate vehicles\n vehicles = [vehicle.full_deepcopy(restaurant_dict=restaurant_dict,\n 
customer_dict=customer_dict)\n for vehicle in state_dict[\"vehicles\"]]\n dispatcher = FakeDispatcher(vehicles=vehicles, restaurants=restaurants, model=model)\n # create customers\n n_lunch = int(max(0, np.random.normal(loc=param.n_lunch_mu, scale=param.n_lunch_sigma)))\n n_dinner = int(max(0, np.random.normal(loc=param.n_dinner_mu, scale=param.n_dinner_sigma)))\n lunch = np.random.normal(loc=param.t_lunch_mu, scale=param.t_lunch_sigma, size=n_lunch)\n dinner = np.random.normal(loc=param.t_dinner_mu, scale=param.t_dinner_sigma, size=n_dinner)\n order_times = np.sort(np.hstack((lunch, dinner)))\n order_times = order_times[(t-1 < order_times) & (order_times < 1439)].tolist()\n # start loop\n while customer_sim.status == 0:\n # determine number of orders\n n_customer = 0\n while len(order_times) > 0 and ((t - 1) < order_times[0] < t):\n n_customer += 1\n order_times.pop(0)\n # generate customers accordingly\n new_customers = customer_generator(n_customer=n_customer,\n restaurants=restaurants,\n dispatcher=dispatcher,\n current_time=t,\n spatial_dist=spatial_distribution)\n # customers place orders\n for customer in new_customers:\n # calculate etd for every customer-restaurant tuple\n etd_dict = get_all_restaurants_etd(customer=customer, dispatcher=dispatcher,\n restaurants=[customer.favorite_restaurants[0]], current_time=t)\n # place an order and update routes accordingly\n customer.status = customer.order_restaurant(etd_dict=etd_dict, current_time=t,\n ignore_time_constraint=True)\n\n \"\"\"Complete Actions\"\"\"\n # Restaurant actions\n for restaurant in restaurants:\n restaurant.check_queue(t)\n # Vehicle actions\n for vehicle in vehicles:\n vehicle.act(current_time=t)\n t += 1\n atd_list.append(customer_sim.atd)\n return atd_list\n\n\ndef scale(X, x_min=-1, x_max=1):\n r\"\"\"\n Scales the input of the DeepInsertion Model.\n\n Arguments\n ==========\n X (array): Unscaled input.\n x_min (float): Lower bound of scaled input.\n x_max (float): Upper bound of scaled input.\n\n Returns\n ========\n Array containing scaled input.\n\n \"\"\"\n X_min = np.array([0.0, 0.0, 3.52070206, 3.71164996, 0.0, 3.0, 0.0, 0.0, 3.0, -20.0,\n 0.0, 0.0, 0.0, 0.0, -20.0, 0.0, 0.0, 0.0, 0.0, -17.0, 0.0, 0.0, 0.0, 0.0,\n -19.0, 0.0, 0.0, 0.0, 0.0, -15.0, 0.0, 0.0, 0.0, 0.0, -6.0, 0.0, 0.0, 0.0, 0.0, -7.0,\n 0.0, 0.0, 0.0, 0.0, -4.0, 0.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])\n X_max = np.array([15.35130731, 13.49346697, 12.46919309, 10.34123165, 51.0, 4.0, 15.35130731,\n 13.49346697, 35.0, 15.0, 4.0, 15.35130731, 13.49346697, 43.0, 15.0, 4.0, 15.35130731,\n 13.49346697, 32.0, 15.0, 4.0, 15.35130731, 13.49346697, 32.0, 15.0, 4.0, 15.35130731,\n 13.49346697, 34.0, 15.0, 4.0, 15.35130731, 13.49346697, 29.0, 15.0, 4.0, 15.35098815,\n 13.49346697, 28.0, 15.0, 4.0, 15.34983257, 13.41473213, 24.0, 15.0, 4.0, 15.33966073,\n 13.49346697, 21.0, 15.0, 4.0, 15.32294646, 12.45623837, 17.0, 15.0, 4.0, 15.15503898,\n 11.43302948, 14.0, 14.0, 4.0, 10.06925151, 10.76896486, 10.0, 12.0])\n\n nom = (X-X_min)*(x_max-x_min)\n denom = X_max - X_min\n denom[denom == 0] = 1\n return x_min + nom/denom\n"
] |
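The `scale` helper at the end of `simulation.py` above is plain min-max scaling with a guard against zero-range features. A self-contained version with a toy feature vector (the hard-coded `X_min`/`X_max` bounds from the source are replaced by simple stand-ins):

import numpy as np

def scale(X, X_min, X_max, x_min=-1.0, x_max=1.0):
    """Map X linearly into [x_min, x_max]; constant features stay at x_min."""
    nom = (X - X_min) * (x_max - x_min)
    denom = np.where(X_max - X_min == 0, 1, X_max - X_min)  # avoid divide-by-zero
    return x_min + nom / denom

X = np.array([0.0, 5.0, 10.0])
print(scale(X, X_min=np.zeros(3), X_max=np.full(3, 10.0)))  # [-1.  0.  1.]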
[
[
"numpy.hstack",
"numpy.random.normal",
"numpy.array"
]
] |
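The parallel branch of `simulate` above pairs each worker with its own seed so the `pathos` pool does not replay one random stream across processes. A stripped-down sketch of that pattern (the per-day body is a stand-in for the real delivery simulation, and the chunks here are made non-overlapping for clarity; the source loop only uses their length):

import numpy as np
from pathos.multiprocessing import ProcessingPool as Pool

def simulation_loop(days, seed):
    np.random.seed(int(seed))                   # distinct stream per worker
    return [np.random.normal() for _ in days]   # stand-in for one day's ATD

if __name__ == "__main__":
    nodes = 4
    n_per_node = 25
    chunks = [range(k * n_per_node, (k + 1) * n_per_node) for k in range(nodes)]
    seeds = list(range(11111, 11111 + nodes))   # one seed per worker
    results = Pool(nodes=nodes).map(simulation_loop, chunks, seeds)
    atd_list = [atd for sublist in results for atd in sublist]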
Sigel1/Mask_RCNN
|
[
"a0d3a99f9271968936ac7e687e72f6b33f5816b4"
] |
[
"mrcnn/model.py"
] |
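The `mrcnn/model.py` source that follows refines anchors by applying predicted deltas in `apply_box_deltas_graph`. The same arithmetic in plain NumPy, with one toy box (mirroring the TF graph code below):

import numpy as np

def apply_box_deltas(boxes, deltas):
    """boxes: [N, (y1, x1, y2, x2)]; deltas: [N, (dy, dx, log(dh), log(dw))]."""
    h = boxes[:, 2] - boxes[:, 0]
    w = boxes[:, 3] - boxes[:, 1]
    cy = boxes[:, 0] + 0.5 * h
    cx = boxes[:, 1] + 0.5 * w
    cy += deltas[:, 0] * h            # shift center
    cx += deltas[:, 1] * w
    h *= np.exp(deltas[:, 2])         # rescale height/width
    w *= np.exp(deltas[:, 3])
    return np.stack([cy - 0.5 * h, cx - 0.5 * w,
                     cy + 0.5 * h, cx + 0.5 * w], axis=1)

boxes = np.array([[0.2, 0.2, 0.4, 0.4]])
print(apply_box_deltas(boxes, np.array([[0.0, 0.0, np.log(2.0), np.log(2.0)]])))
# -> [[0.1 0.1 0.5 0.5]]  (same center, doubled height and width)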
[
"\"\"\"\nMask R-CNN\nThe main Mask R-CNN model implementation.\n\nCopyright (c) 2017 Matterport, Inc.\nLicensed under the MIT License (see LICENSE for details)\nWritten by Waleed Abdulla\n\"\"\"\n\nimport os\nimport random\nimport datetime\nimport re\nimport math\nimport logging\nfrom collections import OrderedDict\nimport multiprocessing\nimport numpy as np\nimport tensorflow as tf\nimport keras\nimport keras.backend as K\nimport keras.layers as KL\nimport keras.engine as KE\nimport keras.models as KM\n\nfrom mrcnn import utils\n\n# Requires TensorFlow 1.3+ and Keras 2.0.8+.\nfrom distutils.version import LooseVersion\nassert LooseVersion(tf.__version__) >= LooseVersion(\"1.3\")\nassert LooseVersion(keras.__version__) >= LooseVersion('2.0.8')\n\n\n############################################################\n# Utility Functions\n############################################################\n\ndef log(text, array=None):\n \"\"\"Prints a text message. And, optionally, if a Numpy array is provided it\n prints it's shape, min, and max values.\n \"\"\"\n if array is not None:\n text = text.ljust(25)\n text += (\"shape: {:20} \".format(str(array.shape)))\n if array.size:\n text += (\"min: {:10.5f} max: {:10.5f}\".format(array.min(),array.max()))\n else:\n text += (\"min: {:10} max: {:10}\".format(\"\",\"\"))\n text += \" {}\".format(array.dtype)\n print(text)\n\n\nclass BatchNorm(KL.BatchNormalization):\n \"\"\"Extends the Keras BatchNormalization class to allow a central place\n to make changes if needed.\n\n Batch normalization has a negative effect on training if batches are small\n so this layer is often frozen (via setting in Config class) and functions\n as linear layer.\n \"\"\"\n def call(self, inputs, training=None):\n \"\"\"\n Note about training values:\n None: Train BN layers. This is the normal mode\n False: Freeze BN layers. Good when batch size is small\n True: (don't use). Set layer in training mode even when making inferences\n \"\"\"\n return super(self.__class__, self).call(inputs, training=training)\n\n\ndef compute_backbone_shapes(config, image_shape):\n \"\"\"Computes the width and height of each stage of the backbone network.\n\n Returns:\n [N, (height, width)]. Where N is the number of stages\n \"\"\"\n if callable(config.BACKBONE):\n return config.COMPUTE_BACKBONE_SHAPE(image_shape)\n\n # Currently supports ResNet only\n assert config.BACKBONE in [\"resnet50\", \"resnet101\"]\n return np.array(\n [[int(math.ceil(image_shape[0] / stride)),\n int(math.ceil(image_shape[1] / stride))]\n for stride in config.BACKBONE_STRIDES])\n\n\n############################################################\n# Resnet Graph\n############################################################\n\n# Code adopted from:\n# https://github.com/fchollet/deep-learning-models/blob/master/resnet50.py\n\ndef identity_block(input_tensor, kernel_size, filters, stage, block,\n use_bias=True, train_bn=True):\n \"\"\"The identity_block is the block that has no conv layer at shortcut\n # Arguments\n input_tensor: input tensor\n kernel_size: default 3, the kernel size of middle conv layer at main path\n filters: list of integers, the nb_filters of 3 conv layer at main path\n stage: integer, current stage label, used for generating layer names\n block: 'a','b'..., current block label, used for generating layer names\n use_bias: Boolean. To use or not use a bias in conv layers.\n train_bn: Boolean. 
Train or freeze Batch Norm layers\n \"\"\"\n nb_filter1, nb_filter2, nb_filter3 = filters\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n\n x = KL.Conv2D(nb_filter1, (1, 1), name=conv_name_base + '2a',\n use_bias=use_bias)(input_tensor)\n x = BatchNorm(name=bn_name_base + '2a')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',\n name=conv_name_base + '2b', use_bias=use_bias)(x)\n x = BatchNorm(name=bn_name_base + '2b')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base + '2c',\n use_bias=use_bias)(x)\n x = BatchNorm(name=bn_name_base + '2c')(x, training=train_bn)\n\n x = KL.Add()([x, input_tensor])\n x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x)\n return x\n\n\ndef conv_block(input_tensor, kernel_size, filters, stage, block,\n strides=(2, 2), use_bias=True, train_bn=True):\n \"\"\"conv_block is the block that has a conv layer at shortcut\n # Arguments\n input_tensor: input tensor\n kernel_size: default 3, the kernel size of middle conv layer at main path\n filters: list of integers, the nb_filters of 3 conv layer at main path\n stage: integer, current stage label, used for generating layer names\n block: 'a','b'..., current block label, used for generating layer names\n use_bias: Boolean. To use or not use a bias in conv layers.\n train_bn: Boolean. Train or freeze Batch Norm layers\n Note that from stage 3, the first conv layer at main path is with subsample=(2,2)\n And the shortcut should have subsample=(2,2) as well\n \"\"\"\n nb_filter1, nb_filter2, nb_filter3 = filters\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n\n x = KL.Conv2D(nb_filter1, (1, 1), strides=strides,\n name=conv_name_base + '2a', use_bias=use_bias)(input_tensor)\n x = BatchNorm(name=bn_name_base + '2a')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',\n name=conv_name_base + '2b', use_bias=use_bias)(x)\n x = BatchNorm(name=bn_name_base + '2b')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base +\n '2c', use_bias=use_bias)(x)\n x = BatchNorm(name=bn_name_base + '2c')(x, training=train_bn)\n\n shortcut = KL.Conv2D(nb_filter3, (1, 1), strides=strides,\n name=conv_name_base + '1', use_bias=use_bias)(input_tensor)\n shortcut = BatchNorm(name=bn_name_base + '1')(shortcut, training=train_bn)\n\n x = KL.Add()([x, shortcut])\n x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x)\n return x\n\n\ndef resnet_graph(input_image, architecture, stage5=False, train_bn=True):\n \"\"\"Build a ResNet graph.\n architecture: Can be resnet50 or resnet101\n stage5: Boolean. If False, stage5 of the network is not created\n train_bn: Boolean. 
Train or freeze Batch Norm layers\n \"\"\"\n assert architecture in [\"resnet50\", \"resnet101\"]\n # Stage 1\n x = KL.ZeroPadding2D((3, 3))(input_image)\n x = KL.Conv2D(64, (7, 7), strides=(2, 2), name='conv1', use_bias=True)(x)\n x = BatchNorm(name='bn_conv1')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n C1 = x = KL.MaxPooling2D((3, 3), strides=(2, 2), padding=\"same\")(x)\n # Stage 2\n x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1), train_bn=train_bn)\n x = identity_block(x, 3, [64, 64, 256], stage=2, block='b', train_bn=train_bn)\n C2 = x = identity_block(x, 3, [64, 64, 256], stage=2, block='c', train_bn=train_bn)\n # Stage 3\n x = conv_block(x, 3, [128, 128, 512], stage=3, block='a', train_bn=train_bn)\n x = identity_block(x, 3, [128, 128, 512], stage=3, block='b', train_bn=train_bn)\n x = identity_block(x, 3, [128, 128, 512], stage=3, block='c', train_bn=train_bn)\n C3 = x = identity_block(x, 3, [128, 128, 512], stage=3, block='d', train_bn=train_bn)\n # Stage 4\n x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a', train_bn=train_bn)\n block_count = {\"resnet50\": 5, \"resnet101\": 22}[architecture]\n for i in range(block_count):\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block=chr(98 + i), train_bn=train_bn)\n C4 = x\n # Stage 5\n if stage5:\n x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a', train_bn=train_bn)\n x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b', train_bn=train_bn)\n C5 = x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c', train_bn=train_bn)\n else:\n C5 = None\n return [C1, C2, C3, C4, C5]\n\n\n############################################################\n# Proposal Layer\n############################################################\n\ndef apply_box_deltas_graph(boxes, deltas):\n \"\"\"Applies the given deltas to the given boxes.\n boxes: [N, (y1, x1, y2, x2)] boxes to update\n deltas: [N, (dy, dx, log(dh), log(dw))] refinements to apply\n \"\"\"\n # Convert to y, x, h, w\n height = boxes[:, 2] - boxes[:, 0]\n width = boxes[:, 3] - boxes[:, 1]\n center_y = boxes[:, 0] + 0.5 * height\n center_x = boxes[:, 1] + 0.5 * width\n # Apply deltas\n center_y += deltas[:, 0] * height\n center_x += deltas[:, 1] * width\n height *= tf.exp(deltas[:, 2])\n width *= tf.exp(deltas[:, 3])\n # Convert back to y1, x1, y2, x2\n y1 = center_y - 0.5 * height\n x1 = center_x - 0.5 * width\n y2 = y1 + height\n x2 = x1 + width\n result = tf.stack([y1, x1, y2, x2], axis=1, name=\"apply_box_deltas_out\")\n return result\n\n\ndef clip_boxes_graph(boxes, window):\n \"\"\"\n boxes: [N, (y1, x1, y2, x2)]\n window: [4] in the form y1, x1, y2, x2\n \"\"\"\n # Split\n wy1, wx1, wy2, wx2 = tf.split(window, 4)\n y1, x1, y2, x2 = tf.split(boxes, 4, axis=1)\n # Clip\n y1 = tf.maximum(tf.minimum(y1, wy2), wy1)\n x1 = tf.maximum(tf.minimum(x1, wx2), wx1)\n y2 = tf.maximum(tf.minimum(y2, wy2), wy1)\n x2 = tf.maximum(tf.minimum(x2, wx2), wx1)\n clipped = tf.concat([y1, x1, y2, x2], axis=1, name=\"clipped_boxes\")\n clipped.set_shape((clipped.shape[0], 4))\n return clipped\n\n\nclass ProposalLayer(KE.Layer):\n \"\"\"Receives anchor scores and selects a subset to pass as proposals\n to the second stage. Filtering is done based on anchor scores and\n non-max suppression to remove overlaps. 
It also applies bounding\n box refinement deltas to anchors.\n\n Inputs:\n rpn_probs: [batch, num_anchors, (bg prob, fg prob)]\n rpn_bbox: [batch, num_anchors, (dy, dx, log(dh), log(dw))]\n anchors: [batch, num_anchors, (y1, x1, y2, x2)] anchors in normalized coordinates\n\n Returns:\n Proposals in normalized coordinates [batch, rois, (y1, x1, y2, x2)]\n \"\"\"\n\n def __init__(self, proposal_count, nms_threshold, config=None, **kwargs):\n super(ProposalLayer, self).__init__(**kwargs)\n self.config = config\n self.proposal_count = proposal_count\n self.nms_threshold = nms_threshold\n\n def call(self, inputs):\n # Box Scores. Use the foreground class confidence. [Batch, num_rois, 1]\n scores = inputs[0][:, :, 1]\n # Box deltas [batch, num_rois, 4]\n deltas = inputs[1]\n deltas = deltas * np.reshape(self.config.RPN_BBOX_STD_DEV, [1, 1, 4])\n # Anchors\n anchors = inputs[2]\n\n # Improve performance by trimming to top anchors by score\n # and doing the rest on the smaller subset.\n pre_nms_limit = tf.minimum(self.config.PRE_NMS_LIMIT, tf.shape(anchors)[1])\n ix = tf.nn.top_k(scores, pre_nms_limit, sorted=True,\n name=\"top_anchors\").indices\n scores = utils.batch_slice([scores, ix], lambda x, y: tf.gather(x, y),\n self.config.IMAGES_PER_GPU)\n deltas = utils.batch_slice([deltas, ix], lambda x, y: tf.gather(x, y),\n self.config.IMAGES_PER_GPU)\n pre_nms_anchors = utils.batch_slice([anchors, ix], lambda a, x: tf.gather(a, x),\n self.config.IMAGES_PER_GPU,\n names=[\"pre_nms_anchors\"])\n\n # Apply deltas to anchors to get refined anchors.\n # [batch, N, (y1, x1, y2, x2)]\n boxes = utils.batch_slice([pre_nms_anchors, deltas],\n lambda x, y: apply_box_deltas_graph(x, y),\n self.config.IMAGES_PER_GPU,\n names=[\"refined_anchors\"])\n\n # Clip to image boundaries. Since we're in normalized coordinates,\n # clip to 0..1 range. [batch, N, (y1, x1, y2, x2)]\n window = np.array([0, 0, 1, 1], dtype=np.float32)\n boxes = utils.batch_slice(boxes,\n lambda x: clip_boxes_graph(x, window),\n self.config.IMAGES_PER_GPU,\n names=[\"refined_anchors_clipped\"])\n\n # Filter out small boxes\n # According to Xinlei Chen's paper, this reduces detection accuracy\n # for small objects, so we're skipping it.\n\n # Non-max suppression\n def nms(boxes, scores):\n indices = tf.image.non_max_suppression(\n boxes, scores, self.proposal_count,\n self.nms_threshold, name=\"rpn_non_max_suppression\")\n proposals = tf.gather(boxes, indices)\n # Pad if needed\n padding = tf.maximum(self.proposal_count - tf.shape(proposals)[0], 0)\n proposals = tf.pad(proposals, [(0, padding), (0, 0)])\n return proposals\n proposals = utils.batch_slice([boxes, scores], nms,\n self.config.IMAGES_PER_GPU)\n return proposals\n\n def compute_output_shape(self, input_shape):\n return (None, self.proposal_count, 4)\n\n\n############################################################\n# ROIAlign Layer\n############################################################\n\ndef log2_graph(x):\n \"\"\"Implementation of Log2. TF doesn't have a native implementation.\"\"\"\n return tf.log(x) / tf.log(2.0)\n\n\nclass PyramidROIAlign(KE.Layer):\n \"\"\"Implements ROI Pooling on multiple levels of the feature pyramid.\n\n Params:\n - pool_shape: [pool_height, pool_width] of the output pooled regions. Usually [7, 7]\n\n Inputs:\n - boxes: [batch, num_boxes, (y1, x1, y2, x2)] in normalized\n coordinates. Possibly padded with zeros if not enough\n boxes to fill the array.\n - image_meta: [batch, (meta data)] Image details. 
See compose_image_meta()\n - feature_maps: List of feature maps from different levels of the pyramid.\n Each is [batch, height, width, channels]\n\n Output:\n Pooled regions in the shape: [batch, num_boxes, pool_height, pool_width, channels].\n The width and height are those specific in the pool_shape in the layer\n constructor.\n \"\"\"\n\n def __init__(self, pool_shape, **kwargs):\n super(PyramidROIAlign, self).__init__(**kwargs)\n self.pool_shape = tuple(pool_shape)\n\n def call(self, inputs):\n # Crop boxes [batch, num_boxes, (y1, x1, y2, x2)] in normalized coords\n boxes = inputs[0]\n\n # Image meta\n # Holds details about the image. See compose_image_meta()\n image_meta = inputs[1]\n\n # Feature Maps. List of feature maps from different level of the\n # feature pyramid. Each is [batch, height, width, channels]\n feature_maps = inputs[2:]\n\n # Assign each ROI to a level in the pyramid based on the ROI area.\n y1, x1, y2, x2 = tf.split(boxes, 4, axis=2)\n h = y2 - y1\n w = x2 - x1\n # Use shape of first image. Images in a batch must have the same size.\n image_shape = parse_image_meta_graph(image_meta)['image_shape'][0]\n # Equation 1 in the Feature Pyramid Networks paper. Account for\n # the fact that our coordinates are normalized here.\n # e.g. a 224x224 ROI (in pixels) maps to P4\n image_area = tf.cast(image_shape[0] * image_shape[1], tf.float32)\n roi_level = log2_graph(tf.sqrt(h * w) / (224.0 / tf.sqrt(image_area)))\n roi_level = tf.minimum(5, tf.maximum(\n 2, 4 + tf.cast(tf.round(roi_level), tf.int32)))\n roi_level = tf.squeeze(roi_level, 2)\n\n # Loop through levels and apply ROI pooling to each. P2 to P5.\n pooled = []\n box_to_level = []\n for i, level in enumerate(range(2, 6)):\n ix = tf.where(tf.equal(roi_level, level))\n level_boxes = tf.gather_nd(boxes, ix)\n\n # Box indices for crop_and_resize.\n box_indices = tf.cast(ix[:, 0], tf.int32)\n\n # Keep track of which box is mapped to which level\n box_to_level.append(ix)\n\n # Stop gradient propogation to ROI proposals\n level_boxes = tf.stop_gradient(level_boxes)\n box_indices = tf.stop_gradient(box_indices)\n\n # Crop and Resize\n # From Mask R-CNN paper: \"We sample four regular locations, so\n # that we can evaluate either max or average pooling. 
In fact,\n # interpolating only a single value at each bin center (without\n # pooling) is nearly as effective.\"\n #\n # Here we use the simplified approach of a single value per bin,\n # which is how it's done in tf.crop_and_resize()\n # Result: [batch * num_boxes, pool_height, pool_width, channels]\n pooled.append(tf.image.crop_and_resize(\n feature_maps[i], level_boxes, box_indices, self.pool_shape,\n method=\"bilinear\"))\n\n # Pack pooled features into one tensor\n pooled = tf.concat(pooled, axis=0)\n\n # Pack box_to_level mapping into one array and add another\n # column representing the order of pooled boxes\n box_to_level = tf.concat(box_to_level, axis=0)\n box_range = tf.expand_dims(tf.range(tf.shape(box_to_level)[0]), 1)\n box_to_level = tf.concat([tf.cast(box_to_level, tf.int32), box_range],\n axis=1)\n\n # Rearrange pooled features to match the order of the original boxes\n # Sort box_to_level by batch then box index\n # TF doesn't have a way to sort by two columns, so merge them and sort.\n sorting_tensor = box_to_level[:, 0] * 100000 + box_to_level[:, 1]\n ix = tf.nn.top_k(sorting_tensor, k=tf.shape(\n box_to_level)[0]).indices[::-1]\n ix = tf.gather(box_to_level[:, 2], ix)\n pooled = tf.gather(pooled, ix)\n\n # Re-add the batch dimension\n shape = tf.concat([tf.shape(boxes)[:2], tf.shape(pooled)[1:]], axis=0)\n pooled = tf.reshape(pooled, shape)\n return pooled\n\n def compute_output_shape(self, input_shape):\n return input_shape[0][:2] + self.pool_shape + (input_shape[2][-1], )\n\n\n############################################################\n# Detection Target Layer\n############################################################\n\ndef overlaps_graph(boxes1, boxes2):\n \"\"\"Computes IoU overlaps between two sets of boxes.\n boxes1, boxes2: [N, (y1, x1, y2, x2)].\n \"\"\"\n # 1. Tile boxes2 and repeat boxes1. This allows us to compare\n # every boxes1 against every boxes2 without loops.\n # TF doesn't have an equivalent to np.repeat() so simulate it\n # using tf.tile() and tf.reshape.\n b1 = tf.reshape(tf.tile(tf.expand_dims(boxes1, 1),\n [1, 1, tf.shape(boxes2)[0]]), [-1, 4])\n b2 = tf.tile(boxes2, [tf.shape(boxes1)[0], 1])\n # 2. Compute intersections\n b1_y1, b1_x1, b1_y2, b1_x2 = tf.split(b1, 4, axis=1)\n b2_y1, b2_x1, b2_y2, b2_x2 = tf.split(b2, 4, axis=1)\n y1 = tf.maximum(b1_y1, b2_y1)\n x1 = tf.maximum(b1_x1, b2_x1)\n y2 = tf.minimum(b1_y2, b2_y2)\n x2 = tf.minimum(b1_x2, b2_x2)\n intersection = tf.maximum(x2 - x1, 0) * tf.maximum(y2 - y1, 0)\n # 3. Compute unions\n b1_area = (b1_y2 - b1_y1) * (b1_x2 - b1_x1)\n b2_area = (b2_y2 - b2_y1) * (b2_x2 - b2_x1)\n union = b1_area + b2_area - intersection\n # 4. Compute IoU and reshape to [boxes1, boxes2]\n iou = intersection / union\n overlaps = tf.reshape(iou, [tf.shape(boxes1)[0], tf.shape(boxes2)[0]])\n return overlaps\n\n\ndef detection_targets_graph(proposals, gt_class_ids, gt_boxes, gt_masks, config):\n \"\"\"Generates detection targets for one image. Subsamples proposals and\n generates target class IDs, bounding box deltas, and masks for each.\n\n Inputs:\n proposals: [POST_NMS_ROIS_TRAINING, (y1, x1, y2, x2)] in normalized coordinates. 
Might\n be zero padded if there are not enough proposals.\n gt_class_ids: [MAX_GT_INSTANCES] int class IDs\n gt_boxes: [MAX_GT_INSTANCES, (y1, x1, y2, x2)] in normalized coordinates.\n gt_masks: [height, width, MAX_GT_INSTANCES] of boolean type.\n\n Returns: Target ROIs and corresponding class IDs, bounding box shifts,\n and masks.\n rois: [TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized coordinates\n class_ids: [TRAIN_ROIS_PER_IMAGE]. Integer class IDs. Zero padded.\n deltas: [TRAIN_ROIS_PER_IMAGE, (dy, dx, log(dh), log(dw))]\n masks: [TRAIN_ROIS_PER_IMAGE, height, width]. Masks cropped to bbox\n boundaries and resized to neural network output size.\n\n Note: Returned arrays might be zero padded if not enough target ROIs.\n \"\"\"\n # Assertions\n asserts = [\n tf.Assert(tf.greater(tf.shape(proposals)[0], 0), [proposals],\n name=\"roi_assertion\"),\n ]\n with tf.control_dependencies(asserts):\n proposals = tf.identity(proposals)\n\n # Remove zero padding\n proposals, _ = trim_zeros_graph(proposals, name=\"trim_proposals\")\n gt_boxes, non_zeros = trim_zeros_graph(gt_boxes, name=\"trim_gt_boxes\")\n gt_class_ids = tf.boolean_mask(gt_class_ids, non_zeros,\n name=\"trim_gt_class_ids\")\n gt_masks = tf.gather(gt_masks, tf.where(non_zeros)[:, 0], axis=2,\n name=\"trim_gt_masks\")\n\n # Handle COCO crowds\n # A crowd box in COCO is a bounding box around several instances. Exclude\n # them from training. A crowd box is given a negative class ID.\n crowd_ix = tf.where(gt_class_ids < 0)[:, 0]\n non_crowd_ix = tf.where(gt_class_ids > 0)[:, 0]\n crowd_boxes = tf.gather(gt_boxes, crowd_ix)\n gt_class_ids = tf.gather(gt_class_ids, non_crowd_ix)\n gt_boxes = tf.gather(gt_boxes, non_crowd_ix)\n gt_masks = tf.gather(gt_masks, non_crowd_ix, axis=2)\n\n # Compute overlaps matrix [proposals, gt_boxes]\n overlaps = overlaps_graph(proposals, gt_boxes)\n\n # Compute overlaps with crowd boxes [proposals, crowd_boxes]\n crowd_overlaps = overlaps_graph(proposals, crowd_boxes)\n crowd_iou_max = tf.reduce_max(crowd_overlaps, axis=1)\n no_crowd_bool = (crowd_iou_max < 0.001)\n\n # Determine positive and negative ROIs\n roi_iou_max = tf.reduce_max(overlaps, axis=1)\n # 1. Positive ROIs are those with >= 0.5 IoU with a GT box\n positive_roi_bool = (roi_iou_max >= 0.5)\n positive_indices = tf.where(positive_roi_bool)[:, 0]\n # 2. Negative ROIs are those with < 0.5 with every GT box. Skip crowds.\n negative_indices = tf.where(tf.logical_and(roi_iou_max < 0.5, no_crowd_bool))[:, 0]\n\n # Subsample ROIs. Aim for 33% positive\n # Positive ROIs\n positive_count = int(config.TRAIN_ROIS_PER_IMAGE *\n config.ROI_POSITIVE_RATIO)\n positive_indices = tf.random_shuffle(positive_indices)[:positive_count]\n positive_count = tf.shape(positive_indices)[0]\n # Negative ROIs. 
Add enough to maintain positive:negative ratio.\n r = 1.0 / config.ROI_POSITIVE_RATIO\n negative_count = tf.cast(r * tf.cast(positive_count, tf.float32), tf.int32) - positive_count\n negative_indices = tf.random_shuffle(negative_indices)[:negative_count]\n # Gather selected ROIs\n positive_rois = tf.gather(proposals, positive_indices)\n negative_rois = tf.gather(proposals, negative_indices)\n\n # Assign positive ROIs to GT boxes.\n positive_overlaps = tf.gather(overlaps, positive_indices)\n roi_gt_box_assignment = tf.cond(\n tf.greater(tf.shape(positive_overlaps)[1], 0),\n true_fn = lambda: tf.argmax(positive_overlaps, axis=1),\n false_fn = lambda: tf.cast(tf.constant([]),tf.int64)\n )\n roi_gt_boxes = tf.gather(gt_boxes, roi_gt_box_assignment)\n roi_gt_class_ids = tf.gather(gt_class_ids, roi_gt_box_assignment)\n\n # Compute bbox refinement for positive ROIs\n deltas = utils.box_refinement_graph(positive_rois, roi_gt_boxes)\n deltas /= config.BBOX_STD_DEV\n\n # Assign positive ROIs to GT masks\n # Permute masks to [N, height, width, 1]\n transposed_masks = tf.expand_dims(tf.transpose(gt_masks, [2, 0, 1]), -1)\n # Pick the right mask for each ROI\n roi_masks = tf.gather(transposed_masks, roi_gt_box_assignment)\n\n # Compute mask targets\n boxes = positive_rois\n if config.USE_MINI_MASK:\n # Transform ROI coordinates from normalized image space\n # to normalized mini-mask space.\n y1, x1, y2, x2 = tf.split(positive_rois, 4, axis=1)\n gt_y1, gt_x1, gt_y2, gt_x2 = tf.split(roi_gt_boxes, 4, axis=1)\n gt_h = gt_y2 - gt_y1\n gt_w = gt_x2 - gt_x1\n y1 = (y1 - gt_y1) / gt_h\n x1 = (x1 - gt_x1) / gt_w\n y2 = (y2 - gt_y1) / gt_h\n x2 = (x2 - gt_x1) / gt_w\n boxes = tf.concat([y1, x1, y2, x2], 1)\n box_ids = tf.range(0, tf.shape(roi_masks)[0])\n masks = tf.image.crop_and_resize(tf.cast(roi_masks, tf.float32), boxes,\n box_ids,\n config.MASK_SHAPE)\n # Remove the extra dimension from masks.\n masks = tf.squeeze(masks, axis=3)\n\n # Threshold mask pixels at 0.5 to have GT masks be 0 or 1 to use with\n # binary cross entropy loss.\n masks = tf.round(masks)\n\n # Append negative ROIs and pad bbox deltas and masks that\n # are not used for negative ROIs with zeros.\n rois = tf.concat([positive_rois, negative_rois], axis=0)\n N = tf.shape(negative_rois)[0]\n P = tf.maximum(config.TRAIN_ROIS_PER_IMAGE - tf.shape(rois)[0], 0)\n rois = tf.pad(rois, [(0, P), (0, 0)])\n roi_gt_boxes = tf.pad(roi_gt_boxes, [(0, N + P), (0, 0)])\n roi_gt_class_ids = tf.pad(roi_gt_class_ids, [(0, N + P)])\n deltas = tf.pad(deltas, [(0, N + P), (0, 0)])\n masks = tf.pad(masks, [[0, N + P], (0, 0), (0, 0)])\n\n return rois, roi_gt_class_ids, deltas, masks\n\n\nclass DetectionTargetLayer(KE.Layer):\n \"\"\"Subsamples proposals and generates target box refinement, class_ids,\n and masks for each.\n\n Inputs:\n proposals: [batch, N, (y1, x1, y2, x2)] in normalized coordinates. Might\n be zero padded if there are not enough proposals.\n gt_class_ids: [batch, MAX_GT_INSTANCES] Integer class IDs.\n gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in normalized\n coordinates.\n gt_masks: [batch, height, width, MAX_GT_INSTANCES] of boolean type\n\n Returns: Target ROIs and corresponding class IDs, bounding box shifts,\n and masks.\n rois: [batch, TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized\n coordinates\n target_class_ids: [batch, TRAIN_ROIS_PER_IMAGE]. 
Integer class IDs.\n target_deltas: [batch, TRAIN_ROIS_PER_IMAGE, (dy, dx, log(dh), log(dw)]\n target_mask: [batch, TRAIN_ROIS_PER_IMAGE, height, width]\n Masks cropped to bbox boundaries and resized to neural\n network output size.\n\n Note: Returned arrays might be zero padded if not enough target ROIs.\n \"\"\"\n\n def __init__(self, config, **kwargs):\n super(DetectionTargetLayer, self).__init__(**kwargs)\n self.config = config\n\n def call(self, inputs):\n proposals = inputs[0]\n gt_class_ids = inputs[1]\n gt_boxes = inputs[2]\n gt_masks = inputs[3]\n\n # Slice the batch and run a graph for each slice\n # TODO: Rename target_bbox to target_deltas for clarity\n names = [\"rois\", \"target_class_ids\", \"target_bbox\", \"target_mask\"]\n outputs = utils.batch_slice(\n [proposals, gt_class_ids, gt_boxes, gt_masks],\n lambda w, x, y, z: detection_targets_graph(\n w, x, y, z, self.config),\n self.config.IMAGES_PER_GPU, names=names)\n return outputs\n\n def compute_output_shape(self, input_shape):\n return [\n (None, self.config.TRAIN_ROIS_PER_IMAGE, 4), # rois\n (None, self.config.TRAIN_ROIS_PER_IMAGE), # class_ids\n (None, self.config.TRAIN_ROIS_PER_IMAGE, 4), # deltas\n (None, self.config.TRAIN_ROIS_PER_IMAGE, self.config.MASK_SHAPE[0],\n self.config.MASK_SHAPE[1]) # masks\n ]\n\n def compute_mask(self, inputs, mask=None):\n return [None, None, None, None]\n\n\n############################################################\n# Detection Layer\n############################################################\n\ndef refine_detections_graph(rois, probs, deltas, window, config):\n \"\"\"Refine classified proposals and filter overlaps and return final\n detections.\n\n Inputs:\n rois: [N, (y1, x1, y2, x2)] in normalized coordinates\n probs: [N, num_classes]. Class probabilities.\n deltas: [N, num_classes, (dy, dx, log(dh), log(dw))]. Class-specific\n bounding box deltas.\n window: (y1, x1, y2, x2) in normalized coordinates. The part of the image\n that contains the image excluding the padding.\n\n Returns detections shaped: [num_detections, (y1, x1, y2, x2, class_id, score)] where\n coordinates are normalized.\n \"\"\"\n # Class IDs per ROI\n class_ids = tf.argmax(probs, axis=1, output_type=tf.int32)\n # Class probability of the top class of each ROI\n indices = tf.stack([tf.range(probs.shape[0]), class_ids], axis=1)\n class_scores = tf.gather_nd(probs, indices)\n # Class-specific bounding box deltas\n deltas_specific = tf.gather_nd(deltas, indices)\n # Apply bounding box deltas\n # Shape: [boxes, (y1, x1, y2, x2)] in normalized coordinates\n refined_rois = apply_box_deltas_graph(\n rois, deltas_specific * config.BBOX_STD_DEV)\n # Clip boxes to image window\n refined_rois = clip_boxes_graph(refined_rois, window)\n\n # TODO: Filter out boxes with zero area\n\n # Filter out background boxes\n keep = tf.where(class_ids > 0)[:, 0]\n # Filter out low confidence boxes\n if config.DETECTION_MIN_CONFIDENCE:\n conf_keep = tf.where(class_scores >= config.DETECTION_MIN_CONFIDENCE)[:, 0]\n keep = tf.sets.set_intersection(tf.expand_dims(keep, 0),\n tf.expand_dims(conf_keep, 0))\n keep = tf.sparse_tensor_to_dense(keep)[0]\n\n # Apply per-class NMS\n # 1. 
Prepare variables\n pre_nms_class_ids = tf.gather(class_ids, keep)\n pre_nms_scores = tf.gather(class_scores, keep)\n pre_nms_rois = tf.gather(refined_rois, keep)\n unique_pre_nms_class_ids = tf.unique(pre_nms_class_ids)[0]\n\n def nms_keep_map(class_id):\n \"\"\"Apply Non-Maximum Suppression on ROIs of the given class.\"\"\"\n # Indices of ROIs of the given class\n ixs = tf.where(tf.equal(pre_nms_class_ids, class_id))[:, 0]\n # Apply NMS\n class_keep = tf.image.non_max_suppression(\n tf.gather(pre_nms_rois, ixs),\n tf.gather(pre_nms_scores, ixs),\n max_output_size=config.DETECTION_MAX_INSTANCES,\n iou_threshold=config.DETECTION_NMS_THRESHOLD)\n # Map indices\n class_keep = tf.gather(keep, tf.gather(ixs, class_keep))\n # Pad with -1 so returned tensors have the same shape\n gap = config.DETECTION_MAX_INSTANCES - tf.shape(class_keep)[0]\n class_keep = tf.pad(class_keep, [(0, gap)],\n mode='CONSTANT', constant_values=-1)\n # Set shape so map_fn() can infer result shape\n class_keep.set_shape([config.DETECTION_MAX_INSTANCES])\n return class_keep\n\n # 2. Map over class IDs\n nms_keep = tf.map_fn(nms_keep_map, unique_pre_nms_class_ids,\n dtype=tf.int64)\n # 3. Merge results into one list, and remove -1 padding\n nms_keep = tf.reshape(nms_keep, [-1])\n nms_keep = tf.gather(nms_keep, tf.where(nms_keep > -1)[:, 0])\n # 4. Compute intersection between keep and nms_keep\n keep = tf.sets.set_intersection(tf.expand_dims(keep, 0),\n tf.expand_dims(nms_keep, 0))\n keep = tf.sparse_tensor_to_dense(keep)[0]\n # Keep top detections\n roi_count = config.DETECTION_MAX_INSTANCES\n class_scores_keep = tf.gather(class_scores, keep)\n num_keep = tf.minimum(tf.shape(class_scores_keep)[0], roi_count)\n top_ids = tf.nn.top_k(class_scores_keep, k=num_keep, sorted=True)[1]\n keep = tf.gather(keep, top_ids)\n\n # Arrange output as [N, (y1, x1, y2, x2, class_id, score)]\n # Coordinates are normalized.\n detections = tf.concat([\n tf.gather(refined_rois, keep),\n tf.to_float(tf.gather(class_ids, keep))[..., tf.newaxis],\n tf.gather(class_scores, keep)[..., tf.newaxis]\n ], axis=1)\n\n # Pad with zeros if detections < DETECTION_MAX_INSTANCES\n gap = config.DETECTION_MAX_INSTANCES - tf.shape(detections)[0]\n detections = tf.pad(detections, [(0, gap), (0, 0)], \"CONSTANT\")\n return detections\n\n\nclass DetectionLayer(KE.Layer):\n \"\"\"Takes classified proposal boxes and their bounding box deltas and\n returns the final detection boxes.\n\n Returns:\n [batch, num_detections, (y1, x1, y2, x2, class_id, class_score)] where\n coordinates are normalized.\n \"\"\"\n\n def __init__(self, config=None, **kwargs):\n super(DetectionLayer, self).__init__(**kwargs)\n self.config = config\n\n def call(self, inputs):\n rois = inputs[0]\n mrcnn_class = inputs[1]\n mrcnn_bbox = inputs[2]\n image_meta = inputs[3]\n\n # Get windows of images in normalized coordinates. 
Windows are the area\n # in the image that excludes the padding.\n # Use the shape of the first image in the batch to normalize the window\n # because we know that all images get resized to the same size.\n m = parse_image_meta_graph(image_meta)\n image_shape = m['image_shape'][0]\n window = norm_boxes_graph(m['window'], image_shape[:2])\n\n # Run detection refinement graph on each item in the batch\n detections_batch = utils.batch_slice(\n [rois, mrcnn_class, mrcnn_bbox, window],\n lambda x, y, w, z: refine_detections_graph(x, y, w, z, self.config),\n self.config.IMAGES_PER_GPU)\n\n # Reshape output\n # [batch, num_detections, (y1, x1, y2, x2, class_id, class_score)] in\n # normalized coordinates\n return tf.reshape(\n detections_batch,\n [self.config.BATCH_SIZE, self.config.DETECTION_MAX_INSTANCES, 6])\n\n def compute_output_shape(self, input_shape):\n return (None, self.config.DETECTION_MAX_INSTANCES, 6)\n\n\n############################################################\n# Region Proposal Network (RPN)\n############################################################\n\ndef rpn_graph(feature_map, anchors_per_location, anchor_stride):\n \"\"\"Builds the computation graph of Region Proposal Network.\n\n feature_map: backbone features [batch, height, width, depth]\n anchors_per_location: number of anchors per pixel in the feature map\n anchor_stride: Controls the density of anchors. Typically 1 (anchors for\n every pixel in the feature map), or 2 (every other pixel).\n\n Returns:\n rpn_class_logits: [batch, H * W * anchors_per_location, 2] Anchor classifier logits (before softmax)\n rpn_probs: [batch, H * W * anchors_per_location, 2] Anchor classifier probabilities.\n rpn_bbox: [batch, H * W * anchors_per_location, (dy, dx, log(dh), log(dw))] Deltas to be\n applied to anchors.\n \"\"\"\n # TODO: check if stride of 2 causes alignment issues if the feature map\n # is not even.\n # Shared convolutional base of the RPN\n shared = KL.Conv2D(512, (3, 3), padding='same', activation='relu',\n strides=anchor_stride,\n name='rpn_conv_shared')(feature_map)\n\n # Anchor Score. [batch, height, width, anchors per location * 2].\n x = KL.Conv2D(2 * anchors_per_location, (1, 1), padding='valid',\n activation='linear', name='rpn_class_raw')(shared)\n\n # Reshape to [batch, anchors, 2]\n rpn_class_logits = KL.Lambda(\n lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 2]))(x)\n\n # Softmax on last dimension of BG/FG.\n rpn_probs = KL.Activation(\n \"softmax\", name=\"rpn_class_xxx\")(rpn_class_logits)\n\n # Bounding box refinement. [batch, H, W, anchors per location * depth]\n # where depth is [x, y, log(w), log(h)]\n x = KL.Conv2D(anchors_per_location * 4, (1, 1), padding=\"valid\",\n activation='linear', name='rpn_bbox_pred')(shared)\n\n # Reshape to [batch, anchors, 4]\n rpn_bbox = KL.Lambda(lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 4]))(x)\n\n return [rpn_class_logits, rpn_probs, rpn_bbox]\n\n\ndef build_rpn_model(anchor_stride, anchors_per_location, depth):\n \"\"\"Builds a Keras model of the Region Proposal Network.\n It wraps the RPN graph so it can be used multiple times with shared\n weights.\n\n anchors_per_location: number of anchors per pixel in the feature map\n anchor_stride: Controls the density of anchors. Typically 1 (anchors for\n every pixel in the feature map), or 2 (every other pixel).\n depth: Depth of the backbone feature map.\n\n Returns a Keras Model object. 
The model outputs, when called, are:\n rpn_class_logits: [batch, H * W * anchors_per_location, 2] Anchor classifier logits (before softmax)\n rpn_probs: [batch, H * W * anchors_per_location, 2] Anchor classifier probabilities.\n rpn_bbox: [batch, H * W * anchors_per_location, (dy, dx, log(dh), log(dw))] Deltas to be\n applied to anchors.\n \"\"\"\n input_feature_map = KL.Input(shape=[None, None, depth],\n name=\"input_rpn_feature_map\")\n outputs = rpn_graph(input_feature_map, anchors_per_location, anchor_stride)\n return KM.Model([input_feature_map], outputs, name=\"rpn_model\")\n\n\n############################################################\n# Feature Pyramid Network Heads\n############################################################\n\ndef fpn_classifier_graph(rois, feature_maps, image_meta,\n pool_size, num_classes, train_bn=True,\n fc_layers_size=1024):\n \"\"\"Builds the computation graph of the feature pyramid network classifier\n and regressor heads.\n\n rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized\n coordinates.\n feature_maps: List of feature maps from different layers of the pyramid,\n [P2, P3, P4, P5]. Each has a different resolution.\n image_meta: [batch, (meta data)] Image details. See compose_image_meta()\n pool_size: The width of the square feature map generated from ROI Pooling.\n num_classes: number of classes, which determines the depth of the results\n train_bn: Boolean. Train or freeze Batch Norm layers\n fc_layers_size: Size of the 2 FC layers\n\n Returns:\n logits: [batch, num_rois, NUM_CLASSES] classifier logits (before softmax)\n probs: [batch, num_rois, NUM_CLASSES] classifier probabilities\n bbox_deltas: [batch, num_rois, NUM_CLASSES, (dy, dx, log(dh), log(dw))] Deltas to apply to\n proposal boxes\n \"\"\"\n # ROI Pooling\n # Shape: [batch, num_rois, POOL_SIZE, POOL_SIZE, channels]\n x = PyramidROIAlign([pool_size, pool_size],\n name=\"roi_align_classifier\")([rois, image_meta] + feature_maps)\n # Two 1024 FC layers (implemented with Conv2D for consistency)\n x = KL.TimeDistributed(KL.Conv2D(fc_layers_size, (pool_size, pool_size), padding=\"valid\"),\n name=\"mrcnn_class_conv1\")(x)\n x = KL.TimeDistributed(BatchNorm(), name='mrcnn_class_bn1')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n x = KL.TimeDistributed(KL.Conv2D(fc_layers_size, (1, 1)),\n name=\"mrcnn_class_conv2\")(x)\n x = KL.TimeDistributed(BatchNorm(), name='mrcnn_class_bn2')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n shared = KL.Lambda(lambda x: K.squeeze(K.squeeze(x, 3), 2),\n name=\"pool_squeeze\")(x)\n\n # Classifier head\n mrcnn_class_logits = KL.TimeDistributed(KL.Dense(num_classes),\n name='mrcnn_class_logits')(shared)\n mrcnn_probs = KL.TimeDistributed(KL.Activation(\"softmax\"),\n name=\"mrcnn_class\")(mrcnn_class_logits)\n\n # BBox head\n # [batch, num_rois, NUM_CLASSES * (dy, dx, log(dh), log(dw))]\n x = KL.TimeDistributed(KL.Dense(num_classes * 4, activation='linear'),\n name='mrcnn_bbox_fc')(shared)\n # Reshape to [batch, num_rois, NUM_CLASSES, (dy, dx, log(dh), log(dw))]\n s = K.int_shape(x)\n mrcnn_bbox = KL.Reshape((s[1], num_classes, 4), name=\"mrcnn_bbox\")(x)\n\n return mrcnn_class_logits, mrcnn_probs, mrcnn_bbox\n\n\ndef build_fpn_mask_graph(rois, feature_maps, image_meta,\n pool_size, num_classes, train_bn=True):\n \"\"\"Builds the computation graph of the mask head of Feature Pyramid Network.\n\n rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized\n coordinates.\n feature_maps: List of feature maps from 
different layers of the pyramid,\n [P2, P3, P4, P5]. Each has a different resolution.\n image_meta: [batch, (meta data)] Image details. See compose_image_meta()\n pool_size: The width of the square feature map generated from ROI Pooling.\n num_classes: number of classes, which determines the depth of the results\n train_bn: Boolean. Train or freeze Batch Norm layers\n\n Returns: Masks [batch, num_rois, MASK_POOL_SIZE, MASK_POOL_SIZE, NUM_CLASSES]\n \"\"\"\n # ROI Pooling\n # Shape: [batch, num_rois, MASK_POOL_SIZE, MASK_POOL_SIZE, channels]\n x = PyramidROIAlign([pool_size, pool_size],\n name=\"roi_align_mask\")([rois, image_meta] + feature_maps)\n\n # Conv layers\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv1\")(x)\n x = KL.TimeDistributed(BatchNorm(),\n name='mrcnn_mask_bn1')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv2\")(x)\n x = KL.TimeDistributed(BatchNorm(),\n name='mrcnn_mask_bn2')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv3\")(x)\n x = KL.TimeDistributed(BatchNorm(),\n name='mrcnn_mask_bn3')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv4\")(x)\n x = KL.TimeDistributed(BatchNorm(),\n name='mrcnn_mask_bn4')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2DTranspose(256, (2, 2), strides=2, activation=\"relu\"),\n name=\"mrcnn_mask_deconv1\")(x)\n x = KL.TimeDistributed(KL.Conv2DTranspose(256, (2, 2), strides=2, activation=\"relu\"),\n name=\"mrcnn_mask_deconv2\")(x)\n x = KL.TimeDistributed(KL.Conv2DTranspose(256, (2, 2), strides=2, activation=\"relu\"),\n name=\"mrcnn_mask_deconv3\")(x)\n x = KL.TimeDistributed(KL.Conv2D(num_classes, (1, 1), strides=1, activation=\"sigmoid\"),\n name=\"mrcnn_mask\")(x)\n return x\n\n\n############################################################\n# Loss Functions\n############################################################\n\ndef smooth_l1_loss(y_true, y_pred):\n \"\"\"Implements Smooth-L1 loss.\n y_true and y_pred are typically: [N, 4], but could be any shape.\n \"\"\"\n diff = K.abs(y_true - y_pred)\n less_than_one = K.cast(K.less(diff, 1.0), \"float32\")\n loss = (less_than_one * 0.5 * diff**2) + (1 - less_than_one) * (diff - 0.5)\n return loss\n\n\ndef rpn_class_loss_graph(rpn_match, rpn_class_logits):\n \"\"\"RPN anchor classifier loss.\n\n rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,\n -1=negative, 0=neutral anchor.\n rpn_class_logits: [batch, anchors, 2]. RPN classifier logits for BG/FG.\n \"\"\"\n # Squeeze last dim to simplify\n rpn_match = tf.squeeze(rpn_match, -1)\n # Get anchor classes. 
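\n    # A worked example of smooth_l1_loss() defined above (plain arithmetic):\n    #   |d| = 0.5 -> 0.5 * 0.5**2 = 0.125   (quadratic branch, |d| < 1)\n    #   |d| = 2.0 -> 2.0 - 0.5 = 1.5        (linear branch, |d| >= 1)\n    # For the class loss below: rpn_match [1, -1, 0, 1] maps to\n    # anchor_class [1, 0, 0, 1], and tf.where() keeps only the non-neutral rows.\n    # 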
Convert the -1/+1 match to 0/1 values.\n    anchor_class = K.cast(K.equal(rpn_match, 1), tf.int32)\n    # Positive and Negative anchors contribute to the loss,\n    # but neutral anchors (match value = 0) don't.\n    indices = tf.where(K.not_equal(rpn_match, 0))\n    # Pick rows that contribute to the loss and filter out the rest.\n    rpn_class_logits = tf.gather_nd(rpn_class_logits, indices)\n    anchor_class = tf.gather_nd(anchor_class, indices)\n    # Cross entropy loss\n    loss = K.sparse_categorical_crossentropy(target=anchor_class,\n                                             output=rpn_class_logits,\n                                             from_logits=True)\n    loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))\n    return loss\n\n\ndef rpn_bbox_loss_graph(config, target_bbox, rpn_match, rpn_bbox):\n    """Return the RPN bounding box loss graph.\n\n    config: the model config object.\n    target_bbox: [batch, max positive anchors, (dy, dx, log(dh), log(dw))].\n        Uses 0 padding to fill in unused bbox deltas.\n    rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,\n               -1=negative, 0=neutral anchor.\n    rpn_bbox: [batch, anchors, (dy, dx, log(dh), log(dw))]\n    """\n    # Positive anchors contribute to the loss, but negative and\n    # neutral anchors (match value of 0 or -1) don't.\n    rpn_match = K.squeeze(rpn_match, -1)\n    indices = tf.where(K.equal(rpn_match, 1))\n\n    # Pick bbox deltas that contribute to the loss\n    rpn_bbox = tf.gather_nd(rpn_bbox, indices)\n\n    # Trim target bounding box deltas to the same length as rpn_bbox.\n    batch_counts = K.sum(K.cast(K.equal(rpn_match, 1), tf.int32), axis=1)\n    target_bbox = batch_pack_graph(target_bbox, batch_counts,\n                                   config.IMAGES_PER_GPU)\n\n    loss = smooth_l1_loss(target_bbox, rpn_bbox)\n\n    loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))\n    return loss\n\n\ndef mrcnn_class_loss_graph(target_class_ids, pred_class_logits,\n                           active_class_ids):\n    """Loss for the classifier head of Mask RCNN.\n\n    target_class_ids: [batch, num_rois]. Integer class IDs. Uses zero\n        padding to fill in the array.\n    pred_class_logits: [batch, num_rois, num_classes]\n    active_class_ids: [batch, num_classes]. Has a value of 1 for\n        classes that are in the dataset of the image, and 0\n        for classes that are not in the dataset.\n    """\n    # During model building, Keras calls this function with\n    # target_class_ids of type float32. Unclear why. Cast it\n    # to int to get around it.\n    target_class_ids = tf.cast(target_class_ids, 'int64')\n\n    # Find predictions of classes that are not in the dataset.\n    pred_class_ids = tf.argmax(pred_class_logits, axis=2)\n    # TODO: Update this line to work with batch > 1. Right now it assumes all\n    #       images in a batch have the same active_class_ids\n    pred_active = tf.gather(active_class_ids[0], pred_class_ids)\n\n    # Loss\n    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n        labels=target_class_ids, logits=pred_class_logits)\n\n    # Erase losses of predictions of classes that are not in the active\n    # classes of the image.\n    loss = loss * pred_active\n\n    # Compute the loss mean. Use only predictions that contribute\n    # to the loss to get a correct mean.\n    loss = tf.reduce_sum(loss) / tf.reduce_sum(pred_active)\n    return loss\n\n\ndef mrcnn_bbox_loss_graph(target_bbox, target_class_ids, pred_bbox):\n    """Loss for Mask R-CNN bounding box refinement.\n\n    target_bbox: [batch, num_rois, (dy, dx, log(dh), log(dw))]\n    target_class_ids: [batch, num_rois]. 
Integer class IDs.\n pred_bbox: [batch, num_rois, num_classes, (dy, dx, log(dh), log(dw))]\n \"\"\"\n # Reshape to merge batch and roi dimensions for simplicity.\n target_class_ids = K.reshape(target_class_ids, (-1,))\n target_bbox = K.reshape(target_bbox, (-1, 4))\n pred_bbox = K.reshape(pred_bbox, (-1, K.int_shape(pred_bbox)[2], 4))\n\n # Only positive ROIs contribute to the loss. And only\n # the right class_id of each ROI. Get their indices.\n positive_roi_ix = tf.where(target_class_ids > 0)[:, 0]\n positive_roi_class_ids = tf.cast(\n tf.gather(target_class_ids, positive_roi_ix), tf.int64)\n indices = tf.stack([positive_roi_ix, positive_roi_class_ids], axis=1)\n\n # Gather the deltas (predicted and true) that contribute to loss\n target_bbox = tf.gather(target_bbox, positive_roi_ix)\n pred_bbox = tf.gather_nd(pred_bbox, indices)\n\n # Smooth-L1 Loss\n loss = K.switch(tf.size(target_bbox) > 0,\n smooth_l1_loss(y_true=target_bbox, y_pred=pred_bbox),\n tf.constant(0.0))\n loss = K.mean(loss)\n return loss\n\n\ndef mrcnn_mask_loss_graph(target_masks, target_class_ids, pred_masks):\n \"\"\"Mask binary cross-entropy loss for the masks head.\n\n target_masks: [batch, num_rois, height, width].\n A float32 tensor of values 0 or 1. Uses zero padding to fill array.\n target_class_ids: [batch, num_rois]. Integer class IDs. Zero padded.\n pred_masks: [batch, proposals, height, width, num_classes] float32 tensor\n with values from 0 to 1.\n \"\"\"\n # Reshape for simplicity. Merge first two dimensions into one.\n target_class_ids = K.reshape(target_class_ids, (-1,))\n mask_shape = tf.shape(target_masks)\n target_masks = K.reshape(target_masks, (-1, mask_shape[2], mask_shape[3]))\n pred_shape = tf.shape(pred_masks)\n pred_masks = K.reshape(pred_masks,\n (-1, pred_shape[2], pred_shape[3], pred_shape[4]))\n # Permute predicted masks to [N, num_classes, height, width]\n pred_masks = tf.transpose(pred_masks, [0, 3, 1, 2])\n\n # Only positive ROIs contribute to the loss. And only\n # the class specific mask of each ROI.\n positive_ix = tf.where(target_class_ids > 0)[:, 0]\n positive_class_ids = tf.cast(\n tf.gather(target_class_ids, positive_ix), tf.int64)\n indices = tf.stack([positive_ix, positive_class_ids], axis=1)\n\n # Gather the masks (predicted and true) that contribute to loss\n y_true = tf.gather(target_masks, positive_ix)\n y_pred = tf.gather_nd(pred_masks, indices)\n\n # Compute binary cross entropy. If no positive ROIs, then return 0.\n # shape: [batch, roi, num_classes]\n loss = K.switch(tf.size(y_true) > 0,\n K.binary_crossentropy(target=y_true, output=y_pred),\n tf.constant(0.0))\n loss = K.mean(loss)\n return loss\n\n\n############################################################\n# Data Generator\n############################################################\n\ndef load_image_gt(dataset, config, image_id, augment=False, augmentation=None,\n use_mini_mask=False):\n \"\"\"Load and return ground truth data for an image (image, mask, bounding boxes).\n\n augment: (deprecated. Use augmentation instead). If true, apply random\n image augmentation. Currently, only horizontal flipping is offered.\n augmentation: Optional. An imgaug (https://github.com/aleju/imgaug) augmentation.\n For example, passing imgaug.augmenters.Fliplr(0.5) flips images\n right/left 50% of the time.\n use_mini_mask: If False, returns full-size masks that are the same height\n and width as the original image. These can be big, for example\n 1024x1024x100 (for 100 instances). 
Mini masks are smaller, typically\n        224x224, and are generated by extracting the bounding box of the\n        object and resizing it to MINI_MASK_SHAPE.\n\n    Returns:\n    image: [height, width, 3]\n    shape: the original shape of the image before resizing and cropping.\n    class_ids: [instance_count] Integer class IDs\n    bbox: [instance_count, (y1, x1, y2, x2)]\n    mask: [height, width, instance_count]. The height and width are those\n        of the image unless use_mini_mask is True, in which case they are\n        defined in MINI_MASK_SHAPE.\n    """\n    # Load image and mask\n    image = dataset.load_image(image_id)\n    mask, class_ids = dataset.load_mask(image_id)\n    original_shape = image.shape\n    image, window, scale, padding, crop = utils.resize_image(\n        image,\n        min_dim=config.IMAGE_MIN_DIM,\n        min_scale=config.IMAGE_MIN_SCALE,\n        max_dim=config.IMAGE_MAX_DIM,\n        mode=config.IMAGE_RESIZE_MODE)\n    mask = utils.resize_mask(mask, scale, padding, crop)\n\n    # Random horizontal flips.\n    # TODO: will be removed in a future update in favor of augmentation\n    if augment:\n        logging.warning("'augment' is deprecated. Use 'augmentation' instead.")\n        if random.randint(0, 1):\n            image = np.fliplr(image)\n            mask = np.fliplr(mask)\n\n    # Augmentation\n    # This requires the imgaug lib (https://github.com/aleju/imgaug)\n    if augmentation:\n        import imgaug\n\n        # Augmenters that are safe to apply to masks\n        # Some, such as Affine, have settings that make them unsafe, so always\n        # test your augmentation on masks\n        MASK_AUGMENTERS = ["Sequential", "SomeOf", "OneOf", "Sometimes",\n                           "Fliplr", "Flipud", "CropAndPad",\n                           "Affine", "PiecewiseAffine"]\n\n        def hook(images, augmenter, parents, default):\n            """Determines which augmenters to apply to masks."""\n            return augmenter.__class__.__name__ in MASK_AUGMENTERS\n\n        # Store shapes before augmentation to compare\n        image_shape = image.shape\n        mask_shape = mask.shape\n        # Make augmenters deterministic to apply similarly to images and masks\n        det = augmentation.to_deterministic()\n        image = det.augment_image(image)\n        # Change mask to np.uint8 because imgaug doesn't support bool masks\n        mask = det.augment_image(mask.astype(np.uint8),\n                                 hooks=imgaug.HooksImages(activator=hook))\n        # Verify that shapes didn't change\n        assert image.shape == image_shape, "Augmentation shouldn't change image size"\n        assert mask.shape == mask_shape, "Augmentation shouldn't change mask size"\n        # Change mask back to bool (np.bool is deprecated in newer NumPy)\n        mask = mask.astype(bool)\n\n    # Some masks can end up all zeros if the corresponding instance was\n    # cropped out, so filter those instances out here.\n    _idx = np.sum(mask, axis=(0, 1)) > 0\n    mask = mask[:, :, _idx]\n    class_ids = class_ids[_idx]\n
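    # utils.extract_bboxes() is assumed to compute, per instance, the tight\n    # box around the nonzero mask pixels, roughly:\n    #   ys, xs = np.where(m); box = (ys.min(), xs.min(), ys.max() + 1, xs.max() + 1)\n    # Bounding boxes. 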
Note that some boxes might be all zeros\n # if the corresponding mask got cropped out.\n # bbox: [num_instances, (y1, x1, y2, x2)]\n bbox = utils.extract_bboxes(mask)\n\n # Active classes\n # Different datasets have different classes, so track the\n # classes supported in the dataset of this image.\n active_class_ids = np.zeros([dataset.num_classes], dtype=np.int32)\n source_class_ids = dataset.source_class_ids[dataset.image_info[image_id][\"source\"]]\n active_class_ids[source_class_ids] = 1\n\n # Resize masks to smaller size to reduce memory usage\n if use_mini_mask:\n mask = utils.minimize_mask(bbox, mask, config.MINI_MASK_SHAPE)\n\n # Image meta data\n image_meta = compose_image_meta(image_id, original_shape, image.shape,\n window, scale, active_class_ids)\n\n return image, image_meta, class_ids, bbox, mask\n\n\ndef build_detection_targets(rpn_rois, gt_class_ids, gt_boxes, gt_masks, config):\n \"\"\"Generate targets for training Stage 2 classifier and mask heads.\n This is not used in normal training. It's useful for debugging or to train\n the Mask RCNN heads without using the RPN head.\n\n Inputs:\n rpn_rois: [N, (y1, x1, y2, x2)] proposal boxes.\n gt_class_ids: [instance count] Integer class IDs\n gt_boxes: [instance count, (y1, x1, y2, x2)]\n gt_masks: [height, width, instance count] Ground truth masks. Can be full\n size or mini-masks.\n\n Returns:\n rois: [TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)]\n class_ids: [TRAIN_ROIS_PER_IMAGE]. Integer class IDs.\n bboxes: [TRAIN_ROIS_PER_IMAGE, NUM_CLASSES, (y, x, log(h), log(w))]. Class-specific\n bbox refinements.\n masks: [TRAIN_ROIS_PER_IMAGE, height, width, NUM_CLASSES). Class specific masks cropped\n to bbox boundaries and resized to neural network output size.\n \"\"\"\n assert rpn_rois.shape[0] > 0\n assert gt_class_ids.dtype == np.int32, \"Expected int but got {}\".format(\n gt_class_ids.dtype)\n assert gt_boxes.dtype == np.int32, \"Expected int but got {}\".format(\n gt_boxes.dtype)\n assert gt_masks.dtype == np.bool_, \"Expected bool but got {}\".format(\n gt_masks.dtype)\n\n # It's common to add GT Boxes to ROIs but we don't do that here because\n # according to XinLei Chen's paper, it doesn't help.\n\n # Trim empty padding in gt_boxes and gt_masks parts\n instance_ids = np.where(gt_class_ids > 0)[0]\n assert instance_ids.shape[0] > 0, \"Image must contain instances.\"\n gt_class_ids = gt_class_ids[instance_ids]\n gt_boxes = gt_boxes[instance_ids]\n gt_masks = gt_masks[:, :, instance_ids]\n\n # Compute areas of ROIs and ground truth boxes.\n rpn_roi_area = (rpn_rois[:, 2] - rpn_rois[:, 0]) * \\\n (rpn_rois[:, 3] - rpn_rois[:, 1])\n gt_box_area = (gt_boxes[:, 2] - gt_boxes[:, 0]) * \\\n (gt_boxes[:, 3] - gt_boxes[:, 1])\n\n # Compute overlaps [rpn_rois, gt_boxes]\n overlaps = np.zeros((rpn_rois.shape[0], gt_boxes.shape[0]))\n for i in range(overlaps.shape[1]):\n gt = gt_boxes[i]\n overlaps[:, i] = utils.compute_iou(\n gt, rpn_rois, gt_box_area[i], rpn_roi_area)\n\n # Assign ROIs to GT boxes\n rpn_roi_iou_argmax = np.argmax(overlaps, axis=1)\n rpn_roi_iou_max = overlaps[np.arange(\n overlaps.shape[0]), rpn_roi_iou_argmax]\n # GT box assigned to each ROI\n rpn_roi_gt_boxes = gt_boxes[rpn_roi_iou_argmax]\n rpn_roi_gt_class_ids = gt_class_ids[rpn_roi_iou_argmax]\n\n # Positive ROIs are those with >= 0.5 IoU with a GT box.\n fg_ids = np.where(rpn_roi_iou_max > 0.5)[0]\n\n # Negative ROIs are those with max IoU 0.1-0.5 (hard example mining)\n # TODO: To hard example mine or not to hard example mine, that's the question\n # bg_ids = 
np.where((rpn_roi_iou_max >= 0.1) & (rpn_roi_iou_max < 0.5))[0]\n bg_ids = np.where(rpn_roi_iou_max < 0.5)[0]\n\n # Subsample ROIs. Aim for 33% foreground.\n # FG\n fg_roi_count = int(config.TRAIN_ROIS_PER_IMAGE * config.ROI_POSITIVE_RATIO)\n if fg_ids.shape[0] > fg_roi_count:\n keep_fg_ids = np.random.choice(fg_ids, fg_roi_count, replace=False)\n else:\n keep_fg_ids = fg_ids\n # BG\n remaining = config.TRAIN_ROIS_PER_IMAGE - keep_fg_ids.shape[0]\n if bg_ids.shape[0] > remaining:\n keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False)\n else:\n keep_bg_ids = bg_ids\n # Combine indices of ROIs to keep\n keep = np.concatenate([keep_fg_ids, keep_bg_ids])\n # Need more?\n remaining = config.TRAIN_ROIS_PER_IMAGE - keep.shape[0]\n if remaining > 0:\n # Looks like we don't have enough samples to maintain the desired\n # balance. Reduce requirements and fill in the rest. This is\n # likely different from the Mask RCNN paper.\n\n # There is a small chance we have neither fg nor bg samples.\n if keep.shape[0] == 0:\n # Pick bg regions with easier IoU threshold\n bg_ids = np.where(rpn_roi_iou_max < 0.5)[0]\n assert bg_ids.shape[0] >= remaining\n keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False)\n assert keep_bg_ids.shape[0] == remaining\n keep = np.concatenate([keep, keep_bg_ids])\n else:\n # Fill the rest with repeated bg rois.\n keep_extra_ids = np.random.choice(\n keep_bg_ids, remaining, replace=True)\n keep = np.concatenate([keep, keep_extra_ids])\n assert keep.shape[0] == config.TRAIN_ROIS_PER_IMAGE, \\\n \"keep doesn't match ROI batch size {}, {}\".format(\n keep.shape[0], config.TRAIN_ROIS_PER_IMAGE)\n\n # Reset the gt boxes assigned to BG ROIs.\n rpn_roi_gt_boxes[keep_bg_ids, :] = 0\n rpn_roi_gt_class_ids[keep_bg_ids] = 0\n\n # For each kept ROI, assign a class_id, and for FG ROIs also add bbox refinement.\n rois = rpn_rois[keep]\n roi_gt_boxes = rpn_roi_gt_boxes[keep]\n roi_gt_class_ids = rpn_roi_gt_class_ids[keep]\n roi_gt_assignment = rpn_roi_iou_argmax[keep]\n\n # Class-aware bbox deltas. 
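\n    # utils.box_refinement() is assumed to compute the same deltas as the RPN\n    # target code below: dy = (gt_cy - cy) / h, dx = (gt_cx - cx) / w,\n    # dh = log(gt_h / h), dw = log(gt_w / w). A worked example:\n    #   roi (0, 0, 10, 10) vs. gt (0, 0, 20, 20) gives\n    #   dy = dx = (10 - 5) / 10 = 0.5 and dh = dw = log(2) ~ 0.693.\n    # 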
[y, x, log(h), log(w)]\n bboxes = np.zeros((config.TRAIN_ROIS_PER_IMAGE,\n config.NUM_CLASSES, 4), dtype=np.float32)\n pos_ids = np.where(roi_gt_class_ids > 0)[0]\n bboxes[pos_ids, roi_gt_class_ids[pos_ids]] = utils.box_refinement(\n rois[pos_ids], roi_gt_boxes[pos_ids, :4])\n # Normalize bbox refinements\n bboxes /= config.BBOX_STD_DEV\n\n # Generate class-specific target masks\n masks = np.zeros((config.TRAIN_ROIS_PER_IMAGE, config.MASK_SHAPE[0], config.MASK_SHAPE[1], config.NUM_CLASSES),\n dtype=np.float32)\n for i in pos_ids:\n class_id = roi_gt_class_ids[i]\n assert class_id > 0, \"class id must be greater than 0\"\n gt_id = roi_gt_assignment[i]\n class_mask = gt_masks[:, :, gt_id]\n\n if config.USE_MINI_MASK:\n # Create a mask placeholder, the size of the image\n placeholder = np.zeros(config.IMAGE_SHAPE[:2], dtype=bool)\n # GT box\n gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[gt_id]\n gt_w = gt_x2 - gt_x1\n gt_h = gt_y2 - gt_y1\n # Resize mini mask to size of GT box\n placeholder[gt_y1:gt_y2, gt_x1:gt_x2] = \\\n np.round(utils.resize(class_mask, (gt_h, gt_w))).astype(bool)\n # Place the mini batch in the placeholder\n class_mask = placeholder\n\n # Pick part of the mask and resize it\n y1, x1, y2, x2 = rois[i].astype(np.int32)\n m = class_mask[y1:y2, x1:x2]\n mask = utils.resize(m, config.MASK_SHAPE)\n masks[i, :, :, class_id] = mask\n\n return rois, roi_gt_class_ids, bboxes, masks\n\n\ndef build_rpn_targets(image_shape, anchors, gt_class_ids, gt_boxes, config):\n \"\"\"Given the anchors and GT boxes, compute overlaps and identify positive\n anchors and deltas to refine them to match their corresponding GT boxes.\n\n anchors: [num_anchors, (y1, x1, y2, x2)]\n gt_class_ids: [num_gt_boxes] Integer class IDs.\n gt_boxes: [num_gt_boxes, (y1, x1, y2, x2)]\n\n Returns:\n rpn_match: [N] (int32) matches between anchors and GT boxes.\n 1 = positive anchor, -1 = negative anchor, 0 = neutral\n rpn_bbox: [N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas.\n \"\"\"\n # RPN Match: 1 = positive anchor, -1 = negative anchor, 0 = neutral\n rpn_match = np.zeros([anchors.shape[0]], dtype=np.int32)\n # RPN bounding boxes: [max anchors per image, (dy, dx, log(dh), log(dw))]\n rpn_bbox = np.zeros((config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4))\n\n # Handle COCO crowds\n # A crowd box in COCO is a bounding box around several instances. Exclude\n # them from training. A crowd box is given a negative class ID.\n crowd_ix = np.where(gt_class_ids < 0)[0]\n if crowd_ix.shape[0] > 0:\n # Filter out crowds from ground truth class IDs and boxes\n non_crowd_ix = np.where(gt_class_ids > 0)[0]\n crowd_boxes = gt_boxes[crowd_ix]\n gt_class_ids = gt_class_ids[non_crowd_ix]\n gt_boxes = gt_boxes[non_crowd_ix]\n # Compute overlaps with crowd boxes [anchors, crowds]\n crowd_overlaps = utils.compute_overlaps(anchors, crowd_boxes)\n crowd_iou_max = np.amax(crowd_overlaps, axis=1)\n no_crowd_bool = (crowd_iou_max < 0.001)\n else:\n # All anchors don't intersect a crowd\n no_crowd_bool = np.ones([anchors.shape[0]], dtype=bool)\n\n # Compute overlaps [num_anchors, num_gt_boxes]\n overlaps = utils.compute_overlaps(anchors, gt_boxes)\n\n # Match anchors to GT Boxes\n # If an anchor overlaps a GT box with IoU >= 0.7 then it's positive.\n # If an anchor overlaps a GT box with IoU < 0.3 then it's negative.\n # Neutral anchors are those that don't match the conditions above,\n # and they don't influence the loss function.\n # However, don't keep any GT box unmatched (rare, but happens). 
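\n    # A tiny worked example of these rules (IoU values invented):\n    #   anchor A, best IoU 0.80 -> positive\n    #   anchor B, best IoU 0.20 -> negative (unless it overlaps a crowd box)\n    #   anchor C, best IoU 0.50 -> neutral, unless 0.50 is the best IoU some\n    #   GT box gets from any anchor, in which case C is forced positive.\n    # 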
Instead,\n    # match it to the closest anchor (even if its max IoU is < 0.3).\n    #\n    # 1. Set negative anchors first. They get overwritten below if a GT box is\n    # matched to them. Skip boxes in crowd areas.\n    anchor_iou_argmax = np.argmax(overlaps, axis=1)\n    anchor_iou_max = overlaps[np.arange(overlaps.shape[0]), anchor_iou_argmax]\n    rpn_match[(anchor_iou_max < 0.3) & (no_crowd_bool)] = -1\n    # 2. Set an anchor for each GT box (regardless of IoU value).\n    # If multiple anchors have the same IoU, match all of them\n    gt_iou_argmax = np.argwhere(overlaps == np.max(overlaps, axis=0))[:,0]\n    rpn_match[gt_iou_argmax] = 1\n    # 3. Set anchors with high overlap as positive.\n    rpn_match[anchor_iou_max >= 0.7] = 1\n\n    # Subsample to balance positive and negative anchors\n    # Don't let positives be more than half the anchors\n    ids = np.where(rpn_match == 1)[0]\n    extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE // 2)\n    if extra > 0:\n        # Reset the extra ones to neutral\n        ids = np.random.choice(ids, extra, replace=False)\n        rpn_match[ids] = 0\n    # Same for negative proposals\n    ids = np.where(rpn_match == -1)[0]\n    extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE -\n                        np.sum(rpn_match == 1))\n    if extra > 0:\n        # Reset the extra ones to neutral\n        ids = np.random.choice(ids, extra, replace=False)\n        rpn_match[ids] = 0\n\n    # For positive anchors, compute shift and scale needed to transform them\n    # to match the corresponding GT boxes.\n    ids = np.where(rpn_match == 1)[0]\n    ix = 0  # index into rpn_bbox\n    # TODO: use box_refinement() rather than duplicating the code here\n    for i, a in zip(ids, anchors[ids]):\n        # Closest gt box (it might have IoU < 0.7)\n        gt = gt_boxes[anchor_iou_argmax[i]]\n\n        # Convert coordinates to center plus width/height.\n        # GT Box\n        gt_h = gt[2] - gt[0]\n        gt_w = gt[3] - gt[1]\n        gt_center_y = gt[0] + 0.5 * gt_h\n        gt_center_x = gt[1] + 0.5 * gt_w\n        # Anchor\n        a_h = a[2] - a[0]\n        a_w = a[3] - a[1]\n        a_center_y = a[0] + 0.5 * a_h\n        a_center_x = a[1] + 0.5 * a_w\n\n        # Compute the bbox refinement that the RPN should predict.\n        rpn_bbox[ix] = [\n            (gt_center_y - a_center_y) / a_h,\n            (gt_center_x - a_center_x) / a_w,\n            np.log(gt_h / a_h),\n            np.log(gt_w / a_w),\n        ]\n        # Normalize\n        rpn_bbox[ix] /= config.RPN_BBOX_STD_DEV\n        ix += 1\n\n    return rpn_match, rpn_bbox\n\n\ndef generate_random_rois(image_shape, count, gt_class_ids, gt_boxes):\n    """Generates ROI proposals similar to what a region proposal network\n    would generate.\n\n    image_shape: [Height, Width, Depth]\n    count: Number of ROIs to generate\n    gt_class_ids: [N] Integer ground truth class IDs\n    gt_boxes: [N, (y1, x1, y2, x2)] Ground truth boxes in pixels.\n\n    Returns: [count, (y1, x1, y2, x2)] ROI boxes in pixels.\n    """\n    # placeholder\n    rois = np.zeros((count, 4), dtype=np.int32)\n\n    # Generate random ROIs around GT boxes (90% of count)\n    rois_per_box = int(0.9 * count / gt_boxes.shape[0])\n    for i in range(gt_boxes.shape[0]):\n        gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[i]\n        h = gt_y2 - gt_y1\n        w = gt_x2 - gt_x1\n        # random boundaries\n        r_y1 = max(gt_y1 - h, 0)\n        r_y2 = min(gt_y2 + h, image_shape[0])\n        r_x1 = max(gt_x1 - w, 0)\n        r_x2 = min(gt_x2 + w, image_shape[1])\n\n        # To avoid generating boxes with zero area, we generate double what\n        # we need and filter out the extra. If we get fewer valid boxes\n        # than we need, we loop and try again.\n
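        # E.g. with rois_per_box = 10 this draws 20 candidate (y1, y2) and\n        # (x1, x2) pairs, drops pairs less than 1 pixel apart (zero-area\n        # boxes), and keeps the first 10 survivors of each.\n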
        while True:\n            y1y2 = np.random.randint(r_y1, r_y2, (rois_per_box * 2, 2))\n            x1x2 = np.random.randint(r_x1, r_x2, (rois_per_box * 2, 2))\n            # Filter out zero area boxes\n            threshold = 1\n            y1y2 = y1y2[np.abs(y1y2[:, 0] - y1y2[:, 1]) >=\n                        threshold][:rois_per_box]\n            x1x2 = x1x2[np.abs(x1x2[:, 0] - x1x2[:, 1]) >=\n                        threshold][:rois_per_box]\n            if y1y2.shape[0] == rois_per_box and x1x2.shape[0] == rois_per_box:\n                break\n\n        # Sort on axis 1 to ensure x1 <= x2 and y1 <= y2 and then reshape\n        # into x1, y1, x2, y2 order\n        x1, x2 = np.split(np.sort(x1x2, axis=1), 2, axis=1)\n        y1, y2 = np.split(np.sort(y1y2, axis=1), 2, axis=1)\n        box_rois = np.hstack([y1, x1, y2, x2])\n        rois[rois_per_box * i:rois_per_box * (i + 1)] = box_rois\n\n    # Generate random ROIs anywhere in the image (10% of count)\n    remaining_count = count - (rois_per_box * gt_boxes.shape[0])\n    # To avoid generating boxes with zero area, we generate double what\n    # we need and filter out the extra. If we get fewer valid boxes\n    # than we need, we loop and try again.\n    while True:\n        y1y2 = np.random.randint(0, image_shape[0], (remaining_count * 2, 2))\n        x1x2 = np.random.randint(0, image_shape[1], (remaining_count * 2, 2))\n        # Filter out zero area boxes\n        threshold = 1\n        y1y2 = y1y2[np.abs(y1y2[:, 0] - y1y2[:, 1]) >=\n                    threshold][:remaining_count]\n        x1x2 = x1x2[np.abs(x1x2[:, 0] - x1x2[:, 1]) >=\n                    threshold][:remaining_count]\n        if y1y2.shape[0] == remaining_count and x1x2.shape[0] == remaining_count:\n            break\n\n    # Sort on axis 1 to ensure x1 <= x2 and y1 <= y2 and then reshape\n    # into x1, y1, x2, y2 order\n    x1, x2 = np.split(np.sort(x1x2, axis=1), 2, axis=1)\n    y1, y2 = np.split(np.sort(y1y2, axis=1), 2, axis=1)\n    global_rois = np.hstack([y1, x1, y2, x2])\n    rois[-remaining_count:] = global_rois\n    return rois\n\n\ndef data_generator(dataset, config, shuffle=True, augment=False, augmentation=None,\n                   random_rois=0, batch_size=1, detection_targets=False,\n                   no_augmentation_sources=None):\n    """A generator that returns images and corresponding target class ids,\n    bounding box deltas, and masks.\n\n    dataset: The Dataset object to pick data from\n    config: The model config object\n    shuffle: If True, shuffles the samples before every epoch\n    augment: (deprecated. Use augmentation instead). If true, apply random\n        image augmentation. Currently, only horizontal flipping is offered.\n    augmentation: Optional. An imgaug (https://github.com/aleju/imgaug) augmentation.\n        For example, passing imgaug.augmenters.Fliplr(0.5) flips images\n        right/left 50% of the time.\n    random_rois: If > 0 then generate proposals to be used to train the\n                 network classifier and mask heads. Useful if training\n                 the Mask RCNN part without the RPN.\n    batch_size: How many images to return in each call\n    detection_targets: If True, generate detection targets (class IDs, bbox\n        deltas, and masks). Typically for debugging or visualizations because\n        in training detection targets are generated by DetectionTargetLayer.\n    no_augmentation_sources: Optional. List of sources to exclude for\n        augmentation. A source is a string that identifies a dataset and is\n        defined in the Dataset class.\n\n    Returns a Python generator. Upon calling next() on it, the\n    generator returns two lists, inputs and outputs. The contents\n    of the lists differ depending on the received arguments:\n    inputs list:\n    - images: [batch, H, W, C]\n    - image_meta: [batch, (meta data)] Image details. 
See compose_image_meta()\n - rpn_match: [batch, N] Integer (1=positive anchor, -1=negative, 0=neutral)\n - rpn_bbox: [batch, N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas.\n - gt_class_ids: [batch, MAX_GT_INSTANCES] Integer class IDs\n - gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)]\n - gt_masks: [batch, height, width, MAX_GT_INSTANCES]. The height and width\n are those of the image unless use_mini_mask is True, in which\n case they are defined in MINI_MASK_SHAPE.\n\n outputs list: Usually empty in regular training. But if detection_targets\n is True then the outputs list contains target class_ids, bbox deltas,\n and masks.\n \"\"\"\n b = 0 # batch item index\n image_index = -1\n image_ids = np.copy(dataset.image_ids)\n error_count = 0\n no_augmentation_sources = no_augmentation_sources or []\n\n # Anchors\n # [anchor_count, (y1, x1, y2, x2)]\n backbone_shapes = compute_backbone_shapes(config, config.IMAGE_SHAPE)\n anchors = utils.generate_pyramid_anchors(config.RPN_ANCHOR_SCALES,\n config.RPN_ANCHOR_RATIOS,\n backbone_shapes,\n config.BACKBONE_STRIDES,\n config.RPN_ANCHOR_STRIDE)\n\n # Keras requires a generator to run indefinitely.\n while True:\n try:\n # Increment index to pick next image. Shuffle if at the start of an epoch.\n image_index = (image_index + 1) % len(image_ids)\n if shuffle and image_index == 0:\n np.random.shuffle(image_ids)\n\n # Get GT bounding boxes and masks for image.\n image_id = image_ids[image_index]\n\n # If the image source is not to be augmented pass None as augmentation\n if dataset.image_info[image_id]['source'] in no_augmentation_sources:\n image, image_meta, gt_class_ids, gt_boxes, gt_masks = \\\n load_image_gt(dataset, config, image_id, augment=augment,\n augmentation=None,\n use_mini_mask=config.USE_MINI_MASK)\n else:\n image, image_meta, gt_class_ids, gt_boxes, gt_masks = \\\n load_image_gt(dataset, config, image_id, augment=augment,\n augmentation=augmentation,\n use_mini_mask=config.USE_MINI_MASK)\n\n # Skip images that have no instances. 
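\n            # A usage sketch (hedged; this generator is normally consumed by\n            # Keras fit_generator, but it can be stepped manually):\n            #   gen = data_generator(dataset, config, shuffle=True, batch_size=1)\n            #   inputs, outputs = next(gen)   # inputs[0]: molded images,\n            #                                 # inputs[2]: rpn_match per anchor\n            # 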
This can happen in cases\n # where we train on a subset of classes and the image doesn't\n # have any of the classes we care about.\n if not np.any(gt_class_ids > 0):\n continue\n\n # RPN Targets\n rpn_match, rpn_bbox = build_rpn_targets(image.shape, anchors,\n gt_class_ids, gt_boxes, config)\n\n # Mask R-CNN Targets\n if random_rois:\n rpn_rois = generate_random_rois(\n image.shape, random_rois, gt_class_ids, gt_boxes)\n if detection_targets:\n rois, mrcnn_class_ids, mrcnn_bbox, mrcnn_mask =\\\n build_detection_targets(\n rpn_rois, gt_class_ids, gt_boxes, gt_masks, config)\n\n # Init batch arrays\n if b == 0:\n batch_image_meta = np.zeros(\n (batch_size,) + image_meta.shape, dtype=image_meta.dtype)\n batch_rpn_match = np.zeros(\n [batch_size, anchors.shape[0], 1], dtype=rpn_match.dtype)\n batch_rpn_bbox = np.zeros(\n [batch_size, config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4], dtype=rpn_bbox.dtype)\n batch_images = np.zeros(\n (batch_size,) + image.shape, dtype=np.float32)\n batch_gt_class_ids = np.zeros(\n (batch_size, config.MAX_GT_INSTANCES), dtype=np.int32)\n batch_gt_boxes = np.zeros(\n (batch_size, config.MAX_GT_INSTANCES, 4), dtype=np.int32)\n batch_gt_masks = np.zeros(\n (batch_size, gt_masks.shape[0], gt_masks.shape[1],\n config.MAX_GT_INSTANCES), dtype=gt_masks.dtype)\n if random_rois:\n batch_rpn_rois = np.zeros(\n (batch_size, rpn_rois.shape[0], 4), dtype=rpn_rois.dtype)\n if detection_targets:\n batch_rois = np.zeros(\n (batch_size,) + rois.shape, dtype=rois.dtype)\n batch_mrcnn_class_ids = np.zeros(\n (batch_size,) + mrcnn_class_ids.shape, dtype=mrcnn_class_ids.dtype)\n batch_mrcnn_bbox = np.zeros(\n (batch_size,) + mrcnn_bbox.shape, dtype=mrcnn_bbox.dtype)\n batch_mrcnn_mask = np.zeros(\n (batch_size,) + mrcnn_mask.shape, dtype=mrcnn_mask.dtype)\n\n # If more instances than fits in the array, sub-sample from them.\n if gt_boxes.shape[0] > config.MAX_GT_INSTANCES:\n ids = np.random.choice(\n np.arange(gt_boxes.shape[0]), config.MAX_GT_INSTANCES, replace=False)\n gt_class_ids = gt_class_ids[ids]\n gt_boxes = gt_boxes[ids]\n gt_masks = gt_masks[:, :, ids]\n\n # Add to batch\n batch_image_meta[b] = image_meta\n batch_rpn_match[b] = rpn_match[:, np.newaxis]\n batch_rpn_bbox[b] = rpn_bbox\n batch_images[b] = mold_image(image.astype(np.float32), config)\n batch_gt_class_ids[b, :gt_class_ids.shape[0]] = gt_class_ids\n batch_gt_boxes[b, :gt_boxes.shape[0]] = gt_boxes\n batch_gt_masks[b, :, :, :gt_masks.shape[-1]] = gt_masks\n if random_rois:\n batch_rpn_rois[b] = rpn_rois\n if detection_targets:\n batch_rois[b] = rois\n batch_mrcnn_class_ids[b] = mrcnn_class_ids\n batch_mrcnn_bbox[b] = mrcnn_bbox\n batch_mrcnn_mask[b] = mrcnn_mask\n b += 1\n\n # Batch full?\n if b >= batch_size:\n inputs = [batch_images, batch_image_meta, batch_rpn_match, batch_rpn_bbox,\n batch_gt_class_ids, batch_gt_boxes, batch_gt_masks]\n outputs = []\n\n if random_rois:\n inputs.extend([batch_rpn_rois])\n if detection_targets:\n inputs.extend([batch_rois])\n # Keras requires that output and targets have the same number of dimensions\n batch_mrcnn_class_ids = np.expand_dims(\n batch_mrcnn_class_ids, -1)\n outputs.extend(\n [batch_mrcnn_class_ids, batch_mrcnn_bbox, batch_mrcnn_mask])\n\n yield inputs, outputs\n\n # start a new batch\n b = 0\n except (GeneratorExit, KeyboardInterrupt):\n raise\n except:\n # Log it and skip the image\n logging.exception(\"Error processing image {}\".format(\n dataset.image_info[image_id]))\n error_count += 1\n if error_count > 5:\n 
raise\n\n\n############################################################\n#  MaskRCNN Class\n############################################################\n\nclass MaskRCNN():\n    """Encapsulates the Mask RCNN model functionality.\n\n    The actual Keras model is in the keras_model property.\n    """\n\n    def __init__(self, mode, config, model_dir):\n        """\n        mode: Either "training" or "inference"\n        config: A Sub-class of the Config class\n        model_dir: Directory to save training logs and trained weights\n        """\n        assert mode in ['training', 'inference']\n        self.mode = mode\n        self.config = config\n        self.model_dir = model_dir\n        self.set_log_dir()\n        self.keras_model = self.build(mode=mode, config=config)\n\n    def build(self, mode, config):\n        """Build Mask R-CNN architecture.\n            input_shape: The shape of the input image.\n            mode: Either "training" or "inference". The inputs and\n                outputs of the model differ accordingly.\n        """\n        assert mode in ['training', 'inference']\n\n        # Image size must be divisible by 2 multiple times\n        h, w = config.IMAGE_SHAPE[:2]\n        if h / 2**6 != int(h / 2**6) or w / 2**6 != int(w / 2**6):\n            raise Exception("Image size must be divisible by 2 at least 6 times "\n                            "to avoid fractions when downscaling and upscaling. "\n                            "For example, use 256, 320, 384, 448, 512, ... etc. ")\n\n        # Inputs\n        input_image = KL.Input(\n            shape=[None, None, config.IMAGE_SHAPE[2]], name="input_image")\n        input_image_meta = KL.Input(shape=[config.IMAGE_META_SIZE],\n                                    name="input_image_meta")\n        if mode == "training":\n            # RPN GT\n            input_rpn_match = KL.Input(\n                shape=[None, 1], name="input_rpn_match", dtype=tf.int32)\n            input_rpn_bbox = KL.Input(\n                shape=[None, 4], name="input_rpn_bbox", dtype=tf.float32)\n\n            # Detection GT (class IDs, bounding boxes, and masks)\n            # 1. GT Class IDs (zero padded)\n            input_gt_class_ids = KL.Input(\n                shape=[None], name="input_gt_class_ids", dtype=tf.int32)\n            # 2. GT Boxes in pixels (zero padded)\n            # [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in image coordinates\n            input_gt_boxes = KL.Input(\n                shape=[None, 4], name="input_gt_boxes", dtype=tf.float32)\n            # Normalize coordinates\n            gt_boxes = KL.Lambda(lambda x: norm_boxes_graph(\n                x, K.shape(input_image)[1:3]))(input_gt_boxes)\n
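            # A numeric sketch of the normalization above, assuming\n            # norm_boxes_graph() scales by (H - 1, W - 1) and shifts (y2, x2)\n            # by 1 like utils.norm_boxes(): on a 1024x1024 input, the pixel\n            # box (0, 0, 1024, 1024) maps to (0.0, 0.0, 1.0, 1.0).\n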
            # 3. GT Masks (zero padded)\n            # [batch, height, width, MAX_GT_INSTANCES]\n            if config.USE_MINI_MASK:\n                input_gt_masks = KL.Input(\n                    shape=[config.MINI_MASK_SHAPE[0],\n                           config.MINI_MASK_SHAPE[1], None],\n                    name="input_gt_masks", dtype=bool)\n            else:\n                input_gt_masks = KL.Input(\n                    shape=[config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1], None],\n                    name="input_gt_masks", dtype=bool)\n        elif mode == "inference":\n            # Anchors in normalized coordinates\n            input_anchors = KL.Input(shape=[None, 4], name="input_anchors")\n\n        # Build the shared convolutional layers.\n        # Bottom-up Layers\n        # Returns a list of the last layers of each stage, 5 in total.\n        # Don't create the head (stage 5), so we pick the 4th item in the list.\n        if callable(config.BACKBONE):\n            _, C2, C3, C4, C5 = config.BACKBONE(input_image, stage5=True,\n                                                train_bn=config.TRAIN_BN)\n        else:\n            _, C2, C3, C4, C5 = resnet_graph(input_image, config.BACKBONE,\n                                             stage5=True, train_bn=config.TRAIN_BN)\n        # Top-down Layers\n        # TODO: add assert to verify feature map sizes match what's in config\n        P5 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c5p5')(C5)\n        P4 = KL.Add(name="fpn_p4add")([\n            KL.UpSampling2D(size=(2, 2), name="fpn_p5upsampled")(P5),\n            KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c4p4')(C4)])\n        P3 = KL.Add(name="fpn_p3add")([\n            KL.UpSampling2D(size=(2, 2), name="fpn_p4upsampled")(P4),\n            KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c3p3')(C3)])\n        P2 = KL.Add(name="fpn_p2add")([\n            KL.UpSampling2D(size=(2, 2), name="fpn_p3upsampled")(P3),\n            KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c2p2')(C2)])\n        # Attach 3x3 conv to all P layers to get the final feature maps.\n        P2 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding="SAME", name="fpn_p2")(P2)\n        P3 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding="SAME", name="fpn_p3")(P3)\n        P4 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding="SAME", name="fpn_p4")(P4)\n        P5 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding="SAME", name="fpn_p5")(P5)\n        # P6 is used for the 5th anchor scale in RPN. Generated by\n        # subsampling from P5 with stride of 2.\n        P6 = KL.MaxPooling2D(pool_size=(1, 1), strides=2, name="fpn_p6")(P5)\n\n        # Note that P6 is used in RPN, but not in the classifier heads.\n        rpn_feature_maps = [P2, P3, P4, P5, P6]\n        mrcnn_feature_maps = [P2, P3, P4, P5]\n\n        # Anchors\n        if mode == "training":\n            anchors = self.get_anchors(config.IMAGE_SHAPE)\n            # Duplicate across the batch dimension because Keras requires it\n            # TODO: can this be optimized to avoid duplicating the anchors?\n            anchors = np.broadcast_to(anchors, (config.BATCH_SIZE,) + anchors.shape)\n            # A hack to get around Keras's bad support for constants\n            anchors = KL.Lambda(lambda x: tf.Variable(anchors), name="anchors")(input_image)\n        else:\n            anchors = input_anchors\n\n        # RPN Model\n        rpn = build_rpn_model(config.RPN_ANCHOR_STRIDE,\n                              len(config.RPN_ANCHOR_RATIOS), config.TOP_DOWN_PYRAMID_SIZE)\n        # Loop through pyramid layers\n        layer_outputs = []  # list of lists\n        for p in rpn_feature_maps:\n            layer_outputs.append(rpn([p]))\n        # Concatenate layer outputs\n        # Convert from list of lists of level outputs to list of lists\n        # of outputs across levels.\n        # e.g. 
[[a1, b1, c1], [a2, b2, c2]] => [[a1, a2], [b1, b2], [c1, c2]]\n output_names = [\"rpn_class_logits\", \"rpn_class\", \"rpn_bbox\"]\n outputs = list(zip(*layer_outputs))\n outputs = [KL.Concatenate(axis=1, name=n)(list(o))\n for o, n in zip(outputs, output_names)]\n\n rpn_class_logits, rpn_class, rpn_bbox = outputs\n\n # Generate proposals\n # Proposals are [batch, N, (y1, x1, y2, x2)] in normalized coordinates\n # and zero padded.\n proposal_count = config.POST_NMS_ROIS_TRAINING if mode == \"training\"\\\n else config.POST_NMS_ROIS_INFERENCE\n rpn_rois = ProposalLayer(\n proposal_count=proposal_count,\n nms_threshold=config.RPN_NMS_THRESHOLD,\n name=\"ROI\",\n config=config)([rpn_class, rpn_bbox, anchors])\n\n if mode == \"training\":\n # Class ID mask to mark class IDs supported by the dataset the image\n # came from.\n active_class_ids = KL.Lambda(\n lambda x: parse_image_meta_graph(x)[\"active_class_ids\"]\n )(input_image_meta)\n\n if not config.USE_RPN_ROIS:\n # Ignore predicted ROIs and use ROIs provided as an input.\n input_rois = KL.Input(shape=[config.POST_NMS_ROIS_TRAINING, 4],\n name=\"input_roi\", dtype=np.int32)\n # Normalize coordinates\n target_rois = KL.Lambda(lambda x: norm_boxes_graph(\n x, K.shape(input_image)[1:3]))(input_rois)\n else:\n target_rois = rpn_rois\n\n # Generate detection targets\n # Subsamples proposals and generates target outputs for training\n # Note that proposal class IDs, gt_boxes, and gt_masks are zero\n # padded. Equally, returned rois and targets are zero padded.\n rois, target_class_ids, target_bbox, target_mask =\\\n DetectionTargetLayer(config, name=\"proposal_targets\")([\n target_rois, input_gt_class_ids, gt_boxes, input_gt_masks])\n\n # Network Heads\n # TODO: verify that this handles zero padded ROIs\n mrcnn_class_logits, mrcnn_class, mrcnn_bbox =\\\n fpn_classifier_graph(rois, mrcnn_feature_maps, input_image_meta,\n config.POOL_SIZE, config.NUM_CLASSES,\n train_bn=config.TRAIN_BN,\n fc_layers_size=config.FPN_CLASSIF_FC_LAYERS_SIZE)\n\n mrcnn_mask = build_fpn_mask_graph(rois, mrcnn_feature_maps,\n input_image_meta,\n config.MASK_POOL_SIZE,\n config.NUM_CLASSES,\n train_bn=config.TRAIN_BN)\n\n # TODO: clean up (use tf.identify if necessary)\n output_rois = KL.Lambda(lambda x: x * 1, name=\"output_rois\")(rois)\n\n # Losses\n rpn_class_loss = KL.Lambda(lambda x: rpn_class_loss_graph(*x), name=\"rpn_class_loss\")(\n [input_rpn_match, rpn_class_logits])\n rpn_bbox_loss = KL.Lambda(lambda x: rpn_bbox_loss_graph(config, *x), name=\"rpn_bbox_loss\")(\n [input_rpn_bbox, input_rpn_match, rpn_bbox])\n class_loss = KL.Lambda(lambda x: mrcnn_class_loss_graph(*x), name=\"mrcnn_class_loss\")(\n [target_class_ids, mrcnn_class_logits, active_class_ids])\n bbox_loss = KL.Lambda(lambda x: mrcnn_bbox_loss_graph(*x), name=\"mrcnn_bbox_loss\")(\n [target_bbox, target_class_ids, mrcnn_bbox])\n mask_loss = KL.Lambda(lambda x: mrcnn_mask_loss_graph(*x), name=\"mrcnn_mask_loss\")(\n [target_mask, target_class_ids, mrcnn_mask])\n\n # Model\n inputs = [input_image, input_image_meta,\n input_rpn_match, input_rpn_bbox, input_gt_class_ids, input_gt_boxes, input_gt_masks]\n if not config.USE_RPN_ROIS:\n inputs.append(input_rois)\n outputs = [rpn_class_logits, rpn_class, rpn_bbox,\n mrcnn_class_logits, mrcnn_class, mrcnn_bbox, mrcnn_mask,\n rpn_rois, output_rois,\n rpn_class_loss, rpn_bbox_loss, class_loss, bbox_loss, mask_loss]\n model = KM.Model(inputs, outputs, name='mask_rcnn')\n else:\n # Network Heads\n # Proposal classifier and BBox regressor heads\n 
mrcnn_class_logits, mrcnn_class, mrcnn_bbox =\\\n fpn_classifier_graph(rpn_rois, mrcnn_feature_maps, input_image_meta,\n config.POOL_SIZE, config.NUM_CLASSES,\n train_bn=config.TRAIN_BN,\n fc_layers_size=config.FPN_CLASSIF_FC_LAYERS_SIZE)\n\n # Detections\n # output is [batch, num_detections, (y1, x1, y2, x2, class_id, score)] in\n # normalized coordinates\n detections = DetectionLayer(config, name=\"mrcnn_detection\")(\n [rpn_rois, mrcnn_class, mrcnn_bbox, input_image_meta])\n\n # Create masks for detections\n detection_boxes = KL.Lambda(lambda x: x[..., :4])(detections)\n mrcnn_mask = build_fpn_mask_graph(detection_boxes, mrcnn_feature_maps,\n input_image_meta,\n config.MASK_POOL_SIZE,\n config.NUM_CLASSES,\n train_bn=config.TRAIN_BN)\n\n model = KM.Model([input_image, input_image_meta, input_anchors],\n [detections, mrcnn_class, mrcnn_bbox,\n mrcnn_mask, rpn_rois, rpn_class, rpn_bbox],\n name='mask_rcnn')\n\n # Add multi-GPU support.\n if config.GPU_COUNT > 1:\n from mrcnn.parallel_model import ParallelModel\n model = ParallelModel(model, config.GPU_COUNT)\n\n return model\n\n def find_last(self):\n \"\"\"Finds the last checkpoint file of the last trained model in the\n model directory.\n Returns:\n The path of the last checkpoint file\n \"\"\"\n # Get directory names. Each directory corresponds to a model\n dir_names = next(os.walk(self.model_dir))[1]\n key = self.config.NAME.lower()\n dir_names = filter(lambda f: f.startswith(key), dir_names)\n dir_names = sorted(dir_names)\n if not dir_names:\n import errno\n raise FileNotFoundError(\n errno.ENOENT,\n \"Could not find model directory under {}\".format(self.model_dir))\n # Pick last directory\n dir_name = os.path.join(self.model_dir, dir_names[-1])\n # Find the last checkpoint\n checkpoints = next(os.walk(dir_name))[2]\n checkpoints = filter(lambda f: f.startswith(\"mask_rcnn\"), checkpoints)\n checkpoints = sorted(checkpoints)\n if not checkpoints:\n import errno\n raise FileNotFoundError(\n errno.ENOENT, \"Could not find weight files in {}\".format(dir_name))\n checkpoint = os.path.join(dir_name, checkpoints[-1])\n return checkpoint\n\n def load_weights(self, filepath, by_name=False, exclude=None):\n \"\"\"Modified version of the corresponding Keras function with\n the addition of multi-GPU support and the ability to exclude\n some layers from loading.\n exclude: list of layer names to exclude\n \"\"\"\n import h5py\n # Conditional import to support versions of Keras before 2.2\n # TODO: remove in about 6 months (end of 2018)\n try:\n from keras.engine import saving\n except ImportError:\n # Keras before 2.2 used the 'topology' namespace.\n from keras.engine import topology as saving\n\n if exclude:\n by_name = True\n\n if h5py is None:\n raise ImportError('`load_weights` requires h5py.')\n f = h5py.File(filepath, mode='r')\n if 'layer_names' not in f.attrs and 'model_weights' in f:\n f = f['model_weights']\n\n # In multi-GPU training, we wrap the model. 
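\n        # A typical call when fine-tuning with a different number of classes\n        # (a hedged example; the layer names come from this file):\n        #   model.load_weights(weights_path, by_name=True,\n        #                      exclude=["mrcnn_class_logits", "mrcnn_bbox_fc",\n        #                               "mrcnn_bbox", "mrcnn_mask"])\n        # 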
Get layers\n # of the inner model because they have the weights.\n keras_model = self.keras_model\n layers = keras_model.inner_model.layers if hasattr(keras_model, \"inner_model\")\\\n else keras_model.layers\n\n # Exclude some layers\n if exclude:\n layers = filter(lambda l: l.name not in exclude, layers)\n\n if by_name:\n saving.load_weights_from_hdf5_group_by_name(f, layers)\n else:\n saving.load_weights_from_hdf5_group(f, layers)\n if hasattr(f, 'close'):\n f.close()\n\n # Update the log directory\n self.set_log_dir(filepath)\n\n def get_imagenet_weights(self):\n \"\"\"Downloads ImageNet trained weights from Keras.\n Returns path to weights file.\n \"\"\"\n from keras.utils.data_utils import get_file\n TF_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/'\\\n 'releases/download/v0.2/'\\\n 'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'\n weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',\n TF_WEIGHTS_PATH_NO_TOP,\n cache_subdir='models',\n md5_hash='a268eb855778b3df3c7506639542a6af')\n return weights_path\n\n def compile(self, learning_rate, momentum):\n \"\"\"Gets the model ready for training. Adds losses, regularization, and\n metrics. Then calls the Keras compile() function.\n \"\"\"\n # Optimizer object\n optimizer = keras.optimizers.SGD(\n lr=learning_rate, momentum=momentum,\n clipnorm=self.config.GRADIENT_CLIP_NORM)\n # Add Losses\n # First, clear previously set losses to avoid duplication\n self.keras_model._losses = []\n self.keras_model._per_input_losses = {}\n loss_names = [\n \"rpn_class_loss\", \"rpn_bbox_loss\",\n \"mrcnn_class_loss\", \"mrcnn_bbox_loss\", \"mrcnn_mask_loss\"]\n for name in loss_names:\n layer = self.keras_model.get_layer(name)\n if layer.output in self.keras_model.losses:\n continue\n loss = (\n tf.reduce_mean(layer.output, keepdims=True)\n * self.config.LOSS_WEIGHTS.get(name, 1.))\n self.keras_model.add_loss(loss)\n\n # Add L2 Regularization\n # Skip gamma and beta weights of batch normalization layers.\n reg_losses = [\n keras.regularizers.l2(self.config.WEIGHT_DECAY)(w) / tf.cast(tf.size(w), tf.float32)\n for w in self.keras_model.trainable_weights\n if 'gamma' not in w.name and 'beta' not in w.name]\n self.keras_model.add_loss(tf.add_n(reg_losses))\n\n # Compile\n self.keras_model.compile(\n optimizer=optimizer,\n loss=[None] * len(self.keras_model.outputs))\n\n # Add metrics for losses\n for name in loss_names:\n if name in self.keras_model.metrics_names:\n continue\n layer = self.keras_model.get_layer(name)\n self.keras_model.metrics_names.append(name)\n loss = (\n tf.reduce_mean(layer.output, keepdims=True)\n * self.config.LOSS_WEIGHTS.get(name, 1.))\n self.keras_model.metrics_tensors.append(loss)\n\n def set_trainable(self, layer_regex, keras_model=None, indent=0, verbose=1):\n \"\"\"Sets model layers as trainable if their names match\n the given regular expression.\n \"\"\"\n # Print message on the first call (but not on recursive calls)\n if verbose > 0 and keras_model is None:\n log(\"Selecting layers to train\")\n\n keras_model = keras_model or self.keras_model\n\n # In multi-GPU training, we wrap the model. 
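\n        # E.g. self.set_trainable(r"(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)") makes only\n        # the head layers trainable; train() below maps "heads" to this regex.\n        # 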
Get layers\n        # of the inner model because they have the weights.\n        layers = keras_model.inner_model.layers if hasattr(keras_model, "inner_model")\\\n            else keras_model.layers\n\n        for layer in layers:\n            # Is the layer a model?\n            if layer.__class__.__name__ == 'Model':\n                print("In model: ", layer.name)\n                self.set_trainable(\n                    layer_regex, keras_model=layer, indent=indent + 4)\n                continue\n\n            if not layer.weights:\n                continue\n            # Is it trainable?\n            trainable = bool(re.fullmatch(layer_regex, layer.name))\n            # Update layer. If layer is a container, update inner layer.\n            if layer.__class__.__name__ == 'TimeDistributed':\n                layer.layer.trainable = trainable\n            else:\n                layer.trainable = trainable\n            # Print trainable layer names\n            if trainable and verbose > 0:\n                log("{}{:20}   ({})".format(" " * indent, layer.name,\n                                            layer.__class__.__name__))\n\n    def set_log_dir(self, model_path=None):\n        """Sets the model log directory and epoch counter.\n\n        model_path: If None, or a format different from what this code uses\n            then set a new log directory and start epochs from 0. Otherwise,\n            extract the log directory and the epoch counter from the file\n            name.\n        """\n        # Set date and epoch counter as if starting a new model\n        self.epoch = 0\n        now = datetime.datetime.now()\n\n        # If we have a model path with date and epochs use them\n        if model_path:\n            # Continue from where we left off. Get epoch and date from the file name\n            # A sample model path might look like:\n            # \path\to\logs\coco20171029T2315\mask_rcnn_coco_0001.h5 (Windows)\n            # /path/to/logs/coco20171029T2315/mask_rcnn_coco_0001.h5 (Linux)\n            regex = r".*[/\\][\w-]+(\d{4})(\d{2})(\d{2})T(\d{2})(\d{2})[/\\]mask\_rcnn\_[\w-]+(\d{4})\.h5"\n            m = re.match(regex, model_path)\n            if m:\n                now = datetime.datetime(int(m.group(1)), int(m.group(2)), int(m.group(3)),\n                                        int(m.group(4)), int(m.group(5)))\n                # Epoch number in file is 1-based, and in Keras code it's 0-based.\n                # So, adjust for that then increment by one to start from the next epoch\n                self.epoch = int(m.group(6)) - 1 + 1\n                print('Re-starting from epoch %d' % self.epoch)\n\n        # Directory for training logs\n        self.log_dir = os.path.join(self.model_dir, "{}{:%Y%m%dT%H%M}".format(\n            self.config.NAME.lower(), now))\n\n        # Path to save after each epoch. Include placeholders that get filled by Keras.\n        self.checkpoint_path = os.path.join(self.log_dir, "mask_rcnn_{}_*epoch*.h5".format(\n            self.config.NAME.lower()))\n        self.checkpoint_path = self.checkpoint_path.replace(\n            "*epoch*", "{epoch:04d}")\n\n    def train(self, train_dataset, val_dataset, learning_rate, epochs, layers,\n              augmentation=None, custom_callbacks=None, no_augmentation_sources=None):\n        """Train the model.\n        train_dataset, val_dataset: Training and validation Dataset objects.\n        learning_rate: The learning rate to train with\n        epochs: Number of training epochs. Note that previous training epochs\n            are considered to be done already, so this actually determines\n            the epochs to train in total rather than in this particular\n            call.\n        layers: Allows selecting which layers to train. It can be:\n            - A regular expression to match layer names to train\n            - One of these predefined values:\n              heads: The RPN, classifier and mask heads of the network\n              all: All the layers\n              3+: Train Resnet stage 3 and up\n              4+: Train Resnet stage 4 and up\n              5+: Train Resnet stage 5 and up\n        augmentation: Optional. An imgaug (https://github.com/aleju/imgaug)\n            augmentation. For example, passing imgaug.augmenters.Fliplr(0.5)\n            flips images right/left 50% of the time. 
You can pass complex\n            augmentations as well. This augmentation applies 50% of the\n            time, and when it does it flips images right/left half the time\n            and adds a Gaussian blur with a random sigma in range 0 to 5.\n\n                augmentation = imgaug.augmenters.Sometimes(0.5, [\n                    imgaug.augmenters.Fliplr(0.5),\n                    imgaug.augmenters.GaussianBlur(sigma=(0.0, 5.0))\n                ])\n        custom_callbacks: Optional. Add custom callbacks to be called\n            with the keras fit_generator method. Must be a list of type keras.callbacks.\n        no_augmentation_sources: Optional. List of sources to exclude for\n            augmentation. A source is a string that identifies a dataset and is\n            defined in the Dataset class.\n        """\n        assert self.mode == "training", "Create model in training mode."\n\n        # Pre-defined layer regular expressions\n        layer_regex = {\n            # all layers but the backbone\n            "heads": r"(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",\n            # From a specific Resnet stage and up\n            "3+": r"(res3.*)|(bn3.*)|(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",\n            "4+": r"(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",\n            "5+": r"(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",\n            # All layers\n            "all": ".*",\n        }\n        if layers in layer_regex.keys():\n            layers = layer_regex[layers]\n\n        # Data generators\n        train_generator = data_generator(train_dataset, self.config, shuffle=True,\n                                         augmentation=augmentation,\n                                         batch_size=self.config.BATCH_SIZE,\n                                         no_augmentation_sources=no_augmentation_sources)\n        val_generator = data_generator(val_dataset, self.config, shuffle=True,\n                                       batch_size=self.config.BATCH_SIZE)\n\n        # Create log_dir if it does not exist\n        if not os.path.exists(self.log_dir):\n            os.makedirs(self.log_dir)\n\n        # Callbacks\n        callbacks = [\n            keras.callbacks.TensorBoard(log_dir=self.log_dir,\n                                        histogram_freq=0, write_graph=True, write_images=False),\n            keras.callbacks.ModelCheckpoint(self.checkpoint_path,\n                                            verbose=0, save_weights_only=True),\n        ]\n\n        # Add custom callbacks to the list\n        if custom_callbacks:\n            callbacks += custom_callbacks\n\n        # Train\n        log("\nStarting at epoch {}. LR={}\n".format(self.epoch, learning_rate))\n        log("Checkpoint Path: {}".format(self.checkpoint_path))\n        self.set_trainable(layers)\n        self.compile(learning_rate, self.config.LEARNING_MOMENTUM)\n\n        # Work-around for Windows: Keras fails on Windows when using\n        # multiprocessing workers. See discussion here:\n        # https://github.com/matterport/Mask_RCNN/issues/13#issuecomment-353124009\n        if os.name == 'nt':\n            workers = 0\n        else:\n            workers = multiprocessing.cpu_count()\n\n        self.keras_model.fit_generator(\n            train_generator,\n            initial_epoch=self.epoch,\n            epochs=epochs,\n            steps_per_epoch=self.config.STEPS_PER_EPOCH,\n            callbacks=callbacks,\n            validation_data=val_generator,\n            validation_steps=self.config.VALIDATION_STEPS,\n            max_queue_size=100,\n            workers=workers,\n            use_multiprocessing=True,\n        )\n        self.epoch = max(self.epoch, epochs)\n\n    def mold_inputs(self, images):\n        """Takes a list of images and modifies them to the format expected\n        as an input to the neural network.\n        images: List of image matrices [height,width,depth]. Images can have\n            different sizes.\n\n        Returns 3 Numpy matrices:\n        molded_images: [N, h, w, 3]. Images resized and normalized.\n        image_metas: [N, length of meta data]. Details about each image.\n        windows: [N, (y1, x1, y2, x2)]. 
The portion of the image that has the\n original image (padding excluded).\n \"\"\"\n molded_images = []\n image_metas = []\n windows = []\n for image in images:\n # Resize image\n # TODO: move resizing to mold_image()\n molded_image, window, scale, padding, crop = utils.resize_image(\n image,\n min_dim=self.config.IMAGE_MIN_DIM,\n min_scale=self.config.IMAGE_MIN_SCALE,\n max_dim=self.config.IMAGE_MAX_DIM,\n mode=self.config.IMAGE_RESIZE_MODE)\n molded_image = mold_image(molded_image, self.config)\n # Build image_meta\n image_meta = compose_image_meta(\n 0, image.shape, molded_image.shape, window, scale,\n np.zeros([self.config.NUM_CLASSES], dtype=np.int32))\n # Append\n molded_images.append(molded_image)\n windows.append(window)\n image_metas.append(image_meta)\n # Pack into arrays\n molded_images = np.stack(molded_images)\n image_metas = np.stack(image_metas)\n windows = np.stack(windows)\n return molded_images, image_metas, windows\n\n def unmold_detections(self, detections, mrcnn_mask, original_image_shape,\n image_shape, window):\n \"\"\"Reformats the detections of one image from the format of the neural\n network output to a format suitable for use in the rest of the\n application.\n\n detections: [N, (y1, x1, y2, x2, class_id, score)] in normalized coordinates\n mrcnn_mask: [N, height, width, num_classes]\n original_image_shape: [H, W, C] Original image shape before resizing\n image_shape: [H, W, C] Shape of the image after resizing and padding\n window: [y1, x1, y2, x2] Pixel coordinates of box in the image where the real\n image is excluding the padding.\n\n Returns:\n boxes: [N, (y1, x1, y2, x2)] Bounding boxes in pixels\n class_ids: [N] Integer class IDs for each bounding box\n scores: [N] Float probability scores of the class_id\n masks: [height, width, num_instances] Instance masks\n \"\"\"\n # How many detections do we have?\n # Detections array is padded with zeros. Find the first class_id == 0.\n zero_ix = np.where(detections[:, 4] == 0)[0]\n N = zero_ix[0] if zero_ix.shape[0] > 0 else detections.shape[0]\n\n # Extract boxes, class_ids, scores, and class-specific masks\n boxes = detections[:N, :4]\n class_ids = detections[:N, 4].astype(np.int32)\n scores = detections[:N, 5]\n masks = mrcnn_mask[np.arange(N), :, :, class_ids]\n\n # Translate normalized coordinates in the resized image to pixel\n # coordinates in the original image before resizing\n window = utils.norm_boxes(window, image_shape[:2])\n wy1, wx1, wy2, wx2 = window\n shift = np.array([wy1, wx1, wy1, wx1])\n wh = wy2 - wy1 # window height\n ww = wx2 - wx1 # window width\n scale = np.array([wh, ww, wh, ww])\n # Convert boxes to normalized coordinates on the window\n boxes = np.divide(boxes - shift, scale)\n # Convert boxes to pixel coordinates on the original image\n boxes = utils.denorm_boxes(boxes, original_image_shape[:2])\n\n # Filter out detections with zero area. 
Happens in early training when\n # network weights are still random\n exclude_ix = np.where(\n (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) <= 0)[0]\n if exclude_ix.shape[0] > 0:\n boxes = np.delete(boxes, exclude_ix, axis=0)\n class_ids = np.delete(class_ids, exclude_ix, axis=0)\n scores = np.delete(scores, exclude_ix, axis=0)\n masks = np.delete(masks, exclude_ix, axis=0)\n N = class_ids.shape[0]\n\n # Resize masks to original image size and set boundary threshold.\n full_masks = []\n for i in range(N):\n # Convert neural network mask to full size mask\n full_mask = utils.unmold_mask(masks[i], boxes[i], original_image_shape)\n full_masks.append(full_mask)\n full_masks = np.stack(full_masks, axis=-1)\\\n if full_masks else np.empty(original_image_shape[:2] + (0,))\n\n return boxes, class_ids, scores, full_masks\n\n def detect(self, images, verbose=0):\n \"\"\"Runs the detection pipeline.\n\n images: List of images, potentially of different sizes.\n\n Returns a list of dicts, one dict per image. The dict contains:\n rois: [N, (y1, x1, y2, x2)] detection bounding boxes\n class_ids: [N] int class IDs\n scores: [N] float probability scores for the class IDs\n masks: [H, W, N] instance binary masks\n \"\"\"\n assert self.mode == \"inference\", \"Create model in inference mode.\"\n assert len(\n images) == self.config.BATCH_SIZE, \"len(images) must be equal to BATCH_SIZE\"\n\n if verbose:\n log(\"Processing {} images\".format(len(images)))\n for image in images:\n log(\"image\", image)\n\n # Mold inputs to format expected by the neural network\n molded_images, image_metas, windows = self.mold_inputs(images)\n\n # Validate image sizes\n # All images in a batch MUST be of the same size\n image_shape = molded_images[0].shape\n for g in molded_images[1:]:\n assert g.shape == image_shape,\\\n \"After resizing, all images must have the same size. Check IMAGE_RESIZE_MODE and image sizes.\"\n\n # Anchors\n anchors = self.get_anchors(image_shape)\n # Duplicate across the batch dimension because Keras requires it\n # TODO: can this be optimized to avoid duplicating the anchors?\n anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)\n\n if verbose:\n log(\"molded_images\", molded_images)\n log(\"image_metas\", image_metas)\n log(\"anchors\", anchors)\n # Run object detection\n detections, _, _, mrcnn_mask, _, _, _ =\\\n self.keras_model.predict([molded_images, image_metas, anchors], verbose=0)\n # Process detections\n results = []\n for i, image in enumerate(images):\n final_rois, final_class_ids, final_scores, final_masks =\\\n self.unmold_detections(detections[i], mrcnn_mask[i],\n image.shape, molded_images[i].shape,\n windows[i])\n results.append({\n \"rois\": final_rois,\n \"class_ids\": final_class_ids,\n \"scores\": final_scores,\n \"masks\": final_masks,\n })\n return results\n\n def detect_molded(self, molded_images, image_metas, verbose=0):\n \"\"\"Runs the detection pipeline, but expect inputs that are\n molded already. Used mostly for debugging and inspecting\n the model.\n\n molded_images: List of images loaded using load_image_gt()\n image_metas: image meta data, also returned by load_image_gt()\n\n Returns a list of dicts, one dict per image. 
The dict contains:\n rois: [N, (y1, x1, y2, x2)] detection bounding boxes\n class_ids: [N] int class IDs\n scores: [N] float probability scores for the class IDs\n masks: [H, W, N] instance binary masks\n \"\"\"\n assert self.mode == \"inference\", \"Create model in inference mode.\"\n assert len(molded_images) == self.config.BATCH_SIZE,\\\n \"Number of images must be equal to BATCH_SIZE\"\n\n if verbose:\n log(\"Processing {} images\".format(len(molded_images)))\n for image in molded_images:\n log(\"image\", image)\n\n # Validate image sizes\n # All images in a batch MUST be of the same size\n image_shape = molded_images[0].shape\n for g in molded_images[1:]:\n assert g.shape == image_shape, \"Images must have the same size\"\n\n # Anchors\n anchors = self.get_anchors(image_shape)\n # Duplicate across the batch dimension because Keras requires it\n # TODO: can this be optimized to avoid duplicating the anchors?\n anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)\n\n if verbose:\n log(\"molded_images\", molded_images)\n log(\"image_metas\", image_metas)\n log(\"anchors\", anchors)\n # Run object detection\n detections, _, _, mrcnn_mask, _, _, _ =\\\n self.keras_model.predict([molded_images, image_metas, anchors], verbose=0)\n # Process detections\n results = []\n for i, image in enumerate(molded_images):\n window = [0, 0, image.shape[0], image.shape[1]]\n final_rois, final_class_ids, final_scores, final_masks =\\\n self.unmold_detections(detections[i], mrcnn_mask[i],\n image.shape, molded_images[i].shape,\n window)\n results.append({\n \"rois\": final_rois,\n \"class_ids\": final_class_ids,\n \"scores\": final_scores,\n \"masks\": final_masks,\n })\n return results\n\n def get_anchors(self, image_shape):\n \"\"\"Returns anchor pyramid for the given image size.\"\"\"\n backbone_shapes = compute_backbone_shapes(self.config, image_shape)\n # Cache anchors and reuse if image shape is the same\n if not hasattr(self, \"_anchor_cache\"):\n self._anchor_cache = {}\n if not tuple(image_shape) in self._anchor_cache:\n # Generate Anchors\n a = utils.generate_pyramid_anchors(\n self.config.RPN_ANCHOR_SCALES,\n self.config.RPN_ANCHOR_RATIOS,\n backbone_shapes,\n self.config.BACKBONE_STRIDES,\n self.config.RPN_ANCHOR_STRIDE)\n # Keep a copy of the latest anchors in pixel coordinates because\n # it's used in inspect_model notebooks.\n # TODO: Remove this after the notebook are refactored to not use it\n self.anchors = a\n # Normalize coordinates\n self._anchor_cache[tuple(image_shape)] = utils.norm_boxes(a, image_shape[:2])\n return self._anchor_cache[tuple(image_shape)]\n\n def ancestor(self, tensor, name, checked=None):\n \"\"\"Finds the ancestor of a TF tensor in the computation graph.\n tensor: TensorFlow symbolic tensor.\n name: Name of ancestor tensor to find\n checked: For internal use. 
A list of tensors that were already\n searched to avoid loops in traversing the graph.\n \"\"\"\n checked = checked if checked is not None else []\n # Put a limit on how deep we go to avoid very long loops\n if len(checked) > 500:\n return None\n # Convert name to a regex and allow matching a number prefix\n # because Keras adds them automatically\n if isinstance(name, str):\n name = re.compile(name.replace(\"/\", r\"(\\_\\d+)*/\"))\n\n parents = tensor.op.inputs\n for p in parents:\n if p in checked:\n continue\n if bool(re.fullmatch(name, p.name)):\n return p\n checked.append(p)\n a = self.ancestor(p, name, checked)\n if a is not None:\n return a\n return None\n\n def find_trainable_layer(self, layer):\n \"\"\"If a layer is encapsulated by another layer, this function\n digs through the encapsulation and returns the layer that holds\n the weights.\n \"\"\"\n if layer.__class__.__name__ == 'TimeDistributed':\n return self.find_trainable_layer(layer.layer)\n return layer\n\n def get_trainable_layers(self):\n \"\"\"Returns a list of layers that have weights.\"\"\"\n layers = []\n # Loop through all layers\n for l in self.keras_model.layers:\n # If layer is a wrapper, find inner trainable layer\n l = self.find_trainable_layer(l)\n # Include layer if it has weights\n if l.get_weights():\n layers.append(l)\n return layers\n\n def run_graph(self, images, outputs, image_metas=None):\n \"\"\"Runs a sub-set of the computation graph that computes the given\n outputs.\n\n image_metas: If provided, the images are assumed to be already\n molded (i.e. resized, padded, and normalized)\n\n outputs: List of tuples (name, tensor) to compute. The tensors are\n symbolic TensorFlow tensors and the names are for easy tracking.\n\n Returns an ordered dict of results. Keys are the names received in the\n input and values are Numpy arrays.\n \"\"\"\n model = self.keras_model\n\n # Organize desired outputs into an ordered dict\n outputs = OrderedDict(outputs)\n for o in outputs.values():\n assert o is not None\n\n # Build a Keras function to run parts of the computation graph\n inputs = model.inputs\n if model.uses_learning_phase and not isinstance(K.learning_phase(), int):\n inputs += [K.learning_phase()]\n kf = K.function(model.inputs, list(outputs.values()))\n\n # Prepare inputs\n if image_metas is None:\n molded_images, image_metas, _ = self.mold_inputs(images)\n else:\n molded_images = images\n image_shape = molded_images[0].shape\n # Anchors\n anchors = self.get_anchors(image_shape)\n # Duplicate across the batch dimension because Keras requires it\n # TODO: can this be optimized to avoid duplicating the anchors?\n anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)\n model_in = [molded_images, image_metas, anchors]\n\n # Run inference\n if model.uses_learning_phase and not isinstance(K.learning_phase(), int):\n model_in.append(0.)\n outputs_np = kf(model_in)\n\n # Pack the generated Numpy arrays into a a dict and log the results.\n outputs_np = OrderedDict([(k, v)\n for k, v in zip(outputs.keys(), outputs_np)])\n for k, v in outputs_np.items():\n log(k, v)\n return outputs_np\n\n\n############################################################\n# Data Formatting\n############################################################\n\ndef compose_image_meta(image_id, original_image_shape, image_shape,\n window, scale, active_class_ids):\n \"\"\"Takes attributes of an image and puts them in one 1D array.\n\n image_id: An int ID of the image. 
Useful for debugging.\n original_image_shape: [H, W, C] before resizing or padding.\n image_shape: [H, W, C] after resizing and padding\n window: (y1, x1, y2, x2) in pixels. The area of the image where the real\n image is (excluding the padding)\n scale: The scaling factor applied to the original image (float32)\n active_class_ids: List of class_ids available in the dataset from which\n the image came. Useful if training on images from multiple datasets\n where not all classes are present in all datasets.\n \"\"\"\n meta = np.array(\n [image_id] + # size=1\n list(original_image_shape) + # size=3\n list(image_shape) + # size=3\n list(window) + # size=4 (y1, x1, y2, x2) in image cooredinates\n [scale] + # size=1\n list(active_class_ids) # size=num_classes\n )\n return meta\n\n\ndef parse_image_meta(meta):\n \"\"\"Parses an array that contains image attributes to its components.\n See compose_image_meta() for more details.\n\n meta: [batch, meta length] where meta length depends on NUM_CLASSES\n\n Returns a dict of the parsed values.\n \"\"\"\n image_id = meta[:, 0]\n original_image_shape = meta[:, 1:4]\n image_shape = meta[:, 4:7]\n window = meta[:, 7:11] # (y1, x1, y2, x2) window of image in in pixels\n scale = meta[:, 11]\n active_class_ids = meta[:, 12:]\n return {\n \"image_id\": image_id.astype(np.int32),\n \"original_image_shape\": original_image_shape.astype(np.int32),\n \"image_shape\": image_shape.astype(np.int32),\n \"window\": window.astype(np.int32),\n \"scale\": scale.astype(np.float32),\n \"active_class_ids\": active_class_ids.astype(np.int32),\n }\n\n\ndef parse_image_meta_graph(meta):\n \"\"\"Parses a tensor that contains image attributes to its components.\n See compose_image_meta() for more details.\n\n meta: [batch, meta length] where meta length depends on NUM_CLASSES\n\n Returns a dict of the parsed tensors.\n \"\"\"\n image_id = meta[:, 0]\n original_image_shape = meta[:, 1:4]\n image_shape = meta[:, 4:7]\n window = meta[:, 7:11] # (y1, x1, y2, x2) window of image in in pixels\n scale = meta[:, 11]\n active_class_ids = meta[:, 12:]\n return {\n \"image_id\": image_id,\n \"original_image_shape\": original_image_shape,\n \"image_shape\": image_shape,\n \"window\": window,\n \"scale\": scale,\n \"active_class_ids\": active_class_ids,\n }\n\n\ndef mold_image(images, config):\n \"\"\"Expects an RGB image (or array of images) and subtracts\n the mean pixel and converts it to float. Expects image\n colors in RGB order.\n \"\"\"\n return images.astype(np.float32) - config.MEAN_PIXEL\n\n\ndef unmold_image(normalized_images, config):\n \"\"\"Takes a image normalized with mold() and returns the original.\"\"\"\n return (normalized_images + config.MEAN_PIXEL).astype(np.uint8)\n\n\n############################################################\n# Miscellenous Graph Functions\n############################################################\n\ndef trim_zeros_graph(boxes, name='trim_zeros'):\n \"\"\"Often boxes are represented with matrices of shape [N, 4] and\n are padded with zeros. 
This removes zero boxes.\n\n boxes: [N, 4] matrix of boxes.\n non_zeros: [N] a 1D boolean mask identifying the rows to keep\n \"\"\"\n non_zeros = tf.cast(tf.reduce_sum(tf.abs(boxes), axis=1), tf.bool)\n boxes = tf.boolean_mask(boxes, non_zeros, name=name)\n return boxes, non_zeros\n\n\ndef batch_pack_graph(x, counts, num_rows):\n \"\"\"Picks different number of values from each row\n in x depending on the values in counts.\n \"\"\"\n outputs = []\n for i in range(num_rows):\n outputs.append(x[i, :counts[i]])\n return tf.concat(outputs, axis=0)\n\n\ndef norm_boxes_graph(boxes, shape):\n \"\"\"Converts boxes from pixel coordinates to normalized coordinates.\n boxes: [..., (y1, x1, y2, x2)] in pixel coordinates\n shape: [..., (height, width)] in pixels\n\n Note: In pixel coordinates (y2, x2) is outside the box. But in normalized\n coordinates it's inside the box.\n\n Returns:\n [..., (y1, x1, y2, x2)] in normalized coordinates\n \"\"\"\n h, w = tf.split(tf.cast(shape, tf.float32), 2)\n scale = tf.concat([h, w, h, w], axis=-1) - tf.constant(1.0)\n shift = tf.constant([0., 0., 1., 1.])\n return tf.divide(boxes - shift, scale)\n\n\ndef denorm_boxes_graph(boxes, shape):\n \"\"\"Converts boxes from normalized coordinates to pixel coordinates.\n boxes: [..., (y1, x1, y2, x2)] in normalized coordinates\n shape: [..., (height, width)] in pixels\n\n Note: In pixel coordinates (y2, x2) is outside the box. But in normalized\n coordinates it's inside the box.\n\n Returns:\n [..., (y1, x1, y2, x2)] in pixel coordinates\n \"\"\"\n h, w = tf.split(tf.cast(shape, tf.float32), 2)\n scale = tf.concat([h, w, h, w], axis=-1) - tf.constant(1.0)\n shift = tf.constant([0., 0., 1., 1.])\n return tf.cast(tf.round(tf.multiply(boxes, scale) + shift), tf.int32)\n"
] |
[
[
"numpy.amax",
"numpy.expand_dims",
"tensorflow.concat",
"tensorflow.control_dependencies",
"tensorflow.stack",
"tensorflow.reduce_sum",
"tensorflow.minimum",
"tensorflow.cast",
"tensorflow.image.non_max_suppression",
"tensorflow.equal",
"tensorflow.image.crop_and_resize",
"numpy.concatenate",
"numpy.max",
"tensorflow.abs",
"tensorflow.map_fn",
"numpy.any",
"tensorflow.pad",
"tensorflow.where",
"tensorflow.random_shuffle",
"numpy.where",
"tensorflow.add_n",
"numpy.divide",
"numpy.random.randint",
"tensorflow.boolean_mask",
"numpy.hstack",
"tensorflow.Variable",
"numpy.reshape",
"numpy.fliplr",
"numpy.arange",
"tensorflow.squeeze",
"numpy.stack",
"tensorflow.divide",
"tensorflow.stop_gradient",
"tensorflow.gather",
"numpy.copy",
"numpy.argmax",
"tensorflow.nn.top_k",
"tensorflow.argmax",
"numpy.zeros",
"numpy.log",
"tensorflow.gather_nd",
"tensorflow.unique",
"tensorflow.shape",
"numpy.random.choice",
"tensorflow.identity",
"tensorflow.exp",
"tensorflow.sparse_tensor_to_dense",
"numpy.delete",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"tensorflow.split",
"tensorflow.round",
"numpy.array",
"numpy.sum",
"tensorflow.size",
"tensorflow.reduce_max",
"tensorflow.multiply",
"tensorflow.transpose",
"tensorflow.constant",
"tensorflow.range",
"tensorflow.reduce_mean",
"numpy.abs",
"tensorflow.maximum",
"tensorflow.reshape",
"tensorflow.expand_dims",
"numpy.sort",
"numpy.ones",
"numpy.random.shuffle",
"tensorflow.log",
"numpy.broadcast_to",
"tensorflow.sqrt",
"numpy.empty",
"tensorflow.logical_and"
]
] |
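The norm_boxes_graph()/denorm_boxes_graph() pair that closes the Mask R-CNN file above encodes one subtle convention: in pixel space (y2, x2) lies outside the box, while in normalized space it lies inside, hence the shift of (0, 0, 1, 1) and the scale of (h-1, w-1, h-1, w-1). Below is a minimal NumPy sketch of the same round-trip; the norm_boxes_np/denorm_boxes_np helper names are mine for illustration and are not part of the repo.

    import numpy as np

    def norm_boxes_np(boxes, shape):
        """boxes: [..., (y1, x1, y2, x2)] in pixels; shape: (height, width)."""
        h, w = shape
        scale = np.array([h - 1, w - 1, h - 1, w - 1], dtype=np.float32)
        shift = np.array([0., 0., 1., 1.], dtype=np.float32)
        return ((boxes - shift) / scale).astype(np.float32)

    def denorm_boxes_np(boxes, shape):
        """Inverse of norm_boxes_np(); returns integer pixel boxes."""
        h, w = shape
        scale = np.array([h - 1, w - 1, h - 1, w - 1], dtype=np.float32)
        shift = np.array([0., 0., 1., 1.], dtype=np.float32)
        return np.around(boxes * scale + shift).astype(np.int32)

    box = np.array([[10, 20, 110, 220]], dtype=np.float32)
    norm = norm_boxes_np(box, (256, 256))
    # The round-trip is exact, which is what lets unmold_detections() move
    # boxes between the padded and original images without drift.
    assert np.array_equal(denorm_boxes_np(norm, (256, 256)), box.astype(np.int32))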
crocs-muni/DiSSECT
|
[
"ecd4f5242ee32804fea0029081026c02dbaabdf6"
] |
[
"dissect/analysis/detail.py"
] |
[
"import sys\nimport pandas as pd\nfrom sklearn.neighbors import NearestNeighbors\n\nif __name__ == \"__main__\":\n if len(sys.argv) != 3:\n print(f\"USAGE: {sys.argv[0]} <FILE> <CURVE>\", file=sys.stderr)\n sys.exit(1)\n\n df = pd.read_csv(sys.argv[1], sep=\";\")\n curve = df[df[\"curve\"] == sys.argv[2]]\n details = []\n\n for col in df.columns[1:]:\n detail = {\n \"col\": col,\n \"mean\": df[col].mean(),\n \"median\": df[col].median(),\n \"value\": (val := curve[col].iloc[0]),\n \"mean_diff\": abs(df[col].mean() - val),\n \"median_diff\": abs(df[col].median() - val)\n }\n details.append(detail)\n\n details = sorted(details, key=lambda x: x[\"mean_diff\"], reverse=True)\n for detail in details:\n print(detail[\"col\"])\n print(f\"mean: {detail['mean']}\")\n print(f\"median: {detail['median']}\")\n print(f\"value: {detail['value']}\")\n print(f\"mean_diff: {detail['mean_diff']}\")\n print(f\"median_diff: {detail['median_diff']}\")\n print()\n\n print(\"Nearest neighbors:\")\n nbrs = NearestNeighbors(n_neighbors=10).fit(df[df.columns[1:]])\n distances, indices = nbrs.kneighbors(curve[df.columns[1:]])\n nbrs = df.iloc[indices[0], :].copy(deep=True)\n nbrs.reset_index(drop=True, inplace=True)\n nbrs[\"distance\"] = distances[0]\n for _, nbr in nbrs.iterrows():\n print(f\"{nbr['curve']}: {nbr['distance']}\")\n"
] |
[
[
"pandas.read_csv",
"sklearn.neighbors.NearestNeighbors"
]
] |
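For reference, a standalone sketch of the NearestNeighbors lookup pattern used in detail.py above, run on made-up data (the column names f1/f2 and the feature values are invented stand-ins for the semicolon-separated trait table the script reads).

    import pandas as pd
    from sklearn.neighbors import NearestNeighbors

    df = pd.DataFrame({
        "curve": ["a", "b", "c", "d"],
        "f1": [0.10, 0.90, 0.11, 0.50],
        "f2": [1.00, 2.00, 1.05, 1.50],
    })
    query = df[df["curve"] == "a"]

    nbrs = NearestNeighbors(n_neighbors=3).fit(df[df.columns[1:]])
    distances, indices = nbrs.kneighbors(query[df.columns[1:]])
    for dist, idx in zip(distances[0], indices[0]):
        # The query curve itself comes back first, at distance 0.
        print(df.iloc[idx]["curve"], dist)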
alberto139/CarND-Capstone
|
[
"4019ce28f01b0c22255e8cd28445fc18608dc735"
] |
[
"ros/src/tl_detector/light_classification/tl_classifier.py"
] |
[
"from styx_msgs.msg import TrafficLight\nimport tensorflow as tf\nimport numpy as np\nimport cv2\nfrom collections import Counter\nimport object_class\nimport os\n\nclass TLClassifier(object):\n def __init__(self):\n model_path = os.getcwd() + \"/light_classification/frozen_inference_graph.pb\" \n self.conf_threshold = 0.5\n\n gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)\n\n \n\n detection_graph = tf.Graph()\n with detection_graph.as_default():\n od_graph_def = tf.GraphDef()\n\n with tf.gfile.GFile(model_path, 'rb') as fid:\n serialized_graph = fid.read()\n od_graph_def.ParseFromString(serialized_graph)\n tf.import_graph_def(od_graph_def, name='')\n\n with detection_graph.as_default():\n #self.sess = tf.Session()\n self.sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))\n ops = tf.get_default_graph().get_operations()\n all_tensor_names = {output.name for op in ops for output in op.outputs}\n self.tensor_dict = {}\n for key in ['num_detections','detection_boxes','detection_scores','detection_classes']:\n tensor_name = key + \":0\"\n if tensor_name in all_tensor_names:\n self.tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(tensor_name)\n\n self.image_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0')\n print(\"DONE with TLC INIT\")\n\n def get_classification(self, img):\n \"\"\"Determines the color of the traffic light in the image\n\n Args:\n image (cv::Mat): image containing the traffic light\n\n Returns:\n int: ID of traffic light color (specified in styx_msgs/TrafficLight)\n\n \"\"\"\n #print(\"getting classification\")\n output_dict = self.sess.run(\n self.tensor_dict, feed_dict={\n self.image_tensor: np.expand_dims(img, 0)})\n output_dict['num_detections'] = int(\n output_dict['num_detections'][0])\n output_dict['detection_classes'] = output_dict['detection_classes'][0].astype(np.uint8)\n output_dict['detection_boxes'] = output_dict['detection_boxes'][0]\n output_dict['detection_scores'] = output_dict['detection_scores'][0]\n\n # Detection\n detections = []\n for i, element in enumerate(output_dict['detection_boxes']):\n if output_dict['detection_scores'][i] > self.conf_threshold:\n temp_object = object_class.Object(img, output_dict['detection_boxes'][i], output_dict['detection_classes'][i])\n detections.append(temp_object)\n\n\n # Get subimages for traffic lights\n detected_colors = []\n for i, tl in enumerate(detections):\n tl.subimg = tl.frame[tl.ymin:tl.ymax, tl.xmin:tl.xmax]\n #cv2.imshow(\"tl\" + str(i), tl.subimg)\n hsv = cv2.cvtColor(tl.subimg, cv2.COLOR_BGR2HSV)\n mask_green = cv2.inRange(hsv, (36, 100, 100), (70, 255,255)) # Green\n mask_yellow = cv2.inRange(hsv, (15, 180, 40), (35, 255,255)) # Yellow\n mask_red1 = cv2.inRange(hsv, (0, 70, 50), (10, 255,255)) # Red\n mask_red2 = cv2.inRange(hsv, (170, 70, 50), (180, 255,255)) # Red2\n mask_red = cv2.bitwise_or(mask_red1, mask_red2)\n\n green = (sum(sum(mask_green)))\n yellow = (sum(sum(mask_yellow)))\n red = (sum(sum(mask_red)))\n colors = [red, yellow, green]\n max_color = colors.index(max(colors))\n detected_colors.append(max_color)\n \n\n state = -1\n if detected_colors:\n #state = mode(detected_colors)\n count = Counter(detected_colors)\n \n state = count.most_common(1)[0][0]\n print(\"TL STATE: \" + str(state))\n #return state\n\n return state\n"
] |
[
[
"tensorflow.Graph",
"tensorflow.import_graph_def",
"numpy.expand_dims",
"tensorflow.gfile.GFile",
"tensorflow.ConfigProto",
"tensorflow.GPUOptions",
"tensorflow.get_default_graph",
"tensorflow.GraphDef"
]
] |
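The red mask in get_classification() above is built from two hue ranges because red wraps around the ends of OpenCV's 0-180 hue axis. A self-contained sketch of that trick, using the same thresholds as the classifier on a synthetic pure-red image (the test image is made up for illustration):

    import cv2
    import numpy as np

    img = np.zeros((10, 10, 3), dtype=np.uint8)
    img[:, :] = (0, 0, 255)  # pure red in BGR

    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    mask_red1 = cv2.inRange(hsv, (0, 70, 50), (10, 255, 255))     # low-hue red
    mask_red2 = cv2.inRange(hsv, (170, 70, 50), (180, 255, 255))  # high-hue red
    mask_red = cv2.bitwise_or(mask_red1, mask_red2)
    print(int(mask_red.sum() // 255), "red pixels")  # -> 100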
BAI-Yeqi/nuscenes-devkit
|
[
"f94dcd313feb8adc91e7d01312fb7d27cc77098e"
] |
[
"python-sdk/nuscenes/eval/panoptic/evaluate.py"
] |
[
"\"\"\"\nCode written by Motional and the Robot Learning Lab, University of Freiburg.\n\nScript to evaluate Panoptic nuScenes panoptic segmentation (PS) or panoptic tracking (PT) metrics.\nArgument \"task\" could be one of [\"segmentation\", \"tracking\"], check eval/panoptic/README.md for more details of the\ntasks. Note tracking results will be evaluated with both the PT and PS metrics.\n\nExample usage to evaluate tracking metrics.\n---------------------------------------------------------\npython python-sdk/nuscenes/eval/panoptic/evaluate.py --result_path /data/panoptic_pred_results --eval_set mini_val\n--task tracking --dataroot /data/sets/nuscenes --version v1.0-mini --out_dir /tmp/panoptic_eval_output\n---------------------------------------------------------\n\nNote, the panoptic prediction result directory should be as follows:\n└── panoptic_results_dir\n └── panoptic\n └── {test, train, val, mini_train, mini_val} <- Contains the .npz files; a .npz file contains panoptic labels\n │ of the points in a point cloud.\n └── {test, train, val, mini_train, mini_val}\n └── submission.json <- contains certain information about the submission.\n\"\"\"\nimport argparse\nimport json\nimport os\nfrom typing import Any, Dict\n\nimport numpy as np\nfrom nuscenes.eval.panoptic.panoptic_seg_evaluator import PanopticEval\nfrom nuscenes.eval.panoptic.panoptic_track_evaluator import PanopticTrackingEval\nfrom nuscenes.eval.panoptic.utils import PanopticClassMapper, get_samples_in_panoptic_eval_set\nfrom nuscenes.nuscenes import NuScenes\nfrom nuscenes.utils.data_io import load_bin_file\nfrom nuscenes.utils.splits import create_splits_scenes\nfrom tqdm import tqdm\n\n\nclass NuScenesPanopticEval:\n \"\"\"\n This is the official Panoptic nuScenes evaluation code. Results are written to the provided output_dir.\n Panoptic nuScenes uses the following metrics:\n - Panoptic Segmentation: we use the PQ (Panoptic Quality) metric: which is defined as:\n PQ = IOU/(TP + 0.5*FP + 0.5*FN).\n - Multi-object Panoptic Tracking: we use the PAT (Panoptic Tracking) metric, which is defined as:\n PAT = 2*PQ*TQ / (PQ + TQ) where TQ is as defined in the paper: \n Panoptic nuScenes: A Large-Scale Benchmark for LiDAR Panoptic Segmentation and Tracking \n (https://arxiv.org/pdf/2109.03805.pdf)\n \"\"\"\n\n def __init__(self,\n nusc: NuScenes,\n results_folder: str,\n eval_set: str,\n task: str,\n min_inst_points: int,\n out_dir: str = None,\n verbose: bool = False):\n \"\"\"\n :param nusc: A NuScenes object.\n :param results_folder: Path to the folder.\n :param eval_set: The dataset split to evaluate on, e.g. train, val or test.\n :param task: What panoptic task to evaluate on, 'segmentation' for panoptic segmentation evaluation only;\n and 'tracking' for both panoptic segmentation and multi-object panoptic tracking evaluation.\n :param min_inst_points: minimal number of instance points.\n :param out_dir: Output directory. 
The results is saved as 'out_dir/{task}-result.json' file;\n :param verbose: Whether to print messages during the evaluation.\n \"\"\"\n assert hasattr(nusc, 'panoptic') and len(getattr(nusc, 'panoptic')) > 0,\\\n f'Error: no panoptic ground truths found in {nusc.version}'\n\n supported_tasks = ['segmentation', 'tracking']\n if task not in supported_tasks:\n raise ValueError(f'Supported task must be one of: {supported_tasks}, got: {task} !')\n\n results_npz_folder = os.path.join(results_folder, 'panoptic', eval_set)\n assert os.path.exists(results_npz_folder), \\\n f'Error: The folder containing the .npz files ({results_npz_folder}) does not exist.'\n\n self.nusc = nusc\n self.results_folder = results_folder\n self.eval_set = eval_set\n self.task = task\n self.verbose = verbose\n self.min_inst_points = min_inst_points\n self.out_dir = out_dir\n\n self.mapper = PanopticClassMapper(self.nusc)\n self.ignore_idx = self.mapper.ignore_class['index']\n self.id2name = {idx: name for name, idx in self.mapper.coarse_name_2_coarse_idx_mapping.items()}\n self.num_classes = len(self.mapper.coarse_name_2_coarse_idx_mapping)\n\n self.things = self.mapper.things.keys()\n self.stuff = self.mapper.stuff.keys()\n self.sample_tokens = get_samples_in_panoptic_eval_set(self.nusc, self.eval_set)\n\n if self.verbose:\n print(f'There are {self.num_classes} classes, {len(self.sample_tokens)} samples.')\n\n self.evaluator = dict(segmentation=PanopticEval(n_classes=self.num_classes,\n ignore=[self.ignore_idx],\n min_points=self.min_inst_points))\n if self.task == 'tracking':\n self.scene_name2tok = {rec['name']: rec['token'] for rec in nusc.scene}\n self.evaluator['tracking'] = PanopticTrackingEval(n_classes=self.num_classes,\n min_stuff_cls_id=len(self.things) + 1,\n ignore=[self.ignore_idx],\n min_points=self.min_inst_points)\n\n self.eval_result_file = os.path.join(self.out_dir, self.task + '-result.json')\n if os.path.isfile(self.eval_result_file):\n os.remove(self.eval_result_file)\n\n def evaluate(self) -> None:\n \"\"\"\n Evaluate metrics for task. For segmentation task, only panoptic segmentation metrics will be evaluated. 
For\n tracking task, besides the multi-object panoptic tracking metrics, single frame based panoptic segmentation\n metrics will be evaluated as well.\n \"\"\"\n eval_results = {'segmentation': self.evaluate_segmentation()}\n if self.task == 'tracking':\n eval_results['tracking'] = self.evaluate_tracking()\n self.save_result(eval_results)\n\n def evaluate_segmentation(self) -> Dict[str, Any]:\n \"\"\"\n Calculate panoptic segmentation metrics.\n :return: A dict of panoptic metrics for mean of all classes and each class.\n {\n \"all\": { \"PQ\": float, \"SQ\": float, \"RQ\": float, \"mIoU\": float, \"PQ_dagger\": float},\n \"ignore\": { \"PQ\": float, \"SQ\": float, \"RQ\": float, \"IoU\": float},\n \"car\": { \"PQ\": float, \"SQ\": float, \"RQ\": float, \"IoU\": float},\n ...\n }\n \"\"\"\n for sample_token in tqdm(self.sample_tokens, disable=not self.verbose):\n sample = self.nusc.get('sample', sample_token)\n # Get the sample data token of the point cloud.\n sd_token = sample['data']['LIDAR_TOP']\n\n # Load the ground truth labels for the point cloud.\n panoptic_label_filename = os.path.join(self.nusc.dataroot, self.nusc.get('panoptic', sd_token)['filename'])\n panoptic_label = load_bin_file(panoptic_label_filename, type='panoptic')\n\n # Filter eval classes.\n label_sem = self.mapper.convert_label(panoptic_label // 1000)\n label_inst = panoptic_label\n panoptic_pred_filename = os.path.join(self.results_folder, 'panoptic', self.eval_set,\n sd_token + '_panoptic.npz')\n panoptic_pred = load_bin_file(panoptic_pred_filename, type='panoptic')\n pred_sem = panoptic_pred // 1000\n pred_inst = panoptic_pred\n\n # Get the confusion matrix between the ground truth and predictions. Update the confusion matrix for the\n # sample data into the confusion matrix for the eval set.\n self.evaluator['segmentation'].addBatch(pred_sem, pred_inst, label_sem, label_inst)\n\n mean_pq, mean_sq, mean_rq, class_all_pq, class_all_sq, class_all_rq = self.evaluator['segmentation'].getPQ()\n mean_iou, class_all_iou = self.evaluator['segmentation'].getSemIoU()\n\n results = self.wrap_result_segmentation(mean_pq, mean_sq, mean_rq, mean_iou, class_all_pq, class_all_sq,\n class_all_rq, class_all_iou)\n return results\n\n def wrap_result_segmentation(self,\n mean_pq: np.ndarray,\n mean_sq: np.ndarray,\n mean_rq: np.ndarray,\n mean_iou: np.ndarray,\n class_all_pq: np.ndarray,\n class_all_sq: np.ndarray,\n class_all_rq: np.ndarray,\n class_all_iou: np.ndarray) -> Dict[str, Any]:\n \"\"\"\n Wrap panoptic segmentation results to dict format.\n :param mean_pq: <float64: 1>, Mean Panoptic Quality over all classes.\n :param mean_sq: <float64: 1>, Mean Segmentation Quality over all classes.\n :param mean_rq: <float64: 1>, Mean Recognition Quality over all classes.\n :param mean_iou: <float64: 1>, Mean IoU score over all classes.\n :param class_all_pq: <float64: num_classes,>, Panoptic Quality for each class.\n :param class_all_sq: <float64: num_classes,> Segmentation Quality for each class.\n :param class_all_rq: <float64: num_classes,>, Recognition Quality for each class.\n :param class_all_iou: <float64: num_classes,>, IoU scores for each class.\n :return: A dict of panoptic segmentation metrics.\n \"\"\"\n mean_pq, mean_sq, mean_rq, mean_iou = mean_pq.item(), mean_sq.item(), mean_rq.item(), mean_iou.item()\n class_all_pq = class_all_pq.flatten().tolist()\n class_all_sq = class_all_sq.flatten().tolist()\n class_all_rq = class_all_rq.flatten().tolist()\n class_all_iou = class_all_iou.flatten().tolist()\n\n results = dict()\n 
results[\"all\"] = dict(PQ=mean_pq, SQ=mean_sq, RQ=mean_rq, mIoU=mean_iou)\n for idx, (pq, rq, sq, iou) in enumerate(zip(class_all_pq, class_all_rq, class_all_sq, class_all_iou)):\n results[self.id2name[idx]] = dict(PQ=pq, SQ=sq, RQ=rq, IoU=iou)\n thing_pq_list = [float(results[c][\"PQ\"]) for c in self.things]\n stuff_iou_list = [float(results[c][\"IoU\"]) for c in self.stuff]\n results[\"all\"][\"PQ_dagger\"] = np.mean(thing_pq_list + stuff_iou_list)\n\n return results\n\n def evaluate_tracking(self) -> Dict[str, Any]:\n \"\"\"\n Calculate multi-object panoptic tracking metrics.\n :return: A dict of panoptic metrics for mean of all classes and each class.\n {\n \"all\": { \"PAT\": float, \"PQ\": float, \"TQ\": float, PTQ\": float, \"sPTQ\": float, \"LSTQ\": float,\n \"mIoU\": float, \"S_assoc\": float, \"PTQ_dagger\": float, \"MOTSA\": float, \"sMOTSA\": float,\n \"MOTSP\": float},\n \"ignore\": { \"PTQ\": float, \"sPTQ\": float, \"IoU\": float},\n \"car\": { \"PTQ\": float, \"sPTQ\": float, \"IoU\": float},\n ...\n }\n \"\"\"\n eval_scenes = create_splits_scenes(verbose=False)[self.eval_set]\n for scene in tqdm(eval_scenes, disable=not self.verbose):\n scene = self.nusc.get('scene', self.scene_name2tok[scene])\n cur_token, last_token = scene['first_sample_token'], scene['last_sample_token']\n pred_sem, pred_inst, label_sem, label_inst = [None], [None], [None], [None]\n\n while True:\n cur_sample = self.nusc.get('sample', cur_token)\n sd_token = cur_sample['data']['LIDAR_TOP']\n\n # Load the ground truth labels for the point cloud, filter evaluation classes.\n gt_label_file = os.path.join(self.nusc.dataroot, self.nusc.get('panoptic', sd_token)['filename'])\n panoptic_label = load_bin_file(gt_label_file, type='panoptic')\n label_sem.append(self.mapper.convert_label(panoptic_label // 1000))\n label_sem = label_sem[-2:]\n label_inst.append(panoptic_label)\n label_inst = label_inst[-2:]\n\n # Load predictions for the point cloud, filter evaluation classes.\n pred_file = os.path.join(self.results_folder, 'panoptic', self.eval_set, sd_token + '_panoptic.npz')\n panoptic_pred = load_bin_file(pred_file, type='panoptic')\n pred_sem.append(panoptic_pred // 1000)\n pred_sem = pred_sem[-2:]\n pred_inst.append(panoptic_pred)\n pred_inst = pred_inst[-2:]\n\n # Get the confusion matrix between the ground truth and predictions. 
Update the confusion matrix for\n # the sample data into the confusion matrix for the eval set.\n self.evaluator['tracking'].add_batch(scene['name'], pred_sem, pred_inst, label_sem, label_inst)\n if cur_token == last_token:\n break\n cur_token = cur_sample['next']\n\n pat, mean_pq, mean_tq = self.evaluator['tracking'].get_pat()\n mean_ptq, class_all_ptq, mean_sptq, class_all_sptq = self.evaluator['tracking'].get_ptq()\n mean_iou, class_all_iou = self.evaluator['tracking'].getSemIoU()\n lstq, s_assoc = self.evaluator['tracking'].get_lstq()\n mean_motsa, mean_s_motsa, mean_motsp = self.evaluator['tracking'].get_motsa()\n\n results = self.wrap_result_mopt(pat=pat,\n mean_pq=mean_pq,\n mean_tq=mean_tq,\n mean_ptq=mean_ptq,\n class_all_ptq=class_all_ptq,\n mean_sptq=mean_sptq,\n class_all_sptq=class_all_sptq,\n mean_iou=mean_iou,\n class_all_iou=class_all_iou,\n lstq=lstq,\n s_assoc=s_assoc,\n mean_motsa=mean_motsa,\n mean_s_motsa=mean_s_motsa,\n mean_motsp=mean_motsp)\n\n return results\n\n def wrap_result_mopt(self,\n pat: np.ndarray,\n mean_pq: np.ndarray,\n mean_tq: np.ndarray,\n mean_ptq: np.ndarray,\n class_all_ptq: np.ndarray,\n mean_sptq: np.ndarray,\n class_all_sptq: np.ndarray,\n mean_iou: np.ndarray,\n class_all_iou: np.ndarray,\n lstq: np.ndarray,\n s_assoc: np.ndarray,\n mean_motsa: np.ndarray,\n mean_s_motsa: np.ndarray,\n mean_motsp: np.ndarray) -> Dict[str, Any]:\n \"\"\"\n Wrap up MOPT results to dictionary.\n :param pat: <float64: 1>, Panoptic Tracking (PAT) score over all classes.\n :param mean_pq: <float64: 1>, Mean Panoptic Quality over all classes.\n :param mean_tq: <float64: 1>, Mean Tracking Quality over all temporally unique instances.\n :param mean_ptq: <float64: 1>, Mean PTQ score over all classes.\n :param mean_sptq: <float64: 1>, Mean soft-PTQ score over all classes.\n :param mean_iou: <float64: 1>, Mean IoU score over all classes.\n :param class_all_ptq: <float64: num_classes,>, PTQ scores for each class.\n :param class_all_sptq: <float64: num_classes,>, Soft-PTQ scores for each class.\n :param class_all_iou: <float64: num_classes,>, IoU scores for each class.\n :param lstq: <float64: 1>, LiDAR Segmentation and Tracking Quality (LSTQ) score over all classes.\n :param s_assoc: <float64: 1>, Association Score over all classes.\n :param mean_motsa: <float64: 1>, Mean MOTSA score over all thing classes.\n :param mean_s_motsa: <float64: 1>, Mean sMOTSA score over all thing classes.\n :param mean_motsp: <float64: 1>, Mean MOTSP score over all thing classes.\n :return: A dict of multi-object panoptic tracking metrics.\n \"\"\"\n pat, mean_pq, mean_tq = pat.item(), mean_pq.item(), mean_tq.item()\n mean_ptq, mean_sptq, mean_iou = mean_ptq.item(), mean_sptq.item(), mean_iou.item()\n class_all_ptq = class_all_ptq.flatten().tolist()\n class_all_sptq = class_all_sptq.flatten().tolist()\n class_all_iou = class_all_iou.flatten().tolist()\n\n results = dict()\n results[\"all\"] = dict(PAT=pat, PQ=mean_pq, TQ=mean_tq, PTQ=mean_ptq, sPTQ=mean_sptq,\n LSTQ=lstq, mIoU=mean_iou, S_assoc=s_assoc, MOTSA=mean_motsa,\n sMOTSA=mean_s_motsa, MOTSP=mean_motsp)\n for idx, (ptq, sptq, iou) in enumerate(zip(class_all_ptq, class_all_sptq, class_all_iou)):\n results[self.id2name[idx]] = dict(PTQ=ptq, sPTQ=sptq, IoU=iou)\n thing_ptq_list = [float(results[c][\"PTQ\"]) for c in self.things]\n stuff_iou_list = [float(results[c][\"IoU\"]) for c in self.stuff]\n results[\"all\"][\"PTQ_dagger\"] = np.mean(thing_ptq_list + stuff_iou_list)\n\n return results\n\n def save_result(self, results: Dict[str, 
Dict[str, Any]]) -> None:\n \"\"\"\n Dump evaluation results to result.json\n :param results: {task_name: task_results}, evaluation results in a dictionary.\n \"\"\"\n if self.out_dir:\n os.makedirs(self.out_dir, exist_ok=True)\n with open(self.eval_result_file, 'w') as f:\n json.dump(results, f, indent=2)\n else:\n raise ValueError(f'Invalid output dir: {self.out_dir}')\n\n if self.verbose:\n print(f\"======\\nPanoptic nuScenes {self.task} evaluation for {self.eval_set}\")\n print(json.dumps(results, indent=4, sort_keys=False))\n print(\"======\")\n\n\ndef main():\n parser = argparse.ArgumentParser(description='Evaluate Panoptic nuScenes results.')\n parser.add_argument('--result_path', type=str, help='The path to the results folder.')\n parser.add_argument('--eval_set', type=str, default='val',\n help='Which dataset split to evaluate on, train, val or test.')\n parser.add_argument('--task', type=str, default='segmentation',\n help='What task to evaluate, segmentation or tracking.')\n parser.add_argument('--dataroot', type=str, default='/data/sets/nuscenes', help='Default nuScenes data directory.')\n parser.add_argument('--version', type=str, default='v1.0-trainval',\n help='Which version of the nuScenes dataset to evaluate on, e.g. v1.0-trainval.')\n parser.add_argument('--min_inst_points', type=int, default=15,\n help='Lower bound for the number of points to be considered instance.')\n parser.add_argument('--verbose', type=bool, default=False, help='Whether to print to stdout.')\n parser.add_argument('--out_dir', type=str, default=None, help='Folder to write the panoptic labels to.')\n args = parser.parse_args()\n\n out_dir = args.out_dir if args.out_dir is not None else f'Panoptic-nuScenes-{args.version}'\n task = args.task\n # Overwrite with task from submission.json if the file exists.\n submission_file = os.path.join(args.result_path, args.eval_set, 'submission.json')\n if os.path.exists(submission_file):\n print(submission_file)\n with open(submission_file, 'r') as f:\n data = json.load(f)\n if 'meta' in data and 'task' in data['meta']:\n task = data['meta']['task']\n\n supported_tasks = ['segmentation', 'tracking']\n if task not in supported_tasks:\n raise ValueError(f'Supported task must be one of: {supported_tasks}, got: {task} !')\n\n print(f'Start {task} evaluation... \\nArguments: {args}')\n nusc = NuScenes(version=args.version, dataroot=args.dataroot, verbose=args.verbose)\n\n evaluator = NuScenesPanopticEval(nusc=nusc,\n results_folder=args.result_path,\n eval_set=args.eval_set,\n task=task,\n min_inst_points=args.min_inst_points,\n out_dir=out_dir,\n verbose=args.verbose)\n evaluator.evaluate()\n print(f'Evaluation results saved at {args.out_dir}/{task}-result.json. \\nFinished {task} evaluation.')\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"numpy.mean"
]
] |
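The evaluator above relies on the Panoptic nuScenes label packing in which `panoptic_label // 1000` recovers the semantic class, and the raw value then doubles as a per-instance id; that is exactly how both evaluate_segmentation() and evaluate_tracking() split labels before calling addBatch()/add_batch(). A toy illustration with invented label values:

    import numpy as np

    # Invented labels: two instances of thing class 17, one region of class 24.
    panoptic_label = np.array([17003, 17004, 24000, 24000])
    sem = panoptic_label // 1000   # semantic class per point -> [17 17 24 24]
    inst = panoptic_label          # unique per (class, instance) pair
    print(np.unique(inst[sem == 17]))  # -> [17003 17004]

The same decomposition is why PQ_dagger can be assembled afterwards as np.mean(thing_pq_list + stuff_iou_list): things contribute their PQ, stuff classes only their IoU.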
rubeea/pl_mmpose
|
[
"3b3643c66db636e8f743d0ac8f8fc14b0d5662fc"
] |
[
"mmpose/datasets/datasets/animal/animal_fly_dataset.py"
] |
[
"import os\nfrom collections import OrderedDict\n\nimport json_tricks as json\nimport numpy as np\n\nfrom mmpose.core.evaluation.top_down_eval import (keypoint_auc, keypoint_epe,\n keypoint_pck_accuracy)\nfrom ...builder import DATASETS\nfrom .animal_base_dataset import AnimalBaseDataset\n\n\[email protected]_module()\nclass AnimalFlyDataset(AnimalBaseDataset):\n \"\"\"AnimalFlyDataset for animal pose estimation.\n\n `Fast animal pose estimation using deep neural networks'\n Nature methods'2019. More details can be found in the `paper\n <https://www.biorxiv.org/content/\n biorxiv/early/2018/05/25/331181.full.pdf>`__ .\n\n The dataset loads raw features and apply specified transforms\n to return a dict containing the image tensors and other information.\n\n Vinegar Fly keypoint indexes::\n\n 0: \"start\",\n 1: \"center\",\n 2: \"end\"\n\n Args:\n ann_file (str): Path to the annotation file.\n img_prefix (str): Path to a directory where images are held.\n Default: None.\n data_cfg (dict): config\n pipeline (list[dict | callable]): A sequence of data transforms.\n test_mode (bool): Store True when building test or\n validation dataset. Default: False.\n \"\"\"\n\n def __init__(self,\n ann_file,\n img_prefix,\n data_cfg,\n pipeline,\n test_mode=False):\n\n super().__init__(\n ann_file, img_prefix, data_cfg, pipeline, test_mode=test_mode)\n\n self.ann_info['use_different_joint_weights'] = False\n assert self.ann_info['num_joints'] == 3\n self.ann_info['joint_weights'] = \\\n np.ones((self.ann_info['num_joints'], 1), dtype=np.float32)\n\n # self.ann_info['flip_pairs'] = [[1, 2], [6, 18], [7, 19], [8, 20],\n # [9, 21], [10, 22], [11, 23], [12, 24],\n # [13, 25], [14, 26], [15, 27], [16, 28],\n # [17, 29], [30, 31]]\n\n self.dataset_name = 'fly'\n self.db = self._get_db()\n\n print(f'=> num_images: {self.num_images}')\n print(f'=> load {len(self.db)} samples')\n\n def _get_db(self):\n \"\"\"Load dataset.\"\"\"\n gt_db = []\n bbox_id = 0\n num_joints = self.ann_info['num_joints']\n for img_id in self.img_ids:\n\n ann_ids = self.coco.getAnnIds(imgIds=img_id, iscrowd=False)\n objs = self.coco.loadAnns(ann_ids)\n\n for obj in objs:\n if max(obj['keypoints']) == 0:\n continue\n joints_3d = np.zeros((num_joints, 3), dtype=np.float32)\n joints_3d_visible = np.zeros((num_joints, 3), dtype=np.float32)\n\n keypoints = np.array(obj['keypoints']).reshape(-1, 3)\n joints_3d[:, :2] = keypoints[:, :2]\n joints_3d_visible[:, :2] = np.minimum(1, keypoints[:, 2:3])\n\n # the ori image is 192x192\n center, scale = self._xywh2cs(0, 0, 192, 192, 0.8)\n\n image_file = os.path.join(self.img_prefix,\n self.id2name[img_id])\n\n gt_db.append({\n 'image_file': image_file,\n 'center': center,\n 'scale': scale,\n 'rotation': 0,\n 'joints_3d': joints_3d,\n 'joints_3d_visible': joints_3d_visible,\n 'dataset': self.dataset_name,\n 'bbox': obj['bbox'],\n 'bbox_score': 1,\n 'bbox_id': bbox_id\n })\n bbox_id = bbox_id + 1\n gt_db = sorted(gt_db, key=lambda x: x['bbox_id'])\n\n return gt_db\n\n def _report_metric(self, res_file, metrics, pck_thr=0.2, auc_nor=30):\n \"\"\"Keypoint evaluation.\n\n Args:\n res_file (str): Json file stored prediction results.\n metrics (str | list[str]): Metric to be performed.\n Options: 'PCK', 'PCKh', 'AUC', 'EPE'.\n pck_thr (float): PCK threshold, default as 0.2.\n pckh_thr (float): PCKh threshold, default as 0.7.\n auc_nor (float): AUC normalization factor, default as 30 pixel.\n\n Returns:\n List: Evaluation results for evaluation metric.\n \"\"\"\n info_str = []\n\n with open(res_file, 'r') as 
fin:\n preds = json.load(fin)\n assert len(preds) == len(self.db)\n\n outputs = []\n gts = []\n masks = []\n threshold_bbox = []\n\n for pred, item in zip(preds, self.db):\n outputs.append(np.array(pred['keypoints'])[:, :-1])\n gts.append(np.array(item['joints_3d'])[:, :-1])\n masks.append((np.array(item['joints_3d_visible'])[:, 0]) > 0)\n if 'PCK' in metrics:\n bbox = np.array(item['bbox'])\n bbox_thr = np.max(bbox[2:])\n threshold_bbox.append(np.array([bbox_thr, bbox_thr]))\n\n outputs = np.array(outputs)\n gts = np.array(gts)\n masks = np.array(masks)\n threshold_bbox = np.array(threshold_bbox)\n\n if 'PCK' in metrics:\n _, pck, _ = keypoint_pck_accuracy(outputs, gts, masks, pck_thr,\n threshold_bbox)\n info_str.append(('PCK', pck))\n\n if 'AUC' in metrics:\n info_str.append(('AUC', keypoint_auc(outputs, gts, masks,\n auc_nor)))\n\n if 'EPE' in metrics:\n info_str.append(('EPE', keypoint_epe(outputs, gts, masks)))\n\n return info_str\n\n def evaluate(self, outputs, res_folder, metric='PCK', **kwargs):\n \"\"\"Evaluate Fly keypoint results. The pose prediction results will be\n saved in `${res_folder}/result_keypoints.json`.\n\n Note:\n batch_size: N\n num_keypoints: K\n heatmap height: H\n heatmap width: W\n\n Args:\n outputs (list(preds, boxes, image_path, output_heatmap))\n :preds (np.ndarray[N,K,3]): The first two dimensions are\n coordinates, score is the third dimension of the array.\n :boxes (np.ndarray[N,6]): [center[0], center[1], scale[0]\n , scale[1],area, score]\n :image_paths (list[str]): For example, ['Test/source/0.jpg']\n :output_heatmap (np.ndarray[N, K, H, W]): model outpus.\n\n res_folder (str): Path of directory to save the results.\n metric (str | list[str]): Metric to be performed.\n Options: 'PCK', 'AUC', 'EPE'.\n\n Returns:\n dict: Evaluation results for evaluation metric.\n \"\"\"\n metrics = metric if isinstance(metric, list) else [metric]\n allowed_metrics = ['PCK', 'AUC', 'EPE']\n for metric in metrics:\n if metric not in allowed_metrics:\n raise KeyError(f'metric {metric} is not supported')\n\n res_file = os.path.join(res_folder, 'result_keypoints.json')\n\n kpts = []\n for output in outputs:\n preds = output['preds']\n boxes = output['boxes']\n image_paths = output['image_paths']\n bbox_ids = output['bbox_ids']\n\n batch_size = len(image_paths)\n for i in range(batch_size):\n image_id = self.name2id[image_paths[i][len(self.img_prefix):]]\n\n kpts.append({\n 'keypoints': preds[i].tolist(),\n 'center': boxes[i][0:2].tolist(),\n 'scale': boxes[i][2:4].tolist(),\n 'area': float(boxes[i][4]),\n 'score': float(boxes[i][5]),\n 'image_id': image_id,\n 'bbox_id': bbox_ids[i]\n })\n kpts = self._sort_and_unique_bboxes(kpts)\n\n self._write_keypoint_results(kpts, res_file)\n info_str = self._report_metric(res_file, metrics)\n name_value = OrderedDict(info_str)\n\n return name_value\n"
] |
[
[
"numpy.minimum",
"numpy.ones",
"numpy.max",
"numpy.array",
"numpy.zeros"
]
] |
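_report_metric() above delegates to mmpose's keypoint_pck_accuracy() with a per-sample bbox-sized threshold. For intuition, here is a simplified NumPy re-implementation of the PCK ("Percentage of Correct Keypoints") idea, not mmpose's actual code; the 192 normalizer mirrors the 192x192 images noted in _get_db(), and the keypoint values are invented.

    import numpy as np

    def pck(pred, gt, mask, thr, normalize):
        """pred, gt: [N, K, 2]; mask: [N, K] bool; normalize: [N, 2] (e.g. bbox size)."""
        dist = np.linalg.norm((pred - gt) / normalize[:, None, :], axis=-1)
        return float((dist[mask] < thr).mean())

    pred = np.array([[[10., 10.], [50., 52.]]])
    gt = np.array([[[10., 12.], [50., 50.]]])
    mask = np.ones((1, 2), dtype=bool)
    print(pck(pred, gt, mask, thr=0.2, normalize=np.array([[192., 192.]])))  # -> 1.0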
syinari0123/tridepth
|
[
"5e7e90b537b82e731bb6beac1c8c93fc9187fee0"
] |
[
"auxiliary/evaluations.py"
] |
[
"import csv\nimport math\nimport numpy as np\nimport torch\n\nfrom auxiliary import AverageMeter\n\nworst_scores = {\n \"mse\": np.inf, \"rmse\": np.inf, \"mae\": np.inf,\n \"lg10\": np.inf, \"absrel\": np.inf,\n \"irmse\": np.inf, \"imae\": np.inf,\n \"delta1\": 0., \"delta2\": 0., \"dealta3\": 0.\n}\n\n\nclass EvalResultWriter:\n def __init__(self, csv_filename):\n self.fieldnames = ['epoch', 'mse', 'rmse', 'mae', 'lg10', 'absrel',\n 'irmse', 'imae', 'delta1', 'delta2', 'delta3', 'num_patch', 'num_vertex']\n # Prepare csv file\n self.csv_filename = csv_filename\n self._prepare_csv_file()\n\n # Average calculator (using AvgMeter())\n self.avg_calc_dic = {}\n for fname in self.fieldnames:\n self.avg_calc_dic[fname] = AverageMeter()\n\n def _prepare_csv_file(self):\n \"\"\"Prepare csv file writing csv header\n \"\"\"\n # Write fieldnames into csv header\n with open(self.csv_filename, 'w') as csvfile:\n writer = csv.DictWriter(csvfile, fieldnames=self.fieldnames)\n writer.writeheader()\n\n def write_avg_into_csv(self, epoch):\n \"\"\"Calculate avg scores and write them into csv-file\n Return:\n final_avg_results: Calculated average scores\n \"\"\"\n # Calculate average score from AvgMeter()\n final_avg_results = {}\n for k, v in self.avg_calc_dic.items():\n final_avg_results[k] = v.avg[0]\n final_avg_results[\"epoch\"] = epoch\n\n # Write into csv file\n with open(self.csv_filename, 'a') as csvfile:\n writer = csv.DictWriter(csvfile, fieldnames=self.fieldnames)\n writer.writerow(final_avg_results)\n\n # Reset all the averages for the next step\n self.reset()\n\n # Remove 'epoch' elements\n final_avg_results.pop(\"epoch\")\n\n return final_avg_results\n\n def reset(self):\n \"\"\"Reset all the scores in self.avg_calc_dic\n \"\"\"\n for fname in self.fieldnames:\n self.avg_calc_dic[fname].reset()\n\n def update(self, result_dic, batchsize):\n \"\"\"Add new results into self.avg_calc_dic\n \"\"\"\n # Update each AvgMeter() in avg_calc_dic\n for k, v in result_dic.items():\n self.avg_calc_dic[k].update(v, batchsize)\n\n\ndef log10(x):\n \"\"\"Convert a new tensor with the base-10 logarithm of the elements of x. \"\"\"\n return torch.log(x) / math.log(10)\n\n\ndef depth_evaluations(est_depth, gt_depth):\n \"\"\"Depthmap evaluation on general metrics\n \"\"\"\n # Prepare dictionary\n result_dic = {}\n\n # Choose valid pixel in depthmap\n valid_mask = gt_depth > 0\n est_depth = est_depth[valid_mask]\n gt_depth = gt_depth[valid_mask]\n\n # Error based metrics\n abs_diff = (est_depth - gt_depth).abs()\n result_dic[\"mse\"] = float((torch.pow(abs_diff, 2)).mean())\n result_dic[\"rmse\"] = math.sqrt(result_dic[\"mse\"])\n result_dic[\"mae\"] = float(abs_diff.mean())\n result_dic[\"lg10\"] = float((log10(est_depth) - log10(gt_depth)).abs().mean())\n result_dic[\"absrel\"] = float((abs_diff / gt_depth).mean())\n\n # Ratio based metrics\n maxRatio = torch.max(est_depth / gt_depth, gt_depth / est_depth)\n result_dic[\"delta1\"] = float((maxRatio < 1.25).float().mean())\n result_dic[\"delta2\"] = float((maxRatio < 1.25 ** 2).float().mean())\n result_dic[\"delta3\"] = float((maxRatio < 1.25 ** 3).float().mean())\n\n # Error on inverse depthmap\n inv_est_depth = 1 / est_depth\n inv_gt_depth = 1 / gt_depth\n abs_inv_diff = (inv_est_depth - inv_gt_depth).abs()\n result_dic[\"irmse\"] = math.sqrt((torch.pow(abs_inv_diff, 2)).mean())\n result_dic[\"imae\"] = float(abs_inv_diff.mean())\n\n return result_dic\n"
] |
[
[
"torch.pow",
"torch.log",
"torch.max"
]
] |
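The delta1/delta2/delta3 scores in depth_evaluations() above are threshold accuracies: the fraction of valid pixels whose prediction/ground-truth ratio, taken in the worse of the two directions, stays under 1.25**k. A quick torch check with invented depth values:

    import torch

    est = torch.tensor([1.0, 2.0, 4.0])  # made-up predictions
    gt = torch.tensor([1.1, 2.0, 8.0])   # made-up ground truth
    max_ratio = torch.max(est / gt, gt / est)
    for k in (1, 2, 3):
        acc = float((max_ratio < 1.25 ** k).float().mean())
        print(f"delta{k}: {acc:.3f}")  # the third pixel (ratio 2.0) fails all three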
zomansud/machine-learning-specialization
|
[
"8b63eda4194241edc0c493fb74ca6834c9d0792d"
] |
[
"ml-clustering-and-retrieval/week-4/em_utilities.py"
] |
[
"from scipy.sparse import csr_matrix\nfrom scipy.sparse import spdiags\nfrom scipy.stats import multivariate_normal\nimport graphlab\nimport numpy as np\nimport sys\nimport time\nfrom copy import deepcopy\nfrom sklearn.metrics import pairwise_distances\nfrom sklearn.preprocessing import normalize\n\ndef sframe_to_scipy(x, column_name):\n '''\n Convert a dictionary column of an SFrame into a sparse matrix format where\n each (row_id, column_id, value) triple corresponds to the value of\n x[row_id][column_id], where column_id is a key in the dictionary.\n\n Example\n >>> sparse_matrix, map_key_to_index = sframe_to_scipy(sframe, column_name)\n '''\n assert x[column_name].dtype() == dict, \\\n 'The chosen column must be dict type, representing sparse data.'\n\n # Create triples of (row_id, feature_id, count).\n # 1. Add a row number.\n x = x.add_row_number()\n # 2. Stack will transform x to have a row for each unique (row, key) pair.\n x = x.stack(column_name, ['feature', 'value'])\n\n # Map words into integers using a OneHotEncoder feature transformation.\n f = graphlab.feature_engineering.OneHotEncoder(features=['feature'])\n # 1. Fit the transformer using the above data.\n f.fit(x)\n # 2. The transform takes 'feature' column and adds a new column 'feature_encoding'.\n x = f.transform(x)\n # 3. Get the feature mapping.\n mapping = f['feature_encoding']\n # 4. Get the feature id to use for each key.\n x['feature_id'] = x['encoded_features'].dict_keys().apply(lambda x: x[0])\n\n # Create numpy arrays that contain the data for the sparse matrix.\n i = np.array(x['id'])\n j = np.array(x['feature_id'])\n v = np.array(x['value'])\n width = x['id'].max() + 1\n height = x['feature_id'].max() + 1\n\n # Create a sparse matrix.\n mat = csr_matrix((v, (i, j)), shape=(width, height))\n\n return mat, mapping\n\ndef diag(array):\n n = len(array)\n return spdiags(array, 0, n, n)\n\ndef logpdf_diagonal_gaussian(x, mean, cov):\n '''\n Compute logpdf of a multivariate Gaussian distribution with diagonal covariance at a given point x.\n A multivariate Gaussian distribution with a diagonal covariance is equivalent\n to a collection of independent Gaussian random variables.\n\n x should be a sparse matrix. 
The logpdf will be computed for each row of x.\n mean and cov should be given as 1D numpy arrays\n mean[i] : mean of i-th variable\n cov[i] : variance of i-th variable'''\n\n n = x.shape[0]\n dim = x.shape[1]\n assert(dim == len(mean) and dim == len(cov))\n\n # multiply each i-th column of x by (1/(2*sigma_i)), where sigma_i is sqrt of variance of i-th variable.\n scaled_x = x.dot( diag(1./(2*np.sqrt(cov))) )\n # multiply each i-th entry of mean by (1/(2*sigma_i))\n scaled_mean = mean/(2*np.sqrt(cov))\n\n # sum of pairwise squared Eulidean distances gives SUM[(x_i - mean_i)^2/(2*sigma_i^2)]\n return -np.sum(np.log(np.sqrt(2*np.pi*cov))) - pairwise_distances(scaled_x, [scaled_mean], 'euclidean').flatten()**2\n\ndef log_sum_exp(x, axis):\n '''Compute the log of a sum of exponentials'''\n x_max = np.max(x, axis=axis)\n if axis == 1:\n return x_max + np.log( np.sum(np.exp(x-x_max[:,np.newaxis]), axis=1) )\n else:\n return x_max + np.log( np.sum(np.exp(x-x_max), axis=0) )\n\ndef EM_for_high_dimension(data, means, covs, weights, cov_smoothing=1e-5, maxiter=int(1e3), thresh=1e-4, verbose=False):\n # cov_smoothing: specifies the default variance assigned to absent features in a cluster.\n # If we were to assign zero variances to absent features, we would be overconfient,\n # as we hastily conclude that those featurese would NEVER appear in the cluster.\n # We'd like to leave a little bit of possibility for absent features to show up later.\n n = data.shape[0]\n dim = data.shape[1]\n mu = deepcopy(means)\n Sigma = deepcopy(covs)\n K = len(mu)\n weights = np.array(weights)\n\n ll = None\n ll_trace = []\n\n for i in range(maxiter):\n # E-step: compute responsibilities\n logresp = np.zeros((n,K))\n for k in xrange(K):\n logresp[:,k] = np.log(weights[k]) + logpdf_diagonal_gaussian(data, mu[k], Sigma[k])\n ll_new = np.sum(log_sum_exp(logresp, axis=1))\n if verbose:\n print(ll_new)\n sys.stdout.flush()\n logresp -= np.vstack(log_sum_exp(logresp, axis=1))\n resp = np.exp(logresp)\n counts = np.sum(resp, axis=0)\n\n # M-step: update weights, means, covariances\n weights = counts / np.sum(counts)\n for k in range(K):\n mu[k] = (diag(resp[:,k]).dot(data)).sum(axis=0)/counts[k]\n mu[k] = mu[k].A1\n\n Sigma[k] = diag(resp[:,k]).dot( data.multiply(data)-2*data.dot(diag(mu[k])) ).sum(axis=0) \\\n + (mu[k]**2)*counts[k]\n Sigma[k] = Sigma[k].A1 / counts[k] + cov_smoothing*np.ones(dim)\n\n # check for convergence in log-likelihood\n ll_trace.append(ll_new)\n if ll is not None and (ll_new-ll) < thresh and ll_new > -np.inf:\n ll = ll_new\n break\n else:\n ll = ll_new\n\n out = {'weights':weights,'means':mu,'covs':Sigma,'loglik':ll_trace,'resp':resp}\n\n return out\n"
] |
[
[
"sklearn.metrics.pairwise_distances",
"numpy.log",
"numpy.sqrt",
"scipy.sparse.csr_matrix",
"numpy.ones",
"scipy.sparse.spdiags",
"numpy.max",
"numpy.exp",
"numpy.array",
"numpy.zeros",
"numpy.sum"
]
] |
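The EM routine in the record above leans on the log-sum-exp trick to normalize per-cluster responsibilities without underflow (note that its `for k in xrange(K)` loop is a Python 2 idiom; under Python 3 this would be `range(K)`). A minimal NumPy sketch of the trick with made-up log-responsibilities, independent of the GraphLab SFrame pipeline:

```python
import numpy as np

def log_sum_exp(x, axis):
    # Shift by the max so the largest term is exp(0) = 1 and nothing underflows.
    x_max = np.max(x, axis=axis, keepdims=True)
    return np.squeeze(x_max, axis=axis) + np.log(np.sum(np.exp(x - x_max), axis=axis))

logresp = np.array([[-1000.0, -1001.0],
                    [   -2.0,    -1.0]])
# Naive normalization underflows: exp(-1000) == 0.0 exactly
# (the first entry comes out -inf, with a RuntimeWarning).
print(np.log(np.exp(logresp).sum(axis=1)))
# The shifted version recovers finite log-normalizers: [-999.6867, -0.6867].
print(log_sum_exp(logresp, axis=1))
```

Shifting by the row maximum is exact, not an approximation: log(sum exp(x_i)) = m + log(sum exp(x_i - m)) for any m.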
houzhenzhen/chainer
|
[
"642cb7470f7b3d03e3aea36aa6cf3e614309f2d9"
] |
[
"chainer/functions/array/permutate.py"
] |
[
"import numpy\nimport six\n\nimport chainer\nfrom chainer import cuda\nfrom chainer import function\nfrom chainer.utils import type_check\n\n\ndef _check_indices(indices):\n if len(indices) == 0:\n return\n # TODO(unno): Check indices without cpu\n indices = cuda.to_cpu(indices)\n for i in indices:\n if 0 <= i < len(indices):\n continue\n raise ValueError('Out of bounds index: {}'.format(i))\n sort = numpy.sort(indices)\n for s, t in six.moves.zip(sort, sort[1:]):\n if s == t:\n raise ValueError('indices contains duplicate value: {}'.format(s))\n\n\ndef _inverse_indices(indices):\n xp = cuda.get_array_module(indices)\n r = xp.empty_like(indices)\n if xp is numpy:\n for i, ind in enumerate(indices):\n r[ind] = i\n else:\n cuda.elementwise(\n 'int32 ind', 'raw int32 r',\n 'r[ind] = i',\n 'inverse_indices'\n )(indices, r)\n return r\n\n\nclass Permutate(function.Function):\n\n \"\"\"Permutate function.\"\"\"\n\n def __init__(self, axis=0, inv=False):\n self.axis = axis\n self.inv = inv\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 2)\n x_type, ind_type = in_types\n if self.axis < 0:\n type_check.expect(x_type.ndim >= -self.axis)\n else:\n type_check.expect(x_type.ndim > self.axis)\n\n type_check.expect(\n ind_type.dtype == numpy.int32,\n ind_type.ndim == 1,\n x_type.shape[self.axis] == ind_type.shape[0],\n )\n\n def _permutate(self, x, indices, inv):\n xp = cuda.get_array_module(x)\n if inv:\n indices = _inverse_indices(indices)\n\n return xp.take(x, indices, axis=self.axis)\n\n def forward(self, inputs):\n self.retain_inputs((1,))\n x, inds = inputs\n\n if chainer.is_debug():\n _check_indices(inds)\n\n return self._permutate(x, inds, self.inv),\n\n def backward(self, inputs, grads):\n inds = inputs[1]\n g = grads[0]\n return self._permutate(g, inds, not self.inv), None\n\n\ndef permutate(x, indices, axis=0, inv=False):\n \"\"\"Permutates a given variable along an axis.\n\n This function permutate ``x`` with given ``indices``.\n That means ``y[i] = x[indices[i]]`` for all ``i``.\n Note that this result is same as ``y = x.take(indices)``.\n ``indices`` must be a permutation of ``[0, 1, ..., len(x) - 1]``.\n\n When ``inv`` is ``True``, ``indices`` is treated as its inverse.\n That means ``y[indices[i]] = x[i]``.\n\n Args:\n x (:class:`~chainer.Variable` or :class:`numpy.ndarray` or \\\n :class:`cupy.ndarray`):\n Variable to permutate.\n A :math:`(s_1, s_2, ..., s_N)` -shaped float array.\n indices (:class:`~chainer.Variable` or :class:`numpy.ndarray` or \\\n :class:`cupy.ndarray`):\n Indices to extract from the variable. A one-dimensional int array.\n axis (int): Axis that the input array is permutate along.\n inv (bool): If ``True``, ``indices`` is treated as its inverse.\n\n Returns:\n ~chainer.Variable: Output variable.\n\n .. admonition:: Example\n\n >>> x = np.arange(6).reshape((3, 2)).astype('f')\n >>> x\n array([[ 0., 1.],\n [ 2., 3.],\n [ 4., 5.]], dtype=float32)\n >>> indices = np.array([2, 0, 1], 'i')\n >>> y = F.permutate(x, indices)\n >>> y.data\n array([[ 4., 5.],\n [ 0., 1.],\n [ 2., 3.]], dtype=float32)\n >>> y = F.permutate(x, indices, inv=True)\n >>> y.data\n array([[ 2., 3.],\n [ 4., 5.],\n [ 0., 1.]], dtype=float32)\n >>> indices = np.array([1, 0], 'i')\n >>> y = F.permutate(x, indices, axis=1)\n >>> y.data\n array([[ 1., 0.],\n [ 3., 2.],\n [ 5., 4.]], dtype=float32)\n\n \"\"\"\n return Permutate(axis=axis, inv=inv)(x, indices)\n"
] |
[
[
"numpy.sort"
]
] |
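`Permutate` above implements gather semantics, `y[i] = x[indices[i]]`, and its backward pass reuses the same kernel with the inverse permutation. A small NumPy sketch of that inverse-index identity on toy data:

```python
import numpy as np

def inverse_indices(indices):
    # r[indices[i]] = i, so taking with r undoes taking with indices.
    r = np.empty_like(indices)
    r[indices] = np.arange(len(indices))
    return r

x = np.arange(6.0).reshape(3, 2)
indices = np.array([2, 0, 1])
y = np.take(x, indices, axis=0)                       # y[i] = x[indices[i]]
x_back = np.take(y, inverse_indices(indices), axis=0)
assert np.array_equal(x, x_back)
```

This is why the gradient of a permutation is simply the inverse permutation of the incoming gradient, as the `backward` method above exploits with `not self.inv`.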
erfanMhi/rlpyt
|
[
"56574ea209f48075c26179c5b2f1a4676c38efdd"
] |
[
"rlpyt/ul/envs/maze.py"
] |
[
"import numpy as np\nimport matplotlib.pyplot as plt\nimport os\n\nfrom collections import namedtuple\nfrom rlpyt.envs.base import Env, EnvStep\nfrom rlpyt.spaces.int_box import IntBox\nfrom rlpyt.utils.quick_args import save__init__args\nfrom rlpyt.samplers.collections import TrajInfo\n\n\nEnvInfo = namedtuple(\"EnvInfo\", [\"game_score\", \"traj_done\"])\n\nclass GridHardXY(Env):\n def __init__(self, seed=np.random.randint(int(1e5))):\n #random_seed(seed)\n self.state_dim = (2,)\n self.obstacles_map = self.get_obstacles_map()\n # Spaces\n self.min_x, self.max_x, self.min_y, self.max_y = 0, 14, 0, 14\n self.goal_x, self.goal_y = 9, 9\n self.current_state = None\n H, W = self.max_x+1, self.max_y+1\n self.actions = [(0, 1), (0, -1), (1, 0), (-1, 0)]\n self._action_space = IntBox(low=0, high=len(self.actions))\n obs_shape = (3, H, W)\n self._observation_space = IntBox(low=0, high=256, shape=obs_shape,\n dtype=\"uint8\")\n self._obs = np.zeros(shape=obs_shape, dtype=\"uint8\")\n self.timestep = 0\n\n def generate_state(self, coords):\n return np.array(coords)\n\n def info(self, key):\n return\n\n def reset(self):\n self.timestep = 0\n while True:\n rand_state = np.random.randint(low=0, high=15, size=2)\n rx, ry = rand_state\n if not int(self.obstacles_map[rx][ry]) and not (rx == self.goal_x and ry == self.goal_y):\n self.current_state = rand_state[0], rand_state[1]\n return self.generate_state(self.current_state)\n\n def step(self, a):\n dx, dy = self.actions[a]\n x, y = self.current_state\n\n nx = x + dx\n ny = y + dy\n\n nx, ny = min(max(nx, self.min_x), self.max_x), min(max(ny, self.min_y), self.max_y)\n\n if not self.obstacles_map[nx][ny]:\n x, y = nx, ny\n self.current_state = x, y\n if x == self.goal_x and y == self.goal_y:\n env_info = EnvInfo(game_score=1.0, traj_done=True)\n return EnvStep(self.generate_state([x, y]), 1.0, (True), env_info)\n elif self.timestep == 100:\n env_info = EnvInfo(game_score=0.0, traj_done=True)\n return EnvStep(self.generate_state([x, y]), 0.0, (True), env_info)\n else: \n env_info = EnvInfo(game_score=0.0, traj_done=False)\n self.timestep += 1\n return EnvStep(self.generate_state([x, y]), 0.0, (False), env_info)\n\n def get_visualization_segment(self):\n state_coords = [[x, y] for x in range(15)\n for y in range(15) if not int(self.obstacles_map[x][y])]\n states = [self.generate_state(coord) for coord in state_coords]\n goal_coords = [[9, 9], [0, 0], [14, 0], [7, 14]]\n goal_states = [self.generate_state(coord) for coord in goal_coords]\n return np.array(states), np.array(state_coords), np.array(goal_states), np.array(goal_coords)\n\n def get_obstacles_map(self):\n _map = np.zeros([15, 15])\n _map[2, 0:6] = 1.0\n _map[2, 8:] = 1.0\n _map[3, 5] = 1.0\n _map[4, 5] = 1.0\n _map[5, 2:7] = 1.0\n _map[5, 9:] = 1.0\n _map[8, 2] = 1.0\n _map[8, 5] = 1.0\n _map[8, 8:] = 1.0\n _map[9, 2] = 1.0\n _map[9, 5] = 1.0\n _map[9, 8] = 1.0\n _map[10, 2] = 1.0\n _map[10, 5] = 1.0\n _map[10, 8] = 1.0\n _map[11, 2:6] = 1.0\n _map[11, 8:12] = 1.0\n _map[12, 5] = 1.0\n _map[13, 5] = 1.0\n _map[14, 5] = 1.0\n\n return _map\n\n def get_useful(self, state=None):\n if state:\n return state\n else:\n return self.current_state\n\nclass GridHardRGB(GridHardXY):\n def __init__(self, seed=np.random.randint(int(1e5))):\n super().__init__(seed)\n\n d = len(self.obstacles_map)\n self.state_dim = (d, d, 3)\n\n \"\"\"\n # Gray-scale image\n Walls are Red\n Open spaces are Green\n Agent is Blue\n \"\"\"\n self.rgb_template = np.zeros(self.state_dim)\n for x in range(d):\n for y in range(d):\n 
if self.obstacles_map[x][y]:\n self.rgb_template[x][y][0] = 255.0\n else:\n self.rgb_template[x][y][1] = 255.0\n\n def generate_state(self, coords):\n state = np.copy(self.rgb_template)\n x, y = coords\n assert state[x][y][1] == 255.0 and state[x][y][2] == 0.0\n\n state[x][y][1] = 0.0 # setting the green color on\n state[x][y][2] = 255.0 # turning the blue color on\n return np.rollaxis(state, 2, 0).astype('uint8')\n\n def get_features(self, state):\n raise NotImplementedError\n\n def get_useful(self, state=None):\n blue = np.array([0., 0., 255.])\n if state is None:\n state = self.generate_state(self.current_state)\n idx = np.where(np.all(state==blue, axis=2) == True)\n coord = np.array([idx[0][0], idx[1][0]])\n return coord\n\nclass GridHardRGBGoalAll(GridHardRGB):\n def __init__(self, goal_id, seed=np.random.randint(int(1e5))):\n super().__init__(seed)\n # self.nos = (self.state_dim[0] * self.state_dim[1]) - int(np.sum(self.obstacles_map))\n self.goals = [[i, j] for i in range(self.state_dim[0]) \\\n for j in range(self.state_dim[1]) if not self.obstacles_map[i, j]]\n self.goal_x, self.goal_y = self.goals[goal_id]\n self.goal_state_idx = goal_id\n \n\n def get_goal(self):\n return self.goal_state_idx, [self.goal_x, self.goal_y]\n\n def get_goals_list(self):\n return self.goals\n\n def visualize_goal_id(self):\n ids = np.zeros((self.state_dim[0], self.state_dim[1]))\n for idx, xy in enumerate(self.goals):\n ids[xy[0], xy[1]] = idx\n plt.figure()\n plt.imshow(ids, interpolation='nearest', cmap=\"Blues\")\n for k in range(self.state_dim[0]):\n for j in range(self.state_dim[1]):\n if ids[k, j] != 0:\n plt.text(j, k, \"{:1.0f}\".format(ids[k, j]),\n ha=\"center\", va=\"center\", color=\"orange\")\n plt.show()"
] |
[
[
"numpy.rollaxis",
"matplotlib.pyplot.imshow",
"numpy.all",
"numpy.copy",
"numpy.random.randint",
"numpy.array",
"numpy.zeros",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] |
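`GridHardRGB.get_useful` above recovers the agent's coordinates by scanning the rendered observation for the single blue pixel. One subtlety: `generate_state` returns a CHW array (after `np.rollaxis`), while the `axis=2` comparison in `get_useful` assumes HWC layout. A toy HWC sketch of the lookup (walls omitted; colours follow the record's green-floor/blue-agent convention):

```python
import numpy as np

H = W = 15
obs = np.zeros((H, W, 3), dtype=np.uint8)
obs[..., 1] = 255                     # open space rendered green
obs[4, 7] = (0, 0, 255)               # agent rendered blue at (x=4, y=7)

blue = np.array([0, 0, 255], dtype=np.uint8)
idx = np.where(np.all(obs == blue, axis=2))
coord = np.array([idx[0][0], idx[1][0]])
print(coord)                          # [4 7]
```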
Goochaozheng/ChunkFusion
|
[
"7458a8e08886cc76cfeb87881c51e23b1d0674c3"
] |
[
"module/fusionIntegrator.py"
] |
[
"from network.utils import toSparseInput\nfrom network.fuser import Fuser\nimport open3d as o3d\nimport torch\nimport spconv\nimport numpy as np\nfrom time import time\n\nfrom .chunkManager import ChunkManager\nfrom geometry import transformPoints, pointToPixel\nfrom network import Fuser, Parser\n\n\nclass FusionIntegrator(ChunkManager):\n \"\"\"\n Reconstruction engine using the fusion network.\n \"\"\"\n\n def __init__(self, chunkSize:int, voxelResolution:float, truncation:float, minPointsPerChunk:int, meshing:bool, device, \n withFuser=True, withParser=True, parserModel=None, fuserModel=None,\n batchSize=128, padding=0, withBestScan=False, verbose=False) -> None:\n\n ChunkManager.__init__(self, chunkSize, voxelResolution, truncation, minPointsPerChunk, padding, meshing, device)\n\n self.verbose = verbose\n self.batchSize = batchSize\n self.withBestScan = withBestScan\n self.withFuser = withFuser\n self.withParser = withParser\n\n if self.withFuser:\n self.fuser = Fuser()\n self.fuser.load_state_dict(torch.load(fuserModel))\n self.fuser.eval().to(device)\n\n if self.withParser:\n self.parser = Parser()\n self.parser.load_state_dict(torch.load(parserModel))\n self.parser.eval().to(device)\n\n\n def integrateFrame(self, depthPointCloud):\n \"\"\"\n Integrate frame using fusion network.\n \"\"\"\n\n with torch.no_grad():\n\n if self.verbose:\n intersect_begin = time()\n\n chunkList, pointCount = self.getChunkFromPointCloud(depthPointCloud)\n if len(chunkList) == 0:\n print(\"- No Chunk Intersecting.\")\n return\n\n # if self.verbose:\n # print(\"- Get {} chunk intersecting. Timing: {:.06f}\".format(len(chunkList), time() - intersect_begin)) \n # allocation_begin = time()\n\n # Allocate chunks \n chunkData = self.getChunkListData(chunkList, withPad=(self.padding != 0))\n voxelPoints = chunkData[\"voxelPoints\"]\n localTSDF = chunkData[\"voxelTSDF\"]\n localWeight = chunkData[\"voxelWeight\"]\n obsCount = chunkData[\"obsCount\"]\n\n # if self.verbose:\n # print(\"- Querying {} chunks. 
Timing: {:.06f}\".format(len(chunkList), time() - allocation_begin)) \n # integrate_begin = time()\n\n if self.withBestScan:\n # For Debugging, Selective Update, Only use the scan with most points to infer the surface\n updateMask = pointCount > obsCount\n chunkList = chunkList[updateMask.cpu().numpy()]\n pointCount = pointCount[updateMask]\n voxelPoints = voxelPoints[updateMask]\n localTSDF = localTSDF[updateMask]\n localWeight = localWeight[updateMask]\n\n # Get Chunk TSDF\n inputTSDF = self.computeTSDF(depthPointCloud, voxelPoints, withPad=(self.padding != 0))\n\n inputTSDF = inputTSDF.unsqueeze(1)\n localTSDF = localTSDF.unsqueeze(1)\n localWeight = localWeight.unsqueeze(1)\n \n # Split input chunks as multiple batch\n numChunk = len(inputTSDF)\n numBatches = np.ceil(numChunk / self.batchSize)\n \n outputTSDF = torch.empty((0, self.chunkSize, self.chunkSize, self.chunkSize), device=self.device)\n outputWeight = torch.empty((0, self.chunkSize, self.chunkSize, self.chunkSize), device=self.device)\n\n for batchCount in np.arange(numBatches):\n\n dataBegin = int(batchCount * self.batchSize)\n dataEnd = int(min((batchCount + 1) * self.batchSize, numChunk))\n \n localTSDF_batch = localTSDF[dataBegin : dataEnd]\n localWeight_batch = localWeight[dataBegin : dataEnd]\n inputTSDF_batch = inputTSDF[dataBegin : dataEnd]\n\n inputMask = torch.abs(inputTSDF_batch) < 1\n localMask = torch.abs(localTSDF_batch) < 1\n updateMask = torch.logical_or(inputMask, localMask)\n\n # Predict\n if self.withFuser:\n # Fuse TSDF using fuserNet\n fuseTSDF_batch = self.fuser(inputTSDF_batch, localTSDF_batch)\n fuseWeight_batch = torch.ones_like(inputTSDF_batch)\n else:\n # Fuse TSDF with standard constant weight\n inputWeight_batch = torch.ones_like(inputTSDF_batch)\n fuseTSDF_batch, fuseWeight_batch = self.weightUpdate(inputTSDF_batch, inputWeight_batch, localTSDF_batch, localWeight_batch)\n\n if self.withParser:\n # Refine the TSDF with parser\n fuseTSDF_batch = self.parser(fuseTSDF_batch)\n elif self.withFuser:\n fuseTSDF_batch = spconv.ToDense()(fuseTSDF_batch) * 2 - 1\n\n # Set all invalid voxel to 1 for better numerical stability\n fuseTSDF_batch[torch.abs(fuseTSDF_batch) >= 1] = 1\n\n # Remove Padding\n fuseTSDF_batch = fuseTSDF_batch.squeeze(1)\n fuseWeight_batch = fuseWeight_batch.squeeze(1)\n if self.padding != 0:\n fuseTSDF_batch = fuseTSDF_batch[:,\n self.padding : self.padding + self.chunkSize,\n self.padding : self.padding + self.chunkSize,\n self.padding : self.padding + self.chunkSize\n ]\n\n fuseWeight_batch = fuseWeight_batch[:,\n self.padding : self.padding + self.chunkSize,\n self.padding : self.padding + self.chunkSize,\n self.padding : self.padding + self.chunkSize\n ]\n\n outputTSDF = torch.cat((outputTSDF, fuseTSDF_batch), dim=0)\n outputWeight = torch.cat((outputWeight, fuseWeight_batch), dim=0)\n\n # Update TSDF value into chunk manager\n self.setChunkListData(chunkList, outputTSDF, outputWeight, pointCount)\n\n if self.verbose:\n print(\"- Integrated {} chunks. 
Timing: {:.06f}\".format(len(chunkList), time() - intersect_begin)) \n # print(\"- Total time: {:.06f}\".format(time() - intersect_begin)) \n\n return chunkList\n\n\n def weightUpdate(self, inputTSDF, inputWeight, localTSDF, localWeight):\n inputMask = torch.abs(inputTSDF) < 1\n\n validInputWeight = inputWeight[inputMask]\n validLocalWeight = localWeight[inputMask]\n validNewWeight = validInputWeight + validLocalWeight\n\n localTSDF[inputMask] = (inputTSDF[inputMask] * validInputWeight + localTSDF[inputMask] * validLocalWeight) / validNewWeight\n localWeight[inputMask] = validNewWeight\n\n return localTSDF, localWeight\n\n\n def computeTSDF(self, frame, voxelPoints, withPad=True):\n \"\"\"\n Compute the TSDF value of given point cloud.\n\n :param frame: DepthPointCloud of current frame, containing depth map, intrinsics & pose.\n :param voxelPoints: 3D coordinates of all voxels. (N x 4096 x 3)\n ;param withPad: padding chunk or not\n :return TSDFValue: The TSDF value computed from input frame. (N x 4096)\n :return inputWeight: The observation mark of voxel. (N x 4096)\n \"\"\"\n\n numChunks = len(voxelPoints)\n voxelPoints = voxelPoints.reshape((-1, 3))\n voxelPoints = transformPoints(voxelPoints, frame.cameraPose.inverse())\n voxelPoints_z = voxelPoints[:, 2]\n\n voxelPixels = pointToPixel(voxelPoints, frame.intrinsics)\n voxelPixels_u = voxelPixels[:, 0]\n voxelPixels_v = voxelPixels[:, 1]\n \n # Filter out voxels points that visible in current frame\n imgHeight, imgWidth = frame.depth.shape\n pixelMask = torch.logical_and(voxelPixels_u >= 0,\n torch.logical_and(voxelPixels_u < imgWidth,\n torch.logical_and(voxelPixels_v >= 0,\n torch.logical_and(voxelPixels_v < imgHeight,\n voxelPoints_z > 0))))\n\n depthValue = torch.zeros(len(voxelPixels), dtype=torch.float32, device=self.device)\n valid_v = voxelPixels_v[pixelMask].long()\n valid_u = voxelPixels_u[pixelMask].long()\n depthValue[pixelMask] = frame.depth[valid_v, valid_u]\n\n # Compute truncated SDF value\n SDFValue = depthValue - voxelPoints_z\n voxelMask = torch.logical_and(depthValue > 0, torch.abs(SDFValue) < self.truncation) \n TSDFValue = torch.ones_like(SDFValue)\n TSDFValue[voxelMask] = SDFValue[voxelMask] / self.truncation\n\n if withPad:\n TSDFValue = TSDFValue.reshape((numChunks, self.paddedSize, self.paddedSize, self.paddedSize))\n else:\n TSDFValue = TSDFValue.reshape((numChunks, self.chunkSize, self.chunkSize, self.chunkSize))\n\n return TSDFValue\n\n\n\n def tsdfUpdate(self, frame, voxelPoints, voxelTSDF, voxelWeight):\n \"\"\"\n TSDF Updating.\n\n :param frame: DepthPointCloud of current frame, containing depth map, intrinsics & pose.\n :param voxelPoints: 3D coordinates of all voxels. (N x 3)\n :param voxelTSDF: TSDF value of each voxel. (N x 1)\n :param voxelWeight: Weight of each voxel. 
(N x 1)\n \"\"\"\n\n numChunk = len(voxelTSDF)\n voxelPoints = voxelPoints.reshape((-1, 3))\n voxelTSDF = voxelTSDF.reshape(-1)\n voxelWeight = voxelWeight.reshape(-1)\n\n voxelPoints = transformPoints(voxelPoints, frame.cameraPose.inverse())\n voxelPoints_z = voxelPoints[:, 2]\n\n voxelPixels = pointToPixel(voxelPoints, frame.intrinsics)\n voxelPixels_u = voxelPixels[:, 0]\n voxelPixels_v = voxelPixels[:, 1]\n \n # Filter out voxels points that visible in current frame\n imgHeight, imgWidth = frame.depth.shape\n pixelMask = torch.logical_and(voxelPixels_u >= 0,\n torch.logical_and(voxelPixels_u < imgWidth,\n torch.logical_and(voxelPixels_v >= 0,\n torch.logical_and(voxelPixels_v < imgHeight,\n voxelPoints_z > 0))))\n\n depthValue = torch.zeros(len(voxelPixels), dtype=torch.float32, device=self.device)\n valid_v = voxelPixels_v[pixelMask].long()\n valid_u = voxelPixels_u[pixelMask].long()\n depthValue[pixelMask] = frame.depth[valid_v, valid_u]\n\n # Compute truncated SDF value\n SDFValue = depthValue - voxelPoints_z\n voxelMask = torch.logical_and(depthValue > 0, torch.abs(SDFValue) < self.truncation) \n TSDFValue = SDFValue[voxelMask] / self.truncation\n\n oldTSDF = voxelTSDF[voxelMask]\n oldWeight = voxelWeight[voxelMask]\n\n # Integrate\n newWeight = oldWeight + torch.ones_like(oldWeight, dtype=torch.float32, device=self.device)\n voxelWeight[voxelMask] = newWeight\n voxelTSDF[voxelMask] = (oldWeight * oldTSDF + TSDFValue) / newWeight\n\n voxelPoints = voxelPoints.reshape((numChunk, self.paddedSize**3, 3))\n voxelTSDF = voxelTSDF.reshape((numChunk, self.paddedSize, self.paddedSize, self.paddedSize))\n voxelWeight = voxelWeight.reshape((numChunk, self.paddedSize, self.paddedSize, self.paddedSize))\n\n return voxelTSDF, voxelWeight\n\n\n\n def inverseTSDF(self, inputTSDF:torch.Tensor):\n \"\"\"\n Convert the TSDF into inverse TSDF.\n \"\"\"\n outputTSDF = torch.zeros_like(inputTSDF) \n outputTSDF[inputTSDF == 0] = 1\n outputTSDF[inputTSDF > 0] = 1 - inputTSDF[inputTSDF > 0]\n outputTSDF[inputTSDF < 0] = -1 - inputTSDF[inputTSDF < 0]\n return outputTSDF"
] |
[
[
"torch.abs",
"torch.empty",
"torch.load",
"torch.cat",
"numpy.arange",
"torch.zeros_like",
"numpy.ceil",
"torch.no_grad",
"torch.logical_and",
"torch.logical_or",
"torch.ones_like"
]
] |
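The `weightUpdate` fallback above is the classic KinectFusion-style running average: each observed voxel's TSDF becomes a weight-blended mean of the incoming and stored values, with |tsdf| < 1 marking valid observations. A minimal dense PyTorch sketch of the same update rule on made-up voxel values:

```python
import torch

def weight_update(input_tsdf, input_w, local_tsdf, local_w):
    # Only voxels actually observed in the new frame (|tsdf| < 1) are fused.
    mask = torch.abs(input_tsdf) < 1
    new_w = input_w[mask] + local_w[mask]
    local_tsdf[mask] = (input_tsdf[mask] * input_w[mask]
                        + local_tsdf[mask] * local_w[mask]) / new_w
    local_w[mask] = new_w
    return local_tsdf, local_w

tsdf_in  = torch.tensor([0.2, 1.0, -0.5])   # 1.0 == unobserved, skipped
w_in     = torch.ones(3)
tsdf_loc = torch.tensor([0.6, 0.3, -0.1])
w_loc    = torch.tensor([3.0, 2.0, 1.0])
print(weight_update(tsdf_in, w_in, tsdf_loc, w_loc))
# first voxel -> (0.2*1 + 0.6*3)/4 = 0.5; second voxel left untouched
```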
LCB0B/metric
|
[
"0686ce80326b60ddde77989b82c218d94a016cd2"
] |
[
"main.py"
] |
[
"from copy import deepcopy\n# Import all the packages\nimport torch\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport numpy as np\nimport torch.optim as optim\nimport torch.nn.functional as f # create a dummy data\nimport matplotlib.pyplot as plt\nimport networkx as nx\nimport timeit\nfrom sklearn import metrics\nimport scipy.sparse as sparse\nimport scipy.stats as stats\nfrom sklearn import metrics\nfrom scipy.io import loadmat # this is the SciPy module that loads mat-files\nimport pandas as pd\nfrom torch_sparse import spspmm\nimport pandas as pd\nimport MDS_random_sampling\n\nstart = timeit.default_timer()\nCUDA = torch.cuda.is_available()\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n# device = torch.device(\"cpu\")\n# torch.set_default_tensor_type('torch.FloatTensor')\nif device.type != 'cpu':\n torch.set_default_tensor_type('torch.cuda.FloatTensor')\n\n\ndef train(n_epochs, epoch_rez, sample_rate, dataset, latent_dim, save, plot):\n print(latent_dim)\n print(dataset)\n losses = np.zeros(n_epochs)\n # ROC = np.zeros(n_epoch s/ /epoch_rez)\n # PR = np.zeros(n_epoch s/ /epoch_rez)\n\n # ################################################################################################################################################################\n # ################################################################################################################################################################\n\n relation = torch.from_numpy(np.loadtxt(dataset + '/relation.csv', delimiter=\",\")).to(device)\n\n # network size\n N = len(relation)\n # sample size of blocks-> sample_size*(sample_size-1)/2 pairs\n sample_size = int(sample_rate * N)\n # Missing_data refers to dyads that are not observed, setting it to True does not consider these pairs in the likelihood\n # Missing_data should be set to False for link_prediction since we do not consider these interactions as missing but as zeros.\n # def __init__(self,sparse_i,sparse_j, input_size,latent_dim,sample_size,non_sparse_i=None,non_sparse_j=None,sparse_i_rem=None,sparse_j_rem=None):\n model = MDS_random_sampling.MDS(torch.randn(N, latent_dim), relation, N, latent_dim=latent_dim,\n sample_size=sample_size, device=device).to(device)\n\n optimizer = optim.Adam(model.parameters(), 0.01)\n\n for epoch in range(n_epochs):\n\n loss = model.MDS_likelihood(epoch=epoch) / sample_size\n losses[epoch] = loss.item()\n\n optimizer.zero_grad() # clear the gradients.\n loss.backward() # backpropagate\n optimizer.step() # update the weights\n if epoch % epoch_rez == 0:\n # roc,pr=model.link_prediction() #perfom link prediction and return auc-roc, auc-pr\n\n print('Epoch: ', epoch)\n print('Loss: ', loss.item())\n # print('ROC:',roc)\n # print('PR:',pr)\n # ROC[epoch//epoch_rez] = roc\n # PR[epoch//epoch_rez] = pr\n if save:\n # Save latent and loss\n z = model.get_latent_coord()\n z_np = z.numpy()\n z_df = pd.DataFrame(z_np)\n z_df.to_csv(f'output/{dataset}_{latent_dim}_{n_epochs}_{sample_rate}_coord.csv')\n\n np.savetxt(f'output/{dataset}_{latent_dim}_{n_epochs}_{sample_rate}_loss.csv', losses, delimiter=\",\")\n\n if plot:\n plt.figure()\n plt.plot(losses)\n plt.ylabel('Loss')\n plt.xlabel('Epoch')\n plt.savefig(f'output/{dataset}_{latent_dim}_{n_epochs}_{sample_rate}_loss.png')\n\n plt.figure()\n plt.scatter(z_np[:,0],z_np[:,1],s=0.1)\n plt.savefig(f'output/{dataset}_{latent_dim}_{n_epochs}_{sample_rate}_scatter.png')\n 
return\n\n\nplt.style.use('ggplot')\ntorch.autograd.set_detect_anomaly(True)\n# cv=CV_missing_data(input_size=full_rank.shape[0],sparse_i_idx=sparse_i,sparse_j_idx=sparse_j,percentage=0.2)\n# sparse_i_rem_cv,sparse_j_rem_cv,non_sparse_i_cv,non_sparse_j_cv=cv.CV_Missing_ij()\n\nlatent_dims = [2]\ndataset = 'mnist'\nn_epochs = 100\nepoch_rez = 10\nsample_rates = [0.01, 0.1, 1]\nsave = 1\nplot = 1\n\nfor latent_dim in latent_dims:\n for sample_rate in sample_rates:\n train(n_epochs, epoch_rez, sample_rate, dataset, latent_dim, save, plot)\n"
] |
[
[
"torch.set_default_tensor_type",
"torch.autograd.set_detect_anomaly",
"matplotlib.pyplot.scatter",
"torch.randn",
"pandas.DataFrame",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylabel",
"torch.cuda.is_available",
"numpy.savetxt",
"matplotlib.pyplot.xlabel",
"numpy.zeros",
"matplotlib.pyplot.style.use",
"numpy.loadtxt",
"matplotlib.pyplot.figure"
]
] |
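main.py's inner loop follows the canonical PyTorch sequence: compute the loss, `zero_grad`, `backward`, `step`. One caveat the record glosses over: `z.numpy()` only works for CPU tensors, so on the CUDA path the latent coordinates need `.detach().cpu()` first. A minimal sketch with a stand-in latent matrix and toy objective (the MDS model itself is not reproduced here):

```python
import torch
import torch.optim as optim

theta = torch.randn(5, 2, requires_grad=True)   # stand-in latent coordinates
optimizer = optim.Adam([theta], lr=0.01)

for epoch in range(100):
    loss = (theta ** 2).sum()        # toy objective in place of MDS_likelihood
    optimizer.zero_grad()            # clear stale gradients
    loss.backward()                  # backpropagate
    optimizer.step()                 # update the coordinates

z_np = theta.detach().cpu().numpy()  # safe on CPU and CUDA alike
```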
nowkim/GeNER
|
[
"5a34f4f0b32f27a85771b6d7c39ed2e71ece6784"
] |
[
"densephrases/utils/open_utils.py"
] |
[
"import os\nimport random\nimport logging\nimport json\nimport torch\nimport numpy as np\n\nfrom densephrases import MIPS\nfrom densephrases.utils.single_utils import backward_compat\nfrom densephrases.utils.squad_utils import get_question_dataloader, TrueCaser\nfrom densephrases.utils.embed_utils import get_question_results\n\nfrom transformers import (\n MODEL_MAPPING,\n AutoConfig,\n AutoTokenizer,\n AutoModel,\n)\n\nlogging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S',\n level=logging.INFO)\nlogger = logging.getLogger(__name__)\ntruecase = None\n\n\ndef load_phrase_index(args, ignore_logging=False):\n # Configure paths for index serving\n phrase_dump_dir = os.path.join(args.dump_dir, args.phrase_dir)\n index_dir = os.path.join(args.dump_dir, args.index_name)\n index_path = os.path.join(index_dir, args.index_path)\n idx2id_path = os.path.join(index_dir, args.idx2id_path)\n\n # Load mips\n if 'aggregate' in args.__dict__.keys():\n logger.info(f'Aggregate: {args.aggregate}')\n mips = MIPS(\n phrase_dump_dir=phrase_dump_dir,\n index_path=index_path,\n idx2id_path=idx2id_path,\n cuda=args.cuda,\n logging_level=logging.WARNING if ignore_logging else (logging.DEBUG if args.verbose_logging else logging.INFO),\n )\n return mips\n\n\ndef load_cross_encoder(device, args):\n\n # Configure paths for cross-encoder serving\n cross_encoder = torch.load(\n os.path.join(args.load_dir, \"pytorch_model.bin\"), map_location=torch.device('cpu')\n )\n new_qd = {n[len('bert')+1:]: p for n, p in cross_encoder.items() if 'bert' in n}\n new_linear = {n[len('qa_outputs')+1:]: p for n, p in cross_encoder.items() if 'qa_outputs' in n}\n config, unused_kwargs = AutoConfig.from_pretrained(\n args.pretrained_name_or_path,\n cache_dir=args.cache_dir if args.cache_dir else None,\n return_unused_kwargs=True\n )\n tokenizer = AutoTokenizer.from_pretrained(\n args.tokenizer_name if args.tokenizer_name else args.pretrained_name_or_path,\n do_lower_case=args.do_lower_case,\n cache_dir=args.cache_dir if args.cache_dir else None,\n )\n model = AutoModel.from_pretrained(\n args.pretrained_name_or_path,\n from_tf=bool(\".ckpt\" in args.pretrained_name_or_path),\n config=config,\n cache_dir=args.cache_dir if args.cache_dir else None,\n )\n model.load_state_dict(new_qd)\n qa_outputs = torch.nn.Linear(config.hidden_size, 2)\n qa_outputs.load_state_dict(new_linear)\n ce_model = torch.nn.ModuleList(\n [model, qa_outputs]\n )\n ce_model.to(device)\n\n logger.info(f'CrossEncoder loaded from {args.load_dir} having {MODEL_MAPPING[config.__class__]}')\n logger.info('Number of model parameters: {:,}'.format(sum(p.numel() for p in ce_model.parameters())))\n return ce_model, tokenizer\n\n\ndef get_query2vec(query_encoder, tokenizer, args, batch_size=64):\n device = 'cuda' if args.cuda else 'cpu'\n def query2vec(queries):\n question_dataloader, question_examples, query_features = get_question_dataloader(\n queries, tokenizer, args.max_query_length, batch_size=batch_size\n )\n question_results = get_question_results(\n question_examples, query_features, question_dataloader, device, query_encoder, batch_size=batch_size\n )\n if args.verbose_logging:\n logger.info(f\"{len(query_features)} queries: {' '.join(query_features[0].tokens_)}\")\n outs = []\n for qr_idx, question_result in enumerate(question_results):\n out = (\n question_result.start_vec.tolist(), question_result.end_vec.tolist(), query_features[qr_idx].tokens_\n )\n outs.append(out)\n return outs\n return query2vec\n\n\ndef 
load_qa_pairs(data_path, args, q_idx=None, draft_num_examples=100, shuffle=False):\n q_ids = []\n questions = []\n answers = []\n titles = []\n data = json.load(open(data_path))['data']\n for data_idx, item in enumerate(data):\n if q_idx is not None:\n if data_idx != q_idx:\n continue\n q_id = item['id']\n if 'origin' in item:\n q_id = item['origin'].split('.')[0] + '-' + q_id\n question = item['question']\n if '[START_ENT]' in question:\n question = question[max(question.index('[START_ENT]')-300, 0):question.index('[END_ENT]')+300]\n answer = item['answers']\n title = item.get('titles', [''])\n if len(answer) == 0:\n continue\n q_ids.append(q_id)\n questions.append(question)\n answers.append(answer)\n titles.append(title)\n questions = [query[:-1] if query.endswith('?') else query for query in questions]\n # questions = [query.lower() for query in questions] # force lower query\n\n if args.do_lower_case:\n logger.info(f'Lowercasing queries')\n questions = [query.lower() for query in questions]\n\n if shuffle:\n qa_pairs = list(zip(q_ids, questions, answers, titles))\n random.shuffle(qa_pairs)\n q_ids, questions, answers, titles = zip(*qa_pairs)\n logger.info(f'Shuffling QA pairs')\n\n if args.draft:\n q_ids = np.array(q_ids)[:draft_num_examples].tolist()\n questions = np.array(questions)[:draft_num_examples].tolist()\n answers = np.array(answers)[:draft_num_examples].tolist()\n titles = np.array(titles)[:draft_num_examples].tolist()\n\n if args.truecase:\n try:\n global truecase\n if truecase is None:\n logger.info('loading truecaser')\n truecase = TrueCaser(os.path.join(os.environ['DATA_DIR'], args.truecase_path))\n logger.info('Truecasing queries')\n questions = [truecase.get_true_case(query) if query == query.lower() else query for query in questions]\n except Exception as e:\n print(e)\n\n logger.info(f'Loading {len(questions)} questions from {data_path}')\n logger.info(f'Sample Q ({q_ids[0]}): {questions[0]}, A: {answers[0]}, Title: {titles[0]}')\n return q_ids, questions, answers, titles\n\n"
] |
[
[
"torch.device",
"torch.nn.Linear",
"torch.nn.ModuleList",
"numpy.array"
]
] |
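`load_cross_encoder` above rebuilds two modules from a single checkpoint by slicing prefixed keys out of the saved state dict (`bert.*` for the encoder, `qa_outputs.*` for the linear head). A small sketch of that prefix-stripping idiom on a hypothetical checkpoint; unlike the record's substring test (`'bert' in n`), this variant matches the prefix explicitly:

```python
import torch

ckpt = {
    "bert.embed.weight": torch.zeros(2, 2),
    "qa_outputs.weight": torch.zeros(2, 2),
    "qa_outputs.bias": torch.zeros(2),
}

def strip_prefix(state, prefix):
    # "qa_outputs.weight" -> "weight" for keys under `prefix`.
    return {k[len(prefix) + 1:]: v for k, v in state.items()
            if k.startswith(prefix + ".")}

linear = torch.nn.Linear(2, 2)
linear.load_state_dict(strip_prefix(ckpt, "qa_outputs"))
```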
simsong/dp-demo
|
[
"c7ae7b96a1d957413b4ede3ce3a0e39803d07f3b"
] |
[
"python/demo_bottom_up.py"
] |
[
"#!/usr/bin/env python3\n#\n# Demonstrate the bottom-up mechanism\n\nimport math\nimport random\nimport numpy\nimport sys\nimport copy\nimport statistics\nif sys.version < '3':\n raise RuntimeError(\"Requires Python 3\")\n\n\n# \n# misc support functions\n\ndef l1_error(acounts,bcounts):\n error = 0\n for key in set(list(acounts.keys()) + list(bcounts.keys())):\n error += math.fabs( acounts.get(key,0) - bcounts.get(key,0))\n return error\n \n\n# Simple implementation of geometric noise generator\n\ndef geometric(p):\n x = 1\n sum = prod = p\n q = 1.0 - p\n u = random.random()\n while (sum < u):\n prod *= q\n sum += prod\n x += 1\n return x\n\ndef geometric_noise(budget, sensitivity):\n e = budget / sensitivity\n p = 1.0 - math.exp(-e)\n x = geometric(p) - 1\n y = geometric(p) - 1\n return (x - y)\n\ndef privitize_categories(counts : dict, epsilon : float, sensitivity : int) -> dict:\n \"\"\"Apply geometric noise to each of the counts, and then optimize to make all counts equal N\"\"\"\n \n assert type(counts) == dict\n assert type(epsilon) == float\n assert type(sensitivity) == int\n\n real_total = sum( counts.values())\n\n # Add the noise to the counts and remember the noise\n pcounts = dict()\n noises = dict()\n for cat in counts.keys():\n noises[cat] = geometric_noise(epsilon,sensitivity)\n pcounts[cat] = counts[cat] +noises[cat]\n\n # If any of the values are negative, add a constant value to all\n # of the values so that none are negative. This prevents an\n # initial split of (-100,100) from being changed to (0,100) and\n # never recovering the 0 to a higher number\n\n bias = min(pcounts.values())\n if bias < 0 :\n for cat in pcounts.keys():\n pcounts[cat] -= bias\n\n # Now repeat until there is no error\n assert min(pcounts.values()) >= 0\n while True:\n error = real_total - sum( pcounts.values())\n if error==0:\n break\n cat = random.choice( list( pcounts.keys()))\n pcounts[cat] = max( pcounts[cat] + numpy.sign(error), 0 )\n\n assert min(pcounts.values()) >= 0\n return (pcounts,noises)\n \nif __name__==\"__main__\":\n import argparse\n import sys\n import time\n\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument(\"--debug\",action=\"store_true\",help=\"write results to STDOUT\")\n parser.add_argument(\"--epsilon\",\"-e\",type=float,default=1.0,help=\"Specify value of epsilon\")\n parser.add_argument(\"--sensitivity\",\"-s\",type=int,default=1,help=\"Specify value of sensitivity\")\n parser.add_argument(\"--integer\",\"-i\",action=\"store_true\",\n help=\"Force results to be integers (use geometric mechanism)\")\n parser.add_argument(\"--seed\",help=\"specify PRNG seed\")\n parser.add_argument(\"--repeat\",\"-r\",help=\"repeat count\",type=int,default=1)\n parser.add_argument(\"--loop\",help=\"Loop epsilon using min:max:step\")\n parser.add_argument(\"--graph\",help=\"Draw a graph of the average error, output to specified file\")\n parser.add_argument(\"count1\",help=\"category:count for the first category\")\n parser.add_argument(\"count2\",help=\"category:count for the second category\")\n parser.add_argument(\"counts\",nargs=\"*\",help=\"Additional categories ...\")\n\n args = parser.parse_args()\n seed = args.seed if args.seed else int(time.time())\n prng = numpy.random.RandomState(seed)\n \n # build the dictionary of 'category':count\n def csplit(s):\n (cat,count) = s.split(\":\")\n return (cat, int(count))\n \n counts = dict( [ csplit(args.count1), csplit(args.count2) ] + [csplit(c) for c in args.counts] )\n\n 
print(counts)\n if args.loop:\n (emin,emax,estep) = [float(x) for x in args.loop.split(\":\")]\n else:\n emin = args.epsilon\n emax = args.epsilon\n estep = 1\n\n epsilon = emin\n epsilon_vars = [] # perhaps we will graph (epsilon,error)\n while epsilon <= emax:\n errors = []\n for r in range( args.repeat ):\n (cpriv,noises) = privitize_categories(counts,epsilon, args.sensitivity)\n error = l1_error( counts, cpriv )\n fmt = \" \".join([\"{}:{}\".format(k,cpriv[k]) for k in sorted(cpriv.keys())])\n errors.append(error)\n if args.repeat<20:\n print(\"Run {}: ε={:.6g} Error: {} counts: {}\".format(r+1,epsilon,error,fmt))\n average_error = statistics.mean(errors)\n print(\"ε={:.6g} Average Error: {}\".format(epsilon,average_error))\n epsilon_vars.append((epsilon,average_error))\n epsilon += estep\n if args.repeat<20:\n print(\"\\n\")\n print(\"Graph:\")\n for row in epsilon_vars:\n print(\"{:.6g}, {} {}\".format(row[0],row[1],1/(row[1]+1)))\n\n \n if args.graph:\n import matplotlib.pyplot as plt\n import numpy as np\n import math\n\n (eps,err) = zip(*epsilon_vars)\n\n N = sum(counts.values())\n acc = [1-(0.5*e)/N for e in err]\n plt.plot(eps,acc)\n plt.xlabel('Privacy Loss Budget (ε)')\n plt.ylabel('accuracy')\n plt.title('Higher privacy loss results in higher accuracy')\n plt.grid(True)\n plt.savefig(args.graph)\n plt.show()\n \n\n"
] |
[
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"numpy.sign",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel",
"numpy.random.RandomState",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] |
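`geometric_noise` above samples the two-sided geometric (discrete Laplace) distribution as the difference of two one-sided geometric draws with p = 1 - exp(-epsilon/sensitivity), the integer-valued mechanism appropriate for counting queries. An equivalent sketch using NumPy's built-in geometric sampler in place of the hand-rolled inversion loop:

```python
import math
import numpy as np

def geometric_noise(budget, sensitivity, size, rng):
    p = 1.0 - math.exp(-budget / sensitivity)
    # NumPy's sampler counts trials (support 1, 2, ...), so subtract 1
    # to get the support {0, 1, ...} used in the record.
    x = rng.geometric(p, size=size) - 1
    y = rng.geometric(p, size=size) - 1
    return x - y

rng = np.random.default_rng(0)
noise = geometric_noise(budget=1.0, sensitivity=1, size=100_000, rng=rng)
print(noise.mean())   # approximately 0: the distribution is symmetric
```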
tatigabru/kaggle-lyft
|
[
"217be181b1bd2db1f5fd2707ea2cf4c2bf809736",
"217be181b1bd2db1f5fd2707ea2cf4c2bf809736"
] |
[
"src/models/pspnet.py",
"src/train_mask.py"
] |
[
"\"\"\"Pyramid Scene Parsing Network\r\nhttps://github.com/Tramac/awesome-semantic-segmentation-pytorch/blob/master/core/models/pspnet.py\r\n\"\"\"\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\n\r\nfrom .segbase import SegBaseModel\r\nfrom .fcn import _FCNHead\r\n\r\n__all__ = ['PSPNet', 'get_psp', 'get_psp_resnet50_voc', 'get_psp_resnet50_ade', 'get_psp_resnet101_voc',\r\n 'get_psp_resnet101_ade', 'get_psp_resnet101_citys', 'get_psp_resnet101_coco']\r\n\r\n\r\nclass PSPNet(SegBaseModel):\r\n \"\"\"Pyramid Scene Parsing Network\r\n Parameters\r\n ----------\r\n nclass : int\r\n Number of categories for the training dataset.\r\n backbone : string\r\n Pre-trained dilated backbone network type (default:'resnet50'; 'resnet50',\r\n 'resnet101' or 'resnet152').\r\n norm_layer : object\r\n Normalization layer used in backbone network (default: :class:`nn.BatchNorm`;\r\n for Synchronized Cross-GPU BachNormalization).\r\n aux : bool\r\n Auxiliary loss.\r\n Reference:\r\n Zhao, Hengshuang, Jianping Shi, Xiaojuan Qi, Xiaogang Wang, and Jiaya Jia.\r\n \"Pyramid scene parsing network.\" *CVPR*, 2017\r\n \"\"\"\r\n\r\n def __init__(self, nclass, backbone='resnet50', aux=False, pretrained_base=True, **kwargs):\r\n super(PSPNet, self).__init__(nclass, aux, backbone, pretrained_base=pretrained_base, **kwargs)\r\n self.head = _PSPHead(nclass, **kwargs)\r\n if self.aux:\r\n self.auxlayer = _FCNHead(1024, nclass, **kwargs)\r\n\r\n self.__setattr__('exclusive', ['head', 'auxlayer'] if aux else ['head'])\r\n\r\n def forward(self, x):\r\n size = x.size()[2:]\r\n _, _, c3, c4 = self.base_forward(x)\r\n outputs = []\r\n x = self.head(c4)\r\n x = F.interpolate(x, size, mode='bilinear', align_corners=True)\r\n outputs.append(x)\r\n\r\n if self.aux:\r\n auxout = self.auxlayer(c3)\r\n auxout = F.interpolate(auxout, size, mode='bilinear', align_corners=True)\r\n outputs.append(auxout)\r\n return tuple(outputs)\r\n\r\n\r\ndef _PSP1x1Conv(in_channels, out_channels, norm_layer, norm_kwargs):\r\n return nn.Sequential(\r\n nn.Conv2d(in_channels, out_channels, 1, bias=False),\r\n norm_layer(out_channels, **({} if norm_kwargs is None else norm_kwargs)),\r\n nn.ReLU(True)\r\n )\r\n\r\n\r\nclass _PyramidPooling(nn.Module):\r\n def __init__(self, in_channels, **kwargs):\r\n super(_PyramidPooling, self).__init__()\r\n out_channels = int(in_channels / 4)\r\n self.avgpool1 = nn.AdaptiveAvgPool2d(1)\r\n self.avgpool2 = nn.AdaptiveAvgPool2d(2)\r\n self.avgpool3 = nn.AdaptiveAvgPool2d(3)\r\n self.avgpool4 = nn.AdaptiveAvgPool2d(6)\r\n self.conv1 = _PSP1x1Conv(in_channels, out_channels, **kwargs)\r\n self.conv2 = _PSP1x1Conv(in_channels, out_channels, **kwargs)\r\n self.conv3 = _PSP1x1Conv(in_channels, out_channels, **kwargs)\r\n self.conv4 = _PSP1x1Conv(in_channels, out_channels, **kwargs)\r\n\r\n def forward(self, x):\r\n size = x.size()[2:]\r\n feat1 = F.interpolate(self.conv1(self.avgpool1(x)), size, mode='bilinear', align_corners=True)\r\n feat2 = F.interpolate(self.conv2(self.avgpool2(x)), size, mode='bilinear', align_corners=True)\r\n feat3 = F.interpolate(self.conv3(self.avgpool3(x)), size, mode='bilinear', align_corners=True)\r\n feat4 = F.interpolate(self.conv4(self.avgpool4(x)), size, mode='bilinear', align_corners=True)\r\n return torch.cat([x, feat1, feat2, feat3, feat4], dim=1)\r\n\r\n\r\nclass _PSPHead(nn.Module):\r\n def __init__(self, nclass, norm_layer=nn.BatchNorm2d, norm_kwargs=None, **kwargs):\r\n super(_PSPHead, self).__init__()\r\n self.psp = _PyramidPooling(2048, 
norm_layer=norm_layer, norm_kwargs=norm_kwargs)\r\n self.block = nn.Sequential(\r\n nn.Conv2d(4096, 512, 3, padding=1, bias=False),\r\n norm_layer(512, **({} if norm_kwargs is None else norm_kwargs)),\r\n nn.ReLU(True),\r\n nn.Dropout(0.1),\r\n nn.Conv2d(512, nclass, 1)\r\n )\r\n\r\n def forward(self, x):\r\n x = self.psp(x)\r\n return self.block(x)\r\n\r\n\r\ndef get_psp(dataset='pascal_voc', backbone='resnet50', pretrained=False, root='~/.torch/models',\r\n pretrained_base=True, **kwargs):\r\n \"\"\"Pyramid Scene Parsing Network\r\n Parameters\r\n ----------\r\n dataset : str, default pascal_voc\r\n The dataset that model pretrained on. (pascal_voc, ade20k)\r\n pretrained : bool or str\r\n Boolean value controls whether to load the default pretrained weights for model.\r\n String value represents the hashtag for a certain version of pretrained weights.\r\n root : str, default '~/.torch/models'\r\n Location for keeping the model parameters.\r\n pretrained_base : bool or str, default True\r\n This will load pretrained backbone network, that was trained on ImageNet.\r\n Examples\r\n --------\r\n >>> model = get_psp(dataset='pascal_voc', backbone='resnet50', pretrained=False)\r\n >>> print(model)\r\n \"\"\"\r\n acronyms = {\r\n 'pascal_voc': 'pascal_voc',\r\n 'pascal_aug': 'pascal_aug',\r\n 'ade20k': 'ade',\r\n 'coco': 'coco',\r\n 'citys': 'citys',\r\n }\r\n from ..data.dataloader import datasets\r\n model = PSPNet(datasets[dataset].NUM_CLASS, backbone=backbone, pretrained_base=pretrained_base, **kwargs)\r\n if pretrained:\r\n from .model_store import get_model_file\r\n device = torch.device(kwargs['local_rank'])\r\n model.load_state_dict(torch.load(get_model_file('psp_%s_%s' % (backbone, acronyms[dataset]), root=root),\r\n map_location=device))\r\n return model\r\n\r\n\r\ndef get_psp_resnet50_voc(**kwargs):\r\n return get_psp('pascal_voc', 'resnet50', **kwargs)\r\n\r\n\r\ndef get_psp_resnet50_ade(**kwargs):\r\n return get_psp('ade20k', 'resnet50', **kwargs)\r\n\r\n\r\ndef get_psp_resnet101_voc(**kwargs):\r\n return get_psp('pascal_voc', 'resnet101', **kwargs)\r\n\r\n\r\ndef get_psp_resnet101_ade(**kwargs):\r\n return get_psp('ade20k', 'resnet101', **kwargs)\r\n\r\n\r\ndef get_psp_resnet101_citys(**kwargs):\r\n return get_psp('citys', 'resnet101', **kwargs)\r\n\r\n\r\ndef get_psp_resnet101_coco(**kwargs):\r\n return get_psp('coco', 'resnet101', **kwargs)\r\n\r\n\r\nif __name__ == '__main__':\r\n model = get_psp_resnet50_voc()\r\n img = torch.randn(4, 3, 480, 480)\r\n output = model(img)",
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nAn Instance segmentation model for Lyft Dataset\r\nIn our case, we want to fine-tune from a pre-trained on coco \r\nMask-RCNN model on our dataset. \r\n\r\n\"\"\"\r\nimport argparse\r\nimport collections\r\nimport datetime\r\nimport glob\r\nimport os\r\nimport pickle\r\nimport random\r\nimport sys\r\nimport time\r\n\r\nimport cv2\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom PIL import Image, ImageFile\r\nfrom skimage.color import label2rgb\r\nfrom sklearn.preprocessing import LabelEncoder\r\nfrom tqdm import tqdm\r\n\r\nimport albumentations as A\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nimport torch.utils.data\r\nimport torchvision\r\n# my imports\r\nfrom coco_helpers.my_engine import evaluate, train_one_epoch\r\nfrom coco_helpers.utils import collate_fn\r\nfrom configs import (BEV_SHAPE, DATA_ROOT, IMG_SIZE, NUM_CLASSES, ON_SERVER,\r\n OUTPUT_ROOT, PROJECT_ROOT)\r\nfrom datasets.bev_dataset_coco import BevDatasetAugs\r\nfrom datasets.transforms import (D4_transforms, augment_and_show,\r\n train_transforms, valid_transforms,\r\n visualize_bbox)\r\n# lyft SDK imports\r\nfrom lyft_dataset_sdk.lyftdataset import LyftDataset\r\nfrom lyft_dataset_sdk.utils.data_classes import (Box, LidarPointCloud,\r\n Quaternion)\r\nfrom lyft_dataset_sdk.utils.geometry_utils import transform_matrix, view_points\r\nfrom models.models import get_maskrcnn_model\r\nfrom torch.utils.data import DataLoader\r\nfrom torchvision.models.detection.faster_rcnn import FastRCNNPredictor\r\nfrom torchvision.models.detection.mask_rcnn import MaskRCNNPredictor\r\nfrom utilities.utils import set_seed\r\n\r\nNUM_CLASSES = NUM_CLASSES + 1 # + 1 for background\r\nSAVE_PATH = OUTPUT_ROOT + '/maskrcnn'\r\n\r\n\r\ndef load_model_optim(model, optimizer, checkpoint_path: str):\r\n \"\"\"Loads model weigths, optimizer, epoch to continuer training\r\n \"\"\"\r\n checkpoint = torch.load(checkpoint_path)\r\n model.load_state_dict(checkpoint['model'])\r\n optimizer.load_state_dict(checkpoint['optimizer'])\r\n epoch = checkpoint['epoch']\r\n print('Loaded model from {}, epoch {}'.format(checkpoint_path, epoch))\r\n\r\n\r\ndef train(model, model_name: str, data_folder: str, level5data, \r\n fold: int, debug=False, img_size=IMG_SIZE, bev_shape=BEV_SHAPE,\r\n epochs=15, batch_size = 8, num_workers=4, learning_rate=1e-3, resume_weights='', resume_epoch=0):\r\n \"\"\"\r\n Model training\r\n \r\n Input: \r\n model : PyTorch model\r\n model_name : string name for model for checkpoints saving\r\n fold: evaluation fold number, 0-3\r\n debug: if True, runs the debugging on few images \r\n img_size: size of images for training (for pregressive learning)\r\n epochs: number of epochs to train\r\n batch_size: number of images in batch\r\n num_workers: number of workers available\r\n resume_weights: directory with weights to resume (if avaialable)\r\n resume_epoch: number of epoch to continue training \r\n \"\"\"\r\n device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\r\n print(f'device: {device}')\r\n\r\n # weight loss for the 0 class lower to account for (some of) the big class imbalance\r\n class_weights = torch.from_numpy(np.array([0.2] + [1.0]*NUM_CLASSES, dtype=np.float32))\r\n class_weights = class_weights.to(device)\r\n\r\n #creates directories for checkpoints, tensorboard and predicitons\r\n checkpoints_dir = f'{OUTPUT_ROOT}/checkpoints/{model_name}_fold_{fold}'\r\n history_dir = 
f'{OUTPUT_ROOT}/history/{model_name}_fold_{fold}'\r\n predictions_dir = f'{OUTPUT_ROOT}/oof/{model_name}_fold_{fold}'\r\n os.makedirs(checkpoints_dir, exist_ok=True)\r\n os.makedirs(history_dir, exist_ok=True)\r\n os.makedirs(predictions_dir, exist_ok=True)\r\n print('\\n', model_name, '\\n')\r\n\r\n # BEV conversion parameters \r\n voxel_size = (0.2, 0.2, 1.5)\r\n z_offset = -2.0 \r\n\r\n # choose inputs/targets\r\n input_filepaths = sorted(glob.glob(os.path.join(data_folder, \"*_input.png\")))\r\n sample_tokens = [x.split(\"/\")[-1].replace(\"_input.png\",\"\") for x in input_filepaths]\r\n sample_tokens = [x.replace(\"bev_data\\\\\",\"\") for x in sample_tokens] \r\n\r\n # train samples\r\n df = pd.read_csv('folds/train_samples.csv')\r\n train_df = df[df['samples'].isin(sample_tokens)]\r\n print('train samples: ', train_df.head())\r\n\r\n # validation samples\r\n df = pd.read_csv('folds/val_samples.csv')\r\n valid_df = df[df['samples'].isin(sample_tokens)]\r\n print('valid samples: ', valid_df.head())\r\n \r\n # load weights to continue training\r\n if resume_weights != '':\r\n print('Load model from: {}'.format(resume_weights))\r\n checkpoint = torch.load(resume_weights)\r\n model.load_state_dict(checkpoint['model'])\r\n resume_epoch = checkpoint['epoch']+1 \t\r\n \r\n model.to(device)\r\n\r\n # optimizer and schedulers \r\n print(f'learning_rate: {learning_rate}')\r\n optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\r\n #optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, momentum=0.9, weight_decay=0.0005)\r\n #lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=2, verbose=True, factor=0.2)\r\n lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=3, gamma=0.2) \r\n\r\n # datasets for train and validation \r\n train_dataset = BevDatasetAugs(fold=fold, df=train_df, \r\n level5data = level5data,\r\n debug=debug, \r\n img_size=bev_shape[0], \r\n input_dir=data_folder, \r\n transforms = train_transforms, \r\n bev_shape = bev_shape,\r\n voxel_size = voxel_size, \r\n z_offset = z_offset)\r\n\r\n valid_dataset = BevDatasetAugs(fold=fold, df=valid_df, \r\n level5data = level5data,\r\n debug=debug, \r\n img_size=bev_shape[0], \r\n input_dir=data_folder, \r\n # transforms = valid_transforms, \r\n bev_shape = bev_shape,\r\n voxel_size = voxel_size, \r\n z_offset = z_offset)\r\n\r\n # dataloaders for train and validation\r\n train_loader = DataLoader(train_dataset, \r\n num_workers=num_workers,\r\n batch_size=batch_size,\r\n shuffle=True,\r\n collate_fn=collate_fn)\r\n\r\n valid_loader = DataLoader(valid_dataset,\r\n num_workers=num_workers,\r\n batch_size=1,\r\n shuffle=False,\r\n collate_fn=collate_fn)\r\n\r\n print('{} training images, {} validation images'.format(len(train_dataset), len(valid_dataset)))\r\n\r\n # training cycle \r\n print(\"Start training\")\r\n start_time = time.time()\r\n for epoch in range(epochs): \r\n # train for one epoch, printing every 10 iterations\r\n train_one_epoch(model, optimizer, None, train_loader, device, epoch, print_freq=10, warmup = True)\r\n #lr_scheduler.step()\r\n\r\n # save model after every epoch\r\n torch.save({\r\n 'model': model.state_dict(),\r\n 'optimizer': optimizer.state_dict(),\r\n #'lr_scheduler': scheduler.state_dict(),\r\n 'epoch': epoch, \r\n }, os.path.join(checkpoints_dir, f'{model_name}_fold_{fold}_epoch_{epoch}.pth')) \r\n \t \r\n # validate model after every epoch\r\n #evaluate(model, valid_loader, device=device)\r\n #valid_loss = validate(model, valid_loader, 
class_weights,\r\n # epoch, predictions_dir, save_oof = True)\r\n \r\n \r\n total_time = time.time() - start_time\r\n total_time_str = str(datetime.timedelta(seconds=int(total_time)))\r\n print('Training time {}'.format(total_time_str))\r\n \r\n return model\r\n\r\n\r\ndef validate(model, dataloader_valid, class_weights, epoch: int, \r\n predictions_dir: str, save_oof=True):\r\n \"\"\"\r\n Validate model at the epoch end \r\n \r\n Input: \r\n model: current model \r\n dataloader_valid: dataloader for the validation fold\r\n device: CUDA or CPU\r\n epoch: current epoch\r\n save_oof: boolean flag, if calculate oof predictions and save them in pickle \r\n save_oof_numpy: boolean flag, if save oof predictions in numpy \r\n predictions_dir: directory fro saving predictions\r\n Output:\r\n loss_valid: total validation loss, history \r\n \"\"\"\r\n device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\r\n print(device)\r\n \r\n with torch.no_grad():\r\n model.eval()\r\n val_losses = []\r\n progress_bar = tqdm(dataloader_valid, total=len(dataloader_valid))\r\n\r\n for iter_num, (img, target) in enumerate(progress_bar):\r\n img = img.to(device) # [N, 3, H, W]\r\n target = target.to(device) # [N, H, W] with class indices (0, 1)\r\n prediction = model(img) # [N, 2, H, W]\r\n loss = F.cross_entropy(prediction, target, weight=class_weights)\r\n val_losses.append(loss.detach().cpu().numpy()) \r\n print(\"Epoch {}, Valid Loss: {}\".format(epoch, np.mean(val_losses)))\r\n \r\n return np.mean(val_losses) \r\n\r\n\r\ndef predict(model, dataset_test): \r\n device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')\r\n img, _ = dataset_test[0]\r\n model.eval()\r\n with torch.no_grad():\r\n prediction = model([img.to(device)]) \r\n return prediction \r\n\r\n\r\ndef main():\r\n parser = argparse.ArgumentParser() \r\n arg = parser.add_argument \r\n arg('--model_name', type=str, default='mask_512', help='String model name from models dictionary')\r\n arg('--seed', type=int, default=1234, help='Random seed')\r\n arg('--fold', type=int, default=0, help='Validation fold')\r\n arg('--weights_dir', type=str, default='', help='Directory for loading model weights')\r\n arg('--epochs', type=int, default=12, help='Current epoch')\r\n arg('--lr', type=float, default=1e-3, help='Initial learning rate')\r\n arg('--debug', type=bool, default=False, help='If the debugging mode')\r\n args = parser.parse_args() \r\n set_seed(args.seed)\r\n\r\n # get data\r\n if ON_SERVER:\r\n level5data = LyftDataset(data_path = '.', json_path='../../input/train_data', verbose=True) # server\r\n else:\r\n level5data = LyftDataset(data_path = '../input/', json_path='../input/train_data', verbose=True) # local laptop\r\n \r\n classes = [\"car\", \"motorcycle\", \"bus\", \"bicycle\", \"truck\", \"pedestrian\", \"other_vehicle\", \"animal\", \"emergency_vehicle\"]\r\n \r\n # \"bev\" folder\r\n data_folder = os.path.join(OUTPUT_ROOT, \"bev_data\")\r\n\r\n # choose model\r\n model = get_maskrcnn_model(NUM_CLASSES) \r\n \r\n checkpoint= f'{OUTPUT_ROOT}/checkpoints/'\r\n\r\n train(model, model_name='mask_512', data_folder=data_folder, \r\n level5data = level5data, \r\n fold=args.fold, debug=args.debug, img_size=IMG_SIZE, bev_shape=BEV_SHAPE,\r\n epochs=args.epochs, batch_size=16, num_workers=4, \r\n learning_rate = args.lr, resume_weights=args.weights_dir, resume_epoch=0)\r\n\r\n \r\nif __name__ == '__main__': \r\n main() \r\n"
] |
[
[
"torch.nn.Dropout",
"torch.cat",
"torch.randn",
"torch.nn.Conv2d",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.functional.interpolate",
"torch.device",
"torch.nn.ReLU"
],
[
"pandas.read_csv",
"torch.load",
"torch.nn.functional.cross_entropy",
"torch.utils.data.DataLoader",
"numpy.mean",
"torch.no_grad",
"torch.cuda.is_available",
"torch.device",
"numpy.array",
"torch.optim.lr_scheduler.StepLR"
]
] |
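`_PyramidPooling` above pools the 2048-channel backbone features to 1x1, 2x2, 3x3 and 6x6 grids, projects each branch to 512 channels with a 1x1 convolution, upsamples back to the input resolution, and concatenates everything into 4096 channels for `_PSPHead`. A stripped-down sketch of that flow (BatchNorm and ReLU omitted, layers built inline rather than registered on a module):

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

x = torch.randn(1, 2048, 60, 60)              # backbone C4 features
branches = []
for bins in (1, 2, 3, 6):
    pooled = nn.AdaptiveAvgPool2d(bins)(x)    # (1, 2048, bins, bins)
    proj = nn.Conv2d(2048, 512, 1, bias=False)(pooled)
    up = F.interpolate(proj, size=x.shape[2:],
                       mode='bilinear', align_corners=True)
    branches.append(up)
out = torch.cat([x] + branches, dim=1)
print(out.shape)                              # torch.Size([1, 4096, 60, 60])
```

Concatenating the original map with the four pooled branches is what gives 2048 + 4*512 = 4096 input channels to the head's 3x3 convolution.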
hooloovooblu/satisfactory-mesh-spawner
|
[
"d824d13d591af1e7263d8c34389235c70fc0feeb"
] |
[
"mesh_spawner.py"
] |
[
"import numpy as np\r\nimport json\r\nfrom scipy.spatial.transform import Rotation as R\r\nfrom numpy.linalg import norm\r\nimport trimesh\r\nimport time\r\nimport subprocess\r\n\r\n\r\n# attach to logger so trimesh messages will be printed to console\r\n#trimesh.util.attach_to_log()\r\n\r\nitem_counts = {}\r\n\r\ndef find_item_id_offset(save_json):\r\n max_id = None\r\n for actor in save_json[\"actors\"]:\r\n if actor[\"className\"] == \"/Game/FactoryGame/Resource/BP_ItemPickup_Spawnable.BP_ItemPickup_Spawnable_C\":\r\n actor_id = int(actor[\"pathName\"].split(\"_\")[-1])\r\n if not max_id or actor_id > max_id:\r\n max_id = actor_id\r\n return max_id\r\n \r\n\r\ndef normal_vector_to_quat(axis_vector):\r\n try:\r\n up_vector = np.array([0,0,1])\r\n right_vector = np.cross(axis_vector, up_vector)\r\n right_vector = right_vector / norm(right_vector)\r\n angle = -1.0 * np.arccos(np.dot(axis_vector, up_vector))\r\n q = R.from_quat(np.array([right_vector[0], right_vector[1], right_vector[2], angle]))\r\n return q.as_quat()\r\n except:\r\n return R.identity().as_quat()\r\n\r\nclass PointWriter:\r\n def __init__(self,\r\n save_json,\r\n material,\r\n item_tmpl_path=\"dropped_item.json\"):\r\n self.save_json = save_json\r\n self.material = material\r\n with open(item_tmpl_path, 'rb') as f:\r\n self.item_tmpl = json.load(f)\r\n self.item_tmpl[\"entity\"][\"properties\"][0][\"value\"][\"properties\"][0][\"value\"][\"itemName\"] = material\r\n\r\n def write_point(self, point3d, item_id, rotationQuat):\r\n item = json.loads(json.dumps(self.item_tmpl))\r\n if rotationQuat is not None:\r\n item[\"transform\"][\"rotation\"] = [v.item() for v in rotationQuat]\r\n item[\"transform\"][\"translation\"] = [v.item() for v in point3d]\r\n item[\"pathName\"] = \"Persistent_Level:PersistentLevel.BP_ItemPickup_Spawnable_C_\" + str(item_id)\r\n save_json[\"actors\"].append(item)\r\n if self.material not in item_counts:\r\n item_counts[self.material] = 0\r\n item_counts[self.material] += 1\r\n\r\nclass MeshWriter(object):\r\n def __init__(self, image_paths, image_materials, save_json, points_method, translation = np.array([0.,0.,0.]), scale = 1.0, rotation=np.array([0,0,0])):\r\n self.point_writers = [PointWriter(save_json, material) for material in image_materials]\r\n self.meshes = [trimesh.load(image_path) for image_path in image_paths]\r\n tx = trimesh.transformations.compose_matrix(scale=np.array([scale]*3), translate=translation, angles=rotation)\r\n for mesh in self.meshes:\r\n mesh.apply_transform(tx)\r\n mesh.fix_normals(multibody=True)\r\n \r\n self.curr_id = find_item_id_offset(save_json) + 1\r\n self.points_method = points_method\r\n \r\n def write_meshes(self):\r\n for writer, (points, rotations) in zip(self.point_writers, self.points_method(self.meshes)):\r\n for point, rotation in zip(points, rotations):\r\n writer.write_point(point, self.curr_id, rotation)\r\n self.curr_id += 1\r\n \r\n\r\n\r\n# method 1: use the center of all mesh triangles to position points for items\r\ndef centroids(meshes):\r\n for mesh in meshes:\r\n yield mesh.triangles.mean(1), np.array([normal_vector_to_quat(v) for v in mesh.face_normals]) \r\n\r\n# roughly how many items will be spawned\r\nMAX_SAMPLES = 200000\r\nmax_per_mesh = 80000\r\n# method 2: sample the surface of the mesh weighted by mesh area\r\ndef samples(meshes):\r\n total_area = sum(mesh.area for mesh in meshes)\r\n weights = [mesh.area / total_area for mesh in meshes]\r\n for mesh, weight in zip(meshes, weights):\r\n requested_samples = 
min(int(MAX_SAMPLES*weight), max_per_mesh)\r\n points, faces = trimesh.sample.sample_surface_even(mesh, requested_samples)\r\n quats = np.array([normal_vector_to_quat(mesh.face_normals[i]) for i in faces])\r\n yield points, quats\r\n\r\n\r\n# Pick a point on the map where you want to spawn things in\r\ntranslation = np.array([-183178.09375, -71177.40625, 24282.08203125 + 10000])\r\n# Experiment with this number for your models until it looks good\r\nscale = 200\r\n\r\n# locations of your models, one per material type\r\n# you can find the item name / blueprint path for most items on the wiki\r\nteapot_paths = [\r\n # https://en.wikipedia.org/wiki/Utah_teapot#/media/File:Utah_teapot_(solid).stl\r\n 'teapot.stl',\r\n]\r\nteapot_item_names = [\r\n \"/Game/FactoryGame/Resource/Parts/IronPlate/Desc_IronPlate.Desc_IronPlate_C\"\r\n]\r\n\r\n# unrelated, pls ignore\r\ndef tame_doggos(save_json):\r\n for actor in save_json[\"actors\"]:\r\n if actor[\"className\"] == \"/Game/FactoryGame/Character/Creature/Wildlife/SpaceRabbit/Char_SpaceRabbit.Char_SpaceRabbit_C\":\r\n actor[\"entity\"][\"properties\"].append(json.loads(\"\"\" {\r\n \"name\": \"mFriendActor\",\r\n \"type\": \"ObjectProperty\",\r\n \"index\": 0,\r\n \"value\": {\r\n \"levelName\": \"Persistent_Level\",\r\n \"pathName\": \"Persistent_Level:PersistentLevel.Char_Player_C_3\"\r\n }\r\n }\"\"\"))\r\n\r\n# prepare a save with https://github.com/ficsit-felix/satisfactory-json sav2json.js\r\n# or with https://ficsit-felix.netlify.app/#/ (More -> Export json)\r\njson_save_path = \"debug.json\"\r\nwith open(json_save_path, 'rb') as g:\r\n print(\"Loading json\")\r\n save_json = json.load(g)\r\n #tame_doggos(save_json)\r\n print(\"Loading meshes\")\r\n mw = MeshWriter(\r\n teapot_paths,\r\n teapot_item_names,\r\n save_json, samples,\r\n translation,\r\n scale,\r\n # euler xyz degrees\r\n rotation=np.array([0.,0.,0.]))\r\n print(\"Writing meshes\")\r\n mw.write_meshes()\r\n\r\n \r\njson_save_out_path = \"debug_img.json\"\r\nwith open(json_save_out_path, 'w') as f:\r\n # set the save time to now to bump to the top of the load list\r\n save_json[\"saveDateTime\"] = str(time.time_ns())\r\n print(\"Dumping json\")\r\n json.dump(obj=save_json, fp=f, indent=\" \")\r\n\r\n# convert back to a save with https://github.com/ficsit-felix/satisfactory-json json2sav.js\r\n# or with https://ficsit-felix.netlify.app/#/open/json\r\nprint(\"Writing save\")\r\nprint(subprocess.run(args=[\r\n \"node\",\r\n \"json2sav.js\",\r\n \"debug_img.json\",\r\n \"debug_img.sav\"],\r\n check=False, capture_output=True))\r\n\r\nfor item in item_counts:\r\n print(item, item_counts[item])\r\n"
] |
[
[
"numpy.dot",
"numpy.linalg.norm",
"scipy.spatial.transform.Rotation.identity",
"numpy.cross",
"numpy.array"
]
] |
prmiles/mcmcplotly
|
[
"270112813d6e59ac5d6329d050ed2eb95144e30c"
] |
[
"test/general_functions.py"
] |
[
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jun 29 15:35:08 2018\n\n@author: prmiles\n\"\"\"\nimport numpy as np\nimport os\n\n\ndef removekey(d, key):\n r = dict(d)\n del r[key]\n return r\n\n\n# define test model function\ndef modelfun(xdata, theta):\n m = theta[0]\n b = theta[1]\n nrow = xdata.shape[0]\n y = np.zeros([nrow, 1])\n y[:, 0] = m*xdata.reshape(nrow,) + b\n return y\n\n\ndef ssfun(theta, data, local=None):\n xdata = data.xdata[0]\n ydata = data.ydata[0]\n # eval model\n ymodel = modelfun(xdata, theta)\n # calc sos\n ss = sum((ymodel[:, 0] - ydata[:, 0])**2)\n return ss\n\n\n# define test model function\ndef predmodelfun(data, theta):\n m = theta[0]\n b = theta[1]\n nrow = data.xdata[0].shape[0]\n y = np.zeros([nrow, 1])\n y[:, 0] = m*data.xdata[0].reshape(nrow,) + b\n return y\n\n\ndef setup_pseudo_results():\n results = {\n 'chain': np.random.random_sample(size=(100, 2)),\n 's2chain': np.random.random_sample(size=(100, 1)),\n 'sschain': np.random.random_sample(size=(100, 1)),\n 'parind': np.array([[0, 1]]),\n 'local': np.array([[0, 0]]),\n 'model_settings': {'nbatch': np.random.random_sample(\n size=(100, 1))},\n 'theta': np.random.random_sample(size=(2, )),\n 'sstype': np.random.random_sample(size=(1, 1)),\n }\n return results\n\n\ndef setup_pseudo_ci():\n ci = []\n ci1 = []\n ci1.append([np.random.random_sample(size=(100,)),\n np.random.random_sample(size=(100,)),\n np.random.random_sample(size=(100,))])\n ci.append(ci1)\n return ci\n\n\ndef generate_temp_folder():\n tmpfolder = 'temp0'\n count = 0\n flag = True\n while flag is True:\n if os.path.isdir(str('{}'.format(tmpfolder))):\n count += 1\n tmpfolder = str('{}{}'.format('temp', count))\n else:\n flag = False\n return tmpfolder\n\n\ndef generate_temp_file(extension='h5'):\n tmpfile = str('temp0.{}'.format(extension))\n count = 0\n flag = True\n while flag is True:\n if os.path.isfile(str('{}'.format(tmpfile))):\n count += 1\n tmpfile = str('{}{}.{}'.format('temp', count, extension))\n else:\n flag = False\n return tmpfile\n"
] |
[
[
"numpy.array",
"numpy.zeros",
"numpy.random.random_sample"
]
] |
dstushar7/easy-tts
|
[
"da28192b7f117d9c466b594b22468cb4de994a05"
] |
[
"listen.py"
] |
[
"# -*- coding: utf-8 -*-\nimport os\nfrom os.path import isdir, join\nfrom pathlib import Path\nimport pandas as pd\n\n# Math\nimport numpy as np\nfrom scipy.fftpack import fft\nfrom scipy import signal\nfrom scipy.io import wavfile\nimport librosa\n\nfrom sklearn.decomposition import PCA\n\n# Visualization\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport IPython.display as ipd\nimport librosa.display\n\nimport plotly.offline as py\npy.init_notebook_mode(connected=True)\nimport plotly.graph_objs as go\nimport plotly.tools as tls\nimport pandas as pd\nfrom pydub import AudioSegment\n\n\nfrom gtts import gTTS \n \n \nmytext = 'Bangladesh is playing well'\n \n# Language we want to use \nlanguage = 'en'\n\nmyobj = gTTS(text=mytext, lang=language, slow=False) \n \n\nmyobj.save(\"output.mp3\")\n\nsrc = \"output.mp3\"\ndst = \"./audio/analysis.wav\"\n\n# convert wav to mp3 \nsound = AudioSegment.from_mp3(src)\nsound.export(dst, format=\"wav\")\n\n\n\n\ntrain_audio_path = './audio/'\nfilename = 'analysis.wav'\nsamples, sample_rate = librosa.load(str(train_audio_path)+filename)\n\n\n\n\n\ndef log_specgram(audio, sample_rate, window_size=20,\n step_size=10, eps=1e-10):\n nperseg = int(round(window_size * sample_rate / 1e3))\n noverlap = int(round(step_size * sample_rate / 1e3))\n freqs, times, spec = signal.spectrogram(audio,\n fs=sample_rate,\n window='hann',\n nperseg=nperseg,\n noverlap=noverlap,\n detrend=False)\n return freqs, times, np.log(spec.T.astype(np.float32) + eps)\n\n\n\nS = librosa.feature.melspectrogram(samples, sr=sample_rate, n_mels=128)\n\n# Convert to log scale (dB). We'll use the peak power (max) as reference.\nlog_S = librosa.power_to_db(S, ref=np.max)\n\nplt.figure(figsize=(12, 4))\nlibrosa.display.specshow(log_S, sr=sample_rate, x_axis='time', y_axis='mel')\nplt.title('Mel power spectrogram ')\nplt.colorbar(format='%+02.0f dB')\nplt.tight_layout()\nplt.show(block=True)\n\nS = librosa.feature.melspectrogram(samples, sr=sample_rate, n_mels=128)\n\nlog_S = librosa.power_to_db(S, ref=np.max)\nmfcc = librosa.feature.mfcc(S=log_S, n_mfcc=13)\n\n# Let's pad on the first and second deltas while we're at it\ndelta2_mfcc = librosa.feature.delta(mfcc, order=2)\n\nplt.figure(figsize=(12, 4))\nlibrosa.display.specshow(delta2_mfcc)\nplt.ylabel('MFCC coeffs')\nplt.xlabel('Time')\nplt.title('MFCC')\nplt.colorbar()\nplt.tight_layout()\nplt.show(block=True)"
] |
[
[
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.title",
"scipy.signal.spectrogram",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] |
INM-6/swan
|
[
"ecd426657d6e0ee67e8ea31f0298daf2ea065158"
] |
[
"swan/virtual_unit_map.py"
] |
[
"\"\"\"\nCreated on Dec 12, 2013\n\n@author: Christoph Gollan\n\nIn this module you can find the :class:`VirtualUnitMap` which is used to map\nreal units to virtual units. The virtual units can be swapped to have the \nsame units in the same row.\n\"\"\"\nimport numpy as np\nfrom swan.automatic_mapping import SwanImplementation, calculate_mapping_bu\nfrom swan.gui.palettes import UNIT_COLORS\n\n\nclass VirtualUnitMap(object):\n \"\"\" \n A class for mapping real units to virtual units.\n \n \"\"\"\n\n def __init__(self):\n \"\"\"\n **Properties**\n \n *mapping* (list of list of integer):\n A 2d array with integers that give you in a column the numbers \n of the units from the same session and in the row\n the same unit id for the sessions.\n *visible* (list of boolean):\n Whether or not the unit rows should be visible.\n Contains one boolean for each unit row.\n *maximum_units* (integer):\n The summary of the length of all unit lists.\n *colors* (list of tuple of integer):\n Contains colors for plotting the units in different colors.\n Format: (R, G, B) RGB = 0..255\n *coln* (integer):\n The length of the colors list.\n \n \"\"\"\n self.mapping = []\n self.visible = []\n self.active = []\n self.total_units = 0\n self.colors = UNIT_COLORS\n self.number_of_colors = len(self.colors)\n\n def set_initial_map(self, data):\n \"\"\"\n Sets the default mapping.\n \n **Arguments**\n \n *total_units_per_block* (list of integer):\n The number of units per block.\n \n \"\"\"\n maximum_units = sum(data.total_units_per_block)\n self.total_units = maximum_units\n mapping = []\n for session in range(len(data.blocks)):\n mapping.append([])\n count = 1\n for global_unit_id in range(maximum_units):\n try:\n unit_description = data.blocks[session].channel_indexes[0].units[global_unit_id].description.split()\n\n if \"unclassified\" in unit_description or \"noise\" in unit_description:\n mapping[session].append(0)\n else:\n mapping[session].append(count)\n count += 1\n except IndexError:\n mapping[session].append(0)\n\n self.mapping = mapping\n self.visible = [[True for unit in session] for session in mapping]\n self.update_active()\n\n def set_map_from_dataframe(self, dataframe):\n if not dataframe.empty:\n vmap = np.zeros_like(np.array(self.mapping))\n for session_id in range(vmap.shape[0]):\n session_frame = dataframe.loc[dataframe.session == session_id]\n for global_unit_id, real_unit_id in zip(session_frame.label, session_frame.unit):\n vmap[session_id][global_unit_id] = real_unit_id\n\n self.mapping = vmap.astype(np.int32).tolist()\n self.visible = [[True for unit in session] for session in vmap]\n self.update_active()\n\n def set_map(self, total_units_per_block, virtual_unit_map):\n \"\"\"\n Sets the mapping given from the VUMap.\n \n **Arguments**\n \n *total_units_per_block* (list of integer):\n The number of units per block.\n *vum* (dictionary):\n A dictionary containing the mappings and other information.\n \n \"\"\"\n total_units = sum(total_units_per_block)\n self.total_units = total_units\n\n total_sessions = len(total_units_per_block)\n\n for session in range(total_sessions):\n self.mapping.append([])\n\n for l in virtual_unit_map.values():\n if type(l) == list:\n for session in range(total_sessions):\n unit = l[session][1]\n self.mapping[session].append(unit)\n\n self.visible = [[True for j in range(len(self.mapping[i]))] for i in range(len(self.mapping))]\n self.update_active()\n\n def get_realunit(self, session_index, unit_index, data):\n \"\"\"\n Returns the real unit for a virtual unit.\n \n 
**Arguments**\n \n *session_index* (integer):\n The session index.\n *unit_index* (integer):\n The unit index.\n *data* (:class:`src.neodata.NeoData`):\n The data object that contains the real unit.\n \n **Returns**: :class:`neo.core.unit.Unit`\n The real unit.\n \n \"\"\"\n virtual_unit = self.mapping[session_index][unit_index]\n if \"unclassified\" not in data.blocks[session_index].channel_indexes[0].units[0].description.split():\n real_unit = data.blocks[session_index].channel_indexes[0].units[virtual_unit - 1]\n else:\n real_unit = data.blocks[session_index].channel_indexes[0].units[virtual_unit]\n # real_unit = data.blocks[session_index].channel_indexes[0].units[virtual_unit]\n return real_unit\n\n def swap(self, session_index, first_unit_index, second_unit_index):\n \"\"\"\n Swaps two virtual units.\n \n **Arguments**\n \n *session_index* (integer):\n The session_index index.\n *first_unit_index* (integer):\n The unit index 1.\n *second_unit_index* (integer):\n The unit index 2.\n \n \"\"\"\n self.mapping[session_index][first_unit_index], self.mapping[session_index][second_unit_index] = \\\n self.mapping[session_index][second_unit_index], self.mapping[session_index][first_unit_index]\n # second_unit = self.mapping[session_index][second_unit_index]\n # self.mapping[session_index][second_unit_index] = self.mapping[session_index][first_unit_index]\n # self.mapping[session_index][first_unit_index] = second_unit\n self.update_active()\n\n def set_visible(self, session_id, global_unit_id, visible=True):\n \"\"\"\n Sets a unit row as visible or not.\n \n **Arguments**\n \n *i* (integer):\n The column (session) index.\n *j* (integer):\n The row (unit) index.\n *visible* (boolean):\n Whether or not the unit row should be visible.\n Default: True.\n \n \"\"\"\n self.visible[session_id][global_unit_id] = visible\n self.update_active()\n\n def update_active(self):\n \"\"\"\n Updates the active mapping.\n \n The active mapping is a list of N lists, where N is the\n number of loaded sessions. Each nested list consists of\n zeros or ones. Ones signify that a unit is occupying\n that position in the mapping and is not hidden. Zeros\n signify that either no unit is occupying that position in the\n mapping or is hidden/disabled.\n \n Since the number of lists corresponds to the number of\n loaded sessions, remember that the first index when \n looping over active corresponds to the sessions, while\n the second index corresponds to the units.\n \n \"\"\"\n checkmap = np.array(self.mapping)\n checkmap[checkmap > 0] = 1\n #\n # Converts the boolean visible array to a corresponding\n # matrix of zeros and ones (inner multiply). Then, creates\n # active mapping by taking the element-wise product of the\n # mapping and the checkmap array (outer multiply). 
Effectively,\n # combines information contained in visibility and mapping.\n #\n active = np.multiply(checkmap, np.multiply(self.visible, 1))\n\n # Strips trailing zeros\n # for n, num in enumerate(active):\n # active[n] = np.trim_zeros(num, 'b')\n # if not active[n]:\n # active[n] = [0]\n\n self.active = active\n\n def get_mapping(self):\n return self.mapping\n\n def get_visible(self):\n return self.visible\n\n def get_active(self):\n return self.active\n\n def get_color_list(self):\n return self.colors\n\n def get_colour(self, global_unit_id):\n \"\"\"\n Returns the color for the given unit row.\n \n **Arguments**\n \n *i* (integer):\n The unit row index.\n *mpl* (boolean):\n Whether or not you need the color for matplotlib.\n Mpl uses another rgb format.\n Default: False.\n *layer* (string):\n If mpl is True, you can specify a layer \n that needs a modified color.\n Default: None.\n \n **Returns**: tuple of integer\n The rgb color.\n \n \"\"\"\n global_unit_id = global_unit_id % self.number_of_colors\n col = (\n self.colors[global_unit_id][0],\n self.colors[global_unit_id][1],\n self.colors[global_unit_id][2],\n )\n return col\n\n def calculate_mapping(self, data, storage, automatic_mapping=0, parent=None):\n \"\"\"\n Calculates a mapping for the units based on features like distance.\n \n The units will be compared pare-wise and sequential.\n \n **Arguments**\n \n *data* (:class:`src.neodata.NeoData`):\n This object is needed to get the data \n which will be used to compare the units.\n \n *base* (:class:`base.mystorage.MyStorage`):\n The class which handles the data and the project files.\n \n \"\"\"\n if automatic_mapping == 0:\n # self.swan_implementation(data=data, base=base)\n algorithm = SwanImplementation(neodata=data, parent=parent)\n self.set_map_from_dataframe(algorithm.result)\n\n elif automatic_mapping == 1:\n calculate_mapping_bu(virtual_unit_map=self, data=data, storage=storage)\n\n def reset(self):\n \"\"\"\n Resets the mapping.\n \n \"\"\"\n self.mapping = []\n self.visible = []\n self.active = []\n self.total_units = 0\n"
] |
[
[
"numpy.array",
"numpy.multiply"
]
] |
fr-og/aphantasia
|
[
"35062cff300a82393c32a719cd55583c5a151887"
] |
[
"illustrip.py"
] |
[
"# coding: UTF-8\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL']='2'\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nimport argparse\nimport numpy as np\nimport shutil\nimport PIL\nimport time\nfrom imageio import imread, imsave\n\ntry:\n from googletrans import Translator\n googletrans_ok = True\nexcept ImportError as e:\n googletrans_ok = False\n\nimport torch\nimport torchvision\nimport torch.nn.functional as F\nfrom torchvision import transforms as T\n\nimport clip\nos.environ['KMP_DUPLICATE_LIB_OK']='True'\n\nfrom aphantasia.image import to_valid_rgb, fft_image, resume_fft, pixel_image\nfrom aphantasia.utils import slice_imgs, derivat, sim_func, slerp, basename, file_list, img_list, img_read, pad_up_to, txt_clean, latent_anima, cvshow, checkout, save_cfg, old_torch\nfrom aphantasia import transforms\nfrom depth import depth\ntry: # progress bar for notebooks \n get_ipython().__class__.__name__\n from aphantasia.progress_bar import ProgressIPy as ProgressBar\nexcept: # normal console\n from aphantasia.progress_bar import ProgressBar\n\nclip_models = ['ViT-B/16', 'ViT-B/32', 'RN50', 'RN50x4', 'RN50x16', 'RN101']\n\ndef get_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('-s', '--size', default='1280-720', help='Output resolution')\n parser.add_argument('-t', '--in_txt', default=None, help='Text string or file to process (main topic)')\n parser.add_argument('-pre', '--in_txt_pre', default=None, help='Prefix for input text')\n parser.add_argument('-post', '--in_txt_post', default=None, help='Postfix for input text')\n parser.add_argument('-t2', '--in_txt2', default=None, help='Text string or file to process (style)')\n parser.add_argument('-t0', '--in_txt0', default=None, help='input text to subtract')\n parser.add_argument('-im', '--in_img', default=None, help='input image or directory with images')\n parser.add_argument('-w0', '--weight0', default=0.3, type=float, help='weight for subtraction')\n parser.add_argument('-w2', '--weight2', default=0.5, type=float, help='weight for style')\n parser.add_argument('-wi', '--weight_img', default=0.5, type=float, help='weight for images')\n parser.add_argument('-r', '--resume', default=None, help='Resume from saved params or from an image')\n parser.add_argument( '--out_dir', default='_out')\n parser.add_argument('-tr', '--translate', action='store_true', help='Translate with Google Translate')\n parser.add_argument( '--invert', action='store_true', help='Invert criteria')\n parser.add_argument('-v', '--verbose', dest='verbose', action='store_true')\n parser.add_argument('-nv', '--no-verbose', dest='verbose', action='store_false')\n parser.set_defaults(verbose=True)\n # training\n parser.add_argument( '--gen', default='RGB', help='Generation (optimization) method: FFT or RGB')\n parser.add_argument('-m', '--model', default='ViT-B/32', choices=clip_models, help='Select CLIP model to use')\n parser.add_argument( '--steps', default=300, type=int, help='Iterations (frames) per scene (text line)')\n parser.add_argument( '--samples', default=100, type=int, help='Samples to evaluate per frame')\n parser.add_argument('-lr', '--lrate', default=1, type=float, help='Learning rate')\n # motion\n parser.add_argument('-ops', '--opt_step', default=1, type=int, help='How many optimizing steps per save/transform step')\n parser.add_argument('-sm', '--smooth', action='store_true', help='Smoothen interframe jittering for FFT method')\n parser.add_argument('-it', '--interpol', default=True, help='Interpolate topics? 
(or change by cut)')\n parser.add_argument( '--fstep', default=100, type=int, help='How many frames before changing motion')\n parser.add_argument( '--scale', default=0.012, type=float)\n parser.add_argument( '--shift', default=10., type=float, help='in pixels')\n parser.add_argument( '--angle', default=0.8, type=float, help='in degrees')\n parser.add_argument( '--shear', default=0.4, type=float)\n parser.add_argument( '--anima', default=True, help='Animate motion')\n # depth\n parser.add_argument('-d', '--depth', default=0, type=float, help='Add depth with such strength, if > 0')\n parser.add_argument( '--tridepth', action='store_true', help='process depth 3 times [mirrored]')\n parser.add_argument( '--depth_model', default='AdaBins_nyu.pt', help='AdaBins model path')\n parser.add_argument( '--depth_mask', default='depth/mask.jpg', help='depth mask path')\n parser.add_argument( '--depth_dir', default=None, help='Directory to save depth, if not None')\n # tweaks\n parser.add_argument('-a', '--align', default='overscan', choices=['central', 'uniform', 'overscan', 'overmax'], help='Sampling distribution')\n parser.add_argument('-tf', '--transform', default='fast', choices=['none', 'fast', 'custom', 'elastic'], help='augmenting transforms')\n parser.add_argument('-opt', '--optimizer', default='adam', choices=['adam', 'adam_custom', 'adamw', 'adamw_custom'], help='Optimizer')\n parser.add_argument( '--fixcontrast', action='store_true', help='Required for proper resuming from image')\n parser.add_argument( '--contrast', default=1.2, type=float)\n parser.add_argument( '--colors', default=2.3, type=float)\n parser.add_argument('-sh', '--sharp', default=0, type=float)\n parser.add_argument('-mc', '--macro', default=0.3, type=float, help='Endorse macro forms 0..1 ')\n parser.add_argument('-e', '--enforce', default=0, type=float, help='Enforce details (by boosting similarity between two parallel samples)')\n parser.add_argument('-x', '--expand', default=0, type=float, help='Boosts diversity (by enforcing difference between prev/next samples)')\n parser.add_argument('-n', '--noise', default=2., type=float, help='Add noise to make composition sparse (FFT only)') # 0.04\n parser.add_argument( '--sim', default='mix', help='Similarity function (angular/spherical/mixed; None = cossim)')\n parser.add_argument( '--rem', default=None, help='Dummy text to add to project name')\n a = parser.parse_args()\n\n if a.size is not None: a.size = [int(s) for s in a.size.split('-')][::-1]\n if len(a.size)==1: a.size = a.size * 2\n a.gen = a.gen.upper()\n a.invert = -1. if a.invert is True else 1.\n \n # Overriding some parameters, depending on other settings\n if a.gen == 'RGB':\n a.smooth = False\n a.align = 'overscan'\n if a.resume is not None: a.fixcontrast = True\n if a.model == 'ViT-B/16': a.sim = 'cossim'\n\n if a.translate is True and googletrans_ok is not True: \n print('\\n Install googletrans module to enable translation!'); exit()\n \n return a\n\ndef depth_transform(img_t, depth_infer, depth_mask, size, depthX=0, scale=1., shift=[0,0], colors=1, depth_dir=None, save_num=0):\n size2 = [s//2 for s in size]\n if not isinstance(scale, float): scale = float(scale[0])\n # d X/Y define the origin point of the depth warp, effectively a \"3D pan zoom\", [-1..1]\n # plus = look ahead, minus = look aside\n dX = 100. * shift[0] / size[1]\n dY = 100. * shift[1] / size[0]\n # dZ = movement direction: 1 away (zoom out), 0 towards (zoom in), 0.5 stay\n dZ = 0.5 + 32. 
* (scale-1)\n img = depth.depthwarp(img_t, depth_infer, depth_mask, size2, depthX, [dX,dY], dZ, save_path=depth_dir, save_num=save_num)\n return img\n\ndef frame_transform(img, size, angle, shift, scale, shear):\n if old_torch(): # 1.7.1\n img = T.functional.affine(img, angle, tuple(shift), scale, shear, fillcolor=0, resample=PIL.Image.BILINEAR)\n img = T.functional.center_crop(img, size)\n img = pad_up_to(img, size)\n else: # 1.8+\n img = T.functional.affine(img, angle, tuple(shift), scale, shear, fill=0, interpolation=T.InterpolationMode.BILINEAR)\n img = T.functional.center_crop(img, size) # on 1.8+ also pads\n return img\n\ndef main():\n a = get_args()\n \n # Load CLIP models\n model_clip, _ = clip.load(a.model, jit=old_torch())\n try:\n a.modsize = model_clip.visual.input_resolution \n except:\n a.modsize = 288 if a.model == 'RN50x4' else 384 if a.model == 'RN50x16' else 224\n if a.verbose is True: print(' using model', a.model)\n xmem = {'ViT-B/16':0.25, 'RN50':0.5, 'RN50x4':0.16, 'RN50x16':0.06, 'RN101':0.33}\n if a.model in xmem.keys():\n a.samples = int(a.samples * xmem[a.model])\n\n if a.translate:\n translator = Translator()\n\n if a.enforce != 0:\n a.samples = int(a.samples * 0.5)\n\n if 'elastic' in a.transform:\n trform_f = transforms.transforms_elastic\n a.samples = int(a.samples * 0.95)\n elif 'custom' in a.transform:\n trform_f = transforms.transforms_custom\n a.samples = int(a.samples * 0.95)\n elif 'fast' in a.transform:\n trform_f = transforms.transforms_fast\n a.samples = int(a.samples * 0.95)\n else:\n trform_f = transforms.normalize()\n\n def enc_text(txt):\n if a.translate:\n txt = translator.translate(txt, dest='en').text\n emb = model_clip.encode_text(clip.tokenize(txt).cuda()[:77])\n return emb.detach().clone()\n\n def enc_image(img_file):\n img_t = torch.from_numpy(img_read(img_file)/255.).unsqueeze(0).permute(0,3,1,2).cuda()[:,:3,:,:]\n in_sliced = slice_imgs([img_t], a.samples, a.modsize, transforms.normalize(), a.align)[0]\n emb = model_clip.encode_image(in_sliced)\n return emb.detach().clone()\n\n # Encode inputs\n count = 0\n texts = []\n styles = []\n images = []\n \n if a.in_txt is not None:\n if os.path.isfile(a.in_txt):\n with open(a.in_txt, 'r', encoding=\"utf-8\") as f:\n texts = f.readlines()\n texts = [tt.strip() for tt in texts if len(tt.strip()) > 0 and tt[0] != '#']\n else:\n texts = [a.in_txt]\n if a.in_txt_pre is not None:\n texts = [' '.join([a.in_txt_pre, tt]).strip() for tt in texts]\n if a.in_txt_post is not None:\n texts = [' '.join([tt, a.in_txt_post]).strip() for tt in texts]\n key_txt_encs = [enc_text(txt) for txt in texts]\n count = max(count, len(key_txt_encs))\n\n if a.in_txt2 is not None:\n if os.path.isfile(a.in_txt2):\n with open(a.in_txt2, 'r', encoding=\"utf-8\") as f:\n styles = f.readlines()\n styles = [tt.strip() for tt in styles if len(tt.strip()) > 0 and tt[0] != '#']\n else:\n styles = [a.in_txt2]\n key_styl_encs = [enc_text(style) for style in styles]\n count = max(count, len(key_styl_encs))\n\n if a.in_img is not None and os.path.exists(a.in_img):\n images = file_list(a.in_img) if os.path.isdir(a.in_img) else [a.in_img]\n key_img_encs = [enc_image(image) for image in images]\n count = max(count, len(key_img_encs))\n \n assert count > 0, \"No inputs found!\"\n \n if a.in_txt0 is not None:\n if a.verbose is True: print(' subtract text:', a.in_txt0)\n if a.translate:\n a.in_txt0 = translator.translate(a.in_txt0, dest='en').text\n anti_txt_encs = [enc_text(txt) for txt in a.in_txt0.split('.')]\n\n if a.verbose is True: 
print(' samples:', a.samples)\n\n global params_tmp\n shape = [1, 3, *a.size]\n\n if a.gen == 'RGB':\n params_tmp, _, sz = pixel_image(shape, a.resume)\n params_tmp = params_tmp[0].cuda().detach()\n else:\n params_tmp, sz = resume_fft(a.resume, shape, decay=1.5, sd=1)\n if sz is not None: a.size = sz\n\n if a.depth != 0:\n depth_infer, depth_mask = depth.init_adabins(size=a.size, model_path=a.depth_model, mask_path=a.depth_mask, tridepth=a.tridepth)\n if a.depth_dir is not None:\n os.makedirs(a.depth_dir, exist_ok=True)\n print(' depth dir:', a.depth_dir)\n\n steps = a.steps\n glob_steps = count * steps\n if glob_steps == a.fstep: a.fstep = glob_steps // 2 # otherwise no motion\n\n workname = basename(a.in_txt) if a.in_txt is not None else basename(a.in_img)\n workname = txt_clean(workname)\n workdir = os.path.join(a.out_dir, workname + '-%s' % a.gen.lower())\n if a.rem is not None: workdir += '-%s' % a.rem\n if 'RN' in a.model.upper(): workdir += '-%s' % a.model\n tempdir = os.path.join(workdir, 'ttt')\n os.makedirs(tempdir, exist_ok=True)\n save_cfg(a, workdir)\n if a.in_txt is not None and os.path.isfile(a.in_txt):\n shutil.copy(a.in_txt, os.path.join(workdir, os.path.basename(a.in_txt)))\n if a.in_txt2 is not None and os.path.isfile(a.in_txt2):\n shutil.copy(a.in_txt2, os.path.join(workdir, os.path.basename(a.in_txt2)))\n\n midp = 0.5\n if a.anima:\n if a.gen == 'RGB': # zoom in\n m_scale = latent_anima([1], glob_steps, a.fstep, uniform=True, cubic=True, start_lat=[-0.3], verbose=False)\n m_scale = 1 + (m_scale + 0.3) * a.scale\n else:\n m_scale = latent_anima([1], glob_steps, a.fstep, uniform=True, cubic=True, start_lat=[0.6], verbose=False)\n m_scale = 1 - (m_scale-0.6) * a.scale\n m_shift = latent_anima([2], glob_steps, a.fstep, uniform=True, cubic=True, start_lat=[midp,midp], verbose=False)\n m_angle = latent_anima([1], glob_steps, a.fstep, uniform=True, cubic=True, start_lat=[midp], verbose=False)\n m_shear = latent_anima([1], glob_steps, a.fstep, uniform=True, cubic=True, start_lat=[midp], verbose=False)\n m_shift = (midp-m_shift) * a.shift * abs(m_scale-1) / a.scale\n m_angle = (midp-m_angle) * a.angle * abs(m_scale-1) / a.scale\n m_shear = (midp-m_shear) * a.shear * abs(m_scale-1) / a.scale\n \n def get_encs(encs, num):\n cnt = len(encs)\n if cnt == 0: return []\n enc_1 = encs[min(num, cnt-1)]\n enc_2 = encs[min(num+1, cnt-1)]\n return slerp(enc_1, enc_2, steps)\n\n prev_enc = 0\n def process(num):\n global params_tmp, opt_state, params, image_f, optimizer\n\n if a.interpol is True: # linear topics interpolation\n txt_encs = get_encs(key_txt_encs, num)\n styl_encs = get_encs(key_styl_encs, num)\n img_encs = get_encs(key_img_encs, num)\n else: # change by cut\n txt_encs = [key_txt_encs[min(num, len(key_txt_encs)-1)][0]] * steps if len(key_txt_encs) > 0 else []\n styl_encs = [key_styl_encs[min(num, len(key_styl_encs)-1)][0]] * steps if len(key_styl_encs) > 0 else []\n img_encs = [key_img_encs[min(num, len(key_img_encs)-1)][0]] * steps if len(key_img_encs) > 0 else []\n \n if a.verbose is True: \n if len(texts) > 0: print(' ref text: ', texts[min(num, len(texts)-1)][:80])\n if len(styles) > 0: print(' ref style: ', styles[min(num, len(styles)-1)][:80])\n if len(images) > 0: print(' ref image: ', basename(images[min(num, len(images)-1)])[:80])\n \n pbar = ProgressBar(steps)\n for ii in range(steps):\n glob_step = num * steps + ii # save/transform\n \n txt_enc = txt_encs[ii % len(txt_encs)].unsqueeze(0) if len(txt_encs) > 0 else None\n styl_enc = styl_encs[ii % 
len(styl_encs)].unsqueeze(0) if len(styl_encs) > 0 else None\n img_enc = img_encs[ii % len(img_encs)].unsqueeze(0) if len(img_encs) > 0 else None\n\n # MOTION: transform frame, reload params\n\n scale = m_scale[glob_step] if a.anima else 1 + a.scale\n shift = m_shift[glob_step] if a.anima else [0, a.shift]\n angle = m_angle[glob_step][0] if a.anima else a.angle\n shear = m_shear[glob_step][0] if a.anima else a.shear\n\n if a.gen == 'RGB':\n if a.depth > 0:\n params_tmp = depth_transform(params_tmp, depth_infer, depth_mask, a.size, a.depth, scale, shift, a.colors, a.depth_dir, glob_step)\n params_tmp = frame_transform(params_tmp, a.size, angle, shift, scale, shear)\n params, image_f, _ = pixel_image([1, 3, *a.size], resume=params_tmp)\n img_tmp = None\n\n else: # FFT\n if old_torch(): # 1.7.1\n img_tmp = torch.irfft(params_tmp, 2, normalized=True, signal_sizes=a.size)\n if a.depth > 0:\n img_tmp = depth_transform(img_tmp, depth_infer, depth_mask, a.size, a.depth, scale, shift, a.colors, a.depth_dir, glob_step)\n img_tmp = frame_transform(img_tmp, a.size, angle, shift, scale, shear)\n params_tmp = torch.rfft(img_tmp, 2, normalized=True)\n else: # 1.8+\n if type(params_tmp) is not torch.complex64:\n params_tmp = torch.view_as_complex(params_tmp)\n img_tmp = torch.fft.irfftn(params_tmp, s=a.size, norm='ortho')\n if a.depth > 0:\n img_tmp = depth_transform(img_tmp, depth_infer, depth_mask, a.size, a.depth, scale, shift, a.colors, a.depth_dir, glob_step)\n img_tmp = frame_transform(img_tmp, a.size, angle, shift, scale, shear)\n params_tmp = torch.fft.rfftn(img_tmp, s=a.size, dim=[2,3], norm='ortho')\n params_tmp = torch.view_as_real(params_tmp)\n params, image_f, _ = fft_image([1, 3, *a.size], sd=1, resume=params_tmp)\n\n if a.optimizer.lower() == 'adamw':\n optimizer = torch.optim.AdamW(params, a.lrate, weight_decay=0.01)\n elif a.optimizer.lower() == 'adamw_custom':\n optimizer = torch.optim.AdamW(params, a.lrate, weight_decay=0.01, betas=(.0,.999), amsgrad=True)\n elif a.optimizer.lower() == 'adam':\n optimizer = torch.optim.Adam(params, a.lrate)\n else: # adam_custom\n optimizer = torch.optim.Adam(params, a.lrate, betas=(.0,.999))\n image_f = to_valid_rgb(image_f, colors = a.colors)\n del img_tmp\n\n if a.smooth is True and num + ii > 0:\n optimizer.load_state_dict(opt_state)\n\n ### optimization\n for ss in range(a.opt_step):\n loss = 0\n\n noise = a.noise * (torch.rand(1, 1, a.size[0], a.size[1]//2+1, 1)-0.5).cuda() if a.noise>0 else 0.\n img_out = image_f(noise, fixcontrast=a.fixcontrast)\n \n img_sliced = slice_imgs([img_out], a.samples, a.modsize, trform_f, a.align, a.macro)[0]\n out_enc = model_clip.encode_image(img_sliced)\n\n if a.gen == 'RGB': # empirical hack\n loss += abs(img_out.mean((2,3)) - 0.45).mean() # fix brightness\n loss += abs(img_out.std((2,3)) - 0.17).mean() # fix contrast\n\n if txt_enc is not None:\n loss -= a.invert * sim_func(txt_enc, out_enc, a.sim)\n if styl_enc is not None:\n loss -= a.weight2 * sim_func(styl_enc, out_enc, a.sim)\n if img_enc is not None:\n loss -= a.weight_img * sim_func(img_enc, out_enc, a.sim)\n if a.in_txt0 is not None: # subtract text\n for anti_txt_enc in anti_txt_encs:\n loss += 0.3 * sim_func(anti_txt_enc, out_enc, a.sim)\n if a.sharp != 0: # scharr|sobel|naive\n loss -= a.sharp * derivat(img_out, mode='naive')\n if a.enforce != 0:\n img_sliced = slice_imgs([image_f(noise, fixcontrast=a.fixcontrast)], a.samples, a.modsize, trform_f, a.align, a.macro)[0]\n out_enc2 = model_clip.encode_image(img_sliced)\n loss -= a.enforce * 
sim_func(out_enc, out_enc2, a.sim)\n del out_enc2; torch.cuda.empty_cache()\n if a.expand > 0:\n global prev_enc\n if ii > 0:\n loss += a.expand * sim_func(prev_enc, out_enc, a.sim)\n prev_enc = out_enc.detach().clone()\n del img_out, img_sliced, out_enc; torch.cuda.empty_cache()\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n ### save params & frame\n\n params_tmp = params[0].detach().clone()\n if a.smooth is True:\n opt_state = optimizer.state_dict()\n\n with torch.no_grad():\n img_t = image_f(contrast=a.contrast, fixcontrast=a.fixcontrast)[0].permute(1,2,0)\n img_np = torch.clip(img_t*255, 0, 255).cpu().numpy().astype(np.uint8)\n imsave(os.path.join(tempdir, '%06d.jpg' % glob_step), img_np, quality=95)\n if a.verbose is True: cvshow(img_np)\n del img_t, img_np\n pbar.upd()\n\n params_tmp = params[0].detach().clone()\n \n glob_start = time.time()\n try:\n for i in range(count):\n process(i)\n except KeyboardInterrupt:\n pass\n\n os.system('ffmpeg -v warning -y -i %s/\\%%06d.jpg \"%s.mp4\"' % (tempdir, os.path.join(workdir, workname)))\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"torch.view_as_real",
"torch.optim.Adam",
"torch.fft.rfftn",
"torch.cuda.empty_cache",
"torch.rfft",
"torch.view_as_complex",
"torch.clip",
"torch.optim.AdamW",
"torch.no_grad",
"torch.rand",
"torch.fft.irfftn",
"torch.irfft"
]
] |
DesmondZhong/switch
|
[
"88abc164128b6a7345c7aa8806e2b37f74de54fa"
] |
[
"switch_model/upgrade/upgrade_2_0_1.py"
] |
[
"# Copyright (c) 2015-2019 The Switch Authors. All rights reserved.\n# Licensed under the Apache License, Version 2.0, which is in the LICENSE file.\n\n\"\"\"\nUpgrade input directories from 2.0.0b4 (final beta) to 2.0.1. (There were no changes for 2.0.0.)\nThis just moves some modules, as listed in the rename_modules variable.\n\"\"\"\nfrom __future__ import print_function\n\nimport os, shutil, argparse\nimport pandas\nimport switch_model.upgrade\n\nupgrades_from = '2.0.0b4'\nupgrades_to = '2.0.1'\n\n# note: we could keep switch_model.hawaii.reserves active, but then we would need special code to switch\n# the model to the main reserves module if and only if they are using the iterative demand response system\n# which seems unnecessarily complicated\nreplace_modules = {\n 'switch_model.hawaii.demand_response':\n ['switch_model.balancing.demand_response.iterative'],\n 'switch_model.hawaii.r_demand_system':\n ['switch_model.balancing.demand_response.iterative.r_demand_system'],\n 'switch_model.hawaii.reserves': [\n 'switch_model.balancing.operating_reserves.areas',\n 'switch_model.balancing.operating_reserves.spinning_reserves',\n ]\n}\nmodule_messages = {\n # description of significant changes to particular modules (other than moving)\n # old_module: message\n 'switch_model.hawaii.r_demand_system':\n 'The switch_model.hawaii.r_demand_system module has been moved. Please update '\n 'the --dr-demand-module flag to point to the new location.',\n 'switch_model.hawaii.demand_response':\n 'The switch_model.hawaii.demand_response module has been moved. Please update '\n 'iterate.txt to refer to the new location.',\n 'switch_model.hawaii.switch_patch':\n 'The switch_model.hawaii.switch_patch module no longer patches '\n 'the cplex solver to generate dual values for mixed-integer programs. '\n 'Use the new --retrieve-cplex-mip-duals flag if you need this behavior.'\n}\n\ndef upgrade_input_dir(inputs_dir):\n \"\"\"\n Upgrade the input directory.\n \"\"\"\n # rename modules and report changes\n update_modules(inputs_dir)\n\n # Write a new version text file.\n switch_model.upgrade._write_input_version(inputs_dir, upgrades_to)\n\n\ndef rename_file(old_name, new_name, optional_file=True):\n old_path = os.path.join(inputs_dir, old_name)\n new_path = os.path.join(inputs_dir, new_name)\n if optional_file and not os.path.isfile(old_path):\n return\n shutil.move(old_path, new_path)\n\ndef rename_column(file_name, old_col_name, new_col_name, optional_file=True):\n path = os.path.join(inputs_dir, file_name)\n if optional_file and not os.path.isfile(path):\n return\n df = pandas.read_csv(path, na_values=['.'], sep=r\"\\s+\", index_col=False)\n df.rename(columns={old_col_name: new_col_name}, inplace=True)\n df.to_csv(path, sep='\\t', na_rep='.', index=False)\n\ndef item_list(items):\n \"\"\"Generate normal-text version of list of items, with commas and \"and\" as needed.\"\"\"\n return ' and '.join(', '.join(items).rsplit(', ', 1))\n\ndef update_modules(inputs_dir):\n \"\"\"Rename modules in the module list if needed (list is sought in\n standard locations) and return list of alerts for user.\"\"\"\n\n modules_path = os.path.join(inputs_dir, 'modules.txt')\n if not os.path.isfile(modules_path):\n modules_path = os.path.join(inputs_dir, '..', 'modules.txt')\n if not os.path.isfile(modules_path):\n raise RuntimeError(\n \"Unable to find modules or modules.txt file for input directory '{}'. 
\"\n \"This file should be located in the input directory or its parent.\"\n .format(inputs_dir)\n )\n modules_path = os.path.normpath(modules_path) # tidy up for display later\n\n # Upgrade module listings\n # Each line of the original file is either a module identifier or a comment\n with open(modules_path) as f:\n old_module_list = [line.strip() for line in f.read().splitlines()]\n\n # rename modules as needed\n new_module_list=[]\n for module in old_module_list:\n try:\n new_modules = replace_modules[module]\n print (\n \"Module {old} has been replaced by {new} in {file}.\"\n .format(old=module, new=item_list(new_modules), file=modules_path)\n )\n except KeyError:\n new_modules = [module]\n new_module_list.extend(new_modules)\n\n # load reserve balancing areas module early, to support modules that\n # define reserve products.\n\n # switch_model.hawaii.reserves loaded late and then found reserve\n # components defined by other modules, but\n # switch_model.balancing.operating_reserves.spinning_reserves should\n # load early so other modules can register reserves with it.\n if 'switch_model.hawaii.reserves' in old_module_list:\n new_spin = 'switch_model.balancing.operating_reserves.areas'\n try:\n insert_pos = new_module_list.index('switch_model.balancing.load_zones') + 1\n if insert_pos < new_module_list.index(new_spin):\n new_module_list.remove(new_spin)\n new_module_list.insert(insert_pos, new_spin)\n # print (\n # '{} has been moved up to row {} in {}, '\n # 'to allow other modules to register reserves with it.'\n # .format(new_spin, insert_pos + 1, modules_path)\n # )\n except ValueError:\n # couldn't find the location to insert spinning reserves module\n print (\n '{} module should be moved early in the module list, '\n 'before any modules that define reserve elements.'\n .format(new_spin)\n )\n\n #import pdb; pdb.set_trace()\n\n # write new modules list\n with open(modules_path, 'w') as f:\n for module in new_module_list:\n f.write(module + \"\\n\")\n\n # report any significant changes in the previously active modules\n for module in old_module_list:\n try:\n print(\"ATTENTION: {}\".format(module_messages[module]))\n except KeyError:\n pass\n"
] |
[
[
"pandas.read_csv"
]
] |
tg12/Python
|
[
"398d1dbf4b780d1725aeae9a91b4c79f4410e2f0"
] |
[
"arithmetic_analysis/in_static_equilibrium.py"
] |
[
"'''THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\nOR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\nMERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND\nNON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR ANYONE\nDISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR OTHER LIABILITY,\nWHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\nCONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.'''\n\n# Bitcoin Cash (BCH) qpz32c4lg7x7lnk9jg6qg7s4uavdce89myax5v5nuk\n# Ether (ETH) - 0x843d3DEC2A4705BD4f45F674F641cE2D0022c9FB\n# Litecoin (LTC) - Lfk5y4F7KZa9oRxpazETwjQnHszEPvqPvu\n# Bitcoin (BTC) - 34L8qWiQyKr8k4TnHDacfjbaSqQASbBtTd\n\n# contact :- [email protected]\n\n\n\n\"\"\"\nChecks if a system of forces is in static equilibrium.\n\npython/black : true\nflake8 : passed\nmypy : passed\n\"\"\"\n\nfrom numpy import array, cos, sin, radians, cross # type: ignore\nfrom typing import List\n\n\ndef polar_force(\n magnitude: float, angle: float, radian_mode: bool = False\n) -> List[float]:\n \"\"\"\n Resolves force along rectangular components.\n (force, angle) => (force_x, force_y)\n >>> polar_force(10, 45)\n [7.0710678118654755, 7.071067811865475]\n >>> polar_force(10, 3.14, radian_mode=True)\n [-9.999987317275394, 0.01592652916486828]\n \"\"\"\n if radian_mode:\n return [magnitude * cos(angle), magnitude * sin(angle)]\n return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]\n\n\ndef in_static_equilibrium(\n forces: array, location: array, eps: float = 10 ** -1\n) -> bool:\n \"\"\"\n Check if a system is in equilibrium.\n It takes two numpy.array objects.\n forces ==> [\n [force1_x, force1_y],\n [force2_x, force2_y],\n ....]\n location ==> [\n [x1, y1],\n [x2, y2],\n ....]\n >>> force = array([[1, 1], [-1, 2]])\n >>> location = array([[1, 0], [10, 0]])\n >>> in_static_equilibrium(force, location)\n False\n \"\"\"\n # summation of moments is zero\n moments: array = cross(location, forces)\n sum_moments: float = sum(moments)\n return abs(sum_moments) < eps\n\n\nif __name__ == \"__main__\":\n # Test to check if it works\n forces = array([polar_force(718.4, 180 - 30),\n polar_force(879.54, 45), polar_force(100, -90)])\n\n location = array([[0, 0], [0, 0], [0, 0]])\n\n assert in_static_equilibrium(forces, location)\n\n # Problem 1 in image_data/2D_problems.jpg\n forces = array(\n [\n polar_force(30 * 9.81, 15),\n polar_force(215, 180 - 45),\n polar_force(264, 90 - 30),\n ]\n )\n\n location = array([[0, 0], [0, 0], [0, 0]])\n\n assert in_static_equilibrium(forces, location)\n\n # Problem in image_data/2D_problems_1.jpg\n forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])\n\n location = array([[0, 0], [6, 0], [10, 0], [12, 0]])\n\n assert in_static_equilibrium(forces, location)\n\n import doctest\n\n doctest.testmod()\n"
] |
[
[
"numpy.radians",
"numpy.cos",
"numpy.sin",
"numpy.cross",
"numpy.array"
]
] |
larioandr/thesis-models
|
[
"ecbc8c01aaeaa69034d6fe1d8577ab655968ea5f"
] |
[
"src/pyqumo/tests/test_sim_gg1.py"
] |
[
"from dataclasses import dataclass\n\nimport pytest\nimport numpy as np\nfrom numpy.testing import assert_allclose\n\nfrom pyqumo.arrivals import Poisson\nfrom pyqumo.random import Exponential, Distribution\nfrom pyqumo.sim.gg1 import simulate\n\n\n@dataclass\nclass GG1Props:\n arrival: Distribution\n service: Distribution\n queue_capacity: int\n\n # System and queue sizes:\n system_size_avg: float\n system_size_std: float\n queue_size_avg: float\n queue_size_std: float\n\n # Loss probability and utilization:\n loss_prob: float\n utilization: float\n\n # Departure process properties:\n departure_rate: float\n\n # Response and wait time:\n response_time_avg: float\n wait_time_avg: float\n\n # Test parameters:\n tol: float = 1e-1\n max_packets: int = int(1e5)\n\n\[email protected]('props', [\n GG1Props(\n arrival=Poisson(2), service=Poisson(5), queue_capacity=4,\n system_size_avg=0.642, system_size_std=0.981,\n queue_size_avg=0.2444, queue_size_std=0.6545,\n loss_prob=0.0062, utilization=0.3975, departure_rate=1.9877,\n response_time_avg=0.323, wait_time_avg=0.123),\n GG1Props(\n arrival=Exponential(42), service=Exponential(34),\n queue_capacity=7,\n system_size_avg=5.3295, system_size_std=5.6015**0.5,\n queue_size_avg=4.3708, queue_size_std=5.2010**0.5,\n loss_prob=0.2239, utilization=0.9587, departure_rate=32.5959,\n response_time_avg=0.163, wait_time_avg=0.134,\n max_packets=int(1e5)\n ),\n GG1Props(\n arrival=Poisson(1), service=Exponential(2),\n queue_capacity=np.inf,\n system_size_avg=1, system_size_std=2.0**0.5,\n queue_size_avg=0.5, queue_size_std=1.25**0.5,\n loss_prob=0, utilization=0.5, departure_rate=1.0,\n response_time_avg=1.0, wait_time_avg=0.5, max_packets=int(1e5)\n )\n])\ndef test_gg1(props):\n tol = props.tol\n results = simulate(props.arrival, props.service, props.queue_capacity,\n max_packets=props.max_packets)\n desc = f\"arrival: {props.arrival}, \" \\\n f\"service: {props.service}, \" \\\n f\"queue capacity: {props.queue_capacity}\"\n\n # Check system and queue sizes:\n assert_allclose(results.system_size.mean, props.system_size_avg, rtol=tol,\n err_msg=f\"system size average mismatch ({desc})\")\n assert_allclose(results.system_size.std, props.system_size_std, rtol=tol,\n err_msg=f\"system size std.dev. mismatch ({desc})\")\n assert_allclose(results.queue_size.mean, props.queue_size_avg, rtol=tol,\n err_msg=f\"queue size average mismatch ({desc})\")\n assert_allclose(results.queue_size.std, props.queue_size_std, rtol=tol,\n err_msg=f\"queue size std.dev. mismatch ({desc})\")\n\n # Loss probability and utilization:\n assert_allclose(results.loss_prob, props.loss_prob, rtol=tol,\n err_msg=f\"loss probability mismatch ({desc})\")\n assert_allclose(results.utilization, props.utilization, rtol=tol,\n err_msg=f\"utilization mismatch ({desc})\")\n\n # Departure process:\n assert_allclose(1/results.departures.avg, props.departure_rate, rtol=tol,\n err_msg=f\"departure rate mismatch ({desc})\")\n\n # Wait and response time:\n assert_allclose(results.response_time.avg, props.response_time_avg,\n rtol=tol, err_msg=f\"response time mismatch ({desc})\")\n assert_allclose(results.wait_time.avg, props.wait_time_avg, rtol=tol,\n err_msg=f\"waiting time mismatch ({desc})\")\n"
] |
[
[
"numpy.testing.assert_allclose"
]
] |
akharitonov/mdde
|
[
"b0443f3c9c3ca948e9dda213572926087c214d8d"
] |
[
"mdde/samples/heuristic/sample_heuristic_random_legal.py"
] |
[
"import argparse\nimport logging\nimport sys\n\nfrom typing import List\nfrom pathlib import Path\n\nimport numpy as np\n\nfrom mdde.core import Environment\nfrom mdde.agent.default import SingleNodeDefaultAgent\nfrom mdde.registry.workload import EDefaultYCSBWorkload\nfrom mdde.scenario.default import DefaultScenarioSimulation\nfrom mdde.config import ConfigRegistry, ConfigEnvironment\nfrom mdde.registry.protocol import PRegistryControlClient, PRegistryWriteClient, PRegistryReadClient\nfrom mdde.registry.tcp import RegistryClientTCP\n\nimport abc_heuristic_sample\n\nlogging.basicConfig(level=logging.DEBUG)\n\n\nclass MDDERandomLegalAgents(abc_heuristic_sample.ABCMDDEHeuristicSample):\n \"\"\"Agents take actions at random from the pool of the legal actions.\"\"\"\n\n # Note: in this sample Readability > Efficiency\n\n run_result_dir = None\n \"\"\"Ray results output folder for the current experimental run.\"\"\"\n\n mdde_registry_host = 'localhost'\n \"\"\"MDDE registry host.\"\"\"\n mdde_registry_port = 8942\n \"\"\"MDDE registry control TCP port.\"\"\"\n mdde_registry_config = None\n \"\"\"Path to the MDDE registry configuration YAML.\"\"\"\n env_temp_dir = None\n \"\"\"Path to directory for temporary files created by the scenario or agents.\"\"\"\n\n NUM_FRAGMENTS = 20\n\n def run(self, config, workload):\n # Result paths\n result_dir_path_root = Path(self.run_result_dir).resolve()\n\n result_dir_path_mdde_obj = result_dir_path_root.joinpath(\"mdde\")\n result_dir_path_mdde_obj.mkdir(parents=True, exist_ok=True)\n result_dir_path_mdde = str(result_dir_path_mdde_obj)\n # Config\n config_file_full_path = str(Path(self.mdde_registry_config).resolve())\n # MDDE tmp\n temp_env_dir = self.env_temp_dir\n\n mdde_config = ConfigEnvironment(tmp_dir=temp_env_dir,\n result_dir=result_dir_path_mdde)\n\n def make_env(host: str,\n port: int,\n reg_config: str,\n env_config: ConfigEnvironment,\n write_stats: bool,\n initial_benchmark: bool = False) -> Environment:\n \"\"\"\n Configure MDDE environment to run default.\n :param host: MDDE registry host or IP.\n :param port: MDDE registry control port.\n :param reg_config: Path to MDDE registry config.\n :param env_config: Environment configuration object.\n :param write_stats: True to write additional analytics info.\n :param initial_benchmark: Execute benchmark immediately upon execution.\n :param do_nothing: Enable or disable the agents' \"do_nothing\" action.\n :return: MDDE Environment.\n \"\"\"\n\n # Ray is peculiar in the way it handles environments, passing a pre-configured environment might cause\n # unexpected behavior. 
Customize the code of this extension if more complex environment are needed\n\n # Create Registry client\n tcp_client = RegistryClientTCP(host, port, keep_open=True)\n read_client: PRegistryReadClient = tcp_client\n write_client: PRegistryWriteClient = tcp_client\n ctrl_client: PRegistryControlClient = tcp_client\n\n # Registry configuration\n config_container = ConfigRegistry()\n config_container.read(reg_config)\n\n # Create agents\n agents = list()\n idx = 0\n for node in config_container.get_nodes():\n agents.append(SingleNodeDefaultAgent(agent_name=node.id,\n agent_id=idx,\n data_node_id=node.id,\n write_stats=write_stats,\n allow_do_nothing=True))\n idx += 1\n\n # Create scenario\n scenario = DefaultScenarioSimulation(num_fragments=self.NUM_FRAGMENTS,\n num_steps_before_bench=config.bench_psteps,\n agents=agents,\n data_gen_workload=workload,\n bench_workload=workload,\n benchmark_clients=config.bench_clients,\n write_stats=write_stats) # Number of YCSB threads\n\n # Create environment\n environment = Environment(config=env_config,\n scenario=scenario,\n registry_ctrl=ctrl_client,\n registry_write=write_client,\n registry_read=read_client,\n write_stats=write_stats,\n write_obs_at_bench=False)\n # Re-generate data\n environment.initialize_registry(with_benchmark=initial_benchmark)\n\n return environment\n\n env = make_env(host=self.mdde_registry_host,\n port=self.mdde_registry_port,\n reg_config=config_file_full_path,\n env_config=mdde_config,\n write_stats=True,\n initial_benchmark=True)\n \"\"\"Initialized instance of the environment.\"\"\"\n\n episode = 0\n while episode < config.num_episodes:\n episode += 1\n logging.info(\"Episode: {}\".format(episode))\n step = 0\n obs_s, act_l_s = env.observation_space\n while step < config.ep_len:\n act_n = {}\n for agent_id, agent_legal_act in act_l_s.items():\n legal_act_indexes = np.where(agent_legal_act == 1)[0]\n act_n[agent_id] = np.random.choice(legal_act_indexes, 1, replace=True)[0]\n obs_s, reward, done, act_l_s = env.step(act_n)\n\n for idx_r, agent_reward in reward.items():\n logging.info(\"Reward at step {} for agent {}: {}\".format(step, idx_r, agent_reward))\n logging.info(\"Sum of rewards: %d\", sum(reward.values()))\n\n # self.tune_estimations(step_num=step, env=env)\n step += 1\n\n obs_s, act_l_s = env.reset()\n\n # self.out_final_results()\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('-r', '--result-dir',\n help='Results dir',\n type=str,\n default='../../../debug/debug/result/random_agent')\n parser.add_argument('-t', '--temp-dir',\n help='Temp folder (ray temporary files)',\n type=str,\n default='../../../debug/debug/temp/random_agent')\n parser.add_argument('--reg-host',\n help='MDDE registry host or IP',\n type=str,\n default='localhost')\n parser.add_argument('--reg-port',\n help='MDDE registry control TCP port',\n type=int,\n default=8942)\n parser.add_argument('--env-temp-dir',\n help='Directory for temporary files created by the scenario or agents',\n type=str,\n default='../../../debug/agents')\n parser.add_argument('-c', '--config',\n help='Path to the MDDE registry configuration YAML',\n type=str,\n default='../../../debug/registry_config.yml')\n\n parser.add_argument('--bench-psteps',\n help='Frequency of benchmark execution (execute every N steps).',\n type=int,\n default=1)\n\n parser.add_argument('--bench-clients',\n help='Number of benchmark clients.',\n type=int,\n default=50)\n\n parser.add_argument('--light',\n help='Execute corresponding \"light\" 
workload.',\n action='store_true')\n\n parser.add_argument('--out-file',\n help='Dump all output to the specificed file',\n type=str,\n default=None)\n\n parser.add_argument('--num_episodes',\n help='Total number of episodes.',\n type=int,\n default=100)\n parser.add_argument('--ep_len',\n help='Number of steps per episode.',\n type=int,\n default=1001)\n\n config = parser.parse_args()\n\n if config.out_file:\n fileHandler = logging.FileHandler(config.out_file)\n logging.getLogger().addHandler(fileHandler)\n\n MDDERandomLegalAgents.run_result_dir = config.result_dir\n MDDERandomLegalAgents.ray_temp_dir = config.temp_dir\n\n MDDERandomLegalAgents.mdde_registry_host = config.reg_host\n MDDERandomLegalAgents.mdde_registry_port = config.reg_port\n MDDERandomLegalAgents.mdde_registry_config = config.config\n\n MDDERandomLegalAgents.env_temp_dir = config.env_temp_dir\n\n workload = EDefaultYCSBWorkload.READ_10000_100000_LATEST_LARGE\n if config.light:\n workload = EDefaultYCSBWorkload.READ_10000_100000_LATEST\n\n runner = MDDERandomLegalAgents()\n runner.run(config, workload)\n"
] |
[
[
"numpy.where",
"numpy.random.choice"
]
] |
rtmlsh/Future-salary
|
[
"2d8de49be939c1752e2fb3e6a5a46a9bf42f7ad9"
] |
[
"fetch_sj_vacancies.py"
] |
[
"from itertools import count\n\nimport numpy\nimport requests\n\nfrom count_average_salaries import predict_salary\n\n\ndef search_sj_vacancies(language, sj_token, page=None,\n job_area=33, publish_period=30, city_num=4):\n url = 'https://api.superjob.ru/2.0/vacancies/'\n header = {'X-Api-App-Id': sj_token}\n payload = {\n 'catalogues': job_area,\n 'period': publish_period,\n 'keyword': f'Программист {language}',\n 'town': city_num,\n 'page': page\n }\n response = requests.get(url, headers=header, params=payload)\n response.raise_for_status()\n return response.json()\n\n\ndef get_salaries(language, sj_token):\n salaries = []\n for page in count(0):\n vacancies_page = search_sj_vacancies(language, sj_token, page=page)\n salaries.extend(predict_sj_rub_salaries(vacancies_page['objects']))\n if not vacancies_page['more']:\n break\n return salaries, vacancies_page['total']\n\n\ndef predict_sj_rub_salaries(vacancies):\n salary_range = []\n for vacancy in vacancies:\n if vacancy['currency'] == 'rub':\n payment_from = vacancy['payment_from']\n payment_to = vacancy['payment_to']\n average_salary = predict_salary(payment_from, payment_to)\n if average_salary:\n salary_range.append(average_salary)\n return salary_range\n\n\ndef get_sj_salary_stats(programming_languages, sj_token):\n salary_statistics = {}\n for language in programming_languages:\n predicted_salaries, vacancies_found = get_salaries(\n language,\n sj_token\n )\n salary_statistics[language] = {\n 'vacancies_found': vacancies_found,\n 'vacancies_processed': len(predicted_salaries),\n 'average_salary': int(numpy.mean(predicted_salaries))\n }\n return salary_statistics\n"
] |
[
[
"numpy.mean"
]
] |
sando-io/pdsando
|
[
"9f9cbf74b4ec189acb17958771149d32b737866a"
] |
[
"pdsando/ta/datafeeds/tsdata.py"
] |
[
"import pandas as pd\n\n\ndef to_time_series_data(df, timespan, multiplier, index_col=None, source=None, category=None):\n temp = df.copy()\n ts_vals = temp[index_col] if index_col else temp.index.to_series()\n idx_name = index_col or temp.index.name\n\n temp[idx_name] = match_to_resolution(\n ts_vals, Resolution(timespan, multiplier))\n temp.set_index(idx_name, inplace=True)\n\n return TimeSeriesData(temp, timespan=timespan, multiplier=multiplier, source=source, category=category)\n\n\ndef match_to_resolution(ts_series, resolution):\n seconds_from_epoch = (\n ts_series - pd.Timestamp(\"1970-01-01\")) // pd.Timedelta('1s')\n seconds_of_day = 3600*ts_series.dt.hour + 60 * \\\n ts_series.dt.minute + ts_series.dt.second\n\n if resolution.timespan == 'second':\n seconds_to_delete = seconds_of_day % resolution.multiplier\n elif resolution.timespan == 'minute':\n seconds_to_delete = seconds_of_day % (resolution.multiplier * 60)\n elif resolution.timespan == 'hour':\n seconds_to_delete = seconds_of_day % (resolution.multiplier * 3600)\n elif resolution.timespan == 'day':\n seconds_to_delete = (((ts_series.dt.day-1) %\n resolution.multiplier) * 86400) + seconds_of_day\n # elif timespan == 'week':\n # seconds_to_delete = (((ts_series.dt.week-1) % resolution.multiplier) * 604800) + seconds_of_day\n else:\n raise ValueError(\n 'timespan may only be one of: second, minute, hour, day, week')\n\n return pd.to_datetime((seconds_from_epoch - seconds_to_delete), unit='s', origin='unix')\n\n\nclass TimeSeriesData(pd.DataFrame):\n\n def __init__(self, *args, **kwargs):\n # Init Pandas DataFrame\n super().__init__(*args, **\n {k: kwargs[k] for k in kwargs if k not in ['timespan', 'multiplier', 'source', 'category']})\n self.sort_index(inplace=True)\n\n # Store additional details specific to PriceData\n self._source = self._category = self._ts_col = None\n self._resolution = Resolution(kwargs['timespan'], kwargs['multiplier'])\n self.source = kwargs.get('source') or 'unknown'\n self.category = kwargs.get('category') or 'stocks'\n\n @property\n def timespan(self): return self._resolution.timespan\n @property\n def multiplier(self): return self._resolution.multiplier\n @property\n def resolution(self): return self._resolution\n\n @property\n def source(self): return self._source\n\n @source.setter\n def source(self, value):\n self._source = value\n\n @property\n def category(self): return self._category\n\n @category.setter\n def category(self, value):\n valid_categories = ('stocks', 'forex', 'crypto')\n if value not in valid_categories:\n raise ValueError(f'Category must be one of: {valid_categories}')\n self._category = value\n\n # Ensure DataFrame copy() returns another TimeSeriesData obj\n def copy(self, **kwargs):\n return TimeSeriesData(super().copy(**kwargs), timespan=self.timespan, multiplier=self.multiplier, source=self._source, category=self._category)\n\n def supplement(self, sup_data):\n if not isinstance(sup_data, TimeSeriesData):\n raise AttributeError(\n 'Supplementary data must be of type TimeSeriesData')\n\n if sup_data.resolution > self.resolution:\n sup_data['_join_ts'] = match_to_resolution(\n sup_data.index.to_series(), self.resolution)\n sup_data['_rank'] = sup_data.index.to_series().groupby(\n sup_data['_join_ts']).rank(method='first', ascending=True)\n temp = self.join(sup_data[sup_data['_rank'] == 1.0].set_index(\n '_join_ts'), rsuffix='_model', how='left')\n elif sup_data.resolution < self.resolution:\n self['_join_ts'] = match_to_resolution(\n self.index.to_series(), 
sup_data.resolution)\n temp = self.join(sup_data, on='_join_ts',\n rsuffix='_model', how='left')\n else:\n temp = self.join(sup_data, rsuffix='_model', how='left')\n\n return temp.drop(['_join_ts', '_rank'], errors='ignore', axis=1)\n\n # Up/downscale data resolution\n def match_resolution(self, model_data):\n if not isinstance(model_data, TimeSeriesData):\n raise AttributeError('Model data must be of type TimeSeriesData')\n\n if model_data.timespan != self.timespan:\n raise NotImplementedError('Currently cannot support matching different timespan (source: {} | target: {})'.format(\n self.timespan, model_data.timespan))\n if model_data.multiplier == self.multiplier:\n return\n\n if model_data.multiplier > self.multiplier:\n temp = self.join(model_data, rsuffix='_model', how='inner')\n else:\n temp = self.join(model_data, rsuffix='_model',\n how='right').fillna(method='ffill')\n\n return TimeSeriesData(temp[list(self.columns)].copy(), timespan=self.timespan, multiplier=self.multiplier, source=self._source, category=self._category)\n\n\nclass Resolution:\n\n valid_timespans = {\n 'second': 5,\n 'minute': 4,\n 'hour': 3,\n 'day': 2,\n 'week': 1\n }\n\n def __init__(self, timespan, multiplier):\n self._timespan = self._multiplier = None\n self.timespan = timespan\n self.multiplier = multiplier\n\n @property\n def timespan(self): return self._timespan\n\n @timespan.setter\n def timespan(self, value):\n vt = list(Resolution.valid_timespans.keys())\n if value not in vt:\n raise ValueError(f'Timespan must be one of: {vt}')\n self._timespan = value\n\n @property\n def multiplier(self): return self._multiplier\n\n @multiplier.setter\n def multiplier(self, value):\n if not isinstance(value, int):\n raise TypeError(\n f'Multiplier must be an integer, not {type(value)}')\n if int(value) <= 0:\n raise ValueError(\n f'Multiplier must be a valid integer greater than 0')\n if value in ('second', 'minute') and int(value) > 60:\n raise ValueError(\n f'Multiplier must be between 1 and 60 for timespan: {self._timespan}')\n elif value == 'hour' and int(value) > 24:\n raise ValueError(\n f'Multiplier must be between 1 and 24 for timespan: {self._timespan}')\n self._multiplier = int(value)\n\n def __lt__(self, other):\n return (\n Resolution.valid_timespans[self.timespan] < Resolution.valid_timespans[other.timespan]\n or\n (\n Resolution.valid_timespans[self.timespan] == Resolution.valid_timespans[other.timespan]\n and\n # Higher multiplier means lower resolution\n self.multiplier > other.multiplier\n )\n )\n\n def __le__(self, other):\n return (\n Resolution.valid_timespans[self.timespan] < Resolution.valid_timespans[other.timespan]\n or\n (\n Resolution.valid_timespans[self.timespan] == Resolution.valid_timespans[other.timespan]\n and\n # Higher multiplier means lower resolution\n self.multiplier >= other.multiplier\n )\n )\n\n def __eq__(self, other):\n return (\n Resolution.valid_timespans[self.timespan] == Resolution.valid_timespans[other.timespan]\n and\n self.multiplier == other.multiplier\n )\n\n def __ne__(self, other):\n return (\n Resolution.valid_timespans[self.timespan] != Resolution.valid_timespans[other.timespan]\n or\n self.multiplier != other.multiplier\n )\n\n def __gt__(self, other):\n return (\n Resolution.valid_timespans[self.timespan] > Resolution.valid_timespans[other.timespan]\n or\n (\n Resolution.valid_timespans[self.timespan] == Resolution.valid_timespans[other.timespan]\n and\n # Lower multiplier means higher resolution\n self.multiplier < other.multiplier\n )\n )\n\n def __ge__(self, 
other):\n return (\n Resolution.valid_timespans[self.timespan] > Resolution.valid_timespans[other.timespan]\n or\n (\n Resolution.valid_timespans[self.timespan] == Resolution.valid_timespans[other.timespan]\n and\n # Lower multiplier means higher resolution\n self.multiplier <= other.multiplier\n )\n )\n"
] |
[
[
"pandas.to_datetime",
"pandas.Timedelta",
"pandas.Timestamp"
]
] |
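A minimal usage sketch for the file above, assuming the module is importable under the file path shown (pdsando.ta.datafeeds.tsdata): per-minute timestamps collapse onto 5-minute boundaries, and the Resolution ordering treats a larger multiplier within the same timespan as a lower resolution.

import pandas as pd
from pdsando.ta.datafeeds.tsdata import Resolution, match_to_resolution  # import path assumed from the row above

ts = pd.Series(pd.date_range('2021-01-04 09:30', periods=4, freq='1min'))
print(match_to_resolution(ts, Resolution('minute', 5)).tolist())
# all four timestamps land on the 09:30 five-minute boundary

# ordering semantics: 1-minute bars are a *higher* resolution than 5-minute bars
assert Resolution('minute', 1) > Resolution('minute', 5)
assert Resolution('second', 30) > Resolution('hour', 1)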
FabioCLima/Predict-Clients-Default
|
[
"6d5c9879dabffbca60f574ecedaabf5468eda060"
] |
[
"src/main.py"
] |
[
"# %%\nimport os\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nimport seaborn as sns\nmpl.rcParams['figure.dpi']=400\n\nSRC = os.path.abspath('.')\nBASE = os.path.dirname(SRC)\nDATA = os.path.join(BASE, 'data')\nMODELS = os.path.join(BASE, 'models')\nFIGS = os.path.join(BASE, 'figs')\n\n# %%\n#! Dados de entrada para a criação do modelo\ninput_file = 'Chapter_1_cleaned_data.csv'\nfile_path = os.path.join(DATA, input_file)\ndf = pd.read_csv(file_path)\ndf.head()\n# %%\n#! Proporção da classe positiva --> média de inadiplência\ndf['default payment next month'].mean()\n# %%\n#! Proporção de cada classe no dataset\ndf.groupby('default payment next month')['ID'].count()\n# %%\ndf_new = df.drop(columns=['ID']).copy()\ndata = df_new.drop(columns=['default payment next month']).copy()\n# %%\ntarget = df_new['default payment next month']\n# %%\ntarget.value_counts().plot.barh()\n_ = plt.title('Número de amostras por classe presente\\n no target')\n# %%\nfrom sklearn.linear_model import LogisticRegression\nclassifier = LogisticRegression()\nclassifier\n# %%\nclassifier = LogisticRegression(\n C=1.0,\n class_weight=None,\n dual=False,\n fit_intercept=True,\n intercept_scaling=1,\n max_iter=100,\n multi_class='auto',\n n_jobs=None,\n penalty='l2',\n random_state=123,\n solver='warn',\n tol=0.0001,\n verbose=0,\n warm_start=False\n)\n# %%\nprint(classifier)\n# %%\nclassifier.C = 0.1\nclassifier.solver = 'liblinear'\nclassifier\n# %%\nX = df['EDUCATION'][0:10].values.reshape(-1, 1)\nX\n# %%\ny = df['default payment next month'][0:10].values\ny\n# %%\nclassifier.fit(X, y)\n# %%\nnew_X = df['EDUCATION'][10:20].values.reshape(-1, 1)\nnew_X\n# %%\nclassifier.predict(new_X)\n# %%\ndf['default payment next month'][10:20].values\n# %%\ntarget_test = df['default payment next month'][10:20].values\ntarget_predicted = classifier.predict(new_X)\nresposta = np.mean(target_test == target_predicted)\nprint(f\"O modelo com apenas uma variável, previu a não inadiplência com\\n {resposta*100:.2f}%\")\n# %%\nnp.random.seed(seed=1)\nX = np.random.uniform(low=0.0, high=10.0, size=(1000,))\nX[0:10]\n# %%\n#! y = ax + b + N(mi, sigma)\nnp.random.seed(seed=1)\nslope = 0.25\nintercept = -1.25\ny = slope * X + np.random.normal(loc=0.0, scale=1.0, size=(1000,)) + intercept \n# %%\nmpl.rcParams['figure.dpi']=400\nplt.scatter(X, y, s=1)\n# %%\nfrom sklearn.linear_model import LinearRegression\nlin_reg = LinearRegression()\nlin_reg\n# %%\nlin_reg.fit(X.reshape(-1, 1), y)\nprint(lin_reg.intercept_)\nprint(lin_reg.coef_)\n# %%\ny_pred = lin_reg.predict(X.reshape(-1, 1))\nplt.scatter(X, y, s=1)\nplt.plot(X, y_pred, 'r')\n# %%\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(\n df['EDUCATION'].values.reshape(-1, 1), \n df['default payment next month'].values,\n test_size=0.2, random_state=24,\n stratify=df['default payment next month'].values\n)\n# %%\nprint(X_train.shape)\nprint(X_test.shape)\nprint(y_train.shape)\nprint(y_test.shape)\n# %%\nnp.mean(y_train) # 1 = inadiplência\n\n# %%\nnp.mean(y_test) # 1 = inadiplência\n# %%\nclassifier = LogisticRegression(C=0.1,\n class_weight=None,\n dual=False,\n fit_intercept=True,\n intercept_scaling=1,\n max_iter=100,\n multi_class='auto',\n n_jobs=None,\n penalty='l2',\n random_state=123,\n solver='liblinear',\n tol=0.0001,\n verbose=0,\n warm_start=False\n)\n# %%\nclassifier.fit(X_train, y_train)\ny_pred = classifier.predict(X_test)\n# %%\n#! 
Determinar a acurácia\nis_correct = y_pred == y_test\nnp.mean(is_correct)\n# %%\nclassifier.score(X_test, y_test)\n# %%\nfrom sklearn.metrics import accuracy_score\naccuracy_score(y_test, y_pred)\n# %%\nfrom sklearn.dummy import DummyClassifier\n\ndummy_classifier = DummyClassifier(strategy=\"most_frequent\")\ndummy_classifier.fit(X_train, y_train)\nscore_dummy = dummy_classifier.score(X_test, y_test)\nprint(f\"Acurácia do dummy classifier é {score_dummy*100:.3f}%\")\n# %%\nP = sum(y_test)\nP #! amostras positivas\n# %%\n# verdadeiros positivos\nTP = sum((y_test==1) & (y_pred==1))\nTP\n# %%\nTPR = TP/P\nTPR\n# %%\n# falsos negativos\nFN = sum((y_test==1) & (y_pred==0))\nFN\n# %%\nFNR = FN/P \nFNR\n# %%\nN = sum(y_test==0)\nN\n# %%\nTN = sum((y_test==0) & (y_pred==0))\nTN\n# %%\nFP = sum((y_test==0) & (y_pred==1))\nFP\n# %%\nTNR = TN/N\nFPR = FP/N \nprint(f\"The true positive rate is {TNR} and the false positive rate is {FPR}\")\n# %%\nfrom sklearn.metrics import confusion_matrix\nconfusion_matrix(y_test, y_pred)\n# %%\nfrom sklearn.metrics import plot_confusion_matrix\n_ = plot_confusion_matrix(classifier, X_test, y_test)\n# %%\n"
] |
[
[
"pandas.read_csv",
"sklearn.linear_model.LogisticRegression",
"numpy.random.seed",
"matplotlib.pyplot.title",
"matplotlib.pyplot.scatter",
"sklearn.dummy.DummyClassifier",
"sklearn.metrics.plot_confusion_matrix",
"sklearn.metrics.confusion_matrix",
"matplotlib.pyplot.plot",
"numpy.random.normal",
"numpy.mean",
"sklearn.linear_model.LinearRegression",
"numpy.random.uniform",
"sklearn.metrics.accuracy_score"
]
] |
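The notebook above compares a one-feature logistic regression against a majority-class baseline; the point generalizes. A self-contained sketch (synthetic data, not the book's credit-default dataset) of why raw accuracy is uninformative when roughly 78% of samples are the negative class:

import numpy as np
from sklearn.dummy import DummyClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split

rng = np.random.default_rng(24)
X = rng.normal(size=(3000, 1))                 # a feature with no signal
y = (rng.random(3000) < 0.22).astype(int)      # ~22% positives, like the default target

X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.2, random_state=24, stratify=y)

dummy = DummyClassifier(strategy='most_frequent').fit(X_tr, y_tr)
model = LogisticRegression(C=0.1, solver='liblinear').fit(X_tr, y_tr)

# Both land near 78% because the feature carries no signal; the baseline sets the floor.
print(f"dummy: {dummy.score(X_te, y_te):.3f}  logreg: {model.score(X_te, y_te):.3f}")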
PromodhPinto/anuvaad-corpus-tools
|
[
"8b7f7ab02c3dea2096e1de17c6853b3456b2bae3"
] |
[
"newsonair-crawler/newsonair_scraper.py"
] |
[
"#File contains code to scrape & create En-Hi CSV from newsonair\n\nimport re\nimport time\nimport pandas as pd\nfrom ast import literal_eval\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom indicnlp.tokenize import sentence_tokenize\nfrom nltk.tokenize import sent_tokenize \n\n\nENG_URL = \"http://newsonair.com/Text-Archive-Search.aspx\"\nHIN_URL = \"http://newsonair.com/hindi/Hindi-Text-Archive-Search.aspx\"\nFROM_DATE = \"arguments[0].setAttribute('value', '09/01/2020 12:00 AM')\"\nTO_DATE = \"arguments[0].setAttribute('value','09/2/2020 12:00 PM')\"\n\n#temporary output file paths\nENG_CSV = \"/home/eng_csv.csv\"\nHIN_CSV = \"/home/hin_csv.csv\"\n\n\n#function to scrap URL's\n#status WIP. It works but might hang in between without switching page\ndef get_href(url):\n driver = webdriver.Chrome(ChromeDriverManager().install())\n driver.get(url)\n a=driver.find_element_by_id('ctl00_ContentPlaceHolder1_from_Date_txt')\n driver.execute_script(FROM_DATE,a)\n b=driver.find_element_by_id('ctl00_ContentPlaceHolder1_to_Date_txt')\n driver.execute_script(TO_DATE, b)\n driver.find_element_by_xpath('//*[@id=\"ctl00_ContentPlaceHolder1_Button1\"]').click()\n j=100\n href_links=[]\n \n\n t_end = time.time() + 60\n while time.time() < t_end:\n try:\n driver.implicitly_wait(5)\n parent=driver.find_element_by_id('ctl00_ContentPlaceHolder1_pnlHelloWorld')\n driver.implicitly_wait(5)\n links1=parent.find_elements_by_tag_name(\"a\")\n for el in links1:\n if el.get_attribute('href') == None or not re.search('^http', el.get_attribute('href')):\n break\n else:\n # print(el.get_attribute(\"href\"))\n if el.get_attribute(\"href\") not in href_links:\n href_links.append(el.get_attribute(\"href\"))\n \n driver.find_element_by_xpath('//*[@id=\"ctl00_ContentPlaceHolder1_lbNext\"]').click() \n \n \n except Exception as e:\n print(e)\n pass\n print(len(href_links))\n return(href_links)\n\n\n\ndef get_df(link1):\n\n date = []\n timex = []\n title = []\n data = []\n links = []\n\n href_links = get_href(link1)\n driver = webdriver.Chrome(ChromeDriverManager().install())\n for link2 in href_links:\n \n try :\n print(link2)\n driver.get(link2)\n user_data=driver.find_element_by_xpath('//*[@id=\"ctl00_ContentPlaceHolder1_FormView1\"]/tbody/tr/td') \n # get the whole body text \n text_data=user_data.text \n list2=text_data.split('\\n')\n #get date \n date.append(list2[0])\n # get time \n timex.append(list2[2]) \n #get title \n title.append(list2[3]) \n # get all the data \n sentences=[sen.strip() for sen in list2[5:] if len(sen)>1]\n data.append(sentences)\n #get links \n links.append(link2)\n \n except Exception as e:\n print('check the id',link2)\n print(e)\n \n df=pd.DataFrame()\n df['time']=timex\n df['date']=date\n df['title']=title\n df['link']=links\n df['data']=data\n print(df)\n return(df)\n\nhi_df = get_df(HIN_URL)\nhi_df.to_csv(HIN_CSV)\n\nen_df = get_df(ENG_URL)\nen_df.to_csv(ENG_CSV)\n"
] |
[
[
"pandas.DataFrame"
]
] |
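The get_href comment above notes that the time-boxed loop may hang without advancing pages. One common remedy (a sketch, not part of the repo) is to replace the fixed 60-second window with Selenium explicit waits on the pagination link:

from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.common.exceptions import TimeoutException

def click_next(driver, timeout=10):
    """Click the 'next page' link once it is clickable; return False when it is gone."""
    try:
        nxt = WebDriverWait(driver, timeout).until(
            EC.element_to_be_clickable((By.ID, 'ctl00_ContentPlaceHolder1_lbNext')))
        nxt.click()
        return True
    except TimeoutException:
        return False  # no more pages, or the page failed to load in time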
ExcitedStates/qfit-3.0
|
[
"8ed8e8f44015e4eb30fed7a5da65819a586c2bbf"
] |
[
"setup.py"
] |
[
"'''\nExcited States software: qFit 3.0\n\nContributors: Saulo H. P. de Oliveira, Gydo van Zundert, and Henry van den Bedem.\nContact: [email protected]\n\nCopyright (C) 2009-2019 Stanford University\nPermission is hereby granted, free of charge, to any person obtaining a copy of\nthis software and associated documentation files (the \"Software\"), to deal in\nthe Software without restriction, including without limitation the rights to\nuse, copy, modify, merge, publish, distribute, sublicense, and/or sell copies\nof the Software, and to permit persons to whom the Software is furnished to do\nso, subject to the following conditions:\n\nThis entire text, including the above copyright notice and this permission notice\nshall be included in all copies or substantial portions of the Software.\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS, CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\nOTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\nFROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\nIN THE SOFTWARE.\n'''\n\nimport os.path\nfrom setuptools import setup\nfrom setuptools import find_packages\nfrom setuptools.extension import Extension\nimport numpy as np\n\n\ndef main():\n package_dir = {'': 'src'}\n packages = find_packages('src')\n package_data = {'qfit': [os.path.join('data', '*.npy'), ]}\n\n ext_modules = [Extension(\"qfit._extensions\",\n [os.path.join(\"src\", \"_extensions.c\")],\n include_dirs=[np.get_include()],),\n ]\n setup_requires = [\n 'setuptools_scm',\n ]\n install_requires = [\n 'numpy>=1.14',\n 'scipy>=1.00',\n 'pyparsing>=2.2.0',\n 'tqdm>=4.0.0',\n ]\n\n setup(name=\"qfit\",\n use_scm_version=True,\n author='Blake Riley, Stephanie A. Wankowicz, Gydo C.P. van Zundert, Saulo H.P. 
de Oliveira, and Henry van den Bedem',\n author_email='[email protected]',\n project_urls={'Documentation': 'https://github.com/ExcitedStates/qfit-3.0/'},\n package_dir=package_dir,\n packages=packages,\n package_data=package_data,\n ext_modules=ext_modules,\n setup_requires=setup_requires,\n install_requires=install_requires,\n zip_safe=False,\n python_requires='>=3.6', \n entry_points={\n 'console_scripts': [\n 'qfit_protein = qfit.qfit_protein:main',\n 'qfit_residue = qfit.qfit_residue:main',\n 'qfit_ligand = qfit.qfit_ligand:main',\n 'qfit_covalent_ligand = qfit.qfit_covalent_ligand:main',\n 'qfit_segment = qfit.qfit_segment:main',\n 'qfit_prep_map = qfit.qfit_prep_map:main',\n 'qfit_density = qfit.qfit_density:main',\n 'qfit_mtz_to_ccp4 = qfit.mtz_to_ccp4:main',\n 'edia = qfit.edia:main',\n 'relabel = qfit.relabel:main',\n 'remove_altconfs = qfit.remove_altconfs:main',\n 'side_chain_remover = qfit.side_chain_remover:main',\n 'redistribute_cull_low_occupancies = qfit.redistribute_cull_low_occupancies:main',\n 'fix_restraints = qfit.fix_restraints:main',\n 'qfit_ppiDesign = qfit.qfit_ppiDesign:main',\n 'add_non_rotamer_atoms = qfit.add_non_rotamer_atoms:main',\n 'remove_duplicates = qfit.remove_duplicates:main'\n ]},\n scripts=[\n 'scripts/post/qfit_final_refine_xray.sh',\n 'scripts/post/qfit_final_refine_cryoem.sh',\n 'scripts/post/find_largest_ligand.py',\n 'scripts/post/find_altlocs_near_ligand.py',\n 'scripts/post/qfit_RMSF.py',\n 'scripts/post/find_altlocs_near_ligand.py',\n 'scripts/post/compare_apo_holo.py',\n 'scripts/post/get_metrics.py',\n 'scripts/post/b_factor.py',\n 'scripts/post/subset_structure_AH.py'\n ],\n )\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"numpy.get_include"
]
] |
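The only NumPy call in the setup script above is np.get_include(), which injects NumPy's C headers into the extension build. Stripped to its core, the pattern looks like this (a generic sketch with placeholder module and file names, not qfit's actual layout):

# setup.py for any package with a C extension that includes <numpy/arrayobject.h>
from setuptools import setup
from setuptools.extension import Extension
import numpy as np

ext = Extension(
    "mypkg._fast",                    # hypothetical extension module name
    sources=["src/_fast.c"],          # hypothetical C source file
    include_dirs=[np.get_include()],  # makes NumPy headers visible to the compiler
)

setup(name="mypkg", ext_modules=[ext])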
jiazhi412/Dataset-REPAIR
|
[
"581df7b0e0408247cc63e411d3ea2bb0191c4148"
] |
[
"colored_mnist.py"
] |
[
"import torch\nfrom torch.utils.data import DataLoader\nfrom torchvision import datasets, transforms\nimport os\n\nfrom utils.datasets import ColoredDataset\nfrom utils.measure import *\nfrom utils.models import *\n\nimport argparse\nparser = argparse.ArgumentParser()\n# parser.add_argument('--gpu', default=0, type=int)\nparser.add_argument('--with-cuda', dest='cuda', action='store_true')\nparser.add_argument('--model', default='lenet', choices=['lenet', 'mlp'], type=str)\nparser.add_argument('--color-std', type=float, default=0.1)\nparser.add_argument('--batch-size', default=128, type=int)\nparser.add_argument('--lr', '--learning-rate', default=1e-2, type=float)\nparser.add_argument('--epochs', default=20, type=int)\nargs = parser.parse_args()\n\nopt = vars(parser.parse_args())\nopt['device'] = torch.device('cuda' if opt['cuda'] else 'cpu')\n\nroot = './data'\nif not os.path.exists(root):\n os.mkdir(root)\n\n# load data\ntrain_set = datasets.MNIST(root=root, train=True, download=True, transform=transforms.ToTensor())\ntest_set = datasets.MNIST(root=root, train=False, download=True, transform=transforms.ToTensor())\n\n# biased datasets, i.e. colored mnist\nprint('Coloring MNIST dataset with standard deviation = {:.2f}'.format(args.color_std))\ncolored_train_set = ColoredDataset(train_set, classes=10, colors=[0, 1], std=args.color_std)\ntrain_loader = DataLoader(colored_train_set, batch_size=args.batch_size, shuffle=True, num_workers=4, pin_memory=True)\ncolored_test_set = ColoredDataset(test_set, classes=10, colors=colored_train_set.colors, std=args.color_std)\ntest_loader = DataLoader(colored_test_set, batch_size=args.batch_size, shuffle=False, num_workers=4, pin_memory=True)\n\n# grount-truth datasets, i.e. grayscale mnist\ngt_train_set = ColoredDataset(train_set, classes=10, colors=[1, 1], std=0)\ngt_train_loader = DataLoader(gt_train_set, batch_size=args.batch_size, shuffle=True, num_workers=4, pin_memory=True)\ngt_test_set = ColoredDataset(test_set, classes=10, colors=[1, 1], std=0)\ngt_test_loader = DataLoader(gt_test_set, batch_size=args.batch_size, shuffle=False, num_workers=4, pin_memory=True)\n\n# measure bias\ncolor_fn = lambda x: x.view(x.size(0), x.size(1), -1).max(2)[0] # color of digit\ncolor_dim = 3 # rgb\nbias = measure_bias(train_loader, test_loader, color_fn, color_dim, opt)[0]\ngt_bias = measure_bias(gt_train_loader, gt_test_loader, color_fn, color_dim, opt)[0]\nprint('Color bias of Colored MNIST = {:.3f}'.format(bias + 0))\nprint('Color bias of Grayscale MNIST = {:.3f}'.format(gt_bias + 0))\n\n# measure generalization\nmodel = create_mnist_model(args.model)\nacc, gt_acc = measure_generalization(train_loader, [test_loader, gt_test_loader], model, opt)\nprint('Test accuracy on Colored MNIST = {:.2%}'.format(acc))\nprint('Generalization on Grayscale MNIST = {:.2%}'.format(gt_acc))"
] |
[
[
"torch.device",
"torch.utils.data.DataLoader"
]
] |
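The color_fn above reduces each image to its per-channel maximum, which recovers the digit's color because colored-MNIST backgrounds are black. A tiny standalone sketch of what that lambda computes:

import torch

x = torch.zeros(2, 3, 28, 28)        # batch of 2 RGB images on a black background
x[0, 0, 10:15, 10:15] = 0.9          # red-ish digit in image 0
x[1, 1, 10:15, 10:15] = 0.7          # green-ish digit in image 1

color_fn = lambda t: t.view(t.size(0), t.size(1), -1).max(2)[0]
print(color_fn(x))
# tensor([[0.9000, 0.0000, 0.0000],
#         [0.0000, 0.7000, 0.0000]])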
LeeDaeil/Process_A3C
|
[
"1876fbe1b928e13b9c8766095b2d13abfda94019"
] |
[
"Step_1/Pro2.py"
] |
[
"import multiprocessing\nfrom matplotlib import pyplot as plt\nfrom matplotlib import animation\n\n\nclass Pro2(multiprocessing.Process):\n def __init__(self, shared_mem):\n multiprocessing.Process.__init__(self)\n\n self.shared_mem = shared_mem # Main.py 에서 선언했던 Shared memory 를 가져옴\n\n self.fig = plt.figure() # 그래프를 그리기 위해서 matplotlib 에서 figure()을 선언함.\n self.ax = self.fig.subplots() # figure()는 subplots()라는 하위 함수를 동봉하고 있다.\n # 이를 통해서 subplot 을 작성 할 수 있다.\n # matplotlib는 2단계로 구성할 수 있다.\n # 그림을 그릴때 쓰는 캔버스를 figure() 이라고 하며, 캔버스(figure())에는 하위 그래프가 그려진다.\n # 즉,\n # [ plt() ] < - [ figure() ] <- [ ax() ]\n # 과 같은 구조를 가진다.\n\n def run(self):\n # matplotlib 을 통하여 애니메이션을 그리기 위해서 아래 명령어를 작성한다.\n # 이때 주의 할점은 FuncAnimation() 함수는 thread 라이브러리를 내장하고 있다.\n #\n # CPU는 하나의 코어에 2가지 쓰레드가 존재하며 ( 저가의 CPU는 쓰레드가 1개 ), 1개의 쓰레드를\n # 통해서 여러개의 프로세스를 처리 할 수 있다.\n # 1개 쓰레드는 다중 프로세스를 처리할 수 있으며, 파이썬의 경우 1개의 Process 에 다중 Thread 를\n # 선언하여 사용 할 수 있다.\n\n #anim = animation.FuncAnimation(self.fig, self.animate, interval=60) # 60초 간격으로 그래프 업데이트\n anim = animation.FuncAnimation(self.fig, self.animate_ver2, interval=60) # 60초 간격으로 그래프 업데이트\n plt.show() # plt.show()를 통해서 animation 되고 있는 그래프 표현\n\n def animate(self, i):\n # animation 을 할 때는 기본적으로 2가지 과정을 거친다.\n\n # 1. 이전까지 그렸던 그래프를 지운다.\n self.ax.clear()\n\n # 2. 다시 재 빌드 한다.\n self.ax.plot(self.shared_mem['x축_데이터'], self.shared_mem['y축_데이터'])\n\n\n def animate_ver2(self, i):\n # 또한 기존의 데이터를 사용자가 원하는 입맛에 변경해서 사용도 가능하다.\n # 다음 예제코드는 사용자의 함수를 추가한 버전이다.\n\n # 0. 사용자가 작성한 함수를 가져오거나 작성한다.\n # 이때 주의할 점은 반드시 그래프의 x값과 y값의 길이를 동일하게 하자. 이 부분이 가장 오류가 많이\n # 유발되는 부분인 것같다.\n def user_function(mem):\n out_data = [] # 출력 값을 생산하기 위해서 빈 리스트를 만든다.\n for i in mem['x축_데이터']:\n result = i * i/2 - 2 * i # 사용자가 원하는 함수\n out_data.append(result) # 계산된 결과를 출력 리스트에 append\n return out_data # 출력 값을 반환\n\n # 1. 이전까지 그렸던 그래프를 지운다.\n self.ax.clear()\n\n # 2. 그래프를 재 빌드 한다.\n # 이 부분은 format 합수의 특징과 ax.clear()의 특징을 생각해서 실시간으로 변화하는 값을 범례에 업데이트\n # 하는 아이디어를 구현한 것이다. 이 처럼 각 함수의 특징과 기능을 잘 생각하면 간단하고 멋진 결과를 얻을 수\n # 있다.!\n self.ax.plot(self.shared_mem['x축_데이터'], self.shared_mem['y축_데이터'], label='Test1 : {}'.format(self.shared_mem['y축_데이터'][-1]))\n # label='Test1 : {}'.format(self.shared_mem['y축_데이터'][-1])\n # 1) label = '내용'\n # 범례의 이름을 입력하는 것이다.\n # 2) label = '내용 {}'.format(변수)\n # 내용 뿐만아니라 변수 값도 출력 하는 방법이다.\n # 3) label = '내용 {}'.format(변수리스트[-1])\n # 변수 리스트의 가장 오른쪽 값, 즉 가장 마지막 데이터를 추출한다. 이 경우 그래프 가장 마지막 부분의 값을\n # 의미한다.\n out_data_list = user_function(self.shared_mem)\n self.ax.plot(self.shared_mem['x축_데이터'], out_data_list, label='Test2 : {}'.format(out_data_list[-1]))\n\n # 3. 범례 표시\n self.ax.legend()\n\n # 4. 제목 표시\n self.ax.set_title('Test graph')"
] |
[
[
"matplotlib.pyplot.show",
"matplotlib.animation.FuncAnimation",
"matplotlib.pyplot.figure"
]
] |
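A runnable skeleton of the producer/consumer setup the class above assumes: Main.py is not shown in this row, so this sketch supplies a multiprocessing.Manager dict with the same keys ('x축_데이터', 'y축_데이터') and a writer process. Note that Manager dicts only see whole-value reassignments, not in-place list appends.

import time
import multiprocessing
from matplotlib import pyplot as plt
from matplotlib import animation

def producer(mem):
    for i in range(200):
        mem['x축_데이터'] = mem['x축_데이터'] + [i]   # reassign: in-place append would not propagate
        mem['y축_데이터'] = mem['y축_데이터'] + [i * i]
        time.sleep(0.1)

if __name__ == '__main__':
    mgr = multiprocessing.Manager()
    shared_mem = mgr.dict({'x축_데이터': [0], 'y축_데이터': [0]})
    multiprocessing.Process(target=producer, args=(shared_mem,), daemon=True).start()

    fig, ax = plt.subplots()
    def animate(_):
        ax.clear()
        ax.plot(shared_mem['x축_데이터'], shared_mem['y축_데이터'])
    anim = animation.FuncAnimation(fig, animate, interval=60)  # keep a reference
    plt.show()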
bomber8013/h5py
|
[
"71a82d9fc99245181747a8630b45373ae1606de0"
] |
[
"h5py/tests/test_attrs_data.py"
] |
[
"# This file is part of h5py, a Python interface to the HDF5 library.\n#\n# http://www.h5py.org\n#\n# Copyright 2008-2013 Andrew Collette and contributors\n#\n# License: Standard 3-clause BSD; see \"license.txt\" for full license terms\n# and contributor agreement.\n\n\"\"\"\n Attribute data transfer testing module\n\n Covers all data read/write and type-conversion operations for attributes.\n\"\"\"\n\nimport numpy as np\n\nfrom .common import TestCase, ut\n\nimport h5py\nfrom h5py import h5a, h5s, h5t\nfrom h5py import File\nfrom h5py._hl.base import is_empty_dataspace\n\n\nclass BaseAttrs(TestCase):\n\n def setUp(self):\n self.f = File(self.mktemp(), 'w')\n\n def tearDown(self):\n if self.f:\n self.f.close()\n\n\nclass TestScalar(BaseAttrs):\n\n \"\"\"\n Feature: Scalar types map correctly to array scalars\n \"\"\"\n\n def test_int(self):\n \"\"\" Integers are read as correct NumPy type \"\"\"\n self.f.attrs['x'] = np.array(1, dtype=np.int8)\n out = self.f.attrs['x']\n self.assertIsInstance(out, np.int8)\n\n def test_compound(self):\n \"\"\" Compound scalars are read as numpy.void \"\"\"\n dt = np.dtype([('a', 'i'), ('b', 'f')])\n data = np.array((1, 4.2), dtype=dt)\n self.f.attrs['x'] = data\n out = self.f.attrs['x']\n self.assertIsInstance(out, np.void)\n self.assertEqual(out, data)\n self.assertEqual(out['b'], data['b'])\n\n\nclass TestArray(BaseAttrs):\n\n \"\"\"\n Feature: Non-scalar types are correctly retrieved as ndarrays\n \"\"\"\n\n def test_single(self):\n \"\"\" Single-element arrays are correctly recovered \"\"\"\n data = np.ndarray((1,), dtype='f')\n self.f.attrs['x'] = data\n out = self.f.attrs['x']\n self.assertIsInstance(out, np.ndarray)\n self.assertEqual(out.shape, (1,))\n\n def test_multi(self):\n \"\"\" Rank-1 arrays are correctly recovered \"\"\"\n data = np.ndarray((42,), dtype='f')\n data[:] = 42.0\n data[10:35] = -47.0\n self.f.attrs['x'] = data\n out = self.f.attrs['x']\n self.assertIsInstance(out, np.ndarray)\n self.assertEqual(out.shape, (42,))\n self.assertArrayEqual(out, data)\n\n\nclass TestTypes(BaseAttrs):\n\n \"\"\"\n Feature: All supported types can be stored in attributes\n \"\"\"\n\n def test_int(self):\n \"\"\" Storage of integer types \"\"\"\n dtypes = (np.int8, np.int16, np.int32, np.int64,\n np.uint8, np.uint16, np.uint32, np.uint64)\n for dt in dtypes:\n data = np.ndarray((1,), dtype=dt)\n data[...] = 42\n self.f.attrs['x'] = data\n out = self.f.attrs['x']\n self.assertEqual(out.dtype, dt)\n self.assertArrayEqual(out, data)\n\n def test_float(self):\n \"\"\" Storage of floating point types \"\"\"\n dtypes = tuple(np.dtype(x) for x in ('<f4', '>f4', '>f8', '<f8'))\n\n for dt in dtypes:\n data = np.ndarray((1,), dtype=dt)\n data[...] = 42.3\n self.f.attrs['x'] = data\n out = self.f.attrs['x']\n # TODO: Clean up after issue addressed !\n print(\"dtype: \", out.dtype, dt)\n print(\"value: \", out, data)\n self.assertEqual(out.dtype, dt)\n self.assertArrayEqual(out, data)\n\n def test_complex(self):\n \"\"\" Storage of complex types \"\"\"\n dtypes = tuple(np.dtype(x) for x in ('<c8', '>c8', '<c16', '>c16'))\n\n for dt in dtypes:\n data = np.ndarray((1,), dtype=dt)\n data[...] = -4.2j + 35.9\n self.f.attrs['x'] = data\n out = self.f.attrs['x']\n self.assertEqual(out.dtype, dt)\n self.assertArrayEqual(out, data)\n\n def test_string(self):\n \"\"\" Storage of fixed-length strings \"\"\"\n dtypes = tuple(np.dtype(x) for x in ('|S1', '|S10'))\n\n for dt in dtypes:\n data = np.ndarray((1,), dtype=dt)\n data[...] 
= 'h'\n self.f.attrs['x'] = data\n out = self.f.attrs['x']\n self.assertEqual(out.dtype, dt)\n self.assertEqual(out[0], data[0])\n\n def test_bool(self):\n \"\"\" Storage of NumPy booleans \"\"\"\n\n data = np.ndarray((2,), dtype=np.bool_)\n data[...] = True, False\n self.f.attrs['x'] = data\n out = self.f.attrs['x']\n self.assertEqual(out.dtype, data.dtype)\n self.assertEqual(out[0], data[0])\n self.assertEqual(out[1], data[1])\n\n def test_vlen_string_array(self):\n \"\"\" Storage of vlen byte string arrays\"\"\"\n dt = h5py.string_dtype(encoding='ascii')\n\n data = np.ndarray((2,), dtype=dt)\n data[...] = \"Hello\", \"Hi there! This is HDF5!\"\n\n self.f.attrs['x'] = data\n out = self.f.attrs['x']\n self.assertEqual(out.dtype, dt)\n self.assertEqual(out[0], data[0])\n self.assertEqual(out[1], data[1])\n\n def test_string_scalar(self):\n \"\"\" Storage of variable-length byte string scalars (auto-creation) \"\"\"\n\n self.f.attrs['x'] = b'Hello'\n out = self.f.attrs['x']\n\n self.assertEqual(out, 'Hello')\n self.assertEqual(type(out), str)\n\n aid = h5py.h5a.open(self.f.id, b\"x\")\n tid = aid.get_type()\n self.assertEqual(type(tid), h5py.h5t.TypeStringID)\n self.assertEqual(tid.get_cset(), h5py.h5t.CSET_ASCII)\n self.assertTrue(tid.is_variable_str())\n\n def test_unicode_scalar(self):\n \"\"\" Storage of variable-length unicode strings (auto-creation) \"\"\"\n\n self.f.attrs['x'] = u\"Hello\" + chr(0x2340) + u\"!!\"\n out = self.f.attrs['x']\n self.assertEqual(out, u\"Hello\" + chr(0x2340) + u\"!!\")\n self.assertEqual(type(out), str)\n\n aid = h5py.h5a.open(self.f.id, b\"x\")\n tid = aid.get_type()\n self.assertEqual(type(tid), h5py.h5t.TypeStringID)\n self.assertEqual(tid.get_cset(), h5py.h5t.CSET_UTF8)\n self.assertTrue(tid.is_variable_str())\n\n\nclass TestEmpty(BaseAttrs):\n\n def setUp(self):\n BaseAttrs.setUp(self)\n sid = h5s.create(h5s.NULL)\n tid = h5t.C_S1.copy()\n tid.set_size(10)\n aid = h5a.create(self.f.id, b'x', tid, sid)\n self.empty_obj = h5py.Empty(np.dtype(\"S10\"))\n\n def test_read(self):\n self.assertEqual(\n self.empty_obj, self.f.attrs['x']\n )\n\n def test_write(self):\n self.f.attrs[\"y\"] = self.empty_obj\n self.assertTrue(is_empty_dataspace(h5a.open(self.f.id, b'y')))\n\n def test_modify(self):\n with self.assertRaises(IOError):\n self.f.attrs.modify('x', 1)\n\n def test_values(self):\n # list() is for Py3 where these are iterators\n values = list(self.f.attrs.values())\n self.assertEqual(\n [self.empty_obj], values\n )\n\n def test_items(self):\n items = list(self.f.attrs.items())\n self.assertEqual(\n [(u\"x\", self.empty_obj)], items\n )\n\n def test_itervalues(self):\n values = list(self.f.attrs.values())\n self.assertEqual(\n [self.empty_obj], values\n )\n\n def test_iteritems(self):\n items = list(self.f.attrs.items())\n self.assertEqual(\n [(u\"x\", self.empty_obj)], items\n )\n\n\nclass TestWriteException(BaseAttrs):\n\n \"\"\"\n Ensure failed attribute writes don't leave garbage behind.\n \"\"\"\n\n def test_write(self):\n \"\"\" ValueError on string write wipes out attribute \"\"\"\n\n s = b\"Hello\\x00Hello\"\n\n try:\n self.f.attrs['x'] = s\n except ValueError:\n pass\n\n with self.assertRaises(KeyError):\n self.f.attrs['x']\n"
] |
[
[
"numpy.array",
"numpy.ndarray",
"numpy.dtype"
]
] |
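The behaviors the tests above assert are easy to reproduce interactively; a short sketch of two attribute round-trips (a scalar int8 comes back as a NumPy scalar, a bytes value comes back as str):

import numpy as np
import h5py

with h5py.File('demo.h5', 'w') as f:
    f.attrs['i'] = np.array(1, dtype=np.int8)  # scalar attribute
    f.attrs['s'] = b'Hello'                    # variable-length string attribute
    print(type(f.attrs['i']))                  # <class 'numpy.int8'>
    print(f.attrs['s'], type(f.attrs['s']))    # Hello <class 'str'>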
Business-Wizard/yolov4-custom-functions
|
[
"daf98d28b40a17306495883425fe04b6e20b5ff6"
] |
[
"save_model.py"
] |
[
"import tensorflow as tf\r\nfrom absl import app, flags, logging\r\nfrom absl.flags import FLAGS\r\nfrom core.yolov4 import YOLO, decode, filter_boxes\r\nimport core.utils as utils\r\nfrom core.config import cfg\r\n\r\nflags.DEFINE_string('weights', './data/yolov4.weights', 'path to weights file')\r\nflags.DEFINE_string('output', './checkpoints/yolov4-416', 'path to output')\r\nflags.DEFINE_boolean('tiny', False, 'is yolo-tiny or not')\r\nflags.DEFINE_integer('input_size', 416, 'define input size of export model')\r\nflags.DEFINE_float('score_thres', 0.2, 'define score threshold')\r\nflags.DEFINE_string('framework', 'tf', 'define what framework do you want to convert (tf, trt, tflite)')\r\nflags.DEFINE_string('model', 'yolov4', 'yolov3 or yolov4')\r\n\r\ndef save_tf():\r\n STRIDES, ANCHORS, NUM_CLASS, XYSCALE = utils.load_config(FLAGS)\r\n\r\n input_layer = tf.keras.layers.Input([FLAGS.input_size, FLAGS.input_size, 3])\r\n feature_maps = YOLO(input_layer, NUM_CLASS, FLAGS.model, FLAGS.tiny)\r\n bbox_tensors = []\r\n prob_tensors = []\r\n for i, fm in enumerate(feature_maps):\r\n if i == 0:\r\n if FLAGS.tiny:\r\n output_tensors = decode(fm, FLAGS.input_size // 16, NUM_CLASS, STRIDES, ANCHORS, i, XYSCALE, FLAGS.framework)\r\n else:\r\n output_tensors = decode(fm, FLAGS.input_size // 8, NUM_CLASS, STRIDES, ANCHORS, i, XYSCALE, FLAGS.framework)\r\n else:\r\n output_tensors = decode(fm, FLAGS.input_size // 32, NUM_CLASS, STRIDES, ANCHORS, i, XYSCALE, FLAGS.framework)\r\n bbox_tensors.append(output_tensors[0])\r\n prob_tensors.append(output_tensors[1])\r\n pred_bbox = tf.concat(bbox_tensors, axis=1)\r\n pred_prob = tf.concat(prob_tensors, axis=1)\r\n if FLAGS.framework == 'tflite':\r\n pred = (pred_bbox, pred_prob)\r\n else:\r\n boxes, pred_conf = filter_boxes(pred_bbox, pred_prob, score_threshold=FLAGS.score_thres, input_shape=tf.constant([FLAGS.input_size, FLAGS.input_size]))\r\n pred = tf.concat([boxes, pred_conf], axis=-1)\r\n model = tf.keras.Model(input_layer, pred)\r\n utils.load_weights(model, FLAGS.weights, FLAGS.model, FLAGS.tiny)\r\n model.summary()\r\n model.save(FLAGS.output)\r\n\r\ndef main(_argv):\r\n save_tf()\r\n\r\nif __name__ == '__main__':\r\n try:\r\n app.run(main)\r\n except SystemExit:\r\n pass\r\n"
] |
[
[
"tensorflow.constant",
"tensorflow.concat",
"tensorflow.keras.Model",
"tensorflow.keras.layers.Input"
]
] |
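Note that save_tf only writes a SavedModel even with --framework tflite (the flag just changes which output tensors the model exposes). Converting the result afterward would look roughly like this, assuming TF 2.x and the default ./checkpoints/yolov4-416 output path:

import tensorflow as tf

# Convert the SavedModel produced by save_model.py into a .tflite flatbuffer.
converter = tf.lite.TFLiteConverter.from_saved_model('./checkpoints/yolov4-416')
tflite_model = converter.convert()
with open('./checkpoints/yolov4-416.tflite', 'wb') as f:
    f.write(tflite_model)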
movchan74/message_prediction_cnn_solution
|
[
"fd9564a981b8051120d20653426953169809702e"
] |
[
"resnet_prediction.py"
] |
[
"import json\nimport sys\nimport os\nfrom collections import Counter\nimport tqdm\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.contrib import layers\nfrom tensorflow.python.ops import array_ops\nimport random\nimport pickle\nimport editdistance\nimport soundex \nimport jellyfish\nimport string\nimport math\nimport requests\nfrom requests.auth import HTTPBasicAuth\n\ndef conv1D(x, inp, out, kernel, stride, name):\n W = tf.get_variable(name+\"W\", shape=[kernel, inp, out],\n initializer=tf.contrib.layers.xavier_initializer())\n x = tf.nn.conv1d(x, W, stride, 'SAME')\n x = layers.bias_add(x)\n return x\n\ndef residual_block(inputs, dim, name):\n x = tf.nn.relu(inputs)\n x = conv1D(x, dim, dim, 3, 1, name+'.conv1')\n x = tf.nn.relu(x)\n x = conv1D(x, dim, dim, 3, 1, name+'.conv2')\n return inputs + x\n\nif len(sys.argv) != 3:\n print ('usage: resnet_prediction.py input.json output.json')\n exit()\n\ninput_filename = sys.argv[1]\noutput_filename = sys.argv[2]\n\nwith open(input_filename) as f:\n test_data = json.load(f)\n\n\nwith open('gen_data/input_vocab.hkl', 'rb') as f:\n input_vocab = pickle.load(f)\nwith open('gen_data/word_list.hkl', 'rb') as f:\n word_list = pickle.load(f)\nwith open('gen_data/users.hkl', 'rb') as f:\n users = pickle.load(f)\n\ninput_vocab_set = set(input_vocab)\ninp_item_to_index = {w: i for i, w in enumerate(input_vocab)}\nusers_to_index = {x:i for i, x in enumerate(users)}\n\nemb_dim = 64\nvocab_size = len(input_vocab)\nuser_emb_dim = 64\n\ninput_seq = tf.placeholder(tf.int32, shape=[None, None])\noutput_seq = tf.placeholder(tf.int32, shape=[None, None])\ninput_user = tf.placeholder(tf.int32, shape=[None, 1])\n\n# Embedding: [vocab_size, emb_dim]\ninit_width = 0.5 / emb_dim\nemb_weights = tf.Variable(\n tf.random_uniform(\n [vocab_size, emb_dim], -init_width, init_width),\n name=\"embed_weights\")\n\nembedding = tf.nn.embedding_lookup(emb_weights, input_seq)\n\ninit_width = 0.5 / user_emb_dim\nusers_emb_weights = tf.Variable(\n tf.random_uniform(\n [len(users), user_emb_dim], -init_width, init_width),\n name=\"users_embed_weights\")\n\nusers_embedding = tf.nn.embedding_lookup(users_emb_weights, input_user)\n\nnet = conv1D(embedding, emb_dim, 128, 3, 1, 'conv1')\nnet = residual_block(net, 128, 'res1')\nnet = residual_block(net, 128, 'res2')\nnet = residual_block(net, 128, 'res3')\nnet = tf.concat([tf.tile(users_embedding, [1, tf.shape(net)[1], 1]), net], axis=2)\nnet = conv1D(net, 192, len(word_list), 1, 1, 'conv_final')\n\npred_max = tf.argmax(net, 2)\nout = tf.reshape(net, (-1, len(word_list)))\n\nloss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=tf.reshape(output_seq, (-1,)), logits=out)\nloss = tf.reduce_mean(loss)\n\nloss_summary_update = tf.summary.scalar(\"loss\", loss)\nsummary_op = tf.summary.merge_all()\n\ntrain_op = tf.train.AdamOptimizer(learning_rate=1e-4, beta1=0.5, beta2=0.9).minimize(loss)\n\nsess = tf.Session(config=tf.ConfigProto(intra_op_parallelism_threads=8))\nsess.run(tf.global_variables_initializer())\nsess.run(tf.local_variables_initializer())\n\nsaver = tf.train.Saver()\nsaver.restore(sess, 'checkpoints/res_cnn_v2_3.817720.ckpt')\n\ndef get_prediction(item):\n inp_indexes = np.array([inp_item_to_index['__'+x['type']+'__'+x['value']]\n if '__'+x['type']+'__'+x['value'] in input_vocab_set else inp_item_to_index['__'+x['type']+'__']\n for x in item['entitiesShortened']])\n\n u = users_to_index[item['user']]\n\n\n\n p = sess.run(net, feed_dict={input_seq: inp_indexes.reshape((1, -1)),\n input_user: 
np.array(u).reshape((1, -1))})\n\n pred_seq = []\n\n for i, x in enumerate(item['entitiesShortened']):\n if x['type'] != 'letter':\n continue\n for k in np.argsort(p[0][i])[::-1]:\n if word_list[k][0] == x['value'].lower():\n break\n # k = np.argmax(p[0][i])\n pred_seq.append(x['value']+word_list[k][1:])\n\n return pred_seq\n\n\nsubmission = {}\nfor item in tqdm.tqdm(test_data):\n submission[item['id']] = get_prediction(item)\n \nwith open(output_filename, 'w') as f:\n json.dump(submission, f)"
] |
[
[
"tensorflow.train.AdamOptimizer",
"tensorflow.nn.conv1d",
"tensorflow.summary.scalar",
"tensorflow.ConfigProto",
"tensorflow.contrib.layers.xavier_initializer",
"tensorflow.train.Saver",
"tensorflow.argmax",
"tensorflow.shape",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"tensorflow.summary.merge_all",
"numpy.argsort",
"numpy.array",
"tensorflow.nn.embedding_lookup",
"tensorflow.contrib.layers.bias_add",
"tensorflow.nn.relu",
"tensorflow.local_variables_initializer",
"tensorflow.reduce_mean",
"tensorflow.reshape",
"tensorflow.random_uniform"
]
] |
liuyibin-git/insightface
|
[
"5b40f4bfce7e2c9d10bb6328c4fed33e9d76c9de"
] |
[
"recognition/ArcFace/image_iter.py"
] |
[
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport random\nimport logging\nimport sys\nimport numbers\nimport math\nimport sklearn\nimport datetime\nimport numpy as np\nimport cv2\n\nimport mxnet as mx\nfrom mxnet import ndarray as nd\nfrom mxnet import io\nfrom mxnet import recordio\n\nlogger = logging.getLogger()\n\n\nclass FaceImageIter(io.DataIter):\n def __init__(self,\n batch_size,\n data_shape,\n path_imgrec=None,\n shuffle=False,\n aug_list=None,\n mean=None,\n rand_mirror=False,\n cutoff=0,\n color_jittering=0,\n images_filter=0,\n data_name='data',\n label_name='softmax_label',\n **kwargs):\n super(FaceImageIter, self).__init__()\n assert path_imgrec\n if path_imgrec:\n logging.info('loading recordio %s...', path_imgrec)\n path_imgidx = path_imgrec[0:-4] + \".idx\"\n self.imgrec = recordio.MXIndexedRecordIO(path_imgidx, path_imgrec,\n 'r') # pylint: disable=redefined-variable-type\n s = self.imgrec.read_idx(0)\n header, _ = recordio.unpack(s)\n if header.flag > 0:\n print('header0 label', header.label)\n self.header0 = (int(header.label[0]), int(header.label[1]))\n #assert(header.flag==1)\n #self.imgidx = range(1, int(header.label[0]))\n self.imgidx = []\n self.id2range = {}\n self.seq_identity = range(int(header.label[0]),\n int(header.label[1]))\n for identity in self.seq_identity:\n s = self.imgrec.read_idx(identity)\n header, _ = recordio.unpack(s)\n a, b = int(header.label[0]), int(header.label[1])\n count = b - a\n if count < images_filter:\n continue\n self.id2range[identity] = (a, b)\n self.imgidx += range(a, b)\n print('id2range', len(self.id2range))\n else:\n self.imgidx = list(self.imgrec.keys)\n if shuffle:\n self.seq = self.imgidx\n self.oseq = self.imgidx\n print(len(self.seq))\n else:\n self.seq = None\n\n self.mean = mean\n self.nd_mean = None\n if self.mean:\n self.mean = np.array(self.mean, dtype=np.float32).reshape(1, 1, 3)\n self.nd_mean = mx.nd.array(self.mean).reshape((1, 1, 3))\n\n self.check_data_shape(data_shape)\n self.provide_data = [(data_name, (batch_size, ) + data_shape)]\n self.batch_size = batch_size\n self.data_shape = data_shape\n self.shuffle = shuffle\n self.image_size = '%d,%d' % (data_shape[1], data_shape[2])\n self.rand_mirror = rand_mirror\n print('rand_mirror', rand_mirror)\n self.cutoff = cutoff\n self.color_jittering = color_jittering\n self.CJA = mx.image.ColorJitterAug(0.125, 0.125, 0.125)\n self.provide_label = [(label_name, (batch_size, ))]\n #print(self.provide_label[0][1])\n self.cur = 0\n self.nbatch = 0\n self.is_init = False\n\n def reset(self):\n \"\"\"Resets the iterator to the beginning of the data.\"\"\"\n print('call reset()')\n self.cur = 0\n if self.shuffle:\n random.shuffle(self.seq)\n if self.seq is None and self.imgrec is not None:\n self.imgrec.reset()\n\n def num_samples(self):\n return len(self.seq)\n\n def next_sample(self):\n \"\"\"Helper function for reading in next sample.\"\"\"\n #set total batch size, for example, 1800, and maximum size for each people, for example 45\n if self.seq is not None:\n while True:\n if self.cur >= len(self.seq):\n raise StopIteration\n idx = self.seq[self.cur]\n self.cur += 1\n if self.imgrec is not None:\n s = self.imgrec.read_idx(idx)\n header, img = recordio.unpack(s)\n label = header.label\n if not isinstance(label, numbers.Number):\n label = label[0]\n return label, img, None, None\n else:\n label, fname, bbox, landmark = self.imglist[idx]\n return label, self.read_image(fname), bbox, landmark\n 
else:\n s = self.imgrec.read()\n if s is None:\n raise StopIteration\n header, img = recordio.unpack(s)\n return header.label, img, None, None\n\n def brightness_aug(self, src, x):\n alpha = 1.0 + random.uniform(-x, x)\n src *= alpha\n return src\n\n def contrast_aug(self, src, x):\n alpha = 1.0 + random.uniform(-x, x)\n coef = nd.array([[[0.299, 0.587, 0.114]]])\n gray = src * coef\n gray = (3.0 * (1.0 - alpha) / gray.size) * nd.sum(gray)\n src *= alpha\n src += gray\n return src\n\n def saturation_aug(self, src, x):\n alpha = 1.0 + random.uniform(-x, x)\n coef = nd.array([[[0.299, 0.587, 0.114]]])\n gray = src * coef\n gray = nd.sum(gray, axis=2, keepdims=True)\n gray *= (1.0 - alpha)\n src *= alpha\n src += gray\n return src\n\n def color_aug(self, img, x):\n #augs = [self.brightness_aug, self.contrast_aug, self.saturation_aug]\n #random.shuffle(augs)\n #for aug in augs:\n # #print(img.shape)\n # img = aug(img, x)\n # #print(img.shape)\n #return img\n return self.CJA(img)\n\n def mirror_aug(self, img):\n _rd = random.randint(0, 1)\n if _rd == 1:\n for c in range(img.shape[2]):\n img[:, :, c] = np.fliplr(img[:, :, c])\n return img\n\n def compress_aug(self, img):\n from PIL import Image\n from io import BytesIO\n buf = BytesIO()\n img = Image.fromarray(img.asnumpy(), 'RGB')\n q = random.randint(2, 20)\n img.save(buf, format='JPEG', quality=q)\n buf = buf.getvalue()\n img = Image.open(BytesIO(buf))\n return nd.array(np.asarray(img, 'float32'))\n\n def next(self):\n if not self.is_init:\n self.reset()\n self.is_init = True\n \"\"\"Returns the next batch of data.\"\"\"\n #print('in next', self.cur, self.labelcur)\n self.nbatch += 1\n batch_size = self.batch_size\n c, h, w = self.data_shape\n batch_data = nd.empty((batch_size, c, h, w))\n if self.provide_label is not None:\n batch_label = nd.empty(self.provide_label[0][1])\n i = 0\n try:\n while i < batch_size:\n label, s, bbox, landmark = self.next_sample()\n _data = self.imdecode(s)\n if _data.shape[0] != self.data_shape[1]:\n _data = mx.image.resize_short(_data, self.data_shape[1])\n if self.rand_mirror:\n _rd = random.randint(0, 1)\n if _rd == 1:\n _data = mx.ndarray.flip(data=_data, axis=1)\n if self.color_jittering > 0:\n if self.color_jittering > 1:\n _rd = random.randint(0, 1)\n if _rd == 1:\n _data = self.compress_aug(_data)\n #print('do color aug')\n _data = _data.astype('float32', copy=False)\n #print(_data.__class__)\n _data = self.color_aug(_data, 0.125)\n if self.nd_mean is not None:\n _data = _data.astype('float32', copy=False)\n _data -= self.nd_mean\n _data *= 0.0078125\n if self.cutoff > 0:\n _rd = random.randint(0, 1)\n if _rd == 1:\n #print('do cutoff aug', self.cutoff)\n centerh = random.randint(0, _data.shape[0] - 1)\n centerw = random.randint(0, _data.shape[1] - 1)\n half = self.cutoff // 2\n starth = max(0, centerh - half)\n endh = min(_data.shape[0], centerh + half)\n startw = max(0, centerw - half)\n endw = min(_data.shape[1], centerw + half)\n #print(starth, endh, startw, endw, _data.shape)\n _data[starth:endh, startw:endw, :] = 128\n data = [_data]\n try:\n self.check_valid_image(data)\n except RuntimeError as e:\n logging.debug('Invalid image, skipping: %s', str(e))\n continue\n #print('aa',data[0].shape)\n #data = self.augmentation_transform(data)\n #print('bb',data[0].shape)\n for datum in data:\n assert i < batch_size, 'Batch size must be multiples of augmenter output length'\n #print(datum.shape)\n batch_data[i][:] = self.postprocess_data(datum)\n batch_label[i][:] = label\n i += 1\n except StopIteration:\n 
if i < batch_size:\n raise StopIteration\n\n return io.DataBatch([batch_data], [batch_label], batch_size - i)\n\n def check_data_shape(self, data_shape):\n \"\"\"Checks if the input data shape is valid\"\"\"\n if not len(data_shape) == 3:\n raise ValueError(\n 'data_shape should have length 3, with dimensions CxHxW')\n if not data_shape[0] == 3:\n raise ValueError(\n 'This iterator expects inputs to have 3 channels.')\n\n def check_valid_image(self, data):\n \"\"\"Checks if the input data is valid\"\"\"\n if len(data[0].shape) == 0:\n raise RuntimeError('Data shape is wrong')\n\n def imdecode(self, s):\n \"\"\"Decodes a string or byte string to an NDArray.\n See mx.img.imdecode for more details.\"\"\"\n img = mx.image.imdecode(s) #mx.ndarray\n return img\n\n def read_image(self, fname):\n \"\"\"Reads an input image `fname` and returns the decoded raw bytes.\n\n Example usage:\n ----------\n >>> dataIter.read_image('Face.jpg') # returns decoded raw bytes.\n \"\"\"\n with open(os.path.join(self.path_root, fname), 'rb') as fin:\n img = fin.read()\n return img\n\n def augmentation_transform(self, data):\n \"\"\"Transforms input data with specified augmentation.\"\"\"\n for aug in self.auglist:\n data = [ret for src in data for ret in aug(src)]\n return data\n\n def postprocess_data(self, datum):\n \"\"\"Final postprocessing step before image is loaded into the batch.\"\"\"\n return nd.transpose(datum, axes=(2, 0, 1))\n\n\nclass FaceImageIterList(io.DataIter):\n def __init__(self, iter_list):\n assert len(iter_list) > 0\n self.provide_data = iter_list[0].provide_data\n self.provide_label = iter_list[0].provide_label\n self.iter_list = iter_list\n self.cur_iter = None\n\n def reset(self):\n self.cur_iter.reset()\n\n def next(self):\n self.cur_iter = random.choice(self.iter_list)\n while True:\n try:\n ret = self.cur_iter.next()\n except StopIteration:\n self.cur_iter.reset()\n continue\n return ret\n"
] |
[
[
"numpy.asarray",
"numpy.fliplr",
"numpy.array"
]
] |
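The cutoff branch inside next() above is a cutout-style augmentation: a random square is overwritten with the gray value 128. The same logic in plain NumPy, for reference (a hypothetical standalone helper, not taken from the repo):

import random
import numpy as np

def cutoff_aug(img, size):
    """Blank a random size x size square (clipped at the borders) with gray, as in FaceImageIter."""
    h, w = img.shape[:2]
    ch, cw = random.randint(0, h - 1), random.randint(0, w - 1)
    half = size // 2
    img[max(0, ch - half):min(h, ch + half), max(0, cw - half):min(w, cw + half), :] = 128
    return img

img = np.random.randint(0, 256, (112, 112, 3), dtype=np.uint8)
aug = cutoff_aug(img.copy(), 32)  # original left untouched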
zhyhan/pathology-multiresolution
|
[
"a44e6563dc5bca5998278403e93a4f74dfc3e8e2"
] |
[
"api/hdf5_fun.py"
] |
[
"import h5py\nimport numpy as np\nimport config_fun\nimport glob\nimport os\nimport random\nimport sys\nfrom PIL import Image\nfrom torch.utils.data import Dataset\nfrom tqdm import tqdm\nimport patch_preprocess_fun\nfrom itertools import izip\nimport random\n\ndef _precoss_patches(cfg, dataset, file_type):\n data_block = np.zeros((cfg.patch_num_each_hdf5, cfg.patch_size * cfg.patch_size * 3), dtype=np.uint8)\n label_block = np.zeros((cfg.patch_num_each_hdf5, 1), dtype=np.uint8)\n name_block = []\n cnt = 0\n id = 0\n for idx, data in enumerate(tqdm(dataset)):\n img = Image.open(data['file_name'])\n img_np = np.asarray(img).astype(np.uint8)\n img_np = img_np.reshape(-1)\n data_block[cnt] = img_np\n label_block[cnt] = data['label']\n cnt += 1\n name_block.append(os.path.basename(data['file_name']))\n\n if cnt == cfg.patch_num_each_hdf5 or idx + 1 == len(dataset):\n data_save = data_block[0:cnt]\n label_save = label_block[0:cnt]\n name_save = '\\n'.join(name_block)\n prefix = ''\n if file_type == 'train':\n prefix = cfg.patch_hdf5_train_file_pre\n else:\n prefix = cfg.patch_hdf5_val_file_pre\n with h5py.File('%s_%d.h5'%(prefix, id), 'w') as f:\n f.create_dataset('data', shape=data_save.shape,\n compression='gzip', dtype='uint8', data=data_save)\n f.create_dataset('label', shape=label_save.shape,\n compression='gzip', dtype='uint8', data=label_save)\n f.attrs['name'] = name_save\n id += 1\n cnt = 0\n name_block = []\n\n\ndef _add_label(pos_patches, neg_patches):\n patches = []\n for patch in pos_patches:\n patches.append({'file_name': patch, 'label': 1})\n for patch in neg_patches:\n patches.append({'file_name': patch, 'label': 0})\n random.shuffle(patches)\n return patches\n\ndef convert_patch_to_hdf5():\n cfg = config_fun.config()\n train_pos_patches = glob.glob(os.path.join(cfg.patch_save_folder, 'train', 'pos', '*'+ cfg.img_ext))\n train_neg_patches = glob.glob(os.path.join(cfg.patch_save_folder, 'train', 'neg', '*'+ cfg.img_ext))\n val_pos_patches = glob.glob(os.path.join(cfg.patch_save_folder, 'val', 'pos', '*'+ cfg.img_ext))\n val_neg_patches = glob.glob(os.path.join(cfg.patch_save_folder, 'val', 'neg', '*'+ cfg.img_ext))\n\n train_patches = _add_label(train_pos_patches, train_neg_patches)\n val_patches = _add_label(val_pos_patches, val_neg_patches)\n print('processing train patches~')\n _precoss_patches(cfg, train_patches, 'train')\n print('processing validation patches~')\n _precoss_patches(cfg, val_patches, 'val')\n\ndef h5_extract_data_label_name(img_size, file_name):\n data = None\n label = None\n name = None\n with h5py.File(file_name) as f:\n data = f['data'].value\n label = f['label'].value\n name = f.attrs.values()\n return data.reshape(-1, img_size, img_size, 3), label, name[0].split('\\n')\n\n\ndef get_h5_file_list(data_type, cfg):\n if 'train' in data_type:\n file_names = glob.glob(cfg.patch_hdf5_train_file_pre + '*')\n elif 'val' in data_type:\n file_names = glob.glob(cfg.patch_hdf5_val_file_pre + '*')\n return file_names\n\n\ndef get_all_data_label_name(cfg, data_type, frac=1):\n file_names = None\n data = None\n label = None\n name = None\n\n file_names = get_h5_file_list(data_type, cfg)\n random.shuffle(file_names)\n file_names = file_names[:int(np.ceil(len(file_names)*frac))]\n\n for file_name in file_names:\n t_data, t_label, t_name = h5_extract_data_label_name(cfg.patch_size, file_name)\n if data is None:\n data = t_data\n label = t_label\n name = t_name\n else:\n data = np.concatenate((data, t_data), axis=0)\n label = np.concatenate((label, t_label), axis=0)\n 
name.extend(t_name)\n return data, label, name\n\n\ndef _random_vis_hdf5(cfg, data_type):\n data, label, name = get_all_data_label_name(cfg, data_type)\n save_dir_pre = cfg.vis_hdf5_folder\n if 'train' in data_type:\n file_type = 'train'\n elif 'val' in data_type:\n file_type = 'val'\n else:\n print('vis hdf5 error, wrong data type!')\n sys.exit(0)\n save_dir_pre = os.path.join(save_dir_pre, file_type)\n cfg.check_dir(save_dir_pre)\n save_dir_pos = os.path.join(save_dir_pre, 'pos')\n save_dir_neg = os.path.join(save_dir_pre, 'neg')\n cfg.check_dir(save_dir_pos)\n cfg.check_dir(save_dir_neg)\n for idx, n in enumerate(tqdm(name)):\n l = label[idx]\n d = data[idx]\n if random.random() < cfg.vis_hdf5_prob:\n img = Image.fromarray(d)\n if l == 1:\n img.save(os.path.join(save_dir_pos, n))\n else:\n img.save(os.path.join(save_dir_neg, n))\n img.close()\n\ndef random_vis_hdf5():\n cfg = config_fun.config()\n print('vis train hdf5 ~')\n _random_vis_hdf5(cfg, 'train')\n print('vis validation hdf5 ~')\n _random_vis_hdf5(cfg, 'val')\n\n\nif __name__ == '__main__':\n random_vis_hdf5()"
] |
[
[
"numpy.concatenate",
"numpy.zeros",
"numpy.asarray"
]
] |
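_process_patches above buffers patches into fixed-size blocks and flushes each full block to its own gzip-compressed .h5 file. The same pattern in isolation (a sketch with made-up sizes, not the repo's config values):

import numpy as np
import h5py

BLOCK = 4           # patches per file (cfg.patch_num_each_hdf5 in the repo)
PATCH = 8 * 8 * 3   # flattened patch length

buf = np.zeros((BLOCK, PATCH), dtype=np.uint8)
cnt, file_id = 0, 0
for i in range(10):                        # pretend these are decoded patches
    buf[cnt] = np.full(PATCH, i, np.uint8)
    cnt += 1
    if cnt == BLOCK or i == 9:             # flush on a full block or the last item
        with h5py.File(f'patches_{file_id}.h5', 'w') as f:
            f.create_dataset('data', data=buf[:cnt], compression='gzip')
        file_id, cnt = file_id + 1, 0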
pdstrnadJC/seldon-core
|
[
"76386a01309f41deb6362ffdb6f65ab26219238a"
] |
[
"python/tests/test_microservice_tester.py"
] |
[
"import os\nimport pytest\nimport json\nimport logging\nimport numpy as np\n\nfrom seldon_core.microservice_tester import (\n run_method,\n run_send_feedback,\n reconciliate_cont_type,\n SeldonTesterException,\n)\nfrom unittest import mock\nfrom seldon_core.utils import array_to_grpc_datadef, seldon_message_to_json\nfrom seldon_core.proto import prediction_pb2\n\nfrom .conftest import RESOURCES_PATH\n\n\nclass MockResponse:\n def __init__(self, json_data, status_code, reason=\"\", text=\"\"):\n self.json_data = json_data\n self.status_code = status_code\n self.reason = reason\n self.text = text\n\n def json(self):\n return self.json_data\n\n\ndef mocked_requests_post_success(url, *args, **kwargs):\n data = np.random.rand(1, 1)\n datadef = array_to_grpc_datadef(\"tensor\", data)\n request = prediction_pb2.SeldonMessage(data=datadef)\n json = seldon_message_to_json(request)\n return MockResponse(json, 200, text=\"{}\")\n\n\nclass Bunch:\n def __init__(self, adict):\n self.__dict__.update(adict)\n\n\[email protected](\"requests.post\", side_effect=mocked_requests_post_success)\ndef test_predict_rest(mock_post):\n filename = os.path.join(RESOURCES_PATH, \"model-template-app\", \"contract.json\")\n args_dict = {\n \"contract\": filename,\n \"host\": \"a\",\n \"port\": 1000,\n \"n_requests\": 1,\n \"batch_size\": 1,\n \"endpoint\": \"predict\",\n \"prnt\": True,\n \"grpc\": False,\n \"tensor\": True,\n }\n args = Bunch(args_dict)\n run_method(args, \"predict\")\n logging.info(mock_post.call_args[1])\n payload = json.loads(mock_post.call_args[1][\"data\"][\"json\"])\n assert payload[\"data\"][\"names\"] == [\n \"sepal_length\",\n \"sepal_width\",\n \"petal_length\",\n \"petal_width\",\n ]\n\n\[email protected](\"requests.post\", side_effect=mocked_requests_post_success)\ndef test_feedback_rest(mock_post):\n filename = os.path.join(RESOURCES_PATH, \"model-template-app\", \"contract.json\")\n args_dict = {\n \"contract\": filename,\n \"host\": \"a\",\n \"port\": 1000,\n \"n_requests\": 1,\n \"batch_size\": 1,\n \"endpoint\": \"feedback\",\n \"prnt\": True,\n \"grpc\": False,\n \"tensor\": True,\n }\n args = Bunch(args_dict)\n run_send_feedback(args)\n\n\[email protected](\"requests.post\", side_effect=mocked_requests_post_success)\ndef test_predict_rest_categorical(mock_post):\n filename = os.path.join(RESOURCES_PATH, \"contract.json\")\n args_dict = {\n \"contract\": filename,\n \"host\": \"a\",\n \"port\": 1000,\n \"n_requests\": 1,\n \"batch_size\": 1,\n \"endpoint\": \"predict\",\n \"prnt\": True,\n \"grpc\": False,\n \"tensor\": False,\n }\n args = Bunch(args_dict)\n run_method(args, \"predict\")\n\n\ndef test_reconciliate_exception():\n arr = np.array([1, 2])\n with pytest.raises(SeldonTesterException):\n reconciliate_cont_type(arr, \"FOO\")\n\n\ndef test_bad_contract():\n with pytest.raises(SeldonTesterException):\n filename = os.path.join(RESOURCES_PATH, \"bad_contract.json\")\n args_dict = {\n \"contract\": filename,\n \"host\": \"a\",\n \"port\": 1000,\n \"n_requests\": 1,\n \"batch_size\": 1,\n \"endpoint\": \"feedback\",\n \"prnt\": True,\n \"grpc\": False,\n \"tensor\": True,\n }\n args = Bunch(args_dict)\n run_send_feedback(args)\n"
] |
[
[
"numpy.array",
"numpy.random.rand"
]
] |
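The testing trick above generalizes: patch requests.post, return a stub exposing only the attributes the code under test reads, then assert on mock_post.call_args. A minimal standalone version:

from unittest import mock
import requests

def ping(url):
    # code under test: posts a payload and reads the status code
    return requests.post(url, data={'k': 'v'}).status_code

class StubResponse:
    status_code = 200

with mock.patch('requests.post', return_value=StubResponse()) as mock_post:
    assert ping('http://example.com') == 200
    assert mock_post.call_args[1]['data'] == {'k': 'v'}  # kwargs of the recorded call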
roclark/stable-baselines3
|
[
"21e9994ff99db306e14bfa19ca36f133c7153df4"
] |
[
"stable_baselines3/common/utils.py"
] |
[
"import glob\nimport os\nimport random\nfrom collections import deque\nfrom typing import Callable, Iterable, Optional, Union\n\nimport gym\nimport numpy as np\nimport torch as th\n\n# Check if tensorboard is available for pytorch\ntry:\n from torch.utils.tensorboard import SummaryWriter\nexcept ImportError:\n SummaryWriter = None\n\nfrom stable_baselines3.common import logger\nfrom stable_baselines3.common.preprocessing import is_image_space\nfrom stable_baselines3.common.type_aliases import GymEnv\nfrom stable_baselines3.common.vec_env import VecTransposeImage\n\n\ndef set_random_seed(seed: int, using_cuda: bool = False) -> None:\n \"\"\"\n Seed the different random generators\n :param seed: (int)\n :param using_cuda: (bool)\n \"\"\"\n # Seed python RNG\n random.seed(seed)\n # Seed numpy RNG\n np.random.seed(seed)\n # seed the RNG for all devices (both CPU and CUDA)\n th.manual_seed(seed)\n\n if using_cuda:\n # Deterministic operations for CuDNN, it may impact performances\n th.backends.cudnn.deterministic = True\n th.backends.cudnn.benchmark = False\n\n\n# From stable baselines\ndef explained_variance(y_pred: np.ndarray, y_true: np.ndarray) -> np.ndarray:\n \"\"\"\n Computes fraction of variance that ypred explains about y.\n Returns 1 - Var[y-ypred] / Var[y]\n\n interpretation:\n ev=0 => might as well have predicted zero\n ev=1 => perfect prediction\n ev<0 => worse than just predicting zero\n\n :param y_pred: (np.ndarray) the prediction\n :param y_true: (np.ndarray) the expected value\n :return: (float) explained variance of ypred and y\n \"\"\"\n assert y_true.ndim == 1 and y_pred.ndim == 1\n var_y = np.var(y_true)\n return np.nan if var_y == 0 else 1 - np.var(y_true - y_pred) / var_y\n\n\ndef update_learning_rate(optimizer: th.optim.Optimizer, learning_rate: float) -> None:\n \"\"\"\n Update the learning rate for a given optimizer.\n Useful when doing linear schedule.\n\n :param optimizer: (th.optim.Optimizer)\n :param learning_rate: (float)\n \"\"\"\n for param_group in optimizer.param_groups:\n param_group[\"lr\"] = learning_rate\n\n\ndef get_schedule_fn(value_schedule: Union[Callable, float]) -> Callable:\n \"\"\"\n Transform (if needed) learning rate and clip range (for PPO)\n to callable.\n\n :param value_schedule: (callable or float)\n :return: (function)\n \"\"\"\n # If the passed schedule is a float\n # create a constant function\n if isinstance(value_schedule, (float, int)):\n # Cast to float to avoid errors\n value_schedule = constant_fn(float(value_schedule))\n else:\n assert callable(value_schedule)\n return value_schedule\n\n\ndef get_linear_fn(start: float, end: float, end_fraction: float) -> Callable:\n \"\"\"\n Create a function that interpolates linearly between start and end\n between ``progress_remaining`` = 1 and ``progress_remaining`` = ``end_fraction``.\n This is used in DQN for linearly annealing the exploration fraction\n (epsilon for the epsilon-greedy strategy).\n\n :params start: (float) value to start with if ``progress_remaining`` = 1\n :params end: (float) value to end with if ``progress_remaining`` = 0\n :params end_fraction: (float) fraction of ``progress_remaining``\n where end is reached e.g 0.1 then end is reached after 10%\n of the complete training process.\n :return: (Callable)\n \"\"\"\n\n def func(progress_remaining: float) -> float:\n if (1 - progress_remaining) > end_fraction:\n return end\n else:\n return start + (1 - progress_remaining) * (end - start) / end_fraction\n\n return func\n\n\ndef constant_fn(val: float) -> Callable:\n \"\"\"\n 
Create a function that returns a constant\n It is useful for learning rate schedule (to avoid code duplication)\n\n :param val: (float)\n :return: (Callable)\n \"\"\"\n\n def func(_):\n return val\n\n return func\n\n\ndef get_device(device: Union[th.device, str] = \"auto\") -> th.device:\n \"\"\"\n Retrieve PyTorch device.\n It checks that the requested device is available first.\n For now, it supports only cpu and cuda.\n By default, it tries to use the gpu.\n\n :param device: (Union[str, th.device]) One for 'auto', 'cuda', 'cpu'\n :return: (th.device)\n \"\"\"\n # Cuda by default\n if device == \"auto\":\n device = \"cuda\"\n # Force conversion to th.device\n device = th.device(device)\n\n # Cuda not available\n if device == th.device(\"cuda\") and not th.cuda.is_available():\n return th.device(\"cpu\")\n\n return device\n\n\ndef get_latest_run_id(log_path: Optional[str] = None, log_name: str = \"\") -> int:\n \"\"\"\n Returns the latest run number for the given log name and log path,\n by finding the greatest number in the directories.\n\n :return: (int) latest run number\n \"\"\"\n max_run_id = 0\n for path in glob.glob(f\"{log_path}/{log_name}_[0-9]*\"):\n file_name = path.split(os.sep)[-1]\n ext = file_name.split(\"_\")[-1]\n if log_name == \"_\".join(file_name.split(\"_\")[:-1]) and ext.isdigit() and int(ext) > max_run_id:\n max_run_id = int(ext)\n return max_run_id\n\n\ndef configure_logger(\n verbose: int = 0, tensorboard_log: Optional[str] = None, tb_log_name: str = \"\", reset_num_timesteps: bool = True\n) -> None:\n \"\"\"\n Configure the logger's outputs.\n\n :param verbose: (int) the verbosity level: 0 no output, 1 info, 2 debug\n :param tensorboard_log: (str) the log location for tensorboard (if None, no logging)\n :param tb_log_name: (str) tensorboard log\n \"\"\"\n if tensorboard_log is not None and SummaryWriter is not None:\n latest_run_id = get_latest_run_id(tensorboard_log, tb_log_name)\n if not reset_num_timesteps:\n # Continue training in the same directory\n latest_run_id -= 1\n save_path = os.path.join(tensorboard_log, f\"{tb_log_name}_{latest_run_id + 1}\")\n if verbose >= 1:\n logger.configure(save_path, [\"stdout\", \"tensorboard\"])\n else:\n logger.configure(save_path, [\"tensorboard\"])\n elif verbose == 0:\n logger.configure(format_strings=[\"\"])\n\n\ndef check_for_correct_spaces(env: GymEnv, observation_space: gym.spaces.Space, action_space: gym.spaces.Space):\n \"\"\"\n Checks that the environment has same spaces as provided ones. 
Used by BaseAlgorithm to check if\n spaces match after loading the model with given env.\n Checked parameters:\n - observation_space\n - action_space\n\n :param env: (GymEnv) Environment to check for valid spaces\n :param observation_space: (gym.spaces.Space) Observation space to check against\n :param action_space: (gym.spaces.Space) Action space to check against\n \"\"\"\n if (\n observation_space != env.observation_space\n # Special cases for images that need to be transposed\n and not (\n is_image_space(env.observation_space)\n and observation_space == VecTransposeImage.transpose_space(env.observation_space)\n )\n ):\n raise ValueError(f\"Observation spaces do not match: {observation_space} != {env.observation_space}\")\n if action_space != env.action_space:\n raise ValueError(f\"Action spaces do not match: {action_space} != {env.action_space}\")\n\n\ndef is_vectorized_observation(observation: np.ndarray, observation_space: gym.spaces.Space) -> bool:\n \"\"\"\n For every observation type, detects and validates the shape,\n then returns whether or not the observation is vectorized.\n\n :param observation: (np.ndarray) the input observation to validate\n :param observation_space: (gym.spaces) the observation space\n :return: (bool) whether the given observation is vectorized or not\n \"\"\"\n if isinstance(observation_space, gym.spaces.Box):\n if observation.shape == observation_space.shape:\n return False\n elif observation.shape[1:] == observation_space.shape:\n return True\n else:\n raise ValueError(\n f\"Error: Unexpected observation shape {observation.shape} for \"\n + f\"Box environment, please use {observation_space.shape} \"\n + \"or (n_env, {}) for the observation shape.\".format(\", \".join(map(str, observation_space.shape)))\n )\n elif isinstance(observation_space, gym.spaces.Discrete):\n if observation.shape == (): # A numpy array of a number, has shape empty tuple '()'\n return False\n elif len(observation.shape) == 1:\n return True\n else:\n raise ValueError(\n f\"Error: Unexpected observation shape {observation.shape} for \"\n + \"Discrete environment, please use (1,) or (n_env, 1) for the observation shape.\"\n )\n\n elif isinstance(observation_space, gym.spaces.MultiDiscrete):\n if observation.shape == (len(observation_space.nvec),):\n return False\n elif len(observation.shape) == 2 and observation.shape[1] == len(observation_space.nvec):\n return True\n else:\n raise ValueError(\n f\"Error: Unexpected observation shape {observation.shape} for MultiDiscrete \"\n + f\"environment, please use ({len(observation_space.nvec)},) or \"\n + f\"(n_env, {len(observation_space.nvec)}) for the observation shape.\"\n )\n elif isinstance(observation_space, gym.spaces.MultiBinary):\n if observation.shape == (observation_space.n,):\n return False\n elif len(observation.shape) == 2 and observation.shape[1] == observation_space.n:\n return True\n else:\n raise ValueError(\n f\"Error: Unexpected observation shape {observation.shape} for MultiBinary \"\n + f\"environment, please use ({observation_space.n},) or \"\n + f\"(n_env, {observation_space.n}) for the observation shape.\"\n )\n else:\n raise ValueError(\n \"Error: Cannot determine if the observation is vectorized \" + f\" with the space type {observation_space}.\"\n )\n\n\ndef safe_mean(arr: Union[np.ndarray, list, deque]) -> np.ndarray:\n \"\"\"\n Compute the mean of an array if there is at least one element.\n For empty array, return NaN. 
It is used for logging only.\n\n    :param arr: (Union[np.ndarray, list, deque]) the array to average\n    :return: (np.ndarray) the mean, or NaN if the array is empty\n    \"\"\"\n    return np.nan if len(arr) == 0 else np.mean(arr)\n\n\ndef polyak_update(params: Iterable[th.nn.Parameter], target_params: Iterable[th.nn.Parameter], tau: float) -> None:\n    \"\"\"\n    Perform a Polyak average update on ``target_params`` using ``params``:\n    target parameters are slowly updated towards the main parameters.\n    ``tau``, the soft update coefficient, controls the interpolation:\n    ``tau=1`` corresponds to copying the parameters to the target ones whereas nothing happens when ``tau=0``.\n    The Polyak update is done in place, with ``no_grad``, and therefore does not create intermediate tensors\n    or a computation graph, reducing memory cost and improving performance. We scale the target params\n    by ``1-tau`` (in place), add the new weights scaled by ``tau``, and store the result of the sum in the target\n    params (in place).\n    See https://github.com/DLR-RM/stable-baselines3/issues/93\n\n    :param params: (Iterable[th.nn.Parameter]) parameters to use to update the target params\n    :param target_params: (Iterable[th.nn.Parameter]) parameters to update\n    :param tau: (float) the soft update coefficient (\"Polyak update\", between 0 and 1)\n    \"\"\"\n    with th.no_grad():\n        for param, target_param in zip(params, target_params):\n            target_param.data.mul_(1 - tau)\n            th.add(target_param.data, param.data, alpha=tau, out=target_param.data)\n"
] |
[
[
"torch.add",
"numpy.random.seed",
"torch.manual_seed",
"numpy.mean",
"torch.no_grad",
"torch.cuda.is_available",
"numpy.var",
"torch.device"
]
] |
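A minimal sketch of the Polyak soft update documented in the stable-baselines3 utils above: target <- (1 - tau) * target + tau * param, applied in place under no_grad. The two single-parameter lists here are toy values, not part of the dataset row.

import torch as th

params = [th.nn.Parameter(th.ones(2))]
target_params = [th.nn.Parameter(th.zeros(2))]
tau = 0.1  # soft update coefficient, between 0 and 1

with th.no_grad():
    for param, target_param in zip(params, target_params):
        # In place: target <- (1 - tau) * target + tau * param
        target_param.data.mul_(1 - tau)
        th.add(target_param.data, param.data, alpha=tau, out=target_param.data)

print(target_params[0].data)  # tensor([0.1000, 0.1000])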
pulp-platform/kws-on-pulp
|
[
"3a1e60ef0b8781bd6db1279e465fc0799a789079"
] |
[
"quantization/dataset.py"
] |
[
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# SPDX-License-Identifier: Apache-2.0\n# ==============================================================================\n#\n# Adapted by: Cristian Cioflan, ETH ([email protected])\n\n\nimport hashlib\nimport math\nimport os.path\nimport random\nimport os\nimport re\nimport glob\nimport time\nimport torch\nimport torchaudio\n\nfrom collections import Counter, OrderedDict\n\nimport soundfile as sf\nimport numpy as np\nimport tensorflow as tf\n\n\nMAX_NUM_WAVS_PER_CLASS = 2**27 - 1 # ~134M\nBACKGROUND_NOISE_LABEL = '_background_noise_'\nSILENCE_LABEL = '_silence_'\nSILENCE_INDEX = 0\nUNKNOWN_WORD_LABEL = '_unknown_'\nUNKNOWN_WORD_INDEX = 1\nRANDOM_SEED = 59185\n\n\ndef prepare_words_list(wanted_words):\n return [SILENCE_LABEL, UNKNOWN_WORD_LABEL] + wanted_words\n\n\ndef which_set(filename, validation_percentage, testing_percentage):\n # Split dataset in training, validation, and testing set\n # Should be modified to load validation data from validation_list.txt\n # Should be modified to load testing data from testing_list.txt\n\n base_name = os.path.basename(filename)\n # We want to ignore anything after '_nohash_' in the file name when\n # deciding which set to put a wav in, so the data set creator has a way of\n # grouping wavs that are close variations of each other.\n hash_name = re.sub(r'_nohash_.*$', '', base_name)\n # This looks a bit magical, but we need to decide whether this file should\n # go into the training, testing, or validation sets, and we want to keep\n # existing files in the same set even if more files are subsequently\n # added.\n # To do that, we need a stable way of deciding based on just the file name\n # itself, so we do a hash of that and then use that to generate a\n # probability value that we use to assign it.\n hash_name_hashed = hashlib.sha1(hash_name.encode()).hexdigest()\n percentage_hash = ((int(hash_name_hashed, 16) %\n (MAX_NUM_WAVS_PER_CLASS + 1)) *\n (100.0 / MAX_NUM_WAVS_PER_CLASS))\n if percentage_hash < validation_percentage:\n result = 'validation'\n elif percentage_hash < (testing_percentage + validation_percentage):\n result = 'testing'\n else:\n result = 'training'\n return result\n\n\nclass AudioProcessor(object):\n # Prepare data\n\n def __init__(self, training_parameters, data_processing_parameters):\n \n self.data_directory = training_parameters['data_dir']\n self.generate_background_noise()\n self.generate_data_dictionary(training_parameters)\n self.data_processing_parameters = data_processing_parameters\n\n\n def generate_data_dictionary(self, training_parameters):\n # For each data set, generate a dictionary containing the path to each file, its label, and its speaker.\n # Make sure the shuffling and picking of unknowns is deterministic.\n random.seed(RANDOM_SEED)\n wanted_words_index = {}\n\n for index, wanted_word in enumerate(training_parameters['wanted_words']):\n wanted_words_index[wanted_word] = index + 2\n\n # Prepare data 
sets\n self.data_set = {'validation': [], 'testing': [], 'training': []}\n unknown_set = {'validation': [], 'testing': [], 'training': []}\n all_words = {}\n # Find all audio samples\n search_path = os.path.join(self.data_directory, '*', '*.wav')\n\n for wav_path in glob.glob(search_path):\n _ , word = os.path.split(os.path.dirname(wav_path))\n speaker_id = wav_path.split('/')[8].split('_')[0] # Hardcoded, should use regex.\n word = word.lower()\n\n # Ignore background noise, as it has been handled by generate_background_noise()\n if word == BACKGROUND_NOISE_LABEL:\n continue\n\n all_words[word] = True\n # Determine the set to which the word should belong\n set_index = which_set(wav_path, training_parameters['validation_percentage'], training_parameters['testing_percentage'])\n\n # If it's a known class, store its detail, otherwise add it to the list\n # we'll use to train the unknown label.\n # If we use 35 classes - all are known, hence no unkown samples \n if word in wanted_words_index:\n self.data_set[set_index].append({'label': word, 'file': wav_path, 'speaker': speaker_id})\n else:\n unknown_set[set_index].append({'label': word, 'file': wav_path, 'speaker': speaker_id})\n\n if not all_words:\n raise Exception('No .wavs found at ' + search_path)\n for index, wanted_word in enumerate(training_parameters['wanted_words']):\n if wanted_word not in all_words:\n raise Exception('Expected to find ' + wanted_word +\n ' in labels but only found ' +\n ', '.join(all_words.keys()))\n\n # We need an arbitrary file to load as the input for the silence samples.\n # It's multiplied by zero later, so the content doesn't matter.\n silence_wav_path = self.data_set['training'][0]['file']\n\n # Add silence and unknown words to each set\n for set_index in ['validation', 'testing', 'training']:\n\n set_size = len(self.data_set[set_index])\n silence_size = int(math.ceil(set_size * training_parameters['silence_percentage'] / 100))\n for _ in range(silence_size):\n self.data_set[set_index].append({\n 'label': SILENCE_LABEL,\n 'file': silence_wav_path,\n 'speaker': \"None\" \n })\n\n # Pick some unknowns to add to each partition of the data set.\n random.shuffle(unknown_set[set_index])\n unknown_size = int(math.ceil(set_size * training_parameters['unknown_percentage'] / 100))\n self.data_set[set_index].extend(unknown_set[set_index][:unknown_size])\n\n # Make sure the ordering is random.\n for set_index in ['validation', 'testing', 'training']:\n random.shuffle(self.data_set[set_index])\n\n # Prepare the rest of the result data structure.\n self.words_list = prepare_words_list(training_parameters['wanted_words'])\n self.word_to_index = {}\n for word in all_words:\n if word in wanted_words_index:\n self.word_to_index[word] = wanted_words_index[word]\n else:\n self.word_to_index[word] = UNKNOWN_WORD_INDEX\n self.word_to_index[SILENCE_LABEL] = SILENCE_INDEX\n\n\n def generate_background_noise(self):\n # Load background noise, used to augment clean speech\n\n self.background_noise = []\n background_dir = os.path.join(self.data_directory, BACKGROUND_NOISE_LABEL)\n if not os.path.exists(background_dir):\n return self.background_noise\n\n search_path = os.path.join(self.data_directory, BACKGROUND_NOISE_LABEL,'*.wav')\n for wav_path in glob.glob(search_path):\n # List of tensor, each one is a background noise\n sf_loader, _ = sf.read(wav_path)\n wav_file = torch.Tensor(np.array([sf_loader]))\n self.background_noise.append(wav_file[0])\n\n if not self.background_noise:\n raise Exception('No background wav files were 
found in ' + search_path)\n\n\n def get_size(self, mode):\n # Compute data set size\n\n return len(self.data_set[mode])\n\n\n def get_data(self, mode, training_parameters):\n # Prepare and return data (utterances and labels) for inference\n\n # Pick one of the partitions to choose samples from\n candidates = self.data_set[mode]\n if training_parameters['batch_size'] == -1:\n samples_number = len(candidates)\n else:\n samples_number = max(0, min(training_parameters['batch_size'], len(candidates)))\n\n # Create a data placeholder\n data_placeholder = np.zeros((samples_number, self.data_processing_parameters['spectrogram_length'],self.data_processing_parameters['feature_bin_count']),dtype='float32' )\n labels_placeholder = np.zeros(samples_number)\n\n # Required for noise analysis\n use_background = (self.background_noise and (mode == 'training'))\n pick_deterministically = (mode != 'training')\n\n for i in range(0, samples_number):\n\n # Pick which audio sample to use.\n if training_parameters['batch_size'] == -1 or pick_deterministically:\n # The randomness is eliminated here to train on the same batch ordering\n sample_index = i \n else:\n sample_index = np.random.randint(len(candidates))\n sample = candidates[sample_index]\n\n # Compute time shift offset\n if training_parameters['time_shift_samples'] > 0:\n time_shift_amount = np.random.randint(-training_parameters['time_shift_samples'], training_parameters['time_shift_samples'])\n else:\n time_shift_amount = 0\n if time_shift_amount > 0:\n time_shift_padding = [[time_shift_amount, 0], [0, 0]]\n time_shift_offset = [0, 0]\n else:\n time_shift_padding = [[0, -time_shift_amount], [0, 0]]\n time_shift_offset = [-time_shift_amount, 0]\n \n data_augmentation_parameters = {\n 'wav_filename': sample['file'],\n 'time_shift_padding': time_shift_padding,\n 'time_shift_offset': time_shift_offset,\n }\n\n # Select background noise to mix in.\n if use_background or sample['label'] == SILENCE_LABEL:\n background_index = np.random.randint(len(self.background_noise)) \n background_samples = self.background_noise[background_index].numpy()\n assert (len(background_samples) > self.data_processing_parameters['desired_samples'])\n\n background_offset = np.random.randint(0, len(background_samples) - self.data_processing_parameters['desired_samples'])\n background_clipped = background_samples[background_offset:(background_offset + self.data_processing_parameters['desired_samples'])]\n background_reshaped = background_clipped.reshape([self.data_processing_parameters['desired_samples'], 1])\n\n if sample['label'] == SILENCE_LABEL:\n background_volume = np.random.uniform(0, 1)\n elif np.random.uniform(0, 1) < training_parameters['background_frequency']:\n background_volume = np.random.uniform(0, training_parameters['background_volume'])\n else:\n background_volume = 0\n else:\n background_reshaped = np.zeros([self.data_processing_parameters['desired_samples'], 1])\n background_volume = 0\n \n data_augmentation_parameters['background_noise'] = background_reshaped\n data_augmentation_parameters['background_volume'] = background_volume\n\n # For silence samples, remove any sound\n if sample['label'] == SILENCE_LABEL:\n data_augmentation_parameters['foreground_volume'] = 0\n else:\n data_augmentation_parameters['foreground_volume'] = 1\n\n # Load data\n try:\n sf_loader, _ = sf.read(data_augmentation_parameters['wav_filename'])\n wav_file = torch.Tensor(np.array([sf_loader]))\n except:\n pass\n\n # Ensure data length is equal to the number of desired samples\n if 
len(wav_file[0]) < self.data_processing_parameters['desired_samples']:\n wav_file=torch.nn.ConstantPad1d((0,self.data_processing_parameters['desired_samples']-len(wav_file[0])),0)(wav_file[0])\n else:\n wav_file=wav_file[0][:self.data_processing_parameters['desired_samples']]\n scaled_foreground = torch.mul(wav_file, data_augmentation_parameters['foreground_volume'])\n\n # Padding wrt the time shift offset\n pad_tuple=tuple(data_augmentation_parameters['time_shift_padding'][0])\n padded_foreground = torch.nn.ConstantPad1d(pad_tuple,0)(scaled_foreground)\n sliced_foreground = padded_foreground[data_augmentation_parameters['time_shift_offset'][0]:data_augmentation_parameters['time_shift_offset'][0]+self.data_processing_parameters['desired_samples']]\n \n # Mix in background noise\n background_mul = torch.mul(torch.Tensor(data_augmentation_parameters['background_noise'][:,0]),data_augmentation_parameters['background_volume']) \n background_add = torch.add(background_mul, sliced_foreground)\n\n # Compute MFCCs - PyTorch\n # melkwargs={ 'n_fft':1024, 'win_length':self.data_processing_parameters['window_size_samples'], 'hop_length':self.data_processing_parameters['window_stride_samples'],\n # 'f_min':20, 'f_max':4000, 'n_mels':40}\n # mfcc_transformation = torchaudio.transforms.MFCC(n_mfcc=self.data_processing_parameters['feature_bin_count'], sample_rate=self.data_processing_parameters['desired_samples'], melkwargs=melkwargs, log_mels=True, norm='ortho')\n # data = mfcc_transformation(background_add)\n # data_placeholder[i] = data[:,:self.data_processing_parameters['spectrogram_length']].numpy().transpose()\n\n # Compute MFCCs - TensorFlow (matching C-based implementation)\n tf_data = tf.convert_to_tensor(background_add.numpy(), dtype=tf.float32)\n tf_stfts = tf.signal.stft(tf_data, frame_length=self.data_processing_parameters['window_size_samples'], frame_step=self.data_processing_parameters['window_stride_samples'], fft_length=1024)\n tf_spectrograms = tf.abs(tf_stfts)\n power = True\n if power:\n tf_spectrograms = tf_spectrograms ** 2\n num_spectrogram_bins = tf_stfts.shape[-1]\n linear_to_mel_weight_matrix = tf.signal.linear_to_mel_weight_matrix(40, num_spectrogram_bins, self.data_processing_parameters['desired_samples'], 20, 4000)\n tf_spectrograms = tf.cast(tf_spectrograms, tf.float32)\n tf_mel_spectrograms = tf.tensordot(tf_spectrograms, linear_to_mel_weight_matrix, 1)\n tf_mel_spectrograms.set_shape(tf_spectrograms.shape[:-1].concatenate(\n linear_to_mel_weight_matrix.shape[-1:]))\n tf_log_mel = tf.math.log(tf_mel_spectrograms + 1e-6)\n tf_mfccs = tf.signal.mfccs_from_log_mel_spectrograms(tf_log_mel)[..., :self.data_processing_parameters['feature_bin_count']]\n mfcc = torch.Tensor(tf_mfccs.numpy())\n data_placeholder[i] = mfcc\n\n\n # Shift data in [0, 255] interval to match Dory request for uint8 inputs\n data_placeholder[i] = np.clip(data_placeholder[i] + 128, 0, 255)\n\n label_index = self.word_to_index[sample['label']]\n labels_placeholder[i] = label_index\n\n return data_placeholder, labels_placeholder\n\n\nclass AudioGenerator(torch.utils.data.Dataset):\n # Returning batches of data (MFCCs) and labels\n\n def __init__(self, mode, audio_processor, training_parameters):\n self.mode = mode\n self.audio_processor = audio_processor\n if self.mode != 'training':\n training_parameters['background_frequency'] = 0\n training_parameters['background_volume'] = 0\n training_parameters['time_shift_samples'] = 0\n self.training_parameters = training_parameters\n\n\n def __len__(self):\n # Return 
dataset length\n\n if self.training_parameters['batch_size']==-1:\n return(len(self.audio_processor.data_set[self.mode]))\n else:\n return int(len(self.audio_processor.data_set[self.mode])/self.training_parameters['batch_size'])\n\n\n def __getitem__(self, idx):\n # Return a random batch of data, unless training_parameters['batch_size'] == -1\n\n data, labels = self.audio_processor.get_data(self.mode, self.training_parameters) \n\n return data, labels\n"
] |
[
[
"tensorflow.signal.linear_to_mel_weight_matrix",
"torch.nn.ConstantPad1d",
"torch.add",
"torch.Tensor",
"numpy.clip",
"tensorflow.cast",
"tensorflow.math.log",
"torch.mul",
"numpy.random.randint",
"tensorflow.signal.mfccs_from_log_mel_spectrograms",
"tensorflow.tensordot",
"numpy.random.uniform",
"numpy.array",
"numpy.zeros",
"tensorflow.abs",
"tensorflow.signal.stft"
]
] |
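The which_set helper in the kws-on-pulp row above assigns each wav file to a split by hashing its name, so membership stays stable as files are added. A minimal sketch of that mapping; the helper name split_of and the example filename are illustrative, not from the row.

import hashlib
import os.path
import re

MAX_NUM_WAVS_PER_CLASS = 2**27 - 1  # ~134M, same modulus as the row above

def split_of(filename, validation_percentage=10, testing_percentage=10):
    base_name = os.path.basename(filename)
    # Ignore '_nohash_' suffixes so close variants of a clip share a split
    hash_name = re.sub(r'_nohash_.*$', '', base_name)
    digest = hashlib.sha1(hash_name.encode()).hexdigest()
    pct = (int(digest, 16) % (MAX_NUM_WAVS_PER_CLASS + 1)) * (100.0 / MAX_NUM_WAVS_PER_CLASS)
    if pct < validation_percentage:
        return 'validation'
    if pct < validation_percentage + testing_percentage:
        return 'testing'
    return 'training'

print(split_of('yes/speaker01_nohash_0.wav'))  # same answer on every run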
stefantaubert/imageclef-lifelog-2019
|
[
"e779526583978be828ebc096538d094cc3cc260e"
] |
[
"src/segmentation/ClusterTransformer.py"
] |
[
"from src.segmentation.CachableTransformerBase import CachableTransformerBase\nimport scipy.cluster.hierarchy as shc\n\nclass ClusterTransformer(CachableTransformerBase):\n \"\"\"\n Perform hierarchical (agglomerative) clustering to the given histograms.\n Parameters:\n - metric: the metric which is used for comparing the histograms.\n - method: the method which is used for inter-cluster comparison.\n Input: list of histograms\n Output: cluster obtained through linkage\n \"\"\"\n def __init__(self, day: int, usr: int, is_dirty: bool = False, metric='euclidean', method='average'):\n self.metric = metric\n self.method = method\n return super().__init__(usr=usr, is_dirty=is_dirty, suffix=str(day))\n \n def before_transform(self, _):\n print(\"Building clusters with metric '{metric}' and method '{method}'...\".format(metric=self.metric, method=self.method))\n\n def transform_core(self, histograms: list):\n cluster = shc.linkage(histograms, method=self.method, metric=self.metric)\n # from matplotlib import pyplot as plt\n # plt.figure()\n # dn = shc.dendrogram(cluster)\n # plt.show()\n return cluster\n "
] |
[
[
"scipy.cluster.hierarchy.linkage"
]
] |
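ClusterTransformer above returns only the linkage matrix. A minimal sketch of the same scipy call on toy histograms, plus an fcluster step (an assumption, not in the transformer) to cut the dendrogram into flat labels.

import numpy as np
import scipy.cluster.hierarchy as shc

rng = np.random.default_rng(0)
histograms = rng.random((6, 8))  # toy stand-ins for the pipeline's histograms

cluster = shc.linkage(histograms, method='average', metric='euclidean')
labels = shc.fcluster(cluster, t=2, criterion='maxclust')  # at most 2 flat clusters
print(labels)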
SuperXiang/bidd-molmap
|
[
"f0f5da299e4da4ebae83eed81ddfdad31c707d92"
] |
[
"molmap/utils/matrixopt.py"
] |
[
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Aug 25 20:29:36 2019\n\n@author: [email protected]\n\nmatrix operation\n\n\"\"\"\n\nimport numpy as np\nfrom lapjv import lapjv\nfrom scipy.signal import convolve2d\nfrom scipy.spatial.distance import cdist\n\n\nclass Scatter2Grid:\n \n def __init__(self): \n \"\"\"assign x,y coords to gird numpy array\"\"\"\n self.fmap_shape = None\n self.indices = None\n self.indices_list = None\n\n \n def fit(self, df, split_channels = True, channel_col = 'Channels'):\n \"\"\"\n parameters\n ------------------\n df: dataframe with x, y columns\n split_channels: bool, if True, will apply split by group\n channel_col: column in df.columns, split to groups by this col \n \n \"\"\"\n df['idx'] = range(len(df))\n \n embedding_2d = df[['x','y']].values\n N = len(df)\n\n size1 = int(np.ceil(np.sqrt(N)))\n size2 = int(np.ceil(N/size1))\n grid_size = (size1, size2)\n \n grid = np.dstack(np.meshgrid(np.linspace(0, 1, size2), \n np.linspace(0, 1, size1))).reshape(-1, 2)\n grid_map = grid[:N]\n cost_matrix = cdist(grid_map, embedding_2d, \"sqeuclidean\").astype(np.float)\n cost_matrix = cost_matrix * (100000 / cost_matrix.max())\n row_asses, col_asses, _ = lapjv(cost_matrix)\n\n self.row_asses = row_asses\n self.col_asses = col_asses\n self.fmap_shape = grid_size\n self.indices = col_asses\n \n \n \n self.channel_col = channel_col\n self.split_channels = split_channels\n df['indices'] = self.indices\n self.df = df\n \n if self.split_channels:\n def _apply_split(x):\n return x[['idx', 'indices']].to_dict('list')\n sidx = df.groupby(channel_col).apply(_apply_split) \n channels = sidx.index.tolist()\n indices_list = sidx.tolist() \n self.channels = channels\n self.indices_list = indices_list\n\n \n def transform(self, vector_1d):\n \"\"\"vector_1d: extracted features\n \"\"\" \n ### linear assignment map ###\n M, N = self.fmap_shape\n\n if self.split_channels:\n arr_res = []\n for idict in self.indices_list:\n\n indices = idict['indices']\n idx = idict['idx']\n\n arr = np.zeros(self.fmap_shape)\n arr_1d = arr.reshape(M*N, )\n arr_1d[indices] = vector_1d[idx]\n arr = arr_1d.reshape(M, N) \n arr_res.append(arr) \n arr_res = np.stack(arr_res, axis=-1)\n else:\n arr_res = np.zeros(self.fmap_shape)\n arr_1d = arr_res.reshape(M*N, )\n arr_1d[self.indices] = vector_1d\n arr_res = arr_1d.reshape(M, N, 1) \n return arr_res\n \n\n \nclass Scatter2Array:\n \n def __init__(self, fmap_shape = (128,128)): \n \"\"\"convert x,y coords to numpy array\"\"\"\n self.fmap_shape = fmap_shape\n self.indices = None\n self.indices_list = None\n \n def _fit(self, df):\n \"\"\"df: dataframe with x, y columns\"\"\"\n M, N = self.fmap_shape\n self.X = np.linspace(df.x.min(), df.x.max(), M)\n self.Y = np.linspace(df.y.min(), df.y.max(), N)\n\n \n def _transform(self, dfnew):\n \"\"\"dfnew: dataframe with x, y columns\n in case we need to split channels\n \"\"\" \n x = dfnew.x.values\n y = dfnew.y.values\n M, N = self.fmap_shape\n indices = []\n for i in range(len(dfnew)):\n #perform a l1 distance\n idx = np.argmin(abs(self.X-x[i]))\n idy = np.argmin(abs(self.Y-y[i])) \n indice = N*idy + idx\n indices.append(indice)\n return indices\n \n \n def fit(self, df, split_channels = True, channel_col = 'Channels'):\n \"\"\"\n parameters\n ---------------\n df: embedding_df, dataframe\n split_channels: bool, if True, will apply split by group\n channel_col: column in df.columns, split to groups by this col\n \"\"\"\n df['idx'] = range(len(df))\n self.df = df\n self.channel_col = 
channel_col\n self.split_channels = split_channels\n _ = self._fit(df)\n \n if self.split_channels:\n g = df.groupby(channel_col)\n sidx = g.apply(self._transform) \n self.channels = sidx.index.tolist()\n self.indices_list = sidx.tolist()\n else: \n self.indices = self._transform(df)\n \n \n def transform(self, vector_1d):\n \"\"\"vector_1d: feature values 1d array\"\"\"\n \n M, N = self.fmap_shape\n arr = np.zeros(self.fmap_shape)\n arr_1d = arr.reshape(M*N, )\n \n if self.split_channels:\n df = self.df\n arr_res = []\n for indices, channel in zip(self.indices_list, self.channels):\n arr = np.zeros(self.fmap_shape)\n df1 = df[df[self.channel_col] == channel]\n idx = df1.idx.tolist()\n arr_1d_copy = arr_1d.copy()\n arr_1d_copy[indices] = vector_1d[idx]\n arr_1d_copy = arr_1d_copy.reshape(M, N) \n arr_res.append(arr_1d_copy)\n arr_res = np.stack(arr_res, axis=-1)\n else:\n arr_1d_copy = arr_1d.copy()\n arr_1d_copy[self.indices] = vector_1d\n arr_res = arr_1d_copy.reshape(M, N, 1) \n return arr_res\n\n\ndef smartpadding(array, target_size, mode='constant', constant_values=0):\n \"\"\"\n array: 2d array to be padded\n target_size: tuple of target array's shape\n \"\"\"\n X, Y = array.shape\n M, N = target_size\n top = int(np.ceil((M-X)/2))\n bottom = int(M - X - top)\n right = int(np.ceil((N-Y)/2))\n left = int(N - Y - right)\n array_pad = np.pad(array, pad_width=[(top, bottom),\n (left, right)], \n mode=mode, \n constant_values=constant_values)\n \n return array_pad\n\n\ndef fspecial_gauss(size = 31, sigma = 2):\n\n \"\"\"Function to mimic the 'fspecial' gaussian MATLAB function\n size should be odd value\n \"\"\"\n x, y = np.mgrid[-size//2 + 1:size//2 + 1, -size//2 + 1:size//2 + 1]\n g = np.exp(-((x**2 + y**2)/(2.0*sigma**2)))\n return g/g.sum()\n\n\ndef conv2(array, kernel_size = 31, sigma = 2, mode='same', fillvalue = 0):\n kernel = fspecial_gauss(kernel_size, sigma)\n return np.rot90(convolve2d(np.rot90(array, 2), np.rot90(kernel, 2), \n mode=mode, \n fillvalue = fillvalue), 2)\n"
] |
[
[
"numpy.rot90",
"numpy.pad",
"numpy.sqrt",
"numpy.linspace",
"scipy.spatial.distance.cdist",
"numpy.stack",
"numpy.ceil",
"numpy.exp",
"numpy.zeros"
]
] |
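Scatter2Grid.fit above solves a linear assignment between a regular grid and the 2-D embedding. A minimal sketch of the same cost construction, using scipy's linear_sum_assignment as a stand-in solver (an assumption; the row itself uses lapjv) and random toy coordinates.

import numpy as np
from scipy.optimize import linear_sum_assignment
from scipy.spatial.distance import cdist

rng = np.random.default_rng(0)
embedding_2d = rng.random((9, 2))  # toy x, y coordinates

size = 3  # 3 x 3 grid for 9 points
grid = np.dstack(np.meshgrid(np.linspace(0, 1, size),
                             np.linspace(0, 1, size))).reshape(-1, 2)
cost_matrix = cdist(grid, embedding_2d, 'sqeuclidean')
row_ind, col_ind = linear_sum_assignment(cost_matrix)  # one grid cell per point
print(col_ind.reshape(size, size))  # which point lands in each cell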
sameesayeed007/DSE-MC-Portfolio-Optimization
|
[
"06a2cd331709134390fd27415a50a8d8cfc0e44d"
] |
[
"PreProcessing.py"
] |
[
"#importing lib\nimport pandas as pd\n#read csv\ndf = pd.read_csv(\"Data_2015_1.csv\")\n#selecting necessary column\ndf1 = df.iloc[:,1:4]\n#renaming columns\ndf1.columns = ['Date','Tickers','Price']\n#Dropping treasury bill rows\ndf1 = df1[~df1.Tickers.str.contains('|'.join(['T05Y','T10Y','T15Y','T20Y','T5Y']))]\n#Converting Price column to String -> Remove ',' -> Convert to Float\ndf1.Price = pd.to_numeric(df1.Price.astype(str).str.replace(',',''), errors='coerce')\n#Pivot table to Desired Dataset\ndf2= pd.pivot_table(df1, values='Price', index=['Date'], columns='Tickers')\n#Save Dataset\ndf2.to_csv(\"New_Data_2015_1.csv\")"
] |
[
[
"pandas.read_csv",
"pandas.pivot_table"
]
] |
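The pivot_table call in PreProcessing.py above reshapes the long (Date, Tickers, Price) records into one column per ticker. A minimal sketch with toy tickers (ABC/XYZ and the prices are illustrative, not from the dataset).

import pandas as pd

df1 = pd.DataFrame({
    'Date': ['2015-01-01', '2015-01-01', '2015-01-02', '2015-01-02'],
    'Tickers': ['ABC', 'XYZ', 'ABC', 'XYZ'],
    'Price': [10.5, 20.0, 10.7, 19.8],
})
# Long-to-wide: index by Date, one price column per ticker
df2 = pd.pivot_table(df1, values='Price', index=['Date'], columns='Tickers')
print(df2)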
nivedit1/TwitterSentimentAnalysis
|
[
"972fdb46fab6f07748d685b94b80450cb5131c5f"
] |
[
"svm.py"
] |
[
"from sklearn import svm,metrics\nimport numpy as np\nimport matplotlib as plt\nfrom sklearn.decomposition import PCA\nimport datetime\n# from mlxtend.plotting import plot_decision_regions\n\ndef predict_svm(x,y,z,clf):\n clf.fit(x, y)\n predicted = clf.predict(z)\n return predicted\n\n# def make_meshgrid(x, y, h=.02):\n# \"\"\"Create a mesh of points to plot in\n#\n# Parameters\n# ----------\n# x: data to base x-axis meshgrid on\n# y: data to base y-axis meshgrid on\n# h: stepsize for meshgrid, optional\n#\n# Returns\n# -------\n# xx, yy : ndarray\n# \"\"\"\n# x_min, x_max = x.min() - 1, x.max() + 1\n# y_min, y_max = y.min() - 1, y.max() + 1\n# xx, yy = np.meshgrid(np.arange(x_min, x_max, h),\n# np.arange(y_min, y_max, h))\n# return xx, yy\n#\n#\n# def plot_contours(ax, clf, xx, yy, **params):\n# \"\"\"Plot the decision boundaries for a classifier.\n#\n# Parameters\n# ----------\n# ax: matplotlib axes object\n# clf: a classifier\n# xx: meshgrid ndarray\n# yy: meshgrid ndarray\n# params: dictionary of params to pass to contourf, optional\n# \"\"\"\n# Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])\n# Z = Z.reshape(xx.shape)\n# out = ax.contourf(xx, yy, Z, **params)\n# return out\n\nif __name__ == \"__main__\":\n startTime = datetime.datetime.now()\n x = np.load('data/train_encoded_array.npy')\n y = np.load('data/train_target_array.npy')\n y = y.astype('int')\n y = y.flatten()\n z = np.load('data/test_encoded_array.npy')\n t = np.load('data/test_target_array.npy')\n t = t.astype('int')\n t = t.flatten()\n clf = svm.SVC()\n pca = PCA(n_components=200).fit(x)\n x_200d = pca.transform(x)\n z_200d = pca.transform(z)\n predicted = predict_svm(x_200d,y,z_200d,clf)\n accuracy = metrics.accuracy_score(t, predicted, normalize=False)\n confusion_matrix = metrics.confusion_matrix(t, predicted)\n # print(np.shape(predicted))\n print(\"Accuracy Score: \",accuracy)\n print(\"Confusion Matrix:\\n\",confusion_matrix)\n endTime = datetime.datetime.now()- startTime\n print(\"Total time taken to train: \",endTime)\n\n ########################################\n\n\n # # title for the plots\n # titles = ('SVC with linear kernel')\n #\n # # Set-up 2x2 grid for plotting.\n # fig,sub = plt.subplots(1, 1)\n # plt.subplots_adjust(wspace=0.4, hspace=0.4)\n #\n #\n # pca = PCA(n_components=2).fit(x)\n # x_2d = pca.transform(x)\n #\n # X0, X1 = x_2d[:, 0], x_2d[:, 1]\n # # xx, yy = make_meshgrid(X0, X1)\n #\n # # Plot Decision Region using mlxtend's awesome plotting function\n # plot_decision_regions(X=x_2d,\n # y=y,\n # clf=clf,\n # legend=2)\n #\n # # Update plot object with X/Y axis labels and Figure Title\n # plt.xlabel(X.columns[0], size=14)\n # plt.ylabel(X.columns[1], size=14)\n # plt.title('SVM Decision Region Boundary', size=16)\n\n # for clf, title, ax in (predicted,titles,sub):\n # plot_contours(ax, clf, xx, yy,\n # cmap=plt.cm.coolwarm, alpha=0.8)\n # ax.scatter(X0, X1, c=y, cmap=plt.cm.coolwarm, s=20, edgecolors='k')\n # ax.set_xlim(xx.min(), xx.max())\n # ax.set_ylim(yy.min(), yy.max())\n # ax.set_xlabel('Sepal length')\n # ax.set_ylabel('Sepal width')\n # ax.set_xticks(())\n # ax.set_yticks(())\n # ax.set_title(title)\n #\n # plt.show()\n"
] |
[
[
"sklearn.metrics.confusion_matrix",
"sklearn.svm.SVC",
"numpy.load",
"sklearn.decomposition.PCA",
"sklearn.metrics.accuracy_score"
]
] |
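svm.py above projects the encoded features onto a lower-dimensional PCA basis before fitting the SVC. A minimal end-to-end sketch on random toy arrays; the shapes, seed, and component count are assumptions, not from the row.

import numpy as np
from sklearn import metrics, svm
from sklearn.decomposition import PCA

rng = np.random.RandomState(0)
x, y = rng.rand(100, 300), rng.randint(0, 2, 100)  # train features/labels
z, t = rng.rand(20, 300), rng.randint(0, 2, 20)    # test features/labels

pca = PCA(n_components=50).fit(x)  # fit the projection on training data only
clf = svm.SVC()
clf.fit(pca.transform(x), y)
predicted = clf.predict(pca.transform(z))
print(metrics.accuracy_score(t, predicted))  # fraction correct (normalize=True by default)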
Kaiseem/PointNu-Net
|
[
"d56e6638567202e9a75956b74b53e1d4fe599865"
] |
[
"losses/focal_loss.py"
] |
[
"import numpy as np\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\n\r\ndef binary_focal_loss(x, y, alpha=0.25, gamma=2., reduction='none'):\r\n pt = x.detach() * (y.detach() * 2 - 1)\r\n w = (1 - pt).pow(gamma)\r\n w[y == 0] *= (1 - alpha)\r\n w[y > 0] *= alpha\r\n # a = torch.where(y < 0, alpha, (1 - alpha))\r\n loss = F.binary_cross_entropy(x, y, w, reduction=reduction)\r\n return loss\r\n\r\nclass BinaryFocalLoss(nn.Module):\r\n def __init__(self, alpha=0.25, gamma=2):\r\n super(BinaryFocalLoss, self).__init__()\r\n self.alpha = alpha\r\n self.gamma = gamma\r\n self.smooth = 1e-6 # set '1e-4' when train with FP16\r\n\r\n def forward(self, output, target):\r\n prob = torch.sigmoid(output)\r\n prob = torch.clamp(prob, self.smooth, 1.0 - self.smooth)\r\n loss=-target*(1-self.alpha)*((1-prob)**self.gamma)*torch.log(prob)-(1-target)*self.alpha*(prob**self.gamma)*torch.log(1-prob)\r\n return loss\r\n\r\nclass BCELoss(nn.Module):\r\n def __init__(self):\r\n super(BCELoss, self).__init__()\r\n self.smooth = 1e-6 # set '1e-4' when train with FP16\r\n\r\n def forward(self, output, target):\r\n prob = torch.sigmoid(output)\r\n prob = torch.clamp(prob, self.smooth, 1.0 - self.smooth)\r\n loss=-target*torch.log(prob)-(1-target)*torch.log(1-prob)\r\n return loss\r\n\r\nclass FocalLoss_Ori(nn.Module):\r\n \"\"\"\r\n This is a implementation of Focal Loss with smooth label cross entropy supported which is proposed in\r\n 'Focal Loss for Dense Object Detection. (https://arxiv.org/abs/1708.02002)'\r\n Focal_Loss= -1*alpha*(1-pt)*log(pt)\r\n :param num_class:\r\n :param alpha: (tensor) 3D or 4D the scalar factor for this criterion\r\n :param gamma: (float,double) gamma > 0 reduces the relative loss for well-classified examples (p>0.5) putting more\r\n focus on hard misclassified example\r\n :param smooth: (float,double) smooth value when cross entropy\r\n :param size_average: (bool, optional) By default, the losses are averaged over each loss element in the batch.\r\n \"\"\"\r\n\r\n def __init__(self, num_class, alpha=[0.25, 0.75], gamma=2, balance_index=-1, size_average=True):\r\n super(FocalLoss_Ori, self).__init__()\r\n self.num_class = num_class\r\n self.alpha = alpha\r\n self.gamma = gamma\r\n self.size_average = size_average\r\n self.eps = 1e-6\r\n\r\n if isinstance(self.alpha, (list, tuple)):\r\n assert len(self.alpha) == self.num_class\r\n self.alpha = torch.Tensor(list(self.alpha))\r\n elif isinstance(self.alpha, (float, int)):\r\n assert 0 < self.alpha < 1.0, 'alpha should be in `(0,1)`)'\r\n assert balance_index > -1\r\n alpha = torch.ones((self.num_class))\r\n alpha *= 1 - self.alpha\r\n alpha[balance_index] = self.alpha\r\n self.alpha = alpha\r\n elif isinstance(self.alpha, torch.Tensor):\r\n self.alpha = self.alpha\r\n else:\r\n raise TypeError('Not support alpha type, expect `int|float|list|tuple|torch.Tensor`')\r\n\r\n def forward(self, logit, target):\r\n\r\n if logit.dim() > 2:\r\n # N,C,d1,d2 -> N,C,m (m=d1*d2*...)\r\n logit = logit.view(logit.size(0), logit.size(1), -1)\r\n logit = logit.transpose(1, 2).contiguous() # [N,C,d1*d2..] 
-> [N,d1*d2..,C]\r\n logit = logit.view(-1, logit.size(-1)) # [N,d1*d2..,C]-> [N*d1*d2..,C]\r\n target = target.view(-1, 1) # [N,d1,d2,...]->[N*d1*d2*...,1]\r\n\r\n # -----------legacy way------------\r\n # idx = target.cpu().long()\r\n # one_hot_key = torch.FloatTensor(target.size(0), self.num_class).zero_()\r\n # one_hot_key = one_hot_key.scatter_(1, idx, 1)\r\n # if one_hot_key.device != logit.device:\r\n # one_hot_key = one_hot_key.to(logit.device)\r\n # pt = (one_hot_key * logit).sum(1) + epsilon\r\n\r\n # ----------memory saving way--------\r\n pt = logit.gather(1, target).view(-1) + self.eps # avoid apply\r\n logpt = pt.log()\r\n\r\n if self.alpha.device != logpt.device:\r\n alpha = self.alpha.to(logpt.device)\r\n alpha_class = alpha.gather(0, target.view(-1))\r\n logpt = alpha_class * logpt\r\n loss = -1 * torch.pow(torch.sub(1.0, pt), self.gamma) * logpt\r\n\r\n if self.size_average:\r\n loss = loss.mean()\r\n else:\r\n loss = loss.sum()\r\n return loss\r\n\r\n"
] |
[
[
"torch.sigmoid",
"torch.ones",
"torch.sub",
"torch.nn.functional.binary_cross_entropy",
"torch.log",
"torch.clamp"
]
] |
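The BinaryFocalLoss in the row above down-weights easy examples through the (1 - p)^gamma factor. A minimal elementwise sketch of the same formula on toy logits and targets.

import torch

logits = torch.tensor([2.0, -1.0, 0.5])
target = torch.tensor([1.0, 0.0, 1.0])
alpha, gamma, smooth = 0.25, 2.0, 1e-6

prob = torch.clamp(torch.sigmoid(logits), smooth, 1.0 - smooth)
# Same form as BinaryFocalLoss.forward: confident correct predictions
# contribute almost nothing, hard examples dominate the loss.
loss = (-target * (1 - alpha) * (1 - prob) ** gamma * torch.log(prob)
        - (1 - target) * alpha * prob ** gamma * torch.log(1 - prob))
print(loss)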
namjiseong/greenfood
|
[
"c2612a2cb93631cd9e2f543db230a829a35b7fa3"
] |
[
"utils/benchmarks.py"
] |
[
"# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\"\"\"\nRun YOLOv5 benchmarks on all supported export formats\n\nFormat | `export.py --include` | Model\n--- | --- | ---\nPyTorch | - | yolov5s.pt\nTorchScript | `torchscript` | yolov5s.torchscript\nONNX | `onnx` | yolov5s.onnx\nOpenVINO | `openvino` | yolov5s_openvino_model/\nTensorRT | `engine` | yolov5s.engine\nCoreML | `coreml` | yolov5s.mlmodel\nTensorFlow SavedModel | `saved_model` | yolov5s_saved_model/\nTensorFlow GraphDef | `pb` | yolov5s.pb\nTensorFlow Lite | `tflite` | yolov5s.tflite\nTensorFlow Edge TPU | `edgetpu` | yolov5s_edgetpu.tflite\nTensorFlow.js | `tfjs` | yolov5s_web_model/\n\nRequirements:\n $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime openvino-dev tensorflow-cpu # CPU\n $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime-gpu openvino-dev tensorflow # GPU\n $ pip install -U nvidia-tensorrt --index-url https://pypi.ngc.nvidia.com # TensorRT\n\nUsage:\n $ python utils/benchmarks.py --weights yolov5s.pt --img 640\n\"\"\"\n\nimport argparse\nimport sys\nimport time\nfrom pathlib import Path\n\nimport pandas as pd\n\nFILE = Path(__file__).resolve()\nROOT = FILE.parents[1] # YOLOv5 root directory\nif str(ROOT) not in sys.path:\n sys.path.append(str(ROOT)) # add ROOT to PATH\n# ROOT = ROOT.relative_to(Path.cwd()) # relative\n\nimport export\nimport val\nfrom utils import notebook_init\nfrom utils.general import LOGGER, print_args\nfrom utils.torch_utils import select_device\n\n\ndef run(\n weights=ROOT / 'yolov5s.pt', # weights path\n imgsz=640, # inference size (pixels)\n batch_size=1, # batch size\n data=ROOT / 'data/coco128.yaml', # dataset.yaml path\n device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu\n half=False, # use FP16 half-precision inference\n test=False, # test exports only\n):\n y, t = [], time.time()\n formats = export.export_formats()\n device = select_device(device)\n for i, (name, f, suffix, gpu) in formats.iterrows(): # index, (name, file, suffix, gpu-capable)\n try:\n assert i != 9, 'Edge TPU not supported'\n assert i != 10, 'TF.js not supported'\n if device.type != 'cpu':\n assert gpu, f'{name} inference not supported on GPU'\n\n # Export\n if f == '-':\n w = weights # PyTorch format\n else:\n w = export.run(weights=weights, imgsz=[imgsz], include=[f], device=device, half=half)[-1] # all others\n assert suffix in str(w), 'export failed'\n\n # Validate\n result = val.run(data, w, batch_size, imgsz, plots=False, device=device, task='benchmark', half=half)\n metrics = result[0] # metrics (mp, mr, map50, map, *losses(box, obj, cls))\n speeds = result[2] # times (preprocess, inference, postprocess)\n y.append([name, round(metrics[3], 4), round(speeds[1], 2)]) # mAP, t_inference\n except Exception as e:\n LOGGER.warning(f'WARNING: Benchmark failure for {name}: {e}')\n y.append([name, None, None]) # mAP, t_inference\n\n # Print results\n LOGGER.info('\\n')\n parse_opt()\n notebook_init() # print system info\n py = pd.DataFrame(y, columns=['Format', '[email protected]:0.95', 'Inference time (ms)'] if map else ['Format', 'Export', ''])\n LOGGER.info(f'\\nBenchmarks complete ({time.time() - t:.2f}s)')\n LOGGER.info(str(py if map else py.iloc[:, :2]))\n return py\n\n\ndef test(\n weights=ROOT / 'yolov5s.pt', # weights path\n imgsz=640, # inference size (pixels)\n batch_size=1, # batch size\n data=ROOT / 'data/coco128.yaml', # dataset.yaml path\n device='', # cuda device, i.e. 
0 or 0,1,2,3 or cpu\n half=False, # use FP16 half-precision inference\n test=False, # test exports only\n):\n y, t = [], time.time()\n formats = export.export_formats()\n device = select_device(device)\n for i, (name, f, suffix, gpu) in formats.iterrows(): # index, (name, file, suffix, gpu-capable)\n try:\n w = weights if f == '-' else \\\n export.run(weights=weights, imgsz=[imgsz], include=[f], device=device, half=half)[-1] # weights\n assert suffix in str(w), 'export failed'\n y.append([name, True])\n except Exception:\n y.append([name, False]) # mAP, t_inference\n\n # Print results\n LOGGER.info('\\n')\n parse_opt()\n notebook_init() # print system info\n py = pd.DataFrame(y, columns=['Format', 'Export'])\n LOGGER.info(f'\\nExports complete ({time.time() - t:.2f}s)')\n LOGGER.info(str(py))\n return py\n\n\ndef parse_opt():\n parser = argparse.ArgumentParser()\n parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s.pt', help='weights path')\n parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)')\n parser.add_argument('--batch-size', type=int, default=1, help='batch size')\n parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path')\n parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')\n parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')\n parser.add_argument('--test', action='store_true', help='test exports only')\n opt = parser.parse_args()\n print_args(vars(opt))\n return opt\n\n\ndef main(opt):\n test(**vars(opt)) if opt.test else run(**vars(opt))\n\n\nif __name__ == \"__main__\":\n opt = parse_opt()\n main(opt)\n"
] |
[
[
"pandas.DataFrame"
]
] |
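run() in the benchmarks row above collects one [name, mAP, inference-time] triple per export format and tabulates them with pandas. A minimal sketch of that final step; the numbers are made up for illustration.

import pandas as pd

y = [['PyTorch', 0.4567, 12.3],
     ['ONNX', 0.4565, 9.8],
     ['OpenVINO', None, None]]  # None marks a failed benchmark, as in run()
py = pd.DataFrame(y, columns=['Format', '[email protected]:0.95', 'Inference time (ms)'])
print(py)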
alikefia/dask
|
[
"99ecc8e64cc70b55dc598197574a5b602a82da83"
] |
[
"dask/dataframe/io/tests/test_io.py"
] |
[
"import contextlib\nfrom concurrent.futures import ThreadPoolExecutor\nfrom threading import Lock\n\nimport numpy as np\nimport pandas as pd\nimport pytest\n\nimport dask.array as da\nimport dask.dataframe as dd\nfrom dask.dataframe._compat import tm\nfrom dask.dataframe.io.io import _meta_from_array\nfrom dask.dataframe.utils import assert_eq, is_categorical_dtype\nfrom dask.delayed import Delayed, delayed\nfrom dask.utils import tmpfile\n\n####################\n# Arrays and BColz #\n####################\n\n\ndef test_meta_from_array():\n x = np.array([[1, 2], [3, 4]], dtype=np.int64)\n res = _meta_from_array(x)\n assert isinstance(res, pd.DataFrame)\n assert res[0].dtype == np.int64\n assert res[1].dtype == np.int64\n tm.assert_index_equal(res.columns, pd.Index([0, 1]))\n\n x = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float64)\n res = _meta_from_array(x, columns=[\"a\", \"b\"])\n assert isinstance(res, pd.DataFrame)\n assert res[\"a\"].dtype == np.float64\n assert res[\"b\"].dtype == np.float64\n tm.assert_index_equal(res.columns, pd.Index([\"a\", \"b\"]))\n\n with pytest.raises(ValueError):\n _meta_from_array(x, columns=[\"a\", \"b\", \"c\"])\n\n np.random.seed(42)\n x = np.random.rand(201, 2)\n x = dd.from_array(x, chunksize=50, columns=[\"a\", \"b\"])\n assert len(x.divisions) == 6 # Should be 5 partitions and the end\n\n\ndef test_meta_from_1darray():\n x = np.array([1.0, 2.0, 3.0], dtype=np.float64)\n res = _meta_from_array(x)\n assert isinstance(res, pd.Series)\n assert res.dtype == np.float64\n\n x = np.array([1, 2, 3], dtype=np.object_)\n res = _meta_from_array(x, columns=\"x\")\n assert isinstance(res, pd.Series)\n assert res.name == \"x\"\n assert res.dtype == np.object_\n\n x = np.array([1, 2, 3], dtype=np.object_)\n res = _meta_from_array(x, columns=[\"x\"])\n assert isinstance(res, pd.DataFrame)\n assert res[\"x\"].dtype == np.object_\n tm.assert_index_equal(res.columns, pd.Index([\"x\"]))\n\n with pytest.raises(ValueError):\n _meta_from_array(x, columns=[\"a\", \"b\"])\n\n\ndef test_meta_from_recarray():\n x = np.array(\n [(i, i * 10) for i in range(10)], dtype=[(\"a\", np.float64), (\"b\", np.int64)]\n )\n res = _meta_from_array(x)\n assert isinstance(res, pd.DataFrame)\n assert res[\"a\"].dtype == np.float64\n assert res[\"b\"].dtype == np.int64\n tm.assert_index_equal(res.columns, pd.Index([\"a\", \"b\"]))\n\n res = _meta_from_array(x, columns=[\"b\", \"a\"])\n assert isinstance(res, pd.DataFrame)\n assert res[\"a\"].dtype == np.float64\n assert res[\"b\"].dtype == np.int64\n tm.assert_index_equal(res.columns, pd.Index([\"b\", \"a\"]))\n\n with pytest.raises(ValueError):\n _meta_from_array(x, columns=[\"a\", \"b\", \"c\"])\n\n\ndef test_from_array():\n x = np.arange(10 * 3).reshape(10, 3)\n d = dd.from_array(x, chunksize=4)\n assert isinstance(d, dd.DataFrame)\n tm.assert_index_equal(d.columns, pd.Index([0, 1, 2]))\n assert d.divisions == (0, 4, 8, 9)\n assert (d.compute().values == x).all()\n\n d = dd.from_array(x, chunksize=4, columns=list(\"abc\"))\n assert isinstance(d, dd.DataFrame)\n tm.assert_index_equal(d.columns, pd.Index([\"a\", \"b\", \"c\"]))\n assert d.divisions == (0, 4, 8, 9)\n assert (d.compute().values == x).all()\n\n with pytest.raises(ValueError):\n dd.from_array(np.ones(shape=(10, 10, 10)))\n\n\ndef test_from_array_with_record_dtype():\n x = np.array([(i, i * 10) for i in range(10)], dtype=[(\"a\", \"i4\"), (\"b\", \"i4\")])\n d = dd.from_array(x, chunksize=4)\n assert isinstance(d, dd.DataFrame)\n assert list(d.columns) == [\"a\", \"b\"]\n assert 
d.divisions == (0, 4, 8, 9)\n\n assert (d.compute().to_records(index=False) == x).all()\n\n\[email protected]\ndef check_bcolz_deprecation_warning():\n with pytest.warns(FutureWarning, match=\"bcolz was deprecated\"):\n yield\n\n\ndef test_from_bcolz_multiple_threads():\n bcolz = pytest.importorskip(\"bcolz\")\n\n def check(i):\n t = bcolz.ctable(\n [[1, 2, 3], [1.0, 2.0, 3.0], [\"a\", \"b\", \"a\"]], names=[\"x\", \"y\", \"a\"]\n )\n\n d = dd.from_bcolz(t, chunksize=2)\n\n assert d.npartitions == 2\n assert is_categorical_dtype(d.dtypes[\"a\"])\n assert list(d.x.compute(scheduler=\"sync\")) == [1, 2, 3]\n assert list(d.a.compute(scheduler=\"sync\")) == [\"a\", \"b\", \"a\"]\n\n d = dd.from_bcolz(t, chunksize=2, index=\"x\")\n\n L = list(d.index.compute(scheduler=\"sync\"))\n assert L == [1, 2, 3] or L == [1, 3, 2]\n\n # Names\n assert sorted(dd.from_bcolz(t, chunksize=2).dask) == sorted(\n dd.from_bcolz(t, chunksize=2).dask\n )\n assert sorted(dd.from_bcolz(t, chunksize=2).dask) != sorted(\n dd.from_bcolz(t, chunksize=3).dask\n )\n\n with check_bcolz_deprecation_warning():\n with ThreadPoolExecutor(5) as pool:\n list(pool.map(check, range(5)))\n\n\ndef test_from_bcolz():\n bcolz = pytest.importorskip(\"bcolz\")\n\n t = bcolz.ctable(\n [[1, 2, 3], [1.0, 2.0, 3.0], [\"a\", \"b\", \"a\"]], names=[\"x\", \"y\", \"a\"]\n )\n\n with check_bcolz_deprecation_warning():\n d = dd.from_bcolz(t, chunksize=2)\n assert d.npartitions == 2\n assert is_categorical_dtype(d.dtypes[\"a\"])\n assert list(d.x.compute(scheduler=\"sync\")) == [1, 2, 3]\n assert list(d.a.compute(scheduler=\"sync\")) == [\"a\", \"b\", \"a\"]\n L = list(d.index.compute(scheduler=\"sync\"))\n assert L == [0, 1, 2]\n\n d = dd.from_bcolz(t, chunksize=2, index=\"x\")\n L = list(d.index.compute(scheduler=\"sync\"))\n assert L == [1, 2, 3] or L == [1, 3, 2]\n\n # Names\n assert sorted(dd.from_bcolz(t, chunksize=2).dask) == sorted(\n dd.from_bcolz(t, chunksize=2).dask\n )\n assert sorted(dd.from_bcolz(t, chunksize=2).dask) != sorted(\n dd.from_bcolz(t, chunksize=3).dask\n )\n\n dsk = dd.from_bcolz(t, chunksize=3).dask\n\n t.append((4, 4.0, \"b\"))\n t.flush()\n\n assert sorted(dd.from_bcolz(t, chunksize=2).dask) != sorted(dsk)\n\n\ndef test_from_bcolz_no_lock():\n bcolz = pytest.importorskip(\"bcolz\")\n locktype = type(Lock())\n\n t = bcolz.ctable(\n [[1, 2, 3], [1.0, 2.0, 3.0], [\"a\", \"b\", \"a\"]], names=[\"x\", \"y\", \"a\"], chunklen=2\n )\n\n with check_bcolz_deprecation_warning():\n a = dd.from_bcolz(t, chunksize=2)\n b = dd.from_bcolz(t, chunksize=2, lock=True)\n c = dd.from_bcolz(t, chunksize=2, lock=False)\n\n assert_eq(a, b)\n assert_eq(a, c)\n\n assert not any(isinstance(item, locktype) for v in c.dask.values() for item in v)\n\n\ndef test_from_bcolz_filename():\n bcolz = pytest.importorskip(\"bcolz\")\n\n with tmpfile(\".bcolz\") as fn:\n t = bcolz.ctable(\n [[1, 2, 3], [1.0, 2.0, 3.0], [\"a\", \"b\", \"a\"]],\n names=[\"x\", \"y\", \"a\"],\n rootdir=fn,\n )\n t.flush()\n\n with check_bcolz_deprecation_warning():\n d = dd.from_bcolz(fn, chunksize=2)\n\n assert list(d.x.compute()) == [1, 2, 3]\n\n\ndef test_from_bcolz_column_order():\n bcolz = pytest.importorskip(\"bcolz\")\n\n t = bcolz.ctable(\n [[1, 2, 3], [1.0, 2.0, 3.0], [\"a\", \"b\", \"a\"]], names=[\"x\", \"y\", \"a\"]\n )\n\n with check_bcolz_deprecation_warning():\n df = dd.from_bcolz(t, chunksize=2)\n\n assert list(df.loc[0].compute().columns) == [\"x\", \"y\", \"a\"]\n\n\ndef test_from_pandas_dataframe():\n a = list(\"aaaaaaabbbbbbbbccccccc\")\n df = 
pd.DataFrame(\n dict(a=a, b=np.random.randn(len(a))),\n index=pd.date_range(start=\"20120101\", periods=len(a)),\n )\n ddf = dd.from_pandas(df, 3)\n assert len(ddf.dask) == 3\n assert len(ddf.divisions) == len(ddf.dask) + 1\n assert isinstance(ddf.divisions[0], type(df.index[0]))\n tm.assert_frame_equal(df, ddf.compute())\n ddf = dd.from_pandas(df, chunksize=8)\n msg = \"Exactly one of npartitions and chunksize must be specified.\"\n with pytest.raises(ValueError) as err:\n dd.from_pandas(df, npartitions=2, chunksize=2)\n assert msg in str(err.value)\n with pytest.raises((ValueError, AssertionError)) as err:\n dd.from_pandas(df)\n assert msg in str(err.value)\n assert len(ddf.dask) == 3\n assert len(ddf.divisions) == len(ddf.dask) + 1\n assert isinstance(ddf.divisions[0], type(df.index[0]))\n tm.assert_frame_equal(df, ddf.compute())\n\n\ndef test_from_pandas_small():\n df = pd.DataFrame({\"x\": [1, 2, 3]})\n for i in [1, 2, 30]:\n a = dd.from_pandas(df, i)\n assert len(a.compute()) == 3\n assert a.divisions[0] == 0\n assert a.divisions[-1] == 2\n\n a = dd.from_pandas(df, chunksize=i)\n assert len(a.compute()) == 3\n assert a.divisions[0] == 0\n assert a.divisions[-1] == 2\n\n for sort in [True, False]:\n for i in [0, 2]:\n df = pd.DataFrame({\"x\": [0] * i})\n ddf = dd.from_pandas(df, npartitions=5, sort=sort)\n assert_eq(df, ddf)\n\n s = pd.Series([0] * i, name=\"x\", dtype=int)\n ds = dd.from_pandas(s, npartitions=5, sort=sort)\n assert_eq(s, ds)\n\n\[email protected](\"n\", [1, 2, 4, 5])\ndef test_from_pandas_npartitions_is_accurate(n):\n df = pd.DataFrame(\n {\"x\": [1, 2, 3, 4, 5, 6], \"y\": list(\"abdabd\")}, index=[10, 20, 30, 40, 50, 60]\n )\n assert dd.from_pandas(df, npartitions=n).npartitions <= n\n\n\ndef test_from_pandas_series():\n n = 20\n s = pd.Series(np.random.randn(n), index=pd.date_range(start=\"20120101\", periods=n))\n ds = dd.from_pandas(s, 3)\n assert len(ds.dask) == 3\n assert len(ds.divisions) == len(ds.dask) + 1\n assert isinstance(ds.divisions[0], type(s.index[0]))\n tm.assert_series_equal(s, ds.compute())\n\n ds = dd.from_pandas(s, chunksize=8)\n assert len(ds.dask) == 3\n assert len(ds.divisions) == len(ds.dask) + 1\n assert isinstance(ds.divisions[0], type(s.index[0]))\n tm.assert_series_equal(s, ds.compute())\n\n\ndef test_from_pandas_non_sorted():\n df = pd.DataFrame({\"x\": [1, 2, 3]}, index=[3, 1, 2])\n ddf = dd.from_pandas(df, npartitions=2, sort=False)\n assert not ddf.known_divisions\n assert_eq(df, ddf)\n\n ddf = dd.from_pandas(df, chunksize=2, sort=False)\n assert not ddf.known_divisions\n assert_eq(df, ddf)\n\n\ndef test_from_pandas_single_row():\n df = pd.DataFrame({\"x\": [1]}, index=[1])\n ddf = dd.from_pandas(df, npartitions=1)\n assert ddf.divisions == (1, 1)\n assert_eq(ddf, df)\n\n\ndef test_from_pandas_with_datetime_index():\n df = pd.DataFrame(\n {\n \"Date\": [\n \"2015-08-28\",\n \"2015-08-27\",\n \"2015-08-26\",\n \"2015-08-25\",\n \"2015-08-24\",\n \"2015-08-21\",\n \"2015-08-20\",\n \"2015-08-19\",\n \"2015-08-18\",\n ],\n \"Val\": list(range(9)),\n }\n )\n df.Date = df.Date.astype(\"datetime64[ns]\")\n ddf = dd.from_pandas(df, 2)\n assert_eq(df, ddf)\n ddf = dd.from_pandas(df, chunksize=2)\n assert_eq(df, ddf)\n\n\ndef test_DataFrame_from_dask_array():\n x = da.ones((10, 3), chunks=(4, 2))\n\n df = dd.from_dask_array(x, [\"a\", \"b\", \"c\"])\n assert isinstance(df, dd.DataFrame)\n tm.assert_index_equal(df.columns, pd.Index([\"a\", \"b\", \"c\"]))\n assert list(df.divisions) == [0, 4, 8, 9]\n assert 
(df.compute(scheduler=\"sync\").values == x.compute(scheduler=\"sync\")).all()\n\n # dd.from_array should re-route to from_dask_array\n df2 = dd.from_array(x, columns=[\"a\", \"b\", \"c\"])\n assert isinstance(df, dd.DataFrame)\n tm.assert_index_equal(df2.columns, df.columns)\n assert df2.divisions == df.divisions\n\n\ndef test_Series_from_dask_array():\n x = da.ones(10, chunks=4)\n\n ser = dd.from_dask_array(x, \"a\")\n assert isinstance(ser, dd.Series)\n assert ser.name == \"a\"\n assert list(ser.divisions) == [0, 4, 8, 9]\n assert (ser.compute(scheduler=\"sync\").values == x.compute(scheduler=\"sync\")).all()\n\n ser = dd.from_dask_array(x)\n assert isinstance(ser, dd.Series)\n assert ser.name is None\n\n # dd.from_array should re-route to from_dask_array\n ser2 = dd.from_array(x)\n assert isinstance(ser2, dd.Series)\n assert_eq(ser, ser2)\n\n\[email protected](\"as_frame\", [True, False])\ndef test_from_dask_array_index(as_frame):\n s = dd.from_pandas(pd.Series(range(10), index=list(\"abcdefghij\")), npartitions=3)\n if as_frame:\n s = s.to_frame()\n result = dd.from_dask_array(s.values, index=s.index)\n assert_eq(s, result)\n\n\ndef test_from_dask_array_index_raises():\n x = da.random.uniform(size=(10,), chunks=(5,))\n with pytest.raises(ValueError) as m:\n dd.from_dask_array(x, index=pd.Index(np.arange(10)))\n assert m.match(\"must be an instance\")\n\n a = dd.from_pandas(pd.Series(range(12)), npartitions=2)\n b = dd.from_pandas(pd.Series(range(12)), npartitions=4)\n with pytest.raises(ValueError) as m:\n dd.from_dask_array(a.values, index=b.index)\n\n assert m.match(\"index\")\n assert m.match(\"number\")\n assert m.match(\"blocks\")\n assert m.match(\"4 != 2\")\n\n\ndef test_from_dask_array_compat_numpy_array():\n x = da.ones((3, 3, 3), chunks=2)\n\n with pytest.raises(ValueError):\n dd.from_dask_array(x) # dask\n\n with pytest.raises(ValueError):\n dd.from_array(x.compute()) # numpy\n\n x = da.ones((10, 3), chunks=(3, 3))\n d1 = dd.from_dask_array(x) # dask\n assert isinstance(d1, dd.DataFrame)\n assert (d1.compute().values == x.compute()).all()\n tm.assert_index_equal(d1.columns, pd.Index([0, 1, 2]))\n\n d2 = dd.from_array(x.compute()) # numpy\n assert isinstance(d1, dd.DataFrame)\n assert (d2.compute().values == x.compute()).all()\n tm.assert_index_equal(d2.columns, pd.Index([0, 1, 2]))\n\n with pytest.raises(ValueError):\n dd.from_dask_array(x, columns=[\"a\"]) # dask\n\n with pytest.raises(ValueError):\n dd.from_array(x.compute(), columns=[\"a\"]) # numpy\n\n d1 = dd.from_dask_array(x, columns=[\"a\", \"b\", \"c\"]) # dask\n assert isinstance(d1, dd.DataFrame)\n assert (d1.compute().values == x.compute()).all()\n tm.assert_index_equal(d1.columns, pd.Index([\"a\", \"b\", \"c\"]))\n\n d2 = dd.from_array(x.compute(), columns=[\"a\", \"b\", \"c\"]) # numpy\n assert isinstance(d1, dd.DataFrame)\n assert (d2.compute().values == x.compute()).all()\n tm.assert_index_equal(d2.columns, pd.Index([\"a\", \"b\", \"c\"]))\n\n\ndef test_from_dask_array_compat_numpy_array_1d():\n\n x = da.ones(10, chunks=3)\n d1 = dd.from_dask_array(x) # dask\n assert isinstance(d1, dd.Series)\n assert (d1.compute().values == x.compute()).all()\n assert d1.name is None\n\n d2 = dd.from_array(x.compute()) # numpy\n assert isinstance(d1, dd.Series)\n assert (d2.compute().values == x.compute()).all()\n assert d2.name is None\n\n d1 = dd.from_dask_array(x, columns=\"name\") # dask\n assert isinstance(d1, dd.Series)\n assert (d1.compute().values == x.compute()).all()\n assert d1.name == \"name\"\n\n d2 = 
dd.from_array(x.compute(), columns=\"name\")  # numpy\n    assert isinstance(d1, dd.Series)\n    assert (d2.compute().values == x.compute()).all()\n    assert d2.name == \"name\"\n\n    # passing list via columns results in DataFrame\n    d1 = dd.from_dask_array(x, columns=[\"name\"])  # dask\n    assert isinstance(d1, dd.DataFrame)\n    assert (d1.compute().values == x.compute()).all()\n    tm.assert_index_equal(d1.columns, pd.Index([\"name\"]))\n\n    d2 = dd.from_array(x.compute(), columns=[\"name\"])  # numpy\n    assert isinstance(d1, dd.DataFrame)\n    assert (d2.compute().values == x.compute()).all()\n    tm.assert_index_equal(d2.columns, pd.Index([\"name\"]))\n\n\ndef test_from_dask_array_struct_dtype():\n    x = np.array([(1, \"a\"), (2, \"b\")], dtype=[(\"a\", \"i4\"), (\"b\", \"object\")])\n    y = da.from_array(x, chunks=(1,))\n    df = dd.from_dask_array(y)\n    tm.assert_index_equal(df.columns, pd.Index([\"a\", \"b\"]))\n    assert_eq(df, pd.DataFrame(x))\n\n    assert_eq(\n        dd.from_dask_array(y, columns=[\"b\", \"a\"]), pd.DataFrame(x, columns=[\"b\", \"a\"])\n    )\n\n\ndef test_from_dask_array_unknown_chunks():\n    # Series\n    dx = da.Array(\n        {(\"x\", 0): np.arange(5), (\"x\", 1): np.arange(5, 11)},\n        \"x\",\n        ((np.nan, np.nan),),\n        np.arange(1).dtype,\n    )\n    df = dd.from_dask_array(dx)\n    assert isinstance(df, dd.Series)\n    assert not df.known_divisions\n    assert_eq(df, pd.Series(np.arange(11)), check_index=False)\n\n    # DataFrame\n    dsk = {(\"x\", 0, 0): np.random.random((2, 3)), (\"x\", 1, 0): np.random.random((5, 3))}\n    dx = da.Array(dsk, \"x\", ((np.nan, np.nan), (3,)), np.float64)\n    df = dd.from_dask_array(dx)\n    assert isinstance(df, dd.DataFrame)\n    assert not df.known_divisions\n    assert_eq(df, pd.DataFrame(dx.compute()), check_index=False)\n\n    # Unknown width\n    dx = da.Array(dsk, \"x\", ((np.nan, np.nan), (np.nan,)), np.float64)\n    with pytest.raises(ValueError):\n        df = dd.from_dask_array(dx)\n\n\ndef test_to_bag():\n    a = pd.DataFrame(\n        {\"x\": [\"a\", \"b\", \"c\", \"d\"], \"y\": [2, 3, 4, 5]},\n        index=pd.Index([1.0, 2.0, 3.0, 4.0], name=\"ind\"),\n    )\n    ddf = dd.from_pandas(a, 2)\n\n    assert ddf.to_bag().compute() == list(a.itertuples(False))\n    assert ddf.to_bag(True).compute() == list(a.itertuples(True))\n    assert ddf.to_bag(format=\"dict\").compute() == [\n        {\"x\": \"a\", \"y\": 2},\n        {\"x\": \"b\", \"y\": 3},\n        {\"x\": \"c\", \"y\": 4},\n        {\"x\": \"d\", \"y\": 5},\n    ]\n    assert ddf.to_bag(True, format=\"dict\").compute() == [\n        {\"index\": 1.0, \"x\": \"a\", \"y\": 2},\n        {\"index\": 2.0, \"x\": \"b\", \"y\": 3},\n        {\"index\": 3.0, \"x\": \"c\", \"y\": 4},\n        {\"index\": 4.0, \"x\": \"d\", \"y\": 5},\n    ]\n    assert ddf.x.to_bag(True).compute() == list(a.x.items())\n    assert ddf.x.to_bag().compute() == list(a.x)\n\n    assert ddf.x.to_bag(True, format=\"dict\").compute() == [\n        {\"x\": \"a\"},\n        {\"x\": \"b\"},\n        {\"x\": \"c\"},\n        {\"x\": \"d\"},\n    ]\n    assert ddf.x.to_bag(format=\"dict\").compute() == [\n        {\"x\": \"a\"},\n        {\"x\": \"b\"},\n        {\"x\": \"c\"},\n        {\"x\": \"d\"},\n    ]\n\n\ndef test_to_records():\n    pytest.importorskip(\"dask.array\")\n    from dask.array.utils import assert_eq\n\n    df = pd.DataFrame(\n        {\"x\": [\"a\", \"b\", \"c\", \"d\"], \"y\": [2, 3, 4, 5]},\n        index=pd.Index([1.0, 2.0, 3.0, 4.0], name=\"ind\"),\n    )\n    ddf = dd.from_pandas(df, 2)\n\n    assert_eq(\n        df.to_records(), ddf.to_records(), check_type=False\n    )  # TODO: make check_type pass\n\n\n@pytest.mark.parametrize(\"lengths\", [[2, 2], True])\ndef test_to_records_with_lengths(lengths):\n    pytest.importorskip(\"dask.array\")\n    from dask.array.utils import assert_eq\n\n    df = 
pd.DataFrame(\n {\"x\": [\"a\", \"b\", \"c\", \"d\"], \"y\": [2, 3, 4, 5]},\n index=pd.Index([1.0, 2.0, 3.0, 4.0], name=\"ind\"),\n )\n ddf = dd.from_pandas(df, 2)\n\n result = ddf.to_records(lengths=lengths)\n assert_eq(df.to_records(), result, check_type=False) # TODO: make check_type pass\n\n assert isinstance(result, da.Array)\n\n expected_chunks = ((2, 2),)\n\n assert result.chunks == expected_chunks\n\n\ndef test_to_records_raises():\n pytest.importorskip(\"dask.array\")\n df = pd.DataFrame(\n {\"x\": [\"a\", \"b\", \"c\", \"d\"], \"y\": [2, 3, 4, 5]},\n index=pd.Index([1.0, 2.0, 3.0, 4.0], name=\"ind\"),\n )\n ddf = dd.from_pandas(df, 2)\n\n with pytest.raises(ValueError):\n ddf.to_records(lengths=[2, 2, 2])\n pytest.fail(\"3 != 2\")\n\n with pytest.raises(ValueError):\n ddf.to_records(lengths=5)\n pytest.fail(\"Unexpected value\")\n\n\ndef test_from_delayed():\n df = pd.DataFrame(data=np.random.normal(size=(10, 4)), columns=list(\"abcd\"))\n parts = [df.iloc[:1], df.iloc[1:3], df.iloc[3:6], df.iloc[6:10]]\n dfs = [delayed(parts.__getitem__)(i) for i in range(4)]\n meta = dfs[0].compute()\n\n my_len = lambda x: pd.Series([len(x)])\n\n for divisions in [None, [0, 1, 3, 6, 10]]:\n ddf = dd.from_delayed(dfs, meta=meta, divisions=divisions)\n assert_eq(ddf, df)\n assert list(ddf.map_partitions(my_len).compute()) == [1, 2, 3, 4]\n assert ddf.known_divisions == (divisions is not None)\n\n s = dd.from_delayed([d.a for d in dfs], meta=meta.a, divisions=divisions)\n assert_eq(s, df.a)\n assert list(s.map_partitions(my_len).compute()) == [1, 2, 3, 4]\n assert ddf.known_divisions == (divisions is not None)\n\n meta2 = [(c, \"f8\") for c in df.columns]\n assert_eq(dd.from_delayed(dfs, meta=meta2), df)\n assert_eq(dd.from_delayed([d.a for d in dfs], meta=(\"a\", \"f8\")), df.a)\n\n with pytest.raises(ValueError):\n dd.from_delayed(dfs, meta=meta, divisions=[0, 1, 3, 6])\n\n with pytest.raises(ValueError) as e:\n dd.from_delayed(dfs, meta=meta.a).compute()\n assert str(e.value).startswith(\"Metadata mismatch found in `from_delayed`\")\n\n\ndef test_from_delayed_preserves_hlgs():\n df = pd.DataFrame(data=np.random.normal(size=(10, 4)), columns=list(\"abcd\"))\n parts = [df.iloc[:1], df.iloc[1:3], df.iloc[3:6], df.iloc[6:10]]\n dfs = [delayed(parts.__getitem__)(i) for i in range(4)]\n meta = dfs[0].compute()\n\n chained = [d.a for d in dfs]\n hlg = dd.from_delayed(chained, meta=meta).dask\n for d in chained:\n for layer_name, layer in d.dask.layers.items():\n assert hlg.layers[layer_name] == layer\n assert hlg.dependencies[layer_name] == d.dask.dependencies[layer_name]\n\n\ndef test_from_delayed_misordered_meta():\n df = pd.DataFrame(\n columns=[\"(1)\", \"(2)\", \"date\", \"ent\", \"val\"],\n data=[range(i * 5, i * 5 + 5) for i in range(3)],\n index=range(3),\n )\n\n # meta with different order for columns\n misordered_meta = pd.DataFrame(\n columns=[\"date\", \"ent\", \"val\", \"(1)\", \"(2)\"], data=[range(5)]\n )\n\n ddf = dd.from_delayed([delayed(lambda: df)()], meta=misordered_meta)\n\n with pytest.raises(ValueError) as info:\n # produces dataframe which does not match meta\n ddf.reset_index().compute(scheduler=\"sync\")\n msg = (\n \"The columns in the computed data do not match the columns in the\"\n \" provided metadata\"\n )\n assert msg in str(info.value)\n\n\ndef test_from_delayed_sorted():\n a = pd.DataFrame({\"x\": [1, 2]}, index=[1, 10])\n b = pd.DataFrame({\"x\": [4, 1]}, index=[100, 200])\n\n A = dd.from_delayed([delayed(a), delayed(b)], divisions=\"sorted\")\n assert 
A.known_divisions\n\n assert A.divisions == (1, 100, 200)\n\n\ndef test_to_delayed():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4], \"y\": [10, 20, 30, 40]})\n ddf = dd.from_pandas(df, npartitions=2)\n\n # Frame\n a, b = ddf.to_delayed()\n assert isinstance(a, Delayed)\n assert isinstance(b, Delayed)\n assert_eq(a.compute(), df.iloc[:2])\n\n # Scalar\n x = ddf.x.sum()\n dx = x.to_delayed()\n assert isinstance(dx, Delayed)\n assert_eq(dx.compute(), x)\n\n\ndef test_to_delayed_optimize_graph():\n df = pd.DataFrame({\"x\": list(range(20))})\n ddf = dd.from_pandas(df, npartitions=20)\n ddf2 = (ddf + 1).loc[:2]\n\n # Frame\n d = ddf2.to_delayed()[0]\n assert len(d.dask) < 20\n d2 = ddf2.to_delayed(optimize_graph=False)[0]\n assert sorted(d2.dask) == sorted(ddf2.dask)\n assert_eq(ddf2.get_partition(0), d.compute())\n assert_eq(ddf2.get_partition(0), d2.compute())\n\n # Scalar\n x = ddf2.x.sum()\n dx = x.to_delayed()\n dx2 = x.to_delayed(optimize_graph=False)\n assert len(dx.dask) < len(dx2.dask)\n assert_eq(dx.compute(), dx2.compute())\n\n\ndef test_from_dask_array_index_dtype():\n x = da.ones((10,), chunks=(5,))\n\n df = pd.DataFrame(\n {\n \"date\": pd.date_range(\"2019-01-01\", periods=10, freq=\"1T\"),\n \"val1\": list(range(10)),\n }\n )\n ddf = dd.from_pandas(df, npartitions=2).set_index(\"date\")\n\n ddf2 = dd.from_dask_array(x, index=ddf.index, columns=\"val2\")\n\n assert ddf.index.dtype == ddf2.index.dtype\n assert ddf.index.name == ddf2.index.name\n\n df = pd.DataFrame({\"idx\": np.arange(0, 1, 0.1), \"val1\": list(range(10))})\n ddf = dd.from_pandas(df, npartitions=2).set_index(\"idx\")\n\n ddf2 = dd.from_dask_array(x, index=ddf.index, columns=\"val2\")\n\n assert ddf.index.dtype == ddf2.index.dtype\n assert ddf.index.name == ddf2.index.name\n"
] |
[
[
"numpy.random.random",
"pandas.Series",
"numpy.random.seed",
"numpy.arange",
"pandas.Index",
"pandas.DataFrame",
"numpy.ones",
"numpy.random.normal",
"numpy.random.randn",
"numpy.random.rand",
"pandas.date_range",
"numpy.array"
]
] |
steven-lang/SPFlow
|
[
"be7492d4229857454b4e23596be7ba71d7af5960"
] |
[
"src/spn/tests/test_layerwise.py"
] |
[
"#!/usr/bin/env python3\n\nimport random\nimport unittest\n\nimport numpy as np\nimport torch\nfrom torch import nn\nfrom torch.distributions import Normal as TorchNormal\nfrom torch.nn import functional as F\n\nfrom spn.algorithms.layerwise import layers, distributions\nfrom spn.algorithms.layerwise.type_checks import (\n check_valid,\n OutOfBoundsException,\n InvalidTypeException,\n InvalidStackedSpnConfigurationException,\n)\nfrom spn.algorithms.layerwise.utils import SamplingContext, provide_evidence\n\n\nclass TestLayerwiseImplementation(unittest.TestCase):\n \"\"\"Testcases taht ensure, that inference methods for Sum, Product and Leaf layers are working as expected.\"\"\"\n\n def test_sum_layer(self):\n \"\"\"Test the forward pass of a sum layer\"\"\"\n\n # Setup layer\n in_channels = 8\n out_channels = 7\n in_features = 3\n num_repetitions = 5\n sum_layer = layers.Sum(\n in_channels=in_channels, out_channels=out_channels, in_features=in_features, num_repetitions=num_repetitions\n )\n\n w = torch.rand(in_features, in_channels, out_channels, num_repetitions)\n\n # Set the sum layer parameters\n sum_layer.weights = nn.Parameter(w)\n\n # Apply softmax once again since Sum forward pass uses F.log_softmax internally to project random weights\n # back into valid ranges\n w = F.softmax(w, dim=1)\n\n # Setup test input\n batch_size = 16\n x = torch.rand(size=(batch_size, in_features, in_channels, num_repetitions))\n\n # Expected outcome\n expected_result = torch.zeros(batch_size, in_features, out_channels, num_repetitions)\n for n in range(batch_size):\n for d in range(in_features):\n for oc in range(out_channels):\n for r in range(num_repetitions):\n expected_result[n, d, oc, r] = x[n, d, :, r] @ w[d, :, oc, r]\n\n # Do forward pass: apply log as sum layer operates in log space. 
Exp() afterwards to make it comparable to the\n # expected result\n result = sum_layer(x.log()).exp()\n\n # Run assertions\n self.assertTrue(result.shape[0] == batch_size)\n self.assertTrue(result.shape[1] == in_features)\n self.assertTrue(result.shape[2] == out_channels)\n self.assertTrue(result.shape[3] == num_repetitions)\n self.assertTrue(((result - expected_result).abs() < 1e-6).all())\n\n def test_product_layer(self):\n \"\"\"Test the product layer forward pass.\"\"\"\n\n # Setup product layer\n in_features = 9\n cardinality = 3\n num_repetitions = 5\n prod_layer = layers.Product(in_features=in_features, cardinality=cardinality, num_repetitions=num_repetitions)\n\n # Setup test input\n batch_size = 16\n in_channels = 3\n x = torch.rand(size=(batch_size, in_features, in_channels, num_repetitions))\n\n # Expected result:\n expected_result = torch.ones(batch_size, in_features // cardinality, in_channels, num_repetitions)\n for n in range(batch_size):\n for d in range(0, in_features, cardinality):\n for c in range(in_channels):\n for r in range(num_repetitions):\n for i in range(cardinality):\n expected_result[n, d // cardinality, c, r] *= x[n, d + i, c, r]\n\n # Actual result\n result = prod_layer(x.log()).exp()\n\n # Run assertions\n self.assertTrue(result.shape[0] == batch_size)\n self.assertTrue(result.shape[1] == in_features // cardinality)\n self.assertTrue(result.shape[2] == in_channels)\n self.assertTrue(result.shape[3] == num_repetitions)\n self.assertTrue(((result - expected_result).abs() < 1e-6).all())\n\n def test_normal_leaf_layer(self):\n \"\"\"Test the normal leaf layer.\"\"\"\n # Setup leaf layer\n out_channels = 7\n in_features = 8\n num_repetitions = 5\n leaf = distributions.Normal(out_channels=out_channels, in_features=in_features, num_repetitions=num_repetitions)\n\n # Setup test input\n batch_size = 3\n x = torch.rand(size=(batch_size, in_features))\n\n # Setup artificial means and scale matrices\n means = torch.randn(1, in_features, out_channels, num_repetitions)\n scale = torch.rand(1, in_features, out_channels, num_repetitions)\n\n # Use scipy norm to get pdfs\n # Expected result\n expected_result = torch.zeros(batch_size, in_features, out_channels, num_repetitions)\n\n # Repetition 1\n for n in range(batch_size):\n for d in range(in_features):\n for c in range(out_channels):\n for r in range(num_repetitions):\n expected_result[n, d, c, r] = TorchNormal(\n loc=means[0, d, c, r], scale=scale[0, d, c, r]\n ).log_prob(x[n, d])\n\n # Perform forward pass in leaf\n leaf.means.data = means\n leaf.stds.data = scale\n result = leaf(x)\n\n # Make assertions\n self.assertEqual(result.shape[0], batch_size)\n self.assertEqual(result.shape[1], in_features)\n self.assertEqual(result.shape[2], out_channels)\n self.assertTrue(((result - expected_result).abs() < 1e-6).all())\n\n\nclass TestLayerwiseSampling(unittest.TestCase):\n \"\"\"Testcases that ensure that sampling methods for Sum, Product and Leaf layers are working as expected.\"\"\"\n\n def test_sum_shape_as_root_node(self):\n \"\"\"Check that the sum node has the correct sampling shape when used as root.\"\"\"\n n = 5\n num_repetitions = 1\n for in_channels in [1, 5, 10]:\n for in_features in [1, 5, 10]:\n sum_layer = layers.Sum(\n in_channels=in_channels, out_channels=1, in_features=in_features, num_repetitions=num_repetitions\n )\n ctx = SamplingContext(n=n)\n ctx = sum_layer.sample(context=ctx)\n self.assertTrue(ctx.parent_indices.shape[0] == n)\n self.assertTrue(ctx.parent_indices.shape[1] == in_features)\n\n def 
test_product_shape_as_root_node(self):\n        \"\"\"Check that the product node has the correct sampling shape when used as root.\"\"\"\n        prod_layer = layers.Product(in_features=10, cardinality=2, num_repetitions=1)\n        ctx = SamplingContext(n=5)\n        ctx = prod_layer.sample(context=ctx)\n        self.assertTrue(ctx.parent_indices.shape[0] == 5)\n        self.assertTrue(ctx.parent_indices.shape[1] == 1)\n\n    def test_sum_as_intermediate_node(self):\n        \"\"\"Check that sum node returns the correct sample indices when used as intermediate node.\"\"\"\n        # Some values for the sum layer\n        in_features = 10\n        in_channels = 3\n        out_channels = 5\n        num_repetitions = 7\n        n = 2\n        parent_indices = torch.randint(out_channels, size=(n, in_features))\n\n        # Create sum layer\n        sum_layer = layers.Sum(\n            in_features=in_features, in_channels=in_channels, out_channels=out_channels, num_repetitions=num_repetitions\n        )\n\n        # Choose `in_features` number of random indexes from 0 to in_channels-1 which will have probability of 1.0 in\n        # the sum layer weight tensor\n        rand_indxs = torch.randint(in_channels, size=(in_features, num_repetitions))\n        rep_idxs = torch.randint(num_repetitions, size=(n,))\n\n        # Artificially set sum weights (probabilities) to 1.0\n        weights = torch.zeros(in_features, in_channels, out_channels, num_repetitions)\n        for r in range(num_repetitions):\n            weights[range(in_features), rand_indxs[:, r], :, r] = 1.0\n        sum_layer.weights = nn.Parameter(torch.log(weights))\n\n        # Perform sampling\n        ctx = SamplingContext(n=n, parent_indices=parent_indices, repetition_indices=rep_idxs)\n        sum_layer.sample(context=ctx)\n\n        # Assert that the sample indexes are those where the weights were set to 1.0\n        for i in range(n):\n            self.assertTrue((rand_indxs[:, rep_idxs[i]] == ctx.parent_indices[i, :]).all())\n\n    def test_prod_as_intermediate_node(self):\n        # Product layer values\n        in_features = 10\n        num_samples = 5\n        num_repetitions = 5\n        for cardinality in range(2, in_features):\n            prod_layer = layers.Product(\n                in_features=in_features, cardinality=cardinality, num_repetitions=num_repetitions\n            )\n\n            # Example parent indexes\n            parent_indices = torch.randint(high=5, size=(num_samples, in_features))\n\n            # Create expected indexes: each index is repeated #cardinality times\n            pad = (cardinality - in_features % cardinality) % cardinality\n            expected_sample_indices = []\n            for j in range(num_samples):\n\n                sample_i_indices = []\n                for i in parent_indices[j, :]:\n                    sample_i_indices += [i] * cardinality\n\n                # Remove padding\n                if pad > 0:\n                    sample_i_indices = sample_i_indices[:-pad]\n\n                # Add current sample\n                expected_sample_indices.append(sample_i_indices)\n\n            # As tensor\n            expected_sample_indices = torch.tensor(expected_sample_indices)\n\n            # Sample\n            ctx = SamplingContext(n=num_samples, parent_indices=parent_indices)\n            prod_layer.sample(context=ctx)\n            self.assertTrue((expected_sample_indices == ctx.parent_indices).all())\n\n    def test_normal_leaf(self):\n        # Setup leaf layer\n        out_channels = 10\n        in_features = 10\n        num_repetitions = 5\n        leaf = distributions.Normal(out_channels=out_channels, in_features=in_features, num_repetitions=num_repetitions)\n\n        # Set leaf layer mean to some random int\n        leaf.means.data = torch.randint(\n            low=-100, high=100, size=(1, in_features, out_channels, num_repetitions)\n        ).float()\n        # Set leaf layer std to 0 such that the samples will all be the mean (so we can actually make assertions in the end)\n        leaf.stds.data = torch.zeros(size=(1, in_features, out_channels, num_repetitions)).float()\n\n        # Create some random indices into the 
out_channels axis\n parent_indices = torch.randint(high=out_channels, size=(1, in_features,))\n repetition_indices = torch.randint(high=num_repetitions, size=(1,))\n\n # Perform sampling\n ctx = SamplingContext(n=1, parent_indices=parent_indices, repetition_indices=repetition_indices)\n result = leaf.sample(context=ctx)\n\n # Expected sampling\n expected_result = leaf.means.data[:, range(in_features), parent_indices, repetition_indices[0]]\n\n # Run assertions\n self.assertTrue(((result - expected_result).abs() < 1e-6).all())\n\n def test_spn_sampling(self):\n\n # Define SPN\n leaf = distributions.Normal(in_features=2 ** 3, out_channels=5, num_repetitions=1)\n sum_1 = layers.Sum(in_channels=5, in_features=2 ** 3, out_channels=20, num_repetitions=1)\n prd_1 = layers.Product(in_features=2 ** 3, cardinality=2, num_repetitions=1)\n sum_2 = layers.Sum(in_channels=20, in_features=2 ** 2, out_channels=20, num_repetitions=1)\n prd_2 = layers.Product(in_features=2 ** 2, cardinality=2, num_repetitions=1)\n sum_3 = layers.Sum(in_channels=20, in_features=2 ** 1, out_channels=20, num_repetitions=1)\n prd_3 = layers.Product(in_features=2 ** 1, cardinality=2, num_repetitions=1)\n sum_4 = layers.Sum(in_channels=20, in_features=2 ** 0, out_channels=1, num_repetitions=1)\n\n # Test forward pass\n x_test = torch.randn(1, 2 ** 3)\n\n x_test = leaf(x_test)\n x_test = sum_1(x_test)\n x_test = prd_1(x_test)\n x_test = sum_2(x_test)\n x_test = prd_2(x_test)\n x_test = sum_3(x_test)\n x_test = prd_3(x_test)\n res = sum_4(x_test)\n\n # Sampling pass\n ctx = SamplingContext(n=1000)\n sum_4.sample(context=ctx)\n prd_3.sample(context=ctx)\n sum_3.sample(context=ctx)\n prd_2.sample(context=ctx)\n sum_2.sample(context=ctx)\n prd_1.sample(context=ctx)\n sum_1.sample(context=ctx)\n samples = leaf.sample(context=ctx)\n\n def test_spn_mpe(self):\n\n # Define SPN\n leaf = distributions.Normal(in_features=2 ** 3, out_channels=5, num_repetitions=1)\n sum_1 = layers.Sum(in_channels=5, in_features=2 ** 3, out_channels=20, num_repetitions=1)\n prd_1 = layers.Product(in_features=2 ** 3, cardinality=2, num_repetitions=1)\n sum_2 = layers.Sum(in_channels=20, in_features=2 ** 2, out_channels=20, num_repetitions=1)\n prd_2 = layers.Product(in_features=2 ** 2, cardinality=2, num_repetitions=1)\n sum_3 = layers.Sum(in_channels=20, in_features=2 ** 1, out_channels=20, num_repetitions=1)\n prd_3 = layers.Product(in_features=2 ** 1, cardinality=2, num_repetitions=1)\n sum_4 = layers.Sum(in_channels=20, in_features=2 ** 0, out_channels=1, num_repetitions=1)\n\n sum_1._enable_input_cache()\n sum_2._enable_input_cache()\n sum_3._enable_input_cache()\n sum_4._enable_input_cache()\n\n # Test forward pass\n x_test = torch.randn(1, 2 ** 3)\n\n x_test = leaf(x_test)\n x_test = sum_1(x_test)\n x_test = prd_1(x_test)\n x_test = sum_2(x_test)\n x_test = prd_2(x_test)\n x_test = sum_3(x_test)\n x_test = prd_3(x_test)\n res = sum_4(x_test)\n\n ctx = SamplingContext(n=x_test.shape[0], is_mpe=True)\n sum_4.sample(context=ctx)\n prd_3.sample(context=ctx)\n sum_3.sample(context=ctx)\n prd_2.sample(context=ctx)\n sum_2.sample(context=ctx)\n prd_1.sample(context=ctx)\n sum_1.sample(context=ctx)\n\n # Should be the same\n mpe_1 = leaf.sample(context=ctx)\n mpe_2 = leaf.sample(context=ctx)\n mpe_3 = leaf.sample(context=ctx)\n self.assertTrue(((mpe_1 - mpe_2).abs() < 1e-6).all())\n self.assertTrue(((mpe_2 - mpe_3).abs() < 1e-6).all())\n\n\nclass TestTypeChecks(unittest.TestCase):\n def test_valid(self):\n # Ints\n check_valid(0, int, 0)\n 
check_valid(np.int64(0), int, 0)\n check_valid(np.int32(0), int, 0)\n check_valid(np.int16(0), int, 0)\n check_valid(np.int8(0), int, 0)\n check_valid(torch.tensor(0).int(), int, 0)\n check_valid(torch.tensor(0).long(), int, 0)\n\n # Floats\n check_valid(1.0, float, 0)\n check_valid(np.float64(1.0), float, 0)\n check_valid(np.float32(1.0), float, 0)\n check_valid(np.float16(1.0), float, 0)\n check_valid(torch.tensor(1.0).half(), float, 0)\n check_valid(torch.tensor(1.0).float(), float, 0)\n check_valid(torch.tensor(1.0).double(), float, 0)\n\n def test_invalid_range(self):\n with self.assertRaises(OutOfBoundsException):\n check_valid(0, int, 1, 2)\n\n with self.assertRaises(OutOfBoundsException):\n check_valid(0.0, float, 1.0, 2.0)\n\n with self.assertRaises(OutOfBoundsException):\n check_valid(2, int, 0, 1)\n\n def test_invalid_type(self):\n with self.assertRaises(InvalidTypeException):\n check_valid(0, float, 0, 1)\n\n with self.assertRaises(InvalidTypeException):\n check_valid(0.0, int, 0, 1)\n\n with self.assertRaises(InvalidTypeException):\n check_valid(np.int64(0), float, 0, 1)\n\n with self.assertRaises(InvalidTypeException):\n check_valid(torch.tensor(0).int(), float, 0, 1)\n\n\nclass TestRATLayerwise(unittest.TestCase):\n def test_rat_forward(self):\n from spn.experiments.RandomSPNs_layerwise.rat_spn import RatSpn\n from spn.experiments.RandomSPNs_layerwise.rat_spn import RatSpnConfig\n from spn.experiments.RandomSPNs_layerwise.distributions import RatNormal\n\n # Setup RatSpn\n config = RatSpnConfig()\n config.F = 16\n config.R = 13\n config.D = 3\n config.C = 2\n config.I = 11\n config.S = 12\n config.dropout = 0.0\n config.leaf_base_class = RatNormal\n\n spn = RatSpn(config)\n\n # Generate data\n batch_size = 32\n x = torch.randn(batch_size, config.F)\n\n # Forward pass\n result = spn(x)\n\n # Make assertions on the shape\n self.assertEqual(result.shape[0], batch_size)\n self.assertEqual(result.shape[1], config.C)\n\n def test_rat_sampling(self):\n from spn.experiments.RandomSPNs_layerwise.rat_spn import RatSpn\n from spn.experiments.RandomSPNs_layerwise.rat_spn import RatSpnConfig\n from spn.experiments.RandomSPNs_layerwise.distributions import RatNormal\n\n # Setup RatSpn\n config = RatSpnConfig()\n config.F = 16\n config.R = 13\n config.D = 3\n config.C = 2\n config.I = 11\n config.S = 12\n config.dropout = 0.0\n config.leaf_base_class = RatNormal\n spn = RatSpn(config)\n\n # Sample\n n = 10\n samples = spn.sample(n=n)\n self.assertTrue(samples.shape[0] == n)\n self.assertTrue(samples.shape[1] == config.F)\n\n # Conditional sampling\n x = torch.randn(n, config.F)\n x[:, 0 : config.F // 2] = float(\"nan\")\n spn.sample(evidence=x)\n\n def test_rat_mpe(self):\n from spn.experiments.RandomSPNs_layerwise.rat_spn import RatSpn\n from spn.experiments.RandomSPNs_layerwise.rat_spn import RatSpnConfig\n from spn.experiments.RandomSPNs_layerwise.distributions import RatNormal\n\n # Setup RatSpn\n config = RatSpnConfig()\n config.F = 16\n config.R = 13\n config.D = 3\n config.C = 2\n config.I = 11\n config.S = 12\n config.dropout = 0.0\n config.leaf_base_class = RatNormal\n spn = RatSpn(config)\n\n # Conditional MPE\n x = torch.randn(10, config.F)\n x[:, 0 : config.F // 2] = float(\"nan\")\n mpe_1 = spn.mpe(evidence=x)\n mpe_2 = spn.mpe(evidence=x)\n mpe_3 = spn.mpe(evidence=x)\n self.assertTrue(((mpe_1 - mpe_2).abs() < 1e-6).all())\n self.assertTrue(((mpe_2 - mpe_3).abs() < 1e-6).all())\n\n\n\nif __name__ == \"__main__\":\n seed = 0\n random.seed(seed)\n np.random.seed(seed)\n 
torch.manual_seed(seed)\n unittest.main()\n"
] |
[
[
"torch.nn.functional.softmax",
"torch.randint",
"torch.zeros",
"torch.ones",
"torch.randn",
"numpy.float16",
"numpy.int8",
"torch.tensor",
"torch.rand",
"numpy.float32",
"torch.nn.Parameter",
"numpy.int64",
"torch.log",
"torch.distributions.Normal",
"numpy.random.seed",
"torch.manual_seed",
"numpy.int32",
"numpy.int16",
"numpy.float64"
]
] |
dilawarm/AlphaZero
|
[
"a5e38d49ba24bf9587f5571ad8c1ea7465005d34",
"a5e38d49ba24bf9587f5571ad8c1ea7465005d34"
] |
[
"Multiprocessing.py",
"Gamerendering.py"
] |
[
"import Train\nfrom multiprocessing import Process, Manager\nimport numpy as np\nimport time\nfrom FourInARow import Config\n# from TicTacToe import Config\nfrom collections import defaultdict\n\n\nclass DataStore:\n def __init__(self, max_epochs_stored):\n self.data = {}\n self.max_epochs_stored = max_epochs_stored\n self.counter = 0\n\n def put_data(self, x, y_pol, y_val):\n self.data[self.counter] = [x, y_pol, y_val]\n self.counter = (self.counter + 1) % self.max_epochs_stored\n\n def get_data(self):\n x = []\n y_pol = []\n y_val = []\n\n for data in self.data.values():\n x.extend(data[0])\n y_pol.extend(data[1])\n y_val.extend(data[2])\n return np.array(x), np.array(y_pol), np.array(y_val)\n\n\ndef multiprocess_function(config, num_processes, num_games_each_process, num_search, name_weights, seeds=None):\n res_dict = Manager().dict()\n x = list()\n y_pol = list()\n y_val = list()\n\n workers = [Process(target=Train.generate_data,\n args=(res_dict, config, num_games_each_process, num_search, i, name_weights, seeds[i]))\n for i in range(num_processes)]\n\n for worker in workers:\n worker.daemon = True\n worker.start()\n for worker in workers: worker.join()\n\n print(\"done\")\n\n for value in res_dict.values():\n x.extend(value[0])\n y_pol.extend(value[1])\n y_val.extend(value[2])\n\n return np.array(x), np.array(y_pol), np.array(y_val)\n\n\ndef train_process(x, y_pol, y_val, load_name, store_name, h, w, d):\n # Importing libraries and setting the max gpu usage\n from keras.optimizers import SGD\n from loss import softmax_cross_entropy_with_logits, softmax\n import ResNet\n import tensorflow as tf\n from keras.backend.tensorflow_backend import set_session\n gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.7)\n sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))\n set_session(sess)\n\n # Training the agent and storing the new weights\n agent = ResNet.ResNet.build(h, w, d, 128, Config.policy_output_dim, num_res_blocks=10)\n agent.compile(loss=[softmax_cross_entropy_with_logits, 'mean_squared_error'],\n optimizer=SGD(lr=0.0005, momentum=0.9))\n agent.load_weights(load_name)\n agent.fit(x=x, y=[y_pol, y_val], batch_size=min(128, len(x)), epochs=2, callbacks=[])\n agent.save_weights(store_name)\n\n\ndef combine_equals(x, y_pol, y_val):\n dd = defaultdict(lambda: [0, None, np.zeros(y_pol[0].shape), 0])\n for i in range(len(x)):\n c = dd[str(x[i])]\n c[0] += 1\n c[1] = x[i]\n c[2] += y_pol[i]\n c[3] += y_val[i]\n x = []\n y_pol = []\n y_val = []\n for value in dd.values():\n x.append(value[1])\n y_pol.append(value[2] / value[0])\n y_val.append(value[3] / value[0])\n x = np.array(x)\n y_pol = np.array(y_pol)\n y_val = np.array(y_val)\n return x, y_pol, y_val\n\n\ndef train(config, epochs, num_processes, num_games_each_process, num_search, game_name):\n h, w, d = config.board_dims[1:]\n\n data_store = DataStore(4)\n\n # TODO: create process that does this\n # import ResNet as nn\n\n base_name = \"Models/\" + str(game_name) + \"/\"\n # nn.ResNet().build(h, w, d, 128, config.policy_output_dim, num_res_blocks=10).save_weights(base_name + \"10_3_0.h5\")\n\n for epoch in range(epochs):\n now = time.time()\n load_weights_name = base_name + \"10_3_\" + str(epoch) + \".h5\"\n seed_max = 1000000000\n seeds = [[np.random.randint(0, seed_max) for _ in range(num_games_each_process)] for _ in\n range(num_games_each_process)]\n x, y_pol, y_val = multiprocess_function(config, num_processes, num_games_each_process, num_search,\n load_weights_name,\n seeds=seeds)\n\n x, y_pol, y_val = 
combine_equals(x, y_pol, y_val)\n\n data_store.max_epochs_stored = min(40, 4 + 3 * epochs // 4)\n data_store.put_data(x, y_pol, y_val)\n x, y_pol, y_val = data_store.get_data()\n store_weights_name = base_name + \"10_3_\" + str(epoch + 1) + \".h5\"\n worker = Process(target=train_process, args=(x, y_pol, y_val, load_weights_name, store_weights_name, h, w, d))\n worker.daemon = True\n worker.start()\n worker.join()\n print(\"Finished epoch\", epoch, \"time:\", time.time() - now)\n return None\n\n\nif __name__ == '__main__':\n train(Config, 3000, 8, 500, 600, Config.name)\n",
"import pygame\nimport sys\nfrom time import sleep\nimport copy\nimport pydot\nimport heapq\nimport os\nimport random\n\nos.environ[\"PATH\"] += os.pathsep + 'C:/Program Files (x86)/Graphviz2.38/bin'\n\n# from MCTS import MCTS\nfrom Main import *\n\n\"\"\"Weird bug when trying to import MCTS, so had to star import from Main\"\"\"\n\nimport tensorflow as tf\nfrom keras.backend.tensorflow_backend import set_session\n\ngpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.7)\nsess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))\nset_session(sess)\n\n\nclass GameRendering:\n\n def __init__(self, game, agent, Config, numSearch):\n \"\"\"Initialize the pygame\"\"\"\n pygame.init()\n pygame.font.init()\n self.default_font = pygame.font.get_default_font()\n self.text_size = 10\n self.font_color = (40, 40, 40)\n self.font_renderer = pygame.font.Font(self.default_font, self.text_size)\n \"\"\"Variables\"\"\"\n self.game = copy.deepcopy(game)\n\n self.Config = Config\n\n self.start_pos = np.copy(game.board)\n self.agent = agent\n self.side_length = 100\n self.line_th = 5\n self.height = self.game.board_dims[1]\n self.width = self.game.board_dims[2]\n self.image = pygame.image.load(\"Images/nevraltnett.png\")\n self.imagerect = (0, 0)\n self.black = (0, 0, 0)\n self.white = (255, 255, 255)\n self.piece_size = self.side_length // 3\n self.screen = pygame.display.set_mode([self.side_length * self.width + self.line_th + self.imagerect[0],\n max(self.side_length * (self.height + 1) + self.line_th,\n self.imagerect[1])]) # + self.imagerect[0],self.imagerect[1]\n self.weights = [0] * len(self.game.get_moves()) # Set weight to empty array to represent all moves\n self.count = 0 # Switches sides of human and machine\n self.primary_line = []\n self.won = 0\n self.tied = 0\n self.lost = 0\n\n \"\"\"find what game is given\"\"\"\n self.tictactoe = False\n self.fourinarow = False\n if self.game.name == \"TicTacToe\":\n self.tictactoe = True\n self.background_color = (0, 109, 50)\n elif self.game.name == \"FourInARow\":\n self.fourinarow = True\n self.background_color = (0, 100, 150)\n \"\"\"check if graphviz is installed and in path\"\"\"\n try:\n self.test_graph = \"digraph g{rankdir=LR;testing -> testing -> tested}\"\n self.test_graph = pydot.graph_from_dot_data(self.test_graph)[0]\n self.test_graph.write_png('Images/graph1.png')\n except FileNotFoundError:\n print(\"Error:Graphviz is not installed or not on path, skipping visualization\")\n self.draw_graph = False\n except:\n print(\"Error: Unknown error with graph visualization, skipping\")\n self.draw_graph = False\n else:\n self.draw_graph = True\n\n self.update_screen()\n\n \"\"\"continuosly check for updates\"\"\"\n while True:\n self.mouse_pos = pygame.mouse.get_pos()\n if self.game.is_final():\n sleep(4)\n \"\"\"Show death screen\"\"\"\n self.screen.fill(self.black)\n font_size = max((self.side_length * self.width + self.line_th) // 35, 20)\n myfont = pygame.font.SysFont(self.default_font, 2 * font_size)\n \"\"\"Who won\"\"\"\n if self.game.get_outcome()[0] == 0:\n winner = myfont.render('Tied', False, (255, 255, 255))\n self.tied += 1\n elif (self.game.get_outcome()[0] == 1 and self.count % 2 == 0) or (\n self.game.get_outcome()[1] == 1 and self.count % 2 == 1):\n winner = myfont.render('AI won!', False, (255, 255, 255))\n self.lost += 1\n else:\n winner = myfont.render('Human won', False, (255, 255, 255))\n self.won += 1\n self.screen.blit(winner, (\n (self.side_length * self.width + self.line_th + self.imagerect[0]) // 2 - 
winner.get_width() / 2,\n                    self.side_length // 3 - winner.get_height() // 2))\n\n                myfont = pygame.font.SysFont('Comic Sans MS', font_size)\n                switch_side = myfont.render('(Switching sides)', False, (0, 255, 0))\n                self.screen.blit(switch_side, (\n                    (self.side_length * self.width + self.line_th + self.imagerect[\n                        0]) // 2 - switch_side.get_width() / 2,\n                    self.side_length // 3 - switch_side.get_height() // 2 + winner.get_height()))\n\n                \"\"\"Shows the score\"\"\"\n                myfont = pygame.font.SysFont(self.default_font, 2 * font_size)\n                wtl = myfont.render('Win/Tie/Loss', False, self.white)\n                self.screen.blit(wtl, (\n                    (self.side_length * self.width + self.line_th + self.imagerect[0]) // 2 - wtl.get_width() / 2,\n                    (self.side_length * self.height) // 2 - wtl.get_height() // 2))\n                score = myfont.render(str(self.won) + \"-\" + str(self.tied) + \"-\" + str(self.lost), False, self.white)\n                self.screen.blit(score, (\n                    (self.side_length * self.width + self.line_th + self.imagerect[0]) // 2 - score.get_width() / 2,\n                    (self.side_length * self.height) // 2 + wtl.get_height()))\n\n                pygame.display.flip()\n                print(\"GAME IS OVER\")\n                self.count += 1  # Switches sides\n\n                sleep(1)  # Catch glitchy graphics\n                sleep(4)  # Hold the death screen open\n                \"\"\"clean the board and graphics\"\"\"\n                self.weights = [0] * len(self.weights)\n                self.game.board = np.copy(self.start_pos)  # don't want to change it\n                self.game.history = []\n                self.imagerect = (0, 0)\n                self.screen = pygame.display.set_mode(\n                    [self.side_length * self.width + self.line_th + self.imagerect[0],\n                     max(self.side_length * (self.height + 1) + self.line_th, self.imagerect[1])])\n                self.update_screen()\n\n            elif (self.game.get_turn() + self.count) % 2 and not self.game.is_final():\n                \"\"\"look for human input\"\"\"\n                for event in pygame.event.get():\n                    if event.type == pygame.QUIT or pygame.key.get_pressed()[27]:  # Escape quits the game\n                        sys.exit()\n                    elif pygame.mouse.get_pressed()[0] and self.mouse_pos[\n                        0] < self.side_length * self.width + self.line_th and self.mouse_pos[\n                        1] < self.side_length * self.height + self.line_th:  # Check if mouse is pressed in board\n                        self.execute_move()\n                        self.update_screen()\n                    elif pygame.key.get_pressed()[8]:  # Backspace to undo move\n                        self.game.undo_move()\n                        self.game.undo_move()\n                        self.update_screen()\n\n            elif not self.game.is_final():\n                \"\"\"If machines turn, machine do move\"\"\"\n                tree = MCTS.MCTS(self.game, self.game.board, self.agent, self.Config)\n                if len(self.game.get_moves()) > 1:  # Does not compute last possible move very deeply\n                    for searches in range(numSearch):\n                        tree.search()\n                        if (searches % 100 == 0 and searches != 0):\n                            \"\"\"update weights on screen every 100 searches\"\"\"\n                            self.NNvisual(tree, num_nodes=20)\n\n                    self.NNvisual(tree, num_nodes=20)\n                else:\n                    tree.search_series(numSearch)\n                predict = tree.get_most_searched_move(tree.root)\n                # print(\"The position is evaluated as: \", self.agent.predict(np.array([self.game.get_board()]))[1])\n                self.game.execute_move(predict)\n                self.update_screen()\n                self.show_gamelines(self.primary_line)\n                self.see_valuation()\n\n    def see_valuation(self):\n        \"\"\"see how the nn values different moves on its turn for itself\"\"\"\n        if self.tictactoe:\n            \"\"\"Has to flip the logic in y direction since it increases down\"\"\"\n            possible_moves = self.game.get_moves()\n            for move in possible_moves:\n                self.label = self.font_renderer.render(str(round(self.weights[move], 4)), 1, self.font_color)\n                self.screen.blit(self.label, [(self.side_length + self.line_th) // 2 + self.side_length * (\n                        
(self.Config.move_to_number(move)) % self.width) - self.label.get_width() / 2,\n self.side_length * ((8 - self.Config.move_to_number(\n move)) // self.width) + self.label.get_height() - self.line_th])\n pygame.display.flip()\n elif self.fourinarow:\n possible_moves = self.game.get_moves()\n for move in possible_moves:\n self.label = self.font_renderer.render(str(round(self.weights[move], 4)), 1, self.font_color)\n self.screen.blit(self.label, [(self.side_length + self.line_th) // 2 + self.side_length * (\n (self.Config.move_to_number(move)) % self.width) - self.label.get_width() / 2,\n (self.side_length // self.height) - self.label.get_height()])\n pygame.display.flip()\n\n def _render(self, background_color, line_color, p1_color, p2_color, possible_color):\n \"\"\"Generic board maker\"\"\"\n\n \"\"\"Background\"\"\"\n self.screen.fill(background_color)\n self.screen.blit(self.image, (self.width * self.side_length + self.line_th, 0))\n \"\"\"Draw board lines\n pygame.draw.line(surface, color, start_pos, end_pos, width)\"\"\"\n for line in range(self.width + 1):\n \"\"\"Vertical lines\"\"\"\n pygame.draw.line(self.screen, line_color,\n [line * self.side_length + 2, 0],\n [line * self.side_length + 2, self.side_length * self.height + self.line_th - 2],\n self.line_th)\n \"\"\"Horizontal lines\"\"\"\n if line <= self.height:\n pygame.draw.line(self.screen, line_color,\n [0, line * self.side_length + 2],\n [self.side_length * self.width + self.line_th - 2, line * self.side_length + 2],\n self.line_th)\n\n \"\"\"Render pieces\"\"\"\n board = self.game.board\n for x in range(self.width):\n for y in range(self.height):\n if board[self.height - y - 1, x, 0] == 1:\n pygame.draw.circle(self.screen, p1_color,\n [(self.side_length + self.line_th) // 2 + self.side_length * x,\n (self.side_length + self.line_th) // 2 + self.side_length * y],\n self.piece_size)\n elif board[self.height - y - 1, x, 1] == 1:\n pygame.draw.circle(self.screen, p2_color,\n [(self.side_length + self.line_th) // 2 + self.side_length * x,\n (self.side_length + self.line_th) // 2 + self.side_length * y],\n self.piece_size)\n\n \"\"\"Render possible moves\"\"\"\n possible_moves = self.game.get_moves()\n for move in possible_moves:\n new_possible_color = (possible_color[0], possible_color[1], 200 * self.weights[move])\n if self.tictactoe:\n move = self.y_flip(move)\n pygame.draw.circle(self.screen, new_possible_color, [\n (self.side_length + self.line_th) // 2 + self.side_length * (\n (self.Config.move_to_number(move)) % self.width),\n (self.side_length + self.line_th) // 2 + self.side_length * (\n (self.Config.move_to_number(move)) // self.width)], self.piece_size)\n myfont = pygame.font.SysFont(self.default_font, self.piece_size)\n action = myfont.render(str(move), False, background_color)\n self.screen.blit(action, ((self.side_length + self.line_th) // 2 + self.side_length * (\n (self.Config.move_to_number(move)) % self.width) - action.get_width() // 2,\n (self.side_length + self.line_th) // 2 + self.side_length * (\n (self.Config.move_to_number(\n move)) // self.width) - action.get_height() // 2))\n pygame.display.flip()\n\n def _render_tictactoe(self):\n \"\"\"Update screen for Tic tac toe\"\"\"\n line_color = self.black\n p1_color = self.white\n p2_color = self.black\n possible_color = (255, 0, 0)\n self._render(self.background_color, line_color, p1_color, p2_color, possible_color)\n\n def _render_fourinarow(self):\n \"\"\"Update screen for Four in a Row\"\"\"\n line_color = self.black\n p1_color = (255, 0, 0)\n p2_color = 
(255, 255, 0)\n        possible_color = (0, 255, 0)\n        self._render(self.background_color, line_color, p1_color, p2_color, possible_color)\n\n    def update_screen(self):\n        \"\"\"updates the screen with the right graphics\"\"\"\n        if self.tictactoe:\n            self._render_tictactoe()\n        elif self.fourinarow:\n            self._render_fourinarow()\n\n    def execute_move(self):\n        \"\"\"Find the mouse position and execute move based on it\"\"\"\n        if self.tictactoe:\n            self.mouse_pos = (self.mouse_pos[0], self.height * self.side_length - self.mouse_pos[1])\n        self.game.execute_move(self.Config.number_to_move((self.mouse_pos[1] - 2) // self.side_length * self.width + (\n                self.mouse_pos[0] - 2) // self.side_length))  # needs to be generalized\n        sleep(0.2)  # Delay for preventing multiple presses accidentally\n\n    def build_graph(self, graph_root, tree_root, graph, heap):\n        heapq.heappush(heap, (100000 - tree_root.get_times_visited(), random.randint(1, 10000000000), tree_root))\n        # graph.add_node(node)\n        for child in tree_root.children:\n            self.build_graph(None, child, graph, heap)\n        # if graph_root:\n        #     graph.add_edge(pydot.Edge(graph_root, node, label=str(\"a\")))\n\n    def visualize_tree(self, root, num_nodes=20):\n        heap = []\n        graph = pydot.Dot(graph_type='graph')\n        self.build_graph(None, root, graph, heap)\n        for x in range(num_nodes):\n            if heap == []:\n                break\n            top_heap = heapq.heappop(heap)\n            tree_node, node_visits = top_heap[2], top_heap[0]\n            node = pydot.Node(id(tree_node), style='filled',\n                              fillcolor=\"#aa88aa\",\n                              label=str(- node_visits + 100000), shape=\"circle\", fixedsize=\"shape\")\n            graph.add_node(node)\n            if x != 0:\n                move = tree_node.get_last_action()\n                if self.tictactoe:\n                    move = self.y_flip(move)\n                graph.add_edge(pydot.Edge(id(tree_node.parent), id(tree_node), label=str(move)))\n        \"\"\"rotate the graph, set the background\"\"\"\n        graph_string = graph.to_string()\n        graph_string = graph_string.replace(\"{\", '{rankdir = LR;bgcolor=\"#%02x%02x%02x\";' % self.background_color)\n        graph = pydot.graph_from_dot_data(graph_string)[0]\n        graph.write_png('graph.png')\n\n    def NNvisual(self, tree, num_nodes):\n        '''visualize tree'''\n        if self.draw_graph:\n            self.visualize_tree(tree.root, num_nodes)\n            self.image = pygame.image.load(\"graph.png\")\n            self.imagerect = self.image.get_size()\n            self.image = pygame.transform.smoothscale(self.image,\n                                                      (min(720, self.imagerect[0]), min(720, self.imagerect[1])))\n            self.imagerect = self.image.get_size()\n            self.screen = pygame.display.set_mode(\n                [self.side_length * self.width + self.line_th + self.imagerect[0],\n                 max(self.side_length * (self.height + 1) + self.line_th, self.imagerect[1])])\n        self.weights = tree.get_posterior_probabilities()\n        \"\"\"Build most searched line, and show it on screen\"\"\"\n        self.primary_line = []\n        best_action = tree.get_most_searched_child_node(tree.root)\n        while best_action != None:\n            if self.tictactoe:\n                self.primary_line.append(self.y_flip(best_action.last_action))\n            else:\n                self.primary_line.append(best_action.last_action)\n            best_action = tree.get_most_searched_child_node(best_action)\n        '''update screen'''\n        self.update_screen()\n        self.see_valuation()\n        self.show_gamelines(self.primary_line)\n\n    def show_gamelines(self, pline):\n        myfont = pygame.font.SysFont(self.default_font, 30)\n        line = myfont.render('Projected line:' + str(pline), False, self.white)\n        self.screen.blit(line, (0, (self.side_length * self.height) + self.line_th))\n        pygame.display.flip()\n\n    def y_flip(self, move):\n        # for use with TicTacToe\n        return (self.width * self.height) - (move) // self.width * 
self.width + (move) % self.width - self.width\n"
] |
[
[
"tensorflow.ConfigProto",
"tensorflow.GPUOptions",
"numpy.array",
"numpy.zeros",
"numpy.random.randint"
],
[
"tensorflow.ConfigProto",
"tensorflow.GPUOptions"
]
] |
expertanalytics/fagkveld
|
[
"96e16b9610e8b60d36425e7bc5435d266de1f8bf"
] |
[
"worldmap/test/test_country.py"
] |
[
"import pytest\nimport numpy as np\nimport bokeh\n\n\[email protected]()\ndef dtm():\n dtm = DTM()\n country1 = Country()\n country1.border_x = [np.array([1, 3, 3, 1])]\n country1.border_y = [np.array([0, 0, 1, 1])]\n\n country2 = Country()\n country2.border_x = [np.array([1, 3, 3, 1])]\n country2.border_y = [np.array([1, 1, 2, 2])]\n\n country3 = Country()\n country3.border_x = [np.array([0, 2, 2, 0])]\n country3.border_y = [np.array([2, 2, 3, 3])]\n\n country4 = Country()\n country4.border_x = [np.array([2, 4, 4, 2])]\n country4.border_y = [np.array([2, 2, 3, 3])]\n dtm.countries = {\n \"a\": country1,\n \"b\": country2,\n \"c\": country3,\n \"d\": country4\n }\n return dtm\n\n"
] |
[
[
"numpy.array"
]
] |
CodyJG10/Business-Finder
|
[
"6e725bff340086417582fffac6493909e287008a"
] |
[
"place_searching.py"
] |
[
"import googlemaps\nimport pandas as pd\nimport time\nimport sys\nimport json\n\ndef retrieve_places():\n # print(sys.argv[1])\n # print('test')\n print(location_query)\n location = client.geocode(location_query)[0]['geometry']['location']\n\n results = client.places(query = query,\n location = location,\n radius=distance,\n\n )\n\n status = results['status']\n\n print('Status: ' + status)\n\n if status == 'ZERO_RESULTS':\n #TODO Add func\n print('there were zero results!')\n\n\n results_df = pd.DataFrame(results['results'])\n\n while 'next_page_token' in results and len(results_df.index) < results_to_find:\n print('retrieving next page')\n print('starting delay...')\n time.sleep(next_page_delay)\n print('delay complete')\n token = results['next_page_token']\n \n results = client.places(query = query,\n location = location,\n radius=distance,\n page_token = token\n ) \n\n if results['status'] == 'ZERO_RESULTS':\n print('next page led to zero results!')\n break\n \n next_results_df = pd.DataFrame(results['results'])\n results_df = results_df.append(next_results_df)\n print('Retrieved ' + str(len(next_results_df.index)) + ' additional results')\n\n\n results_df.reset_index(drop=True, inplace=True)\n\n print('Completed retrieving results')\n\n return results_df\n\ndef format_results(df):\n cols_to_remove = ['business_status', 'geometry', 'icon', 'icon_background_color', \\\n 'icon_mask_base_uri', 'opening_hours', 'photos', 'plus_code', 'reference', \\\n 'types' ]\n \n df.drop(cols_to_remove, axis=1, inplace=True)\n \n print('Completed formatting results!')\n\n return df\n\ndef init_client(api_key):\n global client\n client = googlemaps.Client(key=api_key)\n\n\n# returns new dataframe with updated data\ndef add_place_details_to_place(place_id, df):\n place_details = client.place(place_id)['result']\n\n phone = place_details['formatted_phone_number'] \\\n if 'formatted_phone_number' in place_details \\\n else ''\n\n website = place_details['website'] \\\n if 'website' in place_details \\\n else ''\n\n index = df.index[df['place_id'] == place_id].tolist()[0]\n\n df.at[index, 'phone'] = phone\n df.at[index, 'website'] = website\n\n print('retrieving place details')\n\n return df\n\nargs = sys.argv\n\n# Location\n# Query\n# Distance (miles)\n# results to find\n# delay\n\nf = open('config.json')\nconfig = json.load(f)\n\nlocation_query = '20550 Maxim Pkwy, Orlando, FL 32833'\nquery = 'Realtor'\ndistance = 10\nresults_to_find = 20\napi_key = config[\"API_KEY\"] \n# AIzaSyClJgh0MhdAeOIX4yEuSqqGXw3fGhCyi6E\n#TODO add api_key dynamically\nnext_page_delay = 1.5\n\nif(len(args) > 1):\n location_query = args[1]\n query = args[2]\n distance = int(args[3])\n results_to_find = int(args[4])\n next_page_delay = float(args[5]) if len(args) >= 6 else 2.5\n\ninit_client(api_key)\n\ndf = retrieve_places()\n\nfor row in df.itertuples():\n place_id = row.place_id\n df = add_place_details_to_place(place_id, df)\n\nformat_results(df)\n\nprint('process complete.')\n\ndf.index.name = 'Index'\ndf.to_csv('results.csv')\n\nprint('results saved.')\n\n# EX. place_searching.py \"Fort Myers\" \"Businesses\" 10 160 1.5"
] |
[
[
"pandas.DataFrame"
]
] |
mberkanbicer/torch-light
|
[
"facd5e12f45127e81951ca1e6119960e196c6165"
] |
[
"voice-conversion/encode/data_loader.py"
] |
[
"import pickle \nimport os \n\nfrom torch.utils import data\nimport torch\nimport numpy as np\n\nclass Utterances(data.Dataset):\n def __init__(self, hparams):\n self.len_crop = hparams.len_crop\n self.train_dataset = pickle.load(open(os.path.join(hparams.data_dir, hparams.training_data), \"rb\"))\n self.num_tokens = len(self.train_dataset)\n \n def __getitem__(self, index):\n embedding, mel, f0 = self.train_dataset[index]\n \n if mel.shape[0] < self.len_crop:\n len_pad = self.len_crop - mel.shape[0]\n uttr = np.pad(mel, ((0,len_pad),(0,0)), 'constant')\n f0 = np.pad(f0, (0,len_pad), 'constant')\n elif tmp.shape[0] > self.len_crop:\n left = np.random.randint(mel.shape[0]-self.len_crop)\n uttr = mel[left:left+self.len_crop, :]\n f0 = f0[left:left+self.len_crop]\n else:\n uttr = mel\n f0 = f0\n \n return uttr, embedding, f0\n \n def __len__(self):\n return self.num_tokens\n\ndef get_loader(hparams, num_workers=0): \n dataset = Utterances(hparams)\n \n worker_init_fn = lambda x: np.random.seed((torch.initial_seed()) % (2**32))\n data_loader = data.DataLoader(dataset=dataset,\n batch_size=hparams.batch_size,\n shuffle=True,\n num_workers=num_workers,\n drop_last=True,\n worker_init_fn=worker_init_fn)\n return data_loader"
] |
[
[
"torch.utils.data.DataLoader",
"numpy.pad",
"torch.initial_seed",
"numpy.random.randint"
]
] |
weifanjiang/treeVerification
|
[
"2a841d24d3f930ffdfae9c554f4c1d9fa8756edc"
] |
[
"generate_bound.py"
] |
[
"import argparse\nimport numpy as np\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-i', '--input', help='input testing data')\nparser.add_argument('-o', '--output', help='output path for features')\nparser.add_argument('-e', '--epsilon', help='epsilon', type=float)\nargs = vars(parser.parse_args())\n\ninput = open(args['input'], 'r')\none_data = input.readlines()[0]\nfout = open(args['output'], 'w')\nfout.write('{\\n')\n\neps = args['epsilon']\nchange = eps * 0.2\n\nkeys = list()\nfor token in one_data.split(\" \")[1:]:\n key, val = token.split(\":\")\n keys.append(key)\n\nfor i in range(len(keys)):\n key = keys[i]\n lo = np.random.uniform(-1 * change, change)\n hi = np.random.uniform(-1 * change, change)\n if i == len(keys) - 1:\n fout.write(\" \\\"{}\\\": [{}, {}]\\n\".format(key, eps + lo, eps + hi))\n else:\n fout.write(\" \\\"{}\\\": [{}, {}],\\n\".format(key, eps + lo, eps + hi))\nfout.write(\"}\")"
] |
[
[
"numpy.random.uniform"
]
] |
priyansh-1902/olympus
|
[
"f57ad769918c0d5d805c439ab5ffbd180af698fa"
] |
[
"src/olympus/planners/planner_particle_swarms/wrapper_particle_swarms.py"
] |
[
"#!/usr/bin/env python\n\nimport time\nfrom olympus.objects import ParameterVector\nfrom olympus.planners import AbstractPlanner\nfrom olympus.utils import daemon\nimport numpy as np\n\n\nclass ParticleSwarms(AbstractPlanner):\n\n def __init__(self, goal='minimize', max_iters=10**8, options={'c1': 0.5, 'c2': 0.3, 'w': 0.9}, particles=10):\n \"\"\"\n Particle swarm optimizer.\n\n Args:\n goal (str): The optimization goal, either 'minimize' or 'maximize'. Default is 'minimize'.\n max_iters (int): The maximum number of iterations for the swarm to search.\n options (dict): ???\n particles (int): The number of particles in the swarm.\n \"\"\"\n AbstractPlanner.__init__(**locals())\n self.has_optimizer = False\n self.is_converged = False\n\n def _set_param_space(self, param_space):\n self.param_space = param_space\n\n def _tell(self, observations):\n self._params = observations.get_params(as_array = False)\n self._values = observations.get_values(as_array=True, opposite=self.flip_measurements)\n if len(self._values) > 0:\n self.RECEIVED_VALUES.append(self._values[-1])\n\n def _priv_evaluator(self, params_array):\n for params in params_array:\n params = self._project_into_domain(params)\n self.SUBMITTED_PARAMS.append(params)\n while len(self.RECEIVED_VALUES) < self.particles:\n time.sleep(0.1)\n measurements = np.array(self.RECEIVED_VALUES)\n measurements = np.reshape(measurements, (len(measurements),))\n self.RECEIVED_VALUES = []\n return measurements\n\n @daemon\n def create_optimizer(self):\n from pyswarms.single import GlobalBestPSO\n self.optimizer = GlobalBestPSO(\n n_particles=self.particles,\n options=self.options,\n dimensions=len(self.param_space))\n cost, pos = self.optimizer.optimize(self._priv_evaluator, iters=self.max_iters)\n self.is_converged = True\n\n def _ask(self):\n if self.has_optimizer is False:\n self.create_optimizer()\n self.has_optimizer = True\n\n while len(self.SUBMITTED_PARAMS) == 0:\n time.sleep(0.1)\n if self.is_converged:\n return ParameterVector().from_dict(self._params[-1])\n params = self.SUBMITTED_PARAMS.pop(0)\n return ParameterVector().from_array(params, self.param_space)\n"
] |
[
[
"numpy.array"
]
] |
alexandru-dinu/MCMC
|
[
"c45632a7aba9e78a30c47644b261130b261f6278"
] |
[
"src/metropolis_hastings.py"
] |
[
"import matplotlib.pyplot as plt\nimport numpy as np\nimport scipy.stats as stats\nimport seaborn as sns\n\nmus = np.array([5, 5])\nsigmas = np.array([[1, 0.9], [0.9, 1]])\n\n\ndef circle(x, y):\n return (x - 1) ** 2 + (y - 2) ** 2 - 3 ** 2\n\n\ndef pgauss(x, y):\n return stats.multivariate_normal.pdf([x, y], mean=mus, cov=sigmas)\n\n\ndef metropolis_hastings(p, num_iter=1000):\n x, y = 0.0, 0.0\n samples = np.zeros((num_iter, 2))\n\n for i in range(num_iter):\n eps = np.random.normal(0, 1, size=2)\n x_star, y_star = np.array([x, y]) + eps\n if np.random.rand() < p(x_star, y_star) / p(x, y):\n x, y = x_star, y_star\n samples[i] = [x, y]\n\n return samples\n\n\nif __name__ == \"__main__\":\n samples = metropolis_hastings(circle, num_iter=10000)\n sns.jointplot(samples[:, 0], samples[:, 1])\n plt.title(\"Circle\")\n plt.show()\n\n samples = metropolis_hastings(pgauss, num_iter=10000)\n sns.jointplot(samples[:, 0], samples[:, 1])\n plt.title(\"Gauss\")\n plt.show()\n"
] |
[
[
"matplotlib.pyplot.title",
"numpy.random.normal",
"numpy.random.rand",
"numpy.array",
"numpy.zeros",
"matplotlib.pyplot.show",
"scipy.stats.multivariate_normal.pdf"
]
] |
dreness/data-wrangling-components
|
[
"cf1a6eb152bb4f2fd1d3b933b9aa32b965a29610"
] |
[
"python/data_wrangling_components/engine/verbs/union.py"
] |
[
"#\r\n# Copyright (c) Microsoft. All rights reserved.\r\n# Licensed under the MIT license. See LICENSE file in the project.\r\n#\r\n\r\nimport pandas as pd\r\n\r\nfrom data_wrangling_components.table_store import TableStore\r\nfrom data_wrangling_components.types import SetOperationArgs, Step\r\n\r\n\r\ndef union(step: Step, store: TableStore):\r\n \"\"\"Calculates the set union between two tables.\r\n\r\n :param step:\r\n Parameters to execute the operation.\r\n See :py:class:`~data_wrangling_components.engine.types.SetOperationArgs`.\r\n :type step: Step\r\n :param store:\r\n Table store that contains the inputs to be used in the execution.\r\n :type store: TableStore\r\n\r\n :return: new table with the result of the operation.\r\n \"\"\"\r\n args = SetOperationArgs(others=step.args[\"others\"])\r\n input_table = store.get(step.input)\r\n others = [store.get(other) for other in args.others]\r\n output = pd.concat([input_table] + others, ignore_index=True).drop_duplicates()\r\n\r\n return output\r\n"
] |
[
[
"pandas.concat"
]
] |
congma/libsncompress
|
[
"ef0c8ee36b8a53b6106ade675d5210fa6e4d5409"
] |
[
"tests/test_base_usage.py"
] |
[
"\"\"\"Testing usage of libsncompress.base\"\"\"\nimport os\nimport os.path\nimport pytest\nimport six\nimport six.moves as sm\nimport numpy\nfrom numpy.random import shuffle\nimport libsncompress\nfrom lsnz_test_infra import jla_full_paths, outdir\n\n\[email protected]\ndef extra_file(jla_full_paths):\n fits_dir = jla_full_paths[0]\n fpath = os.path.join(fits_dir, \"test_tmp.dat\")\n fh = open(fpath, \"wb\")\n fh.close()\n yield fpath\n try:\n os.remove(fpath)\n except OSError:\n pass\n\n\ndef test_invalid_fits_dir(outdir, jla_full_paths):\n emptydir = \"%s\" % outdir.mkdir(\"invalid\")\n with pytest.raises(ValueError):\n base = libsncompress.BinnedSN(emptydir, jla_full_paths[1])\n\n\ndef test_extra_file_in_fits_dir(extra_file, jla_full_paths):\n os.stat(extra_file)\n base = libsncompress.BinnedSN(*jla_full_paths)\n\n\ndef test_binning_that_selects_nothing(jla_full_paths):\n with pytest.raises(ValueError):\n base = libsncompress.BinnedSN(*jla_full_paths,\n logbins=[numpy.log10([1.5, 2.0])])\n\n\[email protected](\"size\", list(sm.range(2, 36)))\ndef test_bins_sizes(jla_full_paths, size):\n rndlogz = numpy.linspace(-2.0, numpy.log10(1.3), num=size)\n shuffle(rndlogz)\n base = libsncompress.BinnedSN(*jla_full_paths, logbins=[rndlogz])\n # Sanity check for the number of bins.\n assert len(base.binidcontent) == size - 1\n assert base.bins.nbins == size - 1\n # Each data point is mapped (i.e. binned).\n revlookup_range_size = sum(len(v) for v in\n six.itervalues(base.binidcontent))\n assert revlookup_range_size == base.datadimension\n for lgz in base.logredshifts:\n assert base.bins.searchenum(lgz)[0] is not None\n"
] |
[
[
"numpy.log10",
"numpy.random.shuffle"
]
] |
subshine/tutorials
|
[
"717320cbec72e3e68acefad9c367fc6c5ffb37b1"
] |
[
"matplotlibTUT/plt16_grid_subplot.py"
] |
[
"# View more python tutorials on my Youtube and Youku channel!!!\n\n# Youtube video tutorial: https://www.youtube.com/channel/UCdyjiB5H8Pu7aDTNVXTTpcg\n# Youku video tutorial: http://i.youku.com/pythontutorial\n\n# 16 - grid\n\"\"\"\nPlease note, this script is for python3+.\nIf you are using python2+, please modify it accordingly.\nTutorial reference:\nhttp://matplotlib.org/users/gridspec.html\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\n\n# method 1: subplot2grid\n##########################\nplt.figure()\nax1 = plt.subplot2grid((3, 3), (0, 0), colspan=3) # stands for axes\nax1.plot([1, 2], [1, 2])\nax1.set_title('ax1_title')\nax2 = plt.subplot2grid((3, 3), (1, 0), colspan=2)\nax3 = plt.subplot2grid((3, 3), (1, 2), rowspan=2)\nax4 = plt.subplot2grid((3, 3), (2, 0))\nax4.scatter([1, 2], [2, 2])\nax4.set_xlabel('ax4_x')\nax4.set_ylabel('ax4_y')\nax5 = plt.subplot2grid((3, 3), (2, 1))\n\n# method 2: gridspec\n#########################\nplt.figure()\ngs = gridspec.GridSpec(3, 3)\n# use index from 0\nax6 = plt.subplot(gs[0, :])\nax7 = plt.subplot(gs[1, :2])\nax8 = plt.subplot(gs[1:, 2])\nax9 = plt.subplot(gs[-1, 0])\nax10 = plt.subplot(gs[-1, -2])\n\n# method 3: easy to define structure\n####################################\nf, ((ax11, ax12), (ax13, ax14)) = plt.subplots(2, 2, sharex=True, sharey=True)\nax11.scatter([1,2], [1,2])\n\nplt.tight_layout()\nplt.show()\n"
] |
[
[
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.subplot",
"matplotlib.gridspec.GridSpec",
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplot2grid",
"matplotlib.pyplot.figure"
]
] |
michlkallen/gpx_mapping
|
[
"8272b8a1d99d9d4c80933163eddf32a6d66583e6"
] |
[
"gpx_correct.py"
] |
[
"\"\"\"\nScript to correct timestamps on GPX files.\n\nUse if creating a GPX file manually through www.gpxgenerator.com\nand the speed used is incorrect. (Or if you want to modify an existing route.)\n\nTo use:\n-------\n1. Edit the `file_in` name and the `file_out` name if desired.\n2. Edit the average speed (and variability) if desired. Currently set up for 4:25/km\n\nAfter navigating to the directory in question, run:\n\n`python gpx_correct.py`\n\nReturns elapsed time and a new GPX file to the directory.\n\nThe script is currently set up to only function when there are also \"elev\" tags in the\nGPX file. It could be modified in the future to work without.\n\"\"\"\nimport xml.etree.ElementTree as ET\nimport numpy as np\nimport pandas as pd\nimport haversine\n\n# File to modify\nfile_in = \"0331.gpx\"\nfile_out = file_in[:-4] + \"-analog.gpx\"\n\n# Average speed (avg_rate) [seconds/km] and variability (var) [seconds]\navg_rate = 265\nvar = 10\n\n# namespace for GPX files\nns = {\"gpx\": \"http://www.topografix.com/GPX/1/1\"}\n\n# initialize lists for distance, elevation gain, elevation\nelev_gain = [0]\ndistance = [0]\nelev = [0]\n\n\n# Extract the longitude, latitude, elevation, timestamp from the gpx\ntree = ET.parse(file_in)\nlatitude = [\n float(x.attrib[\"lat\"])\n for x in tree.findall(\"./gpx:trk/gpx:trkseg/gpx:trkpt\", ns)\n]\nlongitude = [\n float(y.attrib[\"lon\"])\n for y in tree.findall(\"./gpx:trk/gpx:trkseg/gpx:trkpt\", ns)\n]\nelevation = [\n float(e.text)\n for e in tree.findall(\"./gpx:trk/gpx:trkseg/gpx:trkpt/gpx:ele\", ns)\n]\ntimestamp = [\n t.text for t in tree.findall(\"./gpx:trk/gpx:trkseg/gpx:trkpt/gpx:time\", ns)\n]\n\n# Calculate the distance and elevation traveled\nfor i in range(len(latitude)):\n if i == 0:\n pass\n else:\n p1_lat, p1_lon = latitude[i - 1], longitude[i - 1]\n p2_lat, p2_lon = latitude[i], longitude[i]\n\n delta = haversine.haversine((p1_lat, p1_lon), (p2_lat, p2_lon))\n elev_delta = elevation[i] - elevation[i - 1]\n delta_3d = np.sqrt(delta**2 + (elev_delta/1000)**2)\n\n distance.append(delta_3d)\n elev.append(elev_delta)\n\n# Update the timestamps!\nrate = avg_rate + np.random.default_rng().integers(-var, var,\n size=len(distance),\n endpoint=True)\n\n# Calculate the seconds between data points\ntime_delta = np.array(distance)*rate\n\n# Calculate the cumulative sum (easier for writing the next step)\ntime_delta_cum = np.cumsum(time_delta)\nnew_stamps = [\n (pd.to_datetime(timestamp[0])\n + pd.Timedelta(time_delta_cum[i], \"s\")).strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n for i in range(len(timestamp))\n ]\n\nfor i, time in enumerate(tree.iterfind(\"./gpx:trk/gpx:trkseg/gpx:trkpt/gpx:time\", ns)):\n time.text = new_stamps[i]\n\ntree.write(file_out, encoding=\"UTF-8\")\n\nprint(f\"{time_delta_cum[-1]/60:.1f} minutes\")\n"
] |
[
[
"pandas.to_datetime",
"numpy.sqrt",
"numpy.cumsum",
"pandas.Timedelta",
"numpy.array",
"numpy.random.default_rng"
]
] |
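The timestamp correction in gpx_correct.py boils down to drawing a jittered pace per segment and turning cumulative distance into datetimes; a small self-contained sketch of that arithmetic, with made-up segment distances:

import numpy as np
import pandas as pd

avg_rate, var = 265, 10                       # seconds per km, +/- jitter in seconds
distance = np.array([0.0, 0.12, 0.15, 0.10])  # km between consecutive points (made up)
rng = np.random.default_rng(42)
# endpoint=True makes the jitter range inclusive on both ends, as in the script.
rate = avg_rate + rng.integers(-var, var, size=len(distance), endpoint=True)
seconds = np.cumsum(distance * rate)
start = pd.to_datetime("2021-03-31T08:00:00Z")
stamps = [(start + pd.Timedelta(s, "s")).strftime("%Y-%m-%dT%H:%M:%SZ") for s in seconds]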
decile-team/spear
|
[
"7629cc46ce738a4a67e5b4a6ba7d1935c4833421"
] |
[
"spear/utils/utils_jl.py"
] |
[
"import torch.nn as nn\nfrom torch import log\nimport numpy as np\n\nfrom .utils_cage import probability\n\n\ndef log_likelihood_loss_supervised(theta, pi, y, m, s, k, n_classes, continuous_mask, qc, device):\n\t'''\n\t\tJoint Learning utils: Negative log likelihood loss, used in loss 4 in :cite:p:`DBLP:journals/corr/abs-2008-09887`\n\n\tArgs:\n\t\ttheta: [n_classes, n_lfs], the parameters\n\t\tpi: [n_classes, n_lfs], the parameters\n\t\tm: [n_instances, n_lfs], m[i][j] is 1 if jth LF is triggered on ith instance, else it is 0\n\t\ts: [n_instances, n_lfs], s[i][j] is the continuous score of ith instance given by jth continuous LF\n\t\tk: [n_lfs], k[i] is the class of ith LF, range: 0 to num_classes-1\n\t\tn_classes: num of classes/labels\n\t\tcontinuous_mask: [n_lfs], continuous_mask[i] is 1 if ith LF has continuous counter part, else it is 0\n\t\tqc: a float value OR [n_lfs], qc[i] quality index for ith LF\n\t\tdevice: 'cuda' if drivers are available, else 'cpu'\n\t\n\tReturn:\n\t\ta real value, summation over (the log of probability for an instance)\n\t'''\n\tprob = probability(theta, pi, m, s, k, n_classes, continuous_mask, qc, device)\n\tprob = (prob.t() / prob.sum(1)).t()\n\treturn nn.NLLLoss()(log(prob), y)\n\ndef entropy(probabilities):\n\t'''\n\t\tJoint Learning utils: Entropy, Used in loss 2 in :cite:p:`DBLP:journals/corr/abs-2008-09887`\n\n\tArgs:\n\t\tprobabilities: [num_unsup_instances, num_classes], probabilities[i][j] is probability of ith instance being jth class\n\t\n\tReturn:\n\t\ta real value, the entropy value of given probability\n\t'''\n\tentropy = - (probabilities * log(probabilities)).sum(1)\n\treturn entropy.sum() / probabilities.shape[0]\n\ndef kl_divergence(probs_p, probs_q):\n\t'''\n\t\tJoint Learning utils: KL divergence of two probabilities, used in loss 6 in :cite:p:`DBLP:journals/corr/abs-2008-09887`\n\t\t\n\tArgs:\n\t\tprobs_p: [num_instances, num_classes]\n\t\tprobs_q: [num_instances, num_classes]\n\t\n\tReturn:\n\t\ta real value, the KL divergence of given probabilities\n\t'''\n\treturn (probs_p * log(probs_p / probs_q)).sum() / probs_p.shape[0]\n\n\ndef find_indices(data, data_sub):\n\t'''\n\t\tA helper function for subset selection\n\n\tArgs:\n\t\tdata: the complete data, torch tensor of shape [num_instances, num_classes]\n\t\tdata_sub: the subset of 'data' whose indices are to be found. Should be of same shape as 'data'\n\t\n\tReturn:\n\t\tlist of indices, to be found from the result of apricot library\n\t'''\n\tindices = []\n\tfor element in data_sub:\n\t\tx = np.where((data.cpu().numpy() == element.cpu().numpy()).all(axis=1))[0]\n\t\tindices.append(x[0])\n\treturn indices\n\n\ndef get_similarity_kernel(preds):\n\t'''\n\t\tA helper function for subset selection\n\n\tArgs:\n\t\tpreds: numpy.ndarray of shape (num_samples,)\n\t\n\tReturn:\n\t\tnumpy.ndarray of shape (num_sample, num_samples)\n\n\t'''\n\tnum_samples = len(preds)\n\tkernel_matrix = np.zeros((num_samples, num_samples))\n\tfor pred in np.unique(preds):\n\t\tx = np.where(preds == pred)[0]\n\t\tprod = np.transpose([np.tile(x, len(x)), np.repeat(x, len(x))])\n\t\tkernel_matrix[prod] = 1\n\treturn kernel_matrix\n"
] |
[
[
"torch.nn.NLLLoss",
"numpy.unique",
"torch.log",
"numpy.zeros",
"numpy.where"
]
] |
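The entropy and kl_divergence helpers above both average an elementwise `p * log(.)` term over instances; a toy check with made-up probability rows:

import torch

probs = torch.tensor([[0.9, 0.1], [0.5, 0.5]])
# Mean entropy over instances: (~0.325 + ~0.693) / 2, matching entropy() above.
ent = -(probs * torch.log(probs)).sum(1).sum() / probs.shape[0]
q = torch.tensor([[0.8, 0.2], [0.4, 0.6]])
# Mean per-instance KL divergence, matching kl_divergence() above.
kl = (probs * torch.log(probs / q)).sum() / probs.shape[0]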
kammerjager/Yume-Bot
|
[
"c3099b929e30602deec23967c7a49f389b5a6d2c"
] |
[
"modules/sql/rankingsdb.py"
] |
[
"# Copyright (c) 2020.\n# MIT License\n#\n# Copyright (c) 2019 YumeNetwork\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport numpy as np\nimport pandas\nimport psycopg2\nfrom psycopg2 import extras\n\nfrom modules.sql.guild import Guild\nfrom modules.sql.user import User\n\ntry:\n con = psycopg2.connect(\"host=postgre dbname=yumebot port=5432 user=postgres password=yumebot\")\n cur = con.cursor(cursor_factory=psycopg2.extras.DictCursor)\nexcept psycopg2.DatabaseError as e:\n print('Error %s' % e)\n\n\nclass RankingsDB:\n\n @staticmethod\n def rows_to_dict(rows) -> dict:\n rankings = {\"level\": rows['level'], \"xp\": rows['xp'], \"total\": rows['total'], \"guild_id\": rows['guild_id'],\n \"reach\": rows['reach'], \"user_id\": rows['user_id']}\n return rankings\n\n @staticmethod\n def rankings_from_rows(rows) -> list:\n rankings = []\n for row in rows:\n rankings.append(RankingsDB.rows_to_dict(row))\n return rankings\n\n \"\"\"\n Get methods\n \"\"\"\n\n @staticmethod\n def get_one(user_id: int, guild_id: int) -> dict:\n try:\n cur.execute(\"SELECT * FROM public.rankings WHERE user_id = {} and guild_id = {};\".format(user_id, guild_id))\n except Exception as err:\n print(err)\n con.rollback()\n rows = cur.fetchone()\n if rows:\n rankings = RankingsDB.rows_to_dict(rows)\n return rankings\n return {}\n\n @staticmethod\n def get_user(user: User, guild: Guild) -> dict:\n try:\n cur.execute(\n \"SELECT * FROM public.rankings WHERE user_id = {} and guild_id = {};\".format(user.user_id,\n guild.guild_id))\n except Exception as err:\n print(err)\n con.rollback()\n try:\n rows = cur.fetchone()\n except (Exception, psycopg2.Error) as error:\n return {}\n else:\n if rows:\n rankings = RankingsDB.rows_to_dict(rows)\n return rankings\n\n return {}\n\n @staticmethod\n def ranking_exists(user: User, guild: Guild) -> bool:\n try:\n cur.execute(\n \"SELECT count(*) FROM public.rankings WHERE user_id = {} AND guild_id = {};\".format(user.user_id,\n guild.guild_id))\n except Exception as err:\n print(err)\n con.rollback()\n rows = cur.fetchone()\n if rows[0] > 0:\n return True\n return False\n\n \"\"\"\n Create methods\n \"\"\"\n\n @staticmethod\n def create_ranking(user: User, guild: Guild):\n try:\n cur.execute(\n \"INSERT INTO public.rankings ( guild_id, level, reach, total, user_id, xp) VALUES ( {}, 0, 20, 0, {}, 0 );\".format(\n guild.guild_id, user.user_id))\n except Exception as err:\n print(err)\n con.rollback()\n con.commit()\n\n @staticmethod\n def reset_user(user: User, guild: 
Guild):\n try:\n cur.execute(\n \"UPDATE public.rankings SET level = 0, reach = 20, total = 0, xp = 0 WHERE guild_id = {} AND user_id = {};\".format(\n guild.guild_id, user.user_id))\n except Exception as err:\n print(err)\n con.rollback()\n con.commit()\n\n \"\"\"\n Level methods\n \"\"\"\n\n @staticmethod\n def update_user(user: User, guild: Guild, ranking: dict):\n try:\n cur.execute(\n \"UPDATE public.rankings SET level = {}, reach = {}, total = {}, xp = {} WHERE guild_id = {} AND user_id = {};\".format(\n ranking['level'], ranking['reach'], ranking['total'], ranking['xp'],\n guild.guild_id, user.user_id))\n except Exception as err:\n print(err)\n con.rollback()\n con.commit()\n\n @staticmethod\n def update_user_id(user: id, guild: id, level: int, reach: int, xp: int):\n try:\n cur.execute(\n \"UPDATE public.rankings SET level = {}, reach = {}, xp = {} WHERE guild_id = {} AND user_id = {};\".format(\n level, reach, xp, guild, user))\n except Exception as err:\n print(err)\n con.rollback()\n con.commit()\n\n @staticmethod\n def get_rank(user: User, guild: Guild) -> int:\n try:\n cur.execute(\n \"SELECT user_id FROM public.rankings WHERE guild_id = {} GROUP BY user_id, total ORDER BY total DESC \".format(\n guild.guild_id))\n except Exception as err:\n print(err)\n con.rollback()\n rows = cur.fetchall()\n if rows:\n df = pandas.DataFrame(np.array(rows), columns=[\"ID\"])\n return df.ID[df.ID == user.user_id].index.tolist()[0] + 1\n return 0\n\n @staticmethod\n def get_scoreboard(guild: Guild) -> list:\n try:\n cur.execute(\n \"SELECT user_id FROM public.rankings WHERE guild_id = {} GROUP BY user_id, total ORDER BY total DESC LIMIT 10\".format(\n guild.guild_id))\n except Exception as err:\n print(err)\n con.rollback()\n rows = cur.fetchall()\n if rows:\n df = pandas.DataFrame(np.array(rows), columns=[\"ID\"])\n return df.ID.values.tolist()\n return []\n\n @staticmethod\n def get_all():\n try:\n cur.execute(\n \"SELECT * FROM public.rankings;\")\n except Exception as err:\n print(err)\n con.rollback()\n rows = cur.fetchall()\n if rows:\n return RankingsDB.rankings_from_rows(rows)\n"
] |
[
[
"numpy.array"
]
] |
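One caution about the queries in the row above: values are interpolated with `str.format`, which is vulnerable to SQL injection; psycopg2 can bind parameters itself with `%s` placeholders. A hedged sketch of `get_one` in that style, reusing a cursor like the module-level one in the file:

def get_one(cur, user_id: int, guild_id: int):
    # %s placeholders let psycopg2 escape and bind the values server-side
    # instead of building the SQL string with str.format().
    cur.execute(
        "SELECT * FROM public.rankings WHERE user_id = %s AND guild_id = %s;",
        (user_id, guild_id),
    )
    return cur.fetchone()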
stephenfuqua/Ed-Fi-X-Fizz
|
[
"94597eda585d4f62f69c12e2a58fa8e8846db11b"
] |
[
"src/google-classroom-extractor/edfi_google_classroom_extractor/mapping/users.py"
] |
[
"# SPDX-License-Identifier: Apache-2.0\n# Licensed to the Ed-Fi Alliance under one or more agreements.\n# The Ed-Fi Alliance licenses this file to you under the Apache License, Version 2.0.\n# See the LICENSE and NOTICES files in the project root for more information.\n\nfrom pandas import DataFrame, concat\nfrom edfi_google_classroom_extractor.mapping.constants import SOURCE_SYSTEM\n\n\nSTUDENT_USER_ROLE = \"Student\"\nTEACHER_USER_ROLE = \"Teacher\"\n\n\ndef _students_or_teachers_to_users_df(\n students_or_teachers_df: DataFrame, lms_udm_user_role: str\n) -> DataFrame:\n \"\"\"\n Convert a Students or Teachers API DataFrame to a LMSUsers DataFrame\n\n Parameters\n ----------\n students_or_teachers_df: DataFrame\n is a Students or Teachers API DataFrame\n lms_udm_user_role\n is the LMS UDM user role as a string\n\n Returns\n -------\n DataFrame\n a LMSUsers DataFrame based on the given Students or Teachers API DataFrame\n\n Notes\n -----\n DataFrame columns are:\n EmailAddress: The primary e-mail address for the user\n LocalUserIdentifier: The user identifier assigned by a school or district\n Name: The full name of the user\n SISUserIdentifier: The user identifier defined in the Student Information System (SIS)\n SourceSystem: The system code or name providing the user data\n SourceSystemIdentifier: A unique number or alphanumeric code assigned to a user by the source system\n UserRole: The role assigned to the user\n SourceCreateDate: Date this record was created in the LMS\n SourceLastModifiedDate: Date this record was last updated in the LMS\n CreateDate: Date this record was created in the extractor\n LastModifiedDate: Date this record was last updated in the extractor\n \"\"\"\n assert \"userId\" in students_or_teachers_df.columns\n assert \"profile.name.fullName\" in students_or_teachers_df.columns\n assert \"profile.emailAddress\" in students_or_teachers_df.columns\n\n result: DataFrame = students_or_teachers_df[\n [\n \"userId\",\n \"profile.name.fullName\",\n \"profile.emailAddress\",\n \"CreateDate\",\n \"LastModifiedDate\",\n ]\n ]\n result = result.rename(\n columns={\n \"userId\": \"SourceSystemIdentifier\",\n \"profile.name.fullName\": \"Name\",\n \"profile.emailAddress\": \"EmailAddress\",\n }\n )\n\n # Student records are per-course, so there may be duplicates\n result.drop_duplicates(inplace=True)\n result[\"SourceSystem\"] = SOURCE_SYSTEM\n result[\"UserRole\"] = lms_udm_user_role\n\n result[\"LocalUserIdentifier\"] = \"\" # No local id available from API\n result[\"SISUserIdentifier\"] = \"\" # No SIS id available from API\n result[\"SourceCreateDate\"] = \"\" # No create date available from API\n result[\"SourceLastModifiedDate\"] = \"\" # No modified date available from API\n\n return result\n\n\ndef students_and_teachers_to_users_df(\n students_df: DataFrame, teachers_df: DataFrame\n) -> DataFrame:\n \"\"\"\n Convert Students and Teachers API DataFrames to an LMS UDM DataFrame\n\n Parameters\n ----------\n students_df: DataFrame\n is a Students API DataFrame\n teachers_df: DataFrame\n is a Teachers API DataFrame\n\n Returns\n -------\n DataFrame\n a LMSUsers DataFrame based on the given Students and Teachers API DataFrames\n\n Notes\n -----\n DataFrame columns are:\n EmailAddress: The primary e-mail address for the user\n LocalUserIdentifier: The user identifier assigned by a school or district\n Name: The full name of the user\n SISUserIdentifier: The user identifier defined in the Student Information System (SIS)\n SourceSystem: The system code or name providing 
the user data\n SourceSystemIdentifier: A unique number or alphanumeric code assigned to a user by the source system\n UserRole: The role assigned to the user\n SourceCreateDate: Date this record was created in the LMS\n SourceLastModifiedDate: Date this record was last updated in the LMS\n CreateDate: Date this record was created in the extractor\n LastModifiedDate: Date this record was last updated in the extractor\n \"\"\"\n assert \"userId\" in students_df.columns\n assert \"profile.name.fullName\" in students_df.columns\n assert \"profile.emailAddress\" in students_df.columns\n\n assert \"userId\" in teachers_df.columns\n assert \"profile.name.fullName\" in teachers_df.columns\n assert \"profile.emailAddress\" in teachers_df.columns\n\n users_from_students_df: DataFrame = _students_or_teachers_to_users_df(\n students_df, STUDENT_USER_ROLE\n )\n users_from_teachers_df: DataFrame = _students_or_teachers_to_users_df(\n teachers_df, TEACHER_USER_ROLE\n )\n\n return concat(\n [users_from_students_df, users_from_teachers_df], ignore_index=True, sort=False\n )\n"
] |
[
[
"pandas.concat"
]
] |
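The mapping above is essentially a column rename plus de-duplication; a toy sketch with fabricated API rows (u1 appears twice, as per-course student records would):

from pandas import DataFrame

api_df = DataFrame({
    "userId": ["u1", "u1", "u2"],
    "profile.name.fullName": ["Ann A", "Ann A", "Bob B"],
    "profile.emailAddress": ["a@x.org", "a@x.org", "b@x.org"],
    "CreateDate": ["2021-01-01"] * 3,
    "LastModifiedDate": ["2021-01-02"] * 3,
})
users = api_df.rename(columns={
    "userId": "SourceSystemIdentifier",
    "profile.name.fullName": "Name",
    "profile.emailAddress": "EmailAddress",
}).drop_duplicates()          # u1's duplicate row collapses to one user
users["UserRole"] = "Student"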
tdeme/NLP-Partisanship
|
[
"4a289d1157ac1b96e5af3b38b2676f2d5c84e21b"
] |
[
"Notebooks and Scripts/model_testing.py"
] |
[
"from transformers import TFAutoModel, AutoTokenizer, AutoModelForSequenceClassification, DistilBertForSequenceClassification\nfrom newspaper import Article\nfrom torch import nn\n\n'''\nThis script will test the model previously trained, and compare\nits performance with that of a similar existing model that was \nfound on the Hugging Face model hub.\n'''\n\ndef prepare_text(article_url, split=True):\n '''\n This function uses the Article object from the newspaper module\n to scrape the webpage at the given url. The split parameter\n determines whether the function will return the text split into\n paragraphs, or simply the original text stripped of newlines.\n (Split is set to True by default).\n '''\n\n article = Article(article_url)\n article.download()\n article.parse()\n paras = article.text.split('\\n\\n')\n if split:\n return paras\n else:\n text = ''\n for para in paras:\n text+=para\n return text\n \n\ndef get_score(pt_outputs):\n '''\n The pt_predictions are the outputs of the model. To get a more\n concrete prediction, we use the softmax function to get \n probability-based predictions that are more intuitive to interpret.\n The function returns the average of the probability-based predictions.\n '''\n\n pt_predictions = nn.functional.softmax(pt_outputs.logits, dim=-1)\n sum = 0\n for prediction in pt_predictions.tolist():\n sum+=prediction[1]\n overall_score = sum/len(pt_predictions)\n return overall_score\n\n\ndef evaluate(score, tally, party):\n '''\n The party parameter was included in the urls object so that a\n 0 indicates a left-leaning article and a 1 indicates a right-\n leaning article. If the score predicted the correct leaning,\n a 1 will be appended to the corresponding tally list. Otherwise,\n a 0 will be appended, indicating that the prediction was false.\n '''\n\n if score<0.5:\n if not party:\n tally.append(1)\n else:\n tally.append(0)\n else:\n if party:\n tally.append(1)\n else:\n tally.append(0)\n\n\ndef run_tests(urls):\n #Much of this syntax comes from the Hugging Face documentation.\n \n my_tokenizer = AutoTokenizer.from_pretrained('./recent_twitter_model')\n my_bert_model = DistilBertForSequenceClassification.from_pretrained('./recent_twitter_model')\n my_tally = []\n\n control_tokenizer = AutoTokenizer.from_pretrained('spencerh/rightpartisan')\n control_bert_model = DistilBertForSequenceClassification.from_pretrained('spencerh/rightpartisan')\n control_tally = []\n\n for url in urls:\n \n my_paras = prepare_text(url[0])\n control_paras = prepare_text(url[0], False)\n\n my_batch = my_tokenizer(\n [para for para in my_paras],\n padding=True,\n truncation=True,\n max_length=512,\n return_tensors=\"pt\"\n )\n\n my_outputs = my_bert_model(**my_batch)\n\n my_score = get_score(my_outputs)\n\n evaluate(my_score, my_tally, url[1])\n\n control_batch = control_tokenizer(\n [para for para in control_paras],\n padding=True,\n truncation=True,\n max_length=512,\n return_tensors=\"pt\"\n )\n\n control_outputs = control_bert_model(**control_batch)\n\n control_score = get_score(control_outputs)\n\n evaluate(control_score, control_tally, url[1])\n\n return my_tally, control_tally\n\n\ndef main():\n import os\n\n os.environ['KMP_DUPLICATE_LIB_OK']='True'\n\n urls = (('https://www.cnn.com/2021/03/06/opinions/biden-gop-relief-bill-zelizer/index.html',0),\n ('https://www.cnn.com/2021/03/06/opinions/tweets-gop-zoe-lofgren-ghitis/index.html',0),\n ('https://www.cnn.com/2021/03/05/opinions/pandemic-lessons-preparedness-besser/index.html',0),\n 
('https://www.cnn.com/2021/03/04/opinions/joe-bidens-big-chance-sachs/index.html',0),\n ('https://www.cnn.com/2021/03/04/opinions/texas-covid-restrictions-science-mehnert/index.html',0),\n ('https://www.cnn.com/2021/10/28/opinions/trumps-ridiculous-letter-to-wall-street-journal-dantonio/index.html',0),\n ('https://www.msnbc.com/opinion/liberty-university-sexual-assault-allegations-reveal-flaws-evangelical-institutions-n1282463',0),\n ('https://www.msnbc.com/opinion/virginia-s-youngkin-spotlights-woman-who-tried-ban-toni-morrison-n1282458',0),\n ('https://www.msnbc.com/opinion/virginia-s-youngkin-spotlights-woman-who-tried-ban-toni-morrison-n1282458',0),\n ('https://www.msnbc.com/opinion/murdaugh-murder-mystery-highlights-power-privilege-n1282320',0),\n ('https://www.foxnews.com/opinion/afghanistan-leadership-rep-michael-waltz',1),\n ('https://www.foxnews.com/opinion/afghanistan-mission-impossible-biden-fantasyland-endgame-k-t-mcfarland',1),\n ('https://www.foxnews.com/opinion/afghanistan-biden-team-us-pivot-mike-pompeo',1),\n ('https://www.foxnews.com/opinion/gov-cuomo-resigned-cnn-chris-cuomo-tim-graham',1),\n ('https://www.foxnews.com/opinion/biden-broken-border-policies-crisis-arizona-ag-mark-brnovich',1),\n ('https://www.foxnews.com/opinion/att-racial-reeducation-racism-white-trait-christopher-rufo',1),\n ('https://www.foxnews.com/opinion/tucker-carlson-biden-america-stupid-not-cross-border-illegally',1),\n ('https://www.foxnews.com/opinion/liberal-media-mcauliffe-virginia-gubernatorial-race-dan-gainor',1),\n ('https://www.foxnews.com/opinion/joe-concha-mcauliffe-biden-trump-virginia-race-youngkin',1),\n ('https://www.foxnews.com/opinion/biden-sanders-socialism-spending-rep-michael-burgess',1),\n )\n\n print(run_tests(urls))\n\n\nif __name__ == '__main__':\n main()"
] |
[
[
"torch.nn.functional.softmax"
]
] |
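get_score in the row above averages the class-1 softmax probability over paragraph-level outputs; a toy version of that step with fabricated logits, so no model download is needed:

import torch
from torch import nn

logits = torch.tensor([[2.0, -1.0], [0.5, 0.5], [-1.0, 3.0]])  # one row per paragraph (made up)
probs = nn.functional.softmax(logits, dim=-1)
score = sum(p[1] for p in probs.tolist()) / len(probs)  # mean P(class 1), i.e. right-leaning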
antgonza/qtp-biom
|
[
"731b5529fc5f559a868c1d3a6e14cecf4e59198b"
] |
[
"qtp_biom/tests/test_plugin.py"
] |
[
"# -----------------------------------------------------------------------------\n# Copyright (c) 2014--, The Qiita Development Team.\n#\n# Distributed under the terms of the BSD 3-clause License.\n#\n# The full license is in the file LICENSE, distributed with this software.\n# -----------------------------------------------------------------------------\n\nfrom unittest import main\nfrom tempfile import mkdtemp, mkstemp\nfrom os import remove, close\nfrom os.path import exists, isdir\nfrom shutil import rmtree\nfrom json import dumps\nfrom time import sleep\n\nimport numpy as np\nfrom biom import Table\nfrom biom.util import biom_open\nfrom qiita_client.testing import PluginTestCase\n\nfrom qtp_biom import plugin\n\n\nclass PluginTests(PluginTestCase):\n def setUp(self):\n self.out_dir = mkdtemp()\n self._clean_up_files = [self.out_dir]\n\n def tearDown(self):\n for fp in self._clean_up_files:\n if exists(fp):\n if isdir(fp):\n rmtree(fp)\n else:\n remove(fp)\n\n def _wait_job(self, job_id):\n for i in range(10):\n status = self.qclient.get_job_info(job_id)['status']\n if status != 'running':\n break\n sleep(0.5)\n return status\n\n def test_execute_job_summary(self):\n # Create a summary job\n data = {'command': dumps(['BIOM type', '2.1.4 - Qiime2',\n 'Generate HTML summary']),\n 'parameters': dumps({'input_data': 4}),\n 'status': 'queued'}\n job_id = self.qclient.post(\n '/apitest/processing_job/', data=data)['job']\n\n plugin(\"https://localhost:8383\", job_id, self.out_dir)\n\n obs = self._wait_job(job_id)\n self.assertEqual(obs, 'success')\n\n def test_execute_job_validate(self):\n # Create a prep template\n prep_info = {'SKB8.640193': {'col': 'val1'},\n 'SKD8.640184': {'col': 'val2'}}\n data = {'prep_info': dumps(prep_info),\n 'study': 1,\n 'data_type': '16S'}\n template = self.qclient.post(\n '/apitest/prep_template/', data=data)['prep']\n # Create a new validate job\n fd, biom_fp = mkstemp(suffix=\".biom\")\n close(fd)\n data = np.random.randint(100, size=(2, 2))\n table = Table(data, ['O1', 'O2'], ['SKB8.640193', 'SKD8.640184'])\n with biom_open(biom_fp, 'w') as f:\n table.to_hdf5(f, \"Test\")\n data = {'command': dumps(['BIOM type', '2.1.4 - Qiime2', 'Validate']),\n 'parameters': dumps(\n {'files': dumps({'biom': [biom_fp]}),\n 'template': template,\n 'artifact_type': 'BIOM'}),\n 'artifact_type': 'BIOM',\n 'status': 'queued'}\n job_id = self.qclient.post(\n '/apitest/processing_job/', data=data)['job']\n\n plugin(\"https://localhost:8383\", job_id, self.out_dir)\n obs = self._wait_job(job_id)\n self.assertEqual(obs, 'success')\n\n def test_execute_job_error(self):\n # Create a prep template\n prep_info = {'SKB8.640193': {'col': 'val1'},\n 'SKD8.640184': {'col': 'val2'}}\n data = {'prep_info': dumps(prep_info),\n 'study': 1,\n 'data_type': '16S'}\n template = self.qclient.post(\n '/apitest/prep_template/', data=data)['prep']\n # Create a new validate job\n fd, biom_fp = mkstemp(suffix=\".biom\")\n close(fd)\n data = np.random.randint(100, size=(2, 2))\n table = Table(data, ['O1', 'O2'], ['S1', 'S2'])\n with biom_open(biom_fp, 'w') as f:\n table.to_hdf5(f, \"Test\")\n data = {'command': dumps(['BIOM type', '2.1.4 - Qiime2', 'Validate']),\n 'parameters': dumps(\n {'files': dumps({'biom': [biom_fp]}),\n 'template': template,\n 'artifact_type': 'BIOM'}),\n 'artifact_type': 'BIOM',\n 'status': 'queued'}\n job_id = self.qclient.post(\n '/apitest/processing_job/', data=data)['job']\n\n plugin(\"https://localhost:8383\", job_id, self.out_dir)\n obs = self._wait_job(job_id)\n\n 
self.assertEqual(obs, 'error')\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"numpy.random.randint"
]
] |
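The validate tests above build their fixture tables in memory before writing HDF5; the construction step alone, as a sketch (ids copied from the test):

import numpy as np
from biom import Table

data = np.random.randint(100, size=(2, 2))  # 2 observations x 2 samples of fake counts
table = Table(data, ['O1', 'O2'], ['SKB8.640193', 'SKD8.640184'])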
GavinAbercrombie/motion_policy_detection
|
[
"f7a609f8af8acbe781418edbfec7629b4225b3c0"
] |
[
"returns_policy.py"
] |
[
"### Matches Hansard HoC debate motions and motion quasi-sentences \n### to policy codes from the Comparative Manifesto Project.\n\nimport os, csv\nfrom collections import Counter, OrderedDict\nfrom nltk import word_tokenize \nfrom nltk.stem import WordNetLemmatizer\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.metrics.pairwise import cosine_similarity\nimport heapq\n\nprint('Matching motions to policies ...\\n')\n\ndef lemmatizer(text):\n \"\"\"Input: string of 1 or more words,\n output: string of corresponding lemmas\"\"\"\n lemmad = ''\n wnl = WordNetLemmatizer()\n lemma_list = [wnl.lemmatize(t) for t in word_tokenize(text)]\n for lem in lemma_list:\n lemmad = lemmad + lem + ' '\n return lemmad\n \ntfidf_vectorizer = TfidfVectorizer(stop_words='english')\n\n# load all the annotated UK manifesto quasi-sentences (qs) into a dictionary of format {code: [qs0, qs1, ..., qsn]} \nmanifestos = {}\nfor filename in os.listdir('resources/manifestos/'):\n manifesto = csv.reader(open('resources/manifestos/'+filename), delimiter=',')\n for row in manifesto:\n if row[1].isdigit():\n if row[1] not in manifestos:\n manifestos[row[1]] = [row[0]]\n else:\n manifestos[row[1]].append(row[0])\n \n# load hand-annotated motions:\ndata = csv.reader(open('resources/hand_annotated_motions_sub.csv'))\n# create ordered dict of motions:\nmotions = OrderedDict()\nfor row in data:\n feats_no = len(row) # no. of feature types for analysis (1 = title, 2 = motion, 3 = speech)\n idee = row[0]\n if idee not in motions: # create dict entry for fist example of each code\n motions[idee] = [[row[f] for f in range(1, feats_no)]] \n else: # add subsequent examples to corresponding code values\n motions[idee].append([row[f] for f in range(1, feats_no)])\n \n# get aggregate annotations and create list of sentences:\nsentences = []\nagg_anns = [] # for each motion, we're going to put the aggregation of all the qs annotations here\nall_annotations = []\nfor k, v in motions.items():\n anns = [] # temporary list of annotations. 
Next, find most common code in list\n for i in v:\n #if i[-1] != '000': # allow non-000 codes to dominate if 000 is majority code\n anns.append(i[-1])\n all_annotations.append(i[-1])\n sentences.append(i)\n if len(anns) == 0:\n anns.append('000')\n agg_anns.append(Counter(anns).most_common(1)[0][0])\nno_qs = len(sentences) # store number of qss for use in splitting tfidf matrix\n\n# load manifesto project coding scheme:\nmanifesto_codes = csv.reader(open('resources/manifesto_codes.csv'))\ncodes = {}\nfor line in manifesto_codes:\n codes[line[0]] = line[1]\n \n# create dict of coded manifestos\nmanifesto_codes_dict = {}\nuk_codes = [] # find out which codes are actually used in uk manifestos\nfor csv_file in os.listdir('resources/manifestos'):\n manifests = csv.reader(open('resources/manifestos/' + csv_file))\n for row in manifests:\n if row[1].isdigit():\n if row[1] not in manifesto_codes_dict:\n manifesto_codes_dict[row[1]] = [codes[row[1]], row[0].replace(\"\\u2022\",\"\"), row[1]] # remove bullet points\n uk_codes.append(row[1])\n else:\n manifesto_codes_dict[row[1]][1] += ' ' + row[0].replace(\"\\u2022\",\"\")\n \n# put together data from 'texts' and 'manifesto_codes_dict'\nfor k, v in manifesto_codes_dict.items():\n sentences.append(v)\n \n# extract and combine strings from lists for testing:\nall_data = []\nclass_labels = []\nfor i in sentences:\n lemmastring = lemmatizer(i[0])\n for j in range(1,2):\n lemmastring += ' ' + lemmatizer(i[j])\n all_data.append(lemmastring)\n class_labels.append(i[-1])\n \n# extract data for testing:\nall_data = []\nclass_labels = []\nfor i in sentences:\n lemmastring = lemmatizer(i[0])\n for j in range(1,2):\n lemmastring += ' ' + lemmatizer(i[j])\n all_data.append(lemmastring)\n class_labels.append(i[-1])\n \n# get motion + manifesto code text and perform tf-idf:\ntfidf_matrix = tfidf_vectorizer.fit_transform(all_data)\n\n# compare matched CMP policy codes with annotated labels\nscore = 0\nscores_dict = OrderedDict()\ncount = 0\nseen_sents = 0\nqs_level_codes = []\nfor k, v in motions.items():\n print('******************************')\n title = v[0][0]\n print(title, agg_anns[count])\n no_sents = len(v) # no. of sentences in the motion\n all_results = []\n for i in range(no_sents):\n sentence_index = seen_sents + i\n #print(sentence_index)\n cos_sim = cosine_similarity(tfidf_matrix[sentence_index], tfidf_matrix)[0]\n # get largest cos sim scores:\n top5 = heapq.nlargest(5, range(no_qs,len(cos_sim)), key=cos_sim.__getitem__)\n results = []\n for result in top5:\n results.append(sentences[result][2])\n all_results.append(results[0])\n qs_level_codes.append(results[0])\n agg_res = Counter(all_results).most_common(1)[0][0]\n print(agg_res)\n if agg_anns[count] == agg_res:\n score += 1\n seen_sents += no_sents\n count += 1\nprint('\\nMOTION LEVEL SCORE:',score)\nqs_score = len([i for i, j in zip(all_annotations, qs_level_codes) if i == j])\nprint('\\nQS LEVEL SCORE', qs_score)\nprint('Raw motion-level agreement:',(score/len(motions))*100,'%')\nprint('Raw qs-level agreement:',(qs_score/no_qs)*100,'%')\n"
] |
[
[
"sklearn.metrics.pairwise.cosine_similarity",
"sklearn.feature_extraction.text.TfidfVectorizer"
]
] |
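The matching above hinges on placing motion text and CMP code descriptions in a single TF-IDF space and ranking candidates by cosine similarity; a toy sketch of that core step (the documents here are invented):

import heapq
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

docs = ["cut income tax rates", "raise the minimum wage", "tax relief for families"]
tfidf = TfidfVectorizer(stop_words='english').fit_transform(docs)
cos_sim = cosine_similarity(tfidf[0], tfidf)[0]  # similarity of doc 0 to every doc
top = heapq.nlargest(2, range(1, len(docs)), key=cos_sim.__getitem__)  # best matches for doc 0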
graebe/StatePerception
|
[
"bba6743ef95ba5f1d693ba9d409188e37b0d95ec"
] |
[
"StatePerception/KerasLayer.py"
] |
[
"\"\"\"\r\n@author: Torben Gräber\r\n\"\"\"\r\n\r\nimport tensorflow as tf\r\nfrom keras.engine.topology import Layer\r\nfrom keras.losses import mean_squared_error\r\nfrom keras import backend as K\r\nimport numpy as np\r\n\r\n# =============================================================================\r\n# Custom Loss\r\n# =============================================================================\r\n\r\n\r\nclass loss_with_convergence_time():\r\n\r\n def __init__(self, loss_function=mean_squared_error, convergence_time=100):\r\n self.loss_function = loss_function\r\n self.convergence_time = convergence_time\r\n self.__name__ = loss_function.__name__ + \\\r\n '_convtime' + str(convergence_time)\r\n\r\n def __call__(self, y_true, y_pred):\r\n # Apply Mask\r\n y_true, y_pred = self._mask(y_true, y_pred)\r\n # Calculate Loss\r\n return self.loss_function(y_true, y_pred)\r\n\r\n def _mask(self, y_true, y_pred):\r\n # Apply Mask\r\n y_true = y_true[:, self.convergence_time:, :]\r\n y_pred = y_pred[:, self.convergence_time:, :]\r\n return y_true, y_pred\r\n\r\n\r\nclass weighted_loss():\r\n\r\n def __init__(self, loss_function=mean_squared_error, loss_weights=None):\r\n # Tensor\r\n self.loss_function = loss_function\r\n self.loss_weights = tf.expand_dims(\r\n tf.expand_dims(\r\n tf.constant(\r\n value=loss_weights,\r\n dtype=tf.float32),\r\n axis=0),\r\n axis=1)\r\n self.__name__ = loss_function.__name__ + '_weighted'\r\n\r\n def __call__(self, y_true, y_pred):\r\n # Weight Vectors\r\n y_true, y_pred = self._weight_outputs(y_true, y_pred)\r\n # Calculate Loss\r\n loss = self.loss_function(y_true, y_pred)\r\n # Return\r\n return loss\r\n\r\n def _weight_outputs(self, y_true, y_pred):\r\n # Multiply Weights to y_true and y_pred\r\n y_true_weighted = tf.multiply(y_true, self.loss_weights)\r\n y_pred_weighted = tf.multiply(y_pred, self.loss_weights)\r\n # Return\r\n return y_true_weighted, y_pred_weighted\r\n\r\n\r\n# =============================================================================\r\n# Instantiated Custom Loss\r\n# =============================================================================\r\nmean_squared_error_convtime100 = loss_with_convergence_time(\r\n loss_function=mean_squared_error,\r\n convergence_time=100)\r\nmse_conv100_w2 = weighted_loss(\r\n loss_function=mean_squared_error_convtime100,\r\n loss_weights=np.array([0.5, 2]))\r\n\r\n# =============================================================================\r\n# Custom Layers\r\n# =============================================================================\r\n\r\n\r\nclass PCA_Layer(Layer):\r\n\r\n def __init__(self, components, idx, input_dim, **kwargs):\r\n self._components = components # np array\r\n self._idx = idx\r\n self._input_dim = input_dim\r\n\r\n # build kernel\r\n kernel = np.diag(np.ones(self._input_dim))\r\n for i, idx_curr in enumerate(self._idx):\r\n kernel[idx_curr, self._idx] = components[i, :]\r\n self._kernel = np.transpose(kernel)\r\n\r\n super(PCA_Layer, self).__init__(**kwargs)\r\n\r\n def build(self, input_shape):\r\n\r\n # Get Initializers\r\n kernel_init = tf.constant_initializer(\r\n value=self._kernel, dtype=tf.float32)\r\n\r\n # Create a trainable weight variable for this layer.\r\n self.kernel = self.add_weight(name='kernel',\r\n shape=self._kernel.shape,\r\n initializer=kernel_init,\r\n trainable=False)\r\n # Be sure to call this somewhere!\r\n super(PCA_Layer, self).build(input_shape)\r\n\r\n def call(self, x):\r\n return K.dot(x, self.kernel)\r\n\r\n def 
compute_output_shape(self, input_shape):\r\n return input_shape\r\n\r\n\r\nclass ConstantNormalizationLayer(Layer):\r\n\r\n def __init__(self, scale, mean=None, position='input', **kwargs):\r\n self._mean = mean\r\n self._scale = scale\r\n self._position = position\r\n\r\n super(ConstantNormalizationLayer, self).__init__(**kwargs)\r\n\r\n def build(self, input_shape):\r\n # Get Initializers\r\n if self._mean is not(None):\r\n mean_init = tf.constant_initializer(\r\n value=self._mean, dtype=tf.float32)\r\n scale_init = tf.constant_initializer(\r\n value=self._scale, dtype=tf.float32)\r\n\r\n # Create Constants for Normalization\r\n if self._mean is not(None):\r\n self.mean = self.add_weight(\r\n name='mean',\r\n shape=(\r\n self._mean.shape[0],\r\n ),\r\n trainable=False,\r\n initializer=mean_init)\r\n self.scale = self.add_weight(\r\n name='scale',\r\n shape=(\r\n self._scale.shape[0],\r\n ),\r\n trainable=False,\r\n initializer=scale_init)\r\n\r\n super(ConstantNormalizationLayer, self).build(input_shape)\r\n\r\n def call(self, x):\r\n if self._position == 'input':\r\n if self._mean is not(None):\r\n x = x - self.mean\r\n x = tf.multiply(x, tf.divide(1, self.scale))\r\n if self._position == 'output':\r\n x = tf.multiply(x, self.scale)\r\n if self._mean is not(None):\r\n x = x + self.mean\r\n return x\r\n\r\n def compute_output_shape(self, input_shape):\r\n return input_shape\r\n\r\n\r\nclass DerivativeLayer(Layer):\r\n\r\n def __init__(self, dt, order=1, factor=1, **kwargs):\r\n # Write Arguments\r\n self._dt = dt\r\n self._order = order\r\n self._factor = factor\r\n # Init Super\r\n super(DerivativeLayer, self).__init__(**kwargs)\r\n\r\n def build(self, input_shape):\r\n # Get Initializers\r\n dt_init = tf.constant_initializer(value=self._dt, dtype=tf.float32)\r\n factor_init = tf.constant_initializer(\r\n value=self._factor, dtype=tf.float32)\r\n # Create Constants\r\n self.dt = self.add_weight(name='dt', shape=(\r\n 1,), trainable=False, initializer=dt_init)\r\n self.factor = self.add_weight(name='factor', shape=(\r\n 1,), trainable=False, initializer=factor_init)\r\n # Build Super\r\n super(DerivativeLayer, self).build(input_shape)\r\n\r\n def call(self, x):\r\n # Loop over derivatives (order)\r\n for i in range(self._order):\r\n # Differences\r\n der_start = (x[:, 1:, :] - x[:, 0:-1, :]) / self._dt # Right DQ\r\n # der_center = (x[:,2:,:]-x[:,0:-2,:])/self._dt # Central DQ\r\n der_end = (x[:, -1:, :] - x[:, -2:-1, :]) / self._dt # Left DQ\r\n # Concatenate\r\n x = tf.concat([der_start, der_end], axis=1)\r\n # Apply Factor\r\n if self._factor != 1:\r\n x = self.factor * x\r\n return x\r\n\r\n def compute_output_shape(self, input_shape):\r\n return input_shape\r\n\r\n\r\nclass AddMeanSignalsToSignal(Layer):\r\n\r\n def __init__(\r\n self,\r\n ind_mean_signals=[\r\n 4,\r\n 5,\r\n 6,\r\n 7],\r\n ind_target_signal=0,\r\n **kwargs):\r\n # Write Arguments\r\n self.ind_mean_signals = ind_mean_signals\r\n self.ind_target_signal = ind_target_signal\r\n # Init Super\r\n super(AddMeanSignalsToSignal, self).__init__(**kwargs)\r\n\r\n def build(self, input_shape):\r\n # Build Super\r\n super(AddMeanSignalsToSignal, self).build(input_shape)\r\n\r\n def call(self, x):\r\n # Map tensors\r\n x0 = x[0]\r\n x1 = x[1]\r\n # Average of Soruce Signals\r\n av = tf.mean(x0[:, :, ind_mean_signals], axis=2) / \\\r\n len(self.ind_mean_signals)\r\n\r\n return\r\n\r\n def compute_output_shape(self, input_shape):\r\n return (input_shape[0], input_shape[1], 1)\r\n\r\n\r\nclass BetaFromVyVx(Layer):\r\n\r\n def 
__init__(self, ind_vx=0, ind_vy=1, **kwargs):\r\n # Write Arguments\r\n self.ind_vx = ind_vx\r\n self.ind_vy = ind_vy\r\n # Init Super\r\n super(BetaFromVyVx, self).__init__(**kwargs)\r\n\r\n def build(self, input_shape):\r\n # Build Super\r\n super(BetaFromVyVx, self).build(input_shape)\r\n\r\n def call(self, x):\r\n vx = x[:, :, self.ind_vx]\r\n vy = x[:, :, self.ind_vy]\r\n return tf.expand_dims(tf.atan(tf.divide(vy, vx)), axis=2)\r\n\r\n def compute_output_shape(self, input_shape):\r\n return (input_shape[0], input_shape[1], 1)\r\n\r\n\r\n# =============================================================================\r\n# Custom Functions Lib\r\n# =============================================================================\r\nclass CustomFunctionsLib():\r\n\r\n def __init__(self):\r\n self.fundict = {\r\n 'mean_squared_error_convtime100': mean_squared_error_convtime100,\r\n 'PCA_Layer': PCA_Layer,\r\n 'ConstantNormalizationLayer': ConstantNormalizationLayer}\r\n"
] |
[
[
"tensorflow.multiply",
"tensorflow.concat",
"tensorflow.constant",
"numpy.ones",
"tensorflow.constant_initializer",
"tensorflow.divide",
"tensorflow.mean",
"numpy.transpose",
"numpy.array"
]
] |
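One flag on the KerasLayer row: `tensorflow.mean` appears in the API column, but TensorFlow's public API has no `tf.mean`, so `AddMeanSignalsToSignal.call` would raise an AttributeError as written (it also reads `ind_mean_signals` without `self.`). If the intent is an average over the listed channels, a hedged sketch of that reduction; `mean_of_signals` is an illustrative name, not the layer's method:

import tensorflow as tf

def mean_of_signals(x, ind_mean_signals=(4, 5, 6, 7)):
    # Gather the selected channels, then average over the channel axis.
    selected = tf.gather(x, ind_mean_signals, axis=2)
    return tf.reduce_mean(selected, axis=2, keepdims=True)

# e.g. x = tf.random.normal((1, 10, 8)); mean_of_signals(x) has shape (1, 10, 1)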
CAMI-DKFZ/simpa_paper_experiments
|
[
"f5a37d57692b29b78b85d60a38e4dc0aaa5aadfc"
] |
[
"experiments/tissue_generation/forearm.py"
] |
[
"# SPDX-FileCopyrightText: 2021 Computer Assisted Medical Interventions Group, DKFZ\n# SPDX-FileCopyrightText: 2021 Janek Groehl\n# SPDX-License-Identifier: MIT\n\nfrom simpa import Tags\nimport simpa as sp\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom simpa.utils.libraries.structure_library import define_horizontal_layer_structure_settings, \\\n define_vessel_structure_settings, define_circular_tubular_structure_settings, define_background_structure_settings\nfrom utils.save_directory import get_save_path\n\n\nimport os\nos.environ[\"KMP_DUPLICATE_LIB_OK\"] = \"TRUE\"\n\nVOLUME_WIDTH_HEIGHT_DIM_IN_MM = 50\nVOLUME_PLANAR_DIM_IN_MM = 50\nSPACING = 0.5\nRANDOM_SEED = 24618925\n\npath_manager = sp.PathManager()\nSAVE_PATH = get_save_path(\"Tissue_Generation\", \"Forearm\")\n\n\ndef create_example_tissue():\n tissue_dict = sp.Settings()\n tissue_dict[Tags.BACKGROUND] = define_background_structure_settings(molecular_composition=sp.TISSUE_LIBRARY.ultrasound_gel())\n tissue_dict[\"epidermis\"] = define_horizontal_layer_structure_settings(z_start_mm=0, thickness_mm=2,\n adhere_to_deformation=True,\n molecular_composition=sp.TISSUE_LIBRARY.epidermis())\n tissue_dict[\"dermis\"] = define_horizontal_layer_structure_settings(z_start_mm=2, thickness_mm=9,\n adhere_to_deformation=True,\n molecular_composition=sp.TISSUE_LIBRARY.dermis())\n tissue_dict[\"fat\"] = define_horizontal_layer_structure_settings(z_start_mm=11, thickness_mm=4,\n adhere_to_deformation=True,\n molecular_composition=sp.TISSUE_LIBRARY.subcutaneous_fat())\n tissue_dict[\"vessel_1\"] = define_vessel_structure_settings(vessel_start_mm=[25, 0, 17],\n vessel_direction_mm=[-0.05, 1, 0],\n radius_mm=2, bifurcation_length_mm=100,\n curvature_factor=0.01,\n molecular_composition=sp.TISSUE_LIBRARY.blood())\n tissue_dict[\"vessel_2\"] = define_vessel_structure_settings(vessel_start_mm=[5, 0, 17],\n vessel_direction_mm=[0, 1, 0],\n radius_mm=1.5, bifurcation_length_mm=100,\n curvature_factor=0.01,\n molecular_composition=sp.TISSUE_LIBRARY.blood())\n tissue_dict[\"vessel_3\"] = define_vessel_structure_settings(vessel_start_mm=[45, 0, 19],\n vessel_direction_mm=[0.05, 1, 0],\n radius_mm=1.5, bifurcation_length_mm=100,\n curvature_factor=0.01,\n molecular_composition=sp.TISSUE_LIBRARY.blood())\n tissue_dict[\"vessel_4\"] = define_vessel_structure_settings(vessel_start_mm=[25, 0, 35],\n vessel_direction_mm=[0.05, 1, 0],\n radius_mm=6, bifurcation_length_mm=15,\n curvature_factor=0.1,\n molecular_composition=sp.TISSUE_LIBRARY.blood())\n tissue_dict[\"bone\"] = define_circular_tubular_structure_settings(tube_start_mm=[5, 0, 45], tube_end_mm=[5, 50, 45],\n radius_mm=15,\n molecular_composition=sp.TISSUE_LIBRARY.bone())\n return tissue_dict\n\n# Seed the numpy random configuration prior to creating the global_settings file in\n# order to ensure that the same volume\n# is generated with the same random seed every time.\n\n\nnp.random.seed(RANDOM_SEED)\nVOLUME_NAME = \"ForearmScan\" + str(RANDOM_SEED)\nfile_path = SAVE_PATH + \"/\" + VOLUME_NAME + \".hdf5\"\n\nsettings = {\n # These parameters set he general propeties of the simulated volume\n Tags.RANDOM_SEED: RANDOM_SEED,\n Tags.VOLUME_NAME: VOLUME_NAME,\n Tags.SIMULATION_PATH: SAVE_PATH,\n Tags.SPACING_MM: SPACING,\n Tags.WAVELENGTHS: [700],\n Tags.DIM_VOLUME_Z_MM: VOLUME_WIDTH_HEIGHT_DIM_IN_MM,\n Tags.DIM_VOLUME_X_MM: VOLUME_WIDTH_HEIGHT_DIM_IN_MM,\n Tags.DIM_VOLUME_Y_MM: VOLUME_PLANAR_DIM_IN_MM,\n Tags.VOLUME_CREATOR: Tags.VOLUME_CREATOR_VERSATILE\n}\n\nsettings = 
sp.Settings(settings)\n\nsettings.set_volume_creation_settings({\n Tags.STRUCTURES: create_example_tissue(),\n Tags.SIMULATE_DEFORMED_LAYERS: True\n})\n\n\ndevice = sp.RSOMExplorerP50()\nSIMUATION_PIPELINE = [\n sp.ModelBasedVolumeCreationAdapter(settings)\n]\nimport time\nstart_time = time.time()\nsp.simulate(SIMUATION_PIPELINE, settings, device)\nend_time = time.time() - start_time\nwith open(os.path.join(SAVE_PATH, \"run_time.txt\"), \"w+\") as out_file:\n out_file.write(\"{:.2f} s\".format(end_time))\nwavelength = settings[Tags.WAVELENGTHS][0]\n\nsegmentation_mask = sp.load_data_field(file_path=file_path,\n wavelength=wavelength,\n data_field=Tags.DATA_FIELD_SEGMENTATION)\nfontsize = 13\nfig = plt.figure(figsize=(7, 7))\nax = fig.add_subplot(111, projection='3d')\nax.voxels(segmentation_mask==sp.SegmentationClasses.EPIDERMIS, shade=True, facecolors=\"brown\", alpha=0.45)\nax.voxels(segmentation_mask==sp.SegmentationClasses.DERMIS, shade=True, facecolors=\"pink\", alpha=0.45)\nax.voxels(segmentation_mask==sp.SegmentationClasses.FAT, shade=True, facecolors=\"yellow\", alpha=0.45)\nax.voxels(segmentation_mask==sp.SegmentationClasses.BLOOD, shade=True, facecolors=\"red\", alpha=0.55)\nax.voxels(segmentation_mask==sp.SegmentationClasses.BONE, shade=True, facecolors=\"antiquewhite\", alpha=0.55)\nax.set_aspect('auto')\n# ax.set_xticks(np.linspace(0, settings[Tags.DIM_VOLUME_X_MM]/settings[Tags.SPACING_MM], 6))\n# ax.set_yticks(np.linspace(0, settings[Tags.DIM_VOLUME_Y_MM]/settings[Tags.SPACING_MM], 6))\n# ax.set_zticks(np.linspace(0, settings[Tags.DIM_VOLUME_Z_MM]/settings[Tags.SPACING_MM], 6))\n# ax.set_xticklabels(np.linspace(0, settings[Tags.DIM_VOLUME_X_MM], 6, dtype=int), fontsize=fontsize)\n# ax.set_yticklabels(np.linspace(0, settings[Tags.DIM_VOLUME_X_MM], 6, dtype=int), fontsize=fontsize)\n# ax.set_zticklabels(np.linspace(0, settings[Tags.DIM_VOLUME_X_MM], 6, dtype=int), fontsize=fontsize)\nax.set_xticklabels([])\nax.set_yticklabels([])\nax.set_zticklabels([])\nax.set_zlim(int(settings[Tags.DIM_VOLUME_X_MM]/settings[Tags.SPACING_MM]), 0)\n# ax.set_zlabel(\"Depth [mm]\", fontsize=fontsize)\n# ax.set_xlabel(\"x width [mm]\", fontsize=fontsize)\n# ax.set_ylabel(\"y width [mm]\", fontsize=fontsize)\n# plt.axis(\"off\")\nax.view_init(elev=10., azim=-45)\nplt.savefig(os.path.join(SAVE_PATH, \"forearm.svg\"), dpi=300)\nplt.close()"
] |
[
[
"matplotlib.pyplot.close",
"numpy.random.seed",
"matplotlib.pyplot.figure"
]
] |