repo_name: string (length 6-130)
hexsha: list
file_path: list
code: list
apis: list
tsbiosky/Cascade-point-rcnn
[ "ab9f7559e2f91cea00fb98cdc20e33accadc0f14" ]
[ "tools/kitti_object_eval_python/eval.py" ]
[ "import numpy as np\nimport numba\nimport io as sysio\nfrom tools.kitti_object_eval_python.rotate_iou import rotate_iou_gpu_eval\n\n\[email protected]\ndef get_thresholds(scores: np.ndarray, num_gt, num_sample_pts=41):\n scores.sort()\n scores = scores[::-1]\n current_recall = 0\n thresholds = []\n for i, score in enumerate(scores):\n l_recall = (i + 1) / num_gt\n if i < (len(scores) - 1):\n r_recall = (i + 2) / num_gt\n else:\n r_recall = l_recall\n if (((r_recall - current_recall) < (current_recall - l_recall))\n and (i < (len(scores) - 1))):\n continue\n # recall = l_recall\n thresholds.append(score)\n current_recall += 1 / (num_sample_pts - 1.0)\n return thresholds\n\n\ndef clean_data(gt_anno, dt_anno, current_class, difficulty):\n CLASS_NAMES = ['car', 'pedestrian', 'cyclist']\n MIN_HEIGHT = [40, 25, 25]\n MAX_OCCLUSION = [0, 1, 2]\n MAX_TRUNCATION = [0.15, 0.3, 0.5]\n dc_bboxes, ignored_gt, ignored_dt = [], [], []\n current_cls_name = CLASS_NAMES[current_class].lower()\n num_gt = len(gt_anno[\"name\"])\n num_dt = len(dt_anno[\"name\"])\n num_valid_gt = 0\n for i in range(num_gt):\n bbox = gt_anno[\"bbox\"][i]\n gt_name = gt_anno[\"name\"][i].lower()\n height = bbox[3] - bbox[1]\n valid_class = -1\n if (gt_name == current_cls_name):\n valid_class = 1\n elif (current_cls_name == \"Pedestrian\".lower()\n and \"Person_sitting\".lower() == gt_name):\n valid_class = 0\n elif (current_cls_name == \"Car\".lower() and \"Van\".lower() == gt_name):\n valid_class = 0\n else:\n valid_class = -1\n ignore = False\n if ((gt_anno[\"occluded\"][i] > MAX_OCCLUSION[difficulty])\n or (gt_anno[\"truncated\"][i] > MAX_TRUNCATION[difficulty])\n or (height <= MIN_HEIGHT[difficulty])):\n # if gt_anno[\"difficulty\"][i] > difficulty or gt_anno[\"difficulty\"][i] == -1:\n ignore = True\n if valid_class == 1 and not ignore:\n ignored_gt.append(0)\n num_valid_gt += 1\n elif (valid_class == 0 or (ignore and (valid_class == 1))):\n ignored_gt.append(1)\n else:\n ignored_gt.append(-1)\n # for i in range(num_gt):\n if gt_anno[\"name\"][i] == \"DontCare\":\n dc_bboxes.append(gt_anno[\"bbox\"][i])\n for i in range(num_dt):\n if (dt_anno[\"name\"][i].lower() == current_cls_name):\n valid_class = 1\n else:\n valid_class = -1\n height = abs(dt_anno[\"bbox\"][i, 3] - dt_anno[\"bbox\"][i, 1])\n if height < MIN_HEIGHT[difficulty]:\n ignored_dt.append(1)\n elif valid_class == 1:\n ignored_dt.append(0)\n else:\n ignored_dt.append(-1)\n\n return num_valid_gt, ignored_gt, ignored_dt, dc_bboxes\n\n\[email protected](nopython=True)\ndef image_box_overlap(boxes, query_boxes, criterion=-1):\n N = boxes.shape[0]\n K = query_boxes.shape[0]\n overlaps = np.zeros((N, K), dtype=boxes.dtype)\n for k in range(K):\n qbox_area = ((query_boxes[k, 2] - query_boxes[k, 0]) *\n (query_boxes[k, 3] - query_boxes[k, 1]))\n for n in range(N):\n iw = (min(boxes[n, 2], query_boxes[k, 2]) -\n max(boxes[n, 0], query_boxes[k, 0]))\n if iw > 0:\n ih = (min(boxes[n, 3], query_boxes[k, 3]) -\n max(boxes[n, 1], query_boxes[k, 1]))\n if ih > 0:\n if criterion == -1:\n ua = (\n (boxes[n, 2] - boxes[n, 0]) *\n (boxes[n, 3] - boxes[n, 1]) + qbox_area - iw * ih)\n elif criterion == 0:\n ua = ((boxes[n, 2] - boxes[n, 0]) *\n (boxes[n, 3] - boxes[n, 1]))\n elif criterion == 1:\n ua = qbox_area\n else:\n ua = 1.0\n overlaps[n, k] = iw * ih / ua\n return overlaps\n\n\ndef bev_box_overlap(boxes, qboxes, criterion=-1):\n riou = rotate_iou_gpu_eval(boxes, qboxes, criterion)\n return riou\n\n\[email protected](nopython=True, parallel=True)\ndef 
d3_box_overlap_kernel(boxes, qboxes, rinc, criterion=-1):\n # ONLY support overlap in CAMERA, not lider.\n N, K = boxes.shape[0], qboxes.shape[0]\n for i in range(N):\n for j in range(K):\n if rinc[i, j] > 0:\n # iw = (min(boxes[i, 1] + boxes[i, 4], qboxes[j, 1] +\n # qboxes[j, 4]) - max(boxes[i, 1], qboxes[j, 1]))\n iw = (min(boxes[i, 1], qboxes[j, 1]) - max(\n boxes[i, 1] - boxes[i, 4], qboxes[j, 1] - qboxes[j, 4]))\n\n if iw > 0:\n area1 = boxes[i, 3] * boxes[i, 4] * boxes[i, 5]\n area2 = qboxes[j, 3] * qboxes[j, 4] * qboxes[j, 5]\n inc = iw * rinc[i, j]\n if criterion == -1:\n ua = (area1 + area2 - inc)\n elif criterion == 0:\n ua = area1\n elif criterion == 1:\n ua = area2\n else:\n ua = inc\n rinc[i, j] = inc / ua\n else:\n rinc[i, j] = 0.0\n\n\ndef d3_box_overlap(boxes, qboxes, criterion=-1):\n rinc = rotate_iou_gpu_eval(boxes[:, [0, 2, 3, 5, 6]],\n qboxes[:, [0, 2, 3, 5, 6]], 2)\n d3_box_overlap_kernel(boxes, qboxes, rinc, criterion)\n return rinc\n\n\[email protected](nopython=True)\ndef compute_statistics_jit(overlaps,\n gt_datas,\n dt_datas,\n ignored_gt,\n ignored_det,\n dc_bboxes,\n metric,\n min_overlap,\n thresh=0,\n compute_fp=False,\n compute_aos=False):\n\n det_size = dt_datas.shape[0]\n gt_size = gt_datas.shape[0]\n dt_scores = dt_datas[:, -1]\n dt_alphas = dt_datas[:, 4]\n gt_alphas = gt_datas[:, 4]\n dt_bboxes = dt_datas[:, :4]\n gt_bboxes = gt_datas[:, :4]\n\n assigned_detection = [False] * det_size\n ignored_threshold = [False] * det_size\n if compute_fp:\n for i in range(det_size):\n if (dt_scores[i] < thresh):\n ignored_threshold[i] = True\n NO_DETECTION = -10000000\n tp, fp, fn, similarity = 0, 0, 0, 0\n # thresholds = [0.0]\n # delta = [0.0]\n thresholds = np.zeros((gt_size, ))\n thresh_idx = 0\n delta = np.zeros((gt_size, ))\n delta_idx = 0\n for i in range(gt_size):\n if ignored_gt[i] == -1:\n continue\n det_idx = -1\n valid_detection = NO_DETECTION\n max_overlap = 0\n assigned_ignored_det = False\n\n for j in range(det_size):\n if (ignored_det[j] == -1):\n continue\n if (assigned_detection[j]):\n continue\n if (ignored_threshold[j]):\n continue\n overlap = overlaps[j, i]\n dt_score = dt_scores[j]\n if (not compute_fp and (overlap > min_overlap)\n and dt_score > valid_detection):\n det_idx = j\n valid_detection = dt_score\n elif (compute_fp and (overlap > min_overlap)\n and (overlap > max_overlap or assigned_ignored_det)\n and ignored_det[j] == 0):\n max_overlap = overlap\n det_idx = j\n valid_detection = 1\n assigned_ignored_det = False\n elif (compute_fp and (overlap > min_overlap)\n and (valid_detection == NO_DETECTION)\n and ignored_det[j] == 1):\n det_idx = j\n valid_detection = 1\n assigned_ignored_det = True\n\n if (valid_detection == NO_DETECTION) and ignored_gt[i] == 0:\n fn += 1\n elif ((valid_detection != NO_DETECTION)\n and (ignored_gt[i] == 1 or ignored_det[det_idx] == 1)):\n assigned_detection[det_idx] = True\n elif valid_detection != NO_DETECTION:\n tp += 1\n # thresholds.append(dt_scores[det_idx])\n thresholds[thresh_idx] = dt_scores[det_idx]\n thresh_idx += 1\n if compute_aos:\n # delta.append(gt_alphas[i] - dt_alphas[det_idx])\n delta[delta_idx] = gt_alphas[i] - dt_alphas[det_idx]\n delta_idx += 1\n\n assigned_detection[det_idx] = True\n if compute_fp:\n for i in range(det_size):\n if (not (assigned_detection[i] or ignored_det[i] == -1\n or ignored_det[i] == 1 or ignored_threshold[i])):\n fp += 1\n nstuff = 0\n if metric == 0:\n overlaps_dt_dc = image_box_overlap(dt_bboxes, dc_bboxes, 0)\n for i in range(dc_bboxes.shape[0]):\n for j in 
range(det_size):\n if (assigned_detection[j]):\n continue\n if (ignored_det[j] == -1 or ignored_det[j] == 1):\n continue\n if (ignored_threshold[j]):\n continue\n if overlaps_dt_dc[j, i] > min_overlap:\n assigned_detection[j] = True\n nstuff += 1\n fp -= nstuff\n if compute_aos:\n tmp = np.zeros((fp + delta_idx, ))\n # tmp = [0] * fp\n for i in range(delta_idx):\n tmp[i + fp] = (1.0 + np.cos(delta[i])) / 2.0\n # tmp.append((1.0 + np.cos(delta[i])) / 2.0)\n # assert len(tmp) == fp + tp\n # assert len(delta) == tp\n if tp > 0 or fp > 0:\n similarity = np.sum(tmp)\n else:\n similarity = -1\n return tp, fp, fn, similarity, thresholds[:thresh_idx]\n\n\ndef get_split_parts(num, num_part):\n same_part = num // num_part\n remain_num = num % num_part\n if remain_num == 0:\n return [same_part] * num_part\n else:\n return [same_part] * num_part + [remain_num]\n\n\[email protected](nopython=True)\ndef fused_compute_statistics(overlaps,\n pr,\n gt_nums,\n dt_nums,\n dc_nums,\n gt_datas,\n dt_datas,\n dontcares,\n ignored_gts,\n ignored_dets,\n metric,\n min_overlap,\n thresholds,\n compute_aos=False):\n gt_num = 0\n dt_num = 0\n dc_num = 0\n for i in range(gt_nums.shape[0]):\n for t, thresh in enumerate(thresholds):\n overlap = overlaps[dt_num:dt_num + dt_nums[i], gt_num:\n gt_num + gt_nums[i]]\n\n gt_data = gt_datas[gt_num:gt_num + gt_nums[i]]\n dt_data = dt_datas[dt_num:dt_num + dt_nums[i]]\n ignored_gt = ignored_gts[gt_num:gt_num + gt_nums[i]]\n ignored_det = ignored_dets[dt_num:dt_num + dt_nums[i]]\n dontcare = dontcares[dc_num:dc_num + dc_nums[i]]\n tp, fp, fn, similarity, _ = compute_statistics_jit(\n overlap,\n gt_data,\n dt_data,\n ignored_gt,\n ignored_det,\n dontcare,\n metric,\n min_overlap=min_overlap,\n thresh=thresh,\n compute_fp=True,\n compute_aos=compute_aos)\n pr[t, 0] += tp\n pr[t, 1] += fp\n pr[t, 2] += fn\n if similarity != -1:\n pr[t, 3] += similarity\n gt_num += gt_nums[i]\n dt_num += dt_nums[i]\n dc_num += dc_nums[i]\n\n\ndef calculate_iou_partly(gt_annos, dt_annos, metric, num_parts=50):\n \"\"\"fast iou algorithm. this function can be used independently to\n do result analysis. Must be used in CAMERA coordinate system.\n Args:\n gt_annos: dict, must from get_label_annos() in kitti_common.py\n dt_annos: dict, must from get_label_annos() in kitti_common.py\n metric: eval type. 0: bbox, 1: bev, 2: 3d\n num_parts: int. 
a parameter for fast calculate algorithm\n \"\"\"\n #print(len(gt_annos),len(dt_annos))\n assert len(gt_annos) == len(dt_annos)\n total_dt_num = np.stack([len(a[\"name\"]) for a in dt_annos], 0)\n total_gt_num = np.stack([len(a[\"name\"]) for a in gt_annos], 0)\n num_examples = len(gt_annos)\n split_parts = get_split_parts(num_examples, num_parts)\n parted_overlaps = []\n example_idx = 0\n\n for num_part in split_parts:\n gt_annos_part = gt_annos[example_idx:example_idx + num_part]\n dt_annos_part = dt_annos[example_idx:example_idx + num_part]\n if metric == 0:\n gt_boxes = np.concatenate([a[\"bbox\"] for a in gt_annos_part], 0)\n dt_boxes = np.concatenate([a[\"bbox\"] for a in dt_annos_part], 0)\n overlap_part = image_box_overlap(gt_boxes, dt_boxes)\n elif metric == 1:\n loc = np.concatenate(\n [a[\"location\"][:, [0, 2]] for a in gt_annos_part], 0)\n dims = np.concatenate(\n [a[\"dimensions\"][:, [0, 2]] for a in gt_annos_part], 0)\n rots = np.concatenate([a[\"rotation_y\"] for a in gt_annos_part], 0)\n gt_boxes = np.concatenate(\n [loc, dims, rots[..., np.newaxis]], axis=1)\n loc = np.concatenate(\n [a[\"location\"][:, [0, 2]] for a in dt_annos_part], 0)\n dims = np.concatenate(\n [a[\"dimensions\"][:, [0, 2]] for a in dt_annos_part], 0)\n rots = np.concatenate([a[\"rotation_y\"] for a in dt_annos_part], 0)\n dt_boxes = np.concatenate(\n [loc, dims, rots[..., np.newaxis]], axis=1)\n overlap_part = bev_box_overlap(gt_boxes, dt_boxes).astype(\n np.float64)\n elif metric == 2:\n loc = np.concatenate([a[\"location\"] for a in gt_annos_part], 0)\n dims = np.concatenate([a[\"dimensions\"] for a in gt_annos_part], 0)\n rots = np.concatenate([a[\"rotation_y\"] for a in gt_annos_part], 0)\n gt_boxes = np.concatenate(\n [loc, dims, rots[..., np.newaxis]], axis=1)\n loc = np.concatenate([a[\"location\"] for a in dt_annos_part], 0)\n dims = np.concatenate([a[\"dimensions\"] for a in dt_annos_part], 0)\n rots = np.concatenate([a[\"rotation_y\"] for a in dt_annos_part], 0)\n dt_boxes = np.concatenate(\n [loc, dims, rots[..., np.newaxis]], axis=1)\n overlap_part = d3_box_overlap(gt_boxes, dt_boxes).astype(\n np.float64)\n else:\n raise ValueError(\"unknown metric\")\n parted_overlaps.append(overlap_part)\n example_idx += num_part\n overlaps = []\n example_idx = 0\n for j, num_part in enumerate(split_parts):\n gt_annos_part = gt_annos[example_idx:example_idx + num_part]\n dt_annos_part = dt_annos[example_idx:example_idx + num_part]\n gt_num_idx, dt_num_idx = 0, 0\n for i in range(num_part):\n gt_box_num = total_gt_num[example_idx + i]\n dt_box_num = total_dt_num[example_idx + i]\n overlaps.append(\n parted_overlaps[j][gt_num_idx:gt_num_idx + gt_box_num,\n dt_num_idx:dt_num_idx + dt_box_num])\n gt_num_idx += gt_box_num\n dt_num_idx += dt_box_num\n example_idx += num_part\n\n return overlaps, parted_overlaps, total_gt_num, total_dt_num\n\n\ndef _prepare_data(gt_annos, dt_annos, current_class, difficulty):\n gt_datas_list = []\n dt_datas_list = []\n total_dc_num = []\n ignored_gts, ignored_dets, dontcares = [], [], []\n total_num_valid_gt = 0\n for i in range(len(gt_annos)):\n rets = clean_data(gt_annos[i], dt_annos[i], current_class, difficulty)\n num_valid_gt, ignored_gt, ignored_det, dc_bboxes = rets\n ignored_gts.append(np.array(ignored_gt, dtype=np.int64))\n ignored_dets.append(np.array(ignored_det, dtype=np.int64))\n if len(dc_bboxes) == 0:\n dc_bboxes = np.zeros((0, 4)).astype(np.float64)\n else:\n dc_bboxes = np.stack(dc_bboxes, 0).astype(np.float64)\n total_dc_num.append(dc_bboxes.shape[0])\n 
dontcares.append(dc_bboxes)\n total_num_valid_gt += num_valid_gt\n gt_datas = np.concatenate(\n [gt_annos[i][\"bbox\"], gt_annos[i][\"alpha\"][..., np.newaxis]], 1)\n dt_datas = np.concatenate([\n dt_annos[i][\"bbox\"], dt_annos[i][\"alpha\"][..., np.newaxis],\n dt_annos[i][\"score\"][..., np.newaxis]\n ], 1)\n gt_datas_list.append(gt_datas)\n dt_datas_list.append(dt_datas)\n total_dc_num = np.stack(total_dc_num, axis=0)\n return (gt_datas_list, dt_datas_list, ignored_gts, ignored_dets, dontcares,\n total_dc_num, total_num_valid_gt)\n\n\ndef eval_class(gt_annos,\n dt_annos,\n current_classes,\n difficultys,\n metric,\n min_overlaps,\n compute_aos=False,\n num_parts=50):\n \"\"\"Kitti eval. support 2d/bev/3d/aos eval. support 0.5:0.05:0.95 coco AP.\n Args:\n gt_annos: dict, must from get_label_annos() in kitti_common.py\n dt_annos: dict, must from get_label_annos() in kitti_common.py\n current_classes: list of int, 0: car, 1: pedestrian, 2: cyclist\n difficultys: list of int. eval difficulty, 0: easy, 1: normal, 2: hard\n metric: eval type. 0: bbox, 1: bev, 2: 3d\n min_overlaps: float, min overlap. format: [num_overlap, metric, class].\n num_parts: int. a parameter for fast calculate algorithm\n\n Returns:\n dict of recall, precision and aos\n \"\"\"\n #print(len(gt_annos), len(dt_annos))\n assert len(gt_annos) == len(dt_annos)\n num_examples = len(gt_annos)\n split_parts = get_split_parts(num_examples, num_parts)\n\n rets = calculate_iou_partly(dt_annos, gt_annos, metric, num_parts)\n overlaps, parted_overlaps, total_dt_num, total_gt_num = rets\n N_SAMPLE_PTS = 41\n num_minoverlap = len(min_overlaps)\n num_class = len(current_classes)\n num_difficulty = len(difficultys)\n precision = np.zeros(\n [num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS])\n recall = np.zeros(\n [num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS])\n aos = np.zeros([num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS])\n for m, current_class in enumerate(current_classes):\n for l, difficulty in enumerate(difficultys):\n rets = _prepare_data(gt_annos, dt_annos, current_class, difficulty)\n (gt_datas_list, dt_datas_list, ignored_gts, ignored_dets,\n dontcares, total_dc_num, total_num_valid_gt) = rets\n for k, min_overlap in enumerate(min_overlaps[:, metric, m]):\n thresholdss = []\n for i in range(len(gt_annos)):\n rets = compute_statistics_jit(\n overlaps[i],\n gt_datas_list[i],\n dt_datas_list[i],\n ignored_gts[i],\n ignored_dets[i],\n dontcares[i],\n metric,\n min_overlap=min_overlap,\n thresh=0.0,\n compute_fp=False)\n tp, fp, fn, similarity, thresholds = rets\n thresholdss += thresholds.tolist()\n thresholdss = np.array(thresholdss)\n thresholds = get_thresholds(thresholdss, total_num_valid_gt)\n thresholds = np.array(thresholds)\n pr = np.zeros([len(thresholds), 4])\n idx = 0\n for j, num_part in enumerate(split_parts):\n gt_datas_part = np.concatenate(\n gt_datas_list[idx:idx + num_part], 0)\n dt_datas_part = np.concatenate(\n dt_datas_list[idx:idx + num_part], 0)\n dc_datas_part = np.concatenate(\n dontcares[idx:idx + num_part], 0)\n ignored_dets_part = np.concatenate(\n ignored_dets[idx:idx + num_part], 0)\n ignored_gts_part = np.concatenate(\n ignored_gts[idx:idx + num_part], 0)\n fused_compute_statistics(\n parted_overlaps[j],\n pr,\n total_gt_num[idx:idx + num_part],\n total_dt_num[idx:idx + num_part],\n total_dc_num[idx:idx + num_part],\n gt_datas_part,\n dt_datas_part,\n dc_datas_part,\n ignored_gts_part,\n ignored_dets_part,\n metric,\n min_overlap=min_overlap,\n thresholds=thresholds,\n 
compute_aos=compute_aos)\n idx += num_part\n for i in range(len(thresholds)):\n recall[m, l, k, i] = pr[i, 0] / (pr[i, 0] + pr[i, 2])\n precision[m, l, k, i] = pr[i, 0] / (pr[i, 0] + pr[i, 1])\n if compute_aos:\n aos[m, l, k, i] = pr[i, 3] / (pr[i, 0] + pr[i, 1])\n for i in range(len(thresholds)):\n precision[m, l, k, i] = np.max(\n precision[m, l, k, i:], axis=-1)\n recall[m, l, k, i] = np.max(recall[m, l, k, i:], axis=-1)\n if compute_aos:\n aos[m, l, k, i] = np.max(aos[m, l, k, i:], axis=-1)\n ret_dict = {\n \"recall\": recall,\n \"precision\": precision,\n \"orientation\": aos,\n }\n return ret_dict\n\n\ndef get_mAP(prec):\n sums = 0\n for i in range(0, prec.shape[-1], 4):\n sums = sums + prec[..., i]\n return sums / 11 * 100\n\n\ndef print_str(value, *arg, sstream=None):\n if sstream is None:\n sstream = sysio.StringIO()\n sstream.truncate(0)\n sstream.seek(0)\n print(value, *arg, file=sstream)\n return sstream.getvalue()\n\n\ndef do_eval(gt_annos,\n dt_annos,\n current_classes,\n min_overlaps,\n compute_aos=False):\n # min_overlaps: [num_minoverlap, metric, num_class]\n difficultys = [0, 1, 2]\n ret = eval_class(gt_annos, dt_annos, current_classes, difficultys, 0,\n min_overlaps, compute_aos)\n # ret: [num_class, num_diff, num_minoverlap, num_sample_points]\n mAP_bbox = get_mAP(ret[\"precision\"])\n mAP_aos = None\n if compute_aos:\n mAP_aos = get_mAP(ret[\"orientation\"])\n ret = eval_class(gt_annos, dt_annos, current_classes, difficultys, 1,\n min_overlaps)\n mAP_bev = get_mAP(ret[\"precision\"])\n ret = eval_class(gt_annos, dt_annos, current_classes, difficultys, 2,\n min_overlaps)\n mAP_3d = get_mAP(ret[\"precision\"])\n return mAP_bbox, mAP_bev, mAP_3d, mAP_aos\n\n\ndef do_coco_style_eval(gt_annos, dt_annos, current_classes, overlap_ranges,\n compute_aos):\n # overlap_ranges: [range, metric, num_class]\n min_overlaps = np.zeros([10, *overlap_ranges.shape[1:]])\n for i in range(overlap_ranges.shape[1]):\n for j in range(overlap_ranges.shape[2]):\n min_overlaps[:, i, j] = np.linspace(*overlap_ranges[:, i, j])\n mAP_bbox, mAP_bev, mAP_3d, mAP_aos = do_eval(\n gt_annos, dt_annos, current_classes, min_overlaps, compute_aos)\n # ret: [num_class, num_diff, num_minoverlap]\n mAP_bbox = mAP_bbox.mean(-1)\n mAP_bev = mAP_bev.mean(-1)\n mAP_3d = mAP_3d.mean(-1)\n if mAP_aos is not None:\n mAP_aos = mAP_aos.mean(-1)\n return mAP_bbox, mAP_bev, mAP_3d, mAP_aos\n\n\ndef get_official_eval_result(gt_annos, dt_annos, current_classes):\n overlap_0_7 = np.array([[0.7, 0.5, 0.5, 0.7,\n 0.5], [0.7, 0.5, 0.5, 0.7, 0.5],\n [0.7, 0.5, 0.5, 0.7, 0.5]])\n overlap_0_5 = np.array([[0.7, 0.5, 0.5, 0.7,\n 0.5], [0.5, 0.25, 0.25, 0.5, 0.25],\n [0.5, 0.25, 0.25, 0.5, 0.25]])\n min_overlaps = np.stack([overlap_0_7, overlap_0_5], axis=0) # [2, 3, 5]\n class_to_name = {\n 0: 'Car',\n 1: 'Pedestrian',\n 2: 'Cyclist',\n 3: 'Van',\n 4: 'Person_sitting',\n }\n name_to_class = {v: n for n, v in class_to_name.items()}\n if not isinstance(current_classes, (list, tuple)):\n current_classes = [current_classes]\n current_classes_int = []\n for curcls in current_classes:\n if isinstance(curcls, str):\n current_classes_int.append(name_to_class[curcls])\n else:\n current_classes_int.append(curcls)\n current_classes = current_classes_int\n min_overlaps = min_overlaps[:, :, current_classes]\n result = ''\n # check whether alpha is valid\n compute_aos = False\n for anno in dt_annos:\n if anno['alpha'].shape[0] != 0:\n if anno['alpha'][0] != -10:\n compute_aos = True\n break\n mAPbbox, mAPbev, mAP3d, mAPaos = do_eval(\n gt_annos, 
dt_annos, current_classes, min_overlaps, compute_aos)\n\n ret_dict = {}\n for j, curcls in enumerate(current_classes):\n # mAP threshold array: [num_minoverlap, metric, class]\n # mAP result: [num_class, num_diff, num_minoverlap]\n for i in range(min_overlaps.shape[0]):\n result += print_str(\n (f\"{class_to_name[curcls]} \"\n \"AP@{:.2f}, {:.2f}, {:.2f}:\".format(*min_overlaps[i, :, j])))\n result += print_str((f\"bbox AP:{mAPbbox[j, 0, i]:.4f}, \"\n f\"{mAPbbox[j, 1, i]:.4f}, \"\n f\"{mAPbbox[j, 2, i]:.4f}\"))\n result += print_str((f\"bev AP:{mAPbev[j, 0, i]:.4f}, \"\n f\"{mAPbev[j, 1, i]:.4f}, \"\n f\"{mAPbev[j, 2, i]:.4f}\"))\n result += print_str((f\"3d AP:{mAP3d[j, 0, i]:.4f}, \"\n f\"{mAP3d[j, 1, i]:.4f}, \"\n f\"{mAP3d[j, 2, i]:.4f}\"))\n\n\n if compute_aos:\n result += print_str((f\"aos AP:{mAPaos[j, 0, i]:.2f}, \"\n f\"{mAPaos[j, 1, i]:.2f}, \"\n f\"{mAPaos[j, 2, i]:.2f}\"))\n ret_dict['Car_3d_easy'] = mAP3d[0, 0, 0]\n ret_dict['Car_3d_moderate'] = mAP3d[0, 1, 0]\n ret_dict['Car_3d_hard'] = mAP3d[0, 2, 0]\n ret_dict['Car_bev_easy'] = mAPbev[0, 0, 0]\n ret_dict['Car_bev_moderate'] = mAPbev[0, 1, 0]\n ret_dict['Car_bev_hard'] = mAPbev[0, 2, 0]\n ret_dict['Car_image_easy'] = mAPbbox[0, 0, 0]\n ret_dict['Car_image_moderate'] = mAPbbox[0, 1, 0]\n ret_dict['Car_image_hard'] = mAPbbox[0, 2, 0]\n\n return result, ret_dict\n\n\ndef get_coco_eval_result(gt_annos, dt_annos, current_classes):\n class_to_name = {\n 0: 'Car',\n 1: 'Pedestrian',\n 2: 'Cyclist',\n 3: 'Van',\n 4: 'Person_sitting',\n }\n class_to_range = {\n 0: [0.5, 0.95, 10],\n 1: [0.25, 0.7, 10],\n 2: [0.25, 0.7, 10],\n 3: [0.5, 0.95, 10],\n 4: [0.25, 0.7, 10],\n }\n name_to_class = {v: n for n, v in class_to_name.items()}\n if not isinstance(current_classes, (list, tuple)):\n current_classes = [current_classes]\n current_classes_int = []\n for curcls in current_classes:\n if isinstance(curcls, str):\n current_classes_int.append(name_to_class[curcls])\n else:\n current_classes_int.append(curcls)\n current_classes = current_classes_int\n overlap_ranges = np.zeros([3, 3, len(current_classes)])\n for i, curcls in enumerate(current_classes):\n overlap_ranges[:, :, i] = np.array(\n class_to_range[curcls])[:, np.newaxis]\n result = ''\n # check whether alpha is valid\n compute_aos = False\n for anno in dt_annos:\n if anno['alpha'].shape[0] != 0:\n if anno['alpha'][0] != -10:\n compute_aos = True\n break\n mAPbbox, mAPbev, mAP3d, mAPaos = do_coco_style_eval(\n gt_annos, dt_annos, current_classes, overlap_ranges, compute_aos)\n for j, curcls in enumerate(current_classes):\n # mAP threshold array: [num_minoverlap, metric, class]\n # mAP result: [num_class, num_diff, num_minoverlap]\n o_range = np.array(class_to_range[curcls])[[0, 2, 1]]\n o_range[1] = (o_range[2] - o_range[0]) / (o_range[1] - 1)\n result += print_str((f\"{class_to_name[curcls]} \"\n \"coco AP@{:.2f}:{:.2f}:{:.2f}:\".format(*o_range)))\n result += print_str((f\"bbox AP:{mAPbbox[j, 0]:.2f}, \"\n f\"{mAPbbox[j, 1]:.2f}, \"\n f\"{mAPbbox[j, 2]:.2f}\"))\n result += print_str((f\"bev AP:{mAPbev[j, 0]:.2f}, \"\n f\"{mAPbev[j, 1]:.2f}, \"\n f\"{mAPbev[j, 2]:.2f}\"))\n result += print_str((f\"3d AP:{mAP3d[j, 0]:.2f}, \"\n f\"{mAP3d[j, 1]:.2f}, \"\n f\"{mAP3d[j, 2]:.2f}\"))\n if compute_aos:\n result += print_str((f\"aos AP:{mAPaos[j, 0]:.2f}, \"\n f\"{mAPaos[j, 1]:.2f}, \"\n f\"{mAPaos[j, 2]:.2f}\"))\n return result\n" ]
[ [ "numpy.concatenate", "numpy.max", "numpy.array", "numpy.zeros", "numpy.sum", "numpy.stack", "numpy.cos", "numpy.linspace" ] ]
nicoladimauro/GPU
[ "43a2743d1cca4a9517b1f8b53018fca9d62816f6" ]
[ "gpu.py" ]
[ "import os\nimport csv \nimport numpy as np\n\nimport arff\nfrom sklearn.svm import SVC\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.metrics import precision_score, recall_score, f1_score, make_scorer\nfrom sklearn.model_selection import StratifiedKFold, StratifiedShuffleSplit\nfrom sklearn.model_selection import GridSearchCV\n\nimport sys\n\n# Load a CSV file\ndef load_csv(filename):\n file = open(filename, \"r\")\n lines = csv.reader(file)\n dataset = list(lines)\n return np.array(dataset)\n\ndef write_csv(data, path):\n with open(path, \"w\") as csv_file:\n writer_ = csv.writer(csv_file, quoting=csv.QUOTE_ALL)\n for line in data:\n writer_.writerow(line)\n\ndef is_int(value):\n try:\n int(value)\n return True\n except ValueError:\n return False\n\ndatasets = ['audiology','breast-cancer', 'chess', 'dermatology',\n 'hepatitis', 'lymph', 'nursery', 'pima', 'soybean', 'vote']\nclass_labels = ['cochlear_age','no-recurrence-events', 'won', \n '<50', 'DIE', 'malign_lymph', 'not_recom', '0', 'brown-spot', 'democrat']\n\nneg_class_labels = ['cochlear_unknown', 'recurrence-events', 'nowin', \n '>50_1', 'LIVE', 'metastases', 'priority', '1', 'alternarialeaf-spot', 'republican']\n\nC_values = [10**v for v in range(-8, 4, 1)]\ngamma_values = [10**v for v in range(-6, 6, 1)]\n\n\nwith open('results', 'w') as output:\n output.write('Dataset,perc,pos,ones,precision,recall,f1-score\\n')\n\n for dataset, class_label, neg_class_label in zip(datasets,class_labels, neg_class_labels):\n #creating feature filename for bnlearn\n data_filename = 'data/' + dataset + '_train_pos_50_1.arff'\n data = arff.load(open(data_filename, 'r'))\n features_name = 'data/' + dataset + '.features'\n log_file = 'data/' + dataset + '.log'\n\n out_log_file = open(log_file,\"w\")\n out_log_file.write('perc,fold,ones,gamma,c,precision,recall,f1-score\\n')\n out_log_file.flush()\n\n with open(features_name, 'w') as features_file:\n for attr in data['attributes'][:-1]:\n features_file.write('\"' + attr[0] + '\":categorical:')\n for val in attr[1][:-1]:\n features_file.write('\"' + val + '\",')\n if attr[1][-1] != '':\n features_file.write('\"' + attr[1][-1] + '\".\\n')\n else:\n features_file.write('\".\\n')\n\n\n for perc in ['30', '40', '50']:\n\n precision_f = []\n recall_f = []\n f1_score_f = []\n ones_f = []\n\n for fold in range(1,11):\n print('Fold:', fold)\n\n pos_name = 'data/' + dataset + '_train_pos_' + perc + '_' + str(fold) + '.arff'\n unl_name = 'data/' + dataset + '_train_unl_' + perc + '_' + str(fold) + '.arff'\n test_name = 'data/' + dataset + '_test_' + perc + '_' + str(fold) + '.arff'\n\n\n train_pos = arff.load(open(pos_name, 'r'))\n train_unl = arff.load(open(unl_name, 'r'))\n test = arff.load(open(test_name, 'r'))\n\n train_pos_data = np.array(train_pos['data'])\n train_unl_data = np.array(train_unl['data'])\n\n test_data = np.array(test['data'])\n\n write_csv(train_pos_data[:,:-1],'./data/pos.data')\n write_csv(train_unl_data[:,:-1],'./data/unl.data')\n\n command = 'R --no-save --args ./data/' + dataset + '.features ./data/pos ./data/unl outfile < bn_k2.R > /dev/null'\n\n os.system(command)\n\n lls = np.loadtxt('outfile')\n\n argsort = np.argsort(lls)\n\n ones = 0\n for index in argsort[:train_pos_data.shape[0]]:\n if train_unl_data[index,-1]==class_label:\n ones = ones + 1\n\n\n X_train_pos_neg = np.concatenate((train_pos_data[:,:-1], train_unl_data[argsort[:train_pos_data.shape[0]],:-1]), axis=0)\n y_train_pos_neg = np.array([class_label]*train_pos_data.shape[0] + 
[neg_class_label]*train_pos_data.shape[0])\n\n X_train_pos_neg_int = np.zeros((X_train_pos_neg.shape[0],X_train_pos_neg.shape[1]))\n attributes = train_pos['attributes']\n for i in range(X_train_pos_neg.shape[1]):\n values = attributes[i][1]\n for j in range(X_train_pos_neg.shape[0]):\n X_train_pos_neg_int[j,i] = values.index(X_train_pos_neg[j,i])\n\n X_test_int = np.zeros((test_data.shape[0],test_data.shape[1]-1))\n attributes = train_pos['attributes']\n for i in range(test_data.shape[1]-1):\n values = attributes[i][1]\n for j in range(test_data.shape[0]):\n X_test_int[j,i] = values.index(test_data[j,i])\n\n X_all_int = np.concatenate((X_train_pos_neg_int, X_test_int), axis=0)\n\n encoder = OneHotEncoder()\n encoder.fit(X_all_int)\n A = encoder.transform(X_train_pos_neg_int).toarray()\n B = encoder.transform(X_test_int).toarray()\n\n param_grid = dict(gamma=gamma_values, C=C_values)\n #cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=177)\n cv = StratifiedShuffleSplit(n_splits=5, test_size=0.2, random_state=177)\n f1_scorer = make_scorer(f1_score, pos_label=neg_class_label)\n grid = GridSearchCV(SVC(), param_grid=param_grid, cv=cv, scoring=f1_scorer)\n grid.fit(A, y_train_pos_neg)\n\n print(\"The best parameters are %s with a score of %0.2f\"\n % (grid.best_params_, grid.best_score_))\n\n gamma = grid.best_params_['gamma']\n C = grid.best_params_['C']\n\n\n clf = SVC(kernel='rbf', gamma=gamma, C=C)\n clf.fit(A, y_train_pos_neg) \n\n y_train_pred = clf.predict(A)\n\n print('Train stats')\n pr = precision_score(y_train_pos_neg,y_train_pred, pos_label=neg_class_label, average='binary')\n re = recall_score(y_train_pos_neg,y_train_pred, pos_label=neg_class_label, average='binary')\n f1 = f1_score(y_train_pos_neg,y_train_pred, pos_label=neg_class_label, average='binary')\n print('Precision:', pr)\n print('Recall:', re)\n print('F1-score:', f1)\n\n y_test_pred = clf.predict(B)\n\n print('Test stats')\n pr = precision_score(test_data[:,-1],y_test_pred, pos_label=neg_class_label, average='binary')\n re = recall_score(test_data[:,-1],y_test_pred, pos_label=neg_class_label, average='binary')\n f1 = f1_score(test_data[:,-1],y_test_pred, pos_label=neg_class_label, average='binary')\n print('Precision:', pr)\n print('Recall:', re)\n print('F1-score:', f1)\n\n precision_f.append(pr)\n recall_f.append(re)\n f1_score_f.append(f1)\n ones_f.append(ones)\n\n out_log_file.write(str(perc) + ',' + str(fold) + ',' +str(ones) + ',' +str(gamma) + ',' +\n str(C) + ',' + str(pr) + ',' + str(re) + ',' + str(f1) +'\\n')\n out_log_file.flush()\n\n output.write(dataset + ',' + perc + ',')\n output.write(str(train_pos_data.shape[0]) + ',' +\n str(np.mean(ones_f)) + ',' +\n str(np.mean(precision_f)) + ',' +\n str(np.mean(recall_f)) + ',' +\n str(np.mean(f1_score_f)) + '\\n')\n\n output.flush()\n out_log_file.close()\n" ]
[ [ "numpy.concatenate", "numpy.array", "numpy.zeros", "numpy.mean", "sklearn.svm.SVC", "numpy.loadtxt", "numpy.argsort", "sklearn.metrics.make_scorer", "sklearn.model_selection.StratifiedShuffleSplit", "sklearn.metrics.precision_score", "sklearn.metrics.f1_score", "sklearn.preprocessing.OneHotEncoder", "sklearn.metrics.recall_score" ] ]
ian-r-rose/visualization
[ "ed6d9fab95eb125e7340ab3fad3ed114ed3214af" ]
[ "docker/src/clawpack-5.3.1/pyclaw/src/pyclaw/classic/setup.py" ]
[ "#!/usr/bin/env python\n\nimport os\n\ndef configuration(parent_package='',top_path=None):\n from numpy.distutils.misc_util import Configuration\n config = Configuration('classic', parent_package, top_path)\n\n config.add_extension('classic1',\n ['limiter.f90','philim.f90','step1.f90'],f2py_options=['--quiet'])\n\n config.add_extension('classic2',\n ['limiter.f90','philim.f90','flux2.f90','step2ds.f90','step2.f90'],f2py_options=['--quiet'])\n\n config.add_extension('classic3',\n ['limiter.f90','philim.f90','flux3.f90','step3ds.f90','step3.f90'],f2py_options=['--quiet'])\n\n return config\n\nif __name__ == '__main__':\n from numpy.distutils.core import setup\n setup(**configuration(top_path='').todict())\n" ]
[ [ "numpy.distutils.misc_util.Configuration" ] ]
gmum/uncertainty-baselines
[ "63df2e5f29cdaefe49626439bbe13289f37eed36" ]
[ "uncertainty_baselines/models/models_test.py" ]
[ "# coding=utf-8\n# Copyright 2020 The Uncertainty Baselines Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"Tests for ub.models.get().\"\"\"\n\nimport tensorflow as tf\nimport uncertainty_baselines as ub\n\n\nclass ModelsTest(tf.test.TestCase):\n\n def testGetModel(self):\n model = ub.models.get('resnet50', batch_size=13, batch_norm_epsilon=1e-2)\n self.assertEqual(176, len(model.layers))\n\n\nif __name__ == '__main__':\n tf.test.main()\n" ]
[ [ "tensorflow.test.main" ] ]
herjy/BlendingToolKit
[ "f36dfa45cf96b182a9e0c87d163edfc10e8237d8" ]
[ "btk/utils.py" ]
[ "\"\"\"Contains functions to perform detection, deblending and measurement\n on images.\n\"\"\"\nfrom btk import measure\nimport btk.create_blend_generator\nimport numpy as np\nimport astropy.table\nimport skimage.feature\n\n\nclass SEP_params(measure.Measurement_params):\n \"\"\"Class to perform detection and deblending with SEP\"\"\"\n\n def get_centers(self, image):\n \"\"\"Return centers detected when object detection and photometry\n is done on input image with SEP.\n Args:\n image: Image (single band) of galaxy to perform measurement on.\n Returns:\n centers: x and y coordinates of detected centroids\n\n \"\"\"\n sep = __import__('sep')\n bkg = sep.Background(image)\n self.catalog, self.segmentation = sep.extract(\n image, 1.5, err=bkg.globalrms, segmentation_map=True)\n centers = np.stack((self.catalog['x'], self.catalog['y']), axis=1)\n return centers\n\n def get_deblended_images(self, data, index):\n \"\"\"Returns scarlet modeled blend and centers for the given blend\"\"\"\n image = np.mean(data['blend_images'][index], axis=2)\n peaks = self.get_centers(image)\n return {'deblend_image': None, 'peaks': peaks}\n\n\nclass Stack_params(measure.Measurement_params):\n \"\"\"Class with functions that describe how LSST science pipeline can\n perform measurements on the input data.\"\"\"\n min_pix = 1 # Minimum size in pixels to be considered a source\n bkg_bin_size = 32 # Binning size of the local background\n thr_value = 5 # SNR threshold for the detection\n psf_stamp_size = 41 # size of pstamp to draw PSF on\n\n def get_psf_sky(self, obs_cond):\n \"\"\"Returns postage stamp image of the PSF and mean background sky\n level value saved in the input obs_cond class\n Args:\n obs_cond:`descwl.survey.Survey` class describing observing\n conditions.\n\n \"\"\"\n mean_sky_level = obs_cond.mean_sky_level\n psf = obs_cond.psf_model\n psf_image = psf.drawImage(\n scale=obs_cond.pixel_scale,\n nx=self.psf_stamp_size,\n ny=self.psf_stamp_size).array\n return psf_image, mean_sky_level\n\n def make_measurement(self, data, index):\n \"\"\"Perform detection, deblending and measurement on the i band image of\n the blend for input index entry in the batch.\n\n Args:\n data: Dictionary with blend images, isolated object images, blend\n catalog, and observing conditions.\n index: Position of the blend to measure in the batch.\n\n Returns:\n astropy.Table of the measurement results.\n \"\"\"\n image_array = data['blend_images'][index, :, :, 3].astype(np.float32)\n psf_image, mean_sky_level = self.get_psf_sky(\n data['obs_condition'][index][3])\n variance_array = image_array + mean_sky_level\n psf_array = psf_image.astype(np.float64)\n cat = run_stack(image_array, variance_array, psf_array,\n min_pix=self.min_pix, bkg_bin_size=self.bkg_bin_size,\n thr_value=self.thr_value)\n cat_chldrn = cat[cat['deblend_nChild'] == 0]\n cat_chldrn = cat_chldrn.copy(deep=True)\n return cat_chldrn.asAstropy()\n\n def get_deblended_images(self, data=None, index=None):\n return None\n\n\ndef run_stack(image_array, variance_array, psf_array,\n min_pix=1, bkg_bin_size=32, thr_value=5):\n \"\"\"\n Function to setup the DM stack and perform detection, deblending and\n measurement\n Args:\n image_array: Numpy array of image to run stack on\n variance_array: per pixel variance of the input image_array (must\n have same dimensions as image_array)\n psf_array: Image of the PSF for image_array.\n min_pix: Minimum size in pixels of a source to be considered by the\n stack (default=1).\n bkg_bin_size: Binning of the local background in 
pixels (default=32).\n thr_value: SNR threshold for the detected sources to be included in the\n final catalog(default=5).\n Returns:\n catalog: AstroPy table of detected sources\n \"\"\"\n # Convert to stack Image object\n import lsst.afw.table\n import lsst.afw.image\n import lsst.afw.math\n import lsst.meas.algorithms\n import lsst.meas.base\n import lsst.meas.deblender\n import lsst.meas.extensions.shapeHSM\n image = lsst.afw.image.ImageF(image_array)\n variance = lsst.afw.image.ImageF(variance_array)\n # Generate a masked image, i.e., an image+mask+variance image (mask=None)\n masked_image = lsst.afw.image.MaskedImageF(image, None, variance)\n # Create the kernel in the stack's format\n psf_im = lsst.afw.image.ImageD(psf_array)\n fkernel = lsst.afw.math.FixedKernel(psf_im)\n psf = lsst.meas.algorithms.KernelPsf(fkernel)\n # Passing the image to the stack\n exposure = lsst.afw.image.ExposureF(masked_image)\n # Assign the exposure the PSF that we created\n exposure.setPsf(psf)\n schema = lsst.afw.table.SourceTable.makeMinimalSchema()\n config1 = lsst.meas.algorithms.SourceDetectionConfig()\n # Tweaks in the configuration that can improve detection\n # Change carefully!\n #####\n config1.tempLocalBackground.binSize = bkg_bin_size\n config1.minPixels = min_pix\n config1.thresholdValue = thr_value\n #####\n detect = lsst.meas.algorithms.SourceDetectionTask(schema=schema,\n config=config1)\n deblend = lsst.meas.deblender.SourceDeblendTask(schema=schema)\n config1 = lsst.meas.base.SingleFrameMeasurementConfig()\n config1.plugins.names.add('ext_shapeHSM_HsmShapeRegauss')\n config1.plugins.names.add('ext_shapeHSM_HsmSourceMoments')\n config1.plugins.names.add('ext_shapeHSM_HsmPsfMoments')\n measure = lsst.meas.base.SingleFrameMeasurementTask(schema=schema,\n config=config1)\n table = lsst.afw.table.SourceTable.make(schema)\n detect_result = detect.run(table, exposure) # run detection task\n catalog = detect_result.sources\n deblend.run(exposure, catalog) # run the deblending task\n measure.run(catalog, exposure) # run the measuring task\n catalog = catalog.copy(deep=True)\n return catalog\n\n\nclass Scarlet_params(measure.Measurement_params):\n \"\"\"Class with functions that describe how scarlet should deblend images in\n the input data\"\"\"\n iters = 200 # Maximum number of iterations for scarlet to run\n e_rel = .015 # Relative error for convergence\n detect_centers = True\n\n def make_measurement(self, data=None, index=None):\n return None\n\n def get_centers(self, image):\n \"\"\"Returns centers from SEP detection on the band averaged mean of the\n input image.\n\n Args:\n image: Numpy array of multi-band image to run scarlet on\n [Number of bands, height, width].\n\n Returns:\n Array of x and y coordinate of centroids of objects in the image.\n \"\"\"\n sep = __import__('sep')\n detect = image.mean(axis=0) # simple average for detection\n bkg = sep.Background(detect)\n catalog = sep.extract(detect, 1.5, err=bkg.globalrms)\n return np.stack((catalog['x'], catalog['y']), axis=1)\n\n def scarlet_initialize(self, images, peaks,\n bg_rms, iters, e_rel):\n \"\"\" Initializes scarlet ExtendedSource at locations specified as\n peaks in the (multi-band) input images.\n Args:\n images: Numpy array of multi-band image to run scarlet on\n [Number of bands, height, width].\n peaks: Array of x and y coordinate of centroids of objects in\n the image [number of sources, 2].\n bg_rms: Background RMS value of the images [Number of bands]\n\n Returns:\n blend: scarlet.Blend object for the initialized 
sources\n rejected_sources: list of sources (if any) that scarlet was\n unable to initialize the image with.\n \"\"\"\n scarlet = __import__(\"scarlet\")\n sources, rejected_sources = [], []\n for n, peak in enumerate(peaks):\n try:\n result = scarlet.ExtendedSource(\n (peak[1], peak[0]),\n images,\n bg_rms)\n sources.append(result)\n except scarlet.source.SourceInitError:\n rejected_sources.append(n)\n print(\"No flux in peak {0} at {1}\".format(n, peak))\n blend = scarlet.Blend(sources).set_data(images, bg_rms=bg_rms)\n blend.fit(iters, e_rel=e_rel)\n return blend, rejected_sources\n\n def get_deblended_images(self, data, index):\n \"\"\"\n Deblend input images with scarlet\n Args:\n images: Numpy array of multi-band image to run scarlet on\n [Number of bands, height, width].\n peaks: x and y coordinate of centroids of objects in the image.\n [number of sources, 2]\n bg_rms: Background RMS value of the images [Number of bands]\n iters: Maximum number of iterations if scarlet doesn't converge\n (Default: 200).\n e_rel: Relative error for convergence (Default: 0.015)\n\n Returns:\n blend: scarlet.Blend object for the initialized sources\n rejected_sources: list of sources (if any) that scarlet was\n unable to initialize the image with.\n \"\"\"\n images = np.transpose(data['blend_images'][index], axes=(2, 0, 1))\n blend_cat = data['blend_list'][index]\n if self.detect_centers:\n peaks = self.get_centers(images)\n else:\n peaks = np.stack((blend_cat['dx'], blend_cat['dy']), axis=1)\n bg_rms = np.array(\n [data['obs_condition'][index][i].mean_sky_level**0.5 for i in range(len(images))])\n blend, rejected_sources = self.scarlet_initialize(images, peaks,\n bg_rms, self.iters,\n self.e_rel)\n im, selected_peaks = [], []\n for m in range(len(blend.sources)):\n im .append(np.transpose(blend.get_model(k=m), axes=(1, 2, 0)))\n selected_peaks.append(\n [blend.components[m].center[1], blend.components[m].center[0]])\n return {'deblend_image': np.array(im), 'peaks': selected_peaks}\n\n\ndef make_true_seg_map(image, threshold):\n \"\"\"Returns a boolean segmentation map corresponding to pixels in\n image above a certain threshold value.threshold\n Args:\n image: Image to estimate segmentation map of\n threshold: Pixels above this threshold are marked as belonging to\n segmentation map\n\n Returns:\n Boolean segmentation map of the image\n \"\"\"\n seg_map = np.zeros_like(image)\n seg_map[image < threshold] = 0\n seg_map[image >= threshold] = 1\n return seg_map.astype(np.bool)\n\n\ndef basic_selection_function(catalog):\n \"\"\"Apply selection cuts to the input catalog.\n\n Only galaxies that satisfy the below criteria are returned:\n 1) i band magnitude less than 27\n 2) Second moment size is less than 3 arcsec.\n Second moments size (r_sec) computed as described in A1 of Chang et.al 2012\n\n Args:\n catalog: CatSim-like catalog from which to sample galaxies.\n\n Returns:\n CatSim-like catalog after applying selection cuts.\n \"\"\"\n f = catalog['fluxnorm_bulge']/(catalog['fluxnorm_disk']+catalog['fluxnorm_bulge'])\n r_sec = np.hypot(catalog['a_d']*(1-f)**0.5*4.66,\n catalog['a_b']*f**0.5*1.46)\n q, = np.where((r_sec <= 4) & (catalog['i_ab'] <= 27))\n return catalog[q]\n\n\ndef basic_sampling_function(Args, catalog):\n \"\"\"Randomly picks entries from input catalog that are brighter than 25.3\n mag in the i band. 
The centers are randomly distributed within 1/5 of the\n stamp size.\n At least one bright galaxy (i<=24) is always selected.\n \"\"\"\n number_of_objects = np.random.randint(0, Args.max_number)\n a = np.hypot(catalog['a_d'], catalog['a_b'])\n cond = (a <= 2) & (a > 0.2)\n q_bright, = np.where(cond & (catalog['i_ab'] <= 24))\n if np.random.random() >= 0.9:\n q, = np.where(cond & (catalog['i_ab'] < 28))\n else:\n q, = np.where(cond & (catalog['i_ab'] <= 25.3))\n blend_catalog = astropy.table.vstack(\n [catalog[np.random.choice(q_bright, size=1)],\n catalog[np.random.choice(q, size=number_of_objects)]])\n blend_catalog['ra'], blend_catalog['dec'] = 0., 0.\n # keep number density of objects constant\n maxshift = Args.stamp_size/30.*number_of_objects**0.5\n dx, dy = btk.create_blend_generator.get_random_center_shift(\n Args, number_of_objects + 1, maxshift=maxshift)\n blend_catalog['ra'] += dx\n blend_catalog['dec'] += dy\n return blend_catalog\n\n\ndef group_sampling_function(Args, catalog):\n \"\"\"Blends are defined from *groups* of galaxies from the CatSim\n catalog previously analyzed with WLD.\n\n The group is centered on the middle of the postage stamp.\n Function only draws galaxies that lie within the postage stamp size\n determined in Args.\n\n Note: the pre-run WLD images are not used here. We only use the pre-run\n catalog (in i band) to identify galaxies that belong to a group.\n \"\"\"\n if not hasattr(Args, 'wld_catalog_name'):\n raise Exception(\"A pre-run WLD catalog name should be input as \"\n \"Args.wld_catalog_name\")\n else:\n wld_catalog = astropy.table.Table.read(Args.wld_catalog_name,\n format='fits')\n # randomly sample a group.\n group_ids = np.unique(wld_catalog['grp_id'][wld_catalog['grp_size'] >= 2])\n group_id = np.random.choice(group_ids, replace=False)\n # get all galaxies belonging to the group.\n ids = wld_catalog['db_id'][wld_catalog['grp_id'] == group_id]\n blend_catalog = astropy.table.vstack(\n [catalog[catalog['galtileid'] == i] for i in ids])\n # Set mean x and y coordinates of the group galaxies to the center of the\n # postage stamp.\n blend_catalog['ra'] -= np.mean(blend_catalog['ra'])\n blend_catalog['dec'] -= np.mean(blend_catalog['dec'])\n # convert ra dec from degrees to arcsec\n blend_catalog['ra'] *= 3600\n blend_catalog['dec'] *= 3600\n # Add small random shift so that center does not perfectly align with\n # the stamp center\n dx, dy = btk.create_blend_generator.get_random_center_shift(\n Args, 1, maxshift=3 * Args.pixel_scale)\n blend_catalog['ra'] += dx\n blend_catalog['dec'] += dy\n # make sure galaxy centers don't lie too close to edge\n cond1 = np.abs(blend_catalog['ra']) < Args.stamp_size / 2. - 3\n cond2 = np.abs(blend_catalog['dec']) < Args.stamp_size / 2. 
- 3\n no_boundary = blend_catalog[cond1 & cond2]\n if len(no_boundary) == 0:\n return no_boundary\n # make sure number of galaxies in blend is less than Args.max_number\n # randomly select max_number of objects if larger.\n num = min([len(no_boundary), Args.max_number])\n select = np.random.choice(range(len(no_boundary)), num, replace=False)\n return no_boundary[select]\n\n\nclass Basic_measure_params(measure.Measurement_params):\n \"\"\"Class to perform detection and deblending with SEP\"\"\"\n\n def get_centers(self, image):\n \"\"\"Return centers detected when object detection and photometry\n is done on input image with SEP.\n Args:\n image: Image (single band) of galaxy to perform measurement on.\n\n Returns:\n centers: x and y coordinates of detected centroids\n \"\"\"\n # set detection threshold to 5 times std of image\n threshold = 5*np.std(image)\n coordinates = skimage.feature.peak_local_max(image, min_distance=2,\n threshold_abs=threshold)\n return np.stack((coordinates[:, 1], coordinates[:, 0]), axis=1)\n\n def get_deblended_images(self, data, index):\n \"\"\"Returns scarlet modeled blend and centers for the given blend\"\"\"\n image = np.mean(data['blend_images'][index], axis=2)\n peaks = self.get_centers(image)\n return {'deblend_image': None, 'peaks': peaks}\n\n\nclass Basic_metric_params(btk.compute_metrics.Metrics_params):\n def __init__(self, *args, **kwargs):\n super(Basic_metric_params, self).__init__(*args, **kwargs)\n \"\"\"Class describing functions to return results of\n detection/deblending/measurement algorithm in meas_generator. Each\n blend results yielded by the meas_generator for a batch.\n \"\"\"\n\n def get_detections(self):\n \"\"\"Returns blend catalog and detection catalog for detction performed\n\n Returns:\n Results of the detection algorithm are returned as:\n true_tables: List of astropy Table of the blend catalogs of the\n batch. Length of tables must be the batch size. x and y\n coordinate values must be under columns named 'dx' and 'dy'\n respectively, in pixels from bottom left corner as (0, 0).\n detected_tables: List of astropy Table of output from detection\n algorithm. Length of tables must be the batch size. x and y\n coordinate values must be under columns named 'dx' and 'dy'\n respectively, in pixels from bottom left corner as (0, 0).\n \"\"\"\n # Astropy table with entries corresponding to true sources\n blend_op, deblend_op, _ = next(self.meas_generator)\n true_tables = blend_op['blend_list']\n detected_tables = []\n for i in range(len(true_tables)):\n detected_centers = deblend_op[i]['peaks']\n detected_table = astropy.table.Table(detected_centers,\n names=['dx', 'dy'])\n detected_tables.append(detected_table)\n return true_tables, detected_tables\n\n\nclass Stack_metric_params(btk.compute_metrics.Metrics_params):\n def __init__(self, *args, **kwargs):\n super(Stack_metric_params, self).__init__(*args, **kwargs)\n \"\"\"Class describing functions to return results of\n detection/deblending/measurement algorithm in meas_generator. Each\n blend results yielded by the meas_generator for a batch.\n \"\"\"\n\n def get_detections(self):\n \"\"\"Returns blend catalog and detection catalog for detection performed\n\n Returns:\n Results of the detection algorithm are returned as:\n true_tables: List of astropy Table of the blend catalogs of the\n batch. Length of tables must be the batch size. 
x and y\n coordinate values must be under columns named 'dx' and 'dy'\n respectively, in pixels from bottom left corner as (0, 0).\n detected_tables: List of astropy Table of output from detection\n algorithm. Length of tables must be the batch size. x and y\n coordinate values must be under columns named 'dx' and 'dy'\n respectively, in pixels from bottom left corner as (0, 0).\n \"\"\"\n # Astropy table with entries corresponding to true sources\n blend_op, _, cat = next(self.meas_generator)\n true_tables = blend_op['blend_list']\n detected_tables = []\n for i in range(len(true_tables)):\n detected_centers = np.stack(\n [cat[i]['base_NaiveCentroid_x'],\n cat[i]['base_NaiveCentroid_y']],\n axis=1)\n detected_table = astropy.table.Table(detected_centers,\n names=['dx', 'dy'])\n detected_tables.append(detected_table)\n return true_tables, detected_tables\n\n\ndef get_detection_eff_matrix(summary_table, num):\n \"\"\"Computes the detection efficiency table for input detection summary\n table.\n\n Input argument num sets the maximum number of true detections for which the\n detection efficiency matrix is to be created for. Detection efficiency is\n computed for number of true objects in the range (1-num).\n\n Args:\n summary(`numpy.array`) : Detection summary as a table [N, 5].\n num(int): Maximum number of true objects to create matrix for. Number\n of columns in matrix will be num-1.\n\n Returns:\n numpy.ndarray of size[num+2, num-1] that shows detection efficiency.\n \"\"\"\n eff_matrix = np.zeros((num + 2, num + 1))\n for i in range(0, num + 1):\n q_true, = np.where(summary_table[:, 0] == i)\n for j in range(0, num + 2):\n if len(q_true) > 0:\n q_det, = np.where(summary_table[q_true, 1] == j)\n eff_matrix[j, i] = len(q_det)\n norm = np.sum(eff_matrix, axis=0)\n # If not detections along a column, set sum to 1 to avoid dividing by zero.\n norm[norm == 0.] = 1\n # normalize over columns.\n eff_matrix = eff_matrix / norm[np.newaxis, :] * 100.\n return eff_matrix\n" ]
[ [ "numpy.zeros_like", "numpy.array", "numpy.random.choice", "numpy.zeros", "numpy.sum", "numpy.mean", "numpy.where", "numpy.std", "numpy.stack", "numpy.random.randint", "numpy.hypot", "numpy.transpose", "numpy.abs", "numpy.random.random", "numpy.unique" ] ]
thepeytongreen/pyCGM
[ "c5a55dec675747a5c2d07e171d5bd2d046218ad5" ]
[ "pyCGM_Single/tests/test_csvOutput.py" ]
[ "import pytest\nimport numpy as np\nimport os\nimport sys\nimport tempfile\nfrom shutil import rmtree\nfrom pyCGM_Single.pyCGM_Helpers import getfilenames\nfrom pyCGM_Single.pycgmCalc import calcAngles, calcKinetics\nfrom pyCGM_Single.pycgmIO import dataAsDict, loadData, loadVSK, writeResult\nfrom pyCGM_Single.pycgmStatic import getStatic, rotmat\n\n#Define several helper functions used in loading and comparing output CSV files\ndef convert_to_pycgm_label(label):\n \"\"\"Convert angle label name to known pycgm angle label.\n\n Since output from other programs can use slightly \n different angle labels, we convert them to the pycgm format \n to make it easier to compare csv outputs across different\n formats.\n\n Parameters\n ----------\n label : string\n String of the label name.\n\n Returns\n -------\n string\n String of the known pycgm label corresponding to `label`.\n \"\"\"\n known_labels = set(['Pelvis','R Hip','L Hip','R Knee','L Knee','R Ankle',\n 'L Ankle','R Foot','L Foot',\n 'Head','Thorax','Neck','Spine','R Shoulder','L Shoulder',\n 'R Elbow','L Elbow','R Wrist','L Wrist'])\n \n label_aliases = {\n #Angle names commonly used to pycgm angle names\n 'RPelvisAngles': 'Pelvis',\n 'RHipAngles' : 'R Hip',\n 'LHipAngles' : 'L Hip',\n 'RKneeAngles' : 'R Knee',\n 'LKneeAngles' : 'L Knee',\n 'RAnkleAngles' : 'R Ankle',\n 'LAnkleAngles' : 'L Ankle',\n 'RFootProgressAngles' : 'R Foot',\n 'LFootProgressAngles' : 'L Foot',\n 'RHeadAngles' : 'Head',\n 'RThoraxAngles' : 'Thorax',\n 'RNeckAngles' : 'Neck',\n 'RSpineAngles' : 'Spine',\n 'RShoulderAngles' : 'R Shoulder',\n 'LShoulderAngles' : 'L Shoulder',\n 'RElbowAngles' : 'R Elbow',\n 'LElbowAngles' : 'L Elbow',\n 'RWristAngles' : 'R Wrist',\n 'LWristAngles' : 'L Wrist'\n }\n\n if label in known_labels:\n return label\n elif label in label_aliases:\n return label_aliases[label]\n else:\n return None\n\ndef load_output_csv(csv_file, header_line_number=5, first_output_row=7, first_output_col=1, label_prefix_len=0):\n \"\"\"\n Loads an output csv of angles or markers into a 2d array where each index\n represents a row in the csv.\n\n This function tests for equality of the 19 angles that pycgm outputs, but allows\n loading of files of different formats. Assumes that each angle has exactly three\n values associated with it (x, y, z).\n\n Parameters\n ----------\n csv_file : string\n String of the path of the filename to be loaded.\n header_line_number : int\n Index of the line number in which the angle or marker labels are written.\n The default header_line_number of 5 represents the output from pycgmIO.writeResult().\n first_output_row : int\n Index of the line number in which the first row of output begins.\n The default first_output_row of 7 represents the output from pycgmIO.writeResult().\n first_output_col : int\n Index of the column number in which the first column of output begins.\n The default first_output_col of 1 represents the output from pycgmIO.writeResult().\n label_prefix_len : int\n Length of the prefix on each label, if it exists. 
0 by default.\n\n Returns\n -------\n output : 2darray\n 2d matrix where each index represents a row of angle data loaded from\n the csv.\n \"\"\"\n known_labels = ['Pelvis','R Hip','L Hip','R Knee','L Knee','R Ankle',\n 'L Ankle','R Foot','L Foot',\n 'Head','Thorax','Neck','Spine','R Shoulder','L Shoulder',\n 'R Elbow','L Elbow','R Wrist','L Wrist']\n output = []\n infile = open(csv_file, 'r')\n lines = infile.readlines()\n #Create a dict of index to known pycgm label:\n index_to_header = {}\n headers = lines[header_line_number].strip().split(',')[first_output_col:]\n for i in range(len(headers)):\n header = headers[i]\n if header != \"\":\n #Find which known pycgm header this header corresponds to, trimming prefix length if needed\n header = header.strip()[label_prefix_len:]\n header = convert_to_pycgm_label(header)\n #Record that index i corresponds to this header\n index_to_header[i] = header\n \n #Loop over all lines starting from the first line of output\n for line in lines[first_output_row:]:\n arr = [0 for i in range(19*3)]\n #Convert line in the csv to an array of floats\n formatted_line = line.strip().split(',')[first_output_col:]\n l = []\n for num in formatted_line:\n try:\n l.append(float(num))\n except:\n l.append(0)\n #Loop over the array of floats, knowing which indices \n #corresponds to which angles from the index_to_header dictionary\n for i in range(len(l)):\n if i in index_to_header:\n label = index_to_header[i]\n if (label != None):\n index = known_labels.index(label) * 3\n arr[index] = l[i]\n arr[index+1] = l[i+1]\n arr[index+2] = l[i+2]\n output.append(arr)\n\n infile.close()\n return np.array(output)\n\ndef load_center_of_mass(csv_file, row_start, col_start):\n \"\"\"Load center of mass values into an array, where each index\n has the center of mass coordinates for a frame.\n\n Parameters\n ----------\n csv_file : string\n Filename of the csv file to be loaded.\n row_start : int\n Index of the first row in which center of mass data begins.\n col_start : int\n Index of the first column in which center of mass data begins.\n\n Returns\n -------\n center_of_mass : 2darray\n Array representation of the center of mass data.\n \"\"\"\n infile = open(csv_file, 'r')\n center_of_mass = []\n lines = infile.readlines()\n for line in lines[row_start:]:\n formatted_line = line.strip().split(',')\n coordinates = formatted_line[col_start:col_start+3]\n coordinates = [float(x) for x in coordinates]\n center_of_mass.append(coordinates)\n infile.close()\n return center_of_mass\n\ndef compare_center_of_mass(result, expected, tolerance):\n \"\"\"Asserts that two arrays of center of mass coordinates\n are equal with a certain tolerance.\n\n Assumes that center of mass coordinates are in mm.\n \n Result and expected must be the same length.\n\n Parameters\n ----------\n result : array\n Array of result center of mass coordinates.\n expected : array\n Array of expected center of mass coordinates.\n tolerance : int\n Sets how large the difference between any two center of mass coordinates\n can be.\n \"\"\"\n for i in range(len(expected)):\n for j in range(len(expected[i])):\n assert abs(result[i][j] - expected[i][j] < tolerance)\n\ndef load_files(dynamic_trial, static_trial, vsk_file):\n \"\"\"\n Uses load functions from pycgmIO to load data from c3d and\n vsk files.\n \"\"\"\n motion_data = loadData(dynamic_trial)\n static_data = loadData(static_trial)\n vsk_data = loadVSK(vsk_file, dict=False)\n return motion_data, static_data, vsk_data\n\ndef get_columns_to_compare(test_folder):\n 
\"\"\"\n Helper function to test the files in SampleData. Gets\n indices of angles that can be compared for equality, depending\n on which file is being compared.\n\n There are 57 angle coordinates to be compared, with 3 coordinates\n for each of 19 angles.\n\n If the global coordinate system is unknown for a given file,\n angles affected by the GCS are ignored.\n Ignored angles are Pelvis, R Foot, L Foot, Head, Thorax, with corresponding\n indices 0, 1, 2 and 21 - 32.\n\n The files in Test_Files also ignore the Neck X coordinate, at \n index 33.\n \"\"\"\n gcs_ignore = [i for i in range(21, 33)]\n gcs_ignore.extend([0,1,2])\n columns = [i for i in range(57)]\n if (test_folder == 'ROM'):\n return columns\n if (test_folder == '59993_Frame'):\n for i in gcs_ignore:\n columns.remove(i)\n return columns\n if (test_folder == 'Test_Files'):\n for i in gcs_ignore:\n columns.remove(i)\n columns.remove(33)\n return columns\n\nclass TestCSVOutput:\n @classmethod\n def setup_class(self):\n \"\"\"\n Called once for all tests in TestCSVOutput.\n Sets rounding precision, and sets the current working\n directory to the pyCGM folder. Sets the current python version\n and loads filenames used for testing.\n\n We also use the pycgm functions to generate and load output CSV data\n and load them into the class.\n \"\"\"\n self.rounding_precision = 8\n cwd = os.getcwd()\n if (cwd.split(os.sep)[-1]==\"pyCGM_Single\"):\n parent = os.path.dirname(cwd)\n os.chdir(parent)\n self.cwd = os.getcwd()\n self.pyver = sys.version_info.major\n\n #Create a temporary directory used for writing CSVs to\n if (self.pyver == 2):\n self.tmp_dir_name = tempfile.mkdtemp()\n else:\n self.tmp_dir = tempfile.TemporaryDirectory()\n self.tmp_dir_name = self.tmp_dir.name\n \n #Create file path names for the files being tested\n self.sample_data_directory = os.path.join(self.cwd, \"SampleData\")\n self.directory_59993_Frame = os.path.join(self.sample_data_directory, '59993_Frame')\n self.directory_ROM = os.path.join(self.sample_data_directory, 'ROM')\n self.directory_test = os.path.join(self.sample_data_directory, 'Test_Files')\n\n #Load outputs to be tested for SampleData/59993_Frame/\n\n self.filename_59993_Frame_dynamic = os.path.join(self.directory_59993_Frame, '59993_Frame_Dynamic.c3d')\n self.filename_59993_Frame_static = os.path.join(self.directory_59993_Frame, '59993_Frame_Static.c3d')\n self.filename_59993_Frame_vsk = os.path.join(self.directory_59993_Frame, '59993_Frame_SM.vsk')\n\n motion_data,static_data,vsk_data = load_files(self.filename_59993_Frame_dynamic, self.filename_59993_Frame_static, self.filename_59993_Frame_vsk)\n cal_SM = getStatic(static_data,vsk_data,flat_foot=False)\n kinematics,joint_centers=calcAngles(motion_data,start=0, end=500,vsk=cal_SM,splitAnglesAxis=False,formatData=False,returnjoints=True)\n \n outfile = os.path.join(self.tmp_dir_name, 'output_59993_Frame')\n writeResult(kinematics,outfile,angles=True,axis=False)\n expected_file = os.path.join(self.directory_59993_Frame,'pycgm_results.csv.csv')\n\n self.result_59993_Frame = load_output_csv(outfile + '.csv')\n self.expected_59993_Frame = load_output_csv(expected_file)\n\n #Load outputs to be tested for SampleData/ROM/\n\n self.filename_ROM_dynamic = os.path.join(self.directory_ROM, 'Sample_Dynamic.c3d')\n self.filename_ROM_static = os.path.join(self.directory_ROM, 'Sample_Static.c3d')\n self.filename_ROM_vsk = os.path.join(self.directory_ROM, 'Sample_SM.vsk')\n\n motion_data,static_data,vsk_data = load_files(self.filename_ROM_dynamic, 
self.filename_ROM_static, self.filename_ROM_vsk)\n cal_SM = getStatic(static_data,vsk_data,flat_foot=False)\n kinematics,joint_centers=calcAngles(motion_data,vsk=cal_SM,splitAnglesAxis=False,formatData=False,returnjoints=True)\n \n outfile = os.path.join(self.tmp_dir_name, 'output_ROM')\n writeResult(kinematics,outfile,angles=True,axis=False)\n expected_file = os.path.join(self.directory_ROM,'pycgm_results.csv.csv')\n\n self.result_ROM = load_output_csv(outfile + '.csv')\n self.expected_ROM = load_output_csv(expected_file)\n\n #Load outputs to be tested for SampleData/Test_Files/\n\n self.filename_test_dynamic = os.path.join(self.directory_test, 'Movement_trial.c3d')\n self.filename_test_static = os.path.join(self.directory_test, 'Static_trial.c3d')\n self.filename_test_vsk = os.path.join(self.directory_test, 'Test.vsk')\n\n motion_data,static_data,vsk_data = load_files(self.filename_test_dynamic, self.filename_test_static, self.filename_test_vsk)\n cal_SM = getStatic(static_data,vsk_data,flat_foot=False)\n kinematics,joint_centers=calcAngles(motion_data,vsk=cal_SM,splitAnglesAxis=False,formatData=False,returnjoints=True)\n \n outfile = os.path.join(self.tmp_dir_name, 'output_Test_Files')\n writeResult(kinematics,outfile,angles=True,axis=False)\n expected_file = os.path.join(self.directory_test,'Movement_trial.csv')\n\n self.result_Test_Files = load_output_csv(outfile + '.csv')\n self.expected_Test_Files = load_output_csv(expected_file, header_line_number=2, first_output_row=5, first_output_col=2, label_prefix_len=5)\n\n @classmethod\n def teardown_class(self):\n \"\"\"\n Called once after all tests in TestCSVOutput are finished running.\n If using Python 2, perform cleanup of the previously created\n temporary directory in setup_class(). Cleanup is done automatically in \n Python 3. 
\n \"\"\"\n if (self.pyver == 2):\n rmtree(self.tmp_dir_name)\n\n @pytest.fixture\n def angles_ROM(self, request):\n column = request.param\n return self.result_ROM[:,column], self.expected_ROM[:,column]\n\n @pytest.mark.parametrize(\"angles_ROM\", get_columns_to_compare(\"ROM\"), indirect=True)\n def test_ROM(self, angles_ROM):\n \"\"\"\n Tests pycgm output csv files using input files from SampleData/ROM/.\n \"\"\"\n result_angles, expected_angles = angles_ROM\n np.testing.assert_almost_equal(result_angles, expected_angles, self.rounding_precision)\n\n @pytest.fixture\n def angles_59993_Frame(self, request):\n column = request.param\n return self.result_59993_Frame[:,column], self.expected_59993_Frame[:,column]\n\n @pytest.mark.parametrize(\"angles_59993_Frame\", get_columns_to_compare(\"59993_Frame\"), indirect=True)\n def test_59993_Frame(self, angles_59993_Frame):\n \"\"\"\n Tests pycgm output csv files using input files from SampleData/ROM/.\n \"\"\"\n result_angles, expected_angles = angles_59993_Frame\n np.testing.assert_almost_equal(result_angles, expected_angles, self.rounding_precision)\n\n @pytest.fixture\n def angles_Test_Files(self, request):\n column = request.param\n return self.result_Test_Files[:,column], self.expected_Test_Files[:,column]\n\n @pytest.mark.parametrize(\"angles_Test_Files\", get_columns_to_compare(\"Test_Files\"), indirect=True)\n def test_Test_Files(self, angles_Test_Files):\n \"\"\"\n Tests pycgm output csv files using input files from SampleData/ROM/.\n \"\"\"\n result_angles, expected_angles = angles_Test_Files\n np.testing.assert_almost_equal(result_angles, expected_angles, 3)\n\n def test_Test_Files_center_of_mass(self):\n \"\"\"\n Test center of mass output values using sample files in SampleData/Test_Files/.\n \"\"\"\n motion_data,static_data,vsk_data = load_files(self.filename_test_dynamic, self.filename_test_static, self.filename_test_vsk)\n cal_SM = getStatic(static_data,vsk_data,flat_foot=False)\n kinematics,joint_centers=calcAngles(motion_data,vsk=cal_SM,splitAnglesAxis=False,formatData=False,returnjoints=True)\n kinetics = calcKinetics(joint_centers, cal_SM['Bodymass'])\n expected = load_center_of_mass(os.path.join(self.directory_test,'Movement_trial.csv'), 5, 2)\n compare_center_of_mass(kinetics, expected, 30)" ]
[ [ "numpy.testing.assert_almost_equal", "numpy.array" ] ]
skamdar/gesture_recognition
[ "87613dcf923e2c518e2d58853d6b6bb9a8296d17" ]
[ "utils/grit_json.py" ]
[ "# convert annotation data in csv file to dict and dump the dict in json format.\n# dict looks as follow:\n\n# {'labels': ['ApplyEyeMakeup', 'ApplyLipstick', ...],\n#\n# 'database': {'v_ApplyEyeMakeup_g08_c01': {'subset': 'training', 'annotations': {'lable': 'ApplyEyeMakeup'}},\n# 'v_ApplyLipstick_g17_c05': {'subset': 'training', 'annotations': {'label': 'ApplyLipstick'}},\n# ...\n# }\n# }\n\nfrom __future__ import print_function, division\nimport os\nimport sys\nimport json\nimport pandas as pd\n\n\ndef convert_csv_to_dict(csv_path, subset):\n data = pd.read_csv(csv_path, delimiter=' ', header=None)\n keys = []\n key_labels = []\n for i in range(data.shape[0]):\n row = data.ix[i, :]\n slash_rows = data.ix[i, 0].split('/')\n class_name = slash_rows[0]\n basename = slash_rows[1].split('.')[0]\n\n keys.append(basename)\n key_labels.append(class_name)\n\n database = {}\n for i in range(len(keys)):\n key = keys[i]\n database[key] = {}\n database[key]['subset'] = subset\n label = key_labels[i]\n database[key]['annotations'] = {'label': label}\n\n # database dict looks like:\n # {'v_ApplyEyeMakeup_g08_c01': {'subset': 'training', 'annotations': {'lable': 'ApplyEyeMakeup'}},\n # 'v_ApplyLipstick_g17_c05': {'subset': 'training', 'annotations': {'label': 'ApplyLipstick'}}...}\n\n return database\n\n\ndef load_labels(label_csv_path):\n data = pd.read_csv(label_csv_path, delimiter=' ', header=None)\n labels = []\n for i in range(data.shape[0]):\n labels.append(data.ix[i, 1])\n return labels\n\n\ndef convert_grit_csv_to_activitynet_json(label_csv_path, train_csv_path,\n val_csv_path, test_csv_path, dst_json_path):\n labels = load_labels(label_csv_path)\n train_database = convert_csv_to_dict(train_csv_path, 'training')\n val_database = convert_csv_to_dict(val_csv_path, 'validation')\n test_database = convert_csv_to_dict(test_csv_path, 'testing')\n\n dst_data = {}\n dst_data['labels'] = labels\n dst_data['database'] = {}\n dst_data['database'].update(train_database)\n dst_data['database'].update(val_database)\n dst_data['database'].update(test_database)\n\n # dst_data looks like:\n # {'labels': ['ApplyEyeMakeup', 'ApplyLipstick', ...],\n # 'database': {'v_ApplyEyeMakeup_g08_c01': {'subset': 'training', 'annotations': {'lable': 'ApplyEyeMakeup'}},\n # 'v_ApplyLipstick_g17_c05': {'subset': 'training', 'annotations': {'label': 'ApplyLipstick'}}...}}\n\n with open(dst_json_path, 'w') as dst_file:\n json.dump(dst_data, dst_file)\n\n\nif __name__ == '__main__':\n csv_dir_path = sys.argv[1]\n\n # we have 1 split containing train, test and validation data\n\n label_csv_path = os.path.join(csv_dir_path, 'classInd.txt')\n train_csv_path = os.path.join(csv_dir_path, 'trainlist.txt')\n val_csv_path = os.path.join(csv_dir_path, 'vallist.txt')\n test_csv_path = os.path.join(csv_dir_path, 'testlist.txt')\n dst_json_path = os.path.join(csv_dir_path, 'grit.json')\n\n convert_grit_csv_to_activitynet_json(label_csv_path, train_csv_path,\n val_csv_path, test_csv_path, dst_json_path)\n" ]
[ [ "pandas.read_csv" ] ]
haoxiangyang89/COVID_Staged_Alert
[ "4c2cc5ef1d38c140875380a5f10a0fe1eaf8a47a" ]
[ "InterventionsMIP/reporting/plotting.py" ]
[ "'''\nModule for plotting function\n'''\nimport os\nimport sys\nimport numpy as np\nimport pandas as pd\nimport time\nimport argparse\nimport calendar as py_cal\nfrom pathlib import Path\nfrom matplotlib import pyplot as plt\nfrom matplotlib.ticker import FuncFormatter\nimport matplotlib.dates as mdates\nimport matplotlib.cbook as cbook\nfrom matplotlib import rc\nimport matplotlib.patches as patches\nimport matplotlib.colors as pltcolors\nfrom collections import defaultdict\nfrom utils import round_closest, roundup\nfrom InterventionsMIP import plots_path, instances_path\nimport copy\n\nplt.rcParams['hatch.linewidth'] = 3.0\n\ncolors = {'S': 'b', 'E': 'y', 'IA': 'c', 'IY': 'm', 'IH': 'k', 'R': 'g', 'D': 'k', 'ToIHT': 'teal', 'ICU': 'k', 'ToICU': 'teal', 'IHT': 'k', 'ITot': 'k'}\nlight_colors = {'IH':'silver','ToIHT':'paleturquoise', 'ICU':'silver', 'ToICU': 'paleturquoise', 'IHT': 'silver', 'ITot': 'silver'}\nl_styles = {'sim': '-', 'opt': '--'}\ncompartment_names = {\n 'ITot': 'Total Infectious',\n 'IY': 'Symptomatic',\n 'IH': 'General Beds',\n 'ToIHT': 'COVID-19 Hospital Admissions\\n(Seven-day Average)',\n 'D': 'Deaths',\n 'R': 'Recovered',\n 'S': 'Susceptible',\n 'ICU': 'COVID-19 ICU Patients',\n 'IHT': 'COVID-19 Hospitalizations',\n 'ToICU': 'Daily COVID-19 ICU Admissions'\n}\n\ndef colorDecide(u,tier_by_tr):\n preCoded_color = [\"blue\",\"yellow\",\"orange\",\"red\"]\n colorDict = {}\n for tKey in tier_by_tr.keys():\n colorDict[tier_by_tr[tKey][\"color\"]] = tKey\n if u < colorDict[\"blue\"]:\n return \"white\",\"\"\n else:\n # if it is a color above blue, forced to be below red\n belowTier = -1\n aboveTier = 2\n for item in preCoded_color:\n if (u > colorDict[item])and(colorDict[item] >= belowTier):\n belowTier = colorDict[item]\n if (u < colorDict[item])and(colorDict[item] <= aboveTier):\n aboveTier = colorDict[item]\n aboveColor = pltcolors.to_rgb(tier_by_tr[aboveTier][\"color\"])\n belowColor = pltcolors.to_rgb(tier_by_tr[belowTier][\"color\"])\n ratio = (u - belowTier)/(aboveTier - belowTier)\n setcolor = ratio*np.array(aboveColor) + (1-ratio)*np.array(belowColor)\n return setcolor,tier_by_tr[aboveTier][\"color\"]+\\\n \"_\"+tier_by_tr[belowTier][\"color\"]+\\\n \"_\"+str(ratio)\n\ndef find_central_path(city, states_to_plot_temp, states_ts_temp, real_hosp, real_icu, real_new_admission=None):\n '''\n Obtains the central path id\n\n Args:\n TO DO\n '''\n central_path_id = 0\n weights_obs = 0.1 #0.005\n weights = np.array(np.repeat((1 - weights_obs)/12, 12)) \n data_metrics = np.empty((300, 0), float)\n for v_t in states_to_plot_temp:\n data_metrics = np.append(data_metrics, np.max(states_ts_temp[v_t], axis = 1, keepdims = True), 1) \n data_metrics = np.append(data_metrics, np.argmax(states_ts_temp[v_t], axis = 1).reshape(len(states_ts_temp[v_t]), 1), 1) \n data_metrics = np.append(data_metrics, np.quantile(states_ts_temp[v_t], 0.5, axis = 1, keepdims = True), 1) \n data_metrics = np.append(data_metrics, np.sum(states_ts_temp[v_t], axis = 1, keepdims = True), 1)\n \n #Standardize\n std_data_metrics = (data_metrics - np.mean(data_metrics, axis=0)) / np.std(data_metrics, axis=0)\n errorlist1 = (np.square(std_data_metrics).dot(weights)).reshape(len(states_ts_temp['IHT']), 1) \n if city == 'austin':\n w = 7.3*(1 - 0.10896) + 9.9*0.10896\n #Metric for deviations from the observed data\n if np.sum(real_hosp) > np.sum(real_icu):\n x_dev = np.mean(np.square((states_ts_temp['IHT'][:, 0:len(real_hosp)] - real_hosp[0:])), axis = 1, keepdims = True)\n z_dev = 
np.mean(np.square((states_ts_temp['ICU'][:, 0:len(real_icu)] - real_icu[0:])), axis = 1, keepdims = True)\n else:\n x_dev = np.mean(np.square((states_ts_temp['IHT'][:, 0:len(real_icu)] - real_icu[0:])), axis = 1, keepdims = True)\n z_dev = np.mean(np.square((states_ts_temp['ICU'][:, 0:len(real_hosp)] - real_hosp[0:])), axis = 1, keepdims = True)\n y_dev = np.mean(np.square((states_ts_temp['ToIHT'][:, 0:len(real_new_admission)] - real_new_admission[0:])), axis = 1, keepdims = True)\n errorlist2 = 1/(np.square(w))*x_dev + np.square(2.5)/(np.square(w))*z_dev + y_dev\n else:\n w = 7.3*(1 - 0.10896) + 9.9*0.10896\n #Metric for deviations from the observed data\n if np.sum(real_hosp) > np.sum(real_icu):\n x_dev = np.mean(np.square((states_ts_temp['IHT'][:, 0:len(real_hosp)] - real_hosp[0:])), axis = 1, keepdims = True)\n z_dev = np.mean(np.square((states_ts_temp['ICU'][:, 0:len(real_icu)] - real_icu[0:])), axis = 1, keepdims = True)\n else:\n x_dev = np.mean(np.square((states_ts_temp['IHT'][:, 0:len(real_icu)] - real_icu[0:])), axis = 1, keepdims = True)\n z_dev = np.mean(np.square((states_ts_temp['ICU'][:, 0:len(real_hosp)] - real_hosp[0:])), axis = 1, keepdims = True)\n errorlist2 = 1/(np.square(w))*x_dev + np.square(2.5)/(np.square(w))*z_dev\n \n errorlist = (errorlist1 + weights_obs*errorlist2).tolist()\n central_path_id = errorlist.index(min(errorlist))\n \n if central_path_id == 0:\n sorted_er = sorted(errorlist)\n central_path_id = errorlist.index(sorted_er[1])\n\n print(\"central_path_id: \", central_path_id) \n return central_path_id\n\ndef change_avg(all_st, min_st ,max_st, mean_st, nday_avg):\n # obtain the n-day average of the statistics\n all_st_copy = copy.deepcopy(all_st)\n min_st_copy = copy.deepcopy(min_st)\n max_st_copy = copy.deepcopy(max_st)\n mean_st_copy = copy.deepcopy(mean_st)\n \n # change all statistics to n-day average\n for v in all_st_copy.keys():\n if v not in ['z', 'tier_history']:\n for i in range(len(all_st_copy[v])):\n for t in range(len(all_st_copy[v][i])):\n all_st_copy[v][i][t] = np.mean(all_st[v][i][np.maximum(t-nday_avg,0):t+1])\n for t in range(len(min_st_copy[v])):\n min_st_copy[v][t] = np.mean(min_st[v][np.maximum(t-nday_avg,0):t+1])\n for t in range(len(max_st_copy[v])):\n max_st_copy[v][t] = np.mean(max_st[v][np.maximum(t-nday_avg,0):t+1])\n for t in range(len(mean_st_copy[v])):\n mean_st_copy[v][t] = np.mean(mean_st[v][np.maximum(t-nday_avg,0):t+1])\n \n return all_st_copy,min_st_copy,max_st_copy,mean_st_copy\n\ndef plot_multi_tier_sims(instance_name,\n instance,\n policy,\n profiles,\n profile_labels,\n real_hosp,\n plot_left_axis=['IH'],\n plot_right_axis=[],\n scale_plot=False,\n align_axes=True,\n show=True,\n plot_triggers=False,\n plot_trigger_annotations=False,\n plot_legend=False,\n y_lim=None,\n n_replicas=300,\n config=None,\n hosp_beds_list=None,\n real_new_admission=None,\n real_hosp_or_icu=None,\n bed_scale=1,\n is_representative_path=False,\n t_start = -1,\n central_path_id=0,\n cap_path_id=0,\n vertical_fill=True,\n nday_avg=None,\n **kwargs):\n '''\n Plots a list of profiles in the same figure. 
Each profile corresponds\n to a stochastic replica for the given instance.\n\n Args:\n profiles (list of dict): a list of dictionaries that contain epi vars profiles\n profile_labels (list of str): name of each profile\n plot_only (list of str): list of variable names to be plot\n '''\n plt.rcParams[\"font.size\"] = \"18\"\n T = kwargs['T']\n if \"add_tiers\" in kwargs.keys():\n add_tiers = kwargs[\"add_tiers\"]\n cal = instance.cal\n population = instance.N.sum()\n interventions = kwargs['interventions']\n policy_params = kwargs['policy_params']\n if hosp_beds_list is None:\n hosp_beds_list = [instance.hosp_beds]\n hosp_beds = hosp_beds_list[0]\n \n lb_band = 5\n ub_band = 95\n \n text_size = 28\n fig, (ax1, actions_ax) = plt.subplots(2, 1, figsize=(17, 9), gridspec_kw={'height_ratios': [10, 1.1]})\n # Main axis\n # ax1.set_xlabel('Time')\n ax2 = None\n # Policy axis\n policy_ax = ax1.twinx()\n #policy_ax.set_ylabel('Social Distance')\n # If there are plot to be on the right axis, move policy_ax\n # Second, show the right spine.\n if len(plot_right_axis) > 0:\n # Create second axis\n ax2 = ax1.twinx()\n # Fix policy axis\n policy_ax.spines[\"right\"].set_position((\"axes\", 1.1))\n make_patch_spines_invisible(policy_ax)\n policy_ax.spines[\"right\"].set_visible(True)\n \n # Start plots\n max_y_lim_1 = population if 'S' in plot_left_axis or 'R' in plot_left_axis else 0\n max_y_lim_2 = population if 'S' in plot_right_axis or 'R' in plot_right_axis else 0\n plotted_lines = []\n \n # Add IHT field\n if 'ICU' in profiles[0].keys():\n for p in profiles:\n p['IHT'] = p['IH'] + p['ICU']\n \n # Transform data of interest\n states_to_plot = plot_left_axis + plot_right_axis\n last_day_hosp_data = len(real_hosp) - 1\n lb_hosp = real_hosp[-1] * (1 - config['div_filter_frac'])\n ub_hosp = real_hosp[-1] * (1 + config['div_filter_frac'])\n states_ts = {v: np.vstack(list(np.sum(p[v], axis=(1, 2))[:T] for p in profiles)) for v in states_to_plot}\n states_ts['z'] = np.vstack(list(p['z'][:T] for p in profiles))\n states_ts['tier_history'] = np.vstack(list(p['tier_history'][:T] for p in profiles))\n \n states_to_plot_temp = ['IHT','ToIHT', 'ICU']\n states_ts_temp = {v: np.vstack(list(np.sum(p[v], axis=(1, 2))[:T] for p in profiles)) for v in states_to_plot_temp}\n \n \n central_path = 0\n representative_path_id = 0\n print(\"Printed seed is: \", profiles[0][\"seed\"])\n\n if is_representative_path == False:\n central_path = central_path_id\n mean_st = {v: states_ts[v][central_path] if v not in ['z', 'tier_history'] else states_ts[v] for v in states_ts}\n else:\n representative_path_id = find_central_path(instance.city, states_to_plot_temp, states_ts_temp, real_hosp, real_hosp_or_icu, real_new_admission)\n mean_st = {v: states_ts[v][representative_path_id] if v not in ['z', 'tier_history'] else states_ts[v] for v in states_ts}\n central_path = representative_path_id\n cap_path_id = representative_path_id\n\n all_st = {v: states_ts[v][:] if v not in ['z', 'tier_history'] else states_ts[v] for v in states_ts}\n min_st = {\n v: np.percentile(states_ts[v], q=lb_band, axis=0) if v not in ['z', 'tier_history'] else states_ts[v]\n for v in states_ts\n }\n max_st = {\n v: np.percentile(states_ts[v], q=ub_band, axis=0) if v not in ['z', 'tier_history'] else states_ts[v]\n for v in states_ts\n }\n if nday_avg is not None:\n all_st, min_st ,max_st, mean_st = change_avg(all_st, min_st ,max_st, mean_st, nday_avg)\n # People that arrive above capacity\n # np.mean(np.sum(states_ts['IYIH']*(states_ts['IH']>=3239) , 1))\n 
new_profiles = [mean_st, min_st, max_st]\n \n # Stats\n all_states = ['S', 'E', 'IH', 'IA', 'IY', 'R', 'D']\n if 'ICU' in profiles[0].keys():\n all_states.append('ICU')\n all_states.append('IHT')\n all_states.append('ToICU')\n all_states_ts = {v: np.vstack(list(np.sum(p[v], axis=(1, 2))[:T] for p in profiles)) for v in all_states}\n #assert len(all_states_ts['IH']) >= n_replicas\n for v in all_states_ts:\n all_states_ts[v] = all_states_ts[v][:n_replicas]\n #assert len(all_states_ts['IH']) == n_replicas\n # Hospitalizations Report\n # Probabilities of reaching x% of the capacity\n prob50 = np.sum(np.any(all_states_ts['IH'] >= 0.5 * hosp_beds, axis=1)) / len(all_states_ts['IH'])\n prob60 = np.sum(np.any(all_states_ts['IH'] >= 0.6 * hosp_beds, axis=1)) / len(all_states_ts['IH'])\n prob70 = np.sum(np.any(all_states_ts['IH'] >= 0.7 * hosp_beds, axis=1)) / len(all_states_ts['IH'])\n prob80 = np.sum(np.any(all_states_ts['IH'] >= 0.8 * hosp_beds, axis=1)) / len(all_states_ts['IH'])\n prob90 = np.sum(np.any(all_states_ts['IH'] >= 0.9 * hosp_beds, axis=1)) / len(all_states_ts['IH'])\n prob100 = np.sum(np.any(all_states_ts['IH'] >= 1 * hosp_beds, axis=1)) / len(all_states_ts['IH'])\n prob110 = np.sum(np.any(all_states_ts['IH'] >= 1.1 * hosp_beds, axis=1)) / len(all_states_ts['IH'])\n n_replicas_used = len(all_states_ts['IH'])\n print(f\"{'P 50':10s}{'P 60':10s}{'P 70':10s}{'P 80':10s}{'P 90':10s}{'P 100':10s}{'P 110':10s}{'Scenarios':10s}\")\n print(\n f\"{prob50:<10.4f}{prob60:<10.4f}{prob70:<10.4f}{prob80:<10.4f}{prob90:<10.4f}{prob100:<10.4f}{prob110:<10.4f}{n_replicas_used}\"\n )\n # Min, Med, Max at the peak\n print('Hospitalization Peaks')\n peak_days = np.argmax(all_states_ts['IH'], axis=1)\n peak_vals = np.take_along_axis(all_states_ts['IH'], peak_days[:, None], axis=1)\n print(f'{\"Percentile (%)\":<15s} {\"Peak IH\":<15s} {\"Date\":15}')\n for q in [0, 5, 10, 50, 90, 100]:\n peak_day_percentile = int(np.percentile(peak_days, q))\n peak_percentile = np.percentile(peak_vals, q)\n print(f'{q:<15} {peak_percentile:<15.0f} {str(cal.calendar[peak_day_percentile])}')\n \n # Deaths\n all_states_ts_ind = {\n v: np.array(list(p[v][:T, :, :] for p in profiles)) for v in all_states\n }\n \n #assert len(all_states_ts_ind['IH']) >= n_replicas\n for v in all_states_ts:\n all_states_ts_ind[v] = all_states_ts_ind[v][:n_replicas]\n #assert len(all_states_ts_ind['IH']) == n_replicas\n # Deaths data\n avg_deaths_by_group = np.round(np.mean(all_states_ts_ind['D'][:, -1, :, :], axis=0).reshape((10, 1)), 0)\n Median_deaths = np.round(np.percentile(np.sum(all_states_ts_ind['D'][:, -1, :, :], axis=(1, 2)), 50))\n CI5_deaths = np.round(np.percentile(np.sum(all_states_ts_ind['D'][:, -1, :, :], axis=(1, 2)), lb_band))\n CI95_deaths = np.round(np.percentile(np.sum(all_states_ts_ind['D'][:, -1, :, :], axis=(1, 2)), ub_band))\n print('Deaths End Horizon')\n print(f'Point forecast {all_states_ts[\"D\"][0][-1]}')\n print(f'Mean {avg_deaths_by_group.sum()} Median:{Median_deaths} CI_5_95:[{CI5_deaths}-{CI95_deaths}]')\n print('Fraction by Age and Risk Group (1-5, L-H)')\n print(100 * avg_deaths_by_group.reshape(5, 2) / avg_deaths_by_group.sum())\n R_mean = np.mean(all_states_ts['R'][:, -1] / population)\n print(f'R End Horizon {R_mean}')\n # Policy\n lockdown_threshold = policy.lockdown_thresholds[0]\n # fdmi = policy_params['first_day_month_index']\n # policy = {(m, y): lockdown_threshold[fdmi[m, y]] for (m, y) in fdmi if fdmi[m, y] < T}\n # print('Lockdown Threshold:')\n # print(policy)\n hide = 1\n l_style = 
l_styles['sim']\n for v in plot_left_axis:\n max_y_lim_1 = np.maximum(max_y_lim_1, np.max(max_st[v]))\n label_v = compartment_names[v]\n if v != 'IYIHa':\n v_a = ax1.plot(mean_st[v].T * bed_scale, c=colors[v], linestyle=l_style, linewidth=2, label=label_v, alpha=1 * hide, zorder = 50)\n plotted_lines.append(v_a[0])\n v_aa = ax1.plot(all_st[v].T * bed_scale, c=light_colors[v], linestyle=l_style, linewidth=1, label=label_v, alpha=0.8 * hide)\n plotted_lines.append(v_aa[0])\n #if central_path != 0:\n # ax1.fill_between(range(len(max_st[v])),\n # max_st[v],\n # min_st[v],\n # color=colors[v],\n # linestyle=l_style,\n # facecolor=\"none\",\n # linewidth=0.0,\n # alpha=0.5 * hide)\n if v == 'IH' or v == 'ICU' or v == 'IHT' or v == 'ITot':\n real_h_plot = ax1.scatter(range(len(real_hosp_or_icu)), real_hosp_or_icu, color='maroon', label='Actual hospitalizations',zorder=100,s=15)\n max_y_lim_1 = np.maximum(roundup(np.max(hosp_beds_list), 100), max_y_lim_1)\n try:\n if v == 'IH' or v == 'IHT':\n ax1.plot(profiles[cap_path_id]['capacity'][:T], color='k', linestyle='-', linewidth=3)\n else:\n for hosp_beds_lines in hosp_beds_list:\n ax1.hlines(hosp_beds_lines, 0, T, color='k', linestyle='-', linewidth=3)\n except:\n for hosp_beds_lines in hosp_beds_list:\n ax1.hlines(hosp_beds_lines, 0, T, color='k', linestyle='-', linewidth=3)\n xpos = 30 #440 #200 # 440\n if plot_trigger_annotations:\n ax1.annotate('Hospital capacity', (xpos, hosp_beds + 150),\n xycoords='data',\n color=colors[v],\n annotation_clip=True,\n fontsize=text_size + 2) #\n if plot_triggers:\n ax1.hlines(policy_params['hosp_beds'] * 0.6, 0, T, 'b', '-', linewidth=3)\n for tier_ix, tier in enumerate(policy.tiers):\n ax1.plot([policy.lockdown_thresholds[tier_ix][0]]*T, color=tier['color'], linewidth=5)\n xpos = np.minimum(405, int(T * 0.65)) #180 #405\n xytext = (xpos, lockdown_threshold[xpos] - 20)\n\n if plot_trigger_annotations:\n ax1.annotate('Safety threshold', (xpos, policy_params['hosp_level_release'] - 250),\n xycoords='data',\n color='b',\n annotation_clip=True,\n fontsize=text_size + 2)\n if v == 'ToIHT' or v == 'ToICU':\n if v == 'ToIHT':\n if real_new_admission is not None:\n real_h_plot = ax1.scatter(range(len(real_new_admission)), real_new_admission, color='maroon', label='New hospital admission',zorder=100,s=15)\n if plot_triggers and vertical_fill:\n #if central_path > 0:\n # IYIH_mov_ave = []\n # for t in range(T):\n # IYIH_mov_ave.append(np.mean(mean_st[v][np.maximum(0, t - 7):t]))\n # v_avg = ax1.plot(IYIH_mov_ave, c='black', linestyle=l_style, label=f'Moving Avg. 
{label_v}')\n # plotted_lines.append(v_avg[0])\n for tier_ix, tier in enumerate(policy.tiers):\n ax1.plot([policy.lockdown_thresholds[tier_ix][0]]*T, color=tier['color'], linewidth=5)\n xpos = np.minimum(405, int(T * 0.65)) #180 #405\n xytext = (xpos, lockdown_threshold[xpos] - 20)\n if plot_trigger_annotations:\n ax1.annotate('Lock-down threshold',\n xy=(120, lockdown_threshold[120]),\n xytext=xytext,\n xycoords='data',\n textcoords='data',\n color='b',\n annotation_clip=True,\n fontsize=text_size + 2)\n if \"plot_ACS_triggers\" in kwargs.keys():\n if kwargs[\"plot_ACS_triggers\"]:\n ax1.plot([policy.acs_thrs]*T, color='k', linewidth=5)\n for v in plot_right_axis:\n max_y_lim_2 = np.maximum(max_y_lim_2, np.max(max_st[v]))\n label_v = compartment_names[v]\n v_a = ax2.plot(mean_st[v].T, c=colors[v], linestyle=l_style, label=label_v)\n plotted_lines.append(v_a[0])\n ax2.fill_between(range(T), min_st[v], max_st[v], color=colors[v], linestyle=l_style, alpha=0.5)\n if v == 'IH':\n max_y_lim_2 = np.maximum(roundup(hosp_beds, 100), max_y_lim_2)\n ax2.hlines(hosp_beds, 0, T, color='r', linestyle='--', label='N. of beds')\n if plot_triggers:\n ax2.hlines(policy_params['hosp_level_release'], 0, T, 'b', '--')\n ax2.annotate('Trigger - Current hospitalizations ',\n (0.05, 0.78 * policy_params['hosp_level_release'] / max_y_lim_1),\n xycoords='axes fraction',\n color='b',\n annotation_clip=True)\n if v == 'ToIHT':\n if plot_triggers:\n ax2.plot(lockdown_threshold[:T], 'b-')\n xytext = (160, lockdown_threshold[160] - 15)\n ax2.annotate('Trigger - Avg. Daily Hospitalization',\n xy=(120, lockdown_threshold[120]),\n xytext=xytext,\n xycoords='data',\n textcoords='data',\n color='b',\n annotation_clip=True)\n # ax2.annotate(' ',\n # xy=(85, lockdown_threshold[85]),\n # xytext=xytext,\n # xycoords='data',\n # textcoords='data',\n # arrowprops={'arrowstyle': '-|>'},\n # color='b',\n # annotation_clip=True)\n \n # Plotting the policy\n # Plot school closure and cocooning\n tiers = policy.tiers\n z_ts = profiles[central_path]['z'][:T]\n tier_h = profiles[central_path]['tier_history'][:T]\n print('seed was', profiles[central_path]['seed'])\n sc_co = [interventions[k].school_closure for k in z_ts]\n unique_policies = set(sc_co)\n sd_lvl = [interventions[k].social_distance for k in z_ts]\n sd_levels = [tier['transmission_reduction'] for tier in tiers] + [0, 0.95] + sd_lvl\n unique_sd_policies = list(set(sd_levels))\n unique_sd_policies.sort()\n intervals = {u: [False for t in range(len(z_ts) + 1)] for u in unique_policies}\n intervals_sd = {u: [False for t in range(len(z_ts) + 1)] for u in unique_sd_policies}\n for t in range(len(z_ts)):\n sc_co_t = interventions[z_ts[t]].school_closure\n for u in unique_policies:\n if u == sc_co_t:\n intervals[u][t] = True\n intervals[u][t + 1] = True\n for u_sd in unique_sd_policies:\n if u_sd == interventions[z_ts[t]].social_distance:\n intervals_sd[u_sd][t] = True\n intervals_sd[u_sd][t + 1] = True\n \n interval_color = {0: 'orange', 1: 'purple', 0.5: 'green'}\n interval_labels = {0: 'Schools Open', 1: 'Schools Closed', 0.5: 'Schools P. 
Open'}\n interval_alpha = {0: 0.3, 1: 0.3, 0.5: 0.3}\n for u in unique_policies:\n u_color = interval_color[u]\n u_label = interval_labels[u]\n \n actions_ax.fill_between(\n range(len(z_ts) + 1),\n 0,\n 1,\n where=intervals[u],\n color='white', #u_color,\n alpha=0, #interval_alpha[u],\n label=u_label,\n linewidth=0,\n hatch = '/',\n step='pre')\n # for kv in interval_labels:\n # kv_label = interval_labels[kv]\n # kv_color = interval_color[kv]\n # kv_alpha = interval_alpha[kv]\n # actions_ax.fill_between(range(len(z_ts) + 1),\n # 0,\n # 0.0001,\n # color=kv_color,\n # alpha=kv_alpha,\n # label=kv_label,\n # linewidth=0,\n # step='pre')\n \n sd_labels = {\n 0: '',\n 0.95: 'Initial lock-down',\n }\n sd_labels.update({tier['transmission_reduction']: tier['name'] for tier in tiers})\n tier_by_tr = {tier['transmission_reduction']: tier for tier in tiers}\n tier_by_tr[0.746873309820472] = {\n \"name\": 'Ini Lockdown',\n \"transmission_reduction\": 0.95,\n \"cocooning\": 0.95,\n \"school_closure\": 1,\n \"min_enforcing_time\": 0,\n \"daily_cost\": 0,\n \"color\": 'darkgrey'\n }\n \n if \"add_tiers\" in kwargs.keys():\n for add_t in add_tiers.keys():\n tier_by_tr[add_t] = {\"color\": add_tiers[add_t],\n \"name\": \"added stage\"}\n \n if align_axes:\n max_y_lim_1 = np.maximum(max_y_lim_1, max_y_lim_2)\n max_y_lim_2 = max_y_lim_1\n if y_lim is not None:\n max_y_lim_1 = y_lim\n else:\n max_y_lim_1 = roundup(max_y_lim_1, 100 if 'ToIHT' in plot_left_axis else 1000)\n \n if vertical_fill:\n for u in unique_sd_policies:\n try:\n if u in tier_by_tr.keys():\n u_color = tier_by_tr[u]['color']\n u_label = f'{tier_by_tr[u][\"name\"]}' if u > 0 else \"\"\n else:\n u_color,u_label = colorDecide(u,tier_by_tr)\n u_alpha1 = 0.6\n u_alpha2 = 0.6\n fill_1 = intervals_sd[u].copy()\n fill_2 = intervals_sd[u].copy()\n for i in range(len(intervals_sd[u])):\n if 'history_white' in kwargs.keys() and kwargs['history_white']:\n if i <= t_start:\n fill_2[i] = False\n fill_1[i] = False\n else:\n if i <= t_start:\n fill_2[i] = False\n else:\n fill_1[i] = False\n \n policy_ax.fill_between(range(len(z_ts) + 1),\n 0,\n 1,\n where=fill_1,\n color=u_color,\n alpha=u_alpha1,\n label=u_label,\n linewidth=0.0,\n step='pre')\n policy_ax.fill_between(range(len(z_ts) + 1),\n 0,\n 1,\n where=fill_2,\n color=u_color,\n alpha=u_alpha2,\n label=u_label,\n linewidth=0.0,\n step='pre')\n except Exception:\n print(f'WARNING: TR value {u} was not plotted')\n else:\n # fill the horizontal policy color\n for ti in range(len(tiers)):\n u = tiers[ti]['transmission_reduction']\n if u in tier_by_tr.keys():\n u_color = tier_by_tr[u]['color']\n u_label = f'{tier_by_tr[u][\"name\"]}' if u > 0 else \"\"\n else:\n u_color,u_label = colorDecide(u,tier_by_tr)\n u_alpha = 0.6\n u_lb = policy.lockdown_thresholds[ti][0]\n u_ub = policy.lockdown_thresholds_ub[ti][0]\n if u_ub == np.inf:\n u_ub = max_y_lim_1\n \n if u_lb >= 0 and u_ub >= 0:\n policy_ax.fill_between(range(len(z_ts) + 1),\n u_lb/max_y_lim_1,\n u_ub/max_y_lim_1,\n color=u_color,\n alpha=u_alpha,\n label=u_label,\n linewidth=0.0,\n step='pre')\n\n if \"acs_fill\" in kwargs.keys():\n # fill the ACS plot\n policy_ax.fill_between(range(len(z_ts) + 1),\n 0,\n 1,\n color='white',\n linewidth=0.0,\n step='pre')\n policy_ax.fill_between(range(len(z_ts) + 1),\n 0,\n hosp_beds/max_y_lim_1,\n color='lightgreen',\n alpha=0.6,\n linewidth=0.0,\n step='pre')\n fill_acs = [False]*(len(z_ts) + 1)\n acs_rec = -1\n acs_date = []\n for tind in range(T):\n if profiles[cap_path_id]['capacity'][tind] > hosp_beds:\n 
acs_date.append(tind)\n fill_acs[tind] = True\n acs_rec = profiles[cap_path_id]['capacity'][tind]\n ax1.plot(acs_date,[hosp_beds]*len(acs_date),color='gray', linestyle='-', linewidth=1)\n if acs_rec > 0:\n policy_ax.fill_between(range(len(z_ts) + 1),\n hosp_beds/max_y_lim_1,\n acs_rec/max_y_lim_1,\n where = fill_acs,\n color='forestgreen',\n alpha=0.6,\n linewidth=0.0,\n step='pre')\n \n # # Plot again for consolidated legend\n # for u in sd_alphas:\n # u_label = sd_labels[u]\n # policy_ax.fill_between(range(len(z_ts) + 1),\n # 0,\n # 0.0001,\n # color=u_color,\n # alpha=sd_alphas[u],\n # label=u_label,\n # linewidth=0,\n # step='pre')\n # Plot social distance\n social_distance = [interventions[k].social_distance for k in z_ts]\n #policy_ax.plot(social_distance, c='k', alpha=0.6 * hide) # marker='_', linestyle='None',\n hsd = np.sum(np.array(social_distance[:T]) >= 0.78)\n print(f'HIGH SOCIAL DISTANCE')\n print(f'Point Forecast: {hsd}')\n hsd_list = np.array(\n [np.sum(np.array([interventions[k].social_distance for k in z_ts]) >= 0.78) for z_ts in states_ts['z']])\n count_lockdowns = defaultdict(int)\n for z_ts in states_ts['z']:\n n_lockdowns = 0\n for ix_k in range(1, len(z_ts)):\n if interventions[z_ts[ix_k]].social_distance - interventions[z_ts[ix_k - 1]].social_distance > 0:\n n_lockdowns += 1\n count_lockdowns[n_lockdowns] += 1\n print(\n f'Mean: {np.mean(hsd_list):.2f} Median: {np.percentile(hsd_list,q=50)} - SD CI_5_95: {np.percentile(hsd_list,q=5)}-{np.percentile(hsd_list,q=95)}'\n )\n for nlock in count_lockdowns:\n print(f'Prob of having exactly {nlock} lockdowns: {count_lockdowns[nlock]/len(states_ts[\"z\"]):4f}')\n unique_social_distance = np.unique(social_distance)\n # for usd in unique_social_distance:\n # if usd > 0:\n # offset = {0.1: -0.03, 0.2: -0.03, 0.4: -0.03, 0.6: -0.03, 0.8: -0.03, 0.9: 0.02}[usd]\n # policy_ax.annotate(f'{int(usd*100)}% social distance', (0.07, usd + offset),\n # xycoords='axes fraction',\n # color='k',\n # annotation_clip=True) #\n \n # START PLOT STYLING\n # Axis limits\n ax1.set_ylim(0, max_y_lim_1)\n if ax2 is not None:\n ax2.set_ylim(0, roundup(max_y_lim_2, 1000))\n policy_ax.set_ylim(0, 1)\n \n # plot a vertical line for the t_start\n plt.vlines(t_start, 0, max_y_lim_1, colors='k',linewidth = 3)\n \n # Axis format and names\n ax1.set_ylabel(\" / \".join((compartment_names[v] for v in plot_left_axis)), fontsize=text_size)\n if ax2 is not None:\n ax2.set_ylabel(compartment_names[plot_right_axis[0]])\n \n # Axis ticks\n ax1.xaxis.set_ticks([t for t, d in enumerate(cal.calendar) if (d.day == 1 and t < T)])\n ax1.xaxis.set_ticklabels(\n [f' {py_cal.month_abbr[d.month]} ' for t, d in enumerate(cal.calendar) if (d.day == 1 and t < T)],\n rotation=0,\n fontsize=22)\n for tick in ax1.xaxis.get_major_ticks():\n #tick.tick1line.set_markersize(0)\n #tick.tick2line.set_markersize(0)\n tick.label1.set_horizontalalignment('left')\n ax1.tick_params(axis='y', labelsize=text_size, length=5, width=2)\n ax1.tick_params(axis='x', length=5, width=2)\n \n # Policy axis span 0 - 1\n #policy_ax.yaxis.set_ticks(np.arange(0, 1.001, 0.1))\n policy_ax.tick_params(\n axis='both', # changes apply to the x-axis\n which='both', # both major and minor ticks are affected\n bottom=False, # ticks along the bottom edge are off\n right=False, # ticks along the top edge are off\n labelbottom=False,\n labelright=False) # labels along the bottom edge are off\n \n actions_ax.tick_params(\n axis='both', # changes apply to the x-axis\n which='both', # both major and minor ticks are 
affected\n bottom=False, # ticks along the bottom edge are off\n left=False, # ticks along the top edge are off\n labelbottom=False,\n labelleft=False) # labels along the bottom edge are off\n actions_ax.spines['top'].set_visible(False)\n actions_ax.spines['bottom'].set_visible(False)\n actions_ax.spines['left'].set_visible(False)\n actions_ax.spines['right'].set_visible(False)\n\n # if 321 <= T:\n # # line to separate years\n # actions_ax.axvline(321, 0, 1, color='k', alpha=0.3)\n if 140 <= T:\n actions_ax.annotate('2020',\n xy=(140, 0),\n xycoords='data',\n color='k',\n annotation_clip=True,\n fontsize=text_size - 2)\n if 425 <= T:\n actions_ax.annotate('2021',\n xy=(425, 0),\n xycoords='data',\n color='k',\n annotation_clip=True,\n fontsize=text_size - 2)\n \n # Order of layers\n ax1.set_zorder(policy_ax.get_zorder() + 10) # put ax in front of policy_ax\n ax1.patch.set_visible(False) # hide the 'canvas'\n if ax2 is not None:\n ax2.set_zorder(policy_ax.get_zorder() + 5) # put ax in front of policy_ax\n ax2.patch.set_visible(False) # hide the 'canvas'\n \n # Plot margins\n ax1.margins(0)\n actions_ax.margins(0)\n if ax2 is not None:\n ax2.margins(0)\n policy_ax.margins(0.)\n \n # Plot Grid\n #ax1.grid(True, which='both', color='grey', alpha=0.1, linewidth=0.5, zorder=0)\n \n # fig.delaxes(ax1[1, 2])\n if plot_legend:\n handles_ax1, labels_ax1 = ax1.get_legend_handles_labels()\n handles_ax2, labels_ax2 = ax2.get_legend_handles_labels() if ax2 is not None else ([], [])\n handles_action_ax, labels_action_ax = actions_ax.get_legend_handles_labels()\n handles_policy_ax, labels_policy_ax = policy_ax.get_legend_handles_labels()\n plotted_labels = [pl.get_label() for pl in plotted_lines]\n if 'ToIHT' in plot_left_axis or True:\n fig_legend = ax1.legend(\n plotted_lines + handles_policy_ax + handles_action_ax,\n plotted_labels + labels_policy_ax + labels_action_ax,\n loc='upper right',\n fontsize=text_size + 2,\n #bbox_to_anchor=(0.90, 0.9),\n prop={'size': text_size},\n framealpha=1)\n elif 'IH' in plot_left_axis:\n fig_legend = ax1.legend(\n handles_ax1,\n labels_ax1,\n loc='upper right',\n fontsize=text_size + 2,\n #bbox_to_anchor=(0.90, 0.9),\n prop={'size': text_size},\n framealpha=1)\n fig_legend.set_zorder(4)\n \n plt.tight_layout()\n plt.subplots_adjust(hspace=0)\n plots_left_right = plot_left_axis + plot_right_axis\n plot_filename = plots_path / f'scratch_{instance_name}_{\"\".join(plots_left_right)}.pdf'\n plt.savefig(plot_filename)\n if show:\n plt.show()\n plt.close()\n return plot_filename\n\ndef stack_plot(instance_name,\n instance,\n policy,\n profiles,\n profile_labels,\n real_hosp,\n plot_left_axis=['IH'],\n plot_right_axis=[],\n scale_plot=False,\n align_axes=True,\n show=True,\n plot_triggers=False,\n plot_trigger_annotations=False,\n plot_legend=False,\n y_lim=None,\n n_replicas=300,\n config=None,\n hosp_beds_list=None,\n real_new_admission=None,\n real_hosp_or_icu=None,\n bed_scale=1,\n is_representative_path=False,\n t_start = -1,\n central_path_id=0,\n cap_path_id=0,\n **kwargs):\n '''\n Plots a list of profiles in the same figure. 
Each profile corresponds\n to a stochastic replica for the given instance.\n\n Args:\n profiles (list of dict): a list of dictionaries that contain epi vars profiles\n profile_labels (list of str): name of each profile\n plot_only (list of str): list of variable names to be plot\n '''\n plt.rcParams[\"font.size\"] = \"18\"\n T = kwargs['T']\n if \"add_tiers\" in kwargs.keys():\n add_tiers = kwargs[\"add_tiers\"]\n cal = instance.cal\n population = instance.N.sum()\n interventions = kwargs['interventions']\n policy_params = kwargs['policy_params']\n if hosp_beds_list is None:\n hosp_beds_list = [instance.hosp_beds]\n hosp_beds = hosp_beds_list[0]\n \n lb_band = 5\n ub_band = 95\n \n text_size = 28\n fig, (ax1, actions_ax) = plt.subplots(2, 1, figsize=(17, 9), gridspec_kw={'height_ratios': [10, 1.1]})\n # Main axis\n # ax1.set_xlabel('Time')\n ax2 = None\n # Policy axis\n policy_ax = ax1.twinx()\n #policy_ax.set_ylabel('Social Distance')\n # If there are plot to be on the right axis, move policy_ax\n # Second, show the right spine.\n if len(plot_right_axis) > 0:\n # Create second axis\n ax2 = ax1.twinx()\n # Fix policy axis\n policy_ax.spines[\"right\"].set_position((\"axes\", 1.1))\n make_patch_spines_invisible(policy_ax)\n policy_ax.spines[\"right\"].set_visible(True)\n \n # Start plots\n max_y_lim_1 = population if 'S' in plot_left_axis or 'R' in plot_left_axis else 0\n max_y_lim_2 = population if 'S' in plot_right_axis or 'R' in plot_right_axis else 0\n plotted_lines = []\n \n # Add IHT field\n if 'ICU' in profiles[0].keys():\n for p in profiles:\n p['IHT'] = p['IH'] + p['ICU']\n \n # Transform data of interest\n states_to_plot = plot_left_axis + plot_right_axis\n last_day_hosp_data = len(real_hosp) - 1\n lb_hosp = real_hosp[-1] * (1 - config['div_filter_frac'])\n ub_hosp = real_hosp[-1] * (1 + config['div_filter_frac'])\n states_ts = {v: np.vstack(list(np.sum(p[v], axis=(1, 2))[:T] for p in profiles)) for v in states_to_plot}\n states_ts['z'] = np.vstack(list(p['z'][:T] for p in profiles))\n states_ts['tier_history'] = np.vstack(list(p['tier_history'][:T] for p in profiles))\n \n if states_to_plot[0] == 'IH':\n states_to_plot_temp = ['ToIHT']\n states_ts_temp = {v: np.vstack(list(np.sum(p[v], axis=(1, 2))[:T] for p in profiles)) for v in states_to_plot_temp}\n else:\n states_to_plot_temp = ['IH']\n states_ts_temp = {v: np.vstack(list(np.sum(p[v], axis=(1, 2))[:T] for p in profiles)) for v in states_to_plot_temp}\n \n \n central_path = 0\n representative_path_id = 0\n print(\"Printed seed is: \", profiles[0][\"seed\"])\n\n if is_representative_path == False:\n central_path = central_path_id\n mean_st = {v: states_ts[v][central_path] if v not in ['z', 'tier_history'] else states_ts[v] for v in states_ts}\n else:\n representative_path_id = find_central_path(instance.city, states_to_plot_temp, states_ts_temp, real_hosp, real_hosp_or_icu, real_new_admission)\n mean_st = {v: states_ts[v][representative_path_id] if v not in ['z', 'tier_history'] else states_ts[v] for v in states_ts}\n central_path = representative_path_id\n cap_path_id = representative_path_id\n\n all_st = {v: states_ts[v][:] if v not in ['z', 'tier_history'] else states_ts[v] for v in states_ts}\n min_st = {\n v: np.percentile(states_ts[v], q=lb_band, axis=0) if v not in ['z', 'tier_history'] else states_ts[v]\n for v in states_ts\n }\n max_st = {\n v: np.percentile(states_ts[v], q=ub_band, axis=0) if v not in ['z', 'tier_history'] else states_ts[v]\n for v in states_ts\n }\n # People that arrive above capacity\n # 
np.mean(np.sum(states_ts['IYIH']*(states_ts['IH']>=3239) , 1))\n new_profiles = [mean_st, min_st, max_st]\n \n # Stats\n all_states = ['S', 'E', 'IH', 'IA', 'IY', 'R', 'D']\n if 'ICU' in profiles[0].keys():\n all_states.append('ICU')\n all_states.append('IHT')\n all_states.append('ToICU')\n all_states_ts = {v: np.vstack(list(np.sum(p[v], axis=(1, 2))[:T] for p in profiles)) for v in all_states}\n #assert len(all_states_ts['IH']) >= n_replicas\n for v in all_states_ts:\n all_states_ts[v] = all_states_ts[v][:n_replicas]\n #assert len(all_states_ts['IH']) == n_replicas\n \n\n \n # Hospitalizations Report\n # Probabilities of reaching x% of the capacity\n prob50 = np.sum(np.any(all_states_ts['IH'] >= 0.5 * hosp_beds, axis=1)) / len(all_states_ts['IH'])\n prob60 = np.sum(np.any(all_states_ts['IH'] >= 0.6 * hosp_beds, axis=1)) / len(all_states_ts['IH'])\n prob70 = np.sum(np.any(all_states_ts['IH'] >= 0.7 * hosp_beds, axis=1)) / len(all_states_ts['IH'])\n prob80 = np.sum(np.any(all_states_ts['IH'] >= 0.8 * hosp_beds, axis=1)) / len(all_states_ts['IH'])\n prob90 = np.sum(np.any(all_states_ts['IH'] >= 0.9 * hosp_beds, axis=1)) / len(all_states_ts['IH'])\n prob100 = np.sum(np.any(all_states_ts['IH'] >= 1 * hosp_beds, axis=1)) / len(all_states_ts['IH'])\n prob110 = np.sum(np.any(all_states_ts['IH'] >= 1.1 * hosp_beds, axis=1)) / len(all_states_ts['IH'])\n n_replicas_used = len(all_states_ts['IH'])\n print(f\"{'P 50':10s}{'P 60':10s}{'P 70':10s}{'P 80':10s}{'P 90':10s}{'P 100':10s}{'P 110':10s}{'Scenarios':10s}\")\n print(\n f\"{prob50:<10.4f}{prob60:<10.4f}{prob70:<10.4f}{prob80:<10.4f}{prob90:<10.4f}{prob100:<10.4f}{prob110:<10.4f}{n_replicas_used}\"\n )\n # Min, Med, Max at the peak\n print('Hospitalization Peaks')\n peak_days = np.argmax(all_states_ts['IH'], axis=1)\n peak_vals = np.take_along_axis(all_states_ts['IH'], peak_days[:, None], axis=1)\n print(f'{\"Percentile (%)\":<15s} {\"Peak IH\":<15s} {\"Date\":15}')\n for q in [0, 5, 10, 50, 90, 100]:\n peak_day_percentile = int(np.percentile(peak_days, q))\n peak_percentile = np.percentile(peak_vals, q)\n print(f'{q:<15} {peak_percentile:<15.0f} {str(cal.calendar[peak_day_percentile])}')\n \n # Deaths\n all_states_ts_ind = {\n v: np.array(list(p[v][:T, :, :] for p in profiles)) for v in all_states\n }\n \n #assert len(all_states_ts_ind['IH']) >= n_replicas\n for v in all_states_ts:\n all_states_ts_ind[v] = all_states_ts_ind[v][:n_replicas]\n #assert len(all_states_ts_ind['IH']) == n_replicas\n # Deaths data\n avg_deaths_by_group = np.round(np.mean(all_states_ts_ind['D'][:, -1, :, :], axis=0).reshape((10, 1)), 0)\n Median_deaths = np.round(np.percentile(np.sum(all_states_ts_ind['D'][:, -1, :, :], axis=(1, 2)), 50))\n CI5_deaths = np.round(np.percentile(np.sum(all_states_ts_ind['D'][:, -1, :, :], axis=(1, 2)), lb_band))\n CI95_deaths = np.round(np.percentile(np.sum(all_states_ts_ind['D'][:, -1, :, :], axis=(1, 2)), ub_band))\n print('Deaths End Horizon')\n print(f'Point forecast {all_states_ts[\"D\"][0][-1]}')\n print(f'Mean {avg_deaths_by_group.sum()} Median:{Median_deaths} CI_5_95:[{CI5_deaths}-{CI95_deaths}]')\n print('Fraction by Age and Risk Group (1-5, L-H)')\n print(100 * avg_deaths_by_group.reshape(5, 2) / avg_deaths_by_group.sum())\n R_mean = np.mean(all_states_ts['R'][:, -1] / population)\n print(f'R End Horizon {R_mean}')\n # Policy\n lockdown_threshold = policy.lockdown_thresholds[0]\n # fdmi = policy_params['first_day_month_index']\n # policy = {(m, y): lockdown_threshold[fdmi[m, y]] for (m, y) in fdmi if fdmi[m, y] < T}\n # 
print('Lockdown Threshold:')\n # print(policy)\n central_path = central_path_id\n hide = 1\n l_style = l_styles['sim']\n for v in plot_left_axis:\n max_y_lim_1 = np.maximum(max_y_lim_1, np.max(max_st[v]))\n label_v = compartment_names[v]\n if v != 'IYIHa':\n v_a = ax1.plot(mean_st[v].T * bed_scale, c=colors[v], linestyle=l_style, linewidth=2, label=label_v, alpha=1 * hide, zorder = 50)\n plotted_lines.append(v_a[0])\n v_aa = ax1.plot(all_st[v].T * bed_scale, c=light_colors[v], linestyle=l_style, linewidth=1, label=label_v, alpha=0.8 * hide)\n plotted_lines.append(v_aa[0])\n #if central_path != 0:\n # ax1.fill_between(range(len(max_st[v])),\n # max_st[v],\n # min_st[v],\n # color=colors[v],\n # linestyle=l_style,\n # facecolor=\"none\",\n # linewidth=0.0,\n # alpha=0.5 * hide)\n if v == 'IH' or v == 'ICU' or v == 'IHT':\n real_h_plot = ax1.scatter(range(len(real_hosp_or_icu)), real_hosp_or_icu, color='maroon', label='Actual hospitalizations',zorder=100,s=15)\n max_y_lim_1 = np.maximum(roundup(np.max(hosp_beds_list), 100), max_y_lim_1)\n try:\n if v == 'IH' or v == 'IHT':\n ax1.plot(profiles[0]['capacity'][:T], color='k', linestyle='-', linewidth=3)\n else:\n for hosp_beds_lines in hosp_beds_list:\n ax1.hlines(hosp_beds_lines, 0, T, color='k', linestyle='-', linewidth=3)\n except:\n for hosp_beds_lines in hosp_beds_list:\n ax1.hlines(hosp_beds_lines, 0, T, color='k', linestyle='-', linewidth=3)\n xpos = 30 #440 #200 # 440\n if plot_trigger_annotations:\n ax1.annotate('Hospital capacity', (xpos, hosp_beds + 150),\n xycoords='data',\n color=colors[v],\n annotation_clip=True,\n fontsize=text_size + 2) #\n if plot_triggers:\n ax1.hlines(policy_params['hosp_beds'] * 0.6, 0, T, 'b', '-', linewidth=3)\n if plot_trigger_annotations:\n ax1.annotate('Safety threshold', (xpos, policy_params['hosp_level_release'] - 250),\n xycoords='data',\n color='b',\n annotation_clip=True,\n fontsize=text_size + 2)\n if v == 'ToIHT' or v == 'ToICU':\n if v == 'ToIHT':\n if real_new_admission is not None:\n real_h_plot = ax1.scatter(range(len(real_new_admission)), real_new_admission, color='maroon', label='New hospital admission',zorder=100,s=15)\n if plot_triggers:\n #if central_path > 0:\n # IYIH_mov_ave = []\n # for t in range(T):\n # IYIH_mov_ave.append(np.mean(mean_st[v][np.maximum(0, t - 7):t]))\n # v_avg = ax1.plot(IYIH_mov_ave, c='black', linestyle=l_style, label=f'Moving Avg. {label_v}')\n # plotted_lines.append(v_avg[0])\n for tier_ix, tier in enumerate(policy.tiers):\n ax1.plot([policy.lockdown_thresholds[tier_ix][0]]*T, color=tier['color'], linewidth=5)\n xpos = np.minimum(405, int(T * 0.65)) #180 #405\n xytext = (xpos, lockdown_threshold[xpos] - 20)\n if plot_trigger_annotations:\n ax1.annotate('Lock-down threshold',\n xy=(120, lockdown_threshold[120]),\n xytext=xytext,\n xycoords='data',\n textcoords='data',\n color='b',\n annotation_clip=True,\n fontsize=text_size + 2)\n if \"plot_ACS_triggers\" in kwargs.keys():\n if kwargs[\"plot_ACS_triggers\"]:\n ax1.plot([policy.acs_thrs]*T, color='k', linewidth=5)\n for v in plot_right_axis:\n max_y_lim_2 = np.maximum(max_y_lim_2, np.max(max_st[v]))\n label_v = compartment_names[v]\n v_a = ax2.plot(mean_st[v].T, c=colors[v], linestyle=l_style, label=label_v)\n plotted_lines.append(v_a[0])\n ax2.fill_between(range(T), min_st[v], max_st[v], color=colors[v], linestyle=l_style, alpha=0.5)\n if v == 'IH':\n max_y_lim_2 = np.maximum(roundup(hosp_beds, 100), max_y_lim_2)\n ax2.hlines(hosp_beds, 0, T, color='r', linestyle='--', label='N. 
of beds')\n if plot_triggers:\n ax2.hlines(policy_params['hosp_level_release'], 0, T, 'b', '--')\n ax2.annotate('Trigger - Current hospitalizations ',\n (0.05, 0.78 * policy_params['hosp_level_release'] / max_y_lim_1),\n xycoords='axes fraction',\n color='b',\n annotation_clip=True)\n if v == 'ToIHT':\n if plot_triggers:\n ax2.plot(lockdown_threshold[:T], 'b-')\n xytext = (160, lockdown_threshold[160] - 15)\n ax2.annotate('Trigger - Avg. Daily Hospitalization',\n xy=(120, lockdown_threshold[120]),\n xytext=xytext,\n xycoords='data',\n textcoords='data',\n color='b',\n annotation_clip=True)\n # ax2.annotate(' ',\n # xy=(85, lockdown_threshold[85]),\n # xytext=xytext,\n # xycoords='data',\n # textcoords='data',\n # arrowprops={'arrowstyle': '-|>'},\n # color='b',\n # annotation_clip=True)\n \n # Plotting the policy\n # Plot school closure and cocooning\n tiers = policy.tiers\n z_ts = profiles[central_path]['z'][:T]\n tier_h = profiles[central_path]['tier_history'][:T]\n print('seed was', profiles[central_path]['seed'])\n sc_co = [interventions[k].school_closure for k in z_ts]\n unique_policies = set(sc_co)\n sd_lvl = [interventions[k].social_distance for k in z_ts]\n sd_levels = [tier['transmission_reduction'] for tier in tiers] + [0, 0.95] + sd_lvl\n unique_sd_policies = list(set(sd_levels))\n unique_sd_policies.sort()\n intervals = {u: [False for t in range(len(z_ts) + 1)] for u in unique_policies}\n intervals_sd = {u: [False for t in range(len(z_ts) + 1)] for u in unique_sd_policies}\n for t in range(len(z_ts)):\n sc_co_t = interventions[z_ts[t]].school_closure\n for u in unique_policies:\n if u == sc_co_t:\n intervals[u][t] = True\n intervals[u][t + 1] = True\n for u_sd in unique_sd_policies:\n if u_sd == interventions[z_ts[t]].social_distance:\n intervals_sd[u_sd][t] = True\n intervals_sd[u_sd][t + 1] = True\n \n interval_color = {0: 'orange', 1: 'purple', 0.5: 'green'}\n interval_labels = {0: 'Schools Open', 1: 'Schools Closed', 0.5: 'Schools P. 
Open'}\n interval_alpha = {0: 0.3, 1: 0.3, 0.5: 0.3}\n for u in unique_policies:\n u_color = interval_color[u]\n u_label = interval_labels[u]\n \n actions_ax.fill_between(\n range(len(z_ts) + 1),\n 0,\n 1,\n where=intervals[u],\n color='white', #u_color,\n alpha=0, #interval_alpha[u],\n label=u_label,\n linewidth=0,\n hatch = '/',\n step='pre')\n \n\n # for kv in interval_labels:\n # kv_label = interval_labels[kv]\n # kv_color = interval_color[kv]\n # kv_alpha = interval_alpha[kv]\n # actions_ax.fill_between(range(len(z_ts) + 1),\n # 0,\n # 0.0001,\n # color=kv_color,\n # alpha=kv_alpha,\n # label=kv_label,\n # linewidth=0,\n # step='pre')\n \n sd_labels = {\n 0: '',\n 0.95: 'Initial lock-down',\n }\n sd_labels.update({tier['transmission_reduction']: tier['name'] for tier in tiers})\n tier_by_tr = {tier['transmission_reduction']: tier for tier in tiers}\n tier_by_tr[0.746873309820472] = {\n \"name\": 'Ini Lockdown',\n \"transmission_reduction\": 0.95,\n \"cocooning\": 0.95,\n \"school_closure\": 1,\n \"min_enforcing_time\": 0,\n \"daily_cost\": 0,\n \"color\": 'darkgrey'\n }\n \n if \"add_tiers\" in kwargs.keys():\n for add_t in add_tiers.keys():\n tier_by_tr[add_t] = {\"color\": add_tiers[add_t],\n \"name\": \"added stage\"}\n for u in unique_sd_policies:\n try:\n if u in tier_by_tr.keys():\n u_color = tier_by_tr[u]['color']\n u_label = f'{tier_by_tr[u][\"name\"]}' if u > 0 else \"\"\n else:\n u_color,u_label = colorDecide(u,tier_by_tr)\n u_alpha1 = 0.6\n fill_1 = intervals_sd[u].copy()\n fill_2 = intervals_sd[u].copy()\n for i in range(len(intervals_sd[u])):\n if 'history_white' in kwargs.keys() and kwargs['history_white']:\n if i <= t_start:\n fill_2[i] = False\n fill_1[i] = False\n else:\n if i <= t_start:\n fill_2[i] = False\n else:\n fill_1[i] = False\n \n policy_ax.fill_between(range(len(z_ts) + 1),\n 0,\n 1,\n where=fill_1,\n color=u_color,\n alpha=u_alpha1,\n label=u_label,\n linewidth=0.0,\n step='pre')\n # policy_ax.fill_between(range(len(z_ts) + 1),\n # 0,\n # 1,\n # where=fill_2,\n # color=u_color,\n # alpha=u_alpha,\n # label=u_label,\n # linewidth=0.0,\n # step='pre')\n except Exception:\n print(f'WARNING: TR value {u} was not plotted')\n \n # Plot social distance\n social_distance = [interventions[k].social_distance for k in z_ts]\n #policy_ax.plot(social_distance, c='k', alpha=0.6 * hide) # marker='_', linestyle='None',\n hsd = np.sum(np.array(social_distance[:T]) >= 0.78)\n print(f'HIGH SOCIAL DISTANCE')\n print(f'Point Forecast: {hsd}')\n hsd_list = np.array(\n [np.sum(np.array([interventions[k].social_distance for k in z_ts]) >= 0.78) for z_ts in states_ts['z']])\n count_lockdowns = defaultdict(int)\n for z_ts in states_ts['z']:\n n_lockdowns = 0\n for ix_k in range(1, len(z_ts)):\n if interventions[z_ts[ix_k]].social_distance - interventions[z_ts[ix_k - 1]].social_distance > 0:\n n_lockdowns += 1\n count_lockdowns[n_lockdowns] += 1\n print(\n f'Mean: {np.mean(hsd_list):.2f} Median: {np.percentile(hsd_list,q=50)} - SD CI_5_95: {np.percentile(hsd_list,q=5)}-{np.percentile(hsd_list,q=95)}'\n )\n for nlock in count_lockdowns:\n print(f'Prob of having exactly {nlock} lockdowns: {count_lockdowns[nlock]/len(states_ts[\"z\"]):4f}')\n unique_social_distance = np.unique(social_distance)\n \n # START PLOT STYLING\n # Axis limits\n if align_axes:\n max_y_lim_1 = np.maximum(max_y_lim_1, max_y_lim_2)\n max_y_lim_2 = max_y_lim_1\n if y_lim is not None:\n max_y_lim_1 = y_lim\n else:\n max_y_lim_1 = roundup(max_y_lim_1, 100 if 'ToIHT' in plot_left_axis else 1000)\n ax1.set_ylim(0, 
max_y_lim_1)\n policy_ax.set_ylim(0, 1)\n\n \n # plot the stacked part of the stage proportion\n ax3 = ax1.twinx()\n ax3.set_ylim(0, max_y_lim_1)\n \n data = states_ts['tier_history'].T\n \n tierColor = {}\n for tierInd in range(len(policy.tiers)):\n tierColor[tierInd] = (np.sum(data[(t_start+1):T,:] == tierInd, axis = 1)/len(data[0]))*max_y_lim_1 \n \n# #r = range(len(tier1))\n r = range((t_start+1), T-1)\n bottomTier = 0\n for tierInd in range(len(policy.tiers)):\n ax3.bar(r, tierColor[tierInd], color = policy.tiers[tierInd]['color'], bottom = bottomTier, label = 'tier{}'.format(tierInd), width = 1, alpha = 0.6, linewidth = 0)\n bottomTier += np.array(tierColor[tierInd])\n # ax3.bar(r, tier2, color = 'blue', bottom = np.array(tier1), label = 'tier2', width = 1, alpha = 0.6, linewidth = 0)\n # ax3.bar(r, tier3, color = 'yellow', bottom = np.array(tier1) + np.array(tier2), label = 'tier3', width = 1, alpha = 0.6, linewidth = 0)\n # ax3.bar(r, tier4, color = 'orange', bottom = np.array(tier1) + np.array(tier2) + np.array(tier3), label = 'tier4', width = 1, alpha = 0.6, linewidth = 0)\n # ax3.bar(r, tier5, color = 'red', bottom = np.array(tier1) + np.array(tier2) + np.array(tier3) + np.array(tier4), label = 'tier5', width = 1, alpha = 0.6, linewidth = 0)\n ax3.set_yticks([])\n \n if ax2 is not None:\n ax2.set_ylim(0, roundup(max_y_lim_2, 1000))\n \n # plot a vertical line for the t_start\n plt.vlines(t_start, 0, max_y_lim_1, colors='k',linewidth = 3)\n \n # Axis format and names\n ax1.set_ylabel(\" / \".join((compartment_names[v] for v in plot_left_axis)), fontsize=text_size)\n if ax2 is not None:\n ax2.set_ylabel(compartment_names[plot_right_axis[0]])\n \n # Axis ticks\n ax1.xaxis.set_ticks([t for t, d in enumerate(cal.calendar) if (d.day == 1 and t < T)])\n ax1.xaxis.set_ticklabels(\n [f' {py_cal.month_abbr[d.month]} ' for t, d in enumerate(cal.calendar) if (d.day == 1 and t < T)],\n rotation=0,\n fontsize=22)\n for tick in ax1.xaxis.get_major_ticks():\n #tick.tick1line.set_markersize(0)\n #tick.tick2line.set_markersize(0)\n tick.label1.set_horizontalalignment('left')\n ax1.tick_params(axis='y', labelsize=text_size, length=5, width=2)\n ax1.tick_params(axis='x', length=5, width=2)\n \n # Policy axis span 0 - 1\n #policy_ax.yaxis.set_ticks(np.arange(0, 1.001, 0.1))\n policy_ax.tick_params(\n axis='both', # changes apply to the x-axis\n which='both', # both major and minor ticks are affected\n bottom=False, # ticks along the bottom edge are off\n right=False, # ticks along the top edge are off\n labelbottom=False,\n labelright=False) # labels along the bottom edge are off\n \n actions_ax.tick_params(\n axis='both', # changes apply to the x-axis\n which='both', # both major and minor ticks are affected\n bottom=False, # ticks along the bottom edge are off\n left=False, # ticks along the top edge are off\n labelbottom=False,\n labelleft=False) # labels along the bottom edge are off\n actions_ax.spines['top'].set_visible(False)\n actions_ax.spines['bottom'].set_visible(False)\n actions_ax.spines['left'].set_visible(False)\n actions_ax.spines['right'].set_visible(False)\n # if 321 <= T:\n # # line to separate years\n # actions_ax.axvline(321, 0, 1, color='k', alpha=0.3)\n if 140 <= T:\n actions_ax.annotate('2020',\n xy=(140, 0),\n xycoords='data',\n color='k',\n annotation_clip=True,\n fontsize=text_size - 2)\n if 425 <= T:\n actions_ax.annotate('2021',\n xy=(425, 0),\n xycoords='data',\n color='k',\n annotation_clip=True,\n fontsize=text_size - 2)\n \n # Order of layers\n 
ax1.set_zorder(policy_ax.get_zorder() + 10) # put ax in front of policy_ax\n ax1.patch.set_visible(False) # hide the 'canvas'\n if ax2 is not None:\n ax2.set_zorder(policy_ax.get_zorder() + 5) # put ax in front of policy_ax\n ax2.patch.set_visible(False) # hide the 'canvas'\n \n # Plot margins\n ax1.margins(0)\n actions_ax.margins(0)\n if ax2 is not None:\n ax2.margins(0)\n policy_ax.margins(0.)\n \n # Plot Grid\n #ax1.grid(True, which='both', color='grey', alpha=0.1, linewidth=0.5, zorder=0)\n \n # fig.delaxes(ax1[1, 2])\n if plot_legend:\n handles_ax1, labels_ax1 = ax1.get_legend_handles_labels()\n handles_ax2, labels_ax2 = ax2.get_legend_handles_labels() if ax2 is not None else ([], [])\n handles_action_ax, labels_action_ax = actions_ax.get_legend_handles_labels()\n handles_policy_ax, labels_policy_ax = policy_ax.get_legend_handles_labels()\n plotted_labels = [pl.get_label() for pl in plotted_lines]\n if 'ToIHT' in plot_left_axis or True:\n fig_legend = ax1.legend(\n plotted_lines + handles_policy_ax + handles_action_ax,\n plotted_labels + labels_policy_ax + labels_action_ax,\n loc='upper right',\n fontsize=text_size + 2,\n #bbox_to_anchor=(0.90, 0.9),\n prop={'size': text_size},\n framealpha=1)\n elif 'IH' in plot_left_axis:\n fig_legend = ax1.legend(\n handles_ax1,\n labels_ax1,\n loc='upper right',\n fontsize=text_size + 2,\n #bbox_to_anchor=(0.90, 0.9),\n prop={'size': text_size},\n framealpha=1)\n fig_legend.set_zorder(4)\n \n plt.tight_layout()\n plt.subplots_adjust(hspace=0)\n plots_left_right = plot_left_axis + plot_right_axis\n plot_filename = plots_path / f'scratch_{instance_name}_{\"\".join(plots_left_right)}.pdf'\n plt.savefig(plot_filename)\n if show:\n plt.show()\n plt.close()\n return plot_filename\n\n \ndef plot_pareto(cost_record, typePlt):\n # plot the pareto frontier with the cost record\n # take the mean of cost record\n plot_record_x = []\n plot_record_y = []\n for iKey in cost_record.keys():\n # each item corresponds to a candidate\n item = cost_record[iKey]\n cost_record_ij = np.array(item)\n lockdown_cost = np.mean(cost_record_ij[:,0])\n over_cap_cost = np.mean(cost_record_ij[:,1])\n plot_record_x.append(lockdown_cost)\n plot_record_y.append(over_cap_cost)\n if typePlt == 's':\n # plot the scatter plot\n plt.scatter(plot_record_x,plot_record_y)\n elif typePlt == 'l':\n # calculate the pareto frontier and plot the line plot\n n_points = len(plot_record_x)\n xy = np.zeros([n_points,2])\n xy[:,0] = np.array(plot_record_x)\n xy[:,1] = np.array(plot_record_y)\n xy_unique = np.unique(xy,axis = 0)\n xy_pareto = is_pareto_efficient_dumb(xy_unique)\n # plot the dots on the pareto frontier\n plt.scatter(xy_unique[xy_pareto][:,0],xy_unique[xy_pareto][:,1])\n # plot the line on the pareto frontier\n xy_pareto_sort = xy_unique[xy_pareto][xy_unique[xy_pareto][:,0].argsort()]\n plt.plot(xy_pareto_sort[:,0],xy_pareto_sort[:,1])\n plt.show()\n\ndef is_pareto_efficient_dumb(costs):\n \"\"\"\n Find the pareto-efficient points\n :param costs: An (n_points, n_costs) array\n :return: A (n_points, ) boolean array, indicating whether each point is Pareto efficient\n \"\"\"\n is_efficient = np.ones(costs.shape[0], dtype = bool)\n for i, c in enumerate(costs):\n is_efficient[i] = np.all(np.any(costs[:i]>c, axis=1)) and np.all(np.any(costs[i+1:]>c, axis=1))\n return is_efficient\n\ndef make_patch_spines_invisible(ax):\n ax.set_frame_on(True)\n ax.patch.set_visible(False)\n for sp in ax.spines.values():\n sp.set_visible(False)" ]
[ [ "numpy.quantile", "numpy.mean", "numpy.max", "numpy.empty", "matplotlib.pyplot.savefig", "matplotlib.pyplot.subplots", "numpy.argmax", "matplotlib.pyplot.tight_layout", "matplotlib.colors.to_rgb", "matplotlib.pyplot.subplots_adjust", "numpy.square", "numpy.array", "numpy.zeros", "numpy.percentile", "matplotlib.pyplot.close", "numpy.std", "numpy.take_along_axis", "matplotlib.pyplot.show", "numpy.sum", "numpy.ones", "matplotlib.pyplot.plot", "matplotlib.pyplot.vlines", "numpy.any", "numpy.repeat", "matplotlib.pyplot.scatter", "numpy.unique", "numpy.maximum" ] ]
qiaw99/Data-Structure
[ "3b1cdce96d4f35329ccfec29c03de57378ef0552" ]
[ "Computerorietierte_Mathematik/U8/run7_3.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# n ist das minimum zwischen a und b. Dann müssen wir uns zuerst für das minimum entscheiden.\n# kmin = n - 1 + 1 = n\n# kmax = 2(n - 1) + 1 = 2n - 1\ndef ggT_tumb(a, b):\n ggt = 1\n counter = 1\n temp = a if a <= b else b\n for i in range(2, temp + 1):\n counter += 1\n if (a % i) == 0:\n counter += 1\n if (b % i) == 0:\n ggt = i\n return (ggt, counter)\n2*3*5 / 32\n# kmin = 3\n# kmax = n - 1\ndef ggT_tumbpp(a, b):\n temp = a if a <= b else b\n counter = 1\n for i in range(temp, 1, -1):\n counter += 1\n if(a % i == 0):\n counter += 1\n if (b % i == 0):\n return (i, counter)\n return (1, counter)\n\n# kmin = 2\n# kmax = 1 + (n - 1) + 1 = n + 1\ndef ggT_euclid(a, b):\n if a >= b:\n m = a\n n = b\n else:\n m = b\n n = a\n counter = 1\n while(n > 0):\n counter += 1\n r = m % n\n m = n\n n = r\n counter += 1\n return (m, counter)\n\ndef draw_graph_ggT_tumb():\n # {100,...,1000}\n a = np.random.randint(900, size = 1000) + 100\n b = np.random.randint(900, size = 1000) + 100\n\n ls = []\n for i in range(1000):\n counter = ggT_tumb(a[i], b[i])[1]\n ls.append(counter)\n\n gr1 = 0\n gr2 = 0\n gr3 = 0\n gr4 = 0\n gr5 = 0\n gr6 = 0\n gr7 = 0\n gr8 = 0\n gr9 = 0\n gr10 = 0\n maximum = max(ls)\n minimum = min(ls)\n\n for x in ls:\n if x < maximum/10:\n gr1 += 1\n elif x < 2*maximum/10:\n gr2 += 1\n elif x < 3*maximum/10:\n gr3 += 1\n elif x < 4*maximum/10:\n gr4 += 1\n elif x < 5*maximum/10:\n gr5 += 1\n elif x < 6*maximum/10:\n gr6 += 1\n elif x < 7*maximum/10:\n gr7 += 1\n elif x < 8*maximum/10:\n gr8 += 1\n elif x < 9*maximum/10:\n gr9 += 1\n else:\n gr10 += 1\n\n y = [gr1, gr2, gr3, gr4, gr5,gr6, gr7, gr8, gr9, gr10]\n x = ['>' + str(i) for i in range (0, maximum * 2, 200)]\n\n plt.title(\"ggT_tumb; max is \" + str(maximum) + \" min is \" + str(minimum))\n plt.xlabel(\"Gruppe nach der Anzahl der Vergleiche\")\n plt.ylabel(\"Anzahl der Elemente in der Gruppe\")\n #print(range(len(y)))\n plt.bar(range(len(y)), y, fc = 'r', tick_label = x)\n plt.savefig(\"histtumb.png\")\n plt.show()\n\ndef draw_graph_ggT_tumbpp():\n # {100,...,1000}\n a = np.random.randint(900, size = 1000) + 100\n b = np.random.randint(900, size = 1000) + 100\n\n ls = []\n for i in range(1000):\n counter = ggT_tumbpp(a[i], b[i])[1]\n ls.append(counter)\n\n gr1 = 0\n gr2 = 0\n gr3 = 0\n gr4 = 0\n gr5 = 0\n gr6 = 0\n gr7 = 0\n gr8 = 0\n gr9 = 0\n gr10 = 0\n maximum = max(ls)\n minimum = min(ls)\n\n for x in ls:\n if x < maximum/10:\n gr1 += 1\n elif x < 2*maximum/10:\n gr2 += 1\n elif x < 3*maximum/10:\n gr3 += 1\n elif x < 4*maximum/10:\n gr4 += 1\n elif x < 5*maximum/10:\n gr5 += 1\n elif x < 6*maximum/10:\n gr6 += 1\n elif x < 7*maximum/10:\n gr7 += 1\n elif x < 8*maximum/10:\n gr8 += 1\n elif x < 9*maximum/10:\n gr9 += 1\n else:\n gr10 += 1\n\n y = [gr1, gr2, gr3, gr4, gr5,gr6, gr7, gr8, gr9, gr10]\n x = ['>' + str(i) for i in range (0, maximum * 2, 200)]\n\n plt.title(\"ggT_tumbpp; max is \" + str(maximum) + \" min is \" + str(minimum))\n plt.xlabel(\"Gruppe nach der Anzahl der Vergleiche\")\n plt.ylabel(\"Anzahl der Elemente in der Gruppe\")\n plt.bar(range(len(y)), y, fc = 'r', tick_label = x)\n plt.savefig(\"histtumbpp.png\")\n plt.show()\n\ndef draw_graph_ggT_euclid():\n # {100,...,1000}\n a = np.random.randint(900, size = 1000) + 100\n b = np.random.randint(900, size = 1000) + 100\n\n ls = []\n for i in range(1000):\n counter = ggT_euclid(a[i], b[i])[1]\n ls.append(counter)\n\n gr1 = 0\n gr2 = 0\n gr3 = 0\n gr4 = 0\n gr5 
= 0\n gr6 = 0\n gr7 = 0\n gr8 = 0\n gr9 = 0\n gr10 = 0\n maximum = max(ls)\n minimum = min(ls)\n\n for x in ls:\n if x < maximum/10:\n gr1 += 1\n elif x < 2*maximum/10:\n gr2 += 1\n elif x < 3*maximum/10:\n gr3 += 1\n elif x < 4*maximum/10:\n gr4 += 1\n elif x < 5*maximum/10:\n gr5 += 1\n elif x < 6*maximum/10:\n gr6 += 1\n elif x < 7*maximum/10:\n gr7 += 1\n elif x < 8*maximum/10:\n gr8 += 1\n elif x < 9*maximum/10:\n gr9 += 1\n else:\n gr10 += 1\n\n y = [gr1, gr2, gr3, gr4, gr5,gr6, gr7, gr8, gr9, gr10]\n x = ['>' + str(i) for i in range (0, 10)]\n\n plt.title(\"ggT_euclid; max is \" + str(maximum) + \" min is \" + str(minimum))\n plt.xlabel(\"Gruppe nach der Anzahl der Vergleiche\")\n plt.ylabel(\"Anzahl der Elemente in der Gruppe\")\n plt.bar(range(len(y)), y, fc = 'r', tick_label = x)\n plt.savefig(\"histeuclid.png\")\n plt.show()\n\ndef main():\n draw_graph_ggT_tumb()\n draw_graph_ggT_tumbpp()\n draw_graph_ggT_euclid()\n\nif __name__ == \"__main__\":\n main()\n\n" ]
[ [ "matplotlib.pyplot.xlabel", "matplotlib.pyplot.savefig", "numpy.random.randint", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.show" ] ]
lilanka/Falcon
[ "57a866b6a5e467684a3f45a36ec2b51c5bd097c0" ]
[ "falcon/fprob/binomial.test.py" ]
[ "import sys\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom fstat.distributions import D\n\nif __name__ == \"__main__\":\n data = np.random.rand(2000)\n \n \"\"\"\n b1 = D.binomial(5, 3, data)\n b2 = D.binomial(20, 12, data)\n b3 = D.binomial(100, 60, data)\n b4 = D.binomial(1000, 600, data)\n\n \"\"\"\n\n b1 = D.gaussian(0, 1, data)\n \n fig, axs = plt.subplots(4)\n axs[0].plot(data, b1, '.')\n \"\"\"\n axs[1].plot(data, b2, '.')\n axs[2].plot(data, b3, '.')\n axs[3].plot(data, b4, '.')\n \"\"\"\n plt.show()\n" ]
[ [ "matplotlib.pyplot.show", "numpy.random.rand", "matplotlib.pyplot.subplots" ] ]
Eternity666/DREAMPlace
[ "f694cddc39486e3ef2d5ae36431d8b21d1c397bd" ]
[ "dreamplace/PlaceObj.py" ]
[ "##\n# @file PlaceObj.py\n# @author Yibo Lin\n# @date Jul 2018\n# @brief Placement model class defining the placement objective.\n#\n\nimport os\nimport sys\nimport time\nimport numpy as np\nimport itertools\nimport logging\nimport torch\nimport torch.autograd as autograd\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport pdb\nimport gzip\nif sys.version_info[0] < 3:\n import cPickle as pickle\nelse:\n import _pickle as pickle\nimport dreamplace.ops.weighted_average_wirelength.weighted_average_wirelength as weighted_average_wirelength\nimport dreamplace.ops.logsumexp_wirelength.logsumexp_wirelength as logsumexp_wirelength\nimport dreamplace.ops.density_overflow.density_overflow as density_overflow\nimport dreamplace.ops.electric_potential.electric_overflow as electric_overflow\nimport dreamplace.ops.electric_potential.electric_potential as electric_potential\nimport dreamplace.ops.density_potential.density_potential as density_potential\nimport dreamplace.ops.rudy.rudy as rudy\nimport dreamplace.ops.pin_utilization.pin_utilization as pin_utilization\nimport dreamplace.ops.nctugr_binary.nctugr_binary as nctugr_binary\nimport dreamplace.ops.adjust_node_area.adjust_node_area as adjust_node_area\n\n\nclass PreconditionOp:\n \"\"\"Preconditioning engine is critical for convergence.\n Need to be carefully designed.\n \"\"\"\n\n def __init__(self, placedb, data_collections):\n self.placedb = placedb\n self.data_collections = data_collections\n self.iteration = 0\n self.alpha = 1.0\n self.best_overflow = None\n self.overflows = []\n if len(placedb.regions) > 0:\n self.movablenode2fence_region_map_clamp = (\n data_collections.node2fence_region_map[: placedb.num_movable_nodes]\n .clamp(max=len(placedb.regions))\n .long()\n )\n self.filler2fence_region_map = torch.zeros(\n placedb.num_filler_nodes, device=data_collections.pos[0].device, dtype=torch.long\n )\n for i in range(len(placedb.regions) + 1):\n filler_beg, filler_end = self.placedb.filler_start_map[i : i + 2]\n self.filler2fence_region_map[filler_beg:filler_end] = i\n\n def set_overflow(self, overflow):\n self.overflows.append(overflow)\n if self.best_overflow is None:\n self.best_overflow = overflow\n elif self.best_overflow.mean() > overflow.mean():\n self.best_overflow = overflow\n\n def __call__(self, grad, density_weight, update_mask=None):\n \"\"\"Introduce alpha parameter to avoid divergence.\n It is tricky for this parameter to increase.\n \"\"\"\n with torch.no_grad():\n if density_weight.size(0) == 1:\n precond = (\n self.data_collections.num_pins_in_nodes\n + self.alpha * density_weight * self.data_collections.node_areas\n )\n else:\n ### only precondition the non fence region\n node_areas = self.data_collections.node_areas.clone()\n\n mask = self.data_collections.node2fence_region_map[: self.placedb.num_movable_nodes] >= len(\n self.placedb.regions\n )\n node_areas[: self.placedb.num_movable_nodes].masked_scatter_(\n mask, node_areas[: self.placedb.num_movable_nodes][mask] * density_weight[-1]\n )\n filler_beg, filler_end = self.placedb.filler_start_map[-2:]\n node_areas[\n self.placedb.num_nodes\n - self.placedb.num_filler_nodes\n + filler_beg : self.placedb.num_nodes\n - self.placedb.num_filler_nodes\n + filler_end\n ] *= density_weight[-1]\n precond = self.data_collections.num_pins_in_nodes + self.alpha * node_areas\n\n precond.clamp_(min=1.0)\n grad[0 : self.placedb.num_nodes].div_(precond)\n grad[self.placedb.num_nodes : self.placedb.num_nodes * 2].div_(precond)\n\n ### stop gradients for terminated electric 
field\n if update_mask is not None:\n grad = grad.view(2, -1)\n update_mask = ~update_mask\n movable_mask = update_mask[self.movablenode2fence_region_map_clamp]\n filler_mask = update_mask[self.filler2fence_region_map]\n grad[0, : self.placedb.num_movable_nodes].masked_fill_(movable_mask, 0)\n grad[1, : self.placedb.num_movable_nodes].masked_fill_(movable_mask, 0)\n grad[0, self.placedb.num_nodes - self.placedb.num_filler_nodes :].masked_fill_(filler_mask, 0)\n grad[1, self.placedb.num_nodes - self.placedb.num_filler_nodes :].masked_fill_(filler_mask, 0)\n grad = grad.view(-1)\n self.iteration += 1\n\n # only work in benchmarks without fence region, assume overflow has been updated\n if (\n len(self.placedb.regions) > 0\n and self.overflows\n and self.overflows[-1] < 0.3\n and self.alpha < 1024\n ):\n if (self.iteration % 20) == 0:\n self.alpha *= 2\n logging.info(\n \"preconditioning alpha = %g, best_overflow %g, overflow %g\"\n % (self.alpha, self.best_overflow, self.overflows[-1])\n )\n\n return grad\n\n\nclass PlaceObj(nn.Module):\n \"\"\"\n @brief Define placement objective:\n wirelength + density_weight * density penalty\n It includes various ops related to global placement as well.\n \"\"\"\n def __init__(self, density_weight, params, placedb, data_collections,\n op_collections, global_place_params):\n \"\"\"\n @brief initialize ops for placement\n @param density_weight density weight in the objective\n @param params parameters\n @param placedb placement database\n @param data_collections a collection of all data and variables required for constructing the ops\n @param op_collections a collection of all ops\n @param global_place_params global placement parameters for current global placement stage\n \"\"\"\n super(PlaceObj, self).__init__()\n\n ### quadratic penalty\n self.density_quad_coeff = 2000\n self.init_density = None\n ### increase density penalty if slow convergence\n self.density_factor = 1\n\n if(len(placedb.regions) > 0):\n ### fence region will enable quadratic penalty by default\n self.quad_penalty = True\n else:\n ### non fence region will use first-order density penalty by default\n self.quad_penalty = False\n\n ### fence region\n ### update mask controls whether stop gradient/updating, 1 represents allow grad/update\n self.update_mask = None\n if len(placedb.regions) > 0:\n ### for subregion rough legalization, once stop updating, perform immediate greddy legalization once\n ### this is to avoid repeated legalization\n ### 1 represents already legal\n self.legal_mask = torch.zeros(len(placedb.regions) + 1)\n\n self.params = params\n self.placedb = placedb\n self.data_collections = data_collections\n self.op_collections = op_collections\n self.global_place_params = global_place_params\n\n self.gpu = params.gpu\n self.data_collections = data_collections\n self.op_collections = op_collections\n if len(placedb.regions) > 0:\n ### different fence region needs different density weights in multi-electric field algorithm\n self.density_weight = torch.tensor(\n [density_weight]*(len(placedb.regions)+1),\n dtype=self.data_collections.pos[0].dtype,\n device=self.data_collections.pos[0].device)\n else:\n self.density_weight = torch.tensor(\n [density_weight],\n dtype=self.data_collections.pos[0].dtype,\n device=self.data_collections.pos[0].device)\n ### Note: even for multi-electric fields, they use the same gamma\n num_bins_x = global_place_params[\"num_bins_x\"] if global_place_params[\n \"num_bins_x\"] else placedb.num_bins_x\n num_bins_y = 
global_place_params[\"num_bins_y\"] if global_place_params[\n \"num_bins_y\"] else placedb.num_bins_y\n self.num_bins_x = num_bins_x\n self.num_bins_y = num_bins_y\n self.bin_size_x = (placedb.xh - placedb.xl) / num_bins_x\n self.bin_size_y = (placedb.yh - placedb.yl) / num_bins_y\n self.gamma = torch.tensor(10 * self.base_gamma(params, placedb),\n dtype=self.data_collections.pos[0].dtype,\n device=self.data_collections.pos[0].device)\n\n # compute weighted average wirelength from position\n\n name = \"%dx%d bins\" % (num_bins_x, num_bins_y)\n self.name = name\n\n if global_place_params[\"wirelength\"] == \"weighted_average\":\n self.op_collections.wirelength_op, self.op_collections.update_gamma_op = self.build_weighted_average_wl(\n params, placedb, self.data_collections,\n self.op_collections.pin_pos_op)\n elif global_place_params[\"wirelength\"] == \"logsumexp\":\n self.op_collections.wirelength_op, self.op_collections.update_gamma_op = self.build_logsumexp_wl(\n params, placedb, self.data_collections,\n self.op_collections.pin_pos_op)\n else:\n assert 0, \"unknown wirelength model %s\" % (\n global_place_params[\"wirelength\"])\n\n self.op_collections.density_overflow_op = self.build_electric_overflow(\n params,\n placedb,\n self.data_collections,\n self.num_bins_x,\n self.num_bins_y)\n\n self.op_collections.density_op = self.build_electric_potential(\n params,\n placedb,\n self.data_collections,\n self.num_bins_x,\n self.num_bins_y,\n name=name)\n ### build multiple density op for multi-electric field\n if len(self.placedb.regions) > 0:\n self.op_collections.fence_region_density_ops, self.op_collections.fence_region_density_merged_op, self.op_collections.fence_region_density_overflow_merged_op = self.build_multi_fence_region_density_op()\n self.op_collections.update_density_weight_op = self.build_update_density_weight(\n params, placedb)\n self.op_collections.precondition_op = self.build_precondition(\n params, placedb, self.data_collections)\n self.op_collections.noise_op = self.build_noise(\n params, placedb, self.data_collections)\n if params.routability_opt_flag:\n # compute congestion map, RISA/RUDY congestion map\n self.op_collections.route_utilization_map_op = self.build_route_utilization_map(\n params, placedb, self.data_collections)\n self.op_collections.pin_utilization_map_op = self.build_pin_utilization_map(\n params, placedb, self.data_collections)\n self.op_collections.nctugr_congestion_map_op = self.build_nctugr_congestion_map(\n params, placedb, self.data_collections)\n # adjust instance area with congestion map\n self.op_collections.adjust_node_area_op = self.build_adjust_node_area(\n params, placedb, self.data_collections)\n\n self.Lgamma_iteration = global_place_params[\"iteration\"]\n if 'Llambda_density_weight_iteration' in global_place_params:\n self.Llambda_density_weight_iteration = global_place_params[\n 'Llambda_density_weight_iteration']\n else:\n self.Llambda_density_weight_iteration = 1\n if 'Lsub_iteration' in global_place_params:\n self.Lsub_iteration = global_place_params['Lsub_iteration']\n else:\n self.Lsub_iteration = 1\n if 'routability_Lsub_iteration' in global_place_params:\n self.routability_Lsub_iteration = global_place_params[\n 'routability_Lsub_iteration']\n else:\n self.routability_Lsub_iteration = self.Lsub_iteration\n self.start_fence_region_density = False\n\n\n def obj_fn(self, pos):\n \"\"\"\n @brief Compute objective.\n wirelength + density_weight * density penalty\n @param pos locations of cells\n @return objective value\n \"\"\"\n 
self.wirelength = self.op_collections.wirelength_op(pos)\n if len(self.placedb.regions) > 0:\n self.density = self.op_collections.fence_region_density_merged_op(pos)\n else:\n self.density = self.op_collections.density_op(pos)\n\n if self.init_density is None:\n ### record initial density\n self.init_density = self.density.data.clone()\n ### density weight subgradient preconditioner\n self.density_weight_grad_precond = self.init_density.masked_scatter(self.init_density > 0, 1 /self.init_density[self.init_density > 0])\n self.quad_penalty_coeff = self.density_quad_coeff / 2 * self.density_weight_grad_precond\n if self.quad_penalty:\n ### quadratic density penalty\n self.density = self.density * (1 + self.quad_penalty_coeff * self.density)\n if len(self.placedb.regions) > 0:\n result = self.wirelength + self.density_weight.dot(self.density)\n else:\n result = torch.add(self.wirelength, self.density, alpha=(self.density_factor * self.density_weight).item())\n\n return result\n\n def obj_and_grad_fn_old(self, pos_w, pos_g=None, admm_multiplier=None):\n \"\"\"\n @brief compute objective and gradient.\n wirelength + density_weight * density penalty\n @param pos locations of cells\n @return objective value\n \"\"\"\n if not self.start_fence_region_density:\n obj = self.obj_fn(pos_w, pos_g, admm_multiplier)\n if pos_w.grad is not None:\n pos_w.grad.zero_()\n obj.backward()\n else:\n num_nodes = self.placedb.num_nodes\n num_movable_nodes = self.placedb.num_movable_nodes\n num_filler_nodes = self.placedb.num_filler_nodes\n\n\n wl = self.op_collections.wirelength_op(pos_w)\n if pos_w.grad is not None:\n pos_w.grad.zero_()\n wl.backward()\n wl_grad = pos_w.grad.data.clone()\n if pos_w.grad is not None:\n pos_w.grad.zero_()\n\n if self.init_density is None:\n self.init_density = self.op_collections.density_op(pos_w.data).data.item()\n\n if self.quad_penalty:\n inner_density = self.op_collections.inner_fence_region_density_op(pos_w)\n inner_density = inner_density + self.density_quad_coeff / 2 / self.init_density * inner_density**2\n else:\n inner_density = self.op_collections.inner_fence_region_density_op(pos_w)\n\n inner_density.backward()\n inner_density_grad = pos_w.grad.data.clone()\n mask = self.data_collections.node2fence_region_map > 1e3\n inner_density_grad[:num_movable_nodes].masked_fill_(mask, 0)\n inner_density_grad[num_nodes:num_nodes+num_movable_nodes].masked_fill_(mask, 0)\n inner_density_grad[num_nodes-num_filler_nodes:num_nodes].mul_(0.5)\n inner_density_grad[-num_filler_nodes:].mul_(0.5)\n if pos_w.grad is not None:\n pos_w.grad.zero_()\n\n if self.quad_penalty:\n outer_density = self.op_collections.outer_fence_region_density_op(pos_w)\n outer_density = outer_density + self.density_quad_coeff / 2 / self.init_density * outer_density ** 2\n else:\n outer_density = self.op_collections.outer_fence_region_density_op(pos_w)\n\n outer_density.backward()\n outer_density_grad = pos_w.grad.data.clone()\n mask = self.data_collections.node2fence_region_map < 1e3\n outer_density_grad[:num_movable_nodes].masked_fill_(mask, 0)\n outer_density_grad[num_nodes:num_nodes+num_movable_nodes].masked_fill_(mask, 0)\n outer_density_grad[num_nodes-num_filler_nodes:num_nodes].mul_(0.5)\n outer_density_grad[-num_filler_nodes:].mul_(0.5)\n\n if self.quad_penalty:\n density = self.op_collections.density_op(pos_w.data)\n obj = wl.data.item() + self.density_weight * (density + self.density_quad_coeff / 2 / self.init_density * density ** 2)\n else:\n obj = wl.data.item() + self.density_weight * 
self.op_collections.density_op(pos_w.data)\n\n pos_w.grad.data.copy_(wl_grad + self.density_weight * (inner_density_grad + outer_density_grad))\n\n\n self.op_collections.precondition_op(pos_w.grad, self.density_weight, 0)\n\n return obj, pos_w.grad\n\n def obj_and_grad_fn(self, pos):\n \"\"\"\n @brief compute objective and gradient.\n wirelength + density_weight * density penalty\n @param pos locations of cells\n @return objective value\n \"\"\"\n #self.check_gradient(pos)\n if pos.grad is not None:\n pos.grad.zero_()\n obj = self.obj_fn(pos)\n\n obj.backward()\n\n self.op_collections.precondition_op(pos.grad, self.density_weight, self.update_mask)\n\n return obj, pos.grad\n\n def forward(self):\n \"\"\"\n @brief Compute objective with current locations of cells.\n \"\"\"\n return self.obj_fn(self.data_collections.pos[0])\n\n def check_gradient(self, pos):\n \"\"\"\n @brief check gradient for debug\n @param pos locations of cells\n \"\"\"\n wirelength = self.op_collections.wirelength_op(pos)\n\n if pos.grad is not None:\n pos.grad.zero_()\n wirelength.backward()\n wirelength_grad = pos.grad.clone()\n\n pos.grad.zero_()\n density = self.density_weight * self.op_collections.density_op(pos)\n density.backward()\n density_grad = pos.grad.clone()\n\n wirelength_grad_norm = wirelength_grad.norm(p=1)\n density_grad_norm = density_grad.norm(p=1)\n\n logging.info(\"wirelength_grad norm = %.6E\" % (wirelength_grad_norm))\n logging.info(\"density_grad norm = %.6E\" % (density_grad_norm))\n pos.grad.zero_()\n\n def estimate_initial_learning_rate(self, x_k, lr):\n \"\"\"\n @brief Estimate initial learning rate by moving a small step.\n Computed as | x_k - x_k_1 |_2 / | g_k - g_k_1 |_2.\n @param x_k current solution\n @param lr small step\n \"\"\"\n obj_k, g_k = self.obj_and_grad_fn(x_k)\n x_k_1 = torch.autograd.Variable(x_k - lr * g_k, requires_grad=True)\n obj_k_1, g_k_1 = self.obj_and_grad_fn(x_k_1)\n\n return (x_k - x_k_1).norm(p=2) / (g_k - g_k_1).norm(p=2)\n\n def build_weighted_average_wl(self, params, placedb, data_collections,\n pin_pos_op):\n \"\"\"\n @brief build the op to compute weighted average wirelength\n @param params parameters\n @param placedb placement database\n @param data_collections a collection of data and variables required for constructing ops\n @param pin_pos_op the op to compute pin locations according to cell locations\n \"\"\"\n\n # use WeightedAverageWirelength atomic\n wirelength_for_pin_op = weighted_average_wirelength.WeightedAverageWirelength(\n flat_netpin=data_collections.flat_net2pin_map,\n netpin_start=data_collections.flat_net2pin_start_map,\n pin2net_map=data_collections.pin2net_map,\n net_weights=data_collections.net_weights,\n net_mask=data_collections.net_mask_ignore_large_degrees,\n pin_mask=data_collections.pin_mask_ignore_fixed_macros,\n gamma=self.gamma,\n algorithm='merged')\n\n # wirelength for position\n def build_wirelength_op(pos):\n return wirelength_for_pin_op(pin_pos_op(pos))\n\n # update gamma\n base_gamma = self.base_gamma(params, placedb)\n\n def build_update_gamma_op(iteration, overflow):\n self.update_gamma(iteration, overflow, base_gamma)\n #logging.debug(\"update gamma to %g\" % (wirelength_for_pin_op.gamma.data))\n\n return build_wirelength_op, build_update_gamma_op\n\n def build_logsumexp_wl(self, params, placedb, data_collections,\n pin_pos_op):\n \"\"\"\n @brief build the op to compute log-sum-exp wirelength\n @param params parameters\n @param placedb placement database\n @param data_collections a collection of data and variables 
required for constructing ops\n @param pin_pos_op the op to compute pin locations according to cell locations\n \"\"\"\n\n wirelength_for_pin_op = logsumexp_wirelength.LogSumExpWirelength(\n flat_netpin=data_collections.flat_net2pin_map,\n netpin_start=data_collections.flat_net2pin_start_map,\n pin2net_map=data_collections.pin2net_map,\n net_weights=data_collections.net_weights,\n net_mask=data_collections.net_mask_ignore_large_degrees,\n pin_mask=data_collections.pin_mask_ignore_fixed_macros,\n gamma=self.gamma,\n algorithm='merged')\n\n # wirelength for position\n def build_wirelength_op(pos):\n return wirelength_for_pin_op(pin_pos_op(pos))\n\n # update gamma\n base_gamma = self.base_gamma(params, placedb)\n\n def build_update_gamma_op(iteration, overflow):\n self.update_gamma(iteration, overflow, base_gamma)\n #logging.debug(\"update gamma to %g\" % (wirelength_for_pin_op.gamma.data))\n\n return build_wirelength_op, build_update_gamma_op\n\n def build_density_overflow(self, params, placedb, data_collections,\n num_bins_x, num_bins_y):\n \"\"\"\n @brief compute density overflow\n @param params parameters\n @param placedb placement database\n @param data_collections a collection of all data and variables required for constructing the ops\n \"\"\"\n bin_size_x = (placedb.xh - placedb.xl) / num_bins_x\n bin_size_y = (placedb.yh - placedb.yl) / num_bins_y\n\n return density_overflow.DensityOverflow(\n data_collections.node_size_x,\n data_collections.node_size_y,\n bin_center_x=data_collections.bin_center_x_padded(placedb, 0, num_bins_x),\n bin_center_y=data_collections.bin_center_y_padded(placedb, 0, num_bins_y),\n target_density=data_collections.target_density,\n xl=placedb.xl,\n yl=placedb.yl,\n xh=placedb.xh,\n yh=placedb.yh,\n bin_size_x=bin_size_x,\n bin_size_y=bin_size_y,\n num_movable_nodes=placedb.num_movable_nodes,\n num_terminals=placedb.num_terminals,\n num_filler_nodes=0)\n\n def build_electric_overflow(self, params, placedb, data_collections,\n num_bins_x, num_bins_y):\n \"\"\"\n @brief compute electric density overflow\n @param params parameters\n @param placedb placement database\n @param data_collections a collection of all data and variables required for constructing the ops\n @param num_bins_x number of bins in horizontal direction\n @param num_bins_y number of bins in vertical direction\n \"\"\"\n bin_size_x = (placedb.xh - placedb.xl) / num_bins_x\n bin_size_y = (placedb.yh - placedb.yl) / num_bins_y\n\n return electric_overflow.ElectricOverflow(\n node_size_x=data_collections.node_size_x,\n node_size_y=data_collections.node_size_y,\n bin_center_x=data_collections.bin_center_x_padded(placedb, 0, num_bins_x),\n bin_center_y=data_collections.bin_center_y_padded(placedb, 0, num_bins_y),\n target_density=data_collections.target_density,\n xl=placedb.xl,\n yl=placedb.yl,\n xh=placedb.xh,\n yh=placedb.yh,\n bin_size_x=bin_size_x,\n bin_size_y=bin_size_y,\n num_movable_nodes=placedb.num_movable_nodes,\n num_terminals=placedb.num_terminals,\n num_filler_nodes=0,\n padding=0,\n deterministic_flag=params.deterministic_flag,\n sorted_node_map=data_collections.sorted_node_map,\n movable_macro_mask=data_collections.movable_macro_mask)\n\n def build_density_potential(self, params, placedb, data_collections,\n num_bins_x, num_bins_y, padding, name):\n \"\"\"\n @brief NTUPlace3 density potential\n @param params parameters\n @param placedb placement database\n @param data_collections a collection of data and variables required for constructing ops\n @param num_bins_x number of bins in 
horizontal direction\n @param num_bins_y number of bins in vertical direction\n @param padding number of padding bins to left, right, bottom, top of the placement region\n @param name string for printing\n \"\"\"\n bin_size_x = (placedb.xh - placedb.xl) / num_bins_x\n bin_size_y = (placedb.yh - placedb.yl) / num_bins_y\n\n xl = placedb.xl - padding * bin_size_x\n xh = placedb.xh + padding * bin_size_x\n yl = placedb.yl - padding * bin_size_y\n yh = placedb.yh + padding * bin_size_y\n local_num_bins_x = num_bins_x + 2 * padding\n local_num_bins_y = num_bins_y + 2 * padding\n max_num_bins_x = np.ceil(\n (np.amax(placedb.node_size_x) + 4 * bin_size_x) / bin_size_x)\n max_num_bins_y = np.ceil(\n (np.amax(placedb.node_size_y) + 4 * bin_size_y) / bin_size_y)\n max_num_bins = max(int(max_num_bins_x), int(max_num_bins_y))\n logging.info(\n \"%s #bins %dx%d, bin sizes %gx%g, max_num_bins = %d, padding = %d\"\n % (name, local_num_bins_x, local_num_bins_y,\n bin_size_x / placedb.row_height,\n bin_size_y / placedb.row_height, max_num_bins, padding))\n if local_num_bins_x < max_num_bins:\n logging.warning(\"local_num_bins_x (%d) < max_num_bins (%d)\" %\n (local_num_bins_x, max_num_bins))\n if local_num_bins_y < max_num_bins:\n logging.warning(\"local_num_bins_y (%d) < max_num_bins (%d)\" %\n (local_num_bins_y, max_num_bins))\n\n node_size_x = placedb.node_size_x\n node_size_y = placedb.node_size_y\n\n # coefficients\n ax = (4 / (node_size_x + 2 * bin_size_x) /\n (node_size_x + 4 * bin_size_x)).astype(placedb.dtype).reshape(\n [placedb.num_nodes, 1])\n bx = (2 / bin_size_x / (node_size_x + 4 * bin_size_x)).astype(\n placedb.dtype).reshape([placedb.num_nodes, 1])\n ay = (4 / (node_size_y + 2 * bin_size_y) /\n (node_size_y + 4 * bin_size_y)).astype(placedb.dtype).reshape(\n [placedb.num_nodes, 1])\n by = (2 / bin_size_y / (node_size_y + 4 * bin_size_y)).astype(\n placedb.dtype).reshape([placedb.num_nodes, 1])\n\n # bell shape overlap function\n def npfx1(dist):\n # ax will be broadcast from num_nodes*1 to num_nodes*num_bins_x\n return 1.0 - ax.reshape([placedb.num_nodes, 1]) * np.square(dist)\n\n def npfx2(dist):\n # bx will be broadcast from num_nodes*1 to num_nodes*num_bins_x\n return bx.reshape([\n placedb.num_nodes, 1\n ]) * np.square(dist - node_size_x / 2 - 2 * bin_size_x).reshape(\n [placedb.num_nodes, 1])\n\n def npfy1(dist):\n # ay will be broadcast from num_nodes*1 to num_nodes*num_bins_y\n return 1.0 - ay.reshape([placedb.num_nodes, 1]) * np.square(dist)\n\n def npfy2(dist):\n # by will be broadcast from num_nodes*1 to num_nodes*num_bins_y\n return by.reshape([\n placedb.num_nodes, 1\n ]) * np.square(dist - node_size_y / 2 - 2 * bin_size_y).reshape(\n [placedb.num_nodes, 1])\n\n # should not use integral, but sum; basically sample 5 distances, -2wb, -wb, 0, wb, 2wb; the sum does not change much when shifting cells\n integral_potential_x = npfx1(0) + 2 * npfx1(bin_size_x) + 2 * npfx2(\n 2 * bin_size_x)\n cx = (node_size_x.reshape([placedb.num_nodes, 1]) /\n integral_potential_x).reshape([placedb.num_nodes, 1])\n # should not use integral, but sum; basically sample 5 distances, -2wb, -wb, 0, wb, 2wb; the sum does not change much when shifting cells\n integral_potential_y = npfy1(0) + 2 * npfy1(bin_size_y) + 2 * npfy2(\n 2 * bin_size_y)\n cy = (node_size_y.reshape([placedb.num_nodes, 1]) /\n integral_potential_y).reshape([placedb.num_nodes, 1])\n\n return density_potential.DensityPotential(\n node_size_x=data_collections.node_size_x,\n node_size_y=data_collections.node_size_y,\n 
ax=torch.tensor(ax.ravel(),\n dtype=data_collections.pos[0].dtype,\n device=data_collections.pos[0].device),\n bx=torch.tensor(bx.ravel(),\n dtype=data_collections.pos[0].dtype,\n device=data_collections.pos[0].device),\n cx=torch.tensor(cx.ravel(),\n dtype=data_collections.pos[0].dtype,\n device=data_collections.pos[0].device),\n ay=torch.tensor(ay.ravel(),\n dtype=data_collections.pos[0].dtype,\n device=data_collections.pos[0].device),\n by=torch.tensor(by.ravel(),\n dtype=data_collections.pos[0].dtype,\n device=data_collections.pos[0].device),\n cy=torch.tensor(cy.ravel(),\n dtype=data_collections.pos[0].dtype,\n device=data_collections.pos[0].device),\n bin_center_x=data_collections.bin_center_x_padded(placedb, padding, num_bins_x),\n bin_center_y=data_collections.bin_center_y_padded(placedb, padding, num_bins_y),\n target_density=data_collections.target_density,\n num_movable_nodes=placedb.num_movable_nodes,\n num_terminals=placedb.num_terminals,\n num_filler_nodes=placedb.num_filler_nodes,\n xl=xl,\n yl=yl,\n xh=xh,\n yh=yh,\n bin_size_x=bin_size_x,\n bin_size_y=bin_size_y,\n padding=padding,\n sigma=(1.0 / 16) * placedb.width / bin_size_x,\n delta=2.0)\n\n def build_electric_potential(self, params, placedb, data_collections,\n num_bins_x, num_bins_y, name, region_id=None, fence_regions=None):\n \"\"\"\n @brief e-place electrostatic potential\n @param params parameters\n @param placedb placement database\n @param data_collections a collection of data and variables required for constructing ops\n @param num_bins_x number of bins in horizontal direction\n @param num_bins_y number of bins in vertical direction\n @param name string for printing\n @param fence_regions a [n_subregions, 4] tensor for fence regions potential penalty\n \"\"\"\n bin_size_x = (placedb.xh - placedb.xl) / num_bins_x\n bin_size_y = (placedb.yh - placedb.yl) / num_bins_y\n\n max_num_bins_x = np.ceil(\n (np.amax(placedb.node_size_x[0:placedb.num_movable_nodes]) +\n 2 * bin_size_x) / bin_size_x)\n max_num_bins_y = np.ceil(\n (np.amax(placedb.node_size_y[0:placedb.num_movable_nodes]) +\n 2 * bin_size_y) / bin_size_y)\n max_num_bins = max(int(max_num_bins_x), int(max_num_bins_y))\n logging.info(\n \"%s #bins %dx%d, bin sizes %gx%g, max_num_bins = %d, padding = %d\"\n % (name, num_bins_x, num_bins_y,\n bin_size_x / placedb.row_height,\n bin_size_y / placedb.row_height, max_num_bins, 0))\n if num_bins_x < max_num_bins:\n logging.warning(\"num_bins_x (%d) < max_num_bins (%d)\" %\n (num_bins_x, max_num_bins))\n if num_bins_y < max_num_bins:\n logging.warning(\"num_bins_y (%d) < max_num_bins (%d)\" %\n (num_bins_y, max_num_bins))\n #### for fence region, the target density is different from different regions\n target_density = data_collections.target_density.item() if fence_regions is None else placedb.target_density_fence_region[region_id]\n return electric_potential.ElectricPotential(\n node_size_x=data_collections.node_size_x,\n node_size_y=data_collections.node_size_y,\n bin_center_x=data_collections.bin_center_x_padded(placedb, 0, num_bins_x),\n bin_center_y=data_collections.bin_center_y_padded(placedb, 0, num_bins_y),\n target_density=target_density,\n xl=placedb.xl,\n yl=placedb.yl,\n xh=placedb.xh,\n yh=placedb.yh,\n bin_size_x=bin_size_x,\n bin_size_y=bin_size_y,\n num_movable_nodes=placedb.num_movable_nodes,\n num_terminals=placedb.num_terminals,\n num_filler_nodes=placedb.num_filler_nodes,\n padding=0,\n deterministic_flag=params.deterministic_flag,\n sorted_node_map=data_collections.sorted_node_map,\n 
movable_macro_mask=data_collections.movable_macro_mask,\n fast_mode=params.RePlAce_skip_energy_flag,\n region_id=region_id,\n fence_regions=fence_regions,\n node2fence_region_map=data_collections.node2fence_region_map,\n placedb=placedb)\n\n def initialize_density_weight(self, params, placedb):\n \"\"\"\n @brief compute initial density weight\n @param params parameters\n @param placedb placement database\n \"\"\"\n wirelength = self.op_collections.wirelength_op(\n self.data_collections.pos[0])\n if self.data_collections.pos[0].grad is not None:\n self.data_collections.pos[0].grad.zero_()\n wirelength.backward()\n wirelength_grad_norm = self.data_collections.pos[0].grad.norm(p=1)\n\n self.data_collections.pos[0].grad.zero_()\n\n if len(self.placedb.regions) > 0:\n density_list = []\n density_grad_list = []\n for density_op in self.op_collections.fence_region_density_ops:\n density_i = density_op(self.data_collections.pos[0])\n density_list.append(density_i.data.clone())\n density_i.backward()\n density_grad_list.append(self.data_collections.pos[0].grad.data.clone())\n self.data_collections.pos[0].grad.zero_()\n\n ### record initial density\n self.init_density = torch.stack(density_list)\n ### density weight subgradient preconditioner\n self.density_weight_grad_precond = self.init_density.masked_scatter(self.init_density > 0, 1/self.init_density[self.init_density > 0])\n ### compute u\n self.density_weight_u = self.init_density * self.density_weight_grad_precond\n self.density_weight_u += 0.5 * self.density_quad_coeff * self.density_weight_u ** 2\n ### compute s\n density_weight_s = 1 + self.density_quad_coeff * self.init_density * self.density_weight_grad_precond\n ### compute density grad L1 norm\n density_grad_norm = sum(self.density_weight_u[i] * density_weight_s[i] * density_grad_list[i].norm(p=1) for i in range(density_weight_s.size(0)))\n\n self.density_weight_u *= params.density_weight * wirelength_grad_norm / density_grad_norm\n ### set initial step size for density weight update\n self.density_weight_step_size_inc_low = 1.03\n self.density_weight_step_size_inc_high = 1.04\n self.density_weight_step_size = (self.density_weight_step_size_inc_low - 1) * self.density_weight_u.norm(p=2)\n ### commit initial density weight\n self.density_weight = self.density_weight_u * density_weight_s\n\n else:\n density = self.op_collections.density_op(self.data_collections.pos[0])\n ### record initial density\n self.init_density = density.data.clone()\n density.backward()\n density_grad_norm = self.data_collections.pos[0].grad.norm(p=1)\n\n grad_norm_ratio = wirelength_grad_norm / density_grad_norm\n self.density_weight = torch.tensor(\n [params.density_weight * grad_norm_ratio],\n dtype=self.data_collections.pos[0].dtype,\n device=self.data_collections.pos[0].device)\n\n return self.density_weight\n\n def build_update_density_weight(self, params, placedb, algo=\"overflow\"):\n \"\"\"\n @brief update density weight\n @param params parameters\n @param placedb placement database\n \"\"\"\n ### params for hpwl mode from RePlAce\n ref_hpwl = params.RePlAce_ref_hpwl\n LOWER_PCOF = params.RePlAce_LOWER_PCOF\n UPPER_PCOF = params.RePlAce_UPPER_PCOF\n ### params for overflow mode from elfPlace\n assert algo in {\"hpwl\", \"overflow\"}, logging.error(\"density weight update not supports hpwl mode or overflow mode\")\n\n def update_density_weight_op_hpwl(cur_metric, prev_metric, iteration):\n ### based on hpwl\n with torch.no_grad():\n delta_hpwl = cur_metric.hpwl - prev_metric.hpwl\n if delta_hpwl < 0:\n mu = 
UPPER_PCOF * np.maximum(\n np.power(0.9999, float(iteration)), 0.98)\n else:\n mu = UPPER_PCOF * torch.pow(\n UPPER_PCOF, -delta_hpwl / ref_hpwl).clamp(\n min=LOWER_PCOF, max=UPPER_PCOF)\n self.density_weight *= mu\n\n def update_density_weight_op_overflow(cur_metric, prev_metric, iteration):\n assert self.quad_penalty == True, \"[Error] density weight update based on overflow only works for quadratic density penalty\"\n ### based on overflow\n ### stop updating if a region has lower overflow than stop overflow\n with torch.no_grad():\n density_norm = cur_metric.density * self.density_weight_grad_precond\n density_weight_grad = density_norm + self.density_quad_coeff / 2 * density_norm ** 2\n density_weight_grad /= density_weight_grad.norm(p=2)\n\n self.density_weight_u += self.density_weight_step_size * density_weight_grad\n density_weight_s = 1 + self.density_quad_coeff * density_norm\n\n density_weight_new = (self.density_weight_u * density_weight_s).clamp(max=10)\n\n ### conditional update if this region's overflow is higher than stop overflow\n if(self.update_mask is None):\n self.update_mask = cur_metric.overflow >= self.params.stop_overflow\n else:\n ### restart updating is not allowed\n self.update_mask &= cur_metric.overflow >= self.params.stop_overflow\n self.density_weight.masked_scatter_(self.update_mask, density_weight_new[self.update_mask])\n\n ### update density weight step size\n rate = torch.log(self.density_quad_coeff * density_norm.norm(p=2)).clamp(min=0)\n rate = rate / (1 + rate)\n rate = rate * (self.density_weight_step_size_inc_high - self.density_weight_step_size_inc_low) + self.density_weight_step_size_inc_low\n self.density_weight_step_size *= rate\n\n if not self.quad_penalty and algo == \"overflow\":\n logging.warn(\"quadratic density penalty is disabled, density weight update is forced to be based on HPWL\")\n algo = \"hpwl\"\n if len(self.placedb.regions) == 0 and algo == \"overflow\":\n logging.warn(\"for benchmark without fence region, density weight update is forced to be based on HPWL\")\n algo = \"hpwl\"\n\n update_density_weight_op = {\"hpwl\":update_density_weight_op_hpwl,\n \"overflow\": update_density_weight_op_overflow}[algo]\n\n return update_density_weight_op\n\n def base_gamma(self, params, placedb):\n \"\"\"\n @brief compute base gamma\n @param params parameters\n @param placedb placement database\n \"\"\"\n return params.gamma * (self.bin_size_x + self.bin_size_y)\n\n def update_gamma(self, iteration, overflow, base_gamma):\n \"\"\"\n @brief update gamma in wirelength model\n @param iteration optimization step\n @param overflow evaluated in current step\n @param base_gamma base gamma\n \"\"\"\n ### overflow can have multiple values for fence regions, use their weighted average based on movable node number\n if overflow.numel() == 1:\n overflow_avg = overflow\n else:\n overflow_avg = overflow\n coef = torch.pow(10, (overflow_avg - 0.1) * 20 / 9 - 1)\n self.gamma.data.fill_((base_gamma * coef).item())\n return True\n\n def build_noise(self, params, placedb, data_collections):\n \"\"\"\n @brief add noise to cell locations\n @param params parameters\n @param placedb placement database\n @param data_collections a collection of data and variables required for constructing ops\n \"\"\"\n node_size = torch.cat(\n [data_collections.node_size_x, data_collections.node_size_y],\n dim=0).to(data_collections.pos[0].device)\n\n def noise_op(pos, noise_ratio):\n with torch.no_grad():\n noise = torch.rand_like(pos)\n 
noise.sub_(0.5).mul_(node_size).mul_(noise_ratio)\n # no noise to fixed cells\n noise[placedb.num_movable_nodes:placedb.num_nodes -\n placedb.num_filler_nodes].zero_()\n noise[placedb.num_nodes +\n placedb.num_movable_nodes:2 * placedb.num_nodes -\n placedb.num_filler_nodes].zero_()\n return pos.add_(noise)\n\n return noise_op\n\n def build_precondition(self, params, placedb, data_collections):\n \"\"\"\n @brief preconditioning to gradient\n @param params parameters\n @param placedb placement database\n @param data_collections a collection of data and variables required for constructing ops\n \"\"\"\n\n return PreconditionOp(placedb, data_collections)\n\n def build_route_utilization_map(self, params, placedb, data_collections):\n \"\"\"\n @brief routing congestion map based on current cell locations\n @param params parameters\n @param placedb placement database\n @param data_collections a collection of all data and variables required for constructing the ops\n \"\"\"\n congestion_op = rudy.Rudy(\n netpin_start=data_collections.flat_net2pin_start_map,\n flat_netpin=data_collections.flat_net2pin_map,\n net_weights=data_collections.net_weights,\n xl=placedb.routing_grid_xl,\n yl=placedb.routing_grid_yl,\n xh=placedb.routing_grid_xh,\n yh=placedb.routing_grid_yh,\n num_bins_x=placedb.num_routing_grids_x,\n num_bins_y=placedb.num_routing_grids_y,\n unit_horizontal_capacity=placedb.unit_horizontal_capacity,\n unit_vertical_capacity=placedb.unit_vertical_capacity,\n initial_horizontal_utilization_map=data_collections.\n initial_horizontal_utilization_map,\n initial_vertical_utilization_map=data_collections.\n initial_vertical_utilization_map)\n\n def route_utilization_map_op(pos):\n pin_pos = self.op_collections.pin_pos_op(pos)\n return congestion_op(pin_pos)\n\n return route_utilization_map_op\n\n def build_pin_utilization_map(self, params, placedb, data_collections):\n \"\"\"\n @brief pin density map based on current cell locations\n @param params parameters\n @param placedb placement database\n @param data_collections a collection of all data and variables required for constructing the ops\n \"\"\"\n return pin_utilization.PinUtilization(\n pin_weights=data_collections.pin_weights,\n flat_node2pin_start_map=data_collections.flat_node2pin_start_map,\n node_size_x=data_collections.node_size_x,\n node_size_y=data_collections.node_size_y,\n xl=placedb.routing_grid_xl,\n yl=placedb.routing_grid_yl,\n xh=placedb.routing_grid_xh,\n yh=placedb.routing_grid_yh,\n num_movable_nodes=placedb.num_movable_nodes,\n num_filler_nodes=placedb.num_filler_nodes,\n num_bins_x=placedb.num_routing_grids_x,\n num_bins_y=placedb.num_routing_grids_y,\n unit_pin_capacity=data_collections.unit_pin_capacity,\n pin_stretch_ratio=params.pin_stretch_ratio)\n\n def build_nctugr_congestion_map(self, params, placedb, data_collections):\n \"\"\"\n @brief call NCTUgr for congestion estimation\n \"\"\"\n path = \"%s/%s\" % (params.result_dir, params.design_name())\n return nctugr_binary.NCTUgr(\n aux_input_file=os.path.realpath(params.aux_input),\n param_setting_file=\"%s/../thirdparty/NCTUgr.ICCAD2012/DAC12.set\" %\n (os.path.dirname(os.path.realpath(__file__))),\n tmp_pl_file=\"%s/%s.NCTUgr.pl\" %\n (os.path.realpath(path), params.design_name()),\n tmp_output_file=\"%s/%s.NCTUgr\" %\n (os.path.realpath(path), params.design_name()),\n horizontal_routing_capacities=torch.from_numpy(\n placedb.unit_horizontal_capacities *\n placedb.routing_grid_size_y),\n vertical_routing_capacities=torch.from_numpy(\n 
placedb.unit_vertical_capacities *\n placedb.routing_grid_size_x),\n params=params,\n placedb=placedb)\n\n def build_adjust_node_area(self, params, placedb, data_collections):\n \"\"\"\n @brief adjust cell area according to routing congestion and pin utilization map\n \"\"\"\n total_movable_area = (\n data_collections.node_size_x[:placedb.num_movable_nodes] *\n data_collections.node_size_y[:placedb.num_movable_nodes]).sum()\n total_filler_area = (\n data_collections.node_size_x[-placedb.num_filler_nodes:] *\n data_collections.node_size_y[-placedb.num_filler_nodes:]).sum()\n total_place_area = (total_movable_area + total_filler_area\n ) / data_collections.target_density\n adjust_node_area_op = adjust_node_area.AdjustNodeArea(\n flat_node2pin_map=data_collections.flat_node2pin_map,\n flat_node2pin_start_map=data_collections.flat_node2pin_start_map,\n pin_weights=data_collections.pin_weights,\n xl=placedb.routing_grid_xl,\n yl=placedb.routing_grid_yl,\n xh=placedb.routing_grid_xh,\n yh=placedb.routing_grid_yh,\n num_movable_nodes=placedb.num_movable_nodes,\n num_filler_nodes=placedb.num_filler_nodes,\n route_num_bins_x=placedb.num_routing_grids_x,\n route_num_bins_y=placedb.num_routing_grids_y,\n pin_num_bins_x=placedb.num_routing_grids_x,\n pin_num_bins_y=placedb.num_routing_grids_y,\n total_place_area=total_place_area,\n total_whitespace_area=total_place_area - total_movable_area,\n max_route_opt_adjust_rate=params.max_route_opt_adjust_rate,\n route_opt_adjust_exponent=params.route_opt_adjust_exponent,\n max_pin_opt_adjust_rate=params.max_pin_opt_adjust_rate,\n area_adjust_stop_ratio=params.area_adjust_stop_ratio,\n route_area_adjust_stop_ratio=params.route_area_adjust_stop_ratio,\n pin_area_adjust_stop_ratio=params.pin_area_adjust_stop_ratio,\n unit_pin_capacity=data_collections.unit_pin_capacity)\n\n def build_adjust_node_area_op(pos, route_utilization_map,\n pin_utilization_map):\n return adjust_node_area_op(\n pos, data_collections.node_size_x,\n data_collections.node_size_y, data_collections.pin_offset_x,\n data_collections.pin_offset_y, data_collections.target_density,\n route_utilization_map, pin_utilization_map)\n\n return build_adjust_node_area_op\n\n def build_fence_region_density_op(self, fence_region_list, node2fence_region_map):\n assert type(fence_region_list) == list and len(fence_region_list) == 2, \"Unsupported fence region list\"\n self.data_collections.node2fence_region_map = torch.from_numpy(self.placedb.node2fence_region_map[:self.placedb.num_movable_nodes]).to(fence_region_list[0].device)\n self.op_collections.inner_fence_region_density_op = self.build_electric_potential(\n self.params,\n self.placedb,\n self.data_collections,\n self.num_bins_x,\n self.num_bins_y,\n name=self.name,\n fence_regions=fence_region_list[0],\n fence_region_mask=self.data_collections.node2fence_region_map>1e3) # density penalty for inner cells\n self.op_collections.outer_fence_region_density_op = self.build_electric_potential(\n self.params,\n self.placedb,\n self.data_collections,\n self.num_bins_x,\n self.num_bins_y,\n name=self.name,\n fence_regions = fence_region_list[1],\n fence_region_mask=self.data_collections.node2fence_region_map<1e3) # density penalty for outer cells\n\n def build_multi_fence_region_density_op(self):\n # region 0, ..., region n, non_fence_region\n self.op_collections.fence_region_density_ops = []\n\n for i, fence_region in enumerate(self.data_collections.virtual_macro_fence_region[:-1]):\n 
self.op_collections.fence_region_density_ops.append(self.build_electric_potential(\n self.params,\n self.placedb,\n self.data_collections,\n self.num_bins_x,\n self.num_bins_y,\n name=self.name,\n region_id=i,\n fence_regions=fence_region)\n )\n\n self.op_collections.fence_region_density_ops.append(self.build_electric_potential(\n self.params,\n self.placedb,\n self.data_collections,\n self.num_bins_x,\n self.num_bins_y,\n name=self.name,\n region_id=len(self.placedb.regions),\n fence_regions=self.data_collections.virtual_macro_fence_region[-1])\n )\n def merged_density_op(pos):\n ### stop mask is to stop forward of density\n ### 1 represents stop flag\n res = torch.stack([density_op(pos, mode=\"density\") for density_op in self.op_collections.fence_region_density_ops])\n return res\n\n def merged_density_overflow_op(pos):\n ### stop mask is to stop forward of density\n ### 1 represents stop flag\n overflow_list, max_density_list = [], []\n for density_op in self.op_collections.fence_region_density_ops:\n overflow, max_density = density_op(pos, mode=\"overflow\")\n overflow_list.append(overflow)\n max_density_list.append(max_density)\n overflow_list, max_density_list = torch.stack(overflow_list), torch.stack(max_density_list)\n\n return overflow_list, max_density_list\n\n self.op_collections.fence_region_density_merged_op = merged_density_op\n\n self.op_collections.fence_region_density_overflow_merged_op = merged_density_overflow_op\n return self.op_collections.fence_region_density_ops, self.op_collections.fence_region_density_merged_op, self.op_collections.fence_region_density_overflow_merged_op\n\n\n\n\n" ]
[ [ "torch.zeros", "numpy.square", "torch.cat", "torch.stack", "torch.rand_like", "torch.autograd.Variable", "torch.no_grad", "torch.from_numpy", "torch.tensor", "numpy.amax", "torch.pow" ] ]
NCAR-ASP-2021/hydro_tutorial_asp2021
[ "9045417d10503d1718f50c9194d6d126b4b16ba1" ]
[ "utils.py" ]
[ "import numpy as np\nfrom scipy import optimize\nimport matplotlib.pyplot as plt\n\ndef damop_model(runoffarr, dt, catcharea, kappa, hmax, hmin, wmax, wmin, rmax, sigma):\n print()\n print('damop_model has been called with the constraints:')\n print('wmax = ',wmax,' wmin = ',wmin,' hmax = ',hmax,' hmin = ',hmin)\n #\n # Set parameter used to control computational mode using filter similar to Robert-Asselin\n #\n alpha = 0.1\n #\n # Convert runoff data from units of m to an equivalent inflow in m^3 s^-1\n # Assume that the same runoff rate applies to the entire catchment area for dam\n #\n runoffave = np.mean(runoffarr)\n inflow = catcharea*runoffarr/dt\n n = len(inflow)\n inmax = max(inflow)\n #\n # Apply running mean to the inflow data if required for smoother solution \n # to the optimisation. Averaging window length = nwin.\n #\n nwin = 3\n inflow = running_mean(inflow, nwin)\n #\n # Scale mu so that the sum of generation over time points is approx one.\n # This gives a better numerical solution in the optimisation for max generation\n # by reducing numerical truncation error in the calculation.\n #\n mu = 1.0/(n*sigma*wmax*hmax)\n #\n # The dam management optimization model is set up in the mathematical form of a \n # quadratic programming problem.\n # The only input time series is the inflow to the reservoir.\n # The model solves for the water head at the dam maximizing power generation.\n # This then gives the flow rate through the turbines.\n # However, contraints are applied on maximum and minimum water level \n # and maximum/minimum flow rate through the turbines.\n #\n # The equation for generation can be written in the form\n # \n # G = 0.5*H^T P H + q^T H\n #\n # where H is the head time series we are solving for (a 1-D array) and \n # P is a matrix and q is also a 1-D time series (scaled inflow).\n # The notation ^T means the transpose of the matrix. 
\n # Quadratic programming aims to minimize -G which is equivalent to max(G).\n #\n q = -mu*sigma*inflow\n pmat = np.zeros((n, n))\n cmat = np.zeros((n, n))\n umat = np.zeros((n, n))\n for i in range(n-1):\n pmat[i, i] = -1\n pmat[i, i+1] = 1\n umat[i, i] = 1\n umat[n-1, n-1] = 1\n for j in range(n-2):\n i = j+1\n cmat[i, i-1] = -1 + 0.5*alpha\n cmat[i, i] = -alpha\n cmat[i, i+1] = 1 + 0.5*alpha\n\n pscal = mu*sigma*(kappa/dt)*cmat\n gscal = -0.5*(kappa/dt)*cmat\n #\n # Set constraints on the rate of reservoir volume decrease (W+R-I)\n # based on the parameters Wmax, Rmax and Wmin\n #\n wmaxcons = np.zeros(n)\n wmincons = np.zeros(n)\n for i in range(n):\n wmaxcons[i] = wmax+rmax-inflow[i]\n wmincons[i] = min([wmin, wmax-inflow[i]])\n #\n # Set constraints on the water head at the dam: hmin <= h <= hmax\n #\n hscal = umat\n hmaxcons = np.ones(n)*hmax\n hmincons = np.ones(n)*hmin\n\n vmat = np.concatenate((gscal, -gscal, hscal, -hscal), axis=0)\n vcons = np.concatenate((wmaxcons, -wmincons, hmaxcons, -hmincons))\n print('Now apply quadratic minimization technique')\n\n def gen(x, sign=1.):\n return sign * (0.5*np.dot(x.T, np.dot(pscal, x)) + np.dot(q.T, x))\n\n def jac(x, sign=1.):\n return sign * (np.dot(x.T, pscal) + q.T)\n\n cons = {'type':'ineq',\n 'fun':lambda x: vcons - np.dot(vmat, x),\n 'jac':lambda x: -vmat}\n\n opt = {'disp':True, 'maxiter':100, 'ftol':1e-08}\n\n #\n # Obtain solution by minimization nouter times and average the results\n # to remove noise.\n # Note that the minimize method does not always find a solution consistent \n # with the contraints imposed (depending on the first guess data) and these\n # failed attempts are not included in the average solution.\n #\n nouter = 2\n istsuccess = 1\n ic = -1\n xinit = hmin*(1.0 + 0.01*np.random.randn(n))\n nwin = 5\n xinit = running_mean(xinit, nwin)\n for io in range(nouter):\n #while istsuccess == 1:\n #\n # First guess values for x (water head).\n # Random variation on top of constant level.\n # Smooth to reduce 2-grid noise in input data.\n #\n ic = ic+1\n res_cons = optimize.minimize(gen, xinit, jac=jac, constraints=cons,\n method='SLSQP', options=opt)\n xup = res_cons['x']\n fup = res_cons['fun']\n stexit = res_cons['status']\n\n if stexit != 4:\n if istsuccess == 1:\n x = xup\n x = running_mean(x, nwin)\n xinit = x\n f = fup\n print('Constrained optimization')\n print(res_cons)\n print('iter ',ic,' f = ',f)\n istsuccess = 0\n else:\n if (fup/f) < 2:\n afac = float(ic+1)/nouter\n x = afac*x + (1-afac)*xup\n x = running_mean(x, nwin)\n xinit = x\n f = afac*f + (1-afac)*fup\n print('iter ',ic,' f = ',f)\n if ic == nouter:\n print(nouter,' outer iterations finished without reaching result')\n istsuccess = 1\n # end outer loop\n\n #\n # Optimisation returns the head in variable x\n # Total flow rate ft = W+R is calculated from head and known inflow rate\n # Total flow is diverted into relief flow when it exceeds Wmax\n #\n ft = np.dot(gscal, x) + inflow\n w = np.copy(ft)\n r = np.zeros(n)\n excessflow = np.where(ft > wmax)\n w[excessflow] = wmax\n r[excessflow] = ft[excessflow]-wmax\n gout = -f\n\n return inflow, x, w, r, gout\n\ndef running_mean(xarr, nwin):\n '''\n Apply running mean filter through array\n Inputs:\n xarr - array to filter\n nwin - number of points in the filter window (odd number expected)\n Output:\n xfilt - same length as xarr after application of filter\n '''\n n = len(xarr)\n xfilt = np.copy(xarr)\n ist = int(nwin/2)\n xconv = np.convolve(xarr, np.ones(nwin),'valid')/nwin\n xfilt[ist:n-ist] = 
xconv[:]\n\n return xfilt\n\ndef plot_series(timarr, inflow, h, w, r, powergen, ilabel, hlabel, wlabel, rlabel, plabel, mytitle):\n '''\n Plot the subset time series\n Inputs:\n timarr - time array in datetime format\n y - data time series\n ylabel - string name for data\n mytitle - plot title\n '''\n fig = plt.figure()\n plt.plot(timarr,inflow,label=ilabel)\n plt.plot(timarr,h,label=hlabel)\n plt.plot(timarr,w,label=wlabel)\n plt.plot(timarr,r,label=rlabel)\n plt.plot(timarr,powergen,label=plabel)\n plt.xlabel(\"Days\")\n plt.ylabel(ilabel)\n plt.title(mytitle)\n plt.legend()\n plt.show()" ]
[ [ "numpy.concatenate", "numpy.dot", "numpy.zeros", "matplotlib.pyplot.xlabel", "numpy.copy", "matplotlib.pyplot.plot", "matplotlib.pyplot.title", "matplotlib.pyplot.legend", "numpy.mean", "matplotlib.pyplot.figure", "numpy.ones", "numpy.where", "numpy.random.randn", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.show", "scipy.optimize.minimize" ] ]
ORFMark/Misc_Projects_and_Programs
[ "8984b73f36a58e69c569ea1be4a2c193b7ba1061" ]
[ "Graphs/Lab_2_Vector_Cartesian_Refrence.py" ]
[ "import matplotlib.pyplot as plt\nimport numpy as np\nfrom matplotlib.patches import Ellipse\nells = [];\nxUncertain = 0.0001\nyUncertain = 0\nvectors = [[0.0, .10462 * 9.81,'#345678']\n , [(90.1/360)*(2*3.14), .15441 * 9.81,'#338458']\n , [(235.6/360)*(2*3.14), .18657 * 9.81,'#582341']]\nfig, ax = plt.subplots(subplot_kw = {'aspect': 'equal'})\nfor v in vectors:\n x = v[1] * np.cos(v[0])\n y = v[1]* np.sin(v[0])\n yUncertain = (0.1/(2*3.14) * (v[1]/9.81))\n ax.plot([0,x], [0, y], c = v[2])\n ells.append(Ellipse(xy=[x,y]\n , width=xUncertain, height=yUncertain\n , angle = 0));\nfor i in range(0, len(ells)):\n ax.add_artist(ells[i])\n ells[i].set_alpha(np.random.rand())\n ells[i].set_facecolor([0.2, 0.4, 0.4])\nax.set_xlim(-2, 2)\nax.set_ylim(-2, 2)\nplt.title(\"Figure 3: Plot of the Vectors in the Cartesian plane\")\nplt.show();\n" ]
[ [ "numpy.sin", "numpy.random.rand", "matplotlib.pyplot.title", "matplotlib.pyplot.subplots", "matplotlib.patches.Ellipse", "numpy.cos", "matplotlib.pyplot.show" ] ]
forallx94/Korean-license-plate-Generator
[ "d34a488680b93a4f09b8efc97d5fe33ac5f82d74" ]
[ "Eletronic_Generator_original.py" ]
[ "import os, random\nimport cv2, argparse\nimport numpy as np\n\ndef random_bright(img):\n img = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)\n img = np.array(img, dtype=np.float64)\n random_bright = .5 + np.random.uniform()\n img[:, :, 2] = img[:, :, 2] * random_bright\n img[:, :, 2][img[:, :, 2] > 255] = 255\n img = np.array(img, dtype=np.uint8)\n img = cv2.cvtColor(img, cv2.COLOR_HSV2RGB)\n return img\n\n\nclass ImageGenerator:\n def __init__(self, save_path):\n self.save_path = save_path\n # Plate\n self.plate = cv2.imread(\"plate_b.jpg\")\n\n # loading Number\n file_path = \"./num/\"\n file_list = os.listdir(file_path)\n self.Number = list()\n self.Number_thr = list()\n self.Number_thr1 = list()\n self.number_list = list()\n for file in file_list:\n img_path = os.path.join(file_path, file)\n img = cv2.imread(img_path)\n # thr plate\n _, thr = cv2.threshold(img, 127 , 1 ,cv2.THRESH_BINARY)\n # thr img\n _, thr1 = cv2.threshold(img, 127 , 1 ,cv2.THRESH_BINARY_INV)\n self.Number.append(img)\n self.Number_thr.append(thr)\n self.Number_thr1.append(thr1)\n self.number_list.append(file[0:-4])\n\n # loading Char\n file_path = \"./char1/\"\n file_list = os.listdir(file_path)\n self.Char1 = list()\n self.Char1_thr = list()\n self.Char1_thr1 = list()\n self.char_list = list()\n for file in file_list:\n img_path = os.path.join(file_path, file)\n img = cv2.imread(img_path)\n # thr plate\n _, thr = cv2.threshold(img, 127 , 1 ,cv2.THRESH_BINARY)\n # thr img\n _, thr1 = cv2.threshold(img, 127 , 1 ,cv2.THRESH_BINARY_INV)\n self.Char1.append(img)\n self.Char1_thr.append(thr)\n self.Char1_thr1.append(thr1)\n self.char_list.append(file[0:-4])\n\n def Type_6(self, num, save=False):\n number = [cv2.resize(number, (56, 83)) for number in self.Number]\n number_thr = [cv2.resize(number_thr, (56, 83)) for number_thr in self.Number_thr]\n number_thr1 = [cv2.resize(number_thr1, (56, 83)) for number_thr1 in self.Number_thr1]\n char = [cv2.resize(char1, (60, 83)) for char1 in self.Char1]\n char_thr = [cv2.resize(char1_thr, (60, 83)) for char1_thr in self.Char1_thr]\n char_thr1 = [cv2.resize(char1_thr1, (60, 83)) for char1_thr1 in self.Char1_thr1]\n Plate = cv2.resize(self.plate, (520, 110))\n\n for i, Iter in enumerate(range(num)):\n Plate = cv2.resize(self.plate, (520, 110))\n label = \"Z\"\n # row -> y , col -> x\n row, col = 13, 44 # row + 83, col + 56\n # number 1\n rand_int = random.randint(0, 9)\n label += self.number_list[rand_int]\n Plate[row:row + 83, col:col + 56, :] = Plate[row:row + 83, col:col + 56, :]* number_thr[rand_int] + number[rand_int] * number_thr1[rand_int]\n col += 56\n\n # number 2\n rand_int = random.randint(0, 9)\n label += self.number_list[rand_int]\n Plate[row:row + 83, col:col + 56, :] = Plate[row:row + 83, col:col + 56, :]* number_thr[rand_int] + number[rand_int] * number_thr1[rand_int]\n col += 56\n\n # character 3\n label += self.char_list[i%37]\n Plate[row:row + 83, col:col + 60, :] = Plate[row:row + 83, col:col + 60, :]* char_thr[i%37] + char[i%37] * char_thr1[i%37]\n col += (60 + 36)\n\n # number 4\n rand_int = random.randint(0, 9)\n label += self.number_list[rand_int]\n Plate[row:row + 83, col:col + 56, :] = Plate[row:row + 83, col:col + 56, :]* number_thr[rand_int] + number[rand_int] * number_thr1[rand_int]\n col += 56\n\n # number 5\n rand_int = random.randint(0, 9)\n label += self.number_list[rand_int]\n Plate[row:row + 83, col:col + 56, :] = Plate[row:row + 83, col:col + 56, :]* number_thr[rand_int] + number[rand_int] * number_thr1[rand_int]\n col += 56\n\n # number 6\n rand_int = 
random.randint(0, 9)\n label += self.number_list[rand_int]\n Plate[row:row + 83, col:col + 56, :] = Plate[row:row + 83, col:col + 56, :]* number_thr[rand_int] + number[rand_int] * number_thr1[rand_int]\n col += 56\n\n # number 7\n rand_int = random.randint(0, 9)\n label += self.number_list[rand_int]\n Plate[row:row + 83, col:col + 56, :] = Plate[row:row + 83, col:col + 56, :]* number_thr[rand_int] + number[rand_int] * number_thr1[rand_int]\n col += 56\n Plate = random_bright(Plate)\n if save:\n cv2.imwrite(self.save_path + label + \".jpg\", Plate)\n else:\n cv2.imshow(label, Plate)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-i\", \"--img_dir\", help=\"save image directory\",\n type=str, default=\"../CRNN/DB/\")\nparser.add_argument(\"-n\", \"--num\", help=\"number of image\",\n type=int)\nparser.add_argument(\"-s\", \"--save\", help=\"save or imshow\",\n type=bool, default=True)\nargs = parser.parse_args()\n\n\nimg_dir = args.img_dir\nA = ImageGenerator(img_dir)\n\nnum_img = args.num\nSave = args.save\n\nA.Type_6(num_img, save=Save)\nprint(\"Type 6 finish\")" ]
[ [ "numpy.array", "numpy.random.uniform" ] ]
blynotes/CS6301_SDN
[ "7db92b6273222ddce28aaa238678a40fbd435b25" ]
[ "Code/src/old/sparkStatisticalMethods.py" ]
[ "from __future__ import print_function\n\n# https://spark.apache.org/docs/2.3.0/api/python/pyspark.html\nfrom pyspark import SparkConf, SparkContext\n# https://spark.apache.org/docs/2.3.0/api/python/pyspark.streaming.html\nfrom pyspark.streaming import StreamingContext\n# https://spark.apache.org/docs/2.3.0/api/python/pyspark.streaming.html#module-pyspark.streaming.kafka\nfrom pyspark.streaming.kafka import KafkaUtils\nfrom pyspark.mllib.stat import Statistics\nimport numpy as np\n\nfrom elasticsearch import Elasticsearch\nimport elasticsearch.helpers\n\nimport json\nimport sys\nimport requests\n\ndef readDataFromES():\n\tes = Elasticsearch([{'host': 'localhost', 'port': 9200}])\n\tresults_gen = elasticsearch.helpers.scan(es, index='netflowrepo', doc_type='entry', query={\"query\": {\"match_all\": {}}})\n\t\n\tresults = list(results_gen)\n\t\n\tsumOfFlows_list = []\n\tsumOfBytes_list = []\n\tuniqDstIPs_list = []\n\tuniqDstPorts_list = []\n\n\tfor row in results:\n\t\tsumOfFlows_list.append(row['_source']['sumOfFlows'])\n\t\tsumOfBytes_list.append(row['_source']['sumOfBytes'])\n\t\tuniqDstIPs_list.append(row['_source']['uniqDstIPs'])\n\t\tuniqDstPorts_list.append(row['_source']['uniqDstPorts'])\n\n\t\n\t# Convert data to numpy arrays.\n\tnp_Flows = np.array(sumOfFlows_list)\n\tnp_Bytes = np.array(sumOfBytes_list)\n\tnp_DstIPs = np.array(uniqDstIPs_list)\n\tnp_DstPorts = np.array(uniqDstPorts_list)\n\n\t# Convert data into Matrix. Each feature is in a column.\n\ttmp1 = np.concatenate((np_Flows.reshape((-1,1)), np_Bytes.reshape((-1,1))), axis=1)\n\ttmp2 = np.concatenate((tmp1, np_DstIPs.reshape((-1,1))), axis=1)\n\ttmp3 = np.concatenate((tmp2, np_DstPorts.reshape((-1,1))), axis=1)\n\tmat = sc.parallelize(tmp3.tolist())\n\t\n\tsummary = Statistics.colStats(mat)\n\t\n\tprint(\"count =\", summary.count())\n\tprint(\"mean =\", summary.mean())\n\tprint(\"min =\", summary.min())\n\tprint(\"max =\", summary.max())\n\tprint(\"variance =\", summary.variance())\n\t\n\tmean = summary.mean()\n\tmax = summary.max()\n\tstddev = np.sqrt(summary.variance())\n\t\n\treturn (mean, max, stddev)\n\t\n\t\ndef sendToONOS(anomalies):\n\t# First get a list of all hosts.\n\t# Create dictionary mapping from IP to host switch.\n\tipToSwitchMap = {}\n\tipToSwitchPortMap = {}\n\t\n\thosts = requests.get('http://10.28.34.39:8181/onos/v1/hosts', auth=('onos', 'rocks'))\n\t\n\thost_json = hosts.json()\n\tfor host in host_json['hosts']:\n\t\tIP = host['ipAddresses'][0]\n\t\tswitch = host['locations'][0]['elementId']\n\t\t\n\t\tipToSwitchMap[IP] = switch\n\t\n\t# For each anomaly IP, send a request to ONOS to drop traffic for that srcAddr from the\n\t# switch the bad device is connected on.\n\tfor entry in anomalies:\n\t\tprint(\"Send to ONOS: Need to block {0}\".format(entry[\"srcAddr\"]))\n\t\t# Configure parameters needed for POST request\n\t\tblockData = {\n\t\t\t\"priority\": 40000,\n\t\t\t\"timeout\": 0,\n\t\t\t\"isPermanent\": \"true\",\n\t\t\t\"deviceId\": ipToSwitchMap[entry[\"srcAddr\"]],\n\t\t\t\"treatment\": {}, # blank treatment means drop traffic.\n\t\t\t\"selector\": {\n\t\t\t\t\"criteria\": [\n\t\t\t\t\t{\n\t\t\t\t\t\t\"type\": \"ETH_TYPE\",\n\t\t\t\t\t\t\"ethType\": \"0x0800\" # IPv4 Traffic.\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\t\"type\": \"IPV4_SRC\",\n\t\t\t\t\t\t\"ip\": \"{0}/32\".format(entry[\"srcAddr\"]) # Must include subnet mask.\n\t\t\t\t\t}\n\t\t\t\t]\n\t\t\t}\n\t\t}\n\t\t\n\t\turlToPost = 
\"http://10.28.34.39:8181/onos/v1/flows/{0}?appId=org.onosproject.fwd\".format(ipToSwitchMap[entry[\"srcAddr\"]])\n\t\tprint(\"urlToPost = {0}\".format(urlToPost))\n\t\tresp = requests.post(urlToPost, data=json.dumps(blockData), auth=('onos', 'rocks'))\n\t\tprint(\"response is {0}\".format(resp))\n\t\t\n\n\nif __name__ == \"__main__\":\n\t# SparkContext represents connection to a Spark cluster.\n\tconf = SparkConf()\n\tconf.setAppName(\"Spark Statistical Methods App\")\n\tconf.setMaster('local[2]')\n\tsc = SparkContext(conf=conf)\n\tsc.setLogLevel(\"WARN\")\n\t\n\t# Read data from elasticsearch and return mean, max, and stddev.\n\tmean, max, stddev = readDataFromES()\n\t\n\t# Determine the numStdDevAboveMean for each feature.\n\t# We want to increase it until the max value is within that range.\n\tnumStdDevAboveMean_Flows = 2\n\twhile (max[0] > mean[0] + (numStdDevAboveMean_Flows * stddev[0])):\n\t\t# # # print(\"Increasing numStdDevAboveMean_Flows\")\n\t\tnumStdDevAboveMean_Flows += 1\n\tupperThreshold_Flows = mean[0] + numStdDevAboveMean_Flows * stddev[0]\n\tprint(\"max[0] = {0}\".format(max[0]))\n\tprint(\"mean[0] = {0}\".format(mean[0]))\n\tprint(\"stddev[0] = {0}\".format(stddev[0]))\n\tprint(\"numStdDevAboveMean_Flows = {0}\".format(numStdDevAboveMean_Flows))\n\tprint(\"upperThreshold_Flows = {0}\".format(upperThreshold_Flows))\n\t\n\tnumStdDevAboveMean_Bytes = 2\n\twhile (max[1] > mean[1] + (numStdDevAboveMean_Bytes * stddev[1])):\n\t\t# # # print(\"Increasing numStdDevAboveMean_Bytes\")\n\t\tnumStdDevAboveMean_Bytes += 1\n\tupperThreshold_Bytes = mean[1] + numStdDevAboveMean_Bytes * stddev[1]\n\tprint(\"max[1] = {0}\".format(max[1]))\n\tprint(\"mean[1] = {0}\".format(mean[1]))\n\tprint(\"stddev[1] = {0}\".format(stddev[1]))\n\tprint(\"numStdDevAboveMean_Bytes = {0}\".format(numStdDevAboveMean_Bytes))\n\tprint(\"upperThreshold_Bytes = {0}\".format(upperThreshold_Bytes))\n\t\t\n\tnumStdDevAboveMean_DstIPs = 2\n\twhile (max[2] > mean[2] + (numStdDevAboveMean_DstIPs * stddev[2])):\n\t\t# # # print(\"Increasing numStdDevAboveMean_DstIPs\")\n\t\tnumStdDevAboveMean_DstIPs += 1\n\tupperThreshold_DstIPs = mean[2] + numStdDevAboveMean_DstIPs * stddev[2]\n\tprint(\"max[2] = {0}\".format(max[2]))\n\tprint(\"mean[2] = {0}\".format(mean[2]))\n\tprint(\"stddev[2] = {0}\".format(stddev[2]))\n\tprint(\"numStdDevAboveMean_DstIPs = {0}\".format(numStdDevAboveMean_DstIPs))\n\tprint(\"upperThreshold_DstIPs = {0}\".format(upperThreshold_DstIPs))\n\t\t\n\tnumStdDevAboveMean_DstPorts = 2\n\twhile (max[3] > mean[3] + (numStdDevAboveMean_DstPorts * stddev[3])):\n\t\t# # # print(\"Increasing numStdDevAboveMean_DstPorts\")\n\t\tnumStdDevAboveMean_DstPorts += 1\n\tupperThreshold_DstPorts = mean[3] + numStdDevAboveMean_DstPorts * stddev[3]\n\tprint(\"max[3] = {0}\".format(max[3]))\n\tprint(\"mean[3] = {0}\".format(mean[3]))\n\tprint(\"stddev[3] = {0}\".format(stddev[3]))\n\tprint(\"numStdDevAboveMean_DstPorts = {0}\".format(numStdDevAboveMean_DstPorts))\n\tprint(\"upperThreshold_DstPorts = {0}\".format(upperThreshold_DstPorts))\n\t\n\t\n\t# StreamingContext represents connection to a Spark cluster from existing SparkContext.\n\tssc = StreamingContext(sc, 60) # the number indicates how many seconds each batch lasts.\n\t\n\t# Creates an input stream that pulls events from Kafka.\n\tkvs = KafkaUtils.createStream(ssc, \"streamsetApp:2181\", \"spark-streaming-consumer\", {\"NETFLOW\":1})\n\tparsed = kvs.map(lambda x: json.loads(x[1]))\n\t\n\t# Get only elements that are needed and rename to make it 
clear.\n\tnetflow_dict = parsed.map(lambda x: ({'srcAddr': x['srcaddr_s'], 'srcPort': x['srcport'], 'dstAddr': x['dstaddr_s'], 'dstPort': x['dstport'], 'tcpFlags': x['tcp_flags'], 'protocol': x['proto'], 'timestampStart': x['first'], 'timestampEnd': x['last'], 'numBytes': x['dOctets'], 'numFlows': x['count']}))\n\t# # # netflow_dict.pprint()\n\t\n\t# Get Sum of Flows sent from Source IP in window.\n\tsumOfFlows = netflow_dict.filter(lambda e: \"srcAddr\" in e).map(lambda e: (e[\"srcAddr\"], e[\"numFlows\"])).reduceByKey(lambda x, y: x + y)\n\t\n\t# Get sum of Bytes sent from Source IP in window.\n\tsumOfBytes = netflow_dict.filter(lambda e: \"srcAddr\" in e).map(lambda e: (e[\"srcAddr\"], e[\"numBytes\"])).reduceByKey(lambda x, y: x + y)\n\t\n\t# Count of unique dest IP that source IP talked to in window.\n\t# First map gets unique src/dst pairs. Second map reduces just to srcAddr and counts number of uniq dstAddr.\n\tuniqDstIPs = netflow_dict.filter(lambda e: \"srcAddr\" in e).map(lambda e: (e[\"srcAddr\"], e[\"dstAddr\"])).countByValue().map(lambda e: e[0][0]).countByValue()\n\t\n\t# Count of unique destination ports that source IP talked to in window.\n\t# First map gets unique src/dst pairs. Second map reduces just to srcAddr and counts number of uniq dstPort.\n\tuniqDstPorts = netflow_dict.filter(lambda e: \"srcAddr\" in e).map(lambda e: (e[\"srcAddr\"], e[\"dstPort\"])).countByValue().map(lambda e: e[0][0]).countByValue()\n\t\n\t\n\t# Filter and get all inputs that exceed their respective thresholds.\n\t# # # flowsAboveThreshold = sumOfFlows.filter(lambda e: e[1] > upperThreshold_Flows) # Remove since giving false positives.\n\tbytesAboveThreshold = sumOfBytes.filter(lambda e: e[1] > upperThreshold_Bytes)\n\tdstIPsAboveThreshold = uniqDstIPs.filter(lambda e: e[1] > upperThreshold_DstIPs)\n\tdstPortsAboveThreshold = uniqDstPorts.filter(lambda e: e[1] > upperThreshold_DstPorts)\n\t\n\t# Join data together, joining by srcAddr. Need to do fullOuterJoin since some srcAddr may not be in some RDDs.\n\t# Unlikely that a srcAddr exceeded threshold in all of the features.\n\tjoin1 = bytesAboveThreshold.fullOuterJoin(dstIPsAboveThreshold)\n\tjoin2 = join1.fullOuterJoin(dstPortsAboveThreshold)\n\t\n\t# Map into format: (SrcAddr, sumOfBytes, uniqDstIPs, uniqDstPorts).\n\tjoined = join2.map(lambda e: ({\"srcAddr\": e[0], \"sumOfBytes\": e[1][0][0], \"uniqDstIPs\": e[1][0][1], \"uniqDstPorts\": e[1][1]}))\n\tjoined.pprint(12) # Show for all 12 IPs.\n\t\n\t\n\t# Send srcAddr to ONOS to block.\n\tjoined.foreachRDD(lambda rdd: rdd.foreachPartition(sendToONOS))\n\t\n\t\t\n\t\n\t# Start the execution of streams.\n\tssc.start()\n\t\n\t# Wait for execution to stop.\n\tssc.awaitTermination()\n" ]
[ [ "numpy.array" ] ]
lfrommelt/monty
[ "e8cabf0e4ac01ab3d97eecee5e699139076d6544" ]
[ "2018/11/code.py" ]
[ "import numpy as np\n\n\ndef _UnitStep(x):\n return np.heaviside(x.real, 1)\n\ndef _x(t):\n return (((-1/4 * np.sin(10/7 - 23 * t) -\n 3/10 * np.sin(4/3 - 22 * t) -\n 2/5 * np.sin(7/5 - 19 * t) -\n 1/5 * np.sin(7/5 - 16 * t) -\n 3/7 * np.sin(10/7 - 15 * t) -\n 3/8 * np.sin(13/9 - 9 * t) -\n 19/13 * np.sin(11/7 - 3 * t) +\n 222/5 * np.sin(t + 11/7) +\n 41/2 * np.sin(2 * t + 11/7) +\n 34/9 * np.sin(4 * t + 11/7) +\n 1/3 * np.sin(5 * t + 8/5) +\n 3/8 * np.sin(6 * t + 8/5) +\n 12/7 * np.sin(7 * t + 13/8) +\n 11/7 * np.sin(8 * t + 13/8) +\n 1/4 * np.sin(10 * t + 20/13) +\n 2/9 * np.sin(11 * t + 16/9) +\n 3/8 * np.sin(12 * t + 8/5) +\n 1/3 * np.sin(13 * t + 7/4) +\n 1/2 * np.sin(14 * t + 17/10) +\n 5/7 * np.sin(17 * t + 17/10) +\n 1/28 * np.sin(18 * t + 9/2) +\n 1/2 * np.sin(20 * t + 12/7) +\n 3/7 * np.sin(21 * t + 16/9) +\n 6/11 * np.sin(24 * t + 7/4) -\n 979/9) * _UnitStep(51 * np.pi - t) * _UnitStep(t - 47 * np.pi) +\n (-6/5 * np.sin(14/9 - 22 * t) -\n 1/9 * np.sin(7/5 - 19 * t) -\n 9/8 * np.sin(14/9 - 18 * t) -\n 1/14 * np.sin(15/11 - 15 * t) -\n 6/5 * np.sin(11/7 - 12 * t) -\n 7/6 * np.sin(11/7 - 8 * t) -\n 29/10 * np.sin(11/7 - 6 * t) -\n 104/3 * np.sin(11/7 - 2 * t) +\n 415/18 * np.sin(t + 11/7) +\n 71/18 * np.sin(3 * t + 11/7) +\n 19/8 * np.sin(4 * t + 33/7) +\n 22/21 * np.sin(5 * t + 8/5) +\n 3/8 * np.sin(7 * t + 61/13) +\n 5/9 * np.sin(9 * t + 11/7) +\n 1/8 * np.sin(10 * t + 14/3) +\n 4/7 * np.sin(11 * t + 11/7) +\n 4/11 * np.sin(13 * t + 14/3) +\n 1/7 * np.sin(14 * t + 14/3) +\n 2/7 * np.sin(16 * t + 5/3) +\n 1/6 * np.sin(17 * t + 5/3) +\n 6/7 * np.sin(20 * t + 8/5) +\n 1/7 * np.sin(21 * t + 5/3) +\n 1/6 * np.sin(23 * t + 8/5) -\n 2765/8) * _UnitStep(47 * np.pi - t) * _UnitStep(t - 43 * np.pi) +\n (1189/22 * np.sin(t + 11/7) +\n 3/4 * np.sin(2 * t + 13/8) +\n 11/2 * np.sin(3 * t + 8/5) +\n 2/7 * np.sin(4 * t + 17/7) +\n 22/9 * np.sin(5 * t + 18/11) +\n 1/4 * np.sin(6 * t + 17/7) +\n 16/17 * np.sin(7 * t + 20/11) +\n 1/5 * np.sin(8 * t + 29/9) -\n 1627/7) * _UnitStep(43 * np.pi - t) * _UnitStep(t - 39 * np.pi) +\n (-3/7 * np.sin(1/18 - 5 * t) -\n 3/4 * np.sin(1/2 - 3 * t) +\n 109/9 * np.sin(t + 13/10) +\n 5/8 * np.sin(2 * t + 11/3) +\n 5/9 * np.sin(4 * t + 10/3) +\n 3/10 * np.sin(6 * t + 21/8) +\n 2/9 * np.sin(7 * t + 2/3) +\n 1/4 * np.sin(8 * t + 23/8) -\n 1190/9) * _UnitStep(39 * np.pi - t) * _UnitStep(t - 35 * np.pi) +\n (188/21 * np.sin(t + 27/28) +\n 2/5 * np.sin(2 * t + 17/6) +\n 2/3 * np.sin(3 * t + 91/23) +\n 3/8 * np.sin(4 * t + 53/18) +\n 2/11 * np.sin(5 * t + 1/7) -\n 369) * _UnitStep(35 * np.pi - t) * _UnitStep(t - 31 * np.pi) +\n (-8/9 * np.sin(1/10 - 12 * t) -\n 34/9 * np.sin(10/9 - 6 * t) -\n 137/10 * np.sin(5/7 - 2 * t) +\n 26/5 * np.sin(t + 13/4) +\n 118/5 * np.sin(3 * t + 11/8) +\n 43/8 * np.sin(4 * t + 13/7) +\n 49/6 * np.sin(5 * t + 11/12) +\n 22/5 * np.sin(7 * t + 13/4) +\n 17/16 * np.sin(8 * t + 1/7) +\n 5/4 * np.sin(9 * t + 1/4) +\n 5/7 * np.sin(10 * t + 17/5) +\n 29/15 * np.sin(11 * t + 5/6) -\n 1915/8) * _UnitStep(31 * np.pi - t) * _UnitStep(t - 27 * np.pi) +\n (-2/7 * np.sin(10/7 - 7 * t) -\n np.sin(1/27 - 4 * t) +\n 68/7 * np.sin(t + 44/15) +\n 76/9 * np.sin(2 * t + 37/10) +\n 30/7 * np.sin(3 * t + 1) +\n 8/9 * np.sin(5 * t + 3/2) +\n 4/5 * np.sin(6 * t + 31/8) +\n 3/7 * np.sin(8 * t + 10/3) +\n 6/13 * np.sin(9 * t + 8/7) +\n 1/3 * np.sin(10 * t + 31/9) -\n 2135/9) * _UnitStep(27 * np.pi - t) * _UnitStep(t - 23 * np.pi) +\n (-3/8 * np.sin(1/4 - 23 * t) -\n 3/5 * np.sin(1/8 - 22 * t) -\n 13/8 * np.sin(5/4 - 20 * t) -\n 9/7 * np.sin(3/2 - 16 * t) -\n 41/5 * 
np.sin(4/3 - 4 * t) +\n 768/7 * np.sin(t + 11/5) +\n 109/5 * np.sin(2 * t + 16/7) +\n 150/13 * np.sin(3 * t + 11/6) +\n 33/7 * np.sin(5 * t + 97/24) +\n 23/4 * np.sin(6 * t + 5/7) +\n 69/7 * np.sin(7 * t + 9/8) +\n 32/5 * np.sin(8 * t + 21/5) +\n 7/6 * np.sin(9 * t + 22/9) +\n 28/5 * np.sin(10 * t + 5/6) +\n 43/10 * np.sin(11 * t + 26/7) +\n 14/9 * np.sin(12 * t + 5/11) +\n 13/9 * np.sin(13 * t + 40/9) +\n 11/6 * np.sin(14 * t + 2/5) +\n 3/2 * np.sin(15 * t + 17/10) +\n 7/11 * np.sin(17 * t + 4/3) +\n 3/8 * np.sin(18 * t + 31/10) +\n 4/7 * np.sin(19 * t + 14/9) +\n 6/5 * np.sin(21 * t + 17/7) +\n 4/7 * np.sin(24 * t + 27/8) +\n 1006/11) * _UnitStep(23 * np.pi - t) * _UnitStep(t - 19 * np.pi) +\n (-63/8 * np.sin(2/7 - 8 * t) -\n 38/13 * np.sin(11/9 - 6 * t) -\n 14/5 * np.sin(1/17 - 4 * t) +\n 77/9 * np.sin(t + 1/2) +\n 52/7 * np.sin(2 * t + 10/3) +\n 22/9 * np.sin(3 * t + 76/17) +\n 21/8 * np.sin(5 * t + 26/7) +\n 3 * np.sin(7 * t + 15/8) +\n 64/7 * np.sin(9 * t + 57/14) +\n 6 * np.sin(10 * t + 17/6) -\n 544/7) * _UnitStep(19 * np.pi - t) * _UnitStep(t - 15 * np.pi) +\n (-37/10 * np.sin(4/7 - 5 * t) -\n 3 * np.sin(3/7 - 3 * t) +\n 24/7 * np.sin(t + 7/6) +\n 9/7 * np.sin(2 * t + 2/5) +\n 31/15 * np.sin(4 * t + 37/8) +\n 9/5 * np.sin(6 * t + 12/5) +\n 59/12 * np.sin(7 * t + 13/6) +\n 15/7 * np.sin(8 * t + 25/8) +\n 134/15 * np.sin(9 * t + 7/3) +\n 73/8 * np.sin(10 * t + 1/5) -\n 4406/11) * _UnitStep(15 * np.pi - t) * _UnitStep(t - 11 * np.pi) +\n (236/7 * np.sin(t + 6/5) +\n 1/2 * np.sin(2 * t + 47/12) -\n 627/5) * _UnitStep(11 * np.pi - t) * _UnitStep(t - 7 * np.pi) +\n (69/2 * np.sin(t + 5/6) -\n 715/2) * _UnitStep(7 * np.pi - t) * _UnitStep(t - 3 * np.pi) +\n (-19/9 * np.sin(6/5 - 21 * t) -\n 37/10 * np.sin(7/9 - 19 * t) -\n 23/8 * np.sin(1 - 17 * t) -\n 16/3 * np.sin(7/6 - 16 * t) -\n 29/5 * np.sin(1/5 - 9 * t) -\n 919/11 * np.sin(1/7 - 3 * t) +\n 1573/6 * np.sin(t + 91/45) +\n 214/5 * np.sin(2 * t + 33/8) +\n 421/14 * np.sin(4 * t + 13/8) +\n 61/6 * np.sin(5 * t + 19/5) +\n 401/16 * np.sin(6 * t + 43/14) +\n 511/51 * np.sin(7 * t + 35/8) +\n 144/7 * np.sin(8 * t + 5/6) +\n 137/10 * np.sin(10 * t + 25/13) +\n 18/7 * np.sin(11 * t + 15/7) +\n 17/9 * np.sin(12 * t + 41/9) +\n 9/7 * np.sin(13 * t + 13/7) +\n 29/10 * np.sin(14 * t + 22/7) +\n 25/8 * np.sin(15 * t + 1/4) +\n 12/5 * np.sin(18 * t + 11/8) +\n 14/5 * np.sin(20 * t + 27/7) +\n 13/8 * np.sin(22 * t + 12/7) +\n 7/6 * np.sin(23 * t + 7/9) +\n 26/11 * np.sin(24 * t + 23/7) -\n 1891/8) * _UnitStep(3 * np.pi - t) * _UnitStep(t + np.pi)) * _UnitStep(np.sqrt(np.sign(np.sin(t/2))))).real\n\ndef _y(t):\n return (((-8/11 * np.sin(11/8 - 22 * t) -\n 1/2 * np.sin(10/7 - 21 * t) +\n 67/6 * np.sin(t + 33/7) +\n 1478/29 * np.sin(2 * t + 11/7) +\n 3/5 * np.sin(3 * t + 30/7) +\n 26/3 * np.sin(4 * t + 11/7) +\n 1/6 * np.sin(5 * t + 13/9) +\n 30/29 * np.sin(6 * t + 8/5) +\n 2/5 * np.sin(7 * t + 14/3) +\n 88/29 * np.sin(8 * t + 8/5) +\n 1/4 * np.sin(9 * t + 31/7) +\n 11/8 * np.sin(10 * t + 8/5) +\n 1/16 * np.sin(11 * t + 9/2) +\n 1/12 * np.sin(12 * t + 5/4) +\n 1/10 * np.sin(13 * t + 25/11) +\n 11/8 * np.sin(14 * t + 18/11) +\n 2/7 * np.sin(15 * t + 37/8) +\n 1/6 * np.sin(16 * t + 11/8) +\n 2/9 * np.sin(17 * t + 5/3) +\n 1/5 * np.sin(18 * t + 17/10) +\n 1/13 * np.sin(19 * t + 19/8) +\n 23/24 * np.sin(20 * t + 12/7) +\n 7/11 * np.sin(23 * t + 9/5) +\n 9/7 * np.sin(24 * t + 7/4) -\n 1538/7) * _UnitStep(51 * np.pi - t) * _UnitStep(t - 47 * np.pi) +\n (-2/7 * np.sin(20/13 - 23 * t) -\n 1/6 * np.sin(3/2 - 20 * t) -\n 5/7 * np.sin(20/13 - 17 * t) -\n 1/9 * 
np.sin(20/13 - 11 * t) -\n 1/6 * np.sin(13/9 - 9 * t) -\n 19/6 * np.sin(17/11 - 3 * t) +\n 263/5 * np.sin(t + 11/7) +\n 614/15 * np.sin(2 * t + 11/7) +\n 87/10 * np.sin(4 * t + 11/7) +\n 1/7 * np.sin(5 * t + 11/8) +\n 19/11 * np.sin(6 * t + 11/7) +\n 7/5 * np.sin(7 * t + 11/7) +\n 4/3 * np.sin(8 * t + 8/5) +\n 9/5 * np.sin(10 * t + 14/9) +\n 4/7 * np.sin(12 * t + 8/5) +\n 3/11 * np.sin(13 * t + 3/2) +\n 1/8 * np.sin(14 * t + 22/15) +\n 1/9 * np.sin(15 * t + 12/7) +\n 6/5 * np.sin(16 * t + 11/7) +\n 2/9 * np.sin(18 * t + 11/7) +\n 3/5 * np.sin(19 * t + 8/5) +\n 1/26 * np.sin(21 * t + 15/11) +\n 6/7 * np.sin(22 * t + 8/5) -\n 1867/8) * _UnitStep(47 * np.pi - t) * _UnitStep(t - 43 * np.pi) +\n (118/39 * np.sin(t + 11/7) +\n 40/7 * np.sin(2 * t + 33/7) +\n 49/25 * np.sin(3 * t + 14/3) +\n 12/5 * np.sin(4 * t + 8/5) +\n 1/9 * np.sin(5 * t + 32/13) +\n 5/2 * np.sin(6 * t + 13/8) +\n 2/5 * np.sin(7 * t + 22/5) +\n 3/4 * np.sin(8 * t + 7/4) -\n 143/10) * _UnitStep(43 * np.pi - t) * _UnitStep(t - 39 * np.pi) +\n (-1/8 * np.sin(2/3 - 8 * t) -\n 1/2 * np.sin(7/5 - 2 * t) -\n 246/19 * np.sin(1/7 - t) +\n 1/4 * np.sin(3 * t + 33/16) +\n 1/6 * np.sin(4 * t + 17/6) +\n 1/5 * np.sin(5 * t + 31/7) +\n 1/11 * np.sin(6 * t + 50/17) +\n 1/8 * np.sin(7 * t + 30/7) +\n 665/6) * _UnitStep(39 * np.pi - t) * _UnitStep(t - 35 * np.pi) +\n (-119/10 * np.sin(7/15 - t) +\n 2/11 * np.sin(2 * t + 25/7) +\n 2/9 * np.sin(3 * t + 5/8) +\n 1/5 * np.sin(4 * t + 33/7) +\n 1/4 * np.sin(5 * t + 19/10) +\n 1023/10) * _UnitStep(35 * np.pi - t) * _UnitStep(t - 31 * np.pi) +\n (-1/7 * np.sin(2/7 - 12 * t) -\n 1/8 * np.sin(3/10 - 5 * t) +\n 25/7 * np.sin(t + 77/17) +\n 355/59 * np.sin(2 * t + 41/40) +\n 27/5 * np.sin(3 * t + 46/15) +\n 33/7 * np.sin(4 * t + 11/3) +\n 27/10 * np.sin(6 * t + 13/9) +\n 5/11 * np.sin(7 * t + 11/5) +\n 5/8 * np.sin(8 * t + 3) +\n 8/5 * np.sin(9 * t + 16/15) +\n 16/15 * np.sin(10 * t + 1/7) +\n 7/9 * np.sin(11 * t + 12/5) -\n 862/7) * _UnitStep(31 * np.pi - t) * _UnitStep(t - 27 * np.pi) +\n (-1/3 * np.sin(5/4 - 8 * t) -\n 2/5 * np.sin(5/9 - 7 * t) -\n 5/7 * np.sin(11/8 - 5 * t) -\n 7/2 * np.sin(15/14 - 2 * t) +\n 29/8 * np.sin(t + 41/10) +\n 11/6 * np.sin(3 * t + 13/3) +\n 7/6 * np.sin(4 * t + 1/27) +\n 2/7 * np.sin(6 * t + 8/7) +\n 1/9 * np.sin(9 * t + 9/5) +\n 2/7 * np.sin(10 * t + 1/10) +\n 201/5) * _UnitStep(27 * np.pi - t) * _UnitStep(t - 23 * np.pi) +\n (-4/11 * np.sin(8/9 - 12 * t) -\n 10/7 * np.sin(19/13 - 10 * t) +\n 623/3 * np.sin(t + 10/7) +\n 39/5 * np.sin(2 * t + 10/11) +\n 251/9 * np.sin(3 * t + 4/3) +\n 5/7 * np.sin(4 * t + 4/3) +\n 61/6 * np.sin(5 * t + 4/3) +\n 14/9 * np.sin(6 * t + 23/7) +\n 76/25 * np.sin(7 * t + 9/7) +\n 3/4 * np.sin(8 * t + 1/4) +\n 19/5 * np.sin(9 * t + 3/2) +\n 17/6 * np.sin(11 * t + 6/5) +\n 13/8 * np.sin(13 * t + 14/13) +\n 8/9 * np.sin(14 * t + 17/6) +\n 24/25 * np.sin(15 * t + 1/2) +\n 1/6 * np.sin(16 * t + 13/8) +\n 5/8 * np.sin(17 * t + 1) +\n 1/7 * np.sin(18 * t + 18/17) +\n 6/7 * np.sin(19 * t + 1) +\n 1/4 * np.sin(20 * t + 4/9) +\n 2/7 * np.sin(21 * t + 7/5) +\n 1/3 * np.sin(22 * t + 8/7) +\n 2/5 * np.sin(23 * t + 1/26) +\n 2/11 * np.sin(24 * t + 8/7) -\n 243/8) * _UnitStep(23 * np.pi - t) * _UnitStep(t - 19 * np.pi) +\n (-111/10 * np.sin(4/5 - 9 * t) -\n 12/5 * np.sin(7/13 - 2 * t) +\n 1/6 * np.sin(t + 48/11) +\n 13/8 * np.sin(3 * t + 27/7) +\n 71/24 * np.sin(4 * t + 6/11) +\n 22/9 * np.sin(5 * t + 7/2) +\n 19/7 * np.sin(6 * t + 8/17) +\n 20/7 * np.sin(7 * t + 34/9) +\n 55/7 * np.sin(8 * t + 6/5) +\n 64/9 * np.sin(10 * t + 38/9) +\n 27/5) * _UnitStep(19 * 
np.pi - t) * _UnitStep(t - 15 * np.pi) +\n (-22/7 * np.sin(4/3 - 8 * t) -\n 19/7 * np.sin(20/13 - 6 * t) +\n 38/13 * np.sin(t + 1/24) +\n 12/11 * np.sin(2 * t + 5/9) +\n 26/7 * np.sin(3 * t + 7/9) +\n 11/5 * np.sin(4 * t + 12/11) +\n 37/10 * np.sin(5 * t + 17/10) +\n 51/10 * np.sin(7 * t + 10/3) +\n 33/4 * np.sin(9 * t + 26/7) +\n 41/5 * np.sin(10 * t + 9/5) -\n 27/2) * _UnitStep(15 * np.pi - t) * _UnitStep(t - 11 * np.pi) +\n (-172/5 * np.sin(3/8 - t) +\n 5/4 * np.sin(2 * t + 7/2) +\n 2303/24) * _UnitStep(11 * np.pi - t) * _UnitStep(t - 7 * np.pi) +\n (441/5 - 455/12 * np.sin(7/9 - t)) * _UnitStep(7 * np.pi - t) * _UnitStep(t - 3 * np.pi) +\n (-1/3 * np.sin(1/20 - 18 * t) -\n 7/5 * np.sin(7/9 - 17 * t) -\n 18/11 * np.sin(2/5 - 14 * t) -\n 24/5 * np.sin(1/13 - 9 * t) +\n 2767/7 * np.sin(t + 11/3) +\n 229/5 * np.sin(2 * t + 17/7) +\n 313/8 * np.sin(3 * t + 22/5) +\n 32/3 * np.sin(4 * t + 22/5) +\n 169/6 * np.sin(5 * t + 21/8) +\n 23/7 * np.sin(6 * t + 26/11) +\n 21/2 * np.sin(7 * t + 5/6) +\n 55/6 * np.sin(8 * t + 14/5) +\n 212/13 * np.sin(10 * t + 24/7) +\n 26/9 * np.sin(11 * t + 9/2) +\n 16/5 * np.sin(12 * t + 25/6) +\n 35/17 * np.sin(13 * t + 4/11) +\n 15/8 * np.sin(15 * t + 7/10) +\n 2/3 * np.sin(16 * t + 20/9) +\n 16/7 * np.sin(19 * t + 4/5) +\n 13/7 * np.sin(20 * t + 29/7) +\n 14/3 * np.sin(21 * t + 7/5) +\n 4/3 * np.sin(22 * t + 7/4) +\n 12/7 * np.sin(23 * t + 34/33) +\n 7/4 * np.sin(24 * t + 27/7) -\n 211/5) * _UnitStep(3 * np.pi - t) * _UnitStep(t + np.pi)) * _UnitStep(np.sqrt(np.sign(np.sin(t/2))))).real\n\ndef get_x_y():\n t = np.arange(0, 52 * np.pi, 0.0025, dtype=np.complex128)\n return _x(t), _y(t)\n\n\n\nif __name__ == '__main__':\n import matplotlib.pyplot as plt\n import matplotlib.animation as animation\n\n\n # def sine_tl(a, t):\n # pass\n \n # def sine_tr(a, t):\n # pass\n \n # def\n\n\n # # y = np.random.normal(0, 5, (10000))\n\n # y1 = lambda x: 2*x - 2\n # y2 = lambda x: -2*x + 6\n # y3 = lambda x: -0.25*(x - 2)**2 + 2\n # y4 = lambda x: 0.25*(x - 2)**2 + 2\n\n # x, y = get_x_y()\n # # # x = np.linspace(-4, 8, 100)\n\n # # fig = plt.figure()\n # # # plt.plot(x, y1(x))\n # # # plt.plot(x, y2(x))\n # # # plt.plot(x, y3(x))\n # # # plt.plot(x, y4(x))\n # # # plt.hist(y, bins=25, rwidth=.9)\n # plt.scatter(x, y, s=0.5)\n # plt.show()\n\n x = np.linspace(-np.pi, np.pi, 1000)\n\n amps = [4, 2, 1]\n periods = [1, 1, 3]\n colours = ['red', 'green', 'purple']\n labelmaker = lambda amp, period: f\"A: {amp}, T: {period}\"\n\n xtick = np.arange(-np.pi, np.pi+np.pi/2, np.pi/2)\n xlabel = [r'$-\\pi$', r'$-\\frac{\\pi}{2}$', r'$0$', r'$\\frac{\\pi}{2}$', r'$\\pi$']\n \n font = {'family': 'Calibri',\n 'weight': 'bold',\n 'size': 22}\n\n plt.rc('font', **font)\n plt.rc('text', usetex=True)\n\n fig = plt.figure(\"Inspecting Sines\")\n\n for i in range(len(amps)):\n a = amps[i]\n T = periods[i]\n c = colours[i]\n\n if i < 2:\n ax = plt.subplot(2, 2, i + 1)\n else:\n ax = plt.subplot(2, 1, 2)\n\n ax.set_xlim((-np.pi, np.pi))\n ax.grid(True)\n ax.margins(xmargin=0)\n ax.set_xticks(xtick)\n ax.set_xticklabels(xlabel)\n\n ax.plot(x, np.full(1000, 0), c='black', lw=.5)\n\n y = a * np.sin(T * x)\n ax.plot(x, y, color=c, label=labelmaker(a, T))\n\n ax.legend()\n\n plt.show()\n\n\n\n# fig1, ax1 = plt.subplots(1, 2)\n# fig1.suptitle(\"1 row 2 columns\")\n# print(\"Axis1:\\n\", ax1)\n# ax1[0].plot([1, 2, 3])\n# ax1[0].set_title(\"A line\")\n# ax1[1].plot([3, 1, 2])\n# ax1[1].set_title(\"A check\")\n\n# fig2, ax2 = plt.subplots(2, 2)\n# fig2.suptitle(\"2 rows 2 columns\")\n# print(\"Axis2:\\n\", 
ax2)\n# ax2[0, 0].scatter([1, 2, 3], [4, 2, 0])\n# ax2[0, 0].set_title(\"Some Data Points\")\n# ax2[0, 1].hist(np.random.randn(100), rwidth=.9)\n# ax2[0, 1].set_title(\"Normal Distribution\")\n# ax2[1, 0].plot([1, 2, 0, 3])\n# ax2[1, 0].set_title(\"Some line...\")\n# ax2[1, 1].plot([1,0,-1,0,1])\n# ax2[1, 1].set_title(\"Flat line\")\n\n# fig2.tight_layout()\n# plt.show()\n\n# numbers_x = [5, 10, 60]\n# numbers_y1 = [1, 5, 7]\n# numbers_y2 = [6, 2, 3]\n\n# ax = plt.subplot(2, 2, 1)\n# ax.plot(numbers_x, numbers_y1)\n\n# ax = plt.subplot(2, 2, 3)\n# ax.plot(numbers_y1, numbers_x)\n\n# ax = plt.subplot(1, 2, 2)\n# ax.plot(numbers_x, numbers_y2)\n\n# plt.show()\n\n\n# fig, ax = plt.subplots(2,2)\n# ax = ax.flatten()\n\n# ax[0].plot([np.pi, np.e])\n# ax[1].plot([-1, 0, 1], [-2, 3, 1])\n# ax[2].scatter([-5, 5, 3, 2, 6], [1, 3, -3, 2, 0.1])\n# ax[3].hist(np.random.normal(5, 2.5, 1000))\n\n# plt.show()\n\n# numbers_x = [5, 10, 60]\n# numbers_y1 = [1, 5, 7]\n# numbers_y2 = [6, 2, 3]\n\n# ax = plt.subplot(2, 2, 1) # top left subplot of a 2x2 grid\n# ax.plot(numbers_x, numbers_y1)\n\n# ax = plt.subplot(2, 2, 3) # bottom left subplot of a 2x2 grid\n# ax.plot(numbers_y1, numbers_x)\n\n# # note how we change the grid definition!\n# # since the top defined a 2x2 grid, this will span 2 rows\n# ax = plt.subplot(1, 2, 2) # the right plot of a 1x2 grid\n# ax.plot(numbers_x, numbers_y2)\n\n# plt.show()\n" ]
[ [ "numpy.full", "numpy.sin", "numpy.heaviside", "matplotlib.pyplot.figure", "matplotlib.pyplot.rc", "numpy.arange", "matplotlib.pyplot.show", "numpy.linspace", "matplotlib.pyplot.subplot" ] ]
hurschler/pig-face-recognition
[ "5834f3c89448a645ee0eaf2bbdade064f0c4be93" ]
[ "augmentation/data_aug_edge_det.py" ]
[ "import glob\n\nimport mpimg as mpimg\nimport numpy as np\nimport cv2\nimport os\nimport scipy.misc as sm\nimport tensorflow as tf\nfrom keras.preprocessing.image import load_img\nfrom PIL import Image as Pil_Image\nfrom matplotlib import pyplot as plt\nfrom IPython.display import display, HTML\nfrom scipy import ndimage\nfrom skimage.color import rgb2gray\n\nimport util.config as config\nfrom albumentations import *\nimport logging.config\nfrom skimage import exposure\nfrom matplotlib import image\nimport util.logger_init\n\n\nlog = logging.getLogger(__name__)\n\n\n# https://github.com/albumentations-team/albumentations#installation\ndef generate_aug_images():\n img_path_crop = '/Users/patrickrichner/Desktop/FH/OneDrive - Hochschule Luzern/BDA2021/07_Daten/small_dataset/test/train'\n pig_img_folders = os.listdir(img_path_crop)\n for i, pig_name in enumerate(pig_img_folders):\n img_path = os.path.join(img_path_crop, pig_name)\n image_names = glob.glob(os.path.join(img_path, 'DSC*'))\n for image_name in image_names:\n image_name = os.path.basename(image_name)\n img_keras = load_img(os.path.join(img_path, image_name))\n img_np = np.array(img_keras)\n edges = cv2.Canny(img_np, 100, 200, 3)\n plt.subplot(121), plt.imshow(img_np, cmap='gray')\n plt.title('Original Image'), plt.xticks([]), plt.yticks([])\n plt.subplot(122), plt.imshow(edges, cmap='gray')\n plt.title('Edge Image'), plt.xticks([]), plt.yticks([])\n plt.show()\n save_aug_image(image_name, img_path, img_np, 'E-')\n log.info('Augmentation in process Edge:' + str(i))\n log.info('Augmentation finished ')\n\n\ndef sobel_filters():\n img_path = '/sample'\n image_name = 'DSC_V1_6460_2238.JPG'\n img_keras = load_img(os.path.join(img_path, image_name))\n img = np.array(img_keras)\n\n Kx = np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]], np.float32)\n Ky = np.array([[1, 2, 1], [0, 0, 0], [-1, -2, -1]], np.float32)\n\n Ix = ndimage.filters.convolve(img, Kx)\n Iy = ndimage.filters.convolve(img, Ky)\n\n G = np.hypot(Ix, Iy)\n G = G / G.max() * 255\n theta = np.arctan2(Iy, Ix)\n plt.imshow(theta)\n plt.show()\n return (G, theta)\n\n\ndef save_aug_image(image_name, img_path, pig_img_aug1, prefix):\n log.info('Saving image...')\n img_aug_opencv = np.array(pig_img_aug1)\n pil_img = Pil_Image.fromarray(img_aug_opencv)\n aug_img_name = prefix + image_name\n pil_img.save(os.path.join(img_path, aug_img_name))\n\n def rgb2gray(rgb):\n\n r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]\n gray = 0.2989 * r + 0.5870 * g + 0.1140 * b\n\n return gray\n\ndef load_data(dir_name='faces_imgs'):\n '''\n Load images from the \"faces_imgs\" directory\n Images are in JPG and we convert it to gray scale images\n '''\n imgs = []\n for filename in os.listdir(dir_name):\n if os.path.isfile(dir_name + '/' + filename):\n img = mpimg.imread(dir_name + '/' + filename)\n img = rgb2gray(img)\n imgs.append(img)\n return imgs\n\ndef visualize(imgs, format=None, gray=False):\n plt.figure(figsize=(20, 40))\n for i, img in enumerate(imgs):\n if img.shape[0] == 3:\n img = img.transpose(1, 2, 0)\n plt_idx = i + 1\n plt.subplot(2, 2, plt_idx)\n plt.imshow(img, format)\n plt.show()\n\n\n# generate_aug_images()\nsobel_filters()\n\n\n# pig_img = cv2.imread(r'../sample/DSC_V1_6460_2238.JPG')\n# pig_img = cv2.cvtColor(pig_img, cv2.COLOR_BGR2RGB)\n# pig_img = image_resize(pig_img, height=416)\n\n# alpha = 1.2\n# aug = RandomBrightnessContrast(p=1)\n# pig_img_aug1 = aug.apply(pig_img, alpha=alpha)\n\n# aug = RandomFog(p=1, fog_coef_lower=0.1, fog_coef_upper=0.1, alpha_coef=0.8)\n# 
pig_img_aug2 = aug.apply(pig_img)\n\n# aug = HueSaturationValue(hue_shift_limit=200, sat_shift_limit=70, val_shift_limit=27, p=1)\n# pig_img_aug3 = aug.apply(pig_img)\n\n# aug = ElasticTransform(alpha=203, sigma=25, alpha_affine=25, p=1.0)\n# pig_img_aug4 = aug.apply(pig_img)\n\n# aug = ToGray(p=0.5)\n# pig_img_aug5 = aug.apply(pig_img)\n\n# aug = CLAHE(p=1.0)\n# pig_img_aug6 = aug.apply(pig_img)\n\n# aug = Blur(p=0.5, blur_limit=7)\n# pig_img_aug7 = aug.apply(pig_img)\n\n# -----------------------------------------------------------------------------------------------------------\nplt.rcParams['figure.figsize'] = [16, 8]\nrows = 2\nnum = 0\n\n# plot_image(pig_img, 'orig')\n# plot_image(pig_img_aug1, 'brightnessContrast')\n# plot_image(pig_img_aug2, 'fog')\n# plot_image(pig_img_aug3, 'hueSaturation')\n# plot_image(pig_img_aug4, 'elasticTransform')\n# plot_image(pig_img_aug5, 'toGray')\n# plot_image(pig_img_aug6, 'clahe')\n# plot_image(pig_img_aug7, 'blur')\n\n# generate_aug_images()\n\nplt.axis('off')\nplt.tight_layout()\nplt.show()\n\n# cv2.waitKey(0)" ]
[ [ "matplotlib.pyplot.subplot", "numpy.array", "scipy.ndimage.filters.convolve", "matplotlib.pyplot.title", "matplotlib.pyplot.xticks", "matplotlib.pyplot.yticks", "matplotlib.pyplot.figure", "matplotlib.pyplot.tight_layout", "numpy.hypot", "numpy.arctan2", "matplotlib.pyplot.show", "matplotlib.pyplot.axis", "matplotlib.pyplot.imshow" ] ]
alexbjorling/nanomax-analysis-utils
[ "e208d098243e28a90004c6e1c5fb69251177d23e" ]
[ "nmutils/gui/scanViewer/widgets/ScalarWidget.py" ]
[ "from silx.gui import qt\nimport numpy as np\n\nfrom .MapWidget import MapWidget\nfrom .Base import PairedWidgetBase\n\nclass ScalarWidget(PairedWidgetBase):\n # This widget defines a MapWidget and and normal text label and describes\n # how they are related by data operations.\n def __init__(self, parent=None):\n\n super(ScalarWidget, self).__init__()\n self.map = MapWidget(self)\n self.value = qt.QLabel(self)\n self.value.setText('scalar value')\n self.setLayout(qt.QHBoxLayout())\n splitter = qt.QSplitter()\n splitter.addWidget(self.value)\n splitter.addWidget(self.map)\n splitter.setSizes((300,300))\n self.layout().addWidget(splitter)\n\n # connect the interpolation thingies\n self.map.interpolBox.valueChanged.connect(self.updateMap)\n\n # connect the selection tools\n self.map.indexSelectionChanged.connect(self.selectByIndex)\n self.map.clickSelectionChanged.connect(self.selectByPosition)\n self.map.selectionCleared.connect(self.clearSelection)\n\n # connect the positions button\n self.map.positionsAction.triggered.connect(self.togglePositions)\n\n # connect the mask widget to the update\n self.map.getMaskToolsDockWidget().widget()._mask.sigChanged.connect(self.updateImage)\n\n # keep track of map selections by ROI or by index\n self.selectionMode = 'roi' # 'roi' or 'ind'\n\n def setScan(self, scan):\n self.scan = scan\n if not scan:\n self.map.removeImage('data')\n self.value.setText('scalar data')\n return\n # avoid old position grids:\n if self.map.positionsAction.isChecked():\n self.togglePositions()\n self.map.indexBox.setMaximum(scan.nPositions - 1)\n self.resetMap()\n\n def resetMap(self):\n self.updateMap()\n self.map.resetZoom()\n\n def updateMap(self):\n if self.scan is None:\n return\n try:\n self.window().statusOutput('Building scalar map...')\n # workaround to avoid the infinite loop which occurs when both\n # mask widgets are open at the same time\n self.map.getMaskToolsDockWidget().setVisible(False)\n # store the limits to maintain zoom\n xlims = self.map.getGraphXLimits()\n ylims = self.map.getGraphYLimits()\n # if the mask is cleared, reset without wasting time\n sampling = self.map.interpolBox.value()\n x, y, z = self.scan.interpolatedMap(self.scan.data['0d'], sampling, origin='ul', method='nearest')\n self.map.addImage(z, legend='data', \n scale=[abs(x[0,0]-x[0,1]), abs(y[0,0]-y[1,0])],\n origin=[x.min(), y.min()], resetzoom=False)\n self.map.setGraphXLimits(*xlims)\n self.map.setGraphYLimits(*ylims)\n aspect = (x.max() - x.min()) / (y.max() - y.min())\n if aspect > 50 or aspect < 1./50:\n self.map.setKeepDataAspectRatio(False)\n else:\n self.map.setKeepDataAspectRatio(True)\n self.map.setGraphXLabel(self.scan.positionDimLabels[0])\n self.map.setGraphYLabel(self.scan.positionDimLabels[1])\n self.window().statusOutput('')\n except:\n self.window().statusOutput('Failed to build scalar map. 
See terminal output.')\n raise\n\n def updateImage(self):\n if self.scan is None:\n return\n try:\n # get and check the mask array\n if self.selectionMode == 'ind':\n index = self.map.indexBox.value()\n data = self.scan.data['0d'][index]\n elif self.selectionMode == 'roi':\n self.indexMarkerOn(False)\n mask = self.map.getMaskToolsDockWidget().widget().getSelectionMask()\n if (mask is None) or (not np.sum(mask)):\n # the mask is empty, don't waste time with positions\n print('calculating scalar from all positions')\n data = np.mean(self.scan.data['0d'], axis=0)\n else:\n # recreate the interpolated grid from above, to find masked\n # positions on the oversampled grid\n dummy = np.zeros(self.scan.nPositions)\n x, y, z = self.scan.interpolatedMap(dummy, self.map.interpolBox.value(), origin='ul')\n maskedPoints = np.vstack((x[np.where(mask)], y[np.where(mask)])).T\n pointSpacing2 = (x[0,1] - x[0,0])**2 + (y[0,0] - y[1,0])**2\n # go through actual positions and find the masked ones\n maskedPositions = []\n for i in range(self.scan.nPositions):\n # the minimum distance of the current position to a selected grid point:\n dist2 = np.sum((maskedPoints - self.scan.positions[i])**2, axis=1).min()\n if dist2 < pointSpacing2:\n maskedPositions.append(i)\n # get the average and replace the image with legend 'data'\n print('calculating average scalar from %d positions'%len(maskedPositions))\n data = np.mean(self.scan.data['0d'][maskedPositions], axis=0)\n self.value.setText('scalar value: \\n%s' % data)\n self.window().statusOutput('')\n except:\n self.window().statusOutput('Failed to build diffraction pattern. See terminal output.')\n raise\n\n" ]
[ [ "numpy.where", "numpy.sum", "numpy.zeros", "numpy.mean" ] ]
ricardoperezf/MLExamples
[ "058dbda967dbf6e83dbc7562a70ee8d3e453d539" ]
[ "FlowerDetection/app.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\nimport mglearn\nimport pandas as pd\nfrom sklearn.datasets import load_iris\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.neighbors import KNeighborsClassifier\n\niris_dataset = load_iris()\n\n# print(\"Keys of iris_dataset: \\n{}\".format(iris_dataset.keys()))\n\n# print(iris_dataset['DESCR'][:] + \"\\n...\")\n\n# print(\"Target names: {}\".format(iris_dataset['target_names']))\n\n# SPLIT THE DATA COLLECTED TO TRAINING SET AND TESTING SET (75%, 25%)\nX_train, X_test, y_train, y_test = train_test_split(\n iris_dataset['data'], iris_dataset['target'], random_state=0)\n\n# create dataframe from data in X_train\n# label the columns using the strings in iris_dataset.feature_names\niris_dataframe = pd.DataFrame(X_train, columns=iris_dataset.feature_names)\n# create a scatter matrix from the dataframe, color by y_train\n# pd.plotting.scatter_matrix(iris_dataframe, c=y_train, figsize=(15, 15), marker='o', hist_kwds={'bins': 20}, s=60,\n# alpha=.8, cmap=mglearn.cm3)\n\nknn = KNeighborsClassifier(n_neighbors=1) # assign algorithm.\n\n# MAKING AND TRAINING THE MODEL\nknn.fit(X_train, y_train) # assign arguments: x_train of the data and y_train of the labels.\n\n# MAKING PREDICTIONS\nX_new = np.array([[5, 2.9, 1, 0.2]])\n# print(\"X_new.shape: {}\".format(X_new.shape))\n\nprediction = knn.predict(X_new)\n# print(\"\\nPrediction: {}\".format(prediction))\nprint(\"Predicted target name: {}\".format(iris_dataset['target_names'][prediction]))\n\n# TESTING THE MODEL\ny_pred = knn.predict(X_test) # WE PASS THE 25% OF THE DATA TO PREDICT IT.\nprint(\"\\nTest set predictions:\\n {}\".format(y_pred)) # WE GET LABELS OF EACH ONE.\nprint(\"Test set score: {:.2f}\".format(np.mean(y_pred == y_test))) # WE TEST THE PREDICTED DATA AGAINST THE KNOWN LABELS\n" ]
[ [ "numpy.array", "pandas.DataFrame", "sklearn.neighbors.KNeighborsClassifier", "numpy.mean", "sklearn.model_selection.train_test_split", "sklearn.datasets.load_iris" ] ]
mhuen/TFScripts
[ "4ade57237efcd9ce5332532d8bcc399a06bcddf2" ]
[ "tfscripts/compat/v1/core.py" ]
[ "'''\nCore functions of tfscripts.compat.v1:\n\n Add residuals, batch normalisation, activation,\n'''\n\nfrom __future__ import division, print_function\n\nimport numpy as np\nimport tensorflow as tf\n\n# tfscripts.compat.v1 specific imports\nfrom tfscripts.compat.v1.weights import new_weights\n\n# constants\nfrom tfscripts.compat.v1 import FLOAT_PRECISION\n\n\ndef add_residual(input, residual, strides=None, use_scale_factor=True,\n scale_factor=0.001):\n '''Convenience function to add a residual\n\n Will add input + scale*residual where these overlap in the last dimension\n currently only supports input and residual tensors of same shape in\n other dimensions\n\n Parameters\n ----------\n input : tf.Tensor\n Input tensor.\n residual : tf.Tensor\n Residual to be added to the input tensor\n strides : list of int, optional\n strides must define a stride (int) for each dimension of input.\n use_scale_factor : bool, optional\n If true, the residuals will be scaled by the scale_factor prior\n to addition.\n scale_factor : float, optional\n Defines how much the residuals will be scaled prior to addition if\n use_scale_factor is True.\n\n Returns\n -------\n tf.Tensor\n The output Tensor: input + scale * residual(if use_scale_factor)\n '''\n\n # ----------------------\n # strides for mismatching\n # dimensions other than channel\n # dimension\n # (Post Masterthesis)\n # ----------------------\n if strides is not None:\n\n assert len(strides) == len(input.get_shape().as_list()), \\\n 'Number of dimensions of strides and input must match'\n assert strides[0] == 1, 'stride in batch dimension must be 1'\n\n if not strides == [1 for s in strides]:\n begin = [0 for s in strides]\n end = [0] + input.get_shape().as_list()[1:]\n input = tf.strided_slice(input,\n begin=begin,\n end=end,\n strides=strides,\n begin_mask=1,\n end_mask=1,\n )\n # ----------------------\n\n num_outputs = residual.get_shape().as_list()[-1]\n num_inputs = input.get_shape().as_list()[-1]\n\n # Residuals added over multiple layers accumulate.\n # A scale factor < 1 reduces instabilities in beginnning\n if use_scale_factor:\n scale = new_weights([num_outputs], stddev=scale_factor)\n residual = residual*scale\n if num_inputs == num_outputs:\n output = residual + input\n elif num_inputs > num_outputs:\n output = residual + input[..., :num_outputs]\n elif num_inputs < num_outputs:\n output = tf.concat([residual[..., :num_inputs] + input,\n residual[..., num_inputs:]], axis=-1)\n else:\n if num_inputs == num_outputs:\n output = (residual + input)/np.sqrt(2.)\n elif num_inputs > num_outputs:\n output = (residual + input[..., :num_outputs])/np.sqrt(2.)\n elif num_inputs < num_outputs:\n output = tf.concat(\n [(residual[..., :num_inputs] + input)/np.sqrt(2.),\n residual[..., num_inputs:]],\n axis=-1)\n\n return output\n\n\ndef activation(layer, activation_type,\n use_batch_normalisation=False,\n is_training=None,\n verbose=True):\n '''\n Helper-functions to perform activation on a layer\n\n for parametric activation functions this assumes that the first\n dimension is batch size and that for each of the other dimensions\n seperate parametrizations should be learned\n\n Parameters\n ----------\n layer : tf.Tensor\n Input tensor.\n activation_type : str or callable\n The activation type to be used.\n use_batch_normalisation : bool, optional\n True: use batch normalisation\n is_training : None, optional\n Indicates whether currently in training or inference mode.\n True: in training mode\n False: inference mode.\n verbose : bool, optional\n 
If true, more verbose output is printed.\n\n Returns\n -------\n tf.Tensor\n The output tensor.\n\n Raises\n ------\n ValueError\n If wrong settings passed.\n '''\n\n # Use batch normalisation?\n if use_batch_normalisation:\n if verbose:\n print('Using Batch Normalisation')\n if is_training is None:\n raise ValueError('To use batch normalisation a boolean is_training'\n ' needs to be passed')\n layer = batch_norm_wrapper(layer, is_training)\n\n if activation_type == '':\n return layer\n\n if hasattr(tf.nn, activation_type):\n layer = getattr(tf.nn, activation_type)(layer)\n\n elif hasattr(tf, activation_type):\n layer = getattr(tf, activation_type)(layer)\n\n elif activation_type == 'leaky':\n layer = tf.multiply(tf.maximum(-0.01*layer, layer), tf.sign(layer))\n # todo: NecroRelu\n # https://stats.stackexchange.com/questions/176794/\n # how-does-rectilinear-activation-function-solve-the-\n # vanishing-gradient-problem-in\n # https://github.com/ibmua/learning-to-make-nn-in-python/\n # blob/master/nn_classifier.py\n elif activation_type == 'requ':\n layer = tf.where(tf.less(layer, tf.constant(0, dtype=FLOAT_PRECISION)),\n tf.zeros_like(layer, dtype=FLOAT_PRECISION),\n tf.square(layer))\n\n elif activation_type == 'selu':\n lam = 1.0507\n alpha = 1.6733\n # from https://arxiv.org/abs/1706.02515\n # self normalizing networks\n layer = tf.where(tf.less(layer, tf.constant(0, dtype=FLOAT_PRECISION)),\n tf.exp(layer) * tf.constant(alpha,\n dtype=FLOAT_PRECISION)\n - tf.constant(alpha,dtype=FLOAT_PRECISION),\n layer)\n layer = layer * tf.constant(lam, dtype=FLOAT_PRECISION)\n\n elif activation_type == 'centeredRelu':\n layer = tf.nn.relu6(layer) - tf.constant(3, dtype=FLOAT_PRECISION)\n\n elif activation_type == 'negrelu':\n layer = -tf.nn.relu(layer)\n\n elif activation_type == 'invrelu':\n layer = tf.where(tf.less(layer, tf.constant(0,\n dtype=FLOAT_PRECISION)), layer, (layer+1e-8)**-1)\n\n elif activation_type == 'sign':\n layer = tf.where(tf.less(layer, tf.constant(0, dtype=FLOAT_PRECISION)),\n layer, tf.sign(layer))\n\n elif activation_type == 'prelu':\n slope = new_weights(layer.get_shape().as_list()[1:]) + 1.0\n layer = tf.where(tf.less(layer, tf.constant(0, dtype=FLOAT_PRECISION)),\n layer*slope, layer)\n\n elif activation_type == 'pelu':\n a = new_weights(layer.get_shape().as_list()[1:]) + 1.0\n b = new_weights(layer.get_shape().as_list()[1:]) + 1.0\n layer = tf.where(tf.less(layer,\n tf.constant(0, dtype=FLOAT_PRECISION)),\n (tf.exp(layer/b) - 1)*a, layer*(a/b))\n\n elif activation_type == 'gaussian':\n layer = tf.exp(-tf.square(layer))\n\n elif activation_type == 'pgaussian':\n sigma = new_weights(layer.get_shape().as_list()[1:]) + \\\n tf.constant(1.0, dtype=FLOAT_PRECISION)\n mu = new_weights(layer.get_shape().as_list()[1:])\n layer = tf.exp(tf.square((layer - mu) / sigma) *\n tf.constant(-0.5, dtype=FLOAT_PRECISION)) / (sigma)\n\n elif callable(activation_type):\n layer = activation_type(layer)\n\n else:\n raise ValueError('activation: Unknown activation type: {!r}'.format(\n activation_type))\n\n return layer\n\n\ndef batch_norm_wrapper(inputs, is_training, decay=0.99, epsilon=1e-6):\n ''' Batch normalisation\n\n Adopted from:\n http://r2rt.com/implementing-batch-normalization-in-tensorflow.html\n\n Performs batch normalisation on the inputs according to\n BN2015 paper by Sergey Ioffe and Christian Szegedy\n\n Parameters\n ----------\n inputs: A Tensor on which to perform batch normalisation\n Tensor will be normalised in all but\n\n is_training : tf.placeholder of type bool.\n 
Indicates wheter the network is being trained\n or whether it is being used in inference mode.\n If set to true, the population mean and variance\n will be updated and learned.\n\n decay : Decay of moving exponential average\n\n epsilon : Small constant used in normalisation to prevent\n division by zero.\n\n Returns\n -------\n A Tensor. Has the same type as inputs.\n The batch normalized input\n '''\n norm_shape = inputs.get_shape().as_list()[1:]\n scale = tf.Variable(tf.ones(norm_shape, dtype=FLOAT_PRECISION),\n name='BN_scale', dtype=FLOAT_PRECISION)\n beta = tf.Variable(tf.zeros(norm_shape,dtype=FLOAT_PRECISION),\n name='BN_beta', dtype=FLOAT_PRECISION)\n pop_mean = tf.Variable(tf.zeros(norm_shape, dtype=FLOAT_PRECISION),\n trainable=False,\n name='BN_pop_mean',\n dtype=FLOAT_PRECISION)\n pop_var = tf.Variable(tf.ones(norm_shape, dtype=FLOAT_PRECISION),\n trainable=False,\n name='BN_pop_var',\n dtype=FLOAT_PRECISION)\n\n if is_training:\n batch_mean, batch_var = tf.nn.moments(x=inputs, axes=[0], keepdims=False)\n train_mean = tf.compat.v1.assign(pop_mean,\n pop_mean * decay + batch_mean * (1 - decay))\n train_var = tf.compat.v1.assign(pop_var,\n pop_var * decay + batch_var * (1 - decay))\n with tf.control_dependencies([train_mean, train_var]):\n return tf.nn.batch_normalization(\n inputs,\n batch_mean, batch_var, beta, scale, epsilon, # R2RT's blog\n # pop_mean, pop_var, beta, scale, epsilon,\n )\n else:\n return tf.nn.batch_normalization(inputs, pop_mean, pop_var, beta,\n scale, epsilon)\n" ]
[ [ "tensorflow.exp", "tensorflow.compat.v1.assign", "tensorflow.zeros", "tensorflow.concat", "tensorflow.nn.relu", "tensorflow.ones", "tensorflow.nn.relu6", "tensorflow.strided_slice", "tensorflow.nn.moments", "tensorflow.constant", "tensorflow.sign", "tensorflow.zeros_like", "numpy.sqrt", "tensorflow.control_dependencies", "tensorflow.maximum", "tensorflow.nn.batch_normalization", "tensorflow.square" ] ]
rychallener/theresa
[ "886c6b74bee2edef7df9b6b54ce6d97de4aa4421" ]
[ "theresa/lib/utils.py" ]
[ "import numpy as np\nimport pickle\nimport theano\nimport time\nimport constants as c\nimport scipy.constants as sc\nimport scipy.interpolate as spi\nimport eigen\nimport starry\nimport progressbar\nimport theano\nimport theano.tensor as tt\nimport mc3.stats as ms\nfrom numba import njit\n\ndef initsystem(fit, ydeg):\n '''\n Uses a fit object to build the respective starry objects. Useful\n because starry objects cannot be pickled. Returns a tuple of\n (star, planet, system).\n '''\n \n cfg = fit.cfg\n\n star = starry.Primary(starry.Map(ydeg=1, amp=1),\n m =cfg.star.m,\n r =cfg.star.r,\n prot=cfg.star.prot)\n\n planet = starry.kepler.Secondary(starry.Map(ydeg=ydeg),\n m =cfg.planet.m,\n r =cfg.planet.r,\n porb =cfg.planet.porb,\n prot =cfg.planet.prot,\n Omega=cfg.planet.Omega,\n ecc =cfg.planet.ecc,\n w =cfg.planet.w,\n t0 =cfg.planet.t0,\n inc =cfg.planet.inc,\n theta0=180)\n\n system = starry.System(star, planet)\n\n return star, planet, system\n\ndef specint(wn, spec, filtwn_list, filttrans_list):\n \"\"\"\n Integrate a spectrum over the given filters.\n\n Arguments\n ---------\n wn: 1D array\n Wavenumbers (/cm) of the spectrum\n\n spec: 1D array\n Spectrum to be integrated\n\n filtwn_list: list\n List of arrays of filter wavenumbers, in /cm.\n\n filttrans_list: list\n List of arrays of filter transmission. Same length as filtwn_list.\n\n Returns\n -------\n intspec: 1D array\n The spectrum integrated over each filter. \n \"\"\"\n if len(filtwn_list) != len(filttrans_list):\n print(\"ERROR: list sizes do not match.\")\n raise Exception\n \n intspec = np.zeros(len(filtwn_list)) \n \n for i, (filtwn, filttrans) in enumerate(zip(filtwn_list, filttrans_list)):\n # Sort ascending\n idx = np.argsort(filtwn)\n \n intfunc = spi.interp1d(filtwn[idx], filttrans[idx],\n bounds_error=False, fill_value=0)\n\n # Interpolate transmission\n inttrans = intfunc(wn)\n\n # Normalize to one\n norminttrans = inttrans / np.trapz(inttrans, wn)\n\n # Integrate filtered spectrum\n intspec[i] = np.trapz(spec * norminttrans, wn)\n\n return intspec\n\n \ndef vislon(planet, fit):\n \"\"\"\n Determines the range of visible longitudes based on times of\n observation.\n\n Arguments\n ---------\n planet: starry Planet object\n Planet object\n\n fit: Fit object\n Fit object. 
Must contain observation information.\n\n Returns\n -------\n minlon: float\n Minimum visible longitude, in degrees\n\n maxlon: float\n Maximum visible longitude, in degrees\n \"\"\"\n t = fit.t\n\n porb = planet.porb # days / orbit\n prot = planet.prot # days / rotation\n t0 = planet.t0 # days\n theta0 = planet.theta0 # degrees\n\n # Central longitude at each time (\"sub-observer\" point)\n centlon = theta0 - (t - t0) / prot * 360\n\n # Minimum and maximum longitudes (assuming +/- 90 degree\n # visibility)\n limb1 = centlon - 90\n limb2 = centlon + 90\n\n # Rescale to [-180, 180]\n limb1 = (limb1 + 180) % 360 - 180\n limb2 = (limb2 + 180) % 360 - 180\n\n return np.min(limb1.eval()), np.max(limb2.eval())\n \n \ndef readfilters(filterfiles):\n \"\"\"\n Reads filter files and determines the mean wavelength.\n \n Arguments\n ---------\n filterfiles: list\n list of paths to filter files\n\n Returns\n -------\n filtmid: 1D array\n Array of mean wavelengths\n \"\"\"\n filtwl_list = []\n filtwn_list = []\n filttrans_list = []\n \n wnmid = np.zeros(len(filterfiles))\n for i, filterfile in enumerate(filterfiles):\n filtwl, trans = np.loadtxt(filterfile, unpack=True)\n \n filtwn = 1.0 / (filtwl * c.um2cm)\n\n wnmid[i] = np.sum(filtwn * trans) / np.sum(trans)\n\n filtwl_list.append(filtwl)\n filtwn_list.append(filtwn)\n filttrans_list.append(trans)\n\n wlmid = 1 / (c.um2cm * wnmid)\n\n return filtwl_list, filtwn_list, filttrans_list, wnmid, wlmid\n \ndef visibility(t, latgrid, longrid, dlatgrid, dlongrid, theta0, prot,\n t0, rp, rs, x, y):\n \"\"\"\n Calculate the visibility of a grid of cells on a planet at a specific\n time. Returns a combined visibility based on the observer's\n line-of-sight, the area of the cells, and the effect of the star.\n\n Arguments\n ---------\n t: float\n Time to calculate visibility.\n \n latgrid: 2D array\n Array of latitudes, in radians, from -pi/2 to pi/2.\n\n longrid: 2D array\n Array of longitudes, in radians, from -pi to pi.\n\n dlat: float\n Latitude resolution in radians.\n\n dlon: float\n Longitude resoltuion in radians.\n\n theta0: float\n Rotation at t0 in radians.\n\n prot: float\n Rotation period, the same units as t.\n\n t0: float\n Time of transit, same units as t.\n\n rp: float\n Planet radius in solar radii.\n\n rs: float\n Star radius in solar radii.\n\n x: tuple\n x position of (star, planet)\n\n y: tuple\n y position of (star, planet)\n\n Returns\n -------\n vis: 2D array\n Visibility of each grid cell. Same shape as latgrid and longrid.\n\n \"\"\"\n if latgrid.shape != longrid.shape:\n print(\"Number of latitudes and longitudes do not match.\")\n raise Exception\n\n losvis = np.zeros(latgrid.shape)\n starvis = np.zeros(latgrid.shape)\n \n # Flag to do star visibility calculation (improves efficiency)\n dostar = True\n\n # Central longitude (observer line-of-sight)\n centlon = theta0 - (t - t0) / prot * 2 * np.pi\n\n # Convert relative to substellar point\n centlon = (centlon + np.pi) % (2 * np.pi) - np.pi\n \n xsep = x[0] - x[1]\n ysep = y[0] - y[1]\n d = np.sqrt(xsep**2 + ysep**2)\n\n # Visible fraction due to star \n # No grid cells visible. Return 0s\n if (d < rs - rp):\n return np.zeros(latgrid.shape)\n \n # All grid cells visible. 
No need to do star calculation.\n elif (d > rs + rp):\n starvis[:,:] = 1.0\n dostar = False\n # Otherwise, time is during ingress/egress and we cannot simplify\n # calculation\n\n nlat, nlon = latgrid.shape\n for i in range(nlat):\n for j in range(nlon):\n # Angles wrt the observer\n lat = latgrid[i,j]\n lon = longrid[i,j]\n dlat = dlatgrid[i,j]\n dlon = dlongrid[i,j]\n \n phi = lon - centlon\n theta = lat\n phimin = phi - dlon / 2.\n phimax = phi + dlon / 2.\n\n thetamin = lat - dlat / 2.\n thetamax = lat + dlat / 2.\n\n # Cell is not visible at this time. No need to calculate further.\n if (phimin > np.pi / 2.) or (phimax < -np.pi / 2.):\n losvis[i,j] = 0\n\n # Cell is visible at this time\n else:\n # Determine visible phi/theta range of the cell\n phirng = np.array((np.max((phimin, -np.pi / 2.)),\n np.min((phimax, np.pi / 2.))))\n thetarng = np.array((np.max((thetamin, -np.pi / 2.)),\n np.min((thetamax, np.pi / 2.))))\n\n\n # Visibility based on LoS\n # This is the integral of\n #\n # A(theta, phi) V(theta, phi) dtheta dphi\n #\n # where\n #\n # A = r**2 cos(theta)\n # V = cos(theta) cos(phi)\n #\n # Here we've normalized by pi*r**2, since\n # visibility will be applied to Fp/Fs where planet\n # size is already taken into account.\n losvis[i,j] = (np.diff(thetarng/2) + \\\n np.diff(np.sin(2*thetarng) / 4)) * \\\n np.diff(np.sin(phirng)) / \\\n np.pi\n\n # Grid cell maybe only partially visible\n if dostar:\n thetamean = np.mean(thetarng)\n phimean = np.mean(phirng)\n # Grid is \"within\" the star\n if dgrid(x, y, rp, thetamean, phimean) < rs:\n starvis[i,j] = 0.0\n # Grid is not in the star\n else:\n starvis[i,j] = 1.0\n\n return starvis * losvis\n\ndef dgrid(x, y, rp, theta, phi):\n \"\"\"\n Calculates the projected distance between a latitude (theta) and a \n longitude (phi) on a planet with radius rp to a star. Projected\n star position is (x[0], y[0]) and planet position is (x[1], y[1]).\n \"\"\"\n xgrid = x[1] + rp * np.cos(theta) * np.sin(phi)\n ygrid = y[1] + rp * np.sin(theta)\n d = np.sqrt((xgrid - x[0])**2 + (ygrid - y[0])**2)\n return d\n\ndef t_dgrid():\n \"\"\"\n Returns a theano function of dgrid(), with the same arguments.\n \"\"\"\n print('Defining theano function.')\n arg1 = theano.tensor.dvector('x')\n arg2 = theano.tensor.dvector('y')\n arg3 = theano.tensor.dscalar('rp')\n arg4 = theano.tensor.dscalar('theta')\n arg5 = theano.tensor.dscalar('phi')\n\n f = theano.function([arg1, arg2, arg3, arg4, arg5],\n dgrid(arg1, arg2, arg3, arg4, arg5)) \n return f\n\ndef mapintensity(map, lat, lon, amp):\n \"\"\"\n Calculates a grid of intensities, multiplied by the amplitude given.\n \"\"\"\n grid = map.intensity(lat=lat.flatten(), lon=lon.flatten()).eval()\n grid *= amp\n grid = grid.reshape(lat.shape)\n return grid\n\n\ndef hotspotloc_driver(fit, map):\n \"\"\"\n Calculates a distribution of hotspot locations based on the MCMC\n posterior distribution.\n\n Note that this function assumes the first ncurves parameters\n in the posterior are associated with eigencurves. 
This will not\n be true if some eigencurves are skipped over, as MC3 does not\n include fixed parameters in the posterior.\n\n Inputs\n ------\n fit: Fit instance\n\n map: Map instance (not starry Map)\n\n Returns\n -------\n hslocbest: tuple\n Best-fit hotspot location (lat, lon), in degrees.\n\n hslocstd: tuple\n Standard deviation of the hotspot location posterior distribution\n as (lat, lon)\n\n hspot: tuple\n Marginalized posterior distributions of latitude and longitude\n \"\"\"\n \n post = map.post[map.zmask]\n\n nsamp, nfree = post.shape\n\n ntries = 5\n oversample = 1\n\n if fit.cfg.twod.ncalc > nsamp:\n print(\"Warning: ncalc reduced to match burned-in sample.\")\n ncalc = nsamp\n else:\n ncalc = fit.cfg.twod.ncalc\n \n hslon = np.zeros(ncalc)\n hslat = np.zeros(ncalc)\n thinning = nsamp // ncalc\n\n bounds = None\n bounds = (-90, 90),(-360, 360)\n smap = starry.Map(ydeg=map.lmax)\n # Function defined in this way to avoid passing non-numeric arguments\n def hotspotloc(yval): \n smap[1:,:] = yval\n lat, lon, val = smap.minimize(oversample=oversample,\n ntries=ntries, bounds=bounds)\n return lat, lon, val\n\n arg1 = tt.dvector()\n t_hotspotloc = theano.function([arg1], hotspotloc(arg1))\n\n # Note the maps created here do not include the correct uniform\n # component because that does not affect the location of the\n # hotspot. Also note that the eigenvalues are negated because\n # we want to maximize, not minize, but starry only includes\n # a minimize method.\n pbar = progressbar.ProgressBar(max_value=ncalc)\n for i in range(0, ncalc):\n ipost = i * thinning\n yval = np.zeros((map.lmax+1)**2-1)\n for j in range(map.ncurves):\n yval += -1 * post[ipost,j] * map.eigeny[j,1:]\n\n hslat[i], hslon[i], _ = t_hotspotloc(yval)\n pbar.update(i+1)\n\n star, planet, system = initsystem(fit, map.lmax)\n planet.map[1:,:] = 0.0\n for j in range(map.ncurves):\n planet.map[1:,:] += -1 * map.bestp[j] * map.eigeny[j,1:]\n hslatbest, hslonbest, _ = planet.map.minimize(oversample=oversample,\n bounds=bounds,\n ntries=ntries)\n hslonbest = hslonbest.eval()\n hslatbest = hslatbest.eval()\n\n # Constrain longitudes to [-180, 180]\n hslonbest = (hslonbest + 180.) % 360. - 180.\n hslon = (hslon + 180.) % 360. - 180.\n hslatbest = (hslatbest + 90.) % 180. - 90.\n hslat = (hslat + 90.) % 180. 
- 90.\n \n hslonstd = np.std(hslon)\n hslatstd = np.std(hslat)\n\n # Two-sided errors\n pdf, xpdf, hpdmin = ms.cred_region(hslon)\n crlo = np.amin(xpdf[pdf>hpdmin])\n crhi = np.amax(xpdf[pdf>hpdmin])\n hsloncrlo = crlo - hslonbest\n hsloncrhi = crhi - hslonbest\n\n pdf, xpdf, hpdmin = ms.cred_region(hslat)\n crlo = np.amin(xpdf[pdf>hpdmin])\n crhi = np.amax(xpdf[pdf>hpdmin])\n hslatcrlo = crlo - hslatbest\n hslatcrhi = crhi - hslatbest\n\n hslocbest = (hslatbest, hslonbest)\n hslocstd = (hslatstd, hslonstd)\n hslocpost = (hslat, hslon)\n hsloctserr = ((hslatcrhi, hslatcrlo), (hsloncrhi, hsloncrlo))\n \n return hslocbest, hslocstd, hslocpost, hsloctserr\n\ndef tmappost(fit, map):\n post = map.post[map.zmask]\n\n nsamp, nfree = post.shape\n ncurves = map.ncurves\n\n if fit.cfg.twod.ncalc > nsamp:\n print(\"Warning: ncalc reduced to match burned-in sample.\")\n ncalc = nsamp\n else:\n ncalc = fit.cfg.twod.ncalc\n\n thinning = nsamp // ncalc\n\n fmaps = np.zeros((ncalc, fit.cfg.twod.nlat, fit.cfg.twod.nlon))\n tmaps = np.zeros((ncalc, fit.cfg.twod.nlat, fit.cfg.twod.nlon))\n \n star, planet, system = initsystem(fit, map.lmax)\n\n def calcfmap(yval, unifamp):\n planet.map[1:,:] = 0.0\n amp = unifamp - 1\n fmap = planet.map.intensity(lat=fit.lat.flatten(),\n lon=fit.lon.flatten()) * amp\n\n planet.map[1:,:] = yval\n fmap += planet.map.intensity(lat=fit.lat.flatten(),\n lon=fit.lon.flatten())\n\n return fmap\n\n arg1 = tt.dvector()\n arg2 = tt.dscalar()\n t_calcfmap = theano.function([arg1, arg2], calcfmap(arg1, arg2))\n \n pbar = progressbar.ProgressBar(max_value=ncalc)\n for i in range(ncalc):\n ipost = i * thinning\n yval = np.zeros((map.lmax+1)**2-1)\n for j in range(map.ncurves):\n yval += post[ipost,j] * map.eigeny[j,1:]\n \n fmaps[i] = t_calcfmap(yval, post[ipost, ncurves]).reshape(fit.lat.shape)\n tmaps[i] = fmap_to_tmap(fmaps[i], map.wlmid*1e-6,\n fit.cfg.planet.r, fit.cfg.star.r,\n fit.cfg.star.t, post[ipost,ncurves+1])\n \n pbar.update(i+1)\n\n return fmaps, tmaps\n\ndef fmap_to_tmap(fmap, wl, rp, rs, ts, scorr):\n '''\n Convert flux map to brightness temperatures.\n See Rauscher et al., 2018, eq. 
8\n '''\n ptemp = (sc.h * sc.c) / (wl * sc.k)\n sfact = 1 + scorr\n tmap = ptemp / np.log(1 + (rp / rs)**2 *\n (np.exp(ptemp / ts) - 1) /\n (np.pi * fmap * sfact))\n return tmap\n\ndef ess(chain):\n '''\n Calculates the Steps Per Effectively-Independent Sample and\n Effective Sample Size (ESS) of a chain from an MCMC posterior \n distribution.\n\n Adapted from some code I wrote for MC3 many years ago, and\n the SPEIS/ESS calculation in BART.\n '''\n nciter, npar = chain.shape\n\n speis = np.zeros(npar)\n ess = np.zeros(npar)\n\n for i in range(npar):\n mean = np.mean(chain[:,i])\n autocorr = np.correlate(chain[:,i] - mean,\n chain[:,i] - mean,\n mode='full')\n # Keep lags >= 0 and normalize\n autocorr = autocorr[np.size(autocorr) // 2:] / np.max(autocorr)\n # Sum adjacent pairs (Geyer, 1993)\n pairsum = autocorr[:-1:2] + autocorr[1::2]\n # Find where the sum goes negative, or use the whole thing\n if np.any(pairsum < 0):\n idx = np.where(pairsum < 0)[0][0]\n else:\n idx = len(pairsum)\n print(\"WARNING: parameter {} did not decorrelate!\"\n \"Do not trust ESS/SPEIS!\".format(i))\n # Calculate SPEIS\n speis[i] = -1 + 2 * np.sum(pairsum[:idx])\n ess[i] = nciter / speis[i]\n\n return speis, ess\n\ndef crsig(ess, cr=0.683):\n '''\n Calculates the absolute error on an estimate of a credible region\n of a given percentile based on the effective sample size.\n\n See Harrington et al, 2021.\n\n Arguments\n ---------\n ess: int\n Effective Sample Size\n\n cr: float\n Credible region percentile to calculate error on. E.g., \n for a 1-sigma region, use 0.683 (the default).\n\n Returns\n -------\n crsig: float\n The absolute error on the supplied credible region.\n '''\n return (cr * (1 - cr) / (ess + 3))**0.5\n\n@njit\ndef fast_linear_interp(a, b, x):\n return (b[1] - a[1]) / (b[0] - a[0]) * (x - a[0]) + a[1]\n\n@njit\ndef blackbody(T, wn):\n '''\n Calculates the Planck function for a grid of temperatures and\n wavenumbers. Wavenumbers must be in /cm.\n '''\n nt = len(T)\n nwn = len(wn)\n bb = np.zeros((nt, nwn))\n\n # Convert from /cm to /m\n wn_m = wn * 1e2\n for i in range(nt):\n bb[i] = (2.0 * sc.h * sc.c**2 * wn_m**3) \\\n * 1/(np.exp(sc.h * sc.c * wn_m / sc.k / T[i]) - 1.0)\n\n return bb \n" ]
[ [ "numpy.exp", "numpy.mean", "numpy.min", "numpy.where", "numpy.cos", "numpy.size", "numpy.max", "numpy.sin", "numpy.trapz", "numpy.sqrt", "scipy.interpolate.interp1d", "numpy.zeros", "numpy.diff", "numpy.std", "numpy.loadtxt", "numpy.amax", "numpy.argsort", "numpy.amin", "numpy.correlate", "numpy.sum", "numpy.any" ] ]
huangxiang701/polymerNEN
[ "df5d706a881666dac7c38e7151caea8a58862de4" ]
[ "src/trainer/train_vect_data.py" ]
[ "\"\"\"\nCopyright (C) 2019 University of Massachusetts Amherst.\nThis file is part of \"expLinkage\"\nhttp://github.com/iesl/expLinkage\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\nhttp://www.apache.org/licenses/LICENSE-2.0\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nimport argparse, time, sys, os\nfrom pathlib import Path\nimport torch\n#import ptvsd\n#ptvsd.enable_attach(address=('localhost', 5678), redirect_output=True)\n\nfrom models.mahalabonis import MahalanobisDist, GenLinkMahalanobis\n\nfrom utils.Config import Config\nfrom utils.basic_utils import create_logger\nfrom eval.finalEval import run_final_eval\n\nfrom trainer.VectDataTrainer import VectDataTrainer\n\ndef trainExpLinkOnly(trainer):\n\tif trainer.config.trainObj == \"linkage_auto\":\n\t\ttrainer.logger.info(\"Not training linkageAlpha separately because if trainObj is linakge_auto then it must be trained already...\")\n\telif trainer.config.modelType == \"maha\":\n\t\t\n\t\tassert isinstance(trainer.model, MahalanobisDist)\n\t\t\n\t\tnew_model = GenLinkMahalanobis(trainer.config)\n\t\tnew_model.seqModel[0].weight.requires_grad = False\n\t\tnew_model.seqModel[0].weight.data = trainer.model.seqModel[0].weight.data\n\t\tnew_model.seqModel[0].weight.requires_grad = True\n\t\ttrainer.model = new_model\n\t\tif trainer.config.useGPU:\n\t\t\ttrainer.logger.info(\"Shifting model to cuda because GPUs are available!\")\n\t\t\ttrainer.model = trainer.model.cuda()\n\t\t\n\t\ttrainer.config.trainAlpha = True\n\t\ttrainer.config.trainModel = False\n\t\ttrainer.resetOptimizer()\n\t\t\n\t\tif \"linkage_auto\" not in trainer.config.inferenceMethods:\n\t\t\ttrainer.config.inferenceMethods += [\"linkage_auto\"]\n\t\tif \"linkage_auto@t\" not in trainer.config.inferenceMethods:\n\t\t\ttrainer.config.inferenceMethods += [\"linkage_auto@t\"]\n\t\t\n\t\torigCSVFile = \"{}/origTraining/results.csv\"\n\t\tfileCheck = Path(origCSVFile.format(trainer.config.resultDir))\n\t\tif not fileCheck.is_file():\n\t\t\tprint(\"File does not exist:{}\".format(origCSVFile))\n\t\t\tcommand = \"cd {} && mkdir -p origTraining && cp *.csv origTraining/ && cp *.png origTraining/\".format(trainer.config.resultDir)\n\t\t\tos.system(command)\n\t\t\n\t\ttrainer.config.trainObj = \"linkage_auto\"\n\t\ttrainer.logger.info(\"Training alpha parameter of expLink ...\\n\\n\\n\")\n\t\ttrainer.logger.info(trainer.model)\n\t\t\n\t\tt1 = time.time()\n\t\tsuccess = trainer.train()\n\t\tif success is not None and (not success):\n\t\t\ttry:\n\t\t\t\ttrainer.config.inferenceMethods.remove(\"linkage_auto@t\")\n\t\t\t\ttrainer.config.inferenceMethods.remove(\"linkage_auto\")\n\t\t\texcept:\n\t\t\t\tpass\n\t\t\n\t\n\n\t\ttrainer.printModelWeights()\n\t\ttrainer.logger.info(\"Training alpha parameter of expLink linkage ends...in time={:.3f}\".format(time.time() - t1))\n\t\ttrainer.logger.info(\"Saving model...\")\n\t\t\n\t\ttrainer.config.bestModel = os.path.join(trainer.config.resultDir, \"model_alpha.torch\")\n\t\ttorch.save(trainer.model, trainer.config.bestModel )\n\t\ttrainer.config.save_config(trainer.config.resultDir, \"config_expLink.json\")\n\t\ttrainer.logger.info(\"Saved 
model...\")\n\t\t\n\telse:\n\t\ttrainer.logger.info(\"Not training linkageAlpha separately because if modelType is not Mahalanobis distance matrix... \")\n\ndef runMain(config):\n\tcommand = sys.argv\n\tstart = time.time()\n\tassert isinstance(config,Config)\n\tif config.mode == \"train\":\n\t\ttrainer = VectDataTrainer(config)\n\t\t# trainer.printModelWeights()\n\t\t\n\t\tt1 = time.time()\n\t\ttrainer.train()\n\t\t\n\t\ttrainer.logger.info(\"Training ends...in time={:.3f}\".format(time.time() - t1))\n\t\t# trainer.printModelWeights()\n\t\ttrainer.logger.info(\"Saving model...\")\n\t\t\n\t\ttrainer.config.bestModel = os.path.join(trainer.config.resultDir, \"model.torch\")\n\t\ttorch.save(trainer.model, trainer.config.bestModel)\n\t\ttrainer.config.save_config(trainer.config.resultDir)\n\t\ttrainer.logger.info(\"Saved model...\")\n\t\t\n\t\t################### Train alpha parameter for softLink ##########################\n\t\n\t\tif config.trainExpLink:\n\t\t\ttrainExpLinkOnly(trainer)\n\t\t#################################################################################\n\t\n\telif config.mode == \"trainExpLink\":\n\t\ttrainer = VectDataTrainer(config)\n\t\t\n\t\t# Load model and reset optimizer to have parameters of the loaded model\n\t\ttrainer.loadModel()\n\t\t\n\t\t# Update output directory\n\t\ttrainer.config.resultDir = trainer.config.resultDir + trainer.config.newDirSuffix\n\t\tPath(trainer.config.resultDir).mkdir(parents=True, exist_ok=True) # Create resultDir directory if not already present\n\t\t\n\t\t# Update logger object\n\t\ttrainer.logger = create_logger(config=config, logFile=\"logFile_trainExpLink.txt\", currLogger=trainer.logger)\n\t\t\n\t\ttrainer.logger.info(trainer)\n\t\ttrainer.logger.info(command)\n\t\ttrainExpLinkOnly(trainer)\n\t\t\n\telif config.mode == \"test\":\n\t\ttrainer = VectDataTrainer(config)\n\t\t\n\t\t# Load model and reset optimizer to have parameters of the loaded model\n\t\ttrainer.loadModel()\n\t\t\n\t\t# Update output directory\n\t\ttrainer.config.resultDir = trainer.config.resultDir + trainer.config.newDirSuffix\n\t\tPath(trainer.config.resultDir).mkdir(parents=True, exist_ok=True) # Create resultDir directory if not already present\n\t\t\n\t\t# Update logger object\n\t\ttrainer.logger = create_logger(config=config, logFile=\"logFile_retest.txt\", currLogger=trainer.logger)\n\t\t\n\t\ttrainer.logger.info(command)\n\t\ttrainer.logger.info(trainer)\n\t\n\telse:\n\t\traise Exception(\"Invalid mode = {}. 
Choose one from: test, train\".format(config.mode))\n\t\n\t\n\trun_final_eval(trainer)\n\t# trainer.performFinalEvaluation()\n\ttrainer.logger.info(\"\\n\\n\\n\\n\")\n\t\n\ttrainer.logger.info(trainer)\n\ttrainer.logger.info(command)\n\tend = time.time()\n\ttrainer.logger.info(\" Total time taken = {:.4f} = {:.4f} min = {:.4f} hours\".format(end - start, (end - start)/60, (end - start)/3600))\n\nif __name__ == '__main__':\n\tparser = argparse.ArgumentParser( description='Supervised clustering training for data in R^n')\n\tparser.add_argument('--config', type=str, help=\"Config file\")\n\n\t################################## OPTIONAL ARGUMENTS TO OVERWRITE CONFIG FILE ARGS###################################################\n\ttemp_config = Config()\n\tfor config_arg in temp_config.__dict__:\n\t\tdef_val = temp_config.__getattribute__(config_arg)\n\t\targ_type = type(def_val) if def_val is not None else str\n\t\tparser.add_argument('--{}'.format(config_arg), type=arg_type, default=None, help='If not specified then value from config file will be used')\n\t#########################################################################################################\n\n\targs = parser.parse_args()\n\t\n\tassert args.config is not None\n\tconfig = Config(args.config)\n\tfor config_arg in temp_config.__dict__:\n\t\tdef_val = getattr(args, config_arg)\n\t\tif def_val is not None:\n\t\t\told_val = config.__dict__[config_arg]\n\t\t\tconfig.__dict__.update({config_arg:def_val})\n\t\t\tnew_val =config.__dict__[config_arg]\n\t\t\tprint(\"Updating Config.{} from {} to {} using arg_val={}\".format(config_arg, old_val, new_val, def_val))\n\t\n\t# Update result directory if there are any parameters passed through command line that are different from those in config file\n\tif config.resultDir is None:\n\t\tconfig.updateResultDir(\"auto\")\n\telse:\n\t\tconfig.updateResultDir(config.resultDir)\n\t\n\tPath(config.resultDir).mkdir(parents=True, exist_ok=True) # Create resultDir directory if not already present\n\tconfig.useGPU \t\t= config.cuda and torch.cuda.is_available()\n\tconfig.updateRandomSeeds(config.seed)\n\tconfig.save_config(config.resultDir, \"orig_config.json\")\n\trunMain(config)\n\t\n\t\n\n\n" ]
[ [ "torch.save", "torch.cuda.is_available" ] ]
FranklinBF/drl_girls_melodic
[ "20d44e5748d628e5a04d3b2a8106f420d834f41c" ]
[ "rl_agent/src/rl_agent/env_wrapper/ros_env_raw_scan_prep_wp.py" ]
[ "'''\n @name: ros_env_raw_scan_prep_wo.py\n @brief: This class is a simulation environment wrapper for\n the Polar Representation.\n @author: Ronja Gueldenring\n @version: 3.5\n @date: 2019/04/05\n'''\n\n# python relevant\nimport numpy as np\nimport math\n# ros-relevant\nimport rospy\n# custom classes\nfrom rl_agent.env_wrapper.ros_env import RosEnvAbs\nfrom sensor_msgs.msg import LaserScan\n\nclass RosEnvRawScanPrepWp(RosEnvAbs):\n '''\n This class is a simulation environment wrapper for\n the Polar Representation.\n '''\n def __init__(self, ns, state_collector, execution_mode, task_mode, state_size, observation_space, stack_offset, action_size, action_space, debug, goal_radius, wp_radius, robot_radius, reward_fnc):\n state_collector.set_state_mode(2)\n super(RosEnvRawScanPrepWp, self).__init__(ns, state_collector, execution_mode, task_mode, state_size, observation_space, stack_offset, action_size, action_space, debug, goal_radius, wp_radius, robot_radius, reward_fnc)\n self.__res = rospy.get_param(\"%s/rl_agent/resolution\"%ns)\n\n def get_observation_(self):\n \"\"\"\n Function returns state that will be fed to the rl-agent\n It includes\n the raw laser scan data,\n the waypoint data in with the same format as the laser scan data.\n The distance of the waypoint is saved\n at the appropriate angle position in the vector.\n :return: state\n \"\"\"\n waypoint = self.wp_\n num_of_wps = len(waypoint.points)\n\n state = np.ones(self.STATE_SIZE, dtype=np.float)\n\n # add laserscan\n state[ :, 0, 0] = self.merged_scan_.ranges\n\n # generate wp-vector\n wp_vector = np.zeros(self.STATE_SIZE[0])\n for i in range(num_of_wps):\n dist = math.sqrt(math.pow(waypoint.points[i].x, 2) + math.pow(waypoint.points[i].y, 2))\n angle = math.atan2(waypoint.points[i].y, waypoint.points[i].x) + math.pi\n wp_vector[math.floor(angle/self.merged_scan_.angle_increment)] = dist\n state[:,1,0] = wp_vector\n\n # Discretize to a resolution of 5cm.\n state = np.round(np.divide(state, self.__res))*self.__res\n if self.debug_:\n debug_scan = LaserScan()\n # debug_scan.header.frame_id = self.merged_scan_.header.frame_id\n debug_scan.header = self.merged_scan_.header\n debug_scan.angle_min = self.merged_scan_.angle_min\n debug_scan.angle_max = self.merged_scan_.angle_max\n debug_scan.angle_increment = self.merged_scan_.angle_increment\n debug_scan.range_max = 7.0\n debug_scan.ranges = state[:, 0, 0]\n self.debugger_.show_scan_stack(debug_scan)\n return state\n" ]
[ [ "numpy.divide", "numpy.ones", "numpy.zeros" ] ]
alexmlamb/mimicry
[ "be030fb24a79c9bd50843d188d930c01e2260f45" ]
[ "torch_mimicry/nets/infomax_gan/attention.py" ]
[ "\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch_mimicry.nets.infomax_gan.sparse_attn import SparseAttention\n\nclass ScaledDotProductAttention(nn.Module):\n ''' Scaled Dot-Product Attention '''\n\n def __init__(self, temperature, dropout=0.1):\n super().__init__()\n self.temperature = temperature\n #self.dropout = nn.Dropout(attn_dropout)\n self.use_sparse = True\n\n self.dropout = nn.Dropout(dropout)\n\n self.sa = SparseAttention()\n\n def forward(self, q, k, v, mask=None):\n\n # bs x pos x key .. bs x key x pos\n\n # bs x pos x pos .. bs x pos x key\n\n attn = torch.matmul(q / self.temperature, k.permute(0,2,1))\n\n if mask is not None:\n attn = attn.masked_fill(mask == 0, -1e9)\n\n\n attn = self.dropout(F.softmax(attn, dim=-1))\n if self.use_sparse:\n mb, ins, outs = attn.shape[0], attn.shape[1], attn.shape[2]\n sparse_attn = attn.reshape((mb*ins, outs))\n sparse_attn = self.sa(sparse_attn)\n sparse_attn = sparse_attn.reshape((mb,ins,outs))\n attn = sparse_attn*1.0\n\n\n\n output = torch.matmul(attn, v)\n\n return output, attn\n\n" ]
[ [ "torch.nn.Dropout", "torch.matmul", "torch.nn.functional.softmax" ] ]
Pandinosaurus/Colorization
[ "a1d84d12fa29040b79c4001e3de8e2a77916b43e" ]
[ "atari_diverse/loss.py" ]
[ "import torch\nimport torch.nn as nn\n\nmaeloss = nn.L1Loss()\nmseloss = nn.MSELoss()\nsoftplus = nn.Softplus()\n\n\nclass DecomposeLossCalculator:\n def __init__(self):\n pass\n\n @staticmethod\n def content_loss(y: torch.Tensor, t: torch.Tensor) -> torch.Tensor:\n return torch.mean(torch.abs(y - t))\n\n @staticmethod\n def adversarial_disloss(discriminator: nn.Module,\n y: torch.Tensor,\n t: torch.Tensor) -> torch.Tensor:\n sum_loss = 0\n fake_list = discriminator(y)\n real_list = discriminator(t)\n\n for fake, real in zip(fake_list, real_list):\n loss = torch.mean(softplus(-real)) + torch.mean(softplus(fake))\n sum_loss += loss\n\n return sum_loss\n\n @staticmethod\n def adversarial_genloss(discriminator: nn.Module,\n y: torch.Tensor) -> torch.Tensor:\n sum_loss = 0\n fake_list = discriminator(y)\n\n for fake in fake_list:\n loss = torch.mean(softplus(-fake))\n sum_loss += loss\n\n return sum_loss\n\n @staticmethod\n def adversarial_hingedis(discriminator: nn.Module,\n y: torch.Tensor,\n t: torch.Tensor) -> torch.Tensor:\n sum_loss = 0\n fake_list = discriminator(y)\n real_list = discriminator(t)\n\n for fake, real in zip(fake_list, real_list):\n sum_loss += nn.ReLU()(1.0 + fake).mean()\n sum_loss += nn.ReLU()(1.0 - real).mean()\n\n return sum_loss\n\n @staticmethod\n def adversarial_hingegen(discriminator: torch.Tensor,\n y: torch.Tensor) -> torch.Tensor:\n sum_loss = 0\n fake_list = discriminator(y)\n\n for fake in fake_list:\n sum_loss += -fake.mean()\n\n return sum_loss\n\n @staticmethod\n def positive_enforcing_loss(y: torch.Tensor) -> torch.Tensor:\n sum_loss = 0\n batch, ch, h, w = y.size()\n\n for color in range(3):\n perch = y[:, color, :, :]\n mean = torch.mean(perch)\n mean = mean * torch.ones_like(mean)\n loss = torch.mean((perch-mean)**2)\n sum_loss += loss\n\n return -sum_loss\n\n @staticmethod\n def perceptual_loss(vgg: nn.Module,\n y: torch.Tensor,\n t: torch.Tensor) -> torch.Tensor:\n y_vgg = vgg(y)\n t_vgg = vgg(t)\n\n _, c, h, w = y.size()\n\n loss = maeloss(y_vgg, t_vgg) / (c * h * w)\n\n return loss\n\n @staticmethod\n def total_variation_loss(y: torch.Tensor) -> torch.Tensor:\n _, c, h, w = y.size()\n\n vertical_loss = torch.mean((torch.abs(y[:, :, :, :-1] - y[:, :, :, 1:]))**2)\n horizon_loss = torch.mean((torch.abs(y[:, :, :-1, :] - y[:, :, 1:, :]))**2)\n\n return (vertical_loss + horizon_loss) / (c * h * w)\n\n @staticmethod\n def latent_constrain_loss(y: torch.Tensor,\n t: torch.Tensor) -> torch.Tensor:\n return torch.mean(torch.abs(y - t))\n\n @staticmethod\n def kl_loss(y: torch.Tensor) -> torch.Tensor:\n x_2 = torch.pow(y, 2)\n loss = torch.mean(x_2)\n\n return loss\n\n @staticmethod\n def mode_seeking_regularize(y0: torch.Tensor,\n y1: torch.Tensor,\n z0: torch.Tensor,\n z1: torch.Tensor) -> torch.Tensor:\n\n lz = torch.mean(torch.abs(y0 - y1)) / (torch.mean(torch.abs(z0 - z1)) + 1e-9)\n loss = 1 / (lz + 1e-5)\n\n return loss\n\n @staticmethod\n def color_regularize(fixer: nn.Module,\n y: torch.Tensor,\n t: torch.Tensor) -> torch.Tensor:\n\n y_flat = fixer(y)\n\n return torch.mean(torch.abs(y_flat - t))\n" ]
[ [ "torch.nn.MSELoss", "torch.nn.L1Loss", "torch.abs", "torch.nn.ReLU", "torch.nn.Softplus", "torch.ones_like", "torch.mean", "torch.pow" ] ]
Valentyn1997/oct-diagn-semi-supervised
[ "6fb5803fba23a8ad875235c90361be1a37eeeb72" ]
[ "src/data/transforms.py" ]
[ "import numpy as np\nfrom PIL import Image\nfrom src.data.rand_augment import RandAugmentMC\nimport torchvision.transforms as transforms\n\n\ndef pad(x, border=4):\n return np.pad(x, [(0, 0), (border, border), (border, border)], mode='reflect')\n\n\nclass RandomPadandCrop(object):\n \"\"\"Crop randomly the image.\n\n Args:\n output_size (tuple or int): Desired output size. If int, square crop\n is made.\n \"\"\"\n\n def __init__(self, width=4, output_size=None):\n self.width = width\n if output_size is None:\n self.output_size = output_size\n # assert isinstance(output_size, (int, tuple))\n elif isinstance(output_size, int):\n self.output_size = (output_size, output_size)\n else:\n assert len(output_size) == 2\n self.output_size = output_size\n\n def __call__(self, x):\n old_h, old_w = x.size[:2]\n x = np.transpose(x, (2, 0, 1))\n x = pad(x, self.width)\n\n h, w = x.shape[1:]\n if self.output_size is None:\n new_h, new_w = old_h, old_w\n else:\n new_h, new_w = self.output_size\n\n top = np.random.randint(0, h - new_h)\n left = np.random.randint(0, w - new_w)\n\n x = x[:, top: top + new_h, left: left + new_w]\n\n return Image.fromarray(np.transpose(x, (1, 2, 0)))\n\n\n# TODO Implement TransformKTimes\nclass TransformTwice:\n def __init__(self, transform):\n self.transform = transform\n\n def __call__(self, inp):\n out1 = self.transform(inp)\n out2 = self.transform(inp)\n return out1, out2\n\n\nclass TransformFix(object):\n def __init__(self, base_transform):\n self.weak = base_transform\n\n # Inserting strong augmentation\n self.strong = []\n for transform in base_transform.transforms:\n if isinstance(transform, transforms.ToTensor):\n self.strong.append(RandAugmentMC(n=2, m=10))\n self.strong.append(transform)\n self.strong = transforms.Compose(self.strong)\n\n def __call__(self, inp):\n weak = self.weak(inp)\n strong = self.strong(inp)\n return weak, strong\n\n\ndef build_transforms(normalize=None, center_crop=None, image_size=None,\n random_crop=None, flip=None, random_resize_crop=None):\n \"\"\"\n\n Args:\n normalize (tuple or transforms.Normalize): Parameters for data normalization.\n center_crop (int): Size for center crop.\n image_size (int): Size for image size.\n random_crop (int): Size for image random crop.\n flip (bool): Randomly flip the data horizontally.\n random_resize_crop (dict): Random resize crop the image.\n\n Returns:\n Transforms\n\n \"\"\"\n\n transform_ = []\n\n if image_size:\n if isinstance(image_size, int):\n image_size = (image_size, image_size)\n transform_.append(transforms.Resize(image_size))\n\n if random_resize_crop:\n transform_.append(transforms.RandomResizedCrop(random_resize_crop['size'], random_resize_crop['scale']))\n elif random_crop:\n transform_.append(transforms.RandomCrop(random_crop))\n elif center_crop:\n transform_.append(transforms.CenterCrop(center_crop))\n\n if flip:\n transform_.append(transforms.RandomHorizontalFlip())\n\n transform_.append(transforms.ToTensor())\n\n if normalize:\n if isinstance(normalize, transforms.Normalize):\n transform_.append(normalize)\n else:\n transform_.append(transforms.Normalize(*normalize))\n transform = transforms.Compose(transform_)\n return transform\n" ]
[ [ "numpy.pad", "numpy.random.randint", "numpy.transpose" ] ]
vsinic/deep_rl_trader
[ "1aafa451f0f00ad3e27c43241a596a62d608ba72" ]
[ "ddqn_rl_trader.py" ]
[ "import numpy as np\n\n# import keras\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Activation, Flatten, CuDNNLSTM\nfrom keras.optimizers import Adam\n\n# keras-rl agent\nfrom rl.agents.dqn import DQNAgent\nfrom rl.policy import BoltzmannQPolicy, EpsGreedyQPolicy\nfrom rl.memory import SequentialMemory\n\n# trader environment\nfrom TraderEnv import OhlcvEnv\n# custom normalizer\nfrom util import NormalizerProcessor\n\ndef create_model(shape, nb_actions):\n model = Sequential()\n model.add(CuDNNLSTM(64, input_shape=shape, return_sequences=True))\n model.add(CuDNNLSTM(64))\n model.add(Dense(32))\n model.add(Activation('relu'))\n model.add(Dense(nb_actions, activation='linear'))\n\ndef main():\n # OPTIONS\n ENV_NAME = 'OHLCV-v0'\n TIME_STEP = 30\n\n # Get the environment and extract the number of actions.\n PATH_TRAIN = \"./data/train/\"\n PATH_TEST = \"./data/test/\"\n env = OhlcvEnv(TIME_STEP, path=PATH_TRAIN)\n env_test = OhlcvEnv(TIME_STEP, path=PATH_TEST)\n\n # random seed\n np.random.seed(123)\n env.seed(123)\n\n nb_actions = env.action_space.n\n model = create_model(shape=env.shape, nb_actions=nb_actions)\n print(model.summary())\n\n # Finally, we configure and compile our agent. You can use every built-in Keras optimizer and even the metrics!\n memory = SequentialMemory(limit=50000, window_length=TIME_STEP)\n # policy = BoltzmannQPolicy()\n policy = EpsGreedyQPolicy()\n # enable the dueling network\n # you can specify the dueling_type to one of {'avg','max','naive'}\n dqn = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=200,\n enable_dueling_network=True, dueling_type='avg', target_model_update=1e-2, policy=policy,\n processor=NormalizerProcessor())\n dqn.compile(Adam(lr=1e-3), metrics=['mae'])\n\n while True:\n # train\n dqn.fit(env, nb_steps=5500, nb_max_episode_steps=10000, visualize=False, verbose=2)\n try:\n # validate\n info = dqn.test(env_test, nb_episodes=1, visualize=False)\n n_long, n_short, total_reward, portfolio = info['n_trades']['long'], info['n_trades']['short'], info[\n 'total_reward'], int(info['portfolio'])\n np.array([info]).dump(\n './info/duel_dqn_{0}_weights_{1}LS_{2}_{3}_{4}.info'.format(ENV_NAME, portfolio, n_long, n_short,\n total_reward))\n dqn.save_weights(\n './model/duel_dqn_{0}_weights_{1}LS_{2}_{3}_{4}.h5f'.format(ENV_NAME, portfolio, n_long, n_short, total_reward),\n overwrite=True)\n except KeyboardInterrupt:\n continue\n\nif __name__ == '__main__':\n main()" ]
[ [ "numpy.random.seed", "numpy.array" ] ]
BearerPipelineTest/Cirq
[ "640ef8f82d6a56ec95361388ce7976e096cca906" ]
[ "cirq-core/cirq/sim/density_matrix_simulation_state_test.py" ]
[ "# Copyright 2021 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nimport pytest\n\nimport cirq\n\n\ndef test_default_parameter():\n qid_shape = (2,)\n tensor = cirq.to_valid_density_matrix(\n 0, len(qid_shape), qid_shape=qid_shape, dtype=np.complex64\n )\n args = cirq.DensityMatrixSimulationState(qubits=cirq.LineQubit.range(1), initial_state=0)\n np.testing.assert_almost_equal(args.target_tensor, tensor)\n assert len(args.available_buffer) == 3\n for buffer in args.available_buffer:\n assert buffer.shape == tensor.shape\n assert buffer.dtype == tensor.dtype\n assert args.qid_shape == qid_shape\n\n\ndef test_shallow_copy_buffers():\n args = cirq.DensityMatrixSimulationState(qubits=cirq.LineQubit.range(1), initial_state=0)\n copy = args.copy(deep_copy_buffers=False)\n assert copy.available_buffer is args.available_buffer\n\n\ndef test_decomposed_fallback():\n class Composite(cirq.Gate):\n def num_qubits(self) -> int:\n return 1\n\n def _decompose_(self, qubits):\n yield cirq.X(*qubits)\n\n args = cirq.DensityMatrixSimulationState(\n qubits=cirq.LineQubit.range(1),\n prng=np.random.RandomState(),\n initial_state=0,\n dtype=np.complex64,\n )\n\n cirq.act_on(Composite(), args, cirq.LineQubit.range(1))\n np.testing.assert_allclose(\n args.target_tensor, cirq.one_hot(index=(1, 1), shape=(2, 2), dtype=np.complex64)\n )\n\n\ndef test_cannot_act():\n class NoDetails:\n pass\n\n args = cirq.DensityMatrixSimulationState(\n qubits=cirq.LineQubit.range(1),\n prng=np.random.RandomState(),\n initial_state=0,\n dtype=np.complex64,\n )\n with pytest.raises(TypeError, match=\"Can't simulate operations\"):\n cirq.act_on(NoDetails(), args, qubits=())\n\n\ndef test_with_qubits():\n original = cirq.DensityMatrixSimulationState(\n qubits=cirq.LineQubit.range(1), initial_state=1, dtype=np.complex64\n )\n extened = original.with_qubits(cirq.LineQubit.range(1, 2))\n np.testing.assert_almost_equal(\n extened.target_tensor,\n cirq.density_matrix_kronecker_product(\n np.array([[0, 0], [0, 1]], dtype=np.complex64),\n np.array([[1, 0], [0, 0]], dtype=np.complex64),\n ),\n )\n\n\ndef test_qid_shape_error():\n with pytest.raises(ValueError, match=\"qid_shape must be provided\"):\n cirq.sim.density_matrix_simulation_state._BufferedDensityMatrix.create(initial_state=0)\n\n\ndef test_initial_state_vector():\n qubits = cirq.LineQubit.range(3)\n args = cirq.DensityMatrixSimulationState(\n qubits=qubits, initial_state=np.full((8,), 1 / np.sqrt(8)), dtype=np.complex64\n )\n assert args.target_tensor.shape == (2, 2, 2, 2, 2, 2)\n\n args2 = cirq.DensityMatrixSimulationState(\n qubits=qubits, initial_state=np.full((2, 2, 2), 1 / np.sqrt(8)), dtype=np.complex64\n )\n assert args2.target_tensor.shape == (2, 2, 2, 2, 2, 2)\n\n\ndef test_initial_state_matrix():\n qubits = cirq.LineQubit.range(3)\n args = cirq.DensityMatrixSimulationState(\n qubits=qubits, initial_state=np.full((8, 8), 1 / 8), dtype=np.complex64\n )\n assert args.target_tensor.shape == (2, 2, 2, 2, 2, 2)\n\n args2 = 
cirq.DensityMatrixSimulationState(\n qubits=qubits, initial_state=np.full((2, 2, 2, 2, 2, 2), 1 / 8), dtype=np.complex64\n )\n assert args2.target_tensor.shape == (2, 2, 2, 2, 2, 2)\n\n\ndef test_initial_state_bad_shape():\n qubits = cirq.LineQubit.range(3)\n with pytest.raises(ValueError, match=\"Invalid quantum state\"):\n cirq.DensityMatrixSimulationState(\n qubits=qubits, initial_state=np.full((4,), 1 / 2), dtype=np.complex64\n )\n with pytest.raises(ValueError, match=\"Invalid quantum state\"):\n cirq.DensityMatrixSimulationState(\n qubits=qubits, initial_state=np.full((2, 2), 1 / 2), dtype=np.complex64\n )\n\n with pytest.raises(ValueError, match=\"Invalid quantum state\"):\n cirq.DensityMatrixSimulationState(\n qubits=qubits, initial_state=np.full((4, 4), 1 / 4), dtype=np.complex64\n )\n with pytest.raises(ValueError, match=\"Invalid quantum state\"):\n cirq.DensityMatrixSimulationState(\n qubits=qubits, initial_state=np.full((2, 2, 2, 2), 1 / 4), dtype=np.complex64\n )\n" ]
[ [ "numpy.full", "numpy.array", "numpy.random.RandomState", "numpy.testing.assert_almost_equal", "numpy.sqrt" ] ]
superkerokero/hinabe
[ "9fa00d1cbdee2b046426c8ed0c7c269556125337" ]
[ "naruhodo/utils/misc.py" ]
[ "\"\"\"\nModule for miscellaneous utility functions.\n\"\"\"\nimport re\nimport json\nfrom math import sqrt\nimport numpy as np\nimport networkx as nx\nfrom nxpd import draw\nfrom naruhodo.utils.dicts import NodeType2StyleDict, NodeType2ColorDict, NodeType2FontColorDict, EdgeType2StyleDict, EdgeType2ColorDict\n\n\n_re_sent = re.compile(r'([^ !?。]*[!?。])')\n\"\"\"\nPrecompiled regular expression for separating sentences.\n\"\"\"\n_re1 = re.compile(r'\\(.*?\\)')\n_re2 = re.compile(r'\\[.*?\\]')\n_re3 = re.compile(r'\\(.*?\\)')\n_re4 = re.compile(r'\\<.*?\\>')\n\"\"\"\nPrecompiled regular expressions for getting rid of parenthesis.\n\"\"\"\n\ndef preprocessText(text):\n \"\"\"Get rid of weird parts from the text that interferes analysis.\"\"\"\n text = text.replace(\"\\n\", \"\").replace(\"|\", \"、\").replace(\" \", \"\").strip()\n text = _re1.sub(\"\", text)\n text = _re2.sub(\"\", text)\n text = _re3.sub(\"\", text)\n text = _re4.sub(\"\", text)\n return text\n\ndef parseToSents(context):\n \"\"\"Parse given context into list of individual sentences.\"\"\"\n return [sent.strip().replace('*', \"-\") for sent in _re_sent.split(context) if sent.strip() != \"\"]\n\ndef exportToJsonObj(G):\n \"\"\"Export given networkx graph to JSON object(dict object in python).\"\"\"\n return nx.node_link_data(G)\n\ndef exportToJsonFile(G, filename):\n \"\"\"Export given networkx graph to JSON file.\"\"\"\n with open(filename, 'w') as outfile:\n json.dump(exportToJsonObj(G), outfile)\n \ndef getNodeProperties(info, depth=False):\n \"\"\"Convert node properties for node drawing using nxpd.\"\"\"\n ret = dict()\n ret['shape'] = NodeType2StyleDict[info['type']]\n ret['fillcolor'] = NodeType2ColorDict[info['type']]\n ret['fontcolor'] = NodeType2FontColorDict[info['type']]\n ret['label'] = info['label']\n ret['style'] = 'filled'\n ret['fixedsize'] = True \n ret['fontsize'] = (5.0 + 20.0 / len(info['label'])) * info['count']\n ret['width'] = info['count']*0.75\n ret['count'] = info['count']\n if depth:\n d = np.average(info['depth']) # Average depth of the node\n d = min(d, 5.) # Normalize d to a range of [0, 6]\n cs = [255, 80, 0] # Base reference color at start\n ct = [255, 255, 255] # Base reference color at end\n cn = [0, 0, 0] # Average depth scaled node color\n for i in range(3):\n cn[i] = cs[i] + int((ct[i] - cs[i]) / 5. * d)\n ret['fillcolor'] = rgb2Hex(cn)\n ret['fontcolor'] = '#000000'\n return ret\n \ndef getEdgeProperties(info):\n \"\"\"Convert edge properties for node drawing using nxpd.\"\"\"\n ret = dict()\n ret['label'] = info['label']\n ret['penwidth'] = info['weight'] * 2.0\n ret['weight'] = info['weight']\n ret['style'] = EdgeType2StyleDict[info['type']]\n ret['color'] = EdgeType2ColorDict[info['type']]\n return ret\n\ndef inclusive(A, B):\n \"\"\"Find if one of string A and B includes the other.\"\"\"\n if len(A) > len(B):\n if A.find(B) != -1:\n ret = 1\n else:\n ret = 0\n elif len(A) < len(B):\n if B.find(A) != -1:\n ret = -1\n else:\n ret = 0\n else:\n ret = 0\n return ret\n\ndef cosSimilarity(A, B):\n \"\"\"Compute the cosine similarity between vectors A and B.\"\"\"\n return np.dot(A, B) / sqrt(np.dot(A, A) * np.dot(B, B))\n\ndef harmonicSim(AG, B):\n \"\"\"Return the harmonic distance between a group of vectors AG and vector B.\"\"\"\n size = len(AG)\n ret = 0.\n for i in range(size):\n ret += 1. 
/ cosSimilarity(AG[i], B)\n return float(size) / ret\n\ndef decorate(G, depth, rankdir):\n \"\"\"Generate temporal graph with drawing properties added for nxpd.\"\"\"\n ret = nx.DiGraph()\n ret.graph['rankdir'] = rankdir\n for key, val in G.nodes.items():\n ret.add_node(key, **getNodeProperties(val, depth))\n for key, val in G.edges.items():\n ret.add_edge(*key, **getEdgeProperties(val))\n return ret\n\ndef show(G, depth=False, rankdir='TB'):\n \"\"\"Decorate and draw given graph using nxpd in notebook.\"\"\"\n return draw(decorate(G, depth, rankdir), show='ipynb')\n\ndef plotToFile(G, filename, depth=False, rankdir='TB'):\n \"\"\"Output given graph to a png file using nxpd.\"\"\"\n return draw(decorate(G, depth, rankdir), filename=filename, show=False)\n\ndef _mergeGraph(A, B):\n \"\"\"Return the merged graph of A and B.\"\"\"\n for key, val in B.nodes.items():\n if A.has_node(key):\n A.nodes[key]['count'] += val['count']\n for i in range(len(val['pos'])):\n if val['pos'][i] not in A.nodes[key]['pos']:\n A.nodes[key]['pos'].append(val['pos'][i])\n A.nodes[key]['lpos'].append(val['lpos'][i])\n A.nodes[key]['func'].append(val['func'][i])\n A.nodes[key]['surface'].append(val['surface'][i])\n A.nodes[key]['yomi'].append(val['yomi'][i])\n if 'depth' in A.nodes[key]:\n A.nodes[key]['depth'].append(val['depth'][i])\n else:\n A.add_node(key, **val)\n for key, val in B.edges.items():\n if A.has_edge(*key):\n A.edges[key[0], key[1]]['weight'] += val['weight']\n else:\n A.add_edge(*key, **val)\n return A\n\ndef _mergeEntityList(A, B):\n \"\"\"Return merged entityList os A and B.\"\"\"\n for i in range(len(B)):\n for key, val in B[i].items():\n if key in A[i]:\n for item in val:\n A[i][key].append(item)\n else:\n A[i][key] = val\n return A\n\ndef _mergeProList(A, B):\n \"\"\"Return merged proList os A and B.\"\"\"\n for item in B:\n A.append(item)\n return A\n\ndef _mergeAll(A, B):\n \"\"\"Return merged result of graph, entity list and pronoun list.\"\"\"\n A[0] = _mergeGraph(A[0], B[0])\n A[1] = _mergeEntityList(A[1], B[1])\n A[2] = _mergeProList(A[2], B[2])\n return A\n\ndef hex2Rgb(c):\n \"\"\"\n Convert hex color in #XXXXXX format to RGB list.\n \"\"\"\n return [int(c.lstrip(\"#\")[i:i+2], 16) for i in (0, 2, 4)]\n\ndef rgb2Hex(c):\n \"\"\"\n Convert color in RGB format to hex format.\n \"\"\"\n return \"#{0:02x}{1:02x}{2:02x}\".format(clamp(c[0]), clamp(c[1]), clamp(c[2]))\n\ndef clamp(x): \n \"\"\"\n Clamp x to 0 <= x <= 255.\n \"\"\"\n return max(0, min(x, 255))\n" ]
[ [ "numpy.average", "numpy.dot" ] ]
skyw/NeMo
[ "3886aa251f7be7c2e43aeb7315afc6b8924228aa" ]
[ "nemo/collections/asr/models/ctc_models.py" ]
[ "# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport copy\nimport json\nimport os\nimport tempfile\nfrom math import ceil\nfrom typing import Dict, List, Optional, Union\n\nimport torch\nfrom omegaconf import DictConfig, OmegaConf, open_dict\nfrom pytorch_lightning import Trainer\nfrom tqdm.auto import tqdm\n\nfrom nemo.collections.asr.data import audio_to_text_dataset\nfrom nemo.collections.asr.data.audio_to_text_dali import DALIOutputs\nfrom nemo.collections.asr.losses.ctc import CTCLoss\nfrom nemo.collections.asr.metrics.wer import WER\nfrom nemo.collections.asr.models.asr_model import ASRModel, ExportableEncDecModel\nfrom nemo.collections.asr.parts.perturb import process_augmentations\nfrom nemo.core.classes.common import PretrainedModelInfo, typecheck\nfrom nemo.core.neural_types import AudioSignal, LabelsType, LengthsType, LogprobsType, NeuralType, SpectrogramType\nfrom nemo.utils import logging\n\n__all__ = ['EncDecCTCModel', 'JasperNet', 'QuartzNet']\n\n\nclass EncDecCTCModel(ASRModel, ExportableEncDecModel):\n \"\"\"Base class for encoder decoder CTC-based models.\"\"\"\n\n @classmethod\n def list_available_models(cls) -> Optional[PretrainedModelInfo]:\n \"\"\"\n This method returns a list of pre-trained model which can be instantiated directly from NVIDIA's NGC cloud.\n\n Returns:\n List of available pre-trained models.\n \"\"\"\n results = []\n\n model = PretrainedModelInfo(\n pretrained_model_name=\"QuartzNet15x5Base-En\",\n description=\"QuartzNet15x5 model trained on six datasets: LibriSpeech, Mozilla Common Voice (validated clips from en_1488h_2019-12-10), WSJ, Fisher, Switchboard, and NSC Singapore English. It was trained with Apex/Amp optimization level O1 for 600 epochs. The model achieves a WER of 3.79% on LibriSpeech dev-clean, and a WER of 10.05% on dev-other. 
Please visit https://ngc.nvidia.com/catalog/models/nvidia:nemospeechmodels for further details.\",\n location=\"https://api.ngc.nvidia.com/v2/models/nvidia/nemospeechmodels/versions/1.0.0a5/files/QuartzNet15x5Base-En.nemo\",\n )\n results.append(model)\n\n model = PretrainedModelInfo(\n pretrained_model_name=\"stt_en_quartznet15x5\",\n description=\"For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_en_quartznet15x5\",\n location=\"https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_en_quartznet15x5/versions/1.0.0rc1/files/stt_en_quartznet15x5.nemo\",\n )\n results.append(model)\n\n model = PretrainedModelInfo(\n pretrained_model_name=\"stt_zh_quartznet15x5\",\n description=\"For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_zh_quartznet15x5\",\n location=\"https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_zh_quartznet15x5/versions/1.0.0rc1/files/stt_zh_quartznet15x5.nemo\",\n )\n results.append(model)\n\n model = PretrainedModelInfo(\n pretrained_model_name=\"stt_en_jasper10x5dr\",\n description=\"For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_en_jasper10x5dr\",\n location=\"https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_en_jasper10x5dr/versions/1.0.0rc1/files/stt_en_jasper10x5dr.nemo\",\n )\n results.append(model)\n\n model = PretrainedModelInfo(\n pretrained_model_name=\"stt_ca_quartznet15x5\",\n description=\"For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_ca_quartznet15x5\",\n location=\"https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_ca_quartznet15x5/versions/1.0.0rc1/files/stt_ca_quartznet15x5.nemo\",\n )\n results.append(model)\n\n model = PretrainedModelInfo(\n pretrained_model_name=\"stt_it_quartznet15x5\",\n description=\"For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_it_quartznet15x5\",\n location=\"https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_it_quartznet15x5/versions/1.0.0rc1/files/stt_it_quartznet15x5.nemo\",\n )\n results.append(model)\n\n model = PretrainedModelInfo(\n pretrained_model_name=\"stt_fr_quartznet15x5\",\n description=\"For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_fr_quartznet15x5\",\n location=\"https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_fr_quartznet15x5/versions/1.0.0rc1/files/stt_fr_quartznet15x5.nemo\",\n )\n results.append(model)\n\n model = PretrainedModelInfo(\n pretrained_model_name=\"stt_es_quartznet15x5\",\n description=\"For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_es_quartznet15x5\",\n location=\"https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_es_quartznet15x5/versions/1.0.0rc1/files/stt_es_quartznet15x5.nemo\",\n )\n results.append(model)\n\n model = PretrainedModelInfo(\n pretrained_model_name=\"stt_de_quartznet15x5\",\n description=\"For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_de_quartznet15x5\",\n location=\"https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_de_quartznet15x5/versions/1.0.0rc1/files/stt_de_quartznet15x5.nemo\",\n )\n results.append(model)\n\n model = PretrainedModelInfo(\n pretrained_model_name=\"stt_pl_quartznet15x5\",\n description=\"For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_pl_quartznet15x5\",\n 
location=\"https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_pl_quartznet15x5/versions/1.0.0rc1/files/stt_pl_quartznet15x5.nemo\",\n )\n results.append(model)\n\n model = PretrainedModelInfo(\n pretrained_model_name=\"stt_ru_quartznet15x5\",\n description=\"For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_ru_quartznet15x5\",\n location=\"https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_ru_quartznet15x5/versions/1.0.0rc1/files/stt_ru_quartznet15x5.nemo\",\n )\n results.append(model)\n\n model = PretrainedModelInfo(\n pretrained_model_name=\"stt_zh_citrinet_512\",\n description=\"For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_zh_citrinet_512\",\n location=\"https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_zh_citrinet_512/versions/1.0.0rc1/files/stt_zh_citrinet_512.nemo\",\n )\n results.append(model)\n\n return results\n\n def __init__(self, cfg: DictConfig, trainer: Trainer = None):\n # Get global rank and total number of GPU workers for IterableDataset partitioning, if applicable\n # Global_rank and local_rank is set by LightningModule in Lightning 1.2.0\n self.world_size = 1\n if trainer is not None:\n self.world_size = trainer.num_nodes * trainer.num_gpus\n\n super().__init__(cfg=cfg, trainer=trainer)\n self.preprocessor = EncDecCTCModel.from_config_dict(self._cfg.preprocessor)\n self.encoder = EncDecCTCModel.from_config_dict(self._cfg.encoder)\n\n with open_dict(self._cfg):\n if \"feat_in\" not in self._cfg.decoder or (\n not self._cfg.decoder.feat_in and hasattr(self.encoder, '_feat_out')\n ):\n self._cfg.decoder.feat_in = self.encoder._feat_out\n if \"feat_in\" not in self._cfg.decoder or not self._cfg.decoder.feat_in:\n raise ValueError(\"param feat_in of the decoder's config is not set!\")\n\n self.decoder = EncDecCTCModel.from_config_dict(self._cfg.decoder)\n\n self.loss = CTCLoss(\n num_classes=self.decoder.num_classes_with_blank - 1,\n zero_infinity=True,\n reduction=self._cfg.get(\"ctc_reduction\", \"mean_batch\"),\n )\n\n if hasattr(self._cfg, 'spec_augment') and self._cfg.spec_augment is not None:\n self.spec_augmentation = EncDecCTCModel.from_config_dict(self._cfg.spec_augment)\n else:\n self.spec_augmentation = None\n\n # Setup metric objects\n self._wer = WER(\n vocabulary=self.decoder.vocabulary,\n batch_dim_index=0,\n use_cer=self._cfg.get('use_cer', False),\n ctc_decode=True,\n dist_sync_on_step=True,\n log_prediction=self._cfg.get(\"log_prediction\", False),\n )\n\n @torch.no_grad()\n def transcribe(\n self, paths2audio_files: List[str], batch_size: int = 4, logprobs=False, return_hypotheses: bool = False\n ) -> List[str]:\n \"\"\"\n Uses greedy decoding to transcribe audio files. Use this method for debugging and prototyping.\n\n Args:\n paths2audio_files: (a list) of paths to audio files. \\\n Recommended length per file is between 5 and 25 seconds. 
\\\n But it is possible to pass a few hours long file if enough GPU memory is available.\n batch_size: (int) batch size to use during inference.\n Bigger will result in better throughput performance but would use more memory.\n logprobs: (bool) pass True to get log probabilities instead of transcripts.\n return_hypotheses: (bool) Either return hypotheses or text\n With hypotheses can do some postprocessing like getting timestamp or rescoring\n\n Returns:\n A list of transcriptions (or raw log probabilities if logprobs is True) in the same order as paths2audio_files\n \"\"\"\n if paths2audio_files is None or len(paths2audio_files) == 0:\n return {}\n\n if return_hypotheses and logprobs:\n raise ValueError(\n \"Either `return_hypotheses` or `logprobs` can be True at any given time.\"\n \"Returned hypotheses will contain the logprobs.\"\n )\n\n # We will store transcriptions here\n hypotheses = []\n # Model's mode and device\n mode = self.training\n device = next(self.parameters()).device\n dither_value = self.preprocessor.featurizer.dither\n pad_to_value = self.preprocessor.featurizer.pad_to\n\n try:\n self.preprocessor.featurizer.dither = 0.0\n self.preprocessor.featurizer.pad_to = 0\n # Switch model to evaluation mode\n self.eval()\n # Freeze the encoder and decoder modules\n self.encoder.freeze()\n self.decoder.freeze()\n logging_level = logging.get_verbosity()\n logging.set_verbosity(logging.WARNING)\n # Work in tmp directory - will store manifest file there\n with tempfile.TemporaryDirectory() as tmpdir:\n with open(os.path.join(tmpdir, 'manifest.json'), 'w') as fp:\n for audio_file in paths2audio_files:\n entry = {'audio_filepath': audio_file, 'duration': 100000, 'text': 'nothing'}\n fp.write(json.dumps(entry) + '\\n')\n\n config = {'paths2audio_files': paths2audio_files, 'batch_size': batch_size, 'temp_dir': tmpdir}\n\n temporary_datalayer = self._setup_transcribe_dataloader(config)\n for test_batch in tqdm(temporary_datalayer, desc=\"Transcribing\"):\n logits, logits_len, greedy_predictions = self.forward(\n input_signal=test_batch[0].to(device), input_signal_length=test_batch[1].to(device)\n )\n if logprobs:\n # dump log probs per file\n for idx in range(logits.shape[0]):\n hypotheses.append(logits[idx][: logits_len[idx]])\n else:\n current_hypotheses = self._wer.ctc_decoder_predictions_tensor(\n greedy_predictions, predictions_len=logits_len, return_hypotheses=return_hypotheses,\n )\n\n if return_hypotheses:\n # dump log probs per file\n for idx in range(logits.shape[0]):\n current_hypotheses[idx].y_sequence = logits[idx][: logits_len[idx]]\n\n hypotheses += current_hypotheses\n\n del greedy_predictions\n del logits\n del test_batch\n finally:\n # set mode back to its original value\n self.train(mode=mode)\n self.preprocessor.featurizer.dither = dither_value\n self.preprocessor.featurizer.pad_to = pad_to_value\n if mode is True:\n self.encoder.unfreeze()\n self.decoder.unfreeze()\n logging.set_verbosity(logging_level)\n return hypotheses\n\n def change_vocabulary(self, new_vocabulary: List[str]):\n \"\"\"\n Changes vocabulary used during CTC decoding process. Use this method when fine-tuning on from pre-trained model.\n This method changes only decoder and leaves encoder and pre-processing modules unchanged. 
For example, you would\n use it if you want to use pretrained encoder when fine-tuning on a data in another language, or when you'd need\n model to learn capitalization, punctuation and/or special characters.\n\n If new_vocabulary == self.decoder.vocabulary then nothing will be changed.\n\n Args:\n\n new_vocabulary: list with new vocabulary. Must contain at least 2 elements. Typically, \\\n this is target alphabet.\n\n Returns: None\n\n \"\"\"\n if self.decoder.vocabulary == new_vocabulary:\n logging.warning(f\"Old {self.decoder.vocabulary} and new {new_vocabulary} match. Not changing anything.\")\n else:\n if new_vocabulary is None or len(new_vocabulary) == 0:\n raise ValueError(f'New vocabulary must be non-empty list of chars. But I got: {new_vocabulary}')\n decoder_config = self.decoder.to_config_dict()\n new_decoder_config = copy.deepcopy(decoder_config)\n new_decoder_config['vocabulary'] = new_vocabulary\n new_decoder_config['num_classes'] = len(new_vocabulary)\n\n del self.decoder\n self.decoder = EncDecCTCModel.from_config_dict(new_decoder_config)\n del self.loss\n self.loss = CTCLoss(\n num_classes=self.decoder.num_classes_with_blank - 1,\n zero_infinity=True,\n reduction=self._cfg.get(\"ctc_reduction\", \"mean_batch\"),\n )\n self._wer = WER(\n vocabulary=self.decoder.vocabulary,\n batch_dim_index=0,\n use_cer=self._cfg.get('use_cer', False),\n ctc_decode=True,\n dist_sync_on_step=True,\n log_prediction=self._cfg.get(\"log_prediction\", False),\n )\n\n # Update config\n OmegaConf.set_struct(self._cfg.decoder, False)\n self._cfg.decoder = new_decoder_config\n OmegaConf.set_struct(self._cfg.decoder, True)\n\n logging.info(f\"Changed decoder to output to {self.decoder.vocabulary} vocabulary.\")\n\n def _setup_dataloader_from_config(self, config: Optional[Dict]):\n if 'augmentor' in config:\n augmentor = process_augmentations(config['augmentor'])\n else:\n augmentor = None\n\n shuffle = config['shuffle']\n device = 'gpu' if torch.cuda.is_available() else 'cpu'\n if config.get('use_dali', False):\n device_id = self.local_rank if device == 'gpu' else None\n dataset = audio_to_text_dataset.get_dali_char_dataset(\n config=config,\n shuffle=shuffle,\n device_id=device_id,\n global_rank=self.global_rank,\n world_size=self.world_size,\n preprocessor_cfg=self._cfg.preprocessor,\n )\n return dataset\n\n # Instantiate tarred dataset loader or normal dataset loader\n if config.get('is_tarred', False):\n if ('tarred_audio_filepaths' in config and config['tarred_audio_filepaths'] is None) or (\n 'manifest_filepath' in config and config['manifest_filepath'] is None\n ):\n logging.warning(\n \"Could not load dataset as `manifest_filepath` was None or \"\n f\"`tarred_audio_filepaths` is None. Provided config : {config}\"\n )\n return None\n\n shuffle_n = config.get('shuffle_n', 4 * config['batch_size']) if shuffle else 0\n dataset = audio_to_text_dataset.get_tarred_char_dataset(\n config=config,\n shuffle_n=shuffle_n,\n global_rank=self.global_rank,\n world_size=self.world_size,\n augmentor=augmentor,\n )\n shuffle = False\n else:\n if 'manifest_filepath' in config and config['manifest_filepath'] is None:\n logging.warning(f\"Could not load dataset as `manifest_filepath` was None. 
Provided config : {config}\")\n return None\n\n dataset = audio_to_text_dataset.get_char_dataset(config=config, augmentor=augmentor)\n\n return torch.utils.data.DataLoader(\n dataset=dataset,\n batch_size=config['batch_size'],\n collate_fn=dataset.collate_fn,\n drop_last=config.get('drop_last', False),\n shuffle=shuffle,\n num_workers=config.get('num_workers', 0),\n pin_memory=config.get('pin_memory', False),\n )\n\n def setup_training_data(self, train_data_config: Optional[Union[DictConfig, Dict]]):\n \"\"\"\n Sets up the training data loader via a Dict-like object.\n\n Args:\n train_data_config: A config that contains the information regarding construction\n of an ASR Training dataset.\n\n Supported Datasets:\n - :class:`~nemo.collections.asr.data.audio_to_text.AudioToCharDataset`\n - :class:`~nemo.collections.asr.data.audio_to_text.AudioToBPEDataset`\n - :class:`~nemo.collections.asr.data.audio_to_text.TarredAudioToCharDataset`\n - :class:`~nemo.collections.asr.data.audio_to_text.TarredAudioToBPEDataset`\n - :class:`~nemo.collections.asr.data.audio_to_text_dali.AudioToCharDALIDataset`\n \"\"\"\n if 'shuffle' not in train_data_config:\n train_data_config['shuffle'] = True\n\n # preserve config\n self._update_dataset_config(dataset_name='train', config=train_data_config)\n\n self._train_dl = self._setup_dataloader_from_config(config=train_data_config)\n\n # Need to set this because if using an IterableDataset, the length of the dataloader is the total number\n # of samples rather than the number of batches, and this messes up the tqdm progress bar.\n # So we set the number of steps manually (to the correct number) to fix this.\n if 'is_tarred' in train_data_config and train_data_config['is_tarred']:\n # We also need to check if limit_train_batches is already set.\n # If it's an int, we assume that the user has set it to something sane, i.e. <= # training batches,\n # and don't change it. 
Otherwise, adjust batches accordingly if it's a float (including 1.0).\n if isinstance(self._trainer.limit_train_batches, float):\n self._trainer.limit_train_batches = int(\n self._trainer.limit_train_batches\n * ceil((len(self._train_dl.dataset) / self.world_size) / train_data_config['batch_size'])\n )\n\n def setup_validation_data(self, val_data_config: Optional[Union[DictConfig, Dict]]):\n \"\"\"\n Sets up the validation data loader via a Dict-like object.\n\n Args:\n val_data_config: A config that contains the information regarding construction\n of an ASR Training dataset.\n\n Supported Datasets:\n - :class:`~nemo.collections.asr.data.audio_to_text.AudioToCharDataset`\n - :class:`~nemo.collections.asr.data.audio_to_text.AudioToBPEDataset`\n - :class:`~nemo.collections.asr.data.audio_to_text.TarredAudioToCharDataset`\n - :class:`~nemo.collections.asr.data.audio_to_text.TarredAudioToBPEDataset`\n - :class:`~nemo.collections.asr.data.audio_to_text_dali.AudioToCharDALIDataset`\n \"\"\"\n if 'shuffle' not in val_data_config:\n val_data_config['shuffle'] = False\n\n # preserve config\n self._update_dataset_config(dataset_name='validation', config=val_data_config)\n\n self._validation_dl = self._setup_dataloader_from_config(config=val_data_config)\n\n def setup_test_data(self, test_data_config: Optional[Union[DictConfig, Dict]]):\n \"\"\"\n Sets up the test data loader via a Dict-like object.\n\n Args:\n test_data_config: A config that contains the information regarding construction\n of an ASR Training dataset.\n\n Supported Datasets:\n - :class:`~nemo.collections.asr.data.audio_to_text.AudioToCharDataset`\n - :class:`~nemo.collections.asr.data.audio_to_text.AudioToBPEDataset`\n - :class:`~nemo.collections.asr.data.audio_to_text.TarredAudioToCharDataset`\n - :class:`~nemo.collections.asr.data.audio_to_text.TarredAudioToBPEDataset`\n - :class:`~nemo.collections.asr.data.audio_to_text_dali.AudioToCharDALIDataset`\n \"\"\"\n if 'shuffle' not in test_data_config:\n test_data_config['shuffle'] = False\n\n # preserve config\n self._update_dataset_config(dataset_name='test', config=test_data_config)\n\n self._test_dl = self._setup_dataloader_from_config(config=test_data_config)\n\n @property\n def input_types(self) -> Optional[Dict[str, NeuralType]]:\n if hasattr(self.preprocessor, '_sample_rate'):\n input_signal_eltype = AudioSignal(freq=self.preprocessor._sample_rate)\n else:\n input_signal_eltype = AudioSignal()\n return {\n \"input_signal\": NeuralType(('B', 'T'), input_signal_eltype, optional=True),\n \"input_signal_length\": NeuralType(tuple('B'), LengthsType(), optional=True),\n \"processed_signal\": NeuralType(('B', 'D', 'T'), SpectrogramType(), optional=True),\n \"processed_signal_length\": NeuralType(tuple('B'), LengthsType(), optional=True),\n }\n\n @property\n def output_types(self) -> Optional[Dict[str, NeuralType]]:\n return {\n \"outputs\": NeuralType(('B', 'T', 'D'), LogprobsType()),\n \"encoded_lengths\": NeuralType(tuple('B'), LengthsType()),\n \"greedy_predictions\": NeuralType(('B', 'T'), LabelsType()),\n }\n\n @typecheck()\n def forward(\n self, input_signal=None, input_signal_length=None, processed_signal=None, processed_signal_length=None\n ):\n \"\"\"\n Forward pass of the model.\n\n Args:\n input_signal: Tensor that represents a batch of raw audio signals,\n of shape [B, T]. 
T here represents timesteps, with 1 second of audio represented as\n `self.sample_rate` number of floating point values.\n input_signal_length: Vector of length B, that contains the individual lengths of the audio\n sequences.\n processed_signal: Tensor that represents a batch of processed audio signals,\n of shape (B, D, T) that has undergone processing via some DALI preprocessor.\n processed_signal_length: Vector of length B, that contains the individual lengths of the\n processed audio sequences.\n\n Returns:\n A tuple of 3 elements -\n 1) The log probabilities tensor of shape [B, T, D].\n 2) The lengths of the acoustic sequence after propagation through the encoder, of shape [B].\n 3) The greedy token predictions of the model of shape [B, T] (via argmax)\n \"\"\"\n has_input_signal = input_signal is not None and input_signal_length is not None\n has_processed_signal = processed_signal is not None and processed_signal_length is not None\n if (has_input_signal ^ has_processed_signal) == False:\n raise ValueError(\n f\"{self} Arguments ``input_signal`` and ``input_signal_length`` are mutually exclusive \"\n \" with ``processed_signal`` and ``processed_signal_len`` arguments.\"\n )\n\n if not has_processed_signal:\n processed_signal, processed_signal_length = self.preprocessor(\n input_signal=input_signal, length=input_signal_length,\n )\n\n if self.spec_augmentation is not None and self.training:\n processed_signal = self.spec_augmentation(input_spec=processed_signal)\n\n encoded, encoded_len = self.encoder(audio_signal=processed_signal, length=processed_signal_length)\n log_probs = self.decoder(encoder_output=encoded)\n greedy_predictions = log_probs.argmax(dim=-1, keepdim=False)\n\n return log_probs, encoded_len, greedy_predictions\n\n # PTL-specific methods\n def training_step(self, batch, batch_nb):\n signal, signal_len, transcript, transcript_len = batch\n if isinstance(batch, DALIOutputs) and batch.has_processed_signal:\n log_probs, encoded_len, predictions = self.forward(\n processed_signal=signal, processed_signal_length=signal_len\n )\n else:\n log_probs, encoded_len, predictions = self.forward(input_signal=signal, input_signal_length=signal_len)\n\n loss_value = self.loss(\n log_probs=log_probs, targets=transcript, input_lengths=encoded_len, target_lengths=transcript_len\n )\n\n tensorboard_logs = {'train_loss': loss_value, 'learning_rate': self._optimizer.param_groups[0]['lr']}\n\n if hasattr(self, '_trainer') and self._trainer is not None:\n log_every_n_steps = self._trainer.log_every_n_steps\n else:\n log_every_n_steps = 1\n\n if (batch_nb + 1) % log_every_n_steps == 0:\n self._wer.update(\n predictions=predictions,\n targets=transcript,\n target_lengths=transcript_len,\n predictions_lengths=encoded_len,\n )\n wer, _, _ = self._wer.compute()\n tensorboard_logs.update({'training_batch_wer': wer})\n\n return {'loss': loss_value, 'log': tensorboard_logs}\n\n def validation_step(self, batch, batch_idx, dataloader_idx=0):\n signal, signal_len, transcript, transcript_len = batch\n if isinstance(batch, DALIOutputs) and batch.has_processed_signal:\n log_probs, encoded_len, predictions = self.forward(\n processed_signal=signal, processed_signal_length=signal_len\n )\n else:\n log_probs, encoded_len, predictions = self.forward(input_signal=signal, input_signal_length=signal_len)\n\n loss_value = self.loss(\n log_probs=log_probs, targets=transcript, input_lengths=encoded_len, target_lengths=transcript_len\n )\n self._wer.update(\n predictions=predictions, targets=transcript, 
target_lengths=transcript_len, predictions_lengths=encoded_len\n )\n wer, wer_num, wer_denom = self._wer.compute()\n return {\n 'val_loss': loss_value,\n 'val_wer_num': wer_num,\n 'val_wer_denom': wer_denom,\n 'val_wer': wer,\n }\n\n def test_step(self, batch, batch_idx, dataloader_idx=0):\n logs = self.validation_step(batch, batch_idx, dataloader_idx=dataloader_idx)\n test_logs = {\n 'test_loss': logs['val_loss'],\n 'test_wer_num': logs['val_wer_num'],\n 'test_wer_denom': logs['val_wer_denom'],\n 'test_wer': logs['val_wer'],\n }\n return test_logs\n\n def test_dataloader(self):\n if self._test_dl is not None:\n return self._test_dl\n\n def _setup_transcribe_dataloader(self, config: Dict) -> 'torch.utils.data.DataLoader':\n \"\"\"\n Setup function for a temporary data loader which wraps the provided audio file.\n\n Args:\n config: A python dictionary which contains the following keys:\n paths2audio_files: (a list) of paths to audio files. The files should be relatively short fragments. \\\n Recommended length per file is between 5 and 25 seconds.\n batch_size: (int) batch size to use during inference. \\\n Bigger will result in better throughput performance but would use more memory.\n temp_dir: (str) A temporary directory where the audio manifest is temporarily\n stored.\n\n Returns:\n A pytorch DataLoader for the given audio file(s).\n \"\"\"\n dl_config = {\n 'manifest_filepath': os.path.join(config['temp_dir'], 'manifest.json'),\n 'sample_rate': self.preprocessor._sample_rate,\n 'labels': self.decoder.vocabulary,\n 'batch_size': min(config['batch_size'], len(config['paths2audio_files'])),\n 'trim_silence': True,\n 'shuffle': False,\n }\n\n temporary_datalayer = self._setup_dataloader_from_config(config=DictConfig(dl_config))\n return temporary_datalayer\n" ]
[ [ "torch.no_grad", "torch.cuda.is_available" ] ]
KsanaKozlova/sdc
[ "70b5ae813243d120571eec908e3b7bf1178cb8dd" ]
[ "sdc/tests/test_compile_time.py" ]
[ "# *****************************************************************************\n# Copyright (c) 2019-2020, Intel Corporation All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,\n# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR\n# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;\n# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR\n# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,\n# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n# *****************************************************************************\n\nimport numba\nimport numpy as np\nimport pandas as pd\nimport re\nimport unittest\n\nfrom contextlib import redirect_stdout\nfrom io import StringIO\nfrom sdc.tests.test_base import TestCase\nfrom sdc.decorators import debug_compile_time\n\n\n# regexp patterns for lines in @debug_compile_time output log\nline_heading = r'\\*+\\s+COMPILE STATS\\s+\\*+\\n'\nline_function = r'Function: [^\\s]+\\n'\nline_args = r'\\s+Args:.*\\n'\nline_pipeline = r'\\s+Pipeline: \\w+\\n'\nline_passes = r'(\\s+\\w+\\s+[\\d.]+\\n)+'\nline_time = r'\\s+Time: [\\d.]+\\n'\nline_ending = r'\\*+\\n'\n\n\nclass TestCompileTime(TestCase):\n\n @staticmethod\n def _gen_usecase_data():\n n = 11\n S1 = pd.Series(np.ones(n))\n S2 = pd.Series(2 ** np.arange(n))\n return S1, S2\n\n def test_log_format_summary(self):\n \"\"\" Verifies shortened log format when only summary info is printed \"\"\"\n\n @debug_compile_time(level=0)\n @self.jit\n def test_impl(S1, S2):\n return S1 + S2\n\n buffer = StringIO()\n with redirect_stdout(buffer):\n S1, S2 = self._gen_usecase_data()\n test_impl(S1, S2)\n\n entry_format = fr'{line_function}{line_pipeline}{line_time}\\n'\n log_format = fr'^{line_heading}({entry_format})+{line_ending}$'\n self.assertRegex(buffer.getvalue(), log_format)\n\n def test_log_format_detailed(self):\n \"\"\" Verifies detailed log format with passes and args information \"\"\"\n\n @debug_compile_time()\n @self.jit\n def test_impl(S1, S2):\n return S1 + S2\n\n buffer = StringIO()\n with redirect_stdout(buffer):\n S1, S2 = self._gen_usecase_data()\n test_impl(S1, S2)\n\n entry_format = fr'{line_function}{line_args}{line_pipeline}{line_passes}{line_time}\\n'\n log_format = fr'{line_heading}({entry_format})+{line_ending}'\n self.assertRegex(buffer.getvalue(), log_format)\n\n def test_func_names_filter(self):\n \"\"\" Verifies filtering log entries via func_names paramter \"\"\"\n searched_name = 'add'\n\n @debug_compile_time(func_names=[searched_name])\n @self.jit\n def test_impl(S1, S2):\n return S1 + S2\n\n buffer = 
StringIO()\n with redirect_stdout(buffer):\n S1, S2 = self._gen_usecase_data()\n test_impl(S1, S2)\n\n line_function = r'Function: ([^\\s]+)\\n'\n match_iter = re.finditer(line_function, buffer.getvalue())\n next(match_iter) # skip entry for top-level func\n for m in match_iter:\n self.assertIn(searched_name, m.group(1))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n" ]
[ [ "numpy.ones", "numpy.arange" ] ]
luisgrivas/name-correction
[ "1de4a2146a8d8ae50d340a580e79720f2866653a" ]
[ "distance.py" ]
[ "import numpy as np\n\nletters = 'abcdefghijklmnopqrstuvwxyz'\n\n\ndef splits(string):\n return [(string[:i], string[i:]) for i in range(len(string) + 1)]\n\n\ndef transposes(string):\n all_splits = splits(string)\n return [L + R[1] + R[0] + R[2:] for L, R in all_splits if len(R) > 1]\n\n\ndef deletes(string):\n all_splits = splits(string)\n return [L + R[1:] for L, R in all_splits if R]\n\n\ndef replaces(string):\n all_splits = splits(string)\n return [L + c + R[1:] for L, R in all_splits if R for c in letters]\n\n\ndef inserts(string):\n all_splits = splits(string)\n return [L + c + R for L, R in all_splits for c in letters]\n\n\ndef lev(str1, str2, weigts=(1, 1, 1)):\n m = len(str1)\n n = len(str2)\n dist = np.zeros((m + 1, n + 1))\n\n for i in range(m + 1):\n dist[i][0] = i\n\n for j in range(n + 1):\n dist[0][j] = j\n\n for j in range(n):\n for i in range(m):\n if str1[i] == str2[j]:\n dist[i + 1][j + 1] = dist[i][j]\n else:\n dist[i + 1][j + 1] = min(dist[i][j + 1] + 1,\n dist[i + 1][j] + 1,\n dist[i][j] + 1)\n return dist[m][n]\n\n\ndef dam_lev(str1, str2, weights=(1, 1, 1)):\n m = len(str1)\n n = len(str2)\n dist = np.zeros((m + 1, n + 1))\n\n for i in range(m + 1):\n dist[i][0] = i\n\n for j in range(n + 1):\n dist[0][j] = j\n\n for j in range(n):\n for i in range(m):\n if str1[i] == str2[j]:\n dist[i + 1][j + 1] = dist[i][j]\n else:\n dist[i + 1][j + 1] = min(dist[i][j + 1] + 1,\n dist[i + 1][j] + 1,\n dist[i][j] + 1)\n if i > 0 and j > 0:\n if str1[i] == str2[j - 1] and str1[i - 1] == str2[j]:\n dist[i + 1][j + 1] = min(dist[i + 1][j + 1],\n dist[i - 1][j - 1] + 1)\n return dist[m][n]\n" ]
[ [ "numpy.zeros" ] ]
energeeks/ashrae-energy-prediction
[ "2b15a3749dd5810df228f99d7c23d62234117a1f" ]
[ "src/features/build_features.py" ]
[ "import os\n\nimport click\nimport math\nimport numpy as np\nimport pandas as pd\nimport yaml\nfrom meteocalc import feels_like\n\nfrom src.timer import timer\n\n\[email protected]()\[email protected]('data_dir', type=click.Path(exists=True))\[email protected]('output_filepath', type=click.Path())\ndef click_main(data_dir, output_filepath):\n main(data_dir, output_filepath)\n\n\ndef main(data_dir, output_filepath):\n \"\"\" Runs data feature engineering scripts to turn interim data from\n (../interim) into data which is ready for usage in ML models\n (saved in ../processed).\n :param data_dir: Directory that contains the data\n :param output_filepath: Directory where processed results will be saved in.\n \"\"\"\n with open(\"src/config.yml\", 'r') as ymlfile:\n cfg = yaml.load(ymlfile, Loader=yaml.FullLoader)\n\n with timer(\"Loading interim data\"):\n train_df, test_df = load_interim_data(data_dir + \"/interim\")\n\n train_df, test_df = build_features(train_df, test_df, cfg=cfg)\n\n if cfg[\"exclude_faulty_rows\"]:\n with timer(\"Exclude faulty data and outliers\"):\n train_df = exclude_faulty_readings(train_df, data_dir + \"/external\")\n\n if cfg[\"add_leaks_to_train\"]:\n with timer(\"Adding Leak Label to training set\"):\n train_df = add_leaked_data(train_df, test_df)\n\n with timer(\"Sort training set\"):\n train_df.sort_values(\"timestamp\", inplace=True)\n train_df.reset_index(drop=True, inplace=True)\n\n with timer(\"Save processed data\"):\n save_processed_data(output_filepath, train_df, test_df)\n\n\ndef load_interim_data(input_filepath):\n \"\"\"\n Loads interim data which already is preserved as python object due to\n previous processing steps\n :param input_filepath: Directory that contains the interim data\n :return: Tuple containing training and test data\n \"\"\"\n train_df = pd.read_pickle(input_filepath + \"/train_data.pkl\")\n test_df = pd.read_pickle(input_filepath + \"/test_data.pkl\")\n return train_df, test_df\n\n\ndef build_features(*dfs, cfg):\n with timer(\"Encoding categorical features\"):\n dfs = [encode_categorical_data(df) for df in dfs]\n\n with timer(\"Encoding timestamp features\"):\n dfs = [encode_timestamp(df, circular=cfg[\"circular_timestamp_encoding\"]) for df in dfs]\n\n with timer(\"Create area per floor feature\"):\n dfs = [calculate_area_per_floor(df) for df in dfs]\n\n if cfg[\"log_transform_square_feet\"]:\n with timer(\"Taking the log of selected features\"):\n dfs = [calculate_square_feet_log(df) for df in dfs]\n\n if cfg[\"log_transform_area_per_floor\"]:\n with timer(\"Taking the log of area per floor\"):\n dfs = [calculate_area_per_floor_log(df) for df in dfs]\n\n if cfg[\"label_square_feet_outlier\"]:\n with timer(\"Create outlier label for square feet\"):\n dfs = [label_square_feet_outlier(df) for df in dfs]\n\n if cfg[\"label_area_per_floor_outlier\"]:\n with timer(\"Create outlier label for area per floor\"):\n dfs = [label_area_per_floor_outlier(df) for df in dfs]\n\n with timer(\"Calculating age of buildings\"):\n dfs = [calculate_age_of_building(df) for df in dfs]\n\n if cfg[\"encode_wind_direction\"]:\n with timer(\"Encoding wind_direction features\"):\n dfs = [encode_wind_direction(df) for df in dfs]\n\n with timer(\"Calculate relative humidity\"):\n dfs = [calculate_relative_humidity(df) for df in dfs]\n\n if cfg[\"include_feels_like\"]:\n with timer(\"Create feels_like_temp\"):\n dfs = [calculate_feels_like_temp(df) for df in dfs]\n\n if cfg[\"fill_na_with_zero\"]:\n dfs = [df.fillna(0) for df in dfs]\n\n if 
cfg[\"add_lag_features\"]:\n with timer(\"Adding Lag Features\"):\n dfs = [add_lag_features(df, cfg[\"lag_columns\"], cfg[\"lag_windows\"]) for df in dfs]\n\n return dfs\n\n\ndef encode_categorical_data(data_frame):\n \"\"\"\n Sets a fitting format for categorical data.\n \"\"\"\n primary_use_label = {\n 'Education': 0,\n 'Entertainment/public assembly': 1,\n 'Food sales and service': 2,\n 'Healthcare': 3,\n 'Lodging/residential': 4,\n 'Manufacturing/industrial': 5,\n 'Office': 6,\n 'Other': 7,\n 'Parking': 8,\n 'Public services': 9,\n 'Religious worship': 10,\n 'Retail': 11,\n 'Services': 12,\n 'Technology/science': 13,\n 'Utility': 14,\n 'Warehouse/storage': 15\n }\n data_frame[\"primary_use\"].replace(primary_use_label, inplace=True)\n\n columns = [\"site_id\", \"building_id\", \"meter\", \"primary_use\"]\n for column in columns:\n if column in data_frame.columns:\n data_frame[column] = pd.Categorical(data_frame[column])\n return data_frame\n\n\ndef encode_timestamp(data_frame, circular=False):\n \"\"\"\n Extracts time based features out of the timestamp column. In particular the\n time of the day, weekday and day of the year were being chosen. Due to the\n repetitive nature of time features a cyclic encoding can been chosen.\n \"\"\"\n timestamp = data_frame[\"timestamp\"]\n if circular:\n timestamp_seconds_of_day = (timestamp.dt.hour * 60 + timestamp.dt.minute) * 60 + timestamp.dt.second\n data_frame[\"timeofday_sin\"] = np.sin(2 * np.pi * timestamp_seconds_of_day / 86400)\n data_frame[\"timeofday_cos\"] = np.cos(2 * np.pi * timestamp_seconds_of_day / 86400)\n data_frame[\"dayofweek_sin\"] = np.sin(2 * np.pi * timestamp.dt.dayofweek / 7)\n data_frame[\"dayofweek_cos\"] = np.cos(2 * np.pi * timestamp.dt.dayofweek / 7)\n data_frame[\"dayofyear_sin\"] = np.sin(2 * np.pi * timestamp.dt.dayofyear / 366)\n data_frame[\"dayofyear_cos\"] = np.cos(2 * np.pi * timestamp.dt.dayofyear / 366)\n else:\n data_frame[\"hour\"] = pd.Categorical(timestamp.dt.hour)\n data_frame[\"weekday\"] = pd.Categorical(timestamp.dt.dayofweek)\n data_frame[\"month\"] = pd.Categorical(timestamp.dt.month)\n return data_frame\n\n\ndef calculate_area_per_floor(df):\n df[\"area_per_floor\"] = df[\"square_feet\"] / df[\"floor_count\"]\n return df\n\n\ndef calculate_square_feet_log(df):\n df[\"square_feet\"] = np.log(df[\"square_feet\"])\n return df\n\n\ndef calculate_area_per_floor_log(df):\n df[\"area_per_floor\"] = np.log(df[\"area_per_floor\"])\n return df\n\n\ndef label_square_feet_outlier(df):\n df[\"outlier_square_feet\"] = label_outlier(\"square_feet\", df)\n return df\n\n\ndef label_area_per_floor_outlier(df):\n df[\"outlier_area_per_floor\"] = label_outlier(\"area_per_floor\", df)\n return df\n\n\ndef label_outlier(variable, df):\n \"\"\"\n Flags outliers contained in the dataframe\n :param variable:\n :param df:\n :return: true for each outlier present\n \"\"\"\n var = df[variable]\n mn = np.mean(var)\n std = np.std(var)\n lower = mn - 2.5 * std\n upper = mn + 2.5 * std\n is_outlier = (var < lower) | (var > upper)\n return is_outlier\n\n\ndef calculate_age_of_building(data_frame):\n \"\"\"\n Transforms year_built feature in building_metadata.cvs into age.\n :param data_frame:\n :return: dataframe with transformed feature\n \"\"\"\n data_frame[\"year_built\"] = 2019 - data_frame[\"year_built\"]\n return data_frame\n\n\ndef add_lag_features(data_frame, cols, windows):\n for col in cols:\n for window in windows:\n data_frame[\"{}_{}_lag\".format(col, window)] = data_frame \\\n .groupby([\"building_id\", 
\"meter\"])[col] \\\n .rolling(window, center=False) \\\n .mean().reset_index(drop=True)\n return data_frame\n\n\ndef exclude_faulty_readings(data_frame, external_data_dir):\n \"\"\"\"\n Cleanses the provided data_frame from faulty readings and/or outlier data.\n Special thanks goes to https://www.kaggle.com/purist1024/ashrae-simple-data\n -cleanup-lb-1-08-no-leaks for providing a detailed guide and identification\n of the problematical rows.\n \"\"\"\n rows_to_drop = pd.read_csv(external_data_dir + \"/rows_to_drop.csv\")\n return data_frame.drop(index=rows_to_drop.iloc[:, 0])\n\n\ndef encode_wind_direction(data_frame):\n \"\"\"\n Encode the wind_direction using a cyclic encoding.\n If there is no wind_direction or the wind_speed is zero the points are encoded as the origin.\n \"\"\"\n data_frame[\"wind_direction_sin\"] = np.sin(2 * np.pi * data_frame[\"wind_direction\"] / 360)\n data_frame[\"wind_direction_cos\"] = np.cos(2 * np.pi * data_frame[\"wind_direction\"] / 360)\n data_frame.loc[data_frame[\"wind_direction\"].isna(), [\"wind_direction_sin\", \"wind_direction_cos\"]] = 0\n data_frame.loc[data_frame[\"wind_speed\"].isna(), [\"wind_direction_sin\", \"wind_direction_cos\"]] = 0\n data_frame.loc[data_frame[\"wind_speed\"] == 0, [\"wind_direction_sin\", \"wind_direction_cos\"]] = 0\n return data_frame\n\n\ndef calculate_relative_humidity(df):\n if \"relative_humidity\" in df.columns:\n return df\n subset = df[[\"air_temperature\", \"dew_temperature\"]].drop_duplicates()\n subset[\"relative_humidity\"] = subset.apply(\n lambda row: calculate_row_relative_humidity(row[\"air_temperature\"], row[\"dew_temperature\"]), axis=1)\n return df.merge(subset, on=[\"air_temperature\", \"dew_temperature\"])\n\n\ndef calculate_row_relative_humidity(air_temperature, dew_temperature):\n \"\"\"\n Computes the relative humidity from air temperature and dew point.\n :param air_temperature: the dry air temperature\n :param dew_temperature: the dew point\n :return: the relative humidity\n \"\"\"\n positive = {'b': 17.368, 'c': 238.88}\n negative = {'b': 17.966, 'c': 247.15}\n const = positive if air_temperature > 0 else negative\n pa = math.exp(dew_temperature * const['b'] / (const['c'] + dew_temperature))\n rel_humidity = pa * 100. 
* 1 / math.exp(const['b'] * air_temperature / (const['c'] + air_temperature))\n return rel_humidity\n\n\ndef calculate_feels_like_temp(df):\n \"\"\"\n Creates a feels-like temperature feature for the dataframe.\n :param df: weather data frame.\n :return: Dataframe with added feature\n \"\"\"\n subset = df[[\"air_temperature\", \"wind_speed\", \"relative_humidity\"]].drop_duplicates()\n subset[\"air_temp_f\"] = subset[\"air_temperature\"] * 9 / 5 + 32\n subset[\"feels_like_temp\"] = subset.apply(\n lambda row: calculate_row_feels_like_temp(row[\"air_temperature\"], row[\"wind_speed\"], row[\"relative_humidity\"]),\n axis=1)\n return df.merge(subset, on=[\"air_temperature\", \"wind_speed\", \"relative_humidity\"])\n\n\ndef calculate_row_feels_like_temp(air_temperature, wind_speed, relative_humidity):\n \"\"\"\n Computes feels like feature for an entry from the dataframe\n :param air_temperature: air temperature in celsius\n :param wind_speed: wind speed\n :param relative_humidity: relative humidity\n :return: feels like value for the entry\n \"\"\"\n air_temperature_fahrenheit = air_temperature * 9 / 5 + 32\n fl = feels_like(air_temperature_fahrenheit, wind_speed, relative_humidity)\n out = fl.c\n return out\n\n\ndef add_leaked_data(train_df, test_df):\n \"\"\"\n Adds the leaked data published in public notebooks in Kaggle's website\n :param train_df:\n :param test_df:\n :return: concatenated dataframe\n \"\"\"\n leaked_df = pd.read_feather(\"data/leak/leak.feather\")\n leaked_df.loc[leaked_df[\"meter_reading\"] < 0, \"meter_reading\"] = 0\n leaked_df = leaked_df[leaked_df[\"building_id\"] != 245]\n\n test_leak_df = test_df.copy(deep=True)\n test_leak_df = test_leak_df.merge(leaked_df, left_on=[\"building_id\", \"meter\", \"timestamp\"],\n right_on=[\"building_id\", \"meter\", \"timestamp\"], how=\"left\")\n test_leak_df.dropna(subset=[\"meter_reading\"], inplace=True)\n del test_leak_df[\"row_id\"]\n\n return pd.concat([train_df, test_leak_df], sort=False)\n\n\ndef save_processed_data(output_filepath, train_df, test_df):\n \"\"\"\n Saves the processed data\n \"\"\"\n os.makedirs(output_filepath, exist_ok=True)\n train_df.to_pickle(output_filepath + \"/train_data.pkl\")\n test_df.to_pickle(output_filepath + \"/test_data.pkl\")\n click.echo(\"Data successfully saved in folder: \" + output_filepath)\n\n\nif __name__ == '__main__':\n click_main()\n" ]
[ [ "pandas.read_pickle", "numpy.sin", "numpy.log", "numpy.mean", "numpy.std", "pandas.Categorical", "numpy.cos", "pandas.concat", "pandas.read_csv", "pandas.read_feather" ] ]
welgazil/DeDTW
[ "05d46c68122521dfe706736aaff24d6f99807e6e" ]
[ "deepspeech_pytorch/loader/spec_augment.py" ]
[ "# Copyright 2019 RnD at Spoon Radio\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"SpecAugment Implementation for Tensorflow.\nRelated paper : https://arxiv.org/pdf/1904.08779.pdf\nIn this paper, show summarized parameters by each open datasets in Tabel 1.\n-----------------------------------------\nPolicy | W | F | m_F | T | p | m_T\n-----------------------------------------\nNone | 0 | 0 | - | 0 | - | -\n-----------------------------------------\nLB | 80 | 27 | 1 | 100 | 1.0 | 1\n-----------------------------------------\nLD | 80 | 27 | 2 | 100 | 1.0 | 2\n-----------------------------------------\nSM | 40 | 15 | 2 | 70 | 0.2 | 2\n-----------------------------------------\nSS | 40 | 27 | 2 | 70 | 0.2 | 2\n-----------------------------------------\nLB : LibriSpeech basic\nLD : LibriSpeech double\nSM : Switchboard mild\nSS : Switchboard strong\n\"\"\"\n\nimport librosa\nimport librosa.display\nimport numpy as np\nimport random\nimport matplotlib\n\nmatplotlib.use(\"Agg\")\nimport matplotlib.pyplot as plt\nfrom .sparse_image_warp import sparse_image_warp\nimport torch\n\n\ndef time_warp(spec, W=5):\n num_rows = spec.shape[0]\n spec_len = spec.shape[1]\n\n y = num_rows // 2\n horizontal_line_at_ctr = spec[0][y]\n # assert len(horizontal_line_at_ctr) == spec_len\n\n point_to_warp = horizontal_line_at_ctr[random.randrange(W, spec_len - W)]\n # assert isinstance(point_to_warp, torch.Tensor)\n\n # Uniform distribution from (0,W) with chance to be up to W negative\n dist_to_warp = random.randrange(-W, W)\n src_pts = torch.tensor([[[y, point_to_warp]]])\n dest_pts = torch.tensor([[[y, point_to_warp + dist_to_warp]]])\n warped_spectro, dense_flows = sparse_image_warp(spec, src_pts, dest_pts)\n\n return warped_spectro.squeeze(3)\n\n\ndef spec_augment(\n mel_spectrogram,\n time_warping_para=40,\n frequency_masking_para=27,\n time_masking_para=70,\n frequency_mask_num=1,\n time_mask_num=1,\n):\n \"\"\"Spec augmentation Calculation Function.\n 'SpecAugment' have 3 steps for audio data augmentation.\n first step is time warping using Tensorflow's image_sparse_warp function.\n Second step is frequency masking, last step is time masking.\n # Arguments:\n mel_spectrogram(numpy array): audio file path of you want to warping and masking.\n time_warping_para(float): Augmentation parameter, \"time warp parameter W\".\n If none, default = 40.\n frequency_masking_para(float): Augmentation parameter, \"frequency mask parameter F\"\n If none, default = 27.\n time_masking_para(float): Augmentation parameter, \"time mask parameter T\"\n If none, default = 70.\n frequency_mask_num(float): number of frequency masking lines, \"m_F\".\n If none, default = 1.\n time_mask_num(float): number of time masking lines, \"m_T\".\n If none, default = 1.\n # Returns\n mel_spectrogram(numpy array): warped and masked mel spectrogram.\n \"\"\"\n mel_spectrogram = mel_spectrogram.unsqueeze(0)\n\n v = mel_spectrogram.shape[1]\n tau = 
mel_spectrogram.shape[2]\n\n # Step 1 : Time warping\n warped_mel_spectrogram = time_warp(mel_spectrogram)\n\n # Step 2 : Frequency masking\n for i in range(frequency_mask_num):\n f = np.random.uniform(low=0.0, high=frequency_masking_para)\n f = int(f)\n if v - f < 0:\n continue\n f0 = random.randint(0, v - f)\n warped_mel_spectrogram[:, f0 : f0 + f, :] = 0\n\n # Step 3 : Time masking\n for i in range(time_mask_num):\n t = np.random.uniform(low=0.0, high=time_masking_para)\n t = int(t)\n if tau - t < 0:\n continue\n t0 = random.randint(0, tau - t)\n warped_mel_spectrogram[:, :, t0 : t0 + t] = 0\n\n return warped_mel_spectrogram.squeeze()\n\n\ndef visualization_spectrogram(mel_spectrogram, title):\n \"\"\"visualizing result of SpecAugment\n # Arguments:\n mel_spectrogram(ndarray): mel_spectrogram to visualize.\n title(String): plot figure's title\n \"\"\"\n # Show mel-spectrogram using librosa's specshow.\n plt.figure(figsize=(10, 4))\n librosa.display.specshow(\n librosa.power_to_db(mel_spectrogram[0, :, :], ref=np.max),\n y_axis=\"mel\",\n fmax=8000,\n x_axis=\"time\",\n )\n # plt.colorbar(format='%+2.0f dB')\n plt.title(title)\n plt.tight_layout()\n plt.show()\n" ]
[ [ "matplotlib.use", "matplotlib.pyplot.title", "matplotlib.pyplot.figure", "numpy.random.uniform", "torch.tensor", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.show" ] ]
tuannv0898/uwb-ips
[ "d790e75e6f5da916701b11a2fdf3e03b6a47086b" ]
[ "software/linux/gateway/mavlink-1.0.13/pymavlink/tools/mavgraph.py" ]
[ "#!/usr/bin/env python\n'''\ngraph a MAVLink log file\nAndrew Tridgell August 2011\n'''\nfrom __future__ import print_function\nfrom builtins import input\nfrom builtins import range\n\nimport datetime\nimport matplotlib\nimport os\nimport re\nimport sys\nimport time\nfrom math import *\n\ntry:\n from pymavlink.mavextra import *\nexcept:\n print(\"WARNING: Numpy missing, mathematical notation will not be supported.\")\n\nif sys.version_info[0] >= 3:\n text_types = frozenset([str,])\nelse:\n text_types = frozenset([unicode, str])\n\n# cope with rename of raw_input in python3\ntry:\n input = raw_input\nexcept NameError:\n pass\n\ncolourmap = {\n 'ardupilot' : {\n 'MANUAL' : (1.0, 0, 0),\n 'AUTO' : ( 0, 1.0, 0),\n 'LOITER' : ( 0, 0, 1.0),\n 'FBWA' : (1.0, 0.5, 0),\n 'RTL' : ( 1, 0, 0.5),\n 'STABILIZE' : (0.5, 1.0, 0),\n 'LAND' : ( 0, 1.0, 0.5),\n 'STEERING' : (0.5, 0, 1.0),\n 'HOLD' : ( 0, 0.5, 1.0),\n 'ALT_HOLD' : (1.0, 0.5, 0.5),\n 'CIRCLE' : (0.5, 1.0, 0.5),\n 'POSITION' : (1.0, 0.0, 1.0),\n 'GUIDED' : (0.5, 0.5, 1.0),\n 'ACRO' : (1.0, 1.0, 0),\n 'CRUISE' : ( 0, 1.0, 1.0)\n },\n 'px4' : {\n 'MANUAL' : (1.0, 0, 0),\n 'SEATBELT' : ( 0.5, 0.5, 0),\n 'EASY' : ( 0, 1.0, 0),\n 'AUTO' : ( 0, 0, 1.0),\n 'UNKNOWN' : ( 1.0, 1.0, 1.0)\n }\n }\ncolourmap[\"apm\"] = colourmap[\"ardupilot\"]\n\nedge_colour = (0.1, 0.1, 0.1)\n\nlowest_x = None\nhighest_x = None\n\ndef plotit(x, y, fields, colors=[]):\n '''plot a set of graphs using date for x axis'''\n global lowest_x, highest_x\n pylab.ion()\n fig = pylab.figure(num=1, figsize=(12,6))\n ax1 = fig.gca()\n ax2 = None\n xrange = 0.0\n for i in range(0, len(fields)):\n if len(x[i]) == 0: continue\n if lowest_x is None or x[i][0] < lowest_x:\n lowest_x = x[i][0]\n if highest_x is None or x[i][-1] > highest_x:\n highest_x = x[i][-1]\n if highest_x is None or lowest_x is None:\n return\n xrange = highest_x - lowest_x\n xrange *= 24 * 60 * 60\n formatter = matplotlib.dates.DateFormatter('%H:%M:%S')\n interval = 1\n intervals = [ 1, 2, 5, 10, 15, 30, 60, 120, 240, 300, 600,\n 900, 1800, 3600, 7200, 5*3600, 10*3600, 24*3600 ]\n for interval in intervals:\n if xrange / interval < 15:\n break\n locator = matplotlib.dates.SecondLocator(interval=interval)\n if not args.xaxis:\n ax1.xaxis.set_major_locator(locator)\n ax1.xaxis.set_major_formatter(formatter)\n empty = True\n ax1_labels = []\n ax2_labels = []\n for i in range(0, len(fields)):\n if len(x[i]) == 0:\n print(\"Failed to find any values for field %s\" % fields[i])\n continue\n if i < len(colors):\n color = colors[i]\n else:\n color = 'red'\n (tz, tzdst) = time.tzname\n if axes[i] == 2:\n if ax2 is None:\n ax2 = ax1.twinx()\n ax = ax2\n if not args.xaxis:\n ax2.xaxis.set_major_locator(locator)\n ax2.xaxis.set_major_formatter(formatter)\n label = fields[i]\n if label.endswith(\":2\"):\n label = label[:-2]\n ax2_labels.append(label)\n else:\n ax1_labels.append(fields[i])\n ax = ax1\n if args.xaxis:\n if args.marker is not None:\n marker = args.marker\n else:\n marker = '+'\n if args.linestyle is not None:\n linestyle = args.linestyle\n else:\n linestyle = 'None'\n ax.plot(x[i], y[i], color=color, label=fields[i],\n linestyle=linestyle, marker=marker)\n else:\n if args.marker is not None:\n marker = args.marker\n else:\n marker = 'None'\n if args.linestyle is not None:\n linestyle = args.linestyle\n else:\n linestyle = '-'\n if len(y[i]) > 0 and type(y[i][0]) in text_types:\n # assume this is a piece of text to be rendered at a point in time\n last_text_time = -1\n last_text = None\n for n in range(0, 
len(x[i])):\n if last_text is None:\n last_text = \"[\" + y[i][n] + \"]\"\n last_text_time = x[i][n]\n elif x[i][n] == last_text_time:\n last_text += \"[\" + y[i][n] + \"]\"\n else:\n ax.text(x[i][n], 10, last_text,\n rotation=90,\n alpha=0.3,\n verticalalignment='baseline')\n last_text = None\n last_label_time = x[i][n]\n if last_text is not None:\n ax.text(x[i][n], 10, last_text,\n rotation=90,\n alpha=0.3,\n verticalalignment='baseline')\n else:\n ax.plot_date(x[i], y[i], color=color, label=fields[i],\n linestyle=linestyle, marker=marker, tz=None)\n empty = False\n if args.flightmode is not None:\n for i in range(len(modes)-1):\n c = colourmap[args.flightmode].get(modes[i][1], edge_colour)\n ax1.axvspan(modes[i][0], modes[i+1][0], fc=c, ec=edge_colour, alpha=0.1)\n c = colourmap[args.flightmode].get(modes[-1][1], edge_colour)\n ax1.axvspan(modes[-1][0], ax1.get_xlim()[1], fc=c, ec=edge_colour, alpha=0.1)\n if ax1_labels != []:\n ax1.legend(ax1_labels,loc=args.legend)\n if ax2_labels != []:\n ax2.legend(ax2_labels,loc=args.legend2)\n if empty:\n print(\"No data to graph\")\n return\n return fig\n\n\nfrom argparse import ArgumentParser\nparser = ArgumentParser(description=__doc__)\n\nparser.add_argument(\"--no-timestamps\", dest=\"notimestamps\", action='store_true', help=\"Log doesn't have timestamps\")\nparser.add_argument(\"--planner\", action='store_true', help=\"use planner file format\")\nparser.add_argument(\"--condition\", default=None, help=\"select packets by a condition\")\nparser.add_argument(\"--labels\", default=None, help=\"comma separated field labels\")\nparser.add_argument(\"--legend\", default='upper left', help=\"default legend position\")\nparser.add_argument(\"--legend2\", default='upper right', help=\"default legend2 position\")\nparser.add_argument(\"--marker\", default=None, help=\"point marker\")\nparser.add_argument(\"--linestyle\", default=None, help=\"line style\")\nparser.add_argument(\"--xaxis\", default=None, help=\"X axis expression\")\nparser.add_argument(\"--multi\", action='store_true', help=\"multiple files with same colours\")\nparser.add_argument(\"--zero-time-base\", action='store_true', help=\"use Z time base for DF logs\")\nparser.add_argument(\"--flightmode\", default=None,\n help=\"Choose the plot background according to the active flight mode of the specified type, e.g. --flightmode=apm for ArduPilot or --flightmode=px4 for PX4 stack logs. 
Cannot be specified with --xaxis.\")\nparser.add_argument(\"--dialect\", default=\"ardupilotmega\", help=\"MAVLink dialect\")\nparser.add_argument(\"--output\", default=None, help=\"provide an output format\")\nparser.add_argument(\"--timeshift\", type=float, default=0, help=\"shift time on first graph in seconds\")\nparser.add_argument(\"logs_fields\", metavar=\"<LOG or FIELD>\", nargs=\"+\")\nargs = parser.parse_args()\n\nfrom pymavlink import mavutil\n\nif args.flightmode is not None and args.xaxis:\n print(\"Cannot request flightmode backgrounds with an x-axis expression\")\n sys.exit(1)\n\nif args.flightmode is not None and args.flightmode not in colourmap:\n print(\"Unknown flight controller '%s' in specification of --flightmode (choose from %s)\" % (args.flightmode, \",\".join(colourmap.keys())))\n sys.exit(1)\n\n\nif args.output is not None:\n matplotlib.use('Agg')\n\nimport pylab\n\nfilenames = []\nfields = []\nfor f in args.logs_fields:\n if os.path.exists(f):\n filenames.append(f)\n else:\n fields.append(f)\nmsg_types = set()\nmultiplier = []\nfield_types = []\n\ncolors = [ 'red', 'green', 'blue', 'orange', 'olive', 'black', 'grey', 'yellow', 'brown', 'darkcyan', 'cornflowerblue', 'darkmagenta', 'deeppink', 'darkred']\n\n# work out msg types we are interested in\nx = []\ny = []\nmodes = []\naxes = []\nfirst_only = []\nre_caps = re.compile('[A-Z_][A-Z0-9_]+')\nfor f in fields:\n caps = set(re.findall(re_caps, f))\n msg_types = msg_types.union(caps)\n field_types.append(caps)\n y.append([])\n x.append([])\n axes.append(1)\n first_only.append(False)\n\ndef add_data(t, msg, vars, flightmode):\n '''add some data'''\n mtype = msg.get_type()\n if args.flightmode is not None and (len(modes) == 0 or modes[-1][1] != flightmode):\n modes.append((t, flightmode))\n if mtype not in msg_types:\n return\n for i in range(0, len(fields)):\n if mtype not in field_types[i]:\n continue\n f = fields[i]\n if f.endswith(\":2\"):\n axes[i] = 2\n f = f[:-2]\n if f.endswith(\":1\"):\n first_only[i] = True\n f = f[:-2]\n v = mavutil.evaluate_expression(f, vars)\n if v is None:\n continue\n if args.xaxis is None:\n xv = t\n else:\n xv = mavutil.evaluate_expression(args.xaxis, vars)\n if xv is None:\n continue\n y[i].append(v)\n x[i].append(xv)\n\ndef process_file(filename, timeshift):\n '''process one file'''\n print(\"Processing %s\" % filename)\n mlog = mavutil.mavlink_connection(filename, notimestamps=args.notimestamps, zero_time_base=args.zero_time_base, dialect=args.dialect)\n vars = {}\n\n while True:\n msg = mlog.recv_match(args.condition)\n if msg is None: break\n try:\n tdays = matplotlib.dates.date2num(datetime.datetime.fromtimestamp(msg._timestamp+timeshift))\n except ValueError:\n # this can happen if the log is corrupt\n # ValueError: year is out of range\n break\n add_data(tdays, msg, mlog.messages, mlog.flightmode)\n\nif len(filenames) == 0:\n print(\"No files to process\")\n sys.exit(1)\n\nif args.labels is not None:\n labels = args.labels.split(',')\n if len(labels) != len(fields)*len(filenames):\n print(\"Number of labels (%u) must match number of fields (%u)\" % (\n len(labels), len(fields)*len(filenames)))\n sys.exit(1)\nelse:\n labels = None\n\ntimeshift = args.timeshift\n\nfor fi in range(0, len(filenames)):\n f = filenames[fi]\n process_file(f, timeshift)\n timeshift = 0\n for i in range(0, len(x)):\n if first_only[i] and fi != 0:\n x[i] = []\n y[i] = []\n if labels:\n lab = labels[fi*len(fields):(fi+1)*len(fields)]\n else:\n lab = fields[:]\n if args.multi:\n col = colors[:]\n 
else:\n col = colors[fi*len(fields):]\n fig = plotit(x, y, lab, colors=col)\n for i in range(0, len(x)):\n x[i] = []\n y[i] = []\nif args.output is None:\n pylab.show()\n pylab.draw()\n input('press enter to exit....')\nelse:\n fname, fext = os.path.splitext(args.output)\n if fext == '.html':\n import mpld3\n html = mpld3.fig_to_html(fig)\n f_out = open(args.output, 'w')\n f_out.write(html)\n f_out.close()\n else:\n pylab.legend(loc=2,prop={'size':8})\n pylab.savefig(args.output, bbox_inches='tight', dpi=200)\n" ]
[ [ "matplotlib.use", "matplotlib.dates.DateFormatter", "matplotlib.dates.SecondLocator" ] ]
mramospe/minkit
[ "fa6808a6ca8063751da92f683f2b810a0690a462" ]
[ "performance/timing/roofit_script.py" ]
[ "#!/usr/bin/env python\n########################################\n# MIT License\n#\n# Copyright (c) 2020 Miguel Ramos Pernas\n########################################\n'''\nGenerate a sample using the RooFit package.\n'''\nimport time\nimport argparse\nimport roofit_models\nimport multiprocessing\nimport numpy as np\nimport ROOT as rt\nrt.PyConfig.IgnoreCommandLineOptions = True\n\n\ndef fit(pdf, nevts, repetitions, m, pars, ncpu):\n '''\n Generate data following the given model and fit it.\n '''\n times = np.empty(repetitions, dtype=np.float64)\n initials = {p.GetName(): np.random.uniform(p.getMin(), p.getMax())\n for p in pars}\n for i in range(len(times)):\n data = pdf.generate(rt.RooArgSet(m), nevts, rt.RooFit.NumCPU(ncpu))\n start = time.time()\n pdf.fitTo(data, rt.RooFit.Save(), rt.RooFit.NumCPU(ncpu))\n end = time.time()\n times[i] = end - start\n for p in pars:\n p.setVal(initials[p.GetName()])\n return times\n\n\ndef generate(pdf, nevts, repetitions, m, ncpu):\n '''\n Generate data following the given model.\n '''\n times = np.empty(repetitions, dtype=np.float64)\n for i in range(len(times)):\n start = time.time()\n pdf.generate(rt.RooArgSet(m), nevts, rt.RooFit.NumCPU(ncpu))\n end = time.time()\n times[i] = end - start\n return times\n\n\ndef main(jobtype, model, nevts, repetitions, outputfile, ncpu):\n\n pdf, pars, extra = getattr(roofit_models, model)()\n\n m = pars[0]\n\n if jobtype == 'generate':\n times = generate(pdf, nevts, repetitions, m, ncpu)\n elif jobtype == 'fit':\n times = fit(pdf, nevts, repetitions, m, pars[1:], ncpu)\n else:\n raise ValueError(f'Unknown job type {jobtype}')\n\n with open(outputfile, 'at') as f:\n f.write(f'{times.mean()} {times.std()}\\n')\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser(description=__doc__)\n parser.add_argument('jobtype', type=str, choices=('generate', 'fit'),\n default='fit',\n help='Type of job to execute')\n parser.add_argument('model', type=str, choices=('basic', 'intermediate', 'numeric'),\n default='basic',\n help='Model to use')\n parser.add_argument('nevts', type=int, default=100000,\n help='Number of events to generate')\n parser.add_argument('repetitions', type=int, default=10,\n help='Number of repetitions')\n parser.add_argument('outputfile', type=str,\n help='Where to save the result')\n parser.add_argument('--ncpu', type=int, default=1, choices=tuple(range(multiprocessing.cpu_count())),\n help='Number of threads to run')\n args = parser.parse_args()\n main(**vars(args))\n" ]
[ [ "numpy.empty" ] ]
catycaldwell/CNTK
[ "86a20080b19255b96ada85b6a7ab6b8e7be7465b" ]
[ "bindings/python/setup.py" ]
[ "import sys\nimport os\nimport shutil\nfrom glob import glob\nimport platform\nfrom warnings import warn\nfrom setuptools import setup, Extension, find_packages\nimport numpy\n\nIS_WINDOWS = platform.system() == 'Windows'\n\n# TODO should handle swig path specified via build_ext --swig-path\nif os.system('swig -version 1>%s 2>%s' % (os.devnull, os.devnull)) != 0:\n print(\"Please install swig (>= 3.0.10) and include it in your path.\\n\")\n sys.exit(1)\n\nif IS_WINDOWS:\n if os.system('cl 1>%s 2>%s' % (os.devnull, os.devnull)) != 0:\n print(\"Compiler was not found in path.\\n\"\n \"Make sure you installed the C++ tools during Visual Studio 2015 install and \\n\"\n \"run vcvarsall.bat from a DOS command prompt:\\n\"\n \" \\\"C:\\\\Program Files (x86)\\\\Microsoft Visual Studio 14.0\\\\VC\\\\vcvarsall\\\" amd64\\n\")\n sys.exit(1)\n\n try:\n assert(os.environ[\"MSSdk\"] == \"1\");\n assert(os.environ[\"DISTUTILS_USE_SDK\"] == \"1\");\n except (KeyError, AssertionError) as e:\n print(\"Please set the environment variables MSSdk and DISTUTILS_USE_SDK to 1:\\n\"\n \" set MSSdk=1\\n\"\n \" set DISTUTILS_USE_SDK=1\\n\")\n sys.exit(1)\n\nCNTK_PATH = os.path.join(os.path.dirname(__file__), \"..\", \"..\")\nCNTK_SOURCE_PATH = os.path.join(CNTK_PATH, \"Source\")\nPROJ_LIB_PATH = os.path.join(os.path.dirname(__file__), \"cntk\", \"libs\")\n\nif 'CNTK_LIB_PATH' in os.environ:\n CNTK_LIB_PATH = os.environ['CNTK_LIB_PATH']\nelse:\n # Assumes GPU SKU is being built\n if IS_WINDOWS:\n CNTK_LIB_PATH = os.path.join(CNTK_PATH, \"x64\", \"Release\")\n else:\n CNTK_LIB_PATH = os.path.join(\n CNTK_PATH, \"build\", \"gpu\", \"release\", \"lib\")\n\nprint(\"Using CNTK sources at '%s'\" % os.path.abspath(CNTK_SOURCE_PATH))\nprint(\"Using CNTK libs at '%s'\" % os.path.abspath(CNTK_LIB_PATH))\n\n\ndef lib_path(fn):\n return os.path.normpath(os.path.join(CNTK_LIB_PATH, fn))\n\n\ndef proj_lib_path(fn):\n return os.path.normpath(os.path.join(PROJ_LIB_PATH, fn))\n\n\ndef strip_path(fn):\n return os.path.split(fn)[1]\n\n\ndef strip_ext(fn):\n return os.path.splitext(fn)[0]\n\nif IS_WINDOWS:\n libname_rt_ext = '.dll'\n\n link_libs = [\"CNTKLibrary-2.0\"]\nelse:\n link_libs = [\"cntklibrary-2.0\"]\n libname_rt_ext = '.so'\n\n\nrt_libs = [strip_path(fn) for fn in glob(os.path.join(CNTK_LIB_PATH,\n '*' + libname_rt_ext))]\n\n# copy over the libraries to the cntk base directory so that the rpath is\n# correctly set\nif os.path.exists(PROJ_LIB_PATH):\n shutil.rmtree(PROJ_LIB_PATH)\n\nos.mkdir(PROJ_LIB_PATH)\n\nfor fn in rt_libs:\n src_file = lib_path(fn)\n tgt_file = proj_lib_path(fn)\n shutil.copy(src_file, tgt_file)\n\nif 'CNTK_EXTRA_LIBRARIES' in os.environ:\n for lib in os.environ['CNTK_EXTRA_LIBRARIES'].split():\n shutil.copy(lib, PROJ_LIB_PATH)\n rt_libs.append(strip_path(lib))\n\n# For package_data we need to have names relative to the cntk module.\nrt_libs = [os.path.join('libs', fn) for fn in rt_libs]\n\nextra_compile_args = [\n \"-DSWIG\",\n \"-DUNICODE\"\n]\n\nif IS_WINDOWS:\n extra_compile_args += [\n \"/EHsc\",\n \"/DEBUG\",\n \"/Zi\",\n \"/WX\"\n ]\n extra_link_args = ['/DEBUG']\n runtime_library_dirs = []\nelse:\n extra_compile_args += [\n '--std=c++11',\n ]\n extra_link_args = []\n\n # Expecting the dependent libs (libcntklibrary-2.0.so, etc.) 
inside\n # site-packages/cntk/libs.\n runtime_library_dirs = ['$ORIGIN/cntk/libs']\n os.environ[\"CXX\"] = \"mpic++\"\n\ncntkV2LibraryInclude = os.path.join(CNTK_SOURCE_PATH, \"CNTKv2LibraryDll\", \"API\")\ncntkBindingCommon = os.path.join(CNTK_PATH, \"bindings\", \"common\")\n\ncntk_module = Extension(\n name=\"_cntk_py\",\n\n sources = [os.path.join(\"cntk\", \"cntk_py.i\")],\n swig_opts = [\"-c++\", \"-D_MSC_VER\", \"-I\" + cntkV2LibraryInclude, \"-I\" + cntkBindingCommon, \"-Werror\" ],\n libraries = link_libs,\n library_dirs = [CNTK_LIB_PATH],\n\n runtime_library_dirs = runtime_library_dirs,\n\n include_dirs = [\n cntkV2LibraryInclude,\n os.path.join(CNTK_SOURCE_PATH, \"Math\"),\n os.path.join(CNTK_SOURCE_PATH, \"Common\", \"Include\"),\n numpy.get_include(),\n ],\n\n extra_compile_args=extra_compile_args,\n extra_link_args=extra_link_args,\n language=\"c++\",\n)\n\n# Do not include examples\npackages = [x for x in find_packages() if x.startswith('cntk') and not x.startswith('cntk.swig')]\n\npackage_data = { 'cntk': ['pytest.ini', 'io/tests/tf_data.txt'] }\n\nif IS_WINDOWS:\n # On Windows copy all runtime libs to the base folder of Python\n kwargs = dict(data_files = [('.', [ os.path.join('cntk', lib) for lib in rt_libs ])],\n package_data = package_data)\nelse:\n # On Linux copy all runtime libs into the cntk/lib folder. \n package_data['cntk'] += rt_libs\n kwargs = dict(package_data = package_data)\n\nsetup(name=\"cntk\",\n version=\"2.0.beta10.0\",\n url=\"http://cntk.ai\",\n ext_modules=[cntk_module],\n packages=packages,\n # install_requires=[\n # 'numpy>=1.11',\n # 'scipy>=0.17'\n #],\n **kwargs)\n" ]
[ [ "numpy.get_include" ] ]
eljost/QCElemental
[ "c6de3a330f686f5dad0a7f4cb0fcf2bf55a549e2" ]
[ "qcelemental/util/test_scipy_hungarian.py" ]
[ "# [Apr 2019] stolen directly from scipy so I can test getting an array back\n# https://github.com/scipy/scipy/blob/master/scipy/optimize/tests/test_hungarian.py\n# * change imports to local\n# * skip importing and testing `matrix`\n# * add testing of reduced cost for non-T (reduced matrix for T is different)\n\n# Author: Brian M. Clapper, G. Varoquaux, Lars Buitinck\n# License: BSD\n\nimport numpy as np\nfrom numpy.testing import assert_array_equal\nfrom pytest import raises as assert_raises\n\nfrom qcelemental.util.scipy_hungarian import linear_sum_assignment\n\n\ndef test_linear_sum_assignment():\n for cost_matrix, expected_cost, expected_reduced_cost_matrix in [\n # Square\n ([[400, 150, 400],\n [400, 450, 600],\n [300, 225, 300]],\n [150, 400, 300],\n [[250, 0, 175],\n [ 0, 50, 125],\n [ 75, 0, 0]]\n ),\n\n # Rectangular variant\n ([[400, 150, 400, 1],\n [400, 450, 600, 2],\n [300, 225, 300, 3]],\n [150, 2, 300],\n [[102, 0, 102, 0],\n [101, 299, 301, 0],\n [ 0, 73, 0, 0]]\n ),\n\n # Square\n ([[10, 10, 8],\n [9, 8, 1],\n [9, 7, 4]],\n [10, 1, 7],\n [[0, 0, 1],\n [5, 4, 0],\n [2, 0, 0]]\n ),\n\n # Rectangular variant\n ([[10, 10, 8, 11],\n [9, 8, 1, 1],\n [9, 7, 4, 10]],\n [10, 1, 4],\n [[0, 0, 0, 3],\n [6, 5, 0, 0],\n [3, 1, 0, 6]]\n ),\n\n # n == 2, m == 0 matrix\n ([[], []],\n [],\n [[], []]),\n ]: # yapf: disable\n cost_matrix = np.array(cost_matrix)\n (row_ind, col_ind), reduced_cost_matrix = linear_sum_assignment(cost_matrix, return_cost=True)\n assert_array_equal(row_ind, np.sort(row_ind))\n assert_array_equal(expected_cost, cost_matrix[row_ind, col_ind])\n assert_array_equal(expected_reduced_cost_matrix, reduced_cost_matrix)\n\n cost_matrix = cost_matrix.T\n row_ind, col_ind = linear_sum_assignment(cost_matrix)\n assert_array_equal(row_ind, np.sort(row_ind))\n assert_array_equal(np.sort(expected_cost), np.sort(cost_matrix[row_ind, col_ind]))\n\n\ndef test_linear_sum_assignment_input_validation():\n assert_raises(ValueError, linear_sum_assignment, [1, 2, 3])\n\n C = [[1, 2, 3], [4, 5, 6]]\n assert_array_equal(linear_sum_assignment(C), linear_sum_assignment(np.asarray(C)))\n # assert_array_equal(linear_sum_assignment(C),\n # linear_sum_assignment(matrix(C)))\n\n I = np.identity(3)\n assert_array_equal(linear_sum_assignment(I.astype(np.bool)), linear_sum_assignment(I))\n assert_raises(ValueError, linear_sum_assignment, I.astype(str))\n\n I[0][0] = np.nan\n assert_raises(ValueError, linear_sum_assignment, I)\n\n I = np.identity(3)\n I[1][1] = np.inf\n assert_raises(ValueError, linear_sum_assignment, I)\n" ]
[ [ "numpy.array", "numpy.asarray", "numpy.testing.assert_array_equal", "numpy.identity", "numpy.sort" ] ]
JohnHuCC/mlintro
[ "6b5ca7ea7f8658d14ac0fa50c9d1514cfa2bdb6c" ]
[ "Neural_Network_classifier.py" ]
[ "import tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import ListedColormap\nimport os\nfrom sklearn import datasets\nfrom tensorflow.contrib.learn.python import SKCompat\nfrom pandas import DataFrame\nimport torch\nimport torch.nn.functional as F\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA\nos.environ['KMP_DUPLICATE_LIB_OK'] = 'True'\n\n# add layer\ndef add_hidden_layer(inputs, in_size, out_size, activation_function=None):\n weights = tf.Variable(tf.random.normal([in_size, out_size]))\n biases = tf.Variable(tf.zeros([1,out_size])+0.1)\n Wx_plus_biases = tf.matmul(inputs,weights) + biases\n if activation_function is None:\n outputs = Wx_plus_biases\n else:\n outputs = activation_function(Wx_plus_biases)\n return outputs\n\n#Neurql network with no hidden layer\ndef NN_no_hidden_layer(x_data, y_data):\n weights = tf.Variable(1.0, name=\"w\")\n noise = np.random.normal(0,0.05, x_data.shape)\n y = weights*x_data**3+weights*x_data**2-weights*x_data-1+noise\n loss = tf.reduce_mean(tf.reduce_sum(tf.square(y - y_data), reduction_indices=1))\n optimizer = tf.train.AdamOptimizer(0.01)\n train = optimizer.minimize(loss)\n init = tf.initialize_all_variables()\n sess = tf.Session()\n sess.run(init)\n for step in range(5000):\n sess.run(train)\n prediction_value = sess.run(weights) * x_data**3 + sess.run(weights)*x_data**2 - sess.run(weights)*x_data-1+noise\n # if step % 100 == 0:\n # print(\"loss: \", sess.run(loss))\n return prediction_value\n\n#Neurql network with one hidden layer\ndef NN_one_hidden_layer(x_data, y_data, num_of_nerve):\n xs = tf.placeholder(tf.float32, [None, 1])\n ys = tf.placeholder(tf.float32, [None, 1])\n h1 = add_hidden_layer(xs, 1, num_of_nerve, activation_function=tf.nn.relu)\n prediction = add_hidden_layer(h1, num_of_nerve, 1, activation_function=None)\n loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys-prediction), reduction_indices=1))\n optimizer = tf.train.AdamOptimizer(0.01)\n train = optimizer.minimize(loss)\n init = tf.initialize_all_variables()\n sess = tf.Session()\n sess.run(init)\n for step in range(5000):\n sess.run(train, feed_dict={xs: x_data, ys: y_data})\n prediction_value = sess.run(prediction, feed_dict={xs: x_data})\n return prediction_value\n\n#Neurql network with two hidden layer\ndef NN_two_hidden_layer(x_data, y_data, num_of_nerve):\n xs = tf.placeholder(tf.float32, [None, 1])\n ys = tf.placeholder(tf.float32, [None, 1])\n h1 = add_hidden_layer(xs, 1, num_of_nerve, activation_function=tf.nn.relu)\n h2 = add_hidden_layer(h1, num_of_nerve, num_of_nerve, activation_function=tf.nn.relu)\n prediction = add_hidden_layer(h2, num_of_nerve, 1, activation_function=None)\n\n # create tensorflow structure\n loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction), reduction_indices=1))\n optimizer = tf.train.AdamOptimizer(0.01)\n train = optimizer.minimize(loss)\n init = tf.initialize_all_variables()\n sess = tf.Session()\n sess.run(init)\n for step in range(5000):\n sess.run(train, feed_dict={xs: x_data, ys: y_data})\n prediction_value = sess.run(prediction, feed_dict={xs: x_data})\n # if step%100 == 0:\n # print(\"loss: \", sess.run(loss, feed_dict={xs: x_data, ys: y_data}))\n return prediction_value\n\ndef make_circke_data():\n circles_data, circles_data_labels = datasets.make_circles(n_samples=500, factor=0.1, noise=0.1)\n return circles_data,circles_data_labels\n\ndef classify_circle_data(circles_data, circles_data_labels):\n feature_columns = 
[tf.contrib.layers.real_valued_column(\"\", dimension=2)]\n classifier = tf.contrib.learn.DNNClassifier(feature_columns=feature_columns, hidden_units=[10, 20, 10], n_classes=2)\n DNN_classifier = classifier.fit(circles_data, circles_data_labels, steps=2000)\n return DNN_classifier\n\n#classify's accuracy score of DNNClassifier\ndef classify_accuracy_score():\n feature_columns = [tf.contrib.layers.real_valued_column(\"\", dimension=2)]\n classifier = tf.contrib.learn.DNNClassifier(feature_columns=feature_columns, hidden_units=[10, 20, 10], n_classes=2)\n accuracy_score = classifier.evaluate(circles_data, circles_data_labels, steps=1)[\"accuracy\"]\n return accuracy_score\n\ndef main():\n # part1\n # 利用tensorflow\n # 撰寫一個多層的神經網路去模擬一個函數產生器。請比較採用不同層級、不同神經元個數所達到的模擬效\n # 果。並請將實際及模擬的結果顯示在圖形上。\n # create data\n x_data = np.linspace(-1, 1, 300)[:, np.newaxis]\n noise = np.random.normal(0, 0.05, x_data.shape)\n y_data = x_data ** 3 + x_data ** 2 - x_data - 1\n y_data_noise = y_data + noise\n\n # print('prediction_value:' + str(prediction_value))\n # print(prediction_value.shape)\n # print('x_data:' + str(x_data))\n # print('y_data:' + str(y_data))\n # print('x_data.shape:' + str(x_data.shape))\n # print('y_data.shape:' + str(y_data.shape))\n\n #plot scatter\n fig = plt.figure(figsize=(10, 8))\n bx = fig.add_subplot(1, 1, 1)\n plt.xlabel('x_data')\n plt.ylabel('y_data')\n prediction_no_hidden = NN_no_hidden_layer(x_data, y_data_noise)\n prediction_one_hidden = NN_one_hidden_layer(x_data, y_data_noise, 20)\n prediction_two_hidden = NN_two_hidden_layer(x_data, y_data_noise, 20)\n prediction_one_hidden_less_nerve = NN_one_hidden_layer(x_data, y_data_noise, 5)\n prediction_two_hidden_less_nerve = NN_two_hidden_layer(x_data, y_data_noise, 5)\n bx.scatter(x_data, y_data, color='black') #原資料的折線圖\n bx.scatter(x_data, prediction_no_hidden, color='grey') #沒有隱藏層的圖用灰色線表示\n bx.scatter(x_data, prediction_one_hidden, color='darkgreen') #一個隱藏層用深綠線表示\n bx.scatter(x_data, prediction_two_hidden, color='darkred') #兩個隱藏層用深紅線表示\n bx.scatter(x_data, prediction_one_hidden_less_nerve, color='lightgreen') #一個隱藏層較少神經元用萊姆綠表示\n bx.scatter(x_data, prediction_two_hidden_less_nerve, color='pink') #兩個隱藏層較少神經元用粉紅表示\n plt.show()\n\n # part2\n # 請利用tensorflow撰寫一個神經網路針對產生的資料集做分類。\n # 將此一神經網路所判斷不同類別的區域分別塗上不同顏色。並且將資料集的資料也標示於圖上\n circles_data, circles_data_labels = make_circke_data()\n df = DataFrame(dict(x=circles_data[:, 0], y=circles_data[:, 1], label=circles_data_labels))\n DNN_classifier = classify_circle_data(circles_data, circles_data_labels)\n classifier_result = list(DNN_classifier.predict(circles_data))\n x_min, x_max = circles_data[circles_data_labels == 0, 0].min() - 0.2, circles_data[circles_data_labels == 0, 0].max() + 0.2\n y_min, y_max = circles_data[circles_data_labels == 0, 1].min() - 0.2, circles_data[circles_data_labels == 0, 1].max() + 0.2\n xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.005), np.arange(y_min, y_max, 0.005))\n Z = list(DNN_classifier.predict(np.c_[xx.ravel(), yy.ravel()]))\n Z=np.array(Z)\n\n X0,X1 = circles_data[circles_data_labels == 0],circles_data[circles_data_labels == 1]\n tp = (circles_data_labels == classifier_result)\n tp0, tp1 = tp[circles_data_labels == 0],tp[circles_data_labels == 1]\n X0_tp, X0_fp = X0[tp0], X0[~tp0]\n X1_tp, X1_fp = X1[tp1], X1[~tp1]\n cm_bright=ListedColormap(['pink','#0D8ECF'])\n Z=Z.reshape(xx.shape)\n plt.pcolormesh(xx,yy,Z,cmap=cm_bright)\n # class 0: dots\n plt.scatter(X0_tp[:, 0], X0_tp[:, 1], marker='.', color='red')\n plt.scatter(X0_fp[:, 0], X0_fp[:, 1], 
marker='x',\n s=20, color='#FFF3EE') # dark red\n\n # class 1: dots\n plt.scatter(X1_tp[:, 0], X1_tp[:, 1], marker='.', color='blue')\n plt.scatter(X1_fp[:, 0], X1_fp[:, 1], marker='x',\n s=20, color='#842B00') # dark blue\n plt.show()\nmain()\n\n#pytorch\n# circles_data, circles_data_labels = datasets.make_circles(n_samples=50, factor=0.5, noise=0.1)\n# x0,x1 = torch.from_numpy(circles_data[circles_data_labels == 0]),torch.from_numpy(circles_data[circles_data_labels == 1])\n# y0 = torch.from_numpy(circles_data_labels[circles_data_labels==0])\n# y1 = torch.from_numpy(circles_data_labels[circles_data_labels==1])\n# x = torch.cat((x0, x1), 0).type(torch.FloatTensor) # shape (200, 2) FloatTensor = 32-bit floating\n# y = torch.cat((y0, y1), ).type(torch.LongTensor) # shape (200,) LongTensor = 64-bit integer\n#\n# class Net(torch.nn.Module):\n# def __init__(self, n_feature, n_hidden, n_output):\n# super(Net, self).__init__()\n# self.hidden = torch.nn.Linear(n_feature, n_hidden) # hidden layer\n# self.out = torch.nn.Linear(n_hidden, n_output) # output layer\n#\n# def forward(self, x):\n# x = F.relu(self.hidden(x)) # activation function for hidden layer\n# x = self.out(x)\n# return x\n#\n# net = Net(2,10,2)\n#\n# optimizer = torch.optim.SGD(net.parameters(), lr=0.02)\n# loss_func = torch.nn.CrossEntropyLoss() # the target label is NOT an one-hotted\n#\n# for t in range(10000):\n# out = net(x) # input x and predict based on x\n# loss = loss_func(out, y) # must be (1. nn output, 2. target), the target label is NOT one-hotted\n#\n# optimizer.zero_grad() # clear gradients for next train\n# loss.backward() # backpropagation, compute gradients\n# optimizer.step() # apply gradients\n # if t % 2 == 0:\n # plot and show learning process\n\n# clf = LDA()\n# clf.fit(circles_data, circles_data_labels)\n#\n# x_min, x_max = circles_data[circles_data_labels==0, 0].min()-0.2, circles_data[circles_data_labels==0, 0].max()+0.2\n# y_min, y_max = circles_data[circles_data_labels==0, 1].min()-0.2, circles_data[circles_data_labels==0, 1].max()+0.2\n# xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.01), np.arange(y_min, y_max, 0.01))\n# print(xx,yy)\n# cm_bright = ListedColormap(['#D9E021', '#0D8ECF'])\n#\n#\n# plt.figure(figsize=(8,4))\n# plt.cla()\n# prediction = torch.max(out, 1)[1]\n# #Z = prediction.data.numpy()\n# #Z = Z.reshape(xx.shape)\n#\n# target_y = y.data.numpy()\n# #plt.pcolormesh(xx, yy, pred_y, cmap=cm_bright, alpha=0.6)\n# plt.scatter(x.data.numpy()[:, 0], x.data.numpy()[:, 1], c=pred_y, s=100, lw=0, cmap='RdYlGn',marker='.')\n# accuracy = float((pred_y == target_y).astype(int).sum()) / float(target_y.size)\n# plt.text(1.5, -4, 'Accuracy=%.2f' % accuracy, fontdict={'size': 20, 'color': 'red'})\n# plt.pause(0.1)\n# #print(pred_y)\n# plt.show()\n" ]
[ [ "tensorflow.matmul", "numpy.random.normal", "tensorflow.contrib.learn.DNNClassifier", "tensorflow.Variable", "numpy.arange", "sklearn.datasets.make_circles", "numpy.array", "matplotlib.pyplot.pcolormesh", "tensorflow.train.AdamOptimizer", "tensorflow.initialize_all_variables", "tensorflow.zeros", "tensorflow.Session", "matplotlib.pyplot.figure", "tensorflow.placeholder", "matplotlib.pyplot.show", "tensorflow.random.normal", "matplotlib.pyplot.xlabel", "numpy.linspace", "tensorflow.contrib.layers.real_valued_column", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.scatter", "matplotlib.colors.ListedColormap", "tensorflow.square" ] ]
huyalvchuan/CenterNet
[ "66ff00b618919f04b4f30ce9d37b7b11727b8c1d" ]
[ "src/lib/models/utils.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport torch\nimport torch.nn as nn\n\ndef _sigmoid(x):\n y = torch.clamp(x.sigmoid_(), min=1e-4, max=1-1e-4)\n return y\n\ndef _gather_feat(feat, ind, mask=None):\n dim = feat.size(2)\n ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim)\n feat = feat.gather(1, ind)\n if mask is not None:\n mask = mask.unsqueeze(2).expand_as(feat)\n feat = feat[mask]\n feat = feat.view(-1, dim)\n return feat\n\ndef _tranpose_and_gather_feat(feat, ind):\n feat = feat.permute(0, 2, 3, 1).contiguous()\n feat = feat.view(feat.size(0), -1, feat.size(3))\n feat = _gather_feat(feat, ind)\n return feat\n\ndef flip_tensor(x):\n return torch.flip(x, [3])\n # tmp = x.detach().cpu().numpy()[..., ::-1].copy()\n # return torch.from_numpy(tmp).to(x.device)\n\ndef flip_lr(x, flip_idx):\n tmp = x.detach().cpu().numpy()[..., ::-1].copy()\n shape = tmp.shape\n for e in flip_idx:\n tmp[:, e[0], ...], tmp[:, e[1], ...] = \\\n tmp[:, e[1], ...].copy(), tmp[:, e[0], ...].copy()\n return torch.from_numpy(tmp.reshape(shape)).to(x.device)\n\ndef flip_lr_off(x, flip_idx):\n tmp = x.detach().cpu().numpy()[..., ::-1].copy()\n shape = tmp.shape\n # 由于我们只有四个点17->4\n tmp = tmp.reshape(tmp.shape[0], 4, 2, \n tmp.shape[2], tmp.shape[3])\n tmp[:, :, 0, :, :] *= -1\n for e in flip_idx:\n tmp[:, e[0], ...], tmp[:, e[1], ...] = \\\n tmp[:, e[1], ...].copy(), tmp[:, e[0], ...].copy()\n return torch.from_numpy(tmp.reshape(shape)).to(x.device)" ]
[ [ "torch.flip" ] ]
Alexanders101/SPANet
[ "20731bb271b23f0746243e79203ff6b77556c852" ]
[ "spanet/network/symmetric_attention/symmetric_attention_base.py" ]
[ "from typing import List, Tuple\n\nimport torch\nfrom torch import nn\n\nfrom spanet.options import Options\nfrom spanet.network.utilities.group_theory import complete_indices, symmetry_group\n\n\n# noinspection SpellCheckingInspection\nclass SymmetricAttentionBase(nn.Module):\n WEIGHTS_INDEX_NAMES = \"ijklmn\"\n INPUT_INDEX_NAMES = \"xyzwuv\"\n DEFAULT_JET_COUNT = 16\n\n def __init__(self,\n options: Options,\n order: int,\n transformer_options: Tuple[int, int, int, float, str] = None,\n permutation_indices: List[Tuple[int, ...]] = None,\n attention_dim: int = None) -> None:\n super(SymmetricAttentionBase, self).__init__()\n\n self.attention_dim = attention_dim\n if attention_dim is None:\n self.attention_dim = options.hidden_dim\n\n self.permutation_indices = [] if permutation_indices is None else permutation_indices\n self.batch_size = options.batch_size\n self.features = options.hidden_dim\n self.order = order\n\n # Add any missing cycles to have a complete group\n self.permutation_indices = complete_indices(self.order, self.permutation_indices)\n self.permutation_group = symmetry_group(self.permutation_indices)\n self.no_identity_permutations = [p for p in self.permutation_group if sorted(p) != p]\n self.batch_no_identity_permutations = [(0,) + tuple(e + 1 for e in p) for p in self.no_identity_permutations]\n\n self.weights_scale = torch.sqrt(torch.scalar_tensor(self.features)) ** self.order\n" ]
[ [ "torch.scalar_tensor" ] ]
Milos9304/GasStationPrediction
[ "12d294d743e37cfdeb82553e4299a43ba76848f9" ]
[ "main.py" ]
[ "import pandas as pd\nfrom sklearn.neighbors import KNeighborsRegressor\nfrom sklearn.model_selection import KFold\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.svm import SVR\nfrom sklearn.metrics import classification_report, confusion_matrix, mean_squared_error\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport sys\n\nfrom perceptron import Perceptron\n\nSEED=2019\n\ndataset = pd.read_csv(\"GasStationsDataset.csv\")\nprint(\"Dataset entries: \" + str(dataset.shape[0]))\n\n#stations < 2500, parkings < 2500, traffic < 1500 \nX = dataset.iloc[: ,[6, 11, 14]]\n#4sq checking < 50\ny = dataset.iloc[:, 17]\n\nfig, ax = plt.subplots()\n\nplt.figure(figsize=(15, 10))\nsns.heatmap(dataset.corr(), annot=True)\n#plt.show()\n\nscaler = MinMaxScaler(feature_range=(0, 1))\nX = scaler.fit_transform(X)\nscores = []\n\ncolors = ['r', 'g', 'b']\n\nfig, ax = plt.subplots()\nfor i in range(3):\n ax.scatter(X[:,i], y, color=colors[i])\n#plt.show()\n\nscores=[]\n\ncv = KFold(n_splits=5, random_state=SEED, shuffle=True)\nfor train_index, test_index in cv.split(X):\n X_train, X_test, y_train, y_test = X[train_index], X[test_index], y[train_index], y[test_index]\n p=Perceptron(3)\n epochs = 0\n\n # Instantiate mse for the loop\n mse =999\n\n while (epochs < 1000):\n\n # Epoch cumulative error\n error = 0\n\n # For each set in the training_data\n for i in range(len(X_train)):\n\n # Calculate the result\n output = p.result(X_train[i,:])\n\n # Calculate the error\n iter_error = abs(y_train.as_matrix()[i] - output)\n # Add the error to the epoch error\n error += iter_error\n\n # Adjust the weights based on inputs and the error\n p.weight_adjustment(X_train[i,:], iter_error)\n #print(str(output)+\"c\")\n #print(p.result(X_train[i,:]))\n\n # Calculate the MSE - epoch error / number of sets\n mse = float(error/len(X_train))\n\n # Print the MSE for each epoch\n #print(\"The MSE of %d epochs is %.10f\" + str(epochs) + \" \" + str(mse))\n\n # Every 100 epochs show the weight values\n #if epochs % 100 == 0:\n # print(\"0: %.10f - 1: %.10f - 2: %.10f - 3: %.10f\" % (p.w[0], p.w[1], p.w[2], p.w[3]))\n\n # Increment the epoch number\n epochs += 1\n #print(\"Train Index: \", train_index, \"\\n\")\n #print(\"Test Index: \", test_index)\n #best_svr = SVR(kernel='linear')\n error=0\n for i in range(len(X_test)):\n output = p.result(X_test[i,:])\n error += abs(y_test.as_matrix()[i]-output)\n score = float(error/len(X_test))\n scores.append(score)\n print(\"Fold score: \" + str(score))\nprint(\"Mean score: \" + str(np.mean(scores)))\n" ]
[ [ "matplotlib.pyplot.subplots", "matplotlib.pyplot.figure", "numpy.mean", "sklearn.preprocessing.MinMaxScaler", "sklearn.model_selection.KFold", "pandas.read_csv" ] ]
erdogant/clusteval
[ "618e8133bb4bc2eb23f0d9ff83f944aa3ebe2aad" ]
[ "clusteval/examples.py" ]
[ "# EXAMPLE\nfrom sklearn.datasets import make_blobs\nimport clusteval\nprint(clusteval.__version__)\nprint(dir(clusteval))\n\nfrom clusteval import clusteval\n\n# %%\nfrom sklearn.datasets import make_blobs\n\nfrom clusteval import clusteval\nce = clusteval(cluster='dbscan')\nX, labels_true = make_blobs(n_samples=50, centers=[[1, 1], [-1, -1], [1, -1]], cluster_std=0.4,random_state=0)\n\n# Import library\nfrom clusteval import clusteval\n# Set the method\nce = clusteval(cluster='hdbscan')\n# Evaluate\nresults = ce.fit(X)\nce.plot()\n\n# %% Check\nfrom sklearn.datasets import make_blobs\n\nfrom clusteval import clusteval\nce = clusteval(cluster='dbscan')\nX, labels_true = make_blobs(n_samples=50, centers=[[1, 1], [-1, -1], [1, -1]], cluster_std=0.4,random_state=0)\nresults = ce.fit(X)\nce.plot()\nce.scatter(X)\ncluster_labels = results['labx']\n\n# %% Example with titanic dataset and one-hot array\nimport clusteval\ndf = clusteval.import_example()\ndel df['PassengerId']\nimport df2onehot\ndfhot = df2onehot.df2onehot(df, excl_background='0.0')['onehot']\nX = dfhot.values\n\nfrom clusteval import clusteval\nce = clusteval(cluster='agglomerative', metric='hamming', linkage='complete', min_clust=7, verbose=3)\n# ce = clusteval(cluster='dbscan', metric='hamming', linkage='complete', min_clust=7, verbose=3)\nresults = ce.fit(X)\nce.plot()\nce.scatter(X)\n\n# %%\n# from s_dbw import S_Dbw\n# score = S_Dbw(X, df['Survived'].values, centers_id=None, method='Tong', alg_noise='bind',centr='mean', nearest_centr=True, metric='euclidean')\n\n# from s_dbw import SD\n# score = SD(X, df['Survived'].values, k=5, centers_id=None, alg_noise='bind',centr='mean', nearest_centr=True, metric='euclidean')\n\n# %% Example with textual data\n# import clusteval\n# df = clusteval.import_example(data='retail')\n# corpus = df['Description'].values\n\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.decomposition import TruncatedSVD\nfrom sklearn.manifold import TSNE\nfrom sklearn.preprocessing import Normalizer\nfrom sklearn.pipeline import make_pipeline\n\ncorpus = ['This is the first document.',\n 'This document is the second document.',\n 'And this is the third one.',\n 'Is this the first document?',\n 'This about cats',\n 'This about red cars',\n 'hello world',\n ]\n\nvectorizer = TfidfVectorizer()\nX = vectorizer.fit_transform(corpus)\nprint(vectorizer.get_feature_names())\n\nsvd = TruncatedSVD(n_components=2)\nnormalizer = Normalizer(copy=False, norm='l2')\nlsa = make_pipeline(svd, normalizer)\nX = lsa.fit_transform(X)\n\nfrom clusteval import clusteval\nce = clusteval(cluster='dbscan')\nce.fit(X)\nce.plot()\nce.scatter(X)\nce.dendrogram(labels=corpus)\n\n# %% Generate dataset\nX, labels_true = make_blobs(n_samples=50, centers=[[1, 1], [-1, -1], [1, -1]], cluster_std=0.4,random_state=0)\n# [X, labels_true] = make_blobs(n_samples=750, centers=[[1, 1], [-1, -1], [1, -1], [-1, 1]], cluster_std=0.4,random_state=0)\n# X, labels_true = make_blobs(n_samples=750, centers=4, n_features=6, cluster_std=0.5)\n# X, labels_true = make_blobs(n_samples=750, centers=6, n_features=10)\n\n# %% Silhouette\n# ce = clusteval(method='silhouette', metric='kmeans', savemem=True)\nfrom clusteval import clusteval\nce = clusteval(method='silhouette', verbose=3)\nresults = ce.fit(X)\nce.plot()\nce.scatter(X)\n\nresults = ce.dendrogram()\nresults = ce.dendrogram(max_d=9)\nresults = ce.dendrogram(X=X, linkage='single', metric='euclidean')\nresults = ce.dendrogram(X=X, linkage='single', metric='euclidean', 
max_d=0.8)\nresults = ce.dendrogram(X=X, linkage='complete', metric='euclidean', max_d=2)\nresults = ce.dendrogram(figsize=(15,8), show_contracted=True)\n\nresults['labx']\nresults['order_rows']\n\n# %% Silhouette\nfrom clusteval import clusteval\nce = clusteval(method='silhouette')\nresults = ce.fit(X)\nce.plot()\nce.scatter(X)\nresults_dendro = ce.dendrogram()\n\nfor i in zip(results['labx'], results_dendro['labx']):\n if not np.all(np.logical_and(np.where(results['labx']==i[0])[0]+1, np.where(results_dendro['labx']==i[1])[0]+1)):\n print('error')\n\n# %% dbindex\nfrom clusteval import clusteval\nce = clusteval(method='dbindex')\nresults = ce.fit(X)\nce.plot()\nce.scatter(X)\nresults_dendro = ce.dendrogram()\n\nresults['labx']\nresults_dendro['labx']\n\nfor i in zip(results['labx'], results_dendro['labx']):\n assert np.all(np.logical_and(np.where(results['labx']==i[0])[0]+1, np.where(results_dendro['labx']==i[1])[0]+1))\n\n# %% derivative\nfrom clusteval import clusteval\nce = clusteval(method='derivative')\nresults = ce.fit(X)\nce.plot()\nce.scatter(X)\nresults_dendro = ce.dendrogram()\n\nnp.unique(results_dendro['labx'])\nnp.unique(results['labx'])\n\nfor i in zip(results['labx'], results_dendro['labx']):\n assert np.all(np.logical_and(np.where(results['labx']==i[0])[0]+1, np.where(results_dendro['labx']==i[1])[0]+1))\n\n\n# %% dbscan\nfrom clusteval import clusteval\nce = clusteval(cluster='dbscan')\nresults = ce.fit(X)\nce.plot()\nce.scatter(X)\nresults_dendro = ce.dendrogram()\n\nfor i in zip(results['labx'], results_dendro['labx']):\n assert np.all(np.logical_and(np.where(results['labx']==i[0])[0]+1, np.where(results_dendro['labx']==i[1])[0]+1))\n\n# %% hdbscan\nfrom clusteval import clusteval\nce = clusteval(cluster='hdbscan')\nresults = ce.fit(X)\nce.plot()\nce.scatter(X)\nresults_dendro = ce.dendrogram(figsize=(15,8), orientation='top')\n\nfor i in zip(results['labx'], results_dendro['labx']):\n assert np.all(np.logical_and(np.where(results['labx']==i[0])[0]+1, np.where(results_dendro['labx']==i[1])[0]+1))\n\n# %% Directly use the dbindex method\nimport clusteval\nfrom sklearn.datasets import make_blobs\nX, labels_true = make_blobs(n_samples=750, centers=6, n_features=10)\n\n# dbindex\nresults = clusteval.dbindex.fit(X)\nfig,ax = clusteval.dbindex.plot(results)\n\n# silhouette\nresults = clusteval.silhouette.fit(X)\nfig,ax = clusteval.silhouette.plot(results)\n\n# derivative\nresults = clusteval.derivative.fit(X)\nfig,ax = clusteval.derivative.plot(results)\n\n# dbscan\nresults = clusteval.dbscan.fit(X)\nfig,ax1,ax2 = clusteval.dbscan.plot(results)\n" ]
[ [ "sklearn.preprocessing.Normalizer", "sklearn.datasets.make_blobs", "sklearn.pipeline.make_pipeline", "sklearn.feature_extraction.text.TfidfVectorizer", "sklearn.decomposition.TruncatedSVD" ] ]
melorian94/minerl_imitation_learning
[ "3297909ab85a562da304175c7ab126849ad75070" ]
[ "model.py" ]
[ "import torch\nfrom torch import nn\nfrom torch.nn import functional as F\n\n\nimport math\n\n\nclass Network(nn.Module):\n def __init__(self, num_actions, image_channels, vec_size, cnn_module, hidden_size=256,\n dueling=True, double_channels=False):\n super().__init__()\n\n self.num_actions = num_actions\n self.dueling = dueling\n\n self.cnn = cnn_module(image_channels)\n\n self.conv_output_size = self.cnn.output_size\n self.fc_im = nn.Linear(self.conv_output_size, hidden_size)\n\n if not double_channels:\n vec_channel_size = 128\n else:\n vec_channel_size = 256\n\n self.fc_vec = nn.Linear(vec_size, vec_channel_size)\n\n self.fc_h_a = nn.Linear(hidden_size + vec_channel_size, hidden_size)\n \n self.fc_a = nn.Linear(hidden_size, num_actions)\n\n if self.dueling:\n self.fc_h_v = nn.Linear(hidden_size + vec_channel_size, hidden_size)\n self.fc_v = nn.Linear(hidden_size, 1)\n\n def forward(self, x, vec):\n x = self.cnn(x)\n x = x.view(-1, self.conv_output_size)\n x = self.fc_im(x)\n vec = self.fc_vec(vec)\n\n x = F.relu(torch.cat((x, vec), 1))\n\n output = self.fc_a(F.relu(self.fc_h_a(x)))\n \n if self.dueling:\n v = self.fc_v(F.relu(self.fc_h_v(x)))\n output = v + output - output.mean(1, keepdim=True)\n\n return output\n\n\nclass AtariCNN(nn.Module):\n def __init__(self, input_channels):\n super().__init__()\n self.conv_layers = nn.Sequential(nn.Conv2d(input_channels, 32, 8, stride=4, padding=0),\n nn.ReLU(),\n nn.Conv2d(32, 64, 4, stride=2, padding=0),\n nn.ReLU(),\n nn.Conv2d(64, 64, 3, stride=1, padding=0),\n nn.ReLU())\n\n self.output_size = 64 * 4 * 4\n\n def forward(self, x):\n return self.conv_layers(x)\n\n\nclass ImpalaResNetCNN(nn.Module):\n class _ImpalaResidual(nn.Module):\n\n def __init__(self, depth):\n super().__init__()\n self.conv1 = nn.Conv2d(depth, depth, 3, padding=1)\n self.conv2 = nn.Conv2d(depth, depth, 3, padding=1)\n\n def forward(self, x):\n out = F.relu(x)\n out = self.conv1(out)\n out = F.relu(out)\n out = self.conv2(out)\n return out + x\n\n def __init__(self, input_channels):\n super().__init__()\n depth_in = input_channels\n layers = []\n for depth_out in [32, 64, 64]:\n layers.extend([\n nn.Conv2d(depth_in, depth_out, 3, padding=1),\n nn.MaxPool2d(3, stride=2, padding=1),\n self._ImpalaResidual(depth_out),\n self._ImpalaResidual(depth_out),\n ])\n depth_in = depth_out\n self.conv_layers = nn.Sequential(*layers, nn.ReLU())\n self.output_size = math.ceil(64 / 8) ** 2 * depth_in\n\n def forward(self, x):\n return self.conv_layers(x)\n\n\nclass FixupResNetCNN(nn.Module):\n \"\"\"source: https://github.com/unixpickle/obs-tower2/blob/master/obs_tower2/model.py\"\"\"\n\n class _FixupResidual(nn.Module):\n def __init__(self, depth, num_residual):\n super().__init__()\n self.conv1 = nn.Conv2d(depth, depth, 3, padding=1, bias=False)\n self.conv2 = nn.Conv2d(depth, depth, 3, padding=1, bias=False)\n for p in self.conv1.parameters():\n p.data.mul_(1 / math.sqrt(num_residual))\n for p in self.conv2.parameters():\n p.data.zero_()\n self.bias1 = nn.Parameter(torch.zeros([depth, 1, 1]))\n self.bias2 = nn.Parameter(torch.zeros([depth, 1, 1]))\n self.bias3 = nn.Parameter(torch.zeros([depth, 1, 1]))\n self.bias4 = nn.Parameter(torch.zeros([depth, 1, 1]))\n self.scale = nn.Parameter(torch.ones([depth, 1, 1]))\n\n def forward(self, x):\n x = F.relu(x)\n out = x + self.bias1\n out = self.conv1(out)\n out = out + self.bias2\n out = F.relu(out)\n out = out + self.bias3\n out = self.conv2(out)\n out = out * self.scale\n out = out + self.bias4\n return out + x\n\n def __init__(self, 
input_channels, double_channels=False):\n super().__init__()\n depth_in = input_channels\n\n layers = []\n if not double_channels:\n channel_sizes = [32, 64, 64]\n else:\n channel_sizes = [64, 128, 128]\n for depth_out in channel_sizes:\n layers.extend([\n nn.Conv2d(depth_in, depth_out, 3, padding=1),\n nn.MaxPool2d(3, stride=2, padding=1),\n self._FixupResidual(depth_out, 8),\n self._FixupResidual(depth_out, 8),\n ])\n depth_in = depth_out\n layers.extend([\n self._FixupResidual(depth_in, 8),\n self._FixupResidual(depth_in, 8),\n ])\n self.conv_layers = nn.Sequential(*layers, nn.ReLU())\n self.output_size = math.ceil(64 / 8) ** 2 * depth_in\n\n def forward(self, x):\n return self.conv_layers(x)\n" ]
[ [ "torch.nn.Linear", "torch.zeros", "torch.cat", "torch.nn.MaxPool2d", "torch.ones", "torch.nn.ReLU", "torch.nn.Conv2d", "torch.nn.functional.relu" ] ]
quepas/ScenarioDomainModel
[ "ee351b7bd3629af0c1d1d800dcc5ac5426b9b804" ]
[ "domain_model/model.py" ]
[ "\"\"\" Class Model\n\nCreation date: 2018 10 30\nAuthor(s): Erwin de Gelder\n\nModifications:\n2018 11 05: Make code PEP8 compliant.\n2018 11 07: Make seperate classes for each type of model. Model itself becomes abstract class.\n2018 11 12: Add fit method to all models.\n2018 11 19: Make it possible to instantiate models from JSON code.\n2019 01 14: Add optional parameters tstart=0 and tend=2 to get_state and get_state_dor.\n2019 10 13: Update of terminology.\n2019 11 04: Add constant model.\n2020 08 15: Make sure that each model has the functions `get_state`, `get_state_dot`, and `fit`.\n2020 08 23: Enable the evaluation of the model at given time instants.\n2020 08 24: Spline model added.\n2020 10 02: Make Model a subclass of QualitativeElement.\n2020 10 04: Change way of creating object from JSON code.\n2020 10 30: For using options for the fit functions, use **kwargs instead of options.\n2020 11 06: Add Model MultiBSplines.\n\"\"\"\n\nimport sys\nfrom abc import abstractmethod\nimport numpy as np\nfrom scipy.interpolate import splrep, splev\nfrom .qualitative_element import QualitativeElement, _qualitative_element_props_from_json\nfrom .scenario_element import DMObjects, _object_from_json\n\n\nclass Model(QualitativeElement):\n \"\"\" Model\n\n Parameter Model describes the relation between the states variables and the\n parameters that specify an activity.\n\n Example :\n x = a * t\n\n In this case,\n - a is a parameter that should be also described for the activity.\n - x is a state variable of the activity.\n - t is from the timeline of an activity.\n It is assumed that the time t runs from 0 to 1.\n\n Attributes:\n modelname(str): The name of the model which is used to describe the\n relation between the state and time.\n default_options(dict): Dictionary with the default options that are used\n for fitting data to the model.\n uid (int): A unique ID.\n name (str): A name that serves as a short description of the actor\n category.\n tags (List[Tag]): The tags are used to determine whether a scenario\n category comprises a scenario.\n description(str): A string that qualitatively describes this thing.\n \"\"\"\n @abstractmethod\n def __init__(self, modelname: str, **kwargs):\n self._modelname = modelname\n self.default_options = dict()\n QualitativeElement.__init__(self, **kwargs)\n\n @abstractmethod\n def get_state(self, pars: dict, time: np.ndarray) -> np.ndarray:\n \"\"\" Return state vector.\n\n The state is calculated based on the provided parameters. 
The default\n time of the model is always on the interval [0, 1], so any time values\n outside this interval can be regarded as extrapolation.\n\n :param pars: A dictionary with the parameters.\n :param time: Time instances at which the model is to be evaluated.\n :return: Numpy array with the state.\n \"\"\"\n\n @abstractmethod\n def get_state_dot(self, pars: dict, time: np.ndarray) -> np.ndarray:\n \"\"\" Return the derivative of the state vector.\n\n The state derivative is calculated based on the provided parameters.\n The default time of the model is always on the interval [0, 1], so any\n time values outside this interval can be regarded as extrapolation.\n\n :param pars: A dictionary with the parameters.\n :param time: Time instances at which the model is to be evaluated.\n :return: Numpy array with the derivative of the state.\n \"\"\"\n\n @abstractmethod\n def fit(self, time: np.ndarray, data: np.ndarray, **kwargs) -> dict:\n \"\"\" Fit the data to the model and return the parameters\n\n The data is to be fit to the model and the resulting parameters are\n returned using a dictionary. The input data needs to be a n-by-m array,\n where n denotes the number of datapoints and each datapoint has\n dimension m. The time should be an vector with length of n.\n\n :param time: the time instants of the data.\n :param data: the data that will be fit to the model.\n :param kwargs: specify some model-specific options.\n :return: dictionary of the parameters.\n \"\"\"\n\n def to_json(self) -> dict:\n model = QualitativeElement.to_json(self)\n model[\"modelname\"] = self._modelname\n model[\"default_options\"] = self.default_options\n return model\n\n def _set_default_options(self, **kwargs) -> dict:\n # Make local copy in order to prevent changes to original.\n options = kwargs.copy()\n\n # Check for options that are not set by default --> this is an invalid options.\n for option in options:\n if option not in self.default_options:\n raise ValueError(\"Option '{:s}' is not a valid options.\".format(option))\n\n # Loop through the default options. If options is already set, then ignore it. If\n # options is not already set, then use the default options.\n for key, value in self.default_options.items():\n if key not in options.keys():\n options[key] = value\n return options\n\n\nclass Constant(Model):\n \"\"\" Constant model\n\n The output is a constant value. Parameters: xstart.\n \"\"\"\n def __init__(self, **kwargs):\n Model.__init__(self, \"Constant\", **kwargs)\n\n def get_state(self, pars: dict, time: np.ndarray) -> np.ndarray:\n return np.ones(len(time))*pars[\"xstart\"]\n\n def get_state_dot(self, pars: dict, time: np.ndarray) -> np.ndarray:\n return np.zeros(len(time))\n\n def fit(self, time: np.ndarray, data: np.ndarray, **kwargs) -> dict:\n return dict(xstart=np.mean(data))\n\n\nclass Linear(Model):\n \"\"\" Linear model\n\n Linear relation between time and state. Parameters: xstart, xend.\n\n As options for fitting, the \"method\" can be passed. 
Two different methods\n are possible:\n - least_squares: make a least squares fit (default).\n - endpoints: Only the starting point and the end point are used to fit\n the model.\n \"\"\"\n def __init__(self, endpoints=False, **kwargs):\n Model.__init__(self, \"Linear\", **kwargs)\n self.default_options = dict(endpoints=endpoints)\n\n def get_state(self, pars: dict, time: np.ndarray) -> np.ndarray:\n return pars[\"xstart\"] + time*(pars[\"xend\"] - pars[\"xstart\"])\n\n def get_state_dot(self, pars: dict, time: np.ndarray) -> np.ndarray:\n return np.ones(len(time)) * (pars[\"xend\"] - pars[\"xstart\"])\n\n def fit(self, time: np.ndarray, data: np.ndarray, **kwargs) -> dict:\n # Set the options correctly\n options = Model._set_default_options(self, **kwargs)\n\n if not options[\"endpoints\"]:\n # Use least squares regression to find the slope of the linear line.\n matrix = np.array([time, np.ones(len(time))]).T\n regression_result = np.linalg.lstsq(matrix, data, rcond=None)[0]\n time_begin = np.min(time)\n time_end = np.max(time)\n return {\"xstart\": regression_result[0]*time_begin + regression_result[1],\n \"xend\": regression_result[0]*time_end + regression_result[1]}\n\n # Use the end points of the data to fit the linear line.\n index_begin = np.argmin(time)\n index_end = np.argmax(time)\n return {\"xstart\": data[index_begin], \"xend\": data[index_end]}\n\n\nclass Sinusoidal(Model):\n \"\"\" Sinusoidal model\n\n A sinusoidal model. Parameters: xstart, xend.\n \"\"\"\n def __init__(self, **kwargs):\n Model.__init__(self, \"Sinusoidal\", **kwargs)\n\n def get_state(self, pars: dict, time: np.ndarray) -> np.ndarray:\n offset = (pars[\"xstart\"] + pars[\"xend\"]) / 2\n amplitude = (pars[\"xstart\"] - pars[\"xend\"]) / 2\n return amplitude*np.cos(np.pi*time) + offset\n\n def get_state_dot(self, pars: dict, time: np.ndarray) -> np.ndarray:\n amplitude = (pars[\"xstart\"] - pars[\"xend\"]) / 2\n return -np.pi*amplitude*np.sin(np.pi*time)\n\n def fit(self, time: np.ndarray, data: np.ndarray, **kwargs) -> dict:\n # Normalize the time\n time_normalized = (time - np.min(time)) / (np.max(time) - np.min(time))\n\n # Use least squares regression to find the amplitude and the offset\n matrix = np.array([np.cos(np.pi*time_normalized), np.ones(len(time))]).T\n lstlq_fit = np.linalg.lstsq(matrix, data, rcond=None)[0]\n\n # Return the parameters\n return {\"xstart\": lstlq_fit[0] + lstlq_fit[1],\n \"xend\": lstlq_fit[1] - lstlq_fit[0]}\n\n\nclass Spline3Knots(Model):\n \"\"\" Spline model with 3 knots (one interior knot)\n\n Two third order splines are used.\n Parameters: a1, b1, c1, d1, a2, b2, c2, d2.\n\n For fitting, the options `endpoints` (default: False) can be set to True if\n the spline function should ensure that the start and end values are the same\n as for the provided data.\n \"\"\"\n def __init__(self, endpoints=False, **kwargs):\n Model.__init__(self, \"Spline3Knots\", **kwargs)\n self.default_options = dict(endpoints=endpoints)\n\n self.constraint_matrix = np.array([[1, 2, 4, 8, -1, -2, -4, -8],\n [3, 4, 4, 0, -3, -4, -4, 0],\n [3, 2, 0, 0, -3, -2, 0, 0]])\n self.constraint_matrix_endpoints = np.concatenate((self.constraint_matrix,\n [[0, 0, 0, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 1, 1, 1, 1]]))\n self.vh_matrix = np.linalg.svd(self.constraint_matrix)[2]\n self.usvh_endpoints = np.linalg.svd(self.constraint_matrix_endpoints)\n\n def get_state(self, pars: dict, time: np.ndarray) -> np.ndarray:\n tdata1 = time[time < .5]\n tdata2 = time[time >= .5]\n ydata1 = (pars[\"a1\"]*tdata1**3 + 
pars[\"b1\"]*tdata1**2 + pars[\"c1\"]*tdata1 + pars[\"d1\"])\n ydata2 = (pars[\"a2\"]*tdata2**3 + pars[\"b2\"]*tdata2**2 + pars[\"c2\"]*tdata2 + pars[\"d2\"])\n return np.concatenate((ydata1, ydata2))\n\n def get_state_dot(self, pars: dict, time: np.ndarray) -> np.ndarray:\n tdata1 = time[time < .5]\n tdata2 = time[time >= .5]\n ydata1 = 3*pars[\"a1\"]*tdata1**2 + 2*pars[\"b1\"]*tdata1 + pars[\"c1\"]\n ydata2 = 3*pars[\"a2\"]*tdata2**2 + 2*pars[\"b2\"]*tdata2 + pars[\"c2\"]\n return np.concatenate((ydata1, ydata2))\n\n def fit(self, time: np.ndarray, data: np.ndarray, **kwargs) -> dict:\n options = self._set_default_options(**kwargs)\n\n # Normalize the time\n time_normalized = (time - np.min(time)) / (np.max(time) - np.min(time))\n\n # Create the matrix that will be used for the least squares regression\n matrix = np.array([time_normalized**3, time_normalized**2, time_normalized**1,\n np.ones(len(time_normalized))]).T\n matrix_left_spline = matrix.copy()\n matrix_left_spline[time_normalized >= 0.5] = 0\n matrix_right_spline = matrix.copy()\n matrix_right_spline[time_normalized < 0.5] = 0\n matrix = np.concatenate((matrix_left_spline, matrix_right_spline), axis=1)\n\n # Construct the constraint matrix, 3 constraints, 8 coefficients\n if options[\"endpoints\"]:\n theta_default = np.dot(self.usvh_endpoints[2][:5].T,\n np.dot(self.usvh_endpoints[0].T,\n np.array([0, 0, 0, data[0], data[-1]])) /\n self.usvh_endpoints[1])\n theta_fit = np.linalg.lstsq(np.dot(matrix, self.usvh_endpoints[2][5:].T),\n data - np.dot(matrix, theta_default), rcond=None)[0]\n theta = np.dot(self.usvh_endpoints[2][5:].T, theta_fit) + theta_default\n else:\n theta_fit = np.linalg.lstsq(np.dot(matrix, self.vh_matrix[3:].T), data,\n rcond=None)[0]\n theta = np.dot(self.vh_matrix[3:].T, theta_fit)\n\n return dict(a1=theta[0], b1=theta[1], c1=theta[2], d1=theta[3],\n a2=theta[4], b2=theta[5], c2=theta[6], d2=theta[7])\n\n\nclass Splines(Model):\n \"\"\" Spline model with a variable number of knots.\n\n Model using the spline functionality of scipy's splrep and splev.\n Parameters: knots, coefficients, degree\n\n When using the fit-function, the following options can be used:\n - degree: the degree of the splines (default=3).\n - n_knots: the number of interior knots (default=3).\n The interior knots will be evenly distributed.\n \"\"\"\n def __init__(self, degree=3, n_knots=3, **kwargs):\n Model.__init__(self, \"Splines\", **kwargs)\n self.default_options = dict(degree=degree, n_knots=n_knots)\n\n def get_state(self, pars: dict, time: np.ndarray) -> np.ndarray:\n return splev(time, (pars[\"knots\"], pars[\"coefficients\"], pars[\"degree\"]))\n\n def get_state_dot(self, pars: dict, time: np.ndarray) -> np.ndarray:\n return splev(time, (pars[\"knots\"], pars[\"coefficients\"], pars[\"degree\"]), 1)\n\n def fit(self, time: np.ndarray, data: np.ndarray, **kwargs) -> dict:\n # Normalize the time\n time_normalized = (time - np.min(time)) / (np.max(time) - np.min(time))\n\n # Set options.\n options = self._set_default_options(**kwargs)\n\n # Set interior knots.\n knots = np.arange(1, options[\"n_knots\"]+1) / (options[\"n_knots\"] + 1)\n\n # Compute spline coefficients.\n tck = splrep(time_normalized, data, k=options[\"degree\"], t=knots)\n pars = dict(knots=tck[0].tolist(), coefficients=tck[1].tolist(), degree=tck[2])\n return pars\n\n\nclass MultiBSplines(Model):\n \"\"\" BSplines, dealing with multivariate data. 
\"\"\"\n def __init__(self, dimension: int, degree=3, n_knots=3, **kwargs):\n Model.__init__(self, \"MultiBSplines\", **kwargs)\n\n # Initialize the b-splines.\n self.dimension = dimension\n self.default_options = dict(dimension=dimension, degree=degree, n_knots=n_knots)\n self.spline = Splines(degree=degree, n_knots=n_knots)\n\n def fit(self, time: np.ndarray, data: np.ndarray, **kwargs):\n # Set data correctly.\n n_data = len(time)\n if data.shape == (n_data, self.dimension):\n data = data.T\n elif not data.shape == (self.dimension, n_data):\n raise ValueError(\"Data should be n-by-d or d-by-n, where d is the provided dimension.\")\n\n # Loop through the different dimensions.\n all_pars = [self.spline.fit(time, data[i], **kwargs) for i in range(self.dimension)]\n pars = dict(coefficients=[par['coefficients'] for par in all_pars],\n knots=[par[\"knots\"] for par in all_pars],\n degree=[par[\"degree\"] for par in all_pars])\n\n return pars\n\n def get_state(self, pars: dict, time: np.ndarray = None) -> np.ndarray:\n result = np.array([self.spline.get_state(dict(coefficients=pars[\"coefficients\"][i],\n degree=pars[\"degree\"][i],\n knots=pars[\"knots\"][i]),\n time)\n for i in range(self.dimension)])\n return result\n\n def get_state_dot(self, pars: dict, time: np.ndarray = None) -> np.ndarray:\n result = np.array([self.spline.get_state_dot(dict(coefficients=pars[\"coefficients\"][i],\n degree=pars[\"degree\"][i],\n knots=pars[\"knots\"][i]),\n time)\n for i in range(self.dimension)])\n return result\n\n\ndef _model_props_from_json(json: dict) -> dict:\n props = json[\"default_options\"]\n props.update(_qualitative_element_props_from_json(json))\n return props\n\n\ndef _model_from_json(\n json: dict,\n attribute_objects: DMObjects # pylint: disable=unused-argument\n) -> Model:\n return getattr(sys.modules[__name__], json[\"modelname\"])(**_model_props_from_json(json))\n\n\ndef model_from_json(json: dict, attribute_objects: DMObjects = None) -> Model:\n \"\"\" Get Model object from JSON code\n\n It is assumed that the JSON code of the Model is created using\n Model.to_json().\n\n :param json: JSON code of Model.\n :param attribute_objects: A structure for storing all objects (optional).\n :return: Model object.\n \"\"\"\n return _object_from_json(json, _model_from_json, \"model\", attribute_objects)\n" ]
[ [ "numpy.concatenate", "numpy.max", "numpy.array", "numpy.sin", "numpy.dot", "numpy.argmin", "scipy.interpolate.splrep", "numpy.min", "numpy.mean", "numpy.linalg.lstsq", "numpy.argmax", "numpy.arange", "numpy.linalg.svd", "numpy.cos", "scipy.interpolate.splev" ] ]
jaberg/scikits-image
[ "eccc41907135cf81b99c4be18a480a9bc705485d", "eccc41907135cf81b99c4be18a480a9bc705485d" ]
[ "skimage/transform/tests/test_hough_transform.py", "skimage/filter/setup.py" ]
[ "import numpy as np\nfrom numpy.testing import *\n\nimport skimage.transform as tf\nimport skimage.transform.hough_transform as ht\nfrom skimage.transform import probabilistic_hough\n\n\ndef append_desc(func, description):\n \"\"\"Append the test function ``func`` and append\n ``description`` to its name.\n \"\"\"\n func.description = func.__module__ + '.' + func.__name__ + description\n\n return func\n\nfrom skimage.transform import *\n\n\ndef test_hough():\n # Generate a test image\n img = np.zeros((100, 100), dtype=int)\n for i in range(25, 75):\n img[100 - i, i] = 1\n\n out, angles, d = tf.hough(img)\n\n y, x = np.where(out == out.max())\n dist = d[y[0]]\n theta = angles[x[0]]\n\n assert_equal(dist > 70, dist < 72)\n assert_equal(theta > 0.78, theta < 0.79)\n\n\ndef test_hough_angles():\n img = np.zeros((10, 10))\n img[0, 0] = 1\n\n out, angles, d = tf.hough(img, np.linspace(0, 360, 10))\n\n assert_equal(len(angles), 10)\n\n\ndef test_py_hough():\n ht._hough, fast_hough = ht._py_hough, ht._hough\n\n yield append_desc(test_hough, '_python')\n yield append_desc(test_hough_angles, '_python')\n\n tf._hough = fast_hough\n\n\ndef test_probabilistic_hough():\n # Generate a test image\n img = np.zeros((100, 100), dtype=int)\n for i in range(25, 75):\n img[100 - i, i] = 100\n img[i, i] = 100\n # decrease default theta sampling because similar orientations may confuse\n # as mentioned in article of Galambos et al\n theta = np.linspace(0, np.pi, 45)\n lines = probabilistic_hough(img, theta=theta, threshold=10, line_length=10,\n line_gap=1)\n # sort the lines according to the x-axis\n sorted_lines = []\n for line in lines:\n line = list(line)\n line.sort(key=lambda x: x[0])\n sorted_lines.append(line)\n assert([(25, 75), (74, 26)] in sorted_lines)\n assert([(25, 25), (74, 74)] in sorted_lines)\n\n\nif __name__ == \"__main__\":\n run_module_suite()\n", "#!/usr/bin/env python\n\nimport os\nfrom skimage._build import cython\n\nbase_path = os.path.abspath(os.path.dirname(__file__))\n\n\ndef configuration(parent_package='', top_path=None):\n from numpy.distutils.misc_util import Configuration, get_numpy_include_dirs\n\n config = Configuration('filter', parent_package, top_path)\n config.add_data_dir('tests')\n\n cython(['_ctmf.pyx'], working_path=base_path)\n\n config.add_extension('_ctmf', sources=['_ctmf.c'],\n include_dirs=[get_numpy_include_dirs()])\n\n return config\n\nif __name__ == '__main__':\n from numpy.distutils.core import setup\n setup(maintainer='scikits-image Developers',\n author='scikits-image Developers',\n maintainer_email='[email protected]',\n description='Filters',\n url='https://github.com/scikits-image/scikits-image',\n license='SciPy License (BSD Style)',\n **(configuration(top_path='').todict())\n )\n" ]
[ [ "numpy.linspace", "numpy.zeros" ], [ "numpy.distutils.misc_util.Configuration", "numpy.distutils.misc_util.get_numpy_include_dirs" ] ]
XuYunqiu/DRN-WSOD-pytorch
[ "f08b97b912daf8a8ffd6f3ec5fd58c5026c08098" ]
[ "projects/WSL/wsl/data/datasets/builtin.py" ]
[ "# -*- coding: utf-8 -*-\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\n\n\"\"\"\nThis file registers pre-defined datasets at hard-coded paths, and their metadata.\n\nWe hard-code metadata for common datasets. This will enable:\n1. Consistency check when loading the datasets\n2. Use models on these standard datasets directly and run demos,\n without having to download the dataset annotations\n\nWe hard-code some paths to the dataset that's assumed to\nexist in \"./datasets/\".\n\nUsers SHOULD NOT use this file to create new dataset / metadata for new dataset.\nTo add new dataset, refer to the tutorial \"docs/DATASETS.md\".\n\"\"\"\n\nimport numpy as np\nimport os\n\nfrom detectron2.data.datasets.register_coco import register_coco_instances\n\n# fmt: off\nCLASS_NAMES = [\n \"aeroplane\", \"bicycle\", \"bird\", \"boat\", \"bottle\", \"bus\", \"car\", \"cat\",\n \"chair\", \"cow\", \"diningtable\", \"dog\", \"horse\", \"motorbike\", \"person\",\n \"pottedplant\", \"sheep\", \"sofa\", \"train\", \"tvmonitor\"\n]\n# fmt: on\n\n# ==== Predefined datasets and splits for Flickr ==========\n\n_PREDEFINED_SPLITS_WEB = {}\n_PREDEFINED_SPLITS_WEB[\"flickr\"] = {\n \"flickr_voc\": (\"flickr_voc/images\", \"flickr_voc/images.json\"),\n \"flickr_coco\": (\"flickr_coco/images\", \"flickr_coco/images.json\"),\n}\n\n\ndef register_all_web(root):\n for dataset_name, splits_per_dataset in _PREDEFINED_SPLITS_WEB.items():\n for key, (image_root, json_file) in splits_per_dataset.items():\n # Assume pre-defined datasets live in `./datasets`.\n register_coco_instances(\n key,\n _get_builtin_metadata(key),\n # TODO add COCO class_names\n os.path.join(root, json_file) if \"://\" not in json_file else json_file,\n os.path.join(root, image_root),\n )\n\n\ndef register_all_voc_sbd(root):\n for key, (image_root, json_file) in _PREDEFINED_SPLITS_VOC_SBD.items():\n # Assume pre-defined datasets live in `./datasets`.\n register_coco_instances(\n key,\n _get_builtin_metadata(key),\n os.path.join(root, json_file) if \"://\" not in json_file else json_file,\n os.path.join(root, image_root),\n )\n\n\ndef uint82bin(n, count=8):\n \"\"\"returns the binary of integer n, count refers to amount of bits\"\"\" \"\"\n return \"\".join([str((n >> y) & 1) for y in range(count - 1, -1, -1)])\n\n\ndef labelcolormap(N):\n cmap = np.zeros((N, 3), dtype=np.uint8)\n for i in range(N):\n r = 0\n g = 0\n b = 0\n id = i\n for j in range(7):\n str_id = uint82bin(id)\n r = r ^ (np.uint8(str_id[-1]) << (7 - j))\n g = g ^ (np.uint8(str_id[-2]) << (7 - j))\n b = b ^ (np.uint8(str_id[-3]) << (7 - j))\n id = id >> 3\n cmap[i, 0] = r\n cmap[i, 1] = g\n cmap[i, 2] = b\n return cmap\n\n\ndef _get_builtin_metadata(dataset_name):\n # thing_ids = [i for i, k in enumerate(CLASS_NAMES)]\n # thing_dataset_id_to_contiguous_id = {k: i for i, k in enumerate(thing_ids)}\n thing_classes = CLASS_NAMES\n thing_colors = labelcolormap(len(CLASS_NAMES))\n ret = {\n # \"thing_dataset_id_to_contiguous_id\": thing_dataset_id_to_contiguous_id,\n \"thing_classes\": thing_classes,\n \"thing_colors\": thing_colors,\n }\n return ret\n\n\n# Register them all under \"./datasets\"\n_root = os.getenv(\"wsl_DATASETS\", \"datasets\")\nregister_all_web(_root)\n" ]
[ [ "numpy.uint8", "numpy.zeros" ] ]
datarevenue-berlin/sparsity
[ "cabac15148b473600ac61572152b28273bdb0158" ]
[ "sparsity/sparse_frame.py" ]
[ "# coding=utf-8\nimport functools\nimport traceback\nimport warnings\nfrom collections import OrderedDict\nfrom functools import partial, reduce\n\nimport numpy as np\nimport pandas as pd\nfrom pandas.api import types\nfrom pandas.core.common import _default_index\n\ntry:\n from pandas.indexes.base import _ensure_index\nexcept ImportError:\n from pandas.core.indexes.base import _ensure_index\nfrom sparsity.io_ import to_npz, read_npz, _just_read_array\nfrom scipy import sparse\n\nfrom sparsity.indexing import get_indexers_list\n\n\ndef _is_empty(data):\n if any(map(lambda x: x == 0, data.shape)):\n return True\n return False\n\n\ndef _append_zero_row(csr):\n return sparse.vstack(\n [csr,\n sparse.coo_matrix((1, csr.shape[1])).tocsr()]\n )\n\n\nclass SparseFrame(object):\n \"\"\" Two dimensional, size-mutable, homogenous tabular data structure with\n labeled axes (rows and columns). It adds pandas indexing abilities to a\n compressed row sparse frame based on scipy.sparse.csr_matrix. This makes\n indexing along the first axis extremely efficient and cheap. Indexing along\n the second axis should be avoided if possible though.\n\n For a distributed implementation see sparsity.dask.SparseFrame.\n \"\"\"\n\n def __init__(self, data, index=None, columns=None, **kwargs):\n \"\"\"Init SparseFrame\n\n Parameters\n ----------\n data: sparse.csr_matrix | np.ndarray | pandas.DataFrame\n Data to initialize matrix with. Can be one of above types, or\n anything accepted by sparse.csr_matrix along with the correct\n kwargs.\n index: pd.Index or array-like\n Index to use for resulting frame. Will default to RangeIndex if\n input data has no indexing information and no index provided.\n columns : pd.Index or array-like\n Column labels to use for resulting frame. Defaults like in index.\n \"\"\"\n if len(data.shape) > 2:\n raise ValueError(\"Only two dimensional data supported\")\n\n if len(data.shape) == 1 and isinstance(data, pd.Series):\n data = data.to_frame()\n\n elif len(data.shape) == 1:\n data = data.reshape(-1, 1)\n\n self.empty = False\n N, K = data.shape\n\n if index is None:\n self._index = _default_index(N)\n elif len(index) != N and data.size:\n if columns is not None:\n implied_axis_1 = len(columns)\n else:\n implied_axis_1 = data.shape[1]\n raise ValueError('Shape of passed values is {},'\n 'indices imply {}'\n .format(data.shape, (len(index), implied_axis_1)))\n else:\n self._index = _ensure_index(index)\n\n if columns is None:\n self._columns = _default_index(K)\n elif len(columns) != K and data.size:\n if index is not None:\n implied_axis_0 = len(index)\n else:\n implied_axis_0 = data.shape[0]\n raise ValueError('Shape of passed values is {},'\n 'indices imply {}'\n .format(data.shape, (implied_axis_0, len(columns))))\n else:\n self._columns = _ensure_index(columns)\n\n if not sparse.isspmatrix_csr(data):\n try:\n self._init_values(data,\n init_index=index is None,\n init_columns=columns is None,\n **kwargs)\n except TypeError:\n raise TypeError(traceback.format_exc() +\n \"\\nThe error described above occurred while \"\n \"converting data to sparse matrix.\")\n else:\n self.empty = True if _is_empty(data) else False\n self._init_csr(data)\n\n self.ndim = 2\n\n @classmethod\n def _create_indexer(cls, name, indexer):\n \"\"\"Create an indexer like _name in the class.\"\"\"\n if getattr(cls, name, None) is None:\n _v = tuple(map(int, pd.__version__.split('.')))\n if _v >= (0, 23, 0):\n _indexer = functools.partial(indexer, name)\n else:\n _indexer = functools.partial(indexer, name=name)\n 
setattr(cls, name, property(_indexer, doc=indexer.__doc__))\n\n def _init_values(self, data, init_index=True, init_columns=True, **kwargs):\n if isinstance(data, pd.DataFrame):\n self.empty = data.empty\n self._init_csr(sparse.csr_matrix(data.values))\n if init_index:\n self._index = _ensure_index(data.index)\n else:\n warnings.warn(\"Passed index explicitly while initializing \"\n \"from pd.DataFrame. Original DataFrame's index \"\n \"will be ignored.\", SyntaxWarning)\n if init_columns:\n self._columns = _ensure_index(data.columns)\n else:\n warnings.warn(\"Passed columns explicitly while initializing \"\n \"from pd.DataFrame. Original DataFrame's columns\"\n \" will be ignored.\", SyntaxWarning)\n elif _is_empty(data):\n self.empty = True\n self._data = sparse.csr_matrix((len(self.index),\n len(self.columns)))\n self.shape = self._data.shape\n else:\n sparse_data = sparse.csr_matrix(data, **kwargs)\n self._init_csr(sparse_data)\n\n def toarray(self):\n \"\"\"Return dense np.array representation.\"\"\"\n return self.todense(pandas=False)\n\n def todense(self, pandas=True):\n \"\"\"Return dense representation.\n\n Parameters\n ----------\n pandas: bool\n If true returns a pandas DataFrame (default),\n else a numpy array is returned.\n\n Returns\n -------\n dense: pd.DataFrame | np.ndarray\n dense representation\n \"\"\"\n if not self.empty:\n dense = np.asarray(self.data.toarray())\n else:\n dense = np.empty(shape=(0, len(self.columns)))\n\n if self.shape[0] == 1 or self.shape[1] == 1:\n dense = dense.reshape(-1)\n\n if pandas:\n if self.empty:\n dense = pd.DataFrame(np.empty(shape=self.shape),\n columns=self.columns,\n index=self._index[:0])\n if self.data.shape[1] == 1: # 1 empty column => empty Series\n dense = dense.iloc[:, 0]\n elif len(dense.shape) == 1 and \\\n self.data.shape[1] == 1: # 1 column => Series\n dense = pd.Series(dense, index=self.index,\n name=self.columns[0])\n elif len(dense.shape) == 1 and \\\n self.data.shape[1] > 1: # 1 row => DataFrame\n dense = pd.DataFrame(dense.reshape(1, -1), index=self.index,\n columns=self.columns)\n else: # 2+ cols and 2+ rows\n # need to copy, as broadcast_to returns read_only array\n idx = np.broadcast_to(self.index, dense.shape[0])\\\n .copy()\n dense = pd.DataFrame(dense, index=idx,\n columns=self.columns)\n return dense\n\n def _init_csr(self, csr):\n \"\"\"Keep a zero row at the end of the csr matrix for aligns.\"\"\"\n self.shape = csr.shape\n if not self.empty:\n self._data = _append_zero_row(csr)\n else:\n self._data = csr\n\n def _get_axis(self, axis):\n \"\"\"Rudimentary indexing support.\"\"\"\n if axis == 0:\n return self._index\n if axis == 1:\n return self._columns\n\n def sum(self, *args, **kwargs):\n \"\"\"Sum elements.\"\"\"\n return self.data.sum(*args, **kwargs)\n\n def mean(self, *args, **kwargs):\n \"\"\"Calculate mean(s).\"\"\"\n return self.data.mean(*args, **kwargs)\n\n def max(self, *args, **kwargs):\n \"\"\"Find maximum element(s).\"\"\"\n return self.data.max(*args, **kwargs)\n\n def min(self, *args, **kwargs):\n \"\"\"Find minimum element(s)\"\"\"\n return self.data.min(*args, **kwargs)\n\n def copy(self, *args, deep=True, **kwargs):\n \"\"\"Copy frame\n\n Parameters\n ----------\n args:\n are passed to indizes and values copy methods\n deep: bool\n if true (default) data will be copied as well.\n kwargs:\n are passed to indizes and values copy methods\n\n Returns\n -------\n copy: SparseFrame\n \"\"\"\n if deep:\n return SparseFrame(self.data.copy(*args, **kwargs),\n self.index.copy(*args, **kwargs),\n 
self.columns.copy(*args, **kwargs))\n else:\n return SparseFrame(self.data,\n self.index.copy(*args, **kwargs),\n self.columns.copy(*args, **kwargs))\n\n def multiply(self, other, axis='columns'):\n \"\"\"\n Multiply SparseFrame row-wise or column-wise.\n\n Parameters\n ----------\n other: array-like\n Vector of numbers to multiply columns/rows by.\n axis: int | str\n - 1 or 'columns' to multiply column-wise (default)\n - 0 or 'index' to multiply row-wise\n \"\"\"\n try:\n other = other.toarray()\n except AttributeError:\n pass\n\n if axis in [0, 'index']:\n other = np.asarray(other).reshape(-1, 1)\n elif axis in [1, 'columns']:\n other = np.asarray(other).reshape(1, -1)\n else:\n raise ValueError(\"Axis should be one of 0, 1, 'index', 'columns'.\")\n\n data = self.data.multiply(other)\n assert data.shape == self.data.shape, \\\n \"Data shapes mismatch: {}, {}\".format(data.shape, self.data.shape)\n return SparseFrame(data, self.index, self.columns)\n\n def nnz(self):\n \"\"\"Get the count of explicitly stored values (nonzeros).\"\"\"\n return self.data.nnz\n\n def sample(self, n=None, frac=None, replace=False, weights=None,\n random_state=None, axis=None):\n \"\"\"Return a random sample of items from an axis of object.\n \n This function mimics pandas' API, but doesn't use all the arguments.\n \"\"\"\n if (n is None) == (frac is None):\n raise ValueError(\"Please specify either `n` or `frac`.\")\n if weights is not None:\n raise NotImplementedError(\"`weights` argument is not supported.\")\n axis = axis or 0\n length = self.shape[axis]\n n = n or int(length * frac)\n \n rs = np.random.RandomState(random_state)\n idx = rs.choice(np.arange(length), n, replace=replace)\n return self.take(idx, axis=axis)\n\n def take(self, idx, axis=0, **kwargs):\n \"\"\"Return data at integer locations.\n\n Parameters\n ----------\n idx: array-like | int\n array of integer locations\n axis:\n which axis to index\n kwargs:\n not used\n\n Returns\n -------\n indexed: SparseFrame\n reindexed sparse frame\n \"\"\"\n if axis == 0:\n return SparseFrame(self.data[idx, :],\n index=self.index[idx],\n columns=self.columns)\n elif axis == 1:\n return SparseFrame(self.data[:, idx],\n index=self.index,\n columns=self.columns[idx])\n\n def _take(self, *args, **kwargs):\n \"\"\"\n This function is to mimic pandas api (0.21.0)\n and support indexing.\n\n See https://github.com/pandas-dev/pandas/commit/458c1dc81b7e6f90180b06179ac91d9ed868cb05\n \"\"\"\n return self.take(*args, **kwargs)\n\n def _xs(self, key, *args, axis=0, **kwargs):\n \"\"\"Used for label based indexing.\"\"\"\n if axis == 0:\n loc = self.index.get_loc(key)\n new_data = self.data[loc]\n return SparseFrame(new_data,\n index=[key] * new_data.shape[0],\n columns=self.columns)\n else:\n loc = self.columns.get_loc(key)\n new_data = self.data[:, loc]\n return SparseFrame(new_data,\n columns=[key] * new_data.shape[1],\n index=self.index)\n\n\n @property\n def index(self):\n \"\"\" Return index labels\n\n Returns\n -------\n index: pd.Index\n \"\"\"\n return self._index\n\n @property\n def columns(self):\n \"\"\" Return column labels\n\n Returns\n -------\n index: pd.Index\n \"\"\"\n return self._columns\n\n @property\n def data(self):\n \"\"\" Return data matrix\n\n Returns\n -------\n data: scipy.spar.csr_matrix\n \"\"\"\n if self.empty:\n return self._data\n return self._data[:-1, :]\n\n def groupby_agg(self, by=None, level=None, agg_func=None):\n \"\"\" Aggregate data using callable.\n\n The `by` and `level` arguments are mutually exclusive.\n\n Parameters\n 
----------\n by: array-like, string\n grouping array or grouping column name\n level: int\n which level from index to use if multiindex\n agg_func: callable\n Function which will be applied to groups. Must accept\n a SparseFrame and needs to return a vector of shape (1, n_cols).\n\n Returns\n -------\n sf: SparseFrame\n aggregated result\n \"\"\"\n by, cols = self._get_groupby_col(by, level)\n groups = pd.Index(np.arange(self.shape[0])).groupby(by)\n res = sparse.csr_matrix((len(groups), self.shape[1]))\n new_idx = []\n for i, (name, indices) in enumerate(groups.items()):\n new_idx.append(name)\n res[i] = agg_func(self.data[indices.values, :])\n res = SparseFrame(res, index=new_idx, columns=self.columns)\n return res[cols]\n\n def groupby_sum(self, by=None, level=0):\n \"\"\"Optimized sparse groupby sum aggregation.\n\n Simple operation using sparse matrix multiplication.\n Expects result to be sparse as well.\n\n The by and level arguments are mutually exclusive.\n\n Parameters\n ----------\n by: np.ndarray (optional)\n Alternative index.\n level: int\n Level of (multi-)index to group on.\n\n Returns\n -------\n df: sparsity.SparseFrame\n Grouped by and summed SparseFrame.\n \"\"\"\n by, cols = self._get_groupby_col(by, level)\n group_idx = by.argsort()\n gm = _create_group_matrix(by[group_idx])\n grouped_data = self._data[group_idx, :].T.dot(gm).T\n res = SparseFrame(grouped_data, index=np.unique(by),\n columns=self._columns)\n return res[cols]\n\n def _get_groupby_col(self, by, level):\n if by is None and level is None:\n raise ValueError(\"You have to supply one of 'by' and 'level'.\")\n other_cols = self._columns.tolist()\n if by is not None:\n try:\n if by in self._columns:\n other_cols.remove(by)\n by = self[by].toarray()\n except TypeError:\n assert len(by) == self.data.shape[0]\n by = np.array(by)\n else:\n if level and isinstance(self._index, pd.MultiIndex):\n by = self.index.get_level_values(level).values\n elif level > 0:\n raise ValueError(\n \"Cannot use level > 0 in a non-MultiIndex Frame.\")\n else: # level == 0\n by = np.asarray(self._index)\n return by, other_cols\n\n def join(self, other, axis=1, how='outer', level=None):\n \"\"\"\n Join two tables along their indices.\n\n Parameters\n ----------\n other: sparsity.SparseTable\n another SparseFrame\n axis: int\n along which axis to join\n how: str\n one of 'inner', 'outer', 'left', 'right'\n level: int\n if axis is MultiIndex, join using this level\n Returns\n -------\n joined: sparsity.SparseFrame\n \"\"\"\n if isinstance(self._index, pd.MultiIndex) \\\n or isinstance(other._index, pd.MultiIndex):\n raise NotImplementedError('MultiIndex not supported.')\n if not isinstance(other, SparseFrame):\n other = SparseFrame(other)\n if axis not in {0, 1}:\n raise ValueError(\"Axis mut be either 0 or 1.\")\n if axis == 0:\n if np.array_equal(other._columns.values, self._columns.values):\n # take short path if join axes are identical\n data = sparse.vstack([self.data, other.data])\n index = np.hstack([self.index, other.index])\n res = SparseFrame(data, index=index, columns=self._columns)\n else:\n data, new_index = _matrix_join(\n _append_zero_row(self.data.T.tocsr()),\n _append_zero_row(other.data.T.tocsr()),\n self._columns,\n other._columns,\n how=how,\n )\n res = SparseFrame(data.T.tocsr(),\n index=np.concatenate([self.index, other.index]),\n columns=new_index)\n elif axis == 1:\n if np.array_equal(self.index.values, other.index.values):\n # take short path if join axes are identical\n data = sparse.hstack([self.data, 
other.data])\n columns = np.hstack([self._columns, other._columns])\n res = SparseFrame(data, index=self.index, columns=columns)\n else:\n if other.empty:\n other_data = sparse.csr_matrix((1, other.shape[1]),\n dtype=other.data.dtype)\n else:\n other_data = other._data\n\n if self.empty:\n self_data = sparse.csr_matrix((1, self.shape[1]),\n dtype=self.data.dtype)\n else:\n self_data = self._data\n\n data, new_index = _matrix_join(self_data, other_data,\n self.index, other.index,\n how=how)\n res = SparseFrame(data,\n index=new_index,\n columns=np.concatenate([self._columns,\n other._columns]))\n else:\n raise ValueError('Axis must be either 0 or 1.')\n\n return res\n\n def __len__(self):\n return self.shape[0]\n\n def rename(self, columns, inplace=False):\n \"\"\"\n Rename columns by applying a callable to every column name.\n\n Parameters\n ----------\n columns: callable\n a callable that will accepts a column element and returns the\n new column label.\n inplace: bool\n if true the operation will be executed inplace\n\n Returns\n -------\n renamed: SparseFrame | None\n \"\"\"\n new_cols = self.columns.map(columns)\n if not inplace:\n return SparseFrame(self.data,\n index=self.index,\n columns=new_cols)\n else:\n self._columns = new_cols\n\n @property\n def values(self):\n \"\"\"CSR Matrix represenation of frame\"\"\"\n return self.data\n\n def sort_index(self):\n \"\"\"\n Sort table along index.\n\n Returns\n -------\n sorted: sparsity.SparseFrame\n \"\"\"\n passive_sort_idx = np.argsort(self._index)\n data = self._data[passive_sort_idx]\n index = self._index[passive_sort_idx]\n return SparseFrame(data, index=index, columns=self.columns)\n\n def fillna(self, value):\n \"\"\"Replace NaN values in explicitly stored data with `value`.\n\n Parameters\n ----------\n value: scalar\n Value to use to fill holes. value must be of same dtype as\n the underlying SparseFrame's data. If 0 is chosen\n new matrix will have these values eliminated.\n\n Returns\n -------\n filled: SparseFrame\n \"\"\"\n _data = self._data.copy()\n _data.data[np.isnan(self._data.data)] = value\n if value == 0:\n _data.eliminate_zeros()\n return SparseFrame(data=_data[:-1, :],\n index=self.index, columns=self.columns)\n\n def add(self, other, how='outer', fill_value=0, **kwargs):\n \"\"\"\n Aligned addition. Adds two tables by aligning them first.\n\n Parameters\n ----------\n other: sparsity.SparseFrame\n Another SparseFrame.\n how: str\n How to join frames along their indexes. Default is 'outer' which\n makes the result contain labels from both frames.\n fill_value: float\n Fill value if other frame is not exactly the same shape.\n For sparse data the only sensible fill value is 0. 
Passing\n any other value will result in a ValueError.\n\n Returns\n -------\n added: sparsity.SparseFrame\n \"\"\"\n if fill_value != 0:\n raise ValueError(\"Only 0 is accepted as fill_value \"\n \"for sparse data.\")\n assert np.all(self._columns == other.columns)\n data, new_idx = _aligned_csr_elop(self._data, other._data,\n self.index, other.index,\n how=how)\n # new_idx = self._index.join(other.index, how=how)\n res = SparseFrame(data, index=new_idx, columns=self._columns)\n return res\n\n def __sizeof__(self):\n return super().__sizeof__() + \\\n self._index.memory_usage(deep=True) + \\\n self._columns.memory_usage(deep=True) + \\\n self._data.data.nbytes + \\\n self._data.indptr.nbytes + self._data.indices.nbytes\n\n def _align_axis(self):\n raise NotImplementedError()\n\n def __repr__(self):\n nrows = min(5, self.shape[0])\n\n if len(self._columns) > 50:\n cols = self.columns[:25].append(self.columns[-25:])\n data = sparse.hstack([self.data[:nrows, :25],\n self.data[:nrows, -25:]])\n data = data.toarray()\n else:\n cols = self._columns\n data = self.data[:nrows, :].toarray()\n\n df = pd.DataFrame(data, columns=cols, index=self._index[:nrows])\n df_str = df.__repr__().splitlines()\n if df_str[-2] == '':\n df_str = df_str[:-2]\n\n sparse_str = \"[{nrows}x{ncols} SparseFrame of type '<class \" \\\n \"'{dtype}'>' \\n with {nnz} stored elements \" \\\n \"in Compressed Sparse Row format]\".format(\n nrows=self.shape[0],\n ncols=self.shape[1],\n dtype=self.data.dtype,\n nnz=self.data.nnz\n )\n repr = \"{data}\\n{sparse}\" \\\n .format(data='\\n'.join(df_str), sparse=sparse_str)\n return repr\n\n def __array__(self):\n return self.toarray()\n\n def head(self, n=1):\n \"\"\"Return rows from the top of the table.\n\n Parameters\n ----------\n n: int\n how many rows to return, default is 1\n\n Returns\n -------\n head: SparseFrame\n \"\"\"\n n = min(n, len(self._index))\n return pd.SparseDataFrame(self.data[:n, :].todense(),\n index=self.index[:n],\n columns=self.columns)\n\n def _slice(self, sliceobj):\n return SparseFrame(self.data[sliceobj, :],\n index=self.index[sliceobj],\n columns=self.columns)\n\n @classmethod\n def concat(cls, tables, axis=0):\n \"\"\"Concat a collection of SparseFrames along given axis.\n\n Uses join internally so it might not be very efficient.\n\n Parameters\n ----------\n tables: list\n a list of SparseFrames.\n axis:\n which axis to concatenate along.\n\n Returns\n -------\n\n \"\"\"\n func = partial(SparseFrame.join, axis=axis)\n return reduce(func, tables)\n\n def _ixs(self, key, axis=0):\n if axis != 0:\n raise NotImplementedError()\n new_idx = self.index[key]\n if not isinstance(new_idx, pd.Index):\n new_idx = [new_idx]\n return SparseFrame(self._data[key,:],\n index=new_idx,\n columns=self.columns)\n\n def assign(self, **kwargs):\n \"\"\"Assign new columns.\n\n Parameters\n ----------\n kwargs: dict\n Mapping from column name to values. Values must be of correct shape\n to be inserted successfully.\n\n Returns\n -------\n assigned: SparseFrame\n \"\"\"\n sf = self\n for key, value in kwargs.items():\n sf = sf._single_assign(key, value)\n return sf\n\n def __setitem__(self, key, value):\n if key in self.columns:\n raise NotImplementedError(\"Assigning to an existing column \"\n \"is currently not implemented. 
You can \"\n \"only assign values to new columns.\")\n new_cols, new_data = self._add_col(key, value)\n self._init_csr(new_data)\n self._columns = new_cols\n\n def _add_col(self, key, value):\n csc = self.data.tocsc()\n value = np.broadcast_to(np.atleast_1d(value), (self.shape[0],))\n val = value.reshape(-1, 1)\n new_data = sparse.hstack([csc, sparse.csc_matrix(val)]).tocsr()\n new_cols = self._columns.append(pd.Index([key]))\n return new_cols, new_data\n\n def _single_assign(self, key, value):\n if key in self.columns:\n raise NotImplementedError(\"Assigning to an existing column \"\n \"is currently not implemented. You can \"\n \"only assign values to new columns.\")\n new_cols, new_data = self._add_col(key, value)\n return SparseFrame(new_data, index=self.index, columns=new_cols)\n\n def drop(self, labels, axis=1):\n \"\"\"Drop label(s) from given axis.\n\n Currently works only for columns.\n\n Parameters\n ----------\n labels: array-like\n labels to drop from the columns\n axis: int\n only columns are supported atm.\n\n Returns\n -------\n df: SparseFrame\n \"\"\"\n if not isinstance(labels, (list, tuple, set)):\n labels = [labels]\n if axis == 1:\n mask = np.logical_not(self.columns.isin(labels))\n sf = self.loc[:, self.columns[mask].tolist()]\n else:\n raise NotImplementedError\n return sf\n\n def drop_duplicate_idx(self, **kwargs):\n \"\"\"Drop rows with duplicated index.\n\n Parameters\n ----------\n kwargs:\n kwds are passed to pd.Index.duplicated\n\n Returns\n -------\n dropped: SparseFrame\n \"\"\"\n mask = ~self.index.duplicated(**kwargs)\n return SparseFrame(self.data[mask], index=self.index.values[mask],\n columns=self.columns)\n\n def __getitem__(self, item):\n if item is None:\n raise ValueError('Cannot label index with a null key.')\n if not isinstance(item, (pd.Series, np.ndarray, pd.Index, list,\n tuple)):\n # TODO: tuple probably should be a separate case as in Pandas\n # where it is used with Multiindex\n item = [item]\n if len(item) > 0:\n indexer = self.loc._convert_to_indexer(item, axis=1)\n return self._take(indexer, axis=1)\n else:\n data = np.empty(shape=(self.shape[0], 0))\n return SparseFrame(data, index=self.index,\n columns=self.columns[[]])\n\n def dropna(self):\n \"\"\"Drop nans from index.\"\"\"\n mask = np.isnan(self.index.values)\n new_data = self.data[~mask, :]\n new_index = self.index.values[~mask]\n return SparseFrame(new_data, index=new_index, columns=self.columns)\n\n def set_index(self, column=None, idx=None, level=None, inplace=False):\n \"\"\"Set index from array, column or existing multi-index level.\n\n Parameters\n ----------\n column: str\n set index from existing column in data.\n idx: pd.Index, np.array\n Set the index directly with a pandas index object or array\n level: int\n set index from a multiindex level. 
useful for groupbys.\n inplace: bool\n perform data transformation inplace\n\n Returns\n -------\n sf: sp.SparseFrame | None\n the transformed sparse frame or None if inplace was True\n \"\"\"\n if column is None and idx is None and level is None:\n raise ValueError(\"Either column, idx or level should not be None\")\n elif idx is not None:\n assert len(idx) == self.data.shape[0]\n new_idx = idx\n elif level is not None and \\\n isinstance(self._index, pd.MultiIndex):\n new_idx = self.index.get_level_values(level)\n elif column is not None:\n new_idx = np.asarray(self.loc[:, column].data.todense()).reshape(-1)\n\n if inplace:\n self._index = _ensure_index(new_idx)\n else:\n return SparseFrame(self.data,\n index=new_idx,\n columns=self.columns)\n\n @classmethod\n def vstack(cls, frames):\n \"\"\"Vertical stacking given collection of SparseFrames.\"\"\"\n assert np.all([np.all(frames[0].columns == frame.columns)\n for frame in frames[1:]]), \"Columns don't match\"\n data = list(map(lambda x: x.data, frames))\n new_idx = frames[0].index\n for f in frames[1:]:\n new_idx = new_idx.append(f.index)\n return SparseFrame(sparse.vstack(data),\n index=new_idx,\n columns=frames[0].columns)\n\n @classmethod\n def read_npz(cls, filename, storage_options=None):\n \"\"\"Read from numpy npz format.\n\n Reads the sparse frame from a npz archive.\n Supports reading npz archives from remote locations\n with GCSFS and S3FS.\n\n Parameters\n ----------\n filename: str\n path or uri to location\n storage_options: dict\n further options for the underlying filesystem\n\n Returns\n -------\n sf: SparseFrame\n \"\"\"\n return cls(*read_npz(filename, storage_options))\n\n @property\n def axes(self):\n return [self.index, self.columns]\n\n def _get_axis_name(self, axis):\n try:\n return ['index', 'columns'][axis]\n except IndexError:\n raise ValueError('No axis named {} for {}'\n .format(axis, self.__class__))\n\n def _reindex_with_indexers(self, reindexers, **kwargs):\n \"\"\"allow_dups indicates an internal call here \"\"\"\n\n # reindex doing multiple operations on different axes if indicated\n new_data = self.copy()\n for axis in sorted(reindexers.keys()):\n index, indexer = reindexers[axis]\n\n if index is None:\n continue\n\n if axis == 0:\n new_mat = new_data.data[indexer, :]\n new_data = SparseFrame(new_mat, index=index,\n columns=self.columns)\n elif axis == 1:\n new_mat = new_data.data[:, indexer]\n new_data = SparseFrame(new_mat, columns=index,\n index=self.index)\n else:\n raise ValueError('Only supported axes are 0 and 1.')\n\n return new_data\n\n def reindex(self, labels=None, index=None, columns=None, axis=None,\n *args, **kwargs):\n \"\"\"Conform SparseFrame to new index.\n\n Missing values will be filled with zeroes.\n\n Parameters\n ----------\n labels: array-like\n New labels / index to conform the axis specified by ‘axis’ to.\n index, columns : array-like, optional\n New labels / index to conform to. Preferably an Index object to\n avoid duplicating data\n axis: int\n Axis to target. 
Can be either (0, 1).\n args, kwargs\n Will be passed to reindex_axis.\n\n Returns\n -------\n reindexed: SparseFrame\n \"\"\"\n\n if labels is not None and index is None and columns is None:\n if axis is None:\n axis = 0\n return self.reindex_axis(labels, axis=axis, *args, **kwargs)\n elif columns is not None and index is None:\n return self.reindex_axis(columns, axis=1, *args, **kwargs)\n elif columns is None and index is not None:\n return self.reindex_axis(index, axis=0, *args, **kwargs)\n elif columns is not None and index is not None:\n obj = self.reindex_axis(columns, axis=1, *args, **kwargs)\n return obj.reindex_axis(index, axis=0, *args, **kwargs)\n else:\n raise ValueError('Label parameter is mutually exclusive '\n 'with both index or columns')\n\n def reindex_axis(self, labels, axis=0, method=None,\n level=None, copy=True, limit=None, fill_value=0):\n \"\"\"Conform SparseFrame to new index.\n\n Missing values will be filled with zeros.\n\n Parameters\n ----------\n labels: array-like\n New labels / index to conform the axis specified by ‘axis’ to.\n axis: int\n Axis to target. Can be either (0, 1).\n method: None\n unsupported\n level: None\n unsupported\n copy: None\n unsupported\n limit: None\n unsupported\n fill_value: None\n unsupported\n\n Returns\n -------\n reindexed: SparseFrame\n \"\"\"\n if method is not None \\\n or not copy \\\n or level is not None \\\n or fill_value != 0 \\\n or limit is not None:\n raise NotImplementedError(\n 'Error only labels, index, columns and/or axis are supported')\n if axis == 0:\n self.index._can_reindex(labels)\n reindex_axis = 'index'\n other_axis = 'columns'\n new_index, idx = self.index.reindex(labels)\n if idx is None:\n return self.copy()\n new_data = self._data[idx]\n elif axis == 1:\n self.columns._can_reindex(labels)\n reindex_axis = 'columns'\n other_axis = 'index'\n new_index, idx = self.columns.reindex(labels)\n if idx is None:\n return self.copy()\n new_data = self._data.T[idx].T\n if not self.empty:\n # we have a hidden zero column to replace missing indices (-1)\n new_data = new_data[:-1]\n else:\n raise ValueError(\"Only two dimensional data supported.\")\n\n kwargs = {reindex_axis: new_index,\n other_axis: getattr(self, other_axis)}\n\n return SparseFrame(new_data, **kwargs)\n\n def reset_index(self, drop=False):\n if not drop:\n raise NotImplementedError(\"drop=False is not supported.\")\n new_idx = _default_index(len(self))\n return SparseFrame(self.data, index=new_idx, columns=self.columns)\n\n def to_npz(self, filename, block_size=None, storage_options=None):\n \"\"\"Save to numpy npz format.\n\n Parameters\n ----------\n filename: str\n path to local file ot s3 path starting with `s3://`\n block_size: int\n block size in bytes only has effect if writing to remote storage\n if set to None defaults to 100MB\n storage_options: dict\n additional parameters to pass to FileSystem class;\n only useful when writing to remote storages\n \"\"\"\n to_npz(self, filename, block_size, storage_options)\n\n\ndef _axis_is_empty(csr, axis=0):\n return csr.shape[axis] == 0\n\n\ndef _aligned_csr_elop(a, b, a_idx, b_idx, op='_plus_', how='outer'):\n \"\"\"Assume data == 0 at loc[-1]\"\"\"\n\n # handle emtpy cases\n if _axis_is_empty(a):\n return b[:-1, :], b_idx\n\n if _axis_is_empty(b):\n return a[:-1, :], a_idx\n\n join_idx, lidx, ridx = a_idx.join(b_idx, return_indexers=True, how=how)\n\n if lidx is None:\n a_new = a[:-1, :]\n else:\n a_new = sparse.csr_matrix(a[lidx])\n if ridx is None:\n b_new = b[:-1, :]\n else:\n b_new = 
sparse.csr_matrix(b[ridx])\n\n assert b_new.shape == a_new.shape\n added = a_new._binopt(b_new, op=op)\n return added, join_idx\n\n\ndef _matrix_join(a, b, a_idx, b_idx, how='outer'):\n \"\"\"Assume data == 0 at loc[-1]\"\"\"\n join_idx, lidx, ridx = a_idx.join(b_idx, return_indexers=True,\n how=how)\n if lidx is None:\n a_new = a[:-1, :]\n else:\n a_new = sparse.csr_matrix(a[lidx])\n if ridx is None:\n b_new = b[:-1, :]\n else:\n b_new = sparse.csr_matrix(b[ridx])\n\n data = sparse.hstack([a_new, b_new])\n\n return data, join_idx\n\n\ndef _create_group_matrix(group_idx, dtype='f8'):\n \"\"\"Create a matrix based on groupby index labels.\"\"\"\n if not isinstance(group_idx, pd.Categorical):\n group_idx = pd.Categorical(group_idx, np.unique(group_idx))\n col_idx = group_idx.codes\n row_idx = np.arange(len(col_idx))\n data = np.ones(len(row_idx))\n return sparse.coo_matrix((data, (row_idx, col_idx)),\n shape=(len(group_idx), len(group_idx.categories)),\n dtype=dtype).tocsr()\n\n\ndef sparse_one_hot(df, column=None, categories=None, dtype='f8',\n index_col=None, order=None, prefixes=False, sep='_',\n ignore_cat_order_mismatch=False):\n \"\"\"\n One-hot encode specified columns of a pandas.DataFrame.\n Returns a SparseFrame.\n\n See the documentation of :func:`sparsity.dask.reshape.one_hot_encode`.\n \"\"\"\n if column is not None:\n warnings.warn(\n '`column` argument of sparsity.sparse_frame.sparse_one_hot '\n 'and sparsity.dask.reshape.one_hot_encode functions is deprecated.'\n )\n if order is not None:\n raise ValueError('`order` and `column` arguments cannot be used '\n 'together.')\n categories = {column: categories}\n\n if categories is None:\n categories = df.dtypes\\\n .map(str)\\\n .map(lambda x: None if x == 'category' else False)\\\n .to_dict()\n\n if order is not None:\n assert set(order) == set(categories.keys()), \\\n \"`order` argument specifies different set of columns then \" \\\n \"`categories` argument. \\n\" \\\n \"In `order`: {} \\nIn categories: {}\"\\\n .format(sorted(order), sorted(categories.keys()))\n categories = OrderedDict([(column, categories[column])\n for column in order])\n\n new_cols = []\n csrs = []\n for column, column_cat in categories.items():\n if column_cat is False:\n # this column is skipped - we don't ohe it\n if not np.issubdtype(df.dtypes[column], np.number):\n raise TypeError(f\"Column `{column}` is not of numerical dtype, \"\n f\"but was requested to be included untouched \"\n f\"in a sparse one-hot-encoded frame.\")\n cols = [column]\n csr = sparse.csr_matrix(df[[column]].values)\n else:\n # we normally ohe this column\n if isinstance(column_cat, str):\n column_cat = _just_read_array(column_cat)\n cols, csr = _one_hot_series_csr(\n column_cat, dtype, df[column],\n ignore_cat_order_mismatch=ignore_cat_order_mismatch\n )\n if prefixes:\n column_tmpl = ''.join((column, sep, '{}'))\n cols = list(map(column_tmpl.format, map(str, cols)))\n new_cols.extend(cols)\n csrs.append(csr)\n if len(set(new_cols)) < len(new_cols):\n raise ValueError('Different columns have same categories. This would '\n 'result in duplicated column names. 
'\n 'Set `prefix` to True to manage this situation.')\n new_data = sparse.hstack(csrs, format='csr')\n\n if not isinstance(index_col, list):\n new_index = df[index_col] if index_col else df.index\n else:\n df = df.reset_index()\n new_index = pd.MultiIndex.from_arrays(df[index_col].values.T)\n return SparseFrame(new_data, index=new_index, columns=new_cols)\n\n\ndef _one_hot_series_csr(categories, dtype, oh_col,\n ignore_cat_order_mismatch=False):\n if types.is_categorical_dtype(oh_col):\n cat = oh_col.cat\n _check_categories_order(cat.categories, categories, oh_col.name,\n ignore_cat_order_mismatch)\n\n else:\n cat = pd.Categorical(oh_col, np.asarray(categories))\n codes = cat.codes\n n_features = len(cat.categories)\n n_samples = codes.size\n mask = codes != -1\n if np.any(~mask):\n raise ValueError(\"Unknown categorical features present \"\n \"during transform: %s.\" % np.unique(oh_col[~mask]))\n row_indices = np.arange(n_samples, dtype=np.int32)\n col_indices = codes\n data = np.ones(row_indices.size)\n data = sparse.coo_matrix((data, (row_indices, col_indices)),\n shape=(n_samples, n_features),\n dtype=dtype).tocsr()\n return cat.categories.values, data\n\n\ndef _check_categories_order(categories1, categories2, categorical_column_name,\n ignore_cat_order_mismatch):\n \"\"\"Check if two lists of categories differ. If they have different\n elements, raise an exception. If they differ only by order of elements,\n raise an exception unless ignore_cat_order_mismatch is set.\"\"\"\n\n if categories2 is None or list(categories2) == list(categories1):\n return\n\n if set(categories2) == set(categories1):\n mismatch_type = 'order'\n else:\n mismatch_type = 'set'\n\n if mismatch_type == 'set' or not ignore_cat_order_mismatch:\n raise ValueError(\n \"Got categorical column {column_name} whose categories \"\n \"{mismatch_type} doesn't match categories {mismatch_type} \"\n \"given as argument to this function.\".format(\n column_name=categorical_column_name,\n mismatch_type=mismatch_type\n )\n )\n\n\nfor _name, _indexer in get_indexers_list():\n SparseFrame._create_indexer(_name, _indexer)\n" ]
[ [ "numpy.array_equal", "scipy.sparse.isspmatrix_csr", "numpy.issubdtype", "pandas.api.types.is_categorical_dtype", "numpy.broadcast_to", "numpy.concatenate", "numpy.empty", "pandas.DataFrame", "numpy.arange", "scipy.sparse.csr_matrix", "scipy.sparse.coo_matrix", "numpy.array", "scipy.sparse.csc_matrix", "pandas.MultiIndex.from_arrays", "numpy.argsort", "numpy.hstack", "pandas.Index", "numpy.isnan", "pandas.core.common._default_index", "numpy.asarray", "numpy.random.RandomState", "scipy.sparse.hstack", "numpy.ones", "scipy.sparse.vstack", "pandas.__version__.split", "numpy.any", "numpy.atleast_1d", "pandas.Series", "numpy.all", "numpy.unique", "pandas.core.indexes.base._ensure_index" ] ]
icannistraci/nerfmm
[ "514ae420f41ba5b6447be853f73e37beca136f85" ]
[ "utils/comp_ray_dir.py" ]
[ "import torch\n\n\ndef comp_ray_dir_cam(H, W, focal):\n \"\"\"Compute ray directions in the camera coordinate, which only depends on intrinsics.\n This could be further transformed to world coordinate later, using camera poses.\n :return: (H, W, 3) torch.float32\n \"\"\"\n y, x = torch.meshgrid(torch.arange(H, dtype=torch.float32),\n torch.arange(W, dtype=torch.float32)) # (H, W)\n\n # Use OpenGL coordinate in 3D:\n # x points to right\n # y points to up\n # z points to backward\n #\n # The coordinate of the top left corner of an image should be (-0.5W, 0.5H, -1.0).\n dirs_x = (x - 0.5*W) / focal # (H, W)\n dirs_y = -(y - 0.5*H) / focal # (H, W)\n dirs_z = -torch.ones(H, W, dtype=torch.float32) # (H, W)\n rays_dir = torch.stack([dirs_x, dirs_y, dirs_z], dim=-1) # (H, W, 3)\n return rays_dir\n\n\ndef comp_ray_dir_cam_fxfy(H, W, fx, fy):\n \"\"\"Compute ray directions in the camera coordinate, which only depends on intrinsics.\n This could be further transformed to world coordinate later, using camera poses.\n :return: (H, W, 3) torch.float32\n \"\"\"\n y, x = torch.meshgrid(torch.arange(H, dtype=torch.float32, device=fx.device),\n torch.arange(W, dtype=torch.float32, device=fx.device)) # (H, W)\n\n # Use OpenGL coordinate in 3D:\n # x points to right\n # y points to up\n # z points to backward\n #\n # The coordinate of the top left corner of an image should be (-0.5W, 0.5H, -1.0).\n dirs_x = (x - 0.5*W) / fx # (H, W)\n dirs_y = -(y - 0.5*H) / fy # (H, W)\n dirs_z = -torch.ones(H, W, dtype=torch.float32, device=fx.device) # (H, W)\n rays_dir = torch.stack([dirs_x, dirs_y, dirs_z], dim=-1) # (H, W, 3)\n return rays_dir" ]
[ [ "torch.stack", "torch.arange", "torch.ones" ] ]
choi-jiwoo/financialdatapy
[ "a744252eab4bce981e3523c1b05dc0a3ddf445a0" ]
[ "financialdatapy/financials.py" ]
[ "\"\"\"This module states abstract class for financial statements.\"\"\"\nfrom abc import ABC, abstractmethod\nimport pandas as pd\nimport string\nfrom financialdatapy.request import Request\nfrom financialdatapy import search\n\n\nclass Financials(ABC):\n \"\"\"Abstract class representing financial statements of a company.\n\n :param symbol: Symbol of a company.\n :type symbol: str\n :param financial: One of the three financial statement.\n 'income_statement' or 'balance_sheet' or 'cash_flow', defaults to\n 'income_statement'.\n :type financial: str, optional\n :param period: Either 'annual' or 'quarter', defaults to 'annual'\n :type period: str, optional\n \"\"\"\n\n def __init__(self, symbol: str, financial: str = 'income_statement',\n period: str = 'annual') -> None:\n \"\"\"Initialize Financials.\"\"\"\n self.symbol = symbol.upper()\n self.financial = financial.lower()\n self.period = period.lower()\n\n @abstractmethod\n def get_financials(self) -> pd.DataFrame:\n pass\n\n @abstractmethod\n def open_report(self) -> None:\n pass\n\n def get_standard_financials(self) -> pd.DataFrame:\n \"\"\"Get standard financial statements of a company from investing.com.\n\n :return: Standard financial statement.\n :rtype: pandas.DataFrame\n \"\"\"\n\n financials = {\n 'income_statement': 'INC',\n 'balance_sheet': 'BAL',\n 'cash_flow': 'CAS',\n }\n periods = {\n 'annual': 'Annual',\n 'quarter': 'Interim',\n }\n symbol_search_result = search.Company(self.symbol)\n pair_id = symbol_search_result.search_pair_id()\n report_type = financials[self.financial]\n period = periods[self.period]\n params = {\n 'action': 'change_report_type',\n 'pair_ID': pair_id,\n 'report_type': report_type,\n 'period_type': period,\n }\n url = ('https://www.investing.com/instruments/Financials/'\n 'changereporttypeajax')\n res = Request(url, params=params)\n data = res.get_text()\n financial_statement = self._convert_to_table(data, report_type)\n\n return financial_statement\n\n def _convert_to_table(self, data: str, report_type: str) -> pd.DataFrame:\n \"\"\"Convert HTML text to a clean dataframe.\n\n :param data: Standard financial statement in HTML text.\n :type data: str\n :return: Standard financial statement.\n :param report_type: INC or BAL or CAS.\n :type report_type: str\n :rtype: pandas.DataFrame\n \"\"\"\n\n data_table = pd.read_html(data, index_col=0)[0]\n\n if report_type == 'CAS':\n data_table = self._convert_table_header(data_table, row_idx=2)\n else:\n data_table = self._convert_table_header(data_table, row_idx=1)\n\n data_table = data_table.replace(r'-$', '0', regex=True)\n\n for i in data_table:\n data_table[i] = pd.to_numeric(data_table[i], errors='coerce')\n\n data_table.dropna(inplace=True)\n\n values_unit = 1_000_000\n data_table = data_table * values_unit\n ignore_word = ['eps', 'dps']\n\n for i in data_table.index:\n for word in ignore_word:\n if word in i.lower():\n data_table.loc[i] /= 1_000_000\n\n data_table.index.rename(None, inplace=True)\n\n return data_table\n\n def _convert_table_header(self, df: pd.DataFrame,\n row_idx: int) -> pd.DataFrame:\n \"\"\"Convert date in string to datetime object.\n\n :param df: Standard financial statement.\n :type df: pd.DataFrame\n :param row_idx: Index number of row containing dates.\n :type row_idx: int\n :return: Standard financial statement with dates as columns.\n :rtype: pd.DataFrame\n \"\"\"\n\n table_header = df.iloc[-row_idx:].values[0]\n table_header = [\n element.translate(str.maketrans('', '', string.punctuation))\n for element\n in table_header\n 
]\n table_header = pd.to_datetime(table_header, format='%Y%d%m')\n\n df.columns = table_header\n df = df.iloc[:-row_idx]\n\n return df\n" ]
[ [ "pandas.to_datetime", "pandas.to_numeric", "pandas.read_html" ] ]
aman16011/SML_Project_2019
[ "77f8aaca91aa5505a0183c9ffaa1c9272883f073" ]
[ "code/main.py" ]
[ "import numpy as np\r\nfrom glob import glob\r\nfrom helper_function import *\r\nfrom scipy.signal import welch\r\nfrom sklearn.metrics import accuracy_score\r\nimport matplotlib.pyplot as plt \r\nimport pickle as pkl\r\n\r\n# # Assigning class labels\r\nlabels = {'tongue':0,'foot':1, 'left':2, 'right':3}\r\n# Dataset extraction\r\nX_original,Y = get_data(\"../data/\") # trials X channels X values\r\n\r\n#Create Chunks\r\nX = np.zeros((len(X_original)*3,22,250)) \r\ncount1=0\r\ncount2=0\r\ncount3=0\r\nfor tr in range(len(X_original)):\r\n for ch in range(22):\r\n X[count1,ch,:] = X_original[tr,ch,750:1000]\r\n X[count2,ch,:] = X_original[tr,ch,1000:1250] \r\n X[count3,ch,:] = X_original[tr,ch,1250:1500]\r\n count1+=1\r\n count2+=1\r\n count3+=1\r\n\r\npkl.dump(X,open(r'X.pkl','wb'))\r\n \r\n#Showing PSD of 1st subject 1st trial all 22 channels: \r\nfor i in range(22):\r\n f,psd = welch(X[0,i,:],250)\r\n plt.plot(f,psd)\r\n plt.savefig('PSD before filtering.png')\r\n plt.xlabel('Frequency(Hz)')\r\n plt.ylabel('PSD')\r\n plt.title('Power spectral density (Before filtering) for subject 1 trial 1') \r\nfor l in range(len(Y)):\r\n Y[l] = labels[Y[l]]\r\n\r\n\r\n# Pre-processing\r\nX = preprocess(X)\r\n\r\n# Visualization of filtered signal - how only one frequency band (8-24Hz) remains now.\r\nfor i in range(22):\r\n f,psd = welch(X[0,i,:],250)\r\n plt.plot(f,psd)\r\n plt.savefig('PSD after filtering.png')\r\n plt.xlabel('Frequency(Hz)')\r\n plt.ylabel('PSD')\r\n plt.title('Power spectral density (After filtering) for subject 1 trial 1') \r\n plt.savefig('PSD after filtering.png')\r\n\r\n# Feature extraction\r\n# Average Bandpower features [Mu and Beta band power features 8-24Hz with 2Hz binning- 8 bins per channel]\r\n\r\nX = feature_extraction(X)\r\nprint(X.shape,Y.shape)\r\nnp.save(\"X_train.npy\",X)\r\nnp.save(\"Y_train.npy\",Y)\r\n\r\n#??? Class conditional density visualizations\r\n#??? Reduced Dimension Visualization\r\n\r\nX = np.load(\"X_train.npy\")\r\nY = np.load(\"Y_train.npy\")\r\nsplit = 2\r\n# K- Fold Split\r\n\r\nX_train,Y_train,X_val,Y_val = stratified_K_fold(split,X,Y)\r\nprint(\"splitting done\")\r\n\r\n# Results\r\nget_k_fold_result(X_train,Y_train,X_val,Y_val)\r\n\r\n \r\n" ]
[ [ "matplotlib.pyplot.savefig", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.plot", "numpy.load", "matplotlib.pyplot.title", "numpy.save", "matplotlib.pyplot.ylabel", "scipy.signal.welch" ] ]
mtrebitsch/yt
[ "55c281448695bb9fe3b34920e43fd2f2ada2ad29" ]
[ "yt/visualization/plot_window.py" ]
[ "import abc\nfrom collections import defaultdict\nfrom functools import wraps\nfrom numbers import Number\nfrom typing import List, Optional, Type, Union\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom more_itertools import always_iterable\nfrom mpl_toolkits.axes_grid1 import ImageGrid\nfrom packaging.version import Version\nfrom pyparsing import ParseFatalException\nfrom unyt.exceptions import UnitConversionError\n\nfrom yt._maintenance.deprecation import issue_deprecation_warning\nfrom yt.config import ytcfg\nfrom yt.data_objects.image_array import ImageArray\nfrom yt.frontends.ytdata.data_structures import YTSpatialPlotDataset\nfrom yt.funcs import fix_axis, fix_unitary, is_sequence, iter_fields, mylog, obj_length\nfrom yt.units.unit_object import Unit # type: ignore\nfrom yt.units.unit_registry import UnitParseError # type: ignore\nfrom yt.units.yt_array import YTArray, YTQuantity\nfrom yt.utilities.exceptions import (\n YTCannotParseUnitDisplayName,\n YTDataTypeUnsupported,\n YTInvalidFieldType,\n YTPlotCallbackError,\n YTUnitNotRecognized,\n YTUnsupportedPlotCallback,\n)\nfrom yt.utilities.math_utils import ortho_find\nfrom yt.utilities.orientation import Orientation\n\nfrom ._commons import MPL_VERSION\nfrom .base_plot_types import CallbackWrapper, ImagePlotMPL\nfrom .fixed_resolution import (\n FixedResolutionBuffer,\n OffAxisProjectionFixedResolutionBuffer,\n)\nfrom .geo_plot_utils import get_mpl_transform\nfrom .plot_container import (\n ImagePlotContainer,\n apply_callback,\n get_log_minorticks,\n get_symlog_minorticks,\n invalidate_data,\n invalidate_figure,\n invalidate_plot,\n linear_transform,\n log_transform,\n symlog_transform,\n)\nfrom .plot_modifications import callback_registry\n\nimport sys # isort: skip\n\nif sys.version_info < (3, 10):\n # this function is deprecated in more_itertools\n # because it is superseded by the standard library\n from more_itertools import zip_equal\nelse:\n\n def zip_equal(*args):\n # FUTURE: when only Python 3.10+ is supported,\n # drop this conditional and call the builtin zip\n # function directly where due\n return zip(*args, strict=True)\n\n\ndef get_window_parameters(axis, center, width, ds):\n width = ds.coordinates.sanitize_width(axis, width, None)\n center, display_center = ds.coordinates.sanitize_center(center, axis)\n xax = ds.coordinates.x_axis[axis]\n yax = ds.coordinates.y_axis[axis]\n bounds = (\n display_center[xax] - width[0] / 2,\n display_center[xax] + width[0] / 2,\n display_center[yax] - width[1] / 2,\n display_center[yax] + width[1] / 2,\n )\n return (bounds, center, display_center)\n\n\ndef get_oblique_window_parameters(normal, center, width, ds, depth=None):\n display_center, center = ds.coordinates.sanitize_center(center, 4)\n width = ds.coordinates.sanitize_width(normal, width, depth)\n\n if len(width) == 2:\n # Transforming to the cutting plane coordinate system\n center = (center - ds.domain_left_edge) / ds.domain_width - 0.5\n (normal, perp1, perp2) = ortho_find(normal)\n mat = np.transpose(np.column_stack((perp1, perp2, normal)))\n center = np.dot(mat, center)\n\n w = tuple(el.in_units(\"code_length\") for el in width)\n bounds = tuple(((2 * (i % 2)) - 1) * w[i // 2] / 2 for i in range(len(w) * 2))\n\n return (bounds, center)\n\n\ndef get_axes_unit(width, ds):\n r\"\"\"\n Infers the axes unit names from the input width specification\n \"\"\"\n if ds.no_cgs_equiv_length:\n return (\"code_length\",) * 2\n if is_sequence(width):\n if isinstance(width[1], str):\n axes_unit = 
(width[1], width[1])\n elif is_sequence(width[1]):\n axes_unit = (width[0][1], width[1][1])\n elif isinstance(width[0], YTArray):\n axes_unit = (str(width[0].units), str(width[1].units))\n else:\n axes_unit = None\n else:\n if isinstance(width, YTArray):\n axes_unit = (str(width.units), str(width.units))\n else:\n axes_unit = None\n return axes_unit\n\n\ndef validate_mesh_fields(data_source, fields):\n # this check doesn't make sense for ytdata plot datasets, which\n # load mesh data as a particle field but nonetheless can still\n # make plots with it\n if isinstance(data_source.ds, YTSpatialPlotDataset):\n return\n canonical_fields = data_source._determine_fields(fields)\n invalid_fields = []\n for field in canonical_fields:\n finfo = data_source.ds.field_info[field]\n if finfo.sampling_type == \"particle\":\n if not hasattr(data_source.ds, \"_sph_ptypes\"):\n pass\n elif finfo.is_sph_field:\n continue\n invalid_fields.append(field)\n\n if len(invalid_fields) > 0:\n raise YTInvalidFieldType(invalid_fields)\n\n\nclass PlotWindow(ImagePlotContainer):\n r\"\"\"\n A plotting mechanism based around the concept of a window into a\n data source. It can have arbitrary fields, each of which will be\n centered on the same viewpoint, but will have individual zlimits.\n\n The data and plot are updated separately, and each can be\n invalidated as the object is modified.\n\n Data is handled by a FixedResolutionBuffer object.\n\n Parameters\n ----------\n\n data_source :\n :class:`yt.data_objects.selection_objects.base_objects.YTSelectionContainer2D`\n This is the source to be pixelized, which can be a projection,\n slice, or a cutting plane.\n bounds : sequence of floats\n Bounds are the min and max in the image plane that we want our\n image to cover. It's in the order of (xmin, xmax, ymin, ymax),\n where the coordinates are all in the appropriate code units.\n buff_size : sequence of ints\n The size of the image to generate.\n antialias : boolean\n This can be true or false. 
It determines whether or not sub-pixel\n rendering is used during data deposition.\n window_size : float\n The size of the window on the longest axis (in units of inches),\n including the margins but not the colorbar.\n right_handed : boolean\n Whether the implicit east vector for the image generated is set to make a right\n handed coordinate system with a north vector and the normal vector, the\n direction of the 'window' into the data.\n\n \"\"\"\n\n def __init__(\n self,\n data_source,\n bounds,\n buff_size=(800, 800),\n antialias=True,\n periodic=True,\n origin=\"center-window\",\n oblique=False,\n right_handed=True,\n window_size=8.0,\n fields=None,\n fontsize=18,\n aspect=None,\n setup=False,\n ):\n self.center = None\n self._periodic = periodic\n self.oblique = oblique\n self._right_handed = right_handed\n self._equivalencies = defaultdict(lambda: (None, {}))\n self.buff_size = buff_size\n self.antialias = antialias\n self._axes_unit_names = None\n self._transform = None\n self._projection = None\n\n self.aspect = aspect\n skip = list(FixedResolutionBuffer._exclude_fields) + data_source._key_fields\n\n fields = list(iter_fields(fields))\n self.override_fields = list(set(fields).intersection(set(skip)))\n self.fields = [f for f in fields if f not in skip]\n super().__init__(data_source, window_size, fontsize)\n\n self._set_window(bounds) # this automatically updates the data and plot\n self.origin = origin\n if self.data_source.center is not None and not oblique:\n ax = self.data_source.axis\n xax = self.ds.coordinates.x_axis[ax]\n yax = self.ds.coordinates.y_axis[ax]\n center, display_center = self.ds.coordinates.sanitize_center(\n self.data_source.center, ax\n )\n center = [display_center[xax], display_center[yax]]\n self.set_center(center)\n\n axname = self.ds.coordinates.axis_name[ax]\n transform = self.ds.coordinates.data_transform[axname]\n projection = self.ds.coordinates.data_projection[axname]\n self._projection = get_mpl_transform(projection)\n self._transform = get_mpl_transform(transform)\n\n for field in self.data_source._determine_fields(self.fields):\n finfo = self.data_source.ds._get_field_info(*field)\n if finfo.take_log:\n self._field_transform[field] = log_transform\n else:\n self._field_transform[field] = linear_transform\n\n log, linthresh = self._log_config[field]\n if log is not None:\n self.set_log(field, log, linthresh=linthresh)\n\n # Access the dictionary to force the key to be created\n self._units_config[field]\n\n self.setup_callbacks()\n self._setup_plots()\n\n def __iter__(self):\n for ds in self.ts:\n mylog.warning(\"Switching to %s\", ds)\n self._switch_ds(ds)\n yield self\n\n def piter(self, *args, **kwargs):\n for ds in self.ts.piter(*args, **kwargs):\n self._switch_ds(ds)\n yield self\n\n _frb = None\n\n @property\n def frb(self):\n if self._frb is None or not self._data_valid:\n self._recreate_frb()\n return self._frb\n\n @frb.setter\n def frb(self, value):\n self._frb = value\n self._data_valid = True\n\n @frb.deleter\n def frb(self):\n del self._frb\n self._frb = None\n self._data_valid = False\n\n def _recreate_frb(self):\n old_fields = None\n # If we are regenerating an frb, we want to know what fields we had before\n if self._frb is not None:\n old_fields = list(self._frb.keys())\n old_units = [str(self._frb[of].units) for of in old_fields]\n\n # Set the bounds\n if hasattr(self, \"zlim\"):\n bounds = self.xlim + self.ylim + self.zlim\n else:\n bounds = self.xlim + self.ylim\n\n # Generate the FRB\n self.frb = self._frb_generator(\n 
self.data_source,\n bounds,\n self.buff_size,\n self.antialias,\n periodic=self._periodic,\n )\n\n # At this point the frb has the valid bounds, size, aliasing, etc.\n if old_fields is None:\n self._frb._get_data_source_fields()\n\n # New frb, apply default units (if any)\n for field, field_unit in self._units_config.items():\n if field_unit is None:\n continue\n\n field_unit = Unit(field_unit, registry=self.ds.unit_registry)\n is_projected = getattr(self, \"projected\", False)\n if is_projected:\n # Obtain config\n path_length_units = Unit(\n ytcfg.get_most_specific(\n \"plot\", *field, \"path_length_units\", fallback=\"cm\"\n ),\n registry=self.ds.unit_registry,\n )\n units = field_unit * path_length_units\n else:\n units = field_unit\n try:\n self.frb[field].convert_to_units(units)\n except UnitConversionError:\n msg = (\n \"Could not apply default units from configuration.\\n\"\n \"Tried converting projected field %s from %s to %s, retaining units %s:\\n\"\n \"\\tgot units for field: %s\"\n )\n args = [\n field,\n self.frb[field].units,\n units,\n field_unit,\n units,\n ]\n if is_projected:\n msg += \"\\n\\tgot units for integration length: %s\"\n args += [path_length_units]\n\n msg += \"\\nCheck your configuration file.\"\n\n mylog.error(msg, *args)\n else:\n # Restore the old fields\n for key, units in zip(old_fields, old_units):\n self._frb[key]\n equiv = self._equivalencies[key]\n if equiv[0] is None:\n self._frb[key].convert_to_units(units)\n else:\n self.frb.set_unit(key, units, equiv[0], equiv[1])\n\n # Restore the override fields\n for key in self.override_fields:\n self._frb[key]\n\n @property\n def width(self):\n Wx = self.xlim[1] - self.xlim[0]\n Wy = self.ylim[1] - self.ylim[0]\n return (Wx, Wy)\n\n @property\n def bounds(self):\n return self.xlim + self.ylim\n\n @invalidate_data\n def zoom(self, factor):\n r\"\"\"This zooms the window by *factor* > 0.\n - zoom out with *factor* < 1\n - zoom in with *factor* > 1\n\n Parameters\n ----------\n factor : float\n multiplier for the current width\n\n \"\"\"\n if factor <= 0:\n raise ValueError(\"Only positive zooming factors are meaningful.\")\n Wx, Wy = self.width\n centerx = self.xlim[0] + Wx * 0.5\n centery = self.ylim[0] + Wy * 0.5\n nWx, nWy = Wx / factor, Wy / factor\n self.xlim = (centerx - nWx * 0.5, centerx + nWx * 0.5)\n self.ylim = (centery - nWy * 0.5, centery + nWy * 0.5)\n return self\n\n @invalidate_data\n def pan(self, deltas):\n r\"\"\"Pan the image by specifying absolute code unit coordinate deltas.\n\n Parameters\n ----------\n deltas : Two-element sequence of floats, quantities, or (float, unit)\n tuples.\n\n (delta_x, delta_y). If a unit is not supplied the unit is assumed\n to be code_length.\n\n \"\"\"\n if len(deltas) != 2:\n raise TypeError(\n f\"The pan function accepts a two-element sequence.\\nReceived {deltas}.\"\n )\n if isinstance(deltas[0], Number) and isinstance(deltas[1], Number):\n deltas = (\n self.ds.quan(deltas[0], \"code_length\"),\n self.ds.quan(deltas[1], \"code_length\"),\n )\n elif isinstance(deltas[0], tuple) and isinstance(deltas[1], tuple):\n deltas = (\n self.ds.quan(deltas[0][0], deltas[0][1]),\n self.ds.quan(deltas[1][0], deltas[1][1]),\n )\n elif isinstance(deltas[0], YTQuantity) and isinstance(deltas[1], YTQuantity):\n pass\n else:\n raise TypeError(\n \"The arguments of the pan function must be a sequence of floats,\\n\"\n \"quantities, or (float, unit) tuples. 
Received %s.\" % (deltas,)\n )\n self.xlim = (self.xlim[0] + deltas[0], self.xlim[1] + deltas[0])\n self.ylim = (self.ylim[0] + deltas[1], self.ylim[1] + deltas[1])\n return self\n\n @invalidate_data\n def pan_rel(self, deltas):\n r\"\"\"Pan the image by specifying relative deltas, to the FOV.\n\n Parameters\n ----------\n deltas : sequence of floats\n (delta_x, delta_y) in *relative* code unit coordinates\n\n \"\"\"\n Wx, Wy = self.width\n self.xlim = (self.xlim[0] + Wx * deltas[0], self.xlim[1] + Wx * deltas[0])\n self.ylim = (self.ylim[0] + Wy * deltas[1], self.ylim[1] + Wy * deltas[1])\n return self\n\n @invalidate_plot\n def set_unit(self, field, new_unit, equivalency=None, equivalency_kwargs=None):\n \"\"\"Sets a new unit for the requested field\n\n parameters\n ----------\n field : string or field tuple\n The name of the field that is to be changed.\n\n new_unit : string or Unit object\n The name of the new unit.\n\n equivalency : string, optional\n If set, the equivalency to use to convert the current units to\n the new requested unit. If None, the unit conversion will be done\n without an equivalency\n\n equivalency_kwargs : string, optional\n Keyword arguments to be passed to the equivalency. Only used if\n ``equivalency`` is set.\n \"\"\"\n if equivalency_kwargs is None:\n equivalency_kwargs = {}\n field = self.data_source._determine_fields(field)[0]\n for f, u in zip_equal(iter_fields(field), always_iterable(new_unit)):\n self.frb.set_unit(f, u, equivalency, equivalency_kwargs)\n self._equivalencies[f] = (equivalency, equivalency_kwargs)\n return self\n\n @invalidate_plot\n def set_origin(self, origin):\n \"\"\"Set the plot origin.\n\n Parameters\n ----------\n origin : string or length 1, 2, or 3 sequence.\n The location of the origin of the plot coordinate system. This\n is typically represented by a '-' separated string or a tuple of\n strings. In the first index the y-location is given by 'lower',\n 'upper', or 'center'. The second index is the x-location, given as\n 'left', 'right', or 'center'. Finally, whether the origin is\n applied in 'domain' space, plot 'window' space or 'native'\n simulation coordinate system is given. For example, both\n 'upper-right-domain' and ['upper', 'right', 'domain'] place the\n origin in the upper right hand corner of domain space. If x or y\n are not given, a value is inferred. For instance, 'left-domain'\n corresponds to the lower-left hand corner of the simulation domain,\n 'center-domain' corresponds to the center of the simulation domain,\n or 'center-window' for the center of the plot window. In the event\n that none of these options place the origin in a desired location,\n a sequence of tuples and a string specifying the\n coordinate space can be given. If plain numeric types are input,\n units of `code_length` are assumed. 
Further examples:\n\n =============================================== ===============================\n format example\n =============================================== ===============================\n '{space}' 'domain'\n '{xloc}-{space}' 'left-window'\n '{yloc}-{space}' 'upper-domain'\n '{yloc}-{xloc}-{space}' 'lower-right-window'\n ('{space}',) ('window',)\n ('{xloc}', '{space}') ('right', 'domain')\n ('{yloc}', '{space}') ('lower', 'window')\n ('{yloc}', '{xloc}', '{space}') ('lower', 'right', 'window')\n ((yloc, '{unit}'), (xloc, '{unit}'), '{space}') ((0, 'm'), (.4, 'm'), 'window')\n (xloc, yloc, '{space}') (0.23, 0.5, 'domain')\n =============================================== ===============================\n \"\"\"\n self.origin = origin\n return self\n\n @invalidate_plot\n @invalidate_figure\n def set_mpl_projection(self, mpl_proj):\n r\"\"\"\n Set the matplotlib projection type with a cartopy transform function\n\n Given a string or a tuple argument, this will project the data onto\n the plot axes with the chosen transform function.\n\n Assumes that the underlying data has a PlateCarree transform type.\n\n To annotate the plot with coastlines or other annotations,\n `_setup_plots()` will need to be called after this function\n to make the axes available for annotation.\n\n Parameters\n ----------\n\n mpl_proj : string or tuple\n if passed as a string, mpl_proj is the specified projection type,\n if passed as a tuple, then tuple will take the form of\n ``(\"ProjectionType\", (args))`` or ``(\"ProjectionType\", (args), {kwargs})``\n Valid projection type options include:\n 'PlateCarree', 'LambertConformal', 'LabmbertCylindrical',\n 'Mercator', 'Miller', 'Mollweide', 'Orthographic',\n 'Robinson', 'Stereographic', 'TransverseMercator',\n 'InterruptedGoodeHomolosine', 'RotatedPole', 'OGSB',\n 'EuroPP', 'Geostationary', 'Gnomonic', 'NorthPolarStereo',\n 'OSNI', 'SouthPolarStereo', 'AlbersEqualArea',\n 'AzimuthalEquidistant', 'Sinusoidal', 'UTM',\n 'NearsidePerspective', 'LambertAzimuthalEqualArea'\n\n Examples\n --------\n\n This will create a Mollweide projection using Mollweide default values\n and annotate it with coastlines\n\n >>> import yt\n >>> ds = yt.load(\"\")\n >>> p = yt.SlicePlot(ds, \"altitude\", \"AIRDENS\")\n >>> p.set_mpl_projection(\"AIRDENS\", \"Mollweide\")\n >>> p._setup_plots()\n >>> p.plots[\"AIRDENS\"].axes.coastlines()\n >>> p.show()\n\n This will move the PlateCarree central longitude to 90 degrees and\n annotate with coastlines.\n\n >>> import yt\n >>> ds = yt.load(\"\")\n >>> p = yt.SlicePlot(ds, \"altitude\", \"AIRDENS\")\n >>> p.set_mpl_projection(\n ... \"AIRDENS\", (\"PlateCarree\", (), {\"central_longitude\": 90, \"globe\": None})\n ... 
)\n >>> p._setup_plots()\n >>> p.plots[\"AIRDENS\"].axes.set_global()\n >>> p.plots[\"AIRDENS\"].axes.coastlines()\n >>> p.show()\n\n\n This will create a RoatatedPole projection with the unrotated pole\n position at 37.5 degrees latitude and 177.5 degrees longitude by\n passing them in as args.\n\n\n >>> import yt\n >>> ds = yt.load(\"\")\n >>> p = yt.SlicePlot(ds, \"altitude\", \"AIRDENS\")\n >>> p.set_mpl_projection(\"RotatedPole\", (177.5, 37.5))\n >>> p._setup_plots()\n >>> p.plots[\"AIRDENS\"].axes.set_global()\n >>> p.plots[\"AIRDENS\"].axes.coastlines()\n >>> p.show()\n\n This will create a RoatatedPole projection with the unrotated pole\n position at 37.5 degrees latitude and 177.5 degrees longitude by\n passing them in as kwargs.\n\n >>> import yt\n >>> ds = yt.load(\"\")\n >>> p = yt.SlicePlot(ds, \"altitude\", \"AIRDENS\")\n >>> p.set_mpl_projection(\n ... (\"RotatedPole\", (), {\"pole_latitude\": 37.5, \"pole_longitude\": 177.5})\n ... )\n >>> p._setup_plots()\n >>> p.plots[\"AIRDENS\"].axes.set_global()\n >>> p.plots[\"AIRDENS\"].axes.coastlines()\n >>> p.show()\n\n \"\"\"\n\n self._projection = get_mpl_transform(mpl_proj)\n axname = self.ds.coordinates.axis_name[self.data_source.axis]\n transform = self.ds.coordinates.data_transform[axname]\n self._transform = get_mpl_transform(transform)\n return self\n\n @invalidate_data\n def _set_window(self, bounds):\n \"\"\"Set the bounds of the plot window.\n This is normally only called internally, see set_width.\n\n\n Parameters\n ----------\n\n bounds : a four element sequence of floats\n The x and y bounds, in the format (x0, x1, y0, y1)\n\n \"\"\"\n if self.center is not None:\n dx = bounds[1] - bounds[0]\n dy = bounds[3] - bounds[2]\n self.xlim = (self.center[0] - dx / 2.0, self.center[0] + dx / 2.0)\n self.ylim = (self.center[1] - dy / 2.0, self.center[1] + dy / 2.0)\n else:\n self.xlim = tuple(bounds[0:2])\n self.ylim = tuple(bounds[2:4])\n if len(bounds) == 6:\n self.zlim = tuple(bounds[4:6])\n mylog.info(\"xlim = %f %f\", self.xlim[0], self.xlim[1])\n mylog.info(\"ylim = %f %f\", self.ylim[0], self.ylim[1])\n if hasattr(self, \"zlim\"):\n mylog.info(\"zlim = %f %f\", self.zlim[0], self.zlim[1])\n\n @invalidate_data\n def set_width(self, width, unit=None):\n \"\"\"set the width of the plot window\n\n parameters\n ----------\n width : float, array of floats, (float, unit) tuple, or tuple of\n (float, unit) tuples.\n\n Width can have four different formats to support windows with\n variable x and y widths. They are:\n\n ================================== =======================\n format example\n ================================== =======================\n (float, string) (10,'kpc')\n ((float, string), (float, string)) ((10,'kpc'),(15,'kpc'))\n float 0.2\n (float, float) (0.2, 0.3)\n ================================== =======================\n\n For example, (10, 'kpc') requests a plot window that is 10\n kiloparsecs wide in the x and y directions,\n ((10,'kpc'),(15,'kpc')) requests a window that is 10 kiloparsecs\n wide along the x axis and 15 kiloparsecs wide along the y axis.\n In the other two examples, code units are assumed, for example\n (0.2, 0.3) requests a plot that has an x width of 0.2 and a y\n width of 0.3 in code units. If units are provided the resulting\n plot axis labels will use the supplied units.\n unit : str\n the unit the width has been specified in. If width is a tuple, this\n argument is ignored. 
Defaults to code units.\n \"\"\"\n if isinstance(width, Number):\n if unit is None:\n width = (width, \"code_length\")\n else:\n width = (width, fix_unitary(unit))\n\n axes_unit = get_axes_unit(width, self.ds)\n\n width = self.ds.coordinates.sanitize_width(self.frb.axis, width, None)\n\n centerx = (self.xlim[1] + self.xlim[0]) / 2.0\n centery = (self.ylim[1] + self.ylim[0]) / 2.0\n\n self.xlim = (centerx - width[0] / 2, centerx + width[0] / 2)\n self.ylim = (centery - width[1] / 2, centery + width[1] / 2)\n\n if hasattr(self, \"zlim\"):\n centerz = (self.zlim[1] + self.zlim[0]) / 2.0\n mw = self.ds.arr(width).max()\n self.zlim = (centerz - mw / 2.0, centerz + mw / 2.0)\n\n self.set_axes_unit(axes_unit)\n\n return self\n\n @invalidate_data\n def set_center(self, new_center, unit=\"code_length\"):\n \"\"\"Sets a new center for the plot window\n\n parameters\n ----------\n new_center : two element sequence of floats\n The coordinates of the new center of the image in the\n coordinate system defined by the plot axes. If the unit\n keyword is not specified, the coordinates are assumed to\n be in code units.\n\n unit : string\n The name of the unit new_center is given in. If new_center is a\n YTArray or tuple of YTQuantities, this keyword is ignored.\n\n \"\"\"\n error = RuntimeError(\n \"\\n\"\n \"new_center must be a two-element list or tuple of floats \\n\"\n \"corresponding to a coordinate in the plot relative to \\n\"\n \"the plot coordinate system.\\n\"\n )\n if new_center is None:\n self.center = None\n elif is_sequence(new_center):\n if len(new_center) != 2:\n raise error\n for el in new_center:\n if not isinstance(el, Number) and not isinstance(el, YTQuantity):\n raise error\n if isinstance(new_center[0], Number):\n new_center = [self.ds.quan(c, unit) for c in new_center]\n self.center = new_center\n else:\n raise error\n self._set_window(self.bounds)\n return self\n\n @invalidate_data\n def set_antialias(self, aa):\n \"\"\"Turn antialiasing on or off.\n\n parameters\n ----------\n aa : boolean\n \"\"\"\n self.antialias = aa\n\n @invalidate_data\n def set_buff_size(self, size):\n \"\"\"Sets a new buffer size for the fixed resolution buffer\n\n parameters\n ----------\n size : int or two element sequence of ints\n The number of data elements in the buffer on the x and y axes.\n If a scalar is provided, then the buffer is assumed to be square.\n \"\"\"\n if is_sequence(size):\n self.buff_size = size\n else:\n self.buff_size = (size, size)\n return self\n\n @invalidate_plot\n def set_axes_unit(self, unit_name):\n r\"\"\"Set the unit for display on the x and y axes of the image.\n\n Parameters\n ----------\n unit_name : string or two element tuple of strings\n A unit, available for conversion in the dataset, that the\n image extents will be displayed in. If set to None, any previous\n units will be reset. If the unit is None, the default is chosen.\n If unit_name is '1', 'u', or 'unitary', it will not display the\n units, and only show the axes name. 
If unit_name is a tuple, the\n first element is assumed to be the unit for the x axis and the\n second element the unit for the y axis.\n\n Raises\n ------\n YTUnitNotRecognized\n If the unit is not known, this will be raised.\n\n Examples\n --------\n\n >>> from yt import load\n >>> ds = load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")\n >>> p = ProjectionPlot(ds, \"y\", \"Density\")\n >>> p.set_axes_unit(\"kpc\")\n\n \"\"\"\n # blind except because it could be in conversion_factors or units\n if unit_name is not None:\n if isinstance(unit_name, str):\n unit_name = (unit_name, unit_name)\n for un in unit_name:\n try:\n self.ds.length_unit.in_units(un)\n except (UnitConversionError, UnitParseError) as e:\n raise YTUnitNotRecognized(un) from e\n self._axes_unit_names = unit_name\n return self\n\n @invalidate_plot\n def toggle_right_handed(self):\n self._right_handed = not self._right_handed\n\n def to_fits_data(self, fields=None, other_keys=None, length_unit=None, **kwargs):\n r\"\"\"Export the fields in this PlotWindow instance\n to a FITSImageData instance.\n\n This will export a set of FITS images of either the fields specified\n or all the fields already in the object.\n\n Parameters\n ----------\n fields : list of strings\n These fields will be pixelized and output. If \"None\", the keys of\n the FRB will be used.\n other_keys : dictionary, optional\n A set of header keys and values to write into the FITS header.\n length_unit : string, optional\n the length units that the coordinates are written in. The default\n is to use the default length unit of the dataset.\n \"\"\"\n return self.frb.to_fits_data(\n fields=fields, other_keys=other_keys, length_unit=length_unit, **kwargs\n )\n\n\nclass PWViewerMPL(PlotWindow):\n \"\"\"Viewer using matplotlib as a backend via the WindowPlotMPL.\"\"\"\n\n _current_field = None\n _frb_generator: Optional[Type[FixedResolutionBuffer]] = None\n _plot_type: Optional[str] = None\n _data_valid = False\n\n def __init__(self, *args, **kwargs):\n if self._frb_generator is None:\n self._frb_generator = kwargs.pop(\"frb_generator\")\n if self._plot_type is None:\n self._plot_type = kwargs.pop(\"plot_type\")\n self._splat_color = kwargs.pop(\"splat_color\", None)\n PlotWindow.__init__(self, *args, **kwargs)\n\n def _setup_origin(self):\n origin = self.origin\n axis_index = self.data_source.axis\n xc = None\n yc = None\n\n if isinstance(origin, str):\n origin = tuple(origin.split(\"-\"))\n\n if len(origin) > 3:\n raise ValueError(\n \"Invalid origin argument with too many elements; \"\n f\"expected 1, 2 or 3 elements, got {self.origin!r}, counting {len(origin)} elements. \"\n \"Use '-' as a separator for string arguments.\"\n )\n\n if len(origin) == 1:\n coord_system = origin[0]\n if coord_system not in (\"window\", \"domain\", \"native\"):\n raise ValueError(\n \"Invalid origin argument. \"\n \"Single element specification must be 'window', 'domain', or 'native'. \"\n f\"Got {self.origin!r}\"\n )\n origin = (\"lower\", \"left\", coord_system)\n\n elif len(origin) == 2:\n err_msg = \"Invalid origin argument. 
Using 2 elements:\\n\"\n\n if origin[0] in (\"left\", \"right\", \"center\"):\n o0map = {\"left\": \"lower\", \"right\": \"upper\", \"center\": \"center\"}\n origin = (o0map[origin[0]],) + origin\n elif origin[0] in (\"lower\", \"upper\"):\n origin = (origin[0], \"center\", origin[-1])\n else:\n err_msg += \" - the first one must be 'left', 'right', 'lower', 'upper' or 'center'\\n\"\n\n if origin[-1] not in (\"window\", \"domain\", \"native\"):\n err_msg += \" - the second one must be 'window', 'domain', or 'native'\\n\"\n\n if len(err_msg.split(\"\\n\")) > 2:\n err_msg += f\"Got {self.origin!r}\"\n raise ValueError(err_msg)\n\n elif len(origin) == 3:\n err_msg = \"Invalid origin argument. Using 3 elements:\\n\"\n if isinstance(origin[0], (int, float)):\n xc = self.ds.quan(origin[0], \"code_length\")\n elif isinstance(origin[0], tuple):\n xc = self.ds.quan(*origin[0])\n elif origin[0] not in (\"lower\", \"upper\", \"center\"):\n err_msg += \" - the first one must be 'lower', 'upper' or 'center' or a distance\\n\"\n\n if isinstance(origin[1], (int, float)):\n yc = self.ds.quan(origin[1], \"code_length\")\n elif isinstance(origin[1], tuple):\n yc = self.ds.quan(*origin[1])\n elif origin[1] not in (\"left\", \"right\", \"center\"):\n err_msg += \" - the second one must be 'left', 'right', 'center' or a distance\\n\"\n\n if origin[-1] not in (\"window\", \"domain\", \"native\"):\n err_msg += \" - the third one must be 'window', 'domain', or 'native'\\n\"\n\n if len(err_msg.split(\"\\n\")) > 2:\n err_msg += f\"Got {self.origin!r}\"\n raise ValueError(err_msg)\n\n assert not isinstance(origin, str)\n assert len(origin) == 3\n assert origin[2] in (\"window\", \"domain\", \"native\")\n\n if origin[2] == \"window\":\n xllim, xrlim = self.xlim\n yllim, yrlim = self.ylim\n elif origin[2] == \"domain\":\n xax = self.ds.coordinates.x_axis[axis_index]\n yax = self.ds.coordinates.y_axis[axis_index]\n xllim = self.ds.domain_left_edge[xax]\n xrlim = self.ds.domain_right_edge[xax]\n yllim = self.ds.domain_left_edge[yax]\n yrlim = self.ds.domain_right_edge[yax]\n elif origin[2] == \"native\":\n return (self.ds.quan(0.0, \"code_length\"), self.ds.quan(0.0, \"code_length\"))\n\n if xc is None and yc is None:\n assert origin[0] in (\"lower\", \"upper\", \"center\")\n assert origin[1] in (\"left\", \"right\", \"center\")\n\n if origin[0] == \"lower\":\n yc = yllim\n elif origin[0] == \"upper\":\n yc = yrlim\n elif origin[0] == \"center\":\n yc = (yllim + yrlim) / 2.0\n\n if origin[1] == \"left\":\n xc = xllim\n elif origin[1] == \"right\":\n xc = xrlim\n elif origin[1] == \"center\":\n xc = (xllim + xrlim) / 2.0\n\n x_in_bounds = xc >= xllim and xc <= xrlim\n y_in_bounds = yc >= yllim and yc <= yrlim\n\n if not x_in_bounds and not y_in_bounds:\n raise ValueError(\n \"origin inputs not in bounds of specified coordinate system domain; \"\n f\"got {self.origin!r} Bounds are {xllim, xrlim} and {yllim, yrlim} respectively\"\n )\n\n return xc, yc\n\n def _setup_plots(self):\n from matplotlib.mathtext import MathTextParser\n\n if self._plot_valid:\n return\n if not self._data_valid:\n self._recreate_frb()\n self._data_valid = True\n self._colorbar_valid = True\n for f in list(set(self.data_source._determine_fields(self.fields))):\n axis_index = self.data_source.axis\n\n xc, yc = self._setup_origin()\n if self.ds._uses_code_length_unit:\n # this should happen only if the dataset was initialized with\n # argument unit_system=\"code\" or if it's set to have no CGS\n # equivalent. 
This only needs to happen here in the specific\n # case that we're doing a computationally intense operation\n # like using cartopy, but it prevents crashes in that case.\n (unit_x, unit_y) = (\"code_length\", \"code_length\")\n elif self._axes_unit_names is None:\n unit = self.ds.get_smallest_appropriate_unit(\n self.xlim[1] - self.xlim[0]\n )\n unit_x = unit_y = unit\n coords = self.ds.coordinates\n if hasattr(coords, \"image_units\"):\n # check for special cases defined in\n # non cartesian CoordinateHandler subclasses\n image_units = coords.image_units[coords.axis_id[axis_index]]\n if image_units[0] in (\"deg\", \"rad\"):\n unit_x = \"code_length\"\n elif image_units[0] == 1:\n unit_x = \"dimensionless\"\n if image_units[1] in (\"deg\", \"rad\"):\n unit_y = \"code_length\"\n elif image_units[1] == 1:\n unit_y = \"dimensionless\"\n else:\n (unit_x, unit_y) = self._axes_unit_names\n\n # For some plots we may set aspect by hand, such as for spectral cube data.\n # This will likely be replaced at some point by the coordinate handler\n # setting plot aspect.\n if self.aspect is None:\n self.aspect = float(\n (self.ds.quan(1.0, unit_y) / self.ds.quan(1.0, unit_x)).in_cgs()\n )\n extentx = (self.xlim - xc)[:2]\n extenty = (self.ylim - yc)[:2]\n\n # extentx/y arrays inherit units from xlim and ylim attributes\n # and these attributes are always length even for angular and\n # dimensionless axes so we need to stip out units for consistency\n if unit_x == \"dimensionless\":\n extentx = extentx / extentx.units\n else:\n extentx.convert_to_units(unit_x)\n if unit_y == \"dimensionless\":\n extenty = extenty / extenty.units\n else:\n extenty.convert_to_units(unit_y)\n\n extent = [*extentx, *extenty]\n\n if f in self.plots.keys():\n zlim = (self.plots[f].zmin, self.plots[f].zmax)\n else:\n zlim = (None, None)\n\n image = self.frb[f]\n if self._field_transform[f] == log_transform:\n msg = None\n use_symlog = False\n if zlim != (None, None):\n pass\n elif np.nanmax(image) == np.nanmin(image):\n msg = f\"Plotting {f}: All values = {np.nanmax(image)}\"\n elif np.nanmax(image) <= 0:\n msg = (\n f\"Plotting {f}: All negative values. Max = {np.nanmax(image)}.\"\n )\n use_symlog = True\n elif not np.any(np.isfinite(image)):\n msg = f\"Plotting {f}: All values = NaN.\"\n elif np.nanmax(image) > 0.0 and np.nanmin(image) < 0:\n msg = (\n f\"Plotting {f}: Both positive and negative values. \"\n f\"Min = {np.nanmin(image)}, Max = {np.nanmax(image)}.\"\n )\n use_symlog = True\n elif (\n (Version(\"3.3\") <= MPL_VERSION < Version(\"3.5\"))\n and np.nanmax(image) > 0.0\n and np.nanmin(image) == 0\n ):\n # normally, a LogNorm scaling would still be OK here because\n # LogNorm will mask 0 values when calculating vmin. But\n # due to a bug in matplotlib's imshow, if the data range\n # spans many orders of magnitude while containing zero points\n # vmin can get rescaled to 0, resulting in an error when the image\n # gets drawn. 
So here we switch to symlog to avoid that until\n # a fix is in -- see PR #3161 and linked issue.\n cutoff_sigdigs = 15\n if (\n np.log10(np.nanmax(image[np.isfinite(image)]))\n - np.log10(np.nanmin(image[image > 0]))\n > cutoff_sigdigs\n ):\n msg = f\"Plotting {f}: Wide range and zeros.\"\n use_symlog = True\n if msg is not None:\n mylog.warning(msg)\n if use_symlog:\n mylog.warning(\"Switching to symlog colorbar scaling.\")\n self._field_transform[f] = symlog_transform\n self._field_transform[f].func = None\n else:\n mylog.warning(\"Switching to linear colorbar scaling.\")\n self._field_transform[f] = linear_transform\n\n font_size = self._font_properties.get_size()\n\n fig = None\n axes = None\n cax = None\n draw_colorbar = True\n draw_axes = True\n draw_frame = draw_axes\n if f in self.plots:\n draw_colorbar = self.plots[f]._draw_colorbar\n draw_axes = self.plots[f]._draw_axes\n draw_frame = self.plots[f]._draw_frame\n if self.plots[f].figure is not None:\n fig = self.plots[f].figure\n axes = self.plots[f].axes\n cax = self.plots[f].cax\n\n # This is for splatting particle positions with a single\n # color instead of a colormap\n if self._splat_color is not None:\n # make image a rgba array, using the splat color\n greyscale_image = self.frb[f]\n ia = np.zeros((greyscale_image.shape[0], greyscale_image.shape[1], 4))\n ia[:, :, 3] = 0.0 # set alpha to 0.0\n locs = greyscale_image > 0.0\n to_rgba = matplotlib.colors.colorConverter.to_rgba\n color_tuple = to_rgba(self._splat_color)\n ia[locs] = color_tuple\n ia = ImageArray(ia)\n else:\n ia = image\n self.plots[f] = WindowPlotMPL(\n ia,\n self._field_transform[f].name,\n self._field_transform[f].func,\n self._colormap_config[f],\n extent,\n zlim,\n self.figure_size,\n font_size,\n self.aspect,\n fig,\n axes,\n cax,\n self._projection,\n self._transform,\n )\n\n if not self._right_handed:\n ax = self.plots[f].axes\n ax.invert_xaxis()\n\n axes_unit_labels = self._get_axes_unit_labels(unit_x, unit_y)\n\n if self.oblique:\n labels = [\n r\"$\\rm{Image\\ x\" + axes_unit_labels[0] + \"}$\",\n r\"$\\rm{Image\\ y\" + axes_unit_labels[1] + \"}$\",\n ]\n else:\n coordinates = self.ds.coordinates\n axis_names = coordinates.image_axis_name[axis_index]\n xax = coordinates.x_axis[axis_index]\n yax = coordinates.y_axis[axis_index]\n\n if hasattr(coordinates, \"axis_default_unit_name\"):\n axes_unit_labels = [\n coordinates.axis_default_unit_name[xax],\n coordinates.axis_default_unit_name[yax],\n ]\n labels = [\n r\"$\\rm{\" + axis_names[0] + axes_unit_labels[0] + r\"}$\",\n r\"$\\rm{\" + axis_names[1] + axes_unit_labels[1] + r\"}$\",\n ]\n\n if hasattr(coordinates, \"axis_field\"):\n if xax in coordinates.axis_field:\n xmin, xmax = coordinates.axis_field[xax](\n 0, self.xlim, self.ylim\n )\n else:\n xmin, xmax = (float(x) for x in extentx)\n if yax in coordinates.axis_field:\n ymin, ymax = coordinates.axis_field[yax](\n 1, self.xlim, self.ylim\n )\n else:\n ymin, ymax = (float(y) for y in extenty)\n self.plots[f].image.set_extent((xmin, xmax, ymin, ymax))\n self.plots[f].axes.set_aspect(\"auto\")\n\n x_label, y_label, colorbar_label = self._get_axes_labels(f)\n\n if x_label is not None:\n labels[0] = x_label\n if y_label is not None:\n labels[1] = y_label\n\n self.plots[f].axes.set_xlabel(labels[0])\n self.plots[f].axes.set_ylabel(labels[1])\n\n color = self._background_color[f]\n\n self.plots[f].axes.set_facecolor(color)\n\n # Determine the units of the data\n units = Unit(self.frb[f].units, registry=self.ds.unit_registry)\n units = 
units.latex_representation()\n\n if colorbar_label is None:\n colorbar_label = image.info[\"label\"]\n if hasattr(self, \"projected\"):\n colorbar_label = \"$\\\\rm{Projected }$ %s\" % colorbar_label\n if units is None or units == \"\":\n pass\n else:\n colorbar_label += r\"$\\ \\ \\left(\" + units + r\"\\right)$\"\n\n parser = MathTextParser(\"Agg\")\n try:\n parser.parse(colorbar_label)\n except ParseFatalException as err:\n raise YTCannotParseUnitDisplayName(f, colorbar_label, str(err)) from err\n\n self.plots[f].cb.set_label(colorbar_label)\n\n # x-y axes minorticks\n if f not in self._minorticks:\n self._minorticks[f] = True\n if self._minorticks[f]:\n self.plots[f].axes.minorticks_on()\n else:\n self.plots[f].axes.minorticks_off()\n\n # colorbar minorticks\n if f not in self._cbar_minorticks:\n self._cbar_minorticks[f] = True\n\n if self._cbar_minorticks[f]:\n vmin = np.float64(self.plots[f].cb.norm.vmin)\n vmax = np.float64(self.plots[f].cb.norm.vmax)\n\n if self._field_transform[f] == linear_transform:\n self.plots[f].cax.minorticks_on()\n\n elif self._field_transform[f] == symlog_transform:\n if Version(\"3.2.0\") <= MPL_VERSION < Version(\"3.5.0b\"):\n # no known working method to draw symlog minor ticks\n # see https://github.com/yt-project/yt/issues/3535\n pass\n else:\n flinthresh = 10 ** np.floor(\n np.log10(self.plots[f].cb.norm.linthresh)\n )\n mticks = get_symlog_minorticks(flinthresh, vmin, vmax)\n if MPL_VERSION < Version(\"3.5.0b\"):\n # https://github.com/matplotlib/matplotlib/issues/21258\n mticks = self.plots[f].image.norm(mticks)\n self.plots[f].cax.yaxis.set_ticks(mticks, minor=True)\n\n elif self._field_transform[f] == log_transform:\n if MPL_VERSION >= Version(\"3.0.0\"):\n self.plots[f].cax.minorticks_on()\n self.plots[f].cax.xaxis.set_visible(False)\n else:\n mticks = self.plots[f].image.norm(\n get_log_minorticks(vmin, vmax)\n )\n self.plots[f].cax.yaxis.set_ticks(mticks, minor=True)\n\n else:\n mylog.error(\n \"Unable to draw cbar minorticks for field \"\n \"%s with transform %s \",\n f,\n self._field_transform[f],\n )\n self._cbar_minorticks[f] = False\n\n if not self._cbar_minorticks[f]:\n self.plots[f].cax.minorticks_off()\n\n if not draw_axes:\n self.plots[f]._toggle_axes(draw_axes, draw_frame)\n\n if not draw_colorbar:\n self.plots[f]._toggle_colorbar(draw_colorbar)\n\n self._set_font_properties()\n self.run_callbacks()\n self._plot_valid = True\n\n def setup_callbacks(self):\n ignored = [\"PlotCallback\"]\n if self._plot_type.startswith(\"OffAxis\"):\n ignored += [\n \"ParticleCallback\",\n \"ClumpContourCallback\",\n \"GridBoundaryCallback\",\n ]\n if self._plot_type == \"OffAxisProjection\":\n ignored += [\n \"VelocityCallback\",\n \"MagFieldCallback\",\n \"QuiverCallback\",\n \"CuttingQuiverCallback\",\n \"StreamlineCallback\",\n \"LineIntegralConvolutionCallback\",\n ]\n elif self._plot_type == \"Particle\":\n ignored += [\n \"HopCirclesCallback\",\n \"HopParticleCallback\",\n \"ClumpContourCallback\",\n \"GridBoundaryCallback\",\n \"VelocityCallback\",\n \"MagFieldCallback\",\n \"QuiverCallback\",\n \"CuttingQuiverCallback\",\n \"StreamlineCallback\",\n \"ContourCallback\",\n ]\n\n def missing_callback_closure(cbname):\n def _(*args, **kwargs):\n raise YTUnsupportedPlotCallback(\n callback=cbname, plot_type=self._plot_type\n )\n\n return _\n\n for key in callback_registry:\n cbname = callback_registry[key]._type_name\n\n if key in ignored:\n self.__dict__[\"annotate_\" + cbname] = missing_callback_closure(cbname)\n continue\n\n # We need to wrap to 
create a closure so that\n # CallbackMaker is bound to the wrapped method.\n def closure():\n CallbackMaker = callback_registry[key]\n\n @wraps(CallbackMaker)\n def method(*args, **kwargs):\n # We need to also do it here as \"invalidate_plot\"\n # and \"apply_callback\" require the functions'\n # __name__ in order to work properly\n @wraps(CallbackMaker)\n def cb(self, *a, **kwa):\n # We construct the callback method\n # skipping self\n return CallbackMaker(*a, **kwa)\n\n # Create callback\n cb = invalidate_plot(apply_callback(cb))\n\n return cb(self, *args, **kwargs)\n\n return method\n\n self.__dict__[\"annotate_\" + cbname] = closure()\n\n @invalidate_plot\n def clear_annotations(self, index=None):\n \"\"\"\n Clear callbacks from the plot. If index is not set, clear all\n callbacks. If index is set, clear that index (ie 0 is the first one\n created, 1 is the 2nd one created, -1 is the last one created, etc.)\n \"\"\"\n if index is None:\n self._callbacks = []\n else:\n del self._callbacks[index]\n self.setup_callbacks()\n return self\n\n def list_annotations(self):\n \"\"\"\n List the current callbacks for the plot, along with their index. This\n index can be used with `clear_annotations` to remove a callback from the\n current plot.\n \"\"\"\n for i, cb in enumerate(self._callbacks):\n print(i, cb)\n\n def run_callbacks(self):\n for f in self.fields:\n keys = self.frb.keys()\n for name, (args, kwargs) in self._callbacks:\n cbw = CallbackWrapper(\n self,\n self.plots[f],\n self.frb,\n f,\n self._font_properties,\n self._font_color,\n )\n CallbackMaker = callback_registry[name]\n callback = CallbackMaker(*args[1:], **kwargs)\n try:\n callback(cbw)\n except YTDataTypeUnsupported as e:\n raise e\n except Exception as e:\n raise YTPlotCallbackError(callback._type_name) from e\n for key in self.frb.keys():\n if key not in keys:\n del self.frb[key]\n\n def export_to_mpl_figure(\n self,\n nrows_ncols,\n axes_pad=1.0,\n label_mode=\"L\",\n cbar_location=\"right\",\n cbar_size=\"5%\",\n cbar_mode=\"each\",\n cbar_pad=\"0%\",\n ):\n r\"\"\"\n Creates a matplotlib figure object with the specified axes arrangement,\n nrows_ncols, and maps the underlying figures to the matplotlib axes.\n Note that all of these parameters are fed directly to the matplotlib ImageGrid\n class to create the new figure layout.\n\n Parameters\n ----------\n\n nrows_ncols : tuple\n the number of rows and columns of the axis grid (e.g., nrows_ncols=(2,2,))\n axes_pad : float\n padding between axes in inches\n label_mode : one of \"L\", \"1\", \"all\"\n arrangement of axes that are labeled\n cbar_location : one of \"left\", \"right\", \"bottom\", \"top\"\n where to place the colorbar\n cbar_size : string (percentage)\n scaling of the colorbar (e.g., \"5%\")\n cbar_mode : one of \"each\", \"single\", \"edge\", None\n how to represent the colorbar\n cbar_pad : string (percentage)\n padding between the axis and colorbar (e.g. 
\"5%\")\n\n Returns\n -------\n\n The return is a matplotlib figure object.\n\n Examples\n --------\n\n >>> import yt\n >>> ds = yt.load_sample(\"IsolatedGalaxy\")\n >>> fields = [\"density\", \"velocity_x\", \"velocity_y\", \"velocity_magnitude\"]\n >>> p = yt.SlicePlot(ds, \"z\", fields)\n >>> p.set_log(\"velocity_x\", False)\n >>> p.set_log(\"velocity_y\", False)\n >>> fig = p.export_to_mpl_figure((2, 2))\n >>> fig.tight_layout()\n >>> fig.savefig(\"test.png\")\n\n \"\"\"\n\n fig = plt.figure()\n grid = ImageGrid(\n fig,\n 111,\n nrows_ncols=nrows_ncols,\n axes_pad=axes_pad,\n label_mode=label_mode,\n cbar_location=cbar_location,\n cbar_size=cbar_size,\n cbar_mode=cbar_mode,\n cbar_pad=cbar_pad,\n )\n\n fields = self.fields\n if len(fields) > len(grid):\n raise IndexError(\"not enough axes for the number of fields\")\n\n for i, f in enumerate(self.fields):\n plot = self.plots[f]\n plot.figure = fig\n plot.axes = grid[i].axes\n plot.cax = grid.cbar_axes[i]\n\n self._setup_plots()\n\n return fig\n\n\nclass NormalPlot(abc.ABC):\n \"\"\"This is the abstraction for SlicePlot and ProjectionPlot, where\n we define the common sanitizing mechanism for user input (normal direction).\n \"\"\"\n\n @staticmethod\n def sanitize_normal_vector(ds, normal) -> Union[str, np.ndarray]:\n \"\"\"Return the name of a cartesian axis whener possible,\n or a 3-element 1D ndarray of float64 in any other valid case.\n Fail with a descriptive error message otherwise.\n \"\"\"\n axis_names = ds.coordinates.axis_order\n\n if isinstance(normal, str):\n if normal not in axis_names:\n names_str = \", \".join(f\"'{name}'\" for name in axis_names)\n raise ValueError(\n f\"'{normal}' is not a valid axis name. Expected one of {names_str}.\"\n )\n return normal\n\n if isinstance(normal, int):\n if normal not in (0, 1, 2):\n raise ValueError(\n f\"{normal} is not a valid axis identifier. Expected either 0, 1, or 2.\"\n )\n return axis_names[normal]\n\n if not is_sequence(normal):\n raise TypeError(\n f\"{normal} is not a valid normal vector identifier. \"\n \"Expected a string, integer or sequence of 3 floats.\"\n )\n\n if len(normal) != 3:\n raise ValueError(\n f\"{normal} with length {len(normal)} is not a valid normal vector. 
\"\n \"Expected a 3-element sequence.\"\n )\n\n try:\n retv = np.array(normal, dtype=\"float64\")\n if retv.shape != (3,):\n raise ValueError(f\"{normal} is incorrectly shaped.\")\n except ValueError as exc:\n raise TypeError(f\"{normal} is not a valid normal vector.\") from exc\n\n nonzero_idx = np.nonzero(retv)[0]\n if len(nonzero_idx) == 0:\n raise ValueError(f\"A null vector {normal} isn't a valid normal vector.\")\n if len(nonzero_idx) == 1:\n return axis_names[nonzero_idx[0]]\n\n return retv\n\n @staticmethod\n def _validate_init_args(*, normal, axis, fields) -> None:\n # TODO: remove this method in yt 4.2\n\n if axis is not None:\n issue_deprecation_warning(\n \"Argument 'axis' is a deprecated alias for 'normal'.\",\n since=\"4.1.0\",\n removal=\"4.2.0\",\n )\n if normal is not None:\n raise TypeError(\"Received incompatible arguments 'axis' and 'normal'\")\n normal = axis\n\n if normal is fields is None:\n raise TypeError(\n \"missing 2 required positional arguments: 'normal' and 'fields'\"\n )\n\n if fields is None:\n raise TypeError(\"missing required positional argument: 'fields'\")\n\n if normal is None:\n raise TypeError(\"missing required positional argument: 'normal'\")\n\n return normal\n\n\nclass SlicePlot(NormalPlot):\n r\"\"\"\n A dispatch class for :class:`yt.visualization.plot_window.AxisAlignedSlicePlot`\n and :class:`yt.visualization.plot_window.OffAxisSlicePlot` objects. This\n essentially allows for a single entry point to both types of slice plots,\n the distinction being determined by the specified normal vector to the\n projection.\n\n The returned plot object can be updated using one of the many helper\n functions defined in PlotWindow.\n\n Parameters\n ----------\n\n ds : :class:`yt.data_objects.static_output.Dataset`\n This is the dataset object corresponding to the\n simulation output to be plotted.\n normal : int, str, or 3-element sequence of floats\n This specifies the normal vector to the slice.\n Valid int values are 0, 1 and 2. Coresponding str values depend on the\n geometry of the dataset and are generally given by `ds.coordinates.axis_order`.\n E.g. in cartesian they are 'x', 'y' and 'z'.\n An arbitrary normal vector may be specified as a 3-element sequence of floats.\n\n This returns a :class:`OffAxisSlicePlot` object or a\n :class:`AxisAlignedSlicePlot` object, depending on wether the requested\n normal directions corresponds to a natural axis of the dataset's geometry.\n\n fields : a (or a list of) 2-tuple of strings (ftype, fname)\n The name of the field(s) to be plotted.\n\n The following are nominally keyword arguments passed onto the respective\n slice plot objects generated by this function.\n\n Keyword Arguments\n -----------------\n\n center : A sequence floats, a string, or a tuple.\n The coordinate of the center of the image. If set to 'c', 'center' or\n left blank, the plot is centered on the middle of the domain. If set to\n 'max' or 'm', the center will be located at the maximum of the\n ('gas', 'density') field. Centering on the max or min of a specific\n field is supported by providing a tuple such as (\"min\",\"temperature\") or\n (\"max\",\"dark_matter_density\"). Units can be specified by passing in *center*\n as a tuple containing a coordinate and string unit name or by passing\n in a YTArray. If a list or unitless array is supplied, code units are\n assumed.\n width : tuple or a float.\n Width can have four different formats to support windows with variable\n x and y widths. 
They are:\n\n ================================== =======================\n format example\n ================================== =======================\n (float, string) (10,'kpc')\n ((float, string), (float, string)) ((10,'kpc'),(15,'kpc'))\n float 0.2\n (float, float) (0.2, 0.3)\n ================================== =======================\n\n For example, (10, 'kpc') requests a plot window that is 10 kiloparsecs\n wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a\n window that is 10 kiloparsecs wide along the x axis and 15\n kiloparsecs wide along the y axis. In the other two examples, code\n units are assumed, for example (0.2, 0.3) requests a plot that has an\n x width of 0.2 and a y width of 0.3 in code units. If units are\n provided the resulting plot axis labels will use the supplied units.\n axes_unit : string\n The name of the unit for the tick labels on the x and y axes.\n Defaults to None, which automatically picks an appropriate unit.\n If axes_unit is '1', 'u', or 'unitary', it will not display the\n units, and only show the axes name.\n origin : string or length 1, 2, or 3 sequence.\n The location of the origin of the plot coordinate system for\n `AxisAlignedSlicePlot` object; for `OffAxisSlicePlot` objects this\n parameter is discarded. This is typically represented by a '-'\n separated string or a tuple of strings. In the first index the\n y-location is given by 'lower', 'upper', or 'center'. The second index\n is the x-location, given as 'left', 'right', or 'center'. Finally, the\n whether the origin is applied in 'domain' space, plot 'window' space or\n 'native' simulation coordinate system is given. For example, both\n 'upper-right-domain' and ['upper', 'right', 'domain'] place the\n origin in the upper right hand corner of domain space. If x or y\n are not given, a value is inferred. For instance, 'left-domain'\n corresponds to the lower-left hand corner of the simulation domain,\n 'center-domain' corresponds to the center of the simulation domain,\n or 'center-window' for the center of the plot window. In the event\n that none of these options place the origin in a desired location,\n a sequence of tuples and a string specifying the\n coordinate space can be given. If plain numeric types are input,\n units of `code_length` are assumed. Further examples:\n\n =============================================== ===============================\n format example\n =============================================== ===============================\n '{space}' 'domain'\n '{xloc}-{space}' 'left-window'\n '{yloc}-{space}' 'upper-domain'\n '{yloc}-{xloc}-{space}' 'lower-right-window'\n ('{space}',) ('window',)\n ('{xloc}', '{space}') ('right', 'domain')\n ('{yloc}', '{space}') ('lower', 'window')\n ('{yloc}', '{xloc}', '{space}') ('lower', 'right', 'window')\n ((yloc, '{unit}'), (xloc, '{unit}'), '{space}') ((0, 'm'), (.4, 'm'), 'window')\n (xloc, yloc, '{space}') (0.23, 0.5, 'domain')\n =============================================== ===============================\n north_vector : a sequence of floats\n A vector defining the 'up' direction in the `OffAxisSlicePlot`; not\n used in `AxisAlignedSlicePlot`. This option sets the orientation of the\n slicing plane. 
If not set, an arbitrary grid-aligned north-vector is\n chosen.\n fontsize : integer\n The size of the fonts for the axis, colorbar, and tick labels.\n field_parameters : dictionary\n A dictionary of field parameters than can be accessed by derived\n fields.\n data_source : YTSelectionContainer Object\n Object to be used for data selection. Defaults to a region covering\n the entire simulation.\n\n Raises\n ------\n\n ValueError or TypeError\n If `normal` cannot be interpreted as a valid normal direction.\n\n Examples\n --------\n\n >>> from yt import load\n >>> ds = load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")\n >>> slc = SlicePlot(ds, \"x\", (\"gas\", \"density\"), center=[0.2, 0.3, 0.4])\n\n >>> slc = SlicePlot(\n ... ds, [0.4, 0.2, -0.1], (\"gas\", \"pressure\"), north_vector=[0.2, -0.3, 0.1]\n ... )\n\n \"\"\"\n\n # ignoring type check here, because mypy doesn't allow __new__ methods to\n # return instances of subclasses. The design we use here is however based\n # on the pathlib.Path class from the standard library\n # https://github.com/python/mypy/issues/1020\n def __new__( # type: ignore\n cls, ds, normal=None, fields=None, *args, axis=None, **kwargs\n ) -> Union[\"AxisAlignedSlicePlot\", \"OffAxisSlicePlot\"]:\n # TODO: in yt 4.2, remove default values for normal and fields, drop axis kwarg\n normal = cls._validate_init_args(normal=normal, axis=axis, fields=fields)\n\n if cls is SlicePlot:\n normal = cls.sanitize_normal_vector(ds, normal)\n if isinstance(normal, str):\n cls = AxisAlignedSlicePlot\n else:\n cls = OffAxisSlicePlot\n self = object.__new__(cls)\n return self\n\n\nclass ProjectionPlot(NormalPlot):\n r\"\"\"\n A dispatch class for :class:`yt.visualization.plot_window.AxisAlignedProjectionPlot`\n and :class:`yt.visualization.plot_window.OffAxisProjectionPlot` objects. This\n essentially allows for a single entry point to both types of projection plots,\n the distinction being determined by the specified normal vector to the\n slice.\n\n The returned plot object can be updated using one of the many helper\n functions defined in PlotWindow.\n\n Parameters\n ----------\n\n ds : :class:`yt.data_objects.static_output.Dataset`\n This is the dataset object corresponding to the\n simulation output to be plotted.\n normal : int, str, or 3-element sequence of floats\n This specifies the normal vector to the slice.\n Valid int values are 0, 1 and 2. Coresponding str values depend on the\n geometry of the dataset and are generally given by `ds.coordinates.axis_order`.\n E.g. in cartesian they are 'x', 'y' and 'z'.\n An arbitrary normal vector may be specified as a 3-element sequence of floats.\n\n This function will return a :class:`OffAxisProjectionPlot` object or a\n :class:`AxisAlignedProjectionPlot` object, depending on wether the requested\n normal directions corresponds to a natural axis of the dataset's geometry.\n\n fields : a (or a list of) 2-tuple of strings (ftype, fname)\n The name of the field(s) to be plotted.\n\n\n Any additional positional and keyword arguments are passed down to the appropriate\n return class. See :class:`yt.visualization.plot_window.AxisAlignedProjectionPlot`\n and :class:`yt.visualization.plot_window.OffAxisProjectionPlot`.\n\n Raises\n ------\n\n ValueError or TypeError\n If `normal` cannot be interpreted as a valid normal direction.\n\n \"\"\"\n\n # ignoring type check here, because mypy doesn't allow __new__ methods to\n # return instances of subclasses. 
The design we use here is however based\n # on the pathlib.Path class from the standard library\n # https://github.com/python/mypy/issues/1020\n def __new__( # type: ignore\n cls, ds, normal=None, fields=None, *args, axis=None, **kwargs\n ) -> Union[\"AxisAlignedProjectionPlot\", \"OffAxisProjectionPlot\"]:\n # TODO: in yt 4.2, remove default values for normal and fields, drop axis kwarg\n normal = cls._validate_init_args(normal=normal, axis=axis, fields=fields)\n\n if cls is ProjectionPlot:\n normal = cls.sanitize_normal_vector(ds, normal)\n if isinstance(normal, str):\n cls = AxisAlignedProjectionPlot\n else:\n cls = OffAxisProjectionPlot\n self = object.__new__(cls)\n return self\n\n\nclass AxisAlignedSlicePlot(SlicePlot, PWViewerMPL):\n r\"\"\"Creates a slice plot from a dataset\n\n Given a ds object, an axis to slice along, and a field name\n string, this will return a PWViewerMPL object containing\n the plot.\n\n The plot can be updated using one of the many helper functions\n defined in PlotWindow.\n\n Parameters\n ----------\n ds : `Dataset`\n This is the dataset object corresponding to the\n simulation output to be plotted.\n normal : int or one of 'x', 'y', 'z'\n An int corresponding to the axis to slice along (0=x, 1=y, 2=z)\n or the axis name itself\n fields : string\n The name of the field(s) to be plotted.\n center : A sequence of floats, a string, or a tuple.\n The coordinate of the center of the image. If set to 'c', 'center' or\n left blank, the plot is centered on the middle of the domain. If set to\n 'max' or 'm', the center will be located at the maximum of the\n ('gas', 'density') field. Centering on the max or min of a specific\n field is supported by providing a tuple such as (\"min\",\"temperature\") or\n (\"max\",\"dark_matter_density\"). Units can be specified by passing in *center*\n as a tuple containing a coordinate and string unit name or by passing\n in a YTArray. If a list or unitless array is supplied, code units are\n assumed.\n width : tuple or a float.\n Width can have four different formats to support windows with variable\n x and y widths. They are:\n\n ================================== =======================\n format example\n ================================== =======================\n (float, string) (10,'kpc')\n ((float, string), (float, string)) ((10,'kpc'),(15,'kpc'))\n float 0.2\n (float, float) (0.2, 0.3)\n ================================== =======================\n\n For example, (10, 'kpc') requests a plot window that is 10 kiloparsecs\n wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a\n window that is 10 kiloparsecs wide along the x axis and 15\n kiloparsecs wide along the y axis. In the other two examples, code\n units are assumed, for example (0.2, 0.3) requests a plot that has an\n x width of 0.2 and a y width of 0.3 in code units. If units are\n provided the resulting plot axis labels will use the supplied units.\n origin : string or length 1, 2, or 3 sequence.\n The location of the origin of the plot coordinate system. This\n is typically represented by a '-' separated string or a tuple of\n strings. In the first index the y-location is given by 'lower',\n 'upper', or 'center'. The second index is the x-location, given as\n 'left', 'right', or 'center'. Finally, whether the origin is\n applied in 'domain' space, plot 'window' space or 'native'\n simulation coordinate system is given. 
For example, both\n 'upper-right-domain' and ['upper', 'right', 'domain'] place the\n origin in the upper right hand corner of domain space. If x or y\n are not given, a value is inferred. For instance, 'left-domain'\n corresponds to the lower-left hand corner of the simulation domain,\n 'center-domain' corresponds to the center of the simulation domain,\n or 'center-window' for the center of the plot window. In the event\n that none of these options place the origin in a desired location,\n a sequence of tuples and a string specifying the\n coordinate space can be given. If plain numeric types are input,\n units of `code_length` are assumed. Further examples:\n\n =============================================== ===============================\n format example\n =============================================== ===============================\n '{space}' 'domain'\n '{xloc}-{space}' 'left-window'\n '{yloc}-{space}' 'upper-domain'\n '{yloc}-{xloc}-{space}' 'lower-right-window'\n ('{space}',) ('window',)\n ('{xloc}', '{space}') ('right', 'domain')\n ('{yloc}', '{space}') ('lower', 'window')\n ('{yloc}', '{xloc}', '{space}') ('lower', 'right', 'window')\n ((yloc, '{unit}'), (xloc, '{unit}'), '{space}') ((0, 'm'), (.4, 'm'), 'window')\n (xloc, yloc, '{space}') (0.23, 0.5, 'domain')\n =============================================== ===============================\n axes_unit : string\n The name of the unit for the tick labels on the x and y axes.\n Defaults to None, which automatically picks an appropriate unit.\n If axes_unit is '1', 'u', or 'unitary', it will not display the\n units, and only show the axes name.\n right_handed : boolean\n Whether the implicit east vector for the image generated is set to make a right\n handed coordinate system with a normal vector, the direction of the\n 'window' into the data.\n fontsize : integer\n The size of the fonts for the axis, colorbar, and tick labels.\n field_parameters : dictionary\n A dictionary of field parameters than can be accessed by derived\n fields.\n data_source: YTSelectionContainer object\n Object to be used for data selection. Defaults to ds.all_data(), a\n region covering the full domain\n buff_size: length 2 sequence\n Size of the buffer to use for the image, i.e. the number of resolution elements\n used. 
Effectively sets a resolution limit to the image if buff_size is\n smaller than the finest gridding.\n\n Examples\n --------\n\n This will save an image in the file 'sliceplot_Density.png'\n\n >>> from yt import load\n >>> ds = load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")\n >>> p = SlicePlot(ds, 2, \"density\", \"c\", (20, \"kpc\"))\n >>> p.save(\"sliceplot\")\n\n \"\"\"\n _plot_type = \"Slice\"\n _frb_generator = FixedResolutionBuffer\n\n def __init__(\n self,\n ds,\n normal=None,\n fields=None,\n center=\"c\",\n width=None,\n axes_unit=None,\n origin=\"center-window\",\n right_handed=True,\n fontsize=18,\n field_parameters=None,\n window_size=8.0,\n aspect=None,\n data_source=None,\n buff_size=(800, 800),\n *,\n north_vector=None,\n axis=None,\n ):\n # TODO: in yt 4.2, remove default values for normal and fields, drop axis kwarg\n if north_vector is not None:\n # this kwarg exists only for symmetry reasons with OffAxisSlicePlot\n mylog.warning(\n \"Ignoring 'north_vector' keyword as it is ill-defined for \"\n \"an AxisAlignedSlicePlot object.\"\n )\n del north_vector\n\n normal = self._validate_init_args(\n normal=normal,\n axis=axis,\n fields=fields,\n )\n normal = self.sanitize_normal_vector(ds, normal)\n # this will handle time series data and controllers\n axis = fix_axis(normal, ds)\n (bounds, center, display_center) = get_window_parameters(\n axis, center, width, ds\n )\n if field_parameters is None:\n field_parameters = {}\n\n if ds.geometry in (\n \"spherical\",\n \"cylindrical\",\n \"geographic\",\n \"internal_geographic\",\n ):\n mylog.info(\"Setting origin='native' for %s geometry.\", ds.geometry)\n origin = \"native\"\n\n if isinstance(ds, YTSpatialPlotDataset):\n slc = ds.all_data()\n slc.axis = axis\n if slc.axis != ds.parameters[\"axis\"]:\n raise RuntimeError(f\"Original slice axis is {ds.parameters['axis']}.\")\n else:\n slc = ds.slice(\n axis,\n center[axis],\n field_parameters=field_parameters,\n center=center,\n data_source=data_source,\n )\n slc.get_data(fields)\n validate_mesh_fields(slc, fields)\n PWViewerMPL.__init__(\n self,\n slc,\n bounds,\n origin=origin,\n fontsize=fontsize,\n fields=fields,\n window_size=window_size,\n aspect=aspect,\n right_handed=right_handed,\n buff_size=buff_size,\n )\n if axes_unit is None:\n axes_unit = get_axes_unit(width, ds)\n self.set_axes_unit(axes_unit)\n\n\nclass AxisAlignedProjectionPlot(ProjectionPlot, PWViewerMPL):\n r\"\"\"Creates a projection plot from a dataset\n\n Given a ds object, an axis to project along, and a field name\n string, this will return a PWViewerMPL object containing\n the plot.\n\n The plot can be updated using one of the many helper functions\n defined in PlotWindow.\n\n Parameters\n ----------\n ds : `Dataset`\n This is the dataset object corresponding to the\n simulation output to be plotted.\n normal : int or one of 'x', 'y', 'z'\n An int corresponding to the axis to slice along (0=x, 1=y, 2=z)\n or the axis name itself\n fields : string\n The name of the field(s) to be plotted.\n center : A sequence of floats, a string, or a tuple.\n The coordinate of the center of the image. If set to 'c', 'center' or\n left blank, the plot is centered on the middle of the domain. If set to\n 'max' or 'm', the center will be located at the maximum of the\n ('gas', 'density') field. Centering on the max or min of a specific\n field is supported by providing a tuple such as (\"min\",\"temperature\") or\n (\"max\",\"dark_matter_density\"). 
Units can be specified by passing in *center*\n as a tuple containing a coordinate and string unit name or by passing\n in a YTArray. If a list or unitless array is supplied, code units are\n assumed.\n width : tuple or a float.\n Width can have four different formats to support windows with variable\n x and y widths. They are:\n\n ================================== =======================\n format example\n ================================== =======================\n (float, string) (10,'kpc')\n ((float, string), (float, string)) ((10,'kpc'),(15,'kpc'))\n float 0.2\n (float, float) (0.2, 0.3)\n ================================== =======================\n\n For example, (10, 'kpc') requests a plot window that is 10 kiloparsecs\n wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a\n window that is 10 kiloparsecs wide along the x axis and 15\n kiloparsecs wide along the y axis. In the other two examples, code\n units are assumed, for example (0.2, 0.3) requests a plot that has an\n x width of 0.2 and a y width of 0.3 in code units. If units are\n provided the resulting plot axis labels will use the supplied units.\n axes_unit : string\n The name of the unit for the tick labels on the x and y axes.\n Defaults to None, which automatically picks an appropriate unit.\n If axes_unit is '1', 'u', or 'unitary', it will not display the\n units, and only show the axes name.\n origin : string or length 1, 2, or 3 sequence.\n The location of the origin of the plot coordinate system. This\n is typically represented by a '-' separated string or a tuple of\n strings. In the first index the y-location is given by 'lower',\n 'upper', or 'center'. The second index is the x-location, given as\n 'left', 'right', or 'center'. Finally, whether the origin is\n applied in 'domain' space, plot 'window' space or 'native'\n simulation coordinate system is given. For example, both\n 'upper-right-domain' and ['upper', 'right', 'domain'] place the\n origin in the upper right hand corner of domain space. If x or y\n are not given, a value is inferred. For instance, 'left-domain'\n corresponds to the lower-left hand corner of the simulation domain,\n 'center-domain' corresponds to the center of the simulation domain,\n or 'center-window' for the center of the plot window. In the event\n that none of these options place the origin in a desired location,\n a sequence of tuples and a string specifying the\n coordinate space can be given. If plain numeric types are input,\n units of `code_length` are assumed. Further examples:\n\n =============================================== ===============================\n format example\n =============================================== ===============================\n '{space}' 'domain'\n '{xloc}-{space}' 'left-window'\n '{yloc}-{space}' 'upper-domain'\n '{yloc}-{xloc}-{space}' 'lower-right-window'\n ('{space}',) ('window',)\n ('{xloc}', '{space}') ('right', 'domain')\n ('{yloc}', '{space}') ('lower', 'window')\n ('{yloc}', '{xloc}', '{space}') ('lower', 'right', 'window')\n ((yloc, '{unit}'), (xloc, '{unit}'), '{space}') ((0, 'm'), (.4, 'm'), 'window')\n (xloc, yloc, '{space}') (0.23, 0.5, 'domain')\n =============================================== ===============================\n\n right_handed : boolean\n Whether the implicit east vector for the image generated is set to make a right\n handed coordinate system with the direction of the\n 'window' into the data.\n data_source : YTSelectionContainer Object\n Object to be used for data selection. 
Defaults to a region covering\n the entire simulation.\n weight_field : string\n The name of the weighting field. Set to None for no weight.\n max_level: int\n The maximum level to project to.\n fontsize : integer\n The size of the fonts for the axis, colorbar, and tick labels.\n method : string\n The method of projection. Valid methods are:\n\n \"integrate\" with no weight_field specified : integrate the requested\n field along the line of sight.\n\n \"integrate\" with a weight_field specified : weight the requested\n field by the weighting field and integrate along the line of sight.\n\n \"mip\" : pick out the maximum value of the field in the line of sight.\n\n \"sum\" : This method is the same as integrate, except that it does not\n multiply by a path length when performing the integration, and is\n just a straight summation of the field along the given axis. WARNING:\n This should only be used for uniform resolution grid datasets, as other\n datasets may result in unphysical images.\n window_size : float\n The size of the window in inches. Set to 8 by default.\n aspect : float\n The aspect ratio of the plot. Set to None for 1.\n field_parameters : dictionary\n A dictionary of field parameters than can be accessed by derived\n fields.\n data_source: YTSelectionContainer object\n Object to be used for data selection. Defaults to ds.all_data(), a\n region covering the full domain\n buff_size: length 2 sequence\n Size of the buffer to use for the image, i.e. the number of resolution elements\n used. Effectively sets a resolution limit to the image if buff_size is\n smaller than the finest gridding.\n\n Examples\n --------\n\n Create a projection plot with a width of 20 kiloparsecs centered on the\n center of the simulation box:\n\n >>> from yt import load\n >>> ds = load(\"IsolateGalaxygalaxy0030/galaxy0030\")\n >>> p = AxisAlignedProjectionPlot(ds, \"z\", (\"gas\", \"density\"), width=(20, \"kpc\"))\n\n \"\"\"\n _plot_type = \"Projection\"\n _frb_generator = FixedResolutionBuffer\n\n def __init__(\n self,\n ds,\n normal=None,\n fields=None,\n center=\"c\",\n width=None,\n axes_unit=None,\n weight_field=None,\n max_level=None,\n origin=\"center-window\",\n right_handed=True,\n fontsize=18,\n field_parameters=None,\n data_source=None,\n method=\"integrate\",\n window_size=8.0,\n buff_size=(800, 800),\n aspect=None,\n *,\n axis=None,\n ):\n # TODO: in yt 4.2, remove default values for normal and fields, drop axis kwarg\n normal = self._validate_init_args(normal=normal, fields=fields, axis=axis)\n normal = self.sanitize_normal_vector(ds, normal)\n\n axis = fix_axis(normal, ds)\n if ds.geometry in (\n \"spherical\",\n \"cylindrical\",\n \"geographic\",\n \"internal_geographic\",\n ):\n mylog.info(\"Setting origin='native' for %s geometry.\", ds.geometry)\n origin = \"native\"\n # If a non-weighted integral projection, assure field-label reflects that\n if weight_field is None and method == \"integrate\":\n self.projected = True\n (bounds, center, display_center) = get_window_parameters(\n axis, center, width, ds\n )\n if field_parameters is None:\n field_parameters = {}\n\n # We don't use the plot's data source for validation like in the other\n # plotting classes to avoid an exception\n test_data_source = ds.all_data()\n validate_mesh_fields(test_data_source, fields)\n\n if isinstance(ds, YTSpatialPlotDataset):\n proj = ds.all_data()\n proj.axis = axis\n if proj.axis != ds.parameters[\"axis\"]:\n raise RuntimeError(\n f\"Original projection axis is {ds.parameters['axis']}.\"\n )\n if 
weight_field is not None:\n proj.weight_field = proj._determine_fields(weight_field)[0]\n else:\n proj.weight_field = weight_field\n proj.center = center\n else:\n proj = ds.proj(\n fields,\n axis,\n weight_field=weight_field,\n center=center,\n data_source=data_source,\n field_parameters=field_parameters,\n method=method,\n max_level=max_level,\n )\n PWViewerMPL.__init__(\n self,\n proj,\n bounds,\n fields=fields,\n origin=origin,\n right_handed=right_handed,\n fontsize=fontsize,\n window_size=window_size,\n aspect=aspect,\n buff_size=buff_size,\n )\n if axes_unit is None:\n axes_unit = get_axes_unit(width, ds)\n self.set_axes_unit(axes_unit)\n\n\nclass OffAxisSlicePlot(SlicePlot, PWViewerMPL):\n r\"\"\"Creates an off axis slice plot from a dataset\n\n Given a ds object, a normal vector defining a slicing plane, and\n a field name string, this will return a PWViewerMPL object\n containing the plot.\n\n The plot can be updated using one of the many helper functions\n defined in PlotWindow.\n\n Parameters\n ----------\n ds : :class:`yt.data_objects.static_output.Dataset`\n This is the dataset object corresponding to the\n simulation output to be plotted.\n normal : a sequence of floats\n The vector normal to the slicing plane.\n fields : string\n The name of the field(s) to be plotted.\n center : A sequence of floats, a string, or a tuple.\n The coordinate of the center of the image. If set to 'c', 'center' or\n left blank, the plot is centered on the middle of the domain. If set to\n 'max' or 'm', the center will be located at the maximum of the\n ('gas', 'density') field. Centering on the max or min of a specific\n field is supported by providing a tuple such as (\"min\",\"temperature\") or\n (\"max\",\"dark_matter_density\"). Units can be specified by passing in *center*\n as a tuple containing a coordinate and string unit name or by passing\n in a YTArray. If a list or unitless array is supplied, code units are\n assumed.\n width : tuple or a float.\n Width can have four different formats to support windows with variable\n x and y widths. They are:\n\n ================================== =======================\n format example\n ================================== =======================\n (float, string) (10,'kpc')\n ((float, string), (float, string)) ((10,'kpc'),(15,'kpc'))\n float 0.2\n (float, float) (0.2, 0.3)\n ================================== =======================\n\n For example, (10, 'kpc') requests a plot window that is 10 kiloparsecs\n wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a\n window that is 10 kiloparsecs wide along the x axis and 15\n kiloparsecs wide along the y axis. In the other two examples, code\n units are assumed, for example (0.2, 0.3) requests a plot that has an\n x width of 0.2 and a y width of 0.3 in code units. If units are\n provided the resulting plot axis labels will use the supplied units.\n axes_unit : string\n The name of the unit for the tick labels on the x and y axes.\n Defaults to None, which automatically picks an appropriate unit.\n If axes_unit is '1', 'u', or 'unitary', it will not display the\n units, and only show the axes name.\n north_vector : a sequence of floats\n A vector defining the 'up' direction in the plot. This\n option sets the orientation of the slicing plane. 
If not\n set, an arbitrary grid-aligned north-vector is chosen.\n right_handed : boolean\n Whether the implicit east vector for the image generated is set to make a right\n handed coordinate system with the north vector and the normal, the direction of\n the 'window' into the data.\n fontsize : integer\n The size of the fonts for the axis, colorbar, and tick labels.\n field_parameters : dictionary\n A dictionary of field parameters than can be accessed by derived\n fields.\n data_source : YTSelectionContainer Object\n Object to be used for data selection. Defaults ds.all_data(), a\n region covering the full domain.\n buff_size: length 2 sequence\n Size of the buffer to use for the image, i.e. the number of resolution elements\n used. Effectively sets a resolution limit to the image if buff_size is\n smaller than the finest gridding.\n \"\"\"\n\n _plot_type = \"OffAxisSlice\"\n _frb_generator = FixedResolutionBuffer\n\n def __init__(\n self,\n ds,\n normal,\n fields,\n center=\"c\",\n width=None,\n axes_unit=None,\n north_vector=None,\n right_handed=True,\n fontsize=18,\n field_parameters=None,\n data_source=None,\n buff_size=(800, 800),\n *,\n origin=None,\n ):\n if origin is not None:\n # this kwarg exists only for symmetry reasons with AxisAlignedSlicePlot\n # in OffAxisSlicePlot, the origin is hardcoded\n mylog.warning(\n \"Ignoring 'origin' keyword as it is ill-defined for \"\n \"an OffAxisSlicePlot object.\"\n )\n del origin\n\n (bounds, center_rot) = get_oblique_window_parameters(normal, center, width, ds)\n if field_parameters is None:\n field_parameters = {}\n\n if isinstance(ds, YTSpatialPlotDataset):\n cutting = ds.all_data()\n cutting.axis = 4\n cutting._inv_mat = ds.parameters[\"_inv_mat\"]\n else:\n cutting = ds.cutting(\n normal,\n center,\n north_vector=north_vector,\n field_parameters=field_parameters,\n data_source=data_source,\n )\n cutting.get_data(fields)\n validate_mesh_fields(cutting, fields)\n # Hard-coding the origin keyword since the other two options\n # aren't well-defined for off-axis data objects\n PWViewerMPL.__init__(\n self,\n cutting,\n bounds,\n fields=fields,\n origin=\"center-window\",\n periodic=False,\n right_handed=right_handed,\n oblique=True,\n fontsize=fontsize,\n buff_size=buff_size,\n )\n if axes_unit is None:\n axes_unit = get_axes_unit(width, ds)\n self.set_axes_unit(axes_unit)\n\n\nclass OffAxisProjectionDummyDataSource:\n _type_name = \"proj\"\n _key_fields: List[str] = []\n\n def __init__(\n self,\n center,\n ds,\n normal_vector,\n width,\n fields,\n interpolated,\n weight=None,\n volume=None,\n no_ghost=False,\n le=None,\n re=None,\n north_vector=None,\n method=\"integrate\",\n data_source=None,\n ):\n self.center = center\n self.ds = ds\n self.axis = 4 # always true for oblique data objects\n self.normal_vector = normal_vector\n self.width = width\n if data_source is None:\n self.dd = ds.all_data()\n else:\n self.dd = data_source\n fields = self.dd._determine_fields(fields)\n self.fields = fields\n self.interpolated = interpolated\n if weight is not None:\n weight = self.dd._determine_fields(weight)[0]\n self.weight_field = weight\n self.volume = volume\n self.no_ghost = no_ghost\n self.le = le\n self.re = re\n self.north_vector = north_vector\n self.method = method\n self.orienter = Orientation(normal_vector, north_vector=north_vector)\n\n def _determine_fields(self, *args):\n return self.dd._determine_fields(*args)\n\n\nclass OffAxisProjectionPlot(ProjectionPlot, PWViewerMPL):\n r\"\"\"Creates an off axis projection plot from a 
dataset\n\n Given a ds object, a normal vector to project along, and\n a field name string, this will return a PWViewerMPL object\n containing the plot.\n\n The plot can be updated using one of the many helper functions\n defined in PlotWindow.\n\n Parameters\n ----------\n ds : :class:`yt.data_objects.static_output.Dataset`\n This is the dataset object corresponding to the\n simulation output to be plotted.\n normal : a sequence of floats\n The vector normal to the slicing plane.\n fields : string\n The name of the field(s) to be plotted.\n center : A sequence of floats, a string, or a tuple.\n The coordinate of the center of the image. If set to 'c', 'center' or\n left blank, the plot is centered on the middle of the domain. If set to\n 'max' or 'm', the center will be located at the maximum of the\n ('gas', 'density') field. Centering on the max or min of a specific\n field is supported by providing a tuple such as (\"min\",\"temperature\") or\n (\"max\",\"dark_matter_density\"). Units can be specified by passing in *center*\n as a tuple containing a coordinate and string unit name or by passing\n in a YTArray. If a list or unitless array is supplied, code units are\n assumed.\n width : tuple or a float.\n Width can have four different formats to support windows with variable\n x and y widths. They are:\n\n ================================== =======================\n format example\n ================================== =======================\n (float, string) (10,'kpc')\n ((float, string), (float, string)) ((10,'kpc'),(15,'kpc'))\n float 0.2\n (float, float) (0.2, 0.3)\n ================================== =======================\n\n For example, (10, 'kpc') requests a plot window that is 10 kiloparsecs\n wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a\n window that is 10 kiloparsecs wide along the x axis and 15\n kiloparsecs wide along the y axis. In the other two examples, code\n units are assumed, for example (0.2, 0.3) requests a plot that has an\n x width of 0.2 and a y width of 0.3 in code units. If units are\n provided the resulting plot axis labels will use the supplied units.\n depth : A tuple or a float\n A tuple containing the depth to project through and the string\n key of the unit: (width, 'unit'). If set to a float, code units\n are assumed\n weight_field : string\n The name of the weighting field. Set to None for no weight.\n max_level: int\n The maximum level to project to.\n axes_unit : string\n The name of the unit for the tick labels on the x and y axes.\n Defaults to None, which automatically picks an appropriate unit.\n If axes_unit is '1', 'u', or 'unitary', it will not display the\n units, and only show the axes name.\n north_vector : a sequence of floats\n A vector defining the 'up' direction in the plot. This\n option sets the orientation of the slicing plane. If not\n set, an arbitrary grid-aligned north-vector is chosen.\n right_handed : boolean\n Whether the implicit east vector for the image generated is set to make a right\n handed coordinate system with the north vector and the normal, the direction of\n the 'window' into the data.\n fontsize : integer\n The size of the fonts for the axis, colorbar, and tick labels.\n method : string\n The method of projection. 
Valid methods are:\n\n \"integrate\" with no weight_field specified : integrate the requested\n field along the line of sight.\n\n \"integrate\" with a weight_field specified : weight the requested\n field by the weighting field and integrate along the line of sight.\n\n \"sum\" : This method is the same as integrate, except that it does not\n multiply by a path length when performing the integration, and is\n just a straight summation of the field along the given axis. WARNING:\n This should only be used for uniform resolution grid datasets, as other\n datasets may result in unphysical images.\n data_source: YTSelectionContainer object\n Object to be used for data selection. Defaults to ds.all_data(), a\n region covering the full domain\n buff_size: length 2 sequence\n Size of the buffer to use for the image, i.e. the number of resolution elements\n used. Effectively sets a resolution limit to the image if buff_size is\n smaller than the finest gridding.\n \"\"\"\n _plot_type = \"OffAxisProjection\"\n _frb_generator = OffAxisProjectionFixedResolutionBuffer\n\n def __init__(\n self,\n ds,\n normal,\n fields,\n center=\"c\",\n width=None,\n depth=(1, \"1\"),\n axes_unit=None,\n weight_field=None,\n max_level=None,\n north_vector=None,\n right_handed=True,\n volume=None,\n no_ghost=False,\n le=None,\n re=None,\n interpolated=False,\n fontsize=18,\n method=\"integrate\",\n data_source=None,\n buff_size=(800, 800),\n ):\n (bounds, center_rot) = get_oblique_window_parameters(\n normal, center, width, ds, depth=depth\n )\n fields = list(iter_fields(fields))[:]\n oap_width = ds.arr(\n (bounds[1] - bounds[0], bounds[3] - bounds[2], bounds[5] - bounds[4])\n )\n OffAxisProj = OffAxisProjectionDummyDataSource(\n center_rot,\n ds,\n normal,\n oap_width,\n fields,\n interpolated,\n weight=weight_field,\n volume=volume,\n no_ghost=no_ghost,\n le=le,\n re=re,\n north_vector=north_vector,\n method=method,\n data_source=data_source,\n )\n\n validate_mesh_fields(OffAxisProj, fields)\n\n if max_level is not None:\n OffAxisProj.dd.max_level = max_level\n\n # If a non-weighted, integral projection, assure field label\n # reflects that\n if weight_field is None and OffAxisProj.method == \"integrate\":\n self.projected = True\n\n # Hard-coding the origin keyword since the other two options\n # aren't well-defined for off-axis data objects\n PWViewerMPL.__init__(\n self,\n OffAxisProj,\n bounds,\n fields=fields,\n origin=\"center-window\",\n periodic=False,\n oblique=True,\n right_handed=right_handed,\n fontsize=fontsize,\n buff_size=buff_size,\n )\n if axes_unit is None:\n axes_unit = get_axes_unit(width, ds)\n self.set_axes_unit(axes_unit)\n\n\nclass WindowPlotMPL(ImagePlotMPL):\n \"\"\"A container for a single PlotWindow matplotlib figure and axes\"\"\"\n\n def __init__(\n self,\n data,\n cbname,\n cblinthresh,\n cmap,\n extent,\n zlim,\n figure_size,\n fontsize,\n aspect,\n figure,\n axes,\n cax,\n mpl_proj,\n mpl_transform,\n ):\n from matplotlib.ticker import ScalarFormatter\n\n self._draw_colorbar = True\n self._draw_axes = True\n self._draw_frame = True\n self._fontsize = fontsize\n self._figure_size = figure_size\n self._projection = mpl_proj\n self._transform = mpl_transform\n\n # Compute layout\n fontscale = float(fontsize) / 18.0\n if fontscale < 1.0:\n fontscale = np.sqrt(fontscale)\n\n if is_sequence(figure_size):\n fsize = figure_size[0]\n else:\n fsize = figure_size\n self._cb_size = 0.0375 * fsize\n self._ax_text_size = [1.2 * fontscale, 0.9 * fontscale]\n self._top_buff_size = 0.30 * fontscale\n 
self._aspect = ((extent[1] - extent[0]) / (extent[3] - extent[2])).in_cgs()\n self._unit_aspect = aspect\n\n size, axrect, caxrect = self._get_best_layout()\n\n super().__init__(size, axrect, caxrect, zlim, figure, axes, cax)\n\n self._init_image(data, cbname, cblinthresh, cmap, extent, aspect)\n\n # In matplotlib 2.1 and newer we'll be able to do this using\n # self.image.axes.ticklabel_format\n # See https://github.com/matplotlib/matplotlib/pull/6337\n formatter = ScalarFormatter(useMathText=True)\n formatter.set_scientific(True)\n formatter.set_powerlimits((-2, 3))\n self.image.axes.xaxis.set_major_formatter(formatter)\n self.image.axes.yaxis.set_major_formatter(formatter)\n if cbname == \"linear\":\n self.cb.formatter.set_scientific(True)\n try:\n self.cb.formatter.set_useMathText(True)\n except AttributeError:\n # this is only available in mpl > 2.1\n pass\n self.cb.formatter.set_powerlimits((-2, 3))\n self.cb.update_ticks()\n\n def _create_axes(self, axrect):\n self.axes = self.figure.add_axes(axrect, projection=self._projection)\n\n\ndef plot_2d(\n ds,\n fields,\n center=\"c\",\n width=None,\n axes_unit=None,\n origin=\"center-window\",\n fontsize=18,\n field_parameters=None,\n window_size=8.0,\n aspect=None,\n data_source=None,\n):\n r\"\"\"Creates a plot of a 2D dataset\n\n Given a ds object and a field name string, this will return a\n PWViewerMPL object containing the plot.\n\n The plot can be updated using one of the many helper functions\n defined in PlotWindow.\n\n Parameters\n ----------\n ds : `Dataset`\n This is the dataset object corresponding to the\n simulation output to be plotted.\n fields : string\n The name of the field(s) to be plotted.\n center : A sequence of floats, a string, or a tuple.\n The coordinate of the center of the image. If set to 'c', 'center' or\n left blank, the plot is centered on the middle of the domain. If set to\n 'max' or 'm', the center will be located at the maximum of the\n ('gas', 'density') field. Centering on the max or min of a specific\n field is supported by providing a tuple such as (\"min\",\"temperature\") or\n (\"max\",\"dark_matter_density\"). Units can be specified by passing in *center*\n as a tuple containing a coordinate and string unit name or by passing\n in a YTArray. If a list or unitless array is supplied, code units are\n assumed. For plot_2d, this keyword accepts a coordinate in two dimensions.\n width : tuple or a float.\n Width can have four different formats to support windows with variable\n x and y widths. They are:\n\n ================================== =======================\n format example\n ================================== =======================\n (float, string) (10,'kpc')\n ((float, string), (float, string)) ((10,'kpc'),(15,'kpc'))\n float 0.2\n (float, float) (0.2, 0.3)\n ================================== =======================\n\n For example, (10, 'kpc') requests a plot window that is 10 kiloparsecs\n wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a\n window that is 10 kiloparsecs wide along the x axis and 15\n kiloparsecs wide along the y axis. In the other two examples, code\n units are assumed, for example (0.2, 0.3) requests a plot that has an\n x width of 0.2 and a y width of 0.3 in code units. If units are\n provided the resulting plot axis labels will use the supplied units.\n origin : string or length 1, 2, or 3 sequence.\n The location of the origin of the plot coordinate system. This\n is typically represented by a '-' separated string or a tuple of\n strings. 
In the first index the y-location is given by 'lower',\n 'upper', or 'center'. The second index is the x-location, given as\n 'left', 'right', or 'center'. Finally, whether the origin is\n applied in 'domain' space, plot 'window' space or 'native'\n simulation coordinate system is given. For example, both\n 'upper-right-domain' and ['upper', 'right', 'domain'] place the\n origin in the upper right hand corner of domain space. If x or y\n are not given, a value is inferred. For instance, 'left-domain'\n corresponds to the lower-left hand corner of the simulation domain,\n 'center-domain' corresponds to the center of the simulation domain,\n or 'center-window' for the center of the plot window. In the event\n that none of these options place the origin in a desired location,\n a sequence of tuples and a string specifying the\n coordinate space can be given. If plain numeric types are input,\n units of `code_length` are assumed. Further examples:\n\n =============================================== ===============================\n format example\n =============================================== ===============================\n '{space}' 'domain'\n '{xloc}-{space}' 'left-window'\n '{yloc}-{space}' 'upper-domain'\n '{yloc}-{xloc}-{space}' 'lower-right-window'\n ('{space}',) ('window',)\n ('{xloc}', '{space}') ('right', 'domain')\n ('{yloc}', '{space}') ('lower', 'window')\n ('{yloc}', '{xloc}', '{space}') ('lower', 'right', 'window')\n ((yloc, '{unit}'), (xloc, '{unit}'), '{space}') ((0, 'm'), (.4, 'm'), 'window')\n (xloc, yloc, '{space}') (0.23, 0.5, 'domain')\n =============================================== ===============================\n axes_unit : string\n The name of the unit for the tick labels on the x and y axes.\n Defaults to None, which automatically picks an appropriate unit.\n If axes_unit is '1', 'u', or 'unitary', it will not display the\n units, and only show the axes name.\n fontsize : integer\n The size of the fonts for the axis, colorbar, and tick labels.\n field_parameters : dictionary\n A dictionary of field parameters than can be accessed by derived\n fields.\n data_source: YTSelectionContainer object\n Object to be used for data selection. Defaults to ds.all_data(), a\n region covering the full domain\n \"\"\"\n if ds.dimensionality != 2:\n raise RuntimeError(\"plot_2d only plots 2D datasets!\")\n if ds.geometry in [\"cartesian\", \"polar\", \"spectral_cube\"]:\n axis = \"z\"\n elif ds.geometry == \"cylindrical\":\n axis = \"theta\"\n elif ds.geometry == \"spherical\":\n axis = \"phi\"\n else:\n raise NotImplementedError(\n f\"plot_2d does not yet support datasets with {ds.geometry} geometries\"\n )\n # Part of the convenience of plot_2d is to eliminate the use of the\n # superfluous coordinate, so we do that also with the center argument\n if not isinstance(center, str) and obj_length(center) == 2:\n c0_string = isinstance(center[0], str)\n c1_string = isinstance(center[1], str)\n if not c0_string and not c1_string:\n if obj_length(center[0]) == 2 and c1_string:\n center = ds.arr(center[0], center[1])\n elif not isinstance(center, YTArray):\n center = ds.arr(center, \"code_length\")\n center.convert_to_units(\"code_length\")\n center = ds.arr([center[0], center[1], ds.domain_center[2]])\n return AxisAlignedSlicePlot(\n ds,\n axis,\n fields,\n center=center,\n width=width,\n axes_unit=axes_unit,\n origin=origin,\n fontsize=fontsize,\n field_parameters=field_parameters,\n window_size=window_size,\n aspect=aspect,\n data_source=data_source,\n )\n" ]
[ [ "numpy.array", "numpy.dot", "numpy.zeros", "matplotlib.mathtext.MathTextParser", "numpy.nonzero", "matplotlib.pyplot.figure", "numpy.float64", "numpy.nanmin", "numpy.sqrt", "numpy.isfinite", "numpy.log10", "matplotlib.ticker.ScalarFormatter", "numpy.column_stack", "numpy.nanmax" ] ]
adolgert/PyGSPN
[ "e8918357bf80d9f20f682ce39179a58b2378114e" ]
[ "gspn/distributions.py" ]
[ "import logging\nimport numpy as np\nimport scipy.stats\n\nlogger=logging.getLogger(__file__)\n\n\ndef anderson_sample_tester(distribution, now, cnt, rng):\n \"\"\"\n This checks whether hazard_integral and implicit_hazard_integral\n work by using them to sample a distribution.\n \"\"\"\n samples=np.zeros(cnt)\n maxdiff=0.0\n for i in range(cnt):\n interval=-np.log(rng.uniform(0, 1))\n last_mod=now\n firing_time=distribution.implicit_hazard_integral(interval, last_mod)\n next_fire=firing_time\n stops=np.linspace(now, firing_time, num=rng.randint(0,5))\n for stop in stops[:-1]:\n time_penalty=distribution.hazard_integral(last_mod, stop)\n interval-=time_penalty\n next_fire=distribution.implicit_hazard_integral(interval, stop)\n last_mod=stop\n maxdiff=max(abs(next_fire-firing_time), maxdiff)\n samples[i]=next_fire\n return samples\n\n\nclass ExponentialDistribution(object):\n \"\"\"\n This represents an exponential distribution.\n .. math::\n\n F(t) = 1-e^{-\\int_0^t \\lambda(s) ds}\n \"\"\"\n def __init__(self, lam, te):\n self.lam=lam\n self.te=te\n\n def sample(self, now, rng):\n return now+rng.exponential(scale=1.0/self.lam)\n\n def hazard_integral(self, t0, t1):\n return self.lam*(t1-t0)\n\n def implicit_hazard_integral(self, xa, t0):\n return t0+xa/self.lam\n\n def loglikelihood(self, t0, tf):\n return -self.lam*(tf-t0)\n\n def enabling_time(self):\n return self.te\n\n\nclass WeibullDistribution(object):\n \"\"\"\n This is a Weibull distribution.\n \"\"\"\n def __init__(self, lam, k, te, shift):\n self.lam=lam\n self.k=k\n self.te=te\n self.delta=shift\n\n def sample(self, now, rng):\n logger.debug(\"WeibullDistribution.sample l={0}, k={1}, te={2}\".format(\n self.lam, self.k, self.te))\n U=rng.uniform(0, 1)\n l=self.lam\n k=self.k\n d=now - (self.te - self.delta)\n value=0\n if d>0:\n value=l*np.power(-np.log(1-U) + np.power(d/l, k), 1/k)+self.te\n else:\n value=l*np.power(-np.log(1-U), 1/k)+self.te\n return now + value\n\n def hazard_integral(self, t0, t1):\n logger.debug(\"WeibullDistribution.hazard l={0}, k={1}, te={2}\".format(\n self.lam, self.k, self.te))\n return ( np.power((t1-self.te)/self.lam, self.k)\n -np.power((t0-self.te)/self.lam, self.k) )\n\n def implicit_hazard_integral(self, xa, t0):\n t1=t0 + self.lam * xa**(1/self.k)\n logger.debug((\"WeibullDistribution.implicit l={0}, k={1}, te={2} \"+\n \"xa={3} t0={4} t1={5}\").format(\n self.lam, self.k, self.te, xa, t0, t1))\n return t1\n\n def enabling_time(self):\n return self.te\n\n\nclass GammaDistribution(object):\n \"\"\"\n This is a gamma distribution with a shape and a rate,\n not a shape and a scale.\n Given a Gamma function,\n .. math::\n\n \\Gamma(t)=\\int_0^\\infty x^{t-1}e^{-x}dx\n\n and the (lower) incomplete gamma function,\n .. math::\n\n \\gamma(x;\\alpha)=\\int_0^x t^{\\alpha-1}e^{-t}dt\n\n the CDF of the gamma distribution is\n .. math::\n\n F(x;\\alpha, \\beta)=\\frac{\\gamma(\\alpha, \\beta x)}{\\Gamma(\\alpha)}\n\n The PDF is\n .. math::\n\n f(x)=\\frac{\\beta^{\\alpha}}{\\Gamma(\\alpha)}x^{\\alpha-1}e^{-\\beta x}\n\n This is sampled with possible left censoring. 
\n \"\"\"\n def __init__(self, alpha, beta, te):\n self.alpha=alpha\n self.beta=beta\n self.te=te\n\n def sample(self, now, rng):\n \"\"\"\n Sampling accounts for time shift and uses given random\n number generator.\n \"\"\"\n U=rng.uniform(low=0, high=1, size=1)\n d=now-self.te\n if d>0:\n cumulative=scipy.stats.gamma.cdf(x=d, a=self.alpha,\n scale=1.0/self.beta, loc=0)\n return scipy.stats.gamma.isf(1-U*(1-cumulative)-cumulative,\n self.alpha, scale=1.0/self.beta, loc=0) + self.te\n else:\n return scipy.stats.gamma.isf(1-U,\n self.alpha, scale=1.0/self.beta, loc=0) + self.te\n\n def hazard_integral(self, t0, t1):\n \"\"\"\n Our tools include\n\n - gammainc(a, x), normalized lower incomplete\n - gammaincinv(a, y), gammainc(a, x)=y\n - gammaincc(a, x), 1-gammainc(a, x)\n - gammainccinv(a, y), gammaincc(a, x)=y\n \"\"\"\n return np.log(\n (1-scipy.special.gammainc(self.alpha, self.beta*(t0-self.te)))/\n (1-scipy.special.gammainc(self.alpha, self.beta*(t1-self.te)))\n )\n\n def implicit_hazard_integral(self, xa, t0):\n quad=1-np.exp(-xa)*(1-scipy.special.gammainc(self.alpha,\n self.beta*(t0-self.te)))\n return self.te+scipy.special.gammaincinv(self.alpha, quad)/self.beta\n\n def loglikelihood(self, t0, tf):\n t0e=t0-self.te\n logf=(np.log(np.power(self.beta, self.alpha)*\n np.power(t0e, self.alpha-1))\n -self.beta*t0e-scipy.special.gammaln(self.alpha))\n integrated=self.hazard_integral(self.te, t0)\n return logf + integrated\n\n def enabling_time(self):\n return self.te\n\n\n\nclass UniformDistribution(object):\n \"\"\"\n Uniform distribution between a and b, offset by an enabling time te.\n \"\"\"\n def __init__(self, a, b, te):\n \"\"\"\n te is an absolute time.\n The earliest firing time is >te+a.\n The latest firing time is <=te+b.\n \"\"\"\n self.a=a\n self.b=b\n self.te=te\n\n def sample(self, now, rng):\n \"\"\"\n Sampling accounts for time shift and uses given random\n number generator.\n \"\"\"\n if now<=self.te+self.a:\n return rng.uniform(low=self.te+self.a, high=self.te+self.b)\n elif now<=self.te+self.b:\n return rng.uniform(low=now, high=self.te+self.b)\n else:\n return np.float(\"nan\")\n\n def hazard_integral(self, t0, t1):\n \"\"\"\n Integrate the hazard, taking into account when the uniform\n interval starts and stops.\n \"\"\"\n t0e=t0-self.te\n t1e=t1-self.te\n if t0e>self.b:\n return 0\n if t1e<self.a:\n return 0\n low=max(self.a, t0e)\n high=min(self.b, t1e)\n numerator=np.log(1-(t0e-self.a)/(self.b-self.a))\n denominator=np.log((self.b-t1e)/(self.b-self.a))\n return numerator-denominator\n\n def implicit_hazard_integral(self, xa, t0):\n Ft=1-np.exp(-xa)\n t0e=t0-self.te\n low=max(t0e, self.a)\n r=self.te+low*(1-Ft) + self.b*Ft\n return r\n\n def loglikelihood(self, t0, tf):\n t0e=t0-self.te\n tfe=tf-self.te\n if tfe<self.a or te>=self.b:\n return np.double(\"nan\")\n t0e=max(t0e, self.a)\n ln_pdf=np.log((tf-self.a)/(self.b-self.a))\n int_hazard=self.hazard_integral(self.te, t0)\n return ln_pdf + int_hazard\n\n def enabling_time(self):\n return self.te\n\n# LogLogistic\n# Gaussian\n# Histogram\n\n\nclass PiecewiseLinearDistribution(object):\n \"\"\"\n This is a piecewise linear hazard, not a piecewise linear probability.\n Whatever is the last point is treated as a horizontal line to infinity.\n \"\"\"\n def __init__(self, times, hazards, enabling_time):\n assert(times[0]<1e-6)\n self.b=np.array(times, dtype=np.double)\n self.w=np.array(hazards, dtype=np.double)\n if self.b[-1]<float(\"inf\"):\n self.b=np.hstack([self.b, float(\"inf\")])\n self.w=np.hstack([self.w, 
self.w[-1]])\n self.te=enabling_time\n self.wcopy=np.copy(self.w)\n\n def sample(self, now, rng):\n \"\"\"\n Sampling accounts for time shift and uses given random\n number generator.\n \"\"\"\n raise RuntimeError(\"Cannot sample this yet.\")\n\n def hazard_integral(self, t0, t1):\n \"\"\"\n Integrate the hazard, taking into account when the uniform\n interval starts and stops.\n \"\"\"\n t0e=t0-self.te\n t1e=t1-self.te\n if t1e<self.b[0]:\n return 0\n # Take the array of hazards at times and chop it off\n # on the left at t0e.\n b, w=(np.copy(self.b), np.copy(self.w))\n left=np.where(b<=t0e)[0][-1]\n b=b[left:]\n w=w[left:]\n b[0]=t0e\n # Chop off the right after t1e.\n right=np.where(b<t1e)[0][-1]\n b=b[:(right+1)]\n w=w[:(right+1)]\n # Add a point back for the end point.\n b=np.hstack([ b, np.array([t1e])])\n w=np.hstack([ w, np.array([w[-1]])])\n\n total=0.0\n for idx in range(b.shape[0]-1):\n db=b[idx+1]-b[idx]\n total+=0.5*db*(w[idx+1]+w[idx])\n assert(np.all(self.w==self.wcopy))\n return total\n\n\n def implicit_hazard_integral(self, xa, t0):\n t0e=t0-self.te\n # Take the array of hazards at times and chop it off\n # on the left at t0e.\n b, w=(np.copy(self.b), np.copy(self.w))\n left=np.where(b<=t0e)[0][-1]\n b=b[left:]\n w=w[left:]\n b[0]=t0e\n #logger.debug(\"Piecewise bleft {0} {1}\".format(b, w))\n b-=t0e # Now it starts at 0.\n #logger.debug(\"Piecewise bleft {0} {1}\".format(b, w))\n\n idx=0\n t1e=0\n # Each loop removes a chunk from the sum, xa.\n while xa>0:\n # Create a putative right-hand point, given the current\n # slope, and see if it's before the right-hand point\n # that starts the next segment.\n if (w[idx]+w[idx+1])/xa > 1e-20:\n t1e=b[idx]+2*xa/(w[idx]+w[idx+1])\n # The last entry is infinity, so this is safe.\n if t1e<b[idx+1]:\n xa=0\n else:\n xa-=(b[idx+1]-b[idx])*0.5*(w[idx]+w[idx+1])\n else:\n if np.isinf(b[idx+1]):\n raise RuntimeError(\"Could not solve for t0 {0}\".format(\n xa))\n else:\n pass # Go to next entry in table.\n idx+=1\n\n assert(np.all(self.w==self.wcopy))\n return t1e+t0\n\n def loglikelihood(self, t0, tf):\n return None\n \n def enabling_time(self):\n return self.te\n\n \n\nclass PiecewiseConstantDistribution(object):\n \"\"\"\n This is a piecewise linear hazard, not a piecewise linear probability.\n \"\"\"\n def __init__(self, times, hazards, enabling_time):\n assert(times[0]<1e-6)\n self.b=times\n self.w=hazards\n self.te=enabling_time\n self.partial_sum=np.zeros((len(times),), dtype=np.double)\n\n total=0\n for idx in range(len(times)-1):\n self.partial_sum[idx]=total\n total+=0.5*(hazards[idx]+hazards[idx+1])*(times[idx+1]-times[idx])\n self.partial_sum[len(times)-1]=total\n\n def sample(self, now, rng):\n \"\"\"\n Sampling accounts for time shift and uses given random\n number generator.\n \"\"\"\n raise RuntimeError(\"Cannot sample this yet.\")\n\n def hazard_integral(self, t0, t1):\n \"\"\"\n Integrate the hazard, taking into account when the uniform\n interval starts and stops.\n \"\"\"\n t0e=t0-self.te\n t1e=t1-self.te\n if t1e<self.b[0]:\n return 0\n # Take the array of hazards at times and chop it off\n # on the left at t0e.\n b, w=(np.copy(self.b), np.copy(self.w))\n left=np.where(b<=t0e)[0][-1]\n b=b[left:]\n w=w[left:]\n b[0]=t0e\n # Chop off the right after t1e.\n right=np.where(b<t1e)[0][-1]\n b=b[:(right+1)]\n w=w[:(right+1)]\n # Add a point back for the end point.\n b=np.hstack([ b, np.array([t1e])])\n w=np.hstack([ w, np.array([w[-1]])])\n\n total=0.0\n for idx in range(b.shape[0]-1):\n total+=w[idx]*(b[idx+1]-b[idx])\n 
return total\n\n\n def implicit_hazard_integral(self, xa, t0):\n t0e=t0-self.te\n # Take the array of hazards at times and chop it off\n # on the left at t0e.\n b, w=(np.copy(self.b), np.copy(self.w))\n left=np.where(b<=t0e)[0][-1]\n b=b[left:]\n w=w[left:]\n b[0]=t0e\n #logger.debug(\"Piecewise bleft {0} {1}\".format(b, w))\n b-=t0e # Now it starts at 0.\n #logger.debug(\"Piecewise bleft {0} {1}\".format(b, w))\n\n idx=0\n t1e=0\n while xa>0:\n x0=b[idx]\n if idx<b.shape[0]-1:\n x1=b[idx+1]\n else:\n x1=float(\"inf\")\n if w[idx]>0:\n t1e=x0+xa/w[idx]\n else:\n t1e=float(\"inf\")\n #logger.debug(\"Piecewise midcalc x0 {0} x1 {1} t1e {2}\".format(\n # x0, x1, t1e))\n if t1e<x1:\n xa=0\n else:\n xa-=(x1-x0)*w[idx]\n idx+=1\n\n return t1e+t0\n\n def loglikelihood(self, t0, tf):\n return None\n \n def enabling_time(self):\n return self.te\n\n\n\nclass EmpiricalDistribution(object):\n \"\"\"\n This distribution is used to collect samples and then\n compare the estimate of the sampled distribution with\n some known distribution.\n\n Wikipedia explains this. Kolmogorov-Smirnov test.\n https://en.wikipedia.org/wiki/Kolmogorov%E2%80%93Smirnov_test\n \"\"\"\n def __init__(self, samples):\n self.samples=samples\n\n def c_alpha(self):\n \"\"\"\n The hypothesis is that the two distributions are different.\n The null hypothesis is rejected at level alpha if\n Dnn*sqrt(nn/(n+n))>c_alpha. This returns a table of c_alpha.\n The upshot is that you should look for values from the\n two comparison functions that are less than two.\n \"\"\"\n return [[0.1, 1.22],\n [0.05, 1.36],\n [0.025, 1.48],\n [0.01, 1.63],\n [0.005, 1.73],\n [0.001, 1.95]]\n\n def compare_empirical(self, other):\n \"\"\"\n Compare two empirical distributions using Kolmogorov-Smirnov.\n This returns the Kolmogorov-smirnov goodness-of-fit,\n sqrt(n)*D_n, where D_n is the Kolmogorov-smirnov statistic.\n \"\"\"\n self.samples.sort()\n other.samples.sort()\n acnt=len(self.samples)\n aidx=-1\n bcnt=len(other.samples)\n bidx=-1\n maxdiff=0.0\n while aidx+1<acnt and bidx+1<bcnt:\n if bidx+1==bcnt:\n v=self.samples[aidx+1]\n while aidx+1!=acnt and self.samples[aidx+1]==v:\n aidx+=1\n maxdiff=max(maxdiff, abs((aidx+1)/acnt - (bidx+1)/bcnt))\n elif aidx+1==acnt:\n v=other.samples[bidx+1]\n while bidx+1!=bcnt and other.samples[bidx+1]==v:\n bidx+=1\n maxdiff=max(maxdiff, abs((aidx+1)/acnt - (bidx+1)/bcnt))\n elif self.samples[aidx+1]<other.samples[bidx+1]:\n v=self.samples[aidx+1]\n while aidx+1!=acnt and self.samples[aidx+1]==v:\n aidx+=1\n maxdiff=max(maxdiff, abs((aidx+1)/acnt - (bidx+1)/bcnt))\n else:\n v=other.samples[bidx+1]\n while aidx+1!=acnt and self.samples[aidx+1]==v:\n aidx+=1\n while bidx+1!=bcnt and other.samples[bidx+1]==v:\n bidx+=1\n maxdiff=max(maxdiff, abs((aidx+1)/acnt - (bidx+1)/bcnt))\n return maxdiff*np.sqrt(acnt*bcnt/(acnt+bcnt))\n\n def compare_theoretical(self, cdf):\n \"\"\"\n Compare this estimated CDF with a function which returns\n the CDF at a point. Use Kolmogorov-Smirnov to return a\n statistic which should be stable with increasing\n numbers of samples.\n This returns the Kolmogorov-smirnov goodness-of-fit,\n sqrt(n)*D_n, where D_n is the Kolmogorov-smirnov statistic.\n \"\"\"\n self.samples.sort()\n acnt=len(self.samples)\n aidx=-1\n maxdiff=0.0\n while aidx!=acnt:\n v=self.samples[aidx+1]\n while self.samples[aidx+1]==v:\n aidx+=1\n maxdiff=max(maxdiff,\n abs(cdf(self.samples[aidx])-(aidx+1)/acnt))\n return maxdiff*np.sqrt(acnt)\n\n" ]
[ [ "numpy.isinf", "numpy.array", "numpy.log", "numpy.zeros", "numpy.double", "numpy.float", "numpy.copy", "numpy.exp", "numpy.where", "numpy.power", "numpy.sqrt", "numpy.all", "numpy.hstack" ] ]
sydroth/sydirv
[ "2a43aaadffaeee5974780e51e6936a5e9f82f01c" ]
[ ".ipynb_checkpoints/Irv_EDA5-checkpoint.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n\n# In[2]:\n\n\nbo_mojo = pd.read_csv('unzippedData/bom.movie_gross.csv')\n\n\n# In[3]:\n\n\nthe_numbers = pd.read_csv('unzippedData/tn.movie_budgets.csv')\n\n\n# In[4]:\n\n\n#in the-numbers:\n#convert release date to date format\n#convert dollar figures to int format\n\nthe_numbers['release_date']=pd.to_datetime(the_numbers['release_date'])\nthe_numbers['production_budget'] = the_numbers['production_budget'].str.replace(',', '').str.replace('$', '').astype(int)\nthe_numbers['domestic_gross'] = the_numbers['domestic_gross'].str.replace(',', '').str.replace('$', '').astype(int)\nthe_numbers['worldwide_gross'] = the_numbers['worldwide_gross'].str.replace(',', '').str.replace('$', '').astype(int)\n\n\n# In[5]:\n\n\n#add column for worldwide profitability\nthe_numbers['Worldwide Profitability'] = the_numbers['worldwide_gross']/the_numbers['production_budget']\n\n\n# In[6]:\n\n\n#add column for domestic profitability\nthe_numbers['Domestic Profitability'] = the_numbers['domestic_gross']/the_numbers['production_budget']\n\n\n# In[7]:\n\n\nimdb_title_basics=pd.read_csv('unzippedData/imdb.title.basics.csv') #movie title, year, runtime, genre\n\n\n# In[8]:\n\n\n#in imdb-title-basics:\n#only movies aafter 2009 >>> TURNS OUT THIS WAS UNNECESSARY\nimdb_title_basics=imdb_title_basics[imdb_title_basics['start_year']>2009] \n\n\n# In[9]:\n\n\n#cut out movies less than 75 minutes, more than 4 hours\nimdb_title_basics=imdb_title_basics[~(imdb_title_basics['runtime_minutes']>240) & ~(imdb_title_basics['runtime_minutes']<75)]\n\n\n# In[10]:\n\n\n#drop all duplicate movie titles\nimdb_title_basics = imdb_title_basics.drop_duplicates('primary_title', keep=False)\n\n\n# In[11]:\n\n\n#change imdb-title-basics column name to align with 'the-numbers' data\nimdb_title_basics.rename(columns={'primary_title':'movie'}, inplace=True)\n\n\n# In[12]:\n\n\n#make only one genre entry per column\nimdb_title_basics['Genre 1']=imdb_title_basics['genres'].str.split(',', expand=True)[0]\nimdb_title_basics['Genre 2']=imdb_title_basics['genres'].str.split(',', expand=True)[1]\nimdb_title_basics['Genre 3']=imdb_title_basics['genres'].str.split(',', expand=True)[2]\n\n\n# In[13]:\n\n\n#compile set of genres\ngenres_1 = list(imdb_title_basics['Genre 1'].unique())\ngenres_2 = list(imdb_title_basics['Genre 2'].unique())\ngenres_3 = list(imdb_title_basics['Genre 3'].unique())\ngenre_set = set(genres_1 + genres_2 + genres_3)\n\n\n# In[14]:\n\n\nthe_numbers.set_index('movie', inplace=True)\n\n\n# In[15]:\n\n\nimdb_title_basics.set_index('movie', inplace=True, drop=False)\n\n\n# In[16]:\n\n\n#the-numbers joined with IMDB-title-basics\njoined_df = the_numbers.join(imdb_title_basics, how='left') \n\n\n# In[17]:\n\n\n#READ IN THIRD DATASET: IMDB-TITLE-RATINGS\nimdb_title_ratings=pd.read_csv('unzippedData/imdb.title.ratings.csv') #average rating, number of votes\n\n\n# In[18]:\n\n\n#prepare to combine imdb-title-basics and imdb-title-ratings\nimdb_title_basics.set_index('tconst', inplace=True)\nimdb_title_ratings.set_index('tconst', inplace=True)\n\n\n# In[19]:\n\n\nimdb_combined = imdb_title_ratings.join(imdb_title_basics, how='left') #IMDB-title-ratings + IMDB-title-basics\n#imdb_combined.head()\n\n\n# In[20]:\n\n\n#can now add earnings/budget information from the-numbers\nimdb_combined.set_index('movie', inplace=True)\nimdb_combined.info() #~73000 entries\n#the_numbers.info() #5782 
entries\n\n\n# In[21]:\n\n\n#combine the-numbers with the combined IMDB data\nthe_numbers_imdb_combined = the_numbers.join(imdb_combined, how='left')\n\n\n# In[22]:\n\n\n#remove entries that were only from the-numbers and not IMDB\nthe_numbers_imdb_combined2=the_numbers_imdb_combined[the_numbers_imdb_combined['averagerating'].isna()==False]\n#the_numbers_imdb_combined2.info()\n\n\n# In[23]:\n\n\nmain_df = the_numbers_imdb_combined2\n#main_df.head()\n\n\n# In[24]:\n\n\n#drop unnecessary columns\n#IF THE FOLLOWING RETURNS AN ERROR DON'T WORRY ABOUT IT!!!!!\nmain_df = main_df.drop(['original_title', 'start_year', 'genres'], axis=1)\n\n\n# In[25]:\n\n\n#align movie name column in preparation for join\nbo_mojo = bo_mojo.rename(columns={'title':'movie'})\nbo_mojo = bo_mojo.set_index('movie')\n\n\n# In[26]:\n\n\n#preserve title of movie\nbo_mojo['movie']=bo_mojo.index\n\n\n# In[27]:\n\n\n#strip movie of ending year-in-parentheses\nmovie_stripped =[]\nfor x in bo_mojo.movie:\n if '(' in x:\n movie_stripped.append(x[:(x.find('(')-1)])\n else:\n movie_stripped.append(x)\n\nbo_mojo['movie']=movie_stripped\n\n\n# In[28]:\n\n\n#set index as movie without ending year-in-parentheses\nbo_mojo.set_index('movie', inplace=True)\n\n\n# In[29]:\n\n\n#drop \nbo_mojo.drop(columns=['domestic_gross', 'foreign_gross'], axis=1, inplace=True)\n#bo_mojo.head()\n\n\n# In[30]:\n\n\n#combine the previous IMDB-thenumbers with bomojo\nmain_df2=main_df.join(bo_mojo, how='left')\n#main_df2.head()\n\n\n# In[31]:\n\n\nmain_df2.drop(['year'], axis=1, inplace=True)\n#main_df2.head()\n\n\n# In[32]:\n\n\nmain_df3=main_df2\n\n\n# In[33]:\n\n\n#restrict release date to 2010-\n#restrict production budget to >=$1,000,000\nmain_df3 = main_df3[main_df3['release_date'].dt.year>2009]\nmain_df3 = main_df3[main_df3['production_budget']>=1000000]\n\n\n# In[34]:\n\n\n#create new category: no studio given is \"small studio\"\nmain_df3['studio'].fillna(value='small studio', inplace=True)\n\n\n# In[35]:\n\n\n#replace $0 with NaN for worldwide gross\nmain_df3['worldwide_gross'] = main_df3['worldwide_gross'].replace(0,np.nan)\n# s = pd.Series([0, 1, 2, 0, 4])\n# s.replace(0, np.nan)\n\n\n# In[36]:\n\n\n#create list of Top10 biggest studios plus \"small studio\"\nstudio_list=list(main_df3['studio'].value_counts().head(11).index)\nbiggest_studios_df=main_df3[main_df3['studio'].isin(studio_list)]\n\n\n# In[37]:\n\n\n#create plot of profitability by studio\nsns.set(font_scale=3)\nplt.figure(figsize=(15,10))\nresult = biggest_studios_df.groupby([\"studio\"])['Worldwide Profitability'].aggregate(np.mean).reset_index().sort_values('Worldwide Profitability')\nax1 = sns.barplot('studio', 'Worldwide Profitability', data=biggest_studios_df, order=result['studio'], palette=sns.color_palette(\"coolwarm\", len(result)))\nplt.title('Profitability by Studio')\nplt.xlabel('Studio')\nplt.xticks(rotation=45)\nplt.ylabel('Worldwide Profitability')\nplt.show()\n\n\n# In[38]:\n\n\npalette_1 = sns.color_palette(\"coolwarm\", len(result))\nresult['palette'] = palette_1\npalette_df = result.drop(['Worldwide Profitability', 'studio'], axis=1)\n#palette_df\n\n\n# In[39]:\n\n\n#create plot of \nresult = biggest_studios_df.groupby([\"studio\"])['averagerating'].aggregate(np.mean).reset_index().sort_values('averagerating')\nresult=result.join(palette_df)\n\n\n# In[40]:\n\n\nplt.figure(figsize=(15,10))\nax1 = sns.barplot('studio', 'averagerating', data=biggest_studios_df, order=result['studio'], palette=sns.color_palette(list(result['palette'])))\nplt.title('Ratings by 
Studio')\nplt.xlabel('Studio')\nplt.xticks(rotation=45)\nplt.ylabel('Average Movie Rating')\nplt.show()\n\n\n# In[41]:\n\n\n#create dictionary with values as genres\ngenre_dict={}\nfor i, genre in zip(range(len(genre_set)), genre_set):\n genre_dict[i]=[genre]\n\n\n# In[42]:\n\n\n#append to dictionary values for average ratings and of worldwide profitability\nfor i in range(29):\n genre_df_current = main_df3[(main_df3['Genre 1']==genre_dict[i][0]) | (main_df3['Genre 2']==genre_dict[i][0]) | (main_df3['Genre 3']==genre_dict[i][0])]\n median_average_ratings = genre_df_current['averagerating'].median()\n mean_average_ratings = genre_df_current['averagerating'].mean()\n median_worldwide_profitability = genre_df_current['Worldwide Profitability'].median()\n mean_worldwide_profitability = genre_df_current['Worldwide Profitability'].mean()\n genre_dict[i].append(median_average_ratings)\n genre_dict[i].append(mean_average_ratings)\n genre_dict[i].append(median_worldwide_profitability)\n genre_dict[i].append(mean_worldwide_profitability)\n\n\n# In[43]:\n\n\n#exclude genres with no avilable data\nfor i in range(29):\n if str(genre_dict[i][1])=='nan':\n del genre_dict[i]\n else:\n pass\n\n\n# In[44]:\n\n\n#create dataframe based on genre dictionary\nmain_genre_df = pd.DataFrame.from_dict(genre_dict, orient='index',columns=['Genre', 'Median Average Ratings', 'Mean Average Ratings','Median Worldwide Profitability', 'Mean Worldwide Profitability']) \n\n\n# In[45]:\n\n\n#plot median rating by genre\nplt.figure(figsize=(15,10))\nresult = main_genre_df.sort_values('Median Average Ratings').reset_index()\nax1 = sns.barplot('Genre', 'Median Average Ratings', data=main_genre_df, order=result['Genre'], palette=sns.color_palette(\"coolwarm\", len(result)))\nplt.title('Ratings by Genre')\nplt.xlabel('Genre')\nplt.xticks(rotation=75)\nplt.ylabel('Average Movie Rating')\nplt.show()\n\n\n# In[46]:\n\n\npalette_1 = sns.color_palette(\"coolwarm\", len(result))\nresult['palette'] = palette_1\npalette_df = result.drop(['Median Average Ratings','Mean Average Ratings', 'Median Worldwide Profitability', 'Mean Worldwide Profitability', 'Genre'], axis=1)\npalette_df = palette_df.set_index('index')\n#palette_df\n\n\n# In[47]:\n\n\nresult = main_genre_df.sort_values('Median Worldwide Profitability').reset_index()\nresult = result.set_index('index')\nresult = result.join(palette_df)\n\n\n# In[48]:\n\n\n#plot median profitability by genre\nplt.figure(figsize=(15,10))\nax1 = sns.barplot('Genre', 'Median Worldwide Profitability', data=main_genre_df, order=result['Genre'], palette=sns.color_palette(list(result['palette'])))\nplt.title('Profitability by Genre')\nplt.xlabel('Genre')\nplt.xticks(rotation=75)\nplt.ylabel('Worldwide Profitability')\nplt.show()\n\n\n# In[ ]:\n\n\n\n\n" ]
[ [ "pandas.to_datetime", "pandas.DataFrame.from_dict", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.title", "matplotlib.pyplot.figure", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.show", "pandas.read_csv", "matplotlib.pyplot.xticks" ] ]
smerkousdavid/membrane-analysis
[ "f001db2a9ff836e304b8250531ef0d47882d5879" ]
[ "structure/test/fpw_measure.py" ]
[ "import unittest\nfrom analysis import make_fpw_measurements\nfrom tests.statistics import compare_statistics\nimport numpy as np\nimport math\nimport os\nimport cv2\n\n\nPY_PATH = os.path.dirname(os.path.realpath(__file__))\nIMAGE_FOLDER = os.path.join(PY_PATH, 'test_imgs')\nTEST_IMGS = [os.path.join(IMAGE_FOLDER, f).lower() for f in os.listdir(IMAGE_FOLDER) if 'png' in os.path.splitext(f)[1].lower()]\nMAX_DIFF = 0.1 # max difference in distances/pixel numbers\nTHRESHOLD = 50 # min value in image to turn into a binary 1\nWAITKEY = -1\n\n\nclass TestFPWMeasures(unittest.TestCase):\n def _compare(self, file, res, show=False):\n # make sure file exists (unless if it's a numpy array)\n if isinstance(file, (list, tuple, np.ndarray)):\n image = file\n else:\n fpath = os.path.join(IMAGE_FOLDER, file).lower()\n if fpath not in TEST_IMGS:\n raise ValueError('The image file %s does not exist in the test suite!' % file)\n\n image = cv2.imread(fpath, cv2.IMREAD_GRAYSCALE)\n blobs = np.ascontiguousarray(image)\n blob_uint = (blobs > 50).astype(np.uint8)\n\n # get the layers\n membrane = blob_uint[-2]\n slits = blob_uint[-1]\n\n # make the measurements\n fpw = make_fpw_measurements(membrane, slits, draw=show)\n \n # compare the stats\n compare_statistics(fpw.get_arc_stats().json(), res)\n\n # show results\n if show and fpw.has_image():\n cv2.imshow('File %s (draw on)' % file, cv2.resize(fpw.get_image(), (500, 500)))\n cv2.waitKey(WAITKEY)\n\n def ftest_disconnected_pair(self):\n image = np.zeros((300, 300), dtype=np.uint8)\n cv2.line(image, (10, 150), (135, 150), 255, 5)\n cv2.line(image, (160, 150), (290, 150), 255, 5)\n \n points = np.array([\n [20, 150], # left\n [280, 150], # right\n ], dtype=np.int32)\n\n self._compare(\n image,\n points,\n [\n {\n 'mean': 259.8284271247462,\n },\n {}\n ]\n )\n\n def test_drawn_circle(self):\n image = np.zeros((800, 800), dtype=np.uint8)\n cv2.circle(image, (400, 400), 300, 255, 5)\n cv2.circle(image, (400, 700), 5, 0, -1) # cutout bottom for endpoint detection\n\n # circle left to top extent to right extent (so sum should be arc of pi and mean should be arc of pi/2)\n points = np.array([\n [90, 400], # left\n [400, 90], # top\n [710, 400] # right\n ], dtype=np.int32)\n\n # compute pi/2 arc\n arc = 495.90158\n\n slits = np.zeros((800, 800), dtype=np.uint8)\n for point in points:\n p = tuple(point)\n cv2.circle(slits, p, 2, 255, -1)\n\n self._compare(\n [image, slits],\n {\n 'sum': 2.0 * arc,\n 'mean': arc,\n }\n )" ]
[ [ "numpy.ascontiguousarray", "numpy.array", "numpy.zeros" ] ]
PML-UCF/pinn
[ "dcedf25f7154dccd9872df735a19c1f9bcfca50c" ]
[ "samples/cumulative_damage/propagation_walker_model/model.py" ]
[ "# ______ _ _ _ _ _ _ _ \n# | ___ \\ | | | | (_) (_) | | (_) \n# | |_/ / __ ___ | |__ __ _| |__ _| |_ ___| |_ _ ___ \n# | __/ '__/ _ \\| '_ \\ / _` | '_ \\| | | / __| __| |/ __|\n# | | | | | (_) | |_) | (_| | |_) | | | \\__ \\ |_| | (__ \n# \\_| |_| \\___/|_.__/ \\__,_|_.__/|_|_|_|___/\\__|_|\\___|\n# ___ ___ _ _ \n# | \\/ | | | (_) \n# | . . | ___ ___| |__ __ _ _ __ _ ___ ___ \n# | |\\/| |/ _ \\/ __| '_ \\ / _` | '_ \\| |/ __/ __| \n# | | | | __/ (__| | | | (_| | | | | | (__\\__ \\ \n# \\_| |_/\\___|\\___|_| |_|\\__,_|_| |_|_|\\___|___/ \n# _ _ _ \n# | | | | | | \n# | | __ _| |__ ___ _ __ __ _| |_ ___ _ __ _ _ \n# | | / _` | '_ \\ / _ \\| '__/ _` | __/ _ \\| '__| | | |\n# | |___| (_| | |_) | (_) | | | (_| | || (_) | | | |_| |\n# \\_____/\\__,_|_.__/ \\___/|_| \\__,_|\\__\\___/|_| \\__, |\n# __/ |\n# |___/ \n#\t\t\t\t\t\t\t\t\t\t\t\t\t\t \n# MIT License\n# \n# Copyright (c) 2019 Probabilistic Mechanics Laboratory\n# \n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n# \n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n# \n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n# ==============================================================================\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.layers import Input, Concatenate\n\nfrom pinn.layers import inputsSelection, CumulativeDamageCell\nfrom pinn.layers import StressIntensityRange, WalkerModel\n# Model\ndef create_model(F, alpha, gamma, C0, m , d0RNN, batch_input_shape, input_array, selectdK, selectprop, myDtype, return_sequences = False, unroll = False):\n \n batch_adjusted_shape = (batch_input_shape[2]+1,) #Adding state\n placeHolder = Input(shape=(batch_input_shape[2]+1,)) #Adding state\n \n filterdkLayer = inputsSelection(batch_adjusted_shape, selectdK)(placeHolder)\n \n filterdaLayer = inputsSelection(batch_adjusted_shape, selectprop)(placeHolder)\n \n dk_input_shape = filterdkLayer.get_shape()\n \n dkLayer = StressIntensityRange(input_shape = dk_input_shape, dtype = myDtype)\n dkLayer.build(input_shape = dk_input_shape)\n dkLayer.set_weights([np.asarray([F], dtype = dkLayer.dtype)])\n dkLayer.trainable = False\n dkLayer = dkLayer(filterdkLayer)\n \n wmInput = Concatenate(axis = -1)([dkLayer, filterdaLayer])\n wm_input_shape = wmInput.get_shape()\n \n wmLayer = WalkerModel(input_shape = wm_input_shape, dtype = myDtype)\n wmLayer.build(input_shape = wm_input_shape)\n wmLayer.set_weights([np.asarray([alpha, gamma, C0, m], dtype = wmLayer.dtype)])\n wmLayer.trainable = False\n wmLayer = wmLayer(wmInput)\n\n functionalModel = Model(inputs=[placeHolder], 
outputs=[wmLayer])\n \"-------------------------------------------------------------------------\"\n CDMCellHybrid = CumulativeDamageCell(model = functionalModel,\n batch_input_shape = batch_input_shape,\n dtype = myDtype,\n initial_damage = d0RNN)\n \n CDMRNNhybrid = tf.keras.layers.RNN(cell = CDMCellHybrid,\n return_sequences = return_sequences,\n return_state = False,\n batch_input_shape = batch_input_shape,\n unroll = unroll)\n \n model = tf.keras.Sequential()\n model.add(CDMRNNhybrid)\n model.compile(loss='mse', optimizer=tf.keras.optimizers.RMSprop(1e-12), metrics=['mae'])\n return model\n" ]
[ [ "numpy.asarray", "tensorflow.keras.layers.Input", "tensorflow.keras.layers.RNN", "tensorflow.keras.models.Model", "tensorflow.keras.Sequential", "tensorflow.keras.optimizers.RMSprop", "tensorflow.keras.layers.Concatenate" ] ]
ruoqi-liu/DeepChatBot
[ "6e6a61ff6fffc3eee76295c4b25aea85e3b7886a" ]
[ "seq2seq.py" ]
[ "import tensorflow as tf\nimport tensorflow.contrib as contrib\nfrom utils import *\nfrom sklearn.model_selection import train_test_split\nimport numpy as np\nimport random\nfrom sklearn.metrics import mean_absolute_error\n\ndata = unpickle_file('generate_movie_dialogue.pkl')\nunknown_symbol = '*'\nstart_symbol = '^'\nend_symbol = '$'\npadding_symbol = '#'\nword2id, id2word = unpickle_file('vocab.pkl')\n\n\ndef split_dataset():\n train_set, test_set = train_test_split(data, test_size=0.2, random_state=42)\n return train_set, test_set\n\n\ndef generate_vocab():\n word_freq = {}\n unique_set = set()\n word2id = {}\n id2word = {}\n for input, output in data:\n for word in input.split():\n word_freq[word] = word_freq.get(word, 0) + 1\n for word in output.split():\n word_freq[word] = word_freq.get(word, 0) + 1\n for word, freq in word_freq.items():\n if freq > 0:\n unique_set.add(word)\n\n for i, symbol in enumerate('#^$'):\n word2id[symbol] = i\n id2word[i] = symbol\n\n for i, word in enumerate(unique_set):\n word2id[word] = i+3\n id2word[i+3] = word\n\n pickle.dump((word2id, id2word), open('vocab.pkl', 'wb'))\n\n\ndef sentence_to_ids(sentence, word2id, padded_len):\n sentence = sentence.split()\n sent_ids = [word2id.get(word, 0) for word in sentence][:padded_len-1]+[word2id[end_symbol]]\n if len(sent_ids) < padded_len:\n sent_ids += [word2id[padding_symbol]] * (padded_len-len(sent_ids))\n sent_len = min(len(sentence)+1, padded_len)\n\n return sent_ids, sent_len\n\n\ndef ids_to_sentence(ids, id2word):\n return [id2word[i] for i in ids]\n\n\ndef batch_to_ids(sentences, word2id, max_len):\n max_len_in_batch = min(max(len(s.split()) for s in sentences)+1, max_len)\n batch_ids, batch_ids_len = [], []\n for sentence in sentences:\n ids, ids_len = sentence_to_ids(sentence, word2id, max_len_in_batch)\n batch_ids.append(ids)\n batch_ids_len.append(ids_len)\n return batch_ids, batch_ids_len\n\n\ndef generate_batches(samples, batch_size=64):\n X, Y = [], []\n for i, (x,y) in enumerate(samples, 1):\n X.append(x)\n Y.append(y)\n if i % batch_size == 0:\n yield X, Y\n X, Y = [], []\n\n if X and Y:\n yield X, Y\n\n\nclass Seq2SeqModel(object):\n def __declare_placeholders(self):\n self.input_batch = tf.placeholder(shape=(None, None), dtype=tf.int32, name='input_batch')\n self.input_batch_lengths = tf.placeholder(shape=(None, ), dtype=tf.int32, name='input_batch_lengths')\n\n self.ground_truth = tf.placeholder(shape=(None, None), dtype=tf.int32, name='ground_truth')\n self.ground_truth_lengths = tf.placeholder(shape=(None, ), dtype=tf.int32, name='ground_truth_length')\n\n self.dropout_ph = tf.placeholder_with_default(tf.cast(1.0, tf.float32), shape=[])\n self.learning_rate_ph = tf.placeholder(shape=[], dtype=tf.float32)\n\n def __creare_embeddings(self, vocab_size, embeddings_size):\n random_initializer = tf.random_uniform((vocab_size, embeddings_size), -1.0, 1.0)\n self.embeddings = tf.Variable(random_initializer, dtype=tf.float32, name='embeddings')\n self.input_batch_embedded = tf.nn.embedding_lookup(self.embeddings, self.input_batch)\n\n def __build_encoder(self, hidden_size):\n encoder_cell = tf.nn.rnn_cell.DropoutWrapper(tf.nn.rnn_cell.GRUCell(hidden_size), input_keep_prob=self.dropout_ph)\n self.encoder_outputs, self.final_encoder_state = tf.nn.dynamic_rnn(\n encoder_cell,\n inputs=self.input_batch_embedded,\n sequence_length=self.input_batch_lengths,\n dtype=tf.float32)\n\n def __build_decoder(self, hidden_size, vocab_size, max_iter, start_symbol_id, end_symbol_id):\n batch_size = 
tf.shape(self.input_batch)[0]\n start_tokens = tf.fill([batch_size], start_symbol_id)\n ground_truth_as_input = tf.concat([tf.expand_dims(start_tokens, 1), self.ground_truth], 1)\n\n self.ground_truth_embedded = tf.nn.embedding_lookup(self.embeddings, ground_truth_as_input)\n\n train_helper = contrib.seq2seq.TrainingHelper(self.ground_truth_embedded, self.ground_truth_lengths)\n\n infer_helper = contrib.seq2seq.GreedyEmbeddingHelper(self.embeddings, start_tokens, end_symbol_id)\n\n def decode(helper, scope, reuse=None):\n with tf.variable_scope(scope, reuse=reuse):\n # decoder_cell = tf.nn.rnn_cell.DropoutWrapper(tf.nn.rnn_cell.GRUCell(hidden_size, reuse=reuse), input_keep_prob=self.dropout_ph)\n # decoder_cell = contrib.rnn.OutputProjectionWrapper(decoder_cell, vocab_size, reuse=reuse)\n # decoder = contrib.seq2seq.BasicDecoder(cell=decoder_cell, helper=helper, initial_state=self.final_encoder_state)\n # outputs, _, _ = contrib.seq2seq.dynamic_decode(decoder=decoder, maximum_iterations=max_iter,\n # output_time_major=False, impute_finished=True)\n\n\n memory = self.encoder_outputs\n attention_mechanism = contrib.seq2seq.BahdanauAttention(\n num_units=hidden_size, memory=memory,\n memory_sequence_length=self.input_batch_lengths)\n decoder_cell = tf.nn.rnn_cell.DropoutWrapper(tf.nn.rnn_cell.GRUCell(hidden_size, reuse=reuse),\n input_keep_prob=self.dropout_ph)\n attn_cell = contrib.seq2seq.AttentionWrapper(\n decoder_cell, attention_mechanism, attention_layer_size=hidden_size)\n out_cell = contrib.rnn.OutputProjectionWrapper(attn_cell, vocab_size, reuse=reuse)\n decoder = contrib.seq2seq.BasicDecoder(\n cell=out_cell, helper=helper,\n initial_state=out_cell.zero_state(dtype=tf.float32, batch_size=batch_size))\n\n outputs, _, _ = contrib.seq2seq.dynamic_decode(decoder=decoder, maximum_iterations=max_iter,\n output_time_major=False, impute_finished=True)\n\n return outputs\n\n self.train_outputs = decode(train_helper, 'decode')\n self.infer_outputs = decode(infer_helper, 'decode', reuse=True)\n\n def __compute_loss(self):\n weights = tf.cast(tf.sequence_mask(self.ground_truth_lengths), dtype=tf.float32)\n self.loss = contrib.seq2seq.sequence_loss(\n logits=self.train_outputs.rnn_output, targets=self.ground_truth, weights=weights)\n\n def __perform_optimization(self):\n self.train_op = contrib.layers.optimize_loss(\n loss=self.loss,\n global_step=tf.train.get_global_step(),\n learning_rate=self.learning_rate_ph,\n optimizer='Adam',\n clip_gradients=1.0)\n\n def __init__(self, vocab_size, embeddings_size, hidden_size, max_iter, start_symbol_id, end_symbol_id, padding_symbol_id):\n self.__declare_placeholders()\n self.__creare_embeddings(vocab_size, embeddings_size)\n self.__build_encoder(hidden_size)\n self.__build_decoder(hidden_size, vocab_size, max_iter, start_symbol_id, end_symbol_id)\n\n self.__compute_loss()\n self.__perform_optimization()\n\n self.train_predictions = self.train_outputs.sample_id\n self.infer_predictions = self.infer_outputs.sample_id\n\n def train_on_batch(self, session, X, X_seq_len, Y, Y_seq_len, learning_rate, dropout_keep_probability):\n feed_dict = {\n self.input_batch:X,\n self.input_batch_lengths:X_seq_len,\n self.ground_truth:Y,\n self.ground_truth_lengths:Y_seq_len,\n self.learning_rate_ph:learning_rate,\n self.dropout_ph:dropout_keep_probability\n }\n pred, loss, _ = session.run([\n self.train_predictions,\n self.loss,\n self.train_op\n ], feed_dict=feed_dict)\n\n return pred, loss\n\n def predict_for_batch(self, session, X, X_seq_len):\n feed_dict={\n 
self.input_batch:X,\n self.input_batch_lengths:X_seq_len\n }\n pred=session.run([self.infer_predictions], feed_dict=feed_dict)[0]\n return pred\n\n def predict_for_batch_with_loss(self, session, X, X_seq_len, Y, Y_seq_len):\n feed_dict={\n self.input_batch:X,\n self.input_batch_lengths:X_seq_len,\n self.ground_truth: Y,\n self.ground_truth_lengths: Y_seq_len\n }\n pred, loss = session.run([\n self.infer_predictions,\n self.loss\n ], feed_dict=feed_dict)\n return pred, loss\n\n\n\n\nif __name__ == '__main__':\n train_set, test_set = split_dataset()\n\n\n tf.reset_default_graph()\n model = Seq2SeqModel(\n vocab_size=len(word2id), embeddings_size=20, max_iter=8, hidden_size=512,\n start_symbol_id=word2id[start_symbol], end_symbol_id=word2id[end_symbol], padding_symbol_id=word2id[padding_symbol])\n batch_size = 128\n n_epochs = 30\n learning_rate = 0.001\n dropout_keep_probability = 0.9\n max_len = 20\n n_step = int(len(train_set)/batch_size)\n\n export_path = './savedmodel'\n builder = tf.saved_model.builder.SavedModelBuilder(export_path)\n\n session = tf.Session()\n session.run(tf.global_variables_initializer())\n\n\n # all_model_predictions = []\n # all_ground_truth = []\n\n print('Start training... \\n')\n for epoch in range(n_epochs):\n random.shuffle(train_set)\n random.shuffle(test_set)\n print('Train: epoch', epoch + 1)\n for n_iter, (X_batch, Y_batch) in enumerate(generate_batches(train_set, batch_size=batch_size)):\n\n X_batch, lx = batch_to_ids(X_batch, word2id, max_len)\n Y_batch, ly = batch_to_ids(Y_batch, word2id, max_len)\n\n predictions, loss = model.train_on_batch(session, X_batch, lx, Y_batch, ly, learning_rate,\n dropout_keep_probability)\n\n if n_iter % 50 == 0:\n print(\"Epoch: [%d/%d], step: [%d/%d], loss: %f\" % (epoch + 1, n_epochs, n_iter + 1, n_step, loss))\n\n\n\n X_sent, Y_sent = next(generate_batches(test_set, batch_size=batch_size))\n X, lx = batch_to_ids(X_sent, word2id, max_len)\n Y, ly = batch_to_ids(Y_sent, word2id, max_len)\n predictions, loss = model.predict_for_batch_with_loss(session, X, lx, Y, ly)\n print('Test: epoch', epoch + 1, 'loss:', loss, )\n for x, y, p in list(zip(X, Y, predictions))[:3]:\n print('X:', ' '.join(ids_to_sentence(x, id2word)))\n print('Y:', ' '.join(ids_to_sentence(y, id2word)))\n print('O:', ' '.join(ids_to_sentence(p, id2word)))\n print('')\n\n # model_predictions = []\n # ground_truth = []\n #\n # for X_batch, Y_batch in generate_batches(test_set, batch_size=batch_size):\n #\n # X_batch, lx = batch_to_ids(X_batch, word2id, max_len)\n # Y_batch, ly = batch_to_ids(Y_batch, word2id, max_len)\n # pre = model.predict_for_batch(session, X_batch, lx)\n #\n # for y, p in zip(Y_batch, pre):\n # y_sent = ' '.join(ids_to_sentence(y, id2word))\n # y_sent = y_sent[:y_sent.find('$')]\n # p_sent = ' '.join(ids_to_sentence(p, id2word))\n # p_sent = p_sent[:p_sent.find('$')]\n #\n # model_predictions.append(int(p_sent))\n # ground_truth.append(int(y_sent))\n #\n # all_model_predictions.append(model_predictions)\n # all_ground_truth.append(ground_truth)\n\n print('\\n...training finished.')\n\n # Save the variables to disk.\n # inputs = {\n # \"input_batch\": model.input_batch,\n # \"input_batch_lengths\": model.input_batch_lengths\n # }\n # outputs = {\"prediction\": model.infer_predictions}\n # tf.saved_model.simple_save(\n # session, \"./seq2seq_model\", inputs, outputs)\n\n # save the model\n\n\n input_batch = tf.saved_model.utils.build_tensor_info(model.input_batch)\n input_batch_lengths = 
tf.saved_model.utils.build_tensor_info(model.input_batch_lengths)\n prediction = tf.saved_model.utils.build_tensor_info(model.infer_predictions)\n\n prediction_signature = (\n tf.saved_model.signature_def_utils.build_signature_def(\n inputs = {\n \"input_batch\": input_batch,\n \"input_batch_lengths\": input_batch_lengths\n },\n outputs={\"prediction\": prediction},\n method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME))\n\n builder.add_meta_graph_and_variables(\n session, [tf.saved_model.tag_constants.SERVING],\n signature_def_map={\n tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:\n prediction_signature\n },\n )\n builder.save()\n\n\n x_b, l = batch_to_ids(['how are you', 'thank you', 'i love you', 'what is your name?'], word2id, max_len)\n pre = model.predict_for_batch(session, x_b, l)\n for x, p in zip(x_b, pre):\n pre_sent = ' '.join(ids_to_sentence(p, id2word))\n pre_sent = pre_sent[:pre_sent.find('$')]\n print(pre_sent)\n\n\n\n\n\n\n\n\n\n\n\n\n\n" ]
[ [ "tensorflow.saved_model.utils.build_tensor_info", "tensorflow.nn.embedding_lookup", "tensorflow.global_variables_initializer", "tensorflow.contrib.seq2seq.BahdanauAttention", "tensorflow.cast", "tensorflow.saved_model.signature_def_utils.build_signature_def", "tensorflow.shape", "tensorflow.random_uniform", "tensorflow.Variable", "tensorflow.variable_scope", "tensorflow.nn.dynamic_rnn", "tensorflow.train.get_global_step", "tensorflow.expand_dims", "tensorflow.Session", "tensorflow.fill", "tensorflow.contrib.rnn.OutputProjectionWrapper", "tensorflow.placeholder", "sklearn.model_selection.train_test_split", "tensorflow.contrib.seq2seq.GreedyEmbeddingHelper", "tensorflow.sequence_mask", "tensorflow.contrib.seq2seq.dynamic_decode", "tensorflow.reset_default_graph", "tensorflow.nn.rnn_cell.GRUCell", "tensorflow.saved_model.builder.SavedModelBuilder", "tensorflow.contrib.seq2seq.TrainingHelper", "tensorflow.contrib.seq2seq.sequence_loss", "tensorflow.contrib.seq2seq.AttentionWrapper" ] ]
ByronDev121/literature-review
[ "23c276e92534793d85c7af5c24d93603f8ee7678" ]
[ "literature-review/linear-regression/single_var_linear_regression.py" ]
[ "from mpl_toolkits.mplot3d import Axes3D # noqa: F401 unused import\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\nimport numpy as np\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.metrics import mean_squared_error, r2_score\nfrom single_var_gradient_decent import LinearRegressionUsingGD\n\n\ndef plot_result(x, y, y_predicted):\n plt.subplots(1, 1)\n\n # Actual data points\n plt.scatter(x, y, s=10)\n plt.xlabel('\\u03B80')\n plt.ylabel('\\u03B81')\n\n # Predicted data points\n plt.plot(x, y_predicted, color='r')\n plt.show()\n\n\ndef plot_cost_function_2d(theta_1, theta_2, cost):\n fig, ax = plt.subplots(1, 1)\n ax.contourf(theta_1,\n theta_2,\n cost,\n levels=[0, 1, 2, 4, 6, 8, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100],\n cmap=cm.coolwarm,\n antialiased=True)\n\n plt.xlabel('\\u03B81')\n plt.ylabel('\\u03B82')\n plt.show()\n\n\ndef plot_cost_function_3d(theta_1, theta_2, cost):\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n ax.plot_surface(theta_1,\n theta_2,\n cost,\n cmap=cm.coolwarm,\n linewidth=0, antialiased=True, )\n\n plt.xlabel('\\u03B80')\n plt.ylabel('\\u03B81')\n ax.set_zlabel('J(\\u03B80, \\u03B81)')\n ax.set_zlim(0, 25)\n plt.show()\n\n\ndef get_cost_function(theta_1, theta_2, x, y, points_n):\n m = theta_1.shape[0]\n cost = np.zeros([theta_1.shape[0], theta_1.shape[1]])\n for i in range(points_n):\n residuals = ((theta_1 * x[i] + theta_2) - y[i]) ** 2\n cost += residuals\n cost = cost / (2 * m)\n return cost\n\n\ndef create_mesh_grid():\n theta_1 = np.arange(-10, 14, 0.05)\n theta_2 = np.arange(-100, 100, 0.05)\n theta_1, theta_2 = np.meshgrid(theta_1, theta_2)\n return theta_1, theta_2\n\n\ndef plot_cost_function(x, y, points_n):\n theta_1, theta_2, = create_mesh_grid()\n cost = get_cost_function(theta_1, theta_2, x, y, points_n)\n plot_cost_function_3d(theta_1, theta_2, cost)\n plot_cost_function_2d(theta_1, theta_2, cost)\n\n\ndef plot_raw_data(x, y):\n plt.scatter(x, y, s=10)\n plt.xlabel('\\u03B80')\n plt.ylabel('\\u03B81')\n plt.show()\n\n\ndef create_data(points_n):\n x = np.random.rand(points_n, 1) * 20\n y = (2 * (x + (2 * np.random.rand(points_n, 1)))) + 1\n return x, y\n\n\ndef main():\n points_n = 50\n x, y = create_data(points_n)\n\n plot_raw_data(x, y)\n plot_cost_function(x, y, points_n)\n\n # Model initialization\n # Sci-kit learn implementation:\n # regression_model = LinearRegression()\n regression_model = LinearRegressionUsingGD()\n\n # Fit the data(train the model)\n regression_model.fit(x, y)\n\n # Predict\n y_predicted = regression_model.predict(x)\n\n # model evaluation\n rmse = mean_squared_error(y, y_predicted)\n r2 = r2_score(y, y_predicted)\n\n # For sci-kit learn implementation:\n # print('Slope:', regression_model.coef_)\n # print('Intercept:', regression_model.intercept_)\n print('Slope:', regression_model.w)\n print('Intercept:', regression_model.b)\n print('Root mean squared error: ', rmse)\n print('R2 score: ', r2)\n\n # plot\n plot_result(x, y, y_predicted)\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "sklearn.metrics.mean_squared_error", "numpy.random.rand", "numpy.zeros", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.plot", "matplotlib.pyplot.subplots", "matplotlib.pyplot.figure", "matplotlib.pyplot.show", "numpy.arange", "matplotlib.pyplot.ylabel", "sklearn.metrics.r2_score", "matplotlib.pyplot.scatter", "numpy.meshgrid" ] ]
ArseneLupinhb/py_al
[ "e2e4d25a00cb13d68da26c17f86f9cf1e47a79e1" ]
[ "learn/data_analysis/normal_data_analysis/seaborn_test.py" ]
[ "import matplotlib.pyplot as plt\nimport pandas as pa\nimport seaborn as sb\n\n# 中文乱码\nplt.rcParams['font.sans-serif'] = ['SimHei']\nplt.rcParams['axes.unicode_minus'] = False\n\npath_random = r'C:\\Users\\AL\\Desktop\\test\\test.csv'\ntest_data_df = pa.read_csv(path_random)\ntest_data_df.head()\n# tick_label x轴对应标签\nplt.style.use('seaborn')\nplt.bar(test_data_df.index, test_data_df.age, tick_label=test_data_df.name1)\nplt.ylabel('age')\nplt.show()\n\n# seaborm 用法\n# hue 分类变量\nsb.barplot('name', 'age', data=test_data_df, hue='type', palette='husl')\n# for x, y in enumerate(test_data_df.age):\n# \tplt.text(x, y, \"%s岁\" % y, ha='center', fontsize=12)\nplt.legend(bbox_to_anchor=(1.01, 0.85), ncol=1)\nplt.show()\n\n# 散点图\nsb.scatterplot(x='age', y='score', data=test_data_df, hue=\"type\", style='type')\nplt.show()\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.style.use", "matplotlib.pyplot.show", "pandas.read_csv", "matplotlib.pyplot.bar" ] ]
nontas/menpo
[ "d915bec26de64a5711b96be75cd145661a32290e" ]
[ "menpo/io/test/test_io_import.py" ]
[ "import sys\nimport warnings\n\nimport menpo.io as mio\nimport numpy as np\nfrom PIL import Image as PILImage\nfrom mock import patch, MagicMock\nfrom pytest import raises\n\n\ndef test_import_incorrect_built_in():\n with raises(ValueError):\n mio.import_builtin_asset('adskljasdlkajd.obj')\n\n\ndef test_breaking_bad_import():\n img = mio.import_builtin_asset('breakingbad.jpg')\n assert(img.shape == (1080, 1920))\n assert(img.n_channels == 3)\n assert(img.landmarks['PTS'].n_points == 68)\n\n\ndef test_breaking_bad_import_kwargs():\n img = mio.import_builtin_asset('breakingbad.jpg', normalize=False)\n assert(img.pixels.dtype == np.uint8)\n\n\ndef test_takeo_import():\n img = mio.import_builtin_asset('takeo.ppm')\n assert(img.shape == (225, 150))\n assert(img.n_channels == 3)\n assert(img.landmarks['PTS'].n_points == 68)\n\n\ndef test_einstein_import():\n img = mio.import_builtin_asset('einstein.jpg')\n assert(img.shape == (1024, 817))\n assert(img.n_channels == 1)\n assert(img.landmarks['PTS'].n_points == 68)\n\n\ndef test_lenna_import():\n img = mio.import_builtin_asset('lenna.png')\n assert(img.shape == (512, 512))\n assert(img.n_channels == 3)\n assert (img.landmarks.n_groups == 2)\n assert(img.landmarks['LJSON'].n_points == 68)\n assert (img.landmarks['pupils'].n_points == 2)\n\n\ndef test_import_builtin_ljson():\n lmarks = mio.import_builtin_asset('lenna.ljson')['LJSON']\n assert(lmarks.n_points == 68)\n\n\ndef test_import_builtin_pts():\n lmarks = mio.import_builtin_asset('einstein.pts')['PTS']\n assert(lmarks.n_points == 68)\n\n\ndef test_resolve_from_paths_single_group():\n def resolver(path):\n test_dict = {'test': path.with_name('takeo.pts')}\n return mio.input.resolve_from_paths(test_dict)\n image = mio.import_image(mio.data_path_to('einstein.jpg'),\n landmark_resolver=resolver)\n assert(image.landmarks.n_groups == 1)\n assert(image.landmarks['test'].path == mio.data_path_to('takeo.pts'))\n\n\ndef test_resolve_from_paths_multi_group():\n def resolver(path):\n test_dict = {'test': path.with_name('lenna.ljson')}\n return mio.input.resolve_from_paths(test_dict)\n image = mio.import_image(mio.data_path_to('einstein.jpg'),\n landmark_resolver=resolver)\n assert(image.landmarks.n_groups == 2)\n assert(set(image.landmarks.keys()) == {'test_LJSON', 'test_pupils'})\n\n\ndef test_path():\n # choose a random asset (all should have it!)\n img = mio.import_builtin_asset('einstein.jpg')\n path = mio.data_path_to('einstein.jpg')\n assert(img.path == path)\n assert(img.path.stem == 'einstein')\n assert(img.path.suffix == '.jpg')\n assert(img.path.parent == mio.data_dir_path())\n assert(img.path.name == 'einstein.jpg')\n\n\n@patch('menpo.io.input.base._pathlib_glob_for_pattern')\ndef test_single_suffix_dot_in_path(pathlib_glob):\n import menpo.io.input.base as mio_base\n from pathlib import Path\n\n fake_path = Path('fake_path.t0.t1.t2')\n pathlib_glob.return_value = [fake_path]\n ext_map = MagicMock()\n ext_map.__contains__.side_effect = lambda x: x == '.t2'\n\n ret_val = next(mio_base.glob_with_suffix('*.t0.t1.t2', ext_map))\n assert (ret_val == fake_path)\n ext_map.__contains__.assert_called_with('.t2')\n\n\ndef test_upper_extension_mapped_to_lower():\n import menpo.io.input.base as mio_base\n from pathlib import Path\n ext_map = MagicMock()\n\n mio_base.importer_for_filepath(Path('fake_path.JPG'), ext_map)\n ext_map.get.assert_called_with('.jpg')\n\n\n@patch('menpo.io.input.base._pathlib_glob_for_pattern')\ndef test_double_suffix(pathlib_glob):\n import menpo.io.input.base as mio_base\n from 
pathlib import Path\n\n fake_path = Path('fake_path.t1.t2')\n pathlib_glob.return_value = [fake_path]\n ext_map = MagicMock()\n ext_map.__contains__.side_effect = lambda x: x == '.t1.t2'\n\n ret_val = next(mio_base.glob_with_suffix('*.t1.t2', ext_map))\n assert (ret_val == fake_path)\n ext_map.__contains__.assert_any_call('.t1.t2')\n ext_map.__contains__.assert_any_call('.t2')\n\n\ndef test_import_image():\n img_path = mio.data_dir_path() / 'einstein.jpg'\n im = mio.import_image(img_path)\n assert im.pixels.dtype == np.float\n assert im.n_channels == 1\n\n\ndef test_custom_landmark_resolver():\n def lmark_resolver(path):\n return mio.import_landmark_file(mio.data_path_to('takeo.pts'))\n\n img = mio.import_image(mio.data_path_to('lenna.png'),\n landmark_resolver=lmark_resolver)\n assert(img.has_landmarks)\n\n takeo_lmarks = mio.import_builtin_asset.takeo_pts()['PTS']\n np.allclose(img.landmarks['PTS'].points,\n takeo_lmarks.points)\n\n\ndef test_landmark_resolver_none():\n img = mio.import_image(mio.data_path_to('lenna.png'),\n landmark_resolver=None)\n assert(not img.has_landmarks)\n\n\ndef test_import_image_no_norm():\n img_path = mio.data_dir_path() / 'einstein.jpg'\n im = mio.import_image(img_path, normalize=False)\n assert im.pixels.dtype == np.uint8\n\n\ndef test_import_landmark_file():\n lm_path = mio.data_dir_path() / 'einstein.pts'\n mio.import_landmark_file(lm_path)\n\n\ndef test_import_images():\n imgs = list(mio.import_images(mio.data_dir_path()))\n imgs_filenames = set(i.path.stem for i in imgs)\n exp_imgs_filenames = {'einstein', 'takeo', 'tongue', 'breakingbad', 'lenna',\n 'menpo_thumbnail'}\n assert exp_imgs_filenames == imgs_filenames\n\n\ndef test_import_images_are_ordered_and_unduplicated():\n # we know that import_images returns images in path order\n imgs = list(mio.import_images(mio.data_dir_path()))\n imgs_filenames = [i.path.stem for i in imgs]\n print(imgs_filenames)\n exp_imgs_filenames = ['breakingbad', 'einstein', 'lenna', 'menpo_thumbnail', 'takeo', 'tongue']\n assert exp_imgs_filenames == imgs_filenames\n\n\ndef test_lsimgs_filenamess():\n assert(set(mio.ls_builtin_assets()) == {'breakingbad.jpg',\n 'einstein.jpg', 'einstein.pts',\n 'lenna.png', 'breakingbad.pts',\n 'lenna.ljson', 'takeo.ppm',\n 'takeo.pts', 'tongue.jpg',\n 'tongue.pts',\n 'menpo_thumbnail.jpg'})\n\n\ndef test_image_paths():\n ls = mio.image_paths(mio.data_dir_path())\n assert(len(list(ls)) == 6)\n\n\ndef test_import_images_wrong_path_raises_value_error():\n with raises(ValueError):\n list(mio.import_images('asldfjalkgjlaknglkajlekjaltknlaekstjlakj'))\n\n\ndef test_import_landmark_files_wrong_path_raises_value_error():\n with raises(ValueError):\n list(mio.import_landmark_files('asldfjalkgjlaknglkajlekjaltknlaekstjlakj'))\n\n\n@patch('PIL.Image.open')\n@patch('menpo.io.input.base.Path.is_file')\ndef test_importing_PIL_no_normalize(is_file, mock_image):\n mock_image.return_value = PILImage.new('RGBA', (10, 15))\n is_file.return_value = True\n\n im = mio.import_image('fake_image_being_mocked.png', normalize=False)\n assert im.shape == (15, 10)\n assert im.n_channels == 4\n assert im.pixels.dtype == np.uint8\n\n\n@patch('PIL.Image.open')\n@patch('menpo.io.input.base.Path.is_file')\ndef test_importing_PIL_RGBA_normalize(is_file, mock_image):\n from menpo.image import MaskedImage\n\n mock_image.return_value = PILImage.new('RGBA', (10, 15))\n is_file.return_value = True\n\n im = mio.import_image('fake_image_being_mocked.ppm', normalize=True)\n assert im.shape == (15, 10)\n assert im.n_channels == 3\n 
assert im.pixels.dtype == np.float\n assert type(im) == MaskedImage\n\n\n@patch('PIL.Image.open')\n@patch('menpo.io.input.base.Path.is_file')\ndef test_importing_PIL_RGBA_no_normalize(is_file, mock_image):\n\n mock_image.return_value = PILImage.new('RGBA', (10, 15))\n is_file.return_value = True\n\n im = mio.import_image('fake_image_being_mocked.ppm', normalize=False)\n assert im.shape == (15, 10)\n assert im.n_channels == 4\n assert im.pixels.dtype == np.uint8\n\n\n@patch('PIL.Image.open')\n@patch('menpo.io.input.base.Path.is_file')\ndef test_importing_PIL_L_no_normalize(is_file, mock_image):\n mock_image.return_value = PILImage.new('L', (10, 15))\n is_file.return_value = True\n\n im = mio.import_image('fake_image_being_mocked.ppm', normalize=False)\n assert im.shape == (15, 10)\n assert im.n_channels == 1\n assert im.pixels.dtype == np.uint8\n\n\n@patch('PIL.Image.open')\n@patch('menpo.io.input.base.Path.is_file')\ndef test_importing_PIL_L_normalize(is_file, mock_image):\n mock_image.return_value = PILImage.new('L', (10, 15))\n is_file.return_value = True\n\n im = mio.import_image('fake_image_being_mocked.ppm', normalize=True)\n assert im.shape == (15, 10)\n assert im.n_channels == 1\n assert im.pixels.dtype == np.float\n\n\n@patch('PIL.Image.open')\n@patch('menpo.io.input.base.Path.is_file')\ndef test_importing_PIL_I_normalize(is_file, mock_image):\n mock_image.return_value = PILImage.new('I', (10, 15))\n is_file.return_value = True\n with raises(ValueError):\n mio.import_image('fake_image_being_mocked.ppm', normalize=True)\n\n\n@patch('PIL.Image.open')\n@patch('menpo.io.input.base.Path.is_file')\ndef test_importing_PIL_I_no_normalize(is_file, mock_image):\n mock_image.return_value = PILImage.new('I', (10, 15))\n is_file.return_value = True\n\n im = mio.import_image('fake_image_being_mocked.ppm', normalize=False)\n assert im.shape == (15, 10)\n assert im.n_channels == 1\n assert im.pixels.dtype == np.int32\n\n\n@patch('PIL.Image.open')\n@patch('menpo.io.input.base.Path.is_file')\ndef test_importing_PIL_1_normalize(is_file, mock_image):\n from menpo.image import BooleanImage\n\n mock_image.return_value = PILImage.new('1', (10, 15))\n is_file.return_value = True\n\n im = mio.import_image('fake_image_being_mocked.ppm', normalize=True)\n assert im.shape == (15, 10)\n assert im.n_channels == 1\n assert im.pixels.dtype == np.bool\n assert type(im) == BooleanImage\n\n\n@patch('PIL.Image.open')\n@patch('menpo.io.input.base.Path.is_file')\ndef test_importing_PIL_1_no_normalize(is_file, mock_image):\n from menpo.image import BooleanImage\n\n mock_image.return_value = PILImage.new('1', (10, 15))\n is_file.return_value = True\n\n im = mio.import_image('fake_image_being_mocked.ppm', normalize=False)\n assert im.shape == (15, 10)\n assert im.n_channels == 1\n assert im.pixels.dtype == np.bool\n assert type(im) == BooleanImage\n\n\n@patch('PIL.Image.open')\n@patch('menpo.io.input.base.Path.is_file')\ndef test_importing_PIL_1_proper_conversion(is_file, mock_image):\n from menpo.image import BooleanImage\n \n arr = np.zeros((10, 10), dtype=np.uint8)\n arr[4, 4] = 255\n mock_image.return_value = PILImage.fromarray(arr).convert('1')\n is_file.return_value = True\n\n im = mio.import_image('fake_image_being_mocked.ppm', normalize=False)\n assert im.shape == (10, 10)\n assert im.n_channels == 1\n assert im.pixels.dtype == np.bool\n assert type(im) == BooleanImage\n assert np.all(im.pixels == arr.astype(np.bool))\n\n\n@patch('PIL.Image.open')\n@patch('menpo.io.input.base.Path.is_file')\ndef 
test_importing_PIL_P_normalize(is_file, mock_image):\n mock_image.return_value = PILImage.new('P', (10, 10))\n is_file.return_value = True\n\n im = mio.import_image('fake_image_being_mocked.ppm', normalize=True)\n assert im.shape == (10, 10)\n assert im.n_channels == 3\n assert im.pixels.dtype == np.float\n\n\n@patch('PIL.Image.open')\n@patch('menpo.io.input.base.Path.is_file')\ndef test_importing_PIL_P_no_normalize(is_file, mock_image):\n mock_image.return_value = PILImage.new('P', (10, 15))\n is_file.return_value = True\n\n im = mio.import_image('fake_image_being_mocked.ppm', normalize=False)\n assert im.shape == (15, 10)\n assert im.n_channels == 3\n assert im.pixels.dtype == np.uint8\n\n\n@patch('subprocess.Popen')\n@patch('menpo.io.input.video.video_infos_ffprobe')\n@patch('menpo.io.input.base.Path.is_file')\ndef test_importing_ffmpeg_GIF_normalize(is_file, video_infos_ffprobe, pipe):\n video_infos_ffprobe.return_value = {'duration': 2, 'width': 100,\n 'height': 150, 'n_frames': 10, 'fps': 5}\n empty_frame = np.zeros(150*100*3, dtype=np.uint8).tostring()\n pipe.return_value.stdout.read.return_value = empty_frame\n is_file.return_value = True\n\n ll = mio.import_image('fake_image_being_mocked.gif', normalize=True)\n assert ll.path.name == 'fake_image_being_mocked.gif'\n assert ll.fps == 5\n assert len(ll) == 10\n\n im = ll[0]\n assert im.shape == (150, 100)\n assert im.n_channels == 3\n assert im.pixels.dtype == np.float\n\n\n@patch('subprocess.Popen')\n@patch('menpo.io.input.video.video_infos_ffprobe')\n@patch('menpo.io.input.base.Path.is_file')\ndef test_importing_ffmpeg_GIF_no_normalize(is_file, video_infos_ffprobe, pipe):\n video_infos_ffprobe.return_value = {'duration': 2, 'width': 100,\n 'height': 150, 'n_frames': 10, 'fps': 5}\n empty_frame = np.zeros(150*100*3, dtype=np.uint8).tostring()\n pipe.return_value.stdout.read.return_value = empty_frame\n is_file.return_value = True\n\n ll = mio.import_image('fake_image_being_mocked.gif', normalize=False)\n assert ll.path.name == 'fake_image_being_mocked.gif'\n assert ll.fps == 5\n assert len(ll) == 10\n\n im = ll[0]\n assert im.shape == (150, 100)\n assert im.n_channels == 3\n assert im.pixels.dtype == np.uint8\n\n\n@patch('menpo.io.input.landmark.json.load')\n@patch('menpo.io.input.base.Path.open')\n@patch('menpo.io.input.base.Path.is_file')\ndef test_importing_v1_ljson_null_values(is_file, mock_open, mock_dict):\n v1_ljson = { \"groups\": [\n { \"connectivity\": [ [ 0, 1 ], [ 1, 2 ], [ 2, 3 ] ],\n \"label\": \"chin\", \"landmarks\": [\n { \"point\": [ 987.9, 1294.1 ] }, { \"point\": [ 96.78, 1246.8 ] },\n { \"point\": [ None, 0.1 ] }, { \"point\": [303.22, 167.2 ] } ] },\n { \"connectivity\": [ [ 0, 1 ] ],\n \"label\": \"leye\", \"landmarks\": [\n { \"point\": [ None, None ] },\n { \"point\": [ None, None ] }] }\n ], \"version\": 1 }\n mock_dict.return_value = v1_ljson\n is_file.return_value = True\n\n with warnings.catch_warnings(record=True) as w:\n lmark = mio.import_landmark_file('fake_lmark_being_mocked.ljson',\n group='LJSON')\n nan_points = np.isnan(lmark.points)\n\n # Should raise deprecation warning\n assert len(w) == 1\n assert nan_points[2, 0] # y-coord None point is nan\n assert not nan_points[2, 1] # x-coord point is not nan\n assert np.all(nan_points[4:, :]) # all of leye label is nan\n\n\n@patch('menpo.io.input.landmark.json.load')\n@patch('menpo.io.input.base.Path.open')\n@patch('menpo.io.input.base.Path.is_file')\ndef test_importing_v2_ljson_null_values(is_file, mock_open, mock_dict):\n v2_ljson = { \"labels\": [\n 
{ \"label\": \"left_eye\", \"mask\": [0, 1, 2] },\n { \"label\": \"right_eye\", \"mask\": [3, 4, 5] }\n ],\n \"landmarks\": {\n \"connectivity\": [ [0, 1], [1, 2], [2, 0], [3, 4],\n [4, 5], [5, 3] ],\n \"points\": [ [None, 200.5], [None, None],\n [316.8, 199.15], [339.48, 205.0],\n [358.54, 217.82], [375.0, 233.4]]\n },\n \"version\": 2 }\n\n mock_dict.return_value = v2_ljson\n is_file.return_value = True\n with warnings.catch_warnings(record=True) as w:\n lmark = mio.import_landmark_file('fake_lmark_being_mocked.ljson',\n group='LJSON')\n nan_points = np.isnan(lmark.points)\n assert nan_points[0, 0] # y-coord None point is nan\n assert not nan_points[0, 1] # x-coord point is not nan\n assert np.all(nan_points[1, :]) # all of leye label is nan\n\n\n@patch('menpo.io.input.landmark.json.load')\n@patch('menpo.io.input.base.Path.open')\n@patch('menpo.io.input.base.Path.is_file')\ndef test_importing_v3_ljson_null_values(is_file, mock_open, mock_dict):\n v3_ljson = {\n \"groups\": {\n \"LJSON\": {\n \"labels\": [\n { \"label\": \"left_eye\", \"mask\": [0, 1, 2] },\n { \"label\": \"right_eye\", \"mask\": [3, 4, 5] }\n ],\n \"landmarks\": {\n \"connectivity\": [ [0, 1], [1, 2], [2, 0], [3, 4],\n [4, 5], [5, 3] ],\n \"points\": [ [None, 200.5], [None, None],\n [316.8, 199.15], [339.48, 205.0],\n [358.54, 217.82], [375.0, 233.4]]\n }\n }\n },\n \"version\": 3\n }\n\n mock_dict.return_value = v3_ljson\n is_file.return_value = True\n lmark_dict = mio.import_landmark_file('fake_lmark_being_mocked.ljson')\n assert isinstance(lmark_dict, dict)\n lmark = lmark_dict['LJSON']\n nan_points = np.isnan(lmark.points)\n assert nan_points[0, 0] # y-coord None point is nan\n assert not nan_points[0, 1] # x-coord point is not nan\n assert np.all(nan_points[1, :]) # all of leye label is nan\n\n\n@patch('random.shuffle')\ndef test_shuffle_kwarg_true_calls_shuffle(mock):\n list(mio.import_images(mio.data_dir_path(), shuffle=True))\n assert mock.called\n\n\ndef test_import_as_generator():\n import types\n gen = mio.import_images(mio.data_dir_path(), as_generator=True)\n assert isinstance(gen, types.GeneratorType)\n gen = mio.import_landmark_files(mio.data_dir_path(), as_generator=True)\n assert isinstance(gen, types.GeneratorType)\n\n\ndef test_import_lazy_list():\n from menpo.base import LazyList\n data_path = mio.data_dir_path()\n ll = mio.import_images(data_path)\n assert isinstance(ll, LazyList)\n ll = mio.import_landmark_files(data_path)\n assert isinstance(ll, LazyList)\n\n\n@patch('menpo.io.input.pickle.pickle.load')\n@patch('menpo.io.input.base.Path.open')\n@patch('menpo.io.input.base.Path.is_file')\ndef test_importing_pickle(is_file, mock_open, mock_pickle):\n mock_pickle.return_value = {'test': 1}\n is_file.return_value = True\n\n objs = mio.import_pickle('mocked.pkl')\n assert isinstance(objs, dict)\n assert 'test' in objs\n assert objs['test'] == 1\n\n\n@patch('menpo.io.input.pickle.pickle.load')\n@patch('menpo.io.input.base.Path.open')\n@patch('menpo.io.input.base.Path.is_file')\n@patch('sys.version_info')\ndef test_importing_pickle_encoding_py3(version_info, is_file, mock_open,\n mock_pickle):\n version_info.major = 3\n mock_pickle.return_value = {'test': 1}\n is_file.return_value = True\n\n mio.import_pickle('mocked.pkl', encoding='latin1')\n assert mock_pickle.call_args[1].get('encoding') == 'latin1'\n\n\n@patch('menpo.io.input.pickle.pickle.load')\n@patch('menpo.io.input.base.Path.open')\n@patch('menpo.io.input.base.Path.is_file')\n@patch('sys.version_info')\ndef 
test_importing_pickle_encoding_ignored_py2(version_info, is_file, mock_open,\n mock_pickle):\n version_info.major = 2\n mock_pickle.return_value = {'test': 1}\n is_file.return_value = True\n\n mio.import_pickle('mocked.pkl', encoding='latin1')\n assert 'encoding' not in mock_pickle.call_args[1]\n\n\n@patch('menpo.io.input.pickle.pickle.load')\n@patch('menpo.io.input.base.Path.open')\n@patch('menpo.io.input.base.Path.glob')\n@patch('menpo.io.input.base.Path.is_file')\ndef test_importing_pickles(is_file, glob, mock_open, mock_pickle):\n from pathlib import Path\n from menpo.base import LazyList\n mock_pickle.return_value = {'test': 1}\n is_file.return_value = True\n glob.return_value = [Path('mocked1.pkl'), Path('mocked2.pkl')]\n\n objs = mio.import_pickles('*')\n assert isinstance(objs, LazyList)\n assert len(objs) == 2\n assert objs[0]['test'] == 1\n assert objs[1]['test'] == 1\n\n\n@patch('menpo.io.input.pickle.pickle.load')\n@patch('menpo.io.input.base.Path.open')\n@patch('menpo.io.input.base.Path.glob')\n@patch('menpo.io.input.base.Path.is_file')\ndef test_importing_pickles_as_generator(is_file, glob, mock_open, mock_pickle):\n from pathlib import Path\n import types\n mock_pickle.return_value = {'test': 1}\n is_file.return_value = True\n glob.return_value = [Path('mocked1.pkl'), Path('mocked2.pkl')]\n\n objs = mio.import_pickles('*', as_generator=True)\n assert isinstance(objs, types.GeneratorType)\n objs = list(objs)\n assert len(objs) == 2\n assert objs[0]['test'] == 1\n assert objs[1]['test'] == 1\n\n\n@patch('subprocess.Popen')\n@patch('menpo.io.input.video.video_infos_ffprobe')\n@patch('menpo.io.input.base.Path.is_file')\ndef test_importing_ffmpeg_avi_no_normalize(is_file, video_infos_ffprobe, pipe):\n video_infos_ffprobe.return_value = {'duration': 2, 'width': 100,\n 'height': 150, 'n_frames': 10, 'fps': 5}\n is_file.return_value = True\n empty_frame = np.zeros(150*100*3, dtype=np.uint8).tostring()\n pipe.return_value.stdout.read.return_value = empty_frame\n ll = mio.import_video('fake_image_being_mocked.avi', normalize=False)\n assert ll.path.name == 'fake_image_being_mocked.avi'\n assert ll.fps == 5\n assert len(ll) == 5*2\n image = ll[0]\n assert image.shape == ((150, 100))\n assert image.n_channels == 3\n assert image.pixels.dtype == np.uint8\n\n\n@patch('subprocess.Popen')\n@patch('menpo.io.input.video.video_infos_ffprobe')\n@patch('menpo.io.input.base.Path.is_file')\ndef test_importing_ffmpeg_avi_normalize(is_file, video_infos_ffprobe, pipe):\n video_infos_ffprobe.return_value = {'duration': 2, 'width': 100,\n 'height': 150, 'n_frames': 10, 'fps': 5}\n is_file.return_value = True\n empty_frame = np.zeros(150*100*3, dtype=np.uint8).tostring()\n pipe.return_value.stdout.read.return_value = empty_frame\n ll = mio.import_video('fake_image_being_mocked.avi', normalize=True)\n assert ll.path.name == 'fake_image_being_mocked.avi'\n assert ll.fps == 5\n assert len(ll) == 5*2\n image = ll[0]\n assert image.shape == (150, 100)\n assert image.n_channels == 3\n assert image.pixels.dtype == np.float\n\n\n@patch('menpo.io.input.video.video_infos_ffprobe')\n@patch('menpo.io.input.base.Path.is_file')\ndef test_importing_ffmpeg_exact_frame_count_no_ffprobe(is_file, video_infos_ffprobe):\n video_infos_ffprobe.side_effect = ValueError\n is_file.return_value = True\n with raises(ValueError):\n mio.import_video('fake_image_being_mocked.avi', normalize=True,\n 
exact_frame_count=True)\n\n\n@patch('subprocess.Popen')\n@patch('menpo.io.input.video.video_infos_ffprobe')\n@patch('menpo.io.input.video.video_infos_ffmpeg')\n@patch('menpo.io.input.base.Path.is_file')\ndef test_importing_ffmpeg_no_exact_frame_count_no_ffprobe(\n is_file, video_infos_ffmpeg, video_infos_ffprobe, pipe):\n video_infos_ffprobe.side_effect = ValueError\n video_infos_ffmpeg.return_value = {'duration': 2, 'width': 100,\n 'height': 150, 'n_frames': 10, 'fps': 5}\n is_file.return_value = True\n empty_frame = np.zeros(150*100*3, dtype=np.uint8).tostring()\n pipe.return_value.stdout.read.return_value = empty_frame\n ll = mio.import_video('fake_image_being_mocked.avi', normalize=True,\n exact_frame_count=False)\n assert ll.path.name == 'fake_image_being_mocked.avi'\n assert ll.fps == 5\n assert len(ll) == 5*2\n image = ll[0]\n assert image.shape == (150, 100)\n assert image.n_channels == 3\n assert image.pixels.dtype == np.float\n\n\ndef test_import_images_negative_max_images():\n with raises(ValueError):\n list(mio.import_images(mio.data_dir_path(), max_images=-2))\n\n\ndef test_import_images_zero_max_images():\n with raises(ValueError):\n # different since the conditional 'if max_assets' is skipped,\n # thus all images might be imported.\n list(mio.import_images(mio.data_dir_path(), max_images=0))\n\n\n# TODO: remove once the normalise argument is removed.\ndef test_import_image_deprecated_normalise_kwarg():\n with warnings.catch_warnings(record=True) as w:\n img = mio.import_builtin_asset('breakingbad.jpg', normalise=False)\n assert len(w) == 1\n assert img.pixels.dtype == np.uint8\n\n\n@patch('menpo.io.input.base.Path.is_file')\ndef test_register_image_importer(is_file):\n from menpo.image import Image\n image = Image.init_blank((10, 10))\n\n def foo_importer(filepath, **kwargs):\n return image\n\n is_file.return_value = True\n\n with patch.dict(mio.input.extensions.image_types, {}, clear=True):\n mio.register_image_importer('.foo', foo_importer)\n new_image = mio.import_image('fake.foo')\n assert image is new_image\n\n\n@patch('menpo.io.input.base.Path.is_file')\ndef test_register_landmark_importer(is_file):\n from menpo.shape import PointCloud\n lmark = PointCloud.init_2d_grid((1, 1))\n\n def foo_importer(filepath, **kwargs):\n return lmark\n\n is_file.return_value = True\n\n with patch.dict(mio.input.extensions.image_landmark_types, {}, clear=True):\n mio.register_landmark_importer('.foo', foo_importer)\n new_lmark = mio.import_landmark_file('fake.foo')\n assert lmark is new_lmark\n\n\n@patch('menpo.io.input.base.Path.is_file')\ndef test_register_video_importer(is_file):\n from menpo.image import Image\n from menpo.base import LazyList\n\n def foo_importer(filepath, **kwargs):\n return LazyList([lambda: Image.init_blank((10, 10))])\n\n is_file.return_value = True\n\n with patch.dict(mio.input.extensions.ffmpeg_video_types, {}, clear=True):\n mio.register_video_importer('.foo', foo_importer)\n new_video = mio.import_video('fake.foo')\n assert len(new_video) == 1\n\n\n@patch('menpo.io.input.base.Path.is_file')\ndef test_register_pickle_importer(is_file):\n obj = object()\n\n def foo_importer(filepath, **kwargs):\n return obj\n\n is_file.return_value = True\n\n with patch.dict(mio.input.extensions.pickle_types, {}, clear=True):\n mio.register_pickle_importer('.foo', foo_importer)\n new_obj = mio.import_pickle('fake.foo')\n assert new_obj is obj\n\n\ndef test_register_no_leading_period():\n ext_map = {}\n mio.input.base._register_importer(ext_map, 'foo', lambda x: x)\n assert 
'.foo' in ext_map\n assert 'foo' not in ext_map\n" ]
[ [ "numpy.all", "numpy.allclose", "numpy.isnan", "numpy.zeros" ] ]
nimeshgit/nyoka
[ "43bf049825922213eeb3e6a8f39864f9b75d01d5" ]
[ "nyoka/preprocessing/__init__.py" ]
[ "import queue\nimport numpy as np\nfrom sklearn.utils import check_array\nfrom sklearn.base import TransformerMixin\nFLOAT_DTYPES = (np.float64, np.float32, np.float16)\n\n\nclass Lag(TransformerMixin):\n \"\"\"\n The Lag class takes `value` number of previous record of the fields where it is applied and applies `aggregation` to those values.\n\n Parameters\n ----------\n aggregation : String\n aggregation type. The valid types are [\"min\", \"max\", \"sum\", \"avg\", \"median\", \"product\", \"stddev\"]\n value : Integer (default = 2)\n The number of previous record to aggregate. Should be greater than 1.\n\n \n \"\"\"\n \n _VALID_AGGS = [\"min\", \"max\", \"sum\", \"avg\", \"median\", \"product\", \"stddev\"]\n _AGG_FUNC_MAP = {\n \"min\" : np.min,\n \"max\" : np.max,\n \"sum\" : np.sum,\n \"avg\" : np.mean,\n \"median\" : np.median,\n \"product\" : np.product,\n \"stddev\" : np.std\n }\n \n def __init__(self, aggregation, value=2, copy=True):\n assert aggregation in self._VALID_AGGS, f\"Invalid `aggregation` type. Valid types are {self._VALID_AGGS}\"\n assert value > 1, \"`value` should be greater than 1\"\n self.aggregation = aggregation\n self.value = value\n self.copy = copy\n \n def fit(self, X, y=None):\n \"\"\"\n Does nothing.\n\n Returns\n -------\n The same object\n \"\"\" \n return self\n \n \n def transform(self, X, y=None):\n \"\"\"\n Trasforms the given X by taking `value` number of previous records and applying `aggregation` method\n\n Parameters\n ----------\n X : Pandas DataFrame or numpy array\n The input data\n y : \n It is ignored.\n\n Returns\n -------\n Transformed X as numpy array \n \"\"\"\n self._transformed_X = list()\n X = check_array(X, copy=self.copy, warn_on_dtype=True, estimator=self) \n q_list = [queue.Queue() for i in range(len(X[0]))]\n \n for _ in range(self.value):\n for q_ in q_list:\n q_.put(0.0)\n \n for row in X:\n aggregated_vals = [self._AGG_FUNC_MAP[self.aggregation](q_.queue) for q_ in q_list]\n self._transformed_X.append(aggregated_vals)\n for idx, col in enumerate(row):\n q_list[idx].put(col)\n q_list[idx].get()\n return np.array(self._transformed_X)\n \n \n def __repr__(self):\n return f\"Lag(aggregation='{self.aggregation}', value={self.value})\"" ]
[ [ "sklearn.utils.check_array", "numpy.array" ] ]
AllTheLonelyPeople/Chips-Circuits
[ "55e97579f97e63f6219c14fe5c59ae761729248c" ]
[ "linespacer_ratioy.py" ]
[ "\"\"\"\npath.py\n\nTom Kamstra, Izhar Hamer, Julia Linde\n\nFinds the optimal paths between the chips based on ratio between distance in x direction and total distance between chips.\n\"\"\"\nfrom mpl_toolkits import mplot3d\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom code.classes import classes as classs\nfrom code.functions import delete as delete\nfrom code.functions import change_coordinates as change\nimport copy\n\nimport csv\n\n# Create netlist by loading file in class\nnetlist = classs.Netlist(\"data/netlist_1.csv\").netlist\n\n# Create list for gate coordinates\ngate_coordinates = classs.Gate_coordinate(\"data/pritn_1.csv\").gate_coordinates\nprint(gate_coordinates)\n\n# Create dictionary for gate connections with corresponding shortest distance\ndistances = {}\n\nfor item in netlist:\n gate_start = int(item.gate_1)\n gate_end = int(item.gate_2)\n \n # Create tuple for gates that have to be connected\n connected_gate = (gate_start, gate_end)\n \n # Define coordinates of start and end gate\n coordinate_start = gate_coordinates[gate_start - 1]\n coordinate_end = gate_coordinates[gate_end - 1]\n \n # Define x and y coordinates for start and end gate\n x_coordinate_start = int(coordinate_start[0])\n y_coordinate_start = int(coordinate_start[1])\n\n x_coordinate_end = int(coordinate_end[0])\n y_coordinate_end = int(coordinate_end[1])\n\n # Calculate total shortest distance between gates\n total_dist = abs(x_coordinate_start - x_coordinate_end) + abs(y_coordinate_start - y_coordinate_end)\n y_dist = abs(y_coordinate_start - y_coordinate_end)\n dist_ratio = y_dist/total_dist\n \n distances.update({connected_gate: dist_ratio})\n\n# Sort connections from smallest to largest distance in dictionary\ndistances = list(distances.items())\nfor max_number in range(len(distances)-1, -1, -1):\n swapped = False\n for count in range(max_number):\n if distances[count][1] > distances[count + 1][1]:\n distances[count], distances[count + 1] = distances[count + 1], distances[count]\n swapped = True\n if not swapped:\n break\n\n# Create dictionary of wires with connected gates\ngate_connections = {}\n\ncount = 0\n\n# Saves all wires\nallwires = []\n\n# Defines maximum number of layers\nmax_num_layers = 7\n\n# Connect gates with eachother, starting with smallest distance\nfor chips in distances:\n gate_start = int(chips[0][0])\n gate_end = int(chips[0][1])\n \n connected_gate = (gate_start, gate_end)\n\n coordinate_begin = gate_coordinates[gate_start - 1]\n coordinate_end = gate_coordinates[gate_end - 1]\n \n print(\"COORDINATES\")\n print(coordinate_begin)\n print(coordinate_end)\n \n # Define x, y and z coordinates of start and end gate\n x_coordinate_start = int(coordinate_begin[0])\n y_coordinate_start = int(coordinate_begin[1])\n z_coordinate_start = int(coordinate_begin[2])\n\n x_coordinate_end = int(coordinate_end[0])\n y_coordinate_end = int(coordinate_end[1])\n z_coordinate_end = int(coordinate_end[2])\n\n # Create list for wire coordinates\n wires = []\n\n # Define all 5 coordinates that surround current start coordinate\n x_coordinate_startcheck = x_coordinate_start + 1\n coordinate_1 = [x_coordinate_startcheck, y_coordinate_start, z_coordinate_start]\n x_coordinate_startcheck2 = x_coordinate_start - 1\n coordinate_2 = [x_coordinate_startcheck2, y_coordinate_start, z_coordinate_start]\n y_coordinate_startcheck = y_coordinate_start + 1\n coordinate_3 = [x_coordinate_start, y_coordinate_startcheck, z_coordinate_start]\n y_coordinate_startcheck2 = y_coordinate_start - 1\n 
coordinate_4 = [x_coordinate_start, y_coordinate_startcheck2, z_coordinate_start]\n z_coordinate_startcheck = z_coordinate_start + 1\n coordinate_5 = [x_coordinate_start, y_coordinate_start, z_coordinate_startcheck]\n \n # Saves all coordinates around current start coordinate in list\n coordinate_check = [coordinate_1, coordinate_2, coordinate_3, coordinate_4, coordinate_5]\n \n # Creates list for all coordinates that are already occupied\n all_coordinates = []\n for coo in allwires:\n all_coordinates.append(coo.get_coordinate())\n\n # Checks whether wire can move in any direction, if at least one coordinate around current coordinate is free\n if all(elem in all_coordinates for elem in coordinate_check):\n for coor in coordinate_check:\n for item_start in allwires:\n if item_start.coordinate == coor and item_start.net[0] != gate_start and item_start.net[1] != gate_start:\n (wires, x_coordinate_start, y_coordinate_start, z_coordinate_start, coordinate, gate_connections, allwires) = delete.delete_wire(wires, coordinate_begin, item_start.net, distances, gate_connections, allwires)\n break\n print(\"COUNT\")\n print(count)\n \n # Create switch variable to switch start moving direction\n if count > len(netlist)+5:\n # Reconnect deleted wires in switched direction\n switch_variable = 0\n else:\n switch_variable = 1\n \n # Overwrite coordinate but save coordinate_begin in different variable \n coordinate = coordinate_begin\n \n checker = 0\n notcomplete = 0\n \n while coordinate != coordinate_end:\n # Determine direction in which wire has to move\n if x_coordinate_start > x_coordinate_end:\n step_x = -1\n elif x_coordinate_start < x_coordinate_end:\n step_x = 1\n\n if y_coordinate_start > y_coordinate_end:\n step_y = -1\n elif y_coordinate_start < y_coordinate_end:\n step_y = 1\n \n # Append start coordinate to wire\n wires.append(coordinate)\n print(coordinate)\n wire = classs.Wire(coordinate, connected_gate)\n allwires.append(wire)\n \n if switch_variable == 0:\n # Loop until x-coordinate from start gate equals x-coordinate from end gate\n while x_coordinate_start != x_coordinate_end:\n x_coordinate_start = x_coordinate_start + step_x\n coordinate = [x_coordinate_start, y_coordinate_start, z_coordinate_start]\n # Check for other gates or other wires\n if gate_connections:\n try:\n step_y = step_y\n except:\n step_y = 0\n (x_coordinate_start, z_coordinate_start) = change.change_coor(gate_connections, coordinate, gate_coordinates, wires, coordinate_end, x_coordinate_start, -step_x, z_coordinate_start, 1)\n coordinate = [x_coordinate_start, y_coordinate_start, z_coordinate_start]\n (z_coordinate_start, y_coordinate_start) = change.change_coor(gate_connections, coordinate, gate_coordinates, wires, coordinate_end, z_coordinate_start, -1, y_coordinate_start, step_y)\n coordinate = [x_coordinate_start, y_coordinate_start, z_coordinate_start]\n # Coordinate changes 2 times in y-direction, so step_y is doubled\n (y_coor_notrelevant, y_coordinate_start) = change.change_coor(gate_connections, coordinate, gate_coordinates, wires, coordinate_end, y_coordinate_start, step_y, y_coordinate_start, (-2*step_y))\n coordinate = [x_coordinate_start, y_coordinate_start, z_coordinate_start]\n (y_coordinate_start, x_coordinate_start) = change.change_coor(gate_connections, coordinate, gate_coordinates, wires, coordinate_end, y_coordinate_start, step_y, x_coordinate_start, -step_x)\n elif coordinate in gate_coordinates and coordinate != coordinate_end:\n x_coordinate_start = x_coordinate_start - step_x\n # z kan nu niet 
meerdere stappen omhoog/omlaag\n z_coordinate_start = z_coordinate_start + 1\n #checken of na deze stap geen gate zit\n coordinate = [x_coordinate_start, y_coordinate_start, z_coordinate_start]\n (z_coordinate_start, x_coordinate_start) = change.change_coor2(coordinate, wires, gate_coordinates, coordinate_end, z_coordinate_start, -1, x_coordinate_start, -step_x)\n coordinate = [x_coordinate_start, y_coordinate_start, z_coordinate_start]\n (x_coordinate_start, y_coordinate_start) = change.change_coor2(coordinate, wires, gate_coordinates, coordinate_end, x_coordinate_start, step_x, y_coordinate_start, step_y)\n coordinate = [x_coordinate_start, y_coordinate_start, z_coordinate_start]\n # Coordinate changes 2 times in y-direction, so step_y is doubled\n (y_coor_notrelevant, y_coordinate_start) = change.change_coor2(coordinate, wires, gate_coordinates, coordinate_end, y_coordinate_start, step_y, y_coordinate_start, (-2*step_y))\n \n coordinate = [x_coordinate_start, y_coordinate_start, z_coordinate_start]\n wires.append(coordinate)\n print(coordinate)\n wire = classs.Wire(coordinate, connected_gate)\n allwires.append(wire)\n \n # Redefine step in direction of y-coordinate\n if y_coordinate_start < y_coordinate_end:\n step_y = 1\n elif y_coordinate_start > y_coordinate_end:\n step_y = -1\n \n # Change z-coordinate if x- and y-coordinates are same as those from end gate \n if x_coordinate_start == x_coordinate_end and y_coordinate_start == y_coordinate_end:\n while z_coordinate_start != z_coordinate_end:\n z_coordinate_start = z_coordinate_start - 1\n coordinate = [x_coordinate_start, y_coordinate_start, z_coordinate_start]\n if gate_connections:\n (z_coordinate_start, y_coordinate_start) = change.change_coor(gate_connections, coordinate, gate_coordinates, wires, coordinate_end, z_coordinate_start, 1, y_coordinate_start, step_y)\n coordinate = [x_coordinate_start, y_coordinate_start, z_coordinate_start]\n (y_coordinate_start, x_coordinate_start) = change.change_coor(gate_connections, coordinate, gate_coordinates, wires, coordinate_end, y_coordinate_start, -step_y, x_coordinate_start, step_x)\n coordinate = [x_coordinate_start, y_coordinate_start, z_coordinate_start]\n (x_coor_notrelevant, x_coordinate_start) = change.change_coor(gate_connections, coordinate, gate_coordinates, wires, coordinate_end, x_coordinate_start, step_x, x_coordinate_start, (-2*step_x))\n coordinate = [x_coordinate_start, y_coordinate_start, z_coordinate_start]\n (x_coordinate_start, y_coordinate_start) = change.change_coor(gate_connections, coordinate, gate_coordinates, wires, coordinate_end, x_coordinate_start, step_x, y_coordinate_start, -step_y)\n elif coordinate in gate_coordinates and coordinate != coordinate_end:\n z_coordinate_start = z_coordinate_start + 1\n y_coordinate_start = y_coordinate_start + step_y\n coordinate = [x_coordinate_start, y_coordinate_start, z_coordinate_start]\n (x_coordinate_start, y_coordinate_start) = change.change_coor2(coordinate, wires, gate_coordinates, coordinate_end, x_coordinate_start, step_x, y_coordinate_start, -step_y)\n coordinate = [x_coordinate_start, y_coordinate_start, z_coordinate_start]\n (x_coor_notrelevant, x_coordinate_start) = change.change_coor2(coordinate, wires, gate_coordinates, coordinate_end, x_coordinate_start, step_x, x_coordinate_start, (-2*step_x))\n coordinate = [x_coordinate_start, y_coordinate_start, z_coordinate_start]\n (x_coordinate_start, y_coordinate_start) = change.change_coor2(coordinate, wires, gate_coordinates, coordinate_end, x_coordinate_start, 
step_x, y_coordinate_start, -step_y)\n \n coordinate = [x_coordinate_start, y_coordinate_start, z_coordinate_start]\n wires.append(coordinate)\n print(coordinate)\n wire = classs.Wire(coordinate, connected_gate)\n allwires.append(wire)\n \n # Redefine step in direction of y-coordinate \n if y_coordinate_start < y_coordinate_end:\n step_y = 1\n elif y_coordinate_start > y_coordinate_end:\n step_y = -1\n \n # Loop until y-coordinate from start gate equals y-coordinate from end gate\n while y_coordinate_start != y_coordinate_end:\n y_coordinate_start = y_coordinate_start + step_y\n coordinate = [x_coordinate_start, y_coordinate_start, z_coordinate_start]\n # Check for other gates or other wires\n if gate_connections:\n try:\n step_x = step_x\n except:\n step_x = 0\n (y_coordinate_start, z_coordinate_start) = change.change_coor(gate_connections, coordinate, gate_coordinates, wires, coordinate_end, y_coordinate_start, -step_y, z_coordinate_start, 1)\n coordinate = [x_coordinate_start, y_coordinate_start, z_coordinate_start]\n (z_coordinate_start, x_coordinate_start) = change.change_coor(gate_connections, coordinate, gate_coordinates, wires, coordinate_end, z_coordinate_start, -1, x_coordinate_start, step_x)\n coordinate = [x_coordinate_start, y_coordinate_start, z_coordinate_start]\n (x_coor_notrelevant, x_coordinate_start) = change.change_coor(gate_connections, coordinate, gate_coordinates, wires, coordinate_end, x_coordinate_start, step_x, x_coordinate_start, (-2*step_x))\n coordinate = [x_coordinate_start, y_coordinate_start, z_coordinate_start]\n (x_coordinate_start, y_coordinate_start) = change.change_coor(gate_connections, coordinate, gate_coordinates, wires, coordinate_end, x_coordinate_start, step_x, y_coordinate_start, -step_y)\n elif coordinate in gate_coordinates and coordinate != coordinate_end:\n y_coordinate_start = y_coordinate_start - step_y\n z_coordinate_start = z_coordinate_start + 1\n coordinate = [x_coordinate_start, y_coordinate_start, z_coordinate_start]\n (x_coordinate_start, z_coordinate_start) = change.change_coor2(coordinate, wires, gate_coordinates, coordinate_end, x_coordinate_start, step_x, z_coordinate_start, -1)\n coordinate = [x_coordinate_start, y_coordinate_start, z_coordinate_start]\n (x_coor_notrelevant, x_coordinate_start) = change.change_coor2(coordinate, wires, gate_coordinates, coordinate_end, x_coordinate_start, step_x, x_coordinate_start, (-2*step_x))\n coordinate = [x_coordinate_start, y_coordinate_start, z_coordinate_start]\n (x_coordinate_start, y_coordinate_start) = change.change_coor2(coordinate, wires, gate_coordinates, coordinate_end, x_coordinate_start, step_x, y_coordinate_start, -step_y)\n\n # Reset switch variable to be able to move in x-direction\n switch_variable = 0\n \n coordinate = [x_coordinate_start, y_coordinate_start, z_coordinate_start]\n wires.append(coordinate)\n print(coordinate)\n wire = classs.Wire(coordinate, connected_gate)\n allwires.append(wire)\n \n # Redefine step in direction of y-coordinate\n if y_coordinate_start < y_coordinate_end:\n step_y = 1\n elif y_coordinate_start > y_coordinate_end:\n step_y = -1\n \n # Change z-coordinate if x- and y-coordinates are same as those from end gate\n if x_coordinate_start == x_coordinate_end and y_coordinate_start == y_coordinate_end:\n while z_coordinate_start != z_coordinate_end:\n z_coordinate_start = z_coordinate_start - 1\n coordinate = [x_coordinate_start, y_coordinate_start, z_coordinate_start]\n if gate_connections:\n (z_coordinate_start, x_coordinate_start) = 
change.change_coor(gate_connections, coordinate, gate_coordinates, wires, coordinate_end, z_coordinate_start, 1, x_coordinate_start, step_x)\n coordinate = [x_coordinate_start, y_coordinate_start, z_coordinate_start]\n (x_coordinate_start, y_coordinate_start) = change.change_coor(gate_connections, coordinate, gate_coordinates, wires, coordinate_end, x_coordinate_start, -step_x, y_coordinate_start, step_y)\n coordinate = [x_coordinate_start, y_coordinate_start, z_coordinate_start]\n (y_coor_notrelevant, y_coordinate_start) = change.change_coor(gate_connections, coordinate, gate_coordinates, wires, coordinate_end, y_coordinate_start, step_y, y_coordinate_start, (-2*step_y))\n coordinate = [x_coordinate_start, y_coordinate_start, z_coordinate_start]\n (y_coordinate_start, x_coordinate_start) = change.change_coor(gate_connections, coordinate, gate_coordinates, wires, coordinate_end, y_coordinate_start, step_y, x_coordinate_start, -step_x)\n elif coordinate in gate_coordinates and coordinate != coordinate_end:\n z_coordinate_start = z_coordinate_start + 1\n x_coordinate_start = x_coordinate_start + step_x\n coordinate = [x_coordinate_start, y_coordinate_start, z_coordinate_start]\n (x_coordinate_start, y_coordinate_start) = change.change_coor2(coordinate, wires, gate_coordinates, coordinate_end, x_coordinate_start, -step_x, y_coordinate_start, step_y)\n coordinate = [x_coordinate_start, y_coordinate_start, z_coordinate_start]\n (y_coor_notrelevant, y_coordinate_start) = change.change_coor2(coordinate, wires, gate_coordinates, coordinate_end, y_coordinate_start, step_y, y_coordinate_start, (-2*step_y))\n coordinate = [x_coordinate_start, y_coordinate_start, z_coordinate_start]\n (x_coordinate_start, y_coordinate_start) = change.change_coor2(coordinate, wires, gate_coordinates, coordinate_end, x_coordinate_start, -step_x, y_coordinate_start, step_y)\n \n coordinate = [x_coordinate_start, y_coordinate_start, z_coordinate_start]\n wires.append(coordinate)\n print(coordinate)\n wire = classs.Wire(coordinate, connected_gate)\n allwires.append(wire)\n \n # Check whether wire isn't running into forever loop\n if len(wires) > 100:\n try:\n step_y = step_y\n except:\n step_y = 0\n x_coordinate_check = x_coordinate_end + step_x\n check_coordinate = [x_coordinate_check, y_coordinate_end, z_coordinate_end]\n for item in allwires:\n if item.coordinate == check_coordinate and item.net[0] != gate_end and item.net[1] != gate_end:\n (wires, x_coordinate_start, y_coordinate_start, z_coordinate_start, coordinate, gate_connections, allwires) = delete.delete_wire(wires, coordinate_begin, item.net, distances, gate_connections, allwires)\n break\n else:\n copy_gate_connections = copy.deepcopy(gate_connections)\n for key in copy_gate_connections:\n if len(copy_gate_connections[key]) > 75:\n (wires, x_coordinate_start, y_coordinate_start, z_coordinate_start, coordinate, gate_connections, allwires) = delete.delete_wire(wires, coordinate_begin, item.net, distances, gate_connections, allwires)\n break \n x_coordinate_check = x_coordinate_end - step_x\n check_coordinate = [x_coordinate_check, y_coordinate_end, z_coordinate_end]\n for item in allwires:\n if item.coordinate == check_coordinate and item.net[0] != gate_end and item.net[1] != gate_end:\n (wires, x_coordinate_start, y_coordinate_start, z_coordinate_start, coordinate, gate_connections, allwires) = delete.delete_wire(wires, coordinate_begin, item.net, distances, gate_connections, allwires)\n break\n else:\n copy_gate_connections = copy.deepcopy(gate_connections)\n 
for key in copy_gate_connections:\n if len(copy_gate_connections[key]) > 75:\n (wires, x_coordinate_start, y_coordinate_start, z_coordinate_start, coordinate, gate_connections, allwires) = delete.delete_wire(wires, coordinate_begin, item.net, distances, gate_connections, allwires)\n break\n y_coordinate_check = y_coordinate_end + step_y\n check_coordinate = [x_coordinate_end, y_coordinate_check, z_coordinate_end]\n for item in allwires:\n if item.coordinate == check_coordinate and item.net[0] != gate_end and item.net[1] != gate_end:\n (wires, x_coordinate_start, y_coordinate_start, z_coordinate_start, coordinate, gate_connections, allwires) = delete.delete_wire(wires, coordinate_begin, item.net, distances, gate_connections, allwires)\n break\n else:\n copy_gate_connections = copy.deepcopy(gate_connections)\n for key in copy_gate_connections:\n if len(copy_gate_connections[key]) > 75:\n (wires, x_coordinate_start, y_coordinate_start, z_coordinate_start, coordinate, gate_connections, allwires) = delete.delete_wire(wires, coordinate_begin, item.net, distances, gate_connections, allwires)\n break\n y_coordinate_check = y_coordinate_end - step_y\n check_coordinate = [x_coordinate_end, y_coordinate_check, z_coordinate_end]\n for item in allwires:\n if item.coordinate == check_coordinate and item.net[0] != gate_end and item.net[1] != gate_end:\n (wires, x_coordinate_start, y_coordinate_start, z_coordinate_start, coordinate, gate_connections, allwires) = delete.delete_wire(wires, coordinate_begin, item.net, distances, gate_connections, allwires)\n break\n else:\n copy_gate_connections = copy.deepcopy(gate_connections)\n for key in copy_gate_connections:\n if len(copy_gate_connections[key]) > 75:\n (wires, x_coordinate_start, y_coordinate_start, z_coordinate_start, coordinate, gate_connections, allwires) = delete.delete_wire(wires, coordinate_begin, key, distances, gate_connections, allwires)\n break\n z_coordinate_check = z_coordinate_end + 1\n check_coordinate = [x_coordinate_end, y_coordinate_end, z_coordinate_check]\n for item in allwires:\n if item.coordinate == check_coordinate and item.net[0] != gate_end and item.net[1] != gate_end:\n (wires, x_coordinate_start, y_coordinate_start, z_coordinate_start, coordinate, gate_connections, allwires) = delete.delete_wire(wires, coordinate_begin, item.net, distances, gate_connections, allwires)\n break\n else:\n copy_gate_connections = copy.deepcopy(gate_connections)\n for key in copy_gate_connections:\n if len(copy_gate_connections[key]) > 75:\n (wires, x_coordinate_start, y_coordinate_start, z_coordinate_start, coordinate, gate_connections, allwires) = delete.delete_wire(wires, coordinate_begin, key, distances, gate_connections, allwires)\n break\n \n # If no wire can be deleted and current wire can still not reach end gate\n wires = []\n x_coordinate_start = int(coordinate_begin[0])\n y_coordinate_start = int(coordinate_begin[1])\n z_coordinate_start = int(coordinate_begin[2])\n coordinate = coordinate_begin\n # Check y-coordinate first, then x-coordinate\n # Change value of switch variable to start moving in other direction\n switch_variable = 1\n else: \n checker += 1\n print(\"CHECKER\", checker)\n if checker > 200: \n notcomplete += 1\n print(\"NOTCOMPLETE\", notcomplete)\n break\n \n count += 1 \n print(\"WIRESSSS\")\n print(wires)\n \n wires_length = len(wires)\n \n # Delete part of wire when wire goes back and forth on one line\n if wires_length > 4:\n indices = []\n for count in range(wires_length-2):\n coor_1 = wires[count]\n coor_2 = 
wires[count + 1]\n coor_3 = wires[count + 2]\n\n # Save indices of wires list\n if coor_1 == coor_3:\n if count not in indices and (count + 1) not in indices:\n indices.append(count)\n indices.append(count + 1)\n \n # Delete wire coordinate with highest index first\n for delete_count in range(wires_length):\n for index in indices:\n if (wires_length - delete_count) == index:\n wires.pop(index)\n \n net = classs.Net(gate_start, gate_end)\n net.create_wires(wires)\n gate_connections.update({connected_gate: wires})\n \n if len(gate_connections) == len(netlist):\n # Check whether every wire reaches end gate\n for net in netlist:\n start_gate = int(net.gate_1)\n end_gate = int(net.gate_2)\n \n if (start_gate, end_gate) in gate_connections.keys():\n uu = gate_connections[(start_gate, end_gate)]\n gate_net = (start_gate, end_gate)\n new_net = (end_gate, start_gate)\n end_coordinate = gate_coordinates[end_gate - 1]\n elif (end_gate, start_gate) in gate_connections.keys():\n uu = gate_connections[(end_gate, start_gate)]\n gate_net = (end_gate, start_gate)\n new_net = (start_gate, end_gate)\n end_coordinate = gate_coordinates[start_gate - 1]\n else:\n uu = \"Key already deleted\"\n\n if uu != \"Key already deleted\":\n if end_coordinate not in uu:\n del gate_connections[gate_net]\n \n distances.append((new_net, 2))\n \n longest_wire_length = 0\n # Select 2 longest wires and create them again\n for connection in gate_connections:\n wire_length = len(gate_connections[connection])\n if wire_length > longest_wire_length:\n longest_wire_length = wire_length\n delete_gate = connection\n \n new1 = (delete_gate[1], delete_gate[0])\n distances.append((new1, 2))\n \n del gate_connections[delete_gate]\n \n longest_wire_length = 0\n # Select 2 longest wires and create them again\n for connection in gate_connections:\n wire_length = len(gate_connections[connection])\n if wire_length > longest_wire_length:\n longest_wire_length = wire_length\n delete_gate2 = connection\n \n new2 = (delete_gate2[1], delete_gate2[0])\n distances.append((new2, 2))\n \n del gate_connections[delete_gate2]\n \n longest_wire_length = 0\n # Select 2 longest wires and create them again\n for connection in gate_connections:\n wire_length = len(gate_connections[connection])\n if wire_length > longest_wire_length:\n longest_wire_length = wire_length\n delete_gate3 = connection\n \n new3 = (delete_gate3[1], delete_gate3[0])\n distances.append((new3, 2))\n \n del gate_connections[delete_gate3]\n \n longest_wire_length = 0\n # Select 2 longest wires and create them again\n for connection in gate_connections:\n wire_length = len(gate_connections[connection])\n if wire_length > longest_wire_length:\n longest_wire_length = wire_length\n delete_gate4 = connection\n \n new4 = (delete_gate4[1], delete_gate4[0])\n distances.append((new4, 2))\n \n del gate_connections[delete_gate4]\n\n deletewire = []\n # Delete blocking wire\n for i, item2 in enumerate(allwires):\n if item2.net == gate_net or item2.net == delete_gate or item2.net == delete_gate2:\n deletewire.append(allwires[i])\n\n for delete_wire in deletewire:\n allwires.remove(delete_wire)\n \nprint(gate_connections)\nprint(len(gate_connections))\nprint(\"JOEJOE\")\nprint(\"ALL WIRESSS\")\nprint(allwires[0].coordinate)\n# print(gate_connections[(17,10)])\n\n\nlength = 0\n# Calculate total length of wires\nfor key in gate_connections:\n wire = gate_connections[key]\n length = length + len(wire)\n \nprint(\"TOTAL LENGTH\")\nprint(length)\n\n\ndef make_grid(layers, size):\n for i in range(layers): \n 
GridX = np.linspace(0, size, (size + 1))\n GridY = np.linspace(0, size, (size + 1))\n X, Y = np.meshgrid(GridX, GridY)\n Z = (np.sin(np.sqrt(X ** 2 + Y ** 2)) * 0) + i\n # Plot grid\n # ax.plot_wireframe(X, Y, Z, lw=0.5, color='grey')\n #configure axes\n ax.set_zlim3d(0, layers)\n ax.set_xlim3d(0, size)\n ax.set_ylim3d(0, size)\n\n# Enter coordinates as list with: [X, Y, Z]\ndef draw_line(crdFrom, crdTo, colour): \n Xline = [crdFrom[0], crdTo[0]]\n Yline = [crdFrom[1], crdTo[1]]\n Zline = [crdFrom[2], crdTo[2]]\n # Draw line\n ax.plot(Xline, Yline, Zline,lw=2, color=colour, ms=12)\n\ndef set_gate(crd):\n PointX = [crd[0]]\n PointY = [crd[1]]\n PointZ = [crd[2]]\n # Plot points\n ax.plot(PointX, PointY, PointZ, ls=\"None\", marker=\"o\", color='red')\n\n\nfig = plt.figure()\nax = plt.axes(projection=\"3d\")\n\nmake_grid(8, 16)\n# plt.pause(3)\nfor gate_coordinate in gate_coordinates: \n set_gate(gate_coordinate)\n plt.pause(0.03)\n\nallConnections = []\ncolours = ['b','lightgreen','cyan','m','yellow','k', 'pink']\ncolourcounter = 0\nfor keys in gate_connections:\n allConnections = gate_connections[keys]\n allconnectionlist = []\n for listconnection in allConnections: \n allconnectionlist.append(listconnection)\n if colourcounter < 6:\n colourcounter += 1\n else: \n colourcounter = 0\n for i in range(len(allconnectionlist)):\n try:\n print(\"LineFromTo\", allconnectionlist[i], \"To\",allconnectionlist[i + 1] )\n draw_line(allconnectionlist[i], allconnectionlist[i+1], colours[colourcounter] )\n plt.pause(0.000001)\n except: \n break\n \nwith open('output.csv', mode= 'w') as outputfile:\n output_writer = csv.writer(outputfile, delimiter= ',')\n\n for keys in gate_connections:\n output_writer.writerow([keys, gate_connections[keys]])\n\nax.set_xlabel('x')\nax.set_ylabel('y')\nax.set_zlabel('z')\n\nplt.show()" ]
[ [ "numpy.meshgrid", "matplotlib.pyplot.figure", "matplotlib.pyplot.pause", "numpy.sqrt", "matplotlib.pyplot.show", "numpy.linspace", "matplotlib.pyplot.axes" ] ]
SLAMPAI/generalization-cellular-automata
[ "8758551eee455656156c48ecf72346fd1d315d75" ]
[ "run.py" ]
[ "# -*- coding: utf-8 -*-\n\nimport data_generator as gen\nimport ca_unet as unet\nimport tensorflow as tf\nimport random\nimport argparse, pathlib\n\nparser = argparse.ArgumentParser(description='Generalization CA')\nparser.add_argument('--gen_level', help='Level of Generalization, try \"simple\", \"1\", \"2\", or \"extra\", \"inter\". See papr for eplanation')\nargs = parser.parse_args()\n\nif (args.gen_level == \"simple\"):\n import params.params_simple as params\nelif (args.gen_level == \"1\"):\n import params.params_level_1 as params\nelif (args.gen_level == \"2\"):\n import params.params_level_2 as params\nelif (args.gen_level == \"extra\"):\n import params.params_level_3_extra as params\nelif (args.gen_level == \"inter\"):\n import params.params_level_3_inter as params\n\n\n# set grid parameters\nsize = params.grid_size\ninitial_alive = params.initial_alive\ntimesteps = params.timesteps\n\n# generate the rules\ntotal_rules = params.total_rules\n\ntrain_rules = []\ntest_rules = []\n\n# set the Moore neighborhood sizes used for data generation (train and test set)\nneighborhood_sizes_train = params.neighborhood_sizes_train\nneighborhood_sizes_test = params.neighborhood_sizes_test\n\n# check for any duplicate rules in the train rules\nprint(\"Duplicate rules in train rules?\")\nprint(any(train_rules.count(x) > 1 for x in train_rules))\n\n# generate the trajectories with the specified rulesets\nfor i in range(total_rules):\n train_rules.append(gen.generate_rule(random.choice(neighborhood_sizes_train)))\n\nfor i in range(int(total_rules*0.1)):\n # 10% of training rules size in the test set\n test_rules.append(gen.generate_rule(random.choice(neighborhood_sizes_test)))\n\n# in our case we use the same rules for validation as for training\nval_rules = train_rules\n\n# set the number of training, validation and test states\ndiff_states_train = params.diff_states_train\ndiff_states_val = int(diff_states_train*0.2) # 20% validation\ndiff_states_test = int(diff_states_train*0.1) # 10% test\n\n# generate the data\nif (params.same_rules):\n test_rules = train_rules\n \n\nX_train, y_train, X_val, y_val, X_test, y_test, train_rules_vector, val_rules_vector, test_rules_vector = gen.generate_data(train_rules, val_rules, test_rules, diff_states_train, diff_states_val, diff_states_test, size, initial_alive, timesteps)\n\n# set training parameters\nbatch_size = params.batch_size\nepochs = params.epochs\nverbose = params.verbose\n\n# create the model\nmodel = unet.CA_Unet(size, timesteps)\n\n# creat a checkpoint so the model saves the best parameters\nmcp_save = tf.keras.callbacks.ModelCheckpoint('optimal_weights.hdf5', save_best_only=True, monitor='val_loss', mode='auto')\n\n# train the model\nhistory = model.fit(X_train, y_train, batch_size=batch_size, epochs=epochs, validation_data=(X_val, y_val), callbacks=[mcp_save], verbose = 2)\n\n# evaluate the model on the different data sets\nmodel.load_weights('optimal_weights.hdf5')\n\nprint(\"Performance on train set:\")\nmodel.evaluate(X_train,y_train, batch_size=batch_size)\nprint(\"Performance on validation set:\")\nmodel.evaluate(X_val,y_val, batch_size=batch_size)\nprint(\"Performance on test set:\")\nmodel.evaluate(X_test,y_test, batch_size=batch_size)\n" ]
[ [ "tensorflow.keras.callbacks.ModelCheckpoint" ] ]
tinaba96/fn2q
[ "cbee3ecaf30563826172a3c2e86cd82458fe08e3" ]
[ "convert.py" ]
[ "#!/usr/bin/env python2.7\r\n\r\nimport caffe\r\nfrom caffe.proto import caffe_pb2\r\nimport sys, os\r\n\r\nimport torch\r\nimport torch.nn as nn\r\n\r\nimport argparse, tempfile\r\nimport numpy as np\r\n\r\nparser = argparse.ArgumentParser()\r\nparser.add_argument('caffe_model', help='input model in hdf5 or caffemodel format')\r\nparser.add_argument('prototxt_template',help='prototxt template')\r\nparser.add_argument('flownet2_pytorch', help='path to flownet2-pytorch')\r\n\r\nargs = parser.parse_args()\r\n\r\nargs.rgb_max = 255\r\nargs.fp16 = False\r\nargs.grads = {}\r\n\r\n# load models\r\nsys.path.append(args.flownet2_pytorch)\r\n\r\nimport models\r\nfrom utils.param_utils import *\r\n\r\nwidth = 256\r\nheight = 256\r\nkeys = {'TARGET_WIDTH': width, \r\n 'TARGET_HEIGHT': height,\r\n 'ADAPTED_WIDTH':width,\r\n 'ADAPTED_HEIGHT':height,\r\n 'SCALE_WIDTH':1.,\r\n 'SCALE_HEIGHT':1.,}\r\n\r\ntemplate = '\\n'.join(np.loadtxt(args.prototxt_template, dtype=str, delimiter='\\n'))\r\nfor k in keys:\r\n template = template.replace('$%s$'%(k),str(keys[k]))\r\n\r\nprototxt = tempfile.NamedTemporaryFile(mode='w', delete=True)\r\nprototxt.write(template)\r\nprototxt.flush()\r\n\r\nnet = caffe.Net(prototxt.name, args.caffe_model, caffe.TEST)\r\n\r\nweights = {}\r\nbiases = {}\r\n\r\nfor k, v in list(net.params.items()):\r\n weights[k] = np.array(v[0].data).reshape(v[0].data.shape)\r\n biases[k] = np.array(v[1].data).reshape(v[1].data.shape)\r\n print((k, weights[k].shape, biases[k].shape))\r\n\r\nif 'FlowNet2/' in args.caffe_model:\r\n model = models.FlowNet2(args)\r\n\r\n parse_flownetc(model.flownetc.modules(), weights, biases)\r\n parse_flownets(model.flownets_1.modules(), weights, biases, param_prefix='net2_')\r\n parse_flownets(model.flownets_2.modules(), weights, biases, param_prefix='net3_')\r\n parse_flownetsd(model.flownets_d.modules(), weights, biases, param_prefix='netsd_')\r\n parse_flownetfusion(model.flownetfusion.modules(), weights, biases, param_prefix='fuse_')\r\n\r\n state = {'epoch': 0,\r\n 'state_dict': model.state_dict(),\r\n 'best_EPE': 1e10}\r\n torch.save(state, os.path.join(args.flownet2_pytorch, 'FlowNet2_checkpoint.pth.tar'))\r\n\r\nelif 'FlowNet2-C/' in args.caffe_model:\r\n model = models.FlowNet2C(args)\r\n\r\n parse_flownetc(model.modules(), weights, biases)\r\n state = {'epoch': 0,\r\n 'state_dict': model.state_dict(),\r\n 'best_EPE': 1e10}\r\n torch.save(state, os.path.join(args.flownet2_pytorch, 'FlowNet2-C_checkpoint.pth.tar'))\r\n\r\nelif 'FlowNet2-CS/' in args.caffe_model:\r\n model = models.FlowNet2CS(args)\r\n\r\n parse_flownetc(model.flownetc.modules(), weights, biases)\r\n parse_flownets(model.flownets_1.modules(), weights, biases, param_prefix='net2_')\r\n\r\n state = {'epoch': 0,\r\n 'state_dict': model.state_dict(),\r\n 'best_EPE': 1e10}\r\n torch.save(state, os.path.join(args.flownet2_pytorch, 'FlowNet2-CS_checkpoint.pth.tar'))\r\n\r\nelif 'FlowNet2-CSS/' in args.caffe_model:\r\n model = models.FlowNet2CSS(args)\r\n\r\n parse_flownetc(model.flownetc.modules(), weights, biases)\r\n parse_flownets(model.flownets_1.modules(), weights, biases, param_prefix='net2_')\r\n parse_flownets(model.flownets_2.modules(), weights, biases, param_prefix='net3_')\r\n\r\n state = {'epoch': 0,\r\n 'state_dict': model.state_dict(),\r\n 'best_EPE': 1e10}\r\n torch.save(state, os.path.join(args.flownet2_pytorch, 'FlowNet2-CSS_checkpoint.pth.tar'))\r\n\r\nelif 'FlowNet2-CSS-ft-sd/' in args.caffe_model:\r\n model = models.FlowNet2CSS(args)\r\n\r\n 
parse_flownetc(model.flownetc.modules(), weights, biases)\r\n parse_flownets(model.flownets_1.modules(), weights, biases, param_prefix='net2_')\r\n parse_flownets(model.flownets_2.modules(), weights, biases, param_prefix='net3_')\r\n\r\n state = {'epoch': 0,\r\n 'state_dict': model.state_dict(),\r\n 'best_EPE': 1e10}\r\n torch.save(state, os.path.join(args.flownet2_pytorch, 'FlowNet2-CSS-ft-sd_checkpoint.pth.tar'))\r\n\r\nelif 'FlowNet2-S/' in args.caffe_model:\r\n model = models.FlowNet2S(args)\r\n\r\n parse_flownetsonly(model.modules(), weights, biases, param_prefix='')\r\n state = {'epoch': 0,\r\n 'state_dict': model.state_dict(),\r\n 'best_EPE': 1e10}\r\n torch.save(state, os.path.join(args.flownet2_pytorch, 'FlowNet2-S_checkpoint.pth.tar'))\r\n\r\nelif 'FlowNet2-SD/' in args.caffe_model:\r\n model = models.FlowNet2SD(args)\r\n\r\n parse_flownetsd(model.modules(), weights, biases, param_prefix='')\r\n\r\n state = {'epoch': 0,\r\n 'state_dict': model.state_dict(),\r\n 'best_EPE': 1e10}\r\n torch.save(state, os.path.join(args.flownet2_pytorch, 'FlowNet2-SD_checkpoint.pth.tar'))\r\n\r\nelse:\r\n print(('model type cound not be determined from input caffe model %s'%(args.caffe_model)))\r\n quit()\r\nprint((\"done converting \", args.caffe_model))" ]
[ [ "numpy.array", "numpy.loadtxt" ] ]
mikstr/qml
[ "552e273da080a3a1fb9f8c466e4562b7d64ed6bd" ]
[ "tests/test_distance.py" ]
[ "# MIT License\n#\n# Copyright (c) 2017 Anders Steen Christensen\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom qml.distance import *\n\n\ndef test_manhattan():\n\n nfeatures = 5\n n1 = 7\n n2 = 9\n\n v1 = np.random.random((n1, nfeatures))\n v2 = np.random.random((n2, nfeatures))\n\n D = manhattan_distance(v1, v2)\n\n Dtest = np.zeros((n1, n2))\n\n for i in range(n1):\n for j in range(n2):\n for k in range(nfeatures):\n Dtest[i,j] += abs(v1[i, k] - v2[j, k])\n\n assert np.allclose(D, Dtest), \"Error in manhattan distance\"\n\ndef test_l2():\n\n nfeatures = 5\n n1 = 7\n n2 = 9\n\n v1 = np.random.random((n1, nfeatures))\n v2 = np.random.random((n2, nfeatures))\n\n D = l2_distance(v1, v2)\n\n Dtest = np.zeros((n1, n2))\n\n for i in range(n1):\n for j in range(n2):\n for k in range(nfeatures):\n Dtest[i,j] += (v1[i, k] - v2[j, k])**2\n\n np.sqrt(Dtest, out=Dtest)\n\n assert np.allclose(D, Dtest), \"Error in l2 distance\"\n\ndef test_p():\n\n nfeatures = 5\n n1 = 7\n n2 = 9\n\n v1 = np.random.random((n1, nfeatures))\n v2 = np.random.random((n2, nfeatures))\n\n D = p_distance(v1, v2, 3)\n\n\n Dtest = np.zeros((n1, n2))\n\n for i in range(n1):\n for j in range(n2):\n for k in range(nfeatures):\n Dtest[i,j] += abs(v1[i, k] - v2[j, k])**3\n\n Dtest = Dtest**(1.0/3)\n\n assert np.allclose(D, Dtest), \"Error in p-distance\"\n\n Dfloat = p_distance(v1, v2, 3.0)\n assert np.allclose(D, Dfloat), \"Floatingpoint Error in p-distance\"\n\nif __name__ == \"__main__\":\n test_manhattan()\n test_l2()\n test_p()\n" ]
[ [ "numpy.allclose", "numpy.random.random", "numpy.sqrt", "numpy.zeros" ] ]
jhouck/mne-python
[ "95facbd1a28e471cf81e1d86735fa272a66d13d1" ]
[ "mne/tests/test_annotations.py" ]
[ "# Authors: Jaakko Leppakangas <[email protected]>\n# Robert Luke <[email protected]>\n#\n# License: BSD-3-Clause\n\nfrom collections import OrderedDict\nfrom datetime import datetime, timezone\nfrom itertools import repeat\nimport sys\n\nimport os.path as op\n\nimport pytest\nfrom pytest import approx\nfrom numpy.testing import (assert_equal, assert_array_equal,\n assert_array_almost_equal, assert_allclose)\n\nimport numpy as np\n\nimport mne\nfrom mne import (create_info, read_annotations, annotations_from_events,\n events_from_annotations)\nfrom mne import Epochs, Annotations\nfrom mne.utils import (requires_version,\n catch_logging, requires_pandas)\nfrom mne.utils import (assert_and_remove_boundary_annot, _raw_annot,\n _dt_to_stamp, _stamp_to_dt, check_version)\nfrom mne.io import read_raw_fif, RawArray, concatenate_raws\nfrom mne.annotations import (_sync_onset, _handle_meas_date,\n _read_annotations_txt_parse_header)\nfrom mne.datasets import testing\n\ndata_dir = op.join(testing.data_path(download=False), 'MEG', 'sample')\nfif_fname = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data',\n 'test_raw.fif')\n\nfirst_samps = pytest.mark.parametrize('first_samp', (0, 10000))\n\n\nneeds_pandas = pytest.mark.skipif(\n not check_version('pandas'), reason='Needs pandas')\n\n\n# On Windows, datetime.fromtimestamp throws an error for negative times.\n# We mimic this behavior on non-Windows platforms for ease of testing.\nclass _windows_datetime(datetime):\n @classmethod\n def fromtimestamp(cls, timestamp, tzinfo=None):\n if timestamp < 0:\n raise OSError('[Errno 22] Invalid argument')\n return datetime.fromtimestamp(timestamp, tzinfo)\n\n\[email protected](scope='function')\ndef windows_like_datetime(monkeypatch):\n \"\"\"Ensure datetime.fromtimestamp is Windows-like.\"\"\"\n if not sys.platform.startswith('win'):\n monkeypatch.setattr('mne.annotations.datetime', _windows_datetime)\n yield\n\n\ndef test_basics():\n \"\"\"Test annotation class.\"\"\"\n raw = read_raw_fif(fif_fname)\n assert raw.annotations is not None\n assert len(raw.annotations.onset) == 0\n pytest.raises(IOError, read_annotations, fif_fname)\n onset = np.array(range(10))\n duration = np.ones(10)\n description = np.repeat('test', 10)\n dt = raw.info['meas_date']\n assert isinstance(dt, datetime)\n stamp = _dt_to_stamp(dt)\n # Test time shifts.\n for orig_time in [None, dt, stamp[0], stamp]:\n annot = Annotations(onset, duration, description, orig_time)\n if orig_time is None:\n assert annot.orig_time is None\n else:\n assert isinstance(annot.orig_time, datetime)\n assert annot.orig_time.tzinfo is timezone.utc\n\n pytest.raises(ValueError, Annotations, onset, duration, description[:9])\n pytest.raises(ValueError, Annotations, [onset, 1], duration, description)\n pytest.raises(ValueError, Annotations, onset, [duration, 1], description)\n\n # Test combining annotations with concatenate_raws\n raw2 = raw.copy()\n delta = raw.times[-1] + 1. 
/ raw.info['sfreq']\n orig_time = (stamp[0] + stamp[1] * 1e-6 + raw2._first_time)\n offset = _dt_to_stamp(_handle_meas_date(raw2.info['meas_date']))\n offset = offset[0] + offset[1] * 1e-6\n offset = orig_time - offset\n assert_allclose(offset, raw._first_time)\n annot = Annotations(onset, duration, description, orig_time)\n assert annot.orig_time is not None\n assert ' segments' in repr(annot)\n raw2.set_annotations(annot)\n assert_allclose(raw2.annotations.onset, onset + offset)\n assert raw2.annotations is not annot\n assert raw2.annotations.orig_time is not None\n concatenate_raws([raw, raw2])\n assert_and_remove_boundary_annot(raw)\n assert_allclose(onset + offset + delta, raw.annotations.onset, rtol=1e-5)\n assert_array_equal(annot.duration, raw.annotations.duration)\n assert_array_equal(raw.annotations.description, np.repeat('test', 10))\n\n\ndef test_annot_sanitizing(tmpdir):\n \"\"\"Test description sanitizing.\"\"\"\n annot = Annotations([0], [1], ['a;:b'])\n fname = str(tmpdir.join('custom-annot.fif'))\n annot.save(fname)\n annot_read = read_annotations(fname)\n _assert_annotations_equal(annot, annot_read)\n\n # make sure pytest raises error on char-sequence that is not allowed\n with pytest.raises(ValueError, match='in description not supported'):\n Annotations([0], [1], ['a{COLON}b'])\n\n\ndef test_raw_array_orig_times():\n \"\"\"Test combining with RawArray and orig_times.\"\"\"\n data = np.random.randn(2, 1000) * 10e-12\n sfreq = 100.\n info = create_info(ch_names=['MEG1', 'MEG2'], ch_types=['grad'] * 2,\n sfreq=sfreq)\n meas_date = _handle_meas_date(np.pi)\n info['meas_date'] = meas_date\n raws = []\n for first_samp in [12300, 100, 12]:\n raw = RawArray(data.copy(), info, first_samp=first_samp)\n ants = Annotations([1., 2.], [.5, .5], 'x', np.pi + first_samp / sfreq)\n raw.set_annotations(ants)\n raws.append(raw)\n assert_allclose(raws[0].annotations.onset, [124, 125])\n raw = RawArray(data.copy(), info)\n assert not len(raw.annotations)\n raw.set_annotations(Annotations([1.], [.5], 'x', None))\n assert_allclose(raw.annotations.onset, [1.])\n raws.append(raw)\n raw = concatenate_raws(raws, verbose='debug')\n assert raw.info['meas_date'] == raw.annotations.orig_time == meas_date\n assert_and_remove_boundary_annot(raw, 3)\n assert_array_equal(raw.annotations.onset, [124., 125., 134., 135.,\n 144., 145., 154.])\n raw.annotations.delete(2)\n assert_array_equal(raw.annotations.onset, [124., 125., 135., 144.,\n 145., 154.])\n raw.annotations.append(5, 1.5, 'y')\n assert_array_equal(raw.annotations.onset,\n [5., 124., 125., 135., 144., 145., 154.])\n assert_array_equal(raw.annotations.duration,\n [1.5, .5, .5, .5, .5, .5, .5])\n assert_array_equal(raw.annotations.description,\n ['y', 'x', 'x', 'x', 'x', 'x', 'x'])\n\n # These three things should be equivalent\n stamp = _dt_to_stamp(raw.info['meas_date'])\n orig_time = _handle_meas_date(stamp)\n for empty_annot in (\n Annotations([], [], [], stamp),\n Annotations([], [], [], orig_time),\n Annotations([], [], [], None),\n None):\n raw.set_annotations(empty_annot)\n assert isinstance(raw.annotations, Annotations)\n assert len(raw.annotations) == 0\n assert raw.annotations.orig_time == orig_time\n\n\ndef test_crop(tmpdir):\n \"\"\"Test cropping with annotations.\"\"\"\n raw = read_raw_fif(fif_fname)\n events = mne.find_events(raw)\n onset = events[events[:, 2] == 1, 0] / raw.info['sfreq']\n duration = np.full_like(onset, 0.5)\n description = ['bad %d' % k for k in range(len(onset))]\n annot = mne.Annotations(onset, duration, 
description,\n orig_time=raw.info['meas_date'])\n raw.set_annotations(annot)\n\n split_time = raw.times[-1] / 2. + 2.\n split_idx = len(onset) // 2 + 1\n raw_cropped_left = raw.copy().crop(0., split_time - 1. / raw.info['sfreq'])\n assert_array_equal(raw_cropped_left.annotations.description,\n raw.annotations.description[:split_idx])\n assert_allclose(raw_cropped_left.annotations.duration,\n raw.annotations.duration[:split_idx])\n assert_allclose(raw_cropped_left.annotations.onset,\n raw.annotations.onset[:split_idx])\n raw_cropped_right = raw.copy().crop(split_time, None)\n assert_array_equal(raw_cropped_right.annotations.description,\n raw.annotations.description[split_idx:])\n assert_allclose(raw_cropped_right.annotations.duration,\n raw.annotations.duration[split_idx:])\n assert_allclose(raw_cropped_right.annotations.onset,\n raw.annotations.onset[split_idx:])\n raw_concat = mne.concatenate_raws([raw_cropped_left, raw_cropped_right],\n verbose='debug')\n assert_allclose(raw_concat.times, raw.times)\n assert_allclose(raw_concat[:][0], raw[:][0], atol=1e-20)\n assert_and_remove_boundary_annot(raw_concat)\n # Ensure we annotations survive round-trip crop->concat\n assert_array_equal(raw_concat.annotations.description,\n raw.annotations.description)\n for attr in ('onset', 'duration'):\n assert_allclose(getattr(raw_concat.annotations, attr),\n getattr(raw.annotations, attr),\n err_msg='Failed for %s:' % (attr,))\n\n raw.set_annotations(None) # undo\n\n # Test concatenating annotations with and without orig_time.\n raw2 = raw.copy()\n raw.set_annotations(Annotations([45.], [3], 'test', raw.info['meas_date']))\n raw2.set_annotations(Annotations([2.], [3], 'BAD', None))\n expected_onset = [45., 2. + raw._last_time]\n raw = concatenate_raws([raw, raw2])\n assert_and_remove_boundary_annot(raw)\n assert_array_almost_equal(raw.annotations.onset, expected_onset, decimal=2)\n\n # Test IO\n tempdir = str(tmpdir)\n fname = op.join(tempdir, 'test-annot.fif')\n raw.annotations.save(fname)\n annot_read = read_annotations(fname)\n for attr in ('onset', 'duration'):\n assert_allclose(getattr(annot_read, attr),\n getattr(raw.annotations, attr))\n assert annot_read.orig_time == raw.annotations.orig_time\n assert_array_equal(annot_read.description, raw.annotations.description)\n annot = Annotations((), (), ())\n annot.save(fname, overwrite=True)\n pytest.raises(IOError, read_annotations, fif_fname) # none in old raw\n annot = read_annotations(fname)\n assert isinstance(annot, Annotations)\n assert len(annot) == 0\n annot.crop() # test if cropping empty annotations doesn't raise an error\n # Test that empty annotations can be saved with an object\n fname = op.join(tempdir, 'test_raw.fif')\n raw.set_annotations(annot)\n raw.save(fname)\n raw_read = read_raw_fif(fname)\n assert isinstance(raw_read.annotations, Annotations)\n assert len(raw_read.annotations) == 0\n raw.set_annotations(None)\n raw.save(fname, overwrite=True)\n raw_read = read_raw_fif(fname)\n assert raw_read.annotations is not None\n assert len(raw_read.annotations.onset) == 0\n\n\n@first_samps\ndef test_chunk_duration(first_samp):\n \"\"\"Test chunk_duration.\"\"\"\n # create dummy raw\n raw = RawArray(data=np.empty([10, 10], dtype=np.float64),\n info=create_info(ch_names=10, sfreq=1.),\n first_samp=first_samp)\n raw.info['meas_date'] = _handle_meas_date(0)\n raw.set_annotations(Annotations(description='foo', onset=[0],\n duration=[10], orig_time=None))\n assert raw.annotations.orig_time == raw.info['meas_date']\n 
assert_allclose(raw.annotations.onset, [first_samp])\n\n # expected_events = [[0, 0, 1], [0, 0, 1], [1, 0, 1], [1, 0, 1], ..\n # [9, 0, 1], [9, 0, 1]]\n expected_events = np.atleast_2d(np.repeat(range(10), repeats=2)).T\n expected_events = np.insert(expected_events, 1, 0, axis=1)\n expected_events = np.insert(expected_events, 2, 1, axis=1)\n expected_events[:, 0] += first_samp\n\n events, events_id = events_from_annotations(raw, chunk_duration=.5,\n use_rounding=False)\n assert_array_equal(events, expected_events)\n\n # test chunk durations that do not fit equally in annotation duration\n expected_events = np.zeros((3, 3))\n expected_events[:, -1] = 1\n expected_events[:, 0] = np.arange(0, 9, step=3) + first_samp\n events, events_id = events_from_annotations(raw, chunk_duration=3.)\n assert_array_equal(events, expected_events)\n\n\ndef test_events_from_annotation_orig_time_none():\n \"\"\"Tests events_from_annotation with orig_time None and first_sampe > 0.\"\"\"\n # Create fake data\n sfreq, duration_s = 100, 10\n data = np.random.RandomState(42).randn(1, sfreq * duration_s)\n info = mne.create_info(ch_names=['EEG1'], ch_types=['eeg'], sfreq=sfreq)\n raw = mne.io.RawArray(data, info)\n\n # Add annotation toward the end\n onset = [8]\n duration = [1]\n description = ['0']\n annots = mne.Annotations(onset, duration, description)\n raw = raw.set_annotations(annots)\n\n # Crop start of raw\n raw.crop(tmin=7)\n\n # Extract epochs\n events, event_ids = mne.events_from_annotations(raw)\n epochs = mne.Epochs(\n raw, events, tmin=0, tmax=1, baseline=None, on_missing='warning')\n\n # epochs is empty\n assert_array_equal(epochs.get_data()[0], data[:, 800:901])\n\n\ndef test_crop_more():\n \"\"\"Test more cropping.\"\"\"\n raw = mne.io.read_raw_fif(fif_fname).crop(0, 11).load_data()\n raw._data[:] = np.random.RandomState(0).randn(*raw._data.shape)\n onset = np.array([0.47058824, 2.49773765, 6.67873287, 9.15837097])\n duration = np.array([0.89592767, 1.13574672, 1.09954739, 0.48868752])\n annotations = mne.Annotations(onset, duration, 'BAD')\n raw.set_annotations(annotations)\n assert len(raw.annotations) == 4\n delta = 1. / raw.info['sfreq']\n offset = raw.first_samp * delta\n raw_concat = mne.concatenate_raws(\n [raw.copy().crop(0, 4 - delta),\n raw.copy().crop(4, 8 - delta),\n raw.copy().crop(8, None)])\n assert_allclose(raw_concat.times, raw.times)\n assert_allclose(raw_concat[:][0], raw[:][0])\n assert raw_concat.first_samp == raw.first_samp\n assert_and_remove_boundary_annot(raw_concat, 2)\n assert len(raw_concat.annotations) == 4\n assert_array_equal(raw_concat.annotations.description,\n raw.annotations.description)\n assert_allclose(raw.annotations.duration, duration)\n assert_allclose(raw_concat.annotations.duration, duration)\n assert_allclose(raw.annotations.onset, onset + offset)\n assert_allclose(raw_concat.annotations.onset, onset + offset,\n atol=1. 
/ raw.info['sfreq'])\n\n\[email protected]_testing_data\ndef test_read_brainstorm_annotations():\n \"\"\"Test reading for Brainstorm events file.\"\"\"\n fname = op.join(data_dir, 'events_sample_audvis_raw_bst.mat')\n annot = read_annotations(fname)\n assert len(annot) == 238\n assert annot.onset.min() > 40 # takes into account first_samp\n assert np.unique(annot.description).size == 5\n\n\n@first_samps\ndef test_raw_reject(first_samp):\n \"\"\"Test raw data getter with annotation reject.\"\"\"\n sfreq = 100.\n info = create_info(['a', 'b', 'c', 'd', 'e'], sfreq, ch_types='eeg')\n raw = RawArray(np.ones((5, 15000)), info, first_samp=first_samp)\n with pytest.warns(RuntimeWarning, match='outside the data range'):\n raw.set_annotations(Annotations([2, 100, 105, 148],\n [2, 8, 5, 8], 'BAD'))\n data, times = raw.get_data([0, 1, 3, 4], 100, 11200, # 1-112 sec\n 'omit', return_times=True)\n bad_times = np.concatenate([np.arange(200, 400),\n np.arange(10000, 10800),\n np.arange(10500, 11000)])\n expected_times = np.setdiff1d(np.arange(100, 11200), bad_times) / sfreq\n assert_allclose(times, expected_times)\n\n # with orig_time and complete overlap\n raw = read_raw_fif(fif_fname)\n raw.set_annotations(Annotations(onset=[1, 4, 5] + raw._first_time,\n duration=[1, 3, 1],\n description='BAD',\n orig_time=raw.info['meas_date']))\n t_stop = 18.\n assert raw.times[-1] > t_stop\n n_stop = int(round(t_stop * raw.info['sfreq']))\n n_drop = int(round(4 * raw.info['sfreq']))\n assert len(raw.times) >= n_stop\n data, times = raw.get_data(range(10), 0, n_stop, 'omit', True)\n assert data.shape == (10, n_stop - n_drop)\n assert times[-1] == raw.times[n_stop - 1]\n assert_array_equal(data[:, -100:], raw[:10, n_stop - 100:n_stop][0])\n\n data, times = raw.get_data(range(10), 0, n_stop, 'NaN', True)\n assert_array_equal(data.shape, (10, n_stop))\n assert times[-1] == raw.times[n_stop - 1]\n t_1, t_2 = raw.time_as_index([1, 2], use_rounding=True)\n assert np.isnan(data[:, t_1:t_2]).all() # 1s -2s\n assert not np.isnan(data[:, :t_1].any())\n assert not np.isnan(data[:, t_2:].any())\n assert_array_equal(data[:, -100:], raw[:10, n_stop - 100:n_stop][0])\n assert_array_equal(raw.get_data(), raw[:][0])\n\n # Test _sync_onset\n times = [10, -88, 190]\n onsets = _sync_onset(raw, times)\n assert_array_almost_equal(onsets, times - raw.first_samp /\n raw.info['sfreq'])\n assert_array_almost_equal(times, _sync_onset(raw, onsets, True))\n\n\n@first_samps\ndef test_annotation_filtering(first_samp):\n \"\"\"Test that annotations work properly with filtering.\"\"\"\n # Create data with just a DC component\n data = np.ones((1, 1000))\n info = create_info(1, 1000., 'eeg')\n raws = [RawArray(data * (ii + 1), info, first_samp=first_samp)\n for ii in range(4)]\n kwargs_pass = dict(l_freq=None, h_freq=50., fir_design='firwin')\n kwargs_stop = dict(l_freq=50., h_freq=None, fir_design='firwin')\n # lowpass filter, which should not modify the data\n raws_pass = [raw.copy().filter(**kwargs_pass) for raw in raws]\n # highpass filter, which should zero it out\n raws_stop = [raw.copy().filter(**kwargs_stop) for raw in raws]\n # concat the original and the filtered segments\n raws_concat = concatenate_raws([raw.copy() for raw in raws])\n raws_zero = raws_concat.copy().apply_function(lambda x: x * 0)\n raws_pass_concat = concatenate_raws(raws_pass)\n raws_stop_concat = concatenate_raws(raws_stop)\n # make sure we did something reasonable with our individual-file filtering\n assert_allclose(raws_concat[0][0], raws_pass_concat[0][0], 
atol=1e-14)\n assert_allclose(raws_zero[0][0], raws_stop_concat[0][0], atol=1e-14)\n # ensure that our Annotations cut up the filtering properly\n raws_concat_pass = raws_concat.copy().filter(skip_by_annotation='edge',\n **kwargs_pass)\n assert_allclose(raws_concat[0][0], raws_concat_pass[0][0], atol=1e-14)\n raws_concat_stop = raws_concat.copy().filter(skip_by_annotation='edge',\n **kwargs_stop)\n assert_allclose(raws_zero[0][0], raws_concat_stop[0][0], atol=1e-14)\n # one last test: let's cut out a section entirely:\n # here the 1-3 second window should be skipped\n raw = raws_concat.copy()\n raw.annotations.append(1. + raw._first_time, 2., 'foo')\n with catch_logging() as log:\n raw.filter(l_freq=50., h_freq=None, fir_design='firwin',\n skip_by_annotation='foo', verbose='info')\n log = log.getvalue()\n assert '2 contiguous segments' in log\n raw.annotations.append(2. + raw._first_time, 1., 'foo') # shouldn't change\n with catch_logging() as log:\n raw.filter(l_freq=50., h_freq=None, fir_design='firwin',\n skip_by_annotation='foo', verbose='info')\n log = log.getvalue()\n assert '2 contiguous segments' in log\n # our filter will zero out anything not skipped:\n mask = np.concatenate((np.zeros(1000), np.ones(2000), np.zeros(1000)))\n expected_data = raws_concat[0][0][0] * mask\n assert_allclose(raw[0][0][0], expected_data, atol=1e-14)\n\n # Let's try another one\n raw = raws[0].copy()\n raw.set_annotations(Annotations([0.], [0.5], ['BAD_ACQ_SKIP']))\n my_data, times = raw.get_data(reject_by_annotation='omit',\n return_times=True)\n assert_allclose(times, raw.times[500:])\n assert my_data.shape == (1, 500)\n raw_filt = raw.copy().filter(skip_by_annotation='bad_acq_skip',\n **kwargs_stop)\n expected = data.copy()\n expected[:, 500:] = 0\n assert_allclose(raw_filt[:][0], expected, atol=1e-14)\n\n raw = raws[0].copy()\n raw.set_annotations(Annotations([0.5], [0.5], ['BAD_ACQ_SKIP']))\n my_data, times = raw.get_data(reject_by_annotation='omit',\n return_times=True)\n assert_allclose(times, raw.times[:500])\n assert my_data.shape == (1, 500)\n raw_filt = raw.copy().filter(skip_by_annotation='bad_acq_skip',\n **kwargs_stop)\n expected = data.copy()\n expected[:, :500] = 0\n assert_allclose(raw_filt[:][0], expected, atol=1e-14)\n\n\n@first_samps\ndef test_annotation_omit(first_samp):\n \"\"\"Test raw.get_data with annotations.\"\"\"\n data = np.concatenate([np.ones((1, 1000)), 2 * np.ones((1, 1000))], -1)\n info = create_info(1, 1000., 'eeg')\n raw = RawArray(data, info, first_samp=first_samp)\n raw.set_annotations(Annotations([0.5], [1], ['bad']))\n expected = raw[0][0]\n assert_allclose(raw.get_data(reject_by_annotation=None), expected)\n # nan\n expected[0, 500:1500] = np.nan\n assert_allclose(raw.get_data(reject_by_annotation='nan'), expected)\n got = np.concatenate([raw.get_data(start=start, stop=stop,\n reject_by_annotation='nan')\n for start, stop in ((0, 1000), (1000, 2000))], -1)\n assert_allclose(got, expected)\n # omit\n expected = expected[:, np.isfinite(expected[0])]\n assert_allclose(raw.get_data(reject_by_annotation='omit'), expected)\n got = np.concatenate([raw.get_data(start=start, stop=stop,\n reject_by_annotation='omit')\n for start, stop in ((0, 1000), (1000, 2000))], -1)\n assert_allclose(got, expected)\n pytest.raises(ValueError, raw.get_data, reject_by_annotation='foo')\n\n\ndef test_annotation_epoching():\n \"\"\"Test that annotations work properly with concatenated edges.\"\"\"\n # Create data with just a DC component\n data = np.ones((1, 1000))\n info = create_info(1, 
1000., 'eeg')\n raw = concatenate_raws([RawArray(data, info) for ii in range(3)])\n assert raw.annotations is not None\n assert len(raw.annotations) == 4\n assert np.in1d(raw.annotations.description, ['BAD boundary']).sum() == 2\n assert np.in1d(raw.annotations.description, ['EDGE boundary']).sum() == 2\n assert_array_equal(raw.annotations.duration, 0.)\n events = np.array([[a, 0, 1] for a in [0, 500, 1000, 1500, 2000]])\n epochs = Epochs(raw, events, tmin=0, tmax=0.999, baseline=None,\n preload=True) # 1000 samples long\n assert_equal(len(epochs.drop_log), len(events))\n assert_equal(len(epochs), 3)\n assert_equal([0, 2, 4], epochs.selection)\n\n\ndef test_annotation_concat():\n \"\"\"Test if two Annotations objects can be concatenated.\"\"\"\n a = Annotations([1, 2, 3], [5, 5, 8], [\"a\", \"b\", \"c\"])\n b = Annotations([11, 12, 13], [1, 2, 2], [\"x\", \"y\", \"z\"])\n\n # test + operator (does not modify a or b)\n c = a + b\n assert_array_equal(c.onset, [1, 2, 3, 11, 12, 13])\n assert_array_equal(c.duration, [5, 5, 8, 1, 2, 2])\n assert_array_equal(c.description, [\"a\", \"b\", \"c\", \"x\", \"y\", \"z\"])\n assert_equal(len(a), 3)\n assert_equal(len(b), 3)\n assert_equal(len(c), 6)\n\n # test += operator (modifies a in place)\n a += b\n assert_array_equal(a.onset, [1, 2, 3, 11, 12, 13])\n assert_array_equal(a.duration, [5, 5, 8, 1, 2, 2])\n assert_array_equal(a.description, [\"a\", \"b\", \"c\", \"x\", \"y\", \"z\"])\n assert_equal(len(a), 6)\n assert_equal(len(b), 3)\n\n # test += operator (modifies a in place)\n b._orig_time = _handle_meas_date(1038942070.7201)\n with pytest.raises(ValueError, match='orig_time should be the same'):\n a += b\n\n\ndef test_annotations_crop():\n \"\"\"Test basic functionality of annotation crop.\"\"\"\n onset = np.arange(1, 10)\n duration = np.full_like(onset, 10)\n description = [\"yy\"] * onset.shape[0]\n\n a = Annotations(onset=onset,\n duration=duration,\n description=description,\n orig_time=0)\n\n # cropping window larger than annotations --> do not modify\n a_ = a.copy().crop(tmin=-10, tmax=42)\n assert_array_equal(a_.onset, a.onset)\n assert_array_equal(a_.duration, a.duration)\n\n # cropping with left shifted window\n with pytest.warns(None) as w:\n a_ = a.copy().crop(tmin=0, tmax=4.2)\n assert_array_equal(a_.onset, [1., 2., 3., 4.])\n assert_allclose(a_.duration, [3.2, 2.2, 1.2, 0.2])\n assert len(w) == 0\n\n # cropping with right shifted window\n with pytest.warns(None) as w:\n a_ = a.copy().crop(tmin=17.8, tmax=22)\n assert_array_equal(a_.onset, [17.8, 17.8])\n assert_allclose(a_.duration, [0.2, 1.2])\n assert len(w) == 0\n\n # cropping with centered small window\n a_ = a.copy().crop(tmin=11, tmax=12)\n assert_array_equal(a_.onset, [11, 11, 11, 11, 11, 11, 11, 11, 11])\n assert_array_equal(a_.duration, [0, 1, 1, 1, 1, 1, 1, 1, 1])\n\n # cropping with out-of-bounds window\n with pytest.warns(None) as w:\n a_ = a.copy().crop(tmin=42, tmax=100)\n assert_array_equal(a_.onset, [])\n assert_array_equal(a_.duration, [])\n assert len(w) == 0\n\n # test error raising\n with pytest.raises(ValueError, match='tmax should be greater than.*tmin'):\n a.copy().crop(tmin=42, tmax=0)\n\n # test warnings\n with pytest.warns(RuntimeWarning, match='Omitted .* were outside'):\n a.copy().crop(tmin=42, tmax=100, emit_warning=True)\n with pytest.warns(RuntimeWarning, match='Limited .* expanding outside'):\n a.copy().crop(tmin=0, tmax=12, emit_warning=True)\n\n\[email protected]_testing_data\ndef test_events_from_annot_in_raw_objects():\n \"\"\"Test basic 
functionality of events_fron_annot for raw objects.\"\"\"\n raw = read_raw_fif(fif_fname)\n events = mne.find_events(raw)\n event_id = {\n 'Auditory/Left': 1,\n 'Auditory/Right': 2,\n 'Visual/Left': 3,\n 'Visual/Right': 4,\n 'Visual/Smiley': 32,\n 'Motor/Button': 5\n }\n event_map = {v: k for k, v in event_id.items()}\n annot = Annotations(onset=raw.times[events[:, 0] - raw.first_samp],\n duration=np.zeros(len(events)),\n description=[event_map[vv] for vv in events[:, 2]],\n orig_time=None)\n raw.set_annotations(annot)\n\n events2, event_id2 = \\\n events_from_annotations(raw, event_id=event_id, regexp=None)\n assert_array_equal(events, events2)\n assert_equal(event_id, event_id2)\n\n events3, event_id3 = \\\n events_from_annotations(raw, event_id=None, regexp=None)\n\n assert_array_equal(events[:, 0], events3[:, 0])\n assert set(event_id.keys()) == set(event_id3.keys())\n\n # ensure that these actually got sorted properly\n expected_event_id = {\n desc: idx + 1 for idx, desc in enumerate(sorted(event_id.keys()))}\n assert event_id3 == expected_event_id\n\n first = np.unique(events3[:, 2])\n second = np.arange(1, len(event_id) + 1, 1).astype(first.dtype)\n assert_array_equal(first, second)\n\n first = np.unique(list(event_id3.values()))\n second = np.arange(1, len(event_id) + 1, 1).astype(first.dtype)\n assert_array_equal(first, second)\n\n events4, event_id4 =\\\n events_from_annotations(raw, event_id=None, regexp='.*Left')\n\n expected_event_id4 = {k: v for k, v in event_id.items() if 'Left' in k}\n assert_equal(event_id4.keys(), expected_event_id4.keys())\n\n expected_events4 = events[(events[:, 2] == 1) | (events[:, 2] == 3)]\n assert_array_equal(expected_events4[:, 0], events4[:, 0])\n\n events5, event_id5 = \\\n events_from_annotations(raw, event_id=event_id, regexp='.*Left')\n\n expected_event_id5 = {k: v for k, v in event_id.items() if 'Left' in k}\n assert_equal(event_id5, expected_event_id5)\n\n expected_events5 = events[(events[:, 2] == 1) | (events[:, 2] == 3)]\n assert_array_equal(expected_events5, events5)\n\n with pytest.raises(ValueError, match='not find any of the events'):\n events_from_annotations(raw, regexp='not_there')\n\n with pytest.raises(ValueError, match='Invalid type for event_id'):\n events_from_annotations(raw, event_id='wrong')\n\n # concat does not introduce BAD or EDGE\n raw_concat = concatenate_raws([raw.copy(), raw.copy()])\n _, event_id = events_from_annotations(raw_concat)\n assert isinstance(event_id, dict)\n assert len(event_id) > 0\n for kind in ('BAD', 'EDGE'):\n assert '%s boundary' % kind in raw_concat.annotations.description\n for key in event_id.keys():\n assert kind not in key\n\n # remove all events\n raw.set_annotations(None)\n events7, _ = events_from_annotations(raw)\n assert_array_equal(events7, np.empty((0, 3), dtype=int))\n\n\ndef test_events_from_annot_onset_alingment():\n \"\"\"Test events and annotations onset are the same.\"\"\"\n raw = _raw_annot(meas_date=1, orig_time=1.5)\n # sec 0 1 2 3\n # raw . |--------xxxxxxxxx\n # annot . |---xx\n # raw.annot . |--------xx\n # latency . 0 1 2\n # . 
0 0\n\n assert raw.annotations.orig_time == _handle_meas_date(1)\n assert raw.annotations.onset[0] == 1\n assert raw.first_samp == 10\n event_latencies, event_id = events_from_annotations(raw)\n assert event_latencies[0, 0] == 10\n assert raw.first_samp == event_latencies[0, 0]\n\n\ndef _create_annotation_based_on_descr(description, annotation_start_sampl=0,\n duration=0, orig_time=0):\n \"\"\"Create a raw object with annotations from descriptions.\n\n The returning raw object contains as many annotations as description given.\n All starting at `annotation_start_sampl`.\n \"\"\"\n # create dummy raw\n raw = RawArray(data=np.empty([10, 10], dtype=np.float64),\n info=create_info(ch_names=10, sfreq=1000.),\n first_samp=0)\n raw.set_meas_date(0)\n\n # create dummy annotations based on the descriptions\n onset = raw.times[annotation_start_sampl]\n onset_matching_desc = np.full_like(description, onset, dtype=type(onset))\n duration_matching_desc = np.full_like(description, duration,\n dtype=type(duration))\n annot = Annotations(description=description,\n onset=onset_matching_desc,\n duration=duration_matching_desc,\n orig_time=orig_time)\n\n if duration != 0:\n with pytest.warns(RuntimeWarning, match='Limited.*expanding outside'):\n # duration 0.1s is larger than the raw data expand\n raw.set_annotations(annot)\n else:\n raw.set_annotations(annot)\n\n # Make sure that set_annotations(annot) works\n assert all(raw.annotations.onset == onset)\n if duration != 0:\n expected_duration = (len(raw.times) / raw.info['sfreq']) - onset\n else:\n expected_duration = 0\n _duration = raw.annotations.duration[0]\n assert _duration == approx(expected_duration)\n assert all(raw.annotations.duration == _duration)\n assert all(raw.annotations.description == description)\n\n return raw\n\n\ndef test_event_id_function_default():\n \"\"\"Test[unit_test] for event_id_function default in event_from_annotations.\n\n The expected behavior is give numeric label for all those annotations not\n present in event_id, starting at 1.\n \"\"\"\n # No event_id given\n description = ['a', 'b', 'c', 'd', 'e', 'f', 'g']\n expected_event_id = dict(zip(description, range(1, 100)))\n expected_events = np.array([[3, 3, 3, 3, 3, 3, 3],\n [0, 0, 0, 0, 0, 0, 0],\n [1, 2, 3, 4, 5, 6, 7]]).T\n\n raw = _create_annotation_based_on_descr(description,\n annotation_start_sampl=3,\n duration=100)\n events, event_id = events_from_annotations(raw, event_id=None)\n\n assert_array_equal(events, expected_events)\n assert event_id == expected_event_id\n\n\ndef test_event_id_function_using_custom_function():\n \"\"\"Test [unit_test] arbitrary function to create the ids.\"\"\"\n def _constant_id(*args, **kwargs):\n return 42\n\n description = ['a', 'b', 'c', 'd', 'e', 'f', 'g']\n expected_event_id = dict(zip(description, repeat(42)))\n expected_events = np.repeat([[0, 0, 42]], len(description), axis=0)\n raw = _create_annotation_based_on_descr(description)\n events, event_id = events_from_annotations(raw, event_id=_constant_id)\n\n assert_array_equal(events, expected_events)\n assert event_id == expected_event_id\n\n\n# Test for IO with .csv files\n\n\ndef _assert_annotations_equal(a, b, tol=0):\n __tracebackhide__ = True\n assert_allclose(a.onset, b.onset, rtol=0, atol=tol)\n assert_allclose(a.duration, b.duration, rtol=0, atol=tol)\n assert_array_equal(a.description, b.description)\n assert_array_equal(a.ch_names, b.ch_names)\n a_orig_time = a.orig_time\n b_orig_time = b.orig_time\n assert a_orig_time == b_orig_time\n\n\n_ORIG_TIME = 
datetime.fromtimestamp(1038942071.7201, timezone.utc)\n\n\[email protected](scope='function', params=('ch_names', 'fmt'))\ndef dummy_annotation_file(tmpdir_factory, ch_names, fmt):\n \"\"\"Create csv file for testing.\"\"\"\n if fmt == 'csv':\n content = (\"onset,duration,description\\n\"\n \"2002-12-03 19:01:11.720100,1.0,AA\\n\"\n \"2002-12-03 19:01:20.720100,2.425,BB\")\n elif fmt == 'txt':\n content = (\"# MNE-Annotations\\n\"\n \"# orig_time : 2002-12-03 19:01:11.720100\\n\"\n \"# onset, duration, description\\n\"\n \"0, 1, AA \\n\"\n \"9, 2.425, BB\")\n else:\n assert fmt == 'fif'\n content = Annotations(\n [0, 9], [1, 2.425], ['AA', 'BB'], orig_time=_ORIG_TIME)\n\n if ch_names:\n if isinstance(content, Annotations):\n # this is a bit of a hack but it works\n content.ch_names[:] = ((), ('MEG0111', 'MEG2563'))\n else:\n content = content.splitlines()\n content[-3] += ',ch_names'\n content[-2] += ','\n content[-1] += ',MEG0111:MEG2563'\n content = '\\n'.join(content)\n\n fname = tmpdir_factory.mktemp('data').join(f'annotations-annot.{fmt}')\n if isinstance(content, str):\n fname.write(content)\n else:\n content.save(fname)\n return fname\n\n\[email protected]('ch_names', (False, True))\[email protected]('fmt', [\n pytest.param('csv', marks=needs_pandas),\n 'txt',\n 'fif'\n])\ndef test_io_annotation(dummy_annotation_file, tmpdir, fmt, ch_names):\n \"\"\"Test CSV, TXT, and FIF input/output (which support ch_names).\"\"\"\n annot = read_annotations(dummy_annotation_file)\n assert annot.orig_time == _ORIG_TIME\n kwargs = dict(orig_time=_ORIG_TIME)\n if ch_names:\n kwargs['ch_names'] = ((), ('MEG0111', 'MEG2563'))\n _assert_annotations_equal(\n annot, Annotations([0., 9.], [1., 2.425], ['AA', 'BB'], **kwargs),\n tol=1e-6)\n\n # Now test writing\n fname = tmpdir.join(f'annotations-annot.{fmt}')\n annot.save(fname)\n annot2 = read_annotations(fname)\n _assert_annotations_equal(annot, annot2)\n\n # Now without an orig_time\n annot._orig_time = None\n annot.save(fname, overwrite=True)\n annot2 = read_annotations(fname)\n _assert_annotations_equal(annot, annot2)\n\n\n@requires_version('pandas')\ndef test_broken_csv(tmpdir):\n \"\"\"Test broken .csv that does not use timestamps.\"\"\"\n content = (\"onset,duration,description\\n\"\n \"1.,1.0,AA\\n\"\n \"3.,2.425,BB\")\n\n fname = tmpdir.join('annotations_broken.csv')\n fname.write(content)\n with pytest.warns(RuntimeWarning, match='save your CSV as a TXT'):\n read_annotations(fname)\n\n\n# Test for IO with .txt files\n\[email protected](scope='function', params=('ch_names',))\ndef dummy_annotation_txt_file(tmpdir_factory, ch_names):\n \"\"\"Create txt file for testing.\"\"\"\n content = (\"3.14, 42, AA \\n\"\n \"6.28, 48, BB\")\n if ch_names:\n content = content.splitlines()\n content[0] = content[0].strip() + ','\n content[1] = content[1].strip() + ', MEG0111:MEG2563'\n content = '\\n'.join(content)\n\n fname = tmpdir_factory.mktemp('data').join('annotations.txt')\n fname.write(content)\n return fname\n\n\[email protected]('ch_names', (False, True))\ndef test_io_annotation_txt(dummy_annotation_txt_file, tmpdir_factory,\n ch_names):\n \"\"\"Test TXT input/output without meas_date.\"\"\"\n annot = read_annotations(str(dummy_annotation_txt_file))\n assert annot.orig_time is None\n kwargs = dict()\n if ch_names:\n kwargs['ch_names'] = [(), ('MEG0111', 'MEG2563')]\n _assert_annotations_equal(\n annot, Annotations([3.14, 6.28], [42., 48], ['AA', 'BB'], **kwargs))\n\n # Now test writing\n fname = 
str(tmpdir_factory.mktemp('data').join('annotations.txt'))\n annot.save(fname)\n annot2 = read_annotations(fname)\n _assert_annotations_equal(annot, annot2)\n\n # Now with an orig_time\n assert annot.orig_time is None\n annot._orig_time = _handle_meas_date(1038942071.7201)\n assert annot.orig_time is not None\n annot.save(fname, overwrite=True)\n annot2 = read_annotations(fname)\n assert annot2.orig_time is not None\n _assert_annotations_equal(annot, annot2)\n\n\[email protected]('meas_date, out', [\n pytest.param('toto', None, id='invalid string'),\n pytest.param(None, None, id='None'),\n pytest.param(42, 42.0, id='Scalar'),\n pytest.param(3.14, 3.14, id='Float'),\n pytest.param((3, 140000), 3.14, id='Scalar touple'),\n pytest.param('2002-12-03 19:01:11.720100', 1038942071.7201,\n id='valid iso8601 string'),\n pytest.param('2002-12-03T19:01:11.720100', None,\n id='invalid iso8601 string')])\ndef test_handle_meas_date(meas_date, out):\n \"\"\"Test meas date formats.\"\"\"\n if out is not None:\n assert out >= 0 # otherwise it'll break on Windows\n out = datetime.fromtimestamp(out, timezone.utc)\n assert _handle_meas_date(meas_date) == out\n\n\ndef test_read_annotation_txt_header(tmpdir):\n \"\"\"Test TXT orig_time recovery.\"\"\"\n content = (\"# A something \\n\"\n \"# orig_time : 42\\n\"\n \"# orig_time : 2002-12-03 19:01:11.720100\\n\"\n \"# orig_time : 42\\n\"\n \"# C\\n\"\n \"Done\")\n fname = tmpdir.join('header.txt')\n fname.write(content)\n orig_time = _read_annotations_txt_parse_header(fname)\n want = datetime.fromtimestamp(1038942071.7201, timezone.utc)\n assert orig_time == want\n\n\ndef test_read_annotation_txt_one_segment(tmpdir):\n \"\"\"Test empty TXT input/output.\"\"\"\n content = (\"# MNE-Annotations\\n\"\n \"# onset, duration, description\\n\"\n \"3.14, 42, AA\")\n fname = tmpdir.join('one-annotations.txt')\n fname.write(content)\n annot = read_annotations(fname)\n _assert_annotations_equal(annot, Annotations(3.14, 42, ['AA']))\n\n\ndef test_read_annotation_txt_empty(tmpdir):\n \"\"\"Test empty TXT input/output.\"\"\"\n content = (\"# MNE-Annotations\\n\"\n \"# onset, duration, description\\n\")\n fname = tmpdir.join('empty-annotations.txt')\n fname.write(content)\n annot = read_annotations(fname)\n _assert_annotations_equal(annot, Annotations([], [], []))\n\n\ndef test_annotations_simple_iteration():\n \"\"\"Test indexing Annotations.\"\"\"\n NUM_ANNOT = 5\n EXPECTED_ELEMENTS_TYPE = (np.float64, np.float64, np.str_)\n EXPECTED_ONSETS = EXPECTED_DURATIONS = [x for x in range(NUM_ANNOT)]\n EXPECTED_DESCS = [x.__repr__() for x in range(NUM_ANNOT)]\n\n annot = Annotations(onset=EXPECTED_ONSETS,\n duration=EXPECTED_DURATIONS,\n description=EXPECTED_DESCS,\n orig_time=None)\n\n for ii, elements in enumerate(annot[:2]):\n assert isinstance(elements, OrderedDict)\n expected_values = (ii, ii, str(ii))\n for elem, expected_type, expected_value in zip(elements.values(),\n EXPECTED_ELEMENTS_TYPE,\n expected_values):\n assert np.isscalar(elem)\n assert type(elem) == expected_type\n assert elem == expected_value\n\n\n@requires_version('numpy', '1.12')\ndef test_annotations_slices():\n \"\"\"Test indexing Annotations.\"\"\"\n NUM_ANNOT = 5\n EXPECTED_ONSETS = EXPECTED_DURATIONS = [x for x in range(NUM_ANNOT)]\n EXPECTED_DESCS = [x.__repr__() for x in range(NUM_ANNOT)]\n\n annot = Annotations(onset=EXPECTED_ONSETS,\n duration=EXPECTED_DURATIONS,\n description=EXPECTED_DESCS,\n orig_time=None)\n\n # Indexing returns a copy. 
So this has no effect in annot\n annot[0]['onset'] = 42\n annot[0]['duration'] = 3.14\n annot[0]['description'] = 'foobar'\n\n annot[:1].onset[0] = 42\n annot[:1].duration[0] = 3.14\n annot[:1].description[0] = 'foobar'\n\n # Slicing with single element returns a dictionary\n for ii in EXPECTED_ONSETS:\n assert annot[ii] == dict(zip(['onset', 'duration',\n 'description', 'orig_time'],\n [ii, ii, str(ii), None]))\n\n # Slices should give back Annotations\n for current in (annot[slice(0, None, 2)],\n annot[[bool(ii % 2) for ii in range(len(annot))]],\n annot[:1],\n annot[[0, 2, 2]],\n annot[(0, 2, 2)],\n annot[np.array([0, 2, 2])],\n annot[1::2],\n ):\n assert isinstance(current, Annotations)\n assert len(current) != len(annot)\n\n for bad_ii in [len(EXPECTED_ONSETS), 42, 'foo']:\n with pytest.raises(IndexError):\n annot[bad_ii]\n\n\ndef test_sorting():\n \"\"\"Test annotation sorting.\"\"\"\n annot = Annotations([10, 20, 30], [1, 2, 3], 'BAD')\n # assert_array_equal(annot.onset, [0, 5, 10])\n annot.append([5, 15, 25, 35], 0.5, 'BAD')\n onset = list(range(5, 36, 5))\n duration = list(annot.duration)\n assert_array_equal(annot.onset, onset)\n assert_array_equal(annot.duration, duration)\n annot.append([10, 10], [0.1, 9], 'BAD') # 0.1 should be before, 9 after\n want_before = onset.index(10)\n duration.insert(want_before, 0.1)\n duration.insert(want_before + 2, 9)\n onset.insert(want_before, 10)\n onset.insert(want_before, 10)\n assert_array_equal(annot.onset, onset)\n assert_array_equal(annot.duration, duration)\n\n\ndef test_date_none(tmpdir):\n \"\"\"Test that DATE_NONE is used properly.\"\"\"\n # Regression test for gh-5908\n n_chans = 139\n n_samps = 20\n data = np.random.random_sample((n_chans, n_samps))\n ch_names = ['E{}'.format(x) for x in range(n_chans)]\n ch_types = ['eeg'] * n_chans\n info = create_info(ch_names=ch_names, ch_types=ch_types, sfreq=2048)\n assert info['meas_date'] is None\n raw = RawArray(data=data, info=info)\n fname = op.join(str(tmpdir), 'test-raw.fif')\n raw.save(fname)\n raw_read = read_raw_fif(fname, preload=True)\n assert raw_read.info['meas_date'] is None\n\n\ndef test_negative_meas_dates(windows_like_datetime):\n \"\"\"Test meas_date previous to 1970.\"\"\"\n # Regression test for gh-6621\n raw = RawArray(data=np.empty((1, 1), dtype=np.float64),\n info=create_info(ch_names=1, sfreq=1.))\n raw.set_meas_date((-908196946, 988669))\n raw.set_annotations(Annotations(description='foo', onset=[0],\n duration=[0], orig_time=None))\n events, _ = events_from_annotations(raw)\n assert events[:, 0] == 0\n\n\ndef test_crop_when_negative_orig_time(windows_like_datetime):\n \"\"\"Test cropping with orig_time, tmin and tmax previous to 1970.\"\"\"\n # Regression test for gh-6621\n orig_time_stamp = -908196945.011331 # 1941-03-22 11:04:14.988669\n annot = Annotations(description='foo', onset=np.arange(0, 0.999, 0.1),\n duration=[0], orig_time=orig_time_stamp)\n stamp = _dt_to_stamp(annot.orig_time)\n assert_allclose(stamp[0] + stamp[1] * 1e-6, orig_time_stamp)\n t = stamp[0] + stamp[1] * 1e-6\n assert t == orig_time_stamp\n assert len(annot) == 10\n\n # do not raise\n annot.crop(verbose='debug')\n assert len(annot) == 10\n\n # Crop with negative tmin, tmax\n tmin, tmax = [orig_time_stamp + t for t in (0.25, .75)]\n assert tmin < 0 and tmax < 0\n crop_annot = annot.crop(tmin=tmin, tmax=tmax)\n assert_allclose(crop_annot.onset, [0.3, 0.4, 0.5, 0.6, 0.7])\n orig_dt = _stamp_to_dt(stamp)\n assert crop_annot.orig_time == orig_dt # orig_time does not change\n\n\ndef 
test_allow_nan_durations():\n \"\"\"Deal with \"n/a\" strings in BIDS events with nan durations.\"\"\"\n raw = RawArray(data=np.empty([2, 10], dtype=np.float64),\n info=create_info(ch_names=2, sfreq=1.),\n first_samp=0)\n raw.set_meas_date(0)\n\n ons = [1, 2., 15., 17.]\n dus = [np.nan, 1., 0.5, np.nan]\n descriptions = ['A'] * 4\n onsets = np.asarray(ons, dtype=float)\n durations = np.asarray(dus, dtype=float)\n annot = mne.Annotations(onset=onsets,\n duration=durations,\n description=descriptions)\n with pytest.warns(RuntimeWarning, match='Omitted 2 annotation'):\n raw.set_annotations(annot)\n\n\[email protected]_testing_data\ndef test_annotations_from_events():\n \"\"\"Test events to annotations conversion.\"\"\"\n raw = read_raw_fif(fif_fname)\n events = mne.find_events(raw)\n\n # 1. Automatic event description\n # -------------------------------------------------------------------------\n annots = annotations_from_events(events, raw.info['sfreq'],\n first_samp=raw.first_samp,\n orig_time=None)\n assert len(annots) == events.shape[0]\n\n # Convert back to events\n raw.set_annotations(annots)\n events_out, _ = events_from_annotations(raw, event_id=int)\n assert_array_equal(events, events_out)\n\n # 2. Explicit event mapping\n # -------------------------------------------------------------------------\n event_desc = {1: 'one', 2: 'two', 3: 'three', 32: None}\n annots = annotations_from_events(events, sfreq=raw.info['sfreq'],\n event_desc=event_desc,\n first_samp=raw.first_samp,\n orig_time=None)\n\n assert np.all([a in ['one', 'two', 'three'] for a in annots.description])\n assert len(annots) == events[events[:, 2] <= 3].shape[0]\n\n # 3. Pass list\n # -------------------------------------------------------------------------\n event_desc = [1, 2, 3]\n annots = annotations_from_events(events, sfreq=raw.info['sfreq'],\n event_desc=event_desc,\n first_samp=raw.first_samp,\n orig_time=None)\n\n assert np.all([a in ['1', '2', '3'] for a in annots.description])\n assert len(annots) == events[events[:, 2] <= 3].shape[0]\n\n # 4. Try passing callable\n # -------------------------------------------------------------------------\n event_desc = lambda d: 'event{}'.format(d) # noqa:E731\n annots = annotations_from_events(events, sfreq=raw.info['sfreq'],\n event_desc=event_desc,\n first_samp=raw.first_samp,\n orig_time=None)\n\n assert np.all(['event' in a for a in annots.description])\n assert len(annots) == events.shape[0]\n\n # 5. 
Pass numpy array\n # -------------------------------------------------------------------------\n event_desc = np.array([[1, 2, 3], [1, 2, 3]])\n with pytest.raises(ValueError, match='event_desc must be 1D'):\n annots = annotations_from_events(events, sfreq=raw.info['sfreq'],\n event_desc=event_desc,\n first_samp=raw.first_samp,\n orig_time=None)\n\n with pytest.raises(ValueError, match='Invalid type for event_desc'):\n annots = annotations_from_events(events, sfreq=raw.info['sfreq'],\n event_desc=1,\n first_samp=raw.first_samp,\n orig_time=None)\n\n event_desc = np.array([1, 2, 3])\n annots = annotations_from_events(events, sfreq=raw.info['sfreq'],\n event_desc=event_desc,\n first_samp=raw.first_samp,\n orig_time=None)\n assert np.all([a in ['1', '2', '3'] for a in annots.description])\n assert len(annots) == events[events[:, 2] <= 3].shape[0]\n\n\ndef test_repr():\n \"\"\"Test repr of Annotations.\"\"\"\n # short annotation repr (< 79 characters)\n r = repr(Annotations(range(3), [0] * 3, list(\"abc\")))\n assert r == '<Annotations | 3 segments: a (1), b (1), c (1)>'\n\n # long annotation repr (> 79 characters, will be shortened)\n r = repr(Annotations(range(14), [0] * 14, list(\"abcdefghijklmn\")))\n assert r == ('<Annotations | 14 segments: a (1), b (1), c (1), d (1), '\n 'e (1), f (1), g ...>')\n\n # empty Annotations\n r = repr(Annotations([], [], []))\n assert r == '<Annotations | 0 segments>'\n\n\n@requires_pandas\ndef test_annotation_to_data_frame():\n \"\"\"Test annotation class to data frame conversion.\"\"\"\n onset = np.arange(1, 10)\n durations = np.full_like(onset, [4, 5, 6, 4, 5, 6, 4, 5, 6])\n description = [\"yy\"] * onset.shape[0]\n\n a = Annotations(onset=onset,\n duration=durations,\n description=description,\n orig_time=0)\n\n df = a.to_data_frame()\n for col in ['onset', 'duration', 'description']:\n assert col in df.columns\n assert df.description[0] == 'yy'\n assert (df.onset[1] - df.onset[0]).seconds == 1\n assert df.groupby('description').count().onset['yy'] == 9\n\n\ndef test_annotation_ch_names():\n \"\"\"Test annotation ch_names updating and pruning.\"\"\"\n info = create_info(10, 1000., 'eeg')\n raw = RawArray(np.zeros((10, 1000)), info)\n onset = [0.1, 0.3, 0.6]\n duration = [0.05, 0.1, 0.2]\n description = ['first', 'second', 'third']\n ch_names = [[], raw.ch_names[4:6], raw.ch_names[5:7]]\n annot = Annotations(onset, duration, description, ch_names=ch_names)\n raw.set_annotations(annot)\n # renaming\n rename = {name: name + 'new' for name in raw.ch_names}\n raw_2 = raw.copy().rename_channels(rename)\n for ch_rename, ch in zip(raw_2.annotations.ch_names, annot.ch_names):\n assert all(name in raw_2.ch_names for name in ch_rename)\n assert all(name in raw.ch_names for name in ch)\n assert not any(name in raw.ch_names for name in ch_rename)\n assert not any(name in raw_2.ch_names for name in ch)\n raw_2.rename_channels({val: key for key, val in rename.items()})\n _assert_annotations_equal(raw.annotations, raw_2.annotations)\n # dropping\n raw_2.drop_channels(raw.ch_names[5:])\n annot_pruned = raw_2.annotations\n assert len(raw_2.annotations) == 2 # dropped the last one\n assert raw_2.annotations.ch_names[1] == tuple(raw.ch_names[4:5])\n for ch_drop in raw_2.annotations.ch_names:\n assert all(name in raw_2.ch_names for name in ch_drop)\n with pytest.raises(ValueError, match='channel name in annotations missin'):\n raw_2.set_annotations(annot)\n with pytest.warns(RuntimeWarning, match='channel name in annotations mis'):\n raw_2.set_annotations(annot, 
on_missing='warn')\n assert raw_2.annotations is not annot_pruned\n _assert_annotations_equal(raw_2.annotations, annot_pruned)\n\n\ndef test_annotation_rename():\n \"\"\"Test annotation renaming works.\"\"\"\n a = Annotations([1, 2, 3], [5, 5, 8], [\"a\", \"b\", \"c\"])\n assert isinstance(a.description, np.ndarray)\n assert len(a) == 3\n assert \"a\" in a.description\n assert \"b\" in a.description\n assert \"c\" in a.description\n assert \"new_name\" not in a.description\n\n a = Annotations([1, 2, 3], [5, 5, 8], [\"a\", \"b\", \"c\"])\n a.rename({\"a\": \"new_name\"})\n assert isinstance(a.description, np.ndarray)\n assert len(a) == 3\n assert \"a\" not in a.description\n assert \"new_name\" in a.description\n assert np.where([d == \"new_name\" for d in a.description])[0] == 0\n\n a = Annotations([1, 2, 3], [5, 5, 8], [\"a\", \"b\", \"c\"])\n a.rename({\"a\": \"new_name\", \"b\": \"new name b\"})\n assert len(a) == 3\n assert \"a\" not in a.description\n assert \"new_name\" in a.description\n assert \"b\" not in a.description\n assert \"new name b\" in a.description\n assert np.where([d == \"new_name\" for d in a.description])[0] == 0\n assert np.where([d == \"new name b\" for d in a.description])[0] == 1\n\n a = Annotations([1, 2, 3], [5, 5, 8], [\"a\", \"b\", \"c\"])\n a.rename({\"b\": \"new_name\", \"c\": \"new name c\"})\n assert isinstance(a.description, np.ndarray)\n assert len(a) == 3\n assert \"b\" not in a.description\n assert \"new_name\" in a.description\n assert \"c\" not in a.description\n assert \"new name c\" in a.description\n assert \"a\" in a.description\n assert np.where([d == \"new_name\" for d in a.description])[0] == 1\n assert np.where([d == \"new name c\" for d in a.description])[0] == 2\n assert len(np.where([d == \"new name b\" for d in a.description])[0]) == 0\n\n a = Annotations([1, 2, 3], [5, 5, 8], [\"a\", \"b\", \"c\"])\n with pytest.raises(ValueError, match=\"mapping missing from data\"):\n a.rename({\"aaa\": \"does not exist\"})\n with pytest.raises(ValueError, match=\"[' a']\"):\n a.rename({\" a\": \"does not exist\"})\n with pytest.raises(TypeError, match=\"dict, got <class 'str'> instead\"):\n a.rename(\"wrong\")\n with pytest.raises(TypeError, match=\"dict, got <class 'list'> instead\"):\n a.rename([\"wrong\"])\n with pytest.raises(TypeError, match=\"dict, got <class 'set'> instead\"):\n a.rename({\"wrong\"})\n\n\ndef test_annotation_duration_setting():\n \"\"\"Test annotation duration setting works.\"\"\"\n a = Annotations([1, 2, 3], [5, 5, 8], [\"a\", \"b\", \"c\"])\n assert isinstance(a.duration, np.ndarray)\n assert len(a) == 3\n assert a.duration[0] == 5\n assert a.duration[2] == 8\n a.set_durations({\"a\": 3})\n assert a.duration[0] == 3\n assert a.duration[2] == 8\n a.set_durations({\"a\": 313, \"c\": 18})\n assert a.duration[0] == 313\n assert a.duration[2] == 18\n a.set_durations({\"a\": 1, \"b\": 13})\n assert a.duration[0] == 1\n assert a.duration[1] == 13\n\n a = Annotations([1, 2, 3], [5, 5, 8], [\"a\", \"b\", \"c\"])\n assert len(a) == 3\n assert a.duration[0] == 5\n assert a.duration[2] == 8\n a.set_durations(7.2)\n assert isinstance(a.duration, np.ndarray)\n assert a.duration[0] == 7.2\n assert a.duration[2] == 7.2\n a.set_durations(2)\n assert a.duration[0] == 2\n\n with pytest.raises(ValueError, match=\"mapping missing from data\"):\n a.set_durations({\"aaa\": 2.2})\n with pytest.raises(TypeError, match=\" got <class 'set'> instead\"):\n a.set_durations({\"aaa\", 2.2})\n" ]
[ [ "numpy.repeat", "numpy.testing.assert_allclose", "numpy.where", "numpy.full_like", "numpy.empty", "numpy.testing.assert_array_almost_equal", "numpy.arange", "numpy.isfinite", "numpy.in1d", "numpy.array", "numpy.zeros", "numpy.testing.assert_equal", "numpy.random.randn", "numpy.random.random_sample", "numpy.isscalar", "numpy.insert", "numpy.isnan", "numpy.asarray", "numpy.random.RandomState", "numpy.testing.assert_array_equal", "numpy.ones", "numpy.all", "numpy.unique" ] ]
wovert/ai_tutorials
[ "74ce45183bab777cf7f1778e58b0d38b817b750c" ]
[ "numpy/nmpy_array.py" ]
[ "# coding: utf-8\n\nimport numpy\n\n# 一维矩阵\nvector = numpy.array([5, 10, '15', 20.0])\n\n# 二维矩阵\nmatrix = numpy.array([[2, 4, 8], [20, 25, 30], [40, 45, 48]])\nprint(vector)\nprint(matrix)\n\n# 行和列\nprint(vector.shape)\nprint(type(vector))\n\n# 2行3列\nprint(matrix.shape)\n\none = matrix[1, 1]\nprint(one) # 25\n\nprint(vector[0:3]) # 0, 1, 2 => '5', '10', '15'\n\n# 取某已列\nprint(matrix[:, 0]) # 2 20 第一列\nprint(matrix[:, 1]) # 4 24 第二列\n\nequal_to_ten = (vector == '10')\nprint(equal_to_ten) # [Flase True False False]\nprint(vector[equal_to_ten]) # '10'\n\n\n" ]
[ [ "numpy.array" ] ]
Womac/pyroomacoustics
[ "15a86425b68969b2109860ca3614f0cbf92b1bd0" ]
[ "pyroomacoustics/bss/common.py" ]
[ "# Common Functions used in BSS algorithms\n# Copyright (C) 2019 Robin Scheibler, Yaron Dibner, Virgile Hernicot, Juan Azcarreta\n# MIT License\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n# You should have received a copy of the MIT License along with this program. If\n# not, see <https://opensource.org/licenses/MIT>.\n\nimport numpy as np\n\n\ndef projection_back(Y, ref, clip_up=None, clip_down=None):\n \"\"\"\n This function computes the frequency-domain filter that minimizes\n the squared error to a reference signal. This is commonly used\n to solve the scale ambiguity in BSS.\n\n Here is the derivation of the projection.\n The optimal filter `z` minimizes the squared error.\n\n .. math::\n\n \\min E[|z^* y - x|^2]\n\n It should thus satsify the orthogonality condition\n and can be derived as follows\n\n .. math::\n\n 0 & = E[y^*\\\\, (z^* y - x)]\n\n 0 & = z^*\\\\, E[|y|^2] - E[y^* x]\n\n z^* & = \\\\frac{E[y^* x]}{E[|y|^2]}\n\n z & = \\\\frac{E[y x^*]}{E[|y|^2]}\n\n In practice, the expectations are replaced by the sample\n mean.\n\n Parameters\n ----------\n Y: array_like (n_frames, n_bins, n_channels)\n The STFT data to project back on the reference signal\n ref: array_like (n_frames, n_bins)\n The reference signal\n clip_up: float, optional\n Limits the maximum value of the gain (default no limit)\n clip_down: float, optional\n Limits the minimum value of the gain (default no limit)\n \"\"\"\n\n num = np.sum(np.conj(ref[:, :, None]) * Y, axis=0)\n denom = np.sum(np.abs(Y) ** 2, axis=0)\n\n c = np.ones(num.shape, dtype=np.complex)\n I = denom > 0.0\n c[I] = num[I] / denom[I]\n\n if clip_up is not None:\n I = np.logical_and(np.abs(c) > clip_up, np.abs(c) > 0)\n c[I] *= clip_up / np.abs(c[I])\n\n if clip_down is not None:\n I = np.logical_and(np.abs(c) < clip_down, np.abs(c) > 0)\n c[I] *= clip_down / np.abs(c[I])\n return c\n\n\ndef sparir(\n G,\n S,\n weights=np.array([]),\n gini=0,\n maxiter=50,\n tol=10,\n alpha=10,\n alphamax=1e5,\n alphamin=1e-7,\n):\n \"\"\"\n\n Fast proximal algorithm implementation for sparse approximation of relative impulse\n responses from incomplete measurements of the corresponding relative transfer function\n based on\n\n Z. Koldovsky, J. Malek, and S. 
Gannot, \"Spatial Source Subtraction based\n on Incomplete Measurements of Relative Transfer Function\", IEEE/ACM\n Transactions on Audio, Speech, and Language Processing, TASLP 2015.\n\n The original Matlab implementation can be found at\n http://itakura.ite.tul.cz/zbynek/dwnld/SpaRIR.m\n\n and it is referred in\n\n Z. Koldovsky, F. Nesta, P. Tichavsky, and N. Ono, *Frequency-domain blind\n speech separation using incomplete de-mixing transform*, EUSIPCO 2016.\n\n Parameters\n ----------\n G: ndarray (nfrequencies, 1)\n Frequency representation of the (incomplete) transfer function\n S: ndarray (kfrequencies)\n Indexes of active frequency bins for sparse AuxIVA\n weights: ndarray (kfrequencies) or int, optional\n The higher the value of weights(i), the higher the probability that g(i)\n is zero; if scalar, all weights are the same; if empty, default value is\n used\n gini: ndarray (nfrequencies)\n Initialization for the computation of g\n maxiter: int\n Maximum number of iterations before achieving convergence (default 50)\n tol: float\n Minimum convergence criteria based on the gradient difference between adjacent updates (default 10)\n alpha: float\n Inverse of the decreasing speed of the gradient at each iteration. This parameter\n is updated at every iteration (default 10)\n alphamax: float\n Upper bound for alpha (default 1e5)\n alphamin: float\n Lower bound for alpha (default 1e-7)\n\n Returns\n -------\n Returns the sparse approximation of the impulse response in the\n time-domain (real-valued) as an (nfrequencies) array.\n \"\"\"\n\n n_freq = G.shape[0]\n\n y = np.concatenate((np.real(G[S]), np.imag(G[S])), axis=0)\n M = y.shape[0]\n\n if gini == 0: # if no initialization is given\n g = np.zeros((n_freq, 1))\n g[0] = 1\n else:\n g = gini\n\n if weights.size == 0:\n tau = np.sqrt(n_freq) / (y.conj().T.dot(y))\n tau = tau * np.exp(0.11 * np.abs((np.arange(1.0, n_freq + 1.0).T)) ** 0.3)\n tau = tau.T\n elif weights.shape[0] == 1:\n tau = np.ones((n_freq, 1)) * weights\n else:\n tau = np.tile(weights.T, (1, 1)).reshape(n_freq)\n\n def soft(x, T):\n if np.sum(np.abs(T).flatten()) == 0:\n u = x\n else:\n u = np.max(np.abs(x) - T, 0)\n u = u / (u + T) * x\n return u\n\n aux = np.zeros((n_freq, 1), dtype=complex)\n G = np.fft.fft(g.flatten())\n Ag = np.concatenate((np.real(G[S]), np.imag(G[S])), axis=0)\n r = Ag - y.flatten() # instead of r = A * g - y\n aux[S] = np.expand_dims(r[0 : M // 2] + 1j * r[M // 2 :], axis=1)\n gradq = n_freq * np.fft.irfft(aux.flatten(), n_freq) # instead of gradq = A'*r\n gradq = np.expand_dims(gradq, axis=1)\n support = g != 0\n iter_ = 0 # initial iteration value\n\n # Define stopping criteria\n crit = np.zeros((maxiter, 1))\n criterion = -tau[support] * np.sign(g[support]) - gradq[support]\n crit[iter_] = np.sum(criterion ** 2)\n\n while (crit[iter_] > tol) and (iter_ < maxiter - 1):\n # Update gradient\n prev_r = r\n prev_g = g\n g = soft(prev_g - gradq * (1.0 / alpha), tau / alpha)\n dg = g - prev_g\n DG = np.fft.fft(dg.flatten())\n Adg = np.concatenate((np.real(DG[S]), np.imag(DG[S])), axis=0)\n r = prev_r + Adg.flatten() # faster than A * g - y\n dd = np.dot(dg.flatten().conj().T, dg.flatten())\n dGd = np.dot(Adg.flatten().conj().T, Adg.flatten())\n alpha = min(alphamax, max(alphamin, dGd / (np.finfo(np.float32).eps + dd)))\n iter_ += 1\n support = g != 0\n aux[S] = np.expand_dims(r[0 : M // 2] + 1j * r[M // 2 :], axis=1)\n gradq = n_freq * np.fft.irfft(aux.flatten(), n_freq)\n gradq = np.expand_dims(gradq, axis=1)\n # Update stopping criteria\n 
criterion = -tau[support] * np.sign(g[support]) - gradq[support]\n crit[iter_] = sum(criterion ** 2) + sum(\n abs(gradq[~support]) - tau[~support] > tol\n )\n\n return g.flatten()\n" ]
[ [ "numpy.array", "numpy.zeros", "numpy.sum", "numpy.ones", "numpy.tile", "numpy.real", "numpy.sign", "numpy.finfo", "numpy.conj", "numpy.arange", "numpy.abs", "numpy.sqrt", "numpy.imag", "numpy.expand_dims" ] ]
tinnguyen96/coupling-Gibbs-partition
[ "0238026cf3b491273ee19b9d1c9ee543c3458037" ]
[ "modules/predictive_experiment.py" ]
[ "\"\"\"\nReport the predictive density for DP posterior over gmm data.\n\"\"\"\n\n# Standard libaries \nimport argparse\nimport os\nfrom multiprocessing import Pool\nimport matplotlib.pyplot as plt\nimport seaborn as sb\nsb.set_context('paper', rc={'xtick.labelsize': 15, 'ytick.labelsize': 15, 'lines.markersize': 5})\nsb.set_style('whitegrid')\nimport numpy as np\nnp.set_printoptions(precision=2)\nfrom scipy import stats\nimport imp\nimport time\ntry:\n clock = time.clock\nexcept AttributeError:\n clock = lambda : time.clock_gettime(1)\nimport pickle\n\n# our implementation \nimport utils\nfrom sampling import gibbs_sweep_single\nfrom estimate_utils import prop_in_k_clusters, usual_MCMC_est_crp, unbiased_est_crp, crp_prior, posterior_predictive_density_1Dgrid\nimport gmm_data\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.set_defaults(data_dir = \"../data/\", results_dir=\"../estimation_results_postBug/\", is_nested=False, Ndata=500, data_sd=2.0, data_sd0=10.0, data_K=10, data_alpha=0.5, data_seed=0, max_time=100, sd=2.0, sd0=3.0, alpha=1, init_type=\"crp_prior\", pool_size=100, num_replicates=100)\n ## info about data\n parser.add_argument(\"--data_dir\", type=str, dest=\"data_dir\",\n help=\"root directory containing data files\")\n parser.add_argument(\"--Ndata\", type=int, dest=\"Ndata\",\n help=\"number of observations\")\n parser.add_argument(\"--data_sd\", type=float, dest=\"data_sd\",\n help=\"std of observational likelihood that generated gmm data\")\n parser.add_argument(\"--data_sd0\", type=float, dest=\"data_sd0\",\n help=\"std of prior distribution over cluster means that generated gmm data\")\n parser.add_argument(\"--data_alpha\", type=float, dest=\"data_alpha\",\n help=\"concentration of Dirichlet parameter to generate cluster weights that generated gmm data\")\n parser.add_argument(\"--data_seed\", type=float, dest=\"data_seed\",\n help=\"random seed that generated gmm data\")\n ## info about estimator\n parser.add_argument(\"--max_time\", type=int, dest=\"max_time\",\n help=\"maximum processor time to run each replicate\")\n parser.add_argument(\"--max_iter\", type=int, dest=\"max_iter\",\n help=\"maximum number of sweeps through data when computing truth\")\n parser.add_argument(\"--sd\", type=float, dest=\"sd\",\n help=\"std of observational likelihood\")\n parser.add_argument(\"--sd0\", type=float, dest=\"sd0\",\n help=\"std of prior distribution over cluster means\")\n parser.add_argument(\"--alpha\", type=float, dest=\"alpha\",\n help=\"concentration of Dirichlet parameter to generate cluster weights\")\n parser.add_argument(\"--init_type\", type=str, dest=\"init_type\",\n help=\"how to initialize the Gibbs sampler\")\n parser.add_argument(\"--is_nested\", action=\"store_true\", dest=\"is_nested\",\n help=\"whether the gmm data was generated with the nested version\")\n ## info about multi-processing\n parser.add_argument(\"--pool_size\", type=int, dest=\"pool_size\",\n help=\"how many jobs in parallel to run for each replicate\")\n parser.add_argument(\"--num_replicates\", type=int, dest=\"num_replicates\",\n help=\"how many replicates\")\n options = parser.parse_args()\n return options \n\noptions = parse_args()\nprint(options)\n\nNdata = options.Ndata\nD = 1\nsd, sd0, alpha = options.sd, options.sd0, options.alpha\ninit_type = options.init_type\nis_nested = options.is_nested\n\ndata, grid_and_density = gmm_data.load_gmm_data(options.data_dir, D, Ndata, options.data_alpha, options.data_sd, options.data_sd0, options.data_K, options.data_seed, 
is_nested)\nsavedir = options.results_dir + gmm_data.make_experiment_name(Ndata, D, options.data_sd, options.data_sd0, options.data_alpha, options.data_K, options.data_seed, is_nested, sd, sd0, alpha)\n\nif not os.path.exists(savedir):\n print(\"Will make directory %s\" %savedir)\n os.makedirs(savedir)\n \n## estimate predictive density using coupled chains\ndef run_rep(h, maxIter, time_budget):\n \"\"\"\n Input:\n h: lambda function\n maxIter: scalar \n time_budget: scalar\n \n Return \n \"\"\"\n np.random.seed() # seed = None actually means we use the most randomness across processors\n s0_state = np.random.get_state()\n ests_ub = []\n num_sweeps = None\n num_est = 0\n st = clock() # hopefully this is process-specific time\n time_left = time_budget\n while True:\n est, X, Y, tau, _ = unbiased_est_crp(k, h, m, data, sd, sd0,\n alpha, time_left, pi0_its, init_type, coupling)\n if tau is None: break\n ests_ub += [est]\n time_left = time_budget - (clock() - st)\n num_est += 1\n if time_left <= 0: break\n # this is unlikely to happen if we set time_budget to be reasonable, but\n # just in case\n if (num_est == 0):\n result = None\n else:\n # average the result of successful meetings\n result = np.mean(ests_ub, axis=0)\n\n return 0, result, num_sweeps, num_est, s0_state\n\nk = 10 # burn-in\nm = 100 # minimum iterations\npi0_its = 5\ncoupling='Optimal'\n\npool_size = options.pool_size\nnum_reps = options.num_replicates\ngrid = grid_and_density[0] # evaluate posterior predictive where the synthetic data function evaluates\n\nh_predictive = lambda z: posterior_predictive_density_1Dgrid(grid, data, z, sd, sd0, alpha)\n\ndef simulate(_):\n result = run_rep(h_predictive, options.max_iter, options.max_time)\n print(\"completed pool job\")\n return result\n \nst = time.time()\n\nresults = []\nfor i in range(num_reps):\n with Pool(pool_size) as p:\n rep_results = p.map(simulate, range(pool_size))\n results.extend(rep_results)\n print(\"completed replicate number %d\" %i)\n\ntotal_count = num_reps*pool_size \n \nprint(\"Wall-clock time elasped in minutes %.2f\" %((time.time()-st)/60))\n\nsavepath = savedir + \"/coupled_estimates_totRep=%d_hType=predictive_initType=%s_maxTime=%d_burnin=%d_minIter=%d.pkl\" %(total_count, init_type, options.max_time,k,m)\nprint(\"Will save results to %s\" %savepath)\nprint()\nwith open(savepath, 'wb') as f:\n pickle.dump(results, f)" ]
[ [ "numpy.random.seed", "numpy.set_printoptions", "numpy.random.get_state", "numpy.mean" ] ]
wireless911/bert-text
[ "8a8b2775a2952d3bf99c445c0eb5e3937d221a33" ]
[ "sequence-label.py" ]
[ "import collections\nimport time\nimport torch\nfrom typing import Optional, Text\nfrom torch.utils.data import DataLoader\nfrom torch.utils.tensorboard import SummaryWriter\nfrom transformers import BertTokenizer\n\nfrom trainer import SequenceLabelTrainer\nfrom config import SequenceLabelConfig\nfrom utils import CustomSequenceLabelDataset\nfrom model import BiLSTM_CRF\n\nconfig = SequenceLabelConfig()\ntag_to_ix = SequenceLabelConfig.TAG_TO_ID\n# tokenizer\ntokenizer = BertTokenizer.from_pretrained('bert-base-chinese')\n\ntrain_datasets = CustomSequenceLabelDataset(config.train_data, tokenizer, config)\neval_datasets = CustomSequenceLabelDataset(config.eval_data, tokenizer, config)\n\n# create model\nmodel = BiLSTM_CRF(tag_to_ix, config.max_sequence_length, config.hidden_dim,config.device)\nmodel.summuary()\n\noptimizer = torch.optim.Adam(model.parameters(), lr=config.learning_rate, weight_decay=0.1)\n\n# dataloader\ntrain_dataloader = DataLoader(train_datasets, batch_size=config.batch_size, shuffle=True)\neval_dataloader = DataLoader(eval_datasets, batch_size=config.batch_size, shuffle=True)\n\n# create trainer\ntrainer = SequenceLabelTrainer(\n model=model,\n args=None,\n train_dataloader=train_dataloader,\n eval_dataloader=eval_dataloader,\n epochs=config.epochs,\n learning_rate=config.learning_rate,\n device=config.device,\n padding_tag=config.TAG_TO_ID[config.PAD_TAG]\n)\n\n# train model\ntrainer.train()\n" ]
[ [ "torch.utils.data.DataLoader" ] ]
zhtianxiao/DLA-Combined-IoUs
[ "0b9db0e8e2b2927928bd57c6032497d3b87e7905" ]
[ "dynamic_atss_core/modeling/roi_heads/mask_head/roi_mask_predictors.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\nfrom torch import nn\nfrom torch.nn import functional as F\n\nfrom dynamic_atss_core.layers import Conv2d\nfrom dynamic_atss_core.layers import ConvTranspose2d\nfrom dynamic_atss_core.modeling import registry\n\n\[email protected]_MASK_PREDICTOR.register(\"MaskRCNNC4Predictor\")\nclass MaskRCNNC4Predictor(nn.Module):\n def __init__(self, cfg, in_channels):\n super(MaskRCNNC4Predictor, self).__init__()\n num_classes = cfg.MODEL.ROI_BOX_HEAD.NUM_CLASSES\n dim_reduced = cfg.MODEL.ROI_MASK_HEAD.CONV_LAYERS[-1]\n num_inputs = in_channels\n\n self.conv5_mask = ConvTranspose2d(num_inputs, dim_reduced, 2, 2, 0)\n self.mask_fcn_logits = Conv2d(dim_reduced, num_classes, 1, 1, 0)\n\n for name, param in self.named_parameters():\n if \"bias\" in name:\n nn.init.constant_(param, 0)\n elif \"weight\" in name:\n # Caffe2 implementation uses MSRAFill, which in fact\n # corresponds to kaiming_normal_ in PyTorch\n nn.init.kaiming_normal_(param, mode=\"fan_out\", nonlinearity=\"relu\")\n\n def forward(self, x):\n x = F.relu(self.conv5_mask(x))\n return self.mask_fcn_logits(x)\n\n\[email protected]_MASK_PREDICTOR.register(\"MaskRCNNConv1x1Predictor\")\nclass MaskRCNNConv1x1Predictor(nn.Module):\n def __init__(self, cfg, in_channels):\n super(MaskRCNNConv1x1Predictor, self).__init__()\n num_classes = cfg.MODEL.ROI_BOX_HEAD.NUM_CLASSES\n num_inputs = in_channels\n\n self.mask_fcn_logits = Conv2d(num_inputs, num_classes, 1, 1, 0)\n\n for name, param in self.named_parameters():\n if \"bias\" in name:\n nn.init.constant_(param, 0)\n elif \"weight\" in name:\n # Caffe2 implementation uses MSRAFill, which in fact\n # corresponds to kaiming_normal_ in PyTorch\n nn.init.kaiming_normal_(param, mode=\"fan_out\", nonlinearity=\"relu\")\n\n def forward(self, x):\n return self.mask_fcn_logits(x)\n\n\ndef make_roi_mask_predictor(cfg, in_channels):\n func = registry.ROI_MASK_PREDICTOR[cfg.MODEL.ROI_MASK_HEAD.PREDICTOR]\n return func(cfg, in_channels)\n" ]
[ [ "torch.nn.init.kaiming_normal_", "torch.nn.init.constant_" ] ]
TomKingsfordUoA/emotion-recognition-using-speech
[ "d3e115e32c06c511e70cb50a92097bafd00d5e6c" ]
[ "emotion_recognition_using_speech/create_csv.py" ]
[ "import glob\nimport os\n\nimport pandas as pd\n\n\ndef write_emodb_csv(emotions=[\"sad\", \"neutral\", \"happy\"], train_name=\"train_emo.csv\",\n test_name=\"test_emo.csv\", train_size=0.8, verbose=1):\n \"\"\"\n Reads speech emodb dataset from directory and write it to a metadata CSV file.\n params:\n emotions (list): list of emotions to read from the folder, default is ['sad', 'neutral', 'happy']\n train_name (str): the output csv filename for training data, default is 'train_emo.csv'\n test_name (str): the output csv filename for testing data, default is 'test_emo.csv'\n train_size (float): the ratio of splitting training data, default is 0.8 (80% Training data and 20% testing data)\n verbose (int/bool): verbositiy level, 0 for silence, 1 for info, default is 1\n \"\"\"\n target = {\"path\": [], \"emotion\": []}\n categories = {\n \"W\": \"angry\",\n \"L\": \"boredom\",\n \"E\": \"disgust\",\n \"A\": \"fear\",\n \"F\": \"happy\",\n \"T\": \"sad\",\n \"N\": \"neutral\"\n }\n # delete not specified emotions\n categories_reversed = { v: k for k, v in categories.items() }\n for emotion, code in categories_reversed.items():\n if emotion not in emotions:\n del categories[code]\n for file in glob.glob(\"data/emodb/wav/*.wav\"):\n try:\n emotion = categories[os.path.basename(file)[5]]\n except KeyError:\n continue\n target['emotion'].append(emotion)\n target['path'].append(file)\n if verbose:\n print(\"[EMO-DB] Total files to write:\", len(target['path']))\n \n # dividing training/testing sets\n n_samples = len(target['path'])\n test_size = int((1-train_size) * n_samples)\n train_size = int(train_size * n_samples)\n if verbose:\n print(\"[EMO-DB] Training samples:\", train_size)\n print(\"[EMO-DB] Testing samples:\", test_size) \n X_train = target['path'][:train_size]\n X_test = target['path'][train_size:]\n y_train = target['emotion'][:train_size]\n y_test = target['emotion'][train_size:]\n pd.DataFrame({\"path\": X_train, \"emotion\": y_train}).to_csv(train_name)\n pd.DataFrame({\"path\": X_test, \"emotion\": y_test}).to_csv(test_name)\n\n\ndef write_tess_ravdess_csv(emotions=[\"sad\", \"neutral\", \"happy\"], train_name=\"train_tess_ravdess.csv\",\n test_name=\"test_tess_ravdess.csv\", verbose=1):\n \"\"\"\n Reads speech TESS & RAVDESS datasets from directory and write it to a metadata CSV file.\n params:\n emotions (list): list of emotions to read from the folder, default is ['sad', 'neutral', 'happy']\n train_name (str): the output csv filename for training data, default is 'train_tess_ravdess.csv'\n test_name (str): the output csv filename for testing data, default is 'test_tess_ravdess.csv'\n verbose (int/bool): verbositiy level, 0 for silence, 1 for info, default is 1\n \"\"\"\n train_target = {\"path\": [], \"emotion\": []}\n test_target = {\"path\": [], \"emotion\": []}\n \n for category in emotions:\n # for training speech directory\n total_files = glob.glob(f\"data/training/Actor_*/*_{category}.wav\")\n for i, path in enumerate(total_files):\n train_target[\"path\"].append(path)\n train_target[\"emotion\"].append(category)\n if verbose and total_files:\n print(f\"[TESS&RAVDESS] There are {len(total_files)} training audio files for category:{category}\")\n \n # for validation speech directory\n total_files = glob.glob(f\"data/validation/Actor_*/*_{category}.wav\")\n for i, path in enumerate(total_files):\n test_target[\"path\"].append(path)\n test_target[\"emotion\"].append(category)\n if verbose and total_files:\n print(f\"[TESS&RAVDESS] There are {len(total_files)} testing 
audio files for category:{category}\")\n pd.DataFrame(test_target).to_csv(test_name)\n pd.DataFrame(train_target).to_csv(train_name)\n\n\ndef write_custom_csv(emotions=['sad', 'neutral', 'happy'], train_name=\"train_custom.csv\", test_name=\"test_custom.csv\",\n verbose=1):\n \"\"\"\n Reads Custom Audio data from data/*-custom and then writes description files (csv)\n params:\n emotions (list): list of emotions to read from the folder, default is ['sad', 'neutral', 'happy']\n train_name (str): the output csv filename for training data, default is 'train_custom.csv'\n test_name (str): the output csv filename for testing data, default is 'test_custom.csv'\n verbose (int/bool): verbositiy level, 0 for silence, 1 for info, default is 1\n \"\"\"\n module_root = os.path.dirname(__file__)\n train_target = {\"path\": [], \"emotion\": []}\n test_target = {\"path\": [], \"emotion\": []}\n for category in emotions:\n # train data\n for i, file in enumerate(glob.glob(os.path.join(module_root, f\"data/train-custom/*_{category}.wav\"))):\n train_target[\"path\"].append(file)\n train_target[\"emotion\"].append(category)\n if verbose:\n try:\n print(f\"[Custom Dataset] There are {i} training audio files for category:{category}\")\n except NameError:\n # in case {i} doesn't exist\n pass\n \n # test data\n for i, file in enumerate(glob.glob(os.path.join(module_root, f\"data/test-custom/*_{category}.wav\"))):\n test_target[\"path\"].append(file)\n test_target[\"emotion\"].append(category)\n if verbose:\n try:\n print(f\"[Custom Dataset] There are {i} testing audio files for category:{category}\")\n except NameError:\n pass\n \n # write CSVs\n if train_target[\"path\"]:\n pd.DataFrame(train_target).to_csv(os.path.join(module_root, train_name))\n\n if test_target[\"path\"]:\n pd.DataFrame(test_target).to_csv(os.path.join(module_root, test_name))\n" ]
[ [ "pandas.DataFrame" ] ]
Thundzz/advent-of-code-2021
[ "e91cd8a5756566ad3f47e07a51b0beb5b9f5326c" ]
[ "py/day_11/solve.py" ]
[ "\nimport numpy as np\nimport itertools\n\ndef parse_input(filename):\n cavern = np.zeros((10, 10))\n with open(filename) as file:\n for idx, line in enumerate(file.readlines()):\n cavern[idx, :] = [int(c) for c in line.strip()]\n return cavern\n\ndef neighbours(i, j, max_size):\n raw_neighbours = [\n (x + i, y + j)\n for x, y in itertools.product([-1, 0, 1],[-1, 0, 1])\n if not(x == 0 and y == 0)\n ]\n return [\n (ii, jj) for ii, jj in raw_neighbours\n if 0 <= ii < max_size and 0 <= jj < max_size\n ]\n\ndef update_flashed(new_state, flashed_octopi):\n newly_flashed = [\n (i, j) for i, j in np.argwhere(new_state > 9)\n if flashed_octopi[i, j] == 0\n ]\n for i, j in newly_flashed:\n flashed_octopi[i, j] = 1\n return newly_flashed, flashed_octopi\n\ndef step(cavern):\n new_state = cavern + 1\n flashed_octopi = np.zeros_like(cavern)\n newly_flashed, flashed_octopi = update_flashed(new_state, flashed_octopi)\n while newly_flashed:\n to_update = []\n for i, j in newly_flashed:\n to_update.extend(neighbours(i, j, 10))\n for i, j in to_update:\n new_state[i, j] += 1\n newly_flashed, flashed_octopi = update_flashed(new_state, flashed_octopi)\n\n for i, j in np.argwhere(flashed_octopi):\n new_state[i, j] = 0\n\n return new_state, len(np.argwhere(flashed_octopi))\n\ndef synchronized_flash(cavern):\n return (cavern == 0).all()\n\ndef main():\n cavern = parse_input(\"input.txt\")\n count = 0\n for i in range(1000):\n cavern, c = step(cavern)\n count += c\n if synchronized_flash(cavern):\n print(\"First synchronization is at\", i +1)\n break\n if i == 99:\n print(\"Number of flashes at 100 iterations is\", count)\n\nif __name__ == '__main__':\n main()\n\n" ]
[ [ "numpy.zeros_like", "numpy.zeros", "numpy.argwhere" ] ]
daoo/autodesk
[ "de5be9593a4d6eb873240ac4455507df3a87c89d" ]
[ "tests/unit/test_operation.py" ]
[ "from autodesk.operation import Operation\nfrom autodesk.states import INACTIVE, ACTIVE\nfrom datetime import date, time\nfrom pandas import Timestamp\nimport pytest\n\n\ndef combine(dates, times):\n return [Timestamp.combine(day, stroke)\n for day in dates for stroke in times]\n\n\nondates = [\n date(2017, 2, 13), # monday\n date(2017, 2, 14), # tuesday\n date(2017, 2, 15), # wednesday\n date(2017, 2, 16), # thursday\n date(2017, 2, 17), # friday\n]\n\noffdates = [\n date(2017, 2, 18), # saturday\n date(2017, 2, 19), # sunday\n]\n\nontimes = [\n time(8, 0, 0),\n time(11, 34, 0),\n time(13, 38, 0),\n time(17, 30, 0),\n time(20, 0, 0),\n]\n\nofftimes = [\n time(6, 59, 0),\n time(20, 1, 0),\n time(23, 0, 0),\n time(3, 0, 0),\n time(6, 0, 0),\n]\n\nondatetimes = combine(ondates, ontimes)\noffdatetimes = \\\n combine(offdates, offtimes) + \\\n combine(ondates, offtimes) + \\\n combine(offdates, ontimes)\n\n\[email protected](\"at\", ondatetimes)\ndef test_session_inactive_at_allowed_time(at):\n assert not Operation().allowed(INACTIVE, at)\n\n\[email protected](\"at\", ondatetimes)\ndef test_session_active_at_allowed_time(at):\n assert Operation().allowed(ACTIVE, at)\n\n\[email protected](\"at\", offdatetimes)\ndef test_session_inactive_at_disallowed_time(at):\n assert not Operation().allowed(INACTIVE, at)\n\n\[email protected](\"at\", offdatetimes)\ndef test_session_active_at_disallowed_time(at):\n assert not Operation().allowed(ACTIVE, at)\n" ]
[ [ "pandas.Timestamp.combine" ] ]
wwjiang007/keras
[ "f630ad87a01ed2b4d08f91e5553b50c6a85601f6" ]
[ "keras/tests/automatic_outside_compilation_test.py" ]
[ "# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for automatic outside compilation for TF 2.0/Keras.\"\"\"\n\nimport collections\nimport os\n\nfrom absl import flags\nfrom keras import callbacks\nfrom keras.distribute import distribute_strategy_test\nfrom keras.engine import base_layer\nfrom keras.engine import sequential as sequential_model_lib\nfrom keras.engine import training\nfrom keras.layers import convolutional as conv_layer_lib\nfrom keras.layers import core as layer_lib\nfrom keras.layers import pooling as pool_layer_lib\nimport numpy as np\nimport tensorflow.compat.v2 as tf\n\nfrom tensorboard.plugins.histogram import summary_v2 as histogram_summary_v2\nfrom tensorboard.plugins.image import summary_v2 as image_summary_v2\nfrom tensorboard.plugins.scalar import summary_v2 as scalar_summary_v2\nfrom tensorflow.python.eager.context import set_soft_device_placement\n\nNUM_CLASSES = 4\n\nFLAGS = flags.FLAGS\nflags.DEFINE_string('tpu', '', 'Name of TPU to connect to.')\nflags.DEFINE_string('project', None, 'Name of GCP project with TPU.')\nflags.DEFINE_string('zone', None, 'Name of GCP zone with TPU.')\n\n\ndef get_tpu_cluster_resolver():\n resolver = tf.distribute.cluster_resolver.TPUClusterResolver(\n tpu=FLAGS.tpu,\n zone=FLAGS.zone,\n project=FLAGS.project,\n )\n return resolver\n\n\ndef get_tpu_strategy():\n resolver = get_tpu_cluster_resolver()\n tf.config.experimental_connect_to_cluster(resolver)\n tf.tpu.experimental.initialize_tpu_system(resolver)\n return tf.distribute.experimental.TPUStrategy(resolver)\n\n\nclass LayerForScalarSummary(base_layer.Layer):\n \"\"\"A pass-through layer that only records scalar values to summary.\"\"\"\n\n def call(self, x):\n # Add summary scalar using compat v2 implementation.\n scalar_summary_v2.scalar('custom_scalar_summary_v2', tf.reduce_sum(x))\n return x\n\n\nclass LayerForImageSummary(base_layer.Layer):\n \"\"\"A pass-through layer that only records image values to summary.\"\"\"\n\n def call(self, x):\n # Add summary image using compat v2 implementation.\n image_summary_v2.image('custom_image_summary_v2', x)\n\n return x\n\n\nclass LayerForHistogramSummary(base_layer.Layer):\n \"\"\"A pass-through layer that records histogram values to summary.\"\"\"\n\n def call(self, x):\n # Add summary histogram using compat v2 implementation.\n histogram_summary_v2.histogram('custom_histogram_summary_v2', x)\n\n return x\n\n\nclass CustomModel(training.Model):\n \"\"\"Custom model with summary ops in model call definition.\"\"\"\n\n def __init__(self, name=None):\n super(CustomModel, self).__init__()\n self._my_layers = [\n layer_lib.Dense(\n 4096,\n name='dense1',\n kernel_initializer=tf.compat.v1.glorot_normal_initializer(seed=0),\n use_bias=False),\n layer_lib.Dense(\n 4,\n name='dense2',\n kernel_initializer=tf.compat.v1.glorot_normal_initializer(seed=0),\n use_bias=False),\n ]\n 
self.histogram_summary_layer = LayerForHistogramSummary()\n self.scalar_summary_layer = LayerForScalarSummary()\n\n def call(self, x):\n for layer in self._my_layers:\n x = layer(x)\n x = self.scalar_summary_layer(x)\n return self.histogram_summary_layer(x)\n\n\ndef get_image_dataset():\n inputs = np.zeros((10, 28, 28, 3), dtype=np.float32)\n targets = np.zeros((10, NUM_CLASSES), dtype=np.float32)\n dataset = tf.data.Dataset.from_tensor_slices((inputs, targets))\n dataset = dataset.repeat(100)\n dataset = dataset.batch(10, drop_remainder=True)\n return dataset\n\n\ndef mnist_model(input_shape):\n \"\"\"Creates a MNIST model.\"\"\"\n model = sequential_model_lib.Sequential()\n\n # Adding custom pass-through layer to visualize input images.\n model.add(LayerForImageSummary())\n\n model.add(\n conv_layer_lib.Conv2D(\n 32, kernel_size=(3, 3), activation='relu', input_shape=input_shape))\n model.add(conv_layer_lib.Conv2D(64, (3, 3), activation='relu'))\n model.add(pool_layer_lib.MaxPooling2D(pool_size=(2, 2)))\n model.add(layer_lib.Dropout(0.25))\n model.add(layer_lib.Flatten())\n model.add(layer_lib.Dense(128, activation='relu'))\n model.add(layer_lib.Dropout(0.5))\n model.add(layer_lib.Dense(NUM_CLASSES, activation='softmax'))\n\n # Adding custom pass-through layer for summary recording.\n model.add(LayerForHistogramSummary())\n return model\n\n\nclass AutoOutsideCompilationWithKerasTest(tf.test.TestCase):\n\n def setUp(self):\n super(AutoOutsideCompilationWithKerasTest, self).setUp()\n tf.compat.v1.enable_v2_behavior()\n set_soft_device_placement(True)\n self.summary_dir = self.get_temp_dir()\n\n def validate_recorded_sumary_file(self, event_files, expected_event_counts):\n event_counts = collections.defaultdict(int)\n for event_file in event_files:\n for e in tf.compat.v1.train.summary_iterator(event_file):\n for v in e.summary.value:\n event_counts[v.tag] += 1\n\n event_counts = dict(event_counts) # Avoid defaultdict type in repr below.\n actual_event_counts = {\n k: v for k, v in event_counts.items() if k in expected_event_counts\n }\n self.assertEqual(\n expected_event_counts,\n actual_event_counts,\n msg='expected counts not found; all event counts: %r' % event_counts)\n\n def testV2SummaryWithKerasSequentialModel(self):\n strategy = get_tpu_strategy()\n\n with strategy.scope():\n model = mnist_model((28, 28, 3))\n model.compile('sgd', 'mse')\n\n dataset = get_image_dataset()\n tensorboard_callback = callbacks.TensorBoard(\n self.summary_dir, update_freq=2)\n model.fit(\n dataset,\n steps_per_epoch=10,\n epochs=1,\n callbacks=[tensorboard_callback])\n\n event_files = tf.io.gfile.glob(\n os.path.join(self.summary_dir, 'train', 'event*'))\n # Since total of 10 steps are ran and summary ops should be invoked\n # every 2 batches, we should see total of 5 event logs for each summary.\n expected_event_counts = {\n 'sequential/layer_for_histogram_summary/custom_histogram_summary_v2':\n 5,\n 'sequential/layer_for_image_summary/custom_image_summary_v2':\n 5,\n }\n self.validate_recorded_sumary_file(event_files, expected_event_counts)\n\n def testV2SummaryWithKerasSubclassedModel(self):\n strategy = get_tpu_strategy()\n\n with strategy.scope():\n model = CustomModel()\n model.compile('sgd', 'mse')\n\n dataset = distribute_strategy_test.get_dataset(strategy)\n tensorboard_callback = callbacks.TensorBoard(\n self.summary_dir, update_freq=2)\n model.fit(\n dataset,\n steps_per_epoch=10,\n epochs=1,\n callbacks=[tensorboard_callback])\n\n event_files = tf.io.gfile.glob(\n 
os.path.join(self.summary_dir, 'train', 'event*'))\n # Since total of 10 steps are ran and summary ops should be invoked\n # every 2 batches, we should see total of 5 event logs for each summary.\n expected_event_counts = {\n ('custom_model/layer_for_scalar_summary/'\n 'custom_scalar_summary_v2'):\n 5,\n ('custom_model/layer_for_histogram_summary/'\n 'custom_histogram_summary_v2'):\n 5\n }\n self.validate_recorded_sumary_file(event_files, expected_event_counts)\n\n def testSummaryWithCustomTrainingLoop(self):\n strategy = get_tpu_strategy()\n\n writer = tf.summary.create_file_writer(self.summary_dir)\n with strategy.scope():\n model = distribute_strategy_test.get_model()\n model.compile('sgd', 'mse')\n\n @tf.function\n def custom_function(dataset):\n\n def _custom_step(features, labels):\n del labels\n logits = model(features)\n with tf.summary.record_if(True), writer.as_default():\n scalar_summary_v2.scalar(\n 'logits',\n tf.reduce_sum(logits),\n step=model.optimizer.iterations)\n return logits\n\n iterator = iter(dataset)\n output = strategy.unwrap(\n strategy.run(_custom_step, args=(next(iterator))))\n return output\n\n dataset = strategy.experimental_distribute_dataset(\n distribute_strategy_test.get_dataset(strategy))\n\n custom_function(dataset)\n writer.close()\n\n event_files = tf.io.gfile.glob(\n os.path.join(self.summary_dir, 'event*'))\n expected_event_counts = {\n 'logits': 1,\n }\n self.validate_recorded_sumary_file(event_files, expected_event_counts)\n\n\nif __name__ == '__main__':\n tf.compat.v1.enable_eager_execution()\n tf.test.main()\n" ]
[ [ "tensorflow.compat.v2.compat.v1.enable_eager_execution", "tensorflow.python.eager.context.set_soft_device_placement", "tensorflow.compat.v2.tpu.experimental.initialize_tpu_system", "tensorflow.compat.v2.config.experimental_connect_to_cluster", "numpy.zeros", "tensorflow.compat.v2.compat.v1.glorot_normal_initializer", "tensorflow.compat.v2.distribute.cluster_resolver.TPUClusterResolver", "tensorflow.compat.v2.summary.create_file_writer", "tensorflow.compat.v2.data.Dataset.from_tensor_slices", "tensorflow.compat.v2.summary.record_if", "tensorflow.compat.v2.compat.v1.train.summary_iterator", "tensorflow.compat.v2.compat.v1.enable_v2_behavior", "tensorflow.compat.v2.reduce_sum", "tensorflow.compat.v2.distribute.experimental.TPUStrategy", "tensorflow.compat.v2.test.main" ] ]
tbekolay/nengodocs-rtd
[ "f57b45d14cf5ad748267d15616d6f6c11f41a165" ]
[ "examples/spa_parser.py" ]
[ "\n# coding: utf-8\n\n## Nengo example: Parsing simple commands\n\n# This example is a simplified version\n# of the language parsing model presented in\n# [Stewart & Eliasmith, 2013](http://compneuro.uwaterloo.ca/publications/stewart2013.html).\n# Please refer to that paper for the high-level details.\n\n# In[ ]:\n\n# Setup for the notebook\nimport matplotlib.pyplot as plt\n\nimport nengo\nfrom nengo import spa\n\n\n### Step 1: Create the model\n\n# In[ ]:\n\n# Number of dimensions for the SPs\ndimensions = 64\n\n# Make a model object with the SPA network\nmodel = spa.SPA(label='Parser')\n\nwith model:\n # Specify the modules to be used\n model.vision = spa.Buffer(dimensions=dimensions, neurons_per_dimension=100)\n model.phrase = spa.Buffer(dimensions=dimensions, neurons_per_dimension=100)\n model.motor = spa.Buffer(dimensions=dimensions, neurons_per_dimension=100)\n model.noun = spa.Memory(dimensions=dimensions, neurons_per_dimension=100)\n model.verb = spa.Memory(dimensions=dimensions, neurons_per_dimension=100)\n \n # Specify the action mapping\n actions = spa.Actions(\n 'dot(vision, WRITE) --> verb=vision',\n 'dot(vision, ONE+TWO+THREE) --> noun=vision',\n '0.5*(dot(NONE-WRITE-ONE-TWO-THREE, vision) '\n '+ dot(phrase, WRITE*VERB)) '\n '--> motor=phrase*~NOUN',\n )\n cortical_actions = spa.Actions(\n 'phrase=noun*NOUN + verb*VERB',\n )\n model.bg = spa.BasalGanglia(actions=actions)\n model.thal = spa.Thalamus(model.bg)\n model.cortical = spa.Cortical(actions=cortical_actions)\n\n\n### Step 2: Provide the input\n\n# In[ ]:\n\ndef input_vision(t):\n sequence = 'WRITE ONE NONE WRITE TWO NONE THREE WRITE NONE'.split()\n index = int(t / 0.5) % len(sequence)\n return sequence[index]\n\nwith model:\n model.input = spa.Input(vision=input_vision)\n\n\n### Step 3: Probe the output\n\n# In[ ]:\n\nwith model:\n vision = nengo.Probe(model.vision.state.output, synapse=0.03)\n phrase = nengo.Probe(model.phrase.state.output, synapse=0.03)\n motor = nengo.Probe(model.motor.state.output, synapse=0.03)\n noun = nengo.Probe(model.noun.state.output, synapse=0.03)\n verb = nengo.Probe(model.verb.state.output, synapse=0.03)\n actions = nengo.Probe(model.thal.actions.output, synapse=0.01)\n utility = nengo.Probe(model.bg.input, synapse=0.01)\n\n\n### Step 4: Run the model\n\n# In[ ]:\n\n# Create the simulator object\nwith nengo.Simulator(model) as sim:\n # Simulate the model for 4.5 seconds\n sim.run(4.5)\n\n\n### Step 5: Plot the results\n\n# In[ ]:\n\nfig = plt.figure(figsize=(16,12))\np1 = fig.add_subplot(7,1,1)\np1.plot(sim.trange(), model.similarity(sim.data, vision))\np1.legend(model.get_output_vocab('vision').keys, fontsize='x-small')\np1.set_ylabel('Vision')\n\np2 = fig.add_subplot(7,1,2)\np2.plot(sim.trange(), model.similarity(sim.data, phrase))\np2.legend(model.get_output_vocab('phrase').keys, fontsize='x-small')\np2.set_ylabel('Phrase')\n\np3 = fig.add_subplot(7,1,3)\np3.plot(sim.trange(), model.similarity(sim.data, motor))\np3.legend(model.get_output_vocab('motor').keys, fontsize='x-small')\np3.set_ylabel('Motor')\n\np4 = fig.add_subplot(7,1,4)\np4.plot(sim.trange(), model.similarity(sim.data, noun))\np4.legend(model.get_output_vocab('noun').keys, fontsize='x-small')\np4.set_ylabel('Noun')\n\np5 = fig.add_subplot(7,1,5)\np5.plot(sim.trange(), model.similarity(sim.data, verb))\np5.legend(model.get_output_vocab('verb').keys, fontsize='x-small')\np5.set_ylabel('Verb')\n\np6 = fig.add_subplot(7,1,6)\np6.plot(sim.trange(), sim.data[actions])\np6.set_ylabel('Action')\n\np7 = 
fig.add_subplot(7,1,7)\np7.plot(sim.trange(), sim.data[utility])\np7.set_ylabel('Utility')\n\nfig.subplots_adjust(hspace=0.2)\n\n" ]
[ [ "matplotlib.pyplot.figure" ] ]
Javicadserres/dnetworks
[ "173f9dfa35ae1cff8dbeec3c1d24cb00990b41a6" ]
[ "tests/test_activations.py" ]
[ "import numpy as np\nimport dnetworks\n\nfrom dnetworks.layers import (\n ReLU, Sigmoid, Tanh, LeakyReLU, Softmax\n)\n\ndef test_relu():\n \"\"\"\n Tests ReLU activation class.\n \"\"\"\n Z = np.array([1, -1, 0])\n dA = np.array([2, 3, 5])\n\n expected_A = np.array([1, 0, 0])\n expected_dZ = np.array([2, 0, 0])\n\n activation = ReLU()\n obtained_A = activation.forward(Z)\n obtained_dZ = activation.backward(dA)\n\n np.testing.assert_almost_equal(expected_A, obtained_A)\n np.testing.assert_almost_equal(expected_dZ, obtained_dZ)\n\n\ndef test_sigmoid():\n \"\"\"\n Tests Sigmoid activation class.\n \"\"\"\n Z = np.array([1, -1, 0])\n dA = np.array([2, 3, 5])\n\n expected_A = np.array([0.73105858, 0.26894142, 0.5])\n expected_dZ = np.array([0.39322387, 0.5898358 , 1.25])\n\n activation = Sigmoid()\n obtained_A = activation.forward(Z)\n obtained_dZ = activation.backward(dA)\n\n np.testing.assert_almost_equal(expected_A, obtained_A)\n np.testing.assert_almost_equal(expected_dZ, obtained_dZ)\n\n \ndef test_tanh():\n \"\"\"\n Tests hyperbolic tangent activation class.\n \"\"\"\n Z = np.array([1, -1, -1])\n dA = np.array([2, 3, 5])\n\n expected_A = np.array([0.76159416, -0.76159416, -0.76159416])\n expected_dZ = np.array([0.83994868, 1.25992302, 2.09987171])\n\n activation = Tanh()\n obtained_A = activation.forward(Z)\n obtained_dZ = activation.backward(dA)\n\n np.testing.assert_almost_equal(expected_A, obtained_A)\n np.testing.assert_almost_equal(expected_dZ, obtained_dZ)\n\n\ndef test_leakyrelu():\n \"\"\"\n Tests Leaky ReLU activation class.\n \"\"\"\n Z = np.array([1, -1, 0])\n dA = np.array([2, 3, 5])\n\n expected_A = np.array([1, -0.01, 0])\n expected_dZ = np.array([2, 0.03, 0.05])\n\n activation = LeakyReLU()\n obtained_A = activation.forward(Z)\n obtained_dZ = activation.backward(dA)\n\n np.testing.assert_almost_equal(expected_A, obtained_A)\n np.testing.assert_almost_equal(expected_dZ, obtained_dZ)\n\n\ndef test_softmax():\n \"\"\"\n Tests Softmax activation class.\n \"\"\"\n Z = np.array([[1], [0], [0]])\n dA = np.array([[2], [3], [5]])\n\n expected_A = np.array([[0.57611688], [0.21194156], [0.21194156]])\n expected_dZ = np.array([[-0.48841244], [0.03226466], [0.45614778]])\n\n activation = Softmax()\n obtained_A = activation.forward(Z)\n obtained_dZ = activation.backward(dA)\n\n np.testing.assert_almost_equal(expected_A, obtained_A)\n np.testing.assert_almost_equal(expected_dZ, obtained_dZ)" ]
[ [ "numpy.testing.assert_almost_equal", "numpy.array" ] ]
SnarkAttack/gaming_ml
[ "3f4e9ce6af73e60adcdd027341158eca8419bd54" ]
[ "src/mcts/mcts.py" ]
[ "import numpy as np\nfrom operator import attrgetter\n\nclass MCTSNode(object):\n\n def __init__(self, game, parent=None, c=2):\n\n self._game = game\n self._parent = parent\n self._children = []\n self._visits = 0\n self._value = 0\n # c is a constant value that can be tweaked to weight\n # how important visits are compared to value\n self._c = c\n\n @property\n def game(self):\n return self._game\n\n @property\n def parent(self):\n return self._parent\n\n @property\n def children(self):\n return self._children\n\n @property\n def visits(self):\n return self._visits\n\n @property\n def value(self):\n return self._value\n\n @property\n def childless(self):\n return len(self._children) == 0\n\n @property\n def unvisited(self):\n return self._visits == 0\n\n @property\n def has_parent(self):\n return self._parent is not None\n\n @property\n def ucb(self):\n return self._value + self._c * np.sqrt(np.log(self.parent.visits)/self.visits)\n\n def expand(self):\n pass\n\n def get_promising_child(self):\n \"\"\"\n Return the child that maximized the upper confidence bound (UCB)\n \"\"\"\n return max(self.children, key=attrgetter('ucb'))\n\n def backprop(self, value):\n self._value += value\n self._visits += 1\n\n\nclass MCTS(object):\n\n def __init__(self, game, num_iterations=100):\n\n self._root = MCTSNode(game)\n\n for i in range(num_iterations):\n self._step()\n\n @property\n def root(self):\n return self._root\n\n def _step(self):\n\n node = self.root\n\n while not node.childless:\n node = node.get_promising_child()\n\n if node.unvisited:\n # Get network value from game state here\n value = 0\n else:\n node.expand()\n node = node.get_promising_child()\n # Get network value here\n value = 0\n\n while node.has_parent:\n node.backprop(value)\n node = node.parent\n\n" ]
[ [ "numpy.log" ] ]
gmjustforfun/code
[ "7551909edf61bddfeafdff223c2a3390661dc62f" ]
[ "examples/demo_de.py" ]
[ "import numpy as np\nimport math\nimport openpyxl\nimport pandas as pd\nimport matplotlib.pyplot as plt\n'''\nmin f(x1, x2, x3) = x1^2 + x2^2 + x3^2\ns.t.\n x1*x2 >= 1\n x1*x2 <= 5\n x2 + x3 = 1\n 0 <= x1, x2, x3 <= 5\n'''\n\"参数预设\"\n# [-100,100]\nset_lb_01 =np.ones(10)*(-100)\nset_ub_01 = np.ones(10)*100\n# [-10,10]\nset_lb_02 = np.ones(10)*(-10)\nset_ub_02 = np.ones(10)*10\n# [-30,30]\nset_lb_03= np.ones(10)*(-30)\nset_ub_03 = np.ones(10)*30\n# [-1.28,1.28]\nset_lb_04 = np.ones(10)*(-1.28)\nset_ub_04 = np.ones(10)*1.28\n# [-500,500]\nset_lb_05 = np.ones(10)*(-500)\nset_ub_05 = np.ones(10)*500\n# [-5.12,5.12]\nset_lb_06 = np.ones(10)*(-5.12)\nset_ub_06 = np.ones(10)*5.12\n# [-32,32]\nset_lb_07 = np.ones(10)*(-32)\nset_ub_07 = np.ones(10)*32\n\n# [-100,100]\nset_lb_011 = np.ones(30)*(-100)\nset_ub_011 = np.ones(30)*100\n# [-10,10]\nset_lb_021 = np.ones(30)*(-10)\nset_ub_021 = np.ones(30)*10\n# [-30,30]\nset_lb_031 = np.ones(30)*(-30)\nset_ub_031 = np.ones(30)*30\n# [-1.28,1.28]\nset_lb_041 = np.ones(30)*(-1.28)\nset_ub_041 = np.ones(30)*1.28\n# [-500,500]\nset_lb_051 = np.ones(30)*(-500)\nset_ub_051 = np.ones(30)*500\n# [-5.12,5.12]\nset_lb_061 = np.ones(30)*(-5.12)\nset_ub_061 = np.ones(30)*5.12\n# [-32,32]\nset_lb_071 = np.ones(30)*(-32)\nset_ub_071 = np.ones(30)*32\n\n\n\"定义测试函数\"\ndef demo_func(x):\n x1, x2, x3 = x\n return x1 ** 2 + (x2 - 0.05) ** 2 + x3 ** 2\ndef demo_func_01_10(x):\n x1, x2, x3,x4,x5,x6,x7,x8,x9,x10 = x\n return x1 ** 2 + x2 ** 2 + x3 ** 2+ x4 ** 2+ x5 ** 2+ x6 ** 2+ x7 ** 2+ x8 ** 2+ x9 ** 2+ x10 ** 2\ndef demo_func_01_30(x):\n x1, x2, x3,x4,x5,x6,x7,x8,x9,x10,x11,x12,x13,x14,x15,x16,x17,x18,x19,x20,x21,x22,x23,x24,x25,x26,x27,x28,x29,x30 = x\n return x1 ** 2 + x2 ** 2 + x3 ** 2+ x4 ** 2+ x5 ** 2+ x6 ** 2+ x7 ** 2+ x8 ** 2+ x9 ** 2+ x10 ** 2+x11 ** 2+x12 ** 2+x13 ** 2+x14 ** 2+x15 ** 2+x16 ** 2+x17 ** 2+x18 ** 2+x19 ** 2+x20 ** 2+ x21 ** 2+x22 ** 2+x23 ** 2+x24 ** 2+x25 ** 2+x26 ** 2+x27 ** 2+x28 ** 2+x29 ** 2+x30 ** 2\n\ndef demo_func_02_10(x):\n x1,x2,x3,x4,x5,x6,x7,x8,x9,x10 = x\n return abs(x1)+ abs(x2)+ abs(x3)+ abs(x4)+ abs(x5)+ abs(x6)+ abs(x7)+ abs(x8)+ abs(x9)+ abs(x10)+abs(x1)*abs(x2)*abs(x3)*abs(x4)*abs(x5)*abs(x6)*abs(x7)*abs(x8)*abs(x9)*abs(x10)\ndef demo_func_02_30(x):\n x1, x2, x3,x4,x5,x6,x7,x8,x9,x10,x11,x12,x13,x14,x15,x16,x17,x18,x19,x20,x21,x22,x23,x24,x25,x26,x27,x28,x29,x30 = x\n return abs(x1)+ abs(x2)+ abs(x3)+ abs(x4)+ abs(x5)+ abs(x6)+ abs(x7)+ abs(x8)+ abs(x9)+ abs(x10)+abs(x1)*abs(x2)*abs(x3)*abs(x4)*abs(x5)*abs(x6)*abs(x7)*abs(x8)*abs(x9)*abs(x10)+abs(x11)+ abs(x12)+ abs(x13)+ abs(x14)+ abs(x15)+ abs(x16)+ abs(x17)+ abs(x18)+ abs(x19)+ abs(x20)+abs(x11)*abs(x12)*abs(x13)*abs(x14)*abs(x15)*abs(x16)*abs(x17)*abs(x18)*abs(x19)*abs(x20)+abs(x21)+ abs(x22)+ abs(x23)+ abs(x24)+ abs(x25)+ abs(x26)+ abs(x27)+ abs(x28)+ abs(x29)+ abs(x30)+abs(x21)*abs(x22)*abs(x23)*abs(x24)*abs(x25)*abs(x26)*abs(x27)*abs(x28)*abs(x29)*abs(x30)\n\ndef demo_func_03_10(x):\n x1, x2, x3,x4,x5,x6,x7,x8,x9,x10 = x\n xx = [x1,x2,x3,x4,x5,x6,x7,x8,x9,x10]\n # print(x)\n # print(x1)\n # print(\"xx------------------------------------------\")\n # print(xx)\n sum_total=0\n for i in range(10):\n sum = 0\n for j in range(i+1):\n sum += xx[j]\n sum = sum ** 2\n sum_total = sum_total+sum\n return sum_total\ndef demo_func_03_30(x):\n x1, x2, x3,x4,x5,x6,x7,x8,x9,x10,x11,x12,x13,x14,x15,x16,x17,x18,x19,x20,x21,x22,x23,x24,x25,x26,x27,x28,x29,x30 = x\n xx = [x1, x2, x3,x4,x5,x6,x7,x8,x9,x10,x11,x12,x13,x14,x15,x16,x17,x18,x19,x20,x21,x22,x23,x24,x25,x26,x27,x28,x29,x30]\n 
sum_total = 0\n for i in range(30):\n sum = 0\n for j in range(i + 1):\n sum += xx[j]\n sum = sum ** 2\n sum_total = sum_total + sum\n return sum_total\ndef demo_func_04_10(x):\n x1,x2,x3,x4,x5,x6,x7,x8,x9,x10 = x\n xx = [x1,x2,x3,x4,x5,x6,x7,x8,x9,x10]\n xx_1=xx.copy()\n for i in range(10):\n xx_1[i] = abs(xx[i])\n return max(xx_1)\ndef demo_func_04_30(x):\n x1,x2,x3,x4,x5,x6,x7,x8,x9,x10,x11,x12,x13,x14,x15,x16,x17,x18,x19,x20,x21,x22,x23,x24,x25,x26,x27,x28,x29,x30 = x\n xx = [x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15, x16, x17, x18, x19, x20, x21, x22, x23, x24,\n x25, x26, x27, x28, x29, x30]\n xx_1 = xx.copy()\n for i in range(30):\n xx_1[i] = abs(xx[i])\n return max(xx_1)\ndef demo_func_05_10(x):\n x1, x2, x3,x4,x5,x6,x7,x8,x9,x10 = x\n xx = [x1, x2, x3, x4, x5, x6, x7, x8, x9, x10]\n sum = 0\n for i in range(9):\n sum = sum + 100*((xx[i+1]-xx[i]**2)**2)+(xx[i]-1)**2\n return sum\ndef demo_func_05_30(x):\n x1, x2, x3,x4,x5,x6,x7,x8,x9,x10,x11,x12,x13,x14,x15,x16,x17,x18,x19,x20,x21,x22,x23,x24,x25,x26,x27,x28,x29,x30 = x\n xx = [x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15, x16, x17, x18, x19, x20, x21, x22, x23, x24,\n x25, x26, x27, x28, x29, x30]\n sum = 0\n for i in range(29):\n sum = sum + 100*((xx[i + 1] - xx[i] ** 2) ** 2) + (xx[i] - 1) ** 2\n return sum\ndef demo_func_06_10(x):\n x1, x2, x3,x4,x5,x6,x7,x8,x9,x10 = x\n xx = [x1, x2, x3, x4, x5, x6, x7, x8, x9, x10]\n sum = 0\n for i in range(10):\n a = xx[i] + 0.5\n a1 = math.floor(a)\n a1 = a1**2\n sum = sum+a1\n return sum\ndef demo_func_06_30(x):\n x1, x2, x3,x4,x5,x6,x7,x8,x9,x10,x11,x12,x13,x14,x15,x16,x17,x18,x19,x20,x21,x22,x23,x24,x25,x26,x27,x28,x29,x30 = x\n xx = [x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15, x16, x17, x18, x19, x20, x21, x22, x23, x24,\n x25, x26, x27, x28, x29, x30]\n sum = 0\n for i in range(30):\n a = xx[i] + 0.5\n a1 = math.floor(a)\n a1 = a1 ** 2\n sum = sum + a1\n return sum\n\"注意:这里文章中的公式表述不明\"\ndef demo_func_07_10(x):\n x1, x2, x3,x4,x5,x6,x7,x8,x9,x10 = x\n xx = [x1, x2, x3, x4, x5, x6, x7, x8, x9, x10]\n sum = 0\n for i in range(10):\n sum += (i+1)*(xx[i]**2)\n sum += np.random.rand(1)\n return sum\ndef demo_func_07_30(x):\n x1, x2, x3,x4,x5,x6,x7,x8,x9,x10,x11,x12,x13,x14,x15,x16,x17,x18,x19,x20,x21,x22,x23,x24,x25,x26,x27,x28,x29,x30 = x\n xx = [x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15, x16, x17, x18, x19, x20, x21, x22, x23, x24,\n x25, x26, x27, x28, x29, x30]\n sum = 0\n for i in range(30):\n sum += (i + 1) * (xx[i] ** 2)\n sum += np.random.rand(1)\n return sum\ndef demo_func_08_10(x):\n x1, x2, x3,x4,x5,x6,x7,x8,x9,x10 = x\n xx = [x1, x2, x3, x4, x5, x6, x7, x8, x9, x10]\n sum = 0\n for i in range(10):\n sum = sum+(-(xx[i]*math.sin(math.sqrt(abs(xx[i])))))\n return sum\ndef demo_func_08_30(x):\n x1, x2, x3,x4,x5,x6,x7,x8,x9,x10,x11,x12,x13,x14,x15,x16,x17,x18,x19,x20,x21,x22,x23,x24,x25,x26,x27,x28,x29,x30 = x\n xx = [x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15, x16, x17, x18, x19, x20, x21, x22, x23, x24,\n x25, x26, x27, x28, x29, x30]\n sum = 0\n for i in range(30):\n sum = sum + (-(xx[i] * math.sin(math.sqrt(abs(xx[i])))))\n return sum\ndef demo_func_09_10(x):\n x1, x2, x3,x4,x5,x6,x7,x8,x9,x10 = x\n xx = [x1, x2, x3, x4, x5, x6, x7, x8, x9, x10]\n sum = 0\n for i in range(10):\n sum += (xx[i]**2-10*math.cos(2*math.pi*xx[i])+10)\n return sum\ndef demo_func_09_30(x):\n x1, x2, 
x3,x4,x5,x6,x7,x8,x9,x10,x11,x12,x13,x14,x15,x16,x17,x18,x19,x20,x21,x22,x23,x24,x25,x26,x27,x28,x29,x30 = x\n xx = [x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15, x16, x17, x18, x19, x20, x21, x22, x23, x24,\n x25, x26, x27, x28, x29, x30]\n sum = 0\n for i in range(30):\n sum += (xx[i] ** 2 - 10 * math.cos(2 * math.pi * xx[i]) + 10)\n return sum\ndef demo_func_10_10(x):\n x1, x2, x3,x4,x5,x6,x7,x8,x9,x10 = x\n xx = [x1, x2, x3, x4, x5, x6, x7, x8, x9, x10]\n sum_01 = 0\n sum_02 = 0\n for i in range(10):\n sum_01 = sum_01+xx[i]**2\n sum_02 =sum_02+math.cos(2*math.pi*xx[i])\n sum_total = -20*math.exp(-0.2*math.sqrt((1/10)*sum_01))-math.exp((1/10)*sum_02)+20+math.e\n return sum_total\ndef demo_func_10_30(x):\n x1, x2, x3,x4,x5,x6,x7,x8,x9,x10,x11,x12,x13,x14,x15,x16,x17,x18,x19,x20,x21,x22,x23,x24,x25,x26,x27,x28,x29,x30 = x\n xx = [x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15, x16, x17, x18, x19, x20, x21, x22, x23, x24,\n x25, x26, x27, x28, x29, x30]\n sum_01 = 0\n sum_02 = 0\n for i in range(30):\n sum_01 = sum_01 + xx[i] ** 2\n sum_02 = sum_02 + math.cos(2 * math.pi * xx[i])\n sum_total = -20 * math.exp(-0.2 * math.sqrt((1 / 30) * sum_01)) - math.exp((1 / 30) * sum_02) + 20 + math.e\n return sum_total\n\n\ndef obj_func(p):\n x1, x2, x3 = p\n return x1 ** 2 + x2 ** 2 + x3 ** 2\n\n\nconstraint_eq = [\n lambda x: 1 - x[1] - x[2]\n]\n\nconstraint_ueq = [\n lambda x: 1 - x[0] * x[1],\n lambda x: x[0] * x[1] - 5\n]\n\n# %% Do DifferentialEvolution\nfrom sko.DE import DE\n\n\n\n# de = DE(func=demo_func_01_10, n_dim=10, size_pop=50, max_iter=2000, lb=set_lb_01, ub=set_ub_01)\n# best_x, best_y = de.run()\n# print('best_x:', best_x, '\\n', 'best_y:', best_y)\n# print(de.generation_best_Y)\n# plt.plot(de.generation_best_Y)\n# plt.show()\n#\n\nresult = np.zeros(100)\nfor i in range(100):\n de = DE(func=demo_func_01_10, n_dim=10, size_pop=50, max_iter=5000, lb=set_lb_01, ub=set_ub_01)\n best_x, best_y = de.run()\n result[i] = best_y\nwb = openpyxl.load_workbook('test.xlsx')\n# create a Pandas Excel writer using xlswriter,这是写入一个已经存在的文件中\nwriter = pd.ExcelWriter('test.xlsx',engine='openpyxl')\nwriter.book = wb\ndf01 = pd.DataFrame(data=result)\ndf01.to_excel(writer, sheet_name='100次DE单个函数(10dim)实验结果', startcol=0, index=False)\n\nwriter.save() # 一定要保存\n" ]
[ [ "numpy.random.rand", "numpy.zeros", "pandas.DataFrame", "numpy.ones", "pandas.ExcelWriter" ] ]
bsouhaib/qf-tpp
[ "a5adf3f7203b920528c1c397329c4afd9039c3b4" ]
[ "code/dpp/flows/affine.py" ]
[ "import torch\nimport torch.nn as nn\n\nfrom torch.distributions import constraints\n\nfrom dpp.flows.base import Flow\nfrom dpp.nn import Hypernet\nfrom dpp.utils import clamp_preserve_gradients\n\n\nclass FixedAffine(Flow):\n \"\"\"Affine transformation y = ax + b with fixed parameters.\"\"\"\n domain = constraints.real\n codomain = constraints.real\n\n def __init__(self, scale_init=1.0, shift_init=0.0, use_shift=False, trainable=False):\n super().__init__()\n log_scale_init = torch.tensor([scale_init]).log()\n self.log_scale = nn.Parameter(log_scale_init, requires_grad=trainable)\n if use_shift:\n shift_init = torch.tensor([shift_init])\n self.shift = nn.Parameter(shift_init, requires_grad=trainable)\n else:\n self.shift = 0.0\n\n def forward(self, x, **kwargs):\n y = torch.exp(self.log_scale) * x + self.shift\n log_det_jac = self.log_scale.expand(y.shape)\n return y, log_det_jac\n\n def inverse(self, y, **kwargs):\n x = (y - self.shift) * torch.exp(-self.log_scale)\n inv_log_det_jac = -self.log_scale.expand(x.shape)\n return x, inv_log_det_jac\n\n\nclass HyperAffine(Flow):\n \"\"\"Affine transformation where the parameters are generated by a hypernet.\"\"\"\n domain = constraints.real\n codomain = constraints.real\n\n def __init__(self, config, min_clip=-5.0, max_clip=3.0):\n super().__init__()\n self.use_history(config.use_history)\n self.use_embedding(config.use_embedding)\n\n self.min_clip = min_clip\n self.max_clip = max_clip\n self.hypernet = Hypernet(config, param_sizes=[1, 1])\n\n def get_params(self, h, emb):\n if not self.using_history:\n h = None\n if not self.using_embedding:\n emb = None\n if self.using_history or self.using_embedding:\n log_scale, shift = self.hypernet(h, emb)\n log_scale = clamp_preserve_gradients(log_scale, self.min_clip, self.max_clip)\n return log_scale.squeeze(-1), shift.squeeze(-1)\n\n def forward(self, x, h=None, emb=None):\n \"\"\"Forward transformation.\n\n Args:\n x: Samples to transform. shape (*)\n h: History for each sample. Shape should match x (except last dim).\n shape (*, history_size)\n emb: Embeddings for each sample. Shape should match x (except last dim).\n shape (*, embedding_size)\n \"\"\"\n log_scale, shift = self.get_params(h, emb)\n y = torch.exp(log_scale) * x + shift\n log_det_jac = log_scale.expand(y.shape)\n return y, log_det_jac\n\n def inverse(self, y, h=None, emb=None):\n \"\"\"Inverse transformation.\n\n Args:\n x: Samples to transform. shape (*)\n h: History for each sample. Shape should match x (except last dim).\n shape (*, history_size)\n emb: Embeddings for each sample. Shape should match x (except last dim).\n shape (*, embedding_size)\n \"\"\"\n log_scale, shift = self.get_params(h, emb)\n x = (y - shift) * torch.exp(-log_scale)\n inv_log_det_jac = -log_scale.expand(x.shape)\n return x, inv_log_det_jac\n" ]
[ [ "torch.tensor", "torch.exp", "torch.nn.Parameter" ] ]
shouno/FS_MCMC
[ "624b437444d8fbf8034cf15c83b754f2f0cdd3e3" ]
[ "IsingModel/energy_state.py" ]
[ "#\n# -*- coding: utf-8 -*-\n#\n\nimport numpy as np\nfrom tqdm import tqdm\n\n# 全探索バージョン\n\n\nclass IsingModelMC:\n def __init__(self, size, beta=1., h=0.0, state=None, J=None):\n self.size = size\n if J is None: # Ferro\n self.Jmat = np.ones((size, size)) - np.eye(size)\n else:\n self.Jmat = J\n\n self.beta = beta\n self.h = h\n if state is None:\n self.s = np.random.binomial(1, 0.5, size=size) * 2 - 1.\n else:\n self.s = state\n\n self.energy = self.H(self.s)\n self.acccnt = 0\n\n def H(self, value):\n return - 0.5 * value.dot(self.Jmat.dot(value)) / self.size\n\n def mcstep(self, value=None):\n if self.beta == 0.: # 温度∞ (beta=0.0) は全とっかえ\n self.s = np.random.binomial(1, 0.5, size=self.size) * 2 - 1.\n self.energy = self.H(self.s)\n self.acccnt += self.size\n return\n # 有限温度の場合\n if value is None:\n value = self.s\n order = np.random.permutation(self.size)\n rvals = np.random.uniform(0., 1., self.size)\n\n for idx in order:\n oldE = self.energy\n self.s[idx] *= -1\n newE = self.H(self.s)\n delta = newE - oldE\n pdelta = np.exp(-self.beta * delta)\n\n # print('r: %g, delta(new:%f - old:%f): %g' % (rvals[idx], newE, oldE, delta))\n if(rvals[idx] < pdelta): # 'accept'\n # print('accept')\n self.energy = newE\n self.acccnt += 1\n else: # 'reject' restore\n # print('reject')\n self.s[idx] *= -1\n\n def trace(self, iter, reset=False):\n Es = []\n States = []\n if reset is True:\n self.acccnt = 0\n\n for it in tqdm(range(iter)):\n self.mcstep()\n Es.append(self.energy)\n States.append(np.array(self.s))\n\n self.logs = {'energy': np.array(Es), 'state': np.array(States)}\n return self.logs\n\n\nif __name__ == \"__main__\":\n size = 24\n J0 = 1.0\n Jmat = J0 * (np.ones((size, size)) - np.eye(size))\n\n model = IsingModelMC(size, J=Jmat)\n\n Es = []\n for n in tqdm(range(2**size)):\n s1 = np.array([i for i in bin(n)[2:]], dtype=np.float)\n s0 = np.zeros((size - s1.shape[0],))\n s = np.hstack((s0, s1))\n\n Es.append(model.H(s))\n\n np.savez('Ising2ExSearch.npz', Es=np.array(Es))\n" ]
[ [ "numpy.array", "numpy.random.binomial", "numpy.zeros", "numpy.random.permutation", "numpy.ones", "numpy.exp", "numpy.eye", "numpy.random.uniform", "numpy.hstack" ] ]
Rahul-fix/stereo-depth
[ "b3722644ffe6268968bada1681cb2b6f4cd44a3b" ]
[ "src/models/joined_processing.py" ]
[ "import torch\nfrom torch import nn\nimport configparser\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\nconfig = configparser.ConfigParser()\nconfig.read(\"configs/kitti.config\")\n\nin_channels = config.getint(\"Siamese\", \"channels\")\n\nheight = config.getint(\"Data\", \"height\")\nwidth = config.getint(\"Data\", \"width\")\nmax_disparity = config.getint(\"Data\", \"max_disparity\")\n\nchannels3d = [int(c) for c in config.get(\"CostVolume\", \"channels3d\")[1:-1].split(\",\")]\nnum_cost_blocks = config.getint(\"CostVolume\", \"num_cost_blocks\")\n\nbatch_size = config.getint(\"Training\", \"batch_size\")\n\n\n\nclass CostVolume(nn.Module):\n def __init__(self, block3d):\n super(CostVolume, self).__init__()\n self.batch_size = batch_size\n self.height = height\n self.width = width\n self.max_disparity = max_disparity\n self.in_channels = in_channels\n self.channels = channels3d\n self.num_cost_blocks = num_cost_blocks\n layers = list()\n for i in range(self.num_cost_blocks):\n layers.append(block3d(self.channels[i], self.channels[i + 1], kernel_size=(1, 3, 3), stride=1, padding=(0, 1, 1),\n change_dim=True))\n self.process_volume = nn.Sequential(*layers)\n\n def forward(self, x):\n left_feat, right_feat = x\n cost = self.features_to_cost_volume(left_feat, right_feat)\n processed_volume = self.process_volume(cost)\n return processed_volume\n\n def features_to_cost_volume(self, left_feat, right_feat):\n\n cost = torch.Tensor(self.batch_size, self.in_channels * 2, self.max_disparity // 4, left_feat.shape[-2] ,\n left_feat.shape[-1]).to(device)\n for i in range(self.max_disparity // 4):\n if i == 0:\n cost[:, :self.in_channels, i, :, :] = left_feat\n cost[:, self.in_channels:, i, :, :] = right_feat\n else:\n cost[:, :self.in_channels, i, :, i:] = left_feat[:, :, :, i:]\n cost[:, self.in_channels:, i, :, i:] = right_feat[:, :, :, :-i]\n\n return cost\n" ]
[ [ "torch.nn.Sequential", "torch.cuda.is_available", "torch.Tensor" ] ]
shi3z/image-gpt
[ "c996b872908d67a74cd5e79e6a5299f2fd5bee40" ]
[ "src/imgconv.py" ]
[ "# Sept. 5 2020 shi3z \n# MIT Licence\n\nimport os\nimport random\nfrom PIL import Image\nfrom imageio import imwrite\nimport numpy as np\n\nclusters = np.load(\"download/kmeans_centers.npy\")\n\n\n# Load Image \nim = Image.open('shi3z.png')\nim = im.resize((32,32))\n\nimr = np.array(im)\n\ndef dist(x,y): # Get Euclidean distance\n return np.sqrt(np.sum((x-y)**2))\n\ndef find_index(a): # Find the closest color from the lookup table.\n\t# Oh, you know how to write more complete code. But I wanted to save time investigating.\n\tmind = 10000 # mind:minium distance\n\tminidx = -1 # minidx: index of minimum distance color of CLUT.\n\tfor i in range(len(clusters)): \n\t\td = dist(a/127.5-1.0,clusters[i])\n\t\tif mind > d:\n\t\t\tmind = d\n\t\t\tminidx = i\n\treturn minidx # Return minimum index\n\n\n# RGB Image to palettized Image\nresult = []\nfor y in range(32):\n\tfor x in range(32):\n\t\tresult.append(find_index(imr[y,x]))\n\nsamples = np.array(result)\nnp.save(\"shi3z.npy\",samples) #save numpy data\n\n# Confirm generated image\nsamples = np.reshape(np.rint(127.5 * (clusters[samples] + 1.0)), [32, 32, 3]).astype(np.uint8)\nimwrite(\"res.png\", samples)\n" ]
[ [ "numpy.array", "numpy.sum", "numpy.rint", "numpy.load", "numpy.save" ] ]
pprp/pytorch-cifar-model-zoo
[ "e7e68d8162a77aee2161bcd3cd70c20ddd7ef7ab" ]
[ "lib/mutator/random_mutator.py" ]
[ "import numpy as np\nimport torch\nimport torch.nn.functional as F\n\nfrom lib.mutables.spaces import InputSpace, OperationSpace, ValueSpace\n\nfrom .default_mutator import Mutator\n\n__all__ = [\n 'RandomMutator',\n]\n\n\nclass RandomMutator(Mutator):\n def __init__(self, model, *args, **kwargs):\n super().__init__(model)\n\n def sample_search(self):\n result = dict()\n for mutable in self.mutables:\n if isinstance(mutable, OperationSpace):\n gen_index = torch.randint(high=mutable.length, size=(1, ))\n result[mutable.key] = F.one_hot(gen_index, num_classes=mutable.length).view(-1).bool()\n mutable.mask = result[mutable.key].detach()\n elif isinstance(mutable, InputSpace):\n if mutable.n_chosen is None:\n result[mutable.key] = torch.randint(high=2, size=(mutable.n_candidates,)).view(-1).bool()\n else:\n perm = torch.randperm(mutable.n_candidates)\n mask = [i in perm[:mutable.n_chosen] for i in range(mutable.n_candidates)]\n result[mutable.key] = torch.tensor(mask, dtype=torch.bool) # pylint: disable=not-callable\n mutable.mask = result[mutable.key].detach()\n elif isinstance(mutable, ValueSpace):\n gen_index = torch.randint(high=mutable.length, size=(1, ))\n result[mutable.key] = F.one_hot(gen_index, num_classes=mutable.length).view(-1).bool()\n mutable.mask = F.one_hot(gen_index, num_classes=mutable.length).view(-1).bool()\n\n return result\n\n def sample_final(self):\n return self.sample_search()\n" ]
[ [ "torch.randint", "torch.nn.functional.one_hot", "torch.randperm", "torch.tensor" ] ]
Asquidy/pyblp_alt
[ "cd3f79ddef51da8104df128399d6e981bf34f3bf" ]
[ "pyblp/construction.py" ]
[ "\"\"\"Data construction.\"\"\"\n\nfrom typing import Any, Callable, Dict, Iterator, List, Mapping, Optional, Union\n\nimport numpy as np\n\nfrom . import exceptions, options\nfrom .configurations.formulation import Formulation\nfrom .configurations.integration import Integration\nfrom .utilities.basics import Array, Groups, RecArray, extract_matrix, interact_ids, structure_matrices, get_indices\n\n\ndef build_id_data(T: int, J: int, F: int) -> RecArray:\n r\"\"\"Build a balanced panel of market and firm IDs.\n\n This function can be used to build ``id_data`` for :class:`Simulation` initialization.\n\n Parameters\n ----------\n T : `int`\n Number of markets.\n J : `int`\n Number of products in each market.\n F : `int`\n Number of firms. If ``J`` is divisible by ``F``, firms produce ``J / F`` products in each market. Otherwise,\n firms with smaller IDs will produce excess products.\n\n Returns\n -------\n `recarray`\n IDs that associate products with markets and firms. Each of the ``T * J`` rows corresponds to a product. Fields:\n\n - **market_ids** : (`object`) - Market IDs that take on values from ``0`` to ``T - 1``.\n\n - **firm_ids** : (`object`) - Firm IDs that take on values from ``0`` to ``F - 1``.\n\n Examples\n --------\n .. raw:: latex\n\n \\begin{examplenotebook}\n\n .. toctree::\n\n /_notebooks/api/build_id_data.ipynb\n\n .. raw:: latex\n\n \\end{examplenotebook}\n\n \"\"\"\n if not isinstance(T, int) or not isinstance(F, int) or T < 1 or F < 1:\n raise ValueError(\"Both T and F must be positive ints.\")\n if not isinstance(J, int) or J < F:\n raise ValueError(\"J must be an int that is at least F.\")\n return structure_matrices({\n 'market_ids': (np.repeat(np.arange(T), J).astype(np.int), np.object),\n 'firm_ids': (np.floor(np.tile(np.arange(J), T) * F / J).astype(np.int), np.object)\n })\n\n\ndef build_ownership(\n product_data: Mapping, kappa_specification: Optional[Union[str, Callable[[Any, Any], float]]] = None) -> Array:\n r\"\"\"Build ownership matrices, :math:`O`.\n\n Ownership or product holding matrices are defined by their cooperation matrix counterparts, :math:`\\kappa`. For each\n market :math:`t`, :math:`\\mathscr{H}_{jk} = \\kappa_{fg}` where :math:`j \\in J_{ft}`, the set of products\n produced by firm :math:`f` in the market, and similarly, :math:`g \\in J_{gt}`.\n\n Parameters\n ----------\n product_data : `structured array-like`\n Each row corresponds to a product. Markets can have differing numbers of products. The following fields are\n required (except for ``firm_ids`` when ``kappa_specification`` is one of the special cases):\n\n - **market_ids** : (`object`) - IDs that associate products with markets.\n\n - **firm_ids** : (`object`) - IDs that associate products with firms. This field is ignored if\n ``kappa_specification`` is one of the special cases and not a function.\n\n kappa_specification : `str or callable, optional`\n Specification for each market's cooperation matrix, :math:`\\kappa`, which can either be a general function or a\n string that implements a special case. The general function is is of the following form::\n\n kappa(f, g) -> value\n\n where ``value`` is :math:`\\mathscr{H}_{jk}` and both ``f`` and ``g`` are firm IDs from the ``firm_ids`` field of\n ``product_data``.\n\n The default specification, ``lambda: f, g: int(f == g)``, constructs traditional ownership matrices. 
That is,\n :math:`\\kappa = I`, the identify matrix, implies that :math:`\\mathscr{H}_{jk}` is :math:`1` if the same firm\n produces products :math:`j` and :math:`k`, and is :math:`0` otherwise.\n\n If ``firm_ids`` happen to be indices for an actual :math:`\\kappa` matrix, ``lambda f, g: kappa[f, g]`` will\n build ownership matrices according to the matrix ``kappa``.\n\n When one of the special cases is specified, ``firm_ids`` in ``product_data`` are not required and if specified\n will be ignored:\n\n - ``'monopoly'`` - Monopoly ownership matrices are all ones: :math:`\\mathscr{H}_{jk} = 1` for all :math:`j`\n and :math:`k`.\n\n - ``'single'`` - Single product firm ownership matrices are identity matrices: :math:`\\mathscr{H}_{jk} = 1`\n if :math:`j = k` and :math:`0` otherwise.\n\n Returns\n -------\n `ndarray`\n Stacked :math:`J_t \\times J_t` ownership matrices, :math:`\\mathscr{H}`, for each market :math:`t`. If a market\n has fewer products than others, extra columns will contain ``numpy.nan``.\n\n Examples\n --------\n .. raw:: latex\n\n \\begin{examplenotebook}\n\n .. toctree::\n\n /_notebooks/api/build_ownership.ipynb\n\n .. raw:: latex\n\n \\end{examplenotebook}\n\n \"\"\"\n\n # validate or use the default kappa specification\n if kappa_specification is None:\n kappa_specification = lambda f, g: np.where(f == g, 1, 0).astype(options.dtype)\n elif callable(kappa_specification):\n kappa_specification = np.vectorize(kappa_specification, [options.dtype])\n elif kappa_specification not in {'monopoly', 'single'}:\n raise ValueError(\"kappa_specification must be None, callable, 'monopoly', or 'single'.\")\n\n # extract and validate IDs\n market_ids = extract_matrix(product_data, 'market_ids')\n firm_ids = extract_matrix(product_data, 'firm_ids')\n if market_ids is None:\n raise KeyError(\"product_data must have a market_ids field.\")\n if market_ids.shape[1] > 1:\n raise ValueError(\"The market_ids field of product_data must be one-dimensional.\")\n if callable(kappa_specification):\n if firm_ids is None:\n raise KeyError(\"product_data must have a firm_ids field when kappa_specification is not a special case.\")\n if firm_ids.shape[1] > 1:\n raise ValueError(\"The firm_ids field of product_data must be one-dimensional.\")\n\n # determine the overall number of products and the maximum number in a market\n market_indices = get_indices(market_ids)\n N = market_ids.size\n max_J = max(i.size for i in market_indices.values())\n\n # construct the ownership matrices\n ownership = np.full((N, max_J), np.nan, options.dtype)\n for indices_t in market_indices.values():\n if kappa_specification == 'monopoly':\n ownership[indices_t, :indices_t.size] = 1\n elif kappa_specification == 'single':\n ownership[indices_t, :indices_t.size] = np.eye(indices_t.size)\n else:\n assert callable(kappa_specification) and firm_ids is not None\n ids_t = firm_ids[indices_t]\n tiled_ids_t = np.tile(np.c_[ids_t], ids_t.size)\n ownership[indices_t, :indices_t.size] = kappa_specification(tiled_ids_t, tiled_ids_t.T)\n\n return ownership\n\n\ndef build_blp_instruments(formulation: Formulation, product_data: Mapping) -> Array:\n r\"\"\"Construct \"sums of characteristics\" excluded BLP instruments.\n\n Traditional \"sums of characteristics\" BLP instruments are\n\n .. 
math:: Z^\\text{BLP}(X) = [Z^\\text{BLP,Other}(X), Z^\\text{BLP,Rival}(X)],\n\n in which :math:`X` is a matrix of product characteristics, :math:`Z^\\text{BLP,Other}(X)` is a second matrix that\n consists of sums over characteristics of non-rival goods, and :math:`Z^\\text{BLP,Rival}(X)` is a third matrix that\n consists of sums over rival goods. All three matrices have the same dimensions.\n\n .. note::\n\n To construct simpler, firm-agnostic instruments that are sums over characteristics of other goods, specify a\n constant column of firm IDs and keep only the first half of the instrument columns.\n\n Let :math:`x_{jt}` be the vector of characteristics in :math:`X` for product :math:`j` in market :math:`t`, which is\n produced by firm :math:`f`. That is, :math:`j \\in J_{ft}`. Then,\n\n .. math::\n\n Z_{jt}^\\text{BLP,Other}(X) = \\sum_{k \\in J_{ft} \\setminus \\{j\\}} x_{kt}, \\\\\n Z_{jt}^\\text{BLP,Rival}(X) = \\sum_{k \\notin J_{ft}} x_{kt}.\n\n .. note::\n\n Usually, any supply or demand shifters are added to these excluded instruments, depending on whether they are\n meant to be used for demand- or supply-side estimation.\n\n Parameters\n ----------\n formulation : `Formulation`\n :class:`Formulation` configuration for :math:`X`, the matrix of product characteristics used to build excluded\n instruments. Variable names should correspond to fields in ``product_data``.\n product_data : `structured array-like`\n Each row corresponds to a product. Markets can have differing numbers of products. The following fields are\n required:\n\n - **market_ids** : (`object`) - IDs that associate products with markets.\n\n - **firm_ids** : (`object`) - IDs that associate products with firms.\n\n Along with ``market_ids`` and ``firm_ids``, the names of any additional fields can be used as variables in\n ``formulation``.\n\n Returns\n -------\n `ndarray`\n Traditional \"sums of characteristics\" BLP instruments, :math:`Z^\\text{BLP}(X)`.\n\n Examples\n --------\n .. raw:: latex\n\n \\begin{examplenotebook}\n\n .. toctree::\n\n /_notebooks/api/build_blp_instruments.ipynb\n\n .. raw:: latex\n\n \\end{examplenotebook}\n\n \"\"\"\n\n # load IDs\n market_ids = extract_matrix(product_data, 'market_ids')\n firm_ids = extract_matrix(product_data, 'firm_ids')\n if market_ids is None or firm_ids is None:\n raise KeyError(\"product_data must have market_ids and firm_ids fields.\")\n if market_ids.shape[1] > 1:\n raise ValueError(\"The market_ids field of product_data must be one-dimensional.\")\n if firm_ids.shape[1] > 1:\n raise ValueError(\"The firm_ids field of product_data must be one-dimensional.\")\n\n # initialize grouping objects\n market_groups = Groups(market_ids)\n paired_groups = Groups(interact_ids(market_ids, firm_ids))\n\n # build the instruments\n X = build_matrix(formulation, product_data)\n other = paired_groups.expand(paired_groups.sum(X)) - X\n rival = market_groups.expand(market_groups.sum(X)) - X - other\n return np.ascontiguousarray(np.c_[other, rival])\n\n\ndef build_differentiation_instruments(\n formulation: Formulation, product_data: Mapping, version: str = 'local', interact: bool = False) -> Array:\n r\"\"\"Construct excluded differentiation instruments.\n\n Differentiation instruments in the spirit of :ref:`references:Gandhi and Houde (2017)` are\n\n .. 
math:: Z^\\text{Diff}(X) = [Z^\\text{Diff,Other}(X), Z^\\text{Diff,Rival}(X)],\n\n in which :math:`X` is a matrix of product characteristics, :math:`Z^\\text{Diff,Other}(X)` is a second matrix that\n consists of sums over functions of differences between non-rival goods, and :math:`Z^\\text{Diff,Rival}(X)` is a\n third matrix that consists of sums over rival goods. Without optional interaction terms, all three matrices have the\n same dimensions.\n\n .. note::\n\n To construct simpler, firm-agnostic instruments that are sums over functions of differences between all different\n goods, specify a constant column of firm IDs and keep only the first half of the instrument columns.\n\n Let :math:`x_{jt\\ell}` be characteristic :math:`\\ell` in :math:`X` for product :math:`j` in market :math:`t`, which\n is produced by firm :math:`f`. That is, :math:`j \\in J_{ft}`. Then in the \"local\" version of\n :math:`Z^\\text{Diff}(X)`,\n\n .. math::\n :label: local_instruments\n\n Z_{jt\\ell}^\\text{Local,Other}(X) =\n \\sum_{k \\in J_{ft} \\setminus \\{j\\}} 1(|d_{jkt\\ell}| < \\text{SD}_\\ell), \\\\\n Z_{jt\\ell}^\\text{Local,Rival}(X) =\n \\sum_{k \\notin J_{ft}} 1(|d_{jkt\\ell}| < \\text{SD}_\\ell),\n\n where :math:`d_{jkt\\ell} = x_{kt\\ell} - x_{jt\\ell}` is the difference between products :math:`j` and :math:`k` in\n terms of characteristic :math:`\\ell`, :math:`\\text{SD}_\\ell` is the standard deviation of these pairwise differences\n computed across all markets, and :math:`1(|d_{jkt\\ell}| < \\text{SD}_\\ell)` indicates that products :math:`j` and\n :math:`k` are close to each other in terms of characteristic :math:`\\ell`.\n\n The intuition behind this \"local\" version is that demand for products is often most influenced by a small number of\n other goods that are very similar. For the \"quadratic\" version of :math:`Z^\\text{Diff}(X)`, which uses a more\n continuous measure of the distance between goods,\n\n .. math::\n :label: quadratic_instruments\n\n Z_{jtk}^\\text{Quad,Other}(X) = \\sum_{k \\in J_{ft} \\setminus\\{j\\}} d_{jkt\\ell}^2, \\\\\n Z_{jtk}^\\text{Quad,Rival}(X) = \\sum_{k \\notin J_{ft}} d_{jkt\\ell}^2.\n\n With interaction terms, which reflect covariances between different characteristics, the summands for the \"local\"\n versions are :math:`1(|d_{jkt\\ell}| < \\text{SD}_\\ell) \\times d_{jkt\\ell'}` for all characteristics :math:`\\ell'`,\n and the summands for the \"quadratic\" versions are :math:`d_{jkt\\ell} \\times d_{jkt\\ell'}` for all\n :math:`\\ell' \\geq \\ell`.\n\n .. note::\n\n Usually, any supply or demand shifters are added to these excluded instruments, depending on whether they are\n meant to be used for demand- or supply-side estimation.\n\n Parameters\n ----------\n formulation : `Formulation`\n :class:`Formulation` configuration for :math:`X`, the matrix of product characteristics used to build excluded\n instruments. Variable names should correspond to fields in ``product_data``.\n product_data : `structured array-like`\n Each row corresponds to a product. Markets can have differing numbers of products. 
The following fields are\n required:\n\n - **market_ids** : (`object`) - IDs that associate products with markets.\n\n - **firm_ids** : (`object`) - IDs that associate products with firms.\n\n Along with ``market_ids`` and ``firm_ids``, the names of any additional fields can be used as variables in\n ``formulation``.\n\n version : `str, optional`\n The version of differentiation instruments to construct:\n\n - ``'local'`` (default) - Construct the instruments in :eq:`local_instruments` that consider only the\n characteristics of \"close\" products in each market.\n\n - ``'quadratic'`` - Construct the more continuous instruments in :eq:`quadratic_instruments` that consider\n all products in each market.\n\n interact : `bool, optional`\n Whether to include interaction terms between different product characteristics, which can help capture\n covariances between product characteristics.\n\n Returns\n -------\n `ndarray`\n Excluded differentiation instruments, :math:`Z^\\text{Diff}(X)`.\n\n Examples\n --------\n .. raw:: latex\n\n \\begin{examplenotebook}\n\n .. toctree::\n\n /_notebooks/api/build_differentiation_instruments.ipynb\n\n .. raw:: latex\n\n \\end{examplenotebook}\n\n \"\"\"\n\n # load IDs\n market_ids = extract_matrix(product_data, 'market_ids')\n firm_ids = extract_matrix(product_data, 'firm_ids')\n if market_ids is None or firm_ids is None:\n raise KeyError(\"product_data must have market_ids and firm_ids fields.\")\n if market_ids.shape[1] > 1:\n raise ValueError(\"The market_ids field of product_data must be one-dimensional.\")\n if firm_ids.shape[1] > 1:\n raise ValueError(\"The firm_ids field of product_data must be one-dimensional.\")\n\n # identify markets\n market_indices = get_indices(market_ids)\n\n # build the matrix and count its dimensions\n X = build_matrix(formulation, product_data)\n N, K = X.shape\n\n # for the local version, do a first pass to compute standard deviations of pairwise differences across all markets\n sd_mapping: Dict[int, Array] = {}\n if version == 'local':\n for k in range(K):\n distances_count = distances_sum = squared_distances_sum = 0\n for t, indices_t in market_indices.items():\n x = X[indices_t][:, [k]]\n distances = x - x.T\n np.fill_diagonal(distances, 0)\n distances_count += distances.size - x.size\n distances_sum += np.sum(distances)\n squared_distances_sum += np.sum(distances**2)\n sd_mapping[k] = np.sqrt(squared_distances_sum / distances_count - (distances_sum / distances_count)**2)\n\n # build instruments market-by-market to conserve memory\n other_blocks: List[List[Array]] = []\n rival_blocks: List[List[Array]] = []\n for t, indices_t in market_indices.items():\n # build distance matrices for all characteristics\n distances_mapping: Dict[int, Array] = {}\n for k in range(K):\n x = X[indices_t][:, [k]]\n distances_mapping[k] = x - x.T\n np.fill_diagonal(distances_mapping[k], 0 if version == 'quadratic' else np.inf)\n\n def generate_instrument_terms() -> Iterator[Array]:\n \"\"\"Generate terms that will be summed to create instruments.\"\"\"\n for k1 in range(K):\n if version == 'quadratic':\n for k2 in range(k1, K if interact else k1 + 1):\n yield distances_mapping[k1] * distances_mapping[k2]\n elif version == 'local':\n with np.errstate(invalid='ignore'):\n close = (np.abs(distances_mapping[k1]) < sd_mapping[k1]).astype(np.float64)\n if not interact:\n yield close\n else:\n for k2 in range(K):\n yield close * distances_mapping[k2]\n else:\n raise ValueError(\"version must be 'local' or 'quadratic'.\")\n\n # append instrument 
blocks\n other_blocks.append([])\n rival_blocks.append([])\n ownership = (firm_ids[indices_t] == firm_ids[indices_t].T).astype(np.float64)\n nonownership = 1 - ownership\n for term in generate_instrument_terms():\n other_blocks[-1].append((ownership * term).sum(axis=1, keepdims=True))\n rival_blocks[-1].append((nonownership * term).sum(axis=1, keepdims=True))\n\n return np.c_[np.block(other_blocks), np.block(rival_blocks)]\n\n\ndef build_integration(integration: Integration, dimensions: int) -> RecArray:\n r\"\"\"Build nodes and weights for integration over agent choice probabilities.\n\n This function can be used to build custom ``agent_data`` for :class:`Problem` initialization. Specifically, this\n function affords more flexibility than passing an :class:`Integration` configuration directly to :class:`Problem`.\n For example, if agents have unobserved tastes over only a subset of demand-side nonlinear product characteristics\n (i.e., if ``sigma`` in :meth:`Problem.solve` has columns of zeros), this function can be used to build agent data\n with fewer columns of integration nodes than the number of unobserved product characteristics, :math:`K_2`. This\n function can also be used to construct nodes that can be transformed into demographic variables.\n\n To build nodes and weights for multiple markets, this function can be called multiple times, once for each market.\n\n Parameters\n ----------\n integration : `Integration`\n :class:`Integration` configuration for how to build nodes and weights for integration.\n dimensions : `int`\n Number of dimensions over which to integrate, or equivalently, the number of columns of integration nodes.\n When an :class:`Integration` configuration is passed directly to :class:`Problem`, this is the number of\n demand-side nonlinear product characteristics, :math:`K_2`.\n\n Returns\n -------\n `recarray`\n Nodes and weights for integration over agent utilities. Fields:\n\n - **weights** : (`numeric`) - Integration weights, :math:`w`.\n\n - **nodes** : (`numeric`) - Unobserved agent characteristics called integration nodes, :math:`\\nu`.\n\n Examples\n --------\n .. raw:: latex\n\n \\begin{examplenotebook}\n\n .. toctree::\n\n /_notebooks/api/build_integration.ipynb\n\n .. raw:: latex\n\n \\end{examplenotebook}\n\n \"\"\"\n if not isinstance(integration, Integration):\n raise TypeError(\"integration must be an Integration instance.\")\n if not isinstance(dimensions, int) or dimensions < 1:\n raise ValueError(\"dimensions must be a positive integer.\")\n nodes, weights = integration._build(dimensions)\n return structure_matrices({\n 'weights': (weights, options.dtype),\n 'nodes': (nodes, options.dtype)\n })\n\n\ndef build_matrix(formulation: Formulation, data: Mapping) -> Array:\n r\"\"\"Construct a matrix according to a formulation.\n\n Parameters\n ----------\n formulation : `Formulation`\n :class:`Formulation` configuration for the matrix. Variable names should correspond to fields in ``data``. The\n ``absorb`` argument of :class:`Formulation` can be used to absorb fixed effects after the matrix has been\n constructed.\n data : `structured array-like`\n Fields can be used as variables in ``formulation``.\n\n Returns\n -------\n `ndarray`\n The built matrix.\n\n Examples\n --------\n .. raw:: latex\n\n \\begin{examplenotebook}\n\n .. toctree::\n\n /_notebooks/api/build_matrix.ipynb\n\n .. 
raw:: latex\n\n \\end{examplenotebook}\n\n \"\"\"\n if not isinstance(formulation, Formulation):\n raise TypeError(\"formulation must be a Formulation instance.\")\n matrix = formulation._build_matrix(data)[0]\n if formulation._absorbed_terms:\n absorb = formulation._build_absorb(formulation._build_ids(data))\n matrix, errors = absorb(matrix)\n if errors:\n raise exceptions.MultipleErrors(errors)\n\n return matrix\n\n\ndef data_to_dict(data: RecArray, ignore_empty: bool = True) -> Dict[str, Array]:\n r\"\"\"Convert a NumPy record array into a dictionary.\n\n Most data in PyBLP are structured as NumPy record arrays (e.g., :attr:`Problem.products` and\n :attr:`SimulationResults.product_data`) which can be cumbersome to work with when working with data types that can't\n represent matrices, such as the :class:`pandas.DataFrame`.\n\n This function converts record arrays created by PyBLP into dictionaries that map field names to one-dimensional\n arrays. Matrices in the original record array (e.g., ``demand_instruments``) are split into as many fields as there\n are columns (e.g., ``demand_instruments0``, ``demand_instruments1``, and so on).\n\n Parameters\n ----------\n data : `recarray`\n Record array created by PyBLP.\n ignore_empty : `bool, optional`\n Whether to ignore matrices with zero size. By default, these are ignored.\n\n Returns\n -------\n `dict`\n The data re-structured as a dictionary.\n\n Examples\n --------\n .. raw:: latex\n\n \\begin{examplenotebook}\n\n .. toctree::\n\n /_notebooks/api/data_to_dict.ipynb\n\n .. raw:: latex\n\n \\end{examplenotebook}\n\n \"\"\"\n if not isinstance(data, np.recarray):\n raise TypeError(\"data must be a NumPy record array.\")\n\n mapping: Dict[str, Array] = {}\n for key in data.dtype.names:\n if len(data[key].shape) > 2:\n raise ValueError(\"Arrays with more than two dimensions are not supported.\")\n if ignore_empty and data[key].size == 0:\n continue\n if len(data[key].shape) == 1 or data[key].shape[1] == 1 or data[key].size == 0:\n mapping[key] = data[key].flatten()\n continue\n for index in range(data[key].shape[1]):\n new_key = f'{key}{index}'\n if new_key in data.dtype.names:\n raise KeyError(f\"'{key}' cannot be split into columns because '{new_key}' is already a field.\")\n mapping[new_key] = data[key][:, index].flatten()\n\n return mapping\n" ]
[ [ "numpy.full", "numpy.fill_diagonal", "numpy.vectorize", "numpy.errstate", "numpy.ascontiguousarray", "numpy.sum", "numpy.block", "numpy.tile", "numpy.eye", "numpy.where", "numpy.arange", "numpy.sqrt", "numpy.abs" ] ]
waterljwant/UGV-KPNet
[ "99f9e3922cdbb8c1116cb3f807be3874133a1979" ]
[ "evaluate/tool_ugv_eval.py" ]
[ "__author__ = 'tsungyi'\n\nimport numpy as np\nimport datetime\nimport time\nfrom collections import defaultdict\n# from . import mask as maskUtils\nimport copy\n\n\nclass UGV_Eval:\n # Interface for evaluating detection on the Microsoft COCO dataset.\n #\n # The usage for CocoEval is as follows:\n # cocoGt=..., cocoDt=... # load dataset and results\n # E = CocoEval(cocoGt,cocoDt); # initialize CocoEval object\n # E.params.recThrs = ...; # set parameters as desired\n # E.evaluate(); # run per image evaluation\n # E.accumulate(); # accumulate per image results\n # E.summarize(); # display summary metrics of results\n # For example usage see evalDemo.m and http://mscoco.org/.\n #\n # The evaluation parameters are as follows (defaults in brackets):\n # imgIds - [all] N img ids to use for evaluation\n # catIds - [all] K cat ids to use for evaluation\n # iouThrs - [.5:.05:.95] T=10 IoU thresholds for evaluation\n # recThrs - [0:.01:1] R=101 recall thresholds for evaluation\n # areaRng - [...] A=4 object area ranges for evaluation\n # maxDets - [1 10 100] M=3 thresholds on max detections per image\n # iouType - ['segm'] set iouType to 'segm', 'bbox' or 'keypoints'\n # iouType replaced the now DEPRECATED useSegm parameter.\n # useCats - [1] if true use category labels for evaluation\n # Note: if useCats=0 category labels are ignored as in proposal scoring.\n # Note: multiple areaRngs [Ax2] and maxDets [Mx1] can be specified.\n #\n # evaluate(): evaluates detections on every image and every category and\n # concats the results into the \"evalImgs\" with fields:\n # dtIds - [1xD] id for each of the D detections (dt)\n # gtIds - [1xG] id for each of the G ground truths (gt)\n # dtMatches - [TxD] matching gt id at each IoU or 0\n # gtMatches - [TxG] matching dt id at each IoU or 0\n # dtScores - [1xD] confidence of each dt\n # gtIgnore - [1xG] ignore flag for each gt\n # dtIgnore - [TxD] ignore flag for each dt at each IoU\n #\n # accumulate(): accumulates the per-image, per-category evaluation\n # results in \"evalImgs\" into the dictionary \"eval\" with fields:\n # params - parameters used for evaluation\n # date - date evaluation was performed\n # counts - [T,R,K,A,M] parameter dimensions (see above)\n # precision - [TxRxKxAxM] precision for every evaluation setting\n # recall - [TxKxAxM] max recall for every evaluation setting\n # Note: precision and recall==-1 for settings with no gt objects.\n #\n # See also coco, mask, pycocoDemo, pycocoEvalDemo\n #\n # Microsoft COCO Toolbox. version 2.0\n # Data, paper, and tutorials available at: http://mscoco.org/\n # Code written by Piotr Dollar and Tsung-Yi Lin, 2015.\n # Licensed under the Simplified BSD License [see coco/license.txt]\n def __init__(self, cocoGt=None, cocoDt=None, iouType='keypoints'):\n '''\n Initialize CocoEval using coco APIs for gt and dt\n :param cocoGt: coco object with ground truth annotations\n :param cocoDt: coco object with detection results\n :return: None\n '''\n if not iouType:\n print('iouType not specified. 
use default iouType keypoints')\n self.cocoGt = cocoGt # ground truth COCO API\n self.cocoDt = cocoDt # detections COCO API\n self.params = {} # evaluation parameters\n self.evalImgs = defaultdict(list) # per-image per-category evaluation results [KxAxI] elements\n self.eval = {} # accumulated evaluation results\n self._gts = defaultdict(list) # gt for evaluation\n self._dts = defaultdict(list) # dt for evaluation\n self.params = Params(iouType=iouType) # parameters\n self._paramsEval = {} # parameters for evaluation\n self.stats = [] # result summarization\n self.ious = {} # ious between all gts and dts\n if not cocoGt is None:\n self.params.imgIds = sorted(cocoGt.getImgIds())\n self.params.catIds = sorted(cocoGt.getCatIds())\n\n\n def _prepare(self):\n '''\n Prepare ._gts and ._dts for evaluation based on params\n :return: None\n '''\n def _toMask(anns, coco):\n # modify ann['segmentation'] by reference\n for ann in anns:\n rle = coco.annToRLE(ann)\n ann['segmentation'] = rle\n p = self.params\n if p.useCats:\n gts=self.cocoGt.loadAnns(self.cocoGt.getAnnIds(imgIds=p.imgIds, catIds=p.catIds))\n dts=self.cocoDt.loadAnns(self.cocoDt.getAnnIds(imgIds=p.imgIds, catIds=p.catIds))\n else:\n gts=self.cocoGt.loadAnns(self.cocoGt.getAnnIds(imgIds=p.imgIds))\n dts=self.cocoDt.loadAnns(self.cocoDt.getAnnIds(imgIds=p.imgIds))\n\n # convert ground truth to mask if iouType == 'segm'\n if p.iouType == 'segm':\n _toMask(gts, self.cocoGt)\n _toMask(dts, self.cocoDt)\n # set ignore flag\n for gt in gts:\n gt['ignore'] = gt['ignore'] if 'ignore' in gt else 0\n gt['ignore'] = 'iscrowd' in gt and gt['iscrowd']\n if p.iouType == 'keypoints':\n gt['ignore'] = (gt['num_keypoints'] == 0) or gt['ignore']\n self._gts = defaultdict(list) # gt for evaluation\n self._dts = defaultdict(list) # dt for evaluation\n for gt in gts:\n self._gts[gt['image_id'], gt['category_id']].append(gt)\n for dt in dts:\n self._dts[dt['image_id'], dt['category_id']].append(dt)\n self.evalImgs = defaultdict(list) # per-image per-category evaluation results\n self.eval = {} # accumulated evaluation results\n\n def evaluate(self):\n '''\n Run per image evaluation on given images and store results (a list of dict) in self.evalImgs\n :return: None\n '''\n tic = time.time()\n print('Running per image evaluation...')\n p = self.params\n # add backward compatibility if useSegm is specified in params\n if not p.useSegm is None:\n p.iouType = 'segm' if p.useSegm == 1 else 'bbox'\n print('useSegm (deprecated) is not None. 
Running {} evaluation'.format(p.iouType))\n print('Evaluate annotation type *{}*'.format(p.iouType))\n p.imgIds = list(np.unique(p.imgIds))\n if p.useCats:\n p.catIds = list(np.unique(p.catIds))\n p.maxDets = sorted(p.maxDets)\n self.params=p\n\n self._prepare()\n # loop through images, area range, max detection number\n catIds = p.catIds if p.useCats else [-1]\n\n if p.iouType == 'segm' or p.iouType == 'bbox':\n computeIoU = self.computeIoU\n elif p.iouType == 'keypoints':\n computeIoU = self.computeOks\n self.ious = {(imgId, catId): computeIoU(imgId, catId) \\\n for imgId in p.imgIds\n for catId in catIds}\n\n evaluateImg = self.evaluateImg\n maxDet = p.maxDets[-1]\n self.evalImgs = [evaluateImg(imgId, catId, areaRng, maxDet)\n for catId in catIds\n for areaRng in p.areaRng\n for imgId in p.imgIds\n ]\n self._paramsEval = copy.deepcopy(self.params)\n toc = time.time()\n print('DONE (t={:0.2f}s).'.format(toc-tic))\n\n # def computeIoU(self, imgId, catId):\n # p = self.params\n # if p.useCats:\n # gt = self._gts[imgId,catId]\n # dt = self._dts[imgId,catId]\n # else:\n # gt = [_ for cId in p.catIds for _ in self._gts[imgId,cId]]\n # dt = [_ for cId in p.catIds for _ in self._dts[imgId,cId]]\n # if len(gt) == 0 and len(dt) ==0:\n # return []\n # inds = np.argsort([-d['score'] for d in dt], kind='mergesort')\n # dt = [dt[i] for i in inds]\n # if len(dt) > p.maxDets[-1]:\n # dt=dt[0:p.maxDets[-1]]\n #\n # if p.iouType == 'segm':\n # g = [g['segmentation'] for g in gt]\n # d = [d['segmentation'] for d in dt]\n # elif p.iouType == 'bbox':\n # g = [g['bbox'] for g in gt]\n # d = [d['bbox'] for d in dt]\n # else:\n # raise Exception('unknown iouType for iou computation')\n #\n # # compute iou between each dt and gt region\n # iscrowd = [int(o['iscrowd']) for o in gt]\n # ious = maskUtils.iou(d,g,iscrowd)\n # return ious\n\n def computeOks(self, imgId, catId):\n p = self.params\n # dimention here should be Nxm\n gts = self._gts[imgId, catId]\n dts = self._dts[imgId, catId]\n inds = np.argsort([-d['score'] for d in dts], kind='mergesort')\n dts = [dts[i] for i in inds]\n if len(dts) > p.maxDets[-1]:\n dts = dts[0:p.maxDets[-1]]\n # if len(gts) == 0 and len(dts) == 0:\n if len(gts) == 0 or len(dts) == 0:\n return []\n ious = np.zeros((len(dts), len(gts)))\n # sigmas = np.array([.26, .25, .25, .35, .35, .79, .79, .72, .72, .62,.62, 1.07, 1.07, .87, .87, .89, .89])/10.0\n sigmas = np.array([1., 1., 1., 1.]) / 10.0\n vars = (sigmas * 2)**2\n k = len(sigmas)\n # compute oks between each detection and ground truth object\n for j, gt in enumerate(gts):\n # create bounds for ignore regions(double the gt bbox)\n g = np.array(gt['keypoints'])\n xg = g[0::3]; yg = g[1::3]; vg = g[2::3]\n k1 = np.count_nonzero(vg > 0)\n bb = gt['bbox']\n x0 = bb[0] - bb[2]; x1 = bb[0] + bb[2] * 2\n y0 = bb[1] - bb[3]; y1 = bb[1] + bb[3] * 2\n for i, dt in enumerate(dts):\n d = np.array(dt['keypoints'])\n xd = d[0::3]; yd = d[1::3]\n if k1>0:\n # measure the per-keypoint distance if keypoints visible\n dx = xd - xg\n dy = yd - yg\n else:\n # measure minimum distance to keypoints in (x0,y0) & (x1,y1)\n z = np.zeros((k))\n dx = np.max((z, x0-xd),axis=0)+np.max((z, xd-x1),axis=0)\n dy = np.max((z, y0-yd),axis=0)+np.max((z, yd-y1),axis=0)\n e = (dx**2 + dy**2) / vars / (gt['area']+np.spacing(1)) / 2\n if k1 > 0:\n e=e[vg > 0]\n ious[i, j] = np.sum(np.exp(-e)) / e.shape[0]\n return ious\n\n def evaluateImg(self, imgId, catId, aRng, maxDet):\n '''\n perform evaluation for single category and image\n :return: dict (single image 
results)\n '''\n p = self.params\n if p.useCats:\n gt = self._gts[imgId,catId]\n dt = self._dts[imgId,catId]\n else:\n gt = [_ for cId in p.catIds for _ in self._gts[imgId,cId]]\n dt = [_ for cId in p.catIds for _ in self._dts[imgId,cId]]\n if len(gt) == 0 and len(dt) ==0:\n return None\n\n for g in gt:\n if g['ignore'] or (g['area']<aRng[0] or g['area']>aRng[1]):\n g['_ignore'] = 1\n else:\n g['_ignore'] = 0\n\n # sort dt highest score first, sort gt ignore last\n gtind = np.argsort([g['_ignore'] for g in gt], kind='mergesort')\n gt = [gt[i] for i in gtind]\n dtind = np.argsort([-d['score'] for d in dt], kind='mergesort')\n dt = [dt[i] for i in dtind[0:maxDet]]\n iscrowd = [int(o['iscrowd']) for o in gt]\n # load computed ious\n ious = self.ious[imgId, catId][:, gtind] if len(self.ious[imgId, catId]) > 0 else self.ious[imgId, catId]\n\n T = len(p.iouThrs)\n G = len(gt)\n D = len(dt)\n gtm = np.zeros((T,G))\n dtm = np.zeros((T,D))\n gtIg = np.array([g['_ignore'] for g in gt])\n dtIg = np.zeros((T,D))\n if not len(ious)==0:\n for tind, t in enumerate(p.iouThrs):\n for dind, d in enumerate(dt):\n # information about best match so far (m=-1 -> unmatched)\n iou = min([t,1-1e-10])\n m = -1\n for gind, g in enumerate(gt):\n # if this gt already matched, and not a crowd, continue\n if gtm[tind,gind]>0 and not iscrowd[gind]:\n continue\n # if dt matched to reg gt, and on ignore gt, stop\n if m>-1 and gtIg[m]==0 and gtIg[gind]==1:\n break\n # continue to next gt unless better match made\n if ious[dind,gind] < iou:\n continue\n # if match successful and best so far, store appropriately\n iou=ious[dind,gind]\n m=gind\n # if match made store id of match for both dt and gt\n if m ==-1:\n continue\n dtIg[tind,dind] = gtIg[m]\n dtm[tind,dind] = gt[m]['id']\n gtm[tind,m] = d['id']\n # set unmatched detections outside of area range to ignore\n a = np.array([d['area']<aRng[0] or d['area']>aRng[1] for d in dt]).reshape((1, len(dt)))\n dtIg = np.logical_or(dtIg, np.logical_and(dtm==0, np.repeat(a,T,0)))\n # store results for given image and category\n return {\n 'image_id': imgId,\n 'category_id': catId,\n 'aRng': aRng,\n 'maxDet': maxDet,\n 'dtIds': [d['id'] for d in dt],\n 'gtIds': [g['id'] for g in gt],\n 'dtMatches': dtm,\n 'gtMatches': gtm,\n 'dtScores': [d['score'] for d in dt],\n 'gtIgnore': gtIg,\n 'dtIgnore': dtIg,\n }\n\n def accumulate(self, p = None):\n '''\n Accumulate per image evaluation results and store the result in self.eval\n :param p: input params for evaluation\n :return: None\n '''\n print('Accumulating evaluation results...')\n tic = time.time()\n if not self.evalImgs:\n print('Please run evaluate() first')\n # allows input customized parameters\n if p is None:\n p = self.params\n p.catIds = p.catIds if p.useCats == 1 else [-1]\n T = len(p.iouThrs)\n R = len(p.recThrs)\n K = len(p.catIds) if p.useCats else 1\n A = len(p.areaRng)\n M = len(p.maxDets)\n precision = -np.ones((T,R,K,A,M)) # -1 for the precision of absent categories\n recall = -np.ones((T,K,A,M))\n scores = -np.ones((T,R,K,A,M))\n\n # create dictionary for future indexing\n _pe = self._paramsEval\n catIds = _pe.catIds if _pe.useCats else [-1]\n setK = set(catIds)\n setA = set(map(tuple, _pe.areaRng))\n setM = set(_pe.maxDets)\n setI = set(_pe.imgIds)\n # get inds to evaluate\n k_list = [n for n, k in enumerate(p.catIds) if k in setK]\n m_list = [m for n, m in enumerate(p.maxDets) if m in setM]\n a_list = [n for n, a in enumerate(map(lambda x: tuple(x), p.areaRng)) if a in setA]\n i_list = [n for n, i in enumerate(p.imgIds) 
if i in setI]\n I0 = len(_pe.imgIds)\n A0 = len(_pe.areaRng)\n # retrieve E at each category, area range, and max number of detections\n for k, k0 in enumerate(k_list):\n Nk = k0*A0*I0\n for a, a0 in enumerate(a_list):\n Na = a0*I0\n for m, maxDet in enumerate(m_list):\n E = [self.evalImgs[Nk + Na + i] for i in i_list]\n E = [e for e in E if not e is None]\n if len(E) == 0:\n continue\n dtScores = np.concatenate([e['dtScores'][0:maxDet] for e in E])\n\n # different sorting method generates slightly different results.\n # mergesort is used to be consistent as Matlab implementation.\n inds = np.argsort(-dtScores, kind='mergesort')\n dtScoresSorted = dtScores[inds]\n\n dtm = np.concatenate([e['dtMatches'][:,0:maxDet] for e in E], axis=1)[:,inds]\n dtIg = np.concatenate([e['dtIgnore'][:,0:maxDet] for e in E], axis=1)[:,inds]\n gtIg = np.concatenate([e['gtIgnore'] for e in E])\n npig = np.count_nonzero(gtIg==0 )\n if npig == 0:\n continue\n tps = np.logical_and( dtm, np.logical_not(dtIg) )\n fps = np.logical_and(np.logical_not(dtm), np.logical_not(dtIg) )\n\n tp_sum = np.cumsum(tps, axis=1).astype(dtype=np.float)\n fp_sum = np.cumsum(fps, axis=1).astype(dtype=np.float)\n for t, (tp, fp) in enumerate(zip(tp_sum, fp_sum)):\n tp = np.array(tp)\n fp = np.array(fp)\n nd = len(tp)\n rc = tp / npig\n pr = tp / (fp+tp+np.spacing(1))\n q = np.zeros((R,))\n ss = np.zeros((R,))\n\n if nd:\n recall[t,k,a,m] = rc[-1]\n else:\n recall[t,k,a,m] = 0\n\n # numpy is slow without cython optimization for accessing elements\n # use python array gets significant speed improvement\n pr = pr.tolist(); q = q.tolist()\n\n for i in range(nd-1, 0, -1):\n if pr[i] > pr[i-1]:\n pr[i-1] = pr[i]\n\n inds = np.searchsorted(rc, p.recThrs, side='left')\n try:\n for ri, pi in enumerate(inds):\n q[ri] = pr[pi]\n ss[ri] = dtScoresSorted[pi]\n except:\n pass\n precision[t,:,k,a,m] = np.array(q)\n scores[t,:,k,a,m] = np.array(ss)\n self.eval = {\n 'params': p,\n 'counts': [T, R, K, A, M],\n 'date': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),\n 'precision': precision,\n 'recall': recall,\n 'scores': scores,\n }\n toc = time.time()\n print('DONE (t={:0.2f}s).'.format( toc-tic))\n\n def summarize(self):\n '''\n Compute and display summary metrics for evaluation results.\n Note this functin can *only* be applied on the default parameter setting\n '''\n def _summarize( ap=1, iouThr=None, areaRng='all', maxDets=100 ):\n p = self.params\n iStr = ' {:<18} {} @[ IoU={:<9} | area={:>6s} | maxDets={:>3d} ] = {:0.3f}'\n titleStr = 'Average Precision' if ap == 1 else 'Average Recall'\n typeStr = '(AP)' if ap==1 else '(AR)'\n iouStr = '{:0.2f}:{:0.2f}'.format(p.iouThrs[0], p.iouThrs[-1]) \\\n if iouThr is None else '{:0.2f}'.format(iouThr)\n\n aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng]\n mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets]\n if ap == 1:\n # dimension of precision: [TxRxKxAxM]\n s = self.eval['precision']\n # IoU\n if iouThr is not None:\n t = np.where(iouThr == p.iouThrs)[0]\n s = s[t]\n s = s[:,:,:,aind,mind]\n else:\n # dimension of recall: [TxKxAxM]\n s = self.eval['recall']\n if iouThr is not None:\n t = np.where(iouThr == p.iouThrs)[0]\n s = s[t]\n s = s[:,:,aind,mind]\n if len(s[s>-1])==0:\n mean_s = -1\n else:\n mean_s = np.mean(s[s>-1])\n print(iStr.format(titleStr, typeStr, iouStr, areaRng, maxDets, mean_s))\n return mean_s\n def _summarizeDets():\n stats = np.zeros((12,))\n stats[0] = _summarize(1)\n stats[1] = _summarize(1, iouThr=.5, maxDets=self.params.maxDets[2])\n 
stats[2] = _summarize(1, iouThr=.75, maxDets=self.params.maxDets[2])\n stats[3] = _summarize(1, areaRng='small', maxDets=self.params.maxDets[2])\n stats[4] = _summarize(1, areaRng='medium', maxDets=self.params.maxDets[2])\n stats[5] = _summarize(1, areaRng='large', maxDets=self.params.maxDets[2])\n stats[6] = _summarize(0, maxDets=self.params.maxDets[0])\n stats[7] = _summarize(0, maxDets=self.params.maxDets[1])\n stats[8] = _summarize(0, maxDets=self.params.maxDets[2])\n stats[9] = _summarize(0, areaRng='small', maxDets=self.params.maxDets[2])\n stats[10] = _summarize(0, areaRng='medium', maxDets=self.params.maxDets[2])\n stats[11] = _summarize(0, areaRng='large', maxDets=self.params.maxDets[2])\n return stats\n def _summarizeKps():\n stats = np.zeros((10,))\n stats[0] = _summarize(1, maxDets=20)\n stats[1] = _summarize(1, maxDets=20, iouThr=.5)\n stats[2] = _summarize(1, maxDets=20, iouThr=.75)\n stats[3] = _summarize(1, maxDets=20, areaRng='medium')\n stats[4] = _summarize(1, maxDets=20, areaRng='large')\n stats[5] = _summarize(0, maxDets=20)\n stats[6] = _summarize(0, maxDets=20, iouThr=.5)\n stats[7] = _summarize(0, maxDets=20, iouThr=.75)\n stats[8] = _summarize(0, maxDets=20, areaRng='medium')\n stats[9] = _summarize(0, maxDets=20, areaRng='large')\n return stats\n if not self.eval:\n raise Exception('Please run accumulate() first')\n iouType = self.params.iouType\n if iouType == 'segm' or iouType == 'bbox':\n summarize = _summarizeDets\n elif iouType == 'keypoints':\n summarize = _summarizeKps\n self.stats = summarize()\n\n def __str__(self):\n self.summarize()\n\nclass Params:\n '''\n Params for coco evaluation api\n '''\n def setDetParams(self):\n self.imgIds = []\n self.catIds = []\n # np.arange causes trouble. the data point on arange is slightly larger than the true value\n self.iouThrs = np.linspace(.5, 0.95, np.round((0.95 - .5) / .05) + 1, endpoint=True)\n self.recThrs = np.linspace(.0, 1.00, np.round((1.00 - .0) / .01) + 1, endpoint=True)\n self.maxDets = [1, 10, 100]\n self.areaRng = [[0 ** 2, 1e5 ** 2], [0 ** 2, 32 ** 2], [32 ** 2, 96 ** 2], [96 ** 2, 1e5 ** 2]]\n self.areaRngLbl = ['all', 'small', 'medium', 'large']\n self.useCats = 1\n\n def setKpParams(self):\n self.imgIds = []\n self.catIds = []\n # np.arange causes trouble. the data point on arange is slightly larger than the true value\n self.iouThrs = np.linspace(.5, 0.95, np.round((0.95 - .5) / .05) + 1, endpoint=True)\n self.recThrs = np.linspace(.0, 1.00, np.round((1.00 - .0) / .01) + 1, endpoint=True)\n self.maxDets = [20]\n self.areaRng = [[0 ** 2, 1e5 ** 2], [32 ** 2, 96 ** 2], [96 ** 2, 1e5 ** 2]]\n self.areaRngLbl = ['all', 'medium', 'large']\n self.useCats = 1\n\n def __init__(self, iouType='segm'):\n if iouType == 'segm' or iouType == 'bbox':\n self.setDetParams()\n elif iouType == 'keypoints':\n self.setKpParams()\n else:\n raise Exception('iouType not supported')\n self.iouType = iouType\n # useSegm is deprecated\n self.useSegm = None" ]
[ [ "numpy.concatenate", "numpy.max", "numpy.array", "numpy.count_nonzero", "numpy.logical_not", "numpy.zeros", "numpy.round", "numpy.ones", "numpy.exp", "numpy.mean", "numpy.where", "numpy.argsort", "numpy.cumsum", "numpy.repeat", "numpy.searchsorted", "numpy.unique", "numpy.spacing" ] ]
devmofl/M5_Accuracy_3rd
[ "b02bd4333520d71ff01b66c0446bb07324cfb0d1" ]
[ "pts/modules/distribution/constant.py" ]
[ "import torch\nfrom torch.distributions.distribution import Distribution\n\nclass ConstantDistribution(Distribution):\n r\"\"\"\n Creates a constant distribution, i.e. Var(x) = 0\n \n Args: \n loss_type: L1 or L2\n mu (Tensor): mean\n \"\"\"\n def __init__(self, loss_type, mu, validate_args=None):\n self.loss_type = loss_type\n self.mu = mu\n\n batch_shape = mu.size()\n super(ConstantDistribution, self).__init__(batch_shape, validate_args=validate_args)\n\n @property\n def mean(self):\n return self.mu\n\n @property\n def variance(self):\n return torch.zeros_like(self.mu)\n\n def sample(self, sample_shape=torch.Size()):\n return torch.ones_like(self.mu) * self.mu\n\n def log_prob(self, y_true):\n mu = self.mu\n\n if self.loss_type == \"L1\":\n loss = torch.abs(y_true - mu)\n elif self.loss_type == \"L2\":\n loss = (y_true - mu)**2\n else:\n raise NotImplementedError\n\n return -loss # loss == negative log_prob" ]
[ [ "torch.zeros_like", "torch.ones_like", "torch.abs", "torch.Size" ] ]
MagiMas/ArpesBandmass
[ "98785fabcb7977e110532a984d40971b915f09b3" ]
[ "Basefile.py" ]
[ "# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport os\nimport matplotlib.pyplot as plt\nfrom scipy.interpolate import interp1d, interp2d\nfrom scipy.ndimage import convolve1d\n\nclass Spectra1D(object):\n \"\"\"\n Holds a 1d spectrum.\n \"\"\"\n\n def __init__(self, xdata, ydata, name=\"\", directory=\"\", plotname=\"\"):\n self.PLTNM = plotname\n self.PATH = directory\n self.NAME = name\n self.XDATA = xdata\n self.YDATA = ydata\n self.IDATA = interp1d(xdata, ydata, bounds_error=False)\n self.xlabel = \"\"\n self.ylabel = \"\"\n\n def plot1D(self):\n \"\"\"\n Simple plotting procedure for fast checking\n :return:\n \"\"\"\n fig = plt.figure(figsize=(4, 3), dpi=200)\n ax = fig.add_subplot(111)\n ax.plot(self.XDATA, self.IDATA(self.XDATA))\n ax.set_xlabel(self.xlabel)\n ax.set_ylabel(self.ylabel)\n plt.tight_layout()\n\n def reinterpolate_data(self):\n \"\"\"reinterpolate data after adjustment\"\"\"\n self.IDATA = interp1d(self.XDATA, self.YDATA)\n\n def adjust_range(self, dim, value):\n \"\"\"\n Adjusts the x or y range by the given value.\n :param dim: put \"x\" or \"y\" as string to define dimension\n :param value: float to add to the range\n \"\"\"\n if dim == \"x\":\n self.XDATA += value\n elif dim == \"y\":\n self.YDATA += value\n else:\n print(\"wrong dimension\")\n self.reinterpolate_data()\n\n\nclass SpectraBase(object):\n \"\"\"\n Holds all the data of an ARPES spectrum, intensities and xLimits/yLimits. Add features for data analysis here\n \"\"\"\n def __init__(self, data, xlimits, ylimits, filepath):\n self.PATH = os.path.dirname(filepath)\n self.NAME = os.path.basename(filepath)\n #self.DATA = data\n self.xvals = np.linspace(xlimits[0],xlimits[1],data.shape[1])\n self.yvals = np.linspace(ylimits[0],ylimits[1],data.shape[0])\n self.IDATA = interp2d(self.xvals, self.yvals, data, fill_value=0.) # linear interpolation of data\n self.xLimits = np.array(xlimits)\n self.yLimits = np.array(ylimits)\n\n def lineprofileX(self, yval, breadth=0.):\n \"\"\"\n returns the lineprofile along the x direction for a given y value with a broadening breadth\n :param yval: float\n :param breadth: float\n :return: xvalues, profile both as 1d arrays\n \"\"\"\n profile = np.sum(self.IDATA(self.xvals, [yval - 0.5*breadth + breadth*float(i)/20. 
for i in range(21)]),axis=0)\n return self.xvals, profile\n\n def lineprofileY(self, xval, breadth=0.):\n \"\"\"\n returns the lineprofile along the y direction for a given x value with a broadening breadth\n :param xval: float\n :param breadth: float\n :return: yvalues, profile both as 1d arrays\n \"\"\"\n profile = np.sum(self.IDATA([xval - 0.5 * breadth + breadth * float(i)/20.\n for i in range(21)], self.yvals), axis=1)\n return self.yvals, profile\n\n def lineprofileXY(self, dim, val, breadth=0.):\n \"\"\"\n combines lineprofileX and lineprofileY, choose dimension via dim\n :param dim: \"x\" or \"y\" string\n :param val: value where to cut the profile\n :param breadth: width of the profile\n :return: values and profile\n \"\"\"\n if dim == \"x\":\n vals, profile = self.lineprofileX(val, breadth)\n elif dim == \"y\":\n vals, profile = self.lineprofileY(val, breadth)\n else:\n print(\"lineprofile dim does not exist, \", dim)\n vals, profile = np.array([]),np.array([])\n return vals, profile\n\n def lineprofileFree(self, strtpnt, endpnt, N):\n \"\"\"\n Returns the line profile along an arbitrary line with N steps\n :param strtpnt: 2-point array [x,y] for starting position\n :param endpnt: 2-point array [x,y] for end position\n :param N: integer on how many steps\n :return: lineprofile as a 1d array\n \"\"\"\n dv = (np.array(endpnt) - np.array(strtpnt))/float(N)\n profile = [self.IDATA(strtpnt[0] + dv[0]*i, strtpnt[1]+dv[1]*i)[0] for i in range(N)]\n\n return np.linspace(0,np.linalg.norm(np.array(endpnt) - np.array(strtpnt)),N), profile\n\n def adjust_Fermilevel(self, xList, yList):\n \"\"\"\n Method to correct the Fermi Level of the Spectrum. Give the position of the Fermi level in x and y coordinates,\n the level will be adjusted to flatten the Fermilevel\n :param xList:\n :param yList:\n :return:\n \"\"\"\n level = interp1d(xList,yList,fill_value=yList[0],bounds_error=False)\n level0 = yList[0]\n temp_data = np.zeros((len(self.yvals), len(self.xvals)))\n count = 0\n for xi in self.xvals:\n temp_data[:, count] = self.IDATA(xi, self.yvals + level(xi) - level0)[:,0]\n count += 1\n #self.DATA = temp_data\n self.reinterpolate_data(temp_data)\n\n def adjust_range(self, dim, value):\n \"\"\"\n Adjusts the x or y range by the given value.\n :param dim: put \"x\" or \"y\" as string to define dimension\n :param value: float to add to the range\n \"\"\"\n if dim == \"x\":\n self.xLimits += value\n elif dim == \"y\":\n self.yLimits += value\n else:\n print(\"wrong dimension\")\n data = self.IDATA(self.xvals, self.yvals)\n self.regenerate_vals_from_limits()\n self.reinterpolate_data(data)\n\n def regenerate_vals_from_limits(self):\n \"\"\"\n generate new xvals and yvals after changing limits\n \"\"\"\n self.xvals = np.linspace(self.xLimits[0],self.xLimits[1],len(self.xvals))\n self.yvals = np.linspace(self.yLimits[0],self.yLimits[1],len(self.yvals))\n\n def reinterpolate_data(self,data):\n \"\"\"\n reinterpolate data after changing xvals or yvals\n \"\"\"\n self.IDATA = interp2d(self.xvals, self.yvals, data, fill_value=0.)\n\n def cutData(self, dim, min, max):\n \"\"\"\n Cut the data between min and max along dimension dim\n :param dim: \"x\" or \"y\" string\n :param min: float\n :param max: float\n :return:\n \"\"\"\n if dim == \"x\":\n self.xLimits = np.array([min, max], dtype=float)\n self.regenerate_vals_from_limits()\n data = self.IDATA(self.xvals, self.yvals)\n self.reinterpolate_data(data)\n elif dim == \"y\":\n self.yLimits = np.array([min, max], dtype=float)\n 
self.regenerate_vals_from_limits()\n data = self.IDATA(self.xvals, self.yvals)\n self.reinterpolate_data(data)\n\n def smoothData(self, dim, num, passes):\n \"\"\"\n Uses convolution to smooth self.DATA along dim using a boxcar smooting. Num gives the size of the boxcar,\n passes the amount of smoothing passes.\n :param dim: \"x\" or \"y\" string\n :param num: int for the size of the boxcar\n :param passes: int for the amount of passes\n :return: ndarray smoothed version of self.DATA\n \"\"\"\n if dim == \"x\":\n tempdat = self.IDATA(self.xvals, self.yvals)\n for p in range(passes):\n tempdat = convolve1d(tempdat, np.array([1.]*num),axis=1,mode='nearest')\n elif dim == \"y\":\n tempdat = self.IDATA(self.xvals, self.yvals)\n for p in range(passes):\n tempdat = convolve1d(tempdat, np.array([1.] * num), axis=0, mode='nearest')\n else:\n print(\"smoothing not possible: (dim, num, passes) \", dim,\",\", num,\",\", passes)\n tempdat = self.IDATA(self.xvals, self.yvals)\n return tempdat\n\n def secondDerivative(self, dim, num, passes):\n \"\"\"\n Second derivative of self.DATA, num gives the size of the Boxcar for smoothing, passes the amount of smoothing\n passes before applying the 2nd derivative. This method uses convolutions to get the 2nd derivative of the data.\n :param dim: \"x\" or \"y\" string for the dimension along which the derivative should be applied.\n :param num: int for size of boxcar\n :param passes: int for number of smoothing passes\n :return: ndarray 2nd derivative of self.DATA\n \"\"\"\n smth = self.smoothData(dim, num, passes)\n if dim == \"x\":\n d2 = convolve1d(smth, np.array([1.,-2.,1.]),axis=1, mode='nearest')\n elif dim == \"y\":\n d2 = convolve1d(smth, np.array([1.,-2.,1.]), axis=0, mode='nearest')\n else:\n print(\"2nd derivative not possible: (dim, num, passes) \", dim,\",\", num,\",\", passes)\n d2 = self.IDATA(self.xvals, self.yvals)\n return d2\n\n\nclass Spectra(SpectraBase):\n \"\"\"\n Builds on SpectraBase to add conversion to k Space and simple plotting procedures. 
In most cases this class should\n be used to hold the ARPES data.\n \"\"\"\n\n kSpace = False\n xlabel = 'Angle [deg]'\n xlabelK = 'Wavevector [$\\mathrm{\\AA^{-1}}]$'\n ylabel = 'Energy [eV]'\n\n def plot_data(self, mirrorY = True, mirrorX = False, save=False, plot=True):\n \"\"\"\n simple fast plotting to check the data without much features\n :return:\n \"\"\"\n fig, ax = plt.subplots(1,1, figsize=(4,3), dpi=200)\n ax.imshow(self.IDATA(self.xvals,self.yvals),\n extent=[self.xLimits[0],self.xLimits[1],self.yLimits[1],self.yLimits[0]],\n aspect='auto')\n if self.kSpace:\n ax.set_xlabel(self.xlabelK)\n else:\n ax.set_xlabel(self.xlabel)\n ax.set_ylabel(self.ylabel)\n plt.gca().invert_yaxis()\n if mirrorY:\n ax.invert_yaxis()\n if mirrorX:\n ax.invert_xaxis()\n plt.tight_layout()\n if save:\n plt.savefig(os.path.join(self.PATH, self.NAME[:-4]) + \".png\")\n if plot:\n plt.show()\n return ax\n\n def plot_data_on_ax(self, ax, mirrorY = False, mirrorX = False, save=False, plot=True):\n \"\"\"\n simple fast plotting to check the data without much features\n :return:\n \"\"\"\n ims = ax.get_images()\n for im in ims:\n if im:\n im.remove()\n ax.imshow(self.IDATA(self.xvals,self.yvals),\n extent=[self.xLimits[0],self.xLimits[1],self.yLimits[1],self.yLimits[0]],\n aspect='auto')\n ax.relim()\n\n #if self.kSpace:\n # ax.set_xlabel(self.xlabelK)\n #else:\n # ax.set_xlabel(self.xlabel)\n #ax.set_ylabel(self.ylabel)\n #plt.gca().invert_yaxis()\n if mirrorY:\n ax.invert_yaxis()\n if mirrorX:\n ax.invert_xaxis()\n plt.tight_layout()\n if save:\n plt.savefig(os.path.join(self.PATH, self.NAME[:-4]) + \".png\")\n if plot:\n plt.show()\n return ax\n\n def convertKtoAngle(self,k,E):\n \"\"\"\n convert K to Angle\n :param k: float Wavevector\n :param E: float Energy\n :return:\n \"\"\"\n hbar = 1.05e-34\n mass = 9.11e-31\n return np.sign(k)*np.arcsin(hbar*(np.sign(k)*k*1e10) / np.sqrt(2.*mass*E*1.6e-19))*180./np.pi\n\n def convertAngleToK(self,a,E):\n \"\"\"\n convert Angle to K space\n :param a: float angle\n :param E: float energy\n :return:\n \"\"\"\n hbar = 1.05e-34\n mass = 9.11e-31\n return np.sqrt(2.*mass*E*1.6e-19)*np.sin(a*np.pi/180.)*1e-10 / hbar\n\n def convertToKSpace(self):\n \"\"\"\n convert the Spectrum from Angle into K space\n :return:\n \"\"\"\n if not self.kSpace:\n self.kSpace = True\n self.NAME = self.NAME[:-4] + '_k' + self.NAME[-4:]\n temp = np.zeros((len(self.yvals),len(self.xvals)))\n amax, amin = np.amax(self.xvals), np.amin(self.xvals)\n Emax, Emin = np.amax(self.yvals), np.amin(self.yvals)\n if amax<0 and amin<0:\n kvals = np.linspace(self.convertAngleToK(amin,Emax),self.convertAngleToK(amax,Emin), len(self.xvals))\n elif amax>0 and amin>0:\n kvals = np.linspace(self.convertAngleToK(amin,Emin),self.convertAngleToK(amax,Emax), len(self.xvals))\n else:\n kvals = np.linspace(self.convertAngleToK(amin,Emax),self.convertAngleToK(amax,Emax), len(self.xvals))\n kmax, kmin = np.amax(kvals), np.amin(kvals)\n count = 0\n for E in self.yvals:\n temp[count,:] = self.IDATA(self.convertKtoAngle(kvals,E),E)\n count += 1\n #self.DATA = temp\n self.xLimits = np.array([kmin,kmax])\n self.regenerate_vals_from_limits()\n self.reinterpolate_data(temp)\n else:\n print(\"Spectra object already in k space\")\n\n\ndef load_a_spectrum(filelocation, transpose=True, mirrorY=True):\n # File should be formatted using the following format:\n # xmin, xmax = a, b\n # ymin, ymax = c, d\n # dimX, dimY = nx, ny\n # #DAT\n # lines of intensities, one line for each intensity\n\n f_ = open(filelocation, 'r')\n lines = 
f_.readlines()\n f_.close()\n\n xmin, xmax = 0., 0.\n ymin, ymax = 0., 0.\n dimX, dimY = 1, 1\n dataPart = False\n data = []\n for line in lines:\n if not dataPart:\n try:\n descriptions, values = line.split('=')\n descriptions = descriptions.split(',')\n values = values.split(',')\n if descriptions[0] == 'xmin':\n xmin = float(values[0])\n xmax = float(values[1])\n elif descriptions[0] == 'ymin':\n ymin = float(values[0])\n ymax = float(values[1])\n elif descriptions[0] == 'dimX':\n dimX = int(values[0])\n dimY = int(values[1])\n except:\n print(\"Couldn't parse line\")\n if line[:4] == \"#DAT\":\n print(\"This is the #DAT line\")\n else:\n print(\"NOT #DAT\")\n else:\n if line[:3] == 'NAN':\n line = \"0.\"\n data.append(float(line))\n if line[:4] == '#DAT':\n dataPart = True\n data = np.array(data)\n data = data.reshape((dimX, dimY))\n if transpose:\n data = data.T\n if mirrorY:\n data = data[::-1,:]\n spec = Spectra(data, [xmin, xmax], [ymin, ymax], filelocation)\n return spec\n\n\nif __name__ =='__main__':\n Spec = load_a_spectrum(r'C:\\Users\\Niels Ehlen\\PycharmProjects\\DataFitting\\temp\\temp.txt')\n Spec.plot_data()\n plt.show()" ]
[ [ "numpy.array", "scipy.interpolate.interp1d", "numpy.sin", "matplotlib.pyplot.gca", "matplotlib.pyplot.subplots", "matplotlib.pyplot.figure", "numpy.sign", "numpy.amin", "numpy.amax", "matplotlib.pyplot.tight_layout", "numpy.sqrt", "scipy.interpolate.interp2d", "matplotlib.pyplot.show", "numpy.linspace" ] ]
guanshaoheng/DataGenerationPlastic
[ "11cd3f0398f18f1b6c1ab4f63dd6e90aea829642" ]
[ "Gaussian.py" ]
[ "import os\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport pandas as pd\n\n\nclass GuanssianRandomPath:\n \"\"\"\n Used for random loading path generation via Gaussian Process method\n \"\"\"\n def __init__(self, curlDegree, amplitudeValue, showFlag=False, maxEpsilonLimitation=0.8,\n numberPerSamples=3, numberOfPoints=1000, meanValue=-1e5):\n self.seed = 10001\n np.random.seed(self.seed)\n\n self.showFlag = showFlag\n self.maxEpsilonLimitation = maxEpsilonLimitation\n self.numberOfPoints = numberOfPoints\n self.numberOfFuncutions = numberPerSamples\n\n self.curlDegree = curlDegree # 1~5\n self.amplitudeValue = amplitudeValue # generally 0.25\n self.amplitude = np.linspace(0, self.amplitudeValue, int(self.numberOfPoints))\n self.meanValue = -meanValue\n if meanValue!=0:\n self.x = np.abs(self.meanValue)*self.curlDegree*np.linspace(0, 1., self.numberOfPoints)[:, np.newaxis]\n else:\n self.x = self.curlDegree*np.linspace(0, 1., self.numberOfPoints)[:, np.newaxis]\n self.cov = self.CovarienceMatrix(self.x, self.x)*self.amplitude\n\n self.y = np.random.multivariate_normal(mean=np.ones(self.numberOfPoints)*self.meanValue,\n cov=self.cov,\n size=self.numberOfFuncutions)\n\n self.confiningPreussure = []\n if self.showFlag:\n self.plotPaths()\n self.plotCovarianceMatrix(kernel=self.cov, curl=self.curlDegree)\n\n # if self.generatingNum > 0:\n # self.generation()\n\n def generation(self, generatingNum, path):\n print()\n print('='*80)\n print('\\t Loading path generation ...')\n i = 0\n numSample = 0\n while numSample < generatingNum:\n print('\\t\\tPath random % d seed %d' % (numSample, i))\n self.seed = i\n np.random.seed(self.seed)\n self.y = np.random.multivariate_normal(\n mean=np.ones(self.numberOfPoints)*self.meanValue,\n cov=self.cov,\n size=self.numberOfFuncutions)\n maxEpsilon = np.max(np.abs(self.y))\n if maxEpsilon > self.maxEpsilonLimitation and self.numberOfFuncutions == 3:\n i += 1\n continue\n else:\n if self.numberOfFuncutions == 3:\n self.plotPaths(path=path)\n self.writeDownPaths(numSample, self.curlDegree)\n else:\n self.plotPaths(path=path)\n self.confiningPreussure.append(self.y.T)\n i += 1\n numSample += 1\n if self.numberOfFuncutions == 1:\n self.writeDownPaths(numSample, curlDegree=self.curlDegree)\n\n def CovarienceMatrix(self, x, y):\n \"\"\"\n Use the kernel fucntion: $\\kappa(x_i, x_j)=\\mathrm{exp}(-\\sum_{k=1}^{m}\\theta_k(x_i^k-x_j^k)^2))$\n where the dimensional number is 1 in this project.\n\n Reference:\n [1] https://blog.dominodatalab.com/fitting-gaussian-process-models-python\n :param x:\n :param y:\n :return:\n \"\"\"\n mesh = np.meshgrid(x, y)\n kernel = np.exp(-(mesh[0]-mesh[1])**2)\n return kernel\n\n def plotCovarianceMatrix(self, kernel, curl):\n numberOfticksInFigure = 11\n interval = int(len(kernel)/numberOfticksInFigure)\n # interval = np.linspace(0.001, 1., 1000)\n # x_tick = interval\n # y_tick = interval\n # data = kernel\n # pd_data = pd.DataFrame(data, index=y_tick, columns=x_tick)\n # ax = sns.heatmap(pd_data, xticklabels=50, cmap=\"YlGnBu\")\n ax = sns.heatmap(kernel, xticklabels=[], yticklabels=[], cmap=\"YlGnBu\")\n plt.title('Degree of curl = %.2f' % curl)\n\n plt.tight_layout()\n if self.numberOfFuncutions == 3:\n plt.savefig('./figSav/CovariabceHeatMap_curl%d_new.png' % curl, dpi=200)\n else:\n plt.savefig('./ConfiningPressure/CovariabceHeatMap_curl%d_new.png' % curl, dpi=200)\n plt.close()\n\n def plotPaths(self, path='figSav'):\n # Plot the sampled functions\n fig, ax = plt.subplots(1, 1, figsize=(6, 
4), dpi=100)\n totalPointsOnFigure = 50\n interval = int(len(self.y[0])/50)\n labelList = ['xx', 'yy', 'xy']\n for i in range(self.numberOfFuncutions):\n plt.plot(list(range(1, len(self.y[0]) + 1))[::interval], list(self.y[i])[::interval],\n linestyle='-', marker='o', markersize=4, label='$\\epsilon_{%s}$' % labelList[i])\n plt.xlabel('Loading step', fontsize=15)\n plt.ylabel('$\\epsilon$', fontsize=15)\n plt.xticks(fontsize=15)\n plt.xlim([0, len(self.y[0])])\n plt.tight_layout()\n plt.legend()\n figName = 'ConfiningPressureGP_curl%d_seed%d.png' % (self.curlDegree, self.seed)\n plt.savefig(os.path.join(path, figName), dpi=200)\n plt.close()\n\n def writeDownPaths(self, numSample, curlDegree):\n if self.numberOfFuncutions == 3:\n filePath = './vonmisesPaths/path_curlDegree%d_%d.dat' % (numSample, curlDegree)\n np.savetxt(fname=filePath, X=self.y.T, fmt='%10.5f', delimiter=',', header='epsilon_xx, epsilon_yy, epsilon_xy')\n elif self.numberOfFuncutions == 1:\n filePath = './ConfiningPressure/ConfiningPressurePath_curlDegree%d.dat' % curlDegree\n np.savetxt(fname=filePath, X=self.y.T, fmt='%10.5f', delimiter=',', header=' '.join(['%d' % i for i in range(numSample)]))\n\n\nif __name__ == \"__main__\":\n # confining pressure generation\n # gaussian = GuanssianRandomPath(curlDegree=2, amplitudeValue=0.15, showFlag=True, numberPerSamples=3, meanValue=-1e5,\n # numberOfPoints=100) # generally 1~5, 0.25\n # gaussian.generation(generatingNum=10, path='ConfiningPressure')\n\n # loading path for von-mises model\n for curlDegree in range(1, 6):\n gaussian = GuanssianRandomPath(curlDegree=curlDegree, amplitudeValue=0.15, showFlag=True, numberPerSamples=3, meanValue=0.0,\n numberOfPoints=1000) # generally 1~5, 0.25\n gaussian.generation(generatingNum=200, path='vonmisesPaths')\n\n\n" ]
[ [ "numpy.savetxt", "numpy.random.seed", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.savefig", "matplotlib.pyplot.title", "matplotlib.pyplot.legend", "numpy.exp", "matplotlib.pyplot.close", "matplotlib.pyplot.subplots", "numpy.ones", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.ylabel", "numpy.abs", "numpy.linspace", "numpy.meshgrid", "matplotlib.pyplot.xticks" ] ]
shah-newaz/vaxrank
[ "65832878f28ce44ccaaf47be3e0c6d38a1743988" ]
[ "vaxrank/report.py" ]
[ "# Copyright (c) 2016-2018. Mount Sinai School of Medicine\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import, division\nfrom collections import namedtuple, OrderedDict\nfrom importlib import import_module\nimport logging\nimport os\nimport sys\nimport tempfile\n\nfrom astropy.io import ascii as asc\nimport jinja2\nimport pandas as pd\nimport pdfkit\nimport requests\nimport roman\nfrom varcode import load_vcf_fast\n\nfrom .manufacturability import ManufacturabilityScores\n\nlogger = logging.getLogger(__name__)\n\n\nJINJA_ENVIRONMENT = jinja2.Environment(\n loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),\n extensions=['jinja2.ext.autoescape'],\n autoescape=False,\n trim_blocks=True,\n lstrip_blocks=True,\n)\n\n\nPatientInfo = namedtuple(\"PatientInfo\", (\n \"patient_id\",\n \"vcf_paths\",\n \"bam_path\",\n \"mhc_alleles\",\n \"num_somatic_variants\",\n \"num_coding_effect_variants\",\n \"num_variants_with_rna_support\",\n \"num_variants_with_vaccine_peptides\",\n))\n\n\nclass TemplateDataCreator(object):\n def __init__(\n self,\n ranked_variants_with_vaccine_peptides,\n patient_info,\n final_review,\n reviewers,\n args_for_report,\n input_json_file,\n cosmic_vcf_filename=None):\n \"\"\"\n Construct a TemplateDataCreator object, from the output of the vaxrank pipeline.\n \"\"\"\n self.ranked_variants_with_vaccine_peptides = ranked_variants_with_vaccine_peptides\n self.patient_info = patient_info\n\n # filter output-related command-line args: we want to display everything else\n args_to_display_in_report = {\n k: v for k, v in args_for_report.items() if not k.startswith(\"output\")\n }\n\n self.template_data = {\n 'args': sorted(args_to_display_in_report.items()),\n 'reviewers': reviewers.split(',') if reviewers else [],\n 'final_review': final_review,\n 'input_json_file': input_json_file,\n # these report sections are optional\n 'include_manufacturability': args_for_report['manufacturability'],\n 'include_wt_epitopes': args_for_report['wt_epitopes'],\n }\n\n # map from peptide objects to their COSMIC IDs if they exist\n if cosmic_vcf_filename:\n logger.info('Loading COSMIC data...')\n self.cosmic_variant_collection = load_vcf_fast(\n cosmic_vcf_filename, allow_extended_nucleotides=True, include_info=False)\n logger.info('COSMIC data loaded.')\n else:\n self.cosmic_variant_collection = None\n\n def _patient_info(self):\n \"\"\"\n Returns an OrderedDict with patient info.\n \"\"\"\n patient_info = OrderedDict([\n ('Patient ID', self.patient_info.patient_id),\n ('VCF (somatic variants) path(s)', '; '.join(self.patient_info.vcf_paths)),\n ('BAM (RNAseq reads) path', self.patient_info.bam_path),\n ('MHC alleles', ' '.join(self.patient_info.mhc_alleles)),\n ('Total number of somatic variants', self.patient_info.num_somatic_variants),\n ('Somatic variants with predicted coding effects',\n self.patient_info.num_coding_effect_variants),\n ('Somatic variants with predicted coding effects and RNA support',\n 
self.patient_info.num_variants_with_rna_support),\n ('Somatic variants with predicted coding effects, RNA support and predicted MHC '\n 'ligands',\n self.patient_info.num_variants_with_vaccine_peptides),\n ])\n return patient_info\n\n def _variant_data(self, variant, top_vaccine_peptide):\n \"\"\"\n Returns an OrderedDict with info used to populate variant info section.\n \"\"\"\n variant_data = OrderedDict()\n mutant_protein_fragment = top_vaccine_peptide.mutant_protein_fragment\n top_score = _sanitize(top_vaccine_peptide.combined_score)\n igv_locus = \"chr%s:%d\" % (variant.contig, variant.start)\n variant_data = OrderedDict([\n ('IGV locus', igv_locus),\n ('Gene name', mutant_protein_fragment.gene_name),\n ('Top score', top_score),\n ('RNA reads supporting variant allele', mutant_protein_fragment.n_alt_reads),\n ('RNA reads supporting reference allele', mutant_protein_fragment.n_ref_reads),\n ('RNA reads supporting other alleles', mutant_protein_fragment.n_other_reads),\n ])\n return variant_data\n\n def _effect_data(self, predicted_effect):\n \"\"\"\n Returns an OrderedDict with info about the given predicted effect.\n \"\"\"\n effect_data = OrderedDict([\n ('Effect type', predicted_effect.__class__.__name__),\n ('Transcript name', predicted_effect.transcript_name),\n ('Transcript ID', predicted_effect.transcript_id),\n ('Effect description', predicted_effect.short_description),\n ])\n return effect_data\n\n def _peptide_header_display_data(self, vaccine_peptide, rank):\n \"\"\"\n Returns a dictionary with info used to populate the header section of a peptide table.\n\n Parameters\n ----------\n vaccine_peptide : VaccinePeptide\n The given peptide to convert to display form\n\n rank : int\n Rank of vaccine peptide in list\n \"\"\"\n mutant_protein_fragment = vaccine_peptide.mutant_protein_fragment\n amino_acids = mutant_protein_fragment.amino_acids\n mutation_start = mutant_protein_fragment.mutant_amino_acid_start_offset\n mutation_end = mutant_protein_fragment.mutant_amino_acid_end_offset\n aa_before_mutation = amino_acids[:mutation_start]\n aa_mutant = amino_acids[mutation_start:mutation_end]\n aa_after_mutation = amino_acids[mutation_end:]\n\n header_display_data = {\n 'num': roman.toRoman(rank + 1).lower(),\n 'aa_before_mutation': aa_before_mutation,\n 'aa_mutant': aa_mutant,\n 'aa_after_mutation': aa_after_mutation,\n }\n return header_display_data\n\n def _peptide_data(self, vaccine_peptide, transcript_name):\n \"\"\"\n Returns a dictionary with info used to populate peptide table contents.\n\n Parameters\n ----------\n vaccine_peptide : VaccinePeptide\n The given peptide to convert to display form\n\n transcript_name : str\n RNA transcript name (should match that displayed in effect section)\n \"\"\"\n mutant_protein_fragment = vaccine_peptide.mutant_protein_fragment\n amino_acids = mutant_protein_fragment.amino_acids\n peptide_data = OrderedDict([\n ('Transcript name', transcript_name),\n ('Length', len(amino_acids)),\n ('Expression score', _sanitize(vaccine_peptide.expression_score)),\n ('Mutant epitope score', _sanitize(vaccine_peptide.mutant_epitope_score)),\n ('Combined score', _sanitize(vaccine_peptide.combined_score)),\n ('Max coding sequence coverage',\n mutant_protein_fragment.n_alt_reads_supporting_protein_sequence),\n ('Mutant amino acids', mutant_protein_fragment.n_mutant_amino_acids),\n ('Mutation distance from edge',\n mutant_protein_fragment.mutation_distance_from_edge),\n ])\n return peptide_data\n\n def _manufacturability_data(self, vaccine_peptide):\n \"\"\"\n 
Returns an OrderedDict with manufacturability data for the given peptide.\n \"\"\"\n scores = vaccine_peptide.manufacturability_scores\n manufacturability_data = OrderedDict([\n ('C-terminal 7mer GRAVY score', _sanitize(scores.cterm_7mer_gravy_score)),\n ('Max 7mer GRAVY score', _sanitize(scores.max_7mer_gravy_score)),\n ('N-terminal Glutamine, Glutamic Acid, or Cysteine',\n int(scores.difficult_n_terminal_residue)),\n ('C-terminal Cysteine', int(scores.c_terminal_cysteine)),\n ('C-terminal Proline', int(scores.c_terminal_proline)),\n ('Total number of Cysteine residues', scores.cysteine_count),\n ('N-terminal Asparagine', int(scores.n_terminal_asparagine)),\n ('Number of Asparagine-Proline bonds', scores.asparagine_proline_bond_count),\n ])\n return manufacturability_data\n\n def _epitope_data(self, epitope_prediction):\n \"\"\"\n Returns an OrderedDict with epitope data from the given prediction.\n \"\"\"\n # if the WT peptide is too short, it's possible that we're missing a prediction for it\n if epitope_prediction.wt_ic50 is not None:\n wt_ic50_str = '%.2f nM' % epitope_prediction.wt_ic50\n else:\n wt_ic50_str = 'No prediction'\n epitope_data = OrderedDict([\n ('Sequence', epitope_prediction.peptide_sequence),\n ('IC50', '%.2f nM' % epitope_prediction.ic50),\n ('EL Percentile', epitope_prediction.percentile_rank),\n ('EL Score', _sanitize(epitope_prediction.logistic_epitope_score())),\n ('Allele', epitope_prediction.allele.replace('HLA-', '')),\n ('WT sequence', epitope_prediction.wt_peptide_sequence),\n ('WT IC50', wt_ic50_str),\n ])\n return epitope_data\n\n def _query_cosmic(self, variant):\n if not self.cosmic_variant_collection:\n return None\n if variant in self.cosmic_variant_collection.metadata:\n # IDs in the DB are of the form 'COSM725245'\n cosmic_id = self.cosmic_variant_collection.metadata[variant]['id']\n link_for_report = \\\n \"http://cancer.sanger.ac.uk/cosmic/gene/analysis?ln=%s\" % cosmic_id[4:]\n logger.info(\"Link for report: %s\", link_for_report)\n return link_for_report\n\n logger.info(\"Variant not in COSMIC\")\n return None\n\n def _query_wustl(self, predicted_effect, gene_name):\n \"\"\"\n Returns a link to the WUSTL page for this variant, if present.\n \"\"\"\n amino_acids = predicted_effect.short_description\n api_url = \"http://www.docm.info/api/v1/variants.json?amino_acids=%s&genes=%s\" % (\n amino_acids, gene_name.upper())\n logger.info(\"WUSTL link: %s\", api_url)\n\n try:\n contents = requests.get(api_url).json()\n if len(contents) > 0:\n hgvs = contents[0]['hgvs']\n link_for_report = \"http://docm.genome.wustl.edu/variants/%s\" % hgvs\n logger.info(\"Link for report: %s\", link_for_report)\n return link_for_report\n except requests.exceptions.ConnectionError as e:\n logger.warn('ConnectionError reaching WUSTL: %s', e)\n return None\n\n logger.info('Variant not found in WUSTL')\n return None\n\n def _databases(self, variant, predicted_effect, gene_name):\n databases = {}\n wustl_link = self._query_wustl(predicted_effect, gene_name)\n if wustl_link:\n databases['WUSTL'] = wustl_link\n\n cosmic_link = self._query_cosmic(variant)\n if cosmic_link:\n databases['COSMIC'] = cosmic_link\n\n return databases\n\n def compute_template_data(self):\n patient_info = self._patient_info()\n\n # list ranked variants with their peptides\n variants = []\n num = 0\n for (variant, vaccine_peptides) in self.ranked_variants_with_vaccine_peptides:\n variant_short_description = variant.short_description\n if len(vaccine_peptides) == 0:\n logger.info(\n \"Skipping gene(s) %s, 
variant %s: no vaccine peptides\",\n variant.gene_names, variant_short_description)\n continue\n num += 1\n\n top_peptide = vaccine_peptides[0]\n variant_data = self._variant_data(variant, top_peptide)\n predicted_effect = top_peptide.mutant_protein_fragment.predicted_effect()\n effect_data = self._effect_data(predicted_effect)\n\n databases = self._databases(\n variant, predicted_effect, top_peptide.mutant_protein_fragment.gene_name)\n\n peptides = []\n for j, vaccine_peptide in enumerate(vaccine_peptides):\n if not vaccine_peptide.contains_mutant_epitopes():\n logger.info('No epitopes for peptide: %s', vaccine_peptide)\n continue\n\n header_display_data = self._peptide_header_display_data(vaccine_peptide, j)\n peptide_data = self._peptide_data(vaccine_peptide, predicted_effect.transcript_name)\n manufacturability_data = self._manufacturability_data(vaccine_peptide)\n\n epitopes = []\n wt_epitopes = []\n for mutant_epitope_prediction in vaccine_peptide.mutant_epitope_predictions:\n epitopes.append(self._epitope_data(mutant_epitope_prediction))\n\n for wt_epitope_prediction in vaccine_peptide.wildtype_epitope_predictions:\n epitope_data = self._epitope_data(wt_epitope_prediction)\n key_list = ['Allele', 'IC50', 'Sequence']\n wt_epitopes.append({key: epitope_data[key] for key in key_list})\n\n # hack: make a nicely-formatted fixed width table for epitopes, used in ASCII report\n with tempfile.TemporaryFile(mode='r+') as temp:\n asc.write(epitopes, temp, format='fixed_width_two_line', delimiter_pad=' ')\n temp.seek(0)\n ascii_epitopes = temp.read()\n\n ascii_wt_epitopes = None\n if len(wt_epitopes) > 0:\n with tempfile.TemporaryFile(mode='r+') as temp:\n asc.write(\n wt_epitopes, temp, format='fixed_width_two_line', delimiter_pad=' ')\n temp.seek(0)\n ascii_wt_epitopes = temp.read()\n peptide_dict = {\n 'header_display_data': header_display_data,\n 'peptide_data': peptide_data,\n 'manufacturability_data': manufacturability_data,\n 'epitopes': epitopes,\n 'ascii_epitopes': ascii_epitopes,\n 'wt_epitopes': wt_epitopes,\n 'ascii_wt_epitopes': ascii_wt_epitopes,\n }\n peptides.append(peptide_dict)\n\n # if there are no peptides for this variant, exclude from report\n if len(peptides) == 0:\n logger.info('No peptides for variant: %s', variant)\n continue\n\n variant_dict = {\n 'num': num,\n 'short_description': variant_short_description,\n 'variant_data': variant_data,\n 'effect_data': effect_data,\n 'peptides': peptides,\n 'databases': databases,\n }\n variants.append(variant_dict)\n\n # add package metadata to the report\n package_versions = {}\n for name in ['vaxrank', 'isovar', 'mhctools', 'varcode', 'pyensembl']:\n module = import_module(name)\n version = getattr(module, '__version__')\n package_versions[name] = version\n\n self.template_data.update({\n 'patient_info': patient_info,\n 'variants': variants,\n 'package_versions': package_versions,\n })\n return self.template_data\n\n\ndef _make_report(\n template_data,\n file_handle,\n template_path):\n template = JINJA_ENVIRONMENT.get_template(template_path)\n report = template.render(template_data)\n file_handle.write(report)\n\ndef make_ascii_report(\n template_data,\n ascii_report_path):\n with open(ascii_report_path, \"w\") as f:\n _make_report(template_data, f, 'templates/template.txt')\n logger.info('Wrote ASCII report to %s', ascii_report_path)\n\ndef make_html_report(\n template_data,\n html_report_path):\n with open(html_report_path, \"w\") as f:\n _make_report(template_data, f, 'templates/template.html')\n logger.info('Wrote HTML 
report to %s', html_report_path)\n\ndef make_pdf_report(\n template_data,\n pdf_report_path):\n with tempfile.NamedTemporaryFile(mode='w', suffix='.html') as f:\n _make_report(template_data, f, 'templates/template.html')\n f.flush()\n\n options = {\n 'zoom': 0.55,\n 'margin-top': '20mm'\n }\n\n if sys.platform in ('linux', 'linux2'):\n # pdfkit uses wkhtmltopdf, which doesn't work on headless servers;\n # recommended workaround is to use xvfb, as documented here:\n # https://github.com/wkhtmltopdf/wkhtmltopdf/issues/2037#issuecomment-62019521\n from xvfbwrapper import Xvfb\n logger.info('Running pdfkit inside xvfb wrapper')\n with Xvfb():\n pdfkit.from_file(f.name, pdf_report_path, options=options)\n\n else:\n pdfkit.from_file(f.name, pdf_report_path, options=options)\n logger.info('Wrote PDF report to %s', pdf_report_path)\n\ndef new_columns():\n columns = OrderedDict([\n (\"amino_acids\", []),\n (\"chr\", []),\n (\"pos\", []),\n (\"ref\", []),\n (\"alt\", []),\n (\"variant_rank\", []),\n (\"peptide_rank\", []),\n (\"mutation_start\", []),\n (\"mutation_end\", []),\n (\"combined_score\", []),\n (\"expression_score\", []),\n (\"mutant_epitope_score\", []),\n ])\n for field in ManufacturabilityScores._fields:\n columns[field] = []\n return columns\n\ndef _str_sig_figs(input, n_sig_figs):\n return '{:g}'.format(float('{:.{p}g}'.format(input, p=n_sig_figs)))\n\ndef _sanitize(val):\n \"\"\"\n Converts values into display-friendly\n \"\"\"\n if type(val) == bool:\n return int(val)\n else:\n return _str_sig_figs(val, 5)\n\ndef resize_columns(worksheet, amino_acids_col, pos_col):\n \"\"\"\n Resizes amino acid and mutant position columns in the Excel sheet so that they don't\n have to be expanded.\n \"\"\"\n worksheet.set_column('%s:%s' % (amino_acids_col, amino_acids_col), 40)\n worksheet.set_column('%s:%s' % (pos_col, pos_col), 12)\n\ndef make_minimal_neoepitope_report(\n ranked_variants_with_vaccine_peptides,\n num_epitopes_per_peptide=None,\n excel_report_path=None):\n \"\"\"\n Creates a simple Excel spreadsheet containing one neoepitope per row\n\n Parameters\n ----------\n ranked_variants_with_vaccine_peptides :\n Ranked list of (variant, list of its vaccine peptides)\n\n num_epitopes_per_peptide : int\n The number of epitopes to include for each vaccine peptide; these are sorted before cutoff.\n If None, all epitopes will be included in the output\n\n excel_report_path : str\n Path to which to write the output Excel file\n \"\"\"\n rows = []\n # each row in the spreadsheet is a neoepitope\n for (variant, vaccine_peptides) in ranked_variants_with_vaccine_peptides:\n for vaccine_peptide in vaccine_peptides:\n # only include mutant epitopes\n for epitope_prediction in vaccine_peptide.mutant_epitope_predictions:\n row = OrderedDict([\n ('Allele', epitope_prediction.allele),\n ('Mutant peptide sequence', epitope_prediction.peptide_sequence),\n ('Score', vaccine_peptide.mutant_epitope_score),\n ('Predicted mutant pMHC affinity', '%.2f nM' % epitope_prediction.ic50),\n ('Variant allele RNA read count',\n vaccine_peptide.mutant_protein_fragment.n_alt_reads),\n ('Wildtype sequence', epitope_prediction.wt_peptide_sequence),\n ('Predicted wildtype pMHC affinity',\n '%.2f nM' % epitope_prediction.wt_ic50),\n ('Gene name', vaccine_peptide.mutant_protein_fragment.gene_name),\n ('Genomic variant', variant.short_description),\n ])\n rows.append(row)\n\n if len(rows) > 0:\n df = pd.DataFrame.from_dict(rows)\n writer = pd.ExcelWriter(excel_report_path, engine='xlsxwriter')\n df.to_excel(writer, 
sheet_name='Neoepitopes', index=False)\n\n # resize columns to be not crappy\n worksheet = writer.sheets['Neoepitopes']\n worksheet.set_column('%s:%s' % ('B', 'B'), 23)\n worksheet.set_column('%s:%s' % ('D', 'D'), 27)\n worksheet.set_column('%s:%s' % ('E', 'E'), 26)\n worksheet.set_column('%s:%s' % ('F', 'F'), 17)\n worksheet.set_column('%s:%s' % ('G', 'G'), 30)\n worksheet.set_column('%s:%s' % ('H', 'H'), 9)\n worksheet.set_column('%s:%s' % ('I', 'I'), 18)\n writer.save()\n logger.info('Wrote XLSX neoepitope report file to %s', excel_report_path)\n\n\ndef make_csv_report(\n ranked_variants_with_vaccine_peptides,\n excel_report_path=None,\n csv_report_path=None):\n \"\"\"\n Writes out CSV/XLSX reports as needed.\n \"\"\"\n # make a bunch of pd frames, save them in an OrderedDict with keys being descriptive Excel\n # sheet names (will be used later for making the Excel report if needed)\n frames = OrderedDict()\n for i, (variant, vaccine_peptides) in enumerate(ranked_variants_with_vaccine_peptides):\n any_vaccine_peptides = False\n if not vaccine_peptides:\n continue\n\n sheet_name = '%d_%s_chr%s_%d_%s_%s' % (\n i + 1, vaccine_peptides[0].mutant_protein_fragment.gene_name,\n variant.contig, variant.start, variant.ref, variant.alt)\n columns = new_columns()\n for j, vaccine_peptide in enumerate(vaccine_peptides):\n\n # if there are no predicted epitopes, exclude this peptide from the report\n if not vaccine_peptide.contains_mutant_epitopes():\n logger.info('No epitopes for peptide: %s', vaccine_peptide)\n continue\n\n columns[\"chr\"].append(variant.contig)\n columns[\"pos\"].append(variant.original_start)\n columns[\"ref\"].append(variant.original_ref)\n columns[\"alt\"].append(variant.original_alt)\n columns[\"variant_rank\"].append(i + 1)\n columns[\"peptide_rank\"].append(j + 1)\n columns[\"amino_acids\"].append(vaccine_peptide.mutant_protein_fragment.amino_acids)\n columns[\"mutation_start\"].append(\n vaccine_peptide.mutant_protein_fragment.mutant_amino_acid_start_offset)\n columns[\"mutation_end\"].append(\n vaccine_peptide.mutant_protein_fragment.mutant_amino_acid_end_offset)\n columns[\"combined_score\"].append(_sanitize(vaccine_peptide.combined_score))\n columns[\"expression_score\"].append(_sanitize(vaccine_peptide.expression_score))\n columns[\"mutant_epitope_score\"].append(_sanitize(vaccine_peptide.mutant_epitope_score))\n for field in ManufacturabilityScores._fields:\n columns[field].append(\n _sanitize(getattr(vaccine_peptide.manufacturability_scores, field)))\n any_vaccine_peptides = True\n\n if not any_vaccine_peptides:\n continue\n\n df = pd.DataFrame(columns, columns=columns.keys())\n frames[sheet_name] = df\n\n if not frames:\n logger.info('No data for CSV or XLSX report')\n return\n\n all_dfs = pd.concat(frames.values())\n if csv_report_path:\n all_dfs.to_csv(csv_report_path, index=False)\n logger.info('Wrote CSV report file to %s', csv_report_path)\n\n if excel_report_path:\n writer = pd.ExcelWriter(excel_report_path, engine='xlsxwriter')\n\n # copy the variant rank column to position 0, make first sheet called \"All\"\n all_dfs[''] = all_dfs['variant_rank']\n colnames = all_dfs.columns.tolist()\n colnames.insert(0, colnames.pop(colnames.index('')))\n all_dfs = all_dfs.reindex(columns=colnames)\n all_dfs.to_excel(writer, sheet_name='All', index=False)\n resize_columns(writer.sheets['All'], 'B', 'D')\n\n # add one sheet per variant\n for sheet_name, df in frames.items():\n # trim sheet names to 31 characters due to limit in Excel\n # should still be unique since they 
start with the variant\n # index\n shortened_sheet_name = sheet_name[:31]\n df.to_excel(writer, sheet_name=shortened_sheet_name, index=False)\n resize_columns(writer.sheets[shortened_sheet_name], 'A', 'C')\n\n writer.save()\n logger.info('Wrote manufacturer XLSX file to %s', excel_report_path)\n" ]
[ [ "pandas.DataFrame.from_dict", "pandas.ExcelWriter" ] ]
nuno-chicoria/GHC_2018
[ "d3a19c4f6293dd24ca06d24fdde58da04800781b" ]
[ "source_code/ghc2018.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 1 18:44:04 2018\n\n@author: JavaWizards\n\"\"\"\n\nimport numpy as np\n\nfile = \"/Users/nuno_chicoria/Downloads/b_should_be_easy.in\"\n\nhandle = open(file)\n\nR, C, F, N, B, T = handle.readline().split()\n\nrides = []\n\nindex = []\nfor i in range(int(N)):\n index.append(i)\n\nfor line in handle:\n rides.append(line.split())\n \nrides_np = np.asarray(rides)\nrides_np = np.column_stack([rides_np, index])\nrides_np = rides_np.astype(np.int)\nrides_np = rides_np[rides_np[:,5].argsort()]\n\nvehicles = {}\nfor i in range(int(F)):\n vehicles [i] = [\"A\", [0, 0], [0, 0], [0, 0], []]\n \nfor i in range(int(T)):\n rides_np = rides_np[rides_np[:,5] > i]\n for item in range(len(vehicles)):\n if vehicles[item][0] == \"A\":\n if rides_np.size != 0:\n if abs(vehicles[item][1][0] - rides_np[0, 0]) + abs(vehicles[item][1][1] - rides_np[0, 1]) + i >= rides_np[0, 4]:\n if abs(vehicles[item][1][0] - rides_np[0, 0]) + abs(vehicles[item][1][1] - rides_np[0, 1]) + i + abs(rides_np[0,0] - rides_np[0,2]) + abs(rides_np[0,1] - rides_np[0,3]) <= rides_np[0, 5]:\n vehicles[item][0] = \"C\"\n vehicles[item][2] = [rides_np[0, 0], rides_np[0, 1]]\n vehicles[item][3] = [rides_np[0, 2], rides_np[0, 3]]\n vehicles[item][4].append(rides_np[0, 6])\n rides_np = np.delete(rides_np, (0), axis=0)\n else:\n rides_np = np.delete(rides_np, (0), axis=0)\n for item in range(len(vehicles)):\n if vehicles[item][0] == \"C\":\n if vehicles[item][1][0] < vehicles[item][2][0]:\n vehicles[item][1][0] = vehicles[item][1][0] + 1\n elif vehicles[item][1][0] > vehicles[item][2][0]:\n vehicles[item][1][0] = vehicles[item][1][0] - 1\n elif vehicles[item][1][0] == vehicles[item][2][0]:\n if vehicles[item][1][1] < vehicles[item][2][1]:\n vehicles[item][1][1] = vehicles[item][1][1] + 1\n elif vehicles[item][1][1] > vehicles[item][2][1]:\n vehicles[item][1][1] = vehicles[item][1][1] - 1\n else:\n vehicles[item][0] = \"D\"\n for item in range(len(vehicles)):\n if vehicles[item][0] == \"D\":\n if vehicles[item][1][0] < vehicles[item][3][0]:\n vehicles[item][1][0] += 1\n elif vehicles[item][1][0] > vehicles[item][3][0]:\n vehicles[item][1][0] -= 1\n elif vehicles[item][1][0] == vehicles[item][3][0]:\n if vehicles[item][1][1] < vehicles[item][3][1]:\n vehicles[item][1][1] += 1\n elif vehicles[item][1][1] > vehicles[item][3][1]:\n vehicles[item][1][1] -= 1\n else:\n vehicles[item][0] = \"A\"\n vehicles[item][2] = None\n vehicles[item][3] = None\n \nresults = open(\"ghc2018.txt\", \"w+\")\nfor item in range(len(vehicles)):\n if len(vehicles[item][4]) !=0:\n results.write(str(len(vehicles[item][4])))\n for ride in vehicles[item][4]:\n results.write(\" \")\n results.write(str(ride))\n results.write(\"\\n\")\nresults.close()\n" ]
[ [ "numpy.column_stack", "numpy.asarray", "numpy.delete" ] ]
ArthurWangNet/Trading
[ "c7f39da2b7186f190b934725461b695521ab369b" ]
[ "4_update_fundamental_data.py" ]
[ "# This script will run after get fundamental data is done. It will update the latests data to production files.\nimport os\nimport Paths\nimport datetime\nimport pandas as pd\nimport multiprocessing as mp\nfrom multiprocessing import Pool\nimport time\nfrom joblib import Parallel, delayed\nimport tqdm\n\n# list all sub folders in the given folder\ndef list_subfolders(folder):\n\treturn [f.path for f in os.scandir(folder) if f.is_dir()]\n\ndef update_csv(csv_file):\n\t\"\"\"\n\tThis function will update the csv file to the latests data.\n\tTaking each csv file from the update csv file list, it will try to match existing file with the same name in production folder.\n\tIf there is a match, it will update the existing file with the latests data, by appending the latests data to the existing file.\n\tIf there is no match, it will create a new file with the latests data.\n\n\t: param csv_file: csv file to be updated\n\t: return: None\n\t\"\"\"\n\n\t#Check if the csv file is empty\n\tif os.stat(os.path.join(folder, csv_file)).st_size == 0:\n\t\treturn\n\n\tif csv_file in production_csv_files:\n\t\t\t\t#print(csv_file + \" already in production folder, updating.\")\n\t\t\t# append the update csv file to the production csv file\n\t\t\tdf_update = pd.read_csv(os.path.join(folder, csv_file))\n\t\t\t# Add new column to the dataframe with value, this is the update date stamp\n\t\t\tdf_update['Update_Date'] = update_time_stamp \n\n\t\t\t#For the production files:\n\t\t\tdf_production = pd.read_csv(os.path.join(Paths.Fundamental_Data_Production_Folder, csv_file))\n\t\t\t# df.append will be deprecated in future version of pandas.\n\t\t\t#df_production = df_production.append(df_update) \n\t\t\t#df_production = df_production.concat(df_production, df_update)\n\t\t\tdf_production = pd.concat([df_production, df_update], sort=False)\n\t\t\tdf_production.to_csv(os.path.join(Paths.Fundamental_Data_Production_Folder, csv_file), index=False)\n\t\t\t#\tprint(csv_file + \" updated.\")\n\telse:\n\t\t# create new csv file in the production folder with the update date stamp\n\t\t\t#print(csv_file + \" not in production folder, creating new file.\")\n\t\tdf_update = pd.read_csv(os.path.join(folder, csv_file))\n\t\t# Add new column to the dataframe with value, this is the update date stamp\n\t\tdf_update['Update_Date'] =update_time_stamp\n\t\tdf_update.to_csv(os.path.join(Paths.Fundamental_Data_Production_Folder, csv_file), index=False)\n\t\t\t#print(csv_file + \" created.\")\n\ndef multi_update():\n\t\"\"\"\n\tThis function will using multiprocessing to update the csv files in the update csv file list by calling the update_csv function.\n\t: return: None\n\t\"\"\"\n\t#Rewrite to joblib version to speed up the process\n\tParallel(n_jobs=os.cpu_count())(delayed(update_csv)(csv_file) for csv_file in tqdm.tqdm(update_csv_files))\n\n\t#Using multiprocessing to speed up the process\n\t#From Python 3.8, the multiprocessing module start using spawn method to create new process.\n\t#Which casue the child process can't access the parent process's global variable.\n\t#So we will mannuly set the start method to 'fork' to avoid this problem.\n\t# mp.set_start_method('fork')\n\t# pool = mp.Pool(os.cpu_count())\n\t# pool.map(update_csv, update_csv_files)\n\t# pool.close()\n\t# pool.join()\n\n\n# def update_fundamental_data(update_csv_files):\n# \tprogressbar = tqdm(update_csv_files)\n# \tfor file in progressbar:\n# \t\tupdate_csv(file)\n\nif __name__ == '__main__':\n\tstart_time = time.time()\n\tupdate_folders = 
list_subfolders(Paths.Fundamental_Data_Update_Folder)\n\tupdate_folders.sort()\n\n\t# Check each folder in the update folder, which should be sorted by date, and process them one by one\n\tfor folder in update_folders:\n\t\t# Get update folder names from path, it will be a update date stamp.\n\t\tupdate_time_stamp= folder.split('/')[-1]\n\t\tprint(\"Checking update folder: \" + update_time_stamp)\n\t\t# Get files from folder which ends with csv and store them into a list\n\t\tupdate_csv_files = [f for f in os.listdir(folder) if f.endswith('.csv')]\n\t\tproduction_csv_files = [f for f in os.listdir(Paths.Fundamental_Data_Production_Folder) if f.endswith('.csv')]\n\n\t\tprint(\"Start Multiprocessing for folder: \" + update_time_stamp)\n\t\t# Call the multi_update function to update the csv files in the update csv file list\n\n\t\tmulti_update()\n\n\t\t#Traditional for loop version\n\t\t#update_fundamental_data(update_csv_files)\n\t\t\n\t\tprint(\"Finished Multiprocessing for folder: \" + update_time_stamp)\n\t\t#Move the update folder and all its files to the archive folder\n\t\tos.rename(folder, os.path.join(Paths.Fundamental_Data_Archived_Folder, update_time_stamp))\n\t\tprint(\"Update folder moved to archive folder.\")\n\n\tprint(\"--- %s seconds ---\" % (time.time() - start_time))\n\n\n\n" ]
[ [ "pandas.concat" ] ]
zaverichintan/Convolutional-Sequence-to-Sequence-Model-for-Human-Dynamics
[ "f12206e3a63ca9bc73763db11b4256d481d3829b" ]
[ "src/VAE/ops.py" ]
[ "import tensorflow as tf\n\n\ndef lrelu(x, leak=0.2, name=\"lrelu\"):\n # Leaky ReLU: keeps positive values and scales negative values by leak; the name argument is unused.\n return tf.maximum(x, leak*x)\n" ]
[ [ "tensorflow.maximum" ] ]
sinharitesh/tutorials-masterclasses
[ "810bbbbb29ada64dd4e618d6ae9037ad0c2c4aca" ]
[ "pytorch-tutorial/Sess-1-Linear-Regression.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n\n# In[80]:\n\n\nimport torch\nimport torch.nn as nn\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\n# In[81]:\n\n\nX = torch.randn(100, 1)*10\ny = X + 3*torch.randn(100, 1)\nplt.plot(X.numpy(), y.numpy(), 'o')\nplt.ylabel('y')\nplt.xlabel('x')\n\n\n# In[82]:\n\n\nclass LR(nn.Module):\n def __init__(self, input_size, output_size):\n super().__init__()\n self.linear = nn.Linear(input_size, output_size)\n def forward(self, x):\n pred = self.linear(x)\n return pred\n\n\n# In[87]:\n\n\ntorch.manual_seed(100)\nmodel = LR(1,1)\nfor param in model.parameters():\n print(param)\n\n\n# In[41]:\n\n\n2 * -0.7767 + 0.6317\n\n\n# In[43]:\n\n\nmodel(torch.tensor(2.0).unsqueeze(-1)) \n\n\n# In[45]:\n\n\ntorch.tensor(2.0).shape\n\n\n# In[88]:\n\n\nplt.scatter(X,y)\nplt.scatter(X, model(X).detach().numpy())\n_ = plt.title(\"Random Model - Just initialized\")\n\n\n# In[48]:\n\n\n# Train the model.\n\n\n# In[89]:\n\n\ndef mse(y_hat, y): return ((y_hat-y)**2).mean()\n\n\n# In[150]:\n\n\ny_hat = model(X)\nloss = mse(y_hat, y)\nloss.backward()\nlr = .01\nprint('before updating parameters')\nfor param in model.parameters():\n print(param)\n\n\n# In[151]:\n\n\nwith torch.no_grad():\n for param in model.parameters():\n param.sub_(lr * param.grad)\n param.grad.zero_()\n \nprint('after')\nfor param in model.parameters():\n print(param)\n\n\n# In[92]:\n\n\nplt.scatter(X,y)\nplt.scatter(X, model(X).detach().numpy())\n_ = plt.title(\"Model after updating parameters\")\n\n\n# ## Putting this all together\n\n# In[153]:\n\n\nclass Linear_Model(nn.Module):\n def __init__(self, input_size, output_size):\n super().__init__()\n self.linear = nn.Linear(input_size, output_size)\n def forward(self, x):\n pred = self.linear(x)\n return pred\n\n\n# In[137]:\n\n\ndef do_training(model, num_epochs = 100):\n losses = []\n for i in range(num_epochs):\n y_hat = model(X)\n loss = mse(y_hat, y)\n losses.append(loss)\n if i % 20 == 0: print(i, loss)\n with torch.no_grad():\n loss.backward()\n for param in model.parameters():\n param.sub_(lr * param.grad)\n param.grad.zero_()\n return(model, losses)\n\n\n# In[144]:\n\n\ntorch.manual_seed(100)\nlm = Linear_Model(1,1)\nlr = .01\nfor param in model.parameters():\n print(param)\n\n\n# In[145]:\n\n\nepochs = 50\nlm, losses = do_training(lm, epochs)\n\n\n# In[146]:\n\n\nprint(lr)\n\n\n# In[147]:\n\n\nplt.scatter(X,y)\nplt.scatter(X, lm(X).detach().numpy())\n_ = plt.title(f\"Model after 200 epochs with learning_rate={lr}\")\n\n\n# In[148]:\n\n\nplt.plot(range(epochs), losses)\nplt.ylabel('Loss')\n_ = plt.xlabel('epoch')\n\n\n# ## Introducing Optimizer and Loss Functions\n\n# In[160]:\n\n\ntorch.manual_seed(100)\ncriterion = nn.MSELoss()\noptimizer = torch.optim.SGD(model.parameters(), lr = 0.01)\nlm = Linear_Model(1,1)\n\n\n# In[161]:\n\n\nepochs = 50\nlosses = []\nfor i in range(epochs):\n y_pred = model.forward(X)\n loss = criterion(y_pred, y)\n if i % 20 == 0: print(i, loss)\n #print(\"epoch:\", i, \"loss:\", loss.item())\n losses.append(loss)\n optimizer.zero_grad() # Note that torch.no_grad \n #and param.zero_grad is taken away\n loss.backward()\n optimizer.step()\n\n\n# ### Gradient\n\n# In[169]:\n\n\nx = torch.tensor(2., requires_grad = True)\ny = x**3\ny.backward()\nprint(x.grad)\n# derivateive wrt x will be 3x**2 \n# at x =2, derivative = 12\n\n\n# In[172]:\n\n\nx = torch.tensor(2., requires_grad = True)\ny = torch.tensor(4., requires_grad = True)\nz = x**3 + 2*(y**2)\nz.backward()\nprint(x.grad)\nprint(y.grad)\n# derivateive wrt x will 
be 3x**2\n# at x = 2, derivative = 12\n# likewise dz/dy = 4y, so y.grad = 16 at y = 4\n\n\n# In[ ]:\n\n\n\n\n" ]
[ [ "torch.nn.Linear", "torch.nn.MSELoss", "matplotlib.pyplot.xlabel", "torch.no_grad", "matplotlib.pyplot.title", "torch.manual_seed", "torch.tensor", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.scatter", "torch.randn" ] ]
ACIL-Group/DVHA
[ "76f120e5b219f084e48383a59139438cc029bc4a" ]
[ "src/processing/gather_raw.py" ]
[ "\"\"\"\n Copyright 2019 Islam Elnabarawy\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\nimport csv\nimport itertools as it\nimport logging\nimport multiprocessing as mp\nimport os\n\nimport numpy as np\nfrom prettytable import PrettyTable\nfrom sklearn.metrics import adjusted_rand_score\n\nimport config\nfrom run import run_commands, make_parser\n\n__author__ = \"Islam Elnabarawy\"\n\nlogger = logging.getLogger(__name__)\n\ndescription = \"Gather run results from a directory using raw TopoART csv files instead of bin files\"\n\n\ndef process_row(row, targets):\n row_data = np.array(row, dtype=np.float32)\n ari = adjusted_rand_score(targets, row_data[7:])\n return row_data[:4], np.concatenate((row_data[5:7], [ari]))\n\n\ndef get_results(inputs_fname, targets_fname, module_ix, map_fn=it.starmap):\n targets = np.loadtxt(targets_fname, delimiter=',')\n with open(inputs_fname, 'r') as f:\n reader = csv.reader(f)\n params, results = zip(*map_fn(\n process_row, zip(filter(lambda row: int(row[4]) == module_ix, reader), it.repeat(targets))\n ))\n return np.array(params), np.array(results)\n\n\ndef gather(args):\n input_dir = args.input_dir\n targets_dir = args.targets_dir\n num_runs = args.num_runs\n module_ix = args.topo_module - 1 # convert to module ID to 0-based indexing\n\n x = PrettyTable()\n x.field_names = [\n 'Dataset', 'ARI Mean', 'ARI Std',\n 'nCat Mean', 'nCat Std', 'nClr Mean', 'nClr Std'\n ]\n\n writer = csv.writer(open(args.output_file, 'w'), dialect='excel')\n writer.writerow(x.field_names)\n\n pool = mp.Pool()\n\n for dataset in sorted(os.listdir(input_dir)):\n logger.info(\"Gathering dataset: %s\", dataset)\n\n input_fnames = [\"{0}/{1}/{1}-{2}.csv\".format(input_dir, dataset, ix + 1) for ix in range(num_runs)]\n target_fnames = [\"{0}/{1}/{2}.txt\".format(targets_dir, config.rand_seeds[ix], dataset) for ix in\n range(num_runs)]\n all_data = list(pool.starmap(\n get_results, zip(input_fnames, target_fnames, it.repeat(module_ix))\n ))\n all_results = np.array([data[1] for data in all_data], dtype=np.float32)\n\n # get the algorithm parameters from the first set of results\n param_list = all_data[0][0]\n\n ncat_results = all_results[:, :, 0]\n nclr_results = all_results[:, :, 1]\n perf_results = all_results[:, :, 2]\n\n avg_perf = perf_results.mean(axis=0)\n std_perf = perf_results.std(axis=0)\n avg_ncat = ncat_results.mean(axis=0)\n std_ncat = ncat_results.std(axis=0)\n avg_nclr = nclr_results.mean(axis=0)\n std_nclr = nclr_results.std(axis=0)\n\n # do a lexsort by best performance first, then smallest ncat, then smallest param values\n best_ix = np.lexsort(\n [param_list[:, ix] for ix in range(param_list.shape[1])] +\n [avg_nclr, avg_ncat, -avg_perf]\n )[0]\n\n row = [\n dataset,\n avg_perf[best_ix], std_perf[best_ix],\n avg_ncat[best_ix], std_ncat[best_ix],\n avg_nclr[best_ix], std_nclr[best_ix],\n ]\n x.add_row(row)\n writer.writerow(row)\n\n print(x)\n\n pool.close()\n\n\ndef setup_parser(parser):\n parser.add_argument(\"input_dir\", type=str, help=\"The path to the directory 
to gather results from\")\n parser.add_argument(\"targets_dir\", type=str, help=\"The path to the directory to get target labels from\")\n parser.add_argument(\"output_file\", type=str, help=\"Name of the CSV file to write the gathered results to\")\n parser.add_argument(\"--num-runs\", \"-n\", type=int, default=30,\n help=\"The number of runs to gather results from\")\n parser.add_argument(\"--topo-module\", \"-m\", type=int, default=2,\n help=\"Which TopoART module to gather results for (starting from 1)\")\n parser.set_defaults(run=gather)\n\n\ndef main():\n \"\"\"\n Entry point\n \"\"\"\n # parse CLI args\n parser = make_parser(description)\n setup_parser(parser)\n args = parser.parse_args()\n\n run_commands(args, logger)\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.concatenate", "numpy.array", "numpy.loadtxt", "sklearn.metrics.adjusted_rand_score" ] ]
anhquan0412/fastai_v1
[ "1f73d4a42dfbdbf1d9ce065b9cb97bb60fbbf957" ]
[ "my_note/nb_001b.py" ]
[ "\n #################################################\n ### THIS FILE WAS AUTOGENERATED! DO NOT EDIT! ###\n #################################################\n # file to edit: dev_nb/001b_fit.ipynb\n\nimport pickle, gzip, torch, math, numpy as np, torch.nn.functional as F\nfrom pathlib import Path\nfrom IPython.core.debugger import set_trace\nfrom dataclasses import dataclass\nfrom typing import Any, Collection, Callable, NewType, List, Union, TypeVar, Optional\nfrom functools import partial, reduce\nfrom numbers import Number\n\nfrom numpy import array\nfrom torch import nn, optim, tensor, Tensor\nfrom torch.utils.data import TensorDataset, Dataset, DataLoader\n\nRank0Tensor = NewType('OneEltTensor', Tensor)\nLossFunction = Callable[[Tensor, Tensor], Rank0Tensor]\nModel = nn.Module\n\ndef is_listy(x:Any)->bool: return isinstance(x, (tuple,list))\n\ndef loss_batch(model:Model, xb:Tensor, yb:Tensor,\n loss_fn:LossFunction, opt:optim.Optimizer=None):\n \"calculate loss for the batch `xb,yb` and learn from the gradients with `opt`\"\n if not is_listy(xb): xb = [xb]\n if not is_listy(yb): yb = [yb]\n loss = loss_fn(model(*xb), *yb)\n\n if opt is not None:\n loss.backward()\n opt.step()\n opt.zero_grad()\n\n return loss.item(), len(yb)\n\ndef fit(epochs:int, model:Model, loss_fn:LossFunction,\n opt:optim.Optimizer, train_dl:DataLoader, valid_dl:DataLoader):\n \"train `model` on `train_dl` with `optim` then validate against `valid_dl`\"\n for epoch in range(epochs):\n model.train()\n for xb,yb in train_dl: loss,_ = loss_batch(model, xb, yb, loss_fn, opt)\n\n model.eval()\n with torch.no_grad():\n losses,nums = zip(*[loss_batch(model, xb, yb, loss_fn)\n for xb,yb in valid_dl])\n val_loss = np.sum(np.multiply(losses,nums)) / np.sum(nums)\n\n print(epoch, val_loss)\n\nLambdaFunc = Callable[[Tensor],Tensor]\nclass Lambda(nn.Module):\n \"an easy way to create a pytorch layer for a simple `func`\"\n def __init__(self, func:Callable[[Tensor],Tensor]):\n \"create a layer that simply calls `func` with `x`\"\n super().__init__()\n self.func=func\n\n def forward(self, x): return self.func(x)\n\ndef noop(x): return x\n\ndef ResizeBatch(*size:int) -> Tensor:\n \"Layer that resizes x to `size`, good for connecting mismatched layers\"\n return Lambda(lambda x: x.view((-1,)+size))\ndef Flatten()->Tensor:\n \"flattens `x` to a single dimension, often used at the end of a model\"\n return Lambda(lambda x: x.view((x.size(0), -1)))\ndef PoolFlatten()->nn.Sequential:\n \"apply `nn.AdaptiveAvgPool2d` to `x` and then flatten the result\"\n return nn.Sequential(nn.AdaptiveAvgPool2d(1), Flatten())\n\ndef conv2d(ni:int, nf:int, ks:int=3, stride:int=1, padding:int=None, bias=False) -> nn.Conv2d:\n \"create `nn.Conv2d` layer: `ni` inputs, `nf` outputs, `ks` kernel size. `padding` defaults to `k//2`\"\n if padding is None: padding = ks//2\n return nn.Conv2d(ni, nf, kernel_size=ks, stride=stride, padding=padding, bias=bias)\n\ndef conv2d_relu(ni:int, nf:int, ks:int=3, stride:int=1,\n padding:int=None, bn:bool=False) -> nn.Sequential:\n \"create a `conv2d` layer with `nn.ReLU` activation and optional(`bn`) `nn.BatchNorm2d`\"\n layers = [conv2d(ni, nf, ks=ks, stride=stride, padding=padding), nn.ReLU()]\n if bn: layers.append(nn.BatchNorm2d(nf))\n return nn.Sequential(*layers)\n\ndef conv2d_trans(ni:int, nf:int, ks:int=2, stride:int=2, padding:int=0) -> nn.ConvTranspose2d:\n \"create `nn.nn.ConvTranspose2d` layer: `ni` inputs, `nf` outputs, `ks` kernel size. 
`padding` defaults to 0\"\n return nn.ConvTranspose2d(ni, nf, kernel_size=ks, stride=stride, padding=padding)\n\n@dataclass\nclass DatasetTfm(Dataset):\n \"Applies `tfm` to `ds`\"\n ds: Dataset\n tfm: Callable = None\n\n def __len__(self): return len(self.ds)\n\n def __getitem__(self,idx:int):\n \"Apply `tfm` to `x` and return `(x[idx],y[idx])`\"\n x,y = self.ds[idx]\n if self.tfm is not None: x = self.tfm(x)\n return x,y\n\ndef simple_cnn(actns:Collection[int], kernel_szs:Collection[int],\n strides:Collection[int]) -> nn.Sequential:\n \"CNN with `conv2d_relu` layers defined by `actns`, `kernel_szs` and `strides`\"\n layers = [conv2d_relu(actns[i], actns[i+1], kernel_szs[i], stride=strides[i])\n for i in range(len(strides))]\n layers.append(PoolFlatten())\n return nn.Sequential(*layers)\n\ndef ifnone(a:bool,b:Any):\n \"`a` if its not None, otherwise `b`\"\n return b if a is None else a\n\ndefault_device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')\nTensors = Union[Tensor, Collection['Tensors']]\n\ndef to_device(b:Tensors, device:torch.device):\n \"ensure `b` is on `device`\"\n device = ifnone(device, default_device)\n if is_listy(b): return [to_device(o, device) for o in b]\n return b.to(device)\n\n@dataclass\nclass DeviceDataLoader():\n \"`DataLoader` that ensures batches from `dl` are on `device`\"\n dl: DataLoader\n device: torch.device\n\n def __len__(self) -> int: return len(self.dl)\n def proc_batch(self,b:Tensors): return to_device(b, self.device)\n\n def __iter__(self)->Tensors:\n \"ensure batches from `dl` are on `device` as we iterate\"\n self.gen = map(self.proc_batch, self.dl)\n return iter(self.gen)\n\n @classmethod\n def create(cls, *args, device:torch.device=default_device, **kwargs): return cls(DataLoader(*args, **kwargs), device=device)\n\ndef fit(epochs:int, model:Model, loss_fn:LossFunction,\n opt:optim.Optimizer, train_dl:DataLoader, valid_dl:DataLoader) -> None:\n \"train `model` for `epochs` with `loss_fun` and `optim`\"\n for epoch in range(epochs):\n model.train()\n for xb,yb in train_dl: loss,_ = loss_batch(model, xb, yb, loss_fn, opt)\n\n model.eval()\n with torch.no_grad():\n losses,nums = zip(*[loss_batch(model, xb, yb, loss_fn)\n for xb,yb in valid_dl])\n val_loss = np.sum(np.multiply(losses,nums)) / np.sum(nums)\n\n print(epoch, val_loss)\n\nTItem = TypeVar('TItem')\nTfmCallable = Callable[[TItem],TItem]\nTfmList = Union[TfmCallable, Collection[TfmCallable]]\nTfms = Optional[TfmList]\n\n@dataclass\nclass DataBunch():\n \"bind `train_dl`, `valid_dl` to `device`\"\n train_dl:DataLoader\n valid_dl:DataLoader\n device:torch.device=None\n\n @classmethod\n def create(cls, train_ds:Dataset, valid_ds:Dataset, bs:int=64,\n train_tfm:Tfms=None, valid_tfm:Tfms=None, device:torch.device=None, **kwargs):\n return cls(DeviceDataLoader.create(DatasetTfm(train_ds, train_tfm), bs, shuffle=True, device=device, **kwargs),\n DeviceDataLoader.create(DatasetTfm(valid_ds, valid_tfm), bs*2, shuffle=False, device=device, **kwargs),\n device=device)\n\nclass Learner():\n \"train `model` on `data` for `epochs` using learning rate `lr` and `opt_fn` to optimize training\"\n def __init__(self, data:DataBunch, model:Model):\n self.data,self.model = data,to_device(model, data.device)\n\n def fit(self, epochs, lr, opt_fn=optim.SGD):\n opt = opt_fn(self.model.parameters(), lr=lr)\n loss_fn = F.cross_entropy\n fit(epochs, self.model, loss_fn, opt, self.data.train_dl, self.data.valid_dl)" ]
[ [ "torch.device", "torch.nn.Sequential", "numpy.sum", "torch.no_grad", "torch.nn.ConvTranspose2d", "torch.nn.BatchNorm2d", "torch.nn.ReLU", "numpy.multiply", "torch.nn.Conv2d", "torch.cuda.is_available", "torch.utils.data.DataLoader", "torch.nn.AdaptiveAvgPool2d" ] ]
marhofmann/pandapower
[ "a7903c5cc0e23b8b1d397c1fc707a49490c2b62d" ]
[ "pandapower/test/api/test_file_io.py" ]
[ "# -*- coding: utf-8 -*-\n\n# Copyright (c) 2016-2019 by University of Kassel and Fraunhofer Institute for Energy Economics\n# and Energy System Technology (IEE), Kassel. All rights reserved.\n\n\nimport os\nimport pytest\nimport copy\n\nimport pandas as pd\nimport pandapower as pp\nimport pandapower.topology as top\nfrom pandapower.test.toolbox import assert_net_equal, create_test_network, tempdir, net_in\nfrom pandapower.io_utils import collect_all_dtypes_df, restore_all_dtypes\nimport pandapower.networks as nw\nfrom pandapower.io_utils import PPJSONEncoder, PPJSONDecoder\nimport json\nimport numpy as np\n\n\ndef test_pickle(net_in, tempdir):\n filename = os.path.join(tempdir, \"testfile.p\")\n pp.to_pickle(net_in, filename)\n net_out = pp.from_pickle(filename)\n assert_net_equal(net_in, net_out)\n\n\ndef test_excel(net_in, tempdir):\n filename = os.path.join(tempdir, \"testfile.xlsx\")\n pp.to_excel(net_in, filename)\n net_out = pp.from_excel(filename)\n assert_net_equal(net_in, net_out)\n\n # test in user_pf_options are equal\n pp.set_user_pf_options(net_in, tolerance_mva=1e3)\n pp.to_excel(net_in, filename)\n net_out = pp.from_excel(filename)\n assert_net_equal(net_in, net_out)\n assert net_out.user_pf_options == net_in.user_pf_options\n\n\ndef test_json_basic(net_in, tempdir):\n # tests the basic json functionality with the encoder/decoder classes\n filename = os.path.join(tempdir, \"testfile.json\")\n with open(filename, 'w') as fp:\n json.dump(net_in, fp, cls=PPJSONEncoder)\n\n with open(filename) as fp:\n net_out = json.load(fp, cls=PPJSONDecoder)\n pp.convert_format(net_out)\n\n assert_net_equal(net_in, net_out)\n\n\ndef test_json(net_in, tempdir):\n filename = os.path.join(tempdir, \"testfile.json\")\n try:\n net_geo = copy.deepcopy(net_in)\n # make GeodataFrame\n from shapely.geometry import Point, LineString\n from fiona.crs import from_epsg\n import geopandas as gpd\n\n for tab in ('bus_geodata', 'line_geodata'):\n if tab == 'bus_geodata':\n geometry = net_geo[tab].apply(lambda x: Point(x.x, x.y), axis=1)\n else:\n geometry = net_geo[tab].coords.apply(LineString)\n net_geo[tab] = gpd.GeoDataFrame(net_geo[tab], geometry=geometry, crs=from_epsg(4326))\n\n pp.to_json(net_geo, filename)\n net_out = pp.from_json(filename)\n assert_net_equal(net_geo, net_out)\n except (NameError, ImportError):\n pass\n\n # check if restore_all_dtypes works properly:\n net_in.line['test'] = 123\n net_in.res_line['test'] = 123\n pp.to_json(net_in, filename)\n net_out = pp.from_json(filename)\n assert_net_equal(net_in, net_out)\n\n\ndef test_type_casting_json(net_in, tempdir):\n filename = os.path.join(tempdir, \"testfile.json\")\n net_in.sn_kva = 1000\n pp.to_json(net_in, filename)\n net = pp.from_json(filename)\n assert_net_equal(net_in, net)\n\n\ndef test_sqlite(net_in, tempdir):\n filename = os.path.join(tempdir, \"testfile.db\")\n pp.to_sqlite(net_in, filename)\n net_out = pp.from_sqlite(filename)\n assert_net_equal(net_in, net_out)\n\n\ndef test_convert_format(): # TODO what is this thing testing ?\n net = pp.from_pickle(os.path.join(pp.pp_dir, \"test\", \"api\", \"old_net.p\"))\n pp.runpp(net)\n assert net.converged\n\n\ndef test_restore_all_dtypes():\n net = create_test_network()\n pp.runpp(net)\n net['res_test'] = pd.DataFrame(columns=['test'], data=[1, 2, 3])\n net['test'] = pd.DataFrame(columns=['test'], data=[1, 2, 3])\n net.line['test'] = 123\n net.res_line['test'] = 123\n net.bus['test'] = 123\n net.res_bus['test'] = 123\n net.res_load['test'] = 123\n dtdf = 
collect_all_dtypes_df(net)\n restore_all_dtypes(net, dtdf)\n\n\ndef test_to_json_dtypes(tempdir):\n filename = os.path.join(tempdir, \"testfile.json\")\n net = create_test_network()\n pp.runpp(net)\n net['res_test'] = pd.DataFrame(columns=['test'], data=[1, 2, 3])\n net['test'] = pd.DataFrame(columns=['test'], data=[1, 2, 3])\n net.line['test'] = 123\n net.res_line['test'] = 123\n net.bus['test'] = 123\n net.res_bus['test'] = 123\n net.res_load['test'] = 123\n pp.to_json(net, filename)\n net1 = pp.from_json(filename)\n assert_net_equal(net, net1)\n\n\ndef test_json_encoding_decoding():\n net = nw.mv_oberrhein()\n net.tuple = (1, \"4\")\n net.mg = top.create_nxgraph(net)\n s = set(['1', 4])\n t = tuple(['2', 3])\n f = frozenset(['12', 3])\n a = np.array([1., 2.])\n d = {\"a\": net, \"b\": f}\n json_string = json.dumps([s, t, f, net, a, d], cls=PPJSONEncoder)\n s1, t1, f1, net1, a1, d1 = json.loads(json_string, cls=PPJSONDecoder)\n\n assert s == s1\n assert t == t1\n assert f == f1\n assert net.tuple == net1.tuple\n assert np.allclose(a, a1)\n\n # TODO line_geodata isn't the same since tuples inside DataFrames are converted to lists (see test_json_tuple_in_dataframe)\n assert pp.nets_equal(net, net1, exclude_elms=[\"line_geodata\"])\n assert pp.nets_equal(d[\"a\"], d1[\"a\"], exclude_elms=[\"line_geodata\"])\n assert d[\"b\"] == d1[\"b\"]\n assert_graphs_equal(net.mg, net1.mg)\n\n\ndef assert_graphs_equal(mg1, mg2):\n edge1 = mg1.edges(data=True)\n edge2 = mg2.edges(data=True)\n for (u, v, data), (u1, v1, data1) in zip(sorted(edge1), sorted(edge2)):\n assert u == u1\n assert v == v1\n if \"json_id\" in data1:\n del data1[\"json_id\"]\n if \"json_key\" in data1:\n del data1[\"json_key\"]\n assert data == data1\n\n\[email protected]\ndef test_json_tuple_in_pandas():\n s = pd.Series()\n s[\"test\"] = [(1, 2), (3, 4)]\n json_string = json.dumps(s, cls=PPJSONEncoder)\n s1 = json.loads(json_string, cls=PPJSONDecoder)\n assert (type(s[\"test\"][0]) == type(s1[\"test\"][0]))\n\n\nif __name__ == \"__main__\":\n pytest.main([\"test_file_io.py\", \"-x\"])\n" ]
[ [ "numpy.allclose", "pandas.DataFrame", "numpy.array", "pandas.Series" ] ]
tihoson/seawater
[ "024206f6944fe5ea977de6d17fa967a3d602a4e6" ]
[ "data.py" ]
[ "import pandas as pd\nimport gsw\n\nDATES_COLUMNS = ['Date', 'Time']\n\nclass Data:\n def __init__(self, path):\n self._read(path)\n self._clean()\n\n def _read(self, path):\n self.data = pd.read_csv(path, sep='\\s+', parse_dates=DATES_COLUMNS)\n for column in self.data.columns:\n if column not in DATES_COLUMNS:\n self.data[column].astype(float)\n\n def _clean(self):\n for_remove = []\n cur = 0\n\n for i in range(len(self.data)):\n if self.data.iloc[cur]['Pres'] > self.data.iloc[i]['Pres']:\n for_remove.append(self.data.index[i])\n continue\n else:\n removed = False\n\n for column in self.data.columns:\n if column not in DATES_COLUMNS:\n if self.data.iloc[i][column] < 1e-3:\n for_remove.append(self.data.index[i])\n removed = True\n break\n\n if not removed:\n cur = i\n\n self.data = self.data.drop(for_remove)\n\n def add_columns(self, columns):\n for column_name in columns:\n self.data[column_name] = columns[column_name]\n self.data[column_name].astype(float)\n\n\n def calc_teos10_columns(self, lat, lng):\n # Practical Salinity\n SP = gsw.SP_from_C(self.data['Cond'], self.data['Temp'], self.data['Pres'])\n # Absolute Salinity\n SA = gsw.SA_from_SP_Baltic(SP, lng, lat)\n # Conservative Temperature\n CT = gsw.CT_from_t(SA, self.data['Temp'], self.data['Pres'])\n # Sigma(density) with reference pressure of 0 dbar\n sigma = gsw.sigma0(SA, CT)\n # Depth \n depth = list(map(abs, gsw.z_from_p(self.data['Pres'], lat)))\n\n return {'PracticalSalinity' : SP,\n 'AbsoluteSalinity' : SA,\n 'ConservativeTemperature' : CT,\n 'Sigma(density)' : sigma,\n 'Depth' : depth}\n\n def save_to_file(self, path):\n with open(path, 'w') as f:\n f.write(self._get_csv_string())\n\n def _get_csv_string(self):\n new_data = self.data.copy()\n new_data['Date'] = new_data['Date'].apply(lambda x: x.strftime('%d-%m-%Y'))\n new_data['Time'] = new_data['Time'].apply(lambda x: x.strftime('%H:%M:%S.%f')[:11])\n return new_data.to_csv(sep=',', encoding='utf-8', index=False, float_format='%.3f')" ]
[ [ "pandas.read_csv" ] ]
Jason-ccm/covid19-forecast-hub-evaluation
[ "8e55ab8a9604cac71c7385f851fdf5e1a597aadb" ]
[ "power_rankings.py" ]
[ "from collections import defaultdict\nimport glob\n\nimport numpy as np\nimport pandas as pd\n\n\n\"\"\"\nCompute a \"Power Rankings\" based on the mean 'N weeks ahead' percentiles,\n where N is 1-6 weeks ahead. This includes both US and state-by-state rankings.\n\nThere are a total of 12 summaries: US + state-by-state for each of 1-6 weeks ahead.\n For each model, we take its mean percentile rank across all summaries.\n If a model is the best in every summary, it will have a mean percentile of 0.\n If a model is the median rank in every summary, it will have a mean percentile of 0.5.\n The lower the mean percentile, the better the model.\n\nIf a model does not appear in a summary (e.g. it does not make 6 week ahead forecasts),\n it does not get included in the mean percentile. But a model must be included in at least\n 6 summaries to appear in the Power Rankings.\n\n\"\"\"\n\nprint('========================================')\nprint('Power Rankings')\nprint('========================================')\nmodel_to_percentiles = defaultdict(list)\nfor fname in glob.glob('summary/summary_[1-6]_weeks_ahead*.csv'):\n df = pd.read_csv(fname, index_col=0)\n # Only count models with 3 or more entries in summary\n df_filt = df[[c for c in df.columns if 'mean_sq_abs_error' not in c]]\n df_filt = df_filt[(~pd.isnull(df_filt)).sum(axis=1) >= 3]\n\n n = len(df_filt) - 1\n for rank, model_name in enumerate(df_filt.index):\n model_to_percentiles[model_name].append(rank / n)\n\nmodel_to_mean_percentile = {}\nfor model, percentiles in model_to_percentiles.items():\n if len(percentiles) < 6:\n continue # only include models in 6 or more summaries\n model_to_mean_percentile[model] = (np.mean(percentiles), len(percentiles))\n\ndf_ranks = pd.DataFrame(model_to_mean_percentile,\n index=['mean_percentile', 'num_summaries']).T.sort_values('mean_percentile')\ndf_ranks['num_summaries'] = df_ranks['num_summaries'].astype(int)\nprint(df_ranks)\nout_fname = 'summary/power_rankings.csv'\ndf_ranks.to_csv(out_fname)\nprint('Saved rankings to:', out_fname)\n" ]
[ [ "pandas.isnull", "pandas.DataFrame", "pandas.read_csv", "numpy.mean" ] ]
aasquier/AI_Tower_Defense
[ "9430fade5083e61620d6af5e6f09502a671271d9" ]
[ "AI_Tower_Defense/src/agent/qLearningAgent.py" ]
[ "import numpy as np\n\nfrom constants.gameConstants import *\nfrom constants.aiConstants import *\n\nclass Qdata:\n\n def __init__(self):\n self.qTable = np.zeros((NUMBER_OF_TOWERS, STARTING_POSITIONS))\n # self.towers = []\n self.gameScores = []\n # self.currentScore = 0\n\n\nclass QLearningAgent:\n\n def __init__(self):\n self.record = Qdata()\n \n \n def initTowers(self):\n tempTowers = list(TOWER_GRID)\n np.random.shuffle(tempTowers)\n self.record.towers = tempTowers[:NUMBER_OF_STARTING_TOWERS]\n\n\n def updateRecord(self):\n return" ]
[ [ "numpy.random.shuffle", "numpy.zeros" ] ]