repo_name (string, lengths 6–130) | hexsha (list) | file_path (list) | code (list) | apis (list) |
---|---|---|---|---|
cksisu/comps | [
"70cc87b143a98f6f58e233ed35da6a65099e5a12"
]
| [
"comps/conftest.py"
]
| [
"import os\nfrom pathlib import Path\n\nimport pytest\n\nimport numpy as np\nimport pandas as pd\n\[email protected]\ndef pd_bank_data():\n \"\"\"Returns bank data in pandas DataFrame for testing.\n\n Returns\n -------\n pandas : DataFrame \n Pandas DataFrame with bank data.\n \"\"\"\n return pd.read_csv(\n Path(__file__).resolve.parent.joinpath(\"datasets\", \"bank\", \"bank.csv\"),\n sep=\";\", header=0).convert_dtypes()\n \n\[email protected]\ndef int_array():\n \"\"\"Returns 10x4 NumPy array for testing.\n\n Returns\n -------\n numpy : Array \n 10x4 array with int 0 to 9 for each value.\n \"\"\"\n return np.array([\n [0, 1, 2, 3], [3, 2, 1, 0], [1, 3, 5, 7], [7, 5, 3, 1], [2, 4, 6, 8],\n [0, 1, 2, 3], [8, 6, 4, 2], [1, 3, 5, 7], [3, 5, 7, 9], [0, 1, 2, 3], ],\n np.int64)\n\n"
]
| [
[
"numpy.array"
]
]
|
minhnn-tiny/mycroft-precise | [
"5c9b9a3c1e79b1724e3458b5f487de7a4d9d83ad"
]
| [
"test/scripts/test_listen.py"
]
| [
"import numpy as np\nimport os\nimport shutil\n\nimport wave\nfrom prettyparse import Usage\n\nfrom precise_runner import PreciseEngine, PreciseRunner\nfrom precise_runner.runner import ListenerEngine\nfrom precise.network_runner import Listener\nfrom precise.scripts.base_script import BaseScript\nfrom precise.util import buffer_to_audio, load_audio, save_audio\nfrom precise.vectorization import vectorize\nfrom precise.params import pr, inject_params, ListenerParams\n\n\ndef clean_folder(folder):\n for filename in os.listdir(folder):\n file_path = os.path.join(folder, filename)\n try:\n if os.path.isfile(file_path) or os.path.islink(file_path):\n os.unlink(file_path)\n elif os.path.isdir(file_path):\n shutil.rmtree(file_path)\n except Exception as e:\n print('Failed to delete %s. Reason: %s' % (file_path, e))\n\n\nclass EvaluateModel:\n usage = Usage('''\n Evaluate a model by the long wav file\n\n :model str\n Either Keras (.net) or TensorFlow (.pb) model to test\n\n :testFile str\n A long file to evaluate\n\n ...\n ''')\n\n def __init__(self, model: str, data_path: str, test_file: str, lable_file: str, params: ListenerParams):\n self.listener = Listener.find_runner(model)(model)\n self.raw_data = load_audio(test_file)\n self.params = params\n self.buffer_sample = params.buffer_samples\n self.hop_sample = params.hop_samples\n self.window_samples = params.window_samples\n self.num_predicts = (self.raw_data.shape[0] - self.window_samples) // self.hop_sample + 1\n self.lenght_audio = self.raw_data.shape[0] / 16000 / 60\n self.counter = 0\n self.label_file = label_file\n self.data_path = data_path\n\n def predict(self, output_file: str):\n output = []\n for i in range(self.num_predicts):\n data = self.raw_data[i*self.hop_sample : i*self.hop_sample+self.buffer_sample]\n feature = vectorize(data)\n predict = self.listener.predict(np.expand_dims(feature, axis=0))\n prob = np.squeeze(predict)\n if prob > 1 - self.params.threshold_center:\n output.append(self.get_time(i))\n np.savetxt(output_file, np.array(output), fmt='%6.4f')\n\n def process_output(self, output_file: str, processed_output_file: str, offset: float):\n output = np.loadtxt(output_file)\n processed_output = [output[0]]\n for i in range(1, output.shape[0]):\n if output[i] - output[i-1] > offset:\n processed_output.append(output[i])\n self.process_output_file = processed_output_file\n np.savetxt(processed_output_file, np.array(processed_output), fmt='%6.4f')\n\n def visuallize(self, predict_file: str, label_file: str, offset: float):\n predict = np.loadtxt(predict_file)\n label = np.loadtxt(label_file)\n TP, FA, FR = self._TP_FA_FR_cases(predict, label, offset)\n\n\n def save_predict_cases(self, lenght_audio: float, offset: float):\n predict = np.loadtxt(self.process_output_file)\n label = np.loadtxt(self.label_file)\n TP, FA, FR = self._TP_FA_FR_cases(predict, label, offset)\n prepend = lenght_audio - self.params.buffer_t\n clean_folder(os.path.join(self.data_path, 'true-positive'))\n clean_folder(os.path.join(self.data_path, 'false-alarm'))\n clean_folder(os.path.join(self.data_path, 'false-reject'))\n for i in range(TP.shape[0]):\n t = TP[i]\n index = int((t-prepend)*self.params.sample_rate)\n data = self.raw_data[index : index + self.params.buffer_samples]\n file_path = os.path.join(self.data_path, 'true-positive', f\"TP_{t//60:.0f}_{t%60:.0f}.wav\")\n save_audio(file_path, data)\n\n for i in range(FA.shape[0]):\n t = FA[i]\n index = int((t-prepend)*self.params.sample_rate)\n data = self.raw_data[index : index + 
self.params.buffer_samples]\n file_path = os.path.join(self.data_path, 'false-alarm', f\"FA_{t//60:.0f}_{t%60:.0f}.wav\")\n save_audio(file_path, data)\n\n for i in range(FR.shape[0]):\n t = FR[i]\n index = int((t-prepend)*self.params.sample_rate)\n data = self.raw_data[index : index + self.params.buffer_samples]\n file_path = os.path.join(self.data_path, 'false-reject', f\"FR_{t//60:.0f}_{t%60:.0f}.wav\")\n save_audio(file_path, data)\n\n def _TP_FA_FR_cases(self, predict: np.array, label: np.array, offset: float):\n TP = [] # True Positive\n FA = predict.copy() # False Alarm\n FR = label.copy() # False Reject\n for p in predict:\n for l in label:\n if abs(p - l) < offset:\n TP.append(p)\n FA = np.delete(FA, np.argwhere(FA == p))\n FR = np.delete(FR, np.argwhere(FR == l))\n continue\n return np.array(TP), FA, FR\n\n def get_time(self, counter):\n return counter * self.hop_sample / self.params.sample_rate\n\n\nif __name__ == '__main__':\n data_path = './data/test/test-case-00'\n model_file = './models/hey-sunshine-CRNN/hey_sunshine.net'\n test_file = os.path.join(data_path, 'test_case-00.wav')\n label_file = os.path.join(data_path, 'test_case-00.txt')\n output_file = os.path.join(data_path, 'output.npy')\n processed_output_file = os.path.join(data_path, 'processed_output.npy')\n\n tp_folder = os.path.join(data_path, 'true-positive')\n fa_folder = os.path.join(data_path, 'false-alarm')\n fr_folder = os.path.join(data_path, 'false-reject')\n os.makedirs(tp_folder, exist_ok=True)\n os.makedirs(fa_folder, exist_ok=True)\n os.makedirs(fr_folder, exist_ok=True)\n\n evaluator = EvaluateModel(model_file, data_path, test_file, label_file, pr)\n evaluator.predict(output_file)\n evaluator.process_output(output_file, processed_output_file, 2)\n evaluator.save_predict_cases(2, 2)\n output = np.loadtxt(processed_output_file)\n label = np.loadtxt(label_file)\n print('predict cases: ', output.shape)\n print('label cases: ', label.shape)\n\n TP, FA, FR = evaluator._TP_FA_FR_cases(output, label, 1.5)\n # print(np.array([f\"{t//60:.0f}:{t%60:.0f}\" for t in FA]))\n print('False reject rate: ', (FR.shape[0] / (FR.shape[0] + TP.shape[0])))\n print('False alarm rate: ', (FA.shape[0] / (evaluator.lenght_audio/60)))\n"
]
| [
[
"numpy.array",
"numpy.loadtxt",
"numpy.argwhere",
"numpy.squeeze",
"numpy.expand_dims"
]
]
|
charignon/financier | [
"5b5bc89a97c265376bbc134911419a06d938b7dd"
]
| [
"financier/visualizer.py"
]
| [
"#!/usr/bin/env python3\nfrom typing import Any, Optional\n\n\ndef plot_income(df: Any, fname: Optional[str] = None) -> None:\n \"\"\"Plot an offer df, if fname is None save to a file named fname\"\"\"\n from matplotlib import pyplot as plt # type: ignore\n\n ax = df.plot(kind=\"bar\", figsize=(20, 6), fontsize=15)\n ax.set_yticks((range(0, int(df.max()[0]), 50000)), minor=True)\n ax.grid(True, which=\"minor\", axis=\"y\")\n ax.set_ylabel(\"$ Amount\")\n plt.tight_layout()\n for tick in ax.get_xticklabels():\n tick.set_rotation(45)\n if fname is None:\n plt.show()\n else:\n plt.savefig(fname)\n"
]
| [
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.tight_layout"
]
]
|
liuyuan000/yolox_sar | [
"007e493010d2ef2c3996e936c250bff89741cd8c"
]
| [
".history/tools/demo_20211008151155.py"
]
| [
"#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) Megvii, Inc. and its affiliates.\n\nimport argparse\nimport os\nimport time\nfrom loguru import logger\n\nimport cv2\n\nimport torch\nimport sys\nsys.path.append(\"D:\\mynetwork\\YOLOX-main\")\nfrom yolox.data.data_augment import ValTransform\n# from yolox.data.datasets import COCO_CLASSES\nfrom yolox.exp import get_exp\nfrom yolox.utils import fuse_model, get_model_info, postprocess, vis\n\nIMAGE_EXT = [\".jpg\", \".jpeg\", \".webp\", \".bmp\", \".png\", \".tif\"]\nCLASS_NAMES = (\"A220\",\"A330\",\"A320_321\",\"Boeing737/800\",\"Boeing787\",\"ARJ21\",\"other\")\n\ndef make_parser():\n parser = argparse.ArgumentParser(\"YOLOX Demo!\")\n parser.add_argument(\n \"--demo\", default=\"image\", help=\"demo type, eg. image, video and webcam\"\n )\n parser.add_argument(\"--save_txt\", default=\"result_txt\")\n parser.add_argument(\"-expn\", \"--experiment-name\", type=str, default=None)\n parser.add_argument(\"-n\", \"--name\", type=str, default=\"yolox-s\", help=\"model name\")\n\n parser.add_argument(\"-pic_path\", type=str)\n parser.add_argument(\"-save_xml\", type=str)\n parser.add_argument(\n \"--path\", default=\"./assets/\", help=\"path to images or video\"\n )\n parser.add_argument(\"--camid\", type=int, default=0, help=\"webcam demo camera id\")\n parser.add_argument(\n \"--save_result\",\n # action=\"store_true\",\n default=True,\n help=\"whether to save the inference result of image/video\",\n )\n\n # exp file\n parser.add_argument(\n \"-f\",\n \"--exp_file\",\n default=None,\n type=str,\n help=\"pls input your experiment description file\",\n )\n parser.add_argument(\"-c\", \"--ckpt\", default=\"checkpoint/best_ckpt.pth\", type=str, help=\"ckpt for eval\")\n parser.add_argument(\n \"--device\",\n default=\"cpu\",\n type=str,\n help=\"device to run our model, can either be cpu or gpu\",\n )\n parser.add_argument(\"--conf\", default=0.3, type=float, help=\"test conf\")\n parser.add_argument(\"--nms\", default=0.3, type=float, help=\"test nms threshold\")\n parser.add_argument(\"--tsize\", default=640, type=int, help=\"test img size\")\n parser.add_argument(\n \"--fp16\",\n dest=\"fp16\",\n default=False,\n action=\"store_true\",\n help=\"Adopting mix precision evaluating.\",\n )\n parser.add_argument(\n \"--legacy\",\n dest=\"legacy\",\n default=False,\n action=\"store_true\",\n help=\"To be compatible with older versions\",\n )\n parser.add_argument(\n \"--fuse\",\n dest=\"fuse\",\n default=False,\n action=\"store_true\",\n help=\"Fuse conv and bn for testing.\",\n )\n\n return parser\n\n\ndef get_image_list(path):\n image_names = []\n for maindir, subdir, file_name_list in os.walk(path):\n for filename in file_name_list:\n apath = os.path.join(maindir, filename)\n ext = os.path.splitext(apath)[1]\n if ext in IMAGE_EXT:\n image_names.append(apath)\n return image_names\n\n\nclass Predictor(object):\n def __init__(\n self,\n model,\n exp,\n # cls_names=COCO_CLASSES,\n cls_names =CLASS_NAMES,\n trt_file=None,\n decoder=None,\n device=\"cpu\",\n fp16=False,\n legacy=False,\n ):\n self.model = model\n self.cls_names = cls_names\n self.decoder = decoder\n self.num_classes = exp.num_classes\n self.confthre = exp.test_conf\n self.nmsthre = exp.nmsthre\n self.test_size = exp.test_size\n self.device = device\n self.fp16 = fp16\n self.preproc = ValTransform(legacy=legacy)\n if trt_file is not None:\n from torch2trt import TRTModule\n\n model_trt = TRTModule()\n model_trt.load_state_dict(torch.load(trt_file))\n\n x = torch.ones(1, 
3, exp.test_size[0], exp.test_size[1]).cuda()\n self.model(x)\n self.model = model_trt\n\n def inference(self, img):\n img_info = {\"id\": 0}\n if isinstance(img, str):\n img_info[\"file_name\"] = os.path.basename(img)\n img = cv2.imread(img)\n else:\n img_info[\"file_name\"] = None\n\n height, width = img.shape[:2]\n img_info[\"height\"] = height\n img_info[\"width\"] = width\n img_info[\"raw_img\"] = img\n\n ratio = min(self.test_size[0] / img.shape[0], self.test_size[1] / img.shape[1])\n img_info[\"ratio\"] = ratio\n\n img, _ = self.preproc(img, None, self.test_size)\n img = torch.from_numpy(img).unsqueeze(0)\n img = img.float()\n if self.device == \"gpu\":\n img = img.cuda()\n if self.fp16:\n img = img.half() # to FP16\n\n with torch.no_grad():\n t0 = time.time()\n outputs = self.model(img)\n if self.decoder is not None:\n outputs = self.decoder(outputs, dtype=outputs.type())\n outputs = postprocess(\n outputs, self.num_classes, self.confthre,\n self.nmsthre, class_agnostic=True\n )\n logger.info(\"Infer time: {:.4f}s\".format(time.time() - t0))\n return outputs, img_info\n\n def visual(self, output, img_info, cls_conf=0.35):\n ratio = img_info[\"ratio\"]\n img = img_info[\"raw_img\"]\n if output is None:\n return img\n output = output.cpu()\n\n bboxes = output[:, 0:4]\n\n # preprocessing: resize\n bboxes /= ratio\n\n cls = output[:, 6]\n scores = output[:, 4] * output[:, 5]\n\n print(bboxes, scores, cls, cls_conf, self.cls_names)\n vis_res = vis(img, bboxes, scores, cls, cls_conf, self.cls_names)\n return vis_res\n\n\ndef image_demo(predictor, vis_folder, path, current_time, save_result):\n if os.path.isdir(path):\n files = get_image_list(path)\n else:\n files = [path]\n files.sort()\n for image_name in files:\n with open(image_name.split(\"/\")[-1].split(\".\")[0] + \".xml\", \"w\") as f:\n f.write(\"<?xml version='1.0' encoding='utf-8'?>\\n\")\n f.write(\"<annotation>\\n\t<source>\\n\t\t<filename>{}</filename>\\n\t\t<origin>GF3</origin>\\n\t</source>\\n\t<research>\\n\t\t<version>1.0</version>\\n\t\t<provider>Company/School of team</provider>\\n\t\t<author>team name</author>\\n\t\t<pluginname>Airplane Detection and Recognition</pluginname>\\n\t\t<pluginclass>Detection</pluginclass>\\n\t\t<time>2021-07-2021-11</time>\\n\t</research>\\n\t<objects>\\n\".format(image_name.split(\"/\")[-1]))\n outputs, img_info = predictor.inference(image_name)\n output = outputs[0]\n cls_names = CLASS_NAMES\n cls_conf = 0.35\n ratio = img_info[\"ratio\"]\n img = img_info[\"raw_img\"]\n if output is None:\n return img\n output = output.cpu()\n\n bboxes = output[:, 0:4]\n\n # preprocessing: resize\n bboxes /= ratio\n\n cls = output[:, 6]\n scores = output[:, 4] * output[:, 5]\n\n for i, score in enumerate(scores):\n if score > cls_conf:\n f.write(\"\t\t<object>\\n\t\t\t<coordinate>pixel</coordinate>\\n\t\t\t<type>rectangle</type>\\n\t\t\t<description>None</description>\\n\t\t\t<possibleresult>\\n\t\t\t\t<name>{}</name>\\n\t\t\t\t<probability>{}</probability>\\n\t\t\t</possibleresult>\\n\t\t\t<points>\\n\".format(cls_names[int(cls[i])], score))\n bbox = bboxes[i]\n f.write(\"\t\t\t\t<point>{},{}</point>\\n\".format(int(bbox[0]), int(bbox[1])))\n f.write(\"\t\t\t\t<point>{},{}</point>\\n\".format(int(bbox[2]), int(bbox[1])))\n f.write(\"\t\t\t\t<point>{},{}</point>\\n\".format(int(bbox[2]), int(bbox[3])))\n f.write(\"\t\t\t\t<point>{},{}</point>\\n\".format(int(bbox[0]), int(bbox[3])))\n f.write(\"\t\t\t\t<point>{},{}</point>\\n\".format(int(bboxes[i][0]), int(bboxes[i][1])))\n \n 
f.write(\"\t\t\t</points>\\n\t\t\t</object>\\n\t\t</objects>\\n</annotation>\")\n result_image = vis(img, bboxes, scores, cls, cls_conf, cls_names)\n # result_image = predictor.visual(outputs[0], img_info, predictor.confthre)\n if save_result:\n save_folder = os.path.join(\n vis_folder, time.strftime(\"%Y_%m_%d_%H_%M_%S\", current_time)\n )\n os.makedirs(save_folder, exist_ok=True)\n save_file_name = os.path.join(save_folder, os.path.basename(image_name))\n logger.info(\"Saving detection result in {}\".format(save_file_name))\n cv2.imwrite(save_file_name, result_image)\n ch = cv2.waitKey(0)\n if ch == 27 or ch == ord(\"q\") or ch == ord(\"Q\"):\n break\n\n\ndef imageflow_demo(predictor, vis_folder, current_time, args):\n cap = cv2.VideoCapture(args.path if args.demo == \"video\" else args.camid)\n width = cap.get(cv2.CAP_PROP_FRAME_WIDTH) # float\n height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT) # float\n fps = cap.get(cv2.CAP_PROP_FPS)\n save_folder = os.path.join(\n vis_folder, time.strftime(\"%Y_%m_%d_%H_%M_%S\", current_time)\n )\n os.makedirs(save_folder, exist_ok=True)\n if args.demo == \"video\":\n save_path = os.path.join(save_folder, args.path.split(\"/\")[-1])\n else:\n save_path = os.path.join(save_folder, \"camera.mp4\")\n logger.info(f\"video save_path is {save_path}\")\n vid_writer = cv2.VideoWriter(\n save_path, cv2.VideoWriter_fourcc(*\"mp4v\"), fps, (int(width), int(height))\n )\n while True:\n ret_val, frame = cap.read()\n if ret_val:\n outputs, img_info = predictor.inference(frame)\n result_frame = predictor.visual(outputs[0], img_info, predictor.confthre)\n if args.save_result:\n vid_writer.write(result_frame)\n ch = cv2.waitKey(1)\n if ch == 27 or ch == ord(\"q\") or ch == ord(\"Q\"):\n break\n else:\n break\n\n\ndef main():\n args = make_parser().parse_args()\n exp = get_exp(args.exp_file, args.name)\n if not args.experiment_name:\n args.experiment_name = exp.exp_name\n\n file_name = os.path.join(exp.output_dir, args.experiment_name)\n os.makedirs(file_name, exist_ok=True)\n\n vis_folder = None\n if args.save_result:\n vis_folder = os.path.join(file_name, \"vis_res\")\n os.makedirs(vis_folder, exist_ok=True)\n\n logger.info(\"Args: {}\".format(args))\n\n if args.conf is not None:\n exp.test_conf = args.conf\n if args.nms is not None:\n exp.nmsthre = args.nms\n if args.tsize is not None:\n exp.test_size = (args.tsize, args.tsize)\n\n model = exp.get_model()\n logger.info(\"Model Summary: {}\".format(get_model_info(model, exp.test_size)))\n\n if args.device == \"gpu\":\n model.cuda()\n if args.fp16:\n model.half() # to FP16\n model.eval()\n\n if args.fuse:\n logger.info(\"\\tFusing model...\")\n model = fuse_model(model)\n\n if args.trt:\n assert not args.fuse, \"TensorRT model is not support model fusing!\"\n trt_file = os.path.join(file_name, \"model_trt.pth\")\n assert os.path.exists(\n trt_file\n ), \"TensorRT model is not found!\\n Run python3 tools/trt.py first!\"\n model.head.decode_in_inference = False\n decoder = model.head.decode_outputs\n logger.info(\"Using TensorRT to inference\")\n else:\n trt_file = None\n decoder = None\n\n predictor = Predictor(model, exp, CLASS_NAMES, trt_file, decoder, args.device, args.fp16, args.legacy)\n current_time = time.localtime()\n if args.demo == \"image\":\n image_demo(predictor, vis_folder, args.path, current_time, args.save_result)\n elif args.demo == \"video\" or args.demo == \"webcam\":\n imageflow_demo(predictor, vis_folder, current_time, args)\n\n\nif __name__ == \"__main__\":\n print(sys.argv)\n main()\n"
]
| [
[
"torch.ones",
"torch.no_grad",
"torch.load",
"torch.from_numpy"
]
]
|
rmitra/google-research | [
"88481d10a87947ffb9305dc7665682e008b27391"
]
| [
"towards_gan_benchmarks/lib/gan.py"
]
| [
"# coding=utf-8\n# Copyright 2019 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Very minimal GAN library.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow.compat.v1 as tf\nfrom tensorflow.contrib import layers as contrib_layers\nfrom tensorflow.contrib import slim as contrib_slim\n\n\ndef set_flags(flags):\n \"\"\"Populate flags object with defaults.\"\"\"\n flags.set_if_empty('acts_loss', 0.)\n flags.set_if_empty('algorithm', 'vanilla')\n flags.set_if_empty('architecture', 'dcgan')\n flags.set_if_empty('dim', 64)\n flags.set_if_empty('dim_z', 128)\n flags.set_if_empty('extra_depth', 0)\n flags.set_if_empty('initializer_d', 'xavier')\n flags.set_if_empty('lr_decay', 'none')\n flags.set_if_empty('nonlinearity', 'default')\n flags.set_if_empty('norm', True)\n flags.set_if_empty('l2_reg_d', 1e-3)\n flags.set_if_empty('weight_clip_d', -1)\n flags.set_if_empty('weight_decay_g', None)\n flags.set_if_empty('weight_decay_d', None)\n flags.set_if_empty('z_seed', None)\n if flags.algorithm in [\n 'wgan-gp', 'wgan-lp', 'wgan-v3', 'wgan-gp-quadratic', 'r1', 'r1-ns'\n ]:\n flags.set_if_empty('lr', 1e-4)\n flags.set_if_empty('beta1', 0.)\n flags.set_if_empty('beta2', 0.9)\n flags.set_if_empty('disc_iters', 5)\n flags.set_if_empty('wgangp_lambda', 10)\n flags.set_if_empty('wgangp_minimax', False)\n flags.set_if_empty('wgangp_compressive_loss', False)\n elif flags.algorithm in ['vanilla', 'vanilla_minimax']:\n flags.set_if_empty('lr', 2e-4)\n flags.set_if_empty('beta1', 0.5)\n flags.set_if_empty('beta2', 0.999)\n flags.set_if_empty('disc_iters', 1)\n else:\n raise Exception('invalid gan flags.algorithm')\n flags.set_if_empty('dim_g', flags.dim)\n flags.set_if_empty('dim_d', flags.dim)\n flags.set_if_empty('extra_depth_g', flags.extra_depth)\n flags.set_if_empty('extra_depth_d', flags.extra_depth)\n flags.set_if_empty('downsample_conv_filt_size', 5)\n flags.set_if_empty('extra_conv_filt_size', 3)\n flags.set_if_empty('extra_top_conv', False)\n flags.set_if_empty('lr_d', flags.lr)\n flags.set_if_empty('lr_g', flags.lr)\n flags.set_if_empty('nonlinearity_g', flags.nonlinearity)\n flags.set_if_empty('nonlinearity_d', flags.nonlinearity)\n flags.set_if_empty('norm_g', flags.norm)\n flags.set_if_empty('norm_d', flags.norm)\n\n\ndef random_latents(batch_size, flags, antithetic_sampling=False):\n if antithetic_sampling:\n half = tf.random_normal([batch_size // 2, flags.dim_z], seed=flags.z_seed)\n return tf.concat([half, -half], axis=0)\n else:\n return tf.random_normal([batch_size, flags.dim_z], seed=flags.z_seed)\n\n\ndef _leaky_relu(x):\n return tf.maximum(0.2 * x, x)\n\n\ndef _swish(x):\n return x * tf.nn.sigmoid(x)\n\n\ndef _softplus(x):\n return tf.nn.softplus(x)\n\n\ndef _elu_softplus(x):\n \"\"\"softplus that looks roughly like elu but is smooth.\"\"\"\n return (tf.nn.softplus((2 * x) + 2) / 2) - 1\n\n\ndef nonlinearity_fn(flag, is_discriminator):\n \"\"\"Returns the 
appropriate nonlinearity function based on flags.\"\"\"\n if flag == 'default':\n if is_discriminator:\n return _leaky_relu\n else:\n return tf.nn.relu\n elif flag == 'leaky_relu':\n return _leaky_relu\n elif flag == 'relu':\n return tf.nn.relu\n elif flag == 'elu':\n return tf.nn.elu\n elif flag == 'swish':\n return _swish\n elif flag == 'softplus':\n return _softplus\n elif flag == 'elu_softplus':\n return _elu_softplus\n elif flag == 'exp':\n return tf.exp\n elif flag == 'tanh':\n return tf.tanh\n elif flag == 'sigmoid':\n return tf.nn.sigmoid\n else:\n raise Exception('invalid nonlinearity {}'.format(flag))\n\n\ndef generator(z, flags, scope=None, reuse=None):\n if flags.architecture == 'dcgan':\n return dcgan_generator(z, flags, scope, reuse)\n\n\ndef discriminator(x, flags, scope=None, reuse=None, return_acts=False):\n if flags.architecture == 'dcgan':\n return dcgan_discriminator(x, flags, scope, reuse, return_acts=return_acts)\n\n\ndef dcgan_generator(z, flags, scope=None, reuse=None):\n \"\"\"DCGAN-style generator network.\"\"\"\n nonlinearity = nonlinearity_fn(flags.nonlinearity_g, False)\n ds_fs = flags.downsample_conv_filt_size\n x_fs = flags.extra_conv_filt_size\n\n if not flags.norm_g:\n normalizer = None\n else:\n normalizer = contrib_slim.batch_norm\n\n with tf.variable_scope(scope, reuse=reuse):\n out = contrib_slim.fully_connected(\n z,\n 4 * 4 * (4 * flags.dim_g),\n scope='fc',\n normalizer_fn=normalizer,\n activation_fn=nonlinearity)\n out = tf.reshape(out, [-1, 4, 4, 4 * flags.dim_g])\n\n if flags.extra_top_conv:\n out = contrib_slim.conv2d(\n out,\n 4 * flags.dim_d,\n x_fs,\n scope='extratopconv',\n activation_fn=nonlinearity,\n normalizer_fn=normalizer)\n\n out = contrib_slim.conv2d_transpose(\n out,\n 2 * flags.dim_g,\n ds_fs,\n scope='conv1',\n stride=2,\n normalizer_fn=normalizer,\n activation_fn=nonlinearity)\n\n for i in range(flags.extra_depth_g):\n out = contrib_slim.conv2d(\n out,\n 2 * flags.dim_g,\n x_fs,\n scope='extraconv1.{}'.format(i),\n normalizer_fn=normalizer,\n activation_fn=nonlinearity)\n\n out = contrib_slim.conv2d_transpose(\n out,\n flags.dim_g,\n ds_fs,\n scope='conv2',\n stride=2,\n normalizer_fn=normalizer,\n activation_fn=nonlinearity)\n\n for i in range(flags.extra_depth_g):\n out = contrib_slim.conv2d(\n out,\n flags.dim_g,\n x_fs,\n scope='extraconv2.{}'.format(i),\n normalizer_fn=normalizer,\n activation_fn=nonlinearity)\n\n out = contrib_slim.conv2d_transpose(\n out, 3, ds_fs, scope='conv3', stride=2, activation_fn=tf.tanh)\n\n return out\n\n\ndef dcgan_discriminator(x, flags, scope=None, reuse=None, return_acts=False):\n \"\"\"DCGAN-style discriminator network.\"\"\"\n nonlinearity = nonlinearity_fn(flags.nonlinearity_d, True)\n ds_fs = flags.downsample_conv_filt_size\n x_fs = flags.extra_conv_filt_size\n\n acts = []\n with tf.variable_scope(scope, reuse=reuse):\n if not flags.norm_d:\n normalizer = None\n elif flags.algorithm == 'vanilla':\n normalizer = contrib_slim.batch_norm\n else:\n normalizer = contrib_slim.layer_norm\n\n if flags.initializer_d == 'xavier':\n initializer = contrib_layers.xavier_initializer()\n elif flags.initializer_d == 'orth_gain2':\n initializer = tf.orthogonal_initializer(gain=2.)\n elif flags.initializer_d == 'he':\n initializer = contrib_layers.variance_scaling_initializer()\n elif flags.initializer_d == 'he_uniform':\n initializer = contrib_layers.variance_scaling_initializer(uniform=True)\n\n out = contrib_slim.conv2d(\n x,\n flags.dim_d,\n ds_fs,\n scope='conv1',\n stride=2,\n 
activation_fn=nonlinearity,\n weights_initializer=initializer)\n acts.append(out)\n\n for i in range(flags.extra_depth_d):\n out = contrib_slim.conv2d(\n out,\n flags.dim_d,\n x_fs,\n scope='extraconv1.{}'.format(i),\n activation_fn=nonlinearity,\n normalizer_fn=normalizer,\n weights_initializer=initializer)\n acts.append(out)\n\n out = contrib_slim.conv2d(\n out,\n 2 * flags.dim_d,\n ds_fs,\n scope='conv2',\n stride=2,\n activation_fn=nonlinearity,\n normalizer_fn=normalizer,\n weights_initializer=initializer)\n acts.append(out)\n\n for i in range(flags.extra_depth_d):\n out = contrib_slim.conv2d(\n out,\n 2 * flags.dim_d,\n x_fs,\n scope='extraconv2.{}'.format(i),\n activation_fn=nonlinearity,\n normalizer_fn=normalizer,\n weights_initializer=initializer)\n acts.append(out)\n\n out = contrib_slim.conv2d(\n out,\n 4 * flags.dim_d,\n ds_fs,\n scope='conv3',\n stride=2,\n activation_fn=nonlinearity,\n normalizer_fn=normalizer,\n weights_initializer=initializer)\n acts.append(out)\n\n if flags.extra_top_conv:\n out = contrib_slim.conv2d(\n out,\n 4 * flags.dim_d,\n x_fs,\n scope='extratopconv',\n activation_fn=nonlinearity,\n normalizer_fn=normalizer,\n weights_initializer=initializer)\n acts.append(out)\n\n out = tf.reshape(out, [-1, 4 * 4 * (4 * flags.dim_d)])\n out = contrib_slim.fully_connected(out, 1, scope='fc', activation_fn=None)\n acts.append(out)\n\n if return_acts:\n return out, acts\n else:\n return out\n\n\ndef losses(generator_fn, discriminator_fn, real_data, z,\n disc_params, flags):\n \"\"\"Returns loss variables for the generator and discriminator.\"\"\"\n fake_data = generator_fn(z)\n\n if flags.acts_loss > 0.:\n disc_real, disc_real_acts = discriminator_fn(real_data, return_acts=True)\n disc_fake, disc_fake_acts = discriminator_fn(fake_data, return_acts=True)\n else:\n disc_real = discriminator_fn(real_data)\n disc_fake = discriminator_fn(fake_data)\n\n acts_l2_loss = 0.\n acts_count = 1.\n if flags.acts_loss > 0.:\n all_disc_acts = disc_real_acts + disc_fake_acts\n for act in all_disc_acts:\n acts_l2_loss += tf.nn.l2_loss(act)\n acts_count += tf.reduce_sum(tf.ones_like(act))\n\n l2_reg_d_cost = 0.\n if flags.l2_reg_d > 0:\n for p in disc_params:\n if 'weights' in p.name:\n l2_reg_d_cost += tf.nn.l2_loss(p)\n l2_reg_d_cost *= flags.l2_reg_d\n\n def cn(x):\n \"\"\"compressive nonlinearity.\"\"\"\n return tf.asinh(4. 
* x) / 4.\n\n if flags.algorithm == 'vanilla':\n gen_cost = tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(\n logits=disc_fake, labels=tf.ones_like(disc_fake)))\n disc_cost = tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(\n logits=disc_fake, labels=tf.zeros_like(disc_fake)))\n disc_cost += tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(\n logits=disc_real, labels=tf.ones_like(disc_real)))\n divergence = gen_cost\n disc_cost += l2_reg_d_cost\n disc_cost += flags.acts_loss * (acts_l2_loss / (1e-2 + acts_count))\n\n elif flags.algorithm == 'vanilla_minimax':\n disc_cost = tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(\n logits=disc_fake, labels=tf.zeros_like(disc_fake)))\n disc_cost += tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(\n logits=disc_real, labels=tf.ones_like(disc_real)))\n gen_cost = -disc_cost\n divergence = ((-disc_cost) + tf.log(4.)) / 2.\n disc_cost += l2_reg_d_cost\n disc_cost += flags.acts_loss * (acts_l2_loss / (1e-2 + acts_count))\n\n elif flags.algorithm == 'wgan-gp':\n input_ndim = len(real_data.get_shape())\n if flags.wgangp_compressive_loss:\n disc_fake = cn(disc_fake)\n disc_real = cn(disc_real)\n wgan_disc_cost = tf.reduce_mean(disc_fake) - tf.reduce_mean(disc_real)\n alpha = tf.random_uniform(\n shape=[tf.shape(real_data)[0]] + [1 for i in range(input_ndim - 1)],\n minval=0.,\n maxval=1.)\n differences = fake_data - real_data\n interpolates = real_data + (alpha * differences)\n if flags.acts_loss > 0.:\n disc_interps, disc_interp_acts = discriminator_fn(\n interpolates, return_acts=True)\n else:\n disc_interps = discriminator_fn(interpolates)\n gradients = tf.gradients(disc_interps, [interpolates])[0]\n slopes = tf.sqrt(1e-8 + tf.reduce_sum(\n tf.square(gradients),\n reduction_indices=[i for i in range(1, input_ndim)]))\n gradient_penalty = tf.reduce_mean((slopes - 1.)**2)\n disc_cost = wgan_disc_cost + (flags.wgangp_lambda * gradient_penalty)\n disc_cost += l2_reg_d_cost\n\n if flags.acts_loss > 0.:\n for act in disc_interp_acts:\n acts_l2_loss += flags.acts_loss * tf.nn.l2_loss(act)\n acts_count += tf.reduce_sum(tf.ones_like(act))\n disc_cost += flags.acts_loss * (acts_l2_loss / (1e-2 + acts_count))\n\n if flags.wgangp_minimax:\n gen_cost = -disc_cost\n divergence = -disc_cost\n else:\n gen_cost = -tf.reduce_mean(disc_fake)\n divergence = -wgan_disc_cost\n\n elif flags.algorithm == 'r1':\n disc_cost = tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(\n logits=disc_fake, labels=tf.zeros_like(disc_fake)))\n disc_cost += tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(\n logits=disc_real, labels=tf.ones_like(disc_real)))\n gen_cost = -disc_cost\n divergence = ((-disc_cost) + tf.log(4.)) / 2.\n\n input_ndim = len(real_data.get_shape())\n gradients = tf.gradients(tf.nn.sigmoid(disc_real), [real_data])[0]\n slopes = tf.sqrt(1e-8 + tf.reduce_sum(\n tf.square(gradients),\n reduction_indices=[i for i in range(1, input_ndim)]))\n gradient_penalty = 0.5 * tf.reduce_mean(slopes**2)\n\n disc_cost += flags.wgangp_lambda * gradient_penalty\n disc_cost += l2_reg_d_cost\n disc_cost += flags.acts_loss * (acts_l2_loss / (1e-2 + acts_count))\n\n elif flags.algorithm == 'r1-ns':\n disc_cost = tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(\n logits=disc_fake, labels=tf.zeros_like(disc_fake)))\n disc_cost += tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(\n logits=disc_real, labels=tf.ones_like(disc_real)))\n divergence = ((-disc_cost) + tf.log(4.)) / 2.\n gen_cost = tf.reduce_mean(\n 
tf.nn.sigmoid_cross_entropy_with_logits(\n logits=disc_fake, labels=tf.ones_like(disc_fake)))\n\n input_ndim = len(real_data.get_shape())\n gradients = tf.gradients(tf.nn.sigmoid(disc_real), [real_data])[0]\n slopes = tf.sqrt(1e-8 + tf.reduce_sum(\n tf.square(gradients),\n reduction_indices=[i for i in range(1, input_ndim)]))\n gradient_penalty = 0.5 * tf.reduce_mean(slopes**2)\n\n disc_cost += flags.wgangp_lambda * gradient_penalty\n disc_cost += l2_reg_d_cost\n disc_cost += flags.acts_loss * (acts_l2_loss / (1e-2 + acts_count))\n\n return gen_cost, disc_cost, divergence\n\n\ndef gen_train_op(cost, params, step, iters, flags):\n \"\"\"Build the generator train op.\"\"\"\n if flags.lr_decay == 'linear':\n step_lr = (1. - (tf.cast(step, tf.float32) / iters))\n elif flags.lr_decay == 'quadratic':\n step_lr = ((1. - (tf.cast(step, tf.float32) / iters))**2)\n elif flags.lr_decay == 'none':\n step_lr = 1.\n train_op = tf.train.AdamOptimizer(step_lr * flags.lr_g, flags.beta1,\n flags.beta2).minimize(\n cost,\n var_list=params,\n colocate_gradients_with_ops=True)\n\n if flags.weight_decay_g is not None:\n decay = (step_lr * flags.weight_decay_g)\n with tf.control_dependencies([train_op]):\n weights = [p for p in params if 'weights' in p.name]\n decayed = [w - (decay * w) for w in weights]\n decay_op = tf.group(*[tf.assign(w, d) for w, d in zip(weights, decayed)])\n train_op = decay_op\n\n return train_op\n\n\ndef disc_train_op(cost, params, step, iters, flags):\n \"\"\"Build the discriminator train op.\"\"\"\n if flags.lr_decay == 'linear':\n step_lr = (1. - (tf.cast(step, tf.float32) / iters))\n elif flags.lr_decay == 'quadratic':\n step_lr = ((1. - (tf.cast(step, tf.float32) / iters))**2)\n elif flags.lr_decay == 'drop_after_90k':\n step_lr = tf.cond(step > 90000, lambda: 0.1, lambda: 1.0)\n elif flags.lr_decay == 'none':\n step_lr = 1.\n train_op = tf.train.AdamOptimizer(step_lr * flags.lr_d, flags.beta1,\n flags.beta2).minimize(\n cost,\n var_list=params,\n colocate_gradients_with_ops=True)\n\n if flags.weight_decay_d is not None:\n decay = (step_lr * flags.weight_decay_d)\n with tf.control_dependencies([train_op]):\n weights = [p for p in params if 'weights' in p.name]\n decayed = [w - (decay * w) for w in weights]\n decay_op = tf.group(*[tf.assign(w, d) for w, d in zip(weights, decayed)])\n train_op = decay_op\n\n if flags.weight_clip_d >= 0:\n # Clip *all* the params, like the original WGAN implementation\n clip = flags.weight_clip_d\n with tf.control_dependencies([train_op]):\n clipped = [tf.clip_by_value(p, -clip, clip) for p in params]\n clip_op = tf.group(*[tf.assign(p, c) for c, p in zip(clipped, params)])\n train_op = clip_op\n\n return train_op\n"
]
| [
[
"tensorflow.compat.v1.assign",
"tensorflow.contrib.layers.xavier_initializer",
"tensorflow.compat.v1.log",
"tensorflow.compat.v1.ones_like",
"tensorflow.compat.v1.shape",
"tensorflow.compat.v1.asinh",
"tensorflow.compat.v1.nn.softplus",
"tensorflow.compat.v1.reshape",
"tensorflow.compat.v1.gradients",
"tensorflow.compat.v1.train.AdamOptimizer",
"tensorflow.contrib.layers.variance_scaling_initializer",
"tensorflow.contrib.slim.conv2d_transpose",
"tensorflow.compat.v1.zeros_like",
"tensorflow.compat.v1.reduce_mean",
"tensorflow.compat.v1.cast",
"tensorflow.compat.v1.variable_scope",
"tensorflow.compat.v1.control_dependencies",
"tensorflow.compat.v1.maximum",
"tensorflow.contrib.slim.conv2d",
"tensorflow.contrib.slim.fully_connected",
"tensorflow.compat.v1.cond",
"tensorflow.compat.v1.square",
"tensorflow.compat.v1.concat",
"tensorflow.compat.v1.orthogonal_initializer",
"tensorflow.compat.v1.nn.sigmoid",
"tensorflow.compat.v1.random_normal",
"tensorflow.compat.v1.clip_by_value",
"tensorflow.compat.v1.nn.l2_loss"
]
]
|
tnoumar/PSPNet | [
"5424e931681db4ee5cb98f6409df15b67e78137f"
]
| [
"train.py"
]
| [
"\"\"\"\r\nThis code is based on DrSleep's framework: https://github.com/DrSleep/tensorflow-deeplab-resnet \r\n\"\"\"\r\n\r\nfrom __future__ import print_function\r\n\r\nimport argparse\r\nimport os\r\nimport sys\r\nimport time\r\n\r\nimport tensorflow as tf\r\nimport numpy as np\r\n\r\nfrom model import PSPNet101\r\nfrom tools import prepare_label\r\nfrom image_reader import ImageReader\r\n\r\nIMG_MEAN = np.array((103.939, 116.779, 123.68), dtype=np.float32)\r\n\r\nBATCH_SIZE = 2\r\nDATA_DIRECTORY = '/SSD_data/cityscapes_dataset/cityscape'\r\nDATA_LIST_PATH = './list/cityscapes_train_list.txt'\r\nIGNORE_LABEL = 255\r\nINPUT_SIZE = '713,713'\r\nLEARNING_RATE = 1e-3\r\nMOMENTUM = 0.9\r\nNUM_CLASSES = 19\r\nNUM_STEPS = 60001\r\nPOWER = 0.9\r\nRANDOM_SEED = 1234\r\nWEIGHT_DECAY = 0.0001\r\nRESTORE_FROM = './'\r\nSNAPSHOT_DIR = './model/'\r\nSAVE_NUM_IMAGES = 4\r\nSAVE_PRED_EVERY = 50\r\n\r\n\r\ndef get_arguments():\r\n parser = argparse.ArgumentParser(description=\"DeepLab-ResNet Network\")\r\n parser.add_argument(\"--batch-size\", type=int, default=BATCH_SIZE,\r\n help=\"Number of images sent to the network in one step.\")\r\n parser.add_argument(\"--data-dir\", type=str, default=DATA_DIRECTORY,\r\n help=\"Path to the directory containing the PASCAL VOC dataset.\")\r\n parser.add_argument(\"--data-list\", type=str, default=DATA_LIST_PATH,\r\n help=\"Path to the file listing the images in the dataset.\")\r\n parser.add_argument(\"--ignore-label\", type=int, default=IGNORE_LABEL,\r\n help=\"The index of the label to ignore during the training.\")\r\n parser.add_argument(\"--input-size\", type=str, default=INPUT_SIZE,\r\n help=\"Comma-separated string with height and width of images.\")\r\n parser.add_argument(\"--is-training\", action=\"store_true\",\r\n help=\"Whether to updates the running means and variances during the training.\")\r\n parser.add_argument(\"--learning-rate\", type=float, default=LEARNING_RATE,\r\n help=\"Base learning rate for training with polynomial decay.\")\r\n parser.add_argument(\"--momentum\", type=float, default=MOMENTUM,\r\n help=\"Momentum component of the optimiser.\")\r\n parser.add_argument(\"--not-restore-last\", action=\"store_true\",\r\n help=\"Whether to not restore last (FC) layers.\")\r\n parser.add_argument(\"--num-classes\", type=int, default=NUM_CLASSES,\r\n help=\"Number of classes to predict (including background).\")\r\n parser.add_argument(\"--num-steps\", type=int, default=NUM_STEPS,\r\n help=\"Number of training steps.\")\r\n parser.add_argument(\"--power\", type=float, default=POWER,\r\n help=\"Decay parameter to compute the learning rate.\")\r\n parser.add_argument(\"--random-mirror\", action=\"store_true\",\r\n help=\"Whether to randomly mirror the inputs during the training.\")\r\n parser.add_argument(\"--random-scale\", action=\"store_true\",\r\n help=\"Whether to randomly scale the inputs during the training.\")\r\n parser.add_argument(\"--random-seed\", type=int, default=RANDOM_SEED,\r\n help=\"Random seed to have reproducible results.\")\r\n parser.add_argument(\"--restore-from\", type=str, default=RESTORE_FROM,\r\n help=\"Where restore model parameters from.\")\r\n parser.add_argument(\"--save-num-images\", type=int, default=SAVE_NUM_IMAGES,\r\n help=\"How many images to save.\")\r\n parser.add_argument(\"--save-pred-every\", type=int, default=SAVE_PRED_EVERY,\r\n help=\"Save summaries and checkpoint every often.\")\r\n parser.add_argument(\"--snapshot-dir\", type=str, default=SNAPSHOT_DIR,\r\n help=\"Where to save snapshots of the 
model.\")\r\n parser.add_argument(\"--weight-decay\", type=float, default=WEIGHT_DECAY,\r\n help=\"Regularisation parameter for L2-loss.\")\r\n parser.add_argument(\"--update-mean-var\", action=\"store_true\",\r\n help=\"whether to get update_op from tf.Graphic_Keys\")\r\n parser.add_argument(\"--train-beta-gamma\", action=\"store_true\",\r\n help=\"whether to train beta & gamma in bn layer\")\r\n return parser.parse_args()\r\n\r\ndef save(saver, sess, logdir, step):\r\n model_name = 'model.ckpt'\r\n checkpoint_path = os.path.join(logdir, model_name)\r\n \r\n if not os.path.exists(logdir):\r\n os.makedirs(logdir)\r\n saver.save(sess, checkpoint_path, global_step=step)\r\n print('The checkpoint has been created.')\r\n\r\ndef load(saver, sess, ckpt_path):\r\n saver.restore(sess, ckpt_path)\r\n print(\"Restored model parameters from {}\".format(ckpt_path))\r\n\r\ndef main():\r\n \"\"\"Create the model and start the training.\"\"\"\r\n args = get_arguments()\r\n \r\n h, w = map(int, args.input_size.split(','))\r\n input_size = (h, w)\r\n \r\n tf.set_random_seed(args.random_seed)\r\n \r\n coord = tf.train.Coordinator()\r\n \r\n with tf.name_scope(\"create_inputs\"):\r\n reader = ImageReader(\r\n args.data_dir,\r\n args.data_list,\r\n input_size,\r\n args.random_scale,\r\n args.random_mirror,\r\n args.ignore_label,\r\n IMG_MEAN,\r\n coord)\r\n image_batch, label_batch = reader.dequeue(args.batch_size)\r\n \r\n net = PSPNet101({'data': image_batch}, is_training=True, num_classes=args.num_classes)\r\n \r\n raw_output = net.layers['conv6']\r\n\r\n # According from the prototxt in Caffe implement, learning rate must multiply by 10.0 in pyramid module\r\n fc_list = ['conv5_3_pool1_conv', 'conv5_3_pool2_conv', 'conv5_3_pool3_conv', 'conv5_3_pool6_conv', 'conv6', 'conv5_4']\r\n restore_var = [v for v in tf.global_variables()]\r\n all_trainable = [v for v in tf.trainable_variables() if ('beta' not in v.name and 'gamma' not in v.name) or args.train_beta_gamma]\r\n fc_trainable = [v for v in all_trainable if v.name.split('/')[0] in fc_list]\r\n conv_trainable = [v for v in all_trainable if v.name.split('/')[0] not in fc_list] # lr * 1.0\r\n fc_w_trainable = [v for v in fc_trainable if 'weights' in v.name] # lr * 10.0\r\n fc_b_trainable = [v for v in fc_trainable if 'biases' in v.name] # lr * 20.0\r\n assert(len(all_trainable) == len(fc_trainable) + len(conv_trainable))\r\n assert(len(fc_trainable) == len(fc_w_trainable) + len(fc_b_trainable))\r\n \r\n # Predictions: ignoring all predictions with labels greater or equal than n_classes\r\n raw_prediction = tf.reshape(raw_output, [-1, args.num_classes])\r\n label_proc = prepare_label(label_batch, tf.stack(raw_output.get_shape()[1:3]), num_classes=args.num_classes, one_hot=False) # [batch_size, h, w]\r\n raw_gt = tf.reshape(label_proc, [-1,])\r\n indices = tf.squeeze(tf.where(tf.less_equal(raw_gt, args.num_classes - 1)), 1)\r\n gt = tf.cast(tf.gather(raw_gt, indices), tf.int32)\r\n prediction = tf.gather(raw_prediction, indices)\r\n \r\n # Pixel-wise softmax loss.\r\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=prediction, labels=gt)\r\n l2_losses = [args.weight_decay * tf.nn.l2_loss(v) for v in tf.trainable_variables() if 'weights' in v.name]\r\n reduced_loss = tf.reduce_mean(loss) + tf.add_n(l2_losses)\r\n\r\n # Using Poly learning rate policy \r\n base_lr = tf.constant(args.learning_rate)\r\n step_ph = tf.placeholder(dtype=tf.float32, shape=())\r\n learning_rate = tf.scalar_mul(base_lr, tf.pow((1 - step_ph / args.num_steps), 
args.power))\r\n \r\n # Gets moving_mean and moving_variance update operations from tf.GraphKeys.UPDATE_OPS\r\n if args.update_mean_var == False:\r\n update_ops = None\r\n else:\r\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\r\n\r\n with tf.control_dependencies(update_ops):\r\n opt_conv = tf.train.MomentumOptimizer(learning_rate, args.momentum)\r\n opt_fc_w = tf.train.MomentumOptimizer(learning_rate * 10.0, args.momentum)\r\n opt_fc_b = tf.train.MomentumOptimizer(learning_rate * 20.0, args.momentum)\r\n\r\n grads = tf.gradients(reduced_loss, conv_trainable + fc_w_trainable + fc_b_trainable)\r\n grads_conv = grads[:len(conv_trainable)]\r\n grads_fc_w = grads[len(conv_trainable) : (len(conv_trainable) + len(fc_w_trainable))]\r\n grads_fc_b = grads[(len(conv_trainable) + len(fc_w_trainable)):]\r\n\r\n train_op_conv = opt_conv.apply_gradients(zip(grads_conv, conv_trainable))\r\n train_op_fc_w = opt_fc_w.apply_gradients(zip(grads_fc_w, fc_w_trainable))\r\n train_op_fc_b = opt_fc_b.apply_gradients(zip(grads_fc_b, fc_b_trainable))\r\n\r\n train_op = tf.group(train_op_conv, train_op_fc_w, train_op_fc_b)\r\n \r\n # Set up tf session and initialize variables. \r\n config = tf.ConfigProto()\r\n config.gpu_options.allow_growth = True\r\n sess = tf.Session(config=config)\r\n init = tf.global_variables_initializer()\r\n \r\n sess.run(init)\r\n \r\n # Saver for storing checkpoints of the model.\r\n saver = tf.train.Saver(var_list=tf.global_variables(), max_to_keep=10)\r\n\r\n ckpt = tf.train.get_checkpoint_state(SNAPSHOT_DIR)\r\n if ckpt and ckpt.model_checkpoint_path:\r\n loader = tf.train.Saver(var_list=restore_var)\r\n load_step = int(os.path.basename(ckpt.model_checkpoint_path).split('-')[1])\r\n load(loader, sess, ckpt.model_checkpoint_path)\r\n else:\r\n print('No checkpoint file found.')\r\n load_step = 0\r\n\r\n # Start queue threads.\r\n threads = tf.train.start_queue_runners(coord=coord, sess=sess)\r\n\r\n # Iterate over training steps.\r\n for step in range(args.num_steps):\r\n start_time = time.time()\r\n \r\n feed_dict = {step_ph: step}\r\n if step % args.save_pred_every == 0:\r\n loss_value, _ = sess.run([reduced_loss, train_op], feed_dict=feed_dict)\r\n save(saver, sess, args.snapshot_dir, step)\r\n else:\r\n loss_value, _ = sess.run([reduced_loss, train_op], feed_dict=feed_dict)\r\n duration = time.time() - start_time\r\n print('step {:d} \\t loss = {:.3f}, ({:.3f} sec/step)'.format(step, loss_value, duration))\r\n \r\n coord.request_stop()\r\n coord.join(threads)\r\n \r\nif __name__ == '__main__':\r\n main()\r\n"
]
| [
[
"tensorflow.train.start_queue_runners",
"tensorflow.group",
"tensorflow.train.get_checkpoint_state",
"tensorflow.reshape",
"tensorflow.gradients",
"tensorflow.control_dependencies",
"tensorflow.global_variables_initializer",
"tensorflow.set_random_seed",
"tensorflow.trainable_variables",
"tensorflow.train.Saver",
"tensorflow.global_variables",
"tensorflow.add_n",
"tensorflow.constant",
"tensorflow.ConfigProto",
"tensorflow.less_equal",
"tensorflow.get_collection",
"numpy.array",
"tensorflow.train.Coordinator",
"tensorflow.Session",
"tensorflow.nn.l2_loss",
"tensorflow.placeholder",
"tensorflow.name_scope",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"tensorflow.train.MomentumOptimizer",
"tensorflow.gather",
"tensorflow.pow",
"tensorflow.reduce_mean"
]
]
|
sathappanspm/geocoding | [
"bb49968dfacaad5d0e19b29a3b11d932726489bd"
]
| [
"TextGeo/src/geoutils/gazetteer.py"
]
| [
"#!/usr/bin/env python\n#-*- coding:utf-8 -*-\n\n\"\"\"\n *.py: Description of what * does.\n Last Modified:\n\"\"\"\n\n__author__ = \"Sathappan Muthiah\"\n__email__ = \"[email protected]\"\n__version__ = \"0.0.1\"\n\n# import gevent\nfrom .dbManager import SQLiteWrapper, MongoDBWrapper\nimport pandas as pd\nfrom . import GeoPoint, encode, blacklist, loc_default, isempty\n# from . import loc_default, blacklist\n# from . import isempty\nfrom pylru import lrudecorator\nimport logging\n\nlog = logging.getLogger(\"rssgeocoder\")\n\n\nclass BaseGazetteer(object):\n \"\"\"\n Base Gazetteer class\n \"\"\"\n def __init__(self):\n pass\n\n def query(self, name, absoluteMatch=False):\n \"\"\"\n search for name\n \"\"\"\n pass\n\n\nclass GeoNames(BaseGazetteer):\n def __init__(self, dbpath, priority=None):\n self.db = SQLiteWrapper(dbpath)\n\n if priority is None:\n # TODO: Define default priority\n self.priority = None\n else:\n self.priority = priority\n\n @lrudecorator(10000)\n def query(self, name, min_popln=0):\n \"\"\"\n Search the locations DB for the given name\n params:\n name - string\n min_popln - integer\n return:\n list of possible locations the input string refers to\n \"\"\"\n if name in loc_default:\n name = loc_default[name]\n\n if name in blacklist:\n return []\n\n country = self._querycountry(name)\n #admin = self._querystate(name)\n #city = self._querycity(name, min_popln=min_popln)\n #alternateNames = self._query_alternatenames(name, min_popln)\n if country == []:\n admin = self._querystate(name)\n city = self._querycity(name, min_popln=min_popln)\n alternateNames = self._query_alternatenames(name, min_popln)\n #g1 = gevent.spawn(self._querystate, name)\n #g2 = gevent.spawn(self._querycity, name, min_popln=min_popln)\n #g3 = gevent.spawn(self._query_alternatenames, name, min_popln)\n #gevent.joinall([g1, g2, g3])\n #admin, city, alternateNames = g1.value, g2.value, g3.value\n else:\n admin, city, alternateNames = [], [], []\n\n ldist = (city + country + admin + alternateNames)\n if ldist == [] and \"'\" in name:\n log.info('splitting location name on quote mark-{}'.format(encode(name)))\n ldist = self._query_alternatenames(name.split(\"'\", 1)[0])\n\n if ldist != []:\n df = pd.DataFrame([i.__dict__ for i in ldist])\n df.drop_duplicates('geonameid', inplace=True)\n if df.shape[0] == 1:\n df['confidence'] = 1.0\n else:\n df['confidence'] = 0.5\n\n try:\n df['population'] = (df['population'] + 1).astype(float)\n except Exception as e:\n raise e\n\n #dfn = df[df[\"ltype\"] == \"city\"]\n #if not dfn.empty:\n # df.loc[df['ltype'] == 'city', 'confidence'] += ((dfn['population']) /\n # (2 * (dfn['population'].sum())))\n df['confidence'] += (df['population'] / (2 * df['population'].sum()))\n\n ldist = [GeoPoint(**d) for d in df.to_dict(orient='records')]\n\n return ldist\n\n def _querycountry(self, name):\n \"\"\"\n Check if name is a country name\n \"\"\"\n return self.db.query(u\"\"\"SELECT a.*, 'country' as 'ltype', c.population\n FROM allcountries as a\n INNER JOIN alternatenames as b ON a.geonameid=b.geonameid\n INNER JOIN allcities as c ON a.geonameid=c.id\n WHERE\n (country=? OR b.alternatename=?) 
LIMIT 1\"\"\", (name, name))\n\n def _querystate(self, name):\n \"\"\"\n Check if name is an admin name\n \"\"\"\n stmt = u\"\"\"SELECT a.geonameid, a.name as 'admin1', b.country as 'country',\n 'admin1' as 'ltype', 'ADM1' as 'featureCOde', 'A' as 'featureClass',\n b.ISO as 'countryCode', c.population\n FROM allcountries as b INNER JOIN alladmins as a on\n substr(a.key, 0, 3)=b.ISO\n INNER JOIN allcities as c ON c.id=a.geonameid\n WHERE (a.name=? or a.asciiname=?)\n \"\"\"\n return self.db.query(stmt, (name, name))\n\n def _querycity(self, name, min_popln=0):\n \"\"\"\n Check if name is a city name\n \"\"\"\n stmt = u\"\"\"SELECT a.id as geonameid, a.name,\n a.population,a.latitude, a.longitude, c.country as 'country',\n b.name as 'admin1',\n a.featureClass, a.countryCode as 'countryCode', a.featureCOde\n FROM allcountries as c\n INNER JOIN allcities as a ON a.countryCode=c.ISO\n LEFT OUTER JOIN alladmins as b ON a.countryCode||'.'||a.admin1 = b.key\n WHERE\n (a.featureCOde=\"ADM2\" or a.featureCOde=\"ADM3\" or a.featureClass=\"P\") and\n (a.name=? or a.asciiname=?) and a.population >= ?\n \"\"\"\n res = self.db.query(stmt, (name, name, min_popln))\n return res\n\n def _query_alternatenames(self, name, min_popln=0):\n \"\"\"\n check if name matches alternate name\n \"\"\"\n stmt = u\"\"\"SELECT DISTINCT a.id as geonameid, a.name, a.population,\n a.latitude, a.longitude, c.country as country, b.name as admin1,\n a.featureClass, a.featureCOde, a.countryCode\n FROM alternatenames as d\n INNER JOIN allcities as a ON a.id=d.geonameId\n INNER JOIN allcountries as c ON a.countryCode=c.ISO\n LEFT OUTER JOIN alladmins as b ON a.countryCode||'.'||a.admin1 = b.key\n WHERE\n (a.featureCOde=\"ADM2\" or a.featureCOde=\"ADM3\" or a.featureClass=\"P\")\n and alternatename=? and\n a.population >=?\"\"\"\n\n res = self.db.query(stmt, (name, min_popln))\n return res\n\n def normalize_statenames(self, country, admin):\n # get unofficial state name\n stmt = \"\"\" SELECT b.asciiname, c.country FROM allcities AS a INNER JOIN allcountries as c\n ON a.countryCode=c.ISO INNER JOIN alladmins as b\n ON b.geonameid=a.id\n WHERE\n (a.name=? or a.asciiname=?) and c.country=?\n and a.featureClass=\"A\"\n \"\"\"\n sub_res = self.db.query(stmt, (admin, admin, country))\n if sub_res:\n if len(sub_res) > 1:\n log.warning(\"More results returned than necessary-{}\".format(admin))\n\n admin = sub_res[0].asciiname\n return admin\n\n def get_locInfo(self, country=None, admin=None, city=None):\n \"\"\"\n return full loc tuple of name, admin1 name, country, population,\n longitude etc.\n \"\"\"\n if city and (city.lower() == \"ciudad de mexico\" or city.lower() == u\"ciudad de méxico\"):\n city = \"mexico city\"\n\n if not isempty(admin):\n admin = self.normalize_statenames(country, admin)\n\n stmt = u\"\"\"SELECT a.id as geonameid, a.name,\n a.population,a.latitude, a.longitude, c.country as 'country',\n b.name as 'admin1', a.featureClass,\n a.featureCOde, a.countryCode as 'countryCode'\n FROM allcities a\n INNER JOIN alladmins b ON a.countryCode||\".\"||a.admin1 = b.key\n INNER JOIN allcountries c ON a.countryCode=c.ISO\n WHERE \"\"\"\n\n if isempty(city) and isempty(admin):\n stmt += u\"\"\"c.country=? ORDER BY a.population DESC LIMIT 1\"\"\"\n params = (country,)\n\n elif isempty(city):\n stmt += u\"\"\" a.featureCOde == \"ADM1\" and (b.name=? or b.asciiname=?)\n and c.country=? ORDER BY a.population DESC LIMIT 1\"\"\"\n params = (admin, admin, country)\n else:\n stmt += u\"\"\" (a.name=? or a.asciiname=?) and\n (b.name=? 
or b.asciiname=?) and\n c.country=?\n ORDER BY a.population DESC LIMIT 1\"\"\"\n\n params = (city, city, admin, admin, country)\n\n res = self.db.query(stmt, params)\n if res == []:\n res = self._get_locInfo_from_alternate(country=country, admin=admin, city=city)\n\n return res\n\n def _get_locInfo_from_alternate(self, country=None, admin=None, city=None):\n stmt = u\"\"\"SELECT DISTINCT a.id as geonameid, a.name,\n a.population,a.latitude, a.longitude, c.country as 'country',\n b.name as 'admin1', a.featureClass,\n a.featureCOde, a.countryCode as 'countryCode'\n FROM alternatenames as d\n INNER JOIN allcities a ON a.id=d.geonameId\n LEFT OUTER JOIN alladmins b ON a.countryCode||\".\"||a.admin1 = b.key\n INNER JOIN allcountries c ON a.countryCode=c.ISO\n WHERE \"\"\"\n\n if isempty(city) and isempty(admin):\n stmt += u\"\"\"c.country=? ORDER BY a.population DESC\"\"\"\n params = (country, )\n elif isempty(city):\n stmt += u\"\"\"alternatename=?\n and a.featureCOde == \"ADM1\"\n and c.country=? ORDER BY a.population DESC\"\"\".format(admin, country)\n params = (admin, country)\n else:\n stmt += u\"\"\"alternatename=?\n and c.country=? ORDER BY a.population DESC\"\"\"\n params = (city, country)\n\n return self.db.query(stmt, params)\n\n def get_locById(self, locId):\n stmt = u\"\"\"SELECT a.id as geonameid, a.name,\n a.population,a.latitude, a.longitude, c.country as 'country',\n b.name as 'admin1',\n a.featureCOde, a.featureClass, a.countryCode as 'countryCode'\n FROM allcities a\n INNER JOIN alladmins b ON a.countryCode||'.'||a.admin1 = b.key\n INNER JOIN allcountries c ON a.countryCode=c.ISO\n WHERE\n a.id=?\n \"\"\"\n\n return self.db.query(stmt, (locId,))\n\n def get_country(self, cc2):\n res = self.db.query(\"\"\"SELECT *, 'country' as 'ltype' FROM\n allcountries where ISO=?\"\"\", (cc2,))\n for l in res:\n l.confidence = 0.50\n\n return res\n\n\nclass MOD_GeoNames(BaseGazetteer):\n \"\"\"\n Geonames in single table\n \"\"\"\n def __init__(self, dbname, collectionName):\n self.db = MongoDBWrapper(dbname, collectionName)\n\n def query(self, name, min_popln=0):\n pass\n"
]
| [
[
"pandas.DataFrame"
]
]
|
kinect59/ad_examples | [
"bf0bb75faa3f713a2efef04b6b093e6a313825af"
]
| [
"python/dnn/gan.py"
]
| [
"import numpy as np\nimport tensorflow as tf\nimport numpy.random as rnd\nfrom sklearn import mixture\nfrom common.gen_samples import *\nfrom common.nn_utils import get_train_batches\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\n\"\"\"\nA simple [conditional|Info] GAN with fully connected layers for both generator and discriminator.\nAlso supports AnoGAN.\n\nSee dnn/test_gan.py for usage.\n\nReferences:\n[1] Generative Adversarial Nets by Ian J. Goodfellow, Jean Pouget-Abadi, et al., NIPS 2014\n[2] Conditional Generative Adversarial Nets by Mehdi Mirza and Simon Osindero, 2014\n[3] Unsupervised Anomaly Detection with Generative Adversarial Networks to Guide Marker Discovery\n by Thomas Schlegl, Philipp Seebock, Sebastian M. Waldstein, Ursula Schmidt-Erfurth, Georg Langs, IPMI 2017\n[4] InfoGAN: Interpretable Representation Learning by Information Maximizing Generative Adversarial Nets\n by Xi Chen, Yan Duan, Rein Houthooft, John Schulman, Ilya Sutskever, Pieter Abbeel\n\"\"\"\n\nTINY = 1e-8 # as in virtually every InfoGAN implementation on the internet\n\n\ndef set_random_seeds(py_seed=42, np_seed=42, tf_seed=42):\n random.seed(py_seed)\n rnd.seed(np_seed)\n tf.set_random_seed(tf_seed)\n\n\nclass Listener(object):\n def __init__(self):\n pass\n\n def __call__(self, gan, epoch, epoch_start_tm):\n pass\n\n\ndef fit_gmm(x, val_x, min_k=1, max_k=10):\n cv_type = 'diag' # ['spherical', 'tied', 'diag', 'full']\n lowest_bic = np.infty\n bic = []\n best_gmm = None\n for k in range(min_k, max_k+1):\n gmm = mixture.GaussianMixture(n_components=k, covariance_type=cv_type)\n gmm.fit(x)\n bic.append(gmm.bic(val_x))\n if bic[-1] < lowest_bic:\n lowest_bic = bic[-1]\n best_gmm = gmm\n return best_gmm, lowest_bic, bic\n\n\ndef get_cluster_labels(x, min_k=1, max_k=10):\n \"\"\" Fits data to a Gaussian Mixture Model and assigns clusters \"\"\"\n gmm, _, _ = fit_gmm(x, x, min_k=min_k, max_k=max_k)\n logger.debug(\"best GMM k: %d\" % (gmm.n_components))\n y = gmm.predict(x)\n # logger.debug(\"y:\\n%s\" % (str(y)))\n return y, gmm\n\n\ndef get_nn_layer(layers, layer_from_top=1):\n return layers[len(layers) - layer_from_top]\n\n\nclass GAN(object):\n \"\"\" A GAN or a conditional GAN for simple i.i.d data \"\"\"\n def __init__(self, data_dim=1, discr_layer_nodes=None, discr_layer_activations=None,\n gen_input_dim=None, gen_layer_nodes=None, gen_layer_activations=None,\n label_smoothing=False, smoothing_prob=0.9, info_gan=False, info_gan_lambda=1.0,\n conditional=False, n_classes=0, pvals=None, enable_ano_gan=False,\n n_epochs=10, batch_size=25, shuffle=False, learning_rate=0.005,\n l2_lambda=0.001, listener=None, use_adam=False):\n \"\"\" Create the generator-discriminator networks\n\n :param data_dim: int\n number of input dimensions in original data\n :param discr_layer_nodes: list of int\n number of nodes in each discriminator layer (excluding input)\n :param discr_layer_activations: list\n list of activation functions for each discriminator layer (excluding input)\n :param gen_input_dim: int\n number of input dimensions in input generator samples\n :param gen_layer_nodes: list of int\n number of nodes in each generator layer (excluding input)\n :param gen_layer_activations: list\n list of activation functions for each generator layer (excluding input)\n :param label_smoothing: bool\n if True, then use one-sided label smoothing for discriminator loss\n :param smoothing_prob: float\n label-smoothing probability\n :param info_gan: bool\n if True, then use InfoGAN, else simple or conditional GAN\n 
:param info_gan_lambda: float\n InfoGAN regularization penalty\n :param conditional: bool\n if True, then use Conditional GAN, else simple or InfoGAN\n :param n_classes:\n number of class labels in conditional mode\n :param pvals: np.array(dtype=np.float32)\n probability of each class\n :param enable_ano_gan: bool\n whether to enable AnoGAN network (for anomaly detection)\n :param n_epochs: int\n max number of epochs for training\n :param batch_size: int\n mini-batch size for training\n :param shuffle: bool\n whether to shuffle the data in each epoch during training\n :param learning_rate: float\n :param l2_lambda: float\n :param listener: Listener\n call-back function that gets called at the end of each training epoch\n :param use_adam: bool\n whether to use ADAM. The default is GradientDescent\n \"\"\"\n self.label_smoothing = label_smoothing\n self.smoothing_prob = smoothing_prob\n self.info_gan = info_gan\n self.info_gan_lambda = info_gan_lambda\n self.conditional = conditional\n self.n_classes = n_classes\n self.pvals = pvals\n self.enable_ano_gan = enable_ano_gan\n\n self.n_epochs = n_epochs\n self.batch_size = batch_size\n self.shuffle = shuffle\n self.data_dim = data_dim\n self.learning_rate = learning_rate\n self.l2_lambda = l2_lambda\n self.listener = listener\n self.use_adam = use_adam\n\n # first create the generator network\n self.gen_input_dim = gen_input_dim\n self.gen_layer_nodes = gen_layer_nodes\n self.gen_layer_activations = gen_layer_activations\n self.z = self.gen = None\n\n # now, create the discriminator network\n self.discr_layer_nodes = discr_layer_nodes\n self.discr_layer_activations = discr_layer_activations\n\n self.x = self.y = None\n self.discr_data = self.discr_gen = None\n self.discr_loss = self.gen_loss = self.discr_training_op = self.gen_training_op = None\n\n # InfoGAN variables and losses\n self.q_network = self.q_pred = None\n self.info_gan_loss = None\n\n # AnoGAN variables and losses\n self.ano_gan_lambda = None\n self.ano_z = self.ano_gan_net_G = self.ano_gan_net_D = None\n self.ano_gan_training_op = self.ano_gan_loss = None\n self.ano_gan_loss_R = self.ano_gan_loss_D = self.ano_gan_info_loss = None\n self.ano_gan_q_network = None\n\n # Tensoflow session object\n self.session = None\n\n self.unif_lo = 0.0 # -1.0\n self.unif_hi = 1.0\n\n if self.conditional and self.info_gan:\n raise ValueError(\"Only one of conditional or info_gan should be true\")\n\n if (self.conditional or self.info_gan) and self.pvals is None:\n raise ValueError(\"pvals is required for ConditionalGAN and InfoGAN\")\n\n self.init_network()\n\n def init_network(self):\n\n self.x = tf.placeholder(tf.float32, shape=(None, self.data_dim), name=\"x\")\n self.z = tf.placeholder(tf.float32, shape=(None, self.gen_input_dim), name=\"z\")\n\n if self.conditional:\n if self.n_classes <= 0:\n raise ValueError(\"n_classes must be greater than 1 for conditional GAN\")\n self.y = tf.placeholder(tf.float32, shape=(None, self.n_classes), name=\"y\")\n\n with tf.variable_scope(\"GAN\"):\n # here will create the generator and discriminator networks with initial reuse=False\n self.gen = self.generator(z=self.z, y=self.y, reuse_gen=False)\n self.discr_data, self.discr_gen = self.discriminator(x=self.x, y=self.y, reuse_discr=False)\n\n if not self.label_smoothing:\n discr_loss_data = -tf.log(tf.nn.sigmoid(get_nn_layer(self.discr_data, layer_from_top=1)))\n else:\n logger.debug(\"Label smoothing enabled with smoothing probability: %f\" % self.smoothing_prob)\n discr_logit = get_nn_layer(self.discr_data, 
layer_from_top=1)\n discr_loss_data = tf.nn.sigmoid_cross_entropy_with_logits(logits=discr_logit,\n labels=tf.ones(shape=tf.shape(discr_logit)) * self.smoothing_prob)\n\n discr_gen_logit = get_nn_layer(self.discr_gen, layer_from_top=1)\n discr_gen_probs = tf.nn.sigmoid(discr_gen_logit)\n self.discr_loss = tf.reduce_mean(discr_loss_data - tf.log(1 - discr_gen_probs))\n self.gen_loss = tf.reduce_mean(-tf.log(discr_gen_probs))\n\n self.info_gan_loss = tf.constant(0.0)\n if self.info_gan:\n logger.debug(\"Adding InfoGAN regularization\")\n with tf.variable_scope(\"InfoGAN\"):\n # The last-but-one layer of the discriminator (when the input is from\n # fake generated data) will be the input to category prediction layer.\n # The expectation is w.r.t generator output.\n self.q_network = self.init_info_gan_network(get_nn_layer(self.discr_gen, layer_from_top=2),\n reuse=False)\n\n # the below will be used to predict category for debug; it is not required for training\n self.q_pred = self.init_info_gan_network(get_nn_layer(self.discr_data, layer_from_top=2),\n reuse=True)\n\n # get softmax output layer of q_network that predicts class\n q_out = get_nn_layer(self.q_network, layer_from_top=1)\n # compute entropy of class predictions\n self.info_gan_loss = self.marginal_mutual_info(q_out, self.pvals)\n\n vars = tf.trainable_variables()\n for v in vars: logger.debug(v.name)\n g_params = [v for v in vars if v.name.startswith('GAN/G/')]\n d_params = [v for v in vars if v.name.startswith('GAN/D/')]\n q_params = [v for v in vars if v.name.startswith('InfoGAN/')]\n if self.info_gan and len(q_params) == 0:\n # Just to be sure we do not have programmatic errors\n raise ValueError(\"No q_params found for InfoGAN\")\n\n if self.l2_lambda > 0:\n # add L2 regularization loss\n logger.debug(\"Adding L2 regularization\")\n l2_loss_g, l2_loss_d, l2_loss_q = self.get_l2_regularizers(g_params, d_params, q_params)\n self.gen_loss += self.l2_lambda * l2_loss_g\n self.discr_loss += self.l2_lambda * l2_loss_d\n\n if self.info_gan:\n self.info_gan_loss += self.l2_lambda * l2_loss_q\n g_params.extend(q_params)\n d_params.extend(q_params)\n\n self.gen_training_op = self.training_op(self.gen_loss + self.info_gan_lambda * self.info_gan_loss,\n var_list=g_params, use_adam=self.use_adam)\n self.discr_training_op = self.training_op(self.discr_loss + self.info_gan_lambda * self.info_gan_loss,\n var_list=d_params, use_adam=self.use_adam)\n\n if self.enable_ano_gan:\n # Prepare variables required for AnoGAN functionality\n #\n # Note: AnoGAN functionality will come in use only *after* the\n # GAN (simple|conditional|InfoGAN) has been fully trained.\n self.ano_gan_lambda = tf.placeholder(tf.float32, shape=(), name=\"ano_gan_lambda\")\n self.ano_z = tf.Variable(initial_value=tf.zeros([1, self.gen_input_dim]), trainable=True, name=\"ano_z\")\n with tf.variable_scope(\"GAN\", reuse=True):\n self.ano_gan_net_G, self.ano_gan_net_D = self.init_ano_gan_network(x=self.x, y=self.y, z=self.ano_z)\n\n ano_gan_G, ano_gan_D, ano_gan_D_features = self.ano_gan_outputs()\n\n # reconstruction loss: generate synthetic data in original\n # feature space that is close to input data\n self.ano_gan_loss_R = tf.reduce_sum(tf.abs(tf.subtract(self.x, ano_gan_G)))\n # ano_gan_loss_R = tf.nn.l2_loss(tf.subtract(self.x, ano_gan_G))\n\n # discrimination loss: encourage generated data to be\n # similar to real data\n self.ano_gan_loss_D = tf.reduce_sum(-tf.log(tf.nn.sigmoid(ano_gan_D)))\n\n self.ano_gan_info_loss = tf.constant(0.0)\n if self.info_gan:\n # apply 
appropriate variable scope for reuse\n with tf.variable_scope(\"InfoGAN\"):\n # The last-but-one layer of the discriminator will be the input to\n # category prediction layer. The expectation is w.r.t generator output.\n self.ano_gan_q_network = self.init_info_gan_network(ano_gan_D_features, reuse=True)\n\n # Compute the InfoGAN entropy regularization loss for\n # AnoGAN with the output of ano_gan_q_network\n self.ano_gan_info_loss = self.marginal_mutual_info(get_nn_layer(self.ano_gan_q_network,\n layer_from_top=1),\n self.pvals)\n\n self.ano_gan_loss = (1 - self.ano_gan_lambda) * self.ano_gan_loss_R + \\\n self.ano_gan_lambda * (self.ano_gan_loss_D + self.ano_gan_info_loss)\n\n self.ano_gan_training_op = self.training_op(self.ano_gan_loss, var_list=[self.ano_z], use_adam=self.use_adam)\n\n def marginal_mutual_info(self, q_c_x, c, include_h_c=False):\n \"\"\" Compute avg. entropy of probability distributions arcoss all rows of q_c_x\n\n Each row of q_c_x contains one probability distribution (likely computed with softmax)\n \"\"\"\n mi = -tf.reduce_mean(tf.reduce_sum(tf.multiply(c, tf.log(q_c_x + TINY)), axis=1))\n if include_h_c:\n # usually this is constant; hence add this only if asked for\n mi += -tf.reduce_mean(tf.reduce_sum(c * tf.log(c + TINY), axis=1))\n return mi\n\n def get_l2_regularizers(self, g_params, d_params, q_params=None):\n \"\"\" Returns L2 regularizers for generator and discriminator variables\n\n :param g_params: list of tf.Variable\n The generator parameters\n :param d_params: list of tf.Variable\n The discriminator parameters\n :param q_params: list of tf.Variable\n The InfoGAN regularization parameters\n :return: generator, discriminator, InfoGAN L2 regularizer losses\n \"\"\"\n l2_loss_g = 0.0\n l2_loss_d = 0.0\n l2_loss_q = 0.0\n for v in g_params:\n l2_loss_g += tf.nn.l2_loss(v)\n for v in d_params:\n l2_loss_d += tf.nn.l2_loss(v)\n if q_params is not None:\n for v in q_params:\n l2_loss_q += tf.nn.l2_loss(v)\n return l2_loss_g, l2_loss_d, l2_loss_q\n\n def generator(self, z, y=None, reuse_gen=False):\n inp = z\n if y is not None:\n inp = tf.concat(values=[z, y], axis=1)\n with tf.variable_scope('G'):\n gen_layer_names = [\"g_%d\" % (i+1) for i in range(len(self.gen_layer_nodes))]\n gen = self.gan_construct(inp, self.gen_layer_nodes, names=gen_layer_names,\n activations=self.gen_layer_activations, reuse=reuse_gen)\n return gen\n\n def discriminator(self, x, y=None, reuse_discr=False, prep_gen_input=True):\n \"\"\" Prepares the discriminator network\n\n Note: Assumes that the generator network has already been created so that it\n can be reused. 
The discriminator network is reused if reuse_discr=True.\n\n :param x: np.ndarray\n :param y: np.ndarray\n TensorFlow Variable that expects one-hot encoded labels\n :param reuse_discr: bool\n Whether to reuse previously declared discriminator variables in the scope\n :param prep_gen_input: bool\n Whether to return the network that takes generator output as input to discriminator\n :return: tf.Variable, tf.Variable\n \"\"\"\n with tf.variable_scope('D'):\n discr_layer_names = [\"d_%d\" % (i+1) for i in range(len(self.discr_layer_nodes))]\n\n inp = x if y is None else tf.concat(values=[x, y], axis=1)\n discr_data = self.gan_construct(inp, self.discr_layer_nodes, names=discr_layer_names,\n activations=self.discr_layer_activations, reuse=reuse_discr)\n\n discr_gen = None\n if prep_gen_input:\n # the discriminator's loss for the generated data needs to back-propagate through\n # the same network as that for the real data; hence reuse_discr=True\n gen_out = get_nn_layer(self.gen, layer_from_top=1)\n inp = gen_out if y is None else tf.concat(values=[gen_out, y], axis=1)\n discr_gen = self.gan_construct(inp, self.discr_layer_nodes, names=discr_layer_names,\n activations=self.discr_layer_activations, reuse=True)\n return discr_data, discr_gen\n\n def init_info_gan_network(self, x, reuse=False):\n return self.gan_construct(x, n_neurons=[self.n_classes], names=[\"q_out\"],\n activations=[tf.nn.softmax], reuse=reuse)\n\n def init_session(self):\n self.session = tf.Session()\n init = tf.global_variables_initializer()\n self.session.run(init)\n\n def training_op(self, loss, var_list=None, use_adam=False):\n if use_adam:\n optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)\n else:\n global_step = tf.Variable(0, trainable=False)\n learning_rate = tf.train.exponential_decay(self.learning_rate, global_step,\n 200, 0.96, staircase=True)\n optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)\n\n return optimizer.minimize(loss, var_list=var_list)\n\n def init_ano_gan_network(self, x=None, y=None, z=None):\n # here we assume that all networks have already been created\n # and hence we will set reuse=True.\n # Might be redundant if reuse=True before entering this method.\n ano_gan_net_G = self.generator(z=z, y=y, reuse_gen=True)\n ano_gan_G = ano_gan_net_G[len(ano_gan_net_G) - 1]\n ano_gan_net_D, _ = self.discriminator(x=ano_gan_G, y=y, reuse_discr=True, prep_gen_input=False)\n return ano_gan_net_G, ano_gan_net_D\n\n def ano_gan_outputs(self):\n \"\"\" Returns layers of generator and discrminator which will be used by AnoGAN\n Returns the last layers of discriminator and generator,\n and last-but-one of discriminator. 
The last-but-one layer of\n discriminator is used for the entropy regularization if the GAN is InfoGAN variety.\n \"\"\"\n return self.ano_gan_net_G[len(self.ano_gan_net_G) - 1], \\\n self.ano_gan_net_D[len(self.ano_gan_net_D) - 1], \\\n self.ano_gan_net_D[len(self.ano_gan_net_D) - 2] if self.info_gan else None\n\n def get_gen_input_samples(self, n=1, gen_y=False):\n if gen_y and self.pvals is None:\n raise ValueError(\"pvals is required\")\n y = None\n if gen_y:\n y = np.random.multinomial(1, pvals=self.pvals, size=n).astype(float)\n return np.random.uniform(low=self.unif_lo, high=self.unif_hi, size=(n, self.gen_input_dim)), y\n\n def get_gen_output_samples(self, z, y=None):\n feed_dict = {self.z: z}\n if self.conditional: feed_dict.update({self.y: y})\n x = self.session.run([get_nn_layer(self.gen, layer_from_top=1)], feed_dict=feed_dict)[0]\n return x\n\n def gan_layer(self, x, n_neurons, name, activation=None, reuse=False):\n with tf.variable_scope(name, reuse=reuse):\n n_inputs = int(x.get_shape()[1])\n stddev = 2. / np.sqrt(n_inputs)\n init = tf.truncated_normal((n_inputs, n_neurons), stddev=stddev)\n W = tf.get_variable(\"W\", initializer=init)\n b = tf.get_variable(\"b\", initializer=tf.zeros([n_neurons]))\n Z = tf.matmul(x, W) + b\n if activation is not None:\n return activation(Z)\n else:\n return Z\n\n def gan_construct(self, x, n_neurons, names, activations, reuse=False):\n layer_input = x\n layers = list()\n for i, name in enumerate(names):\n hidden = self.gan_layer(layer_input, n_neurons=n_neurons[i], name=names[i],\n activation=activations[i], reuse=reuse)\n layers.append(hidden)\n layer_input = hidden\n return layers\n\n def fit(self, x, y=None):\n if self.session is None:\n self.init_session()\n\n fit_tm = Timer()\n for epoch in range(self.n_epochs):\n tm = Timer()\n i = 0\n for x_batch, y_batch in get_train_batches(x, y=y, batch_size=self.batch_size, shuffle=self.shuffle):\n # for the discriminator, use the true y labels\n z, _ = self.get_gen_input_samples(n=x_batch.shape[0], gen_y=False)\n feed_dict_discr = {self.x: x_batch, self.z: z}\n if self.conditional: feed_dict_discr.update({self.y: y_batch})\n self.session.run([self.discr_training_op], feed_dict=feed_dict_discr)\n if i % 1 == 0: # train gen_loss only half as frequently as discr_loss\n # z, y_ = self.get_gen_input_samples(n=x_batch.shape[0], gen_y=False)\n feed_dict_gen = {self.z: z}\n if self.conditional: feed_dict_gen.update({self.y: y_batch})\n self.session.run([self.gen_training_op], feed_dict=feed_dict_gen)\n i += 1\n\n if self.listener is not None:\n self.listener(self, epoch=epoch, epoch_start_tm=tm)\n logger.debug(fit_tm.message(\"GAN fitted (max epochs: %d)\" % self.n_epochs))\n\n def get_discriminator_probability(self, x, y=None):\n \"\"\" Returns the probability of the input under the current discriminator model\n\n :param x: np.ndarray\n :param y: np.array\n This is like a list of integers. 
Should contain the class labels (*not* one-hot-encoded).\n :return: np.array\n Probability of each input data\n \"\"\"\n discr_data_out = get_nn_layer(self.discr_data, layer_from_top=1)\n if not self.conditional:\n feed_dict_discr = {self.x: x}\n probs = self.session.run([discr_data_out], feed_dict=feed_dict_discr)[0]\n probs = probs.reshape(-1)\n else:\n feed_dict_discr = {self.x: x}\n if y is not None:\n y_one_hot = np.zeros(shape=(x.shape[0], self.n_classes), dtype=np.float32)\n for i, c in enumerate(y):\n y_one_hot[i, c] = 1.\n feed_dict_discr.update({self.y: y_one_hot})\n probs = self.session.run([discr_data_out], feed_dict=feed_dict_discr)[0]\n probs = probs.reshape(-1)\n else:\n # marginalize over all classes\n probs = np.zeros(x.shape[0], dtype=np.float32)\n for c in range(self.n_classes):\n y_one_hot = np.zeros(shape=(x.shape[0], self.n_classes), dtype=np.float32)\n y_one_hot[:, c] = 1.\n feed_dict_discr.update({self.y: y_one_hot})\n probs_c = self.session.run([discr_data_out], feed_dict=feed_dict_discr)[0]\n probs += self.pvals[c] * probs_c.reshape(-1)\n return probs\n\n def get_log_likelihood(self, x, n_samples=None, n_reps=10, gmm_min_k=2, gmm_max_k=10):\n \"\"\" Returns the avg. and std. dev. of log-likelihood of samples in x under the trained GAN model\n\n This is a simple but rough technique, and might not be very accurate.\n\n In the original GAN paper (Goodfellow et al. 2014), the authors\n employed a parzen-windows based technique. The Gaussian Mixture Model\n is a coarse approximation to it.\n \"\"\"\n if n_samples is None:\n n_samples = x.shape[0]\n ll = []\n for i in range(n_reps):\n z, y = self.get_gen_input_samples(n=n_samples, gen_y=self.conditional)\n x_gen = self.get_gen_output_samples(z=z, y=y)\n try:\n gmm, _, _ = fit_gmm(x_gen, x_gen, min_k=gmm_min_k, max_k=gmm_max_k)\n ll.append(np.mean(gmm.score_samples(x)))\n except:\n logger.warning(\"Exception in iter %d/%d of gmm: %s\" % (i+1, n_reps, str(sys.exc_info()[0])))\n\n ll = np.array(ll, dtype=np.float32)\n return np.mean(ll), np.std(ll)\n\n def get_anomaly_score_z(self, x, y_one_hot=None, z=None, ano_gan_lambda=0.1):\n \"\"\" Get the anomaly score with an initialized z\n\n This corresponds to one back-prop step in AnoGAN for computing\n a reconstructed image, for the input test point x, starting from an initial z\n\n :param x: np.ndarray (one row-vector)\n Test instance whose image needs to be reconstructed\n :param y_one_hot: np.ndarray (one row-vector)\n :param z: np.ndarray (one row-vector)\n If this is None, a random z will be sampled, else the input z will be use\n :param ano_gan_lambda: float\n :return: gen_x, ano_z, loss, loss_R, loss_D\n gen_x: the reconstructed image for 'x' starting from latent representation 'z'\n ano_z: the optimal computed by back-propagation\n loss: AnoGAN loss\n loss_R: reconstruction loss component of the AnoGAN loss\n loss_D: descrimination loss component of the AnoGAN loss\n \"\"\"\n if not self.enable_ano_gan:\n raise RuntimeError(\"AnoGAN not enabled for this network\")\n\n if z is None:\n z, _ = self.get_gen_input_samples(n=1)\n\n # assign_z = self.ano_z.assign(z)\n # self.session.run(assign_z)\n\n # tf.Variable.load() is less expensive than adding new ops nodes to tf.Graph\n self.ano_z.load(z, self.session)\n\n ano_gan_G, ano_gan_D, _ = self.ano_gan_outputs()\n feed_dict = {self.x: x, self.ano_gan_lambda: ano_gan_lambda}\n if self.conditional:\n feed_dict.update({self.y: y_one_hot})\n self.session.run([self.ano_gan_training_op], feed_dict=feed_dict)\n rets = 
self.session.run([ano_gan_G, self.ano_gan_loss, self.ano_z,\n self.ano_gan_loss_R, self.ano_gan_loss_D, self.ano_gan_info_loss], feed_dict=feed_dict)\n gen_x = rets[0]\n loss = rets[1]\n ano_z = rets[2]\n loss_R = rets[3]\n loss_D = rets[4] + rets[5]\n\n # make z values in [lo, hi]\n ano_z = self.clip(ano_z, lo=self.unif_lo, hi=self.unif_hi)\n\n return gen_x, ano_z, loss, loss_R, loss_D\n\n def get_anomaly_score_xy(self, x, y=None, z=None, ano_gan_lambda=0.1, tol=1e-3, max_iters=100):\n \"\"\" Computes anomaly score per instance and y (if conditional)\n\n :param x: np.ndarray\n :param y: int\n if y is None, and self.conditional==True, then pvals will be used\n :param z: np.ndarray\n :param tol: float\n :param max_iters: int\n :return: gen_x, z, loss, trace\n \"\"\"\n tm = Timer()\n y_one_hot = None\n if self.conditional:\n if y is None:\n y_one_hot = np.array(self.pvals, dtype=np.float32).reshape((1, -1))\n else:\n y_one_hot = np.zeros(shape=(1, self.n_classes), dtype=np.float32)\n y_one_hot[0, y] = 1\n gen_x, z, loss, loss_R, loss_D = self.get_anomaly_score_z(x, y_one_hot=y_one_hot, z=z, ano_gan_lambda=ano_gan_lambda)\n losses = [loss]\n losses_R = [loss_R]\n losses_D = [loss_D]\n trace = []\n i = 0\n prev_loss = np.inf\n while i < max_iters and abs(loss - prev_loss) > tol:\n prev_loss = loss\n gen_x, z, loss, loss_R, loss_D = self.get_anomaly_score_z(x, y_one_hot=y_one_hot, z=z, ano_gan_lambda=ano_gan_lambda)\n losses.append(loss)\n losses_R.append(loss_R)\n losses_D.append(loss_D)\n trace.append(gen_x)\n i += 1\n logger.debug(tm.message(\"AnoGAN loss (iters: %d, final loss: %f)\" % (i, losses[-1])))\n # logger.debug(\"losses:\\n%s\" % (str(losses)))\n return gen_x, z, loss, loss_R, loss_D, np.vstack(trace)\n\n def clip(self, z, lo, hi):\n z = np.minimum(np.maximum(z, lo), hi)\n return z\n\n def get_anomaly_score_x(self, x, ano_gan_lambda=0.1, tol=1e-3, max_iters=100, use_loss=True, mode_avg=True):\n \"\"\" Try each label and return the generated instance with best metrics (loss or distance)\n\n :param x: np.ndarray\n :param tol: float\n :param max_iters: int\n :param use_loss: bool\n if use_loss==True, then use the composite loss, else use the\n euclidean distance to find best regenerated point when the GAN is conditional\n :param mode_avg: bool\n If self.conditional==True and mode_avg==True, then soft-membership\n as defined by self.pvals will be used instead of individual\n one-hot-encoding membership.\n :return:\n \"\"\"\n if mode_avg or not self.conditional:\n return self.get_anomaly_score_xy(x, y=None, z=None, ano_gan_lambda=ano_gan_lambda,\n tol=tol, max_iters=max_iters)\n\n gen_x = z = loss = loss_R = loss_D = trace = None\n best_dist = np.inf\n best_loss = np.inf\n for y in range(self.n_classes):\n gen_x_y, z_y, loss_y, loss_R_y, loss_D_y, trace_y = self.get_anomaly_score_xy(x, y=y, z=None,\n ano_gan_lambda=ano_gan_lambda,\n tol=tol, max_iters=max_iters)\n if use_loss:\n if loss_y < best_loss:\n best_loss = loss_y\n gen_x, z, loss, loss_R, loss_D, trace = (gen_x_y, z_y, loss_y, loss_R_y, loss_D_y, trace_y)\n else:\n dist = np.sum(np.square(np.subtract(x, gen_x_y)))\n if dist < best_dist:\n best_dist = dist\n gen_x, z, loss, loss_R, loss_D, trace = (gen_x_y, z_y, loss_y, loss_R_y, loss_D_y, trace_y)\n\n return gen_x, z, loss, loss_R, loss_D, trace\n\n def get_anomaly_score(self, x, ano_gan_lambda=0.1, tol=1e-3, max_iters=100, use_loss=True, mode_avg=True):\n \"\"\" Returns the anomaly score of test instance x\n\n :param x: np.ndarray (one row-vector)\n :param ano_gan_lambda: 
float\n :param tol: float\n loss tolerance to check for termination of back-propagation\n steps when computing reconstruction image\n :param max_iters: int\n :param use_loss: bool\n (applies only to conditional GAN and when mode_avg is False, default: True)\n If true, then employs the AnoGAN loss when selecting the best category for test instance\n :param mode_avg: bool\n (applies only to conditional GAN, default: True)\n :return:\n \"\"\"\n losses = np.zeros(x.shape[0], dtype=np.float32)\n losses_R = np.zeros(x.shape[0], dtype=np.float32)\n losses_D = np.zeros(x.shape[0], dtype=np.float32)\n traces = []\n new_x = np.zeros(shape=x.shape, dtype=x.dtype)\n for i in range(x.shape[0]):\n gen_x, z, loss, loss_R, loss_D, trace = self.get_anomaly_score_x(x[[i]], ano_gan_lambda=ano_gan_lambda,\n tol=tol, max_iters=max_iters,\n use_loss=use_loss, mode_avg=mode_avg)\n new_x[i, :] = gen_x[0, :]\n losses[i] = loss\n losses_R[i] = loss_R\n losses_D[i] = loss_D\n traces.append(trace)\n return new_x, losses, losses_R, losses_D, traces\n\n def save_session(self, file_path, overwrite=False):\n if tf.train.checkpoint_exists(file_path):\n if overwrite:\n logger.debug(\"Overwriting existing checkpoint for prefix %s\" % file_path)\n else:\n logger.debug(\"Checkpoint already exists for prefix %s\" % file_path)\n return None\n saver = tf.train.Saver()\n save_path = saver.save(self.session, file_path)\n logger.debug(\"Saved session to path %s\" % save_path)\n return save_path\n\n def load_session(self, file_path):\n if not tf.train.checkpoint_exists(file_path):\n logger.debug(\"Checkpoint does not exist for prefix %s\" % file_path)\n return False\n if self.session is None:\n self.session = tf.Session()\n saver = tf.train.Saver()\n saver.restore(self.session, file_path)\n logger.debug(\"Loaded saved session from path %s\" % file_path)\n return True\n\n def close_session(self):\n if self.session is not None:\n self.session.close()\n self.session = None\n\n\ndef get_gan_option_list():\n parser = ArgumentParser()\n parser.add_argument(\"--dataset\", type=str, default=\"airline\", required=False,\n help=\"Dataset name\")\n parser.add_argument(\"--results_dir\", action=\"store\", default=\"./temp\",\n help=\"Folder where the generated metrics will be stored\")\n parser.add_argument(\"--randseed\", action=\"store\", type=int, default=42,\n help=\"Random seed so that results can be replicated\")\n parser.add_argument(\"--label_smoothing\", action=\"store_true\", default=False,\n help=\"Whether to use one-sided label smoothing\")\n parser.add_argument(\"--smoothing_prob\", action=\"store\", type=float, default=0.9,\n help=\"Probability to use for one-sided label smoothing\")\n parser.add_argument(\"--ano_gan_lambda\", action=\"store\", type=float, default=0.1,\n help=\"The AnoGAN penalty term that balances reconstruction loss and discriminative loss\")\n parser.add_argument(\"--info_gan\", action=\"store_true\", default=False,\n help=\"Whether to use simple GAN or InfoGAN\")\n parser.add_argument(\"--info_gan_lambda\", action=\"store\", type=float, default=1.0,\n help=\"The InfoGAN penalty term\")\n parser.add_argument(\"--conditional\", action=\"store_true\", default=False,\n help=\"Whether to use simple GAN or Conditional GAN\")\n parser.add_argument(\"--ano_gan\", action=\"store_true\", default=False,\n help=\"Whether to enable AnoGAN functionality\")\n parser.add_argument(\"--ano_gan_individual\", action=\"store_true\", default=False,\n help=\"Whether to use each class individually for Conditional AnoGAN. 
\"\n \"By default the pval metric will be used instead of one-hot-encoding during test evaluation\")\n parser.add_argument(\"--ano_gan_use_dist\", action=\"store_true\", default=False,\n help=\"Whether to use euclidean dist-based reconstruction error for Conditional AnoGAN. \"\n \"By default, the composite loss will be used\")\n parser.add_argument(\"--n_ano_gan_test\", type=int, default=1, required=False,\n help=\"Number of times AnoGAN loss will be computed for each test instance\")\n parser.add_argument(\"--budget\", type=int, default=1, required=False,\n help=\"Budget for feedback\")\n parser.add_argument(\"--n_epochs\", type=int, default=200, required=False,\n help=\"Max training epochs\")\n parser.add_argument(\"--train_batch_size\", type=int, default=25, required=False,\n help=\"Batch size for stochastic gradient descent based training methods\")\n parser.add_argument(\"--log_file\", type=str, default=\"\", required=False,\n help=\"File path to debug logs\")\n parser.add_argument(\"--debug\", action=\"store_true\", default=False,\n help=\"Whether to enable output of debug statements\")\n parser.add_argument(\"--plot\", action=\"store_true\", default=False,\n help=\"Whether to plot figures\")\n return parser\n\n\nclass GanOpts(object):\n def __init__(self, args):\n self.dataset = args.dataset\n self.results_dir = args.results_dir\n self.randseed = args.randseed\n self.label_smoothing = args.label_smoothing\n self.smoothing_prob = args.smoothing_prob\n self.ano_gan_lambda = args.ano_gan_lambda\n self.ano_gan_individual = args.ano_gan_individual\n self.ano_gan_use_dist = args.ano_gan_use_dist\n self.info_gan = args.info_gan\n self.info_gan_lambda = args.info_gan_lambda\n self.conditional = args.conditional\n self.ano_gan = args.ano_gan\n self.ano_gan_individual = args.ano_gan_individual\n self.ano_gan_use_dist = args.ano_gan_use_dist\n self.n_ano_gan_test = args.n_ano_gan_test\n self.budget = args.budget\n self.n_epochs = args.n_epochs\n self.train_batch_size = args.train_batch_size\n self.log_file = args.log_file\n self.debug = args.debug\n self.plot = args.plot\n self.k = 0\n\n def get_opts_name_prefix(self):\n # ano_gan_sig = \"_ano\" if self.ano_gan else \"\"\n info_gan_sig = \"_info\" if self.info_gan else \"\"\n info_gan_lambda_sig = \"\" if self.info_gan_lambda == 1.0 else \"_il%d\" % int(self.info_gan_lambda*10)\n cond_sig = \"_cond\" if self.conditional else \"\"\n algo_sig = \"%s%s_gan\" % (cond_sig, info_gan_sig)\n k_sig = \"_k%d\" % self.k if self.k > 0 else \"\"\n smoothing_sig = \"_ls%d\" % (int(self.smoothing_prob*10)) if self.label_smoothing else \"\"\n name = \"%s%s%s%s%s_%d\" % (self.dataset, algo_sig, k_sig, smoothing_sig, info_gan_lambda_sig, self.n_epochs)\n return name\n\n def get_alad_metrics_name_prefix(self):\n return self.get_opts_name_prefix()\n\n def str_opts(self):\n name = self.get_alad_metrics_name_prefix()\n s = \"%s\" % name\n return s"
]
| [
[
"tensorflow.matmul",
"numpy.mean",
"tensorflow.global_variables_initializer",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.set_random_seed",
"tensorflow.trainable_variables",
"tensorflow.shape",
"tensorflow.concat",
"tensorflow.subtract",
"tensorflow.train.Saver",
"tensorflow.Variable",
"tensorflow.constant",
"tensorflow.variable_scope",
"numpy.sqrt",
"tensorflow.nn.sigmoid",
"numpy.vstack",
"numpy.random.multinomial",
"numpy.array",
"tensorflow.zeros",
"tensorflow.train.AdamOptimizer",
"numpy.zeros",
"tensorflow.Session",
"tensorflow.nn.l2_loss",
"tensorflow.truncated_normal",
"numpy.std",
"tensorflow.log",
"tensorflow.placeholder",
"tensorflow.get_variable",
"numpy.subtract",
"tensorflow.train.exponential_decay",
"numpy.random.seed",
"tensorflow.train.checkpoint_exists",
"sklearn.mixture.GaussianMixture",
"numpy.random.uniform",
"numpy.maximum"
]
]
|
astronomical-data-processing/curig | [
"4d0e944b8c67e99106e56decda00c9c424002625"
]
| [
"python/curagridder/cursl.py"
]
| [
"# Contents in this file are specified for RASCIL\n\nimport ctypes\nimport os\nimport warnings\n\nimport numpy as np\nfrom ctypes import c_double\nfrom ctypes import c_int\nfrom ctypes import c_float\nfrom ctypes import c_void_p\n\nc_int_p = ctypes.POINTER(c_int)\nc_float_p = ctypes.POINTER(c_float)\nc_double_p = ctypes.POINTER(c_double)\n\n# TODO: See if there is a way to improve this so it is less hacky.\nlib = None\n# Try to load a local library directly.\nlib_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),\"libcurafft.so\")\ntry:\n lib = ctypes.cdll.LoadLibrary(lib_path)\nexcept Exception:\n raise RuntimeError('Failed to find curagridder library')\n\n\n\n\nms2dirty_1 = lib.ms2dirty_1\n# the last two parameters have default value\nms2dirty_1.argtypes = [c_int, c_int, c_int, c_double, c_double, np.ctypeslib.ndpointer(np.double, flags='C'),\n np.ctypeslib.ndpointer(np.complex128, flags='C'), np.ctypeslib.ndpointer(np.complex128, flags='C'), c_double, c_double, c_int] \nms2dirty_1.restype = c_int\n\nms2dirty_2 = lib.ms2dirty_2\nms2dirty_2.argtypes = [c_int, c_int, c_int, c_double, c_double, np.ctypeslib.ndpointer(np.double, flags='C'),\n np.ctypeslib.ndpointer(np.complex128, flags='C'), np.ctypeslib.ndpointer(np.double, flags='C'), np.ctypeslib.ndpointer(np.complex128, flags='C'), c_double, c_double, c_int] \nms2dirty_2.restype = c_int\n\ndirty2ms_1 = lib.dirty2ms_1\n# the last two parameters have default value\ndirty2ms_1.argtypes = [c_int, c_int, c_int, c_double, c_double, np.ctypeslib.ndpointer(np.double, flags='C'),\n np.ctypeslib.ndpointer(np.complex128, flags='C'), np.ctypeslib.ndpointer(np.complex128, flags='C'), c_double, c_double, c_int] \ndirty2ms_1.restype = c_int\n\ndirty2ms_2 = lib.dirty2ms_2\ndirty2ms_2.argtypes = [c_int, c_int, c_int, c_double, c_double, np.ctypeslib.ndpointer(np.double, flags='C'),\n np.ctypeslib.ndpointer(np.complex128, flags='C'), np.ctypeslib.ndpointer(np.double, flags='C'), np.ctypeslib.ndpointer(np.complex128, flags='C'), c_double, c_double, c_int] \ndirty2ms_2.restype = c_int\n\n#----------------------------------------\n# the interfaces below are idential to NIFTY\n#-----------------------------------------\n\ndef ms2dirty(uvw, freq, ms, wgt, nxdirty, nydirty, rad_pix_x, rad_pix_y, nx, ny, epsilon, do_wstacking, *args):\n \"\"\"\n Generate an image from visibility by non-uniform fourier transform\n Arguments:\n uvw - 3D coordinates, numpy array, shape - (nrow,3)\n freq - frequencies\n ms - visibility, shape - (nrow,)\n wgt - weight\n nxdirty, nydirty - image size\n deg_pix_ - degree per pixel\n epsilon - tolerance of relative error (expect, default 1e-6)\n do_wstacking - True, improved w stacking.\n \n Return:\n dirty image - shape-[nxdirty,nydirty]\n \"\"\"\n nrow = uvw.shape[0]\n sigma = 2\n fov = rad_pix_x * nxdirty * 180 / np.pi\n dirty = np.zeros((nxdirty,nydirty),dtype=np.complex128)\n sign = -1\n # u = np.ctypeslib.as_ctypes(uvw[:,0])\n # v = np.ctypeslib.as_ctypes(uvw[:,1])\n # w = np.ctypeslib.as_ctypes(uvw[:,2])\n if(wgt is None):\n ms2dirty_1(nrow,nxdirty,nydirty,fov,freq[0],uvw\n ,ms,dirty,epsilon,sigma,sign)\n else:\n ms2dirty_2(nrow,nxdirty,nydirty,fov,freq[0],uvw\n ,ms,wgt,dirty,epsilon,sigma,sign)\n dirty = np.reshape(dirty,[nxdirty,nydirty])\n return dirty.real\n\ndef dirty2ms(uvw, freq, dirty, wgt, rad_pix_x, rad_pix_y, nx, ny, epsilon, do_wstacking, *args):\n \"\"\"\n Generate Visibility from dirty image by non-uniform fourier transform\n Arguments:\n uvw - 3D coordinates, numpy array, shape - (nrow,3)\n freq - 
frequencies\n ms - visibility, shape - (nrow,)\n wgt - weight\n nxdirty, nydirty - image size\n fov - field of view\n epsilon - tolerance of relative error (expect, default 1e-6)\n sigma - upsampling factor for grid (default 1.25)\n Return:\n vis - shape-[M,]\n \"\"\"\n nrow = uvw.shape[0]\n nxdirty = dirty.shape[0]\n nydirty = dirty.shape[1]\n sigma = 2\n fov = rad_pix_x * nxdirty * 180 / np.pi\n sign = -1\n ms = np.zeros((nrow,1),dtype=np.complex128)\n dirty1 = np.zeros(dirty.shape,dtype=np.complex128)\n dirty1.real = dirty\n\n if(wgt is None):\n dirty2ms_1(nrow,nxdirty,nydirty,fov,freq[0],uvw\n ,ms,dirty1,epsilon,sigma,sign)\n else:\n dirty2ms_2(nrow,nxdirty,nydirty,fov,freq[0],uvw\n ,ms,wgt,dirty1,epsilon,sigma,sign)\n return ms"
]
| [
[
"numpy.ctypeslib.ndpointer",
"numpy.reshape",
"numpy.zeros"
]
]
|
aotitoola/political-sa-reddit | [
"a27d2017730ca8bd87e227b0fd24372b83e1da8d"
]
| [
"app/util/utils.py"
]
| [
"import os\nimport streamlit as st\nimport joblib\nimport torch\nimport json\n\nimport base64\nimport uuid\nimport re\nimport jupytext\nfrom bokeh.models.widgets import Div\nimport math\nimport importlib.util\n\n\ndef import_from_file(module_name: str, filepath: str):\n \"\"\"\n Imports a module from file.\n Args:\n module_name (str): Assigned to the module's __name__ parameter (does not\n influence how the module is named outside of this function)\n filepath (str): Path to the .py file\n Returns:\n The module\n \"\"\"\n spec = importlib.util.spec_from_file_location(module_name, filepath)\n module = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(module)\n return module\n\n\ndef notebook_header(text):\n \"\"\"\n Insert section header into a jinja file, formatted as notebook cell.\n\n Leave 2 blank lines before the header.\n \"\"\"\n return f\"\"\"# # {text}\n\"\"\"\n\n\ndef code_header(text):\n \"\"\"\n Insert section header into a jinja file, formatted as Python comment.\n\n Leave 2 blank lines before the header.\n \"\"\"\n seperator_len = (75 - len(text)) / 2\n seperator_len_left = math.floor(seperator_len)\n seperator_len_right = math.ceil(seperator_len)\n return f\"# {'-' * seperator_len_left} {text} {'-' * seperator_len_right}\"\n\n\ndef to_notebook(code):\n \"\"\"Converts Python code to Jupyter notebook format.\"\"\"\n notebook = jupytext.reads(code, fmt=\"py\")\n return jupytext.writes(notebook, fmt=\"ipynb\")\n\n\ndef open_link(url, new_tab=True):\n \"\"\"Dirty hack to open a new web page with a streamlit button.\"\"\"\n # From: https://discuss.streamlit.io/t/how-to-link-a-button-to-a-webpage/1661/3\n if new_tab:\n js = f\"window.open('{url}')\" # New tab or window\n else:\n js = f\"window.location.href = '{url}'\" # Current tab\n html = '<img src onerror=\"{}\">'.format(js)\n div = Div(text=html)\n st.bokeh_chart(div)\n\n\ndef download_button(\n object_to_download, download_filename, button_text # , pickle_it=False\n):\n \"\"\"\n Generates a link to download the given object_to_download.\n\n From: https://discuss.streamlit.io/t/a-download-button-with-custom-css/4220\n Params:\n ------\n object_to_download: The object to be downloaded.\n download_filename (str): filename and extension of file. e.g. mydata.csv,\n some_txt_output.txt download_link_text (str): Text to display for download\n link.\n button_text (str): Text to display on download button (e.g. 
'click here to download file')\n pickle_it (bool): If True, pickle file.\n Returns:\n -------\n (str): the anchor tag to download object_to_download\n Examples:\n --------\n download_link(your_df, 'YOUR_DF.csv', 'Click to download data!')\n download_link(your_str, 'YOUR_STRING.txt', 'Click to download text!')\n \"\"\"\n # if pickle_it:\n # try:\n # object_to_download = pickle.dumps(object_to_download)\n # except pickle.PicklingError as e:\n # st.write(e)\n # return None\n\n # else:\n # if isinstance(object_to_download, bytes):\n # pass\n\n # elif isinstance(object_to_download, pd.DataFrame):\n # object_to_download = object_to_download.to_csv(index=False)\n\n # # Try JSON encode for everything else\n # else:\n # object_to_download = json.dumps(object_to_download)\n\n try:\n # some strings <-> bytes conversions necessary here\n b64 = base64.b64encode(object_to_download.encode()).decode()\n except AttributeError as e:\n b64 = base64.b64encode(object_to_download).decode()\n\n button_uuid = str(uuid.uuid4()).replace(\"-\", \"\")\n button_id = re.sub(\"\\d+\", \"\", button_uuid)\n\n custom_css = f\"\"\" \n <style>\n #{button_id} {{\n display: inline-flex;\n align-items: center;\n justify-content: center;\n background-color: rgb(255, 255, 255);\n color: rgb(38, 39, 48);\n padding: .25rem .75rem;\n position: relative;\n text-decoration: none;\n border-radius: 4px;\n border-width: 1px;\n border-style: solid;\n border-color: rgb(230, 234, 241);\n border-image: initial;\n }} \n #{button_id}:hover {{\n border-color: rgb(246, 51, 102);\n color: rgb(246, 51, 102);\n }}\n #{button_id}:active {{\n box-shadow: none;\n background-color: rgb(246, 51, 102);\n color: white;\n }}\n </style> \"\"\"\n\n dl_link = (\n custom_css\n + f'<a download=\"{download_filename}\" id=\"{button_id}\" href=\"data:file/txt;base64,{b64}\">{button_text}</a><br><br>'\n )\n st.markdown(dl_link, unsafe_allow_html=True)\n\n\ndef format_task_list(task):\n if task == 'Preprocessing':\n return 'Data & Visualization'\n return task\n\n\ndef capitalize(text):\n return text.capitalize()\n\n\ndef round_down(num, divisor):\n return num - (num % divisor)\n\n\ndef load_lstm_bert_pretrained_model(algo, filename):\n outdir = f'{os.getcwd()}/models/{algo}'\n fullpath = os.path.join(outdir, filename)\n\n loaded_model = None\n try:\n loaded_model = joblib.load(fullpath)\n print('model loaded successfully.')\n except FileNotFoundError as fnf_error:\n st.error(\"Model not found in directory.\")\n print(fnf_error)\n return loaded_model\n\n\ndef save_tfidf_model(algo, model, func, metrics, datalength, solver=None, depth=None):\n\n model_dir = f'{os.getcwd()}/models/{algo}/model'\n metrics_dir = f'{os.getcwd()}/models/{algo}/metrics'\n\n for dirr in [model_dir, metrics_dir]:\n if not os.path.exists(dirr):\n os.makedirs(dirr)\n\n if func == 'logistic_regression':\n model_file = f'{func}_{solver}_{datalength}.pkl'\n metrics_file = f'{func}_{solver}_{datalength}.json'\n else:\n model_file = f'{func}_{depth}_{datalength}.pkl'\n metrics_file = f'{func}_{depth}_{datalength}.json'\n\n model_path = os.path.join(model_dir, model_file)\n joblib.dump(model, model_path)\n\n metrics_path = os.path.join(metrics_dir, metrics_file)\n # save metrics\n with open(metrics_path, 'w+') as f:\n json.dump(metrics, f, indent=4)\n\n print('model saved successfully.')\n\n\ndef save_lstm_bert_model(algo, model, best_sampling, hyper_params, vocab, acc, f1):\n\n model_dir = f'{os.getcwd()}/models/{algo}/model'\n params_dir = f'{os.getcwd()}/models/{algo}/params'\n vocab_dir = 
f'{os.getcwd()}/models/{algo}/vocab'\n metrics_dir = f'{os.getcwd()}/models/{algo}/metrics'\n\n for dirr in [model_dir, params_dir, vocab_dir, metrics_dir]:\n if not os.path.exists(dirr):\n os.makedirs(dirr)\n\n model_file = f'{algo}_model_s{best_sampling}_e{hyper_params[\"epochs\"]}.dict'\n model_path = os.path.join(model_dir, model_file)\n\n params_file = f'{algo}_params_s{best_sampling}_e{hyper_params[\"epochs\"]}.json'\n params_path = os.path.join(params_dir, params_file)\n\n metrics_file = f'{algo}_metrics_s{best_sampling}_e{hyper_params[\"epochs\"]}.pth'\n metrics_path = os.path.join(metrics_dir, metrics_file)\n\n try:\n # save model\n torch.save(model.state_dict(), model_path)\n\n # save hyperparams\n with open(params_path, 'w+') as f:\n json.dump(hyper_params, f, indent=4)\n\n # save metrics\n metrics_data = {\n \"acc\": acc,\n \"f1\": f1\n }\n torch.save(metrics_data, metrics_path)\n\n if algo == 'lstm':\n # save vocab\n vocab_file = f'{algo}_vocab_s{best_sampling}_e{hyper_params[\"epochs\"]}.pth'\n vocab_path = os.path.join(vocab_dir, vocab_file)\n torch.save(vocab, vocab_path)\n\n print('model saved successfully.')\n except FileNotFoundError as fnf_error:\n print(fnf_error)\n\n\ndef lstm_bert_model_exists(algo, filename):\n filepath = f'{os.getcwd()}/models/{algo}/model/{filename}'\n if os.path.exists(filepath):\n return True\n return False\n\n\ndef load_lstm_bert_model(algo, model, filename):\n output_file = f'{filename}.pkl'\n outdir = f'{os.getcwd()}/models/{algo}/model'\n\n if not os.path.exists(outdir):\n os.makedirs(outdir)\n\n fullpath = os.path.join(outdir, output_file)\n joblib.dump(model, fullpath)\n print('model saved successfully.')\n\n\ndef load_tfidf_model(algo, filename):\n # output_file = f'{filename}.pkl'\n outdir = f'{os.getcwd()}/models/{algo}/model'\n fullpath = os.path.join(outdir, filename)\n return joblib.load(fullpath)\n\n\ndef tfidf_model_exists(algo, filename):\n filepath = f'{os.getcwd()}/models/{algo}/model/{filename}'\n if os.path.exists(filepath):\n return True\n return False\n"
]
| [
[
"torch.save"
]
]
|
dontLoveBugs/box-convolutions | [
"caa244ced4a7c17e4668bc9419525d3a8e320583"
]
| [
"examples/ENet.py"
]
| [
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass ENet(nn.Module):\n def __init__(self, n_classes=19):\n super().__init__()\n\n self.net = nn.ModuleList([\n Downsampler(3, 16),\n Bottleneck(16, 64, 0.01, downsample=True),\n\n Bottleneck(64, 64, 0.01),\n Bottleneck(64, 64, 0.01),\n Bottleneck(64, 64, 0.01),\n Bottleneck(64, 64, 0.01),\n\n Bottleneck(64, 128, 0.1, downsample=True),\n\n Bottleneck(128, 128, 0.1),\n Bottleneck(128, 128, 0.1, dilation=2),\n Bottleneck(128, 128, 0.1, asymmetric_ksize=5),\n Bottleneck(128, 128, 0.1, dilation=4),\n Bottleneck(128, 128, 0.1),\n Bottleneck(128, 128, 0.1, dilation=8),\n Bottleneck(128, 128, 0.1, asymmetric_ksize=5),\n Bottleneck(128, 128, 0.1, dilation=16),\n\n Bottleneck(128, 128, 0.1),\n Bottleneck(128, 128, 0.1, dilation=2),\n Bottleneck(128, 128, 0.1, asymmetric_ksize=5),\n Bottleneck(128, 128, 0.1, dilation=4),\n Bottleneck(128, 128, 0.1),\n Bottleneck(128, 128, 0.1, dilation=8),\n Bottleneck(128, 128, 0.1, asymmetric_ksize=5),\n Bottleneck(128, 128, 0.1, dilation=16),\n\n Upsampler(128, 64),\n\n Bottleneck(64, 64, 0.1),\n Bottleneck(64, 64, 0.1),\n\n Upsampler(64, 16),\n\n Bottleneck(16, 16, 0.1),\n\n nn.ConvTranspose2d(16, n_classes+1, (2,2), (2,2))])\n\n def forward(self, x):\n max_indices_stack = []\n\n for module in self.net:\n if isinstance(module, Upsampler):\n x = module(x, max_indices_stack.pop())\n else:\n x = module(x)\n\n if type(x) is tuple: # then it was a downsampling bottleneck block\n x, max_indices = x\n max_indices_stack.append(max_indices)\n\n return x\n\nclass BoxENet(ENet):\n def __init__(self, n_classes=19, max_input_h=512, max_input_w=1024):\n nn.Module.__init__(self)\n h, w = max_input_h, max_input_w # shorten names for convenience\n\n self.net = nn.ModuleList([\n Downsampler(3, 16),\n Bottleneck(16, 64, 0.01, downsample=True),\n\n Bottleneck(64, 64, 0.01),\n BottleneckBoxConv(64, 4, h // 4, w // 4, 0.15),\n Bottleneck(64, 64, 0.01),\n BottleneckBoxConv(64, 4, h // 4, w // 4, 0.15),\n\n Bottleneck(64, 128, 0.1, downsample=True),\n\n Bottleneck(128, 128, 0.1),\n BottleneckBoxConv(128, 4, h // 8, w // 8, 0.25),\n Bottleneck(128, 128, 0.1, asymmetric_ksize=5),\n BottleneckBoxConv(128, 4, h // 8, w // 8, 0.25),\n Bottleneck(128, 128, 0.1),\n BottleneckBoxConv(128, 4, h // 8, w // 8, 0.25),\n Bottleneck(128, 128, 0.1, asymmetric_ksize=5),\n BottleneckBoxConv(128, 4, h // 8, w // 8, 0.25),\n\n Bottleneck(128, 128, 0.1),\n BottleneckBoxConv(128, 4, h // 8, w // 8, 0.25),\n Bottleneck(128, 128, 0.1, asymmetric_ksize=5),\n BottleneckBoxConv(128, 4, h // 8, w // 8, 0.25),\n Bottleneck(128, 128, 0.1),\n BottleneckBoxConv(128, 4, h // 8, w // 8, 0.25),\n Bottleneck(128, 128, 0.1, asymmetric_ksize=5),\n BottleneckBoxConv(128, 4, h // 8, w // 8, 0.25),\n\n Upsampler(128, 64),\n\n Bottleneck(64, 64, 0.1),\n BottleneckBoxConv(64, 4, h // 4, w // 4, 0.1),\n\n Upsampler(64, 16),\n\n BottleneckBoxConv(16, 2, h // 2, w // 2, 0.1),\n\n nn.ConvTranspose2d(16, n_classes+1, (2,2), (2,2))])\n\n\nclass Upsampler(nn.Module):\n def __init__(self, in_channels, out_channels):\n super().__init__()\n bt_channels = in_channels // 4\n\n self.main_branch = nn.Sequential(\n nn.Conv2d(in_channels, bt_channels, (1,1), bias=False),\n nn.BatchNorm2d(bt_channels, 1e-3),\n nn.ReLU(True),\n \n nn.ConvTranspose2d(bt_channels, bt_channels, (3,3), 2, 1, 1),\n nn.BatchNorm2d(bt_channels, 1e-3),\n nn.ReLU(True),\n\n nn.Conv2d(bt_channels, out_channels, (1,1), bias=False),\n nn.BatchNorm2d(out_channels, 1e-3))\n\n self.skip_connection = 
nn.Sequential(\n nn.Conv2d(in_channels, out_channels, (1,1), bias=False),\n nn.BatchNorm2d(out_channels, 1e-3))\n\n def forward(self, x, max_indices):\n x_skip_connection = self.skip_connection(x)\n x_skip_connection = F.max_unpool2d(x_skip_connection, max_indices, (2,2))\n\n return F.relu(x_skip_connection + self.main_branch(x), inplace=True)\n\nclass Downsampler(nn.Module):\n def __init__(self, in_channels, out_channels):\n super().__init__()\n self.conv = nn.Conv2d(in_channels, out_channels-in_channels, (3,3), 2, 1, bias=False)\n self.bn = nn.BatchNorm2d(out_channels, 1e-3)\n self.prelu = nn.PReLU(out_channels)\n\n def forward(self, x):\n x = torch.cat([F.max_pool2d(x, (2,2)), self.conv(x)], 1)\n x = self.bn(x)\n x = self.prelu(x)\n return x\n\nclass Bottleneck(nn.Module):\n def __init__(\n self, in_channels, out_channels, dropout_prob=0.0, downsample=False,\n asymmetric_ksize=None, dilation=1, use_prelu=True):\n\n super().__init__()\n bt_channels = in_channels // 4\n self.downsample = downsample\n self.channels_to_pad = out_channels-in_channels\n\n input_stride = 2 if downsample else 1\n\n main_branch = [\n nn.Conv2d(in_channels, bt_channels, input_stride, input_stride, bias=False),\n nn.BatchNorm2d(bt_channels, 1e-3),\n nn.PReLU(bt_channels) if use_prelu else nn.ReLU(True)\n ]\n \n if asymmetric_ksize is None:\n main_branch += [\n nn.Conv2d(bt_channels, bt_channels, (3,3), 1, dilation, dilation)\n ]\n else:\n assert type(asymmetric_ksize) is int\n ksize, padding = asymmetric_ksize, (asymmetric_ksize-1) // 2\n main_branch += [\n nn.Conv2d(bt_channels, bt_channels, (ksize,1), 1, (padding,0), bias=False),\n nn.Conv2d(bt_channels, bt_channels, (1,ksize), 1, (0,padding))\n ]\n \n main_branch += [\n nn.BatchNorm2d(bt_channels, 1e-3),\n nn.PReLU(bt_channels) if use_prelu else nn.ReLU(True),\n nn.Conv2d(bt_channels, out_channels, (1,1), bias=False),\n nn.BatchNorm2d(out_channels, 1e-3),\n nn.Dropout2d(dropout_prob)\n ]\n\n self.main_branch = nn.Sequential(*main_branch) \n self.output_activation = nn.PReLU(out_channels) if use_prelu else nn.ReLU(True)\n\n def forward(self, x):\n if self.downsample:\n x_skip_connection, max_indices = F.max_pool2d(x, (2,2), return_indices=True)\n else:\n x_skip_connection = x\n\n if self.channels_to_pad > 0:\n x_skip_connection = F.pad(x_skip_connection, (0,0, 0,0, 0,self.channels_to_pad))\n\n x = self.output_activation(x_skip_connection + self.main_branch(x))\n \n if self.downsample:\n return x, max_indices\n else:\n return x\n\nfrom box_convolution import BoxConv2d\n\nclass BottleneckBoxConv(nn.Module):\n def __init__(self, in_channels, num_boxes, max_input_h, max_input_w, dropout_prob=0.0):\n super().__init__()\n assert in_channels % num_boxes == 0\n bt_channels = in_channels // num_boxes # bottleneck channels\n\n self.main_branch = nn.Sequential(\n nn.Conv2d(in_channels, bt_channels, (1,1), bias=False),\n nn.BatchNorm2d(bt_channels),\n nn.ReLU(True),\n \n # BEHOLD:\n BoxConv2d(bt_channels, num_boxes, max_input_h, max_input_w),\n\n nn.BatchNorm2d(in_channels),\n nn.Dropout2d(dropout_prob))\n\n def forward(self, x):\n return F.relu(x + self.main_branch(x), inplace=True)\n"
]
| [
[
"torch.nn.Sequential",
"torch.nn.BatchNorm2d",
"torch.nn.ConvTranspose2d",
"torch.nn.functional.max_unpool2d",
"torch.nn.Module.__init__",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.PReLU",
"torch.nn.functional.pad",
"torch.nn.functional.max_pool2d",
"torch.nn.Dropout2d"
]
]
|
lauragustafson/fvcore | [
"1f43d07bf4d2ed987928c0c8501b31dfdbbca8a5"
]
| [
"tests/test_common.py"
]
| [
"# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n\nimport math\nimport time\nimport typing\nimport unittest\n\nimport numpy as np\nfrom fvcore.common.config import CfgNode\nfrom fvcore.common.history_buffer import HistoryBuffer\nfrom fvcore.common.registry import Registry\nfrom fvcore.common.timer import Timer\n\n\nclass TestHistoryBuffer(unittest.TestCase):\n def setUp(self) -> None:\n super().setUp()\n np.random.seed(42)\n\n @staticmethod\n def create_buffer_with_init(\n num_values: int, buffer_len: int = 1000000\n ) -> typing.Callable[[], typing.Union[object, np.ndarray]]:\n \"\"\"\n Return a HistoryBuffer of the given length filled with random numbers.\n\n Args:\n buffer_len: length of the created history buffer.\n num_values: number of random numbers added to the history buffer.\n \"\"\"\n\n max_value = 1000\n values: np.ndarray = np.random.randint(max_value, size=num_values)\n\n def create_buffer() -> typing.Union[object, np.ndarray]:\n buf = HistoryBuffer(buffer_len)\n for v in values:\n buf.update(v)\n return buf, values\n\n return create_buffer\n\n def test_buffer(self) -> None:\n \"\"\"\n Test creation of HistoryBuffer and the methods provided in the class.\n \"\"\"\n\n num_iters = 100\n for _ in range(num_iters):\n gt_len = 1000\n buffer_len = np.random.randint(1, gt_len)\n create_buffer = TestHistoryBuffer.create_buffer_with_init(\n gt_len, buffer_len\n )\n buf, gt = create_buffer() # pyre-ignore\n\n values, iterations = zip(*buf.values())\n self.assertEqual(len(values), buffer_len)\n self.assertEqual(len(iterations), buffer_len)\n # pyre-fixme[16]: `bool` has no attribute `all`.\n self.assertTrue((values == gt[-buffer_len:]).all())\n iterations_gt = np.arange(gt_len - buffer_len, gt_len)\n self.assertTrue(\n (iterations == iterations_gt).all(),\n \", \".join(str(x) for x in iterations),\n )\n self.assertAlmostEqual(buf.global_avg(), gt.mean())\n w = 100\n effective_w = min(w, buffer_len)\n self.assertAlmostEqual(\n buf.median(w),\n np.median(gt[-effective_w:]),\n None,\n \" \".join(str(x) for x in gt[-effective_w:]),\n )\n self.assertAlmostEqual(\n buf.avg(w),\n np.mean(gt[-effective_w:]),\n None,\n \" \".join(str(x) for x in gt[-effective_w:]),\n )\n\n\nclass TestTimer(unittest.TestCase):\n def test_timer(self) -> None:\n \"\"\"\n Test basic timer functions (pause, resume, and reset).\n \"\"\"\n timer = Timer()\n time.sleep(0.5)\n self.assertTrue(0.99 > timer.seconds() >= 0.5)\n\n timer.pause()\n time.sleep(0.5)\n\n self.assertTrue(0.99 > timer.seconds() >= 0.5)\n\n timer.resume()\n time.sleep(0.5)\n self.assertTrue(1.49 > timer.seconds() >= 1.0)\n\n timer.reset()\n self.assertTrue(0.49 > timer.seconds() >= 0)\n\n def test_avg_second(self) -> None:\n \"\"\"\n Test avg_seconds that counts the average time.\n \"\"\"\n for pause_second in (0.1, 0.15):\n timer = Timer()\n for t in (pause_second,) * 10:\n if timer.is_paused():\n timer.resume()\n time.sleep(t)\n timer.pause()\n self.assertTrue(\n math.isclose(pause_second, timer.avg_seconds(), rel_tol=1e-1),\n msg=\"{}: {}\".format(pause_second, timer.avg_seconds()),\n )\n\n\nclass TestCfgNode(unittest.TestCase):\n @staticmethod\n def gen_default_cfg() -> CfgNode:\n cfg = CfgNode()\n cfg.KEY1 = \"default\"\n cfg.KEY2 = \"default\"\n cfg.EXPRESSION = [3.0]\n\n return cfg\n\n def test_merge_from_file(self) -> None:\n \"\"\"\n Test merge_from_file function provided in the class.\n \"\"\"\n import pkg_resources\n\n base_yaml = pkg_resources.resource_filename(__name__, \"configs/base.yaml\")\n config_yaml = 
pkg_resources.resource_filename(__name__, \"configs/config.yaml\")\n\n cfg = TestCfgNode.gen_default_cfg()\n cfg.merge_from_file(base_yaml)\n self.assertEqual(cfg.KEY1, \"base\")\n self.assertEqual(cfg.KEY2, \"base\")\n\n cfg = TestCfgNode.gen_default_cfg()\n\n with self.assertRaises(Exception):\n # config.yaml contains unsafe yaml tags,\n # test if an exception is thrown\n cfg.merge_from_file(config_yaml)\n\n cfg.merge_from_file(config_yaml, allow_unsafe=True)\n self.assertEqual(cfg.KEY1, \"base\")\n self.assertEqual(cfg.KEY2, \"config\")\n self.assertEqual(cfg.EXPRESSION, [1, 4, 9])\n\n def test_merge_from_list(self) -> None:\n \"\"\"\n Test merge_from_list function provided in the class.\n \"\"\"\n cfg = TestCfgNode.gen_default_cfg()\n cfg.merge_from_list([\"KEY1\", \"list1\", \"KEY2\", \"list2\"])\n self.assertEqual(cfg.KEY1, \"list1\")\n self.assertEqual(cfg.KEY2, \"list2\")\n\n def test_setattr(self) -> None:\n \"\"\"\n Test __setattr__ function provided in the class.\n \"\"\"\n cfg = TestCfgNode.gen_default_cfg()\n cfg.KEY1 = \"new1\"\n cfg.KEY3 = \"new3\"\n self.assertEqual(cfg.KEY1, \"new1\")\n self.assertEqual(cfg.KEY3, \"new3\")\n\n # Test computed attributes, which can be inserted regardless of whether\n # the CfgNode is frozen or not.\n cfg = TestCfgNode.gen_default_cfg()\n cfg.COMPUTED_1 = \"computed1\"\n self.assertEqual(cfg.COMPUTED_1, \"computed1\")\n cfg.freeze()\n cfg.COMPUTED_2 = \"computed2\"\n self.assertEqual(cfg.COMPUTED_2, \"computed2\")\n\n # Test computed attributes, which should be 'insert only' (could not be\n # updated).\n cfg = TestCfgNode.gen_default_cfg()\n cfg.COMPUTED_1 = \"computed1\"\n with self.assertRaises(KeyError) as err:\n cfg.COMPUTED_1 = \"update_computed1\"\n self.assertTrue(\n \"Computed attributed 'COMPUTED_1' already exists\" in str(err.exception)\n )\n\n # Resetting the same value should be safe:\n cfg.COMPUTED_1 = \"computed1\"\n\n\nclass TestRegistry(unittest.TestCase):\n def test_registry(self) -> None:\n \"\"\"\n Test registering and accessing objects in the Registry.\n \"\"\"\n OBJECT_REGISTRY = Registry(\"OBJECT\")\n\n @OBJECT_REGISTRY.register()\n class Object1:\n pass\n\n with self.assertRaises(Exception) as err:\n OBJECT_REGISTRY.register(Object1)\n self.assertTrue(\n \"An object named 'Object1' was already registered in 'OBJECT' registry!\"\n in str(err.exception)\n )\n\n self.assertEqual(OBJECT_REGISTRY.get(\"Object1\"), Object1)\n\n with self.assertRaises(KeyError) as err:\n OBJECT_REGISTRY.get(\"Object2\")\n self.assertTrue(\n \"No object named 'Object2' found in 'OBJECT' registry!\"\n in str(err.exception)\n )\n\n items = list(OBJECT_REGISTRY)\n self.assertListEqual(\n items, [(\"Object1\", Object1)], \"Registry iterable contains valid item\"\n )\n"
]
| [
[
"numpy.random.seed",
"numpy.median",
"numpy.mean",
"numpy.arange",
"numpy.random.randint"
]
]
|
newnativeabq/mendeley-search | [
"a9d7514deb57c34d72e06b1f33b0e8e9bb9fb090"
]
| [
"google-trends/processdict.py"
]
| [
"# -*- coding: utf-8 -*-\n\"\"\"\nProcessDict\nReturns dataframe random sample of size n from medical dictionary sorted by\nLexical difficulty.\n\nCreated on Tue Jul 30 21:30:48 2019\n\n@author: vince\n\"\"\"\nimport pandas as pd\n\nclass request():\n def __init__(self, n=10, dictionary='meddict_clean.csv'):\n self.n = n\n self.dictionary = dictionary\n self.terms = self.getTerms()\n \n def getTerms(self, **kw):\n return loadDict(self.dictionary).sample(self.n)\n \n\ndef loadDict(filename):\n '''\n Loads dictionary file and returns dataframe object\n '''\n df = pd.read_csv(filename)\n return df\n\n"
]
| [
[
"pandas.read_csv"
]
]
|
Zrealshadow/ark-nlp | [
"159045d17747524bd4e9af7f65f1d0283e8098e6"
]
| [
"ark_nlp/model/ner/span_bert/span_bert_named_entity_recognition.py"
]
| [
"\"\"\"\n# Copyright 2020 Xiang Wang, Inc. All Rights Reserved\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n\nAuthor: Xiang Wang, [email protected]\nStatus: Active\n\"\"\"\n\nimport torch\n\nfrom ark_nlp.factory.utils import conlleval\nfrom ark_nlp.factory.metric import SpanMetrics\nfrom ark_nlp.factory.metric import BiaffineSpanMetrics\nfrom ark_nlp.factory.task.base._token_classification import TokenClassificationTask\n\n\nclass SpanNERTask(TokenClassificationTask):\n \"\"\"\n Span模式的命名实体识别Task\n \n Args:\n module: 深度学习模型\n optimizer: 训练模型使用的优化器名或者优化器对象\n loss_function: 训练模型使用的损失函数名或损失函数对象\n class_num (:obj:`int` or :obj:`None`, optional, defaults to None): 标签数目\n scheduler (:obj:`class`, optional, defaults to None): scheduler对象\n n_gpu (:obj:`int`, optional, defaults to 1): GPU数目\n device (:obj:`class`, optional, defaults to None): torch.device对象,当device为None时,会自动检测是否有GPU\n cuda_device (:obj:`int`, optional, defaults to 0): GPU编号,当device为None时,根据cuda_device设置device\n ema_decay (:obj:`int` or :obj:`None`, optional, defaults to None): EMA的加权系数\n **kwargs (optional): 其他可选参数\n \"\"\" # noqa: ignore flake8\"\n\n def _get_train_loss(\n self,\n inputs,\n outputs,\n **kwargs\n ):\n loss = self._compute_loss(inputs, outputs, **kwargs)\n\n self._compute_loss_record(**kwargs)\n\n return outputs, loss\n\n def _get_evaluate_loss(\n self,\n inputs,\n outputs,\n **kwargs\n ):\n loss = self._compute_loss(inputs, outputs, **kwargs)\n self._compute_loss_record(**kwargs)\n\n return outputs, loss\n\n def _compute_loss(\n self,\n inputs,\n logits,\n verbose=True,\n **kwargs\n ):\n start_logits = logits[0]\n end_logits = logits[1]\n\n start_logits = start_logits.view(-1, len(self.id2cat))\n end_logits = end_logits.view(-1, len(self.id2cat))\n\n active_loss = inputs['attention_mask'].view(-1) == 1\n\n active_start_logits = start_logits[active_loss]\n active_end_logits = end_logits[active_loss]\n\n active_start_labels = inputs['start_label_ids'].long().view(-1)[active_loss]\n active_end_labels = inputs['end_label_ids'].long().view(-1)[active_loss]\n\n start_loss = self.loss_function(\n active_start_logits,\n active_start_labels\n )\n end_loss = self.loss_function(\n active_end_logits,\n active_end_labels\n )\n\n loss = start_loss + end_loss\n\n return loss\n\n def _on_evaluate_epoch_begin(self, **kwargs):\n\n self.metric = SpanMetrics(self.id2cat)\n\n if self.ema_decay:\n self.ema.store(self.module.parameters())\n self.ema.copy_to(self.module.parameters())\n\n self._on_epoch_begin_record(**kwargs)\n\n def _on_evaluate_step_end(self, inputs, logits, **kwargs):\n\n with torch.no_grad():\n # compute loss\n logits, loss = self._get_evaluate_loss(inputs, logits, **kwargs)\n\n length = inputs['attention_mask'].cpu().numpy().sum() - 2\n\n S = []\n start_logits = logits[0]\n end_logits = logits[1]\n\n start_pred = torch.argmax(start_logits, -1).cpu().numpy()[0][1:length+1]\n end_pred = torch.argmax(end_logits, -1).cpu().numpy()[0][1:length+1]\n\n for i, s_l in enumerate(start_pred):\n if s_l == 0:\n continue\n for j, e_l in enumerate(end_pred[i:]):\n if s_l == e_l:\n S.append((s_l, i, i + j))\n break\n\n self.metric.update(true_subject=inputs['label_ids'][0], pred_subject=S)\n\n self.evaluate_logs['eval_example'] += len(inputs['label_ids'])\n self.evaluate_logs['eval_step'] += 1\n self.evaluate_logs['eval_loss'] += loss.item()\n\n def 
_on_evaluate_epoch_end(\n self,\n validation_data,\n epoch=1,\n is_evaluate_print=True,\n id2cat=None,\n **kwargs\n ):\n\n if id2cat is None:\n id2cat = self.id2cat\n\n with torch.no_grad():\n eval_info, entity_info = self.metric.result()\n\n if is_evaluate_print:\n print('eval_info: ', eval_info)\n print('entity_info: ', entity_info)\n\n def _train_collate_fn(self, batch):\n \"\"\"将InputFeatures转换为Tensor\"\"\"\n\n input_ids = torch.tensor([f['input_ids'] for f in batch], dtype=torch.long)\n attention_mask = torch.tensor([f['attention_mask'] for f in batch], dtype=torch.long)\n token_type_ids = torch.tensor([f['token_type_ids'] for f in batch], dtype=torch.long)\n start_label_ids = torch.cat([f['start_label_ids'] for f in batch])\n end_label_ids = torch.cat([f['end_label_ids'] for f in batch])\n label_ids = [f['label_ids'] for f in batch]\n\n tensors = {\n 'input_ids': input_ids,\n 'attention_mask': attention_mask,\n 'token_type_ids': token_type_ids,\n 'start_label_ids': start_label_ids,\n 'end_label_ids': end_label_ids,\n 'label_ids': label_ids\n }\n\n return tensors\n\n def _evaluate_collate_fn(self, batch):\n return self._train_collate_fn(batch)\n"
]
| [
[
"torch.cat",
"torch.tensor",
"torch.argmax",
"torch.no_grad"
]
]
|
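The heart of the span decoding in the record above is: argmax over start and end logits, then pair each non-zero start label with the first matching end label to its right. The standalone sketch below isolates that loop with toy tensors (batch of 1, sequence length 6, 3 label classes, all assumed for illustration).

import torch

torch.manual_seed(0)
seq_len, num_labels = 6, 3
start_logits = torch.randn(1, seq_len, num_labels)
end_logits = torch.randn(1, seq_len, num_labels)

with torch.no_grad():
    start_pred = torch.argmax(start_logits, -1)[0]  # shape (seq_len,)
    end_pred = torch.argmax(end_logits, -1)[0]

spans = []
for i, s_l in enumerate(start_pred.tolist()):
    if s_l == 0:          # label 0 means "no entity starts here"
        continue
    for j, e_l in enumerate(end_pred[i:].tolist()):
        if e_l == s_l:    # first matching end label closes the span
            spans.append((s_l, i, i + j))
            break

print(spans)  # list of (label, start_index, end_index) tuples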
HelmholtzAI-Consultants-Munich/test-doi | [
"b5cc685eeff27ef4fad69b96396bdc72d7d33bde"
]
| [
"src/ml_pipeline_template/models/mnist_module.py"
]
| [
"from typing import Any, List\n\nimport torch\nfrom pytorch_lightning import LightningModule\nfrom torchmetrics import MaxMetric\nfrom torchmetrics.classification.accuracy import Accuracy\n\nfrom ml_pipeline_template.models.components.simple_dense_net import SimpleDenseNet\n\n\nclass MNISTLitModule(LightningModule):\n \"\"\"\n Example of LightningModule for MNIST classification.\n\n A LightningModule organizes your PyTorch code into 5 sections:\n - Computations (init).\n - Train loop (training_step)\n - Validation loop (validation_step)\n - Test loop (test_step)\n - Optimizers (configure_optimizers)\n\n Read the docs:\n https://pytorch-lightning.readthedocs.io/en/latest/common/lightning_module.html\n \"\"\"\n\n def __init__(\n self,\n net: torch.nn.Module,\n lr: float = 0.001,\n weight_decay: float = 0.0005,\n ):\n super().__init__()\n\n # this line allows to access init params with 'self.hparams' attribute\n # it also ensures init params will be stored in ckpt\n self.save_hyperparameters(logger=False)\n\n self.net = net\n\n # loss function\n self.criterion = torch.nn.CrossEntropyLoss()\n\n # use separate metric instance for train, val and test step\n # to ensure a proper reduction over the epoch\n self.train_acc = Accuracy()\n self.val_acc = Accuracy()\n self.test_acc = Accuracy()\n\n # for logging best so far validation accuracy\n self.val_acc_best = MaxMetric()\n\n def forward(self, x: torch.Tensor):\n return self.net(x)\n\n def step(self, batch: Any):\n x, y = batch\n logits = self.forward(x)\n loss = self.criterion(logits, y)\n preds = torch.argmax(logits, dim=1)\n return loss, preds, y\n\n def training_step(self, batch: Any, batch_idx: int):\n loss, preds, targets = self.step(batch)\n\n # log train metrics\n acc = self.train_acc(preds, targets)\n self.log(\"train/loss\", loss, on_step=False, on_epoch=True, prog_bar=False)\n self.log(\"train/acc\", acc, on_step=False, on_epoch=True, prog_bar=True)\n\n # we can return here dict with any tensors\n # and then read it in some callback or in `training_epoch_end()`` below\n # remember to always return loss from `training_step()` or else backpropagation will fail!\n return {\"loss\": loss, \"preds\": preds, \"targets\": targets}\n\n def training_epoch_end(self, outputs: List[Any]):\n # `outputs` is a list of dicts returned from `training_step()`\n pass\n\n def validation_step(self, batch: Any, batch_idx: int):\n loss, preds, targets = self.step(batch)\n\n # log val metrics\n acc = self.val_acc(preds, targets)\n self.log(\"val/loss\", loss, on_step=False, on_epoch=True, prog_bar=False)\n self.log(\"val/acc\", acc, on_step=False, on_epoch=True, prog_bar=True)\n\n return {\"loss\": loss, \"preds\": preds, \"targets\": targets}\n\n def validation_epoch_end(self, outputs: List[Any]):\n acc = self.val_acc.compute() # get val accuracy from current epoch\n self.val_acc_best.update(acc)\n self.log(\"val/acc_best\", self.val_acc_best.compute(), on_epoch=True, prog_bar=True)\n\n def test_step(self, batch: Any, batch_idx: int):\n loss, preds, targets = self.step(batch)\n\n # log test metrics\n acc = self.test_acc(preds, targets)\n self.log(\"test/loss\", loss, on_step=False, on_epoch=True)\n self.log(\"test/acc\", acc, on_step=False, on_epoch=True)\n\n return {\"loss\": loss, \"preds\": preds, \"targets\": targets}\n\n def test_epoch_end(self, outputs: List[Any]):\n pass\n\n def on_epoch_end(self):\n # reset metrics at the end of every epoch\n self.train_acc.reset()\n self.test_acc.reset()\n self.val_acc.reset()\n\n def configure_optimizers(self):\n 
\"\"\"Choose what optimizers and learning-rate schedulers to use in your optimization.\n Normally you'd need one. But in the case of GANs or similar you might have multiple.\n\n See examples here:\n https://pytorch-lightning.readthedocs.io/en/latest/common/lightning_module.html#configure-optimizers\n \"\"\"\n return torch.optim.Adam(\n params=self.parameters(), lr=self.hparams.lr, weight_decay=self.hparams.weight_decay\n )\n"
]
| [
[
"torch.argmax",
"torch.nn.CrossEntropyLoss"
]
]
|
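Stripped of the Lightning scaffolding, the training step above is cross-entropy on logits plus an argmax for hard predictions; a framework-free sketch with dummy tensors (batch size and class count assumed) is below.

import torch

criterion = torch.nn.CrossEntropyLoss()

logits = torch.randn(8, 10)            # batch of 8 examples, 10 classes
targets = torch.randint(0, 10, (8,))   # integer class labels

loss = criterion(logits, targets)      # scalar loss used for backprop
preds = torch.argmax(logits, dim=1)    # hard predictions fed to the accuracy metric
accuracy = (preds == targets).float().mean()

print(loss.item(), accuracy.item())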
miketarpey/piper | [
"d1620727889228d61fbe448f4747cef9351ede59",
"d1620727889228d61fbe448f4747cef9351ede59"
]
| [
"piper/styler.py",
"piper/custom.py"
]
| [
"'''\r\nStyler function style summary\r\n=============================\r\nStyler.applymap(func) : element-wise styles\r\nStyler.apply(func, axis=0) : column-wise styles\r\nStyler.apply(func, axis=1) : row-wise styles\r\nStyler.apply(func, axis=None) : tablewise styles\r\n'''\r\nimport pandas as pd\r\nimport numpy as np\r\n\r\nimport logging\r\nlogger = logging.getLogger(__name__)\r\n\r\n\r\ndef boolean_filter(s, operator=None, value=0):\r\n ''' For series or value, evaluate if True/False against\r\n operator and value and return result\r\n\r\n Example\r\n -------\r\n boolean_filter(10, '>', 0)\r\n > True\r\n\r\n boolean_filter(xx['isna'], '>', 0)\r\n > True\r\n\r\n\r\n Parameters\r\n ----------\r\n s\r\n value(int, float, str, pd.Series)\r\n value(s) to be evaluated (True/False) against operator\r\n operator\r\n str - default '=' , possible values:\r\n '!=', '<', '<=', '>', '>=', 'min', 'max', 'null'\r\n value\r\n str/integer - value to compare against\r\n\r\n\r\n Returns\r\n -------\r\n ndArray of evaluated boolean values.\r\n\r\n '''\r\n if operator is None:\r\n operator = '='\r\n\r\n logger.debug(f's -> {type(s)}')\r\n logger.debug(f'operator -> {operator}')\r\n logger.debug(f'value -> {type(value)}')\r\n\r\n if isinstance(s, pd.Series) and s.dtype == 'O' and\\\r\n operator in ('min', 'max'):\r\n raise ValueError('min and max numeric values only')\r\n\r\n # If 1st and 2nd comparators are not Series,\r\n # Then use simple eval comparison\r\n if not isinstance(s, pd.Series) and \\\r\n not isinstance(value, pd.Series):\r\n\r\n # eval_selection = {'=': s == value, '!=': s != value}\r\n # if operator in ('<=', '>=', 'max', 'null', 'min'):\r\n # error_msg = f'String not comparable with Series using {operator}'\r\n # raise NameError(error_msg)\r\n\r\n eval_selection = {\r\n 'null': pd.isnull(s),\r\n '!null': ~pd.isnull(s),\r\n '=': s == value,\r\n '!=': s != value,\r\n '<': s < value,\r\n '<=': s <= value,\r\n '>': s > value,\r\n '>=': s >= value}\r\n\r\n result = eval_selection.get(operator)\r\n logger.debug(result)\r\n\r\n return result\r\n\r\n\r\n if isinstance(s, pd.Series):\r\n eval_selection = {\r\n 'min': s == s.min(),\r\n 'max': s == s.max(),\r\n 'null': np.isnan(s),\r\n '!null': ~np.isnan(s),\r\n '=': s == value,\r\n '!=': s != value,\r\n '<': s < value,\r\n '<=': s <= value,\r\n '>': s > value,\r\n '>=': s >= value\r\n }\r\n\r\n result = eval_selection.get(operator)\r\n logger.debug(result)\r\n\r\n return result\r\n\r\n\r\ndef highlight_min(s, css='color: blue; background-color: lightgrey'):\r\n ''' For given Series, highlight min values '''\r\n\r\n result = boolean_filter(s, operator='min', value=None)\r\n style = [css if v else '' for v in result]\r\n\r\n return style\r\n\r\n\r\ndef highlight_max(s, css='color: red; background-color: lightgrey'):\r\n ''' For given Series, highlight max values '''\r\n\r\n result = boolean_filter(s, operator='max', value=None)\r\n style = [css if v else '' for v in result]\r\n\r\n return style\r\n\r\n\r\ndef highlight_values(s, values=None, css=None):\r\n '''\r\n For given Series, highlight individual values using a\r\n specific (equal) value or list of values\r\n\r\n '''\r\n if css is None:\r\n css = 'color: red'\r\n\r\n if isinstance(values, list):\r\n style = [css if v in values else '' for v in s]\r\n else:\r\n style = [css if v == values else '' for v in s]\r\n\r\n return style\r\n\r\n\r\ndef highlight_rows(row, column=None, operator='=', value=0, css=None):\r\n '''\r\n\r\n Example:\r\n ========\r\n import pandas as pd\r\n import numpy as 
np\r\n\r\n np.random.seed(24)\r\n df = pd.DataFrame({'A': np.linspace(1, 10, 10)})\r\n df2 = pd.DataFrame(np.random.randn(10, 4), columns=list('BCDE'))\r\n\r\n df = pd.concat([df, df2], axis=1)\r\n df.iloc[0, 2] = np.nan\r\n\r\n df.style.apply(highlight_rows, axis=1)\r\n '''\r\n if column is None:\r\n raise KeyError(f'column {column} not found!')\r\n\r\n row_length = row.shape[0]\r\n\r\n if css is None:\r\n css = 'color: blue'\r\n\r\n result = boolean_filter(row[column], operator=operator, value=value)\r\n\r\n row_css = [''] * row_length\r\n if np.any(result):\r\n row_css = [css] * row_length\r\n\r\n return row_css\r\n\r\n\r\ndef get_style(style='default'):\r\n '''\r\n '''\r\n if style == 'default':\r\n return _style_excel()\r\n\r\n if style == 'ibm':\r\n return _style_ibmi()\r\n\r\n\r\ndef _style_excel():\r\n ''' Retrieve default css format\r\n\r\n '''\r\n style = [{'selector': '*',\r\n\r\n 'props': [('border', '1px solid #9EB6CE'),\r\n ('border-width', '1px 1px 1px 1px'),\r\n ('font-size', '12px'),\r\n ('font-weight', 200),\r\n ('table-layout', 'auto'),\r\n ('padding', '8px 8px 8px 8px'),\r\n ('border-spacing', '0px'),\r\n ('border-collapse', 'collapse')\r\n ]},\r\n {'selector': 'caption',\r\n 'props': [('color', 'black'),\r\n ('font-size', '15px')\r\n ]},\r\n {'selector': 'tbody tr:hover td',\r\n 'props': [('background-color', '#DCDCDC !important'),\r\n ('color', '#000'),\r\n ]},\r\n {'selector': 'th',\r\n 'props': [('background-color', '#E4ECF7'),\r\n ('font-weight', 600),\r\n ('font-size', '13px'),\r\n ('border', '1px solid #9EB6CE'),\r\n ('border-width', '0px 1px 1px 0px')\r\n ]},\r\n {'selector': 'td',\r\n 'props': [('border', '0px'),\r\n ('font-size', '12px'),\r\n ('border', '1px solid #9EB6CE'),\r\n ('border-width', '1px 1px 1px 1px'),\r\n ]},\r\n ]\r\n\r\n return style\r\n\r\n\r\ndef _style_ibmi():\r\n ''' Retrieve ibmi format/properties\r\n\r\n (merge_df.style.set_properties(**get_style(style='ibm')))\r\n '''\r\n\r\n style = {'background-color': 'black',\r\n 'color': 'lightgreen',\r\n 'font-size': '140%'}\r\n\r\n return style\r\n",
"from datetime import date\r\nfrom datetime import datetime\r\nfrom time import strptime\r\nimport logging\r\nimport numpy as np #type: ignore\r\nimport pandas as pd #type: ignore\r\nfrom pandas.api.types import is_numeric_dtype\r\nfrom pandas.api.types import is_timedelta64_dtype\r\nimport re\r\nfrom typing import (\r\n Any,\r\n Callable,\r\n Dict,\r\n Hashable,\r\n Iterable,\r\n List,\r\n NamedTuple,\r\n Optional,\r\n Pattern,\r\n Set,\r\n Tuple,\r\n Union,\r\n)\r\n\r\n\r\nlogger = logging.getLogger(__name__)\r\n\r\n\r\n# add_xl_formula() {{{1\r\ndef add_xl_formula(df: pd.DataFrame,\r\n column_name: str = 'xl_calc',\r\n formula: str = '=CONCATENATE(A{row}, B{row}, C{row})',\r\n offset: int = 2) -> pd.DataFrame:\r\n\r\n '''add Excel (xl) formula column\r\n\r\n Parameters\r\n ----------\r\n df\r\n pandas dataframe\r\n column_name\r\n the column name to be associated with the column formula values, default\r\n 'xl_calc'\r\n formula\r\n Excel formula to be applied. As an example:\r\n\r\n .. code-block::\r\n\r\n '=CONCATENATE(A{row}, B{row}, C{row})'\r\n\r\n where {row} is the defined replacement variable which will be replaced\r\n with actual individual row value.\r\n offset\r\n starting row value, default = 2 (resultant xl sheet includes headers)\r\n\r\n\r\n Examples\r\n --------\r\n\r\n .. code-block::\r\n\r\n formula = '=CONCATENATE(A{row}, B{row}, C{row})'\r\n add_xl_formula(df, column_name='X7', formula=formula)\r\n\r\n\r\n Returns\r\n -------\r\n pandas dataframe\r\n '''\r\n col_values = []\r\n for x in range(offset, df.shape[0] + offset):\r\n repl_str = re.sub('{ROW}', str(x), string=formula, flags=re.I)\r\n col_values.append(repl_str)\r\n\r\n df[column_name] = col_values\r\n\r\n return df\r\n\r\n\r\n# duration {{{1\r\ndef duration(s1: pd.Series,\r\n s2: pd.Series = None,\r\n unit: Union[str, None] = None,\r\n round: Union[bool, int] = 2,\r\n freq: str = 'd') -> pd.Series:\r\n ''' calculate duration between two columns (series)\r\n\r\n Parameters\r\n ----------\r\n s1\r\n 'from' datetime series\r\n s2\r\n 'to' datetime series.\r\n Default None. If None, defaults to today.\r\n interval\r\n default None - returns timedelta in days\r\n 'd' - days as an integer,\r\n 'years' (based on 365.25 days per year),\r\n 'months' (based on 30 day month)\r\n\r\n Other possible options are:\r\n - ‘W’, ‘D’, ‘T’, ‘S’, ‘L’, ‘U’, or ‘N’\r\n - ‘days’ or ‘day’\r\n - ‘hours’, ‘hour’, ‘hr’, or ‘h’\r\n - ‘minutes’, ‘minute’, ‘min’, or ‘m’\r\n - ‘seconds’, ‘second’, or ‘sec’\r\n - ‘milliseconds’, ‘millisecond’, ‘millis’, or ‘milli’\r\n - ‘microseconds’, ‘microsecond’, ‘micros’, or ‘micro’-\r\n - ‘nanoseconds’, ‘nanosecond’, ‘nanos’, ‘nano’, or ‘ns’.\r\n\r\n check out pandas\r\n `timedelta object <https://pandas.pydata.org/docs/reference/api/pandas.Timedelta.html>`_\r\n for details.\r\n round\r\n Default False. If duration result is an integer and this\r\n parameter contains a positive integer, the result is round to this\r\n decimal precision.\r\n freq\r\n Default is 'd'(days). If the duration result is a pd.Timedelta dtype,\r\n the value can be 'rounded' using this frequency parameter.\r\n\r\n Must be a fixed frequency like 'S' (second) not 'ME' (month end).\r\n For a list of valid values, check out\r\n `pandas offset aliases <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`_\r\n\r\n\r\n Returns\r\n -------\r\n series\r\n if unit is None - series is of data type timedelta64[ns]\r\n otherwise series of type int.\r\n\r\n Examples\r\n --------\r\n\r\n .. 
code-block::\r\n\r\n %%piper\r\n sample_data()\r\n >> select(['-countries', '-regions', '-ids', '-values_1', '-values_2'])\r\n >> assign(new_date_col=pd.to_datetime('2018-01-01'))\r\n >> assign(duration = lambda x: duration(x.new_date_col, x.order_dates, unit='months'))\r\n >> assign(duration_dates_age = lambda x: duration(x['dates']))\r\n >> head(tablefmt='plain')\r\n\r\n dates rder_dates new_date_col duration duration_dates_age\r\n 0 2020-01-01 2020-01-07 2018-01-01 25 452 days\r\n 1 2020-01-02 2020-01-08 2018-01-01 25 451 days\r\n 2 2020-01-03 2020-01-09 2018-01-01 25 450 days\r\n 3 2020-01-04 2020-01-10 2018-01-01 25 449 days\r\n\r\n '''\r\n if s2 is None:\r\n s2 = datetime.today()\r\n\r\n if unit is None:\r\n result = s2 - s1\r\n elif unit == 'years':\r\n result = ((s2 - s1) / pd.Timedelta(365.25, 'd'))\r\n elif unit == 'months':\r\n result = ((s2 - s1) / pd.Timedelta(30, 'd'))\r\n else:\r\n result = ((s2 - s1)) / pd.Timedelta(1, unit)\r\n\r\n if is_numeric_dtype(result):\r\n result = result.round(round)\r\n elif is_timedelta64_dtype(result):\r\n result = result.dt.round(freq=freq)\r\n\r\n return result\r\n\r\n\r\n# factorize {{{1\r\ndef factorize(series: pd.Series,\r\n categories: List = None,\r\n ordered: int = False) -> pd.Series:\r\n ''' factorize / make column a categorical dtype\r\n\r\n Parameters\r\n ----------\r\n series\r\n pd.Series object to be converted to categorical\r\n categories\r\n list of unique category values within pd.Series\r\n ordered\r\n If true, categorical is ordered.\r\n\r\n\r\n Returns\r\n -------\r\n pd.Series\r\n Returned series with categorical data type\r\n\r\n Examples\r\n --------\r\n\r\n .. code-block::\r\n\r\n cat_order = ['Tops & Blouses', 'Beachwear',\r\n 'Footwear', 'Jeans', 'Sportswear']\r\n\r\n %%piper\r\n sample_sales()\r\n >> assign(product=lambda x: factorize(x['product'],\r\n categories=cat_order,\r\n ordered=True))\r\n >> group_by(['location', 'product'])\r\n >> summarise(Total=('actual_sales', 'sum'))\r\n >> unstack()\r\n >> flatten_cols(remove_prefix='Total')\r\n >> head(tablefmt='plain')\r\n\r\n location Tops & Blouses Beachwear Footwear Jeans Sportswear\r\n London 339236 388762 274674 404440 291561\r\n Milan 523052 368373 444624 364343 319199\r\n Paris 481787 464725 383093 178117 150222\r\n\r\n '''\r\n if categories is None:\r\n series = series.astype('category')\r\n else:\r\n category_dtype = pd.CategoricalDtype(categories=categories,\r\n ordered=ordered)\r\n series = series.astype(category_dtype)\r\n\r\n return series\r\n\r\n\r\n# fiscal_year {{{1\r\ndef fiscal_year(date: Union[pd.Timestamp],\r\n start_month: int = 7,\r\n year_only: bool = False) -> str:\r\n '''Convert to fiscal year\r\n\r\n Used with pd.Series date objects to obtain Fiscal Year information.\r\n\r\n Parameters\r\n ----------\r\n date\r\n pd.TimeStamp object\r\n start_month\r\n Fiscal year starting month, default 7 (Australia)\r\n year_only\r\n Default False. Returns a 4 digit year e.g. 2019, 2020\r\n True Returns a 2 digit year e.g. 19, 20\r\n\r\n Examples\r\n --------\r\n\r\n .. 
code-block::\r\n\r\n assert fiscal_year(pd.Timestamp('2014-01-01')) == 'FY 2013/2014'\r\n assert fiscal_year(pd.to_datetime('2014-01-01')) == 'FY 2013/2014'\r\n\r\n assert fiscal_year(pd.Timestamp('2014-01-01'), year_only=True) == 'FY 13/14'\r\n assert fiscal_year(pd.to_datetime('2014-01-01'), year_only=True) == 'FY 13/14'\r\n\r\n assert pd.isna(from_excel(np.nan)) == pd.isna(np.nan)\r\n assert pd.isna(from_excel(pd.NaT)) == pd.isna(pd.NaT)\r\n\r\n df = pd.DataFrame()\r\n df['Date'] = pd.date_range('2020-01-01', periods=12, freq='M')\r\n df['Date'] = df['Date'].apply(fiscal_year)\r\n\r\n df.head()\r\n\r\n 0 FY 2018/2019\r\n 1 FY 2018/2019\r\n 2 FY 2018/2019\r\n 3 FY 2018/2019\r\n 4 FY 2018/2019\r\n Name: Date, dtype: object\r\n\r\n '''\r\n prefix = 'FY'\r\n\r\n if year_only:\r\n year = int(str(date.year)[2:])\r\n else:\r\n year = date.year\r\n\r\n if date.month < start_month:\r\n text = f'{prefix} {year-1}/{year}'\r\n else:\r\n text = f'{prefix} {year}/{year+1}'\r\n\r\n return text\r\n\r\n\r\n# from_julian {{{1\r\ndef from_julian(julian: Union[str, int],\r\n jde_format: bool = True) -> Any:\r\n ''' apply_date function: (JDE) Julian -> Gregorian\r\n\r\n References\r\n ----------\r\n # https://docs.oracle.com/cd/E26228_01/doc.93/e21961/julian_date_conv.htm#WEAWX259\r\n # http://nimishprabhu.com/the-mystery-of-jde-julian-date-format-solved.html\r\n\r\n Parameters\r\n ----------\r\n julian date\r\n julian date to be converted\r\n\r\n jde_format\r\n default True. If False, assume 'standard' julian format.\r\n\r\n Returns\r\n -------\r\n gregorian_date: Any Gregorian based format\r\n\r\n Examples\r\n --------\r\n\r\n .. code-block::\r\n\r\n %%piper\r\n sample_sales()\r\n >> select(['-target_profit', '-actual_profit'])\r\n # >> assign(month = lambda x: x['month'].apply(to_julian))\r\n >> across('month', to_julian)\r\n >> head(5)\r\n\r\n location product month target_sales actual_sales\r\n London Beachwear 121001 31749 29209.1\r\n London Beachwear 121001 37833 34049.7\r\n London Jeans 121001 29485 31549\r\n London Jeans 121001 37524 40901.2\r\n London Sportswear 121001 27216 29121.1\r\n '''\r\n if julian is None:\r\n return julian\r\n\r\n if isinstance(julian, str):\r\n if len(julian) > 6:\r\n return julian\r\n\r\n if isinstance(julian, date):\r\n return julian\r\n\r\n if isinstance(julian, int):\r\n\r\n if jde_format:\r\n julian = str(julian).zfill(6)\r\n else:\r\n julian = str(julian)\r\n\r\n try:\r\n if isinstance(julian, float):\r\n julian = str(int(julian))\r\n except ValueError:\r\n return julian\r\n\r\n if jde_format:\r\n\r\n if int(julian) < 70001:\r\n return julian\r\n\r\n if len(julian) > 6 or len(julian) < 5:\r\n return julian\r\n\r\n if isinstance(julian, str):\r\n julian = julian.zfill(6)\r\n\r\n century = int(julian[0]) + 19\r\n year = julian[1:3]\r\n days = julian[3:6]\r\n std_julian = f'{century}{year}{days}'\r\n greg_date = datetime.strptime(std_julian, '%Y%j').date()\r\n\r\n else:\r\n format_ = '%Y%j' if len(julian) == 7 else '%y%j'\r\n greg_date = datetime.strptime(julian, format_).date()\r\n\r\n return greg_date\r\n\r\n\r\n# from_excel {{{1\r\ndef from_excel(excel_date: Union[str, float, int, pd.Timestamp]\r\n ) -> pd.Timestamp:\r\n ''' apply_date function: excel serial date -> pd.Timestamp\r\n\r\n Converts excel serial format to pandas Timestamp object\r\n\r\n Examples\r\n --------\r\n\r\n .. 
code-block::\r\n\r\n assert from_excel(pd.Timestamp('2014-01-01 08:00:00')) == pd.Timestamp('2014-01-01 08:00:00')\r\n assert from_excel('41640.3333') == pd.Timestamp('2014-01-01 08:00:00')\r\n assert from_excel(41640.3333) == pd.Timestamp('2014-01-01 08:00:00')\r\n assert from_excel(44001) == pd.Timestamp('2020-06-19 00:00:00')\r\n assert from_excel('44001') == pd.Timestamp('2020-06-19 00:00:00')\r\n assert from_excel(43141) == pd.Timestamp('2018-02-10 00:00:00')\r\n assert from_excel('43962') == pd.Timestamp('2020-05-11 00:00:00')\r\n assert from_excel('') == ''\r\n assert from_excel(0) == 0\r\n assert pd.isna(from_excel(np.nan)) == pd.isna(np.nan)\r\n assert pd.isna(from_excel(pd.NaT)) == pd.isna(pd.NaT)\r\n\r\n Parameters\r\n ----------\r\n excel_date - serial excel date value\r\n\r\n Returns\r\n -------\r\n A pandas Timestamp object\r\n '''\r\n if isinstance(excel_date, pd.Timestamp):\r\n return excel_date\r\n\r\n if isinstance(excel_date, str):\r\n\r\n if re.search(r'[\\\\\\/\\-]', excel_date):\r\n return excel_date\r\n\r\n if len(excel_date) == 0:\r\n return excel_date\r\n\r\n try:\r\n excel_date = float(excel_date)\r\n except ValueError as _:\r\n excel_date = int(excel_date)\r\n return excel_date\r\n\r\n if excel_date == 0:\r\n return excel_date\r\n\r\n start_date = pd.to_datetime('1899-12-30')\r\n offset = pd.Timedelta(excel_date, 'd')\r\n\r\n if isinstance(excel_date, float):\r\n excel_date = start_date + offset\r\n excel_date = excel_date.round('1min')\r\n else:\r\n excel_date = start_date + offset\r\n\r\n return excel_date\r\n\r\n\r\n# ratio {{{1\r\ndef ratio(value1: Union[int, float, pd.Series],\r\n value2: Union[int, float, pd.Series],\r\n precision: int = 2,\r\n percent: bool = False,\r\n format: bool = True) -> Any:\r\n ''' Calculate the Ratio / percentage of two values\r\n\r\n Custom function which calculate the ratio and optionally\r\n percentage of two values or series.\r\n\r\n .. note::\r\n\r\n Passes back np.inf value for 'divide by zero' use case.\r\n\r\n Parameters\r\n ----------\r\n value1\r\n integer, float, or pd.Series\r\n value2\r\n integer, float, or pd.Series\r\n precision\r\n Default 2. Returned result is rounded to precision value.\r\n percent\r\n Default False. If True, calculates the percentage.\r\n format\r\n Default False. If True, returns a string, formatted as a\r\n percentage value e.g. 92.0%\r\n\r\n\r\n Returns\r\n -------\r\n float - if values1 and 2 are single int/float values\r\n pd.Series - if values1 and 2 are pd.Series\r\n\r\n Examples\r\n --------\r\n .. code-block::\r\n\r\n s1 = pd.Series([10, 20, 30])\r\n s2 = pd.Series([1.3, 5.4, 3])\r\n ratio(s1, s2)\r\n\r\n 0\r\n 0 7.69\r\n 1 3.70\r\n\r\n .. 
code-block::\r\n\r\n %%piper\r\n\r\n sample_sales()\r\n >> select(['-target_profit', '-actual_profit'])\r\n >> assign(std_ratio = lambda x: x.actual_sales / x.target_sales)\r\n >> assign(ratio = lambda x: ratio(x.actual_sales, x.target_sales,\r\n percent=True, format=True, precision=4))\r\n >> head(10)\r\n\r\n location product month target_sales actual_sales std_ratio ratio\r\n London Beachwear 2021-01-01 31749 29209.1 0.92 92.0%\r\n London Beachwear 2021-01-01 37833 34049.7 0.9 90.0%\r\n London Jeans 2021-01-01 29485 31549 1.07 107.0%\r\n London Jeans 2021-01-01 37524 40901.2 1.09 109.0%\r\n London Sportswear 2021-01-01 27216 29121.1 1.07 107.0%\r\n\r\n\r\n\r\n '''\r\n if isinstance(value1, pd.Series):\r\n\r\n if percent:\r\n result = (value1 * 100 / value2)\r\n\r\n if precision is not None:\r\n result = result.round(precision)\r\n\r\n if format:\r\n result = result.astype(str) + '%'\r\n else:\r\n result = (value1 / value2)\r\n\r\n if precision is not None:\r\n result = result.round(precision)\r\n\r\n return result\r\n\r\n # Assumption, if not pd.Series, we are dealing\r\n # with individual int or float values\r\n try:\r\n if percent:\r\n result = ((value1 * 100) / value2)\r\n\r\n if precision is not None:\r\n result = round(result, precision)\r\n\r\n if format:\r\n result = f'{result}%'\r\n else:\r\n result = value1 / value2\r\n\r\n if precision is not None:\r\n result = round(result, precision)\r\n\r\n except ZeroDivisionError as e:\r\n logger.info(e)\r\n return np.inf\r\n\r\n return result\r\n\r\n\r\n# to_julian {{{1\r\ndef to_julian(greg_date: Union[str, int], format: str = None):\r\n ''' apply_date function: Gregorian -> (JDE) Julian\r\n\r\n Parameters\r\n ----------\r\n greg_date : gregorian format date (string)\r\n\r\n\r\n Returns\r\n -------\r\n JDE Julian formatted string\r\n\r\n\r\n References\r\n ----------\r\n # https://docs.oracle.com/cd/E26228_01/doc.93/e21961/julian_date_conv.htm#WEAWX259\r\n # http://nimishprabhu.com/the-mystery-of-jde-julian-date-format-solved.html\r\n\r\n Examples\r\n --------\r\n\r\n .. code-block::\r\n\r\n %%piper\r\n\r\n sample_sales()\r\n >> select(['-target_profit', '-actual_profit'])\r\n # >> assign(month = lambda x: x['month'].apply(to_julian))\r\n >> across('month', to_julian)\r\n >> assign(month = lambda x: x['month'].apply(from_julian))\r\n # >> across('month', from_julian)\r\n >> head(5)\r\n\r\n location product month target_sales actual_sales\r\n London Beachwear 2021-01-01 31749 29209.1\r\n London Beachwear 2021-01-01 37833 34049.7\r\n London Jeans 2021-01-01 29485 31549\r\n London Jeans 2021-01-01 37524 40901.2\r\n London Sportswear 2021-01-01 27216 29121.1\r\n '''\r\n if greg_date in (np.NAN, pd.NaT, None, ''):\r\n return greg_date\r\n\r\n # if its an integer, convert to string first.\r\n if isinstance(greg_date, int):\r\n greg_date = str(greg_date)\r\n\r\n if format is None:\r\n format = '%Y-%m-%d'\r\n\r\n # Convert to pandas datetime format, then to Julian\r\n try:\r\n greg_date = pd.to_datetime(greg_date, format=format)\r\n except ValueError as _:\r\n return greg_date\r\n\r\n if isinstance(greg_date, datetime):\r\n century_prefix = str(greg_date.year)[:2]\r\n century_prefix = str(int(century_prefix) - 19)\r\n return int(century_prefix+greg_date.strftime('%y%j'))\r\n"
]
| [
[
"pandas.isnull",
"numpy.any",
"numpy.isnan"
],
[
"pandas.to_datetime",
"pandas.Timedelta",
"pandas.api.types.is_numeric_dtype",
"pandas.CategoricalDtype",
"pandas.api.types.is_timedelta64_dtype"
]
]
|
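The duration() helper in the record above converts a timedelta into coarser units by dividing by a pandas.Timedelta (365.25 days per year, 30 days per month). A minimal sketch of that conversion, using made-up dates, is below.

import pandas as pd
from pandas.api.types import is_numeric_dtype

s1 = pd.to_datetime(pd.Series(["2020-01-01", "2020-06-15"]))
s2 = pd.to_datetime(pd.Series(["2021-01-01", "2021-06-15"]))

# Dividing a timedelta Series by a Timedelta yields a plain float Series.
months = (s2 - s1) / pd.Timedelta(30, "d")
years = (s2 - s1) / pd.Timedelta(365.25, "d")

assert is_numeric_dtype(months)
print(months.round(2).tolist(), years.round(2).tolist())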
czh4/mmf-hateful-memes | [
"f1686ce612adda3c0553df1b5adf643d992a6afb"
]
| [
"mmf/models/m4c.py"
]
| [
"# Copyright (c) Facebook, Inc. and its affiliates.\nimport functools\nimport logging\nimport math\n\nimport torch\nimport torch.nn.functional as F\nfrom mmf.common.registry import registry\nfrom mmf.models.base_model import BaseModel\nfrom mmf.modules.layers import ClassifierLayer\nfrom mmf.utils.build import build_image_encoder\nfrom omegaconf import OmegaConf\nfrom torch import nn\nfrom transformers.modeling_bert import (\n BertConfig,\n BertEmbeddings,\n BertEncoder,\n BertLayerNorm,\n BertPreTrainedModel,\n)\n\n\nlogger = logging.getLogger(__name__)\n\n\[email protected]_model(\"m4c\")\nclass M4C(BaseModel):\n def __init__(self, config):\n super().__init__(config)\n self.mmt_config = BertConfig(**self.config.mmt)\n self._datasets = registry.get(\"config\").datasets.split(\",\")\n\n @classmethod\n def config_path(cls):\n return \"configs/models/m4c/defaults.yaml\"\n\n def build(self):\n # modules requiring custom learning rates (usually for finetuning)\n self.finetune_modules = []\n\n # split model building into several components\n self._build_txt_encoding()\n self._build_obj_encoding()\n self._build_ocr_encoding()\n self._build_mmt()\n self._build_output()\n\n def _build_encoder_config(self):\n return OmegaConf.create(\n {\n \"type\": \"finetune_faster_rcnn_fpn_fc7\",\n \"params\": {\n \"in_dim\": 2048,\n \"weights_file\": \"models/detectron.defaults/fc7_w.pkl\",\n \"bias_file\": \"models/detectron.defaults/fc7_b.pkl\",\n \"model_data_dir\": self.config.model_data_dir,\n },\n }\n )\n\n def _build_txt_encoding(self):\n TEXT_BERT_HIDDEN_SIZE = 768\n\n self.text_bert_config = BertConfig(**self.config.text_bert)\n if self.config.text_bert_init_from_bert_base:\n self.text_bert = TextBert.from_pretrained(\n \"bert-base-uncased\", config=self.text_bert_config\n )\n # Use a smaller learning rate on text bert when initializing\n # from BERT_BASE\n self.finetune_modules.append(\n {\"module\": self.text_bert, \"lr_scale\": self.config.lr_scale_text_bert}\n )\n else:\n logger.info(\"NOT initializing text_bert from BERT_BASE\")\n self.text_bert = TextBert(self.text_bert_config)\n\n # if the text bert output dimension doesn't match the\n # multimodal transformer (mmt) hidden dimension,\n # add a linear projection layer between the two\n if self.mmt_config.hidden_size != TEXT_BERT_HIDDEN_SIZE:\n logger.info(\n f\"Projecting text_bert output to {self.mmt_config.hidden_size} dim\"\n )\n\n self.text_bert_out_linear = nn.Linear(\n TEXT_BERT_HIDDEN_SIZE, self.mmt_config.hidden_size\n )\n else:\n self.text_bert_out_linear = nn.Identity()\n\n def _build_obj_encoding(self):\n # object appearance feature: Faster R-CNN\n self.obj_faster_rcnn_fc7 = build_image_encoder(\n self._build_encoder_config(), direct_features=True\n )\n # apply smaller lr to pretrained Faster R-CNN fc7\n self.finetune_modules.append(\n {\"module\": self.obj_faster_rcnn_fc7, \"lr_scale\": self.config.lr_scale_frcn}\n )\n self.linear_obj_feat_to_mmt_in = nn.Linear(\n self.config.obj.mmt_in_dim, self.mmt_config.hidden_size\n )\n\n # object location feature: relative bounding box coordinates (4-dim)\n self.linear_obj_bbox_to_mmt_in = nn.Linear(4, self.mmt_config.hidden_size)\n\n self.obj_feat_layer_norm = BertLayerNorm(self.mmt_config.hidden_size)\n self.obj_bbox_layer_norm = BertLayerNorm(self.mmt_config.hidden_size)\n self.obj_drop = nn.Dropout(self.config.obj.dropout_prob)\n\n def _build_ocr_encoding(self):\n self.remove_ocr_fasttext = getattr(\n self.config.ocr, \"remove_ocr_fasttext\", False\n )\n self.remove_ocr_phoc = 
getattr(self.config.ocr, \"remove_ocr_phoc\", False)\n self.remove_ocr_frcn = getattr(self.config.ocr, \"remove_ocr_frcn\", False)\n self.remove_ocr_semantics = getattr(\n self.config.ocr, \"remove_ocr_semantics\", False\n )\n self.remove_ocr_bbox = getattr(self.config.ocr, \"remove_ocr_bbox\", False)\n\n # OCR appearance feature: Faster R-CNN\n self.ocr_faster_rcnn_fc7 = build_image_encoder(\n self._build_encoder_config(), direct_features=True\n )\n self.finetune_modules.append(\n {\"module\": self.ocr_faster_rcnn_fc7, \"lr_scale\": self.config.lr_scale_frcn}\n )\n\n self.linear_ocr_feat_to_mmt_in = nn.Linear(\n self.config.ocr.mmt_in_dim, self.mmt_config.hidden_size\n )\n\n # OCR location feature: relative bounding box coordinates (4-dim)\n self.linear_ocr_bbox_to_mmt_in = nn.Linear(4, self.mmt_config.hidden_size)\n\n self.ocr_feat_layer_norm = BertLayerNorm(self.mmt_config.hidden_size)\n self.ocr_bbox_layer_norm = BertLayerNorm(self.mmt_config.hidden_size)\n self.ocr_drop = nn.Dropout(self.config.ocr.dropout_prob)\n\n def _build_mmt(self):\n self.mmt = MMT(self.mmt_config)\n\n # allow specifying a different/scaled lr for multimodal transformer\n self.finetune_modules.append(\n {\"module\": self.mmt, \"lr_scale\": self.config.lr_scale_mmt}\n )\n\n def _build_output(self):\n # dynamic OCR-copying scores with pointer network\n self.ocr_ptr_net = OcrPtrNet(**self.config.classifier.ocr_ptr_net)\n\n # fixed answer vocabulary scores\n num_choices = registry.get(self._datasets[0] + \"_num_final_outputs\")\n # remove the OCR copying dimensions in LoRRA's classifier output\n # (OCR copying will be handled separately)\n num_choices -= self.config.classifier.ocr_max_num\n self.classifier = ClassifierLayer(\n self.config.classifier.type,\n in_dim=self.mmt_config.hidden_size,\n out_dim=num_choices,\n **self.config.classifier.params,\n )\n\n self.answer_processor = registry.get(self._datasets[0] + \"_answer_processor\")\n\n def forward(self, sample_list):\n # fwd_results holds intermediate forward pass results\n # TODO possibly replace it with another sample list\n fwd_results = {}\n self._forward_txt_encoding(sample_list, fwd_results)\n self._forward_obj_encoding(sample_list, fwd_results)\n self._forward_ocr_encoding(sample_list, fwd_results)\n self._forward_mmt_and_output(sample_list, fwd_results)\n\n # only keep scores in the forward pass results\n results = {\"scores\": fwd_results[\"scores\"]}\n return results\n\n def _forward_txt_encoding(self, sample_list, fwd_results):\n fwd_results[\"txt_inds\"] = sample_list.text\n\n # binary mask of valid text (question words) vs padding\n fwd_results[\"txt_mask\"] = _get_mask(\n sample_list.text_len, sample_list.text.size(1)\n )\n\n def _forward_obj_encoding(self, sample_list, fwd_results):\n # object appearance feature: Faster R-CNN fc7\n obj_fc6 = sample_list.image_feature_0\n obj_fc7 = self.obj_faster_rcnn_fc7(obj_fc6)\n obj_fc7 = F.normalize(obj_fc7, dim=-1)\n\n obj_feat = obj_fc7\n obj_bbox = sample_list.obj_bbox_coordinates\n obj_mmt_in = self.obj_feat_layer_norm(\n self.linear_obj_feat_to_mmt_in(obj_feat)\n ) + self.obj_bbox_layer_norm(self.linear_obj_bbox_to_mmt_in(obj_bbox))\n obj_mmt_in = self.obj_drop(obj_mmt_in)\n fwd_results[\"obj_mmt_in\"] = obj_mmt_in\n\n # binary mask of valid object vs padding\n obj_nums = sample_list.image_info_0.max_features\n fwd_results[\"obj_mask\"] = _get_mask(obj_nums, obj_mmt_in.size(1))\n\n def _forward_ocr_encoding(self, sample_list, fwd_results):\n # OCR FastText feature (300-dim)\n ocr_fasttext = 
sample_list.context_feature_0\n ocr_fasttext = F.normalize(ocr_fasttext, dim=-1)\n assert ocr_fasttext.size(-1) == 300\n\n # OCR PHOC feature (604-dim)\n ocr_phoc = sample_list.context_feature_1\n ocr_phoc = F.normalize(ocr_phoc, dim=-1)\n assert ocr_phoc.size(-1) == 604\n\n # OCR appearance feature: Faster R-CNN fc7\n ocr_fc6 = sample_list.image_feature_1[:, : ocr_fasttext.size(1), :]\n ocr_fc7 = self.ocr_faster_rcnn_fc7(ocr_fc6)\n ocr_fc7 = F.normalize(ocr_fc7, dim=-1)\n\n # OCR order vectors (legacy from LoRRA model; set to all zeros)\n # TODO remove OCR order vectors; they are not needed\n ocr_order_vectors = torch.zeros_like(sample_list.order_vectors)\n\n if self.remove_ocr_fasttext:\n ocr_fasttext = torch.zeros_like(ocr_fasttext)\n if self.remove_ocr_phoc:\n ocr_phoc = torch.zeros_like(ocr_phoc)\n if self.remove_ocr_frcn:\n ocr_fc7 = torch.zeros_like(ocr_fc7)\n ocr_feat = torch.cat(\n [ocr_fasttext, ocr_phoc, ocr_fc7, ocr_order_vectors], dim=-1\n )\n ocr_bbox = sample_list.ocr_bbox_coordinates\n if self.remove_ocr_semantics:\n ocr_feat = torch.zeros_like(ocr_feat)\n if self.remove_ocr_bbox:\n ocr_bbox = torch.zeros_like(ocr_bbox)\n ocr_mmt_in = self.ocr_feat_layer_norm(\n self.linear_ocr_feat_to_mmt_in(ocr_feat)\n ) + self.ocr_bbox_layer_norm(self.linear_ocr_bbox_to_mmt_in(ocr_bbox))\n ocr_mmt_in = self.ocr_drop(ocr_mmt_in)\n fwd_results[\"ocr_mmt_in\"] = ocr_mmt_in\n\n # binary mask of valid OCR vs padding\n ocr_nums = sample_list.context_info_0.max_features\n fwd_results[\"ocr_mask\"] = _get_mask(ocr_nums, ocr_mmt_in.size(1))\n\n def _forward_mmt(self, sample_list, fwd_results):\n # first forward the text BERT layers\n text_bert_out = self.text_bert(\n txt_inds=fwd_results[\"txt_inds\"], txt_mask=fwd_results[\"txt_mask\"]\n )\n fwd_results[\"txt_emb\"] = self.text_bert_out_linear(text_bert_out)\n\n mmt_results = self.mmt(\n txt_emb=fwd_results[\"txt_emb\"],\n txt_mask=fwd_results[\"txt_mask\"],\n obj_emb=fwd_results[\"obj_mmt_in\"],\n obj_mask=fwd_results[\"obj_mask\"],\n ocr_emb=fwd_results[\"ocr_mmt_in\"],\n ocr_mask=fwd_results[\"ocr_mask\"],\n fixed_ans_emb=self.classifier.module.weight,\n prev_inds=fwd_results[\"prev_inds\"],\n )\n fwd_results.update(mmt_results)\n\n def _forward_output(self, sample_list, fwd_results):\n mmt_dec_output = fwd_results[\"mmt_dec_output\"]\n mmt_ocr_output = fwd_results[\"mmt_ocr_output\"]\n ocr_mask = fwd_results[\"ocr_mask\"]\n\n fixed_scores = self.classifier(mmt_dec_output)\n dynamic_ocr_scores = self.ocr_ptr_net(mmt_dec_output, mmt_ocr_output, ocr_mask)\n scores = torch.cat([fixed_scores, dynamic_ocr_scores], dim=-1)\n fwd_results[\"scores\"] = scores\n\n def _forward_mmt_and_output(self, sample_list, fwd_results):\n if self.training:\n fwd_results[\"prev_inds\"] = sample_list.train_prev_inds.clone()\n self._forward_mmt(sample_list, fwd_results)\n self._forward_output(sample_list, fwd_results)\n else:\n dec_step_num = sample_list.train_prev_inds.size(1)\n # fill prev_inds with BOS_IDX at index 0, and zeros elsewhere\n fwd_results[\"prev_inds\"] = torch.zeros_like(sample_list.train_prev_inds)\n fwd_results[\"prev_inds\"][:, 0] = self.answer_processor.BOS_IDX\n\n # greedy decoding at test time\n for _ in range(dec_step_num):\n self._forward_mmt(sample_list, fwd_results)\n self._forward_output(sample_list, fwd_results)\n\n # find the highest scoring output (either a fixed vocab\n # or an OCR), and add it to prev_inds for auto-regressive\n # decoding\n argmax_inds = fwd_results[\"scores\"].argmax(dim=-1)\n fwd_results[\"prev_inds\"][:, 1:] = 
argmax_inds[:, :-1]\n\n def get_optimizer_parameters(self, config):\n optimizer_param_groups = []\n\n base_lr = config.optimizer.params.lr\n # collect all the parameters that need different/scaled lr\n finetune_params_set = set()\n for m in self.finetune_modules:\n optimizer_param_groups.append(\n {\n \"params\": list(m[\"module\"].parameters()),\n \"lr\": base_lr * m[\"lr_scale\"],\n }\n )\n finetune_params_set.update(list(m[\"module\"].parameters()))\n # remaining_params are those parameters w/ default lr\n remaining_params = [\n p for p in self.parameters() if p not in finetune_params_set\n ]\n # put the default lr parameters at the beginning\n # so that the printed lr (of group 0) matches the default lr\n optimizer_param_groups.insert(0, {\"params\": remaining_params})\n\n return optimizer_param_groups\n\n @classmethod\n def update_registry_for_pretrained(cls, config, checkpoint, full_output):\n from omegaconf import OmegaConf\n\n # Hack datasets using OmegaConf\n datasets = full_output[\"full_config\"].datasets\n dataset = datasets.split(\",\")[0]\n config_mock = OmegaConf.create({\"datasets\": datasets})\n registry.register(\"config\", config_mock)\n registry.register(\n f\"{dataset}_num_final_outputs\",\n # Need to add as it is subtracted\n checkpoint[\"classifier.module.weight\"].size(0)\n + config.classifier.ocr_max_num,\n )\n # Fix this later, when processor pipeline is available\n answer_processor = OmegaConf.create({\"BOS_IDX\": 1})\n registry.register(f\"{dataset}_answer_processor\", answer_processor)\n\n\nclass TextBert(BertPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n self.embeddings = BertEmbeddings(config)\n self.encoder = BertEncoder(config)\n self.init_weights()\n\n def forward(self, txt_inds, txt_mask):\n encoder_inputs = self.embeddings(txt_inds)\n attention_mask = txt_mask\n\n extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)\n extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0\n assert not extended_attention_mask.requires_grad\n head_mask = [None] * self.config.num_hidden_layers\n\n encoder_outputs = self.encoder(\n encoder_inputs, extended_attention_mask, head_mask=head_mask\n )\n seq_output = encoder_outputs[0]\n\n return seq_output\n\n\nclass MMT(BertPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n self.prev_pred_embeddings = PrevPredEmbeddings(config)\n self.encoder = BertEncoder(config)\n self.init_weights()\n\n def forward(\n self,\n txt_emb,\n txt_mask,\n obj_emb,\n obj_mask,\n ocr_emb,\n ocr_mask,\n fixed_ans_emb,\n prev_inds,\n ):\n\n # build embeddings for predictions in previous decoding steps\n # fixed_ans_emb is an embedding lookup table for each fixed vocabulary\n dec_emb = self.prev_pred_embeddings(fixed_ans_emb, ocr_emb, prev_inds)\n\n # a zero mask for decoding steps, so the encoding steps elements can't\n # attend to decoding steps.\n # A triangular causal mask will be filled for the decoding steps\n # later in extended_attention_mask\n dec_mask = torch.zeros(\n dec_emb.size(0), dec_emb.size(1), dtype=torch.float32, device=dec_emb.device\n )\n encoder_inputs = torch.cat([txt_emb, obj_emb, ocr_emb, dec_emb], dim=1)\n attention_mask = torch.cat([txt_mask, obj_mask, ocr_mask, dec_mask], dim=1)\n\n # offsets of each modality in the joint embedding space\n txt_max_num = txt_mask.size(-1)\n obj_max_num = obj_mask.size(-1)\n ocr_max_num = ocr_mask.size(-1)\n dec_max_num = dec_mask.size(-1)\n txt_begin = 0\n txt_end = txt_begin + txt_max_num\n ocr_begin = 
txt_max_num + obj_max_num\n ocr_end = ocr_begin + ocr_max_num\n\n # We create a 3D attention mask from a 2D tensor mask.\n # Sizes are [batch_size, 1, from_seq_length, to_seq_length]\n # So we can broadcast to\n # [batch_size, num_heads, from_seq_length, to_seq_length]\n to_seq_length = attention_mask.size(1)\n from_seq_length = to_seq_length\n\n # generate the attention mask similar to prefix LM\n # all elements can attend to the elements in encoding steps\n extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)\n extended_attention_mask = extended_attention_mask.repeat(\n 1, 1, from_seq_length, 1\n )\n # decoding step elements can attend to themselves in a causal manner\n extended_attention_mask[:, :, -dec_max_num:, -dec_max_num:] = _get_causal_mask(\n dec_max_num, encoder_inputs.device\n )\n\n # flip the mask, so that invalid attention pairs have -10000.\n extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0\n assert not extended_attention_mask.requires_grad\n head_mask = [None] * self.config.num_hidden_layers\n\n encoder_outputs = self.encoder(\n encoder_inputs, extended_attention_mask, head_mask=head_mask\n )\n\n mmt_seq_output = encoder_outputs[0]\n mmt_txt_output = mmt_seq_output[:, txt_begin:txt_end]\n mmt_ocr_output = mmt_seq_output[:, ocr_begin:ocr_end]\n mmt_dec_output = mmt_seq_output[:, -dec_max_num:]\n\n results = {\n \"mmt_seq_output\": mmt_seq_output,\n \"mmt_txt_output\": mmt_txt_output,\n \"mmt_ocr_output\": mmt_ocr_output,\n \"mmt_dec_output\": mmt_dec_output,\n }\n return results\n\n\nclass OcrPtrNet(nn.Module):\n def __init__(self, hidden_size, query_key_size=None):\n super().__init__()\n\n if query_key_size is None:\n query_key_size = hidden_size\n self.hidden_size = hidden_size\n self.query_key_size = query_key_size\n\n self.query = nn.Linear(hidden_size, query_key_size)\n self.key = nn.Linear(hidden_size, query_key_size)\n\n def forward(self, query_inputs, key_inputs, attention_mask):\n extended_attention_mask = (1.0 - attention_mask) * -10000.0\n assert extended_attention_mask.dim() == 2\n extended_attention_mask = extended_attention_mask.unsqueeze(1)\n\n query_layer = self.query(query_inputs)\n if query_layer.dim() == 2:\n query_layer = query_layer.unsqueeze(1)\n squeeze_result = True\n else:\n squeeze_result = False\n key_layer = self.key(key_inputs)\n\n scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))\n scores = scores / math.sqrt(self.query_key_size)\n scores = scores + extended_attention_mask\n if squeeze_result:\n scores = scores.squeeze(1)\n\n return scores\n\n\nclass PrevPredEmbeddings(nn.Module):\n def __init__(self, config):\n super().__init__()\n\n MAX_DEC_LENGTH = 100\n MAX_TYPE_NUM = 5\n hidden_size = config.hidden_size\n ln_eps = config.layer_norm_eps\n\n self.position_embeddings = nn.Embedding(MAX_DEC_LENGTH, hidden_size)\n self.token_type_embeddings = nn.Embedding(MAX_TYPE_NUM, hidden_size)\n\n self.ans_layer_norm = BertLayerNorm(hidden_size, eps=ln_eps)\n self.ocr_layer_norm = BertLayerNorm(hidden_size, eps=ln_eps)\n self.emb_layer_norm = BertLayerNorm(hidden_size, eps=ln_eps)\n self.emb_dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, ans_emb, ocr_emb, prev_inds):\n assert prev_inds.dim() == 2 and prev_inds.dtype == torch.long\n assert ans_emb.dim() == 2\n\n batch_size = prev_inds.size(0)\n seq_length = prev_inds.size(1)\n ans_num = ans_emb.size(0)\n\n # apply layer normalization to both answer embedding and OCR embedding\n # before concatenation, so that they have the same scale\n 
ans_emb = self.ans_layer_norm(ans_emb)\n ocr_emb = self.ocr_layer_norm(ocr_emb)\n assert ans_emb.size(-1) == ocr_emb.size(-1)\n ans_emb = ans_emb.unsqueeze(0).expand(batch_size, -1, -1)\n ans_ocr_emb_cat = torch.cat([ans_emb, ocr_emb], dim=1)\n raw_dec_emb = _batch_gather(ans_ocr_emb_cat, prev_inds)\n\n # Add position and type embedding for previous predictions\n position_ids = torch.arange(seq_length, dtype=torch.long, device=ocr_emb.device)\n position_ids = position_ids.unsqueeze(0).expand(batch_size, seq_length)\n position_embeddings = self.position_embeddings(position_ids)\n # Token type ids: 0 -- vocab; 1 -- OCR\n token_type_ids = prev_inds.ge(ans_num).long()\n token_type_embeddings = self.token_type_embeddings(token_type_ids)\n embeddings = position_embeddings + token_type_embeddings\n embeddings = self.emb_layer_norm(embeddings)\n embeddings = self.emb_dropout(embeddings)\n dec_emb = raw_dec_emb + embeddings\n\n return dec_emb\n\n\ndef _get_mask(nums, max_num):\n # non_pad_mask: b x lq, torch.float32, 0. on PAD\n batch_size = nums.size(0)\n arange = torch.arange(0, max_num).unsqueeze(0).expand(batch_size, -1)\n non_pad_mask = arange.to(nums.device).lt(nums.unsqueeze(-1))\n non_pad_mask = non_pad_mask.type(torch.float32)\n return non_pad_mask\n\n\[email protected]_cache(maxsize=32)\ndef _get_causal_mask(seq_length, device):\n # generate a lower triangular mask\n mask = torch.zeros(seq_length, seq_length, device=device)\n for i in range(seq_length):\n for j in range(i + 1):\n mask[i, j] = 1.0\n return mask\n\n\ndef _batch_gather(x, inds):\n assert x.dim() == 3\n batch_size = x.size(0)\n length = x.size(1)\n dim = x.size(2)\n x_flat = x.view(batch_size * length, dim)\n\n batch_offsets = torch.arange(batch_size, device=inds.device) * length\n batch_offsets = batch_offsets.unsqueeze(-1)\n assert batch_offsets.dim() == inds.dim()\n inds_flat = batch_offsets + inds\n results = F.embedding(inds_flat, x_flat)\n return results\n"
]
| [
[
"torch.zeros",
"torch.nn.functional.normalize",
"torch.nn.Dropout",
"torch.nn.Linear",
"torch.cat",
"torch.nn.Identity",
"torch.arange",
"torch.nn.functional.embedding",
"torch.zeros_like",
"torch.nn.Embedding"
]
]
|
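Much of the masking logic in the record above lives in two small helpers: _get_mask builds a padding mask from per-example lengths, and _get_causal_mask fills a lower-triangular mask with a Python loop. The sketch below reproduces the length mask and uses torch.tril as a vectorized stand-in for the causal loop — assumed to give the same lower-triangular values, not copied from the record.

import torch

def length_mask(nums, max_num):
    # 1.0 at valid positions, 0.0 at padding; result shape (batch, max_num).
    arange = torch.arange(0, max_num).unsqueeze(0).expand(nums.size(0), -1)
    return arange.lt(nums.unsqueeze(-1)).float()

def causal_mask(seq_length):
    # Lower-triangular mask, diagonal included, matching what the loop fills in.
    return torch.tril(torch.ones(seq_length, seq_length))

print(length_mask(torch.tensor([2, 4]), 5))
print(causal_mask(4))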
nuannuanhcc/deep-high-resolution-net.pytorch | [
"163eb4797e5242d753ccbc69a4354b487657fdcb"
]
| [
"tools/train.py"
]
| [
"# ------------------------------------------------------------------------------\n# Copyright (c) Microsoft\n# Licensed under the MIT License.\n# Written by Bin Xiao ([email protected])\n# ------------------------------------------------------------------------------\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport os\nimport pprint\nimport shutil\n\nimport torch\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.optim\nimport torch.utils.data\nimport torch.utils.data.distributed\nimport torchvision.transforms as transforms\nfrom tensorboardX import SummaryWriter\n\nimport _init_paths\nfrom config import cfg\nfrom config import update_config\nfrom core.loss import JointsMSELoss\nfrom core.function import train\nfrom core.function import validate\nfrom utils.utils import get_optimizer\nfrom utils.utils import save_checkpoint\nfrom utils.utils import create_logger\nfrom utils.utils import get_model_summary\n\nimport dataset\nimport models\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='Train keypoints network')\n # general\n parser.add_argument('--cfg',\n help='experiment configure file name',\n required=True,\n type=str)\n\n parser.add_argument('opts',\n help=\"Modify config options using the command-line\",\n default=None,\n nargs=argparse.REMAINDER)\n\n # philly\n parser.add_argument('--modelDir',\n help='model directory',\n type=str,\n default='')\n parser.add_argument('--logDir',\n help='log directory',\n type=str,\n default='')\n parser.add_argument('--dataDir',\n help='data directory',\n type=str,\n default='')\n parser.add_argument('--prevModelDir',\n help='prev Model directory',\n type=str,\n default='')\n\n args = parser.parse_args()\n\n return args\n\n\ndef main():\n args = parse_args()\n update_config(cfg, args)\n\n logger, final_output_dir, tb_log_dir = create_logger(\n cfg, args.cfg, 'train')\n\n logger.info(pprint.pformat(args))\n logger.info(cfg)\n\n # cudnn related setting\n cudnn.benchmark = cfg.CUDNN.BENCHMARK\n torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC\n torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED\n\n model = eval('models.'+cfg.MODEL.NAME+'.get_pose_net')(\n cfg, is_train=True\n )\n\n # copy model file\n this_dir = os.path.dirname(__file__)\n shutil.copy2(\n os.path.join(this_dir, '../lib/models', cfg.MODEL.NAME + '.py'),\n final_output_dir)\n # logger.info(pprint.pformat(model))\n\n writer_dict = {\n 'writer': SummaryWriter(log_dir=tb_log_dir),\n 'train_global_steps': 0,\n 'valid_global_steps': 0,\n }\n\n dump_input = torch.rand(\n (1, 3, cfg.MODEL.IMAGE_SIZE[1], cfg.MODEL.IMAGE_SIZE[0])\n )\n writer_dict['writer'].add_graph(model, (dump_input, ))\n\n logger.info(get_model_summary(model, dump_input))\n\n model = torch.nn.DataParallel(model, device_ids=cfg.GPUS).cuda()\n\n # define loss function (criterion) and optimizer\n criterion = JointsMSELoss(\n use_target_weight=cfg.LOSS.USE_TARGET_WEIGHT\n ).cuda()\n\n # Data loading code\n normalize = transforms.Normalize(\n mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]\n )\n train_dataset = eval('dataset.'+cfg.DATASET.DATASET)(\n cfg, cfg.DATASET.ROOT, cfg.DATASET.TRAIN_SET, True,\n transforms.Compose([\n transforms.ToTensor(),\n normalize,\n ])\n )\n valid_dataset = eval('dataset.'+cfg.DATASET.DATASET)(\n cfg, cfg.DATASET.ROOT, cfg.DATASET.TEST_SET, False,\n transforms.Compose([\n transforms.ToTensor(),\n normalize,\n ])\n )\n\n train_loader = 
torch.utils.data.DataLoader(\n train_dataset,\n batch_size=cfg.TRAIN.BATCH_SIZE_PER_GPU*len(cfg.GPUS),\n shuffle=cfg.TRAIN.SHUFFLE,\n num_workers=cfg.WORKERS,\n pin_memory=cfg.PIN_MEMORY\n )\n valid_loader = torch.utils.data.DataLoader(\n valid_dataset,\n batch_size=cfg.TEST.BATCH_SIZE_PER_GPU*len(cfg.GPUS),\n shuffle=False,\n num_workers=cfg.WORKERS,\n pin_memory=cfg.PIN_MEMORY\n )\n\n best_perf = 0.0\n best_model = False\n last_epoch = -1\n optimizer = get_optimizer(cfg, model)\n begin_epoch = cfg.TRAIN.BEGIN_EPOCH\n checkpoint_file = os.path.join(\n final_output_dir, 'checkpoint.pth'\n )\n\n if cfg.AUTO_RESUME and os.path.exists(checkpoint_file):\n logger.info(\"=> loading checkpoint '{}'\".format(checkpoint_file))\n checkpoint = torch.load(checkpoint_file)\n begin_epoch = checkpoint['epoch']\n best_perf = checkpoint['perf']\n last_epoch = checkpoint['epoch']\n model.load_state_dict(checkpoint['state_dict'])\n\n optimizer.load_state_dict(checkpoint['optimizer'])\n logger.info(\"=> loaded checkpoint '{}' (epoch {})\".format(\n checkpoint_file, checkpoint['epoch']))\n\n lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(\n optimizer, cfg.TRAIN.LR_STEP, cfg.TRAIN.LR_FACTOR,\n last_epoch=last_epoch\n )\n\n for epoch in range(begin_epoch, cfg.TRAIN.END_EPOCH):\n lr_scheduler.step()\n\n # train for one epoch\n train(cfg, train_loader, model, criterion, optimizer, epoch,\n final_output_dir, tb_log_dir, writer_dict)\n\n\n # evaluate on validation set\n perf_indicator = validate(\n cfg, valid_loader, valid_dataset, model, criterion,\n final_output_dir, tb_log_dir, writer_dict\n )\n\n if perf_indicator >= best_perf:\n best_perf = perf_indicator\n best_model = True\n else:\n best_model = False\n\n logger.info('=> saving checkpoint to {}'.format(final_output_dir))\n save_checkpoint({\n 'epoch': epoch + 1,\n 'model': cfg.MODEL.NAME,\n 'state_dict': model.state_dict(),\n 'best_state_dict': model.module.state_dict(),\n 'perf': perf_indicator,\n 'optimizer': optimizer.state_dict(),\n }, best_model, final_output_dir)\n\n final_model_state_file = os.path.join(\n final_output_dir, 'final_state.pth'\n )\n logger.info('=> saving final model state to {}'.format(\n final_model_state_file)\n )\n torch.save(model.module.state_dict(), final_model_state_file)\n writer_dict['writer'].close()\n\n\nif __name__ == '__main__':\n main()\n"
]
| [
[
"torch.rand",
"torch.nn.DataParallel",
"torch.load",
"torch.optim.lr_scheduler.MultiStepLR"
]
]
|
dumpmemory/t5x | [
"463a23d577490a26498d9bbb2d7554be88afa316"
]
| [
"t5x/examples/t5/layers.py"
]
| [
"# Copyright 2021 The T5X Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Dense attention classes and mask/weighting functions.\"\"\"\n\n# pylint: disable=attribute-defined-outside-init,g-bare-generic\n\nimport dataclasses\nimport functools\nimport operator\nfrom typing import Any, Callable, Iterable, Optional, Sequence, Tuple, Union\n\nfrom flax import linen as nn\nfrom flax.linen import partitioning as nn_partitioning\nimport jax\nfrom jax import lax\nfrom jax import random\nimport jax.numpy as jnp\nimport numpy as np\n\n\n# from flax.linen.partitioning import param_with_axes, with_sharding_constraint\nparam_with_axes = nn_partitioning.param_with_axes\nwith_sharding_constraint = nn_partitioning.with_sharding_constraint\n\n\n# Type annotations\nArray = jnp.ndarray\nDType = jnp.dtype\nPRNGKey = jnp.ndarray\nShape = Iterable[int]\nActivation = Callable[..., Array]\n# Parameter initializers.\nInitializer = Callable[[PRNGKey, Shape, DType], Array]\n\ndefault_embed_init = nn.initializers.variance_scaling(\n 1.0, 'fan_in', 'normal', out_axis=0)\n\n\ndef dot_product_attention(query: Array,\n key: Array,\n value: Array,\n bias: Optional[Array] = None,\n dropout_rng: Optional[PRNGKey] = None,\n dropout_rate: float = 0.,\n deterministic: bool = False,\n dtype: DType = jnp.float32,\n float32_logits: bool = False):\n \"\"\"Computes dot-product attention given query, key, and value.\n\n This is the core function for applying attention based on\n https://arxiv.org/abs/1706.03762. It calculates the attention weights given\n query and key and combines the values using the attention weights.\n\n Args:\n query: queries for calculating attention with shape of `[batch, q_length,\n num_heads, qk_depth_per_head]`.\n key: keys for calculating attention with shape of `[batch, kv_length,\n num_heads, qk_depth_per_head]`.\n value: values to be used in attention with shape of `[batch, kv_length,\n num_heads, v_depth_per_head]`.\n bias: bias for the attention weights. 
This should be broadcastable to the\n shape `[batch, num_heads, q_length, kv_length]` This can be used for\n incorporating causal masks, padding masks, proximity bias, etc.\n dropout_rng: JAX PRNGKey: to be used for dropout\n dropout_rate: dropout rate\n deterministic: bool, deterministic or not (to apply dropout)\n dtype: the dtype of the computation (default: float32)\n float32_logits: bool, if True then compute logits in float32 to avoid\n numerical issues with bfloat16.\n\n Returns:\n Output of shape `[batch, length, num_heads, v_depth_per_head]`.\n \"\"\"\n assert key.ndim == query.ndim == value.ndim, 'q, k, v must have same rank.'\n assert query.shape[:-3] == key.shape[:-3] == value.shape[:-3], (\n 'q, k, v batch dims must match.')\n assert query.shape[-2] == key.shape[-2] == value.shape[-2], (\n 'q, k, v num_heads must match.')\n assert key.shape[-3] == value.shape[-3], 'k, v lengths must match.'\n assert query.shape[-1] == key.shape[-1], 'q, k depths must match.'\n\n # Casting logits and softmax computation for float32 for model stability.\n if float32_logits:\n query = query.astype(jnp.float32)\n key = key.astype(jnp.float32)\n\n # `attn_weights`: [batch, num_heads, q_length, kv_length]\n attn_weights = jnp.einsum('bqhd,bkhd->bhqk', query, key)\n\n # Apply attention bias: masking, dropout, proximity bias, etc.\n if bias is not None:\n attn_weights = attn_weights + bias.astype(attn_weights.dtype)\n\n # Normalize the attention weights across `kv_length` dimension.\n attn_weights = jax.nn.softmax(attn_weights).astype(dtype)\n\n # Apply attention dropout.\n if not deterministic and dropout_rate > 0.:\n keep_prob = 1.0 - dropout_rate\n # T5 broadcasts along the \"length\" dim, but unclear which one that\n # corresponds to in positional dimensions here, assuming query dim.\n dropout_shape = list(attn_weights.shape)\n dropout_shape[-2] = 1\n keep = random.bernoulli(dropout_rng, keep_prob, dropout_shape)\n keep = jnp.broadcast_to(keep, attn_weights.shape)\n multiplier = (\n keep.astype(attn_weights.dtype) / jnp.asarray(keep_prob, dtype=dtype))\n attn_weights = attn_weights * multiplier\n\n # Take the linear combination of `value`.\n return jnp.einsum('bhqk,bkhd->bqhd', attn_weights, value)\n\n\ndynamic_vector_slice_in_dim = jax.vmap(\n lax.dynamic_slice_in_dim, in_axes=(None, 0, None, None))\n\n\nclass MultiHeadDotProductAttention(nn.Module):\n \"\"\"Multi-head dot-product attention.\n\n Attributes:\n num_heads: number of attention heads. Features (i.e. 
inputs_q.shape[-1])\n should be divisible by the number of heads.\n head_dim: dimension of each head.\n dtype: the dtype of the computation.\n dropout_rate: dropout rate\n kernel_init: initializer for the kernel of the Dense layers.\n float32_logits: bool, if True then compute logits in float32 to avoid\n numerical issues with bfloat16.\n \"\"\"\n\n num_heads: int\n head_dim: int\n dtype: DType = jnp.float32\n dropout_rate: float = 0.\n kernel_init: Initializer = nn.initializers.variance_scaling(\n 1.0, 'fan_in', 'normal')\n float32_logits: bool = False # computes logits in float32 for stability.\n\n @nn.compact\n def __call__(self,\n inputs_q: Array,\n inputs_kv: Array,\n mask: Optional[Array] = None,\n bias: Optional[Array] = None,\n *,\n decode: bool = False,\n deterministic: bool = False) -> Array:\n \"\"\"Applies multi-head dot product attention on the input data.\n\n Projects the inputs into multi-headed query, key, and value vectors,\n applies dot-product attention and project the results to an output vector.\n\n There are two modes: decoding and non-decoding (e.g., training). The mode is\n determined by `decode` argument. For decoding, this method is called twice,\n first to initialize the cache and then for an actual decoding process. The\n two calls are differentiated by the presence of 'cached_key' in the variable\n dict. In the cache initialization stage, the cache variables are initialized\n as zeros and will be filled in the subsequent decoding process.\n\n In the cache initialization call, `inputs_q` has a shape [batch, length,\n q_features] and `inputs_kv`: [batch, length, kv_features]. During the\n incremental decoding stage, query, key and value all have the shape [batch,\n 1, qkv_features] corresponding to a single step.\n\n Args:\n inputs_q: input queries of shape `[batch, q_length, q_features]`.\n inputs_kv: key/values of shape `[batch, kv_length, kv_features]`.\n mask: attention mask of shape `[batch, num_heads, q_length, kv_length]`.\n bias: attention bias of shape `[batch, num_heads, q_length, kv_length]`.\n decode: Whether to prepare and use an autoregressive cache.\n deterministic: Disables dropout if set to True.\n\n Returns:\n output of shape `[batch, length, q_features]`.\n \"\"\"\n projection = functools.partial(\n DenseGeneral,\n axis=-1,\n features=(self.num_heads, self.head_dim),\n kernel_axes=('embed', 'joined_kv'),\n dtype=self.dtype)\n\n # NOTE: T5 does not explicitly rescale the attention logits by\n # 1/sqrt(depth_kq)! 
This is folded into the initializers of the\n # linear transformations, which is equivalent under Adafactor.\n depth_scaling = jnp.sqrt(self.head_dim).astype(self.dtype)\n query_init = lambda *args: self.kernel_init(*args) / depth_scaling\n\n # Project inputs_q to multi-headed q/k/v\n # dimensions are then [batch, length, num_heads, head_dim]\n query = projection(kernel_init=query_init, name='query')(inputs_q)\n key = projection(kernel_init=self.kernel_init, name='key')(inputs_kv)\n value = projection(kernel_init=self.kernel_init, name='value')(inputs_kv)\n\n query = with_sharding_constraint(query, ('batch', 'length', 'heads', 'kv'))\n key = with_sharding_constraint(key, ('batch', 'length', 'heads', 'kv'))\n value = with_sharding_constraint(value, ('batch', 'length', 'heads', 'kv'))\n\n if decode:\n # Detect if we're initializing by absence of existing cache data.\n is_initialized = self.has_variable('cache', 'cached_key')\n # The key and value have dimension [batch, length, num_heads, head_dim],\n # but we cache them as [batch, num_heads, head_dim, length] as a TPU\n # fusion optimization. This also enables the \"scatter via one-hot\n # broadcast\" trick, which means we do a one-hot broadcast instead of a\n # scatter/gather operations, resulting in a 3-4x speedup in practice.\n swap_dims = lambda x: x[:-3] + tuple(x[i] for i in [-2, -1, -3])\n cached_key = self.variable('cache', 'cached_key', jnp.zeros,\n swap_dims(key.shape), key.dtype)\n cached_value = self.variable('cache', 'cached_value', jnp.zeros,\n swap_dims(value.shape), value.dtype)\n cache_index = self.variable('cache', 'cache_index',\n lambda: jnp.array(0, dtype=jnp.int32))\n if is_initialized:\n batch, num_heads, head_dim, length = (cached_key.value.shape)\n # During fast autoregressive decoding, we feed one position at a time,\n # and cache the keys and values step by step.\n # Sanity shape check of cached key against input query.\n expected_shape = (batch, 1, num_heads, head_dim)\n if expected_shape != query.shape:\n raise ValueError('Autoregressive cache shape error, '\n 'expected query shape %s instead got %s.' %\n (expected_shape, query.shape))\n\n # Create a OHE of the current index. 
NOTE: the index is increased below.\n cur_index = cache_index.value\n one_hot_indices = jax.nn.one_hot(cur_index, length, dtype=key.dtype)\n # In order to update the key, value caches with the current key and\n # value, we move the length axis to the back, similar to what we did for\n # the cached ones above.\n # Note these are currently the key and value of a single position, since\n # we feed one position at a time.\n one_token_key = jnp.moveaxis(key, -3, -1)\n one_token_value = jnp.moveaxis(value, -3, -1)\n # Update key, value caches with our new 1d spatial slices.\n # We implement an efficient scatter into the cache via one-hot\n # broadcast and addition.\n key = cached_key.value + one_token_key * one_hot_indices\n value = cached_value.value + one_token_value * one_hot_indices\n cached_key.value = key\n cached_value.value = value\n cache_index.value = cache_index.value + 1\n # Move the keys and values back to their original shapes.\n key = jnp.moveaxis(key, -1, -3)\n value = jnp.moveaxis(value, -1, -3)\n\n # Causal mask for cached decoder self-attention: our single query\n # position should only attend to those key positions that have already\n # been generated and cached, not the remaining zero elements.\n mask = combine_masks(\n mask,\n jnp.broadcast_to(\n jnp.arange(length) <= cur_index,\n # (1, 1, length) represent (head dim, query length, key length)\n # query length is 1 because during decoding we deal with one\n # index.\n # The same mask is applied to all batch elements and heads.\n (batch, 1, 1, length)))\n\n # Grab the correct relative attention bias during decoding. This is\n # only required during single step decoding.\n if bias is not None:\n # The bias is a full attention matrix, but during decoding we only\n # have to take a slice of it.\n # This is equivalent to bias[..., cur_index:cur_index+1, :].\n bias = dynamic_vector_slice_in_dim(\n jnp.squeeze(bias, axis=0), jnp.reshape(cur_index, (-1)), 1, -2)\n\n # Convert the boolean attention mask to an attention bias.\n if mask is not None:\n # attention mask in the form of attention bias\n attention_bias = lax.select(\n mask > 0,\n jnp.full(mask.shape, 0.).astype(self.dtype),\n jnp.full(mask.shape, -1e10).astype(self.dtype))\n else:\n attention_bias = None\n\n # Add provided bias term (e.g. relative position embedding).\n if bias is not None:\n attention_bias = combine_biases(attention_bias, bias)\n\n dropout_rng = None\n if not deterministic and self.dropout_rate > 0.:\n dropout_rng = self.make_rng('dropout')\n\n # Apply attention.\n x = dot_product_attention(\n query,\n key,\n value,\n bias=attention_bias,\n dropout_rng=dropout_rng,\n dropout_rate=self.dropout_rate,\n deterministic=deterministic,\n dtype=self.dtype,\n float32_logits=self.float32_logits)\n\n # Back to the original inputs dimensions.\n out = DenseGeneral(\n features=inputs_q.shape[-1], # output dim is set to the input dim.\n axis=(-2, -1),\n kernel_init=self.kernel_init,\n kernel_axes=('joined_kv', 'embed'),\n dtype=self.dtype,\n name='out')(\n x)\n return out\n\n\ndef _normalize_axes(axes: Iterable[int], ndim: int) -> Tuple[int]:\n # A tuple by convention. 
len(axes_tuple) then also gives the rank efficiently.\n return tuple([ax if ax >= 0 else ndim + ax for ax in axes])\n\n\ndef _canonicalize_tuple(x):\n if isinstance(x, Iterable):\n return tuple(x)\n else:\n return (x,)\n\n\n#------------------------------------------------------------------------------\n# DenseGeneral for attention layers.\n#------------------------------------------------------------------------------\nclass DenseGeneral(nn.Module):\n \"\"\"A linear transformation (without bias) with flexible axes.\n\n Attributes:\n features: tuple with numbers of output features.\n axis: tuple with axes to apply the transformation on.\n dtype: the dtype of the computation (default: float32).\n kernel_init: initializer function for the weight matrix.\n \"\"\"\n features: Union[Iterable[int], int]\n axis: Union[Iterable[int], int] = -1\n dtype: DType = jnp.float32\n kernel_init: Initializer = nn.initializers.variance_scaling(\n 1.0, 'fan_in', 'truncated_normal')\n kernel_axes: Tuple[str, ...] = ()\n\n @nn.compact\n def __call__(self, inputs: Array) -> Array:\n \"\"\"Applies a linear transformation to the inputs along multiple dimensions.\n\n Args:\n inputs: The nd-array to be transformed.\n\n Returns:\n The transformed input.\n \"\"\"\n features = _canonicalize_tuple(self.features)\n axis = _canonicalize_tuple(self.axis)\n\n inputs = jnp.asarray(inputs, self.dtype)\n axis = _normalize_axes(axis, inputs.ndim)\n\n kernel_shape = tuple([inputs.shape[ax] for ax in axis]) + features\n kernel_param_shape = (np.prod([inputs.shape[ax] for ax in axis]),\n np.prod(features))\n kernel = param_with_axes(\n 'kernel',\n self.kernel_init,\n kernel_param_shape,\n jnp.float32,\n axes=self.kernel_axes)\n kernel = jnp.asarray(kernel, self.dtype)\n kernel = jnp.reshape(kernel, kernel_shape)\n\n contract_ind = tuple(range(0, len(axis)))\n return lax.dot_general(inputs, kernel, ((axis, contract_ind), ((), ())))\n\n\ndef _convert_to_activation_function(\n fn_or_string: Union[str, Callable]) -> Callable:\n \"\"\"Convert a string to an activation function.\"\"\"\n if fn_or_string == 'linear':\n return lambda x: x\n elif isinstance(fn_or_string, str):\n return getattr(nn, fn_or_string)\n elif callable(fn_or_string):\n return fn_or_string\n else:\n raise ValueError(\"don't know how to convert %s to an activation function\" %\n (fn_or_string,))\n\n\nclass MlpBlock(nn.Module):\n \"\"\"Transformer MLP / feed-forward block.\n\n Attributes:\n intermediate_dim: Shared dimension of hidden layers.\n activations: Type of activations for each layer. Each element is either\n 'linear', a string function name in flax.linen, or a function.\n kernel_init: Kernel function, passed to the dense layers.\n deterministic: Whether the dropout layers should be deterministic.\n intermediate_dropout_rate: Dropout rate used after the intermediate layers.\n dtype: Type for the dense layer.\n \"\"\"\n intermediate_dim: int = 2048\n activations: Sequence[Union[str, Callable]] = ('relu',)\n kernel_init: Initializer = nn.initializers.variance_scaling(\n 1.0, 'fan_in', 'truncated_normal')\n intermediate_dropout_rate: float = 0.1\n dtype: Any = jnp.float32\n\n @nn.compact\n def __call__(self, inputs, decode: bool = False, deterministic: bool = False):\n \"\"\"Applies Transformer MlpBlock module.\"\"\"\n # Iterate over specified MLP input activation functions.\n # e.g. 
('relu',) or ('linear', 'gelu') for gated-gelu.\n activations = []\n for idx, act_fn in enumerate(self.activations):\n dense_name = 'wi' if len(self.activations) == 1 else f'wi_{idx}'\n x = DenseGeneral(\n self.intermediate_dim,\n dtype=self.dtype,\n kernel_init=self.kernel_init,\n kernel_axes=('embed', 'mlp'),\n name=dense_name)(\n inputs)\n x = _convert_to_activation_function(act_fn)(x)\n activations.append(x)\n\n # Take elementwise product of above intermediate activations.\n x = functools.reduce(operator.mul, activations)\n # Apply dropout and final dense output projection.\n x = nn.Dropout(\n rate=self.intermediate_dropout_rate, broadcast_dims=(-2,))(\n x, deterministic=deterministic) # Broadcast along length.\n x = with_sharding_constraint(x, ('batch', 'length', 'mlp'))\n output = DenseGeneral(\n inputs.shape[-1],\n dtype=self.dtype,\n kernel_init=self.kernel_init,\n kernel_axes=('mlp', 'embed'),\n name='wo')(\n x)\n return output\n\n\nclass Embed(nn.Module):\n \"\"\"A parameterized function from integers [0, n) to d-dimensional vectors.\n\n Attributes:\n num_embeddings: number of embeddings.\n features: number of feature dimensions for each embedding.\n dtype: the dtype of the embedding vectors (default: float32).\n embedding_init: embedding initializer.\n one_hot: performs the gather with a one-hot contraction rather than a true\n gather. This is currently needed for SPMD partitioning.\n \"\"\"\n num_embeddings: int\n features: int\n cast_input_dtype: Optional[DType] = None\n dtype: DType = jnp.float32\n attend_dtype: Optional[DType] = None\n embedding_init: Initializer = default_embed_init\n one_hot: bool = False\n embedding: Array = dataclasses.field(init=False)\n\n def setup(self):\n self.embedding = param_with_axes(\n 'embedding',\n self.embedding_init, (self.num_embeddings, self.features),\n jnp.float32,\n axes=('vocab', 'embed'))\n\n def __call__(self, inputs: Array) -> Array:\n \"\"\"Embeds the inputs along the last dimension.\n\n Args:\n inputs: input data, all dimensions are considered batch dimensions.\n\n Returns:\n Output which is embedded input data. 
The output shape follows the input,\n with an additional `features` dimension appended.\n \"\"\"\n if self.cast_input_dtype:\n inputs = inputs.astype(self.cast_input_dtype)\n if not jnp.issubdtype(inputs.dtype, jnp.integer):\n raise ValueError('Input type must be an integer or unsigned integer.')\n if self.one_hot:\n iota = lax.iota(jnp.int32, self.num_embeddings)\n one_hot = jnp.array(inputs[..., jnp.newaxis] == iota, dtype=self.dtype)\n output = jnp.dot(one_hot, jnp.asarray(self.embedding, self.dtype))\n else:\n output = jnp.asarray(self.embedding, self.dtype)[inputs]\n output = with_sharding_constraint(output, ('batch', 'length', 'embed'))\n return output\n\n def attend(self, query: Array) -> Array:\n \"\"\"Attend over the embedding using a query array.\n\n Args:\n query: array with last dimension equal the feature depth `features` of the\n embedding.\n\n Returns:\n An array with final dim `num_embeddings` corresponding to the batched\n inner-product of the array of query vectors against each embedding.\n Commonly used for weight-sharing between embeddings and logit transform\n in NLP models.\n \"\"\"\n dtype = self.attend_dtype if self.attend_dtype is not None else self.dtype\n return jnp.dot(query, jnp.asarray(self.embedding, dtype).T)\n\n\nclass RelativePositionBiases(nn.Module):\n \"\"\"Adds T5-style relative positional embeddings to the attention logits.\n\n Attributes:\n num_buckets: Number of buckets to bucket distances between key and query\n positions into.\n max_distance: Maximum distance before everything is lumped into the last\n distance bucket.\n num_heads: Number of heads in the attention layer. Each head will get a\n different relative position weighting.\n dtype: Type of arrays through this module.\n embedding_init: initializer for relative embedding table.\n \"\"\"\n num_buckets: int\n max_distance: int\n num_heads: int\n dtype: Any\n embedding_init: Callable[..., Array] = nn.linear.default_embed_init\n\n @staticmethod\n def _relative_position_bucket(relative_position,\n bidirectional=True,\n num_buckets=32,\n max_distance=128):\n \"\"\"Translate relative position to a bucket number for relative attention.\n\n The relative position is defined as memory_position - query_position, i.e.\n the distance in tokens from the attending position to the attended-to\n position. If bidirectional=False, then positive relative positions are\n invalid.\n We use smaller buckets for small absolute relative_position and larger\n buckets for larger absolute relative_positions. All relative\n positions >=max_distance map to the same bucket. All relative\n positions <=-max_distance map to the same bucket. 
This should allow for\n more graceful generalization to longer sequences than the model has been\n trained on.\n\n Args:\n relative_position: an int32 array\n bidirectional: a boolean - whether the attention is bidirectional\n num_buckets: an integer\n max_distance: an integer\n\n Returns:\n a Tensor with the same shape as relative_position, containing int32\n values in the range [0, num_buckets)\n \"\"\"\n ret = 0\n n = -relative_position\n if bidirectional:\n num_buckets //= 2\n ret += (n < 0).astype(np.int32) * num_buckets\n n = np.abs(n)\n else:\n n = np.maximum(n, 0)\n # now n is in the range [0, inf)\n max_exact = num_buckets // 2\n is_small = (n < max_exact)\n val_if_large = max_exact + (\n np.log(n.astype(np.float32) / max_exact + np.finfo(np.float32).eps) /\n np.log(max_distance / max_exact) *\n (num_buckets - max_exact)).astype(np.int32)\n val_if_large = np.minimum(val_if_large, num_buckets - 1)\n ret += np.where(is_small, n, val_if_large)\n return ret\n\n @nn.compact\n def __call__(self, qlen, klen, bidirectional=True):\n \"\"\"Produce relative position embedding attention biases.\n\n Args:\n qlen: attention query length.\n klen: attention key length.\n bidirectional: whether to allow positive memory-query relative position\n embeddings.\n\n Returns:\n output: `(1, len, q_len, k_len)` attention bias\n \"\"\"\n # TODO(levskaya): should we be computing this w. numpy as a program\n # constant?\n context_position = np.arange(qlen, dtype=jnp.int32)[:, None]\n memory_position = np.arange(klen, dtype=jnp.int32)[None, :]\n relative_position = memory_position - context_position # shape (qlen, klen)\n rp_bucket = self._relative_position_bucket(\n relative_position,\n bidirectional=bidirectional,\n num_buckets=self.num_buckets,\n max_distance=self.max_distance)\n relative_attention_bias = param_with_axes(\n 'rel_embedding',\n self.embedding_init, (self.num_heads, self.num_buckets),\n jnp.float32,\n axes=('heads', 'relpos_buckets'))\n\n relative_attention_bias = jnp.asarray(relative_attention_bias, self.dtype)\n # Instead of using a slow gather, we create a leading-dimension one-hot\n # array from rp_bucket and use it to perform the gather-equivalent via a\n # contraction, i.e.:\n # (num_head, num_buckets) x (num_buckets one-hot, qlen, klen).\n # This is equivalent to relative_attention_bias[:, rp_bucket]\n bcast_iota = lax.broadcasted_iota(jnp.int32, (self.num_buckets, 1, 1), 0)\n rp_bucket_one_hot = jnp.array(\n rp_bucket[jnp.newaxis, ...] 
== bcast_iota, dtype=self.dtype)\n # --> shape (qlen, klen, num_heads)\n values = lax.dot_general(\n relative_attention_bias,\n rp_bucket_one_hot,\n (\n ((1,), (0,)), # rhs, lhs contracting dims\n ((), ()))) # no batched dims\n # Add a singleton batch dimension.\n # --> shape (1, num_heads, qlen, klen)\n return values[jnp.newaxis, ...]\n\n\n#------------------------------------------------------------------------------\n# T5 Layernorm - no subtraction of mean or bias.\n#------------------------------------------------------------------------------\nclass LayerNorm(nn.Module):\n \"\"\"T5 Layer normalization operating on the last axis of the input data.\"\"\"\n epsilon: float = 1e-6\n dtype: Any = jnp.float32\n scale_init: Initializer = nn.initializers.ones\n\n @nn.compact\n def __call__(self, x: jnp.ndarray) -> jnp.ndarray:\n \"\"\"Applies layer normalization on the input.\"\"\"\n x = jnp.asarray(x, jnp.float32)\n features = x.shape[-1]\n mean2 = jnp.mean(lax.square(x), axis=-1, keepdims=True)\n y = jnp.asarray(x * lax.rsqrt(mean2 + self.epsilon), self.dtype)\n scale = param_with_axes(\n 'scale', self.scale_init, (features,), jnp.float32, axes=('embed',))\n\n scale = jnp.asarray(scale, self.dtype)\n return y * scale\n\n\n#------------------------------------------------------------------------------\n# Mask-making utility functions.\n#------------------------------------------------------------------------------\ndef make_attention_mask(query_input: Array,\n key_input: Array,\n pairwise_fn: Callable = jnp.multiply,\n extra_batch_dims: int = 0,\n dtype: DType = jnp.float32) -> Array:\n \"\"\"Mask-making helper for attention weights.\n\n In case of 1d inputs (i.e., `[batch, len_q]`, `[batch, len_kv]`, the\n attention weights will be `[batch, heads, len_q, len_kv]` and this\n function will produce `[batch, 1, len_q, len_kv]`.\n\n Args:\n query_input: a batched, flat input of query_length size\n key_input: a batched, flat input of key_length size\n pairwise_fn: broadcasting elementwise comparison function\n extra_batch_dims: number of extra batch dims to add singleton axes for, none\n by default\n dtype: mask return dtype\n\n Returns:\n A `[batch, 1, len_q, len_kv]` shaped mask for 1d attention.\n \"\"\"\n # [batch, len_q, len_kv]\n mask = pairwise_fn(\n # [batch, len_q] -> [batch, len_q, 1]\n jnp.expand_dims(query_input, axis=-1),\n # [batch, len_q] -> [batch, 1, len_kv]\n jnp.expand_dims(key_input, axis=-2))\n\n # [batch, 1, len_q, len_kv]. This creates the head dim.\n mask = jnp.expand_dims(mask, axis=-3)\n mask = jnp.expand_dims(mask, axis=tuple(range(extra_batch_dims)))\n return mask.astype(dtype)\n\n\ndef make_causal_mask(x: Array,\n extra_batch_dims: int = 0,\n dtype: DType = jnp.float32) -> Array:\n \"\"\"Make a causal mask for self-attention.\n\n In case of 1d inputs (i.e., `[batch, len]`, the self-attention weights\n will be `[batch, heads, len, len]` and this function will produce a\n causal mask of shape `[batch, 1, len, len]`.\n\n Note that a causal mask does not depend on the values of x; it only depends on\n the shape. 
If x has padding elements, they will not be treated in a special\n manner.\n\n Args:\n x: input array of shape `[batch, len]`\n extra_batch_dims: number of batch dims to add singleton axes for, none by\n default\n dtype: mask return dtype\n\n Returns:\n A `[batch, 1, len, len]` shaped causal mask for 1d attention.\n \"\"\"\n idxs = jnp.broadcast_to(jnp.arange(x.shape[-1], dtype=jnp.int32), x.shape)\n return make_attention_mask(\n idxs,\n idxs,\n jnp.greater_equal,\n extra_batch_dims=extra_batch_dims,\n dtype=dtype)\n\n\ndef combine_masks(*masks: Optional[Array], dtype: DType = jnp.float32):\n \"\"\"Combine attention masks.\n\n Args:\n *masks: set of attention mask arguments to combine, some can be None.\n dtype: final mask dtype\n\n Returns:\n Combined mask, reduced by logical and, returns None if no masks given.\n \"\"\"\n masks = [m for m in masks if m is not None]\n if not masks:\n return None\n assert all(map(lambda x: x.ndim == masks[0].ndim, masks)), (\n f'masks must have same rank: {tuple(map(lambda x: x.ndim, masks))}')\n mask, *other_masks = masks\n for other_mask in other_masks:\n mask = jnp.logical_and(mask, other_mask)\n return mask.astype(dtype)\n\n\ndef combine_biases(*masks: Optional[Array]):\n \"\"\"Combine attention biases.\n\n Args:\n *masks: set of attention bias arguments to combine, some can be None.\n\n Returns:\n Combined mask, reduced by summation, returns None if no masks given.\n \"\"\"\n masks = [m for m in masks if m is not None]\n if not masks:\n return None\n assert all(map(lambda x: x.ndim == masks[0].ndim, masks)), (\n f'masks must have same rank: {tuple(map(lambda x: x.ndim, masks))}')\n mask, *other_masks = masks\n for other_mask in other_masks:\n mask = mask + other_mask\n return mask\n\n\ndef make_decoder_mask(decoder_target_tokens: Array,\n dtype: DType,\n decoder_causal_attention: Optional[Array] = None,\n decoder_segment_ids: Optional[Array] = None) -> Array:\n \"\"\"Compute the self-attention mask for a decoder.\n\n Decoder mask is formed by combining a causal mask, a padding mask and an\n optional packing mask. If decoder_causal_attention is passed, it makes the\n masking non-causal for positions that have value of 1.\n\n A prefix LM is applied to a dataset which has a notion of \"inputs\" and\n \"targets\", e.g., a machine translation task. The inputs and targets are\n concatenated to form a new target. `decoder_target_tokens` is the concatenated\n decoder output tokens.\n\n The \"inputs\" portion of the concatenated sequence can attend to other \"inputs\"\n tokens even for those at a later time steps. In order to control this\n behavior, `decoder_causal_attention` is necessary. This is a binary mask with\n a value of 1 indicating that the position belonged to \"inputs\" portion of the\n original dataset.\n\n Example:\n\n Suppose we have a dataset with two examples.\n\n ds = [{\"inputs\": [6, 7], \"targets\": [8]},\n {\"inputs\": [3, 4], \"targets\": [5]}]\n\n After the data preprocessing with packing, the two examples are packed into\n one example with the following three fields (some fields are skipped for\n simplicity).\n\n decoder_target_tokens = [[6, 7, 8, 3, 4, 5, 0]]\n decoder_segment_ids = [[1, 1, 1, 2, 2, 2, 0]]\n decoder_causal_attention = [[1, 1, 0, 1, 1, 0, 0]]\n\n where each array has [batch, length] shape with batch size being 1. 
Then,\n this function computes the following mask.\n\n mask = [[[[1, 1, 0, 0, 0, 0, 0],\n [1, 1, 0, 0, 0, 0, 0],\n [1, 1, 1, 0, 0, 0, 0],\n [0, 0, 0, 1, 1, 0, 0],\n [0, 0, 0, 1, 1, 0, 0],\n [0, 0, 0, 1, 1, 1, 0],\n [0, 0, 0, 0, 0, 0, 0]]]]\n\n mask[b, 1, :, :] represents the mask for the example `b` in the batch.\n Because mask is for a self-attention layer, the mask's shape is a square of\n shape [query length, key length].\n\n mask[b, 1, i, j] = 1 means that the query token at position i can attend to\n the key token at position j.\n\n Args:\n decoder_target_tokens: decoder output tokens. [batch, length]\n dtype: dtype of the output mask.\n decoder_causal_attention: a binary mask indicating which position should\n only attend to earlier positions in the sequence. Others will attend\n bidirectionally. [batch, length]\n decoder_segment_ids: decoder segmentation info for packed examples. [batch,\n length]\n\n Returns:\n the combined decoder mask.\n \"\"\"\n masks = []\n # The same mask is applied to all attention heads. So the head dimension is 1,\n # i.e., the mask will be broadcast along the heads dim.\n # [batch, 1, length, length]\n causal_mask = make_causal_mask(decoder_target_tokens, dtype=dtype)\n\n # Positions with value 1 in `decoder_causal_attneition` can attend\n # bidirectionally.\n if decoder_causal_attention is not None:\n # [batch, 1, length, length]\n inputs_mask = make_attention_mask(\n decoder_causal_attention,\n decoder_causal_attention,\n jnp.logical_and,\n dtype=dtype)\n masks.append(jnp.logical_or(causal_mask, inputs_mask).astype(dtype))\n else:\n masks.append(causal_mask)\n\n # Padding mask.\n masks.append(\n make_attention_mask(\n decoder_target_tokens > 0, decoder_target_tokens > 0, dtype=dtype))\n\n # Packing mask\n if decoder_segment_ids is not None:\n masks.append(\n make_attention_mask(\n decoder_segment_ids, decoder_segment_ids, jnp.equal, dtype=dtype))\n\n return combine_masks(*masks, dtype=dtype)\n"
]
| [
[
"numpy.log",
"numpy.minimum",
"numpy.where",
"numpy.finfo",
"numpy.prod",
"numpy.arange",
"numpy.abs",
"numpy.maximum"
]
]
|
jroesch/torchdynamo | [
"0b1e34d53f53937b3066e61a14d210365a24b156"
]
| [
"torchdynamo/allowed_functions.py"
]
| [
"import builtins\nimport collections\nimport copy\nimport functools\nimport itertools\nimport math\nimport operator\nimport types\nimport warnings\nfrom functools import lru_cache\n\nimport numpy\nimport torch\n\n\n@lru_cache(None)\ndef _disallowed_function_ids():\n remove = [\n True,\n False,\n None,\n collections.OrderedDict,\n copy.copy,\n copy.deepcopy,\n torch.autocast_decrement_nesting,\n torch.autocast_increment_nesting,\n torch.clear_autocast_cache,\n torch.distributions.constraints.is_dependent,\n torch.distributions.normal.Normal,\n torch.inference_mode,\n torch.set_anomaly_enabled,\n torch.set_autocast_cache_enabled,\n torch.set_autocast_cpu_dtype,\n torch.set_autocast_cpu_enabled,\n torch.set_autocast_enabled,\n torch.set_autocast_gpu_dtype,\n warnings.warn,\n ]\n return {id(x) for x in remove}\n\n\n@lru_cache(None)\ndef _allowed_function_ids():\n \"\"\"\n Walk torch.* and get the ids of all the stuff in it\n \"\"\"\n warnings.filterwarnings(\"ignore\", category=UserWarning, module=\"torch.distributed\")\n torch.distributions.Distribution.set_default_validate_args(False)\n torch_object_ids = dict()\n\n def _find_torch_objects(module):\n if module.__name__.startswith(\"torch.distributions\"):\n return\n torch_object_ids[id(module)] = module.__name__\n for name, obj in list(module.__dict__.items()):\n if id(obj) not in torch_object_ids:\n if isinstance(obj, types.ModuleType):\n if obj.__name__.startswith(\"torch.\"):\n torch_object_ids[id(obj)] = f\"{module.__name__}.{name}\"\n _find_torch_objects(obj)\n else:\n torch_object_ids[id(obj)] = f\"{module.__name__}.{name}\"\n\n _find_torch_objects(torch)\n _find_torch_objects(math)\n\n for idx in _disallowed_function_ids():\n if idx in torch_object_ids:\n del torch_object_ids[idx]\n\n return torch_object_ids\n\n\ndef is_allowed(obj):\n \"\"\"Is this safe to trace like torch.add ?\"\"\"\n return id(obj) in _allowed_function_ids()\n\n\ndef is_disallowed(obj):\n \"\"\"Is this safe to trace like torch.add ?\"\"\"\n return id(obj) in _disallowed_function_ids()\n\n\n@lru_cache(None)\ndef _builtin_function_ids():\n rv = {\n id(v): f\"builtins.{k}\"\n for k, v in builtins.__dict__.items()\n if not k.startswith(\"_\") and callable(v)\n }\n rv.update(\n {\n id(v): f\"operator.{k}\"\n for k, v in operator.__dict__.items()\n if not k.startswith(\"_\") and callable(v)\n }\n )\n rv.update(\n {id(v): f\"functools.{v.__name__}\" for v in (itertools.chain, itertools.islice)}\n )\n rv[id(functools.reduce)] = \"functools.reduce\"\n return rv\n\n\ndef is_builtin(obj):\n return id(obj) in _builtin_function_ids()\n\n\n@lru_cache(None)\ndef _numpy_function_ids():\n rv = dict()\n for mod in (numpy, numpy.random):\n rv.update(\n {\n id(v): f\"{mod.__name__}.{k}\"\n for k, v in mod.__dict__.items()\n if callable(v)\n and (getattr(v, \"__module__\", None) or mod.__name__) == mod.__name__\n }\n )\n return rv\n\n\ndef is_numpy(obj):\n return isinstance(obj, numpy.ndarray) or id(obj) in _numpy_function_ids()\n"
]
| [
[
"torch.distributions.Distribution.set_default_validate_args"
]
]
|
ianthomas23/xarray | [
"aa1d1d19b822897399c8ed2cf346afbac71f45b3"
]
| [
"xarray/tests/test_formatting_html.py"
]
| [
"from __future__ import annotations\n\nimport numpy as np\nimport pandas as pd\nimport pytest\n\nimport xarray as xr\nfrom xarray.core import formatting_html as fh\n\n\[email protected]\ndef dataarray():\n return xr.DataArray(np.random.RandomState(0).randn(4, 6))\n\n\[email protected]\ndef dask_dataarray(dataarray):\n pytest.importorskip(\"dask\")\n return dataarray.chunk()\n\n\[email protected]\ndef multiindex():\n mindex = pd.MultiIndex.from_product(\n [[\"a\", \"b\"], [1, 2]], names=(\"level_1\", \"level_2\")\n )\n return xr.Dataset({}, {\"x\": mindex})\n\n\[email protected]\ndef dataset():\n times = pd.date_range(\"2000-01-01\", \"2001-12-31\", name=\"time\")\n annual_cycle = np.sin(2 * np.pi * (times.dayofyear.values / 365.25 - 0.28))\n\n base = 10 + 15 * annual_cycle.reshape(-1, 1)\n tmin_values = base + 3 * np.random.randn(annual_cycle.size, 3)\n tmax_values = base + 10 + 3 * np.random.randn(annual_cycle.size, 3)\n\n return xr.Dataset(\n {\n \"tmin\": ((\"time\", \"location\"), tmin_values),\n \"tmax\": ((\"time\", \"location\"), tmax_values),\n },\n {\"time\": times, \"location\": [\"<IA>\", \"IN\", \"IL\"]},\n attrs={\"description\": \"Test data.\"},\n )\n\n\ndef test_short_data_repr_html(dataarray) -> None:\n data_repr = fh.short_data_repr_html(dataarray)\n assert data_repr.startswith(\"<pre>array\")\n\n\ndef test_short_data_repr_html_non_str_keys(dataset) -> None:\n ds = dataset.assign({2: lambda x: x[\"tmin\"]})\n fh.dataset_repr(ds)\n\n\ndef test_short_data_repr_html_dask(dask_dataarray) -> None:\n assert hasattr(dask_dataarray.data, \"_repr_html_\")\n data_repr = fh.short_data_repr_html(dask_dataarray)\n assert data_repr == dask_dataarray.data._repr_html_()\n\n\ndef test_format_dims_no_dims() -> None:\n dims: dict = {}\n dims_with_index: list = []\n formatted = fh.format_dims(dims, dims_with_index)\n assert formatted == \"\"\n\n\ndef test_format_dims_unsafe_dim_name() -> None:\n dims = {\"<x>\": 3, \"y\": 2}\n dims_with_index: list = []\n formatted = fh.format_dims(dims, dims_with_index)\n assert \"<x>\" in formatted\n\n\ndef test_format_dims_non_index() -> None:\n dims, dims_with_index = {\"x\": 3, \"y\": 2}, [\"time\"]\n formatted = fh.format_dims(dims, dims_with_index)\n assert \"class='xr-has-index'\" not in formatted\n\n\ndef test_format_dims_index() -> None:\n dims, dims_with_index = {\"x\": 3, \"y\": 2}, [\"x\"]\n formatted = fh.format_dims(dims, dims_with_index)\n assert \"class='xr-has-index'\" in formatted\n\n\ndef test_summarize_attrs_with_unsafe_attr_name_and_value() -> None:\n attrs = {\"<x>\": 3, \"y\": \"<pd.DataFrame>\"}\n formatted = fh.summarize_attrs(attrs)\n assert \"<dt><span><x> :</span></dt>\" in formatted\n assert \"<dt><span>y :</span></dt>\" in formatted\n assert \"<dd>3</dd>\" in formatted\n assert \"<dd><pd.DataFrame></dd>\" in formatted\n\n\ndef test_repr_of_dataarray(dataarray) -> None:\n formatted = fh.array_repr(dataarray)\n assert \"dim_0\" in formatted\n # has an expanded data section\n assert formatted.count(\"class='xr-array-in' type='checkbox' checked>\") == 1\n # coords and attrs don't have an items so they'll be be disabled and collapsed\n assert (\n formatted.count(\"class='xr-section-summary-in' type='checkbox' disabled >\") == 2\n )\n\n with xr.set_options(display_expand_data=False):\n formatted = fh.array_repr(dataarray)\n assert \"dim_0\" in formatted\n # has an expanded data section\n assert formatted.count(\"class='xr-array-in' type='checkbox' checked>\") == 0\n # coords and attrs don't have an items so they'll be be disabled and 
collapsed\n assert (\n formatted.count(\"class='xr-section-summary-in' type='checkbox' disabled >\")\n == 2\n )\n\n\ndef test_repr_of_multiindex(multiindex) -> None:\n formatted = fh.dataset_repr(multiindex)\n assert \"(x)\" in formatted\n\n\ndef test_repr_of_dataset(dataset) -> None:\n formatted = fh.dataset_repr(dataset)\n # coords, attrs, and data_vars are expanded\n assert (\n formatted.count(\"class='xr-section-summary-in' type='checkbox' checked>\") == 3\n )\n assert \"<U4\" in formatted or \">U4\" in formatted\n assert \"<IA>\" in formatted\n\n with xr.set_options(\n display_expand_coords=False,\n display_expand_data_vars=False,\n display_expand_attrs=False,\n ):\n formatted = fh.dataset_repr(dataset)\n # coords, attrs, and data_vars are collapsed\n assert (\n formatted.count(\"class='xr-section-summary-in' type='checkbox' checked>\")\n == 0\n )\n assert \"<U4\" in formatted or \">U4\" in formatted\n assert \"<IA>\" in formatted\n\n\ndef test_repr_text_fallback(dataset) -> None:\n formatted = fh.dataset_repr(dataset)\n\n # Just test that the \"pre\" block used for fallback to plain text is present.\n assert \"<pre class='xr-text-repr-fallback'>\" in formatted\n\n\ndef test_variable_repr_html() -> None:\n v = xr.Variable([\"time\", \"x\"], [[1, 2, 3], [4, 5, 6]], {\"foo\": \"bar\"})\n assert hasattr(v, \"_repr_html_\")\n with xr.set_options(display_style=\"html\"):\n html = v._repr_html_().strip()\n # We don't do a complete string identity since\n # html output is probably subject to change, is long and... reasons.\n # Just test that something reasonable was produced.\n assert html.startswith(\"<div\") and html.endswith(\"</div>\")\n assert \"xarray.Variable\" in html\n\n\ndef test_repr_of_nonstr_dataset(dataset) -> None:\n ds = dataset.copy()\n ds.attrs[1] = \"Test value\"\n ds[2] = ds[\"tmin\"]\n formatted = fh.dataset_repr(ds)\n assert \"<dt><span>1 :</span></dt><dd>Test value</dd>\" in formatted\n assert \"<div class='xr-var-name'><span>2</span>\" in formatted\n\n\ndef test_repr_of_nonstr_dataarray(dataarray) -> None:\n da = dataarray.rename(dim_0=15)\n da.attrs[1] = \"value\"\n formatted = fh.array_repr(da)\n assert \"<dt><span>1 :</span></dt><dd>value</dd>\" in formatted\n assert \"<li><span>15</span>: 4</li>\" in formatted\n\n\ndef test_nonstr_variable_repr_html() -> None:\n v = xr.Variable([\"time\", 10], [[1, 2, 3], [4, 5, 6]], {22: \"bar\"})\n assert hasattr(v, \"_repr_html_\")\n with xr.set_options(display_style=\"html\"):\n html = v._repr_html_().strip()\n assert \"<dt><span>22 :</span></dt><dd>bar</dd>\" in html\n assert \"<li><span>10</span>: 3</li></ul>\" in html\n"
]
| [
[
"numpy.sin",
"numpy.random.RandomState",
"pandas.date_range",
"numpy.random.randn",
"pandas.MultiIndex.from_product"
]
]
|
INK-USC/CPL | [
"215c2850ac5d931eac206c03e2eca0fbbfd99948"
]
| [
"baselines/MultiHopKG-master/src/utils/ops.py"
]
| [
"\"\"\"\n Copyright (c) 2018, salesforce.com, inc.\n All rights reserved.\n SPDX-License-Identifier: BSD-3-Clause\n For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause\n \n Customized operators and utility functions.\n\"\"\"\n\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\n\nEPSILON = float(np.finfo(float).eps)\nHUGE_INT = 1e31\n\n\ndef batch_lookup(M, idx, vector_output=True):\n \"\"\"\n Perform batch lookup on matrix M using indices idx.\n :param M: (Variable) [batch_size, seq_len] Each row of M is an independent population.\n :param idx: (Variable) [batch_size, sample_size] Each row of idx is a list of sample indices.\n :param vector_output: If set, return a 1-D vector when sample size is 1.\n :return samples: [batch_size, sample_size] samples[i, j] = M[idx[i, j]]\n \"\"\"\n batch_size, w = M.size()\n batch_size2, sample_size = idx.size()\n assert(batch_size == batch_size2)\n\n if sample_size == 1 and vector_output:\n samples = torch.gather(M, 1, idx).view(-1)\n else:\n samples = torch.gather(M, 1, idx)\n return samples\n\n\ndef convert_to_dist(x):\n x += EPSILON\n return x / x.sum(1, keepdim=True)\n\n\ndef detach_module(mdl):\n for param in mdl.parameters():\n param.requires_grad = False\n\n\ndef entropy(p):\n return torch.sum(-p * safe_log(p), 1)\n\n\ndef weighted_softmax(v, w, dim=-1):\n exp_v = torch.exp(v)\n weighted_exp_v = w * exp_v\n return weighted_exp_v / torch.sum(weighted_exp_v, dim, keepdim=True)\n\n\ndef format_triple(triple, kg):\n e1, e2, r = triple\n rel = kg.id2relation[r] if r != kg.self_edge else '<null>'\n if not rel.endswith('_inv'):\n return '{}-{}->{}'.format(\n kg.id2entity[e1], rel, kg.id2entity[e2])\n else:\n return '{}<-{}-{}'.format(\n kg.id2entity[e1], rel, kg.id2entity[e2])\n\n\ndef format_path(path_trace, kg):\n def get_most_recent_relation(j):\n relation_id = int(path_trace[j][0])\n if relation_id == kg.self_edge:\n return '<null>'\n else:\n return kg.id2relation[relation_id]\n\n def get_most_recent_entity(j):\n return kg.id2entity[int(path_trace[j][1])]\n\n path_str = get_most_recent_entity(0)\n for j in range(1, len(path_trace)):\n rel = get_most_recent_relation(j)\n if not rel.endswith('_inv'):\n path_str += '-{}->'.format(rel)\n else:\n path_str += '<-{}-'.format(rel[:-4])\n path_str += get_most_recent_entity(j)\n return path_str\n\n\ndef format_rule(rule, kg):\n rule_str = ''\n for j in range(len(rule)):\n relation_id = int(rule[j])\n rel = kg.id2relation[relation_id]\n if not rel.endswith('_inv'):\n rule_str += '-{}-> '.format(rel)\n else:\n rule_str += '<-{}-'.format(rel)\n return rule_str\n\n\ndef ones_var_cuda(s, requires_grad=False):\n return Variable(torch.ones(s), requires_grad=requires_grad).cuda()\n\n\ndef zeros_var_cuda(s, requires_grad=False):\n return Variable(torch.zeros(s), requires_grad=requires_grad).cuda()\n\n\ndef int_fill_var_cuda(s, value, requires_grad=False):\n return int_var_cuda((torch.zeros(s) + value), requires_grad=requires_grad)\n\n\ndef int_var_cuda(x, requires_grad=False):\n return Variable(x, requires_grad=requires_grad).long().cuda()\n\n\ndef var_cuda(x, requires_grad=False):\n return Variable(x, requires_grad=requires_grad).cuda()\n\n\ndef var_to_numpy(x):\n return x.data.cpu().numpy()\n\n\ndef pad_and_cat(a, padding_value, padding_dim=1):\n max_dim_size = max([x.size()[padding_dim] for x in a])\n padded_a = []\n for x in a:\n if x.size()[padding_dim] < max_dim_size:\n res_len = max_dim_size - x.size()[1]\n pad 
= nn.ConstantPad1d((0, res_len), padding_value)\n padded_a.append(pad(x))\n else:\n padded_a.append(x)\n return torch.cat(padded_a, dim=0)\n\n\ndef rearrange_vector_list(l, offset):\n for i, v in enumerate(l):\n l[i] = v[offset]\n\ndef safe_log(x):\n return torch.log(x + EPSILON)\n\n\ndef tile_along_beam(v, beam_size, dim=0):\n \"\"\"\n Tile a tensor along a specified dimension for the specified beam size.\n :param v: Input tensor.\n :param beam_size: Beam size.\n \"\"\"\n if dim == -1:\n dim = len(v.size()) - 1\n v = v.unsqueeze(dim + 1)\n v = torch.cat([v] * beam_size, dim=dim+1)\n new_size = []\n for i, d in enumerate(v.size()):\n if i == dim + 1:\n new_size[-1] *= d\n else:\n new_size.append(d)\n return v.view(new_size)\n\n\n# Flatten and pack nested lists using recursion\ndef flatten(l):\n flatten_l = []\n for c in l:\n if type(c) is list or type(c) is tuple:\n flatten_l.extend(flatten(c))\n else:\n flatten_l.append(c)\n return flatten_l\n\n\ndef pack(l, a):\n \"\"\"\n Pack a flattened list l into the structure of the nested list a.\n \"\"\"\n nested_l = []\n for c in a:\n if type(c) is not list:\n nested_l.insert(l[0], 0)\n l.pop(0)\n\n\ndef unique_max(unique_x, x, values, marker_2D=None):\n unique_interval = 100\n unique_values, unique_indices = [], []\n # prevent memory explotion during decoding\n for i in range(0, len(unique_x), unique_interval):\n unique_x_b = unique_x[i:i+unique_interval]\n marker_2D = (unique_x_b.unsqueeze(1) == x.unsqueeze(0)).float()\n values_2D = marker_2D * values.unsqueeze(0) - (1 - marker_2D) * HUGE_INT\n unique_values_b, unique_idx_b = values_2D.max(dim=1)\n unique_values.append(unique_values_b)\n unique_indices.append(unique_idx_b)\n unique_values = torch.cat(unique_values)\n unique_idx = torch.cat(unique_indices)\n return unique_values, unique_idx\n\n\nif __name__ == '__main__':\n a = torch.randn(2)\n print(a)\n print(tile_along_beam(a, 4))\n print('--------------------------')\n b = torch.randn(2, 3)\n print(b)\n c = tile_along_beam(b, 4)\n print(c)\n print('--------------------------')\n print(c.view(2, -1))\n"
]
| [
[
"torch.zeros",
"torch.cat",
"torch.gather",
"torch.autograd.Variable",
"torch.nn.ConstantPad1d",
"torch.ones",
"numpy.finfo",
"torch.log",
"torch.exp",
"torch.randn",
"torch.sum"
]
]
|
KFilippopolitis/MIP-Engine | [
"11b43c80066ae3face1c7242c1dfafd4b974e5f8"
]
| [
"tests/unit_tests/test_logistic_regression.py"
]
| [
"import numpy as np\nimport pandas as pd\n\nfrom mipengine.algorithms.logistic_regression import label_binarize\n\n\ndef test_label_binarize_two_classes_equal():\n y = pd.DataFrame({\"y\": [\"a\", \"a\", \"b\", \"b\"]})\n ybin = label_binarize(y, classes=[\"a\", \"b\"])\n assert sum(ybin) == 2\n\n\ndef test_label_binarize_two_classes_unequal():\n y = pd.DataFrame({\"y\": [\"a\", \"a\", \"b\", \"b\", \"b\"]})\n ybin = label_binarize(y, classes=[\"b\", \"a\"])\n assert sum(ybin) == 2 or sum(ybin) == 3\n\n\ndef test_label_binarize_three_classes():\n y = pd.DataFrame({\"y\": [\"a\", \"a\", \"b\", \"b\", \"c\"]})\n ybin = label_binarize(y, classes=[\"a\"])\n expected = np.array([1, 1, 0, 0, 0])\n assert (ybin == expected).all()\n"
]
| [
[
"pandas.DataFrame",
"numpy.array"
]
]
|
iron316/pytorch-lightning | [
"1aba411da96ed95419d13ec1f86a0d38a232f73e"
]
| [
"pytorch_lightning/trainer/data_loading.py"
]
| [
"from abc import ABC, abstractmethod\nfrom typing import Union, List, Tuple, Callable\n\nimport torch.distributed as torch_distrib\nfrom torch.utils.data import SequentialSampler, DataLoader\nfrom torch.utils.data.distributed import DistributedSampler\n\nfrom pytorch_lightning.core import LightningModule\nfrom pytorch_lightning.utilities.debugging import MisconfigurationException\n\ntry:\n from apex import amp\nexcept ImportError:\n APEX_AVAILABLE = False\nelse:\n APEX_AVAILABLE = True\n\ntry:\n import torch_xla\n import torch_xla.core.xla_model as xm\n import torch_xla.distributed.xla_multiprocessing as xmp\nexcept ImportError:\n XLA_AVAILABLE = False\nelse:\n XLA_AVAILABLE = True\n\n\ndef _has_len(dataloader: DataLoader) -> bool:\n \"\"\" Checks if a given Dataloader has __len__ method implemented i.e. if\n it is a finite dataloader or infinite dataloader \"\"\"\n try:\n # try getting the length\n if len(dataloader) == 0:\n raise ValueError('Dataloader returned 0 length. Please make sure'\n ' that your Dataloader atleast returns 1 batch')\n return True\n except TypeError:\n return False\n\n\nclass TrainerDataLoadingMixin(ABC):\n\n # this is just a summary on variables used in this abstract class,\n # the proper values/initialisation should be done in child class\n proc_rank: int\n use_ddp: bool\n use_ddp2: bool\n shown_warnings: ...\n val_check_interval: float\n use_tpu: bool\n tpu_local_core_rank: int\n train_dataloader: DataLoader\n num_training_batches: Union[int, float]\n val_check_batch: ...\n val_dataloaders: List[DataLoader]\n num_val_batches: Union[int, float]\n test_dataloaders: List[DataLoader]\n num_test_batches: Union[int, float]\n train_percent_check: float\n val_percent_check: float\n test_percent_check: float\n\n @abstractmethod\n def is_overriden(self, *args):\n \"\"\"Warning: this is just empty shell for code implemented in other class.\"\"\"\n\n def _percent_range_check(self, name: str) -> None:\n value = getattr(self, name)\n msg = f'`{name}` must lie in the range [0.0, 1.0], but got {value:.3f}.'\n if name == 'val_check_interval':\n msg += ' If you want to disable validation set `val_percent_check` to 0.0 instead.'\n\n if not 0. 
<= value <= 1.:\n raise ValueError(msg)\n\n def auto_add_sampler(self, dataloader: DataLoader, train: bool) -> DataLoader:\n if self.use_ddp or self.use_ddp2 or self.use_tpu:\n dl_args = {\n 'dataset': dataloader.dataset,\n 'batch_size': dataloader.batch_size,\n 'shuffle': False,\n 'num_workers': dataloader.num_workers,\n 'collate_fn': dataloader.collate_fn,\n 'pin_memory': dataloader.pin_memory,\n 'drop_last': dataloader.drop_last,\n 'timeout': dataloader.timeout,\n 'worker_init_fn': dataloader.worker_init_fn\n }\n\n if self.use_tpu:\n sampler = DistributedSampler(\n dataloader.dataset,\n num_replicas=xm.xrt_world_size(),\n rank=xm.get_ordinal()\n )\n dl_args['shuffle'] = False\n else:\n sampler = DistributedSampler(dataloader.dataset)\n dl_args['shuffle'] = False\n\n dl_args['sampler'] = sampler\n dataloader = DataLoader(**dl_args)\n\n return dataloader\n\n def reset_train_dataloader(self, model: LightningModule) -> None:\n \"\"\"Resets the train dataloader and initialises required variables\n (number of batches, when to validate, etc.).\n\n Args:\n model: The current `LightningModule`\n \"\"\"\n self.train_dataloader = self.request_dataloader(model.train_dataloader)\n self.num_training_batches = 0\n\n # automatically add samplers\n self.train_dataloader = self.auto_add_sampler(self.train_dataloader, train=True)\n\n self._percent_range_check('train_percent_check')\n\n if not _has_len(self.train_dataloader):\n self.num_training_batches = float('inf')\n else:\n # try getting the length\n self.num_training_batches = len(self.train_dataloader)\n self.num_training_batches = int(self.num_training_batches * self.train_percent_check)\n\n # determine when to check validation\n # if int passed in, val checks that often\n # otherwise, it checks in [0, 1.0] % range of a training epoch\n if isinstance(self.val_check_interval, int):\n self.val_check_batch = self.val_check_interval\n if self.val_check_batch > self.num_training_batches:\n raise ValueError(\n f'`val_check_interval` ({self.val_check_interval}) must be less than or equal '\n f'to the number of the training batches ({self.num_training_batches}). '\n 'If you want to disable validation set `val_percent_check` to 0.0 instead.')\n else:\n if not _has_len(self.train_dataloader):\n if self.val_check_interval == 1.0:\n self.val_check_batch = float('inf')\n else:\n raise MisconfigurationException(\n 'When using an infinite DataLoader (e.g. with an IterableDataset or when '\n 'DataLoader does not implement `__len__`) for `train_dataloader`, '\n '`Trainer(val_check_interval)` must be `1.0` or an int. 
An int k specifies '\n 'checking validation every k training batches.')\n else:\n self._percent_range_check('val_check_interval')\n\n self.val_check_batch = int(self.num_training_batches * self.val_check_interval)\n self.val_check_batch = max(1, self.val_check_batch)\n\n def _reset_eval_dataloader(self, model: LightningModule,\n mode: str) -> Tuple[int, List[DataLoader]]:\n \"\"\"Generic method to reset a dataloader for evaluation.\n\n Args:\n model: The current `LightningModule`\n mode: Either `'val'` or `'test'`\n\n Returns:\n Tuple (num_batches, dataloaders)\n \"\"\"\n dataloaders = self.request_dataloader(getattr(model, f'{mode}_dataloader'))\n\n if not isinstance(dataloaders, list):\n dataloaders = [dataloaders]\n\n # add samplers\n dataloaders = [self.auto_add_sampler(dl, train=False) for dl in dataloaders if dl]\n\n num_batches = 0\n\n # determine number of batches\n # datasets could be none, 1 or 2+\n if len(dataloaders) != 0:\n for dataloader in dataloaders:\n if not _has_len(dataloader):\n num_batches = float('inf')\n break\n\n percent_check = getattr(self, f'{mode}_percent_check')\n\n if num_batches != float('inf'):\n self._percent_range_check(f'{mode}_percent_check')\n\n num_batches = sum(len(dataloader) for dataloader in dataloaders)\n num_batches = int(num_batches * percent_check)\n elif percent_check not in (0.0, 1.0):\n raise MisconfigurationException(\n 'When using an infinite DataLoader (e.g. with an IterableDataset or when '\n f'DataLoader does not implement `__len__`) for `{mode}_dataloader`, '\n f'`Trainer({mode}_percent_check)` must be `0.0` or `1.0`.')\n return num_batches, dataloaders\n\n def reset_val_dataloader(self, model: LightningModule) -> None:\n \"\"\"Resets the validation dataloader and determines the number of batches.\n\n Args:\n model: The current `LightningModule`\n \"\"\"\n if self.is_overriden('validation_step'):\n self.num_val_batches, self.val_dataloaders =\\\n self._reset_eval_dataloader(model, 'val')\n\n def reset_test_dataloader(self, model) -> None:\n \"\"\"Resets the validation dataloader and determines the number of batches.\n\n Args:\n model: The current `LightningModule`\n \"\"\"\n if self.is_overriden('test_step'):\n self.num_test_batches, self.test_dataloaders =\\\n self._reset_eval_dataloader(model, 'test')\n\n def request_dataloader(self, dataloader_fx: Callable) -> DataLoader:\n \"\"\"Handles downloading data in the GPU or TPU case.\n\n Args:\n dataloader_fx: The bound dataloader getter\n\n Returns:\n The dataloader\n \"\"\"\n dataloader = dataloader_fx()\n\n # get the function we'll use to get data\n if self.use_ddp or self.use_ddp2:\n # all processes wait until data download has happened\n torch_distrib.barrier()\n\n # data download/load on TPU\n elif self.use_tpu and XLA_AVAILABLE:\n # all processes wait until data download has happened\n torch_xla.core.xla_model.rendezvous('pl.TrainerDataLoadingMixin.get_dataloaders')\n\n return dataloader\n\n def determine_data_use_amount(self, train_percent_check: float, val_percent_check: float,\n test_percent_check: float, overfit_pct: float) -> None:\n \"\"\"Use less data for debugging purposes\n \"\"\"\n self.train_percent_check = train_percent_check\n self.val_percent_check = val_percent_check\n self.test_percent_check = test_percent_check\n if overfit_pct > 0:\n if overfit_pct > 1:\n raise ValueError(\n f'`overfit_pct` must be not greater than 1.0, but got {overfit_pct:.3f}.')\n\n self.train_percent_check = overfit_pct\n self.val_percent_check = overfit_pct\n self.test_percent_check = 
overfit_pct\n"
]
| [
[
"torch.distributed.barrier",
"torch.utils.data.DataLoader",
"torch.utils.data.distributed.DistributedSampler"
]
]
|
mintproject/topoflow36 | [
"1dd25ce1b37cef129c8ee74b30af851658a7a8d9",
"1dd25ce1b37cef129c8ee74b30af851658a7a8d9"
]
| [
"topoflow/utils/idl_func.py",
"topoflow/utils/regrid_LAST.py"
]
| [
"\n# idl_func.py\n\n# Author: Dr. Scott D. Peckham, INSTAAR, Univ. of Colorado\n# Created: August 14, 2008\n\nfrom numpy import *\nimport numpy\nimport os\nimport os.path\nimport re\nimport platform\n \n#------------------------------------------\n# Define the \"bunch\" class for structures\n#----------------------------------------------\n# Note: IDL allows fields in a structure to\n# be accessed by their \"tag index\", as in:\n# <struct>.(<index>).\n# We can get the same thing here as follows:\n# y = bunch(a=5.0, b=1.0)\n# print y.__dict__.values()[0]\n#----------------------------------------------\nclass bunch:\n \"\"\"Used by I2PY to simulate IDL structures\"\"\"\n def __init__(self, **kwds):\n self.__dict__.update(kwds)\n \n#------------------------\n# IDL's BYTE function\n#------------------------\ndef byte(s):\n \"\"\"Reproduces IDL's BYTE function\"\"\"\n #---------------------------------------------------------\n # Note: IDL's BYTE function is overloaded.\n # If arg is a string, then a byte array of ASCII\n # ordinal values is returned.\n # Otherwise, if arg is scalar or array, type\n # is converted to (unsigned) byte. \n #--------------------------------------------------------- \n # If called with more than 1 arg, then it can\n # be used to extract 1 byte of data from 1st arg.\n # This part is not supported yet.\n #---------------------------------------------------------\n # is_string = not(str(s).isdigit) # (not as general)\n is_string = (type(s) == type('abc'))\n if (is_string):\n result = array(list(map(ord, s)), copy=0)\n else:\n result = array(s, copy=0).astype('uint8')\n # Is \"mod\" redundant in next line ??\n # result = array(mod(s,256), copy=0).astype('uint8')\n #----------------------------\n # This crashes Python/NumPy\n #----------------------------\n # result = mod(array(s, copy=0).astype('uint8'), 256)\n # result = array(s, copy=0).astype('uint8') % 256\n return result\n\n\n#--------------------------\n# IDL's BYTSCL function\n#--------------------------\ndef bytscl(a, max=None, min=None, top=255):\n \"\"\"Reproduces IDL's BYTSCL function\"\"\"\n amax = max\n amin = min\n if (amax is None): amax = a.max()\n if (amin is None): amin = a.min()\n top = (top % 256) # (force to byte range)\n\n #--------------------------------------\n # Case of integer or float argument ?\n #--------------------------------------\n type_str = str(a.dtype)\n if (type_str[0] == 'i'): \n b = ((top + 1) * (a - amin) - 1)/(amax - amin)\n else:\n b = (top + 0.9999) * (a - amin) / (amax - amin)\n return b\n\n#---------------------------------\n# IDL's !d.name system variable\n#---------------------------------\ndef device_name():\n \"\"\"Reproduces IDL's !d.name system variable\"\"\"\n #---------------------------------------------------\n # Note: Checked that !d.name on new Mac is 'X'.\n # Presumably will get 'X' on any Unix/Linus.\n #---------------------------------------------------\n sys_name = platform.system()\n if (sys_name == 'Windows'): return 'WIN'\n elif (sys_name == 'Darwin'): return 'X'\n else: return 'X'\n\n#---------------------------------------\n# IDL's EOF function (no longer used)\n#---------------------------------------\ndef eof(file_obj):\n \"\"\"Reproduces IDL's EOF function\"\"\"\n #-------------------------------------------\n # This recomputes file_size every time it\n # is called (as in a while loop), so it\n # has an extra cost. 
It is also not the\n # \"Python way\", but seems to work.\n #-------------------------------------------\n file_size = os.path.getsize(file_obj.name)\n file_pos = file_obj.tell()\n return (file_pos == file_size)\n\n#-------------------------------\n# IDL's FILE_DELETE procedure\n#-------------------------------\ndef file_delete(*files):\n \"\"\"Reproduces IDL's FILE_DELETE procedure\"\"\"\n for path in files:\n print(path)\n if (os.path.exists(path)):\n if (os.path.isdir(path)):\n os.rmdir(path)\n else:\n os.remove(path)\n \n#------------------------\n# IDL's FSTAT function\n#------------------------\ndef fstat(file_obj):\n \"\"\"Reproduces IDL's FSTAT function\"\"\"\n #-----------------------------------------------\n # Note: For IDL routines like FSTAT that take\n # the file unit number as an argument,\n # we instead pass a Python file object,\n # with a name constructed from the unit\n # variable name.\n #-----------------------------------------------\n # Note: Unsupported fields are:\n # uisagui, interactive, xdr, compress,\n # atime, ctime, mtime, transfer_count,\n # cur_ptr, rec_len.\n #-----------------------------------------------\n # If file is open for update, mode may\n # be 'U', and both \"read\" and \"write\"\n # should probably be True. ???\n #-----------------------------------------------\n file_size = os.path.getsize(file_obj.name)\n stat = bunch(size=file_size, \\\n name=file_obj.name, \\\n open=not(file_obj.closed), \\\n read=('r' in file_obj.mode), \\\n write=('w' in file_obj.mode), \\\n unit=(file_obj.fileno()), \\\n isatty=file_obj.isatty() )\n return stat\n\n#------------------------------\n# IDL's KEYWORD_SET function\n# (this one not used now)\n#------------------------------\ndef keyword_set(arg):\n \"\"\"Simulates IDL's KEYWORD_SET function\"\"\"\n return (arg is not None) and (arg != 0)\n\n#-----------------------------\n# IDL's N_ELEMENTS function\n#-----------------------------\ndef n_elements(arg):\n \"\"\"Simulates IDL's N_ELEMENTS function\"\"\"\n \n zero = numpy.int32(0) # returns a numpy object\n if ('arg' in locals()):\n #--------------------------------------------------------\n # Note: An unset IDL keyword will be assigned a default\n # value of \"None\" by I2PY and will therefore be\n # found in locals(). We want to return 0 in this\n # case as well. (numpy.size(None) equals 1.)\n #--------------------------------------------------------\n if (arg is None):\n return zero\n else:\n return numpy.size(arg)\n else:\n return zero\n \n#------------------------\n# IDL's READF function\n#------------------------\ndef readf(file_obj, *args, **keys):\n \"\"\"Simulates IDL's READF function\"\"\"\n #-----------------------------------------\n # Note: Also check out the \"csv\" module.\n #-----------------------------------------\n## args_out = list(args) # to allow assignment\n \n## args_out = []\n## for aa in args:\n## args_out.append(aa)\n\n str_type = type('abc')\n \n if not('format' in keys): \n line = file_obj.readline()\n if (len(args) == 1) and (type(args[0]) == str_type):\n #-----------------------------------------\n # Read entire line into a string\n # This assumes FORMAT keyword is not set\n #----------------------------------------------\n # 12/2/08. Bug fix. 
Return line itself to\n # avoid changing \"line\" from 'str' to 'list'\n #---------------------------------------------\n return line\n ## args_out[0] = line\n else: \n args_out = list(args) # to allow assignment\n vals = line.split()\n \n for k in range(len(args)):\n #---------------------------------------------\n # This works for scalar numbers and strings,\n # but doesn't work for arrays yet.\n #---------------------------------------------\n ## print 'type(args[k]) =', type(args[k]) #########\n \n if (type(args[k]) == str_type):\n args_out[k] = numpy.array(vals[k])\n else:\n dtype = str(args[k].dtype)\n # print 'dtype =', dtype\n \n if (dtype[:2] == '|S'):\n args_out[k] = numpy.array(vals[k])\n else:\n args_out[k] = numpy.array(vals[k]).astype(dtype)\n # print 'args_out[k] =', args_out[k]\n else:\n #-----------------------------------------------------\n # Assume values to read are separated by white space\n #-----------------------------------------------------\n # Use the string() function below somehow ??\n #----------------------------------------------\n args_out = list(args) # to allow assignment\n format = keys['format']\n format = format.replace('(','')\n format = format.replace(')','')\n format = format.replace(' ','')\n parts = format.split(',')\n for k in range(len(args)):\n #-----------------------------------\n # Extract n from formatting string\n #-----------------------------------------------\n # There shouldn't be any \"x\" codes for reading ?\n #-----------------------------------------------\n # if ('x' in parts[k]): ...\n r = re.split('[a-zA-Z]', parts[k])\n n = int(eval(r[1]))\n val_str = file_obj.read(n)\n dtype = str(args[k].dtype)\n if (dtype[:2] == '|S'):\n args_out[k] = numpy.array(val_str)\n else:\n args_out[k] = numpy.array(val_str).astype(dtype)\n\n## print 'args_out =', args_out\n## print ' '\n\n # Need this. 
\n if (len(args) == 1):\n return args_out[0]\n else:\n return args_out\n\n#------------------------\n# IDL's READS function\n#------------------------\ndef reads(line, *args, **keys):\n \"\"\"Simulates IDL's READS function\"\"\"\n #-----------------------------------------\n # Note: Also check out the \"csv\" module.\n #-----------------------------------------\n args = list(args) # to allow assignment\n if not('format' in keys): \n if (len(args) == 1) and (type(args[0]) == type('abc')):\n #-----------------------------------------\n # Read entire line into a string\n # This assumes FORMAT keyword is not set\n #-----------------------------------------\n args[0] = line\n else:\n vals = line.split()\n for k in range(len(args)):\n #---------------------------------------------\n # This works for scalar numbers and strings,\n # but doesn't work for arrays yet.\n #---------------------------------------------\n dtype = str(args[k].dtype)\n if (dtype[:2] == '|S'):\n args[k] = numpy.array(vals[k])\n else:\n args[k] = numpy.array(vals[k]).astype(dtype)\n else: \n #-------------------------------------\n # Use the string() function below ??\n #-------------------------------------\n format = keys['format']\n format = format.replace('(','')\n format = format.replace(')','')\n format = format.replace(' ','')\n parts = format.split(',')\n pos = 0\n for k in range(len(args)):\n #-----------------------------------\n # Extract n from formatting string\n #-----------------------------------------------\n # There shouldn't be any \"x\" codes for reading ?\n #-----------------------------------------------\n # if ('x' in parts[k]): ...\n r = re.split('[a-zA-Z]', parts[k])\n n = int(eval(r[1]))\n val_str = line[pos: pos+n-1]\n pos += n\n dtype = str(args[k].dtype)\n if (dtype[:2] == '|S'):\n args[k] = numpy.array(val_str)\n else:\n args[k] = numpy.array(val_str).astype(dtype)\n \n return args\n\n#--------------------------------------------\n# IDL's SINDGEN function (no longer used)\n# (only 1 argument supported now)\n#--------------------------------------------\ndef sindgen(n):\n \"\"\"Reproduces IDL's SINDGEN function\"\"\"\n a = arange(n, dtype='int32')\n return list(map(str, a))\n\n## s = []\n## for k in a: s.append(str(a[k]).rjust(12))\n## return s\n\n#---------------------------------------\n# IDL's SIZE function (not used yet)\n#---------------------------------------\ndef size(a, n_dimensions=False, n_elements=False, dimensions=False, \\\n _type=False):\n \"\"\"Reproduces IDL's SIZE function\"\"\"\n #---------------------------------------------------------\n # By default, IDL's SIZE function returns a vector of\n # values that describe an object's dimensions and type.\n #---------------------------------------------------------\n ndim_a = numpy.ndim(a)\n if (n_dimensions): return ndim_a\n if (n_elements): return numpy.size(a)\n if (dimensions):\n s = numpy.zeros(ndim_a, dtype='int32')\n #----------------------------------------\n # IDL returns dimension in reverse order\n #----------------------------------------\n for k in range(ndim_a):\n j = (ndim_a - k - 1)\n s[j] = numpy.size(a, k)\n return s\n if (_type):\n type_code = {'uint8':1, 'int16':2, 'int32':3, 'float32':4, \\\n 'float64':5, 'complex32':6, 'str':7}\n type_str = str(a.dtype)\n if (type_str[:2] == '|S'): type_str = 'str' ###\n return type_code[type_str]\n \n #---------------------------------------------------\n # Otherwise, return an integer array like IDL does\n # NB! 
IDL returns dimension in reverse order.\n #---------------------------------------------------\n s = numpy.zeros(ndim_a + 3, dtype='int32')\n s[0] = ndim_a\n for k in range(ndim_a):\n j = (ndim_a - k) # (now, don't subtract 1)\n s[j] = numpy.size(a, k)\n\n type_code = {'uint8':1, 'int16':2, 'int32':3, 'float32':4, \\\n 'float64':5, 'complex32':6, 'str':7}\n type_str = str(a.dtype)\n if (type_str[:2] == '|S'): type_str = 'str' ###\n s[ndim_a + 1] = type_code[type_str] \n s[ndim_a + 2] = numpy.size(a)\n return s \n\n#-------------------------\n# IDL's STRING function\n#-------------------------\ndef string(*args, **keys):\n \"\"\"Reproduces IDL's STRING function\"\"\"\n #-------------------------------------------------------------\n # Note: Now supports case where there is a single,\n # byte array argument and converts to string. i.e.\n \n # a = numpy.array([72,101,108,108,111], dtype='uint8')\n # string(a) returns \"Hello\"\n #-------------------------------------------------------------\n # Only the FORMAT keyword is supported so far.\n # IDL's STRING has the additional keywords:\n # AM_PM, DAYS_OF_WEEK, MONTHS, STDIO_NON_FINITE\n #-------------------------------------------------------------\n #print 'keys =', keys # equals \"{}\" if no keys\n FORMATTED = 'format' in keys\n if (FORMATTED):\n format = keys['format']\n if (format == None): FORMATTED=False\n \n## print 'format =', format\n## print 'type(format) =', type(format) ########\n \n if not(FORMATTED):\n SINGLE = (len(args) == 1)\n NDARRAY = (str(type(args[0])) == \"<type 'numpy.ndarray'>\")\n if (SINGLE and NDARRAY):\n BYTE_TYPE = (args[0].dtype == 'uint8')\n if (BYTE_TYPE):\n result_str = ''.join(map(chr, args[0]))\n else:\n result_str = ' '.join(map(str, args))\n else:\n result_str = ' '.join(map(str, args))\n return result_str\n\n #--------------------------------------------------\n # Convert IDL formatting string to Python version\n #--------------------------------------------------\n # First, remove all parentheses, not just ones at\n # the beginning and end. Remove all spaces, too.\n #--------------------------------------------------\n # Convert \"args\" from tuple to list. We need it\n # to be mutable so we can use insert() method to\n # handle case of \"4x\", etc. 
But near the end we\n # need a tuple again.\n #--------------------------------------------------\n ### format = str(format) #######\n \n args = list(args)\n f_str = format.replace(')','') ###\n f_str = f_str.replace('(','')\n f_str = f_str.replace(' ','')\n f_str = f_str.lower() ###\n\n parts = f_str.split(',')\n for m in range(len(parts)):\n s = parts[m]\n pos = s.find('x')\n if (pos == -1):\n if (s[0].isalpha()):\n #-----------------------------------------\n # Move \"format letter\" from start to end\n #-----------------------------------------\n parts[m] = s[1:len(s)] + s[0]\n else:\n #-------------------------------------- \n # Process a \"repetition count\" before\n # the \"format letter\".\n #--------------------------------------\n rep_str = ''\n len_str = ''\n FOUND = False\n for c in s:\n if not(FOUND):\n if c.isalpha():\n code = c\n FOUND = True\n else:\n rep_str += c\n else:\n len_str += c\n nreps = eval(rep_str)\n parts[m] = '%'.join(numpy.repeat(len_str + code, nreps))\n else: \n #-----------------------------------------\n # Insert \"blank\" strings into i to match\n # places with \"<n>x\" string formatting\n #-----------------------------------------\n nstr = s.replace('x','')\n nstr = nstr.replace(' ','')\n blank = ''.ljust(eval(nstr))\n args.insert( m+1, blank ) ### (m+1) ###\n\n # print 'args =', args\n # print 'len(args) =', len(args)\n \n f_str = \"%\" + \"%\".join(parts)\n f_str = f_str.replace('d', 'f') # for floats\n f_str = f_str.replace('i', 'd') # for integers\n f_str = f_str.replace('a', 's') # for strings\n f_str = f_str.replace('x', 's') # see above \n\n## print 'f_str =', f_str\n## print 'args =', args\n\n result_str = f_str % tuple(args) \n return result_str\n\n",
"\n# Copyright (c) 2019, Scott D. Peckham\n# August 2019\n\n#-------------------------------------------------------------------\n\n# regrid_geotiff_to_dem()\n# read_nc_grid()\n# gdal_open_nc_file()\n# get_raster_bounds()\n# bounds_disjoint()\n# gdal_regrid_to_dem_grid()\n# resave_grid_to_geotiff()\n# create_rts_from_nc_files()\n\n#-------------------------------------------------------------------\n# Set up a \"tf4\" conda environment (TopoFlow 4.0)\n#-------------------------------------------------------------------\n# % conda create --name tf4\n# % conda activate tf4\n# % conda install -c conda-forge gdal (to read geotiff)\n# % conda install -c conda-forge scipy (for gamma function)\n# (use conda-forge vs. anaconda; broken?)\n# % conda install -c conda-forge pydap\n# % conda install dask\n\n#-------------------------------------------------------------------\nimport numpy as np\ntry:\n from osgeo import gdal\nexcept ImportError:\n import gdal\nimport glob\n\n# import os.path\n#-------------------------------------------------------------------\ndef regrid_geotiff_to_dem(in_file=None, out_file=None, \n DEM_bounds=None, DEM_xres=None, DEM_yres=None ):\n\n #---------------------------------------------------------------\n # Note: DEM_bounds = [dem_xmin, dem_ymin, dem_xmax, dem_ymax]\n # Give xres, yres in decimal degrees for Geographic.\n # gdal.Warp() clips to a bounding box, and can also\n # resample to a different resolution.\n # gdal.Translate() is faster for simple clipping.\n #---------------------------------------------------------------\n if (in_file == None):\n #-----------------------------------------------------------\n # Use Pongo_30sec DEM as a test, which works well.\n # However, the soil data has same resolution (xres, yres)\n # as the DEM, of 30 arcseconds. 
In addition, grid cells\n # outside of South Sudan have NODATA values.\n #-----------------------------------------------------------\n in_file = 'SLTPPT_M_sl1_1km_South Sudan.tiff'\n out_file = 'Pongo_SLTPPT_sl1.tiff'\n DEM_bounds = [24.079583333333, 6.565416666666, 27.379583333333, 10.132083333333 ]\n DEM_xres = 1./120 # (30 arcsecs = 30/3600 degrees)\n DEM_yres = 1./120 # (30 arcsecs = 30/3600 degrees)\n \n f1 = gdal.Open( in_file, gdal.GA_ReadOnly )\n ## data_xres = f1.RasterXsize\n ### data_yres = f1.RasterYsize\n # print( f1.RasterCount )\n # print( data_xres, data_yres )\n \n out_unit = gdal.Warp( out_file, f1,\n format = 'GTiff', # (output format string)\n outputBounds=DEM_bounds, xRes=DEM_xres, yRes=DEM_yres,\n resampleAlg = gdal.GRA_Bilinear )\n ## resampleAlg = gdal.GRA_NearestNeighbour ) \n # (near, bilinear, cubic, cubicspline, lanczos, average, etc.)\n out_unit = None # Close out_file\n\n #-------------------------------------------------------- \n # Example: Use gdal.Translate to clip to bounding box.\n #-------------------------------------------------------- \n # ds = gdal.Open('original.tif')\n # ds = gdal.Translate('new.tif', ds, projWin = [-75.3, 5.5, -73.5, 3.7])\n # ds = None\n\n #-------------------------------------------------------- \n # This shows some of the other keywords to gdal.Warp.\n #-------------------------------------------------------- \n # WarpOptions(options=[], format=None, outputBounds=None,\n # outputBoundsSRS=None, xRes=None, yRes=None, targetAlignedPixels=False,\n # width=0, height=0, srcSRS=None, dstSRS=None, srcAlpha=False,\n # dstAlpha=False, warpOptions=None, errorThreshold=None,\n # warpMemoryLimit=None, creationOptions=None, outputType=GDT_Unknown,\n # workingType=GDT_Unknown, resampleAlg=None, srcNodata=None,\n # dstNodata=None, multithread=False, tps=False, rpc=False,\n # geoloc=False, polynomialOrder=None, transformerOptions=None,\n # cutlineDSName=None, cutlineLayer=None, cutlineWhere=None,\n # cutlineSQL=None, cutlineBlend=None, cropToCutline=False,\n # copyMetadata=True, metadataConflictValue=None,\n # setColorInterpretation=False, callback=None, callback_data=None)\n # \n # Create a WarpOptions() object that can be passed to gdal.Warp()\n # Keyword arguments are : options --- can be be an array of strings,\n # a string or let empty and filled from other keywords.\n\n \n# regrid_geotiff_to_dem()\n#-------------------------------------------------------------------\n# def download_data():\n# \n# from pydap.client import open_url\n# from pydap.cas.urs import setup_session\n# dataset_url = 'http://server.example.com/path/to/dataset'\n# session = setup_session(username, password, check_url=dataset_url)\n# dataset = open_url(dataset_url, session=session)\n# \n# # download_data()\n#-------------------------------------------------------------------\ndef read_nc_grid( nc_file=None, var_name='HQprecipitation',\n REPORT=False):\n\n if (nc_file == None):\n nc_file = 'TEST.nc4'\n\n ds = gdal.Open(\"NETCDF:{0}:{1}\".format(nc_file, layer_name))\n grid = ds.ReadAsArray(0, 0, ds.RasterXSize, ds.RasterYSize)\n ds = None # (close ds)\n \n if (REPORT):\n print(( 'grid.min() =', grid.min() ))\n print(( 'grid.max() =', grid.max() ))\n print(('grid.shape =', grid.shape ))\n\n return grid\n \n #--------------------\n # This doesn't work\n #--------------------\n# ds = gdal.Open( nc_file )\n# # print( ds.RasterCount )\n# # print( ds.RasterYSize, ds.RasterXsize )\n# data = ds.ReadAsArray()\n# # print( data.shape )\n# print( data.min() )\n# print( 
data.max() )\n# ds = None # (close ds)\n \n# read_nc_grid()\n#-------------------------------------------------------------------\n# def read_nc_as_array( nc_file=None, var_name='HQprecipitation',\n# REPORT=False):\n# \n# ds = gdal.Open( nc_file )\n# if (ds is None):\n# print( 'Open failed.')\n# sys.exit()\n# \n# if (ds.GetSubDatasets() >= 1):\n# subdataset = 'NETCDF:\"' + nc_file + '\":' + var_name\n# ds_sd = gdal.Open( subdataset )\n# NDV = ds_sd.GetRasterBand(1).GetNoDataValue()\n# ncols = ds_sd.RasterXsize\n# nrows = ds_sd.RasterYsize\n# GeoT = ds_sd.GetGeoTransform()\n# ds = None\n# ds_sd = None \n# \n# # read_nc_as_array()\n#------------------------------------------------------------------- \ndef gdal_open_nc_file( nc_file, var_name, VERBOSE=False):\n\n ### ds_in = gdal.Open(\"NETCDF:{0}:{1}\".format(nc_file, var_name), gdal.GA_ReadOnly )\n ds_in = gdal.Open(\"NETCDF:{0}:{1}\".format(nc_file, var_name) )\n band = ds_in.GetRasterBand(1)\n nodata = band.GetNoDataValue()\n\n g1 = band.ReadAsArray()\n ## g1 = ds_in.ReadAsArray(0, 0, ds_in.RasterXSize, ds_in.RasterYSize)\n\n if (VERBOSE):\n print(( 'grid1: min =', g1.min(), 'max =', g1.max() ))\n print(( 'grid1.shape =', g1.shape ))\n print(( 'grid1.dtype =', g1.dtype ))\n print(( 'grid1 nodata =', nodata ))\n print( ' ' )\n\n return (ds_in, g1, nodata)\n\n# gdal_open_nc_file()\n#------------------------------------------------------------------- \ndef get_raster_bounds( ds, VERBOSE=True):\n\n #-------------------------------------------------------------\n # Note: The bounds depend on the map projection and are not\n # necessarily a Geographic bounding box of lons and lats. \n #-------------------------------------------------------------\n # ulx = upper left x = xmin\n # uly = upper left y = ymax\n # lrx = lower right x = xmax\n # lry = lower right y = ymin\n #-----------------------------\n ulx, xres, xskew, uly, yskew, yres = ds.GetGeoTransform()\n lrx = ulx + (ds.RasterXSize * xres)\n lry = uly + (ds.RasterYSize * yres)\n\n if (VERBOSE):\n print(('ulx, uly =', ulx, uly))\n print(('lrx, lry =', lrx, lry))\n print(('xres, yres = ', xres, yres))\n print(('xskew, yskew =', xskew, yskew))\n print('----------------------------------')\n\n return [ulx, lry, lrx, uly] # [xmin, ymin, xmax, ymax]\n\n# get_raster_bounds()\n#------------------------------------------------------------------- \ndef bounds_disjoint( bounds1, bounds2, VERBOSE=False):\n \n #-----------------------------------------------------------\n # Note. Assume both bounds are in same spatial reference\n # system (SRS), e.g. 
Geographic lons and lats.\n #------------------------------------------------------------------\n # https://gamedev.stackexchange.com/questions/586/\n # what-is-the-fastest-way-to-work-out-2d-bounding-box-intersection\n #------------------------------------------------------------------ \n b1_xmin = bounds1[0]\n b1_xmax = bounds1[2]\n b2_xmin = bounds2[0]\n b2_xmax = bounds2[2]\n# x_overlap1 = (b1_xmin < b2_xmin) and (b2_xmin < b1_xmax)\n# x_overlap2 = (b2_xmin < b1_xmin) and (b1_xmin < b2_xmax)\n# x_overlap = (x_overlap1 or x_overlap2)\n \n b1_ymin = bounds1[1]\n b1_ymax = bounds1[3]\n b2_ymin = bounds2[1]\n b2_ymax = bounds2[3]\n# y_overlap1 = (b1_ymin < b2_ymin) and (b2_ymin < b1_ymax) \n# y_overlap2 = (b2_ymin < b1_ymin) and (b1_ymin < b2_ymax)\n# y_overlap = (y_overlap1 or y_overlap2)\n# return not(x_overlap and y_overlap)\n\n disjoint = (b2_xmin > b1_xmax) or (b2_xmax < b1_xmin) or \\\n (b2_ymax < b1_ymin) or (b2_ymin > b1_ymax)\n\n return disjoint\n \n# bounds_disjoint()\n#------------------------------------------------------------------- \ndef gdal_regrid_to_dem_grid( ds_in, tmp_file, \n nodata, DEM_bounds, DEM_xres, DEM_yres,\n RESAMPLE_ALGO='bilinear', VERBOSE=False):\n\n #----------------------------------- \n # Specify the resampling algorithm\n #-----------------------------------\n algo_dict = {\n 'nearest' : gdal.GRA_NearestNeighbour,\n 'bilinear' : gdal.GRA_Bilinear,\n 'cubic' : gdal.GRA_Cubic,\n 'cubicspline' : gdal.GRA_CubicSpline,\n 'lanczos' : gdal.GRA_Lanczos,\n 'average' : gdal.GRA_Average,\n 'min' : gdal.GRA_Min,\n 'max' : gdal.GRA_Max,\n 'mode' : gdal.GRA_Mode,\n 'med' : gdal.GRA_Med }\n \n resample_algo = algo_dict[ RESAMPLE_ALGO ]\n\n #--------------------------------------------------\n # Use gdal.Warp to clip and resample to DEM grid\n # then save results to a GeoTIFF file (tmp_file).\n #--------------------------------------------------\n ds_tmp = gdal.Warp( tmp_file, ds_in,\n format = 'GTiff', # (output format string)\n outputBounds=DEM_bounds, xRes=DEM_xres, yRes=DEM_yres,\n srcNodata=nodata, ########\n ### dstNodata=nodata, ########\n resampleAlg = resample_algo )\n\n grid = ds_tmp.ReadAsArray()\n \n ds_tmp = None # Close tmp_file\n \n return grid\n\n# gdal_regrid_to_dem_grid()\n#------------------------------------------------------------------- \ndef resave_grid_to_geotiff( ds_in, new_file, grid1, nodata ):\n\n new_nodata = -9999.0\n grid1[ grid1 <= nodata ] = new_nodata\n \n ##### raster = gdal.Open( nc_file )\n raster = ds_in\n ncols = raster.RasterXSize\n nrows = raster.RasterYSize\n\n geotransform = raster.GetGeoTransform()\n originX = geotransform[0]\n originY = geotransform[3]\n pixelWidth = geotransform[1]\n pixelHeight = geotransform[5]\n\n driver = gdal.GetDriverByName('GTiff')\n outRaster = driver.Create(new_file, ncols, nrows, 1, gdal.GDT_Float32)\n outRaster.SetGeoTransform((originX, pixelWidth, 0, originY, 0, pixelHeight))\n outband = outRaster.GetRasterBand(1)\n outband.WriteArray( grid1 )\n outRasterSRS = osr.SpatialReference()\n outRasterSRS.ImportFromWkt(raster.GetProjectionRef())\n outRaster.SetProjection(outRasterSRS.ExportToWkt())\n outband.FlushCache()\n\n# resave_grid_to_geotiff() \n#------------------------------------------------------------------- \ndef create_rts_from_nc_files( rts_file='TEST.rts',\n IN_MEMORY=False, VERBOSE=True):\n\n #------------------------------------------------------\n # For info on GDAL constants, see:\n # https://gdal.org/python/osgeo.gdalconst-module.html\n 
#------------------------------------------------------ \n if (rts_file == 'TEST.rts'):\n #-----------------------------------------------------------\n # Use Pongo_30sec DEM as a test, which works well.\n # However, the soil data has same resolution (xres, yres)\n # as the DEM, of 30 arcseconds. In addition, grid cells\n # outside of South Sudan have NODATA values.\n #-----------------------------------------------------------\n DEM_bounds = [24.079583333333, 6.565416666666, 27.379583333333, 10.132083333333 ]\n DEM_xres = 1./120 # (30 arcsecs = 30/3600 degrees)\n DEM_yres = 1./120 # (30 arcsecs = 30/3600 degrees)\n\n #----------------------------------------- \n # Use a temp file in memory or on disk ?\n #-----------------------------------------\n if (IN_MEMORY):\n tmp_file = '/vsimem/TEMP.tif'\n else:\n tmp_file = 'TEMP.tif'\n \n #------------------------- \n # Open RTS file to write\n #-------------------------\n rts_unit = open( rts_file, 'wb' )\n\n #------------------------------------------------\n # Get list of all nc files in working directory\n #------------------------------------------------\n ## nc_file_list = glob.glob( '*.nc4' )\n nc_file_list = glob.glob( '*.nc' )\n var_name = \"HQprecipitation\" # HQ = high quality; 1/2 hourly\n count = 0\n bad_count = 0\n \n for nc_file in nc_file_list:\n #-------------------------------\n # Open the original netCDF file\n #--------------------------------\n (ds_in, grid1, nodata) = gdal_open_nc_file( nc_file, var_name, VERBOSE=True)\n print(( gdal.Info( ds_in ) ))\n\n #----------------------------------------------- \n # Check if the bounding boxes actually overlap\n #-----------------------------------------------\n ds_bounds = get_raster_bounds( ds_in )\n if (bounds_disjoint( ds_bounds, DEM_bounds )):\n print( '###############################################')\n print( 'WARNING: Bounding boxes do not overlap.')\n print( ' New grid will contain only nodata.')\n print( '###############################################')\n print(( 'ds_bounds =', ds_bounds ))\n print(( 'DEM_bounds =', DEM_bounds ))\n print( ' ')\n bad_count += 1\n\n #-------------------------------------------\n # Replace nodata value and save as GeoTIFF\n #-------------------------------------------\n# new_file = 'TEMP2.tif'\n# resave_grid_to_geotiff( ds_in, new_file, grid1, nodata )\n# ds_in = None # Close the nc_file\n# ds_in = gdal.Open( new_file ) # Open the GeoTIFF file; new nodata\n\n #-------------------------------------------\n # Clip and resample data to the DEM's grid\n # then save to a temporary GeoTIFF file.\n #-------------------------------------------\n grid2 = gdal_regrid_to_dem_grid( ds_in, tmp_file,\n nodata, DEM_bounds, DEM_xres, DEM_yres,\n RESAMPLE_ALGO='bilinear' )\n print(( 'grid2: min =', grid2.min(), 'max =', grid2.max() ))\n print(( 'grid2.shape =', grid2.shape ))\n print(( 'grid2.dtype =', grid2.dtype ))\n print( ' ')\n ds_in = None # Close the tmp_file\n \n #--------------------------------------------\n # Read resampled data from tmp GeoTIFF file\n #--------------------------------------------\n ds_tmp = gdal.Open( tmp_file )\n ## ds_tmp = gdal.Open( tmp_file, gdal.GA_ReadOnly )\n ## print( gdal.Info( ds_tmp ) )\n grid3 = ds_tmp.ReadAsArray()\n print(( 'grid3: min, max =', grid3.min(), grid3.max() ))\n print(( 'grid3.shape =', grid3.shape))\n print(( 'grid3.dtype =', grid3.dtype))\n ds_tmp = None # Close tmp file\n \n if (IN_MEMORY):\n gdal.Unlink( tmp_file )\n \n #------------------------- \n # Write grid to RTS file\n 
#-------------------------\n grid3 = np.float32( grid3 )\n ## rts_unit.write( grid3 )\n grid3.tofile( rts_unit )\n count += 1\n if (VERBOSE):\n print(( 'count, min, max =', count, grid3.min(), grid3.max() )) #######\n \n if (count == 300): ##################################\n break\n\n #---------------------\n # Close the RTS file\n #---------------------\n rts_unit.close()\n\n print( ' ')\n print(( 'bad_count =', bad_count ))\n print(( 'n_grids =', count ))\n print( 'Finished saving data to rts file.')\n print( ' ')\n \n# create_rts_from_nc_files()\n#------------------------------------------------------------------- \n\n \n "
]
| [
[
"numpy.array",
"numpy.zeros",
"numpy.size",
"numpy.ndim",
"numpy.repeat",
"numpy.int32"
],
[
"numpy.float32"
]
]
|
GPrathap/rrt-algorithms | [
"b97af0b57306cdbc0e148f5c086345571d34e823"
]
| [
"examples/rrt_star/rrt_star_3d.py"
]
| [
"# This file is subject to the terms and conditions defined in\n# file 'LICENSE', which is part of this source code package.\nimport numpy as np\n\nfrom src.rrt.rrt_star import RRTStar\nfrom src.search_space.search_space import SearchSpace\nfrom src.utilities.plotting import Plot\nX_dimensions = np.array([(0, 100), (0, 100), (0, 100)]) # dimensions of Search Space\n# obstacles\nObstacles = np.array(\n [(20, 20, 20, 40, 40, 40), (20, 20, 60, 40, 40, 80), (20, 60, 20, 40, 80, 40), (60, 60, 20, 80, 80, 40),\n (60, 20, 20, 80, 40, 40), (60, 20, 60, 80, 40, 80), (20, 60, 60, 40, 80, 80), (60, 60, 60, 80, 80, 80)])\n\nObstacles = np.array(\n [(20, 20, 20, 40, 40, 40), (20, 20, 60, 40, 40, 80), (60, 60, 60, 80, 80, 80)])\n\n\n\nx_init = (0, 0, 0) # starting location\nx_goal = (100, 100, 100) # goal location\n\nQ = np.array([(8, 4)]) # length of tree edges\nr = 1 # length of smallest edge to check for intersection with obstacles\nmax_samples = 1024 # max number of samples to take before timing out\nrewire_count = 32 # optional, number of nearby branches to rewire\nprc = 0.1 # probability of checking for a connection to goal\n\n# create Search Space\nX = SearchSpace(X_dimensions, Obstacles)\n\n# create rrt_search\nrrt = RRTStar(X, Q, x_init, x_goal, max_samples, r, prc, rewire_count)\npath = rrt.rrt_star()\n\n# plot\nplot = Plot(\"rrt_star_3d\")\nplot.plot_tree(X, rrt.trees)\nif path is not None:\n plot.plot_path(X, path)\nplot.plot_obstacles(X, Obstacles)\nplot.plot_start(X, x_init)\nplot.plot_goal(X, x_goal)\nplot.draw(auto_open=True)\n"
]
| [
[
"numpy.array"
]
]
|
ryuwd/mplhep | [
"8a91aac0a2f1b867ece143cf37bdad6e65cd7c83"
]
| [
"tests/test_basic.py"
]
| [
"from __future__ import annotations\n\nimport os\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pytest\n\nos.environ[\"RUNNING_PYTEST\"] = \"true\"\n\nimport mplhep as hep # noqa: E402\n\n\"\"\"\nTo test run:\npytest --mpl\n\nWhen adding new tests, run:\npytest --mpl-generate-path=tests/baseline\n\"\"\"\n\nplt.switch_backend(\"Agg\")\n\n\[email protected]_image_compare(style=\"default\", remove_text=True)\ndef test_simple():\n fig, ax = plt.subplots(figsize=(10, 10))\n h = [1, 3, 2]\n bins = [0, 1, 2, 3]\n hep.histplot(h, bins, yerr=True, label=\"X\")\n ax.legend()\n return fig\n\n\[email protected]_image_compare(style=\"default\", remove_text=True)\ndef test_simple_xerr():\n fig, ax = plt.subplots(figsize=(10, 10))\n h = np.array([1, 3, 2])\n bins = [0, 1, 2, 4]\n hep.histplot(h, bins, yerr=True, histtype=\"errorbar\")\n hep.histplot(h * 2, bins, yerr=True, histtype=\"errorbar\", xerr=0.1)\n hep.histplot(h * 3, bins, yerr=True, histtype=\"errorbar\", xerr=True)\n return fig\n\n\[email protected]_image_compare(style=\"default\", remove_text=True)\ndef test_simple2d():\n fig, ax = plt.subplots()\n h = [[1, 3, 2], [1, 3, 2]]\n hep.hist2dplot(h)\n return fig\n\n\[email protected]_image_compare(style=\"default\", remove_text=True)\ndef test_log():\n fig, axs = plt.subplots(2, 2, figsize=(10, 10))\n for ax in axs[0]:\n hep.histplot([1, 2, 3, 2], range(5), ax=ax)\n ax.semilogy()\n for ax in axs[1]:\n hep.histplot([1, 2, 3, 2], range(5), ax=ax, edges=False)\n ax.semilogy()\n return fig\n\n\[email protected]_image_compare(style=\"default\", remove_text=True)\ndef test_histplot():\n np.random.seed(0)\n h, bins = np.histogram(np.random.normal(10, 3, 400), bins=10)\n\n fig, axs = plt.subplots(2, 2, sharex=True, sharey=True, figsize=(10, 10))\n axs = axs.flatten()\n\n axs[0].set_title(\"Default\", fontsize=18)\n hep.histplot(h, bins, ax=axs[0])\n\n axs[1].set_title(\"Plot No Edges\", fontsize=18)\n hep.histplot(h, bins, edges=False, ax=axs[1])\n\n axs[2].set_title(\"Plot Errorbars\", fontsize=18)\n hep.histplot(h, bins, yerr=np.sqrt(h), ax=axs[2])\n\n axs[3].set_title(\"Filled Histogram\", fontsize=18)\n hep.histplot(h, bins, histtype=\"fill\", ax=axs[3])\n\n fig.subplots_adjust(hspace=0.1, wspace=0.1)\n return fig\n\n\[email protected]_image_compare(style=\"default\", remove_text=True)\ndef test_histplot_density():\n np.random.seed(0)\n h, bins = np.histogram(np.random.normal(10, 3, 400), bins=10)\n\n fig, axs = plt.subplots(2, 2, sharex=True, sharey=True, figsize=(10, 10))\n axs = axs.flatten()\n\n axs[0].set_title(\"Default\", fontsize=18)\n hep.histplot(h, bins, ax=axs[0], density=True)\n\n axs[1].set_title(\"Plot No Edges\", fontsize=18)\n hep.histplot(h, bins, edges=False, ax=axs[1], density=True)\n\n axs[2].set_title(\"Plot Errorbars\", fontsize=18)\n hep.histplot(h, bins, yerr=np.sqrt(h), ax=axs[2], density=True)\n\n axs[3].set_title(\"Filled Histogram\", fontsize=18)\n hep.histplot(h, bins, histtype=\"fill\", ax=axs[3], density=True)\n\n fig.subplots_adjust(hspace=0.1, wspace=0.1)\n return fig\n\n\[email protected]_image_compare(style=\"default\", remove_text=True)\ndef test_histplot_multiple():\n np.random.seed(0)\n h, bins = np.histogram(np.random.normal(10, 3, 400), bins=10)\n\n fig, axs = plt.subplots(2, 2, sharex=True, sharey=True, figsize=(10, 10))\n axs = axs.flatten()\n\n axs[0].set_title(\"Default Overlay\", fontsize=18)\n hep.histplot([h, 1.5 * h], bins, ax=axs[0])\n\n axs[1].set_title(\"Default Overlay w/ Errorbars\", fontsize=18)\n hep.histplot([h, 1.5 * h], bins, 
yerr=[np.sqrt(h), np.sqrt(1.5 * h)], ax=axs[1])\n\n axs[2].set_title(\"Automatic Errorbars\", fontsize=18)\n hep.histplot([h, 1.5 * h], bins, yerr=True, ax=axs[2])\n\n axs[3].set_title(\"With Labels\", fontsize=18)\n hep.histplot([h, 1.5 * h], bins, yerr=True, ax=axs[3], label=[\"First\", \"Second\"])\n axs[3].legend(fontsize=16, prop={\"family\": \"Tex Gyre Heros\"})\n\n fig.subplots_adjust(hspace=0.1, wspace=0.1)\n return fig\n\n\[email protected]_image_compare(style=\"default\", remove_text=True)\ndef test_histplot_stack():\n np.random.seed(0)\n h, bins = np.histogram(np.random.normal(10, 3, 400), bins=10)\n\n fig, axs = plt.subplots(2, 2, sharex=True, sharey=True, figsize=(10, 10))\n axs = axs.flatten()\n\n axs[0].set_title(\"Default\", fontsize=18)\n hep.histplot([h, 1.5 * h], bins, stack=True, ax=axs[0])\n\n axs[1].set_title(\"Plot No Edges\", fontsize=18)\n hep.histplot([h, 1.5 * h], bins, edges=False, stack=True, ax=axs[1])\n\n axs[2].set_title(\"Plot Errorbars\", fontsize=18)\n hep.histplot(\n [h, 1.5 * h], bins, yerr=[np.sqrt(h), np.sqrt(h)], stack=True, ax=axs[2]\n )\n\n axs[3].set_title(\"Filled Histogram\", fontsize=18)\n hep.histplot([1.5 * h, h], bins, histtype=\"fill\", stack=True, ax=axs[3])\n\n fig.subplots_adjust(hspace=0.1, wspace=0.1)\n return fig\n\n\[email protected]_image_compare(style=\"default\", remove_text=True)\ndef test_hist2dplot():\n np.random.seed(0)\n xedges = np.arange(0, 11.5, 1.5)\n yedges = [0, 2, 3, 4, 6, 7]\n x = np.random.normal(5, 1.5, 100)\n y = np.random.normal(4, 1, 100)\n H, xedges, yedges = np.histogram2d(x, y, bins=(xedges, yedges))\n\n fig, ax = plt.subplots()\n hep.hist2dplot(H, xedges, yedges, labels=True)\n return fig\n\n\[email protected](\"cbarextend\", [False, True])\[email protected]_image_compare(style=\"default\", remove_text=True)\ndef test_hist2dplot_cbar(cbarextend):\n np.random.seed(0)\n xedges = np.arange(0, 11.5, 1.5)\n yedges = [0, 2, 3, 4, 6, 7]\n x = np.random.normal(5, 1.5, 100)\n y = np.random.normal(4, 1, 100)\n H, xedges, yedges = np.histogram2d(x, y, bins=(xedges, yedges))\n\n fig, ax = plt.subplots()\n hep.hist2dplot(H, xedges, yedges, labels=True, cbar=True, cbarextend=cbarextend)\n return fig\n\n\[email protected]_image_compare(style=\"default\", remove_text=True)\ndef test_hist2dplot_cbar_subplots():\n np.random.seed(0)\n xedges = np.arange(0, 11.5, 1.5)\n yedges = [0, 2, 3, 4, 6, 7]\n x = np.random.normal(5, 1.5, 100)\n y = np.random.normal(4, 1, 100)\n H, xedges, yedges = np.histogram2d(x, y, bins=(xedges, yedges))\n\n fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(14, 5))\n hep.hist2dplot(H, xedges, yedges, labels=True, cbar=True, ax=ax1)\n hep.hist2dplot(H * 2, xedges, yedges, labels=True, cbar=True, ax=ax2)\n return fig\n\n\[email protected]_image_compare(style=\"default\", remove_text=True)\ndef test_hist2dplot_custom_labels():\n np.random.seed(0)\n xedges = np.arange(0, 11.5, 1.5)\n yedges = [0, 2, 3, 4, 6, 7]\n x = np.random.normal(5, 1.5, 100)\n y = np.random.normal(4, 1, 100)\n H, xedges, yedges = np.histogram2d(x, y, bins=(xedges, yedges))\n\n fig, ax = plt.subplots()\n\n @np.vectorize\n def _fmt(x):\n return f\"${x:.2f}$\"\n\n hep.hist2dplot(H, xedges, yedges, labels=_fmt(H).T)\n return fig\n\n\[email protected]_image_compare(style=\"default\", remove_text=True)\ndef test_histplot_kwargs():\n np.random.seed(0)\n h, bins = np.histogram(np.random.normal(10, 3, 1000), bins=10)\n\n fig, axs = plt.subplots(2, 2, sharex=True, sharey=True, figsize=(10, 10))\n axs = axs.flatten()\n\n hep.histplot(\n [h * 2, h * 
1, h * 0.5],\n bins,\n label=[\"1\", \"2\", \"3\"],\n stack=True,\n histtype=\"step\",\n linestyle=\"--\",\n color=[\"green\", \"black\", (1, 0, 0, 0.4)],\n ax=axs[0],\n )\n axs[0].legend()\n\n hep.histplot(\n [h, h, h],\n bins,\n label=[\"1\", \"2\", \"3\"],\n stack=True,\n histtype=\"step\",\n linestyle=[\"--\", \":\"],\n color=(1, 0, 0, 0.8),\n ax=axs[1],\n )\n axs[1].legend()\n\n hep.histplot(\n [h, h, h],\n bins,\n label=[\"1\", \"2\", \"3\"],\n histtype=\"step\",\n binwnorm=[0.5, 3, 6],\n linestyle=[\"--\", \":\"],\n color=(1, 0, 0, 0.8),\n ax=axs[2],\n )\n axs[2].legend()\n\n hep.histplot(\n [h, h, h],\n bins,\n label=[\"1\", \"2\", \"3\"],\n histtype=\"fill\",\n binwnorm=[0.5, 3, 6],\n linestyle=[\"--\", \":\"],\n color=[\"green\", \"darkorange\", \"red\"],\n alpha=[0.4, 0.7, 0.2],\n ax=axs[3],\n )\n axs[3].legend()\n\n fig.subplots_adjust(hspace=0.1, wspace=0.1)\n return fig\n\n\[email protected]_image_compare(style=\"default\", remove_text=True)\ndef test_histplot_real():\n np.random.seed(0)\n h, bins = np.histogram(np.random.normal(10, 3, 1000), bins=10)\n\n fig, axs = plt.subplots(2, 2, figsize=(10, 10))\n axs = axs.flatten()\n a, b, c = h, h * 2, np.random.poisson(h * 3)\n\n hep.histplot(\n [a, b, c], bins=bins, ax=axs[0], yerr=True, label=[\"MC1\", \"MC2\", \"Data\"]\n )\n hep.histplot([a, b], bins=bins, ax=axs[1], stack=True, label=[\"MC1\", \"MC2\"])\n hep.histplot(\n [c], bins=bins, ax=axs[1], yerr=True, histtype=\"errorbar\", label=\"Data\"\n )\n\n hep.histplot(\n [a, b], bins=bins, ax=axs[2], stack=True, label=[\"MC1\", \"MC2\"], binwnorm=[2, 1]\n )\n hep.histplot(\n c,\n bins=bins,\n ax=axs[2],\n yerr=True,\n histtype=\"errorbar\",\n label=\"Data\",\n binwnorm=1,\n )\n hep.histplot(\n [a, b], bins=bins, ax=axs[3], stack=True, label=[\"MC1\", \"MC2\"], density=True\n )\n hep.histplot(\n c,\n bins=bins,\n ax=axs[3],\n yerr=True,\n histtype=\"errorbar\",\n label=\"Data\",\n density=True,\n )\n for ax in axs:\n ax.legend()\n axs[0].set_title(\"Raw\")\n axs[1].set_title(\"Data/MC\")\n axs[2].set_title(\"Data/MC binwnorm\")\n axs[3].set_title(\"Data/MC Density\")\n\n return fig\n\n\[email protected]_image_compare(style=\"default\", remove_text=True)\ndef test_histplot_w2():\n fig, ax = plt.subplots()\n hep.histplot([0, 3, 0], range(4), w2=np.array([0, 3, 0]))\n return fig\n\n\[email protected]_image_compare(style=\"default\", remove_text=True)\ndef test_histplot_types():\n hs, bins = [[2, 3, 4], [5, 4, 3]], [0, 1, 2, 3]\n fig, axs = plt.subplots(3, 2, figsize=(8, 12))\n axs = axs.flatten()\n\n for i, htype in enumerate([\"step\", \"fill\", \"errorbar\"]):\n hep.histplot(hs[0], bins, yerr=True, histtype=htype, ax=axs[i * 2], alpha=0.7)\n hep.histplot(hs, bins, yerr=True, histtype=htype, ax=axs[i * 2 + 1], alpha=0.7)\n\n return fig\n\n\nh = np.geomspace(1, 10, 10)\n\n\[email protected](\"h\", [h, [h, h], [h]])\[email protected](\"yerr\", [h / 4, [h / 4, h / 4], 4])\[email protected](\"htype\", [\"step\", \"fill\", \"errorbar\"])\ndef test_histplot_inputs_pass(h, yerr, htype):\n bins = np.linspace(1, 10, 11)\n\n fig, ax = plt.subplots()\n hep.histplot(h, bins, yerr=yerr, histtype=htype)\n plt.close(fig)\n"
]
| [
[
"matplotlib.pyplot.switch_backend",
"numpy.random.normal",
"numpy.array",
"numpy.histogram2d",
"numpy.random.seed",
"numpy.random.poisson",
"matplotlib.pyplot.close",
"matplotlib.pyplot.subplots",
"numpy.geomspace",
"numpy.arange",
"numpy.sqrt",
"numpy.linspace"
]
]
|
Siwensun/Neural-Diffeomorphic-Flow----NDF | [
"dabbee6546069cb9c2c14408f9188b80937b48dc"
]
| [
"evaluate.py"
]
| [
"#!/usr/bin/env python3\n# Copyright 2004-present Facebook. All Rights Reserved.\n\nimport argparse\nimport logging\nimport json\nimport numpy as np\nimport os\nimport trimesh\nfrom pathos.multiprocessing import ProcessPool as Pool\n\nimport deep_sdf\nimport deep_sdf.workspace as ws\n\n\ndef evaluate_one_instance(dataset, \n class_name, \n instance_name, \n experiment_directory, \n checkpoint, \n data_dir, \n test_or_train='test',\n correspondence_level=None, \n correspondence_pts_num=0):\n logging.debug(\n \"evaluating \" + os.path.join(dataset, class_name, instance_name)\n )\n if test_or_train == 'test':\n mesh_filename = ws.get_reconstructed_mesh_filename(\n experiment_directory, checkpoint, dataset, class_name, instance_name, correspondence_level, correspondence_pts_num\n )\n\n logging.debug(\n 'reconstructed mesh is \"' + mesh_filename + '\"'\n )\n else:\n mesh_filename = ws.get_trained_mesh_filename(\n experiment_directory, checkpoint, dataset, class_name, instance_name, correspondence_level, correspondence_pts_num\n )\n\n logging.debug(\n 'trained mesh is \"' + mesh_filename + '\"'\n )\n\n if not os.path.isfile(mesh_filename):\n print('[WARNING] Skipping %s as it doesn\\'t exists' % mesh_filename)\n return \"\", 0\n\n ground_truth_points_samples_filename = os.path.join(\n data_dir,\n \"SurfaceSamples\",\n dataset,\n class_name,\n instance_name + \".ply\",\n )\n\n logging.debug(\n \"ground truth points samples are \" + ground_truth_points_samples_filename\n )\n\n ground_truth_mesh_samples_filename = os.path.join(\n data_dir,\n \"MeshSamples\",\n dataset,\n class_name,\n instance_name + \".ply\",\n )\n\n logging.debug(\n \"ground truth mesh samples are \" + ground_truth_mesh_samples_filename\n )\n\n normalization_params_filename = os.path.join(\n data_dir,\n \"NormalizationParameters\",\n dataset,\n class_name,\n instance_name + \".npz\",\n )\n\n logging.debug(\n \"normalization params are \" + normalization_params_filename\n )\n\n ground_truth_points = trimesh.load(ground_truth_points_samples_filename)\n ground_truth_mesh = trimesh.load(ground_truth_mesh_samples_filename)\n reconstruction = trimesh.load(mesh_filename)\n\n if os.path.exists(normalization_params_filename):\n normalization_params = np.load(normalization_params_filename)\n else:\n normalization_params = {\"offset\": 0, \"scale\": 1}\n\n metrics = {}\n \n chamfer_dist = deep_sdf.metrics.chamfer.compute_trimesh_chamfer(\n ground_truth_points,\n reconstruction,\n normalization_params[\"offset\"],\n normalization_params[\"scale\"],\n )\n metrics = {**metrics, **chamfer_dist}\n\n earthmover_dist = deep_sdf.metrics.emd.compute_trimesh_emd(\n ground_truth_points,\n reconstruction,\n normalization_params[\"offset\"],\n normalization_params[\"scale\"],\n )\n metrics = {**metrics, **earthmover_dist}\n\n non_manifold = deep_sdf.metrics.non_manifold.calculate_manifoldness(reconstruction)\n metrics = {**metrics, **non_manifold}\n \n normal_consistency = deep_sdf.metrics.normal_consistency.compute_geometric_metrics_points(\n ground_truth_mesh,\n reconstruction\n )\n metrics = {**metrics, **normal_consistency}\n \n for key in metrics:\n logging.debug(f\"{key}: {metrics[key]}\")\n\n return os.path.join(dataset, class_name, instance_name), metrics\n\n\ndef evaluate(experiment_directory, checkpoint, data_dir, split_filename, \n test_or_train='test', correspondence_level=None, correspondence_pts_num = 0):\n\n with open(split_filename, \"r\") as f:\n split = json.load(f)\n\n results = []\n p = Pool(8)\n ds = []\n cn = []\n inn = []\n exd = 
[]\n ckp = []\n dtd = []\n tot = []\n cl = []\n cpn = []\n\n print('data_preparing')\n for dataset in split:\n for class_name in split[dataset]:\n for iii, instance_name in enumerate(split[dataset][class_name]):\n ds.append(dataset)\n cn.append(class_name)\n inn.append(instance_name)\n exd.append(experiment_directory)\n ckp.append(checkpoint)\n dtd.append(data_dir)\n tot.append(test_or_train)\n cl.append(correspondence_level)\n cpn.append(correspondence_pts_num)\n # results += [evaluate_one_instance(dataset, class_name, instance_name, experiment_directory,\n # checkpoint, data_dir, test_or_train, \n # corrspondence_level, correspondence_pts_num)]\n\n print('multi thread start')\n results = p.map(evaluate_one_instance, ds, cn, inn, exd, ckp, dtd, tot, cl, cpn)\n # print('results_length:', len(results))\n # print('q1', results[0])\n # print('q1 length:', len(results[0]))\n # print('q1', results[0])\n\n chamfer_dist_mean = np.mean([q[1]['chamfer_distance'] for q in results])\n chamfer_dist_median = np.median([q[1]['chamfer_distance'] for q in results])\n earth_mover_dist_mean = np.mean([q[1]['earthmover_distance'] for q in results])\n earth_mover_dist_median = np.median([q[1]['earthmover_distance'] for q in results])\n NMV_ratio_mean = np.mean([q[1]['NM-V'] for q in results])\n NMV_ratio_median = np.median([q[1]['NM-V'] for q in results])\n NME_ratio_mean = np.mean([q[1]['NM-E'] for q in results])\n NME_ratio_median = np.median([q[1]['NM-E'] for q in results])\n NMF_ratio_mean = np.mean([q[1]['NM-F'] for q in results])\n NMF_ratio_median = np.median([q[1]['NM-F'] for q in results])\n self_intersection_ratio_mean = np.mean([q[1]['self-intersection'] for q in results])\n self_intersection_ratio_median = np.median([q[1]['self-intersection'] for q in results])\n normal_consistency_mean = np.mean([q[1]['normal_consistency'] for q in results])\n normal_consistency_median = np.median([q[1]['normal_consistency'] for q in results])\n abs_normal_consistency_mean = np.mean([q[1]['abs_normal_consistency'] for q in results])\n abs_normal_consistency_median = np.median([q[1]['abs_normal_consistency'] for q in results])\n print(chamfer_dist_mean, chamfer_dist_median)\n print(earth_mover_dist_mean, earth_mover_dist_median)\n print(NMV_ratio_mean, NMV_ratio_median)\n print(NME_ratio_mean, NME_ratio_median)\n print(NMF_ratio_mean, NMF_ratio_median)\n print(self_intersection_ratio_mean, self_intersection_ratio_median)\n print(normal_consistency_mean, normal_consistency_median)\n print(abs_normal_consistency_mean, abs_normal_consistency_median)\n\n suffix = f'_{test_or_train}'\n if correspondence_level is not None:\n cl_suffix = correspondence_level\n cnp_suffix = correspondence_pts_num\n suffix += f'_{cl_suffix}_{cnp_suffix}'\n\n with open(\n os.path.join(\n ws.get_evaluation_dir(experiment_directory, checkpoint, True), f\"chamfer_and_emd_and_nonmanifold{suffix}.csv\"\n ),\n \"w\",\n ) as f:\n f.write(\"shape, chamfer_dist, earthmovers_dist, NMV_ratio, NME_ratio, NMF_ratio,\" +\\\n \" self_intersection_ratio, normal_consistency, abs_normal_consistency\\n\")\n for result in results:\n f.write(f\"{result[0]}, {result[1]['chamfer_distance']}, {result[1]['earthmover_distance']}, \" +\\\n f\"{result[1]['NM-V']}, {result[1]['NM-E']}, {result[1]['NM-F']}, {result[1]['self-intersection']}, \" +\\\n f\"{result[1]['normal_consistency']}, {result[1]['abs_normal_consistency']}\\n\")\n\n f.write(f\"CD_Mean, CD_Median, EMD_Mean, EMD_Median, NC_Mean, NC_Median, ANC_Mean, ANC_Median\\n\")\n f.write(f\"{chamfer_dist_mean}, 
{chamfer_dist_median}, {earth_mover_dist_mean}, {earth_mover_dist_median}, \" +\\\n f\"{normal_consistency_mean}, {normal_consistency_median}, {abs_normal_consistency_mean}, {abs_normal_consistency_median}\\n\")\n \n f.write(f\"NMV_Mean, NMV_Median, NME_Mean, NME_Median, NMF_Mean, NMF_Median, Self_Intersection_Mean, Self_Intersection_Median\\n\")\n f.write(f\"{NMV_ratio_mean}, {NMV_ratio_median}, {NME_ratio_mean}, {NME_ratio_median}, {NMF_ratio_mean}, {NMF_ratio_median}, {self_intersection_ratio_mean}, {self_intersection_ratio_median}\\n\")\n\n\nif __name__ == \"__main__\":\n\n arg_parser = argparse.ArgumentParser(description=\"Evaluate a NDF autodecoder\")\n arg_parser.add_argument(\n \"--experiment\",\n \"-e\",\n dest=\"experiment_directory\",\n required=True,\n help=\"The experiment directory. This directory should include experiment specifications in \"\n + '\"specs.json\", and logging will be done in this directory as well.',\n )\n arg_parser.add_argument(\n \"--checkpoint\",\n \"-c\",\n dest=\"checkpoint\",\n default=\"latest\",\n help=\"The checkpoint to test.\",\n )\n arg_parser.add_argument(\n \"--data\",\n \"-d\",\n dest=\"data_source\",\n required=True,\n help=\"The data source directory.\",\n )\n arg_parser.add_argument(\n \"--split\",\n \"-s\",\n dest=\"split_filename\",\n required=True,\n help=\"The split to evaluate.\",\n )\n arg_parser.add_argument(\n \"--test_or_train\",\n \"-t\",\n dest=\"test_or_train\",\n required=True,\n help=\"Whether to evaluate training meshes or reconstructed meshes\",\n )\n arg_parser.add_argument(\n \"--correspondence_level\",\n \"-l\",\n dest=\"correspondence_level\",\n default=0,\n help=\"Whether to evaluate meshes generated from template mapping or not,\" +\n \"in which level (coarse or fine)\"\n )\n arg_parser.add_argument(\n \"--correspondence_pts_num\",\n \"-n\",\n dest=\"correspondence_pts_num\",\n default=0,\n help=\"if evaluate meshes generated from template mapping, how many vertices in template meshes\"\n )\n\n deep_sdf.add_common_args(arg_parser)\n\n args = arg_parser.parse_args()\n\n deep_sdf.configure_logging(args)\n\n if args.correspondence_level == '0':\n args.correspondence_level = None\n\n evaluate(\n args.experiment_directory,\n args.checkpoint,\n args.data_source,\n args.split_filename,\n args.test_or_train,\n args.correspondence_level,\n args.correspondence_pts_num\n )\n"
]
| [
[
"numpy.median",
"numpy.load",
"numpy.mean"
]
]
|
renzobalb/pacific | [
"75f58c782e4a04c6996d353bc4c8c4ba3e607332"
]
| [
"scripts/PACIFIC.py"
]
| [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Apr 15 09:32:02 2020\n\nPACIFIC takes a FASTA/FASTQ input file and predicts the presence of the following viruses and their relative sample proportions:\n SARS-CoV-2,\n 128 taxonomic units from Influenza,\n 5 species from Metapneumovirus,\n 130 species from Rhinovirus, and\n 11 species from Coronaviridae (non-SARS-CoV-2). \n\n@author: Pablo Acera\n\n\"\"\"\n\nimport argparse\n\nparser = argparse.ArgumentParser(prog='PACIFIC v0.1', description=\n \"\"\" \n PACIFIC takes a FASTA/FASTQ input file and predicts the presence of the following viruses and their relative sample proportions:\n SARS-CoV-2,\n 128 taxonomic units from Influenza,\n 5 species from Metapneumovirus,\n 130 species from Rhinovirus, and\n 11 species from Coronaviridae (non-SARS-CoV-2).\n \n We recommend that users use default parameters to ensure high accuracy.\n \"\"\", usage='python PACIFIC.py [options] -i <in.fa>|<in.fq> -m <model> -t <tokenizer> -l <label-maker>\\nversion: %(prog)s')\n\nOPTIONAL = parser._action_groups.pop()\nREQUIRED = parser.add_argument_group('required arguments')\n\n#Inputs\n## CHANGE -m -t -l -f to OPTIONAL and CREATE RELATIVE PATHS FOR THESE FILES\n\nREQUIRED.add_argument(\"-i\", \"--input_file\",\n help=\"FASTA/FASTQ input file path\",\n metavar='\\b',\n required=True)\n\nREQUIRED.add_argument(\"-m\", \"--model\",\n help=\"PACIFIC model file path\",\n metavar='\\b',\n required=True)\n\nREQUIRED.add_argument(\"-t\", \"--tokenizer\",\n help=\"Tokenizer file path\",\n metavar='\\b',\n required=True)\n\nREQUIRED.add_argument(\"-l\", \"--label_maker\",\n help=\"Label maker object file path\",\n metavar='\\b',\n required=True)\n\n#arguments\nOPTIONAL.add_argument(\"-f\", \"--file_type\",\n help='FASTA or FASTQ training file format [fasta]',\n metavar='<fasta/fastq>',\n default='fasta',\n )\n\nOPTIONAL.add_argument(\"-o\", \"--outputdir\",\n help='Path to output directory [.]',\n metavar='<dir>',\n default=\".\")\n\n#OPTIONAL.add_argument(\"-k\", \"--k_mers\",\n# help='K-mer number use to train the model [9]',\n# default=9,\n# type=int)\n\nOPTIONAL.add_argument(\"-T\", \"--prediction_threshold\",\n help='Threshold/cutoff for predictions [0.95]',\n metavar='<float>',\n default=0.95,\n type=int\n )\n\nOPTIONAL.add_argument(\"-c\", \"--chunk_size\",\n help='Number of reads per chunk [10000]',\n metavar='<int>',\n default=50000,\n type=int\n ) \n\nOPTIONAL.add_argument(\"-O\", \"--output_fasta\",\n help='If this option is \"True\", a FASTA file containing predictions for each read will be provided [False]',\n default=False,\n action='store_true'\n )\n\nOPTIONAL.add_argument('-v', '--version', \n action='version', \n version='%(prog)s')\n\n\nparser._action_groups.append(OPTIONAL)\n\nARGS = parser.parse_args()\n\n# Inputs\nFILE_IN = ARGS.input_file\nMODEL = ARGS.model\nTOKENIZER = ARGS.tokenizer\nLABEL_MAKER = ARGS.label_maker\n\n\n# Arguments\nMODEL = ARGS.model\nFILE_TYPE = ARGS.file_type\nOUTPUTDIR = ARGS.outputdir\nTHRESHOLD_PREDICTION = ARGS.prediction_threshold\nOUTPUT_FASTA = ARGS.output_fasta\nCHUNK_SIZE = ARGS.chunk_size\n\n# import other packages\nfrom Bio import SeqIO\n\nimport pickle\nfrom keras.models import load_model\nimport random\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\nimport os\n\n# hardcode paths to tokenizer and label maker\ndirname = os.path.dirname(__file__)\n#TOKENIZER = os.path.join(dirname, '../model', 'tokenizer.01.pacific_9mers.pickle')\n#LABEL_MAKER = os.path.join(dirname, '../model', 
'label_maker.01.pacific_9mers.pickle')\n\n\ndef process_reads(sequences, kmer, names):\n '''\n '''\n r_reads = []\n new_names = []\n for i in enumerate(sequences):\n # check the reads does not contain weird characters\n if all(c in 'AGCT' for c in i[1].upper()) and len(i[1]) >= 150:\n read = i[1][:150]\n r_reads.append(' '.join(read[x:x+kmer].upper() for x in range(len(read) - kmer + 1)))\n new_names.append(names[i[0]])\n return r_reads, new_names\n\n\ndef main(all_transcripts, names, k_mer_size):\n '''\n '''\n reads, names_p = process_reads(all_transcripts, \n k_mer_size,\n names)\n\n return all_transcripts, reads, names_p\n\ndef accuracy(labels, predictions):\n '''\n calculate accuracy\n '''\n try:\n if labels.shape != predictions.shape:\n print('labels and predictions does not have same dimentions')\n return False\n \n correct = 0\n for i in range(len(labels)):\n if labels[i] == predictions[i]:\n correct +=1\n except:\n return 0\n \n return correct/len(labels)\n\ndef predict_chunk(sequences,\n names,\n K_MERS,\n FILE_TYPE,\n total_results,\n total_sequences):\n '''\n Predicting and write a chunk of reads\n '''\n \n total_sequences += len(sequences)\n \n reads, kmer_sequences, names = main(sequences,\n names,\n K_MERS,\n )\n \n kmer_sequences = tokenizer.texts_to_sequences(kmer_sequences)\n \n predictions = model.predict(np.array(kmer_sequences))\n labels = label_maker.inverse_transform(np.array(predictions), threshold=THRESHOLD_PREDICTION)\n \n if OUTPUT_FASTA is True:\n print()\n fasta_name_out = OUTPUTDIR+'/tmp_output_'+str(counter)\n print('writting temporary output file '+fasta_name_out)\n with open(fasta_name_out,'w') as output:\n for i in enumerate(names):\n print('>'+i[1]+':'+str(max(predictions[i[0]]))+':'+labels[i[0]], file=output)\n print(reads[i[0]], file=output)\n total_results[labels[i[0]]] += [max(predictions[i[0]])]\n \n return total_results, total_sequences\n\n\nif __name__ == '__main__':\n\n seed_value = 42\n random.seed(seed_value)# 3. Set `numpy` pseudo-random generator at a fixed value\n np.random.seed(seed_value)# 4. Set `tensorflow` pseudo-random generator at a fixed value\n try:\n tf.random.set_seed(seed_value)# 5. 
For layers that introduce randomness like dropout, make sure to set seed values \n except:\n tf.set_random_seed(seed_value)\n \n config = tf.compat.v1.ConfigProto()\n config.gpu_options.allow_growth = True\n sess = tf.compat.v1.Session(config=config)\n \n K_MERS = 9\n \n model = load_model(MODEL)\n \n # Keras loading sequences tokenizer \n with open(TOKENIZER, 'rb') as handle:\n tokenizer = pickle.load(handle)\n \n # loading label maker\n with open(LABEL_MAKER, 'rb') as handle:\n label_maker = pickle.load(handle)\n \n print() \n print('Reading input file...')\n print()\n \n total_results = {'Sars_cov_2': [],\n 'Coronaviridae': [],\n 'Influenza': [],\n 'Metapneumovirus': [],\n 'Rhinovirus': [],\n 'Human': []\n }\n \n total_sequences = 0\n fasta_sequences = SeqIO.parse(open(FILE_IN), FILE_TYPE)\n sequences = []\n names = []\n counter = 0\n for fasta in fasta_sequences:\n name, sequence = fasta.id, str(fasta.seq)\n sequences.append(sequence)\n names.append(name)\n counter +=1\n if counter%CHUNK_SIZE == 0:\n \n total_results, total_sequences = predict_chunk(sequences,\n names,\n K_MERS,\n FILE_TYPE,\n total_results,\n total_sequences)\n sequences = []\n names = []\n print()\n print('predictig reads: '+str(counter-CHUNK_SIZE)+' '+str(counter))\n \n total_results, total_sequences = predict_chunk(sequences,\n names,\n K_MERS,\n FILE_TYPE,\n total_results,\n total_sequences)\n \n tmp_files = os.listdir(OUTPUTDIR)\n tmp_files = [i for i in tmp_files if i.startswith('tmp_output')]\n import shutil\n \n if OUTPUT_FASTA is True:\n print()\n print('Writting final output FASTA '+OUTPUTDIR+'/output_pacific.fasta')\n with open('output_PACIFIC.fasta','wb') as wfd:\n for f in tmp_files:\n with open(OUTPUTDIR+'/'+f,'rb') as fd:\n shutil.copyfileobj(fd, wfd)\n\n for delete_file in tmp_files:\n os.remove(OUTPUTDIR+'/'+delete_file)\n print()\n print('Deleting temporary file '+delete_file)\n \n \n processed_reads = len(total_results['Influenza'])+\\\n len(total_results['Coronaviridae'])+\\\n len(total_results['Metapneumovirus'])+\\\n len(total_results['Rhinovirus'])+\\\n len(total_results['Sars_cov_2'])+\\\n len(total_results['Human'])\n \n print()\n print('From a total of '+str(total_sequences)+' reads, '+str(total_sequences - processed_reads)+\\\n ' were discarded, (probabbly due to non-standart nucleotides or too short reads)')\n \n df_results = pd.DataFrame()\n \n df_results['Class'] = ['SARS-CoV-2', 'Coronaviridae', \n 'Influenza', 'Metapneumovirus', \n 'Rhinovirus','Human']\n\n df_results['# predicted reads'] = [len(total_results['Sars_cov_2']),\n len(total_results['Coronaviridae']),\n len(total_results['Influenza']),\n len(total_results['Metapneumovirus']),\n len(total_results['Rhinovirus']),\n len(total_results['Human'])\n ]\n \n percentage = {}\n for classes in total_results:\n number_class = len(total_results[classes])\n percentage[classes] = ( number_class/ processed_reads) *100\n \n df_results['# predicted reads (%)'] = [percentage['Sars_cov_2'],\n percentage['Coronaviridae'],\n percentage['Influenza'],\n percentage['Metapneumovirus'],\n percentage['Rhinovirus'],\n percentage['Human']\n ]\n threshold_reads = {}\n total_threshold_reads = 0\n for classes in total_results:\n numpy_class = np.array(total_results[classes])\n threshold_reads[classes] = len(numpy_class[numpy_class > THRESHOLD_PREDICTION])\n total_threshold_reads +=threshold_reads[classes]\n \n df_results['# predicted reads above '+str(THRESHOLD_PREDICTION)] = [threshold_reads['Sars_cov_2'],\n threshold_reads['Coronaviridae'],\n 
threshold_reads['Influenza'],\n threshold_reads['Metapneumovirus'],\n threshold_reads['Rhinovirus'],\n threshold_reads['Human']\n ]\n \n df_results['# predicted reads above '+str(THRESHOLD_PREDICTION)+' (%)'] = \\\n [threshold_reads['Sars_cov_2']/total_threshold_reads*100 ,\n threshold_reads['Coronaviridae']/total_threshold_reads*100,\n threshold_reads['Influenza']/total_threshold_reads*100,\n threshold_reads['Metapneumovirus']/total_threshold_reads*100,\n threshold_reads['Rhinovirus']/total_threshold_reads*100,\n threshold_reads['Human']/total_threshold_reads*100\n ]\n \n \n print()\n print(df_results)\n df_results.to_csv(OUTPUTDIR+'/output_PACIFIC.txt')\n print()\n print('Thank you for using PACIFIC =^)')\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n"
]
| [
[
"tensorflow.set_random_seed",
"numpy.array",
"numpy.random.seed",
"pandas.DataFrame",
"tensorflow.compat.v1.ConfigProto",
"tensorflow.random.set_seed",
"tensorflow.compat.v1.Session"
]
]
|
sansoohan/Enneagram | [
"b9ed041ffd2e084ae80c26a767030626bd33b491"
]
| [
"BigDataAnalyzing/training/freeze.py"
]
| [
"import tensorflow as tf\nimport numpy as np\nfrom tensorflow.python.tools import freeze_graph\nfrom tensorflow.python.tools import optimize_for_inference_lib\n\n# Freeze the graph\nMODEL_NAME=\"enneagram_classification_model\"\ninput_graph_path = MODEL_NAME+'.pbtxt'\ncheckpoint_path = './'+MODEL_NAME+'.ckpt'\ninput_saver_def_path = \"\"\ninput_binary = False\noutput_node_names = \"output\"\nrestore_op_name = \"save/restore_all\"\nfilename_tensor_name = \"save/Const:0\"\noutput_frozen_graph_name = MODEL_NAME+'.pb'\noutput_optimized_graph_name = 'optimized_'+MODEL_NAME+'.pb'\nclear_devices = True\n\nfreeze_graph.freeze_graph(input_graph_path, input_saver_def_path,\n input_binary, checkpoint_path, output_node_names,\n restore_op_name, filename_tensor_name,\n output_frozen_graph_name, clear_devices, \"\")\n\n# Optimize for inference\n\ninput_graph_def = tf.compat.v1.GraphDef()\nwith tf.io.gfile.GFile(output_frozen_graph_name, \"rb\") as f:\n data = f.read()\n input_graph_def.ParseFromString(data)\n\noutput_graph_def = optimize_for_inference_lib.optimize_for_inference(\n input_graph_def,\n [\"x\"], # an array of the input node(s)\n [\"output\"], # an array of output nodes\n tf.float32.as_datatype_enum)\n\n# Save the optimized graph\n\nf = tf.io.gfile.GFile(output_optimized_graph_name, \"w\")\nf.write(output_graph_def.SerializeToString())\n\n# tf.train.write_graph(output_graph_def, './', output_optimized_graph_name)"
]
| [
[
"tensorflow.python.tools.freeze_graph.freeze_graph",
"tensorflow.io.gfile.GFile",
"tensorflow.python.tools.optimize_for_inference_lib.optimize_for_inference",
"tensorflow.compat.v1.GraphDef"
]
]
|
DennisMcWherter/SingleImageDataAugmentation | [
"a5fb760ce852adcd89498fa8f8b5be1deaf03d26"
]
| [
"src/models/MobilenetV2.py"
]
| [
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom torchvision import models\n\nclass TestMobilenetV2(nn.Module):\n\n def __init__(self, num_classes):\n super(TestMobilenetV2, self).__init__()\n\n # Freeze mobilenet\n self.mobilenetv2 = models.mobilenet_v2(pretrained=True)\n for param in self.mobilenetv2.parameters():\n param.requires_grad = False\n\n # Expect input size of (224,224)\n # TODO: We shouldn't need to hardcode this, but it works for our experiments.\n self.fc1 = nn.Linear(11520, 128)\n self.fc2 = nn.Linear(128, 128)\n self.fc3 = nn.Linear(128, num_classes)\n\n def forward(self, x):\n x = F.avg_pool2d(self.mobilenetv2.features(x), 2)\n x = torch.flatten(x, start_dim=1, end_dim=3)\n x = F.dropout(x, p=0.2, training=self.training)\n x = F.relu(self.fc1(x))\n x = F.dropout(x, p=0.2, training=self.training)\n x = F.relu(self.fc2(x))\n x = F.dropout(x, p=0.2, training=self.training)\n x = F.relu(self.fc3(x))\n return F.softmax(x, dim=0)\n\n\n"
]
| [
[
"torch.nn.Linear",
"torch.flatten",
"torch.nn.functional.dropout",
"torch.nn.functional.softmax"
]
]
|
ZhiangChen/ZhiangChen | [
"639c4bc7140b8253d89b11920187e88d07081ae7"
]
| [
"infer.py"
]
| [
"\"\"\"\ntraining.py\nZhiang Chen, April 2020\n\"\"\"\n\nimport torch\nimport torch.utils.data\nimport torchvision.datasets\nimport torch.nn as nn\nimport torchvision.transforms as transforms\nfrom utils import *\nimport torchvision.models as models\nfrom data import EurekaDataset\nimport pickle\n\nmodel_names = sorted(name for name in models.__dict__\n if name.islower() and not name.startswith(\"__\")\n and callable(models.__dict__[name]))\n\ntorch.manual_seed(0)\n\nnormalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n\n\neureka_normalize = transforms.Normalize(mean=[0.44, 0.50, 0.43],\n std=[0.26, 0.25, 0.26])\n\neureka_transform = transforms.Compose([\n #transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n eureka_normalize,])\n\n\ndef neural_network(architecture, nm_classes, pretrained=True, change_last_layer=True):\n assert architecture in model_names\n print(\"=> creating model '{}'\".format(architecture))\n model = models.__dict__[architecture](pretrained=pretrained)\n if change_last_layer:\n if architecture.startswith('densenet'):\n in_features = model.classifier.in_features\n model.classifier = nn.Linear(in_features=in_features, out_features=nm_classes)\n else:\n in_features = model.fc.in_features\n model.fc = nn.Linear(in_features=in_features, out_features=nm_classes)\n\n return model\n\ndef cifar10(root='./datasets/cifar10/', val=True):\n train = torchvision.datasets.CIFAR10(root, train=True, download=True, transform=train_transform)\n test = torchvision.datasets.CIFAR10(root, train=False, download=True, transform=test_transform)\n return train, test\n\ndef eureka():\n\ttrain = EurekaDataset('./datasets/Eureka/images/','./datasets/Eureka/class.json', eureka_transform)\n\ttest = EurekaDataset('./datasets/Eureka/images_valid/','./datasets/Eureka/class.json', eureka_transform)\n\t#test = EurekaDataset('./datasets/Eureka/images_test/','./datasets/Eureka/label_102.json', eureka_transform)\n\t#test.addJson('./datasets/Eureka/label_102.json')\n\treturn train, test\n\nif __name__ == '__main__':\n cuda = 'cuda:1'\n device = torch.device(cuda)\n nm_classes = 3\n train_dataset, test_dataset = eureka()\n print(len(train_dataset))\n print(len(test_dataset))\n\n train_dataloader = torch.utils.data.DataLoader(\n train_dataset, batch_size=4, shuffle=True, num_workers=8, collate_fn=collate_fn)\n test_dataloader = torch.utils.data.DataLoader(\n test_dataset, batch_size=4, shuffle=False, num_workers=8, collate_fn=collate_fn)\n\n model = neural_network('resnext101_32x8d', nm_classes)\n\n criterion = nn.CrossEntropyLoss().to(device)\n params = [p for p in model.parameters() if p.requires_grad]\n optimizer = torch.optim.SGD(params, lr=0.001, momentum=0.9, weight_decay=0.00001)\n lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.65)\n\n \"\"\"\n model.load_state_dict(torch.load(\"trained_param_resnext101/epoch_{:04d}.param\".format(52)))\n model.eval() \n model.to(device)\n results = infer(model, train_dataset, device)\n meta = train_dataset.dataset\n save_f = {\"pred\":results, \"meta\": meta}\n with open('3_101104_result.pickle', 'wb') as fp:\n pickle.dump(save_f, fp)\n \"\"\"\n model.load_state_dict(torch.load(\"trained_param_resnext101/epoch_{:04d}.param\".format(52)))\n model.eval()\n model.to(device)\n results = infer(model, test_dataset, device)\n meta = test_dataset.dataset\n save_f = {\"pred\":results, \"meta\": meta}\n with open('3_103_result.pickle', 'wb') as fp:\n pickle.dump(save_f, fp)\n"
]
| [
[
"torch.nn.Linear",
"torch.device",
"torch.optim.lr_scheduler.StepLR",
"torch.optim.SGD",
"torch.manual_seed",
"torch.utils.data.DataLoader",
"torch.nn.CrossEntropyLoss"
]
]
|
natke/tutorials | [
"32e5407cfe848e9c50d62c3bfa69487af8b64a72"
]
| [
"beginner_source/examples_autograd/two_layer_net_autograd.py"
]
| [
"# -*- coding: utf-8 -*-\n\"\"\"\nPyTorch: Tensors and autograd\n-------------------------------\n\nA fully-connected ReLU network with one hidden layer and no biases, trained to\npredict y from x by minimizing squared Euclidean distance.\n\nThis implementation computes the forward pass using operations on PyTorch\nTensors, and uses PyTorch autograd to compute gradients.\n\n\nA PyTorch Tensor represents a node in a computational graph. If ``x`` is a\nTensor that has ``x.requires_grad=True`` then ``x.grad`` is another Tensor\nholding the gradient of ``x`` with respect to some scalar value.\n\"\"\"\nimport torch\n\ndtype = torch.float\ndevice = torch.device(\"cpu\")\n# device = torch.device(\"cuda:0\") # Uncomment this to run on GPU\n\n# N is batch size; D_in is input dimension;\n# H is hidden dimension; D_out is output dimension.\nN, D_in, H, D_out = 64, 1000, 100, 10\n\n# Create random Tensors to hold input and outputs.\n# Setting requires_grad=False indicates that we do not need to compute gradients\n# with respect to these Tensors during the backward pass.\nx = torch.randn(N, D_in, device=device, dtype=dtype)\ny = torch.randn(N, D_out, device=device, dtype=dtype)\n\n# Create random Tensors for weights.\n# Setting requires_grad=True indicates that we want to compute gradients with\n# respect to these Tensors during the backward pass.\nw1 = torch.randn(D_in, H, device=device, dtype=dtype, requires_grad=True)\nw2 = torch.randn(H, D_out, device=device, dtype=dtype, requires_grad=True)\n\nlearning_rate = 1e-6\nfor t in range(500):\n # Forward pass: compute predicted y using operations on Tensors; these\n # are exactly the same operations we used to compute the forward pass using\n # Tensors, but we do not need to keep references to intermediate values since\n # we are not implementing the backward pass by hand.\n y_pred = x.mm(w1).clamp(min=0).mm(w2)\n\n # Compute and print loss using operations on Tensors.\n # Now loss is a Tensor of shape (1,)\n # loss.item() gets the scalar value held in the loss.\n loss = (y_pred - y).pow(2).sum()\n if t % 100 == 99:\n print(t, loss.item())\n\n # Use autograd to compute the backward pass. This call will compute the\n # gradient of loss with respect to all Tensors with requires_grad=True.\n # After this call w1.grad and w2.grad will be Tensors holding the gradient\n # of the loss with respect to w1 and w2 respectively.\n loss.backward()\n\n # Manually update weights using gradient descent. Wrap in torch.no_grad()\n # because weights have requires_grad=True, but we don't need to track this\n # in autograd.\n # An alternative way is to operate on weight.data and weight.grad.data.\n # Recall that tensor.data gives a tensor that shares the storage with\n # tensor, but doesn't track history.\n # You can also use torch.optim.SGD to achieve this.\n with torch.no_grad():\n w1 -= learning_rate * w1.grad\n w2 -= learning_rate * w2.grad\n\n # Manually zero the gradients after updating weights\n w1.grad.zero_()\n w2.grad.zero_()\n"
]
| [
[
"torch.device",
"torch.no_grad",
"torch.randn"
]
]
|
DocVaughan/CRAWLAB-Code-Snippets | [
"90c946bef0fbe37401f822d58ce5a6b3c5349616"
]
| [
"OpenAI Gym/openAI_variableLengthPendulumContinuous_learning.py"
]
| [
"import numpy as np\nimport gym\nimport variable_pendulum_continuous\n\nfrom keras.models import Sequential, Model\nfrom keras.layers import Dense, Activation, Flatten, Input, merge\nfrom keras.optimizers import Adam\n\nfrom rl.agents import DDPGAgent\nfrom rl.memory import SequentialMemory\nfrom rl.random import OrnsteinUhlenbeckProcess\n\n\nENV_NAME = 'variable_pendulum_continuous-v0'\n\n# ENV_NAME = 'Pendulum-v0'\n\n# Get the environment and extract the number of actions.\nenv = gym.make(ENV_NAME)\nnp.random.seed(123)\nenv.seed(123)\nnb_actions = env.action_space.shape[0]\n\n# Next, we build a very simple model.\nactor = Sequential()\nactor.add(Flatten(input_shape=(1,) + env.observation_space.shape))\nactor.add(Dense(16))\nactor.add(Activation('relu'))\nactor.add(Dense(16))\nactor.add(Activation('relu'))\nactor.add(Dense(16))\nactor.add(Activation('relu'))\nactor.add(Dense(nb_actions))\nactor.add(Activation('linear'))\nprint(actor.summary())\n\naction_input = Input(shape=(nb_actions,), name='action_input')\nobservation_input = Input(shape=(1,) + env.observation_space.shape, name='observation_input')\nflattened_observation = Flatten()(observation_input)\nx = merge([action_input, flattened_observation], mode='concat')\nx = Dense(32)(x)\nx = Activation('relu')(x)\nx = Dense(32)(x)\nx = Activation('relu')(x)\nx = Dense(32)(x)\nx = Activation('relu')(x)\nx = Dense(1)(x)\nx = Activation('linear')(x)\ncritic = Model(input=[action_input, observation_input], output=x)\nprint(critic.summary())\n\n# Finally, we configure and compile our agent. You can use every built-in Keras optimizer and\n# even the metrics!\nmemory = SequentialMemory(limit=100000, window_length=1)\nrandom_process = OrnsteinUhlenbeckProcess(size=nb_actions, theta=.15, mu=0., sigma=.3)\nagent = DDPGAgent(nb_actions=nb_actions, actor=actor, critic=critic, critic_action_input=action_input,\n memory=memory, nb_steps_warmup_critic=100, nb_steps_warmup_actor=100,\n random_process=random_process, gamma=.99, target_model_update=1e-3,\n delta_clip=1.)\n# agent = DDPGAgent(nb_actions=nb_actions, actor=actor, critic=critic, critic_action_input=action_input,\n# memory=memory, nb_steps_warmup_critic=100, nb_steps_warmup_actor=100,\n# random_process=random_process, gamma=.99, target_model_update=1e-3)\nagent.compile(Adam(lr=.001, clipnorm=1.), metrics=['mae'])\n\n# Okay, now it's time to learn something! We visualize the training here for show, but this\n# slows down training quite a lot. You can always safely abort the training prematurely using\n# Ctrl + C.\nagent.fit(env, nb_steps=50000, visualize=True, verbose=1, nb_max_episode_steps=200)\n\n# After training is done, we save the final weights.\nagent.save_weights('ddpg_{}_weights.h5f'.format(ENV_NAME), overwrite=True)\n\n# Finally, evaluate our algorithm for 5 episodes.\nagent.test(env, nb_episodes=5, visualize=True, nb_max_episode_steps=200)"
]
| [
[
"numpy.random.seed"
]
]
|
MitchellTesla/automl | [
"426b5a82b96afb2aa90e0eaf17335118e0aad8fd"
]
| [
"efficientnetv2/utils.py"
]
| [
"# Copyright 2021 Google Research. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Model utilities.\"\"\"\nimport contextlib\nimport functools\nimport os\nfrom absl import logging\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow_addons.layers as tfa_layers\n\nfrom tensorflow.python.tpu import tpu_function # pylint:disable=g-direct-tensorflow-import\n\n\ndef activation_fn(features: tf.Tensor, act_fn: str):\n \"\"\"Customized non-linear activation type.\"\"\"\n if act_fn in ('silu', 'swish'):\n return tf.nn.swish(features)\n elif act_fn == 'silu_native':\n return features * tf.sigmoid(features)\n elif act_fn == 'hswish':\n return features * tf.nn.relu6(features + 3) / 6\n elif act_fn == 'relu':\n return tf.nn.relu(features)\n elif act_fn == 'relu6':\n return tf.nn.relu6(features)\n elif act_fn == 'elu':\n return tf.nn.elu(features)\n elif act_fn == 'leaky_relu':\n return tf.nn.leaky_relu(features)\n elif act_fn == 'selu':\n return tf.nn.selu(features)\n elif act_fn == 'mish':\n return features * tf.math.tanh(tf.math.softplus(features))\n else:\n raise ValueError('Unsupported act_fn {}'.format(act_fn))\n\n\ndef get_act_fn(act_fn):\n if not act_fn:\n return tf.nn.silu\n if isinstance(act_fn, str):\n return functools.partial(activation_fn, act_fn=act_fn)\n return act_fn\n\n\ndef cross_replica_mean(t, num_shards_per_group=None):\n \"\"\"Calculates the average value of input tensor across TPU replicas.\"\"\"\n num_shards = tpu_function.get_tpu_context().number_of_shards\n if not num_shards_per_group:\n return tf.compat.v1.tpu.cross_replica_sum(t) / tf.cast(num_shards, t.dtype)\n\n group_assignment = None\n if num_shards_per_group > 1:\n if num_shards % num_shards_per_group != 0:\n raise ValueError('num_shards: %d mod shards_per_group: %d, should be 0' %\n (num_shards, num_shards_per_group))\n num_groups = num_shards // num_shards_per_group\n group_assignment = [[\n x for x in range(num_shards) if x // num_shards_per_group == y\n ] for y in range(num_groups)]\n return tf.compat.v1.tpu.cross_replica_sum(t, group_assignment) / tf.cast(\n num_shards_per_group, t.dtype)\n\n\nclass WarmupLearningRateSchedule(\n tf.keras.optimizers.schedules.LearningRateSchedule):\n \"\"\"Provides a variety of learning rate decay schedules with warm up.\"\"\"\n\n def __init__(self,\n initial_lr,\n steps_per_epoch=None,\n lr_decay_type='exponential',\n decay_factor=0.97,\n decay_epochs=2.4,\n total_steps=None,\n warmup_epochs=5,\n minimal_lr=0):\n super(WarmupLearningRateSchedule, self).__init__()\n self.initial_lr = initial_lr\n self.steps_per_epoch = steps_per_epoch\n self.lr_decay_type = lr_decay_type\n self.decay_factor = decay_factor\n self.decay_epochs = decay_epochs\n self.total_steps = total_steps\n self.warmup_epochs = warmup_epochs\n self.minimal_lr = minimal_lr\n\n def __call__(self, step):\n if self.lr_decay_type == 'exponential':\n assert self.steps_per_epoch is not None\n decay_steps 
= self.steps_per_epoch * self.decay_epochs\n lr = tf.keras.optimizers.schedules.ExponentialDecay(\n self.initial_lr, decay_steps, self.decay_factor, staircase=True)(\n step)\n elif self.lr_decay_type == 'cosine':\n assert self.total_steps is not None\n lr = 0.5 * self.initial_lr * (\n 1 + tf.cos(np.pi * tf.cast(step, tf.float32) / self.total_steps))\n elif self.lr_decay_type == 'linear':\n assert self.total_steps is not None\n lr = (1.0 -\n tf.cast(step, tf.float32) / self.total_steps) * self.initial_lr\n elif self.lr_decay_type == 'constant':\n lr = self.initial_lr\n else:\n assert False, 'Unknown lr_decay_type : %s' % self.lr_decay_type\n\n if self.minimal_lr:\n lr = tf.math.maximum(lr, self.minimal_lr)\n\n if self.warmup_epochs:\n warmup_steps = int(self.warmup_epochs * self.steps_per_epoch)\n warmup_lr = (\n self.initial_lr * tf.cast(step, tf.float32) /\n tf.cast(warmup_steps, tf.float32))\n lr = tf.cond(step < warmup_steps, lambda: warmup_lr, lambda: lr)\n\n return lr\n\n def get_config(self):\n return {\n 'initial_lr': self.initial_lr,\n 'steps_per_epoch': self.steps_per_epoch,\n 'lr_decay_type': self.lr_decay_type,\n 'decay_factor': self.decay_factor,\n 'decay_epochs': self.decay_epochs,\n 'total_steps': self.total_steps,\n 'warmup_epochs': self.warmup_epochs,\n 'minimal_lr': self.minimal_lr,\n }\n\n\ndef build_optimizer(learning_rate,\n optimizer_name='rmsprop',\n decay=0.9,\n epsilon=0.001,\n momentum=0.9):\n \"\"\"Build optimizer.\"\"\"\n if optimizer_name == 'sgd':\n logging.info('Using SGD optimizer')\n optimizer = tf.compat.v1.train.GradientDescentOptimizer(\n learning_rate=learning_rate)\n elif optimizer_name == 'momentum':\n logging.info('Using Momentum optimizer')\n optimizer = tf.compat.v1.train.MomentumOptimizer(\n learning_rate=learning_rate, momentum=momentum)\n elif optimizer_name == 'rmsprop':\n logging.info('Using RMSProp optimizer')\n optimizer = tf.compat.v1.train.RMSPropOptimizer(learning_rate, decay,\n momentum, epsilon)\n elif optimizer_name == 'adam':\n logging.info('Using Adam optimizer')\n optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate)\n else:\n logging.fatal('Unknown optimizer: %s', optimizer_name)\n\n return optimizer\n\n\nclass TpuBatchNormalization(tf.keras.layers.BatchNormalization):\n \"\"\"Cross replica batch normalization.\"\"\"\n\n def __init__(self, fused=False, **kwargs):\n if not kwargs.get('name', None):\n kwargs['name'] = 'tpu_batch_normalization'\n if fused in (True, None):\n raise ValueError('TpuBatchNormalization does not support fused=True.')\n super().__init__(fused=fused, **kwargs)\n\n def _moments(self, inputs, reduction_axes, keep_dims):\n \"\"\"Compute the mean and variance: it overrides the original _moments.\"\"\"\n shard_mean, shard_variance = super()._moments(\n inputs, reduction_axes, keep_dims=keep_dims)\n\n num_shards = tpu_function.get_tpu_context().number_of_shards or 1\n num_shards_per_group = min(8, num_shards) # aggregate up to 32 cores.\n if num_shards_per_group > 1:\n logging.info('TpuBatchNormalization with num_shards_per_group %d',\n num_shards_per_group)\n # Compute variance using: Var[X]= E[X^2] - E[X]^2.\n shard_square_of_mean = tf.math.square(shard_mean)\n shard_mean_of_square = shard_variance + shard_square_of_mean\n group_mean = cross_replica_mean(shard_mean, num_shards_per_group)\n group_mean_of_square = cross_replica_mean(shard_mean_of_square,\n num_shards_per_group)\n group_variance = group_mean_of_square - tf.math.square(group_mean)\n return (group_mean, group_variance)\n else:\n return 
(shard_mean, shard_variance)\n\n def call(self, inputs, training=None):\n outputs = super().call(inputs, training)\n return outputs\n\n\nclass BatchNormalization(tf.keras.layers.BatchNormalization):\n \"\"\"Fixed default name of BatchNormalization to match TpuBatchNormalization.\"\"\"\n\n def __init__(self, **kwargs):\n if not kwargs.get('name', None):\n kwargs['name'] = 'tpu_batch_normalization'\n super().__init__(**kwargs)\n\n\ndef normalization(norm_type: str,\n axis=-1,\n epsilon=0.001,\n momentum=0.99,\n groups=8,\n name=None):\n \"\"\"Normalization after conv layers.\"\"\"\n if norm_type == 'gn':\n return tfa_layers.GroupNormalization(groups, axis, epsilon, name=name)\n\n if norm_type == 'tpu_bn':\n return TpuBatchNormalization(\n axis=axis, momentum=momentum, epsilon=epsilon, name=name)\n\n return BatchNormalization(\n axis=axis, momentum=momentum, epsilon=epsilon, name=name)\n\n\ndef archive_ckpt(ckpt_eval, ckpt_objective, ckpt_path):\n \"\"\"Archive a checkpoint if the metric is better.\"\"\"\n ckpt_dir, ckpt_name = os.path.split(ckpt_path)\n\n saved_objective_path = os.path.join(ckpt_dir, 'best_objective.txt')\n saved_objective = float('-inf')\n if tf.io.gfile.exists(saved_objective_path):\n with tf.io.gfile.GFile(saved_objective_path, 'r') as f:\n saved_objective = float(f.read())\n if saved_objective > ckpt_objective:\n logging.info('Ckpt %s is worse than %s', ckpt_objective, saved_objective)\n return False\n\n filenames = tf.io.gfile.glob(ckpt_path + '.*')\n if filenames is None:\n logging.info('No files to copy for checkpoint %s', ckpt_path)\n return False\n\n # Clear the old folder.\n dst_dir = os.path.join(ckpt_dir, 'archive')\n if tf.io.gfile.exists(dst_dir):\n tf.io.gfile.rmtree(dst_dir)\n tf.io.gfile.makedirs(dst_dir)\n\n # Write checkpoints.\n for f in filenames:\n dest = os.path.join(dst_dir, os.path.basename(f))\n tf.io.gfile.copy(f, dest, overwrite=True)\n ckpt_state = tf.compat.v1.train.generate_checkpoint_state_proto(\n dst_dir,\n model_checkpoint_path=ckpt_name,\n all_model_checkpoint_paths=[ckpt_name])\n with tf.io.gfile.GFile(os.path.join(dst_dir, 'checkpoint'), 'w') as f:\n f.write(str(ckpt_state))\n with tf.io.gfile.GFile(os.path.join(dst_dir, 'best_eval.txt'), 'w') as f:\n f.write('%s' % ckpt_eval)\n\n # Update the best objective.\n with tf.io.gfile.GFile(saved_objective_path, 'w') as f:\n f.write('%f' % ckpt_objective)\n\n logging.info('Copying checkpoint %s to %s', ckpt_path, dst_dir)\n return True\n\n\ndef get_ema_vars():\n \"\"\"Get all exponential moving average (ema) variables.\"\"\"\n ema_vars = tf.compat.v1.trainable_variables() + tf.compat.v1.get_collection(\n 'moving_vars')\n for v in tf.compat.v1.global_variables():\n # We maintain mva for batch norm moving mean and variance as well.\n if 'moving_mean' in v.name or 'moving_variance' in v.name:\n ema_vars.append(v)\n return list(set(ema_vars))\n\n\ndef drop_connect(inputs, is_training, survival_prob):\n \"\"\"Drop the entire conv with given survival probability.\"\"\"\n # \"Deep Networks with Stochastic Depth\", https://arxiv.org/pdf/1603.09382.pdf\n if not is_training:\n return inputs\n\n # Compute tensor.\n batch_size = tf.shape(inputs)[0]\n random_tensor = survival_prob\n random_tensor += tf.random.uniform([batch_size, 1, 1, 1], dtype=inputs.dtype)\n binary_tensor = tf.floor(random_tensor)\n # Unlike conventional way that multiply survival_prob at test time, here we\n # divide survival_prob at training time, such that no addition compute is\n # needed at test time.\n output = inputs / 
survival_prob * binary_tensor\n return output\n\n\ndef num_params_flops(readable_format=True):\n \"\"\"Return number of parameters and flops.\"\"\"\n nparams = np.sum([\n np.prod(v.get_shape().as_list())\n for v in tf.compat.v1.trainable_variables()\n ])\n options = tf.compat.v1.profiler.ProfileOptionBuilder.float_operation()\n options['output'] = 'none'\n flops = tf.compat.v1.profiler.profile(\n tf.compat.v1.get_default_graph(), options=options).total_float_ops\n # We use flops to denote multiply-adds, which is counted as 2 ops in tfprof.\n flops = flops // 2\n if readable_format:\n nparams = float(nparams) * 1e-6\n flops = float(flops) * 1e-9\n return nparams, flops\n\n\nclass Pair(tuple):\n\n def __new__(cls, name, value):\n return super().__new__(cls, (name, value))\n\n def __init__(self, name, _): # pylint: disable=super-init-not-called\n self.name = name\n\n\ndef scalar(name, tensor, is_tpu=True):\n \"\"\"Stores a (name, Tensor) tuple in a custom collection.\"\"\"\n logging.info('Adding scalar summary %s', Pair(name, tensor))\n if is_tpu:\n tf.compat.v1.add_to_collection('scalar_summaries',\n Pair(name, tf.reduce_mean(tensor)))\n else:\n tf.summary.scalar(name, tf.reduce_mean(tensor))\n\n\ndef image(name, tensor, is_tpu=True):\n logging.info('Adding image summary %s', Pair(name, tensor))\n if is_tpu:\n tf.compat.v1.add_to_collection('image_summaries', Pair(name, tensor))\n else:\n tf.summary.image(name, tensor)\n\n\ndef get_tpu_host_call(global_step, model_dir, iterations_per_loop):\n \"\"\"Get TPU host call for summaries.\"\"\"\n scalar_summaries = tf.compat.v1.get_collection('scalar_summaries')\n if not scalar_summaries:\n return None # No summaries to write.\n\n def host_call_fn(global_step, *args):\n \"\"\"Training host call. Creates summaries for training metrics.\"\"\"\n gs = global_step[0]\n with tf.summary.create_file_writer(\n model_dir, max_queue=iterations_per_loop).as_default():\n with tf.summary.record_if(True):\n for i, _ in enumerate(scalar_summaries):\n name = scalar_summaries[i][0]\n tensor = args[i][0]\n tf.summary.scalar(name, tensor, step=gs)\n return tf.compat.v1.summary.all_v2_summary_ops()\n\n reshaped_tensors = [tf.reshape(t, [1]) for _, t in scalar_summaries]\n global_step_t = tf.reshape(global_step, [1])\n return host_call_fn, [global_step_t] + reshaped_tensors\n\n\[email protected]\ndef float16_scope():\n \"\"\"Scope class for float16.\"\"\"\n\n def _custom_getter(getter, *args, **kwargs):\n \"\"\"Returns a custom getter that methods must be called under.\"\"\"\n cast_to_float16 = False\n requested_dtype = kwargs['dtype']\n if requested_dtype == tf.float16:\n kwargs['dtype'] = tf.float32\n cast_to_float16 = True\n var = getter(*args, **kwargs)\n if cast_to_float16:\n var = tf.cast(var, tf.float16)\n return var\n\n with tf.compat.v1.variable_scope(\n '', custom_getter=_custom_getter) as varscope:\n yield varscope\n\n\ndef set_precision_policy(policy_name=None):\n \"\"\"Set precision policy according to the name.\n\n Args:\n policy_name: precision policy name, one of 'float32', 'mixed_float16',\n 'mixed_bfloat16', or None.\n loss_scale: whether to use loss scale (only for training).\n \"\"\"\n if not policy_name:\n return\n\n assert policy_name in ('mixed_float16', 'mixed_bfloat16', 'float32')\n logging.info('use mixed precision policy name %s', policy_name)\n tf.compat.v1.keras.layers.enable_v2_dtype_behavior()\n policy = tf.keras.mixed_precision.Policy(policy_name)\n tf.keras.mixed_precision.set_global_policy(policy)\n\n\ndef build_model_with_precision(pp, 
mm, ii, tt, *args, **kwargs):\n \"\"\"Build model with its inputs/params for a specified precision context.\n\n This is highly specific to this codebase, and not intended to be general API.\n Advanced users only. DO NOT use it if you don't know what it does.\n NOTE: short argument names are intended to avoid conficts with kwargs.\n\n Args:\n pp: A string, precision policy name, such as \"mixed_float16\".\n mm: A function, for rmodel builder.\n ii: A tensor, for model inputs.\n tt: A bool, If true, it is for training; otherwise, it is for eval.\n *args: A list of model arguments.\n **kwargs: A dict, extra model parameters.\n\n Returns:\n the output of mm model.\n \"\"\"\n del tt\n if pp == 'mixed_bfloat16':\n set_precision_policy(pp)\n inputs = tf.cast(ii, tf.bfloat16)\n with tf.compat.v1.tpu.bfloat16_scope():\n outputs = mm(inputs, *args, **kwargs)\n set_precision_policy('float32')\n elif pp == 'mixed_float16':\n set_precision_policy(pp)\n inputs = tf.cast(ii, tf.float16)\n with float16_scope():\n outputs = mm(inputs, *args, **kwargs)\n set_precision_policy('float32')\n elif not pp or pp == 'float32':\n outputs = mm(ii, *args, **kwargs)\n else:\n raise ValueError('Unknow precision name {}'.format(pp))\n\n # Users are responsible to convert the dtype of all outputs.\n return outputs\n\n\ndef get_ckpt_var_map(ckpt_path,\n ckpt_scope='',\n var_scope='',\n skip_mismatch=None,\n init_ema=True):\n \"\"\"Get a var map for restoring from pretrained checkpoints.\n\n Args:\n ckpt_path: string. A pretrained checkpoint path.\n ckpt_scope: string. Scope name for checkpoint variables.\n var_scope: string. Scope name for model variables.\n skip_mismatch: skip variables if shape mismatch.\n init_ema: If true, try to init from ema variables.\n\n Returns:\n var_map: a dictionary from checkpoint name to model variables.\n \"\"\"\n logging.info('Init model from checkpoint %s', ckpt_path)\n var_map = {}\n # Get the list of vars to restore.\n model_vars = tf.compat.v1.get_collection(\n tf.compat.v1.GraphKeys.GLOBAL_VARIABLES, scope=var_scope)\n reader = tf.train.load_checkpoint(ckpt_path)\n ckpt_var_name_to_shape = reader.get_variable_to_shape_map()\n ckpt_var_names = set(reader.get_variable_to_shape_map().keys())\n for v in model_vars:\n v_name = v.op.name\n\n # filter special variables.\n flist = ['global_step', 'ExponentialMovingAverage', 'Momentum', 'RMSProp']\n if list(filter(lambda x, s=v_name: x in s, flist)):\n continue\n\n if not v.op.name.startswith(var_scope):\n logging.info('skip %s -- does not match scope %s', v_name, var_scope)\n cv_name = ckpt_scope + v.op.name[len(var_scope):]\n if init_ema and cv_name + '/ExponentialMovingAverage' in ckpt_var_names:\n cv_name = cv_name + '/ExponentialMovingAverage' # prefer ema vars.\n\n if cv_name not in ckpt_var_names:\n if skip_mismatch:\n logging.info('skip %s (%s) -- not in ckpt', v_name, cv_name)\n continue\n raise ValueError(f'{v.op} is not in ckpt {ckpt_path}')\n\n cv_shape = ckpt_var_name_to_shape[cv_name]\n if v.shape != cv_shape:\n if skip_mismatch:\n logging.info('skip %s (%s vs %s) -- shape mismatch', v_name, v.shape,\n cv_shape)\n continue\n raise ValueError(f'shape mismatch {v_name} ({v.shape} vs {cv_shape})')\n\n var_map[cv_name] = v\n\n if not var_map or len(var_map) < 5:\n raise ValueError(f'var_map={var_map} is almost empty, please check logs.')\n\n for (k, v) in var_map.items():\n logging.log_first_n(logging.INFO, f'Init {v.op.name} from ckpt var {k}', 10)\n\n return var_map\n\n\ndef restore_tf2_ckpt(model,\n ckpt_path_or_file,\n 
skip_mismatch=True,\n exclude_layers=None):\n \"\"\"Restore variables from a given checkpoint.\n\n Args:\n model: the keras model to be restored.\n ckpt_path_or_file: the path or file for checkpoint.\n skip_mismatch: whether to skip variables if shape mismatch,\n only works with tf1 checkpoint.\n exclude_layers: string list exclude layer's variables,\n only works with tf2 checkpoint.\n\n Raises:\n KeyError: if access unexpected variables.\n \"\"\"\n ckpt_file = ckpt_path_or_file\n if tf.io.gfile.isdir(ckpt_file):\n ckpt_file = tf.train.latest_checkpoint(ckpt_file)\n\n # Try to load object-based checkpoint (by model.save_weights).\n var_list = tf.train.list_variables(ckpt_file)\n if var_list[0][0] == '_CHECKPOINTABLE_OBJECT_GRAPH':\n print(f'Load checkpointable from {ckpt_file}, excluding {exclude_layers}')\n keys = {var[0].split('/')[0] for var in var_list}\n keys.discard('_CHECKPOINTABLE_OBJECT_GRAPH')\n if exclude_layers:\n exclude_layers = set(exclude_layers)\n keys = keys.difference(exclude_layers)\n ckpt = tf.train.Checkpoint(**{key: getattr(model, key, None)\n for key in keys\n if getattr(model, key, None)})\n status = ckpt.restore(ckpt_file)\n status.assert_nontrivial_match()\n return\n\n print(f'Load TF1 graph based checkpoint from {ckpt_file}.')\n var_dict = {v.name.split(':')[0]: v for v in model.weights}\n reader = tf.train.load_checkpoint(ckpt_file)\n var_shape_map = reader.get_variable_to_shape_map()\n for key, var in var_dict.items():\n if key in var_shape_map:\n if var_shape_map[key] != var.shape:\n msg = 'Shape mismatch: %s' % key\n if skip_mismatch:\n logging.warning(msg)\n else:\n raise ValueError(msg)\n else:\n var.assign(reader.get_tensor(key), read_value=False)\n logging.log_first_n(logging.INFO,\n f'Init {var.name} from {key} ({ckpt_file})', 10)\n else:\n msg = 'Not found %s in %s' % (key, ckpt_file)\n if skip_mismatch:\n logging.warning(msg)\n else:\n raise KeyError(msg)\n\n\nclass ReuableBackupAndRestore(tf.keras.callbacks.experimental.BackupAndRestore):\n \"\"\"A BackupAndRestore callback that can be used across multiple model.fit()s.\"\"\"\n\n def on_train_end(self, logs=None):\n # don't delete the backup, so it can be used for future model.fit()s\n pass\n"
]
| [
[
"tensorflow.nn.swish",
"tensorflow.compat.v1.profiler.ProfileOptionBuilder.float_operation",
"tensorflow.io.gfile.GFile",
"tensorflow.compat.v1.get_default_graph",
"tensorflow.compat.v1.summary.all_v2_summary_ops",
"tensorflow.compat.v1.global_variables",
"tensorflow.reshape",
"tensorflow.io.gfile.copy",
"tensorflow.compat.v1.trainable_variables",
"tensorflow.keras.mixed_precision.Policy",
"tensorflow.cast",
"tensorflow.shape",
"tensorflow.train.latest_checkpoint",
"tensorflow.compat.v1.train.RMSPropOptimizer",
"tensorflow.compat.v1.train.AdamOptimizer",
"tensorflow.sigmoid",
"tensorflow.nn.leaky_relu",
"tensorflow.train.load_checkpoint",
"tensorflow.io.gfile.isdir",
"tensorflow.train.list_variables",
"tensorflow.floor",
"tensorflow.io.gfile.rmtree",
"tensorflow.nn.relu",
"tensorflow.summary.scalar",
"tensorflow.summary.create_file_writer",
"tensorflow.compat.v1.variable_scope",
"tensorflow.random.uniform",
"tensorflow.compat.v1.train.MomentumOptimizer",
"tensorflow.io.gfile.makedirs",
"tensorflow.io.gfile.exists",
"tensorflow.math.maximum",
"tensorflow.compat.v1.keras.layers.enable_v2_dtype_behavior",
"tensorflow.math.square",
"tensorflow.compat.v1.get_collection",
"tensorflow.keras.mixed_precision.set_global_policy",
"tensorflow.compat.v1.train.generate_checkpoint_state_proto",
"tensorflow.compat.v1.tpu.cross_replica_sum",
"tensorflow.summary.image",
"tensorflow.io.gfile.glob",
"tensorflow.compat.v1.train.GradientDescentOptimizer",
"tensorflow.cond",
"tensorflow.python.tpu.tpu_function.get_tpu_context",
"tensorflow.nn.relu6",
"tensorflow.compat.v1.tpu.bfloat16_scope",
"tensorflow.math.softplus",
"tensorflow.keras.optimizers.schedules.ExponentialDecay",
"tensorflow.nn.selu",
"tensorflow.nn.elu",
"tensorflow.summary.record_if",
"tensorflow.reduce_mean"
]
]
|
sangyongjeong1604/safegail | [
"76828169fbf1f9dce7bcc7fc03638abc6ef7a425"
]
| [
"reference_src/4.windowsub0406/model.py"
]
| [
"import numpy as np\nimport tensorflow as tf\nimport csv\nimport matplotlib.pyplot as plt\nimport os, sys\nimport cv2\nfrom sklearn.model_selection import train_test_split\nfrom keras.models import Sequential, Model\nfrom keras.layers.core import Dense, Dropout, Activation,Lambda\nfrom keras.optimizers import Adam\nfrom keras.utils import np_utils\nfrom keras.layers import Convolution2D, MaxPooling2D, Flatten, Input, ELU\nfrom keras import initializations\nfrom keras.models import load_model, model_from_json\nfrom keras.layers.normalization import BatchNormalization\nfrom sklearn.utils import shuffle\nfrom keras import backend as K\nimport json\nimport gc\n\ncsv_path = 'driving_log.csv' # my data (fantastic graphic mode)\ncsv_path1 = 'data/driving_log.csv' # udacity data (fastest graphic mode)\n\ncenter_db, left_db, right_db, steer_db = [], [], [], []\nRows, Cols = 64, 64\noffset = 0.22\n\n# read csv file\nwith open(csv_path1) as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n if float(row['steering']) != 0.0:\n center_db.append(row['center'])\n left_db.append(row['left'].strip())\n right_db.append(row['right'].strip())\n steer_db.append(float(row['steering']))\n else:\n prob = np.random.uniform()\n if prob <= 0.15:\n center_db.append(row['center'])\n left_db.append(row['left'].strip())\n right_db.append(row['right'].strip())\n steer_db.append(float(row['steering']))\n\n# shuffle a dataset\ncenter_db, left_db, right_db, steer_db = shuffle(center_db, left_db, right_db, steer_db)\n\n# split train & valid data\nimg_train, img_valid, steer_train, steer_valid = train_test_split(center_db, steer_db, test_size=0.1, random_state=42)\n\nplt.hist(steer_db, bins= 50, color= 'orange')\nplt.xlabel('steering value')\nplt.ylabel('counts')\n# plt.show()\n\ndef select_img(center, left, right, steer, num, offsets=0.22):\n \"\"\"\n randomly select among center, left, right images\n\n add ±0.22 to left, right steering angle.\n couldn't find exact left, right steering angle by using geometric method because we didn't have enough information.\n \"\"\"\n rand = np.random.randint(3)\n\n if rand == 0:\n image, steering = cv2.imread(center[num]), steer[num]\n elif rand == 1:\n image, steering = cv2.imread(left[num]), steer[num] + offsets\n elif rand == 2:\n image, steering = cv2.imread(right[num]), steer[num] - offsets\n if abs(steering) > 1:\n steering = -1 if (steering < 0) else 1\n\n return image, steering\n\ndef valid_img(valid_image, valid_steer, num):\n \"\"\" using only center image for validation \"\"\"\n steering = valid_steer[num]\n image = cv2.imread(valid_image[num])\n return image, steering\n\ndef crop_img(image):\n \"\"\" crop unnecessary parts \"\"\"\n cropped_img = image[63:136, 0:319]\n resized_img = cv2.resize(cropped_img, (Cols, Rows), cv2.INTER_AREA)\n img = cv2.cvtColor(resized_img, cv2.COLOR_BGR2RGB)\n return resized_img\n\ndef shift_img(image, steer):\n \"\"\"\n randomly shift image horizontally\n add proper steering angle to each image\n \"\"\"\n max_shift = 55\n max_ang = 0.14 # ang_per_pixel = 0.0025\n\n rows, cols, _ = image.shape\n\n random_x = np.random.randint(-max_shift, max_shift + 1)\n dst_steer = steer + (random_x / max_shift) * max_ang\n if abs(dst_steer) > 1:\n dst_steer = -1 if (dst_steer < 0) else 1\n\n mat = np.float32([[1, 0, random_x], [0, 1, 0]])\n dst_img = cv2.warpAffine(image, mat, (cols, rows))\n return dst_img, dst_steer\n\ndef brightness_img(image):\n \"\"\"\n randomly change brightness by converting Y value\n \"\"\"\n br_img = cv2.cvtColor(image, 
cv2.COLOR_RGB2HSV)\n coin = np.random.randint(2)\n if coin == 0:\n random_bright = 0.2 + np.random.uniform(0.2, 0.6)\n br_img[:, :, 2] = br_img[:, :, 2] * random_bright\n br_img = cv2.cvtColor(br_img, cv2.COLOR_HSV2RGB)\n return br_img\n\ndef generate_shadow(image, min_alpha=0.5, max_alpha = 0.75):\n \"\"\"generate random shadow in random region\"\"\"\n\n top_x, bottom_x = np.random.randint(0, Cols, 2)\n coin = np.random.randint(2)\n rows, cols, _ = image.shape\n shadow_img = image.copy()\n if coin == 0:\n rand = np.random.randint(2)\n vertices = np.array([[(50, 65), (45, 0), (145, 0), (150, 65)]], dtype=np.int32)\n if rand == 0:\n vertices = np.array([[top_x, 0], [0, 0], [0, rows], [bottom_x, rows]], dtype=np.int32)\n elif rand == 1:\n vertices = np.array([[top_x, 0], [cols, 0], [cols, rows], [bottom_x, rows]], dtype=np.int32)\n mask = image.copy()\n channel_count = image.shape[2] # i.e. 3 or 4 depending on your image\n ignore_mask_color = (0,) * channel_count\n cv2.fillPoly(mask, [vertices], ignore_mask_color)\n rand_alpha = np.random.uniform(min_alpha, max_alpha)\n cv2.addWeighted(mask, rand_alpha, image, 1 - rand_alpha, 0., shadow_img)\n\n return shadow_img\n\ndef flip_img(image, steering):\n \"\"\" randomly flip image to gain right turn data (track1 is biaed in left turn) \"\"\"\n flip_image = image.copy()\n flip_steering = steering\n num = np.random.randint(2)\n if num == 0:\n flip_image, flip_steering = cv2.flip(image, 1), -steering\n return flip_image, flip_steering\n\ndef network_model():\n \"\"\"\n designed with 4 convolutional layer & 3 fully connected layer\n weight init : glorot_uniform\n activation func : relu\n pooling : maxpooling\n used dropout\n \"\"\"\n\n model = Sequential()\n model.add(Lambda(lambda x: x / 127.5 - 1.0, input_shape=(Rows, Cols, 3)))\n model.add(Convolution2D(32, 3, 3, border_mode='same', subsample=(2, 2), activation='relu', name='Conv1'))\n #model.add(BatchNormalization())\n model.add(MaxPooling2D(pool_size=(2, 2), strides=None, border_mode='same'))\n model.add(Convolution2D(64, 3, 3, border_mode='same', subsample=(2, 2), activation='relu', name='Conv2'))\n #model.add(BatchNormalization())\n model.add(MaxPooling2D(pool_size=(2, 2), strides=None, border_mode='same'))\n model.add(Convolution2D(128, 3, 3, border_mode='same', subsample=(1, 1), activation='relu', name='Conv3'))\n model.add(MaxPooling2D(pool_size=(2, 2), strides=None, border_mode='same'))\n #model.add(BatchNormalization())\n model.add(Convolution2D(128, 2, 2, border_mode='same', subsample=(1, 1), activation='relu', name='Conv4'))\n #model.add(BatchNormalization())\n model.add(Flatten())\n model.add(Dropout(0.2))\n model.add(Dense(128, activation='relu', name='FC1'))\n model.add(Dropout(0.5))\n model.add(Dense(128, activation='relu', name='FC2'))\n model.add(Dropout(0.5))\n model.add(Dense(64, activation='relu', name='FC3'))\n model.add(Dense(1))\n model.summary()\n return model\n\ndef generate_train(center, left, right, steer):\n \"\"\"\n data augmentation\n transformed image & crop\n \"\"\"\n\n num = np.random.randint(0, len(steer))\n # to avoid bias in straight angle\n #bal = True\n #while bal:\n # num = np.random.randint(0, len(steer))\n # check_steer = steer[num]\n # if check_steer == 0:\n # rand = np.random.uniform()\n # if rand <= 0.25:\n # bal = False\n # else:\n # bal = False\n\n image, steering = select_img(center, left, right, steer, num, offset)\n\n image, steering = shift_img(image, steering)\n image, steering = flip_img(image, steering)\n image = brightness_img(image)\n # image = 
generate_shadow(image)\n image = crop_img(image)\n return image, steering\n\ndef generate_valid(img_valid, steer_valid):\n \"\"\" generate validation set \"\"\"\n img_set = np.zeros((len(img_valid), Rows, Cols, 3))\n steer_set = np.zeros(len(steer_valid))\n\n for i in range(len(img_valid)):\n img, steer = valid_img(img_valid, steer_valid, i)\n img_set[i] = crop_img(img)\n\n steer_set[i] = steer\n return img_set, steer_set\n\ndef generate_train_batch(center, left, right, steering, batch_size):\n \"\"\" compose training batch set \"\"\"\n image_set = np.zeros((batch_size, Rows, Cols, 3))\n steering_set = np.zeros(batch_size)\n\n while 1:\n for i in range(batch_size):\n img, steer = generate_train(center, left, right, steering)\n image_set[i] = img\n steering_set[i] = steer\n yield image_set, steering_set\n\nbatch_size = 256\nepoch = 10\n\ntrain_generator = generate_train_batch(center_db, left_db, right_db, steer_db, batch_size)\nimage_val, steer_val = generate_valid(img_valid, steer_valid)\n\nmodel = network_model()\n\nadam = Adam(lr=1e-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)\nmodel.compile(optimizer=adam, loss='mse')\n\nmodel_json = 'model.json'\nmodel_weights = 'model.h5'\n\nhistory = model.fit_generator(train_generator, samples_per_epoch=20480, nb_epoch=epoch,\n validation_data=(image_val, steer_val), verbose=1)\n\njson_string = model.to_json()\n\ntry:\n os.remove(model_json)\n os.remove(model_weights)\nexcept OSError:\n pass\n\nwith open(model_json, 'w') as jfile:\n json.dump(json_string, jfile)\nmodel.save_weights(model_weights)\n\n# to avoid \" 'NoneType' object has no attribute 'TF_DeleteStatus' \" error\ngc.collect()\nK.clear_session()\n"
]
| [
[
"numpy.array",
"numpy.zeros",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.hist",
"numpy.float32",
"numpy.random.randint",
"numpy.random.uniform",
"matplotlib.pyplot.ylabel",
"sklearn.model_selection.train_test_split",
"sklearn.utils.shuffle"
]
]
|
Unity05/IronyDetection | [
"02dcf3d992efe35bcf23f9c724a4627778659fa3"
]
| [
"src/audio_streaming.py"
]
| [
"import pyaudio\nimport struct\nimport numpy as np\n\n\ndef audio_streaming(output_queue):\n CHUNK = 1024\n SAMPLE_FORMAT = pyaudio.paInt16\n # SAMPLE_FORMAT = pyaudio.paFloat32\n CHANNELS = 1\n RATE = 16000\n\n REQUIRED_SILENCE_LENGTH = (RATE / CHUNK) * 0.3\n REQUIRED_SILENCE_LENGTH_FOR_SHUTDOWN = 50 * REQUIRED_SILENCE_LENGTH\n\n p = pyaudio.PyAudio()\n\n print('Recording.')\n\n stream = p.open(\n format=SAMPLE_FORMAT,\n channels=CHANNELS,\n rate=RATE,\n frames_per_buffer=CHUNK,\n input=True\n )\n\n SILENCE_COUNTER = 0\n save_frames = []\n last_frame = []\n i = 0\n while SILENCE_COUNTER < REQUIRED_SILENCE_LENGTH_FOR_SHUTDOWN:\n data = stream.read(CHUNK)\n data_int = struct.unpack(str(CHUNK) + 'h', data)\n frames = list(data_int)\n if np.mean(np.absolute(data_int)) < 100:\n SILENCE_COUNTER += 1\n if save_frames != []:\n if len(save_frames) < 1500:\n save_frames = []\n else:\n save_frames += frames\n else:\n last_frame = frames\n else:\n if save_frames == []:\n save_frames += last_frame\n save_frames += frames\n SILENCE_COUNTER = 0\n if SILENCE_COUNTER >= REQUIRED_SILENCE_LENGTH:\n if i > (REQUIRED_SILENCE_LENGTH + 1):\n output_queue.put(save_frames)\n save_frames = []\n i = 0\n i += 1\n\n stream.stop_stream()\n stream.close()\n\n p.terminate()\n\n print('Finished recording.')\n\n return\n"
]
| [
[
"numpy.absolute"
]
]
|
CherokeeLanguage/IMS-Toucan | [
"5d48d9ad371b7564abc807078ecde9532e5320d6",
"5d48d9ad371b7564abc807078ecde9532e5320d6"
]
| [
"InferenceInterfaces/InferenceFastSpeech2.py",
"TrainingInterfaces/TrainingPipelines/pretrain_aligner_chr.py"
]
| [
"import itertools\nimport os\n\nimport librosa.display as lbd\nimport matplotlib.pyplot as plt\nimport noisereduce\nimport sounddevice\nimport soundfile\nimport torch\n\nfrom InferenceInterfaces.InferenceArchitectures.InferenceFastSpeech2 import FastSpeech2\nfrom InferenceInterfaces.InferenceArchitectures.InferenceHiFiGAN import HiFiGANGenerator\nfrom Preprocessing.ArticulatoryCombinedTextFrontend import ArticulatoryCombinedTextFrontend\nfrom Preprocessing.ArticulatoryCombinedTextFrontend import get_language_id\nfrom Preprocessing.ProsodicConditionExtractor import ProsodicConditionExtractor\n\n\nclass InferenceFastSpeech2(torch.nn.Module):\n\n def __init__(self, device=\"cpu\", model_name=\"Meta\", language=\"en\", noise_reduce=False):\n super().__init__()\n self.device = device\n self.text2phone = ArticulatoryCombinedTextFrontend(language=language, add_silence_to_end=True)\n checkpoint = torch.load(os.path.join(\"Models\", f\"FastSpeech2_{model_name}\", \"best.pt\"), map_location='cpu')\n self.use_lang_id = True\n try:\n self.phone2mel = FastSpeech2(weights=checkpoint[\"model\"]).to(torch.device(device)) # multi speaker multi language\n except RuntimeError:\n try:\n self.use_lang_id = False\n self.phone2mel = FastSpeech2(weights=checkpoint[\"model\"], lang_embs=None).to(torch.device(device)) # multi speaker single language\n except RuntimeError:\n self.phone2mel = FastSpeech2(weights=checkpoint[\"model\"], lang_embs=None, utt_embed_dim=None).to(torch.device(device)) # single speaker\n self.mel2wav = HiFiGANGenerator(path_to_weights=os.path.join(\"Models\", \"HiFiGAN_combined\", \"best.pt\")).to(torch.device(device))\n self.default_utterance_embedding = checkpoint[\"default_emb\"].to(self.device)\n self.phone2mel.eval()\n self.mel2wav.eval()\n if self.use_lang_id:\n self.lang_id = get_language_id(language)\n else:\n self.lang_id = None\n self.to(torch.device(device))\n self.noise_reduce = noise_reduce\n if self.noise_reduce:\n self.prototypical_noise = None\n self.update_noise_profile()\n\n def set_utterance_embedding(self, path_to_reference_audio):\n wave, sr = soundfile.read(path_to_reference_audio)\n self.default_utterance_embedding = ProsodicConditionExtractor(sr=sr).extract_condition_from_reference_wave(wave).to(self.device)\n if self.noise_reduce:\n self.update_noise_profile()\n\n def update_noise_profile(self):\n self.noise_reduce = False\n self.prototypical_noise = self(\"~.\" * 100, input_is_phones=True).cpu().numpy()\n self.noise_reduce = True\n\n def set_language(self, lang_id):\n \"\"\"\n The id parameter actually refers to the shorthand. 
This has become ambiguous with the introduction of the actual language IDs\n \"\"\"\n self.text2phone = ArticulatoryCombinedTextFrontend(language=lang_id, add_silence_to_end=True)\n if self.use_lang_id:\n self.lang_id = get_language_id(lang_id).to(self.device)\n else:\n self.lang_id = None\n\n def forward(self, text, view=False, durations=None, pitch=None, energy=None, input_is_phones=False):\n with torch.inference_mode():\n phones = self.text2phone.string_to_tensor(text, input_phonemes=input_is_phones).to(torch.device(self.device))\n mel, durations, pitch, energy = self.phone2mel(phones,\n return_duration_pitch_energy=True,\n utterance_embedding=self.default_utterance_embedding,\n durations=durations,\n pitch=pitch,\n energy=energy,\n lang_id=self.lang_id)\n mel = mel.transpose(0, 1)\n wave = self.mel2wav(mel)\n if view:\n from Utility.utils import cumsum_durations\n fig, ax = plt.subplots(nrows=2, ncols=1)\n ax[0].plot(wave.cpu().numpy())\n lbd.specshow(mel.cpu().numpy(),\n ax=ax[1],\n sr=16000,\n cmap='GnBu',\n y_axis='mel',\n x_axis=None,\n hop_length=256)\n ax[0].yaxis.set_visible(False)\n ax[1].yaxis.set_visible(False)\n duration_splits, label_positions = cumsum_durations(durations.cpu().numpy())\n ax[1].set_xticks(duration_splits, minor=True)\n ax[1].xaxis.grid(True, which='minor')\n ax[1].set_xticks(label_positions, minor=False)\n ax[1].set_xticklabels(self.text2phone.get_phone_string(text))\n ax[0].set_title(text)\n plt.subplots_adjust(left=0.05, bottom=0.1, right=0.95, top=.9, wspace=0.0, hspace=0.0)\n plt.show()\n if self.noise_reduce:\n wave = torch.tensor(noisereduce.reduce_noise(y=wave.cpu().numpy(), y_noise=self.prototypical_noise, sr=48000, stationary=True), device=self.device)\n return wave\n\n def read_to_file(self, text_list, file_location, silent=False, dur_list=None, pitch_list=None, energy_list=None):\n \"\"\"\n Args:\n silent: Whether to be verbose about the process\n text_list: A list of strings to be read\n file_location: The path and name of the file it should be saved to\n energy_list: list of energy tensors to be used for the texts\n pitch_list: list of pitch tensors to be used for the texts\n dur_list: list of duration tensors to be used for the texts\n \"\"\"\n if not dur_list:\n dur_list = []\n if not pitch_list:\n pitch_list = []\n if not energy_list:\n energy_list = []\n wav = None\n silence = torch.zeros([24000])\n for (text, durations, pitch, energy) in itertools.zip_longest(text_list, dur_list, pitch_list, energy_list):\n if text.strip() != \"\":\n if not silent:\n print(\"Now synthesizing: {}\".format(text))\n if wav is None:\n if durations is not None:\n durations = durations.to(self.device)\n if pitch is not None:\n pitch = pitch.to(self.device)\n if energy is not None:\n energy = energy.to(self.device)\n wav = self(text, durations=durations, pitch=pitch, energy=energy).cpu()\n wav = torch.cat((wav, silence), 0)\n else:\n wav = torch.cat((wav, self(text, durations=durations, #\n pitch=pitch, #\n energy=energy).cpu()), 0)\n wav = torch.cat((wav, silence), 0)\n soundfile.write(file=file_location, data=wav.cpu().numpy(), samplerate=48000)\n\n def read_aloud(self, text, view=False, blocking=False):\n if text.strip() == \"\":\n return\n wav = self(text, view).cpu()\n wav = torch.cat((wav, torch.zeros([24000])), 0)\n if not blocking:\n sounddevice.play(wav.numpy(), samplerate=48000)\n else:\n sounddevice.play(torch.cat((wav, torch.zeros([12000])), 0).numpy(), samplerate=48000)\n sounddevice.wait()\n",
"import os\nimport random\nfrom typing import Tuple\n\nimport torch\nfrom torch.utils.data import ConcatDataset\n\nfrom TrainingInterfaces.Text_to_Spectrogram.AutoAligner.autoaligner_train_loop import train_loop as train_aligner\nfrom Utility.corpus_preparation import prepare_aligner_corpus\nfrom Utility.path_to_transcript_dicts import *\n\n\ndef run(gpu_id, resume_checkpoint, finetune, model_dir, resume):\n if gpu_id == \"cpu\":\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"\"\n device = torch.device(\"cpu\")\n\n else:\n os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"{}\".format(gpu_id)\n device = torch.device(\"cuda\")\n\n torch.manual_seed(131714)\n random.seed(131714)\n torch.random.manual_seed(131714)\n\n print(\"Preparing\")\n langs: List[str] = [\"chr\", \"de\", \"en\", \"fr\", \"nl\", \"ru\"]\n source_base: str = \"/mount/resources/speech/corpora\"\n # Non Cherokee before Cherokee to get better quality voice weights as the default for the model\n sources: List[str] = [\"other-audio-data\", \"cherokee-audio-data\", \"cherokee-audio-data-private\"]\n datasets = list()\n\n for source in sources:\n for lang in langs:\n toucan_file = os.path.join(source_base, source, f\"ims-toucan-{lang}.txt\")\n corpus_dir = os.path.join(\"Corpora\", f\"aligner-{source}-{lang}\")\n path_to_transcript_dict: Dict[str, str] = dict()\n if not os.path.exists(toucan_file):\n continue\n with open(toucan_file, \"r\") as r:\n for line in r:\n line = line.strip()\n parts = line.split(\"|\")\n transcript: str = parts[1]\n wav = os.path.join(source_base, source, parts[0])\n wav = os.path.realpath(wav)\n path_to_transcript_dict[wav] = transcript\n\n max_size: int\n max_size = 8_000\n items: List[Tuple[str, str]] = [*path_to_transcript_dict.items()]\n while len(items) < max_size:\n items.extend(items.copy())\n subset = dict(random.sample(items, max_size))\n datasets.append(prepare_aligner_corpus(transcript_dict=subset,\n corpus_dir=corpus_dir,\n lang=lang,\n device=device,\n loading_processes=8))\n\n train_set = ConcatDataset(datasets)\n save_dir = os.path.join(\"Models\", \"Aligner\")\n os.makedirs(save_dir, exist_ok=True)\n save_dir_aligner = save_dir + \"/aligner\"\n os.makedirs(save_dir_aligner, exist_ok=True)\n\n train_aligner(train_dataset=train_set,\n device=device,\n save_directory=save_dir,\n steps=500_001,\n batch_size=32,\n path_to_checkpoint=resume_checkpoint,\n fine_tune=finetune,\n debug_img_path=save_dir_aligner,\n resume=resume)\n"
]
| [
[
"torch.zeros",
"torch.device",
"torch.cat",
"matplotlib.pyplot.subplots",
"torch.inference_mode",
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplots_adjust"
],
[
"torch.manual_seed",
"torch.utils.data.ConcatDataset",
"torch.random.manual_seed",
"torch.device"
]
]
|
ShkalikovOleh/OptAlg | [
"03399eee50203dcba834a4d9ab48751142a6de2b"
]
| [
"optalg/unconstrained/evolutional/generator/binary.py"
]
| [
"import numpy as np\nfrom .generator_base import Generator\n\n\nclass BinaryGenerator(Generator):\n\n def __init__(self, n_genes: int = 22) -> None:\n super().__init__(n_genes)\n\n def __call__(self, population_size: int, n_variables: int) -> np.ndarray:\n shape = (population_size, n_variables, self._n_genes)\n size = population_size * n_variables * self._n_genes\n return np.random.randint(2, size=size).reshape(shape)\n"
]
| [
[
"numpy.random.randint"
]
]
|
Odin-son/ATSS | [
"e111ee9927b408c5a762d356be4bd08f63f04468"
]
| [
"tests/checkpoint.py"
]
| [
"# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\nfrom collections import OrderedDict\nimport os\nfrom tempfile import TemporaryDirectory\nimport unittest\n\nimport torch\nfrom torch import nn\n\nfrom atss_core.utils.model_serialization import load_state_dict\nfrom atss_core.utils.checkpoint import Checkpointer\n\n\nclass TestCheckpointer(unittest.TestCase):\n def create_model(self):\n return nn.Sequential(nn.Linear(2, 3), nn.Linear(3, 1))\n\n def create_complex_model(self):\n m = nn.Module()\n m.block1 = nn.Module()\n m.block1.layer1 = nn.Linear(2, 3)\n m.layer2 = nn.Linear(3, 2)\n m.res = nn.Module()\n m.res.layer2 = nn.Linear(3, 2)\n\n state_dict = OrderedDict()\n state_dict[\"layer1.weight\"] = torch.rand(3, 2)\n state_dict[\"layer1.bias\"] = torch.rand(3)\n state_dict[\"layer2.weight\"] = torch.rand(2, 3)\n state_dict[\"layer2.bias\"] = torch.rand(2)\n state_dict[\"res.layer2.weight\"] = torch.rand(2, 3)\n state_dict[\"res.layer2.bias\"] = torch.rand(2)\n\n return m, state_dict\n\n def test_from_last_checkpoint_model(self):\n # test that loading works even if they differ by a prefix\n for trained_model, fresh_model in [\n (self.create_model(), self.create_model()),\n (nn.DataParallel(self.create_model()), self.create_model()),\n (self.create_model(), nn.DataParallel(self.create_model())),\n (\n nn.DataParallel(self.create_model()),\n nn.DataParallel(self.create_model()),\n ),\n ]:\n\n with TemporaryDirectory() as f:\n checkpointer = Checkpointer(\n trained_model, save_dir=f, save_to_disk=True\n )\n checkpointer.save(\"checkpoint_file\")\n\n # in the same folder\n fresh_checkpointer = Checkpointer(fresh_model, save_dir=f)\n self.assertTrue(fresh_checkpointer.has_checkpoint())\n self.assertEqual(\n fresh_checkpointer.get_checkpoint_file(),\n os.path.join(f, \"checkpoint_file.pth\"),\n )\n _ = fresh_checkpointer.load()\n\n for trained_p, loaded_p in zip(\n trained_model.parameters(), fresh_model.parameters()\n ):\n # different tensor references\n self.assertFalse(id(trained_p) == id(loaded_p))\n # same content\n self.assertTrue(trained_p.equal(loaded_p))\n\n def test_from_name_file_model(self):\n # test that loading works even if they differ by a prefix\n for trained_model, fresh_model in [\n (self.create_model(), self.create_model()),\n (nn.DataParallel(self.create_model()), self.create_model()),\n (self.create_model(), nn.DataParallel(self.create_model())),\n (\n nn.DataParallel(self.create_model()),\n nn.DataParallel(self.create_model()),\n ),\n ]:\n with TemporaryDirectory() as f:\n checkpointer = Checkpointer(\n trained_model, save_dir=f, save_to_disk=True\n )\n checkpointer.save(\"checkpoint_file\")\n\n # on different folders\n with TemporaryDirectory() as g:\n fresh_checkpointer = Checkpointer(fresh_model, save_dir=g)\n self.assertFalse(fresh_checkpointer.has_checkpoint())\n self.assertEqual(fresh_checkpointer.get_checkpoint_file(), \"\")\n _ = fresh_checkpointer.load(os.path.join(f, \"checkpoint_file.pth\"))\n\n for trained_p, loaded_p in zip(\n trained_model.parameters(), fresh_model.parameters()\n ):\n # different tensor references\n self.assertFalse(id(trained_p) == id(loaded_p))\n # same content\n self.assertTrue(trained_p.equal(loaded_p))\n\n def test_complex_model_loaded(self):\n for add_data_parallel in [False, True]:\n model, state_dict = self.create_complex_model()\n if add_data_parallel:\n model = nn.DataParallel(model)\n\n load_state_dict(model, state_dict)\n for loaded, stored in zip(model.state_dict().values(), state_dict.values()):\n # 
different tensor references\n self.assertFalse(id(loaded) == id(stored))\n # same content\n self.assertTrue(loaded.equal(stored))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
]
| [
[
"torch.nn.Linear",
"torch.rand",
"torch.nn.Module",
"torch.nn.DataParallel"
]
]
|
DJ-LYH/dgl | [
"480a4ae35c84c4497bfa901c25e6e6eca85b67eb"
]
| [
"python/dgl/nn/pytorch/explain/gnnexplainer.py"
]
| [
"\"\"\"Torch Module for GNNExplainer\"\"\"\n# pylint: disable= no-member, arguments-differ, invalid-name\nfrom math import sqrt\nimport torch\n\nfrom torch import nn\nfrom tqdm import tqdm\n\nfrom ....base import NID, EID\nfrom ....subgraph import khop_in_subgraph\n\nclass GNNExplainer(nn.Module):\n r\"\"\"GNNExplainer model from `GNNExplainer: Generating Explanations for\n Graph Neural Networks <https://arxiv.org/abs/1903.03894>`__\n\n It identifies compact subgraph structures and small subsets of node features that play a\n critical role in GNN-based node classification and graph classification.\n\n Parameters\n ----------\n model : nn.Module\n The GNN model to explain.\n\n * The required arguments of its forward function are graph and feat.\n The latter one is for input node features.\n * It should also optionally take an eweight argument for edge weights\n and multiply the messages by it in message passing.\n * The output of its forward function is the logits for the predicted\n node/graph classes.\n\n See also the example in :func:`explain_node` and :func:`explain_graph`.\n num_hops : int\n The number of hops for GNN information aggregation.\n lr : float, optional\n The learning rate to use, default to 0.01.\n num_epochs : int, optional\n The number of epochs to train.\n log : bool, optional\n If True, it will log the computation process, default to True.\n \"\"\"\n\n coeffs = {\n 'edge_size': 0.005,\n 'edge_ent': 1.0,\n 'node_feat_size': 1.0,\n 'node_feat_ent': 0.1\n }\n\n def __init__(self,\n model,\n num_hops,\n lr=0.01,\n num_epochs=100,\n log=True):\n super(GNNExplainer, self).__init__()\n self.model = model\n self.num_hops = num_hops\n self.lr = lr\n self.num_epochs = num_epochs\n self.log = log\n\n def _init_masks(self, graph, feat):\n r\"\"\"Initialize learnable feature and edge mask.\n\n Parameters\n ----------\n graph : DGLGraph\n Input graph.\n feat : Tensor\n Input node features.\n\n Returns\n -------\n feat_mask : Tensor\n Feature mask of shape :math:`(1, D)`, where :math:`D`\n is the feature size.\n edge_mask : Tensor\n Edge mask of shape :math:`(E)`, where :math:`E` is the\n number of edges.\n \"\"\"\n num_nodes, feat_size = feat.size()\n num_edges = graph.num_edges()\n device = feat.device\n\n std = 0.1\n feat_mask = nn.Parameter(torch.randn(1, feat_size, device=device) * std)\n\n std = nn.init.calculate_gain('relu') * sqrt(2.0 / (2 * num_nodes))\n edge_mask = nn.Parameter(torch.randn(num_edges, device=device) * std)\n\n return feat_mask, edge_mask\n\n def _loss_regularize(self, loss, feat_mask, edge_mask):\n r\"\"\"Add regularization terms to the loss.\n\n Parameters\n ----------\n loss : Tensor\n Loss value.\n feat_mask : Tensor\n Feature mask of shape :math:`(1, D)`, where :math:`D`\n is the feature size.\n edge_mask : Tensor\n Edge mask of shape :math:`(E)`, where :math:`E`\n is the number of edges.\n\n Returns\n -------\n Tensor\n Loss value with regularization terms added.\n \"\"\"\n # epsilon for numerical stability\n eps = 1e-15\n\n edge_mask = edge_mask.sigmoid()\n # Edge mask sparsity regularization\n loss = loss + self.coeffs['edge_size'] * torch.sum(edge_mask)\n # Edge mask entropy regularization\n ent = - edge_mask * torch.log(edge_mask + eps) - \\\n (1 - edge_mask) * torch.log(1 - edge_mask + eps)\n loss = loss + self.coeffs['edge_ent'] * ent.mean()\n\n feat_mask = feat_mask.sigmoid()\n # Feature mask sparsity regularization\n loss = loss + self.coeffs['node_feat_size'] * torch.mean(feat_mask)\n # Feature mask entropy regularization\n ent = -feat_mask * 
torch.log(feat_mask + eps) - \\\n (1 - feat_mask) * torch.log(1 - feat_mask + eps)\n loss = loss + self.coeffs['node_feat_ent'] * ent.mean()\n\n return loss\n\n def explain_node(self, node_id, graph, feat, **kwargs):\n r\"\"\"Learn and return a node feature mask and subgraph that play a\n crucial role to explain the prediction made by the GNN for node\n :attr:`node_id`.\n\n Parameters\n ----------\n node_id : int\n The node to explain.\n graph : DGLGraph\n A homogeneous graph.\n feat : Tensor\n The input feature of shape :math:`(N, D)`. :math:`N` is the\n number of nodes, and :math:`D` is the feature size.\n kwargs : dict\n Additional arguments passed to the GNN model. Tensors whose\n first dimension is the number of nodes or edges will be\n assumed to be node/edge features.\n\n Returns\n -------\n new_node_id : Tensor\n The new ID of the input center node.\n sg : DGLGraph\n The subgraph induced on the k-hop in-neighborhood of :attr:`node_id`.\n feat_mask : Tensor\n Learned feature importance mask of shape :math:`(D)`, where :math:`D` is the\n feature size. The values are within range :math:`(0, 1)`.\n The higher, the more important.\n edge_mask : Tensor\n Learned importance mask of the edges in the subgraph, which is a tensor\n of shape :math:`(E)`, where :math:`E` is the number of edges in the\n subgraph. The values are within range :math:`(0, 1)`.\n The higher, the more important.\n\n Examples\n --------\n\n >>> import dgl\n >>> import dgl.function as fn\n >>> import torch\n >>> import torch.nn as nn\n >>> from dgl.data import CoraGraphDataset\n >>> from dgl.nn import GNNExplainer\n\n >>> # Load dataset\n >>> data = CoraGraphDataset()\n >>> g = data[0]\n >>> features = g.ndata['feat']\n >>> labels = g.ndata['label']\n >>> train_mask = g.ndata['train_mask']\n\n >>> # Define a model\n >>> class Model(nn.Module):\n ... def __init__(self, in_feats, out_feats):\n ... super(Model, self).__init__()\n ... self.linear = nn.Linear(in_feats, out_feats)\n ...\n ... def forward(self, graph, feat, eweight=None):\n ... with graph.local_scope():\n ... feat = self.linear(feat)\n ... graph.ndata['h'] = feat\n ... if eweight is None:\n ... graph.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h'))\n ... else:\n ... graph.edata['w'] = eweight\n ... graph.update_all(fn.u_mul_e('h', 'w', 'm'), fn.sum('m', 'h'))\n ... return graph.ndata['h']\n\n >>> # Train the model\n >>> model = Model(features.shape[1], data.num_classes)\n >>> criterion = nn.CrossEntropyLoss()\n >>> optimizer = torch.optim.Adam(model.parameters(), lr=1e-2)\n >>> for epoch in range(10):\n ... logits = model(g, features)\n ... loss = criterion(logits[train_mask], labels[train_mask])\n ... optimizer.zero_grad()\n ... loss.backward()\n ... 
optimizer.step()\n\n >>> # Explain the prediction for node 10\n >>> explainer = GNNExplainer(model, num_hops=1)\n >>> new_center, sg, feat_mask, edge_mask = explainer.explain_node(10, g, features)\n >>> new_center\n tensor([1])\n >>> sg.num_edges()\n 12\n >>> # Old IDs of the nodes in the subgraph\n >>> sg.ndata[dgl.NID]\n tensor([ 9, 10, 11, 12])\n >>> # Old IDs of the edges in the subgraph\n >>> sg.edata[dgl.EID]\n tensor([51, 53, 56, 48, 52, 57, 47, 50, 55, 46, 49, 54])\n >>> feat_mask\n tensor([0.2638, 0.2738, 0.3039, ..., 0.2794, 0.2643, 0.2733])\n >>> edge_mask\n tensor([0.0937, 0.1496, 0.8287, 0.8132, 0.8825, 0.8515, 0.8146, 0.0915, 0.1145,\n 0.9011, 0.1311, 0.8437])\n \"\"\"\n self.model.eval()\n num_nodes = graph.num_nodes()\n num_edges = graph.num_edges()\n\n # Extract node-centered k-hop subgraph and\n # its associated node and edge features.\n sg, inverse_indices = khop_in_subgraph(graph, node_id, self.num_hops)\n sg_nodes = sg.ndata[NID].long()\n sg_edges = sg.edata[EID].long()\n feat = feat[sg_nodes]\n for key, item in kwargs.items():\n if torch.is_tensor(item) and item.size(0) == num_nodes:\n item = item[sg_nodes]\n elif torch.is_tensor(item) and item.size(0) == num_edges:\n item = item[sg_edges]\n kwargs[key] = item\n\n # Get the initial prediction.\n with torch.no_grad():\n logits = self.model(graph=sg, feat=feat, **kwargs)\n pred_label = logits.argmax(dim=-1)\n\n feat_mask, edge_mask = self._init_masks(sg, feat)\n\n params = [feat_mask, edge_mask]\n optimizer = torch.optim.Adam(params, lr=self.lr)\n\n if self.log:\n pbar = tqdm(total=self.num_epochs)\n pbar.set_description('Explain node {node_id}')\n\n for _ in range(self.num_epochs):\n optimizer.zero_grad()\n h = feat * feat_mask.sigmoid()\n logits = self.model(graph=sg, feat=h,\n eweight=edge_mask.sigmoid(), **kwargs)\n log_probs = logits.log_softmax(dim=-1)\n loss = -log_probs[inverse_indices, pred_label[inverse_indices]]\n loss = self._loss_regularize(loss, feat_mask, edge_mask)\n loss.backward()\n optimizer.step()\n\n if self.log:\n pbar.update(1)\n\n if self.log:\n pbar.close()\n\n feat_mask = feat_mask.detach().sigmoid().squeeze()\n edge_mask = edge_mask.detach().sigmoid()\n\n return inverse_indices, sg, feat_mask, edge_mask\n\n def explain_graph(self, graph, feat, **kwargs):\n r\"\"\"Learn and return a node feature mask and an edge mask that play a\n crucial role to explain the prediction made by the GNN for a graph.\n\n Parameters\n ----------\n graph : DGLGraph\n A homogeneous graph.\n feat : Tensor\n The input feature of shape :math:`(N, D)`. :math:`N` is the\n number of nodes, and :math:`D` is the feature size.\n kwargs : dict\n Additional arguments passed to the GNN model. Tensors whose\n first dimension is the number of nodes or edges will be\n assumed to be node/edge features.\n\n Returns\n -------\n feat_mask : Tensor\n Learned feature importance mask of shape :math:`(D)`, where :math:`D` is the\n feature size. The values are within range :math:`(0, 1)`.\n The higher, the more important.\n edge_mask : Tensor\n Learned importance mask of the edges in the graph, which is a tensor\n of shape :math:`(E)`, where :math:`E` is the number of edges in the\n graph. The values are within range :math:`(0, 1)`. 
The higher,\n the more important.\n\n Examples\n --------\n\n >>> import dgl.function as fn\n >>> import torch\n >>> import torch.nn as nn\n >>> from dgl.data import GINDataset\n >>> from dgl.dataloading import GraphDataLoader\n >>> from dgl.nn import AvgPooling, GNNExplainer\n\n >>> # Load dataset\n >>> data = GINDataset('MUTAG', self_loop=True)\n >>> dataloader = GraphDataLoader(data, batch_size=64, shuffle=True)\n\n >>> # Define a model\n >>> class Model(nn.Module):\n ... def __init__(self, in_feats, out_feats):\n ... super(Model, self).__init__()\n ... self.linear = nn.Linear(in_feats, out_feats)\n ... self.pool = AvgPooling()\n ...\n ... def forward(self, graph, feat, eweight=None):\n ... with graph.local_scope():\n ... feat = self.linear(feat)\n ... graph.ndata['h'] = feat\n ... if eweight is None:\n ... graph.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h'))\n ... else:\n ... graph.edata['w'] = eweight\n ... graph.update_all(fn.u_mul_e('h', 'w', 'm'), fn.sum('m', 'h'))\n ... return self.pool(graph, graph.ndata['h'])\n\n >>> # Train the model\n >>> feat_size = data[0][0].ndata['attr'].shape[1]\n >>> model = Model(feat_size, data.gclasses)\n >>> criterion = nn.CrossEntropyLoss()\n >>> optimizer = torch.optim.Adam(model.parameters(), lr=1e-2)\n >>> for bg, labels in dataloader:\n ... logits = model(bg, bg.ndata['attr'])\n ... loss = criterion(logits, labels)\n ... optimizer.zero_grad()\n ... loss.backward()\n ... optimizer.step()\n\n >>> # Explain the prediction for graph 0\n >>> explainer = GNNExplainer(model, num_hops=1)\n >>> g, _ = data[0]\n >>> features = g.ndata['attr']\n >>> feat_mask, edge_mask = explainer.explain_graph(g, features)\n >>> feat_mask\n tensor([0.2362, 0.2497, 0.2622, 0.2675, 0.2649, 0.2962, 0.2533])\n >>> edge_mask\n tensor([0.2154, 0.2235, 0.8325, ..., 0.7787, 0.1735, 0.1847])\n \"\"\"\n self.model.eval()\n\n # Get the initial prediction.\n with torch.no_grad():\n logits = self.model(graph=graph, feat=feat, **kwargs)\n pred_label = logits.argmax(dim=-1)\n\n feat_mask, edge_mask = self._init_masks(graph, feat)\n\n params = [feat_mask, edge_mask]\n optimizer = torch.optim.Adam(params, lr=self.lr)\n\n if self.log:\n pbar = tqdm(total=self.num_epochs)\n pbar.set_description('Explain graph')\n\n for _ in range(self.num_epochs):\n optimizer.zero_grad()\n h = feat * feat_mask.sigmoid()\n logits = self.model(graph=graph, feat=h,\n eweight=edge_mask.sigmoid(), **kwargs)\n log_probs = logits.log_softmax(dim=-1)\n loss = -log_probs[0, pred_label[0]]\n loss = self._loss_regularize(loss, feat_mask, edge_mask)\n loss.backward()\n optimizer.step()\n\n if self.log:\n pbar.update(1)\n\n if self.log:\n pbar.close()\n\n feat_mask = feat_mask.detach().sigmoid().squeeze()\n edge_mask = edge_mask.detach().sigmoid()\n\n return feat_mask, edge_mask\n"
]
| [
[
"torch.is_tensor",
"torch.no_grad",
"torch.optim.Adam",
"torch.nn.init.calculate_gain",
"torch.mean",
"torch.log",
"torch.randn",
"torch.sum"
]
]
|
gustavotcustodio/AutomaticSummarization | [
"da0111c84eb2f23c7c0380ac606b622272b335c1"
]
| [
"RefreshWordEmbeddings/my_flags.py"
]
| [
"####################################\n# Author: Shashi Narayan\n# Date: September 2016\n# Project: Document Summarization\n# H2020 Summa Project\n####################################\n\n\"\"\"\nMy flags\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport tensorflow as tf\n\n########### ============ Set Global FLAGS ============= #############\n\n### Temporary Directory to avoid conflict with others\n\n# VERY IMPORTANT # : SET this directory as TMP by exporting it\n\ntf.app.flags.DEFINE_string(\"tmp_directory\", \"/tmp\", \"Temporary directory used by rouge code.\")\n\ntf.app.flags.DEFINE_string(\"use_gpu\", \"/gpu:3\", \"Specify which gpu to use.\")\n\n### Global setting\n\ntf.app.flags.DEFINE_string(\"exp_mode\", \"train\", \"Training 'train' or Test 'test' Mode.\")\n\n# 100\ntf.app.flags.DEFINE_integer(\"model_to_load\", 10, \"Model to load for testing.\")\n\ntf.app.flags.DEFINE_boolean(\"use_fp16\", False, \"Use fp16 instead of fp32.\")\n\ntf.app.flags.DEFINE_string(\"data_mode\", \"paperlist\", \"cnn or dailymail or cnn-dailymail\")\n\n### Pretrained wordembeddings features\n\ntf.app.flags.DEFINE_integer(\"wordembed_size\", 200, \"Size of wordembedding (<= 200).\")\n\ntf.app.flags.DEFINE_boolean(\"trainable_wordembed\", False, \"Is wordembedding trainable?\")\n# UNK and PAD are always trainable and non-trainable respectively.\n\n### Sentence level features\n\n# 1702 pra n esquecer\ntf.app.flags.DEFINE_integer(\"max_sent_length\", 100, \"Maximum sentence length (word per sent.)\")\n\n# 350\ntf.app.flags.DEFINE_integer(\"sentembed_size\", 350, \"Size of sentence embedding.\")\n\n### Document level features\n\ntf.app.flags.DEFINE_integer(\"max_doc_length\", 811, \"Maximum Document length (sent. per document).\")\n\ntf.app.flags.DEFINE_integer(\"max_title_length\", 0, \"Maximum number of top title to consider.\") # 1\n\ntf.app.flags.DEFINE_integer(\"max_image_length\", 0, \"Maximum number of top image captions to consider.\") # 10\n\ntf.app.flags.DEFINE_integer(\"target_label_size\", 2, \"Size of target label (1/0).\")\n\n### Convolution Layer features\n\n# 7\ntf.app.flags.DEFINE_integer(\"max_filter_length\", 7, \"Maximum filter length.\")\n# Filter of sizes 1 to max_filter_length will be used, each producing\n# one vector. 1-7 same as Kim and JP. 
max_filter_length <=\n# max_sent_length\n\ntf.app.flags.DEFINE_string(\"handle_filter_output\", \"concat\", \"sum or concat\")\n# If concat, make sure that sentembed_size is multiple of max_filter_length.\n# Sum is JP's model\n\n### LSTM Features\n\ntf.app.flags.DEFINE_integer(\"size\", 150, \"Size of each model layer.\")\n\ntf.app.flags.DEFINE_integer(\"num_layers\", 1, \"Number of layers in the model.\")\n\ntf.app.flags.DEFINE_string(\"lstm_cell\", \"lstm\", \"Type of LSTM Cell: lstm or gru.\")\n\n### Encoder Layer features\n\n# Document Encoder: Unidirectional LSTM-RNNs\ntf.app.flags.DEFINE_boolean(\"doc_encoder_reverse\", True, \"Encoding sentences inorder or revorder.\")\n\n### Extractor Layer features\n\ntf.app.flags.DEFINE_boolean(\"attend_encoder\", False, \"Attend encoder outputs (JP model).\")\n\ntf.app.flags.DEFINE_boolean(\"authorise_gold_label\", True, \"Authorise Gold Label for JP's Model.\")\n\n### Reinforcement Learning\n\ntf.app.flags.DEFINE_boolean(\"rouge_reward_fscore\", True, \"Fscore if true, otherwise recall.\") # Not used, always use fscore\n\ntf.app.flags.DEFINE_integer(\"train_epoch_wce\", 10, \"Number of training epochs per step.\")\n\ntf.app.flags.DEFINE_integer(\"num_sample_rollout\", 1, \"Number of Multiple Oracles Used.\") # default 10\n\n### Training features\n\ntf.app.flags.DEFINE_string(\"train_dir\", \"./train_dir\", \"Training directory.\")\n\ntf.app.flags.DEFINE_float(\"learning_rate\", 0.001, \"Learning rate.\")\n\ntf.app.flags.DEFINE_boolean(\"weighted_loss\", True, \"Weighted loss to ignore padded parts.\")\n\ntf.app.flags.DEFINE_integer(\"batch_size\", 10, \"Batch size to use during training.\")\n\ntf.app.flags.DEFINE_integer(\"training_checkpoint\", 1, \"How many training steps to do per checkpoint.\")\n\n###### Input file addresses: No change needed\n\n# Pretrained wordembeddings data\n\ntf.app.flags.DEFINE_string(\"pretrained_wordembedding\",\n \"./data/1-billion-word-language-modeling-benchmark-r13output.word2vec.vec\",\n \"Pretrained wordembedding file trained on the one million benchmark data.\")\n# Data directory address\n\ntf.app.flags.DEFINE_string(\"preprocessed_data_directory\", \"./data/my_preprocessed\",\n \"Pretrained news articles for various types of word embeddings.\")\n\ntf.app.flags.DEFINE_string(\"gold_summary_directory\",\n \"./data/papers_highlights\",\n \"Gold summary directory.\")\n\ntf.app.flags.DEFINE_string(\"doc_sentence_directory\",\n \"./data/sentences\",\n \"Directory where document sentences are kept.\")\n\n############ Create FLAGS\nFLAGS = tf.app.flags.FLAGS\n"
]
| [
[
"tensorflow.app.flags.DEFINE_float",
"tensorflow.app.flags.DEFINE_integer",
"tensorflow.app.flags.DEFINE_boolean",
"tensorflow.app.flags.DEFINE_string"
]
]
|
Shark-y/qiskit-sdk-py | [
"c1361b823dc1a3fab76545e62975c2afb02e442d",
"c1361b823dc1a3fab76545e62975c2afb02e442d"
]
| [
"qiskit/extensions/quantum_initializer/_initializer.py",
"test/performance/state_tomography.py"
]
| [
"# -*- coding: utf-8 -*-\n\n# Copyright 2017 IBM RESEARCH. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\n\"\"\"\nInitialize qubit registers to desired arbitrary state.\n\"\"\"\n\nimport math\nimport numpy\nimport scipy\n\nfrom qiskit import CompositeGate\nfrom qiskit import Gate\nfrom qiskit import QISKitError\nfrom qiskit import QuantumCircuit\nfrom qiskit.extensions.standard.cx import CnotGate\nfrom qiskit.extensions.standard.ry import RYGate\nfrom qiskit.extensions.standard.rz import RZGate\n\n_EPS = 1e-10 # global variable used to chop very small numbers to zero\n\n\nclass InitializeGate(CompositeGate):\n \"\"\"Complex amplitude initialization.\n\n Class that implements the (complex amplitude) initialization of some\n flexible collection of qubit registers (assuming the qubits are in the\n zero state).\n\n Implements a recursive initialization algorithm including optimizations\n from \"Synthesis of Quantum Logic Circuits\" Shende, Bullock, Markov\n https://arxiv.org/abs/quant-ph/0406176v5\n\n Additionally implements some extra optimizations: remove zero rotations and\n double cnots.`\n\n It inherits from CompositeGate in the same way that the Fredkin (cswap)\n gate does. 
Therefore self.data is the list of gates (in order) that must\n be applied to implement this meta-gate.\n\n param = list of complex amplitudes\n arg = list of qubits\n circ = QuantumCircuit or CompositeGate containing this gate\n \"\"\"\n def __init__(self, param, arg, circ=None):\n \"\"\"Create new initialize composite gate.\"\"\"\n num_qubits = math.log2(len(param))\n\n # Check if param is a power of 2\n if num_qubits == 0 or not num_qubits.is_integer():\n raise QISKitError(\"Desired vector not a positive power of 2.\")\n\n self.num_qubits = int(num_qubits)\n\n # Check if number of desired qubits agrees with available qubits\n if len(arg) != self.num_qubits:\n raise QISKitError(\"Number of complex amplitudes do not correspond \"\n \"to the number of qubits.\")\n\n # Check if probabilities (amplitudes squared) sum to 1\n if not math.isclose(sum(numpy.absolute(param) ** 2), 1.0,\n abs_tol=_EPS):\n raise QISKitError(\"Sum of amplitudes-squared does not equal one.\")\n\n super().__init__(\"init\", param, arg, circ)\n\n # call to generate the circuit that takes the desired vector to zero\n self.gates_to_uncompute()\n # remove zero rotations and double cnots\n self.optimize_gates()\n # invert the circuit to create the desired vector from zero (assuming\n # the qubits are in the zero state)\n self.inverse()\n\n def nth_qubit_from_least_sig_qubit(self, nth):\n \"\"\"\n Return the qubit that is nth away from the least significant qubit\n (LSB), so n=0 corresponds to the LSB.\n \"\"\"\n # if LSB is first (as is the case with the IBM QE) and significance is\n # in order:\n return self.arg[nth]\n # if MSB is first: return self.arg[self.num_qubits - 1 - n]\n # equivalent to self.arg[-(n+1)]\n # to generalize any mapping could be placed here or even taken from\n # the user\n\n def reapply(self, circ):\n \"\"\"Reapply this gate to the corresponding qubits in circ.\"\"\"\n self._modifiers(circ.initialize(self.name, self.param, self.arg))\n\n def gates_to_uncompute(self):\n \"\"\"\n Call to populate the self.data list with gates that takes the\n desired vector to zero.\n \"\"\"\n # kick start the peeling loop\n remaining_param = self.param\n\n for i in range(self.num_qubits):\n # work out which rotations must be done to disentangle the LSB\n # qubit (we peel away one qubit at a time)\n (remaining_param,\n thetas,\n phis) = InitializeGate._rotations_to_disentangle(remaining_param)\n\n # perform the required rotations to decouple the LSB qubit (so that\n # it can be \"factored\" out, leaving a\n # shorter amplitude vector to peel away)\n self._attach(self._multiplex(RZGate, i, phis))\n self._attach(self._multiplex(RYGate, i, thetas))\n\n @staticmethod\n def _rotations_to_disentangle(local_param):\n \"\"\"\n Static internal method to work out Ry and Rz rotation angles used\n to disentangle the LSB qubit.\n These rotations make up the block diagonal matrix U (i.e. multiplexor)\n that disentangles the LSB.\n\n [[Ry(theta_1).Rz(phi_1) 0 . . 0],\n [0 Ry(theta_2).Rz(phi_2) . 
0],\n .\n .\n 0 0 Ry(theta_2^n).Rz(phi_2^n)]]\n \"\"\"\n remaining_vector = []\n thetas = []\n phis = []\n\n param_len = len(local_param)\n\n for i in range(param_len // 2):\n # Ry and Rz rotations to move bloch vector from 0 to \"imaginary\"\n # qubit\n # (imagine a qubit state signified by the amplitudes at index 2*i\n # and 2*(i+1), corresponding to the select qubits of the\n # multiplexor being in state |i>)\n (remains,\n add_theta,\n add_phi) = InitializeGate._bloch_angles(\n local_param[2*i: 2*(i + 1)])\n\n remaining_vector.append(remains)\n\n # rotations for all imaginary qubits of the full vector\n # to move from where it is to zero, hence the negative sign\n thetas.append(-add_theta)\n phis.append(-add_phi)\n\n return remaining_vector, thetas, phis\n\n @staticmethod\n def _bloch_angles(pair_of_complex):\n \"\"\"\n Static internal method to work out rotation to create the passed in\n qubit from the zero vector.\n \"\"\"\n [a_complex, b_complex] = pair_of_complex\n # Force a and b to be complex, as otherwise numpy.angle might fail.\n a_complex = complex(a_complex)\n b_complex = complex(b_complex)\n mag_a = numpy.absolute(a_complex)\n final_r = float(numpy.sqrt(mag_a ** 2 + numpy.absolute(b_complex) ** 2))\n if final_r < _EPS:\n theta = 0\n phi = 0\n final_r = 0\n final_t = 0\n else:\n theta = float(2 * numpy.arccos(mag_a / final_r))\n a_arg = numpy.angle(a_complex)\n b_arg = numpy.angle(b_complex)\n final_t = a_arg + b_arg\n phi = b_arg - a_arg\n\n return final_r * numpy.exp(1.J * final_t/2), theta, phi\n\n def _multiplex(self, bottom_gate, bottom_qubit_index, list_of_angles):\n \"\"\"\n Internal recursive method to create gates to perform rotations on the\n imaginary qubits: works by rotating LSB (and hence ALL imaginary\n qubits) by combo angle and then flipping sign (by flipping the bit,\n hence moving the complex amplitudes) of half the imaginary qubits\n (CNOT) followed by another combo angle on LSB, therefore executing\n conditional (on MSB) rotations, thereby disentangling LSB.\n \"\"\"\n list_len = len(list_of_angles)\n target_qubit = self.nth_qubit_from_least_sig_qubit(bottom_qubit_index)\n\n # Case of no multiplexing = base case for recursion\n if list_len == 1:\n return bottom_gate(list_of_angles[0], target_qubit)\n\n local_num_qubits = int(math.log2(list_len)) + 1\n control_qubit = self.nth_qubit_from_least_sig_qubit(\n local_num_qubits - 1 + bottom_qubit_index)\n\n # calc angle weights, assuming recursion (that is the lower-level\n # requested angles have been correctly implemented by recursion\n angle_weight = scipy.kron([[0.5, 0.5], [0.5, -0.5]],\n numpy.identity(2 ** (local_num_qubits - 2)))\n\n # calc the combo angles\n list_of_angles = (angle_weight * numpy.matrix(\n list_of_angles).transpose()).reshape(-1).tolist()[0]\n\n combine_composite_gates = CompositeGate(\n \"multiplex\" + local_num_qubits.__str__(), [], self.arg)\n\n # recursive step on half the angles fulfilling the above assumption\n combine_composite_gates._attach(\n self._multiplex(bottom_gate, bottom_qubit_index,\n list_of_angles[0:(list_len // 2)]))\n\n # combine_composite_gates.cx(control_qubit,target_qubit) -> does not\n # work as expected because checks circuit\n # so attach CNOT as follows, thereby flipping the LSB qubit\n combine_composite_gates._attach(CnotGate(control_qubit, target_qubit))\n\n # implement extra efficiency from the paper of cancelling adjacent\n # CNOTs (by leaving out last CNOT and reversing (NOT inverting) the\n # second lower-level multiplex)\n sub_gate = self._multiplex(\n 
bottom_gate, bottom_qubit_index, list_of_angles[(list_len // 2):])\n if isinstance(sub_gate, CompositeGate):\n combine_composite_gates._attach(sub_gate.reverse())\n else:\n combine_composite_gates._attach(sub_gate)\n\n # outer multiplex keeps final CNOT, because no adjacent CNOT to cancel\n # with\n if self.num_qubits == local_num_qubits + bottom_qubit_index:\n combine_composite_gates._attach(CnotGate(control_qubit,\n target_qubit))\n\n return combine_composite_gates\n\n @staticmethod\n def chop_num(numb):\n \"\"\"\n Set very small numbers (as defined by global variable _EPS) to zero.\n \"\"\"\n return 0 if abs(numb) < _EPS else numb\n\n\n# ###############################################################\n# Add needed functionality to other classes (it feels\n# weird following the QISKit convention of adding functionality to other\n# classes like this ;),\n# TODO: multiple inheritance might be better?)\n\n\ndef reverse(self):\n \"\"\"\n Reverse (recursively) the sub-gates of this CompositeGate. Note this does\n not invert the gates!\n \"\"\"\n new_data = []\n for gate in reversed(self.data):\n if isinstance(gate, CompositeGate):\n new_data.append(gate.reverse())\n else:\n new_data.append(gate)\n self.data = new_data\n\n # not just a high-level reverse:\n # self.data = [gate for gate in reversed(self.data)]\n\n return self\n\n\nQuantumCircuit.reverse = reverse\nCompositeGate.reverse = reverse\n\n\ndef optimize_gates(self):\n \"\"\"Remove Zero rotations and Double CNOTS.\"\"\"\n self.remove_zero_rotations()\n while self.remove_double_cnots_once():\n pass\n\n\nQuantumCircuit.optimize_gates = optimize_gates\nCompositeGate.optimize_gates = optimize_gates\n\n\ndef remove_zero_rotations(self):\n \"\"\"\n Remove Zero Rotations by looking (recursively) at rotation gates at the\n leaf ends.\n \"\"\"\n # Removed at least one zero rotation.\n zero_rotation_removed = False\n new_data = []\n for gate in self.data:\n if isinstance(gate, CompositeGate):\n zero_rotation_removed |= gate.remove_zero_rotations()\n if gate.data:\n new_data.append(gate)\n else:\n if ((not isinstance(gate, Gate)) or\n (not (gate.name == \"rz\" or gate.name == \"ry\" or\n gate.name == \"rx\") or\n (InitializeGate.chop_num(gate.param[0]) != 0))):\n new_data.append(gate)\n else:\n zero_rotation_removed = True\n\n self.data = new_data\n\n return zero_rotation_removed\n\n\nQuantumCircuit.remove_zero_rotations = remove_zero_rotations\nCompositeGate.remove_zero_rotations = remove_zero_rotations\n\n\ndef number_atomic_gates(self):\n \"\"\"Count the number of leaf gates. 
\"\"\"\n num = 0\n for gate in self.data:\n if isinstance(gate, CompositeGate):\n num += gate.number_atomic_gates()\n else:\n if isinstance(gate, Gate):\n num += 1\n return num\n\n\nQuantumCircuit.number_atomic_gates = number_atomic_gates\nCompositeGate.number_atomic_gates = number_atomic_gates\n\n\ndef remove_double_cnots_once(self):\n \"\"\"\n Remove Double CNOTS paying attention that gates may be neighbours across\n Composite Gate boundaries.\n \"\"\"\n num_high_level_gates = len(self.data)\n\n if num_high_level_gates == 0:\n return False\n else:\n if num_high_level_gates == 1 and isinstance(self.data[0],\n CompositeGate):\n return self.data[0].remove_double_cnots_once()\n\n # Removed at least one double cnot.\n double_cnot_removed = False\n\n # last gate might be composite\n if isinstance(self.data[num_high_level_gates - 1], CompositeGate):\n double_cnot_removed = \\\n double_cnot_removed or\\\n self.data[num_high_level_gates - 1].remove_double_cnots_once()\n\n # don't start with last gate, using reversed so that can del on the go\n for i in reversed(range(num_high_level_gates - 1)):\n if isinstance(self.data[i], CompositeGate):\n double_cnot_removed =\\\n double_cnot_removed \\\n or self.data[i].remove_double_cnots_once()\n left_gate_host = self.data[i].last_atomic_gate_host()\n left_gate_index = -1\n # TODO: consider adding if semantics needed:\n # to remove empty composite gates\n # if left_gate_host == None: del self.data[i]\n else:\n left_gate_host = self.data\n left_gate_index = i\n\n if ((left_gate_host is not None) and\n left_gate_host[left_gate_index].name == \"cx\"):\n if isinstance(self.data[i + 1], CompositeGate):\n right_gate_host = self.data[i + 1].first_atomic_gate_host()\n right_gate_index = 0\n else:\n right_gate_host = self.data\n right_gate_index = i + 1\n\n if (right_gate_host is not None) \\\n and right_gate_host[right_gate_index].name == \"cx\" \\\n and (left_gate_host[left_gate_index].arg ==\n right_gate_host[right_gate_index].arg):\n del right_gate_host[right_gate_index]\n del left_gate_host[left_gate_index]\n double_cnot_removed = True\n\n return double_cnot_removed\n\n\nQuantumCircuit.remove_double_cnots_once = remove_double_cnots_once\nCompositeGate.remove_double_cnots_once = remove_double_cnots_once\n\n\ndef first_atomic_gate_host(self):\n \"\"\"Return the host list of the leaf gate on the left edge.\"\"\"\n if self.data:\n if isinstance(self.data[0], CompositeGate):\n return self.data[0].first_atomic_gate_host()\n return self.data\n\n return None\n\n\nQuantumCircuit.first_atomic_gate_host = first_atomic_gate_host\nCompositeGate.first_atomic_gate_host = first_atomic_gate_host\n\n\ndef last_atomic_gate_host(self):\n \"\"\"Return the host list of the leaf gate on the right edge.\"\"\"\n if self.data:\n if isinstance(self.data[-1], CompositeGate):\n return self.data[-1].last_atomic_gate_host()\n return self.data\n\n return None\n\n\nQuantumCircuit.last_atomic_gate_host = last_atomic_gate_host\nCompositeGate.last_atomic_gate_host = last_atomic_gate_host\n\n\ndef initialize(self, params, qubits):\n \"\"\"Apply initialize to circuit.\"\"\"\n self._check_dups(qubits)\n for i in qubits:\n self._check_qubit(i)\n # TODO: make initialize an Instruction, and insert reset\n # TODO: avoid explicit reset if compiler determines a |0> state\n\n return self._attach(InitializeGate(params, qubits, self))\n\n\nQuantumCircuit.initialize = initialize\nCompositeGate.initialize = initialize\n",
"# -*- coding: utf-8 -*-\n\n# Copyright 2017 IBM RESEARCH. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\n\n\"\"\"\nQuantum State Tomography.\nGenerates many small circuits, thus good for profiling compiler overhead.\nNumber of circuits grows like 3^n_qubits\n\"\"\"\n\nimport sys\nimport numpy as np\nimport argparse\nimport time\n\n# import qiskit modules\nfrom qiskit import QuantumProgram\nfrom qiskit import QISKitError\n\n# import tomography libary and other useful tools\nimport qiskit.tools.qcvv.tomography as tomo\nfrom qiskit.tools.qi.qi import state_fidelity, purity\nfrom qiskit.tools.qi.qi import outer, random_unitary_matrix\n\n\n# circuit that outputs the target state\ndef target_prep(qp, state, target):\n # quantum circuit to make an entangled cat state\n if state == 'cat':\n n_qubits = int(np.log2(target.size))\n qr = qp.create_quantum_register('qr', n_qubits)\n cr = qp.create_classical_register('cr', n_qubits)\n cat = qp.create_circuit('prep', [qr], [cr])\n cat.h(qr[0])\n for i in range(1, n_qubits):\n cat.cx(qr[0], qr[i])\n\n # quantum circuit to prepare arbitrary given state\n elif state == 'random':\n n_qubits = int(np.log2(target.size))\n qr = qp.create_quantum_register('qr', n_qubits)\n cr = qp.create_classical_register('cr', n_qubits)\n random = qp.create_circuit('prep', [qr], [cr])\n random.initialize(\"Qinit\", target, [qr[i] for i in range(n_qubits)])\n\n return qp\n\n\n# add basis measurements to the Quantum Program for tomography\n# XX..X, XX..Y, .., ZZ..Z\ndef add_tomo_circuits(qp):\n # Construct state tomography set for measurement of qubits in the register\n qr_name = list(qp.get_quantum_register_names())[0]\n cr_name = list(qp.get_classical_register_names())[0]\n qr = qp.get_quantum_register(qr_name)\n cr = qp.get_classical_register(cr_name)\n tomo_set = tomo.state_tomography_set(list(range(qr.size)))\n\n # Add the state tomography measurement circuits to the Quantum Program\n tomo_circuits = tomo.create_tomography_circuits(qp, 'prep', qr, cr, tomo_set)\n\n return qp, tomo_set, tomo_circuits\n\n\n# perform quantum state tomography and assess quality of reconstructed vector\ndef state_tomography(state, n_qubits, shots):\n # cat target state: [1. 0. 0. ... 0. 0. 
1.]/sqrt(2.)\n if state == 'cat':\n target = np.zeros(pow(2, n_qubits))\n target[0] = 1\n target[pow(2, n_qubits)-1] = 1.0\n target /= np.sqrt(2.0)\n # random target state: first column of a random unitary\n elif state == 'random':\n target = random_unitary_matrix(pow(2, n_qubits))[0]\n else:\n raise QISKitError(\"Unknown state for tomography.\")\n\n print(\"target: {}\".format(target))\n\n # Use the local qasm simulator\n backend = 'local_qiskit_simulator'\n\n qp = QuantumProgram()\n\n # Prepared target state and assess quality\n qp = target_prep(qp, state, target)\n prep_result = qp.execute(['prep'], backend='local_qasm_simulator', shots=1)\n prep_state = prep_result.get_data('prep')['quantum_state']\n F_prep = state_fidelity(prep_state, target)\n print('Prepared state fidelity =', F_prep)\n\n # Run state tomography simulation and fit data to reconstruct circuit\n qp, tomo_set, tomo_circuits = add_tomo_circuits(qp)\n tomo_result = qp.execute(tomo_circuits, backend=backend, shots=shots)\n tomo_data = tomo.tomography_data(tomo_result, 'prep', tomo_set)\n rho_fit = tomo.fit_tomography_data(tomo_data)\n\n # calculate fidelity and purity of fitted state\n F_fit = state_fidelity(rho_fit, target)\n pur = purity(rho_fit)\n print('Fitted state fidelity =', F_fit)\n print('Fitted state purity =', str(pur))\n\n return qp\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description=\"Performance testing for compiler, using state tomography.\")\n parser.add_argument('--state', default='cat', help='state for tomography')\n parser.add_argument('--n_qubits', type=int, default=5, help='num qubits')\n parser.add_argument('--shots', type=int, default=1024, help='shots per measurement basis')\n args = parser.parse_args()\n\n tstart = time.time()\n qp = state_tomography(args.state, args.n_qubits, args.shots)\n tend = time.time()\n\n all_circuits = list(qp.get_circuit_names())\n avg = sum(qp.get_qasm(c).count(\"\\n\") for c in all_circuits) / len(all_circuits)\n\n print(\"---- Number of circuits: {}\".format(len(all_circuits)))\n print(\"---- Avg circuit size: {}\".format(avg))\n print(\"---- Elapsed time: {}\".format(tend - tstart))\n"
]
| [
[
"numpy.matrix",
"numpy.arccos",
"numpy.angle",
"numpy.exp",
"numpy.identity",
"numpy.absolute"
],
[
"numpy.log2",
"numpy.sqrt"
]
]
|
elfadys/pyquadkey | [
"81eb631fd96a0067e5366022c0667900ca09e9c4"
]
| [
"tile_system.py"
]
| [
"from util import precondition\nfrom math import pi\nimport numpy as np\nimport pandas as pd\n\n\ndef valid_level(level):\n LEVEL_RANGE = (1, 23)\n return LEVEL_RANGE[0] <= level <= LEVEL_RANGE[1]\n\n\n@precondition(lambda key: valid_level(len(key)))\ndef valid_key(key):\n return TileSystem.KEY_PATTERN.match(key) is not None\n\n\nclass TileSystem:\n\n \"\"\"\n Class with static method to build quadkeys from lat, lon, levels\n see http://msdn.microsoft.com/en-us/library/bb259689.aspx\n \"\"\"\n import re\n KEY_PATTERN = re.compile(\"^[0-3]+$\")\n\n EARTH_RADIUS = 6378137\n LATITUDE_RANGE = (-85.05112878, 85.05112878)\n LONGITUDE_RANGE = (-180., 180.)\n\n @staticmethod\n @precondition(lambda n, minMax: minMax[0] <= minMax[1])\n def clip(n, minMax):\n \"\"\"\tClips number to specified values \"\"\"\n return np.minimum(np.maximum(n, minMax[0]), minMax[1])\n\n @staticmethod\n @precondition(valid_level)\n def map_size(level):\n \"\"\"Determines map height and width in pixel space at level\"\"\"\n return 256 << level\n\n @staticmethod\n @precondition(lambda lat, lvl: valid_level(lvl))\n def ground_resolution(lat, level):\n \"\"\"Gets ground res in meters / pixel\"\"\"\n lat = TileSystem.clip(lat, TileSystem.LATITUDE_RANGE)\n return np.cos(lat * pi / 180) * 2 * pi * TileSystem.EARTH_RADIUS / TileSystem.map_size(level)\n\n @staticmethod\n @precondition(lambda lat, lvl, dpi: valid_level(lvl))\n def map_scale(lat, level, dpi):\n \"\"\"Gets the scale of the map expressed as ratio 1\t: N. Returns N\"\"\"\n return TileSystem.ground_resolution(lat, level) * dpi / 0.0254\n\n @staticmethod\n @precondition(lambda geo, lvl: valid_level(lvl))\n def geo_to_pixel(geo, level):\n lat, lon = geo[:,0].astype(np.float64), geo[:,1].astype(np.float64)\n lat = TileSystem.clip(lat, TileSystem.LATITUDE_RANGE)\n lon = TileSystem.clip(lon, TileSystem.LONGITUDE_RANGE)\n x = (lon + 180) / 360\n sin_lat = np.sin(lat * pi / 180)\n y = 0.5 - np.log((1 + sin_lat) / (1 - sin_lat)) / (4 * pi)\n map_size = TileSystem.map_size(level)\n pixel_x = TileSystem.clip(x * map_size + 0.5, (0, map_size - 1))\n pixel_y = TileSystem.clip(y * map_size + 0.5, (0, map_size - 1))\n return pixel_x.astype(np.int64), pixel_y.astype(np.int64)\n\n @staticmethod\n @precondition(lambda pix, lvl: valid_level(lvl))\n def pixel_to_geo(pixel, level):\n \"\"\"Transform from pixel to geo coordinates\"\"\"\n pixel_x = pixel[:,0]\n pixel_y = pixel[:,1]\n map_size = float(TileSystem.map_size(level))\n x = (TileSystem.clip(pixel_x, (0, map_size - 1)) / map_size) - 0.5\n y = 0.5 - (TileSystem.clip(pixel_y, (0, map_size - 1)) / map_size)\n lat = 90 - 360 * np.arctan(np.exp(-y * 2 * pi)) / pi\n lon = 360 * x\n return np.round(lat, 6), np.round(lon, 6)\n \n @staticmethod\n def pixel_to_tile(pixel):\n \"\"\"Transform pixel to tile coordinates\"\"\"\n return (pixel[:,0] / 256).astype(np.int64), (pixel[:,1] / 256).astype(np.int64)\n\n @staticmethod\n def tile_to_pixel(tile, centered=False):\n \"\"\"Transform tile to pixel coordinates\"\"\"\n pixel_x = tile[:,0] * 256\n pixel_y = tile[:,1] * 256\n if centered:\n # should clip on max map size\n pixel_x += 128\n pixel_y += 128\n return pixel_x, pixel_y\n\n @staticmethod\n @precondition(lambda tile, lvl: valid_level(lvl))\n def tile_to_quadkey(tile, level):\n \"\"\"Transform tile coordinates to a quadkey\"\"\"\n tile_x = tile[:,0]\n tile_y = tile[:,1]\n quadkey = pd.DataFrame(\"'\",index=np.arange(len(tile)),columns=['qk'])\n for i in range(level):\n bit = level - i\n digit = np.zeros_like(tile_x)\n mask = 1 << (bit - 1) # 
if (bit - 1) > 0 else 1 >> (bit - 1)\n digit += ((tile_x & mask) != 0)*1\n digit += ((tile_y & mask) != 0)*2\n quadkey.qk = quadkey.qk + digit.astype(str)\n\n return quadkey.values.astype(str)\n\n @staticmethod\n def quadkey_to_tile(quadkey):\n \"\"\"Transform quadkey to tile coordinates\"\"\"\n tile_x = np.zeros((len(quadkey),1)).astype(int)\n tile_y = np.zeros((len(quadkey),1)).astype(int)\n level = len(quadkey[0])\n qk = pd.DataFrame(quadkey,columns=['qk'])\n \n for i in range(level):\n bit = level - i\n mask = 1 << (bit - 1)\n \n tile_x[qk['qk'].str[level - bit] == '1'] |= mask\n tile_y[qk['qk'].str[level - bit] == '2'] |= mask\n tile_x[qk['qk'].str[level - bit] == '3'] |= mask\n tile_y[qk['qk'].str[level - bit] == '3'] |= mask\n \n return tile_x, tile_y, level\n"
]
| [
[
"numpy.sin",
"numpy.zeros_like",
"numpy.log",
"numpy.round",
"pandas.DataFrame",
"numpy.exp",
"numpy.cos",
"numpy.maximum"
]
]
|
everguard-inc/dino | [
"49c8142167867704b22cc9420e3aa18f8ce3cd00"
]
| [
"main_dino.py"
]
| [
"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport argparse\nimport datetime\nimport json\nimport math\nimport os\nimport sys\nimport time\nfrom pathlib import Path\n\nimport numpy as np\nimport torch\nimport torch.backends.cudnn as cudnn\nimport torch.distributed as dist\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom PIL import Image\nfrom torchvision import datasets\nfrom torchvision import models as torchvision_models\nfrom torchvision import transforms\n\nimport utils\nimport vision_transformer as vits\nfrom vision_transformer import DINOHead\n\n\ntorchvision_archs = sorted(\n name\n for name in torchvision_models.__dict__\n if name.islower() and not name.startswith(\"__\") and callable(torchvision_models.__dict__[name])\n)\n\n\ndef get_args_parser():\n parser = argparse.ArgumentParser(\"DINO\", add_help=False)\n\n # Model parameters\n parser.add_argument(\n \"--arch\",\n default=\"vit_small\",\n type=str,\n choices=[\"vit_tiny\", \"vit_small\", \"vit_base\", \"deit_tiny\", \"deit_small\"] + torchvision_archs,\n help=\"\"\"Name of architecture to train. For quick experiments with ViTs,\n we recommend using vit_tiny or vit_small.\"\"\",\n )\n parser.add_argument(\n \"--patch_size\",\n default=16,\n type=int,\n help=\"\"\"Size in pixels\n of input square patches - default 16 (for 16x16 patches). Using smaller\n values leads to better performance but requires more memory. Applies only\n for ViTs (vit_tiny, vit_small and vit_base). If <16, we recommend disabling\n mixed precision training (--use_fp16 false) to avoid unstabilities.\"\"\",\n )\n parser.add_argument(\n \"--out_dim\",\n default=65536,\n type=int,\n help=\"\"\"Dimensionality of\n the DINO head output. For complex and large datasets large values (like 65k) work well.\"\"\",\n )\n parser.add_argument(\n \"--norm_last_layer\",\n default=True,\n type=utils.bool_flag,\n help=\"\"\"Whether or not to weight normalize the last layer of the DINO head.\n Not normalizing leads to better performance but can make the training unstable.\n In our experiments, we typically set this paramater to False with vit_small and True with vit_base.\"\"\",\n )\n parser.add_argument(\n \"--momentum_teacher\",\n default=0.996,\n type=float,\n help=\"\"\"Base EMA\n parameter for teacher update. 
The value is increased to 1 during training with cosine schedule.\n We recommend setting a higher value with small batches: for example use 0.9995 with batch size of 256.\"\"\",\n )\n parser.add_argument(\n \"--use_bn_in_head\",\n default=False,\n type=utils.bool_flag,\n help=\"Whether to use batch normalizations in projection head (Default: False)\",\n )\n\n # Temperature teacher parameters\n parser.add_argument(\n \"--warmup_teacher_temp\",\n default=0.04,\n type=float,\n help=\"\"\"Initial value for the teacher temperature: 0.04 works well in most cases.\n Try decreasing it if the training loss does not decrease.\"\"\",\n )\n parser.add_argument(\n \"--teacher_temp\",\n default=0.04,\n type=float,\n help=\"\"\"Final value (after linear warmup)\n of the teacher temperature. For most experiments, anything above 0.07 is unstable. We recommend\n starting with the default value of 0.04 and increase this slightly if needed.\"\"\",\n )\n parser.add_argument(\n \"--warmup_teacher_temp_epochs\",\n default=0,\n type=int,\n help=\"Number of warmup epochs for the teacher temperature (Default: 30).\",\n )\n\n # Training/Optimization parameters\n parser.add_argument(\n \"--use_fp16\",\n type=utils.bool_flag,\n default=True,\n help=\"\"\"Whether or not\n to use half precision for training. Improves training time and memory requirements,\n but can provoke instability and slight decay of performance. We recommend disabling\n mixed precision if the loss is unstable, if reducing the patch size or if training with bigger ViTs.\"\"\",\n )\n parser.add_argument(\n \"--weight_decay\",\n type=float,\n default=0.04,\n help=\"\"\"Initial value of the\n weight decay. With ViT, a smaller value at the beginning of training works well.\"\"\",\n )\n parser.add_argument(\n \"--weight_decay_end\",\n type=float,\n default=0.4,\n help=\"\"\"Final value of the\n weight decay. We use a cosine schedule for WD and using a larger decay by\n the end of training improves performance for ViTs.\"\"\",\n )\n parser.add_argument(\n \"--clip_grad\",\n type=float,\n default=3.0,\n help=\"\"\"Maximal parameter\n gradient norm if using gradient clipping. Clipping with norm .3 ~ 1.0 can\n help optimization for larger ViT architectures. 0 for disabling.\"\"\",\n )\n parser.add_argument(\n \"--batch_size_per_gpu\",\n default=64,\n type=int,\n help=\"Per-GPU batch-size : number of distinct images loaded on one GPU.\",\n )\n parser.add_argument(\"--epochs\", default=100, type=int, help=\"Number of epochs of training.\")\n parser.add_argument(\n \"--freeze_last_layer\",\n default=1,\n type=int,\n help=\"\"\"Number of epochs\n during which we keep the output layer fixed. Typically doing so during\n the first epoch helps training. Try increasing this value if the loss does not decrease.\"\"\",\n )\n parser.add_argument(\n \"--lr\",\n default=0.0005,\n type=float,\n help=\"\"\"Learning rate at the end of\n linear warmup (highest LR used during training). The learning rate is linearly scaled\n with the batch size, and specified here for a reference batch size of 256.\"\"\",\n )\n parser.add_argument(\n \"--warmup_epochs\", default=10, type=int, help=\"Number of epochs for the linear learning-rate warm up.\"\n )\n parser.add_argument(\n \"--min_lr\",\n type=float,\n default=1e-6,\n help=\"\"\"Target LR at the\n end of optimization. We use a cosine LR schedule with linear warmup.\"\"\",\n )\n parser.add_argument(\n \"--optimizer\",\n default=\"adamw\",\n type=str,\n choices=[\"adamw\", \"sgd\", \"lars\"],\n help=\"\"\"Type of optimizer. 
We recommend using adamw with ViTs.\"\"\",\n )\n\n # Multi-crop parameters\n parser.add_argument(\n \"--global_crops_scale\",\n type=float,\n nargs=\"+\",\n default=(0.4, 1.0),\n help=\"\"\"Scale range of the cropped image before resizing, relatively to the origin image.\n Used for large global view cropping. When disabling multi-crop (--local_crops_number 0), we\n recommand using a wider range of scale (\"--global_crops_scale 0.14 1.\" for example)\"\"\",\n )\n parser.add_argument(\n \"--local_crops_number\",\n type=int,\n default=8,\n help=\"\"\"Number of small\n local views to generate. Set this parameter to 0 to disable multi-crop training.\n When disabling multi-crop we recommend to use \"--global_crops_scale 0.14 1.\" \"\"\",\n )\n parser.add_argument(\n \"--local_crops_scale\",\n type=float,\n nargs=\"+\",\n default=(0.05, 0.4),\n help=\"\"\"Scale range of the cropped image before resizing, relatively to the origin image.\n Used for small local view cropping of multi-crop.\"\"\",\n )\n\n # Misc\n parser.add_argument(\n \"--data_path\",\n default=\"/path/to/imagenet/train/\",\n type=str,\n help=\"Please specify path to the ImageNet training data.\",\n )\n parser.add_argument(\"--output_dir\", default=\".\", type=str, help=\"Path to save logs and checkpoints.\")\n parser.add_argument(\"--saveckp_freq\", default=20, type=int, help=\"Save checkpoint every x epochs.\")\n parser.add_argument(\"--seed\", default=0, type=int, help=\"Random seed.\")\n parser.add_argument(\"--num_workers\", default=10, type=int, help=\"Number of data loading workers per GPU.\")\n parser.add_argument(\n \"--dist_url\",\n default=\"env://\",\n type=str,\n help=\"\"\"url used to set up\n distributed training; see https://pytorch.org/docs/stable/distributed.html\"\"\",\n )\n parser.add_argument(\"--local_rank\", default=0, type=int, help=\"Please ignore and do not set this argument.\")\n parser.add_argument(\"--pretrained\", default=None, type=str, nargs=2, help=\"Pretrained weights\")\n return parser\n\n\ndef train_dino(args):\n utils.init_distributed_mode(args)\n utils.fix_random_seeds(args.seed)\n print(\"git:\\n {}\\n\".format(utils.get_sha()))\n print(\"\\n\".join(\"%s: %s\" % (k, str(v)) for k, v in sorted(dict(vars(args)).items())))\n cudnn.benchmark = True\n\n # ============ preparing data ... ============\n transform = DataAugmentationDINO(args.global_crops_scale, args.local_crops_scale, args.local_crops_number,)\n dataset = datasets.ImageFolder(args.data_path, transform=transform)\n sampler = torch.utils.data.DistributedSampler(dataset, shuffle=True)\n data_loader = torch.utils.data.DataLoader(\n dataset,\n sampler=sampler,\n batch_size=args.batch_size_per_gpu,\n num_workers=args.num_workers,\n pin_memory=True,\n drop_last=True,\n )\n print(f\"Data loaded: there are {len(dataset)} images.\")\n\n # ============ building student and teacher networks ... ============\n # we changed the name DeiT-S for ViT-S to avoid confusions\n args.arch = args.arch.replace(\"deit\", \"vit\")\n # if the network is a vision transformer (i.e. 
vit_tiny, vit_small, vit_base)\n if args.arch in vits.__dict__.keys():\n student = vits.__dict__[args.arch](patch_size=args.patch_size, drop_path_rate=0.1,) # stochastic depth\n teacher = vits.__dict__[args.arch](patch_size=args.patch_size)\n embed_dim = student.embed_dim\n # otherwise, we check if the architecture is in torchvision models\n elif args.arch in torchvision_models.__dict__.keys():\n student = torchvision_models.__dict__[args.arch]()\n teacher = torchvision_models.__dict__[args.arch]()\n embed_dim = student.fc.weight.shape[1]\n else:\n print(f\"Unknow architecture: {args.arch}\")\n\n if args.pretrained is not None:\n pretrained_model = torch.hub.load(*args.pretrained)\n pretrained_model_state_dict = pretrained_model.state_dict()\n student_state_dict = student.state_dict()\n missing_dict = {\n key: student_state_dict[key] for key in student_state_dict if key not in pretrained_model_state_dict\n }\n print(f\"Missing keys: {missing_dict.keys()}\")\n pretrained_model_state_dict.update(missing_dict)\n student.load_state_dict(pretrained_model_state_dict)\n teacher.load_state_dict(pretrained_model_state_dict)\n\n # multi-crop wrapper handles forward with inputs of different resolutions\n student = utils.MultiCropWrapper(\n student, DINOHead(embed_dim, args.out_dim, use_bn=args.use_bn_in_head, norm_last_layer=args.norm_last_layer,)\n )\n teacher = utils.MultiCropWrapper(teacher, DINOHead(embed_dim, args.out_dim, args.use_bn_in_head),)\n # move networks to gpu\n student, teacher = student.cuda(), teacher.cuda()\n # synchronize batch norms (if any)\n if utils.has_batchnorms(student):\n student = nn.SyncBatchNorm.convert_sync_batchnorm(student)\n teacher = nn.SyncBatchNorm.convert_sync_batchnorm(teacher)\n\n # we need DDP wrapper to have synchro batch norms working...\n teacher = nn.parallel.DistributedDataParallel(teacher, device_ids=[args.gpu])\n teacher_without_ddp = teacher.module\n else:\n # teacher_without_ddp and teacher are the same thing\n teacher_without_ddp = teacher\n student = nn.parallel.DistributedDataParallel(student, device_ids=[args.gpu])\n # teacher and student start with the same weights\n teacher_without_ddp.load_state_dict(student.module.state_dict())\n # there is no backpropagation through the teacher, so no need for gradients\n for p in teacher.parameters():\n p.requires_grad = False\n print(f\"Student and Teacher are built: they are both {args.arch} network.\")\n\n # ============ preparing loss ... ============\n dino_loss = DINOLoss(\n args.out_dim,\n args.local_crops_number + 2, # total number of crops = 2 global crops + local_crops_number\n args.warmup_teacher_temp,\n args.teacher_temp,\n args.warmup_teacher_temp_epochs,\n args.epochs,\n ).cuda()\n\n # ============ preparing optimizer ... ============\n params_groups = utils.get_params_groups(student)\n if args.optimizer == \"adamw\":\n optimizer = torch.optim.AdamW(params_groups) # to use with ViTs\n elif args.optimizer == \"sgd\":\n optimizer = torch.optim.SGD(params_groups, lr=0, momentum=0.9) # lr is set by scheduler\n elif args.optimizer == \"lars\":\n optimizer = utils.LARS(params_groups) # to use with convnet and large batches\n # for mixed precision training\n fp16_scaler = None\n if args.use_fp16:\n fp16_scaler = torch.cuda.amp.GradScaler()\n\n # ============ init schedulers ... 
============\n lr_schedule = utils.cosine_scheduler(\n args.lr * (args.batch_size_per_gpu * utils.get_world_size()) / 256.0, # linear scaling rule\n args.min_lr,\n args.epochs,\n len(data_loader),\n warmup_epochs=args.warmup_epochs,\n )\n wd_schedule = utils.cosine_scheduler(args.weight_decay, args.weight_decay_end, args.epochs, len(data_loader),)\n # momentum parameter is increased to 1. during training with a cosine schedule\n momentum_schedule = utils.cosine_scheduler(args.momentum_teacher, 1, args.epochs, len(data_loader))\n print(f\"Loss, optimizer and schedulers ready.\")\n\n # ============ optionally resume training ... ============\n to_restore = {\"epoch\": 0}\n utils.restart_from_checkpoint(\n os.path.join(args.output_dir, \"checkpoint.pth\"),\n run_variables=to_restore,\n student=student,\n teacher=teacher,\n optimizer=optimizer,\n fp16_scaler=fp16_scaler,\n dino_loss=dino_loss,\n )\n start_epoch = to_restore[\"epoch\"]\n\n start_time = time.time()\n print(\"Starting DINO training !\")\n for epoch in range(start_epoch, args.epochs):\n data_loader.sampler.set_epoch(epoch)\n\n # ============ training one epoch of DINO ... ============\n train_stats = train_one_epoch(\n student,\n teacher,\n teacher_without_ddp,\n dino_loss,\n data_loader,\n optimizer,\n lr_schedule,\n wd_schedule,\n momentum_schedule,\n epoch,\n fp16_scaler,\n args,\n )\n\n # ============ writing logs ... ============\n save_dict = {\n \"student\": student.state_dict(),\n \"teacher\": teacher.state_dict(),\n \"optimizer\": optimizer.state_dict(),\n \"epoch\": epoch + 1,\n \"args\": args,\n \"dino_loss\": dino_loss.state_dict(),\n }\n if fp16_scaler is not None:\n save_dict[\"fp16_scaler\"] = fp16_scaler.state_dict()\n utils.save_on_master(save_dict, os.path.join(args.output_dir, \"checkpoint.pth\"))\n if args.saveckp_freq and epoch % args.saveckp_freq == 0:\n utils.save_on_master(save_dict, os.path.join(args.output_dir, f\"checkpoint{epoch:04}.pth\"))\n log_stats = {**{f\"train_{k}\": v for k, v in train_stats.items()}, \"epoch\": epoch}\n if utils.is_main_process():\n with (Path(args.output_dir) / \"log.txt\").open(\"a\") as f:\n f.write(json.dumps(log_stats) + \"\\n\")\n total_time = time.time() - start_time\n total_time_str = str(datetime.timedelta(seconds=int(total_time)))\n print(\"Training time {}\".format(total_time_str))\n\n\ndef train_one_epoch(\n student,\n teacher,\n teacher_without_ddp,\n dino_loss,\n data_loader,\n optimizer,\n lr_schedule,\n wd_schedule,\n momentum_schedule,\n epoch,\n fp16_scaler,\n args,\n):\n metric_logger = utils.MetricLogger(delimiter=\" \")\n header = \"Epoch: [{}/{}]\".format(epoch, args.epochs)\n for it, (images, _) in enumerate(metric_logger.log_every(data_loader, 10, header)):\n # update weight decay and learning rate according to their schedule\n it = len(data_loader) * epoch + it # global training iteration\n for i, param_group in enumerate(optimizer.param_groups):\n param_group[\"lr\"] = lr_schedule[it]\n if i == 0: # only the first group is regularized\n param_group[\"weight_decay\"] = wd_schedule[it]\n\n # move images to gpu\n images = [im.cuda(non_blocking=True) for im in images]\n # teacher and student forward passes + compute dino loss\n with torch.cuda.amp.autocast(fp16_scaler is not None):\n teacher_output = teacher(images[:2]) # only the 2 global views pass through the teacher\n student_output = student(images)\n loss = dino_loss(student_output, teacher_output, epoch)\n\n if not math.isfinite(loss.item()):\n print(\"Loss is {}, stopping 
training\".format(loss.item()), force=True)\n sys.exit(1)\n\n # student update\n optimizer.zero_grad()\n param_norms = None\n if fp16_scaler is None:\n loss.backward()\n if args.clip_grad:\n param_norms = utils.clip_gradients(student, args.clip_grad)\n utils.cancel_gradients_last_layer(epoch, student, args.freeze_last_layer)\n optimizer.step()\n else:\n fp16_scaler.scale(loss).backward()\n if args.clip_grad:\n fp16_scaler.unscale_(optimizer) # unscale the gradients of optimizer's assigned params in-place\n param_norms = utils.clip_gradients(student, args.clip_grad)\n utils.cancel_gradients_last_layer(epoch, student, args.freeze_last_layer)\n fp16_scaler.step(optimizer)\n fp16_scaler.update()\n\n # EMA update for the teacher\n with torch.no_grad():\n m = momentum_schedule[it] # momentum parameter\n for param_q, param_k in zip(student.module.parameters(), teacher_without_ddp.parameters()):\n param_k.data.mul_(m).add_((1 - m) * param_q.detach().data)\n\n # logging\n torch.cuda.synchronize()\n metric_logger.update(loss=loss.item())\n metric_logger.update(lr=optimizer.param_groups[0][\"lr\"])\n metric_logger.update(wd=optimizer.param_groups[0][\"weight_decay\"])\n # gather the stats from all processes\n metric_logger.synchronize_between_processes()\n print(\"Averaged stats:\", metric_logger)\n return {k: meter.global_avg for k, meter in metric_logger.meters.items()}\n\n\nclass DINOLoss(nn.Module):\n def __init__(\n self,\n out_dim,\n ncrops,\n warmup_teacher_temp,\n teacher_temp,\n warmup_teacher_temp_epochs,\n nepochs,\n student_temp=0.1,\n center_momentum=0.9,\n ):\n super().__init__()\n self.student_temp = student_temp\n self.center_momentum = center_momentum\n self.ncrops = ncrops\n self.register_buffer(\"center\", torch.zeros(1, out_dim))\n # we apply a warm up for the teacher temperature because\n # a too high temperature makes the training instable at the beginning\n self.teacher_temp_schedule = np.concatenate(\n (\n np.linspace(warmup_teacher_temp, teacher_temp, warmup_teacher_temp_epochs),\n np.ones(nepochs - warmup_teacher_temp_epochs) * teacher_temp,\n )\n )\n\n def forward(self, student_output, teacher_output, epoch):\n \"\"\"\n Cross-entropy between softmax outputs of the teacher and student networks.\n \"\"\"\n student_out = student_output / self.student_temp\n student_out = student_out.chunk(self.ncrops)\n\n # teacher centering and sharpening\n temp = self.teacher_temp_schedule[epoch]\n teacher_out = F.softmax((teacher_output - self.center) / temp, dim=-1)\n teacher_out = teacher_out.detach().chunk(2)\n\n total_loss = 0\n n_loss_terms = 0\n for iq, q in enumerate(teacher_out):\n for v in range(len(student_out)):\n if v == iq:\n # we skip cases where student and teacher operate on the same view\n continue\n loss = torch.sum(-q * F.log_softmax(student_out[v], dim=-1), dim=-1)\n total_loss += loss.mean()\n n_loss_terms += 1\n total_loss /= n_loss_terms\n self.update_center(teacher_output)\n return total_loss\n\n @torch.no_grad()\n def update_center(self, teacher_output):\n \"\"\"\n Update center used for teacher output.\n \"\"\"\n batch_center = torch.sum(teacher_output, dim=0, keepdim=True)\n dist.all_reduce(batch_center)\n batch_center = batch_center / (len(teacher_output) * dist.get_world_size())\n\n # ema update\n self.center = self.center * self.center_momentum + batch_center * (1 - self.center_momentum)\n\n\nclass DataAugmentationDINO(object):\n def __init__(self, global_crops_scale, local_crops_scale, local_crops_number):\n flip_and_color_jitter = transforms.Compose(\n [\n 
transforms.RandomHorizontalFlip(p=0.5),\n transforms.RandomApply(\n [transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.2, hue=0.1)], p=0.8\n ),\n transforms.RandomGrayscale(p=0.2),\n ]\n )\n normalize = transforms.Compose(\n [transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),]\n )\n\n # first global crop\n self.global_transfo1 = transforms.Compose(\n [\n transforms.RandomResizedCrop(224, scale=global_crops_scale, interpolation=Image.BICUBIC),\n flip_and_color_jitter,\n utils.GaussianBlur(1.0),\n normalize,\n ]\n )\n # second global crop\n self.global_transfo2 = transforms.Compose(\n [\n transforms.RandomResizedCrop(224, scale=global_crops_scale, interpolation=Image.BICUBIC),\n flip_and_color_jitter,\n utils.GaussianBlur(0.1),\n utils.Solarization(0.2),\n normalize,\n ]\n )\n # transformation for the local small crops\n self.local_crops_number = local_crops_number\n self.local_transfo = transforms.Compose(\n [\n transforms.RandomResizedCrop(96, scale=local_crops_scale, interpolation=Image.BICUBIC),\n flip_and_color_jitter,\n utils.GaussianBlur(p=0.5),\n normalize,\n ]\n )\n\n def __call__(self, image):\n crops = []\n crops.append(self.global_transfo1(image))\n crops.append(self.global_transfo2(image))\n for _ in range(self.local_crops_number):\n crops.append(self.local_transfo(image))\n return crops\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\"DINO\", parents=[get_args_parser()])\n args = parser.parse_args()\n Path(args.output_dir).mkdir(parents=True, exist_ok=True)\n train_dino(args)\n"
]
| [
[
"torch.distributed.get_world_size",
"torch.optim.AdamW",
"torch.cuda.amp.autocast",
"torch.hub.load",
"torch.sum",
"torch.nn.SyncBatchNorm.convert_sync_batchnorm",
"torch.utils.data.DataLoader",
"torch.zeros",
"torch.utils.data.DistributedSampler",
"torch.optim.SGD",
"torch.nn.parallel.DistributedDataParallel",
"torch.nn.functional.log_softmax",
"torch.nn.functional.softmax",
"torch.cuda.amp.GradScaler",
"torch.cuda.synchronize",
"torch.no_grad",
"numpy.ones",
"torch.distributed.all_reduce",
"numpy.linspace"
]
]
|
guigolab/DeepFryer | [
"83ddde69134c28a662735f153f568b247cb4d091"
]
| [
"deepfryer/batcheffect/correction.py"
]
| [
"# -*- coding: utf-8 -*-\nimport copy\nimport sys\nimport numpy as np\nimport pandas as pd\n\nfrom sklearn.linear_model import LinearRegression\n\n\ndef assign_val(dataset, valueset, var, name):\n\t''' It takes two datasets and map values from one to the other.\n\t\t-dataset: Pandas DataFrame to where the values are going to be added.\n\t\t-valueset: Pandas DataFrame to where the values are going to be taken.\n\t\t-var: String of the value taken in valueset\n\t\t-name: String. New name of the column in dataset. If the name is already in the Dframe it will overwrite values.'''\n\tif dataset.index[0] in valueset.index:\n\t\tdataset.loc[dataset.index, name] = valueset.loc[dataset.index, var]\n\telse:\t\n\t\tdataset.loc[:,'SUBID'] = np.array([i.split('-')[0]+'-'+i.split('-')[1] for i in dataset.index])\n\t\tdataset.loc[:,name] = valueset.loc[dataset['SUBID'], var].values\n\t\tdataset = dataset.drop('SUBID', axis = 1)\n\tsys.stderr.write(str(var)+' values assigned.\\n')\n\treturn dataset\n\ndef correct_linear_cov(dset, labels, meta, cov_list):\n\t'''It takes dset values and corrects the effect of variables contained in cov_list using labels as the receiving dframe for values in metadata.\n\t-dset: DataFrame of independent variables.\n\t-labels: DataFrame of dependent variables.\n\t-meta: DataFrame of covariates.\n\t-cov_list: List of covariables.'''\n\ty_model = copy.deepcopy(dset)\n\tx_model = copy.deepcopy(labels)\n\tcov = copy.deepcopy(meta)\n\tfor w in cov_list:\n\t\tfor tiss in np.unique(labels[0]):\n\t\t\tx_class = x_model[x_model[0] == tiss]\n\t\t\ty_class = y_model[y_model.index.isin(x_class.index)]\n\t\t\tif w.startswith('MH') and (cov[w].dtype == 'float64'):\n\t\t\t\tcov[w] = cov.loc[:,w].astype('category').cat.codes\n\t\t\t\tx_class = assign_val(x_class, cov, w, 0)\n\t\t\t\tx_class = pd.get_dummies(x_class)\n\t\t\t\tlm = LinearRegression()\n\t\t\t\tlm.fit(x_class, y_class)\n\t\t\t\tr2 = lm.score(x_class, y_class)\n\t\t\t\tsys.stdout.write(tiss+\" pre-correction R² for \"+w+\": \"+str(r2)+'\\n')\n\t\t\t\tres = y_class - np.matmul(x_class.astype('float32'), lm.coef_.T)\n\t\t\t\tlm = LinearRegression()\n\t\t\t\tlm.fit(x_class, res)\n\t\t\t\tr2 = lm.score(x_class, res)\n\t\t\t\tsys.stdout.write(tiss+\" post-correction R² for \"+w+\": \"+str(r2)+'\\n')\n\t\t\t\ty_model.loc[res.index,:] = res.loc[res.index,:]\n\t\t\telif cov[w].dtype == object:\n\t\t\t\tcov[w] = cov.loc[:,w].astype('category').cat.codes\n\t\t\t\tx_class = assign_val(x_class, cov, w, 0)\n\t\t\t\tx_class = pd.get_dummies(x_class)\n\t\t\t\tlm = LinearRegression()\n\t\t\t\tlm.fit(x_class, y_class)\n\t\t\t\tr2 = lm.score(x_class, y_class)\n\t\t\t\tsys.stdout.write(tiss+\" pre-correction R² for \"+w+\": \"+str(r2)+'\\n')\n\t\t\t\tres = y_class - np.matmul(x_class.x_class.astype('float32'), lm.coef_.T)\n\t\t\t\tlm = LinearRegression()\n\t\t\t\tlm.fit(x_class, res)\n\t\t\t\tr2 = lm.score(x_class, res)\n\t\t\t\tsys.stdout.write(tiss+\" post-correction R² for \"+w+\": \"+str(r2)+'\\n')\n\t\t\t\ty_model.loc[res.index,:] = res.loc[res.index,:]\n\t\t\telif cov[w].dtype == 'int64' and w != 'AGE':\n\t\t\t\tcov[w] = cov.loc[:,w].astype('category').cat.codes\n\t\t\t\tx_class = assign_val(x_class, cov, w, 0)\n\t\t\t\tx_class = pd.get_dummies(x_class)\n\t\t\t\tlm = LinearRegression()\n\t\t\t\tlm.fit(x_class, y_class)\n\t\t\t\tr2 = lm.score(x_class, y_class)\n\t\t\t\tsys.stdout.write(tiss+\" pre-correction R² for \"+w+\": \"+str(r2)+'\\n')\n\t\t\t\tres = y_class - np.matmul(x_class.astype('float32'), lm.coef_.T)\n\t\t\t\tlm = 
LinearRegression()\n\t\t\t\tlm.fit(x_class, res)\n\t\t\t\tr2 = lm.score(x_class, res)\n\t\t\t\tsys.stdout.write(tiss+\" post-correction R² for \"+w+\": \"+str(r2)+'\\n')\n\t\t\t\ty_model.loc[res.index,:] = res.loc[res.index,:]\n\t\t\telse:\n\t\t\t\tx_class = assign_val(x_class, cov, w, 0)\n\t\t\t\tif x_class[0].max() != 0.0:\n\t\t\t\t\tx_class = x_class/x_class.max()\n\t\t\t\tlm = LinearRegression()\n\t\t\t\tlm.fit(x_class.values.reshape(-1,1), y_class)\n\t\t\t\tr2 = lm.score(x_class.values.reshape(-1,1), y_class)\n\t\t\t\tsys.stdout.write(tiss+\" pre-correction R² for \"+w+\": \"+str(r2)+'\\n')\n\t\t\t\tres = y_class - np.matmul(x_class.astype('float32'), lm.coef_.reshape(1,-1))\n\t\t\t\tlm = LinearRegression()\n\t\t\t\tlm.fit(x_class.values.reshape(-1,1), res)\n\t\t\t\tr2 = lm.score(x_class.values.reshape(-1,1), res)\n\t\t\t\tsys.stdout.write(tiss+\" post-correction R² for \"+w+\": \"+str(r2)+'\\n')\n\t\t\t\ty_model.loc[res.index,:] = res.loc[res.index,:]\n\treturn y_model\n"
]
| [
[
"sklearn.linear_model.LinearRegression",
"numpy.unique",
"pandas.get_dummies"
]
]
|
slawekkopacz/dl-az | [
"1a49ed3c205b2e2aaa35d64050864f08df1b0543"
]
| [
"Deep_Learning_A_Z/Volume 1 - Supervised Deep Learning/Part 1 - Artificial Neural Networks (ANN)/Section 4 - Building an ANN/Artificial_Neural_Networks/ann.py"
]
| [
"# Artificial Neural Network\n\n# Installing Theano\n# pip install --upgrade --no-deps git+git://github.com/Theano/Theano.git\n\n# Installing Tensorflow\n# pip install tensorflow\n\n# Installing Keras\n# pip install --upgrade keras\n\n# Part 1 - Data Preprocessing\n\n# Importing the libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n# Importing the dataset\ndataset = pd.read_csv('Churn_Modelling.csv')\nX = dataset.iloc[:, 3:13].values\ny = dataset.iloc[:, 13].values\n\n# Encoding categorical data \nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\nlabelencoder_X_1 = LabelEncoder()\nX[:, 1] = labelencoder_X_1.fit_transform(X[:, 1])\nlabelencoder_X_2 = LabelEncoder()\nX[:, 2] = labelencoder_X_2.fit_transform(X[:, 2])\nonehotencoder = OneHotEncoder(categorical_features = [1])\nX = onehotencoder.fit_transform(X).toarray()\nX = X[:, 1:]\n\n# Splitting the dataset into the Training set and Test set\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)\n\n# Feature Scaling\nfrom sklearn.preprocessing import StandardScaler\nsc = StandardScaler()\nX_train = sc.fit_transform(X_train)\nX_test = sc.transform(X_test)\n\n# Part 2 - Now let's make the ANN!\n\n# Importing the Keras libraries and packages\nimport keras\nfrom keras.models import Sequential\nfrom keras.layers import Dense\n\n# Initialising the ANN\nclassifier = Sequential()\n\n# Adding the input layer and the first hidden layer\nclassifier.add(Dense(units = 6, kernel_initializer = 'uniform', activation = 'relu', input_dim = 11))\n\n# Adding the second hidden layer\nclassifier.add(Dense(units = 6, kernel_initializer = 'uniform', activation = 'relu'))\n\nclassifier.add(Dense(units = 6, kernel_initializer = 'uniform', activation = 'relu'))\n\n# Adding the output layer\nclassifier.add(Dense(units = 1, kernel_initializer = 'uniform', activation = 'sigmoid'))\n \n# Compiling the ANN\nclassifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])\n\n# Fitting the ANN to the Training set\nclassifier.fit(X_train, y_train, batch_size = 10, epochs = 20)\n\n# Part 3 - Making predictions and evaluating the model\n\n# Predicting the Test set results\ny_pred = classifier.predict(X_test)\ny_pred = (y_pred > 0.5)\n\n# Making the Confusion Matrix\nfrom sklearn.metrics import confusion_matrix\ncm = confusion_matrix(y_test, y_pred)"
]
| [
[
"sklearn.preprocessing.LabelEncoder",
"sklearn.metrics.confusion_matrix",
"sklearn.preprocessing.StandardScaler",
"sklearn.model_selection.train_test_split",
"pandas.read_csv",
"sklearn.preprocessing.OneHotEncoder"
]
]
|
loco-3d/multicontact-locomotion-planning | [
"86c3e64fd0ee57b1e4061351a16e43e6ba0e15c2"
]
| [
"python/mlp/utils/trajectories.py"
]
| [
"import numpy as np\nimport numpy.matlib\nfrom numpy.polynomial.polynomial import polyval\n#from numpy import polyder\nfrom numpy.linalg import pinv\nfrom pinocchio import SE3, log3, exp3, Motion, Quaternion\nfrom pinocchio.utils import zero as mat_zeros\nfrom mlp.utils.util import effectorPositionFromHPPPath\n\n\n# COM Trajecotry smoothing using cubic spline\ndef polyder(coeffs):\n return np.polyder(coeffs[::-1])[::-1]\n\n\nclass SmoothedFootTrajectory(object):\n def __init__(self, time_intervals, foot_placements, z_amplitude=0.05, name=\"Foot ref trajectory\"):\n self.name = name\n self.time_intervals = time_intervals\n self.foot_placements = foot_placements\n self.z_amplitude = z_amplitude\n self._R = np.identity(3)\n self.__compute()\n self.t0_l = []\n for k in range(len(time_intervals)):\n self.t0_l.append(time_intervals[k])\n\n def setOrientation(self, R):\n self._R = R\n\n def __compute(self):\n self.polycoeff_l = []\n self.dpolycoeff_l = []\n self.ddpolycoeff_l = []\n num_intervals = len(self.time_intervals)\n\n for k in range(num_intervals):\n xyz_polycoeff = []\n dxyz_polycoeff = []\n ddxyz_polycoeff = []\n\n foot_init_position = self.foot_placements[0].translation\n foot_end_position = self.foot_placements[1].translation\n\n # X trajectory\n x0 = foot_init_position[0, 0]\n x1 = foot_end_position[0, 0]\n\n nx = 6\n Px = np.zeros([nx, nx])\n Px[0, 0] = 1.\n Px[1, :] += 1.\n Px[2, 1] = 1.\n Px[3, 1:] = range(1, nx)\n Px[4, 2] = 1.\n Px[5, 2:] = np.array(range(2, nx)) * np.array(range(1, nx - 1))\n\n bx = np.array([x0, x1, 0., 0., 0., 0.])\n x_coeff = pinv(Px).dot(bx)\n xyz_polycoeff.append(x_coeff)\n\n dx_coeff = polyder(x_coeff)\n dxyz_polycoeff.append(dx_coeff)\n\n ddx_coeff = polyder(dx_coeff)\n ddxyz_polycoeff.append(ddx_coeff)\n\n # Y trajectory\n y0 = foot_init_position[1, 0]\n y1 = foot_end_position[1, 0]\n\n if y0 == y1:\n xyz_polycoeff.append(y0)\n dxyz_polycoeff.append([])\n ddxyz_polycoeff.append([])\n else:\n ny = 6\n Py = np.zeros([ny, ny])\n Py[0, 0] = 1.\n Py[1, :] += 1.\n Py[2, 1] = 1.\n Py[3, 1:] = range(1, ny)\n Py[4, 2] = 1.\n Py[5, 2:] = np.array(range(2, ny)) * np.array(range(1, ny - 1))\n\n by = np.array([y0, y1, 0., 0., 0., 0.])\n y_coeff = pinv(Py).dot(by)\n xyz_polycoeff.append(y_coeff)\n\n dy_coeff = polyder(y_coeff)\n dxyz_polycoeff.append(dy_coeff)\n\n ddy_coeff = polyder(dy_coeff)\n ddxyz_polycoeff.append(ddy_coeff)\n\n # Z trajectory depends directly on X not on time\n z0 = foot_init_position[2, 0]\n z1 = foot_end_position[2, 0]\n\n nz = 7 # number of coefficients for polynome on z\n Pz = np.zeros([nz, nz])\n # Position\n Pz[0, 0] = 1.\n Pz[1, :] += 1.\n # Velocity\n Pz[2, 1] = 1.\n Pz[3, 1:nz] = range(1, nz)\n # Mid trajectory constraint\n t_max = 0.4\n t_max = 0.5\n Pz[4, :] = np.power(t_max, range(nz))\n Pz[5, 1:] = range(1, nz) * np.power(t_max, range(nz - 1))\n Pz[6, 2:] = np.array(range(2, nz)) * np.array(range(1, nz - 1)) * np.power(t_max, range(nz - 2))\n\n bz = np.array([z0, z1, 2., -0., 0.5 * (z0 + z1) + self.z_amplitude, 0., -0.1])\n bz = np.array([z0, z1, 1., -0.8, 0.5 * (z0 + z1) + self.z_amplitude, 0., -0.1])\n z_coeff = pinv(Pz).dot(bz)\n xyz_polycoeff.append(z_coeff)\n\n dz_coeff = polyder(z_coeff)\n dxyz_polycoeff.append(dz_coeff)\n\n ddz_coeff = polyder(dz_coeff)\n ddxyz_polycoeff.append(ddz_coeff)\n\n self.polycoeff_l.append(xyz_polycoeff)\n self.dpolycoeff_l.append(dxyz_polycoeff)\n self.ddpolycoeff_l.append(ddxyz_polycoeff)\n\n def __call__(self, t):\n # assert t <= self.time_intervals[-1][1], \"t must be lower than the final time 
tf={}\".format(self.time_intervals[-1][1])\n\n index = len(self.t0_l) - 1\n if t > self.time_intervals[1]:\n t = self.time_intervals[1]\n elif t < self.time_intervals[0]:\n t = self.time_intervals[0]\n index = 0\n else:\n for k in range(len(self.t0_l)):\n if self.t0_l[k] > t:\n index = k - 1\n break\n\n xyz_polycoeff = self.polycoeff_l[index]\n dxyz_polycoeff = self.dpolycoeff_l[index]\n ddxyz_polycoeff = self.ddpolycoeff_l[index]\n\n t0 = self.time_intervals[0]\n t1 = self.time_intervals[1]\n\n if t0 == t1:\n tau = 0.\n dtau_dt = 0.\n else:\n tau = (t - t0) / (t1 - t0)\n dtau_dt = 1.\n\n # Evaluate X\n x = polyval(tau, xyz_polycoeff[0])\n if len(dxyz_polycoeff[0]):\n x_dot = polyval(tau, dxyz_polycoeff[0]) * dtau_dt\n else:\n x_dot = 0.\n\n if len(ddxyz_polycoeff[0]):\n x_dotdot = polyval(tau, ddxyz_polycoeff[0]) * dtau_dt**2\n else:\n x_dotdot = 0.\n\n # Evaluate Y\n y = polyval(tau, xyz_polycoeff[1])\n if len(dxyz_polycoeff[1]):\n y_dot = polyval(tau, dxyz_polycoeff[1]) * dtau_dt\n else:\n y_dot = 0.\n\n if len(ddxyz_polycoeff[1]):\n y_dotdot = polyval(tau, ddxyz_polycoeff[1]) * dtau_dt**2\n else:\n y_dotdot = 0.\n\n # Evaluate Z\n x0 = polyval(0., xyz_polycoeff[0])\n x1 = polyval(1., xyz_polycoeff[0])\n if x0 == x1:\n tau_x = 0.\n dtau_x_dt = 0.\n else:\n tau_x = (x - x0) / (x1 - x0)\n dtau_x_dt = x_dot\n\n z = polyval(tau_x, xyz_polycoeff[2])\n if len(dxyz_polycoeff[2]):\n z_dot = polyval(tau_x, dxyz_polycoeff[2]) * x_dot\n else:\n z_dot = 0.\n\n if len(ddxyz_polycoeff[2]):\n z_dotdot = polyval(tau_x, ddxyz_polycoeff[2]) * x_dot**2 + polyval(tau_x, dxyz_polycoeff[2]) * x_dotdot\n else:\n z_dotdot = 0.\n\n M = SE3.Identity()\n v = Motion.Zero()\n a = Motion.Zero()\n\n M.translation = np.matrix([x, y, z]).T\n M.rotation = self._R\n v.linear = np.matrix([x_dot, y_dot, z_dot]).T\n a.linear = np.matrix([x_dotdot, y_dotdot, z_dotdot]).T\n\n return M, v, a\n\n\nclass RefTrajectory(object):\n def __init__(self, name):\n self._name = name\n self._dim = 0\n\n @property\n def dim(self):\n return self._dim\n\n def has_trajectory_ended(self, t):\n return True\n\n def __call__(self, t):\n return np.matrix([]).reshape(0, 0)\n\n\nclass HPPEffectorTrajectory(RefTrajectory):\n def __init__(self, eeName, fullBody, problem, pid, name=\"HPP-effector-trajectory\"):\n RefTrajectory.__init__(self, name)\n self._dim = 3\n self._eeName = eeName\n self._fb = fullBody\n self._problem = problem\n self._pid = pid\n self._length = problem.pathLength(pid)\n\n def __call__(self, t):\n if t < 0.:\n print(\"Trajectory called with negative time.\")\n t = 0.\n elif t > self._length:\n print(\"Trajectory called after final time.\")\n t = self._length\n return effectorPositionFromHPPPath(self._fb, self._problem, self._eeName, self._pid, t)\n"
]
| [
[
"numpy.polyder",
"numpy.array",
"numpy.matrix",
"numpy.zeros",
"numpy.polynomial.polynomial.polyval",
"numpy.linalg.pinv",
"numpy.identity"
]
]
|
mvdbeek/seaplotlib | [
"af465b58fc321ddc29649f08860f81a874d95605"
]
| [
"seaplotlib/data.py"
]
| [
"import numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom matplotlib.font_manager import FontProperties\n\nclass DataDescription(object):\n\n def __init__(self, df):\n self.data = df\n\n\nclass CanDisplayScatter(object):\n \"\"\"Can be displayed as scatterplot without conversion.\"\"\"\n\n def plot_scatterplot(self, **kwargs):\n return sns.scatterplot(\n x=self.x,\n y=self.y,\n hue=self.hue,\n data=self.data,\n legend=kwargs.pop('legend', False),\n **kwargs)\n\n\nclass CanDisplayLabel(object):\n\n def plot_label(self, ax, **kwargs):\n data = self.data\n highlight_in = self.highlight_in\n label_in = self.label_in\n default_label_color = sns.axes_style()['text.color']\n font0 = FontProperties()\n font0.set_weight('bold')\n for idx in data.index:\n if idx in highlight_in:\n color = default_label_color\n if hasattr(self, 'highlight_text_color') and self.highlight_text_color is not None:\n color = self.highlight_text_color\n if (isinstance(label_in, str) and label_in == 'all') or idx in set(label_in) or (idx in highlight_in and self.label_highlight):\n if isinstance(label_in, dict):\n label = label_in[idx]\n else:\n label = idx\n x = self.data.loc[idx][self.x]\n y = self.data.loc[idx][self.y]\n ax.annotate(label, xy=(x, y),\n xytext=(x + 0.4, y + 0.2),\n arrowprops=dict(facecolor='black', shrink=0.05),\n )\n return ax\n\n\nclass MaPlotData(DataDescription, CanDisplayScatter, CanDisplayLabel):\n\n \"\"\"Simplfies MaPlot'ing.\"\"\"\n\n def __init__(self, df, highlight_in=None, label_in=None, label_highlight=False, highlight_label=None, highlight_text_color=None, **kwargs):\n super(MaPlotData, self).__init__(df=df)\n self._xlabel = 'log2(Base mean)'\n self._ylabel = 'log2(FC)'\n self.highlight_in = highlight_in\n self.label_in = label_in\n self.label_highlight = label_highlight\n self.highlight_label = highlight_label\n self.highlight_text_color = highlight_text_color\n\n @property\n def x(self):\n self.data[self.xlabel] = np.log2(self.data['Base mean'])\n return self.xlabel\n\n @property\n def y(self):\n return self._ylabel\n\n @property\n def xlabel(self):\n return self._xlabel\n\n @property\n def ylabel(self):\n return self._ylabel\n\n @property\n def significance(self):\n return 'P-adj'\n\n @property\n def hue(self):\n if self.highlight_in:\n highlight_label = self.highlight_label or 'highlight_in'\n self.data[highlight_label] = self.data.index.isin(self.highlight_in)\n return highlight_label\n return None\n\n\nclass VolcanoPlotData(MaPlotData):\n\n def __init__(self, df, highlight_in=None, label_in=None, label_highlight=False, highlight_label=None):\n super(VolcanoPlotData, self).__init__(df=df, highlight_in=highlight_in, label_in=label_in, label_highlight=label_highlight, highlight_label=highlight_label)\n self._xlabel = 'log2(FC)'\n self._ylabel = 'P-adj'\n\n @property\n def x(self):\n return self.xlabel\n\n @property\n def y(self):\n self.data[self.ylabel] = -np.log10(self.data[self._ylabel])\n return self.ylabel\n\n @property\n def ylabel(self):\n return '-log10(P-adj)'\n\n\nclass TwoColumnScatterData(MaPlotData):\n\n def __init__(self, df, highlight_in=None, label_in=None, logx=None, logy=None):\n super(TwoColumnScatterData, self).__init__(df=df, highlight_in=highlight_in, label_in=label_in)\n self.logx = logx\n self.logy = logy\n self._xlabel = self.data.columns[1]\n self._ylabel = self.data.columns[0]\n\n @property\n def _transformed_xlabel(self):\n if self.logx:\n xlabel = \"{logx}({xlabel})\".format(logx=self.logx, xlabel=self.xlabel)\n self.data[xlabel] = getattr(np, 
self.logx)(self.data[self._xlabel])\n return xlabel\n return None\n\n @property\n def _transformed_ylabel(self):\n if self.logx:\n ylabel = \"{logy}({ylabel})\".format(logy=self.logx, ylabel=self.ylabel)\n self.data[ylabel] = getattr(np, self.logy)(self.data[self._ylabel])\n return ylabel\n return None\n\n @property\n def x(self):\n return self._transformed_xlabel or self._xlabel\n\n @property\n def y(self):\n return self._transformed_ylabel or self._ylabel\n\n\nclass DataReader(object):\n\n @staticmethod\n def read_normalized_table(path):\n df = pd.read_csv(path, sep='\\t', index_col=0)\n df.index = [x.split('_')[1] for x in df.index]\n return df\n\n @staticmethod\n def read_deseq2_table(path):\n columns = ['Base mean', 'log2(FC)', 'StdErr', 'Wald-Stats', 'P-value', 'P-adj']\n df = pd.read_csv(path, sep='\\t', header=None, index_col=0).dropna()\n df.columns = columns\n df.index = [x.split('_')[-1] for x in df.index]\n return df\n\n @staticmethod\n def order_by_mean(df, columns=12):\n mean_df = df.loc[:, df.columns[columns:]]\n mean_df = mean_df.reindex(mean_df.mean(axis=1).sort_values(ascending=False).index)\n return mean_df\n"
]
| [
[
"numpy.log10",
"matplotlib.font_manager.FontProperties",
"numpy.log2",
"pandas.read_csv"
]
]
|
wmonteiro92/xmoai | [
"032602a4f6a33f2cc798ff7f7afe5aefcc9b30e7"
]
| [
"xmoai/problems/objectives.py"
]
| [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jul 11 23:58:52 2020\n\n@author: wmonteiro92\n\"\"\"\nimport numpy as np\nimport gower\n\nnum_objectives = 3 # number of objectives used by xMOAI\n\ndef get_difference_target_regression(model, x, y_desired, method='predict'):\n \"\"\"Calculates the objective 1 (f1), where it attempts to minimize the \n unsigned difference between y_desired and the value found by the \n prediction found for the proposed x - i.e. between the target and the \n value found by the subject. Valid only for regression problems.\n\n :param model: a machine learning model\n :type model: Object\n :param x: the individual (or individuals) to be evaluated\n :type x: numpy.array\n :param y_desired: the prediction result targeted\n :type y_desired: numpy.array\n :param method: the method responsible of determining the prediction\n :type method: string, defaults to `predict`\n \n :return: two objects. The first are the objective 1 (f1) values and\n the second are the predicted values related to `x` and found by `model` \n using `method`\n :rtype: np.array (first object) and np.array (second object)\n \"\"\"\n prediction = getattr(model, method)(x)\n return np.abs(prediction - y_desired), prediction\n\ndef get_difference_target_classification_proba(model, x, class_column,\n method='predict_proba'):\n \"\"\"Calculates the objective 1 (f1), where it attempts to maximize the \n probability of the desired class. Valid only for classification problems \n with methods returning the probability estimates for each class.\n\n :param model: a machine learning model\n :param x: the individual (or individuals) to be evaluated\n :type x: numpy.array\n :param class_column: the column index of the prediction class targeted\n :type class_column: Integer\n :param method: the method responsible of determining the prediction\n :type method: string, defaults to `predict_proba`\n \n :return: two objects. The first are the objective 1 (f1) values and\n the second are the predicted values related to `x` and found by `model` \n using `method`\n :rtype: np.array (first object) and np.array (second object)\n \"\"\"\n prediction = getattr(model, method)(x)\n return 1 - prediction[:, class_column], prediction\n\ndef get_difference_target_classification_simple(model, x, y_desired,\n method='predict'):\n \"\"\"Calculates the objective 1 (f1), where it assigns 1 if the predicted \n class differs from the desired class and 0 otherwise. Valid only for \n classification problems with methods returning the predicted class.\n\n :param model: a machine learning model\n :param x: the individual (or individuals) to be evaluated\n :type x: numpy.array\n :param y_desired: the class targeted\n :type y_desired: object\n :param method: the method responsible of determining the prediction\n :type method: string, defaults to `predict`\n \n :return: two objects. 
The first are the objective 1 (f1) values and\n the second are the predicted values related to `x` and found by `model` \n using `method`\n :rtype: np.array (first object) and np.array (second object)\n \"\"\"\n prediction = getattr(model, method)(x)\n return np.where(prediction==y_desired, 0, 1), prediction\n\ndef get_difference_attributes(x, x_original, categorical_columns):\n \"\"\"Calculates the objective 2 (f2), where it attempts to minimize the \n difference between the modified and original values through the Gower \n distance.\n \n :param x: the individual (or individuals) to be evaluated\n :type x: numpy.array\n :param x_original: the original individual\n :type x_original: numpy.array\n :param categorical_columns: the categorical columns used by the dataset\n :type categorical_columns: dict\n \n :return: the Gower distance between x and x_original\n :rtype: np.array\n \"\"\"\n if categorical_columns==None or len(categorical_columns.keys()) == 0:\n cat_features = np.array([False]*x_original.shape[0])\n else:\n cat_features = np.isin(np.array(range(x_original.shape[0])), \n np.array([x for x in categorical_columns.keys()]))\n \n return gower.gower_matrix(data_x=np.nan_to_num(x, nan=-2**32-1), \n data_y=np.nan_to_num(x_original.reshape(1, -1), nan=-2**32-1),\n cat_features=cat_features).flatten()\n \ndef get_modified_attributes(x, x_original):\n \"\"\"Calculates the objective 3 (f3), where it attempts to minimize the \n number of modified attributes (columns).\n \n :param x: the individual (or individuals) to be evaluated\n :type x: numpy.array\n :param x_original: the original individual\n :type x_original: numpy.array\n \n :return: the number of modified attributes for each one of the solutions \n (rows) provided in x and compared against x_original\n :rtype: np.array\n \"\"\"\n # f3: minimize the number of modified attributes\n return np.count_nonzero(np.nan_to_num(x_original, nan=-2**32-1) - \n np.nan_to_num(x, nan=-2**32-1), axis=1)"
]
| [
[
"numpy.where",
"numpy.array",
"numpy.nan_to_num",
"numpy.abs"
]
]
|
SHARANTANGEDA/CueObserve | [
"c3fbd77058101154ab59fc0e88bf6c02f8e2b446"
]
| [
"api/ops/tasks.py"
]
| [
"import json\nimport traceback\nimport datetime as dt\nimport pandas as pd\nimport html2text\nfrom django.template import Template, Context\nfrom celery import shared_task, group\nfrom celery.result import allow_join_result\n\nfrom anomaly.models import Anomaly, AnomalyDefinition, RunStatus, AnomalyCardTemplate\nfrom anomaly.serializers import AnomalySerializer\nfrom access.data import Data\nfrom access.utils import prepareAnomalyDataframes\nfrom ops.anomalyDetection import anomalyService\nfrom anomaly.services.slack import SlackAlert\n\nANOMALY_DAILY_TEMPLATE = \"Anomaly Daily Template\"\nANOMALY_HOURLY_TEMPLATE= \"Anomaly Hourly Template\"\n\nANOMALY_DETECTION_RUNNING = \"RUNNING\"\nANOMALY_DETECTION_SUCCESS = \"SUCCESS\"\nANOMALY_DETECTION_ERROR = \"ERROR\"\n\n@shared_task\ndef _anomalyDetectionSubTask(anomalyDef_id, dimVal, contriPercent, dfDict):\n \"\"\"\n Internal anomaly detection subtask to be grouped by celery for each anomaly object\n \"\"\"\n anomalyDefinition = AnomalyDefinition.objects.get(id=anomalyDef_id)\n anomalyServiceResult = anomalyService(anomalyDefinition, dimVal, contriPercent, pd.DataFrame(dfDict))\n return anomalyServiceResult\n\n\n@shared_task\ndef anomalyDetectionJob(anomalyDef_id: int, manualRun: bool = False):\n \"\"\"\n Method to find initiate anomaly detection for a given anomaly definition\n :param anomalyDef_id: ID of the anomaly definition\n :param manualRun: Boolean determining whether task was manually initiated\n \"\"\"\n runType = \"Manual\" if manualRun else \"Scheduled\"\n anomalyDefinition = AnomalyDefinition.objects.get(id=anomalyDef_id)\n anomalyDefinition.anomaly_set.update(published=False)\n runStatusObj = RunStatus.objects.create(anomalyDefinition=anomalyDefinition, status=ANOMALY_DETECTION_RUNNING, runType=runType)\n logs = {}\n allTasksSucceeded = False\n try:\n datasetDf = Data.fetchDatasetDataframe(anomalyDefinition.dataset)\n dimValsData = prepareAnomalyDataframes(datasetDf, anomalyDefinition.dataset.timestampColumn, anomalyDefinition.metric, anomalyDefinition.dimension,anomalyDefinition.operation ,int(anomalyDefinition.value))\n detectionJobs = group(\n _anomalyDetectionSubTask.s(anomalyDef_id, obj[\"dimVal\"], obj[\"contriPercent\"], obj[\"df\"].to_dict(\"records\")) for obj in dimValsData\n )\n _detectionJobs = detectionJobs.apply_async()\n with allow_join_result():\n result = _detectionJobs.get()\n Anomaly.objects.filter(id__in=[anomaly[\"anomalyId\"] for anomaly in result if anomaly[\"success\"]]).update(latestRun=runStatusObj)\n logs[\"numAnomaliesPulished\"] = len([anomaly for anomaly in result if anomaly.get(\"published\")])\n logs[\"numAnomalySubtasks\"] = len(_detectionJobs)\n logs[\"log\"] = json.dumps({detection.id: detection.result for detection in _detectionJobs})\n allTasksSucceeded = all([anomalyTask[\"success\"] for anomalyTask in result])\n except Exception as ex:\n logs[\"log\"] = json.dumps({\"stackTrace\": traceback.format_exc(), \"message\": str(ex)})\n runStatusObj.status = ANOMALY_DETECTION_ERROR\n else:\n runStatusObj.status = ANOMALY_DETECTION_SUCCESS\n if not allTasksSucceeded:\n runStatusObj.status = ANOMALY_DETECTION_ERROR\n runStatusObj.logs = logs\n runStatusObj.endTimestamp = dt.datetime.now()\n runStatusObj.save()\n\n # Slack alerts\n title = \"CueObserve Alerts\"\n if runStatusObj.status == ANOMALY_DETECTION_SUCCESS:\n if logs.get(\"numAnomaliesPulished\", 0) > 0:\n message = f\"{logs['numAnomaliesPulished']} anomalies published. 
\\n\"\n topNtext = f\" Top {anomalyDefinition.value}\" if int(anomalyDefinition.value) > 0 else \"\"\n message = message + f\"Anomaly Definition: {anomalyDefinition.metric} {anomalyDefinition.dimension} {anomalyDefinition.highOrLow}{topNtext} \\n\"\n message = message + f\"Dataset: {anomalyDefinition.dataset.name} | Granularity: {anomalyDefinition.dataset.granularity} \\n \\n\"\n \n highestContriAnomaly = anomalyDefinition.anomaly_set.order_by(\"data__contribution\").last()\n data = AnomalySerializer(highestContriAnomaly).data\n templateName = ANOMALY_DAILY_TEMPLATE if anomalyDefinition.dataset.granularity == \"day\" else ANOMALY_HOURLY_TEMPLATE\n cardTemplate = AnomalyCardTemplate.objects.get(templateName=templateName)\n data.update(data[\"data\"][\"anomalyLatest\"])\n \n details = html2text.html2text(Template(cardTemplate.title).render(Context(data))) + \"\\n\"\n details = details + html2text.html2text(Template(cardTemplate.bodyText).render(Context(data)))\n\n name = \"anomalyAlert\"\n SlackAlert.slackAlertHelper(title, message, name, details=details)\n \n if runStatusObj.status == ANOMALY_DETECTION_ERROR:\n message = \"Anomaly Detection Job failed on AnomalyDefintion id : \" + str(anomalyDef_id) + \"\\n\"\n message = message + str(logs[\"log\"])\n name = \"appAlert\"\n SlackAlert.slackAlertHelper(title, message, name)"
]
| [
[
"pandas.DataFrame"
]
]
|
unnat/active_gcn_ner | [
"eed43687cc09fd6469371b69a5d8f0079f30c012"
]
| [
"train.py"
]
| [
"from gcn_ner import GCNNer\n\nif __name__ == '__main__':\n\t\n\tfrom numpy import genfromtxt\n\timport numpy as np\n\t\n\t# al_length = 3750\n\t# al_list = list(np.random.randint(0,45112,al_length))\n\t# GCNNer.train_and_save(dataset='./data/labeled.conll', saving_dir='./data/unlabeled_50_random', epochs=20, al_args=al_list, load_ckpt=\"./data/unlabeled_50/ner-gcn-9.tf\")\n\n\t# my_data = genfromtxt('unlabeled_50_scores_sorted.csv', delimiter=',')\n\t# al_length = 3750\n\t# al_list = list(my_data[:3750,0].astype(np.int))\n\t# print(\"Total finetuning samples: {}\".format(len(al_list)))\n\t# GCNNer.train_and_save(dataset='./data/labeled.conll', saving_dir='./data/unlabeled_50_uncertain_2', epochs=20, al_args=al_list, load_ckpt=\"./data/unlabeled_50/ner-gcn-9.tf\")\n\n\n\tmy_data = genfromtxt('unlabeled_50_scores_sorted.csv', delimiter=',')\n\tal_length = 3750\n\tal_list = list(my_data[:3750,0].astype(np.int))\n\tal_list.extend(range(45112, 45112+15177))\n\tprint(\"Total finetuning samples: {}\".format(len(al_list)))\n\tGCNNer.train_and_save(dataset='./data/labeled_and_unlabeled_50.conll', saving_dir='./data/unlabeled_50_uncertain_combined', epochs=20, al_args=al_list, load_ckpt=\"./data/unlabeled_50/ner-gcn-9.tf\")\n\n\t# al_length = 3750\n\t# al_list = list(np.random.randint(0,45112,al_length))\n\t# al_list.extend(range(45112, 45112+15177))\n\t# print(\"Total finetuning samples: {}\".format(len(al_list)))\n\t# GCNNer.train_and_save(dataset='./data/labeled_and_unlabeled_50.conll', saving_dir='./data/unlabeled_50_random_combined', epochs=20, al_args=al_list, load_ckpt=\"./data/unlabeled_50/ner-gcn-9.tf\")\n\n\t# my_data = genfromtxt('unlabeled_50_scores_sorted.csv', delimiter=',')\n\t# al_length = 3750\n\t# al_list = list(my_data[:3750,0].astype(np.int))\n\t# al_list.extend(range(45112, 45112+15177))\n\t# print(\"Total finetuning samples: [UC] {}\".format(len(al_list)))\n\t# GCNNer.train_and_save(dataset='./data/labeled_and_unlabeled_50.conll', saving_dir='./data/unlabeled_50_uncertain_combined_scratch', epochs=30, al_args=al_list)\n\n\t# al_length = 3750\n\t# al_list = list(np.random.randint(0,45112,al_length))\n\t# al_list.extend(range(45112, 45112+15177))\n\t# print(\"Total finetuning samples: [Random] {}\".format(len(al_list)))\n\t# GCNNer.train_and_save(dataset='./data/labeled_and_unlabeled_50.conll', saving_dir='./data/unlabeled_50_random_combined_scratch', epochs=30, al_args=al_list)"
]
| [
[
"numpy.genfromtxt"
]
]
|
Fragalli/brFinance | [
"f06d7b148d20d07361c89158837d47225c4fea1f"
]
| [
"brFinance/scraper/cvm/search.py"
]
| [
"import re\nimport time\nfrom abc import ABC, abstractmethod\nfrom datetime import datetime\nfrom typing import Tuple, Any\n\nimport lxml.html as LH\nimport pandas as pd\nfrom selenium import webdriver\n\nfrom brFinance.utils.browser import Browser\n\n\nclass Search(ABC):\n \"\"\"\n Perform webscraping on the page https://www.rad.cvm.gov.br/ENET/frmConsultaExternaCVM.aspx\n \"\"\"\n\n DELAY: int = 1\n cvm_code_df: pd.DataFrame = None\n driver: webdriver = None\n\n def check_cvm_code_exists(self, cod_cvm: int) -> bool:\n \"\"\"Check if CVM code exists\n\n Parameters\n ----------\n cod_cvm : int\n CVM company code\n\n Returns\n -------\n bool\n True if cvm code exist, otherwise False\n \"\"\"\n\n cvm_codes_available = self.get_cvm_codes()\n cvm_code_exists = str(cod_cvm) in [str(cod_cvm_aux) for cod_cvm_aux in cvm_codes_available['codCVM'].values]\n return cvm_code_exists\n\n\n def _instantiate_driver(self) -> webdriver:\n \"\"\"Returns a driver object\n\n Returns\n -------\n selenium.webdriver\n webdriver created for searching\n \"\"\"\n\n if self.driver is None: return Browser.run_chromedriver()\n\n return self.driver\n\n def _fetch_data(self, cvm_code: int, category: int, initial_date: str, final_date: str) -> Tuple[pd.DataFrame, Any]:\n \"\"\"Returns dataframe and html document from search\n\n Parameters\n ----------\n initial_date : str\n Initial date for search\n final_date : str\n Final date for search\n\n Returns\n -------\n pandas.Dataframe\n Dataframe containing search results\n lxml object\n Object containing html data from search\n \"\"\"\n\n driver = self._instantiate_driver()\n\n driver.get(f\"https://www.rad.cvm.gov.br/ENET/frmConsultaExternaCVM.aspx?codigoCVM={cvm_code}\")\n\n # Wait until page is loaded and click Period button\n while True:\n try:\n period_button_xpath = \"//html/body/form[1]/div[3]/div/fieldset/div[4]/div[1]/label[4]\"\n #driver.find_element_by_xpath(period_button_xpath).click()\n driver.find_element_by_id(\"rdPeriodo\").click()\n break\n except Exception:\n print(\"[LOG]: Waiting for period button\")\n time.sleep(1)\n\n # Wait until page is loaded and send keys for initial date\n while True:\n try:\n period_init_id = \"txtDataIni\"\n driver.find_element_by_id(period_init_id).send_keys(initial_date)\n break\n except Exception:\n print(\"[LOG]: Waiting for initial date input\")\n time.sleep(1)\n\n # Wait until page is loaded and send keys for end date\n while True:\n try:\n period_end_id = \"txtDataFim\"\n driver.find_element_by_id(period_end_id).send_keys(final_date)\n break\n except Exception:\n print(\"[LOG]: Waiting for final date input\")\n time.sleep(1)\n\n # Wait until page is loaded and click Categories button\n while True:\n try:\n category_button_id = 'cboCategorias_chosen'\n driver.find_element_by_id(category_button_id).click()\n break\n except Exception:\n print(\"[LOG]: Waiting for Categories button\")\n time.sleep(1)\n\n # Wait until page is loaded and select category from user input\n while True:\n try:\n category_selection_xpath = f\"//html/body/form[1]/div[3]/div/fieldset/div[\"\"5]/div[1]/div/div/ul/li[\" \\\n f\"@data-option-array-index='{category}']\"\n driver.find_element_by_xpath(category_selection_xpath).click()\n break\n except Exception:\n print(\"[LOG]: Waiting for category dropdown menu\")\n time.sleep(1)\n\n # Wait until page is loaded and click on Consult button\n while True:\n try:\n consult_button_id = \"btnConsulta\"\n driver.find_element_by_id(consult_button_id).click()\n break\n except Exception:\n print(\"[LOG]: Waiting for 
consult button\")\n time.sleep(1)\n\n # Wait html table load the results (grdDocumentos)\n while True:\n try:\n table_html = str(driver.find_element_by_id('grdDocumentos').get_attribute(\"outerHTML\"))\n if (\"DFP - Demonstrações Financeiras Padronizadas\" in table_html) or \\\n (\"ITR - Informações Trimestrais\" in table_html):\n break\n except Exception:\n print(\"[LOG]: Waiting for results\")\n time.sleep(1)\n\n \n table = LH.fromstring(table_html)\n df = pd.read_html(table_html)[0]\n\n if self.driver is None: driver.quit()\n\n return df, table\n\n\n def _clean_data(self, cvm_code: int, df_enet_search_result: pd.DataFrame, table: Any) -> pd.DataFrame:\n \"\"\"\n Perform data cleaning and add link to view or download reports documents\n\n Parameters\n ----------\n cvm_code : int\n cvm_code\n df_enet_search_result : DataFrame\n ENET Search dataframe result\n table : HTML string with ENET search table result containing links to download and view the reports.\n\n Returns\n -------\n pandas.Dataframe\n Dataframe containing search cleaned results\n \"\"\"\n\n # Cleaning data for CVM code and reference date\n df_enet_search_result[\"Código CVM\"] = cvm_code\n df_enet_search_result['Data Referência'] = df_enet_search_result['Data Referência'].str.split(' ', 1).str[1]\n df_enet_search_result['Data Referência'] = pd.to_datetime(df_enet_search_result[\"Data Referência\"], format=\"%d/%m/%Y\", errors=\"coerce\")\n\n # Creating a collumn for document visualization link\n link_view = []\n for expression in table.xpath(\"//tr/td/i[1]/@onclick\"):\n link_view.append(\"https://www.rad.cvm.gov.br/ENET/\" + re.findall(\"(?<=\\')(.*?)(?=\\')\", expression)[0])\n\n df_enet_search_result[\"linkView\"] = link_view\n\n # Creating a collumn for document download link\n link_download = []\n for expression in table.xpath(\"//tr/td/i[2]/@onclick\"):\n try:\n data = expression.split(\",\")\n if \"OpenDownloadDocumentos\" in data:\n sequencia, versao, protocolo, tipo = [re.findall(\"(?<=\\')(.*?)(?=\\')\", d)[0] for d in data]\n link_download.append(f\"https://www.rad.cvm.gov.br/ENET/frmDownloadDocumento.aspx?Tela=ext&\"\n f\"numSequencia={sequencia}&\"\n f\"numVersao={versao}&\"\n f\"numProtocolo={protocolo}&\"\n f\"descTipo={tipo}&\"\n f\"CodigoInstituicao=1\")\n else:\n link_download.append(None)\n except IndexError:\n link_download.append(None)\n df_enet_search_result[\"linkDownload\"] = link_download\n\n # Filtering for documents which Status is Active\n df_enet_search_result = df_enet_search_result.drop(df_enet_search_result[df_enet_search_result[\"Status\"] != \"Ativo\"].index)\n\n # Deleting Actions column\n del df_enet_search_result[\"Ações\"]\n\n return df_enet_search_result\n\n def get_cvm_codes(self) -> pd.DataFrame:\n \"\"\"Returns a dataframe of all CVM codes and Company names\n\n Returns\n -------\n pandas.Dataframe\n Dataframe of all CVM codes and company names\n \"\"\"\n if Search.cvm_code_df is not None: return Search.cvm_code_df\n\n driver = self._instantiate_driver()\n\n driver.get(\"https://www.rad.cvm.gov.br/ENET/frmConsultaExternaCVM.aspx\")\n\n # Wait until page is loaded and get all companies data\n while True:\n try:\n companies_result_id = \"hdnEmpresas\"\n html_data = driver.find_element_by_id(companies_result_id).get_attribute(\"value\")\n if len(html_data) == 0:\n continue\n break\n except Exception:\n print(\"[LOG]: Waiting CVM codes\")\n time.sleep(1)\n\n # Selecting company name and CVM code\n list_cod_cvm = re.findall(r\"(?<=_)(.*?)(?=\\')\", html_data)\n list_nome_emp = 
re.findall(r\"(?<=-)(.*?)(?=\\')\", html_data)\n\n # Adding selected information to a Dataframe\n df = pd.DataFrame(list(zip(list_cod_cvm, list_nome_emp)), columns=['codCVM', 'nome_empresa'])\n df['codCVM'] = pd.to_numeric(df['codCVM'])\n\n Search.cvm_code_df = df\n\n if self.driver is None: driver.quit()\n\n return Search.cvm_code_df\n\n @abstractmethod\n def search(self, cvm_code: int, initial_date: str, final_date: str) -> pd.DataFrame:\n \"\"\"\n Returns dataframe of search results including cod_cvm, report's url, etc.\n\n Parameters\n ----------\n cvm_code : int\n CVM company code\n initial_date: str\n Ex: 01012010 for 01/01/2010\n final_date: str\n Ex 30072021 for 30/07/2021\n\n Returns\n -------\n pandas.Dataframe\n Dataframe of search results\n \"\"\"\n pass\n\n\nclass SearchDFP(Search):\n \"\"\"\n Perform webscraping on the page https://www.rad.cvm.gov.br/ENET/frmConsultaExternaCVM.aspx for category\n \"Demonstração Financeira Padronizada\"\n \"\"\"\n\n def __init__(self, driver: webdriver = None):\n \"\"\"\n Parameters\n ----------\n driver : webdriver\n Optional parameter for webdriver created by user\n \"\"\"\n\n self.driver = driver\n self.category = 21\n\n def search(self,\n cvm_code: int,\n initial_date: str = '01012010',\n final_date: str = datetime.today().strftime('%d%m%Y')) -> pd.DataFrame:\n assert self.check_cvm_code_exists(cvm_code), \"CVM code not found\"\n\n df, table = self._fetch_data(cvm_code, self.category, initial_date, final_date)\n \n df = self._clean_data(cvm_code, df, table)\n\n return df\n\n\nclass SearchITR(Search):\n \"\"\"\n Perform webscraping on the page https://www.rad.cvm.gov.br/ENET/frmConsultaExternaCVM.aspx for category\n \"Informações Trimestrais\"\n \"\"\"\n\n def __init__(self, driver: webdriver = None):\n \"\"\"\n Parameters\n ----------\n driver : webdriver\n Optional parameter for webdriver created by user\n \"\"\"\n\n self.driver = driver\n self.category = 39\n\n\n def search(self,\n cvm_code: int,\n initial_date: str = '01012010',\n final_date: str = datetime.today().strftime('%d%m%Y')) -> pd.DataFrame:\n assert self.check_cvm_code_exists(cvm_code), \"CVM code not found\"\n\n df, table = self._fetch_data(cvm_code, self.category, initial_date, final_date)\n \n df = self._clean_data(cvm_code, df, table)\n\n return df\n"
]
| [
[
"pandas.to_datetime",
"pandas.to_numeric",
"pandas.read_html"
]
]
|
ingako/lifelong-ml | [
"a0108502b3e1ba5556a6cf6f1123037900db6427"
]
| [
"eval/mean-stats-eval.py"
]
| [
"#!/usr/bin/env python3\n\nimport os\nimport subprocess\nimport math\nimport pandas as pd\nimport numpy as np\nfrom statistics import stdev\n\nclass Config:\n def __init__(self, kappa, ed, reuse_rate=0, reuse_window_size=0, lossy_window=0):\n self.kappa = kappa\n self.ed = ed\n self.reuse_rate = reuse_rate\n self.reuse_window_size = reuse_window_size\n self.lossy_window = lossy_window\n\n\ndef is_empty_file(fpath):\n return False if os.path.isfile(fpath) and os.path.getsize(fpath) > 0 else True\n\ndef get_acc(output):\n df = pd.read_csv(output)\n\n accuracy = df[\"accuracy\"]\n acc_mean = np.mean(accuracy) * 100\n acc_std = np.std(accuracy) * 100\n\n return acc_mean # , acc_std\n\ndef get_kappa(output, is_moa=False):\n df = pd.read_csv(output)\n\n kappa = df[\"kappa\"]\n\n is_nans = np.isnan(kappa)\n kappa[is_nans] = 1\n\n kappa_mean = np.mean(kappa) * 100\n kappa_std = np.std(kappa) * 100\n\n return kappa_mean # , kappa_std\n\ndef get_acc_sum(output):\n df = pd.read_csv(output)\n acc_sum = df[\"accuracy\"].sum() * 100\n return acc_sum\n\ndef get_kappa_sum(output, is_moa=False):\n df = pd.read_csv(output)\n\n kappa = df[\"kappa\"]\n is_nans = np.isnan(kappa)\n kappa[is_nans] = 1\n\n kappa_sum = kappa.sum() * 100\n\n return kappa_sum\n\ndef get_mem(output):\n df = pd.read_csv(output)\n return df[\"memory\"].iloc[-1] / 1024\n\ndef get_time(output):\n f = open(output,'r')\n out = f.readlines()\n return float(out[0][:-1]) / 60\n\nbase_dir = os.getcwd()\n\ndataset='agrawal-3'\nkappa=0.2\ned=110\n\n# dataset='agrawal-gradual'\n# kappa=0.2\n# ed=110\n\n# dataset='agrawal-6-gradual'\n# kappa=0.2\n# ed=90\n\ncur_data_dir = f\"{base_dir}/../data/{dataset}\"\n\n# arf results\narf_acc_results = []\narf_kappa_results = []\narf_time_results = []\narf_mem_results = []\narf_acc_sum_results = []\narf_kappa_sum_results = []\n\nfor seed in range(10):\n arf_output = f'{cur_data_dir}/result-{seed}.csv'\n\n arf_acc_results.append(get_acc(arf_output))\n arf_acc_sum_results.append(get_acc_sum(arf_output))\n\n arf_kappa_results.append(get_kappa(arf_output))\n arf_kappa_sum_results.append(get_kappa_sum(arf_output))\n\n arf_time_output = f'{cur_data_dir}/time-{seed}.log'\n if is_empty_file(arf_time_output):\n continue\n arf_time_results.append(get_time(arf_time_output))\n\n# pattern matching results\nsarf_acc_results = []\nsarf_kappa_results = []\nsarf_time_results = []\nsarf_mem_results = []\nsarf_acc_gain_results = []\nsarf_kappa_gain_results = []\n\npattern_matching_dir = f'{cur_data_dir}/k{kappa}-e{ed}/'\nfor seed in range(10):\n sarf_output = f'{pattern_matching_dir}/result-sarf-{seed}.csv'\n\n sarf_mem_results.append(get_mem(sarf_output))\n\n sarf_acc_results.append(get_acc(sarf_output))\n sarf_acc_gain_results.append(get_acc_sum(sarf_output) - arf_acc_sum_results[seed])\n\n sarf_kappa_results.append(get_kappa(sarf_output))\n sarf_kappa_gain_results.append(get_kappa_sum(sarf_output) - arf_kappa_sum_results[seed])\n\n sarf_time_output = f'{pattern_matching_dir}/time-sarf-{seed}.log'\n if is_empty_file(sarf_time_output):\n continue\n sarf_time_results.append(get_time(sarf_time_output))\n\n# pearl results\npearl_acc_results = []\npearl_kappa_results = []\npearl_time_results = []\npearl_mem_results = []\npearl_acc_gain_results = []\npearl_kappa_gain_results = []\n\nfor seed in range(10):\n acc = 0\n kappa = 0\n\n max_acc_gain = -1\n max_kappa_gain = 0\n\n mem = 0\n time = 0\n\n cur_param_dir = ''\n reuse_params = [f for f in os.listdir(pattern_matching_dir) if os.path.isdir(os.path.join(pattern_matching_dir, 
f))]\n\n for reuse_param in reuse_params:\n\n cur_reuse_param = f\"{pattern_matching_dir}/{reuse_param}\"\n\n lossy_params = \\\n [f for f in os.listdir(cur_reuse_param) if os.path.isdir(os.path.join(cur_reuse_param, f))]\n\n for lossy_param in lossy_params:\n lossy_output = f'{cur_reuse_param}/{lossy_param}/result-parf-{seed}.csv'\n if is_empty_file(lossy_output):\n print(f'file does not exist: {lossy_output}')\n continue\n\n cur_acc_gain = get_acc_sum(lossy_output) - arf_acc_sum_results[seed]\n if max_acc_gain < cur_acc_gain:\n max_acc_gain = cur_acc_gain\n acc = get_acc(lossy_output)\n\n kappa = get_kappa(lossy_output)\n max_kappa_gain = get_kappa_sum(lossy_output) - arf_kappa_sum_results[seed]\n\n mem = get_mem(lossy_output)\n\n time_output = f'{cur_reuse_param}/{lossy_param}/time-parf-{seed}.log'\n if is_empty_file(time_output):\n continue\n time = get_time(time_output)\n\n if max_acc_gain < sarf_acc_gain_results[seed]:\n acc = sarf_acc_results[seed]\n max_acc_gain = sarf_acc_gain_results[seed]\n kappa = sarf_kappa_results[seed]\n max_kappa_gain = sarf_kappa_gain_results[seed]\n mem = sarf_mem_results[seed]\n\n # time_output = f'{cur_reuse_param}/{lossy_param}/time-parf-{seed}.log'\n # if is_empty_file(time_output):\n # continue\n time = sarf_time_results[seed]\n\n pearl_acc_results.append(acc)\n pearl_kappa_results.append(kappa)\n pearl_time_results.append(time)\n pearl_mem_results.append(mem)\n\n pearl_acc_gain_results.append(max_acc_gain)\n pearl_kappa_gain_results.append(max_kappa_gain)\n\n\ndef eval_mean(results):\n result_strs = []\n for result in results:\n mean = sum(result) / len(result)\n std = stdev(result)\n result_strs.append(f'${mean:.2f}\\pm{std:.2f}$')\n print(' & '.join(result_strs))\n\nprint('===============results===============')\nprint('accuracy & kappa avg.')\neval_mean([arf_acc_results, sarf_acc_results, pearl_acc_results,\n arf_kappa_results, sarf_kappa_results, pearl_kappa_results])\nprint('\\n')\n\nprint('accuracy & kappa gain')\neval_mean([sarf_acc_gain_results, pearl_acc_gain_results,\n sarf_kappa_gain_results, pearl_kappa_gain_results])\nprint('\\n')\n\nprint('memory & runtime')\neval_mean([sarf_mem_results, pearl_mem_results,\n arf_time_results, sarf_time_results, pearl_time_results])\nprint('\\n')\n"
]
| [
[
"numpy.std",
"pandas.read_csv",
"numpy.isnan",
"numpy.mean"
]
]
|
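A minimal sketch of the metric-aggregation convention used by `get_acc`/`get_kappa` in the evaluation script above: NaN kappa values are treated as 1 before averaging. The in-memory frame below is a hypothetical stand-in for one `result-<seed>.csv`, not data from the repository.

```python
import numpy as np
import pandas as pd

# Hypothetical stand-in for one result-<seed>.csv with per-window metrics
df = pd.DataFrame({
    "accuracy": [0.91, 0.88, 0.93],
    "kappa": [0.82, np.nan, 0.87],   # kappa is NaN when a window contains a single class
    "memory": [1024, 2048, 4096],
})

kappa = df["kappa"].copy()
kappa[np.isnan(kappa)] = 1           # same NaN -> 1 convention as get_kappa above

acc_mean = np.mean(df["accuracy"]) * 100
kappa_mean = np.mean(kappa) * 100
mem_kb = df["memory"].iloc[-1] / 1024  # last-row memory in KB, as in get_mem

print(f"accuracy {acc_mean:.2f}%  kappa {kappa_mean:.2f}%  memory {mem_kb:.2f} KB")
```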
pnode-dev/pnode | [
"be42bce4b2e904525c11bcaa952c269e4c8ad45c"
]
| [
"examples/models/sqnxt_PETSc.py"
]
| [
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nimport math\n\n\nclass BasicBlock(nn.Module):\n def __init__(self, in_channels, out_channels, stride):\n super(BasicBlock, self).__init__()\n reduction = 0.5\n if 2 == stride:\n reduction = 1\n elif in_channels > out_channels:\n reduction = 0.25\n \n self.conv1 = nn.Conv2d(in_channels, int(in_channels * reduction), 1, stride, bias = True)\n self.bn1 = nn.BatchNorm2d(int(in_channels * reduction))\n self.conv2 = nn.Conv2d(int(in_channels * reduction), int(in_channels * reduction * 0.5), 1, 1, bias = True)\n self.bn2 = nn.BatchNorm2d(int(in_channels * reduction * 0.5))\n self.conv3 = nn.Conv2d(int(in_channels * reduction * 0.5), int(in_channels * reduction), (1, 3), 1, (0, 1), bias = True)\n self.bn3 = nn.BatchNorm2d(int(in_channels * reduction))\n self.conv4 = nn.Conv2d(int(in_channels * reduction), int(in_channels * reduction), (3, 1), 1, (1, 0), bias = True)\n self.bn4 = nn.BatchNorm2d(int(in_channels * reduction))\n self.conv5 = nn.Conv2d(int(in_channels * reduction), out_channels, 1, 1, bias = True)\n self.bn5 = nn.BatchNorm2d(out_channels)\n \n self.shortcut = nn.Sequential()\n if 2 == stride or in_channels != out_channels:\n self.shortcut = nn.Sequential(\n nn.Conv2d(in_channels, out_channels, 1, stride, bias = True),\n nn.BatchNorm2d(out_channels)\n )\n \n def forward(self, input):\n output = F.relu(self.bn1(self.conv1(input)))\n output = F.relu(self.bn2(self.conv2(output)))\n output = F.relu(self.bn3(self.conv3(output)))\n output = F.relu(self.bn4(self.conv4(output)))\n output = F.relu(self.bn5(self.conv5(output)))\n output += F.relu(self.shortcut(input))\n output = F.relu(output)\n return output\n\nclass BasicBlock2(nn.Module):\n def __init__(self, dim):\n super(BasicBlock2, self).__init__()\n in_channels = dim\n out_channels = dim\n reduction = 0.5\n stride = 1\n self.nfe = 0\n \n self.conv1 = nn.Conv2d(in_channels, int(in_channels * reduction), 1, stride, bias = True)\n self.bn1 = nn.BatchNorm2d(int(in_channels * reduction))\n self.conv2 = nn.Conv2d(int(in_channels * reduction), int(in_channels * reduction * 0.5), 1, 1, bias = True)\n self.bn2 = nn.BatchNorm2d(int(in_channels * reduction * 0.5))\n self.conv3 = nn.Conv2d(int(in_channels * reduction * 0.5), int(in_channels * reduction), (1, 3), 1, (0, 1), bias = True)\n self.bn3 = nn.BatchNorm2d(int(in_channels * reduction))\n self.conv4 = nn.Conv2d(int(in_channels * reduction), int(in_channels * reduction), (3, 1), 1, (1, 0), bias = True)\n self.bn4 = nn.BatchNorm2d(int(in_channels * reduction))\n self.conv5 = nn.Conv2d(int(in_channels * reduction), out_channels, 1, 1, bias = True)\n self.bn5 = nn.BatchNorm2d(out_channels)\n \n \n def forward(self, t, x):\n self.nfe += 1\n output = F.relu(self.bn1(self.conv1(x)))\n output = F.relu(self.bn2(self.conv2(output)))\n output = F.relu(self.bn3(self.conv3(output)))\n output = F.relu(self.bn4(self.conv4(output)))\n output = F.relu(self.bn5(self.conv5(output)))\n return output\n\nclass SqueezeNext(nn.Module):\n def __init__(self, width_x, blocks, num_classes, ODEBlock_, Train):\n super(SqueezeNext, self).__init__()\n self.in_channels = 64\n self.ODEBlock = ODEBlock_\n \n self.conv1 = nn.Conv2d(3, int(width_x * self.in_channels), 3, 1, 1, bias=True) # For Cifar10\n self.bn1 = nn.BatchNorm2d(int(width_x * self.in_channels))\n self.stage1_1 = self._make_layer1(1, width_x, 32, 1)\n self.stage1_2 = self._make_layer2(blocks[0] - 1, width_x, 32, 1, Train)\n\n self.stage2_1 = self._make_layer1(1, width_x, 64, 
2)\n self.stage2_2 = self._make_layer2(blocks[1] - 1, width_x, 64, 1, Train)\n\n self.stage3_1 = self._make_layer1(1, width_x, 128, 2)\n self.stage3_2 = self._make_layer2(blocks[2] - 1, width_x, 128, 1, Train)\n\n self.stage4_1 = self._make_layer1(1, width_x, 256, 2)\n self.stage4_2 = self._make_layer2(blocks[3] - 1, width_x, 256, 1, Train)\n self.conv2 = nn.Conv2d(int(width_x * self.in_channels), int(width_x * 128), 1, 1, bias = True)\n self.bn2 = nn.BatchNorm2d(int(width_x * 128))\n self.linear = nn.Linear(int(width_x * 128), num_classes)\n \n \n # with residual connection mismatch\n def _make_layer1(self, num_block, width_x, out_channels, stride):\n print(\"in_channels = \", self.in_channels)\n strides = [stride] + [1] * (num_block - 1)\n layers = []\n for _stride in strides:\n layers.append(BasicBlock(int(width_x * self.in_channels), int(width_x * out_channels), _stride))\n self.in_channels = out_channels\n return nn.Sequential(*layers)\n \n def _make_layer2(self, num_block, width_x, out_channels, stride, Train):\n print(\"out_channels= \", out_channels)\n print(\"in_channels = \", self.in_channels)\n print(\"num_blocks = \", num_block)\n print(\"width_x = \", width_x)\n print(\"stride = \",stride)\n \n strides = [stride] + [1] * (num_block - 1)\n \n layers = []\n for _stride in strides:\n layers.append(self.ODEBlock(BasicBlock2(int(width_x * self.in_channels)), tuple([out_channels,int(1024/out_channels),int(1024/out_channels)]),Train ))\n self.in_channels = out_channels\n \n return nn.Sequential(*layers)\n \n def forward(self, input):\n output = F.relu(self.bn1(self.conv1(input)))\n output = self.stage1_1(output)\n output = self.stage1_2(output)\n output = self.stage2_1(output)\n output = self.stage2_2(output)\n output = self.stage3_1(output)\n output = self.stage3_2(output)\n output = self.stage4_1(output)\n output = self.stage4_2(output)\n output = F.relu(self.bn2(self.conv2(output)))\n output = F.avg_pool2d(output, 4)\n output = output.view(output.size(0), -1)\n output = self.linear(output)\n return output\n \ndef SqNxt_23_1x(num_classes, ODEBlock, Train):\n return SqueezeNext(1.0, [2, 2, 2, 2], num_classes, ODEBlock, Train)\ndef lr_schedule(lr, epoch):\n optim_factor = 0\n if epoch > 250:\n optim_factor = 2\n elif epoch > 150:\n optim_factor = 1\n return lr / math.pow(10, (optim_factor))\n\n"
]
| [
[
"torch.nn.functional.avg_pool2d",
"torch.nn.Sequential",
"torch.nn.BatchNorm2d",
"torch.nn.Conv2d",
"torch.nn.functional.relu"
]
]
|
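The `sqnxt_PETSc.py` entry above assembles its blocks from the `torch.nn` calls listed in its API column. Below is a reduced, hypothetical conv → BatchNorm → ReLU bottleneck in the style of `BasicBlock2`, exercised on a random CIFAR-sized tensor; it is a sketch of the layer pattern only, not the repository's ODE-block wiring.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class MiniBottleneck(nn.Module):
    """Reduced conv/bn stack in the style of BasicBlock2 above."""
    def __init__(self, dim, reduction=0.5):
        super().__init__()
        mid = int(dim * reduction)
        self.conv1 = nn.Conv2d(dim, mid, kernel_size=1, bias=True)
        self.bn1 = nn.BatchNorm2d(mid)
        self.conv2 = nn.Conv2d(mid, dim, kernel_size=1, bias=True)
        self.bn2 = nn.BatchNorm2d(dim)

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        return F.relu(self.bn2(self.conv2(out)))

block = MiniBottleneck(dim=64)
x = torch.randn(2, 64, 32, 32)   # batch of 2 CIFAR-like feature maps
print(block(x).shape)            # torch.Size([2, 64, 32, 32])
```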
yueqiw/attentive_clustering_processes | [
"3925eebbbce5b17f86805836b720c7bb518af9f4"
]
| [
"acp/encoders/mog_encoder.py"
]
| [
"\nimport torch\nimport torch.nn as nn\n\n\ndef get_mog_encoder(params):\n return MOG_Encoder(\n in_dim=params['x_dim'],\n out_dim=params['e_dim'],\n H_dim=params['H_dim'])\n\n\nclass MOG_Encoder(nn.Module):\n\n def __init__(self, in_dim, out_dim, H_dim):\n super().__init__()\n\n H = H_dim\n self.h = torch.nn.Sequential(\n torch.nn.Linear(in_dim, H),\n torch.nn.PReLU(),\n torch.nn.Linear(H, H),\n torch.nn.PReLU(),\n torch.nn.Linear(H, H),\n torch.nn.PReLU(),\n torch.nn.Linear(H, H),\n torch.nn.PReLU(),\n torch.nn.Linear(H, out_dim),\n )\n\n def forward(self, x):\n return self.h(x)\n"
]
| [
[
"torch.nn.Linear",
"torch.nn.PReLU"
]
]
|
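A short usage sketch for the MLP pattern in `MOG_Encoder` above; the dimensions are hypothetical values standing in for `params['x_dim']`, `params['e_dim']` and `params['H_dim']`.

```python
import torch
import torch.nn as nn

in_dim, out_dim, H = 2, 16, 128    # hypothetical x_dim, e_dim, H_dim

encoder = nn.Sequential(           # same Linear/PReLU stack as MOG_Encoder.h
    nn.Linear(in_dim, H), nn.PReLU(),
    nn.Linear(H, H), nn.PReLU(),
    nn.Linear(H, out_dim),
)

x = torch.randn(32, in_dim)        # 32 two-dimensional mixture-of-Gaussians points
print(encoder(x).shape)            # torch.Size([32, 16])
```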
anesh-ml/Document-and-ML-track-tool | [
"9b176985b0a5513a1e34ca8ff112e20629346470"
]
| [
"ml_track_tool/summary_plots.py"
]
| [
"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport plotly\nimport plotly.express as px\nimport plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\nimport os\nimport json\nimport pandas as pd\nimport numpy as np\nfrom sklearn.metrics import roc_auc_score,accuracy_score,f1_score,precision_score,recall_score\nimport glob\n\n\n# In[2]:\n\n\ndef get_exp_folders(exp_path):\n experiments=glob.glob(exp_path+\"/*\")\n experiments=list(map(lambda x:x.replace(\"\\\\\",\"/\"),experiments))\n experiments_list=list(map(lambda x:x.split(\"/\")[-1],experiments))\n experiments_list_=[]\n experiments=[]\n for e in experiments_list:\n if not(\".\" in e):\n experiments.append(f\"{exp_path}/{e}\")\n experiments_list_.append(e)\n return experiments,experiments_list_\n\n\n# In[64]:\n\n\ndef get_metrics(exp_path):\n metrics_exp={}\n experiments,experiments_list_=get_exp_folders(exp_path)\n for i,exp in enumerate(experiments):\n prediction_path=f\"{exp}/prediction/prediction.json\"\n if os.path.exists(prediction_path):\n metrics_exp[experiments_list_[i]]={}\n pred_file = open(prediction_path, \"r\")\n pred_dict=json.load(pred_file)\n pred_file.close()\n y_true=np.array(pred_dict['y_true'])\n y_pred_proba=np.array(pred_dict['y_pred'])\n if(np.ndim(y_true)>1):\n y_true=np.argmax(y_true,axis=1)\n y_pred=np.argmax(y_pred_proba,axis=1)\n accuracy=accuracy_score(y_true,y_pred)*100\n f1score=f1_score(y_true,y_pred)*100\n precision=precision_score(y_true,y_pred)*100\n recall=recall_score(y_true,y_pred)*100\n metrics=['accuracy','f1score','precision','recall']\n for metric in metrics:\n metrics_exp[experiments_list_[i]][metric]=round(eval(metric),3)\n else:\n y_pred=np.round(y_pred_proba)\n accuracy=accuracy_score(y_true,y_pred)\n f1score=f1_score(y_true,y_pred)\n precision=precision_score(y_true,y_pred)\n recall=recall_score(y_true,y_pred)\n metrics=['accuracy','f1score','precision','recall']\n for metric in metrics:\n metrics_exp[experiments_list_[i]]['accuracy']=round(eval(metric),3)\n return metrics_exp \n\n\n# In[65]:\n\n\ndef get_memory(exp_path):\n memory_exp={}\n experiments,experiments_list_=get_exp_folders(exp_path)\n for i,exp in enumerate(experiments):\n memory_path=f\"{exp}/memory_info/memory_metrics.json\"\n if os.path.exists(memory_path):\n memory_exp[experiments_list_[i]]={}\n memory_file = open(memory_path, \"r\")\n memory_dict=json.load(memory_file)\n memory_exp[experiments_list_[i]]['ram']=memory_dict[\"max_ram_consumption\"]\n memory_exp[experiments_list_[i]]['gpu']=memory_dict[\"max_gpu_consumption\"]\n memory_exp[experiments_list_[i]]['execution_time']=memory_dict[\"execution_time\"]\n memory_file.close()\n return memory_exp\n\n\n# In[66]:\n\n\ndef plot_summary(exp_path):\n metrics_exp=get_metrics(exp_path)\n memory_exp=get_memory(exp_path)\n experiment_names_metrics = list(metrics_exp.keys())\n figs=[]\n if len(experiment_names_metrics)>0:\n fig1 = go.Figure()\n\n fig1.add_trace(go.Scatter(\n x=[metrics_exp[exp]['accuracy'] for exp in experiment_names_metrics],\n y=experiment_names_metrics,\n marker=dict(color=\"crimson\", size=12),\n mode=\"markers\",\n name=\"accuracy\",\n ))\n\n fig1.add_trace(go.Scatter(\n x=[metrics_exp[exp]['f1score'] for exp in experiment_names_metrics],\n y=experiment_names_metrics,\n marker=dict(color=\"gold\", size=12),\n mode=\"markers\",\n name=\"f1 score\"\n ))\n fig1.add_trace(go.Scatter(\n x=[metrics_exp[exp]['precision'] for exp in experiment_names_metrics],\n y=experiment_names_metrics,\n marker=dict(color=\"blue\", size=12),\n 
mode=\"markers\",\n name=\"precision\"\n ))\n fig1.add_trace(go.Scatter(\n x=[metrics_exp[exp]['recall'] for exp in experiment_names_metrics],\n y=experiment_names_metrics,\n marker=dict(color=\"purple\", size=12),\n mode=\"markers\",\n name=\"recall\"\n ))\n update_layout_(fig1,\"Performance comparison\",\"Percentage\",\"Experiments\")\n figs.append(fig1)\n experiment_names_memory = list(memory_exp.keys())\n if len(experiment_names_memory)>0:\n fig2 = go.Figure(data=[\n go.Bar(name='gpu', x=[memory_exp[exp]['gpu'] for exp in experiment_names_memory], y=experiment_names_memory,orientation='h'),\n go.Bar(name='ram', x=[memory_exp[exp]['ram'] for exp in experiment_names_memory], y=experiment_names_memory,orientation='h'), \n ])\n fig3 = go.Figure(data=[\n go.Bar(name='execution time', x=[memory_exp[exp]['execution_time'] for exp in experiment_names_memory], y=experiment_names_memory,orientation='h')\n\n ])\n # Change the bar mode\n fig2.update_layout(barmode='group')\n update_layout_(fig2,\"Memory Consumption\",\"memory(MB)\",\"Experiments\")\n update_layout_(fig3,\"Total execution time\",\"time(seconds)\",\"Experiments\")\n figs.append(fig2)\n figs.append(fig3)\n return figs\n\ndef update_layout_(fig_,title,xaxis_title,yaxis_title):\n fig_.update_layout(\n title=title,\n xaxis_title=xaxis_title,\n yaxis_title=yaxis_title,\n font=dict(\n family=\"times new roman\",\n size=16,\n color=\"black\"\n )\n)\n\n"
]
| [
[
"numpy.array",
"numpy.round",
"sklearn.metrics.accuracy_score",
"numpy.argmax",
"numpy.ndim",
"sklearn.metrics.precision_score",
"sklearn.metrics.f1_score",
"sklearn.metrics.recall_score"
]
]
|
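A self-contained sketch of the metric block inside `get_metrics` above, using hypothetical one-hot labels and predicted probabilities in place of `prediction.json`. (Note that the original's binary branch writes every metric into the `'accuracy'` key, which looks like a typo in the source; the sketch keeps one entry per metric.)

```python
import numpy as np
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score

# Hypothetical one-hot labels and predicted probabilities (two classes)
y_true = np.array([[1, 0], [0, 1], [0, 1], [1, 0]])
y_pred_proba = np.array([[0.9, 0.1], [0.2, 0.8], [0.6, 0.4], [0.7, 0.3]])

if np.ndim(y_true) > 1:                      # one-hot branch, as in get_metrics
    y_true = np.argmax(y_true, axis=1)
    y_pred = np.argmax(y_pred_proba, axis=1)
else:                                        # plain-probability branch
    y_pred = np.round(y_pred_proba)

for name, fn in [("accuracy", accuracy_score), ("f1score", f1_score),
                 ("precision", precision_score), ("recall", recall_score)]:
    print(name, round(fn(y_true, y_pred) * 100, 3))
```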
Marcel-Busschers/former | [
"5380fad4c0890503188e01f9b2cbd06fdb33a7af"
]
| [
"experiments/latent.py"
]
| [
"from argparse import ArgumentParser\n\nfrom _context import former\nfrom former import TransformerVAE\nfrom generate import load_financial_data, batchByTokens, pad, splitArray\n\nfrom sklearn.manifold import TSNE\n\nimport torch\nimport os\nimport seaborn as sns\nimport pandas as pd\nfrom fpdf import FPDF\n\ndef go(arg):\n if arg.sequence is None: exit()\n assert arg.model_path is not None, 'Need a path to the model (.pt file)'\n model_pt = torch.load(arg.model_path)\n date_path = f'former/runs/{arg.model_path.split(\"/\")[2]}/latent_representations'\n if not os.path.exists(date_path):\n os.mkdir(date_path)\n\n # Create model\n model = TransformerVAE(\n emb=model_pt['emb'], \n heads=model_pt['heads'], \n depth=model_pt['depth'], \n seq_length=model_pt['seq_length'], \n num_tokens=model_pt['num_tokens'], \n attention_type=model_pt['attention_type'],\n dropoutProb=model_pt['dropoutProb'],\n latentSize=model_pt['latentSize'],\n wordDropout=model_pt['wordDropout'])\n\n # Load in model\n model.load_state_dict(model_pt['model_state_dict'])\n\n # Load in data\n data = load_financial_data('former/data/EURUSD240.csv')\n data = batchByTokens(data, batchSize=256)\n\n emb = model_pt['latentSize']\n\n with torch.no_grad():\n outputs = []\n for index, batch in enumerate(data):\n input = pad(batch)\n output = model.encoder(input)['output']\n output = output[:, :emb] # mean\n outputs.append(output)\n z = torch.cat(outputs, dim=0)\n\n # Visualise\n tsne = TSNE(learning_rate=10)\n z_2d = tsne.fit_transform(z)\n\n # Make pdf with plot\n pdf = FPDF()\n pngs = []\n\n if arg.sequence[0] == 0:\n seq = range(1,365,1)\n description = 'all'\n else:\n seq = arg.sequence\n description = arg.description\n assert description is not None, 'Please give a description using argument -d'\n\n for sequence in seq:\n assert sequence > 0, 'Sequence number must be greater than 0'\n pdf.add_page()\n\n d = {'x': z_2d[:,0], 'y': z_2d[:,1]}\n df = pd.DataFrame(d)\n seq = df.iloc[sequence-1]\n seq = pd.DataFrame({'x': [seq['x']], 'y': [seq['y']]})\n df = df.drop(sequence-1)\n\n # plot = sns.scatterplot(x=z_2d[:,0], y=z_2d[:,1]).figure\n plot = sns.scatterplot(x='x', y='y', data=df, alpha=.5).figure\n plot = sns.scatterplot(x='x', y='y', data=seq, palette='red').set_title(f'Sequence {sequence}').figure\n png_path = f'{date_path}/latent{sequence}.png'\n pngs.append(png_path)\n plot.savefig(png_path)\n pdf.image(png_path, w=150)\n plot.clf()\n\n index = 0\n s = None\n for batch in data:\n for seq in batch:\n index += 1\n if index == sequence: \n s = seq\n break\n \n plot = sns.lineplot(x=range(len(s)), y=s).set_title(f'Sequence {sequence} market').figure\n png_path = f'{date_path}/market{sequence}.png'\n pngs.append(png_path)\n plot.savefig(png_path)\n pdf.image(png_path, w=150)\n plot.clf()\n\n\n # Save pdf\n i = 0\n path = f\"{date_path}/{description}.pdf\"\n while os.path.exists(path):\n i+=1\n path = f\"{date_path}/{description}-{i}.pdf\"\n pdf.output(path, \"F\") # Save File\n for path in pngs:\n os.remove(path)\n\n\nif __name__ == '__main__':\n parser = ArgumentParser()\n \n parser.add_argument(\"--model-dir\",\n dest=\"model_path\",\n help=\"Relative Directory Path where the .pt file is stored\",\n default=None, type=str)\n\n parser.add_argument(\"--from-sequence\",\n dest=\"sequence\",\n help=\"The sequence you want highlighted in the plot\",\n default=None, nargs='+', type=int)\n\n parser.add_argument(\"-d\",\n dest=\"description\",\n help=\"What type of sequence it is (Uptrend, etc)\",\n default=None, type=str)\n\n options = 
parser.parse_args()\n\n go(options)"
]
| [
[
"torch.cat",
"pandas.DataFrame",
"torch.no_grad",
"sklearn.manifold.TSNE",
"torch.load"
]
]
|
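A compact sketch of the t-SNE visualisation step from `latent.py` above, run on random latent vectors instead of the encoder output; the `learning_rate=10` setting mirrors the row, while the array shape and file name are placeholders.

```python
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE

z = np.random.randn(200, 32)                     # placeholder for the encoder's latent means
z_2d = TSNE(learning_rate=10, init="random").fit_transform(z)

ax = sns.scatterplot(x=z_2d[:, 0], y=z_2d[:, 1], alpha=0.5)
ax.set_title("t-SNE of latent vectors")
plt.savefig("latent.png")
```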
karlnapf/kernel_goodness_of_fit | [
"b76ad54481475df5f061615b0922dec812d48eda"
]
| [
"density_estimation/increasing_features_fixed_test_plot.py"
]
| [
"import os\n\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom tools.latex_plot_init import plt\n\n\nfname = \"increasing_features_fixed_test.txt\"\n\nfields = ['p_value']\nfield_plot_names = {\n 'p_value': 'p-value',\n 'm': r'$m$'\n }\ndef kwargs_gen(**kwargs):\n return kwargs\n\nconditions = kwargs_gen(\n D=1,\n N_test=500,\n N_fit=50000,\n num_bootstrap=200,\n sigma=1,\n lmbda=0.01,\n )\n\n# x-axis of plot\nx_field = 'm'\nx_field_values = [5, 10, 50, 100, 500, 2000, 5000]\n\ndf = pd.read_csv(fname, index_col=0)\n\nfor field in fields:\n plt.figure()\n \n # filter out desired entries\n mask = (df[field] == df[field])\n for k, v in conditions.items():\n mask &= (df[k] == v)\n current = df.loc[mask]\n \n # only use desired values of x_fields\n current = current.loc[[True if x in x_field_values else False for x in current[x_field]]]\n\n # use ints on x-axis\n current[x_field] = current[x_field].astype(int)\n\n sns.set_style(\"whitegrid\")\n sns.boxplot(x=x_field, y=field, data=current.sort(x_field))\n\n plt.xlabel(field_plot_names[x_field])\n plt.ylabel(field_plot_names[field])\n \n plt.tight_layout()\n\n fname_base = os.path.splitext(fname)[0]\n plt.savefig(fname_base + \".png\", bbox_inches='tight')\n plt.savefig(fname_base + \".eps\", bbox_inches='tight')\n \n # print info on number of trials\n print(field)\n print(\"Average number of trials: %d\" % int(np.round(current.groupby(x_field).apply(len).mean())))\n print(current.groupby(x_field).apply(len))\n \nplt.show()\n"
]
| [
[
"pandas.read_csv"
]
]
|
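The plotting script above filters its results table by building a boolean mask from a dictionary of fixed conditions before the seaborn boxplot. A small sketch of that filtering pattern on a hypothetical frame (`current.sort(...)` in the original is the long-removed pandas method; `sort_values` is its modern equivalent):

```python
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt

# Hypothetical results in the shape of increasing_features_fixed_test.txt
df = pd.DataFrame({
    "m": [5, 5, 10, 10, 50, 50],
    "D": [1, 1, 1, 2, 1, 1],
    "p_value": [0.40, 0.55, 0.31, 0.90, 0.21, 0.18],
})

conditions = {"D": 1}
mask = df["p_value"] == df["p_value"]      # all-True start, as in the script above
for k, v in conditions.items():
    mask &= (df[k] == v)
current = df.loc[mask].sort_values("m")    # modern replacement for DataFrame.sort

sns.boxplot(x="m", y="p_value", data=current)
plt.savefig("increasing_features_fixed_test.png", bbox_inches="tight")
```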
TxusLopez/curie | [
"84e71916f05448a415661776491ba74127e6f913"
]
| [
"CA_VonNeumann_estimator.py"
]
| [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jul 2 10:38:41 2019\n\n@author: txuslopez\n\"\"\" \n\nfrom sklearn.base import BaseEstimator\nfrom copy import deepcopy\nfrom sklearn.base import clone\nfrom collections import deque\nfrom functools import reduce\n\nimport numpy as np\nimport operator\nimport random\n\n############################## FUNCTIONS ##############################\ndef empties(b):\n invB = np.flip(b, axis=0)\n empty = []\n for b in invB:\n build = deepcopy(empty)\n empty = []\n for i in range(0,b):\n empty.append(build)\n\n return np.array(empty).tolist()\n\n# Increment index counter function\n# Needed because number of dimensions is unknown\ndef increment(cntr, dims):\n\n cntr[cntr.shape[0]-1] += 1\n zeros = False\n if np.where(dims-cntr == 0)[0] != 0: zeros = True\n while zeros:\n idx = np.where(dims-cntr == 0)[0][0]\n cntr[idx] = 0\n cntr[idx-1] += 1\n if np.where(dims-cntr == 0)[0] != 0: zeros = True\n else: zeros = False\n \n return cntr\n\n############################## CLASSES ##############################\n\nclass cell():\n \n def __init__(self, s):\n self.species = s\n \nclass CA_VonNeumann_Classifier(BaseEstimator):\n \n def __init__(self, bins_margin=0.1, bins = [], dimensions=[3,3], cells=empties([3,3])):\n self.bins = bins\n self.dimensions = dimensions\n self.cells = cells\n self.bins_margin=bins_margin\n\n def fit(self, data, classes):#,bins_margin\n\n # dimension variables\n dims = np.array(self.dimensions)\n n = len(self.dimensions)\n limits=[]\n\n # Creation of bins\n for j in range(0,n):\n \n min_dat = np.min(data[:, j]) - self.bins_margin*(np.max(data[:, j])-np.min(data[:, j]))\n max_dat = np.max(data[:, j]) + self.bins_margin*(np.max(data[:, j])-np.min(data[:, j]))\n delta = (max_dat-min_dat)/dims[j]\n \n self.bins.append(np.arange(min_dat, max_dat, delta)+delta) \n limits.append([np.min(data[:, j]),np.max(data[:, j])])\n \n # Sorting of data into bins\n for i, r in enumerate(data): \n idxs = []\n for j, c in enumerate(r):\n idxs.append(np.argmax(c <= self.bins[j]))\n \n self.plant(idxs, cell(classes[i]))\n \n # Competition step needed to ensure there is only one cell per bin\n # Species with max cells in neighborhood is given control of the bin\n self.contest()\n \n #Se comprueba que después de evolucionar no queden celdas vacias. 
Si es así se evoluciona de nuevo hasta que no queden vacías\n empties_VN=True\n while empties_VN:\n self,abun_VN,empties_VN = self.evolve() \n\n return self,limits\n\n def partial_fit(self, data, classes,s,limits_VN):#,bins_margin\n \n dims = np.array(self.dimensions)\n n = len(self.dimensions)\n new_bin=[]\n \n #Bins updating\n for j in range(0,n):\n if data[0][j]<limits_VN[j][0]:\n minim=data[0][j]\n min_dat = minim - self.bins_margin*(limits_VN[j][1]-minim)\n else:\n minim=limits_VN[j][0]\n min_dat = minim - self.bins_margin*(limits_VN[j][1]-minim)\n\n limits_VN[j][0]=minim\n\n if limits_VN[j][1]<data[0][j]:\n maxim=data[0][j]\n max_dat = maxim + self.bins_margin*(maxim-limits_VN[j][0])\n else:\n maxim=limits_VN[j][1]\n max_dat = maxim + self.bins_margin*(maxim-limits_VN[j][0])\n\n limits_VN[j][1]=maxim\n\n delta = (max_dat-min_dat)/dims[j]\n new_bin.append(np.arange(min_dat, max_dat, delta)+delta)\n \n ''' \n if len(new_bin[0])>n:\n print('len(new_bin[0]):',len(new_bin[0]))\n print('n:',n)\n print('Entra 1')\n new_bin[0]=new_bin[0][:-1]\n if len(new_bin[1])>n:\n print('len(new_bin[1]):',len(new_bin[1]))\n print('n:',n)\n print('Entra 2')\n new_bin[1]=new_bin[1][:-1]\n '''\n \n self.bins=new_bin\n \n muta=False\n # Sorting of data into bins\n for i, r in enumerate(data):\n\n idxs = []\n for j, c in enumerate(r):\n idxs.append(np.argmax(c <= self.bins[j])) \n \n cel = self.get_cell(self.cells, idxs)\n\n #Se considera que no hay evolucion al hacer fit, y entonces pueden quedar celdas vacias\n if len(cel)==0:\n self.plant(idxs, cell(classes[i])) \n else:\n if cel[0].species!=classes[i]: \n muta=True \n# print('Muta en :',s)\n '''\n #SOLO SI EL 50p DE LOS VECINOS TIENEN UNA ETIQUETA DIFERENTE SE SUBSTITUYE\n d=self.getSimpleCA(self)\n vecindad=self.get_VNneighbourhood(d,idxs,1)\n vecindad_vote=sum(vecindad)\n \n if vecindad_vote>=2: \n self.substitute(idxs, cell(classes[i]))\n '''\n \n self.substitute(idxs, cell(classes[i]))\n# \n# #Después de actualizar la celda evolucionamos segun VN \n# self,abun_VN,empties_VN = self.evolve() \n \n #Evolucion local\n# coords_vecindad_Moore=self.get_neighboursMoore(np.array(idxs), exclude_p=True, shape=tuple(dims))\n# for k,c in enumerate(coords_vecindad_Moore):\n# principal_cel = self.get_cell(self.cells, c)\n# coords_vecindadlocal_Moore=self.get_neighboursMoore(np.array(c), exclude_p=False, shape=tuple(dims))\n# st=0\n# for h,nc in enumerate(coords_vecindadlocal_Moore): \n# ad_cel = self.get_cell(self.cells, nc)\n# st+=ad_cel[0].species\n# \n# new_state=0.0\n# if st>len(coords_vecindadlocal_Moore): \n# new_state=1.0\n# \n# if principal_cel[0].species!=new_state:\n# self.substitute(np.array(c), cell(new_state))\n \n \n \n return self,limits_VN,muta,idxs\n\n def predict(self, data):\n \n ness = []\n for i, r in enumerate(data):\n idxs = []\n for j, c in enumerate(r):\n idxs.append(np.argmax(c <= self.bins[j]))\n\n paula = self.get_cell(self.cells, idxs)\n if paula:\n ness.append(paula[0].species)\n else:\n print(\"ERROR: VN Grid is not full, some observations have no mapping.\")\n ness.append(-1)\n #COMO HEMOS HECHO QUE PUEDAN QUEDAR CELDAS VACIAS AL INICIALIZAR, HABRA SAMPLES SIN PREDICCION\n# ness.append(np.nan)\n \n return np.array(ness)\n \n def evolve(self):\n \n# print('VN evolving')\n # Create dictionary to track cell totals\n abundance = dict()\n \n # Iteration variables\n n = len(self.dimensions)\n c0 = deepcopy(self.cells)\n dims = np.array(self.dimensions)\n frmnt = np.zeros(dims.shape).astype(int)\n \n # Loop through every cell\n done = 
False\n empties=False\n \n while not done:\n \n # Get cell array for the current index - check if empty\n cel = self.get_cell(c0, frmnt)\n \n if cel:\n \n # Update the abundances\n if cel[0].species in abundance:\n abundance[cel[0].species] += 1\n else:\n abundance[cel[0].species] = 1\n \n # Add cells to von Neumann neighbors\n for d in range(0,dims.shape[0]):\n if frmnt[d] > 0:\n \n instance = deepcopy(frmnt)\n instance[d] -= 1\n \n if not self.get_cell(c0, instance):\n self.plant(instance, cell(cel[0].species))\n \n if frmnt[d] < dims[d]-1:\n instance = deepcopy(frmnt)\n instance[d] += 1\n \n if not self.get_cell(c0, instance):\n self.plant(instance, cell(cel[0].species))\n \n else:\n empties=True\n \n # Increment cell index\n frmnt = increment(frmnt, dims)\n \n if 0 in np.where(dims-frmnt == 0)[0]: \n done = True\n \n \n # Competition step needed to ensure there is only one cell per bin\n # Species with max cells in neighborhood is given control of the bin\n self.contest()\n \n # Return new cell totals\n return self,abundance,empties\n\n # Competition function - ensure one cell per space on the grid\n def contest(self):\n \n # Iteration variables\n dims = np.array(self.dimensions)\n cmpt = np.zeros(dims.shape).astype(int)\n \n # Loop through all cells\n done = False\n while not done:\n # Check if cell array empty\n cel = self.get_cell(self.cells, cmpt)\n if cel:\n \n # Calculate species with maximum cell count\n competitors = dict()\n for c in cel:\n if c.species in competitors:\n competitors[c.species] += 1\n else:\n competitors[c.species] = 1\n \n cel[:] = []\n \n # Cell becomes the resulting species\n spec = max(competitors.items(), key=operator.itemgetter(1))[0] \n cel.append(cell(spec))\n \n # Increment the index\n cmpt = increment(cmpt, dims)\n if 0 in np.where(dims-cmpt == 0)[0]: done = True\n \n return self\n \n # Function to add cells to cell array at a given index\n # Catch common location error\n def plant(self, pos, daughter):\n \n location = self.cells\n for p in pos:\n try:\n location = location[p]\n except:\n print(\"Location Error - Please Restart Simulation\")\n quit()\n \n location.append(daughter)\n \n# self.cells[pos[0]][pos[1]].append(daughter)\n\n def substitute(self, pos, new_candidate):\n# self.cells[pos[0]][pos[1]]=[new_candidate]\n\n location = self.cells\n for p in pos:\n try:\n location = location[p]\n except:\n print(\"Location Error - Please Restart Simulation\")\n quit()\n\n location[0]=new_candidate\n \n # Function to get cell array at a given index\n def get_cell(self, c0, pos):\n\n c = c0\n for i in pos:\n c = c[i]\n \n return c \n \n def score(self, X, y=None):\n # counts number of values bigger than mean\n return print('Scoring should be implemented in case of need')\n \n def get_params(self, deep=True):\n return {'dimensions': self.dimensions, 'cells': self.cells, 'bins': self.bins, 'bins_margin': self.bins_margin}\n \n def set_params(self, **parameters):\n for parameter, value in parameters.items():\n setattr(self, parameter, value)\n return self \n \n def get_abundance(self):\n \n abundance = dict()\n dims = np.array(self.dimensions)\n c0 = deepcopy(self.cells)\n frmnt = np.zeros(dims.shape).astype(int)\n done = False\n \n while not done:\n \n # Get cell array for the current index - check if empty\n cel = self.get_cell(c0, frmnt)\n \n if cel:\n # Update the abundances\n if cel[0].species in abundance:\n abundance[cel[0].species] += 1\n else:\n abundance[cel[0].species] = 1\n \n # Increment cell index\n frmnt = increment(frmnt, dims)\n \n if 0 in 
np.where(dims-frmnt == 0)[0]: \n done = True\n \n return abundance\n \n def get_VNneighbourhood(self,matrix, coordinates, distance):\n '''\n Se obtienen los estados de las celdas adyacentes\n '''\n dimensions = len(coordinates)\n neigh = []\n app = neigh.append\n \n def recc_von_neumann(arr, curr_dim=0, remaining_distance=distance, isCenter=True):\n #the breaking statement of the recursion\n if curr_dim == dimensions:\n if not isCenter:\n app(arr)\n return\n \n dimensions_coordinate = coordinates[curr_dim]\n \n if not (0 <= dimensions_coordinate < len(arr)):\n return \n \n dimesion_span = range(dimensions_coordinate - remaining_distance, dimensions_coordinate + remaining_distance + 1)\n \n for c in dimesion_span:\n\n if 0 <= c < len(arr):\n recc_von_neumann(arr[c], curr_dim + 1, remaining_distance - abs(dimensions_coordinate - c), isCenter and dimensions_coordinate == c)\n return\n \n recc_von_neumann(matrix)\n \n return neigh\n \n def get_neighboursMoore(self,p, exclude_p=True, shape=None):\n '''\n Se obtienen las coordenadas de las celdas adyacentes.\n https://stackoverflow.com/questions/34905274/how-to-find-the-neighbors-of-a-cell-in-an-ndarray/34908879#34908879\n '''\n\n ndim = len(p)\n \n # generate an (m, ndims) array containing all strings over the alphabet {0, 1, 2}:\n offset_idx = np.indices((3,) * ndim).reshape(ndim, -1).T\n \n # use these to index into np.array([-1, 0, 1]) to get offsets\n offsets = np.r_[-1, 0, 1].take(offset_idx)\n \n # optional: exclude offsets of 0, 0, ..., 0 (i.e. p itself)\n if exclude_p:\n offsets = offsets[np.any(offsets, 1)]\n \n neighbours = p + offsets # apply offsets to p\n \n # optional: exclude out-of-bounds indices\n if shape is not None:\n valid = np.all((neighbours < np.array(shape)) & (neighbours >= 0), axis=1)\n neighbours = neighbours[valid]\n\n return neighbours\n \n def getSimpleCA(self,cellular_aut):\n \n dim=cellular_aut.dimensions\n # Create image arrays\n arr = deepcopy(empties(dim))\n # Set variables to model results\n cells = cellular_aut.cells\n \n if len(dim)==2:\n \n for i in range(0, len(cells)):\n for j in range(0, len(cells)):\n \n if len(cells[i][j])==0:\n arr[i][j] = np.nan \n else:\n s = cells[i][j][0].species\n arr[i][j] = s\n\n if len(dim)==3:\n \n for i in range(0, len(cells)):\n for j in range(0, len(cells)):\n for k in range(0, len(cells)):\n \n if len(cells[i][j][k])==0:\n arr[i][j][k] = np.nan \n else:\n s = cells[i][j][k][0].species\n arr[i][j][k] = s\n\n if len(dim)==4:\n \n for i in range(0, len(cells)):\n for j in range(0, len(cells)):\n for k in range(0, len(cells)):\n for l in range(0, len(cells)):\n \n if len(cells[i][j][k][l])==0:\n arr[i][j][k][l] = np.nan \n else:\n s = cells[i][j][k][l][0].species\n arr[i][j][k][l] = s\n \n #AMPLIAR SI HAY DATASETS DE MAS DE 4 DIMENSIONES\n\n \n return arr \n "
]
| [
[
"numpy.max",
"numpy.array",
"numpy.zeros",
"numpy.min",
"numpy.where",
"numpy.any",
"numpy.arange",
"numpy.argmax",
"numpy.indices",
"numpy.flip"
]
]
|
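`get_neighboursMoore` in the estimator above enumerates grid neighbours by generating every offset string over {-1, 0, 1} with `np.indices`. A standalone sketch of that trick, with a hypothetical 3×3 grid and a border cell:

```python
import numpy as np

def moore_neighbours(p, shape, exclude_p=True):
    """In-bounds Moore neighbourhood of p, via the same offset trick as above."""
    ndim = len(p)
    offset_idx = np.indices((3,) * ndim).reshape(ndim, -1).T  # all index strings over {0,1,2}
    offsets = np.r_[-1, 0, 1].take(offset_idx)                # map them to {-1,0,1} offsets
    if exclude_p:
        offsets = offsets[np.any(offsets, axis=1)]            # drop the all-zero offset (p itself)
    neighbours = np.asarray(p) + offsets
    valid = np.all((neighbours < np.array(shape)) & (neighbours >= 0), axis=1)
    return neighbours[valid]

print(moore_neighbours((0, 1), shape=(3, 3)))  # 5 in-bounds neighbours of a border cell
```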
YZ-Xie/caltech-ee148-spring2020-hw02 | [
"26317707cac0f96250c45544aefbf31ad327cee6"
]
| [
"visualize.py"
]
| [
"import numpy as np\r\nfrom matplotlib import pyplot as plt\r\nfrom PIL import Image\r\n\r\ndef visualize_box(box, image, thick = 5):\r\n\r\n '''\r\n Visualize the bounding boxes with thick green sides\r\n Code for Q5 and Q6\r\n\r\n box: the bounding box coordinates, list of np arrays\r\n image: the 3D np array representing the original image (480 x 640 x 3)\r\n thick: how thick (how many pixels) the frame is\r\n\r\n This function returns a 3D np array representing the new image with bounding boxes\r\n '''\r\n height = image.shape[0]\r\n width = image.shape[1]\r\n I_new = np.copy(image)\r\n t = max(box[0]-thick,0)\r\n b = min(box[2]+thick,height)\r\n l = max(box[1]-thick,0)\r\n r = min(box[3]+thick,width)\r\n for h in range(t,box[0]):\r\n I_new[h, l:r, 0] = 0\r\n I_new[h, l:r, 1] = 255\r\n I_new[h, l:r, 2] = 0\r\n for h in range(box[0],box[2]):\r\n I_new[h, l:box[1], 0] = 0\r\n I_new[h, l:box[1], 1] = 255\r\n I_new[h, l:box[1], 2] = 0\r\n I_new[h, box[3]:r, 0] = 0\r\n I_new[h, box[3]:r, 1] = 255\r\n I_new[h, box[3]:r, 2] = 0\r\n for h in range(box[2],b):\r\n I_new[h, l:r, 0] = 0\r\n I_new[h, l:r, 1] = 255\r\n I_new[h, l:r, 2] = 0\r\n return I_new\r\n\r\ndef visualize(I, address = '../data/images/test/yea.jpg'):\r\n image = Image.fromarray(I)\r\n image.save(address)\r\n return"
]
| [
[
"numpy.copy"
]
]
|
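A minimal sketch of the green-frame drawing done by `visualize_box` above, using NumPy slice assignment instead of per-row loops. The blank image and the box coordinates (`[top, left, bottom, right]`, matching the row's convention) are hypothetical.

```python
import numpy as np
from PIL import Image

image = np.zeros((480, 640, 3), dtype=np.uint8)   # blank 480 x 640 RGB frame
box = [100, 200, 180, 320]                        # hypothetical [top, left, bottom, right]
thick = 5

I_new = np.copy(image)
t, b = max(box[0] - thick, 0), min(box[2] + thick, image.shape[0])
l, r = max(box[1] - thick, 0), min(box[3] + thick, image.shape[1])
I_new[t:box[0], l:r] = (0, 255, 0)                # top edge
I_new[box[2]:b, l:r] = (0, 255, 0)                # bottom edge
I_new[box[0]:box[2], l:box[1]] = (0, 255, 0)      # left edge
I_new[box[0]:box[2], box[3]:r] = (0, 255, 0)      # right edge

Image.fromarray(I_new).save("boxed.jpg")
```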
Datajacker/Disaster_Response_Web_App | [
"651a26089eb631b3e213cc4d34230fd6fec3cfae"
]
| [
"app/run.py"
]
| [
"import json\nimport plotly\nimport pandas as pd\nimport re\n\n\nfrom nltk.tokenize import word_tokenize, RegexpTokenizer\nfrom nltk.corpus import stopwords\nfrom nltk.stem import WordNetLemmatizer, PorterStemmer\n\nfrom flask import Flask\nfrom flask import render_template, request, jsonify\nfrom plotly.graph_objs import Bar\nfrom sklearn.externals import joblib\nfrom sqlalchemy import create_engine\n\n\napp = Flask(__name__)\n\ndef tokenize(text):\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n\n clean_tokens = []\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n\n return clean_tokens\n\n# load data\nengine = create_engine('sqlite:///./data/DisasterResponse.db')\ndf = pd.read_sql_table('DisasterResponse', engine)\n\n# load model\nmodel = joblib.load(\"./models/classifier.pkl\")\n\n\n# index webpage displays cool visuals and receives user input text for model\[email protected]('/')\[email protected]('/index')\ndef index():\n \n # extract data needed for visuals\n # TODO: Below is an example - modify to extract data for your own visuals\n genre_counts = df.groupby('genre').count()['message']\n genre_names = list(genre_counts.index)\n category_names = df.iloc[:,4:].columns\n category_bool = (df.iloc[:,4:] != 0).sum().values\n \n # create visuals\n # TODO: Below is an example - modify to create your own visuals\n graphs = [\n # first graph\n {\n 'data': [\n line(\n x=genre_names,\n y=genre_counts\n )\n ],\n\n 'layout': {\n 'title': 'Distribution of Message Genres',\n 'yaxis': {\n 'title': \"Count\"\n },\n 'xaxis': {\n 'title': \"Genre\"\n }\n }\n },\n \n # second graph\n {\n 'data': [\n Bar(\n x= category_names,\n y= category_bool\n )\n ],\n 'layout': {\n 'title': 'Distribution of Categories across Messages',\n 'yaxis':{\n 'title':\"Count\"\n }, \n 'xaxis': {\n 'title':\"Categories\"\n }\n }\n }\n ]\n \n # encode plotly graphs in JSON\n ids = [\"graph-{}\".format(i) for i, _ in enumerate(graphs)]\n graphJSON = json.dumps(graphs, cls=plotly.utils.PlotlyJSONEncoder)\n \n # render web page with plotly graphs\n return render_template('master.html', ids=ids, graphJSON=graphJSON)\n\n\n# web page that handles user query and displays model results\[email protected]('/go')\ndef go():\n # save user input in query\n query = request.args.get('query', '') \n\n # use model to predict classification for query\n classification_labels = model.predict([query])[0]\n classification_results = dict(zip(df.columns[4:], classification_labels))\n\n # This will render the go.html Please see that file. \n return render_template(\n 'go.html',\n query=query,\n classification_result=classification_results\n )\n\n\ndef main():\n app.run(host='0.0.0.0', port=3001, debug=True)\n\n\nif __name__ == '__main__':\n main()"
]
| [
[
"sklearn.externals.joblib.load",
"pandas.read_sql_table"
]
]
|
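The Flask app above serialises its Plotly figures with `plotly.utils.PlotlyJSONEncoder`; note that its first trace calls `line(...)`, which is never imported, so the sketch below uses the imported `Bar` for both traces of the same pattern. The genre names and counts are hypothetical toy values.

```python
import json
import plotly
from plotly.graph_objs import Bar

genre_names = ["direct", "news", "social"]   # hypothetical toy values
genre_counts = [5, 3, 2]

graphs = [{
    "data": [Bar(x=genre_names, y=genre_counts)],
    "layout": {"title": "Distribution of Message Genres",
               "yaxis": {"title": "Count"}, "xaxis": {"title": "Genre"}},
}]

ids = ["graph-{}".format(i) for i, _ in enumerate(graphs)]
graphJSON = json.dumps(graphs, cls=plotly.utils.PlotlyJSONEncoder)
print(ids, graphJSON[:60], "...")
```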
phaustin/e211_lib | [
"fb19ec28743e5dfe19a1ad9c14beeea2eab9de69"
]
| [
"src/e211_lib/e211.py"
]
| [
"# %%\n'''\ne211 Function library\n\nauthor: Andrew Loeppky\ncourse: eosc 211 - computer methods for earth, ocean and atmospheric scientists\n''';\n\n# %%\nfrom PIL import Image\nimport numpy as np\nfrom scipy.io import loadmat\nimport datetime\nimport pandas as pd\nfrom matplotlib import pyplot as plt\nimport arrow\n\n\n# %%\ndef mtime_to_arrow(mtime):\n \"\"\"\n convert matlab datenum values to datetime (intermediate) to arrow date objects\n\n in: decimal days since 0 0 2000 (float)\n out: arrow datetime object\n \"\"\"\n arrow_time = np.empty(len(mtime), dtype=\"O\")\n dt = np.empty(len(mtime), dtype=\"O\")\n\n for i, time in enumerate(mtime):\n dt[i] = datetime.datetime.fromordinal(int(mtime[i] - 1)) + datetime.timedelta(\n days=(mtime[i] % 1) - 365\n )\n arrow_time[i] = arrow.get(dt[i])\n return arrow_time\n\n\n# %%\ndef show_earthquake_data():\n df = pd.read_csv(\"EQCanOB_20190907_2020_0906.txt\", sep=\"|\")\n pd.options.display.max_columns = None\n display(df)\n\n\n# %%\ndef load_temps(my_data):\n \"\"\"\n loads temperature timeseries from Sand Heads. \n Output is a tuple (temp, time) of np arrays\n \"\"\"\n matfile = loadmat(my_data)\n temp = matfile[\"temperature\"].flatten()\n time = matfile[\"time\"].flatten()\n time = mtime_to_arrow(time)\n \n return temp, time\n\n\n# %%\ndef load_oceancolor(my_image):\n \"\"\"\n takes in a png image and returns a scaled numpy array. Scaling is set for\n https://oceancolor.gsfc.nasa.gov chlorophyl concentration files\n \"\"\"\n img_in = Image.open(my_image)\n img_np = np.asarray(img_in)\n # default img -- 255->0mg/m3, 0->20mg/m3\n img_scaled = -(img_np - 255) * (20 / 256) # this should acrualbe log scaled\n return img_scaled\n\n\n# %%\ndef load_mat(my_data):\n \"\"\"\n designed to load the bathymetry data for lab week 3\n \"\"\"\n # get data\n matfile = loadmat(my_data)\n\n # format data into something we can use\n # loadmat outputs a dictionary of np arrays. This code extracts the dictionary values to variables\n lon = matfile[\"bath\"][0][0][0].flatten() # extract latitude array\n lat = matfile[\"bath\"][0][0][1].flatten() # extract longitude array\n bath = matfile[\"bath\"][0][0][2]\n return bath\n\n# %%\ndef load_topo(my_data):\n \"\"\"\n designed to load digital elevation model for lab wk5\n \"\"\"\n matfile = loadmat(my_data)\n return matfile[\"topo\"]\n\n\n# %%\ndef load_aircraft(my_data):\n \"\"\"\n designed to load aircraft gps path for lab wk6\n\n returns velocity array and associated timestamps as arrow time objects\n \"\"\"\n matfile = loadmat(my_data)[\"gps\"]\n vel = matfile[\"vel\"][0][0][0]\n time = matfile[\"mtime\"][0][0][0] \n time = mtime_to_arrow(time) \n\n return vel, time\n\n\n# %%\ndef mdate_to_datetime(mdate):\n \"\"\"\n converts matlab dates to python datetime objects\n\n matlab's date convention is days since Jan 1/0AD,\n years and days are 1-indexed (ie jan = 1 not 0)\n \"\"\"\n # account for matlab's 1 indexed values by subtracting 1 year + 1 day\n # maintains fidelity to matlab datestr() function for ~20 values tested\n # this should be tested more thorougly (AL 08/08/21)\n the_date = datetime.date.fromordinal(int(mdate) - 366)\n the_time = datetime.timedelta(days=(mdate % 1.0))\n the_date = datetime.datetime(\n the_date.year,\n the_date.month,\n the_date.day,\n )\n \n return the_date + the_time\n\n\n# %%\ndef clean_a1_data(dataset, save=False):\n \"\"\"\n imports drifter dataset in .mat form, restructures into an array full of dictionaries\n each representing one drifter. 
See A1 for dictionary legend.\n\n save=True will save the output as a .npy file in the local directory as well as return\n the formatted array.\n \"\"\"\n\n # import the whole dataset\n matdata = loadmat(dataset)\n matdata = matdata[\"D\"].flatten()\n\n ##################################################################################\n # modify each element one by one, they all have slightly different shapes/dtypes #\n ##################################################################################\n\n drifter_id = np.concatenate(matdata[\"id\"]).flatten() # drifter ID\n\n design = np.concatenate(matdata[\"design\"]).flatten() # 1-6 which type of drifter\n\n tzone = np.concatenate(matdata[\"tzone\"]).flatten() # time zone\n\n mtime = matdata[\"mtime\"] # time in matlab ordinal (decimal days since jan1/0000)\n # create a new array datetime to replace messy mtime\n datetime = np.empty_like(mtime, dtype=\"O\")\n for m in range(len(mtime)):\n timestamp = np.empty(len(mtime[m]), dtype=\"O\")\n for n in range(len(mtime[m].flatten())):\n timestamp[n] = mdate_to_datetime(mtime[m][n, 0])\n datetime[m] = timestamp\n\n lon_in = matdata[\"lon\"] # drifter lons\n # create new variable lons containing restructured longitudes\n lons = np.empty_like(lon_in, dtype=\"O\")\n for m in range(len(lon_in)):\n lon = np.empty(len(lon_in[m]))\n for n in range(len(lon_in[m])):\n lon[n] = lon_in[m][n]\n lons[m] = lon\n\n lat_in = matdata[\"lat\"] # drifter lats\n # same treatment for lats\n lats = np.empty_like(lat_in, dtype=\"O\")\n for m in range(len(lat_in)):\n lat = np.empty(len(lat_in[m]))\n for n in range(len(lat_in[m])):\n lat[n] = lat_in[m][n]\n lats[m] = lat\n\n comment = np.concatenate(matdata[\"comment\"]).flatten() # metadata\n\n at_sea_in = matdata[\"atSea\"] # status codes for working/landed drifters\n\n # at_sea treatment echoes lats and lons\n at_sea = np.empty_like(at_sea_in, dtype=\"O\")\n for m in range(len(at_sea_in)):\n sea = np.empty(len(at_sea_in[m]))\n for n in range(len(at_sea_in[m])):\n sea[n] = at_sea_in[m][n]\n at_sea[m] = sea\n\n ends_on_land = matdata[\n \"endsOnLand\"\n ].flatten() # change from 1/0 logic to Python booleans\n ends_on_land[ends_on_land == 1] = True\n ends_on_land[ends_on_land == 0] = False\n\n found_on_land = matdata[\"foundOnLand\"].flatten() # use booleans not 1/0\n found_on_land[found_on_land == 1] = True\n found_on_land[found_on_land == 0] = False\n\n # convert data containing dates to datetime objs\n launchdate_in = np.concatenate(matdata[\"launchDate\"]).flatten()\n launchdate = np.empty(len(launchdate_in), dtype=\"O\")\n for i, ld in enumerate(launchdate_in):\n launchdate[i] = mdate_to_datetime(ld)\n\n enddate_in = np.concatenate(matdata[\"endDate\"]).flatten()\n enddate = np.empty(len(enddate_in), dtype=\"O\")\n for i, ed in enumerate(enddate_in):\n enddate[i] = mdate_to_datetime(ed)\n\n lifetime_in = np.concatenate(\n matdata[\"lifeTime\"]\n ).flatten() # decimal days from launchDate to endDate\n lifetime = (\n enddate - launchdate\n ) # ignore the original data and do datetime arithmetic. 
Get students to do this?\n\n refloated = matdata[\"refloated\"] # change to py logical\n refloated[refloated == 1] == True\n refloated[refloated == 0] == False\n\n first_ground_date_in = matdata[\"firstGrndDate\"]\n first_ground_date = np.empty(len(first_ground_date_in), dtype=\"O\")\n for i, fgd in enumerate(first_ground_date_in):\n if fgd == 0:\n first_ground_date[i] = enddate[i]\n else:\n first_ground_date[i] = mdate_to_datetime(fgd[0, 0])\n\n # - float: matlab time for first grounding\n # - matlab time of first of a string of atSea~=1, unless\n # the last point in the record has atSea==1 and\n # endsOnLand==1 in which case it is the time of the last\n # point.\n\n first_lifetime_in = matdata[\"firstLifeTime\"]\n first_lifetime = first_ground_date - launchdate # make students do this?\n\n # new datastructure: Each drifter is a dictionary with all vars above as keys, values are either arrays or numbers\n # save an array containing all the drifter \"objects\" (actually dictionaries...) array full of dictionaries full\n # of arrays!\n master_dataset = np.empty(len(drifter_id), dtype=\"O\")\n for i, data in enumerate(master_dataset):\n master_dataset[i] = {\n \"drifter_id\": drifter_id[i],\n \"design\": design[i],\n \"tzone\": tzone[i],\n \"datetime\": datetime[i],\n \"lons\": lons[i],\n \"lats\": lats[i],\n \"comment\": comment[i],\n \"at_sea\": at_sea[i],\n \"ends_on_land\": ends_on_land[i],\n \"found_on_land\": found_on_land[i],\n \"launchdate\": launchdate[i],\n \"enddate\": enddate[i],\n \"lifetime\": lifetime[i],\n \"refloated\": refloated[i],\n \"first_ground_date\": first_ground_date[i],\n \"first_lifetime\": first_lifetime[i],\n }\n\n # save the dataset in local directory as a .npy file\n if save == True:\n np.save(\"drifter_data.npy\", master_dataset)\n\n return master_dataset\n\n\n# %%\ndef timeit(method):\n \"\"\"\n https://www.laurivan.com/braindump-use-a-decorator-to-time-your-python-function/\n \"\"\"\n import time\n \n def timed(*args, **kw):\n ts = time.time()\n result = method(*args, **kw)\n te = time.time()\n elapsed = te - ts\n\n # print(f'time to generate plot: {elapsed}s')\n return f\"time to run function: {elapsed} sec\"\n\n return timed\n"
]
| [
[
"numpy.concatenate",
"numpy.asarray",
"scipy.io.loadmat",
"numpy.save",
"pandas.read_csv",
"numpy.empty_like"
]
]
|
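`mdate_to_datetime` above converts MATLAB datenums by subtracting the 366-day offset between MATLAB's 1-indexed ordinal and Python's. A standalone sketch of that conversion on a single sample value:

```python
import datetime

def mdate_to_datetime(mdate):
    """MATLAB datenum (decimal days, 1-indexed from year 0) -> Python datetime,
    using the same 366-day offset as the e211 helper above."""
    the_date = datetime.date.fromordinal(int(mdate) - 366)
    the_time = datetime.timedelta(days=(mdate % 1.0))
    return datetime.datetime(the_date.year, the_date.month, the_date.day) + the_time

print(mdate_to_datetime(737791.5))   # MATLAB datenum for 2020-01-01 12:00 -> 2020-01-01 12:00:00
```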
susmitpy/modin | [
"c7d7b492e52fcc4aa36af2a210312101bbada06e"
]
| [
"modin/pandas/dataframe.py"
]
| [
"# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n# compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under\n# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific language\n# governing permissions and limitations under the License.\n\n\"\"\"Module houses ``DataFrame`` class, that is distributed version of ``pandas.DataFrame``.\"\"\"\n\nimport pandas\nfrom pandas.core.common import apply_if_callable\nfrom pandas.core.dtypes.common import (\n infer_dtype_from_object,\n is_dict_like,\n is_list_like,\n is_numeric_dtype,\n)\nfrom pandas.util._validators import validate_bool_kwarg\nfrom pandas.io.formats.printing import pprint_thing\nfrom pandas._libs.lib import no_default\nfrom pandas._typing import StorageOptions\n\nimport re\nimport itertools\nimport functools\nimport numpy as np\nimport sys\nfrom typing import IO, Optional, Union, Mapping, Iterator\nimport warnings\n\nfrom modin.error_message import ErrorMessage\nfrom modin.utils import _inherit_docstrings, to_pandas, hashable\nfrom modin.config import Engine, IsExperimental, PersistentPickle\nfrom .utils import (\n from_pandas,\n from_non_pandas,\n)\nfrom . import _update_engine\nfrom .iterator import PartitionIterator\nfrom .series import Series\nfrom .base import BasePandasDataset, _ATTRS_NO_LOOKUP\nfrom .groupby import DataFrameGroupBy\nfrom .accessor import CachedAccessor, SparseFrameAccessor\n\n\n@_inherit_docstrings(\n pandas.DataFrame, excluded=[pandas.DataFrame.__init__], apilink=\"pandas.DataFrame\"\n)\nclass DataFrame(BasePandasDataset):\n \"\"\"\n Modin distributed representation of ``pandas.DataFrame``.\n\n Internally, the data can be divided into partitions along both columns and rows\n in order to parallelize computations and utilize the user's hardware as much as possible.\n\n Inherit common for ``DataFrame``-s and ``Series`` functionality from the\n `BasePandasDataset` class.\n\n Parameters\n ----------\n data : DataFrame, Series, pandas.DataFrame, ndarray, Iterable or dict, optional\n Dict can contain ``Series``, arrays, constants, dataclass or list-like objects.\n If data is a dict, column order follows insertion-order.\n index : Index or array-like, optional\n Index to use for resulting frame. Will default to ``RangeIndex`` if no\n indexing information part of input data and no index provided.\n columns : Index or array-like, optional\n Column labels to use for resulting frame. Will default to\n ``RangeIndex`` if no column labels are provided.\n dtype : str, np.dtype, or pandas.ExtensionDtype, optional\n Data type to force. Only a single dtype is allowed. If None, infer.\n copy : bool, default: False\n Copy data from inputs. Only affects ``pandas.DataFrame`` / 2d ndarray input.\n query_compiler : BaseQueryCompiler, optional\n A query compiler object to create the ``DataFrame`` from.\n\n Notes\n -----\n ``DataFrame`` can be created either from passed `data` or `query_compiler`. 
If both\n parameters are provided, data source will be prioritized in the next order:\n\n 1) Modin ``DataFrame`` or ``Series`` passed with `data` parameter.\n 2) Query compiler from the `query_compiler` parameter.\n 3) Various pandas/NumPy/Python data structures passed with `data` parameter.\n\n The last option is less desirable since import of such data structures is very\n inefficient, please use previously created Modin structures from the fist two\n options or import data using highly efficient Modin IO tools (for example\n ``pd.read_csv``).\n \"\"\"\n\n _pandas_class = pandas.DataFrame\n\n def __init__(\n self,\n data=None,\n index=None,\n columns=None,\n dtype=None,\n copy=None,\n query_compiler=None,\n ):\n Engine.subscribe(_update_engine)\n if isinstance(data, (DataFrame, Series)):\n self._query_compiler = data._query_compiler.copy()\n if index is not None and any(i not in data.index for i in index):\n raise NotImplementedError(\n \"Passing non-existant columns or index values to constructor not\"\n \" yet implemented.\"\n )\n if isinstance(data, Series):\n # We set the column name if it is not in the provided Series\n if data.name is None:\n self.columns = [0] if columns is None else columns\n # If the columns provided are not in the named Series, pandas clears\n # the DataFrame and sets columns to the columns provided.\n elif columns is not None and data.name not in columns:\n self._query_compiler = from_pandas(\n DataFrame(columns=columns)\n )._query_compiler\n if index is not None:\n self._query_compiler = data.loc[index]._query_compiler\n elif columns is None and index is None:\n data._add_sibling(self)\n else:\n if columns is not None and any(i not in data.columns for i in columns):\n raise NotImplementedError(\n \"Passing non-existant columns or index values to constructor not\"\n \" yet implemented.\"\n )\n if index is None:\n index = slice(None)\n if columns is None:\n columns = slice(None)\n self._query_compiler = data.loc[index, columns]._query_compiler\n\n # Check type of data and use appropriate constructor\n elif query_compiler is None:\n distributed_frame = from_non_pandas(data, index, columns, dtype)\n if distributed_frame is not None:\n self._query_compiler = distributed_frame._query_compiler\n return\n\n warnings.warn(\n \"Distributing {} object. 
This may take some time.\".format(type(data))\n )\n if is_list_like(data) and not is_dict_like(data):\n old_dtype = getattr(data, \"dtype\", None)\n values = [\n obj._to_pandas() if isinstance(obj, Series) else obj for obj in data\n ]\n if isinstance(data, np.ndarray):\n data = np.array(values, dtype=old_dtype)\n else:\n try:\n data = type(data)(values, dtype=old_dtype)\n except TypeError:\n data = values\n elif is_dict_like(data) and not isinstance(\n data, (pandas.Series, Series, pandas.DataFrame, DataFrame)\n ):\n data = {\n k: v._to_pandas() if isinstance(v, Series) else v\n for k, v in data.items()\n }\n pandas_df = pandas.DataFrame(\n data=data, index=index, columns=columns, dtype=dtype, copy=copy\n )\n self._query_compiler = from_pandas(pandas_df)._query_compiler\n else:\n self._query_compiler = query_compiler\n\n def __repr__(self):\n \"\"\"\n Return a string representation for a particular ``DataFrame``.\n\n Returns\n -------\n str\n \"\"\"\n from pandas.io.formats import console\n\n num_rows = pandas.get_option(\"display.max_rows\") or 10\n num_cols = pandas.get_option(\"display.max_columns\") or 20\n if pandas.get_option(\"display.max_columns\") is None and pandas.get_option(\n \"display.expand_frame_repr\"\n ):\n width, _ = console.get_console_size()\n width = min(width, len(self.columns))\n col_counter = 0\n i = 0\n while col_counter < width:\n col_counter += len(str(self.columns[i])) + 1\n i += 1\n\n num_cols = i\n i = len(self.columns) - 1\n col_counter = 0\n while col_counter < width:\n col_counter += len(str(self.columns[i])) + 1\n i -= 1\n\n num_cols += len(self.columns) - i\n result = repr(self._build_repr_df(num_rows, num_cols))\n if len(self.index) > num_rows or len(self.columns) > num_cols:\n # The split here is so that we don't repr pandas row lengths.\n return result.rsplit(\"\\n\\n\", 1)[0] + \"\\n\\n[{0} rows x {1} columns]\".format(\n len(self.index), len(self.columns)\n )\n else:\n return result\n\n def _repr_html_(self): # pragma: no cover\n \"\"\"\n Return a html representation for a particular ``DataFrame``.\n\n Returns\n -------\n str\n \"\"\"\n num_rows = pandas.get_option(\"max_rows\") or 60\n num_cols = pandas.get_option(\"max_columns\") or 20\n\n # We use pandas _repr_html_ to get a string of the HTML representation\n # of the dataframe.\n result = self._build_repr_df(num_rows, num_cols)._repr_html_()\n if len(self.index) > num_rows or len(self.columns) > num_cols:\n # We split so that we insert our correct dataframe dimensions.\n return result.split(\"<p>\")[\n 0\n ] + \"<p>{0} rows x {1} columns</p>\\n</div>\".format(\n len(self.index), len(self.columns)\n )\n else:\n return result\n\n def _get_columns(self):\n \"\"\"\n Get the columns for this ``DataFrame``.\n\n Returns\n -------\n pandas.Index\n The union of all indexes across the partitions.\n \"\"\"\n return self._query_compiler.columns\n\n def _set_columns(self, new_columns):\n \"\"\"\n Set the columns for this ``DataFrame``.\n\n Parameters\n ----------\n new_columns : list-like, Index\n The new index to set.\n \"\"\"\n self._query_compiler.columns = new_columns\n\n columns = property(_get_columns, _set_columns)\n\n @property\n def ndim(self): # noqa: RT01, D200\n \"\"\"\n Return the number of dimensions of the underlying data, by definition 2.\n \"\"\"\n return 2\n\n def drop_duplicates(\n self, subset=None, keep=\"first\", inplace=False, ignore_index=False\n ): # noqa: PR01, RT01, D200\n \"\"\"\n Return ``DataFrame`` with duplicate rows removed.\n \"\"\"\n return super(DataFrame, 
self).drop_duplicates(\n subset=subset, keep=keep, inplace=inplace\n )\n\n @property\n def dtypes(self): # noqa: RT01, D200\n \"\"\"\n Return the dtypes in the ``DataFrame``.\n \"\"\"\n return self._query_compiler.dtypes\n\n def duplicated(self, subset=None, keep=\"first\"): # noqa: PR01, RT01, D200\n \"\"\"\n Return boolean ``Series`` denoting duplicate rows.\n \"\"\"\n import hashlib\n\n df = self[subset] if subset is not None else self\n # if the number of columns we are checking for duplicates is larger than 1, we must\n # hash them to generate a single value that can be compared across rows.\n if len(df.columns) > 1:\n hashed = df.apply(\n lambda s: hashlib.new(\"md5\", str(tuple(s)).encode()).hexdigest(), axis=1\n ).to_frame()\n else:\n hashed = df\n duplicates = hashed.apply(lambda s: s.duplicated(keep=keep)).squeeze(axis=1)\n # remove Series name which was assigned automatically by .apply\n duplicates.name = None\n return duplicates\n\n @property\n def empty(self): # noqa: RT01, D200\n \"\"\"\n Indicate whether ``DataFrame`` is empty.\n \"\"\"\n return len(self.columns) == 0 or len(self.index) == 0\n\n @property\n def axes(self): # noqa: RT01, D200\n \"\"\"\n Return a list representing the axes of the ``DataFrame``.\n \"\"\"\n return [self.index, self.columns]\n\n @property\n def shape(self): # noqa: RT01, D200\n \"\"\"\n Return a tuple representing the dimensionality of the ``DataFrame``.\n \"\"\"\n return len(self.index), len(self.columns)\n\n def add_prefix(self, prefix): # noqa: PR01, RT01, D200\n \"\"\"\n Prefix labels with string `prefix`.\n \"\"\"\n return DataFrame(query_compiler=self._query_compiler.add_prefix(prefix))\n\n def add_suffix(self, suffix): # noqa: PR01, RT01, D200\n \"\"\"\n Suffix labels with string `suffix`.\n \"\"\"\n return DataFrame(query_compiler=self._query_compiler.add_suffix(suffix))\n\n def applymap(\n self, func, na_action: Optional[str] = None, **kwargs\n ): # noqa: PR01, RT01, D200\n \"\"\"\n Apply a function to a ``DataFrame`` elementwise.\n \"\"\"\n if not callable(func):\n raise ValueError(\"'{0}' object is not callable\".format(type(func)))\n ErrorMessage.non_verified_udf()\n return DataFrame(query_compiler=self._query_compiler.applymap(func))\n\n def apply(\n self, func, axis=0, raw=False, result_type=None, args=(), **kwargs\n ): # noqa: PR01, RT01, D200\n \"\"\"\n Apply a function along an axis of the ``DataFrame``.\n \"\"\"\n axis = self._get_axis_number(axis)\n query_compiler = super(DataFrame, self).apply(\n func, axis=axis, raw=raw, result_type=result_type, args=args, **kwargs\n )\n if not isinstance(query_compiler, type(self._query_compiler)):\n # A scalar was returned\n return query_compiler\n\n if result_type == \"reduce\":\n output_type = Series\n elif result_type == \"broadcast\":\n output_type = DataFrame\n # the 'else' branch also handles 'result_type == \"expand\"' since it makes the output type\n # depend on the `func` result (Series for a scalar, DataFrame for list-like)\n else:\n reduced_index = pandas.Index([\"__reduced__\"])\n if query_compiler.get_axis(axis).equals(\n reduced_index\n ) or query_compiler.get_axis(axis ^ 1).equals(reduced_index):\n output_type = Series\n else:\n output_type = DataFrame\n\n return output_type(query_compiler=query_compiler)\n\n def groupby(\n self,\n by=None,\n axis=0,\n level=None,\n as_index=True,\n sort=True,\n group_keys=True,\n squeeze: bool = no_default,\n observed=False,\n dropna: bool = True,\n ): # noqa: PR01, RT01, D200\n \"\"\"\n Group ``DataFrame`` using a mapper or by a ``Series`` 
of columns.\n \"\"\"\n if squeeze is not no_default:\n warnings.warn(\n (\n \"The `squeeze` parameter is deprecated and \"\n \"will be removed in a future version.\"\n ),\n FutureWarning,\n stacklevel=2,\n )\n else:\n squeeze = False\n\n axis = self._get_axis_number(axis)\n idx_name = None\n # Drop here indicates whether or not to drop the data column before doing the\n # groupby. The typical pandas behavior is to drop when the data came from this\n # dataframe. When a string, Series directly from this dataframe, or list of\n # strings is passed in, the data used for the groupby is dropped before the\n # groupby takes place.\n drop = False\n\n if (\n not isinstance(by, (pandas.Series, Series))\n and is_list_like(by)\n and len(by) == 1\n ):\n by = by[0]\n\n if callable(by):\n by = self.index.map(by)\n elif hashable(by) and not isinstance(by, pandas.Grouper):\n drop = by in self.columns\n idx_name = by\n if by is not None and by in self._query_compiler.get_index_names(axis):\n # In this case we pass the string value of the name through to the\n # partitions. This is more efficient than broadcasting the values.\n level, by = by, None\n elif level is None:\n by = self.__getitem__(by)._query_compiler\n elif isinstance(by, Series):\n drop = by._parent is self\n idx_name = by.name\n by = by._query_compiler\n elif is_list_like(by):\n # fastpath for multi column groupby\n if axis == 0 and all(\n (\n (hashable(o) and (o in self))\n or isinstance(o, Series)\n or (is_list_like(o) and len(o) == len(self.axes[axis]))\n )\n for o in by\n ):\n # We want to split 'by's into those that belongs to the self (internal_by)\n # and those that doesn't (external_by)\n internal_by, external_by = [], []\n\n for current_by in by:\n if hashable(current_by):\n internal_by.append(current_by)\n elif isinstance(current_by, Series):\n if current_by._parent is self:\n internal_by.append(current_by.name)\n else:\n external_by.append(current_by._query_compiler)\n else:\n external_by.append(current_by)\n\n by = internal_by + external_by\n\n if len(external_by) == 0:\n by = self[internal_by]._query_compiler\n\n drop = True\n else:\n mismatch = len(by) != len(self.axes[axis])\n if mismatch and all(\n hashable(obj)\n and (\n obj in self or obj in self._query_compiler.get_index_names(axis)\n )\n for obj in by\n ):\n # In the future, we will need to add logic to handle this, but for now\n # we default to pandas in this case.\n pass\n elif mismatch and any(\n hashable(obj) and obj not in self.columns for obj in by\n ):\n names = [o.name if isinstance(o, Series) else o for o in by]\n raise KeyError(next(x for x in names if x not in self))\n return DataFrameGroupBy(\n self,\n by,\n axis,\n level,\n as_index,\n sort,\n group_keys,\n squeeze,\n idx_name,\n observed=observed,\n drop=drop,\n dropna=dropna,\n )\n\n def keys(self): # noqa: RT01, D200\n \"\"\"\n Get columns of the ``DataFrame``.\n \"\"\"\n return self.columns\n\n def transpose(self, copy=False, *args): # noqa: PR01, RT01, D200\n \"\"\"\n Transpose index and columns.\n \"\"\"\n # FIXME: Judging by pandas docs `*args` serves only compatibility purpose\n # and does not affect the result, we shouldn't pass it to the query compiler.\n return DataFrame(query_compiler=self._query_compiler.transpose(*args))\n\n T = property(transpose)\n\n def add(\n self, other, axis=\"columns\", level=None, fill_value=None\n ): # noqa: PR01, RT01, D200\n \"\"\"\n Get addition of ``DataFrame`` and `other`, element-wise (binary operator `add`).\n \"\"\"\n return self._binary_op(\n \"add\",\n other,\n 
axis=axis,\n level=level,\n fill_value=fill_value,\n broadcast=isinstance(other, Series),\n )\n\n def append(\n self, other, ignore_index=False, verify_integrity=False, sort=False\n ): # noqa: PR01, RT01, D200\n \"\"\"\n Append rows of `other` to the end of caller, returning a new object.\n \"\"\"\n if sort is False:\n warnings.warn(\n \"Due to https://github.com/pandas-dev/pandas/issues/35092, \"\n \"Pandas ignores sort=False; Modin correctly does not sort.\"\n )\n if isinstance(other, (Series, dict)):\n if isinstance(other, dict):\n other = Series(other)\n if other.name is None and not ignore_index:\n raise TypeError(\n \"Can only append a Series if ignore_index=True\"\n \" or if the Series has a name\"\n )\n if other.name is not None:\n # other must have the same index name as self, otherwise\n # index name will be reset\n name = other.name\n # We must transpose here because a Series becomes a new row, and the\n # structure of the query compiler is currently columnar\n other = other._query_compiler.transpose()\n other.index = pandas.Index([name], name=self.index.name)\n else:\n # See note above about transpose\n other = other._query_compiler.transpose()\n elif isinstance(other, list):\n if not all(isinstance(o, BasePandasDataset) for o in other):\n other = DataFrame(pandas.DataFrame(other))._query_compiler\n else:\n other = [obj._query_compiler for obj in other]\n else:\n other = other._query_compiler\n\n # If ignore_index is False, by definition the Index will be correct.\n # We also do this first to ensure that we don't waste compute/memory.\n if verify_integrity and not ignore_index:\n appended_index = (\n self.index.append(other.index)\n if not isinstance(other, list)\n else self.index.append([o.index for o in other])\n )\n is_valid = next((False for idx in appended_index.duplicated() if idx), True)\n if not is_valid:\n raise ValueError(\n \"Indexes have overlapping values: {}\".format(\n appended_index[appended_index.duplicated()]\n )\n )\n\n query_compiler = self._query_compiler.concat(\n 0, other, ignore_index=ignore_index, sort=sort\n )\n return DataFrame(query_compiler=query_compiler)\n\n def assign(self, **kwargs): # noqa: PR01, RT01, D200\n \"\"\"\n Assign new columns to a ``DataFrame``.\n \"\"\"\n df = self.copy()\n for k, v in kwargs.items():\n if callable(v):\n df[k] = v(df)\n else:\n df[k] = v\n return df\n\n def boxplot(\n self,\n column=None,\n by=None,\n ax=None,\n fontsize=None,\n rot=0,\n grid=True,\n figsize=None,\n layout=None,\n return_type=None,\n backend=None,\n **kwargs,\n ): # noqa: PR01, RT01, D200\n \"\"\"\n Make a box plot from ``DataFrame`` columns.\n \"\"\"\n return to_pandas(self).boxplot(\n column=column,\n by=by,\n ax=ax,\n fontsize=fontsize,\n rot=rot,\n grid=grid,\n figsize=figsize,\n layout=layout,\n return_type=return_type,\n backend=backend,\n **kwargs,\n )\n\n def combine(\n self, other, func, fill_value=None, overwrite=True\n ): # noqa: PR01, RT01, D200\n \"\"\"\n Perform column-wise combine with another ``DataFrame``.\n \"\"\"\n return super(DataFrame, self).combine(\n other, func, fill_value=fill_value, overwrite=overwrite\n )\n\n def compare(\n self,\n other: \"DataFrame\",\n align_axis: Union[str, int] = 1,\n keep_shape: bool = False,\n keep_equal: bool = False,\n ) -> \"DataFrame\": # noqa: PR01, RT01, D200\n \"\"\"\n Compare to another ``DataFrame`` and show the differences.\n \"\"\"\n if not isinstance(other, DataFrame):\n raise TypeError(f\"Cannot compare DataFrame to {type(other)}\")\n other = self._validate_other(other, 0, 
compare_index=True)\n return self.__constructor__(\n query_compiler=self._query_compiler.compare(\n other,\n align_axis=align_axis,\n keep_shape=keep_shape,\n keep_equal=keep_equal,\n )\n )\n\n def corr(self, method=\"pearson\", min_periods=1): # noqa: PR01, RT01, D200\n \"\"\"\n Compute pairwise correlation of columns, excluding NA/null values.\n \"\"\"\n return self.__constructor__(\n query_compiler=self._query_compiler.corr(\n method=method,\n min_periods=min_periods,\n )\n )\n\n def corrwith(\n self, other, axis=0, drop=False, method=\"pearson\"\n ): # noqa: PR01, RT01, D200\n \"\"\"\n Compute pairwise correlation.\n \"\"\"\n if isinstance(other, DataFrame):\n other = other._query_compiler.to_pandas()\n return self._default_to_pandas(\n pandas.DataFrame.corrwith, other, axis=axis, drop=drop, method=method\n )\n\n def cov(self, min_periods=None, ddof: Optional[int] = 1): # noqa: PR01, RT01, D200\n \"\"\"\n Compute pairwise covariance of columns, excluding NA/null values.\n \"\"\"\n numeric_df = self.drop(\n columns=[\n i for i in self.dtypes.index if not is_numeric_dtype(self.dtypes[i])\n ]\n )\n\n is_notna = True\n\n if all(numeric_df.notna().all()):\n if min_periods is not None and min_periods > len(numeric_df):\n result = np.empty((numeric_df.shape[1], numeric_df.shape[1]))\n result.fill(np.nan)\n return numeric_df.__constructor__(result)\n else:\n cols = numeric_df.columns\n idx = cols.copy()\n numeric_df = numeric_df.astype(dtype=\"float64\")\n denom = 1.0 / (len(numeric_df) - ddof)\n means = numeric_df.mean(axis=0)\n result = numeric_df - means\n result = result.T._query_compiler.conj().dot(result._query_compiler)\n else:\n result = numeric_df._query_compiler.cov(min_periods=min_periods)\n is_notna = False\n\n if is_notna:\n result = numeric_df.__constructor__(\n query_compiler=result, index=idx, columns=cols\n )\n result *= denom\n else:\n result = numeric_df.__constructor__(query_compiler=result)\n return result\n\n def dot(self, other): # noqa: PR01, RT01, D200\n \"\"\"\n Compute the matrix multiplication between the ``DataFrame`` and `other`.\n \"\"\"\n if isinstance(other, BasePandasDataset):\n common = self.columns.union(other.index)\n if len(common) > len(self.columns) or len(common) > len(other.index):\n raise ValueError(\"Matrices are not aligned\")\n\n qc = other.reindex(index=common)._query_compiler\n if isinstance(other, DataFrame):\n return self.__constructor__(\n query_compiler=self._query_compiler.dot(\n qc, squeeze_self=False, squeeze_other=False\n )\n )\n else:\n return self._reduce_dimension(\n query_compiler=self._query_compiler.dot(\n qc, squeeze_self=False, squeeze_other=True\n )\n )\n\n other = np.asarray(other)\n if self.shape[1] != other.shape[0]:\n raise ValueError(\n \"Dot product shape mismatch, {} vs {}\".format(self.shape, other.shape)\n )\n\n if len(other.shape) > 1:\n return self.__constructor__(\n query_compiler=self._query_compiler.dot(other, squeeze_self=False)\n )\n\n return self._reduce_dimension(\n query_compiler=self._query_compiler.dot(other, squeeze_self=False)\n )\n\n def eq(self, other, axis=\"columns\", level=None): # noqa: PR01, RT01, D200\n \"\"\"\n Perform equality comparison of ``DataFrame`` and `other` (binary operator `eq`).\n \"\"\"\n return self._binary_op(\n \"eq\", other, axis=axis, level=level, broadcast=isinstance(other, Series)\n )\n\n def equals(self, other): # noqa: PR01, RT01, D200\n \"\"\"\n Test whether two objects contain the same elements.\n \"\"\"\n if isinstance(other, pandas.DataFrame):\n # Copy into a Modin 
DataFrame to simplify logic below\n other = DataFrame(other)\n return (\n self.index.equals(other.index)\n and self.columns.equals(other.columns)\n and self.eq(other).all().all()\n )\n\n def _update_var_dicts_in_kwargs(self, expr, kwargs):\n \"\"\"\n Copy variables with \"@\" prefix in `local_dict` and `global_dict` keys of kwargs.\n\n Parameters\n ----------\n expr : str\n The expression string to search variables with \"@\" prefix.\n kwargs : dict\n See the documentation for eval() for complete details on the keyword arguments accepted by query().\n \"\"\"\n if \"@\" not in expr:\n return\n frame = sys._getframe()\n try:\n f_locals = frame.f_back.f_back.f_locals\n f_globals = frame.f_back.f_back.f_globals\n finally:\n del frame\n local_names = set(re.findall(r\"@([\\w]+)\", expr))\n local_dict = {}\n global_dict = {}\n\n for name in local_names:\n for dct_out, dct_in in ((local_dict, f_locals), (global_dict, f_globals)):\n try:\n dct_out[name] = dct_in[name]\n except KeyError:\n pass\n\n if local_dict:\n local_dict.update(kwargs.get(\"local_dict\") or {})\n kwargs[\"local_dict\"] = local_dict\n if global_dict:\n global_dict.update(kwargs.get(\"global_dict\") or {})\n kwargs[\"global_dict\"] = global_dict\n\n def eval(self, expr, inplace=False, **kwargs): # noqa: PR01, RT01, D200\n \"\"\"\n Evaluate a string describing operations on ``DataFrame`` columns.\n \"\"\"\n self._validate_eval_query(expr, **kwargs)\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n self._update_var_dicts_in_kwargs(expr, kwargs)\n new_query_compiler = self._query_compiler.eval(expr, **kwargs)\n return_type = type(\n pandas.DataFrame(columns=self.columns)\n .astype(self.dtypes)\n .eval(expr, **kwargs)\n ).__name__\n if return_type == type(self).__name__:\n return self._create_or_update_from_compiler(new_query_compiler, inplace)\n else:\n if inplace:\n raise ValueError(\"Cannot operate inplace if there is no assignment\")\n return getattr(sys.modules[self.__module__], return_type)(\n query_compiler=new_query_compiler\n )\n\n def fillna(\n self,\n value=None,\n method=None,\n axis=None,\n inplace=False,\n limit=None,\n downcast=None,\n ):\n return super(DataFrame, self)._fillna(\n squeeze_self=False,\n squeeze_value=isinstance(value, Series),\n value=value,\n method=method,\n axis=axis,\n inplace=inplace,\n limit=limit,\n downcast=downcast,\n )\n\n def floordiv(\n self, other, axis=\"columns\", level=None, fill_value=None\n ): # noqa: PR01, RT01, D200\n \"\"\"\n Get integer division of ``DataFrame`` and `other`, element-wise (binary operator `floordiv`).\n \"\"\"\n return self._binary_op(\n \"floordiv\",\n other,\n axis=axis,\n level=level,\n fill_value=fill_value,\n broadcast=isinstance(other, Series),\n )\n\n @classmethod\n def from_dict(\n cls, data, orient=\"columns\", dtype=None, columns=None\n ): # pragma: no cover # noqa: PR01, RT01, D200\n \"\"\"\n Construct ``DataFrame`` from dict of array-like or dicts.\n \"\"\"\n ErrorMessage.default_to_pandas(\"`from_dict`\")\n return from_pandas(\n pandas.DataFrame.from_dict(\n data, orient=orient, dtype=dtype, columns=columns\n )\n )\n\n @classmethod\n def from_records(\n cls,\n data,\n index=None,\n exclude=None,\n columns=None,\n coerce_float=False,\n nrows=None,\n ): # pragma: no cover # noqa: PR01, RT01, D200\n \"\"\"\n Convert structured or record ndarray to ``DataFrame``.\n \"\"\"\n ErrorMessage.default_to_pandas(\"`from_records`\")\n return from_pandas(\n pandas.DataFrame.from_records(\n data,\n index=index,\n exclude=exclude,\n columns=columns,\n 
coerce_float=coerce_float,\n nrows=nrows,\n )\n )\n\n def ge(self, other, axis=\"columns\", level=None): # noqa: PR01, RT01, D200\n \"\"\"\n Get greater than or equal comparison of ``DataFrame`` and `other`, element-wise (binary operator `ge`).\n \"\"\"\n return self._binary_op(\n \"ge\", other, axis=axis, level=level, broadcast=isinstance(other, Series)\n )\n\n def gt(self, other, axis=\"columns\", level=None): # noqa: PR01, RT01, D200\n \"\"\"\n Get greater than comparison of ``DataFrame`` and `other`, element-wise (binary operator `ge`).\n \"\"\"\n return self._binary_op(\n \"gt\", other, axis=axis, level=level, broadcast=isinstance(other, Series)\n )\n\n def hist(\n self,\n column=None,\n by=None,\n grid=True,\n xlabelsize=None,\n xrot=None,\n ylabelsize=None,\n yrot=None,\n ax=None,\n sharex=False,\n sharey=False,\n figsize=None,\n layout=None,\n bins=10,\n **kwds,\n ): # pragma: no cover # noqa: PR01, RT01, D200\n \"\"\"\n Make a histogram of the ``DataFrame``.\n \"\"\"\n return self._default_to_pandas(\n pandas.DataFrame.hist,\n column=column,\n by=by,\n grid=grid,\n xlabelsize=xlabelsize,\n xrot=xrot,\n ylabelsize=ylabelsize,\n yrot=yrot,\n ax=ax,\n sharex=sharex,\n sharey=sharey,\n figsize=figsize,\n layout=layout,\n bins=bins,\n **kwds,\n )\n\n def info(\n self,\n verbose: Optional[bool] = None,\n buf: Optional[IO[str]] = None,\n max_cols: Optional[int] = None,\n memory_usage: Optional[Union[bool, str]] = None,\n show_counts: Optional[bool] = None,\n null_counts: Optional[bool] = None,\n ): # noqa: PR01, D200\n \"\"\"\n Print a concise summary of the ``DataFrame``.\n \"\"\"\n\n def put_str(src, output_len=None, spaces=2):\n src = str(src)\n return src.ljust(output_len if output_len else len(src)) + \" \" * spaces\n\n def format_size(num):\n for x in [\"bytes\", \"KB\", \"MB\", \"GB\", \"TB\"]:\n if num < 1024.0:\n return f\"{num:3.1f} {x}\"\n num /= 1024.0\n return f\"{num:3.1f} PB\"\n\n output = []\n\n type_line = str(type(self))\n index_line = self.index._summary()\n columns = self.columns\n columns_len = len(columns)\n dtypes = self.dtypes\n dtypes_line = f\"dtypes: {', '.join(['{}({})'.format(dtype, count) for dtype, count in dtypes.value_counts().items()])}\"\n\n if max_cols is None:\n max_cols = 100\n\n exceeds_info_cols = columns_len > max_cols\n\n if buf is None:\n buf = sys.stdout\n\n if null_counts is None:\n null_counts = not exceeds_info_cols\n\n if verbose is None:\n verbose = not exceeds_info_cols\n\n if null_counts and verbose:\n # We're gonna take items from `non_null_count` in a loop, which\n # works kinda slow with `Modin.Series`, that's why we call `_to_pandas()` here\n # that will be faster.\n non_null_count = self.count()._to_pandas()\n\n if memory_usage is None:\n memory_usage = True\n\n def get_header(spaces=2):\n output = []\n head_label = \" # \"\n column_label = \"Column\"\n null_label = \"Non-Null Count\"\n dtype_label = \"Dtype\"\n non_null_label = \" non-null\"\n delimiter = \"-\"\n\n lengths = {}\n lengths[\"head\"] = max(len(head_label), len(pprint_thing(len(columns))))\n lengths[\"column\"] = max(\n len(column_label), max(len(pprint_thing(col)) for col in columns)\n )\n lengths[\"dtype\"] = len(dtype_label)\n dtype_spaces = (\n max(lengths[\"dtype\"], max(len(pprint_thing(dtype)) for dtype in dtypes))\n - lengths[\"dtype\"]\n )\n\n header = put_str(head_label, lengths[\"head\"]) + put_str(\n column_label, lengths[\"column\"]\n )\n if null_counts:\n lengths[\"null\"] = max(\n len(null_label),\n max(len(pprint_thing(x)) for x in non_null_count)\n + 
len(non_null_label),\n )\n header += put_str(null_label, lengths[\"null\"])\n header += put_str(dtype_label, lengths[\"dtype\"], spaces=dtype_spaces)\n\n output.append(header)\n\n delimiters = put_str(delimiter * lengths[\"head\"]) + put_str(\n delimiter * lengths[\"column\"]\n )\n if null_counts:\n delimiters += put_str(delimiter * lengths[\"null\"])\n delimiters += put_str(delimiter * lengths[\"dtype\"], spaces=dtype_spaces)\n output.append(delimiters)\n\n return output, lengths\n\n output.extend([type_line, index_line])\n\n def verbose_repr(output):\n columns_line = f\"Data columns (total {len(columns)} columns):\"\n header, lengths = get_header()\n output.extend([columns_line, *header])\n for i, col in enumerate(columns):\n i, col, dtype = map(pprint_thing, [i, col, dtypes[col]])\n\n to_append = put_str(\" {}\".format(i), lengths[\"head\"]) + put_str(\n col, lengths[\"column\"]\n )\n if null_counts:\n non_null = pprint_thing(non_null_count[col])\n to_append += put_str(\n \"{} non-null\".format(non_null), lengths[\"null\"]\n )\n to_append += put_str(dtype, lengths[\"dtype\"], spaces=0)\n output.append(to_append)\n\n def non_verbose_repr(output):\n output.append(columns._summary(name=\"Columns\"))\n\n if verbose:\n verbose_repr(output)\n else:\n non_verbose_repr(output)\n\n output.append(dtypes_line)\n\n if memory_usage:\n deep = memory_usage == \"deep\"\n mem_usage_bytes = self.memory_usage(index=True, deep=deep).sum()\n mem_line = f\"memory usage: {format_size(mem_usage_bytes)}\"\n\n output.append(mem_line)\n\n output.append(\"\")\n buf.write(\"\\n\".join(output))\n\n def insert(self, loc, column, value, allow_duplicates=False): # noqa: PR01, D200\n \"\"\"\n Insert column into ``DataFrame`` at specified location.\n \"\"\"\n if isinstance(value, (DataFrame, pandas.DataFrame)):\n if len(value.columns) != 1:\n raise ValueError(\n f\"Wrong number of items passed {len(value.columns)}, placement implies 1\"\n )\n value = value.squeeze(axis=1)\n\n if not self._query_compiler.lazy_execution and len(self.index) == 0:\n if not hasattr(value, \"index\"):\n try:\n value = pandas.Series(value)\n except (TypeError, ValueError, IndexError):\n raise ValueError(\n \"Cannot insert into a DataFrame with no defined index \"\n \"and a value that cannot be converted to a \"\n \"Series\"\n )\n new_index = value.index.copy()\n new_columns = self.columns.insert(loc, column)\n new_query_compiler = DataFrame(\n value, index=new_index, columns=new_columns\n )._query_compiler\n elif len(self.columns) == 0 and loc == 0:\n new_query_compiler = DataFrame(\n data=value, columns=[column], index=self.index\n )._query_compiler\n else:\n if (\n is_list_like(value)\n and not isinstance(value, (pandas.Series, Series))\n and len(value) != len(self.index)\n ):\n raise ValueError(\"Length of values does not match length of index\")\n if not allow_duplicates and column in self.columns:\n raise ValueError(\"cannot insert {0}, already exists\".format(column))\n if loc > len(self.columns):\n raise IndexError(\n \"index {0} is out of bounds for axis 0 with size {1}\".format(\n loc, len(self.columns)\n )\n )\n if loc < 0:\n raise ValueError(\"unbounded slice\")\n if isinstance(value, Series):\n value = value._query_compiler\n new_query_compiler = self._query_compiler.insert(loc, column, value)\n\n self._update_inplace(new_query_compiler=new_query_compiler)\n\n def interpolate(\n self,\n method=\"linear\",\n axis=0,\n limit=None,\n inplace=False,\n limit_direction: Optional[str] = None,\n limit_area=None,\n downcast=None,\n 
**kwargs,\n ): # noqa: PR01, RT01, D200\n \"\"\"\n Fill NaN values using an interpolation method.\n \"\"\"\n return self._default_to_pandas(\n pandas.DataFrame.interpolate,\n method=method,\n axis=axis,\n limit=limit,\n inplace=inplace,\n limit_direction=limit_direction,\n limit_area=limit_area,\n downcast=downcast,\n **kwargs,\n )\n\n def iterrows(self): # noqa: D200\n \"\"\"\n Iterate over ``DataFrame`` rows as (index, ``Series``) pairs.\n \"\"\"\n\n def iterrow_builder(s):\n \"\"\"Return tuple of the given `s` parameter name and the parameter themself.\"\"\"\n return s.name, s\n\n partition_iterator = PartitionIterator(self, 0, iterrow_builder)\n for v in partition_iterator:\n yield v\n\n def items(self): # noqa: D200\n \"\"\"\n Iterate over (column name, ``Series``) pairs.\n \"\"\"\n\n def items_builder(s):\n \"\"\"Return tuple of the given `s` parameter name and the parameter themself.\"\"\"\n return s.name, s\n\n partition_iterator = PartitionIterator(self, 1, items_builder)\n for v in partition_iterator:\n yield v\n\n def iteritems(self): # noqa: RT01, D200\n \"\"\"\n Iterate over (column name, ``Series``) pairs.\n \"\"\"\n return self.items()\n\n def itertuples(self, index=True, name=\"Pandas\"): # noqa: PR01, D200\n \"\"\"\n Iterate over ``DataFrame`` rows as ``namedtuple``-s.\n \"\"\"\n\n def itertuples_builder(s):\n \"\"\"Return the next ``namedtuple``.\"\"\"\n return next(s._to_pandas().to_frame().T.itertuples(index=index, name=name))\n\n partition_iterator = PartitionIterator(self, 0, itertuples_builder)\n for v in partition_iterator:\n yield v\n\n def join(\n self, other, on=None, how=\"left\", lsuffix=\"\", rsuffix=\"\", sort=False\n ): # noqa: PR01, RT01, D200\n \"\"\"\n Join columns of another ``DataFrame``.\n \"\"\"\n if isinstance(other, Series):\n if other.name is None:\n raise ValueError(\"Other Series must have a name\")\n other = DataFrame({other.name: other})\n if on is not None:\n return self.__constructor__(\n query_compiler=self._query_compiler.join(\n other._query_compiler,\n on=on,\n how=how,\n lsuffix=lsuffix,\n rsuffix=rsuffix,\n sort=sort,\n )\n )\n if isinstance(other, DataFrame):\n # Joining the empty DataFrames with either index or columns is\n # fast. 
It gives us proper error checking for the edge cases that\n # would otherwise require a lot more logic.\n new_columns = (\n pandas.DataFrame(columns=self.columns)\n .join(\n pandas.DataFrame(columns=other.columns),\n lsuffix=lsuffix,\n rsuffix=rsuffix,\n )\n .columns\n )\n other = [other]\n else:\n new_columns = (\n pandas.DataFrame(columns=self.columns)\n .join(\n [pandas.DataFrame(columns=obj.columns) for obj in other],\n lsuffix=lsuffix,\n rsuffix=rsuffix,\n )\n .columns\n )\n new_frame = DataFrame(\n query_compiler=self._query_compiler.concat(\n 1, [obj._query_compiler for obj in other], join=how, sort=sort\n )\n )\n new_frame.columns = new_columns\n return new_frame\n\n def le(self, other, axis=\"columns\", level=None): # noqa: PR01, RT01, D200\n \"\"\"\n Get less than or equal comparison of ``DataFrame`` and `other`, element-wise (binary operator `le`).\n \"\"\"\n return self._binary_op(\n \"le\", other, axis=axis, level=level, broadcast=isinstance(other, Series)\n )\n\n def lookup(self, row_labels, col_labels): # noqa: PR01, RT01, D200\n \"\"\"\n Label-based \"fancy indexing\" function for ``DataFrame``.\n \"\"\"\n return self._default_to_pandas(pandas.DataFrame.lookup, row_labels, col_labels)\n\n def lt(self, other, axis=\"columns\", level=None): # noqa: PR01, RT01, D200\n \"\"\"\n Get less than comparison of ``DataFrame`` and `other`, element-wise (binary operator `le`).\n \"\"\"\n return self._binary_op(\n \"lt\", other, axis=axis, level=level, broadcast=isinstance(other, Series)\n )\n\n def melt(\n self,\n id_vars=None,\n value_vars=None,\n var_name=None,\n value_name=\"value\",\n col_level=None,\n ignore_index=True,\n ): # noqa: PR01, RT01, D200\n \"\"\"\n Unpivot a ``DataFrame`` from wide to long format, optionally leaving identifiers set.\n \"\"\"\n if id_vars is None:\n id_vars = []\n if not is_list_like(id_vars):\n id_vars = [id_vars]\n if value_vars is None:\n value_vars = self.columns.difference(id_vars)\n if var_name is None:\n columns_name = self._query_compiler.get_index_name(axis=1)\n var_name = columns_name if columns_name is not None else \"variable\"\n return self.__constructor__(\n query_compiler=self._query_compiler.melt(\n id_vars=id_vars,\n value_vars=value_vars,\n var_name=var_name,\n value_name=value_name,\n col_level=col_level,\n ignore_index=ignore_index,\n )\n )\n\n def memory_usage(self, index=True, deep=False): # noqa: PR01, RT01, D200\n \"\"\"\n Return the memory usage of each column in bytes.\n \"\"\"\n if index:\n result = self._reduce_dimension(\n self._query_compiler.memory_usage(index=False, deep=deep)\n )\n index_value = self.index.memory_usage(deep=deep)\n return Series(index_value, index=[\"Index\"]).append(result)\n return super(DataFrame, self).memory_usage(index=index, deep=deep)\n\n def merge(\n self,\n right,\n how=\"inner\",\n on=None,\n left_on=None,\n right_on=None,\n left_index=False,\n right_index=False,\n sort=False,\n suffixes=(\"_x\", \"_y\"),\n copy=True,\n indicator=False,\n validate=None,\n ): # noqa: PR01, RT01, D200\n \"\"\"\n Merge ``DataFrame`` or named ``Series`` objects with a database-style join.\n \"\"\"\n if isinstance(right, Series):\n if right.name is None:\n raise ValueError(\"Cannot merge a Series without a name\")\n else:\n right = right.to_frame()\n if not isinstance(right, DataFrame):\n raise TypeError(\n f\"Can only merge Series or DataFrame objects, a {type(right)} was passed\"\n )\n\n if left_index and right_index:\n return self.join(\n right, how=how, lsuffix=suffixes[0], rsuffix=suffixes[1], sort=sort\n )\n\n 
return self.__constructor__(\n query_compiler=self._query_compiler.merge(\n right._query_compiler,\n how=how,\n on=on,\n left_on=left_on,\n right_on=right_on,\n left_index=left_index,\n right_index=right_index,\n sort=sort,\n suffixes=suffixes,\n copy=copy,\n indicator=indicator,\n validate=validate,\n )\n )\n\n def mod(\n self, other, axis=\"columns\", level=None, fill_value=None\n ): # noqa: PR01, RT01, D200\n \"\"\"\n Get modulo of ``DataFrame`` and `other`, element-wise (binary operator `mod`).\n \"\"\"\n return self._binary_op(\n \"mod\",\n other,\n axis=axis,\n level=level,\n fill_value=fill_value,\n broadcast=isinstance(other, Series),\n )\n\n def mul(\n self, other, axis=\"columns\", level=None, fill_value=None\n ): # noqa: PR01, RT01, D200\n \"\"\"\n Get multiplication of ``DataFrame`` and `other`, element-wise (binary operator `mul`).\n \"\"\"\n return self._binary_op(\n \"mul\",\n other,\n axis=axis,\n level=level,\n fill_value=fill_value,\n broadcast=isinstance(other, Series),\n )\n\n rmul = multiply = mul\n\n def ne(self, other, axis=\"columns\", level=None): # noqa: PR01, RT01, D200\n \"\"\"\n Get not equal comparison of ``DataFrame`` and `other`, element-wise (binary operator `ne`).\n \"\"\"\n return self._binary_op(\n \"ne\", other, axis=axis, level=level, broadcast=isinstance(other, Series)\n )\n\n def nlargest(self, n, columns, keep=\"first\"): # noqa: PR01, RT01, D200\n \"\"\"\n Return the first `n` rows ordered by `columns` in descending order.\n \"\"\"\n return DataFrame(query_compiler=self._query_compiler.nlargest(n, columns, keep))\n\n def nsmallest(self, n, columns, keep=\"first\"): # noqa: PR01, RT01, D200\n \"\"\"\n Return the first `n` rows ordered by `columns` in ascending order.\n \"\"\"\n return DataFrame(\n query_compiler=self._query_compiler.nsmallest(\n n=n, columns=columns, keep=keep\n )\n )\n\n def slice_shift(self, periods=1, axis=0): # noqa: PR01, RT01, D200\n \"\"\"\n Equivalent to `shift` without copying data.\n \"\"\"\n if periods == 0:\n return self.copy()\n\n if axis == \"index\" or axis == 0:\n if abs(periods) >= len(self.index):\n return DataFrame(columns=self.columns)\n else:\n new_df = self.iloc[:-periods] if periods > 0 else self.iloc[-periods:]\n new_df.index = (\n self.index[periods:] if periods > 0 else self.index[:periods]\n )\n return new_df\n else:\n if abs(periods) >= len(self.columns):\n return DataFrame(index=self.index)\n else:\n new_df = (\n self.iloc[:, :-periods] if periods > 0 else self.iloc[:, -periods:]\n )\n new_df.columns = (\n self.columns[periods:] if periods > 0 else self.columns[:periods]\n )\n return new_df\n\n def unstack(self, level=-1, fill_value=None): # noqa: PR01, RT01, D200\n \"\"\"\n Pivot a level of the (necessarily hierarchical) index labels.\n \"\"\"\n if not isinstance(self.index, pandas.MultiIndex) or (\n isinstance(self.index, pandas.MultiIndex)\n and is_list_like(level)\n and len(level) == self.index.nlevels\n ):\n return self._reduce_dimension(\n query_compiler=self._query_compiler.unstack(level, fill_value)\n )\n else:\n return DataFrame(\n query_compiler=self._query_compiler.unstack(level, fill_value)\n )\n\n def pivot(self, index=None, columns=None, values=None): # noqa: PR01, RT01, D200\n \"\"\"\n Return reshaped ``DataFrame`` organized by given index / column values.\n \"\"\"\n return self.__constructor__(\n query_compiler=self._query_compiler.pivot(\n index=index, columns=columns, values=values\n )\n )\n\n def pivot_table(\n self,\n values=None,\n index=None,\n columns=None,\n aggfunc=\"mean\",\n 
fill_value=None,\n margins=False,\n dropna=True,\n margins_name=\"All\",\n observed=False,\n sort=True,\n ): # noqa: PR01, RT01, D200\n \"\"\"\n Create a spreadsheet-style pivot table as a ``DataFrame``.\n \"\"\"\n result = DataFrame(\n query_compiler=self._query_compiler.pivot_table(\n index=index,\n values=values,\n columns=columns,\n aggfunc=aggfunc,\n fill_value=fill_value,\n margins=margins,\n dropna=dropna,\n margins_name=margins_name,\n observed=observed,\n sort=sort,\n )\n )\n\n return result\n\n @property\n def plot(\n self,\n x=None,\n y=None,\n kind=\"line\",\n ax=None,\n subplots=False,\n sharex=None,\n sharey=False,\n layout=None,\n figsize=None,\n use_index=True,\n title=None,\n grid=None,\n legend=True,\n style=None,\n logx=False,\n logy=False,\n loglog=False,\n xticks=None,\n yticks=None,\n xlim=None,\n ylim=None,\n rot=None,\n fontsize=None,\n colormap=None,\n table=False,\n yerr=None,\n xerr=None,\n secondary_y=False,\n sort_columns=False,\n **kwargs,\n ): # noqa: PR01, RT01, D200\n \"\"\"\n Make plots of ``DataFrame``.\n \"\"\"\n return self._to_pandas().plot\n\n def pow(\n self, other, axis=\"columns\", level=None, fill_value=None\n ): # noqa: PR01, RT01, D200\n \"\"\"\n Get exponential power of ``DataFrame`` and `other`, element-wise (binary operator `pow`).\n \"\"\"\n if isinstance(other, Series):\n return self._default_to_pandas(\n \"pow\", other, axis=axis, level=level, fill_value=fill_value\n )\n return self._binary_op(\n \"pow\",\n other,\n axis=axis,\n level=level,\n fill_value=fill_value,\n broadcast=isinstance(other, Series),\n )\n\n def prod(\n self,\n axis=None,\n skipna=None,\n level=None,\n numeric_only=None,\n min_count=0,\n **kwargs,\n ): # noqa: PR01, RT01, D200\n \"\"\"\n Return the product of the values over the requested axis.\n \"\"\"\n axis = self._get_axis_number(axis)\n if skipna is None:\n skipna = True\n if level is not None:\n if (\n not self._query_compiler.has_multiindex(axis=axis)\n and level > 0\n or level < -1\n and level != self.index.name\n ):\n raise ValueError(\"level > 0 or level < -1 only valid with MultiIndex\")\n return self.groupby(level=level, axis=axis, sort=False).prod(\n numeric_only=numeric_only, min_count=min_count\n )\n\n axis_to_apply = self.columns if axis else self.index\n if (\n skipna is not False\n and numeric_only is None\n and min_count > len(axis_to_apply)\n ):\n new_index = self.columns if not axis else self.index\n return Series(\n [np.nan] * len(new_index), index=new_index, dtype=np.dtype(\"object\")\n )\n\n data = self._validate_dtypes_sum_prod_mean(axis, numeric_only, ignore_axis=True)\n if min_count > 1:\n return data._reduce_dimension(\n data._query_compiler.prod_min_count(\n axis=axis,\n skipna=skipna,\n level=level,\n numeric_only=numeric_only,\n min_count=min_count,\n **kwargs,\n )\n )\n return data._reduce_dimension(\n data._query_compiler.prod(\n axis=axis,\n skipna=skipna,\n level=level,\n numeric_only=numeric_only,\n min_count=min_count,\n **kwargs,\n )\n )\n\n product = prod\n radd = add\n\n def query(self, expr, inplace=False, **kwargs): # noqa: PR01, RT01, D200\n \"\"\"\n Query the columns of a ``DataFrame`` with a boolean expression.\n \"\"\"\n ErrorMessage.non_verified_udf()\n self._update_var_dicts_in_kwargs(expr, kwargs)\n self._validate_eval_query(expr, **kwargs)\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n new_query_compiler = self._query_compiler.query(expr, **kwargs)\n return self._create_or_update_from_compiler(new_query_compiler, inplace)\n\n def reindex(\n self,\n 
labels=None,\n index=None,\n columns=None,\n axis=None,\n method=None,\n copy=True,\n level=None,\n fill_value=np.nan,\n limit=None,\n tolerance=None,\n ): # noqa: PR01, RT01, D200\n \"\"\"\n Conform ``DataFrame`` to new index with optional filling logic.\n \"\"\"\n axis = self._get_axis_number(axis)\n if axis == 0 and labels is not None:\n index = labels\n elif labels is not None:\n columns = labels\n return super(DataFrame, self).reindex(\n index=index,\n columns=columns,\n method=method,\n copy=copy,\n level=level,\n fill_value=fill_value,\n limit=limit,\n tolerance=tolerance,\n )\n\n def rename(\n self,\n mapper=None,\n index=None,\n columns=None,\n axis=None,\n copy=True,\n inplace=False,\n level=None,\n errors=\"ignore\",\n ): # noqa: PR01, RT01, D200\n \"\"\"\n Alter axes labels.\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n if mapper is None and index is None and columns is None:\n raise TypeError(\"must pass an index to rename\")\n # We have to do this with the args because of how rename handles kwargs. It\n # doesn't ignore None values passed in, so we have to filter them ourselves.\n args = locals()\n kwargs = {k: v for k, v in args.items() if v is not None and k != \"self\"}\n # inplace should always be true because this is just a copy, and we will use the\n # results after.\n kwargs[\"inplace\"] = False\n if axis is not None:\n axis = self._get_axis_number(axis)\n if index is not None or (mapper is not None and axis == 0):\n new_index = pandas.DataFrame(index=self.index).rename(**kwargs).index\n else:\n new_index = None\n if columns is not None or (mapper is not None and axis == 1):\n new_columns = (\n pandas.DataFrame(columns=self.columns).rename(**kwargs).columns\n )\n else:\n new_columns = None\n\n if inplace:\n obj = self\n else:\n obj = self.copy()\n if new_index is not None:\n obj.index = new_index\n if new_columns is not None:\n obj.columns = new_columns\n\n if not inplace:\n return obj\n\n def replace(\n self,\n to_replace=None,\n value=None,\n inplace=False,\n limit=None,\n regex=False,\n method=\"pad\",\n ): # noqa: PR01, RT01, D200\n \"\"\"\n Replace values given in `to_replace` with `value`.\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n new_query_compiler = self._query_compiler.replace(\n to_replace=to_replace,\n value=value,\n inplace=False,\n limit=limit,\n regex=regex,\n method=method,\n )\n return self._create_or_update_from_compiler(new_query_compiler, inplace)\n\n def rfloordiv(\n self, other, axis=\"columns\", level=None, fill_value=None\n ): # noqa: PR01, RT01, D200\n \"\"\"\n Get integer division of ``DataFrame`` and `other`, element-wise (binary operator `rfloordiv`).\n \"\"\"\n return self._binary_op(\n \"rfloordiv\",\n other,\n axis=axis,\n level=level,\n fill_value=fill_value,\n broadcast=isinstance(other, Series),\n )\n\n def rmod(\n self, other, axis=\"columns\", level=None, fill_value=None\n ): # noqa: PR01, RT01, D200\n \"\"\"\n Get modulo of ``DataFrame`` and `other`, element-wise (binary operator `rmod`).\n \"\"\"\n return self._binary_op(\n \"rmod\",\n other,\n axis=axis,\n level=level,\n fill_value=fill_value,\n broadcast=isinstance(other, Series),\n )\n\n def rpow(\n self, other, axis=\"columns\", level=None, fill_value=None\n ): # noqa: PR01, RT01, D200\n \"\"\"\n Get exponential power of ``DataFrame`` and `other`, element-wise (binary operator `rpow`).\n \"\"\"\n if isinstance(other, Series):\n return self._default_to_pandas(\n \"rpow\", other, axis=axis, level=level, fill_value=fill_value\n )\n return 
self._binary_op(\n \"rpow\",\n other,\n axis=axis,\n level=level,\n fill_value=fill_value,\n broadcast=isinstance(other, Series),\n )\n\n def rsub(\n self, other, axis=\"columns\", level=None, fill_value=None\n ): # noqa: PR01, RT01, D200\n \"\"\"\n Get subtraction of ``DataFrame`` and `other`, element-wise (binary operator `rsub`).\n \"\"\"\n return self._binary_op(\n \"rsub\",\n other,\n axis=axis,\n level=level,\n fill_value=fill_value,\n broadcast=isinstance(other, Series),\n )\n\n def rtruediv(\n self, other, axis=\"columns\", level=None, fill_value=None\n ): # noqa: PR01, RT01, D200\n \"\"\"\n Get floating division of ``DataFrame`` and `other`, element-wise (binary operator `rtruediv`).\n \"\"\"\n return self._binary_op(\n \"rtruediv\",\n other,\n axis=axis,\n level=level,\n fill_value=fill_value,\n broadcast=isinstance(other, Series),\n )\n\n rdiv = rtruediv\n\n def select_dtypes(self, include=None, exclude=None): # noqa: PR01, RT01, D200\n \"\"\"\n Return a subset of the ``DataFrame``'s columns based on the column dtypes.\n \"\"\"\n # Validates arguments for whether both include and exclude are None or\n # if they are disjoint. Also invalidates string dtypes.\n pandas.DataFrame().select_dtypes(include, exclude)\n\n if include and not is_list_like(include):\n include = [include]\n elif include is None:\n include = []\n if exclude and not is_list_like(exclude):\n exclude = [exclude]\n elif exclude is None:\n exclude = []\n\n sel = tuple(map(set, (include, exclude)))\n include, exclude = map(lambda x: set(map(infer_dtype_from_object, x)), sel)\n include_these = pandas.Series(not bool(include), index=self.columns)\n exclude_these = pandas.Series(not bool(exclude), index=self.columns)\n\n def is_dtype_instance_mapper(column, dtype):\n return column, functools.partial(issubclass, dtype.type)\n\n for column, f in itertools.starmap(\n is_dtype_instance_mapper, self.dtypes.iteritems()\n ):\n if include: # checks for the case of empty include or exclude\n include_these[column] = any(map(f, include))\n if exclude:\n exclude_these[column] = not any(map(f, exclude))\n\n dtype_indexer = include_these & exclude_these\n indicate = [\n i for i in range(len(dtype_indexer.values)) if not dtype_indexer.values[i]\n ]\n return self.drop(columns=self.columns[indicate], inplace=False)\n\n def set_index(\n self, keys, drop=True, append=False, inplace=False, verify_integrity=False\n ): # noqa: PR01, RT01, D200\n \"\"\"\n Set the ``DataFrame`` index using existing columns.\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n if not isinstance(keys, list):\n keys = [keys]\n\n if any(\n isinstance(col, (pandas.Index, Series, np.ndarray, list, Iterator))\n for col in keys\n ):\n if inplace:\n frame = self\n else:\n frame = self.copy()\n if not all(\n isinstance(col, (pandas.Index, Series, np.ndarray, list, Iterator))\n for col in keys\n ):\n if drop:\n keys = [frame.pop(k) if not is_list_like(k) else k for k in keys]\n keys = [k._to_pandas() if isinstance(k, Series) else k for k in keys]\n # These are single-threaded objects, so we might as well let pandas do the\n # calculation so that it matches.\n frame.index = (\n pandas.DataFrame(index=self.index)\n .set_index(keys, append=append, verify_integrity=verify_integrity)\n .index\n )\n if not inplace:\n return frame\n else:\n return\n new_query_compiler = self._query_compiler.set_index_from_columns(\n keys, drop=drop, append=append\n )\n\n if verify_integrity and not new_query_compiler.index.is_unique:\n duplicates = new_query_compiler.index[\n 
new_query_compiler.index.duplicated()\n ].unique()\n raise ValueError(f\"Index has duplicate keys: {duplicates}\")\n\n return self._create_or_update_from_compiler(new_query_compiler, inplace=inplace)\n\n sparse = CachedAccessor(\"sparse\", SparseFrameAccessor)\n\n def squeeze(self, axis=None): # noqa: PR01, RT01, D200\n \"\"\"\n Squeeze 1 dimensional axis objects into scalars.\n \"\"\"\n axis = self._get_axis_number(axis) if axis is not None else None\n if axis is None and (len(self.columns) == 1 or len(self.index) == 1):\n return Series(query_compiler=self._query_compiler).squeeze()\n if axis == 1 and len(self.columns) == 1:\n return Series(query_compiler=self._query_compiler)\n if axis == 0 and len(self.index) == 1:\n return Series(query_compiler=self.T._query_compiler)\n else:\n return self.copy()\n\n def stack(self, level=-1, dropna=True): # noqa: PR01, RT01, D200\n \"\"\"\n Stack the prescribed level(s) from columns to index.\n \"\"\"\n if not isinstance(self.columns, pandas.MultiIndex) or (\n isinstance(self.columns, pandas.MultiIndex)\n and is_list_like(level)\n and len(level) == self.columns.nlevels\n ):\n return self._reduce_dimension(\n query_compiler=self._query_compiler.stack(level, dropna)\n )\n else:\n return DataFrame(query_compiler=self._query_compiler.stack(level, dropna))\n\n def sub(\n self, other, axis=\"columns\", level=None, fill_value=None\n ): # noqa: PR01, RT01, D200\n \"\"\"\n Get subtraction of ``DataFrame`` and `other`, element-wise (binary operator `sub`).\n \"\"\"\n return self._binary_op(\n \"sub\",\n other,\n axis=axis,\n level=level,\n fill_value=fill_value,\n broadcast=isinstance(other, Series),\n )\n\n subtract = sub\n\n def sum(\n self,\n axis=None,\n skipna=None,\n level=None,\n numeric_only=None,\n min_count=0,\n **kwargs,\n ): # noqa: PR01, RT01, D200\n \"\"\"\n Return the sum of the values over the requested axis.\n \"\"\"\n axis = self._get_axis_number(axis)\n if skipna is None:\n skipna = True\n axis_to_apply = self.columns if axis else self.index\n if (\n skipna is not False\n and numeric_only is None\n and min_count > len(axis_to_apply)\n ):\n new_index = self.columns if not axis else self.index\n return Series(\n [np.nan] * len(new_index), index=new_index, dtype=np.dtype(\"object\")\n )\n\n data = self._validate_dtypes_sum_prod_mean(\n axis, numeric_only, ignore_axis=False\n )\n if level is not None:\n if (\n not self._query_compiler.has_multiindex(axis=axis)\n and level > 0\n or level < -1\n and level != self.index.name\n ):\n raise ValueError(\"level > 0 or level < -1 only valid with MultiIndex\")\n return self.groupby(level=level, axis=axis, sort=False).sum(\n numeric_only=numeric_only, min_count=min_count\n )\n if min_count > 1:\n return data._reduce_dimension(\n data._query_compiler.sum_min_count(\n axis=axis,\n skipna=skipna,\n level=level,\n numeric_only=numeric_only,\n min_count=min_count,\n **kwargs,\n )\n )\n return data._reduce_dimension(\n data._query_compiler.sum(\n axis=axis,\n skipna=skipna,\n level=level,\n numeric_only=numeric_only,\n min_count=min_count,\n **kwargs,\n )\n )\n\n def to_feather(self, path, **kwargs): # pragma: no cover # noqa: PR01, RT01, D200\n \"\"\"\n Write a ``DataFrame`` to the binary Feather format.\n \"\"\"\n return self._default_to_pandas(pandas.DataFrame.to_feather, path, **kwargs)\n\n def to_gbq(\n self,\n destination_table,\n project_id=None,\n chunksize=None,\n reauth=False,\n if_exists=\"fail\",\n auth_local_webserver=False,\n table_schema=None,\n location=None,\n progress_bar=True,\n 
credentials=None,\n ): # pragma: no cover # noqa: PR01, RT01, D200\n \"\"\"\n Write a ``DataFrame`` to a Google BigQuery table.\n \"\"\"\n return self._default_to_pandas(\n pandas.DataFrame.to_gbq,\n destination_table,\n project_id=project_id,\n chunksize=chunksize,\n reauth=reauth,\n if_exists=if_exists,\n auth_local_webserver=auth_local_webserver,\n table_schema=table_schema,\n location=location,\n progress_bar=progress_bar,\n credentials=credentials,\n )\n\n def to_html(\n self,\n buf=None,\n columns=None,\n col_space=None,\n header=True,\n index=True,\n na_rep=\"NaN\",\n formatters=None,\n float_format=None,\n sparsify=None,\n index_names=True,\n justify=None,\n max_rows=None,\n max_cols=None,\n show_dimensions=False,\n decimal=\".\",\n bold_rows=True,\n classes=None,\n escape=True,\n notebook=False,\n border=None,\n table_id=None,\n render_links=False,\n encoding=None,\n ): # noqa: PR01, RT01, D200\n \"\"\"\n Render a ``DataFrame`` as an HTML table.\n \"\"\"\n return self._default_to_pandas(\n pandas.DataFrame.to_html,\n buf=buf,\n columns=columns,\n col_space=col_space,\n header=header,\n index=index,\n na_rep=na_rep,\n formatters=formatters,\n float_format=float_format,\n sparsify=sparsify,\n index_names=index_names,\n justify=justify,\n max_rows=max_rows,\n max_cols=max_cols,\n show_dimensions=show_dimensions,\n decimal=decimal,\n bold_rows=bold_rows,\n classes=classes,\n escape=escape,\n notebook=notebook,\n border=border,\n table_id=table_id,\n render_links=render_links,\n encoding=None,\n )\n\n def to_parquet(\n self,\n path=None,\n engine=\"auto\",\n compression=\"snappy\",\n index=None,\n partition_cols=None,\n storage_options: StorageOptions = None,\n **kwargs,\n ):\n\n config = {\n \"path\": path,\n \"engine\": engine,\n \"compression\": compression,\n \"index\": index,\n \"partition_cols\": partition_cols,\n \"storage_options\": storage_options,\n }\n new_query_compiler = self._query_compiler\n\n from modin.core.execution.dispatching.factories.dispatcher import (\n FactoryDispatcher,\n )\n\n return FactoryDispatcher.to_parquet(new_query_compiler, **config, **kwargs)\n\n def to_period(\n self, freq=None, axis=0, copy=True\n ): # pragma: no cover # noqa: PR01, RT01, D200\n \"\"\"\n Convert ``DataFrame`` from ``DatetimeIndex`` to ``PeriodIndex``.\n \"\"\"\n return super(DataFrame, self).to_period(freq=freq, axis=axis, copy=copy)\n\n def to_records(\n self, index=True, column_dtypes=None, index_dtypes=None\n ): # noqa: PR01, RT01, D200\n \"\"\"\n Convert ``DataFrame`` to a NumPy record array.\n \"\"\"\n return self._default_to_pandas(\n pandas.DataFrame.to_records,\n index=index,\n column_dtypes=column_dtypes,\n index_dtypes=index_dtypes,\n )\n\n def to_stata(\n self,\n path,\n convert_dates=None,\n write_index=True,\n byteorder=None,\n time_stamp=None,\n data_label=None,\n variable_labels=None,\n version=114,\n convert_strl=None,\n compression: Union[str, Mapping[str, str], None] = \"infer\",\n storage_options: StorageOptions = None,\n ): # pragma: no cover # noqa: PR01, RT01, D200\n \"\"\"\n Export ``DataFrame`` object to Stata data format.\n \"\"\"\n return self._default_to_pandas(\n pandas.DataFrame.to_stata,\n path,\n convert_dates=convert_dates,\n write_index=write_index,\n byteorder=byteorder,\n time_stamp=time_stamp,\n data_label=data_label,\n variable_labels=variable_labels,\n version=version,\n convert_strl=convert_strl,\n compression=compression,\n storage_options=storage_options,\n )\n\n def to_timestamp(\n self, freq=None, how=\"start\", axis=0, copy=True\n ): # 
noqa: PR01, RT01, D200\n \"\"\"\n Cast to DatetimeIndex of timestamps, at *beginning* of period.\n \"\"\"\n return super(DataFrame, self).to_timestamp(\n freq=freq, how=how, axis=axis, copy=copy\n )\n\n def to_xml(\n self,\n path_or_buffer=None,\n index=True,\n root_name=\"data\",\n row_name=\"row\",\n na_rep=None,\n attr_cols=None,\n elem_cols=None,\n namespaces=None,\n prefix=None,\n encoding=\"utf-8\",\n xml_declaration=True,\n pretty_print=True,\n parser=\"lxml\",\n stylesheet=None,\n compression=\"infer\",\n storage_options=None,\n ):\n return self.__constructor__(\n query_compiler=self._query_compiler.default_to_pandas(\n pandas.DataFrame.to_xml,\n path_or_buffer=path_or_buffer,\n index=index,\n root_name=root_name,\n row_name=row_name,\n na_rep=na_rep,\n attr_cols=attr_cols,\n elem_cols=elem_cols,\n namespaces=namespaces,\n prefix=prefix,\n encoding=encoding,\n xml_declaration=xml_declaration,\n pretty_print=pretty_print,\n parser=parser,\n stylesheet=stylesheet,\n compression=compression,\n storage_options=storage_options,\n )\n )\n\n def truediv(\n self, other, axis=\"columns\", level=None, fill_value=None\n ): # noqa: PR01, RT01, D200\n \"\"\"\n Get floating division of ``DataFrame`` and `other`, element-wise (binary operator `truediv`).\n \"\"\"\n return self._binary_op(\n \"truediv\",\n other,\n axis=axis,\n level=level,\n fill_value=fill_value,\n broadcast=isinstance(other, Series),\n )\n\n div = divide = truediv\n\n def update(\n self, other, join=\"left\", overwrite=True, filter_func=None, errors=\"ignore\"\n ): # noqa: PR01, RT01, D200\n \"\"\"\n Modify in place using non-NA values from another ``DataFrame``.\n \"\"\"\n if not isinstance(other, DataFrame):\n other = DataFrame(other)\n query_compiler = self._query_compiler.df_update(\n other._query_compiler,\n join=join,\n overwrite=overwrite,\n filter_func=filter_func,\n errors=errors,\n )\n self._update_inplace(new_query_compiler=query_compiler)\n\n def where(\n self,\n cond,\n other=np.nan,\n inplace=False,\n axis=None,\n level=None,\n errors=\"raise\",\n try_cast=no_default,\n ): # noqa: PR01, RT01, D200\n \"\"\"\n Replace values where the condition is False.\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n if isinstance(other, pandas.Series) and axis is None:\n raise ValueError(\"Must specify axis=0 or 1\")\n if level is not None:\n if isinstance(other, DataFrame):\n other = other._query_compiler.to_pandas()\n if isinstance(cond, DataFrame):\n cond = cond._query_compiler.to_pandas()\n new_query_compiler = self._default_to_pandas(\n pandas.DataFrame.where,\n cond,\n other=other,\n inplace=False,\n axis=axis,\n level=level,\n errors=errors,\n try_cast=try_cast,\n )\n return self._create_or_update_from_compiler(new_query_compiler, inplace)\n axis = self._get_axis_number(axis)\n cond = cond(self) if callable(cond) else cond\n\n if not isinstance(cond, DataFrame):\n if not hasattr(cond, \"shape\"):\n cond = np.asanyarray(cond)\n if cond.shape != self.shape:\n raise ValueError(\"Array conditional must be same shape as self\")\n cond = DataFrame(cond, index=self.index, columns=self.columns)\n if isinstance(other, DataFrame):\n other = other._query_compiler\n elif isinstance(other, pandas.Series):\n other = other.reindex(self.index if not axis else self.columns)\n else:\n index = self.index if not axis else self.columns\n other = pandas.Series(other, index=index)\n query_compiler = self._query_compiler.where(\n cond._query_compiler, other, axis=axis, level=level\n )\n return 
self._create_or_update_from_compiler(query_compiler, inplace)\n\n def xs(self, key, axis=0, level=None, drop_level=True): # noqa: PR01, RT01, D200\n \"\"\"\n Return cross-section from the ``DataFrame``.\n \"\"\"\n return self._default_to_pandas(\n pandas.DataFrame.xs, key, axis=axis, level=level, drop_level=drop_level\n )\n\n def _getitem_column(self, key):\n \"\"\"\n Get column specified by `key`.\n\n Parameters\n ----------\n key : hashable\n Key that points to column to retrieve.\n\n Returns\n -------\n Series\n Selected column.\n \"\"\"\n if key not in self.keys():\n raise KeyError(\"{}\".format(key))\n s = DataFrame(\n query_compiler=self._query_compiler.getitem_column_array([key])\n ).squeeze(axis=1)\n if isinstance(s, Series):\n s._parent = self\n s._parent_axis = 1\n return s\n\n def __getattr__(self, key):\n \"\"\"\n Return item identified by `key`.\n\n Parameters\n ----------\n key : hashable\n Key to get.\n\n Returns\n -------\n Any\n\n Notes\n -----\n First try to use `__getattribute__` method. If it fails\n try to get `key` from ``DataFrame`` fields.\n \"\"\"\n try:\n return object.__getattribute__(self, key)\n except AttributeError as e:\n if key not in _ATTRS_NO_LOOKUP and key in self.columns:\n return self[key]\n raise e\n\n def __setattr__(self, key, value):\n \"\"\"\n Set attribute `value` identified by `key`.\n\n Parameters\n ----------\n key : hashable\n Key to set.\n value : Any\n Value to set.\n \"\"\"\n # We have to check for this first because we have to be able to set\n # _query_compiler before we check if the key is in self\n if key in [\"_query_compiler\"] or key in self.__dict__:\n pass\n elif key in self and key not in dir(self):\n self.__setitem__(key, value)\n elif isinstance(value, pandas.Series):\n warnings.warn(\n \"Modin doesn't allow columns to be created via a new attribute name - see \"\n \"https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access\",\n UserWarning,\n )\n object.__setattr__(self, key, value)\n\n def __setitem__(self, key, value):\n \"\"\"\n Set attribute `value` identified by `key`.\n\n Parameters\n ----------\n key : Any\n Key to set.\n value : Any\n Value to set.\n\n Returns\n -------\n None\n \"\"\"\n if isinstance(key, slice):\n return self._setitem_slice(key, value)\n\n if hashable(key) and key not in self.columns:\n if isinstance(value, Series) and len(self.columns) == 0:\n self._query_compiler = value._query_compiler.copy()\n # Now that the data is appended, we need to update the column name for\n # that column to `key`, otherwise the name could be incorrect. 
Drop the\n # last column name from the list (the appended value's name and append\n # the new name.\n self.columns = self.columns[:-1].append(pandas.Index([key]))\n return\n elif (\n isinstance(value, (pandas.DataFrame, DataFrame)) and value.shape[1] != 1\n ):\n raise ValueError(\n \"Wrong number of items passed %i, placement implies 1\"\n % value.shape[1]\n )\n elif isinstance(value, np.ndarray) and len(value.shape) > 1:\n if value.shape[1] == 1:\n # Transform into columnar table and take first column\n value = value.copy().T[0]\n else:\n raise ValueError(\n \"Wrong number of items passed %i, placement implies 1\"\n % value.shape[1]\n )\n\n # Do new column assignment after error checks and possible value modifications\n self.insert(loc=len(self.columns), column=key, value=value)\n return\n\n if not hashable(key):\n if isinstance(key, DataFrame) or isinstance(key, np.ndarray):\n if isinstance(key, np.ndarray):\n if key.shape != self.shape:\n raise ValueError(\"Array must be same shape as DataFrame\")\n key = DataFrame(key, columns=self.columns)\n return self.mask(key, value, inplace=True)\n\n def setitem_unhashable_key(df, value):\n df[key] = value\n return df\n\n return self._update_inplace(\n self._default_to_pandas(setitem_unhashable_key, value)._query_compiler\n )\n if is_list_like(value):\n if isinstance(value, (pandas.DataFrame, DataFrame)):\n value = value[value.columns[0]].values\n elif isinstance(value, np.ndarray):\n assert (\n len(value.shape) < 3\n ), \"Shape of new values must be compatible with manager shape\"\n value = value.T.reshape(-1)\n if len(self) > 0:\n value = value[: len(self)]\n if not isinstance(value, Series):\n value = list(value)\n\n if not self._query_compiler.lazy_execution and len(self.index) == 0:\n new_self = DataFrame({key: value}, columns=self.columns)\n self._update_inplace(new_self._query_compiler)\n else:\n if isinstance(value, Series):\n value = value._query_compiler\n self._update_inplace(self._query_compiler.setitem(0, key, value))\n\n def __iter__(self):\n \"\"\"\n Iterate over info axis.\n\n Returns\n -------\n iterable\n Iterator of the columns names.\n \"\"\"\n return iter(self.columns)\n\n def __contains__(self, key):\n \"\"\"\n Check if `key` in the ``DataFrame.columns``.\n\n Parameters\n ----------\n key : hashable\n Key to check the presence in the columns.\n\n Returns\n -------\n bool\n \"\"\"\n return self.columns.__contains__(key)\n\n def __round__(self, decimals=0):\n \"\"\"\n Round each value in a ``DataFrame`` to the given number of decimals.\n\n Parameters\n ----------\n decimals : int, default: 0\n Number of decimal places to round to.\n\n Returns\n -------\n DataFrame\n \"\"\"\n return self._default_to_pandas(pandas.DataFrame.__round__, decimals=decimals)\n\n def __delitem__(self, key):\n \"\"\"\n Delete item identified by `key` label.\n\n Parameters\n ----------\n key : hashable\n Key to delete.\n \"\"\"\n if key not in self:\n raise KeyError(key)\n self._update_inplace(new_query_compiler=self._query_compiler.delitem(key))\n\n __add__ = add\n __iadd__ = add # pragma: no cover\n __radd__ = radd\n __mul__ = mul\n __imul__ = mul # pragma: no cover\n __rmul__ = rmul\n __pow__ = pow\n __ipow__ = pow # pragma: no cover\n __rpow__ = rpow\n __sub__ = sub\n __isub__ = sub # pragma: no cover\n __rsub__ = rsub\n __floordiv__ = floordiv\n __ifloordiv__ = floordiv # pragma: no cover\n __rfloordiv__ = rfloordiv\n __truediv__ = truediv\n __itruediv__ = truediv # pragma: no cover\n __rtruediv__ = rtruediv\n __mod__ = mod\n __imod__ = mod # 
pragma: no cover\n __rmod__ = rmod\n __rdiv__ = rdiv\n\n @property\n def attrs(self): # noqa: D200\n \"\"\"\n Return dictionary of global attributes of this dataset.\n \"\"\"\n\n def attrs(df):\n return df.attrs\n\n self._default_to_pandas(attrs)\n\n @property\n def style(self): # noqa: RT01, D200\n \"\"\"\n Return a Styler object.\n \"\"\"\n\n def style(df):\n \"\"\"Define __name__ attr because properties do not have it.\"\"\"\n return df.style\n\n return self._default_to_pandas(style)\n\n def _create_or_update_from_compiler(self, new_query_compiler, inplace=False):\n \"\"\"\n Return or update a ``DataFrame`` with given `new_query_compiler`.\n\n Parameters\n ----------\n new_query_compiler : PandasQueryCompiler\n QueryCompiler to use to manage the data.\n inplace : bool, default: False\n Whether or not to perform update or creation inplace.\n\n Returns\n -------\n DataFrame or None\n None if update was done, ``DataFrame`` otherwise.\n \"\"\"\n assert (\n isinstance(new_query_compiler, type(self._query_compiler))\n or type(new_query_compiler) in self._query_compiler.__class__.__bases__\n ), \"Invalid Query Compiler object: {}\".format(type(new_query_compiler))\n if not inplace:\n return DataFrame(query_compiler=new_query_compiler)\n else:\n self._update_inplace(new_query_compiler=new_query_compiler)\n\n def _get_numeric_data(self, axis: int):\n \"\"\"\n Grab only numeric data from ``DataFrame``.\n\n Parameters\n ----------\n axis : {0, 1}\n Axis to inspect on having numeric types only.\n\n Returns\n -------\n DataFrame\n ``DataFrame`` with numeric data.\n \"\"\"\n # Pandas ignores `numeric_only` if `axis` is 1, but we do have to drop\n # non-numeric columns if `axis` is 0.\n if axis != 0:\n return self\n return self.drop(\n columns=[\n i for i in self.dtypes.index if not is_numeric_dtype(self.dtypes[i])\n ]\n )\n\n def _validate_dtypes(self, numeric_only=False):\n \"\"\"\n Check that all the dtypes are the same.\n\n Parameters\n ----------\n numeric_only : bool, default: False\n Whether or not to allow only numeric data.\n If True and non-numeric data is found, exception\n will be raised.\n \"\"\"\n dtype = self.dtypes[0]\n for t in self.dtypes:\n if numeric_only and not is_numeric_dtype(t):\n raise TypeError(\"{0} is not a numeric data type\".format(t))\n elif not numeric_only and t != dtype:\n raise TypeError(\n \"Cannot compare type '{0}' with type '{1}'\".format(t, dtype)\n )\n\n def _validate_dtypes_min_max(self, axis, numeric_only):\n \"\"\"\n Validate data dtype for `min` and `max` methods.\n\n Parameters\n ----------\n axis : {0, 1}\n Axis to validate over.\n numeric_only : bool\n Whether or not to allow only numeric data.\n If True and non-numeric data is found, exception.\n\n Returns\n -------\n DataFrame\n \"\"\"\n # If our DataFrame has both numeric and non-numeric dtypes then\n # comparisons between these types do not make sense and we must raise a\n # TypeError. The exception to this rule is when there are datetime and\n # timedelta objects, in which case we proceed with the comparison\n # without ignoring any non-numeric types. 
We must check explicitly if\n # numeric_only is False because if it is None, it will default to True\n # if the operation fails with mixed dtypes.\n if (\n axis\n and numeric_only is False\n and np.unique([is_numeric_dtype(dtype) for dtype in self.dtypes]).size == 2\n ):\n # check if there are columns with dtypes datetime or timedelta\n if all(\n dtype != np.dtype(\"datetime64[ns]\")\n and dtype != np.dtype(\"timedelta64[ns]\")\n for dtype in self.dtypes\n ):\n raise TypeError(\"Cannot compare Numeric and Non-Numeric Types\")\n\n return (\n self._get_numeric_data(axis)\n if numeric_only is None or numeric_only\n else self\n )\n\n def _validate_dtypes_sum_prod_mean(self, axis, numeric_only, ignore_axis=False):\n \"\"\"\n Validate data dtype for `sum`, `prod` and `mean` methods.\n\n Parameters\n ----------\n axis : {0, 1}\n Axis to validate over.\n numeric_only : bool\n Whether or not to allow only numeric data.\n If True and non-numeric data is found, exception\n will be raised.\n ignore_axis : bool, default: False\n Whether or not to ignore `axis` parameter.\n\n Returns\n -------\n DataFrame\n \"\"\"\n # We cannot add datetime types, so if we are summing a column with\n # dtype datetime64 and cannot ignore non-numeric types, we must throw a\n # TypeError.\n if (\n not axis\n and numeric_only is False\n and any(dtype == np.dtype(\"datetime64[ns]\") for dtype in self.dtypes)\n ):\n raise TypeError(\"Cannot add Timestamp Types\")\n\n # If our DataFrame has both numeric and non-numeric dtypes then\n # operations between these types do not make sense and we must raise a\n # TypeError. The exception to this rule is when there are datetime and\n # timedelta objects, in which case we proceed with the comparison\n # without ignoring any non-numeric types. We must check explicitly if\n # numeric_only is False because if it is None, it will default to True\n # if the operation fails with mixed dtypes.\n if (\n (axis or ignore_axis)\n and numeric_only is False\n and np.unique([is_numeric_dtype(dtype) for dtype in self.dtypes]).size == 2\n ):\n # check if there are columns with dtypes datetime or timedelta\n if all(\n dtype != np.dtype(\"datetime64[ns]\")\n and dtype != np.dtype(\"timedelta64[ns]\")\n for dtype in self.dtypes\n ):\n raise TypeError(\"Cannot operate on Numeric and Non-Numeric Types\")\n\n return (\n self._get_numeric_data(axis)\n if numeric_only is None or numeric_only\n else self\n )\n\n def _to_pandas(self):\n \"\"\"\n Convert Modin ``DataFrame`` to pandas ``DataFrame``.\n\n Returns\n -------\n pandas.DataFrame\n \"\"\"\n return self._query_compiler.to_pandas()\n\n def _validate_eval_query(self, expr, **kwargs):\n \"\"\"\n Validate the arguments of ``eval`` and ``query`` functions.\n\n Parameters\n ----------\n expr : str\n The expression to evaluate. 
This string cannot contain any\n Python statements, only Python expressions.\n **kwargs : dict\n Optional arguments of ``eval`` and ``query`` functions.\n \"\"\"\n if isinstance(expr, str) and expr == \"\":\n raise ValueError(\"expr cannot be an empty string\")\n\n if isinstance(expr, str) and \"not\" in expr:\n if \"parser\" in kwargs and kwargs[\"parser\"] == \"python\":\n ErrorMessage.not_implemented(\n \"'Not' nodes are not implemented.\"\n ) # pragma: no cover\n\n def _reduce_dimension(self, query_compiler):\n \"\"\"\n Reduce the dimension of data from the `query_compiler`.\n\n Parameters\n ----------\n query_compiler : BaseQueryCompiler\n Query compiler to retrieve the data.\n\n Returns\n -------\n Series\n \"\"\"\n return Series(query_compiler=query_compiler)\n\n def _set_axis_name(self, name, axis=0, inplace=False):\n \"\"\"\n Alter the name or names of the axis.\n\n Parameters\n ----------\n name : str or list of str\n Name for the Index, or list of names for the MultiIndex.\n axis : str or int, default: 0\n The axis to set the label.\n 0 or 'index' for the index, 1 or 'columns' for the columns.\n inplace : bool, default: False\n Whether to modify `self` directly or return a copy.\n\n Returns\n -------\n DataFrame or None\n \"\"\"\n axis = self._get_axis_number(axis)\n renamed = self if inplace else self.copy()\n if axis == 0:\n renamed.index = renamed.index.set_names(name)\n else:\n renamed.columns = renamed.columns.set_names(name)\n if not inplace:\n return renamed\n\n def _to_datetime(self, **kwargs):\n \"\"\"\n Convert `self` to datetime.\n\n Parameters\n ----------\n **kwargs : dict\n Optional arguments to use during query compiler's\n `to_datetime` invocation.\n\n Returns\n -------\n Series of datetime64 dtype\n \"\"\"\n return self._reduce_dimension(\n query_compiler=self._query_compiler.to_datetime(**kwargs)\n )\n\n def _getitem(self, key):\n \"\"\"\n Get the data specified by `key` for this ``DataFrame``.\n\n Parameters\n ----------\n key : callable, Series, DataFrame, np.ndarray, pandas.Index or list\n Data identifiers to retrieve.\n\n Returns\n -------\n Series or DataFrame\n Retrieved data.\n \"\"\"\n key = apply_if_callable(key, self)\n # Shortcut if key is an actual column\n is_mi_columns = self._query_compiler.has_multiindex(axis=1)\n try:\n if key in self.columns and not is_mi_columns:\n return self._getitem_column(key)\n except (KeyError, ValueError, TypeError):\n pass\n if isinstance(key, Series):\n return DataFrame(\n query_compiler=self._query_compiler.getitem_array(key._query_compiler)\n )\n elif isinstance(key, (np.ndarray, pandas.Index, list)):\n return DataFrame(query_compiler=self._query_compiler.getitem_array(key))\n elif isinstance(key, DataFrame):\n return self.where(key)\n elif is_mi_columns:\n return self._default_to_pandas(pandas.DataFrame.__getitem__, key)\n # return self._getitem_multilevel(key)\n else:\n return self._getitem_column(key)\n\n # Persistance support methods - BEGIN\n @classmethod\n def _inflate_light(cls, query_compiler):\n \"\"\"\n Re-creates the object from previously-serialized lightweight representation.\n\n The method is used for faster but not disk-storable persistence.\n\n Parameters\n ----------\n query_compiler : BaseQueryCompiler\n Query compiler to use for object re-creation.\n\n Returns\n -------\n DataFrame\n New ``DataFrame`` based on the `query_compiler`.\n \"\"\"\n return cls(query_compiler=query_compiler)\n\n @classmethod\n def _inflate_full(cls, pandas_df):\n \"\"\"\n Re-creates the object from previously-serialized 
disk-storable representation.\n\n Parameters\n ----------\n pandas_df : pandas.DataFrame\n Data to use for object re-creation.\n\n Returns\n -------\n DataFrame\n New ``DataFrame`` based on the `pandas_df`.\n \"\"\"\n return cls(data=from_pandas(pandas_df))\n\n def __reduce__(self):\n self._query_compiler.finalize()\n if PersistentPickle.get():\n return self._inflate_full, (self._to_pandas(),)\n return self._inflate_light, (self._query_compiler,)\n\n # Persistance support methods - END\n\n\nif IsExperimental.get():\n from modin.experimental.cloud.meta_magic import make_wrapped_class\n\n make_wrapped_class(DataFrame, \"make_dataframe_wrapper\")\n"
]
| [
[
"pandas.DataFrame.from_records",
"pandas.Index",
"numpy.empty",
"numpy.asarray",
"numpy.array",
"pandas.DataFrame.from_dict",
"pandas.core.common.apply_if_callable",
"pandas.DataFrame",
"pandas.io.formats.printing.pprint_thing",
"pandas.core.dtypes.common.is_dict_like",
"pandas.core.dtypes.common.is_numeric_dtype",
"pandas.util._validators.validate_bool_kwarg",
"pandas.Series",
"pandas.core.dtypes.common.is_list_like",
"pandas.io.formats.console.get_console_size",
"numpy.asanyarray",
"numpy.dtype",
"pandas.get_option"
]
]
|
acolinisi/h5py | [
"c69fc627c96aafcc1393bb70115e5bcd3a6f8a95"
]
| [
"h5py/_hl/filters.py"
]
| [
"# This file is part of h5py, a Python interface to the HDF5 library.\n#\n# http://www.h5py.org\n#\n# Copyright 2008-2013 Andrew Collette and contributors\n#\n# License: Standard 3-clause BSD; see \"license.txt\" for full license terms\n# and contributor agreement.\n\n\"\"\"\n Implements support for HDF5 compression filters via the high-level\n interface. The following types of filter are available:\n\n \"gzip\"\n Standard DEFLATE-based compression, at integer levels from 0 to 9.\n Built-in to all public versions of HDF5. Use this if you want a\n decent-to-good ratio, good portability, and don't mind waiting.\n\n \"lzf\"\n Custom compression filter for h5py. This filter is much, much faster\n than gzip (roughly 10x in compression vs. gzip level 4, and 3x faster\n in decompressing), but at the cost of a worse compression ratio. Use\n this if you want cheap compression and portability is not a concern.\n\n \"szip\"\n Access to the HDF5 SZIP encoder. SZIP is a non-mainstream compression\n format used in space science on integer and float datasets. SZIP is\n subject to license requirements, which means the encoder is not\n guaranteed to be always available. However, it is also much faster\n than gzip.\n\n The following constants in this module are also useful:\n\n decode\n Tuple of available filter names for decoding\n\n encode\n Tuple of available filter names for encoding\n\"\"\"\nfrom collections.abc import Mapping\nimport operator\n\nimport numpy as np\nfrom .compat import filename_encode\nfrom .. import h5z, h5p, h5d, h5f\n\n\n_COMP_FILTERS = {'gzip': h5z.FILTER_DEFLATE,\n 'szip': h5z.FILTER_SZIP,\n 'lzf': h5z.FILTER_LZF,\n 'shuffle': h5z.FILTER_SHUFFLE,\n 'fletcher32': h5z.FILTER_FLETCHER32,\n 'scaleoffset': h5z.FILTER_SCALEOFFSET }\n\nDEFAULT_GZIP = 4\nDEFAULT_SZIP = ('nn', 8)\n\ndef _gen_filter_tuples():\n \"\"\" Bootstrap function to figure out what filters are available. \"\"\"\n dec = []\n enc = []\n for name, code in _COMP_FILTERS.items():\n if h5z.filter_avail(code):\n info = h5z.get_filter_info(code)\n if info & h5z.FILTER_CONFIG_ENCODE_ENABLED:\n enc.append(name)\n if info & h5z.FILTER_CONFIG_DECODE_ENABLED:\n dec.append(name)\n\n return tuple(dec), tuple(enc)\n\ndecode, encode = _gen_filter_tuples()\n\ndef _external_entry(entry):\n \"\"\" Check for and return a well-formed entry tuple for\n a call to h5p.set_external. \"\"\"\n # We require only an iterable entry but also want to guard against\n # raising a confusing exception from unpacking below a str or bytes that\n # was mistakenly passed as an entry. We go further than that and accept\n # only a tuple, which allows simpler documentation and exception\n # messages.\n if not isinstance(entry, tuple):\n raise TypeError(\n \"Each external entry must be a tuple of (name, offset, size)\")\n name, offset, size = entry # raise ValueError without three elements\n name = filename_encode(name)\n offset = operator.index(offset)\n size = operator.index(size)\n return (name, offset, size)\n\ndef _normalize_external(external):\n \"\"\" Normalize external into a well-formed list of tuples and return. 
\"\"\"\n if external is None:\n return []\n try:\n # Accept a solitary name---a str, bytes, or os.PathLike acceptable to\n # filename_encode.\n return [_external_entry((external, 0, h5f.UNLIMITED))]\n except TypeError:\n pass\n # Check and rebuild each entry to be well-formed.\n return [_external_entry(entry) for entry in external]\n\nclass FilterRefBase(Mapping):\n \"\"\"Base class for referring to an HDF5 and describing its options\n\n Your subclass must define filter_id, and may define a filter_options tuple.\n \"\"\"\n filter_id = None\n filter_options = ()\n\n # Mapping interface supports using instances as **kwargs for compatibility\n # with older versions of h5py\n @property\n def _kwargs(self):\n return {\n 'compression': self.filter_id,\n 'compression_opts': self.filter_options\n }\n\n def __hash__(self):\n return hash((self.filter_id, self.filter_options))\n\n def __len__(self):\n return len(self._kwargs)\n\n def __iter__(self):\n return iter(self._kwargs)\n\n def __getitem__(self, item):\n return self._kwargs[item]\n\nclass Gzip(FilterRefBase):\n filter_id = h5z.FILTER_DEFLATE\n\n def __init__(self, level=DEFAULT_GZIP):\n self.filter_options = (level,)\n\ndef fill_dcpl(plist, shape, dtype, chunks, compression, compression_opts,\n shuffle, fletcher32, maxshape, scaleoffset, external):\n \"\"\" Generate a dataset creation property list.\n\n Undocumented and subject to change without warning.\n \"\"\"\n\n if shape is None or shape == ():\n shapetype = 'Empty' if shape is None else 'Scalar'\n if any((chunks, compression, compression_opts, shuffle, fletcher32,\n scaleoffset is not None)):\n raise TypeError(\n f\"{shapetype} datasets don't support chunk/filter options\"\n )\n if maxshape and maxshape != ():\n raise TypeError(f\"{shapetype} datasets cannot be extended\")\n return h5p.create(h5p.DATASET_CREATE)\n\n def rq_tuple(tpl, name):\n \"\"\" Check if chunks/maxshape match dataset rank \"\"\"\n if tpl in (None, True):\n return\n try:\n tpl = tuple(tpl)\n except TypeError:\n raise TypeError('\"%s\" argument must be None or a sequence object' % name)\n if len(tpl) != len(shape):\n raise ValueError('\"%s\" must have same rank as dataset shape' % name)\n\n rq_tuple(chunks, 'chunks')\n rq_tuple(maxshape, 'maxshape')\n\n if compression is not None:\n if isinstance(compression, FilterRefBase):\n compression_opts = compression.filter_options\n compression = compression.filter_id\n\n if compression not in encode and not isinstance(compression, int):\n raise ValueError('Compression filter \"%s\" is unavailable' % compression)\n\n if compression == 'gzip':\n if compression_opts is None:\n gzip_level = DEFAULT_GZIP\n elif compression_opts in range(10):\n gzip_level = compression_opts\n else:\n raise ValueError(\"GZIP setting must be an integer from 0-9, not %r\" % compression_opts)\n\n elif compression == 'lzf':\n if compression_opts is not None:\n raise ValueError(\"LZF compression filter accepts no options\")\n\n elif compression == 'szip':\n if compression_opts is None:\n compression_opts = DEFAULT_SZIP\n\n err = \"SZIP options must be a 2-tuple ('ec'|'nn', even integer 0-32)\"\n try:\n szmethod, szpix = compression_opts\n except TypeError:\n raise TypeError(err)\n if szmethod not in ('ec', 'nn'):\n raise ValueError(err)\n if not (0<szpix<=32 and szpix%2 == 0):\n raise ValueError(err)\n\n elif compression_opts is not None:\n # Can't specify just compression_opts by itself.\n raise TypeError(\"Compression method must be specified\")\n\n if scaleoffset is not None:\n # scaleoffset must be an 
integer when it is not None or False,\n # except for integral data, for which scaleoffset == True is\n # permissible (will use SO_INT_MINBITS_DEFAULT)\n\n if scaleoffset < 0:\n raise ValueError('scale factor must be >= 0')\n\n if dtype.kind == 'f':\n if scaleoffset is True:\n raise ValueError('integer scaleoffset must be provided for '\n 'floating point types')\n elif dtype.kind in ('u', 'i'):\n if scaleoffset is True:\n scaleoffset = h5z.SO_INT_MINBITS_DEFAULT\n else:\n raise TypeError('scale/offset filter only supported for integer '\n 'and floating-point types')\n\n # Scale/offset following fletcher32 in the filter chain will (almost?)\n # always triggers a read error, as most scale/offset settings are\n # lossy. Since fletcher32 must come first (see comment below) we\n # simply prohibit the combination of fletcher32 and scale/offset.\n if fletcher32:\n raise ValueError('fletcher32 cannot be used with potentially lossy'\n ' scale/offset filter')\n\n external = _normalize_external(external)\n # End argument validation\n\n if (chunks is True) or \\\n (chunks is None and any((shuffle, fletcher32, compression, maxshape,\n scaleoffset is not None))):\n chunks = guess_chunk(shape, maxshape, dtype.itemsize)\n\n if maxshape is True:\n maxshape = (None,)*len(shape)\n\n if chunks is not None:\n plist.set_chunk(chunks)\n plist.set_fill_time(h5d.FILL_TIME_ALLOC) # prevent resize glitch\n\n # scale-offset must come before shuffle and compression\n if scaleoffset is not None:\n if dtype.kind in ('u', 'i'):\n plist.set_scaleoffset(h5z.SO_INT, scaleoffset)\n else: # dtype.kind == 'f'\n plist.set_scaleoffset(h5z.SO_FLOAT_DSCALE, scaleoffset)\n\n for item in external:\n plist.set_external(*item)\n\n if shuffle:\n plist.set_shuffle()\n\n if compression == 'gzip':\n plist.set_deflate(gzip_level)\n elif compression == 'lzf':\n plist.set_filter(h5z.FILTER_LZF, h5z.FLAG_OPTIONAL)\n elif compression == 'szip':\n opts = {'ec': h5z.SZIP_EC_OPTION_MASK, 'nn': h5z.SZIP_NN_OPTION_MASK}\n plist.set_szip(opts[szmethod], szpix)\n elif isinstance(compression, int):\n if not h5z.filter_avail(compression):\n raise ValueError(\"Unknown compression filter number: %s\" % compression)\n\n plist.set_filter(compression, h5z.FLAG_OPTIONAL, compression_opts)\n\n # `fletcher32` must come after `compression`, otherwise, if `compression`\n # is \"szip\" and the data is 64bit, the fletcher32 checksum will be wrong\n # (see GitHub issue #953).\n if fletcher32:\n plist.set_fletcher32()\n\n return plist\n\ndef get_filters(plist):\n \"\"\" Extract a dictionary of active filters from a DCPL, along with\n their settings.\n\n Undocumented and subject to change without warning.\n \"\"\"\n\n filters = {h5z.FILTER_DEFLATE: 'gzip', h5z.FILTER_SZIP: 'szip',\n h5z.FILTER_SHUFFLE: 'shuffle', h5z.FILTER_FLETCHER32: 'fletcher32',\n h5z.FILTER_LZF: 'lzf', h5z.FILTER_SCALEOFFSET: 'scaleoffset'}\n\n pipeline = {}\n\n nfilters = plist.get_nfilters()\n\n for i in range(nfilters):\n\n code, _, vals, _ = plist.get_filter(i)\n\n if code == h5z.FILTER_DEFLATE:\n vals = vals[0] # gzip level\n\n elif code == h5z.FILTER_SZIP:\n mask, pixels = vals[0:2]\n if mask & h5z.SZIP_EC_OPTION_MASK:\n mask = 'ec'\n elif mask & h5z.SZIP_NN_OPTION_MASK:\n mask = 'nn'\n else:\n raise TypeError(\"Unknown SZIP configuration\")\n vals = (mask, pixels)\n elif code == h5z.FILTER_LZF:\n vals = None\n else:\n if len(vals) == 0:\n vals = None\n\n pipeline[filters.get(code, str(code))] = vals\n\n return pipeline\n\nCHUNK_BASE = 16*1024 # Multiplier by which chunks are 
adjusted\nCHUNK_MIN = 8*1024 # Soft lower limit (8k)\nCHUNK_MAX = 1024*1024 # Hard upper limit (1M)\n\ndef guess_chunk(shape, maxshape, typesize):\n \"\"\" Guess an appropriate chunk layout for a dataset, given its shape and\n the size of each element in bytes. Will allocate chunks only as large\n as MAX_SIZE. Chunks are generally close to some power-of-2 fraction of\n each axis, slightly favoring bigger values for the last index.\n\n Undocumented and subject to change without warning.\n \"\"\"\n # pylint: disable=unused-argument\n\n # For unlimited dimensions we have to guess 1024\n shape = tuple((x if x!=0 else 1024) for i, x in enumerate(shape))\n\n ndims = len(shape)\n if ndims == 0:\n raise ValueError(\"Chunks not allowed for scalar datasets.\")\n\n chunks = np.array(shape, dtype='=f8')\n if not np.all(np.isfinite(chunks)):\n raise ValueError(\"Illegal value in chunk tuple\")\n\n # Determine the optimal chunk size in bytes using a PyTables expression.\n # This is kept as a float.\n dset_size = np.product(chunks)*typesize\n target_size = CHUNK_BASE * (2**np.log10(dset_size/(1024.*1024)))\n\n if target_size > CHUNK_MAX:\n target_size = CHUNK_MAX\n elif target_size < CHUNK_MIN:\n target_size = CHUNK_MIN\n\n idx = 0\n while True:\n # Repeatedly loop over the axes, dividing them by 2. Stop when:\n # 1a. We're smaller than the target chunk size, OR\n # 1b. We're within 50% of the target chunk size, AND\n # 2. The chunk is smaller than the maximum chunk size\n\n chunk_bytes = np.product(chunks)*typesize\n\n if (chunk_bytes < target_size or \\\n abs(chunk_bytes-target_size)/target_size < 0.5) and \\\n chunk_bytes < CHUNK_MAX:\n break\n\n if np.product(chunks) == 1:\n break # Element size larger than CHUNK_MAX\n\n chunks[idx%ndims] = np.ceil(chunks[idx%ndims] / 2.0)\n idx += 1\n\n return tuple(int(x) for x in chunks)\n"
]
| [
[
"numpy.product",
"numpy.array",
"numpy.ceil",
"numpy.isfinite",
"numpy.log10"
]
]
|
Brym-Gyimah/mmdetection | [
"96abfd90cf0e38c5ce398795f949e9328eb85c1b"
]
| [
"mmdet/datasets/xml_style.py"
]
| [
"# Copyright (c) OpenMMLab. All rights reserved.\nimport os.path as osp\nimport xml.etree.ElementTree as ET\n\nimport mmcv\nimport numpy as np\nfrom PIL import Image\n\nfrom .builder import DATASETS\nfrom .custom import CustomDataset\n\n\[email protected]_module()\nclass XMLDataset(CustomDataset):\n \"\"\"XML dataset for detection.\n\n Args:\n min_size (int | float, optional): The minimum size of bounding\n boxes in the images. If the size of a bounding box is less than\n ``min_size``, it would be add to ignored field.\n img_subdir (str): Subdir where images are stored. Default: JPEGImages.\n ann_subdir (str): Subdir where annotations are. Default: Annotations.\n \"\"\"\n\n def __init__(self,\n min_size=None,\n img_subdir='JPEGImages',\n ann_subdir='Annotations',\n **kwargs):\n assert self.CLASSES or kwargs.get(\n 'classes', None), 'CLASSES in `XMLDataset` can not be None.'\n self.img_subdir = img_subdir\n self.ann_subdir = ann_subdir\n super(XMLDataset, self).__init__(**kwargs)\n self.cat2label = {cat: i for i, cat in enumerate(self.CLASSES)}\n self.min_size = min_size\n\n def load_annotations(self, ann_file):\n \"\"\"Load annotation from XML style ann_file.\n\n Args:\n ann_file (str): Path of XML file.\n\n Returns:\n list[dict]: Annotation info from XML file.\n \"\"\"\n\n data_infos = []\n img_ids = mmcv.list_from_file(ann_file)\n for img_id in img_ids:\n filename = osp.join(self.img_subdir, f'{img_id}.jpg')\n xml_path = osp.join(self.img_prefix, self.ann_subdir,\n f'{img_id}.xml')\n tree = ET.parse(xml_path)\n root = tree.getroot()\n size = root.find('size')\n if size is not None:\n width = int(size.find('width').text)\n height = int(size.find('height').text)\n else:\n img_path = osp.join(self.img_prefix, filename)\n img = Image.open(img_path)\n width, height = img.size\n data_infos.append(\n dict(id=img_id, filename=filename, width=width, height=height))\n\n return data_infos\n\n def _filter_imgs(self, min_size=32):\n \"\"\"Filter images too small or without annotation.\"\"\"\n valid_inds = []\n for i, img_info in enumerate(self.data_infos):\n if min(img_info['width'], img_info['height']) < min_size:\n continue\n if self.filter_empty_gt:\n img_id = img_info['id']\n xml_path = osp.join(self.img_prefix, self.ann_subdir,\n f'{img_id}.xml')\n tree = ET.parse(xml_path)\n root = tree.getroot()\n for obj in root.findall('object'):\n name = obj.find('name').text\n if name in self.CLASSES:\n valid_inds.append(i)\n break\n else:\n valid_inds.append(i)\n return valid_inds\n\n def get_ann_info(self, idx):\n \"\"\"Get annotation from XML file by index.\n\n Args:\n idx (int): Index of data.\n\n Returns:\n dict: Annotation info of specified index.\n \"\"\"\n\n img_id = self.data_infos[idx]['id']\n xml_path = osp.join(self.img_prefix, self.ann_subdir, f'{img_id}.xml')\n tree = ET.parse(xml_path)\n root = tree.getroot()\n bboxes = []\n labels = []\n bboxes_ignore = []\n labels_ignore = []\n for obj in root.findall('object'):\n name = obj.find('name').text\n if name not in self.CLASSES:\n continue\n label = self.cat2label[name]\n difficult = obj.find('difficult')\n difficult = 0 if difficult is None else int(difficult.text)\n bnd_box = obj.find('bndbox')\n # TODO: check whether it is necessary to use int\n # Coordinates may be float type\n bbox = [\n int(float(bnd_box.find('xmin').text)),\n int(float(bnd_box.find('ymin').text)),\n int(float(bnd_box.find('xmax').text)),\n int(float(bnd_box.find('ymax').text))\n ]\n ignore = False\n if self.min_size:\n assert not self.test_mode\n w = bbox[2] - bbox[0]\n h 
= bbox[3] - bbox[1]\n if w < self.min_size or h < self.min_size:\n ignore = True\n if difficult or ignore:\n bboxes_ignore.append(bbox)\n labels_ignore.append(label)\n else:\n bboxes.append(bbox)\n labels.append(label)\n if not bboxes:\n bboxes = np.zeros((0, 4))\n labels = np.zeros((0, ))\n else:\n bboxes = np.array(bboxes, ndmin=2) - 1\n labels = np.array(labels)\n if not bboxes_ignore:\n bboxes_ignore = np.zeros((0, 4))\n labels_ignore = np.zeros((0, ))\n else:\n bboxes_ignore = np.array(bboxes_ignore, ndmin=2) - 1\n labels_ignore = np.array(labels_ignore)\n ann = dict(\n bboxes=bboxes.astype(np.float32),\n labels=labels.astype(np.int64),\n bboxes_ignore=bboxes_ignore.astype(np.float32),\n labels_ignore=labels_ignore.astype(np.int64))\n return ann\n\n def get_cat_ids(self, idx):\n \"\"\"Get category ids in XML file by index.\n\n Args:\n idx (int): Index of data.\n\n Returns:\n list[int]: All categories in the image of specified index.\n \"\"\"\n\n cat_ids = []\n img_id = self.data_infos[idx]['id']\n xml_path = osp.join(self.img_prefix, self.ann_subdir, f'{img_id}.xml')\n tree = ET.parse(xml_path)\n root = tree.getroot()\n for obj in root.findall('object'):\n name = obj.find('name').text\n if name not in self.CLASSES:\n continue\n label = self.cat2label[name]\n cat_ids.append(label)\n\n return cat_ids\n"
]
| [
[
"numpy.array",
"numpy.zeros"
]
]
|
shiwanghua/Rein-Bits | [
"b7e594673c9d4ce1682b01cd119a4499128688e8"
]
| [
"pictures/programs/hem_exp3_n.py"
]
| [
"import matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom matplotlib import rc\nfrom matplotlib.pyplot import MultipleLocator\nrc('mathtext', default='regular')\n\nplt.rcParams['axes.unicode_minus'] = False\nplt.rcParams['font.family'] = ['Times New Roman'] # \nName = [\"REIN\", \"HEM\", \"Simple\", \"TAMA\", \"Ada-REIN\", \"OpIndex\"]\nx = [\"0.3M\",\"1M\",\"3M\",\"5M\",\"7M\",\"9M\"]\n\nRein = [2.713345, 8.232349, 21.414993, 37.706974, 55.908721, 69.915062]\nHEM = [0.195233, 0.618994, 1.962566, 3.380389, 4.932515, 12.299295]\n# Simple = [4.038256, 13.878217, 40.736844, 68.019431, 94.269526, 121.051676]\nTAMA = [0.704289, 3.219745, 13.40329, 26.586104, 50.579369, 81.963197]\nAdaREIN = [2.720448, 8.021952, 20.41238, 36.483386, 54.157477, 72.381039]\nOpIndex = [5.371004, 18.298927, 55.104239, 91.46666, 132.142078, 182.600677]\n\nlsize=24\n\nfig=plt.figure(figsize=(5, 4))\nax = fig.add_subplot(111)\nax.set_xlabel('Number of Subscriptions', fontsize=lsize)\nax.set_ylabel('Matching Time (ms)', fontsize=lsize)\n# plt.xticks(range(0,10))\nax.plot(x, Rein, marker='v', color='r', label=Name[0])\nax.plot(x, HEM, marker='.', color='DODGERBLUE', label=Name[1])\n# ax.plot(x, Simple, marker='D', color='deepskyblue', label=Name[2]) #\nax.plot(x, TAMA, marker='*', color='DarkCyan', label=Name[3])\nax.plot(x, AdaREIN, marker='x', color='DarkMagenta', label=Name[4])\nax.plot(x, OpIndex, marker='h', color='DimGray', label=Name[5]) # slategray\n\nax.legend(fontsize=12, ncol=2,loc='lower right') #fontsize=10 loc=(1.36/5,0.05/5),\nax.grid()\nax.set_xlim(0,5)\nax.set_xticks([0,1,2,3,4,5])\nax.set_xticklabels(x)\nax.set_yscale(\"log\")\n# ax.set_yticks([0,2,8,32,128,256])\n# ax.set_yticklabels(['-1', '0', '1'])\nax.set_zorder(0)\nplt.tick_params(labelsize=22)\ngcf = plt.gcf()\nplt.show()\ngcf.savefig('../exp3_n.eps',format='eps',bbox_inches='tight')"
]
| [
[
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tick_params",
"matplotlib.rc",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.show"
]
]
|
PMBio/GNetLMM | [
"103d6433ff6d4a13b5787c116032fda268dc4302"
]
| [
"GNetLMM/pycore/io/writer.py"
]
| [
"import pdb\nimport numpy as np\nimport csv\n\nclass Writer:\n \"\"\"\n basic class for writing out experiments\n \"\"\"\n def __init__(self, basefile):\n \"\"\"\n constructor\n\n input:\n basefile : name of basefile\n \"\"\"\n self.basefile = basefile\n\n\n def _writeInfo(self,data,which):\n \"\"\"\n writing out column (which=cols) or row (which=rows) information\n\n input:\n data : dictionary, containing information\n which : specifies file ending\n \"\"\"\n with open(self.basefile + '.' + which, \"wb\") as outfile:\n csv_writer = csv.writer(outfile,delimiter=' ')\n csv_writer.writerow(data.keys())\n csv_writer.writerows(zip(*data.values()))\n \n def writeColumnInfo(self,data):\n \"\"\"\n writing out column information\n \"\"\"\n self._writeInfo(data, 'cols')\n \n def writeRowInfo(self,data):\n \"\"\"\n writing out row information\n \"\"\"\n self._writeInfo(data, 'rows')\n\n def writeMatrix(self,M,**kwargs):\n \"\"\"\n writing out matrix\n\n input:\n M : data matrix\n \"\"\"\n np.savetxt(self.basefile + '.matrix', M,**kwargs)\n"
]
| [
[
"numpy.savetxt"
]
]
|
mstypulk/qiskit-terra | [
"058feb06657ec4b598cc65216288bdd984550d00"
]
| [
"qiskit/extensions/standard/cz.py"
]
| [
"# -*- coding: utf-8 -*-\n\n# Copyright 2017, IBM.\n#\n# This source code is licensed under the Apache License, Version 2.0 found in\n# the LICENSE.txt file in the root directory of this source tree.\n\n# pylint: disable=invalid-name\n\n\"\"\"\ncontrolled-Phase gate.\n\"\"\"\n\nimport numpy\n\nfrom qiskit.circuit import CompositeGate\nfrom qiskit.circuit import Gate\nfrom qiskit.circuit import QuantumCircuit\nfrom qiskit.circuit import QuantumRegister\nfrom qiskit.circuit.decorators import _op_expand, _to_bits\nfrom qiskit.extensions.standard.h import HGate\nfrom qiskit.extensions.standard.cx import CnotGate\n\n\nclass CzGate(Gate):\n \"\"\"controlled-Z gate.\"\"\"\n\n def __init__(self):\n \"\"\"Create new CZ gate.\"\"\"\n super().__init__(\"cz\", 2, [])\n\n def _define(self):\n \"\"\"\n gate cz a,b { h b; cx a,b; h b; }\n \"\"\"\n definition = []\n q = QuantumRegister(2, \"q\")\n rule = [\n (HGate(), [q[1]], []),\n (CnotGate(), [q[0], q[1]], []),\n (HGate(), [q[1]], [])\n ]\n for inst in rule:\n definition.append(inst)\n self.definition = definition\n\n def inverse(self):\n \"\"\"Invert this gate.\"\"\"\n return CzGate() # self-inverse\n\n def to_matrix(self):\n \"\"\"Return a Numpy.array for the Cz gate.\"\"\"\n return numpy.array([[1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, -1]], dtype=complex)\n\n\n@_to_bits(2)\n@_op_expand(2)\ndef cz(self, ctl, tgt):\n \"\"\"Apply CZ to circuit.\"\"\"\n return self.append(CzGate(), [ctl, tgt], [])\n\n\nQuantumCircuit.cz = cz\nCompositeGate.cz = cz\n"
]
| [
[
"numpy.array"
]
]
|
p-glaum/PyPSA | [
"a8cfdf1acd9b348828474ad0899afe2c77818159"
]
| [
"examples/sector-coupling/power-to-gas-boiler-chp.py"
]
| [
"# -*- coding: utf-8 -*-\n## Power to Gas Example with Optional Coupling to Heat Sector (via Boiler OR Combined-Heat-and-Power (CHP))\n#\n# A location has an electric, gas and heat bus. The primary source is wind power, which can be converted to gas. The gas can be stored to convert into electricity or heat (with either a boiler or a CHP).\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom pyomo.environ import Constraint\n\nimport pypsa\n\n#%matplotlib inline\n\n### Combined-Heat-and-Power (CHP) parameterisation\n\n# follows http://www.ea-energianalyse.dk/reports/student-reports/integration_of_50_percent_wind%20power.pdf pages 35-6\n\n# which follows http://www.sciencedirect.com/science/article/pii/030142159390282K\n\n# ratio between max heat output and max electric output\nnom_r = 1.0\n\n# backpressure limit\nc_m = 0.75\n\n# marginal loss for each additional generation of heat\nc_v = 0.15\n\n# Graph for the case that max heat output equals max electric output\n\nfig, ax = plt.subplots(1, 1)\n\nfig.set_size_inches((5, 5))\n\nt = 0.01\n\nph = np.arange(0, 1.0001, t)\n\nax.plot(ph, c_m * ph)\n\nax.set_xlabel(\"P_heat_out\")\n\nax.set_ylabel(\"P_elec_out\")\n\nax.grid(True)\n\nax.set_xlim([0, 1.1])\nax.set_ylim([0, 1.1])\n\nax.text(0.1, 0.7, \"Allowed output\", color=\"r\")\n\nax.plot(ph, 1 - c_v * ph)\n\nfor i in range(1, 10):\n k = 0.1 * i\n x = np.arange(0, k / (c_m + c_v), t)\n ax.plot(x, k - c_v * x, color=\"g\", alpha=0.5)\n\nax.text(0.05, 0.41, \"iso-fuel-lines\", color=\"g\", rotation=-7)\n\nax.fill_between(ph, c_m * ph, 1 - c_v * ph, facecolor=\"r\", alpha=0.5)\n\nfig.tight_layout()\n\nif False:\n fig.savefig(\"chp_feasible.pdf\", transparent=True)\n\n### Now do optimisation\n\nheat = True\nchp = True\n\n\nnetwork = pypsa.Network()\n\nnetwork.set_snapshots(pd.date_range(\"2016-01-01 00:00\", \"2016-01-01 03:00\", freq=\"H\"))\n\nnetwork.add(\"Bus\", \"0\", carrier=\"AC\")\n\nnetwork.add(\"Bus\", \"0 gas\", carrier=\"gas\")\n\nnetwork.add(\"Carrier\", \"wind\")\n\nnetwork.add(\"Carrier\", \"gas\", co2_emissions=0.2)\n\nnetwork.add(\"GlobalConstraint\", \"co2_limit\", sense=\"<=\", constant=0.0)\n\n\nnetwork.add(\n \"Generator\",\n \"wind turbine\",\n bus=\"0\",\n carrier=\"wind\",\n p_nom_extendable=True,\n p_max_pu=[0.0, 0.2, 0.7, 0.4],\n capital_cost=1000,\n)\n\nnetwork.add(\"Load\", \"load\", bus=\"0\", p_set=5.0)\n\n\nnetwork.add(\n \"Link\",\n \"P2G\",\n bus0=\"0\",\n bus1=\"0 gas\",\n efficiency=0.6,\n capital_cost=1000,\n p_nom_extendable=True,\n)\n\nnetwork.add(\n \"Link\",\n \"generator\",\n bus0=\"0 gas\",\n bus1=\"0\",\n efficiency=0.468,\n capital_cost=400,\n p_nom_extendable=True,\n)\n\n\nnetwork.add(\"Store\", \"gas depot\", bus=\"0 gas\", e_cyclic=True, e_nom_extendable=True)\n\n\nif heat:\n\n network.add(\"Bus\", \"0 heat\", carrier=\"heat\")\n\n network.add(\"Carrier\", \"heat\")\n\n network.add(\"Load\", \"heat load\", bus=\"0 heat\", p_set=10.0)\n\n network.add(\n \"Link\",\n \"boiler\",\n bus0=\"0 gas\",\n bus1=\"0 heat\",\n efficiency=0.9,\n capital_cost=300,\n p_nom_extendable=True,\n )\n\n network.add(\n \"Store\", \"water tank\", bus=\"0 heat\", e_cyclic=True, e_nom_extendable=True\n )\n\n\nif heat and chp:\n\n # Guarantees ISO fuel lines, i.e. 
fuel consumption p_b0 + p_g0 = constant along p_g1 + c_v p_b1 = constant\n network.links.at[\"boiler\", \"efficiency\"] = (\n network.links.at[\"generator\", \"efficiency\"] / c_v\n )\n\n def extra_functionality(network, snapshots):\n\n # Guarantees heat output and electric output nominal powers are proportional\n network.model.chp_nom = Constraint(\n rule=lambda model: network.links.at[\"generator\", \"efficiency\"]\n * nom_r\n * model.link_p_nom[\"generator\"]\n == network.links.at[\"boiler\", \"efficiency\"] * model.link_p_nom[\"boiler\"]\n )\n\n # Guarantees c_m p_b1 \\leq p_g1\n def backpressure(model, snapshot):\n return (\n c_m\n * network.links.at[\"boiler\", \"efficiency\"]\n * model.link_p[\"boiler\", snapshot]\n <= network.links.at[\"generator\", \"efficiency\"]\n * model.link_p[\"generator\", snapshot]\n )\n\n network.model.backpressure = Constraint(list(snapshots), rule=backpressure)\n\n # Guarantees p_g1 +c_v p_b1 \\leq p_g1_nom\n def top_iso_fuel_line(model, snapshot):\n return (\n model.link_p[\"boiler\", snapshot] + model.link_p[\"generator\", snapshot]\n <= model.link_p_nom[\"generator\"]\n )\n\n network.model.top_iso_fuel_line = Constraint(\n list(snapshots), rule=top_iso_fuel_line\n )\n\nelse:\n extra_functionality = None\n\nnetwork.lopf(network.snapshots, extra_functionality=extra_functionality)\nprint(\"Objective:\", network.objective)\n\nnetwork.loads_t.p\n\nnetwork.links.p_nom_opt\n\n# CHP is dimensioned by the heat demand met in three hours when no wind\n4 * 10.0 / 3 / network.links.at[\"boiler\", \"efficiency\"]\n\n# elec is set by the heat demand\n28.490028 * 0.15\n\nnetwork.links_t[\"p0\"]\n\nnetwork.links_t[\"p1\"]\n\nprint(pd.DataFrame({attr: network.stores_t[attr][\"gas depot\"] for attr in [\"p\", \"e\"]}))\n\nif heat:\n print(\n pd.DataFrame(\n {attr: network.stores_t[attr][\"water tank\"] for attr in [\"p\", \"e\"]}\n )\n )\n print(\n pd.DataFrame({attr: network.links_t[attr][\"boiler\"] for attr in [\"p0\", \"p1\"]})\n )\n\nprint(network.stores.loc[\"gas depot\"])\n\nprint(network.generators.loc[\"wind turbine\"])\n\nprint(network.links.p_nom_opt)\n\n# Calculate the overall efficiency of the CHP\n\neta_elec = network.links.at[\"generator\", \"efficiency\"]\n\nr = 1 / c_m\n\n# P_h = r*P_e\n\nprint((1 + r) / ((1 / eta_elec) * (1 + c_v * r)))\n"
]
| [
[
"pandas.DataFrame",
"pandas.date_range",
"numpy.arange",
"matplotlib.pyplot.subplots"
]
]
|
seanandrews/ARAA | [
"6c95f88f5619642b6914c611ba6c902b5412ab29"
]
| [
"mdisk_dist.py"
]
| [
"import numpy as np\nimport os\nimport sys\nfrom astropy.io import ascii\nfrom km_estimator import km_estimator\n\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nplt.style.use('araa')\nfrom matplotlib import rc\nrc('text.latex', preamble=r'\\usepackage{amsmath}')\nrc(\"font\", **{\"family\": \"serif\", \"serif\": [\"Palatino\"]})\nrc(\"text\", usetex = True)\n\n\n# set up plot\nfig = plt.figure(figsize=(6.33, 2.5))\ngs = gridspec.GridSpec(1, 1)\nax = fig.add_subplot(gs[0, 0])\n\n# set up axes, labels\nplims = [0., 1.]\nMlims = [0.0125, 8000.]\t\t# earth masses\n\nax.set_xlim(Mlims)\nax.set_xscale('log')\nax.set_xticks([0.1, 1, 10, 100, 1000])\nax.set_xticklabels(['0.1', '1', '10', '100', '1000'])\nax.set_xlabel('$M \\;$ (M$_{\\\\boldsymbol{\\oplus}}$)')\n\nax.set_ylim(plims)\nax.yaxis.get_ticklocs(minor=True)\nax.minorticks_on()\nax.set_ylabel('$p$ ($\\ge M$)')\n\n# show the spectral index test?\nshow_test = False\n\n\n\n### Load the database \n\n# safe copy + load\nos.system('cp -r DISKS.csv temp.csv')\ndb = ascii.read('temp.csv', format='csv', fast_reader=True)\nprint(np.unique(db['SFR']))\n\n# baseline selections\nbase = ( (db['FL_MULT'] != 'J') & (db['FL_MULT'] != 'HJ') & \n (db['FL_MULT'] != 'HJB') & (db['SFR'] != 'sOri') & \n (db['SED'] != 'I') & (db['SED'] != 'III') & (db['SED'] != 'DEBRIS') ) \n\n\n### Set some constants\nd_ref, nu_ref = 150., 340.\nh, c, k = 6.626e-27, 2.9979e10, 1.381e-16\npc, mearth, mjup = 3.0857e18, 5.974e27, 1.898e30\nkappa, Tdust, alp = 3.5, 20., 2.2\n\n\n### Calculate CDFs for a test of the B6-->B7 conversion\nif (show_test == True):\n dualband = ( (db['FL_B7'] == 0) & (db['FL_B6'] == 0) & base )\n flags = (db['FL_B7'][dualband] == 1)\n L7 = 1e-3 * db['F_B7'][dualband] * (db['DPC'][dualband] / d_ref)**2\n nu7 = db['nu_B7'][dualband] * 1e9\n L6 = 1e-3 * db['F_B6'][dualband] * (db['DPC'][dualband] / d_ref)**2\n nu6 = db['nu_B6'][dualband] * 1e9\n\n Bnu = (2 * h * nu7**3 / c**2) / (np.exp(h * nu7 / (k * Tdust)) - 1)\n M7 = (d_ref * pc)**2 * 1e-23 * L7 / (kappa * Bnu) \n M6 = (d_ref * pc)**2 * 1e-23 * L6 * (nu7 / nu6)**alp / (kappa * Bnu) \n\n Ms7, pMs7, epMs7, mukm = km_estimator(M7 / mearth, flags)\n Ms6, pMs6, epMs6, mukm = km_estimator(M6 / mearth, flags)\n\n ax.fill_between(Ms7, pMs7+epMs7, pMs7-epMs7, \n facecolor='C0', alpha=0.5, step='post')\n ax.plot(Ms7, pMs7, 'C0', drawstyle='steps-post')\n\n ax.fill_between(Ms6, pMs6+epMs6, pMs6-epMs6,\n facecolor='C1', alpha=0.5, step='post')\n ax.plot(Ms6, pMs6, 'C1', drawstyle='steps-post')\n\n\n### Selection and combination of mm luminosities\n# calculate luminosities and upper limits\nL7 = 1e-3 * db['F_B7'] * (nu_ref / db['nu_B7'])**alp * (db['DPC'] / d_ref)**2\nL6 = 1e-3 * db['F_B6'] * (nu_ref / db['nu_B6'])**alp * (db['DPC'] / d_ref)**2\nlimL7 = 1e-3 * db['LIM_B7'] * (nu_ref/db['nu_B7'])**alp * (db['DPC']/d_ref)**2\nlimL6 = 1e-3 * db['LIM_B6'] * (nu_ref/db['nu_B6'])**alp * (db['DPC']/d_ref)**2\n\n# Planck function at the reference frequency\nBnu = (2*h*(nu_ref*1e9)**3 / c**2) / (np.exp(h*nu_ref*1e9 / (k*Tdust)) - 1)\n\n# targets with B7 detections\ndetB7 = ( (db['FL_B7'] == 0) & base )\nM_detB7 = L7[detB7] * (d_ref * pc)**2 * 1e-23 / (kappa * Bnu) / mearth\nd_detB7 = (db['FL_B7'][detB7] == 1)\n\n# targets with **only** B6 detections (i.e., no B7 or B7 limit)\ndetB6 = ( (db['FL_B7'] != 0) & (db['FL_B6'] == 0) & base )\nM_detB6 = L6[detB6] * (d_ref * pc)**2 * 1e-23 / (kappa * Bnu) / mearth\nd_detB6 = (db['FL_B7'][detB6] == 0)\n\n# targets with **only** limits or missing data\n# (there 
should be **no entry** without a limit in at least B6 or B7)\nlims = ( (db['FL_B7'] != 0) & (db['FL_B6'] != 0) & base )\ndlims = np.ma.column_stack( (limL7[lims], limL6[lims]) )\nM_lims = np.ma.min(dlims, 1) * (d_ref * pc)**2 * 1e-23 / (kappa * Bnu) / mearth\nd_lims = np.ones(len(M_lims), dtype=bool)\n\n\n### Solid mass distribution\n# combine all sub-samples\nMs = np.ma.concatenate( (M_detB7, M_detB6, M_lims) )\nflags = np.ma.concatenate( (d_detB7, d_detB6, d_lims) )\nprint(len(Ms))\n\n# calculate the combined CDF\nMsolids, pMsolids, epMsolids, mukm = km_estimator(Ms, flags)\n\n\n\n### Gas mass distribution\ndets_Mg = ( (db['FL_Mgas'] == 0) & base )\nlims_Mg = ( (db['FL_Mgas'] == 1) & base )\nMg_dets = db['Mgas'][dets_Mg] * mjup / mearth\nMg_lims = db['Mgas'][lims_Mg] * mjup / mearth\nMg = np.ma.concatenate((Mg_dets, Mg_lims))\nflag_dets = np.zeros_like(Mg_dets)\nflag_lims = np.ones_like(Mg_lims)\nflaggs = np.ma.concatenate((flag_dets, flag_lims))\nflagg = (flaggs == 1)\n\n# cumulative distribution\nMgas, pMgas, epMgas, mukm = km_estimator(Mg, flagg)\n\n\n### Solids for these gas measurements\n# targets with B7 detections\nDdetB7 = ( (db['FL_B7'] == 0) & dets_Mg )\nDM_detB7 = L7[DdetB7] * (d_ref * pc)**2 * 1e-23 / (kappa * Bnu) / mearth\nDd_detB7 = (db['FL_B7'][DdetB7] == 1)\n\nLdetB7 = ( (db['FL_B7'] == 0) & lims_Mg )\nLM_detB7 = L7[LdetB7] * (d_ref * pc)**2 * 1e-23 / (kappa * Bnu) / mearth\nLd_detB7 = (db['FL_B7'][LdetB7] == 1)\n\n# targets with **only** B6 detections (i.e., no B7 or B7 limit)\nDdetB6 = ( (db['FL_B7'] != 0) & (db['FL_B6'] == 0) & dets_Mg )\nDM_detB6 = L6[DdetB6] * (d_ref * pc)**2 * 1e-23 / (kappa * Bnu) / mearth\nDd_detB6 = (db['FL_B7'][DdetB6] == 0)\n\nLdetB6 = ( (db['FL_B7'] != 0) & (db['FL_B6'] == 0) & lims_Mg )\nLM_detB6 = L6[LdetB6] * (d_ref * pc)**2 * 1e-23 / (kappa * Bnu) / mearth\nLd_detB6 = (db['FL_B7'][LdetB6] == 0)\n\n# targets with **only** limits or missing data\n# (there should be **no entry** without a limit in at least B6 or B7)\nDlims = ( (db['FL_B7'] != 0) & (db['FL_B6'] != 0) & dets_Mg )\nDdlims = np.ma.column_stack( (limL7[Dlims], limL6[Dlims]) )\nDM_lims = np.ma.min(Ddlims, 1)*(d_ref * pc)**2 * 1e-23 / (kappa * Bnu) / mearth\nDd_lims = np.ones(len(DM_lims), dtype=bool)\n\nLlims = ( (db['FL_B7'] != 0) & (db['FL_B6'] != 0) & lims_Mg )\nLdlims = np.ma.column_stack( (limL7[Llims], limL6[Llims]) )\nLM_lims = np.ma.min(Ldlims, 1)*(d_ref * pc)**2 * 1e-23 / (kappa * Bnu) / mearth\nLd_lims = np.ones(len(LM_lims), dtype=bool)\n\n### Solid mass distribution\n# combine all sub-samples\nMsg = np.ma.concatenate( (DM_detB7, LM_detB7, DM_detB6, LM_detB6, DM_lims, LM_lims) )\nflags = np.ma.concatenate( (Dd_detB7, Ld_detB7, Dd_detB6, Ld_detB6, Dd_lims, Ld_lims) )\n\nMgs, pMgs, epMgs, mukm = km_estimator(Msg, flagg)\n\n\n### Plot the distributions \nax.fill_between(Mgs, pMgs+epMgs, pMgs-epMgs,\n facecolor='orange', alpha=0.3, step='post')\nax.plot(Mgs, pMgs, 'orange', drawstyle='steps-post', alpha=0.6, linewidth=3)\n\nax.fill_between(Mgas, pMgas+epMgas, pMgas-epMgas,\n facecolor='gray', alpha=0.3, step='post')\nax.plot(Mgas, pMgas, 'gray', drawstyle='steps-post', linewidth=3)\n\nax.fill_between(Msolids, pMsolids+epMsolids, pMsolids-epMsolids, \n facecolor='m', alpha=0.3, step='post')\nax.plot(Msolids, pMsolids, 'm', drawstyle='steps-post', linewidth=3)\n\n\n### Annotations\nax.text(0.07, 0.78, '$\\\\boldsymbol{M_{\\\\rm s}}$', color='m', fontsize=13,\n ha='right')\nax.text(0.13, 0.695, '(all disks)', color='m', fontsize=10, \n horizontalalignment='right', 
alpha=0.8)\nax.text(0.6, 0.37, '$\\\\boldsymbol{M_{\\\\rm s}}$', color='orange', fontsize=13, \n ha='right')\nax.text(0.63, 0.285, '($M_{\\\\rm g}$ sample)', color='orange', fontsize=10, \n horizontalalignment='right', alpha=0.8)\nax.text(10, 0.78, '$\\\\boldsymbol{M_{\\\\rm g}}$', color='gray', fontsize=13)\n\nax.plot([35.,35.],[0., 0.45], ':', color='m', linewidth=2, zorder=0)\nax.plot([3000,3000],[0., 0.45], ':', color='gray', linewidth=2, zorder=0)\n\n\nfig.subplots_adjust(left=0.2, right=0.8, bottom=0.165, top=0.975)\nfig.savefig('mdisk_dist.pdf')\nfig.clf()\n"
]
| [
[
"numpy.zeros_like",
"numpy.ones_like",
"numpy.exp",
"matplotlib.pyplot.figure",
"matplotlib.rc",
"numpy.ma.column_stack",
"matplotlib.pyplot.style.use",
"numpy.ma.concatenate",
"numpy.unique",
"matplotlib.gridspec.GridSpec",
"numpy.ma.min"
]
]
|
wxy1224/cs224n_project | [
"6dbd7869622565d0eaa3ac7b1dacc569f33d5a18",
"6dbd7869622565d0eaa3ac7b1dacc569f33d5a18"
]
| [
"data_process/word_entity.py",
"src/train/pretrained_embedding_bidirectional_lstm_model.py"
]
| [
"# !/usr/bin/env python -W ignore::DeprecationWarning\nimport pandas as pd, numpy as np\nimport nltk\nfrom itertools import chain\nimport re\nimport nltk\nimport nltk.tag.stanford as st\nimport os\n\ntagger = st.StanfordNERTagger(\n '../../stanford-ner/classifiers/english.all.3class.distsim.crf.ser.gz',\n '../../stanford-ner/stanford-ner.jar')\n\n\ndef get_continuous_chunks(tag2, tagged_sent):\n continuous_chunk = []\n current_chunk = []\n\n for token, tag in tagged_sent:\n if tag == tag2:\n # if tag == \"PERSON\":\n current_chunk.append((token, tag))\n else:\n if current_chunk: # if the current chunk is not empty\n continuous_chunk.append(current_chunk)\n current_chunk = []\n # Flush the final current_chunk into the continuous_chunk, if any.\n if current_chunk:\n continuous_chunk.append(current_chunk)\n return continuous_chunk\n\n\ndef entity_list(train_file, label, tag, save_folder):\n train = pd.read_csv(train_file)\n # test = pd.read_csv('../input/test.csv')\n # subm = pd.read_csv('../input/sample_submission.csv')\n\n selected = train.loc[train[label] == 1]\n select_comments = selected[\"comment_text\"]\n comments = select_comments.as_matrix()\n # r=tagger.tag('John Eid is studying at Stanford University in NY'.split())\n # print(r)\n names = []\n count = 0\n for comment in comments:\n count += 1\n # if count<200:\n # \tcontinue\n r = tagger.tag(comment.split())\n c = get_continuous_chunks(tag, r)\n c2 = [\" \".join([token for token, tag in ne]) for ne in c]\n names = names + c2\n if count % 100 == 0:\n # print(names)\n print(label, count)\n namelist = names\n names = []\n filename = save_folder + 'entity_' + str(count) + '.txt'\n with open(filename, 'w') as file:\n for item in namelist:\n item = item.strip()\n # item = item.encode('utf-8').strip()\n file.write(\"%s\\n\" % item)\n\n\nif __name__ == '__main__':\n # create_folder(\"/names/\")\n train_file = '../input/train.csv'\n label = \"identity_hate\"\n save_folder = \"names/\"\n if not os.path.exists(save_folder):\n \tos.makedirs(save_folder)\n tag = \"PERSON\"\n entity_list(train_file, label, tag, save_folder)\n label2 = \"threat\"\n save_folder2 = \"names_threat/\"\n if not os.path.exists(save_folder2):\n os.makedirs(save_folder2)\n entity_list(train_file, label2, tag, save_folder2)\n\n\n\n\n\n\n\n\n# chunked = nltk.ne_chunk(comments[0], binary=True)\n\n# train_sents = list(nltk.corpus.conll2002.iob_sents('esp.train'))\n# test_sents = list(nltk.corpus.conll2002.iob_sents('esp.testb'))\n\n# def word2features(sent, i):\n# word = sent[i][0]\n# postag = sent[i][1]\n\n# features = {\n# 'bias': 1.0,\n# 'word.lower()': word.lower(),\n# 'word[-3:]': word[-3:],\n# 'word.isupper()': word.isupper(),\n# 'word.istitle()': word.istitle(),\n# 'word.isdigit()': word.isdigit(),\n# 'postag': postag,\n# 'postag[:2]': postag[:2],\n# }\n# if i > 0:\n# word1 = sent[i-1][0]\n# postag1 = sent[i-1][1]\n# features.update({\n# '-1:word.lower()': word1.lower(),\n# '-1:word.istitle()': word1.istitle(),\n# '-1:word.isupper()': word1.isupper(),\n# '-1:postag': postag1,\n# '-1:postag[:2]': postag1[:2],\n# })\n# else:\n# features['BOS'] = True\n\n# if i < len(sent)-1:\n# word1 = sent[i+1][0]\n# print(word1, sent[i+1])\n# postag1 = sent[i+1][1]\n# features.update({\n# '+1:word.lower()': word1.lower(),\n# '+1:word.istitle()': word1.istitle(),\n# '+1:word.isupper()': word1.isupper(),\n# '+1:postag': postag1,\n# '+1:postag[:2]': postag1[:2],\n# })\n# else:\n# features['EOS'] = True\n\n# return features\n\n\n# def sent2features(sent):\n# return [word2features(sent, i) 
for i in range(len(sent))]\n\n# def sent2labels(sent):\n# return [label for token, postag, label in sent]\n\n# def sent2tokens(sent):\n# return [token for token, postag, label in sent]\n\n# X_train = [sent2features(s) for s in train_sents]\n# y_train = [sent2labels(s) for s in train_sents]\n\n# X_test = [sent2features(s) for s in test_sents]\n# y_test = [sent2labels(s) for s in test_sents]\n\n\n# trainer = pycrfsuite.Trainer(verbose=False)\n\n# for xseq, yseq in zip(X_train, y_train):\n# trainer.append(xseq, yseq)\n\n# trainer.set_params({\n# 'c1': 1.0, # coefficient for L1 penalty\n# 'c2': 1e-3, # coefficient for L2 penalty\n# 'max_iterations': 50, # stop earlier\n\n# # include transitions that are possible, but not observed\n# 'feature.possible_transitions': True\n# })\n\n# trainer.train('conll2002-esp.crfsuite')\n\n# tagger = pycrfsuite.Tagger()\n# tagger.open('conll2002-esp.crfsuite')\n\n# example_sent = test_sents[0]\n# print(' '.join(sent2tokens(example_sent)))\n# print(example_sent)\n# sentence = \"I am John from America\"\n# # sentence = \"La Coruna , 23 may ( EFECOM )\" #comments[0]\n# sent1 = nltk.word_tokenize(sentence)\n# sent2 = nltk.pos_tag(sent1)\n# sent3 = nltk.ne_chunk(sent2, binary=True)\n# sent4=[]\n# for c in sent3:\n# if hasattr(c, 'node'):\n# sent4.append(' '.join(i[0] for i in c.leaves()))\n# print(sent2,sent3, sent4)\n\n# print(\"Predicted:\", ' '.join(tagger.tag(sent2features(sent3))))\n# print(\"Correct: \", ' '.join(sent2labels(example_sent)))\n\n# sentences =[ line.decode('utf-8').strip() for line in comments[:10]]\n# tokenized_sentences = [nltk.word_tokenize(sentence) for sentence in sentences]\n# tagged_sentences = [nltk.pos_tag(sentence) for sentence in tokenized_sentences]\n# chunked_sentences = nltk.ne_chunk_sents(tagged_sentences, binary=True)\n\n\n# def extract_entity_names(t):\n# entity_names = []\n\n# if hasattr(t, 'label') and t.label:\n# if t.label() == 'NE':\n# entity_names.append(' '.join([child[0] for child in t]))\n# else:\n# for child in t:\n# entity_names.extend(extract_entity_names(child))\n\n# return entity_names\n\n# entity_names = []\n# for tree in chunked_sentences:\n# # Print results per sentence\n# # print extract_entity_names(tree)\n\n# entity_names.extend(extract_entity_names(tree))\n\n# # Print all entity names\n# #print entity_names\n\n# # Print unique entity names\n# print set(entity_names)\n",
"\nfrom src.train.abstract_model import BaseModel\nfrom keras.models import Model\nfrom keras.layers import Dense, Embedding, Input\nfrom keras.layers import LSTM, Bidirectional, GlobalMaxPool1D, Dropout\nfrom keras.preprocessing import text, sequence\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint\nfrom src.config.static_config import StaticConfig\nfrom keras.layers.normalization import BatchNormalization\nfrom src.config.dynamic_config import DynamicConfig\nfrom keras import metrics\nfrom numpy import zeros\nfrom keras.regularizers import l2\nfrom numpy import asarray\nfrom keras.layers import Conv1D, MaxPooling1D\nclass Bidirectional_LSTM_Model_Pretrained_Embedding(BaseModel):\n def __init__(self):\n # self._model = None\n self.global_config = StaticConfig()\n self.dynamic_config = DynamicConfig()\n # self.num_called = 0\n def embedding_index(self):\n self.embeddings_index = dict()\n f = open('./input/glove.6B.300d.txt')\n for line in f:\n values = line.split()\n word = values[0]\n coefs = asarray(values[1:], dtype='float32')\n self.embeddings_index[word] = coefs\n f.close()\n print('Loaded %s word vectors.' % len(self.embeddings_index))\n\n def get_model(self, count, lstm_length=50, dense_dim=30, drop_out = 0.1, preprocessor=None):\n self.embedding_index()\n tokenizer = preprocessor.tokenizer\n voc_size = len(tokenizer.word_index) + 1\n\n lstm_length = self.dynamic_config.config[count]['lstm_length']\n dense_dim = self.dynamic_config.config[count]['dense_dim']\n drop_out = self.dynamic_config.config[count]['drop_out']\n embed_size = self.global_config.lstm_embed_size\n max_features = self.global_config.max_features\n maxlen = self.global_config.maxlen\n kernel_size = self.global_config.cnn_kernel_size\n filters = self.global_config.cnn_filters\n pool_size = self.global_config.cnn_pool_size\n\n embedding_matrix = zeros((voc_size, embed_size))\n for word, i in tokenizer.word_index.items():\n embedding_vector = self.embeddings_index.get(word)\n if embedding_vector is not None:\n embedding_matrix[i] = embedding_vector\n\n inp = Input(shape=(maxlen,), dtype='int32')\n x = Embedding(voc_size, embed_size ,input_length = maxlen, weights=[embedding_matrix])(inp)\n\n x = Bidirectional(LSTM(maxlen, return_sequences=True, dropout=drop_out, recurrent_dropout=drop_out))(x)\n x = GlobalMaxPool1D()(x)\n x = Dropout(drop_out)(x)\n if self.global_config.l2_regularizer != 0.0:\n regularizer = l2(self.global_config.l2_regularizer)\n x = Dense(dense_dim, activation=\"relu\", kernel_regularizer=regularizer)(x)\n else:\n x = Dense(dense_dim, activation=\"relu\")(x)\n x = Dropout(drop_out)(x)\n if self.global_config.l2_regularizer != 0.0:\n regularizer = l2(self.global_config.l2_regularizer)\n x = Dense(dense_dim, activation=\"relu\", kernel_regularizer=regularizer)(x)\n else:\n x = Dense(dense_dim, activation=\"relu\")(x)\n\n x = Dropout(drop_out)(x)\n x = Dense(6, activation=\"sigmoid\")(x)\n\n\n model = Model(inputs=inp, outputs=x)\n model.compile(loss='binary_crossentropy',\n optimizer='adam',\n metrics=[metrics.categorical_accuracy])\n return model"
]
| [
[
"pandas.read_csv"
],
[
"numpy.asarray",
"numpy.zeros"
]
]
|
rte-france/relife | [
"b68177c0f2614f1a9074698aa7cbef691babd556"
]
| [
"relife/reward.py"
]
| [
"\"\"\"Rewards for renewal reward processes.\"\"\"\n\n# Copyright (c) 2022, RTE (https://www.rte-france.com)\n# See AUTHORS.txt\n# SPDX-License-Identifier: Apache-2.0 (see LICENSE.txt)\n# This file is part of ReLife, an open source Python library for asset\n# management based on reliability theory and lifetime data analysis.\n\nfrom abc import ABC, abstractmethod\nimport numpy as np\n\n\nclass Reward(ABC):\n \"\"\"Generic reward class.\"\"\"\n\n @abstractmethod\n def conditional_expectation(\n self, x: np.ndarray, *reward_args: np.ndarray\n ) -> np.ndarray:\n \"\"\"Conditional expected reward.\n\n Parameters\n ----------\n x : ndarray\n Duration.\n *reward_args : ndarray\n Extra arguments of the reward random variable.\n\n Returns\n -------\n ndarray\n The conditional expected reward with respect to the duration.\n \"\"\"\n pass\n\n def sample(self, x: np.ndarray, *reward_args: np.ndarray) -> np.ndarray:\n \"\"\"Reward conditional sampling.\n\n Parameters\n ----------\n x : ndarray\n Duration.\n *reward_args : ndarray\n Extra arguments of the reward random variable.\n\n Returns\n -------\n ndarray\n Random drawing of a reward with respect to the duration.\n\n \"\"\"\n return self.conditional_expectation(x, *reward_args)\n\n\nclass FailureCost(Reward):\n \"\"\"Run-to-failure costs.\n\n The replacements occur upon failures with costs `cf`.\n \"\"\"\n\n def conditional_expectation(self, x: np.ndarray, cf: np.ndarray) -> np.ndarray:\n return cf\n\n\nclass AgeReplacementCost(Reward):\n \"\"\"Age replacement costs.\n\n The replacements occur at a fixed age `ar` with preventive costs `cp` or\n upon failure with failure costs `cf` if earlier.\n \"\"\"\n\n def conditional_expectation(\n self, x: np.ndarray, ar: np.ndarray, cf: np.ndarray, cp: np.ndarray\n ) -> np.ndarray:\n return np.where(x < ar, cf, cp)\n"
]
| [
[
"numpy.where"
]
]
|
aksakalli/heatmap-wms | [
"058014329b746991135f2c371614c4515287f763"
]
| [
"heatmap.py"
]
| [
"from io import BytesIO\n\nimport numpy as np\nfrom PIL import Image\nfrom scipy.ndimage import gaussian_filter\nfrom matplotlib import cm, colors\n\nalpha_heat = colors.LinearSegmentedColormap(\n \"alpha_heat\",\n {\n \"red\": [\n (0, 0, 0),\n (0.001, 0.54, 0.54),\n (0.1, 1, 1),\n (0.55, 1, 1),\n (0.7, 0.48, 0.48),\n (0.85, 0, 0),\n (1, 0, 0),\n ],\n \"green\": [\n (0, 0, 0),\n (0.1, 0, 0),\n (0.25, 0.27, 0.27),\n (0.4, 0.64, 0.64),\n (0.55, 1, 1),\n (0.7, 0.98, 0.98),\n (0.85, 1, 1),\n (1.0, 0, 0),\n ],\n \"blue\": [(0.0, 0, 0), (0.7, 0, 0), (0.85, 1, 1), (1, 1, 1)],\n \"alpha\": [\n (0.0, 0, 0),\n (0.001, 0, 0),\n (0.1, 0.25, 0.25),\n (0.25, 0.5, 0.5),\n (1, 0.5, 0.5),\n ],\n },\n)\n\ncm.register_cmap(cmap=alpha_heat)\n\n\nclass Heatmap:\n \"\"\"Creates a heatmap image from added points\n\n usage:\n\n >>> heatmap = Heatmap(5, 5, 0, 0, 50, 50)\n >>> heatmap.add_point(32, 11)\n >>> heatmap.add_point(22, 21)\n >>> heatmap.pixel_grid\n array([[0., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0.],\n [0., 0., 1., 0., 0.],\n [0., 0., 0., 1., 0.],\n [0., 0., 0., 0., 0.]])\n\n The added points are scaled out to\n the existing pixel size of the image grid.\n Use the same map projection for constructing the heatmap\n and adding new points.\n\n >>> heatmap.update_pixel_grid_rgba()\n >>> image_bytes = heatmap.get_heatmap_image_bytes()\n \"\"\"\n\n def __init__(self, width, height, west, south, east, north):\n self.pixel_grid = np.zeros((height, width))\n self.pixel_grid_rgba = []\n self.west = west\n self.south = south\n self.east = east\n self.north = north\n\n def add_point(self, lon, lat, val=1):\n \"\"\"adds a new point to the grid\"\"\"\n height, width = self.pixel_grid.shape\n if self.north > lat > self.south and self.west < lon < self.east:\n y = int(height - height * (lat - self.south) / (self.north - self.south))\n x = int(width * (lon - self.west) / (self.east - self.west))\n self.pixel_grid[y][x] += val\n\n def update_pixel_grid_rgba(self, blur_sigma=10, cmap_name=\"alpha_heat\"):\n \"\"\"The `pixel_grid_rgba` attribute is not updated after addding points.\n This method needs to be called before getting rgba images.\n\n Parameters\n ----------\n blur_sigma: scalar or sequence of scalars\n creates more blurred image when you increase it\n\n cmap_name: matplotlib's colormap name\n you can set it to a different one or register your custom color map\n using `matplotlib.cm.register_cmap`.\n \"\"\"\n normalize = colors.Normalize()\n cmap = cm.get_cmap(cmap_name)\n pixel_grid_blurred = gaussian_filter(self.pixel_grid, sigma=blur_sigma)\n self.pixel_grid_rgba = cmap(normalize(pixel_grid_blurred))\n\n def get_heatmap_image(self):\n \"\"\"returns PIL image object\"\"\"\n pixel_grid_rgba_hex = (self.pixel_grid_rgba * 255).astype(np.uint8)\n return Image.fromarray(pixel_grid_rgba_hex, \"RGBA\")\n\n def get_heatmap_image_bytes(self):\n \"\"\"returns PNG image bytes\"\"\"\n image = self.get_heatmap_image()\n with BytesIO() as png_buffer:\n image.save(png_buffer, format=\"PNG\")\n image_byte_values = png_buffer.getvalue()\n return image_byte_values\n\n\nif __name__ == \"__main__\":\n import doctest\n\n doctest.testmod()\n"
]
| [
[
"matplotlib.cm.register_cmap",
"matplotlib.cm.get_cmap",
"numpy.zeros",
"scipy.ndimage.gaussian_filter",
"matplotlib.colors.Normalize",
"matplotlib.colors.LinearSegmentedColormap"
]
]
|
rogeryan/ssd_keras | [
"45c364a122d2aa894ce8ba687876b02223a83cd0"
]
| [
"ssd7_pred.py"
]
| [
"import os\nimport logging\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau, TerminateOnNaN, CSVLogger\nfrom tensorflow.keras import backend as K\nfrom tensorflow.keras.models import load_model\nfrom math import ceil\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nfrom models.keras_ssd7 import build_model\nfrom keras_loss_function.keras_ssd_loss import SSDLoss\nfrom keras_layers.keras_layer_AnchorBoxes import AnchorBoxes\nfrom keras_layers.keras_layer_DecodeDetections import DecodeDetections\nfrom keras_layers.keras_layer_DecodeDetectionsFast import DecodeDetectionsFast\n\nfrom ssd_encoder_decoder.ssd_input_encoder import SSDInputEncoder\nfrom ssd_encoder_decoder.ssd_output_decoder import decode_detections, decode_detections_fast\n\nfrom data_generator.object_detection_2d_data_generator import DataGenerator\nfrom data_generator.object_detection_2d_misc_utils import apply_inverse_transforms\nfrom data_generator.data_augmentation_chain_variable_input_size import DataAugmentationVariableInputSize\nfrom data_generator.data_augmentation_chain_constant_input_size import DataAugmentationConstantInputSize\nfrom data_generator.data_augmentation_chain_original_ssd import SSDDataAugmentation\n\n# 日志配置,便于记录训练过程信息\n\n\ndef init_log_config():\n \"\"\"\n 初始化日志相关配置\n :return:\n \"\"\"\n global logger\n logger = logging.getLogger()\n logger.setLevel(logging.INFO)\n log_path = os.path.join(os.getcwd(), 'logs')\n if not os.path.exists(log_path):\n os.makedirs(log_path)\n log_name = os.path.join(log_path, 'train.log')\n sh = logging.StreamHandler()\n fh = logging.FileHandler(log_name, mode='w')\n fh.setLevel(logging.DEBUG)\n formatter = logging.Formatter(\n \"%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s\")\n fh.setFormatter(formatter)\n sh.setFormatter(formatter)\n logger.addHandler(sh)\n logger.addHandler(fh)\n\n\ninit_log_config()\n# 1. Set the model configuration parameters\nimg_height = 300 # Height of the input images\nimg_width = 480 # Width of the input images\nimg_channels = 3 # Number of color channels of the input images\n# Set this to your preference (maybe `None`). The current settings transform the input pixel values to the interval `[-1,1]`.\nintensity_mean = 127.5\n# Set this to your preference (maybe `None`). The current settings transform the input pixel values to the interval `[-1,1]`.\nintensity_range = 127.5\nn_classes = 5 # Number of positive classes\n# An explicit list of anchor box scaling factors. 
If this is passed, it will override `min_scale` and `max_scale`.\nscales = [0.08, 0.16, 0.32, 0.64, 0.96]\n# The list of aspect ratios for the anchor boxes\naspect_ratios = [0.5, 1.0, 2.0]\n# Whether or not you want to generate two anchor boxes for aspect ratio 1\ntwo_boxes_for_ar1 = True\nsteps = None # In case you'd like to set the step sizes for the anchor box grids manually; not recommended\noffsets = None # In case you'd like to set the offsets for the anchor box grids manually; not recommended\n# Whether or not to clip the anchor boxes to lie entirely within the image boundaries\nclip_boxes = False\n# The list of variances by which the encoded target coordinates are scaled\nvariances = [1.0, 1.0, 1.0, 1.0]\n# Whether or not the model is supposed to use coordinates relative to the image size\nnormalize_coords = True\ndataset_prefix = './'\nmodel_saved = 'ssd7.h5'\nbatch_size = 16\n\n# 加载之前保存的模型或者创建新模型\nK.clear_session() # Clear previous models from memory.\nif os.path.exists(model_saved):\n # We need to create an SSDLoss object in order to pass that to the model loader.\n ssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)\n model = load_model(model_saved, custom_objects={\n 'AnchorBoxes': AnchorBoxes, 'compute_loss': ssd_loss.compute_loss})\nelse:\n logger.error(f'无法加载训练好的模型:{model_saved}')\n\nval_dataset = DataGenerator(\n load_images_into_memory=False, hdf5_dataset_path=dataset_prefix+'traffic_val.h5')\nval_dataset_size = val_dataset.get_dataset_size()\nlogger.info(\"Number of images in the validation dataset:\\t{:>6}\".format(\n val_dataset_size))\n\npredictor_sizes = [model.get_layer('classes4').output_shape[1:3],\n model.get_layer('classes5').output_shape[1:3],\n model.get_layer('classes6').output_shape[1:3],\n model.get_layer('classes7').output_shape[1:3]]\n\nssd_input_encoder = SSDInputEncoder(img_height=img_height,\n img_width=img_width,\n n_classes=n_classes,\n predictor_sizes=predictor_sizes,\n scales=scales,\n aspect_ratios_global=aspect_ratios,\n two_boxes_for_ar1=two_boxes_for_ar1,\n steps=steps,\n offsets=offsets,\n clip_boxes=clip_boxes,\n variances=variances,\n matching_type='multi',\n pos_iou_threshold=0.5,\n neg_iou_limit=0.3,\n normalize_coords=normalize_coords)\n\nval_generator = val_dataset.generate(batch_size=batch_size,\n shuffle=False,\n transformations=[],\n label_encoder=ssd_input_encoder,\n returns={'processed_images',\n 'encoded_labels'},\n keep_images_without_gt=False)\n\n# Make predication\npredict_generator = val_dataset.generate(batch_size=1,\n shuffle=True,\n transformations=[],\n label_encoder=None,\n returns={'processed_images',\n 'processed_labels',\n 'filenames'},\n keep_images_without_gt=False)\n\n# 2: Generate samples\nbatch_images, batch_labels, batch_filenames = next(predict_generator)\ni = 0 # Which batch item to look at\nlogger.info(\"Image:\" + batch_filenames[i])\nlogger.info(\"Ground truth boxes:\")\nlogger.info(batch_labels[i])\n\ny_pred = model.predict(batch_images)\ny_pred_decoded = decode_detections(y_pred,\n confidence_thresh=0.5,\n iou_threshold=0.45,\n top_k=200,\n normalize_coords=normalize_coords,\n img_height=img_height,\n img_width=img_width)\n\nnp.set_printoptions(precision=2, suppress=True, linewidth=90)\nlogger.info(\"Predicted boxes:\\n\")\nlogger.info(' class conf xmin ymin xmax ymax')\nlogger.info(y_pred_decoded[i])\n\nplt.figure(figsize=(20, 12))\nplt.imshow(batch_images[i])\n\ncurrent_axis = plt.gca()\n\n# Set the colors for the bounding boxes\ncolors = plt.cm.hsv(np.linspace(0, 1, n_classes+1)).tolist()\n# Just so we can 
print class names onto the image instead of IDs\nclasses = ['background', 'car', 'truck', 'pedestrian', 'bicyclist', 'light']\n\n# Draw the ground truth boxes in green (omit the label for more clarity)\nfor box in batch_labels[i]:\n xmin = box[1]\n ymin = box[2]\n xmax = box[3]\n ymax = box[4]\n label = '{}'.format(classes[int(box[0])])\n current_axis.add_patch(plt.Rectangle(\n (xmin, ymin), xmax-xmin, ymax-ymin, color='green', fill=False, linewidth=2))\n #current_axis.text(xmin, ymin, label, size='x-large', color='white', bbox={'facecolor':'green', 'alpha':1.0})\n\n# Draw the predicted boxes in blue\nfor box in y_pred_decoded[i]:\n xmin = box[-4]\n ymin = box[-3]\n xmax = box[-2]\n ymax = box[-1]\n color = colors[int(box[0])]\n label = '{}: {:.2f}'.format(classes[int(box[0])], box[1])\n current_axis.add_patch(plt.Rectangle(\n (xmin, ymin), xmax-xmin, ymax-ymin, color=color, fill=False, linewidth=2))\n current_axis.text(xmin, ymin, label, size='x-large',\n color='white', bbox={'facecolor': color, 'alpha': 1.0})\n"
]
| [
[
"numpy.set_printoptions",
"numpy.linspace",
"matplotlib.pyplot.figure",
"tensorflow.keras.models.load_model",
"tensorflow.keras.backend.clear_session",
"matplotlib.pyplot.Rectangle",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.imshow"
]
]
|
awasthiabhijeet/CRES | [
"217c2d3b0cdaec1b22bc498c4cd17c5262b3fd13"
]
| [
"src/hls/get_rule_related_statistics.py"
]
| [
"# gets majority voting accuracy from validation_processed.p and test_processed.p (or any pickle of similar format)\n# for validation and test data respectively\n\nimport pickle\nimport sys,os\nfrom collections import Counter\nimport numpy as np\nimport random\nfrom sklearn.metrics import precision_recall_fscore_support\n\ndef get_majority_vote(l, num_classes, default_class=None):\n\t# if no rule fire, \n\t#\tdefault label is the output if provided, \n\t# else a random label chosen uniformly\n\t# else\n\t#\tmajority label is the output\n\t# \tties between multiple majority classes are broken arbitarily\n\tresult = []\n\tfor i in range(len(l)):\n\t\tc = Counter(l[i]).most_common()\n\t\tif c[0][0] == num_classes and len(c)==1:\n\t\t\tif (default_class is None) or (default_class == \"None\"):\n\t\t\t\tresult.append(random.randint(0,num_classes-1))\n\t\t\telse:\n\t\t\t\tresult.append(int(default_class))\n\t\telse:\n\t\t\tc = [item for item in c if item[0]!=num_classes]\n\t\t\tmajority_freq = c[0][1]\n\t\t\tfreq_classes = [item[0] for item in c if item[1]==majority_freq]\n\t\t\tresult.append(random.choice(freq_classes))\n\treturn np.array(result)\n\npath_dir = sys.argv[1] #path to data strored in form of pickles\nnum_classes = int(sys.argv[2]) #num of classes to be predicted (depends on the dataset)\ndefault_class = sys.argv[3] #default class (usually the most frequent class) can also be \"None\"\n\nvalidation_pickle = open(os.path.join(path_dir,\"validation_processed.p\"),\"rb\")\nvalidation_x=pickle.load(validation_pickle)\nvalidation_l=pickle.load(validation_pickle)\nvalidation_m=pickle.load(validation_pickle)\nvalidation_L=pickle.load(validation_pickle) #true labels\nvalidation_d=pickle.load(validation_pickle)\n\nmajority = get_majority_vote(validation_l,num_classes,default_class) #majority voted predictions\naccuracy = np.sum(np.equal(majority,validation_L).astype(np.float))/len(validation_L)\n\nprecision, recall, f1_score, support = precision_recall_fscore_support(validation_L, majority)\n\nprint(\"Accuracy of majority voting on validation data: \", accuracy)\nprint(\"Precision of majority voting on validation data: \", precision)\nprint(\"Recall of majority voting on validation data: \", recall)\nprint(\"f1_score of majority voting on validation data: \", f1_score)\nprint(\"support of majority voting on validation data: \", support)\n\nU_pickle = open(os.path.join(path_dir,\"U_processed.p\"),\"rb\")\nU_x=pickle.load(U_pickle)\nU_l=pickle.load(U_pickle)\nU_m=pickle.load(U_pickle)\nU_L=pickle.load(U_pickle)\nU_d=pickle.load(U_pickle)\nfired_U_idx = [i for i,item in enumerate(U_m) if np.sum(item)!=0] # instance indices on which \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t # atleast 1 rule fired\n\nprint(\"Number of rules: \", U_l.shape[1])\nprint(\"Size of Validation: \",len(validation_x))\nprint(\"Size of U: \",len(U_x))\nprint(\"Size of fired U: \", len(fired_U_idx))\nprint(\"Cover percentage: \",len(fired_U_idx)/len(U_x))\n\n\nd_pickle = open(os.path.join(path_dir,\"d_processed.p\"),\"rb\")\nd_x=pickle.load(d_pickle)\nd_l=pickle.load(d_pickle)\nd_m=pickle.load(d_pickle)\nd_L=pickle.load(d_pickle)\nd_d=pickle.load(d_pickle)\nprint(\"Size of d: \",len(d_x))\n\ntest_pickle = open(os.path.join(path_dir,\"test_processed.p\"),\"rb\")\ntest_x=pickle.load(test_pickle)\ntest_l=pickle.load(test_pickle)\ntest_m=pickle.load(test_pickle)\ntest_L=pickle.load(test_pickle)\ntest_d=pickle.load(test_pickle)\ntest_fired_idx = [i for i,item in enumerate(test_m) if sum(item)>0]\n\nmajority = 
get_majority_vote(test_l,num_classes,default_class)\n#dump majority preds for test data in a text file for external evaluation (if needed)\nwith open(os.path.join(path_dir,\"majority_voting_preds.txt\"),\"w\") as pred_file:\n\tfor item in majority :\n\t\tpred_file.write(str(item)+\"\\n\")\n\n\naccuracy = np.sum(np.equal(majority,test_L).astype(np.float))/len(test_L)\n\nprecision, recall, f1_score, support = precision_recall_fscore_support(test_L, majority)\n\nprint(\"Accuracy of majority voting on test data: \", accuracy)\nprint(\"Precision of majority voting on test data: \", precision)\nprint(\"Recall of majority voting on test data: \", recall)\nprint(\"f1_score of majority voting on test data: \", f1_score)\nprint(\"support of majority voting on test data: \", support)\n\nprint(\"size of test: \",len(test_x))\nprint(\"size of fired_test: \",len(test_fired_idx))\n\ndef get_rule_precision(l,L,m):\n\t#micro_p : correct_rule_firings/total_rule_firings (micro_precision)\n\t#macro_p : average of individual precision of rules having non-zero support\n\t#rule_wise_precision : individual precision of rules\n\n\tL = L.reshape([L.shape[0],1])\n\tcomp = np.equal(l,L).astype(np.float)\n\tcomp = comp * m\n\tcomp = np.sum(comp,0)\n\tsupport = np.sum(m,0)\n\tmicro_p = np.sum(comp)/np.sum(support)\n\tmacro_p = comp/(support + 1e-25)\n\tsupported_rules = [idx for idx,support_val in enumerate(support) if support_val>0]\n\tmacro_p = macro_p[supported_rules]\n\tmacro_p = np.mean(macro_p)\n\trule_wise_precision = comp/(support + 1e-25)\n\treturn micro_p,macro_p,rule_wise_precision\n\nmicro_p,macro_p,rule_wise_precision = get_rule_precision(test_l,test_L,test_m)\nprint(\"Micro Precision of rules on test data: \",micro_p)\n\ndef get_conflict_rule_cov_rule_per_inst(l,m):\n\trule_cov = np.mean(np.sum(m,0))\n\trules_per_inst = np.mean(np.sum(m,1))\n\tconflicts = 0\n\tfor i in range(len(l)):\n\t\tuniques = np.unique(l[i])\n\t\tif len(uniques) >=3:\n\t\t\tconflicts +=1\n\t\telse:\n\t\t\tif (len(uniques)==2 and num_classes in uniques) or len(uniques)==1:\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tconflicts +=1\n\tavg_conflicts = conflicts/m.shape[0]\n\treturn avg_conflicts, rule_cov, rules_per_inst\n\nconflicts,rule_cov,rules_per_inst = get_conflict_rule_cov_rule_per_inst(U_l[fired_U_idx],U_m[fired_U_idx])\nprint(\"Conflict rate in U: \",conflicts)\nprint(\"Average num of instances covered by any rule in U: \",rule_cov)\nprint(\"Average rules firing on an instance in U: \", rules_per_inst)\n\n\n\n"
]
| [
[
"numpy.equal",
"numpy.array",
"numpy.sum",
"numpy.mean",
"sklearn.metrics.precision_recall_fscore_support",
"numpy.unique"
]
]
|
francescodelduchetto/nbs_experiments | [
"9d8a2e9a7253d0c027f5a6e7225d2432ea8db198"
]
| [
"scripts/parseRosbagsNoPlay.py"
]
| [
"#!/usr/bin/env python\n\n'''\nOffline rosbag parser!\nBag contains: tf topic with robot tf and rfid tags tf\n rfid readings\n laser readings \n\n'''\n\n\nimport rosbag\nfrom tf_bag import BagTfTransformer\n\nimport tf\nimport pandas as pd\nimport rospy\nfrom rfid_node.msg import TagReading\n\n\ndef getRelativeXYYaw(bag_tf, orig_frame, dest_frame, t):\n translation, quaternion = bag_tf.lookupTransform(orig_frame, dest_frame, t)\n (rel_x, rel_y, rel_yaw) = getXYYaw(translation, quaternion)\n return (rel_x, rel_y, rel_yaw)\n\n\ndef getXYYaw(translat, rotat):\n rel_x = translat[0]\n rel_y = translat[1]\n (rel_rol, rel_pitch, rel_yaw) = tf.transformations.euler_from_quaternion(rotat)\n return (rel_x, rel_y, rel_yaw)\n\n\n# Main function.\nif __name__ == '__main__':\n #folder = '/home/manolofc/catkin_ws/src/RFID/pr_model/tests/linda/'\n #folder = '/home/manolofc/ownCloud/RFID-bags/'\n #saveFile = folder + '20dB-Linda-FAB-LAB-V3.csv'\n #bagFile = folder + '2000-Linda-FAB-LAB.bag'\n\n folder = '/home/manolofc/Desktop/success_INB3ENG/'\n saveFile = folder + 'INB3ENG_0.2_0.6_0.2_date__2019-09-09-18-50-52.csv'\n bagFile = folder + 'INB3ENG_0.2_0.6_0.2_date__2019-09-09-18-50-52.bag'\n tagSet=set()\n detectecTagSet=set()\n allTagSet=set()\n\n # tags surveyed in bag....\n tagSet.add('390100010000000000000002')\n tagSet.add('390100010000000000000004')\n tagSet.add('390100010000000000000005')\n tagSet.add('390100010000000000000007')\n tagCoveragePercent = 0.0\n isFirstStat = True\n rob_x = 0.0\n rob_y = 0.0\n rob_yaw = 0.0\n rfid_reading_topic = '/lastTag'\n mcdm_stats_topic = '/mcdm_stats'\n robot_frame = 'base_footprint'\n map_frame = \"map\"\n\n labels = ['Time', 'robot_x_m', 'robot_y_m', 'robot_yaw_rad','tagCoveragePercent', 'coveragePercent', 'numConfiguration', 'backTracking']\n dataEntries = []\n\n print(\"Procesing rosbag file: \" + bagFile)\n bag = rosbag.Bag(bagFile)\n print(\"Creating bag transformer (may take a while)\")\n bag_transformer = BagTfTransformer(bag)\n\n lastPrint = rospy.Time(0)\n printInc = rospy.Duration(30)\n\n print(\"Iterating over bag file...\")\n # main loop\n for topic, msg, t in bag.read_messages():\n if ((t-lastPrint) > printInc):\n print(\"T: \" + str(t))\n lastPrint = t\n\n if topic == rfid_reading_topic:\n tid = str(msg.ID)\n allTagSet.add(tid)\n if tid in tagSet:\n detectecTagSet.add(tid)\n\n tagCoveragePercent = 100.0 * float(len(detectecTagSet)) / float(len(tagSet))\n\n if topic == mcdm_stats_topic:\n \n raw_stats = msg.data.split(',')\n if isFirstStat:\n isFirstStat = False\n print(\"MDCM giving data about: \"+msg.data)\n else:\n (rob_x, rob_y, rob_yaw) = getRelativeXYYaw(bag_transformer, map_frame, robot_frame, t)\n coveragePercent = float(raw_stats[0])\n numConfiguration = float(raw_stats[1])\n backTracking = float(raw_stats[2])\n\n # entry order and elements MUST MATCH LABELS!!!!!\n entry = (str(t), rob_x, rob_y, rob_yaw, tagCoveragePercent, coveragePercent, numConfiguration, backTracking)\n dataEntries.append(entry)\n\n # save and close\n bag.close()\n\n df = pd.DataFrame.from_records(dataEntries, columns=labels)\n print(\"detected tags: \"+str(detectecTagSet))\n print(\"All tags: \"+str(allTagSet))\n print(\"Saving data to csv\")\n df.to_csv(saveFile, index=False)\n print(\"Done\")\n"
]
| [
[
"pandas.DataFrame.from_records"
]
]
|
AnnaTruzzi/cornet_analysis | [
"0a2fd0c5a6b09a80d3c8a47441b08fd6129f7a2d"
]
| [
"summarize_performance_aoa_fitlearningrate.py"
]
| [
"#%% \n\nimport os\nfrom os import path\nimport pickle\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport json\nimport seaborn as sns\nimport math\nimport numpy as np\n\nfrom scipy.optimize import curve_fit\n\n\ndef learningcurve(x,A,b):\n y=A*(np.exp(-x*b))\n return y\n\n# Colormap for AoA\ncmap = plt.cm.get_cmap('inferno')\ncolors = cmap(np.arange(cmap.N))\nprint(cmap.N)\n\n\ndf_aoa=pd.read_json('linearclass_v3_aoa.json')\n\nprint('Dropping epoch 0')\ndf_aoa=df_aoa[df_aoa.stage != 0]\ndf_aoa['aoa_rank']=df_aoa['aoa'].rank()\n\naoamin=df_aoa['aoa_rank'].min()\naoarange=df_aoa['aoa_rank'].max()-aoamin\n\nfrom scipy.stats import pearsonr\n\nfig,ax=plt.subplots(ncols=4)\n\n\nlc={}\nfor convkey,convgrp in df_aoa.groupby('conv'):\n ax[convkey].set_title('Conv layer %d'%convkey)\n lc[convkey]={}\n for nodekey,nodegrp in convgrp.groupby('node'):\n A0=float(nodegrp.loc[nodegrp['stage']==35]['loss']) # starting estimate for A\n stage=np.array([float(s) for s in nodegrp['stage']])\n loss=np.array([float(l) for l in nodegrp['loss']])\n lc[convkey][nodekey]=curve_fit(learningcurve,stage,loss,p0=[A0,0])\n colind=int(((nodegrp['aoa_rank'].iloc[0]-aoamin)/aoarange)*255)\n ax[convkey].plot(stage,loss,color=colors[colind],alpha=0.2)\nplt.show()\nprint(lc)\n\n\n\n\n\n\n \n\n#%%\n"
]
| [
[
"scipy.optimize.curve_fit",
"numpy.exp",
"matplotlib.pyplot.subplots",
"pandas.read_json",
"numpy.arange",
"matplotlib.pyplot.show",
"matplotlib.pyplot.cm.get_cmap"
]
]
|
stefan-falk/tensor2tensor | [
"7ea91197843399ddf46ebf78c9d42c2a573a4335"
]
| [
"tensor2tensor/models/research/rl.py"
]
| [
"# coding=utf-8\n# Copyright 2019 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Reinforcement learning models and parameters.\"\"\"\n\nimport collections\nimport functools\nimport operator\nimport gym\nimport six\n\nfrom tensor2tensor.data_generators import gym_env\nfrom tensor2tensor.data_generators import problem\nfrom tensor2tensor.data_generators import video_utils\nfrom tensor2tensor.envs import tic_tac_toe_env\nfrom tensor2tensor.layers import common_hparams\nfrom tensor2tensor.layers import common_layers\nfrom tensor2tensor.layers import discretization\nfrom tensor2tensor.layers import modalities\nfrom tensor2tensor.models.video import basic_deterministic_params\nfrom tensor2tensor.models.video import basic_stochastic\nfrom tensor2tensor.rl.envs.py_func_batch_env import PyFuncBatchEnv\nfrom tensor2tensor.rl.envs.simulated_batch_env import SimulatedBatchEnv\nfrom tensor2tensor.rl.envs.simulated_batch_gym_env import SimulatedBatchGymEnv\nfrom tensor2tensor.utils import hparam\nfrom tensor2tensor.utils import registry\nfrom tensor2tensor.utils import t2t_model\nfrom tensor2tensor.utils import trainer_lib\n\nimport tensorflow as tf\nimport tensorflow_probability as tfp\n\n\[email protected]_hparams\ndef ppo_base_v1():\n \"\"\"Set of hyperparameters.\"\"\"\n hparams = common_hparams.basic_params1()\n hparams.learning_rate_schedule = \"constant\"\n hparams.learning_rate_constant = 1e-4\n hparams.clip_grad_norm = 0.5\n hparams.weight_decay = 0\n # If set, extends the LR warmup to all epochs except the final one.\n hparams.add_hparam(\"lr_decay_in_final_epoch\", False)\n hparams.add_hparam(\"init_mean_factor\", 0.1)\n hparams.add_hparam(\"init_logstd\", 0.1)\n hparams.add_hparam(\"policy_layers\", (100, 100))\n hparams.add_hparam(\"value_layers\", (100, 100))\n hparams.add_hparam(\"clipping_coef\", 0.2)\n hparams.add_hparam(\"gae_gamma\", 0.99)\n hparams.add_hparam(\"gae_lambda\", 0.95)\n hparams.add_hparam(\"entropy_loss_coef\", 0.01)\n hparams.add_hparam(\"value_loss_coef\", 1)\n hparams.add_hparam(\"optimization_epochs\", 15)\n hparams.add_hparam(\"epoch_length\", 200)\n hparams.add_hparam(\"epochs_num\", 2000)\n hparams.add_hparam(\"eval_every_epochs\", 10)\n hparams.add_hparam(\"save_models_every_epochs\", 30)\n hparams.add_hparam(\"optimization_batch_size\", 50)\n hparams.add_hparam(\"intrinsic_reward_scale\", 0.)\n hparams.add_hparam(\"logits_clip\", 0.0)\n hparams.add_hparam(\"dropout_ppo\", 0.1)\n hparams.add_hparam(\"effective_num_agents\", None)\n hparams.add_hparam(\"use_epochs\", True)\n # TODO(afrozm): Clean this up, this is used in PPO learner to get modalities.\n hparams.add_hparam(\"policy_problem_name\", \"dummy_policy_problem\")\n return hparams\n\n\[email protected]_hparams\ndef basic_policy_parameters():\n wrappers = None\n return hparam.HParams(wrappers=wrappers)\n\n\[email protected]_hparams\ndef ppo_discrete_action_base():\n hparams = ppo_base_v1()\n hparams.add_hparam(\"policy_network\", \"feed_forward_categorical_policy\")\n 
return hparams\n\n\[email protected]_hparams\ndef discrete_random_action_base():\n hparams = common_hparams.basic_params1()\n hparams.add_hparam(\"policy_network\", \"random_policy\")\n return hparams\n\n\[email protected]_hparams\ndef ppo_atari_base():\n \"\"\"Pong base parameters.\"\"\"\n hparams = ppo_discrete_action_base()\n hparams.learning_rate_constant = 1e-4\n hparams.epoch_length = 200\n hparams.gae_gamma = 0.985\n hparams.gae_lambda = 0.985\n hparams.entropy_loss_coef = 0.003\n hparams.value_loss_coef = 1\n hparams.optimization_epochs = 3\n hparams.epochs_num = 1000\n hparams.policy_network = \"feed_forward_cnn_small_categorical_policy\"\n hparams.clipping_coef = 0.2\n hparams.optimization_batch_size = 20\n hparams.clip_grad_norm = 0.5\n return hparams\n\n\[email protected]_hparams\ndef ppo_original_params():\n \"\"\"Parameters based on the original PPO paper.\"\"\"\n hparams = ppo_atari_base()\n hparams.learning_rate_constant = 2.5e-4\n hparams.gae_gamma = 0.99\n hparams.gae_lambda = 0.95\n hparams.clipping_coef = 0.1\n hparams.value_loss_coef = 1\n hparams.entropy_loss_coef = 0.01\n hparams.eval_every_epochs = 200\n hparams.dropout_ppo = 0.1\n # The parameters below are modified to accommodate short epoch_length (which\n # is needed for model based rollouts).\n hparams.epoch_length = 50\n hparams.optimization_batch_size = 20\n return hparams\n\n\[email protected]_hparams\ndef ppo_dist_params():\n \"\"\"Parameters based on the original paper modified for distributional RL.\"\"\"\n hparams = ppo_original_params()\n hparams.learning_rate_constant = 1e-3\n return hparams\n\n\[email protected]_hparams\ndef ppo_original_tiny():\n \"\"\"Parameters based on the original PPO paper, tiny version.\"\"\"\n hparams = ppo_original_params()\n hparams.epoch_length = 5\n hparams.optimization_batch_size = 1\n return hparams\n\n\[email protected]_hparams\ndef ppo_ttt_params():\n \"\"\"Parameters based on the original PPO paper.\"\"\"\n hparams = ppo_original_tiny()\n hparams.policy_network = \"feed_forward_categorical_policy\"\n hparams.policy_problem_name = \"dummy_policy_problem_ttt\"\n return hparams\n\n\[email protected]_hparams\ndef ppo_original_params_gamma95():\n \"\"\"Parameters based on the original PPO paper, changed gamma.\"\"\"\n hparams = ppo_original_params()\n hparams.gae_gamma = 0.95\n return hparams\n\n\[email protected]_hparams\ndef ppo_original_params_gamma90():\n \"\"\"Parameters based on the original PPO paper, changed gamma.\"\"\"\n hparams = ppo_original_params()\n hparams.gae_gamma = 0.90\n return hparams\n\n\[email protected]_hparams\ndef ppo_original_world_model():\n \"\"\"Atari parameters with world model as policy.\"\"\"\n hparams = ppo_original_params()\n hparams.policy_network = \"next_frame_basic_deterministic\"\n hparams_keys = hparams.values().keys()\n video_hparams = basic_deterministic_params.next_frame_basic_deterministic()\n for (name, value) in six.iteritems(video_hparams.values()):\n if name in hparams_keys:\n hparams.set_hparam(name, value)\n else:\n hparams.add_hparam(name, value)\n # Mostly to avoid decaying WM params when training the policy.\n hparams.weight_decay = 0\n return hparams\n\n\[email protected]_hparams\ndef ppo_tiny_world_model():\n \"\"\"Atari parameters with world model as policy.\"\"\"\n hparams = ppo_original_params()\n hparams.policy_network = \"next_frame_basic_deterministic\"\n hparams_keys = hparams.values().keys()\n video_hparams = basic_deterministic_params.next_frame_tiny()\n for (name, value) in 
six.iteritems(video_hparams.values()):\n if name in hparams_keys:\n hparams.set_hparam(name, value)\n else:\n hparams.add_hparam(name, value)\n hparams.weight_decay = 0\n return hparams\n\n\[email protected]_hparams\ndef ppo_original_world_model_stochastic_discrete():\n \"\"\"Atari parameters with stochastic discrete world model as policy.\"\"\"\n hparams = ppo_original_params()\n hparams.policy_network = \"next_frame_basic_stochastic_discrete\"\n hparams_keys = hparams.values().keys()\n video_hparams = basic_stochastic.next_frame_basic_stochastic_discrete()\n for (name, value) in six.iteritems(video_hparams.values()):\n if name in hparams_keys:\n hparams.set_hparam(name, value)\n else:\n hparams.add_hparam(name, value)\n # To avoid OOM. Probably way to small.\n hparams.optimization_batch_size = 1\n hparams.weight_decay = 0\n return hparams\n\n\ndef make_real_env_fn(env):\n \"\"\"Creates a function returning a given real env, in or out of graph.\n\n Args:\n env: Environment to return from the function.\n\n Returns:\n Function in_graph -> env.\n \"\"\"\n return lambda in_graph: PyFuncBatchEnv(env) if in_graph else env\n\n\ndef make_simulated_env_fn(**env_kwargs):\n \"\"\"Returns a function creating a simulated env, in or out of graph.\n\n Args:\n **env_kwargs: kwargs to pass to the simulated env constructor.\n\n Returns:\n Function in_graph -> env.\n \"\"\"\n def env_fn(in_graph):\n class_ = SimulatedBatchEnv if in_graph else SimulatedBatchGymEnv\n return class_(**env_kwargs)\n return env_fn\n\n\n# TODO(koz4k): Move this and the one below to rl_utils.\ndef make_simulated_env_kwargs(real_env, hparams, **extra_kwargs):\n \"\"\"Extracts simulated env kwargs from real_env and loop hparams.\"\"\"\n objs_and_attrs = [\n (real_env, [\n \"reward_range\", \"observation_space\", \"action_space\", \"frame_height\",\n \"frame_width\"\n ]),\n (hparams, [\"frame_stack_size\", \"intrinsic_reward_scale\"])\n ]\n kwargs = {\n attr: getattr(obj, attr) # pylint: disable=g-complex-comprehension\n for (obj, attrs) in objs_and_attrs for attr in attrs\n }\n kwargs[\"model_name\"] = hparams.generative_model\n kwargs[\"model_hparams\"] = trainer_lib.create_hparams(\n hparams.generative_model_params\n )\n if hparams.wm_policy_param_sharing:\n kwargs[\"model_hparams\"].optimizer_zero_grads = True\n kwargs.update(extra_kwargs)\n return kwargs\n\n\ndef make_simulated_env_fn_from_hparams(real_env, hparams, **extra_kwargs):\n \"\"\"Creates a simulated env_fn.\"\"\"\n return make_simulated_env_fn(\n **make_simulated_env_kwargs(real_env, hparams, **extra_kwargs)\n )\n\n\ndef get_policy(observations, hparams, action_space,\n distributional_size=1, epoch=-1):\n \"\"\"Get a policy network.\n\n Args:\n observations: observations\n hparams: parameters\n action_space: action space\n distributional_size: optional number of buckets for distributional RL\n epoch: optional epoch number\n\n Returns:\n Tuple (action logits, value).\n \"\"\"\n if not isinstance(action_space, gym.spaces.Discrete):\n raise ValueError(\"Expecting discrete action space.\")\n\n obs_shape = common_layers.shape_list(observations)\n (frame_height, frame_width) = obs_shape[2:4]\n\n # TODO(afrozm): We have these dummy problems mainly for hparams, so cleanup\n # when possible and do this properly.\n if hparams.policy_problem_name == \"dummy_policy_problem_ttt\":\n tf.logging.info(\"Using DummyPolicyProblemTTT for the policy.\")\n policy_problem = tic_tac_toe_env.DummyPolicyProblemTTT()\n else:\n tf.logging.info(\"Using DummyPolicyProblem for the policy.\")\n 
policy_problem = DummyPolicyProblem(action_space, frame_height, frame_width)\n\n trainer_lib.add_problem_hparams(hparams, policy_problem)\n hparams.force_full_predict = True\n model = registry.model(hparams.policy_network)(\n hparams, tf.estimator.ModeKeys.TRAIN\n )\n try:\n num_target_frames = hparams.video_num_target_frames\n except AttributeError:\n num_target_frames = 1\n target_value_shape_suffix = [num_target_frames]\n if distributional_size > 1:\n target_value_shape_suffix = [num_target_frames, distributional_size]\n features = {\n \"inputs\": observations,\n \"epoch\": tf.constant(epoch + 1),\n \"input_action\": tf.zeros(obs_shape[:2] + [1], dtype=tf.int32),\n \"input_reward\": tf.zeros(obs_shape[:2] + [1], dtype=tf.int32),\n \"targets\": tf.zeros(obs_shape[:1] + [num_target_frames] + obs_shape[2:]),\n \"target_action\": tf.zeros(\n obs_shape[:1] + [num_target_frames, 1], dtype=tf.int32),\n \"target_reward\": tf.zeros(\n obs_shape[:1] + [num_target_frames, 1], dtype=tf.int32),\n \"target_policy\": tf.zeros(\n obs_shape[:1] + [num_target_frames] + [action_space.n]),\n \"target_value\": tf.zeros(\n obs_shape[:1] + target_value_shape_suffix)\n }\n model.distributional_value_size = max(distributional_size, 1)\n model.use_epochs = hparams.use_epochs\n with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):\n t2t_model.create_dummy_vars()\n (targets, _) = model(features)\n target_values = targets[\"target_value\"][:, 0]\n if distributional_size > 1:\n target_values = targets[\"target_value\"][:, :]\n return (targets[\"target_policy\"][:, 0, :], target_values)\n\n\[email protected]_hparams\ndef ppo_pong_ae_base():\n \"\"\"Pong autoencoder base parameters.\"\"\"\n hparams = ppo_original_params()\n hparams.learning_rate_constant = 1e-4\n hparams.network = \"dense_bitwise_categorical_policy\"\n return hparams\n\n\[email protected]_hparams\ndef dqn_atari_base():\n # These params are based on agents/dqn/configs/dqn.gin\n # with some modifications taking into account our code\n return hparam.HParams(\n agent_gamma=0.99,\n agent_update_horizon=1,\n agent_min_replay_history=20000, # agent steps\n agent_update_period=4,\n agent_target_update_period=8000, # agent steps\n agent_epsilon_train=0.01,\n agent_epsilon_eval=0.001,\n agent_epsilon_decay_period=250000, # agent steps\n agent_generates_trainable_dones=True,\n\n optimizer_class=\"RMSProp\",\n optimizer_learning_rate=0.00025,\n optimizer_decay=0.95,\n optimizer_momentum=0.0,\n optimizer_epsilon=0.00001,\n optimizer_centered=True,\n\n # TODO(kozak): change names maybe replay_buffer -> agent?\n # Also batch_size is now buffer_batch_size in _DQNAgent.\n replay_buffer_replay_capacity=1000000,\n replay_buffer_buffer_batch_size=32,\n\n time_limit=27000,\n save_every_steps=50000,\n num_frames=int(20 * 1e6),\n\n # TODO(konradczechowski) this is not used in trainer_model_free, clean\n # this up after evaluation refactor\n eval_episodes_num=3,\n )\n\n\[email protected]_hparams\ndef dqn_original_params():\n \"\"\"dqn_original_params.\"\"\"\n hparams = dqn_atari_base()\n hparams.set_hparam(\"num_frames\", int(1e6))\n return hparams\n\n\ndef rlmf_tiny_overrides():\n \"\"\"Parameters to override for tiny setting excluding agent-related hparams.\"\"\"\n return dict(\n max_num_noops=1,\n eval_max_num_noops=1,\n rl_env_max_episode_steps=7,\n eval_rl_env_max_episode_steps=7,\n eval_sampling_temps=[0.0, 1.0],\n )\n\n\[email protected]_hparams\ndef rlmf_original():\n return hparam.HParams(\n game=\"pong\",\n sticky_actions=False,\n 
base_algo=\"ppo\",\n base_algo_params=\"ppo_original_params\",\n batch_size=16,\n eval_batch_size=2,\n frame_stack_size=4,\n eval_sampling_temps=[0.0, 0.2, 0.5, 0.8, 1.0, 2.0],\n max_num_noops=8,\n eval_max_num_noops=8,\n eval_rl_env_max_episode_steps=1000,\n resize_height_factor=2,\n resize_width_factor=2,\n distributional_size=1, # In distributional RL, number of buckets.\n distributional_subscale=0.04, # How to scale values to buckets.\n distributional_threshold=0.0, # Optimism threshold for experiments.\n grayscale=0,\n rl_env_max_episode_steps=-1,\n # If set, use this as the gym env name, instead of changing game mode etc.\n rl_env_name=\"\",\n # Controls whether we should derive observation space, do some\n # pre-processing etc. See T2TGymEnv._derive_observation_space.\n rl_should_derive_observation_space=True,\n aunused=0, # unused param for multi-run settings.\n )\n\n\[email protected]_hparams\ndef rlmf_tictactoe():\n \"\"\"Base set of hparams for model-free PPO.\"\"\"\n hparams = rlmf_original()\n hparams.game = \"tictactoe\"\n hparams.rl_env_name = \"T2TEnv-TicTacToeEnv-v0\"\n # Since we don't have any no-op actions, otherwise we have to have an\n # attribute called `get_action_meanings`.\n hparams.eval_max_num_noops = 0\n hparams.max_num_noops = 0\n hparams.rl_should_derive_observation_space = False\n\n hparams.policy_network = \"feed_forward_categorical_policy\"\n hparams.base_algo_params = \"ppo_ttt_params\"\n\n # Number of last observations to feed to the agent\n hparams.frame_stack_size = 1\n return hparams\n\n\[email protected]_hparams\ndef rlmf_base():\n \"\"\"Base set of hparams for model-free PPO.\"\"\"\n hparams = rlmf_original()\n hparams.add_hparam(\"ppo_epochs_num\", 3000)\n hparams.add_hparam(\"ppo_eval_every_epochs\", 100)\n return hparams\n\n\[email protected]_ranged_hparams\ndef rlmf_5runs(rhp):\n rhp.set_discrete(\"aunused\", list(range(5)))\n\n\[email protected]_ranged_hparams\ndef rlmf_5runs_atari(rhp):\n rhp.set_categorical(\"game\", gym_env.ATARI_GAMES_WITH_HUMAN_SCORE_NICE)\n rhp.set_discrete(\"aunused\", list(range(5)))\n\n\[email protected]_hparams\ndef rlmf_dist():\n \"\"\"Distributional set of hparams for model-free PPO.\"\"\"\n hparams = rlmf_original()\n hparams.distributional_size = 1024\n hparams.base_algo_params = \"ppo_dist_params\"\n return hparams\n\n\[email protected]_hparams\ndef rlmf_dist_threshold():\n \"\"\"Distributional set of hparams for model-free PPO.\"\"\"\n hparams = rlmf_dist()\n hparams.distributional_threshold = 0.5\n return hparams\n\n\[email protected]_hparams\ndef rlmf_tiny():\n \"\"\"Tiny set of hparams for model-free PPO.\"\"\"\n hparams = rlmf_original()\n hparams = hparams.override_from_dict(rlmf_tiny_overrides())\n hparams.batch_size = 2\n hparams.base_algo_params = \"ppo_original_tiny\"\n hparams.add_hparam(\"ppo_epochs_num\", 3)\n hparams.add_hparam(\"ppo_epoch_length\", 2)\n return hparams\n\n\[email protected]_hparams\ndef rlmf_dqn_tiny():\n \"\"\"Tiny DQN params.\"\"\"\n hparams = rlmf_original()\n hparams = hparams.override_from_dict(rlmf_tiny_overrides())\n hparams.batch_size = 1\n hparams.base_algo = \"dqn\"\n hparams.base_algo_params = \"dqn_original_params\"\n hparams.add_hparam(\"dqn_num_frames\", 128)\n hparams.add_hparam(\"dqn_save_every_steps\", 128)\n hparams.add_hparam(\"dqn_replay_buffer_replay_capacity\", 100)\n hparams.add_hparam(\"dqn_agent_min_replay_history\", 10)\n return hparams\n\n\[email protected]_hparams\ndef rlmf_eval():\n \"\"\"Eval set of hparams for model-free PPO.\"\"\"\n hparams = 
rlmf_original()\n hparams.batch_size = 16\n hparams.eval_batch_size = 32\n hparams.eval_episodes_num = 2\n hparams.eval_sampling_temps = [0.5, 0.0, 1.0]\n hparams.eval_rl_env_max_episode_steps = 40000\n hparams.add_hparam(\"ppo_epoch_length\", 128)\n hparams.add_hparam(\"ppo_optimization_batch_size\", 32)\n hparams.add_hparam(\"ppo_epochs_num\", 10000)\n hparams.add_hparam(\"ppo_eval_every_epochs\", 500)\n hparams.add_hparam(\"attempt\", 0)\n hparams.add_hparam(\"moe_loss_coef\", 0)\n return hparams\n\n\[email protected]_hparams\ndef rlmf_eval_dist():\n \"\"\"Distributional set of hparams for model-free PPO.\"\"\"\n hparams = rlmf_eval()\n hparams.distributional_size = 4096\n hparams.distributional_subscale = 0.08\n hparams.base_algo_params = \"ppo_dist_params\"\n return hparams\n\n\[email protected]_hparams\ndef rlmf_eval_dist_threshold():\n \"\"\"Distributional set of hparams for model-free PPO.\"\"\"\n hparams = rlmf_eval_dist()\n hparams.distributional_threshold = 0.5\n return hparams\n\n\nclass PolicyBase(t2t_model.T2TModel):\n\n def __init__(self, *args, **kwargs):\n super(PolicyBase, self).__init__(*args, **kwargs)\n self.distributional_value_size = 1\n self.use_epochs = False\n\n def loss(self, *args, **kwargs):\n return 0.0\n\n\n# TODO(lukaszkaiser): move this class or clean up the whole file.\nclass DummyPolicyProblem(video_utils.VideoProblem):\n \"\"\"Dummy Problem for running the policy.\"\"\"\n\n def __init__(self, action_space, frame_height, frame_width):\n super(DummyPolicyProblem, self).__init__()\n self.action_space = action_space\n self._frame_height = frame_height\n self._frame_width = frame_width\n\n @property\n def frame_height(self):\n \"\"\"Height of each frame.\"\"\"\n return self._frame_height\n\n @property\n def frame_width(self):\n \"\"\"Width of each frame.\"\"\"\n return self._frame_width\n\n @property\n def num_actions(self):\n return self.action_space.n\n\n def hparams(self, defaults, unused_model_hparams):\n p = defaults\n p.modality = {\n \"inputs\": modalities.ModalityType.VIDEO,\n \"input_action\": modalities.ModalityType.SYMBOL_WEIGHTS_ALL,\n \"input_reward\": modalities.ModalityType.SYMBOL_WEIGHTS_ALL,\n \"targets\": modalities.ModalityType.VIDEO,\n \"target_action\": modalities.ModalityType.SYMBOL_WEIGHTS_ALL,\n \"target_reward\": modalities.ModalityType.SYMBOL_WEIGHTS_ALL,\n \"target_policy\": modalities.ModalityType.IDENTITY,\n \"target_value\": modalities.ModalityType.IDENTITY,\n }\n p.vocab_size = {\n \"inputs\": 256,\n \"input_action\": self.num_actions,\n \"input_reward\": 3,\n \"targets\": 256,\n \"target_action\": self.num_actions,\n \"target_reward\": 3,\n \"target_policy\": None,\n \"target_value\": None,\n }\n p.input_space_id = problem.SpaceID.IMAGE\n p.target_space_id = problem.SpaceID.IMAGE\n\n\nNetworkOutput = collections.namedtuple(\n \"NetworkOutput\", \"policy, value, action_postprocessing\")\n\n\n# TODO(koz4k): Translate it to T2TModel or remove.\ndef feed_forward_gaussian_fun(action_space, config, observations):\n \"\"\"Feed-forward Gaussian.\"\"\"\n if not isinstance(action_space, gym.spaces.box.Box):\n raise ValueError(\"Expecting continuous action space.\")\n\n mean_weights_initializer = tf.initializers.variance_scaling(\n scale=config.init_mean_factor)\n logstd_initializer = tf.random_normal_initializer(config.init_logstd, 1e-10)\n\n flat_observations = tf.reshape(observations, [\n tf.shape(observations)[0], tf.shape(observations)[1],\n functools.reduce(operator.mul, observations.shape.as_list()[2:], 1)])\n\n with 
tf.variable_scope(\"network_parameters\"):\n with tf.variable_scope(\"policy\"):\n x = flat_observations\n for size in config.policy_layers:\n x = tf.layers.dense(x, size, activation=tf.nn.relu)\n mean = tf.layers.dense(\n x, action_space.shape[0], activation=tf.tanh,\n kernel_initializer=mean_weights_initializer)\n logstd = tf.get_variable(\n \"logstd\", mean.shape[2:], tf.float32, logstd_initializer)\n logstd = tf.tile(\n logstd[None, None],\n [tf.shape(mean)[0], tf.shape(mean)[1]] + [1] * (mean.shape.ndims - 2))\n with tf.variable_scope(\"value\"):\n x = flat_observations\n for size in config.value_layers:\n x = tf.layers.dense(x, size, activation=tf.nn.relu)\n value = tf.layers.dense(x, 1)[..., 0]\n mean = tf.check_numerics(mean, \"mean\")\n logstd = tf.check_numerics(logstd, \"logstd\")\n value = tf.check_numerics(value, \"value\")\n\n policy = tfp.distributions.MultivariateNormalDiag(mean, tf.exp(logstd))\n\n return NetworkOutput(policy, value, lambda a: tf.clip_by_value(a, -2., 2))\n\n\ndef clip_logits(logits, config):\n logits_clip = getattr(config, \"logits_clip\", 0.)\n if logits_clip > 0:\n min_logit = tf.reduce_min(logits)\n return tf.minimum(logits - min_logit, logits_clip)\n else:\n return logits\n\n\[email protected]_model\nclass FeedForwardCategoricalPolicy(PolicyBase):\n \"\"\"Feed-forward categorical.\"\"\"\n\n def body(self, features):\n observations = features[\"inputs_raw\"]\n observations = tf.cast(observations, tf.float32)\n flat_observations = tf.layers.flatten(observations)\n with tf.variable_scope(\"policy\"):\n x = flat_observations\n for size in self.hparams.policy_layers:\n x = tf.layers.dense(x, size, activation=tf.nn.relu)\n logits = tf.layers.dense(x, self.hparams.problem.num_actions)\n logits = tf.expand_dims(logits, axis=1)\n with tf.variable_scope(\"value\"):\n x = flat_observations\n for size in self.hparams.value_layers:\n x = tf.layers.dense(x, size, activation=tf.nn.relu)\n value = tf.layers.dense(x, 1)\n logits = clip_logits(logits, self.hparams)\n return {\"target_policy\": logits, \"target_value\": value}\n\n\[email protected]_model\nclass FeedForwardCnnSmallCategoricalPolicy(PolicyBase):\n \"\"\"Small cnn network with categorical output.\"\"\"\n\n def body(self, features):\n observations = features[\"inputs_raw\"]\n # Axis 0 - Batch.\n # Axis 1 - Input Frames, 4 frames.\n # Axis 2, 3 - Height & Width.\n # Axis 4 - Channels RGB, 3 colours.\n x = tf.transpose(observations, [0, 2, 3, 1, 4])\n x_shape = common_layers.shape_list(x)\n x = tf.reshape(x, x_shape[:-2] + [-1])\n dropout = getattr(self.hparams, \"dropout_ppo\", 0.0)\n with tf.variable_scope(\"feed_forward_cnn_small\"):\n x = tf.cast(x, tf.float32) / 255.0\n x = tf.layers.conv2d(x, 32, (5, 5), strides=(2, 2),\n activation=tf.nn.relu, padding=\"same\")\n x = tf.layers.conv2d(x, 32, (5, 5), strides=(2, 2),\n activation=tf.nn.relu, padding=\"same\")\n\n flat_x = tf.layers.flatten(x)\n if self.use_epochs:\n epoch = features[\"epoch\"] + tf.zeros([x_shape[0]], dtype=tf.int32)\n # Randomly set epoch to 0 in some cases as that's the inference value.\n rand = tf.random.uniform([x_shape[0]])\n epoch = tf.where(rand < 0.1, tf.zeros_like(epoch), epoch)\n # Embed the epoch number.\n emb_epoch = common_layers.embedding(epoch, 32, 32) # [batch, 32]\n flat_x = tf.concat([flat_x, emb_epoch], axis=1)\n flat_x = tf.layers.dropout(flat_x, rate=dropout)\n x = tf.layers.dense(flat_x, 128, activation=tf.nn.relu)\n\n logits = tf.layers.dense(\n x, self.hparams.problem.num_actions, name=\"dense2\"\n )\n logits = 
clip_logits(logits, self.hparams)\n logits = tf.expand_dims(logits, axis=1)\n value = tf.layers.dense(x, self.distributional_value_size)\n return {\"target_policy\": logits, \"target_value\": value}\n\n\[email protected]_model\nclass FeedForwardCnnSmallCategoricalPolicyNew(PolicyBase):\n \"\"\"Small cnn network with categorical output.\"\"\"\n\n def body(self, features):\n observations = features[\"inputs\"]\n x = tf.transpose(observations, [0, 2, 3, 1, 4])\n x_shape = common_layers.shape_list(x)\n x = tf.reshape(x, x_shape[:-2] + [-1])\n dropout = getattr(self.hparams, \"dropout_ppo\", 0.0)\n with tf.variable_scope(\"feed_forward_cnn_small\"):\n x = tf.cast(x, tf.float32) / 255.0\n x = tf.nn.dropout(x, rate=dropout)\n x = tf.layers.conv2d(\n x, 32, (4, 4), strides=(2, 2), name=\"conv1\",\n activation=common_layers.belu, padding=\"SAME\")\n x = tf.nn.dropout(x, rate=dropout)\n x = tf.layers.conv2d(\n x, 64, (4, 4), strides=(2, 2), name=\"conv2\",\n activation=common_layers.belu, padding=\"SAME\")\n x = tf.nn.dropout(x, rate=dropout)\n x = tf.layers.conv2d(\n x, 128, (4, 4), strides=(2, 2), name=\"conv3\",\n activation=common_layers.belu, padding=\"SAME\")\n\n flat_x = tf.layers.flatten(x)\n flat_x = tf.nn.dropout(flat_x, rate=dropout)\n x = tf.layers.dense(flat_x, 128, activation=tf.nn.relu, name=\"dense1\")\n\n logits = tf.layers.dense(\n x, self.hparams.problem.num_actions, name=\"dense2\"\n )\n logits = tf.expand_dims(logits, axis=1)\n logits = clip_logits(logits, self.hparams)\n\n value = tf.layers.dense(x, 1, name=\"value\")\n return {\"target_policy\": logits, \"target_value\": value}\n\n\[email protected]_model\nclass DenseBitwiseCategoricalPolicy(PolicyBase):\n \"\"\"Dense network with bitwise input and categorical output.\"\"\"\n\n def body(self, features):\n observations = features[\"inputs\"]\n flat_x = tf.layers.flatten(observations)\n with tf.variable_scope(\"dense_bitwise\"):\n flat_x = discretization.int_to_bit_embed(flat_x, 8, 32)\n\n x = tf.layers.dense(flat_x, 256, activation=tf.nn.relu)\n x = tf.layers.dense(flat_x, 128, activation=tf.nn.relu)\n\n logits = tf.layers.dense(x, self.hparams.problem.num_actions)\n\n value = tf.layers.dense(x, 1)[..., 0]\n\n return {\"target_policy\": logits, \"target_value\": value}\n\n\[email protected]_model\nclass RandomPolicy(PolicyBase):\n \"\"\"Random policy with categorical output.\"\"\"\n\n def body(self, features):\n observations = features[\"inputs\"]\n obs_shape = observations.shape.as_list()\n # Just so Saver doesn't complain because of no variables.\n tf.get_variable(\"dummy_var\", initializer=0.0)\n num_actions = self.hparams.problem.num_actions\n logits = tf.constant(\n 1. / float(num_actions),\n shape=(obs_shape[:1] + [1, num_actions])\n )\n value = tf.zeros(obs_shape[:1] + [1])\n return {\"target_policy\": logits, \"target_value\": value}\n"
]
| [
[
"tensorflow.exp",
"tensorflow.reduce_min",
"tensorflow.reshape",
"tensorflow.zeros_like",
"tensorflow.clip_by_value",
"tensorflow.cast",
"tensorflow.random_normal_initializer",
"tensorflow.shape",
"tensorflow.concat",
"tensorflow.layers.flatten",
"tensorflow.logging.info",
"tensorflow.transpose",
"tensorflow.constant",
"tensorflow.variable_scope",
"tensorflow.get_variable_scope",
"tensorflow.layers.dense",
"tensorflow.nn.dropout",
"tensorflow.zeros",
"tensorflow.minimum",
"tensorflow.expand_dims",
"tensorflow.random.uniform",
"tensorflow.layers.conv2d",
"tensorflow.get_variable",
"tensorflow.layers.dropout",
"tensorflow.check_numerics",
"tensorflow.initializers.variance_scaling"
]
]
|
manueltonneau/electra-1 | [
"d02495609c6d90d71dbc3af2c5a7329316581e13"
]
| [
"run_pretraining.py"
]
| [
"# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Pre-trains an ELECTRA model.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport collections\nimport json\n\nimport tensorflow.compat.v1 as tf\n\nimport configure_pretraining\nfrom model import modeling\nfrom model import optimization\nfrom pretrain import pretrain_data\nfrom pretrain import pretrain_helpers\nfrom util import training_utils\nfrom util import utils\n\n\nclass PretrainingModel(object):\n \"\"\"Transformer pre-training using the replaced-token-detection task.\"\"\"\n\n def __init__(self, config: configure_pretraining.PretrainingConfig,\n features, is_training):\n # Set up model config\n self._config = config\n self._bert_config = training_utils.get_bert_config(config)\n if config.debug:\n self._bert_config.num_hidden_layers = 3\n self._bert_config.hidden_size = 144\n self._bert_config.intermediate_size = 144 * 4\n self._bert_config.num_attention_heads = 4\n\n # Mask the input\n masked_inputs = pretrain_helpers.mask(\n config, pretrain_data.features_to_inputs(features), config.mask_prob)\n\n # Generator\n embedding_size = (\n self._bert_config.hidden_size if config.embedding_size is None else\n config.embedding_size)\n if config.uniform_generator:\n mlm_output = self._get_masked_lm_output(masked_inputs, None)\n elif config.electra_objective and config.untied_generator:\n generator = self._build_transformer(\n masked_inputs, is_training,\n bert_config=get_generator_config(config, self._bert_config),\n embedding_size=(None if config.untied_generator_embeddings\n else embedding_size),\n untied_embeddings=config.untied_generator_embeddings,\n name=\"generator\")\n mlm_output = self._get_masked_lm_output(masked_inputs, generator)\n else:\n generator = self._build_transformer(\n masked_inputs, is_training, embedding_size=embedding_size)\n mlm_output = self._get_masked_lm_output(masked_inputs, generator)\n fake_data = self._get_fake_data(masked_inputs, mlm_output.logits)\n self.mlm_output = mlm_output\n self.total_loss = config.gen_weight * mlm_output.loss\n\n # Discriminator\n disc_output = None\n if config.electra_objective:\n discriminator = self._build_transformer(\n fake_data.inputs, is_training, reuse=not config.untied_generator,\n embedding_size=embedding_size)\n disc_output = self._get_discriminator_output(\n fake_data.inputs, discriminator, fake_data.is_fake_tokens)\n self.total_loss += config.disc_weight * disc_output.loss\n\n # Evaluation\n eval_fn_inputs = {\n \"input_ids\": masked_inputs.input_ids,\n \"masked_lm_preds\": mlm_output.preds,\n \"mlm_loss\": mlm_output.per_example_loss,\n \"masked_lm_ids\": masked_inputs.masked_lm_ids,\n \"masked_lm_weights\": masked_inputs.masked_lm_weights,\n \"input_mask\": masked_inputs.input_mask\n }\n if config.electra_objective:\n eval_fn_inputs.update({\n \"disc_loss\": disc_output.per_example_loss,\n \"disc_labels\": 
disc_output.labels,\n \"disc_probs\": disc_output.probs,\n \"disc_preds\": disc_output.preds,\n \"sampled_tokids\": tf.argmax(fake_data.sampled_tokens, -1,\n output_type=tf.int32)\n })\n eval_fn_keys = eval_fn_inputs.keys()\n eval_fn_values = [eval_fn_inputs[k] for k in eval_fn_keys]\n\n def metric_fn(*args):\n \"\"\"Computes the loss and accuracy of the model.\"\"\"\n d = {k: arg for k, arg in zip(eval_fn_keys, args)}\n metrics = dict()\n metrics[\"masked_lm_accuracy\"] = tf.metrics.accuracy(\n labels=tf.reshape(d[\"masked_lm_ids\"], [-1]),\n predictions=tf.reshape(d[\"masked_lm_preds\"], [-1]),\n weights=tf.reshape(d[\"masked_lm_weights\"], [-1]))\n metrics[\"masked_lm_loss\"] = tf.metrics.mean(\n values=tf.reshape(d[\"mlm_loss\"], [-1]),\n weights=tf.reshape(d[\"masked_lm_weights\"], [-1]))\n if config.electra_objective:\n metrics[\"sampled_masked_lm_accuracy\"] = tf.metrics.accuracy(\n labels=tf.reshape(d[\"masked_lm_ids\"], [-1]),\n predictions=tf.reshape(d[\"sampled_tokids\"], [-1]),\n weights=tf.reshape(d[\"masked_lm_weights\"], [-1]))\n if config.disc_weight > 0:\n metrics[\"disc_loss\"] = tf.metrics.mean(d[\"disc_loss\"])\n metrics[\"disc_auc\"] = tf.metrics.auc(\n d[\"disc_labels\"] * d[\"input_mask\"],\n d[\"disc_probs\"] * tf.cast(d[\"input_mask\"], tf.float32))\n metrics[\"disc_accuracy\"] = tf.metrics.accuracy(\n labels=d[\"disc_labels\"], predictions=d[\"disc_preds\"],\n weights=d[\"input_mask\"])\n metrics[\"disc_precision\"] = tf.metrics.accuracy(\n labels=d[\"disc_labels\"], predictions=d[\"disc_preds\"],\n weights=d[\"disc_preds\"] * d[\"input_mask\"])\n metrics[\"disc_recall\"] = tf.metrics.accuracy(\n labels=d[\"disc_labels\"], predictions=d[\"disc_preds\"],\n weights=d[\"disc_labels\"] * d[\"input_mask\"])\n return metrics\n self.eval_metrics = (metric_fn, eval_fn_values)\n\n def _get_masked_lm_output(self, inputs: pretrain_data.Inputs, model):\n \"\"\"Masked language modeling softmax layer.\"\"\"\n masked_lm_weights = inputs.masked_lm_weights\n with tf.variable_scope(\"generator_predictions\"):\n if self._config.uniform_generator:\n logits = tf.zeros(self._bert_config.vocab_size)\n logits_tiled = tf.zeros(\n modeling.get_shape_list(inputs.masked_lm_ids) +\n [self._bert_config.vocab_size])\n logits_tiled += tf.reshape(logits, [1, 1, self._bert_config.vocab_size])\n logits = logits_tiled\n else:\n relevant_hidden = pretrain_helpers.gather_positions(\n model.get_sequence_output(), inputs.masked_lm_positions)\n hidden = tf.layers.dense(\n relevant_hidden,\n units=modeling.get_shape_list(model.get_embedding_table())[-1],\n activation=modeling.get_activation(self._bert_config.hidden_act),\n kernel_initializer=modeling.create_initializer(\n self._bert_config.initializer_range))\n hidden = modeling.layer_norm(hidden)\n output_bias = tf.get_variable(\n \"output_bias\",\n shape=[self._bert_config.vocab_size],\n initializer=tf.zeros_initializer())\n logits = tf.matmul(hidden, model.get_embedding_table(),\n transpose_b=True)\n logits = tf.nn.bias_add(logits, output_bias)\n\n oh_labels = tf.one_hot(\n inputs.masked_lm_ids, depth=self._bert_config.vocab_size,\n dtype=tf.float32)\n\n probs = tf.nn.softmax(logits)\n log_probs = tf.nn.log_softmax(logits)\n label_log_probs = -tf.reduce_sum(log_probs * oh_labels, axis=-1)\n\n numerator = tf.reduce_sum(inputs.masked_lm_weights * label_log_probs)\n denominator = tf.reduce_sum(masked_lm_weights) + 1e-6\n loss = numerator / denominator\n preds = tf.argmax(log_probs, axis=-1, output_type=tf.int32)\n\n MLMOutput = collections.namedtuple(\n 
\"MLMOutput\", [\"logits\", \"probs\", \"loss\", \"per_example_loss\", \"preds\"])\n return MLMOutput(\n logits=logits, probs=probs, per_example_loss=label_log_probs,\n loss=loss, preds=preds)\n\n def _get_discriminator_output(self, inputs, discriminator, labels):\n \"\"\"Discriminator binary classifier.\"\"\"\n with tf.variable_scope(\"discriminator_predictions\"):\n hidden = tf.layers.dense(\n discriminator.get_sequence_output(),\n units=self._bert_config.hidden_size,\n activation=modeling.get_activation(self._bert_config.hidden_act),\n kernel_initializer=modeling.create_initializer(\n self._bert_config.initializer_range))\n logits = tf.squeeze(tf.layers.dense(hidden, units=1), -1)\n weights = tf.cast(inputs.input_mask, tf.float32)\n labelsf = tf.cast(labels, tf.float32)\n losses = tf.nn.sigmoid_cross_entropy_with_logits(\n logits=logits, labels=labelsf) * weights\n per_example_loss = (tf.reduce_sum(losses, axis=-1) /\n (1e-6 + tf.reduce_sum(weights, axis=-1)))\n loss = tf.reduce_sum(losses) / (1e-6 + tf.reduce_sum(weights))\n probs = tf.nn.sigmoid(logits)\n preds = tf.cast(tf.round((tf.sign(logits) + 1) / 2), tf.int32)\n DiscOutput = collections.namedtuple(\n \"DiscOutput\", [\"loss\", \"per_example_loss\", \"probs\", \"preds\",\n \"labels\"])\n return DiscOutput(\n loss=loss, per_example_loss=per_example_loss, probs=probs,\n preds=preds, labels=labels,\n )\n\n def _get_fake_data(self, inputs, mlm_logits):\n \"\"\"Sample from the generator to create corrupted input.\"\"\"\n inputs = pretrain_helpers.unmask(inputs)\n disallow = tf.one_hot(\n inputs.masked_lm_ids, depth=self._bert_config.vocab_size,\n dtype=tf.float32) if self._config.disallow_correct else None\n sampled_tokens = tf.stop_gradient(pretrain_helpers.sample_from_softmax(\n mlm_logits / self._config.temperature, disallow=disallow))\n sampled_tokids = tf.argmax(sampled_tokens, -1, output_type=tf.int32)\n updated_input_ids, masked = pretrain_helpers.scatter_update(\n inputs.input_ids, sampled_tokids, inputs.masked_lm_positions)\n labels = masked * (1 - tf.cast(\n tf.equal(updated_input_ids, inputs.input_ids), tf.int32))\n updated_inputs = pretrain_data.get_updated_inputs(\n inputs, input_ids=updated_input_ids)\n FakedData = collections.namedtuple(\"FakedData\", [\n \"inputs\", \"is_fake_tokens\", \"sampled_tokens\"])\n return FakedData(inputs=updated_inputs, is_fake_tokens=labels,\n sampled_tokens=sampled_tokens)\n\n def _build_transformer(self, inputs: pretrain_data.Inputs, is_training,\n bert_config=None, name=\"electra\", reuse=False, **kwargs):\n \"\"\"Build a transformer encoder network.\"\"\"\n if bert_config is None:\n bert_config = self._bert_config\n with tf.variable_scope(tf.get_variable_scope(), reuse=reuse):\n return modeling.BertModel(\n bert_config=bert_config,\n is_training=is_training,\n input_ids=inputs.input_ids,\n input_mask=inputs.input_mask,\n token_type_ids=inputs.segment_ids,\n use_one_hot_embeddings=self._config.use_tpu,\n scope=name,\n **kwargs)\n\n\ndef get_generator_config(config: configure_pretraining.PretrainingConfig,\n bert_config: modeling.BertConfig):\n \"\"\"Get model config for the generator network.\"\"\"\n gen_config = modeling.BertConfig.from_dict(bert_config.to_dict())\n gen_config.hidden_size = int(round(\n bert_config.hidden_size * config.generator_hidden_size))\n gen_config.num_hidden_layers = int(round(\n bert_config.num_hidden_layers * config.generator_layers))\n gen_config.intermediate_size = 4 * gen_config.hidden_size\n gen_config.num_attention_heads = max(1, gen_config.hidden_size 
// 64)\n return gen_config\n\n\ndef model_fn_builder(config: configure_pretraining.PretrainingConfig):\n \"\"\"Build the model for training.\"\"\"\n\n def model_fn(features, labels, mode, params):\n \"\"\"Build the model for training.\"\"\"\n model = PretrainingModel(config, features,\n mode == tf.estimator.ModeKeys.TRAIN)\n utils.log(\"Model is built!\")\n\n # Load pre-trained weights from checkpoint\n tvars = tf.trainable_variables()\n\n init_checkpoint = tf.train.latest_checkpoint(config.init_checkpoint)\n utils.log(\"Using checkpoint\", init_checkpoint)\n tvars = tf.trainable_variables()\n\n initialized_variable_names = {}\n scaffold_fn = None\n if init_checkpoint:\n assignment_map, initialized_variable_names = modeling.get_assignment_map_from_checkpoint(\n tvars, init_checkpoint)\n if config.use_tpu:\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n utils.log(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n utils.log(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n train_op = optimization.create_optimizer(\n model.total_loss, config.learning_rate, config.num_train_steps,\n weight_decay_rate=config.weight_decay_rate,\n use_tpu=config.use_tpu,\n warmup_steps=config.num_warmup_steps,\n lr_decay_power=config.lr_decay_power\n )\n output_spec = tf.estimator.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=model.total_loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn,\n training_hooks=[training_utils.ETAHook(\n {} if config.use_tpu else dict(loss=model.total_loss),\n config.num_train_steps, config.iterations_per_loop,\n config.use_tpu)]\n )\n elif mode == tf.estimator.ModeKeys.EVAL:\n output_spec = tf.estimator.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=model.total_loss,\n scaffold_fn=scaffold_fn,\n eval_metrics=model.eval_metrics,\n evaluation_hooks=[training_utils.ETAHook(\n {} if config.use_tpu else dict(loss=model.total_loss),\n config.num_eval_steps, config.iterations_per_loop,\n config.use_tpu, is_training=False)])\n else:\n raise ValueError(\"Only TRAIN and EVAL modes are supported\")\n return output_spec\n\n return model_fn\n\n\ndef train_or_eval(config: configure_pretraining.PretrainingConfig):\n \"\"\"Run pre-training or evaluate the pre-trained model.\"\"\"\n if config.do_train == config.do_eval:\n raise ValueError(\"Exactly one of `do_train` or `do_eval` must be True.\")\n if config.debug and config.do_train:\n utils.rmkdir(config.model_dir)\n utils.heading(\"Config:\")\n utils.log_config(config)\n\n is_per_host = tf.estimator.tpu.InputPipelineConfig.PER_HOST_V2\n tpu_cluster_resolver = None\n if config.use_tpu and config.tpu_name:\n tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(\n config.tpu_name, zone=config.tpu_zone, project=config.gcp_project)\n tpu_config = tf.estimator.tpu.TPUConfig(\n iterations_per_loop=config.iterations_per_loop,\n num_shards=config.num_tpu_cores,\n tpu_job_name=config.tpu_job_name,\n per_host_input_for_training=is_per_host)\n run_config = tf.estimator.tpu.RunConfig(\n cluster=tpu_cluster_resolver,\n model_dir=config.model_dir,\n save_checkpoints_steps=config.save_checkpoints_steps,\n keep_checkpoint_max=config.keep_checkpoint_max,\n tpu_config=tpu_config)\n model_fn = 
model_fn_builder(config=config)\n estimator = tf.estimator.tpu.TPUEstimator(\n use_tpu=config.use_tpu,\n model_fn=model_fn,\n config=run_config,\n train_batch_size=config.train_batch_size,\n eval_batch_size=config.eval_batch_size)\n\n if config.do_train:\n utils.heading(\"Running training\")\n estimator.train(input_fn=pretrain_data.get_input_fn(config, True),\n max_steps=config.num_train_steps)\n if config.do_eval:\n utils.heading(\"Running evaluation\")\n result = estimator.evaluate(\n input_fn=pretrain_data.get_input_fn(config, False),\n steps=config.num_eval_steps)\n for key in sorted(result.keys()):\n utils.log(\" {:} = {:}\".format(key, str(result[key])))\n return result\n\n\ndef train_one_step(config: configure_pretraining.PretrainingConfig):\n \"\"\"Builds an ELECTRA model an trains it for one step; useful for debugging.\"\"\"\n train_input_fn = pretrain_data.get_input_fn(config, True)\n features = tf.data.make_one_shot_iterator(train_input_fn(dict(\n batch_size=config.train_batch_size))).get_next()\n model = PretrainingModel(config, features, True)\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n utils.log(sess.run(model.total_loss))\n\n\ndef main():\n parser = argparse.ArgumentParser(description=__doc__)\n parser.add_argument(\"--data-dir\", required=True,\n help=\"Location of data files (model weights, etc).\")\n parser.add_argument(\"--model-name\", required=True,\n help=\"The name of the model being fine-tuned.\")\n parser.add_argument(\"--hparams\", default=\"{}\",\n help=\"JSON dict of model hyperparameters.\")\n args = parser.parse_args()\n if args.hparams.endswith(\".json\"):\n hparams = utils.load_json(args.hparams)\n else:\n hparams = json.loads(args.hparams)\n tf.logging.set_verbosity(tf.logging.ERROR)\n train_or_eval(configure_pretraining.PretrainingConfig(\n args.model_name, args.data_dir, **hparams))\n\n\nif __name__ == \"__main__\":\n main()\n"
]
| [
[
"tensorflow.compat.v1.distribute.cluster_resolver.TPUClusterResolver",
"tensorflow.compat.v1.zeros",
"tensorflow.compat.v1.nn.bias_add",
"tensorflow.compat.v1.estimator.tpu.TPUEstimator",
"tensorflow.compat.v1.equal",
"tensorflow.compat.v1.trainable_variables",
"tensorflow.compat.v1.get_variable_scope",
"tensorflow.compat.v1.argmax",
"tensorflow.compat.v1.reduce_sum",
"tensorflow.compat.v1.layers.dense",
"tensorflow.compat.v1.train.init_from_checkpoint",
"tensorflow.compat.v1.reshape",
"tensorflow.compat.v1.global_variables_initializer",
"tensorflow.compat.v1.zeros_initializer",
"tensorflow.compat.v1.metrics.accuracy",
"tensorflow.compat.v1.metrics.mean",
"tensorflow.compat.v1.train.latest_checkpoint",
"tensorflow.compat.v1.cast",
"tensorflow.compat.v1.train.Scaffold",
"tensorflow.compat.v1.variable_scope",
"tensorflow.compat.v1.estimator.tpu.TPUConfig",
"tensorflow.compat.v1.Session",
"tensorflow.compat.v1.nn.log_softmax",
"tensorflow.compat.v1.estimator.tpu.RunConfig",
"tensorflow.compat.v1.sign",
"tensorflow.compat.v1.one_hot",
"tensorflow.compat.v1.nn.sigmoid_cross_entropy_with_logits",
"tensorflow.compat.v1.nn.softmax",
"tensorflow.compat.v1.nn.sigmoid",
"tensorflow.compat.v1.logging.set_verbosity"
]
]
|
azizullah2017/conversational-datasets | [
"932050fbfa0f650c43a3a4df78238e0fcf848f3f"
]
| [
"amazon_qa/create_data.py"
]
| [
"\"\"\"A Dataflow script for creating Amazon question/answer data.\n\nFor usage see README.md.\n\"\"\"\n\n\nimport argparse\nimport ast\nimport hashlib\nimport json\nimport logging\nimport os\nimport uuid\nfrom functools import partial\n\nimport apache_beam as beam\nimport tensorflow as tf\nfrom apache_beam import pvalue\nfrom apache_beam.io.textio import ReadFromText, WriteToText\nfrom apache_beam.io.tfrecordio import WriteToTFRecord\nfrom apache_beam.options.pipeline_options import PipelineOptions, SetupOptions\n\n_TF_FORMAT = \"TF\"\n_JSON_FORMAT = \"JSON\"\n\n\ndef _parse_args(argv=None):\n \"\"\"Parse command-line args.\"\"\"\n\n def _positive_int(value):\n \"\"\"Define a positive integer ArgumentParser type.\"\"\"\n value = int(value)\n if value <= 0:\n raise argparse.ArgumentTypeError(\n \"Value must be positive, {} was passed.\".format(value))\n return value\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--file_pattern\",\n required=True,\n help=\"File pattern for amazon qa files on Google cloud storage.\",\n )\n parser.add_argument(\n \"--output_dir\",\n required=True,\n help=\"Output directory to write the dataset on Google cloud storage.\",\n )\n parser.add_argument(\n \"--dataset_format\",\n choices={_TF_FORMAT, _JSON_FORMAT},\n default=\"TF\",\n help=\"The dataset format to write. 'TF' for serialized tensorflow \"\n \"examples in TFRecords. 'JSON' for text files with one JSON \"\n \"object per line.\"\n )\n parser.add_argument(\n \"--max_words\",\n type=_positive_int,\n default=59,\n help=\"Maximum number of words a Q or A can have to be included.\",\n )\n parser.add_argument(\n \"--min_words\",\n type=_positive_int,\n default=4,\n help=\"Minimum number of words a Q or A must have to be included.\",\n )\n parser.add_argument(\n \"--train_split\",\n default=0.9, type=float,\n help=\"The proportion of data to put in the training set.\",\n )\n parser.add_argument(\n \"--num_shards_test\",\n default=10,\n type=_positive_int,\n help=\"The number of shards for the test set.\",\n )\n parser.add_argument(\n \"--num_shards_train\",\n default=100,\n type=_positive_int,\n help=\"The number of shards for the train set.\",\n )\n return parser.parse_known_args(argv)\n\n\ndef _create_tuples(qa_object, min_words, max_words):\n \"\"\"Creates (product_id, question, answer) tuples.\"\"\"\n if \"question\" in qa_object:\n question = qa_object['question']\n answer = qa_object['answer']\n product_id = qa_object['asin']\n if (_should_skip(question, min_words, max_words)\n or _should_skip(answer, min_words, max_words)):\n return\n yield (product_id, question, answer)\n\n elif \"questions\" in qa_object:\n product_id = qa_object['asin']\n for question_obj in qa_object['questions']:\n question = question_obj['questionText']\n if _should_skip(question, min_words, max_words):\n continue\n for answer_obj in question_obj['answers']:\n answer = answer_obj['answerText']\n if _should_skip(answer, min_words, max_words):\n continue\n yield (product_id, question, answer)\n\n\ndef _should_skip(text, min_words, max_words):\n # Estimate the number of words by splitting on spaces.\n num_words = len(text.split(\" \"))\n return num_words < min_words or num_words > max_words\n\n\ndef _create_example(product_id, question, answer):\n \"\"\"Create an example dictionary.\"\"\"\n return {\n 'product_id': product_id,\n 'context': question,\n 'response': answer,\n }\n\n\ndef _shuffle_examples(examples):\n examples |= \"add random key\" >> beam.Map(\n lambda example: (uuid.uuid4(), example)\n )\n examples |= 
\"group by key\" >> beam.GroupByKey()\n examples |= \"get shuffled values\" >> beam.FlatMap(lambda t: t[1])\n return examples\n\n\ndef _features_to_serialized_tf_example(features):\n \"\"\"Convert a string dict to a serialized TF example.\n\n The dictionary maps feature names (strings) to feature values (strings).\n \"\"\"\n example = tf.train.Example()\n for feature_name, feature_value in features.items():\n example.features.feature[feature_name].bytes_list.value.append(\n feature_value.encode(\"utf-8\"))\n return example.SerializeToString()\n\n\nclass _TrainTestSplitFn(beam.DoFn):\n \"\"\"Splits an input PCollection of examples into train and test.\n\n This uses the product id to compute the split, so that examples from the\n same product are in the same set. The split is deterministic based on\n prodict id, so that multiple runs produce the same result.\"\"\"\n\n TRAIN_TAG = \"train\"\n TEST_TAG = \"test\"\n\n def __init__(self, train_split=0.9, num_buckets=4096):\n super(_TrainTestSplitFn, self).__init__()\n self._train_split = train_split\n self._num_buckets = num_buckets\n\n def process(self, example):\n split_value = self._split_value(example['product_id'])\n split = (\n self.TRAIN_TAG if split_value < self._train_split else\n self.TEST_TAG)\n yield pvalue.TaggedOutput(split, example)\n\n def _split_value(self, product_id):\n \"\"\"Compute a value from 0 to 1 used to compute the split.\"\"\"\n md5 = hashlib.md5()\n md5.update(product_id)\n md5_digest = int(md5.hexdigest(), 16)\n return (\n (1 + md5_digest % self._num_buckets)\n / float(self._num_buckets)\n )\n\n\ndef run(argv=None):\n \"\"\"Run the beam pipeline.\"\"\"\n args, pipeline_args = _parse_args(argv)\n\n pipeline_options = PipelineOptions(pipeline_args)\n pipeline_options.view_as(SetupOptions).save_main_session = True\n p = beam.Pipeline(options=pipeline_options)\n\n lines = p | \"read qa files\" >> ReadFromText(args.file_pattern)\n\n # The lines are not JSON, but the string representation of python\n # dictionary objects. 
Parse them with ast.literal_eval.\n json_objects = lines | \"parsing dictionaries\" >> beam.Map(ast.literal_eval)\n qa_tuples = json_objects | \"create tuples\" >> beam.FlatMap(\n partial(\n _create_tuples,\n min_words=args.min_words, max_words=args.max_words)\n )\n\n # Remove duplicate examples.\n qa_tuples |= \"key by QA\" >> beam.Map(lambda v: (v[1:], v))\n qa_tuples |= \"group duplicates\" >> beam.GroupByKey()\n qa_tuples |= \"remove duplicates\" >> beam.Map(lambda v: sorted(v[1])[0])\n\n # Create the examples.\n examples = qa_tuples | \"create examples\" >> beam.Map(\n lambda args: _create_example(*args)\n )\n examples = _shuffle_examples(examples)\n\n examples |= \"split train and test\" >> beam.ParDo(\n _TrainTestSplitFn(args.train_split)\n ).with_outputs(_TrainTestSplitFn.TEST_TAG, _TrainTestSplitFn.TRAIN_TAG)\n\n if args.dataset_format == _JSON_FORMAT:\n write_sink = WriteToText\n file_name_suffix = \".json\"\n serialize_fn = json.dumps\n else:\n assert args.dataset_format == _TF_FORMAT\n write_sink = WriteToTFRecord\n file_name_suffix = \".tfrecord\"\n serialize_fn = _features_to_serialized_tf_example\n\n for name, tag in [(\"train\", _TrainTestSplitFn.TRAIN_TAG),\n (\"test\", _TrainTestSplitFn.TEST_TAG)]:\n\n serialized_examples = examples[tag] | (\n \"serialize {} examples\".format(name) >> beam.Map(serialize_fn))\n (\n serialized_examples | (\"write \" + name)\n >> write_sink(\n os.path.join(args.output_dir, name),\n file_name_suffix=file_name_suffix,\n num_shards=args.num_shards_train,\n )\n )\n\n result = p.run()\n result.wait_until_finish()\n\n\nif __name__ == \"__main__\":\n logging.getLogger().setLevel(logging.INFO)\n run()\n"
]
| [
[
"tensorflow.train.Example"
]
]
|
Anatoly1010/Atomize_NIOCH | [
"9359f2ee31e71ee3b2e210ad5ccc2dc3ad3dfbe0"
]
| [
"atomize/tests/pulse_epr/01_resonator_tuning.py"
]
| [
"import sys\nimport signal\nimport numpy as np\nimport atomize.general_modules.general_functions as general\nimport atomize.device_modules.Keysight_3000_Xseries as t3034\nimport atomize.device_modules.Mikran_X_band_MW_bridge as mwBridge\nimport atomize.device_modules.PB_ESR_500_pro as pb_pro\n#import atomize.general_modules.csv_opener_saver_tk_kinter as openfile\nt3034 = t3034.Keysight_3000_Xseries()\npb = pb_pro.PB_ESR_500_Pro()\nmw = mwBridge.Mikran_X_band_MW_bridge()\n\nfreq_before = int(str( mw.mw_bridge_synthesizer() ).split(' ')[1])\n\ndef cleanup(*args):\n mw.mw_bridge_synthesizer( freq_before )\n pb.pulser_stop()\n sys.exit(0)\n\nsignal.signal(signal.SIGTERM, cleanup)\n\n### Experimental parameters\nSTART_FREQ = 9300\nEND_FREQ = 9600\nSTEP = 1\nSCANS = 1\nAVERAGES = 100\nprocess = 'None'\n\n# PULSES\nREP_RATE = '500 Hz'\nPULSE_1_LENGTH = '100 ns'\nPULSE_1_START = '0 ns'\n\n# NAMES\nEXP_NAME = 'Tune Scan'\n\n# setting pulses:\npb.pulser_pulse(name ='P0', channel = 'TRIGGER', start = '0 ns', length = '100 ns')\npb.pulser_pulse(name ='P1', channel = 'MW', start = PULSE_1_START, length = PULSE_1_LENGTH)\n\npb.pulser_repetition_rate( REP_RATE )\npb.pulser_update()\n\n#\nt3034.oscilloscope_record_length( 1000 )\nreal_length = t3034.oscilloscope_record_length( )\n\npoints = int( (END_FREQ - START_FREQ) / STEP ) + 1\ndata = np.zeros( (points, real_length) )\n###\n\n#open1d = openfile.Saver_Opener()\nt3034.oscilloscope_acquisition_type('Average')\nt3034.oscilloscope_trigger_channel('CH1')\n#t3034.oscilloscope_record_length( osc_rec_length )\n#tb = t3034.oscilloscope_time_resolution()\nt3034.oscilloscope_stop()\n\nt3034.oscilloscope_number_of_averages(AVERAGES)\n\n# initialize the power\nmw.mw_bridge_synthesizer( START_FREQ )\n#path_to_file = open1d.create_file_dialog(directory = '')\n\nfor j in general.scans(SCANS):\n i = 0\n freq = START_FREQ\n\n while freq <= END_FREQ:\n \n mw.mw_bridge_synthesizer( freq )\n\n t3034.oscilloscope_start_acquisition()\n y = t3034.oscilloscope_get_curve('CH2')\n\n data[i] = ( data[i] * (j - 1) + y ) / j\n\n process = general.plot_2d(EXP_NAME, np.transpose( data ), start_step = ( (0, 1), (START_FREQ*1000000, STEP*1000000) ), xname = 'Time',\\\n xscale = 's', yname = 'Frequency', yscale = 'Hz', zname = 'Intensity', zscale = 'V', pr = process, text = 'Scan / Frequency: ' + \\\n str(j) + ' / '+ str(freq) )\n\n #f = open(path_to_file,'a')\n #np.savetxt(f, y, fmt='%.10f', delimiter=' ', newline='\\n', header='frequency: %d' % i, footer='', comments='#', encoding=None)\n #f.close()\n\n freq = round( (STEP + freq), 3 ) \n i += 1\n\n mw.mw_bridge_synthesizer( START_FREQ )\n\nmw.mw_bridge_synthesizer( freq_before )\n\npb.pulser_stop()\n"
]
| [
[
"numpy.transpose",
"numpy.zeros"
]
]
|
ReDrawing/redrawing | [
"20743f0c8d64d9d2e15cefa840423c9698c74653"
]
| [
"src/redrawing/ai_models/third_models/oak_blazepose/BlazeposeDepthai.py"
]
| [
"import numpy as np\nfrom collections import namedtuple\nfrom . import mediapipe_utils as mpu\nimport cv2\nfrom pathlib import Path\nfrom .FPS import FPS, now\nimport argparse\nimport os\nimport depthai as dai\nfrom math import atan2\n\nimport open3d as o3d\nfrom .o3d_utils import create_segment, create_grid\nimport time\n\nSCRIPT_DIR = Path(__file__).resolve().parent\nPOSE_DETECTION_MODEL = SCRIPT_DIR / \"models/pose_detection.blob\"\nFULL_BODY_LANDMARK_MODEL = SCRIPT_DIR / \"models/pose_landmark_full_body.blob\"\nUPPER_BODY_LANDMARK_MODEL = SCRIPT_DIR / \"models/pose_landmark_upper_body.blob\"\n\n\n# LINES_*_BODY are used when drawing the skeleton onto the source image. \n# Each variable is a list of continuous lines.\n# Each line is a list of keypoints as defined at https://google.github.io/mediapipe/solutions/pose.html#pose-landmark-model-blazepose-ghum-3d\nLINES_FULL_BODY = [[28,30,32,28,26,24,12,11,23,25,27,29,31,27], \n [23,24],\n [22,16,18,20,16,14,12], \n [21,15,17,19,15,13,11],\n [8,6,5,4,0,1,2,3,7],\n [10,9],\n ]\nLINES_UPPER_BODY = [[12,11,23,24,12], \n [22,16,18,20,16,14,12], \n [21,15,17,19,15,13,11],\n [8,6,5,4,0,1,2,3,7],\n [10,9],\n ]\n# LINE_MESH_*_BODY are used when drawing the skeleton in 3D. \nrgb = {\"right\":(0,1,0), \"left\":(1,0,0), \"middle\":(1,1,0)}\nLINE_MESH_FULL_BODY = [ [9,10],[4,6],[1,3],\n [12,14],[14,16],[16,20],[20,18],[18,16],\n [12,11],[11,23],[23,24],[24,12],\n [11,13],[13,15],[15,19],[19,17],[17,15],\n [24,26],[26,28],[32,30],\n [23,25],[25,27],[29,31]]\nLINE_TEST = [ [12,11],[11,23],[23,24],[24,12]]\n\nCOLORS_FULL_BODY = [\"middle\",\"right\",\"left\",\n \"right\",\"right\",\"right\",\"right\",\"right\",\n \"middle\",\"middle\",\"middle\",\"middle\",\n \"left\",\"left\",\"left\",\"left\",\"left\",\n \"right\",\"right\",\"right\",\"left\",\"left\",\"left\"]\nCOLORS_FULL_BODY = [rgb[x] for x in COLORS_FULL_BODY]\nLINE_MESH_UPPER_BODY = [[9,10],[4,6],[1,3],\n [12,14],[14,16],[16,20],[20,18],[18,16],\n [12,11],[11,23],[23,24],[24,12],\n [11,13],[13,15],[15,19],[19,17],[17,15]\n ]\n\n# For gesture demo\nsemaphore_flag = {\n (3,4):'A', (2,4):'B', (1,4):'C', (0,4):'D',\n (4,7):'E', (4,6):'F', (4,5):'G', (2,3):'H',\n (0,3):'I', (0,6):'J', (3,0):'K', (3,7):'L',\n (3,6):'M', (3,5):'N', (2,1):'O', (2,0):'P',\n (2,7):'Q', (2,6):'R', (2,5):'S', (1,0):'T',\n (1,7):'U', (0,5):'V', (7,6):'W', (7,5):'X',\n (1,6):'Y', (5,6):'Z'\n}\n\n# def to_planar(arr: np.ndarray, shape: tuple) -> list:\ndef to_planar(arr: np.ndarray, shape: tuple) -> np.ndarray:\n resized = cv2.resize(arr, shape)\n return resized.transpose(2,0,1)\n\nclass BlazeposeDepthai:\n def __init__(self, input_src=None,\n pd_path=POSE_DETECTION_MODEL, \n pd_score_thresh=0.5, pd_nms_thresh=0.3,\n lm_path=FULL_BODY_LANDMARK_MODEL,\n lm_score_threshold=0.7,\n full_body=True,\n use_gesture=False,\n smoothing= True,\n filter_window_size=5,\n filter_velocity_scale=10,\n show_3d=False,\n crop=False,\n multi_detection=False,\n output=None,\n internal_fps=15):\n \n self.pd_path = pd_path\n self.pd_score_thresh = pd_score_thresh\n self.pd_nms_thresh = pd_nms_thresh\n self.lm_path = lm_path\n self.lm_score_threshold = lm_score_threshold\n self.full_body = full_body\n self.use_gesture = use_gesture\n self.smoothing = smoothing\n self.show_3d = show_3d\n self.crop = crop\n self.multi_detection = multi_detection\n if self.multi_detection:\n print(\"With multi-detection, smoothing filter is disabled.\")\n self.smoothing = False\n self.internal_fps = internal_fps\n \n if input_src == None:\n self.input_type = \"internal\" # 
OAK* internal color camera\n self.video_fps = internal_fps # Used when saving the output in a video file. Should be close to the real fps\n video_height = video_width = 1080 # Depends on cam.setResolution() in create_pipeline()\n elif input_src.endswith('.jpg') or input_src.endswith('.png') :\n self.input_type= \"image\"\n self.img = cv2.imread(input_src)\n self.video_fps = 25\n video_height, video_width = self.img.shape[:2]\n else:\n self.input_type = \"video\"\n if input_src.isdigit():\n input_type = \"webcam\"\n input_src = int(input_src)\n self.cap = cv2.VideoCapture(input_src)\n self.video_fps = int(self.cap.get(cv2.CAP_PROP_FPS))\n video_width = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n video_height = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n print(\"Video FPS:\", self.video_fps)\n\n self.nb_kps = 33 if self.full_body else 25\n\n if self.smoothing:\n self.filter = mpu.LandmarksSmoothingFilter(filter_window_size, filter_velocity_scale, (self.nb_kps, 3))\n \n # Create SSD anchors \n # https://github.com/google/mediapipe/blob/master/mediapipe/modules/pose_detection/pose_detection_cpu.pbtxt\n anchor_options = mpu.SSDAnchorOptions(num_layers=4, \n min_scale=0.1484375,\n max_scale=0.75,\n input_size_height=128,\n input_size_width=128,\n anchor_offset_x=0.5,\n anchor_offset_y=0.5,\n strides=[8, 16, 16, 16],\n aspect_ratios= [1.0],\n reduce_boxes_in_lowest_layer=False,\n interpolated_scale_aspect_ratio=1.0,\n fixed_anchor_size=True)\n self.anchors = mpu.generate_anchors(anchor_options)\n self.nb_anchors = self.anchors.shape[0]\n print(f\"{self.nb_anchors} anchors have been created\")\n\n # Rendering flags\n self.show_pd_box = False\n self.show_pd_kps = False\n self.show_rot_rect = False\n self.show_landmarks = True\n self.show_scores = False\n self.show_gesture = self.use_gesture\n self.show_fps = True\n\n if self.show_3d:\n self.vis3d = o3d.visualization.Visualizer()\n self.vis3d.create_window() \n opt = self.vis3d.get_render_option()\n opt.background_color = np.asarray([0, 0, 0])\n z = min(video_height, video_width)/3\n self.grid_floor = create_grid([0,video_height,-z],[video_width,video_height,-z],[video_width,video_height,z],[0,video_height,z],5,2, color=(1,1,1))\n self.grid_wall = create_grid([0,0,z],[video_width,0,z],[video_width,video_height,z],[0,video_height,z],5,2, color=(1,1,1))\n self.vis3d.add_geometry(self.grid_floor)\n self.vis3d.add_geometry(self.grid_wall)\n view_control = self.vis3d.get_view_control()\n view_control.set_up(np.array([0,-1,0]))\n view_control.set_front(np.array([0,0,-1]))\n\n if output is None:\n self.output = None\n else:\n fourcc = cv2.VideoWriter_fourcc(*\"MJPG\")\n self.output = cv2.VideoWriter(output,fourcc,self.video_fps,(video_width, video_height)) \n\n def create_pipeline(self):\n print(\"Creating pipeline...\")\n # Start defining a pipeline\n pipeline = dai.Pipeline()\n pipeline.setOpenVINOVersion(version = dai.OpenVINO.Version.VERSION_2021_2)\n self.pd_input_length = 128\n\n if self.input_type == \"internal\":\n # ColorCamera\n print(\"Creating Color Camera...\")\n cam = pipeline.createColorCamera()\n cam.setPreviewSize(self.pd_input_length, self.pd_input_length)\n cam.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)\n # Crop video to square shape (palm detection takes square image as input)\n self.video_size = min(cam.getVideoSize())\n cam.setVideoSize(self.video_size, self.video_size)\n # \n cam.setFps(self.internal_fps)\n cam.setInterleaved(False)\n cam.setBoardSocket(dai.CameraBoardSocket.RGB)\n cam_out = 
pipeline.createXLinkOut()\n cam_out.setStreamName(\"cam_out\")\n # Link video output to host for higher resolution\n cam.video.link(cam_out.input)\n\n # Define pose detection model\n print(\"Creating Pose Detection Neural Network...\")\n pd_nn = pipeline.createNeuralNetwork()\n pd_nn.setBlobPath(str(Path(self.pd_path).resolve().absolute()))\n # Increase threads for detection\n # pd_nn.setNumInferenceThreads(2)\n # Specify that network takes latest arriving frame in non-blocking manner\n # Pose detection input \n if self.input_type == \"internal\":\n pd_nn.input.setQueueSize(1)\n pd_nn.input.setBlocking(False)\n cam.preview.link(pd_nn.input)\n else:\n pd_in = pipeline.createXLinkIn()\n pd_in.setStreamName(\"pd_in\")\n pd_in.out.link(pd_nn.input)\n # Pose detection output\n pd_out = pipeline.createXLinkOut()\n pd_out.setStreamName(\"pd_out\")\n pd_nn.out.link(pd_out.input)\n \n\n # Define landmark model\n print(\"Creating Landmark Neural Network...\") \n lm_nn = pipeline.createNeuralNetwork()\n lm_nn.setBlobPath(str(Path(self.lm_path).resolve().absolute()))\n lm_nn.setNumInferenceThreads(1)\n # Landmark input\n self.lm_input_length = 256\n lm_in = pipeline.createXLinkIn()\n lm_in.setStreamName(\"lm_in\")\n lm_in.out.link(lm_nn.input)\n # Landmark output\n lm_out = pipeline.createXLinkOut()\n lm_out.setStreamName(\"lm_out\")\n lm_nn.out.link(lm_out.input)\n \n print(\"Pipeline created.\")\n return pipeline \n\n \n def pd_postprocess(self, inference):\n scores = np.array(inference.getLayerFp16(\"classificators\"), dtype=np.float16) # 896\n bboxes = np.array(inference.getLayerFp16(\"regressors\"), dtype=np.float16).reshape((self.nb_anchors,12)) # 896x12\n\n # Decode bboxes\n self.regions = mpu.decode_bboxes(self.pd_score_thresh, scores, bboxes, self.anchors, best_only=not self.multi_detection)\n # Non maximum suppression (not needed if best_only is True)\n if self.multi_detection: \n self.regions = mpu.non_max_suppression(self.regions, self.pd_nms_thresh)\n \n mpu.detections_to_rect(self.regions, kp_pair=[0,1] if self.full_body else [2,3])\n mpu.rect_transformation(self.regions, self.frame_size, self.frame_size)\n\n def pd_render(self, frame):\n for r in self.regions:\n if self.show_pd_box:\n box = (np.array(r.pd_box) * self.frame_size).astype(int)\n cv2.rectangle(frame, (box[0], box[1]), (box[0]+box[2], box[1]+box[3]), (0,255,0), 2)\n if self.show_pd_kps:\n # Key point 0 - mid hip center\n # Key point 1 - point that encodes size & rotation (for full body)\n # Key point 2 - mid shoulder center\n # Key point 3 - point that encodes size & rotation (for upper body)\n if self.full_body:\n # Only kp 0 and 1 used\n list_kps = [0, 1]\n else:\n # Only kp 2 and 3 used for upper body\n list_kps = [2, 3]\n for kp in list_kps:\n x = int(r.pd_kps[kp][0] * self.frame_size)\n y = int(r.pd_kps[kp][1] * self.frame_size)\n cv2.circle(frame, (x, y), 3, (0,0,255), -1)\n cv2.putText(frame, str(kp), (x, y+12), cv2.FONT_HERSHEY_PLAIN, 1.5, (0,255,0), 2)\n if self.show_scores:\n cv2.putText(frame, f\"Pose score: {r.pd_score:.2f}\", \n (int(r.pd_box[0] * self.frame_size+10), int((r.pd_box[1]+r.pd_box[3])*self.frame_size+60)), \n cv2.FONT_HERSHEY_PLAIN, 2, (255,255,0), 2)\n\n \n def lm_postprocess(self, region, inference):\n region.lm_score = inference.getLayerFp16(\"output_poseflag\")[0]\n if region.lm_score > self.lm_score_threshold: \n self.nb_active_regions += 1\n\n lm_raw = np.array(inference.getLayerFp16(\"ld_3d\")).reshape(-1,5)\n # Each keypoint have 5 information:\n # - X,Y coordinates are local to the region 
of\n # interest and range from [0.0, 255.0].\n # - Z coordinate is measured in \"image pixels\" like\n # the X and Y coordinates and represents the\n # distance relative to the plane of the subject's\n # hips, which is the origin of the Z axis. Negative\n # values are between the hips and the camera;\n # positive values are behind the hips. Z coordinate\n # scale is similar with X, Y scales but has different\n # nature as obtained not via human annotation, by\n # fitting synthetic data (GHUM model) to the 2D\n # annotation.\n # - Visibility, after user-applied sigmoid denotes the\n # probability that a keypoint is located within the\n # frame and not occluded by another bigger body\n # part or another object.\n # - Presence, after user-applied sigmoid denotes the\n # probability that a keypoint is located within the\n # frame.\n\n # Normalize x,y,z. Scaling in z = scaling in x = 1/self.lm_input_length\n lm_raw[:,:3] /= self.lm_input_length\n # Apply sigmoid on visibility and presence (if used later)\n # lm_raw[:,3:5] = 1 / (1 + np.exp(-lm_raw[:,3:5]))\n \n # region.landmarks contains the landmarks normalized 3D coordinates in the relative oriented body bounding box\n region.landmarks = lm_raw[:,:3]\n # Calculate the landmark coordinate in square padded image (region.landmarks_padded)\n src = np.array([(0, 0), (1, 0), (1, 1)], dtype=np.float32)\n dst = np.array([ (x, y) for x,y in region.rect_points[1:]], dtype=np.float32) # region.rect_points[0] is left bottom point and points going clockwise!\n mat = cv2.getAffineTransform(src, dst)\n lm_xy = np.expand_dims(region.landmarks[:self.nb_kps,:2], axis=0)\n lm_xy = np.squeeze(cv2.transform(lm_xy, mat)) \n # A segment of length 1 in the coordinates system of body bounding box takes region.rect_w_a pixels in the\n # original image. 
Then we arbitrarily divide by 4 for a more realistic appearance.\n lm_z = region.landmarks[:self.nb_kps,2:3] * region.rect_w_a / 4\n lm_xyz = np.hstack((lm_xy, lm_z))\n if self.smoothing:\n lm_xyz = self.filter.apply(lm_xyz)\n region.landmarks_padded = lm_xyz.astype(np.int)\n # If we added padding to make the image square, we need to remove this padding from landmark coordinates\n # region.landmarks_abs contains absolute landmark coordinates in the original image (padding removed))\n region.landmarks_abs = region.landmarks_padded.copy()\n if self.pad_h > 0:\n region.landmarks_abs[:,1] -= self.pad_h\n if self.pad_w > 0:\n region.landmarks_abs[:,0] -= self.pad_w\n\n if self.use_gesture: self.recognize_gesture(region)\n\n\n def lm_render(self, frame, region):\n if region.lm_score > self.lm_score_threshold:\n if self.show_rot_rect:\n cv2.polylines(frame, [np.array(region.rect_points)], True, (0,255,255), 2, cv2.LINE_AA)\n if self.show_landmarks:\n \n list_connections = LINES_FULL_BODY if self.full_body else LINES_UPPER_BODY\n lines = [np.array([region.landmarks_padded[point,:2] for point in line]) for line in list_connections]\n cv2.polylines(frame, lines, False, (255, 180, 90), 2, cv2.LINE_AA)\n \n for i,x_y in enumerate(region.landmarks_padded[:,:2]):\n if i > 10:\n color = (0,255,0) if i%2==0 else (0,0,255)\n elif i == 0:\n color = (0,255,255)\n elif i in [4,5,6,8,10]:\n color = (0,255,0)\n else:\n color = (0,0,255)\n cv2.circle(frame, (x_y[0], x_y[1]), 4, color, -11)\n\n if self.show_3d:\n points = region.landmarks_abs\n lines = LINE_MESH_FULL_BODY if self.full_body else LINE_MESH_UPPER_BODY\n colors = COLORS_FULL_BODY\n for i,a_b in enumerate(lines):\n a, b = a_b\n line = create_segment(points[a], points[b], radius=5, color=colors[i])\n if line: self.vis3d.add_geometry(line, reset_bounding_box=False)\n \n \n\n if self.show_scores:\n cv2.putText(frame, f\"Landmark score: {region.lm_score:.2f}\", \n (int(region.pd_box[0] * self.frame_size+10), int((region.pd_box[1]+region.pd_box[3])*self.frame_size+90)), \n cv2.FONT_HERSHEY_PLAIN, 2, (255,255,0), 2)\n if self.use_gesture and self.show_gesture:\n cv2.putText(frame, region.gesture, (int(region.pd_box[0]*self.frame_size+10), int(region.pd_box[1]*self.frame_size-50)), \n cv2.FONT_HERSHEY_PLAIN, 5, (0,1190,255), 3)\n \n\n\n \n def recognize_gesture(self, r): \n\n def angle_with_y(v):\n # v: 2d vector (x,y)\n # Returns angle in degree ofv with y-axis of image plane\n if v[1] == 0:\n return 90\n angle = atan2(v[0], v[1])\n return np.degrees(angle)\n\n # For the demo, we want to recognize the flag semaphore alphabet\n # For this task, we just need to measure the angles of both arms with vertical\n right_arm_angle = angle_with_y(r.landmarks_abs[14,:2] - r.landmarks_abs[12,:2])\n left_arm_angle = angle_with_y(r.landmarks_abs[13,:2] - r.landmarks_abs[11,:2])\n right_pose = int((right_arm_angle +202.5) / 45) \n left_pose = int((left_arm_angle +202.5) / 45) \n r.gesture = semaphore_flag.get((right_pose, left_pose), None)\n \n def run(self):\n\n device = dai.Device(self.create_pipeline())\n device.startPipeline()\n\n # Define data queues \n if self.input_type == \"internal\":\n q_video = device.getOutputQueue(name=\"cam_out\", maxSize=1, blocking=False)\n q_pd_out = device.getOutputQueue(name=\"pd_out\", maxSize=1, blocking=False)\n q_lm_out = device.getOutputQueue(name=\"lm_out\", maxSize=2, blocking=False)\n q_lm_in = device.getInputQueue(name=\"lm_in\")\n else:\n q_pd_in = device.getInputQueue(name=\"pd_in\")\n q_pd_out = 
device.getOutputQueue(name=\"pd_out\", maxSize=4, blocking=True)\n q_lm_out = device.getOutputQueue(name=\"lm_out\", maxSize=4, blocking=True)\n q_lm_in = device.getInputQueue(name=\"lm_in\")\n\n self.fps = FPS(mean_nb_frames=20)\n\n seq_num = 0\n nb_pd_inferences = 0\n nb_lm_inferences = 0\n glob_pd_rtrip_time = 0\n glob_lm_rtrip_time = 0\n while True:\n self.fps.update()\n \n if self.input_type == \"internal\":\n in_video = q_video.get()\n video_frame = in_video.getCvFrame()\n self.frame_size = video_frame.shape[0] # The image is square cropped on the device\n self.pad_w = self.pad_h = 0\n else:\n if self.input_type == \"image\":\n vid_frame = self.img\n else:\n ok, vid_frame = self.cap.read()\n if not ok:\n break\n \n h, w = vid_frame.shape[:2]\n if self.crop:\n # Cropping the long side to get a square shape\n self.frame_size = min(h, w)\n dx = (w - self.frame_size) // 2\n dy = (h - self.frame_size) // 2\n video_frame = vid_frame[dy:dy+self.frame_size, dx:dx+self.frame_size]\n else:\n # Padding on the small side to get a square shape\n self.frame_size = max(h, w)\n self.pad_h = int((self.frame_size - h)/2)\n self.pad_w = int((self.frame_size - w)/2)\n video_frame = cv2.copyMakeBorder(vid_frame, self.pad_h, self.pad_h, self.pad_w, self.pad_w, cv2.BORDER_CONSTANT)\n\n frame_nn = dai.ImgFrame()\n frame_nn.setSequenceNum(seq_num)\n frame_nn.setWidth(self.pd_input_length)\n frame_nn.setHeight(self.pd_input_length)\n frame_nn.setData(to_planar(video_frame, (self.pd_input_length, self.pd_input_length)))\n pd_rtrip_time = now()\n q_pd_in.send(frame_nn)\n \n\n seq_num += 1\n\n annotated_frame = video_frame.copy()\n\n # Get pose detection\n inference = q_pd_out.get()\n if self.input_type != \"internal\": \n pd_rtrip_time = now() - pd_rtrip_time\n glob_pd_rtrip_time += pd_rtrip_time\n self.pd_postprocess(inference)\n self.pd_render(annotated_frame)\n nb_pd_inferences += 1\n\n # Landmarks\n self.nb_active_regions = 0\n if self.show_3d:\n self.vis3d.clear_geometries()\n self.vis3d.add_geometry(self.grid_floor, reset_bounding_box=False)\n self.vis3d.add_geometry(self.grid_wall, reset_bounding_box=False)\n for i,r in enumerate(self.regions):\n frame_nn = mpu.warp_rect_img(r.rect_points, video_frame, self.lm_input_length, self.lm_input_length)\n nn_data = dai.NNData() \n nn_data.setLayer(\"input_1\", to_planar(frame_nn, (self.lm_input_length, self.lm_input_length)))\n if i == 0: lm_rtrip_time = now() # We measure only for the first region\n q_lm_in.send(nn_data)\n \n # Get landmarks\n inference = q_lm_out.get()\n if i == 0: \n lm_rtrip_time = now() - lm_rtrip_time\n glob_lm_rtrip_time += lm_rtrip_time\n nb_lm_inferences += 1\n self.lm_postprocess(r, inference)\n self.lm_render(annotated_frame, r)\n if self.show_3d:\n self.vis3d.poll_events()\n self.vis3d.update_renderer()\n if self.smoothing and self.nb_active_regions == 0:\n self.filter.reset()\n\n if self.input_type != \"internal\" and not self.crop:\n annotated_frame = annotated_frame[self.pad_h:self.pad_h+h, self.pad_w:self.pad_w+w]\n\n if self.show_fps:\n self.fps.display(annotated_frame, orig=(50,50), size=1, color=(240,180,100))\n cv2.imshow(\"Blazepose\", annotated_frame)\n\n if self.output:\n self.output.write(annotated_frame)\n\n key = cv2.waitKey(1) \n if key == ord('q') or key == 27:\n break\n elif key == 32:\n # Pause on space bar\n cv2.waitKey(0)\n elif key == ord('1'):\n self.show_pd_box = not self.show_pd_box\n elif key == ord('2'):\n self.show_pd_kps = not self.show_pd_kps\n elif key == ord('3'):\n self.show_rot_rect = not 
self.show_rot_rect\n elif key == ord('4'):\n self.show_landmarks = not self.show_landmarks\n elif key == ord('5'):\n self.show_scores = not self.show_scores\n elif key == ord('6'):\n self.show_gesture = not self.show_gesture\n elif key == ord('f'):\n self.show_fps = not self.show_fps\n\n # Print some stats\n print(f\"# pose detection inferences : {nb_pd_inferences}\")\n print(f\"# landmark inferences : {nb_lm_inferences}\")\n if self.input_type != \"internal\" and nb_pd_inferences != 0: print(f\"Pose detection round trip : {glob_pd_rtrip_time/nb_pd_inferences*1000:.1f} ms\")\n if nb_lm_inferences != 0: print(f\"Landmark round trip : {glob_lm_rtrip_time/nb_lm_inferences*1000:.1f} ms\")\n\n if self.output:\n self.output.release()\n \n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('-i', '--input', type=str, \n help=\"Path to video or image file to use as input (default: internal camera\")\n parser.add_argument('-g', '--gesture', action=\"store_true\", \n help=\"enable gesture recognition\")\n parser.add_argument(\"--pd_m\", type=str,\n help=\"Path to an .blob file for pose detection model\")\n parser.add_argument(\"--lm_m\", type=str,\n help=\"Path to an .blob file for landmark model\")\n parser.add_argument('-c', '--crop', action=\"store_true\", \n help=\"Center crop frames to a square shape before feeding pose detection model\")\n parser.add_argument('-u', '--upper_body', action=\"store_true\", \n help=\"Use an upper body model\")\n parser.add_argument('--no_smoothing', action=\"store_true\", \n help=\"Disable smoothing filter\")\n parser.add_argument('--filter_window_size', type=int, default=5,\n help=\"Smoothing filter window size. Higher value adds to lag and to stability (default=%(default)i)\") \n parser.add_argument('--filter_velocity_scale', type=float, default=10,\n help=\"Smoothing filter velocity scale. Lower value adds to lag and to stability (default=%(default)s)\") \n parser.add_argument('-3', '--show_3d', action=\"store_true\", \n help=\"Display skeleton in 3d in a separate window (valid only for full body landmark model)\")\n parser.add_argument(\"-o\",\"--output\",\n help=\"Path to output video file\")\n parser.add_argument('--multi_detection', action=\"store_true\", \n help=\"Force multiple person detection (at your own risk)\")\n parser.add_argument('--internal_fps', type=int, default=15,\n help=\"Fps of internal color camera. Too high value lower NN fps (default=%(default)i)\") \n\n\n \n\n args = parser.parse_args()\n\n if not args.pd_m:\n args.pd_m = POSE_DETECTION_MODEL\n if not args.lm_m:\n if args.upper_body:\n args.lm_m = UPPER_BODY_LANDMARK_MODEL\n else:\n args.lm_m = FULL_BODY_LANDMARK_MODEL\n ht = BlazeposeDepthai(input_src=args.input, \n pd_path=args.pd_m,\n lm_path=args.lm_m,\n full_body=not args.upper_body,\n smoothing=not args.no_smoothing,\n filter_window_size=args.filter_window_size,\n filter_velocity_scale=args.filter_velocity_scale,\n use_gesture=args.gesture,\n show_3d=args.show_3d,\n crop=args.crop,\n multi_detection=args.multi_detection,\n output=args.output,\n internal_fps=args.internal_fps)\n ht.run()\n"
]
| [
[
"numpy.array",
"numpy.asarray",
"numpy.degrees",
"numpy.hstack",
"numpy.expand_dims"
]
]
|
ManuelNavarroGarcia/cpsplines | [
"544e8ccf7e438a192dea6c4a4e685d9346f57f9a"
]
| [
"tests/test_b_matrix.py"
]
| [
"import numpy as np\r\nimport pytest\r\nfrom cpsplines.psplines.bspline_basis import BsplineBasis\r\nfrom cpsplines.utils.weighted_b import get_idx_fitting_region, get_weighted_B\r\n\r\nB1 = (1 / 8) * np.array(\r\n [\r\n [4, 4, 0, 0, 0, 0, 0],\r\n [1, 6, 1, 0, 0, 0, 0],\r\n [0, 4, 4, 0, 0, 0, 0],\r\n [0, 1, 6, 1, 0, 0, 0],\r\n [0, 0, 4, 4, 0, 0, 0],\r\n [0, 0, 1, 6, 1, 0, 0],\r\n [0, 0, 0, 4, 4, 0, 0],\r\n [0, 0, 0, 1, 6, 1, 0],\r\n [0, 0, 0, 0, 4, 4, 0],\r\n [0, 0, 0, 0, 1, 6, 1],\r\n [0, 0, 0, 0, 0, 4, 4],\r\n ]\r\n)\r\n\r\nB2 = (1 / 750) * np.array(\r\n [\r\n [125, 500, 125, 0, 0, 0, 0, 0, 0],\r\n [8, 311, 404, 27, 0, 0, 0, 0, 0],\r\n [0, 64, 473, 212, 1, 0, 0, 0, 0],\r\n [0, 1, 212, 473, 64, 0, 0, 0, 0],\r\n [0, 0, 27, 404, 311, 8, 0, 0, 0],\r\n [0, 0, 0, 125, 500, 125, 0, 0, 0],\r\n [0, 0, 0, 8, 311, 404, 27, 0, 0],\r\n [0, 0, 0, 0, 64, 473, 212, 1, 0],\r\n [0, 0, 0, 0, 1, 212, 473, 64, 0],\r\n [0, 0, 0, 0, 0, 27, 404, 311, 8],\r\n [0, 0, 0, 0, 0, 0, 125, 500, 125],\r\n ]\r\n)\r\n\r\nB3 = (1 / 240000) * np.array(\r\n [\r\n [10000, 110000, 110000, 10000, 0, 0, 0, 0, 0, 0, 0],\r\n [81, 28156, 137846, 71516, 2401, 0, 0, 0, 0, 0, 0],\r\n [0, 1296, 59056, 142256, 37136, 256, 0, 0, 0, 0, 0],\r\n [0, 0, 6561, 97516, 121286, 14636, 1, 0, 0, 0, 0],\r\n [0, 0, 16, 20656, 130736, 84496, 4096, 0, 0, 0, 0],\r\n [0, 0, 0, 625, 47500, 143750, 47500, 625, 0, 0, 0],\r\n [0, 0, 0, 0, 4096, 84496, 130736, 20656, 16, 0, 0],\r\n [0, 0, 0, 0, 1, 14636, 121286, 97516, 6561, 0, 0],\r\n [0, 0, 0, 0, 0, 256, 37136, 142256, 59056, 1296, 0],\r\n [0, 0, 0, 0, 0, 0, 2401, 71516, 137846, 28156, 81],\r\n [0, 0, 0, 0, 0, 0, 0, 10000, 110000, 110000, 10000],\r\n ]\r\n)\r\n\r\nBV1 = (1 / 50) * np.array(\r\n [\r\n [0, 0, 0, 0, 0, 0, 0, 0],\r\n [0, 25, 25, 0, 0, 0, 0, 0],\r\n [0, 4, 37, 9, 0, 0, 0, 0],\r\n [0, 0, 16, 33, 1, 0, 0, 0],\r\n [0, 0, 1, 33, 16, 0, 0, 0],\r\n [0, 0, 0, 9, 37, 4, 0, 0],\r\n [0, 0, 0, 0, 25, 25, 0, 0],\r\n [0, 0, 0, 0, 0, 0, 0, 0],\r\n [0, 0, 0, 0, 0, 0, 0, 0],\r\n ]\r\n)\r\nBV2 = (1 / 162) * np.array(\r\n [\r\n [0, 0, 0, 0, 0, 0, 0, 0, 0],\r\n [0, 0, 0, 0, 0, 0, 0, 0, 0],\r\n [0, 0, 27, 108, 27, 0, 0, 0, 0],\r\n [0, 0, 1, 60, 93, 8, 0, 0, 0],\r\n [0, 0, 0, 8, 93, 60, 1, 0, 0],\r\n [0, 0, 0, 0, 27, 108, 27, 0, 0],\r\n [0, 0, 0, 0, 1, 60, 93, 8, 0],\r\n [0, 0, 0, 0, 0, 8, 93, 60, 1],\r\n [0, 0, 0, 0, 0, 0, 27, 108, 27],\r\n ]\r\n)\r\n\r\nBV3 = (1 / 98) * np.array(\r\n [\r\n [49, 49, 0, 0, 0, 0, 0, 0, 0],\r\n [4, 69, 25, 0, 0, 0, 0, 0, 0],\r\n [0, 16, 73, 9, 0, 0, 0, 0, 0],\r\n [0, 0, 36, 61, 1, 0, 0, 0, 0],\r\n [0, 0, 1, 61, 36, 0, 0, 0, 0],\r\n [0, 0, 0, 9, 73, 16, 0, 0, 0],\r\n [0, 0, 0, 0, 25, 69, 4, 0, 0],\r\n [0, 0, 0, 0, 0, 49, 49, 0, 0],\r\n [0, 0, 0, 0, 0, 0, 0, 0, 0],\r\n [0, 0, 0, 0, 0, 0, 0, 0, 0],\r\n ]\r\n)\r\n\r\n# Test if the matrix B is computed properly using different parameters. 
Since at\r\n# the end we are interested on the weighted matrices, the output is the list of\r\n# weighted matrices from `get_weighted_B`.\r\n\r\n\r\[email protected](\r\n \"x_sam, deg, n_int, prediction, B\",\r\n [\r\n ([np.linspace(0, 10, 11)], [2], [5], [{}], [B1]),\r\n ([np.linspace(0, 10, 11)], [3], [6], [{}], [B2]),\r\n ([np.linspace(0, 10, 11)], [4], [7], [{}], [B3]),\r\n ([np.linspace(0, 5, 6)], [2], [3], [{\"backwards\": -1, \"forward\": 8}], [BV1]),\r\n (\r\n [np.linspace(0, 6, 7), np.linspace(-2.5, 3.7, 8)],\r\n [3, 2],\r\n [4, 5],\r\n [{\"backwards\": -2.5}, {\"forward\": 5}],\r\n [BV2, BV3],\r\n ),\r\n ],\r\n)\r\ndef test_B_matrix(x_sam, deg, n_int, prediction, B):\r\n bspline = []\r\n for x, d, n, pred in zip(x_sam, deg, n_int, prediction):\r\n bsp = BsplineBasis(deg=d, xsample=x, n_int=n, prediction=pred)\r\n bsp.get_matrix_B()\r\n bspline.append(bsp)\r\n\r\n B_out = get_weighted_B(bspline_bases=bspline)\r\n\r\n for P, Q in zip(B_out, B):\r\n np.testing.assert_allclose(P, Q)\r\n\r\n\r\n# Test correct ranges of the fitting region given a regressor sample\r\[email protected](\r\n \"x_sam, deg, n_int, prediction, x_range\",\r\n [\r\n (\r\n [np.linspace(0, 5, 6)],\r\n [2],\r\n [3],\r\n [{\"backwards\": -1, \"forward\": 8}],\r\n (slice(1, 7, None),),\r\n ),\r\n (\r\n [np.linspace(0, 6, 7), np.linspace(-2.5, 3.7, 8)],\r\n [3, 2],\r\n [4, 5],\r\n [{\"backwards\": -2.5}, {\"forward\": 5}],\r\n (slice(2, 9, None), slice(0, 8, None)),\r\n ),\r\n (\r\n [np.linspace(0, 8, 71), np.linspace(-2, 4, 83), np.linspace(10, 11, 10)],\r\n [5, 4, 6],\r\n [7, 8, 3],\r\n [{\"forward\": 8.5}, {}, {\"backwards\": 9, \"forward\": 12.34}],\r\n (slice(0, 71, None), slice(0, 83, None), slice(3, 13, None)),\r\n ),\r\n ],\r\n)\r\ndef test_get_idx_fit(x_sam, deg, n_int, prediction, x_range):\r\n bspline = []\r\n for x, d, n, pred in zip(x_sam, deg, n_int, prediction):\r\n bsp = BsplineBasis(deg=d, xsample=x, n_int=n, prediction=pred)\r\n bsp.get_matrix_B()\r\n bspline.append(bsp)\r\n\r\n range_out = get_idx_fitting_region(bspline_bases=bspline)\r\n\r\n for slice_out, slice_in in zip(range_out, x_range):\r\n assert slice_in == slice_out\r\n"
]
| [
[
"numpy.testing.assert_allclose",
"numpy.array",
"numpy.linspace"
]
]
|
jiyongze/yolov4-tiny-pytorch | [
"1eaf47512bc70982ad558625bb3741cce854169e"
]
| [
"utils/callbacks.py"
]
| [
"import datetime\r\nimport os\r\n\r\nimport torch\r\nimport matplotlib\r\nmatplotlib.use('Agg')\r\nimport scipy.signal\r\nfrom matplotlib import pyplot as plt\r\nfrom torch.utils.tensorboard import SummaryWriter\r\n\r\n\r\nclass LossHistory():\r\n def __init__(self, log_dir, model, input_shape):\r\n time_str = datetime.datetime.strftime(datetime.datetime.now(),'%Y_%m_%d_%H_%M_%S')\r\n self.log_dir = os.path.join(log_dir, \"loss_\" + str(time_str))\r\n self.losses = []\r\n self.val_loss = []\r\n \r\n os.makedirs(self.log_dir)\r\n self.writer = SummaryWriter(self.log_dir)\r\n try:\r\n dummy_input = torch.randn(2, 3, input_shape[0], input_shape[1])\r\n self.writer.add_graph(model, dummy_input)\r\n except:\r\n pass\r\n\r\n def append_loss(self, epoch, loss, val_loss):\r\n if not os.path.exists(self.log_dir):\r\n os.makedirs(self.log_dir)\r\n\r\n self.losses.append(loss)\r\n self.val_loss.append(val_loss)\r\n\r\n with open(os.path.join(self.log_dir, \"epoch_loss.txt\"), 'a') as f:\r\n f.write(str(loss))\r\n f.write(\"\\n\")\r\n with open(os.path.join(self.log_dir, \"epoch_val_loss.txt\"), 'a') as f:\r\n f.write(str(val_loss))\r\n f.write(\"\\n\")\r\n\r\n self.writer.add_scalar('loss', loss, epoch)\r\n self.writer.add_scalar('val_loss', val_loss, epoch)\r\n self.loss_plot()\r\n\r\n def loss_plot(self):\r\n iters = range(len(self.losses))\r\n\r\n plt.figure()\r\n plt.plot(iters, self.losses, 'red', linewidth = 2, label='train loss')\r\n plt.plot(iters, self.val_loss, 'coral', linewidth = 2, label='val loss')\r\n try:\r\n if len(self.losses) < 25:\r\n num = 5\r\n else:\r\n num = 15\r\n \r\n plt.plot(iters, scipy.signal.savgol_filter(self.losses, num, 3), 'green', linestyle = '--', linewidth = 2, label='smooth train loss')\r\n plt.plot(iters, scipy.signal.savgol_filter(self.val_loss, num, 3), '#8B4513', linestyle = '--', linewidth = 2, label='smooth val loss')\r\n except:\r\n pass\r\n\r\n plt.grid(True)\r\n plt.xlabel('Epoch')\r\n plt.ylabel('Loss')\r\n plt.legend(loc=\"upper right\")\r\n\r\n plt.savefig(os.path.join(self.log_dir, \"epoch_loss.png\"))\r\n\r\n plt.cla()\r\n plt.close(\"all\")\r\n"
]
| [
[
"matplotlib.use",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.close",
"matplotlib.pyplot.cla",
"matplotlib.pyplot.figure",
"torch.randn",
"matplotlib.pyplot.ylabel",
"torch.utils.tensorboard.SummaryWriter"
]
]
|
archu2020/python-2 | [
"3975c678d985c468deecd03560d882e9d316bb63"
]
| [
"DeepLearning/YOLOv3/video_demo.py"
]
| [
"import cv2\nimport time\nimport numpy as np\nimport core.utils as utils\nimport tensorflow as tf\nfrom PIL import Image\n\n\nreturn_elements = [\"input/input_data:0\", \"pred_sbbox/concat_2:0\", \"pred_mbbox/concat_2:0\", \"pred_lbbox/concat_2:0\"]\npb_file = \"./yolov3_coco.pb\"\nvideo_path = \"./docs/images/road.mp4\"\n# video_path = 0\nnum_classes = 80\ninput_size = 416\ngraph = tf.Graph()\nreturn_tensors = utils.read_pb_return_tensors(graph, pb_file, return_elements)\n\nwith tf.Session(graph=graph) as sess:\n vid = cv2.VideoCapture(video_path)\n while True:\n return_value, frame = vid.read()\n if return_value:\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n image = Image.fromarray(frame)\n else:\n raise ValueError(\"No image!\")\n frame_size = frame.shape[:2]\n image_data = utils.image_preporcess(np.copy(frame), [input_size, input_size])\n image_data = image_data[np.newaxis, ...]\n prev_time = time.time()\n\n pred_sbbox, pred_mbbox, pred_lbbox = sess.run(\n [return_tensors[1], return_tensors[2], return_tensors[3]],\n feed_dict={ return_tensors[0]: image_data})\n\n pred_bbox = np.concatenate([np.reshape(pred_sbbox, (-1, 5 + num_classes)),\n np.reshape(pred_mbbox, (-1, 5 + num_classes)),\n np.reshape(pred_lbbox, (-1, 5 + num_classes))], axis=0)\n\n bboxes = utils.postprocess_boxes(pred_bbox, frame_size, input_size, 0.3)\n bboxes = utils.nms(bboxes, 0.45, method='nms')\n image = utils.draw_bbox(frame, bboxes)\n\n curr_time = time.time()\n exec_time = curr_time - prev_time\n result = np.asarray(image)\n info = \"time: %.2f ms\" %(1000*exec_time)\n cv2.namedWindow(\"result\", cv2.WINDOW_AUTOSIZE)\n result = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n cv2.imshow(\"result\", result)\n if cv2.waitKey(1) & 0xFF == ord('q'): break\n\n\n\n\n"
]
| [
[
"numpy.reshape",
"numpy.asarray",
"tensorflow.Graph",
"tensorflow.Session",
"numpy.copy"
]
]
|
mayankanand007/cudf | [
"a4730f30ce46c336de7ed71c509dcb6c9c1f6d3d",
"a4730f30ce46c336de7ed71c509dcb6c9c1f6d3d"
]
| [
"python/cudf/cudf/core/cut.py",
"python/cudf/cudf/tests/test_gcs.py"
]
| [
"from collections.abc import Sequence\n\nimport cupy\nimport numpy as np\nimport pandas as pd\n\nimport cudf\nfrom cudf.api.types import is_list_like\nfrom cudf.core.column import as_column, build_categorical_column\nfrom cudf.core.index import IntervalIndex, interval_range\n\n\ndef cut(\n x,\n bins,\n right: bool = True,\n labels=None,\n retbins: bool = False,\n precision: int = 3,\n include_lowest: bool = False,\n duplicates: str = \"raise\",\n ordered: bool = True,\n):\n\n \"\"\"\n Bin values into discrete intervals.\n Use cut when you need to segment and sort data values into bins. This\n function is also useful for going from a continuous variable to a\n categorical variable.\n Parameters\n ----------\n x : array-like\n The input array to be binned. Must be 1-dimensional.\n bins : int, sequence of scalars, or IntervalIndex\n The criteria to bin by.\n * int : Defines the number of equal-width bins in the\n range of x. The range of x is extended by .1% on each\n side to include the minimum and maximum values of x.\n right : bool, default True\n Indicates whether bins includes the rightmost edge or not.\n labels : array or False, default None\n Specifies the labels for the returned bins. Must be the same\n length as the resulting bins. If False, returns only integer\n indicators of thebins. If True,raises an error. When ordered=False,\n labels must be provided.\n retbins : bool, default False\n Whether to return the bins or not.\n precision : int, default 3\n The precision at which to store and display the bins labels.\n include_lowest : bool, default False\n Whether the first interval should be left-inclusive or not.\n duplicates : {default 'raise', 'drop'}, optional\n If bin edges are not unique, raise ValueError or drop non-uniques.\n ordered : bool, default True\n Whether the labels are ordered or not. Applies to returned types\n Categorical and Series (with Categorical dtype). If True,\n the resulting categorical will be ordered. If False, the resulting\n categorical will be unordered (labels must be provided).\n Returns\n -------\n out : CategoricalIndex\n An array-like object representing the respective bin for each value\n of x. The type depends on the value of labels.\n bins : numpy.ndarray or IntervalIndex.\n The computed or specified bins. Only returned when retbins=True.\n For scalar or sequence bins, this is an ndarray with the computed\n bins. If set duplicates=drop, bins will drop non-unique bin. For\n an IntervalIndex bins, this is equal to bins.\n Examples\n --------\n Discretize into three equal-sized bins.\n >>> cudf.cut(np.array([1, 7, 5, 4, 6, 3]), 3)\n CategoricalIndex([(0.994, 3.0], (5.0, 7.0], (3.0, 5.0], (3.0, 5.0],\n ... (5.0, 7.0],(0.994, 3.0]], categories=[(0.994, 3.0],\n ... (3.0, 5.0], (5.0, 7.0]], ordered=True, dtype='category')\n >>> cudf.cut(np.array([1, 7, 5, 4, 6, 3]), 3, retbins=True)\n (CategoricalIndex([(0.994, 3.0], (5.0, 7.0], (3.0, 5.0], (3.0, 5.0],\n ... (5.0, 7.0],(0.994, 3.0]],categories=[(0.994, 3.0],\n ... (3.0, 5.0], (5.0, 7.0]],ordered=True, dtype='category'),\n array([0.994, 3. , 5. , 7. ]))\n >>> cudf.cut(np.array([1, 7, 5, 4, 6, 3]),\n ... 3, labels=[\"bad\", \"medium\", \"good\"])\n CategoricalIndex(['bad', 'good', 'medium', 'medium', 'good', 'bad'],\n ... categories=['bad', 'medium', 'good'],ordered=True,\n ... dtype='category')\n >>> cudf.cut(np.array([1, 7, 5, 4, 6, 3]), 3,\n ... labels=[\"B\", \"A\", \"B\"], ordered=False)\n CategoricalIndex(['B', 'B', 'A', 'A', 'B', 'B'], categories=['A', 'B'],\n ... 
ordered=False, dtype='category')\n >>> cudf.cut([0, 1, 1, 2], bins=4, labels=False)\n array([0, 1, 1, 3], dtype=int32)\n Passing a Series as an input returns a Series with categorical dtype:\n >>> s = cudf.Series(np.array([2, 4, 6, 8, 10]),\n ... index=['a', 'b', 'c', 'd', 'e'])\n >>> cudf.cut(s, 3)\n \"\"\"\n left_inclusive = False\n right_inclusive = True\n # saving the original input x for use in case its a series\n orig_x = x\n old_bins = bins\n\n if not ordered and labels is None:\n raise ValueError(\"'labels' must be provided if 'ordered = False'\")\n\n if duplicates not in [\"raise\", \"drop\"]:\n raise ValueError(\n \"invalid value for 'duplicates' parameter, valid options are: \"\n \"raise, drop\"\n )\n\n if labels is not False:\n if not (labels is None or is_list_like(labels)):\n raise ValueError(\n \"Bin labels must either be False, None or passed in as a \"\n \"list-like argument\"\n )\n elif ordered and labels is not None:\n if len(set(labels)) != len(labels):\n raise ValueError(\n \"labels must be unique if ordered=True;\"\n \"pass ordered=False for duplicate labels\"\n )\n\n # bins can either be an int, sequence of scalars or an intervalIndex\n if isinstance(bins, Sequence):\n if len(set(bins)) is not len(bins):\n if duplicates == \"raise\":\n raise ValueError(\n f\"Bin edges must be unique: {repr(bins)}.\\n\"\n f\"You can drop duplicate edges by setting the 'duplicates'\"\n \"kwarg\"\n )\n elif duplicates == \"drop\":\n # get unique values but maintain list dtype\n bins = list(dict.fromkeys(bins))\n\n # if bins is an intervalIndex we ignore the value of right\n elif isinstance(bins, (pd.IntervalIndex, cudf.IntervalIndex)):\n right = bins.closed == \"right\"\n\n # create bins if given an int or single scalar\n if not isinstance(bins, pd.IntervalIndex):\n if not isinstance(bins, (Sequence)):\n if isinstance(\n x, (pd.Series, cudf.Series, np.ndarray, cupy.ndarray)\n ):\n mn = x.min()\n mx = x.max()\n else:\n mn = min(x)\n mx = max(x)\n bins = np.linspace(mn, mx, bins + 1, endpoint=True)\n adj = (mx - mn) * 0.001\n if right:\n bins[0] -= adj\n else:\n bins[-1] += adj\n\n # if right and include lowest we adjust the first\n # bin edge to make sure it is included\n if right and include_lowest:\n bins[0] = bins[0] - 10 ** (-precision)\n\n # if right is false the last bin edge is not included\n if not right:\n right_edge = bins[-1]\n x = cupy.asarray(x)\n x[x == right_edge] = right_edge + 1\n\n # adjust bin edges decimal precision\n int_label_bins = np.around(bins, precision)\n\n # the inputs is a column of the values in the array x\n input_arr = as_column(x)\n\n # checking for the correct inclusivity values\n if right:\n closed = \"right\"\n else:\n closed = \"left\"\n left_inclusive = True\n\n if isinstance(bins, pd.IntervalIndex):\n interval_labels = bins\n elif labels is None:\n if duplicates == \"drop\" and len(bins) == 1 and len(old_bins) != 1:\n if right and include_lowest:\n old_bins[0] = old_bins[0] - 10 ** (-precision)\n interval_labels = interval_range(\n old_bins[0], old_bins[1], periods=1, closed=closed\n )\n else:\n interval_labels = IntervalIndex.from_breaks(\n old_bins, closed=closed\n )\n else:\n # get labels for categories\n interval_labels = IntervalIndex.from_breaks(\n int_label_bins, closed=closed\n )\n elif labels is not False:\n if not (is_list_like(labels)):\n raise ValueError(\n \"Bin labels must either be False, None or passed in as a \"\n \"list-like argument\"\n )\n if ordered and len(set(labels)) != len(labels):\n raise ValueError(\n \"labels must be 
unique if ordered=True; pass ordered=False for\"\n \"duplicate labels\"\n )\n else:\n if len(labels) != len(bins) - 1:\n raise ValueError(\n \"Bin labels must be one fewer than the number of bin edges\"\n )\n if not ordered and len(set(labels)) != len(labels):\n interval_labels = cudf.CategoricalIndex(\n labels, categories=None, ordered=False\n )\n else:\n interval_labels = (\n labels if len(set(labels)) == len(labels) else None\n )\n\n if isinstance(bins, pd.IntervalIndex):\n # get the left and right edges of the bins as columns\n # we cannot typecast an IntervalIndex, so we need to\n # make the edges the same type as the input array\n left_edges = as_column(bins.left).astype(input_arr.dtype)\n right_edges = as_column(bins.right).astype(input_arr.dtype)\n else:\n # get the left and right edges of the bins as columns\n left_edges = as_column(bins[:-1:], dtype=\"float64\")\n right_edges = as_column(bins[+1::], dtype=\"float64\")\n # the input arr must be changed to the same type as the edges\n input_arr = input_arr.astype(left_edges.dtype)\n # get the indexes for the appropriate number\n index_labels = cudf._lib.labeling.label_bins(\n input_arr, left_edges, left_inclusive, right_edges, right_inclusive\n )\n\n if labels is False:\n # if labels is false we return the index labels, we return them\n # as a series if we have a series input\n if isinstance(orig_x, (pd.Series, cudf.Series)):\n # need to run more tests but looks like in this case pandas\n # always returns a float64 dtype\n indx_arr_series = cudf.Series(index_labels, dtype=\"float64\")\n # if retbins we return the bins as well\n if retbins:\n return indx_arr_series, bins\n else:\n return indx_arr_series\n elif retbins:\n return index_labels.values, bins\n else:\n return index_labels.values\n\n if labels is not None:\n if labels is not ordered and len(set(labels)) != len(labels):\n # when we have duplicate labels and ordered is False, we\n # should allow duplicate categories. The categories are\n # returned in order\n new_data = [interval_labels[i][0] for i in index_labels.values]\n return cudf.CategoricalIndex(\n new_data, categories=sorted(set(labels)), ordered=False\n )\n\n col = build_categorical_column(\n categories=interval_labels,\n codes=index_labels,\n mask=index_labels.base_mask,\n offset=index_labels.offset,\n size=index_labels.size,\n ordered=ordered,\n )\n\n # we return a categorical index, as we don't have a Categorical method\n categorical_index = cudf.core.index.as_index(col)\n\n if isinstance(orig_x, (pd.Series, cudf.Series)):\n # if we have a series input we return a series output\n res_series = cudf.Series(categorical_index, index=orig_x.index)\n if retbins:\n return res_series, bins\n else:\n return res_series\n elif retbins:\n # if retbins is true we return the bins as well\n return categorical_index, bins\n else:\n return categorical_index\n",
"# Copyright (c) 2020, NVIDIA CORPORATION.\n\nimport io\nimport os\n\nimport numpy as np\nimport pandas as pd\nimport pyarrow as pa\nimport pyarrow.orc\nimport pytest\n\nimport cudf\nfrom cudf.testing._utils import assert_eq\n\ngcsfs = pytest.importorskip(\"gcsfs\")\n\nTEST_PROJECT = \"cudf-gcs-test-project\"\nTEST_BUCKET = \"cudf-gcs-test-bucket\"\n\n\[email protected]\ndef pdf(scope=\"module\"):\n df = pd.DataFrame()\n df[\"Integer\"] = np.array([2345, 11987, 9027, 9027])\n df[\"Float\"] = np.array([9.001, 8.343, 6, 2.781])\n df[\"Integer2\"] = np.array([2345, 106, 2088, 789277])\n df[\"String\"] = np.array([\"Alpha\", \"Beta\", \"Gamma\", \"Delta\"])\n df[\"Boolean\"] = np.array([True, False, True, False])\n return df\n\n\ndef test_read_csv(pdf, monkeypatch):\n # Write to buffer\n fpath = TEST_BUCKET + \"test_csv_reader.csv\"\n buffer = pdf.to_csv(index=False)\n\n def mock_open(*args, **kwargs):\n return io.BytesIO(buffer.encode())\n\n def mock_size(*args):\n return len(buffer.encode())\n\n monkeypatch.setattr(gcsfs.core.GCSFileSystem, \"open\", mock_open)\n monkeypatch.setattr(gcsfs.core.GCSFileSystem, \"size\", mock_size)\n got = cudf.read_csv(\"gcs://{}\".format(fpath))\n\n assert_eq(pdf, got)\n\n\ndef test_write_orc(pdf, monkeypatch, tmpdir):\n gcs_fname = TEST_BUCKET + \"test_orc_writer.orc\"\n local_filepath = os.path.join(tmpdir, \"test_orc.orc\")\n gdf = cudf.from_pandas(pdf)\n\n def mock_open(*args, **kwargs):\n return open(local_filepath, \"wb\")\n\n monkeypatch.setattr(gcsfs.core.GCSFileSystem, \"open\", mock_open)\n gdf.to_orc(\"gcs://{}\".format(gcs_fname))\n\n got = pa.orc.ORCFile(local_filepath).read().to_pandas()\n assert_eq(pdf, got)\n"
]
| [
[
"numpy.around",
"numpy.linspace"
],
[
"pandas.DataFrame",
"numpy.array"
]
]
|
searobbersduck/FattyLiver_Solution | [
"7b8542e70cdb4417889799ea6da2c794e9eae392"
]
| [
"copy/train_diff_3d_cls2.py"
]
| [
"import os\nimport sys\n\nsys.path.append(os.path.join(os.path.dirname(__file__), os.path.pardir))\n\nimport numpy as np\nimport sys\nimport scipy.ndimage as nd\nimport json\nimport pickle\nimport torch\nimport torch.nn as nn\nimport torchvision\nfrom torch.utils.data import Dataset, DataLoader\n# from models.resnet import *\nfrom models.resnet_bn import *\nimport torch.optim as optim\nfrom torch.autograd import Variable\nimport torch.backends.cudnn as cudnn\nimport time\nimport math\nfrom utils.utils import AverageMeter\nfrom datasets.FattyLiverDatasets import FattyLiverClsDatasetsDiff3D3\n\nimport torch.nn.functional as F\n\ndef initial_cls_weights(cls):\n for m in cls.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0]*m.kernel_size[1]*m.out_channels\n m.weight.data.normal_(0, math.sqrt(2./n))\n if m.bias is not None:\n m.bias.data.zero_()\n if isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n if isinstance(m, nn.Linear):\n m.weight.data.normal_(0, 0.01)\n m.bias.data.zero_()\n if isinstance(m, nn.Conv3d):\n n = m.kernel_size[0]*m.kernel_size[1]*m.kernel_size[2]*m.out_channels\n m.weight.data.normal_(0, math.sqrt(2./n))\n if m.bias is not None:\n m.bias.data.zero_()\n\n\ndef train(train_dataloader, model, criterion, optimizer, epoch, display):\n model.train()\n tot_pred = np.array([], dtype=int)\n tot_label = np.array([], dtype=int)\n batch_time = AverageMeter()\n data_time = AverageMeter()\n losses = AverageMeter()\n accuracy = AverageMeter()\n end = time.time()\n logger = []\n for num_iter, (images, labels,_) in enumerate(train_dataloader):\n labels[labels<2] = 0\n labels[labels>=2] = 1\n data_time.update(time.time()-end)\n output = model(Variable(images.cuda()))\n loss = criterion(output, Variable(labels.cuda()))\n _, pred = torch.max(output, 1)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n batch_time.update(time.time()-end)\n end = time.time()\n pred = pred.cpu().data.numpy()\n labels = labels.numpy()\n tot_pred = np.append(tot_pred, pred)\n tot_label = np.append(tot_label, labels)\n losses.update(loss.data.cpu().numpy(), len(images))\n accuracy.update(np.equal(pred, labels).sum()/len(labels), len(labels))\n if (num_iter+1) % display == 0:\n correct = np.equal(tot_pred, tot_label).sum()/len(tot_pred)\n print_info = 'Epoch: [{0}][{1}/{2}]\\tTime {batch_time.val:3f} ({batch_time.avg:.3f})\\t' 'Data {data_time.avg:.3f}\\t''Loss {loss.avg:.4f}\\tAccuray {accuracy.avg:.4f}'.format(\n epoch, num_iter, len(train_dataloader),batch_time=batch_time, data_time=data_time,\n loss=losses, accuracy=accuracy\n )\n print(print_info)\n logger.append(print_info)\n print(tot_pred)\n print(tot_label)\n return accuracy.avg, logger\n\n\ndef val(train_dataloader, model, criterion, epoch, display):\n model.eval()\n tot_pred = np.array([], dtype=int)\n tot_label = np.array([], dtype=int)\n tot_prob = np.array([], dtype=np.float32)\n batch_time = AverageMeter()\n data_time = AverageMeter()\n losses = AverageMeter()\n accuracy = AverageMeter()\n end = time.time()\n logger = []\n for num_iter, (images, labels, _) in enumerate(train_dataloader):\n labels[labels<2] = 0\n labels[labels>=2] = 1\n data_time.update(time.time()-end)\n output = model(Variable(images.cuda()))\n loss = criterion(output, Variable(labels.cuda()))\n _, pred = torch.max(output, 1)\n# optimizer.zero_grad()\n# loss.backward()\n# optimizer.step()\n batch_time.update(time.time()-end)\n end = time.time()\n pred = pred.cpu().data.numpy()\n labels = labels.numpy()\n tot_pred = 
np.append(tot_pred, pred)\n tot_label = np.append(tot_label, labels)\n tot_prob = np.append(tot_prob, F.softmax(output).cpu().detach().numpy()[:,1])\n losses.update(loss.data.cpu().numpy(), len(images))\n accuracy.update(np.equal(pred, labels).sum()/len(labels), len(labels))\n if (num_iter+1) % display == 0:\n correct = np.equal(tot_pred, tot_label).sum()/len(tot_pred)\n print_info = 'Epoch: [{0}][{1}/{2}]\\tTime {batch_time.val:3f} ({batch_time.avg:.3f})\\t' 'Data {data_time.avg:.3f}\\t''Loss {loss.avg:.4f}\\tAccuray {accuracy.avg:.4f}'.format(\n epoch, num_iter, len(train_dataloader),batch_time=batch_time, data_time=data_time,\n loss=losses, accuracy=accuracy\n )\n print(print_info)\n logger.append(print_info)\n print(tot_pred)\n print(tot_label)\n return accuracy.avg, logger, tot_pred, tot_label, tot_prob\n\n\ndef test(train_dataloader, model, criterion, epoch, display):\n return val(train_dataloader, model, criterion, epoch, display)\n\n\ndef main():\n\n batch_size = 2\n num_workers = 4\n phase = 'train'\n epochs = 10000\n display = 2\n task_name = 'diff'\n\n config_file = '../config/config_diff_3d.json'\n config = None\n with open(config_file) as f:\n config = json.load(f)\n print('\\n')\n print('====> parse options:')\n print(config)\n print('\\n')\n\n data_root = '../data/experiment_0/0.ori'\n config_train = '../data/config/config_train.txt'\n config_val = '../data/config/config_val.txt'\n crop_size = [32, 384, 512]\n\n\n print('====> create output model path:\\t')\n config[\"model_dir\"] = '../data/experiment_Oct_cls2'\n os.makedirs(config[\"model_dir\"], exist_ok=True)\n # time_stamp = time.strftime('%Y%m%d%H%M%S', time.localtime(time.time()))\n model_dir = os.path.join(config[\"model_dir\"], 'fattyliver_task_{}_best.pth'.format(task_name))\n os.makedirs(model_dir, exist_ok=True)\n\n\n print('====> building model:\\t')\n model = resnet34(num_classes=2, \n shortcut_type=True, sample_size_y=crop_size[1], sample_size_x=crop_size[2], sample_duration=crop_size[0])\n initial_cls_weights(model)\n pretrained_weights = config['weight']\n if pretrained_weights is not None:\n model.load_state_dict(torch.load(pretrained_weights))\n\n criterion = nn.CrossEntropyLoss().cuda()\n\n if phase == 'train':\n train_ds = FattyLiverClsDatasetsDiff3D3(data_root, config_train, task_name, crop_size)\n val_ds = FattyLiverClsDatasetsDiff3D3(data_root, config_val, task_name, crop_size)\n train_dataloader = DataLoader(train_ds, batch_size=batch_size, \n shuffle=True, num_workers=num_workers, \n pin_memory=True)\n val_dataloader = DataLoader(val_ds, batch_size=batch_size, shuffle=False, \n num_workers=num_workers, pin_memory=False)\n\n best_acc = 0.5\n\n for epoch in range(epochs):\n if epoch < config['fix']:\n lr = config['lr']\n else:\n lr = config['lr'] * (0.1 ** (epoch//config['step']))\n mom = config['mom']\n wd = config['wd']\n optimizer = None\n if config['optimizer'] == 'sgd':\n optimizer = optim.SGD([{'params': model.parameters()}], \n lr=lr, momentum=mom, weight_decay=wd, nesterov=True)\n elif config['optimizer'] == 'adam':\n optimizer = torch.optim.Adam([{'params': model.parameters()}], lr=lr, betas=(0.9, 0.999))\n\n _, _ = train(train_dataloader, nn.DataParallel(model).cuda(), criterion, optimizer, epoch, display)\n acc, logger,tot_pred, tot_label, tot_pred = val(val_dataloader, nn.DataParallel(model).cuda(), criterion, epoch, display)\n print('val acc:\\t{:.3f}'.format(acc))\n #判断预测的标签非全1 or 非全0 \n if (np.all(tot_pred == 1) or np.all(tot_pred == 0)):\n continue\n # if (np.round(acc,3) == 0.647):\n 
# continue\n if (np.round(acc,3) == 0.588):\n continue\n\n if acc > best_acc:\n print('\\ncurrent best accuracy is: {}\\n'.format(acc))\n best_acc = acc\n saved_model_name = os.path.join(model_dir, 'fattyliver_task_{}_best.pth'.format(task_name))\n torch.save(model.cpu().state_dict(), saved_model_name)\n print('====> save model:\\t{}'.format(saved_model_name))\n\n\n\n\n\nif __name__ == '__main__':\n main()"
]
| [
[
"numpy.equal",
"numpy.array",
"torch.nn.functional.softmax",
"torch.max",
"numpy.round",
"torch.utils.data.DataLoader",
"torch.load",
"numpy.append",
"numpy.all",
"torch.nn.CrossEntropyLoss",
"torch.nn.DataParallel"
]
]
|
limn2o4/analytics-zoo | [
"78d6ce10976a7e1320ff5ebdf431db93a439ec56"
]
| [
"pyzoo/test/zoo/chronos/data/utils/test_impute.py"
]
| [
"#\n# Copyright 2018 Analytics Zoo Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport pytest\nimport pandas as pd\nimport numpy as np\n\nfrom test.zoo.pipeline.utils.test_utils import ZooTestCase\nfrom zoo.chronos.data.utils.impute import impute_timeseries_dataframe, \\\n _last_impute_timeseries_dataframe, _const_impute_timeseries_dataframe, \\\n _linear_impute_timeseries_dataframe\n\n\ndef get_ugly_ts_df():\n data = np.random.random_sample((50, 5))\n mask = np.random.random_sample((50, 5))\n mask[mask >= 0.4] = 2\n mask[mask < 0.4] = 1\n mask[mask < 0.2] = 0\n data[mask == 0] = None\n data[mask == 1] = np.nan\n df = pd.DataFrame(data, columns=['a', 'b', 'c', 'd', 'e'])\n df['a'][0] = np.nan # make sure column 'a' has a N/A\n df[\"datetime\"] = pd.date_range('1/1/2019', periods=50)\n return df\n\n\nclass TestImputeTimeSeries(ZooTestCase):\n def setup_method(self, method):\n self.df = get_ugly_ts_df()\n\n def teardown_method(self, method):\n pass\n\n def test_impute_timeseries_dataframe(self):\n with pytest.raises(AssertionError):\n impute_timeseries_dataframe(self.df, dt_col=\"z\")\n with pytest.raises(AssertionError):\n impute_timeseries_dataframe(\n self.df, dt_col=\"datetime\", mode=\"dummy\")\n with pytest.raises(AssertionError):\n impute_timeseries_dataframe(self.df, dt_col=\"a\")\n last_res_df = impute_timeseries_dataframe(\n self.df, dt_col=\"datetime\", mode=\"last\")\n assert self.df.isna().sum().sum() != 0\n assert last_res_df.isna().sum().sum() == 0\n const_res_df = impute_timeseries_dataframe(\n self.df, dt_col=\"datetime\", mode=\"const\")\n assert self.df.isna().sum().sum() != 0\n assert const_res_df.isna().sum().sum() == 0\n linear_res_df = impute_timeseries_dataframe(\n self.df, dt_col=\"datetime\", mode=\"linear\")\n assert self.df.isna().sum().sum() != 0\n assert linear_res_df.isna().sum().sum() == 0\n\n def test_last_impute_timeseries_dataframe(self):\n data = {'data': [np.nan, np.nan, 1, np.nan, 2, 3]}\n df = pd.DataFrame(data)\n res_df = _last_impute_timeseries_dataframe(df)\n assert res_df['data'][0] == 0\n assert res_df['data'][1] == 0\n assert res_df['data'][3] == 1\n\n def test_const_impute_timeseries_dataframe(self):\n data = {'data': [np.nan, 1, np.nan, 2, 3]}\n df = pd.DataFrame(data)\n res_df = _const_impute_timeseries_dataframe(df, 1)\n assert res_df['data'][0] == 1\n assert res_df['data'][2] == 1\n\n def test_linear_timeseries_dataframe(self):\n data = {'data': [np.nan, 1, np.nan, 2, 3]}\n df = pd.DataFrame(data)\n res_df = _linear_impute_timeseries_dataframe(df)\n assert res_df['data'][0] == 1\n assert res_df['data'][2] == 1.5\n"
]
| [
[
"pandas.DataFrame",
"pandas.date_range",
"numpy.random.random_sample"
]
]
|
ali-manhani/fun-simulations | [
"47f96279cc14e410c1216ada4a144d62225979ec"
]
| [
"non-linear_dynamics_and_chaos/cobweb_plot/cobweb.py"
]
| [
"#!/usr/bin/env python\n\n\"\"\" Code to draw the cobweb diagram of a recursive fucntion \"\"\"\n\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\n\ndef f(x, r):\n \"\"\" the function in question \"\"\"\n return x * r * (1 - x)\n\n\ndef iterate(func, r, iter_num):\n \"\"\" function to generate the cobweb coordinates \"\"\"\n x = np.random.uniform(0, 1)\n xs = [x]\n ys = [x]\n\n for i in range(iter_num):\n xs.append(x)\n y = func(x, r)\n ys.append(y)\n x = y\n xs.append(x)\n ys.append(x)\n\n return xs, ys\n\n\n\ndef main():\n \"\"\" main body \"\"\"\n x_axis = np.linspace(-1, 1, 1000)\n r = -1\n iteration = 100\n\n xs, ys = iterate(f, r, iteration)\n\n fig, ax = plt.subplots(1, 1, figsize=(8, 8))\n\n ax.plot(x_axis, f(x_axis, r), c='g')\n ax.plot(x_axis, x_axis, c='g')\n ax.plot(xs, ys, c='r')\n ax.set_xlabel(r\"$x_{n}$\")\n ax.set_ylabel(r\"$x_{n+1}$\")\n plt.title(f'cobweb plot for r={r}')\n plt.savefig(f'cobweb_r{r}.jpg', dpi=200, bbox_inches='tight')\n plt.show()\n\nif __name__ == \"__main__\":\n main()\n"
]
| [
[
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplots",
"numpy.random.uniform",
"matplotlib.pyplot.show",
"numpy.linspace"
]
]
|
ddatta-DAC/NOAA_Weather_Fetcher | [
"84baf6facf5f3ba3128df012b66420f0156c3f76"
]
| [
"src/week_util.py"
]
| [
"import pandas as pd\nfrom datetime import datetime\nfrom datetime import timedelta\nimport os\nimport sys\n\nsys.path.append('./../..')\nsys.path.append('./..')\nimport common_utils\n\nformat = \"%Y-%m-%d\"\nfile_location = './../aux_data'\nweek_info_file = 'year_week_data.csv'\nseason_year_week_file = 'season_year_week_data.csv'\n\ndf = None\ndate_week_year = {}\ns_df = None\ns_yw_index = None\n\n\n# ---------------------- #\ndef init():\n global df, s_df\n global week_info_file\n global season_year_week_file\n\n old_location = os.getcwd()\n script_dir = os.path.dirname(__file__)\n os.chdir(script_dir)\n week_info_file_path = file_location + '/' + week_info_file\n df = pd.read_csv(week_info_file_path)\n setup_date_week_year_list()\n\n season_year_week_info_path = file_location + '/' + season_year_week_file\n s_df = pd.read_csv(season_year_week_info_path, index_col=0)\n s_df.reset_index()\n setup_s_year_week_mapping()\n os.chdir(old_location)\n\n return\n\n\ndef setup_date_week_year_list():\n global df,date_week_year\n\n for i, row in df.iterrows():\n week = row['week']\n year = row['year']\n cur = datetime.strptime(row['start'], format).date()\n\n for i in range(7):\n date_week_year[str(cur)] = [year, week]\n cur = cur + timedelta(days=1)\n return\n\n\ndef setup_s_year_week_mapping():\n global s_df\n s_df = s_df.set_index(['year', 'week'])\n return\n\n\n# ----- Initialize ------ #\ninit()\n# ---------------------- #\n\n\ndef num_weeks_in_year(year):\n return common_utils.num_weeks_in_year(year)\n\n\n# Return a list of date objects\n# Corresponding to all dates of a year - week\ndef get_year_week_dates(year, week, return_string=False):\n global df, format\n\n idx = df.loc[(df['year'] == year) & (df['week'] == week)].index[0]\n start = datetime.strptime(df.loc[idx, 'start'], format).date()\n dt_list = []\n\n for i in range(7):\n dt = start + timedelta(days=i)\n if return_string:\n dt_list.append(str(dt))\n else:\n dt_list.append(dt)\n\n return dt_list\n\n\n# Input :\n# int year\n# int week\n# Return : datetime obj or string\ndef get_year_week_start(year, week, return_string=False):\n global df, format\n idx = df.loc[(df['year'] == year) & (df['week'] == week)].index[0]\n start = datetime.strptime(df.loc[idx, 'start'], format).date()\n if return_string:\n return str(start)\n else:\n return start\n\n\n# Input :\n# int year\n# int week\n# Return : datetime obj or string\ndef get_year_week_start_end(year, week, return_string=False):\n global df, format\n idx = df.loc[(df['year'] == year) & (df['week'] == week)].index[0]\n start = datetime.strptime(df.loc[idx, 'start'], format).date()\n end = datetime.strptime(df.loc[idx, 'end'], format).date()\n\n if return_string:\n return str(start), str(end)\n else:\n return start, end\n\n\n# Input : date obj string\n\ndef get_year_week_by_date(inp_dt):\n global date_week_year\n yr_wk = date_week_year[inp_dt]\n return yr_wk[0], yr_wk[1]\n\n\n# --------------------------------------------- #\n# Returns season year and season week.\n# Input :\n# int year\n# int week\n# ----------------------------------------------#\ndef get_s_year_week(year, week):\n res = s_df.loc[(year, week), :]\n s_week = res['s_week']\n s_year = res['s_year']\n return s_year, s_week\n"
]
| [
[
"pandas.read_csv"
]
]
|
caotians1/OD-test-master | [
"e272421294a3614bdcdb3a4e4b530f613dad1a1c"
]
| [
"setup/categories/deep_ensemble_setup.py"
]
| [
"from __future__ import print_function\nimport os\nfrom termcolor import colored\n\nimport torch\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\n\nimport models as Models\nimport global_vars as Global\nfrom utils.iterative_trainer import IterativeTrainer, IterativeTrainerConfig\nfrom utils.logger import Logger\nfrom datasets import MirroredDataset\n\ndef get_classifier_config(args, model, dataset, home_path, mid=0):\n print(\"Preparing training D1 for %s\"%(dataset.name))\n\n # 80%, 20% for local train+test\n train_ds, valid_ds = dataset.split_dataset(0.8)\n\n if dataset.name in Global.mirror_augment:\n print(colored(\"Mirror augmenting %s\"%dataset.name, 'green'))\n new_train_ds = train_ds + MirroredDataset(train_ds)\n train_ds = new_train_ds\n\n # Initialize the multi-threaded loaders.\n train_loader = DataLoader(train_ds, batch_size=args.batch_size/2, shuffle=True, num_workers=args.workers, pin_memory=True)\n valid_loader = DataLoader(valid_ds, batch_size=args.batch_size, num_workers=args.workers, pin_memory=True)\n all_loader = DataLoader(dataset, batch_size=args.batch_size, num_workers=args.workers, pin_memory=True)\n\n import methods.deep_ensemble as DE\n # Set up the model\n model = DE.DeepEnsembleWrapper(model).to(args.device)\n\n # Set up the criterion\n criterion = DE.DeepEnsembleLoss(ensemble_network=model).to(args.device)\n\n # Set up the config\n config = IterativeTrainerConfig()\n\n base_model_name = model.__class__.__name__\n if hasattr(model, 'preferred_name'):\n base_model_name = model.preferred_name()\n\n config.name = 'DeepEnsemble_%s_%s(%d)'%(dataset.name, base_model_name, mid)\n\n config.train_loader = train_loader\n config.valid_loader = valid_loader\n config.phases = {\n 'train': {'dataset' : train_loader, 'backward': True},\n 'test': {'dataset' : valid_loader, 'backward': False},\n 'all': {'dataset' : all_loader, 'backward': False}, \n }\n config.criterion = criterion\n config.classification = True\n config.stochastic_gradient = True\n config.visualize = not args.no_visualize\n config.model = model\n config.logger = Logger(home_path)\n\n config.optim = optim.Adam(model.parameters(), lr=1e-3)\n config.scheduler = optim.lr_scheduler.ReduceLROnPlateau(config.optim, patience=10, threshold=1e-2, min_lr=1e-6, factor=0.1, verbose=True)\n config.max_epoch = 120\n \n if hasattr(model.model, 'train_config'):\n model_train_config = model.model.train_config()\n for key, value in model_train_config.items():\n print('Overriding config.%s'%key)\n config.__setattr__(key, value)\n\n return config\n\ndef train_classifier(args, model, dataset):\n config = None\n\n for mid in range(5):\n home_path = Models.get_ref_model_path(args, model.__class__.__name__, dataset.name, model_setup=True, suffix_str='DE.%d'%mid)\n hbest_path = os.path.join(home_path, 'model.best.pth')\n\n if not os.path.isdir(home_path):\n os.makedirs(home_path)\n else:\n if os.path.isfile(hbest_path + \".done\"):\n print(\"Skipping %s\"%(colored(home_path, 'yellow')))\n continue\n\n config = get_classifier_config(args, model.__class__(), dataset, home_path, mid=mid)\n\n trainer = IterativeTrainer(config, args)\n\n if not os.path.isfile(hbest_path + \".done\"):\n print(colored('Training from scratch', 'green'))\n best_accuracy = -1\n for epoch in range(1, config.max_epoch+1):\n\n # Track the learning rates.\n lrs = [float(param_group['lr']) for param_group in config.optim.param_groups]\n config.logger.log('LRs', lrs, epoch)\n config.logger.get_measure('LRs').legend = ['LR%d'%i for i in 
range(len(lrs))]\n \n # One epoch of train and test.\n trainer.run_epoch(epoch, phase='train')\n trainer.run_epoch(epoch, phase='test')\n\n train_loss = config.logger.get_measure('train_loss').mean_epoch()\n config.scheduler.step(train_loss)\n\n if config.visualize:\n # Show the average losses for all the phases in one figure.\n config.logger.visualize_average_keys('.*_loss', 'Average Loss', trainer.visdom)\n config.logger.visualize_average_keys('.*_accuracy', 'Average Accuracy', trainer.visdom)\n config.logger.visualize_average('LRs', trainer.visdom)\n\n test_average_acc = config.logger.get_measure('test_accuracy').mean_epoch()\n\n # Save the logger for future reference.\n torch.save(config.logger.measures, os.path.join(home_path, 'logger.pth'))\n\n # Saving a checkpoint. Enable if needed!\n # if args.save and epoch % 10 == 0:\n # print('Saving a %s at iter %s'%(colored('snapshot', 'yellow'), colored('%d'%epoch, 'yellow')))\n # torch.save(config.model.state_dict(), os.path.join(home_path, 'model.%d.pth'%epoch))\n\n if args.save and best_accuracy < test_average_acc:\n print('Updating the on file model with %s'%(colored('%.4f'%test_average_acc, 'red')))\n best_accuracy = test_average_acc\n torch.save(config.model.state_dict(), hbest_path)\n \n torch.save({'finished':True}, hbest_path+\".done\")\n if config.visualize:\n trainer.visdom.save([trainer.visdom.env])\n else:\n print(\"Skipping %s\"%(colored(home_path, 'yellow')))\n\n print(\"Loading the best model.\")\n config.model.load_state_dict(torch.load(hbest_path))\n config.model.eval()\n\n trainer.run_epoch(0, phase='all')\n test_average_acc = config.logger.get_measure('all_accuracy').mean_epoch(epoch=0)\n print(\"All average accuracy %s\"%colored('%.4f%%'%(test_average_acc*100), 'red'))\n"
]
| [
[
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"torch.save",
"torch.utils.data.DataLoader",
"torch.load"
]
]
|
tao-pr/52-challenges | [
"21c1723b8bb64c0a52afcca8f429b97e9948e86a"
]
| [
"011-tensor/tensor/run.py"
]
| [
"import tensorflow as tf\nimport numpy as np\nimport joblib\nimport logging\nimport json\n\nimport argparse\nimport sys\nimport os\nimport cv2\n\nfrom .model import build\nfrom .data import DataSet\n\n# Log to file and print to stdout simulteneously\nlogging.basicConfig(filename='tensor.log',level=logging.DEBUG)\nlogging.basicConfig(format='%(asctime)s %(levelname)s:%(message)s')\nlogging.getLogger().addHandler(logging.StreamHandler())\n\n\ndef commandline():\n \"\"\"\n Create an instance of argument parser\n \"\"\"\n parser = argparse.ArgumentParser(description='Model runner')\n parser.add_argument('--datapath', dest='path', default='data',\n help='Path to read data from')\n parser.add_argument(\"--outputpath\", dest='out', default='out',\n help='Output path to store visual predictions')\n parser.add_argument('--ratio', dest='ratio', default=0.9, type=float,\n help='Ratio of training, ranging between 0-1')\n parser.add_argument(\"--batch\", dest=\"batch\", default=64, type=int,\n help=\"Size of each batch\")\n parser.add_argument(\"--epoch\", dest=\"epoch\", default=3, type=int,\n help=\"Number of epochs to run\")\n args = parser.parse_args()\n return args\n\n\nif __name__ == '__main__':\n cmdline = commandline()\n\n logging.info(\"Tensor run started\")\n logging.info(\"... Data path : {}\".format(cmdline.path))\n logging.info(\"... Ratio of training : {:.2f}\".format(cmdline.ratio))\n\n if not os.path.exists(cmdline.path) or os.path.isfile(cmdline.path):\n raise FileNotFoundError(\"Unable to find data path : {}\".format(cmdline.path))\n\n # Load and split the dataset\n ds = DataSet(cmdline.path)\n train,test = ds.load_split(cmdline.ratio)\n train_x, train_y, _ = zip(*train)\n test_x, test_y, test_filenames = zip(*test)\n\n # Reshape inputs (x,y), and make sure they are floating (tensor-compatible)\n h,w = test_x[0].shape[0], test_x[0].shape[1]\n train_x = np.array(train_x)\n train_x = train_x.reshape(len(train_y), w, w, 1)\n train_y = np.array(train_y).astype(float)\n\n test_x = np.array(test_x)\n test_x = test_x.reshape(len(test_y), w, w, 1)\n test_y = np.array(test_y).astype(float)\n \n # Train & validate\n logging.info(\"Fitting the model\")\n logging.debug(\"... Input shape : {}\".format(train_x.shape))\n w = train_x[0].shape[0]\n m = build(w)\n hist = m.fit(\n train_x, train_y, \n batch_size=cmdline.batch, \n epochs=cmdline.epoch,\n validation_data=(test_x, test_y))\n logging.debug(\"Fitting DONE\")\n\n # Save fitting history as json\n with open(\"tensor-history.json\", \"w\") as f:\n logging.info(\"Saving history of fitting as json\")\n safe = lambda v: [i.item() for i in v]\n hs = {k:safe(v) for k,v in hist.history.items()}\n json.dump(hs, f, indent=2)\n\n # Save model (only weights)\n logging.info(\"Saving model to model.checkpoint\")\n m.save_weights(\"model.checkpoint\")\n logging.debug(\"... Model SAVED\")\n\n # Render the predictions\n if not os.path.exists(cmdline.out) and not os.path.isfile(cmdline.out):\n os.mkdir(cmdline.out)\n\n logging.info(\"Evaluating model\")\n loss = m.evaluate(test_x, test_y, batch_size=cmdline.batch)\n logging.info(\"... loss = {}\".format(loss))\n\n logging.info(\"Rendering visual predictions\")\n logging.info(\"... Test size : {}\".format(len(test_x)))\n out = m.predict(test_x)\n for xy,filename in zip(out, test_filenames):\n x,y = xy\n fullpath = os.path.join(cmdline.out, filename)\n originalpath = os.path.join(cmdline.path, filename)\n logging.debug(\"... 
Saving output to {}\".format(fullpath))\n \n im = cv2.imread(originalpath)\n if y>=0 and y<h:\n cv2.line(im, (0,y), (w,y), (245,0,0), 1)\n if x>=0 and x<w:\n cv2.line(im, (x,0), (x,h), (245,0,0), 1)\n cv2.imwrite(fullpath, im)\n\n\n\n"
]
| [
[
"numpy.array"
]
]
|
w-klijn/TVB-NEST | [
"f3a4e18c9c6aab9d39d7ab45097d2fa98418cc1e"
]
| [
"nest_elephant_tvb/translation/science_tvb_to_nest.py"
]
| [
"# Copyright 2020 Forschungszentrum Jülich GmbH and Aix-Marseille Université\n# \"Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements; and to You under the Apache License, Version 2.0. \"\n\nimport numpy as np\nfrom nest_elephant_tvb.translation.rate_spike import rates_to_spikes\nfrom quantities import ms,Hz\nimport logging\n\n# Can be changed to the function we had with elephant, this is just a toy function\ndef toy_rates_to_spikes(rates,t_start,t_stop):\n '''\n transform rate in spike with random value for testing\n :param rates: rates from tvb\n :param t_start: time of starting simulation\n :param t_stop: time of ending simulation\n :return: times of spikes\n '''\n times = t_start + np.random.rand(rates.shape[-1]) * (t_stop-t_start)\n times = np.around(np.sort(np.array(times)), decimals=1)\n return times\n\nclass generate_data:\n def __init__(self,path,nb_spike_generator,param):\n \"\"\"\n generate spike train for each neurons\n :param path : path for the logger files\n :param nb_spike_generator: number of spike generator/neurons in each regions\n \"\"\"\n self.percentage_shared = param['percentage_shared'] # percentage of shared rate between neurons\n self.nb_spike_generator = nb_spike_generator # number of spike generator\n self.nb_synapse = param['nb_synapses'] # number of synapses by neurons\n self.function_translation = param['function_select'] # choose the function for the translation\n\n np.random.seed(param['seed'])\n\n # configure the logger\n level_log = param['level_log']\n self.logger = logging.getLogger('generator')\n fh = logging.FileHandler(path+'/tvb_to_nest_science.log')\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n fh.setFormatter(formatter)\n self.logger.addHandler(fh)\n if level_log == 0:\n fh.setLevel(logging.DEBUG)\n self.logger.setLevel(logging.DEBUG)\n elif level_log == 1:\n fh.setLevel(logging.INFO)\n self.logger.setLevel(logging.INFO)\n elif level_log == 2:\n fh.setLevel(logging.WARNING)\n self.logger.setLevel(logging.WARNING)\n elif level_log == 3:\n fh.setLevel(logging.ERROR)\n self.logger.setLevel(logging.ERROR)\n elif level_log == 4:\n fh.setLevel(logging.CRITICAL)\n self.logger.setLevel(logging.CRITICAL)\n\n def generate_spike(self,count,time_step,rate):\n \"\"\"\n generate spike\n This function are based on the paper : Kuhn, Alexandre, Ad Aertsen, and Stefan Rotter. “Higher-Order Statistics of Input Ensembles and the Response of Simple Model Neurons.” Neural Computation 15, no. 1 (January 2003): 67–101. 
https://doi.org/10.1162/089976603321043702.\n DOI: 10.1162/089976603321043702\n function 1 : Single Interaction Process Model\n function 2 : Multiple Interaction Process Model\n :param count: the number of step of synchronization between simulators\n :param time_step: the time of synchronization\n :param rate: the input rate of the mean field\n :return:\n \"\"\"\n if self.function_translation == 1:\n # Single Interaction Process Model\n # Compute the rate to spike trains\n rate *= self.nb_synapse # rate of poisson generator ( due property of poisson process)\n rate += 1e-12 # avoid rate equals to zeros\n spike_shared = \\\n rates_to_spikes(rate * self.percentage_shared * Hz,\n time_step[0] * ms, time_step[1] * ms, variation=True)[0]\n spike_generate = rates_to_spikes(np.repeat([rate],self.nb_spike_generator,axis=0) * (1 - self.percentage_shared) * Hz, time_step[0] * ms, time_step[1] * ms,\n variation=True)\n for i in range(self.nb_spike_generator):\n spike_generate[i] = np.around(np.sort(np.concatenate((spike_generate, spike_shared))), decimals=1)\n self.logger.info('rate :'+str(rate)+' spikes :'+str(np.concatenate(spike_generate).shape))\n return spike_generate\n elif self.function_translation == 2:\n # Multiple Interaction Process Model\n rate *= self.nb_synapse / self.percentage_shared # rate of poisson generator ( due property of poisson process)\n rate += 1e-12 # avoid rate equals to zeros\n spike_shared = np.round(rates_to_spikes(rate * Hz, time_step[0] * ms, time_step[1] * ms, variation=True)[0],1)\n select = np.random.binomial(n=1,p=self.percentage_shared,size=(self.nb_spike_generator,spike_shared.shape[0]))\n result = []\n for i in np.repeat([spike_shared],self.nb_spike_generator,axis=0)*select :\n result.append(i[np.where(i!=0)])\n self.logger.info('rate :'+str(rate)+' spikes :'+str(spike_shared))\n return result\n"
]
| [
[
"numpy.concatenate",
"numpy.array",
"numpy.random.binomial",
"numpy.random.rand",
"numpy.random.seed",
"numpy.where",
"numpy.repeat"
]
]
|
colour-science/prysm | [
"6673eaada6766f4debd910c445741accfc25aac2"
]
| [
"prysm/coordinates.py"
]
| [
"''' Coordinate conversions\n'''\nimport numpy as np\nfrom scipy import interpolate\n\nfrom prysm.mathops import pi, sqrt, atan2, cos, sin, exp\n\n\ndef cart_to_polar(x, y):\n ''' Returns the (rho,phi) coordinates of the (x,y) input points.\n\n Args:\n x (float): x coordinate.\n\n y (float): y coordinate.\n\n Returns:\n `tuple` containing:\n\n `float` or `numpy.ndarray`: radial coordinate.\n\n `float` or `numpy.ndarray`: azimuthal coordinate.\n\n '''\n rho = sqrt(x ** 2 + y ** 2)\n phi = atan2(y, x)\n return rho, phi\n\n\ndef polar_to_cart(rho, phi):\n ''' Returns the (x,y) coordinates of the (rho,phi) input points.\n\n Args:\n rho (`float` or `numpy.ndarray`): radial coordinate.\n\n phi (`float` or `numpy.ndarray`): azimuthal cordinate.\n\n Returns:\n `tuple` containing:\n\n `float` or `numpy.ndarray`: x coordinate.\n\n `float` or `numpy.ndarray`: y coordinate.\n\n '''\n x = rho * cos(phi)\n y = rho * sin(phi)\n return x, y\n\n\ndef uniform_cart_to_polar(x, y, data):\n ''' Interpolates data uniformly sampled in cartesian coordinates to polar\n coordinates.\n\n Args:\n x (`numpy.ndarray`): sorted 1D array of x sample pts.\n\n y (`numpy.ndarray`): sorted 1D array of y sample pts.\n\n data (`numpy.ndarray`): data sampled over the (x,y) coordinates.\n\n Returns:\n `tuple` containing:\n `numpy.ndarray`: rho samples for interpolated values.\n\n `numpy.ndarray`: phi samples for interpolated values.\n\n `numpy.ndarray`: data uniformly sampled in (rho,phi).\n\n Notes:\n Assumes data is sampled along x = [-1,1] and y = [-1,1] over a square grid.\n\n '''\n # create a set of polar coordinates to interpolate onto\n xmax = x[-1]\n num_pts = len(x)\n rho = np.linspace(0, xmax, num_pts / 2)\n phi = np.linspace(0, 2 * pi, num_pts)\n rv, pv = np.meshgrid(rho, phi)\n\n # map points to x, y and make a grid for the original samples\n xv, yv = polar_to_cart(rv, pv)\n\n # interpolate the function onto the new points\n f = interpolate.RegularGridInterpolator((x, y), data)\n return rho, phi, f((xv, yv), method='linear')\n\n\ndef resample_2d(array, sample_pts, query_pts):\n ''' Resamples 2D array to be sampled along queried points.\n\n Args:\n array (numpy.ndarray): 2D array.\n\n sample_pts (tuple): pair of numpy.ndarray objects that contain the x and y sample locations,\n each array should be 1D.\n\n query_pts (tuple): points to interpolate onto, also 1D for each array.\n\n Returns:\n numpy.ndarray. array resampled onto query_pts via bivariate spline.\n\n '''\n xq, yq = np.meshgrid(*query_pts)\n interpf = interpolate.RectBivariateSpline(*sample_pts, array)\n return interpf.ev(yq, xq)\n\n\ndef resample_2d_complex(array, sample_pts, query_pts):\n ''' Resamples a 2D complex array by interpolating the magnitude and phase\n independently and merging the results into a complex value.\n\n Args:\n array (numpy.ndarray): complex 2D array.\n\n sample_pts (tuple): pair of numpy.ndarray objects that contain the x and y sample locations,\n each array should be 1D.\n\n query_pts (tuple): points to interpolate onto, also 1D for each array.\n\n Returns:\n numpy.ndarray array resampled onto query_pts via bivariate spline\n\n '''\n xq, yq = np.meshgrid(*query_pts)\n mag = abs(array)\n phase = np.angle(array)\n\n magfunc = interpolate.RegularGridInterpolator(sample_pts, mag)\n phasefunc = interpolate.RegularGridInterpolator(sample_pts, phase)\n\n interp_mag = magfunc((yq, xq))\n interp_phase = phasefunc((yq, xq))\n\n return interp_mag * exp(1j * interp_phase)\n"
]
| [
[
"numpy.angle",
"scipy.interpolate.RectBivariateSpline",
"numpy.linspace",
"scipy.interpolate.RegularGridInterpolator",
"numpy.meshgrid"
]
]
|
nusnlp/neuralreord-aaai2017 | [
"82e04d1b5beadda9c8c0b8a684fe0aa8230852e3"
]
| [
"tools/DependencyReordering/nnAdapt/models/logistic_sgd_beta_zeros.py"
]
| [
"#!/usr/bin/python\n\"\"\"\nThis code is adapted from Deep Learning tutorial introducing logistic regression\nusing Theano and stochastic gradient descent.\n\n\"\"\"\n__docformat__ = 'restructedtext en'\n\nimport cPickle\nimport gzip\nimport os\nimport sys\nimport timeit\n\nimport numpy\n\nimport theano\nimport theano.tensor as T\n\n\nclass LogisticRegression(object):\n \"\"\"Binary Logistic Regression Class\n\n The logistic regression is fully described by a weight matrix :math:`W`\n and bias vector :math:`b`. Classification is done by projecting data\n points onto a set of hyperplanes, the distance to which is used to\n determine a class membership probability.\n \"\"\"\n\n def __init__(self, rng, input, n_in, W=None, b=None):\n \"\"\" Initialize the parameters of the logistic regression\n\n :type input: theano.tensor.TensorType\n :param input: symbolic variable that describes the input of the\n architecture (one minibatch)\n\n :type n_in: int\n :param n_in: number of input units, the dimension of the space in\n which the datapoints lie\n\n :type n_out: int\n :param n_out: number of output units, the dimension of the space in\n which the labels lie\n\n \"\"\"\n # start-snippet-1\n # initialize with 0 the weights W as a matrix of shape (n_in, n_out)\n if W is None:\n W = theano.shared(\n value=numpy.zeros(\n (n_in,),\n dtype=theano.config.floatX\n ),\n name='W',\n borrow=True\n )\n self.W = W\n # initialize the biases b as a vector of n_out 0s\n if b is None:\n b = theano.shared(\n value=0., name='b',\n borrow=True\n )\n self.b = b\n\n # L1 norm ; one regularization option is to enforce L1 norm to\n # be small\n self.L1 = abs(self.W).sum()\n\n # square of L2 norm ; one regularization option is to enforce\n # square of L2 norm to be small\n self.L2_sqr = (self.W ** 2).sum()\n\n # symbolic expression for computing the probability of positive case (p_1)\n # Where:\n # h_1 is the linear component for the logit\n # W is a vector of separation hyperplane for positive case\n # x is a matrix where row-j represents input training sample-j\n # b is the free parameter of the positive case\n h_1 = T.dot(input, self.W) + self.b\n self.p_1 = 1 / (1 + T.exp(-h_1))\n\n # symbolic description of how to compute prediction as class whose\n # probability is maximal\n self.y_pred = (self.p_1 > 0.5)\n # end-snippet-1\n\n # parameters of the model\n self.params = [self.W, self.b]\n\n # keep track of model input\n self.input = input\n\n def cross_entropy(self, y, class_weights):\n \"\"\"Return the mean of the cross-entropy\n assuming L2 regularization\n\n :type y: theano.tensor.TensorType\n :param y: corresponds to a vecvtor that gives for each example the correct label\n\n \"\"\"\n if class_weights is None:\n class_weights = (1., 1.)\n return -T.mean(T.cast(class_weights[1], theano.config.floatX) * T.cast(y, theano.config.floatX) * T.log(self.p_1) +\n T.cast(class_weights[0], theano.config.floatX) * (1-T.cast(y, theano.config.floatX)) * T.log(1-self.p_1))\n #return -T.mean(T.cast(y, theano.config.floatX) * T.log(self.p_1) +\n # (1-T.cast(y, theano.config.floatX)) * T.log(1-self.p_1))\n \n def errors(self, y):\n \"\"\"Return a float representing the number of errors in the minibatch\n over the total number of examples of the minibatch ; zero one\n loss over the size of the minibatch\n\n :type y: theano.tensor.TensorType\n :param y: corresponds to a vector that gives for each example the\n correct label\n \"\"\"\n\n # check if y has same dimension of y_pred\n if y.ndim != self.y_pred.ndim:\n raise TypeError(\n 'y 
should have the same shape as self.y_pred',\n ('y', y.type, 'y_pred', self.y_pred.type)\n )\n # check if y is of the correct datatype\n if y.dtype.startswith('int'):\n # the T.neq operator returns a vector of 0s and 1s, where 1\n # represents a mistake in prediction\n return T.mean(T.neq(self.y_pred, y))\n else:\n raise NotImplementedError()\n \n def true_positives(self, y):\n \"\"\"Return an integer representing the number of true positives in the minibatch\n \"\"\"\n # check if y has same dimension of y_pred\n if y.ndim != self.y_pred.ndim:\n raise TypeError(\n 'y should have the same shape as self.y_pred',\n ('y', y.type, 'y_pred', self.y_pred.type)\n )\n # check if y is of the correct datatype\n if y.dtype.startswith('int'):\n # the T.eq operator returns a vector of 0s and 1s, where 1\n # represents a correct prediction\n return T.sum(T.eq(self.y_pred, y) * y)\n else:\n raise NotImplementedError()\n\n def true_negatives(self, y):\n \"\"\"Return an integer representing the number of true positives in the minibatch\n \"\"\"\n # check if y has same dimension of y_pred\n if y.ndim != self.y_pred.ndim:\n raise TypeError(\n 'y should have the same shape as self.y_pred',\n ('y', y.type, 'y_pred', self.y_pred.type)\n )\n # check if y is of the correct datatype\n if y.dtype.startswith('int'):\n # the T.eq operator returns a vector of 0s and 1s, where 1\n # represents a correct prediction\n return T.sum(T.eq(self.y_pred, y) * (1-y))\n else:\n raise NotImplementedError()\n\n def false_positives(self, y):\n \"\"\"Return an integer representing the number of true positives in the minibatch\n \"\"\"\n # check if y has same dimension of y_pred\n if y.ndim != self.y_pred.ndim:\n raise TypeError(\n 'y should have the same shape as self.y_pred',\n ('y', y.type, 'y_pred', self.y_pred.type)\n )\n # check if y is of the correct datatype\n if y.dtype.startswith('int'):\n # the T.eq operator returns a vector of 0s and 1s, where 1\n # represents an error\n return T.sum(T.neq(self.y_pred, y) * self.y_pred)\n else:\n raise NotImplementedError()\n\n def false_negatives(self, y):\n \"\"\"Return an integer representing the number of true positives in the minibatch\n \"\"\"\n # check if y has same dimension of y_pred\n if y.ndim != self.y_pred.ndim:\n raise TypeError(\n 'y should have the same shape as self.y_pred',\n ('y', y.type, 'y_pred', self.y_pred.type)\n )\n # check if y is of the correct datatype\n if y.dtype.startswith('int'):\n # the T.eq operator returns a vector of 0s and 1s, where 1\n # represents an error\n return T.sum(T.neq(self.y_pred, y) * y)\n else:\n raise NotImplementedError()\n"
]
| [
[
"numpy.zeros"
]
]
|
giordafrancis/DSfS | [
"e854db2da376e1c3efe7740073b55f8692cb0863"
]
| [
"old_dsfs/1_probability.py"
]
| [
"# -*- coding: utf-8 -*-\n# ---\n# jupyter:\n# jupytext:\n# formats: ipynb,py:light\n# text_representation:\n# extension: .py\n# format_name: light\n# format_version: '1.4'\n# jupytext_version: 1.2.1\n# kernelspec:\n# display_name: Python 3\n# language: python\n# name: python3\n# ---\n\nimport enum, random\n\n\n# Given a family of two kids , What is the probability of the event both children are girls, conditional on the event \"at least one of the children is a girls(L)\n\n# +\n# more on class Enum in the book\nclass KID(enum.Enum):\n BOY = 0\n GIRL = 1\n\ndef random_kid():\n return random.choice((KID.GIRL, KID.BOY))\n\n\n# +\nrandom.seed(0)\n\nboth_girls = 0\nolder_girl = 0\neither_girl = 0\n\nfor _ in range(10000):\n younger = random_kid()\n older = random_kid()\n if older == KID.GIRL:\n older_girl += 1\n if younger == KID.GIRL and older == KID.GIRL:\n both_girls += 1\n if younger == KID.GIRL or older == KID.GIRL:\n either_girl += 1 \n# -\n\nprint(\"P(both|older):\", both_girls/ older_girl)\nprint(\"P(both|either):\", both_girls/ either_girl)\n\n\ndef uniform_pdf(x: float) -> float:\n return 1 if 0<= x < 1 else 0\n\n\ndef uniform_cdf(x: float) -> float:\n \"\"\"Returns the probability that a uniform random variable is <=x\"\"\"\n if x < 0: return 0 # uniform random is never less than 0\n elif x < 1 : return x # e.g. P(X <= 0.4)\n else: return 1 # uniform random is always less than 1\n\n\n# +\nimport math\n\nSQRT_TWO_PI = math.sqrt(2 * math.pi)\ndef normal_pdf(x: float, mu: float = 0, sigma: float = 1) -> float:\n return (math.exp(-(x-mu) ** 2 / 2 / sigma ** 2) / (SQRT_TWO_PI * sigma))\n\n\n# +\nimport matplotlib.pyplot as plt\n# %matplotlib inline\n\nxs = [x / 10.0 for x in range(-50, 50)]\nplt.plot(xs, [normal_pdf(x, sigma=1) for x in xs], '-', label='mu=0,sigma=1')\nplt.plot(xs, [normal_pdf(x, sigma=2) for x in xs], '--', label='mu=0,sigma=2')\nplt.plot(xs, [normal_pdf(x, sigma=0.5) for x in xs], ':', label='mu=0,sigma=0.5')\nplt.plot(xs, [normal_pdf(x,mu=-1, sigma=1) for x in xs], '-.', label='mu=-1,sigma=1')\nplt.legend();\nplt.title(\"Various Normal pdfs\");\n\n# -\n# The normal cdf is the probaility the variable is below a threshold\n\ndef normal_cdf(x: float, mu: float = 0, sigma: float = 1) -> float:\n return (1 + math.erf((x - mu) / math.sqrt(2) / sigma)) / 2\n\n\nplt.plot(xs, [normal_cdf(x) for x in xs], '-', label=\"mu=0,sigma=1\")\nplt.plot(xs, [normal_cdf(x, sigma=2) for x in xs], '--', label=\"mu=0,sigma=2\")\nplt.plot(xs, [normal_cdf(x, sigma=0.5) for x in xs], ':', label=\"mu=0,sigma=0.5\")\nplt.plot(xs, [normal_cdf(x, mu=-1) for x in xs], '-.', label=\"mu=0,sigma=1\")\nplt.legend(loc=4) # bottom right\nplt.title(\"Various Normal cdfs\");\n\n\n# Sometimes we’ll need to invert normal_cdf to find the value corresponding to aspecified probability. \n# There’s no simple way to compute its inverse, but normal_cdf is continuous and strictly increasing, so we can use a binary search:\n#\n# Binary search compares the target value to the middle element of the array. 
If they are not equal, the half in which the target cannot lie is eliminated and the search continues on the remaining half, again taking the middle element to compare to the target value, and repeating this until the target value is found.\n\ndef inverse_normal_cdf(p: float, mu: float = 0, sigma: float = 1, tolerance: float = 0.00001) -> float:\n \"\"\"Find approximate inverse using binary search\"\"\"\n # if not standard, compute standard and rescale \n if mu != 0 or sigma != 1:\n return mu + sigma * inverse_normal_cdf(p, tolerance=tolerance)\n low_z = -10.0 # normal_cdf(-10) is very close to 0\n hi_z = 10.0 # normal_cdf(10) is very close to 1\n while hi_z - low_z > tolerance:\n mid_z = (low_z + hi_z) / 2 # Consider the midpoint\n mid_p = normal_cdf(mid_z) # and the cdf's valu there\n \n if mid_p < p:\n low_z = mid_z # Midpoint too low, search above it\n else:\n hi_z = mid_z # Midpoint too high, search below it\n return mid_z\n\n\nassert -0.0001 < inverse_normal_cdf(.5) < 0.0001\n\n\n# Central Limit theorem\n\n# One reason the normal distribution is so useful is the central limit theorem, whichsays (in essence) that a random variable defined as the average of a large number ofindependent and identically distributed random variables is itself approximately nor‐mally distributed.\n\n# +\ndef bernoulli_trial(p: float) -> int:\n \"\"\"Returns 1 with probability p and 0 with probability 1-p\"\"\"\n return 1 if random.random() < p else 0\n\ndef binomial(n: int, p: float) -> int:\n \"\"\"Returns the sum of n bernoulli(p) trials\"\"\"\n return sum(bernoulli_trial(p) for _ in range(n))\n\n\n\n# -\n\n# The mean of a Bernoulli(p) variable is p, and its standard deviation is sqrt(p(1 - p)). Thecentral limit theorem says that as n gets large, a Binomial(n,p) variable is approxi‐mately a normal random variable with mean μ= n* p and standard deviation σ= sqrt(n * p(1 − p)).`\n\nbinomial(10, 0.1)\n\nfrom collections import Counter\n\n\ndef binomial_histogram(p: float, n: int, num_points: int) -> None:\n \"\"\"Picks points from a Binomial(n, p) and plot their histogram\"\"\"\n \n data = [binomial(n, p) for _ in range(num_points)]\n \n # use bar chart to show the actual binomial samples\n histogram = Counter(data)\n plt.bar([x - 0.4 for x in histogram.keys()], \n [v / num_points for v in histogram.values()],\n 0.8,\n color='0.75')\n \n mu = p * n\n sigma = math.sqrt(n * p * (1 - p))\n \n # use a line chart to show the normal approximation\n xs = range(min(data), max(data) + 1)\n ys = [normal_cdf(i + 0.5, mu, sigma) - normal_cdf(i - 0.5, mu, sigma)\n for i in xs]\n plt.plot(xs, ys)\n plt.title(\"Binomial distribution vs. Normal Approximation\") \n\n\nbinomial_histogram(0.75, 100, 10000)\n\n\n\n\n\n\n"
]
| [
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.plot"
]
]
|
Django-Jiang/BigDL | [
"542c0e18f93eaafa571613fd5951278ddaf5446d"
]
| [
"python/chronos/test/bigdl/chronos/autots/test_autotsestimator.py"
]
| [
"#\n# Copyright 2016 The BigDL Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nfrom unittest import TestCase\nimport pytest\n\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import TensorDataset, DataLoader\nimport numpy as np\nfrom bigdl.chronos.autots import AutoTSEstimator, TSPipeline\nfrom bigdl.chronos.data import TSDataset\nfrom bigdl.orca.automl import hp\nimport pandas as pd\nimport tensorflow as tf\nimport onnxruntime\n\n_onnxrt_ver = onnxruntime.__version__ != '1.6.0' # Jenkins requires 1.6.0(chronos)\nskip_onnxrt = pytest.mark.skipif(_onnxrt_ver, reason=\"Only runs when onnxrt is 1.6.0\")\n\n\ndef get_ts_df():\n sample_num = np.random.randint(100, 200)\n train_df = pd.DataFrame({\"datetime\": pd.date_range('1/1/2019', periods=sample_num),\n \"value 1\": np.random.randn(sample_num),\n \"value 2\": np.random.randn(sample_num),\n \"id\": np.array(['00'] * sample_num),\n \"extra feature 1\": np.random.randn(sample_num),\n \"extra feature 2\": np.random.randn(sample_num)})\n return train_df\n\n\ndef get_tsdataset():\n df = get_ts_df()\n return TSDataset.from_pandas(df,\n dt_col=\"datetime\",\n target_col=[\"value 1\", \"value 2\"],\n extra_feature_col=[\"extra feature 1\", \"extra feature 2\"],\n id_col=\"id\")\n\n\ndef get_data_creator(backend=\"torch\"):\n if backend == \"torch\":\n def data_creator(config):\n tsdata = get_tsdataset()\n x, y = tsdata.roll(lookback=7, horizon=1).to_numpy()\n return DataLoader(TensorDataset(torch.from_numpy(x).float(),\n torch.from_numpy(y).float()),\n batch_size=config[\"batch_size\"],\n shuffle=True)\n return data_creator\n if backend == \"keras\":\n def data_creator(config):\n tsdata = get_tsdataset()\n tsdata.roll(lookback=7, horizon=1)\n return tsdata.to_tf_dataset(batch_size=config[\"batch_size\"],\n shuffle=True)\n return data_creator\n\n\nclass CustomizedNet(nn.Module):\n def __init__(self,\n dropout,\n input_size,\n input_feature_num,\n hidden_dim,\n output_size):\n '''\n Simply use linear layers for multi-variate single-step forecasting.\n '''\n super().__init__()\n self.fc1 = nn.Linear(input_size*input_feature_num, hidden_dim)\n self.dropout = nn.Dropout(dropout)\n self.relu1 = nn.ReLU()\n self.fc2 = nn.Linear(hidden_dim, output_size)\n\n def forward(self, x):\n # x.shape = (num_sample, input_size, input_feature_num)\n x = x.view(-1, x.shape[1]*x.shape[2])\n x = self.fc1(x)\n x = self.dropout(x)\n x = self.relu1(x)\n x = self.fc2(x)\n # x.shape = (num_sample, output_size)\n x = torch.unsqueeze(x, 1)\n # x.shape = (num_sample, 1, output_size)\n return x\n\n\ndef model_creator_pytorch(config):\n '''\n Pytorch customized model creator\n '''\n return CustomizedNet(dropout=config[\"dropout\"],\n input_size=config[\"past_seq_len\"],\n input_feature_num=config[\"input_feature_num\"],\n hidden_dim=config[\"hidden_dim\"],\n output_size=config[\"output_feature_num\"])\n\n\ndef model_creator_keras(config):\n '''\n Keras(tf2) customized model creator\n '''\n from bigdl.nano.tf.keras import Sequential\n model = Sequential([\n 
tf.keras.layers.Input(shape=(config[\"past_seq_len\"], config[\"input_feature_num\"])),\n tf.keras.layers.Dense(config[\"hidden_dim\"], activation='relu'),\n tf.keras.layers.Dropout(config[\"dropout\"]),\n tf.keras.layers.Dense(config[\"output_feature_num\"], activation='softmax')\n ])\n learning_rate = config.get('lr', 1e-3)\n optimizer = getattr(tf.keras.optimizers, config.get('optim', \"Adam\"))(learning_rate)\n model.compile(loss=config.get(\"loss\", \"mse\"),\n optimizer=optimizer,\n metrics=[config.get(\"metric\", \"mse\")])\n return model\n\n\nclass TestAutoTrainer(TestCase):\n def setUp(self) -> None:\n from bigdl.orca import init_orca_context\n init_orca_context(cores=8, init_ray_on_spark=True)\n\n def tearDown(self) -> None:\n from bigdl.orca import stop_orca_context\n stop_orca_context()\n\n def test_fit_third_party_feature(self):\n from sklearn.preprocessing import StandardScaler\n scaler = StandardScaler()\n tsdata_train = get_tsdataset().gen_dt_feature().scale(scaler, fit=True)\n tsdata_valid = get_tsdataset().gen_dt_feature().scale(scaler, fit=False)\n\n search_space = {\n 'hidden_dim': hp.grid_search([32, 64]),\n 'dropout': hp.uniform(0.1, 0.2)\n }\n\n auto_estimator = AutoTSEstimator(model=model_creator_pytorch,\n search_space=search_space,\n past_seq_len=hp.randint(4, 6),\n future_seq_len=1,\n selected_features=\"auto\",\n metric=\"mse\",\n loss=torch.nn.MSELoss(),\n cpus_per_trial=2)\n\n ts_pipeline = auto_estimator.fit(data=tsdata_train,\n epochs=1,\n batch_size=hp.choice([32, 64]),\n validation_data=tsdata_valid,\n n_sampling=1)\n best_config = auto_estimator.get_best_config()\n best_model = auto_estimator._get_best_automl_model()\n assert 4 <= best_config[\"past_seq_len\"] <= 6\n\n assert isinstance(ts_pipeline, TSPipeline)\n\n # use raw base model to predic and evaluate\n tsdata_valid.roll(lookback=best_config[\"past_seq_len\"],\n horizon=0,\n feature_col=best_config[\"selected_features\"])\n x_valid, y_valid = tsdata_valid.to_numpy()\n y_pred_raw = best_model.predict(x_valid)\n y_pred_raw = tsdata_valid.unscale_numpy(y_pred_raw)\n\n # use tspipeline to predic and evaluate\n eval_result = ts_pipeline.evaluate(tsdata_valid)\n y_pred = ts_pipeline.predict(tsdata_valid)\n\n # check if they are the same\n np.testing.assert_almost_equal(y_pred, y_pred_raw)\n\n # save and load\n ts_pipeline.save(\"/tmp/auto_trainer/autots_tmp_model_3rdparty\")\n new_ts_pipeline = TSPipeline.load(\"/tmp/auto_trainer/autots_tmp_model_3rdparty\")\n\n # check if load ppl is the same as previous\n eval_result_new = new_ts_pipeline.evaluate(tsdata_valid)\n y_pred_new = new_ts_pipeline.predict(tsdata_valid)\n np.testing.assert_almost_equal(eval_result[0], eval_result_new[0])\n np.testing.assert_almost_equal(y_pred, y_pred_new)\n\n # use tspipeline to incrementally train\n new_ts_pipeline.fit(tsdata_valid)\n\n @pytest.mark.skipif(tf.__version__ < '2.0.0', reason=\"run only when tf>2.0.0\")\n def test_fit_third_party_feature_tf2(self):\n search_space = {'hidden_dim': hp.grid_search([32, 64]),\n 'layer_num': hp.randint(1, 3),\n 'dropout': hp.uniform(0.1, 0.2)}\n auto_estimator = AutoTSEstimator(model=model_creator_keras,\n search_space=search_space,\n past_seq_len=7,\n future_seq_len=1,\n input_feature_num=None,\n output_target_num=None,\n selected_features=\"auto\",\n metric=\"mse\",\n backend=\"keras\",\n logs_dir=\"/tmp/auto_trainer\",\n cpus_per_trial=2,\n name=\"auto_trainer\")\n auto_estimator.fit(data=get_tsdataset(),\n epochs=1,\n batch_size=hp.choice([32, 64]),\n 
validation_data=get_tsdataset(),\n n_sampling=1)\n config = auto_estimator.get_best_config()\n assert config[\"past_seq_len\"] == 7\n\n def test_fit_third_party_data_creator(self):\n input_feature_dim = 4\n output_feature_dim = 2 # 2 targets are generated in get_tsdataset\n\n search_space = {\n 'hidden_dim': hp.grid_search([32, 64]),\n 'dropout': hp.uniform(0.1, 0.2)\n }\n\n auto_estimator = AutoTSEstimator(model=model_creator_pytorch,\n search_space=search_space,\n past_seq_len=7,\n future_seq_len=1,\n input_feature_num=input_feature_dim,\n output_target_num=output_feature_dim,\n selected_features=\"auto\",\n metric=\"mse\",\n loss=torch.nn.MSELoss(),\n cpus_per_trial=2)\n\n auto_estimator.fit(data=get_data_creator(),\n epochs=1,\n batch_size=hp.choice([32, 64]),\n validation_data=get_data_creator(),\n n_sampling=1)\n\n config = auto_estimator.get_best_config()\n assert config[\"past_seq_len\"] == 7\n\n @pytest.mark.skipif(tf.__version__ < '2.0.0', reason=\"run only when tf>2.0.0\")\n def test_fit_third_party_data_creator_tf2(self):\n search_space = {'hidden_dim': hp.grid_search([32, 64]),\n 'layer_num': hp.randint(1, 3),\n 'dropout': hp.uniform(0.1, 0.2)}\n auto_estimator = AutoTSEstimator(model=model_creator_keras,\n search_space=search_space,\n past_seq_len=7,\n future_seq_len=1,\n input_feature_num=4,\n output_target_num=2,\n selected_features=\"auto\",\n metric=\"mse\",\n backend=\"keras\",\n logs_dir=\"/tmp/auto_trainer\",\n cpus_per_trial=2,\n name=\"auto_trainer\")\n auto_estimator.fit(data=get_data_creator(backend=\"keras\"),\n epochs=1,\n batch_size=hp.choice([32, 64]),\n validation_data=get_data_creator(backend=\"keras\"),\n n_sampling=1)\n config = auto_estimator.get_best_config()\n assert config[\"past_seq_len\"] == 7\n\n def test_fit_customized_metrics(self):\n from sklearn.preprocessing import StandardScaler\n from torchmetrics.functional import mean_squared_error\n import random\n\n scaler = StandardScaler()\n tsdata_train = get_tsdataset().gen_dt_feature().scale(scaler, fit=True)\n tsdata_valid = get_tsdataset().gen_dt_feature().scale(scaler, fit=False)\n\n def customized_metric(y_true, y_pred):\n return mean_squared_error(torch.from_numpy(y_pred),\n torch.from_numpy(y_true)).numpy()\n\n auto_estimator = AutoTSEstimator(model=random.choice(['tcn', 'lstm', 'seq2seq']),\n search_space=\"minimal\",\n past_seq_len=hp.randint(4, 6),\n future_seq_len=1,\n selected_features=\"auto\",\n metric=customized_metric,\n metric_mode=\"min\",\n optimizer=\"Adam\",\n loss=torch.nn.MSELoss(),\n logs_dir=\"/tmp/auto_trainer\",\n cpus_per_trial=2,\n name=\"auto_trainer\")\n ts_pipeline = auto_estimator.fit(data=tsdata_train,\n epochs=1,\n batch_size=hp.choice([32, 64]),\n validation_data=tsdata_valid,\n n_sampling=1)\n best_config = auto_estimator.get_best_config()\n best_model = auto_estimator._get_best_automl_model()\n assert 4 <= best_config[\"past_seq_len\"] <= 6\n\n @skip_onnxrt\n def test_fit_lstm_feature(self):\n from sklearn.preprocessing import StandardScaler\n scaler = StandardScaler()\n tsdata_train = get_tsdataset().gen_dt_feature().scale(scaler, fit=True)\n tsdata_valid = get_tsdataset().gen_dt_feature().scale(scaler, fit=False)\n\n auto_estimator = AutoTSEstimator(model='lstm',\n search_space=\"minimal\",\n past_seq_len=hp.randint(4, 6),\n future_seq_len=1,\n selected_features=\"auto\",\n metric=\"mse\",\n loss=torch.nn.MSELoss(),\n logs_dir=\"/tmp/auto_trainer\",\n cpus_per_trial=2,\n name=\"auto_trainer\")\n ts_pipeline = auto_estimator.fit(data=tsdata_train,\n epochs=1,\n 
batch_size=hp.choice([32, 64]),\n validation_data=tsdata_valid,\n n_sampling=1)\n best_config = auto_estimator.get_best_config()\n best_model = auto_estimator._get_best_automl_model()\n assert 4 <= best_config[\"past_seq_len\"] <= 6\n\n assert isinstance(ts_pipeline, TSPipeline)\n\n # use raw base model to predic and evaluate\n tsdata_valid.roll(lookback=best_config[\"past_seq_len\"],\n horizon=0,\n feature_col=best_config[\"selected_features\"])\n x_valid, y_valid = tsdata_valid.to_numpy()\n y_pred_raw = best_model.predict(x_valid)\n y_pred_raw = tsdata_valid.unscale_numpy(y_pred_raw)\n\n # use tspipeline to predic and evaluate\n eval_result = ts_pipeline.evaluate(tsdata_valid)\n y_pred = ts_pipeline.predict(tsdata_valid)\n\n # check if they are the same\n np.testing.assert_almost_equal(y_pred, y_pred_raw)\n\n # save and load\n ts_pipeline.save(\"/tmp/auto_trainer/autots_tmp_model_lstm\")\n new_ts_pipeline = TSPipeline.load(\"/tmp/auto_trainer/autots_tmp_model_lstm\")\n\n # check if load ppl is the same as previous\n eval_result_new = new_ts_pipeline.evaluate(tsdata_valid)\n y_pred_new = new_ts_pipeline.predict(tsdata_valid)\n np.testing.assert_almost_equal(eval_result[0], eval_result_new[0])\n np.testing.assert_almost_equal(y_pred, y_pred_new)\n\n # check if load ppl is the same as previous with onnx\n try:\n import onnx\n import onnxruntime\n eval_result_new_onnx = new_ts_pipeline.evaluate_with_onnx(tsdata_valid)\n y_pred_new_onnx = new_ts_pipeline.predict_with_onnx(tsdata_valid)\n np.testing.assert_almost_equal(eval_result[0], eval_result_new_onnx[0], decimal=5)\n np.testing.assert_almost_equal(y_pred, y_pred_new_onnx, decimal=5)\n except ImportError:\n pass\n\n # use tspipeline to incrementally train\n new_ts_pipeline.fit(tsdata_valid)\n\n @skip_onnxrt\n def test_fit_tcn_feature(self):\n from sklearn.preprocessing import StandardScaler\n scaler = StandardScaler()\n tsdata_train = get_tsdataset().gen_dt_feature().scale(scaler, fit=True)\n tsdata_valid = get_tsdataset().gen_dt_feature().scale(scaler, fit=False)\n\n auto_estimator = AutoTSEstimator(model='tcn',\n search_space=\"minimal\",\n past_seq_len=hp.randint(4, 6),\n future_seq_len=1,\n selected_features=\"auto\",\n metric=\"mse\",\n optimizer=\"Adam\",\n loss=torch.nn.MSELoss(),\n logs_dir=\"/tmp/auto_trainer\",\n cpus_per_trial=2,\n name=\"auto_trainer\")\n ts_pipeline = auto_estimator.fit(data=tsdata_train,\n epochs=1,\n batch_size=hp.choice([32, 64]),\n validation_data=tsdata_valid,\n n_sampling=1)\n best_config = auto_estimator.get_best_config()\n best_model = auto_estimator._get_best_automl_model()\n assert 4 <= best_config[\"past_seq_len\"] <= 6\n\n assert isinstance(ts_pipeline, TSPipeline)\n\n # use raw base model to predic and evaluate\n tsdata_valid.roll(lookback=best_config[\"past_seq_len\"],\n horizon=0,\n feature_col=best_config[\"selected_features\"])\n x_valid, y_valid = tsdata_valid.to_numpy()\n y_pred_raw = best_model.predict(x_valid)\n y_pred_raw = tsdata_valid.unscale_numpy(y_pred_raw)\n\n # use tspipeline to predic and evaluate\n eval_result = ts_pipeline.evaluate(tsdata_valid)\n y_pred = ts_pipeline.predict(tsdata_valid)\n\n # check if they are the same\n np.testing.assert_almost_equal(y_pred, y_pred_raw)\n\n # save and load\n ts_pipeline.save(\"/tmp/auto_trainer/autots_tmp_model_tcn\")\n new_ts_pipeline = TSPipeline.load(\"/tmp/auto_trainer/autots_tmp_model_tcn\")\n\n # check if load ppl is the same as previous\n eval_result_new = new_ts_pipeline.evaluate(tsdata_valid)\n y_pred_new = 
new_ts_pipeline.predict(tsdata_valid)\n np.testing.assert_almost_equal(eval_result[0], eval_result_new[0])\n np.testing.assert_almost_equal(y_pred, y_pred_new)\n\n # check if load ppl is the same as previous with onnx\n try:\n import onnx\n import onnxruntime\n eval_result_new_onnx = new_ts_pipeline.evaluate_with_onnx(tsdata_valid)\n y_pred_new_onnx = new_ts_pipeline.predict_with_onnx(tsdata_valid)\n np.testing.assert_almost_equal(eval_result[0], eval_result_new_onnx[0], decimal=5)\n np.testing.assert_almost_equal(y_pred, y_pred_new_onnx, decimal=5)\n except ImportError:\n pass\n\n # use tspipeline to incrementally train\n new_ts_pipeline.fit(tsdata_valid)\n\n @skip_onnxrt\n def test_fit_seq2seq_feature(self):\n from sklearn.preprocessing import StandardScaler\n scaler = StandardScaler()\n tsdata_train = get_tsdataset().gen_dt_feature().scale(scaler, fit=True)\n tsdata_valid = get_tsdataset().gen_dt_feature().scale(scaler, fit=False)\n\n auto_estimator = AutoTSEstimator(model='seq2seq',\n search_space=\"minimal\",\n past_seq_len=hp.randint(4, 6),\n future_seq_len=1,\n selected_features=\"auto\",\n metric=\"mse\",\n optimizer=\"Adam\",\n loss=torch.nn.MSELoss(),\n logs_dir=\"/tmp/auto_trainer\",\n cpus_per_trial=2,\n name=\"auto_trainer\")\n ts_pipeline = auto_estimator.fit(data=tsdata_train,\n epochs=1,\n batch_size=hp.choice([32, 64]),\n validation_data=tsdata_valid,\n n_sampling=1)\n best_config = auto_estimator.get_best_config()\n best_model = auto_estimator._get_best_automl_model()\n assert 4 <= best_config[\"past_seq_len\"] <= 6\n\n assert isinstance(ts_pipeline, TSPipeline)\n\n # use raw base model to predic and evaluate\n tsdata_valid.roll(lookback=best_config[\"past_seq_len\"],\n horizon=0,\n feature_col=best_config[\"selected_features\"])\n x_valid, y_valid = tsdata_valid.to_numpy()\n y_pred_raw = best_model.predict(x_valid)\n y_pred_raw = tsdata_valid.unscale_numpy(y_pred_raw)\n\n # use tspipeline to predic and evaluate\n eval_result = ts_pipeline.evaluate(tsdata_valid)\n y_pred = ts_pipeline.predict(tsdata_valid)\n\n # check if they are the same\n np.testing.assert_almost_equal(y_pred, y_pred_raw)\n\n # save and load\n ts_pipeline.save(\"/tmp/auto_trainer/autots_tmp_model_seq2seq\")\n new_ts_pipeline = TSPipeline.load(\"/tmp/auto_trainer/autots_tmp_model_seq2seq\")\n\n # check if load ppl is the same as previous\n eval_result_new = new_ts_pipeline.evaluate(tsdata_valid)\n y_pred_new = new_ts_pipeline.predict(tsdata_valid)\n np.testing.assert_almost_equal(eval_result[0], eval_result_new[0])\n np.testing.assert_almost_equal(y_pred, y_pred_new)\n\n # check if load ppl is the same as previous with onnx\n try:\n import onnx\n import onnxruntime\n eval_result_new_onnx = new_ts_pipeline.evaluate_with_onnx(tsdata_valid)\n y_pred_new_onnx = new_ts_pipeline.predict_with_onnx(tsdata_valid)\n np.testing.assert_almost_equal(eval_result[0], eval_result_new_onnx[0], decimal=5)\n np.testing.assert_almost_equal(y_pred, y_pred_new_onnx, decimal=5)\n except ImportError:\n pass\n\n # use tspipeline to incrementally train\n new_ts_pipeline.fit(tsdata_valid)\n\n def test_fit_lstm_data_creator(self):\n input_feature_dim = 4\n output_feature_dim = 2 # 2 targets are generated in get_tsdataset\n\n search_space = {\n 'hidden_dim': hp.grid_search([32, 64]),\n 'layer_num': hp.randint(1, 3),\n 'lr': hp.choice([0.001, 0.003, 0.01]),\n 'dropout': hp.uniform(0.1, 0.2)\n }\n auto_estimator = AutoTSEstimator(model='lstm',\n search_space=search_space,\n past_seq_len=7,\n future_seq_len=1,\n 
input_feature_num=input_feature_dim,\n output_target_num=output_feature_dim,\n selected_features=\"auto\",\n metric=\"mse\",\n loss=torch.nn.MSELoss(),\n logs_dir=\"/tmp/auto_trainer\",\n cpus_per_trial=2,\n name=\"auto_trainer\")\n auto_estimator.fit(data=get_data_creator(),\n epochs=1,\n batch_size=hp.choice([32, 64]),\n validation_data=get_data_creator(),\n n_sampling=1)\n config = auto_estimator.get_best_config()\n assert config[\"past_seq_len\"] == 7\n\n def test_select_feature(self):\n sample_num = np.random.randint(100, 200)\n df = pd.DataFrame({\"datetime\": pd.date_range('1/1/2019', periods=sample_num),\n \"value\": np.random.randn(sample_num),\n \"id\": np.array(['00']*sample_num)})\n train_ts, val_ts, _ = TSDataset.from_pandas(df,\n target_col=['value'],\n dt_col='datetime',\n id_col='id',\n with_split=True,\n val_ratio=0.1)\n\n search_space = {\n 'hidden_dim': hp.grid_search([32, 64]),\n 'layer_num': hp.randint(1, 3),\n 'lr': hp.choice([0.001, 0.003, 0.01]),\n 'dropout': hp.uniform(0.1, 0.2)\n }\n\n input_feature_dim, output_feature_dim = 1, 1\n auto_estimator = AutoTSEstimator(model='lstm',\n search_space=search_space,\n past_seq_len=6,\n future_seq_len=1,\n input_feature_num=input_feature_dim,\n output_target_num=output_feature_dim,\n selected_features=\"auto\",\n metric=\"mse\",\n loss=torch.nn.MSELoss(),\n cpus_per_trial=2,\n name=\"auto_trainer\")\n\n auto_estimator.fit(data=train_ts,\n epochs=1,\n batch_size=hp.choice([32, 64]),\n validation_data=val_ts,\n n_sampling=1)\n config = auto_estimator.get_best_config()\n assert config['past_seq_len'] == 6\n\n def test_future_list_input(self):\n sample_num = np.random.randint(100, 200)\n df = pd.DataFrame({\"datetime\": pd.date_range('1/1/2019', periods=sample_num),\n \"value\": np.random.randn(sample_num),\n \"id\": np.array(['00']*sample_num)})\n train_ts, val_ts, _ = TSDataset.from_pandas(df,\n target_col=['value'],\n dt_col='datetime',\n id_col='id',\n with_split=True,\n val_ratio=0.1)\n\n input_feature_dim, output_feature_dim = 1, 1\n auto_estimator = AutoTSEstimator(model='seq2seq',\n search_space=\"minimal\",\n past_seq_len=6,\n future_seq_len=[1, 3],\n input_feature_num=input_feature_dim,\n output_target_num=output_feature_dim,\n selected_features=\"auto\",\n metric=\"mse\",\n loss=torch.nn.MSELoss(),\n cpus_per_trial=2,\n name=\"auto_trainer\")\n\n auto_estimator.fit(data=train_ts,\n epochs=1,\n batch_size=hp.choice([32, 64]),\n validation_data=val_ts,\n n_sampling=1)\n config = auto_estimator.get_best_config()\n assert config['future_seq_len'] == 2\n assert auto_estimator._future_seq_len == [1, 3]\n\n def test_autogener_best_cycle_length(self):\n sample_num = 100\n df = pd.DataFrame({\"datetime\": pd.date_range('1/1/2019', periods=sample_num),\n \"value\": np.sin(np.array((0, 30, 45, 60, 90)*20)*np.pi/180),\n \"id\": np.array(['00'] * sample_num)})\n\n train_ts = TSDataset.from_pandas(df,\n target_col=['value'],\n dt_col='datetime',\n id_col='id',\n with_split=False)\n\n input_feature_dim, output_feature_dim = 1, 1\n auto_estimator = AutoTSEstimator(model='lstm',\n search_space=\"minimal\",\n past_seq_len='auto',\n input_feature_num=input_feature_dim,\n output_target_num=output_feature_dim)\n \n auto_estimator.fit(data=train_ts,\n epochs=1,\n batch_size=hp.choice([16, 32]),\n validation_data=train_ts)\n config = auto_estimator.get_best_config()\n assert 2 <= config['past_seq_len'] <= 10\n\nif __name__ == \"__main__\":\n pytest.main([__file__])\n"
]
| [
[
"torch.nn.Linear",
"torch.nn.Dropout",
"numpy.array",
"torch.nn.MSELoss",
"tensorflow.keras.layers.Input",
"sklearn.preprocessing.StandardScaler",
"numpy.testing.assert_almost_equal",
"pandas.date_range",
"numpy.random.randn",
"torch.unsqueeze",
"torch.nn.ReLU",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Dropout",
"torch.from_numpy",
"numpy.random.randint"
]
]
|
mack-the-psych/plimac3 | [
"7b47abf7a087961e5a3e9c90b1ae20d07b3d6898"
]
| [
"Lib/ac_hyponyms.py"
]
| [
"################################################################################\n# This module retrieves hyponyms from Wordnet as a part of NLTK module and its\n# corpus. The module recognizes each input pandas.DataFrame record as a unit of \n# assessment content (i.e. a single passage section, an item stem, \n# or an item option) and applies a serial number of 'AC_Doc_ID' to the each\n# output record for the following processing.\n# Parameters df_ac: input pandas.DataFrame, it should have, at least, one \n# column of lemmatized text assessment content \n# content_lemma_column: column name of lemmatized text assessment \n# content (as an output text from the \n# lemmatizer) to search Wordnet with the lemmas\n# Returns Result: pandas.DataFrame including the original columns of the input \n# DataFrame plus result hyponyms\n################################################################################\ndef ac_hyponyms(df_ac, content_lemma_column):\n import pandas as pd\n import numpy as np\n import nltk\n from nltk.corpus import wordnet as wn\n\n df_ac_buf = df_ac.copy()\n list_cntnt = list(df_ac_buf[content_lemma_column])\n list_cntnt_hyponyms = list_cntnt[:]\n list_doc_id = list_cntnt[:] \n df_hyponyms_all = pd.DataFrame()\n\n for i, x in enumerate(list_cntnt):\n tokens = nltk.word_tokenize(x)\n hypernym_list = []\n for y in tokens:\n for synset in wn.synsets(y):\n #Updated 3/5/2017 [email protected]\n nltk_ver = list(map(int, nltk.__version__.split('.')))\n if (nltk_ver[0] > 2):\n hyponyms = wn.synset(synset.name()).hyponyms()\n for v in hyponyms:\n hypernym_list = hypernym_list + v.lemma_names()\n else:\n hyponyms = wn.synset(synset.name).hyponyms()\n for v in hyponyms:\n hypernym_list = hypernym_list + v.lemma_names\n\n s = ' '.join(map(str,hypernym_list))\n list_cntnt_hyponyms[i] = s\n print(s)\n\n lower_hypernym_list = [w.lower() for w in hypernym_list] \n df_hyponyms = pd.DataFrame({ 'Hyponyms' : lower_hypernym_list })\n df_doc = pd.DataFrame({ 'AC_Doc_ID' : np.array([i] * len(df_hyponyms)) })\n df_hyponyms['AC_Doc_ID'] = df_doc['AC_Doc_ID']\n df_hyponyms['Dummy'] = df_doc['AC_Doc_ID']\n df_hyponyms_all = df_hyponyms_all.append(df_hyponyms)\n list_doc_id[i] = i\n\n df_doc_id = pd.DataFrame({ 'AC_Doc_ID' : list_doc_id })\n df_ac_buf['AC_Doc_ID'] = df_doc_id['AC_Doc_ID']\n df_cntnt_hyponyms = pd.DataFrame({ 'Cntnt_Hyponyms' : list_cntnt_hyponyms })\n df_ac_buf['Cntnt_Hyponyms'] = df_cntnt_hyponyms['Cntnt_Hyponyms']\n\n #Updated 12/18/2016 [email protected]\n if df_hyponyms_all.shape[0] > 0:\n #Updated 3/5/2017 [email protected]\n pd_ver = list(map(int, pd.__version__.split('.')))\n if (pd_ver[0] > 0) or (pd_ver[1] > 13):\n df_crosstab = df_hyponyms_all.pivot_table(values='Dummy', \n index='AC_Doc_ID', columns='Hyponyms', aggfunc = len)\n else:\n df_crosstab = df_hyponyms_all.pivot_table(values='Dummy', \n rows='AC_Doc_ID', cols='Hyponyms', aggfunc = len)\n df_crosstab['AC_Doc_ID'] = df_doc_id['AC_Doc_ID']\n df_res = pd.merge(df_ac_buf, df_crosstab, on='AC_Doc_ID', how='left')\n else:\n df_res = df_ac_buf\n\n return df_res.set_index('AC_Doc_ID')\n"
]
| [
[
"pandas.DataFrame",
"pandas.__version__.split",
"pandas.merge"
]
]
|
mo-igor/FIR_CNN-LSTM | [
"e88d7bdfb5a22ad2ce0bb16da8431aaf05de3c4a"
]
| [
"main.py"
]
| [
"from tools import dataset\nfrom tools.dataset import Dataset\nfrom tools import prepare\nfrom tools import augmentation as augment\n\nimport os\nimport argparse\nimport pandas as pd\n\nfrom glob import glob\nimport collections\nimport re\nimport random\nSEED = None # set to None to use the current system time\nrandom.seed(a=SEED)\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import confusion_matrix, accuracy_score\n\nimport tensorflow\nfrom tensorflow import keras\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.layers import Input, TimeDistributed, Dense, Dropout,\\\n Flatten, Activation, Conv2D, MaxPooling2D, GlobalAveragePooling1D,\\\n BatchNormalization, Masking, multiply, GlobalMaxPooling1D, Reshape,\\\n GRU, average, Lambda, Average, Maximum, Concatenate\n\nfrom tools.flow import farneback, farneback_mag\nfrom tensorflow.keras.backend import clear_session\nfrom tensorflow.keras import optimizers\nfrom tensorflow.keras.callbacks import EarlyStopping, TerminateOnNaN, ModelCheckpoint, ReduceLROnPlateau\n\n\nLABELS_REGEX_7 = dataset.LABELS_REGEX #7 labels\nLABELS_REGEX_5 = dataset.PAPER_LABELS_REGEX #5 labels\n\nKERAS_EPSILON = tensorflow.keras.backend.epsilon()\n\nkeras.backend.set_image_data_format('channels_last')\n\ndef spatial_stream():\n spatial_input = Input(shape=(None, 16, 16, 1), name='spatial_input')\n spatial_conv1 = TimeDistributed(Conv2D(16, (3, 3), padding='same', activation='relu', name='spatial_conv1'), name='spatial_timedistributed1')(spatial_input)\n spatial_bn_layer = TimeDistributed(BatchNormalization(name='spatial_bn_layer'), name='spatial_timedistributed2')(spatial_conv1)\n spatial_maxpool1 = TimeDistributed(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='spatial_maxpool1'), name='spatial_timedistributed3')(spatial_bn_layer)\n spatial_conv2 = TimeDistributed(Conv2D(32, (3, 3), padding='same', activation='relu', name='spatial_conv2'), name='spatial_timedistributed4')(spatial_maxpool1)\n spatial_maxpool2 = TimeDistributed(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='spatial_maxpool2'), name='spatial_timedistributed5')(spatial_conv2)\n spatial_conv3 = TimeDistributed(Conv2D(64, (3, 3), padding='same', activation='relu', name='spatial_conv3'), name='spatial_timedistributed6')(spatial_maxpool2)\n spatial_maxpool3 = TimeDistributed(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='spatial_maxpool3'), name='spatial_timedistributed7')(spatial_conv3)\n spatial_conv4 = TimeDistributed(Conv2D(128, (3, 3), padding='same', activation='relu', name='spatial_conv4'), name='spatial_timedistributed8')(spatial_maxpool3)\n spatial_maxpool4 = TimeDistributed(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='spatial_maxpool4'), name='spatial_timedistributed9')(spatial_conv4)\n spatial_flattened = TimeDistributed(Flatten(name='spatial_flattened'), name='spatial_timedistributed10')(spatial_maxpool4)\n spatial_dense1 = TimeDistributed(Dense(512, name='spatial_dense1'), name='spatial_timedistributed11')(spatial_flattened)\n spatial_dense2 = TimeDistributed(Dense(256, name='spatial_dense2'), name='spatial_timedistributed12')(spatial_dense1)\n spatial_GRU = GRU(100, return_sequences=True, name='spatial_GRU')(spatial_dense2)\n spatial_GRU2 = GRU(100, return_sequences=False, name='spatial_GRU2')(spatial_GRU)\n #handle numerical instability\n spatial_output = Lambda(lambda x: tensorflow.keras.backend.clip(x, KERAS_EPSILON, 1-KERAS_EPSILON))(spatial_GRU2)\n return spatial_input, spatial_output\n\ndef temporal_stream():\n 
temporal_input = Input(shape=(None, 16, 16, 2), name='temporal_input')\n temporal_conv1 = TimeDistributed(Conv2D(16, (3, 3), padding='same', activation='relu', name='temporal_conv1'), name='temporal_timedistributed1')(temporal_input)\n temporal_bn_layer = TimeDistributed(BatchNormalization(name='temporal_bn_layer'), name='temporal_timedistributed2')(temporal_conv1)\n temporal_maxpool1 = TimeDistributed(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='temporal_maxpool1'), name='temporal_timedistributed3')(temporal_bn_layer)\n temporal_conv2 = TimeDistributed(Conv2D(32, (3, 3), padding='same', activation='relu', name='temporal_conv2'), name='temporal_timedistributed4')(temporal_maxpool1)\n temporal_maxpool2 = TimeDistributed(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='temporal_maxpool2'), name='temporal_timedistributed5')(temporal_conv2)\n temporal_conv3 = TimeDistributed(Conv2D(64, (3, 3), padding='same', activation='relu', name='temporal_conv3'), name='temporal_timedistributed6')(temporal_maxpool2)\n temporal_maxpool3 = TimeDistributed(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='temporal_maxpool3'), name='temporal_timedistributed7')(temporal_conv3)\n temporal_conv4 = TimeDistributed(Conv2D(128, (3, 3), padding='same', activation='relu', name='temporal_conv4'), name='temporal_timedistributed8')(temporal_maxpool3)\n temporal_maxpool4 = TimeDistributed(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='temporal_maxpool4'), name='temporal_timedistributed9')(temporal_conv4)\n temporal_flattened = TimeDistributed(Flatten(name='temporal_flattened'), name='temporal_timedistributed10')(temporal_maxpool4)\n temporal_dense1 = TimeDistributed(Dense(512, name='temporal_dense1'), name='temporal_timedistributed11')(temporal_flattened)\n temporal_dense2 = TimeDistributed(Dense(256, name='temporal_dense2'), name='temporal_timedistributed12')(temporal_dense1)\n temporal_GRU = GRU(100, return_sequences=True, name='temporal_GRU')(temporal_dense2)\n temporal_GRU2 = GRU(100, return_sequences=False, name='temporal_GRU2')(temporal_GRU)\n #handle numerical instability\n temporal_output = Lambda(lambda x: tensorflow.keras.backend.clip(x, KERAS_EPSILON, 1-KERAS_EPSILON))(temporal_GRU2)\n return temporal_input, temporal_output\n\ndef stream2model(stream_input, stream_output):\n classification_output = Dense(CLASSES_N, activation=\"softmax\", name=\"single_stream_classification\")(stream_output)\n model = Model(stream_input, classification_output)\n return model\n\ndef merge_streams(spatial_input, spatial_output, temporal_input, temporal_output):\n concat = Concatenate(name='merged_concat')([spatial_output, temporal_output])\n output = Dense(CLASSES_N, activation=\"softmax\", name='merged_output')(concat)\n model = Model([spatial_input, temporal_input], output)\n return model\n\ndef compile_model(model, model_dir, optimizer=\"adam\", prefix=\"\"):\n model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])\n prepare.ensure_dir_exists(model_dir)\n keras.utils.plot_model(model, os.path.join(model_dir, prefix+'model.png'))\n return model\n\ndef plot_history(history, model_dir, prefix=\"\", suffix=\"\"):\n plt.plot(history.history['acc'])\n plt.plot(history.history['val_acc'])\n plt.title('model accuracy')\n plt.ylabel('accuracy')\n plt.xlabel('epoch')\n plt.legend(['train', 'validation'], loc='upper left')\n plt.savefig(os.path.join(model_dir, prefix+\"model_accuracy\"+suffix+\".png\"))\n plt.close()\n\n plt.plot(history.history['loss'])\n 
plt.plot(history.history['val_loss'])\n plt.title('model loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['train', 'validation'], loc='upper left')\n plt.savefig(os.path.join(model_dir, prefix+\"model_loss\"+suffix+\".png\"))\n return\n\ndef to_categorical(y):\n return tensorflow.keras.utils.to_categorical(y, CLASSES_N)\n\n\nclass DataGenerator(keras.utils.Sequence):\n '''\n FIR data batch generator for Keras\n\n Parameters\n ----------\n data : list\n list of [fn, y] where fn is file location and y is a label\n\n Returns\n ----------\n [[temperature, flow], y] : list\n temperature : numpy array \n flow : numpy array\n y : numpy array (one-hot encoded)\n\n '''\n def __init__(self, data, batch_size, shuffle: bool = True, augmentation: bool = False):\n self.data = data\n if (batch_size == -1):\n self.batch_size = len(data)\n else:\n self.batch_size = batch_size\n self.shuffle = shuffle\n if self.shuffle:\n random.shuffle(self.data)\n self.augmentation = augmentation\n\n def __len__(self):\n return int(np.floor(len(self.data) / self.batch_size))\n\n def on_epoch_end(self):\n if self.shuffle:\n random.shuffle(self.data)\n\n def __getitem__(self, index):\n indices = list(\n range(index * self.batch_size, (index + 1) * self.batch_size))\n return self.__load_data(indices)\n\n def __load_data(self, indices):\n samples = []\n temperature_length_max = 0\n flow_length_max = 0\n for idx in indices:\n if self.augmentation:\n k_rot = np.random.randint(0, 4)\n k_flip = np.random.randint(0, 3)\n [temperature_fn, flow_fn], y = self.data[idx]\n temperature = np.load(temperature_fn).astype(np.float32)\n if self.augmentation:\n temperature = augment.random_rotation(temperature, case=k_rot)\n temperature = augment.random_flip(temperature, case=k_flip)\n temperature = temperature[..., np.newaxis]\n flow = farneback(np.squeeze((255*temperature).astype(np.uint8)))\n if temperature.shape[0] > temperature_length_max:\n temperature_length_max = temperature.shape[0]\n if flow.shape[0] > flow_length_max:\n flow_length_max = flow.shape[0]\n samples.append([[temperature, flow], y])\n # zero-pad\n TEMPERATURE, FLOW = [], []\n Y = []\n for sample in samples:\n [temperature, flow], y = sample\n temperature = self.__pad_to_length(temperature,\n temperature_length_max)\n flow = self.__pad_to_length(flow, flow_length_max)\n TEMPERATURE.append(temperature)\n FLOW.append(flow)\n Y.append(y)\n TEMPERATURE, FLOW, Y = np.array(TEMPERATURE), np.array(FLOW), np.array(\n Y)\n return ([TEMPERATURE, FLOW], Y)\n\n def __pad_to_length(self, sequence, length):\n if sequence.shape[0] == length:\n return sequence\n trailing = np.zeros([length - sequence.shape[0], *sequence.shape[1:]],\n sequence.dtype)\n return np.vstack([trailing, sequence])\n\nclass TemperatureGenerator(keras.utils.Sequence):\n def __init__(self, data, batch_size, shuffle: bool = True, augmentation: bool = False):\n self.data = data\n if (batch_size == -1):\n self.batch_size = len(data)\n else:\n self.batch_size = batch_size\n self.shuffle = shuffle\n if self.shuffle:\n random.shuffle(self.data)\n self.augmentation = augmentation\n\n def __len__(self):\n return int(np.floor(len(self.data) / self.batch_size))\n\n def on_epoch_end(self):\n if self.shuffle:\n random.shuffle(self.data)\n\n def __getitem__(self, index):\n indices = list(\n range(index * self.batch_size, (index + 1) * self.batch_size))\n return self.__load_data(indices)\n\n def __load_data(self, indices):\n samples = []\n temperature_length_max = 0\n for idx in indices:\n if self.augmentation:\n 
k_rot = np.random.randint(0, 4)\n k_flip = np.random.randint(0, 3)\n [temperature_fn, _], y = self.data[idx]\n temperature = np.load(temperature_fn).astype(np.float32)\n if self.augmentation:\n temperature = augment.random_rotation(temperature, case=k_rot)\n temperature = augment.random_flip(temperature, case=k_flip)\n temperature = temperature[..., np.newaxis]\n if temperature.shape[0] > temperature_length_max:\n temperature_length_max = temperature.shape[0]\n samples.append([temperature, y])\n # zero-pad\n TEMPERATURE = []\n Y = []\n for sample in samples:\n temperature, y = sample\n temperature = self.__pad_to_length(temperature,\n temperature_length_max)\n TEMPERATURE.append(temperature)\n Y.append(y)\n TEMPERATURE, Y = np.array(TEMPERATURE), np.array(Y)\n return (TEMPERATURE, Y)\n\n def __pad_to_length(self, sequence, length):\n if sequence.shape[0] == length:\n return sequence\n trailing = np.zeros([length - sequence.shape[0], *sequence.shape[1:]],\n sequence.dtype)\n return np.vstack([trailing, sequence])\n\nclass FlowGenerator(keras.utils.Sequence):\n def __init__(self, data, batch_size, shuffle: bool = True, augmentation: bool = False):\n self.data = data\n if (batch_size == -1):\n self.batch_size = len(data)\n else:\n self.batch_size = batch_size\n self.shuffle = shuffle\n if self.shuffle:\n random.shuffle(self.data)\n self.augmentation = augmentation\n\n def __len__(self):\n return int(np.floor(len(self.data) / self.batch_size))\n\n def on_epoch_end(self):\n if self.shuffle:\n random.shuffle(self.data)\n\n def __getitem__(self, index):\n indices = list(\n range(index * self.batch_size, (index + 1) * self.batch_size))\n return self.__load_data(indices)\n\n def __load_data(self, indices):\n samples = []\n flow_length_max = 0\n for idx in indices:\n if self.augmentation:\n k_rot = np.random.randint(0, 4)\n k_flip = np.random.randint(0, 3)\n [temperature_fn, flow_fn], y = self.data[idx]\n temperature = np.load(temperature_fn).astype(np.float32)\n if self.augmentation:\n temperature = augment.random_rotation(temperature, case=k_rot)\n temperature = augment.random_flip(temperature, case=k_flip)\n temperature = temperature[..., np.newaxis]\n flow = farneback(np.squeeze((255*temperature).astype(np.uint8)))\n if flow.shape[0] > flow_length_max:\n flow_length_max = flow.shape[0]\n samples.append([flow, y])\n # zero-pad\n FLOW = []\n Y = []\n for sample in samples:\n flow, y = sample\n flow = self.__pad_to_length(flow, flow_length_max)\n FLOW.append(flow)\n Y.append(y)\n FLOW, Y = np.array(FLOW), np.array(Y)\n return (FLOW, Y)\n\n def __pad_to_length(self, sequence, length):\n if sequence.shape[0] == length:\n return sequence\n trailing = np.zeros([length - sequence.shape[0], *sequence.shape[1:]],\n sequence.dtype)\n return np.vstack([trailing, sequence])\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--dataset_dir',\n type=str,\n default=os.path.join(\"..\", \"dataset\"),\n help='Path to folder containing the FIR dataset.')\n parser.add_argument('--model_dir',\n type=str,\n default=\"/\" + os.path.join(\"tmps\", \"model\"),\n help='Where to save the trained model.')\n parser.add_argument('--subdir',\n type=str,\n default=\"weigths\",\n help='Custom naming for subdirectory to save the model in.')\n parser.add_argument(\n '--temperature_dir',\n type=str,\n default=\"/\" + os.path.join(\"tmps\", \"cache\", \"temperature\"),\n help='Where to save the cached sequences (temperature).')\n parser.add_argument(\n '--flow_dir',\n type=str,\n 
default=\"/\" + os.path.join(\"tmps\", \"cache\", \"optical_flow\"),\n help='Where to save the cached sequences (optical flow).')\n parser.add_argument('--classes',\n type=int,\n default=5,\n help='How many classes? 5 if --classes=5, 7 otherwise.')\n parser.add_argument('--epochs',\n type=int,\n default=50,\n help='How many epochs to run before ending.')\n parser.add_argument('--learning_rate',\n type=float,\n default=1e-1,\n help='How large a learning rate to use when training.')\n parser.add_argument(\n '--validation_size',\n type=float,\n default=0.1,\n help='Between 0.0 and 1.0, the proportion of the dataset \\\n to include in the validation split.')\n parser.add_argument('--training_batch_size',\n type=int,\n default=128,\n help='How many images to train on at a time.')\n parser.add_argument('--validation_batch_size',\n type=int,\n default=-1,\n help='How many images to validate on at a time. -1 for batch_size = samples_n (more stable results).')\n parser.add_argument('--testing_batch_size',\n type=int,\n default=-1,\n help='How many images to test on at a time. -1 for batch_size = samples_n (more stable results).')\n parser.add_argument(\"--download\",\n action=\"store_true\",\n help='Download the dataset.')\n parser.add_argument(\"--prepare\",\n action=\"store_true\",\n help='Prepare the dataset.')\n parser.add_argument(\"--actor\",\n type=str,\n default=None,\n help='Choose testing actor, pattern: \"human{}\" [0-9]. Otherwise full cross validation is performed.')\n parser.add_argument(\"--pretrain\",\n action=\"store_true\",\n help='Pretrain by training streams separately.')\n parser.add_argument(\"--temporal_only\",\n action=\"store_true\",\n help='Train temporal only.')\n parser.add_argument(\"--spatial_only\",\n action=\"store_true\",\n help='Train spatial only.')\n FLAGS, unparsed = parser.parse_known_args()\n\n model_path = os.path.join(FLAGS.model_dir, FLAGS.subdir)\n if (FLAGS.classes == 5):\n LABELS_REGEX = LABELS_REGEX_5\n else:\n LABELS_REGEX = LABELS_REGEX_7\n CLASSES_N = len(LABELS_REGEX)\n\n if FLAGS.download:\n dataset.download(\"..\")\n\n if FLAGS.temporal_only and FLAGS.spatial_only:\n raise ValueError \n\n data_normalized = Dataset(FLAGS.dataset_dir, minmax_normalized=True)\n\n if FLAGS.prepare:\n prepare.sequences_by_actor(data_normalized, FLAGS.temperature_dir)\n prepare.optical_flow(data_normalized, FLAGS.flow_dir)\n\n temperature_files = glob(os.path.join(FLAGS.temperature_dir, \"**\",\n \"*.npy\"))\n flow_files = glob(os.path.join(FLAGS.flow_dir, \"**\", \"*.npy\"))\n\n def files_same(a, b):\n return collections.Counter([os.path.split(item)[1]\n for item in a]) == collections.Counter(\n [os.path.split(item)[1] for item in b])\n\n if not files_same(temperature_files, flow_files):\n raise ValueError(\n \"The number and naming of the samples in temporal and spatial \\\n streams should be the same.\")\n\n if (FLAGS.validation_size > 1) or (FLAGS.validation_size < 0):\n raise ValueError(\"Validation size should be between 0.0 and 1.0\")\n\n # relative_path, y = data_fn_y[i]\n data_fn_y = []\n for path in temperature_files:\n sample_actor, sample_basename = path.split(os.path.sep)[-2:]\n relative_path = os.path.join(sample_actor, sample_basename)\n y = None\n for pattern in LABELS_REGEX:\n if re.search(pattern + \"_\", sample_basename):\n y = LABELS_REGEX[pattern]\n data_fn_y.append([relative_path, y])\n\n cnfs_mtx_dict = dict()\n # LOOCV\n for actor in dataset.ACTORS:\n if FLAGS.actor:\n if actor != FLAGS.actor:\n print(\"Skip\")\n continue\n\n testing_actor = 
actor\n training_actors = list(dataset.ACTORS)\n training_actors.remove(testing_actor)\n\n model_fn_json = os.path.join(model_path, \"model.json\")\n model_fn_hdf5 = os.path.join(model_path, \"model_{}.hdf5\".format(actor))\n spatial_model_fn_json = os.path.join(model_path, \"spatial_model.json\")\n spatial_model_fn_hdf5 = os.path.join(model_path, \"spatial_model_{}.hdf5\".format(actor))\n temporal_model_fn_json = os.path.join(model_path, \"temporal_model.json\")\n temporal_model_fn_hdf5 = os.path.join(model_path, \"temporal_model_{}.hdf5\".format(actor))\n\n train_val_fns_y = []\n testing_fns_y = []\n for sample in data_fn_y:\n fn, y = sample\n sample_actor, sample_basename = fn.split(os.path.sep)\n if sample_actor == testing_actor:\n testing_fns_y.append([fn, y])\n else:\n train_val_fns_y.append([fn, y])\n\n # balanced split\n validation_fns_y, training_fns_y = [], []\n train_val_fns_y_classes = []\n for key in LABELS_REGEX.keys():\n tmp_class = []\n random.shuffle(train_val_fns_y)\n for sample in train_val_fns_y:\n fn, y = sample\n if (y == LABELS_REGEX[key]):\n tmp_class.append(sample)\n print(\"{} samples in class {}\".format(len(tmp_class), LABELS_REGEX[key]))\n split = int(len(tmp_class) * FLAGS.validation_size)\n validation_fns_y.extend(tmp_class[:split])\n training_fns_y.extend(tmp_class[split:])\n\n # add back the prefix\n # [temperature_fn, flow_fn], y = *_data\n def add_prefixes(list_fns_y, temperature_prefix, flow_prefix):\n list_data = []\n for sample in list_fns_y:\n fn, y = sample\n list_data.append([[\n os.path.join(temperature_prefix, fn),\n os.path.join(flow_prefix, fn)\n ],\n to_categorical(y)])\n return list_data\n\n # [temperature_fn, flow_fn], y = *_data\n testing_data = add_prefixes(testing_fns_y, FLAGS.temperature_dir,\n FLAGS.flow_dir)\n training_data = add_prefixes(training_fns_y, FLAGS.temperature_dir,\n FLAGS.flow_dir)\n validation_data = add_prefixes(validation_fns_y, FLAGS.temperature_dir,\n FLAGS.flow_dir)\n\n training_batches = DataGenerator(training_data,\n FLAGS.training_batch_size,\n shuffle=True, augmentation=True)\n validation_batches = DataGenerator(validation_data,\n FLAGS.validation_batch_size,\n shuffle=True)\n testing_batches = DataGenerator(testing_data,\n FLAGS.testing_batch_size,\n shuffle=False)\n\n print(\"[INFO] \\n\")\n print(\"Actor: {}\".format(actor))\n print(\"Training: {} samples -> {} batches\".format(\n len(training_data), len(training_batches)))\n print(\"Validation: {} samples -> {} batches\".format(\n len(validation_data), len(validation_batches)))\n print(\"Testing: {} samples -> {} batches\".format(\n len(testing_data), len(testing_batches)))\n \n\n\n def train_model(model, epochs, training_batches, validation_batches, callbacks, model_fn_json, prefix=\"\", suffix=\"\"):\n json_string = model.to_json()\n open(model_fn_json, 'w').write(json_string)\n model.summary()\n history = model.fit_generator(training_batches, epochs=FLAGS.epochs, validation_data=validation_batches, callbacks=callbacks)\n plot_history(history, model_path, prefix, suffix)\n return history\n\n def load_checkpoint(model_fn_json, model_fn_hdf5):\n # load json and create model\n json_file = open(model_fn_json, 'r')\n loaded_model_json = json_file.read()\n json_file.close()\n loaded_model = tensorflow.keras.models.model_from_json(loaded_model_json)\n # load weights into new model\n loaded_model.load_weights(model_fn_hdf5)\n print(\"[INFO] Loaded model from disk ({}, {})\".format(model_fn_json, model_fn_hdf5))\n return loaded_model\n\n if (FLAGS.pretrain or 
FLAGS.spatial_only): \n #SPATIAL\n optimizer = optimizers.SGD(lr=FLAGS.learning_rate, clipnorm=0.5, momentum=0.5, nesterov=True) # best\n early_stopping = EarlyStopping(monitor='val_loss', patience=20, verbose=1)\n terminateNaN = TerminateOnNaN() #that shouldn't happen\n saveBest = ModelCheckpoint(spatial_model_fn_hdf5, save_best_only=True)\n callbacks=[early_stopping, terminateNaN, saveBest]\n spatial_training_batches = TemperatureGenerator(training_data,\n FLAGS.training_batch_size,\n shuffle=True, augmentation=True)\n spatial_validation_batches = TemperatureGenerator(validation_data,\n FLAGS.validation_batch_size,\n shuffle=True)\n spatial_testing_batches = DataGenerator(testing_data,\n FLAGS.testing_batch_size,\n shuffle=False)\n spatial_model = compile_model(stream2model(*spatial_stream()), model_path, optimizer, prefix=\"spatial_\")\n spatial_history = train_model(spatial_model, FLAGS.epochs, spatial_training_batches, spatial_validation_batches, callbacks, spatial_model_fn_json, prefix=\"spatial_\", suffix=actor)\n if FLAGS.spatial_only:\n loaded_model = spatial_model = compile_model(stream2model(*spatial_stream()), model_path, optimizer, prefix=\"spatial_\")\n loaded_model.load_weights(spatial_model_fn_hdf5, by_name=True)\n predictions = loaded_model.predict_generator(spatial_testing_batches)\n y_pred = np.argmax(predictions, axis=-1)\n y_test = np.argmax(spatial_testing_batches[0][1], axis=-1)\n cnfs_mtx = confusion_matrix(y_test, y_pred)\n print(accuracy_score(y_test, y_pred))\n C = cnfs_mtx / cnfs_mtx.astype(np.float).sum(axis=1)\n cnfs_mtx_dict[actor] = cnfs_mtx\n continue\n clear_session()\n\n if (FLAGS.pretrain or FLAGS.temporal_only): \n #TEMPORAL\n optimizer = optimizers.SGD(lr=FLAGS.learning_rate, clipnorm=0.5) # best\n early_stopping = EarlyStopping(monitor='val_loss', patience=20, verbose=1)\n terminateNaN = TerminateOnNaN() #that shouldn't happen\n saveBest = ModelCheckpoint(temporal_model_fn_hdf5, save_best_only=True)\n callbacks=[early_stopping, terminateNaN, saveBest]\n temporal_training_batches = FlowGenerator(training_data,\n FLAGS.training_batch_size,\n shuffle=True, augmentation=True)\n temporal_validation_batches = FlowGenerator(validation_data,\n FLAGS.validation_batch_size,\n shuffle=True)\n temporal_testing_batches = FlowGenerator(testing_data,\n FLAGS.testing_batch_size,\n shuffle=False)\n temporal_model = compile_model(stream2model(*temporal_stream()), model_path, optimizer, prefix=\"temporal_\")\n temporal_history = train_model(temporal_model, FLAGS.epochs, temporal_training_batches, temporal_validation_batches, callbacks, temporal_model_fn_json, prefix=\"temporal_\", suffix=actor)\n if FLAGS.temporal_only:\n loaded_model = temporal_model = compile_model(stream2model(*temporal_stream()), model_path, optimizer, prefix=\"temporal_\")\n loaded_model.load_weights(temporal_model_fn_hdf5, by_name=True)\n predictions = loaded_model.predict_generator(temporal_testing_batches)\n y_pred = np.argmax(predictions, axis=-1)\n y_test = np.argmax(temporal_testing_batches[0][1], axis=-1)\n cnfs_mtx = confusion_matrix(y_test, y_pred)\n print(accuracy_score(y_test, y_pred))\n C = cnfs_mtx / cnfs_mtx.astype(np.float).sum(axis=1)\n cnfs_mtx_dict[actor] = cnfs_mtx\n continue\n clear_session()\n\n #COMBINED\n optimizer = optimizers.SGD(lr=FLAGS.learning_rate, clipnorm=0.5) # best\n early_stopping = EarlyStopping(monitor='val_loss', patience=20, verbose=1)\n terminateNaN = TerminateOnNaN() #that shouldn't happen\n saveBest = ModelCheckpoint(model_fn_hdf5, save_best_only=True)\n 
callbacks=[early_stopping, terminateNaN, saveBest]\n model = compile_model(merge_streams(*spatial_stream(), *temporal_stream()), model_path, optimizer)\n if FLAGS.pretrain: \n print(\"[INFO] Loading in pretrained streams weights...\")\n model.load_weights(spatial_model_fn_hdf5, by_name=True)\n model.load_weights(temporal_model_fn_hdf5, by_name=True)\n\n history = train_model(model, FLAGS.epochs, training_batches, validation_batches, callbacks, model_fn_json, suffix=actor)\n\n loaded_model = load_checkpoint(model_fn_json, model_fn_hdf5)\n predictions = loaded_model.predict_generator(testing_batches)\n y_pred = np.argmax(predictions, axis=-1)\n y_test = np.argmax(testing_batches[0][1], axis=-1)\n cnfs_mtx = confusion_matrix(y_test, y_pred)\n print(accuracy_score(y_test, y_pred))\n C = cnfs_mtx / cnfs_mtx.astype(np.float).sum(axis=1)\n cnfs_mtx_dict[actor] = cnfs_mtx\n\n print(\"[INFO] Model successfully trained, tested on {} \".format(actor))\n\n cross_validation_cnfs_mtx = sum(cnfs_mtx_dict[item] for item in cnfs_mtx_dict)\n cross_validation_accuracy = cross_validation_cnfs_mtx.diagonal().sum()/cross_validation_cnfs_mtx.sum()\n\n metrics = dict()\n metrics[\"confusion_matrix\"] = cross_validation_cnfs_mtx\n metrics[\"accuracy\"] = cross_validation_accuracy\n np.save(os.path.join(model_path, \"metrics_dict.npy\"), metrics)\n metrics = np.load(os.path.join(model_path, \"metrics_dict.npy\"), allow_pickle=True)[()]\n print(metrics[\"confusion_matrix\"])\n print(metrics[\"accuracy\"])\n\n with open(os.path.join(model_path,'cnfs_mtx.txt'),'wb') as f:\n for line in np.matrix(cross_validation_cnfs_mtx):\n np.savetxt(f, line, fmt='%.4f')\n\n with open(os.path.join(model_path,'accuracy.txt'),'wb') as f:\n for line in np.matrix(cross_validation_accuracy):\n np.savetxt(f, line, fmt='%.4f')\n\n\n\n"
]
| [
[
"tensorflow.keras.utils.to_categorical",
"tensorflow.keras.optimizers.SGD",
"sklearn.metrics.confusion_matrix",
"tensorflow.keras.backend.set_image_data_format",
"tensorflow.keras.callbacks.TerminateOnNaN",
"numpy.load",
"tensorflow.keras.models.Model",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.callbacks.ModelCheckpoint",
"tensorflow.keras.layers.BatchNormalization",
"sklearn.metrics.accuracy_score",
"tensorflow.keras.layers.Conv2D",
"numpy.argmax",
"numpy.random.randint",
"tensorflow.keras.layers.Concatenate",
"numpy.vstack",
"numpy.matrix",
"numpy.array",
"numpy.savetxt",
"numpy.zeros",
"matplotlib.pyplot.title",
"matplotlib.pyplot.close",
"tensorflow.keras.layers.GRU",
"tensorflow.keras.models.model_from_json",
"tensorflow.keras.layers.Input",
"tensorflow.keras.layers.Flatten",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"tensorflow.keras.backend.epsilon",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.keras.backend.clear_session",
"matplotlib.pyplot.ylabel",
"tensorflow.keras.backend.clip",
"tensorflow.keras.callbacks.EarlyStopping"
]
]
|