repo_name: string (lengths 6–130)
hexsha: list
file_path: list
code: list
apis: list
jgwak/McRecon
[ "b90acc3017ba2dbb99ef13bb2033be30059d7108" ]
[ "lib/data_process.py" ]
[ "'''\nParallel data loading functions\n'''\nimport sys\nimport time\nimport theano\nimport numpy as np\nimport traceback\nfrom PIL import Image\nfrom six.moves import queue\nfrom multiprocessing import Process, Event\n\nfrom lib.config import cfg\nfrom lib.data_augmentation import preprocess_img\nfrom lib.data_io import get_voxel_file, get_rendering_file, get_camera_file\nfrom lib.binvox_rw import read_as_3d_array\nfrom lib.get_projection import readBinvoxParams\n\n\ndef print_error(func):\n '''Flush out error messages. Mainly used for debugging separate processes'''\n\n def func_wrapper(*args, **kwargs):\n try:\n return func(*args, **kwargs)\n except:\n traceback.print_exception(*sys.exc_info())\n sys.stdout.flush()\n\n return func_wrapper\n\n\nclass DataProcess(Process):\n\n def __init__(self, data_queue, data_paths, repeat=True):\n '''\n data_queue : Multiprocessing queue\n data_paths : list of data and label pair used to load data\n repeat : if set True, return data until exit is set\n '''\n super(DataProcess, self).__init__()\n # Queue to transfer the loaded mini batches\n self.data_queue = data_queue\n self.data_paths = data_paths\n self.num_data = len(data_paths)\n self.repeat = repeat\n\n # Tuple of data shape\n self.batch_size = cfg.CONST.BATCH_SIZE\n self.exit = Event()\n self.shuffle_db_inds()\n\n def shuffle_db_inds(self):\n # Randomly permute the training roidb\n if self.repeat:\n self.perm = np.random.permutation(np.arange(self.num_data))\n else:\n self.perm = np.arange(self.num_data)\n self.cur = 0\n\n def get_next_minibatch(self):\n if (self.cur + self.batch_size) >= self.num_data and self.repeat:\n self.shuffle_db_inds()\n\n db_inds = self.perm[self.cur:min(self.cur + self.batch_size, self.num_data)]\n self.cur += self.batch_size\n return db_inds\n\n def shutdown(self):\n self.exit.set()\n\n @print_error\n def run(self):\n iteration = 0\n # Run the loop until exit flag is set\n while not self.exit.is_set() and self.cur <= self.num_data:\n # Ensure that the network sees (almost) all data per epoch\n db_inds = self.get_next_minibatch()\n\n data_list = []\n label_list = []\n for batch_id, db_ind in enumerate(db_inds):\n datum = self.load_datum(self.data_paths[db_ind])\n label = self.load_label(self.data_paths[db_ind])\n\n data_list.append(datum)\n label_list.append(label)\n\n batch_data = np.array(data_list).astype(np.float32)\n batch_label = np.array(label_list).astype(np.float32)\n\n # The following will wait until the queue frees\n self.data_queue.put((batch_data, batch_label), block=True)\n iteration += 1\n\n def load_datum(self, path):\n pass\n\n def load_label(self, path):\n pass\n\n\nclass ReconstructionDataProcess(DataProcess):\n\n def __init__(self, data_queue, category_model_pair, background_imgs=[], repeat=True,\n train=True):\n self.repeat = repeat\n self.train = train\n self.background_imgs = background_imgs\n super(ReconstructionDataProcess, self).__init__(\n data_queue, category_model_pair, repeat=repeat)\n\n @print_error\n def run(self):\n # set up constants\n img_h = cfg.CONST.IMG_W\n img_w = cfg.CONST.IMG_H\n n_vox = cfg.CONST.N_VOX\n\n # This is the maximum number of views\n n_views = cfg.CONST.N_VIEWS\n\n while not self.exit.is_set() and self.cur <= self.num_data:\n # To insure that the network sees (almost) all images per epoch\n db_inds = self.get_next_minibatch()\n\n # We will sample # views\n if cfg.TRAIN.RANDOM_NUM_VIEWS:\n curr_n_views = np.random.randint(n_views) + 1\n else:\n curr_n_views = n_views\n\n # This will be fed into the queue. 
create new batch everytime\n batch_img = np.zeros(\n (curr_n_views, self.batch_size, 4, img_h, img_w), dtype=theano.config.floatX)\n batch_voxel = np.zeros(\n (self.batch_size, n_vox, 2, n_vox, n_vox), dtype=theano.config.floatX)\n batch_camera = np.zeros((curr_n_views, self.batch_size, 11)).astype('float32')\n\n # load each data instance\n try:\n for batch_id, db_ind in enumerate(db_inds):\n category, model_id = self.data_paths[db_ind]\n image_ids = np.random.choice(cfg.TRAIN.NUM_RENDERING, curr_n_views)\n\n voxel, voxel_fn = self.load_label(category, model_id)\n with open(get_camera_file(category, model_id)) as f:\n camera_params = np.array([l.rstrip().split() for l in f.readlines()])[:, (0, 1, 3)].astype('float32')\n\n # load multi view images\n for view_id, image_id in enumerate(image_ids):\n im, cr, cc, flipped = self.load_img(\n category, model_id, image_id)\n # channel, height, width\n batch_img[view_id, batch_id, :, :, :] = \\\n im.transpose((2, 0, 1)).astype(theano.config.floatX)\n img_cam = camera_params[image_id].tolist()\n img_cam.extend(readBinvoxParams(voxel_fn))\n img_cam.extend((cr, cc, flipped))\n batch_camera[view_id, batch_id] = img_cam\n\n voxel_data = voxel.data\n\n batch_voxel[batch_id, :, 0, :, :] = voxel_data < 1\n batch_voxel[batch_id, :, 1, :, :] = voxel_data\n except FileNotFoundError:\n continue\n\n # The following will wait until the queue frees\n self.data_queue.put((batch_img, batch_camera, batch_voxel), block=True)\n\n print('Exiting')\n\n def load_img(self, category, model_id, image_id):\n image_fn = get_rendering_file(category, model_id, image_id)\n im = Image.open(image_fn)\n\n t_im, cr, cc, flipped = preprocess_img(im, self.train)\n return t_im, cr, cc, flipped\n\n def load_label(self, category, model_id):\n voxel_fn = get_voxel_file(category, model_id)\n with open(voxel_fn, 'rb') as f:\n voxel = read_as_3d_array(f)\n\n return voxel, voxel_fn\n\n\ndef kill_processes(queue, processes):\n print('Signal processes')\n for p in processes:\n p.shutdown()\n\n print('Empty queue')\n while not queue.empty():\n time.sleep(0.5)\n queue.get(False)\n\n print('kill processes')\n for p in processes:\n p.terminate()\n\n\ndef make_data_processes(queue, data_paths, num_workers, repeat=True, train=True):\n '''\n Make a set of data processes for parallel data loading.\n '''\n processes = []\n for i in range(num_workers):\n process = ReconstructionDataProcess(queue, data_paths, repeat=repeat, train=train)\n process.start()\n processes.append(process)\n return processes\n\n\ndef get_while_running(data_process, data_queue, sleep_time=0):\n while True:\n time.sleep(sleep_time)\n try:\n batch_data, batch_camera, batch_label = data_queue.get_nowait()\n except queue.Empty:\n if not data_process.is_alive():\n break\n else:\n continue\n yield batch_data, batch_camera, batch_label\n\n\ndef test_process():\n from multiprocessing import Queue\n from lib.config import cfg\n from lib.data_io import category_model_id_pair\n\n cfg.TRAIN.PAD_X = 10\n cfg.TRAIN.PAD_Y = 10\n\n data_queue = Queue(2)\n category_model_pair = category_model_id_pair(dataset_portion=[0, 0.1])\n\n data_process = ReconstructionDataProcess(data_queue, category_model_pair)\n data_process.start()\n batch_img, batch_cam, batch_voxel = data_queue.get()\n\n kill_processes(data_queue, [data_process])\n\n\nif __name__ == '__main__':\n test_process()\n" ]
[ [ "numpy.array", "numpy.random.choice", "numpy.zeros", "numpy.random.randint", "numpy.arange" ] ]
lzx551402/SuperPoint
[ "a7bda1f6b5c5262b89aeeb48160c36dbd7aa4a2f" ]
[ "superpoint/experiment.py" ]
[ "import logging\nimport yaml\nimport os\nimport argparse\nimport numpy as np\nfrom contextlib import contextmanager\nfrom json import dumps as pprint\n\nfrom datasets import get_dataset\nfrom models import get_model\nfrom utils.stdout_capturing import capture_outputs\nfrom settings import EXPER_PATH\n\nlogging.basicConfig(format='[%(asctime)s %(levelname)s] %(message)s',\n datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO)\nimport tensorflow as tf # noqa: E402\n\n\ndef train(config, n_iter, output_dir, checkpoint_name='model.ckpt'):\n checkpoint_path = os.path.join(output_dir, checkpoint_name)\n with _init_graph(config) as net:\n try:\n net.train(n_iter, output_dir=output_dir,\n validation_interval=config.get('validation_interval', 100),\n save_interval=config.get('save_interval', None),\n checkpoint_path=checkpoint_path,\n keep_checkpoints=config.get('keep_checkpoints', 1))\n except KeyboardInterrupt:\n logging.info('Got Keyboard Interrupt, saving model and closing.')\n net.save(os.path.join(output_dir, checkpoint_name))\n\n\ndef evaluate(config, output_dir, n_iter=None):\n with _init_graph(config) as net:\n net.load(output_dir)\n results = net.evaluate(config.get('eval_set', 'test'), max_iterations=n_iter)\n return results\n\n\ndef predict(config, output_dir, n_iter):\n pred = []\n data = []\n with _init_graph(config, with_dataset=True) as (net, dataset):\n if net.trainable:\n net.load(output_dir)\n test_set = dataset.get_test_set()\n for _ in range(n_iter):\n data.append(next(test_set))\n pred.append(net.predict(data[-1], keys='*'))\n return pred, data\n\n\ndef set_seed(seed):\n tf.set_random_seed(seed)\n np.random.seed(seed)\n\n\ndef get_num_gpus():\n return len(os.environ['CUDA_VISIBLE_DEVICES'].split(','))\n\n\n@contextmanager\ndef _init_graph(config, with_dataset=False):\n set_seed(config.get('seed', int.from_bytes(os.urandom(4), byteorder='big')))\n n_gpus = get_num_gpus()\n logging.info('Number of GPUs detected: {}'.format(n_gpus))\n\n dataset = get_dataset(config['data']['name'])(**config['data'])\n model = get_model(config['model']['name'])(\n data={} if with_dataset else dataset.get_tf_datasets(),\n n_gpus=n_gpus, **config['model'])\n model.__enter__()\n if with_dataset:\n yield model, dataset\n else:\n yield model\n model.__exit__()\n tf.reset_default_graph()\n\n\ndef _cli_train(config, output_dir, args):\n assert 'train_iter' in config\n\n with open(os.path.join(output_dir, 'config.yml'), 'w') as f:\n yaml.dump(config, f, default_flow_style=False)\n train(config, config['train_iter'], output_dir)\n\n if args.eval:\n _cli_eval(config, output_dir, args)\n\n\ndef _cli_eval(config, output_dir, args):\n # Load model config from previous experiment\n with open(os.path.join(output_dir, 'config.yml'), 'r') as f:\n model_config = yaml.load(f)['model']\n model_config.update(config.get('model', {}))\n config['model'] = model_config\n\n results = evaluate(config, output_dir, n_iter=config.get('eval_iter'))\n\n # Print and export results\n logging.info('Evaluation results: \\n{}'.format(\n pprint(results, indent=2, default=str)))\n with open(os.path.join(output_dir, 'eval.txt'), 'a') as f:\n f.write('Evaluation for {} dataset:\\n'.format(config['data']['name']))\n for r, v in results.items():\n f.write('\\t{}:\\n\\t\\t{}\\n'.format(r, v))\n f.write('\\n')\n\n\n# TODO\ndef _cli_pred(config, args):\n raise NotImplementedError\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n subparsers = parser.add_subparsers(dest='command')\n\n # Training command\n p_train = 
subparsers.add_parser('train')\n p_train.add_argument('config', type=str)\n p_train.add_argument('exper_name', type=str)\n p_train.add_argument('--eval', action='store_true')\n p_train.set_defaults(func=_cli_train)\n\n # Evaluation command\n p_train = subparsers.add_parser('evaluate')\n p_train.add_argument('config', type=str)\n p_train.add_argument('exper_name', type=str)\n p_train.set_defaults(func=_cli_eval)\n\n # Inference command\n p_train = subparsers.add_parser('predict')\n p_train.add_argument('config', type=str)\n p_train.add_argument('exper_name', type=str)\n p_train.set_defaults(func=_cli_pred)\n\n args = parser.parse_args()\n with open(args.config, 'r') as f:\n config = yaml.load(f)\n output_dir = os.path.join(EXPER_PATH, args.exper_name)\n if not os.path.exists(output_dir):\n os.mkdir(output_dir)\n\n with capture_outputs(os.path.join(output_dir, 'log')):\n logging.info('Running command {}'.format(args.command.upper()))\n args.func(config, output_dir, args)\n" ]
[ [ "tensorflow.set_random_seed", "tensorflow.reset_default_graph", "numpy.random.seed" ] ]
shivangeerathi/photutils
[ "446b9701b14ab80a307a7da04d1c1609cc24e569" ]
[ "photutils/isophote/tests/test_harmonics.py" ]
[ "# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\"\"\"\nTests for the harmonics module.\n\"\"\"\n\nimport numpy as np\nfrom numpy.testing import assert_allclose\nimport pytest\n\nfrom .make_test_data import make_test_image\nfrom ..harmonics import (first_and_second_harmonic_function,\n fit_first_and_second_harmonics, fit_upper_harmonic)\nfrom ..sample import EllipseSample\nfrom ..fitter import EllipseFitter\n\n\ntry:\n from scipy.optimize import leastsq # noqa\n HAS_SCIPY = True\nexcept ImportError:\n HAS_SCIPY = False\n\n\[email protected]('not HAS_SCIPY')\ndef test_harmonics_1():\n # this is an almost as-is example taken from stackoverflow\n N = 100 # number of data points\n t = np.linspace(0, 4*np.pi, N)\n\n # create artificial data with noise:\n # mean = 0.5, amplitude = 3., phase = 0.1, noise-std = 0.01\n rng = np.random.default_rng(0)\n data = 3.0 * np.sin(t + 0.1) + 0.5 + 0.01 * rng.standard_normal(N)\n\n # first guesses for harmonic parameters\n guess_mean = np.mean(data)\n guess_std = 3 * np.std(data) / 2**0.5\n guess_phase = 0\n\n # Minimize the difference between the actual data and our \"guessed\"\n # parameters\n # optimize_func = lambda x: x[0] * np.sin(t + x[1]) + x[2] - data\n def optimize_func(x):\n return x[0] * np.sin(t + x[1]) + x[2] - data\n\n est_std, est_phase, est_mean = leastsq(\n optimize_func, [guess_std, guess_phase, guess_mean])[0]\n\n # recreate the fitted curve using the optimized parameters\n data_fit = est_std * np.sin(t + est_phase) + est_mean\n residual = data - data_fit\n\n assert_allclose(np.mean(residual), 0., atol=0.001)\n assert_allclose(np.std(residual), 0.01, atol=0.01)\n\n\[email protected]('not HAS_SCIPY')\ndef test_harmonics_2():\n # this uses the actual functional form used for fitting ellipses\n N = 100\n E = np.linspace(0, 4*np.pi, N)\n\n y0_0 = 100.\n a1_0 = 10.\n b1_0 = 5.\n a2_0 = 8.\n b2_0 = 2.\n rng = np.random.default_rng(0)\n data = (y0_0 + a1_0*np.sin(E) + b1_0*np.cos(E) + a2_0*np.sin(2*E) +\n b2_0*np.cos(2*E) + 0.01*rng.standard_normal(N))\n\n harmonics = fit_first_and_second_harmonics(E, data)\n y0, a1, b1, a2, b2 = harmonics[0]\n data_fit = (y0 + a1*np.sin(E) + b1*np.cos(E) + a2*np.sin(2*E) +\n b2*np.cos(2*E) + 0.01*rng.standard_normal(N))\n residual = data - data_fit\n\n assert_allclose(np.mean(residual), 0., atol=0.01)\n assert_allclose(np.std(residual), 0.015, atol=0.01)\n\n\[email protected]('not HAS_SCIPY')\ndef test_harmonics_3():\n \"\"\"Tests an upper harmonic fit.\"\"\"\n\n N = 100\n E = np.linspace(0, 4*np.pi, N)\n y0_0 = 100.\n a1_0 = 10.\n b1_0 = 5.\n order = 3\n rng = np.random.default_rng(0)\n data = (y0_0 + a1_0*np.sin(order*E) + b1_0*np.cos(order*E) +\n 0.01*rng.standard_normal(N))\n\n harmonic = fit_upper_harmonic(E, data, order)\n y0, a1, b1 = harmonic[0]\n rng = np.random.default_rng(0)\n data_fit = (y0 + a1*np.sin(order*E) + b1*np.cos(order*E) +\n 0.01*rng.standard_normal(N))\n residual = data - data_fit\n\n assert_allclose(np.mean(residual), 0., atol=0.01)\n assert_allclose(np.std(residual), 0.015, atol=0.014)\n\n\[email protected]('not HAS_SCIPY')\nclass TestFitEllipseSamples:\n def setup_class(self):\n # major axis parallel to X image axis\n self.data1 = make_test_image(seed=0)\n\n # major axis tilted 45 deg wrt X image axis\n self.data2 = make_test_image(pa=np.pi/4, seed=0)\n\n def test_fit_ellipsesample_1(self):\n sample = EllipseSample(self.data1, 40.)\n s = sample.extract()\n\n harmonics = fit_first_and_second_harmonics(s[0], s[2])\n y0, a1, b1, a2, b2 = harmonics[0]\n\n 
assert_allclose(np.mean(y0), 200.019, atol=0.001)\n assert_allclose(np.mean(a1), -0.000138, atol=0.001)\n assert_allclose(np.mean(b1), 0.000254, atol=0.001)\n assert_allclose(np.mean(a2), -5.658e-05, atol=0.001)\n assert_allclose(np.mean(b2), -0.00911, atol=0.001)\n\n # check that harmonics subtract nicely\n model = first_and_second_harmonic_function(\n s[0], np.array([y0, a1, b1, a2, b2]))\n residual = s[2] - model\n\n assert_allclose(np.mean(residual), 0., atol=0.001)\n assert_allclose(np.std(residual), 0.015, atol=0.01)\n\n def test_fit_ellipsesample_2(self):\n # initial guess is rounder than actual image\n sample = EllipseSample(self.data1, 40., eps=0.1)\n s = sample.extract()\n\n harmonics = fit_first_and_second_harmonics(s[0], s[2])\n y0, a1, b1, a2, b2 = harmonics[0]\n\n assert_allclose(np.mean(y0), 188.686, atol=0.001)\n assert_allclose(np.mean(a1), 0.000283, atol=0.001)\n assert_allclose(np.mean(b1), 0.00692, atol=0.001)\n assert_allclose(np.mean(a2), -0.000215, atol=0.001)\n assert_allclose(np.mean(b2), 10.153, atol=0.001)\n\n def test_fit_ellipsesample_3(self):\n # initial guess for center is offset\n sample = EllipseSample(self.data1, x0=220., y0=210., sma=40.)\n s = sample.extract()\n\n harmonics = fit_first_and_second_harmonics(s[0], s[2])\n y0, a1, b1, a2, b2 = harmonics[0]\n\n assert_allclose(np.mean(y0), 152.660, atol=0.001)\n assert_allclose(np.mean(a1), 55.338, atol=0.001)\n assert_allclose(np.mean(b1), 33.091, atol=0.001)\n assert_allclose(np.mean(a2), 33.036, atol=0.001)\n assert_allclose(np.mean(b2), -14.306, atol=0.001)\n\n def test_fit_ellipsesample_4(self):\n sample = EllipseSample(self.data2, 40., eps=0.4)\n s = sample.extract()\n\n harmonics = fit_first_and_second_harmonics(s[0], s[2])\n y0, a1, b1, a2, b2 = harmonics[0]\n\n assert_allclose(np.mean(y0), 245.102, atol=0.001)\n assert_allclose(np.mean(a1), -0.003108, atol=0.001)\n assert_allclose(np.mean(b1), -0.0578, atol=0.001)\n assert_allclose(np.mean(a2), 28.781, atol=0.001)\n assert_allclose(np.mean(b2), -63.184, atol=0.001)\n\n def test_fit_upper_harmonics(self):\n data = make_test_image(noise=1.e-10, seed=0)\n sample = EllipseSample(data, 40)\n fitter = EllipseFitter(sample)\n iso = fitter.fit(maxit=400)\n\n assert_allclose(iso.a3, -6.87e-7, atol=1.e-9)\n assert_allclose(iso.b3, 1.68e-6, atol=1.e-8)\n assert_allclose(iso.a4, -4.36e-6, atol=1.e-8)\n assert_allclose(iso.b4, 4.73e-5, atol=1.e-7)\n\n assert_allclose(iso.a3_err, 5.28e-5, atol=1.e-7)\n assert_allclose(iso.b3_err, 5.24e-5, atol=1.e-7)\n assert_allclose(iso.a4_err, 5.28e-5, atol=1.e-7)\n assert_allclose(iso.b4_err, 5.24e-5, atol=1.e-7)\n" ]
[ [ "numpy.testing.assert_allclose", "numpy.sin", "numpy.array", "numpy.random.default_rng", "numpy.mean", "numpy.std", "scipy.optimize.leastsq", "numpy.cos", "numpy.linspace" ] ]
HAOCHENYE/mmdetection-mini
[ "3858c8c2f071c064fe78ce24088a1c9815ae1a21" ]
[ "mmdet/models/detectors/cornernet.py" ]
[ "import torch\n\nfrom mmdet.det_core import bbox2result, bbox_mapping_back\nfrom ..builder import DETECTORS\nfrom .single_stage import SingleStageDetector\n\n\[email protected]_module()\nclass CornerNet(SingleStageDetector):\n \"\"\"CornerNet.\n\n This detector is the implementation of the paper `CornerNet: Detecting\n Objects as Paired Keypoints <https://arxiv.org/abs/1808.01244>`_ .\n \"\"\"\n\n def __init__(self,\n backbone,\n neck,\n bbox_head,\n train_cfg=None,\n test_cfg=None,\n pretrained=None):\n super(CornerNet, self).__init__(backbone, neck, bbox_head, train_cfg,\n test_cfg, pretrained)\n\n def merge_aug_results(self, aug_results, img_metas):\n \"\"\"Merge augmented detection bboxes and score.\n\n Args:\n aug_results (list[list[Tensor]]): Det_bboxes and det_labels of each\n image.\n img_metas (list[list[dict]]): Meta information of each image, e.g.,\n image size, scaling factor, etc.\n\n Returns:\n tuple: (bboxes, labels)\n \"\"\"\n recovered_bboxes, aug_labels = [], []\n for bboxes_labels, img_info in zip(aug_results, img_metas):\n img_shape = img_info[0]['img_shape'] # using shape before padding\n scale_factor = img_info[0]['scale_factor']\n flip = img_info[0]['flip']\n bboxes, labels = bboxes_labels\n bboxes, scores = bboxes[:, :4], bboxes[:, -1:]\n bboxes = bbox_mapping_back(bboxes, img_shape, scale_factor, flip)\n recovered_bboxes.append(torch.cat([bboxes, scores], dim=-1))\n aug_labels.append(labels)\n\n bboxes = torch.cat(recovered_bboxes, dim=0)\n labels = torch.cat(aug_labels)\n\n if bboxes.shape[0] > 0:\n out_bboxes, out_labels = self.bbox_head._bboxes_nms(\n bboxes, labels, self.bbox_head.test_cfg)\n else:\n out_bboxes, out_labels = bboxes, labels\n\n return out_bboxes, out_labels\n\n def aug_test(self, imgs, img_metas, rescale=False):\n \"\"\"Augment testing of CornerNet.\n\n Args:\n imgs (list[Tensor]): Augmented images.\n img_metas (list[list[dict]]): Meta information of each image, e.g.,\n image size, scaling factor, etc.\n rescale (bool): If True, return boxes in original image space.\n Default: False.\n\n Note:\n ``imgs`` must including flipped image pairs.\n\n Returns:\n list[list[np.ndarray]]: BBox results of each image and classes.\n The outer list corresponds to each image. The inner list\n corresponds to each class.\n \"\"\"\n img_inds = list(range(len(imgs)))\n\n assert img_metas[0][0]['flip'] + img_metas[1][0]['flip'], (\n 'aug test must have flipped image pair')\n aug_results = []\n for ind, flip_ind in zip(img_inds[0::2], img_inds[1::2]):\n img_pair = torch.cat([imgs[ind], imgs[flip_ind]])\n x = self.extract_feat(img_pair)\n outs = self.bbox_head(x)\n bbox_list = self.bbox_head.get_bboxes(\n *outs, [img_metas[ind], img_metas[flip_ind]], False, False)\n aug_results.append(bbox_list[0])\n aug_results.append(bbox_list[1])\n\n bboxes, labels = self.merge_aug_results(aug_results, img_metas)\n bbox_results = bbox2result(bboxes, labels, self.bbox_head.num_classes)\n\n return [bbox_results]\n" ]
[ [ "torch.cat" ] ]
chinmay3/NiaPy
[ "b4e5c0f98063e2a9eebd8d750f0922cfca88bc55" ]
[ "niapy/algorithms/other/aso.py" ]
[ "# encoding=utf8\nimport logging\n\nimport numpy as np\n\nfrom niapy.algorithms.algorithm import Algorithm\nfrom niapy.util import full_array, euclidean\n\nlogging.basicConfig()\nlogger = logging.getLogger('niapy.algorithms.other')\nlogger.setLevel('INFO')\n\n__all__ = ['AnarchicSocietyOptimization', 'elitism', 'sequential', 'crossover']\n\n\ndef elitism(x, xpb, xb, xr, mp_c, mp_s, mp_p, mutation_rate, crossover_probability, task, rng):\n r\"\"\"Select the best of all three strategies.\n\n Args:\n x (numpy.ndarray): individual position.\n xpb (numpy.ndarray): individuals best position.\n xb (numpy.ndarray): current best position.\n xr (numpy.ndarray): random individual.\n mp_c (float): Fickleness index value.\n mp_s (float): External irregularity index value.\n mp_p (float): Internal irregularity index value.\n mutation_rate (float): scale factor.\n crossover_probability (float): crossover factor.\n task (Task): optimization task.\n rng (numpy.random.Generator): random number generator.\n\n Returns:\n Tuple[numpy.ndarray, float]:\n 1. New position of individual\n 2. New positions fitness/function value\n\n \"\"\"\n xn = [task.repair(mp_current(x, mutation_rate, crossover_probability, mp_c, rng), rng=rng),\n task.repair(mp_society(x, xr, xb, crossover_probability, mp_s, rng), rng=rng),\n task.repair(mp_past(x, xpb, crossover_probability, mp_p, rng), rng=rng)]\n xn_f = np.apply_along_axis(task.eval, 1, xn)\n ib = np.argmin(xn_f)\n return xn[ib], xn_f[ib]\n\n\ndef sequential(x, xpb, xb, xr, mp_c, mp_s, mp_p, mutation_rate, crossover_probability, task, rng):\n r\"\"\"Sequentially combines all three strategies.\n\n Args:\n x (numpy.ndarray): individual position.\n xpb (numpy.ndarray): individuals best position.\n xb (numpy.ndarray): current best position.\n xr (numpy.ndarray): random individual.\n mp_c (float): Fickleness index value.\n mp_s (float): External irregularity index value.\n mp_p (float): Internal irregularity index value.\n mutation_rate (float): scale factor.\n crossover_probability (float): crossover factor.\n task (Task): optimization task.\n rng (numpy.random.Generator): random number generator.\n\n Returns:\n tuple[numpy.ndarray, float]:\n 1. new position\n 2. new positions function/fitness value\n\n \"\"\"\n xn = task.repair(mp_society(\n mp_past(mp_current(x, mutation_rate, crossover_probability, mp_c, rng), xpb, crossover_probability, mp_p, rng),\n xr,\n xb, crossover_probability, mp_s, rng), rng=rng)\n return xn, task.eval(xn)\n\n\ndef crossover(x, xpb, xb, xr, mp_c, mp_s, mp_p, mutation_rate, crossover_probability, task, rng):\n r\"\"\"Create a crossover over all three strategies.\n\n Args:\n x (numpy.ndarray): individual position.\n xpb (numpy.ndarray): individuals best position.\n xb (numpy.ndarray): current best position.\n xr (numpy.ndarray): random individual.\n mp_c (float): Fickleness index value.\n mp_s (float): External irregularity index value.\n mp_p (float): Internal irregularity index value.\n mutation_rate (float): scale factor.\n crossover_probability (float): crossover factor.\n task (Task): optimization task.\n rng (numpy.random.Generator): random number generator.\n\n Returns:\n Tuple[numpy.ndarray, float]:\n 1. new position\n 2. 
new positions function/fitness value.\n\n \"\"\"\n xns = [task.repair(mp_current(x, mutation_rate, crossover_probability, mp_c, rng), rng=rng),\n task.repair(mp_society(x, xr, xb, crossover_probability, mp_s, rng), rng=rng),\n task.repair(mp_past(x, xpb, crossover_probability, mp_p, rng), rng=rng)]\n index = rng.integers(len(xns))\n x = np.asarray([xns[index][i] if rng.random() < crossover_probability else x[i] for i in range(len(x))])\n return x, task.eval(x)\n\n\ndef mp_current(x, mutation_rate, crossover_rate, mp, rng):\n r\"\"\"Get bew position based on fickleness.\n\n Args:\n x (numpy.ndarray): Current individuals position.\n mutation_rate (float): Scale factor.\n crossover_rate (float): Crossover probability.\n mp (float): Fickleness index value\n rng (numpy.random.Generator): Random number generator\n\n Returns:\n numpy.ndarray: New position\n\n \"\"\"\n if mp < 0.5:\n b = np.sort(rng.choice(len(x), 2, replace=False))\n x[b[0]:b[1]] = x[b[0]:b[1]] + mutation_rate * rng.normal(0, 1, b[1] - b[0])\n return x\n return np.asarray(\n [x[i] + mutation_rate * rng.normal(0, 1) if rng.random() < crossover_rate else x[i] for i in range(len(x))])\n\n\ndef mp_society(x, xr, xb, crossover_rate, mp, rng):\n r\"\"\"Get new position based on external irregularity.\n\n Args:\n x (numpy.ndarray): Current individuals position.\n xr (numpy.ndarray): Random individuals position.\n xb (numpy.ndarray): Global best individuals position.\n crossover_rate (float): Crossover probability.\n mp (float): External irregularity index.\n rng (numpy.random.Generator): Random number generator.\n\n Returns:\n numpy.ndarray: New position.\n\n \"\"\"\n if mp < 0.25:\n b = np.sort(rng.choice(len(x), 2, replace=False))\n x[b[0]:b[1]] = xb[b[0]:b[1]]\n return x\n elif mp < 0.5:\n return np.asarray([xb[i] if rng.random() < crossover_rate else x[i] for i in range(len(x))])\n elif mp < 0.75:\n b = np.sort(rng.choice(len(x), 2, replace=False))\n x[b[0]:b[1]] = xr[b[0]:b[1]]\n return x\n return np.asarray([xr[i] if rng.random() < crossover_rate else x[i] for i in range(len(x))])\n\n\ndef mp_past(x, xpb, crossover_rate, mp, rng):\n r\"\"\"Get new position based on internal irregularity.\n\n Args:\n x (numpy.ndarray): Current individuals position.\n xpb (numpy.ndarray): Current individuals personal best position.\n crossover_rate (float): Crossover probability.\n mp (float): Internal irregularity index value.\n rng (numpy.random.Generator): Random number generator.\n\n Returns:\n numpy.ndarray: Current individuals new position.\n\n \"\"\"\n if mp < 0.5:\n b = np.sort(rng.choice(len(x), 2, replace=False))\n x[b[0]:b[1]] = xpb[b[0]:b[1]]\n return x\n return np.asarray([xpb[i] if rng.random() < crossover_rate else x[i] for i in range(len(x))])\n\n\nclass AnarchicSocietyOptimization(Algorithm):\n r\"\"\"Implementation of Anarchic Society Optimization algorithm.\n\n Algorithm:\n Anarchic Society Optimization algorithm\n\n Date:\n 2018\n\n Authors:\n Klemen Berkovič\n\n License:\n MIT\n\n Reference paper:\n Ahmadi-Javid, Amir. \"Anarchic Society Optimization: A human-inspired method.\" Evolutionary Computation (CEC), 2011 IEEE Congress on. 
IEEE, 2011.\n\n Attributes:\n Name (list of str): List of stings representing name of algorithm.\n alpha (List[float]): Factor for fickleness index function :math:`\\in [0, 1]`.\n gamma (List[float]): Factor for external irregularity index function :math:`\\in [0, \\infty)`.\n theta (List[float]): Factor for internal irregularity index function :math:`\\in [0, \\infty)`.\n d (Callable[[float, float], float]): function that takes two arguments that are function values and calculates the distance between them.\n dn (Callable[[numpy.ndarray, numpy.ndarray], float]): function that takes two arguments that are points in function landscape and calculates the distance between them.\n nl (float): Normalized range for neighborhood search :math:`\\in (0, 1]`.\n F (float): Mutation parameter.\n CR (float): Crossover parameter :math:`\\in [0, 1]`.\n Combination (Callable[numpy.ndarray, numpy.ndarray, numpy.ndarray, numpy.ndarray, float, float, float, float, float, float, Task, numpy.random.Generator]): Function for combining individuals to get new position/individual.\n\n See Also:\n * :class:`niapy.algorithms.Algorithm`\n\n \"\"\"\n\n Name = ['AnarchicSocietyOptimization', 'ASO']\n\n @staticmethod\n def info():\n r\"\"\"Get basic information about the algorithm.\n\n Returns:\n str: Basic information.\n\n See Also:\n :func:`niapy.algorithms.algorithm.Algorithm.info`\n\n \"\"\"\n return r\"\"\"Ahmadi-Javid, Amir. \"Anarchic Society Optimization: A human-inspired method.\" Evolutionary Computation (CEC), 2011 IEEE Congress on. IEEE, 2011.\"\"\"\n\n def __init__(self, population_size=43, alpha=(1, 0.83), gamma=(1.17, 0.56), theta=(0.932, 0.832), d=euclidean,\n dn=euclidean, nl=1, mutation_rate=1.2, crossover_rate=0.25, combination=elitism, *args, **kwargs):\n r\"\"\"Initialize AnarchicSocietyOptimization.\n\n Args:\n population_size (Optional[int]): Population size.\n alpha (Optional[Tuple[float, ...]]): Factor for fickleness index function :math:`\\in [0, 1]`.\n gamma (Optional[Tuple[float, ...]]): Factor for external irregularity index function :math:`\\in [0, \\infty)`.\n theta (Optional[List[float]]): Factor for internal irregularity index function :math:`\\in [0, \\infty)`.\n d (Optional[Callable[[float, float], float]]): function that takes two arguments that are function values and calculates the distance between them.\n dn (Optional[Callable[[numpy.ndarray, numpy.ndarray], float]]): function that takes two arguments that are points in function landscape and calculates the distance between them.\n nl (Optional[float]): Normalized range for neighborhood search :math:`\\in (0, 1]`.\n mutation_rate (Optional[float]): Mutation parameter.\n crossover_rate (Optional[float]): Crossover parameter :math:`\\in [0, 1]`.\n combination (Optional[Callable[numpy.ndarray, numpy.ndarray, numpy.ndarray, numpy.ndarray, float, float, float, float, float, float, Task, numpy.random.Generator]]): Function for combining individuals to get new position/individual.\n\n See Also:\n * :func:`niapy.algorithms.Algorithm.set_parameters`\n\n \"\"\"\n super().__init__(population_size, *args, **kwargs)\n self.alpha = alpha\n self.gamma = gamma\n self.theta = theta\n self.d = d\n self.dn = dn\n self.nl = nl\n self.mutation_rate = mutation_rate\n self.crossover_rate = crossover_rate\n self.Combination = combination\n\n def set_parameters(self, population_size=43, alpha=(1, 0.83), gamma=(1.17, 0.56), theta=(0.932, 0.832), d=euclidean,\n dn=euclidean, nl=1, mutation_rate=1.2, crossover_rate=0.25, combination=elitism, **kwargs):\n 
r\"\"\"Set the parameters for the algorithm.\n\n Args:\n population_size (Optional[int]): Population size.\n alpha (Optional[Tuple[float, ...]]): Factor for fickleness index function :math:`\\in [0, 1]`.\n gamma (Optional[Tuple[float, ...]]): Factor for external irregularity index function :math:`\\in [0, \\infty)`.\n theta (Optional[List[float]]): Factor for internal irregularity index function :math:`\\in [0, \\infty)`.\n d (Optional[Callable[[float, float], float]]): function that takes two arguments that are function values and calculates the distance between them.\n dn (Optional[Callable[[numpy.ndarray, numpy.ndarray], float]]): function that takes two arguments that are points in function landscape and calculates the distance between them.\n nl (Optional[float]): Normalized range for neighborhood search :math:`\\in (0, 1]`.\n mutation_rate (Optional[float]): Mutation parameter.\n crossover_rate (Optional[float]): Crossover parameter :math:`\\in [0, 1]`.\n combination (Optional[Callable[numpy.ndarray, numpy.ndarray, numpy.ndarray, numpy.ndarray, float, float, float, float, float, float, Task, numpy.random.Generator]]): Function for combining individuals to get new position/individual.\n\n See Also:\n * :func:`niapy.algorithms.Algorithm.set_parameters`\n * Combination methods:\n * :func:`niapy.algorithms.other.elitism`\n * :func:`niapy.algorithms.other.crossover`\n * :func:`niapy.algorithms.other.sequential`\n\n \"\"\"\n super().set_parameters(population_size=population_size, **kwargs)\n self.alpha = alpha\n self.gamma = gamma\n self.theta = theta\n self.d = d\n self.dn = dn\n self.nl = nl\n self.mutation_rate = mutation_rate\n self.crossover_rate = crossover_rate\n self.Combination = combination\n\n def init(self, _task):\n r\"\"\"Initialize dynamic parameters of algorithm.\n\n Args:\n _task (Task): Optimization task.\n\n Returns:\n Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray]\n 1. Array of `self.alpha` propagated values\n 2. Array of `self.gamma` propagated values\n 3. 
Array of `self.theta` propagated values\n\n \"\"\"\n return full_array(self.alpha, self.population_size), full_array(self.gamma, self.population_size), full_array(\n self.theta, self.population_size)\n\n @staticmethod\n def fickleness_index(x_f, xpb_f, xb_f, alpha):\n r\"\"\"Get fickleness index.\n\n Args:\n x_f (float): Individuals fitness/function value.\n xpb_f (float): Individuals personal best fitness/function value.\n xb_f (float): Current best found individuals fitness/function value.\n alpha (float): Fickleness factor.\n\n Returns:\n float: Fickleness index.\n\n \"\"\"\n return 1 - alpha * xb_f / x_f - (1 - alpha) * xpb_f / x_f\n\n def external_irregularity(self, x_f, xnb_f, gamma):\n r\"\"\"Get external irregularity index.\n\n Args:\n x_f (float): Individuals fitness/function value.\n xnb_f (float): Individuals new fitness/function value.\n gamma (float): External irregularity factor.\n\n Returns:\n float: External irregularity index.\n\n \"\"\"\n return 1 - np.exp(-gamma * self.d(x_f, xnb_f))\n\n def irregularity_index(self, x_f, xpb_f, theta):\n r\"\"\"Get internal irregularity index.\n\n Args:\n x_f (float): Individuals fitness/function value.\n xpb_f (float): Individuals personal best fitness/function value.\n theta (float): Internal irregularity factor.\n\n Returns:\n float: Internal irregularity index\n\n \"\"\"\n return 1 - np.exp(-theta * self.d(x_f, xpb_f))\n\n def get_best_neighbors(self, i, population, population_fitness, rs):\n r\"\"\"Get neighbors of individual.\n\n Measurement of distance for neighborhood is defined with `self.nl`.\n Function for calculating distances is define with `self.dn`.\n\n Args:\n i (int): Index of individual for hum we are looking for neighbours.\n population (numpy.ndarray): Current population.\n population_fitness (numpy.ndarray[float]): Current population fitness/function values.\n rs (numpy.ndarray[float]): distance between individuals.\n\n Returns:\n numpy.ndarray[int]: Indexes that represent individuals closest to `i`-th individual.\n\n \"\"\"\n nn = np.asarray([self.dn(population[i], population[j]) / rs for j in range(len(population))])\n return np.argmin(population_fitness[np.where(nn <= self.nl)])\n\n @staticmethod\n def update_personal_best(population, population_fitness, personal_best, personal_best_fitness):\n r\"\"\"Update personal best solution of all individuals in population.\n\n Args:\n population (numpy.ndarray): Current population.\n population_fitness (numpy.ndarray[float]): Current population fitness/function values.\n personal_best (numpy.ndarray): Current population best positions.\n personal_best_fitness (numpy.ndarray[float]): Current populations best positions fitness/function values.\n\n Returns:\n Tuple[numpy.ndarray, numpy.ndarray[float], numpy.ndarray, float]:\n 1. New personal best positions for current population.\n 2. New personal best positions function/fitness values for current population.\n 3. New best individual.\n 4. New best individual fitness/function value.\n\n \"\"\"\n ix_pb = np.where(population_fitness < personal_best_fitness)\n personal_best[ix_pb], personal_best_fitness[ix_pb] = population[ix_pb], population_fitness[ix_pb]\n return personal_best, personal_best_fitness\n\n def init_population(self, task):\n r\"\"\"Initialize first population and additional arguments.\n\n Args:\n task (Task): Optimization task\n\n Returns:\n Tuple[numpy.ndarray, numpy.ndarray, dict]:\n 1. Initialized population\n 2. Initialized population fitness/function values\n 3. 
Dict[str, Any]:\n * x_best (numpy.ndarray): Initialized populations best positions.\n * x_best_fitness (numpy.ndarray): Initialized populations best positions function/fitness values.\n * alpha (numpy.ndarray):\n * gamma (numpy.ndarray):\n * theta (numpy.ndarray):\n * rs (float): distance of search space.\n\n See Also:\n * :func:`niapy.algorithms.algorithm.Algorithm.init_population`\n * :func:`niapy.algorithms.other.aso.AnarchicSocietyOptimization.init`\n\n \"\"\"\n population, population_fitness, d = Algorithm.init_population(self, task)\n alpha, gamma, theta = self.init(task)\n x_best, x_best_fitness = self.update_personal_best(population, task.optimization_type.value * population_fitness,\n np.zeros((self.population_size, task.dimension)),\n np.full(self.population_size, np.inf))\n d.update({'x_best': x_best, 'x_best_fitness': x_best_fitness, 'alpha': alpha, 'gamma': gamma, 'theta': theta,\n 'rs': self.d(task.upper, task.lower)})\n return population, population_fitness, d\n\n def run_iteration(self, task, population, population_fitness, best_x, best_fitness, **params):\n r\"\"\"Core function of AnarchicSocietyOptimization algorithm.\n\n Args:\n task (Task): Optimization task.\n population (numpy.ndarray): Current populations positions.\n population_fitness (numpy.ndarray): Current populations function/fitness values.\n best_x (numpy.ndarray): Current global best individuals position.\n best_fitness (float): Current global best individual function/fitness value.\n **params: Additional arguments.\n\n Returns:\n Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray, float, dict]:\n 1. Initialized population\n 2. Initialized population fitness/function values\n 3. New global best solution\n 4. New global best solutions fitness/objective value\n 5. Dict[str, Union[float, int, numpy.ndarray]:\n * x_best (numpy.ndarray): Initialized populations best positions.\n * x_best_fitness (numpy.ndarray): Initialized populations best positions function/fitness values.\n * alpha (numpy.ndarray):\n * gamma (numpy.ndarray):\n * theta (numpy.ndarray):\n * rs (float): distance of search space.\n\n \"\"\"\n x_best = params.pop('x_best')\n x_best_fitness = params.pop('x_best_fitness')\n alpha = params.pop('alpha')\n gamma = params.pop('gamma')\n theta = params.pop('theta')\n rs = params.pop('rs')\n\n x_in = [self.get_best_neighbors(i, population, population_fitness, rs) for i in range(len(population))]\n mp_c, mp_s, mp_p = np.asarray(\n [self.fickleness_index(population_fitness[i], x_best_fitness[i], best_fitness, alpha[i]) for i in\n range(len(population))]), np.asarray(\n [self.external_irregularity(population_fitness[i], population_fitness[x_in[i]], gamma[i]) for i in\n range(len(population))]), np.asarray(\n [self.irregularity_index(population_fitness[i], x_best_fitness[i], theta[i]) for i in range(len(population))])\n x_tmp = np.asarray([self.Combination(population[i], x_best[i], best_x,\n population[self.integers(len(population), skip=[i])], mp_c[i], mp_s[i],\n mp_p[i], self.mutation_rate, self.crossover_rate, task, self.rng) for i in range(len(population))],\n dtype=object)\n population, population_fitness = np.asarray([x_tmp[i][0] for i in range(len(population))]), np.asarray(\n [x_tmp[i][1] for i in range(len(population))])\n x_best, x_best_fitness = self.update_personal_best(population, population_fitness, x_best, x_best_fitness)\n best_x, best_fitness = self.get_best(population, population_fitness, best_x, best_fitness)\n return population, population_fitness, best_x, best_fitness, {'x_best': x_best,\n 
'x_best_fitness': x_best_fitness,\n 'alpha': alpha,\n 'gamma': gamma,\n 'theta': theta,\n 'rs': rs}\n\n# vim: tabstop=3 noexpandtab shiftwidth=3 softtabstop=3\n" ]
[ [ "numpy.full", "numpy.zeros", "numpy.argmin", "numpy.where", "numpy.apply_along_axis" ] ]
chenzhutian/auto-infog-timeline
[ "b01e6efdaeb2f63da449844ec818d21ed305c4cf" ]
[ "maskrcnn_benchmark/modeling/backbone/fpn.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn\n\n\nclass FPN(nn.Module):\n \"\"\"\n Module that adds FPN on top of a list of feature maps.\n The feature maps are currently supposed to be in increasing depth\n order, and must be consecutive\n \"\"\"\n\n def __init__(\n self, in_channels_list, out_channels, conv_block, top_blocks=None\n ):\n \"\"\"\n Arguments:\n in_channels_list (list[int]): number of channels for each feature map that\n will be fed\n out_channels (int): number of channels of the FPN representation\n top_blocks (nn.Module or None): if provided, an extra operation will\n be performed on the output of the last (smallest resolution)\n FPN output, and the result will extend the result list\n \"\"\"\n super(FPN, self).__init__()\n print('FPN in_channels_list', in_channels_list)\n print('FPN out_channels', out_channels)\n self.inner_blocks = []\n self.layer_blocks = []\n for idx, in_channels in enumerate(in_channels_list, 1):\n inner_block = \"fpn_inner{}\".format(idx)\n layer_block = \"fpn_layer{}\".format(idx)\n # inner_block_module = nn.Conv2d(in_channels, out_channels, 1)\n # layer_block_module = nn.Conv2d(out_channels, out_channels, 3, 1, 1)\n # for module in [inner_block_module, layer_block_module]:\n # # Caffe2 implementation uses XavierFill, which in fact\n # # corresponds to kaiming_uniform_ in PyTorch\n # nn.init.kaiming_uniform_(module.weight, a=1)\n # nn.init.constant_(module.bias, 0)\n inner_block_module = conv_block(in_channels, out_channels, 1)\n layer_block_module = conv_block(out_channels, out_channels, 3, 1)\n self.add_module(inner_block, inner_block_module)\n self.add_module(layer_block, layer_block_module)\n self.inner_blocks.append(inner_block)\n self.layer_blocks.append(layer_block)\n self.top_blocks = top_blocks\n\n def forward(self, x):\n \"\"\"\n Arguments:\n x (list[Tensor]): feature maps for each feature level.\n Returns:\n results (tuple[Tensor]): feature maps after FPN layers.\n They are ordered from highest resolution first.\n \"\"\"\n last_inner = getattr(self, self.inner_blocks[-1])(x[-1])\n results = []\n results.append(getattr(self, self.layer_blocks[-1])(last_inner))\n for feature, inner_block, layer_block in zip(\n x[:-1][::-1], self.inner_blocks[:-1][::-1], self.layer_blocks[:-1][::-1]\n ):\n inner_top_down = F.interpolate(last_inner, scale_factor=2, mode=\"nearest\")\n inner_lateral = getattr(self, inner_block)(feature)\n # TODO use size instead of scale to make it robust to different sizes\n # inner_top_down = F.upsample(last_inner, size=inner_lateral.shape[-2:],\n # mode='bilinear', align_corners=False)\n last_inner = inner_lateral + inner_top_down\n results.insert(0, getattr(self, layer_block)(last_inner))\n\n if self.top_blocks is not None:\n last_results = self.top_blocks(results[-1])\n results.extend(last_results)\n\n return tuple(results)\n\n\nclass LastLevelMaxPool(nn.Module):\n def forward(self, x):\n return [F.max_pool2d(x, 1, 2, 0)]\n" ]
[ [ "torch.nn.functional.interpolate", "torch.nn.functional.max_pool2d" ] ]
naivelogic/vision-ai-developer-kit
[ "e72ed377c027df24f125ad1bdd0c94652075e25a" ]
[ "machine-learning-notebooks/02-mobilenet-transfer-learning-scripts/retrain.py" ]
[ "# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nr\"\"\"Simple transfer learning with Inception v3 or Mobilenet models.\n\nWith support for TensorBoard.\n\nThis example shows how to take a Inception v3 or Mobilenet model trained on\nImageNet images, and train a new top layer that can recognize other classes of\nimages.\n\nThe top layer receives as input a 2048-dimensional vector (1001-dimensional for\nMobilenet) for each image. We train a softmax layer on top of this\nrepresentation. Assuming the softmax layer contains N labels, this corresponds\nto learning N + 2048*N (or 1001*N) model parameters corresponding to the\nlearned biases and weights.\n\nHere's an example, which assumes you have a folder containing class-named\nsubfolders, each full of images for each label. The example folder flower_photos\nshould have a structure like this:\n\n~/flower_photos/daisy/photo1.jpg\n~/flower_photos/daisy/photo2.jpg\n...\n~/flower_photos/rose/anotherphoto77.jpg\n...\n~/flower_photos/sunflower/somepicture.jpg\n\nThe subfolder names are important, since they define what label is applied to\neach image, but the filenames themselves don't matter. Once your images are\nprepared, you can run the training with a command like this:\n\n\n```bash\nbazel build tensorflow/examples/image_retraining:retrain && \\\nbazel-bin/tensorflow/examples/image_retraining/retrain \\\n --image_dir ~/flower_photos\n```\n\nOr, if you have a pip installation of tensorflow, `retrain.py` can be run\nwithout bazel:\n\n```bash\npython tensorflow/examples/image_retraining/retrain.py \\\n --image_dir ~/flower_photos\n```\n\nYou can replace the image_dir argument with any folder containing subfolders of\nimages. The label for each image is taken from the name of the subfolder it's\nin.\n\nThis produces a new model file that can be loaded and run by any TensorFlow\nprogram, for example the label_image sample code.\n\nBy default this script will use the high accuracy, but comparatively large and\nslow Inception v3 model architecture. It's recommended that you start with this\nto validate that you have gathered good training data, but if you want to deploy\non resource-limited platforms, you can try the `--architecture` flag with a\nMobilenet model. For example:\n\n```bash\npython tensorflow/examples/image_retraining/retrain.py \\\n --image_dir ~/flower_photos --architecture mobilenet_1.0_224\n```\n\nThere are 32 different Mobilenet models to choose from, with a variety of file\nsize and latency options. The first number can be '1.0', '0.75', '0.50', or\n'0.25' to control the size, and the second controls the input image size, either\n'224', '192', '160', or '128', with smaller sizes running faster. 
See\nhttps://research.googleblog.com/2017/06/mobilenets-open-source-models-for.html\nfor more information on Mobilenet.\n\nTo use with TensorBoard:\n\nBy default, this script will log summaries to /tmp/retrain_logs directory\n\nVisualize the summaries with this command:\n\ntensorboard --logdir /tmp/retrain_logs\n\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport collections\nfrom datetime import datetime\nimport hashlib\nimport os.path\nimport random\nimport re\nimport sys\nimport tarfile\n\nimport numpy as np\nfrom six.moves import urllib\nimport tensorflow as tf\n\nfrom tensorflow.python.framework import graph_util\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.platform import gfile\nfrom tensorflow.python.util import compat\n\nFLAGS = None\n\n# These are all parameters that are tied to the particular model architecture\n# we're using for Inception v3. These include things like tensor names and their\n# sizes. If you want to adapt this script to work with another model, you will\n# need to update these to reflect the values in the network you're using.\nMAX_NUM_IMAGES_PER_CLASS = 2 ** 27 - 1 # ~134M\n\n\ndef create_image_lists(image_dir, testing_percentage, validation_percentage):\n \"\"\"Builds a list of training images from the file system.\n\n Analyzes the sub folders in the image directory, splits them into stable\n training, testing, and validation sets, and returns a data structure\n describing the lists of images for each label and their paths.\n\n Args:\n image_dir: String path to a folder containing subfolders of images.\n testing_percentage: Integer percentage of the images to reserve for tests.\n validation_percentage: Integer percentage of images reserved for validation.\n\n Returns:\n A dictionary containing an entry for each label subfolder, with images split\n into training, testing, and validation sets within each label.\n \"\"\"\n if not gfile.Exists(image_dir):\n tf.logging.error(\"Image directory '\" + image_dir + \"' not found.\")\n return None\n result = collections.OrderedDict()\n sub_dirs = [\n os.path.join(image_dir,item)\n for item in gfile.ListDirectory(image_dir)]\n sub_dirs = sorted(item for item in sub_dirs\n if gfile.IsDirectory(item))\n for sub_dir in sub_dirs:\n extensions = ['jpg', 'jpeg', 'JPG', 'JPEG']\n file_list = []\n dir_name = os.path.basename(sub_dir)\n if dir_name == image_dir:\n continue\n tf.logging.info(\"Looking for images in '\" + dir_name + \"'\")\n for extension in extensions:\n file_glob = os.path.join(image_dir, dir_name, '*.' + extension)\n file_list.extend(gfile.Glob(file_glob))\n if not file_list:\n tf.logging.warning('No files found')\n continue\n if len(file_list) < 20:\n tf.logging.warning(\n 'WARNING: Folder has less than 20 images, which may cause issues.')\n elif len(file_list) > MAX_NUM_IMAGES_PER_CLASS:\n tf.logging.warning(\n 'WARNING: Folder {} has more than {} images. Some images will '\n 'never be selected.'.format(dir_name, MAX_NUM_IMAGES_PER_CLASS))\n label_name = re.sub(r'[^a-z0-9]+', ' ', dir_name.lower())\n training_images = []\n testing_images = []\n validation_images = []\n for file_name in file_list:\n base_name = os.path.basename(file_name)\n # We want to ignore anything after '_nohash_' in the file name when\n # deciding which set to put an image in, the data set creator has a way of\n # grouping photos that are close variations of each other. 
For example\n # this is used in the plant disease data set to group multiple pictures of\n # the same leaf.\n hash_name = re.sub(r'_nohash_.*$', '', file_name)\n # This looks a bit magical, but we need to decide whether this file should\n # go into the training, testing, or validation sets, and we want to keep\n # existing files in the same set even if more files are subsequently\n # added.\n # To do that, we need a stable way of deciding based on just the file name\n # itself, so we do a hash of that and then use that to generate a\n # probability value that we use to assign it.\n hash_name_hashed = hashlib.sha1(compat.as_bytes(hash_name)).hexdigest()\n percentage_hash = ((int(hash_name_hashed, 16) %\n (MAX_NUM_IMAGES_PER_CLASS + 1)) *\n (100.0 / MAX_NUM_IMAGES_PER_CLASS))\n if percentage_hash < validation_percentage:\n validation_images.append(base_name)\n elif percentage_hash < (testing_percentage + validation_percentage):\n testing_images.append(base_name)\n else:\n training_images.append(base_name)\n result[label_name] = {\n 'dir': dir_name,\n 'training': training_images,\n 'testing': testing_images,\n 'validation': validation_images,\n }\n return result\n\n\ndef get_image_path(image_lists, label_name, index, image_dir, category):\n \"\"\"\"Returns a path to an image for a label at the given index.\n\n Args:\n image_lists: Dictionary of training images for each label.\n label_name: Label string we want to get an image for.\n index: Int offset of the image we want. This will be moduloed by the\n available number of images for the label, so it can be arbitrarily large.\n image_dir: Root folder string of the subfolders containing the training\n images.\n category: Name string of set to pull images from - training, testing, or\n validation.\n\n Returns:\n File system path string to an image that meets the requested parameters.\n\n \"\"\"\n if label_name not in image_lists:\n tf.logging.fatal('Label does not exist %s.', label_name)\n label_lists = image_lists[label_name]\n if category not in label_lists:\n tf.logging.fatal('Category does not exist %s.', category)\n category_list = label_lists[category]\n if not category_list:\n tf.logging.fatal('Label %s has no images in the category %s.',\n label_name, category)\n mod_index = index % len(category_list)\n base_name = category_list[mod_index]\n sub_dir = label_lists['dir']\n full_path = os.path.join(image_dir, sub_dir, base_name)\n return full_path\n\n\ndef get_bottleneck_path(image_lists, label_name, index, bottleneck_dir,\n category, architecture):\n \"\"\"\"Returns a path to a bottleneck file for a label at the given index.\n\n Args:\n image_lists: Dictionary of training images for each label.\n label_name: Label string we want to get an image for.\n index: Integer offset of the image we want. 
This will be moduloed by the\n available number of images for the label, so it can be arbitrarily large.\n bottleneck_dir: Folder string holding cached files of bottleneck values.\n category: Name string of set to pull images from - training, testing, or\n validation.\n architecture: The name of the model architecture.\n\n Returns:\n File system path string to an image that meets the requested parameters.\n \"\"\"\n return get_image_path(image_lists, label_name, index, bottleneck_dir,\n category) + '_' + architecture + '.txt'\n\n\ndef create_model_graph(model_info):\n \"\"\"\"Creates a graph from saved GraphDef file and returns a Graph object.\n\n Args:\n model_info: Dictionary containing information about the model architecture.\n\n Returns:\n Graph holding the trained Inception network, and various tensors we'll be\n manipulating.\n \"\"\"\n with tf.Graph().as_default() as graph:\n if(FLAGS.model_file_name):\n file_name = FLAGS.model_file_name\n else:\n file_name = model_info['model_file_name']\n\n model_path = os.path.join(FLAGS.model_dir, file_name)\n print(model_path)\n with gfile.FastGFile(model_path, 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n bottleneck_tensor, resized_input_tensor = (tf.import_graph_def(\n graph_def,\n name='',\n return_elements=[\n model_info['bottleneck_tensor_name'],\n model_info['resized_input_tensor_name'],\n ]))\n return graph, bottleneck_tensor, resized_input_tensor\n\n\ndef run_bottleneck_on_image(sess, image_data, image_data_tensor,\n decoded_image_tensor, resized_input_tensor,\n bottleneck_tensor):\n \"\"\"Runs inference on an image to extract the 'bottleneck' summary layer.\n\n Args:\n sess: Current active TensorFlow Session.\n image_data: String of raw JPEG data.\n image_data_tensor: Input data layer in the graph.\n decoded_image_tensor: Output of initial image resizing and preprocessing.\n resized_input_tensor: The input node of the recognition graph.\n bottleneck_tensor: Layer before the final softmax.\n\n Returns:\n Numpy array of bottleneck values.\n \"\"\"\n # First decode the JPEG image, resize it, and rescale the pixel values.\n resized_input_values = sess.run(decoded_image_tensor,\n {image_data_tensor: image_data})\n # Then run it through the recognition network.\n bottleneck_values = sess.run(bottleneck_tensor,\n {resized_input_tensor: resized_input_values})\n bottleneck_values = np.squeeze(bottleneck_values)\n return bottleneck_values\n\n\ndef maybe_download_and_extract(data_url):\n \"\"\"Download and extract model tar file.\n\n If the pretrained model we're using doesn't already exist, this function\n downloads it from the TensorFlow.org website and unpacks it into a directory.\n\n Args:\n data_url: Web location of the tar file containing the pretrained model.\n \"\"\"\n dest_directory = FLAGS.model_dir\n print(dest_directory)\n if not os.path.exists(dest_directory):\n os.makedirs(dest_directory)\n filename = data_url.split('/')[-1]\n filepath = os.path.join(dest_directory, filename)\n print(filepath)\n if not os.path.exists(filepath):\n\n def _progress(count, block_size, total_size):\n sys.stdout.write('\\r>> Downloading %s %.1f%%' %\n (filename,\n float(count * block_size) / float(total_size) * 100.0))\n sys.stdout.flush()\n\n filepath, _ = urllib.request.urlretrieve(data_url, filepath, _progress)\n print()\n statinfo = os.stat(filepath)\n tf.logging.info('Successfully downloaded %s %d bytes.' 
%\n (filename, statinfo.st_size))\n tarfile.open(filepath, 'r:gz').extractall(dest_directory)\n\n\ndef ensure_dir_exists(dir_name):\n \"\"\"Makes sure the folder exists on disk.\n\n Args:\n dir_name: Path string to the folder we want to create.\n \"\"\"\n if not os.path.exists(dir_name):\n os.makedirs(dir_name)\n\n\nbottleneck_path_2_bottleneck_values = {}\n\n\ndef create_bottleneck_file(bottleneck_path, image_lists, label_name, index,\n image_dir, category, sess, jpeg_data_tensor,\n decoded_image_tensor, resized_input_tensor,\n bottleneck_tensor):\n \"\"\"Create a single bottleneck file.\"\"\"\n tf.logging.info('Creating bottleneck at ' + bottleneck_path)\n image_path = get_image_path(image_lists, label_name, index,\n image_dir, category)\n if not gfile.Exists(image_path):\n tf.logging.fatal('File does not exist %s', image_path)\n image_data = gfile.FastGFile(image_path, 'rb').read()\n try:\n bottleneck_values = run_bottleneck_on_image(\n sess, image_data, jpeg_data_tensor, decoded_image_tensor,\n resized_input_tensor, bottleneck_tensor)\n except Exception as e:\n raise RuntimeError('Error during processing file %s (%s)' % (image_path,\n str(e)))\n bottleneck_string = ','.join(str(x) for x in bottleneck_values)\n with open(bottleneck_path, 'w') as bottleneck_file:\n bottleneck_file.write(bottleneck_string)\n\n\ndef get_or_create_bottleneck(sess, image_lists, label_name, index, image_dir,\n category, bottleneck_dir, jpeg_data_tensor,\n decoded_image_tensor, resized_input_tensor,\n bottleneck_tensor, architecture):\n \"\"\"Retrieves or calculates bottleneck values for an image.\n\n If a cached version of the bottleneck data exists on-disk, return that,\n otherwise calculate the data and save it to disk for future use.\n\n Args:\n sess: The current active TensorFlow Session.\n image_lists: Dictionary of training images for each label.\n label_name: Label string we want to get an image for.\n index: Integer offset of the image we want. 
This will be modulo-ed by the\n available number of images for the label, so it can be arbitrarily large.\n image_dir: Root folder string of the subfolders containing the training\n images.\n category: Name string of which set to pull images from - training, testing,\n or validation.\n bottleneck_dir: Folder string holding cached files of bottleneck values.\n jpeg_data_tensor: The tensor to feed loaded jpeg data into.\n decoded_image_tensor: The output of decoding and resizing the image.\n resized_input_tensor: The input node of the recognition graph.\n bottleneck_tensor: The output tensor for the bottleneck values.\n architecture: The name of the model architecture.\n\n Returns:\n Numpy array of values produced by the bottleneck layer for the image.\n \"\"\"\n label_lists = image_lists[label_name]\n sub_dir = label_lists['dir']\n sub_dir_path = os.path.join(bottleneck_dir, sub_dir)\n ensure_dir_exists(sub_dir_path)\n bottleneck_path = get_bottleneck_path(image_lists, label_name, index,\n bottleneck_dir, category, architecture)\n if not os.path.exists(bottleneck_path):\n create_bottleneck_file(bottleneck_path, image_lists, label_name, index,\n image_dir, category, sess, jpeg_data_tensor,\n decoded_image_tensor, resized_input_tensor,\n bottleneck_tensor)\n with open(bottleneck_path, 'r') as bottleneck_file:\n bottleneck_string = bottleneck_file.read()\n did_hit_error = False\n try:\n bottleneck_values = [float(x) for x in bottleneck_string.split(',')]\n except ValueError:\n tf.logging.warning('Invalid float found, recreating bottleneck')\n did_hit_error = True\n if did_hit_error:\n create_bottleneck_file(bottleneck_path, image_lists, label_name, index,\n image_dir, category, sess, jpeg_data_tensor,\n decoded_image_tensor, resized_input_tensor,\n bottleneck_tensor)\n with open(bottleneck_path, 'r') as bottleneck_file:\n bottleneck_string = bottleneck_file.read()\n # Allow exceptions to propagate here, since they shouldn't happen after a\n # fresh creation\n bottleneck_values = [float(x) for x in bottleneck_string.split(',')]\n return bottleneck_values\n\n\ndef cache_bottlenecks(sess, image_lists, image_dir, bottleneck_dir,\n jpeg_data_tensor, decoded_image_tensor,\n resized_input_tensor, bottleneck_tensor, architecture):\n \"\"\"Ensures all the training, testing, and validation bottlenecks are cached.\n\n Because we're likely to read the same image multiple times (if there are no\n distortions applied during training) it can speed things up a lot if we\n calculate the bottleneck layer values once for each image during\n preprocessing, and then just read those cached values repeatedly during\n training. 
Here we go through all the images we've found, calculate those\n values, and save them off.\n\n Args:\n sess: The current active TensorFlow Session.\n image_lists: Dictionary of training images for each label.\n image_dir: Root folder string of the subfolders containing the training\n images.\n bottleneck_dir: Folder string holding cached files of bottleneck values.\n jpeg_data_tensor: Input tensor for jpeg data from file.\n decoded_image_tensor: The output of decoding and resizing the image.\n resized_input_tensor: The input node of the recognition graph.\n bottleneck_tensor: The penultimate output layer of the graph.\n architecture: The name of the model architecture.\n\n Returns:\n Nothing.\n \"\"\"\n how_many_bottlenecks = 0\n ensure_dir_exists(bottleneck_dir)\n for label_name, label_lists in image_lists.items():\n for category in ['training', 'testing', 'validation']:\n category_list = label_lists[category]\n for index, unused_base_name in enumerate(category_list):\n get_or_create_bottleneck(\n sess, image_lists, label_name, index, image_dir, category,\n bottleneck_dir, jpeg_data_tensor, decoded_image_tensor,\n resized_input_tensor, bottleneck_tensor, architecture)\n\n how_many_bottlenecks += 1\n if how_many_bottlenecks % 100 == 0:\n tf.logging.info(\n str(how_many_bottlenecks) + ' bottleneck files created.')\n\n\ndef get_random_cached_bottlenecks(sess, image_lists, how_many, category,\n bottleneck_dir, image_dir, jpeg_data_tensor,\n decoded_image_tensor, resized_input_tensor,\n bottleneck_tensor, architecture):\n \"\"\"Retrieves bottleneck values for cached images.\n\n If no distortions are being applied, this function can retrieve the cached\n bottleneck values directly from disk for images. It picks a random set of\n images from the specified category.\n\n Args:\n sess: Current TensorFlow Session.\n image_lists: Dictionary of training images for each label.\n how_many: If positive, a random sample of this size will be chosen.\n If negative, all bottlenecks will be retrieved.\n category: Name string of which set to pull from - training, testing, or\n validation.\n bottleneck_dir: Folder string holding cached files of bottleneck values.\n image_dir: Root folder string of the subfolders containing the training\n images.\n jpeg_data_tensor: The layer to feed jpeg image data into.\n decoded_image_tensor: The output of decoding and resizing the image.\n resized_input_tensor: The input node of the recognition graph.\n bottleneck_tensor: The bottleneck output layer of the CNN graph.\n architecture: The name of the model architecture.\n\n Returns:\n List of bottleneck arrays, their corresponding ground truths, and the\n relevant filenames.\n \"\"\"\n class_count = len(image_lists.keys())\n bottlenecks = []\n ground_truths = []\n filenames = []\n if how_many >= 0:\n # Retrieve a random sample of bottlenecks.\n for unused_i in range(how_many):\n label_index = random.randrange(class_count)\n label_name = list(image_lists.keys())[label_index]\n image_index = random.randrange(MAX_NUM_IMAGES_PER_CLASS + 1)\n image_name = get_image_path(image_lists, label_name, image_index,\n image_dir, category)\n bottleneck = get_or_create_bottleneck(\n sess, image_lists, label_name, image_index, image_dir, category,\n bottleneck_dir, jpeg_data_tensor, decoded_image_tensor,\n resized_input_tensor, bottleneck_tensor, architecture)\n ground_truth = np.zeros(class_count, dtype=np.float32)\n ground_truth[label_index] = 1.0\n bottlenecks.append(bottleneck)\n ground_truths.append(ground_truth)\n 
filenames.append(image_name)\n else:\n # Retrieve all bottlenecks.\n for label_index, label_name in enumerate(image_lists.keys()):\n for image_index, image_name in enumerate(\n image_lists[label_name][category]):\n image_name = get_image_path(image_lists, label_name, image_index,\n image_dir, category)\n bottleneck = get_or_create_bottleneck(\n sess, image_lists, label_name, image_index, image_dir, category,\n bottleneck_dir, jpeg_data_tensor, decoded_image_tensor,\n resized_input_tensor, bottleneck_tensor, architecture)\n ground_truth = np.zeros(class_count, dtype=np.float32)\n ground_truth[label_index] = 1.0\n bottlenecks.append(bottleneck)\n ground_truths.append(ground_truth)\n filenames.append(image_name)\n return bottlenecks, ground_truths, filenames\n\n\ndef get_random_distorted_bottlenecks(\n sess, image_lists, how_many, category, image_dir, input_jpeg_tensor,\n distorted_image, resized_input_tensor, bottleneck_tensor):\n \"\"\"Retrieves bottleneck values for training images, after distortions.\n\n If we're training with distortions like crops, scales, or flips, we have to\n recalculate the full model for every image, and so we can't use cached\n bottleneck values. Instead we find random images for the requested category,\n run them through the distortion graph, and then the full graph to get the\n bottleneck results for each.\n\n Args:\n sess: Current TensorFlow Session.\n image_lists: Dictionary of training images for each label.\n how_many: The integer number of bottleneck values to return.\n category: Name string of which set of images to fetch - training, testing,\n or validation.\n image_dir: Root folder string of the subfolders containing the training\n images.\n input_jpeg_tensor: The input layer we feed the image data to.\n distorted_image: The output node of the distortion graph.\n resized_input_tensor: The input node of the recognition graph.\n bottleneck_tensor: The bottleneck output layer of the CNN graph.\n\n Returns:\n List of bottleneck arrays and their corresponding ground truths.\n \"\"\"\n class_count = len(image_lists.keys())\n bottlenecks = []\n ground_truths = []\n for unused_i in range(how_many):\n label_index = random.randrange(class_count)\n label_name = list(image_lists.keys())[label_index]\n image_index = random.randrange(MAX_NUM_IMAGES_PER_CLASS + 1)\n image_path = get_image_path(image_lists, label_name, image_index, image_dir,\n category)\n if not gfile.Exists(image_path):\n tf.logging.fatal('File does not exist %s', image_path)\n jpeg_data = gfile.FastGFile(image_path, 'rb').read()\n # Note that we materialize the distorted_image_data as a numpy array before\n # sending running inference on the image. 
This involves 2 memory copies and\n # might be optimized in other implementations.\n distorted_image_data = sess.run(distorted_image,\n {input_jpeg_tensor: jpeg_data})\n bottleneck_values = sess.run(bottleneck_tensor,\n {resized_input_tensor: distorted_image_data})\n bottleneck_values = np.squeeze(bottleneck_values)\n ground_truth = np.zeros(class_count, dtype=np.float32)\n ground_truth[label_index] = 1.0\n bottlenecks.append(bottleneck_values)\n ground_truths.append(ground_truth)\n return bottlenecks, ground_truths\n\n\ndef should_distort_images(flip_left_right, random_crop, random_scale,\n random_brightness):\n \"\"\"Whether any distortions are enabled, from the input flags.\n\n Args:\n flip_left_right: Boolean whether to randomly mirror images horizontally.\n random_crop: Integer percentage setting the total margin used around the\n crop box.\n random_scale: Integer percentage of how much to vary the scale by.\n random_brightness: Integer range to randomly multiply the pixel values by.\n\n Returns:\n Boolean value indicating whether any distortions should be applied.\n \"\"\"\n return (flip_left_right or (random_crop != 0) or (random_scale != 0) or\n (random_brightness != 0))\n\n\ndef add_input_distortions(flip_left_right, random_crop, random_scale,\n random_brightness, input_width, input_height,\n input_depth, input_mean, input_std):\n \"\"\"Creates the operations to apply the specified distortions.\n\n During training it can help to improve the results if we run the images\n through simple distortions like crops, scales, and flips. These reflect the\n kind of variations we expect in the real world, and so can help train the\n model to cope with natural data more effectively. Here we take the supplied\n parameters and construct a network of operations to apply them to an image.\n\n Cropping\n ~~~~~~~~\n\n Cropping is done by placing a bounding box at a random position in the full\n image. The cropping parameter controls the size of that box relative to the\n input image. If it's zero, then the box is the same size as the input and no\n cropping is performed. If the value is 50%, then the crop box will be half the\n width and height of the input. In a diagram it looks like this:\n\n < width >\n +---------------------+\n | |\n | width - crop% |\n | < > |\n | +------+ |\n | | | |\n | | | |\n | | | |\n | +------+ |\n | |\n | |\n +---------------------+\n\n Scaling\n ~~~~~~~\n\n Scaling is a lot like cropping, except that the bounding box is always\n centered and its size varies randomly within the given range. For example if\n the scale percentage is zero, then the bounding box is the same size as the\n input and no scaling is applied. 
If it's 50%, then the bounding box will range\n randomly between half the input's width and height and its full size.\n\n Args:\n flip_left_right: Boolean whether to randomly mirror images horizontally.\n random_crop: Integer percentage setting the total margin used around the\n crop box.\n random_scale: Integer percentage of how much to vary the scale by.\n random_brightness: Integer range to randomly multiply the pixel values by.\n input_width: Horizontal size of expected input image to model.\n input_height: Vertical size of expected input image to model.\n input_depth: How many channels the expected input image should have.\n input_mean: Pixel value that should be zero in the image for the graph.\n input_std: How much to divide the pixel values by before recognition.\n\n Returns:\n The jpeg input layer and the distorted result tensor.\n \"\"\"\n\n jpeg_data = tf.placeholder(tf.string, name='DistortJPGInput')\n decoded_image = tf.image.decode_jpeg(jpeg_data, channels=input_depth)\n decoded_image_as_float = tf.cast(decoded_image, dtype=tf.float32)\n decoded_image_4d = tf.expand_dims(decoded_image_as_float, 0)\n margin_scale = 1.0 + (random_crop / 100.0)\n resize_scale = 1.0 + (random_scale / 100.0)\n margin_scale_value = tf.constant(margin_scale)\n resize_scale_value = tf.random_uniform(tensor_shape.scalar(),\n minval=1.0,\n maxval=resize_scale)\n scale_value = tf.multiply(margin_scale_value, resize_scale_value)\n precrop_width = tf.multiply(scale_value, input_width)\n precrop_height = tf.multiply(scale_value, input_height)\n precrop_shape = tf.stack([precrop_height, precrop_width])\n precrop_shape_as_int = tf.cast(precrop_shape, dtype=tf.int32)\n precropped_image = tf.image.resize_bilinear(decoded_image_4d,\n precrop_shape_as_int)\n precropped_image_3d = tf.squeeze(precropped_image, squeeze_dims=[0])\n cropped_image = tf.random_crop(precropped_image_3d,\n [input_height, input_width, input_depth])\n if flip_left_right:\n flipped_image = tf.image.random_flip_left_right(cropped_image)\n else:\n flipped_image = cropped_image\n brightness_min = 1.0 - (random_brightness / 100.0)\n brightness_max = 1.0 + (random_brightness / 100.0)\n brightness_value = tf.random_uniform(tensor_shape.scalar(),\n minval=brightness_min,\n maxval=brightness_max)\n brightened_image = tf.multiply(flipped_image, brightness_value)\n offset_image = tf.subtract(brightened_image, input_mean)\n mul_image = tf.multiply(offset_image, 1.0 / input_std)\n distort_result = tf.expand_dims(mul_image, 0, name='DistortResult')\n return jpeg_data, distort_result\n\n\ndef variable_summaries(var):\n \"\"\"Attach a lot of summaries to a Tensor (for TensorBoard visualization).\"\"\"\n with tf.name_scope('summaries'):\n mean = tf.reduce_mean(var)\n tf.summary.scalar('mean', mean)\n with tf.name_scope('stddev'):\n stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))\n tf.summary.scalar('stddev', stddev)\n tf.summary.scalar('max', tf.reduce_max(var))\n tf.summary.scalar('min', tf.reduce_min(var))\n tf.summary.histogram('histogram', var)\n\n\ndef add_final_training_ops(class_count, final_tensor_name, bottleneck_tensor,\n bottleneck_tensor_size):\n \"\"\"Adds a new softmax and fully-connected layer for training.\n\n We need to retrain the top layer to identify our new classes, so this function\n adds the right operations to the graph, along with some variables to hold the\n weights, and then sets up all the gradients for the backward pass.\n\n The setup for the softmax and fully-connected layers is based on:\n 
https://www.tensorflow.org/versions/master/tutorials/mnist/beginners/index.html\n\n Args:\n class_count: Integer of how many categories of things we're trying to\n recognize.\n final_tensor_name: Name string for the new final node that produces results.\n bottleneck_tensor: The output of the main CNN graph.\n bottleneck_tensor_size: How many entries in the bottleneck vector.\n\n Returns:\n The tensors for the training and cross entropy results, and tensors for the\n bottleneck input and ground truth input.\n \"\"\"\n with tf.name_scope('input'):\n bottleneck_input = tf.placeholder_with_default(\n bottleneck_tensor,\n shape=[None, bottleneck_tensor_size],\n name='BottleneckInputPlaceholder')\n\n ground_truth_input = tf.placeholder(tf.float32,\n [None, class_count],\n name='GroundTruthInput')\n\n # Organizing the following ops as `final_training_ops` so they're easier\n # to see in TensorBoard\n layer_name = 'final_training_ops'\n with tf.name_scope(layer_name):\n with tf.name_scope('weights'):\n initial_value = tf.truncated_normal(\n [bottleneck_tensor_size, class_count], stddev=0.001)\n\n layer_weights = tf.Variable(initial_value, name='final_weights')\n\n variable_summaries(layer_weights)\n with tf.name_scope('biases'):\n layer_biases = tf.Variable(tf.zeros([class_count]), name='final_biases')\n variable_summaries(layer_biases)\n with tf.name_scope('Wx_plus_b'):\n logits = tf.matmul(bottleneck_input, layer_weights) + layer_biases\n tf.summary.histogram('pre_activations', logits)\n\n final_tensor = tf.nn.softmax(logits, name=final_tensor_name)\n tf.summary.histogram('activations', final_tensor)\n\n with tf.name_scope('cross_entropy'):\n cross_entropy = tf.nn.softmax_cross_entropy_with_logits(\n labels=ground_truth_input, logits=logits)\n with tf.name_scope('total'):\n cross_entropy_mean = tf.reduce_mean(cross_entropy)\n tf.summary.scalar('cross_entropy', cross_entropy_mean)\n\n with tf.name_scope('train'):\n optimizer = tf.train.GradientDescentOptimizer(FLAGS.learning_rate)\n train_step = optimizer.minimize(cross_entropy_mean)\n\n return (train_step, cross_entropy_mean, bottleneck_input, ground_truth_input,\n final_tensor)\n\n\ndef add_evaluation_step(result_tensor, ground_truth_tensor):\n \"\"\"Inserts the operations we need to evaluate the accuracy of our results.\n\n Args:\n result_tensor: The new final node that produces results.\n ground_truth_tensor: The node we feed ground truth data\n into.\n\n Returns:\n Tuple of (evaluation step, prediction).\n \"\"\"\n with tf.name_scope('accuracy'):\n with tf.name_scope('correct_prediction'):\n prediction = tf.argmax(result_tensor, 1)\n correct_prediction = tf.equal(\n prediction, tf.argmax(ground_truth_tensor, 1))\n with tf.name_scope('accuracy'):\n evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n tf.summary.scalar('accuracy', evaluation_step)\n return evaluation_step, prediction\n\n\ndef save_graph_to_file(sess, graph, graph_file_name):\n output_graph_def = graph_util.convert_variables_to_constants(\n sess, graph.as_graph_def(), [FLAGS.final_tensor_name])\n with gfile.FastGFile(graph_file_name, 'wb') as f:\n f.write(output_graph_def.SerializeToString())\n return\n\n\ndef prepare_file_system():\n # Setup the directory we'll write summaries to for TensorBoard\n if tf.gfile.Exists(FLAGS.summaries_dir):\n tf.gfile.DeleteRecursively(FLAGS.summaries_dir)\n tf.gfile.MakeDirs(FLAGS.summaries_dir)\n if FLAGS.intermediate_store_frequency > 0:\n ensure_dir_exists(FLAGS.intermediate_output_graphs_dir)\n return\n\n\ndef 
create_model_info(architecture, model_download_url):\n \"\"\"Given the name of a model architecture, returns information about it.\n\n There are different base image recognition pretrained models that can be\n retrained using transfer learning, and this function translates from the name\n of a model to the attributes that are needed to download and train with it.\n\n Args:\n architecture: Name of a model architecture.\n\n Returns:\n Dictionary of information about the model, or None if the name isn't\n recognized\n\n Raises:\n ValueError: If architecture name is unknown.\n \"\"\"\n architecture = architecture.lower()\n if architecture == 'inception_v3':\n # pylint: disable=line-too-long\n data_url = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'\n # pylint: enable=line-too-long\n bottleneck_tensor_name = 'pool_3/_reshape:0'\n bottleneck_tensor_size = 2048\n input_width = 299\n input_height = 299\n input_depth = 3\n resized_input_tensor_name = 'Mul:0'\n model_file_name = 'classify_image_graph_def.pb'\n input_mean = 128\n input_std = 128\n elif architecture.startswith('mobilenet_'):\n parts = architecture.split('_')\n if len(parts) != 3 and len(parts) != 4:\n tf.logging.error(\"Couldn't understand architecture name '%s'\",\n architecture)\n return None\n version_string = parts[1]\n if (version_string != '1.0' and version_string != '0.75' and\n version_string != '0.50' and version_string != '0.25'):\n tf.logging.error(\n \"\"\"\"The Mobilenet version should be '1.0', '0.75', '0.50', or '0.25',\n but found '%s' for architecture '%s'\"\"\",\n version_string, architecture)\n return None\n size_string = parts[2]\n if (size_string != '224' and size_string != '192' and\n size_string != '160' and size_string != '128'):\n tf.logging.error(\n \"\"\"The Mobilenet input size should be '224', '192', '160', or '128',\n but found '%s' for architecture '%s'\"\"\",\n size_string, architecture)\n return None\n if len(parts) == 3:\n is_quantized = False\n else:\n if parts[3] != 'quantized':\n tf.logging.error(\n \"Couldn't understand architecture suffix '%s' for '%s'\", parts[3],\n architecture)\n return None\n is_quantized = True\n data_url = model_download_url + 'mobilenet_v1_' + version_string + '_' + size_string + '_frozen.tgz'\n print(data_url)\n bottleneck_tensor_name = 'MobilenetV1/Predictions/Reshape:0'\n bottleneck_tensor_size = 1001\n input_width = int(size_string)\n input_height = int(size_string)\n input_depth = 3\n resized_input_tensor_name = 'input:0'\n if is_quantized:\n model_base_name = 'quantized_graph.pb'\n else:\n model_base_name = 'frozen_graph.pb'\n model_dir_name = 'mobilenet_v1_' + version_string + '_' + size_string\n model_file_name = os.path.join(model_dir_name, model_base_name)\n input_mean = 127.5\n input_std = 127.5\n else:\n tf.logging.error(\"Couldn't understand architecture name '%s'\", architecture)\n raise ValueError('Unknown architecture', architecture)\n\n return {\n 'data_url': data_url,\n 'bottleneck_tensor_name': bottleneck_tensor_name,\n 'bottleneck_tensor_size': bottleneck_tensor_size,\n 'input_width': input_width,\n 'input_height': input_height,\n 'input_depth': input_depth,\n 'resized_input_tensor_name': resized_input_tensor_name,\n 'model_file_name': model_file_name,\n 'input_mean': input_mean,\n 'input_std': input_std,\n }\n\n\ndef add_jpeg_decoding(input_width, input_height, input_depth, input_mean,\n input_std):\n \"\"\"Adds operations that perform JPEG decoding and resizing to the graph..\n\n Args:\n input_width: Desired width of 
the image fed into the recognizer graph.\n input_height: Desired height of the image fed into the recognizer graph.\n input_depth: Desired channels of the image fed into the recognizer graph.\n input_mean: Pixel value that should be zero in the image for the graph.\n input_std: How much to divide the pixel values by before recognition.\n\n Returns:\n Tensors for the node to feed JPEG data into, and the output of the\n preprocessing steps.\n \"\"\"\n jpeg_data = tf.placeholder(tf.string, name='DecodeJPGInput')\n decoded_image = tf.image.decode_jpeg(jpeg_data, channels=input_depth)\n decoded_image_as_float = tf.cast(decoded_image, dtype=tf.float32)\n decoded_image_4d = tf.expand_dims(decoded_image_as_float, 0)\n resize_shape = tf.stack([input_height, input_width])\n resize_shape_as_int = tf.cast(resize_shape, dtype=tf.int32)\n resized_image = tf.image.resize_bilinear(decoded_image_4d,\n resize_shape_as_int)\n offset_image = tf.subtract(resized_image, input_mean)\n mul_image = tf.multiply(offset_image, 1.0 / input_std)\n return jpeg_data, mul_image\n\n\ndef main(_):\n # Needed to make sure the logging output is visible.\n # See https://github.com/tensorflow/tensorflow/issues/3047\n tf.logging.set_verbosity(tf.logging.INFO)\n\n # Prepare necessary directories that can be used during training\n prepare_file_system()\n\n # Gather information about the model architecture we'll be using.\n model_info = create_model_info(FLAGS.architecture, FLAGS.model_download_url)\n if not model_info:\n tf.logging.error('Did not recognize architecture flag')\n return -1\n\n # Set up the pre-trained graph.\n maybe_download_and_extract(model_info['data_url'])\n graph, bottleneck_tensor, resized_image_tensor = (\n create_model_graph(model_info))\n\n # Look at the folder structure, and create lists of all the images.\n image_lists = create_image_lists(FLAGS.image_dir, FLAGS.testing_percentage,\n FLAGS.validation_percentage)\n class_count = len(image_lists.keys())\n if class_count == 0:\n tf.logging.error('No valid folders of images found at ' + FLAGS.image_dir)\n return -1\n if class_count == 1:\n tf.logging.error('Only one valid folder of images found at ' +\n FLAGS.image_dir +\n ' - multiple classes are needed for classification.')\n return -1\n\n # See if the command-line flags mean we're applying any distortions.\n do_distort_images = should_distort_images(\n FLAGS.flip_left_right, FLAGS.random_crop, FLAGS.random_scale,\n FLAGS.random_brightness)\n\n with tf.Session(graph=graph) as sess:\n # Set up the image decoding sub-graph.\n jpeg_data_tensor, decoded_image_tensor = add_jpeg_decoding(\n model_info['input_width'], model_info['input_height'],\n model_info['input_depth'], model_info['input_mean'],\n model_info['input_std'])\n\n if do_distort_images:\n # We will be applying distortions, so set up the operations we'll need.\n (distorted_jpeg_data_tensor,\n distorted_image_tensor) = add_input_distortions(\n FLAGS.flip_left_right, FLAGS.random_crop, FLAGS.random_scale,\n FLAGS.random_brightness, model_info['input_width'],\n model_info['input_height'], model_info['input_depth'],\n model_info['input_mean'], model_info['input_std'])\n else:\n # We'll make sure we've calculated the 'bottleneck' image summaries and\n # cached them on disk.\n cache_bottlenecks(sess, image_lists, FLAGS.image_dir,\n FLAGS.bottleneck_dir, jpeg_data_tensor,\n decoded_image_tensor, resized_image_tensor,\n bottleneck_tensor, FLAGS.architecture)\n\n # Add the new layer that we'll be training.\n (train_step, cross_entropy, bottleneck_input, 
ground_truth_input,\n final_tensor) = add_final_training_ops(\n len(image_lists.keys()), FLAGS.final_tensor_name, bottleneck_tensor,\n model_info['bottleneck_tensor_size'])\n\n # Create the operations we need to evaluate the accuracy of our new layer.\n evaluation_step, prediction = add_evaluation_step(\n final_tensor, ground_truth_input)\n\n # Merge all the summaries and write them out to the summaries_dir\n merged = tf.summary.merge_all()\n train_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/train',\n sess.graph)\n\n validation_writer = tf.summary.FileWriter(\n FLAGS.summaries_dir + '/validation')\n\n # Set up all our weights to their initial default values.\n init = tf.global_variables_initializer()\n sess.run(init)\n\n # Run the training for as many cycles as requested on the command line.\n for i in range(FLAGS.how_many_training_steps):\n # Get a batch of input bottleneck values, either calculated fresh every\n # time with distortions applied, or from the cache stored on disk.\n if do_distort_images:\n (train_bottlenecks,\n train_ground_truth) = get_random_distorted_bottlenecks(\n sess, image_lists, FLAGS.train_batch_size, 'training',\n FLAGS.image_dir, distorted_jpeg_data_tensor,\n distorted_image_tensor, resized_image_tensor, bottleneck_tensor)\n else:\n (train_bottlenecks,\n train_ground_truth, _) = get_random_cached_bottlenecks(\n sess, image_lists, FLAGS.train_batch_size, 'training',\n FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor,\n decoded_image_tensor, resized_image_tensor, bottleneck_tensor,\n FLAGS.architecture)\n # Feed the bottlenecks and ground truth into the graph, and run a training\n # step. Capture training summaries for TensorBoard with the `merged` op.\n train_summary, _ = sess.run(\n [merged, train_step],\n feed_dict={bottleneck_input: train_bottlenecks,\n ground_truth_input: train_ground_truth})\n train_writer.add_summary(train_summary, i)\n\n # Every so often, print out how well the graph is training.\n is_last_step = (i + 1 == FLAGS.how_many_training_steps)\n if (i % FLAGS.eval_step_interval) == 0 or is_last_step:\n train_accuracy, cross_entropy_value = sess.run(\n [evaluation_step, cross_entropy],\n feed_dict={bottleneck_input: train_bottlenecks,\n ground_truth_input: train_ground_truth})\n tf.logging.info('%s: Step %d: Train accuracy = %.1f%%' %\n (datetime.now(), i, train_accuracy * 100))\n tf.logging.info('%s: Step %d: Cross entropy = %f' %\n (datetime.now(), i, cross_entropy_value))\n validation_bottlenecks, validation_ground_truth, _ = (\n get_random_cached_bottlenecks(\n sess, image_lists, FLAGS.validation_batch_size, 'validation',\n FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor,\n decoded_image_tensor, resized_image_tensor, bottleneck_tensor,\n FLAGS.architecture))\n # Run a validation step and capture training summaries for TensorBoard\n # with the `merged` op.\n validation_summary, validation_accuracy = sess.run(\n [merged, evaluation_step],\n feed_dict={bottleneck_input: validation_bottlenecks,\n ground_truth_input: validation_ground_truth})\n validation_writer.add_summary(validation_summary, i)\n tf.logging.info('%s: Step %d: Validation accuracy = %.1f%% (N=%d)' %\n (datetime.now(), i, validation_accuracy * 100,\n len(validation_bottlenecks)))\n\n # Store intermediate results\n intermediate_frequency = FLAGS.intermediate_store_frequency\n\n if (intermediate_frequency > 0 and (i % intermediate_frequency == 0)\n and i > 0):\n intermediate_file_name = (FLAGS.intermediate_output_graphs_dir +\n 'intermediate_' + str(i) + 
'.pb')\n tf.logging.info('Save intermediate result to : ' +\n intermediate_file_name)\n save_graph_to_file(sess, graph, intermediate_file_name)\n\n # We've completed all our training, so run a final test evaluation on\n # some new images we haven't used before.\n test_bottlenecks, test_ground_truth, test_filenames = (\n get_random_cached_bottlenecks(\n sess, image_lists, FLAGS.test_batch_size, 'testing',\n FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor,\n decoded_image_tensor, resized_image_tensor, bottleneck_tensor,\n FLAGS.architecture))\n test_accuracy, predictions = sess.run(\n [evaluation_step, prediction],\n feed_dict={bottleneck_input: test_bottlenecks,\n ground_truth_input: test_ground_truth})\n tf.logging.info('Final test accuracy = %.1f%% (N=%d)' %\n (test_accuracy * 100, len(test_bottlenecks)))\n\n if FLAGS.print_misclassified_test_images:\n tf.logging.info('=== MISCLASSIFIED TEST IMAGES ===')\n for i, test_filename in enumerate(test_filenames):\n if predictions[i] != test_ground_truth[i].argmax():\n tf.logging.info('%70s %s' %\n (test_filename,\n list(image_lists.keys())[predictions[i]]))\n\n # Write out the trained graph and labels with the weights stored as\n # constants.\n save_graph_to_file(sess, graph, FLAGS.output_graph)\n with gfile.FastGFile(FLAGS.output_labels, 'w') as f:\n f.write('\\n'.join(image_lists.keys()) + '\\n')\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--image_dir',\n type=str,\n default='',\n help='Path to folders of labeled images.'\n )\n parser.add_argument(\n '--output_graph',\n type=str,\n default='/tmp/output_graph.pb',\n help='Where to save the trained graph.'\n )\n parser.add_argument(\n '--intermediate_output_graphs_dir',\n type=str,\n default='/tmp/intermediate_graph/',\n help='Where to save the intermediate graphs.'\n )\n parser.add_argument(\n '--intermediate_store_frequency',\n type=int,\n default=0,\n help=\"\"\"\\\n How many steps to store intermediate graph. If \"0\" then will not\n store.\\\n \"\"\"\n )\n parser.add_argument(\n '--output_labels',\n type=str,\n default='/tmp/output_labels.txt',\n help='Where to save the trained graph\\'s labels.'\n )\n parser.add_argument(\n '--summaries_dir',\n type=str,\n default='/tmp/retrain_logs',\n help='Where to save summary logs for TensorBoard.'\n )\n parser.add_argument(\n '--how_many_training_steps',\n type=int,\n default=4000,\n help='How many training steps to run before ending.'\n )\n parser.add_argument(\n '--learning_rate',\n type=float,\n default=0.01,\n help='How large a learning rate to use when training.'\n )\n parser.add_argument(\n '--testing_percentage',\n type=int,\n default=10,\n help='What percentage of images to use as a test set.'\n )\n parser.add_argument(\n '--validation_percentage',\n type=int,\n default=10,\n help='What percentage of images to use as a validation set.'\n )\n parser.add_argument(\n '--eval_step_interval',\n type=int,\n default=10,\n help='How often to evaluate the training results.'\n )\n parser.add_argument(\n '--train_batch_size',\n type=int,\n default=100,\n help='How many images to train on at a time.'\n )\n parser.add_argument(\n '--test_batch_size',\n type=int,\n default=-1,\n help=\"\"\"\\\n How many images to test on. 
This test set is only used once, to evaluate\n the final accuracy of the model after training completes.\n A value of -1 causes the entire test set to be used, which leads to more\n stable results across runs.\\\n \"\"\"\n )\n parser.add_argument(\n '--validation_batch_size',\n type=int,\n default=100,\n help=\"\"\"\\\n How many images to use in an evaluation batch. This validation set is\n used much more often than the test set, and is an early indicator of how\n accurate the model is during training.\n A value of -1 causes the entire validation set to be used, which leads to\n more stable results across training iterations, but may be slower on large\n training sets.\\\n \"\"\"\n )\n parser.add_argument(\n '--print_misclassified_test_images',\n default=False,\n help=\"\"\"\\\n Whether to print out a list of all misclassified test images.\\\n \"\"\",\n action='store_true'\n )\n parser.add_argument(\n '--model_dir',\n type=str,\n default='/tmp/imagenet',\n help=\"\"\"\\\n Path to classify_image_graph_def.pb,\n imagenet_synset_to_human_label_map.txt, and\n imagenet_2012_challenge_label_map_proto.pbtxt.\\\n \"\"\"\n )\n parser.add_argument(\n '--bottleneck_dir',\n type=str,\n default='/tmp/bottleneck',\n help='Path to cache bottleneck layer values as files.'\n )\n parser.add_argument(\n '--final_tensor_name',\n type=str,\n default='final_result',\n help=\"\"\"\\\n The name of the output classification layer in the retrained graph.\\\n \"\"\"\n )\n parser.add_argument(\n '--flip_left_right',\n default=False,\n help=\"\"\"\\\n Whether to randomly flip half of the training images horizontally.\\\n \"\"\",\n action='store_true'\n )\n parser.add_argument(\n '--random_crop',\n type=int,\n default=0,\n help=\"\"\"\\\n A percentage determining how much of a margin to randomly crop off the\n training images.\\\n \"\"\"\n )\n parser.add_argument(\n '--random_scale',\n type=int,\n default=0,\n help=\"\"\"\\\n A percentage determining how much to randomly scale up the size of the\n training images by.\\\n \"\"\"\n )\n parser.add_argument(\n '--random_brightness',\n type=int,\n default=0,\n help=\"\"\"\\\n A percentage determining how much to randomly multiply the training image\n input pixels up or down by.\\\n \"\"\"\n )\n parser.add_argument(\n '--architecture',\n type=str,\n default='inception_v3',\n help=\"\"\"\\\n Which model architecture to use. 'inception_v3' is the most accurate, but\n also the slowest. For faster or smaller models, choose a MobileNet with the\n form 'mobilenet_<parameter size>_<input_size>[_quantized]'. For example,\n 'mobilenet_1.0_224' will pick a model that is 17 MB in size and takes 224\n pixel input images, while 'mobilenet_0.25_128_quantized' will choose a much\n less accurate, but smaller and faster network that's 920 KB on disk and\n takes 128x128 images. See https://research.googleblog.com/2017/06/mobilenets-open-source-models-for.html\n for more information on Mobilenet.\\\n \"\"\")\n parser.add_argument(\n '--model_download_url',\n type=str,\n default='http://download.tensorflow.org/models/',\n help=\"\"\"\\\n URL to download models.\\\n \"\"\"\n )\n parser.add_argument(\n '--model_file_name',\n type=str,\n default=None,\n help=\"\"\"\\\n Name of the file after untarring. Specify this if the common convention is\n not followed.\\\n \"\"\"\n )\n FLAGS, unparsed = parser.parse_known_args()\n tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)" ]
[ [ "tensorflow.image.resize_bilinear", "tensorflow.reduce_min", "tensorflow.nn.softmax_cross_entropy_with_logits", "tensorflow.image.random_flip_left_right", "tensorflow.matmul", "tensorflow.import_graph_def", "tensorflow.python.platform.gfile.ListDirectory", "tensorflow.stack", "tensorflow.python.platform.gfile.Exists", "tensorflow.nn.softmax", "tensorflow.random_crop", "tensorflow.python.platform.gfile.IsDirectory", "tensorflow.global_variables_initializer", "tensorflow.logging.warning", "tensorflow.image.decode_jpeg", "tensorflow.cast", "tensorflow.train.GradientDescentOptimizer", "tensorflow.subtract", "tensorflow.summary.histogram", "tensorflow.python.platform.gfile.FastGFile", "tensorflow.Variable", "tensorflow.logging.info", "tensorflow.argmax", "tensorflow.constant", "tensorflow.gfile.MakeDirs", "tensorflow.squeeze", "tensorflow.gfile.DeleteRecursively", "tensorflow.app.run", "tensorflow.logging.set_verbosity", "tensorflow.python.framework.tensor_shape.scalar", "tensorflow.zeros", "numpy.zeros", "tensorflow.expand_dims", "tensorflow.summary.scalar", "tensorflow.logging.error", "tensorflow.gfile.Exists", "tensorflow.Session", "tensorflow.GraphDef", "tensorflow.truncated_normal", "tensorflow.python.platform.gfile.Glob", "tensorflow.placeholder", "tensorflow.name_scope", "tensorflow.summary.merge_all", "numpy.squeeze", "tensorflow.placeholder_with_default", "tensorflow.multiply", "tensorflow.Graph", "tensorflow.reduce_max", "tensorflow.logging.fatal", "tensorflow.summary.FileWriter", "tensorflow.reduce_mean", "tensorflow.square", "tensorflow.python.util.compat.as_bytes" ] ]
BarryBAOWEI/CodeL
[ "3e2df2b1086e0e2208fa471e04af7b3a81740df3" ]
[ "JQdata/get_stock_inf.py" ]
[ "# -*- coding:utf-8 -*-\nimport tushare as ts\nimport pandas as pd\nimport datetime\n\n################### 子函数 ###################\ndef stock_inf_get(L,start_date,end_date):\n \"\"\"\n L : list 股票代码的列表,例如['000002','000423',...]\n start_date: str 数据起始时间,格式YYYY-MM-DD\n end_date : str 数据结束时间,格式YYYY-MM-DD\n \"\"\"\n dict_stock_all = {}\n for stock_code in L:\n df = ts.get_k_data(stock_code,start = start_date,end = end_date)[['code','date','open','close','volume']]\n df['return'] = df['close'].pct_change()\n dict_stock_all[stock_code] = df\n return dict_stock_all\n\ndef stock_to_excel(outputpath,df_dict):\n \"\"\"\n outputpath: str 输出路径,例如'c:/XXX/YYYYY'\n df_dict : dict stock_inf_get函数得到的结果字典\n \"\"\"\n outputfile = outputpath + '/stock_inf.xlsx'\n writer = pd.ExcelWriter(outputfile)\n for key, value in df_dict.items():\n value.to_excel(writer,key,index=False)\n writer.save()\n################### 子函数 ###################\n\n################### 启动函数,跑它就行 ###################\ndef run_all(L,start_date,end_date,outputpath):\n \"\"\"\n 参数说明\n L : list 股票代码的列表,例如['000002','000423',...]\n start_date: str 数据起始时间,格式YYYY-MM-DD\n end_date : str 数据结束时间,格式YYYY-MM-DD\n outputpath: str 输出路径,例如'c:/XXX/YYYYY'\n \"\"\"\n df_dict = stock_inf_get(L,start_date,end_date)\n stock_to_excel(outputpath,df_dict)\n return df_dict\n#########################################################\n\n#['600518','000538','000963','601607','600332','000153','000650','600196','600436','600085','300267','600572','600511']\n#康美药业,云南白药,华东医药,上海医药,白云山,丰原药业,仁和药业,复兴药业,片仔癀,同仁堂,尔康制药,康恩贝,国药股份\n\nall_stock_dict = run_all(\n L = ['600518','000538','000963','601607','600332','000153','000650','600196','600436','600085','300267','600572','600511']\n ,start_date = '2015-01-01'\n ,end_date = '2017-12-31'\n ,outputpath = 'C:/Users/43460/Desktop')\n\n# python 的 DataFrame 结果全部保存在 all_stock_dict 中,以字典的形式\n# 以 df1 = all_stock_dict['600518'] 的方式就可以读取其中某一个股票的DataFrame并保存在df1中\n\n\n\n" ]
[ [ "pandas.ExcelWriter" ] ]
jizongFox/IIC
[ "572076d5c0c26516ff3e807f2bad4e3498ab12c1" ]
[ "IIC/utils/segmentation/segmentation_eval.py" ]
[ "from __future__ import print_function\n\nimport sys\nfrom datetime import datetime\n\nimport torch\nfrom tqdm import tqdm\n\nfrom IIC.utils.cluster.cluster_eval import cluster_subheads_eval\nfrom IIC.utils.cluster.transforms import sobel_process\n\n\ndef segmentation_eval(config, net,\n mapping_assignment_dataloader,\n mapping_test_dataloader,\n sobel, using_IR=False, verbose=0, return_only=False):\n torch.cuda.empty_cache()\n net.eval()\n\n stats_dict = cluster_subheads_eval(config, net,\n mapping_assignment_dataloader=mapping_assignment_dataloader,\n mapping_test_dataloader=mapping_test_dataloader,\n sobel=sobel,\n using_IR=using_IR,\n get_data_fn=_segmentation_get_data,\n verbose=verbose)\n\n net.train()\n\n acc = stats_dict[\"best\"]\n is_best = (len(config.epoch_acc) > 0) and (acc > max(config.epoch_acc))\n\n torch.cuda.empty_cache()\n\n if not return_only:\n config.epoch_stats.append(stats_dict)\n config.epoch_acc.append(acc)\n config.epoch_avg_subhead_acc.append(stats_dict[\"avg\"])\n\n return is_best\n else:\n return stats_dict\n\n\ndef _segmentation_get_data(config, net, dataloader, sobel=False,\n using_IR=False, verbose=0):\n # returns (vectorised) cuda tensors for flat preds and targets\n # sister of _clustering_get_data\n\n assert (config.output_k <= 255)\n\n num_batches = len(dataloader)\n num_samples = 0\n\n # upper bound, will be less for last batch\n samples_per_batch = config.batch_sz * config.input_sz * config.input_sz\n\n if verbose > 0:\n print(\"started _segmentation_get_data %s\" % datetime.now())\n sys.stdout.flush()\n\n # vectorised\n flat_predss_all = [torch.zeros((num_batches * samples_per_batch),\n dtype=torch.uint8).cuda() for _ in xrange(\n config.num_sub_heads)]\n flat_targets_all = torch.zeros((num_batches * samples_per_batch),\n dtype=torch.uint8).cuda()\n mask_all = torch.zeros((num_batches * samples_per_batch),\n dtype=torch.uint8).cuda()\n\n if verbose > 0:\n batch_start = datetime.now()\n all_start = batch_start\n print(\"starting batches %s\" % batch_start)\n\n for b_i, batch in tqdm(enumerate(dataloader), desc=\"evaluation\", total=len(dataloader)):\n\n imgs, flat_targets, mask = batch\n imgs = imgs.cuda()\n\n if sobel:\n imgs = sobel_process(imgs, config.include_rgb, using_IR=using_IR)\n\n with torch.no_grad():\n x_outs = net(imgs)\n\n assert (x_outs[0].shape[1] == config.output_k)\n assert (x_outs[0].shape[2] == config.input_sz and x_outs[0].shape[\n 3] == config.input_sz)\n\n # actual batch size\n actual_samples_curr = (\n flat_targets.shape[0] * config.input_sz * config.input_sz)\n num_samples += actual_samples_curr\n\n # vectorise: collapse from 2D to 1D\n start_i = b_i * samples_per_batch\n for i in xrange(config.num_sub_heads):\n x_outs_curr = x_outs[i]\n assert (not x_outs_curr.requires_grad)\n flat_preds_curr = torch.argmax(x_outs_curr, dim=1)\n flat_predss_all[i][\n start_i:(start_i + actual_samples_curr)] = flat_preds_curr.view(-1)\n\n flat_targets_all[\n start_i:(start_i + actual_samples_curr)] = flat_targets.view(-1)\n mask_all[start_i:(start_i + actual_samples_curr)] = mask.view(-1)\n\n if verbose > 0 and b_i < 3:\n batch_finish = datetime.now()\n print(\"finished batch %d, %s, took %s, of %d\" %\n (b_i, batch_finish, batch_finish - batch_start, num_batches))\n batch_start = batch_finish\n sys.stdout.flush()\n\n if verbose > 0:\n all_finish = datetime.now()\n print(\n \"finished all batches %s, took %s\" % (all_finish, all_finish - all_start))\n sys.stdout.flush()\n\n flat_predss_all = [flat_predss_all[i][:num_samples] for i in\n 
xrange(config.num_sub_heads)]\n flat_targets_all = flat_targets_all[:num_samples]\n mask_all = mask_all[:num_samples]\n\n flat_predss_all = [flat_predss_all[i].masked_select(mask=mask_all) for i in\n xrange(config.num_sub_heads)]\n flat_targets_all = flat_targets_all.masked_select(mask=mask_all)\n\n if verbose > 0:\n print(\"ended _segmentation_get_data %s\" % datetime.now())\n sys.stdout.flush()\n\n selected_samples = mask_all.sum()\n assert (len(flat_predss_all[0].shape) == 1 and\n len(flat_targets_all.shape) == 1)\n assert (flat_predss_all[0].shape[0] == selected_samples)\n assert (flat_targets_all.shape[0] == selected_samples)\n\n return flat_predss_all, flat_targets_all\n" ]
[ [ "torch.zeros", "torch.cuda.empty_cache", "torch.argmax", "torch.no_grad" ] ]
julien-amar/date-a-scientist
[ "8748516ab5bcfca488e6ef6ecb4fcd3786daa8fc" ]
[ "models/naive-bayes/review-scikit.py" ]
[ "from reviews import neg_list, pos_list\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.naive_bayes import MultinomialNB\n\nreview = \"This crib was amazing\"\n\n# Vectorize & count words gathering both positive & negative words\ncounter = CountVectorizer()\ncounter.fit(neg_list + pos_list)\n\nprint(counter.vocabulary_)\n\n# Get occurence of words\ntraining_counts = counter.transform(neg_list + pos_list)\nreview_counts = counter.transform([review])\n\nprint(review_counts)\n\n# Prepare labels for classifications (100 * negative + 1000 * positive)\ntraining_labels = [0] * 1000 + [1] * 1000\n\nclassifier = MultinomialNB()\n\n# Train by associating words with labels\nclassifier.fit(training_counts, training_labels)\n\n# Perdict is review is positive or negative (which label it correspond to) \nprint(classifier.predict(review_counts))\n\n# Show prediction score for all labels\nprint(classifier.predict_proba(review_counts))" ]
[ [ "sklearn.naive_bayes.MultinomialNB", "sklearn.feature_extraction.text.CountVectorizer" ] ]
Yuhuishishishi/dask-sql
[ "6fa88edf972d4d00eaed63927c328cd9a5b7b339" ]
[ "tests/unit/test_utils.py" ]
[ "import pytest\nfrom dask import dataframe as dd\nimport pandas as pd\n\nfrom dask_sql.utils import (\n is_frame,\n Pluggable,\n ParsingException,\n _set_or_check_java_home,\n)\n\n\ndef test_is_frame_for_frame():\n df = dd.from_pandas(pd.DataFrame({\"a\": [1]}), npartitions=1)\n assert is_frame(df)\n\n\ndef test_is_frame_for_none():\n assert not is_frame(None)\n\n\ndef test_is_frame_for_number():\n assert not is_frame(3)\n assert not is_frame(3.5)\n\n\nclass PluginTest1(Pluggable):\n pass\n\n\nclass PluginTest2(Pluggable):\n pass\n\n\ndef test_add_plugin():\n PluginTest1.add_plugin(\"some_key\", \"value\")\n\n assert PluginTest1.get_plugin(\"some_key\") == \"value\"\n assert PluginTest1().get_plugin(\"some_key\") == \"value\"\n\n with pytest.raises(KeyError):\n PluginTest2.get_plugin(\"some_key\")\n\n\ndef test_overwrite():\n PluginTest1.add_plugin(\"some_key\", \"value\")\n\n assert PluginTest1.get_plugin(\"some_key\") == \"value\"\n assert PluginTest1().get_plugin(\"some_key\") == \"value\"\n\n PluginTest1.add_plugin(\"some_key\", \"value_2\")\n\n assert PluginTest1.get_plugin(\"some_key\") == \"value_2\"\n assert PluginTest1().get_plugin(\"some_key\") == \"value_2\"\n\n PluginTest1.add_plugin(\"some_key\", \"value_3\", replace=False)\n\n assert PluginTest1.get_plugin(\"some_key\") == \"value_2\"\n assert PluginTest1().get_plugin(\"some_key\") == \"value_2\"\n\n\ndef test_exception_parsing():\n e = ParsingException(\n \"SELECT * FROM df\",\n \"\"\"org.apache.calcite.runtime.CalciteContextException: From line 1, column 3 to line 1, column 4: Message\"\"\",\n )\n\n expected = \"\"\"Can not parse the given SQL: org.apache.calcite.runtime.CalciteContextException: From line 1, column 3 to line 1, column 4: Message\n\nThe problem is probably somewhere here:\n\n\\tSELECT * FROM df\n\\t ^^\"\"\"\n assert str(e) == expected\n\n e = ParsingException(\n \"SELECT * FROM df\", \"\"\"Lexical error at line 1, column 3. Message\"\"\",\n )\n\n expected = \"\"\"Can not parse the given SQL: Lexical error at line 1, column 3. Message\n\nThe problem is probably somewhere here:\n\n\\tSELECT * FROM df\n\\t ^\"\"\"\n assert str(e) == expected\n\n e = ParsingException(\n \"SELECT *\\nFROM df\\nWHERE x = 3\",\n \"\"\"From line 1, column 3 to line 2, column 3: Message\"\"\",\n )\n\n expected = \"\"\"Can not parse the given SQL: From line 1, column 3 to line 2, column 3: Message\n\nThe problem is probably somewhere here:\n\n\\tSELECT *\n\\t ^^^^^^^\n\\tFROM df\n\\t^^^\n\\tWHERE x = 3\"\"\"\n assert str(e) == expected\n\n e = ParsingException(\"SELECT *\", \"Message\",)\n\n assert str(e) == \"Message\"\n\n\ndef test_no_warning():\n with pytest.warns(None) as warn:\n _set_or_check_java_home()\n\n assert not warn\n" ]
[ [ "pandas.DataFrame" ] ]
HunterLC/Features
[ "77e10d743a673fd03f8450719b56896916edf3b6" ]
[ "example/imagetoword.py" ]
[ "import tensorflow as tf\nfrom tensorflow import keras\n# 载入vgg19模型\nfrom tensorflow.keras.applications import vgg19\nfrom tensorflow.keras.applications import resnet50\nfrom tensorflow.keras.preprocessing import image\nimport numpy as np\nimport pandas as pd\nimport os\nfrom PIL import ImageFile\nImageFile.LOAD_TRUNCATED_IMAGES = True\nimport argparse\ntest_csv_path = r\"G:/result_origin.csv\"\n\n# 初始化vgg19模型,weights参数指的是使用ImageNet图片集训练的模型\n# 每种模型第一次使用的时候都会自网络下载保存的h5文件\n# vgg19的数据文件约为584M\n# model = vgg19.VGG19(weights='imagenet')\nmodel = resnet50.ResNet50(weights='imagenet')\n\ndef image_feature_extraction(df_image):\n #将第三列到最后列转为float\n # df_image.iloc[:,2:] = df_image.iloc[:,2:].astype(float)\n df_image.iloc[:, -2:] = df_image.iloc[:, -2:].astype(object)\n # return df_image\n #其余数据统计\n i = 0\n image_name = []\n for index, row in df_image.iterrows():\n #获得需要处理的文本内容\n if (pd.isna(df_image.at[i, 'piclist'])):\n i += 1\n image_name.append('nothing')\n continue\n else:\n image_list = row['piclist'].split('\\t')\n # 计算 颜色矩\n filename1 = 'G:/test/rumor_images/' + image_list[0]\n filename2 = 'G:/test/nonrumor_images/' + image_list[0]\n filename= ''\n if (os.path.isfile(filename1)):\n filename = filename1\n else:\n filename = filename2\n #计算颜色矩\n # df_image.at[i, 2:11] = image_color_moments(filename)\n #计算深度学习特征 ---PyTorch ResNet50 CNN\n # try:\n # df_image.at[i, 11:-2] = image_resnet_cnn(filename,model_resnet50)\n # except Exception as e:\n # logging.info(\"图片有问题\"+str(e))\n # df_image.at[i, 'tf_vgg19_class'] = image_get_class(filename)\n image_name.append(filename)\n i += 1\n df_image['tf_resnet50_class'], img_score = main(image_name)\n return df_image, img_score\n\ndef main(imgPath):\n # 载入命令行参数指定的图片文件, 载入时变形为224x224,这是模型规范数据要求的\n img_array = []\n img_score = []\n j = 0\n for i in imgPath:\n if (i == 'nothing'):\n img_array.append('no')\n img_score.append(0)\n else:\n try:\n img = image.load_img(i, target_size=(224, 224))\n # 将图片转换为(224,224,3)数组,最后的3是因为RGB三色彩图\n img = image.img_to_array(img)\n # 跟前面的例子一样,使用模型进行预测是批处理模式,\n # 所以对于单个的图片,要扩展一维成为(1,224,224,3)这样的形式\n # 相当于建立一个预测队列,但其中只有一张图片\n img = np.expand_dims(img, axis=0)\n predict_class = model.predict(img)\n # 获取图片识别可能性最高的3个结果\n # desc = vgg19.decode_predictions(predict_class, top=1)\n desc = resnet50.decode_predictions(predict_class, top=3)\n # 我们的预测队列中只有一张图片,所以结果也只有第一个有效,显示出来\n img_array.append(desc[0][0][1])\n img_score.append(desc[0][0][2])\n print(desc[0][0][2])\n except:\n img_array.append('no')\n img_score.append(0)\n\n j += 1\n print(str(j))\n # x = np.array(img_array)\n\n # 使用模型预测(识别)\n return img_array, img_score\n\ndef image_insert_cols(df_image,new_features_list):\n '''\n 增加图片新的特征列,方便后续提取并补充值\n :param df_image: 图片信息\n :return: df_image: 新图片信息dataframe\n '''\n col_name = list(df_image.columns)\n #插入新列之前列名去重\n col_name = col_name + sorted(set(new_features_list) - set(col_name), key = new_features_list.index)\n df_image = df_image.reindex(columns=col_name, fill_value=0)\n return df_image\n\nimage_csv_path = r'G:\\毕设\\数据集\\微博\\image.csv'\nif __name__ == '__main__':\n df_image = pd.read_csv(test_csv_path)\n df_image, img_score = image_feature_extraction(df_image)\n df_image.to_csv(test_csv_path, index=0) # 不保留行索引\n str = \" \".join('%s' %id for id in img_score)\n file_object = open(r'G:\\test_image_class_resnet50.txt', 'w')\n file_object.write(str)\n file_object.close( )" ]
[ [ "pandas.isna", "tensorflow.keras.preprocessing.image.load_img", "tensorflow.keras.applications.resnet50.decode_predictions", "tensorflow.keras.applications.resnet50.ResNet50", "tensorflow.keras.preprocessing.image.img_to_array", "pandas.read_csv", "numpy.expand_dims" ] ]
Jeket/japonicus
[ "a4f9dbb9d59be9ada645cdc13b70ec4841564304" ]
[ "web.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport datetime\nimport pandas as pd\nimport os\n\nimport flask\nimport dash\nfrom dash.dependencies import Input, Output, Event\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom flask_caching import Cache\nfrom promoterz.statistics import statisticsNames\nimport Settings\n\ngsettings = Settings.getSettings()['Global']\nsettings = Settings.getSettings()['bayesian']\n\n\ndef load_evolution_logs(filename=None):\n FileList = os.listdir(gsettings[\"save_dir\"])\n filename = os.path.join(gsettings[\"save_dir\"], filename)\n df = pd.read_csv(filename, names=columns)\n return df\n\n\ndef update_graph(GraphName, Statistics):\n print('Loading')\n ID = [s for s in GraphName if s.isdigit()]\n '''\n try:\n df = load_evolution_logs(filename=\"evolution_gen_Locale%s.csv\" % ''.join(ID))\n \n except:\n print(\"Failure to read evolution data.\")\n return None\n '''\n df = pd.DataFrame(Statistics)\n annotations = []\n for W in range(len(df['dateRange'])):\n DR = df['dateRange'][W]\n if DR != None:\n annotations.append(\n {\n 'xref': 'axis',\n 'yref': 'paper',\n 'xanchor': 'left',\n 'yanchor': 'bottom',\n 'font': {'family': 'Arial', 'size': 12, 'color': 'rgb(37,37,37)'},\n 'x': W,\n 'y': 1 if not len(annotations) %\n 2 else 0.93, # avoid label overlap;\n 'text': DR,\n }\n )\n colorSequence = [\n (188, 189, 34),\n (100, 11, 182),\n (186, 3, 34),\n (45, 111, 45),\n (66, 128, 66),\n (128, 66, 66),\n ]\n statNames = [\n 'avg', 'std', 'min', 'max', 'evaluationScore', 'evaluationScoreOnSecondary'\n ]\n DATA = [\n {\n 'x': df['id'],\n 'y': df[statNames[S]],\n 'type': 'line',\n 'name': statisticsNames[statNames[S]],\n 'line': {'color': 'rgb%s' % str(colorSequence[S])},\n }\n for S in range(len(statNames))\n ]\n fig = {\n 'data': [\n {\n 'x': [0, df[\"id\"]],\n 'y': [0],\n 'type': 'line',\n 'name': 'markzero',\n 'line': {'color': 'rgb(0,0,0)'},\n }\n ] +\n DATA,\n 'layout': {'title': 'Evolution at %s' % GraphName, 'annotations': annotations},\n }\n return fig\n\n\ndef newGraphic(name):\n G = dcc.Graph(id=name)\n G.Active = True\n return G\n\n\ndef run_server():\n # Setup the app\n server = flask.Flask(__name__)\n app = dash.Dash(__name__, server=server, csrf_protect=False)\n app.scripts.config.serve_locally = False\n dcc._js_dist[0]['external_url'] = 'https://cdn.plot.ly/plotly-finance-1.28.0.min.js'\n # Add caching\n cache = Cache(app.server, config={'CACHE_TYPE': 'simple'})\n timeout = 60 * 60 # 1 hour\n # Controls\n app.update_graph = update_graph\n # Layout\n app.GraphicList = []\n app.newGraphic = lambda name: app.GraphicList.append(newGraphic(name))\n app.layout = html.Div(\n [\n html.Div(\n [\n html.H2(\n 'japonicus Evolution Statistics',\n style={'padding-top': '20', 'text-align': 'center'},\n ),\n html.Div(\n [\n dcc.Interval(id='my-interval'),\n dcc.RadioItems(\n id='set-time',\n value=5000,\n options=[\n {'label': 'Every 60 seconds', 'value': 60000},\n {'label': 'Every 15 seconds', 'value': 15000},\n {\n 'label': 'Every hour', 'value': 60 * 60 * 1000\n }, # or just every hour\n ],\n ),\n ]\n ),\n html.Div(id='display-time'),\n ]\n ),\n html.Div(id='Graphs'),\n ],\n style={'width': '1100', 'margin-left': 'auto', 'margin-right': 'auto', 'font-family': 'overpass', 'background-color': '#F3F3F3'},\n # Traces>Color\n )\n app.config['suppress_callback_exceptions'] = True\n\n @app.callback(\n Output('display-time', 'children'), events=[Event('my-interval', 'interval')]\n )\n def display_time():\n return 
str(datetime.datetime.now())\n\n @app.callback(Output('my-interval', 'interval'), [Input('set-time', 'value')])\n def update_interval(value):\n return value\n\n @cache.memoize(timeout=timeout)\n @app.callback(\n Output('Graphs', 'children'), events=[Event('my-interval', 'interval')]\n )\n def updateGraphs():\n '''\n for F in range(len(app.GraphicList)):\n if app.GraphicList[F].Active:\n app.GraphicList[F].__setattr__('figure', update_graph(app.GraphicList[F].id))\n '''\n return app.GraphicList\n\n # External css\n external_css = [\n \"https://fonts.googleapis.com/css?family=Overpass:400,400i,700,700i\",\n \"https://cdn.rawgit.com/plotly/dash-app-stylesheets/c6a126a684eaaa94a708d41d6ceb32b28ac78583/dash-technical-charting.css\",\n ]\n for css in external_css:\n app.css.append_css({\"external_url\": css})\n # Run the Dash app\n if __name__ == '__main__':\n app.server.run(debug=True, host='0.0.0.0')\n else: # this way it integrates with main interface without child procs across pipes,\n return app\n\n\nif __name__ == '__main__':\n run_server()\n" ]
[ [ "pandas.DataFrame", "pandas.read_csv" ] ]
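The web.py record above builds its plotly figure by mapping DataFrame columns onto trace dicts (see update_graph). A minimal, self-contained sketch of that trace-building pattern, assuming hypothetical column names ('id', 'avg', 'std') and using only pandas:

import pandas as pd

def build_traces(df, stat_names=('avg', 'std')):
    # One plotly-style line-trace dict per statistic column.
    return [
        {'x': list(df['id']), 'y': list(df[name]), 'type': 'line', 'name': name}
        for name in stat_names
    ]

df = pd.DataFrame({'id': [0, 1, 2], 'avg': [1.0, 1.5, 2.0], 'std': [0.1, 0.2, 0.15]})
figure = {'data': build_traces(df), 'layout': {'title': 'Evolution statistics'}}
print(figure['data'][0]['name'])  # 'avg'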
DanielCastriani/machine-learning-algorithms
[ "5a1f361b283a751af0cb0157e28bfda2fb38d656" ]
[ "utils/math_functions.py" ]
[ "import numpy as np\n\n\ndef euclidian_distance(a: np.ndarray, b: np.ndarray) -> np.ndarray:\n \"\"\"Row-wise Euclidean distance between corresponding rows of a and b.\"\"\"\n diff = a - b # elementwise difference, shape (n, d)\n elms_pow = diff ** 2 # squared components\n elms_sum = np.sum(elms_pow, axis=1) # sum over the feature axis\n return np.sqrt(elms_sum)\n" ]
[ [ "numpy.sum", "numpy.sqrt" ] ]
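A quick standalone check of the euclidian_distance helper above: the row-wise computation agrees with numpy's built-in norm (the sample points are illustrative only):

import numpy as np

a = np.array([[0.0, 0.0, 0.0], [1.0, 2.0, 2.0]])
b = np.array([[1.0, 2.0, 2.0], [1.0, 2.0, 2.0]])

diff = a - b
dist = np.sqrt(np.sum(diff ** 2, axis=1))  # same steps as the helper
assert np.allclose(dist, np.linalg.norm(a - b, axis=1))
print(dist)  # [3. 0.]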
edsuom/ade
[ "cd1058224017efe631f60adbbf2d19a5f882f810" ]
[ "ade/test/test_population.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# ade:\n# Asynchronous Differential Evolution.\n#\n# Copyright (C) 2018-19 by Edwin A. Suominen,\n# http://edsuom.com/ade\n#\n# See edsuom.com for API documentation as well as information about\n# Ed's background and other projects, software and otherwise.\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the\n# License. You may obtain a copy of the License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an \"AS\n# IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n# express or implied. See the License for the specific language\n# governing permissions and limitations under the License.\n\n\"\"\"\nUnit tests for L{ade.population}.\n\"\"\"\n\nimport time, os.path, random, pickle\nfrom io import StringIO\n\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nfrom twisted.internet import defer, reactor\n\nfrom ade.util import *\nfrom ade import abort, population\nfrom ade.individual import Individual\n\nfrom ade.test import testbase as tb\n\n#import twisted.internet.base\n#twisted.internet.base.DelayedCall.debug = True\n\n\nclass Test_ConstraintChecking(tb.TestCase):\n def setUp(self):\n abort.restart()\n self.pm = population.ParameterManager(\n ['a', 'b'], [(-5, +5), (-5, +5)], )\n\n def tearDown(self):\n abort.shutdown()\n \n def unitySumList(self, params):\n return sum(params.values()) == 1.0\n\n def proportionalDict(self, params):\n return params['a'] == 2*params['b']\n \n def test_call_no_constraints(self):\n self.assertTrue(self.pm.passesConstraints([0, 0]))\n self.assertTrue(self.pm.passesConstraints([1E20, 1E20]))\n \n def test_call(self):\n self.pm.constraints = [self.unitySumList]\n self.assertTrue(self.pm.passesConstraints([0.4, 0.6]))\n self.assertFalse(self.pm.passesConstraints([0.5, 0.6]))\n\n def test_call_with_names(self):\n self.pm.constraints = [self.proportionalDict]\n self.assertTrue(self.pm.passesConstraints([3.0, 1.5]))\n self.assertFalse(self.pm.passesConstraints([4.0, 1.5]))\n\n\nclass Test_ParameterManager(tb.TestCase):\n N_trials = 100000\n\n verbose = False\n\n def setUp(self):\n self.pm = population.ParameterManager([], [])\n \n def checkZeroPortion(self, N_zeros, expectedProportion):\n self.assertAlmostEqual(\n float(self.N_trials)/N_zeros, 1.0/expectedProportion, 1)\n\n def test_limit_distribution(self):\n N = 100000\n pm = population.ParameterManager(['x'], [(1, 2)])\n X = np.random.uniform(0, 3, N)\n Y = []\n for x in X:\n x = [x]\n y = pm.limit(x)[0]\n Y.append(y)\n plt.figure()\n bins = np.linspace(0, 3, 100)\n counts = plt.hist(Y, bins)[0]\n plt.title(\"Univariate limited i(1,2) distribution\")\n #plt.xticks(range(0, 3, 12), fontsize=14)\n plt.grid()\n for k, count in enumerate(counts):\n if self.verbose:\n print(sub(\n \"{:3d} {:4.2f}-{:4.2f} {:f}\",\n k, bins[k], bins[k+1], count))\n if bins[k+1] < 1 or bins[k] > 2:\n self.assertEqual(count, 0)\n else:\n self.assertGreater(count, 2900)\n self.assertLess(count, 3100)\n if self.verbose:\n plt.show()\n\n \nclass Test_Population(tb.TestCase):\n Np = 50\n verbose = False\n \n def setUp(self):\n abort.restart()\n self.p = population.Population(\n tb.ackley, [\"x\", \"y\"], [(-5, 5), (-5, 5)], popsize=self.Np)\n\n def tearDown(self):\n abort.shutdown()\n \n def positiveOnly(self, XY):\n return min(XY.values()) > 0\n\n def 
test_setup(self):\n def done(null):\n self.assertEqual(len(self.p), 2*self.Np)\n if self.verbose:\n self.plot(self)\n for k in (0, 1):\n values = [i.values[k] for i in self.p]\n self.assertGreater(min(values), -5)\n self.assertLess(max(values), 5)\n return self.p.setup().addCallback(done)\n \n def test_setup_constrained(self):\n def done(null):\n self.assertEqual(len(self.p), 2*self.Np)\n for i in self.p:\n self.assertTrue(np.all(i.values > 0))\n if self.verbose:\n self.plot(self)\n self.p = population.Population(\n tb.ackley, [\"x\", \"y\"], [(-5, 5), (-5, 5)],\n constraints=self.positiveOnly, popsize=self.Np)\n return self.p.setup().addCallback(done)\n\n def test_setup_constrained_uniform(self):\n def done(null):\n self.assertEqual(len(self.p), 2*self.Np)\n for i in self.p:\n self.assertTrue(np.all(i.values > 0))\n if self.verbose:\n self.plot(self)\n self.p = population.Population(\n tb.ackley, [\"x\", \"y\"], [(-5, 5), (-5, 5)],\n constraints=self.positiveOnly, popsize=self.Np)\n return self.p.setup(uniform=True).addCallback(done)\n\n def prob(self, N, func, *args):\n \"\"\"\n Returns an estimate of probability that C{func(*args)} will return\n C{True} by calling it I{N} times.\n \"\"\"\n count = 0\n for k in range(N):\n if func(*args): count += 1\n return float(count) / N\n \n def test_keepStatusQuo(self):\n def p(score):\n return self.prob(1000, self.p._keepStatusQuo, score)\n\n self.p.statusQuoScore = 1.0\n self.assertEqual(p(0.0), 0.0)\n self.assertEqual(p(1.0), 1.0)\n self.assertBetween(p(0.2), 0.05, 0.14)\n self.assertBetween(p(0.5), 0.43, 0.58)\n self.assertBetween(p(0.8), 0.87, 0.94)\n self.p.statusQuoScore = 0.1\n self.assertEqual(p(0.0), 0.0)\n self.assertEqual(p(1.0), 1.0)\n self.assertEqual(p(0.1), 1.0)\n self.assertBetween(p(0.02), 0.05, 0.14)\n \n def test_replacement(self):\n def doReplacements(*args):\n for rir in args:\n self.p.replacement(rir)\n return self.p.replacement()\n\n def checkReplacements(pMin, pMax, *args):\n p = self.prob(500, doReplacements, *args)\n self.assertBetween(p, pMin, pMax)\n \n self.assertTrue(self.p.replacement())\n self.assertEqual(self.p.replacementScore, 0)\n self.assertFalse(self.p.replacement())\n self.assertAlmostEqual(self.p.statusQuoScore, self.p.Np*2.0/100)\n # 0 x 1\n checkReplacements(0.0, 0.03, 0)\n # 1 x 1\n checkReplacements(0.08, 0.22, 1)\n # 1 x 2\n checkReplacements(0.4, 0.6, 1, 1)\n # 2 x 1\n checkReplacements(0.78, 0.92, 2)\n # 2 x 2\n self.p.replacement(2)\n self.p.replacement(2)\n self.assertEqual(self.p.replacementScore, 3.0)\n self.assertTrue(self.p.replacement())\n \n @defer.inlineCallbacks\n def test_push(self):\n yield self.p.setup()\n iList = list(self.p)\n iWorst = iList[np.argmax([i.SSE for i in iList])]\n iNew = Individual(self.p, np.array([1.23, 4.56]))\n iNew.SSE = 0\n self.p.push(iNew)\n self.assertIn(iNew, list(self.p))\n self.assertNotIn(iWorst, list(self.p))\n\n @defer.inlineCallbacks\n def test_push_and_best(self):\n SSE_prev = np.inf\n yield self.p.setup()\n for i in self.p:\n i.SSE = np.inf\n for k in range(100000):\n i = Individual(self.p, [0, 0])\n i.SSE = 1000*random.random()\n iBest = self.p.best()\n if i < iBest:\n self.p.push(i)\n if self.verbose:\n print(k, i.SSE)\n self.assertTrue(i.SSE <= self.p.iSorted[-1].SSE)\n self.assertTrue(iBest.SSE <= SSE_prev)\n SSE_prev = i.SSE\n \n @defer.inlineCallbacks\n def test_lock(self):\n def gotLock(null):\n stuff.append(sum(stuff))\n\n stuff = [1]\n yield self.p.setup()\n yield self.p.lock(4, 5)\n d = self.p.lock(5).addCallback(gotLock)\n stuff.append(2)\n 
self.p.release(4, 5)\n yield d\n self.assertEqual(stuff, [1, 2, 3])\n\n @defer.inlineCallbacks\n def test_pickle(self):\n yield self.p.setup()\n text = pickle.dumps(self.p)\n p = pickle.loads(text)\n self.assertEqual(repr(p), repr(self.p))\n k = p.sample(1, randomBase=0.5)\n self.assertLess(k, 2*self.Np)\n\n @defer.inlineCallbacks\n def test_save_load(self):\n yield self.p.setup()\n fp = tb.fileInModuleDir(\"ade-test.dat\", absolute=True, isTemp=True)\n self.p.save(fp)\n newBounds = [(-2, 2), (-2, 2)]\n p = population.Population.load(fp, func=tb.ackley, bounds=newBounds)\n k = p.sample(1, randomBase=0.5)\n self.assertLess(k, 2*self.Np)\n for i in p:\n for value in i.values:\n self.assertLess(abs(value), 2)\n SSE = i.SSE\n i = yield i.evaluate()\n self.assertLessEqual(i.SSE, SSE)\n\n @defer.inlineCallbacks\n def test_report_noarg(self):\n def callback(*args):\n cbList.append(args)\n\n cbList = []\n yield self.p.setup()\n fh = StringIO()\n msg(fh)\n self.p.addCallback(callback)\n self.p.report()\n yield self.p.waitForReports()\n self.assertEqual(len(cbList), 0)\n iNew = Individual(self.p, np.array([0.0, 0.0]))\n iNew.SSE = 0\n self.p.push(iNew)\n self.p.report()\n yield self.p.waitForReports()\n self.assertEqual(len(cbList), 1)\n self.assertEqual(fh.getvalue(), \"\")\n msg(None)\n\n @defer.inlineCallbacks\n def test_report_twoArgs(self):\n def callback(*args):\n cbList.append(args)\n\n cbList = []\n yield self.p.setup()\n fh = StringIO()\n msg(fh)\n self.p.addCallback(callback)\n iBest = self.p.best()\n iEvenBetter = iBest.copy()\n iEvenBetter.SSE *= 0.95\n rir = self.p.report(iEvenBetter, iBest)\n self.assertEqual(rir, 1)\n yield self.p.waitForReports()\n self.assertEqual(self.p.replacement(), True)\n iWorse = iBest.copy()\n iWorse.SSE *= 1.00000001\n rir = self.p.report(iWorse, iBest)\n self.assertIs(rir, None)\n yield self.p.waitForReports()\n self.assertEqual(self.p.replacement(), False)\n\n \nclass Test_ProbabilitySampler(tb.TestCase):\n def setUp(self):\n self.ps = population.ProbabilitySampler()\n \n def test_probSample_0r25(self):\n K = np.arange(10)\n counts = dict.fromkeys(K, 0)\n for repeat in range(10000):\n k = self.ps(K, 0.25)\n counts[k] += 1\n for k in range(1,10):\n self.assertGreater(counts[0], counts[k])\n if k > 4:\n self.assertEqual(counts[k], 0)\n continue\n self.assertGreater(counts[k], max([0, 380*(9-2*k)-50]))\n self.assertLess(counts[k], 420*(9-2*k)+50)\n\n def test_probSample_0r50(self):\n K = np.arange(10)\n counts = dict.fromkeys(K, 0)\n for repeat in range(10000):\n k = self.ps(K, 0.5)\n counts[k] += 1\n for k in range(1,10):\n self.assertGreater(counts[0], counts[k])\n self.assertGreater(counts[k], max([0, 92*(19-2*k)-50]))\n self.assertLess(counts[k], 108*(19-2*k)+50)\n\n def test_probSample_0r75(self):\n K = np.arange(10)\n counts = dict.fromkeys(K, 0)\n for repeat in range(10000):\n k = self.ps(K, 0.75)\n counts[k] += 1\n for k in range(1,10):\n if k < 5:\n self.assertGreater(counts[k], 1200)\n self.assertLess(counts[k], 1480)\n continue\n self.assertGreater(counts[k], max([0, 150+220*(9-k)-55]))\n self.assertLess(counts[k], 150+280*(9-k)+55)\n\n\nclass Test_Population_Abort(tb.TestCase):\n def setUp(self):\n self.p = population.Population(\n self.tenthSecond, [\"x\"], [(-5, 5)], popsize=100)\n\n def tenthSecond(self, x):\n return self.deferToDelay(0.1).addCallback(lambda _: 1.23)\n\n @defer.inlineCallbacks\n def test_setup_no_abort(self):\n t0 = time.time()\n yield self.p.setup()\n t1 = time.time()\n self.assertGreater(t1-t0, 0.1)\n self.assertLess(t1-t0, 
0.13*self.p.N_maxParallel)\n \n @defer.inlineCallbacks\n def test_abort_during_setup(self):\n t0 = time.time()\n d = self.p.setup()\n self.deferToDelay(0.5).addCallback(lambda _: self.p.abort())\n yield d\n self.assertLess(time.time()-t0, 0.72)\n" ]
[ [ "numpy.array", "matplotlib.pyplot.grid", "matplotlib.pyplot.title", "matplotlib.pyplot.figure", "matplotlib.pyplot.hist", "numpy.random.uniform", "numpy.arange", "numpy.argmax", "numpy.all", "matplotlib.pyplot.show", "numpy.linspace" ] ]
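Several assertions in the ade test record above bound stochastic behaviour by estimating probabilities with the small Monte Carlo helper prob(). Restated as a standalone sketch (the biased-coin example is illustrative, not from the repo):

import random

def prob(N, func, *args):
    """Estimate the probability that func(*args) returns True over N trials."""
    count = 0
    for _ in range(N):
        if func(*args):
            count += 1
    return float(count) / N

estimate = prob(100000, lambda: random.random() < 0.3)
assert 0.28 < estimate < 0.32  # loose bounds, in the spirit of assertBetween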
xhsheng-ustc/Deep-PCAC
[ "3e655fc2df5c4491257f1556ac34e1f0b270e974" ]
[ "mycodec.py" ]
[ "import os\r\nimport argparse\r\nimport numpy as np\r\nimport tensorflow as tf\r\nimport importlib \r\nimport subprocess\r\ntf.enable_eager_execution()\r\n\r\nfrom entropy_model import EntropyBottleneck\r\nfrom conditional_entropy_model import SymmetricConditional\r\nimport open3d as o3d\r\n###################################### Preprocess & Postprocess ######################################\r\ndef preprocess(input_file, points_num=2048):\r\n \"\"\"Partition.\r\n Input: .ply file and arguments for pre-process. \r\n Output: partitioned cubes, cube positions, and number of points in each cube. \r\n \"\"\"\r\n\r\n print('===== Partition =====')\r\n # scaling (optional)\r\n pcd = o3d.io.read_point_cloud(input_file)\r\n coordinate = np.asarray(pcd.points)\r\n color = np.asarray(pcd.colors)\r\n point_cloud = np.concatenate((coordinate,color),axis=1)\r\n number_of_points_of_ply = point_cloud.shape[0]\r\n number_of_feature = point_cloud.shape[1]\r\n set_num = int(np.ceil(number_of_points_of_ply/points_num))\r\n point_set = np.zeros((1,points_num,number_of_feature))\r\n point_cloud = np.expand_dims(point_cloud,0)\r\n\r\n for i in range(set_num):\r\n if i <set_num-1:\r\n #print(i)\r\n point_set = np.concatenate((point_set,point_cloud[:,i*2048:(i+1)*2048,:]),0)\r\n else:\r\n temp = np.zeros((1,points_num,number_of_feature))\r\n num_less_than_2048 = number_of_points_of_ply-points_num*i\r\n #number points of last set whose number of points is less than 2048\r\n temp[:,0:num_less_than_2048,:] = point_cloud[:,i*points_num:,:]\r\n point_set = np.concatenate((point_set,temp),0)\r\n point_set = point_set[1:,:,:]\r\n print(point_set.shape)\r\n print(\"Partition\")\r\n return point_set,num_less_than_2048\r\n\r\ndef postprocess(output_file, point_set, num_less_than_2048,points_num=2048):\r\n \"\"\"Reconstrcut point cloud and write to ply file.\r\n Input: output_file, point_set\r\n \"\"\"\r\n set_num = point_set.shape[0]\r\n feature_num = point_set.shape[2]\r\n number_of_points_of_ply = (set_num-1)*points_num+num_less_than_2048\r\n point_cloud = np.zeros((number_of_points_of_ply,feature_num))\r\n for i in range(set_num):\r\n if i<set_num-1:\r\n point_cloud[i*2048:(i+1)*2048] = point_set[i]\r\n else:\r\n point_cloud[i*2048:] = point_set[i,0:num_less_than_2048,:]\r\n pcd = o3d.geometry.PointCloud()\r\n point_ori_position = point_cloud[:,0:3]\r\n point_ori_color = point_cloud[:,3:6]\r\n pcd.points=o3d.utility.Vector3dVector(point_ori_position)\r\n pcd.colors=o3d.utility.Vector3dVector(point_ori_color)\r\n o3d.io.write_point_cloud(output_file,pcd,write_ascii=False)\r\n return point_cloud\r\n\r\n###################################### Compress & Decompress ######################################\r\n\r\ndef compress(x_coori,x_color,model, ckpt_dir, latent_points):\r\n \"\"\"Compress cubes to bitstream.\r\n Input: cubes with shape [batch size, length, width, height, channel(1)].\r\n Input: cubes with shape [batch size, num_points=2048, num_feature=6].\r\n Output: compressed bitstream.\r\n \"\"\"\r\n\r\n print('===== Compress =====')\r\n # load model.\r\n model = importlib.import_module(model)\r\n analysis_transform = model.AnalysisTransform(latent_points)\r\n hyper_encoder = model.HyperEncoder()\r\n hyper_decoder = model.HyperDecoder()\r\n entropy_bottleneck = EntropyBottleneck()\r\n conditional_entropy_model = SymmetricConditional()\r\n\r\n checkpoint = tf.train.Checkpoint(analysis_transform=analysis_transform, \r\n hyper_encoder=hyper_encoder, \r\n hyper_decoder=hyper_decoder, \r\n estimator=entropy_bottleneck)\r\n 
status = checkpoint.restore(tf.train.latest_checkpoint(ckpt_dir))\r\n\r\n x = tf.convert_to_tensor(x_color, \"float32\")\r\n x_coori = tf.convert_to_tensor(x_coori, \"float32\")\r\n\r\n def loop_analysis(element):\r\n x = tf.expand_dims(element[0], 0)\r\n x_coori = tf.expand_dims(element[1], 0)\r\n y = analysis_transform(x_coori,x)\r\n return tf.squeeze(y,axis=0)\r\n\r\n element = [x,x_coori]\r\n ys = tf.map_fn(loop_analysis, element, dtype=tf.float32, parallel_iterations=1, back_prop=False)\r\n print(\"Analysis Transform\")\r\n\r\n def loop_hyper_encoder(y):\r\n y = tf.expand_dims(y, 0)\r\n z = hyper_encoder(y)\r\n return tf.squeeze(z,axis=0)\r\n\r\n zs = tf.map_fn(loop_hyper_encoder, ys, dtype=tf.float32, parallel_iterations=1, back_prop=False)\r\n print(\"Hyper Encoder\")\r\n\r\n z_hats, _ = entropy_bottleneck(zs, False)\r\n print(\"Quantize hyperprior\")\r\n\r\n def loop_hyper_deocder(z):\r\n z = tf.expand_dims(z, 0)\r\n loc, scale = hyper_decoder(z)\r\n return tf.squeeze(loc, [0]), tf.squeeze(scale, [0])\r\n\r\n locs, scales = tf.map_fn(loop_hyper_deocder, z_hats, dtype=(tf.float32, tf.float32),\r\n parallel_iterations=1, back_prop=False)\r\n lower_bound = 1e-9# TODO\r\n scales = tf.maximum(scales, lower_bound)\r\n print(\"Hyper Decoder\")\r\n\r\n z_strings, z_min_v, z_max_v = entropy_bottleneck.compress(zs)\r\n z_shape = tf.shape(zs)[:]\r\n print(\"Entropy Encode (Hyper)\")\r\n\r\n y_strings, y_min_v, y_max_v = conditional_entropy_model.compress(ys, locs, scales)\r\n y_shape = tf.shape(ys)[:]\r\n print(\"Entropy Encode\")\r\n\r\n return y_strings, y_min_v, y_max_v, y_shape, z_strings, z_min_v, z_max_v, z_shape\r\n\r\ndef decompress(x_coori,y_strings, y_min_v, y_max_v, y_shape, z_strings, z_min_v, z_max_v, z_shape, model, ckpt_dir,latent_points):\r\n \"\"\"Decompress bitstream to cubes.\r\n Input: compressed bitstream. 
latent representations (y) and hyper prior (z).\r\n Output: cubes with shape [batch size, length, width, height, channel(1)]\r\n \"\"\"\r\n\r\n print('===== Decompress =====')\r\n # load model.\r\n model = importlib.import_module(model)\r\n synthesis_transform = model.SynthesisTransform(latent_points)\r\n hyper_encoder = model.HyperEncoder()\r\n hyper_decoder = model.HyperDecoder()\r\n entropy_bottleneck = EntropyBottleneck()\r\n conditional_entropy_model = SymmetricConditional()\r\n\r\n checkpoint = tf.train.Checkpoint(synthesis_transform=synthesis_transform, \r\n hyper_encoder=hyper_encoder, \r\n hyper_decoder=hyper_decoder, \r\n estimator=entropy_bottleneck)\r\n status = checkpoint.restore(tf.train.latest_checkpoint(ckpt_dir))\r\n\r\n zs = entropy_bottleneck.decompress(z_strings, z_min_v, z_max_v, z_shape, z_shape[-1])\r\n print(\"Entropy Decoder (Hyper)\")\r\n\r\n def loop_hyper_deocder(z):\r\n z = tf.expand_dims(z, 0)\r\n loc, scale = hyper_decoder(z)\r\n return tf.squeeze(loc, [0]), tf.squeeze(scale, [0])\r\n\r\n locs, scales = tf.map_fn(loop_hyper_deocder, zs, dtype=(tf.float32, tf.float32),\r\n parallel_iterations=1, back_prop=False)\r\n lower_bound = 1e-9# TODO\r\n scales = tf.maximum(scales, lower_bound)\r\n print(\"Hyper Decoder\")\r\n\r\n ys = conditional_entropy_model.decompress(y_strings, locs, scales, y_min_v, y_max_v, y_shape)\r\n print(\"Entropy Decoder\")\r\n\r\n def loop_synthesis(element):\r\n y = tf.expand_dims(element[0], 0)\r\n x_coori = tf.expand_dims(element[1], 0)\r\n x_coori= tf.cast(x_coori,tf.float32)\r\n x = synthesis_transform(x_coori,y)\r\n return tf.squeeze(x, [0])\r\n\r\n element=[ys,x_coori]\r\n xs = tf.map_fn(loop_synthesis, element, dtype=tf.float32, parallel_iterations=1, back_prop=False)\r\n print(\"Synthesis Transform\")\r\n\r\n return xs\r\n\r\n###################################### write & read binary files. 
######################################\r\n\r\ndef write_binary_files(filename, y_strings, z_strings, points_numbers_less_than2048, y_min_v, y_max_v, y_shape, z_min_v, z_max_v, z_shape, rootdir='/code'):\r\n \"\"\"Write compressed binary files:\r\n 1) Compressed latent features.\r\n 2) Compressed hyperprior.\r\n 3) Number of input points.\r\n \"\"\" \r\n\r\n if not os.path.exists(rootdir):\r\n os.makedirs(rootdir)\r\n print('===== Write binary files =====')\r\n file_strings = os.path.join(rootdir, filename+'.strings')\r\n file_strings_hyper = os.path.join(rootdir, filename+'.strings_hyper')\r\n file_pointnums = os.path.join(rootdir, filename+'.pointnums')\r\n \r\n with open(file_strings, 'wb') as f:\r\n f.write(np.array(y_shape, dtype=np.int16).tobytes())# [batch size, length, width, height, channels]\r\n f.write(np.array((y_min_v, y_max_v), dtype=np.int8).tobytes())\r\n f.write(y_strings)\r\n\r\n with open(file_strings_hyper, 'wb') as f:\r\n f.write(np.array(z_shape, dtype=np.int16).tobytes())# [batch size, length, width, height, channels]\r\n f.write(np.array((z_min_v, z_max_v), dtype=np.int8).tobytes())\r\n f.write(z_strings)\r\n\r\n # TODO: Compress numbers of points.\r\n with open(file_pointnums, 'wb') as f:\r\n f.write(np.array(points_numbers_less_than2048, dtype=np.uint16).tobytes())\r\n \r\n bytes_strings = os.path.getsize(file_strings)\r\n bytes_strings_hyper = os.path.getsize(file_strings_hyper)\r\n bytes_pointnums = os.path.getsize(file_pointnums)\r\n\r\n print('Total file size (Bytes): {}'.format(bytes_strings+bytes_strings_hyper+bytes_pointnums))\r\n print('Strings (Bytes): {}'.format(bytes_strings))\r\n print('Strings hyper (Bytes): {}'.format(bytes_strings_hyper))\r\n print('Numbers of points (Bytes): {}'.format(bytes_pointnums))\r\n\r\n return bytes_strings, bytes_strings_hyper, bytes_pointnums\r\n\r\ndef read_binary_files(filename, rootdir='/code'):\r\n \"\"\"Read from compressed binary files:\r\n 1) Compressed latent features.\r\n 2) Compressed hyperprior.\r\n 3) Number of input points.\r\n \"\"\" \r\n\r\n print('===== Read binary files =====')\r\n file_strings = os.path.join(rootdir, filename+'.strings')\r\n file_strings_hyper = os.path.join(rootdir, filename+'.strings_hyper')\r\n file_pointnums = os.path.join(rootdir, filename+'.pointnums')\r\n \r\n with open(file_strings, 'rb') as f:\r\n y_shape = np.frombuffer(f.read(2*4), dtype=np.int16)\r\n y_min_v, y_max_v = np.frombuffer(f.read(1*2), dtype=np.int8)\r\n y_strings = f.read()\r\n\r\n with open(file_strings_hyper, 'rb') as f:\r\n z_shape = np.frombuffer(f.read(2*4), dtype=np.int16)\r\n z_min_v, z_max_v = np.frombuffer(f.read(1*2), dtype=np.int8)\r\n z_strings = f.read()\r\n\r\n with open(file_pointnums, 'rb') as f:\r\n points_numbers_less_than2048 = np.frombuffer(f.read(2), dtype=np.uint16)\r\n \r\n return y_strings, z_strings, points_numbers_less_than2048, y_min_v, y_max_v, y_shape, z_min_v, z_max_v, z_shape\r\n\r\n\r\n\r\ndef parse_args():\r\n parser = argparse.ArgumentParser(\r\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\r\n parser.add_argument(\r\n \"command\", choices=[\"compress\", \"decompress\"],\r\n help=\"What to do: 'compress' reads a point cloud (.ply format) \"\r\n \"and writes compressed binary files. 'decompress' \"\r\n \"reads binary files and reconstructs the point cloud (.ply format). \"\r\n \"input and output filenames need to be provided for the latter. 
\")\r\n parser.add_argument(\r\n \"--input\", default='',dest=\"input\",\r\n help=\"Input filename.\")\r\n parser.add_argument(\r\n \"--output\", default='',dest=\"output\",\r\n help=\"Output filename.\")\r\n parser.add_argument(\r\n \"--ckpt_dir\", type=str, default='', dest=\"ckpt_dir\",\r\n help='checkpoint direction trained with different RD tradeoff')\r\n parser.add_argument(\r\n \"--model\", default=\"model\",\r\n help=\"model.\")\r\n parser.add_argument(\r\n \"--gpu\", type=int, default=1, dest=\"gpu\",\r\n help=\"use gpu (1) or not (0).\")\r\n parser.add_argument(\r\n \"--latent_points\", type=int, default=256, dest=\"latent_points\")\r\n args = parser.parse_args()\r\n print(args)\r\n\r\n return args\r\n\r\nif __name__ == \"__main__\":\r\n\r\n args = parse_args()\r\n if args.gpu==1:\r\n os.environ['CUDA_VISIBLE_DEVICES']=\"0\"\r\n else:\r\n os.environ['CUDA_VISIBLE_DEVICES']=\"\"\r\n config = tf.ConfigProto()\r\n config.gpu_options.per_process_gpu_memory_fraction = 1.0\r\n config.gpu_options.allow_growth = True\r\n config.log_device_placement=True\r\n sess = tf.Session(config=config)\r\n\r\n if args.command == \"compress\":\r\n rootdir, filename = os.path.split(args.input)\r\n if not args.output:\r\n args.output = filename.split('.')[0]\r\n print(args.output)\r\n point_set,num_less_than_2048 = preprocess(args.input)\r\n x_coori = point_set[:,:,0:3]\r\n x_color = point_set[:,:,3:6]\r\n y_strings, y_min_v, y_max_v, y_shape, z_strings, z_min_v, z_max_v, z_shape = compress(x_coori,x_color, args.model, args.ckpt_dir,args.latent_points)\r\n\r\n bytes_strings, bytes_strings_hyper, bytes_pointnums = write_binary_files(\r\n args.output, y_strings.numpy(), z_strings.numpy(), num_less_than_2048,\r\n y_min_v.numpy(), y_max_v.numpy(), y_shape.numpy(), \r\n z_min_v.numpy(), z_max_v.numpy(), z_shape.numpy(), rootdir='./compressed')\r\n\r\n elif args.command == \"decompress\":\r\n rootdir, filename = os.path.split(args.input)\r\n if not args.output:\r\n args.output = filename + \"_rec.ply\"\r\n ori_cooridinate_path = args.input + \".ply\"\r\n y_strings_d, z_strings_d, num_less_than_2048_d, \\\r\n y_min_v_d, y_max_v_d, y_shape_d, z_min_v_d, z_max_v_d, z_shape_d = read_binary_files(filename, './compressed')\r\n\r\n point_set_ori,num_less_than_2048 = preprocess(ori_cooridinate_path)\r\n ori_coori = point_set_ori[:,:,0:3]\r\n\r\n rec_color = decompress(ori_coori,y_strings_d, y_min_v_d, y_max_v_d, y_shape_d, z_strings_d, z_min_v_d, z_max_v_d, z_shape_d, args.model, args.ckpt_dir,args.latent_points)\r\n ori_coori = point_set_ori[:,:,0:3]\r\n rec_point_cloud = np.concatenate((ori_coori,rec_color),-1)\r\n postprocess(args.output, rec_point_cloud, int(num_less_than_2048_d),points_num=2048)\r\n " ]
[ [ "numpy.concatenate", "tensorflow.convert_to_tensor", "numpy.ceil", "tensorflow.train.latest_checkpoint", "numpy.asarray", "numpy.zeros", "tensorflow.expand_dims", "tensorflow.shape", "numpy.array", "tensorflow.Session", "tensorflow.train.Checkpoint", "tensorflow.map_fn", "tensorflow.cast", "tensorflow.ConfigProto", "tensorflow.enable_eager_execution", "tensorflow.squeeze", "tensorflow.maximum", "numpy.expand_dims" ] ]
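write_binary_files and read_binary_files in the Deep-PCAC record above store each header field at a fixed offset (shape as int16, min/max as int8, then the raw bitstream), with the reader hard-coding the byte counts. A minimal in-memory round trip of that layout, with made-up values and an io.BytesIO standing in for the on-disk files:

import io
import numpy as np

y_shape = np.array([8, 256], dtype=np.int16)  # made-up [batch, channels]
y_min_v, y_max_v = -12, 11
y_strings = b'\x01\x02\x03'                   # stand-in entropy-coded payload

buf = io.BytesIO()
buf.write(y_shape.tobytes())
buf.write(np.array((y_min_v, y_max_v), dtype=np.int8).tobytes())
buf.write(y_strings)

buf.seek(0)
shape = np.frombuffer(buf.read(2 * 2), dtype=np.int16)  # 2 int16 values
lo, hi = np.frombuffer(buf.read(2), dtype=np.int8)      # 2 int8 values
payload = buf.read()

assert tuple(shape) == (8, 256)
assert (lo, hi) == (y_min_v, y_max_v) and payload == y_strings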
csb6/libtcod-ada
[ "89c2a75eb357a8468ccb0a6476391a6b388f00b4" ]
[ "third_party/libtcod/python/libtcodpy/__init__.py" ]
[ "#\n# libtcod Python wrapper\n# Copyright (c) 2008-2018 Jice & Mingos & rmtew\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * The name of Jice or Mingos may not be used to endorse or promote\n# products derived from this software without specific prior written\n# permission.\n#\n# THIS SOFTWARE IS PROVIDED BY JICE, MINGOS AND RMTEW ``AS IS'' AND ANY\n# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL JICE, MINGOS OR RMTEW BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\n\nfrom __future__ import print_function\nimport os\nimport sys\nimport atexit\nimport ctypes\nimport struct\nimport warnings\nfrom ctypes import *\n\nwarnings.warn(\n \"\"\"\nThis implementation of libtcodpy is no longer maintained.\npython-tcod should be used as a drop-in replacement.\n\nTo switch from libtcodpy to python-tcod:\n* Update your current code base to Python 3.\n* If on Linux, run: \"sudo apt install gcc python3-dev libsdl2-dev libffi-dev\"\n* Install python-tcod using pip: \"python3 -m pip install tcod\"\n* Delete the \"libtcodpy/\" directory and all related \".dll\"/\".so\" files.\n* Make sure that the python installation you run is the one you've installed\n python-tcod on.\n\nNo additional changes are required, you can continue with a tutorial or with\ndocumentation designed for libtcodpy.\n\nBinary distributions can be made using PyInstaller.\"\"\",\n DeprecationWarning,\n stacklevel=2,\n)\n\n# We do not have a fully unicode API on libtcod, so all unicode strings have to\n# be implicitly converted to ascii, and any unicode specific operations have to\n# be explicitly made by users\n# v = v.encode('latin-1')\n# Returned byte strings from the api, should be converted to unicode, so that\n# if formatted via %, do not appear as \"b'sds'\".\n# v = v.decode(\"utf-8\")\nis_python_3 = sys.version_info > (3, 0)\n\nif is_python_3:\n def convert_to_ascii(v):\n if not isinstance(v, bytes):\n return v.encode('utf-8')\n warnings.warn(\"Passing bytes to this call is deprecated.\",\n DeprecationWarning, stacklevel=3)\n return v\nelse:\n def convert_to_ascii(v):\n if isinstance(v, unicode):\n return v.encode('utf-8')\n return v\n\nif sys.version_info[0] == 2: # Python 2\n def _bytes(string):\n if isinstance(string, unicode):\n return string.encode('latin-1')\n return string\n\n def _unicode(string):\n if not isinstance(string, unicode):\n return string.decode('latin-1')\n return string\n\nelse: # Python 3\n def _bytes(string):\n if isinstance(string, str):\n return string.encode('latin-1')\n 
warnings.warn(\"Passing bytes to this call is deprecated.\",\n DeprecationWarning, stacklevel=4)\n return string\n\n def _unicode(string):\n if isinstance(string, bytes):\n warnings.warn(\"Passing bytes to this call is deprecated.\",\n DeprecationWarning, stacklevel=4)\n return string.decode('latin-1')\n return string\n\n\ndef _fmt_bytes(string):\n return _bytes(string).replace(b'%', b'%%')\n\ndef _fmt_unicode(string):\n return _unicode(string).replace(u'%', u'%%')\n\nif not hasattr(ctypes, \"c_bool\"): # for Python < 2.6\n c_bool = c_uint8\n\nc_void = None\n\ntry: #import NumPy if available\n import numpy\n numpy_available = True\nexcept ImportError:\n numpy_available = False\n\nLINUX=False\nMAC=False\nMINGW=False\nMSVC=False\n\ndef _get_cdll(libname):\n '''\n get the library libname using a manual search path that will first\n check the package directory and then the development path\n\n returns the ctypes lib object\n '''\n def get_pe_architecture(filePath):\n # From: https://github.com/tgandor/meats/blob/master/missing/arch_of.py\n with open(filePath, 'rb') as f:\n doshdr = f.read(64)\n magic, padding, offset = struct.unpack('2s58si', doshdr)\n # print (magic, offset)\n if magic != b'MZ':\n return None\n f.seek(offset, os.SEEK_SET)\n pehdr = f.read(6)\n # careful! H == unsigned short, x64 is negative with signed\n magic, padding, machine = struct.unpack('2s2sH', pehdr)\n # print (magic, hex(machine))\n if magic != b'PE':\n return None\n if machine == 0x014c:\n return 'i386'\n if machine == 0x0200:\n return 'IA64'\n if machine == 0x8664:\n return 'x64'\n return 'unknown'\n\n pythonExePath = sys.executable\n pythonExeArchitecture = get_pe_architecture(pythonExePath)\n\n pathsToTry = []\n # 1. Try the directory this script is located in.\n pathsToTry.append(os.path.join(__path__[0], libname))\n # 2. Try the directory of the command-line script.\n scriptFilePath = sys.argv[0]\n scriptPath = os.path.dirname(scriptFilePath)\n if len(scriptPath):\n pathsToTry.append(os.path.join(scriptPath, libname))\n else:\n pathsToTry.append(os.path.join(os.getcwd(), libname))\n # 3. Try the environment variable LIBTCOD_DLL_PATH.\n if \"LIBTCOD_DLL_PATH\" in os.environ:\n envPaths = os.environ[\"LIBTCOD_DLL_PATH\"].split(\";\")\n for envPath in envPaths:\n if os.path.exists(envPath):\n pathsToTry.append(os.path.join(envPath, libname))\n # 4. 
Try the top-level path in the development tree.\n potentialTopLevelPath = os.path.realpath(os.path.join(__path__[0], os.pardir, os.pardir))\n pythonPath = os.path.join(potentialTopLevelPath, \"python\")\n if os.path.exists(pythonPath):\n pathsToTry.append(os.path.join(potentialTopLevelPath, libname))\n\n for libPath in pathsToTry:\n if os.path.exists(libPath):\n # get library from the package\n libArchitecture = get_pe_architecture(libPath)\n if libArchitecture != pythonExeArchitecture:\n libName = os.path.basename(libPath)\n print (\"Error: Incompatible architecture, python is %s, %s is %s\" % (pythonExeArchitecture, libName, libArchitecture))\n sys.exit(1)\n return ctypes.cdll[libPath]\n\n raise Exception(\"unable to locate: \"+ libname)\n\nif sys.platform.find('linux') != -1:\n _lib = _get_cdll('libtcod.so')\n LINUX=True\nelif sys.platform.find('darwin') != -1:\n _lib = _get_cdll('libtcod.dylib')\n MAC = True\nelif sys.platform.find('haiku') != -1:\n _lib = _get_cdll('libtcod.so')\n HAIKU = True\nelse:\n _get_cdll('SDL2.dll')\n _lib = _get_cdll('libtcod.dll')\n MSVC=True\n # On Windows, ctypes doesn't work well with function returning structs,\n # so we have to user the _wrapper functions instead\n for function_name in [\n \"TCOD_color_equals\",\n \"TCOD_color_add\",\n \"TCOD_color_subtract\",\n \"TCOD_color_multiply\",\n \"TCOD_color_multiply_scalar\",\n \"TCOD_color_lerp\",\n \"TCOD_color_get_HSV\",\n \"TCOD_color_get_hue\",\n \"TCOD_color_get_saturation\",\n \"TCOD_color_get_value\",\n \"TCOD_console_get_default_background\",\n \"TCOD_console_get_default_foreground\",\n \"TCOD_console_set_default_background\",\n \"TCOD_console_set_default_foreground\",\n \"TCOD_console_get_char_foreground\",\n \"TCOD_console_get_char_background\",\n \"TCOD_console_set_char_background\",\n \"TCOD_console_set_char_foreground\",\n \"TCOD_console_put_char_ex\",\n \"TCOD_console_set_fade\",\n \"TCOD_console_get_fading_color\",\n \"TCOD_console_set_color_control\",\n \"TCOD_image_clear\",\n \"TCOD_image_get_pixel\",\n \"TCOD_image_get_mipmap_pixel\",\n \"TCOD_image_put_pixel\",\n \"TCOD_image_set_key_color\",\n \"TCOD_parser_get_color_property\",\n \"TCOD_console_set_key_color\",\n ]:\n wrapper_func = getattr(_lib, function_name +\"_wrapper\", None)\n if wrapper_func is not None:\n setattr(_lib, function_name, wrapper_func)\n else:\n raise Exception(\"unable to find wrapper\", function_name)\n\nHEXVERSION = 0x010604\nSTRVERSION = \"1.6.4\"\nTECHVERSION = 0x01060400\n\n############################\n# color module\n############################\n\n\nclass Color(Structure):\n _fields_ = [('r', c_uint8),\n ('g', c_uint8),\n ('b', c_uint8),\n ]\n\n def __eq__(self, c):\n return _lib.TCOD_color_equals(self, c)\n\n def __mul__(self, c):\n if isinstance(c,Color):\n return _lib.TCOD_color_multiply(self, c)\n else:\n return _lib.TCOD_color_multiply_scalar(self, c_float(c))\n\n def __add__(self, c):\n return _lib.TCOD_color_add(self, c)\n\n def __sub__(self, c):\n return _lib.TCOD_color_subtract(self, c)\n\n def __repr__(self):\n return \"Color(%d,%d,%d)\" % (self.r, self.g, self.b)\n\n def __getitem__(self, i):\n if type(i) == str:\n return getattr(self, i)\n else:\n return getattr(self, \"rgb\"[i])\n\n def __setitem__(self, i, c):\n if type(i) == str:\n setattr(self, i, c)\n else:\n setattr(self, \"rgb\"[i], c)\n\n def __iter__(self):\n yield self.r\n yield self.g\n yield self.b\n\n_lib.TCOD_color_equals.restype=c_bool\n_lib.TCOD_color_equals.argtypes=[Color, 
Color]\n\n_lib.TCOD_color_add.restype=Color\n_lib.TCOD_color_add.argtypes=[Color, Color]\n\n_lib.TCOD_color_subtract.restype=Color\n_lib.TCOD_color_subtract.argtypes=[Color, Color]\n\n_lib.TCOD_color_multiply.restype=Color\n_lib.TCOD_color_multiply.argtypes=[Color , Color ]\n\n_lib.TCOD_color_multiply_scalar.restype=Color\n_lib.TCOD_color_multiply_scalar.argtypes=[Color , c_float ]\n\n# Should be valid on any platform, check it! Has to be done after Color is defined.\n# NOTE(rmtew): This should ideally be deleted. Most of it is moved or duplicated here.\nif MAC:\n from .cprotos import setup_protos\n setup_protos(_lib)\n\n\n# default colors\n# grey levels\nblack=Color(0,0,0)\ndarkest_grey=Color(31,31,31)\ndarker_grey=Color(63,63,63)\ndark_grey=Color(95,95,95)\ngrey=Color(127,127,127)\nlight_grey=Color(159,159,159)\nlighter_grey=Color(191,191,191)\nlightest_grey=Color(223,223,223)\ndarkest_gray=Color(31,31,31)\ndarker_gray=Color(63,63,63)\ndark_gray=Color(95,95,95)\ngray=Color(127,127,127)\nlight_gray=Color(159,159,159)\nlighter_gray=Color(191,191,191)\nlightest_gray=Color(223,223,223)\nwhite=Color(255,255,255)\n\n# sepia\ndarkest_sepia=Color(31,24,15)\ndarker_sepia=Color(63,50,31)\ndark_sepia=Color(94,75,47)\nsepia=Color(127,101,63)\nlight_sepia=Color(158,134,100)\nlighter_sepia=Color(191,171,143)\nlightest_sepia=Color(222,211,195)\n\n#standard colors\nred=Color(255,0,0)\nflame=Color(255,63,0)\norange=Color(255,127,0)\namber=Color(255,191,0)\nyellow=Color(255,255,0)\nlime=Color(191,255,0)\nchartreuse=Color(127,255,0)\ngreen=Color(0,255,0)\nsea=Color(0,255,127)\nturquoise=Color(0,255,191)\ncyan=Color(0,255,255)\nsky=Color(0,191,255)\nazure=Color(0,127,255)\nblue=Color(0,0,255)\nhan=Color(63,0,255)\nviolet=Color(127,0,255)\npurple=Color(191,0,255)\nfuchsia=Color(255,0,255)\nmagenta=Color(255,0,191)\npink=Color(255,0,127)\ncrimson=Color(255,0,63)\n\n# dark colors\ndark_red=Color(191,0,0)\ndark_flame=Color(191,47,0)\ndark_orange=Color(191,95,0)\ndark_amber=Color(191,143,0)\ndark_yellow=Color(191,191,0)\ndark_lime=Color(143,191,0)\ndark_chartreuse=Color(95,191,0)\ndark_green=Color(0,191,0)\ndark_sea=Color(0,191,95)\ndark_turquoise=Color(0,191,143)\ndark_cyan=Color(0,191,191)\ndark_sky=Color(0,143,191)\ndark_azure=Color(0,95,191)\ndark_blue=Color(0,0,191)\ndark_han=Color(47,0,191)\ndark_violet=Color(95,0,191)\ndark_purple=Color(143,0,191)\ndark_fuchsia=Color(191,0,191)\ndark_magenta=Color(191,0,143)\ndark_pink=Color(191,0,95)\ndark_crimson=Color(191,0,47)\n\n# darker colors\ndarker_red=Color(127,0,0)\ndarker_flame=Color(127,31,0)\ndarker_orange=Color(127,63,0)\ndarker_amber=Color(127,95,0)\ndarker_yellow=Color(127,127,0)\ndarker_lime=Color(95,127,0)\ndarker_chartreuse=Color(63,127,0)\ndarker_green=Color(0,127,0)\ndarker_sea=Color(0,127,63)\ndarker_turquoise=Color(0,127,95)\ndarker_cyan=Color(0,127,127)\ndarker_sky=Color(0,95,127)\ndarker_azure=Color(0,63,127)\ndarker_blue=Color(0,0,127)\ndarker_han=Color(31,0,127)\ndarker_violet=Color(63,0,127)\ndarker_purple=Color(95,0,127)\ndarker_fuchsia=Color(127,0,127)\ndarker_magenta=Color(127,0,95)\ndarker_pink=Color(127,0,63)\ndarker_crimson=Color(127,0,31)\n\n# darkest 
colors\ndarkest_red=Color(63,0,0)\ndarkest_flame=Color(63,15,0)\ndarkest_orange=Color(63,31,0)\ndarkest_amber=Color(63,47,0)\ndarkest_yellow=Color(63,63,0)\ndarkest_lime=Color(47,63,0)\ndarkest_chartreuse=Color(31,63,0)\ndarkest_green=Color(0,63,0)\ndarkest_sea=Color(0,63,31)\ndarkest_turquoise=Color(0,63,47)\ndarkest_cyan=Color(0,63,63)\ndarkest_sky=Color(0,47,63)\ndarkest_azure=Color(0,31,63)\ndarkest_blue=Color(0,0,63)\ndarkest_han=Color(15,0,63)\ndarkest_violet=Color(31,0,63)\ndarkest_purple=Color(47,0,63)\ndarkest_fuchsia=Color(63,0,63)\ndarkest_magenta=Color(63,0,47)\ndarkest_pink=Color(63,0,31)\ndarkest_crimson=Color(63,0,15)\n\n# light colors\nlight_red=Color(255,114,114)\nlight_flame=Color(255,149,114)\nlight_orange=Color(255,184,114)\nlight_amber=Color(255,219,114)\nlight_yellow=Color(255,255,114)\nlight_lime=Color(219,255,114)\nlight_chartreuse=Color(184,255,114)\nlight_green=Color(114,255,114)\nlight_sea=Color(114,255,184)\nlight_turquoise=Color(114,255,219)\nlight_cyan=Color(114,255,255)\nlight_sky=Color(114,219,255)\nlight_azure=Color(114,184,255)\nlight_blue=Color(114,114,255)\nlight_han=Color(149,114,255)\nlight_violet=Color(184,114,255)\nlight_purple=Color(219,114,255)\nlight_fuchsia=Color(255,114,255)\nlight_magenta=Color(255,114,219)\nlight_pink=Color(255,114,184)\nlight_crimson=Color(255,114,149)\n\n#lighter colors\nlighter_red=Color(255,165,165)\nlighter_flame=Color(255,188,165)\nlighter_orange=Color(255,210,165)\nlighter_amber=Color(255,232,165)\nlighter_yellow=Color(255,255,165)\nlighter_lime=Color(232,255,165)\nlighter_chartreuse=Color(210,255,165)\nlighter_green=Color(165,255,165)\nlighter_sea=Color(165,255,210)\nlighter_turquoise=Color(165,255,232)\nlighter_cyan=Color(165,255,255)\nlighter_sky=Color(165,232,255)\nlighter_azure=Color(165,210,255)\nlighter_blue=Color(165,165,255)\nlighter_han=Color(188,165,255)\nlighter_violet=Color(210,165,255)\nlighter_purple=Color(232,165,255)\nlighter_fuchsia=Color(255,165,255)\nlighter_magenta=Color(255,165,232)\nlighter_pink=Color(255,165,210)\nlighter_crimson=Color(255,165,188)\n\n# lightest colors\nlightest_red=Color(255,191,191)\nlightest_flame=Color(255,207,191)\nlightest_orange=Color(255,223,191)\nlightest_amber=Color(255,239,191)\nlightest_yellow=Color(255,255,191)\nlightest_lime=Color(239,255,191)\nlightest_chartreuse=Color(223,255,191)\nlightest_green=Color(191,255,191)\nlightest_sea=Color(191,255,223)\nlightest_turquoise=Color(191,255,239)\nlightest_cyan=Color(191,255,255)\nlightest_sky=Color(191,239,255)\nlightest_azure=Color(191,223,255)\nlightest_blue=Color(191,191,255)\nlightest_han=Color(207,191,255)\nlightest_violet=Color(223,191,255)\nlightest_purple=Color(239,191,255)\nlightest_fuchsia=Color(255,191,255)\nlightest_magenta=Color(255,191,239)\nlightest_pink=Color(255,191,223)\nlightest_crimson=Color(255,191,207)\n\n# desaturated 
colors\ndesaturated_red=Color(127,63,63)\ndesaturated_flame=Color(127,79,63)\ndesaturated_orange=Color(127,95,63)\ndesaturated_amber=Color(127,111,63)\ndesaturated_yellow=Color(127,127,63)\ndesaturated_lime=Color(111,127,63)\ndesaturated_chartreuse=Color(95,127,63)\ndesaturated_green=Color(63,127,63)\ndesaturated_sea=Color(63,127,95)\ndesaturated_turquoise=Color(63,127,111)\ndesaturated_cyan=Color(63,127,127)\ndesaturated_sky=Color(63,111,127)\ndesaturated_azure=Color(63,95,127)\ndesaturated_blue=Color(63,63,127)\ndesaturated_han=Color(79,63,127)\ndesaturated_violet=Color(95,63,127)\ndesaturated_purple=Color(111,63,127)\ndesaturated_fuchsia=Color(127,63,127)\ndesaturated_magenta=Color(127,63,111)\ndesaturated_pink=Color(127,63,95)\ndesaturated_crimson=Color(127,63,79)\n\n# metallic\nbrass=Color(191,151,96)\ncopper=Color(197,136,124)\ngold=Color(229,191,0)\nsilver=Color(203,203,203)\n\n# miscellaneous\nceladon=Color(172,255,175)\npeach=Color(255,159,127)\n\n# color functions\n_lib.TCOD_color_lerp.restype = Color\ndef color_lerp(c1, c2, a):\n return _lib.TCOD_color_lerp(c1, c2, c_float(a))\n\n_lib.TCOD_color_set_HSV.restype=c_void\n_lib.TCOD_color_set_HSV.argtypes=[POINTER(Color),c_float , c_float , c_float ]\ndef color_set_hsv(c, h, s, v):\n _lib.TCOD_color_set_HSV(byref(c), c_float(h), c_float(s), c_float(v))\n\n_lib.TCOD_color_get_HSV.restype=c_void\n_lib.TCOD_color_get_HSV.argtypes=[Color ,POINTER(c_float) , POINTER(c_float) , POINTER(c_float) ]\ndef color_get_hsv(c):\n h = c_float()\n s = c_float()\n v = c_float()\n _lib.TCOD_color_get_HSV(c, byref(h), byref(s), byref(v))\n return h.value, s.value, v.value\n\n_lib.TCOD_color_scale_HSV.restype=c_void\n_lib.TCOD_color_scale_HSV.argtypes=[POINTER(Color), c_float , c_float ]\ndef color_scale_HSV(c, scoef, vcoef) :\n _lib.TCOD_color_scale_HSV(byref(c),c_float(scoef),c_float(vcoef))\n\n_lib.TCOD_color_gen_map.restype=c_void\n_lib.TCOD_color_gen_map.argtypes=[POINTER(Color), c_int, POINTER(Color), POINTER(c_int)]\ndef color_gen_map(colors, indexes):\n ccolors = (Color * len(colors))(*colors)\n cindexes = (c_int * len(indexes))(*indexes)\n cres = (Color * (max(indexes) + 1))()\n _lib.TCOD_color_gen_map(cres, len(colors), ccolors, cindexes)\n return cres\n\n############################\n# console module\n############################\nclass Key(Structure):\n _fields_=[('vk', c_int),\n ('c', c_uint8),\n ('text',c_char * 32),\n ('pressed', c_bool),\n ('lalt', c_bool),\n ('lctrl', c_bool),\n ('lmeta', c_bool),\n ('ralt', c_bool),\n ('rctrl', c_bool),\n ('rmeta', c_bool),\n ('shift', c_bool)\n ]\n\nclass ConsoleBuffer:\n # simple console that allows direct (fast) access to cells. simplifies\n # use of the \"fill\" functions.\n def __init__(self, width, height, back_r=0, back_g=0, back_b=0, fore_r=0, fore_g=0, fore_b=0, char=' '):\n # initialize with given width and height. values to fill the buffer\n # are optional, defaults to black with no characters.\n n = width * height\n self.width = width\n self.height = height\n self.clear(back_r, back_g, back_b, fore_r, fore_g, fore_b, char)\n\n def clear(self, back_r=0, back_g=0, back_b=0, fore_r=0, fore_g=0, fore_b=0, char=' '):\n # clears the console. 
values to fill it with are optional, defaults\n # to black with no characters.\n n = self.width * self.height\n self.back_r = [back_r] * n\n self.back_g = [back_g] * n\n self.back_b = [back_b] * n\n self.fore_r = [fore_r] * n\n self.fore_g = [fore_g] * n\n self.fore_b = [fore_b] * n\n self.char = [ord(char)] * n\n\n def copy(self):\n # returns a copy of this ConsoleBuffer.\n other = ConsoleBuffer(0, 0)\n other.width = self.width\n other.height = self.height\n other.back_r = list(self.back_r) # make explicit copies of all lists\n other.back_g = list(self.back_g)\n other.back_b = list(self.back_b)\n other.fore_r = list(self.fore_r)\n other.fore_g = list(self.fore_g)\n other.fore_b = list(self.fore_b)\n other.char = list(self.char)\n return other\n\n def set_fore(self, x, y, r, g, b, char):\n # set the character and foreground color of one cell.\n i = self.width * y + x\n self.fore_r[i] = int(r)\n self.fore_g[i] = int(g)\n self.fore_b[i] = int(b)\n self.char[i] = ord(char)\n\n def set_back(self, x, y, r, g, b):\n # set the background color of one cell.\n i = self.width * y + x\n self.back_r[i] = int(r)\n self.back_g[i] = int(g)\n self.back_b[i] = int(b)\n\n def set(self, x, y, back_r, back_g, back_b, fore_r, fore_g, fore_b, char):\n # set the background color, foreground color and character of one cell.\n i = self.width * y + x\n self.back_r[i] = int(back_r)\n self.back_g[i] = int(back_g)\n self.back_b[i] = int(back_b)\n self.fore_r[i] = int(fore_r)\n self.fore_g[i] = int(fore_g)\n self.fore_b[i] = int(fore_b)\n self.char[i] = ord(char)\n\n def blit(self, dest, fill_fore=True, fill_back=True):\n # use libtcod's \"fill\" functions to write the buffer to a console.\n if (console_get_width(dest) != self.width or\n console_get_height(dest) != self.height):\n raise ValueError('ConsoleBuffer.blit: Destination console has an incorrect size.')\n\n s = struct.Struct('%di' % len(self.back_r))\n\n if fill_back:\n _lib.TCOD_console_fill_background(c_void_p(dest), (c_int * len(self.back_r))(*self.back_r), (c_int * len(self.back_g))(*self.back_g), (c_int * len(self.back_b))(*self.back_b))\n\n if fill_fore:\n _lib.TCOD_console_fill_foreground(c_void_p(dest), (c_int * len(self.fore_r))(*self.fore_r), (c_int * len(self.fore_g))(*self.fore_g), (c_int * len(self.fore_b))(*self.fore_b))\n _lib.TCOD_console_fill_char(c_void_p(dest), (c_int * len(self.char))(*self.char))\n\n_lib.TCOD_console_is_fullscreen.restype = c_bool\n_lib.TCOD_console_is_window_closed.restype = c_bool\n_lib.TCOD_console_has_mouse_focus.restype = c_bool\n_lib.TCOD_console_is_active.restype = c_bool\n_lib.TCOD_console_get_default_background.restype = Color\n_lib.TCOD_console_get_default_foreground.restype = Color\n_lib.TCOD_console_get_char_background.restype = Color\n_lib.TCOD_console_get_char_foreground.restype = Color\n_lib.TCOD_console_get_fading_color.restype = Color\n_lib.TCOD_console_is_key_pressed.restype = c_bool\n\n# background rendering modes\nBKGND_NONE = 0\nBKGND_SET = 1\nBKGND_MULTIPLY = 2\nBKGND_LIGHTEN = 3\nBKGND_DARKEN = 4\nBKGND_SCREEN = 5\nBKGND_COLOR_DODGE = 6\nBKGND_COLOR_BURN = 7\nBKGND_ADD = 8\nBKGND_ADDA = 9\nBKGND_BURN = 10\nBKGND_OVERLAY = 11\nBKGND_ALPH = 12\nBKGND_DEFAULT=13\n\ndef BKGND_ALPHA(a):\n return BKGND_ALPH | (int(a * 255) << 8)\n\ndef BKGND_ADDALPHA(a):\n return BKGND_ADDA | (int(a * 255) << 8)\n\n# non blocking key events types\nKEY_PRESSED = 1\nKEY_RELEASED = 2\n# key codes\nKEY_NONE = 0\nKEY_ESCAPE = 1\nKEY_BACKSPACE = 2\nKEY_TAB = 3\nKEY_ENTER = 4\nKEY_SHIFT = 5\nKEY_CONTROL = 6\nKEY_ALT = 
7\nKEY_PAUSE = 8\nKEY_CAPSLOCK = 9\nKEY_PAGEUP = 10\nKEY_PAGEDOWN = 11\nKEY_END = 12\nKEY_HOME = 13\nKEY_UP = 14\nKEY_LEFT = 15\nKEY_RIGHT = 16\nKEY_DOWN = 17\nKEY_PRINTSCREEN = 18\nKEY_INSERT = 19\nKEY_DELETE = 20\nKEY_LWIN = 21\nKEY_RWIN = 22\nKEY_APPS = 23\nKEY_0 = 24\nKEY_1 = 25\nKEY_2 = 26\nKEY_3 = 27\nKEY_4 = 28\nKEY_5 = 29\nKEY_6 = 30\nKEY_7 = 31\nKEY_8 = 32\nKEY_9 = 33\nKEY_KP0 = 34\nKEY_KP1 = 35\nKEY_KP2 = 36\nKEY_KP3 = 37\nKEY_KP4 = 38\nKEY_KP5 = 39\nKEY_KP6 = 40\nKEY_KP7 = 41\nKEY_KP8 = 42\nKEY_KP9 = 43\nKEY_KPADD = 44\nKEY_KPSUB = 45\nKEY_KPDIV = 46\nKEY_KPMUL = 47\nKEY_KPDEC = 48\nKEY_KPENTER = 49\nKEY_F1 = 50\nKEY_F2 = 51\nKEY_F3 = 52\nKEY_F4 = 53\nKEY_F5 = 54\nKEY_F6 = 55\nKEY_F7 = 56\nKEY_F8 = 57\nKEY_F9 = 58\nKEY_F10 = 59\nKEY_F11 = 60\nKEY_F12 = 61\nKEY_NUMLOCK = 62\nKEY_SCROLLLOCK = 63\nKEY_SPACE = 64\nKEY_CHAR = 65\nKEY_TEXT = 66\n# special chars\n# single walls\nCHAR_HLINE = 196\nCHAR_VLINE = 179\nCHAR_NE = 191\nCHAR_NW = 218\nCHAR_SE = 217\nCHAR_SW = 192\nCHAR_TEEW = 180\nCHAR_TEEE = 195\nCHAR_TEEN = 193\nCHAR_TEES = 194\nCHAR_CROSS = 197\n# double walls\nCHAR_DHLINE = 205\nCHAR_DVLINE = 186\nCHAR_DNE = 187\nCHAR_DNW = 201\nCHAR_DSE = 188\nCHAR_DSW = 200\nCHAR_DTEEW = 185\nCHAR_DTEEE = 204\nCHAR_DTEEN = 202\nCHAR_DTEES = 203\nCHAR_DCROSS = 206\n# blocks\nCHAR_BLOCK1 = 176\nCHAR_BLOCK2 = 177\nCHAR_BLOCK3 = 178\n# arrows\nCHAR_ARROW_N = 24\nCHAR_ARROW_S = 25\nCHAR_ARROW_E = 26\nCHAR_ARROW_W = 27\n# arrows without tail\nCHAR_ARROW2_N = 30\nCHAR_ARROW2_S = 31\nCHAR_ARROW2_E = 16\nCHAR_ARROW2_W = 17\n# double arrows\nCHAR_DARROW_H = 29\nCHAR_DARROW_V = 18\n# GUI stuff\nCHAR_CHECKBOX_UNSET = 224\nCHAR_CHECKBOX_SET = 225\nCHAR_RADIO_UNSET = 9\nCHAR_RADIO_SET = 10\n# sub-pixel resolution kit\nCHAR_SUBP_NW = 226\nCHAR_SUBP_NE = 227\nCHAR_SUBP_N = 228\nCHAR_SUBP_SE = 229\nCHAR_SUBP_DIAG = 230\nCHAR_SUBP_E = 231\nCHAR_SUBP_SW = 232\n# misc characters\nCHAR_BULLET = 7\nCHAR_BULLET_INV = 8\nCHAR_BULLET_SQUARE = 254\nCHAR_CENT = 189\nCHAR_CLUB = 5\nCHAR_COPYRIGHT = 184\nCHAR_CURRENCY = 207\nCHAR_DIAMOND = 4\nCHAR_DIVISION = 246\nCHAR_EXCLAM_DOUBLE = 19\nCHAR_FEMALE = 12\nCHAR_FUNCTION = 159\nCHAR_GRADE = 248\nCHAR_HALF = 171\nCHAR_HEART = 3\nCHAR_LIGHT = 15\nCHAR_MALE = 11\nCHAR_MULTIPLICATION = 158\nCHAR_NOTE = 13\nCHAR_NOTE_DOUBLE = 14\nCHAR_ONE_QUARTER = 172\nCHAR_PILCROW = 20\nCHAR_POUND = 156\nCHAR_POW1 = 251\nCHAR_POW2 = 253\nCHAR_POW3 = 252\nCHAR_RESERVED = 169\nCHAR_SECTION = 21\nCHAR_SMILIE = 1\nCHAR_SMILIE_INV = 2\nCHAR_SPADE = 6\nCHAR_THREE_QUARTERS = 243\nCHAR_UMLAUT = 249\nCHAR_YEN = 190\n# font flags\nFONT_LAYOUT_ASCII_INCOL = 1\nFONT_LAYOUT_ASCII_INROW = 2\nFONT_TYPE_GREYSCALE = 4\nFONT_TYPE_GRAYSCALE = 4\nFONT_LAYOUT_TCOD = 8\nFONT_LAYOUT_CP437 = 16\n# color control codes\nCOLCTRL_1=1\nCOLCTRL_2=2\nCOLCTRL_3=3\nCOLCTRL_4=4\nCOLCTRL_5=5\nCOLCTRL_NUMBER=5\nCOLCTRL_FORE_RGB=6\nCOLCTRL_BACK_RGB=7\nCOLCTRL_STOP=8\n# renderers\nRENDERER_GLSL=0\nRENDERER_OPENGL=1\nRENDERER_SDL=2\nRENDERER_SDL2=3\nRENDERER_OPENGL2=4\nNB_RENDERERS=5\n# alignment\nLEFT=0\nRIGHT=1\nCENTER=2\n\n\n_lib.TCOD_quit.restype = c_void\n_lib.TCOD_quit.argtypes = []\n\n# initializing the console\n\n_lib.TCOD_console_init_root.restype=c_int\n_lib.TCOD_console_init_root.argtypes=[c_int, c_int, c_char_p , c_bool , c_uint ]\ndef console_init_root(w, h, title, fullscreen=False, renderer=RENDERER_SDL):\n atexit.register(_lib.TCOD_quit)\n return _lib.TCOD_console_init_root(w, h, convert_to_ascii(title), fullscreen, 
renderer)\n\n_lib.TCOD_console_set_custom_font.restype=c_void\n_lib.TCOD_console_set_custom_font.argtypes=[c_char_p, c_int,c_int, c_int]\ndef console_set_custom_font(fontFile, flags=FONT_LAYOUT_ASCII_INCOL, nb_char_horiz=0, nb_char_vertic=0):\n _lib.TCOD_console_set_custom_font(convert_to_ascii(fontFile), flags, nb_char_horiz, nb_char_vertic)\n\n_lib.TCOD_console_map_ascii_code_to_font.restype=c_void\n_lib.TCOD_console_map_ascii_code_to_font.argtypes=[c_int, c_int, c_int]\ndef console_map_ascii_code_to_font(asciiCode, fontCharX, fontCharY):\n asciiCode = convert_to_ascii(asciiCode)\n if type(asciiCode) is bytes:\n _lib.TCOD_console_map_ascii_code_to_font(ord(asciiCode), fontCharX, fontCharY)\n else:\n _lib.TCOD_console_map_ascii_code_to_font(asciiCode, fontCharX, fontCharY)\n\n\n_lib.TCOD_console_map_ascii_codes_to_font.restype=c_void\n_lib.TCOD_console_map_ascii_codes_to_font.argtypes=[c_int, c_int, c_int, c_int]\ndef console_map_ascii_codes_to_font(firstAsciiCode, nbCodes, fontCharX,\n fontCharY):\n if type(firstAsciiCode) == str or type(firstAsciiCode) == bytes:\n _lib.TCOD_console_map_ascii_codes_to_font(ord(firstAsciiCode), nbCodes, fontCharX, fontCharY)\n else:\n _lib.TCOD_console_map_ascii_codes_to_font(firstAsciiCode, nbCodes, fontCharX, fontCharY)\n\n_lib.TCOD_console_map_string_to_font.argtypes=[c_char_p, c_int, c_int]\n_lib.TCOD_console_map_string_to_font_utf.argtypes=[c_wchar_p, c_int, c_int]\ndef console_map_string_to_font(s, fontCharX, fontCharY):\n # Python 3, utf is normal, so if they want utf behaviour call the other function.\n if type(s) is bytes or is_python_3:\n _lib.TCOD_console_map_string_to_font(convert_to_ascii(s), fontCharX, fontCharY)\n else:\n _lib.TCOD_console_map_string_to_font_utf(s, fontCharX, fontCharY)\n\ndef console_map_string_to_font_utf(s, fontCharX, fontCharY):\n _lib.TCOD_console_map_string_to_font_utf(s, fontCharX, fontCharY)\n\n_lib.TCOD_console_is_fullscreen.restype=c_bool\n_lib.TCOD_console_is_fullscreen.argtypes=[]\ndef console_is_fullscreen():\n return _lib.TCOD_console_is_fullscreen()\n\n_lib.TCOD_console_set_fullscreen.restype=c_void\n_lib.TCOD_console_set_fullscreen.argtypes=[c_bool ]\ndef console_set_fullscreen(fullscreen):\n _lib.TCOD_console_set_fullscreen(c_int(fullscreen))\n\n_lib.TCOD_console_is_window_closed.restype=c_bool\n_lib.TCOD_console_is_window_closed.argtypes=[]\ndef console_is_window_closed():\n return _lib.TCOD_console_is_window_closed()\n\n_lib.TCOD_console_has_mouse_focus.restype=c_bool\n_lib.TCOD_console_has_mouse_focus.argtypes=[]\ndef console_has_mouse_focus():\n return _lib.TCOD_console_has_mouse_focus()\n\n_lib.TCOD_console_is_active.restype=c_bool\n_lib.TCOD_console_is_active.argtypes=[]\ndef console_is_active():\n return _lib.TCOD_console_is_active()\n\n_lib.TCOD_console_set_window_title.restype=c_void\n_lib.TCOD_console_set_window_title.argtypes=[c_char_p]\ndef console_set_window_title(title):\n _lib.TCOD_console_set_window_title(convert_to_ascii(title))\n\n_lib.TCOD_console_credits_render.restype = c_bool\ndef console_credits():\n _lib.TCOD_console_credits()\n\n_lib.TCOD_console_credits_reset.restype=c_void\n_lib.TCOD_console_credits_reset.argtypes=[]\ndef console_credits_reset():\n _lib.TCOD_console_credits_reset()\n\n_lib.TCOD_console_credits_render.restype=c_bool\n_lib.TCOD_console_credits_render.argtypes=[c_int, c_int, c_bool ]\ndef console_credits_render(x, y, alpha):\n return _lib.TCOD_console_credits_render(x, y, c_int(alpha))\n\n_lib.TCOD_console_flush.restype=c_int\n_lib.TCOD_console_flush.argtypes=[]\ndef 
console_flush():\n return _lib.TCOD_console_flush()\n\n# drawing on a console\n\n_lib.TCOD_console_set_default_background.restype=c_void\n_lib.TCOD_console_set_default_background.argtypes=[c_void_p ,Color ]\ndef console_set_default_background(con, col):\n _lib.TCOD_console_set_default_background(con, col)\n\n\n_lib.TCOD_console_set_default_foreground.restype=c_void\n_lib.TCOD_console_set_default_foreground.argtypes=[c_void_p ,Color ]\ndef console_set_default_foreground(con, col):\n _lib.TCOD_console_set_default_foreground(con, col)\n\n_lib.TCOD_console_clear.restype=c_void\n_lib.TCOD_console_clear.argtypes=[c_void_p ]\ndef console_clear(con):\n return _lib.TCOD_console_clear(con)\n\n_lib.TCOD_console_put_char.restype=c_void\n_lib.TCOD_console_put_char.argtypes=[c_void_p ,c_int, c_int, c_int, c_int]\ndef console_put_char(con, x, y, c, flag=BKGND_DEFAULT):\n if type(c) == str or type(c) == bytes:\n _lib.TCOD_console_put_char(c_void_p(con), x, y, ord(c), flag)\n else:\n _lib.TCOD_console_put_char(c_void_p(con), x, y, c, flag)\n\n_lib.TCOD_console_put_char_ex.restype=c_void\n_lib.TCOD_console_put_char_ex.argtypes=[c_void_p ,c_int, c_int, c_int, Color, Color]\ndef console_put_char_ex(con, x, y, c, fore, back):\n if type(c) == str or type(c) == bytes:\n _lib.TCOD_console_put_char_ex(c_void_p(con), x, y, ord(c), fore, back)\n else:\n _lib.TCOD_console_put_char_ex(c_void_p(con), x, y, c, fore, back)\n\n_lib.TCOD_console_set_char_background.restype=c_void\n_lib.TCOD_console_set_char_background.argtypes=[c_void_p ,c_int, c_int, Color , c_int ]\ndef console_set_char_background(con, x, y, col, flag=BKGND_SET):\n _lib.TCOD_console_set_char_background(con, x, y, col, flag)\n\n_lib.TCOD_console_set_char_foreground.restype=c_void\n_lib.TCOD_console_set_char_foreground.argtypes=[c_void_p ,c_int, c_int, Color ]\ndef console_set_char_foreground(con, x, y, col):\n _lib.TCOD_console_set_char_foreground(con, x, y, col)\n\n_lib.TCOD_console_set_char.restype=c_void\n_lib.TCOD_console_set_char.argtypes=[c_void_p ,c_int, c_int, c_int]\ndef console_set_char(con, x, y, c):\n if type(c) == str or type(c) == bytes:\n _lib.TCOD_console_set_char(con, x, y, ord(c))\n else:\n _lib.TCOD_console_set_char(con, x, y, c)\n\n_lib.TCOD_console_set_background_flag.restype=c_void\n_lib.TCOD_console_set_background_flag.argtypes=[c_void_p ,c_int ]\ndef console_set_background_flag(con, flag):\n _lib.TCOD_console_set_background_flag(con, flag)\n\n_lib.TCOD_console_get_background_flag.restype=c_int\n_lib.TCOD_console_get_background_flag.argtypes=[c_void_p ]\ndef console_get_background_flag(con):\n return _lib.TCOD_console_get_background_flag(con)\n\n_lib.TCOD_console_set_alignment.restype=c_void\n_lib.TCOD_console_set_alignment.argtypes=[c_void_p ,c_int ]\ndef console_set_alignment(con, alignment):\n _lib.TCOD_console_set_alignment(con, alignment)\n\n_lib.TCOD_console_get_alignment.restype=c_int\n_lib.TCOD_console_get_alignment.argtypes=[c_void_p ]\ndef console_get_alignment(con):\n return _lib.TCOD_console_get_alignment(con)\n\n_lib.TCOD_console_print.argtypes=[c_void_p,c_int,c_int,c_char_p]\n_lib.TCOD_console_print_utf.argtypes=[c_void_p,c_int,c_int,c_wchar_p]\ndef console_print(con, x, y, fmt):\n _lib.TCOD_console_print_utf(con, x, y, _fmt_unicode(fmt))\n\n_lib.TCOD_console_print_ex.argtypes=[c_void_p,c_int,c_int,c_int,c_int,c_char_p]\n_lib.TCOD_console_print_ex_utf.argtypes=[c_void_p, c_int, c_int, c_int, c_int, c_wchar_p]\ndef console_print_ex(con, x, y, flag, alignment, fmt):\n _lib.TCOD_console_print_ex_utf(con, x, y, flag, 
alignment,\n _fmt_unicode(fmt))\n\n_lib.TCOD_console_print_rect.argtypes=[c_void_p, c_int, c_int, c_int, c_int, c_char_p]\n_lib.TCOD_console_print_rect_utf.argtypes=[c_void_p, c_int, c_int, c_int, c_int, c_wchar_p]\ndef console_print_rect(con, x, y, w, h, fmt):\n return _lib.TCOD_console_print_rect_utf(con, x, y, w, h, _fmt_unicode(fmt))\n\n_lib.TCOD_console_print_rect_ex.argtypes=[c_void_p, c_int, c_int, c_int, c_int, c_int, c_int, c_char_p]\n_lib.TCOD_console_print_rect_ex_utf.argtypes=[c_void_p, c_int, c_int, c_int, c_int, c_int, c_int, c_wchar_p]\ndef console_print_rect_ex(con, x, y, w, h, flag, alignment, fmt):\n return _lib.TCOD_console_print_rect_ex_utf(con, x, y, w, h, flag, alignment, _fmt_unicode(fmt))\n\n_lib.TCOD_console_get_height_rect.argtypes=[c_void_p, c_int, c_int, c_int, c_int, c_char_p]\n_lib.TCOD_console_get_height_rect_utf.argtypes=[c_void_p, c_int, c_int, c_int, c_int, c_wchar_p]\ndef console_get_height_rect(con, x, y, w, h, fmt):\n return _lib.TCOD_console_get_height_rect_utf(con, x, y, w, h,\n _fmt_unicode(fmt))\n\n_lib.TCOD_console_rect.argtypes=[ c_void_p, c_int, c_int, c_int, c_int, c_bool, c_int ]\ndef console_rect(con, x, y, w, h, clr, flag=BKGND_DEFAULT):\n _lib.TCOD_console_rect(con, x, y, w, h, c_int(clr), flag)\n\n_lib.TCOD_console_hline.argtypes=[ c_void_p, c_int, c_int, c_int, c_int ]\ndef console_hline(con, x, y, l, flag=BKGND_DEFAULT):\n _lib.TCOD_console_hline( con, x, y, l, flag)\n\n_lib.TCOD_console_vline.argtypes=[ c_void_p, c_int, c_int, c_int, c_int ]\ndef console_vline(con, x, y, l, flag=BKGND_DEFAULT):\n _lib.TCOD_console_vline( con, x, y, l, flag)\n\n_lib.TCOD_console_print_frame.argtypes=[c_void_p,c_int,c_int,c_int,c_int,c_int,c_int,c_char_p]\ndef console_print_frame(con, x, y, w, h, clear=True, flag=BKGND_DEFAULT, fmt=''):\n _lib.TCOD_console_print_frame(con, x, y, w, h, clear, flag,\n _fmt_bytes(fmt))\n\n_lib.TCOD_console_set_color_control.restype=c_void\n_lib.TCOD_console_set_color_control.argtypes=[c_void_p, Color, Color ]\ndef console_set_color_control(con,fore,back) :\n _lib.TCOD_console_set_color_control(con,fore,back)\n\n_lib.TCOD_console_get_default_background.restype=Color\n_lib.TCOD_console_get_default_background.argtypes=[c_void_p]\ndef console_get_default_background(con):\n return _lib.TCOD_console_get_default_background(con)\n\n_lib.TCOD_console_get_default_foreground.restype=Color\n_lib.TCOD_console_get_default_foreground.argtypes=[c_void_p]\ndef console_get_default_foreground(con):\n return _lib.TCOD_console_get_default_foreground(con)\n\n_lib.TCOD_console_get_char_background.restype=Color\n_lib.TCOD_console_get_char_background.argtypes=[c_void_p, c_int, c_int]\ndef console_get_char_background(con, x, y):\n return _lib.TCOD_console_get_char_background(con, x, y)\n\n_lib.TCOD_console_get_char_foreground.restype=Color\n_lib.TCOD_console_get_char_foreground.argtypes=[c_void_p, c_int, c_int]\ndef console_get_char_foreground(con, x, y):\n return _lib.TCOD_console_get_char_foreground(con, x, y)\n\n_lib.TCOD_console_get_char.restype=c_int\n_lib.TCOD_console_get_char.argtypes=[c_void_p, c_int, c_int]\ndef console_get_char(con, x, y):\n return _lib.TCOD_console_get_char(con, x, y)\n\n_lib.TCOD_console_set_fade.restype=c_void\n_lib.TCOD_console_set_fade.argtypes=[c_byte, Color]\ndef console_set_fade(fade, fadingColor):\n _lib.TCOD_console_set_fade(fade, fadingColor)\n\n_lib.TCOD_console_get_fade.restype=c_byte\n_lib.TCOD_console_get_fade.argtypes=[]\ndef console_get_fade():\n return 
_lib.TCOD_console_get_fade()\n\n_lib.TCOD_console_get_fading_color.restype=Color\n_lib.TCOD_console_get_fading_color.argtypes=[]\ndef console_get_fading_color():\n return _lib.TCOD_console_get_fading_color()\n\n# handling keyboard input\ndef console_wait_for_keypress(flush):\n k=Key()\n _lib.TCOD_console_wait_for_keypress_wrapper(byref(k),c_bool(flush))\n return k\n\ndef console_check_for_keypress(flags=KEY_RELEASED):\n k=Key()\n _lib.TCOD_console_check_for_keypress_wrapper(byref(k),c_int(flags))\n return k\n\n_lib.TCOD_console_is_key_pressed.restype=c_bool\n_lib.TCOD_console_is_key_pressed.argtypes=[c_int ]\ndef console_is_key_pressed(key):\n return _lib.TCOD_console_is_key_pressed(key)\n\n# using offscreen consoles\n_lib.TCOD_console_new.restype=c_void_p\n_lib.TCOD_console_new.argtypes=[c_int, c_int]\ndef console_new(w, h):\n return _lib.TCOD_console_new(w, h)\n\n_lib.TCOD_console_from_file.restype=c_void_p\n_lib.TCOD_console_from_file.argtypes=[c_char_p]\ndef console_from_file(filename):\n return _lib.TCOD_console_from_file(convert_to_ascii(filename))\n\n_lib.TCOD_console_get_width.restype=c_int\n_lib.TCOD_console_get_width.argtypes=[c_void_p ]\ndef console_get_width(con):\n return _lib.TCOD_console_get_width(con)\n\n_lib.TCOD_console_get_height.restype=c_int\n_lib.TCOD_console_get_height.argtypes=[c_void_p ]\ndef console_get_height(con):\n return _lib.TCOD_console_get_height(con)\n\n_lib.TCOD_console_blit.argtypes=[c_void_p ,c_int, c_int, c_int, c_int, c_void_p , c_int, c_int, c_float, c_float]\ndef console_blit(src, x, y, w, h, dst, xdst, ydst, ffade=1.0,bfade=1.0):\n _lib.TCOD_console_blit(src, x, y, w, h, dst, xdst, ydst, c_float(ffade), c_float(bfade))\n\n_lib.TCOD_console_set_key_color.argtypes=[c_void_p ,Color ]\ndef console_set_key_color(con, col):\n _lib.TCOD_console_set_key_color(c_void_p(con), col)\n\n_lib.TCOD_console_set_dirty.restype=c_void\n_lib.TCOD_console_set_dirty.argtypes=[c_int, c_int, c_int, c_int]\ndef console_set_dirty(x, y, w, h):\n return _lib.TCOD_console_set_dirty(x, y, w, h)\n\n_lib.TCOD_console_delete.argtypes=[c_void_p ]\ndef console_delete(con):\n _lib.TCOD_console_delete(con)\n\n# fast color filling\n_lib.TCOD_console_fill_foreground.restype=c_void\n_lib.TCOD_console_fill_foreground.argtypes=[c_void_p , POINTER(c_int), POINTER(c_int), POINTER(c_int)]\ndef console_fill_foreground(con,r,g,b) :\n if len(r) != len(g) or len(r) != len(b):\n raise TypeError('R, G and B must all have the same size.')\n\n if (numpy_available and isinstance(r, numpy.ndarray) and\n isinstance(g, numpy.ndarray) and isinstance(b, numpy.ndarray)):\n #numpy arrays, use numpy's ctypes functions\n r = numpy.ascontiguousarray(r, dtype=numpy.int32)\n g = numpy.ascontiguousarray(g, dtype=numpy.int32)\n b = numpy.ascontiguousarray(b, dtype=numpy.int32)\n cr = r.ctypes.data_as(POINTER(c_int))\n cg = g.ctypes.data_as(POINTER(c_int))\n cb = b.ctypes.data_as(POINTER(c_int))\n else:\n # otherwise convert using ctypes arrays\n cr = (c_int * len(r))(*r)\n cg = (c_int * len(g))(*g)\n cb = (c_int * len(b))(*b)\n\n _lib.TCOD_console_fill_foreground(c_void_p(con), cr, cg, cb)\n\n_lib.TCOD_console_fill_background.restype=c_void\n_lib.TCOD_console_fill_background.argtypes=[c_void_p , POINTER(c_int), POINTER(c_int), POINTER(c_int)]\n\ndef console_fill_background(con,r,g,b) :\n if len(r) != len(g) or len(r) != len(b):\n raise TypeError('R, G and B must all have the same size.')\n\n if (numpy_available and isinstance(r, numpy.ndarray) and\n isinstance(g, numpy.ndarray) and isinstance(b, numpy.ndarray)):\n 
#numpy arrays, use numpy's ctypes functions\n r = numpy.ascontiguousarray(r, dtype=numpy.int32)\n g = numpy.ascontiguousarray(g, dtype=numpy.int32)\n b = numpy.ascontiguousarray(b, dtype=numpy.int32)\n cr = r.ctypes.data_as(POINTER(c_int))\n cg = g.ctypes.data_as(POINTER(c_int))\n cb = b.ctypes.data_as(POINTER(c_int))\n else:\n # otherwise convert using ctypes arrays\n cr = (c_int * len(r))(*r)\n cg = (c_int * len(g))(*g)\n cb = (c_int * len(b))(*b)\n\n _lib.TCOD_console_fill_background(c_void_p(con), cr, cg, cb)\n\n\n_lib.TCOD_console_fill_char.restype=c_void\n_lib.TCOD_console_fill_char.argtypes=[c_void_p , POINTER(c_int)]\ndef console_fill_char(con,arr) :\n if (numpy_available and isinstance(arr, numpy.ndarray) ):\n #numpy arrays, use numpy's ctypes functions\n arr = numpy.ascontiguousarray(arr, dtype=numpy.int32)\n carr = arr.ctypes.data_as(POINTER(c_int))\n else:\n carr = (c_int * len(arr))(*arr)\n\n _lib.TCOD_console_fill_char(c_void_p(con), carr)\n\n_lib.TCOD_console_load_asc.restype=c_bool\n_lib.TCOD_console_load_asc.argtypes=[c_void_p , c_char_p]\ndef console_load_asc(con, filename) :\n return _lib.TCOD_console_load_asc(con,convert_to_ascii(filename))\n\n_lib.TCOD_console_save_asc.restype=c_bool\n_lib.TCOD_console_save_asc.argtypes=[c_void_p , c_char_p]\ndef console_save_asc(con, filename) :\n return _lib.TCOD_console_save_asc(con,convert_to_ascii(filename))\n\n_lib.TCOD_console_load_apf.restype=c_bool\n_lib.TCOD_console_load_apf.argtypes=[c_void_p , c_char_p]\ndef console_load_apf(con, filename) :\n return _lib.TCOD_console_load_apf(con,convert_to_ascii(filename))\n\n_lib.TCOD_console_save_apf.restype=c_bool\n_lib.TCOD_console_save_apf.argtypes=[c_void_p , c_char_p]\ndef console_save_apf(con, filename) :\n return _lib.TCOD_console_save_apf(con,convert_to_ascii(filename))\n\n_lib.TCOD_console_from_xp.restype = c_void_p\n_lib.TCOD_console_from_xp.argtypes = [c_char_p]\ndef console_from_xp(filename):\n return _lib.TCOD_console_from_xp(filename.encode('utf-8'))\n\n_lib.TCOD_console_load_xp.restype = c_bool\n_lib.TCOD_console_load_xp.argtypes = [c_void_p, c_char_p]\ndef console_load_xp(con, filename):\n return _lib.TCOD_console_load_xp(con, filename.encode('utf-8'))\n\n_lib.TCOD_console_save_xp.restype = c_bool\n_lib.TCOD_console_save_xp.argtypes = [c_void_p, c_char_p, c_int]\ndef console_save_xp(con, filename, compress_level=9):\n return _lib.TCOD_console_save_xp(con, filename.encode('utf-8'),\n compress_level)\n\n_lib.TCOD_console_list_from_xp.restype = c_void_p\n_lib.TCOD_console_list_from_xp.argtypes = [c_char_p]\ndef console_list_load_xp(filename):\n tcod_list = _lib.TCOD_console_list_from_xp(filename.encode('utf-8'))\n if not tcod_list:\n return None\n try:\n python_list = []\n _lib.TCOD_list_reverse(tcod_list)\n while not _lib.TCOD_list_is_empty(tcod_list):\n python_list.append(_lib.TCOD_list_pop(tcod_list))\n return python_list\n finally:\n _lib.TCOD_list_delete(tcod_list)\n\n_lib.TCOD_console_list_save_xp.restype = c_bool\n_lib.TCOD_console_list_save_xp.argtypes = [c_void_p, c_char_p, c_int]\ndef console_list_save_xp(console_list, filename, compress_level=9):\n tcod_list = _lib.TCOD_list_new()\n try:\n for console in console_list:\n _lib.TCOD_list_push(tcod_list, console)\n return _lib.TCOD_console_list_save_xp(\n tcod_list, filename.encode('utf-8'), compress_level\n )\n finally:\n _lib.TCOD_list_delete(tcod_list)\n\n\n############################\n# sys module\n############################\n\n_lib.TCOD_sys_startup.restype=c_void\n_lib.TCOD_sys_startup.argtypes=[]\ndef 
sys_startup():\n    _lib.TCOD_sys_startup()\n\n_lib.TCOD_sys_shutdown.restype=c_void\n_lib.TCOD_sys_shutdown.argtypes=[]\ndef sys_shutdown():\n    _lib.TCOD_sys_shutdown()\n\n# high precision time functions\n_lib.TCOD_sys_set_fps.restype=c_void\n_lib.TCOD_sys_set_fps.argtypes=[c_int]\ndef sys_set_fps(fps):\n    _lib.TCOD_sys_set_fps(fps)\n\n_lib.TCOD_sys_get_fps.restype=c_int\n_lib.TCOD_sys_get_fps.argtypes=[]\ndef sys_get_fps():\n    return _lib.TCOD_sys_get_fps()\n\n_lib.TCOD_sys_get_last_frame_length.restype=c_float\n_lib.TCOD_sys_get_last_frame_length.argtypes=[]\ndef sys_get_last_frame_length():\n    return _lib.TCOD_sys_get_last_frame_length()\n\n_lib.TCOD_sys_sleep_milli.restype=c_void\n_lib.TCOD_sys_sleep_milli.argtypes=[c_uint ]\ndef sys_sleep_milli(val):\n    _lib.TCOD_sys_sleep_milli(val)\n\n_lib.TCOD_sys_elapsed_milli.restype=c_int\n_lib.TCOD_sys_elapsed_milli.argtypes=[]\ndef sys_elapsed_milli():\n    return _lib.TCOD_sys_elapsed_milli()\n\n_lib.TCOD_sys_elapsed_seconds.restype=c_float\n_lib.TCOD_sys_elapsed_seconds.argtypes=[]\ndef sys_elapsed_seconds():\n    return _lib.TCOD_sys_elapsed_seconds()\n\n_lib.TCOD_sys_set_renderer.restype=c_void\n_lib.TCOD_sys_set_renderer.argtypes=[c_int ]\ndef sys_set_renderer(renderer):\n    _lib.TCOD_sys_set_renderer(renderer)\n\n_lib.TCOD_sys_get_renderer.restype=c_int\n_lib.TCOD_sys_get_renderer.argtypes=[]\ndef sys_get_renderer():\n    return _lib.TCOD_sys_get_renderer()\n\n# easy screenshots\n\n_lib.TCOD_sys_save_screenshot.restype=c_void\n_lib.TCOD_sys_save_screenshot.argtypes=[c_char_p]\ndef sys_save_screenshot(name=None):\n    # passing None (NULL) lets libtcod pick a default file name; the old\n    # default of 0 would crash inside convert_to_ascii()\n    _lib.TCOD_sys_save_screenshot(convert_to_ascii(name) if name is not None else None)\n\n# clipboard support\n# This maps to the SDL2 API, so only uses utf-8 for both Python 2 and 3.\n\n_lib.TCOD_sys_clipboard_set.restype=c_bool\n_lib.TCOD_sys_clipboard_set.argtypes=[c_char_p]\ndef sys_clipboard_set(text):\n    return _lib.TCOD_sys_clipboard_set(text.encode(\"utf-8\"))\n\n_lib.TCOD_sys_clipboard_get.restype=c_char_p\n_lib.TCOD_sys_clipboard_get.argtypes=[]\ndef sys_clipboard_get():\n    return _lib.TCOD_sys_clipboard_get().decode(\"utf-8\")\n\n# custom fullscreen resolution\n\n_lib.TCOD_sys_force_fullscreen_resolution.restype=c_void\n_lib.TCOD_sys_force_fullscreen_resolution.argtypes=[c_int, c_int]\ndef sys_force_fullscreen_resolution(width, height):\n    _lib.TCOD_sys_force_fullscreen_resolution(width, height)\n\n_lib.TCOD_sys_get_current_resolution.restype=c_void\n_lib.TCOD_sys_get_current_resolution.argtypes=[POINTER(c_int), POINTER(c_int)]\ndef sys_get_current_resolution():\n    w = c_int()\n    h = c_int()\n    _lib.TCOD_sys_get_current_resolution(byref(w), byref(h))\n    return w.value, h.value\n\n_lib.TCOD_sys_get_fullscreen_offsets.restype=c_void\n_lib.TCOD_sys_get_fullscreen_offsets.argtypes=[POINTER(c_int), POINTER(c_int)]\n\n_lib.TCOD_sys_get_char_size.restype=c_void\n_lib.TCOD_sys_get_char_size.argtypes=[POINTER(c_int), POINTER(c_int)]\ndef sys_get_char_size():\n    w = c_int()\n    h = c_int()\n    _lib.TCOD_sys_get_char_size(byref(w), byref(h))\n    return w.value, h.value\n\n# update font bitmap\n_lib.TCOD_sys_update_char.restype=c_void\n_lib.TCOD_sys_update_char.argtypes=[c_int, c_int, c_int, c_void_p , c_int, c_int]\ndef sys_update_char(asciiCode, fontx, fonty, img, x, y) :\n    _lib.TCOD_sys_update_char(asciiCode,fontx,fonty,img,x,y)\n\n# custom SDL post renderer\nSDL_RENDERER_FUNC = CFUNCTYPE(None, c_void_p)\ndef sys_register_SDL_renderer(callback):\n    global sdl_renderer_func\n    
sdl_renderer_func = SDL_RENDERER_FUNC(callback)\n _lib.TCOD_sys_register_SDL_renderer(sdl_renderer_func)\n\n# events\nEVENT_NONE=0\nEVENT_KEY_PRESS=1\nEVENT_KEY_RELEASE=2\nEVENT_KEY=EVENT_KEY_PRESS|EVENT_KEY_RELEASE\nEVENT_MOUSE_MOVE=4\nEVENT_MOUSE_PRESS=8\nEVENT_MOUSE_RELEASE=16\nEVENT_MOUSE=EVENT_MOUSE_MOVE|EVENT_MOUSE_PRESS|EVENT_MOUSE_RELEASE\nEVENT_ANY=EVENT_KEY|EVENT_MOUSE\n\n_lib.TCOD_sys_check_for_event.restype=c_int\n_lib.TCOD_sys_check_for_event.argtypes=[c_int, c_void_p, c_void_p]\ndef sys_check_for_event(mask,k,m) :\n return _lib.TCOD_sys_check_for_event(mask,byref(k),byref(m))\n\n_lib.TCOD_sys_wait_for_event.restype=c_int\n_lib.TCOD_sys_wait_for_event.argtypes=[c_int, c_void_p, c_void_p, c_bool ]\ndef sys_wait_for_event(mask,k,m,flush) :\n return _lib.TCOD_sys_wait_for_event(mask,byref(k),byref(m),flush)\n\n############################\n# line module\n############################\n\ndef line_init(xo, yo, xd, yd):\n _lib.TCOD_line_init(xo, yo, xd, yd)\n\n_lib.TCOD_line_step.restype = c_bool\n_lib.TCOD_line_step.argtypes=[POINTER(c_int), POINTER(c_int)]\ndef line_step():\n x = c_int()\n y = c_int()\n ret = _lib.TCOD_line_step(byref(x), byref(y))\n if not ret:\n return x.value, y.value\n return None,None\n\n_lib.TCOD_line.restype=c_bool\ndef line(xo,yo,xd,yd,py_callback) :\n LINE_CBK_FUNC=CFUNCTYPE(c_bool,c_int,c_int)\n c_callback=LINE_CBK_FUNC(py_callback)\n return _lib.TCOD_line(xo,yo,xd,yd,c_callback)\n\n_lib.TCOD_line_init_mt.restype=c_void\n_lib.TCOD_line_init_mt.argtypes=[c_int, c_int, c_int, c_int, c_void_p]\n_lib.TCOD_line_step_mt.restype = c_bool\n_lib.TCOD_line_step_mt.argtypes=[POINTER(c_int), POINTER(c_int), c_void_p]\ndef line_iter(xo, yo, xd, yd):\n data = (c_int * 9)() # struct TCOD_bresenham_data_t\n _lib.TCOD_line_init_mt(xo, yo, xd, yd, data)\n x = c_int(xo)\n y = c_int(yo)\n done = False\n while not done:\n yield x.value, y.value\n done = _lib.TCOD_line_step_mt(byref(x), byref(y), data)\n\n############################\n# image module\n############################\n\n_lib.TCOD_image_new.restype=c_void_p\n_lib.TCOD_image_new.argtypes=[c_int, c_int]\ndef image_new(width, height):\n return _lib.TCOD_image_new(width, height)\n\n_lib.TCOD_image_clear.restype=c_void\n_lib.TCOD_image_clear.argtypes=[c_void_p , Color ]\ndef image_clear(image,col) :\n _lib.TCOD_image_clear(image,col)\n\n_lib.TCOD_image_invert.restype=c_void\n_lib.TCOD_image_invert.argtypes=[c_void_p ]\ndef image_invert(image) :\n _lib.TCOD_image_invert(image)\n\n_lib.TCOD_image_hflip.restype=c_void\n_lib.TCOD_image_hflip.argtypes=[c_void_p ]\ndef image_hflip(image) :\n _lib.TCOD_image_hflip(image)\n\n_lib.TCOD_image_rotate90.restype=c_void\n_lib.TCOD_image_rotate90.argtypes=[c_void_p , c_int]\ndef image_rotate90(image, num=1) :\n _lib.TCOD_image_rotate90(image,num)\n\n_lib.TCOD_image_vflip.restype=c_void\n_lib.TCOD_image_vflip.argtypes=[c_void_p ]\ndef image_vflip(image) :\n _lib.TCOD_image_vflip(image)\n\n_lib.TCOD_image_scale.restype=c_void\n_lib.TCOD_image_scale.argtypes=[c_void_p , c_int, c_int]\ndef image_scale(image, neww, newh) :\n _lib.TCOD_image_scale(image,neww,newh)\n\n_lib.TCOD_image_set_key_color.restype=c_void\n_lib.TCOD_image_set_key_color.argtypes=[c_void_p , Color]\ndef image_set_key_color(image,col) :\n _lib.TCOD_image_set_key_color(image,col)\n\n_lib.TCOD_image_get_alpha.restype=c_int\n_lib.TCOD_image_get_alpha.argtypes=[c_void_p ,c_int, c_int]\ndef image_get_alpha(image,x,y) :\n return _lib.TCOD_image_get_alpha(image,c_int(x),c_int(y))\n\n_lib.TCOD_image_is_pixel_transparent.restype = 
c_bool\n_lib.TCOD_image_is_pixel_transparent.argtypes=[c_void_p , c_int, c_int]\ndef image_is_pixel_transparent(image,x,y) :\n return _lib.TCOD_image_is_pixel_transparent(image,c_int(x),c_int(y))\n\n_lib.TCOD_image_load.restype=c_void_p\n_lib.TCOD_image_load.argtypes=[c_char_p]\ndef image_load(filename):\n return _lib.TCOD_image_load(convert_to_ascii(filename))\n\n_lib.TCOD_image_from_console.restype=c_void_p\n_lib.TCOD_image_from_console.argtypes=[c_void_p ]\ndef image_from_console(console):\n return _lib.TCOD_image_from_console(console)\n\n_lib.TCOD_image_refresh_console.restype=c_void\n_lib.TCOD_image_refresh_console.argtypes=[c_void_p , c_void_p ]\ndef image_refresh_console(image, console):\n _lib.TCOD_image_refresh_console(image, console)\n\n_lib.TCOD_image_get_size.restype=c_void\n_lib.TCOD_image_get_size.argtypes=[c_void_p , POINTER(c_int),POINTER(c_int)]\ndef image_get_size(image):\n w=c_int()\n h=c_int()\n _lib.TCOD_image_get_size(image, byref(w), byref(h))\n return w.value, h.value\n\n_lib.TCOD_image_get_pixel.restype = Color\n_lib.TCOD_image_get_pixel.argtypes=[c_void_p ,c_int, c_int]\ndef image_get_pixel(image, x, y):\n return _lib.TCOD_image_get_pixel(image, x, y)\n\n_lib.TCOD_image_get_mipmap_pixel.restype = Color\n_lib.TCOD_image_get_mipmap_pixel.argtypes=[c_void_p ,c_float,c_float, c_float, c_float]\ndef image_get_mipmap_pixel(image, x0, y0, x1, y1):\n return _lib.TCOD_image_get_mipmap_pixel(image, c_float(x0), c_float(y0), c_float(x1), c_float(y1))\n\n_lib.TCOD_image_put_pixel.restype=c_void\n_lib.TCOD_image_put_pixel.argtypes=[ c_void_p ,c_int, c_int, Color ]\ndef image_put_pixel(image, x, y, col):\n _lib.TCOD_image_put_pixel(image, x, y, col)\n\n_lib.TCOD_image_blit.restype=c_void\n_lib.TCOD_image_blit.argtypes=[c_void_p, c_void_p, c_float, c_float, c_int, c_float, c_float, c_float]\ndef image_blit(image, console, x, y, bkgnd_flag, scalex, scaley, angle):\n _lib.TCOD_image_blit(image, console, x, y, bkgnd_flag, scalex, scaley, angle)\n\n_lib.TCOD_image_blit_rect.restype=c_void\n_lib.TCOD_image_blit_rect.argtypes=[c_void_p , c_void_p , c_int, c_int, c_int, c_int,]\ndef image_blit_rect(image, console, x, y, w, h, bkgnd_flag):\n _lib.TCOD_image_blit_rect(image, console, x, y, w, h, bkgnd_flag)\n\n_lib.TCOD_image_blit_2x.restype=c_void\n_lib.TCOD_image_blit_2x.argtypes=[c_void_p , c_void_p , c_int, c_int, c_int, c_int, c_int, c_int]\ndef image_blit_2x(image, console, dx, dy, sx=0, sy=0, w=-1, h=-1):\n _lib.TCOD_image_blit_2x(image, console, dx,dy,sx,sy,w,h)\n\n_lib.TCOD_image_save.restype=c_void\n_lib.TCOD_image_save.argtypes=[c_void_p, c_char_p]\ndef image_save(image, filename):\n _lib.TCOD_image_save(image, convert_to_ascii(filename))\n\n_lib.TCOD_image_delete.restype=c_void\n_lib.TCOD_image_delete.argtypes=[c_void_p]\ndef image_delete(image):\n _lib.TCOD_image_delete(image)\n\n############################\n# mouse module\n############################\nclass Mouse(Structure):\n _fields_=[('x', c_int),\n ('y', c_int),\n ('dx', c_int),\n ('dy', c_int),\n ('cx', c_int),\n ('cy', c_int),\n ('dcx', c_int),\n ('dcy', c_int),\n ('lbutton', c_bool),\n ('rbutton', c_bool),\n ('mbutton', c_bool),\n ('lbutton_pressed', c_bool),\n ('rbutton_pressed', c_bool),\n ('mbutton_pressed', c_bool),\n ('wheel_up', c_bool),\n ('wheel_down', c_bool),\n ]\n\n_lib.TCOD_mouse_is_cursor_visible.restype = c_bool\n\n_lib.TCOD_mouse_show_cursor.restype=c_void\n_lib.TCOD_mouse_show_cursor.argtypes=[c_bool ]\ndef mouse_show_cursor(visible):\n 
_lib.TCOD_mouse_show_cursor(c_int(visible))\n\n_lib.TCOD_mouse_is_cursor_visible.restype=c_bool\n_lib.TCOD_mouse_is_cursor_visible.argtypes=[]\ndef mouse_is_cursor_visible():\n return _lib.TCOD_mouse_is_cursor_visible()\n\n_lib.TCOD_mouse_move.restype=c_void\n_lib.TCOD_mouse_move.argtypes=[c_int, c_int]\ndef mouse_move(x, y):\n _lib.TCOD_mouse_move(x, y)\n\n_lib.TCOD_mouse_get_status_wrapper.restype=c_void\n_lib.TCOD_mouse_get_status_wrapper.argtypes=[c_void_p]\ndef mouse_get_status():\n mouse=Mouse()\n _lib.TCOD_mouse_get_status_wrapper(byref(mouse))\n return mouse\n\n############################\n# parser module\n############################\n\nclass Dice(Structure):\n _fields_=[('nb_dices', c_int),\n ('nb_faces', c_int),\n ('multiplier', c_float),\n ('addsub', c_float),\n ]\n\n def __repr__(self):\n return \"Dice(%d, %d, %s, %s)\" % (self.nb_dices, self.nb_faces,\n self.multiplier, self.addsub)\n\nclass _CValue(Union):\n _fields_=[('c',c_uint8),\n ('i',c_int),\n ('f',c_float),\n ('s',c_char_p),\n # JBR03192012 See http://bugs.python.org/issue14354 for why these are not defined as their actual types\n ('col',c_uint8 * 3),\n ('dice',c_int * 4),\n ('custom',c_void_p),\n ]\n\n_CFUNC_NEW_STRUCT = CFUNCTYPE(c_uint, c_void_p, c_char_p)\n_CFUNC_NEW_FLAG = CFUNCTYPE(c_uint, c_char_p)\n_CFUNC_NEW_PROPERTY = CFUNCTYPE(c_uint, c_char_p, c_int, _CValue)\n\nclass _CParserListener(Structure):\n _fields_=[('new_struct', _CFUNC_NEW_STRUCT),\n ('new_flag',_CFUNC_NEW_FLAG),\n ('new_property',_CFUNC_NEW_PROPERTY),\n ('end_struct',_CFUNC_NEW_STRUCT),\n ('error',_CFUNC_NEW_FLAG),\n ]\n\n# property types\nTYPE_NONE = 0\nTYPE_BOOL = 1\nTYPE_CHAR = 2\nTYPE_INT = 3\nTYPE_FLOAT = 4\nTYPE_STRING = 5\nTYPE_COLOR = 6\nTYPE_DICE = 7\nTYPE_VALUELIST00 = 8\nTYPE_VALUELIST01 = 9\nTYPE_VALUELIST02 = 10\nTYPE_VALUELIST03 = 11\nTYPE_VALUELIST04 = 12\nTYPE_VALUELIST05 = 13\nTYPE_VALUELIST06 = 14\nTYPE_VALUELIST07 = 15\nTYPE_VALUELIST08 = 16\nTYPE_VALUELIST09 = 17\nTYPE_VALUELIST10 = 18\nTYPE_VALUELIST11 = 19\nTYPE_VALUELIST12 = 20\nTYPE_VALUELIST13 = 21\nTYPE_VALUELIST14 = 22\nTYPE_VALUELIST15 = 23\nTYPE_LIST = 1024\n\n_lib.TCOD_list_get.restype = c_void_p\ndef _convert_TCODList(clist, typ):\n res = list()\n for i in range(_lib.TCOD_list_size(c_void_p(clist))):\n elt = _lib.TCOD_list_get(c_void_p(clist), i)\n elt = cast(elt, c_void_p)\n if typ == TYPE_BOOL:\n elt = c_bool.from_buffer(elt).value\n elif typ == TYPE_CHAR:\n elt = c_char.from_buffer(elt).value\n elif typ == TYPE_INT:\n elt = c_int.from_buffer(elt).value\n elif typ == TYPE_FLOAT:\n elt = c_float.from_buffer(elt).value\n elif typ == TYPE_STRING or TYPE_VALUELIST15 >= typ >= TYPE_VALUELIST00:\n elt = cast(elt, c_char_p).value\n elif typ == TYPE_COLOR:\n elt = Color.from_buffer_copy(elt)\n elif typ == TYPE_DICE:\n # doesn't work\n elt = Dice.from_buffer_copy(elt)\n res.append(elt)\n return res\n\n_lib.TCOD_parser_new.restype=c_void_p\n_lib.TCOD_parser_new.argtypes=[]\ndef parser_new():\n return _lib.TCOD_parser_new()\n\n_lib.TCOD_parser_new_struct.restype=c_void_p\n_lib.TCOD_parser_new_struct.argtypes=[c_void_p , c_char_p]\ndef parser_new_struct(parser, name):\n return _lib.TCOD_parser_new_struct(parser, convert_to_ascii(name))\n\n_lib.TCOD_struct_add_flag.restype=c_void\n_lib.TCOD_struct_add_flag.argtypes=[c_void_p ,c_char_p]\ndef struct_add_flag(struct, name):\n _lib.TCOD_struct_add_flag(struct, convert_to_ascii(name))\n\n_lib.TCOD_struct_add_property.restype=c_void\n_lib.TCOD_struct_add_property.argtypes=[c_void_p , c_char_p,c_int , c_bool ]\ndef 
struct_add_property(struct, name, typ, mandatory):\n _lib.TCOD_struct_add_property(struct, convert_to_ascii(name), typ, mandatory)\n\n_lib.TCOD_struct_add_value_list.restype=c_void\n_lib.TCOD_struct_add_value_list.argtypes=[c_void_p ,c_char_p, POINTER(c_char_p), c_bool ]\ndef struct_add_value_list(struct, name, value_list, mandatory):\n CARRAY = c_char_p * (len(value_list) + 1)\n cvalue_list = CARRAY()\n for i in range(len(value_list)):\n cvalue_list[i] = cast(convert_to_ascii(value_list[i]), c_char_p)\n cvalue_list[len(value_list)] = 0\n _lib.TCOD_struct_add_value_list(struct, convert_to_ascii(name), cvalue_list, mandatory)\n\n_lib.TCOD_struct_add_value_list_sized.restype=c_void\n_lib.TCOD_struct_add_value_list_sized.argtypes=[c_void_p ,c_char_p, POINTER(c_char_p), c_int, c_bool ]\n\n_lib.TCOD_struct_add_list_property.restype=c_void\n_lib.TCOD_struct_add_list_property.argtypes=[c_void_p , c_char_p,c_int , c_bool ]\ndef struct_add_list_property(struct, name, typ, mandatory):\n _lib.TCOD_struct_add_list_property(struct, convert_to_ascii(name), typ, mandatory)\n\n_lib.TCOD_struct_add_structure.restype=c_void\n_lib.TCOD_struct_add_structure.argtypes=[c_void_p ,c_void_p]\ndef struct_add_structure(struct, sub_struct):\n _lib.TCOD_struct_add_structure(struct, sub_struct)\n\n_lib.TCOD_struct_get_name.restype=c_char_p\n_lib.TCOD_struct_get_name.argtypes=[c_void_p ]\ndef struct_get_name(struct):\n ret = _lib.TCOD_struct_get_name(struct)\n if is_python_3:\n return ret.decode(\"utf-8\")\n return ret\n\n_lib.TCOD_struct_is_mandatory.restype=c_bool\n_lib.TCOD_struct_is_mandatory.argtypes=[c_void_p ,c_char_p]\ndef struct_is_mandatory(struct, name):\n return _lib.TCOD_struct_is_mandatory(struct, convert_to_ascii(name))\n\n_lib.TCOD_struct_get_type.restype=c_int\n_lib.TCOD_struct_get_type.argtypes=[c_void_p , c_char_p]\ndef struct_get_type(struct, name):\n return _lib.TCOD_struct_get_type(struct, convert_to_ascii(name))\n\n_lib.TCOD_parser_run.restype=c_void\n_lib.TCOD_parser_run.argtypes=[c_void_p , c_char_p, c_void_p]\ndef parser_run(parser, filename, listener=0):\n if listener != 0:\n clistener=_CParserListener()\n def value_converter(name, typ, value):\n if typ == TYPE_BOOL:\n return listener.new_property(name, typ, value.c == 1)\n elif typ == TYPE_CHAR:\n return listener.new_property(name, typ, '%c' % (value.c & 0xFF))\n elif typ == TYPE_INT:\n return listener.new_property(name, typ, value.i)\n elif typ == TYPE_FLOAT:\n return listener.new_property(name, typ, value.f)\n elif typ == TYPE_STRING or \\\n TYPE_VALUELIST15 >= typ >= TYPE_VALUELIST00:\n return listener.new_property(name, typ, value.s)\n elif typ == TYPE_COLOR:\n col = cast(value.col, POINTER(Color)).contents\n return listener.new_property(name, typ, col)\n elif typ == TYPE_DICE:\n dice = cast(value.dice, POINTER(Dice)).contents\n return listener.new_property(name, typ, dice)\n elif typ & TYPE_LIST:\n return listener.new_property(name, typ,\n _convert_TCODList(value.custom, typ & 0xFF))\n return True\n clistener.new_struct = _CFUNC_NEW_STRUCT(listener.new_struct)\n clistener.new_flag = _CFUNC_NEW_FLAG(listener.new_flag)\n clistener.new_property = _CFUNC_NEW_PROPERTY(value_converter)\n clistener.end_struct = _CFUNC_NEW_STRUCT(listener.end_struct)\n clistener.error = _CFUNC_NEW_FLAG(listener.error)\n _lib.TCOD_parser_run(parser, convert_to_ascii(filename), byref(clistener))\n else:\n _lib.TCOD_parser_run(parser, convert_to_ascii(filename), 0)\n\n_lib.TCOD_parser_delete.restype=c_void\n_lib.TCOD_parser_delete.argtypes=[c_void_p ]\ndef 
parser_delete(parser):\n _lib.TCOD_parser_delete(parser)\n\n_lib.TCOD_parser_has_property.restype = c_bool\n_lib.TCOD_parser_has_property.argtypes=[c_void_p, c_char_p]\ndef parser_has_property(parser, name):\n return _lib.TCOD_parser_has_property(parser, convert_to_ascii(name))\n\n_lib.TCOD_parser_get_bool_property.restype=c_bool\n_lib.TCOD_parser_get_bool_property.argtypes=[c_void_p , c_char_p]\ndef parser_get_bool_property(parser, name):\n return _lib.TCOD_parser_get_bool_property(parser, convert_to_ascii(name))\n\n_lib.TCOD_parser_get_int_property.restype=c_int\n_lib.TCOD_parser_get_int_property.argtypes=[c_void_p , c_char_p]\ndef parser_get_int_property(parser, name):\n return _lib.TCOD_parser_get_int_property(parser, convert_to_ascii(name))\n\n_lib.TCOD_parser_get_char_property.restype=c_int\n_lib.TCOD_parser_get_char_property.argtypes=[c_void_p , c_char_p]\ndef parser_get_char_property(parser, name):\n return '%c' % _lib.TCOD_parser_get_char_property(parser, convert_to_ascii(name))\n\n_lib.TCOD_parser_get_float_property.restype=c_float\n_lib.TCOD_parser_get_float_property.argtypes=[c_void_p , c_char_p]\ndef parser_get_float_property(parser, name):\n return _lib.TCOD_parser_get_float_property(parser, convert_to_ascii(name))\n\n_lib.TCOD_parser_get_string_property.restype=c_char_p\n_lib.TCOD_parser_get_string_property.argtypes=[c_void_p , c_char_p]\ndef parser_get_string_property(parser, name):\n ret = _lib.TCOD_parser_get_string_property(parser, convert_to_ascii(name))\n if is_python_3:\n return ret.decode(\"utf-8\")\n return ret\n\n_lib.TCOD_parser_get_color_property.restype = Color\n_lib.TCOD_parser_get_color_property.argtypes=[c_void_p , c_char_p]\ndef parser_get_color_property(parser, name):\n return _lib.TCOD_parser_get_color_property(parser, convert_to_ascii(name))\n\n_lib.TCOD_parser_get_dice_property_py.argtypes=[c_void_p,c_char_p,POINTER(Dice)]\ndef parser_get_dice_property(parser, name):\n d = Dice()\n _lib.TCOD_parser_get_dice_property_py(parser, convert_to_ascii(name), byref(d))\n return d\n\n_lib.TCOD_parser_get_list_property.restype=c_void_p\n_lib.TCOD_parser_get_list_property.argtypes=[c_void_p , c_char_p, c_int ]\ndef parser_get_list_property(parser, name, typ):\n clist = _lib.TCOD_parser_get_list_property(parser, convert_to_ascii(name), typ)\n return _convert_TCODList(clist, typ)\n\n_lib.TCOD_parser_get_custom_property.restype=c_void_p\n_lib.TCOD_parser_get_custom_property.argtypes=[c_void_p , c_char_p]\n\n############################\n# random module\n############################\n\nRNG_MT = 0\nRNG_CMWC = 1\n\nDISTRIBUTION_LINEAR = 0\nDISTRIBUTION_GAUSSIAN = 1\nDISTRIBUTION_GAUSSIAN_RANGE = 2\nDISTRIBUTION_GAUSSIAN_INVERSE = 3\nDISTRIBUTION_GAUSSIAN_RANGE_INVERSE = 4\n\n_lib.TCOD_random_get_instance.restype=c_void_p\n_lib.TCOD_random_get_instance.argtypes=[]\ndef random_get_instance():\n return _lib.TCOD_random_get_instance()\n\n_lib.TCOD_random_new.restype=c_void_p\n_lib.TCOD_random_new.argtypes=[c_int ]\ndef random_new(algo=RNG_CMWC):\n return _lib.TCOD_random_new(algo)\n\n_lib.TCOD_random_new_from_seed.restype=c_void_p\n_lib.TCOD_random_new_from_seed.argtypes=[c_int, c_uint]\ndef random_new_from_seed(seed, algo=RNG_CMWC):\n return _lib.TCOD_random_new_from_seed(algo, seed)\n\n_lib.TCOD_random_set_distribution.restype=c_void\n_lib.TCOD_random_set_distribution.argtypes=[c_void_p , c_int ]\ndef random_set_distribution(rnd, dist) :\n\t_lib.TCOD_random_set_distribution(rnd, dist)\n\n_lib.TCOD_random_get_int.restype=c_int\n_lib.TCOD_random_get_int.argtypes=[c_void_p , 
c_int, c_int]\ndef random_get_int(rnd, mi, ma):\n return _lib.TCOD_random_get_int(rnd, mi, ma)\n\n_lib.TCOD_random_get_float.restype=c_float\n_lib.TCOD_random_get_float.argtypes=[c_void_p , c_float , c_float ]\ndef random_get_float(rnd, mi, ma):\n return _lib.TCOD_random_get_float(rnd, mi, ma)\n\n_lib.TCOD_random_get_double.restype=c_double\n_lib.TCOD_random_get_double.argtypes=[c_void_p , c_double , c_double ]\ndef random_get_double(rnd, mi, ma):\n return _lib.TCOD_random_get_double(rnd, mi, ma)\n\n_lib.TCOD_random_get_int_mean.restype=c_int\n_lib.TCOD_random_get_int_mean.argtypes=[c_void_p , c_int, c_int, c_int]\ndef random_get_int_mean(rnd, mi, ma, mean):\n return _lib.TCOD_random_get_int_mean(rnd, mi, ma, mean)\n\n_lib.TCOD_random_get_float_mean.restype=c_float\n_lib.TCOD_random_get_float_mean.argtypes=[c_void_p , c_float , c_float , c_float ]\ndef random_get_float_mean(rnd, mi, ma, mean):\n return _lib.TCOD_random_get_float_mean(rnd, mi, ma, mean)\n\n_lib.TCOD_random_get_double_mean.restype=c_double\n_lib.TCOD_random_get_double_mean.argtypes=[c_void_p , c_double , c_double , c_double ]\ndef random_get_double_mean(rnd, mi, ma, mean):\n return _lib.TCOD_random_get_double_mean(rnd, mi, ma, mean)\n\n_lib.TCOD_random_dice_roll_s.restype=c_int\n_lib.TCOD_random_dice_roll_s.argtypes=[c_void_p , c_char_p ]\ndef random_dice_roll_s(rnd, s):\n return _lib.TCOD_random_dice_roll_s(rnd, convert_to_ascii(s))\n\n_lib.TCOD_random_save.restype=c_void_p\n_lib.TCOD_random_save.argtypes=[c_void_p ]\ndef random_save(rnd):\n return _lib.TCOD_random_save(rnd)\n\n_lib.TCOD_random_restore.restype=c_void\n_lib.TCOD_random_restore.argtypes=[c_void_p , c_void_p ]\ndef random_restore(rnd, backup):\n _lib.TCOD_random_restore(rnd, backup)\n\n_lib.TCOD_random_delete.restype=c_void\n_lib.TCOD_random_delete.argtypes=[c_void_p ]\ndef random_delete(rnd):\n _lib.TCOD_random_delete(rnd)\n\n\n############################\n# noise module\n############################\n\nNOISE_DEFAULT_HURST = 0.5\nNOISE_DEFAULT_LACUNARITY = 2.0\n\nNOISE_DEFAULT = 0\nNOISE_PERLIN = 1\nNOISE_SIMPLEX = 2\nNOISE_WAVELET = 4\n\n_NOISE_PACKER_FUNC = (None,\n (c_float * 1),\n (c_float * 2),\n (c_float * 3),\n (c_float * 4),\n )\n\n_lib.TCOD_noise_new.restype=c_void_p\n_lib.TCOD_noise_new.argtypes=[c_int, c_float , c_float , c_void_p ]\ndef noise_new(dim, h=NOISE_DEFAULT_HURST, l=NOISE_DEFAULT_LACUNARITY, random=0):\n return _lib.TCOD_noise_new(dim, h, l, random)\n\n_lib.TCOD_noise_set_type.restype=c_void\n_lib.TCOD_noise_set_type.argtypes=[c_void_p , c_int ]\ndef noise_set_type(n, typ) :\n _lib.TCOD_noise_set_type(n,typ)\n\n_lib.TCOD_noise_get.restype=c_float\n_lib.TCOD_noise_get.argtypes=[c_void_p , POINTER(c_float)]\n\n_lib.TCOD_noise_get_ex.restype=c_float\n_lib.TCOD_noise_get_ex.argtypes=[c_void_p , POINTER(c_float), c_int ]\ndef noise_get(n, f, typ=NOISE_DEFAULT):\n return _lib.TCOD_noise_get_ex(n, _NOISE_PACKER_FUNC[len(f)](*f), typ)\n\n_lib.TCOD_noise_get_fbm.restype=c_float\n_lib.TCOD_noise_get_fbm.argtypes=[c_void_p , POINTER(c_float), c_float ]\n\n_lib.TCOD_noise_get_fbm_ex.restype=c_float\n_lib.TCOD_noise_get_fbm_ex.argtypes=[c_void_p , POINTER(c_float), c_float , c_int ]\ndef noise_get_fbm(n, f, oc, typ=NOISE_DEFAULT):\n return _lib.TCOD_noise_get_fbm_ex(n, _NOISE_PACKER_FUNC[len(f)](*f), oc, typ)\n\n_lib.TCOD_noise_get_turbulence.restype=c_float\n_lib.TCOD_noise_get_turbulence.argtypes=[c_void_p , POINTER(c_float), c_float ]\n\n_lib.TCOD_noise_get_turbulence_ex.restype=c_float\n_lib.TCOD_noise_get_turbulence_ex.argtypes=[c_void_p , 
POINTER(c_float), c_float , c_int ]\ndef noise_get_turbulence(n, f, oc, typ=NOISE_DEFAULT):\n    return _lib.TCOD_noise_get_turbulence_ex(n, _NOISE_PACKER_FUNC[len(f)](*f), oc, typ)\n\n_lib.TCOD_noise_delete.restype=c_void\n_lib.TCOD_noise_delete.argtypes=[c_void_p ]\ndef noise_delete(n):\n    _lib.TCOD_noise_delete(n)\n\n############################\n# fov module\n############################\n\nFOV_BASIC = 0\nFOV_DIAMOND = 1\nFOV_SHADOW = 2\nFOV_PERMISSIVE_0 = 3\nFOV_PERMISSIVE_1 = 4\nFOV_PERMISSIVE_2 = 5\nFOV_PERMISSIVE_3 = 6\nFOV_PERMISSIVE_4 = 7\nFOV_PERMISSIVE_5 = 8\nFOV_PERMISSIVE_6 = 9\nFOV_PERMISSIVE_7 = 10\nFOV_PERMISSIVE_8 = 11\nFOV_RESTRICTIVE = 12\nNB_FOV_ALGORITHMS = 13\n\ndef FOV_PERMISSIVE(p) :\n    return FOV_PERMISSIVE_0+p\n\n_lib.TCOD_map_new.restype=c_void_p\n_lib.TCOD_map_new.argtypes=[c_int, c_int]\ndef map_new(w, h):\n    return _lib.TCOD_map_new(w, h)\n\n_lib.TCOD_map_copy.restype=c_void\n_lib.TCOD_map_copy.argtypes=[c_void_p , c_void_p ]\ndef map_copy(source, dest):\n    return _lib.TCOD_map_copy(source, dest)\n\n_lib.TCOD_map_set_properties.restype=c_void\n_lib.TCOD_map_set_properties.argtypes=[c_void_p , c_int, c_int, c_bool, c_bool]\ndef map_set_properties(m, x, y, isTrans, isWalk):\n    _lib.TCOD_map_set_properties(m, x, y, c_int(isTrans), c_int(isWalk))\n\n_lib.TCOD_map_clear.restype = c_void\n_lib.TCOD_map_clear.argtypes = [c_void_p , c_bool , c_bool]\ndef map_clear(m, transparent=False, walkable=False):\n    _lib.TCOD_map_clear(m, c_int(transparent), c_int(walkable))\n\n_lib.TCOD_map_compute_fov.restype=c_void\n_lib.TCOD_map_compute_fov.argtypes=[c_void_p , c_int, c_int, c_int, c_bool, c_int ]\ndef map_compute_fov(m, x, y, radius=0, light_walls=True, algo=FOV_RESTRICTIVE ):\n    _lib.TCOD_map_compute_fov(m, x, y, c_int(radius), c_bool(light_walls), c_int(algo))\n\n_lib.TCOD_map_set_in_fov.restype=c_void\n_lib.TCOD_map_set_in_fov.argtypes=[c_void_p , c_int, c_int, c_bool ]\ndef map_set_in_fov(m, x, y, fov):\n    return _lib.TCOD_map_set_in_fov(m, x, y, fov)\n\n_lib.TCOD_map_is_in_fov.restype=c_bool\n_lib.TCOD_map_is_in_fov.argtypes=[c_void_p , c_int, c_int]\ndef map_is_in_fov(m, x, y):\n    return _lib.TCOD_map_is_in_fov(m, x, y)\n\n_lib.TCOD_map_is_transparent.restype=c_bool\n_lib.TCOD_map_is_transparent.argtypes=[c_void_p , c_int, c_int]\ndef map_is_transparent(m, x, y):\n    return _lib.TCOD_map_is_transparent(m, x, y)\n\n_lib.TCOD_map_is_walkable.restype=c_bool\n_lib.TCOD_map_is_walkable.argtypes=[c_void_p , c_int, c_int]\ndef map_is_walkable(m, x, y):\n    return _lib.TCOD_map_is_walkable(m, x, y)\n\n_lib.TCOD_map_delete.restype=c_void\n_lib.TCOD_map_delete.argtypes=[c_void_p ]\ndef map_delete(m):\n    return _lib.TCOD_map_delete(m)\n\n_lib.TCOD_map_get_width.restype=c_int\n_lib.TCOD_map_get_width.argtypes=[c_void_p ]\ndef map_get_width(map):\n    return _lib.TCOD_map_get_width(map)\n\n_lib.TCOD_map_get_height.restype=c_int\n_lib.TCOD_map_get_height.argtypes=[c_void_p ]\ndef map_get_height(map):\n    return _lib.TCOD_map_get_height(map)\n\n_lib.TCOD_map_get_nb_cells.restype=c_int\n_lib.TCOD_map_get_nb_cells.argtypes=[c_void_p ]\ndef map_get_nb_cells(map):\n    return _lib.TCOD_map_get_nb_cells(map)\n\n############################\n# pathfinding module\n############################\n\nPATH_CBK_FUNC = CFUNCTYPE(c_float, c_int, c_int, c_int, c_int, py_object)\n\n_lib.TCOD_path_new_using_map.restype=c_void_p\n_lib.TCOD_path_new_using_map.argtypes=[c_void_p , c_float ]\ndef 
path_new_using_map(m, dcost=1.41):\n return (_lib.TCOD_path_new_using_map(m, dcost), None)\n\n_lib.TCOD_path_new_using_function.restype=c_void_p\n_lib.TCOD_path_new_using_function.argtypes=[c_int, c_int, PATH_CBK_FUNC,\n py_object, c_float]\ndef path_new_using_function(w, h, func, userdata=0, dcost=1.41):\n cbk_func = PATH_CBK_FUNC(func)\n return (_lib.TCOD_path_new_using_function(w, h, cbk_func,\n userdata, dcost), cbk_func)\n\n_lib.TCOD_path_compute.restype = c_bool\n_lib.TCOD_path_compute.argtypes=[c_void_p , c_int,c_int, c_int, c_int]\ndef path_compute(p, ox, oy, dx, dy):\n return _lib.TCOD_path_compute(p[0], ox, oy, dx, dy)\n\n_lib.TCOD_path_get_origin.restype=c_void\n_lib.TCOD_path_get_origin.argtypes=[c_void_p , POINTER(c_int), POINTER(c_int)]\ndef path_get_origin(p):\n x = c_int()\n y = c_int()\n _lib.TCOD_path_get_origin(p[0], byref(x), byref(y))\n return x.value, y.value\n\n_lib.TCOD_path_get_destination.restype=c_void\n_lib.TCOD_path_get_destination.argtypes=[c_void_p , POINTER(c_int), POINTER(c_int)]\ndef path_get_destination(p):\n x = c_int()\n y = c_int()\n _lib.TCOD_path_get_destination(p[0], byref(x), byref(y))\n return x.value, y.value\n\n_lib.TCOD_path_size.restype=c_int\n_lib.TCOD_path_size.argtypes=[c_void_p ]\ndef path_size(p):\n return _lib.TCOD_path_size(p[0])\n\n_lib.TCOD_path_reverse.restype=c_void\n_lib.TCOD_path_reverse.argtypes=[c_void_p ]\ndef path_reverse(p):\n _lib.TCOD_path_reverse(p[0])\n\n_lib.TCOD_path_get.restype=c_void\n_lib.TCOD_path_get.argtypes=[c_void_p , c_int, POINTER(c_int), POINTER(c_int)]\ndef path_get(p, idx):\n x = c_int()\n y = c_int()\n _lib.TCOD_path_get(p[0], idx, byref(x), byref(y))\n return x.value, y.value\n\n_lib.TCOD_path_is_empty.restype = c_bool\n_lib.TCOD_path_is_empty.argtypes=[c_void_p ]\ndef path_is_empty(p):\n return _lib.TCOD_path_is_empty(p[0])\n\n_lib.TCOD_path_walk.restype = c_bool\n_lib.TCOD_path_walk.argtypes=[c_void_p , POINTER(c_int), POINTER(c_int), c_bool]\ndef path_walk(p, recompute):\n x = c_int()\n y = c_int()\n if _lib.TCOD_path_walk(p[0], byref(x), byref(y), c_int(recompute)):\n return x.value, y.value\n return None,None\n\n_lib.TCOD_path_delete.restype=c_void\n_lib.TCOD_path_delete.argtypes=[c_void_p ]\ndef path_delete(p):\n _lib.TCOD_path_delete(p[0])\n\n\n\n_lib.TCOD_dijkstra_new .restype=c_void_p\n_lib.TCOD_dijkstra_new .argtypes=[c_void_p , c_float ]\ndef dijkstra_new(m, dcost=1.41):\n return (_lib.TCOD_dijkstra_new(c_void_p(m), c_float(dcost)), None)\n\n_lib.TCOD_dijkstra_new_using_function.restype=c_void_p\n_lib.TCOD_dijkstra_new_using_function.argtypes=[c_int, c_int, PATH_CBK_FUNC,\n py_object, c_float]\ndef dijkstra_new_using_function(w, h, func, userdata=0, dcost=1.41):\n cbk_func = PATH_CBK_FUNC(func)\n return (_lib.TCOD_dijkstra_new_using_function(w, h, cbk_func,\n userdata, dcost), cbk_func)\n\n_lib.TCOD_dijkstra_compute.restype=c_void\n_lib.TCOD_dijkstra_compute.argtypes=[c_void_p , c_int, c_int]\ndef dijkstra_compute(p, ox, oy):\n _lib.TCOD_dijkstra_compute(p[0], c_int(ox), c_int(oy))\n\n_lib.TCOD_dijkstra_path_set.restype = c_bool\n_lib.TCOD_dijkstra_path_set .argtypes=[c_void_p , c_int, c_int]\ndef dijkstra_path_set(p, x, y):\n return _lib.TCOD_dijkstra_path_set(p[0], c_int(x), c_int(y))\n\n_lib.TCOD_dijkstra_get_distance.restype = c_float\n_lib.TCOD_dijkstra_get_distance.argtypes=[c_void_p , c_int, c_int]\ndef dijkstra_get_distance(p, x, y):\n return _lib.TCOD_dijkstra_get_distance(p[0], c_int(x), c_int(y))\n\n_lib.TCOD_dijkstra_size.restype=c_int\n_lib.TCOD_dijkstra_size.argtypes=[c_void_p 
]\ndef dijkstra_size(p):\n    return _lib.TCOD_dijkstra_size(p[0])\n\n_lib.TCOD_dijkstra_reverse.restype=c_void\n_lib.TCOD_dijkstra_reverse.argtypes=[c_void_p ]\ndef dijkstra_reverse(p):\n    _lib.TCOD_dijkstra_reverse(p[0])\n\n_lib.TCOD_dijkstra_get.restype=c_void\n_lib.TCOD_dijkstra_get.argtypes=[c_void_p , c_int, POINTER(c_int), POINTER(c_int)]\ndef dijkstra_get(p, idx):\n    x = c_int()\n    y = c_int()\n    _lib.TCOD_dijkstra_get(p[0], c_int(idx), byref(x), byref(y))\n    return x.value, y.value\n\n_lib.TCOD_dijkstra_is_empty.restype = c_bool\n_lib.TCOD_dijkstra_is_empty.argtypes=[c_void_p ]\ndef dijkstra_is_empty(p):\n    return _lib.TCOD_dijkstra_is_empty(p[0])\n\n_lib.TCOD_dijkstra_path_walk.restype = c_bool\n_lib.TCOD_dijkstra_path_walk.argtypes=[c_void_p , POINTER(c_int), POINTER(c_int)]\ndef dijkstra_path_walk(p):\n    x = c_int()\n    y = c_int()\n    if _lib.TCOD_dijkstra_path_walk(p[0], byref(x), byref(y)):\n        return x.value, y.value\n    return None,None\n\n_lib.TCOD_dijkstra_delete.restype=c_void\n_lib.TCOD_dijkstra_delete.argtypes=[c_void_p ]\ndef dijkstra_delete(p):\n    _lib.TCOD_dijkstra_delete(p[0])\n\n############################\n# bsp module\n############################\nclass _CBsp(Structure):\n    _fields_ = [('next', c_void_p),\n                ('father', c_void_p),\n                ('son', c_void_p),\n                ('x', c_int),\n                ('y', c_int),\n                ('w', c_int),\n                ('h', c_int),\n                ('position', c_int),\n                ('level', c_uint8),\n                ('horizontal', c_bool),\n                ]\n\nBSP_CBK_FUNC = CFUNCTYPE(c_int, c_void_p, c_void_p)\n\n# Python class encapsulating the _CBsp pointer\nclass Bsp(object):\n    def __init__(self, cnode):\n        pcbsp = cast(cnode, POINTER(_CBsp))\n        self.p = pcbsp\n\n    def getx(self):\n        return self.p.contents.x\n    def setx(self, value):\n        self.p.contents.x = value\n    x = property(getx, setx)\n\n    def gety(self):\n        return self.p.contents.y\n    def sety(self, value):\n        self.p.contents.y = value\n    y = property(gety, sety)\n\n    def getw(self):\n        return self.p.contents.w\n    def setw(self, value):\n        self.p.contents.w = value\n    w = property(getw, setw)\n\n    def geth(self):\n        return self.p.contents.h\n    def seth(self, value):\n        self.p.contents.h = value\n    h = property(geth, seth)\n\n    def getpos(self):\n        return self.p.contents.position\n    def setpos(self, value):\n        self.p.contents.position = value\n    position = property(getpos, setpos)\n\n    def gethor(self):\n        return self.p.contents.horizontal\n    def sethor(self,value):\n        self.p.contents.horizontal = value\n    horizontal = property(gethor, sethor)\n\n    def getlev(self):\n        return self.p.contents.level\n    def setlev(self,value):\n        self.p.contents.level = value\n    level = property(getlev, setlev)\n\n\n_lib.TCOD_bsp_new.restype=c_void_p\n_lib.TCOD_bsp_new.argtypes=[c_int, c_int, c_int, c_int]\ndef bsp_new(x, y, w, h):\n    return _lib.TCOD_bsp_new(x, y, w, h)\n\n_lib.TCOD_bsp_new_with_size.restype = POINTER(_CBsp)\n_lib.TCOD_bsp_new_with_size.argtypes=[c_int,c_int,c_int, c_int]\ndef bsp_new_with_size(x, y, w, h):\n    return Bsp(_lib.TCOD_bsp_new_with_size(x, y, w, h))\n\n_lib.TCOD_bsp_split_once.restype=c_void\n_lib.TCOD_bsp_split_once.argtypes=[c_void_p, c_bool , c_int]\ndef bsp_split_once(node, horizontal, position):\n    _lib.TCOD_bsp_split_once(node.p, c_int(horizontal), position)\n\n_lib.TCOD_bsp_split_recursive.restype=c_void\n_lib.TCOD_bsp_split_recursive.argtypes=[c_void_p, c_void_p , c_int, c_int, c_int, c_float, c_float]\ndef bsp_split_recursive(node, randomizer, nb, minHSize, minVSize, maxHRatio,\n                        maxVRatio):\n    _lib.TCOD_bsp_split_recursive(node.p, randomizer, nb, minHSize, minVSize,\n                                  c_float(maxHRatio), 
c_float(maxVRatio))\n\n_lib.TCOD_bsp_resize.restype=c_void\n_lib.TCOD_bsp_resize.argtypes=[c_void_p, c_int,c_int, c_int, c_int]\ndef bsp_resize(node, x, y, w, h):\n _lib.TCOD_bsp_resize(node.p, x, y, w, h)\n\n_lib.TCOD_bsp_left.restype = POINTER(_CBsp)\n_lib.TCOD_bsp_left.argtypes=[c_void_p]\ndef bsp_left(node):\n return Bsp(_lib.TCOD_bsp_left(node.p))\n\n_lib.TCOD_bsp_right.restype = POINTER(_CBsp)\n_lib.TCOD_bsp_right.argtypes=[c_void_p]\ndef bsp_right(node):\n return Bsp(_lib.TCOD_bsp_right(node.p))\n\n_lib.TCOD_bsp_father.restype = POINTER(_CBsp)\n_lib.TCOD_bsp_father.argtypes=[c_void_p]\ndef bsp_father(node):\n return Bsp(_lib.TCOD_bsp_father(node.p))\n\n_lib.TCOD_bsp_is_leaf.restype = c_bool\n_lib.TCOD_bsp_is_leaf.argtypes=[c_void_p]\ndef bsp_is_leaf(node):\n return _lib.TCOD_bsp_is_leaf(node.p)\n\n_lib.TCOD_bsp_contains.restype = c_bool\n_lib.TCOD_bsp_contains.argtypes=[c_void_p, c_int, c_int]\ndef bsp_contains(node, cx, cy):\n return _lib.TCOD_bsp_contains(node.p, cx, cy)\n\n_lib.TCOD_bsp_find_node.restype = POINTER(_CBsp)\n_lib.TCOD_bsp_find_node.argtypes=[c_void_p, c_int, c_int]\ndef bsp_find_node(node, cx, cy):\n return Bsp(_lib.TCOD_bsp_find_node(node.p, cx, cy))\n\ndef _bsp_traverse(node, callback, userData, func):\n # convert the c node into a Python node\n #before passing it to the actual callback\n def node_converter(cnode, data):\n node = Bsp(cnode)\n return callback(node, data)\n cbk_func = BSP_CBK_FUNC(node_converter)\n func(node.p, cbk_func, userData)\n\ndef bsp_traverse_pre_order(node, callback, userData=0):\n _bsp_traverse(node, callback, userData, _lib.TCOD_bsp_traverse_pre_order)\n\ndef bsp_traverse_in_order(node, callback, userData=0):\n _bsp_traverse(node, callback, userData, _lib.TCOD_bsp_traverse_in_order)\n\ndef bsp_traverse_post_order(node, callback, userData=0):\n _bsp_traverse(node, callback, userData, _lib.TCOD_bsp_traverse_post_order)\n\ndef bsp_traverse_level_order(node, callback, userData=0):\n _bsp_traverse(node, callback, userData, _lib.TCOD_bsp_traverse_level_order)\n\ndef bsp_traverse_inverted_level_order(node, callback, userData=0):\n _bsp_traverse(node, callback, userData,\n _lib.TCOD_bsp_traverse_inverted_level_order)\n\n_lib.TCOD_bsp_remove_sons.restype=c_void\n_lib.TCOD_bsp_remove_sons.argtypes=[c_void_p]\ndef bsp_remove_sons(node):\n _lib.TCOD_bsp_remove_sons(node.p)\n\n_lib.TCOD_bsp_delete.restype=c_void\n_lib.TCOD_bsp_delete.argtypes=[c_void_p]\ndef bsp_delete(node):\n _lib.TCOD_bsp_delete(node.p)\n\n############################\n# heightmap module\n############################\nclass _CHeightMap(Structure):\n _fields_=[('w', c_int),\n ('h', c_int),\n ('values', POINTER(c_float)),\n ]\n\n\nclass HeightMap(object):\n def __init__(self, chm):\n pchm = cast(chm, POINTER(_CHeightMap))\n self.p = pchm\n\n def getw(self):\n return self.p.contents.w\n def setw(self, value):\n self.p.contents.w = value\n w = property(getw, setw)\n\n def geth(self):\n return self.p.contents.h\n def seth(self, value):\n self.p.contents.h = value\n h = property(geth, seth)\n\n_lib.TCOD_heightmap_new.restype = POINTER(_CHeightMap)\n_lib.TCOD_heightmap_new.argtypes=[c_int,c_int]\ndef heightmap_new(w, h):\n phm = _lib.TCOD_heightmap_new(w, h)\n return HeightMap(phm)\n\n_lib.TCOD_heightmap_set_value.restype=c_void\n_lib.TCOD_heightmap_set_value.argtypes=[c_void_p, c_int, c_int, c_float ]\ndef heightmap_set_value(hm, x, y, value):\n _lib.TCOD_heightmap_set_value(hm.p, x, y, c_float(value))\n\n_lib.TCOD_heightmap_add.restype=c_void\n_lib.TCOD_heightmap_add.argtypes=[c_void_p, 
c_float ]\ndef heightmap_add(hm, value):\n _lib.TCOD_heightmap_add(hm.p, c_float(value))\n\n_lib.TCOD_heightmap_scale.restype=c_void\n_lib.TCOD_heightmap_scale.argtypes=[c_void_p, c_float ]\ndef heightmap_scale(hm, value):\n _lib.TCOD_heightmap_scale(hm.p, c_float(value))\n\n_lib.TCOD_heightmap_clear.restype=c_void\n_lib.TCOD_heightmap_clear.argtypes=[c_void_p]\ndef heightmap_clear(hm):\n _lib.TCOD_heightmap_clear(hm.p)\n\n_lib.TCOD_heightmap_clamp.restype=c_void\n_lib.TCOD_heightmap_clamp.argtypes=[c_void_p, c_float , c_float ]\ndef heightmap_clamp(hm, mi, ma):\n _lib.TCOD_heightmap_clamp(hm.p, c_float(mi),c_float(ma))\n\n_lib.TCOD_heightmap_copy.restype=c_void\n_lib.TCOD_heightmap_copy.argtypes=[c_void_p,c_void_p]\ndef heightmap_copy(hm1, hm2):\n _lib.TCOD_heightmap_copy(hm1.p, hm2.p)\n\n_lib.TCOD_heightmap_normalize.restype=c_void\n_lib.TCOD_heightmap_normalize.argtypes=[c_void_p, c_float , c_float ]\ndef heightmap_normalize(hm, mi=0.0, ma=1.0):\n _lib.TCOD_heightmap_normalize(hm.p, c_float(mi), c_float(ma))\n\n_lib.TCOD_heightmap_lerp_hm.restype=c_void\n_lib.TCOD_heightmap_lerp_hm.argtypes=[c_void_p, c_void_p, c_void_p, c_float ]\ndef heightmap_lerp_hm(hm1, hm2, hm3, coef):\n _lib.TCOD_heightmap_lerp_hm(hm1.p, hm2.p, hm3.p, c_float(coef))\n\n_lib.TCOD_heightmap_add_hm.restype=c_void\n_lib.TCOD_heightmap_add_hm.argtypes=[c_void_p, c_void_p, c_void_p]\ndef heightmap_add_hm(hm1, hm2, hm3):\n _lib.TCOD_heightmap_add_hm(hm1.p, hm2.p, hm3.p)\n\n_lib.TCOD_heightmap_multiply_hm.restype=c_void\n_lib.TCOD_heightmap_multiply_hm.argtypes=[c_void_p, c_void_p, c_void_p]\ndef heightmap_multiply_hm(hm1, hm2, hm3):\n _lib.TCOD_heightmap_multiply_hm(hm1.p, hm2.p, hm3.p)\n\n_lib.TCOD_heightmap_add_hill.restype=c_void\n_lib.TCOD_heightmap_add_hill.argtypes=[c_void_p, c_float , c_float , c_float , c_float ]\ndef heightmap_add_hill(hm, x, y, radius, height):\n _lib.TCOD_heightmap_add_hill(hm.p, c_float( x), c_float( y),\n c_float( radius), c_float( height))\n\n_lib.TCOD_heightmap_dig_hill.restype=c_void\n_lib.TCOD_heightmap_dig_hill.argtypes=[c_void_p, c_float , c_float , c_float , c_float ]\ndef heightmap_dig_hill(hm, x, y, radius, height):\n _lib.TCOD_heightmap_dig_hill(hm.p, c_float( x), c_float( y),\n c_float( radius), c_float( height))\n\n_lib.TCOD_heightmap_mid_point_displacement.restype = c_void\n_lib.TCOD_heightmap_mid_point_displacement.argtypes = [c_void_p, c_void_p,\n c_float]\ndef heightmap_mid_point_displacement(hm, rng, roughness):\n _lib.TCOD_heightmap_mid_point_displacement(hm.p, rng, roughness)\n\n_lib.TCOD_heightmap_rain_erosion.restype=c_void\n_lib.TCOD_heightmap_rain_erosion.argtypes=[c_void_p, c_int,c_float ,c_float ,c_void_p ]\ndef heightmap_rain_erosion(hm, nbDrops, erosionCoef, sedimentationCoef, rnd=0):\n _lib.TCOD_heightmap_rain_erosion(hm.p, nbDrops, c_float( erosionCoef),\n c_float( sedimentationCoef), rnd)\n\n_lib.TCOD_heightmap_kernel_transform.restype=c_void\n_lib.TCOD_heightmap_kernel_transform.argtypes=[c_void_p, c_int, POINTER(c_int), POINTER(c_int), POINTER(c_float), c_float ,c_float ]\ndef heightmap_kernel_transform(hm, kernelsize, dx, dy, weight, minLevel,\n maxLevel):\n FARRAY = c_float * kernelsize\n IARRAY = c_int * kernelsize\n cdx = IARRAY(*dx)\n cdy = IARRAY(*dy)\n cweight = FARRAY(*weight)\n _lib.TCOD_heightmap_kernel_transform(hm.p, kernelsize, cdx, cdy, cweight,\n c_float(minLevel), c_float(maxLevel))\n\n_lib.TCOD_heightmap_add_voronoi.restype=c_void\n_lib.TCOD_heightmap_add_voronoi.argtypes=[c_void_p, c_int, c_int, POINTER(c_float),c_void_p ]\ndef 
heightmap_add_voronoi(hm, nbPoints, nbCoef, coef, rnd=0):\n    FARRAY = c_float * nbCoef\n    ccoef = FARRAY(*coef)\n    _lib.TCOD_heightmap_add_voronoi(hm.p, nbPoints, nbCoef, ccoef, rnd)\n\n_lib.TCOD_heightmap_add_fbm.restype=c_void\n_lib.TCOD_heightmap_add_fbm.argtypes=[c_void_p, c_void_p, c_float, c_float, c_float, c_float, c_float, c_float, c_float]\ndef heightmap_add_fbm(hm, noise, mulx, muly, addx, addy, octaves, delta, scale):\n    _lib.TCOD_heightmap_add_fbm(hm.p, noise, c_float(mulx), c_float(muly),\n                                c_float(addx), c_float(addy),\n                                c_float(octaves), c_float(delta),\n                                c_float(scale))\n\n_lib.TCOD_heightmap_scale_fbm.restype=c_void\n_lib.TCOD_heightmap_scale_fbm.argtypes=[c_void_p, c_void_p, c_float, c_float, c_float, c_float, c_float, c_float, c_float]\ndef heightmap_scale_fbm(hm, noise, mulx, muly, addx, addy, octaves, delta,\n                        scale):\n    _lib.TCOD_heightmap_scale_fbm(hm.p, noise, c_float(mulx), c_float(muly),\n                                  c_float(addx), c_float(addy),\n                                  c_float(octaves), c_float(delta),\n                                  c_float(scale))\n\n_lib.TCOD_heightmap_islandify.restype=c_void\n_lib.TCOD_heightmap_islandify.argtypes=[c_void_p, c_float ,c_void_p ]\ndef heightmap_islandify(hm, sealevel, rnd):\n    _lib.TCOD_heightmap_islandify(hm.p, c_float(sealevel), rnd)\n\n_lib.TCOD_heightmap_dig_bezier.restype=c_void\n_lib.TCOD_heightmap_dig_bezier.argtypes=[c_void_p, POINTER(c_int), POINTER(c_int), c_float , c_float , c_float , c_float ]\ndef heightmap_dig_bezier(hm, px, py, startRadius, startDepth, endRadius,\n                         endDepth):\n    IARRAY = c_int * 4\n    cpx = IARRAY(*px)\n    cpy = IARRAY(*py)\n    _lib.TCOD_heightmap_dig_bezier(hm.p, cpx, cpy, c_float(startRadius),\n                                   c_float(startDepth), c_float(endRadius),\n                                   c_float(endDepth))\n\n_lib.TCOD_heightmap_get_value.restype = c_float\n_lib.TCOD_heightmap_get_value.argtypes=[c_void_p, c_int, c_int]\ndef heightmap_get_value(hm, x, y):\n    return _lib.TCOD_heightmap_get_value(hm.p, x, y)\n\n_lib.TCOD_heightmap_get_interpolated_value.restype=c_float\n_lib.TCOD_heightmap_get_interpolated_value.argtypes=[c_void_p, c_float , c_float ]\ndef heightmap_get_interpolated_value(hm, x, y):\n    return _lib.TCOD_heightmap_get_interpolated_value(hm.p, c_float(x),\n                                                      c_float(y))\n\n_lib.TCOD_heightmap_get_slope.restype=c_float\n_lib.TCOD_heightmap_get_slope.argtypes=[c_void_p, c_int, c_int]\ndef heightmap_get_slope(hm, x, y):\n    return _lib.TCOD_heightmap_get_slope(hm.p, x, y)\n\n_lib.TCOD_heightmap_get_normal.restype=c_void\n_lib.TCOD_heightmap_get_normal.argtypes=[c_void_p, c_float , c_float , POINTER(c_float), c_float ]\ndef heightmap_get_normal(hm, x, y, waterLevel):\n    FARRAY = c_float * 3\n    cn = FARRAY()\n    _lib.TCOD_heightmap_get_normal(hm.p, c_float(x), c_float(y), cn, c_float(waterLevel))\n    return cn[0], cn[1], cn[2]\n\n_lib.TCOD_heightmap_count_cells.restype=c_int\n_lib.TCOD_heightmap_count_cells.argtypes=[c_void_p, c_float , c_float ]\ndef heightmap_count_cells(hm, mi, ma):\n    return _lib.TCOD_heightmap_count_cells(hm.p, c_float(mi), c_float(ma))\n\n_lib.TCOD_heightmap_has_land_on_border.restype = c_bool\n_lib.TCOD_heightmap_has_land_on_border.argtypes=[c_void_p, c_float ]\ndef heightmap_has_land_on_border(hm, waterlevel):\n    return _lib.TCOD_heightmap_has_land_on_border(hm.p, c_float(waterlevel))\n\n_lib.TCOD_heightmap_get_minmax.restype=c_void\n_lib.TCOD_heightmap_get_minmax.argtypes=[c_void_p, POINTER(c_float), POINTER(c_float)]\ndef heightmap_get_minmax(hm):\n    mi = c_float()\n    ma = c_float()\n    _lib.TCOD_heightmap_get_minmax(hm.p, byref(mi), byref(ma))\n    return mi.value, 
ma.value\n\n_lib.TCOD_heightmap_delete.restype=c_void\n_lib.TCOD_heightmap_delete.argtypes=[c_void_p]\ndef heightmap_delete(hm):\n    _lib.TCOD_heightmap_delete(hm.p)\n\n\n############################\n# name generator module\n############################\n\n_lib.TCOD_namegen_parse.restype=c_void\n_lib.TCOD_namegen_parse.argtypes=[c_char_p , c_void_p ]\ndef namegen_parse(filename,rnd=0) :\n    _lib.TCOD_namegen_parse(convert_to_ascii(filename),rnd)\n\n_lib.TCOD_namegen_generate.restype=c_char_p\n_lib.TCOD_namegen_generate.argtypes=[c_char_p , c_bool ]\ndef namegen_generate(name) :\n    ret = _lib.TCOD_namegen_generate(convert_to_ascii(name), 0)\n    if is_python_3:\n        return ret.decode(\"utf-8\")\n    return ret\n\n_lib.TCOD_namegen_generate_custom.restype=c_char_p\n_lib.TCOD_namegen_generate_custom.argtypes=[c_char_p , c_char_p , c_bool ]\ndef namegen_generate_custom(name, rule) :\n    ret = _lib.TCOD_namegen_generate_custom(convert_to_ascii(name), convert_to_ascii(rule), 0)\n    if is_python_3:\n        return ret.decode(\"utf-8\")\n    return ret\n\n_lib.TCOD_namegen_get_sets.restype=c_void_p\n_lib.TCOD_namegen_get_sets.argtypes=[]\n\ndef namegen_get_sets():\n    nb=_lib.TCOD_namegen_get_nb_sets_wrapper()\n    SARRAY = c_char_p * nb\n    setsa = SARRAY()\n    _lib.TCOD_namegen_get_sets_wrapper(setsa)\n    if is_python_3:\n        return list(v.decode(\"utf-8\") for v in setsa)\n    return list(setsa)\n\n_lib.TCOD_namegen_destroy.restype=c_void\n_lib.TCOD_namegen_destroy.argtypes=[]\ndef namegen_destroy() :\n    _lib.TCOD_namegen_destroy()\n\n\n_lib.TCOD_lex_new_intern.restype=c_void_p\n_lib.TCOD_lex_new_intern.argtypes=[]\n\n_lib.TCOD_lex_new.restype=c_void_p\n_lib.TCOD_lex_new.argtypes=[POINTER(c_char_p), POINTER(c_char_p), c_char_p, ]\n\n_lib.TCOD_lex_delete.restype=c_void\n_lib.TCOD_lex_delete.argtypes=[c_void_p]\n\n_lib.TCOD_lex_set_data_buffer.restype=c_void\n_lib.TCOD_lex_set_data_buffer.argtypes=[c_void_p,c_char_p]\n\n_lib.TCOD_lex_set_data_file.restype=c_bool\n_lib.TCOD_lex_set_data_file.argtypes=[c_void_p,c_char_p]\n\n_lib.TCOD_lex_parse.restype=c_int\n_lib.TCOD_lex_parse.argtypes=[c_void_p]\n\n_lib.TCOD_lex_parse_until_token_type.restype=c_int\n_lib.TCOD_lex_parse_until_token_type.argtypes=[c_void_p,c_int]\n\n_lib.TCOD_lex_parse_until_token_value.restype=c_int\n_lib.TCOD_lex_parse_until_token_value.argtypes=[c_void_p,c_char_p]\n\n_lib.TCOD_lex_expect_token_type.restype=c_bool\n_lib.TCOD_lex_expect_token_type.argtypes=[c_void_p,c_int]\n\n_lib.TCOD_lex_expect_token_value.restype=c_bool\n_lib.TCOD_lex_expect_token_value.argtypes=[c_void_p,c_int,c_char_p]\n\n_lib.TCOD_lex_savepoint.restype=c_void\n_lib.TCOD_lex_savepoint.argtypes=[c_void_p,c_void_p]\n\n_lib.TCOD_lex_restore.restype=c_void\n_lib.TCOD_lex_restore.argtypes=[c_void_p,c_void_p]\n\n_lib.TCOD_lex_get_last_javadoc.restype=c_char_p\n_lib.TCOD_lex_get_last_javadoc.argtypes=[c_void_p]\n\n_lib.TCOD_lex_get_token_name.restype=c_char_p\n_lib.TCOD_lex_get_token_name.argtypes=[c_int]\n\n_lib.TCOD_lex_get_last_error.restype=c_char_p\n_lib.TCOD_lex_get_last_error.argtypes=[]\n\n_lib.TCOD_lex_hextoint.restype=c_int\n_lib.TCOD_lex_hextoint.argtypes=[c_char]\n\n_lib.TCOD_sys_load_image.restype=c_void_p\n_lib.TCOD_sys_load_image.argtypes=[c_char_p]\n\n_lib.TCOD_list_new.restype=c_void_p\n_lib.TCOD_list_new.argtypes=[]\n\n_lib.TCOD_list_allocate.restype=c_void_p\n_lib.TCOD_list_allocate.argtypes=[c_int]\n\n_lib.TCOD_list_duplicate.restype=c_void_p\n_lib.TCOD_list_duplicate.argtypes=[c_void_p ]\n\n_lib.TCOD_list_delete.restype=c_void\n_lib.TCOD_list_delete.argtypes=[c_void_p 
]\n\n_lib.TCOD_list_push.restype=c_void\n_lib.TCOD_list_push.argtypes=[c_void_p ,c_void_p ]\n\n_lib.TCOD_list_pop.restype=c_void_p\n_lib.TCOD_list_pop.argtypes=[c_void_p ]\n\n_lib.TCOD_list_peek.restype=c_void_p\n_lib.TCOD_list_peek.argtypes=[c_void_p ]\n\n_lib.TCOD_list_add_all.restype=c_void\n_lib.TCOD_list_add_all.argtypes=[c_void_p , c_void_p ]\n\n_lib.TCOD_list_get.restype=c_void_p\n_lib.TCOD_list_get.argtypes=[c_void_p ,c_int]\n\n_lib.TCOD_list_set.restype=c_void\n_lib.TCOD_list_set.argtypes=[c_void_p ,c_void_p, c_int]\n\n_lib.TCOD_list_begin.restype=POINTER(c_void_p)\n_lib.TCOD_list_begin.argtypes=[c_void_p ]\n\n_lib.TCOD_list_end.restype=POINTER(c_void_p)\n_lib.TCOD_list_end.argtypes=[c_void_p ]\n\n_lib.TCOD_list_reverse.restype=c_void\n_lib.TCOD_list_reverse.argtypes=[c_void_p ]\n\n_lib.TCOD_list_remove_iterator.restype=POINTER(c_void_p)\n_lib.TCOD_list_remove_iterator.argtypes=[c_void_p , POINTER(c_void_p)]\n\n_lib.TCOD_list_remove.restype=c_void\n_lib.TCOD_list_remove.argtypes=[c_void_p ,c_void_p ]\n\n_lib.TCOD_list_remove_iterator_fast.restype=POINTER(c_void_p)\n_lib.TCOD_list_remove_iterator_fast.argtypes=[c_void_p , POINTER(c_void_p)]\n\n_lib.TCOD_list_remove_fast.restype=c_void\n_lib.TCOD_list_remove_fast.argtypes=[c_void_p ,c_void_p ]\n\n_lib.TCOD_list_contains.restype=c_bool\n_lib.TCOD_list_contains.argtypes=[c_void_p ,c_void_p ]\n\n_lib.TCOD_list_clear.restype=c_void\n_lib.TCOD_list_clear.argtypes=[c_void_p ]\n\n_lib.TCOD_list_clear_and_delete.restype=c_void\n_lib.TCOD_list_clear_and_delete.argtypes=[c_void_p ]\n\n_lib.TCOD_list_size.restype=c_int\n_lib.TCOD_list_size.argtypes=[c_void_p ]\n\n_lib.TCOD_list_insert_before.restype=POINTER(c_void_p)\n_lib.TCOD_list_insert_before.argtypes=[c_void_p ,c_void_p,c_int]\n\n_lib.TCOD_list_is_empty.restype=c_bool\n_lib.TCOD_list_is_empty.argtypes=[c_void_p ]\n\n_lib.TCOD_sys_create_directory.restype=c_bool\n_lib.TCOD_sys_create_directory.argtypes=[c_char_p]\n\n_lib.TCOD_sys_delete_file.restype=c_bool\n_lib.TCOD_sys_delete_file.argtypes=[c_char_p]\n\n_lib.TCOD_sys_delete_directory.restype=c_bool\n_lib.TCOD_sys_delete_directory.argtypes=[c_char_p]\n\n_lib.TCOD_sys_is_directory.restype=c_bool\n_lib.TCOD_sys_is_directory.argtypes=[c_char_p]\n\n_lib.TCOD_sys_get_directory_content.restype=c_void_p\n_lib.TCOD_sys_get_directory_content.argtypes=[c_char_p, c_char_p]\n\n_lib.TCOD_sys_file_exists.restype=c_bool\n# lib.TCOD_sys_file_exists.argtypes=[c_char_p , ...]\n\n_lib.TCOD_sys_get_num_cores.restype=c_int\n_lib.TCOD_sys_get_num_cores.argtypes=[]\n\n_lib.TCOD_thread_wait.restype=c_void\n_lib.TCOD_thread_wait.argtypes=[c_void_p ]\n\n_lib.TCOD_mutex_new.restype=c_void_p\n_lib.TCOD_mutex_new.argtypes=[]\n\n_lib.TCOD_mutex_in.restype=c_void\n_lib.TCOD_mutex_in.argtypes=[c_void_p ]\n\n_lib.TCOD_mutex_out.restype=c_void\n_lib.TCOD_mutex_out.argtypes=[c_void_p ]\n\n_lib.TCOD_mutex_delete.restype=c_void\n_lib.TCOD_mutex_delete.argtypes=[c_void_p ]\n\n_lib.TCOD_semaphore_new.restype=c_void_p\n_lib.TCOD_semaphore_new.argtypes=[c_int]\n\n_lib.TCOD_semaphore_lock.restype=c_void\n_lib.TCOD_semaphore_lock.argtypes=[c_void_p ]\n\n_lib.TCOD_semaphore_unlock.restype=c_void\n_lib.TCOD_semaphore_unlock.argtypes=[c_void_p ]\n\n_lib.TCOD_semaphore_delete.restype=c_void\n_lib.TCOD_semaphore_delete.argtypes=[ c_void_p ]\n\n_lib.TCOD_condition_new.restype=c_void_p\n_lib.TCOD_condition_new.argtypes=[]\n\n_lib.TCOD_condition_signal.restype=c_void\n_lib.TCOD_condition_signal.argtypes=[c_void_p 
]\n\n_lib.TCOD_condition_broadcast.restype=c_void\n_lib.TCOD_condition_broadcast.argtypes=[c_void_p ]\n\n_lib.TCOD_condition_wait.restype=c_void\n_lib.TCOD_condition_wait.argtypes=[c_void_p , c_void_p ]\n\n_lib.TCOD_condition_delete.restype=c_void\n_lib.TCOD_condition_delete.argtypes=[ c_void_p ]\n\n_lib.TCOD_tree_new.restype=c_void_p\n_lib.TCOD_tree_new.argtypes=[]\n\n_lib.TCOD_tree_add_son.restype=c_void\n_lib.TCOD_tree_add_son.argtypes=[c_void_p, c_void_p]\n\n_lib.TCOD_text_init.restype=c_void_p\n_lib.TCOD_text_init.argtypes=[c_int, c_int, c_int, c_int, c_int]\n\n_lib.TCOD_text_set_properties.restype=c_void\n_lib.TCOD_text_set_properties.argtypes=[c_void_p , c_int, c_int, c_char_p , c_int]\n\n_lib.TCOD_text_set_colors.restype=c_void\n_lib.TCOD_text_set_colors.argtypes=[c_void_p , c_int , c_int , c_float]\n\n_lib.TCOD_text_update.restype=c_bool\n_lib.TCOD_text_update.argtypes=[c_void_p , c_int ]\n\n_lib.TCOD_text_render.restype=c_void\n_lib.TCOD_text_render.argtypes=[c_void_p , c_void_p ]\n\n_lib.TCOD_text_get.restype=c_char_p\n_lib.TCOD_text_get.argtypes=[c_void_p ]\n\n_lib.TCOD_text_reset.restype=c_void\n_lib.TCOD_text_reset.argtypes=[c_void_p ]\n\n_lib.TCOD_text_delete.restype=c_void\n_lib.TCOD_text_delete.argtypes=[c_void_p ]\n\n_lib.TCOD_zip_new.restype=c_void_p\n_lib.TCOD_zip_new.argtypes=[]\n\n_lib.TCOD_zip_delete.restype=c_void\n_lib.TCOD_zip_delete.argtypes=[c_void_p ]\n\n_lib.TCOD_zip_put_char.restype=c_void\n_lib.TCOD_zip_put_char.argtypes=[c_void_p , c_char ]\n\n_lib.TCOD_zip_put_int.restype=c_void\n_lib.TCOD_zip_put_int.argtypes=[c_void_p , c_int]\n\n_lib.TCOD_zip_put_float.restype=c_void\n_lib.TCOD_zip_put_float.argtypes=[c_void_p , c_float ]\n\n_lib.TCOD_zip_put_string.restype=c_void\n_lib.TCOD_zip_put_string.argtypes=[c_void_p , c_char_p]\n\n_lib.TCOD_zip_put_color.restype=c_void\n_lib.TCOD_zip_put_color.argtypes=[c_void_p , c_int ]\n\n_lib.TCOD_zip_put_image.restype=c_void\n_lib.TCOD_zip_put_image.argtypes=[c_void_p , c_void_p ]\n\n_lib.TCOD_zip_put_console.restype=c_void\n_lib.TCOD_zip_put_console.argtypes=[c_void_p , c_void_p ]\n\n_lib.TCOD_zip_put_data.restype=c_void\n_lib.TCOD_zip_put_data.argtypes=[c_void_p , c_int,c_void_p]\n\n_lib.TCOD_zip_get_current_bytes.restype=c_int\n_lib.TCOD_zip_get_current_bytes.argtypes=[c_void_p ]\n\n_lib.TCOD_zip_save_to_file.restype=c_int\n_lib.TCOD_zip_save_to_file.argtypes=[c_void_p , c_char_p]\n\n_lib.TCOD_zip_load_from_file.restype=c_int\n_lib.TCOD_zip_load_from_file.argtypes=[c_void_p , c_char_p]\n\n_lib.TCOD_zip_get_char.restype=c_char\n_lib.TCOD_zip_get_char.argtypes=[c_void_p ]\n\n_lib.TCOD_zip_get_int.restype=c_int\n_lib.TCOD_zip_get_int.argtypes=[c_void_p ]\n\n_lib.TCOD_zip_get_float.restype=c_float\n_lib.TCOD_zip_get_float.argtypes=[c_void_p ]\n\n_lib.TCOD_zip_get_string.restype=c_char_p\n_lib.TCOD_zip_get_string.argtypes=[c_void_p ]\n\n_lib.TCOD_zip_get_color.restype=c_int\n_lib.TCOD_zip_get_color.argtypes=[c_void_p ]\n\n_lib.TCOD_zip_get_image.restype=c_void_p\n_lib.TCOD_zip_get_image.argtypes=[c_void_p ]\n\n_lib.TCOD_zip_get_console.restype=c_void_p\n_lib.TCOD_zip_get_console.argtypes=[c_void_p ]\n\n_lib.TCOD_zip_get_data.restype=c_int\n_lib.TCOD_zip_get_data.argtypes=[c_void_p , c_int,c_void_p]\n\n_lib.TCOD_zip_get_remaining_bytes.restype=c_int\n_lib.TCOD_zip_get_remaining_bytes.argtypes=[c_void_p ]\n\n_lib.TCOD_zip_skip_bytes.restype=c_void\n_lib.TCOD_zip_skip_bytes.argtypes=[c_void_p ,c_int ]\n" ]
[ [ "numpy.ascontiguousarray" ] ]
nguyenquangminh/gpt2-tf2
[ "3683c12322481510b850da01f5f471f07f8ba016" ]
[ "train-horovod.py" ]
[ "#!/usr/bin/env python3\n# Usage:\n# PYTHONPATH=src ./train --dataset <file|directory|glob>\n\nimport fire\nimport json\nimport os\nimport numpy as np\nimport tensorflow as tf\nimport random\nimport time\n\nimport horovod.tensorflow as hvd\n\nimport model, sample, encoder\nfrom load_dataset import load_dataset, Sampler\n\nCHECKPOINT_DIR = 'checkpoint'\nSAMPLE_DIR = 'samples'\n\nhvd.init()\n\ndef maketree(path):\n try:\n os.makedirs(path)\n except:\n pass\n\n\ndef train_main(dataset,\n model_name='117M',\n seed=None,\n batch_size=2,\n sample_length=1023,\n sample_num=1,\n sample_every=4500,\n run_name='run1',\n restore_from='latest',\n save_every=2000,\n combine=50000):\n\n enc = encoder.get_encoder(model_name)\n hparams = model.default_hparams()\n with open(os.path.join('models', model_name, 'hparams.json')) as f:\n hparams.override_from_dict(json.load(f))\n\n if sample_length is None:\n sample_length = hparams.n_ctx // 2\n elif sample_length > hparams.n_ctx:\n raise ValueError(\n \"Can't get samples longer than window size: %s\" % hparams.n_ctx)\n\n # TF config\n\n config = tf.compat.v1.ConfigProto()\n config.gpu_options.visible_device_list = str(hvd.local_rank())\n config.gpu_options.allow_growth = True\n\n with tf.compat.v1.Session(config=config) as sess:\n context = tf.compat.v1.placeholder(tf.int32, [batch_size, None])\n np.random.seed(seed)\n tf.compat.v1.set_random_seed(seed)\n output = model.model(hparams=hparams, X=context)\n loss = tf.reduce_mean(\n input_tensor=tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=context[:, 1:], logits=output['logits'][:, :-1]))\n\n tf_sample = sample.sample_sequence(\n hparams=hparams,\n length=sample_length,\n context=context,\n batch_size=batch_size,\n temperature=0.8,\n top_k=40)\n\n train_vars = [v for v in tf.compat.v1.trainable_variables() if 'model' in v.name]\n\n opt = tf.compat.v1.train.AdamOptimizer()\n opt = hvd.DistributedOptimizer(opt)\n train_op = opt.minimize(loss, var_list=train_vars)\n\n # Horovod: broadcast initial variable states from rank 0 to all other processes.\n # This is necessary to ensure consistent initialization of all workers when\n # training is started with random weights or restored from a checkpoint.\n bcast = hvd.broadcast_global_variables(0)\n\n saver = tf.compat.v1.train.Saver(\n var_list=train_vars,\n max_to_keep=5,\n keep_checkpoint_every_n_hours=2)\n\n sess.run(tf.compat.v1.global_variables_initializer())\n\n\n if restore_from == 'latest':\n ckpt = tf.train.latest_checkpoint(\n os.path.join(CHECKPOINT_DIR, run_name))\n if ckpt is None:\n # Get fresh GPT weights if new run.\n ckpt = tf.train.latest_checkpoint(\n os.path.join('models', model_name))\n elif restore_from == 'fresh':\n ckpt = tf.train.latest_checkpoint(\n os.path.join('models', model_name))\n else:\n ckpt = tf.train.latest_checkpoint(restore_from)\n print(str(hvd.local_rank()), 'Loading checkpoint', ckpt)\n saver.restore(sess, ckpt)\n\n bcast.run()\n\n print(str(hvd.local_rank()), 'Loading dataset...')\n chunks = load_dataset(enc, dataset, combine)\n data_sampler = Sampler(chunks)\n print(str(hvd.local_rank()), 'dataset has', data_sampler.total_size, 'tokens')\n print(str(hvd.local_rank()), 'Training...')\n\n counter = 1\n if os.path.exists(os.path.join(CHECKPOINT_DIR, run_name, 'counter')):\n # Load the step number if we're resuming a run\n # Add 1 so we don't immediately try to save again\n with open(os.path.join(CHECKPOINT_DIR, run_name, 'counter'),\n 'r') as fp:\n counter = int(fp.read()) + 1\n\n def save():\n 
maketree(os.path.join(CHECKPOINT_DIR, run_name))\n print(\n 'Saving',\n os.path.join(CHECKPOINT_DIR, run_name,\n 'model-{}').format(counter))\n saver.save(\n sess,\n os.path.join(CHECKPOINT_DIR, run_name, 'model'),\n global_step=counter)\n with open(os.path.join(CHECKPOINT_DIR, run_name, 'counter'),\n 'w') as fp:\n fp.write(str(counter) + '\\n')\n\n def generate_samples():\n context_tokens = data_sampler.sample(1)\n all_text = []\n index = 0\n while index < sample_num:\n out = sess.run(\n tf_sample, feed_dict={context: batch_size*[context_tokens]})\n for i in range(min(sample_num - index, batch_size)):\n text = enc.decode(out[i])\n text = '======== SAMPLE {} ========\\n{}\\n'.format(index + 1, text)\n all_text.append(text)\n index += 1\n print(text)\n maketree(os.path.join(SAMPLE_DIR, run_name))\n with open(\n os.path.join(SAMPLE_DIR, run_name,\n 'samples-{}').format(counter), 'w') as fp:\n fp.write('\\n'.join(all_text))\n\n avg_loss = (0.0, 0.0)\n start_time = time.time()\n\n try:\n while True:\n\n batch = [data_sampler.sample(1024) for _ in range(batch_size)]\n\n _, lv = sess.run((train_op, loss), feed_dict={context: batch})\n\n avg_loss = (avg_loss[0] * 0.99 + lv, avg_loss[1] * 0.99 + 1.0)\n\n if hvd.rank() == 0:\n if counter % save_every == 0:\n save()\n if counter % sample_every == 0:\n generate_samples()\n\n print(\n '[{counter} | {time:2.2f}] loss={loss:2.2f} avg={avg:2.2f}'\n .format(\n counter=counter,\n time=time.time() - start_time,\n loss=lv,\n avg=avg_loss[0] / avg_loss[1]))\n\n counter += 1\n\n except KeyboardInterrupt:\n print('interrupted')\n if hvd.rank() == 0:\n save()\n\n\nif __name__ == '__main__':\n fire.Fire(train_main)\n" ]
[ [ "tensorflow.compat.v1.placeholder", "tensorflow.compat.v1.global_variables_initializer", "tensorflow.train.latest_checkpoint", "tensorflow.compat.v1.train.AdamOptimizer", "numpy.random.seed", "tensorflow.compat.v1.train.Saver", "tensorflow.compat.v1.ConfigProto", "tensorflow.compat.v1.Session", "tensorflow.compat.v1.trainable_variables", "tensorflow.compat.v1.set_random_seed", "tensorflow.nn.sparse_softmax_cross_entropy_with_logits" ] ]
DMkelllog/wafermap_stacking
[ "3be5d3f878211efdce3f9199e8eaf1c12a2ccd09" ]
[ "run/mfe.py" ]
[ "import pickle\nimport numpy as np\nimport torch\nimport os\nimport csv\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\n\n\nfrom torch.utils.data import DataLoader\nfrom torchvision import transforms\n\nfrom utils import softmax, CustomDataset, FNN, training, inference\n\n\n\n\nbatch_size = 512\nlr = 1e-4\nes_patience = 20\n\ndef build_MFE(args):\n\n with open('../data/X_MFE.pickle', 'rb') as f:\n X_MFE = pickle.load(f)\n\n with open('../data/y.pickle', 'rb') as f:\n y = pickle.load(f)\n\n X_MFE_trainval, X_MFE_test, y_trainval, y_test = train_test_split(X_MFE, y, test_size=10000, random_state=args.seed, stratify=y)\n X_MFE_trainval, y_trainval = X_MFE_trainval[:args.train_size], y_trainval[:args.train_size]\n X_MFE_train, X_MFE_val, y_train, y_val = train_test_split(X_MFE_trainval, y_trainval, test_size=0.2, random_state=args.seed, stratify=y_trainval)\n\n mode = 'MFE'\n print(f'{mode}')\n\n scaler = StandardScaler()\n X_MFE_train = scaler.fit_transform(X_MFE_train)\n X_MFE_val = scaler.transform(X_MFE_val)\n X_MFE_trainval = scaler.transform(X_MFE_trainval)\n X_MFE_test = scaler.transform(X_MFE_test)\n\n dataset_MFE_train = CustomDataset(torch.from_numpy(X_MFE_train), y_train)\n dataset_MFE_val = CustomDataset(torch.from_numpy(X_MFE_val), y_val)\n dataset_MFE_trainval = CustomDataset(torch.from_numpy(X_MFE_trainval), y_trainval)\n dataset_MFE_test = CustomDataset(torch.from_numpy(X_MFE_test), y_test)\n\n dataloader_MFE_train = DataLoader(dataset_MFE_train, batch_size=batch_size, shuffle=True, num_workers=4)\n dataloader_MFE_val = DataLoader(dataset_MFE_val, batch_size=batch_size, shuffle=False, num_workers=4)\n dataloader_MFE_trainval = DataLoader(dataset_MFE_trainval, batch_size=batch_size, shuffle=False, num_workers=4)\n dataloader_MFE_test = DataLoader(dataset_MFE_test, batch_size=batch_size, shuffle=False, num_workers=4)\n\n model = FNN(args).cuda()\n\n model, log = training(model, dataloader_MFE_train, dataloader_MFE_val, mode, args)\n\n f1_macro_MFE_test, f1_micro_MFE_test, y_hat_MFE_test = inference(model, dataloader_MFE_test, y_test, np.unique(y_train), args)\n f1_macro_trainval, f1_micro_trainval, y_hat_MFE_trainval = inference(model, dataloader_MFE_trainval, y_trainval, np.unique(y_train), args)\n\n y_hat_MFE_trainval, y_hat_MFE_test = softmax(y_hat_MFE_trainval), softmax(y_hat_MFE_test)\n # save trainval result\n with open(f'../result/{args.seed}/{args.train_size}/y_hat_{mode}.pickle', 'wb') as f:\n pickle.dump([y_hat_MFE_trainval, y_hat_MFE_test], f)\n\n is_file_exist = os.path.isfile('../result/result.csv')\n with open(f'../result/result.csv', 'a') as f:\n writer = csv.writer(f)\n if not is_file_exist:\n writer.writerow(['args.seed', 'args.train_size', 'mode', 'f1_macro', 'f1_micro'])\n writer.writerow([args.seed, args.train_size, 'MFE', f1_macro_MFE_test, f1_micro_MFE_test])\n return log\n\n" ]
[ [ "sklearn.preprocessing.StandardScaler", "torch.from_numpy", "torch.utils.data.DataLoader", "sklearn.model_selection.train_test_split", "numpy.unique" ] ]
trakru/scikit-mobility
[ "ae3c6b7bb6189a01add9ade67a2b704ea5e28462" ]
[ "skmob/privacy/attacks.py" ]
[ "from itertools import combinations\nfrom abc import ABCMeta, abstractmethod\nfrom skmob.utils import constants\nfrom skmob.core.trajectorydataframe import TrajDataFrame\nfrom tqdm import tqdm\nimport pandas as pd\nfrom ..utils.utils import frequency_vector, probability_vector, date_time_precision\n\n\nclass Attack(object):\n\n # \"\"\"Privacy Attack\n #\n # Abstract class for a generic attack. Defines a series of functions common to all attacks.\n # Provides basic functions to compute risk for all users in a trajectory dataframe.\n # Requires the implementation of both a matching function and an assessment function, which are attack dependant.\n #\n # Parameters\n # ----------\n # knowledge_length : int\n # the length of the background knowledge that we want to simulate. The length of the background knowledge\n # specifies the amount of knowledge that the adversary will use for her attack. For each individual all the\n # combinations of points of length k will be evaluated.\n # Attributes\n # ----------\n # knowledge_length : int\n # the length of the background knowledge that we want to simulate.\n #\n # References\n # ----------\n # .. [TIST2018] Roberto Pellungrini, Luca Pappalardo, Francesca Pratesi, and Anna Monreale. 2017. A Data Mining Approach to Assess Privacy Risk in Human Mobility Data. ACM Trans. Intell. Syst. Technol. 9, 3, Article 31 (December 2017), 27 pages. DOI: https://doi.org/10.1145/3106774\n # .. [MOB2018] Roberto Pellungrini, Luca Pappalardo, Francesca Pratesi, Anna Monreale: Analyzing Privacy Risk in Human Mobility Data. STAF Workshops 2018: 114-129\n # \"\"\"\n __metaclass__ = ABCMeta\n\n def __init__(self, knowledge_length):\n self.knowledge_length = knowledge_length\n\n @property\n def knowledge_length(self):\n return self._knowledge_length\n\n @knowledge_length.setter\n def knowledge_length(self, val):\n if val < 1:\n raise ValueError(\"Parameter knowledge_length should not be less than 1\")\n self._knowledge_length = val\n\n def _all_risks(self, traj, targets=None, force_instances=False, show_progress=False):\n # \"\"\"\n # Computes risk for all the users in the data. It applies the risk function to every individual in the data.\n # If it is not required to compute the risk for the entire data, the targets parameter can be used to select\n # a portion of users to perform the calculation on.\n #\n # Parameters\n # ----------\n # traj: TrajectoryDataFrame\n # the dataframe against which to calculate risk.\n #\n # targets : TrajectoryDataFrame or list, optional\n # the users_id target of the attack. They must be compatible with the trajectory data. Default values is None\n # in which case risk is computed on all users in traj. The default is `None`.\n #\n # force_instances : boolean, optional\n # if True, returns all possible instances of background knowledge\n # with their respective probability of reidentification. The default is `False`.\n #\n # show_progress : boolean, optional\n # if True, shows the progress of the computation. 
The default is `False`.\n #\n # Returns\n # -------\n # DataFrame\n # a DataFrame with the privacy risk for each user, in the form (user_id, risk)\n # \"\"\"\n if targets is None:\n targets = traj\n else:\n if isinstance(targets, list):\n targets = traj[traj[constants.UID].isin(targets)]\n if isinstance(targets, TrajDataFrame) or isinstance(targets, pd.DataFrame):\n targets = traj[traj[constants.UID].isin(targets[constants.UID])]\n if show_progress:\n tqdm.pandas(desc=\"computing risk\")\n risks = targets.groupby(constants.UID).progress_apply(lambda x: self._risk(x, traj, force_instances))\n else:\n risks = targets.groupby(constants.UID).apply(lambda x: self._risk(x, traj, force_instances))\n if force_instances:\n risks = risks.droplevel(1)\n risks = risks.reset_index(drop=True)\n else:\n risks = risks.reset_index(name=constants.PRIVACY_RISK)\n return risks\n\n def _generate_instances(self, single_traj):\n # \"\"\"\n # Return a generator to all the possible background knowledge of length k for a single user_id.\n #\n # Parameters\n # ----------\n # single_traj : TrajectoryDataFrame\n # the dataframe of the trajectory of a single individual.\n #\n # Yields\n # ------\n # generator\n # a generator to all the possible instances of length k. Instances are tuples with the values of the actual\n # records in the combination.\n # \"\"\"\n size = len(single_traj.index)\n if self.knowledge_length > size:\n return combinations(single_traj.values, size)\n else:\n return combinations(single_traj.values, self.knowledge_length)\n\n def _risk(self, single_traj, traj, force_instances=False):\n \"\"\"\n # Computes the risk of reidentification of an individual with respect to the entire population in the data.\n #\n # Parameters\n # ----------\n # single_traj : TrajectoryDataFrame\n # the dataframe of the trajectory of a single individual.\n #\n # traj : TrajectoryDataFrame\n # the dataframe with the complete data.\n #\n # force_instances : boolean, optional\n # if True, returns all possible instances of background knowledge\n # with their respective probability of reidentification. 
The default is `False`.\n #\n # Returns\n # -------\n # float\n # the risk for the individual, expressed as a float between 0 and 1\n # \"\"\"\n instances = self._generate_instances(single_traj)\n risk = 0\n if force_instances:\n inst_data = {constants.LATITUDE: list(), constants.LONGITUDE: list(),\n constants.DATETIME: list(), constants.UID: list(),\n constants.INSTANCE: list(), constants.INSTANCE_ELEMENT: list(),\n constants.PROBABILITY: list()}\n inst_id = 1\n for instance in instances:\n prob = 1.0 / traj.groupby(constants.UID).apply(lambda x: self._match(x, instance)).sum()\n elem_count = 1\n for elem in instance:\n inst_data[constants.LATITUDE].append(elem[0])\n inst_data[constants.LONGITUDE].append(elem[1])\n inst_data[constants.DATETIME].append(elem[2])\n inst_data[constants.UID].append(elem[3])\n inst_data[constants.INSTANCE].append(inst_id)\n inst_data[constants.INSTANCE_ELEMENT].append(elem_count)\n inst_data[constants.PROBABILITY].append(prob)\n elem_count += 1\n inst_id += 1\n return pd.DataFrame(inst_data)\n else:\n for instance in instances:\n prob = 1.0 / traj.groupby(constants.UID).apply(lambda x: self._match(x, instance)).sum()\n if prob > risk:\n risk = prob\n if risk == 1.0:\n break\n return risk\n\n @abstractmethod\n def assess_risk(self, traj, targets=None, force_instances=False, show_progress=False):\n # \"\"\"\n # Abstract function to assess privacy risk for a TrajectoryDataFrame.\n # An attack must implement an assessing strategy. This could involve some preprocessing, for example\n # transforming the original data, and calls to the risk function.\n # If it is not required to compute the risk for the entire data, the targets parameter can be used to select\n # a portion of users to perform the assessment on.\n #\n # Parameters\n # ----------\n # traj : TrajectoryDataFrame\n # the dataframe on which to assess privacy risk.\n #\n # targets : TrajectoryDataFrame or list, optional\n # the users_id target of the attack. They must be compatible with the trajectory data. Default values is None\n # in which case risk is computed on all users in traj. The defaul is `None`.\n #\n # force_instances : boolean, optional\n # if True, returns all possible instances of background knowledge\n # with their respective probability of reidentification. The defaul is `False`.\n #\n # show_progress : boolean, optional\n # if True, shows the progress of the computation. The defaul is `False`.\n #\n # Returns\n # -------\n # DataFrame\n # a DataFrame with the privacy risk for each user, in the form (user_id, risk).\n # \"\"\"\n pass\n\n @abstractmethod\n def _match(self, single_traj, instance):\n # \"\"\"\n # Matching function for the attack. It is used to decide if an instance of background knowledge matches a certain\n # trajectory. 
The internal logic of an attack is represented by this function, therefore, it must be implemented\n # depending in the kind of the attack.\n #\n # Parameters\n # ----------\n # single_traj : TrajectoryDataFrame\n # the dataframe of the trajectory of a single individual.\n #\n # instance : tuple\n # an instance of background knowledge.\n #\n # Returns\n # -------\n # int\n # 1 if the instance matches the trajectory, 0 otherwise.\n # \"\"\"\n pass\n\n\nclass LocationAttack(Attack):\n \"\"\"Location Attack\n\n In a location attack the adversary knows the coordinates of the locations visited by an individual and matches them\n against trajectories.\n\n Parameters\n ----------\n knowledge_length : int\n the length of the background knowledge that we want to simulate. The length of the background knowledge\n specifies the amount of knowledge that the adversary will use for her attack. For each individual all the\n combinations of points of length k will be evaluated.\n\n Attributes\n ----------\n knowledge_length : int\n the length of the background knowledge that we want to simulate.\n\n Examples\n --------\n >>> import skmob\n >>> from skmob.privacy import attacks\n >>> from skmob.core.trajectorydataframe import TrajDataFrame\n >>> # load data\n >>> url_priv_ex = \"https://raw.githubusercontent.com/scikit-mobility/scikit-mobility/master/tutorial/data/privacy_toy.csv\"\n >>> trjdat = TrajDataFrame.from_file(filename=url_priv_ex)\n >>> # create a location attack and assess risk\n >>> at = attacks.LocationAttack(knowledge_length=2)\n >>> r = at.assess_risk(trjdat)\n >>> print(r)\n \tuid\trisk\n 0\t1\t0.333333\n 1\t2\t0.500000\n 2\t3\t0.333333\n 3\t4\t0.333333\n 4\t5\t0.250000\n 5\t6\t0.250000\n 6\t7\t0.500000\n\n >>> # change the length of the background knowledge and reassess risk\n >>> at.knowledge_length = 3\n >>> r = at.assess_risk(trjdat)\n >>> print(r)\n uid risk\n 0 1 0.500000\n 1 2 1.000000\n 2 3 0.500000\n 3 4 0.333333\n 4 5 0.333333\n 5 6 0.250000\n 6 7 1.000000\n\n >>> # limit privacy assessment to some target uids\n >>> r = at.assess_risk(trjdat, targets=[1,2])\n >>> print(r)\n uid risk\n 0 1 0.5\n 1 2 1.0\n\n >>> # inspect probability of reidentification for each background knowledge instance\n >>> r = at.assess_risk(trjdat, targets=[1,2], force_instances=True)\n >>> print(r)\n lat lng datetime uid instance instance_elem prob\n 0 43.843014 10.507994 2011-02-03 08:34:04 1 1 1 0.333333\n 1 43.544270 10.326150 2011-02-03 09:34:04 1 1 2 0.333333\n 2 43.708530 10.403600 2011-02-03 10:34:04 1 1 3 0.333333\n 3 43.843014 10.507994 2011-02-03 08:34:04 1 2 1 0.500000\n 4 43.544270 10.326150 2011-02-03 09:34:04 1 2 2 0.500000\n 5 43.779250 11.246260 2011-02-04 10:34:04 1 2 3 0.500000\n 6 43.843014 10.507994 2011-02-03 08:34:04 1 3 1 0.333333\n 7 43.708530 10.403600 2011-02-03 10:34:04 1 3 2 0.333333\n 8 43.779250 11.246260 2011-02-04 10:34:04 1 3 3 0.333333\n 9 43.544270 10.326150 2011-02-03 09:34:04 1 4 1 0.333333\n 10 43.708530 10.403600 2011-02-03 10:34:04 1 4 2 0.333333\n 11 43.779250 11.246260 2011-02-04 10:34:04 1 4 3 0.333333\n 12 43.843014 10.507994 2011-02-03 08:34:04 2 1 1 1.000000\n 13 43.708530 10.403600 2011-02-03 09:34:04 2 1 2 1.000000\n 14 43.843014 10.507994 2011-02-04 10:34:04 2 1 3 1.000000\n 15 43.843014 10.507994 2011-02-03 08:34:04 2 2 1 0.333333\n 16 43.708530 10.403600 2011-02-03 09:34:04 2 2 2 0.333333\n 17 43.544270 10.326150 2011-02-04 11:34:04 2 2 3 0.333333\n 18 43.843014 10.507994 2011-02-03 08:34:04 2 3 1 1.000000\n 19 43.843014 10.507994 2011-02-04 10:34:04 2 3 
2 1.000000\n 20 43.544270 10.326150 2011-02-04 11:34:04 2 3 3 1.000000\n 21 43.708530 10.403600 2011-02-03 09:34:04 2 4 1 0.333333\n 22 43.843014 10.507994 2011-02-04 10:34:04 2 4 2 0.333333\n 23 43.544270 10.326150 2011-02-04 11:34:04 2 4 3 0.333333\n\n\n References\n ----------\n .. [TIST2018] Roberto Pellungrini, Luca Pappalardo, Francesca Pratesi, and Anna Monreale. 2017. A Data Mining Approach to Assess Privacy Risk in Human Mobility Data. ACM Trans. Intell. Syst. Technol. 9, 3, Article 31 (December 2017), 27 pages. DOI: https://doi.org/10.1145/3106774\n .. [MOB2018] Roberto Pellungrini, Luca Pappalardo, Francesca Pratesi, Anna Monreale: Analyzing Privacy Risk in Human Mobility Data. STAF Workshops 2018: 114-129\n \"\"\"\n\n def __init__(self, knowledge_length):\n super(LocationAttack, self).__init__(knowledge_length)\n\n def assess_risk(self, traj, targets=None, force_instances=False, show_progress=False):\n \"\"\"\n Assess privacy risk for a TrajectoryDataFrame.\n An attack must implement an assessing strategy. This could involve some preprocessing, for example\n transforming the original data, and calls to the risk function.\n If it is not required to compute the risk for the entire data, the targets parameter can be used to select\n a portion of users to perform the assessment on.\n\n Parameters\n ----------\n traj : TrajectoryDataFrame\n the dataframe on which to assess privacy risk.\n\n targets : TrajectoryDataFrame or list, optional\n the users_id target of the attack. They must be compatible with the trajectory data. Default values is None\n in which case risk is computed on all users in traj. The defaul is `None`.\n\n force_instances : boolean, optional\n if True, returns all possible instances of background knowledge\n with their respective probability of reidentification. The defaul is `False`.\n\n show_progress : boolean, optional\n if True, shows the progress of the computation. 
The default is `False`.\n\n Returns\n -------\n DataFrame\n a DataFrame with the privacy risk for each user, in the form (user_id, risk).\n \"\"\"\n traj = traj.sort_values(by=[constants.UID, constants.DATETIME])\n return self._all_risks(traj, targets, force_instances, show_progress)\n\n def _match(self, single_traj, instance):\n \"\"\"\n Matching function for the attack.\n For a location attack, only the coordinates are used in the matching.\n If a trajectory presents the same locations as the ones in the instance, a match is found.\n Multiple visits to the same location are also handled.\n\n Parameters\n ----------\n single_traj : TrajectoryDataFrame\n the dataframe of the trajectory of a single individual.\n\n instance : tuple\n an instance of background knowledge.\n\n Returns\n -------\n int\n 1 if the instance matches the trajectory, 0 otherwise.\n \"\"\"\n locs = single_traj.groupby([constants.LATITUDE, constants.LONGITUDE]).size().reset_index(name=constants.COUNT)\n inst = pd.DataFrame(data=instance, columns=single_traj.columns)\n inst = inst.astype(dtype=dict(single_traj.dtypes))\n inst = inst.groupby([constants.LATITUDE, constants.LONGITUDE]).size().reset_index(name=constants.COUNT + \"inst\")\n locs_inst = pd.merge(locs, inst, left_on=[constants.LATITUDE, constants.LONGITUDE],\n right_on=[constants.LATITUDE, constants.LONGITUDE])\n if len(locs_inst.index) != len(inst.index):\n return 0\n else:\n condition = locs_inst[constants.COUNT] >= locs_inst[constants.COUNT + \"inst\"]\n if len(locs_inst[condition].index) != len(inst.index):\n return 0\n else:\n return 1\n\n\nclass LocationSequenceAttack(Attack):\n \"\"\"Location Sequence Attack\n In a location sequence attack the adversary knows the coordinates of locations visited by an individual and\n the order in which they were visited and matches them against trajectories.\n\n Parameters\n ----------\n knowledge_length : int\n the length of the background knowledge that we want to simulate. The length of the background knowledge\n specifies the amount of knowledge that the adversary will use for her attack. 
For each individual all the\n combinations of points of length k will be evaluated.\n Attributes\n ----------\n knowledge_length : int\n the length of the background knowledge that we want to simulate.\n\n Examples\n --------\n >>> import skmob\n >>> from skmob.privacy import attacks\n >>> from skmob.core.trajectorydataframe import TrajDataFrame\n >>> # load data\n >>> url_priv_ex = \"https://raw.githubusercontent.com/scikit-mobility/scikit-mobility/master/tutorial/data/privacy_toy.csv\"\n >>> trjdat = TrajDataFrame.from_file(filename=url_priv_ex)\n >>> # create a location attack and assess risk\n >>> at = attacks.LocationSequenceAttack(knowledge_length=2)\n >>> r = at.assess_risk(trjdat)\n >>> print(r)\n uid risk\n 0 1 0.500000\n 1 2 0.500000\n 2 3 1.000000\n 3 4 0.500000\n 4 5 1.000000\n 5 6 0.333333\n 6 7 0.500000\n\n >>> # change the length of the background knowledge and reassess risk\n >>> at.knowledge_length = 3\n >>> r = at.assess_risk(trjdat)\n >>> print(r)\n uid risk\n 0 1 1.000000\n 1 2 1.000000\n 2 3 1.000000\n 3 4 1.000000\n 4 5 1.000000\n 5 6 0.333333\n 6 7 1.000000\n\n >>> # limit privacy assessment to some target uids\n >>> r = at.assess_risk(trjdat, targets=[1,2])\n >>> print(r)\n uid risk\n 0 1 1.0\n 1 2 1.0\n\n >>> # inspect probability of reidentification for each background knowledge instance\n >>> r = at.assess_risk(trjdat, targets=[1,2], force_instances=True)\n >>> print(r)\n lat lng datetime uid instance instance_elem prob\n 0 43.843014 10.507994 2011-02-03 08:34:04 1 1 1 1.0\n 1 43.544270 10.326150 2011-02-03 09:34:04 1 1 2 1.0\n 2 43.708530 10.403600 2011-02-03 10:34:04 1 1 3 1.0\n 3 43.843014 10.507994 2011-02-03 08:34:04 1 2 1 1.0\n 4 43.544270 10.326150 2011-02-03 09:34:04 1 2 2 1.0\n 5 43.779250 11.246260 2011-02-04 10:34:04 1 2 3 1.0\n 6 43.843014 10.507994 2011-02-03 08:34:04 1 3 1 1.0\n 7 43.708530 10.403600 2011-02-03 10:34:04 1 3 2 1.0\n 8 43.779250 11.246260 2011-02-04 10:34:04 1 3 3 1.0\n 9 43.544270 10.326150 2011-02-03 09:34:04 1 4 1 0.5\n 10 43.708530 10.403600 2011-02-03 10:34:04 1 4 2 0.5\n 11 43.779250 11.246260 2011-02-04 10:34:04 1 4 3 0.5\n 12 43.843014 10.507994 2011-02-03 08:34:04 2 1 1 1.0\n 13 43.708530 10.403600 2011-02-03 09:34:04 2 1 2 1.0\n 14 43.843014 10.507994 2011-02-04 10:34:04 2 1 3 1.0\n 15 43.843014 10.507994 2011-02-03 08:34:04 2 2 1 1.0\n 16 43.708530 10.403600 2011-02-03 09:34:04 2 2 2 1.0\n 17 43.544270 10.326150 2011-02-04 11:34:04 2 2 3 1.0\n 18 43.843014 10.507994 2011-02-03 08:34:04 2 3 1 1.0\n 19 43.843014 10.507994 2011-02-04 10:34:04 2 3 2 1.0\n 20 43.544270 10.326150 2011-02-04 11:34:04 2 3 3 1.0\n 21 43.708530 10.403600 2011-02-03 09:34:04 2 4 1 1.0\n 22 43.843014 10.507994 2011-02-04 10:34:04 2 4 2 1.0\n 23 43.544270 10.326150 2011-02-04 11:34:04 2 4 3 1.0\n\n References\n ----------\n .. [TIST2018] Roberto Pellungrini, Luca Pappalardo, Francesca Pratesi, and Anna Monreale. 2017. A Data Mining Approach to Assess Privacy Risk in Human Mobility Data. ACM Trans. Intell. Syst. Technol. 9, 3, Article 31 (December 2017), 27 pages. DOI: https://doi.org/10.1145/3106774\n .. [MOB2018] Roberto Pellungrini, Luca Pappalardo, Francesca Pratesi, Anna Monreale: Analyzing Privacy Risk in Human Mobility Data. 
STAF Workshops 2018: 114-129\n \"\"\"\n\n def __init__(self, knowledge_length):\n super(LocationSequenceAttack, self).__init__(knowledge_length)\n\n def assess_risk(self, traj, targets=None, force_instances=False, show_progress=False):\n \"\"\"\n Assess privacy risk for a TrajectoryDataFrame.\n An attack must implement an assessing strategy. This could involve some preprocessing, for example\n transforming the original data, and calls to the risk function.\n If it is not required to compute the risk for the entire data, the targets parameter can be used to select\n a portion of users to perform the assessment on.\n\n Parameters\n ----------\n traj : TrajectoryDataFrame\n the dataframe on which to assess privacy risk.\n\n targets : TrajectoryDataFrame or list, optional\n the users_id target of the attack. They must be compatible with the trajectory data. Default values is None\n in which case risk is computed on all users in traj. The defaul is `None`.\n\n force_instances : boolean, optional\n if True, returns all possible instances of background knowledge\n with their respective probability of reidentification. The defaul is `False`.\n\n show_progress : boolean, optional\n if True, shows the progress of the computation. The defaul is `False`.\n\n Returns\n -------\n DataFrame\n a DataFrame with the privacy risk for each user, in the form (user_id, risk).\n \"\"\"\n traj = traj.sort_values(by=[constants.UID, constants.DATETIME])\n return self._all_risks(traj, targets, force_instances, show_progress)\n\n def _match(self, single_traj, instance):\n \"\"\"\n Matching function for the attack.\n For a location sequence attack, both the coordinates and the order of visit are used in the matching.\n If a trajectory presents the same locations in the same order as the ones in the instance, a match is found.\n\n Parameters\n ----------\n single_traj : TrajectoryDataFrame\n the dataframe of the trajectory of a single individual.\n\n instance : tuple\n an instance of background knowledge.\n\n Returns\n -------\n int\n 1 if the instance matches the trajectory, 0 otherwise.\n \"\"\"\n inst = pd.DataFrame(data=instance, columns=single_traj.columns)\n inst_iterator = inst.iterrows()\n inst_line = next(inst_iterator)[1]\n count = 0\n for index, row in single_traj.iterrows():\n if inst_line[constants.LATITUDE] == row[constants.LATITUDE] and inst_line[constants.LONGITUDE] == row[\n constants.LONGITUDE]:\n count += 1\n try:\n inst_line = next(inst_iterator)[1]\n except StopIteration:\n break\n if len(inst.index) == count:\n return 1\n else:\n return 0\n\n\nclass LocationTimeAttack(Attack):\n \"\"\"Location Time Attack\n\n In a location time attack the adversary knows the coordinates of locations visited by an individual and the time\n in which they were visited and matches them against trajectories. The precision at which to consider the temporal\n information can also be specified.\n\n Parameters\n ----------\n knowledge_length : int\n the length of the background knowledge that we want to simulate. The length of the background knowledge\n specifies the amount of knowledge that the adversary will use for her attack. For each individual all the\n combinations of points of length k will be evaluated.\n\n time_precision : string, optional\n the precision at which to consider the timestamps for the visits.\n The possible precisions are: Year, Month, Day, Hour, Minute, Second. 
The default is `Hour`\n\n Attributes\n ----------\n knowledge_length : int\n the length of the background knowledge that we want to simulate.\n\n time_precision : string\n the precision at which to consider the timestamps for the visits.\n\n Examples\n --------\n >>> import skmob\n >>> from skmob.privacy import attacks\n >>> from skmob.core.trajectorydataframe import TrajDataFrame\n >>> # load data\n >>> url_priv_ex = \"https://raw.githubusercontent.com/scikit-mobility/scikit-mobility/master/tutorial/data/privacy_toy.csv\"\n >>> trjdat = TrajDataFrame.from_file(filename=url_priv_ex)\n >>> # create a location attack and assess risk\n >>> at = attacks.LocationTimeAttack(knowledge_length=2)\n >>> r = at.assess_risk(trjdat)\n >>> print(r)\n uid risk\n 0 1 1.0\n 1 2 1.0\n 2 3 1.0\n 3 4 1.0\n 4 5 1.0\n 5 6 0.5\n 6 7 1.0\n\n >>> #change the time granularity of the attack\n >>> at.time_precision = \"Month\"\n >>> r = at.assess_risk(trjdat)\n >>> print(r)\n uid risk\n 0 1 0.333333\n 1 2 0.500000\n 2 3 0.333333\n 3 4 0.333333\n 4 5 0.250000\n 5 6 0.250000\n 6 7 0.500000\n\n >>> # change the length of the background knowledge and reassess risk\n >>> at.knowledge_length = 3\n >>> r = at.assess_risk(trjdat)\n >>> print(r)\n uid risk\n 0 1 0.500000\n 1 2 1.000000\n 2 3 0.500000\n 3 4 0.333333\n 4 5 0.333333\n 5 6 0.250000\n 6 7 1.000000\n\n >>> # limit privacy assessment to some target uids\n >>> r = at.assess_risk(trjdat, targets=[1,2])\n >>> print(r)\n uid risk\n 0 1 0.500000\n 1 2 1.000000\n\n >>> # inspect probability of reidentification for each background knowledge instance\n >>> r = at.assess_risk(trjdat, targets=[1,2], force_instances=True)\n >>> print(r)\n lat lng datetime uid instance instance_elem prob\n 0 43.843014 10.507994 2011-02-03 08:34:04 1 1 1 0.333333\n 1 43.544270 10.326150 2011-02-03 09:34:04 1 1 2 0.333333\n 2 43.708530 10.403600 2011-02-03 10:34:04 1 1 3 0.333333\n 3 43.843014 10.507994 2011-02-03 08:34:04 1 2 1 0.500000\n 4 43.544270 10.326150 2011-02-03 09:34:04 1 2 2 0.500000\n 5 43.779250 11.246260 2011-02-04 10:34:04 1 2 3 0.500000\n 6 43.843014 10.507994 2011-02-03 08:34:04 1 3 1 0.333333\n 7 43.708530 10.403600 2011-02-03 10:34:04 1 3 2 0.333333\n 8 43.779250 11.246260 2011-02-04 10:34:04 1 3 3 0.333333\n 9 43.544270 10.326150 2011-02-03 09:34:04 1 4 1 0.333333\n 10 43.708530 10.403600 2011-02-03 10:34:04 1 4 2 0.333333\n 11 43.779250 11.246260 2011-02-04 10:34:04 1 4 3 0.333333\n 12 43.843014 10.507994 2011-02-03 08:34:04 2 1 1 1.000000\n 13 43.708530 10.403600 2011-02-03 09:34:04 2 1 2 1.000000\n 14 43.843014 10.507994 2011-02-04 10:34:04 2 1 3 1.000000\n 15 43.843014 10.507994 2011-02-03 08:34:04 2 2 1 0.333333\n 16 43.708530 10.403600 2011-02-03 09:34:04 2 2 2 0.333333\n 17 43.544270 10.326150 2011-02-04 11:34:04 2 2 3 0.333333\n 18 43.843014 10.507994 2011-02-03 08:34:04 2 3 1 1.000000\n 19 43.843014 10.507994 2011-02-04 10:34:04 2 3 2 1.000000\n 20 43.544270 10.326150 2011-02-04 11:34:04 2 3 3 1.000000\n 21 43.708530 10.403600 2011-02-03 09:34:04 2 4 1 0.333333\n 22 43.843014 10.507994 2011-02-04 10:34:04 2 4 2 0.333333\n 23 43.544270 10.326150 2011-02-04 11:34:04 2 4 3 0.333333\n\n References\n ----------\n .. [TIST2018] Roberto Pellungrini, Luca Pappalardo, Francesca Pratesi, and Anna Monreale. 2017. A Data Mining Approach to Assess Privacy Risk in Human Mobility Data. ACM Trans. Intell. Syst. Technol. 9, 3, Article 31 (December 2017), 27 pages. DOI: https://doi.org/10.1145/3106774\n .. 
[MOB2018] Roberto Pellungrini, Luca Pappalardo, Francesca Pratesi, Anna Monreale: Analyzing Privacy Risk in Human Mobility Data. STAF Workshops 2018: 114-129\n \"\"\"\n\n def __init__(self, knowledge_length, time_precision=\"Hour\"):\n self.time_precision = time_precision\n super(LocationTimeAttack, self).__init__(knowledge_length)\n\n @property\n def time_precision(self):\n return self._time_precision\n\n @time_precision.setter\n def time_precision(self, val):\n if val not in constants.PRECISION_LEVELS:\n raise ValueError(\"Possible time precisions are: Year, Month, Day, Hour, Minute, Second\")\n self._time_precision = val\n\n def assess_risk(self, traj, targets=None, force_instances=False, show_progress=False):\n \"\"\"\n Assess privacy risk for a TrajectoryDataFrame.\n An attack must implement an assessing strategy. This could involve some preprocessing, for example\n transforming the original data, and calls to the risk function.\n If it is not required to compute the risk for the entire data, the targets parameter can be used to select\n a portion of users to perform the assessment on.\n\n Parameters\n ----------\n traj : TrajectoryDataFrame\n the dataframe on which to assess privacy risk.\n\n targets : TrajectoryDataFrame or list, optional\n the users_id target of the attack. They must be compatible with the trajectory data. Default values is None\n in which case risk is computed on all users in traj. The defaul is `None`.\n\n force_instances : boolean, optional\n if True, returns all possible instances of background knowledge\n with their respective probability of reidentification. The defaul is `False`.\n\n show_progress : boolean, optional\n if True, shows the progress of the computation. The defaul is `False`.\n\n Returns\n -------\n DataFrame\n a DataFrame with the privacy risk for each user, in the form (user_id, risk).\n \"\"\"\n traj = traj.sort_values(by=[constants.UID, constants.DATETIME])\n traj[constants.TEMP] = traj[constants.DATETIME].apply(lambda x: date_time_precision(x, self.time_precision))\n return self._all_risks(traj, targets, force_instances, show_progress)\n\n def _match(self, single_traj, instance):\n \"\"\"\n Matching function for the attack.\n For a location time attack, both the coordinates and the order of visit are used in the matching.\n If a trajectory presents the same locations with the same temporal information as in the instance,\n a match is found.\n\n Parameters\n ----------\n single_traj : TrajectoryDataFrame\n the dataframe of the trajectory of a single individual.\n\n instance : tuple\n an instance of background knowledge.\n\n Returns\n -------\n int\n 1 if the instance matches the trajectory, 0 otherwise.\n \"\"\"\n locs = single_traj.groupby([constants.LATITUDE, constants.LONGITUDE, constants.TEMP]).size().reset_index(name=constants.COUNT)\n inst = pd.DataFrame(data=instance, columns=single_traj.columns)\n inst = inst.groupby([constants.LATITUDE, constants.LONGITUDE,constants.TEMP]).size().reset_index(name=constants.COUNT + \"inst\")\n locs_inst = pd.merge(locs, inst, left_on=[constants.LATITUDE, constants.LONGITUDE, constants.TEMP],\n right_on=[constants.LATITUDE, constants.LONGITUDE,constants.TEMP])\n if len(locs_inst.index) != len(inst.index):\n return 0\n else:\n condition = locs_inst[constants.COUNT] >= locs_inst[constants.COUNT + \"inst\"]\n if len(locs_inst[condition].index) != len(inst.index):\n return 0\n else:\n return 1\n\n\nclass UniqueLocationAttack(Attack):\n \"\"\"Unique Location Attack\n\n In a unique location attack the 
adversary knows the coordinates of unique locations visited by an individual,\n and matches them against frequency vectors. A frequency vector, is an aggregation on trajectory\n data showing the unique locations visited by an individual and the frequency with which he visited those locations.\n\n Parameters\n ----------\n knowledge_length : int\n the length of the background knowledge that we want to simulate. The length of the background knowledge\n specifies the amount of knowledge that the adversary will use for her attack. For each individual all the\n combinations of points of length k will be evaluated.\n\n Attributes\n ----------\n knowledge_length : int\n the length of the background knowledge that we want to simulate.\n\n Examples\n --------\n >>> import skmob\n >>> from skmob.privacy import attacks\n >>> from skmob.core.trajectorydataframe import TrajDataFrame\n >>> # load data\n >>> url_priv_ex = \"https://raw.githubusercontent.com/scikit-mobility/scikit-mobility/master/tutorial/data/privacy_toy.csv\"\n >>> trjdat = TrajDataFrame.from_file(filename=url_priv_ex)\n >>> # create a location attack and assess risk\n >>> at = attacks.UniqueLocationAttack(knowledge_length=2)\n >>> r = at.assess_risk(trjdat)\n >>> print(r)\n uid risk\n 0 1 0.333333\n 1 2 0.250000\n 2 3 0.333333\n 3 4 0.333333\n 4 5 0.250000\n 5 6 0.250000\n 6 7 0.250000\n\n >>> # change the length of the background knowledge and reassess risk\n >>> at.knowledge_length = 3\n >>> r = at.assess_risk(trjdat)\n >>> print(r)\n uid risk\n 0 1 0.500000\n 1 2 0.333333\n 2 3 0.500000\n 3 4 0.333333\n 4 5 0.333333\n 5 6 0.250000\n 6 7 0.250000\n\n >>> # limit privacy assessment to some target uids\n >>> r = at.assess_risk(trjdat, targets=[1,2])\n >>> print(r)\n uid risk\n 0 1 0.500000\n 1 2 0.333333\n\n >>> # inspect probability of reidentification for each background knowledge instance\n >>> r = at.assess_risk(trjdat, targets=[1,2], force_instances=True)\n >>> print(r)\n lat lng datetime uid instance instance_elem prob\n 0 1.0 43.544270 10.326150 1.0 1 1 0.333333\n 1 1.0 43.708530 10.403600 1.0 1 2 0.333333\n 2 1.0 43.779250 11.246260 1.0 1 3 0.333333\n 3 1.0 43.544270 10.326150 1.0 2 1 0.333333\n 4 1.0 43.708530 10.403600 1.0 2 2 0.333333\n 5 1.0 43.843014 10.507994 1.0 2 3 0.333333\n 6 1.0 43.544270 10.326150 1.0 3 1 0.500000\n 7 1.0 43.779250 11.246260 1.0 3 2 0.500000\n 8 1.0 43.843014 10.507994 1.0 3 3 0.500000\n 9 1.0 43.708530 10.403600 1.0 4 1 0.333333\n 10 1.0 43.779250 11.246260 1.0 4 2 0.333333\n 11 1.0 43.843014 10.507994 1.0 4 3 0.333333\n 12 2.0 43.544270 10.326150 1.0 1 1 0.333333\n 13 2.0 43.708530 10.403600 1.0 1 2 0.333333\n 14 2.0 43.843014 10.507994 2.0 1 3 0.333333\n\n References\n ----------\n .. [TIST2018] Roberto Pellungrini, Luca Pappalardo, Francesca Pratesi, and Anna Monreale. 2017. A Data Mining Approach to Assess Privacy Risk in Human Mobility Data. ACM Trans. Intell. Syst. Technol. 9, 3, Article 31 (December 2017), 27 pages. DOI: https://doi.org/10.1145/3106774\n .. [MOB2018] Roberto Pellungrini, Luca Pappalardo, Francesca Pratesi, Anna Monreale: Analyzing Privacy Risk in Human Mobility Data. STAF Workshops 2018: 114-129\n \"\"\"\n\n def __init__(self, knowledge_length):\n super(UniqueLocationAttack, self).__init__(knowledge_length)\n\n def assess_risk(self, traj, targets=None, force_instances=False, show_progress=False):\n \"\"\"\n Assess privacy risk for a TrajectoryDataFrame.\n An attack must implement an assessing strategy. 
This could involve some preprocessing, for example\n transforming the original data, and calls to the risk function.\n If it is not required to compute the risk for the entire data, the targets parameter can be used to select\n a portion of users to perform the assessment on.\n\n Parameters\n ----------\n traj : TrajectoryDataFrame\n the dataframe on which to assess privacy risk.\n\n targets : TrajectoryDataFrame or list, optional\n the users_id target of the attack. They must be compatible with the trajectory data. Default values is None\n in which case risk is computed on all users in traj. The defaul is `None`.\n\n force_instances : boolean, optional\n if True, returns all possible instances of background knowledge\n with their respective probability of reidentification. The defaul is `False`.\n\n show_progress : boolean, optional\n if True, shows the progress of the computation. The defaul is `False`.\n\n Returns\n -------\n DataFrame\n a DataFrame with the privacy risk for each user, in the form (user_id, risk).\n \"\"\"\n freq = frequency_vector(traj)\n return self._all_risks(freq, targets, force_instances, show_progress)\n\n def _match(self, single_traj, instance):\n \"\"\"\n Matching function for the attack.\n For a unique location attack, the coordinates of unique locations are used in the matching.\n If a frequency vector presents the same locations as in the instance, a match is found.\n\n Parameters\n ----------\n single_traj : TrajectoryDataFrame\n the dataframe of the trajectory of a single individual.\n\n instance : tuple\n an instance of background knowledge.\n\n Returns\n -------\n int\n 1 if the instance matches the trajectory, 0 otherwise.\n \"\"\"\n inst = pd.DataFrame(data=instance, columns=single_traj.columns)\n locs_inst = pd.merge(single_traj, inst, left_on=[constants.LATITUDE, constants.LONGITUDE],\n right_on=[constants.LATITUDE, constants.LONGITUDE])\n if len(locs_inst.index) == len(inst.index):\n return 1\n else:\n return 0\n\n\nclass LocationFrequencyAttack(Attack):\n \"\"\"Location Frequency Attack\n\n In a location frequency attack the adversary knows the coordinates of the unique locations visited by an individual\n and the frequency with which he visited them, and matches them against frequency vectors. A frequency vector,\n is an aggregation on trajectory data showing the unique locations visited by an individual and the frequency\n with which he visited those locations. It is possible to specify a tolerance level for the matching of the frequency.\n\n Parameters\n ----------\n knowledge_length : int\n the length of the background knowledge that we want to simulate. The length of the background knowledge\n specifies the amount of knowledge that the adversary will use for her attack. For each individual all the\n combinations of points of length k will be evaluated.\n\n tolerance : float, optional\n the tolarance with which to match the frequency. It can assume values between 0 and 1. 
The defaul is `0`.\n\n Attributes\n ----------\n knowledge_length : int\n the length of the background knowledge that we want to simulate.\n\n tolerance : float\n the tolarance with which to match the frequency.\n\n Examples\n --------\n >>> import skmob\n >>> from skmob.privacy import attacks\n >>> from skmob.core.trajectorydataframe import TrajDataFrame\n >>> # load data\n >>> url_priv_ex = \"https://raw.githubusercontent.com/scikit-mobility/scikit-mobility/master/tutorial/data/privacy_toy.csv\"\n >>> trjdat = TrajDataFrame.from_file(filename=url_priv_ex)\n >>> # create a location attack and assess risk\n >>> at = attacks.LocationFrequencyAttack(knowledge_length=2)\n >>> r = at.assess_risk(trjdat)\n >>> print(r)\n uid risk\n 0 1 0.333333\n 1 2 1.000000\n 2 3 0.333333\n 3 4 0.333333\n 4 5 0.333333\n 5 6 0.333333\n 6 7 1.000000\n\n >>> # change the tolerance with witch the frequency is matched\n >>> at.tolerance = 0.5\n >>> r = at.assess_risk(trjdat)\n >>> print(r)\n uid risk\n 0 1 0.333333\n 1 2 1.000000\n 2 3 0.333333\n 3 4 0.333333\n 4 5 0.250000\n 5 6 0.250000\n 6 7 1.000000\n\n >>> # change the length of the background knowledge and reassess risk\n >>> at.knowledge_length = 3\n >>> r = at.assess_risk(trjdat)\n >>> print(r)\n uid risk\n 0 1 0.500000\n 1 2 1.000000\n 2 3 0.500000\n 3 4 0.333333\n 4 5 0.333333\n 5 6 0.250000\n 6 7 1.000000\n\n >>> # limit privacy assessment to some target uids\n >>> r = at.assess_risk(trjdat, targets=[1,2])\n >>> print(r)\n uid risk\n 0 1 0.5\n 1 2 1.0\n\n >>> # inspect probability of reidentification for each background knowledge instance\n >>> r = at.assess_risk(trjdat, targets=[1,2], force_instances=True)\n >>> print(r)\n lat lng datetime uid instance instance_elem prob\n 0 1.0 43.544270 10.326150 1.0 1 1 0.333333\n 1 1.0 43.708530 10.403600 1.0 1 2 0.333333\n 2 1.0 43.779250 11.246260 1.0 1 3 0.333333\n 3 1.0 43.544270 10.326150 1.0 2 1 0.333333\n 4 1.0 43.708530 10.403600 1.0 2 2 0.333333\n 5 1.0 43.843014 10.507994 1.0 2 3 0.333333\n 6 1.0 43.544270 10.326150 1.0 3 1 0.500000\n 7 1.0 43.779250 11.246260 1.0 3 2 0.500000\n 8 1.0 43.843014 10.507994 1.0 3 3 0.500000\n 9 1.0 43.708530 10.403600 1.0 4 1 0.333333\n 10 1.0 43.779250 11.246260 1.0 4 2 0.333333\n 11 1.0 43.843014 10.507994 1.0 4 3 0.333333\n 12 2.0 43.544270 10.326150 1.0 1 1 1.000000\n 13 2.0 43.708530 10.403600 1.0 1 2 1.000000\n 14 2.0 43.843014 10.507994 2.0 1 3 1.000000\n\n References\n ----------\n .. [TIST2018] Roberto Pellungrini, Luca Pappalardo, Francesca Pratesi, and Anna Monreale. 2017. A Data Mining Approach to Assess Privacy Risk in Human Mobility Data. ACM Trans. Intell. Syst. Technol. 9, 3, Article 31 (December 2017), 27 pages. DOI: https://doi.org/10.1145/3106774\n .. [MOB2018] Roberto Pellungrini, Luca Pappalardo, Francesca Pratesi, Anna Monreale: Analyzing Privacy Risk in Human Mobility Data. STAF Workshops 2018: 114-129\n \"\"\"\n\n def __init__(self, knowledge_length, tolerance=0.0):\n self.tolerance = tolerance\n super(LocationFrequencyAttack, self).__init__(knowledge_length)\n\n @property\n def tolerance(self):\n return self._tolerance\n\n @tolerance.setter\n def tolerance(self, val):\n if val > 1.0 or val < 0.0:\n raise ValueError(\"Tolerance should be in the interval [0.0,1.0]\")\n self._tolerance = val\n\n def assess_risk(self, traj, targets=None, force_instances=False, show_progress=False):\n \"\"\"\n Assess privacy risk for a TrajectoryDataFrame.\n An attack must implement an assessing strategy. 
This could involve some preprocessing, for example\n transforming the original data, and calls to the risk function.\n If it is not required to compute the risk for the entire data, the targets parameter can be used to select\n a portion of users to perform the assessment on.\n\n Parameters\n ----------\n traj : TrajectoryDataFrame\n the dataframe on which to assess privacy risk.\n\n targets : TrajectoryDataFrame or list, optional\n the users_id target of the attack. They must be compatible with the trajectory data. Default values is None\n in which case risk is computed on all users in traj. The defaul is `None`.\n\n force_instances : boolean, optional\n if True, returns all possible instances of background knowledge\n with their respective probability of reidentification. The defaul is `False`.\n\n show_progress : boolean, optional\n if True, shows the progress of the computation. The defaul is `False`.\n\n Returns\n -------\n DataFrame\n a DataFrame with the privacy risk for each user, in the form (user_id, risk).\n \"\"\"\n freq = frequency_vector(traj)\n return self._all_risks(freq, targets, force_instances, show_progress)\n\n def _match(self, single_traj, instance):\n \"\"\"\n Matching function for the attack.\n For a frequency location attack, the coordinates of unique locations and their frequency of visit are used\n in the matching. If a frequency vector presents the same locations with the same frequency as in the instance,\n a match is found. The tolerance level specified at construction is used to construct and interval of frequency\n and allow for less precise matching.\n\n Parameters\n ----------\n single_traj : TrajectoryDataFrame\n the dataframe of the trajectory of a single individual.\n\n instance : tuple\n an instance of background knowledge.\n\n Returns\n -------\n int\n 1 if the instance matches the trajectory, 0 otherwise.\n \"\"\"\n inst = pd.DataFrame(data=instance, columns=single_traj.columns)\n inst.rename(columns={constants.FREQUENCY: constants.FREQUENCY + \"inst\"}, inplace=True)\n locs_inst = pd.merge(single_traj, inst, left_on=[constants.LATITUDE, constants.LONGITUDE],\n right_on=[constants.LATITUDE, constants.LONGITUDE])\n if len(locs_inst.index) != len(inst.index):\n return 0\n else:\n condition1 = locs_inst[constants.FREQUENCY + \"inst\"] >= locs_inst[constants.FREQUENCY] - (\n locs_inst[constants.FREQUENCY] * self.tolerance)\n condition2 = locs_inst[constants.FREQUENCY + \"inst\"] <= locs_inst[constants.FREQUENCY] + (\n locs_inst[constants.FREQUENCY] * self.tolerance)\n if len(locs_inst[condition1 & condition2].index) != len(inst.index):\n return 0\n else:\n return 1\n\n\nclass LocationProbabilityAttack(Attack):\n \"\"\"Location Probability Attack\n\n In a location probability attack the adversary knows the coordinates of\n the unique locations visited by an individual and the probability with which he visited them,\n and matches them against probability vectors.\n A probability vector, is an aggregation on trajectory data showing the unique locations visited by an individual\n and the probability with which he visited those locations.\n It is possible to specify a tolerance level for the matching of the probability.\n\n Parameters\n ----------\n knowledge_length : int\n the length of the background knowledge that we want to simulate. The length of the background knowledge\n specifies the amount of knowledge that the adversary will use for her attack. 
For each individual all the\n combinations of points of length k will be evaluated.\n\n tolerance : float, optional\n the tolerance with which to match the probability. It can assume values between 0 and 1. The default is `0`.\n\n Attributes\n ----------\n knowledge_length : int\n the length of the background knowledge that we want to simulate.\n\n tolerance : float\n the tolerance with which to match the probability.\n\n Examples\n --------\n >>> import skmob\n >>> from skmob.privacy import attacks\n >>> from skmob.core.trajectorydataframe import TrajDataFrame\n >>> # load data\n >>> url_priv_ex = \"https://raw.githubusercontent.com/scikit-mobility/scikit-mobility/master/tutorial/data/privacy_toy.csv\"\n >>> trjdat = TrajDataFrame.from_file(filename=url_priv_ex)\n >>> # create a location attack and assess risk\n >>> at = attacks.LocationProbabilityAttack(knowledge_length=2)\n >>> r = at.assess_risk(trjdat)\n >>> print(r)\n uid risk\n 0 1 0.5\n 1 2 1.0\n 2 3 0.5\n 3 4 1.0\n 4 5 1.0\n 5 6 1.0\n 6 7 1.0\n\n >>> # change the tolerance with which the probability is matched\n >>> at.tolerance = 0.5\n >>> r = at.assess_risk(trjdat)\n >>> print(r)\n uid risk\n 0 1 0.333333\n 1 2 0.500000\n 2 3 0.333333\n 3 4 0.333333\n 4 5 0.250000\n 5 6 1.000000\n 6 7 1.000000\n\n >>> # change the length of the background knowledge and reassess risk\n >>> at.knowledge_length = 3\n >>> r = at.assess_risk(trjdat)\n >>> print(r)\n uid risk\n 0 1 0.500000\n 1 2 1.000000\n 2 3 0.500000\n 3 4 0.333333\n 4 5 0.333333\n 5 6 1.000000\n 6 7 1.000000\n\n >>> # limit privacy assessment to some target uids\n >>> r = at.assess_risk(trjdat, targets=[1,2])\n >>> print(r)\n uid risk\n 0 1 0.5\n 1 2 1.0\n >>> # inspect probability of reidentification for each background knowledge instance\n >>> r = at.assess_risk(trjdat, targets=[1,2], force_instances=True)\n >>> print(r)\n lat lng datetime uid instance instance_elem prob\n 0 1.0 43.544270 10.326150 0.25 1 1 0.333333\n 1 1.0 43.708530 10.403600 0.25 1 2 0.333333\n 2 1.0 43.779250 11.246260 0.25 1 3 0.333333\n 3 1.0 43.544270 10.326150 0.25 2 1 0.333333\n 4 1.0 43.708530 10.403600 0.25 2 2 0.333333\n 5 1.0 43.843014 10.507994 0.25 2 3 0.333333\n 6 1.0 43.544270 10.326150 0.25 3 1 0.500000\n 7 1.0 43.779250 11.246260 0.25 3 2 0.500000\n 8 1.0 43.843014 10.507994 0.25 3 3 0.500000\n 9 1.0 43.708530 10.403600 0.25 4 1 0.333333\n 10 1.0 43.779250 11.246260 0.25 4 2 0.333333\n 11 1.0 43.843014 10.507994 0.25 4 3 0.333333\n 12 2.0 43.544270 10.326150 0.25 1 1 1.000000\n 13 2.0 43.708530 10.403600 0.25 1 2 1.000000\n 14 2.0 43.843014 10.507994 0.50 1 3 1.000000\n\n References\n ----------\n .. [TIST2018] Roberto Pellungrini, Luca Pappalardo, Francesca Pratesi, and Anna Monreale. 2017. A Data Mining Approach to Assess Privacy Risk in Human Mobility Data. ACM Trans. Intell. Syst. Technol. 9, 3, Article 31 (December 2017), 27 pages. DOI: https://doi.org/10.1145/3106774\n .. [MOB2018] Roberto Pellungrini, Luca Pappalardo, Francesca Pratesi, Anna Monreale: Analyzing Privacy Risk in Human Mobility Data. 
STAF Workshops 2018: 114-129\n \"\"\"\n\n def __init__(self, knowledge_length, tolerance=0.0):\n self.tolerance = tolerance\n super(LocationProbabilityAttack, self).__init__(knowledge_length)\n\n @property\n def tolerance(self):\n return self._tolerance\n\n @tolerance.setter\n def tolerance(self, val):\n if val > 1.0 or val < 0.0:\n raise ValueError(\"Tolerance should be in the interval [0.0,1.0]\")\n self._tolerance = val\n\n def assess_risk(self, traj, targets=None, force_instances=False, show_progress=False):\n \"\"\"\n Assess privacy risk for a TrajectoryDataFrame.\n An attack must implement an assessing strategy. This could involve some preprocessing, for example\n transforming the original data, and calls to the risk function.\n If it is not required to compute the risk for the entire data, the targets parameter can be used to select\n a portion of users to perform the assessment on.\n\n Parameters\n ----------\n traj : TrajectoryDataFrame\n the dataframe on which to assess privacy risk.\n\n targets : TrajectoryDataFrame or list, optional\n the users_id target of the attack. They must be compatible with the trajectory data. Default value is None,\n in which case risk is computed on all users in traj. The default is `None`.\n\n force_instances : boolean, optional\n if True, returns all possible instances of background knowledge\n with their respective probability of reidentification. The default is `False`.\n\n show_progress : boolean, optional\n if True, shows the progress of the computation. The default is `False`.\n\n Returns\n -------\n DataFrame\n a DataFrame with the privacy risk for each user, in the form (user_id, risk).\n \"\"\"\n prob = probability_vector(traj)\n return self._all_risks(prob, targets, force_instances, show_progress)\n\n def _match(self, single_traj, instance):\n \"\"\"\n Matching function for the attack.\n For a probability location attack, the coordinates of unique locations and their probability of visit are used\n in the matching.\n If a probability vector presents the same locations with the same probability as in the instance,\n a match is found.\n The tolerance level specified at construction is used to build an interval of probability and allow\n for less precise matching.\n\n Parameters\n ----------\n single_traj : TrajectoryDataFrame\n the dataframe of the trajectory of a single individual.\n\n instance : tuple\n an instance of background knowledge.\n\n Returns\n -------\n int\n 1 if the instance matches the trajectory, 0 otherwise.\n \"\"\"\n inst = pd.DataFrame(data=instance, columns=single_traj.columns)\n inst.rename(columns={constants.PROBABILITY: constants.PROBABILITY + \"inst\"}, inplace=True)\n locs_inst = pd.merge(single_traj, inst, left_on=[constants.LATITUDE, constants.LONGITUDE],\n right_on=[constants.LATITUDE, constants.LONGITUDE])\n if len(locs_inst.index) != len(inst.index):\n return 0\n else:\n condition1 = locs_inst[constants.PROBABILITY + \"inst\"] >= locs_inst[constants.PROBABILITY] - (\n locs_inst[constants.PROBABILITY] * self.tolerance)\n condition2 = locs_inst[constants.PROBABILITY + \"inst\"] <= locs_inst[constants.PROBABILITY] + (\n locs_inst[constants.PROBABILITY] * self.tolerance)\n if len(locs_inst[condition1 & condition2].index) != len(inst.index):\n return 0\n else:\n return 1\n\n\nclass LocationProportionAttack(Attack):\n \"\"\"Location Proportion Attack\n\n In a location proportion attack the adversary knows the coordinates of the unique locations visited\n by an individual and the relative proportions between their 
frequencies of visit,\n and matches them against frequency vectors.\n A frequency vector is an aggregation on trajectory data showing the unique locations visited by an individual\n and the frequency with which he visited those locations.\n It is possible to specify a tolerance level for the matching of the proportion.\n\n Parameters\n ----------\n knowledge_length : int\n the length of the background knowledge that we want to simulate. The length of the background knowledge\n specifies the amount of knowledge that the adversary will use for her attack. For each individual all the\n combinations of points of length k will be evaluated.\n\n tolerance : float, optional\n the tolerance with which to match the proportion. It can assume values between 0 and 1. The default is `0`.\n\n Attributes\n ----------\n knowledge_length : int\n the length of the background knowledge that we want to simulate.\n\n tolerance : float\n the tolerance with which to match the proportion.\n\n Examples\n --------\n >>> import skmob\n >>> from skmob.privacy import attacks\n >>> from skmob.core.trajectorydataframe import TrajDataFrame\n >>> # load data\n >>> url_priv_ex = \"https://raw.githubusercontent.com/scikit-mobility/scikit-mobility/master/tutorial/data/privacy_toy.csv\"\n >>> trjdat = TrajDataFrame.from_file(filename=url_priv_ex)\n >>> # create a location attack and assess risk\n >>> at = attacks.LocationProportionAttack(knowledge_length=2)\n >>> r = at.assess_risk(trjdat)\n >>> print(r)\n uid risk\n 0 1 0.333333\n 1 2 1.000000\n 2 3 0.333333\n 3 4 0.333333\n 4 5 0.333333\n 5 6 0.333333\n 6 7 1.000000\n\n >>> # change the tolerance with which the proportion is matched\n >>> at.tolerance = 0.5\n >>> r = at.assess_risk(trjdat)\n >>> print(r)\n uid risk\n 0 1 0.333333\n 1 2 0.250000\n 2 3 0.333333\n 3 4 0.333333\n 4 5 0.333333\n 5 6 0.333333\n 6 7 0.250000\n\n >>> # change the length of the background knowledge and reassess risk\n >>> at.knowledge_length = 3\n >>> r = at.assess_risk(trjdat)\n >>> print(r)\n uid risk\n 0 1 0.500000\n 1 2 0.333333\n 2 3 0.500000\n 3 4 0.333333\n 4 5 0.333333\n 5 6 0.333333\n 6 7 0.250000\n\n >>> # limit privacy assessment to some target uids\n >>> r = at.assess_risk(trjdat, targets=[1,2])\n >>> print(r)\n uid risk\n 0 1 0.500000\n 1 2 0.333333\n\n >>> # inspect probability of reidentification for each background knowledge instance\n >>> r = at.assess_risk(trjdat, targets=[1,2], force_instances=True)\n >>> print(r)\n lat lng datetime uid instance instance_elem prob\n 0 1.0 43.544270 10.326150 1.0 1 1 0.333333\n 1 1.0 43.708530 10.403600 1.0 1 2 0.333333\n 2 1.0 43.779250 11.246260 1.0 1 3 0.333333\n 3 1.0 43.544270 10.326150 1.0 2 1 0.500000\n 4 1.0 43.708530 10.403600 1.0 2 2 0.500000\n 5 1.0 43.843014 10.507994 1.0 2 3 0.500000\n 6 1.0 43.544270 10.326150 1.0 3 1 0.500000\n 7 1.0 43.779250 11.246260 1.0 3 2 0.500000\n 8 1.0 43.843014 10.507994 1.0 3 3 0.500000\n 9 1.0 43.708530 10.403600 1.0 4 1 0.333333\n 10 1.0 43.779250 11.246260 1.0 4 2 0.333333\n 11 1.0 43.843014 10.507994 1.0 4 3 0.333333\n 12 2.0 43.544270 10.326150 1.0 1 1 0.333333\n 13 2.0 43.708530 10.403600 1.0 1 2 0.333333\n 14 2.0 43.843014 10.507994 2.0 1 3 0.333333\n\n References\n ----------\n .. [TIST2018] Roberto Pellungrini, Luca Pappalardo, Francesca Pratesi, and Anna Monreale. 2017. A Data Mining Approach to Assess Privacy Risk in Human Mobility Data. ACM Trans. Intell. Syst. Technol. 9, 3, Article 31 (December 2017), 27 pages. DOI: https://doi.org/10.1145/3106774\n .. 
[MOB2018] Roberto Pellungrini, Luca Pappalardo, Francesca Pratesi, Anna Monreale: Analyzing Privacy Risk in Human Mobility Data. STAF Workshops 2018: 114-129\n \"\"\"\n\n def __init__(self, knowledge_length, tolerance=0.0):\n self.tolerance = tolerance\n super(LocationProportionAttack, self).__init__(knowledge_length)\n\n @property\n def tolerance(self):\n return self._tolerance\n\n @tolerance.setter\n def tolerance(self, val):\n if val > 1.0 or val < 0.0:\n raise ValueError(\"Tolerance should be in the interval [0.0,1.0]\")\n self._tolerance = val\n\n def assess_risk(self, traj, targets=None, force_instances=False, show_progress=False):\n \"\"\"\n Assess privacy risk for a TrajectoryDataFrame.\n An attack must implement an assessing strategy. This could involve some preprocessing, for example\n transforming the original data, and calls to the risk function.\n If it is not required to compute the risk for the entire data, the targets parameter can be used to select\n a portion of users to perform the assessment on.\n\n Parameters\n ----------\n traj : TrajectoryDataFrame\n the dataframe on which to assess privacy risk.\n\n targets : TrajectoryDataFrame or list, optional\n the users_id target of the attack. They must be compatible with the trajectory data. Default value is None,\n in which case risk is computed on all users in traj. The default is `None`.\n\n force_instances : boolean, optional\n if True, returns all possible instances of background knowledge\n with their respective probability of reidentification. The default is `False`.\n\n show_progress : boolean, optional\n if True, shows the progress of the computation. The default is `False`.\n\n Returns\n -------\n DataFrame\n a DataFrame with the privacy risk for each user, in the form (user_id, risk).\n \"\"\"\n freq = frequency_vector(traj)\n return self._all_risks(freq, targets, force_instances, show_progress)\n\n def _match(self, single_traj, instance):\n \"\"\"\n Matching function for the attack. 
For a proportion location attack,\n the coordinates of unique locations and their relative proportion of frequency of visit\n are used in the matching.\n The proportions of visit are calculated with respect to the most frequent location found in the instance.\n If a frequency vector presents the same locations with the same proportions of frequency of\n visit as in the instance, a match is found.\n The tolerance level specified at construction is used to build an interval of proportion\n and allow for less precise matching.\n\n Parameters\n ----------\n single_traj : TrajectoryDataFrame\n the dataframe of the trajectory of a single individual.\n\n instance : tuple\n an instance of background knowledge.\n\n Returns\n -------\n int\n 1 if the instance matches the trajectory, 0 otherwise.\n \"\"\"\n inst = pd.DataFrame(data=instance, columns=single_traj.columns)\n inst.rename(columns={constants.FREQUENCY: constants.FREQUENCY + \"inst\"}, inplace=True)\n locs_inst = pd.merge(single_traj, inst, left_on=[constants.LATITUDE, constants.LONGITUDE],\n right_on=[constants.LATITUDE, constants.LONGITUDE])\n if len(locs_inst.index) != len(inst.index):\n return 0\n else:\n locs_inst[constants.PROPORTION + \"inst\"] = locs_inst[constants.FREQUENCY + \"inst\"] / locs_inst[\n constants.FREQUENCY + \"inst\"].max()\n locs_inst[constants.PROPORTION] = locs_inst[constants.FREQUENCY] / locs_inst[constants.FREQUENCY].max()\n condition1 = locs_inst[constants.PROPORTION + \"inst\"] >= locs_inst[constants.PROPORTION] - (\n locs_inst[constants.PROPORTION] * self.tolerance)\n condition2 = locs_inst[constants.PROPORTION + \"inst\"] <= locs_inst[constants.PROPORTION] + (\n locs_inst[constants.PROPORTION] * self.tolerance)\n if len(locs_inst[condition1 & condition2].index) != len(inst.index):\n return 0\n else:\n return 1\n\n\nclass HomeWorkAttack(Attack):\n \"\"\"Home And Work Attack\n\n In a home and work attack the adversary knows the coordinates of\n the two locations most frequently visited by an individual, and matches them against frequency vectors.\n A frequency vector is an aggregation on trajectory data showing the unique\n locations visited by an individual and the frequency with which he visited those locations.\n This attack does not require the generation of combinations to build the possible instances of background knowledge.\n\n Parameters\n ----------\n knowledge_length : int\n the length of the background knowledge that we want to simulate. The length of the background knowledge\n specifies the amount of knowledge that the adversary will use for her attack. 
For this attack the\n background knowledge is always the two most frequently visited locations, so no combinations of points are generated.\n\n Attributes\n ----------\n knowledge_length : int\n the length of the background knowledge that we want to simulate.\n\n Examples\n --------\n >>> import skmob\n >>> from skmob.privacy import attacks\n >>> from skmob.core.trajectorydataframe import TrajDataFrame\n >>> # load data\n >>> url_priv_ex = \"https://raw.githubusercontent.com/scikit-mobility/scikit-mobility/master/tutorial/data/privacy_toy.csv\"\n >>> trjdat = TrajDataFrame.from_file(filename=url_priv_ex)\n >>> # create a location attack and assess risk\n >>> at = attacks.HomeWorkAttack()\n >>> r = at.assess_risk(trjdat)\n >>> print(r)\n uid risk\n 0 1 0.25\n 1 2 0.25\n 2 3 0.25\n 3 4 0.25\n 4 5 1.00\n 5 6 1.00\n 6 7 1.00\n\n >>> # limit privacy assessment to some target uids\n >>> r = at.assess_risk(trjdat, targets=[1,2])\n >>> print(r)\n uid risk\n 0 1 0.25\n 1 2 0.25\n\n >>> # inspect probability of reidentification for each background knowledge instance\n >>> r = at.assess_risk(trjdat, targets=[1,2], force_instances=True)\n >>> print(r)\n lat lng datetime uid instance instance_elem prob\n 0 1.0 43.54427 10.32615 1.0 1 1 0.25\n 1 1.0 43.70853 10.40360 1.0 1 2 0.25\n 2 2.0 43.54427 10.32615 1.0 1 1 0.25\n 3 2.0 43.70853 10.40360 1.0 1 2 0.25\n\n References\n ----------\n .. [TIST2018] Roberto Pellungrini, Luca Pappalardo, Francesca Pratesi, and Anna Monreale. 2017. A Data Mining Approach to Assess Privacy Risk in Human Mobility Data. ACM Trans. Intell. Syst. Technol. 9, 3, Article 31 (December 2017), 27 pages. DOI: https://doi.org/10.1145/3106774\n .. [MOB2018] Roberto Pellungrini, Luca Pappalardo, Francesca Pratesi, Anna Monreale: Analyzing Privacy Risk in Human Mobility Data. STAF Workshops 2018: 114-129\n \"\"\"\n\n def __init__(self, knowledge_length=1):\n super(HomeWorkAttack, self).__init__(knowledge_length)\n\n def _generate_instances(self, single_traj):\n \"\"\"\n Returns the two most frequently visited locations by an individual.\n This is an override of the _generate_instances method of the Attack abstract class.\n\n Parameters\n ----------\n single_traj : TrajectoryDataFrame\n the dataframe of the trajectory of a single individual.\n\n Returns\n -------\n list\n a list with the records with the two most frequently visited locations.\n \"\"\"\n return [single_traj[:2].values]\n\n def assess_risk(self, traj, targets=None, force_instances=False, show_progress=False):\n \"\"\"\n Assess privacy risk for a TrajectoryDataFrame.\n An attack must implement an assessing strategy. This could involve some preprocessing, for example\n transforming the original data, and calls to the risk function.\n If it is not required to compute the risk for the entire data, the targets parameter can be used to select\n a portion of users to perform the assessment on.\n\n Parameters\n ----------\n traj : TrajectoryDataFrame\n the dataframe on which to assess privacy risk.\n\n targets : TrajectoryDataFrame or list, optional\n the users_id target of the attack. They must be compatible with the trajectory data. Default value is None,\n in which case risk is computed on all users in traj. The default is `None`.\n\n force_instances : boolean, optional\n if True, returns all possible instances of background knowledge\n with their respective probability of reidentification. The default is `False`.\n\n show_progress : boolean, optional\n if True, shows the progress of the computation. 
The default is `False`.\n\n Returns\n -------\n DataFrame\n a DataFrame with the privacy risk for each user, in the form (user_id, risk).\n \"\"\"\n freq = frequency_vector(traj)\n return self._all_risks(freq, targets, force_instances, show_progress)\n\n def _match(self, single_traj, instance):\n \"\"\"\n Matching function for the attack.\n For a home and work attack, the coordinates of the two locations are used in the matching.\n If a frequency vector presents the same locations as in the instance, a match is found.\n\n Parameters\n ----------\n single_traj : TrajectoryDataFrame\n the dataframe of the trajectory of a single individual.\n\n instance : tuple\n an instance of background knowledge.\n\n Returns\n -------\n int\n 1 if the instance matches the trajectory, 0 otherwise.\n \"\"\"\n inst = pd.DataFrame(data=instance, columns=single_traj.columns)\n locs_inst = pd.merge(single_traj[:2], inst, left_on=[constants.LATITUDE, constants.LONGITUDE],\n right_on=[constants.LATITUDE, constants.LONGITUDE])\n if len(locs_inst.index) == len(inst.index):\n return 1\n else:\n return 0\n" ]
[ [ "pandas.DataFrame", "pandas.merge" ] ]
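The `_match` implementations in the record above all share one idea: inner-join the attacker's instance with the user's aggregated vector on the location coordinates, then require every matched value to fall inside the interval [f * (1 - tolerance), f * (1 + tolerance)]. Below is a minimal standalone sketch of that interval check. It assumes plain column names (`lat`, `lng`, `frequency`) instead of skmob's `constants`, and `matches_with_tolerance` is a hypothetical helper, not part of skmob's API.

```python
import pandas as pd

def matches_with_tolerance(freq_vector, instance, tolerance=0.0):
    # Inner-join on the coordinates; a location missing from the merge
    # means the instance cannot match at all.
    merged = pd.merge(freq_vector, instance,
                      on=["lat", "lng"], suffixes=("", "_inst"))
    if len(merged) != len(instance):
        return 0
    # Accept the instance value if it lies within
    # [f * (1 - tolerance), f * (1 + tolerance)] for every location.
    lower = merged["frequency"] * (1.0 - tolerance)
    upper = merged["frequency"] * (1.0 + tolerance)
    inside = (merged["frequency_inst"] >= lower) & (merged["frequency_inst"] <= upper)
    return int(inside.all())

freq_vector = pd.DataFrame({"lat": [43.5, 43.7], "lng": [10.3, 10.4],
                            "frequency": [4, 2]})
instance = pd.DataFrame({"lat": [43.5], "lng": [10.3], "frequency": [3]})
print(matches_with_tolerance(freq_vector, instance, tolerance=0.5))  # 1
print(matches_with_tolerance(freq_vector, instance, tolerance=0.0))  # 0
```

With tolerance 0.5, an observed frequency of 4 matches any claimed frequency in [2, 6]; that widening is why raising the tolerance can change the risks reported in the docstring examples.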
Flux9665/IMS-Toucan
[ "dca76d37c5ea35bcdb35f979efd533fec75ed855" ]
[ "Preprocessing/visualize_phoneme_embeddings.py" ]
[ "import json\n\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom matplotlib.markers import MarkerStyle\nfrom sklearn.decomposition import PCA\nfrom sklearn.manifold import TSNE\n\nfrom Preprocessing.ArticulatoryCombinedTextFrontend import ArticulatoryCombinedTextFrontend\n\n\ndef plot_embeddings(reduced_data, phoneme_list, title, save=False):\n    consonants = ['w', 'b', 'ɡ', 'n', 'ʒ', 'ʃ', 'd', 'l', 'θ', 'ŋ', 'f', 'ɾ', 's', 'm', 't', 'h', 'z', 'p', 'ʔ', 'v', 'ɹ', 'j', 'ð', 'k']\n    vowels = ['o', 'ɛ', 'ᵻ', 'ɔ', 'æ', 'i', 'ɐ', 'ɜ', 'ə', 'ɑ', 'e', 'ʌ', 'ɚ', 'a', 'ɪ', 'ʊ', 'u']\n    special_symbols = ['?', '.', '!', '~', '#']\n    uniques_v = ['y', 'ʏ', 'ø', 'œ', 'ε']\n    uniques_c = ['ç', 'x']\n\n    plt.clf()\n    fig, ax = plt.subplots(ncols=1, nrows=1)\n    fig.set_size_inches(3, 3)\n\n    ax.scatter(x=[x[0] for x in reduced_data], y=[x[1] for x in reduced_data], marker=MarkerStyle())\n    ax.axis('off')\n    for index, phoneme in enumerate(reduced_data):\n        x_position = phoneme[0]\n        y_position = phoneme[1]\n        label = phoneme_list[index]\n        if label in special_symbols:\n            color = \"gray\"\n        elif label in consonants:\n            color = \"blue\"\n        elif label in vowels:\n            color = \"darkgreen\"\n        elif label in uniques_v:\n            color = \"darkorange\"\n        elif label in uniques_c:\n            color = \"darkred\"\n        else:\n            continue\n        ax.text(x=x_position, y=y_position, s=label, color=color)\n    if not save:\n        ax.set_title(title)\n        plt.show()\n    else:\n        fig.subplots_adjust(top=1.0, bottom=0.0, left=0.0, right=1.0)\n        fig.savefig(f\"{title}.pdf\")\n\n\nif __name__ == '__main__':\n\n    key_list = list()  # no matter where you get it from, this needs to be a list of the phonemes you want to visualize as string\n    embedding_list = list()  # in the same order as the phonemes in the list above, this list needs to be filled with their embedding vectors\n\n    text2phone = ArticulatoryCombinedTextFrontend(language=\"de\", inference=True)\n\n    phone_to_embedding = dict()\n    for phone in text2phone.phone_to_vector:\n        phone_to_embedding[phone] = text2phone.phone_to_vector[phone]\n\n    for key in phone_to_embedding:\n        key_list.append(key)\n        embedding_list += [phone_to_embedding[key]]\n\n    embeddings_as_array = np.array(embedding_list)\n\n    tsne = TSNE(verbose=1, learning_rate=4, perplexity=30, n_iter=200000, n_iter_without_progress=8000, init='pca')\n    pca = PCA(n_components=2)\n\n    reduced_data_tsne = tsne.fit_transform(embeddings_as_array)\n    reduced_data_pca = pca.fit_transform(embeddings_as_array)\n\n    plot_embeddings(reduced_data_tsne, key_list, title=\"featurespace\", save=True)\n\n    ##########################################################################################################################\n    with open(\"embedding_table_512dim.json\", 'r', encoding=\"utf8\") as fp:\n        datapoints = 
json.load(fp)\n\n key_list = list()\n embedding_list = list()\n\n for key in datapoints:\n key_list.append(key)\n embedding_list += [datapoints[key]]\n\n embeddings_as_array = np.array(embedding_list)\n\n tsne = TSNE(verbose=1, learning_rate=4, perplexity=30, n_iter=200000, n_iter_without_progress=8000, init='pca')\n pca = PCA(n_components=2)\n\n reduced_data_tsne = tsne.fit_transform(embeddings_as_array)\n reduced_data_pca = pca.fit_transform(embeddings_as_array)\n\n plot_embeddings(reduced_data_tsne, key_list, title=\"embeddingspace_fast\", save=True)\n # plot_embeddings(reduced_data_pca, key_list, title=\"Trained Embeddings PCA\")\n" ]
[ [ "numpy.array", "sklearn.manifold.TSNE", "matplotlib.pyplot.subplots", "matplotlib.markers.MarkerStyle", "matplotlib.pyplot.show", "matplotlib.pyplot.clf", "sklearn.decomposition.PCA" ] ]
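The script in this record reduces phoneme embedding vectors to 2D with t-SNE (with PCA as an alternative) and then draws each phoneme symbol at its projected position instead of a plain marker. The sketch below shows that reduce-then-annotate pattern in a self-contained form; the labels and random vectors are placeholders, and PCA is used so it runs instantly, but `TSNE(...).fit_transform` slots in the same way.

```python
import numpy as np
from sklearn.decomposition import PCA
from matplotlib import pyplot as plt

labels = list("abcdefgh")  # stand-ins for phoneme symbols
vectors = np.random.default_rng(0).normal(size=(len(labels), 64))

# Project the 64-dimensional vectors down to 2D.
reduced = PCA(n_components=2).fit_transform(vectors)

fig, ax = plt.subplots(ncols=1, nrows=1)
fig.set_size_inches(3, 3)
ax.axis("off")
# Draw the symbol itself at each projected position instead of a marker.
for (x, y), label in zip(reduced, labels):
    ax.text(x=x, y=y, s=label)
# ax.text does not autoscale the view, so set the limits explicitly.
ax.set_xlim(reduced[:, 0].min() - 1, reduced[:, 0].max() + 1)
ax.set_ylim(reduced[:, 1].min() - 1, reduced[:, 1].max() + 1)
fig.savefig("reduced_space.pdf")
```

The original script sidesteps the autoscaling issue by also calling `ax.scatter` with an empty marker before annotating, which is an equally valid design choice.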
fcollman/cloud-volume
[ "906b6d338f7588b9091f99d1d0f4bca9a35e2709" ]
[ "cloudvolume/skeletonservice.py" ]
[ "from collections import defaultdict\nimport copy\nimport datetime\nimport re\nimport os\n\ntry:\n from StringIO import cStringIO as BytesIO\nexcept ImportError:\n from io import BytesIO\n\nimport numpy as np\nimport struct\n\nfrom . import lib\nfrom .exceptions import (\n SkeletonDecodeError, SkeletonEncodeError, \n SkeletonUnassignedEdgeError\n)\nfrom .lib import red, Bbox\nfrom .txrx import cdn_cache_control\nfrom .storage import Storage, SimpleStorage\n\nclass PrecomputedSkeleton(object):\n def __init__(self, \n vertices=None, edges=None, \n radii=None, vertex_types=None, \n segid=None\n ):\n\n self.id = segid\n\n if vertices is None:\n self.vertices = np.array([[]], dtype=np.float32)\n elif type(vertices) is list:\n self.vertices = np.array(vertices, dtype=np.float32)\n else:\n self.vertices = vertices.astype(np.float32)\n\n if edges is None:\n self.edges = np.array([[]], dtype=np.uint32)\n elif type(edges) is list:\n self.edges = np.array(edges, dtype=np.uint32)\n else:\n self.edges = edges.astype(np.uint32)\n\n if radii is None:\n self.radii = -1 * np.ones(shape=self.vertices.shape[0], dtype=np.float32)\n elif type(radii) is list:\n self.radii = np.array(radii, dtype=np.float32)\n else:\n self.radii = radii\n\n if vertex_types is None:\n # 0 = undefined in SWC (http://research.mssm.edu/cnic/swc.html)\n self.vertex_types = np.zeros(shape=self.vertices.shape[0], dtype=np.uint8)\n elif type(vertex_types) is list:\n self.vertex_types = np.array(vertex_types, dtype=np.uint8)\n else:\n self.vertex_types = vertex_types.astype(np.uint8)\n\n @classmethod\n def from_path(kls, vertices):\n \"\"\"\n Given an Nx3 array of vertices that constitute a single path, \n generate a skeleton with appropriate edges.\n \"\"\"\n if vertices.shape[0] == 0:\n return PrecomputedSkeleton()\n\n skel = PrecomputedSkeleton(vertices)\n edges = np.zeros(shape=(skel.vertices.shape[0] - 1, 2), dtype=np.uint32)\n edges[:,0] = np.arange(skel.vertices.shape[0] - 1)\n edges[:,1] = np.arange(1, skel.vertices.shape[0])\n skel.edges = edges\n return skel\n\n @classmethod\n def simple_merge(kls, skeletons):\n \"\"\"\n Simple concatenation of skeletons into one object \n without adding edges between them.\n \"\"\"\n if len(skeletons) == 0:\n return PrecomputedSkeleton()\n\n if type(skeletons[0]) is np.ndarray:\n skeletons = [ skeletons ]\n\n ct = 0\n edges = []\n for skel in skeletons:\n edge = skel.edges + ct\n edges.append(edge)\n ct += skel.vertices.shape[0]\n\n return PrecomputedSkeleton(\n vertices=np.concatenate([ skel.vertices for skel in skeletons ], axis=0),\n edges=np.concatenate(edges, axis=0),\n radii=np.concatenate([ skel.radii for skel in skeletons ], axis=0),\n vertex_types=np.concatenate([ skel.vertex_types for skel in skeletons ], axis=0),\n segid=skeletons[0].id,\n )\n\n def merge(self, skel):\n \"\"\"Combine with an additional skeleton and consolidate.\"\"\"\n return PrecomputedSkeleton.simple_merge((self, skel)).consolidate()\n\n def empty(self):\n return self.vertices.size == 0 or self.edges.size == 0\n\n def encode(self):\n edges = self.edges.astype(np.uint32)\n vertices = self.vertices.astype(np.float32)\n \n result = BytesIO()\n\n # Write number of positions and edges as first two uint32s\n result.write(struct.pack('<II', vertices.size // 3, edges.size // 2))\n result.write(vertices.tobytes('C'))\n result.write(edges.tobytes('C'))\n\n def writeattr(attr, dtype, text):\n if attr is None:\n return\n\n attr = attr.astype(dtype)\n\n if attr.shape[0] != vertices.shape[0]:\n raise 
SkeletonEncodeError(\"Number of {} {} ({}) must match the number of vertices ({}).\".format(\n        dtype, text, attr.shape[0], vertices.shape[0]\n      ))\n      \n      result.write(attr.tobytes('C'))\n\n    writeattr(self.radii, np.float32, 'Radii')\n    writeattr(self.vertex_types, np.uint8, 'SWC Vertex Types')\n\n    return result.getvalue()\n\n  @classmethod\n  def decode(kls, skelbuf, segid=None):\n    \"\"\"\n    Convert a buffer into a PrecomputedSkeleton object.\n\n    Format:\n    num vertices (Nv) (uint32)\n    num edges (Ne) (uint32)\n    XYZ x Nv (float32)\n    edge x Ne (2x uint32)\n    radii x Nv (optional, float32)\n    vertex_type x Nv (optional, req radii, uint8) (SWC definition)\n\n    More documentation: \n    https://github.com/seung-lab/cloud-volume/wiki/Advanced-Topic:-Skeletons-and-Point-Clouds\n    \"\"\"\n    if len(skelbuf) < 8:\n      raise SkeletonDecodeError(\"{} bytes is fewer than needed to specify the number of vertices and edges.\".format(len(skelbuf)))\n\n    num_vertices, num_edges = struct.unpack('<II', skelbuf[:8])\n    min_format_length = 8 + 12 * num_vertices + 8 * num_edges\n\n    if len(skelbuf) < min_format_length:\n      raise SkeletonDecodeError(\"The input skeleton was {} bytes but the format requires {} bytes.\".format(\n        len(skelbuf), min_format_length\n      ))\n\n    vstart = 2 * 4 # two uint32s in\n    vend = vstart + num_vertices * 3 * 4 # float32s\n    vertbuf = skelbuf[ vstart : vend ]\n\n    estart = vend\n    eend = estart + num_edges * 4 * 2 # 2x uint32s\n\n    edgebuf = skelbuf[ estart : eend ]\n\n    vertices = np.frombuffer(vertbuf, dtype='<f4').reshape( (num_vertices, 3) )\n    edges = np.frombuffer(edgebuf, dtype='<u4').reshape( (num_edges, 2) )\n\n    if len(skelbuf) == min_format_length:\n      return PrecomputedSkeleton(vertices, edges, segid=segid)\n\n    radii_format_length = min_format_length + num_vertices * 4\n\n    if len(skelbuf) < radii_format_length:\n      raise SkeletonDecodeError(\"Input buffer did not have enough float32 radii to correspond to each vertex. # vertices: {}, # radii: {}\".format(\n        num_vertices, (radii_format_length - min_format_length) / 4\n      ))\n\n    rstart = eend\n    rend = rstart + num_vertices * 4 # 4 bytes np.float32\n    radiibuf = skelbuf[ rstart : rend ]\n    radii = np.frombuffer(radiibuf, dtype=np.float32)\n\n    if len(skelbuf) == radii_format_length:\n      return PrecomputedSkeleton(vertices, edges, radii, segid=segid)\n\n    type_format_length = radii_format_length + num_vertices * 1 \n\n    if len(skelbuf) < type_format_length:\n      raise SkeletonDecodeError(\"Input buffer did not have enough uint8 SWC vertex types to correspond to each vertex. # vertices: {}, # types: {}\".format(\n        num_vertices, (type_format_length - radii_format_length)\n      ))\n\n    tstart = rend\n    tend = tstart + num_vertices\n    typebuf = skelbuf[ tstart:tend ]\n    vertex_types = np.frombuffer(typebuf, dtype=np.uint8)\n\n    return PrecomputedSkeleton(vertices, edges, radii, vertex_types, segid=segid)\n\n  @classmethod\n  def equivalent(kls, first, second):\n    \"\"\"\n    Tests that two skeletons are the same in form not merely that\n    their array contents are exactly the same. This test can be\n    made more sophisticated. 
\n \"\"\"\n if first.empty() and second.empty():\n return True\n elif first.vertices.shape[0] != second.vertices.shape[0]:\n return False\n elif first.edges.shape[0] != second.edges.shape[0]:\n return False\n\n EPSILON = 1e-7\n\n vertex1, inv1 = np.unique(first.vertices, axis=0, return_inverse=True)\n vertex2, inv2 = np.unique(second.vertices, axis=0, return_inverse=True)\n\n vertex_match = np.all(np.abs(vertex1 - vertex2) < EPSILON)\n if not vertex_match:\n return False\n\n remapping = {}\n for i in range(len(inv1)):\n remapping[inv1[i]] = inv2[i]\n remap = np.vectorize(lambda idx: remapping[idx])\n\n edges1 = np.sort(np.unique(first.edges, axis=0), axis=1)\n edges1 = edges1[np.lexsort(edges1[:,::-1].T)]\n\n edges2 = remap(second.edges)\n edges2 = np.sort(np.unique(edges2, axis=0), axis=1)\n edges2 = edges2[np.lexsort(edges2[:,::-1].T)]\n edges_match = np.all(edges1 == edges2)\n\n if not edges_match:\n return False\n\n second_verts = {}\n for i, vert in enumerate(second.vertices):\n second_verts[tuple(vert)] = i\n \n for i in range(len(first.radii)):\n i2 = second_verts[tuple(first.vertices[i])]\n\n if first.radii[i] != second.radii[i2]:\n return False\n\n if first.vertex_types[i] != second.vertex_types[i2]:\n return False\n\n return True\n\n def crop(self, bbox):\n \"\"\"\n Crop away all vertices and edges that lie outside of the given bbox.\n The edge counts as inside.\n\n Returns: new PrecomputedSkeleton\n \"\"\"\n skeleton = self.clone()\n bbox = Bbox.create(bbox)\n\n if skeleton.empty():\n return skeleton\n\n nodes_valid_mask = np.array(\n [ bbox.contains(vtx) for vtx in skeleton.vertices ], dtype=np.bool\n )\n nodes_valid_idx = np.where(nodes_valid_mask)[0]\n\n # Set invalid vertices to be duplicates\n # so they'll be removed during consolidation\n if nodes_valid_idx.shape[0] == 0:\n return PrecomputedSkeleton()\n\n first_node = nodes_valid_idx[0]\n skeleton.vertices[~nodes_valid_mask] = skeleton.vertices[first_node]\n \n edges_valid_mask = np.isin(skeleton.edges, nodes_valid_idx)\n edges_valid_idx = edges_valid_mask[:,0] * edges_valid_mask[:,1] \n skeleton.edges = skeleton.edges[edges_valid_idx,:]\n return skeleton.consolidate()\n\n def consolidate(self):\n \"\"\"\n Remove duplicate vertices and edges from this skeleton without\n side effects.\n\n Returns: new consolidated PrecomputedSkeleton \n \"\"\"\n nodes = self.vertices\n edges = self.edges \n radii = self.radii\n vertex_types = self.vertex_types\n\n if self.empty():\n return PrecomputedSkeleton()\n \n eff_nodes, uniq_idx, idx_representative = np.unique(\n nodes, axis=0, return_index=True, return_inverse=True\n )\n\n edge_vector_map = np.vectorize(lambda x: idx_representative[x])\n eff_edges = edge_vector_map(edges)\n eff_edges = np.sort(eff_edges, axis=1) # sort each edge [2,1] => [1,2]\n eff_edges = eff_edges[np.lexsort(eff_edges[:,::-1].T)] # Sort rows \n eff_edges = np.unique(eff_edges, axis=0)\n eff_edges = eff_edges[ eff_edges[:,0] != eff_edges[:,1] ] # remove trivial loops\n\n radii_vector_map = np.vectorize(lambda idx: radii[idx])\n eff_radii = radii_vector_map(uniq_idx)\n\n vertex_type_map = np.vectorize(lambda idx: vertex_types[idx])\n eff_vtype = vertex_type_map(uniq_idx) \n \n return PrecomputedSkeleton(eff_nodes, eff_edges, eff_radii, eff_vtype, segid=self.id)\n\n def clone(self):\n vertices = np.copy(self.vertices)\n edges = np.copy(self.edges)\n radii = np.copy(self.radii)\n vertex_types = np.copy(self.vertex_types)\n\n return PrecomputedSkeleton(vertices, edges, radii, vertex_types, segid=self.id)\n\n def 
cable_length(self):\n \"\"\"\n Returns cable length of connected skeleton vertices in the same\n metric that this volume uses (typically nanometers).\n \"\"\"\n v1 = self.vertices[self.edges[:,0]]\n v2 = self.vertices[self.edges[:,1]]\n\n delta = (v2 - v1)\n delta *= delta\n dist = np.sum(delta, axis=1)\n dist = np.sqrt(dist)\n\n return np.sum(dist)\n\n def downsample(self, factor):\n \"\"\"\n Compute a downsampled version of the skeleton by striding while \n preserving endpoints.\n\n factor: stride length for downsampling the saved skeleton paths.\n\n Returns: downsampled PrecomputedSkeleton\n \"\"\"\n if int(factor) != factor or factor < 1:\n raise ValueError(\"Argument `factor` must be a positive integer greater than or equal to 1. Got: <{}>({})\", type(factor), factor)\n\n paths = self.interjoint_paths()\n\n for i, path in enumerate(paths):\n paths[i] = np.concatenate(\n (path[0::factor, :], path[-1:, :]) # preserve endpoints\n )\n\n ds_skel = PrecomputedSkeleton.simple_merge(\n [ PrecomputedSkeleton.from_path(path) for path in paths ]\n ).consolidate()\n ds_skel.id = self.id\n\n # TODO: I'm sure this could be sped up if need be.\n index = {}\n for i, vert in enumerate(self.vertices):\n vert = tuple(vert)\n index[vert] = i\n\n for i, vert in enumerate(ds_skel.vertices):\n vert = tuple(vert)\n ds_skel.radii[i] = self.radii[index[vert]]\n ds_skel.vertex_types[i] = self.vertex_types[index[vert]]\n\n return ds_skel\n\n def _single_tree_paths(self, tree):\n \"\"\"Get all traversal paths from a single tree.\"\"\"\n skel = tree.consolidate()\n\n tree = defaultdict(list)\n\n for edge in skel.edges:\n svert = edge[0]\n evert = edge[1]\n tree[svert].append(evert)\n tree[evert].append(svert)\n\n def dfs(path, visited):\n paths = []\n stack = [ (path, visited) ]\n \n while stack:\n path, visited = stack.pop(0)\n\n vertex = path[-1]\n children = tree[vertex]\n \n visited[vertex] = True\n\n children = [ child for child in children if not visited[child] ]\n\n if len(children) == 0:\n paths.append(path)\n\n for child in children:\n stack.append( \n (path + [child], copy.deepcopy(visited))\n )\n\n return paths\n \n root = skel.edges[0,0]\n paths = dfs([root], defaultdict(bool))\n\n root = np.argmax([ len(_) for _ in paths ])\n root = paths[root][-1]\n \n paths = dfs([ root ], defaultdict(bool))\n \n return [ np.flip(skel.vertices[path], axis=0) for path in paths ] \n\n def paths(self):\n \"\"\"\n Assuming the skeleton is structured as a single tree, return a \n list of all traversal paths across all components. For each component, \n start from the first vertex, find the most distant vertex by \n hops and set that as the root. Then use depth first traversal \n to produce paths.\n\n Returns: [ [(x,y,z), (x,y,z), ...], path_2, path_3, ... 
]\n \"\"\"\n paths = []\n for tree in self.components():\n paths += self._single_tree_paths(tree)\n return paths\n\n def _single_tree_interjoint_paths(self, skeleton):\n vertices = skeleton.vertices\n edges = skeleton.edges\n\n unique_nodes, unique_counts = np.unique(edges, return_counts=True)\n terminal_nodes = unique_nodes[ unique_counts == 1 ]\n branch_nodes = set(unique_nodes[ unique_counts >= 3 ])\n \n critical_points = set(terminal_nodes)\n critical_points.update(branch_nodes)\n\n tree = defaultdict(set)\n\n for e1, e2 in edges:\n tree[e1].add(e2)\n tree[e2].add(e1)\n\n # The below depth first search would be\n # more elegantly implemented as recursion,\n # but it quickly blows the stack, mandating\n # an iterative implementation.\n\n paths = []\n\n stack = [ terminal_nodes[0] ]\n criticals = [ terminal_nodes[0] ]\n # Saving the path stack is memory intensive\n # There might be a way to do it more linearly\n # via a DFS rather than BFS strategy.\n path_stack = [ [] ] \n \n visited = defaultdict(bool)\n\n while stack:\n node = stack.pop()\n root = criticals.pop() # \"root\" is used v. loosely here\n path = path_stack.pop()\n\n path.append(node)\n visited[node] = True\n\n if node != root and node in critical_points:\n paths.append(path)\n path = [ node ]\n root = node\n\n for child in tree[node]:\n if not visited[child]:\n stack.append(child)\n criticals.append(root)\n path_stack.append(list(path))\n\n return [ vertices[path] for path in paths ]\n\n def interjoint_paths(self):\n \"\"\"\n Returns paths between the adjacent critical points\n in the skeleton, where a critical point is the set of\n terminal and branch points.\n \"\"\"\n paths = []\n for tree in self.components():\n subpaths = self._single_tree_interjoint_paths(tree)\n paths.extend(subpaths)\n\n return paths\n\n def _compute_components(self):\n skel = self.consolidate()\n if skel.edges.size == 0:\n return skel, []\n\n index = defaultdict(set)\n visited = defaultdict(bool)\n for e1, e2 in skel.edges:\n index[e1].add(e2)\n index[e2].add(e1)\n\n def extract_component(start):\n edge_list = []\n stack = [ start ]\n parents = [ -1 ]\n\n while stack:\n node = stack.pop()\n parent = parents.pop()\n\n if node < parent:\n edge_list.append( (node, parent) )\n else:\n edge_list.append( (parent, node) )\n\n if visited[node]:\n continue\n\n visited[node] = True\n \n for child in index[node]:\n stack.append(child)\n parents.append(node)\n\n return edge_list[1:]\n\n forest = []\n for edge in np.unique(skel.edges.flatten()):\n if visited[edge]:\n continue\n\n forest.append(\n extract_component(edge)\n )\n\n return skel, forest\n \n def components(self):\n \"\"\"\n Extract connected components from graph. \n Useful for ensuring that you're working with a single tree.\n\n Returns: [ PrecomputedSkeleton, PrecomputedSkeleton, ... 
]\n    \"\"\"\n    skel, forest = self._compute_components()\n\n    if len(forest) == 0:\n      return []\n    elif len(forest) == 1:\n      return [ skel ]\n\n    orig_verts = { tuple(coord): i for i, coord in enumerate(skel.vertices) } \n\n    skeletons = []\n    for edge_list in forest:\n      edge_list = np.array(edge_list, dtype=np.uint32)\n      edge_list = np.unique(edge_list, axis=0)\n      vert_idx = np.unique(edge_list.flatten())\n      vert_list = skel.vertices[vert_idx]\n      radii = skel.radii[vert_idx]\n      vtypes = skel.vertex_types[vert_idx]\n\n      new_verts = { orig_verts[tuple(coord)]: i for i, coord in enumerate(vert_list) }\n\n      edge_vector_map = np.vectorize(lambda x: new_verts[x])\n      edge_list = edge_vector_map(edge_list)\n\n      skeletons.append(\n        PrecomputedSkeleton(vert_list, edge_list, radii, vtypes, skel.id)\n      )\n\n    return skeletons\n\n  @classmethod\n  def from_swc(self, swcstr):\n    lines = swcstr.split(\"\\n\")\n    while re.match(r'[#\\s]', lines[0][0]):\n      lines.pop(0)\n\n    vertices = []\n    edges = []\n    radii = []\n    vertex_types = []\n\n    vertex_index = {}\n    label_index = {}\n    for i, line in enumerate(lines):\n      (vid, vtype, x, y, z, radius, parent_id) = line.split(\" \")\n      \n      coord = tuple([ float(_) for _ in (x,y,z) ])\n      vid = int(vid)\n      parent_id = int(parent_id)\n\n      vertex_index[coord] = i \n      label_index[vid] = coord\n\n      vertices.append(coord)\n\n      if parent_id >= 0:\n        edges.append( (i, vertex_index[label_index[parent_id]]) )\n\n      vertex_types.append(int(vtype))\n      radii.append(float(radius))\n\n    return PrecomputedSkeleton(vertices, edges, radii, vertex_types)\n\n  def to_swc(self):\n    \"\"\"\n    Prototype SWC file generator. \n\n    c.f. http://research.mssm.edu/cnic/swc.html\n    \"\"\"\n    from . import __version__\n    swc = \"\"\"# ORIGINAL_SOURCE CloudVolume {}\n# CREATURE \n# REGION\n# FIELD/LAYER\n# TYPE\n# CONTRIBUTOR {}\n# REFERENCE\n# RAW \n# EXTRAS \n# SOMA_AREA\n# SHRINKAGE_CORRECTION \n# VERSION_NUMBER \n# VERSION_DATE {}\n# SCALE 1.0 1.0 1.0\n\n\"\"\".format(\n      __version__, \n      \", \".join([ str(_) for _ in self.vol.provenance.owners ]),\n      datetime.datetime.utcnow().isoformat()\n    )\n\n    skel = self.clone()\n\n    def parent(i):\n      coords = np.where( skel.edges == i )\n      edge = skel.edges[ coords[0][0] ]\n      if edge[0] == i:\n        return edge[1] + 1\n      return edge[0] + 1\n\n    for i in range(skel.vertices.shape[0]):\n      line = \"{n} {T} {x} {y} {z} {R} {P}\".format(\n        n=i+1,\n        T=skel.vertex_types[i],\n        x=skel.vertices[i][0],\n        y=skel.vertices[i][1],\n        z=skel.vertices[i][2],\n        R=skel.radii[i],\n        P=-1 if i == 0 else parent(i),\n      )\n\n      swc += line + '\\n'\n\n    return swc\n\n  def __eq__(self, other):\n    if self.id != other.id:\n      return False\n    elif self.vertices.shape[0] != other.vertices.shape[0]:\n      return False\n    elif self.edges.shape[0] != other.edges.shape[0]:\n      return False\n\n    # Elementwise comparisons must be reduced to single booleans with\n    # np.all before combining them with `and`.\n    return (np.all(self.vertices == other.vertices) \\\n      and np.all(self.edges == other.edges) \\\n      and np.all(self.radii == other.radii) \\\n      and np.all(self.vertex_types == other.vertex_types))\n\n  def __str__(self):\n    return \"PrecomputedSkeleton(segid={}, vertices=(shape={}, {}), edges=(shape={}, {}), radii=(shape={}, {}), vertex_types=(shape={}, {}))\".format(\n      self.id,\n      self.vertices.shape[0], self.vertices.dtype,\n      self.edges.shape[0], self.edges.dtype,\n      self.radii.shape[0], self.radii.dtype,\n      self.vertex_types.shape[0], self.vertex_types.dtype\n    )\n\n  def __repr__(self):\n    return str(self)\n\nclass PrecomputedSkeletonService(object):\n  def __init__(self, vol):\n    self.vol = vol\n\n  @property\n  def path(self):\n    path = 'skeletons'\n    if 'skeletons' in 
self.vol.info:\n path = self.vol.info['skeletons']\n return path\n\n def get(self, segids):\n \"\"\"\n Retrieve one or more skeletons from the data layer.\n\n Example: \n skel = vol.skeleton.get(5)\n skels = vol.skeleton.get([1, 2, 3])\n\n Raises SkeletonDecodeError on missing files or decoding errors.\n\n Required:\n segids: list of integers or integer\n\n Returns: \n if segids is a list, returns list of PrecomputedSkeletons\n else returns a single PrecomputedSkeleton\n \"\"\"\n list_return = True\n if type(segids) in (int, float):\n list_return = False\n segids = [ int(segids) ]\n\n paths = [ os.path.join(self.path, str(segid)) for segid in segids ]\n\n StorageClass = Storage if len(segids) > 1 else SimpleStorage\n\n with StorageClass(self.vol.layer_cloudpath, progress=self.vol.progress) as stor:\n results = stor.get_files(paths)\n\n for res in results:\n if res['error'] is not None:\n raise res['error']\n\n missing = [ res['filename'] for res in results if res['content'] is None ]\n\n if len(missing):\n raise SkeletonDecodeError(\"File(s) do not exist: {}\".format(\", \".join(missing)))\n\n skeletons = []\n for res in results:\n segid = int(os.path.basename(res['filename']))\n try:\n skel = PrecomputedSkeleton.decode(\n res['content'], segid=segid\n )\n except Exception as err:\n raise SkeletonDecodeError(\"segid \" + str(segid) + \": \" + err.message)\n skeletons.append(skel)\n\n if list_return:\n return skeletons\n\n return skeletons[0]\n\n def upload_raw(self, segid, vertices, edges, radii=None, vertex_types=None):\n skel = PrecomputedSkeleton(\n vertices, edges, radii, \n vertex_types, segid=segid\n )\n return self.upload(skel)\n \n def upload(self, skeletons):\n if type(skeletons) == PrecomputedSkeleton:\n skeletons = [ skeletons ]\n\n StorageClass = Storage if len(skeletons) > 1 else SimpleStorage\n\n with StorageClass(self.vol.layer_cloudpath, progress=self.vol.progress) as stor:\n for skel in skeletons:\n path = os.path.join(self.path, str(skel.id))\n stor.put_file(\n file_path='{}/{}'.format(self.path, str(skel.id)),\n content=skel.encode(),\n compress='gzip',\n cache_control=cdn_cache_control(self.vol.cdn_cache),\n )\n " ]
[ [ "numpy.copy", "numpy.where", "numpy.sort", "numpy.unique", "numpy.frombuffer", "numpy.concatenate", "numpy.vectorize", "numpy.arange", "numpy.sqrt", "numpy.array", "numpy.zeros", "numpy.lexsort", "numpy.sum", "numpy.ones", "numpy.abs", "numpy.all", "numpy.flip", "numpy.isin" ] ]
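`PrecomputedSkeleton.encode` and `decode` in the record above define a simple little-endian binary layout: two uint32 counts, then Nv float32 xyz triples, then Ne uint32 index pairs, then optional per-vertex float32 radii and uint8 SWC vertex types. A minimal round-trip of the mandatory part of that layout, independent of cloud-volume and intended only as an illustration of the offset arithmetic:

```python
import struct
import numpy as np

vertices = np.array([[0, 0, 0], [10, 0, 0], [10, 10, 0]], dtype=np.float32)
edges = np.array([[0, 1], [1, 2]], dtype=np.uint32)

# Header: vertex count and edge count as little-endian uint32s,
# followed by the raw C-order vertex and edge arrays.
buf = struct.pack("<II", vertices.shape[0], edges.shape[0])
buf += vertices.tobytes("C") + edges.tobytes("C")

# Decode, mirroring the offsets computed in PrecomputedSkeleton.decode.
nv, ne = struct.unpack("<II", buf[:8])
vend = 8 + nv * 3 * 4  # 3 float32s per vertex
decoded_vertices = np.frombuffer(buf[8:vend], dtype="<f4").reshape(nv, 3)
decoded_edges = np.frombuffer(buf[vend:vend + ne * 2 * 4], dtype="<u4").reshape(ne, 2)

assert np.all(decoded_vertices == vertices)
assert np.all(decoded_edges == edges)
```

Because the radii and vertex-type sections are fixed-size and purely positional, `decode` can distinguish the three valid buffer lengths (mandatory, +radii, +types) from the byte count alone, which is exactly what the `min_format_length` checks do.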
bioembeddings/PLUS
[ "349cf3b9c69c20c0aa59aa404c40ba79fe62c1ce" ]
[ "plus_embedding.py" ]
[ "# Written by Seonwoo Min, Seoul National University ([email protected])\n# PLUS\n\nimport os\nimport sys\nimport argparse\n\nimport torch\n\nimport plus.config as config\nfrom plus.data.alphabets import Protein\nimport plus.data.dataset as dataset\nimport plus.model.plus_rnn as plus_rnn\nimport plus.model.plus_tfm as plus_tfm\nimport plus.model.p_elmo as p_elmo\nfrom plus.data.fasta import load_fasta\nfrom plus.train import Trainer\nfrom plus.utils import Print, set_seeds, set_output, load_models\n\n\nparser = argparse.ArgumentParser('Protein sequence embedding with PLUS models')\nparser.add_argument('--data-config', help='path for data configuration file')\nparser.add_argument('--model-config', help='path for model configuration file')\nparser.add_argument('--lm-model-config', help='path for lm-model configuration file (for P-ELMo)')\nparser.add_argument('--run-config', help='path for run configuration file')\nparser.add_argument('--pretrained-model', help='path for pretrained model file')\nparser.add_argument('--pretrained-lm-model', help='path for pretrained lm-model file (for P-ELMo)')\nparser.add_argument('--device', help='device to use; multi-GPU if given multiple GPUs separated by comma (default: cpu)')\nparser.add_argument('--output-path', help='path for outputs (default: stdout and without saving)')\nparser.add_argument('--output-index', help='index for outputs')\nparser.add_argument('--sanity-check', default=False, action='store_true', help='sanity check flag')\n\n\ndef main():\n    set_seeds(2020)\n    args = vars(parser.parse_args())\n\n    alphabet = Protein()\n    cfgs = []\n    data_cfg = config.DataConfig(args[\"data_config\"]); cfgs.append(data_cfg)\n    if args[\"lm_model_config\"] is None:\n        model_cfg = config.ModelConfig(args[\"model_config\"], input_dim=len(alphabet))\n        cfgs += [model_cfg]\n    else:\n        lm_model_cfg = config.ModelConfig(args[\"lm_model_config\"], idx=\"lm_model_config\", input_dim=len(alphabet))\n        model_cfg = config.ModelConfig(args[\"model_config\"], input_dim=len(alphabet),\n                                       lm_dim=lm_model_cfg.num_layers * lm_model_cfg.hidden_dim * 2)\n        cfgs += [model_cfg, lm_model_cfg]\n    run_cfg = config.RunConfig(args[\"run_config\"], sanity_check=args[\"sanity_check\"]); cfgs.append(run_cfg)\n    output, save_prefix = set_output(args, \"embedding_log\", embedding=True)\n    os.environ['CUDA_VISIBLE_DEVICES'] = args[\"device\"] if args[\"device\"] is not None else \"\"\n    device, data_parallel = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\"), torch.cuda.device_count() > 1\n    config.print_configs(args, cfgs, device, output)\n    flag_rnn = (model_cfg.model_type == \"RNN\")\n    flag_lm_model = (args[\"lm_model_config\"] is not None)\n\n    ## load test datasets\n    start = Print(\" \".join(['start loading a dataset:', data_cfg.path[\"test\"]]), output)\n    test_dataset = load_fasta(data_cfg, \"test\", alphabet, sanity_check=args[\"sanity_check\"])\n    test_dataset = dataset.Embedding_dataset(test_dataset, alphabet, run_cfg, flag_rnn)\n    collate_fn = dataset.collate_sequences_for_embedding if flag_rnn else None\n    iterator_test = torch.utils.data.DataLoader(test_dataset, run_cfg.batch_size_eval, collate_fn=collate_fn)\n    end = Print(\" \".join(['loaded', str(len(test_dataset)), 'sequences']), output)\n    Print(\" \".join(['elapsed time:', str(end - start)]), output, newline=True)\n\n    ## initialize a model\n    start = Print('start initializing a model', output)\n    models_list = []  # list of lists [model, idx, flag_frz, flag_clip_grad, flag_clip_weight]\n    ### model\n    if not flag_rnn: model = 
plus_tfm.PLUS_TFM(model_cfg)\n elif not flag_lm_model: model = plus_rnn.PLUS_RNN(model_cfg)\n else: model = p_elmo.P_ELMo(model_cfg)\n models_list.append([model, \"\", True, False, False])\n ### lm_model\n if flag_lm_model:\n lm_model = p_elmo.P_ELMo_lm(lm_model_cfg)\n models_list.append([lm_model, \"lm\", True, False, False])\n load_models(args, models_list, device, data_parallel, output, tfm_cls=flag_rnn)\n get_loss = plus_rnn.get_embedding if flag_rnn else plus_tfm.get_embedding\n end = Print('end initializing a model', output)\n Print(\"\".join(['elapsed time:', str(end - start)]), output, newline=True)\n\n ## setup trainer configurations\n start = Print('start setting trainer configurations', output)\n tasks_list = [[\"\", [], []]] # list of lists [idx, metrics_train, metrics_eval]\n trainer = Trainer(models_list, get_loss, run_cfg, tasks_list)\n trainer_args = {\"data_parallel\": data_parallel}\n end = Print('end setting trainer configurations', output)\n Print(\"\".join(['elapsed time:', str(end - start)]), output, newline=True)\n\n ## evaluate a model\n start = Print('start embedding protein sequences', output)\n\n ### evaluate cls\n for b, batch in enumerate(iterator_test):\n batch = [t.to(device) if type(t) is torch.Tensor else t for t in batch]\n trainer.embed(batch, trainer_args)\n if b % 10 == 0: print('# cls {:.1%} loss={:.4f}'.format(\n b / len(iterator_test), trainer.loss_eval), end='\\r', file=sys.stderr)\n print(' ' * 150, end='\\r', file=sys.stderr)\n\n trainer.save_embeddings(save_prefix)\n trainer.reset()\n\n end = Print('end embedding protein sequences', output)\n Print(\"\".join(['elapsed time:', str(end - start)]), output, newline=True)\n output.close()\n\n\nif __name__ == '__main__':\n main()" ]
[ [ "torch.cuda.is_available", "torch.utils.data.DataLoader", "torch.cuda.device_count" ] ]
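The embedding script in this record batches sequences with a DataLoader, moves each tensor in the batch to the selected device, and lets the trainer accumulate per-sequence embeddings. Stripped of the PLUS-specific classes, the core loop reduces to the sketch below; the linear "encoder", the input width of 21, and the random data are placeholders, not PLUS components.

```python
import torch
from torch.utils.data import DataLoader, TensorDataset

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = torch.nn.Linear(21, 128).to(device).eval()  # placeholder encoder

dataset = TensorDataset(torch.randn(100, 21))       # placeholder sequences
loader = DataLoader(dataset, batch_size=16)

embeddings = []
with torch.no_grad():                               # no gradients at inference
    for (batch,) in loader:
        batch = batch.to(device)                    # move tensors to the device
        embeddings.append(model(batch).cpu())
embeddings = torch.cat(embeddings, dim=0)           # shape: (100, 128)
```

Moving each result back to the CPU before appending, as the sketch does, keeps GPU memory flat when embedding large datasets, which is presumably why the trainer collects results rather than holding them on the device.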
eric-erki/autokeras
[ "d365a04af7f41641c4b0634fc076f6dbe2364d53" ]
[ "autokeras/layers.py" ]
[ "import torch\nfrom torch import nn\nfrom keras import layers\n\nclass StubLayer:\n def __init__(self, input_node=None, output_node=None):\n self.input = input_node\n self.output = output_node\n self.weights = None\n\n def build(self, shape):\n pass\n\n def set_weights(self, weights):\n self.weights = weights\n\n def import_weights(self, torch_layer):\n pass\n\n def import_weights_keras(self, keras_layer):\n pass\n\n def export_weights(self, torch_layer):\n pass\n\n def export_weights_keras(self, keras_layer):\n pass\n\n def get_weights(self):\n return self.weights\n\n @property\n def output_shape(self):\n return self.input.shape\n\n\nclass StubWeightBiasLayer(StubLayer):\n def import_weights(self, torch_layer):\n self.set_weights((torch_layer.weight.data.cpu().numpy(), torch_layer.bias.data.cpu().numpy()))\n\n def import_weights_keras(self, keras_layer):\n self.set_weights(keras_layer.get_weights())\n\n def export_weights(self, torch_layer):\n torch_layer.weight.data = torch.Tensor(self.weights[0])\n torch_layer.bias.data = torch.Tensor(self.weights[1])\n\n def export_weights_keras(self, keras_layer):\n keras_layer.set_weights(self.weights)\n\n\nclass StubBatchNormalization(StubWeightBiasLayer):\n def __init__(self, num_features, input_node=None, output_node=None):\n super().__init__(input_node, output_node)\n self.num_features = num_features\n\n def import_weights(self, torch_layer):\n self.set_weights((torch_layer.weight.data.cpu().numpy(),\n torch_layer.bias.data.cpu().numpy(),\n torch_layer.running_mean.cpu().numpy(),\n torch_layer.running_var.cpu().numpy(),\n ))\n\n def export_weights(self, torch_layer):\n torch_layer.weight.data = torch.Tensor(self.weights[0])\n torch_layer.bias.data = torch.Tensor(self.weights[1])\n torch_layer.running_mean = torch.Tensor(self.weights[2])\n torch_layer.running_var = torch.Tensor(self.weights[3])\n\n\nclass StubDense(StubWeightBiasLayer):\n def __init__(self, input_units, units, input_node=None, output_node=None):\n super().__init__(input_node, output_node)\n self.input_units = input_units\n self.units = units\n\n @property\n def output_shape(self):\n return self.units,\n\n def import_weights_keras(self, keras_layer):\n self.set_weights((keras_layer.get_weights()[0].T, keras_layer.get_weights()[1]))\n\n def export_weights_keras(self, keras_layer):\n keras_layer.set_weights((self.weights[0].T, self.weights[1]))\n\n\nclass StubConv(StubWeightBiasLayer):\n def __init__(self, input_channel, filters, kernel_size, input_node=None, output_node=None):\n super().__init__(input_node, output_node)\n self.input_channel = input_channel\n self.filters = filters\n self.kernel_size = kernel_size\n\n @property\n def output_shape(self):\n ret = self.input.shape[:-1]\n ret = ret + (self.filters,)\n return ret\n\n def import_weights_keras(self, keras_layer):\n self.set_weights((keras_layer.get_weights()[0].T, keras_layer.get_weights()[1]))\n\n def export_weights_keras(self, keras_layer):\n keras_layer.set_weights((self.weights[0].T, self.weights[1]))\n\n\nclass StubAggregateLayer(StubLayer):\n def __init__(self, input_nodes=None, output_node=None):\n if input_nodes is None:\n input_nodes = []\n super().__init__(input_nodes, output_node)\n\n\nclass StubConcatenate(StubAggregateLayer):\n @property\n def output_shape(self):\n ret = 0\n for current_input in self.input:\n ret += current_input.shape[-1]\n ret = self.input[0].shape[:-1] + (ret,)\n return ret\n\n\nclass StubAdd(StubAggregateLayer):\n @property\n def output_shape(self):\n return self.input[0].shape\n\n\nclass 
StubFlatten(StubLayer):\n @property\n def output_shape(self):\n ret = 1\n for dim in self.input.shape:\n ret *= dim\n return ret,\n\n\nclass StubReLU(StubLayer):\n pass\n\n\nclass StubSoftmax(StubLayer):\n pass\n\n\nclass StubPooling(StubLayer):\n def __init__(self, kernel_size=2, input_node=None, output_node=None):\n super().__init__(input_node, output_node)\n self.kernel_size = kernel_size\n\n @property\n def output_shape(self):\n ret = tuple()\n for dim in self.input.shape[:-1]:\n ret = ret + (max(int(dim / self.kernel_size), 1),)\n ret = ret + (self.input.shape[-1],)\n return ret\n\n\nclass StubGlobalPooling(StubLayer):\n def __init__(self, func, input_node=None, output_node=None):\n super().__init__(input_node, output_node)\n self.func = func\n\n\nclass StubDropout(StubLayer):\n def __init__(self, rate, input_node=None, output_node=None):\n super().__init__(input_node, output_node)\n self.rate = rate\n\n\nclass StubInput(StubLayer):\n def __init__(self, input_node=None, output_node=None):\n super().__init__(input_node, output_node)\n\n\ndef is_layer(layer, layer_type):\n if layer_type == 'Input':\n return isinstance(layer, StubInput)\n if layer_type == 'Conv':\n return isinstance(layer, StubConv)\n if layer_type == 'Dense':\n return isinstance(layer, (StubDense,))\n if layer_type == 'BatchNormalization':\n return isinstance(layer, (StubBatchNormalization,))\n if layer_type == 'Concatenate':\n return isinstance(layer, (StubConcatenate,))\n if layer_type == 'Add':\n return isinstance(layer, (StubAdd,))\n if layer_type == 'Pooling':\n return isinstance(layer, StubPooling)\n if layer_type == 'Dropout':\n return isinstance(layer, (StubDropout,))\n if layer_type == 'Softmax':\n return isinstance(layer, (StubSoftmax,))\n if layer_type == 'ReLU':\n return isinstance(layer, (StubReLU,))\n if layer_type == 'Flatten':\n return isinstance(layer, (StubFlatten,))\n if layer_type == 'GlobalAveragePooling':\n return isinstance(layer, StubGlobalPooling)\n\n\ndef layer_width(layer):\n if is_layer(layer, 'Dense'):\n return layer.units\n if is_layer(layer, 'Conv'):\n return layer.filters\n raise TypeError('The layer should be either Dense or Conv layer.')\n\n\nclass TorchConcatenate(nn.Module):\n def forward(self, input_list):\n return torch.cat(input_list, dim=1)\n\n\nclass TorchAdd(nn.Module):\n def forward(self, input_list):\n return input_list[0] + input_list[1]\n\n\nclass TorchFlatten(nn.Module):\n def forward(self, input_tensor):\n return input_tensor.view(input_tensor.size(0), -1)\n\n\ndef KerasDropout(layer, rate):\n input_dim = len(layer.input.shape)\n if input_dim == 2:\n return layers.SpatialDropout1D(rate)\n elif input_dim == 3:\n return layers.SpatialDropout2D(rate)\n elif input_dim == 4:\n return layers.SpatialDropout3D(rate)\n else:\n return layers.Dropout(rate)\n\n\ndef to_real_layer(layer):\n if is_layer(layer, 'Dense'):\n return torch.nn.Linear(layer.input_units, layer.units)\n if is_layer(layer, 'Conv'):\n return torch.nn.Conv2d(layer.input_channel,\n layer.filters,\n layer.kernel_size,\n padding=int(layer.kernel_size / 2))\n if is_layer(layer, 'Pooling'):\n return torch.nn.MaxPool2d(2)\n if is_layer(layer, 'BatchNormalization'):\n return torch.nn.BatchNorm2d(layer.num_features)\n if is_layer(layer, 'Concatenate'):\n return TorchConcatenate()\n if is_layer(layer, 'Add'):\n return TorchAdd()\n if is_layer(layer, 'Dropout'):\n return torch.nn.Dropout2d(layer.rate)\n if is_layer(layer, 'ReLU'):\n return torch.nn.ReLU()\n if is_layer(layer, 'Softmax'):\n return torch.nn.LogSoftmax(dim=1)\n 
if is_layer(layer, 'Flatten'):\n return TorchFlatten()\n\n\ndef to_real_keras_layer(layer):\n if is_layer(layer, 'Dense'):\n return layers.Dense(layer.units, input_shape=(layer.input_units, ))\n if is_layer(layer, 'Conv'):\n return layers.Conv2D(layer.filters,\n layer.kernel_size,\n input_shape=layer.input.shape,\n padding='same') # padding\n if is_layer(layer, 'Pooling'):\n return layers.MaxPool2D(2)\n if is_layer(layer, 'BatchNormalization'):\n return layers.BatchNormalization(input_shape=layer.input.shape)\n if is_layer(layer, 'Concatenate'):\n return layers.Concatenate()\n if is_layer(layer, 'Add'):\n return layers.Add()\n if is_layer(layer, 'Dropout'):\n return KerasDropout(layer, layer.rate)\n if is_layer(layer, 'ReLU'):\n return layers.Activation('relu')\n if is_layer(layer, 'Softmax'):\n return layers.Activation('softmax')\n if is_layer(layer, 'Flatten'):\n return layers.Flatten()\n\n\ndef set_torch_weight_to_stub(torch_layer, stub_layer):\n stub_layer.import_weights(torch_layer)\n\n\ndef set_keras_weight_to_stub(keras_layer, stub_layer):\n stub_layer.import_weights_keras(keras_layer)\n\n\ndef set_stub_weight_to_torch(stub_layer, torch_layer):\n stub_layer.export_weights(torch_layer)\n\n\ndef set_stub_weight_to_keras(stub_layer, keras_layer):\n stub_layer.export_weights_keras(keras_layer)" ]
[ [ "torch.nn.Linear", "torch.nn.LogSoftmax", "torch.cat", "torch.nn.MaxPool2d", "torch.nn.BatchNorm2d", "torch.nn.Dropout2d", "torch.nn.ReLU", "torch.Tensor" ] ]
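A minimal usage sketch for the stub-to-torch mapping in the record above. It assumes the `StubDense`/`StubReLU` classes defined earlier in that file; the `StubDense(input_units=..., units=...)` constructor arguments are an assumption about its signature, not confirmed API.

```python
# Hedged sketch: turn a tiny stub description into real torch modules via
# to_real_layer(). StubDense(input_units, units) is an assumed signature.
import torch
import torch.nn as nn

dense = StubDense(input_units=64, units=10)
stack = nn.Sequential(to_real_layer(dense), to_real_layer(StubReLU()))
out = stack(torch.randn(2, 64))  # -> shape (2, 10)
```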
jiashenC/detectron2
[ "a0d88f9b191e2dcbe987b1ccb53f280ea018c2e4" ]
[ "detectron2/engine/train_loop.py" ]
[ "# -*- coding: utf-8 -*-\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\nimport os\nimport logging\nimport numpy as np\nimport time\nimport weakref\nimport torch\n\nimport detectron2.utils.comm as comm\nfrom detectron2.utils.events import EventStorage\n\n__all__ = [\"HookBase\", \"TrainerBase\", \"SimpleTrainer\"]\n\n\nclass HookBase:\n \"\"\"\n Base class for hooks that can be registered with :class:`TrainerBase`.\n\n Each hook can implement 4 methods. The way they are called is demonstrated\n in the following snippet:\n\n .. code-block:: python\n\n hook.before_train()\n for iter in range(start_iter, max_iter):\n hook.before_step()\n trainer.run_step()\n hook.after_step()\n hook.after_train()\n\n Notes:\n 1. In the hook method, users can access `self.trainer` to access more\n properties about the context (e.g., current iteration).\n\n 2. A hook that does something in :meth:`before_step` can often be\n implemented equivalently in :meth:`after_step`.\n If the hook takes non-trivial time, it is strongly recommended to\n implement the hook in :meth:`after_step` instead of :meth:`before_step`.\n The convention is that :meth:`before_step` should only take negligible time.\n\n Following this convention will allow hooks that do care about the difference\n between :meth:`before_step` and :meth:`after_step` (e.g., timer) to\n function properly.\n\n Attributes:\n trainer: A weak reference to the trainer object. Set by the trainer when the hook is\n registered.\n \"\"\"\n\n def before_train(self):\n \"\"\"\n Called before the first iteration.\n \"\"\"\n pass\n\n def after_train(self):\n \"\"\"\n Called after the last iteration.\n \"\"\"\n pass\n\n def before_step(self):\n \"\"\"\n Called before each iteration.\n \"\"\"\n pass\n\n def after_step(self):\n \"\"\"\n Called after each iteration.\n \"\"\"\n pass\n\n\nclass TrainerBase:\n \"\"\"\n Base class for iterative trainer with hooks.\n\n The only assumption we made here is: the training runs in a loop.\n A subclass can implement what the loop is.\n We made no assumptions about the existence of dataloader, optimizer, model, etc.\n\n Attributes:\n iter(int): the current iteration.\n\n start_iter(int): The iteration to start with.\n By convention the minimum possible value is 0.\n\n max_iter(int): The iteration to end training.\n\n storage(EventStorage): An EventStorage that's opened during the course of training.\n \"\"\"\n\n def __init__(self):\n self._hooks = []\n\n def register_hooks(self, hooks):\n \"\"\"\n Register hooks to the trainer. 
The hooks are executed in the order\n they are registered.\n\n Args:\n hooks (list[Optional[HookBase]]): list of hooks\n \"\"\"\n hooks = [h for h in hooks if h is not None]\n for h in hooks:\n assert isinstance(h, HookBase)\n # To avoid circular reference, hooks and trainer cannot own each other.\n # This normally does not matter, but will cause memory leak if the\n # involved objects contain __del__:\n # See http://engineering.hearsaysocial.com/2013/06/16/circular-references-in-python/\n h.trainer = weakref.proxy(self)\n self._hooks.extend(hooks)\n\n def train(self, start_iter: int, max_iter: int):\n \"\"\"\n Args:\n start_iter, max_iter (int): See docs above\n \"\"\"\n logger = logging.getLogger(__name__)\n logger.info(\"Starting training from iteration {}\".format(start_iter))\n\n self.iter = self.start_iter = start_iter\n self.max_iter = max_iter\n\n # perf profiling\n prof_type = os.getenv('DETECTRON2_PROF', None)\n\n def prof_func(): return torch.autograd.profiler.profile(use_cuda=prof_type == 'cuda')\n\n if prof_type is not None:\n prof_key = '{}_time_total'.format(prof_type if prof_type == 'cpu' else 'cuda')\n prof_logger = logging.getLogger('detectron2_prof_train_{}'.format(prof_type))\n prof_logger.setLevel(logging.INFO)\n prof_logger.addHandler(logging.FileHandler(\n './detectron2_prof_train_{}.log'.format(prof_type), 'w'))\n\n with EventStorage(start_iter) as self.storage, prof_func() as prof:\n try:\n self.before_train()\n for self.iter in range(start_iter, max_iter):\n self.before_step()\n self.run_step()\n self.after_step()\n except Exception:\n logger.exception(\"Exception during training:\")\n raise\n finally:\n self.after_train()\n\n # perf profiling logging\n if prof_type is not None:\n prof_logger.info(prof.key_averages().table(sort_by=prof_key))\n\n def before_train(self):\n for h in self._hooks:\n h.before_train()\n\n def after_train(self):\n for h in self._hooks:\n h.after_train()\n\n def before_step(self):\n for h in self._hooks:\n h.before_step()\n\n def after_step(self):\n for h in self._hooks:\n h.after_step()\n # this guarantees, that in each hook's after_step, storage.iter == trainer.iter\n self.storage.step()\n\n def run_step(self):\n raise NotImplementedError\n\n\nclass SimpleTrainer(TrainerBase):\n \"\"\"\n A simple trainer for the most common type of task:\n single-cost single-optimizer single-data-source iterative optimization.\n It assumes that every step, you:\n\n 1. Compute the loss with a data from the data_loader.\n 2. Compute the gradients with the above loss.\n 3. Update the model with the optimizer.\n\n If you want to do anything fancier than this,\n either subclass TrainerBase and implement your own `run_step`,\n or write your own training loop.\n \"\"\"\n\n def __init__(self, model, data_loader, optimizer):\n \"\"\"\n Args:\n model: a torch Module. Takes a data from data_loader and returns a\n dict of losses.\n data_loader: an iterable. 
Contains data to be used to call model.\n optimizer: a torch optimizer.\n \"\"\"\n super().__init__()\n\n \"\"\"\n We set the model to training mode in the trainer.\n However it's valid to train a model that's in eval mode.\n If you want your model (or a submodule of it) to behave\n like evaluation during training, you can overwrite its train() method.\n \"\"\"\n model.train()\n\n self.model = model\n self.data_loader = data_loader\n self._data_loader_iter = iter(data_loader)\n self.optimizer = optimizer\n\n def run_step(self):\n \"\"\"\n Implement the standard training logic described above.\n \"\"\"\n assert self.model.training, \"[SimpleTrainer] model was changed to eval mode!\"\n start = time.perf_counter()\n \"\"\"\n If you want to do something with the data, you can wrap the dataloader.\n \"\"\"\n data = next(self._data_loader_iter)\n data_time = time.perf_counter() - start\n\n \"\"\"\n If you want to do something with the losses, you can wrap the model.\n \"\"\"\n loss_dict = self.model(data)\n losses = sum(loss_dict.values())\n self._detect_anomaly(losses, loss_dict)\n\n metrics_dict = loss_dict\n metrics_dict[\"data_time\"] = data_time\n self._write_metrics(metrics_dict)\n\n \"\"\"\n If you need to accumulate gradients or something similar, you can\n wrap the optimizer with your custom `zero_grad()` method.\n \"\"\"\n self.optimizer.zero_grad()\n losses.backward()\n\n \"\"\"\n If you need gradient clipping/scaling or other processing, you can\n wrap the optimizer with your custom `step()` method.\n \"\"\"\n self.optimizer.step()\n\n def _detect_anomaly(self, losses, loss_dict):\n if not torch.isfinite(losses).all():\n raise FloatingPointError(\n \"Loss became infinite or NaN at iteration={}!\\nloss_dict = {}\".format(\n self.iter, loss_dict\n )\n )\n\n def _write_metrics(self, metrics_dict: dict):\n \"\"\"\n Args:\n metrics_dict (dict): dict of scalar metrics\n \"\"\"\n metrics_dict = {\n k: v.detach().cpu().item() if isinstance(v, torch.Tensor) else float(v)\n for k, v in metrics_dict.items()\n }\n # gather metrics among all workers for logging\n # This assumes we do DDP-style training, which is currently the only\n # supported method in detectron2.\n all_metrics_dict = comm.gather(metrics_dict)\n\n if comm.is_main_process():\n if \"data_time\" in all_metrics_dict[0]:\n # data_time among workers can have high variance. The actual latency\n # caused by data_time is the maximum among workers.\n data_time = np.max([x.pop(\"data_time\") for x in all_metrics_dict])\n self.storage.put_scalar(\"data_time\", data_time)\n\n # average the rest metrics\n metrics_dict = {\n k: np.mean([x[k] for x in all_metrics_dict]) for k in all_metrics_dict[0].keys()\n }\n total_losses_reduced = sum(loss for loss in metrics_dict.values())\n\n self.storage.put_scalar(\"total_loss\", total_losses_reduced)\n if len(metrics_dict) > 1:\n self.storage.put_scalars(**metrics_dict)\n" ]
[ [ "torch.isfinite", "torch.autograd.profiler.profile", "numpy.mean" ] ]
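A short usage sketch for the hook/trainer contract defined in the file above (not part of the file itself): a per-iteration timer built on `HookBase` and registered on a `SimpleTrainer`. The `model`, `data_loader`, and `optimizer` names are placeholders for objects constructed elsewhere.

```python
import time

class IterTimer(HookBase):
    """Record per-iteration wall time; per the class docstring, the
    non-trivial work belongs in after_step, not before_step."""

    def before_step(self):
        self._start = time.perf_counter()

    def after_step(self):
        # Inside after_step, storage.iter == trainer.iter (see TrainerBase.after_step).
        self.trainer.storage.put_scalar("iter_time", time.perf_counter() - self._start)

trainer = SimpleTrainer(model, data_loader, optimizer)  # placeholders
trainer.register_hooks([IterTimer()])
trainer.train(start_iter=0, max_iter=90000)
```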
tastiSaher/SpectralReconstruction
[ "f60c9fdc46fc00f3a2167c5425fc4e20bd77edb5" ]
[ "data.py" ]
[ "import numpy as np, h5py\nfrom torch.utils.data import Dataset\nimport torch\nimport os\nimport random\nimport cv2\nfrom SpectralImage import MSpecImage\n\n\"\"\"\nThis function defines the data_loader\n\nArgs:\n file_name: path to the utilized data\n\"\"\"\n\nclass RGB2SpectralDataset(Dataset):\n def __init__(self, folder):\n super(RGB2SpectralDataset, self).__init__()\n\n self._patchsize = 64\n self._channel = -1\n self._folder = folder\n self.dirT1Spec = os.path.join(folder, 'Train1_Spectral')\n self.dirT2Spec = os.path.join(folder, 'Train2_Spectral')\n self.dirT1RGB = []\n self.dirT2RGB = []\n self._challType = \"\"\n # In the supplied folder, check for training set 1 and 2, respectively\n self.set1 = self.ReadAllFilesInDir(self.dirT1Spec)\n self.set2 = self.ReadAllFilesInDir(self.dirT2Spec)\n\n self.patchtype = \"determ\" # random, determ, all\n self.enabDebug = 0\n self.cntFilesAvailable = 0\n self._useSecondSetOnly = 0\n ## Splits the ICVL dataset into a training, validation and test set according to the supplied percentages\n # The split is performed on an image level.\n #\n # @percTraining size of the training set in percent, i.e. 0-100\n # @percValidation size of the validation set in percent, i.e. 0-100\n # @percTest size of the test set in percent, i.e. 0-100\n def PerformDataSplit(self, percTraining, percValidation, percTest):\n\n if percTraining+percValidation+percTest != 100:\n print(\"Sum of all percentages needs to equal 100\")\n return 0\n\n # split into training and test set\n self.percTraining = percTraining/100 # relative amount of images per set to be placed inside the training set\n self.percTest = percTest/100 # relative amount of images completely ignored by the optimization\n self.percValidation = percValidation/100\n\n self.cntFiles1 = len(self.set1)\n self.cntFiles2 = len(self.set2)\n cntTraining1 = int(self.percTraining * self.cntFiles1)\n cntTraining2 = int(self.percTraining * self.cntFiles2)\n cntValidation1 = int(self.percValidation * self.cntFiles1)\n cntValidation2 = int(self.percValidation * self.cntFiles2)\n\n # extract the corresponding indices\n # note that the actual files need to be explicitly loaded ...\n self.idces1Train = random.sample(range(0, self.cntFiles1), cntTraining1)\n self.idces2Train = random.sample(range(0, self.cntFiles2), cntTraining2)\n\n # split the remaining images into test set and validation set\n remIdces1 = [i for i in range(self.cntFiles1) if i not in self.idces1Train]\n remIdces2 = [i for i in range(self.cntFiles2) if i not in self.idces2Train]\n\n print(len(remIdces1))\n tempIdc1 = random.sample(range(0, len(remIdces1)), cntValidation1)\n tempIdc2 = random.sample(range(0, len(remIdces2)), cntValidation2)\n\n # the resulting validation set\n self.idces1Val = [remIdces1[i] for i in tempIdc1]\n self.idces2Val = [remIdces2[i] for i in tempIdc2]\n\n # the resulting test set\n self.idces1Test = [remIdces1[i] for i in range(len(remIdces1)) if i not in tempIdc1]\n self.idces2Test = [remIdces2[i] for i in range(len(remIdces2)) if i not in tempIdc2]\n\n print(\"Successfully parsed ICVL dataset!\")\n print(\"Total amount of images: {}\".format(self.cntFiles1 + self.cntFiles2))\n print(\"Size of training set: {}\".format(cntTraining1 + cntTraining2))\n print(\"Size of validation set: {}\".format(cntValidation1 + cntValidation2))\n print(\"Size of test set: {}\".format(len(self.idces1Test) + len(self.idces2Test)))\n\n print(\"\\n-ICVL dataset 1\")\n print(\"\\tTotal amount of images: {}\".format(self.cntFiles1))\n print(\"\\tSize of 
training set: {}\".format(cntTraining1))\n print(\"\\tSize of validation set: {}\".format(cntValidation1))\n print(\"\\tSize of test set: {}\".format(len(self.idces1Test)))\n\n print(\"\\n-ICVL dataset 2\")\n print(\"\\tTotal amount of images: {}\".format(self.cntFiles2))\n print(\"\\tSize of training set: {}\".format(cntTraining2))\n print(\"\\tSize of validation set: {}\".format(cntValidation2))\n print(\"\\tSize of test set: {}\".format(len(self.idces2Test)))\n\n # ---- Only Take the first image for testing purposes -----\n if self.enabDebug:\n cntPerSet = 1\n self.idces1Train = self.idces1Train[0:cntPerSet]\n self.idces2Train = self.idces2Train[0:cntPerSet]\n self.idces1Val = self.idces1Val[0:cntPerSet]\n self.idces2Val = self.idces2Val[0:cntPerSet]\n # --------------------------------------------------------\n\n return 1\n\n def SetDebugModeOnOff(self, onOff):\n self.enabDebug = onOff\n\n def SetPatchSize(self, patchSize):\n self._patchsize = patchSize\n\n def GetCntImages(self):\n return self.cntFilesAvailable\n\n def GetImagePair(self, id):\n if (id < 0) or (id > self.cntFilesAvailable):\n print(\"id outside valid index range\")\n return 0\n\n return self.allRGB[id], self.mspecs[id], self.allNames[id]\n\n ## Saves the current configuration of the database, e.g. data split in form of indices, not the images themselves\n #\n # @param filename have a guess...\n def SaveConfig(self, filename):\n np.savez(filename, i1val=self.idces1Val, i2val=self.idces2Val, i1train=self.idces1Train,\n i2train=self.idces2Train, i1test=self.idces1Test, i2test=self.idces2Test, percTrain=self.percTraining,\n percVal=self.percValidation, percTest=self.percTest, files1=self.set1, files2=self.set2,\n dir1spec=self.dirT1Spec, dir2spec=self.dirT2Spec, dir1rgb=self.dirT1RGB, dir2rgb=self.dirT2RGB,\n chan=self._channel, patchsize=self._patchsize, challType=self._challType, patchType=self.patchtype)\n\n ## Load a configuration of the database, e.g. 
data split in form of indices, not the images themselves\n #\n # @param filename have a guess...\n def LoadConfig(self, filename):\n npzfile = np.load(filename)\n self.idces1Val = npzfile['i1val']\n self.idces2Val = npzfile['i2val']\n self.idces1Train = npzfile['i1train']\n self.idces2Train = npzfile['i2train']\n self.idces1Test = npzfile['i1test']\n self.idces2Test = npzfile['i2test']\n self.percTraining = npzfile['percTrain']\n self.percTest = npzfile['percTest']\n self.percValidation = npzfile['percVal']\n self.dirT1Spec = str(npzfile['dir1spec'])\n self.dirT2Spec = str(npzfile['dir2spec'])\n self.dirT1RGB = str(npzfile['dir1rgb'])\n self.dirT2RGB = str(npzfile['dir2rgb'])\n self.set1 = npzfile['files1']\n self.set2 = npzfile['files2']\n self._channel = npzfile['chan']\n self._patchsize = npzfile['patchsize']\n print(self._patchsize)\n\n if 'challType' in npzfile:\n self._challType = npzfile['challType']\n print (self._challType)\n else:\n print('Warning: Old version, manual specification of the challenge type is required!')\n\n def ReadAllFilesInDir(self, folder):\n allFiles = []\n for file in os.listdir(folder):\n if file.endswith(\".mat\"):\n allFiles.append(file[:-4])\n return allFiles\n\n def SetChallengeType(self, type='RealWorld'):\n if type == 'RealWorld':\n self.dirT1RGB = os.path.join(self._folder, 'Train1_RealWorld')\n self.dirT2RGB = os.path.join(self._folder, 'Train2_RealWorld')\n elif type == 'Clean':\n self.dirT1RGB = os.path.join(self._folder, 'Train1_Clean')\n self.dirT2RGB = os.path.join(self._folder, 'Train2_Clean')\n else:\n return 0\n\n self._challType = type\n return 1\n\n\n def InitializeSet(self, type='train'):\n print('Loading images into memory...')\n\n if self._challType == \"\":\n print(\"Error! Challenge type has not been specified.\")\n return 0\n\n # ---- Only Take the first image for testing purposes -----\n if self.enabDebug:\n cntPerSet = 1\n self.idces1Train = self.idces1Train[0:cntPerSet]\n self.idces2Train = self.idces2Train[0:cntPerSet]\n self.idces1Val = self.idces1Val[0:cntPerSet]\n self.idces2Val = self.idces2Val[0:cntPerSet]\n # --------------------------------------------------------\n\n idces1 = []\n idces2 = []\n if type == 'train':\n idces1 = self.idces1Train\n idces2 = self.idces2Train\n elif type == 'validation':\n idces1 = self.idces1Val\n idces2 = self.idces2Val\n elif type == 'test':\n idces1 = self.idces1Test\n idces2 = self.idces2Test\n else:\n return 0\n\n # idces1 = idces1[0:1]\n # idces2 = idces2[0:1]\n\n # _______________________________________________________________________________________________\n #\n # ... 1 load all files\n self.cntFilesAvailable = len(idces1) + len(idces2)\n self.mspecs = []\n self.allRGB = []\n self.allNames = []\n\n # load every indexed file within the first set\n if self._useSecondSetOnly == 0:\n for c, indFile in enumerate(idces1):\n curName = self.set1[indFile]\n\n # load the spectral image, i.e. 
the ground truth\n filename_spectral = os.path.join(self.dirT1Spec, curName + '.mat')\n print(filename_spectral)\n curSpecImg = MSpecImage()\n curSpecImg.LoadICVLSpectral(filename_spectral)\n self.mspecs.append(curSpecImg)\n self.allNames.append(curName)\n\n # load the rgb image\n if self._challType == \"Clean\":\n filename_rgb = os.path.join(self.dirT1RGB, curName + '_clean.png')\n elif self._challType == \"RealWorld\":\n filename_rgb = os.path.join(self.dirT1RGB, curName + '_camera.jpg')\n self.allRGB.append(cv2.imread(filename_rgb))\n\n print(\"first set loaded\")\n # load every indexed file within the second set\n for indFile in idces2:\n self.cntFilesAvailable = len(idces2)\n curName = self.set2[indFile]\n\n # load the spectral image, i.e. the ground truth\n filename_spectral = os.path.join(self.dirT2Spec, curName + '.mat')\n print(filename_spectral)\n curSpecImg = MSpecImage()\n curSpecImg.LoadICVLSpectral(filename_spectral)\n self.mspecs.append(curSpecImg)\n self.allNames.append(curName)\n\n # load the rgb image\n if self._challType == \"Clean\":\n filename_rgb = os.path.join(self.dirT2RGB, curName + '_clean.png')\n elif self._challType == \"RealWorld\":\n filename_rgb = os.path.join(self.dirT2RGB, curName + '_camera.jpg')\n self.allRGB.append(cv2.imread(filename_rgb))\n\n # _______________________________________________________________________________________________\n #\n # 2) Convert loaded files into patches\n cntTotalPatches = 0\n cntTotalPatchesRand = 0\n for c in range(0, self.cntFilesAvailable):\n if self.patchtype == 'all':\n cntTotalPatches += self.mspecs[c].GetCntPossiblePatchesAll(self._patchsize, self._patchsize)\n elif self.patchtype == 'determ':\n cntTotalPatches += self.mspecs[c].GetCntPossiblePatches(self._patchsize, self._patchsize)\n elif self.patchtype == 'random':\n cntTotalPatches += self.mspecs[c].GetCntPossiblePatches(self._patchsize, self._patchsize)\n cntTotalPatchesRand += self.mspecs[c].GetCntPossiblePatchesAll(self._patchsize, self._patchsize)\n\n print(\"Total amount of available patches: {}\".format(cntTotalPatches))\n self.idxMap = [-1] * cntTotalPatches, [-1] * cntTotalPatches\n lastVal = 0\n for c in range(0, self.cntFilesAvailable):\n if self.patchtype == 'all':\n cntCurPatches = self.mspecs[c].GetCntPossiblePatchesAll(self._patchsize, self._patchsize)\n elif self.patchtype == 'determ' or self.patchtype == 'random':\n cntCurPatches = self.mspecs[c].GetCntPossiblePatches(self._patchsize, self._patchsize)\n\n self.idxMap[0][lastVal:lastVal+cntCurPatches] = [c] * cntCurPatches\n self.idxMap[1][lastVal:lastVal + cntCurPatches] = np.arange(cntCurPatches).tolist()\n\n lastVal = lastVal+cntCurPatches\n\n if self.patchtype == 'random':\n print(\"Random mode active. 
Total amount underlying of patches: {}\".format(cntTotalPatchesRand))\n self.shuffledIdces = list(np.ndindex(len(self.idxMap[0])))\n # Shuffle the indices in-place\n np.random.shuffle(self.shuffledIdces)\n self.nextAccessed = 0\n self.cntDataAvailable = cntTotalPatches\n\n print('Done!')\n return 1\n\n def SetSingleChannelOnly(self, channel):\n self._channel = channel\n\n def __len__(self):\n return self.cntDataAvailable\n\n def __getitem__(self, item):\n\n if self.patchtype == 'random':\n indImg = self.idxMap[0][self.shuffledIdces[self.nextAccessed][0]]\n indPatch = self.idxMap[1][self.shuffledIdces[self.nextAccessed][0]]\n self.nextAccessed += 1\n\n if self.nextAccessed == len(self.shuffledIdces):\n self.nextAccessed = 0\n np.random.shuffle(self.shuffledIdces)\n else:\n # get image pair the item corresponds to\n indImg = self.idxMap[0][item]\n indPatch = self.idxMap[1][item]\n\n # get the image area corresponding to the patch id\n if self.patchtype == 'all' or self.patchtype == 'random':\n startR, startC = self.mspecs[indImg].GetPatchStartAll(indPatch, self._patchsize, self._patchsize)\n elif self.patchtype == 'determ':\n startR, startC = self.mspecs[indImg].GetPatchStart(indPatch, self._patchsize, self._patchsize)\n\n x = self.allRGB[indImg][startR:startR+self._patchsize, startC:startC+self._patchsize]\n\n midPoint = int(self._patchsize / 2)\n y = self.mspecs[indImg].data[startR:startR+self._patchsize, startC:startC+self._patchsize, :]\n\n #\n if self._channel >= 0:\n y = y[:,:,self._channel]\n y = torch.from_numpy(y)\n x = torch.from_numpy(x)\n x = x.permute(2, 0, 1) # torch expects the following ordering: (channels, height, width)\n else:\n y = torch.from_numpy(y)\n x = torch.from_numpy(x)\n y = y.permute(2, 0, 1) # torch expects the following ordering: (channels, height, width)\n x = x.permute(2, 0, 1) # torch expects the following ordering: (channels, height, width)\n\n\n\n return x, y\n\n\n # def CreatePatches(self, img, patchsize, overlap):\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nclass SpectralData(Dataset):\n\n def __init__(self, folder):\n super(SpectralData, self).__init__()\n\n\n self.folder = folder\n self.allFiles = []\n for file in os.listdir(folder):\n if file.endswith(\".mat\"):\n self.allFiles.append(file)\n self.cntFiles = len(self.allFiles)\n\n\n # load data\n # self.spectra = np.load('/home/staff/stiebel/Code/Python/mspec/SpectralReconstruction/test.npy')\n\n def InitializeSet(self, type='train'):\n cnter = 0\n cntFilesTrain = 1\n cntFilesValidation = 1\n cntSpectra = 1300 * 1392\n\n allSpecs = []\n\n print('Initializing set...')\n\n if type == 'train':\n # simply load the first 4 files\n for indFile in range(0, cntFilesTrain):\n filename = os.path.join(self.folder, self.allFiles[indFile])\n f = h5py.File(filename)\n curRs = np.array(f['rad'])\n curRs = curRs.reshape(31, cntSpectra)\n allSpecs.append(curRs.transpose())\n\n elif type == 'validation':\n # simply load the first 4 files\n for indFile in range(cntFilesTrain, cntFilesTrain+cntFilesValidation):\n filename = os.path.join(self.folder, self.allFiles[indFile])\n f = h5py.File(filename)\n curRs = np.array(f['rad'])\n curRs = curRs.reshape(31, cntSpectra)\n allSpecs.append(curRs.transpose())\n\n self.spectra = np.vstack(allSpecs)\n\n print('Done!')\n\n # mandatory function for number of samples\n def __len__(self):\n print(len(self.spectra))\n return len(self.spectra)\n\n # mandatory function to get a specific sample\n def __getitem__(self, item):\n curSpec = self.spectra[item, :]\n x = 
torch.from_numpy(curSpec)\n y = x\n\n return x, y\n\nclass FullData(Dataset):\n\n def __init__(self, file_name):\n super(FullData, self).__init__()\n\n # Assuming the file is a hdf5 file with 3 matrices: input, label and mask\n # In addition, all subjects (in this example 10) are stored as channel (4th dimension)\n self.file_name = file_name\n f_data = h5py.File(self.file_name)\n self.full_data = np.transpose(np.array(f_data['/input']))\n self.full_label = np.transpose(np.array(f_data['/label']))\n self.full_mask = np.transpose(np.array(f_data['/mask']))\n print('complete dataset loaded...')\n f_data.close()\n\n # This function returns the corresponding dataset\n def set_type(self, set_type='validation'):\n if set_type == 'train':\n print('Loading train dataset')\n x = self.full_data\n x = x[:, :, :, np.arange(8)]\n x = np.reshape(x, (x.shape[0], x.shape[1], x.shape[2]*x.shape[3]))\n self.x = torch.from_numpy(x)\n\n label = self.full_label\n label = label[:, :, :, np.arange(8)]\n label = np.reshape(label, (label.shape[0], label.shape[1], label.shape[2]*label.shape[3]))\n self.label = torch.from_numpy(label)\n\n mask = self.full_mask\n mask = mask[:, :, :, np.arange(8)]\n mask = np.reshape(mask, (mask.shape[0], mask.shape[1], mask.shape[2]*mask.shape[3]))\n self.indices = np.where(mask > 0)\n\n self.data = self.x, self.label, self.indices\n\n elif set_type == 'validation':\n print('Loading validation dataset')\n\n x = self.full_data\n x = x[:, :, :, 8]\n x = np.squeeze(x)\n self.x = torch.from_numpy(x)\n\n label = self.full_label\n label = label[:, :, :, 8]\n label = np.squeeze(label)\n self.label = torch.from_numpy(label)\n\n mask = self.full_mask\n mask = mask[:, :, :, 8]\n mask = np.squeeze(mask)\n self.indices = np.where(mask > 0)\n\n self.data = self.x, self.label, self.indices\n\n else:\n print('Loading test dataset')\n\n x = self.full_data\n x = x[:, :, :, 9]\n x = np.squeeze(x)\n self.x = torch.from_numpy(x)\n\n label = self.full_label\n label = label[:, :, :, 9]\n label = np.squeeze(label)\n self.label = torch.from_numpy(label)\n\n mask = self.full_mask\n mask = mask[:, :, :, 9]\n mask = np.squeeze(mask)\n self.indices = np.where(mask > 0)\n\n self.data = self.x, self.label, self.indices\n\n # mandatory function for number of samples\n def __len__(self):\n return len(self.data[2][0])\n\n # mandatory function to get a specific sample\n def __getitem__(self, item):\n idx = self.data[2][0][item]\n idy = self.data[2][1][item]\n idz = self.data[2][2][item]\n\n x = self.data[0][idx - 2:idx + 3, idy - 2:idy + 3, idz - 2:idz + 3]\n x = torch.unsqueeze(x, 0)\n y = self.data[1][idx - 2:idx + 3, idy - 2:idy + 3, idz - 2:idz + 3]\n y = torch.unsqueeze(y, 0)\n\n return x, y\n\n" ]
[ [ "numpy.array", "numpy.reshape", "numpy.load", "numpy.random.shuffle", "torch.unsqueeze", "torch.from_numpy", "numpy.where", "numpy.savez", "numpy.arange", "numpy.squeeze", "numpy.vstack" ] ]
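A hedged end-to-end sketch of how `RGB2SpectralDataset` above appears intended to be driven; the root path is hypothetical, and the call order follows the guards in the class (`SetChallengeType` must precede `InitializeSet`).

```python
from torch.utils.data import DataLoader

ds = RGB2SpectralDataset('/data/ICVL')  # hypothetical root containing Train1_*/Train2_* subfolders
ds.SetChallengeType('Clean')            # or 'RealWorld'; required before InitializeSet
ds.PerformDataSplit(70, 15, 15)         # percentages must sum to 100
ds.SetPatchSize(64)
ds.InitializeSet('train')               # loads images and builds the patch index
loader = DataLoader(ds, batch_size=16)
x, y = next(iter(loader))               # x: (16, 3, 64, 64) RGB patches, y: (16, 31, 64, 64) spectral
```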
lee-winchester/deep-neural-network
[ "8f7c012e864a6bf9a3257d8cd08e3b3488243b19" ]
[ "logistic regression.py" ]
[ "import os\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy\n\nROWS = 64\nCOLS = 64\nCHANNELS = 3\n\nTRAIN_DIR = 'Train_data/'\nTEST_DIR = 'Test_data/'\n\ntrain_images = [TRAIN_DIR+i for i in os.listdir(TRAIN_DIR)]\ntest_images = [TEST_DIR+i for i in os.listdir(TEST_DIR)]\n\ndef read_image(file_path):\n img = cv2.imread(file_path, cv2.IMREAD_COLOR)\n return cv2.resize(img, (ROWS, COLS), interpolation=cv2.INTER_CUBIC)\n\ndef prepare_data(images):\n m = len(images)\n X = np.zeros((m, ROWS, COLS, CHANNELS), dtype=np.uint8)\n y = np.zeros((1, m))\n for i, image_file in enumerate(images):\n X[i,:] = read_image(image_file)\n if 'dog' in image_file.lower():\n y[0, i] = 1\n elif 'cat' in image_file.lower():\n y[0, i] = 0\n return X, y\n\ndef sigmoid(z):\n s = 1/(1+np.exp(-z))\n return s\n\ndef propagate(w, b, X, Y):\n m = X.shape[1]\n \n # FORWARD PROPAGATION (FROM X TO COST)\n z = np.dot(w.T, X)+b # tag 1\n A = sigmoid(z) # tag 2 \n cost = (-np.sum(Y*np.log(A)+(1-Y)*np.log(1-A)))/m # tag 5\n \n # BACKWARD PROPAGATION (TO FIND GRAD)\n dw = (np.dot(X,(A-Y).T))/m # tag 6\n db = np.average(A-Y) # tag 7\n\n cost = np.squeeze(cost)\n grads = {\"dw\": dw,\n \"db\": db}\n \n return grads, cost\n\ndef optimize(w, b, X, Y, num_iterations, learning_rate, print_cost = False):\n costs = [] \n for i in range(num_iterations):\n # Cost and gradient calculation\n grads, cost = propagate(w, b, X, Y)\n \n # Retrieve derivatives from grads\n dw = grads[\"dw\"]\n db = grads[\"db\"]\n\n # update w and b\n w = w - learning_rate*dw\n b = b - learning_rate*db\n\n # Record the costs\n if i % 100 == 0:\n costs.append(cost)\n \n # Print the cost every 100 training iterations\n if print_cost and i % 100 == 0:\n print (\"Cost after iteration %i: %f\" %(i, cost))\n\n # update w and b to dictionary\n params = {\"w\": w,\n \"b\": b}\n \n # update derivatives to dictionary\n grads = {\"dw\": dw,\n \"db\": db}\n \n return params, grads, costs\n\ndef predict(w, b, X): \n m = X.shape[1]\n Y_prediction = np.zeros((1, m))\n w = w.reshape(X.shape[0], 1)\n \n z = np.dot(w.T, X) + b\n A = sigmoid(z)\n \n for i in range(A.shape[1]):\n # Convert probabilities A[0,i] to actual predictions p[0,i]\n if A[0,i] > 0.5:\n Y_prediction[[0],[i]] = 1\n else: \n Y_prediction[[0],[i]] = 0\n \n return Y_prediction\n\ndef initialize_with_zeros(dim):\n w = np.zeros((dim, 1))\n b = 0\n return w, b\n\ndef model(X_train, Y_train, X_test, Y_test, num_iterations = 2000, learning_rate = 0.5, print_cost = False):\n # initialize parameters with zeros\n w, b = initialize_with_zeros(X_train.shape[0])\n\n # Gradient descent\n parameters, grads, costs = optimize(w, b, X_train, Y_train, num_iterations, learning_rate, print_cost)\n \n # Retrieve parameters w and b from dictionary \"parameters\"\n w = parameters[\"w\"]\n b = parameters[\"b\"]\n \n # Predict test/train set examples\n Y_prediction_test = predict(w,b,X_test)\n Y_prediction_train = predict(w,b,X_train)\n\n # Print train/test Errors\n print(\"train accuracy: {} %\".format(100 - np.mean(np.abs(Y_prediction_train - Y_train)) * 100))\n print(\"test accuracy: {} %\".format(100 - np.mean(np.abs(Y_prediction_test - Y_test)) * 100))\n\n \n dict = {\"costs\": costs,\n \"Y_prediction_test\": Y_prediction_test,\n \"Y_prediction_train\": Y_prediction_train,\n \"w\": w,\n \"b\": b,\n \"learning_rate\": learning_rate,\n \"num_iterations:\": num_iterations}\n \n return dict\n\ntrain_set_x, train_set_y = prepare_data(train_images)\ntest_set_x, test_set_y = 
prepare_data(test_images)\n\ntrain_set_x_flatten = train_set_x.reshape(train_set_x.shape[0], ROWS*COLS*CHANNELS).T\ntest_set_x_flatten = test_set_x.reshape(test_set_x.shape[0], -1).T\n\ntrain_set_x = train_set_x_flatten/255\ntest_set_x = test_set_x_flatten/255\n\nd = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations = 3000, learning_rate = 0.003, print_cost = True)\n\ntest_image = \"cat.jpg\"\nmy_image = read_image(test_image).reshape(1, ROWS*COLS*CHANNELS).T\nmy_predicted_image = predict(d[\"w\"], d[\"b\"], my_image)\nprint(np.squeeze(my_predicted_image))\n'''\nlearning_rates = [0.001, 0.002, 0.003, 0.005, 0.01]\nmodels = {}\nfor i in learning_rates:\n print(\"learning rate is: \",i)\n models[i] = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations = 3000, learning_rate = i, print_cost = False)\n print(\"---------------------------------------------------------\")\nfor i in learning_rates:\n plt.plot(np.squeeze(models[i][\"costs\"]), label= str(models[i][\"learning_rate\"]))\nplt.ylabel('cost')\nplt.xlabel(\"iterations (hundreds)\")\nlegend = plt.legend(loc='upper center', shadow=True)\nframe = legend.get_frame()\nframe.set_facecolor('0.90')\nplt.show()\n'''\n" ]
[ [ "numpy.dot", "numpy.zeros", "numpy.log", "numpy.exp", "numpy.abs", "numpy.average", "numpy.squeeze" ] ]
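The `propagate` function above returns both the cross-entropy cost and its analytic gradients; a self-contained finite-difference check (using only functions defined in that script) confirms `dw` on a tiny random problem:

```python
import numpy as np

rng = np.random.RandomState(0)
w, b = rng.randn(3, 1), 0.0
X = rng.randn(3, 5)
Y = (rng.rand(1, 5) > 0.5).astype(float)

grads, _ = propagate(w, b, X, Y)

# central finite differences on the cost, one weight at a time
eps = 1e-7
num_dw = np.zeros_like(w)
for j in range(w.shape[0]):
    w_hi, w_lo = w.copy(), w.copy()
    w_hi[j] += eps
    w_lo[j] -= eps
    num_dw[j] = (propagate(w_hi, b, X, Y)[1] - propagate(w_lo, b, X, Y)[1]) / (2 * eps)

assert np.allclose(grads["dw"], num_dw, atol=1e-6)
```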
fenzhantw/BQ_AUTOML
[ "6f3a592a74320fab6fe85ab4f956294d5a5a422d" ]
[ "retail/recommendation-system/bqml-scann/index_builder/builder/indexer.py" ]
[ "# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport scann\nimport tensorflow as tf\nimport numpy as np\nimport math\nimport pickle\n\nMETRIC = 'dot_product'\nDIMENSIONS_PER_BLOCK = 2\nANISOTROPIC_QUANTIZATION_THRESHOLD = 0.2\nNUM_NEIGHBOURS = 10\nNUM_LEAVES_TO_SEARCH = 200\nREORDER_NUM_NEIGHBOURS = 200\nTOKENS_FILE_NAME = 'tokens'\n\n\ndef load_embeddings(embedding_files_pattern):\n \n embedding_list = list()\n tokens = list()\n embed_files = tf.io.gfile.glob(embedding_files_pattern)\n print(f'{len(embed_files)} embedding files are found.')\n\n for file_idx, embed_file in enumerate(embed_files):\n print(f'Loading embeddings in file {file_idx+1} of {len(embed_files)}...')\n with tf.io.gfile.GFile(embed_file, 'r') as file_reader:\n lines = file_reader.readlines()\n for line in lines:\n parts = line.split(',')\n item_Id = parts[0]\n embedding = parts[1:]\n embedding = np.array([float(v) for v in embedding])\n normalized_embedding = embedding / np.linalg.norm(embedding)\n embedding_list.append(normalized_embedding)\n tokens.append(item_Id)\n \n print(f'{len(embedding_list)} embeddings are loaded.')\n \n return tokens, np.array(embedding_list)\n \n \ndef build_index(embeddings, num_leaves):\n \n data_size = embeddings.shape[0] \n if not num_leaves:\n num_leaves = int(math.sqrt(data_size))\n \n print('Start building the ScaNN index...')\n scann_builder = scann.scann_ops.builder(embeddings, NUM_NEIGHBOURS, METRIC)\n scann_builder = scann_builder.tree(\n num_leaves=num_leaves, \n num_leaves_to_search=NUM_LEAVES_TO_SEARCH, \n training_sample_size=data_size)\n scann_builder = scann_builder.score_ah(\n DIMENSIONS_PER_BLOCK, \n anisotropic_quantization_threshold=ANISOTROPIC_QUANTIZATION_THRESHOLD)\n scann_builder = scann_builder.reorder(REORDER_NUM_NEIGHBOURS)\n scann_index = scann_builder.build()\n print('ScaNN index is built.')\n \n return scann_index\n\n\ndef save_index(index, tokens, output_dir):\n print('Saving index as a SavedModel...')\n module = index.serialize_to_module()\n tf.saved_model.save(\n module, output_dir, signatures=None, options=None\n )\n print(f'Index is saved to {output_dir}')\n \n print(f'Saving tokens file...')\n tokens_file_path = os.path.join(output_dir, TOKENS_FILE_NAME)\n with tf.io.gfile.GFile(tokens_file_path, 'wb') as handle:\n pickle.dump(tokens, handle, protocol=pickle.HIGHEST_PROTOCOL)\n print(f'Item file is saved to {tokens_file_path}.')\n \n\ndef build(embedding_files_pattern, output_dir, num_leaves=None):\n print(\"Indexer started...\")\n tokens, embeddings = load_embeddings(embedding_files_pattern)\n index = build_index(embeddings, num_leaves)\n save_index(index, tokens, output_dir)\n print(\"Indexer finished.\")\n \n \n " ]
[ [ "numpy.array", "numpy.linalg.norm", "tensorflow.io.gfile.GFile", "tensorflow.io.gfile.glob", "tensorflow.saved_model.save" ] ]
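For completeness, a sketch of querying the resulting index in-process. The file pattern is hypothetical, and the exact return types of ScaNN's searcher (`search` yielding neighbor ids and scores) vary by version, so treat this as illustrative rather than the module's confirmed API.

```python
tokens, embeddings = load_embeddings('gs://my-bucket/embeddings/embeddings-*.csv')  # hypothetical
index = build_index(embeddings, num_leaves=None)

query = embeddings[0]                       # embeddings are already L2-normalized above
neighbors, distances = index.search(query)  # version-dependent: arrays/tensors of ids and scores
print([tokens[int(i)] for i in neighbors])
```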
simonbowly/lp-generators
[ "a1c13c02d2fa32ecf12a6e5134e2672e5397735c" ]
[ "lp_generators/neighbours_common.py" ]
[ "''' Elementwise modifiers to instance data. Functions here take a matrix of\ninstance data and modify in place. Implementors at the instance level should\ncopy the data. '''\n\nimport numpy as np\n\n\ndef apply_repeat(func):\n ''' Add a count argument to the decorated modifier function to allow its\n operation to be applied repeatedly. '''\n def apply_repeat_fn(arr, random_state, *args, count, **kwargs):\n for _ in range(count):\n func(arr, random_state, *args, **kwargs)\n return apply_repeat_fn\n\n\n@apply_repeat\ndef _exchange_basis(beta, random_state):\n ''' Exchange elements in a basis vector. '''\n incoming = random_state.choice(np.where(beta == 0)[0])\n outgoing = random_state.choice(np.where(beta == 1)[0])\n beta[incoming] = 1\n beta[outgoing] = 0\n\n\n@apply_repeat\ndef _scale_vector_entry(vector, random_state, mean, sigma, dist):\n ''' Scale element in a one dimensional vector. '''\n scale_index = random_state.choice(vector.shape[0])\n if dist == 'normal':\n scale_value = random_state.normal(loc=mean, scale=sigma)\n elif dist == 'lognormal':\n scale_value = random_state.lognormal(mean=mean, sigma=sigma)\n else:\n raise ValueError('Vector entry scales only with normal or lognormal')\n vector[scale_index] = scale_value * vector[scale_index]\n\n\n@apply_repeat\ndef _remove_lhs_entry(lhs, random_state):\n ''' Remove element from lhs matrix. '''\n nz_rows, nz_cols = np.where(lhs != 0)\n if len(nz_rows) == 0:\n return\n remove_index = random_state.choice(nz_rows.shape[0])\n lhs[nz_rows[remove_index], nz_cols[remove_index]] = 0\n\n\n@apply_repeat\ndef _add_lhs_entry(lhs, random_state, mean, sigma):\n ''' Add an element to lhs matrix. '''\n zero_rows, zero_cols = np.where(lhs == 0)\n if len(zero_rows) == 0:\n return\n add_index = random_state.choice(zero_rows.shape[0])\n add_value = random_state.normal(loc=mean, scale=sigma)\n lhs[zero_rows[add_index], zero_cols[add_index]] = add_value\n\n\n@apply_repeat\ndef _scale_lhs_entry(lhs, random_state, mean, sigma):\n ''' Scale an element of the constraint matrix. '''\n nz_rows, nz_cols = np.where(lhs != 0)\n if len(nz_rows) == 0:\n return lhs\n scale_index = random_state.choice(nz_rows.shape[0])\n scale_value = random_state.normal(loc=mean, scale=sigma)\n lhs[nz_rows[scale_index], nz_cols[scale_index]] = (\n scale_value * lhs[nz_rows[scale_index], nz_cols[scale_index]])\n" ]
[ [ "numpy.where" ] ]
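A minimal demonstration of the contract stated in the module docstring above: `apply_repeat` makes `count` keyword-only, and every modifier mutates its array argument in place (callers are expected to copy first).

```python
import numpy as np

rng = np.random.RandomState(42)

beta = np.array([1, 1, 0, 0, 0])
_exchange_basis(beta, rng, count=2)  # two basis exchanges; the number of ones is preserved
assert beta.sum() == 2

lhs = rng.randn(4, 6) * (rng.rand(4, 6) < 0.5)
_add_lhs_entry(lhs, rng, mean=0.0, sigma=1.0, count=3)  # densify by up to three entries
```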
manigalati/MnMs2
[ "76e14053604803d64a1fe37a66cc258c71742ff6" ]
[ "reconstructor.py" ]
[ "import torch.nn as nn\r\nimport torch\r\nimport os\r\nimport numpy as np\r\nfrom utils import device\r\nfrom utils import MSELoss, GDLoss\r\nfrom utils import DC, HD\r\n\r\nclass Reconstructor(nn.Module):\r\n def __init__(self, args):\r\n super().__init__()\r\n self.args = args\r\n self.init_layers()\r\n self.apply(self.weight_init)\r\n self.optimizer = torch.optim.Adam(self.parameters(), lr=args.lr, weight_decay=1e-5)\r\n\r\n def init_layers(self):\r\n self.encoder = nn.Sequential(\r\n nn.Conv2d(in_channels=self.args.in_channels, out_channels=32, kernel_size=4, stride=2, padding=1),\r\n nn.BatchNorm2d(num_features=32),\r\n nn.LeakyReLU(.2),\r\n nn.Dropout(0.5),\r\n\r\n nn.Conv2d(in_channels=32, out_channels=32, kernel_size=4, stride=2, padding=1),\r\n nn.BatchNorm2d(num_features=32),\r\n nn.LeakyReLU(.2),\r\n nn.Dropout(0.5),\r\n\r\n nn.Conv2d(in_channels=32, out_channels=32, kernel_size=4, stride=2, padding=1),\r\n nn.BatchNorm2d(num_features=32),\r\n nn.LeakyReLU(.2),\r\n nn.Dropout(0.5),\r\n\r\n nn.Conv2d(in_channels=32, out_channels=32, kernel_size=3, stride=1, padding=1),\r\n nn.BatchNorm2d(num_features=32),\r\n nn.LeakyReLU(.2),\r\n nn.Dropout(0.5),\r\n\r\n nn.Conv2d(in_channels=32, out_channels=64, kernel_size=4, stride=2, padding=1),\r\n nn.BatchNorm2d(num_features=64),\r\n nn.LeakyReLU(.2),\r\n nn.Dropout(0.5),\r\n\r\n nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1),\r\n nn.BatchNorm2d(num_features=64),\r\n nn.LeakyReLU(.2),\r\n nn.Dropout(0.5),\r\n\r\n nn.Conv2d(in_channels=64, out_channels=128, kernel_size=4, stride=2, padding=1),\r\n nn.BatchNorm2d(num_features=128),\r\n nn.LeakyReLU(.2),\r\n nn.Dropout(0.5),\r\n\r\n nn.Conv2d(in_channels=128, out_channels=64, kernel_size=3, stride=1, padding=1),\r\n nn.BatchNorm2d(num_features=64),\r\n nn.LeakyReLU(.2),\r\n nn.Dropout(0.5),\r\n\r\n nn.Conv2d(in_channels=64, out_channels=32, kernel_size=3, stride=1, padding=1),\r\n nn.BatchNorm2d(num_features=32),\r\n nn.LeakyReLU(.2),\r\n nn.Dropout(0.5),\r\n\r\n nn.Conv2d(\r\n in_channels=32,\r\n out_channels=self.args.latent_size,\r\n kernel_size=self.args.last_layer[0],\r\n stride=self.args.last_layer[1],\r\n padding=self.args.last_layer[2]\r\n )\r\n )\r\n\r\n self.decoder = nn.Sequential(\r\n nn.ConvTranspose2d(\r\n in_channels=self.args.latent_size,\r\n out_channels=32,\r\n kernel_size=self.args.last_layer[0],\r\n stride=self.args.last_layer[1],\r\n padding=self.args.last_layer[2]\r\n ),\r\n nn.BatchNorm2d(num_features=32),\r\n nn.LeakyReLU(.2),\r\n nn.Dropout(0.5),\r\n\r\n nn.ConvTranspose2d(in_channels=32, out_channels=64, kernel_size=3, stride=1, padding=1),\r\n nn.BatchNorm2d(num_features=64),\r\n nn.LeakyReLU(.2),\r\n nn.Dropout(0.5),\r\n\r\n nn.ConvTranspose2d(in_channels=64, out_channels=128, kernel_size=3, stride=1, padding=1),\r\n nn.BatchNorm2d(num_features=128),\r\n nn.LeakyReLU(.2),\r\n nn.Dropout(0.5),\r\n\r\n nn.ConvTranspose2d(in_channels=128, out_channels=64, kernel_size=4, stride=2, padding=1),\r\n nn.BatchNorm2d(num_features=64),\r\n nn.LeakyReLU(.2),\r\n nn.Dropout(0.5),\r\n\r\n nn.ConvTranspose2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1),\r\n nn.BatchNorm2d(num_features=64),\r\n nn.LeakyReLU(.2),\r\n nn.Dropout(0.5),\r\n\r\n nn.ConvTranspose2d(in_channels=64, out_channels=32, kernel_size=4, stride=2, padding=1),\r\n nn.BatchNorm2d(num_features=32),\r\n nn.LeakyReLU(.2),\r\n nn.Dropout(0.5),\r\n\r\n nn.ConvTranspose2d(in_channels=32, out_channels=32, kernel_size=3, stride=1, padding=1),\r\n 
nn.BatchNorm2d(num_features=32),\r\n nn.LeakyReLU(.2),\r\n nn.Dropout(0.5),\r\n\r\n nn.ConvTranspose2d(in_channels=32, out_channels=32, kernel_size=4, stride=2, padding=1),\r\n nn.BatchNorm2d(num_features=32),\r\n nn.LeakyReLU(.2),\r\n nn.Dropout(0.5),\r\n\r\n nn.ConvTranspose2d(in_channels=32, out_channels=32, kernel_size=4, stride=2, padding=1),\r\n nn.BatchNorm2d(num_features=32),\r\n nn.LeakyReLU(.2),\r\n nn.Dropout(0.5),\r\n\r\n nn.ConvTranspose2d(in_channels=32, out_channels=self.args.in_channels, kernel_size=4, stride=2, padding=1),\r\n nn.Softmax(dim=1)\r\n )\r\n\r\n def weight_init(self, m):\r\n if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):\r\n nn.init.kaiming_uniform_(m.weight)\r\n\r\n def forward(self, x):\r\n latent = self.encoder(x)\r\n reconstruction = self.decoder(latent)\r\n return reconstruction\r\n\r\n def Loss(self, prediction, target, epoch=None, validation=False):\r\n contributes = {}\r\n contributes[\"MSELoss\"] = MSELoss(prediction,target)\r\n contributes[\"GDLoss\"] = GDLoss(prediction,target)\r\n contributes[\"Total\"] = sum(contributes.values())\r\n if validation:\r\n return {k:v.item() for k,v in contributes.items()}\r\n return contributes[\"Total\"]\r\n\r\n def Metrics(self,prediction,target): \r\n metrics = {}\r\n for c, key in enumerate([\"LV_\", \"MYO_\", \"RV_\"], start=1):\r\n ref = np.copy(target)\r\n pred = np.copy(prediction)\r\n ref = np.where(ref != c, 0, 1)\r\n pred = np.where(pred != c, 0, 1) \r\n metrics[key + \"dc\"] = DC(pred, ref)\r\n metrics[key + \"hd\"] = HD(pred, ref) \r\n return metrics\r\n\r\n def training_routine(self, epochs, train_loader, val_loader, ckpt_folder):\r\n if not os.path.isdir(ckpt_folder):\r\n os.makedirs(ckpt_folder)\r\n history = []\r\n best_acc = np.inf\r\n for epoch in epochs:\r\n self.train()\r\n for batch in train_loader:\r\n batch = batch[\"gt\"].to(device)\r\n self.optimizer.zero_grad()\r\n reconstruction = self.forward(batch)\r\n loss = self.Loss(reconstruction, batch, epoch)\r\n loss.backward()\r\n self.optimizer.step()\r\n\r\n self.eval()\r\n with torch.no_grad():\r\n result = self.evaluation_routine(val_loader)\r\n if result[\"Total\"] < best_acc or epoch%10 == 0:\r\n ckpt = os.path.join(ckpt_folder, \"{:03d}.pth\".format(epoch))\r\n if result[\"Total\"] < best_acc:\r\n best_acc = result[\"Total\"]\r\n ckpt = ckpt.split(\".pth\")[0] + \"_best.pth\"\r\n torch.save({\"R\": self.state_dict(), \"R_optim\": self.optimizer.state_dict()}, ckpt)\r\n \r\n self.epoch_end(epoch, result)\r\n history.append(result[\"Total\"])\r\n return history\r\n\r\n def evaluation_routine(self, val_loader):\r\n epoch_summary={}\r\n for patient in val_loader:\r\n gt, reconstruction = [], []\r\n for batch in patient:\r\n batch = {\"gt\": batch[\"gt\"].to(device)}\r\n batch[\"reconstruction\"] = self.forward(batch[\"gt\"])\r\n gt = torch.cat([gt,batch[\"gt\"]], dim=0) if len(gt)>0 else batch[\"gt\"]\r\n reconstruction = torch.cat([reconstruction, batch[\"reconstruction\"]], dim=0) if len(reconstruction)>0 else batch[\"reconstruction\"]\r\n for k,v in self.Loss(batch[\"reconstruction\"], batch[\"gt\"], validation=True).items():\r\n if k not in epoch_summary.keys():\r\n epoch_summary[k] = []\r\n epoch_summary[k].append(v)\r\n gt = np.argmax(gt.cpu().numpy(), axis=1)\r\n gt = {\"ED\": gt[:len(gt)//2], \"ES\": gt[len(gt)//2:]}\r\n reconstruction = np.argmax(reconstruction.cpu().numpy(), axis=1)\r\n reconstruction = {\"ED\": reconstruction[:len(reconstruction)//2], \"ES\": reconstruction[len(reconstruction)//2:]}\r\n for phase 
in [\"ED\",\"ES\"]:\r\n for k,v in self.Metrics(reconstruction[phase],gt[phase]).items():\r\n if k not in epoch_summary.keys(): epoch_summary[k]=[]\r\n epoch_summary[k].append(v)\r\n epoch_summary = {k:np.mean(v) for k,v in epoch_summary.items()}\r\n return epoch_summary\r\n\r\n def epoch_end(self,epoch,result):\r\n print(\"\\033[1mEpoch [{}]\\033[0m\".format(epoch))\r\n header, row = \"\", \"\"\r\n for k,v in result.items():\r\n header += \"{:.6}\\t\".format(k)\r\n row += \"{:.6}\\t\".format(\"{:.4f}\".format(v))\r\n print(header);print(row)" ]
[ [ "torch.nn.Dropout", "torch.nn.init.kaiming_uniform_", "torch.cat", "torch.nn.Softmax", "torch.nn.BatchNorm2d", "torch.nn.ConvTranspose2d", "torch.nn.LeakyReLU", "numpy.copy", "numpy.mean", "torch.no_grad", "numpy.where", "torch.nn.Conv2d" ] ]
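A shape-level sanity sketch for the autoencoder above, assuming the repo's `utils` module is importable. The `args` values are illustrative only: the encoder applies five stride-2 convolutions, so a 256x256 input reaches 8x8 before the latent layer, and `last_layer=(8, 1, 0)` collapses it to a 1x1 code that the decoder mirrors back.

```python
from argparse import Namespace
import torch

args = Namespace(in_channels=4, latent_size=128, last_layer=(8, 1, 0), lr=1e-4)
model = Reconstructor(args).eval()

x = torch.softmax(torch.randn(2, 4, 256, 256), dim=1)  # soft one-hot segmentation maps
with torch.no_grad():
    out = model(x)
assert out.shape == x.shape
```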
oliverrose1998/Attention-Confidence
[ "fda42ee155c8075e571281e85810e2f2b8e3bc6f" ]
[ "model/recurrent_encoder/lstmcell.py" ]
[ "\"\"\" `lstmcell.py` defines:\n * basic LSTM cell,\n\"\"\"\n\n\nimport numpy as np\nimport sys\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom torch.nn import init\n\n\nDURATION_IDX = 50\n\n\nclass LSTMCell(nn.LSTMCell):\n \"\"\" Overriding initialization and naming methods of LSTMCell. \"\"\"\n\n def reset_parameters(self):\n \"\"\" Orthogonal Initialization \"\"\"\n init.orthogonal_(self.weight_ih.data)\n self.weight_hh.data.set_(torch.eye(self.hidden_size).repeat(4, 1))\n # The bias is just set to zero vectors.\n if self.bias:\n init.constant_(self.bias_ih.data, val=0)\n init.constant_(self.bias_hh.data, val=0)\n\n def __repr__(self):\n \"\"\" Rename \"\"\"\n string = '{name}({input_size}, {hidden_size})'\n if 'bias' in self.__dict__ and self.bias is False:\n string += ', bias={bias}'\n return string.format(name=self.__class__.__name__, **self.__dict__)\n" ]
[ [ "torch.eye", "torch.nn.init.orthogonal_", "torch.nn.init.constant_" ] ]
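A quick check of the scheme above: `nn.LSTMCell.__init__` calls `reset_parameters`, so a freshly constructed cell should already carry the identity-tiled recurrent weights.

```python
import torch

cell = LSTMCell(input_size=8, hidden_size=16)
assert torch.equal(cell.weight_hh.data, torch.eye(16).repeat(4, 1))  # one identity block per gate
h, c = cell(torch.randn(3, 8))  # standard nn.LSTMCell forward pass
```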
ffancheng/megaman
[ "faccaf267aad0a8b18ec8a705735fd9dd838ca1e" ]
[ "megaman/utils/tests/test_analyze_dimension_and_radius.py" ]
[ "import numpy as np\r\nfrom numpy.random import RandomState\r\nfrom scipy.spatial.distance import squareform, pdist\r\nimport megaman.utils.analyze_dimension_and_radius as adar\r\nfrom scipy.sparse import csr_matrix\r\nfrom numpy.testing import assert_array_almost_equal\r\n\r\ndef test_dim_distance_passed_vs_computed(seed=1234):\r\n rng = RandomState(seed)\r\n X = rng.randn(100, 10)\r\n dists = csr_matrix(squareform(pdist(X)))\r\n rmin = 2\r\n rmax = 10.0\r\n nradii = 10\r\n radii = 10**(np.linspace(np.log10(rmin), np.log10(rmax), nradii))\r\n\r\n results_passed = adar.neighborhood_analysis(dists, radii)\r\n avg_neighbors = results_passed['avg_neighbors'].flatten()\r\n radii = results_passed['radii'].flatten()\r\n fit_range = range(len(radii))\r\n dim_passed = adar.find_dimension_plot(avg_neighbors, radii, fit_range)\r\n results_computed, dim_computed = adar.run_analyze_dimension_and_radius(X, rmin, rmax, nradii)\r\n assert(dim_passed == dim_computed)\r\n assert_array_almost_equal(results_passed['avg_neighbors'], results_computed['avg_neighbors'])" ]
[ [ "numpy.testing.assert_array_almost_equal", "numpy.log10", "scipy.spatial.distance.pdist", "numpy.random.RandomState" ] ]
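The public entry point exercised by the test above can also be called directly; a short sketch on a synthetic 3-dimensional cloud (radii are swept log-uniformly between `rmin` and `rmax`, and per the test the dimension comes out of the fit of average neighbor counts versus radius):

```python
import numpy as np
import megaman.utils.analyze_dimension_and_radius as adar

X = np.random.RandomState(0).randn(500, 3)
results, dim = adar.run_analyze_dimension_and_radius(X, rmin=0.5, rmax=5.0, nradii=10)
print(dim)  # estimated intrinsic dimension, close to 3 for this cloud
```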
nicktimko/multiworm
[ "d694b8c2738cb3b5052ade880bca8587ada2c1e7" ]
[ "multiworm/analytics/sgolay.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nSavitzky-Golay Filter from the Scipy.org Cookbook:\nhttp://wiki.scipy.org/Cookbook/SavitzkyGolay\n\"\"\"\nfrom __future__ import (\n absolute_import, division, print_function, unicode_literals)\nimport six\nfrom six.moves import (zip, filter, map, reduce, input, range)\n\nimport numpy as np\nfrom math import factorial\n\ndef savitzky_golay(y, window_size, order, deriv=0, rate=1):\n r\"\"\"Smooth (and optionally differentiate) data with a Savitzky-Golay filter.\n The Savitzky-Golay filter removes high frequency noise from data.\n It has the advantage of preserving the original shape and\n features of the signal better than other types of filtering\n approaches, such as moving averages techniques.\n\n Parameters\n ----------\n y : array_like, shape (N,)\n the values of the time history of the signal.\n window_size : int\n the length of the window. Must be an odd integer number.\n order : int\n the order of the polynomial used in the filtering.\n Must be less then `window_size` - 1.\n deriv: int\n the order of the derivative to compute (default = 0 means only smoothing)\n\n Returns\n -------\n ys : ndarray, shape (N)\n the smoothed signal (or it's n-th derivative).\n\n Notes\n -----\n The Savitzky-Golay is a type of low-pass filter, particularly\n suited for smoothing noisy data. The main idea behind this\n approach is to make for each point a least-square fit with a\n polynomial of high order over a odd-sized window centered at\n the point.\n\n Examples\n --------\n t = np.linspace(-4, 4, 500)\n y = np.exp( -t**2 ) + np.random.normal(0, 0.05, t.shape)\n ysg = savitzky_golay(y, window_size=31, order=4)\n import matplotlib.pyplot as plt\n plt.plot(t, y, label='Noisy signal')\n plt.plot(t, np.exp(-t**2), 'k', lw=1.5, label='Original signal')\n plt.plot(t, ysg, 'r', label='Filtered signal')\n plt.legend()\n plt.show()\n\n References\n ----------\n .. [1] A. Savitzky, M. J. E. Golay, Smoothing and Differentiation of\n Data by Simplified Least Squares Procedures. Analytical\n Chemistry, 1964, 36 (8), pp 1627-1639.\n .. [2] Numerical Recipes 3rd Edition: The Art of Scientific Computing\n W.H. Press, S.A. Teukolsky, W.T. Vetterling, B.P. 
Flannery\n Cambridge University Press ISBN-13: 9780521880688\n \"\"\"\n\n try:\n window_size = abs(int(window_size))\n order = abs(int(order))\n except ValueError:\n raise ValueError(\"window_size and order have to be of type int\")\n if window_size % 2 != 1 or window_size < 1:\n raise TypeError(\"window_size must be a positive odd number\")\n if window_size < order + 2:\n raise TypeError(\"window_size is too small for the polynomials order\")\n order_range = range(order+1)\n half_window = (window_size -1) // 2\n # precompute coefficients\n b = np.mat([[k**i for i in order_range] for k in range(-half_window, half_window+1)])\n m = np.linalg.pinv(b).A[deriv] * rate**deriv * factorial(deriv)\n # pad the signal at the extremes with\n # values taken from the signal itself\n firstvals = y[0] - np.abs( y[1:half_window+1][::-1] - y[0] )\n lastvals = y[-1] + np.abs(y[-half_window-1:-1][::-1] - y[-1])\n y = np.concatenate((firstvals, y, lastvals))\n return np.convolve( m[::-1], y, mode='valid')\n\ndef main():\n t = np.linspace(-4, 4, 500)\n y = np.exp( -t**2 ) + np.random.normal(0, 0.05, t.shape)\n ysg = savitzky_golay(y, window_size=31, order=4)\n import matplotlib.pyplot as plt\n plt.plot(t, y, label='Noisy signal')\n plt.plot(t, np.exp(-t**2), 'k', lw=1.5, label='Original signal')\n plt.plot(t, ysg, 'r', label='Filtered signal')\n plt.legend()\n plt.show()\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.concatenate", "numpy.random.normal", "matplotlib.pyplot.plot", "matplotlib.pyplot.legend", "numpy.exp", "numpy.linalg.pinv", "numpy.abs", "matplotlib.pyplot.show", "numpy.linspace", "numpy.convolve" ] ]
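One usage note worth spelling out for the file above: the `deriv`/`rate` arguments return derivatives in index units unless scaled, so for a signal sampled every `dt` the rate should be `1/dt`. A hedged example:

```python
import numpy as np

t = np.linspace(-4, 4, 500)
y = np.exp(-t**2)
dt = t[1] - t[0]

dy = savitzky_golay(y, window_size=31, order=4, deriv=1, rate=1.0 / dt)

# compare against the analytic derivative of exp(-t**2)
err = np.max(np.abs(dy + 2 * t * np.exp(-t**2)))
print(err)  # small everywhere; largest near the window edges
```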
GiDiPa/LinkPredictionCo-AuthorNetworks
[ "d4b7ec6e40102a99c5d659f8cd5729a6b0e7179e" ]
[ "LinkPrediction/DatasetCelegans/orderResults.py" ]
[ "import time\nimport numpy as np\nimport pandas as pd\nimport pickle\nimport gc\nimport networkx as nx\n\nG = nx.read_gml('GeneratedData/testCelegans.gml')\nGset_edges = set(G.edges())\n\nprint(len(Gset_edges))\nnp.set_printoptions(precision=7)\nnp.set_printoptions(suppress=True)\n\npath = 'GeneratedData/mp_lp_alg_resultsDefinitive.txt'\ntextLength = 0\nfirstLine = True\nwith open(path) as lines:\n for line in lines:\n if firstLine:\n firstLine = False\n continue\n else:\n textLength += 1\n\ncnList = np.empty((textLength,9))\n\n\nfirstLine = True\ni = 0\ncount = 0\nstart = time.time()\nwith open(path) as lines:\n for line in lines:\n if firstLine:\n firstLine = False\n continue\n else:\n #print(cnList[i])\n splitLine = line.split(',')\n if tuple((splitLine[0], splitLine[1])) not in Gset_edges:\n cnList[i] = [int(splitLine[0]),int(splitLine[1]),int(splitLine[2]),splitLine[3],int(splitLine[4]),splitLine[5],splitLine[6],splitLine[7].strip('\\n'),0.0]\n else:\n count += 1\n cnList[i] = [int(splitLine[0]),int(splitLine[1]),int(splitLine[2]),splitLine[3],int(splitLine[4]),splitLine[5],splitLine[6],splitLine[7].strip('\\n'),1.0]\n #print(cnList[i])\n i += 1\n\n#cnList.sort(key = lambda i: i[2], reverse=True)\n#cnListsorted = cnList[cnList[:, 2].argsort()[::-1]]\n#print(max,min)\nprint(count)\n\n# Create the dataframe\ndf = pd.DataFrame(cnList,columns=['Node1', \n 'Node2', 'CN', 'JaccardCoefficient', 'PrefAttachment', 'AdamicAdar', 'QCNJacCoeff', 'QCNAdamicA', 'IsTestSet'])\n\ndf['Node1'] = df['Node1'].astype(int)\ndf['Node2'] = df['Node2'].astype(int)\ndf['CN'] = df['CN'].astype(int)\ndf['PrefAttachment'] = df['PrefAttachment'].astype(int)\ndf['IsTestSet'] = df['IsTestSet'].astype(int)\n\ndf.to_pickle(\"GeneratedData/DataFrameCelegans.pkl\")\n\n#df = pd.read_pickle(\"./dummy.pkl\")" ]
[ [ "numpy.set_printoptions", "numpy.empty", "pandas.DataFrame" ] ]
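A hedged follow-up to the frame written above (column names taken from the DataFrame construction in that script): rank candidate pairs by each heuristic and measure how many known test-set edges land in the top k, a simple precision@k.

```python
import pandas as pd

df = pd.read_pickle("GeneratedData/DataFrameCelegans.pkl")

k = 100
for score in ("CN", "JaccardCoefficient", "AdamicAdar"):
    topk = df.sort_values(score, ascending=False).head(k)
    print(score, "precision@%d = %.3f" % (k, topk["IsTestSet"].mean()))
```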
PastorD/rpi-deep-pantilt
[ "4c432a4003cc76e1997fec3d1c4f55f3d01d526c" ]
[ "rpi_deep_pantilt/control/manager.py" ]
[ "import logging\nfrom multiprocessing import Value, Process, Manager\nimport time\n\nimport pantilthat as pth\nimport signal\nimport sys\nimport numpy as np\n\nfrom rpi_deep_pantilt.detect.camera import PiCameraStream\nfrom rpi_deep_pantilt.detect.ssd_mobilenet_v3_coco import SSDMobileNet_V3_Small_Coco_PostProcessed, SSDMobileNet_V3_Coco_EdgeTPU_Quant\nfrom rpi_deep_pantilt.control.pid import PIDController\n\nlogging.basicConfig()\nLOGLEVEL = logging.getLogger().getEffectiveLevel()\n\nRESOLUTION = (320, 320)\n\nSERVO_MIN = -90\nSERVO_MAX = 90\n\nCENTER = (\n RESOLUTION[0] // 2,\n RESOLUTION[1] // 2\n)\n\n# function to handle keyboard interrupt\n\n\ndef signal_handler(sig, frame):\n # print a status message\n print(\"[INFO] You pressed `ctrl + c`! Exiting...\")\n\n # disable the servos\n pth.servo_enable(1, False)\n pth.servo_enable(2, False)\n\n # exit\n sys.exit()\n\n\ndef run_detect(center_x, center_y, labels, model_cls):\n \n model = model_cls()\n\n capture_manager = PiCameraStream(resolution=RESOLUTION)\n capture_manager.start()\n capture_manager.start_overlay()\n\n label_idxs = model.label_to_category_index(labels)\n start_time = time.time()\n fps_counter = 0\n while not capture_manager.stopped:\n if capture_manager.frame is not None:\n frame = capture_manager.read()\n prediction = model.predict(frame)\n\n if not len(prediction.get('detection_boxes')):\n continue\n\n if any(item in label_idxs for item in prediction.get('detection_classes')):\n\n tracked = (\n (i, x) for i, x in\n enumerate(prediction.get('detection_classes'))\n if x in label_idxs\n )\n tracked_idxs, tracked_classes = zip(*tracked)\n\n track_target = prediction.get('detection_boxes')[\n tracked_idxs[0]]\n # [ymin, xmin, ymax, xmax]\n y = int(\n RESOLUTION[1] - ((np.take(track_target, [0, 2])).mean() * RESOLUTION[1]))\n center_y.value = y\n x = int(\n RESOLUTION[0] - ((np.take(track_target, [1, 3])).mean() * RESOLUTION[0]))\n center_x.value = x\n\n display_name = model.category_index[tracked_classes[0]]['name']\n logging.info(\n f'Tracking {display_name} center_x {x} center_y {y}')\n\n overlay = model.create_overlay(frame, prediction)\n capture_manager.overlay_buff = overlay\n if LOGLEVEL is logging.DEBUG and (time.time() - start_time) > 1:\n fps_counter += 1\n fps = fps_counter / (time.time() - start_time)\n logging.debug(f'FPS: {fps}')\n fps_counter = 0\n start_time = time.time()\n\n\ndef in_range(val, start, end):\n # determine the input vale is in the supplied range\n return (val >= start and val <= end)\n\n\ndef set_servos(pan, tilt):\n # signal trap to handle keyboard interrupt\n signal.signal(signal.SIGINT, signal_handler)\n\n while True:\n pan_angle = -1 * pan.value\n tilt_angle = tilt.value\n\n # if the pan angle is within the range, pan\n if in_range(pan_angle, SERVO_MIN, SERVO_MAX):\n pth.pan(pan_angle)\n else:\n logging.info(f'pan_angle not in range {pan_angle}')\n\n if in_range(tilt_angle, SERVO_MIN, SERVO_MAX):\n pth.tilt(tilt_angle)\n else:\n logging.info(f'tilt_angle not in range {tilt_angle}')\n\n\ndef pid_process(output, p, i, d, box_coord, origin_coord, action):\n # signal trap to handle keyboard interrupt\n signal.signal(signal.SIGINT, signal_handler)\n\n # create a PID and initialize it\n p = PIDController(p.value, i.value, d.value)\n p.reset()\n\n # loop indefinitely\n while True:\n error = origin_coord - box_coord.value\n output.value = p.update(error)\n # logging.info(f'{action} error {error} angle: {output.value}')\n\n# ('person',)\n#('orange', 'apple', 'sports ball')\n\n\ndef 
pantilt_process_manager(\n model_cls,\n labels=('person',)\n):\n\n pth.servo_enable(1, True)\n pth.servo_enable(2, True)\n with Manager() as manager:\n # set initial bounding box (x, y)-coordinates to center of frame\n center_x = manager.Value('i', 0)\n center_y = manager.Value('i', 0)\n\n center_x.value = RESOLUTION[0] // 2\n center_y.value = RESOLUTION[1] // 2\n\n # pan and tilt angles updated by independent PID processes\n pan = manager.Value('i', 0)\n tilt = manager.Value('i', 0)\n\n # PID gains for panning\n\n pan_p = manager.Value('f', 0.05)\n # 0 time integral gain until inferencing is faster than ~50ms\n pan_i = manager.Value('f', 0.1)\n pan_d = manager.Value('f', 0)\n\n # PID gains for tilting\n tilt_p = manager.Value('f', 0.15)\n # 0 time integral gain until inferencing is faster than ~50ms\n tilt_i = manager.Value('f', 0.2)\n tilt_d = manager.Value('f', 0)\n\n detect_processr = Process(target=run_detect,\n args=(center_x, center_y, labels, model_cls))\n\n pan_process = Process(target=pid_process,\n args=(pan, pan_p, pan_i, pan_d, center_x, CENTER[0], 'pan'))\n\n tilt_process = Process(target=pid_process,\n args=(tilt, tilt_p, tilt_i, tilt_d, center_y, CENTER[1], 'tilt'))\n\n servo_process = Process(target=set_servos, args=(pan, tilt))\n\n detect_processr.start()\n pan_process.start()\n tilt_process.start()\n servo_process.start()\n\n detect_processr.join()\n pan_process.join()\n tilt_process.join()\n servo_process.join()\n\n\nif __name__ == '__main__':\n pantilt_process_manager()\n" ]
[ [ "numpy.take" ] ]
rushabh-v/estimator
[ "6915557cef8bfc86f29f87e4467d601e4553b957" ]
[ "tensorflow_estimator/python/estimator/keras_test.py" ]
[ "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for training routines.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport json\nimport math\nimport os\nimport tempfile\n\nfrom absl.testing import parameterized\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.core.protobuf import config_pb2\nfrom tensorflow.python import keras\nfrom tensorflow.python.feature_column import dense_features\nfrom tensorflow.python.feature_column import dense_features_v2\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.keras import optimizers as optimizer_v1\nfrom tensorflow.python.keras import testing_utils\nfrom tensorflow.python.keras.layers import recurrent_v2 as rnn_v2\nfrom tensorflow.python.keras.optimizer_v2 import gradient_descent as optimizer_v2\nfrom tensorflow.python.keras.utils import np_utils\nfrom tensorflow.python.ops.parsing_ops import gen_parsing_ops\nfrom tensorflow.python.saved_model import utils_impl as saved_model_utils\nfrom tensorflow.python.training import saver as saver_lib\nfrom tensorflow_estimator.python.estimator import keras as keras_lib\nfrom tensorflow_estimator.python.estimator import run_config as run_config_lib\nfrom tensorflow_estimator.python.estimator.export import export_lib\nfrom tensorflow_estimator.python.estimator.inputs import numpy_io\nfrom tensorflow_estimator.python.estimator.mode_keys import ModeKeys\n\ntry:\n import h5py # pylint:disable=g-import-not-at-top\nexcept ImportError:\n h5py = None\n\n_RANDOM_SEED = 1337\n_TRAIN_SIZE = 200\n_INPUT_SIZE = (10,)\n_NUM_CLASS = 2\n\n_TMP_DIR = '/tmp'\n\n\ndef simple_sequential_model():\n model = keras.models.Sequential()\n model.add(keras.layers.Dense(16, activation='relu', input_shape=_INPUT_SIZE))\n model.add(keras.layers.Dropout(0.1))\n model.add(keras.layers.Dense(_NUM_CLASS, activation='softmax'))\n return model\n\n\ndef simple_functional_model(activation='relu'):\n a = keras.layers.Input(shape=_INPUT_SIZE, name='input_layer')\n b = keras.layers.Dense(16, activation=activation)(a)\n b = keras.layers.Dropout(0.1)(b)\n b = keras.layers.Dense(_NUM_CLASS, activation='softmax')(b)\n model = keras.models.Model(inputs=[a], outputs=[b])\n return model\n\n\ndef simple_subclassed_model():\n\n class SimpleModel(keras.Model):\n\n def __init__(self):\n super(SimpleModel, self).__init__()\n self.dense1 = keras.layers.Dense(16, activation='relu')\n self.dp = keras.layers.Dropout(0.1)\n self.dense2 = keras.layers.Dense(_NUM_CLASS, activation='softmax')\n\n def call(self, inputs):\n x = self.dense1(inputs)\n x = self.dp(x)\n return self.dense2(x)\n\n return SimpleModel()\n\n\ndef gen_input_fn(x, y=None, batch_size=128, num_epochs=1, shuffle=False):\n\n def input_fn():\n ds = tf.compat.v1.data.Dataset.from_tensor_slices((\n x, y) if y is not None else x)\n if 
shuffle:\n ds = ds.shuffle(1000)\n return ds.repeat(num_epochs).batch(batch_size)\n\n return input_fn\n\n\ndef get_multi_inputs_multi_outputs_data():\n (a_train, c_train), (a_test, c_test) = testing_utils.get_test_data(\n train_samples=_TRAIN_SIZE,\n test_samples=50,\n input_shape=(16,),\n num_classes=3,\n random_seed=_RANDOM_SEED)\n (b_train, d_train), (b_test, d_test) = testing_utils.get_test_data(\n train_samples=_TRAIN_SIZE,\n test_samples=50,\n input_shape=(16,),\n num_classes=2,\n random_seed=_RANDOM_SEED)\n (m_train, _), (m_test, _) = testing_utils.get_test_data(\n train_samples=_TRAIN_SIZE,\n test_samples=50,\n input_shape=(8,),\n num_classes=2,\n random_seed=_RANDOM_SEED)\n\n c_train = np_utils.to_categorical(c_train)\n c_test = np_utils.to_categorical(c_test)\n d_train = np_utils.to_categorical(d_train)\n d_test = np_utils.to_categorical(d_test)\n\n train_data = {\n 'input_a': a_train,\n 'input_b': b_train,\n 'input_m': m_train,\n 'output_c': c_train,\n 'output_d': d_train\n }\n test_data = {\n 'input_a': a_test,\n 'input_b': b_test,\n 'input_m': m_test,\n 'output_c': c_test,\n 'output_d': d_test\n }\n\n return (train_data, test_data)\n\n\ndef get_resource_for_simple_model(\n model_type='sequential',\n is_evaluate=False,\n):\n if model_type == 'sequential':\n model = simple_sequential_model()\n model.build()\n elif model_type == 'subclass':\n model = simple_subclassed_model()\n else:\n assert model_type == 'functional'\n model = simple_functional_model()\n\n if model_type == 'subclass':\n input_name = 'input_1'\n output_name = 'output_1'\n else:\n input_name = model.input_names[0]\n output_name = model.output_names[0]\n\n np.random.seed(_RANDOM_SEED)\n (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(\n train_samples=_TRAIN_SIZE,\n test_samples=50,\n input_shape=_INPUT_SIZE,\n num_classes=_NUM_CLASS)\n y_train = np_utils.to_categorical(y_train)\n y_test = np_utils.to_categorical(y_test)\n\n train_input_fn = gen_input_fn(\n x=randomize_io_type(x_train, input_name),\n y=randomize_io_type(y_train, output_name),\n shuffle=False,\n num_epochs=None,\n batch_size=16)\n\n evaluate_input_fn = gen_input_fn(\n x=randomize_io_type(x_test, input_name),\n y=randomize_io_type(y_test, output_name),\n num_epochs=1,\n shuffle=False)\n\n predict_input_fn = gen_input_fn(\n x=randomize_io_type(x_test, input_name), num_epochs=1, shuffle=False)\n\n inference_input_fn = evaluate_input_fn if is_evaluate else predict_input_fn\n\n return model, (x_train, y_train), (x_test,\n y_test), train_input_fn, inference_input_fn\n\n\ndef randomize_io_type(array, name):\n switch = np.random.random()\n if switch > 0.5:\n return array\n else:\n return {name: array}\n\n\ndef multi_inputs_multi_outputs_model():\n input_a = keras.layers.Input(shape=(16,), name='input_a')\n input_b = keras.layers.Input(shape=(16,), name='input_b')\n input_m = keras.layers.Input(shape=(8,), dtype='string', name='input_m')\n dense = keras.layers.Dense(8, name='dense_1')\n\n interm_a = dense(input_a)\n # Read m\n interm_m = keras.layers.Lambda(gen_parsing_ops.string_to_number)(input_m)\n interm_s = keras.layers.Lambda(lambda k: k[0] * k[1])([interm_m, interm_a])\n interm_b = dense(input_b)\n merged = keras.layers.concatenate([interm_s, interm_b], name='merge')\n output_c = keras.layers.Dense(3, activation='softmax', name='dense_2')(merged)\n output_d = keras.layers.Dense(2, activation='softmax', name='dense_3')(merged)\n model = keras.models.Model(\n inputs=[input_a, input_b, input_m], outputs=[output_c, output_d])\n 
model.compile(\n loss='categorical_crossentropy',\n optimizer='rmsprop',\n metrics={\n 'dense_2': 'categorical_accuracy',\n 'dense_3': 'categorical_accuracy'\n })\n return model\n\n\nclass MyHook(tf.compat.v1.train.SessionRunHook):\n\n def begin(self):\n _ = tf.compat.v1.get_variable('temp', [1])\n\n\nclass TestKerasEstimator(tf.test.TestCase, parameterized.TestCase):\n\n def setUp(self):\n self._base_dir = os.path.join(self.get_temp_dir(), 'keras_estimator_test')\n tf.compat.v1.gfile.MakeDirs(self._base_dir)\n self._config = run_config_lib.RunConfig(\n tf_random_seed=_RANDOM_SEED, model_dir=self._base_dir)\n super(TestKerasEstimator, self).setUp()\n\n def tearDown(self):\n # Make sure nothing is stuck in limbo.\n tf.compat.v1.summary.FileWriterCache.clear()\n if os.path.isdir(self._base_dir):\n tf.compat.v1.gfile.DeleteRecursively(self._base_dir)\n keras.backend.clear_session()\n super(TestKerasEstimator, self).tearDown()\n\n @parameterized.named_parameters(\n dict(\n testcase_name='functional',\n model_type='functional',\n checkpoint_format='saver'),\n dict(\n testcase_name='sequential',\n model_type='sequential',\n checkpoint_format='saver'),\n dict(\n testcase_name='subclass',\n model_type='subclass',\n optimizer='tf_rmsprop',\n checkpoint_format='saver'),\n dict(\n testcase_name='functional_object_ckpt',\n model_type='functional',\n checkpoint_format='checkpoint'),\n dict(\n testcase_name='sequential_object_ckpt_w_fit',\n model_type='sequential',\n checkpoint_format='checkpoint',\n fit_before_export=True,\n optimizer='tf_rmsprop'),\n dict(\n testcase_name='functional_w_fit',\n model_type='functional',\n fit_before_export=True,\n optimizer='tf_rmsprop',\n checkpoint_format='saver'),\n dict(\n testcase_name='subclass_w_fit',\n model_type='subclass',\n fit_before_export=True,\n optimizer='tf_rmsprop',\n checkpoint_format='saver'),\n # b/109935364\n dict(\n testcase_name='hooks',\n model_type='subclass',\n hook=MyHook,\n optimizer='tf_rmsprop',\n checkpoint_format='saver'),\n dict(\n testcase_name='hooks_and_fit',\n model_type='subclass',\n hook=MyHook,\n fit_before_export=True,\n optimizer='tf_rmsprop',\n checkpoint_format='saver'),\n dict(\n testcase_name='tf_optimizer',\n model_type='subclass',\n hook=MyHook,\n optimizer='tf_rmsprop',\n fit_before_export=True,\n checkpoint_format='saver'))\n def test_train_keras_estimator(self,\n model_type,\n checkpoint_format=None,\n fit_before_export=False,\n optimizer='rmsprop',\n hook=None):\n hooks = [hook()] if hook else None\n tf_optimizer = False\n if optimizer == 'tf_rmsprop':\n tf_optimizer = True\n optimizer = tf.compat.v1.train.RMSPropOptimizer(1e-3)\n\n keras_model, (x_train, y_train), (_, _), train_input_fn, eval_input_fn = (\n get_resource_for_simple_model(model_type=model_type, is_evaluate=True))\n keras_model.compile(\n optimizer=optimizer,\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n if fit_before_export:\n keras_model.fit(x_train, y_train, epochs=1)\n\n est_keras = keras_lib.model_to_estimator(\n keras_model=keras_model,\n config=self._config,\n checkpoint_format=checkpoint_format)\n\n est_keras.train(\n input_fn=train_input_fn, steps=_TRAIN_SIZE / 16, hooks=hooks)\n before_eval_results = est_keras.evaluate(input_fn=eval_input_fn, steps=1)\n est_keras.train(\n input_fn=train_input_fn, steps=_TRAIN_SIZE / 16, hooks=hooks)\n after_eval_results = est_keras.evaluate(input_fn=eval_input_fn, steps=1)\n self.assertLess(after_eval_results['loss'], before_eval_results['loss'])\n\n if checkpoint_format == 'object' and 
tf_optimizer:\n latest_checkpoint = tf.train.latest_checkpoint(est_keras.model_dir)\n keras_model.load_weights(latest_checkpoint)\n\n def test_train_with_dense_features(self):\n feature_dict = {\n 'sex': np.int64([1, 1, 1, 1, 0]),\n 'cp': np.int64([0, 3, 3, 2, 1]),\n 'slope': np.int64([3, 2, 0, 3, 1]),\n }\n label = np.int64([0, 1, 0, 0, 0])\n train_input_fn = numpy_io.numpy_input_fn(\n x=feature_dict, y=label, num_epochs=1, shuffle=False)\n feature_columns = list()\n input_features = dict()\n for feature_name, data_array in feature_dict.items():\n feature_columns.append(\n tf.feature_column.indicator_column(\n tf.feature_column.categorical_column_with_identity(\n key=feature_name,\n num_buckets=np.size(np.unique(data_array)))))\n input_features[feature_name] = keras.layers.Input(\n name=feature_name,\n shape=(np.size(np.unique(data_array)),),\n dtype=tf.dtypes.int64)\n\n x = dense_features.DenseFeatures(feature_columns)(input_features)\n x = keras.layers.Dense(16, activation='relu')(x)\n logits = keras.layers.Dense(1, activation='linear')(x)\n model = keras.Model(inputs=input_features, outputs=logits)\n\n model.compile(\n optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])\n estimator_model = keras_lib.model_to_estimator(keras_model=model)\n estimator_model.train(input_fn=train_input_fn, steps=5)\n\n # TODO(b/139845232): Enable after TF2 nightly's start.\n def DISABLED_test_train_with_dense_features_embedding(self):\n feature_dict = {\n 'sex': np.int64([1, 1, 1, 1, 0]),\n 'cp': np.int64([0, 3, 3, 2, 1]),\n 'slope': np.int64([3, 2, 0, 3, 1]),\n }\n label = np.int64([0, 1, 0, 0, 0])\n train_input_fn = numpy_io.numpy_input_fn(\n x=feature_dict, y=label, num_epochs=1, shuffle=False)\n feature_columns = list()\n input_features = dict()\n for feature_name, data_array in feature_dict.items():\n feature_columns.append(\n tf.feature_column.embedding_column(\n tf.feature_column.categorical_column_with_identity(\n key=feature_name, num_buckets=np.size(np.unique(data_array))),\n dimension=3))\n input_features[feature_name] = keras.layers.Input(\n name=feature_name,\n shape=(np.size(np.unique(data_array)),),\n dtype=tf.dtypes.int64)\n\n df = dense_features.DenseFeatures(feature_columns)\n x = df(input_features)\n x = keras.layers.Dense(16, activation='relu')(x)\n logits = keras.layers.Dense(1, activation='linear')(x)\n model = keras.Model(inputs=input_features, outputs=logits)\n\n model.compile(\n optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])\n estimator_model = keras_lib.model_to_estimator(keras_model=model)\n estimator_model.train(input_fn=train_input_fn, steps=5)\n # We assert that we find the embedding_weights variables in the dependencies\n # for the DenseFeatures layer.\n dependency_names = [x.name for x in df._checkpoint_dependencies]\n self.assertNotIn('embedding_weights', dependency_names)\n self.assertIn('cp_embedding/embedding_weights', dependency_names)\n self.assertIn('sex_embedding/embedding_weights', dependency_names)\n self.assertIn('slope_embedding/embedding_weights', dependency_names)\n\n # TODO(b/139845232): Enable after TF2 nightly's start.\n def DISABLED_test_train_with_dense_features_v2(self):\n feature_dict = {\n 'sex': np.int64([1, 1, 1, 1, 0]),\n 'cp': np.int64([0, 3, 3, 2, 1]),\n 'slope': np.int64([3, 2, 0, 3, 1]),\n }\n label = np.int64([0, 1, 0, 0, 0])\n train_input_fn = numpy_io.numpy_input_fn(\n x=feature_dict, y=label, num_epochs=1, shuffle=False)\n feature_columns = list()\n input_features = dict()\n for feature_name, 
data_array in feature_dict.items():\n feature_columns.append(\n tf.feature_column.embedding_column(\n tf.feature_column.categorical_column_with_identity(\n key=feature_name, num_buckets=np.size(np.unique(data_array))),\n dimension=3))\n input_features[feature_name] = keras.layers.Input(\n name=feature_name,\n shape=(np.size(np.unique(data_array)),),\n dtype=tf.dtypes.int64)\n\n df = dense_features_v2.DenseFeatures(feature_columns)\n x = df(input_features)\n x = keras.layers.Dense(16, activation='relu')(x)\n logits = keras.layers.Dense(1, activation='linear')(x)\n model = keras.Model(inputs=input_features, outputs=logits)\n\n model.compile(\n optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])\n estimator_model = keras_lib.model_to_estimator(keras_model=model)\n estimator_model.train(input_fn=train_input_fn, steps=5)\n # We assert that we find the embedding_weights variables in the dependencies\n # for the DenseFeatures layer.\n dependency_names = [x.name for x in df._checkpoint_dependencies]\n self.assertNotIn('embedding_weights', dependency_names)\n self.assertIn('cp_embedding/embedding_weights', dependency_names)\n self.assertIn('sex_embedding/embedding_weights', dependency_names)\n self.assertIn('slope_embedding/embedding_weights', dependency_names)\n\n def test_evaluate(self):\n keras_model, (x_train, y_train), (\n x_test, y_test), _, eval_input_fn = get_resource_for_simple_model(\n model_type='functional', is_evaluate=True)\n\n metrics = [\n 'binary_accuracy', 'binary_crossentropy', 'categorical_accuracy',\n 'categorical_crossentropy', 'cosine_proximity', 'hinge',\n 'kullback_leibler_divergence', 'mean_absolute_error',\n 'mean_absolute_percentage_error', 'mean_squared_error',\n 'mean_squared_logarithmic_error', 'poisson', 'squared_hinge',\n 'top_k_categorical_accuracy'\n ]\n keras_model.compile(\n loss='categorical_crossentropy', optimizer='adam', metrics=metrics)\n keras_model.fit(x_train, y_train, epochs=1)\n keras_eval = keras_model.evaluate(x_test, y_test, batch_size=32)\n\n keras_est = keras_lib.model_to_estimator(\n keras_model=keras_model, config=self._config)\n est_eval = keras_est.evaluate(input_fn=eval_input_fn)\n\n metrics = ['loss'] + metrics\n\n # Check loss and all metrics match between keras and estimator.\n def shift(val):\n if val == 0:\n return 0\n else:\n return val / 10**int(math.log10(abs(val)))\n\n for i, metric_name in enumerate(metrics):\n self.assertAlmostEqual(\n shift(keras_eval[i]),\n shift(est_eval[metric_name]),\n places=4,\n msg='%s mismatch, keras model: %s, estimator: %s' %\n (metric_name, keras_eval[i], est_eval[metric_name]))\n\n def test_predict(self):\n # Check that predict on a pretrained model yield the same result.\n keras_model, (x_train, y_train), (\n x_test, _), _, pred_input_fn = get_resource_for_simple_model(\n model_type='sequential', is_evaluate=False)\n\n keras_model.compile(\n loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n\n def test_multi_inputs_multi_outputs_with_input_fn_as_dict(self):\n train_data, test_data = get_multi_inputs_multi_outputs_data()\n\n def train_input_fn():\n input_dict = {\n 'input_a': train_data['input_a'],\n 'input_b': train_data['input_b'],\n 'input_m': train_data['input_m'].astype(np.str)\n }\n output_dict = {\n 'dense_2': train_data['output_c'],\n 'dense_3': train_data['output_d']\n }\n return input_dict, output_dict\n\n def eval_input_fn():\n input_dict = {\n 'input_a': test_data['input_a'],\n 'input_b': test_data['input_b'],\n 'input_m': 
test_data['input_m'].astype(np.str)\n }\n output_dict = {\n 'dense_2': test_data['output_c'],\n 'dense_3': test_data['output_d']\n }\n return input_dict, output_dict\n\n def pred_input_fn():\n input_dict = {\n 'input_a': test_data['input_a'],\n 'input_b': test_data['input_b'],\n 'input_m': test_data['input_m'].astype(np.str)\n }\n return input_dict\n\n self.do_test_multi_inputs_multi_outputs_with_input_fn(\n train_input_fn, eval_input_fn, pred_input_fn)\n\n def test_multi_inputs_multi_outputs_with_input_fn_as_list(self):\n train_data, test_data = get_multi_inputs_multi_outputs_data()\n\n def train_input_fn():\n input_list = [\n train_data['input_a'], train_data['input_b'],\n train_data['input_m'].astype(np.str)\n ]\n output_list = [train_data['output_c'], train_data['output_d']]\n return input_list, output_list\n\n def eval_input_fn():\n input_list = [\n test_data['input_a'], test_data['input_b'],\n test_data['input_m'].astype(np.str)\n ]\n output_list = [test_data['output_c'], test_data['output_d']]\n return input_list, output_list\n\n def pred_input_fn():\n input_list = [\n test_data['input_a'], test_data['input_b'],\n test_data['input_m'].astype(np.str)\n ]\n return input_list\n\n self.do_test_multi_inputs_multi_outputs_with_input_fn(\n train_input_fn, eval_input_fn, pred_input_fn)\n\n def do_test_multi_inputs_multi_outputs_with_input_fn(self, train_input_fn,\n eval_input_fn,\n pred_input_fn):\n model = multi_inputs_multi_outputs_model()\n est_keras = keras_lib.model_to_estimator(\n keras_model=model, config=self._config)\n baseline_eval_results = est_keras.evaluate(input_fn=eval_input_fn, steps=1)\n est_keras.train(input_fn=train_input_fn, steps=_TRAIN_SIZE / 16)\n eval_results = est_keras.evaluate(input_fn=eval_input_fn, steps=1)\n self.assertLess(eval_results['loss'], baseline_eval_results['loss'])\n est_keras.predict(input_fn=pred_input_fn)\n\n def test_init_from_file(self):\n if h5py is None:\n return # Skip test if models cannot be saved.\n\n keras_model, (x_train, y_train), (\n x_test, _), _, pred_input_fn = get_resource_for_simple_model(\n model_type='functional', is_evaluate=False)\n\n keras_model.compile(\n loss='categorical_crossentropy',\n optimizer='rmsprop',\n metrics=['categorical_accuracy'])\n keras_model.fit(x_train, y_train, epochs=1)\n keras_pred = [np.argmax(y) for y in keras_model.predict(x_test)]\n fname = os.path.join(self._base_dir, 'keras_model.h5')\n keras.models.save_model(keras_model, fname)\n\n keras_est = keras_lib.model_to_estimator(\n keras_model_path=fname, config=self._config)\n est_pred = [\n np.argmax(y[keras_model.output_names[0]])\n for y in keras_est.predict(input_fn=pred_input_fn)\n ]\n self.assertAllEqual(est_pred, keras_pred)\n\n def test_keras_model_init_error(self):\n with self.assertRaisesRegexp(ValueError, 'Either'):\n keras_lib.model_to_estimator()\n\n keras_model = simple_sequential_model()\n with self.assertRaisesRegexp(ValueError, 'not both'):\n keras_lib.model_to_estimator(\n keras_model=keras_model,\n keras_model_path=tempfile.mkdtemp(dir=self._base_dir))\n\n keras_model = simple_sequential_model()\n with self.assertRaisesRegexp(ValueError, 'compiled'):\n keras_lib.model_to_estimator(keras_model=keras_model)\n\n def test_invalid_ionames_error(self):\n (x_train, y_train), (_, _) = testing_utils.get_test_data(\n train_samples=_TRAIN_SIZE,\n test_samples=100,\n input_shape=(10,),\n num_classes=2)\n y_train = np_utils.to_categorical(y_train)\n\n def invald_input_name_input_fn():\n input_dict = {'invalid_input_name': x_train}\n return 
input_dict, y_train\n\n def invald_output_name_input_fn():\n input_dict = {'input_layer': x_train}\n output_dict = {'invalid_output_name': y_train}\n return input_dict, output_dict\n\n model = simple_functional_model()\n model.compile(\n loss='categorical_crossentropy', optimizer='adam', metrics=['acc'])\n est_keras = keras_lib.model_to_estimator(\n keras_model=model, config=self._config)\n with self.assertRaisesRegexp(\n KeyError,\n 'features keys: .*invalid_input_name.*Missed keys: .*input_layer'):\n est_keras.train(input_fn=invald_input_name_input_fn, steps=100)\n\n with self.assertRaisesRegexp(\n KeyError, 'labels keys: .*invalid_output_name.*Missed keys: .*dense_1'):\n est_keras.train(input_fn=invald_output_name_input_fn, steps=100)\n\n def test_custom_objects(self):\n\n def relu6(x):\n return keras.backend.relu(x, max_value=6)\n\n keras_model = simple_functional_model(activation=relu6)\n keras_model.compile(loss='categorical_crossentropy', optimizer='adam')\n custom_objects = {'relu6': relu6}\n\n (x_train, y_train), _ = testing_utils.get_test_data(\n train_samples=_TRAIN_SIZE,\n test_samples=50,\n input_shape=(10,),\n num_classes=2)\n y_train = np_utils.to_categorical(y_train, 2)\n input_name = keras_model.input_names[0]\n output_name = keras_model.output_names[0]\n train_input_fn = gen_input_fn(\n x=randomize_io_type(x_train, input_name),\n y=randomize_io_type(y_train, output_name),\n shuffle=False,\n num_epochs=None,\n batch_size=16)\n with self.assertRaisesRegexp(ValueError, 'relu6'):\n est = keras_lib.model_to_estimator(\n keras_model=keras_model,\n model_dir=tempfile.mkdtemp(dir=self._base_dir))\n est.train(input_fn=train_input_fn, steps=1)\n\n est = keras_lib.model_to_estimator(\n keras_model=keras_model,\n model_dir=tempfile.mkdtemp(dir=self._base_dir),\n custom_objects=custom_objects)\n est.train(input_fn=train_input_fn, steps=1)\n\n def test_tf_config(self):\n keras_model, (_, _), (_, _), _, _ = get_resource_for_simple_model()\n keras_model.compile(\n loss='categorical_crossentropy',\n optimizer='rmsprop',\n metrics=['mse', keras.metrics.CategoricalAccuracy()])\n\n tf_config = json.dumps({\n 'cluster': {\n run_config_lib.TaskType.PS: ['localhost:1234'],\n run_config_lib.TaskType.WORKER: ['localhost:1236'],\n run_config_lib.TaskType.MASTER: ['localhost:1238']\n },\n 'task': {\n 'type': run_config_lib.TaskType.MASTER,\n 'index': 0\n }\n })\n with tf.compat.v1.test.mock.patch.dict('os.environ',\n {'TF_CONFIG': tf_config}):\n keras_lib.model_to_estimator(\n keras_model=keras_model,\n model_dir=tempfile.mkdtemp(dir=self._base_dir))\n\n def test_gpu_config(self):\n with tf.Graph().as_default():\n keras_model, (_, _), (_, _), _, _ = get_resource_for_simple_model()\n keras_model.compile(\n loss='categorical_crossentropy',\n optimizer='rmsprop',\n metrics=['mse', keras.metrics.CategoricalAccuracy()])\n\n gpu_options = config_pb2.GPUOptions(per_process_gpu_memory_fraction=0.3)\n sess_config = config_pb2.ConfigProto(gpu_options=gpu_options)\n self._config._session_config = sess_config\n keras_lib.model_to_estimator(keras_model=keras_model, config=self._config)\n self.assertEqual(\n keras.backend.get_session(\n )._config.gpu_options.per_process_gpu_memory_fraction,\n gpu_options.per_process_gpu_memory_fraction)\n\n def test_with_empty_config(self):\n keras_model, _, _, _, _ = get_resource_for_simple_model(\n model_type='sequential', is_evaluate=True)\n keras_model.compile(\n loss='categorical_crossentropy',\n optimizer='rmsprop',\n metrics=['mse', 
keras.metrics.CategoricalAccuracy()])\n\n est_keras = keras_lib.model_to_estimator(\n keras_model=keras_model,\n model_dir=self._base_dir,\n config=run_config_lib.RunConfig())\n self.assertEqual(run_config_lib.get_default_session_config(),\n est_keras._session_config)\n self.assertEqual(est_keras._session_config,\n est_keras._config.session_config)\n self.assertEqual(self._base_dir, est_keras._config.model_dir)\n self.assertEqual(self._base_dir, est_keras._model_dir)\n\n est_keras = keras_lib.model_to_estimator(\n keras_model=keras_model, model_dir=self._base_dir, config=None)\n self.assertEqual(run_config_lib.get_default_session_config(),\n est_keras._session_config)\n self.assertEqual(est_keras._session_config,\n est_keras._config.session_config)\n self.assertEqual(self._base_dir, est_keras._config.model_dir)\n self.assertEqual(self._base_dir, est_keras._model_dir)\n\n def test_with_empty_config_and_empty_model_dir(self):\n keras_model, _, _, _, _ = get_resource_for_simple_model(\n model_type='sequential', is_evaluate=True)\n keras_model.compile(\n loss='categorical_crossentropy',\n optimizer='rmsprop',\n metrics=['mse', keras.metrics.CategoricalAccuracy()])\n\n with tf.compat.v1.test.mock.patch.object(\n tempfile, 'mkdtemp', return_value=_TMP_DIR):\n est_keras = keras_lib.model_to_estimator(\n keras_model=keras_model, config=run_config_lib.RunConfig())\n self.assertEqual(est_keras._model_dir, _TMP_DIR)\n\n def test_with_conflicting_model_dir_and_config(self):\n keras_model, _, _, _, _ = get_resource_for_simple_model(\n model_type='sequential', is_evaluate=True)\n keras_model.compile(\n loss='categorical_crossentropy',\n optimizer='rmsprop',\n metrics=['mse', keras.metrics.CategoricalAccuracy()])\n\n with self.assertRaisesRegexp(\n ValueError, '`model_dir` are set both in '\n 'constructor and `RunConfig`'):\n keras_lib.model_to_estimator(\n keras_model=keras_model,\n model_dir=self._base_dir,\n config=run_config_lib.RunConfig(model_dir=_TMP_DIR))\n\n def test_pretrained_weights(self):\n keras_model, (_, _), (_, _), _, _ = get_resource_for_simple_model()\n keras_model.compile(\n loss='categorical_crossentropy',\n optimizer=tf.compat.v1.train.RMSPropOptimizer(1e-3),\n metrics=['mse', keras.metrics.CategoricalAccuracy()])\n keras_model.train_on_batch(\n np.random.random((10,) + _INPUT_SIZE), np.random.random(\n (10, _NUM_CLASS)))\n weights = keras_model.get_weights()\n keras_model, (_, _), (_, _), _, _ = get_resource_for_simple_model()\n keras_model.set_weights(weights)\n\n if tf.executing_eagerly():\n sgd_optimizer = optimizer_v2.SGD(lr=0.0001, momentum=0.9)\n else:\n sgd_optimizer = optimizer_v1.SGD(lr=0.0001, momentum=0.9)\n keras_model.compile(\n loss='categorical_crossentropy',\n optimizer=sgd_optimizer,\n metrics=['mse', keras.metrics.CategoricalAccuracy()])\n keras_lib.model_to_estimator(keras_model=keras_model, config=self._config)\n\n def assert_increasing_global_step(self, optimizer):\n keras_model, _, _, train_input_fn, _ = get_resource_for_simple_model(\n model_type='sequential', is_evaluate=True)\n keras_model.compile(\n loss='categorical_crossentropy',\n optimizer=optimizer,\n metrics=['mse', keras.metrics.CategoricalAccuracy()])\n with self.cached_session() as sess:\n keras_model_fn = keras_lib._create_keras_model_fn(keras_model)\n global_step = tf.compat.v1.train.create_global_step()\n features, labels = train_input_fn().make_one_shot_iterator().get_next()\n spec = keras_model_fn(features, labels, mode=ModeKeys.TRAIN)\n\n 
sess.run(tf.compat.v1.initializers.global_variables())\n sess.run(tf.compat.v1.initializers.local_variables())\n\n self.assertEqual(global_step.eval(), 0) # Sanity check\n sess.run(spec.train_op)\n self.assertEqual(global_step.eval(), 1)\n\n @test_util.run_v1_only('training_util.create_global_step is v1 only.')\n def test_model_fn_increments_global_step_tf_optimizer(self):\n self.assert_increasing_global_step(\n tf.compat.v1.train.RMSPropOptimizer(1e-3))\n\n @test_util.run_v1_only('training_util.create_global_step is v1 only.')\n def test_model_fn_increments_global_step_keras_optimizer(self):\n self.assert_increasing_global_step('rmsprop')\n\n @parameterized.named_parameters(\n dict(testcase_name='object_ckpt', checkpoint_format='checkpoint'),\n dict(testcase_name='name_ckpt', checkpoint_format='saver'))\n def test_export_keras_estimator(self, checkpoint_format):\n keras_model, (x_train, y_train), (\n _, _), train_input_fn, _ = get_resource_for_simple_model(\n model_type='sequential', is_evaluate=False)\n\n keras_model.compile(\n loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n keras_model.fit(x_train, y_train, epochs=1)\n bias_value = keras.backend.get_value(keras_model.layers[0].bias)\n\n est_keras = keras_lib.model_to_estimator(\n keras_model=keras_model,\n model_dir=tempfile.mkdtemp(dir=self._base_dir),\n checkpoint_format=checkpoint_format)\n\n def serving_input_receiver_fn():\n feature_spec = {\n 'dense_input': tf.io.FixedLenFeature([1], dtype=tf.dtypes.float32)\n }\n return export_lib.build_parsing_serving_input_receiver_fn(feature_spec)\n\n # Try immediately exporting, testing that (1) exported values are the same,\n # and (2) estimator can be exported without saving a checkpoint into the\n # model directory.\n saved_model_dir = est_keras.export_saved_model(\n tempfile.mkdtemp(dir=self._base_dir), serving_input_receiver_fn())\n variables_path = saved_model_utils.get_variables_path(saved_model_dir)\n\n variable_name = 'dense/bias'\n if checkpoint_format == 'checkpoint':\n names_to_keys = saver_lib.object_graph_key_mapping(variables_path)\n variable_name = names_to_keys[variable_name]\n\n self.assertAllClose(bias_value,\n tf.train.load_variable(variables_path, variable_name))\n\n # Export the estimator after training a bit.\n est_keras.train(input_fn=train_input_fn, steps=_TRAIN_SIZE / 16)\n saved_model_dir = est_keras.export_saved_model(\n tempfile.mkdtemp(dir=self._base_dir), serving_input_receiver_fn())\n variables_path = saved_model_utils.get_variables_path(saved_model_dir)\n self.assertNotAllClose(\n bias_value, tf.train.load_variable(variables_path, variable_name))\n\n def test_export_subclassed_model_retains_model_state(self):\n keras_model, (x_train, y_train), (\n _, _), train_input_fn, eval_input_fn = get_resource_for_simple_model(\n model_type='subclass', is_evaluate=True)\n keras_model.compile(\n optimizer=tf.compat.v1.train.RMSPropOptimizer(1e-3),\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n keras_model.fit(x_train, y_train, epochs=1)\n iterations = keras.backend.get_value(keras_model.optimizer.iterations)\n optimizer = keras_model.optimizer\n est_keras = keras_lib.model_to_estimator(\n keras_model=keras_model, config=self._config, checkpoint_format='saver')\n est_keras.train(input_fn=train_input_fn, steps=_TRAIN_SIZE / 16)\n\n # Subclassed models resets the model object. 
Assert that attributes are\n # properly restored.\n iterations_after = keras.backend.get_value(keras_model.optimizer.iterations)\n self.assertEqual(optimizer, keras_model.optimizer)\n self.assertEqual(iterations, iterations_after)\n # TODO(b/132839451): model.fit results in an error after model_to_estimator.\n # keras_model.fit(x_train, y_train, epochs=1)\n\n def test_warm_start_from_keras_ckpt(self):\n keras_model, (x_train, y_train), (\n _, _), train_input_fn, eval_input_fn = get_resource_for_simple_model(\n model_type='functional', is_evaluate=True)\n keras_model.compile(\n optimizer=tf.compat.v1.train.RMSPropOptimizer(1e-3),\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n keras_model.fit(x_train, y_train, epochs=1)\n\n warm_start_path = os.path.join(self._config.model_dir, 'keras',\n 'warm_start.ckpt')\n keras_model.save_weights(warm_start_path)\n\n est_keras = keras_lib.model_to_estimator(\n keras_model=keras_model, config=self._config, checkpoint_format='saver')\n\n self.assertEqual(warm_start_path,\n est_keras._warm_start_settings.ckpt_to_initialize_from)\n before_eval_results = est_keras.evaluate(input_fn=eval_input_fn, steps=1)\n est_keras.train(input_fn=train_input_fn, steps=_TRAIN_SIZE / 16)\n after_eval_results = est_keras.evaluate(input_fn=eval_input_fn, steps=1)\n self.assertLess(after_eval_results['loss'], before_eval_results['loss'])\n\n def test_sample_weights(self):\n # Create simple pass-through model\n input_layer = keras.layers.Input(shape=1, name='input_layer')\n keras_model = keras.Model(inputs=input_layer, outputs=input_layer)\n\n keras_model.compile(loss='mean_absolute_error', optimizer='adam')\n\n features = [[0.], [0], [1], [1]]\n sample_weights = [0, .4, 1, 1]\n targets = [[0], [1], [0], [1]]\n\n expected_loss = keras_model.test_on_batch(\n tf.constant(features), tf.constant(targets),\n tf.constant(sample_weights))\n\n def input_fn():\n dataset = tf.compat.v1.data.Dataset.from_tensors(({\n 'features': features,\n 'sample_weights': sample_weights\n }, targets))\n return dataset\n\n est_keras = keras_lib.model_to_estimator(\n keras_model=keras_model, model_dir=tempfile.mkdtemp(dir=self._base_dir))\n eval_results = est_keras.evaluate(input_fn, steps=1)\n self.assertAllClose(expected_loss, eval_results['loss'])\n\n # Test multiple with outputs and sample weights.\n keras_model = keras.Model(\n inputs=input_layer, outputs=[input_layer, input_layer])\n keras_model.compile(loss='mean_absolute_error', optimizer='adam')\n expected_loss = keras_model.test_on_batch(\n tf.constant(features),\n [tf.constant(targets), tf.constant(targets)],\n [tf.constant(sample_weights),\n tf.constant(sample_weights)])[0]\n\n def input_fn_multiple_targets():\n dataset = tf.compat.v1.data.Dataset.from_tensors(\n (features, sample_weights, targets))\n dataset = dataset.map(lambda x, y, z: ({\n 'features': x,\n 'sample_weights': (y, y)\n }, (z, z)))\n return dataset\n\n est_keras = keras_lib.model_to_estimator(\n keras_model=keras_model, model_dir=tempfile.mkdtemp(dir=self._base_dir))\n eval_results = est_keras.evaluate(input_fn_multiple_targets, steps=1)\n self.assertAllClose(expected_loss, eval_results['loss'])\n\n @parameterized.parameters([rnn_v2.LSTM, rnn_v2.GRU])\n def test_model_to_estimator_with_rnn(self, layer):\n # See https://github.com/tensorflow/tensorflow/issues/27750 for details.\n timestep = 10\n rnn_cell_size = 8\n\n layers = [\n keras.layers.Reshape([timestep, 1], input_shape=[\n timestep,\n ]),\n layer(rnn_cell_size, return_sequences=True),\n 
layer(rnn_cell_size),\n keras.layers.Dense(1)\n ]\n\n model = keras.Sequential(layers)\n model.compile(loss='mse', optimizer='sgd')\n keras_lib.model_to_estimator(\n keras_model=model,\n checkpoint_format='checkpoint',\n model_dir=tempfile.mkdtemp(dir=self._base_dir))\n\n\nif __name__ == '__main__':\n tf.test.main()\n" ]
[ [ "tensorflow.core.protobuf.config_pb2.ConfigProto", "tensorflow.compat.v1.gfile.MakeDirs", "tensorflow.executing_eagerly", "tensorflow.compat.v1.initializers.global_variables", "tensorflow.python.keras.testing_utils.get_test_data", "tensorflow.python.keras.layers.Dense", "tensorflow.python.feature_column.dense_features.DenseFeatures", "numpy.random.random", "tensorflow.compat.v1.gfile.DeleteRecursively", "tensorflow.python.keras.models.Sequential", "tensorflow.compat.v1.test.mock.patch.dict", "tensorflow.train.latest_checkpoint", "tensorflow.compat.v1.train.RMSPropOptimizer", "tensorflow.python.feature_column.dense_features_v2.DenseFeatures", "tensorflow.python.keras.optimizer_v2.gradient_descent.SGD", "tensorflow.io.FixedLenFeature", "tensorflow.python.keras.backend.relu", "tensorflow.constant", "numpy.argmax", "tensorflow.python.keras.metrics.CategoricalAccuracy", "tensorflow.python.keras.Sequential", "tensorflow.python.keras.backend.get_session", "tensorflow.python.saved_model.utils_impl.get_variables_path", "tensorflow.compat.v1.initializers.local_variables", "tensorflow.python.framework.test_util.run_v1_only", "tensorflow.python.keras.models.Model", "tensorflow.python.training.saver.object_graph_key_mapping", "tensorflow.python.keras.models.save_model", "tensorflow.train.load_variable", "tensorflow.python.keras.layers.Dropout", "numpy.int64", "tensorflow.test.main", "tensorflow.python.keras.Model", "tensorflow.python.keras.layers.Input", "tensorflow.compat.v1.get_variable", "tensorflow.compat.v1.summary.FileWriterCache.clear", "tensorflow.python.keras.optimizers.SGD", "tensorflow.python.keras.utils.np_utils.to_categorical", "numpy.random.seed", "tensorflow.compat.v1.data.Dataset.from_tensor_slices", "tensorflow.compat.v1.train.create_global_step", "tensorflow.Graph", "tensorflow.core.protobuf.config_pb2.GPUOptions", "tensorflow.compat.v1.data.Dataset.from_tensors", "tensorflow.python.keras.backend.get_value", "tensorflow.python.keras.layers.Lambda", "tensorflow.python.keras.layers.concatenate", "tensorflow.python.keras.backend.clear_session", "tensorflow.python.keras.layers.Reshape", "tensorflow.compat.v1.test.mock.patch.object", "numpy.unique" ] ]
Xylonwang/mindspore
[ "ea37dc76f0a8f0b10edd85c2ad545af44552af1e" ]
[ "tests/ut/python/nn/optim/test_proximal_ada_grad.py" ]
[ "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\" test PROXIMAL_ADA_GRAD \"\"\"\n\nimport numpy as np\n\nimport mindspore.nn as nn\nfrom mindspore import Tensor, Parameter\nfrom mindspore.common.api import _executor\nfrom mindspore.nn import TrainOneStepCell, WithLossCell\nfrom mindspore.nn.optim import ProximalAdagrad\nfrom mindspore.ops import operations as P\n\n\nclass Net(nn.Cell):\n def __init__(self):\n super(Net, self).__init__()\n self.weight = Parameter(Tensor(np.ones([64, 10]).astype(np.float32)), name='weight')\n self.bias = Parameter(Tensor(np.ones([10]).astype(np.float32)), name='bias')\n self.matmul = P.MatMul()\n self.biasAdd = P.BiasAdd()\n\n def construct(self, x):\n x = self.biasAdd(self.matmul(x, self.weight), self.bias)\n return x\n\n\ndef test_proximal_ada_grad():\n \"\"\" test_proximal_ada_grad \"\"\"\n inputs = Tensor(np.ones([1, 64]).astype(np.float32))\n label = Tensor(np.zeros([1, 10]).astype(np.float32))\n net = Net()\n net.set_train()\n loss = nn.SoftmaxCrossEntropyWithLogits()\n optimizer = ProximalAdagrad(net.trainable_params())\n net_with_loss = WithLossCell(net, loss)\n train_network = TrainOneStepCell(net_with_loss, optimizer)\n _executor.compile(train_network, inputs, label)\n" ]
[ [ "numpy.ones", "numpy.zeros" ] ]
xiaohanzai/fake_spectra
[ "170b42ac7732eb4f299617a1049cd3eabecfa3a7" ]
[ "fake_spectra/rate_network.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"A rate network for neutral hydrogen following\nKatz, Weinberg & Hernquist 1996, eq. 28-32.\"\"\"\nimport os.path\nimport math\nimport numpy as np\nimport scipy.interpolate as interp\nimport scipy.optimize\n\nclass RateNetwork(object):\n \"\"\"A rate network for neutral hydrogen following\n Katz, Weinberg & Hernquist 1996, astro-ph/9509107, eq. 28-32.\n\n Most internal methods are CamelCapitalized and follow a convention that\n they are named like the process and then the ion they refer to.\n eg:\n CollisionalExciteHe0 is the neutral Helium collisional excitation rate.\n RecombHp is the recombination rate for ionized hydrogen.\n\n Externally useful methods (the API) are named like get_*.\n These are:\n get_temp() - gets the temperature from the density and internal energy.\n get_cooling_rate() - gets the total cooling rate from density and internal energy.\n get_neutral_fraction() - gets the neutral fraction from the rate network given density and internal energy.\n Two useful helper functions:\n get_equilib_ne() - gets the equilibrium electron density.\n get_ne_by_nh() - gets the above, divided by the hydrogen density (Gadget reports this as ElectronAbundance).\n\n Constructor arguments:\n redshift - the redshift at which to evaluate the cooling. Affects the photoionization rate,\n the Inverse Compton cooling and the self shielding threshold.\n photo_factor - Factor by which to multiply the UVB amplitude.\n f_bar - Baryon fraction. Omega_b / Omega_cdm.\n converge - Tolerance to which the rate network should be converged.\n selfshield - Flag to enable self-shielding following Rahmati 2013\n cool - which cooling rate coefficient table to use.\n Supported are: KWH (original Gadget rates)\n Nyx (rates used in Nyx (Lukic 2015))\n Sherwood (rates used in Sherwood simulations (Bolton 2017))\n Default is Sherwood\n recomb - which recombination rate table to use.\n Supported are: C92 (Cen 1992, the Gadget default)\n V96 (Verner & Ferland 1996, more accurate rates).\n B06 (Badnell 2006 rates, current cloudy defaults. Very similar to V96).\n collisional - Flag to enable collisional ionizations.\n treecool_file - File to read a UV background from. Matches format used by Gadget.\n \"\"\"\n def __init__(self,redshift, photo_factor = 1., f_bar = 0.17, converge = 1e-7, selfshield=True, cool=\"Sherwood\", recomb=\"V96\", collisional=True, treecool_file=\"data/TREECOOL_ep_2018p\"):\n if recomb == \"V96\":\n self.recomb = RecombRatesVerner96()\n elif recomb == \"B06\":\n self.recomb = RecombRatesBadnell()\n else:\n self.recomb = RecombRatesCen92()\n self.photo = PhotoRates(treecool_file=treecool_file)\n self.photo_factor = photo_factor\n self.f_bar = f_bar\n if cool == \"KWH\":\n self.cool = CoolingRatesKWH92()\n elif cool == \"Sherwood\":\n self.cool = CoolingRatesSherwood()\n elif cool == \"Nyx\":\n self.cool = CoolingRatesNyx()\n else:\n raise ValueError(\"Not supported\")\n #Extra helium reionization photoheating model\n self.hub = 0.7\n self.he_thresh = 10\n self.he_amp = 1\n self.he_exp = 0\n self.he_model_on = False\n #proton mass in g\n self.protonmass = 1.67262178e-24\n self.redshift = redshift\n self.converge = converge\n self.selfshield = selfshield\n self.collisional = collisional\n zz = [0, 1, 2, 3, 4, 5, 6, 7, 8]\n #Tables for the self-shielding correction. 
Note these are not well-measured for z > 5!\n gray_opac = [2.59e-18,2.37e-18,2.27e-18, 2.15e-18, 2.02e-18, 1.94e-18, 1.82e-18, 1.71e-18, 1.60e-18]\n self.Gray_ss = interp.InterpolatedUnivariateSpline(zz, gray_opac)\n\n def get_temp(self, density, ienergy, helium=0.24):\n \"\"\"Get the equilibrium temperature at given internal energy.\n density is gas density in protons/cm^3\n Internal energy is in J/kg == 10^-10 ergs/g.\n helium is a mass fraction\"\"\"\n ne = self.get_equilib_ne(density, ienergy, helium)\n nh = density * (1-helium)\n return self._get_temp(ne/nh, ienergy, helium)\n\n def get_cooling_rate(self, density, ienergy, helium=0.24, photoheating=False):\n \"\"\"Get the total cooling rate for a temperature and density. Negative means heating.\"\"\"\n ne = self.get_equilib_ne(density, ienergy, helium)\n nh = density * (1-helium)\n temp = self._get_temp(ne/nh, ienergy, helium)\n nH0 = self._nH0(nh, temp, ne)\n nHe0 = self._nHe0(nh, temp, ne)\n nHp = self._nHp(nh, temp, ne)\n nHep = self._nHep(nh, temp, ne)\n nHepp = self._nHepp(nh, temp, ne)\n #This is the collisional excitation and ionisation rate.\n LambdaCollis = ne * (self.cool.CollisionalH0(temp) * nH0 +\n self.cool.CollisionalHe0(temp) * nHe0 +\n self.cool.CollisionalHeP(temp) * nHep)\n LambdaRecomb = ne * (self.cool.RecombHp(temp) * nHp +\n self.cool.RecombHeP(temp) * nHep +\n self.cool.RecombHePP(temp) * nHepp)\n LambdaFF = ne * (self.cool.FreeFree(temp, 1)*(nHp + nHep) + self.cool.FreeFree(temp, 2)*nHepp)\n LambdaCmptn = ne * self.cool.InverseCompton(temp, self.redshift)\n Lambda = LambdaCollis + LambdaRecomb + LambdaFF + LambdaCmptn\n Heating = 0\n if photoheating:\n Heating = nH0 * self.photo.epsH0(self.redshift)\n Heating += nHe0 * self.photo.epsHe0(self.redshift)\n Heating += nHep * self.photo.epsHep(self.redshift)\n Heating *= self.photo_factor\n if self.he_model_on:\n Heating *= self._he_reion_factor(density)\n return Lambda - Heating\n\n def get_equilib_ne(self, density, ienergy,helium=0.24):\n \"\"\"Solve the system of equations for photo-ionisation equilibrium,\n starting with ne = nH and continuing until convergence.\n density is gas density in protons/cm^3\n Internal energy is in J/kg == 10^-10 ergs/g.\n helium is a mass fraction.\n \"\"\"\n #Get hydrogen number density\n nh = density * (1-helium)\n rooted = lambda ne: self._ne(nh, self._get_temp(ne/nh, ienergy, helium=helium), ne, helium=helium)\n ne = scipy.optimize.fixed_point(rooted, nh,xtol=self.converge)\n assert np.all(np.abs(rooted(ne) - ne) < self.converge)\n return ne\n\n def get_ne_by_nh(self, density, ienergy, helium=0.24):\n \"\"\"Same as above, but get electrons per proton.\"\"\"\n return self.get_equilib_ne(density, ienergy, helium)/(density*(1-helium))\n\n def get_neutral_fraction(self, density, ienergy, helium=0.24):\n \"\"\"Get the neutral hydrogen fraction at a given temperature and density.\n density is gas density in protons/cm^3\n Internal energy is in J/kg == 10^-10 ergs/g.\n helium is a mass fraction.\n \"\"\"\n ne = self.get_equilib_ne(density, ienergy, helium=helium)\n nh = density * (1-helium)\n temp = self._get_temp(ne/nh, ienergy, helium)\n return self._nH0(nh, temp, ne) / nh\n\n def _nH0(self, nh, temp, ne):\n \"\"\"The neutral hydrogen number density. Eq. 
33 of KWH.\"\"\"\n alphaHp = self.recomb.alphaHp(temp)\n GammaeH0 = self.collisional * self.recomb.GammaeH0(temp)\n photorate = self.photo.gH0(self.redshift)/ne*self.photo_factor*self._self_shield_corr(nh, temp)\n return nh * alphaHp/ (alphaHp + GammaeH0 + photorate)\n\n def _nHp(self, nh, temp, ne):\n \"\"\"The ionised hydrogen number density. Eq. 34 of KWH.\"\"\"\n return nh - self._nH0(nh, temp, ne)\n\n def _nHep(self, nh, temp, ne):\n \"\"\"The ionised helium number density, divided by the helium number fraction. Eq. 35 of KWH.\"\"\"\n alphaHep = self.recomb.alphaHep(temp) + self.recomb.alphad(temp)\n alphaHepp = self.recomb.alphaHepp(temp)\n photofac = self.photo_factor*self._self_shield_corr(nh, temp)\n GammaHe0 = self.collisional * self.recomb.GammaeHe0(temp) + self.photo.gHe0(self.redshift)/ne*photofac\n GammaHep = self.collisional * self.recomb.GammaeHep(temp) + self.photo.gHep(self.redshift)/ne*photofac\n return nh / (1 + alphaHep / GammaHe0 + GammaHep/alphaHepp)\n\n def _nHe0(self, nh, temp, ne):\n \"\"\"The neutral helium number density, divided by the helium number fraction. Eq. 36 of KWH.\"\"\"\n alphaHep = self.recomb.alphaHep(temp) + self.recomb.alphad(temp)\n photofac = self.photo_factor*self._self_shield_corr(nh, temp)\n GammaHe0 = self.collisional * self.recomb.GammaeHe0(temp) + self.photo.gHe0(self.redshift)/ne*photofac\n return self._nHep(nh, temp, ne) * alphaHep / GammaHe0\n\n def _nHepp(self, nh, temp, ne):\n \"\"\"The doubly ionised helium number density, divided by the helium number fraction. Eq. 37 of KWH.\"\"\"\n photofac = self.photo_factor*self._self_shield_corr(nh, temp)\n GammaHep = self.collisional * self.recomb.GammaeHep(temp) + self.photo.gHep(self.redshift)/ne*photofac\n alphaHepp = self.recomb.alphaHepp(temp)\n return self._nHep(nh, temp, ne) * GammaHep / alphaHepp\n\n def _ne(self, nh, temp, ne, helium=0.24):\n \"\"\"The electron number density. Eq. 38 of KWH.\"\"\"\n yy = helium / 4 / (1 - helium)\n return self._nHp(nh, temp, ne) + yy * self._nHep(nh, temp, ne) + 2* yy * self._nHepp(nh, temp, ne)\n\n def _self_shield_corr(self, nh, temp):\n \"\"\"Photoionisation rate as a function of density from Rahmati 2012, eq. 14.\n Calculates Gamma_{Phot} / Gamma_{UVB}.\n Inputs: hydrogen density, temperature\n n_H\n The coefficients are their best-fit from appendix A.\"\"\"\n if not self.selfshield:\n return np.ones_like(nh)\n nSSh = 1.003*self._self_shield_dens(self.redshift, temp)\n return 0.98*(1+(nh/nSSh)**1.64)**-2.28+0.02*(1+nh/nSSh)**-0.84\n\n def _self_shield_dens(self,redshift, temp):\n \"\"\"Calculate the critical self-shielding density. Rahmati 202 eq. 13.\n gray_opac is a parameter of the UVB used.\n gray_opac is in cm^2 (2.49e-18 is HM01 at z=3)\n temp is particle temperature in K\n f_bar is the baryon fraction. 
0.17 is roughly 0.045/0.265\n Returns density in atoms/cm^3\"\"\"\n T4 = temp/1e4\n G12 = self.photo.gH0(redshift)/1e-12\n return 6.73e-3 * (self.Gray_ss(redshift) / 2.49e-18)**(-2./3)*(T4)**0.17*(G12)**(2./3)*(self.f_bar/0.17)**(-1./3)\n\n def _he_reion_factor(self, density):\n \"\"\"Compute a density dependent correction factor to the heating rate which can model the effect of helium reionization.\n Argument: Gas density in protons/cm^3.\"\"\"\n #Newton's constant (cgs units)\n gravity = 6.672e-8\n #100 km/s/Mpc in h/sec\n hubble = 3.2407789e-18\n omegab = 0.0483\n atime = 1/(1+self.redshift)\n rhoc = 3 * (self.hub* hubble)**2 /(8* math.pi * gravity)\n overden = self.protonmass * density /(omegab * rhoc * atime**(-3))\n if overden >= self.he_thresh:\n overden = self.he_thresh\n return self.he_amp * overden**self.he_exp\n\n def _get_temp(self, nebynh, ienergy, helium=0.24):\n \"\"\"Compute temperature (in K) from internal energy and electron density.\n Uses: internal energy\n electron abundance per H atom (ne/nH)\n hydrogen mass fraction (0.76)\n Internal energy is in J/kg, internal gadget units, == 10^-10 ergs/g.\n Factor to convert U (J/kg) to T (K) : U = N k T / (γ - 1)\n T = U (γ-1) μ m_P / k_B\n where k_B is the Boltzmann constant\n γ is 5/3, the perfect gas constant\n m_P is the proton mass\n\n μ = 1 / (mean no. molecules per unit atomic weight)\n = 1 / (X + Y /4 + E)\n where E = Ne * X, and Y = (1-X).\n Can neglect metals as they are heavy.\n Leading contribution is from electrons, which is already included\n [+ Z / (12->16)] from metal species\n [+ Z/16*4 ] for OIV from electrons.\"\"\"\n #convert U (J/kg) to T (K) : U = N k T / (γ - 1)\n #T = U (γ-1) μ m_P / k_B\n #where k_B is the Boltzmann constant\n #γ is 5/3, the perfect gas constant\n #m_P is the proton mass\n #μ is 1 / (mean no. molecules per unit atomic weight) calculated in loop.\n #Internal energy units are 10^-10 erg/g\n hy_mass = 1 - helium\n muienergy = 4 / (hy_mass * (3 + 4*nebynh) + 1)*ienergy*1e10\n #Boltzmann constant (cgs)\n boltzmann=1.38066e-16\n gamma=5./3\n #So for T in K, boltzmann in erg/K, internal energy has units of erg/g\n temp = (gamma-1) * self.protonmass / boltzmann * muienergy\n return temp\n\nclass RecombRatesCen92(object):\n \"\"\"Recombination rates and collisional ionization rates, as a function of temperature.\n This is taken from KWH 06, astro-ph/9509107, Table 2, based on Cen 1992.\n Illustris uses these rates.\"\"\"\n def alphaHp(self,temp):\n \"\"\"Recombination rate for H+, ionized hydrogen, in cm^3/s.\n Temp in K.\"\"\"\n return 8.4e-11 / np.sqrt(temp) / np.power(temp/1000, 0.2) / (1+ np.power(temp/1e6, 0.7))\n\n def alphaHep(self,temp):\n \"\"\"Recombination rate for He+, ionized helium, in cm^3/s.\n Temp in K.\"\"\"\n return 1.5e-10 / np.power(temp,0.6353)\n\n def alphad(self, temp):\n \"\"\"Recombination rate for dielectronic recombination, in cm^3/s.\n Temp in K.\"\"\"\n return 1.9e-3 / np.power(temp,1.5) * np.exp(-4.7e5/temp)*(1+0.3*np.exp(-9.4e4/temp))\n\n def alphaHepp(self, temp):\n \"\"\"Recombination rate for doubly ionized helium, in cm^3/s.\n Temp in K.\"\"\"\n return 4 * self.alphaHp(temp)\n\n def GammaeH0(self,temp):\n \"\"\"Collisional ionization rate for H0 in cm^3/s. Temp in K\"\"\"\n return 5.85e-11 * np.sqrt(temp) * np.exp(-157809.1/temp) / (1+ np.sqrt(temp/1e5))\n\n def GammaeHe0(self,temp):\n \"\"\"Collisional ionization rate for H0 in cm^3/s. 
Temp in K\"\"\"\n return 2.38e-11 * np.sqrt(temp) * np.exp(-285335.4/temp) / (1+ np.sqrt(temp/1e5))\n\n def GammaeHep(self,temp):\n \"\"\"Collisional ionization rate for H0 in cm^3/s. Temp in K\"\"\"\n return 5.68e-12 * np.sqrt(temp) * np.exp(-631515.0/temp) / (1+ np.sqrt(temp/1e5))\n\nclass RecombRatesVerner96(object):\n \"\"\"Recombination rates and collisional ionization rates, as a function of temperature.\n Recombination rates are the fit from Verner & Ferland 1996 (astro-ph/9509083).\n Collisional rates are the fit from Voronov 1997 (http://www.sciencedirect.com/science/article/pii/S0092640X97907324).\n\n In a very photoionised medium this changes the neutral hydrogen abundance by approximately 10% compared to Cen 1992.\n These rates are those used by Nyx.\n \"\"\"\n def _Verner96Fit(self, temp, aa, bb, temp0, temp1):\n \"\"\"Formula used as a fitting function in Verner & Ferland 1996 (astro-ph/9509083).\"\"\"\n sqrttt0 = np.sqrt(temp/temp0)\n sqrttt1 = np.sqrt(temp/temp1)\n return aa / ( sqrttt0 * (1 + sqrttt0)**(1-bb)*(1+sqrttt1)**(1+bb) )\n\n def alphaHp(self,temp):\n \"\"\"Recombination rate for H+, ionized hydrogen, in cm^3/s.\n The V&F 96 fitting formula is accurate to < 1% in the worst case.\n Temp in K.\"\"\"\n #See line 1 of V&F96 table 1.\n return self._Verner96Fit(temp, aa=7.982e-11, bb=0.748, temp0=3.148, temp1=7.036e+05)\n\n def alphaHep(self,temp):\n \"\"\"Recombination rate for He+, ionized helium, in cm^3/s.\n Accurate to ~2% for T < 10^6 and 5% for T< 10^10.\n Temp in K.\"\"\"\n #VF96 give two rates. The first is more accurate for T < 10^6, the second is valid up to T = 10^10.\n #We use the most accurate allowed. See lines 2 and 3 of Table 1 of VF96.\n lowTfit = self._Verner96Fit(temp, aa=3.294e-11, bb=0.6910, temp0=1.554e+01, temp1=3.676e+07)\n highTfit = self._Verner96Fit(temp, aa=9.356e-10, bb=0.7892, temp0=4.266e-02, temp1=4.677e+06)\n #Note that at 10^6K the two fits differ by ~10%. This may lead one to disbelieve the quoted accuracies!\n #We thus switch over at a slightly lower temperature.\n #The two fits cross at T ~ 3e5K.\n swtmp = 7e5\n deltat = 1e5\n upper = swtmp + deltat\n lower = swtmp - deltat\n #In order to avoid a sharp feature at 10^6 K, we linearly interpolate between the two fits around 10^6 K.\n interpfit = (lowTfit * (upper - temp) + highTfit * (temp - lower))/(2*deltat)\n return (temp < lower)*lowTfit + (temp > upper)*highTfit + (upper > temp)*(temp > lower)*interpfit\n\n def alphad(self, temp):\n \"\"\"Recombination rate for dielectronic recombination, in cm^3/s.\n This is the value from Aldrovandi & Pequignot 73, as used in Nyx, Sherwood and Cen 1992.\n It is corrected from the value in Aldrovandi & Pequignot 1973 by Burgess & Tworkowski 1976 (fig1)\n by a factor of 0.65. The exponent is also made slightly more accurate.\n Temp in K.\"\"\"\n return 1.23e-3 / np.power(temp,1.5) * np.exp(-4.72e5/temp)*(1+0.3*np.exp(-9.4e4/temp))\n\n def alphaHepp(self, temp):\n \"\"\"Recombination rate for doubly ionized helium, in cm^3/s. Accurate to 2%.\n Temp in K.\"\"\"\n #See line 4 of V&F96 table 1.\n return self._Verner96Fit(temp, aa=1.891e-10, bb=0.7524, temp0=9.370, temp1=2.774e6)\n\n def _Voronov96Fit(self, temp, dE, PP, AA, XX, KK):\n \"\"\"Fitting function for collisional rates. Eq. 1 of Voronov 1997. 
Accurate to 10%,\n        but data is only accurate to 50%.\"\"\"\n        bolevk = 8.61734e-5 # Boltzmann constant in units of eV/K\n        UU = dE / (bolevk * temp)\n        return AA * (1 + PP * np.sqrt(UU))/(XX+UU) * UU**KK * np.exp(-UU)\n\n    def GammaeH0(self,temp):\n        \"\"\"Collisional ionization rate for H0 in cm^3/s. Temp in K. Voronov 97, Table 1.\"\"\"\n        return self._Voronov96Fit(temp, 13.6, 0, 0.291e-07, 0.232, 0.39)\n\n    def GammaeHe0(self,temp):\n        \"\"\"Collisional ionization rate for He0 in cm^3/s. Temp in K. Voronov 97, Table 1.\"\"\"\n        return self._Voronov96Fit(temp, 24.6, 0, 0.175e-07, 0.180, 0.35)\n\n    def GammaeHep(self,temp):\n        \"\"\"Collisional ionization rate for He+ in cm^3/s. Temp in K. Voronov 97, Table 1.\"\"\"\n        return self._Voronov96Fit(temp, 54.4, 1, 0.205e-08, 0.265, 0.25)\n\nclass RecombRatesBadnell(RecombRatesVerner96):\n    \"\"\"Recombination rates and collisional ionization rates, as a function of temperature.\n    Recombination rates are the fit from Badnell's website: http://amdpp.phys.strath.ac.uk/tamoc/RR/#partial.\n    \"\"\"\n\n    def _RecombRateFit_lowcharge_ion(self, temp, aa, bb, cc, temp0, temp1, temp2):\n        \"\"\"Formula used as a fitting function in Verner & Ferland 1996 (astro-ph/9509083). See http://amdpp.phys.strath.ac.uk/tamoc/RR/#partial.\"\"\"\n        sqrttt0 = np.sqrt(temp/temp0)\n        sqrttt1 = np.sqrt(temp/temp1)\n        BB = bb + cc*np.exp(-temp2/temp)\n        return aa / ( sqrttt0 * (1 + sqrttt0)**(1-BB)*(1+sqrttt1)**(1+BB) )\n\n\n    def alphaHp(self,temp):\n        \"\"\"Recombination rate for H+, ionized hydrogen, in cm^3/s.\n        Temp in K.\"\"\"\n        #See line 1 of V&F96 table 1.\n        return self._Verner96Fit(temp, aa=8.318e-11, bb=0.7472, temp0=2.965, temp1=7.001e5)\n\n    def alphaHep(self,temp):\n        \"\"\"Recombination rate for He+, singly ionized helium, in cm^3/s.\n        Temp in K.\"\"\"\n        #See line 2 of V&F96 table 1.\n        return self._Verner96Fit(temp, aa=1.818E-10, bb=0.7492, temp0=10.17, temp1=2.786e6)\n\n    def alphaHepp(self, temp):\n        \"\"\"Recombination rate for doubly ionized helium, in cm^3/s.\n        Temp in K.\"\"\"\n        #See line 4 of V&F96 table 1.\n        return self._RecombRateFit_lowcharge_ion(temp, aa=5.235E-11, bb=0.6988, cc=0.0829, temp0=7.301, temp1=4.475e6, temp2 = 1.682e5)\n\nclass PhotoRates(object):\n    \"\"\"The photoionization rates for a given species.\n    Eq. 29 of KWH 96. 
This is loaded from a TREECOOL table.\"\"\"\n def __init__(self, treecool_file=\"data/TREECOOL_ep_2018p\"):\n #Format of the treecool table:\n # log_10(1+z), Gamma_HI, Gamma_HeI, Gamma_HeII, Qdot_HI, Qdot_HeI, Qdot_HeII,\n # where 'Gamma' is the photoionization rate and 'Qdot' is the photoheating rate.\n # The Gamma's are in units of s^-1, and the Qdot's are in units of erg s^-1.\n try:\n data = np.loadtxt(treecool_file)\n except OSError:\n treefile = os.path.join(os.path.dirname(os.path.realpath(__file__)), treecool_file)\n data = np.loadtxt(treefile)\n redshifts = data[:,0]\n photo_rates = data[:,1:4]\n photo_heat = data[:,4:7]\n assert np.shape(redshifts)[0] == np.shape(photo_rates)[0]\n self.Gamma_HI = interp.InterpolatedUnivariateSpline(redshifts, photo_rates[:,0])\n self.Gamma_HeI = interp.InterpolatedUnivariateSpline(redshifts, photo_rates[:,1])\n self.Gamma_HeII = interp.InterpolatedUnivariateSpline(redshifts, photo_rates[:,2])\n self.Eps_HI = interp.InterpolatedUnivariateSpline(redshifts, photo_heat[:,0])\n self.Eps_HeI = interp.InterpolatedUnivariateSpline(redshifts, photo_heat[:,1])\n self.Eps_HeII = interp.InterpolatedUnivariateSpline(redshifts, photo_heat[:,2])\n\n def gHe0(self,redshift):\n \"\"\"Get photo rate for neutral Helium\"\"\"\n log1z = np.log10(1+redshift)\n return self.Gamma_HeI(log1z)\n\n def gHep(self,redshift):\n \"\"\"Get photo rate for singly ionized Helium\"\"\"\n log1z = np.log10(1+redshift)\n return self.Gamma_HeII(log1z)\n\n def gH0(self,redshift):\n \"\"\"Get photo rate for neutral Hydrogen\"\"\"\n log1z = np.log10(1+redshift)\n return self.Gamma_HI(log1z)\n\n def epsHe0(self,redshift):\n \"\"\"Get photo heating rate for neutral Helium\"\"\"\n log1z = np.log10(1+redshift)\n return self.Eps_HeI(log1z)\n\n def epsHep(self,redshift):\n \"\"\"Get photo heating rate for singly ionized Helium\"\"\"\n log1z = np.log10(1+redshift)\n return self.Eps_HeII(log1z)\n\n def epsH0(self,redshift):\n \"\"\"Get photo heating rate for neutral Hydrogen\"\"\"\n log1z = np.log10(1+redshift)\n return self.Eps_HI(log1z)\n\nclass CoolingRatesKWH92(object):\n \"\"\"The cooling rates from KWH92, in erg s^-1 cm^-3 (cgs).\n All rates are divided by the abundance of the ions involved in the interaction.\n So we are computing the cooling rate divided by n_e n_X. Temperatures in K.\n None of these rates are original to KWH92, but are taken from Cen 1992,\n and originally from older references. The hydrogen rates in particular are probably inaccurate.\n Cen 1992 modified (arbitrarily) the excitation and ionisation rates for high temperatures.\n There is no collisional excitation rate for He0 - not sure why.\n References:\n Black 1981, from Lotz 1967, Seaton 1959, Burgess & Seaton 1960.\n Recombination rates are from Spitzer 1978.\n Free-free: Spitzer 1978.\n Collisional excitation and ionisation cooling rates are merged.\n \"\"\"\n def __init__(self, tcmb=2.7255, t5_corr=1e5, recomb=None):\n self.tcmb = tcmb\n if recomb is None:\n self.recomb = RecombRatesCen92()\n else:\n self.recomb = recomb\n self.t5_corr = t5_corr\n #1 eV in ergs\n self.eVinergs = 1.60218e-12\n #boltzmann constant in erg/K\n self.kB = 1.38064852e-16\n\n def _t5(self, temp):\n \"\"\"Commonly used Cen 1992 correction factor for large temperatures.\n This is implemented so that the cooling rates have the right\n asymptotic behaviour. 
However, Cen erroneously imposes this correction at T=1e5,\n        which is too small: the Black 1981 rates these are based on should be good\n        until 5e5 at least, where the correction factor has a 10% effect already.\n        More modern tables thus impose it at T=5e7, which is still arbitrary but should be harmless.\n        \"\"\"\n        return 1+(temp/self.t5_corr)**0.5\n\n    def CollisionalExciteH0(self, temp):\n        \"\"\"Collisional excitation cooling rate for n_H0 and n_e. Gadget calls this BetaH0.\"\"\"\n        return 7.5e-19 * np.exp(-118348.0/temp) /self._t5(temp)\n\n    def CollisionalExciteHeP(self, temp):\n        \"\"\"Collisional excitation cooling rate for n_He+ and n_e. Gadget calls this BetaHep.\"\"\"\n        return 5.54e-17 * temp**(-0.397)*np.exp(-473638./temp)/self._t5(temp)\n\n    def CollisionalExciteHe0(self, temp):\n        \"\"\"This is listed in Cen 92 but neglected in KWH 97, presumably because it is very small.\"\"\"\n        #return 0\n        return 9.1e-27 * temp**(-0.1687) * np.exp(-473638/temp) / self._t5(temp)\n\n    def CollisionalIonizeH0(self, temp):\n        \"\"\"Collisional ionisation cooling rate for n_H0 and n_e. Gadget calls this GammaeH0.\"\"\"\n        #Ionisation potential of H0\n        return 13.5984 * self.eVinergs * self.recomb.GammaeH0(temp)\n\n    def CollisionalIonizeHe0(self, temp):\n        \"\"\"Collisional ionisation cooling rate for n_He0 and n_e. Gadget calls this GammaeHe0.\"\"\"\n        return 24.5874 * self.eVinergs * self.recomb.GammaeHe0(temp)\n\n    def CollisionalIonizeHeP(self, temp):\n        \"\"\"Collisional ionisation cooling rate for n_He+ and n_e. Gadget calls this GammaeHep.\"\"\"\n        return 54.417760 * self.eVinergs * self.recomb.GammaeHep(temp)\n\n    def CollisionalH0(self, temp):\n        \"\"\"Total collisional cooling for H0\"\"\"\n        return self.CollisionalExciteH0(temp) + self.CollisionalIonizeH0(temp)\n\n    def CollisionalHe0(self, temp):\n        \"\"\"Total collisional cooling for He0\"\"\"\n        return self.CollisionalExciteHe0(temp) + self.CollisionalIonizeHe0(temp)\n\n    def CollisionalHeP(self, temp):\n        \"\"\"Total collisional cooling for He+\"\"\"\n        return self.CollisionalExciteHeP(temp) + self.CollisionalIonizeHeP(temp)\n\n    def RecombHp(self, temp):\n        \"\"\"Recombination cooling rate for H+ and e. Gadget calls this AlphaHp.\"\"\"\n        return 0.75 * self.kB * temp * self.recomb.alphaHp(temp)\n\n    def RecombHeP(self, temp):\n        \"\"\"Recombination cooling rate for He+ and e. Gadget calls this AlphaHep.\"\"\"\n        #I'm not sure why they use 0.75 kT as the free energy of an electron.\n        #I would guess this is explained in Spitzer 1978.\n        return 0.75 * self.kB * temp * self.recomb.alphaHep(temp)+ self._RecombDielect(temp)\n\n    def RecombHePP(self, temp):\n        \"\"\"Recombination cooling rate for He++ and e. Gadget calls this AlphaHepp.\"\"\"\n        return 0.75 * self.kB * temp * self.recomb.alphaHepp(temp)\n\n    def _RecombDielect(self, temp):\n        \"\"\"Dielectronic recombination rate for He+ and e. Gadget calls this Alphad.\"\"\"\n        #What is this magic number?\n        return 6.526e-11*self.recomb.alphad(temp)\n\n    def FreeFree(self, temp, zz):\n        \"\"\"Free-free cooling rate for electrons scattering on ions without being captured.\n        Factors here are n_e and total ionized species:\n            (FreeFree(zz=1)*(n_H+ + n_He+) + FreeFree(zz=2)*n_He++)\"\"\"\n        return 1.426e-27*np.sqrt(temp)*zz**2*self._gff(temp,zz)\n\n    def _gff(self, temp, zz):\n        \"\"\"Formula for the Gaunt factor. 
KWH takes this from Spitzer 1978.\"\"\"\n        _ = zz\n        return 1.1+0.34*np.exp(-(5.5 - np.log10(temp))**2/3.)\n\n    def InverseCompton(self, temp, redshift):\n        \"\"\"Cooling rate for inverse Compton from the microwave background.\n        Multiply this only by n_e. Note the CMB temperature is hardcoded in KWH92 to 2.7.\"\"\"\n        tcmb_red = self.tcmb * (1+redshift)\n        #Thompson cross-section in cm^2\n        sigmat = 6.6524e-25\n        #Radiation density constant, 4 sigma_stefan-boltzmann / c in erg cm^-3 K^-4\n        rad_dens = 7.5657e-15\n        #Electron mass in g\n        me = 9.10938e-28\n        #Speed of light in cm/s\n        cc = 2.99792e10\n        return 4 * sigmat * rad_dens / (me*cc) * tcmb_red**4 * self.kB * (temp - tcmb_red)\n\nclass CoolingRatesSherwood(CoolingRatesKWH92):\n    \"\"\"The cooling rates used in the Sherwood simulation, Bolton et al 2017, in erg s^-1 cm^-3 (cgs).\n    Differences from KWH92 are updated recombination and collisional ionization rates, and the use of a\n    larger temperature correction factor than Cen 92.\n    \"\"\"\n    def __init__(self, tcmb=2.7255, recomb=None):\n        #Pass self explicitly to the unbound base __init__, and hand over a rate instance rather than the class.\n        CoolingRatesKWH92.__init__(self, tcmb = tcmb, t5_corr = 5e7, recomb=RecombRatesVerner96())\n\nclass CoolingRatesNyx(CoolingRatesKWH92):\n    \"\"\"The cooling rates used in the Nyx paper Lukic 2014, 1406.6361, in erg s^-1 cm^-3 (cgs).\n    All rates are divided by the abundance of the ions involved in the interaction.\n    So we are computing the cooling rate divided by n_e n_X. Temperatures in K.\n    Major differences from KWH are the use of the Scholz & Walters 1991\n    hydrogen collisional cooling rates, a less aggressive high temperature correction for helium, and\n    Shapiro & Kang 1987 for free free.\n    Older Black 1981 recombination cooling rates are used!\n    They use the recombination rates from Verner & Ferland 96, but do not change the cooling rates to match.\n    Ditto the ionization rates from Voronov 1997: they should also use these rates for collisional ionisation,\n    although this is harder because Scholz & Walters don't break their rates into ionization and excitation.\n    References:\n        Scholz & Walters 1991 (0.45% accuracy)\n        Black 1981 (recombination and helium)\n        Shapiro & Kang 1987\n    \"\"\"\n    def __init__(self, tcmb=2.7255, recomb=None):\n        CoolingRatesKWH92.__init__(self, tcmb = tcmb, t5_corr = 5e7, recomb=recomb)\n\n    def CollisionalH0(self, temp):\n        \"\"\"Collisional cooling rate for n_H0 and n_e. Gadget calls this BetaH0 + GammaeH0.\n        Formula from Eq. 23, Table 4 of Scholz & Walters, claimed good to 0.45 %.\n        Note though that they have two datasets which differ by a factor of two.\n        Differs from Cen 92 by a factor of two.\"\"\"\n        #Technically only good for T > 2000.\n        y = np.log(temp)\n        #Constant is 0.75/k_B in Rydberg\n        Ryd = 2.1798741e-11\n        tot = -0.75/self.kB*Ryd/temp\n        coeffslowT = [213.7913, 113.9492, 25.06062, 2.762755, 0.1515352, 3.290382e-3]\n        coeffshighT = [271.25446, 98.019455, 14.00728, 0.9780842, 3.356289e-2, 4.553323e-4]\n        for j in range(6):\n            tot += ((temp < 1e5)*coeffslowT[j]+(temp >=1e5)*coeffshighT[j])*(-y)**j\n        return 1e-20 * np.exp(tot)\n\n    def RecombHp(self, temp):\n        \"\"\"Recombination cooling rate for H+ and e. Gadget calls this AlphaHp.\n        Differs by O(10%) until 3x10^6.\"\"\"\n        return 2.851e-27 * np.sqrt(temp) * (5.914 - 0.5 * np.log(temp) + 0.01184 * temp**(1./3))\n\n    def RecombHePP(self, temp):\n        \"\"\"Recombination cooling rate for He++ and e. 
Gadget calls this AlphaHepp.\n Differs from Cen 92 by 10% until ~10^7\"\"\"\n return 1.140e-26 * np.sqrt(temp) * (6.607 - 0.5 * np.log(temp) + 7.459e-3 * temp**(1./3))\n\n def _gff(self, temp, zz):\n \"\"\"Formula for the Gaunt factor from Shapiro & Kang 1987. ZZ is 1 for H+ and He+ and 2 for He++.\n This is almost identical to the KWH rate but not continuous.\"\"\"\n #This is not continuous. Check the original reference.\n little = (temp/zz**2 <= 3.2e5)\n lt = np.log10(temp/zz**2)\n return little * (0.79464 + 0.1243*lt) + np.logical_not(little) * ( 2.13164 - 0.1240 * lt)\n" ]
[ [ "numpy.logical_not", "numpy.ones_like", "scipy.interpolate.InterpolatedUnivariateSpline", "numpy.log", "numpy.exp", "numpy.shape", "numpy.loadtxt", "numpy.power", "numpy.sqrt", "numpy.log10" ] ]
zahrag/BRLVBVC
[ "47c61eb69fbe96789b6a84c1510df0426bbcbfcc" ]
[ "collisions.py" ]
[ "import numpy as np\nimport csv\nimport matplotlib.pyplot as plt\nfrom matplotlib import rc\nrc('text', usetex=True)\n\nfrom scipy.special import gamma as scigamma\nfrom scipy.special import gammaln as scigammaln\nfrom collections import OrderedDict\n\n\ndef split_into_tasks(reader):\n out = list()\n tmp = OrderedDict()\n last_weather = -1\n last_start = -1\n last_end = -1\n for row in reader:\n weather = row['weather']\n startp = row['start_point']\n endp = row['end_point']\n if weather != last_weather or startp != last_start or endp != last_end:\n # Add new task\n if tmp:\n out.append(tmp)\n tmp = OrderedDict()\n last_weather = weather\n last_start = startp\n last_end = endp\n if tmp:\n for key in row:\n tmp[key].append(float(row[key]))\n else:\n for key in row:\n tmp[key] = [float(row[key])]\n# tmp.update(row)\n\n if tmp:\n out.append(tmp)\n\n return out\n\n\ndef get_total_distance_of_tasks(task_data):\n distance = list()\n for task in task_data:\n x_diff = np.diff(task['pos_x'])\n y_diff = np.diff(task['pos_y'])\n\n acc_dist = np.cumsum(np.sqrt(x_diff ** 2 + y_diff ** 2))\n distance.append(acc_dist[-1])\n\n return np.asarray(distance)\n\n\ndef get_successful_tasks(task_data, key):\n successes = list()\n for task in task_data:\n data = np.asarray(task[key])\n if np.any(data > 0):\n successes.append(False)\n else:\n successes.append(True)\n\n return np.asarray(successes)\n\n\ndef get_distance_between_infractions(task_data, key):\n distances = list()\n acc_dist = 0\n for task in task_data:\n data = np.array(task[key])\n x_diff = np.diff(task['pos_x'])\n y_diff = np.diff(task['pos_y'])\n\n distance = np.cumsum(np.sqrt(x_diff ** 2 + y_diff ** 2))\n if not np.any(data > 0):\n # Accumulate distance between tasks\n acc_dist += distance[-1]\n continue\n\n indices = np.flatnonzero(data)\n distances.append(acc_dist + distance[indices[0] - 1])\n # Reset accumulated distance for next infraction\n acc_dist = 0\n\n if not distances:\n distances.append(acc_dist)\n\n return np.asarray(distances)\n\n\ndef get_distance_to_first_infraction(task_data, key):\n distances = list()\n for task in task_data:\n data = np.array(task[key])\n if not np.any(data > 0):\n continue\n x_diff = np.diff(task['pos_x'])\n y_diff = np.diff(task['pos_y'])\n\n distance = np.cumsum(np.sqrt(x_diff**2 + y_diff**2))\n indices = np.flatnonzero(data)\n\n distances.append(distance[indices[0]-1])\n\n return np.asarray(distances)\n\n\ndef get_percentage_under_infraction(task_data, key, threshold):\n percentages = list()\n if type(key) != list:\n key = [key]\n for task in task_data:\n data = list()\n for k in key:\n data.append(np.asarray(task[k]))\n data = np.asarray(data)\n infractions = data > threshold\n infractions = np.logical_or.reduce(infractions)\n percentages.append(np.sum(infractions) / data.shape[1])\n return np.asarray(percentages)\n\n\ndef get_hist_of_infractions(task_data, nbins=100):\n data_offroad = []\n data_otherside = []\n for task in task_data:\n data_offroad += task['intersection_offroad']\n data_otherside += task['intersection_otherlane']\n\n data_offroad = np.asarray(data_offroad)\n data_otherside = np.asarray(data_otherside)\n data = np.concatenate((data_offroad, -data_otherside))\n hist = np.histogram(data, bins=nbins, range=(-1, 1))\n\n return data\n\n\ndef estimate_binomial_distribution(success_statuses):\n s = np.sum(success_statuses)\n f = len(success_statuses) - s\n return s, f\n\n\ndef estimate_beta_distribution(distances):\n dist = np.array(distances, dtype=np.float)\n x = np.mean(dist)\n v = 
np.var(dist)\n\n    alpha = x * (x*(1-x) / v - 1)\n    beta = (1 - x) * (x*(1-x) / v - 1)\n\n    return alpha, beta\n\n\n\ndef get_pdf_beta_posterior(s, f, prior='jeffreys'):\n    x = np.linspace(0.05, 0.95, 100)\n    if prior == 'bayes':\n        pdf = (x**s * (1-x)**f) * scigamma(s + f + 2) / (scigamma(s + 1) * scigamma(f + 1))\n    elif prior == 'jeffreys':\n        pdf = (x ** (s-0.5) * (1 - x) ** (f-0.5)) * scigamma(s + f + 1) / (scigamma(s + 0.5) * scigamma(f + 0.5))\n    return pdf, x\n\n\ndef get_pdf_beta_posterior_in_logarithm(s, f, prior='jeffreys'):\n    x = np.linspace(0.001, 0.999, 1000)\n    lx= np.log(x)\n    lnx = np.log(1-x)\n    if prior == 'jeffreys':\n        lpdf = (s-0.5)*lx + (f-0.5)*lnx + scigammaln(s + f + 1) - scigammaln(s + 0.5) - scigammaln(f + 0.5)\n    return np.exp(lpdf), x\n\n\ndef get_pdf_for_infractions(filename):\n\n    with open(filename, newline='') as csvfile:\n        reader = csv.DictReader(csvfile)\n\n        # Split measurements into tasks\n        tasks = split_into_tasks(reader)\n\n\n    infr_dists_other = get_distance_between_infractions(tasks, 'collision_other')\n    infr_dists_pedestrian = get_distance_between_infractions(tasks, 'collision_pedestrians')\n    infr_dists_vehicle = get_distance_between_infractions(tasks, 'collision_vehicles')\n\n    # Estimate for accidents\n    gamma_distr = list()\n\n    # Estimate for accidents\n    exp_distr = list()\n\n    # Estimate for percentage of non-collision\n    successes_other = get_successful_tasks(tasks, 'collision_other')\n    successes_pedestrians = get_successful_tasks(tasks, 'collision_pedestrians')\n    successes_vehicles = get_successful_tasks(tasks, 'collision_vehicles')\n\n    beta_distr = list()\n    s, f = estimate_binomial_distribution(successes_other)\n    pdf, x = get_pdf_beta_posterior_in_logarithm(s, f)\n    coll_free = {'s': s, 'f': f}\n\n    beta_distr.append({'pdf': pdf, 'params': (s, f)})\n    s, f = estimate_binomial_distribution(successes_pedestrians)\n    pdf, x = get_pdf_beta_posterior_in_logarithm(s, f)\n    beta_distr.append({'pdf': pdf, 'params': (s, f)})\n    s, f = estimate_binomial_distribution(successes_vehicles)\n    pdf, x = get_pdf_beta_posterior_in_logarithm(s, f)\n    beta_distr.append({'pdf': pdf, 'params': (s, f)})\n\n    beta_x = x\n\n    # No exponential/gamma fits are computed here; return empty placeholders\n    # so the unpacking in __main__ still works.\n    exp_x = list()\n    gamma_x = list()\n\n    return exp_distr, gamma_distr, beta_distr, tasks, exp_x, gamma_x, beta_x, coll_free\n\n\ndef get_success_status(filename):\n    success = list()\n    with open(filename, newline='') as csvfile:\n        reader = csv.DictReader(csvfile)\n        for line in reader:\n            reached_goal = int(line['result'])\n            success.append(reached_goal == 1)\n\n    return success\n\n\ndef calculate_KL_divergence_exponential(lambda1, lambda2):\n    \"\"\"\n    Calculates distance from true distribution to approximate distribution\n    Args:\n        lambda1: \"True\" distribution\n        lambda2: \"Approximate\" distribution\n\n    Returns:\n        KL divergence\n    \"\"\"\n    return np.log(lambda1) - np.log(lambda2) + lambda2 / lambda1 - 1\n\n\ndef sample_from_exponential_bayesian(data, a=1, b=1, nsamples=100):\n    dist = np.asarray(data)\n    xbar = np.mean(dist)\n    n = len(dist)\n\n    ls = np.random.gamma(a+n, 1/(b+n*xbar), nsamples)\n    x = np.linspace(0, 5*xbar, 100)\n    for l in ls:\n        # CDF of an exponential with rate l, computed inline\n        cdf = 1 - np.exp(-l * x)\n        plt.plot(x, cdf)\n    plt.show()\n\n\nif __name__ == '__main__':\n    #np.seterr(all='raise')\n\n    # Metrics for GT models\n    exp_distr = {}\n    gamma_distr = {}\n    beta_distr = {}\n    task_data = {}\n    exp_x = {}\n    gamma_x = {}\n    beta_x = {}\n    results = {}\n    beta_success = {}\n    beta_success_x = {}\n    folder_names = {}\n    beta_success_acc = {}\n    beta_coll_acc = {}\n    keys = ['setting']\n    for k in keys:\n        exp_distr[k] = list()\n        gamma_distr[k] = list()\n        beta_distr[k] = list()\n        task_data[k] = 
list()\n exp_x[k] = list()\n gamma_x[k] = list()\n beta_x[k] = list()\n results[k] = list()\n beta_success[k] = list()\n beta_success_x[k] = list()\n folder_names[k] = list()\n beta_success_acc[k] = list()\n beta_coll_acc[k] = list()\n\n\n base_name = {'setting': 'path/to/carla_results_folder'}\n nums = {'setting': None}\n\n\n for key in base_name:\n if nums[key] is not None:\n for n in nums[key]:\n folder_names[key].append(base_name[key] + '{}'.format(n))\n else:\n folder_names[key].append(base_name[key])\n\n\n for key in folder_names:\n s_acc_goal = 0\n f_acc_goal = 0\n s_acc_coll = 0\n f_acc_coll = 0\n for folder in folder_names[key]:\n exp_distr1, gamma_distr1, beta_distr1, task_data1, exp_x1, gamma_x1, beta_x1, coll_free = get_pdf_for_infractions(\n folder + '/measurements.csv')\n reached_goal = get_success_status(folder + '/summary.csv')\n exp_distr[key].append(exp_distr1)\n gamma_distr[key].append(gamma_distr1)\n beta_distr[key].append(beta_distr1)\n task_data[key].append(task_data1)\n exp_x[key].append(exp_x1)\n gamma_x[key].append(gamma_x1)\n beta_x[key].append(beta_x1)\n results[key].append(reached_goal)\n s, f = estimate_binomial_distribution(reached_goal)\n #pdf, x = get_pdf_beta_posterior(s, f)\n pdf, x = get_pdf_beta_posterior_in_logarithm(s, f)\n beta_success[key].append(pdf)\n beta_success_x[key].append(x)\n s_acc_goal += s\n f_acc_goal += f\n s_acc_coll += coll_free['s']\n f_acc_coll += coll_free['f']\n\n if len(folder_names[key]) > 1:\n pdf, x = get_pdf_beta_posterior_in_logarithm(s_acc_goal, f_acc_goal)\n beta_success_acc[key] = {'pdf': pdf, 'x': x}\n\n pdf, x = get_pdf_beta_posterior_in_logarithm(s_acc_coll, f_acc_coll)\n beta_coll_acc[key] = {'pdf': pdf, 'x': x}\n\n\n # Estimate a ranking of the models\n offroad_percentage = {}\n otherlane_percentage = {}\n combined_percentage = {}\n distance = {}\n accomplished_tasks = {}\n collisionfree_tasks = {}\n score = {}\n best_idx = {}\n worst_idx = {}\n median_idx = {}\n\n for k in keys:\n offroad_percentage[k] = {\"mean\": -1, \"median\": -1, \"best\": -1, \"worst\": -1, \"all\": list()}\n otherlane_percentage[k] = {\"mean\": -1, \"median\": -1, \"best\": -1, \"worst\": -1, \"all\": list()}\n combined_percentage[k] = {\"mean\": -1, \"median\": -1, \"best\": -1, \"worst\": -1, \"all\": list()}\n distance[k] = {\"mean\": -1, \"median\": -1, \"best\": -1, \"worst\": -1, \"all\": list()}\n accomplished_tasks[k] = {\"mean\": -1, \"median\": -1, \"best\": -1, \"worst\": -1, \"all\": list()}\n collisionfree_tasks[k] = {\"mean\": -1, \"median\": -1, \"best\": -1, \"worst\": -1, \"all\": list()}\n score[k] = list()\n best_idx[k] = -1\n worst_idx[k] = -1\n median_idx[k] = -1\n\n for key in keys:\n total_dist = 0.\n percentage_for_infraction = 0.2\n for i, (task, beta_d, reached_goal) in enumerate(zip(task_data[key], beta_distr[key], results[key])):\n offroad_percentage[key][\"all\"].append(100. * np.mean(get_percentage_under_infraction(task, 'intersection_offroad',\n percentage_for_infraction)))\n otherlane_percentage[key][\"all\"].append(100. * np.mean(get_percentage_under_infraction(task, 'intersection_otherlane',\n percentage_for_infraction)))\n combined_percentage[key][\"all\"].append(100. * np.mean(get_percentage_under_infraction(task,\n ['intersection_offroad', 'intersection_otherlane'],\n percentage_for_infraction)))\n\n distance[key][\"all\"].append(np.sum(get_total_distance_of_tasks(task)))\n\n s, fails = beta_d[0]['params']\n collisionfree_tasks[key][\"all\"].append( 100. 
* s / (s + fails) )\n res = np.sum(reached_goal)\n accomplished_tasks[key][\"all\"].append(100 * res / len(reached_goal))\n\n score[key].append((100. - combined_percentage[key][\"all\"][-1] + collisionfree_tasks[key][\"all\"][-1]\n + accomplished_tasks[key][\"all\"][-1])/300)\n\n scores = np.asarray(score[key])\n best_idx[key] = np.argmax(scores)\n worst_idx[key] = np.argmin(scores)\n median_idx[key] = np.argmax(np.median(scores) == scores)\n\n offroad_percentage[key][\"best\"] = offroad_percentage[key][\"all\"][best_idx[key]]\n offroad_percentage[key][\"worst\"] = offroad_percentage[key][\"all\"][worst_idx[key]]\n offroad_percentage[key][\"median\"] = np.median(offroad_percentage[key][\"all\"]) #[median_idx[key]]\n offroad_percentage[key][\"mean\"] = np.mean(offroad_percentage[key][\"all\"])\n\n otherlane_percentage[key][\"best\"] = otherlane_percentage[key][\"all\"][best_idx[key]]\n otherlane_percentage[key][\"worst\"] = otherlane_percentage[key][\"all\"][worst_idx[key]]\n otherlane_percentage[key][\"median\"] = np.median(otherlane_percentage[key][\"all\"]) # [median_idx[key]]\n otherlane_percentage[key][\"mean\"] = np.mean(otherlane_percentage[key][\"all\"])\n\n combined_percentage[key][\"best\"] = combined_percentage[key][\"all\"][best_idx[key]]\n combined_percentage[key][\"worst\"] = combined_percentage[key][\"all\"][worst_idx[key]]\n combined_percentage[key][\"median\"] = np.median(combined_percentage[key][\"all\"]) # [median_idx[key]]\n combined_percentage[key][\"mean\"] = np.mean(combined_percentage[key][\"all\"])\n\n accomplished_tasks[key][\"best\"] = accomplished_tasks[key][\"all\"][best_idx[key]]\n accomplished_tasks[key][\"worst\"] = accomplished_tasks[key][\"all\"][worst_idx[key]]\n accomplished_tasks[key][\"median\"] = np.median(accomplished_tasks[key][\"all\"]) # [median_idx[key]]\n accomplished_tasks[key][\"mean\"] = np.mean(accomplished_tasks[key][\"all\"])\n\n collisionfree_tasks[key][\"best\"] = collisionfree_tasks[key][\"all\"][best_idx[key]]\n collisionfree_tasks[key][\"worst\"] = collisionfree_tasks[key][\"all\"][worst_idx[key]]\n collisionfree_tasks[key][\"median\"] = np.median(collisionfree_tasks[key][\"all\"]) # [median_idx[key]]\n collisionfree_tasks[key][\"mean\"] = np.mean(collisionfree_tasks[key][\"all\"])\n\n distance[key][\"best\"] = distance[key][\"all\"][best_idx[key]]\n distance[key][\"worst\"] = distance[key][\"all\"][worst_idx[key]]\n distance[key][\"median\"] = np.median(distance[key][\"all\"]) # [median_idx[key]]\n distance[key][\"mean\"] = np.mean(distance[key][\"all\"])\n\n colours = ['cyan', 'brown', 'purple', 'red', 'blue', 'green']\n for i, key in enumerate(keys):\n plt.plot(beta_x[key][best_idx[key]], beta_distr[key][best_idx[key]][0]['pdf'], label=key, lw=3, color=colours[i])\n\n import matplotlib.patches as mpatches\n red_patch = mpatches.Patch(color='red', label='T*D*')\n purple_patch = mpatches.Patch(color='blue', label='RL')\n brown_path = mpatches.Patch(color='green', label='IL')\n\n plt.legend(handles=[red_patch, purple_patch, brown_path],\n loc='upper left', fontsize=24)\n plt.ylim([0, 30])\n plt.xlim([0, 1])\n plt.yticks([])\n plt.xticks([0, 0.25, 0.5, 0.75, 1.0], ['0\\%', '25\\%', '50\\%', '75\\%', '100\\%'], fontsize=28)\n plt.ylabel(r'$P(p_{\\neg Collision} = x\\%)$', fontsize=30)\n plt.show()\n\n for i, key in enumerate(keys):\n plt.plot(beta_success_x[key][best_idx[key]], beta_success[key][best_idx[key]], label=key, lw=3, color=colours[i])\n\n plt.legend(handles=[red_patch, purple_patch, brown_path],\n loc='upper left', 
fontsize=24)\n plt.ylim([0, 30])\n plt.xlim([0, 1])\n plt.yticks([])\n plt.xticks([0, 0.25, 0.5, 0.75, 1.0], ['0\\%', '25\\%', '50\\%', '75\\%', '100\\%'], fontsize=28)\n plt.ylabel(r'$P(p_{Success} = x\\%)$', fontsize=30)\n plt.subplots_adjust(hspace=0.05, wspace=0)\n plt.show()\n\n # Plot the success rate for all models in a specific training condition\n\n blue_patch = mpatches.Patch(color='blue', label='TGDG')\n orange_patch = mpatches.Patch(color='orange', label='TGDE')\n green_patch = mpatches.Patch(color='green', label='TEDE')\n red_patch = mpatches.Patch(color='red', label='TEDG')\n\n for i, key in enumerate(keys):\n if beta_success_acc[key]:\n plt.plot(beta_success_acc[key]['x'], beta_success_acc[key]['pdf'], label=key, lw=3)\n\n plt.legend(handles=[blue_patch, orange_patch, green_patch, red_patch],\n loc='upper left', fontsize=20)\n plt.ylim([0, 45])\n plt.xlim([0, 1])\n plt.yticks([])\n plt.xticks([0, 0.25, 0.5, 0.75, 1.0], ['0\\%', '25\\%', '50\\%', '75\\%', '100\\%'], fontsize=28)\n plt.ylabel(r'$P(p_{Success} = x\\%)$', fontsize=30)\n plt.savefig('results/betaSuccessTot.pdf', bbox_inches='tight')\n plt.show()\n\n # Plot the success rate for all models in a specific training condition\n for i, key in enumerate(keys):\n if beta_coll_acc[key]:\n plt.plot(beta_coll_acc[key]['x'], beta_coll_acc[key]['pdf'], label=key, lw=3)\n\n plt.legend(handles=[blue_patch, orange_patch, green_patch, red_patch],\n loc='upper left', fontsize=20)\n plt.ylim([0, 30])\n plt.xlim([0, 1])\n plt.yticks([])\n plt.xticks([0, 0.25, 0.5, 0.75, 1.0], ['0\\%', '25\\%', '50\\%', '75\\%', '100\\%'], fontsize=28)\n plt.ylabel(r'$P(p_{\\neg Collision} = x\\%)$', fontsize=30)\n plt.savefig('results/betaCollTot.pdf', bbox_inches='tight')\n plt.show()\n\n\n # Plotting histogram for out-of-road\n fig = plt.figure()\n nfigs = len(task_data)\n nbins = 13\n for j, key in enumerate(keys):\n nmodels = 1 # len(task_data[key])\n td = task_data[key][best_idx[key]]\n plt.subplot(nfigs, 1, j+1)\n hist_data = get_hist_of_infractions(td)\n plt.hist2d(hist_data, np.zeros_like(hist_data), bins=[nbins, 1], range=[[-1, 1], [0, 0]], normed=True)\n plt.yticks([]) #np.arange(0, nmodels), fontsize=14)\n plt.ylabel(key, fontsize=14)\n plt.ylim([-0.5, nmodels - 0.5])\n plt.xticks([])\n\n ax = plt.gca()\n ax.set_xticks(np.linspace(-1, 1, nbins+1), minor=True)\n ax.set_yticks(np.arange(-0.5, nmodels - 0.5, 1), minor=True)\n plt.grid(which='minor', lw=0.5, c='k')\n plt.xticks([-1., -0.5, 0., 0.5, 1.],\n ['100\\%', '50\\%', '0\\%', '50\\%', '100\\%'], fontsize=28)\n fig.subplots_adjust(hspace=0.05, wspace=0)\n plt.show()\n\n # Plotting histogram for out-of-road for each model set\n fig = plt.figure()\n nfigs = len(task_data)\n nbins = 13\n for j, key in enumerate(keys):\n nmodels = 1 # len(task_data[key])\n hist_data = np.zeros(nbins)\n for td in task_data[key]:\n hist_data = np.concatenate((hist_data,get_hist_of_infractions(td)))\n plt.subplot(nfigs, 1, j + 1)\n plt.hist2d(hist_data, np.zeros_like(hist_data), bins=[nbins, 1], range=[[-1, 1], [0, 0]], normed=True)\n plt.yticks([])\n plt.ylabel(key, fontsize=14)\n plt.ylim([-0.5, nmodels - 0.5])\n plt.xticks([])\n\n ax = plt.gca()\n ax.set_xticks(np.linspace(-1, 1, nbins + 1), minor=True)\n ax.set_yticks(np.arange(-0.5, nmodels - 0.5, 1), minor=True)\n plt.grid(which='minor', lw=0.5, c='k')\n plt.xticks([-1., -0.5, 0., 0.5, 1.],\n ['100\\%', '50\\%', '0\\%', '50\\%', '100\\%'], fontsize=28)\n fig.subplots_adjust(hspace=0.05, wspace=0)\n plt.show()\n\n" ]
[ [ "scipy.special.gamma", "matplotlib.pyplot.xlim", "numpy.argmin", "numpy.median", "numpy.exp", "numpy.mean", "matplotlib.pyplot.xticks", "numpy.concatenate", "numpy.histogram", "numpy.zeros_like", "numpy.log", "matplotlib.pyplot.savefig", "numpy.argmax", "numpy.arange", "numpy.sqrt", "matplotlib.pyplot.gca", "matplotlib.pyplot.subplots_adjust", "numpy.flatnonzero", "matplotlib.pyplot.subplot", "numpy.array", "numpy.zeros", "matplotlib.pyplot.yticks", "matplotlib.rc", "matplotlib.pyplot.figure", "numpy.diff", "matplotlib.patches.Patch", "matplotlib.pyplot.show", "scipy.special.gammaln", "numpy.asarray", "numpy.sum", "numpy.random.gamma", "matplotlib.pyplot.ylim", "matplotlib.pyplot.legend", "matplotlib.pyplot.plot", "matplotlib.pyplot.grid", "numpy.any", "matplotlib.pyplot.ylabel", "numpy.logical_or.reduce", "numpy.linspace", "numpy.var" ] ]
ostwind/ProteinLatentRep
[ "33d2667b6ee6dfeed0946fd9e2f22eef0e2960cc" ]
[ "Similarity_Reg/train.py" ]
[ "import torch \nimport numpy as np\nfrom torch.autograd import Variable\nfrom torch import nn\nfrom torch.optim import SGD\nfrom torch.utils.data.dataloader import DataLoader\nfrom torch.utils.data.dataset import Dataset\nfrom Bio import SeqIO\nimport matplotlib.pyplot as plt\n\ndef plot_loss(loss_list, epoch, train=True):\n plt.figure(figsize=(8,12))\n plt.plot(range(len(loss_list)), loss_list, label=\"last loss value: {0}\".format(loss_list[-1]))\n if train:\n plt.title(\"Train Loss Curve ({} Epochs)\".format(epoch))\n plt.xlabel(\"Epochs\")\n plt.ylabel(\"Loss\")\n plt.savefig(\"Train_Loss_Curve.png\")\n else:\n plt.title(\"Valid Loss Curve ({} Epochs)\".format(epoch)) \n plt.xlabel(\"Epochs\")\n plt.ylabel(\"Loss\")\n plt.savefig(\"Validation_Loss_Curve.png\")\n plt.close()\n\ndef _compute_loss(pred, label):\n difference = label - pred\n loss = torch.sum( torch.abs(difference))\n return loss, loss.data[0]\n \ndef evaluate_predictions(dev_data_iter, dev_poss_matches, model, epoch):\n #model.eval()\n losses = []\n for x in dev_data_iter:\n dev_embs, dev_known_labels = Variable(x[0], volatile=True), Variable(x[1], volatile=True)\n prediction = model.forward(dev_embs, dev_poss_matches)\n _, loss = _compute_loss(prediction, dev_known_labels) \n losses.append(loss)\n \n #model.train()\n return np.mean(losses)\n \ndef training_loop(\n batch_size, num_epochs, model, optim, data_iter, rna_options, dev_input,\n embed_file = 'unknown_model', \n print_every=50, eval_every=100):\n epoch = 0\n losses, avg_loss_each_epoch, valid_losses = [], [], []\n \n total_batches = int(len(data_iter))\n \n #print(rna_options)\n \n poss_matches = Variable(rna_options) # RNA matrix to right multiply \n \n while epoch <= num_epochs:\n for x in data_iter:\n data, label = Variable(x[0]), Variable(x[1]) \n \n model.train()\n model.zero_grad()\n\n prediction = model.forward(data, poss_matches)\n loss_var, loss = _compute_loss(prediction, label) \n\n losses.append(loss) \n loss_var.backward()\n optim.step()\n\n epoch += 1\n #avg_loss_each_epoch.append(np.mean(losses))\n if epoch % print_every == 0:\n print(\"TRAIN loss at epoch {0}: {1}\".format(epoch, loss ))\n print(\"TEST loss at epoch {0}: {1}\".format(epoch, valid_losses[-1]))\n print(' ')\n\n #print( \"TRAIN avg loss at epoch: \", (epoch), \"Avg Loss:\", np.mean(losses)/batch_size ) \n #plot_loss(avg_loss_each_epoch, epoch)\n if epoch % eval_every == 0:\n valid_loss = evaluate_predictions(dev_input, poss_matches, model, epoch)\n if epoch > 2000 and valid_loss > valid_losses[-1] :\n print(\"Lowest TEST loss at epoch {0}: {1}\".format(epoch, valid_loss))\n exit() \n torch.save( model.state_dict(), '%s_%s_SRSavedModel.pth' %(embed_file, valid_loss)) \n \n valid_losses.append(valid_loss)\n \n\n \n #plot_loss(valid_losses, epoch, False)\n \n# new_mat = known_matches - outs\n # loss = torch.bmm(new_mat.view(new_mat.size()[0], 1, new_mat.size()[1]),\n # new_mat.view(new_mat.size()[0], new_mat.size()[1], 1)\n # )\n\n #weight_sparsity=torch.sum((model.lin3.weight**2).sum(1))\n #weight_sparsity=0 #torch.sum((torch.abs(model.lin3.weight)).sum(1))\n #loss = torch.sqrt(loss) \n #loss = torch.div(torch.sum(loss.view(-1)), batch_size) + weight_sparsity\n " ]
[ [ "torch.autograd.Variable", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.savefig", "matplotlib.pyplot.close", "numpy.mean", "matplotlib.pyplot.figure", "torch.abs", "matplotlib.pyplot.ylabel" ] ]
douglasresende/lambda-deep-learning-demo
[ "ebbbd63c0abf87a1a4155b17cef145039b7a1ef7" ]
[ "source/network/seq2label_basic.py" ]
[ "import numpy as np\n\nimport tensorflow as tf\n\nrnn = tf.contrib.rnn\n\nEMBEDDING_SIZE = 200\nNUM_RNN_LAYER = 2\nRNN_SIZE = [128, 128]\n\n\ndef net(inputs, mask, num_classes, is_training, batch_size, vocab_size, embd=None, use_one_hot_embeddings=False):\n\n\n with tf.variable_scope(name_or_scope='seq2label_basic',\n reuse=tf.AUTO_REUSE):\n\n initial_state = ()\n for i_layer in range(NUM_RNN_LAYER):\n initial_state = initial_state + \\\n (rnn.LSTMStateTuple(tf.zeros([batch_size, RNN_SIZE[i_layer]], tf.float32),\n tf.zeros([batch_size, RNN_SIZE[i_layer]], tf.float32)),)\n\n cell = rnn.MultiRNNCell([rnn.LSTMCell(num_units=RNN_SIZE[i_layer])\n for i_layer in range(NUM_RNN_LAYER)])\n\n if len(embd) > 0:\n embeddingW = tf.get_variable(\n 'embedding',\n initializer=tf.constant(embd),\n trainable=True)\n else:\n embeddingW = tf.get_variable(\n 'embedding', [vocab_size, EMBEDDING_SIZE])\n\n # Only use the non-padded words\n sequence_length = tf.cast(tf.reduce_sum(mask, 1), tf.int32)\n\n input_feature = tf.nn.embedding_lookup(embeddingW, inputs)\n\n output, _ = tf.nn.dynamic_rnn(\n cell,\n input_feature,\n initial_state=initial_state,\n sequence_length=sequence_length)\n\n # The last output is the encoding of the entire sentence\n idx_gather = tf.concat(\n [tf.expand_dims(tf.range(tf.shape(output)[0], delta=1), axis=1),\n tf.expand_dims(sequence_length - 1, axis=1)], axis=1)\n\n last_output = tf.gather_nd(output, indices=idx_gather)\n\n logits = tf.layers.dense(\n inputs=last_output,\n units=2,\n activation=tf.identity,\n use_bias=True,\n kernel_initializer=tf.contrib.layers.variance_scaling_initializer(2.0),\n bias_initializer=tf.zeros_initializer())\n\n probabilities = tf.nn.softmax(logits, name='prob')\n\n return logits, probabilities" ]
[ [ "tensorflow.zeros_initializer", "tensorflow.zeros", "tensorflow.shape", "tensorflow.expand_dims", "tensorflow.gather_nd", "tensorflow.contrib.layers.variance_scaling_initializer", "tensorflow.constant", "tensorflow.variable_scope", "tensorflow.get_variable", "tensorflow.nn.embedding_lookup", "tensorflow.reduce_sum", "tensorflow.nn.softmax", "tensorflow.nn.dynamic_rnn" ] ]
compmech/particles
[ "d23ae69ad4c79024b997a232247b4ae0e1e7031c" ]
[ "meshless/espim/tests/test_calc_linear_buckling.py" ]
[ "import os\nimport inspect\n\nimport numpy as np\nfrom scipy.sparse import coo_matrix\nfrom composites.laminate import read_stack\nfrom structsolve import solve, lb\n\nfrom meshless.espim.read_mesh import read_mesh\nfrom meshless.espim.plate2d_calc_k0 import calc_k0\nfrom meshless.espim.plate2d_calc_kG import calc_kG\nfrom meshless.espim.plate2d_add_k0s import add_k0s\n\nTHISDIR = os.path.dirname(inspect.getfile(inspect.currentframe()))\n\ndef test_calc_linear_buckling():\n E11 = 71.e9\n nu = 0.33\n plyt = 0.007\n lam = read_stack([0], plyt=plyt, laminaprop=(E11, E11, nu))\n ans = {'edge-based': 41.85273, 'cell-based': 6.98852939,\n 'cell-based-no-smoothing': 4.921956}\n for prop_from_nodes in [True, False]:\n for k0s_method in ['edge-based', 'cell-based', 'cell-based-no-smoothing']:\n mesh = read_mesh(os.path.join(THISDIR, 'nastran_plate_16_nodes.dat'))\n for tria in mesh.elements.values():\n tria.prop = lam\n for node in mesh.nodes.values():\n node.prop = lam\n k0 = calc_k0(mesh, prop_from_nodes)\n add_k0s(k0, mesh, prop_from_nodes, k0s_method, alpha=0.2)\n\n # running static subcase first\n dof = 5\n n = k0.shape[0] // 5\n fext = np.zeros(n*dof, dtype=np.float64)\n fext[mesh.nodes[4].index*dof + 0] = -500.\n fext[mesh.nodes[7].index*dof + 0] = -500.\n fext[mesh.nodes[5].index*dof + 0] = -1000.\n fext[mesh.nodes[6].index*dof + 0] = -1000.\n\n # boundary conditions\n def bc(K):\n for i in [1, 10, 11, 12]:\n for j in [0, 1, 2]:\n K[mesh.nodes[i].index*dof+j, :] = 0\n K[:, mesh.nodes[i].index*dof+j] = 0\n\n for i in [2, 3, 4, 5, 6, 7, 8, 9]:\n for j in [1, 2]:\n K[mesh.nodes[i].index*dof+j, :] = 0\n K[:, mesh.nodes[i].index*dof+j] = 0\n\n bc(k0)\n k0 = coo_matrix(k0)\n d = solve(k0, fext, silent=True)\n kG = calc_kG(d, mesh, prop_from_nodes)\n bc(kG)\n kG = coo_matrix(kG)\n\n eigvals, eigvecs = lb(k0, kG, silent=True)\n print('k0s_method, eigvals[0]', k0s_method, eigvals[0])\n\n assert np.isclose(eigvals[0], ans[k0s_method])\n\nif __name__ == '__main__':\n test_calc_linear_buckling()\n\n" ]
[ [ "scipy.sparse.coo_matrix", "numpy.isclose", "numpy.zeros" ] ]
Panxj/models-1
[ "840f652d8d8ac2175551803bb17f4259e14161c5" ]
[ "fluid/text_classification/clouds/scdb_single_card.py" ]
[ "import unittest\nimport contextlib\nimport paddle.fluid as fluid\nimport paddle.v2 as paddle\nimport numpy as np\nimport sys\nimport time\nimport os\nimport json\nimport random\n\n\ndef to_lodtensor(data, place):\n \"\"\"\n convert to LODtensor\n \"\"\"\n seq_lens = [len(seq) for seq in data]\n cur_len = 0\n lod = [cur_len]\n for l in seq_lens:\n cur_len += l\n lod.append(cur_len)\n flattened_data = np.concatenate(data, axis=0).astype(\"int64\")\n flattened_data = flattened_data.reshape([len(flattened_data), 1])\n res = fluid.LoDTensor()\n res.set(flattened_data, place)\n res.set_lod([lod])\n return res\n\n\ndef load_vocab(filename):\n \"\"\"\n load imdb vocabulary\n \"\"\"\n vocab = {}\n with open(filename) as f:\n wid = 0\n for line in f:\n vocab[line.strip()] = wid\n wid += 1\n vocab[\"<unk>\"] = len(vocab)\n return vocab\n\n\ndef data2tensor(data, place):\n \"\"\"\n data2tensor\n \"\"\"\n input_seq = to_lodtensor(map(lambda x: x[0], data), place)\n y_data = np.array(map(lambda x: x[1], data)).astype(\"int64\")\n y_data = y_data.reshape([-1, 1])\n return {\"words\": input_seq, \"label\": y_data}\n\n\ndef data2pred(data, place):\n \"\"\"\n data2tensor\n \"\"\"\n input_seq = to_lodtensor(map(lambda x: x[0], data), place)\n y_data = np.array(map(lambda x: x[1], data)).astype(\"int64\")\n y_data = y_data.reshape([-1, 1])\n return {\"words\": input_seq}\n\n\ndef load_dict(vocab):\n \"\"\"\n Load dict from vocab\n \"\"\"\n word_dict = dict()\n with open(vocab, \"r\") as fin:\n for line in fin:\n cols = line.strip(\"\\r\\n\").decode(\"gb18030\").split(\"\\t\")\n word_dict[cols[0]] = int(cols[1])\n return word_dict\n\n\ndef save_dict(word_dict, vocab):\n \"\"\"\n Save dict into file\n \"\"\"\n with open(vocab, \"w\") as fout:\n for k, v in word_dict.iteritems():\n outstr = (\"%s\\t%s\\n\" % (k, v)).encode(\"gb18030\")\n fout.write(outstr)\n\n\ndef build_dict(fname):\n \"\"\"\n build word dict using trainset\n \"\"\"\n word_dict = dict()\n with open(fname, \"r\") as fin:\n for line in fin:\n try:\n words = line.strip(\"\\r\\n\").decode(\"gb18030\").split(\"\\t\")[\n 1].split(\" \")\n except:\n sys.stderr.write(\"[warning] build_dict: decode error\\n\")\n continue\n for w in words:\n if w not in word_dict:\n word_dict[w] = len(word_dict)\n return word_dict\n\n\ndef scdb_word_dict(vocab=\"scdb_data/train_set/train.vocab\"):\n \"\"\"\n get word_dict\n \"\"\"\n if not os.path.exists(vocab):\n w_dict = build_dict(train_file)\n save_dict(w_dict, vocab)\n else:\n w_dict = load_dict(vocab)\n w_dict[\"<unk>\"] = len(w_dict)\n return w_dict\n\n\ndef data_reader(fname, word_dict, is_dir=False):\n \"\"\"\n Convert word sequence into slot\n \"\"\"\n unk_id = len(word_dict)\n all_data = []\n filelist = []\n if is_dir:\n filelist = [fname + os.sep + f for f in os.listdir(fname)]\n else:\n filelist = [fname]\n\n for each_name in filelist:\n with open(each_name, \"r\") as fin:\n for line in fin:\n try:\n cols = line.strip(\"\\r\\n\").decode(\"gb18030\").split(\"\\t\")\n except:\n sys.stderr.write(\"warning: ignore decode error\\n\")\n continue\n\n label = int(cols[0])\n wids = [\n word_dict[x] if x in word_dict else unk_id\n for x in cols[1].split(\" \")\n ]\n all_data.append((wids, label))\n\n random.shuffle(all_data)\n\n def reader():\n for doc, label in all_data:\n yield doc, label\n\n return reader\n\n\ndef scdb_train_data(train_dir=\"scdb_data/train_set/corpus.train.seg\",\n w_dict=None):\n \"\"\"\n create train data\n \"\"\"\n return data_reader(train_dir, w_dict, True)\n\n\ndef 
scdb_test_data(test_file, w_dict):\n \"\"\"\n test_set=[\"car\", \"lbs\", \"spot\", \"weibo\", \n \"baby\", \"toutiao\", \"3c\", \"movie\", \"haogan\"]\n \"\"\"\n return data_reader(test_file, w_dict)\n\n\ndef bow_net(data,\n label,\n dict_dim,\n emb_dim=128,\n hid_dim=128,\n hid_dim2=96,\n class_dim=2):\n \"\"\"\n bow net\n \"\"\"\n emb = fluid.layers.embedding(input=data, size=[dict_dim, emb_dim])\n bow = fluid.layers.sequence_pool(input=emb, pool_type='sum')\n bow_tanh = fluid.layers.tanh(bow)\n fc_1 = fluid.layers.fc(input=bow_tanh, size=hid_dim, act=\"tanh\")\n fc_2 = fluid.layers.fc(input=fc_1, size=hid_dim2, act=\"tanh\")\n prediction = fluid.layers.fc(input=[fc_2], size=class_dim, act=\"softmax\")\n cost = fluid.layers.cross_entropy(input=prediction, label=label)\n avg_cost = fluid.layers.mean(x=cost)\n acc = fluid.layers.accuracy(input=prediction, label=label)\n\n return avg_cost, acc, prediction\n\n\ndef cnn_net(data,\n label,\n dict_dim,\n emb_dim=128,\n hid_dim=128,\n hid_dim2=96,\n class_dim=2,\n win_size=3):\n \"\"\"\n conv net\n \"\"\"\n emb = fluid.layers.embedding(input=data, size=[dict_dim, emb_dim])\n\n conv_3 = fluid.nets.sequence_conv_pool(\n input=emb,\n num_filters=hid_dim,\n filter_size=win_size,\n act=\"tanh\",\n pool_type=\"max\")\n\n fc_1 = fluid.layers.fc(input=[conv_3], size=hid_dim2)\n\n prediction = fluid.layers.fc(input=[fc_1], size=class_dim, act=\"softmax\")\n cost = fluid.layers.cross_entropy(input=prediction, label=label)\n avg_cost = fluid.layers.mean(x=cost)\n acc = fluid.layers.accuracy(input=prediction, label=label)\n\n return avg_cost, acc, prediction\n\n\ndef lstm_net(data,\n label,\n dict_dim,\n emb_dim=128,\n hid_dim=128,\n hid_dim2=96,\n class_dim=2,\n emb_lr=30.0):\n \"\"\"\n lstm net\n \"\"\"\n emb = fluid.layers.embedding(\n input=data,\n size=[dict_dim, emb_dim],\n param_attr=fluid.ParamAttr(learning_rate=emb_lr))\n\n fc0 = fluid.layers.fc(input=emb, size=hid_dim * 4, act='tanh')\n\n lstm_h, c = fluid.layers.dynamic_lstm(\n input=fc0, size=hid_dim * 4, is_reverse=False)\n\n lstm_max = fluid.layers.sequence_pool(input=lstm_h, pool_type='max')\n lstm_max_tanh = fluid.layers.tanh(lstm_max)\n\n fc1 = fluid.layers.fc(input=lstm_max_tanh, size=hid_dim2, act='tanh')\n\n prediction = fluid.layers.fc(input=fc1, size=class_dim, act='softmax')\n\n cost = fluid.layers.cross_entropy(input=prediction, label=label)\n avg_cost = fluid.layers.mean(x=cost)\n acc = fluid.layers.accuracy(input=prediction, label=label)\n\n return avg_cost, acc, prediction\n\n\ndef bilstm_net(data,\n label,\n dict_dim,\n emb_dim=128,\n hid_dim=128,\n hid_dim2=96,\n class_dim=2,\n emb_lr=30.0):\n \"\"\"\n lstm net\n \"\"\"\n emb = fluid.layers.embedding(\n input=data,\n size=[dict_dim, emb_dim],\n param_attr=fluid.ParamAttr(learning_rate=emb_lr))\n\n fc0 = fluid.layers.fc(input=emb, size=hid_dim * 4, act='tanh')\n\n rfc0 = fluid.layers.fc(input=emb, size=hid_dim * 4, act='tanh')\n\n lstm_h, c = fluid.layers.dynamic_lstm(\n input=fc0, size=hid_dim * 4, is_reverse=False)\n\n rlstm_h, c = fluid.layers.dynamic_lstm(\n input=rfc0, size=hid_dim * 4, is_reverse=True)\n\n lstm_last = fluid.layers.sequence_last_step(input=lstm_h)\n rlstm_last = fluid.layers.sequence_last_step(input=rlstm_h)\n\n lstm_last_tanh = fluid.layers.tanh(lstm_last)\n rlstm_last_tanh = fluid.layers.tanh(rlstm_last)\n\n lstm_concat = fluid.layers.concat(input=[lstm_last, rlstm_last], axis=1)\n\n fc1 = fluid.layers.fc(input=lstm_concat, size=hid_dim2, act='tanh')\n\n prediction = fluid.layers.fc(input=fc1, 
size=class_dim, act='softmax')\n\n cost = fluid.layers.cross_entropy(input=prediction, label=label)\n avg_cost = fluid.layers.mean(x=cost)\n acc = fluid.layers.accuracy(input=prediction, label=label)\n\n return avg_cost, acc, prediction\n\n\ndef gru_net(data,\n label,\n dict_dim,\n emb_dim=128,\n hid_dim=128,\n hid_dim2=96,\n class_dim=2,\n emb_lr=30.0):\n \"\"\"\n gru net\n \"\"\"\n emb = fluid.layers.embedding(\n input=data,\n size=[dict_dim, emb_dim],\n param_attr=fluid.ParamAttr(learning_rate=emb_lr))\n\n fc0 = fluid.layers.fc(input=emb, size=hid_dim * 3)\n\n gru_h = fluid.layers.dynamic_gru(input=fc0, size=hid_dim, is_reverse=False)\n\n gru_max = fluid.layers.sequence_pool(input=gru_h, pool_type='max')\n gru_max_tanh = fluid.layers.tanh(gru_max)\n\n fc1 = fluid.layers.fc(input=gru_max_tanh, size=hid_dim2, act='tanh')\n\n prediction = fluid.layers.fc(input=fc1, size=class_dim, act='softmax')\n\n cost = fluid.layers.cross_entropy(input=prediction, label=label)\n avg_cost = fluid.layers.mean(x=cost)\n acc = fluid.layers.accuracy(input=prediction, label=label)\n\n return avg_cost, acc, prediction\n\n\ndef infer(test_reader, use_cuda, model_path=None):\n \"\"\"\n inference function\n \"\"\"\n if model_path is None:\n print(str(model_path) + \" cannot be found\")\n return\n\n place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()\n exe = fluid.Executor(place)\n\n inference_scope = fluid.core.Scope()\n with fluid.scope_guard(inference_scope):\n [inference_program, feed_target_names,\n fetch_targets] = fluid.io.load_inference_model(model_path, exe)\n\n class2_list, class3_list = [], []\n for each_test_reader in test_reader:\n class2_acc, class3_acc = 0.0, 0.0\n total_count, neu_count = 0, 0\n\n for data in each_test_reader():\n pred = exe.run(inference_program,\n feed=data2pred(data, place),\n fetch_list=fetch_targets,\n return_numpy=True)\n\n for i, val in enumerate(data):\n pos_score = pred[0][i, 1]\n true_label = val[1]\n if true_label == 2.0 and pos_score > 0.5:\n class2_acc += 1\n if true_label == 0.0 and pos_score < 0.5:\n class2_acc += 1\n\n if true_label == 2.0 and pos_score > 0.55:\n class3_acc += 1\n if true_label == 1.0 and pos_score > 0.45 and pos_score <= 0.55:\n class3_acc += 1\n if true_label == 0.0 and pos_score <= 0.45:\n class3_acc += 1\n\n if true_label == 1.0:\n neu_count += 1\n\n total_count += len(data)\n\n class2_acc = class2_acc / (total_count - neu_count)\n class3_acc = class3_acc / total_count\n class2_list.append(class2_acc)\n class3_list.append(class3_acc)\n\n class2_acc = sum(class2_list) / len(class2_list)\n class3_acc = sum(class3_list) / len(class3_list)\n print(\"[test info] model_path: %s, class2_acc: %f, class3_acc: %f\" %\n (model_path, class2_acc, class3_acc))\n\n\ndef start_train(train_reader,\n test_reader,\n word_dict,\n network,\n use_cuda,\n parallel,\n save_dirname,\n lr=0.2,\n batch_size=128,\n pass_num=30):\n \"\"\"\n train network\n \"\"\"\n data = fluid.layers.data(\n name=\"words\", shape=[1], dtype=\"int64\", lod_level=1)\n\n label = fluid.layers.data(name=\"label\", shape=[1], dtype=\"int64\")\n\n cost, acc, pred = network(data, label, len(word_dict) + 1)\n\n sgd_optimizer = fluid.optimizer.Adagrad(learning_rate=lr)\n sgd_optimizer.minimize(cost)\n\n place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()\n exe = fluid.Executor(place)\n feeder = fluid.DataFeeder(feed_list=[data, label], place=place)\n\n exe.run(fluid.default_startup_program())\n for pass_id in xrange(pass_num):\n data_size, data_count, total_acc, total_cost = 0, 0, 
0.0, 0.0\n for data in train_reader():\n avg_cost_np, avg_acc_np = exe.run(fluid.default_main_program(),\n feed=feeder.feed(data),\n fetch_list=[cost, acc])\n data_size = len(data)\n total_acc += data_size * avg_acc_np\n total_cost += data_size * avg_cost_np\n data_count += data_size\n\n avg_cost = total_cost / data_count\n avg_acc = total_acc / data_count\n print(\"[train info]: pass_id: %d, avg_acc: %f, avg_cost: %f\" %\n (pass_id, avg_acc, avg_cost))\n\n epoch_model = save_dirname + \"/\" + \"epoch\" + str(pass_id)\n fluid.io.save_inference_model(epoch_model, [\"words\"], pred, exe)\n infer(test_reader, False, epoch_model)\n\n\ndef train_net(vocab=\"./thirdparty/train.vocab\",\n train_dir=\"./train\",\n test_list=[\"car\", \"spot\", \"weibo\", \"lbs\"]):\n w_dict = scdb_word_dict(vocab=vocab)\n test_files = [\"./thirdparty\" + os.sep + f for f in test_list]\n\n train_reader = paddle.batch(\n scdb_train_data(train_dir, w_dict), batch_size=256)\n\n test_reader = [paddle.batch(scdb_test_data(test_file, w_dict), batch_size = 50) \\\n for test_file in test_files]\n\n start_train(\n train_reader,\n test_reader,\n w_dict,\n bow_net,\n use_cuda=False,\n parallel=False,\n save_dirname=\"scdb_bow_model\",\n lr=0.002,\n pass_num=10,\n batch_size=256)\n\n\nif __name__ == \"__main__\":\n train_net()\n" ]
[ [ "numpy.concatenate" ] ]
andres091096/FPGA_FiniteDifference
[ "8d9b1db1884c7f5d83dfa6c04d70c2402372b333" ]
[ "plot_results.py" ]
[ "import numpy as np\nimport struct\nfrom matplotlib import pyplot as plt\n\npixels = 64;\n\nimg = []\nf = open(\"data/U.bin\",\"rb\")\nwhile True:\n byte=f.read(1)\n if not byte:\n break;\n value = struct.unpack('B', byte)[0]\n img.append(value)\n\nf.close()\n\nfig, axes = plt.subplots(nrows=1, ncols=4, figsize=(12, 6),\n sharex=True, sharey=True)\nax = axes.ravel()\n\nimg = np.array(img)\nimg = (img.astype(np.float32) / max(img))*255\nimg = np.uint8(img)\nimg = np.reshape(img,(pixels,pixels))\n\ncpu_reconstruction = []\nf = open(\"data/cpu_Ux.bin\",\"rb\")\nwhile True:\n byte=f.read(4)\n if not byte:\n break;\n value = struct.unpack('f', byte)[0]\n cpu_reconstruction.append(value)\n\nf.close()\ncpu_reconstruction = np.array(cpu_reconstruction)\ncpu_reconstruction = (cpu_reconstruction.astype(np.float32) / max(cpu_reconstruction))*255\ncpu_reconstruction = np.uint8(cpu_reconstruction)\ncpu_reconstruction = np.reshape(cpu_reconstruction,(pixels,pixels))\n\n\nrtl_reconstruction = []\nf = open(\"data/rtl_Ux.bin\",\"rb\")\nwhile True:\n byte=f.read(4)\n if not byte:\n break;\n value = struct.unpack('f', byte)[0]\n rtl_reconstruction.append(value)\n\nf.close()\nrtl_reconstruction = np.array(rtl_reconstruction)\nrtl_reconstruction = (rtl_reconstruction.astype(np.float32) / max(rtl_reconstruction))*255\nrtl_reconstruction = np.uint8(rtl_reconstruction)\nrtl_reconstruction = np.reshape(rtl_reconstruction,(pixels,pixels))\n\n\nrtl_divergence = []\nf = open(\"data/divU.bin\",\"rb\")\nwhile True:\n byte=f.read(4)\n if not byte:\n break;\n value = struct.unpack('f', byte)[0]\n rtl_divergence.append(value)\n\nf.close()\nrtl_divergence = np.array(rtl_divergence)\nrtl_divergence = (rtl_divergence.astype(np.float32) / max(rtl_divergence))*255\nrtl_divergence = np.uint8(rtl_divergence)\nrtl_divergence = np.reshape(rtl_divergence,(pixels,pixels))\n\n\nax[0].imshow(img, cmap=plt.cm.gray)\nax[0].set_title('Original image')\n\nax[1].imshow(cpu_reconstruction, cmap=plt.cm.gray)\nax[1].set_title('CPU X-Derivate')\n\nax[2].imshow(rtl_reconstruction, cmap=plt.cm.gray)\nax[2].set_title('RTL X-Derivate')\n\nax[3].imshow(rtl_divergence, cmap=plt.cm.gray)\nax[3].set_title('RTL Divergence')\nplt.show()\n" ]
[ [ "numpy.uint8", "numpy.array", "numpy.reshape", "matplotlib.pyplot.subplots", "matplotlib.pyplot.show" ] ]
taneemishere/Live_Face_Recognizer
[ "debe0f052d438de4ba6e76a04422614546f2b567" ]
[ "main.py" ]
[ "import cv2\r\nimport numpy as np\r\nimport face_recognition\r\n\r\n# Capture the video from the webcam\r\nwebcam = cv2.VideoCapture(0)\r\n\r\n# Loading the known faces images.\r\nimran_image = face_recognition.load_image_file(\"imrankhan.jpg\")\r\nimran_face_encoding = face_recognition.face_encodings(imran_image)[0]\r\n\r\ndonald_image = face_recognition.load_image_file(\"donald.jpg\")\r\ndonald_face_encoding = face_recognition.face_encodings(donald_image)[0]\r\n\r\n# Creating array of known face encodings and their names\r\nknown_face_encodings_array = [\r\n imran_face_encoding,\r\n donald_face_encoding\r\n]\r\nknown_face_names_array = [\r\n \"Imran Khan\",\r\n \"Donald Trump\"\r\n]\r\n\r\n# Initialize some variables\r\nface_locations = []\r\nface_encodings = []\r\nface_names = []\r\nprocess_this_frame = True\r\n\r\nwhile True:\r\n # Reading the current frame from web-cam\r\n # The successful_frame_read is boolean and frame is an array\r\n successful_frame, frame = webcam.read()\r\n\r\n # if there is an error break out of the loop\r\n if not successful_frame:\r\n break\r\n\r\n # Resize frame of to one forth of the size for faster processing\r\n # (src=frame, dsize=(0, 0), fx, fy = scale factor along the horizontal and vertical axis)\r\n small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)\r\n\r\n # Converting the frame from BGR cv2 default to RGB for face_recognition\r\n rgb_small_frame = small_frame[:, :, ::-1]\r\n\r\n if process_this_frame:\r\n # Find all the faces and face encodings in the current frame of video\r\n\r\n # face_locations returns a list of tuples of found faces locations\r\n # in top, right, bottom, left order\r\n face_locations = face_recognition.face_locations(rgb_small_frame)\r\n # face_encodings returns a list of 128d face encodings (one for each face in the image)\r\n # face_encodings (face_images=rgb_small_frame, known_face_locations= face_locations)\r\n face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)\r\n\r\n face_names = []\r\n for index in face_encodings:\r\n # Check whether or not the face matches for the known faces\r\n\r\n # compare_faces(known_face_encodings = known_face_encodings_array,\r\n # face_encoding_to_check = index, tolerance=0.6)\r\n matches = face_recognition.compare_faces(known_face_encodings_array, index)\r\n name = \"Unknown\"\r\n\r\n # Using the known face with the smallest distance to the new face\r\n\r\n # face_distance(face_encodings = known_face_encodings_array, face_to_compare = index)\r\n # returns a numpy ndarray with the distance for each face in the same order as the faces array\r\n face_distances = face_recognition.face_distance(known_face_encodings_array, index)\r\n\r\n # Returns the indices of the minimum values along an axis\r\n best_match_index = np.argmin(face_distances)\r\n if matches[best_match_index]:\r\n name = known_face_names_array[best_match_index]\r\n\r\n face_names.append(name)\r\n\r\n process_this_frame = not process_this_frame\r\n\r\n # Display the results\r\n # x, y, w, h\r\n # the zip() returns a zip object which is an iterator of tuples where the\r\n # first item in each passed iterator is paired together, and then the second\r\n # item in each passed iterator are paired together etc.\r\n for (top, right, bottom, left), name in zip(face_locations, face_names):\r\n\r\n # Scale back up face locations since the frame we detected in was scaled to one forth size\r\n top *= 4\r\n right *= 4\r\n bottom *= 4\r\n left *= 4\r\n\r\n # Draw a box around the face\r\n cv2.rectangle(frame, 
(left, top), (right, bottom), (0, 0, 255), 2)\r\n\r\n # Draw a label with a name below the face\r\n cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)\r\n font = cv2.FONT_HERSHEY_DUPLEX\r\n # For putting the name over the known faces\r\n cv2.putText(frame, name, (left + 6, bottom - 6), font, 0.8, (255, 255, 255), 1)\r\n\r\n # Display the resulting footage\r\n cv2.imshow('Face Recognizer', frame)\r\n\r\n # If q is pressed quit the window\r\n if cv2.waitKey(1) & 0xFF == ord('q'):\r\n break\r\n\r\n# Release handle to the webcam\r\nwebcam.release()\r\ncv2.destroyAllWindows()\r\n" ]
[ [ "numpy.argmin" ] ]
ashwin-M-D/DM-Gym
[ "f468c175d16b09d88edc21d77b6755ca2d35fc13" ]
[ "dm_gym/rewards/ClusteringEnv_2_reward.py" ]
[ "import numpy as np\nimport math\n\n\n\nclass Reward_Function:\n\n def __init__(self):\n pass\n\n def reward_function(self, obs, action, centroids):\n reward = 0\n\n y_i = self.get_yi(centroids, obs, action)\n p = self.get_p_i(centroids[action], obs)\n\n '''\n p_is = []\n for coords in centroids:\n p_is.append(self.get_p_i(coords, obs))\n p_is = np.array(p_is)/sum(p_is)\n\n p = p_is[action]\n reward = p\n '''\n\n if(y_i == 1):\n #reward = 1/(y_i-p)\n #reward = 1\n reward = p\n else:\n #reward = -1/(y_i-p)\n #reward = -1\n reward = -1*(1-p)\n\n return reward, y_i, p\n\n def get_yi(self, coordinates, obs, action):\n dist = []\n for coor in coordinates:\n c = np.array(coor)\n d = np.array(obs)\n dist.append(np.linalg.norm(c-d))\n\n y_i = dist.index(min(dist))\n if(y_i == action):\n y_i = 1\n else:\n y_i = 0\n\n return y_i\n \n def get_p_i(self, coordinates, obs):\n s_i = np.linalg.norm(np.array(coordinates) - np.array(obs))\n f_si = 1 / 1 + math.exp(-s_i)\n p_i = 2 * (1 - f_si)\n return p_i\n\n" ]
[ [ "numpy.array", "numpy.linalg.norm" ] ]
simplelifetime/LXMERT-MPOCompressed
[ "ecb5d0e0fd3d57f9cbb965c353b8f0637fc0e2b0" ]
[ "src/tasks/vqa.py" ]
[ "# coding=utf-8\n# Copyleft 2019 project LXRT.\n\nimport os\nimport collections\n\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data.dataloader import DataLoader\nfrom tqdm import tqdm\n\nfrom param import args\nfrom pretrain.qa_answer_table import load_lxmert_qa\nfrom tasks.vqa_model import VQAModel\nfrom tasks.vqa_data import VQADataset, VQATorchDataset, VQAEvaluator\nimport IPython\nimport time\nfrom compress_tools.Linear2MPO import Linear2MPO\n\nDataTuple = collections.namedtuple(\"DataTuple\", 'dataset loader evaluator')\n\n\ndef get_data_tuple(splits: str, bs: int, shuffle=False, drop_last=False) -> DataTuple:\n dset = VQADataset(splits)\n tset = VQATorchDataset(dset)\n evaluator = VQAEvaluator(dset)\n data_loader = DataLoader(\n tset, batch_size=bs,\n shuffle=shuffle, num_workers=args.num_workers,\n drop_last=drop_last, pin_memory=True\n )\n\n return DataTuple(dataset=dset, loader=data_loader, evaluator=evaluator)\n\n\ndef get_parameter_number(net):\n '''\n :param net: model class\n :return: params statistics\n '''\n total_num = sum(p.numel() for p in net.parameters()) / 1000 / 1000\n trainable_num = sum(p.numel() for p in net.parameters()\n if p.requires_grad) / 1000 / 1000\n return {'Total(M)': total_num, 'Trainable(M)': trainable_num}\n\n\nclass VQA:\n def __init__(self):\n # Datasets\n self.train_tuple = get_data_tuple(\n args.train, bs=args.batch_size, shuffle=True, drop_last=True\n )\n if args.valid != \"\":\n self.valid_tuple = get_data_tuple(\n args.valid, bs=1024,\n shuffle=False, drop_last=False\n )\n else:\n self.valid_tuple = None\n\n # Model\n self.model = VQAModel(self.train_tuple.dataset.num_answers)\n\n # Load pre-trained weights\n if args.load_lxmert is not None:\n self.model.lxrt_encoder.load(args.load_lxmert)\n print(\"loading weights from pretrained\")\n if args.load_lxmert_qa is not None:\n load_lxmert_qa(args.load_lxmert_qa, self.model,\n label2ans=self.train_tuple.dataset.label2ans)\n print(\"loading weights for fine-tuning\")\n self.model.from_pretrained_mpo()\n\n # GPU options\n self.model = self.model.cuda()\n if args.multiGPU:\n print(\"using multiple gpus!\")\n self.model.lxrt_encoder.multi_gpu()\n\n # Loss and Optimizer\n self.bce_loss = nn.BCEWithLogitsLoss()\n if 'bert' in args.optim:\n batch_per_epoch = len(self.train_tuple.loader)\n t_total = int(batch_per_epoch * args.epochs)\n print(\"BertAdam Total Iters: %d\" % t_total)\n from lxrt.optimization import BertAdam\n self.optim = BertAdam(list(filter(lambda p: p.requires_grad, self.model.parameters())),\n lr=args.lr,\n warmup=0.1,\n t_total=t_total)\n else:\n self.optim = args.optimizer(\n filter(lambda p: p.requires_grad, self.model.parameters()), args.lr)\n\n # Output Directory\n self.output = args.output\n os.makedirs(self.output, exist_ok=True)\n\n def train(self, train_tuple, eval_tuple):\n s_time = time.process_time()\n dset, loader, evaluator = train_tuple\n iter_wrapper = (lambda x: tqdm(x, total=len(loader))\n ) if args.tqdm else (lambda x: x)\n\n best_valid = 0.\n with open(self.output + \"/log.log\", 'a') as f:\n f.write(str(get_parameter_number(self.model)))\n f.flush()\n for epoch in range(args.epochs):\n quesid2ans = {}\n for i, (ques_id, feats, boxes, sent, target) in iter_wrapper(enumerate(loader)):\n\n self.model.train()\n self.optim.zero_grad()\n\n feats, boxes, target = feats.cuda(), boxes.cuda(), target.cuda()\n logit = self.model(feats, boxes, sent)\n assert logit.dim() == target.dim() == 2\n loss = self.bce_loss(logit, target)\n loss = loss * logit.size(1)\n\n 
loss.backward()\n nn.utils.clip_grad_norm_(self.model.parameters(), 5.)\n self.optim.step()\n\n score, label = logit.max(1)\n for qid, l in zip(ques_id, label.cpu().numpy()):\n ans = dset.label2ans[l]\n quesid2ans[qid.item()] = ans\n\n log_str = \"\\nEpoch %d: Train %0.2f\\n\" % (\n epoch, evaluator.evaluate(quesid2ans) * 100.)\n\n if self.valid_tuple is not None: # Do Validation\n valid_score = self.evaluate(eval_tuple)\n if valid_score > best_valid:\n best_valid = valid_score\n self.save(\"BEST\")\n\n log_str += \"Epoch %d: Valid %0.2f\\n\" % (epoch, valid_score * 100.) + \\\n \"Epoch %d: Best %0.2f\\n\" % (\n epoch, best_valid * 100.)\n\n print(log_str, end='')\n\n with open(self.output + \"/log.log\", 'a') as f:\n f.write(log_str)\n f.flush()\n e_time = time.process_time()\n with open(self.output + \"/log.log\", 'a') as f:\n f.write(str(e_time-s_time))\n f.flush()\n self.save(\"LAST\")\n\n def predict(self, eval_tuple: DataTuple, dump=None):\n \"\"\"\n Predict the answers to questions in a data split.\n\n :param eval_tuple: The data tuple to be evaluated.\n :param dump: The path of saved file to dump results.\n :return: A dict of question_id to answer.\n \"\"\"\n self.model.eval()\n dset, loader, evaluator = eval_tuple\n quesid2ans = {}\n for i, datum_tuple in tqdm(enumerate(loader)):\n # Avoid seeing ground truth\n ques_id, feats, boxes, sent = datum_tuple[:4]\n with torch.no_grad():\n feats, boxes = feats.cuda(), boxes.cuda()\n logit = self.model(feats, boxes, sent)\n score, label = logit.max(1)\n for qid, l in zip(ques_id, label.cpu().numpy()):\n ans = dset.label2ans[l]\n quesid2ans[qid.item()] = ans\n if dump is not None:\n evaluator.dump_result(quesid2ans, dump)\n return quesid2ans\n\n def evaluate(self, eval_tuple: DataTuple, dump=None):\n \"\"\"Evaluate all data in data_tuple.\"\"\"\n quesid2ans = self.predict(eval_tuple, dump)\n return eval_tuple.evaluator.evaluate(quesid2ans)\n\n @staticmethod\n def oracle_score(data_tuple):\n dset, loader, evaluator = data_tuple\n quesid2ans = {}\n for i, (ques_id, feats, boxes, sent, target) in enumerate(loader):\n _, label = target.max(1)\n for qid, l in zip(ques_id, label.cpu().numpy()):\n ans = dset.label2ans[l]\n quesid2ans[qid.item()] = ans\n return evaluator.evaluate(quesid2ans)\n\n def save(self, name):\n torch.save(self.model.state_dict(),\n os.path.join(self.output, \"%s.pth\" % name))\n\n def load(self, path):\n print(\"Load model from %s\" % path)\n state_dict = torch.load(\"%s.pth\" % path)\n self.model.load_state_dict(state_dict, strict=True)\n\n def load_from_pretrained_mpo(self):\n self.model.load_from_pretrained_mpo()\n\n\ndef Model2Mpo(module, prefix='', exclude_module=\"\"):\n for name, child in module._modules.items():\n if name in exclude_module:\n print(prefix+name+\" excluded\")\n continue\n if child is not None:\n newmodule = getattr(module, name)\n if isinstance(newmodule, nn.Linear):\n print(prefix+name)\n mpo_module = Linear2MPO(newmodule, tensor_learn=True)\n mpo_module.from_pretrained(newmodule)\n setattr(module, name, mpo_module)\n Model2Mpo(child, prefix + name + '.',exclude_module=\"pooler,visn_fc\")\n\n\nif __name__ == \"__main__\":\n # Build Class\n vqa = VQA()\n\n # Load VQA model weights\n # Note: It is different from loading LXMERT pre-trained weights.\n if args.load is not None:\n vqa.model.from_pretrained_mpo()\n vqa.load(args.load)\n\n print(get_parameter_number(vqa.model))\n\n # IPython.embed()\n\n # Test or Train\n if args.test is not None:\n args.fast = args.tiny = False # Always loading all data in 
test\n        if 'test' in args.test:\n            vqa.predict(\n                get_data_tuple(args.test, bs=950,\n                               shuffle=False, drop_last=False),\n                dump=os.path.join(args.output, 'test_predict.json')\n            )\n        elif 'val' in args.test:\n            # Since part of the validation data is used in pre-training/fine-tuning,\n            # only validate on the minival set.\n            result = vqa.evaluate(\n                get_data_tuple('minival', bs=950,\n                               shuffle=False, drop_last=False),\n                dump=os.path.join(args.output, 'minival_predict.json')\n            )\n            print(result)\n        else:\n            assert False, \"No such test option for %s\" % args.test\n    else:\n        print('Splits in Train data:', vqa.train_tuple.dataset.splits)\n        if vqa.valid_tuple is not None:\n            print('Splits in Valid data:', vqa.valid_tuple.dataset.splits)\n            print(\"Valid Oracle: %0.2f\" %\n                  (vqa.oracle_score(vqa.valid_tuple) * 100))\n        else:\n            print(\"DO NOT USE VALIDATION\")\n        vqa.train(vqa.train_tuple, vqa.valid_tuple)\n" ]
[ [ "torch.no_grad", "torch.utils.data.dataloader.DataLoader", "torch.nn.BCEWithLogitsLoss", "torch.load" ] ]
OatmealLiu/DTC
[ "a2d0d2279efc946b83692d5af32008559eb74eff" ]
[ "fianl_DTC_tinyimagenet.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn.parameter import Parameter\nfrom torch.optim import SGD\nfrom torch.autograd import Variable\nfrom sklearn.metrics.cluster import normalized_mutual_info_score as nmi_score\nfrom sklearn.metrics import adjusted_rand_score as ari_score\nfrom sklearn.cluster import KMeans\nfrom sklearn.decomposition import PCA\nfrom utils.util import Identity, AverageMeter, seed_torch, str2bool\nfrom utils import ramps\nfrom models.resnet import ResNetDual, BasicBlock\nfrom modules.module import feat2prob, target_distribution\nfrom data.cifarloader import CIFAR10Loader, CIFAR10LoaderMix, CIFAR100Loader, CIFAR100LoaderMix\nfrom data.tinyimagenetloader import TinyImageNetLoader\nfrom tqdm import tqdm\nimport numpy as np\nimport warnings\nimport random\nimport os\nimport wandb\nfrom collections.abc import Iterable\nfrom utils.fair_evals import cluster_acc\n\n\ndef init_prob_kmeans(model, eval_loader, args):\n torch.manual_seed(1)\n model = model.to(device)\n # cluster parameter initiate\n model.eval()\n targets = np.zeros(len(eval_loader.dataset))\n feats = np.zeros((len(eval_loader.dataset), 512))\n for _, (x, label, idx) in enumerate(eval_loader):\n x = x.to(device)\n feat = model(x, output='head1')\n idx = idx.data.cpu().numpy()\n feats[idx, :] = feat.data.cpu().numpy()\n targets[idx] = label.data.cpu().numpy()\n # evaluate clustering performance\n pca = PCA(n_components=args.n_clusters)\n feats = pca.fit_transform(feats)\n kmeans = KMeans(n_clusters=args.n_clusters, n_init=20)\n y_pred = kmeans.fit_predict(feats)\n acc, nmi, ari = cluster_acc(targets, y_pred), nmi_score(targets, y_pred), ari_score(targets, y_pred)\n print('Init acc {:.4f}, nmi {:.4f}, ari {:.4f}'.format(acc, nmi, ari))\n probs = feat2prob(torch.from_numpy(feats), torch.from_numpy(kmeans.cluster_centers_))\n return acc, nmi, ari, kmeans.cluster_centers_, probs\n\ndef fair_test(model, test_loader, args, cluster=True, ind=None, return_ind=False):\n model.eval()\n preds = np.array([])\n targets = np.array([])\n\n for batch_idx, (x, label, _) in enumerate(tqdm(test_loader)):\n x, label = x.to(args.device), label.to(args.device)\n output1, output2, _ = model(x, output='both')\n output2 = feat2prob(output2, model.center)\n if args.head == 'head1':\n output = torch.cat([output1, output2], dim=1)\n else:\n output = output2\n\n _, pred = output.max(1)\n targets = np.append(targets, label.cpu().numpy())\n preds = np.append(preds, pred.cpu().numpy())\n\n if cluster:\n if return_ind:\n acc, ind = cluster_acc(targets.astype(int), preds.astype(int), return_ind)\n else:\n acc = cluster_acc(targets.astype(int), preds.astype(int), return_ind)\n nmi, ari = nmi_score(targets, preds), ari_score(targets, preds)\n print('Test acc {:.4f}, nmi {:.4f}, ari {:.4f}'.format(acc, nmi, ari))\n else:\n if ind is not None:\n ind = ind[:args.num_unlabeled_classes, :]\n idx = np.argsort(ind[:, 1])\n id_map = ind[idx, 0]\n id_map += args.num_labeled_classes\n\n # targets_new = targets\n targets_new = np.copy(targets)\n for i in range(args.num_unlabeled_classes):\n targets_new[targets == i + args.num_labeled_classes] = id_map[i]\n targets = targets_new\n\n preds = torch.from_numpy(preds)\n targets = torch.from_numpy(targets)\n correct = preds.eq(targets).float().sum(0)\n acc = float(correct / targets.size(0))\n print('Test acc {:.4f}'.format(acc))\n\n if return_ind:\n return acc, ind\n else:\n return acc\n\ndef freeze_layers(model, layer_names, freeze=True):\n if not isinstance(layer_names, 
Iterable):\n layer_names = [layer_names]\n for name, child in model.named_children():\n if name not in layer_names:\n continue\n for param in child.parameters():\n param.requires_grad = not freeze\n\ndef unfreeze_layers(model, layer_names):\n freeze_layers(model, layer_names, False)\n\nif __name__ == \"__main__\":\n import argparse\n parser = argparse.ArgumentParser(\n description='cluster',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('--warmup_lr', type=float, default=0.1)\n parser.add_argument('--lr', type=float, default=0.05)\n parser.add_argument('--momentum', type=float, default=0.9)\n parser.add_argument('--weight_decay', type=float, default=1e-4)\n parser.add_argument('--warmup_epochs', default=10, type=int)\n parser.add_argument('--epochs', default=100, type=int)\n parser.add_argument('--rampup_length', default=5, type=int)\n parser.add_argument('--rampup_coefficient', type=float, default=10.0)\n parser.add_argument('--batch_size', default=128, type=int)\n parser.add_argument('--update_interval', default=5, type=int)\n parser.add_argument('--num_unlabeled_classes', default=5, type=int)\n parser.add_argument('--num_labeled_classes', default=5, type=int)\n parser.add_argument('--n_clusters', default=5, type=int)\n parser.add_argument('--dataset_name', type=str, default='cifar10', help='options: cifar10, cifar100, tinyimagenet')\n parser.add_argument('--seed', default=1, type=int)\n parser.add_argument('--save_txt', default=False, type=str2bool, help='save txt or not', metavar='BOOL')\n parser.add_argument('--pretrain_dir', type=str, default='./data/experiments/pretrained/resnet18_cifar10_classif_5.pth')\n parser.add_argument('--dataset_root', type=str, default='./data/datasets/CIFAR/')\n parser.add_argument('--exp_root', type=str, default='./data/experiments/')\n parser.add_argument('--model_name', type=str, default='resnet18')\n parser.add_argument('--save_txt_name', type=str, default='result.txt')\n parser.add_argument('--DTC', type=str, default='PI')\n parser.add_argument('--wandb_mode', type=str, default='offline', choices=['online', 'offline', 'disabled'])\n parser.add_argument('--wandb_entity', type=str, default='oatmealliu')\n args = parser.parse_args()\n args.cuda = torch.cuda.is_available()\n device = torch.device(\"cuda\" if args.cuda else \"cpu\")\n args.device = torch.device(\"cuda\" if args.cuda else \"cpu\")\n seed_torch(args.seed)\n\n runner_name = \"DTC_incd_train_tinyimagenet\"\n model_dir= args.exp_root + '{}/{}'.format(runner_name, args.DTC)\n if not os.path.exists(model_dir):\n os.makedirs(model_dir)\n args.model_dir = model_dir+'/'+args.model_name+'.pth'\n args.save_txt_path= args.exp_root+ '{}/{}/{}'.format(runner_name, args.DTC, args.save_txt_name)\n\n num_classes = args.num_labeled_classes + args.num_unlabeled_classes\n\n if args.dataset_name == 'cifar10':\n # train_loader = CIFAR10LoaderMix(root=args.dataset_root, batch_size=args.batch_size, split='train', aug='twice', shuffle=True, labeled_list=range(args.num_labeled_classes), unlabeled_list=range(args.num_labeled_classes, num_classes))\n train_loader = CIFAR10Loader(root=args.dataset_root, batch_size=args.batch_size, split='train', aug='twice', shuffle=True, target_list = range(args.num_labeled_classes, num_classes))\n unlabeled_eval_loader = CIFAR10Loader(root=args.dataset_root, batch_size=args.batch_size, split='train', aug=None, shuffle=False, target_list = range(args.num_labeled_classes, num_classes))\n unlabeled_eval_loader_test = 
CIFAR10Loader(root=args.dataset_root, batch_size=args.batch_size, split='test', aug=None, shuffle=False, target_list = range(args.num_labeled_classes, num_classes))\n labeled_eval_loader = CIFAR10Loader(root=args.dataset_root, batch_size=args.batch_size, split='test', aug=None, shuffle=False, target_list = range(args.num_labeled_classes))\n all_eval_loader = CIFAR10Loader(root=args.dataset_root, batch_size=args.batch_size, split='test', aug=None, shuffle=False, target_list = range(num_classes))\n elif args.dataset_name == 'cifar100':\n # train_loader = CIFAR100LoaderMix(root=args.dataset_root, batch_size=args.batch_size, split='train', aug='twice', shuffle=True, labeled_list=range(args.num_labeled_classes), unlabeled_list=range(args.num_labeled_classes, num_classes))\n train_loader = CIFAR100Loader(root=args.dataset_root, batch_size=args.batch_size, split='train', aug='twice', shuffle=True, target_list = range(args.num_labeled_classes, num_classes))\n unlabeled_eval_loader = CIFAR100Loader(root=args.dataset_root, batch_size=args.batch_size, split='train', aug=None, shuffle=False, target_list = range(args.num_labeled_classes, num_classes))\n unlabeled_eval_loader_test = CIFAR100Loader(root=args.dataset_root, batch_size=args.batch_size, split='test', aug=None, shuffle=False, target_list = range(args.num_labeled_classes, num_classes))\n labeled_eval_loader = CIFAR100Loader(root=args.dataset_root, batch_size=args.batch_size, split='test', aug=None, shuffle=False, target_list = range(args.num_labeled_classes))\n all_eval_loader = CIFAR100Loader(root=args.dataset_root, batch_size=args.batch_size, split='test', aug=None, shuffle=False, target_list = range(num_classes))\n elif args.dataset_name == 'tinyimagenet':\n # train_loader = TinyImageNetLoader(batch_size=args.batch_size, num_workers=8, path=args.dataset_root, aug='twice', shuffle=True, class_list = range(args.num_labeled_classes, num_classes), subfolder='train')\n train_loader = TinyImageNetLoader(batch_size=args.batch_size, num_workers=8, path=args.dataset_root, aug='twice', shuffle=True, class_list = range(args.num_labeled_classes, num_classes), subfolder='train')\n unlabeled_eval_loader = TinyImageNetLoader(batch_size=args.batch_size, num_workers=8, path=args.dataset_root, aug=None, shuffle=False, class_list = range(args.num_labeled_classes, num_classes), subfolder='train')\n unlabeled_eval_loader_test = TinyImageNetLoader(batch_size=args.batch_size, num_workers=8, path=args.dataset_root, aug=None, shuffle=False, class_list = range(args.num_labeled_classes, num_classes), subfolder='val')\n labeled_eval_loader = TinyImageNetLoader(batch_size=args.batch_size, num_workers=8, path=args.dataset_root, aug=None, shuffle=False, class_list = range(args.num_labeled_classes), subfolder='val')\n all_eval_loader = TinyImageNetLoader(batch_size=args.batch_size, num_workers=8, path=args.dataset_root, aug=None, shuffle=False, class_list = range(num_classes), subfolder='val')\n\n model = ResNetDual(BasicBlock, [2, 2, 2, 2], args.num_labeled_classes, args.num_unlabeled_classes).to(device)\n state_dict = torch.load(args.model_dir)\n model.load_state_dict(state_dict, strict=False)\n\n model.head1 = Identity()\n init_feat_extractor = model\n init_acc, init_nmi, init_ari, init_centers, init_probs = init_prob_kmeans(init_feat_extractor,\n unlabeled_eval_loader,\n args)\n args.p_targets = target_distribution(init_probs)\n\n model = ResNetDual(BasicBlock, [2, 2, 2, 2], args.num_labeled_classes, args.num_unlabeled_classes).to(device)\n state_dict = 
torch.load(args.model_dir)\n model.load_state_dict(state_dict, strict=False)\n model.center = Parameter(torch.Tensor(args.n_clusters, args.n_clusters))\n model.center.data = torch.tensor(init_centers).float().to(device)\n\n model.eval()\n# =============================== Final Test ===============================\n acc_list = []\n\n print('Head2: test on unlabeled classes')\n args.head = 'head2'\n _, ind = fair_test(model, unlabeled_eval_loader, args, return_ind=True)\n\n print('Evaluating on Head1')\n args.head = 'head1'\n\n print('test on labeled classes (test split)')\n acc = fair_test(model, labeled_eval_loader, args, cluster=False)\n acc_list.append(acc)\n\n print('test on unlabeled classes (test split)')\n acc = fair_test(model, unlabeled_eval_loader_test, args, cluster=False, ind=ind)\n acc_list.append(acc)\n\n print('test on all classes w/o clustering (test split)')\n acc = fair_test(model, all_eval_loader, args, cluster=False, ind=ind)\n acc_list.append(acc)\n\n print('test on all classes w/ clustering (test split)')\n acc = fair_test(model, all_eval_loader, args, cluster=True)\n acc_list.append(acc)\n\n print('Evaluating on Head2')\n args.head = 'head2'\n\n print('test on unlabeled classes (train split)')\n acc = fair_test(model, unlabeled_eval_loader, args)\n acc_list.append(acc)\n\n print('test on unlabeled classes (test split)')\n acc = fair_test(model, unlabeled_eval_loader_test, args)\n acc_list.append(acc)\n\n print('Acc List: Joint Head1->Old, New, All_wo_cluster, All_w_cluster, Head2->Train, Test')\n print(acc_list)\n" ]
[ [ "torch.device", "numpy.array", "torch.cat", "sklearn.cluster.KMeans", "numpy.copy", "sklearn.metrics.cluster.normalized_mutual_info_score", "torch.manual_seed", "torch.from_numpy", "torch.cuda.is_available", "torch.tensor", "sklearn.metrics.adjusted_rand_score", "torch.load", "numpy.argsort", "torch.Tensor", "sklearn.decomposition.PCA" ] ]
otmanon/gpytoolbox
[ "81d305bba9767a94f4d36264dd6849c410231c7c" ]
[ "unit_tests/cotmatrix_tets.py" ]
[ "import igl\nimport numpy as np\nfrom scipy.sparse import csr_matrix, diags\nfrom cotan_weights_tets import cotan_weights_tets\n\ndef cotmatrix_tets(V, T):\n \"\"\"\n Returns the cotangent Laplacian matrix for tet-meshes, implemented as described in\n \"Algorithms and Interfaces for Real-Time Deformation of 2D and 3D Shapes\" [Jacobson, 2013]\n :param V: |V|xdim Vertices of your tet mesh\n :param T: |T|x4 Indices into V for each tetrahedron\n :return: |V|x|V| sparse csr_matrix representing the cotangent laplacian matrix for a tetrahedral mesh\n \"\"\"\n #get cotan weights for tet mesh\n cotan_weights = cotan_weights_tets(V, T)\n\n #fill indices\n i = (T[:, [1, 2, 0, 3, 3, 3]]).flatten()\n j = (T[:, [2, 0, 1, 0, 1, 2]]).flatten()\n v =(cotan_weights).flatten()\n L = csr_matrix((v, (i, j)), shape=(V.shape[0], V.shape[0]))\n\n L += L.T\n\n diag_entries = -np.array(L.sum(1)).flatten()\n L += diags(diag_entries)\n return L\n\n\n\n\n\n" ]
[ [ "scipy.sparse.csr_matrix", "scipy.sparse.diags" ] ]
fernandezfran/exma
[ "4d489d14814f91dc364eaaae9239c52e125a7244" ]
[ "tests/electrochemistry/test_voltage.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# This file is part of exma (https://github.com/fernandezfran/exma/).\n# Copyright (c) 2021, Francisco Fernandez\n# License: MIT\n# Full Text: https://github.com/fernandezfran/exma/blob/master/LICENSE\n\n# ============================================================================\n# IMPORTS\n# ============================================================================\n\nimport exma.electrochemistry\n\nimport numpy as np\n\nimport pandas as pd\n\n# ============================================================================\n# TESTS\n# ============================================================================\n\n\ndef test_voltage():\n \"\"\"Test the voltage approximation.\"\"\"\n reffe_spline = np.array(\n [\n 0.8766071,\n 0.8881785,\n 0.8997439,\n 0.9113034,\n 0.9228569,\n 0.9344045,\n 0.9459461,\n 0.9574818,\n 0.9690115,\n 0.98053527,\n 0.9920531,\n 1.003565,\n 1.0150709,\n 1.0265709,\n 1.0380648,\n 1.0495529,\n 1.061035,\n 1.0725112,\n 1.0839815,\n 1.0954458,\n 1.106904,\n 1.1183565,\n 1.1298028,\n 1.1412433,\n 1.1526779,\n 1.1641064,\n 1.175529,\n 1.1869457,\n 1.1983564,\n 1.2097611,\n 1.22116,\n 1.2325529,\n 1.2439398,\n 1.2553208,\n 1.2666959,\n 1.2780648,\n 1.289428,\n 1.3007852,\n 1.3121364,\n 1.3234817,\n 1.334821,\n 1.3461543,\n 1.3574818,\n 1.3688033,\n 1.3801187,\n 1.3914284,\n 1.402732,\n 1.4140296,\n 1.4253213,\n 1.4366071,\n ],\n )\n refvoltage = np.array(\n [\n -0.5671429,\n -0.5668513,\n -0.5665598,\n -0.56626827,\n -0.5659767,\n -0.56568515,\n -0.5653936,\n -0.56510204,\n -0.5648105,\n -0.5645189,\n -0.5642274,\n -0.5639359,\n -0.5636443,\n -0.56335276,\n -0.56306124,\n -0.56276965,\n -0.5624781,\n -0.5621866,\n -0.561895,\n -0.5616035,\n -0.56131196,\n -0.5610204,\n -0.56072885,\n -0.5604373,\n -0.56014574,\n -0.5598542,\n -0.5595627,\n -0.5592711,\n -0.5589796,\n -0.55868804,\n -0.55839646,\n -0.55810493,\n -0.5578134,\n -0.5575218,\n -0.5572303,\n -0.55693877,\n -0.5566472,\n -0.55635566,\n -0.5560641,\n -0.55577254,\n -0.555481,\n -0.5551895,\n -0.5548979,\n -0.5546064,\n -0.55431485,\n -0.55402327,\n -0.55373174,\n -0.5534402,\n -0.5531486,\n -0.5528571,\n ],\n )\n\n x = np.linspace(0, 1, num=5)\n fe = np.array([0.875000, 1.021875, 1.156250, 1.296875, 1.437500])\n df = pd.DataFrame({\"x\": x, \"fe\": fe})\n\n result = exma.electrochemistry.voltage(df, k=2)\n\n np.testing.assert_almost_equal(result.x, np.linspace(0, 1))\n np.testing.assert_almost_equal(result.fe_spline, reffe_spline, 6)\n np.testing.assert_almost_equal(result.voltage, refvoltage)\n" ]
[ [ "numpy.testing.assert_almost_equal", "pandas.DataFrame", "numpy.array", "numpy.linspace" ] ]
MetaCell/PsyNeuLink
[ "aeddf3e8ea62504a5d928b100b59aa18e593156c" ]
[ "psyneulink/core/components/ports/inputport.py" ]
[ "# Princeton University licenses this file to You under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License. You may obtain a copy of the License at:\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed\n# on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and limitations under the License.\n\n\n# ******************************************* InputPort *****************************************************\n#\n\"\"\"\nContents\n--------\n\n* `InputPort_Overview`\n* `InputPort_Creation`\n - `InputPort_Deferred_Initialization`\n - `InputPort_Primary`\n - `InputPort_Specification`\n • `Forms of Specification <InputPort_Forms_of_Specification>`\n • `Variable, Value and Mechanism <InputPort_Variable_and_Value>`\n • `Variable: Compatability and Constaints <InputPort_Compatability_and_Constraints>`\n* `InputPort_Structure`\n - `Afferent Projections <InputPort_Afferent_Projections>`\n - `Variable <InputPort_Variable>`\n - `Function <InputPort_Function>`\n - `Value <InputPort_Value>`\n - `Weights ane Exponents <InputPort_Weights_And_Exponents>`\n* `InputPort_Execution`\n* `InputPort_Class_Reference`\n\n.. _InputPort_Overview:\n\nOverview\n--------\n\nThe purpose of an InputPort is to receive and combine inputs to a `Mechanism <Mechanism>`, allow them to be modified,\nand provide them to the Mechanism's `function <Mechanism_Base.function>`. An InputPort receives input to a Mechanism\nprovided by the `Projections <Projection>` to that Mechanism from others in a `Composition`. If the InputPort belongs\nto an `ORIGIN` Mechanism (see `Mechanism_Role_In_Compositions`), then it receives the input specified when that\nComposition is `run <Run>`. The `PathwayProjections <PathWayProjection>` received by an InputPort are listed in its\n`path_afferents <Port.path_afferents>`, and its `ModulatoryProjections <ModulatoryProjection>` in its `mod_afferents\n<Port.mod_afferents>` attribute. Its `function <InputPort.function>` combines the values received from its\nPathWayProjections, modifies the combined value according to value(s) any ModulatoryProjections it receives, and\nprovides the result to the assigned item of its owner Mechanism's `variable <Mechanism_Base.variable>` and\n`input_values <Mechanism_Base.input_values>` attributes (see `below` and `Mechanism InputPorts <Mechanism_InputPorts>`\nfor additional details about the role of InputPorts in Mechanisms, and their assignment to the items of a Mechanism's\n`variable <Mechanism_Base.variable>` attribute).\n\n.. _InputPort_Creation:\n\nCreating an InputPort\n----------------------\n\nAn InputPort can be created by calling its constructor, but in general this is not necessary as a `Mechanism\n<Mechanism>` can usually automatically create the InputPort(s) it needs when it is created. For example, if the\nMechanism isbeing created within the `pathway <Process.pathway>` of a `Process`, its InputPort is created and assigned\nas the `receiver <MappingProjection.receiver>` of a `MappingProjection` from the preceding Mechanism in the `pathway\n<Process.pathway>`. 
InputPorts can also be specified in the **input_ports** argument of a Mechanism's constructor\n(see `below <InputPort_Specification>`).\n\nThe `variable <InputPort.variable>` of an InputPort can be specified using the **variable** or **size** arguments of\nits constructor. It can also be specified using the **projections** argument, if neither **variable** nor **size** is\nspecified. The **projections** argument is used to `specify Projections <Port_Projections>` to the InputPort. If\nneither the **variable** nor **size** argument is specified, then the value of the `Projection(s) <Projection>` or\ntheir `sender <Projection_Base.sender>`\\\\s (all of which must be the same length) is used to determine the `variable\n<InputPort.variable>` of the InputPort.\n\nIf an InputPort is created using its constructor, and a Mechanism is specified in the **owner** argument,\nit is automatically assigned to that Mechanism. Note that its `value <InputPort.value>` (generally determined\nby the size of its `variable <InputPort.variable>` -- see `below <InputPort_Variable_and_Value>`) must\nbe compatible (in number and type of elements) with the item of its owner's `variable <Mechanism_Base.variable>` to\nwhich it is assigned (see `below <InputPort_Variable_and_Value>` and `Mechanism <Mechanism_Variable_and_InputPorts>`).\nIf the **owner** argument is not specified, `initialization <Port_Deferred_Initialization>` is deferred.\n\n.. _InputPort_Deferred_Initialization:\n\n*Owner Assignment and Deferred Initialization*\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nAn InputPort must be owned by a `Mechanism <Mechanism>`. When an InputPort is specified in the constructor for a\nMechanism (see `below <InputPort_Specification>`), it is automatically assigned to that Mechanism as its owner. If\nthe InputPort is created on its own, its `owner <Port.owner>` can be specified in the **owner** argument of its\nconstructor, in which case it is assigned to that Mechanism. If its **owner** argument is not specified, its\ninitialization is `deferred <Port_Deferred_Initialization>` until\nCOMMENT:\nTBI: its `owner <Port_Base.owner>` attribute is assigned or\nCOMMENT\nthe InputPort is assigned to a Mechanism using the Mechanism's `add_ports <Mechanism_Base.add_ports>` method.\n\n.. _InputPort_Primary:\n\n*Primary InputPort*\n~~~~~~~~~~~~~~~~~~~\n\nEvery Mechanism has at least one InputPort, referred to as its *primary InputPort*. If InputPorts are not\n`explicitly specified <InputPort_Specification>` for a Mechanism, a primary InputPort is automatically created\nand assigned to its `input_port <Mechanism_Base.input_port>` attribute (note the singular), and also to the first\nentry of the Mechanism's `input_ports <Mechanism_Base.input_ports>` attribute (note the plural). The `value\n<InputPort.value>` of the primary InputPort is assigned as the first (and often only) item of the Mechanism's\n`variable <Mechanism_Base.variable>` and `input_values <Mechanism_Base.input_values>` attributes.\n\n.. _InputPort_Specification:\n\n*InputPort Specification*\n~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nSpecifying InputPorts when a Mechanism is created\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nInputPorts can be specified for a `Mechanism <Mechanism>` when it is created, in the **input_ports** argument of the\nMechanism's constructor (see `examples <Port_Constructor_Argument_Examples>` in Port), or in an *INPUT_PORTS* entry\nof a parameter dictionary assigned to the constructor's **params** argument. 
The latter takes precedence over the\nformer (that is, if an *INPUT_PORTS* entry is included in the parameter dictionary, any specified in the\n**input_ports** argument are ignored).\n\n .. _InputPort_Replace_Default_Note:\n\n .. note::\n Assigning InputPorts to a Mechanism in its constructor **replaces** any that are automatically generated for\n that Mechanism (i.e., those that it creates for itself by default). If any of those are needed, they must be\n explicitly specified in the list assigned to the **input_ports** argument, or the *INPUT_PORTS* entry of the\n parameter dictionary in the **params** argument. The number of InputPorts specified must also be equal to\n the number of items in the Mechanism's `variable <Mechanism_Base.variable>` attribute.\n\n.. _InputPort_Variable_and_Value:\n\n*InputPort's* `variable <InputPort.variable>`, `value <InputPort.value>` *and Mechanism's* `variable <Mechanism_Base.variable>`\n+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n\nEach InputPort specified in the **input_ports** argument of a Mechanism's constructor must correspond to an item of\nthe Mechanism's `variable <Mechanism_Base.variable>` attribute (see `Mechanism <Mechanism_Variable_and_InputPorts>`),\nand the `value <InputPort.value>` of the InputPort must be compatible with that item (that is, have the same number\nand type of elements). By default, this is also true of the InputPort's `variable <InputPort.variable>` attribute,\nsince the default `function <InputPort.function>` for an InputPort is a `LinearCombination`, the purpose of which\nis to combine the inputs it receives and possibly modify the combined value (under the influence of any\n`ModulatoryProjections <ModulatoryProjection>` it receives), but **not mutate its form**. Therefore, under most\ncircumstances, both the `variable <InputPort.variable>` of an InputPort and its `value <InputPort.value>` should\nmatch the item of its owner's `variable <Mechanism_Base.variable>` to which the InputPort is assigned.\n\nThe format of an InputPort's `variable <InputPort.variable>` can be specified in a variety of ways. The most\nstraightforward is in the **variable** argument of its constructor. More commonly, however, it is determined by\nthe context in which it is being created, such as the specification for its owner Mechanism's `variable\n<Mechanism_Base.variable>` or for the InputPort in the Mechanism's **input_ports** argument (see `below\n<InputPort_Forms_of_Specification>` and `Mechanism InputPort specification <Mechanism_InputPort_Specification>`\nfor details).\n\n\nAdding InputPorts to a Mechanism after it is created\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nInputPorts can also be **added** to a Mechanism, either by creating the InputPort on its own, and specifying the\nMechanism in the InputPort's **owner** argument, or by using the Mechanism's `add_ports <Mechanism_Base.add_ports>`\nmethod (see `examples <Port_Create_Port_Examples>` in Port).\n\n .. _InputPort_Add_Port_Note:\n\n .. note::\n Adding InputPorts *does not replace* any that the Mechanism generates by default; rather they are added to the\n Mechanism, and appended to the list of InputPorts in its `input_ports <Mechanism_Base.input_ports>` attribute.\n Importantly, the Mechanism's `variable <Mechanism_Base.variable>` attribute is extended with items that\n correspond to the `value <InputPort.value>` attribute of each added InputPort. 
This may affect the\n relationship of the Mechanism's `variable <Mechanism_Base.variable>` to its `function\n <Mechanism_Base.function>`, as well as the number of its `OutputPorts <OutputPort>` (see `note\n <Mechanism_Add_InputPorts_Note>`).\n\nIf the name of an InputPort added to a Mechanism is the same as one that already exists, its name is suffixed with a\nnumerical index (incremented for each InputPort with that name; see `Registry_Naming`), and the InputPort is added to\nthe list (that is, it will *not* replace ones that already exist).\n\n.. _InputPort_Forms_of_Specification:\n\nForms of Specification\n^^^^^^^^^^^^^^^^^^^^^^\n\nInputPorts can be specified in a variety of ways, that fall into three broad categories: specifying an InputPort\ndirectly; use of a `Port specification dictionary <Port_Specification>`; or by specifying one or more Components that\nshould project to the InputPort. Each of these is described below:\n\n .. _InputPort_Direct_Specification:\n\n **Direct Specification of an InputPort**\n\n * existing **InputPort object** or the name of one -- If this is used to specify an InputPort in the\n constructor for a Mechanism, its `value <InputPort.value>` must be compatible with the corresponding item of\n the owner Mechanism's `variable <Mechanism_Base.variable>` (see `Mechanism InputPort specification\n <Mechanism_InputPort_Specification>` and `InputPort_Compatability_and_Constraints` below). If the InputPort\n belongs to another Mechanism, then an InputPort is created along with Projections(s) that `shadow the inputs\n <InputPort_Shadow_Inputs>` to the specified InputPort.\n ..\n * **InputPort class**, **keyword** *INPUT_PORT*, or a **string** -- this creates a default InputPort; if used\n to specify an InputPort in the constructor for a Mechanism, the item of the owner Mechanism's `variable\n <Mechanism_Base.variable>` to which the InputPort is assigned is used as the format for the InputPort`s\n `variable <InputPort.variable>`; otherwise, the default for the InputPort is used. If a string is specified,\n it is used as the `name <InputPort.name>` of the InputPort (see `example <Port_Constructor_Argument_Examples>`).\n\n .. _InputPort_Specification_by_Value:\n\n * **value** -- this creates a default InputPort using the specified value as the InputPort's `variable\n <InputPort.variable>`; if used to specify an InputPort in the constructor for a Mechanism, the format must be\n compatible with the corresponding item of the owner Mechanism's `variable <Mechanism_Base.variable>` (see\n `Mechanism InputPort specification <Mechanism_InputPort_Specification>`, `example\n <port_value_Spec_Example>`, and discussion `below <InputPort_Compatability_and_Constraints>`).\n\n .. _InputPort_Specification_Dictionary:\n\n **InputPort Specification Dictionary**\n\n * **InputPort specification dictionary** -- this can be used to specify the attributes of an InputPort, using\n any of the entries that can be included in a `Port specification dictionary <Port_Specification>` (see\n `examples <Port_Specification_Dictionary_Examples>` in Port). 
If the dictionary is used to specify an\n    InputPort in the constructor for a Mechanism, and it includes a *VARIABLE* and/or *VALUE* entry, the value\n    must be compatible with the item of the owner Mechanism's `variable <Mechanism_Base.variable>` to which the\n    InputPort is assigned (see `Mechanism InputPort specification <Mechanism_InputPort_Specification>`).\n\n    The *PROJECTIONS* entry can include specifications for one or more Ports, Mechanisms and/or Projections that\n    should project to the InputPort (including both `MappingProjections <MappingProjection>` and/or\n    `ModulatoryProjections <ModulatoryProjection>`); however, this may be constrained by or have consequences for the\n    InputPort's `variable <InputPort.variable>` (see `InputPort_Compatability_and_Constraints`).\n\n    In addition to the standard entries of a `Port specification dictionary <Port_Specification>`, the dictionary\n    can also include either or both of the following entries specific to InputPorts:\n\n    * *WEIGHT*:<number>\n        the value must be an integer or float, and is assigned as the value of the InputPort's `weight\n        <InputPort.weight>` attribute (see `weight and exponent <InputPort_Weights_And_Exponents>`);\n        this takes precedence over any specification in the **weight** argument of the InputPort's constructor.\n\n    * *EXPONENT*:<number>\n        the value must be an integer or float, and is assigned as the value of the InputPort's `exponent\n        <InputPort.exponent>` attribute (see `weight and exponent <InputPort_Weights_And_Exponents>`);\n        this takes precedence over any specification in the **exponent** argument of the InputPort's constructor.\n\n    .. _InputPort_Projection_Source_Specification:\n\n    **Specification of an InputPort by Components that Project to It**\n\n    COMMENT:\n    `examples\n    <Port_Projections_Examples>` in Port)\n    COMMENT\n\n    COMMENT:\n    ?? PUT IN ITS OWN SECTION ABOVE OR BELOW??\n    Projections to an InputPort can be specified either as attributes, in the constructor for an\n    InputPort (in its **projections** argument or in the *PROJECTIONS* entry of an `InputPort specification dictionary\n    <InputPort_Specification_Dictionary>`), or used to specify the InputPort itself (using one of the\n    `InputPort_Forms_of_Specification` described above. See `Port Projections <Port_Projections>` for additional\n    details concerning the specification of\n    Projections when creating a Port.\n    COMMENT\n\n    An InputPort can also be specified by specifying one or more Ports, Mechanisms or Projections that should project\n    to it, as described below. Specifying an InputPort in this way creates both the InputPort and any of the\n    specified or implied Projection(s) to it (if they don't already exist). `MappingProjections <MappingProjection>`\n    are assigned to the InputPort's `path_afferents <Port.path_afferents>` attribute, while `ControlProjections\n    <ControlProjection>` and `GatingProjections <GatingProjection>` to its `mod_afferents <Port.mod_afferents>`\n    attribute. Any of the following can be used to specify an InputPort by the Components that project to it (see\n    `below <InputPort_Compatability_and_Constraints>` for an explanation of the relationship between the `value` of\n    these Components and the InputPort's `variable <InputPort.variable>`):\n\n    * **OutputPort, GatingSignal, Mechanism, or list with any of these** -- creates an InputPort with Projection(s)\n      to it from the specified Port(s) or Mechanism(s). 
For each Mechanism specified, its `primary OutputPort\n      <OutputPort_Primary>` (or GatingSignal) is used.\n    ..\n    * **Projection** -- any form of `Projection specification <Projection_Specification>` can be\n      used; creates an InputPort and assigns it as the Projection's `receiver <Projection_Base.receiver>`.\n\n    .. _InputPort_Tuple_Specification:\n\n    * **InputPort specification tuples** -- these are convenience formats that can be used to compactly specify an\n      InputPort and Projections to it in any of the following ways:\n\n        .. _InputPort_Port_Mechanism_Tuple:\n\n        * **2-item tuple:** *(<Port name or list of Port names>, <Mechanism>)* -- 1st item must be the name of an\n          `OutputPort` or `ModulatorySignal`, or a list of such names, and the 2nd item must be the Mechanism to\n          which they all belong. Projections of the relevant types are created for each of the specified Ports\n          (see `Port 2-item tuple <Port_2_Item_Tuple>` for additional details).\n\n        * **2-item tuple:** *(<value, Port specification, or list of Port specs>, <Projection specification>)* --\n          this is a contracted form of the 4-item tuple described below;\n\n        * **3 or 4-item tuple:** *(<value, Port spec, or list of Port specs>, weight, exponent, Projection\n          specification)* -- this allows the specification of Port(s) that should project to the InputPort, together\n          with a specification of the InputPort's `weight <InputPort.weight>` and/or `exponent <InputPort.exponent>`\n          attributes, and (optionally) the Projection(s) to it. This can be used to compactly\n          specify a set of Ports that project to the InputPort, while using the 4th item to determine its variable\n          (e.g., using the matrix of the Projection specification) and/or attributes of the Projection(s) to it. Each\n          tuple must have at least the following first three items (in the order listed), and can include the fourth:\n\n            * **value, Port specification, or list of Port specifications** -- specifies either the `variable\n              <InputPort.variable>` of the InputPort, or one or more Ports that should project to it. The Port\n              specification(s) can be a (Port name, Mechanism) tuple (see above), and/or include Mechanisms (in which\n              case their `primary OutputPort <OutputPort_Primary>` is used). 
All of the Port specifications must be\n              consistent with (that is, their `value <Port_Base.value>` must be compatible with the `variable\n              <Projection_Base.variable>` of) the Projection specified in the fourth item if that is included;\n\n            * **weight** -- must be an integer or a float; multiplies the `value <InputPort.value>` of the InputPort\n              before it is combined with others by the Mechanism's `function <Mechanism.function>` (see\n              ObjectiveMechanism for `examples <ObjectiveMechanism_Weights_and_Exponents_Example>`);\n\n            * **exponent** -- must be an integer or float; exponentiates the `value <InputPort.value>` of the\n              InputPort before it is combined with others by the ObjectiveMechanism's `function\n              <ObjectiveMechanism.function>` (see ObjectiveMechanism for `examples\n              <ObjectiveMechanism_Weights_and_Exponents_Example>`);\n\n            * **Projection specification** (optional) -- `specifies a Projection <Projection_Specification>` that\n              must be compatible with the Port specification(s) in the 1st item; if there is more than one Port\n              specified, and the Projection specification is used, all of the Ports\n              must be of the same type (i.e., either OutputPorts or GatingSignals), and the `Projection\n              Specification <Projection_Specification>` cannot be an instantiated Projection (since a\n              Projection cannot be assigned more than one `sender <Projection_Base.sender>`).\n\n    .. _InputPort_Shadow_Inputs:\n\n    * **InputPorts of Mechanisms to shadow** -- either of the following can be used to create InputPorts that\n      receive the same inputs as (\"shadow\") the ones specified:\n\n      * *InputPort or [InputPort, ...]* -- each InputPort must belong to an existing Mechanism; creates a new\n        InputPort for each one specified, along with Projections to it that parallel those of the one specified\n        (see below).\n\n      * *{SHADOW_INPUTS: <InputPort or Mechanism or [<InputPort or Mechanism>,...]>}* -- any InputPorts specified\n        must belong to an existing Mechanism; creates a new InputPort for each one specified, and for each of the\n        InputPorts belonging to any Mechanisms specified, along with Projections to them that parallel those of the\n        one(s) specified (see below).\n\n      For each InputPort specified, and all of the InputPorts belonging to any Mechanisms specified, a new InputPort\n      is created along with Projections to it that parallel those received by the corresponding InputPort in the\n      list. In other words, for each InputPort specified, a new one is created that receives exactly the same inputs\n      from the same `senders <Projection_Base.sender>` as the ones specified.\n\n      If an InputPort shadows another, its `shadow_inputs <InputPort.shadow_inputs>` attribute identifies the InputPort\n      that it shadows.\n\n      .. note::\n         Only InputPorts belonging to Mechanisms in the *same Composition*, or ones that are `INPUT <NodeRole.INPUT>`\n         `Nodes <Composition_Nodes>` of a `nested <Composition_Nested>` Composition can be specified for shadowing,\n         unless the `allow_probes <Composition.allow_probes>` attribute of the `Composition` is set to True. Note also\n         that any Node that shadows an `INPUT <NodeRole.INPUT>` `Node <Composition_Nodes>` of the Composition to which\n         it belongs is itself also assigned the role of `INPUT <NodeRole.INPUT>` Node.\n\n      .. 
hint::\n         If an InputPort needs to be shadowed that belongs to a Mechanism in a `nested <Composition_Nested>`\n         Composition that is not an `INPUT <NodeRole.INPUT>` `Node <Composition_Nodes>` of that Composition, this can\n         be accomplished as follows: 1) add a Mechanism to the nested Composition with an InputPort that shadows the\n         one to be shadowed; 2) specify `OUTPUT <NodeRole.OUTPUT>` as a `required_role\n         <Composition.add_node.required_roles>` for that Mechanism; 3) use that Mechanism as the `InputPort\n         specification <InputPort_Specification>` for the shadowing InputPort.\n\n.. _InputPort_Compatability_and_Constraints:\n\nInputPort `variable <InputPort.variable>`: Compatibility and Constraints\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nThe `variable <InputPort.variable>` of an InputPort must be compatible with the item of its owner Mechanism's\n`variable <Mechanism_Base.variable>` to which it is assigned (see `Mechanism_Variable_and_InputPorts`). This may\nhave consequences that must be taken into account when `specifying an InputPort by Components that project to it\n<InputPort_Projection_Source_Specification>`. These depend on the context in which the specification is made, and\npossibly the value of other specifications. These considerations and how they are handled are described below,\nstarting with constraints that are given the highest precedence:\n\n  * **InputPort is** `specified in a Mechanism's constructor <Mechanism_InputPort_Specification>` and the\n    **default_variable** argument for the Mechanism is also specified -- the item of the variable to which the\n    `InputPort is assigned <Mechanism_Variable_and_InputPorts>` is used to determine the InputPort's `variable\n    <InputPort.variable>`. Any other specifications of the InputPort relevant to its `variable <InputPort.variable>`\n    must be compatible with this (for example, `specifying it by value <InputPort_Specification_by_Value>` or by a\n    `MappingProjection` or `OutputPort` that projects to it (see `above <InputPort_Projection_Source_Specification>`)).\n\n    COMMENT:\n    ***XXX EXAMPLE HERE\n    COMMENT\n  ..\n  * **InputPort is specified on its own**, or the **default_variable** argument of its Mechanism's constructor\n    is not specified -- any direct specification of the InputPort's `variable <InputPort.variable>` is used to\n    determine its format (e.g., `specifying it by value <InputPort_Specification_by_Value>`, or a *VARIABLE* entry\n    in an `InputPort specification dictionary <InputPort_Specification_Dictionary>`). 
In this case, the value of any\n    `Components used to specify the InputPort <InputPort_Projection_Source_Specification>` that are relevant to its\n    `variable <InputPort.variable>` must be compatible with it (see below).\n\n    COMMENT:\n    ***XXX EXAMPLE HERE\n    COMMENT\n  ..\n  * If the InputPort's `variable <InputPort.variable>` is not constrained by any of the conditions above,\n    then its format is determined by the `specification of Components that project to it\n    <InputPort_Projection_Source_Specification>`:\n\n    * **More than one Component is specified with the same `value` format** -- that format is used to determine\n      the format of the InputPort's `variable <InputPort.variable>`.\n\n    * **More than one Component is specified with different `value` formats** -- the InputPort's `variable\n      <InputPort.variable>` is determined by the item of the default `variable <Mechanism_Base.variable>` for\n      the class of its owner Mechanism.\n\n    * **A single Component is specified** -- its `value` is used to determine the format of the InputPort's\n      `variable <InputPort.variable>`; if the Component is a(n):\n\n      * **MappingProjection** -- can be specified by its class, an existing MappingProjection, or a matrix:\n\n        * `MappingProjection` **class** -- a default value is used both for the InputPort's `variable\n          <InputPort.variable>` and the Projection's `value <Projection_Base.value>` (since the Projection's\n          `sender <Projection_Base.sender>` is unspecified, its `initialization is deferred\n          <Projection_Deferred_Initialization>`).\n\n        * **Existing MappingProjection** -- then its `value <Projection_Base.value>` determines the\n          InputPort's `variable <InputPort.variable>`.\n\n        * `Matrix specification <MappingProjection_Matrix_Specification>` -- its receiver dimensionality determines the\n          format of the InputPort's `variable <InputPort.variable>`. For a standard 2d \"weight\" matrix (i.e., one that\n          maps a 1d array from its `sender <Projection_Base.sender>` to a 1d array of its `receiver\n          <Projection_Base.receiver>`), the receiver dimensionality is its outer dimension (axis 1, or its number of\n          columns). However, if the `sender <Projection_Base.sender>` has more than one dimension, then the\n          dimensionality of the receiver (used for the InputPort's `variable <InputPort.variable>`) is the\n          dimensionality of the matrix minus the dimensionality of the sender's `value <OutputPort.value>`\n          (see `matrix dimensionality <Mapping_Matrix_Dimensionality>`).\n\n      * **OutputPort or ProcessingMechanism** -- the `value <OutputPort.value>` of the OutputPort (if it is a\n        Mechanism, then its `primary OutputPort <OutputPort_Primary>`) determines the format of the InputPort's\n        `variable <InputPort.variable>`, and a MappingProjection is created from the OutputPort to the InputPort\n        using an `IDENTITY_MATRIX`. If the InputPort's `variable <InputPort.variable>` is constrained (as in some\n        of the cases above), then a `FULL_CONNECTIVITY_MATRIX` is used which maps the shape of the OutputPort's `value\n        <OutputPort.value>` to that of the InputPort's `variable <InputPort.variable>`.\n\n      * **GatingProjection, GatingSignal or GatingMechanism** -- any of these can be used to specify an InputPort;\n        their `value` does not need to be compatible with the InputPort's `variable <InputPort.variable>`, however\n        it does have to be compatible with the `modulatory parameter <Function_Modulatory_Params>` of the InputPort's\n        `function <InputPort.function>`.\n\n.. 
_InputPort_Structure:\n\nStructure\n---------\n\nEvery InputPort is owned by a `Mechanism <Mechanism>`. It can receive one or more `MappingProjections\n<MappingProjection>` from other Mechanisms, as well as from the Process or System to which its owner belongs (if it\nis the `ORIGIN` Mechanism for that Process or System). It has the following attributes, including ones specific to,\nand that can be used to customize, the InputPort:\n\n* `projections <Port.projections>` -- all of the `Projections <Projection>` received by the InputPort.\n\n.. _InputPort_Afferent_Projections:\n\n* `path_afferents <Port.path_afferents>` -- `MappingProjections <MappingProjection>` that project to the InputPort,\n  the `value <Projection_Base.value>`\\\\s of which are combined by the InputPort's `function <InputPort.function>`,\n  possibly modified by its `mod_afferents <InputPort_mod_afferents>`, and assigned to the corresponding item of the\n  owner Mechanism's `variable <Mechanism_Base.variable>`.\n\n* `mod_afferents <InputPort_mod_afferents>` -- `GatingProjections <GatingProjection>` that project to the InputPort,\n  the `value <GatingProjection.value>` of which can modify the InputPort's `value <InputPort.value>` (see the\n  descriptions of Modulation under `ModulatorySignals <ModulatorySignal_Modulation>` and `GatingSignals\n  <GatingSignal_Modulation>` for additional details). If the InputPort receives more than one GatingProjection,\n  their values are combined before they are used to modify the `value <InputPort.value>` of the InputPort.\n\n.. _InputPort_Variable:\n\n* `variable <InputPort.variable>` -- serves as the template for the `value <Projection_Base.value>` of the\n  `Projections <Projection>` received by the InputPort: each must be compatible with (that is, match both the\n  number and type of elements of) the InputPort's `variable <InputPort.variable>` (see `Mapping_Matrix` for additional\n  details). In general, this must also be compatible with the item of the owner Mechanism's `variable\n  <Mechanism_Base.variable>` to which the InputPort is assigned (see `above <InputPort_Variable_and_Value>` and\n  `Mechanism InputPort specification <Mechanism_InputPort_Specification>`).\n\n.. _InputPort_Function:\n\n* `function <InputPort.function>` -- combines the `value <Projection_Base.value>` of all of the\n  `Projections <Projection>` received by the InputPort, and assigns the result to the InputPort's `value\n  <InputPort.value>` attribute. The default function is `LinearCombination`, which performs an elementwise (Hadamard)\n  sum of the values. However, the parameters of the `function <InputPort.function>` -- and thus the `value\n  <InputPort.value>` of the InputPort -- can be modified by any `GatingProjections <GatingProjection>` received by\n  the InputPort (listed in its `mod_afferents <Port.mod_afferents>` attribute). A custom function can also be\n  specified, so long as it generates a result that is compatible with the item of the Mechanism's `variable\n  <Mechanism_Base.variable>` to which the `InputPort is assigned <Mechanism_InputPorts>`.\n\n.. _InputPort_Value:\n\n* `value <InputPort.value>` -- the result returned by its `function <InputPort.function>`,\n  after aggregating the value of the `PathProjections <PathwayProjection>` it receives, possibly modified by any\n  `GatingProjections <GatingProjection>` received by the InputPort. 
It must be compatible with the\n item of the owner Mechanism's `variable <Mechanism_Base.variable>` to which the `InputPort has been assigned\n <Mechanism_InputPorts>` (see `above <InputPort_Variable_and_Value>` and `Mechanism InputPort specification\n <Mechanism_InputPort_Specification>`).\n\n.. _InputPort_Weights_And_Exponents:\n\n* `weight <InputPort.weight>` and `exponent <InputPort.exponent>` -- these can be used by the Mechanism to which the\n InputPort belongs when it combines the `value <InputPort.value>`\\\\s of its Ports (e.g., an ObjectiveMechanism\n uses the weights and exponents assigned to its InputPorts to determine how the values it monitors are combined by\n its `function <ObjectiveMechanism>`). The value of each must be an integer or float, and the default is 1 for both.\n\n.. _InputPort_Execution:\n\nExecution\n---------\n\nAn InputPort cannot be executed directly. It is executed when the Mechanism to which it belongs is executed.\nWhen this occurs, the InputPort executes any `Projections <Projection>` it receives, calls its `function\n<InputPort.function>` to combine the values received from any `MappingProjections <MappingProjection>` it receives\n(listed in its `path_afferents <Port.path_afferents>` attribute), modulates them in response to any\n`GatingProjections <GatingProjection>` (listed in its `mod_afferents <Port.mod_afferents>` attribute),\nand then assigns the result to the InputPort's `value <InputPort.value>` attribute. This, in turn, is assigned to\nthe item of the Mechanism's `variable <Mechanism_Base.variable>` and `input_values <Mechanism_Base.input_values>`\nattributes corresponding to that InputPort (see `Mechanism Variable and InputPorts\n<Mechanism_Variable_and_InputPorts>` for additional details).\n\n.. 
_InputPort_Class_Reference:\n\nClass Reference\n---------------\n\n\"\"\"\nimport inspect\nimport numbers\nimport warnings\n\nimport collections\nimport numpy as np\nimport typecheck as tc\n\nfrom psyneulink.core.components.component import DefaultsFlexibility\nfrom psyneulink.core.components.functions.function import Function\nfrom psyneulink.core.components.functions.nonstateful.combinationfunctions import CombinationFunction, LinearCombination\nfrom psyneulink.core.components.ports.outputport import OutputPort\nfrom psyneulink.core.components.ports.port import PortError, Port_Base, _instantiate_port_list, port_type_keywords\nfrom psyneulink.core.globals.context import ContextFlags, handle_external_context\nfrom psyneulink.core.globals.keywords import \\\n COMBINE, CONTROL_SIGNAL, EXPONENT, FUNCTION, GATING_SIGNAL, INPUT_PORT, INPUT_PORTS, INPUT_PORT_PARAMS, \\\n LEARNING_SIGNAL, MAPPING_PROJECTION, MATRIX, NAME, OPERATION, OUTPUT_PORT, OUTPUT_PORTS, OWNER,\\\n PARAMS, PRODUCT, PROJECTIONS, REFERENCE_VALUE, \\\n SENDER, SHADOW_INPUTS, SHADOW_INPUT_NAME, SIZE, PORT_TYPE, SUM, VALUE, VARIABLE, WEIGHT\nfrom psyneulink.core.globals.parameters import Parameter\nfrom psyneulink.core.globals.preferences.basepreferenceset import is_pref_set\nfrom psyneulink.core.globals.preferences.preferenceset import PreferenceLevel\nfrom psyneulink.core.globals.utilities import \\\n append_type_to_name, convert_to_np_array, is_numeric, iscompatible, kwCompatibilityLength, convert_to_list\n\n__all__ = [\n 'InputPort', 'InputPortError', 'port_type_keywords', 'SHADOW_INPUTS',\n]\n\nport_type_keywords = port_type_keywords.update({INPUT_PORT})\n\n# InputPortPreferenceSet = BasePreferenceSet(log_pref=logPrefTypeDefault,\n# reportOutput_pref=reportOutputPrefTypeDefault,\n# verbose_pref=verbosePrefTypeDefault,\n# param_validation_pref=paramValidationTypeDefault,\n# level=PreferenceLevel.TYPE,\n# name='InputPortClassPreferenceSet')\n\nWEIGHT_INDEX = 1\nEXPONENT_INDEX = 2\n\nDEFER_VARIABLE_SPEC_TO_MECH_MSG = \"InputPort variable not yet defined, defer to Mechanism\"\n\nclass InputPortError(Exception):\n def __init__(self, error_value):\n self.error_value = error_value\n\n def __str__(self):\n return repr(self.error_value)\n\n\nclass InputPort(Port_Base):\n \"\"\"\n InputPort( \\\n variable=None, \\\n reference_value=None, \\\n function=LinearCombination(operation=SUM), \\\n combine=None, \\\n projections=None, \\\n weight=None, \\\n exponent=None, \\\n internal_only=False)\n\n Subclass of `Port <Port>` that calculates and represents the input to a `Mechanism <Mechanism>` from one or more\n `PathwayProjections <PathwayProjection>`. 
See `Port_Class_Reference` for additional arguments and attributes.\n\n COMMENT:\n\n PortRegistry\n -------------\n All InputPorts are registered in PortRegistry, which maintains an entry for the subclass,\n a count for all instances of it, and a dictionary of those instances\n\n COMMENT\n\n Arguments\n ---------\n\n reference_value : number, list or np.ndarray\n the value of the item of the owner Mechanism's `variable <Mechanism_Base.variable>` attribute to which\n the InputPort is assigned; used as the template for the InputPort's `value <InputPort.value>` attribute.\n\n variable : number, list or np.ndarray\n specifies the shape of the InputPort's `variable <InputPort.variable>`, which may be used to define the\n shape of the `matrix <MappingProjection.matrix>` parameter of the `MappingProjection` that projects to the\n Inputport (see `InputPort_Variable` for additional details).\n\n function : Function or method : default LinearCombination(operation=SUM)\n specifies the function applied to the variable. The default value combines the `values\n <Projection_Base.value>` of the `Projections <Projection>` received by the InputPort. Any function\n can be assigned, however: a) it must produce a result that has the same format (number and type of elements)\n as the item of its owner Mechanism's `variable <Mechanism_Base.variable>` to which the InputPort has been\n assigned; b) if it is not a CombinationFunction, it may produce unpredictable results if the InputPort\n receives more than one Projection (see `function <InputPort.function>`.\n\n combine : SUM or PRODUCT : default None\n specifies the **operation** argument used by the default `LinearCombination` function, which determines how the\n `value <Projection_Base.value>` of the InputPort's `projections <Port.projections>` are combined. This is a\n convenience argument, that allows the **operation** to be specified without having to specify the\n LinearCombination function; it assumes that LinearCombination (the default) is used as the InputPort's function\n -- if it conflicts with a specification of **function** an error is generated.\n\n projections : list of Projection specifications\n specifies the `MappingProjection(s) <MappingProjection>`, `ControlProjection(s) <ControlProjection>` and/or\n `GatingProjection(s) <GatingProjection>` to be received by the InputPort, and that are listed in its\n `path_afferents <Port.path_afferents>` and `mod_afferents <Port.mod_afferents>` attributes,\n respectively (see `InputPort_Compatability_and_Constraints` for additional details). 
If **projections** but\n neither **variable** nor **size** are specified, then the `value <Projection_Base.value>` of the Projection(s)\n or their `senders <Projection_Base.sender>` specified in **projections** argument are used to determine the\n InputPort's `variable <InputPort.variable>`.\n\n weight : number : default 1\n specifies the value of the `weight <InputPort.weight>` attribute of the InputPort.\n\n exponent : number : default 1\n specifies the value of the `exponent <InputPort.exponent>` attribute of the InputPort.\n\n internal_only : bool : False\n specifies whether the InputPort requires external input when its `owner <Port.owner>` is the `INPUT`\n `Node <Composition_Nodes>` of a `Composition (see `internal_only <InputPort.internal_only>` for details).\n\n Attributes\n ----------\n\n variable : value, list or np.ndarray\n the template for the `value <Projection_Base.value>` of each Projection that the InputPort receives,\n each of which must match the format (number and types of elements) of the InputPort's\n `variable <InputPort.variable>`. If neither the **variable** or **size** argument is specified, and\n **projections** is specified, then `variable <InputPort.variable>` is assigned the `value\n <Projection_Base.value>` of the Projection(s) or its `sender <Projection_Base.sender>`.\n\n function : Function\n If it is a `CombinationFunction`, it combines the `values <Projection_Base.value>` of the `PathwayProjections\n <PathwayProjection>` (e.g., `MappingProjections <MappingProjection>`) received by the InputPort (listed in\n its `path_afferents <Port.path_afferents>` attribute), under the possible influence of `GatingProjections\n <GatingProjection>` received by the InputPort (listed in its `mod_afferents <Port.mod_afferents>` attribute).\n The result is assigned to the InputPort's `value <InputPort.value>` attribute. For example, the default\n (`LinearCombination` with *SUM* as it **operation**) performs an element-wise (Hadamard) sum of its Projection\n `values <Projection_Base.value>`, and assigns to `value <InputPort.value>` an array that is of the same length\n as each of the Projection `values <Projection_Base.value>`. If the InputPort receives only one Projection,\n then any other function can be applied and it will generate a value that is the same length as the Projection's\n `value <Projection_Base.value>`. However, if the InputPort receives more than one Projection and uses a function\n other than a CombinationFunction, a warning is generated and only the `value <Projection_Base.value>` of the\n first Projection list in `path_afferents <Port.path_afferents>` is used by the function, which may generate\n unexpected results when executing the Mechanism or Composition to which it belongs.\n\n value : value or ndarray\n the output of the InputPort's `function <InputPort.function>`, that is assigned to an item of the owner\n Mechanism's `variable <Mechanism_Base.variable>` attribute.\n\n label : string or number\n the string label that represents the current `value <InputPort.value>` of the InputPort, according to the\n owner mechanism's `input_labels_dict <Mechanism.input_labels_dict>`. 
If the current `value <InputPort.value>`\n of the InputPort does not have a corresponding label, then the numeric `value <InputPort.value>` is returned.\n\n weight : number\n see `weight and exponent <InputPort_Weights_And_Exponents>` for description.\n\n exponent : number\n see `weight and exponent <InputPort_Weights_And_Exponents>` for description.\n\n internal_only : bool\n determines whether `input from a Composition <Composition_Execution_Input>` must be specified for this\n InputPort from a Composition's `execution method <Composition_Execution_Method>` if the InputPort's `owner\n <Port.owner>` is an `INPUT` `Node <Composition_Nodes>` of that Composition; if `True`, external input is\n *not* required or allowed.\n\n shadow_inputs : InputPort\n identifies the InputPort of another `Mechanism` that is being shadowed by this InputPort.\n\n name : str\n the name of the InputPort; if it is not specified in the **name** argument of the constructor, a default is\n assigned by the InputPortRegistry of the Mechanism to which the InputPort belongs. Note that some Mechanisms\n automatically create one or more non-default InputPorts, that have pre-specified names. However, if any\n InputPorts are specified in the **input_ports** argument of the Mechanism's constructor, those replace those\n InputPorts (see `note <Mechanism_Default_Port_Suppression_Note>`), and `standard naming conventions\n <Registry_Naming>` apply to the InputPorts specified, as well as any that are added to the Mechanism once it\n is created (see `note <Port_Naming_Note>`).\n\n \"\"\"\n\n #region CLASS ATTRIBUTES\n\n componentType = INPUT_PORT\n paramsType = INPUT_PORT_PARAMS\n\n portAttributes = Port_Base.portAttributes | {WEIGHT, EXPONENT}\n\n connectsWith = [OUTPUT_PORT,\n LEARNING_SIGNAL,\n GATING_SIGNAL,\n CONTROL_SIGNAL\n ]\n connectsWithAttribute = [OUTPUT_PORTS]\n projectionSocket = SENDER\n modulators = [GATING_SIGNAL, CONTROL_SIGNAL]\n canReceive = modulators + [MAPPING_PROJECTION]\n projection_type = MAPPING_PROJECTION\n\n classPreferenceLevel = PreferenceLevel.TYPE\n # Any preferences specified below will override those specified in TYPE_DEFAULT_PREFERENCES\n # Note: only need to specify setting; level will be assigned to TYPE automatically\n # classPreferences = {\n # PREFERENCE_SET_NAME: 'InputPortCustomClassPreferences',\n # PREFERENCE_KEYWORD<pref>: <setting>...}\n\n # Note: the following enforce encoding as 1D np.ndarrays (one variable/value array per port)\n variableEncodingDim = 1\n valueEncodingDim = 1\n\n class Parameters(Port_Base.Parameters):\n \"\"\"\n Attributes\n ----------\n\n combine\n see `combine <InputPort.combine>`\n\n :default value: None\n :type:\n\n exponent\n see `exponent <InputPort.exponent>`\n\n :default value: None\n :type:\n\n function\n see `function <InputPort_Function>`\n\n :default value: `LinearCombination`\n :type: `Function`\n\n internal_only\n see `internal_only <InputPort.internal_only>`\n\n :default value: False\n :type: ``bool``\n\n shadow_inputs\n specifies whether InputPort shadows inputs of another InputPort;\n if not None, must be assigned another InputPort\n\n :default value: None\n :type:\n :read only: True\n\n weight\n see `weight <InputPort.weight>`\n\n :default value: None\n :type:\n \"\"\"\n function = Parameter(LinearCombination(operation=SUM), stateful=False, loggable=False)\n weight = Parameter(None, modulable=True)\n exponent = Parameter(None, modulable=True)\n combine = None\n internal_only = Parameter(False, stateful=False, loggable=False, pnl_internal=True)\n 
shadow_inputs = Parameter(None, stateful=False, loggable=False, read_only=True, pnl_internal=True, structural=True)\n\n #endregion\n\n @handle_external_context()\n @tc.typecheck\n def __init__(self,\n owner=None,\n reference_value=None,\n variable=None,\n size=None,\n function=None,\n projections=None,\n combine:tc.optional(tc.enum(SUM,PRODUCT))=None,\n weight=None,\n exponent=None,\n internal_only: tc.optional(bool) = None,\n params=None,\n name=None,\n prefs:is_pref_set=None,\n context=None,\n **kwargs):\n\n if variable is None and size is None and projections is not None:\n variable = self._assign_variable_from_projection(variable, size, projections)\n\n # If combine argument is specified, save it along with any user-specified function for _validate_params()\n if combine:\n self.combine_function_args = (combine, function)\n\n # If owner or reference_value has not been assigned, defer init to Port._instantiate_projection()\n # if owner is None or (variable is None and reference_value is None and projections is None):\n if owner is None:\n # Temporarily name InputPort\n self._assign_deferred_init_name(name)\n # Store args for deferred initialization\n self._store_deferred_init_args(**locals())\n\n # Flag for deferred initialization\n self.initialization_status = ContextFlags.DEFERRED_INIT\n return\n\n self.reference_value = reference_value\n\n # Validate sender (as variable) and params, and assign to variable\n # Note: pass name of owner (to override assignment of componentName in super.__init__)\n super(InputPort, self).__init__(\n owner,\n variable=variable,\n size=size,\n projections=projections,\n function=function,\n weight=weight,\n exponent=exponent,\n internal_only=internal_only,\n shadow_inputs=None,\n params=params,\n name=name,\n prefs=prefs,\n context=context,\n )\n\n if self.name is self.componentName or self.componentName + '-' in self.name:\n self._assign_default_port_Name()\n\n def _assign_variable_from_projection(self, variable, size, projections):\n \"\"\"Assign variable to value of Projection in projections\n \"\"\"\n from psyneulink.core.components.projections.projection import \\\n Projection, _parse_connection_specs\n\n if not isinstance(projections, list):\n projections = [projections]\n\n # Use only first specification in the list returned, and assume any others are the same size\n # (which they must be); leave validation of this to _instantiate_projections_to_port\n proj_spec = _parse_connection_specs(InputPort, self, projections)[0]\n\n if isinstance(proj_spec.projection, Projection):\n variable = proj_spec.projection.defaults.value\n elif isinstance(proj_spec.port, OutputPort):\n variable = proj_spec.port.defaults.value\n else:\n raise InputPortError(f\"Unrecognized specification for \\'{PROJECTIONS}\\' arg of {self.name}.\")\n\n return variable\n\n def _validate_params(self, request_set, target_set=None, context=None):\n \"\"\"Validate weights and exponents\n\n This needs to be done here\n (so that they can be ignored if not specified here or in the function)\n \"\"\"\n\n super()._validate_params(request_set=request_set, target_set=target_set, context=context)\n\n # Make sure **combine** and **function** args specified in constructor don't conflict\n if hasattr(self, 'combine_function_args'):\n combine, function = self.combine_function_args\n if function:\n owner_name = \"\"\n if self.owner:\n owner_name = f\" for InputPort of {self.owner.name}.\"\n if isinstance(function, LinearCombination):\n # specification of combine conflicts with operation specified for 
LinearCombination in function arg\n if function.operation != combine:\n raise InputPortError(f\"Specification of {repr(COMBINE)} argument ({combine.upper()}) \"\n f\"conflicts with specification of {repr(OPERATION)} \"\n f\"({function.operation.upper()}) for LinearCombination in \"\n f\"{repr(FUNCTION)} argument{owner_name}.\")\n else:\n # LinearFunction has been specified with same operation as specified for combine,\n # so delete combine_function_args attribute so it is not seen in _instantiate_function\n # in order to leave function intact (as it may have other parameters specified by user)\n del self.combine_function_args\n # combine assumes LinearCombination, but Function other than LinearCombination specified for function\n elif isinstance(function, Function):\n raise InputPortError(f\"Specification of {repr(COMBINE)} argument ({combine.upper()}) \"\n f\"conflicts with Function specified in {repr(FUNCTION)} argument \"\n f\"({function.name}){owner_name}.\")\n # combine assumes LinearCombination, but class other than LinearCombination specified for function\n elif isinstance(function, type):\n if not issubclass(function, LinearCombination):\n raise InputPortError(f\"Specification of {repr(COMBINE)} argument ({combine.upper()}) \"\n f\"conflicts with Function specified in {repr(FUNCTION)} argument \"\n f\"({function.__name__}){owner_name}.\")\n else:\n raise InputPortError(f\"PROGRAM ERROR: unrecognized specification for function argument \"\n f\"({function}){owner_name}.\")\n\n if WEIGHT in target_set and target_set[WEIGHT] is not None:\n if not isinstance(target_set[WEIGHT], (int, float)):\n raise InputPortError(f\"'{WEIGHT}' parameter of {self.name} for {self.owner.name} \"\n f\"({target_set[WEIGHT]}) must be an int or float.\")\n\n if EXPONENT in target_set and target_set[EXPONENT] is not None:\n if not isinstance(target_set[EXPONENT], (int, float)):\n raise InputPortError(f\"'{EXPONENT}' parameter of {self.name} for {self.owner.name}\"\n f\"({ target_set[EXPONENT]}) must be an int or float.\")\n\n def _validate_against_reference_value(self, reference_value):\n \"\"\"Validate that Port.value is compatible with reference_value\n\n reference_value is the item of the owner Mechanism's variable to which the InputPort is assigned\n \"\"\"\n match_len_option = {kwCompatibilityLength:False}\n if reference_value is not None and not iscompatible(reference_value, self.defaults.value, **match_len_option):\n name = self.name or \"\"\n raise InputPortError(f\"Value specified for {name} {self.componentName} of {self.owner.name} \"\n f\"({self.defaults.value}) is not compatible with its expected format \"\n f\"({reference_value}).\")\n\n def _instantiate_function(self, function, function_params=None, context=None):\n \"\"\"If combine option was specified in constructor, assign as operation argument of LinearCombination function\"\"\"\n if hasattr(self, 'combine_function_args'):\n function = LinearCombination(operation=self.combine_function_args[0])\n del self.combine_function_args\n super()._instantiate_function(function=function, context=context)\n self._use_1d_variable = False\n if not isinstance(self.function, CombinationFunction):\n self._use_1d_variable = True\n self.function._variable_shape_flexibility = DefaultsFlexibility.RIGID\n else:\n self.function._variable_shape_flexibility = DefaultsFlexibility.FLEXIBLE\n\n def _instantiate_projections(self, projections, context=None):\n \"\"\"Instantiate Projections specified in PROJECTIONS entry of params arg of Port's constructor\n\n Call 
_instantiate_projections_to_port to assign:\n PathwayProjections to .path_afferents\n ModulatoryProjections to .mod_afferents\n \"\"\"\n self._instantiate_projections_to_port(projections=projections, context=context)\n\n def _check_for_duplicate_projections(self, projection):\n \"\"\"Check if projection is redundant with one in path_afferents of InputPort\n\n Check for any instantiated projection in path_afferents with the same sender as projection\n or one in deferred_init status with sender specification that is the same type as projection.\n\n Returns redundant Projection if found, otherwise False.\n \"\"\"\n\n try:\n self.path_afferents\n except:\n if self.initialization_status == ContextFlags.DEFERRED_INIT:\n raise InputPortError(f\"Attempt to assign Projection ('{projection}') \"\n f\"to InputPort ('{self.name}') that is in deferred init\")\n else:\n raise InputPortError(f\"No 'path_afferents' for {self.name}\")\n\n # FIX: 7/22/19 - CHECK IF SENDER IS SPECIFIED AS MECHANISM AND, IF SO, CHECK ITS PRIMARY_OUTPUT_PORT\n duplicate = next(iter([proj for proj in self.path_afferents\n if ((proj.sender == projection.sender and proj != projection)\n or (proj.initialization_status == ContextFlags.DEFERRED_INIT\n and proj._init_args[SENDER] == type(projection.sender)))]), None)\n if duplicate and self.verbosePref or self.owner.verbosePref:\n from psyneulink.core.components.projections.projection import Projection\n warnings.warn(f'{Projection.__name__} from {projection.sender.name} {projection.sender.__class__.__name__}'\n f' of {projection.sender.owner.name} to {self.name} {self.__class__.__name__} of '\n f'{self.owner.name} already exists; will ignore additional one specified ({projection.name}).')\n return duplicate\n\n def _parse_function_variable(self, variable, context=None):\n variable = super()._parse_function_variable(variable, context)\n try:\n if self._use_1d_variable and variable.ndim > 1:\n return np.array(variable[0])\n except AttributeError:\n pass\n return variable\n\n def _get_variable_from_projections(self, context=None):\n \"\"\"\n Call self.function with self._path_proj_values\n\n If variable is None there are no active PathwayProjections in the Composition being run,\n return None so that it is ignored in execute method (i.e., not combined with base_value)\n \"\"\"\n # Check for Projections that are active in the Composition being run\n path_proj_values = [\n proj.parameters.value._get(context)\n for proj in self.path_afferents\n if self.afferents_info[proj].is_active_in_composition(context.composition)\n ]\n\n if len(path_proj_values) > 0:\n return convert_to_np_array(path_proj_values)\n else:\n return None\n\n def _get_primary_port(self, mechanism):\n return mechanism.input_port\n\n @tc.typecheck\n def _parse_port_specific_specs(self, owner, port_dict, port_specific_spec):\n \"\"\"Get weights, exponents and/or any connections specified in an InputPort specification tuple\n\n Tuple specification can be:\n (port_spec, connections)\n (port_spec, weights, exponents, connections)\n\n See Port._parse_port_specific_spec for additional info.\n\n Returns:\n - port_spec: 1st item of tuple if it is a numeric value; otherwise None\n - params dict with WEIGHT, EXPONENT and/or PROJECTIONS entries if any of these was specified.\n\n \"\"\"\n # FIX: ADD FACILITY TO SPECIFY WEIGHTS AND/OR EXPONENTS FOR INDIVIDUAL OutputPort SPECS\n # CHANGE EXPECTATION OF *PROJECTIONS* ENTRY TO BE A SET OF TUPLES WITH THE WEIGHT AND EXPONENT FOR IT\n # THESE CAN BE USED BY THE InputPort's LinearCombination 
Function\n # (AKIN TO HOW THE MECHANISM'S FUNCTION COMBINES InputPort VALUES)\n # THIS WOULD ALLOW AN ADDITIONAL HIERARCHICAL LEVEL FOR NESTING ALGEBRAIC COMBINATION OF INPUT VALUES\n # TO A MECHANISM\n from psyneulink.core.components.projections.projection import Projection, _parse_connection_specs\n\n params_dict = {}\n port_spec = port_specific_spec\n\n if isinstance(port_specific_spec, dict):\n # FIX: 10/3/17 - CHECK HERE THAT, IF MECHANISM ENTRY IS USED, A VARIABLE, WEIGHT AND/OR EXPONENT ENTRY\n # FIX: IS APPLIED TO ALL THE OutputPorts SPECIFIED IN OUTPUT_PORTS\n # FIX: UNLESS THEY THEMSELVES USE A Port specification dict WITH ANY OF THOSE ENTRIES\n # FIX: USE ObjectiveMechanism EXAMPLES\n # if MECHANISM in port_specific_spec:\n # if OUTPUT_PORTS in port_specific_spec\n if SIZE in port_specific_spec:\n if (VARIABLE in port_specific_spec or\n any(key in port_dict and port_dict[key] is not None for key in {VARIABLE, SIZE})):\n raise InputPortError(f\"PROGRAM ERROR: SIZE specification found in port_specific_spec dict \"\n f\"for {self.__name__} specification of {owner.name} when SIZE or VARIABLE \"\n f\"is already present in its port_specific_spec dict or port_dict.\")\n port_dict.update({VARIABLE:np.zeros(port_specific_spec[SIZE])})\n del port_specific_spec[SIZE]\n return port_dict, port_specific_spec\n return None, port_specific_spec\n\n elif isinstance(port_specific_spec, tuple):\n\n # GET PORT_SPEC AND ASSIGN PROJECTIONS_SPEC **********************************************************\n\n tuple_spec = port_specific_spec\n\n # 2-item tuple specification\n if len(tuple_spec) == 2:\n\n # 1st item is a value, so treat as Port spec (and return to _parse_port_spec to be parsed)\n # and treat 2nd item as Projection specification\n if is_numeric(tuple_spec[0]):\n port_spec = tuple_spec[0]\n reference_value = port_dict[REFERENCE_VALUE]\n # Assign value so sender_dim is skipped below\n # (actual assignment is made in _parse_port_spec)\n if reference_value is None:\n port_dict[REFERENCE_VALUE]=port_spec\n elif not iscompatible(port_spec, reference_value):\n raise PortError(f\"Value in first item of 2-item tuple specification {InputPort.__name__} of \"\n f\"{owner.name} ({port_spec}) is not compatible with its {REFERENCE_VALUE} \"\n f\"({reference_value}).\")\n projections_spec = tuple_spec[1]\n\n # Tuple is Projection specification that is used to specify the Port,\n else:\n # return None in port_spec to suppress further, recursive parsing of it in _parse_port_spec\n port_spec = None\n if tuple_spec[0] != self:\n # If 1st item is not the current port (self), treat as part of the projection specification\n projections_spec = tuple_spec\n else:\n # Otherwise, just use 2nd item as projection spec\n port_spec = None\n projections_spec = tuple_spec[1]\n\n # 3- or 4-item tuple specification\n elif len(tuple_spec) in {3,4}:\n # Tuple is projection specification that is used to specify the Port,\n # so return None in port_spec to suppress further, recursive parsing of it in _parse_port_spec\n port_spec = None\n # Reduce to 2-item tuple Projection specification\n projection_item = tuple_spec[3] if len(tuple_spec)==4 else None\n projections_spec = (tuple_spec[0],projection_item)\n\n\n # GET PROJECTIONS IF SPECIFIED *************************************************************************\n\n try:\n projections_spec\n except UnboundLocalError:\n pass\n else:\n try:\n params_dict[PROJECTIONS] = _parse_connection_specs(self,\n owner=owner,\n connections=projections_spec)\n # Parse the value of all of the 
Projections to get/validate variable for InputPort\n variable = []\n for projection_spec in params_dict[PROJECTIONS]:\n # FIX: 10/3/17 - PUTTING THIS HERE IS A HACK...\n # FIX: MOVE TO _parse_port_spec UNDER PROCESSING OF ProjectionTuple SPEC\n # FIX: USING _get_port_for_socket\n # from psyneulink.core.components.projections.projection import _parse_projection_spec\n\n # Try to get matrix for projection\n try:\n sender_dim = np.array(projection_spec.port.value).ndim\n except AttributeError as e:\n if (isinstance(projection_spec.port, type) or\n projection_spec.port.initialization_status == ContextFlags.DEFERRED_INIT):\n continue\n else:\n raise PortError(f\"PROGRAM ERROR: indeterminate value for {projection_spec.port.name} \"\n f\"specified to project to {self.__name__} of {owner.name}.\")\n\n projection = projection_spec.projection\n if isinstance(projection, dict):\n # Don't try to get MATRIX from projection without checking,\n # since projection is a defaultDict,\n # which will add a matrix entry and assign it to None if it is not there\n if MATRIX in projection:\n matrix = projection[MATRIX]\n else:\n matrix = None\n elif isinstance(projection, Projection):\n if projection.initialization_status == ContextFlags.DEFERRED_INIT:\n continue\n # possible needs to be projection.defaults.matrix?\n matrix = projection.matrix\n else:\n raise InputPortError(f\"Unrecognized Projection specification for {self.name} of \"\n f\"{owner.name} ({projection_spec}).\")\n\n # Determine length of value of projection\n if matrix is None:\n # If a reference_value has been specified, it presumably represents the item of the\n # owner Mechanism's default_variable to which the InputPort corresponds,\n # so use that to constrain the InputPort's variable\n if port_dict[REFERENCE_VALUE] is not None:\n variable.append(port_dict[REFERENCE_VALUE])\n continue\n # If matrix has not been specified, no worries;\n # variable_item can be determined by value of sender\n sender_shape = np.array(projection_spec.port.value).shape\n variable_item = np.zeros(sender_shape)\n # If variable_item HASN'T been specified, or it is same shape as any previous ones,\n # use sender's value\n if ((VARIABLE not in port_dict or port_dict[VARIABLE] is None) and\n (not variable or variable_item.shape == variable[0].shape)):\n # port_dict[VARIABLE] = variable\n variable.append(variable_item)\n # If variable HAS been assigned, make sure value is the same for this sender\n elif np.array(port_dict[VARIABLE]).shape != variable_item.shape:\n # If values for senders differ, assign None so that Port's default is used\n variable = None\n # No need to check any more Projections\n break\n\n # Remove dimensionality of sender OutputPort, and assume that is what receiver will receive\n else:\n proj_val_shape = matrix.shape[sender_dim :]\n # port_dict[VARIABLE] = np.zeros(proj_val_shape)\n variable.append(np.zeros(proj_val_shape))\n # Sender's value has not been defined or senders have values of different lengths,\n if not variable:\n # If reference_value was provided, use that as the InputPort's variable\n # (i.e., assume its function won't transform it)\n if REFERENCE_VALUE in port_dict and port_dict[REFERENCE_VALUE] is not None:\n port_dict[VARIABLE] = port_dict[REFERENCE_VALUE]\n # Nothing to use as variable, so raise exception and allow it to be handled \"above\"\n else:\n raise AttributeError(DEFER_VARIABLE_SPEC_TO_MECH_MSG)\n else:\n port_dict[VARIABLE] = variable\n\n except InputPortError:\n raise InputPortError(f\"Tuple specification in 
{InputPort.__name__} specification dictionary for \"\n f\"{owner.name} ({projections_spec}) is not a recognized specification for \"\n f\"one or more Mechanisms, {OutputPort.__name__}s, or {Projection.__name__}s \"\n f\"that project to it.\")\n\n # GET WEIGHT AND EXPONENT IF SPECIFIED ***************************************************************\n\n if len(tuple_spec) == 2:\n pass\n\n # Tuple is (spec, weights, exponents<, afferent_source_spec>),\n # for specification of weights and exponents, + connection(s) (afferent projection(s)) to InputPort\n elif len(tuple_spec) in {3, 4}:\n\n weight = tuple_spec[WEIGHT_INDEX]\n exponent = tuple_spec[EXPONENT_INDEX]\n\n if weight is not None and not isinstance(weight, numbers.Number):\n raise InputPortError(f\"Specification of the weight ({weight}) in tuple of {InputPort.__name__} \"\n f\"specification dictionary for {owner.name} must be a number.\")\n params_dict[WEIGHT] = weight\n\n if exponent is not None and not isinstance(exponent, numbers.Number):\n raise InputPortError(f\"Specification of the exponent ({exponent}) in tuple of {InputPort.__name__} \"\n f\"specification dictionary for {owner.name} must be a number.\")\n params_dict[EXPONENT] = exponent\n\n else:\n raise PortError(f\"Tuple provided as port_spec for {InputPort.__name__} of {owner.name} \"\n f\"({tuple_spec}) must have either 2, 3 or 4 items.\")\n\n elif port_specific_spec is not None:\n raise InputPortError(f\"PROGRAM ERROR: Expected tuple or dict for {self.__class__.__name__}-specific params \"\n f\"but, got: {port_specific_spec}.\")\n\n return port_spec, params_dict\n\n def _parse_self_port_type_spec(self, owner, input_port, context=None):\n \"\"\"Return InputPort specification dictionary with projections that shadow inputs to input_port\n\n Called by _parse_port_spec if InputPort specified for a Mechanism belongs to a different Mechanism\n \"\"\"\n\n if not isinstance(input_port, InputPort):\n raise InputPortError(f\"PROGRAM ERROR: InputPort._parse_self_port_type called \"\n f\"with non-InputPort specification ({input_port}).\")\n\n sender_output_ports = [p.sender for p in input_port.path_afferents]\n port_spec = {NAME: SHADOW_INPUT_NAME + input_port.owner.name,\n VARIABLE: np.zeros_like(input_port.variable),\n PORT_TYPE: InputPort,\n PROJECTIONS: sender_output_ports,\n PARAMS: {SHADOW_INPUTS: input_port},\n OWNER: owner}\n return port_spec\n\n @staticmethod\n def _port_spec_allows_override_variable(spec):\n \"\"\"\n Returns\n -------\n True - if **spec** outlines a spec for creating an InputPort whose variable can be\n overridden by a default_variable or size argument\n False - otherwise\n\n ex: specifying an InputPort with a Mechanism allows overriding\n \"\"\"\n from psyneulink.core.components.mechanisms.mechanism import Mechanism\n\n if isinstance(spec, Mechanism):\n return True\n if isinstance(spec, collections.abc.Iterable):\n # generally 2-4 tuple spec, but allows list spec\n for item in spec:\n if isinstance(item, Mechanism):\n return True\n # handles tuple spec where first item of tuple is itself a (name, Mechanism) tuple\n elif (\n isinstance(item, collections.abc.Iterable)\n and len(item) >= 2\n and isinstance(item[1], Mechanism)\n ):\n return True\n\n return False\n\n @property\n def pathway_projections(self):\n return self.path_afferents\n\n @pathway_projections.setter\n def pathway_projections(self, assignment):\n self.path_afferents = assignment\n\n @property\n def socket_width(self):\n return self.defaults.variable.shape[-1]\n\n @property\n def 
socket_template(self):\n return np.zeros(self.socket_width)\n\n @property\n def label(self):\n return self.get_label()\n\n def get_label(self, context=None):\n try:\n label_dictionary = self.owner.input_labels_dict\n except AttributeError:\n label_dictionary = {}\n return self._get_value_label(label_dictionary, self.owner.input_ports, context=context)\n\n @property\n def position_in_mechanism(self):\n if hasattr(self, \"owner\"):\n if self.owner is not None:\n return self.owner.get_input_port_position(self)\n else:\n return None\n return None\n\n @staticmethod\n def _get_port_function_value(owner, function, variable):\n \"\"\"Put InputPort's variable in a list if its function is LinearCombination and variable is >=2d\n\n InputPort variable must be embedded in a list so that LinearCombination (its default function)\n returns a variable that is >=2d intact (rather than as arrays to be combined);\n this is normally done in port._update() (and in Port._instantiate-function), but that\n can't be called by _parse_port_spec since the InputPort itself may not yet have been instantiated.\n\n \"\"\"\n\n if (\n (\n (inspect.isclass(function) and issubclass(function, LinearCombination))\n or isinstance(function, LinearCombination)\n )\n and isinstance(variable, np.matrix)\n ):\n variable = [variable]\n\n # if function is None, use Port's default function\n function = function or InputPort.defaults.function\n\n return Port_Base._get_port_function_value(owner=owner, function=function, variable=variable)\n\n\ndef _instantiate_input_ports(owner, input_ports=None, reference_value=None, context=None):\n \"\"\"Call Port._instantiate_port_list() to instantiate ContentAddressableList of InputPort(s)\n\n Create ContentAddressableList of InputPort(s) specified in self.input_ports\n\n If input_ports is not specified:\n - use owner.input_ports as list of InputPort specifications\n - if owner.input_ports is empty, user owner.defaults.variable to create a default InputPort\n\n When completed:\n - self.input_ports contains a ContentAddressableList of one or more input_ports\n - self.input_port contains the `primary InputPort <InputPort_Primary>`: first or only one in input_ports\n - self.input_ports contains the same ContentAddressableList (of one or more input_ports)\n - each InputPort corresponds to an item in the variable of the owner's function\n - the value of all of the input_ports is stored in a list in input_value\n - if there is only one InputPort, it is assigned the full value\n\n Note: Port._instantiate_port_list()\n parses self.defaults.variable (2D np.array, passed in reference_value)\n into individual 1D arrays, one for each InputPort\n\n (See Port._instantiate_port_list() for additional details)\n\n Returns list of instantiated InputPorts\n \"\"\"\n\n # This allows method to be called by Mechanism.add_input_ports() with set of user-specified input_ports,\n # while calls from init_methods continue to use owner.input_ports (i.e., InputPort specifications\n # assigned in the **input_ports** argument of the Mechanism's constructor)\n input_ports = input_ports or owner.input_ports\n\n # Parse any SHADOW_INPUTS specs into actual InputPorts to be shadowed\n if input_ports is not None:\n input_ports = _parse_shadow_inputs(owner, input_ports)\n\n port_list = _instantiate_port_list(owner=owner,\n port_list=input_ports,\n port_types=InputPort,\n port_Param_identifier=INPUT_PORT,\n reference_value=reference_value if reference_value is not None\n else owner.defaults.variable,\n reference_value_name=VALUE,\n 
context=context)\n\n # If called from Mechanism.add_ports(), add to (rather than replace) input_ports\n if context.source & (ContextFlags.METHOD | ContextFlags.COMMAND_LINE):\n owner.input_ports.extend(port_list)\n else:\n owner.parameters.input_ports._set(port_list, context)\n\n # Assign value of require_projection_in_composition\n for port in owner.input_ports:\n # Assign True for owner's primary InputPort if the value has not already been set in InputPort constructor\n if port.require_projection_in_composition is None and owner.input_port == port:\n port.parameters.require_projection_in_composition._set(True, context)\n\n # Check that number of input_ports and their variables are consistent with owner.defaults.variable,\n # and adjust the latter if not\n variable_item_is_OK = False\n for i, input_port in enumerate(owner.input_ports):\n try:\n variable_item_is_OK = iscompatible(owner.defaults.variable[i], input_port.defaults.value)\n if not variable_item_is_OK:\n break\n except IndexError:\n variable_item_is_OK = False\n break\n\n if not variable_item_is_OK:\n old_variable = owner.defaults.variable\n owner.defaults.variable = owner._handle_default_variable(default_variable=[port.defaults.value\n for port in owner.input_ports])\n\n if owner.verbosePref:\n warnings.warn(f\"Variable for {old_variable} ({append_type_to_name(owner)}) has been adjusted to match \"\n f\"number and format of its input_ports: ({owner.defaults.variable}).\")\n\n return port_list\n\ndef _parse_shadow_inputs(owner, input_ports):\n \"\"\"Parse any {SHADOW_INPUTS:[InputPort or Mechanism,...]} items in input_ports into InputPort specification dicts.\"\"\"\n\n input_ports = convert_to_list(input_ports)\n input_ports_to_shadow_specs = []\n for spec_idx, spec in enumerate(input_ports):\n # If {SHADOW_INPUTS:[InputPort or Mechanism,...]} is found:\n if isinstance(spec, dict) and SHADOW_INPUTS in spec:\n input_ports_to_shadow_in_spec = []\n # For each item in list of items to shadow specified in that entry:\n for item in list(spec[SHADOW_INPUTS]):\n from psyneulink.core.components.mechanisms.mechanism import Mechanism\n # If an InputPort was specified, just use that\n if isinstance(item, InputPort):\n input_ports_to_shadow_in_spec.append(item)\n # If a Mechanism was specified, use all of its InputPorts\n elif isinstance(item, Mechanism):\n input_ports_to_shadow_in_spec.extend(item.input_ports)\n else:\n raise InputPortError(f\"Specification of {repr(SHADOW_INPUTS)} for {repr(INPUT_PORTS)} arg of \"\n f\"{owner.name} must be a {Mechanism.__name__} or {InputPort.__name__}.\")\n input_ports_to_shadow_specs.append((spec_idx, input_ports_to_shadow_in_spec))\n\n # If any SHADOW_INPUTS specs were found in input_ports, replace them with actual InputPorts to be shadowed\n if input_ports_to_shadow_specs:\n for item in input_ports_to_shadow_specs:\n idx = item[0]\n del input_ports[idx]\n input_ports[idx:idx] = item[1]\n # Update owner's variable based on full set of InputPorts specified\n owner.defaults.variable, _ = owner._handle_arg_input_ports(input_ports)\n\n return input_ports\n" ]
[ [ "numpy.array", "numpy.zeros_like", "numpy.zeros" ] ]
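The `combine` convenience argument documented in the record above can be exercised as follows; this is a minimal sketch assuming a standard PsyNeuLink install (the `pnl` alias and the standalone construction are assumptions, not part of the record):

import psyneulink as pnl

# Sketch (assumes psyneulink is installed): an InputPort whose default
# LinearCombination function multiplies, rather than sums, its Projection values.
port = pnl.InputPort(name='MULTIPLICATIVE_INPUT', combine=pnl.PRODUCT)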
stsauter/TDX
[ "23fe06876ad36162ac13e6c7365399b4bde10deb" ]
[ "src/data/skew_normal_drift_stream.py" ]
[ "import numpy as np\n\nfrom typing import List\nfrom collections import namedtuple\nfrom sklearn.utils import check_random_state\nfrom src.data.base_drift_stream import BaseDriftStream\nfrom src.proba.skew_normal import SkewNormal\n\n\nclass SkewNormalDriftStream(BaseDriftStream):\n \"\"\" Base class for streams which consists of skew normal components.\n\n Parameters\n ----------\n n_samples : int\n Number of samples.\n n_segments: int\n Number of time segments.\n n_components: int\n Number of components the stream consists of.\n dist_support : array_like of shape (2,)\n List containing the minimum and maximum value which should be supported the skew normal distributions\n seed : None | int | instance of RandomState (default=None)\n Random number generator seed for reproducibility.\n \"\"\"\n\n def __init__(\n self,\n n_samples: int,\n n_segments: int,\n n_components: int,\n dist_support: List[int] = None,\n seed: int or np.random.RandomState = None\n ):\n super().__init__(n_samples, n_segments, n_components)\n self._location = np.array([])\n self._scale = np.array([])\n self._shape = np.array([])\n if dist_support is not None and len(dist_support) != 2:\n raise ValueError('Distribution support should be an array containing 2 elements')\n self._support = dist_support\n self._seed = check_random_state(seed)\n\n @property\n def dist_support(self):\n \"\"\"\n Return the distribution support.\n\n Returns\n -------\n array_like of shape (2,):\n List containing the minimum and maximum value which should be supported the skew normal distributions.\n \"\"\"\n return self._support\n\n def _generate_data(self):\n params = self._get_distribution_params()\n for i in range(self._n_segments):\n n_seg_samples = np.rint(self._n_samples * (self._mixture_coefs[:, i] * self._seg_data_per[i])).astype(int)\n x_s = np.array([])\n c_s = np.array([])\n t_s = np.array([])\n for j in range(self._n_components):\n pd = SkewNormal(params[j, i].xi, params[j, i].omega, params[j, i].alpha, self._seed)\n if self._support is not None:\n pd.truncate(self._support[0], self._support[1])\n sampled_x = pd.generate_random_numbers(n_seg_samples[j])\n self._pds[j, i] = pd\n x_s = np.append(x_s, sampled_x, axis=0)\n c_s = np.append(c_s, np.tile(j + 1, n_seg_samples[j]), axis=0)\n t_s = np.append(t_s, np.tile(i * self._segment_length, n_seg_samples[j]), axis=0)\n\n self._x = np.append(self._x, x_s, axis=0)\n self._c = np.append(self._c, c_s, axis=0).astype(int)\n self._t = np.append(self._t, t_s, axis=0)\n\n def _get_distribution_params(self):\n SkewNormalParams = namedtuple('SkewNormalParams', ['xi', 'omega', 'alpha'])\n params = np.empty(shape=(self._n_components, self._n_segments), dtype=object)\n for i in range(self._n_components):\n for j in range(self._n_segments):\n params[i, j] = SkewNormalParams(self._location[i, j], self._scale[i, j], self._shape[i, j])\n return params\n" ]
[ [ "numpy.array", "numpy.empty", "numpy.rint", "numpy.tile", "sklearn.utils.check_random_state", "numpy.append" ] ]
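The stream above samples each segment from (optionally truncated) skew normal components parameterized by location xi, scale omega, and shape alpha. As a hedged cross-check (the repo's own SkewNormal class is not re-implemented here), SciPy's skewnorm uses the same standard parameterization and can reproduce one component's draw:

from scipy.stats import skewnorm

# One component's draw under SciPy's parameterization: xi -> loc, omega -> scale, alpha -> a.
samples = skewnorm.rvs(a=4.0, loc=0.0, scale=1.0, size=1000, random_state=42)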
harunaabdu/Tensorflow-Models
[ "dc91f48b70ebb6e92deca7ce5bce24c2dfa30f90" ]
[ "official/vision/beta/modeling/backbones/factory_test.py" ]
[ "# Copyright 2022 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for factory functions.\"\"\"\n# Import libraries\nfrom absl.testing import parameterized\nimport tensorflow as tf\n\nfrom tensorflow.python.distribute import combinations\nfrom official.vision.beta.configs import backbones as backbones_cfg\nfrom official.vision.beta.configs import backbones_3d as backbones_3d_cfg\nfrom official.vision.beta.configs import common as common_cfg\nfrom official.vision.beta.modeling import backbones\nfrom official.vision.beta.modeling.backbones import factory\n\n\nclass FactoryTest(tf.test.TestCase, parameterized.TestCase):\n\n @combinations.generate(\n combinations.combine(model_id=[18, 34, 50, 101, 152],))\n def test_resnet_creation(self, model_id):\n \"\"\"Test creation of ResNet models.\"\"\"\n\n network = backbones.ResNet(\n model_id=model_id, se_ratio=0.0, norm_momentum=0.99, norm_epsilon=1e-5)\n\n backbone_config = backbones_cfg.Backbone(\n type='resnet',\n resnet=backbones_cfg.ResNet(model_id=model_id, se_ratio=0.0))\n norm_activation_config = common_cfg.NormActivation(\n norm_momentum=0.99, norm_epsilon=1e-5, use_sync_bn=False)\n\n factory_network = factory.build_backbone(\n input_specs=tf.keras.layers.InputSpec(shape=[None, None, None, 3]),\n backbone_config=backbone_config,\n norm_activation_config=norm_activation_config)\n\n network_config = network.get_config()\n factory_network_config = factory_network.get_config()\n\n self.assertEqual(network_config, factory_network_config)\n\n @combinations.generate(\n combinations.combine(\n model_id=['b0', 'b1', 'b2', 'b3', 'b4', 'b5', 'b6', 'b7'],\n se_ratio=[0.0, 0.25],\n ))\n def test_efficientnet_creation(self, model_id, se_ratio):\n \"\"\"Test creation of EfficientNet models.\"\"\"\n\n network = backbones.EfficientNet(\n model_id=model_id,\n se_ratio=se_ratio,\n norm_momentum=0.99,\n norm_epsilon=1e-5)\n\n backbone_config = backbones_cfg.Backbone(\n type='efficientnet',\n efficientnet=backbones_cfg.EfficientNet(\n model_id=model_id, se_ratio=se_ratio))\n norm_activation_config = common_cfg.NormActivation(\n norm_momentum=0.99, norm_epsilon=1e-5, use_sync_bn=False)\n\n factory_network = factory.build_backbone(\n input_specs=tf.keras.layers.InputSpec(shape=[None, None, None, 3]),\n backbone_config=backbone_config,\n norm_activation_config=norm_activation_config)\n\n network_config = network.get_config()\n factory_network_config = factory_network.get_config()\n\n self.assertEqual(network_config, factory_network_config)\n\n @combinations.generate(\n combinations.combine(\n model_id=['MobileNetV1', 'MobileNetV2',\n 'MobileNetV3Large', 'MobileNetV3Small',\n 'MobileNetV3EdgeTPU'],\n filter_size_scale=[1.0, 0.75],\n ))\n def test_mobilenet_creation(self, model_id, filter_size_scale):\n \"\"\"Test creation of Mobilenet models.\"\"\"\n\n network = backbones.MobileNet(\n model_id=model_id,\n filter_size_scale=filter_size_scale,\n norm_momentum=0.99,\n norm_epsilon=1e-5)\n\n backbone_config = 
backbones_cfg.Backbone(\n type='mobilenet',\n mobilenet=backbones_cfg.MobileNet(\n model_id=model_id, filter_size_scale=filter_size_scale))\n norm_activation_config = common_cfg.NormActivation(\n norm_momentum=0.99, norm_epsilon=1e-5, use_sync_bn=False)\n\n factory_network = factory.build_backbone(\n input_specs=tf.keras.layers.InputSpec(shape=[None, None, None, 3]),\n backbone_config=backbone_config,\n norm_activation_config=norm_activation_config)\n\n network_config = network.get_config()\n factory_network_config = factory_network.get_config()\n\n self.assertEqual(network_config, factory_network_config)\n\n @combinations.generate(combinations.combine(model_id=['49'],))\n def test_spinenet_creation(self, model_id):\n \"\"\"Test creation of SpineNet models.\"\"\"\n input_size = 128\n min_level = 3\n max_level = 7\n\n input_specs = tf.keras.layers.InputSpec(\n shape=[None, input_size, input_size, 3])\n network = backbones.SpineNet(\n input_specs=input_specs,\n min_level=min_level,\n max_level=max_level,\n norm_momentum=0.99,\n norm_epsilon=1e-5)\n\n backbone_config = backbones_cfg.Backbone(\n type='spinenet',\n spinenet=backbones_cfg.SpineNet(model_id=model_id))\n norm_activation_config = common_cfg.NormActivation(\n norm_momentum=0.99, norm_epsilon=1e-5, use_sync_bn=False)\n\n factory_network = factory.build_backbone(\n input_specs=tf.keras.layers.InputSpec(\n shape=[None, input_size, input_size, 3]),\n backbone_config=backbone_config,\n norm_activation_config=norm_activation_config)\n\n network_config = network.get_config()\n factory_network_config = factory_network.get_config()\n\n self.assertEqual(network_config, factory_network_config)\n\n @combinations.generate(\n combinations.combine(model_id=[38, 56, 104],))\n def test_revnet_creation(self, model_id):\n \"\"\"Test creation of RevNet models.\"\"\"\n network = backbones.RevNet(\n model_id=model_id, norm_momentum=0.99, norm_epsilon=1e-5)\n\n backbone_config = backbones_cfg.Backbone(\n type='revnet',\n revnet=backbones_cfg.RevNet(model_id=model_id))\n norm_activation_config = common_cfg.NormActivation(\n norm_momentum=0.99, norm_epsilon=1e-5, use_sync_bn=False)\n\n factory_network = factory.build_backbone(\n input_specs=tf.keras.layers.InputSpec(shape=[None, None, None, 3]),\n backbone_config=backbone_config,\n norm_activation_config=norm_activation_config)\n\n network_config = network.get_config()\n factory_network_config = factory_network.get_config()\n\n self.assertEqual(network_config, factory_network_config)\n\n @combinations.generate(combinations.combine(model_type=['resnet_3d'],))\n def test_resnet_3d_creation(self, model_type):\n \"\"\"Test creation of ResNet 3D models.\"\"\"\n backbone_cfg = backbones_3d_cfg.Backbone3D(type=model_type).get()\n temporal_strides = []\n temporal_kernel_sizes = []\n for block_spec in backbone_cfg.block_specs:\n temporal_strides.append(block_spec.temporal_strides)\n temporal_kernel_sizes.append(block_spec.temporal_kernel_sizes)\n\n _ = backbones.ResNet3D(\n model_id=backbone_cfg.model_id,\n temporal_strides=temporal_strides,\n temporal_kernel_sizes=temporal_kernel_sizes,\n norm_momentum=0.99,\n norm_epsilon=1e-5)\n\n @combinations.generate(\n combinations.combine(\n model_id=[\n 'MobileDetCPU',\n 'MobileDetDSP',\n 'MobileDetEdgeTPU',\n 'MobileDetGPU'],\n filter_size_scale=[1.0, 0.75],\n ))\n def test_mobiledet_creation(self, model_id, filter_size_scale):\n \"\"\"Test creation of Mobiledet models.\"\"\"\n\n network = backbones.MobileDet(\n model_id=model_id,\n filter_size_scale=filter_size_scale,\n 
norm_momentum=0.99,\n norm_epsilon=1e-5)\n\n backbone_config = backbones_cfg.Backbone(\n type='mobiledet',\n mobiledet=backbones_cfg.MobileDet(\n model_id=model_id, filter_size_scale=filter_size_scale))\n norm_activation_config = common_cfg.NormActivation(\n norm_momentum=0.99, norm_epsilon=1e-5, use_sync_bn=False)\n\n factory_network = factory.build_backbone(\n input_specs=tf.keras.layers.InputSpec(shape=[None, None, None, 3]),\n backbone_config=backbone_config,\n norm_activation_config=norm_activation_config)\n\n network_config = network.get_config()\n factory_network_config = factory_network.get_config()\n\n self.assertEqual(network_config, factory_network_config)\n\nif __name__ == '__main__':\n tf.test.main()\n" ]
[ [ "tensorflow.keras.layers.InputSpec", "tensorflow.python.distribute.combinations.combine", "tensorflow.test.main" ] ]
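Each test above compares a directly constructed backbone against one built by the factory. The factory call pattern they exercise looks like this, a sketch using only calls that appear verbatim in the record:

import tensorflow as tf
from official.vision.beta.configs import backbones as backbones_cfg
from official.vision.beta.configs import common as common_cfg
from official.vision.beta.modeling.backbones import factory

# Build a ResNet-50 backbone through the factory, mirroring test_resnet_creation above.
backbone = factory.build_backbone(
    input_specs=tf.keras.layers.InputSpec(shape=[None, None, None, 3]),
    backbone_config=backbones_cfg.Backbone(
        type='resnet', resnet=backbones_cfg.ResNet(model_id=50)),
    norm_activation_config=common_cfg.NormActivation(
        norm_momentum=0.99, norm_epsilon=1e-5, use_sync_bn=False))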
rafaelcostafrf/ros_quad_ufabc
[ "0be895851a2b3e8ed2b7ba292e1d9e6e3a93f5c1" ]
[ "src/mateus/quad_ros.py" ]
[ "import rospy\nimport numpy as np\nfrom gazebo_msgs.msg import ModelStates, ModelState\nfrom gazebo_msgs.srv import SetModelState\nfrom std_msgs.msg import Float64MultiArray, Float32\nfrom std_srvs.srv import Empty\nfrom sensor_msgs.msg import JointState\nfrom geometry_msgs.msg import Pose\nfrom scipy.spatial.transform import Rotation as rot\n\n\nclass quad_robot:\n def __init__(self, name):\n self.name = name\n\n # Rate of the controller\n self.rate = rospy.Rate(100)\n\n self.motor_vel = Float64MultiArray()\n\n self.PoseSub = rospy.Subscriber('/gazebo/model_states', ModelStates, self.pose_read, queue_size=1)\n self.jointSub = rospy.Subscriber('/'+self.name+'/joint_states', JointState, self.joint_read, queue_size=1)\n self.reset_command = rospy.ServiceProxy(\"/gazebo/set_model_state\", SetModelState)\n self.vel_pub = rospy.Publisher(name + '/joint_motor_controller/command', Float64MultiArray, queue_size=1)\n \n self.attitude_euler = np.zeros(3)\n self.attitude_quat = np.zeros(4)\n self.velocity = np.zeros(3)\n self.position = np.zeros(3)\n self.angular_vel = np.zeros(3)\n self.reset_command = rospy.ServiceProxy(\"/gazebo/set_model_state\", SetModelState)\n\n def joint_read(self, data):\n self.data = data\n self.prop_velocity = data.velocity\n\n def joint_pub(self, data):\n msg = JointState()\n msg.name = ['joint_back_left_prop', 'joint_back_right_prop', 'joint_front_left_prop', 'joint_front_right_prop']\n msg.velocity = [0, 0, 0, 0]\n self.jointPub.publish(msg)\n\n def step(self, motor_vel):\n self.motor_vel.data = motor_vel\n self.vel_pub.publish(self.motor_vel)\n\n\n def pose_read(self, data):\n \"\"\"\n Stores the position and attitude data on the class\n \"\"\"\n ind = data.name.index(self.name)\n orientationObj = data.pose[ind].orientation\n positionObj = data.pose[ind].position\n velocityObj = data.twist[ind].linear\n angularvelocityObj = data.twist[ind].angular\n self.position = np.array([positionObj.x, positionObj.y, positionObj.z])\n self.attitude_quat = np.array([orientationObj.x, orientationObj.y, orientationObj.z, orientationObj.w])\n self.attitude_euler = rot.from_quat(self.attitude_quat).as_euler('xyz')\n self.velocity = np.array([velocityObj.x, velocityObj.y, velocityObj.z])\n self.angular_vel = np.array([angularvelocityObj.x, angularvelocityObj.y, angularvelocityObj.z])\n \n def reset(self):\n \"\"\"\n Resets robot to initial position\n \"\"\"\n command = ModelState()\n command.model_name = self.name\n location = Pose()\n # Location Reset\n self.init_pose = np.random.randn(2)/2\n\n location.position.x = 0.7\n location.position.y = 0\n location.position.z = 0\n\n # Orientation reset\n self.init_orientation = rot.from_euler('xyz', [0, 0, 0]).as_quat().flatten()\n location.orientation.x = self.init_orientation[0]\n location.orientation.y = self.init_orientation[1]\n location.orientation.z = self.init_orientation[2]\n location.orientation.w = self.init_orientation[3]\n\n command.pose = location\n self.reset_command(command)\n\n self.position = np.concatenate((self.init_pose, np.zeros(1)))\n self.attitude_euler = np.array([0, 0, 0])\n self.attitude_quat = self.init_orientation\n self.velocity = np.zeros(3)\n self.angular_vel = np.zeros(3)\n" ]
[ [ "numpy.array", "numpy.zeros", "numpy.random.randn", "scipy.spatial.transform.Rotation.from_euler", "scipy.spatial.transform.Rotation.from_quat" ] ]
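A minimal driver loop for the quad_robot class above might look like this; the node name and the Gazebo model name ('quad') are assumptions, and a ROS master plus the Gazebo model must already be running:

import rospy

rospy.init_node('quad_controller')   # node name is an assumption
robot = quad_robot('quad')           # must match the Gazebo model name
robot.reset()
while not rospy.is_shutdown():
    robot.step([100.0, 100.0, 100.0, 100.0])  # four rotor speed commands
    robot.rate.sleep()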
yaskev/mipt-thesis
[ "fb7001c6dfbe17a114748aebce0685c037b7ec97" ]
[ "monte_carlo/greeks.py" ]
[ "import pandas as pd\n\nfrom monte_carlo.dataset_maker import _get_price\nfrom utils.mapping import idx_to_col_name, idx_to_greek\n\nEPS = 0.01\n\n\ndef get_greeks(data: pd.DataFrame) -> pd.DataFrame:\n print('original')\n original_price_ci_df = data.apply(_get_price, axis=1, result_type='expand')\n original_price_ci_df.columns = ['price_strike_ratio', 'left_ci', 'right_ci']\n\n greek_dict = dict()\n\n for key, value in idx_to_col_name.items():\n print(f'{value}')\n shifted_data = data.copy(deep=True)\n shifted_data[idx_to_col_name[key]] = shifted_data[idx_to_col_name[key]] * (1 + EPS)\n shifted_price_ci_df = shifted_data.apply(_get_price, axis=1, result_type='expand')\n shifted_price_ci_df.columns = ['price_strike_ratio', 'left_ci', 'right_ci']\n\n greeks = (shifted_price_ci_df['price_strike_ratio'] - original_price_ci_df['price_strike_ratio']) / (data[idx_to_col_name[key]] * EPS)\n greek_dict[idx_to_greek[key]] = greeks\n\n right_ci_greek = (shifted_price_ci_df['right_ci'] - original_price_ci_df['left_ci']) / (data[idx_to_col_name[key]] * EPS)\n left_ci_greek = (shifted_price_ci_df['left_ci'] - original_price_ci_df['right_ci']) / (data[idx_to_col_name[key]] * EPS)\n greek_dict[f'{idx_to_greek[key]}_l_ci'] = left_ci_greek\n greek_dict[f'{idx_to_greek[key]}_r_ci'] = right_ci_greek\n\n res_df = pd.DataFrame(greek_dict)\n res_df['theta_mc'] = -res_df['theta_mc']\n return res_df\n" ]
[ [ "pandas.DataFrame" ] ]
skillup-ai/tettei-engineer
[ "d3f8a36c068db44391afcf4727ccbb7c456852c2" ]
[ "appendix/max_2.py" ]
[ "import numpy as np\n\na = np.array([[1, 3, 5, 7, 9, 11, 13],\n [2, 4, 6, 8, 10, 12, 14],\n [3, 5, 7, 9, 11, 13, 15],\n [2, 4, 6, 8, 10, 12, 14],\n [1, 3, 5, 7, 9, 11, 13]])\n\nprint(np.argmax(a)) #最大値をとるインデックス(1 次元 array に引き伸ばされている)\n\n'''\n20\n'''\n\nprint(np.argmax(a, axis=0))\n\n'''\n[2 2 2 2 2 2 2]\n'''\n\nprint(np.argmax(a, axis=1))\n\n'''\n[6 6 6 6 6]\n'''\n" ]
[ [ "numpy.array", "numpy.argmax" ] ]
weicheng113/deep-reinforcement-learning
[ "9b1653b7aedeb4dc0e4aab9351cc4a7f4ccb4f32" ]
[ "soccer-twos-ppo/soccer_env.py" ]
[ "import numpy as np\n\n\nclass SoccerEnvWrapper:\n def __init__(self, env, train_mode=False):\n self.env = env\n self.brain_names = env.brain_names\n self.train_mode = train_mode\n self.goalie_action_size = 4\n self.num_goalies = 2\n self.striker_action_size = 6\n self.num_strikers = 2\n self.state_size = 336\n self.num_agents = self.num_goalies + self.num_strikers\n\n def reset(self):\n env_info = self.env.reset(train_mode=self.train_mode)\n states = np.zeros((4, self.state_size), dtype=np.float)\n states[:2, :] = env_info[self.env.brain_names[0]].vector_observations\n states[2:, :] = env_info[self.env.brain_names[1]].vector_observations\n return np.array(states, np.float)\n\n def step(self, actions):\n actions_per_brain = int(len(actions)/len(self.env.brain_names))\n brain_actions = dict()\n for i in range(len(self.env.brain_names)):\n start = i * actions_per_brain\n brain_actions[self.env.brain_names[i]] = actions[start: start+actions_per_brain]\n\n env_info = self.env.step(brain_actions)\n\n next_states = np.zeros((4, self.state_size), dtype=np.float)\n rewards = np.zeros(4, dtype=np.float)\n dones = np.zeros(4, dtype=np.bool)\n\n next_states[:2, :] = env_info[self.env.brain_names[0]].vector_observations\n next_states[2:, :] = env_info[self.env.brain_names[1]].vector_observations\n rewards[:2] = env_info[self.env.brain_names[0]].rewards\n rewards[2:] = env_info[self.env.brain_names[1]].rewards\n dones[:2] = env_info[self.env.brain_names[0]].local_done\n dones[2:] = env_info[self.env.brain_names[1]].local_done\n # for brain_name in self.env.brain_names:\n # next_states.extend(env_info[brain_name].vector_observations)\n # rewards.extend()\n # dones.extend(env_info[brain_name].local_done)\n\n # peer_rewards = rewards[2:] + rewards[:2]\n # team_avg_rewards = (np.array(rewards) + np.array(peer_rewards))/2.0\n # return np.array(next_states, np.float), np.array(team_avg_rewards, np.float), np.array(dones, np.bool)\n return next_states, rewards, dones\n\n" ]
[ [ "numpy.array", "numpy.zeros" ] ]
geeknarendra/The-Complete-FAANG-Preparation
[ "3ed22719022bc66bd05c5c1ed091fe605e979908" ]
[ "4]. Projects/Desktop Development/GUI Projects/10). Curve Fitting and Interpolation/mplwidget.py" ]
[ "# ------------------------------------------------------\n# -------------------- mplwidget.py --------------------\n# ------------------------------------------------------\nfrom PyQt5.QtWidgets import*\n\nfrom matplotlib.backends.backend_qt5agg import FigureCanvas\n\nfrom matplotlib.figure import Figure\nfrom matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar\n\n\n \nclass MplWidget(QWidget):\n \n def __init__(self, parent = None):\n\n QWidget.__init__(self, parent)\n \n self.canvas = FigureCanvas(Figure())\n\n vertical_layout = QVBoxLayout()\n vertical_layout.addWidget(self.canvas)\n\n self.canvas.axes = self.canvas.figure.add_subplot(111)\n self.canvas.figure.subplots_adjust(top=0.945,bottom=0.05,left=0.060,right=0.950,hspace=0.2,wspace=0.2)\n self.setLayout(vertical_layout)" ]
[ [ "matplotlib.figure.Figure" ] ]
zenetio/autogluon
[ "0b761703abfc0b9af6250e5bea378698d82bce9a" ]
[ "examples/image_classification/data_processing.py" ]
[ "import csv\nimport os\nimport pandas as pd\nimport shutil\nimport string\nfrom gluoncv.utils import makedirs\nimport argparse\ndef parse_args():\n parser = argparse.ArgumentParser(description='Train a model for different kaggle competitions.')\n parser.add_argument('--data-dir', type=str, default='/home/ubuntu/workspace/autogluon_kaggle/examples/image_classification/data/',\n help='training and validation pictures to use.')\n parser.add_argument('--dataset', type=str, default='dog',\n help='the kaggle competition')\n parser.add_argument('--csvfile', type=str, default='labels.csv',\n help='the csv file') \n opt = parser.parse_args()\n return opt\nopt = parse_args()\n\nif opt.dataset == 'dog':\n\n csvfile = opt.csvfile\n pic_path = \"images_all/\"\n train_path = \"images/\"\n name_csv, ext = csvfile.strip().split('.')\n if name_csv == 'train' or name_csv == 'val' or name_csv == 'test':\n train_path = os.path.join(train_path, name_csv)\n train_path += '/'\n \n csvfile = os.path.join(opt.data_dir,'dog-breed-identification',csvfile)\n pic_path = os.path.join(opt.data_dir,'dog-breed-identification',pic_path)\n train_path = os.path.join(opt.data_dir,'dog-breed-identification',train_path)\n\n csvfile = open(csvfile, 'r')\n data = []\n for line in csvfile:\n data.append(list(line.strip().split(',')))\n for i in range(len(data)):\n if i == 0:\n continue\n if i >= 1:\n cl = data[i][1]\n name = data[i][0]\n path = pic_path + str(name) + '.jpg'\n isExists = os.path.exists(path)\n if (isExists):\n if not os.path.exists(train_path + cl):\n os.makedirs(train_path + cl)\n newpath = train_path + cl + '/' + str(name) + '.jpg'\n shutil.copyfile(path, newpath)\n #print(str(name) + ',success')\n print(f\"{newpath}, success\")\n else:\n print(str(name) + \",not here\")\nelif opt.dataset == 'aerial':\n csvfile = \"train.csv\"\n pic_path = \"images_all/\"\n train_path = \"images/\"\n\n csvfile = os.path.join(opt.data_dir,'aerial-cactus-identification',csvfile)\n pic_path = os.path.join(opt.data_dir,'aerial-cactus-identification',pic_path)\n train_path = os.path.join(opt.data_dir,'aerial-cactus-identification',train_path)\n\n csvfile = open(csvfile, 'r')\n data = []\n for line in csvfile:\n data.append(list(line.strip().split(',')))\n for i in range(len(data)):\n if i == 0:\n continue\n if i >= 1:\n cl = data[i][1]\n name = data[i][0]\n path = pic_path + str(name)\n isExists = os.path.exists(path)\n if (isExists):\n if not os.path.exists(train_path + cl):\n os.makedirs(train_path + cl)\n newpath = train_path + cl + '/' + str(name)\n shutil.copyfile(path, newpath)\n print(str(name) + ',success')\n else:\n print(str(name) + \",not here\")\n\n##\nelif opt.dataset == 'fisheries_Monitoring':\n csvfile = os.path.join(opt.data_dir, opt.dataset, 'auto_5_30_fish.csv')\n df = pd.read_csv(csvfile)\n def get_name(name):\n if name.startswith('image'):\n name = 'test_stg2/' + name\n return name\n df['image'] = df['image'].apply(get_name)\n df.to_csv(csvfile.replace('auto_5_30_fish', 'auto_5_30_fish_add'), index=False)" ]
[ [ "pandas.read_csv" ] ]
thirionjl/chains
[ "31bd4b85744fd56096fcc08027e8d20ce145bf0b" ]
[ "tests/core/test_ops_regularization.py" ]
[ "import numpy as np\n\nfrom chains.core import ops_regularization as reg, env\nfrom chains.core.static_shape import StaticShape\n\n\ndef test_coursera_dropout_forward():\n np.random.seed(1)\n dropout = reg.Dropout(keep_prob=0.7)\n dropout.compute(np.array([[0., 3.32524635, 2.13994541, 2.60700654, 0.],\n [0., 4.1600994, 0.79051021, 1.46493512, 0.]]))\n\n np.testing.assert_equal(dropout.mask,\n np.array([[True, False, True, True, True],\n [True, True, True, True, True]]))\n np.testing.assert_allclose(dropout.output,\n np.array([[0., 0., 3.05706487, 3.72429505, 0.],\n [0., 5.94299915, 1.1293003,\n 2.09276446,\n 0.]]))\n\n\ndef test_coursera_dropout_backward():\n np.random.seed(1)\n dropout = reg.Dropout(keep_prob=0.8)\n\n dropout.mask = np.array([[True, False, True, False, True],\n [False, True, False, True, True],\n [False, False, True, False, False]])\n\n d = dropout.partials(np.array(\n [[0.46544685, 0.34576201, -0.00239743, 0.34576201, -0.22172585],\n [0.57248826, 0.42527883, -0.00294878, 0.42527883, -0.27271738],\n [0.45465921, 0.3377483, -0.00234186, 0.3377483, -0.21658692]]))[0]\n\n np.testing.assert_allclose(d, np.array([[0.58180856, 0., -0.00299679, 0.,\n -0.27715731],\n [0., 0.53159854, -0., 0.53159854,\n -0.34089673],\n [0., 0., -0.00292733, 0., -0.]]),\n atol=1e-8)\n\n\ndef test_l2_regularization_coursera_test_case():\n env.seed(1)\n y_assess = np.array([[1, 1, 0, 1, 0]])\n w1 = np.random.randn(2, 3)\n np.random.randn(2, 1)\n w2 = np.random.randn(3, 2)\n np.random.randn(3, 1)\n w3 = np.random.randn(1, 3)\n np.random.randn(1, 1)\n\n norm = reg.L2NormRegularization(0.1)\n norm.compute(y_assess.shape[-1], w1, w2, w3)\n\n np.testing.assert_allclose(norm.output, 0.183984340402)\n\n\ndef test_l2_regularization():\n # TODO Auto adjustment lamda = wanted_decay_rate_percent * m / learning_rate # wanted_decay_rate_percent = 0.1 (10%)\n\n w1 = np.array([[1, 2, 3], [1, 2, 3]])\n w2 = np.array([[1, 2], [3, 4]])\n lamda = 10.0\n batch_size = 32\n r = lamda / batch_size\n\n norm = reg.L2NormRegularization(lamda)\n norm.check_incoming_shapes(StaticShape.scalar(), StaticShape.from_tuple((1, 2)))\n norm.compute(batch_size, w1, w2)\n grad = norm.partials(1)\n\n np.testing.assert_equal(norm.output, 9.0625)\n np.testing.assert_allclose(grad[0], - norm.output / batch_size)\n np.testing.assert_allclose(grad[1], r * w1)\n np.testing.assert_allclose(grad[2], r * w2)\n\n\nif __name__ == '__main__':\n test_l2_regularization_coursera_test_case()\n" ]
[ [ "numpy.testing.assert_allclose", "numpy.array", "numpy.testing.assert_equal", "numpy.random.seed", "numpy.random.randn" ] ]
khoaideptrai/deep500
[ "0953038f64bc73c8d41d01796e07d3a23ca97822" ]
[ "deep500/datasets/cifar.py" ]
[ "import tarfile\nfrom typing import List, Tuple, Dict\n\nimport numpy as np\n\nfrom deep500.utils.download import real_download, unzip\nfrom deep500.lv2.dataset import NumpyDataset\nfrom deep500.utils.onnx_interop.losses import SoftmaxCrossEntropy\n\ndef download_cifar10_and_get_file_paths(folder=''):\n \"\"\"\n Download cifar10 from University of Toronto\n The archive contains the files data_batch_1, data_batch_2, ..., data_batch_5, as well as test_batch\n :return: paths to different files\n \"\"\"\n base_url = \"https://www.cs.toronto.edu/~kriz/\"\n filenames = [('cifar10', 'cifar-10-binary.tar.gz')]\n sub_folder = '/cifar10'\n\n local_files = real_download(base_url, filenames, sub_folder, output_dir=folder)\n files = unzip(local_files['cifar10'])\n\n data_files = [file for file in files if '_batch_' in file]\n test_data = [file for file in files if 'test_batch' in file]\n\n return data_files, test_data\n\ndef download_cifar100_and_get_file_paths(folder=''):\n \"\"\"\n Download cifar10 from University of Toronto\n The archive contains the files data_batch_1, data_batch_2, ..., data_batch_5, as well as test_batch\n :return: paths to different files\n \"\"\"\n base_url = \"https://www.cs.toronto.edu/~kriz/\"\n filenames = [('cifar100', 'cifar-100-binary.tar.gz')]\n sub_folder = '/cifar100'\n\n local_files = real_download(base_url, filenames, sub_folder, output_dir=folder)\n files = unzip(local_files['cifar100'])\n\n data_files = [file for file in files if 'train.bin' in file]\n test_data = [file for file in files if 'test.bin' in file]\n\n return data_files, test_data\n\ndef _cifar_shape():\n return (3, 32, 32)\ndef cifar10_shape():\n return (10, *_cifar_shape())\ndef cifar100_shape():\n return (100, *_cifar_shape())\n\ndef cifar10_loss():\n return SoftmaxCrossEntropy\ndef cifar100_loss():\n return cifar10_loss()\n\ncifar_mean = {\n 'cifar10': (0.4914, 0.4822, 0.4465),\n 'cifar100': (0.5071, 0.4867, 0.4408),\n}\n\ncifar_std = {\n 'cifar10': (0.2023, 0.1994, 0.2010),\n 'cifar100': (0.2675, 0.2565, 0.2761),\n}\n\n# Fast learning rate schedule for CIFAR-10, obtained from \n# https://github.com/meliketoy/wide-resnet.pytorch\ndef cifar_learning_rate(epoch, init=0.1):\n optim_factor = 0\n if(epoch > 160):\n optim_factor = 3\n elif(epoch > 120):\n optim_factor = 2\n elif(epoch > 60):\n optim_factor = 1\n\n return init*math.pow(0.2, optim_factor)\n\ndef _cifar_numpy(train_files, test_files, dsname, normalize):\n # Images per batch\n ipb = 50000 if dsname == 'cifar100' else 10000\n test_ipb = 10000\n \n imgsize = 32 * 32 * 3\n entrylen = (imgsize+1) if dsname == 'cifar10' else (imgsize+2)\n entryoff = 0 if dsname == 'cifar10' else 1\n size = ipb * entrylen\n test_size = test_ipb * entrylen\n \n # Create arrays for train and test images\n train_images = np.zeros([len(train_files)*ipb, 3, 32, 32], dtype=np.float32)\n test_images = np.zeros([test_ipb, 3, 32, 32], dtype=np.float32)\n train_labels = np.zeros(len(train_files)*ipb, dtype=np.int64)\n test_labels = np.zeros(test_ipb, dtype=np.int64)\n\n # Extract training data (label followed by image data)\n for i, file in enumerate(train_files):\n with open(file, 'rb') as f:\n filebuffer = np.frombuffer(f.read(), 'B')\n \n # Read labels and images\n # Adapted from https://mattpetersen.github.io/load-cifar10-with-numpy\n train_labels[i*ipb:(i+1)*ipb] = filebuffer[entryoff::entrylen].astype(np.int64)\n pixeldata = np.delete(filebuffer, np.arange(entryoff, size, entrylen))\n if dsname == 'cifar100':\n pixeldata = np.delete(pixeldata, np.arange(0, size, 
entrylen-1))\n train_images[i*ipb:(i+1)*ipb] = pixeldata.reshape(-1,3,32,32).astype(np.float32) / 255.0\n\n # Extract test data \n with open(test_files[0], 'rb') as f:\n filebuffer = np.frombuffer(f.read(), 'B')\n test_labels[:] = filebuffer[entryoff::entrylen].astype(np.int64)\n pixeldata = np.delete(filebuffer, np.arange(entryoff, test_size, entrylen))\n if dsname == 'cifar100':\n pixeldata = np.delete(pixeldata, np.arange(0, test_size, entrylen-1))\n test_images[:] = pixeldata.reshape(-1,3,32,32).astype(np.float32) / 255.0\n\n # Normalize if necessary\n if normalize:\n for i in range(3):\n train_images[:,i,:,:] -= cifar_mean[dsname][i]\n test_images[:,i,:,:] -= cifar_mean[dsname][i]\n train_images[:,i,:,:] /= cifar_std[dsname][i]\n test_images[:,i,:,:] /= cifar_std[dsname][i]\n\n return train_images, train_labels, test_images, test_labels\n\ndef _load_cifar(is_cifar100, input_node_name, label_node_name, normalize=True, folder=''):\n if is_cifar100:\n train_batch, test_batch = download_cifar100_and_get_file_paths(folder=folder)\n train_img, train_lbl, test_img, test_lbl = _cifar_numpy(\n train_batch, test_batch, 'cifar100', normalize)\n else:\n train_batch, test_batch = download_cifar10_and_get_file_paths(folder=folder)\n train_img, train_lbl, test_img, test_lbl = _cifar_numpy(\n train_batch, test_batch, 'cifar10', normalize)\n\n return (NumpyDataset(train_img, input_node_name, train_lbl, label_node_name),\n NumpyDataset(test_img, input_node_name, test_lbl, label_node_name))\n \n \ndef load_cifar10(input_node_name, label_node_name, *args, normalize=True,\n folder='', **kwargs):\n return _load_cifar(False, input_node_name, label_node_name, normalize,\n folder)\n\n\ndef load_cifar100(input_node_name, label_node_name, *args, normalize=True,\n folder='', **kwargs):\n return _load_cifar(True, input_node_name, label_node_name, normalize,\n folder)\n" ]
[ [ "numpy.arange", "numpy.zeros" ] ]
jjhelmus/distributed
[ "3fceec696b81f02c8082f253bf340e3f494fc42c" ]
[ "distributed/protocol/numba.py" ]
[ "import numba.cuda\nimport numpy as np\n\nfrom .cuda import cuda_deserialize, cuda_serialize\nfrom .serialize import dask_deserialize, dask_serialize\n\ntry:\n from .rmm import dask_deserialize_rmm_device_buffer\nexcept ImportError:\n dask_deserialize_rmm_device_buffer = None\n\n\n@cuda_serialize.register(numba.cuda.devicearray.DeviceNDArray)\ndef cuda_serialize_numba_ndarray(x):\n # Making sure `x` is behaving\n if not (x.flags[\"C_CONTIGUOUS\"] or x.flags[\"F_CONTIGUOUS\"]):\n shape = x.shape\n t = numba.cuda.device_array(shape, dtype=x.dtype)\n t.copy_to_device(x)\n x = t\n\n header = x.__cuda_array_interface__.copy()\n header[\"strides\"] = tuple(x.strides)\n header[\"lengths\"] = [x.nbytes]\n frames = [\n numba.cuda.cudadrv.devicearray.DeviceNDArray(\n shape=(x.nbytes,), strides=(1,), dtype=np.dtype(\"u1\"), gpu_data=x.gpu_data,\n )\n ]\n\n return header, frames\n\n\n@cuda_deserialize.register(numba.cuda.devicearray.DeviceNDArray)\ndef cuda_deserialize_numba_ndarray(header, frames):\n (frame,) = frames\n shape = header[\"shape\"]\n strides = header[\"strides\"]\n\n arr = numba.cuda.devicearray.DeviceNDArray(\n shape=shape,\n strides=strides,\n dtype=np.dtype(header[\"typestr\"]),\n gpu_data=numba.cuda.as_cuda_array(frame).gpu_data,\n )\n return arr\n\n\n@dask_serialize.register(numba.cuda.devicearray.DeviceNDArray)\ndef dask_serialize_numba_ndarray(x):\n header, frames = cuda_serialize_numba_ndarray(x)\n frames = [memoryview(f.copy_to_host()) for f in frames]\n return header, frames\n\n\n@dask_deserialize.register(numba.cuda.devicearray.DeviceNDArray)\ndef dask_deserialize_numba_array(header, frames):\n if dask_deserialize_rmm_device_buffer:\n frames = [dask_deserialize_rmm_device_buffer(header, frames)]\n else:\n frames = [numba.cuda.to_device(np.asarray(memoryview(f))) for f in frames]\n\n arr = cuda_deserialize_numba_ndarray(header, frames)\n return arr\n" ]
[ [ "numpy.dtype" ] ]
jasonwirth/zipline
[ "4dd6e4fb61bcb01cb2af809128611a8f4a0fd788" ]
[ "tests/data/bundles/test_yahoo.py" ]
[ "import numpy as np\nimport pandas as pd\nfrom six.moves.urllib.parse import urlparse, parse_qs\nfrom toolz import flip, identity\nfrom toolz.curried import merge_with, operator as op\n\nfrom zipline.data.bundles.core import _make_bundle_core\nfrom zipline.data.bundles import yahoo_equities, load\nfrom zipline.lib.adjustment import Float64Multiply\nfrom zipline.testing import test_resource_path, tmp_dir, read_compressed\nfrom zipline.testing.fixtures import WithResponses, ZiplineTestCase\nfrom zipline.testing.predicates import assert_equal\nfrom zipline.utils.tradingcalendar import trading_days\n\n\nclass YahooBundleTestCase(WithResponses, ZiplineTestCase):\n symbols = 'AAPL', 'IBM', 'MSFT'\n columns = 'open', 'high', 'low', 'close', 'volume'\n asset_start = pd.Timestamp('2014-01-02', tz='utc')\n asset_end = pd.Timestamp('2014-12-31', tz='utc')\n calendar = trading_days[\n (trading_days >= asset_start) &\n (trading_days <= asset_end)\n ]\n\n @classmethod\n def init_class_fixtures(cls):\n super(YahooBundleTestCase, cls).init_class_fixtures()\n (cls.bundles,\n cls.register,\n cls.unregister,\n cls.ingest) = map(staticmethod, _make_bundle_core())\n\n def _expected_data(self):\n sids = 0, 1, 2\n modifier = {\n 'low': 0,\n 'open': 1,\n 'close': 2,\n 'high': 3,\n 'volume': 0,\n }\n pricing = [\n np.hstack((\n np.arange(252, dtype='float64')[:, np.newaxis] +\n 1 +\n sid * 10000 +\n modifier[column] * 1000\n for sid in sorted(sids)\n ))\n for column in self.columns\n ]\n\n # There are two dividends and 1 split for each company.\n\n def dividend_adjustment(sid, which):\n \"\"\"The dividends occur at indices 252 // 4 and 3 * 252 / 4\n with a cash amount of sid + 1 / 10 and sid + 2 / 10\n \"\"\"\n if which == 'first':\n idx = 252 // 4\n else:\n idx = 3 * 252 // 4\n\n return {\n idx: [Float64Multiply(\n first_row=0,\n last_row=idx,\n first_col=sid,\n last_col=sid,\n value=float(\n 1 -\n ((sid + 1 + (which == 'second')) / 10) /\n (idx - 1 + sid * 10000 + 2000)\n ),\n )],\n }\n\n def split_adjustment(sid, volume):\n \"\"\"The splits occur at index 252 // 2 with a ratio of (sid + 1):1\n \"\"\"\n idx = 252 // 2\n return {\n idx: [Float64Multiply(\n first_row=0,\n last_row=idx,\n first_col=sid,\n last_col=sid,\n value=(identity if volume else op.truediv(1))(sid + 2),\n )],\n }\n\n merge_adjustments = merge_with(flip(sum, []))\n\n adjustments = [\n # ohlc\n merge_adjustments(\n *tuple(dividend_adjustment(sid, 'first') for sid in sids) +\n tuple(dividend_adjustment(sid, 'second') for sid in sids) +\n tuple(split_adjustment(sid, volume=False) for sid in sids)\n )\n ] * (len(self.columns) - 1) + [\n # volume\n merge_adjustments(\n split_adjustment(sid, volume=True) for sid in sids\n ),\n ]\n\n return pricing, adjustments\n\n def test_bundle(self):\n\n def get_symbol_from_url(url):\n params = parse_qs(urlparse(url).query)\n symbol, = params['s']\n return symbol\n\n def pricing_callback(request):\n headers = {\n 'content-encoding': 'gzip',\n 'content-type': 'text/csv',\n }\n path = test_resource_path(\n 'yahoo_samples',\n get_symbol_from_url(request.url) + '.csv.gz',\n )\n with open(path, 'rb') as f:\n return (\n 200,\n headers,\n f.read(),\n )\n\n for _ in range(3):\n self.responses.add_callback(\n self.responses.GET,\n 'http://ichart.finance.yahoo.com/table.csv',\n pricing_callback,\n )\n\n def adjustments_callback(request):\n path = test_resource_path(\n 'yahoo_samples',\n get_symbol_from_url(request.url) + '.adjustments.gz',\n )\n return 200, {}, read_compressed(path)\n\n for _ in range(3):\n 
self.responses.add_callback(\n self.responses.GET,\n 'http://ichart.finance.yahoo.com/x',\n adjustments_callback,\n )\n\n cal = self.calendar\n self.register(\n 'bundle',\n yahoo_equities(self.symbols),\n calendar=cal,\n )\n\n zipline_root = self.enter_instance_context(tmp_dir()).path\n environ = {\n 'ZIPLINE_ROOT': zipline_root,\n }\n\n self.ingest('bundle', environ=environ)\n bundle = load('bundle', environ=environ)\n\n sids = 0, 1, 2\n equities = bundle.asset_finder.retrieve_all(sids)\n for equity, expected_symbol in zip(equities, self.symbols):\n assert_equal(equity.symbol, expected_symbol)\n\n for equity in bundle.asset_finder.retrieve_all(sids):\n assert_equal(equity.start_date, self.asset_start, msg=equity)\n assert_equal(equity.end_date, self.asset_end, msg=equity)\n\n actual = bundle.daily_bar_reader.load_raw_arrays(\n self.columns,\n cal[cal.get_loc(self.asset_start, 'bfill')],\n cal[cal.get_loc(self.asset_end, 'ffill')],\n sids,\n )\n expected_pricing, expected_adjustments = self._expected_data()\n assert_equal(actual, expected_pricing, array_decimal=2)\n\n adjustments_for_cols = bundle.adjustment_reader.load_adjustments(\n self.columns,\n cal,\n pd.Index(sids),\n )\n\n for column, adjustments, expected in zip(self.columns,\n adjustments_for_cols,\n expected_adjustments):\n assert_equal(\n adjustments,\n expected,\n msg=column,\n )\n" ]
[ [ "pandas.Timestamp", "pandas.Index", "numpy.arange" ] ]
VishalSharma0309/gap_sdk
[ "09ccc594a3696a84953b732022cecae11e751c97" ]
[ "tools/nntool/interpreter/commands/fquant.py" ]
[ "# Copyright (C) 2020 GreenWaves Technologies, SAS\n\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see <https://www.gnu.org/licenses/>.\n\nimport argparse\nimport numpy as np\nfrom cmd2 import Cmd2ArgumentParser, with_argparser\nfrom interpreter.nntool_shell_base import NNToolShellBase\nfrom interpreter.shell_utils import output_table, table_options\nfrom quantization.symmetric.symmetric_quantizer import SymmetricQuantizer\nfrom reports.quantization_reporter import QuantizationReporter\nfrom stats.activation_stats_collector import ActivationStatsCollector\nfrom stats.fake_filter_stats_collector import FakeFilterStatsCollector\nfrom utils.stats_funcs import STATS_BITS\n\nclass FquantCommand(NNToolShellBase):\n #FQUANT COMMAND\n parser_fquant = Cmd2ArgumentParser()\n parser_fquant.add_argument('-f', '--force_width',\n choices=STATS_BITS, default=8, type=int, help='force all layers to this width')\n table_options(parser_fquant, default_width=140)\n\n @with_argparser(parser_fquant)\n def do_fquant(self, args: argparse.Namespace):\n \"\"\"\nAttempt to calculate a fake quantization for graph using random tensors and parameters.\nThis is intended to allow code generation for performance testing even if no real\nweights and input data are avalaible.\"\"\"\n self._check_graph()\n self.G.constant_store.fake = True\n stats_collector = ActivationStatsCollector()\n input_tensors = [np.random.normal(0, 0.2, input.dims.shape)\n for input in self.G.input_nodes()]\n stats_collector.collect_stats(self.G, input_tensors)\n astats = stats_collector.reduce_stats()\n stats_collector = FakeFilterStatsCollector()\n fstats = stats_collector.collect_stats(self.G)\n quantizer = SymmetricQuantizer(astats, fstats,\n force_width=args.force_width)\n qrecs = quantizer.quantize(self.G)\n self.G.quantization = qrecs\n tab = QuantizationReporter().report(self.G, qrecs)\n output_table(tab, args)\n self.G.constant_store.fake = False\n" ]
[ [ "numpy.random.normal" ] ]
redeboer/mpl-interactions
[ "649190fea86f2885ba62aeb3eb198f9ab148bf51" ]
[ "mpl_interactions/xarray_helpers.py" ]
[ "import numpy as np\n\nfrom .helpers import choose_fmt_str\n\n\ndef choose_datetime_nonsense(arr, timeunit=\"m\"):\n \"\"\"\n Try to do something reasonable to datetimes and timedeltas.\n\n Parameters\n ----------\n arr : np.array\n Array with values to be formatted.\n timeunit : str, default: m\n Truncation level for datetime and timedelta axes.\n\n Returns\n -------\n out : np.array\n Array modified to format decently in a slider.\n\n \"\"\"\n\n if np.issubdtype(arr.dtype, \"datetime64\"):\n out = arr.astype(f\"datetime64[{timeunit}]\")\n elif np.issubdtype(arr.dtype, \"timedelta64\"):\n out = arr.astype(f\"timedelta64[{timeunit}]\").astype(int)\n else:\n out = arr\n return out\n\n\ndef get_hs_axes(xarr, is_color_image=False, timeunit=\"m\"):\n \"\"\"\n Read the dims and coordinates from an xarray and construct the\n axes argument for hyperslicer. Called internally by hyperslicer.\n\n Parameters\n ----------\n xarr : xarray.DataArray\n DataArray being viewed with hyperslicer\n is_color_image : bool, default False\n Whether the individual images of the hyperstack are color images.\n timeunit : str, default \"m\"\n Truncation level for datetime and timedelta axes.\n\n Returns\n -------\n axes : list\n axes kwarg for hyperslicer\n\n \"\"\"\n if not is_color_image:\n dims = xarr.dims[:-2]\n else:\n dims = xarr.dims[:-3]\n coords_list = [choose_datetime_nonsense(xarr.coords[d].values, timeunit=timeunit) for d in dims]\n axes = zip(dims, coords_list)\n return list(axes)\n\n\ndef get_hs_extent(xarr, is_color_image=False):\n \"\"\"\n Read the \"YX\" coordinates of an xarray.DataArray to set extent of image for\n imshow.\n\n Parameters\n ----------\n xarr : xarray.DataArray\n DataArray being viewed with hyperslicer\n is_color_image : bool, default False\n Whether the individual images of the hyperstack are color images.\n\n Returns\n -------\n extent : list\n Extent argument for imshow. [d0_min, d0_max, d1_min, d1_max]\n\n \"\"\"\n\n if not is_color_image:\n dims = xarr.dims[-2:]\n else:\n dims = xarr.dims[-3:-1]\n\n # the reversal is because imshow transposes the array it receives\n dims = dims[::-1]\n extent = []\n for d in dims:\n vals = xarr[d].values\n extent.append(vals.min())\n extent.append(vals.max())\n return extent\n\n\ndef get_hs_fmts(xarr, units=None, is_color_image=False):\n \"\"\"\n Get appropriate slider format strings from xarray coordinates\n based the dtype of corresponding values.\n\n Parameters\n ----------\n xarr : xarray.DataArray\n DataArray being viewed with hyperslicer\n units : array-like\n Units to append to end of slider value. Must have the same length\n as number of non-image dimensions in xarray.\n is_color_image : bool, default False\n Whether the individual images of the hyperstack are color images.\n\n Returns\n -------\n fmt_strings : dict\n Slider format strings for hyperslicer (or other mpl-interactions?)\n \"\"\"\n if not is_color_image:\n dims = xarr.dims[:-2]\n else:\n dims = xarr.dims[:-3]\n fmt_strs = {}\n for i, d in enumerate(dims):\n fmt_strs[d] = choose_fmt_str(xarr[d].dtype)\n if units is not None and units[i] is not None:\n try:\n fmt_strs[d] += \" {}\".format(units[i])\n except:\n continue\n return fmt_strs\n" ]
[ [ "numpy.issubdtype" ] ]
IITDBGroup/cape
[ "0b656e3a03bc2b79ae9015d37dc372cd18cb3da0" ]
[ "capexplain/dev/find_user_questions.py" ]
[ "#!/usr/bin/python\n# -*- coding:utf-8 -*- \n\nimport sys, getopt\nimport pandas\nimport csv\n#import statsmodels.formula.api as smf\nfrom sklearn import preprocessing\nimport math\nimport time\nfrom heapq import *\n\nimport operator\n\nsys.path.append('./')\nsys.path.append('../')\nfrom similarity_calculation.category_similarity_matrix import *\nfrom similarity_calculation.category_network_embedding import *\nfrom utils import *\nfrom constraint_definition.LocalRegressionConstraint import *\n\n\nDEFAULT_RESULT_PATH = './input/query_res.csv'\nDEFAULT_QUESTION_PATH = './input/user_question.csv'\nDEFAULT_CONSTRAINT_PATH = './input/CONSTRAINTS'\nEXAMPLE_NETWORK_EMBEDDING_PATH = './input/NETWORK_EMBEDDING'\nEXAMPLE_SIMILARITY_MATRIX_PATH = './input/SIMILARITY_DEFINITION'\nDEFAULT_AGGREGATE_COLUMN = 'count'\nDEFAULT_CONSTRAINT_EPSILON = 0.05\nTOP_K = 5\n\ndef build_local_regression_constraint(data, column_index, t, con, epsilon, agg_col, regression_package):\n \"\"\"Build local regression constraint from Q(R), t, and global regression constraint\n\n Args:\n data: result of Q(R)\n column_index: index for values in each column\n t: target tuple in Q(R)\n con: con[0] is the list of fixed attributes in Q(R), con[1] is the list of variable attributes in Q(R)\n epsilon: threshold for local regression constraint\n regression_package: which package is used to compute regression \n Returns:\n A LocalRegressionConstraint object whose model is trained on \\pi_{con[1]}(Q_{t[con[0]]}(R))\n \"\"\"\n tF = get_F_value(con[0], t)\n local_con = LocalRegressionConstraint(con[0], tF, con[1], agg_col, epsilon)\n train_data = {agg_col: []}\n for v in con[1]:\n train_data[v] = []\n # for index, row in data['df'].iterrows():\n # if get_F_value(con[0], row) == tF:\n # for v in con[1]:\n # train_data[v].append(row[v])\n # train_data[agg_col].append(row[agg_col])\n\n \n for idx in column_index[con[0][0]][tF[0]]:\n row = data['df'].loc[data['df']['index'] == idx]\n row = row.to_dict('records')[0]\n #print row\n if get_F_value(con[0], row) == tF:\n for v in con[1]:\n train_data[v].append(row[v])\n train_data[agg_col].append(row[agg_col])\n if regression_package == 'scikit-learn':\n train_x = {}\n for v in con[1]:\n if v in data['le']:\n train_data[v] = data['le'][v].transform(train_data[v])\n train_data[v] = data['ohe'][v].transform(train_data[v].reshape(-1, 1))\n #print data['ohe'][v].transform(train_data[v].reshape(-1, 1))\n train_x[v] = train_data[v]\n else:\n if v != agg_col:\n train_x[v] = np.array(train_data[v]).reshape(-1, 1)\n train_y = np.array(train_data[agg_col]).reshape(-1, 1)\n train_x = np.concatenate(list(train_x.values()), axis=-1)\n local_con.train_sklearn(train_x, train_y)\n else:\n #train_data = pandas.DataFrame(train_data)\n formula = agg_col + ' ~ ' + ' + '.join(con[1])\n print \n local_con.train(train_data, formula)\n return local_con\n\ndef validate_local_regression_constraint(data, local_con, t, dir, agg_col, regression_package):\n \"\"\"Check the validicity of the user question under a local regression constraint\n\n Args:\n data: data['df'] is the data frame storing Q(R)\n data['le'] is the label encoder, data['ohe'] is the one-hot encoder\n local_con: a LocalRegressionConstraint object\n t: target tuple in Q(R)\n dir: whether user thinks t[agg(B)] is high or low\n agg_col: the column of aggregated value\n regression_package: which package is used to compute regression \n Returns:\n the actual direction that t[agg(B)] compares to its expected value, and the expected value from local_con\n 
\"\"\"\n test_tuple = {}\n for v in local_con.var_attr:\n test_tuple[v] = [t[v]]\n if regression_package == 'scikit-learn':\n for v in local_con.var_attr:\n if v in data['le']:\n test_tuple[v] = data['le'][v].transform(test_tuple[v])\n test_tuple[v] = data['ohe'][v].transform(test_tuple[v].reshape(-1, 1))\n else:\n test_tuple[v] = np.array(test_tuple[v]).reshape(-1, 1)\n \n test_tuple = np.concatenate(list(test_tuple.values()), axis=-1)\n predictY = local_con.predict_sklearn(test_tuple)\n else:\n predictY = local_con.predict(pandas.DataFrame(test_tuple))\n\n if t[agg_col] < (1-local_con.epsilon) * predictY[0]:\n return -dir, predictY[0]\n elif t[agg_col] > (1+local_con.epsilon) * predictY[0]:\n return dir, predictY[0]\n else:\n return 0, predictY[0]\n \ndef tuple_similarity(t1, t2, var_attr, cat_sim, num_dis_norm, agg_col):\n \"\"\"Compute the similarity between two tuples t1 and t2 on their attributes var_attr\n\n Args:\n t1, t2: two tuples\n var_attr: variable attributes\n cat_sim: the similarity measure for categorical attributes\n num_dis_norm: normalization terms for numerical attributes\n agg_col: the column of aggregated value\n Returns:\n the Gower similarity between t1 and t2\n \"\"\"\n sim = 0.0\n cnt = 0\n for col in var_attr:\n if t1[col] is None or t2[col] is None:\n continue\n if cat_sim.is_categorical(col):\n s = cat_sim.compute_similarity(col, t1[col], t2[col], agg_col)\n sim += s\n else:\n if col != agg_col and col != 'index':\n temp = abs(t1[col] - t2[col]) / num_dis_norm[col]['range']\n sim += 1-temp\n cnt += 1\n return sim / cnt\n\ndef find_best_user_questions(data, cons, cat_sim, num_dis_norm, cons_epsilon, agg_col, regression_package):\n\n \"\"\"Find explanations for user questions\n\n Args:\n data: data['df'] is the data frame storing Q(R)\n data['le'] is the label encoder, data['ohe'] is the one-hot encoder\n cons: list of fixed attributes and variable attributes of global constraints\n cat_sim: the similarity measure for categorical attributes\n num_dis_norm: normalization terms for numerical attributes\n cons_epsilon: threshold for local regression constraints\n agg_col: the column of aggregated value\n regression_package: which package is used to compute regression \n Returns:\n the top-k list of explanations for each user question\n \"\"\"\n\n index_building_time = 0\n constraint_building_time = 0\n question_validating_time = 0\n score_computing_time = 0\n result_merging_time = 0\n\n start = time.clock()\n column_index = dict()\n for column in data['df']:\n column_index[column] = dict()\n for index, row in data['df'].iterrows():\n for column in data['df']:\n val = row[column]\n if not val in column_index[column]:\n column_index[column][val] = []\n column_index[column][val].append(index)\n end = time.clock()\n index_building_time += end - start\n\n psi = []\n # local_cons = []\n # start = time.clock()\n # for i in range(len(cons)):\n # local_cons.append(build_local_regression_constraint(data, column_index, t, cons[i], cons_epsilon, agg_col, regression_package))\n # local_cons[i].print_fit_summary()\n # end = time.clock()\n # constraint_building_time += end - start\n\n explanation_type = 0\n max_support = []\n candidates = []\n for i in range(0, len(cons)):\n psi.append(0)\n max_support.append([])\n f_indexes = dict()\n print(cons[i])\n\n for index, row in data['df'].iterrows():\n t = get_F_value(cons[i][0], row)\n if ','.join(t) in f_indexes:\n continue\n \n con_index = None\n for j in range(len(cons[i][0])):\n idx_j = column_index[cons[i][0][j]][t[j]]\n # 
print(idx_j)\n # print(data['df']['index'].isin(idx_j))\n if con_index is None:\n con_index = pandas.Index(idx_j)\n else:\n con_index = con_index.intersection(pandas.Index(idx_j))\n # print(con_index)\n\n\n selected_rows = data['df'].loc[data['df']['index'].isin(con_index)]\n des = selected_rows['count'].describe()\n if des.loc[['count']].values[0] > 7:\n if des.loc[['mean']].values[0] > 1.49:\n print(t, des)\n candidates.append([selected_rows, des.loc[['mean']].values[0], \n des.loc[['count']].values[0], des.loc[['std']].values[0], \n des.loc[['75%']].values[0], des.loc[['25%']].values[0]])\n \n f_indexes[','.join(t)] = selected_rows\n data['df'].drop(con_index)\n\n # break\n \n # avg = sum()\n \n # rows = data['df'].loc[]\n # print(rows)\n print(\"i \", i)\n break\n \n return sorted(candidates, key=lambda x: x[4]-x[5])\n\ndef load_data(qr_file=DEFAULT_RESULT_PATH):\n ''' \n load query result\n '''\n df = pandas.read_csv(open(qr_file, 'r'), header=0, quotechar=\"'\")\n le = {}\n ohe = {}\n for column in df:\n df[column] = df[column].apply(lambda x: x.replace('\\'', '').strip())\n df[column] = df[column].apply(lambda x: float_or_integer(x))\n # if it is a categorical attribute, first encode each one into integers, and then use one-hot encoding\n if df[column].dtype.kind == 'S' or df[column].dtype.kind == 'O':\n le[column] = preprocessing.LabelEncoder()\n le[column].fit(df[column])\n ohe[column] = preprocessing.OneHotEncoder()\n le_col = le[column].transform(df[column])\n le_col = le_col.reshape(-1, 1)\n ohe[column] = preprocessing.OneHotEncoder(sparse=False)\n ohe[column].fit(le_col)\n df.insert(0, 'index', range(0, len(df))) \n data = {'df':df, 'le':le, 'ohe':ohe}\n return data\n\ndef load_user_question(uq_path=DEFAULT_QUESTION_PATH):\n '''\n load user questions\n '''\n uq = []\n with open(uq_path, 'rt') as uqfile:\n reader = csv.DictReader(uqfile, quotechar='\\'')\n headers = reader.fieldnames\n #temp_data = csv.reader(uqfile, delimiter=',', quotechar='\\'')\n #for row in temp_data:\n for row in reader:\n row_data = {}\n for k, v in enumerate(headers):\n print(k, v)\n if v != 'direction':\n if is_float(row[v]):\n row_data[v] = float(row[v])\n elif is_integer(row[v]):\n row_data[v] = float(long(row[v]))\n else:\n row_data[v] = row[v]\n if row['direction'][0] == 'h':\n dir = 1\n else:\n dir = -1\n uq.append({'target_tuple': row_data, 'dir':dir})\n return uq\n\ndef load_constraints(cons_path=DEFAULT_CONSTRAINT_PATH):\n '''\n load pre-defined constraints(currently only fixed attributes and variable attributes)\n '''\n inf = open(cons_path, 'r')\n cons = []\n while True:\n line = inf.readline()\n if not line:\n break\n cons.append([[],[]])\n cons[-1][0] = line.strip(' \\r\\n').split(',')\n line = inf.readline()\n cons[-1][1] = line.strip(' \\r\\n').split(',')\n inf.close()\n return cons\n\n \ndef main(argv=[]):\n query_result_file = DEFAULT_RESULT_PATH\n constraint_file = DEFAULT_CONSTRAINT_PATH\n user_question_file = DEFAULT_QUESTION_PATH\n outputfile = ''\n constraint_epsilon = DEFAULT_CONSTRAINT_EPSILON\n aggregate_column = DEFAULT_AGGREGATE_COLUMN\n try:\n opts, args = getopt.getopt(argv,\"hq:c:u:o:e:a\",[\"qfile=\",\"cfile=\",\"ufile=\",\"ofile=\",\"epsilon=\",\"aggregate_column=\"])\n except getopt.GetoptError:\n print('explanation.py -q <query_result_file> -c <constraint_file> -u \\\n <user_question_file> -o <outputfile> -e <epsilon> -a <aggregate_column>')\n sys.exit(2)\n for opt, arg in opts:\n if opt == '-h':\n print('explanation.py -q <query_result_file> -c <constraint_file> -u 
\\\n <user_question_file> -o <outputfile> -e <epsilon> -a <aggregate_column>')\n sys.exit(2) \n elif opt in (\"-q\", \"--qfile\"):\n query_result_file = arg\n elif opt in (\"-c\", \"--cfile\"):\n constraint_file = arg\n elif opt in (\"-u\", \"--ufile\"):\n user_question_file = arg\n elif opt in (\"-o\", \"--ofile\"):\n outputfile = arg\n elif opt in (\"-e\", \"--epsilon\"):\n constraint_epsilon = float(arg)\n elif opt in (\"-a\", \"--aggcolumn\"):\n aggregate_column = arg\n\n start = time.clock()\n data = load_data(query_result_file)\n constraints = load_constraints(DEFAULT_CONSTRAINT_PATH)\n Q = load_user_question(user_question_file)\n category_similarity = CategorySimilarityMatrix(EXAMPLE_SIMILARITY_MATRIX_PATH)\n #category_similarity = CategoryNetworkEmbedding(EXAMPLE_NETWORK_EMBEDDING_PATH, data['df'])\n num_dis_norm = normalize_numerical_distance(data['df'])\n end = time.clock()\n print('Loading time: ' + str(end-start) + 'seconds')\n \n start = time.clock()\n regression_package = 'scikit-learn'\n #regression_package = 'statsmodels'\n # explanations_list = find_explanation_regression_based(data, Q, constraints, category_similarity, \n # num_dis_norm, constraint_epsilon, \n # aggregate_column, regression_package)\n uq_list = find_best_user_questions(data, constraints, category_similarity, \n num_dis_norm, constraint_epsilon, \n aggregate_column, regression_package)\n end = time.clock()\n print('Total querying time: ' + str(end-start) + 'seconds')\n\n # ofile = sys.stdout\n ofile = open('./candidate_authors.txt', 'w')\n for i in range(len(uq_list)):\n #print(uq_list[i])\n ofile.write(str(uq_list))\n ofile.close()\n \n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n\n\n" ]
[ [ "pandas.DataFrame", "sklearn.preprocessing.LabelEncoder", "sklearn.preprocessing.OneHotEncoder", "pandas.Index" ] ]
fbartolic/volcano
[ "c9630a8a6ac64ace71631b00cb7afe9482572f0b" ]
[ "paper/figures/scripts/irtf_fit.py" ]
[ "import numpy as np\nimport pickle as pkl\n\nimport starry\nimport jax.numpy as jnp\nfrom jax import random\nimport numpyro\nimport numpyro.distributions as dist\nfrom numpyro.infer import *\n\nimport celerite2.jax\nfrom celerite2.jax import terms as jax_terms\nfrom celerite2 import terms, GaussianProcess\nfrom exoplanet.distributions import estimate_inverse_gamma_parameters\n\nfrom volcano.utils import *\n\nnp.random.seed(42)\nstarry.config.lazy = False\nnumpyro.enable_x64()\n\n\ndef fit_model(ydeg_inf, lc_in, lc_eg):\n # Compute ephemeris\n eph_list_io = []\n eph_list_jup = []\n\n for lc in (lc_in, lc_eg):\n times = lc.time\n\n eph_io = get_body_ephemeris(\n times, body_id=\"501\", step=\"1m\", return_orientation=True\n )\n eph_jup = get_body_ephemeris(\n times, body_id=\"599\", step=\"1m\", return_orientation=True\n )\n\n eph_list_io.append(eph_io)\n eph_list_jup.append(eph_jup)\n\n eph_io_in = eph_list_io[0]\n eph_jup_in = eph_list_jup[0]\n eph_io_eg = eph_list_io[1]\n eph_jup_eg = eph_list_jup[1]\n\n t_in = (lc_in.time.mjd - lc_in.time.mjd[0]) * 24 * 60\n t_eg = (lc_eg.time.mjd - lc_eg.time.mjd[0]) * 24 * 60\n\n f_obs_in = lc_in[\"flux\"].value\n f_err_in = lc_in[\"flux_err\"].value\n f_obs_eg = lc_eg[\"flux\"].value\n f_err_eg = lc_eg[\"flux_err\"].value\n\n f_obs = np.concatenate([f_obs_in, f_obs_eg])\n f_err = np.concatenate([f_err_in, f_err_eg])\n\n (xo_in, yo_in, ro_in, occ_lat_in,) = get_occultor_position_and_radius(\n eph_io_in,\n eph_jup_in,\n occultor_is_jupiter=True,\n rotate=True,\n return_occ_lat=True,\n method=\"\",\n )\n (xo_eg, yo_eg, ro_eg, occ_lat_eg,) = get_occultor_position_and_radius(\n eph_io_eg,\n eph_jup_eg,\n occultor_is_jupiter=True,\n rotate=True,\n return_occ_lat=True,\n method=\"\",\n )\n\n print(\"Ingress occultation latitude: \", occ_lat_in)\n print(\"Egress occultation latitude: \", occ_lat_eg)\n print(\"Ingress effective radius: \", ro_in)\n print(\"Egress effective radius: \", ro_eg)\n\n # Phase\n theta_in = eph_io_in[\"theta\"].value\n theta_eg = eph_io_eg[\"theta\"].value\n\n # Fit single map model with different map amplitudes for ingress and egress\n map = starry.Map(ydeg_inf)\n lat, lon, Y2P, P2Y, Dx, Dy = map.get_pixel_transforms(oversample=4)\n npix = Y2P.shape[0]\n\n Y2P = jnp.array(Y2P)\n P2Y = jnp.array(P2Y)\n Dx = jnp.array(Dx)\n Dy = jnp.array(Dy)\n\n # Evalute MAP model on denser grid\n xo_in_dense = np.linspace(xo_in[0], xo_in[-1], 200)\n yo_in_dense = np.linspace(yo_in[0], yo_in[-1], 200)\n theta_in_dense = np.linspace(theta_in[0], theta_in[-1], 200)\n\n xo_eg_dense = np.linspace(xo_eg[0], xo_eg[-1], 200)\n yo_eg_dense = np.linspace(yo_eg[0], yo_eg[-1], 200)\n theta_eg_dense = np.linspace(theta_eg[0], theta_eg[-1], 200)\n\n t_dense_in = np.linspace(t_in[0], t_in[-1], 200)\n t_dense_eg = np.linspace(t_eg[0], t_eg[-1], 200)\n\n # Compute design matrices\n map = starry.Map(ydeg_inf)\n A_in = jnp.array(\n map.design_matrix(xo=xo_in, yo=yo_in, ro=ro_in, theta=theta_in)\n )\n A_eg = jnp.array(\n map.design_matrix(xo=xo_eg, yo=yo_eg, ro=ro_eg, theta=theta_eg)\n )\n A_in_dense = jnp.array(\n map.design_matrix(\n xo=xo_in_dense, yo=yo_in_dense, ro=ro_in, theta=theta_in_dense\n )\n )\n A_eg_dense = jnp.array(\n map.design_matrix(\n xo=xo_eg_dense, yo=yo_eg_dense, ro=ro_eg, theta=theta_eg_dense\n )\n )\n # Set the prior scale tau0 for the global scale parameter tau\n D = npix\n N = len(f_obs)\n peff = 0.8 * D # Effective number of parameters\n sig = np.mean(f_err_in)\n tau0 = peff / (D - peff) * sig / np.sqrt(len(f_obs))\n print(\"tau0\", 
tau0)\n\n # Other constants for the model\n slab_scale = 1000.0\n slab_df = 4\n\n def model():\n #  Specify Finish Horseshoe prior\n # Non-centered distributions- loc=0, width=1 then shift/stretch afterwards\n beta_raw = numpyro.sample(\"beta_raw\", dist.HalfNormal(1.0).expand([D]))\n lamda_raw = numpyro.sample(\n \"lamda_raw\", dist.HalfCauchy(1.0).expand([D])\n )\n tau_raw = numpyro.sample(\"tau_raw\", dist.HalfCauchy(1.0))\n c2_raw = numpyro.sample(\n \"c2_raw\", dist.InverseGamma(0.5 * slab_df, 0.5 * slab_df)\n )\n\n # Do the shifting/stretching\n tau = numpyro.deterministic(\"tau\", tau_raw * tau0)\n c2 = numpyro.deterministic(\"c2\", slab_scale ** 2 * c2_raw)\n lamda_tilde = numpyro.deterministic(\n \"lamda_tilde\",\n jnp.sqrt(c2)\n * lamda_raw\n / jnp.sqrt(c2 + tau ** 2 * lamda_raw ** 2),\n )\n numpyro.deterministic(\n \"mu_meff\",\n (tau / sig * np.sqrt(N)) / (1 + tau / sig * np.sqrt(N)) * D,\n )\n\n # The Finnish Horseshoe prior on p\n p = numpyro.deterministic(\"p\", tau * lamda_tilde * beta_raw)\n x = jnp.dot(P2Y, p)\n\n # Run the smoothing filter\n S = jnp.array(get_smoothing_filter(ydeg_inf, 2 / ydeg_inf))\n x_s = jnp.dot(S, x[:, None]).reshape(-1)\n\n # Allow for a different amplitude of the egress map\n amp_eg = numpyro.sample(\"amp_eg\", dist.Normal(1.0, 0.1))\n numpyro.deterministic(\"x_in\", x_s)\n numpyro.deterministic(\"x_eg\", amp_eg * x_s)\n\n # Compute flux\n ln_flux_offset = numpyro.sample(\n \"ln_flux_offset\", dist.Normal(0.0, 4.0).expand([2])\n )\n\n flux_in = jnp.dot(A_in, x_s[:, None]).reshape(-1) + jnp.exp(\n ln_flux_offset[0]\n )\n flux_eg = jnp.dot(A_eg, amp_eg * x_s[:, None]).reshape(-1) + jnp.exp(\n ln_flux_offset[1]\n )\n\n numpyro.deterministic(\"flux_in\", flux_in)\n numpyro.deterministic(\"flux_eg\", flux_eg)\n flux = jnp.concatenate([flux_in, flux_eg])\n\n # Dense grid\n flux_in_dense = jnp.dot(A_in_dense, x_s[:, None]).reshape(\n -1\n ) + jnp.exp(ln_flux_offset[0])\n\n flux_eg_dense = jnp.dot(A_eg_dense, amp_eg * x_s[:, None]).reshape(\n -1\n ) + jnp.exp(ln_flux_offset[1])\n numpyro.deterministic(\"flux_in_dense\", flux_in_dense)\n numpyro.deterministic(\"flux_eg_dense\", flux_eg_dense)\n\n # GP likelihood\n sigma = numpyro.sample(\n \"sigma_gp\",\n dist.HalfNormal(0.1).expand([2]),\n )\n # params_in = estimate_inverse_gamma_parameters(\n # np.min(np.diff(t_in)), t_in[-1] - t_in[0]\n # )\n # params_eg = estimate_inverse_gamma_parameters(\n # np.min(np.diff(t_eg)), t_eg[-1] - t_eg[0]\n # )\n\n # rho = numpyro.sample(\n # \"rho_gp\",\n # dist.InverseGamma(\n # np.array([params_in[\"alpha\"], params_eg[\"alpha\"]]),\n # np.array([params_in[\"beta\"], params_eg[\"beta\"]])),\n # )\n rho = numpyro.sample(\n \"rho_gp\", dist.HalfNormal(np.array([t_in[-1], t_eg[-1]]))\n )\n\n kernel_in = jax_terms.Matern32Term(sigma=sigma[0], rho=rho[0])\n kernel_eg = jax_terms.Matern32Term(sigma=sigma[1], rho=rho[1])\n\n flux_in_fun = lambda _: flux_in\n flux_eg_fun = lambda _: flux_eg\n\n # Hierarchical model for the errobars\n err_in_scale = numpyro.sample(\"err_in_scale\", dist.HalfNormal(0.1))\n err_eg_scale = numpyro.sample(\"err_eg_scale\", dist.HalfNormal(0.1))\n f_err_in_mod = numpyro.sample(\n \"f_err_in_mod\",\n dist.HalfNormal(err_in_scale).expand([len(f_obs_in)]),\n )\n f_err_eg_mod = numpyro.sample(\n \"f_err_eg_mod\",\n dist.HalfNormal(err_eg_scale).expand([len(f_obs_eg)]),\n )\n\n # # Flux dependent noise term in quadrature to the errorbars quadrature\n # bounded_normal = dist.Normal(1, 0.1)\n # bounded_normal.support = dist.constraints.greater_than(1.0)\n # 
alpha = numpyro.sample(\"alpha\", bounded_normal.expand([2]))\n        # beta = numpyro.sample(\"beta\", dist.HalfNormal(1.0).expand([2]))\n\n        # White noise term\n        # f_err_in_mod = numpyro.deterministic(\n        #     \"f_err_in_mod\",\n        #     jnp.sqrt((alpha[0] * f_err_in) ** 2 + beta[0] ** 2 * flux_in),\n        # )\n        # f_err_eg_mod = numpyro.deterministic(\n        #     \"f_err_eg_mod\",\n        #     jnp.sqrt((alpha[1] * f_err_eg) ** 2 + beta[1] ** 2 * flux_eg),\n        # )\n\n        # Ingress GP\n        gp_in = celerite2.jax.GaussianProcess(kernel_in, mean=flux_in_fun)\n        gp_in.compute(t_in, yerr=f_err_in_mod, check_sorted=False)\n        numpyro.sample(\"obs_in\", gp_in.numpyro_dist(), obs=f_obs_in)\n\n        # Egress GP\n        gp_eg = celerite2.jax.GaussianProcess(kernel_eg, mean=flux_eg_fun)\n        gp_eg.compute(t_eg, yerr=f_err_eg_mod, check_sorted=False)\n        numpyro.sample(\"obs_eg\", gp_eg.numpyro_dist(), obs=f_obs_eg)\n\n    # Initial values keyed by sample-site name (\"lamda_raw\" matches the site\n    # declared in the model)\n    init_vals = {\n        \"beta_raw\": 0.5 * jnp.ones(D),\n        \"lamda_raw\": jnp.ones(D),\n        \"tau_raw\": 0.1,\n        \"c2_raw\": 5 ** 2,\n        \"ln_flux_offset\": -2 * np.ones(2),\n        \"sigma_gp\": 0.1 * np.ones(2) * f_err_in[0],\n        \"rho_gp\": 0.15 * np.ones(2),\n    }\n\n    nuts_kernel = NUTS(\n        model,\n        dense_mass=False,\n        init_strategy=init_to_value(values=init_vals),\n        target_accept_prob=0.9,\n    )\n\n    mcmc = MCMC(nuts_kernel, num_warmup=500, num_samples=3000)\n    rng_key = random.PRNGKey(0)\n    mcmc.run(rng_key)\n    samples = mcmc.get_samples()\n    return samples\n\n\n# Fit the 1998 pair of light curves\nwith open(\"../../../data/irtf_processed/lc_1998-08-27.pkl\", \"rb\") as handle:\n    lc_in = pkl.load(handle)\n\nwith open(\"../../../data/irtf_processed/lc_1998-11-29.pkl\", \"rb\") as handle:\n    lc_eg = pkl.load(handle)\n\nsamples = fit_model(20, lc_in, lc_eg)\n\nwith open(\"irtf_1998_samples_cubic.pkl\", \"wb\") as handle:\n    pkl.dump(samples, handle)\n\n# Fit the 2017 pair of light curves\nwith open(\"../../../data/irtf_processed/lc_2017-03-31.pkl\", \"rb\") as handle:\n    lc_in = pkl.load(handle)\n\nwith open(\"../../../data/irtf_processed/lc_2017-05-11.pkl\", \"rb\") as handle:\n    lc_eg = pkl.load(handle)\n\nsamples2 = fit_model(20, lc_in, lc_eg)\n\nwith open(\"irtf_2017_samples_cubic.pkl\", \"wb\") as handle:\n    pkl.dump(samples2, handle)\n" ]
[ [ "numpy.concatenate", "numpy.array", "numpy.random.seed", "numpy.ones", "numpy.mean", "numpy.sqrt", "numpy.linspace" ] ]
sppleHao/water
[ "f7b274b5084e69da31ef4f7084cc7ab2c09e6b53" ]
[ "process_anno_angle.py" ]
[ "'''\n角度预处理\n'''\nimport numpy as np\nimport cv2\nimport os\nimport sys\nimport math\nfrom math import *\nimport scipy.misc\nfrom yolo import YOLO\n\n#恢复原图框\ndef scale_box(box,scale):\n new_box = (box[0]/scale[0],box[1]/scale[1],box[2]/scale[0],box[3]/scale[1])\n return [int(x) for x in new_box]\n\n#恢复原图点\ndef scale_point(point,scale):\n new_p = (point[0]/scale[0],point[1]/scale[1])\n return [int(x) for x in new_p]\n\ndef box_to_str(box):\n return str(box[0])+','+str(box[1])+','+str(box[2])+','+str(box[3])\n\n#逆时针旋转图片而不裁剪\ndef rotate(img,degree):\n height,width=img.shape[:2]\n heightNew=int(width*fabs(sin(radians(degree)))+height*fabs(cos(radians(degree))))\n widthNew=int(height*fabs(sin(radians(degree)))+width*fabs(cos(radians(degree))))\n\n matRotation=cv2.getRotationMatrix2D((width/2,height/2),degree,1)\n\n matRotation[0,2] +=(widthNew-width)/2 \n matRotation[1,2] +=(heightNew-height)/2 \n\n imgRotation=cv2.warpAffine(img,matRotation,(widthNew,heightNew),borderValue=(255,255,255))\n\n return imgRotation\n\ndef rotate_by_vec(img,sin_theta,cos_theta):\n height,width=img.shape[:2]\n #print(img.shape)\n heightNew=int(width*fabs(sin_theta)+height*fabs(cos_theta))\n widthNew=int(height*fabs(sin_theta)+width*fabs(cos_theta))\n\n a=np.array([\n [1,0,0],\n [0,1,0],\n [-width/2,-height/2,1]])\n b=np.array([\n [cos_theta,sin_theta,0],\n [-sin_theta,cos_theta,0],\n [0,0,1]],dtype='float32'\n )\n c=np.array([\n [1,0,0],\n [0,1,0],\n [width/2,height/2,1]])\n \n matRotation = np.dot(a,np.dot(b,c))\n matRotation = matRotation.T\n matRotation = matRotation[:2,:]\n #print(matRotation)\n\n matRotation[0,2] +=(widthNew-width)/2 \n matRotation[1,2] +=(heightNew-height)/2 \n\n imgRotation=cv2.warpAffine(img,matRotation,(widthNew,heightNew),borderValue=(255,255,255))\n\n return imgRotation \n\n#旋转角度(顺时针)\ndef compute_angel(point1,point2):\n \"\"\"\n 计算需要顺时针旋转才能转正的角度\n 输出[0-2],为弧度值除以pi\n \"\"\"\n p1 = np.array(point1,dtype=float)\n p2 = np.array(point2,dtype=float)\n\n #表盘左侧到右侧的向量\n v = p2-p1\n\n #x轴的向量\n up_vector = np.array([1,0],dtype=float)\n v_norm = np.linalg.norm(v)\n\n #夹角的弧度值\n cos_theta = np.arccos(np.dot(v,up_vector)/v_norm)\n\n #left y > right y , 夹角为顺时针\n if(point1[1]>point2[1]):\n cos_theta = cos_theta/math.pi\n else:\n cos_theta = 2-cos_theta/math.pi\n return round(float(cos_theta),3)\n\nif __name__ == '__main__':\n resize_shape = (1200,800)\n anno_dir = os.path.join(os.path.abspath('.'),os.path.join('data','images','annotation'))\n txts = os.listdir(anno_dir)\n\n yolo = YOLO()\n\n meta = []\n\n for txt in txts:\n #print(txt)\n if (str(txt).endswith('.txt')):\n with open(os.path.join(anno_dir,txt),'r') as f:\n img = cv2.imread('data/images/'+str(txt[:-4])+'.jpg')\n shape = img.shape\n scale_x = resize_shape[0]/shape[1]\n scale_y = resize_shape[1]/shape[0]\n scale = (scale_x,scale_y)\n\n #无用-----------------------\n xmin = f.readline().strip()\n ymin = f.readline().strip()\n xmax = f.readline().strip()\n ymax = f.readline().strip()\n #box = [int(x) for x in (xmin,ymin,xmax,ymax)]\n #box = scale_box(box,scale)\n #print([shape,box])\n xlist = [int(x) for x in f.readline().strip().split('\\t')]\n ylist = [int(x) for x in f.readline().strip().split('\\t')]\n\n long_number_points = []\n angle_points = []\n small_meter_points = []\n for i in range(len(xlist)):\n point = (xlist[i],ylist[i])\n if i<4:\n long_number_points.append(scale_point(point,scale))\n elif i<6:\n angle_points.append(point)\n else:\n small_meter_points.append(scale_point(point,scale))\n\n angle = 
compute_angel(angle_points[0],angle_points[1])\n \n #anno['long_number_points'] = long_number_points\n #anno['small_meter_points']=small_meter_points\n\n per_angle = 10\n \n for i in range(int(360/per_angle)):\n image = rotate(img,i*per_angle) #逆时针旋转30、60、。。。\n boxes = yolo.detect_result(image)\n if len(boxes)>0:\n print(boxes[0])\n anno = dict()\n anno['name'] = str(txt[:-4])+'.jpg'\n anno['meter_box'] = boxes[0] #(left,top,right,bottom)\n anno['aug_angle'] = i*per_angle/360\n anno['ori_angle']=angle\n meta.append(anno)\n else:\n print('cannot detect')\n\n with open('train_regression.txt','w+') as file:\n for m in meta:\n file.write('data/images/'+m['name'])\n file.write(' ')\n file.write(str(m['aug_angle']))\n file.write(' ') \n file.write(box_to_str(m['meter_box']))\n file.write(' ')\n file.write(str(m['ori_angle']))\n file.write('\\n')\n\n\n\n\n\n \n " ]
[ [ "numpy.array", "numpy.linalg.norm", "numpy.dot" ] ]
StoneSwine/Segmentt
[ "5bb1076b4ced3b108481994caf78461bcd60eb22" ]
[ "src/pixel_feature_extraction.py" ]
[ "#!/usr/bin/env python3\n\nimport math\n# Standard library imports\nimport os\n\n# Third party imports\nimport cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport seaborn as sns\n# finseg imports\nfrom Segmentt.flooding import segment_image\n\nWSIZ=50\n\ndef local_coherence(img, window_s=WSIZ):\n \"\"\"\n Calculate the coherence according to methdology described in:\n Bazen, Asker M., and Sabih H. Gerez. \"Segmentation of fingerprint images.\"\n ProRISC 2001 Workshop on Circuits, Systems and Signal Processing. Veldhoven,\n The Netherlands, 2001.\n \"\"\"\n coherence = []\n rs = window_s\n cs = window_s\n for r in range(4, img.shape[0] - rs, rs):\n for c in range(4, img.shape[1] - cs, cs):\n window = img[r:r + rs, c:c + cs]\n if window.var() != 0: # Need variance because of the constraint (gxx + gyy) < 0\n gx = np.uint8(np.absolute(cv2.Sobel(window, cv2.CV_64F, 1, 0, ksize=5))).flatten()\n gy = np.uint8(np.absolute(cv2.Sobel(window, cv2.CV_64F, 0, 1, ksize=5))).flatten()\n\n gxx = sum([int(x) ** 2 for x in gx])\n gyy = sum([int(y) ** 2 for y in gy])\n gxy = sum([int(x) * int(y) for x, y in zip(gx, gy)])\n\n assert gxx + gyy != 0\n coherence.append(math.sqrt((math.pow((gxx - gyy), 2) + 4 * math.pow(gxy, 2))) / (gxx + gyy))\n return coherence\n\n\ndef local_variance_mean(img, window_s=WSIZ):\n \"\"\"\n Calculate local variance and mean values for a window using the numpy variance and mean functions\n \"\"\"\n rs = cs = window_s\n mean = []\n variance = []\n for r in range(4, img.shape[0] - rs, rs):\n for c in range(4, img.shape[1] - cs, cs):\n window = img[r:r + rs, c:c + cs]\n if window.var() != 0:\n mean.append(np.mean(window))\n variance.append(np.var(window))\n return variance, mean\n\n\ndef run_segment_image():\n \"\"\"\n This program is kind of slow because of all the looping needed to go through a full image\n \"\"\"\n HERE = os.path.dirname(__file__)\n\n fg_v = []\n fg_m = []\n fg_c = []\n bg_v = []\n bg_m = []\n bg_c = []\n\n for filename in os.listdir(HERE + '/baseline/'):\n if filename.endswith(\".jpg\") or filename.endswith(\".JPG\"):\n oimg = cv2.imread(HERE + '/baseline/' + filename, cv2.IMREAD_UNCHANGED)\n croppedforeground, _, background = segment_image(oimg)\n print(filename)\n\n variance, mean = local_variance_mean(croppedforeground)\n for x in variance:\n fg_v.append(x)\n for x in mean:\n fg_m.append(x)\n for x in local_coherence(croppedforeground):\n fg_c.append(x)\n\n variance, mean = local_variance_mean(background)\n for x in variance:\n bg_v.append(x)\n for x in mean:\n bg_m.append(x)\n for x in local_coherence(background):\n bg_c.append(x)\n\n plt.title(\"Variance\")\n sns.distplot(fg_v, hist=False, label=\"Foreground\")\n sns.distplot(bg_v, hist=False, label=\"Background\")\n plt.show()\n\n plt.title(\"Mean\")\n sns.distplot(fg_m, hist=False, label=\"Foreground\")\n sns.distplot(bg_m, hist=False, label=\"Background\")\n plt.show()\n\n plt.title(\"Coherence\")\n sns.distplot(fg_c, hist=False, label=\"Foreground\")\n sns.distplot(bg_c, hist=False, label=\"Background\")\n plt.show()\n\n\nif __name__ == \"__main__\":\n run_segment_image()\n" ]
[ [ "matplotlib.pyplot.show", "numpy.var", "matplotlib.pyplot.title", "numpy.mean" ] ]
duthedd/keras-deep-learning
[ "f4cb8900245a12d03e33a5b76d2e33aa2bdda4f0" ]
[ "keras/backend/common.py" ]
[ "import numpy as np\n\n# the type of float to use throughout the session.\n_FLOATX = 'float32'\n_EPSILON = 10e-8\n_IMAGE_DATA_FORMAT = 'channels_last'\n\n\ndef epsilon():\n \"\"\"Returns the value of the fuzz\n factor used in numeric expressions.\n\n # Returns\n A float.\n\n # Example\n ```python\n >>> keras.backend.epsilon()\n 1e-08\n ```\n \"\"\"\n return _EPSILON\n\n\ndef set_epsilon(e):\n \"\"\"Sets the value of the fuzz\n factor used in numeric expressions.\n\n # Arguments\n e: float. New value of epsilon.\n\n # Example\n ```python\n >>> from keras import backend as K\n >>> K.epsilon()\n 1e-08\n >>> K.set_epsilon(1e-05)\n >>> K.epsilon()\n 1e-05\n ```\n \"\"\"\n global _EPSILON\n _EPSILON = e\n\n\ndef floatx():\n \"\"\"Returns the default float type, as a string.\n (e.g. 'float16', 'float32', 'float64').\n\n # Returns\n String, the current default float type.\n\n # Example\n ```python\n >>> keras.backend.floatx()\n 'float32'\n ```\n \"\"\"\n return _FLOATX\n\n\ndef set_floatx(floatx):\n \"\"\"Sets the default float type.\n\n # Arguments\n String: 'float16', 'float32', or 'float64'.\n\n # Example\n ```python\n >>> from keras import backend as K\n >>> K.floatx()\n 'float32'\n >>> K.set_floatx('float16')\n >>> K.floatx()\n 'float16'\n ```\n \"\"\"\n global _FLOATX\n if floatx not in {'float16', 'float32', 'float64'}:\n raise ValueError('Unknown floatx type: ' + str(floatx))\n _FLOATX = str(floatx)\n\n\ndef cast_to_floatx(x):\n \"\"\"Cast a Numpy array to the default Keras float type.\n\n # Arguments\n x: Numpy array.\n\n # Returns\n The same Numpy array, cast to its new type.\n\n # Example\n ```python\n >>> from keras import backend as K\n >>> K.floatx()\n 'float32'\n >>> arr = numpy.array([1.0, 2.0], dtype='float64')\n >>> arr.dtype\n dtype('float64')\n >>> new_arr = K.cast_to_floatx(arr)\n >>> new_arr\n array([ 1., 2.], dtype=float32)\n >>> new_arr.dtype\n dtype('float32')\n ```\n \"\"\"\n return np.asarray(x, dtype=_FLOATX)\n\n\ndef image_data_format():\n \"\"\"Returns the default image data format convention ('channels_first' or 'channels_last').\n\n # Returns\n A string, either `'channels_first'` or `'channels_last'`\n\n # Example\n ```python\n >>> keras.backend.image_data_format()\n 'channels_first'\n ```\n \"\"\"\n return _IMAGE_DATA_FORMAT\n\n\ndef set_image_data_format(data_format):\n \"\"\"Sets the value of the data format convention.\n\n # Arguments\n data_format: string. `'channels_first'` or `'channels_last'`.\n\n # Example\n ```python\n >>> from keras import backend as K\n >>> K.image_data_format()\n 'channels_first'\n >>> K.set_image_data_format('channels_last')\n >>> K.image_data_format()\n 'channels_last'\n ```\n \"\"\"\n global _IMAGE_DATA_FORMAT\n if data_format not in {'channels_last', 'channels_first'}:\n raise ValueError('Unknown data_format:', data_format)\n _IMAGE_DATA_FORMAT = str(data_format)\n\n\n# Legacy methods\n\ndef set_image_dim_ordering(dim_ordering):\n \"\"\"Legacy setter for `image_data_format`.\n\n # Arguments\n dim_ordering: string. 
`tf` or `th`.\n\n # Example\n ```python\n >>> from keras import backend as K\n >>> K.image_data_format()\n 'channels_first'\n >>> K.set_image_data_format('channels_last')\n >>> K.image_data_format()\n 'channels_last'\n ```\n\n # Raises\n ValueError: if `dim_ordering` is invalid.\n \"\"\"\n global _IMAGE_DATA_FORMAT\n if dim_ordering not in {'tf', 'th'}:\n raise ValueError('Unknown dim_ordering:', dim_ordering)\n if dim_ordering == 'th':\n data_format = 'channels_first'\n else:\n data_format = 'channels_last'\n _IMAGE_DATA_FORMAT = data_format\n\n\ndef image_dim_ordering():\n \"\"\"Legacy getter for `image_data_format`.\n\n # Returns\n string, one of `'th'`, `'tf'`\n \"\"\"\n if _IMAGE_DATA_FORMAT == 'channels_first':\n return 'th'\n else:\n return 'tf'\n" ]
[ [ "numpy.asarray" ] ]
RileyMShea/vectorbt
[ "92ce571ce9fd0667f2994a2c922fb4cfcde10c88" ]
[ "tests/test_generic.py" ]
[ "import numpy as np\nimport pandas as pd\nfrom numba import njit\nfrom datetime import datetime\nimport pytest\nfrom itertools import product\n\nfrom vectorbt.generic import nb\nfrom vectorbt.generic.drawdowns import Drawdowns\n\nseed = 42\n\nday_dt = np.timedelta64(86400000000000)\n\ndf = pd.DataFrame({\n 'a': [1, 2, 3, 4, np.nan],\n 'b': [np.nan, 4, 3, 2, 1],\n 'c': [1, 2, np.nan, 2, 1]\n}, index=pd.DatetimeIndex([\n datetime(2018, 1, 1),\n datetime(2018, 1, 2),\n datetime(2018, 1, 3),\n datetime(2018, 1, 4),\n datetime(2018, 1, 5)\n]))\ngroup_by = np.array(['g1', 'g1', 'g2'])\n\n\n@njit\ndef i_or_col_pow_nb(i_or_col, x, pow):\n return np.power(x, pow)\n\n\n@njit\ndef pow_nb(x, pow):\n return np.power(x, pow)\n\n\n@njit\ndef nanmean_nb(x):\n return np.nanmean(x)\n\n\n@njit\ndef i_col_nanmean_nb(i, col, x):\n return np.nanmean(x)\n\n\n@njit\ndef i_nanmean_nb(i, x):\n return np.nanmean(x)\n\n\n@njit\ndef col_nanmean_nb(col, x):\n return np.nanmean(x)\n\n\n# ############# accessors.py ############# #\n\n\nclass TestAccessors:\n def test_split_into_ranges(self):\n pd.testing.assert_frame_equal(\n df['a'].vbt.split_into_ranges(n=2),\n pd.DataFrame(\n np.array([\n [1., 4.],\n [2., np.nan]\n ]),\n index=pd.RangeIndex(start=0, stop=2, step=1),\n columns=pd.MultiIndex.from_arrays([\n pd.DatetimeIndex([\n '2018-01-01', '2018-01-04'\n ], dtype='datetime64[ns]', name='range_start', freq=None),\n pd.DatetimeIndex([\n '2018-01-02', '2018-01-05'\n ], dtype='datetime64[ns]', name='range_end', freq=None)\n ])\n )\n )\n pd.testing.assert_frame_equal(\n df['a'].vbt.split_into_ranges(range_len=2),\n pd.DataFrame(\n np.array([\n [1., 2., 3., 4.],\n [2., 3., 4., np.nan]\n ]),\n index=pd.RangeIndex(start=0, stop=2, step=1),\n columns=pd.MultiIndex.from_arrays([\n pd.DatetimeIndex([\n '2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04'\n ], dtype='datetime64[ns]', name='range_start', freq=None),\n pd.DatetimeIndex([\n '2018-01-02', '2018-01-03', '2018-01-04', '2018-01-05'\n ], dtype='datetime64[ns]', name='range_end', freq=None)\n ])\n )\n )\n pd.testing.assert_frame_equal(\n df['a'].vbt.split_into_ranges(range_len=2, n=3),\n pd.DataFrame(\n np.array([\n [1., 3., 4.],\n [2., 4., np.nan]\n ]),\n index=pd.RangeIndex(start=0, stop=2, step=1),\n columns=pd.MultiIndex.from_arrays([\n pd.DatetimeIndex([\n '2018-01-01', '2018-01-03', '2018-01-04'\n ], dtype='datetime64[ns]', name='range_start', freq=None),\n pd.DatetimeIndex([\n '2018-01-02', '2018-01-04', '2018-01-05'\n ], dtype='datetime64[ns]', name='range_end', freq=None)\n ])\n )\n )\n pd.testing.assert_frame_equal(\n df['a'].vbt.split_into_ranges(range_len=3, n=2),\n pd.DataFrame(\n np.array([\n [1., 3.],\n [2., 4.],\n [3., np.nan]\n ]),\n index=pd.RangeIndex(start=0, stop=3, step=1),\n columns=pd.MultiIndex.from_arrays([\n pd.DatetimeIndex([\n '2018-01-01', '2018-01-03'\n ], dtype='datetime64[ns]', name='range_start', freq=None),\n pd.DatetimeIndex([\n '2018-01-03', '2018-01-05'\n ], dtype='datetime64[ns]', name='range_end', freq=None)\n ])\n )\n )\n pd.testing.assert_frame_equal(\n df.vbt.split_into_ranges(n=2),\n pd.DataFrame(\n np.array([\n [1., 4., np.nan, 2., 1., 2.],\n [2., np.nan, 4., 1., 2., 1.]\n ]),\n index=pd.RangeIndex(start=0, stop=2, step=1),\n columns=pd.MultiIndex.from_arrays([\n pd.Index(['a', 'a', 'b', 'b', 'c', 'c'], dtype='object'),\n pd.DatetimeIndex([\n '2018-01-01', '2018-01-04', '2018-01-01', '2018-01-04', '2018-01-01', '2018-01-04'\n ], dtype='datetime64[ns]', name='range_start', freq=None),\n pd.DatetimeIndex([\n '2018-01-02', 
'2018-01-05', '2018-01-02', '2018-01-05', '2018-01-02', '2018-01-05'\n ], dtype='datetime64[ns]', name='range_end', freq=None)\n ])\n )\n )\n pd.testing.assert_frame_equal(\n df.vbt.split_into_ranges(start_idxs=[0, 1], end_idxs=[3, 4]),\n pd.DataFrame(\n np.array([\n [1., 2., np.nan, 4., 1., 2.],\n [2., 3., 4., 3., 2., np.nan],\n [3., 4., 3., 2., np.nan, 2.]\n ]),\n index=pd.RangeIndex(start=0, stop=3, step=1),\n columns=pd.MultiIndex.from_arrays([\n pd.Index(['a', 'a', 'b', 'b', 'c', 'c'], dtype='object'),\n pd.DatetimeIndex([\n '2018-01-01', '2018-01-02', '2018-01-01', '2018-01-02', '2018-01-01', '2018-01-02'\n ], dtype='datetime64[ns]', name='range_start', freq=None),\n pd.DatetimeIndex([\n '2018-01-03', '2018-01-04', '2018-01-03', '2018-01-04', '2018-01-03', '2018-01-04'\n ], dtype='datetime64[ns]', name='range_end', freq=None)\n ])\n )\n )\n pd.testing.assert_frame_equal(\n df.vbt.split_into_ranges(start_idxs=df.index[[0, 1]], end_idxs=df.index[[2, 3]]),\n pd.DataFrame(\n np.array([\n [1., 2., np.nan, 4., 1., 2.],\n [2., 3., 4., 3., 2., np.nan],\n [3., 4., 3., 2., np.nan, 2.]\n ]),\n index=pd.RangeIndex(start=0, stop=3, step=1),\n columns=pd.MultiIndex.from_arrays([\n pd.Index(['a', 'a', 'b', 'b', 'c', 'c'], dtype='object'),\n pd.DatetimeIndex([\n '2018-01-01', '2018-01-02', '2018-01-01', '2018-01-02', '2018-01-01', '2018-01-02'\n ], dtype='datetime64[ns]', name='range_start', freq=None),\n pd.DatetimeIndex([\n '2018-01-03', '2018-01-04', '2018-01-03', '2018-01-04', '2018-01-03', '2018-01-04'\n ], dtype='datetime64[ns]', name='range_end', freq=None)\n ])\n )\n )\n with pytest.raises(Exception) as e_info:\n df.vbt.split_into_ranges()\n with pytest.raises(Exception) as e_info:\n df.vbt.split_into_ranges(start_idxs=[0, 1])\n with pytest.raises(Exception) as e_info:\n df.vbt.split_into_ranges(end_idxs=[2, 4])\n with pytest.raises(Exception) as e_info:\n df.vbt.split_into_ranges(start_idxs=[0, 1], end_idxs=[2, 4])\n\n def test_shuffle(self):\n pd.testing.assert_series_equal(\n df['a'].vbt.shuffle(seed=seed),\n pd.Series(\n np.array([2.0, np.nan, 3.0, 1.0, 4.0]),\n index=df['a'].index,\n name=df['a'].name\n )\n )\n np.testing.assert_array_equal(\n df['a'].vbt.shuffle(seed=seed).values,\n nb.shuffle_1d_nb(df['a'].values, seed=seed)\n )\n pd.testing.assert_frame_equal(\n df.vbt.shuffle(seed=seed),\n pd.DataFrame(\n np.array([\n [2., 2., 2.],\n [np.nan, 4., 1.],\n [3., 3., 2.],\n [1., np.nan, 1.],\n [4., 1., np.nan]\n ]),\n index=df.index,\n columns=df.columns\n )\n )\n\n @pytest.mark.parametrize(\n \"test_value\",\n [-1, 0., np.nan],\n )\n def test_fillna(self, test_value):\n pd.testing.assert_series_equal(df['a'].vbt.fillna(test_value), df['a'].fillna(test_value))\n pd.testing.assert_frame_equal(df.vbt.fillna(test_value), df.fillna(test_value))\n\n @pytest.mark.parametrize(\n \"test_n\",\n [1, 2, 3, 4, 5],\n )\n def test_bshift(self, test_n):\n pd.testing.assert_series_equal(df['a'].vbt.bshift(test_n), df['a'].shift(-test_n))\n np.testing.assert_array_equal(\n df['a'].vbt.bshift(test_n).values,\n nb.bshift_1d_nb(df['a'].values, test_n)\n )\n pd.testing.assert_frame_equal(df.vbt.bshift(test_n), df.shift(-test_n))\n\n @pytest.mark.parametrize(\n \"test_n\",\n [1, 2, 3, 4, 5],\n )\n def test_fshift(self, test_n):\n pd.testing.assert_series_equal(df['a'].vbt.fshift(test_n), df['a'].shift(test_n))\n np.testing.assert_array_equal(\n df['a'].vbt.fshift(test_n).values,\n nb.fshift_1d_nb(df['a'].values, test_n)\n )\n pd.testing.assert_frame_equal(df.vbt.fshift(test_n), df.shift(test_n))\n\n def 
test_diff(self):\n pd.testing.assert_series_equal(df['a'].vbt.diff(), df['a'].diff())\n np.testing.assert_array_equal(df['a'].vbt.diff().values, nb.diff_1d_nb(df['a'].values))\n pd.testing.assert_frame_equal(df.vbt.diff(), df.diff())\n\n def test_pct_change(self):\n pd.testing.assert_series_equal(df['a'].vbt.pct_change(), df['a'].pct_change(fill_method=None))\n np.testing.assert_array_equal(df['a'].vbt.pct_change().values, nb.pct_change_1d_nb(df['a'].values))\n pd.testing.assert_frame_equal(df.vbt.pct_change(), df.pct_change(fill_method=None))\n\n def test_ffill(self):\n pd.testing.assert_series_equal(df['a'].vbt.ffill(), df['a'].ffill())\n pd.testing.assert_frame_equal(df.vbt.ffill(), df.ffill())\n\n def test_product(self):\n assert df['a'].vbt.product() == df['a'].product()\n np.testing.assert_array_equal(df.vbt.product(), df.product())\n\n def test_cumsum(self):\n pd.testing.assert_series_equal(df['a'].vbt.cumsum(), df['a'].cumsum())\n pd.testing.assert_frame_equal(df.vbt.cumsum(), df.cumsum())\n\n def test_cumprod(self):\n pd.testing.assert_series_equal(df['a'].vbt.cumprod(), df['a'].cumprod())\n pd.testing.assert_frame_equal(df.vbt.cumprod(), df.cumprod())\n\n @pytest.mark.parametrize(\n \"test_window,test_minp\",\n list(product([1, 2, 3, 4, 5], [1, None]))\n )\n def test_rolling_min(self, test_window, test_minp):\n if test_minp is None:\n test_minp = test_window\n pd.testing.assert_series_equal(\n df['a'].vbt.rolling_min(test_window, minp=test_minp),\n df['a'].rolling(test_window, min_periods=test_minp).min()\n )\n pd.testing.assert_frame_equal(\n df.vbt.rolling_min(test_window, minp=test_minp),\n df.rolling(test_window, min_periods=test_minp).min()\n )\n\n @pytest.mark.parametrize(\n \"test_window,test_minp\",\n list(product([1, 2, 3, 4, 5], [1, None]))\n )\n def test_rolling_max(self, test_window, test_minp):\n if test_minp is None:\n test_minp = test_window\n pd.testing.assert_series_equal(\n df['a'].vbt.rolling_max(test_window, minp=test_minp),\n df['a'].rolling(test_window, min_periods=test_minp).max()\n )\n pd.testing.assert_frame_equal(\n df.vbt.rolling_max(test_window, minp=test_minp),\n df.rolling(test_window, min_periods=test_minp).max()\n )\n\n @pytest.mark.parametrize(\n \"test_window,test_minp\",\n list(product([1, 2, 3, 4, 5], [1, None]))\n )\n def test_rolling_mean(self, test_window, test_minp):\n if test_minp is None:\n test_minp = test_window\n pd.testing.assert_series_equal(\n df['a'].vbt.rolling_mean(test_window, minp=test_minp),\n df['a'].rolling(test_window, min_periods=test_minp).mean()\n )\n pd.testing.assert_frame_equal(\n df.vbt.rolling_mean(test_window, minp=test_minp),\n df.rolling(test_window, min_periods=test_minp).mean()\n )\n\n @pytest.mark.parametrize(\n \"test_window,test_minp,test_ddof\",\n list(product([1, 2, 3, 4, 5], [1, None], [0, 1]))\n )\n def test_rolling_std(self, test_window, test_minp, test_ddof):\n if test_minp is None:\n test_minp = test_window\n pd.testing.assert_series_equal(\n df['a'].vbt.rolling_std(test_window, minp=test_minp, ddof=test_ddof),\n df['a'].rolling(test_window, min_periods=test_minp).std(ddof=test_ddof)\n )\n pd.testing.assert_frame_equal(\n df.vbt.rolling_std(test_window, minp=test_minp, ddof=test_ddof),\n df.rolling(test_window, min_periods=test_minp).std(ddof=test_ddof)\n )\n\n @pytest.mark.parametrize(\n \"test_window,test_minp,test_adjust\",\n list(product([1, 2, 3, 4, 5], [1, None], [False, True]))\n )\n def test_ewm_mean(self, test_window, test_minp, test_adjust):\n if test_minp is None:\n test_minp = 
test_window\n pd.testing.assert_series_equal(\n df['a'].vbt.ewm_mean(test_window, minp=test_minp, adjust=test_adjust),\n df['a'].ewm(span=test_window, min_periods=test_minp, adjust=test_adjust).mean()\n )\n pd.testing.assert_frame_equal(\n df.vbt.ewm_mean(test_window, minp=test_minp, adjust=test_adjust),\n df.ewm(span=test_window, min_periods=test_minp, adjust=test_adjust).mean()\n )\n\n @pytest.mark.parametrize(\n \"test_window,test_minp,test_adjust,test_ddof\",\n list(product([1, 2, 3, 4, 5], [1, None], [False, True], [0, 1]))\n )\n def test_ewm_std(self, test_window, test_minp, test_adjust, test_ddof):\n if test_minp is None:\n test_minp = test_window\n pd.testing.assert_series_equal(\n df['a'].vbt.ewm_std(test_window, minp=test_minp, adjust=test_adjust, ddof=test_ddof),\n df['a'].ewm(span=test_window, min_periods=test_minp, adjust=test_adjust).std(ddof=test_ddof)\n )\n pd.testing.assert_frame_equal(\n df.vbt.ewm_std(test_window, minp=test_minp, adjust=test_adjust, ddof=test_ddof),\n df.ewm(span=test_window, min_periods=test_minp, adjust=test_adjust).std(ddof=test_ddof)\n )\n\n def test_expanding_min(self):\n pd.testing.assert_series_equal(df['a'].vbt.expanding_min(), df['a'].expanding().min())\n pd.testing.assert_frame_equal(df.vbt.expanding_min(), df.expanding().min())\n\n def test_expanding_max(self):\n pd.testing.assert_series_equal(df['a'].vbt.expanding_max(), df['a'].expanding().max())\n pd.testing.assert_frame_equal(df.vbt.expanding_max(), df.expanding().max())\n\n def test_expanding_mean(self):\n pd.testing.assert_series_equal(df['a'].vbt.expanding_mean(), df['a'].expanding().mean())\n pd.testing.assert_frame_equal(df.vbt.expanding_mean(), df.expanding().mean())\n\n @pytest.mark.parametrize(\n \"test_ddof\",\n [0, 1]\n )\n def test_expanding_std(self, test_ddof):\n pd.testing.assert_series_equal(\n df['a'].vbt.expanding_std(ddof=test_ddof),\n df['a'].expanding().std(ddof=test_ddof)\n )\n pd.testing.assert_frame_equal(\n df.vbt.expanding_std(ddof=test_ddof),\n df.expanding().std(ddof=test_ddof)\n )\n\n def test_apply_along_axis(self):\n pd.testing.assert_frame_equal(\n df.vbt.apply_along_axis(i_or_col_pow_nb, 2, axis=0),\n df.apply(pow_nb, args=(2,), axis=0, raw=True)\n )\n pd.testing.assert_frame_equal(\n df.vbt.apply_along_axis(i_or_col_pow_nb, 2, axis=1),\n df.apply(pow_nb, args=(2,), axis=1, raw=True)\n )\n\n @pytest.mark.parametrize(\n \"test_window\",\n [1, 2, 3, 4, 5],\n )\n def test_rolling_apply(self, test_window):\n pd.testing.assert_series_equal(\n df['a'].vbt.rolling_apply(test_window, i_col_nanmean_nb),\n df['a'].rolling(test_window, min_periods=1).apply(nanmean_nb, raw=True)\n )\n pd.testing.assert_frame_equal(\n df.vbt.rolling_apply(test_window, i_col_nanmean_nb),\n df.rolling(test_window, min_periods=1).apply(nanmean_nb, raw=True)\n )\n pd.testing.assert_frame_equal(\n df.vbt.rolling_apply(3, i_nanmean_nb, on_matrix=True),\n pd.DataFrame(\n np.array([\n [1., 1., 1.],\n [2., 2., 2.],\n [2.28571429, 2.28571429, 2.28571429],\n [2.75, 2.75, 2.75],\n [2.28571429, 2.28571429, 2.28571429]\n ]),\n index=df.index,\n columns=df.columns\n )\n )\n\n def test_expanding_apply(self):\n pd.testing.assert_series_equal(\n df['a'].vbt.expanding_apply(i_col_nanmean_nb),\n df['a'].expanding(min_periods=1).apply(nanmean_nb, raw=True)\n )\n pd.testing.assert_frame_equal(\n df.vbt.expanding_apply(i_col_nanmean_nb),\n df.expanding(min_periods=1).apply(nanmean_nb, raw=True)\n )\n pd.testing.assert_frame_equal(\n df.vbt.expanding_apply(i_nanmean_nb, on_matrix=True),\n pd.DataFrame(\n 
np.array([\n [1., 1., 1.],\n [2., 2., 2.],\n [2.28571429, 2.28571429, 2.28571429],\n [2.4, 2.4, 2.4],\n [2.16666667, 2.16666667, 2.16666667]\n ]),\n index=df.index,\n columns=df.columns\n )\n )\n\n def test_groupby_apply(self):\n pd.testing.assert_series_equal(\n df['a'].vbt.groupby_apply(np.asarray([1, 1, 2, 2, 3]), i_col_nanmean_nb),\n df['a'].groupby(np.asarray([1, 1, 2, 2, 3])).apply(lambda x: nanmean_nb(x.values))\n )\n pd.testing.assert_frame_equal(\n df.vbt.groupby_apply(np.asarray([1, 1, 2, 2, 3]), i_col_nanmean_nb),\n df.groupby(np.asarray([1, 1, 2, 2, 3])).agg({\n 'a': lambda x: nanmean_nb(x.values),\n 'b': lambda x: nanmean_nb(x.values),\n 'c': lambda x: nanmean_nb(x.values)\n }), # any clean way to do column-wise grouping in pandas?\n )\n\n def test_groupby_apply_on_matrix(self):\n pd.testing.assert_frame_equal(\n df.vbt.groupby_apply(np.asarray([1, 1, 2, 2, 3]), i_nanmean_nb, on_matrix=True),\n pd.DataFrame(\n np.array([\n [2., 2., 2.],\n [2.8, 2.8, 2.8],\n [1., 1., 1.]\n ]),\n index=pd.Int64Index([1, 2, 3], dtype='int64'),\n columns=df.columns\n )\n )\n\n @pytest.mark.parametrize(\n \"test_freq\",\n ['1h', '3d', '1w'],\n )\n def test_resample_apply(self, test_freq):\n pd.testing.assert_series_equal(\n df['a'].vbt.resample_apply(test_freq, i_col_nanmean_nb),\n df['a'].resample(test_freq).apply(lambda x: nanmean_nb(x.values))\n )\n pd.testing.assert_frame_equal(\n df.vbt.resample_apply(test_freq, i_col_nanmean_nb),\n df.resample(test_freq).apply(lambda x: nanmean_nb(x.values))\n )\n pd.testing.assert_frame_equal(\n df.vbt.resample_apply('3d', i_nanmean_nb, on_matrix=True),\n pd.DataFrame(\n np.array([\n [2.28571429, 2.28571429, 2.28571429],\n [2., 2., 2.]\n ]),\n index=pd.DatetimeIndex(['2018-01-01', '2018-01-04'], dtype='datetime64[ns]', freq='3D'),\n columns=df.columns\n )\n )\n\n def test_applymap(self):\n @njit\n def mult_nb(i, col, x):\n return x * 2\n\n pd.testing.assert_series_equal(\n df['a'].vbt.applymap(mult_nb),\n df['a'].map(lambda x: x * 2)\n )\n pd.testing.assert_frame_equal(\n df.vbt.applymap(mult_nb),\n df.applymap(lambda x: x * 2)\n )\n\n def test_filter(self):\n @njit\n def greater_nb(i, col, x):\n return x > 2\n\n pd.testing.assert_series_equal(\n df['a'].vbt.filter(greater_nb),\n df['a'].map(lambda x: x if x > 2 else np.nan)\n )\n pd.testing.assert_frame_equal(\n df.vbt.filter(greater_nb),\n df.applymap(lambda x: x if x > 2 else np.nan)\n )\n\n def test_apply_and_reduce(self):\n @njit\n def every_nth_nb(col, a, n):\n return a[::n]\n\n @njit\n def sum_nb(col, a, b):\n return np.nansum(a) + b\n\n assert df['a'].vbt.apply_and_reduce(every_nth_nb, sum_nb, apply_args=(2,), reduce_args=(3,)) == \\\n df['a'].iloc[::2].sum() + 3\n pd.testing.assert_series_equal(\n df.vbt.apply_and_reduce(every_nth_nb, sum_nb, apply_args=(2,), reduce_args=(3,)),\n df.iloc[::2].sum().rename('apply_and_reduce') + 3\n )\n pd.testing.assert_series_equal(\n df.vbt.apply_and_reduce(\n every_nth_nb, sum_nb, apply_args=(2,),\n reduce_args=(3,), wrap_kwargs=dict(time_units=True)),\n (df.iloc[::2].sum().rename('apply_and_reduce') + 3) * day_dt\n )\n\n def test_reduce(self):\n @njit\n def sum_nb(col, a):\n return np.nansum(a)\n\n assert df['a'].vbt.reduce(sum_nb) == df['a'].sum()\n pd.testing.assert_series_equal(\n df.vbt.reduce(sum_nb),\n df.sum().rename('reduce')\n )\n pd.testing.assert_series_equal(\n df.vbt.reduce(sum_nb, wrap_kwargs=dict(time_units=True)),\n df.sum().rename('reduce') * day_dt\n )\n pd.testing.assert_series_equal(\n df.vbt.reduce(sum_nb, group_by=group_by),\n 
pd.Series([20.0, 6.0], index=['g1', 'g2']).rename('reduce')\n )\n\n @njit\n def argmax_nb(col, a):\n a = a.copy()\n a[np.isnan(a)] = -np.inf\n return np.argmax(a)\n\n assert df['a'].vbt.reduce(argmax_nb, to_idx=True) == df['a'].idxmax()\n pd.testing.assert_series_equal(\n df.vbt.reduce(argmax_nb, to_idx=True),\n df.idxmax().rename('reduce')\n )\n pd.testing.assert_series_equal(\n df.vbt.reduce(argmax_nb, to_idx=True, flatten=True, group_by=group_by),\n pd.Series(['2018-01-02', '2018-01-02'], dtype='datetime64[ns]', index=['g1', 'g2']).rename('reduce')\n )\n\n @njit\n def min_and_max_nb(col, a):\n out = np.empty(2)\n out[0] = np.nanmin(a)\n out[1] = np.nanmax(a)\n return out\n\n pd.testing.assert_series_equal(\n df['a'].vbt.reduce(\n min_and_max_nb, to_array=True,\n wrap_kwargs=dict(name_or_index=['min', 'max'])),\n pd.Series([np.nanmin(df['a']), np.nanmax(df['a'])], index=['min', 'max'], name='a')\n )\n pd.testing.assert_frame_equal(\n df.vbt.reduce(\n min_and_max_nb, to_array=True,\n wrap_kwargs=dict(name_or_index=['min', 'max'])),\n df.apply(lambda x: pd.Series(np.asarray([np.nanmin(x), np.nanmax(x)]), index=['min', 'max']), axis=0)\n )\n pd.testing.assert_frame_equal(\n df.vbt.reduce(\n min_and_max_nb, to_array=True, group_by=group_by,\n wrap_kwargs=dict(name_or_index=['min', 'max'])),\n pd.DataFrame([[1.0, 1.0], [4.0, 2.0]], index=['min', 'max'], columns=['g1', 'g2'])\n )\n\n @njit\n def argmin_and_argmax_nb(col, a):\n # nanargmin and nanargmax\n out = np.empty(2)\n _a = a.copy()\n _a[np.isnan(_a)] = np.inf\n out[0] = np.argmin(_a)\n _a = a.copy()\n _a[np.isnan(_a)] = -np.inf\n out[1] = np.argmax(_a)\n return out\n\n pd.testing.assert_series_equal(\n df['a'].vbt.reduce(\n argmin_and_argmax_nb, to_idx=True, to_array=True,\n wrap_kwargs=dict(name_or_index=['idxmin', 'idxmax'])),\n pd.Series([df['a'].idxmin(), df['a'].idxmax()], index=['idxmin', 'idxmax'], name='a')\n )\n pd.testing.assert_frame_equal(\n df.vbt.reduce(\n argmin_and_argmax_nb, to_idx=True, to_array=True,\n wrap_kwargs=dict(name_or_index=['idxmin', 'idxmax'])),\n df.apply(lambda x: pd.Series(np.asarray([x.idxmin(), x.idxmax()]), index=['idxmin', 'idxmax']), axis=0)\n )\n pd.testing.assert_frame_equal(\n df.vbt.reduce(argmin_and_argmax_nb, to_idx=True, to_array=True,\n flatten=True, order='C', group_by=group_by,\n wrap_kwargs=dict(name_or_index=['idxmin', 'idxmax'])),\n pd.DataFrame([['2018-01-01', '2018-01-01'], ['2018-01-02', '2018-01-02']],\n dtype='datetime64[ns]', index=['idxmin', 'idxmax'], columns=['g1', 'g2'])\n )\n pd.testing.assert_frame_equal(\n df.vbt.reduce(argmin_and_argmax_nb, to_idx=True, to_array=True,\n flatten=True, order='F', group_by=group_by,\n wrap_kwargs=dict(name_or_index=['idxmin', 'idxmax'])),\n pd.DataFrame([['2018-01-01', '2018-01-01'], ['2018-01-04', '2018-01-02']],\n dtype='datetime64[ns]', index=['idxmin', 'idxmax'], columns=['g1', 'g2'])\n )\n\n def test_squeeze_grouped(self):\n pd.testing.assert_frame_equal(\n df.vbt.squeeze_grouped(i_col_nanmean_nb, group_by=group_by),\n pd.DataFrame([\n [1.0, 1.0],\n [3.0, 2.0],\n [3.0, np.nan],\n [3.0, 2.0],\n [1.0, 1.0]\n ], index=df.index, columns=['g1', 'g2'])\n )\n\n def test_flatten_grouped(self):\n pd.testing.assert_frame_equal(\n df.vbt.flatten_grouped(group_by=group_by, order='C'),\n pd.DataFrame([\n [1.0, 1.0],\n [np.nan, np.nan],\n [2.0, 2.0],\n [4.0, np.nan],\n [3.0, np.nan],\n [3.0, np.nan],\n [4.0, 2.0],\n [2.0, np.nan],\n [np.nan, 1.0],\n [1.0, np.nan]\n ], index=np.repeat(df.index, 2), columns=['g1', 'g2'])\n )\n 
pd.testing.assert_frame_equal(\n df.vbt.flatten_grouped(group_by=group_by, order='F'),\n pd.DataFrame([\n [1.0, 1.0],\n [2.0, 2.0],\n [3.0, np.nan],\n [4.0, 2.0],\n [np.nan, 1.0],\n [np.nan, np.nan],\n [4.0, np.nan],\n [3.0, np.nan],\n [2.0, np.nan],\n [1.0, np.nan]\n ], index=np.tile(df.index, 2), columns=['g1', 'g2'])\n )\n\n @pytest.mark.parametrize(\n \"test_name,test_func,test_func_nb\",\n [\n ('min', lambda x, **kwargs: x.min(**kwargs), nb.nanmin_nb),\n ('max', lambda x, **kwargs: x.max(**kwargs), nb.nanmax_nb),\n ('mean', lambda x, **kwargs: x.mean(**kwargs), nb.nanmean_nb),\n ('median', lambda x, **kwargs: x.median(**kwargs), nb.nanmedian_nb),\n ('std', lambda x, **kwargs: x.std(**kwargs, ddof=0), nb.nanstd_nb),\n ('count', lambda x, **kwargs: x.count(**kwargs), nb.nancnt_nb),\n ('sum', lambda x, **kwargs: x.sum(**kwargs), nb.nansum_nb)\n ],\n )\n def test_funcs(self, test_name, test_func, test_func_nb):\n # numeric\n assert test_func(df['a'].vbt) == test_func(df['a'])\n pd.testing.assert_series_equal(\n test_func(df.vbt),\n test_func(df).rename(test_name)\n )\n pd.testing.assert_series_equal(\n test_func(df.vbt, group_by=group_by),\n pd.Series([\n test_func(df[['a', 'b']].stack()),\n test_func(df['c'])\n ], index=['g1', 'g2']).rename(test_name)\n )\n np.testing.assert_array_equal(test_func(df).values, test_func_nb(df.values))\n pd.testing.assert_series_equal(\n test_func(df.vbt, wrap_kwargs=dict(time_units=True)),\n test_func(df).rename(test_name) * day_dt\n )\n # boolean\n bool_ts = df == df\n assert test_func(bool_ts['a'].vbt) == test_func(bool_ts['a'])\n pd.testing.assert_series_equal(\n test_func(bool_ts.vbt),\n test_func(bool_ts).rename(test_name)\n )\n pd.testing.assert_series_equal(\n test_func(bool_ts.vbt, wrap_kwargs=dict(time_units=True)),\n test_func(bool_ts).rename(test_name) * day_dt\n )\n\n @pytest.mark.parametrize(\n \"test_name,test_func\",\n [\n ('idxmin', lambda x, **kwargs: x.idxmin(**kwargs)),\n ('idxmax', lambda x, **kwargs: x.idxmax(**kwargs))\n ],\n )\n def test_arg_funcs(self, test_name, test_func):\n assert test_func(df['a'].vbt) == test_func(df['a'])\n pd.testing.assert_series_equal(\n test_func(df.vbt),\n test_func(df).rename(test_name)\n )\n pd.testing.assert_series_equal(\n test_func(df.vbt, group_by=group_by),\n pd.Series([\n test_func(df[['a', 'b']].stack())[0],\n test_func(df['c'])\n ], index=['g1', 'g2'], dtype='datetime64[ns]').rename(test_name)\n )\n\n def test_describe(self):\n pd.testing.assert_series_equal(\n df['a'].vbt.describe(),\n df['a'].describe()\n )\n pd.testing.assert_frame_equal(\n df.vbt.describe(percentiles=None),\n df.describe(percentiles=None)\n )\n pd.testing.assert_frame_equal(\n df.vbt.describe(percentiles=[]),\n df.describe(percentiles=[])\n )\n test_against = df.describe(percentiles=np.arange(0, 1, 0.1))\n pd.testing.assert_frame_equal(\n df.vbt.describe(percentiles=np.arange(0, 1, 0.1)),\n test_against\n )\n pd.testing.assert_frame_equal(\n df.vbt.describe(percentiles=np.arange(0, 1, 0.1), group_by=group_by),\n pd.DataFrame({\n 'g1': df[['a', 'b']].stack().describe(percentiles=np.arange(0, 1, 0.1)).values,\n 'g2': df['c'].describe(percentiles=np.arange(0, 1, 0.1)).values\n }, index=test_against.index)\n )\n\n def test_drawdown(self):\n pd.testing.assert_series_equal(\n df['a'].vbt.drawdown(),\n df['a'] / df['a'].expanding().max() - 1\n )\n pd.testing.assert_frame_equal(\n df.vbt.drawdown(),\n df / df.expanding().max() - 1\n )\n\n def test_drawdowns(self):\n assert type(df['a'].vbt.drawdowns) is Drawdowns\n assert 
df['a'].vbt.drawdowns.wrapper.freq == df['a'].vbt.wrapper.freq\n assert df['a'].vbt.drawdowns.wrapper.ndim == df['a'].ndim\n assert df.vbt.drawdowns.wrapper.ndim == df.ndim\n\n def test_to_mapped_array(self):\n np.testing.assert_array_equal(\n df.vbt.to_mapped_array().values,\n np.array([1., 2., 3., 4., 4., 3., 2., 1., 1., 2., 2., 1.])\n )\n np.testing.assert_array_equal(\n df.vbt.to_mapped_array().col_arr,\n np.array([0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2])\n )\n np.testing.assert_array_equal(\n df.vbt.to_mapped_array().idx_arr,\n np.array([0, 1, 2, 3, 1, 2, 3, 4, 0, 1, 3, 4])\n )\n np.testing.assert_array_equal(\n df.vbt.to_mapped_array(dropna=False).values,\n np.array([1., 2., 3., 4., np.nan, np.nan, 4., 3., 2., 1., 1., 2., np.nan, 2., 1.])\n )\n np.testing.assert_array_equal(\n df.vbt.to_mapped_array(dropna=False).col_arr,\n np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2])\n )\n np.testing.assert_array_equal(\n df.vbt.to_mapped_array(dropna=False).idx_arr,\n np.array([0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4])\n )\n" ]
[ [ "pandas.DatetimeIndex", "numpy.argmin", "numpy.tile", "numpy.nanmean", "numpy.empty", "pandas.DataFrame", "numpy.nanmin", "numpy.argmax", "numpy.arange", "numpy.nanmax", "numpy.array", "numpy.nansum", "numpy.power", "numpy.timedelta64", "pandas.Index", "numpy.isnan", "numpy.asarray", "pandas.Int64Index", "pandas.RangeIndex", "pandas.Series", "numpy.repeat" ] ]
mearlboro/python-classifier-2020
[ "51125c3508af55fd04eddcb32729dcf35e6c4b1a" ]
[ "train_12ECG_classifier.py" ]
[ "#!/usr/bin/env python\n\nimport numpy as np, os, sys, joblib\nfrom scipy.io import loadmat\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.ensemble import RandomForestClassifier\nfrom get_12ECG_features import get_12ECG_features\n\ndef train_12ECG_classifier(input_directory, output_directory):\n # Load data.\n print('Loading data...')\n\n header_files = []\n for f in os.listdir(input_directory):\n g = os.path.join(input_directory, f)\n if not f.lower().startswith('.') and f.lower().endswith('hea') and os.path.isfile(g):\n header_files.append(g)\n\n classes = get_classes(input_directory, header_files)\n\n # Train model.\n print('Training model...')\n\n features = list()\n labels = list()\n\n num_files = len(header_files)\n for i in range(num_files):\n feats, labs = process_single_recording(header_files[i], classes)\n features.append(feats)\n labels.append(labs)\n\n features = np.array(features)\n labels = np.array(labels)\n\n # Replace NaN values with mean values\n imputer=SimpleImputer().fit(features)\n features=imputer.transform(features)\n\n # Train the classifier\n model = RandomForestClassifier().fit(features,labels)\n\n # Save model.\n print('Saving model...')\n\n final_model={'model':model, 'imputer':imputer,'classes':classes}\n\n filename = os.path.join(output_directory, 'finalized_model.sav')\n joblib.dump(final_model, filename, protocol=0)\n\n\n# Load challenge data.\ndef load_challenge_data(header_file):\n with open(header_file, 'r') as f:\n header = f.readlines()\n mat_file = header_file.replace('.hea', '.mat')\n x = loadmat(mat_file)\n recording = np.asarray(x['val'], dtype=np.float64)\n return recording, header\n\n\n# Find unique classes.\ndef get_classes(input_directory, filenames):\n classes = set()\n for filename in filenames:\n with open(filename, 'r') as f:\n for l in f:\n if l.startswith('#Dx'):\n tmp = l.split(': ')[1].split(',')\n for c in tmp:\n classes.add(c.strip())\n return sorted(classes)\n\n\n# Process a single sample.\ndef process_single_recording(header_file, classes):\n\n recording, header = load_challenge_data(header_file)\n\n feats = get_12ECG_features(recording, header)\n num_classes = len(classes)\n\n for l in header:\n if l.startswith('#Dx:'):\n labels_act = np.zeros(num_classes)\n arrs = l.strip().split(' ')\n for arr in arrs[1].split(','):\n class_index = classes.index(arr.rstrip()) # Only use first positive index\n labels_act[class_index] = 1\n\n return feats, labels_act\n" ]
[ [ "sklearn.impute.SimpleImputer", "numpy.array", "numpy.asarray", "numpy.zeros", "sklearn.ensemble.RandomForestClassifier", "scipy.io.loadmat" ] ]
azaryabernard/audio_track_hackathon21
[ "3d3ae04dc93ef0f898e2a1e123c435030c74abe8" ]
[ "speechrecog/filter.py" ]
[ "import sounddevice as sd\nimport numpy as np\nfrom scipy.io.wavfile import write, read\nfrom scipy import signal\nimport os\n\n# These values can be adapted according to your requirements.\nsamplerate = 48000\ndownsample = 1\ninput_gain_db = 12\n#'''device = 'snd_rpi_i2s_card''''\n\ndef butter_highpass(cutoff, fs, order=5):\n '''\n Helper function for the high-pass filter.\n '''\n nyq = 0.5 * fs\n normal_cutoff = cutoff / nyq\n b, a = signal.butter(order, normal_cutoff, btype='high', analog=False)\n return b, a\n\ndef butter_highpass_filter(data, cutoff, fs, order=5):\n '''\n High-pass filter for digital audio data.\n '''\n b, a = butter_highpass(cutoff, fs, order=order)\n y = signal.filtfilt(b, a, data)\n return y\n\ndef set_gain_db(audiodata, gain_db):\n '''\n This function allows to set the audio gain\n in decibel. Values above 1 or below -1 are set to\n the max/min values.\n '''\n audiodata *= np.power(10, gain_db/10)\n return np.array([1 if s > 1 else -1 if s < -1 else s for s in audiodata], dtype=np.float32)\n\ndef process_audio_data(audiodata):\n # Extract mono channels from input data.\n ch1 = np.array(audiodata[::downsample, 0], dtype=np.float32)\n ch2 = np.array(audiodata[::downsample, 1], dtype=np.float32)\n\n # High-pass filter the data at a cutoff frequency of 10Hz.\n # This is required because I2S microhones have a certain DC offset\n # which we need to filter in order to amplify the volume later.\n ch1 = butter_highpass_filter(ch1, 10, samplerate)\n ch2 = butter_highpass_filter(ch2, 10, samplerate)\n\n # Amplify audio data.\n # Recommended, because the default input volume is very low.\n # Due to the DC offset this is not recommended without using\n # a high-pass filter in advance.\n ch1 = set_gain_db(ch1, input_gain_db)\n ch2 = set_gain_db(ch2, input_gain_db)\n\n # Output the data in the same format as it came in.\n return np.array([[ch1[i], ch2[i]] for i in range(len(ch1))], dtype=np.float32)\n\ndef output_audio_file(rec):\n wavaudio = read(rec)\n arr = np.array(wavaudio[1],dtype=float)\n processed = process_audio_data(arr)\n write('out.wav', int(samplerate/downsample), processed)\n return ('out.wav')\n\n'''# Record stereo audio data for the given duration in seconds.\nrec = sd.rec(int(seconds * samplerate), samplerate=samplerate, channels=2)\n# Wait until the recording is done.\nsd.wait()\n\n# Process the audio data as explained above.\nprocessed = process_audio_data(rec)\n\n# Write the processed audio data to a wav file.\nwrite('out.wav', int(samplerate/downsample), processed)'''" ]
[ [ "numpy.array", "scipy.io.wavfile.read", "scipy.signal.butter", "scipy.signal.filtfilt", "numpy.power" ] ]
VincentSaleh/Mundi_NotebookUsage
[ "37a50631971fc58b2b18d0b0b3f020b8e06f3623" ]
[ "lib/internal_lib/utils.py" ]
[ "import os \n\nimport geopandas as gpd\nimport math\nfrom descartes import PolygonPatch\nfrom matplotlib import pyplot as plt\nimport osmnx\nimport folium\nfrom IPython.display import display\nfrom PIL import Image\n\nos.environ[\"PROJ_LIB\"] = r\"c:\\Users\\a766113\\AppData\\Local\\Continuum\\anaconda3\\envs\\mundi-final\\Library\\share\"\n\n\n# --------------------------\n# TOOLBOX - BBOX\n# --------------------------\ndef height2width(bbox, height):\n \"\"\"Get optimized width for a given height regarding a known bbox\"\"\"\n x1 = bbox[0]\n y1 = bbox[1]\n x2 = bbox[2]\n y2 = bbox[3]\n return int(height * (x2 - x1) / (y2 - y1))\n\n\ndef width2height(bbox, width):\n \"\"\"Get optimized height for a given width regarding a known bbox\"\"\"\n x1 = bbox[0]\n y1 = bbox[1]\n x2 = bbox[2]\n y2 = bbox[3]\n return int(width * (y2 - y1) / (x2 - x1))\n\n\n# --------------------------\n# POLYGON/MAP HELPERS\n# --------------------------\ndef country_polygon_bbox(country_name):\n \"\"\"\n Get the polygon and bbox of a country\n\n :param country_name: the English name of a country (eg, 'Switzerland', 'Germany'...)\n :return: a tuple of a shapely [multi]polygon and a bbox (xmin, ymin, xmax, ymax)\n \"\"\"\n\n # get bbox from a given polygon\n # function found here: https://www.programcreek.com/python/example/95238/shapely.geometry.box\n def polygon_box(min_x, min_y, max_x, max_y):\n f_min_x, f_min_y = int(math.floor(min_x)), int(math.floor(min_y))\n c_max_x, c_max_y = int(math.ceil(max_x)), int(math.ceil(max_y))\n offset = (f_min_x, f_min_y)\n polygon_width = c_max_x - f_min_x\n polygon_height = c_max_y - f_min_y\n return offset, polygon_width, polygon_height\n\n # retrieve all countries\n world = gpd.read_file(gpd.datasets.get_path('naturalearth_lowres'))\n\n # only keep geometry and name columns\n countries = world[['geometry', 'name']]\n\n # get our country of interest\n country = countries[countries['name'] == country_name]\n polygon = country.geometry.values[0]\n\n # get polygon coordinates & dimensions\n (x, y), w, h = polygon_box(*polygon.bounds)\n bbox = x, y, x + w, y + h\n\n return polygon, bbox\n\n\ndef display_country_on_world_map(country_name, fig_size=18, color='red'):\n \"\"\"\n Display the polygon of a country on a static map\n\n :param country_name: name of the country to display, eg 'Switzerland', 'Germany', etc\n :param fig_size: size of the figure. Defaults to 20\n :param color: color to use for the country, as a string. 
Defaults to 'red'\n \"\"\"\n\n def plot_country_patch(ax):\n # plot a country on the provided axes\n country = world[world.name == country_name]\n country_features = country.__geo_interface__['features']\n country_type_coordinates = {\n 'type': country_features[0]['geometry']['type'],\n 'coordinates': country_features[0]['geometry']['coordinates']\n }\n ax.add_patch(PolygonPatch(country_type_coordinates, fc=color, ec=\"black\", alpha=0.85, zorder=2))\n\n world = gpd.read_file(gpd.datasets.get_path('naturalearth_lowres'))\n\n # plot the whole world\n axe_world = world.plot(figsize=(fig_size, 30), edgecolor=u'gray')\n\n plot_country_patch(axe_world)\n\n plt.ylabel('Latitude')\n plt.xlabel('Longitude')\n\n plt.show()\n\n\ndef city_polygon_bbox(city_name):\n \"\"\"\n Get the polygon, the bounding box and the place name (with region, country, etc) of a city\n\n :param city_name: the city of interest (eg 'Toulouse')\n :return: a tuple of (shapely Polygon, bbox [east, north, west, south], place name [string])\n \"\"\"\n city = osmnx.gdf_from_place(city_name)\n\n # retrieve data from row\n row = city.loc[0]\n polygon = row.get('geometry')\n bbox_east = row.get('bbox_east')\n bbox_north = row.get('bbox_north')\n bbox_south = row.get('bbox_south')\n bbox_west = row.get('bbox_west')\n place_name = row.get('place_name')\n\n # build bbox\n bbox = (bbox_east, bbox_north, bbox_west, bbox_south)\n\n return polygon, bbox, place_name\n\n\ndef display_wms(polygon, bbox, wms, wms_layers, time, height=512):\n \"\"\"\n Display polygons and their satellite image using WMS and an interactive map. Use only in Jupyter Notebooks.\n\n :param polygon: a shapely [Multi]Polygon for the area of interest (AOI)\n :param bbox: the bbox of the AOI\n :param height: the height of the satellite image. Width is computed to keep proportions. Defaults to 512\n :param wms_layers: a string mapped to the layers of interest. Can take two values:\n '0' (only first layer) or 'all' (all layers)\n :param time: date range for the satellite image formatted as 'YYYY-MM-DD' or 'YYYY-MM-DD/YYYY-MM-DD'\n (eg '2018-12-27/2019-01-10')\n \"\"\"\n map_center = polygon.centroid\n m = folium.Map([map_center.y, map_center.x], zoom_start=3, tiles='cartodbpositron')\n folium.GeoJson(polygon).add_to(m)\n folium.LatLngPopup().add_to(m)\n display(m)\n\n projection = 'EPSG:4326'\n width = height2width(bbox, height)\n\n layers = list(wms.contents)\n\n if wms_layers == '0':\n # get first layer from WMS\n print(wms[layers[0]].title)\n img = wms.getmap(layers=[wms[layers[0]].name],\n srs=projection,\n bbox=bbox,\n size=(width, height),\n format='image/png',\n time=time,\n showlogo=False,\n transparent=False,\n maxcc=30)\n\n display(Image.open(img))\n\n elif wms_layers == 'all':\n # get layers from WMS\n for lay in layers:\n print(wms[lay].title)\n img = wms.getmap(layers=[wms[lay].name],\n srs=projection,\n bbox=bbox,\n size=(width, height),\n format='image/png',\n time=time,\n showlogo=False,\n transparent=False,\n maxcc=30)\n\n display(Image.open(img))\n" ]
[ [ "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
nouyang/howe299r
[ "5ec4e23a2d808272ec541df511f4cbf60bfa4cd9" ]
[ "Experiments/03April2018/3d_plots_resid_vs_all_vars.py" ]
[ "\"\"\"\nAbstract version of 3d_residuals_plot.py, to handle making 14 graphs\nCreated on Fri Apr 10\n@author: nrw\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport shelve\nfrom datetime import datetime\n\nimport plotly.offline as po\nimport plotly.graph_objs as go\nfrom plotly import tools\n\nfrom sklearn import linear_model\nfrom sklearn.linear_model import Ridge\nfrom sklearn import metrics\n\n#===============================================\n#### DECLARE CONSTANTS ####\n#===============================================\n\nwith shelve.open('calculated_data', 'r') as shelf:\n BigTheta = shelf['BigTheta']\n BigTorque = shelf['BigTorque']\n BigForce = shelf['BigForce'] \n BigPosition = shelf['BigPosition'] \n\nwith shelve.open('calculated_data2', 'r') as shelf:\n torq_est = shelf['torq_est'] \n resid = shelf['resid'] \n K = shelf['K'] \n\ntorq_estX = torq_est[:,0]\ntorq_estY = torq_est[:,1] \n\ndef plot_vs_resid(dataA, dataA_list, dataB = None, dataB_xtitle =''):\n print(dataA.shape)\n\n y1 = torq_estX\n y2 = torq_estY\n\n x1 = dataA # e.g. ForceZ\n Atitle, Aunit = dataA_list\n\n print('~~~~~~~~~~Plotting! residX, residY vs ', Atitle , '~~~~~~~~')\n\n\n trace0 = go.Scatter( x = x1, y = y1, mode = 'markers',\n name = 'TorqueX residuals vs ' + Atitle, marker=dict(size=4,\n symbol='circle'))\n # https://plot.ly/python/reference/#scatter\n\n trace1 = go.Scatter( x = x1, y = y2, mode = 'markers', \n name = 'TorqueY residuals vs ' + Atitle, marker=dict(size=4, symbol='circle'))\n\n\n strtime = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n\n overall_title='Torque Residuals vs ' + Atitle + \\\n '<br>with 3x3 K, using SkLearn LinReg) (IMU data)'## + \\\n\n\n layout = go.Layout(\n title = overall_title,\n legend=dict(x=.5, y=0.1) )\n\n fig = tools.make_subplots(rows=2, cols=1)#, subplot_titles=(trace0.name, trace1.name))\n\n fig.append_trace(trace0, 1,1)\n fig.append_trace(trace1, 2,1)\n\n fig['layout'].update(title=overall_title, showlegend=False)\n\n fig['layout']['xaxis1'].update(title=Atitle + ' ' + Aunit)\n fig['layout']['xaxis2'].update(title=Atitle + ' ' + Aunit + \\\n '<br><br>K: ' + np.array_str(K, precision=2) + \\\n '<br>Time: ' + strtime )\n\n fig['layout']['yaxis1'].update(title='TorqueX residual (g*cm)')\n fig['layout']['yaxis2'].update(title='TorqueY residual (g*cm)')\n\n po.plot(fig, image='png', image_width=900, image_height=1000, filename= Atitle +'.html', image_filename = Atitle )\n # po.plot(fig, filename= Atitle +'.html')\n return\n\n\nplot_vs_resid(BigForce[:,2], ['ForceZ', 'g'])\nplot_vs_resid(BigPosition[:,0], ['PositionX', 'cm'])\nplot_vs_resid(BigPosition[:,1], ['PositionY', 'cm'])\nplot_vs_resid(BigTheta[:,0], ['ThetaX', 'deg'])\nplot_vs_resid(BigTheta[:,1], ['ThetaY', 'deg'])\nplot_vs_resid(BigTheta[:,2], ['ThetaZ', 'deg'])\nplot_vs_resid(torq_est[:,0], ['Torq Est X (K*measured thetas)', 'g cm'])\nplot_vs_resid(torq_est[:,1], ['Torq Est Y (K*measured thetas)', 'g cm'])\n\n\n\n\n#===============================================\n#### PLOT: Residuals (of Y torque_est - torque) vs Force (Z only)\n#===============================================\n\n# xplot = torq_est[:,dim]\n# xplot2 = BigForce[:,2]\n# yplot = resid[:,dim] \n\n# trace0 = go.Scatter( x = xplot, y = yplot, mode = 'markers',\n # name = 'resid_torqY vs %s-axis %s estimated'%(names[dim], param))\n\n# trace1 = go.Scatter( x = xplot2, y = yplot, mode = 'markers', \n# name = 'resid_torqY vs Resid vs Z-axis Force, as applied')\n\n# #data = [trace0]\n\n# overall_title='%s-axis %s: Resid vs Force 
applied (with 3x3 K, using SkLearn LinReg) (IMU data)' % \\\n # (names[dim], param) + '<br>K: ' + np.array_str(K, precision=2) + '<br>'\n\n# yaxistitle= 'resid (g cm)'\n# xaxistitle= 'force (g)'\n# |\n# layout = go.Layout(\n # title = overall_title,\n # legend=dict(x=.5, y=0.1) )\n\n# fig = tools.make_subplots(rows=2, cols=1, subplot_titles=(trace0.name, trace1.name))\n\n# fig.append_trace(trace0, 1,1)\n# fig.append_trace(trace1, 2,1)\n\n# fig['layout'].update(title=overall_title, showlegend=False)\n# fig['layout']['xaxis1'].update(title='%s torque est (g cm)' % (names[dim]))\n# fig['layout']['xaxis2'].update(title=xaxistitle)\n# fig['layout']['yaxis1'].update(title=yaxistitle)\n# fig['layout']['yaxis2'].update(title=yaxistitle)\n\n# #fig = go.Figure(data=data, layout=layout)\n" ]
[ [ "numpy.array_str" ] ]
robmarkcole/img2vec
[ "02f1a59edd6d8a3711567ce82e559463ad0fe72e" ]
[ "img2vec_pytorch/img_to_vec.py" ]
[ "import torch\nimport torch.nn as nn\nimport torchvision.models as models\nimport torchvision.transforms as transforms\nimport numpy as np\n\nclass Img2Vec():\n RESNET_OUTPUT_SIZES = {\n 'resnet18': 512,\n 'resnet34': 512,\n 'resnet50': 2048,\n 'resnet101': 2048,\n 'resnet152': 2048,\n }\n\n def __init__(self, cuda=False, model='resnet-18', layer='default', layer_output_size=512):\n \"\"\" Img2Vec\n :param cuda: If set to True, will run forward pass on GPU\n :param model: String name of requested model\n :param layer: String or Int depending on model. See more docs: https://github.com/christiansafka/img2vec.git\n :param layer_output_size: Int depicting the output size of the requested layer\n \"\"\"\n self.device = torch.device(\"cuda\" if cuda else \"cpu\")\n self.layer_output_size = layer_output_size\n self.model_name = model\n\n self.model, self.extraction_layer = self._get_model_and_layer(model, layer)\n\n self.model = self.model.to(self.device)\n\n self.model.eval()\n\n self.scaler = transforms.Resize((224, 224))\n self.normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n self.to_tensor = transforms.ToTensor()\n\n def get_vec(self, img, tensor=False):\n \"\"\" Get vector embedding from PIL image\n :param img: PIL Image or list of PIL Images\n :param tensor: If True, get_vec will return a FloatTensor instead of Numpy array\n :returns: Numpy ndarray\n \"\"\"\n if type(img) == list:\n a = [self.normalize(self.to_tensor(self.scaler(im))) for im in img]\n images = torch.stack(a).to(self.device)\n if self.model_name in ['alexnet', 'vgg']:\n my_embedding = torch.zeros(len(img), self.layer_output_size)\n elif self.model_name == 'densenet':\n my_embedding = torch.zeros(len(img), self.layer_output_size, 7, 7)\n else:\n my_embedding = torch.zeros(len(img), self.layer_output_size, 1, 1)\n\n def copy_data(m, i, o):\n my_embedding.copy_(o.data)\n\n h = self.extraction_layer.register_forward_hook(copy_data)\n with torch.no_grad():\n h_x = self.model(images)\n h.remove()\n\n if tensor:\n return my_embedding\n else:\n if self.model_name in ['alexnet', 'vgg']:\n return my_embedding.numpy()[:, :]\n elif self.model_name == 'densenet':\n return torch.mean(my_embedding, (2, 3), True).numpy()[:, :, 0, 0]\n else:\n return my_embedding.numpy()[:, :, 0, 0]\n else:\n image = self.normalize(self.to_tensor(self.scaler(img))).unsqueeze(0).to(self.device)\n\n if self.model_name in ['alexnet', 'vgg']:\n my_embedding = torch.zeros(1, self.layer_output_size)\n elif self.model_name == 'densenet':\n my_embedding = torch.zeros(1, self.layer_output_size, 7, 7)\n else:\n my_embedding = torch.zeros(1, self.layer_output_size, 1, 1)\n\n def copy_data(m, i, o):\n my_embedding.copy_(o.data)\n\n h = self.extraction_layer.register_forward_hook(copy_data)\n with torch.no_grad():\n h_x = self.model(image)\n h.remove()\n\n if tensor:\n return my_embedding\n else:\n if self.model_name in ['alexnet', 'vgg']:\n return my_embedding.numpy()[0, :]\n elif self.model_name == 'densenet':\n return torch.mean(my_embedding, (2, 3), True).numpy()[0, :, 0, 0]\n else:\n return my_embedding.numpy()[0, :, 0, 0]\n\n def _get_model_and_layer(self, model_name, layer):\n \"\"\" Internal method for getting layer from model\n :param model_name: model name such as 'resnet-18'\n :param layer: layer as a string for resnet-18 or int for alexnet\n :returns: pytorch model, selected layer\n \"\"\"\n\n if model_name.startswith('resnet') and not model_name.startswith('resnet-'):\n model = getattr(models, 
model_name)(pretrained=True)\n if layer == 'default':\n layer = model._modules.get('avgpool')\n self.layer_output_size = self.RESNET_OUTPUT_SIZES[model_name]\n else:\n layer = model._modules.get(layer)\n return model, layer\n elif model_name == 'resnet-18':\n model = models.resnet18(pretrained=True)\n if layer == 'default':\n layer = model._modules.get('avgpool')\n self.layer_output_size = 512\n else:\n layer = model._modules.get(layer)\n\n return model, layer\n\n elif model_name == 'alexnet':\n model = models.alexnet(pretrained=True)\n if layer == 'default':\n layer = model.classifier[-2]\n self.layer_output_size = 4096\n else:\n layer = model.classifier[-layer]\n\n return model, layer\n\n elif model_name == 'vgg':\n # VGG-11\n model = models.vgg11_bn(pretrained=True)\n if layer == 'default':\n layer = model.classifier[-2]\n self.layer_output_size = model.classifier[-1].in_features # should be 4096\n else:\n layer = model.classifier[-layer]\n\n return model, layer\n\n elif model_name == 'densenet':\n # Densenet-121\n model = models.densenet121(pretrained=True)\n if layer == 'default':\n layer = model.features[-1]\n self.layer_output_size = model.classifier.in_features # should be 1024\n else:\n raise KeyError('Unsupported layer parameter for model %s' % model_name)\n\n return model, layer\n\n else:\n raise KeyError('Model %s was not found' % model_name)\n" ]
[ [ "torch.zeros", "torch.device", "torch.stack", "torch.no_grad", "torch.mean" ] ]
nikvaessen/voxceleb_trainer
[ "6f3a6fd316fbcfe357234ad678181fc3666b067e" ]
[ "dataprep.py" ]
[ "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n# The script downloads the VoxCeleb datasets and converts all files to WAV.\n# Requirement: ffmpeg and wget running on a Linux system.\n\nimport argparse\nimport multiprocessing\nimport os\nimport pathlib\nimport subprocess\nimport pathlib\nimport pdb\nimport hashlib\nimport time\nimport glob\nimport tarfile\nimport threading\n\nfrom zipfile import ZipFile\nfrom tqdm import tqdm\nfrom scipy.io import wavfile\n\n## ========== ===========\n## Parse input arguments\n## ========== ===========\nparser = argparse.ArgumentParser(description=\"VoxCeleb downloader\")\n\nparser.add_argument(\"--save_path\", type=str, default=\"data\", help=\"Target directory\")\nparser.add_argument(\"--user\", type=str, default=\"user\", help=\"Username\")\nparser.add_argument(\"--password\", type=str, default=\"pass\", help=\"Password\")\n\nparser.add_argument(\n \"--download\", dest=\"download\", action=\"store_true\", help=\"Enable download\"\n)\nparser.add_argument(\n \"--extract\", dest=\"extract\", action=\"store_true\", help=\"Enable extract\"\n)\nparser.add_argument(\n \"--convert\", dest=\"convert\", action=\"store_true\", help=\"Enable convert\"\n)\nparser.add_argument(\n \"--augment\",\n dest=\"augment\",\n action=\"store_true\",\n help=\"Download and extract augmentation files\",\n)\n\nargs = parser.parse_args()\n\n\n## ========== ===========\n## MD5SUM\n## ========== ===========\ndef md5(fname):\n hash_md5 = hashlib.md5()\n with open(fname, \"rb\") as f:\n for chunk in iter(lambda: f.read(4096), b\"\"):\n hash_md5.update(chunk)\n return hash_md5.hexdigest()\n\n\n## ========== ===========\n## Download with wget\n## ========== ===========\ndef download(args, lines):\n for line in lines:\n url = line.split()[0]\n md5gt = line.split()[1]\n outfile = url.split(\"/\")[-1]\n\n path = pathlib.Path(args.save_path) / outfile\n\n ## Download files\n out = subprocess.call(\n f\"wget {url} --user {args.user} --password {args.password} -O {str(path)}\",\n shell=True,\n )\n if out != 0:\n raise ValueError(\n \"Download failed %s. 
If download fails repeatedly, use alternate URL on the VoxCeleb website.\"\n % url\n )\n\n ## Check MD5\n md5ck = md5(str(path))\n if md5ck == md5gt:\n print(\"Checksum successful %s.\" % outfile)\n else:\n raise Warning(\"Checksum failed %s.\" % outfile)\n\n\n## ========== ===========\n## Concatenate file parts\n## ========== ===========\ndef concatenate(args, lines):\n for line in lines:\n infile = line.split()[0]\n outfile = line.split()[1]\n md5gt = line.split()[2]\n\n infile_path = pathlib.Path(args.save_path) / infile\n outfile_path = pathlib.Path(args.save_path) / \"concat\" / outfile\n outfile_path.parent.mkdir(parents=True, exist_ok=True)\n\n ## Concatenate files\n out = subprocess.call(\n f\"cat {infile_path} > {outfile_path}\",\n shell=True,\n )\n\n ## Check MD5\n md5ck = md5(str(outfile_path))\n if md5ck == md5gt:\n print(\"Checksum successful %s.\" % outfile)\n else:\n raise Warning(\"Checksum failed %s.\" % outfile)\n\n\n## ========== ===========\n## Extract zip files\n## ========== ===========\ndef full_extract(args, fname):\n print(\"Extracting %s\" % fname)\n if fname.endswith(\".tar.gz\"):\n with tarfile.open(fname, \"r:gz\") as tar:\n tar.extractall(args.save_path)\n elif fname.endswith(\".zip\"):\n path = pathlib.Path(fname)\n with ZipFile(fname, \"r\") as zf:\n zf.extractall(args.save_path)\n\n\n## ========== ===========\n## Partially extract zip files\n## ========== ===========\ndef part_extract(args, fname, target):\n print(\"Extracting %s\" % fname)\n with ZipFile(fname, \"r\") as zf:\n for infile in zf.namelist():\n if any([infile.startswith(x) for x in target]):\n zf.extract(infile, args.save_path)\n # pdb.set_trace()\n # zf.extractall(args.save_path)\n\n\n## ========== ===========\n## Convert\n## ========== ===========\ndef convert_file(fname):\n outfile = fname.replace(\".m4a\", \".wav\")\n out = subprocess.call(\n f\"ffmpeg -y -i {str(fname)} -ac 1 -vn -acodec pcm_s16le -ar 16000 {str(outfile)} >/dev/null 2>/dev/null\",\n shell=True,\n )\n if out != 0:\n raise ValueError(f\"Conversion failed {str(fname)}\")\n\n\ndef convert(args):\n files = pathlib.Path(args.save_path).rglob(\"*.m4a\")\n files = [f for f in files]\n files = sorted(files)\n\n print(f\"Converting {len(files)} files from AAC to WAV\")\n with tqdm(total=len(files)) as pbar, multiprocessing.Pool(8) as workers:\n for file in files:\n workers.apply_async(\n convert_file,\n args=(str(file),),\n error_callback=lambda x: print(x),\n callback=lambda _: pbar.update(1),\n )\n\n workers.close()\n workers.join()\n\n\n## ========== ===========\n## Split MUSAN for faster random access\n## ========== ===========\ndef split_musan(args):\n files = glob.glob(\"%s/musan/*/*/*.wav\" % args.save_path)\n\n audlen = 16000 * 5\n audstr = 16000 * 3\n\n for idx, file in enumerate(files):\n fs, aud = wavfile.read(file)\n writedir = os.path.splitext(file.replace(\"/musan/\", \"/musan_split/\"))[0]\n os.makedirs(writedir)\n for st in range(0, len(aud) - audlen, audstr):\n wavfile.write(writedir + \"/%05d.wav\" % (st / fs), fs, aud[st : st + audlen])\n\n print(idx, file)\n\n\n## ========== ===========\n## Main script\n## ========== ===========\nif __name__ == \"__main__\":\n\n if not os.path.exists(args.save_path):\n raise ValueError(\"Target directory does not exist.\")\n\n f = open(\"lists/fileparts.txt\", \"r\")\n fileparts = f.readlines()\n f.close()\n\n f = open(\"lists/files.txt\", \"r\")\n files = f.readlines()\n f.close()\n\n f = open(\"lists/augment.txt\", \"r\")\n augfiles = f.readlines()\n f.close()\n\n if 
args.augment:\n download(args, augfiles)\n part_extract(\n args,\n os.path.join(args.save_path, \"rirs_noises.zip\"),\n [\n \"RIRS_NOISES/simulated_rirs/mediumroom\",\n \"RIRS_NOISES/simulated_rirs/smallroom\",\n ],\n )\n full_extract(args, os.path.join(args.save_path, \"musan.tar.gz\"))\n split_musan(args)\n\n if args.download:\n download(args, fileparts)\n\n if args.extract:\n concatenate(args, files)\n\n for file in files:\n full_extract(args, os.path.join(args.save_path, \"concat\", file.split()[1]))\n\n save_path = pathlib.Path(args.save_path)\n out = subprocess.call(\n f\"mv {str(save_path/'dev'/'aac')} {str(save_path / 'aac')} && rmdir {str(save_path / 'dev')}\",\n shell=True,\n )\n out = subprocess.call(\n f\"mv {str(save_path / 'wav')} {str(save_path / 'voxceleb1')}\", shell=True\n )\n out = subprocess.call(\n f\"mv {str(save_path / 'aac')} {str(save_path / 'voxceleb2')}\", shell=True\n )\n\n if args.convert:\n convert(args)\n" ]
[ [ "scipy.io.wavfile.read", "scipy.io.wavfile.write" ] ]
jchen42703/g2net_ml_dl
[ "7d514d3ef00727990202f86d67e3020b342d5584" ]
[ "python/scripts/train_minirocket.py" ]
[ "from g2net.train import TrainPipeline, create_base_transforms, create_dataloaders\nfrom g2net.train import create_base_transforms, create_dataloaders\nfrom datetime import datetime\nfrom torch.utils.data import DataLoader\nimport pandas as pd\nfrom sklearn.model_selection import StratifiedKFold\nfrom typing import List\nimport pprint\n\n\ndef train_one_fold(fold: int,\n seed: int,\n train_loader: DataLoader,\n valid_loader: DataLoader,\n pipeline_params: dict = None):\n \"\"\"Trains one fold\n\n Args:\n pipeline_params: Corresponds to the pipeline params config.\n See train_minirocket.yml\n \"\"\"\n timestamp = datetime.now().timestamp()\n params = {\n \"train_loader\": train_loader,\n \"valid_loader\": valid_loader,\n }\n\n pipeline_params[\"model_params\"][\"random_state\"] = seed\n model_path = f\"minirocket_rocket_fold{fold}_seed{seed}_{timestamp}.pt\"\n params = {**params, **pipeline_params, \"save_path\": model_path}\n pp = pprint.PrettyPrinter(depth=4)\n print(\"PIPELINE PARAMS:\")\n pp.pprint(params)\n pipeline = TrainPipeline(**params)\n pipeline.train_minirocket()\n\n\ndef prep_CV(train: pd.DataFrame,\n seed: int,\n num_splits: int = 5) -> List[List[List[int]]]:\n \"\"\"Loads the components need for KFolds\n \"\"\"\n splitter = StratifiedKFold(n_splits=num_splits,\n shuffle=True,\n random_state=seed)\n fold_iter = list(splitter.split(X=train, y=train['target']))\n return fold_iter\n\n\ndef create_fold_dl(train: pd.DataFrame,\n train_idx: List[int],\n valid_idx: List[int],\n batch_size: int = 64,\n num_workers: int = 8):\n \"\"\"Creates the fold subset dfs and dataloaders.\n \n Args:\n fold_iter: from kfolds\n \"\"\"\n train_fold = train.iloc[train_idx]\n valid_fold = train.iloc[valid_idx]\n print(\n f'train positive: {train_fold.target.values.mean(0)} ({len(train_fold)})'\n )\n print(\n f'valid positive: {valid_fold.target.values.mean(0)} ({len(valid_fold)})'\n )\n transforms = create_base_transforms()\n train_loader, valid_loader = create_dataloaders(\n train_fold,\n valid_fold,\n batch_size=batch_size,\n train_transforms=transforms[\"train\"],\n test_transforms=transforms[\"test\"],\n num_workers=num_workers)\n return (train_loader, valid_loader)\n\n\nif __name__ == \"__main__\":\n import argparse\n import os\n from g2net.utils.config_reader import load_config\n from g2net.utils.torch import seed_everything\n\n parser = argparse.ArgumentParser(description=\"For training.\")\n parser.add_argument(\"--yml_path\",\n type=str,\n required=True,\n help=\"Path to the .yml config.\")\n args = parser.parse_args()\n\n cfg = load_config(args.yml_path)[\"train_config\"]\n\n print(\"CONFIG: \\n\", cfg)\n\n # Cross validation\n seed = cfg[\"seed\"]\n num_splits = cfg[\"num_splits\"]\n seed_everything(cfg[\"seed\"], deterministic=False)\n train_path = os.path.join(cfg[\"dset_dir\"], \"train.csv\")\n train = pd.read_csv(train_path).iloc[:cfg[\"dset_size\"]]\n print(f\"Creating {num_splits} folds with seed {seed}...\")\n fold_iter = prep_CV(train, seed, num_splits=num_splits)\n\n # Training for cfg.num_splits folds\n orig_logdir = cfg[\"pipeline_params\"][\"logdir\"]\n for fold, (train_idx, valid_idx) in enumerate(fold_iter):\n print(f\"======== TRAINING FOLD {fold} ========\")\n # idk why it reformatted to look like this LOL\n train_loader, valid_loader = create_fold_dl(\n train,\n train_idx,\n valid_idx,\n batch_size=cfg[\"batch_size\"],\n num_workers=cfg[\"num_workers\"])\n cfg[\"pipeline_params\"][\"logdir\"] = os.path.join(orig_logdir,\n f\"logs_{fold}\")\n train_one_fold(fold,\n 
seed,\n train_loader=train_loader,\n valid_loader=valid_loader,\n pipeline_params=cfg[\"pipeline_params\"])\n" ]
[ [ "pandas.read_csv", "sklearn.model_selection.StratifiedKFold" ] ]
OlivierDehaene/GPA4.0
[ "11e39f3e43c08081b70cdf67bef8894bf75b5afb" ]
[ "gpa/scripts/decoding_utils.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nimport numpy as np\nimport tensorflow as tf\nfrom copy import deepcopy, copy\nfrom itertools import groupby, chain\nfrom tqdm import tqdm\n\nimport pandas as pd\n\nfrom tensor2tensor.utils import registry\nfrom tensor2tensor.utils import trainer_lib\n\nEOS_ID = 1\n\n\ndef get_att_mats(translate_model):\n \"\"\"\n Get's the tensors representing the attentions from a build model.\n\n The attentions are stored in a dict on the Transformer object while building\n the graph.\n\n :param translate_model: Transformer object to fetch the attention weights from.\n :return:\n \"\"\"\n encdec_atts = []\n\n prefix = 'transformer/body/'\n postfix = '/multihead_attention/dot_product_attention'\n\n for i in range(1, translate_model.hparams.num_hidden_layers):\n encdec_att = translate_model.attention_weights[\n '%sdecoder/layer_%i/encdec_attention%s' % (prefix, i, postfix)]\n encdec_atts.append(encdec_att)\n\n encdec_att_mats = [tf.squeeze(tf.reduce_sum(mat, axis=1)) for mat in encdec_atts]\n\n return encdec_att_mats\n\n\ndef build_model(hparams_set, model_name, data_dir, problem_name, beam_size=1, top_beams=1):\n \"\"\"Build the graph required to featch the attention weights.\n\n Args:\n model_name: Name of model.\n data_dir: Path to directory contatining training data.\n problem_name: Name of problem.\n beam_size: (Optional) Number of beams to use when decoding a traslation.\n If set to 1 (default) then greedy decoding is used.\n\n Returns:\n Tuple of (\n inputs: Input placeholder to feed in ids to be translated.\n targets: Targets placeholder to feed to translation when fetching\n attention weights.\n samples: Tensor representing the ids of the translation.\n att_mats: Tensors representing the attention weights.\n )\n \"\"\"\n hparams = trainer_lib.create_hparams(\n hparams_set, data_dir=data_dir, problem_name=problem_name)\n translate_model = registry.model(model_name)(\n hparams, tf.estimator.ModeKeys.EVAL)\n\n inputs = tf.placeholder(tf.int32, shape=(None, None, 1, 1), name='inputs')\n targets = tf.placeholder(tf.int32, shape=(None, None, 1, 1), name='targets')\n translate_model({\n 'inputs': inputs,\n 'targets': targets,\n })\n\n # Must be called after building the training graph, so that the dict will\n # have been filled with the attention tensors. 
BUT before creating the\n # interence graph otherwise the dict will be filled with tensors from\n # inside a tf.while_loop from decoding and are marked unfetchable.\n encdec_att_mats = get_att_mats(translate_model)\n\n with tf.variable_scope(tf.get_variable_scope(), reuse=True):\n samples = translate_model.infer({\n 'inputs': inputs,\n }, beam_size=beam_size, top_beams=top_beams, alpha=0.6)['outputs']\n\n return inputs, targets, samples, encdec_att_mats\n\n\ndef _encode(str_input, encoder, padding_to=None):\n encoded_input = [encoder['inputs']._token_to_id[c] for c in str_input] + [EOS_ID]\n if padding_to:\n for _ in range(padding_to - len(encoded_input)):\n encoded_input += [0]\n encoded_input = np.reshape(encoded_input, [1, -1, 1, 1]) # Make it 3D.\n return encoded_input\n\n\ndef _decode(integers, encoder):\n decoded_str = []\n for i in integers:\n if i == 1:\n break\n elif i != 0:\n decoded_str.append(encoder['targets']._safe_id_to_token(i))\n return decoded_str\n\n\ndef _make_prediction_batch(sess, batch_input, input_tensor, input_phon_tensor, output_phon_tensor, att_mats_list,\n encoder):\n padding_to = len(max(batch_input, key=len)) + 1\n\n batch_input_tokenized = np.stack([_encode(input, encoder, padding_to).squeeze(0) for input in batch_input], 0)\n\n batch_phon_tokenized = sess.run(output_phon_tensor, feed_dict={input_tensor: batch_input_tokenized})\n\n batch_phon = [_decode(np.squeeze(phon_tokenized), encoder) for phon_tokenized in batch_phon_tokenized]\n\n batch_att_mats = sess.run(att_mats_list, feed_dict={input_tensor: batch_input_tokenized,\n input_phon_tensor: np.reshape(batch_phon_tokenized,\n [len(batch_input), -1, 1, 1])})\n\n batch_sum_all_layers = np.sum(np.stack(batch_att_mats), axis=0)\n\n return batch_phon, batch_sum_all_layers\n\n\ndef _make_translation_batch(sess, batch_input, input_tensor, output_phon_tensor, top_beams,\n encoder):\n padding_to = len(max(batch_input, key=len)) + 1\n\n batch_input_tokenized = np.stack([_encode(input, encoder, padding_to).squeeze(0) for input in batch_input], 0)\n\n batch_phon_tokenized = sess.run(output_phon_tensor, feed_dict={input_tensor: batch_input_tokenized})\n\n batch_phon = []\n for phon_tokenized_beam in batch_phon_tokenized:\n if top_beams > 1:\n batch_phon.append(\n [\"\".join(_decode(np.squeeze(phon_tokenized), encoder)) for phon_tokenized in phon_tokenized_beam])\n else:\n batch_phon.append([\"\".join(_decode(np.squeeze(phon_tokenized_beam), encoder))])\n return batch_phon\n\n\ndef g2p_mapping_batch(sess, batch_input, input_tensor, input_phon_tensor, output_phon_tensor, att_mats_list,\n encoder):\n \"\"\"\n Predict the phonetic translation of a word using a Transformer model\n\n :param input: String, word\n :param model_name: Name of the model to serve\n :return: Array[3], [0] input text, [1] phonetic translation, [2] mapping\n \"\"\"\n # open channel to tensorflow server\n\n # get phonetic translation and attention matrices\n batch_phon, batch_sum_all_layers = _make_prediction_batch(sess, batch_input, input_tensor, input_phon_tensor,\n output_phon_tensor, att_mats_list, encoder)\n\n # make prediction\n mapping_batch = [_mapping(input, batch_phon[idx], batch_sum_all_layers[idx, :len(batch_phon[idx]), :len(input)]) for\n idx, input in enumerate(batch_input)]\n\n return batch_input, batch_phon, mapping_batch\n\n\ndef reccurent_aggregation(graph, associated_phons):\n for i, phons in enumerate(associated_phons):\n try:\n for p in phons:\n if p in associated_phons[i + 1]:\n graph[i] = graph[i] + graph.pop(i + 1)\n 
associated_phons[i] = associated_phons[i] + list(\n set(associated_phons.pop(i + 1)) - set(associated_phons[i]))\n associated_phons[i].sort()\n return reccurent_aggregation(graph, associated_phons)\n except:\n return graph, associated_phons\n\n\ndef _mapping(inp_text, out_text, sum_all_layers):\n sum_all_layers = sum_all_layers / 8.0\n n_letters = len(inp_text)\n n_phon = len(out_text)\n\n associated_phons = []\n\n for i in range(n_letters):\n att_slice = copy(sum_all_layers[:, i])\n max_value = np.max(att_slice)\n masked_att_slice = att_slice[att_slice > 0.25]\n sorted_att_slice_idx = att_slice.argsort()[::-1]\n att_slice.sort()\n sorted_att_slice_values = att_slice[::-1]\n top_values = [idx for idx, v in zip(sorted_att_slice_idx, sorted_att_slice_values) if\n v > 0.25 and v > (max_value - max(1.5 * np.std(masked_att_slice), 0.1))]\n if not top_values:\n top_values = [n_phon + i]\n associated_phons.append(top_values)\n\n for i in range(n_phon):\n if i not in list(chain.from_iterable(associated_phons)):\n att_slice = sum_all_layers[i, :]\n idx = np.argmax(att_slice)\n associated_phons[idx] = associated_phons[idx] + [i]\n\n graph = [[i] for i in range(n_letters)]\n\n graph, associated_phons = reccurent_aggregation(graph, associated_phons)\n assert len(graph) == len(associated_phons)\n\n mapping = []\n for g, p in zip(graph, associated_phons):\n str_g = \"\".join([inp_text[i] for i in g])\n p_chs = []\n for i in p:\n try:\n p_chs.append(out_text[i])\n except IndexError:\n p_chs.append('#')\n str_p = \"\".join(p_chs)\n mapping.append(str_g + \"~\" + str_p)\n\n return mapping\n\n\ndef _dic_add(value, dic):\n if value not in dic:\n dic[value] = 1\n else:\n dic[value] += 1\n\n\ndef visualize_attention(sess, word, input_tensor, input_phon_tensor, output_phon_tensor, att_mats_encdec, encoder):\n word = [word]\n\n batch_input_tokenized = np.stack([_encode(input, encoder).squeeze(0) for input in word], 0)\n\n batch_phon_tokenized = sess.run(output_phon_tensor, feed_dict={input_tensor: batch_input_tokenized})\n\n batch_phon = [_decode(np.squeeze(phon_tokenized), encoder) for phon_tokenized in batch_phon_tokenized]\n\n encdec_att_mats = sess.run(att_mats_encdec,\n feed_dict={input_tensor: batch_input_tokenized,\n input_phon_tensor: np.reshape(\n batch_phon_tokenized,\n [len(word), -1, 1, 1])})\n for i, encdec_att_mat in enumerate(encdec_att_mats):\n encdec_sum_all_layers = np.array(encdec_att_mat)[:len(batch_phon[0]), :len(word[0])]\n _plot_attention_matrix(word[0], batch_phon[0], encdec_sum_all_layers, 'Enc Dec Att L{}'.format(i + 1))\n _plot_attention_matrix(word[0], batch_phon[0],\n np.sum(np.stack(encdec_att_mats), axis=0)[:len(batch_phon[0]), :len(word[0])],\n 'Enc Dec Att SUM')\n\n\ndef _plot_attention_matrix(inp_text, out_text, sum_all_layers, name):\n from matplotlib import pyplot as plt\n source_len = len(inp_text)\n prediction_len = len(out_text)\n\n fig = plt.figure(figsize=(8, 8))\n plt.imshow(\n X=sum_all_layers,\n interpolation=\"nearest\",\n cmap=plt.cm.Blues)\n plt.xticks(np.arange(source_len), inp_text, rotation=45)\n plt.yticks(np.arange(prediction_len), out_text, rotation=-45)\n fig.tight_layout()\n plt.show()\n\n\ndef load_model(model_dir, sess):\n ckpt = tf.train.get_checkpoint_state(model_dir)\n if ckpt and ckpt.model_checkpoint_path:\n ckpt_name = os.path.basename(ckpt.model_checkpoint_path)\n saver = tf.train.Saver()\n saver.restore(sess, os.path.join(model_dir, ckpt_name))\n return True\n return False\n\n\ndef _get_unique_words(wordGp):\n uniqueWordList = []\n for word, pred, 
gpMatch, copy in wordGp:\n if (word, pred) not in uniqueWordList:\n uniqueWordList.append((word, pred))\n else:\n wordGp.remove((word, pred, gpMatch, copy))\n return wordGp\n\n\ndef _generate_word_list(wordGp, gpProg=None):\n if gpProg:\n gpProg = pd.read_csv(gpProg)\n tempList = []\n for i in range(len(gpProg)):\n lesson = gpProg.loc[i]\n\n for word, pred, gpMatch, copy in wordGp[:]:\n for gp in gpMatch[:]:\n if gp == lesson[\"GP\"]:\n gpMatch.remove(gp)\n if len(gpMatch) == 0:\n tempList.append(((int(lesson[\"LESSON\"])), \"\".join(word), \"\".join(pred), \".\".join(copy),\n len(word), len(pred)))\n\n wordGp.remove((word, pred, gpMatch, copy))\n for word, pred, gpMatch, copy in wordGp[:]:\n tempList.append((999, \"\".join(word), \"\".join(pred), \".\".join(copy), len(word), len(pred)))\n\n wordList = pd.DataFrame()\n wordList = wordList.append(tempList, ignore_index=True)\n wordList.columns = [[\"LESSON\", \"SPELLING\", \"PHONOLOGY\", \"GPMATCH\", \"N LETTERS\", \"N PHONEMES\"]]\n return wordList\n else:\n tempList = []\n for word, pred, gpMatch, copy in wordGp[:]:\n tempList.append((\"\".join(word), \" \".join(pred), \".\".join(copy), len(word), len(pred)))\n wordList = pd.DataFrame()\n wordList = wordList.append(tempList, ignore_index=True)\n wordList.columns = [[\"SPELLING\", \"PHONOLOGY\", \"GPMATCH\", \"N LETTERS\", \"N PHONEMES\"]]\n return wordList\n\n\ndef decode_wordList(sess, wordList, input_tensor, input_phon_tensor, output_phon_tensor, att_mats_list, encoder,\n decode_to_file, gpProg):\n batch_size = 128 # conservative batch size to dodge out of memory issues\n wordCount = len(wordList)\n\n phon_results = []\n gp_results = []\n\n n_batch = wordCount // batch_size\n for idx_batch in tqdm(range(n_batch + 1), \"GP Matching\"):\n try:\n batch = wordList[idx_batch * batch_size:(idx_batch + 1) * batch_size]\n except:\n batch = wordList[n_batch * batch_size:]\n _, batch_phon_results, batch_gp_results = g2p_mapping_batch(sess, batch, input_tensor, input_phon_tensor,\n output_phon_tensor, att_mats_list, encoder)\n phon_results.extend(batch_phon_results)\n gp_results.extend(batch_gp_results)\n\n wordGp = list(zip(wordList, phon_results, deepcopy(gp_results), deepcopy(gp_results)))\n wordGp = _get_unique_words(wordGp)\n wordList = _generate_word_list(wordGp, gpProg)\n\n wordList.to_csv(decode_to_file, encoding=\"UTF-8\")\n\n\ndef prepare_corpus(wordList, phon):\n corpus = {}\n for w, p in zip(wordList, phon):\n if w in corpus:\n corpus[w].append(p.replace(\" \", \"\"))\n else:\n corpus[w] = [p.replace(\" \", \"\")]\n return corpus\n\n\ndef evaluate_corpus(sess, corpus, input_tensor, output_phon_tensor, encoder, top_beams):\n batch_size = 128 # conservative batch size to dodge out of memory issues\n wordList = list(corpus.keys())\n wordCount = len(wordList)\n\n phon_results = []\n\n n_batch = wordCount // batch_size\n for idx_batch in tqdm(range(n_batch + 1), \"Phon Translation\"):\n try:\n batch = wordList[idx_batch * batch_size:(idx_batch + 1) * batch_size]\n except:\n batch = wordList[n_batch * batch_size:]\n batch_phon_results = _make_translation_batch(sess, batch, input_tensor, output_phon_tensor, top_beams,\n encoder)\n phon_results.extend(batch_phon_results)\n\n rates = error_rates(corpus, phon_results)\n print(\"WER : {:.4%} ; PER : {:.4%}\".format(rates[0], rates[1]))\n\n\ndef evaluate_gpa(sess, corpus, input_tensor, input_phon_tensor, output_phon_tensor, att_mats_list, encoder):\n batch_size = 128 # conservative batch size to dodge out of memory issues\n wordList = 
list(corpus.keys())\n wordCount = len(wordList)\n\n gp_results = []\n\n n_batch = wordCount // batch_size\n for idx_batch in tqdm(range(n_batch + 1), \"Phon Translation\"):\n try:\n batch = wordList[idx_batch * batch_size:(idx_batch + 1) * batch_size]\n except:\n batch = wordList[n_batch * batch_size:]\n\n _, _, batch_gp_results = g2p_mapping_batch(sess, batch, input_tensor, input_phon_tensor,\n output_phon_tensor, att_mats_list, encoder)\n gp_results.extend([[results] for results in batch_gp_results])\n\n rates = error_rates(corpus, gp_results)\n print(\"WER : {:.4%} ; PER : {:.4%}\".format(rates[0], rates[1]))\n\n\ndef stats(sess, wordList, phon, input_tensor, input_phon_tensor, output_phon_tensor, att_mats_list, encoder,\n weights, freq=None):\n if freq is not None:\n assert len(weights) == 4\n\n batch_size = 128 # conservative batch size to dodge out of memory issues\n wordCount = len(wordList)\n\n phon_results = []\n gp_results = []\n\n n_batch = wordCount // batch_size\n for idx_batch in tqdm(range(n_batch + 1), \"GP Matching\"):\n try:\n batch = wordList[idx_batch * batch_size:(idx_batch + 1) * batch_size]\n except:\n batch = wordList[n_batch * batch_size:]\n _, batch_phon_results, batch_gp_results = g2p_mapping_batch(sess, batch, input_tensor, input_phon_tensor,\n output_phon_tensor, att_mats_list, encoder)\n gp_results.extend(batch_gp_results)\n phon_results.extend(batch_phon_results)\n\n if \" \" in phon[0] and \" \" in phon[len(phon) - 1]:\n phon_results = [\" \".join(phon) for phon in phon_results]\n else:\n phon_results = [\"\".join(phon) for phon in phon_results]\n gp_results = np.delete(np.array(gp_results), np.where(np.array(phon) != np.array(phon_results)))\n\n gpList = []\n gpCountDic = {}\n gCountDic = {}\n pCountDic = {}\n\n for r in gp_results:\n for gp in r:\n g, p = gp.split(\"~\")\n if gp not in gpList:\n gpList.append(gp)\n _dic_add(gp, gpCountDic)\n _dic_add(g, gCountDic)\n _dic_add(p, pCountDic)\n\n gpCount = sum(gpCountDic.values())\n tupList = []\n for i in range(len(gpList)):\n gp = gpList[i]\n g, p = gp.split(\"~\")\n tup = (gp, gpCountDic[gp] / gpCount,\n gpCountDic[gp] / gCountDic[g], gpCountDic[gp] / pCountDic[p])\n tupList.append(tup)\n\n df = pd.DataFrame()\n df = df.append(tupList, ignore_index=True)\n df.columns = [[\"GP\", \"GP FREQ IN DATASET\", \"G CONSISTENCY\", \"P CONSISTENCY\"]]\n\n if freq is None:\n scores = np.dot(weights, np.transpose(df[[\"GP FREQ IN DATASET\", \"G CONSISTENCY\", \"P CONSISTENCY\"]]))\n else:\n scores = np.dot(weights,\n np.concatenate(np.transpose(df[[\"GP FREQ IN DATASET\", \"G CONSISTENCY\", \"P CONSISTENCY\"]]),\n freq))\n df[\"SCORE\"] = scores\n df.sort_values([\"SCORE\"], ascending=False, inplace=True)\n df.reset_index(drop=True, inplace=True)\n\n gpProg = df[[\"GP\"]]\n gpProg[\"LESSON\"] = gpProg.index + 1\n\n return df, gpProg\n\n\ndef levenshtein(s1, s2):\n if len(s1) < len(s2):\n return levenshtein(s2, s1)\n\n if len(s2) == 0:\n return len(s1)\n\n previous_row = range(len(s2) + 1)\n for i, c1 in enumerate(s1):\n current_row = [i + 1]\n for j, c2 in enumerate(s2):\n insertions = previous_row[j + 1] + 1\n deletions = current_row[j] + 1\n substitutions = previous_row[j] + (c1 != c2)\n current_row.append(min(insertions, deletions, substitutions))\n previous_row = current_row\n return previous_row[-1]\n\n\ndef per(gd, pred):\n min_levenshtein = np.inf\n length_min_levenshtein_pronunciation = np.inf\n for beam in pred:\n for pronunciation in gd:\n current = levenshtein(pronunciation, beam)\n if current < 
min_levenshtein:\n min_levenshtein = current\n length_min_levenshtein_pronunciation = len(pronunciation)\n return min_levenshtein / length_min_levenshtein_pronunciation\n\n\ndef wer(gd, pred):\n for beam in pred:\n if beam in gd:\n return 0\n print(gd, pred)\n return 1\n\n\ndef error_rates(corpus, phon_results):\n wordList = list(corpus.keys())\n perS = 0\n werS = 0\n for i, word in tqdm(enumerate(wordList)):\n werS += wer(corpus[word], phon_results[i])\n perS += per(corpus[word], phon_results[i])\n return werS / len(wordList), perS / len(wordList)\n" ]
[ [ "tensorflow.train.get_checkpoint_state", "pandas.read_csv", "numpy.max", "pandas.DataFrame", "tensorflow.train.Saver", "numpy.arange", "tensorflow.get_variable_scope", "numpy.argmax", "numpy.transpose", "numpy.array", "numpy.reshape", "matplotlib.pyplot.figure", "numpy.std", "numpy.stack", "tensorflow.placeholder", "tensorflow.reduce_sum", "matplotlib.pyplot.show", "numpy.squeeze", "matplotlib.pyplot.imshow" ] ]
VIVelev/PyDojo
[ "d932b3df841636208611192be1f881390c361289" ]
[ "dojo/cluster/kmeans.py" ]
[ "import numpy as np\nfrom scipy import linalg\n\nfrom ..base import Clustering\n\n__all__ = [\n \"KMeans\",\n]\n\n\nclass KMeans(Clustering):\n \"\"\"K-Means Clustering algorithm\n \n Parameters:\n -----------\n n_clusters : integer, optional\n n_runs : integer, how many times to run the algorithm, optional\n \n \"\"\"\n\n def __init__(self, n_clusters=2, n_runs=10):\n self.n_clusters = n_clusters\n self.n_runs = n_runs\n\n self.distortion = 0\n self.centroids = []\n self.clusters = []\n self._X = None\n\n def _calc_distortion(self):\n \"\"\"Calculates the distortion value of the current clusters\n \"\"\"\n m = self._X.shape[0]\n self.distortion = 1/m * sum(\n linalg.norm(self._X[i, :] - self.centroids[self.clusters[i]])**2 for i in range(m)\n )\n return self.distortion\n\n def _init_random_centroids(self):\n \"\"\"Initialize the centroids as k random samples of X (k = n_clusters)\n \"\"\"\n self.centroids = self._X[np.random.choice(list(range(self._X.shape[0])), size=self.n_clusters), :]\n\n def _move_centroids(self):\n \"\"\"Calculate new centroids as the means of the samples in each cluster\n \"\"\"\n for k in range(self.n_clusters):\n if k in self.clusters:\n centroid = np.mean(self._X[self.clusters == k, :], axis=0)\n self.centroids[k] = centroid\n\n else:\n self.n_clusters-=1\n self.centroids = self.centroids[:self.n_clusters]\n self.clusters-=1\n k-=1\n\n def _closest_centroid(self, x):\n \"\"\"Returns the index of the closest centroid to the sample\n \"\"\"\n closest_centroid = 0\n distance = 10^9\n\n for i in range(self.n_clusters):\n current_distance = linalg.norm(x - self.centroids[i])\n if current_distance < distance:\n closest_centroid = i\n distance = current_distance\n\n return closest_centroid\n\n def _assign_clusters(self):\n \"\"\"Assign the samples to the closest centroids to create clusters\n \"\"\"\n self.clusters = np.array([self._closest_centroid(x) for x in self._X])\n\n def fit(self, X):\n \"\"\"The K-Means itself\n \"\"\"\n\n self._X = super().cluster(X)\n candidates = []\n\n for _ in range(self.n_runs):\n self._init_random_centroids()\n while True:\n prev_clusters = self.clusters\n self._assign_clusters()\n self._move_centroids()\n\n if np.all(prev_clusters == self.clusters):\n break\n\n self._calc_distortion()\n candidates.append((self.distortion, self.centroids, self.clusters))\n \n candidates.sort(key=lambda x: x[0])\n self.distortion = candidates[0][0]\n self.centroids = candidates[0][1]\n self.clusters = candidates[0][2]\n\n return self\n\n def cluster(self, X):\n X = super().cluster(X)\n return np.array([self._closest_centroid(x) for x in X])\n" ]
[ [ "numpy.all", "scipy.linalg.norm", "numpy.mean" ] ]
jackleland/flopter
[ "8f18f81470b456884108dc33baee836a672409c4" ]
[ "analysis/scripts/magnum/magnum_midrun_analysis.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\nimport xarray as xr\nimport json\nimport os\nimport glob\n# sys.path.append('/home/jleland/Coding/Projects/flopter')\nimport flopter.magnum.magoptoffline as mg\nimport flopter.core.lputils as lp\nimport flopter.core.ivdata as ivd\nimport flopter.core.fitters as fts\nfrom tkinter.filedialog import askopenfilename\n\n\nFOLDERS = ('2019-05-28_Leland/',\n '2019-05-29_Leland/',\n '2019-06-03_Leland/',\n '2019-06-04_Leland/',\n '2019-06-05_Leland/',\n '2019-06-06_Leland/',\n '2019-06-07_Leland/',\n )\nPROBE_DESIGNATIONS = ('S', 'L')\nSWEEP_RANGE = (0, 750)\n\n\ndef averaged_iv_analysis(filename=None, ts_temp=None, ts_dens=None, shunt_resistance=10.0, theta_perp=10.0,\n probe_designations=PROBE_DESIGNATIONS, sweep_range=SWEEP_RANGE, downsamplnig_factor=1):\n if filename is None:\n # folders = ['2019-05-28_Leland/', '2019-05-29_Leland/']\n mg.Magoptoffline._FOLDER_STRUCTURE = '/Data/external/magnum/'\n files = []\n file_folders = []\n for folder1 in FOLDERS:\n os.chdir(mg.Magoptoffline.get_data_path() + folder1)\n files.extend(glob.glob('*.adc'))\n file_folders.extend([folder1] * len(glob.glob('*.adc')))\n files.sort()\n\n # for i, f in enumerate(files):\n # print(i, f)\n\n # file = files[286]\n # adc_file = files[285]\n # ts_file = files[284]\n adc_file = files[-1]\n folder = FOLDERS[-1]\n else:\n # If using the tkinter file chooser\n adc_file = filename.split('/')[-1]\n folder = filename.split('/')[-2] + '/'\n mg.Magoptoffline._FOLDER_STRUCTURE = '/Data/external/magnum/'\n\n print('\"{}\" \\t\\t \"{}\"'.format(folder, adc_file))\n\n mp = lp.MagnumProbes()\n probe_S = mp.probe_s\n probe_B = mp.probe_b\n\n dsr = downsamplnig_factor\n\n # Create magopter object\n print('Creating magopter object')\n magopter = mg.Magoptoffline(folder, adc_file, shunt_resistor=shunt_resistance, cabling_resistance=2)\n magopter._VOLTAGE_CHANNEL = 3\n magopter._PROBE_CHANNEL_3 = 4\n magopter._PROBE_CHANNEL_4 = 5\n magopter.prepare(down_sampling_rate=dsr, roi_b_plasma=True, filter_arcs_fl=False, crit_freq=None, crit_ampl=None)\n\n print('0: {}, 1: {}'.format(len(magopter.iv_arrs[0]), len(magopter.iv_arrs[1])))\n\n if ts_dens is not None and ts_temp is not None:\n T_e_ts = ts_temp\n d_T_e_ts = ts_temp * 0.01\n n_e_ts = ts_dens\n d_n_e_ts = ts_dens * 0.01\n else:\n T_e_ts = 1.12\n d_T_e_ts = 0.01\n n_e_ts = 4.44e20\n d_n_e_ts = 0.01e20\n\n # print length of the 0th probes (probe S) number of sweeps\n print(len(magopter.iv_arrs[1]))\n\n # Create relative t array by subtracting the first timestep value from the first time array\n first_time_arr = magopter.iv_arrs[1][0]['t']\n relative_t = np.zeros(len(first_time_arr))\n\n sweep_length = np.shape(relative_t)[0] // 2\n print('Sweep length is {}'.format(sweep_length))\n\n relative_t = first_time_arr - first_time_arr[0]\n\n # create a list of datasets for each sweep\n ds_probes = []\n\n for i in range(len(magopter.iv_arrs)):\n ds_list = []\n for j, iv in enumerate(magopter.iv_arrs[i]):\n if j % 2 == 0:\n ds = xr.Dataset({'voltage': (['time'], iv['V'][:sweep_length]),\n 'current': (['time'], iv['I'][:sweep_length]),\n 'shot_time': (['time'], iv['t'][:sweep_length]),\n 'start_time': iv['t'][0]},\n coords={'time': relative_t[:sweep_length], 'direction': 'up',\n 'probe': probe_designations[i]})\n else:\n ds = xr.Dataset({'voltage': (['time'], np.flip(iv['V'][:sweep_length])),\n 'current': (['time'], np.flip(iv['I'][:sweep_length])),\n 'shot_time': (['time'], np.flip(iv['t'][:sweep_length])),\n 'start_time': iv['t'][0]},\n 
coords={'time': relative_t[:sweep_length], 'direction': 'down',\n 'probe': probe_designations[i]})\n ds_list.append(ds)\n\n # # Separate into up and down sweeps then concat along sweep direction as an axis\n print('Before equalisation: ', len(ds_list), len(ds_list[::2]), len(ds_list[1::2]))\n if len(ds_list[::2]) == len(ds_list[1::2]) + 1:\n ds_ups = xr.concat(ds_list[:-2:2], 'sweep')\n else:\n ds_ups = xr.concat(ds_list[::2], 'sweep')\n ds_downs = xr.concat(ds_list[1::2], 'sweep')\n print('After equalisation: ', len(ds_ups['sweep']), len(ds_downs['sweep']))\n\n direction = xr.DataArray(np.array(['up', 'down']), dims=['direction'], name='direction')\n ds_probes.append(xr.concat([ds_ups, ds_downs], dim=direction))\n\n probe = xr.DataArray(np.array(probe_designations), dims=['probe'], name='probe')\n ds_full = xr.concat(ds_probes, dim=probe)\n\n # Select the small probe\n ds_full = ds_full.sel(probe=probe_designations[0])\n\n manual_start = sweep_range[0]\n manual_end = sweep_range[1]\n plt.figure()\n ds_full.max(dim='time').mean('direction')['current'].plot.line(x='sweep')\n ds_full.max(dim='time').mean('direction').isel(sweep=slice(manual_start, manual_end))['current'].plot.line(\n x='sweep')\n\n # Choose only the IVs in the static section\n ds_full = ds_full.isel(sweep=slice(manual_start, manual_end))\n\n # Average across each sweep direction\n sweep_avg_up = ds_full.sel(direction='up').mean('sweep')\n sweep_avg_dn = ds_full.sel(direction='down').mean('sweep')\n\n # Add in standard deviation of each bin as a new data variable\n sweep_avg_up = sweep_avg_up.assign({'d_current': ds_full.sel(direction='up').std('sweep')['current']})\n sweep_avg_dn = sweep_avg_dn.assign({'d_current': ds_full.sel(direction='down').std('sweep')['current']})\n\n print(ds_full)\n\n sweep_avg_updn = ds_full.mean('direction').mean('sweep').assign(\n {'d_current': ds_full.std('direction').std('sweep')['current']})\n sweep_avg_updn = sweep_avg_updn.where(sweep_avg_updn.current <= 0, drop=True)\n print(sweep_avg_updn)\n\n # sweep_avg_updn['current'].plot.line()\n\n # concatenate the up and down sweeps together to cancel the (small) capacitance effect\n iv_data = ivd.IVData(sweep_avg_updn['voltage'].data,\n -sweep_avg_updn['current'].data,\n sweep_avg_updn['time'].data,\n sigma=sweep_avg_updn['d_current'].data, estimate_error_fl=False)\n\n starting_params = [0.69, 0.009, 1.12, +1]\n\n full_iv_fitter = fts.FullIVFitter()\n fit_data = full_iv_fitter.fit_iv_data(iv_data, initial_vals=starting_params)\n fig = plt.figure()\n fit_data.plot(fig=fig, show_fl=False)\n # plt.errorbar(fit_data.raw_x, fit_data.raw_y, yerr=iv_data['sigma'], ecolor='silver')\n # plt.plot(fit_data.raw_x, fit_data.fit_y, color='orange', label=r'')\n plt.plot(iv_data['V'], full_iv_fitter.fit_function(iv_data['V'], *starting_params), label='Start-param IV')\n plt.legend()\n plt.ylim([-.25, 1])\n\n # Create new averaged iv figure\n theta_perp = np.radians(theta_perp)\n\n # probe_selected = probe_L\n\n A_coll_0 = probe_S.get_collection_area(theta_perp)\n d_A_coll = np.abs(probe_S.get_collection_area(theta_perp + np.radians(0.8)) - A_coll_0)\n\n v_f_fitted = fit_data.get_param('V_f')\n d_v_f_fitted = fit_data.get_param('V_f', errors_fl=True).error\n\n v_f_approx = - 3 * fit_data.get_temp()\n d_v_f_approx = 0.05 * v_f_approx\n\n v_f_approx_ts = - 3 * T_e_ts\n d_v_f_approx_ts = 0.05 * v_f_approx_ts\n\n c_s_fitted = lp.sound_speed(fit_data.get_temp(), gamma_i=1)\n d_c_s_fitted = lp.d_sound_speed(c_s_fitted, fit_data.get_temp(), 
fit_data.get_temp(errors_fl=True).error)\n n_e_fitted = lp.electron_density(fit_data.get_isat(), c_s_fitted, A_coll_0)\n d_n_e_fitted = lp.d_electron_density(n_e_fitted, c_s_fitted, d_c_s_fitted, A_coll_0, d_A_coll, fit_data.get_isat(),\n fit_data.get_isat(errors_fl=True).error)\n\n print(\"iv = averaged: \\n\"\n \"\\t v_f = {:.3g} +- {:.1g} \\n\"\n \"\\t T_e = {:.3g} +- {:.1g} \\n\"\n \"\\t I_sat = {:.3g} +- {:.1g} \\n\"\n \"\\t n_e = {:.3g} +- {:.1g} \\n\"\n \"\\t a = {:.3g} +- {:.1g} \\n\"\n \"\\t c_s = {:.3g} +- {:.1g} \\n\"\n \"\\t A_coll = {:.3g} +- {:.1g} \\n\"\n .format(v_f_fitted, d_v_f_fitted,\n fit_data.get_temp(), fit_data.get_temp(errors_fl=True).error,\n fit_data.get_isat(), fit_data.get_isat(errors_fl=True).error,\n n_e_fitted, d_n_e_fitted,\n fit_data.get_sheath_exp(), fit_data.get_sheath_exp(errors_fl=True).error,\n c_s_fitted, d_c_s_fitted,\n A_coll_0, d_A_coll))\n\n I_f = probe_S.get_analytical_iv(fit_data.raw_x, v_f_fitted, theta_perp, fit_data.get_temp(), n_e_fitted,\n print_fl=True)\n I_ts = probe_S.get_analytical_iv(fit_data.raw_x, v_f_approx_ts, theta_perp, T_e_ts, n_e_ts,\n print_fl=True)\n\n plt.figure()\n plt.errorbar(fit_data.raw_x, fit_data.raw_y, yerr=fit_data.sigma,\n label='Raw IV', ecolor='silver', color='gray', zorder=-1)\n # plt.plot(iv_data[c.RAW_X].tolist()[0], I_f, label='Analytical - measured', linestyle='dashed', linewidth=1, color='r')\n plt.plot(fit_data.raw_x, fit_data.fit_y, color='blue', linewidth=1.2,\n label='Fit - ({:.2g}eV, {:.2g}m'.format(fit_data.get_temp(), n_e_fitted) + r'$^{-3}$)')\n plt.plot(fit_data.raw_x, I_ts, linestyle='dashed', color='red',\n label='Analytical from TS - ({:.2g}eV, {:.2g}m'.format(T_e_ts, n_e_ts) + '$^{-3}$)')\n\n plt.legend()\n # plt.title('Comparison of analytical to measured IV curves for the small area probe')\n plt.xlabel(r'$V_p$ / V')\n plt.ylabel(r'$I$ / A')\n # plt.ylim([-0.01, 3.2])\n plt.show()\n\n\nif __name__ == '__main__':\n with open('config.json', 'r') as fp:\n options = json.load(fp)\n filename = askopenfilename()\n print('ADC File: {} \\n'\n 'Options: {} \\n'\n .format(filename, options))\n averaged_iv_analysis(filename, **options)\n" ]
[ [ "numpy.array", "matplotlib.pyplot.errorbar", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.ylim", "matplotlib.pyplot.legend", "numpy.shape", "matplotlib.pyplot.figure", "numpy.radians", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.show", "numpy.flip" ] ]
Bachmvp/mmdetection3d
[ "b5b1a15a885eee92749e60a5837e2ce4918119f8" ]
[ "mmdet3d/apis/inference.py" ]
[ "import mmcv\nimport numpy as np\nimport re\nimport torch\nfrom copy import deepcopy\nfrom mmcv.parallel import collate, scatter\nfrom mmcv.runner import load_checkpoint\nfrom os import path as osp\n\nfrom mmdet3d.core import (Box3DMode, CameraInstance3DBoxes,\n DepthInstance3DBoxes, LiDARInstance3DBoxes,\n show_multi_modality_result, show_result,\n show_seg_result)\nfrom mmdet3d.core.bbox import get_box_type\nfrom mmdet3d.datasets.pipelines import Compose\nfrom mmdet3d.models import build_model\n\n\ndef convert_SyncBN(config):\n \"\"\"Convert config's naiveSyncBN to BN.\n\n Args:\n config (str or :obj:`mmcv.Config`): Config file path or the config\n object.\n \"\"\"\n if isinstance(config, dict):\n for item in config:\n if item == 'norm_cfg':\n config[item]['type'] = config[item]['type']. \\\n replace('naiveSyncBN', 'BN')\n else:\n convert_SyncBN(config[item])\n\n\ndef init_model(config, checkpoint=None, device='cuda:0'):\n \"\"\"Initialize a model from config file, which could be a 3D detector or a\n 3D segmentor.\n\n Args:\n config (str or :obj:`mmcv.Config`): Config file path or the config\n object.\n checkpoint (str, optional): Checkpoint path. If left as None, the model\n will not load any weights.\n device (str): Device to use.\n\n Returns:\n nn.Module: The constructed detector.\n \"\"\"\n if isinstance(config, str):\n config = mmcv.Config.fromfile(config)\n elif not isinstance(config, mmcv.Config):\n raise TypeError('config must be a filename or Config object, '\n f'but got {type(config)}')\n config.model.pretrained = None\n convert_SyncBN(config.model)\n config.model.train_cfg = None\n model = build_model(config.model, test_cfg=config.get('test_cfg'))\n if checkpoint is not None:\n checkpoint = load_checkpoint(model, checkpoint)\n if 'CLASSES' in checkpoint['meta']:\n model.CLASSES = checkpoint['meta']['CLASSES']\n else:\n model.CLASSES = config.class_names\n if 'PALETTE' in checkpoint['meta']: # 3D Segmentor\n model.PALETTE = checkpoint['meta']['PALETTE']\n model.cfg = config # save the config in the model for convenience\n model.to(device)\n model.eval()\n return model\n\n\ndef inference_detector(model, pcd):\n \"\"\"Inference point cloud with the detector.\n\n Args:\n model (nn.Module): The loaded detector.\n pcd (str): Point cloud files.\n\n Returns:\n tuple: Predicted results and data from pipeline.\n \"\"\"\n cfg = model.cfg\n device = next(model.parameters()).device # model device\n # build the data pipeline\n test_pipeline = deepcopy(cfg.data.test.pipeline)\n test_pipeline = Compose(test_pipeline)\n box_type_3d, box_mode_3d = get_box_type(cfg.data.test.box_type_3d)\n data = dict(\n pts_filename=pcd,\n box_type_3d=box_type_3d,\n box_mode_3d=box_mode_3d,\n # for ScanNet demo we need axis_align_matrix\n ann_info=dict(axis_align_matrix=np.eye(4)),\n sweeps=[],\n # set timestamp = 0\n timestamp=[0],\n img_fields=[],\n bbox3d_fields=[],\n pts_mask_fields=[],\n pts_seg_fields=[],\n bbox_fields=[],\n mask_fields=[],\n seg_fields=[])\n data = test_pipeline(data)\n data = collate([data], samples_per_gpu=1)\n if next(model.parameters()).is_cuda:\n # scatter to specified GPU\n data = scatter(data, [device.index])[0]\n else:\n # this is a workaround to avoid the bug of MMDataParallel\n data['img_metas'] = data['img_metas'][0].data\n data['points'] = data['points'][0].data\n # forward the model\n\n with torch.no_grad():\n # result = model(return_loss=False, rescale=True, **data)\n # del data['img_metas']\n\n # for i in data.items():\n # print(i)\n # data.update({'return_loss':[0, 
0]})\n torch.onnx.export(model, data, f=\"onnx_model/pfe.onnx\",\n export_params=True,\n verbose=True,opset_version=11)\n exit()\n return result, data\n\n\ndef inference_multi_modality_detector(model, pcd, image, ann_file):\n \"\"\"Inference point cloud with the multi-modality detector.\n\n Args:\n model (nn.Module): The loaded detector.\n pcd (str): Point cloud files.\n image (str): Image files.\n ann_file (str): Annotation files.\n\n Returns:\n tuple: Predicted results and data from pipeline.\n \"\"\"\n cfg = model.cfg\n device = next(model.parameters()).device # model device\n # build the data pipeline\n test_pipeline = deepcopy(cfg.data.test.pipeline)\n test_pipeline = Compose(test_pipeline)\n box_type_3d, box_mode_3d = get_box_type(cfg.data.test.box_type_3d)\n # get data info containing calib\n data_infos = mmcv.load(ann_file)\n image_idx = int(re.findall(r'\\d+', image)[-1]) # xxx/sunrgbd_000017.jpg\n for x in data_infos:\n if int(x['image']['image_idx']) != image_idx:\n continue\n info = x\n break\n data = dict(\n pts_filename=pcd,\n img_prefix=osp.dirname(image),\n img_info=dict(filename=osp.basename(image)),\n box_type_3d=box_type_3d,\n box_mode_3d=box_mode_3d,\n img_fields=[],\n bbox3d_fields=[],\n pts_mask_fields=[],\n pts_seg_fields=[],\n bbox_fields=[],\n mask_fields=[],\n seg_fields=[])\n data = test_pipeline(data)\n\n # TODO: this code is dataset-specific. Move lidar2img and\n # depth2img to .pkl annotations in the future.\n # LiDAR to image conversion\n if box_mode_3d == Box3DMode.LIDAR:\n rect = info['calib']['R0_rect'].astype(np.float32)\n Trv2c = info['calib']['Tr_velo_to_cam'].astype(np.float32)\n P2 = info['calib']['P2'].astype(np.float32)\n lidar2img = P2 @ rect @ Trv2c\n data['img_metas'][0].data['lidar2img'] = lidar2img\n # Depth to image conversion\n elif box_mode_3d == Box3DMode.DEPTH:\n rt_mat = info['calib']['Rt']\n # follow Coord3DMode.convert_point\n rt_mat = np.array([[1, 0, 0], [0, 0, -1], [0, 1, 0]\n ]) @ rt_mat.transpose(1, 0)\n depth2img = info['calib']['K'] @ rt_mat\n data['img_metas'][0].data['depth2img'] = depth2img\n\n data = collate([data], samples_per_gpu=1)\n if next(model.parameters()).is_cuda:\n # scatter to specified GPU\n data = scatter(data, [device.index])[0]\n else:\n # this is a workaround to avoid the bug of MMDataParallel\n data['img_metas'] = data['img_metas'][0].data\n data['points'] = data['points'][0].data\n data['img'] = data['img'][0].data\n\n # forward the model\n with torch.no_grad():\n result = model(return_loss=False, rescale=True, **data)\n return result, data\n\n\ndef inference_mono_3d_detector(model, image, ann_file):\n \"\"\"Inference image with the monocular 3D detector.\n\n Args:\n model (nn.Module): The loaded detector.\n image (str): Image files.\n ann_file (str): Annotation files.\n\n Returns:\n tuple: Predicted results and data from pipeline.\n \"\"\"\n cfg = model.cfg\n device = next(model.parameters()).device # model device\n # build the data pipeline\n test_pipeline = deepcopy(cfg.data.test.pipeline)\n test_pipeline = Compose(test_pipeline)\n box_type_3d, box_mode_3d = get_box_type(cfg.data.test.box_type_3d)\n # get data info containing calib\n data_infos = mmcv.load(ann_file)\n # find the info corresponding to this image\n for x in data_infos['images']:\n if osp.basename(x['file_name']) != osp.basename(image):\n continue\n img_info = x\n break\n data = dict(\n img_prefix=osp.dirname(image),\n img_info=dict(filename=osp.basename(image)),\n box_type_3d=box_type_3d,\n box_mode_3d=box_mode_3d,\n img_fields=[],\n 
bbox3d_fields=[],\n pts_mask_fields=[],\n pts_seg_fields=[],\n bbox_fields=[],\n mask_fields=[],\n seg_fields=[])\n\n # camera points to image conversion\n if box_mode_3d == Box3DMode.CAM:\n data['img_info'].update(dict(cam_intrinsic=img_info['cam_intrinsic']))\n\n data = test_pipeline(data)\n\n data = collate([data], samples_per_gpu=1)\n if next(model.parameters()).is_cuda:\n # scatter to specified GPU\n data = scatter(data, [device.index])[0]\n else:\n # this is a workaround to avoid the bug of MMDataParallel\n data['img_metas'] = data['img_metas'][0].data\n data['img'] = data['img'][0].data\n\n # forward the model\n with torch.no_grad():\n result = model(return_loss=False, rescale=True, **data)\n return result, data\n\n\ndef inference_segmentor(model, pcd):\n \"\"\"Inference point cloud with the segmentor.\n\n Args:\n model (nn.Module): The loaded segmentor.\n pcd (str): Point cloud files.\n\n Returns:\n tuple: Predicted results and data from pipeline.\n \"\"\"\n cfg = model.cfg\n device = next(model.parameters()).device # model device\n # build the data pipeline\n test_pipeline = deepcopy(cfg.data.test.pipeline)\n test_pipeline = Compose(test_pipeline)\n data = dict(\n pts_filename=pcd,\n img_fields=[],\n bbox3d_fields=[],\n pts_mask_fields=[],\n pts_seg_fields=[],\n bbox_fields=[],\n mask_fields=[],\n seg_fields=[])\n data = test_pipeline(data)\n data = collate([data], samples_per_gpu=1)\n if next(model.parameters()).is_cuda:\n # scatter to specified GPU\n data = scatter(data, [device.index])[0]\n else:\n # this is a workaround to avoid the bug of MMDataParallel\n data['img_metas'] = data['img_metas'][0].data\n data['points'] = data['points'][0].data\n # forward the model\n with torch.no_grad():\n result = model(return_loss=False, rescale=True, **data)\n return result, data\n\n\ndef show_det_result_meshlab(data,\n result,\n out_dir,\n score_thr=0.0,\n show=False,\n snapshot=False):\n \"\"\"Show 3D detection result by meshlab.\"\"\"\n points = data['points'][0][0].cpu().numpy()\n pts_filename = data['img_metas'][0][0]['pts_filename']\n file_name = osp.split(pts_filename)[-1].split('.')[0]\n\n if 'pts_bbox' in result[0].keys():\n pred_bboxes = result[0]['pts_bbox']['boxes_3d'].tensor.numpy()\n pred_scores = result[0]['pts_bbox']['scores_3d'].numpy()\n else:\n pred_bboxes = result[0]['boxes_3d'].tensor.numpy()\n pred_scores = result[0]['scores_3d'].numpy()\n\n # filter out low score bboxes for visualization\n if score_thr > 0:\n inds = pred_scores > score_thr\n pred_bboxes = pred_bboxes[inds]\n\n # for now we convert points into depth mode\n box_mode = data['img_metas'][0][0]['box_mode_3d']\n if box_mode != Box3DMode.DEPTH:\n points = points[..., [1, 0, 2]]\n points[..., 0] *= -1\n show_bboxes = Box3DMode.convert(pred_bboxes, box_mode, Box3DMode.DEPTH)\n else:\n show_bboxes = deepcopy(pred_bboxes)\n\n show_result(\n points,\n None,\n show_bboxes,\n out_dir,\n file_name,\n show=show,\n snapshot=snapshot)\n\n return file_name\n\n\ndef show_seg_result_meshlab(data,\n result,\n out_dir,\n palette,\n show=False,\n snapshot=False):\n \"\"\"Show 3D segmentation result by meshlab.\"\"\"\n points = data['points'][0][0].cpu().numpy()\n pts_filename = data['img_metas'][0][0]['pts_filename']\n file_name = osp.split(pts_filename)[-1].split('.')[0]\n\n pred_seg = result[0]['semantic_mask'].numpy()\n\n if palette is None:\n # generate random color map\n max_idx = pred_seg.max()\n palette = np.random.randint(0, 256, size=(max_idx + 1, 3))\n palette = np.array(palette).astype(np.int)\n\n 
show_seg_result(\n points,\n None,\n pred_seg,\n out_dir,\n file_name,\n palette=palette,\n show=show,\n snapshot=snapshot)\n\n return file_name\n\n\ndef show_proj_det_result_meshlab(data,\n result,\n out_dir,\n score_thr=0.0,\n show=False,\n snapshot=False):\n \"\"\"Show result of projecting 3D bbox to 2D image by meshlab.\"\"\"\n assert 'img' in data.keys(), 'image data is not provided for visualization'\n\n img_filename = data['img_metas'][0][0]['filename']\n file_name = osp.split(img_filename)[-1].split('.')[0]\n\n # read from file because img in data_dict has undergone pipeline transform\n img = mmcv.imread(img_filename)\n\n if 'pts_bbox' in result[0].keys():\n result[0] = result[0]['pts_bbox']\n elif 'img_bbox' in result[0].keys():\n result[0] = result[0]['img_bbox']\n pred_bboxes = result[0]['boxes_3d'].tensor.numpy()\n pred_scores = result[0]['scores_3d'].numpy()\n\n # filter out low score bboxes for visualization\n if score_thr > 0:\n inds = pred_scores > score_thr\n pred_bboxes = pred_bboxes[inds]\n\n box_mode = data['img_metas'][0][0]['box_mode_3d']\n if box_mode == Box3DMode.LIDAR:\n if 'lidar2img' not in data['img_metas'][0][0]:\n raise NotImplementedError(\n 'LiDAR to image transformation matrix is not provided')\n\n show_bboxes = LiDARInstance3DBoxes(pred_bboxes, origin=(0.5, 0.5, 0))\n\n show_multi_modality_result(\n img,\n None,\n show_bboxes,\n data['img_metas'][0][0]['lidar2img'],\n out_dir,\n file_name,\n box_mode='lidar',\n show=show)\n elif box_mode == Box3DMode.DEPTH:\n show_bboxes = DepthInstance3DBoxes(pred_bboxes, origin=(0.5, 0.5, 0))\n\n show_multi_modality_result(\n img,\n None,\n show_bboxes,\n None,\n out_dir,\n file_name,\n box_mode='depth',\n img_metas=data['img_metas'][0][0],\n show=show)\n elif box_mode == Box3DMode.CAM:\n if 'cam2img' not in data['img_metas'][0][0]:\n raise NotImplementedError(\n 'camera intrinsic matrix is not provided')\n\n show_bboxes = CameraInstance3DBoxes(\n pred_bboxes, box_dim=pred_bboxes.shape[-1], origin=(0.5, 1.0, 0.5))\n\n show_multi_modality_result(\n img,\n None,\n show_bboxes,\n data['img_metas'][0][0]['cam2img'],\n out_dir,\n file_name,\n box_mode='camera',\n show=show)\n else:\n raise NotImplementedError(\n f'visualization of {box_mode} bbox is not supported')\n\n return file_name\n\n\ndef show_result_meshlab(data,\n result,\n out_dir,\n score_thr=0.0,\n show=False,\n snapshot=False,\n task='det',\n palette=None):\n \"\"\"Show result by meshlab.\n\n Args:\n data (dict): Contain data from pipeline.\n result (dict): Predicted result from model.\n out_dir (str): Directory to save visualized result.\n score_thr (float): Minimum score of bboxes to be shown. Default: 0.0\n show (bool): Visualize the results online. Defaults to False.\n snapshot (bool): Whether to save the online results. Defaults to False.\n task (str): Distinguish which task result to visualize. Currently we\n support 3D detection, multi-modality detection and 3D segmentation.\n Defaults to 'det'.\n palette (list[list[int]]] | np.ndarray | None): The palette of\n segmentation map. If None is given, random palette will be\n generated. 
Defaults to None.\n \"\"\"\n assert task in ['det', 'multi_modality-det', 'seg', 'mono-det'], \\\n f'unsupported visualization task {task}'\n assert out_dir is not None, 'Expect out_dir, got none.'\n\n if task in ['det', 'multi_modality-det']:\n file_name = show_det_result_meshlab(data, result, out_dir, score_thr,\n show, snapshot)\n\n if task in ['seg']:\n file_name = show_seg_result_meshlab(data, result, out_dir, palette,\n show, snapshot)\n\n if task in ['multi_modality-det', 'mono-det']:\n file_name = show_proj_det_result_meshlab(data, result, out_dir,\n score_thr, show, snapshot)\n\n return out_dir, file_name\n" ]
[ [ "numpy.array", "torch.no_grad", "numpy.eye", "numpy.random.randint", "torch.onnx.export" ] ]
MuneetSJ/Exploration-vs-Exploitation-in-RL
[ "22c55e4b8d33c9519dc0cf73af9e99d540175228" ]
[ "agents/deep_q_agent.py" ]
[ "import os\n\nimport numpy as np\nimport rl.policy\n\nimport wandb\nfrom tensorflow.keras.optimizers import Adam\nfrom rl.agents import DQNAgent\nfrom rl.memory import SequentialMemory\nfrom rl.callbacks import WandbLogger\nfrom exploration_policies import *\nimport cartpole.model as cp\nimport lunarlander.model as ll\nimport mountaincar.model as mc\n\n\n# builds the agent used to train from the actions and model created for the environment\n\ndef build_agent(model, actions):\n policy = RandomPolicy()\n memory = SequentialMemory(limit=50000, window_length=1)\n dqn_a = DQNAgent(model=model, memory=memory, policy=policy, nb_actions=actions, nb_steps_warmup=100,\n target_model_update=1e-2)\n return dqn_a\n\n\ndqn = build_agent(mc.build_model(mc.states, mc.actions), mc.actions)\n\ndqn.compile(Adam(lr=1e-3), metrics=['mse'])\ndqn.fit(mc.env, nb_steps=100000, visualize=False, verbose=1,\n callbacks=[WandbLogger(project='Computing-Project', entity='msjaswal')])\nscores = dqn.test(mc.env, nb_episodes=10, visualize=True)\nprint(np.mean(scores.history['episode_reward']))\n" ]
[ [ "tensorflow.keras.optimizers.Adam", "numpy.mean" ] ]
bjwhite-fnal/decisionengine_modules
[ "1a2c3e4f57e60925dc374f386d8bca3ba9fa3e7e" ]
[ "AWS/sources/AWSSpotPriceWithSourceProxy.py" ]
[ "\"\"\"\nGet AWS spot price information\n\"\"\"\n\nimport datetime\nimport sys\nimport os\nimport time\nimport copy\nimport numpy as np\nimport pandas as pd\nimport pprint\nimport boto3\n\nimport decisionengine.framework.modules.SourceProxy as SourceProxy\nimport logging\nimport decisionengine_modules.load_config as load_config\n\n# default values\nREGION = 'us-west-2'\nINSTANCE_TYPES = ['m3.2xlarge', 'c3.2xlarge']\nHISTORY_OBSERVATION_TIME = 3600 # observe spot price for the last hour\n# HISTORY_START_TIME = HISTORY_END_TIME.replace(day=(HISTORY_END_TIME.day-1)) # 1 day ago\nPRODUCT_DESCRIPTIONS = ['Linux/UNIX']\nDRY_RUN = False\nMAX_RESULTS = 1000\nAVAILABILITY_ZONE = '' # any\nPRODUCES = ['provisioner_resource_spot_prices']\n\n\nclass SpotPriceData(object):\n \"\"\"\n Spot Price data element\n \"\"\"\n\n def __init__(self, sp_data):\n \"\"\"\n\n :type sp_data: :obj:`dict`\n :arg sp_data: spot price data\n \"\"\"\n self.data = sp_data\n self.data['Timestamp'] = sp_data['Timestamp'].isoformat()\n\n def __cmp__(self, other=None):\n \"\"\"\n overrides comparison method\n \"\"\"\n try:\n if (self.data['AvailabilityZone'], self.data['InstanceType']) == (other.data['AvailabilityZone'], other.data['InstanceType']):\n return 0\n except Exception as e:\n pass\n\n return -1\n\n\nclass AWSSpotPriceForRegion(object):\n \"\"\"\n Spot price data and methods\n \"\"\"\n\n def __init__(self, region=REGION, profile_name=None):\n \"\"\"\n\n :type region: :obj:`str`\n :arg region: AWS region name\n :type profile_name: :obj:`str`\n :arg profile_name: legal AWS profile name\n \"\"\"\n if profile_name:\n session = boto3.session.Session(profile_name=profile_name,\n region_name=region)\n self.ec2 = session.client('ec2', region_name=region)\n\n else:\n self.ec2 = boto3.client('ec2', region_name=region)\n self.account_name = profile_name\n t = time.time()\n self.start_time = datetime.datetime.utcfromtimestamp(\n t - HISTORY_OBSERVATION_TIME)\n self.end_time = datetime.datetime.utcfromtimestamp(t)\n self.intance_types = INSTANCE_TYPES\n self.dry_run = DRY_RUN\n self.max_results = MAX_RESULTS\n self.product_descriptions = PRODUCT_DESCRIPTIONS\n self.availability_zone = AVAILABILITY_ZONE\n\n def init_query(self,\n spot_price_history_start_time=None,\n spot_price_history_end_time=None,\n instance_types=INSTANCE_TYPES,\n product_descriptions=PRODUCT_DESCRIPTIONS,\n dry_run=DRY_RUN,\n max_resuts=MAX_RESULTS,\n availability_zone=AVAILABILITY_ZONE):\n \"\"\"\n Init AWS spot price query\n\n :type spot_price_history_start_time: :obj:`str`\n :arg spot_price_history_start_time: price since.\n :type spot_price_history_end_time: :obj:`str`\n :arg spot_price_history_end_time: price till.\n :type instance_types: :obj:`list`\n :arg instance_types: list of AWS instance types to query spot price for.\n :type dry_run: :obj:`bool`\n :arg dry_run: as described in boto3 documentation.\n :type max_resuts: :obj:`int`\n :arg max_resuts: maximum number of results to return.\n\n \"\"\"\n if spot_price_history_start_time:\n self.start_time = spot_price_history_start_time\n if spot_price_history_end_time:\n self.end_time = spot_price_history_end_time\n self.intance_types = instance_types\n self.dry_run = dry_run\n self.max_results = max_resuts\n self.product_descriptions = product_descriptions\n self.availability_zone = availability_zone\n\n def get_price(self):\n \"\"\"\n Get AWS spot prices.\n \"\"\"\n try:\n rc = self.ec2.describe_spot_price_history(\n DryRun=self.dry_run,\n StartTime=self.start_time,\n EndTime=self.end_time,\n 
InstanceTypes=self.intance_types,\n ProductDescriptions=self.product_descriptions,\n Filters=[],\n AvailabilityZone=self.availability_zone,\n MaxResults=self.max_results,\n NextToken='')\n except Exception as e:\n print(\"Exception\", e)\n return None\n price_history = rc.get('SpotPriceHistory')\n if len(price_history) == 0:\n price_history = None\n return price_history\n\n def spot_price_summary(self, spot_price_history):\n \"\"\"\n Returns the current spot prices per\n availability zone and instance type\n\n :type spot_price_history: :obj:`list`\n :arg spot_price_history: list of dictonaries\n :rtype: :obj:`list`: list of spot price data (:class:`SpotPriceData`)\n \"\"\"\n\n ll = []\n for item in spot_price_history:\n item['AccountName'] = self.account_name\n spd = SpotPriceData(item)\n if spd not in ll:\n # append if there is no element with given\n # availability zone and instance type\n ll.append(spd)\n else:\n # replace spot price by the most recent\n i = ll.index(spd)\n ll[i].data['Timestamp'] = spd.data['Timestamp']\n ll[i].data['SpotPrice'] = spd.data['SpotPrice']\n return ll\n\n\nclass AWSSpotPrice(SourceProxy.SourceProxy):\n def __init__(self, *args, **kwargs):\n super(AWSSpotPrice, self).__init__(*args, **kwargs)\n\n def produces(self, schema_id_list):\n return PRODUCES\n\n def acquire(self):\n \"\"\"\n Gets data from AWS\n\n :rtype: pandas frame (:class:`pd.DataFramelist`)\n \"\"\"\n\n # Load known accounts configuration\n account_conf = super(AWSSpotPrice, self).acquire()\n if len(account_conf.keys()) != 1:\n raise RuntimeError(\n 'Wrong configuration %s. Only one key is expected' % (account_conf,))\n self.account_dict = {}\n for k in account_conf:\n self.account_dict = account_conf[k].to_dict()\n sp_data = []\n for account in self.account_dict:\n for region, instances in self.account_dict[account].items():\n spot_price_info = AWSSpotPriceForRegion(\n region, profile_name=account)\n spot_price_info.init_query(instance_types=instances)\n spot_price_history = spot_price_info.get_price()\n if spot_price_history:\n sp_data += spot_price_info.spot_price_summary(\n spot_price_history)\n\n sp_list = [i.data for i in sp_data]\n column_names = ['AccountName', 'AvailabilityZone',\n 'InstanceType', 'ProductDescription', 'SpotPrice', 'Timestamp']\n return {PRODUCES[0]: pd.DataFrame(sp_list, columns=column_names)}\n\n\ndef module_config_template():\n \"\"\"\n print a template for this module configuration data\n \"\"\"\n\n d = {\"AWSSpotPrice\": {\n \"module\": \"modules.AWS.sources.AWSSpotPriceWithSourceProxy\",\n \"name\": \"AWSSpotPrice\",\n \"parameters\": {\n \"channel_name\": \"source_channel_name\",\n \"Dataproducts\": \"list of data keys to retrieve from source channel data\",\n \"retries\": \"<number of retries to acquire data>\",\n \"retry_timeout\": \"<retry timeout>\"\n },\n \"schedule\": 60 * 60,\n }\n }\n\n config = {\"ProfileName1\":\n {\"RegionName1\": [\"Instance1\", ], },\n }\n\n print(\"Entry in channel cofiguration\")\n pprint.pprint(d)\n print(\"where\")\n print(\"\\t name - name of the class to be instantiated by task manager\")\n print(\"\\t spot_price_configuration - configuration required to get AWS spot price information\")\n print(\"\\t Example:\")\n print(\"-------------\")\n pprint.pprint(config)\n print(\"where\")\n print(\"\\t ProfileName1 - name of account profile (example: hepcloud-rnd)\")\n print(\"\\t RegionName1 - name of region (example: us-west-2)\")\n print(\"\\t Instance1 - name of instance. 
If the list of instances is empty, price information for all instances is acquired\")\n\n\ndef module_config_info():\n \"\"\"\n print this module configuration information\n \"\"\"\n print(\"produces\", PRODUCES)\n module_config_template()\n\n\ndef main():\n \"\"\"\n Call this as a test unit or use as the CLI of this module\n \"\"\"\n import argparse\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--configtemplate',\n action='store_true',\n help='prints the expected module configuration')\n\n parser.add_argument('--configinfo',\n action='store_true',\n help='prints config template along with produces and consumes info')\n args = parser.parse_args()\n if args.configtemplate:\n module_config_template()\n elif args.configinfo:\n module_config_info()\n else:\n sprice = AWSSpotPrice({\"channel_name\": \"channel_aws_config_data\",\n \"Dataproducts\": [\"spot_occupancy_config\"],\n \"retries\": 3,\n \"retry_timeout\": 20,\n })\n rc = sprice.acquire()\n print(\"INFO\")\n print(rc)\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "pandas.DataFrame" ] ]
arcada-uas/LSTMVis
[ "e0628caf16265feb61193251dc1c9b3e1af444ad" ]
[ "lstm_server.py" ]
[ "import argparse\nimport connexion\nimport numpy as np\nimport os\nimport yaml\nfrom flask import send_from_directory, redirect\nimport json\nfrom lstmdata.data_handler import LSTMDataHandler\nimport lstmdata.read_index as ri\nimport types\n\n__author__ = 'Hendrik Strobelt'\n\nCONFIG_FILE_NAME = 'lstm.yml'\ndata_handlers = {}\nindex_map = {}\n\napp = connexion.App(__name__, debug=True)\n\n\ndef get_context(**request):\n project = request['project']\n if project not in data_handlers:\n return 'No such project', 404\n else:\n dh = data_handlers[project] # type: LSTMDataHandler\n\n # check if source is exists\n if not dh.is_valid_source(request['source']):\n return 'No valid source. Valid are: ' + ' -- '.join(dh.valid_sources()), 404\n\n # cell selection by bitmask vs. cell array\n cells = []\n if 'bitmask' in request:\n cells = np.where(np.fromstring(request['bitmask'], dtype=np.uint8) > 48)[0].tolist()\n elif 'cells' in request:\n cells = request['cells']\n\n res = dh.get_dimensions(\n pos_array=request['pos'],\n source=request['source'],\n left=request['left'],\n right=request['right'],\n dimensions=request['dims'],\n data_transform=request['transform'],\n cells=cells,\n activation_threshold=request['activation']\n )\n res['cells'] = cells\n return {'request': request, 'results': res}\n\ndef cleanup_dict(old_dictionary):\n new_dictionary={}\n for key, val in old_dictionary.items():\n try:\n s=json.dumps(val)\n new_dictionary[key]=val\n except:\n if type(val) is dict:\n new_dictionary[key]=cleanup_dict(val)\n elif type(val) is type({}.keys()):\n new_dictionary[key]=list(val)\n return new_dictionary\n\n\ndef get_info():\n res = []\n for key, project in data_handlers.items():\n # print key\n res.append({\n 'project': key,\n 'info': cleanup_dict(project.config)\n })\n return sorted(res, key=lambda x: x['project'])\n\ndef search(**request):\n project = request['project']\n res = {}\n\n if project not in data_handlers:\n return 'No such project', 404\n\n else:\n # start search either using index or regex\n\n dh = data_handlers[project]\n if project in index_map:\n res = ri.query_index(request['q'], request['limit'], request['html'],\n dir=index_map[project])\n elif dh.config['etc']['regex_search']:\n res = dh.regex_search(request['q'], request['limit'], request['html'])\n\n return {'request': request, 'res': res}\n\n\ndef match(**request):\n project = request['project']\n res = {}\n\n if project not in data_handlers:\n return 'No such project', 404\n\n else:\n dh = data_handlers[project] # type: LSTMDataHandler\n\n # check if source is exists\n if not dh.is_valid_source(request['source']):\n return 'No valid source', 404\n\n ranking, meta = dh.query_similar_activations(\n source=request['source'],\n cells=request['cells'],\n activation_threshold=request['activation'],\n data_transform=request['transform'],\n phrase_length=request['phrase_length'],\n add_histograms=True,\n query_mode=request['mode'],\n constrain_left=request['constraints'][0] > 0,\n constrain_right=request['constraints'][1] > 0\n )\n\n request_positions = list(map(lambda x: x['pos'], ranking))\n position_details = dh.get_dimensions(\n pos_array=request_positions,\n source=request['source'],\n left=request['left'],\n right=request['right'],\n cells=request['cells'],\n dimensions=request['dims'],\n data_transform=request['transform'],\n activation_threshold=request['activation']\n )\n\n res = {\n 'rankingDetail': ranking,\n 'positionDetail': position_details,\n 'fuzzyLengthHistogram': meta['fuzzy_length_histogram'].tolist(),\n 
'strictLengthHistogram': meta['strict_length_histogram'].tolist()\n }\n return {'request': request, 'results': res}\n\n\[email protected]('/client/<path:path>')\ndef send_static(path):\n \"\"\" serves all files from ./client/ to ``/client/<path:path>``\n\n :param path: path from api call\n \"\"\"\n return send_from_directory('client/', path)\n\[email protected]('/')\ndef redirect_home():\n return redirect('/client/index.html', code=302)\n\n\ndef create_data_handlers(directory):\n \"\"\"\n searches for CONFIG_FILE_NAME in all subdirectories of directory\n and creates data handlers for all of them\n\n :param directory: scan directory\n :return: null\n \"\"\"\n project_dirs = []\n for root, dirs, files in os.walk(directory):\n if CONFIG_FILE_NAME in files:\n project_dirs.append(os.path.abspath(root))\n\n i = 0\n for p_dir in project_dirs:\n with open(os.path.join(p_dir, CONFIG_FILE_NAME), 'r') as yf:\n config = yaml.load(yf, Loader=yaml.FullLoader)\n dh_id = os.path.split(p_dir)[1]\n data_handlers[dh_id] = LSTMDataHandler(directory=p_dir, config=config)\n if data_handlers[dh_id].config['index']:\n index_map[dh_id] = data_handlers[dh_id].config['index_dir']\n i += 1\n\n\napp.add_api('lstm_server.yaml')\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--nodebug\", default=False)\nparser.add_argument(\"--port\", default=\"8888\")\nparser.add_argument(\"--nocache\", default=False)\nparser.add_argument(\"-dir\", type=str, default=os.path.abspath('data'))\n\nif __name__ == '__main__':\n args = parser.parse_args()\n app.run(port=int(args.port), debug=not args.nodebug, host=\"127.0.0.1\")\nelse:\n args, _ = parser.parse_known_args()\n create_data_handlers(args.dir)\n" ]
[ [ "numpy.fromstring" ] ]
JayjeetAtGithub/dask
[ "ee9d64a98193f67567fc289b2306199b0bcf5b59" ]
[ "dask/array/routines.py" ]
[ "import inspect\nimport math\nimport warnings\nfrom collections.abc import Iterable\nfrom functools import partial, wraps\nfrom numbers import Integral, Real\nfrom typing import List, Tuple\n\nimport numpy as np\nfrom tlz import concat, interleave, sliding_window\n\nfrom ..base import is_dask_collection, tokenize\nfrom ..compatibility import apply\nfrom ..core import flatten\nfrom ..delayed import Delayed, unpack_collections\nfrom ..highlevelgraph import HighLevelGraph\nfrom ..utils import derived_from, funcname, is_arraylike, is_cupy_type\nfrom . import chunk\nfrom .core import (\n Array,\n asanyarray,\n asarray,\n blockwise,\n broadcast_shapes,\n broadcast_to,\n concatenate,\n elemwise,\n implements,\n is_scalar_for_elemwise,\n map_blocks,\n stack,\n tensordot_lookup,\n)\nfrom .creation import arange, diag, empty, indices, tri\nfrom .einsumfuncs import einsum # noqa\nfrom .ufunc import multiply, sqrt\nfrom .utils import (\n array_safe,\n asarray_safe,\n meta_from_array,\n safe_wraps,\n validate_axis,\n zeros_like_safe,\n)\nfrom .wrap import ones\n\n# save built-in for histogram functions which use range as a kwarg.\n_range = range\n\n\n@derived_from(np)\ndef array(x, dtype=None, ndmin=None):\n x = asarray(x)\n while ndmin is not None and x.ndim < ndmin:\n x = x[None, :]\n if dtype is not None and x.dtype != dtype:\n x = x.astype(dtype)\n return x\n\n\n@derived_from(np)\ndef result_type(*args):\n args = [a if is_scalar_for_elemwise(a) else a.dtype for a in args]\n return np.result_type(*args)\n\n\n@derived_from(np)\ndef atleast_3d(*arys):\n new_arys = []\n for x in arys:\n x = asanyarray(x)\n if x.ndim == 0:\n x = x[None, None, None]\n elif x.ndim == 1:\n x = x[None, :, None]\n elif x.ndim == 2:\n x = x[:, :, None]\n\n new_arys.append(x)\n\n if len(new_arys) == 1:\n return new_arys[0]\n else:\n return new_arys\n\n\n@derived_from(np)\ndef atleast_2d(*arys):\n new_arys = []\n for x in arys:\n x = asanyarray(x)\n if x.ndim == 0:\n x = x[None, None]\n elif x.ndim == 1:\n x = x[None, :]\n\n new_arys.append(x)\n\n if len(new_arys) == 1:\n return new_arys[0]\n else:\n return new_arys\n\n\n@derived_from(np)\ndef atleast_1d(*arys):\n new_arys = []\n for x in arys:\n x = asanyarray(x)\n if x.ndim == 0:\n x = x[None]\n\n new_arys.append(x)\n\n if len(new_arys) == 1:\n return new_arys[0]\n else:\n return new_arys\n\n\n@derived_from(np)\ndef vstack(tup, allow_unknown_chunksizes=False):\n tup = tuple(atleast_2d(x) for x in tup)\n return concatenate(tup, axis=0, allow_unknown_chunksizes=allow_unknown_chunksizes)\n\n\n@derived_from(np)\ndef hstack(tup, allow_unknown_chunksizes=False):\n if all(x.ndim == 1 for x in tup):\n return concatenate(\n tup, axis=0, allow_unknown_chunksizes=allow_unknown_chunksizes\n )\n else:\n return concatenate(\n tup, axis=1, allow_unknown_chunksizes=allow_unknown_chunksizes\n )\n\n\n@derived_from(np)\ndef dstack(tup, allow_unknown_chunksizes=False):\n tup = tuple(atleast_3d(x) for x in tup)\n return concatenate(tup, axis=2, allow_unknown_chunksizes=allow_unknown_chunksizes)\n\n\n@derived_from(np)\ndef swapaxes(a, axis1, axis2):\n if axis1 == axis2:\n return a\n if axis1 < 0:\n axis1 = axis1 + a.ndim\n if axis2 < 0:\n axis2 = axis2 + a.ndim\n ind = list(range(a.ndim))\n out = list(ind)\n out[axis1], out[axis2] = axis2, axis1\n\n return blockwise(np.swapaxes, out, a, ind, axis1=axis1, axis2=axis2, dtype=a.dtype)\n\n\n@derived_from(np)\ndef transpose(a, axes=None):\n if axes:\n if len(axes) != a.ndim:\n raise ValueError(\"axes don't match array\")\n axes = tuple(d + a.ndim if 
d < 0 else d for d in axes)\n else:\n axes = tuple(range(a.ndim))[::-1]\n return blockwise(\n np.transpose, axes, a, tuple(range(a.ndim)), dtype=a.dtype, axes=axes\n )\n\n\ndef flip(m, axis):\n \"\"\"\n Reverse element order along axis.\n\n Parameters\n ----------\n axis : int\n Axis to reverse element order of.\n\n Returns\n -------\n reversed array : ndarray\n \"\"\"\n\n m = asanyarray(m)\n\n sl = m.ndim * [slice(None)]\n try:\n sl[axis] = slice(None, None, -1)\n except IndexError as e:\n raise ValueError(\n \"`axis` of %s invalid for %s-D array\" % (str(axis), str(m.ndim))\n ) from e\n sl = tuple(sl)\n\n return m[sl]\n\n\n@derived_from(np)\ndef flipud(m):\n return flip(m, 0)\n\n\n@derived_from(np)\ndef fliplr(m):\n return flip(m, 1)\n\n\n@derived_from(np)\ndef rot90(m, k=1, axes=(0, 1)):\n axes = tuple(axes)\n if len(axes) != 2:\n raise ValueError(\"len(axes) must be 2.\")\n\n m = asanyarray(m)\n\n if axes[0] == axes[1] or np.absolute(axes[0] - axes[1]) == m.ndim:\n raise ValueError(\"Axes must be different.\")\n\n if axes[0] >= m.ndim or axes[0] < -m.ndim or axes[1] >= m.ndim or axes[1] < -m.ndim:\n raise ValueError(\n \"Axes={} out of range for array of ndim={}.\".format(axes, m.ndim)\n )\n\n k %= 4\n\n if k == 0:\n return m[:]\n if k == 2:\n return flip(flip(m, axes[0]), axes[1])\n\n axes_list = list(range(0, m.ndim))\n (axes_list[axes[0]], axes_list[axes[1]]) = (axes_list[axes[1]], axes_list[axes[0]])\n\n if k == 1:\n return transpose(flip(m, axes[1]), axes_list)\n else:\n # k == 3\n return flip(transpose(m, axes_list), axes[1])\n\n\nalphabet = \"abcdefghijklmnopqrstuvwxyz\"\nALPHABET = alphabet.upper()\n\n\ndef _tensordot(a, b, axes):\n x = max([a, b], key=lambda x: x.__array_priority__)\n tensordot = tensordot_lookup.dispatch(type(x))\n x = tensordot(a, b, axes=axes)\n\n if len(axes[0]) != 1:\n ind = [slice(None, None)] * x.ndim\n for a in sorted(axes[0]):\n ind.insert(a, None)\n x = x[tuple(ind)]\n\n return x\n\n\n@derived_from(np)\ndef tensordot(lhs, rhs, axes=2):\n if isinstance(axes, Iterable):\n left_axes, right_axes = axes\n else:\n left_axes = tuple(range(lhs.ndim - axes, lhs.ndim))\n right_axes = tuple(range(0, axes))\n\n if isinstance(left_axes, Integral):\n left_axes = (left_axes,)\n if isinstance(right_axes, Integral):\n right_axes = (right_axes,)\n if isinstance(left_axes, list):\n left_axes = tuple(left_axes)\n if isinstance(right_axes, list):\n right_axes = tuple(right_axes)\n if len(left_axes) == 1:\n concatenate = True\n else:\n concatenate = False\n\n dt = np.promote_types(lhs.dtype, rhs.dtype)\n\n left_index = list(range(lhs.ndim))\n right_index = list(range(lhs.ndim, lhs.ndim + rhs.ndim))\n out_index = left_index + right_index\n\n for l, r in zip(left_axes, right_axes):\n out_index.remove(right_index[r])\n right_index[r] = left_index[l]\n if concatenate:\n out_index.remove(left_index[l])\n\n intermediate = blockwise(\n _tensordot,\n out_index,\n lhs,\n left_index,\n rhs,\n right_index,\n dtype=dt,\n concatenate=concatenate,\n axes=(left_axes, right_axes),\n )\n\n if concatenate:\n return intermediate\n else:\n return intermediate.sum(axis=left_axes)\n\n\n@derived_from(np)\ndef dot(a, b):\n return tensordot(a, b, axes=((a.ndim - 1,), (b.ndim - 2,)))\n\n\n@derived_from(np)\ndef vdot(a, b):\n return dot(a.conj().ravel(), b.ravel())\n\n\ndef _matmul(a, b):\n xp = np\n\n if is_cupy_type(a):\n import cupy\n\n xp = cupy\n\n chunk = xp.matmul(a, b)\n # Since we have performed the contraction via matmul\n # but blockwise expects all dimensions back, we need\n # to add 
one dummy dimension back\n return chunk[..., xp.newaxis]\n\n\n@derived_from(np)\ndef matmul(a, b):\n a = asanyarray(a)\n b = asanyarray(b)\n\n if a.ndim == 0 or b.ndim == 0:\n raise ValueError(\"`matmul` does not support scalars.\")\n\n a_is_1d = False\n if a.ndim == 1:\n a_is_1d = True\n a = a[np.newaxis, :]\n\n b_is_1d = False\n if b.ndim == 1:\n b_is_1d = True\n b = b[:, np.newaxis]\n\n if a.ndim < b.ndim:\n a = a[(b.ndim - a.ndim) * (np.newaxis,)]\n elif a.ndim > b.ndim:\n b = b[(a.ndim - b.ndim) * (np.newaxis,)]\n\n # out_ind includes all dimensions to prevent contraction\n # in the blockwise below\n out_ind = tuple(range(a.ndim + 1))\n # lhs_ind includes `a`/LHS dimensions\n lhs_ind = tuple(range(a.ndim))\n # on `b`/RHS everything above 2nd dimension, is the same\n # as `a`, -2 dimension is \"contracted\" with the last dimension\n # of `a`, last dimension of `b` is `b` specific\n rhs_ind = tuple(range(a.ndim - 2)) + (lhs_ind[-1], a.ndim)\n\n out = blockwise(\n _matmul,\n out_ind,\n a,\n lhs_ind,\n b,\n rhs_ind,\n adjust_chunks={lhs_ind[-1]: 1},\n dtype=result_type(a, b),\n concatenate=False,\n )\n\n # Because contraction + concatenate in blockwise leads to high\n # memory footprints, we want to avoid them. Instead we will perform\n # blockwise (without contraction) followed by reduction. More about\n # this issue: https://github.com/dask/dask/issues/6874\n\n # When we perform reduction, we need to worry about the last 2 dimensions\n # which hold the matrices, some care is required to handle chunking in\n # that space.\n contraction_dimension_is_chunked = (\n max(min(a.chunks[-1], b.chunks[-2])) < a.shape[-1]\n )\n b_last_dim_max_chunk = max(b.chunks[-1])\n if contraction_dimension_is_chunked or b_last_dim_max_chunk < b.shape[-1]:\n if b_last_dim_max_chunk > 1:\n # This is the case when both contraction and last dimension axes\n # are chunked\n out = out.reshape(out.shape[:-1] + (1, -1))\n out = out.sum(axis=-3)\n out = out.reshape(out.shape[:-2] + (b.shape[-1],))\n else:\n # Contraction axis is chunked\n out = out.sum(axis=-2)\n else:\n # Neither contraction nor last dimension axes are chunked, we\n # remove the dummy dimension without reduction\n out = out.reshape(out.shape[:-2] + (b.shape[-1],))\n\n if a_is_1d:\n out = out[..., 0, :]\n if b_is_1d:\n out = out[..., 0]\n\n return out\n\n\n@derived_from(np)\ndef outer(a, b):\n a = a.flatten()\n b = b.flatten()\n\n dtype = np.outer(a.dtype.type(), b.dtype.type()).dtype\n\n return blockwise(np.outer, \"ij\", a, \"i\", b, \"j\", dtype=dtype)\n\n\ndef _inner_apply_along_axis(arr, func1d, func1d_axis, func1d_args, func1d_kwargs):\n return np.apply_along_axis(func1d, func1d_axis, arr, *func1d_args, **func1d_kwargs)\n\n\n@derived_from(np)\ndef apply_along_axis(func1d, axis, arr, *args, dtype=None, shape=None, **kwargs):\n \"\"\"\n This is a blocked variant of :func:`numpy.apply_along_axis` implemented via\n :func:`dask.array.map_blocks`\n\n Notes\n -----\n If either of `dtype` or `shape` are not provided, Dask attempts to\n determine them by calling `func1d` on a dummy array. 
This may produce\n incorrect values for `dtype` or `shape`, so we recommend providing them.\n \"\"\"\n arr = asarray(arr)\n\n # Verify that axis is valid and throw an error otherwise\n axis = len(arr.shape[:axis])\n\n # If necessary, infer dtype and shape of the output of func1d by calling it on test data.\n if shape is None or dtype is None:\n test_data = np.ones((1,), dtype=arr.dtype)\n test_result = np.array(func1d(test_data, *args, **kwargs))\n if shape is None:\n shape = test_result.shape\n if dtype is None:\n dtype = test_result.dtype\n\n # Rechunk so that func1d is applied over the full axis.\n arr = arr.rechunk(\n arr.chunks[:axis] + (arr.shape[axis : axis + 1],) + arr.chunks[axis + 1 :]\n )\n\n # Map func1d over the data to get the result\n # Adds other axes as needed.\n result = arr.map_blocks(\n _inner_apply_along_axis,\n name=funcname(func1d) + \"-along-axis\",\n dtype=dtype,\n chunks=(arr.chunks[:axis] + shape + arr.chunks[axis + 1 :]),\n drop_axis=axis,\n new_axis=list(range(axis, axis + len(shape), 1)),\n func1d=func1d,\n func1d_axis=axis,\n func1d_args=args,\n func1d_kwargs=kwargs,\n )\n\n return result\n\n\n@derived_from(np)\ndef apply_over_axes(func, a, axes):\n # Validate arguments\n a = asarray(a)\n try:\n axes = tuple(axes)\n except TypeError:\n axes = (axes,)\n\n sl = a.ndim * (slice(None),)\n\n # Compute using `apply_along_axis`.\n result = a\n for i in axes:\n result = apply_along_axis(func, i, result, 0)\n\n # Restore original dimensionality or error.\n if result.ndim == (a.ndim - 1):\n result = result[sl[:i] + (None,)]\n elif result.ndim != a.ndim:\n raise ValueError(\n \"func must either preserve dimensionality of the input\"\n \" or reduce it by one.\"\n )\n\n return result\n\n\n@derived_from(np)\ndef ptp(a, axis=None):\n return a.max(axis=axis) - a.min(axis=axis)\n\n\n@derived_from(np)\ndef diff(a, n=1, axis=-1):\n a = asarray(a)\n n = int(n)\n axis = int(axis)\n\n sl_1 = a.ndim * [slice(None)]\n sl_2 = a.ndim * [slice(None)]\n\n sl_1[axis] = slice(1, None)\n sl_2[axis] = slice(None, -1)\n\n sl_1 = tuple(sl_1)\n sl_2 = tuple(sl_2)\n\n r = a\n for i in range(n):\n r = r[sl_1] - r[sl_2]\n\n return r\n\n\n@derived_from(np)\ndef ediff1d(ary, to_end=None, to_begin=None):\n ary = asarray(ary)\n\n aryf = ary.flatten()\n r = aryf[1:] - aryf[:-1]\n\n r = [r]\n if to_begin is not None:\n r = [asarray(to_begin).flatten()] + r\n if to_end is not None:\n r = r + [asarray(to_end).flatten()]\n r = concatenate(r)\n\n return r\n\n\ndef _gradient_kernel(x, block_id, coord, axis, array_locs, grad_kwargs):\n \"\"\"\n x: nd-array\n array of one block\n coord: 1d-array or scalar\n coordinate along which the gradient is computed.\n axis: int\n axis along which the gradient is computed\n array_locs:\n actual location along axis. 
None if coordinate is scalar\n grad_kwargs:\n keyword to be passed to np.gradient\n \"\"\"\n block_loc = block_id[axis]\n if array_locs is not None:\n coord = coord[array_locs[0][block_loc] : array_locs[1][block_loc]]\n grad = np.gradient(x, coord, axis=axis, **grad_kwargs)\n return grad\n\n\n@derived_from(np)\ndef gradient(f, *varargs, **kwargs):\n f = asarray(f)\n\n kwargs[\"edge_order\"] = math.ceil(kwargs.get(\"edge_order\", 1))\n if kwargs[\"edge_order\"] > 2:\n raise ValueError(\"edge_order must be less than or equal to 2.\")\n\n drop_result_list = False\n axis = kwargs.pop(\"axis\", None)\n if axis is None:\n axis = tuple(range(f.ndim))\n elif isinstance(axis, Integral):\n drop_result_list = True\n axis = (axis,)\n\n axis = validate_axis(axis, f.ndim)\n\n if len(axis) != len(set(axis)):\n raise ValueError(\"duplicate axes not allowed\")\n\n axis = tuple(ax % f.ndim for ax in axis)\n\n if varargs == ():\n varargs = (1,)\n if len(varargs) == 1:\n varargs = len(axis) * varargs\n if len(varargs) != len(axis):\n raise TypeError(\n \"Spacing must either be a single scalar, or a scalar / 1d-array per axis\"\n )\n\n if issubclass(f.dtype.type, (np.bool8, Integral)):\n f = f.astype(float)\n elif issubclass(f.dtype.type, Real) and f.dtype.itemsize < 4:\n f = f.astype(float)\n\n results = []\n for i, ax in enumerate(axis):\n for c in f.chunks[ax]:\n if np.min(c) < kwargs[\"edge_order\"] + 1:\n raise ValueError(\n \"Chunk size must be larger than edge_order + 1. \"\n \"Minimum chunk for axis {} is {}. Rechunk to \"\n \"proceed.\".format(ax, np.min(c))\n )\n\n if np.isscalar(varargs[i]):\n array_locs = None\n else:\n if isinstance(varargs[i], Array):\n raise NotImplementedError(\"dask array coordinated is not supported.\")\n # coordinate position for each block taking overlap into account\n chunk = np.array(f.chunks[ax])\n array_loc_stop = np.cumsum(chunk) + 1\n array_loc_start = array_loc_stop - chunk - 2\n array_loc_stop[-1] -= 1\n array_loc_start[0] = 0\n array_locs = (array_loc_start, array_loc_stop)\n\n results.append(\n f.map_overlap(\n _gradient_kernel,\n dtype=f.dtype,\n depth={j: 1 if j == ax else 0 for j in range(f.ndim)},\n boundary=\"none\",\n coord=varargs[i],\n axis=ax,\n array_locs=array_locs,\n grad_kwargs=kwargs,\n )\n )\n\n if drop_result_list:\n results = results[0]\n\n return results\n\n\ndef _bincount_agg(bincounts, dtype, **kwargs):\n if not isinstance(bincounts, list):\n return bincounts\n\n n = max(map(len, bincounts))\n out = zeros_like_safe(bincounts[0], shape=n, dtype=dtype)\n for b in bincounts:\n out[: len(b)] += b\n return out\n\n\n@derived_from(np)\ndef bincount(x, weights=None, minlength=0, split_every=None):\n if x.ndim != 1:\n raise ValueError(\"Input array must be one dimensional. 
Try using x.ravel()\")\n if weights is not None:\n if weights.chunks != x.chunks:\n raise ValueError(\"Chunks of input array x and weights must match.\")\n\n token = tokenize(x, weights, minlength)\n args = [x, \"i\"]\n if weights is not None:\n meta = array_safe(np.bincount([1], weights=[1]), like=meta_from_array(x))\n args.extend([weights, \"i\"])\n else:\n meta = array_safe(np.bincount([]), like=meta_from_array(x))\n\n if minlength == 0:\n output_size = (np.nan,)\n else:\n output_size = (minlength,)\n\n chunked_counts = blockwise(\n partial(np.bincount, minlength=minlength), \"i\", *args, token=token, meta=meta\n )\n chunked_counts._chunks = (\n output_size * len(chunked_counts.chunks[0]),\n *chunked_counts.chunks[1:],\n )\n\n from .reductions import _tree_reduce\n\n output = _tree_reduce(\n chunked_counts,\n aggregate=partial(_bincount_agg, dtype=meta.dtype),\n axis=(0,),\n keepdims=True,\n dtype=meta.dtype,\n split_every=split_every,\n concatenate=False,\n )\n output._chunks = (output_size, *chunked_counts.chunks[1:])\n output._meta = meta\n return output\n\n\n@derived_from(np)\ndef digitize(a, bins, right=False):\n bins = asarray_safe(bins, like=meta_from_array(a))\n dtype = np.digitize(asarray_safe([0], like=bins), bins, right=False).dtype\n return a.map_blocks(np.digitize, dtype=dtype, bins=bins, right=right)\n\n\n# TODO: dask linspace doesn't support delayed values\ndef _linspace_from_delayed(start, stop, num=50):\n linspace_name = \"linspace-\" + tokenize(start, stop, num)\n (start_ref, stop_ref, num_ref), deps = unpack_collections([start, stop, num])\n if len(deps) == 0:\n return np.linspace(start, stop, num=num)\n\n linspace_dsk = {(linspace_name, 0): (np.linspace, start_ref, stop_ref, num_ref)}\n linspace_graph = HighLevelGraph.from_collections(\n linspace_name, linspace_dsk, dependencies=deps\n )\n\n chunks = ((np.nan,),) if is_dask_collection(num) else ((num,),)\n return Array(linspace_graph, linspace_name, chunks, dtype=float)\n\n\ndef _block_hist(x, bins, range=None, weights=None):\n return np.histogram(x, bins, range=range, weights=weights)[0][np.newaxis]\n\n\ndef histogram(a, bins=None, range=None, normed=False, weights=None, density=None):\n \"\"\"\n Blocked variant of :func:`numpy.histogram`.\n\n Parameters\n ----------\n a : dask.array.Array\n Input data; the histogram is computed over the flattened\n array. If the ``weights`` argument is used, the chunks of\n ``a`` are accessed to check chunking compatibility between\n ``a`` and ``weights``. If ``weights`` is ``None``, a\n :py:class:`dask.dataframe.Series` object can be passed as\n input data.\n bins : int or sequence of scalars, optional\n Either an iterable specifying the ``bins`` or the number of ``bins``\n and a ``range`` argument is required as computing ``min`` and ``max``\n over blocked arrays is an expensive operation that must be performed\n explicitly.\n If `bins` is an int, it defines the number of equal-width\n bins in the given range (10, by default). If `bins` is a\n sequence, it defines a monotonically increasing array of bin edges,\n including the rightmost edge, allowing for non-uniform bin widths.\n range : (float, float), optional\n The lower and upper range of the bins. If not provided, range\n is simply ``(a.min(), a.max())``. Values outside the range are\n ignored. The first element of the range must be less than or\n equal to the second. `range` affects the automatic bin\n computation as well. 
While bin width is computed to be optimal\n based on the actual data within `range`, the bin count will fill\n the entire range including portions containing no data.\n normed : bool, optional\n This is equivalent to the ``density`` argument, but produces incorrect\n results for unequal bin widths. It should not be used.\n weights : dask.array.Array, optional\n A dask.array.Array of weights, of the same block structure as ``a``. Each value in\n ``a`` only contributes its associated weight towards the bin count\n (instead of 1). If ``density`` is True, the weights are\n normalized, so that the integral of the density over the range\n remains 1.\n density : bool, optional\n If ``False``, the result will contain the number of samples in\n each bin. If ``True``, the result is the value of the\n probability *density* function at the bin, normalized such that\n the *integral* over the range is 1. Note that the sum of the\n histogram values will not be equal to 1 unless bins of unity\n width are chosen; it is not a probability *mass* function.\n Overrides the ``normed`` keyword if given.\n If ``density`` is True, ``bins`` cannot be a single-number delayed\n value. It must be a concrete number, or a (possibly-delayed)\n array/sequence of the bin edges.\n\n Returns\n -------\n hist : dask Array\n The values of the histogram. See `density` and `weights` for a\n description of the possible semantics.\n bin_edges : dask Array of dtype float\n Return the bin edges ``(length(hist)+1)``.\n\n Examples\n --------\n Using number of bins and range:\n\n >>> import dask.array as da\n >>> import numpy as np\n >>> x = da.from_array(np.arange(10000), chunks=10)\n >>> h, bins = da.histogram(x, bins=10, range=[0, 10000])\n >>> bins\n array([ 0., 1000., 2000., 3000., 4000., 5000., 6000., 7000.,\n 8000., 9000., 10000.])\n >>> h.compute()\n array([1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000])\n\n Explicitly specifying the bins:\n\n >>> h, bins = da.histogram(x, bins=np.array([0, 5000, 10000]))\n >>> bins\n array([ 0, 5000, 10000])\n >>> h.compute()\n array([5000, 5000])\n \"\"\"\n if isinstance(bins, Array):\n scalar_bins = bins.ndim == 0\n # ^ `np.ndim` is not implemented by Dask array.\n elif isinstance(bins, Delayed):\n scalar_bins = bins._length is None or bins._length == 1\n else:\n scalar_bins = np.ndim(bins) == 0\n\n if bins is None or (scalar_bins and range is None):\n raise ValueError(\n \"dask.array.histogram requires either specifying \"\n \"bins as an iterable or specifying both a range and \"\n \"the number of bins\"\n )\n\n if weights is not None and weights.chunks != a.chunks:\n raise ValueError(\"Input array and weights must have the same chunked structure\")\n\n if normed is not False:\n raise ValueError(\n \"The normed= keyword argument has been deprecated. \"\n \"Please use density instead. \"\n \"See the numpy.histogram docstring for more information.\"\n )\n\n if density and scalar_bins and isinstance(bins, (Array, Delayed)):\n raise NotImplementedError(\n \"When `density` is True, `bins` cannot be a scalar Dask object. \"\n \"It must be a concrete number or a (possibly-delayed) array/sequence of bin edges.\"\n )\n\n for argname, val in [(\"bins\", bins), (\"range\", range), (\"weights\", weights)]:\n if not isinstance(bins, (Array, Delayed)) and is_dask_collection(bins):\n raise TypeError(\n \"Dask types besides Array and Delayed are not supported \"\n \"for `histogram`. 
For argument `{}`, got: {!r}\".format(argname, val)\n )\n\n if range is not None:\n try:\n if len(range) != 2:\n raise ValueError(\n f\"range must be a sequence or array of length 2, but got {len(range)} items\"\n )\n if isinstance(range, (Array, np.ndarray)) and range.shape != (2,):\n raise ValueError(\n f\"range must be a 1-dimensional array of two items, but got an array of shape {range.shape}\"\n )\n except TypeError:\n raise TypeError(\n f\"Expected a sequence or array for range, not {range}\"\n ) from None\n\n token = tokenize(a, bins, range, weights, density)\n name = \"histogram-sum-\" + token\n\n if scalar_bins:\n bins = _linspace_from_delayed(range[0], range[1], bins + 1)\n # ^ NOTE `range[1]` is safe because of the above check, and the initial check\n # that range must not be None if `scalar_bins`\n else:\n if not isinstance(bins, (Array, np.ndarray)):\n bins = asarray(bins)\n if bins.ndim != 1:\n raise ValueError(\n f\"bins must be a 1-dimensional array or sequence, got shape {bins.shape}\"\n )\n\n (bins_ref, range_ref), deps = unpack_collections([bins, range])\n\n # Map the histogram to all bins, forming a 2D array of histograms, stacked for each chunk\n if weights is None:\n dsk = {\n (name, i, 0): (_block_hist, k, bins_ref, range_ref)\n for i, k in enumerate(flatten(a.__dask_keys__()))\n }\n dtype = np.histogram([])[0].dtype\n else:\n a_keys = flatten(a.__dask_keys__())\n w_keys = flatten(weights.__dask_keys__())\n dsk = {\n (name, i, 0): (_block_hist, k, bins_ref, range_ref, w)\n for i, (k, w) in enumerate(zip(a_keys, w_keys))\n }\n dtype = weights.dtype\n\n deps = (a,) + deps\n if weights is not None:\n deps += (weights,)\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=deps)\n\n # Turn graph into a 2D Array of shape (nchunks, nbins)\n nchunks = len(list(flatten(a.__dask_keys__())))\n nbins = bins.size - 1 # since `bins` is 1D\n chunks = ((1,) * nchunks, (nbins,))\n mapped = Array(graph, name, chunks, dtype=dtype)\n\n # Sum over chunks to get the final histogram\n n = mapped.sum(axis=0)\n\n # We need to replicate normed and density options from numpy\n if density is not None:\n if density:\n db = asarray(np.diff(bins).astype(float), chunks=n.chunks)\n return n / db / n.sum(), bins\n else:\n return n, bins\n else:\n return n, bins\n\n\ndef _block_histogramdd(sample, bins, range=None, weights=None):\n \"\"\"Call numpy.histogramdd for a blocked/chunked calculation.\n\n Slurps the result into an additional outer axis via [np.newaxis].\n This new axis will be used to stack chunked calls of the numpy\n function and add them together later.\n\n Returns\n -------\n :py:object:`np.ndarray`\n NumPy array with an additional outer dimension.\n\n \"\"\"\n return np.histogramdd(sample, bins, range=range, weights=weights)[0:1]\n\n\ndef histogramdd(sample, bins, range=None, normed=None, weights=None, density=None):\n \"\"\"Blocked variant of :func:`numpy.histogramdd`.\n\n Chunking of the input data (``sample``) is only allowed along the\n 0th (row) axis (the axis corresponding to the total number of\n samples). Data chunked along the 1st axis (column) axis is not\n compatible with this function. 
If weights are used, they must be\n chunked along the 0th axis identically to the input sample.\n\n A proper example setup for a three dimensional histogram, where\n the sample shape is ``(8, 3)`` and weights are shape ``(8,)``,\n sample chunks would be ``((4, 4), (3,))`` and the weights chunks\n would be ``((4, 4),)`` a table of the structure:\n\n +-------+-----------------------+-----------+\n | | sample (8 x 3) | weights |\n +=======+=====+=====+=====+=====+=====+=====+\n | chunk | row | `x` | `y` | `z` | row | `w` |\n +-------+-----+-----+-----+-----+-----+-----+\n | | 0 | 5 | 6 | 6 | 0 | 0.5 |\n | +-----+-----+-----+-----+-----+-----+\n | | 1 | 8 | 9 | 2 | 1 | 0.8 |\n | 0 +-----+-----+-----+-----+-----+-----+\n | | 2 | 3 | 3 | 1 | 2 | 0.3 |\n | +-----+-----+-----+-----+-----+-----+\n | | 3 | 2 | 5 | 6 | 3 | 0.7 |\n +-------+-----+-----+-----+-----+-----+-----+\n | | 4 | 3 | 1 | 1 | 4 | 0.3 |\n | +-----+-----+-----+-----+-----+-----+\n | | 5 | 3 | 2 | 9 | 5 | 1.3 |\n | 1 +-----+-----+-----+-----+-----+-----+\n | | 6 | 8 | 1 | 5 | 6 | 0.8 |\n | +-----+-----+-----+-----+-----+-----+\n | | 7 | 3 | 5 | 3 | 7 | 0.7 |\n +-------+-----+-----+-----+-----+-----+-----+\n\n If the sample 0th dimension and weight 0th (row) dimension are\n chunked differently, a ``ValueError`` will be raised. If\n coordinate groupings ((x, y, z) trios) are separated by a chunk\n boundry, then a ``ValueError`` will be raised. We suggest that you\n rechunk your data if it is of that form.\n\n The chunks property of the data (and optional weights) are used to\n check for compatibility with the blocked algorithm (as described\n above); therefore, you must call `to_dask_array` on a collection\n from ``dask.dataframe``, i.e. :class:`dask.dataframe.Series` or\n :class:`dask.dataframe.DataFrame`.\n\n Parameters\n ----------\n sample : dask.array.Array (N, D) or sequence of dask.array.Array\n Multidimensional data to be histogrammed.\n\n Note the unusual interpretation of a sample when it is a\n sequence of dask Arrays:\n\n * When a (N, D) dask Array, each row is an entry in the sample\n (coordinate in D dimensional space).\n * When a sequence of dask Arrays, each element in the sequence\n is the array of values for a single coordinate. This type of\n input will be automatically rechunked along the column axis\n if necessary. This may induce a runtime increase.\n bins : sequence of arrays describing bin edges, int, or sequence of ints\n The bin specification.\n\n The possible binning configurations are:\n\n * A sequence of arrays describing the monotonically increasing\n bin edges along each dimension.\n * A single int describing the total number of bins that will\n be used in each dimension (this requires the ``range``\n argument to be defined).\n * A sequence of ints describing the total number of bins to be\n used in each dimension (this requires the ``range`` argument\n to be defined).\n\n When bins are described by arrays, the rightmost edge is\n included. Bins described by arrays also allows for non-uniform\n bin widths.\n range : sequence of pairs, optional\n A sequence of length D, each a (min, max) tuple giving the\n outer bin edges to be used if the edges are not given\n explicitly in ``bins``. If defined, this argument is required\n to have an entry for each dimension. 
Unlike\n :func:`numpy.histogramdd`, if `bins` does not define bin\n edges, this argument is required (this function will not\n automatically use the min and max of of the value in a given\n dimension because the input data may be lazy in dask).\n normed : bool, optional\n An alias for the density argument that behaves identically. To\n avoid confusion with the broken argument to `histogram`,\n `density` should be preferred.\n weights : dask.array.Array, optional\n An array of values weighing each sample in the input data. The\n chunks of the weights must be identical to the chunking along\n the 0th (row) axis of the data sample.\n density : bool, optional\n If ``False`` (default), the returned array represents the\n number of samples in each bin. If ``True``, the returned array\n represents the probability density function at each bin.\n\n See Also\n --------\n histogram\n\n Examples\n --------\n Computing the histogram in 5 blocks using different bin edges\n along each dimension:\n\n >>> import dask.array as da\n >>> x = da.random.uniform(0, 1, size=(1000, 3), chunks=(200, 3))\n >>> edges = [\n ... np.linspace(0, 1, 5), # 4 bins in 1st dim\n ... np.linspace(0, 1, 6), # 5 in the 2nd\n ... np.linspace(0, 1, 4), # 3 in the 3rd\n ... ]\n >>> h, edges = da.histogramdd(x, bins=edges)\n >>> result = h.compute()\n >>> result.shape\n (4, 5, 3)\n\n Defining the bins by total number and their ranges, along with\n using weights:\n\n >>> bins = (4, 5, 3)\n >>> ranges = ((0, 1),) * 3 # expands to ((0, 1), (0, 1), (0, 1))\n >>> w = da.random.uniform(0, 1, size=(1000,), chunks=x.chunksize[0])\n >>> h, edges = da.histogramdd(x, bins=bins, range=ranges, weights=w)\n >>> np.isclose(h.sum().compute(), w.sum().compute())\n True\n\n \"\"\"\n\n # logic used in numpy.histogramdd to handle normed/density.\n if normed is None:\n if density is None:\n density = False\n elif density is None:\n # an explicit normed argument was passed, alias it to the new name\n density = normed\n else:\n raise TypeError(\"Cannot specify both 'normed' and 'density'\")\n\n # check if any dask collections (dc) were passed to bins= or\n # range= these are unsupported.\n dc_bins = is_dask_collection(bins)\n if isinstance(bins, (list, tuple)):\n dc_bins = dc_bins or any([is_dask_collection(b) for b in bins])\n dc_range = (\n any([is_dask_collection(r) for r in range]) if range is not None else False\n )\n if dc_bins or dc_range:\n raise NotImplementedError(\n \"Passing dask collections to bins=... or range=... is not supported.\"\n )\n\n # generate token and name for task\n token = tokenize(sample, bins, range, weights, density)\n name = f\"histogramdd-sum-{token}\"\n\n # N == total number of samples\n # D == total number of dimensions\n try:\n # Recommended input ND-array\n N, D = sample.shape\n except (AttributeError, ValueError):\n # If we have a sequence of 1D arrays\n sample = atleast_2d(sample).T\n N, D = sample.shape\n # rechunk if necessary\n if sample.chunksize[1] != D:\n sample = sample.rechunk((sample.chunksize[0], D))\n\n # Require only Array or Delayed objects for bins, range, and weights.\n for argname, val in [(\"bins\", bins), (\"range\", range), (\"weights\", weights)]:\n if not isinstance(bins, (Array, Delayed)) and is_dask_collection(bins):\n raise TypeError(\n \"Dask types besides Array and Delayed are not supported \"\n \"for `histogramdd`. 
For argument `{}`, got: {!r}\".format(argname, val)\n )\n\n # Require data to be chunked along the first axis only.\n if sample.shape[1:] != sample.chunksize[1:]:\n raise ValueError(\"Input array can only be chunked along the 0th axis\")\n\n # Require that the chunking of the sample and weights are compatible.\n if weights is not None and weights.chunks[0] != sample.chunks[0]:\n raise ValueError(\n \"Input array and weights must have the same shape \"\n \"and chunk structure along the first dimension.\"\n )\n\n # if bins is a list, tuple, then make sure the length is the same\n # as the number dimensions.\n if isinstance(bins, (list, tuple)):\n if len(bins) != D:\n raise ValueError(\n \"The dimension of bins must be equal to the dimension of the sample.\"\n )\n\n # if range is defined, check that it's the right length and also a\n # sequence of pairs.\n if range is not None:\n if len(range) != D:\n raise ValueError(\n \"range argument requires one entry, a min max pair, per dimension.\"\n )\n if not all(len(r) == 2 for r in range):\n raise ValueError(\"range argument should be a sequence of pairs\")\n\n # we will return the edges to mimic the NumPy API (we also use the\n # edges later as a way to calculate the total number of bins).\n if isinstance(bins, int):\n bins = (bins,) * D\n if all(isinstance(b, int) for b in bins) and all(len(r) == 2 for r in range):\n edges = [np.linspace(r[0], r[1], b + 1) for b, r in zip(bins, range)]\n else:\n edges = [np.asarray(b) for b in bins]\n\n # With dsk below, we will construct a (D + 1) dimensional array\n # stacked for each chunk. For example, if the histogram is going\n # to be 3 dimensions, this creates a stack of cubes (1 cube for\n # each sample chunk) that will be collapsed into a final cube (the\n # result).\n\n # This tuple of zeros represents the chunk index along the columns\n # (we only allow chunking along the rows).\n column_zeros = tuple(0 for _ in _range(D))\n\n if weights is None:\n dsk = {\n (name, i, *column_zeros): (_block_histogramdd, k, bins, range)\n for i, k in enumerate(flatten(sample.__dask_keys__()))\n }\n dtype = np.histogramdd([])[0].dtype\n else:\n a_keys = flatten(sample.__dask_keys__())\n w_keys = flatten(weights.__dask_keys__())\n dsk = {\n (name, i, *column_zeros): (_block_histogramdd, k, bins, range, w)\n for i, (k, w) in enumerate(zip(a_keys, w_keys))\n }\n dtype = weights.dtype\n\n deps = (sample,)\n if weights is not None:\n deps += (weights,)\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=deps)\n\n nchunks = len(list(flatten(sample.__dask_keys__())))\n all_nbins = tuple((b.size - 1,) for b in edges)\n stacked_chunks = ((1,) * nchunks, *all_nbins)\n mapped = Array(graph, name, stacked_chunks, dtype=dtype)\n\n # Finally, sum over chunks providing to get the final D\n # dimensional result array.\n n = mapped.sum(axis=0)\n\n if density:\n # compute array of values to divide by the bin width along\n # each dimension.\n width_divider = np.ones(n.shape)\n for i in _range(D):\n shape = np.ones(D, int)\n shape[i] = width_divider.shape[i]\n width_divider *= np.diff(edges[i]).reshape(shape)\n width_divider = asarray(width_divider, chunks=n.chunks)\n return n / width_divider / n.sum(), edges\n\n return n, [asarray(entry) for entry in edges]\n\n\n@derived_from(np)\ndef cov(m, y=None, rowvar=1, bias=0, ddof=None):\n # This was copied almost verbatim from np.cov\n # See numpy license at https://github.com/numpy/numpy/blob/master/LICENSE.txt\n # or NUMPY_LICENSE.txt within this directory\n if ddof is not None 
and ddof != int(ddof):\n raise ValueError(\"ddof must be integer\")\n\n # Handles complex arrays too\n m = asarray(m)\n if y is None:\n dtype = np.result_type(m, np.float64)\n else:\n y = asarray(y)\n dtype = np.result_type(m, y, np.float64)\n X = array(m, ndmin=2, dtype=dtype)\n\n if X.shape[0] == 1:\n rowvar = 1\n if rowvar:\n N = X.shape[1]\n axis = 0\n else:\n N = X.shape[0]\n axis = 1\n\n # check ddof\n if ddof is None:\n if bias == 0:\n ddof = 1\n else:\n ddof = 0\n fact = float(N - ddof)\n if fact <= 0:\n warnings.warn(\"Degrees of freedom <= 0 for slice\", RuntimeWarning)\n fact = 0.0\n\n if y is not None:\n y = array(y, ndmin=2, dtype=dtype)\n X = concatenate((X, y), axis)\n\n X = X - X.mean(axis=1 - axis, keepdims=True)\n if not rowvar:\n return (dot(X.T, X.conj()) / fact).squeeze()\n else:\n return (dot(X, X.T.conj()) / fact).squeeze()\n\n\n@derived_from(np)\ndef corrcoef(x, y=None, rowvar=1):\n c = cov(x, y, rowvar)\n if c.shape == ():\n return c / c\n d = diag(c)\n d = d.reshape((d.shape[0], 1))\n sqr_d = sqrt(d)\n return (c / sqr_d) / sqr_d.T\n\n\n@implements(np.round, np.round_)\n@derived_from(np)\ndef round(a, decimals=0):\n return a.map_blocks(np.round, decimals=decimals, dtype=a.dtype)\n\n\n@implements(np.iscomplexobj)\n@derived_from(np)\ndef iscomplexobj(x):\n return issubclass(x.dtype.type, np.complexfloating)\n\n\ndef _unique_internal(ar, indices, counts, return_inverse=False):\n \"\"\"\n Helper/wrapper function for :func:`numpy.unique`.\n\n Uses :func:`numpy.unique` to find the unique values for the array chunk.\n Given this chunk may not represent the whole array, also take the\n ``indices`` and ``counts`` that are in 1-to-1 correspondence to ``ar``\n and reduce them in the same fashion as ``ar`` is reduced. Namely sum\n any counts that correspond to the same value and take the smallest\n index that corresponds to the same value.\n\n To handle the inverse mapping from the unique values to the original\n array, simply return a NumPy array created with ``arange`` with enough\n values to correspond 1-to-1 to the unique values. While there is more\n work needed to be done to create the full inverse mapping for the\n original array, this provides enough information to generate the\n inverse mapping in Dask.\n\n Given Dask likes to have one array returned from functions like\n ``blockwise``, some formatting is done to stuff all of the resulting arrays\n into one big NumPy structured array. Dask is then able to handle this\n object and can split it apart into the separate results on the Dask side,\n which then can be passed back to this function in concatenated chunks for\n further reduction or can be return to the user to perform other forms of\n analysis.\n\n By handling the problem in this way, it does not matter where a chunk\n is in a larger array or how big it is. The chunk can still be computed\n on the same way. Also it does not matter if the chunk is the result of\n other chunks being run through this function multiple times. 
The end\n result will still be just as accurate using this strategy.\n \"\"\"\n\n return_index = indices is not None\n return_counts = counts is not None\n\n u = np.unique(ar)\n\n dt = [(\"values\", u.dtype)]\n if return_index:\n dt.append((\"indices\", np.intp))\n if return_inverse:\n dt.append((\"inverse\", np.intp))\n if return_counts:\n dt.append((\"counts\", np.intp))\n\n r = np.empty(u.shape, dtype=dt)\n r[\"values\"] = u\n if return_inverse:\n r[\"inverse\"] = np.arange(len(r), dtype=np.intp)\n if return_index or return_counts:\n for i, v in enumerate(r[\"values\"]):\n m = ar == v\n if return_index:\n indices[m].min(keepdims=True, out=r[\"indices\"][i : i + 1])\n if return_counts:\n counts[m].sum(keepdims=True, out=r[\"counts\"][i : i + 1])\n\n return r\n\n\n@derived_from(np)\ndef unique(ar, return_index=False, return_inverse=False, return_counts=False):\n ar = ar.ravel()\n\n # Run unique on each chunk and collect results in a Dask Array of\n # unknown size.\n\n args = [ar, \"i\"]\n out_dtype = [(\"values\", ar.dtype)]\n if return_index:\n args.extend([arange(ar.shape[0], dtype=np.intp, chunks=ar.chunks[0]), \"i\"])\n out_dtype.append((\"indices\", np.intp))\n else:\n args.extend([None, None])\n if return_counts:\n args.extend([ones((ar.shape[0],), dtype=np.intp, chunks=ar.chunks[0]), \"i\"])\n out_dtype.append((\"counts\", np.intp))\n else:\n args.extend([None, None])\n\n out = blockwise(_unique_internal, \"i\", *args, dtype=out_dtype, return_inverse=False)\n out._chunks = tuple((np.nan,) * len(c) for c in out.chunks)\n\n # Take the results from the unique chunks and do the following.\n #\n # 1. Collect all results as arguments.\n # 2. Concatenate each result into one big array.\n # 3. Pass all results as arguments to the internal unique again.\n #\n # TODO: This should be replaced with a tree reduction using this strategy.\n # xref: https://github.com/dask/dask/issues/2851\n\n out_parts = [out[\"values\"]]\n if return_index:\n out_parts.append(out[\"indices\"])\n else:\n out_parts.append(None)\n if return_counts:\n out_parts.append(out[\"counts\"])\n else:\n out_parts.append(None)\n\n name = \"unique-aggregate-\" + out.name\n dsk = {\n (name, 0): (\n (_unique_internal,)\n + tuple(\n (np.concatenate, o.__dask_keys__())\n if hasattr(o, \"__dask_keys__\")\n else o\n for o in out_parts\n )\n + (return_inverse,)\n )\n }\n out_dtype = [(\"values\", ar.dtype)]\n if return_index:\n out_dtype.append((\"indices\", np.intp))\n if return_inverse:\n out_dtype.append((\"inverse\", np.intp))\n if return_counts:\n out_dtype.append((\"counts\", np.intp))\n\n dependencies = [o for o in out_parts if hasattr(o, \"__dask_keys__\")]\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=dependencies)\n chunks = ((np.nan,),)\n out = Array(graph, name, chunks, out_dtype)\n\n # Split out all results to return to the user.\n\n result = [out[\"values\"]]\n if return_index:\n result.append(out[\"indices\"])\n if return_inverse:\n # Using the returned unique values and arange of unknown length, find\n # each value matching a unique value and replace it with its\n # corresponding index or `0`. There should be only one entry for this\n # index in axis `1` (the one of unknown length). 
Reduce axis `1`\n # through summing to get an array with known dimensionality and the\n # mapping of the original values.\n mtches = (ar[:, None] == out[\"values\"][None, :]).astype(np.intp)\n result.append((mtches * out[\"inverse\"]).sum(axis=1))\n if return_counts:\n result.append(out[\"counts\"])\n\n if len(result) == 1:\n result = result[0]\n else:\n result = tuple(result)\n\n return result\n\n\ndef _isin_kernel(element, test_elements, assume_unique=False):\n values = np.in1d(element.ravel(), test_elements, assume_unique=assume_unique)\n return values.reshape(element.shape + (1,) * test_elements.ndim)\n\n\n@safe_wraps(getattr(np, \"isin\", None))\ndef isin(element, test_elements, assume_unique=False, invert=False):\n element = asarray(element)\n test_elements = asarray(test_elements)\n element_axes = tuple(range(element.ndim))\n test_axes = tuple(i + element.ndim for i in range(test_elements.ndim))\n mapped = blockwise(\n _isin_kernel,\n element_axes + test_axes,\n element,\n element_axes,\n test_elements,\n test_axes,\n adjust_chunks={axis: lambda _: 1 for axis in test_axes},\n dtype=bool,\n assume_unique=assume_unique,\n )\n\n result = mapped.any(axis=test_axes)\n if invert:\n result = ~result\n return result\n\n\n@derived_from(np)\ndef roll(array, shift, axis=None):\n result = array\n\n if axis is None:\n result = ravel(result)\n\n if not isinstance(shift, Integral):\n raise TypeError(\n \"Expect `shift` to be an instance of Integral when `axis` is None.\"\n )\n\n shift = (shift,)\n axis = (0,)\n else:\n try:\n len(shift)\n except TypeError:\n shift = (shift,)\n try:\n len(axis)\n except TypeError:\n axis = (axis,)\n\n if len(shift) != len(axis):\n raise ValueError(\"Must have the same number of shifts as axes.\")\n\n for i, s in zip(axis, shift):\n s = -s\n s %= result.shape[i]\n\n sl1 = result.ndim * [slice(None)]\n sl2 = result.ndim * [slice(None)]\n\n sl1[i] = slice(s, None)\n sl2[i] = slice(None, s)\n\n sl1 = tuple(sl1)\n sl2 = tuple(sl2)\n\n result = concatenate([result[sl1], result[sl2]], axis=i)\n\n result = result.reshape(array.shape)\n\n return result\n\n\n@derived_from(np)\ndef shape(array):\n return array.shape\n\n\n@derived_from(np)\ndef union1d(ar1, ar2):\n return unique(concatenate((ar1.ravel(), ar2.ravel())))\n\n\n@derived_from(np)\ndef ravel(array_like):\n return asanyarray(array_like).reshape((-1,))\n\n\n@derived_from(np)\ndef squeeze(a, axis=None):\n if axis is None:\n axis = tuple(i for i, d in enumerate(a.shape) if d == 1)\n elif not isinstance(axis, tuple):\n axis = (axis,)\n\n if any(a.shape[i] != 1 for i in axis):\n raise ValueError(\"cannot squeeze axis with size other than one\")\n\n axis = validate_axis(axis, a.ndim)\n\n sl = tuple(0 if i in axis else slice(None) for i, s in enumerate(a.shape))\n\n a = a[sl]\n\n return a\n\n\n@derived_from(np)\ndef compress(condition, a, axis=None):\n\n if not is_arraylike(condition):\n # Allow `condition` to be anything array-like, otherwise ensure `condition`\n # is a numpy array.\n condition = np.asarray(condition)\n condition = condition.astype(bool)\n a = asarray(a)\n\n if condition.ndim != 1:\n raise ValueError(\"Condition must be one dimensional\")\n\n if axis is None:\n a = a.ravel()\n axis = 0\n axis = validate_axis(axis, a.ndim)\n\n # Treat `condition` as filled with `False` (if it is too short)\n a = a[\n tuple(\n slice(None, len(condition)) if i == axis else slice(None)\n for i in range(a.ndim)\n )\n ]\n\n # Use `condition` to select along 1 dimension\n a = a[tuple(condition if i == axis else slice(None) for i 
in range(a.ndim))]\n\n return a\n\n\n@derived_from(np)\ndef extract(condition, arr):\n condition = asarray(condition).astype(bool)\n arr = asarray(arr)\n return compress(condition.ravel(), arr.ravel())\n\n\n@derived_from(np)\ndef take(a, indices, axis=0):\n axis = validate_axis(axis, a.ndim)\n\n if isinstance(a, np.ndarray) and isinstance(indices, Array):\n return _take_dask_array_from_numpy(a, indices, axis)\n else:\n return a[(slice(None),) * axis + (indices,)]\n\n\ndef _take_dask_array_from_numpy(a, indices, axis):\n assert isinstance(a, np.ndarray)\n assert isinstance(indices, Array)\n\n return indices.map_blocks(\n lambda block: np.take(a, block, axis), chunks=indices.chunks, dtype=a.dtype\n )\n\n\n@derived_from(np)\ndef around(x, decimals=0):\n return map_blocks(partial(np.around, decimals=decimals), x, dtype=x.dtype)\n\n\ndef _asarray_isnull(values):\n import pandas as pd\n\n return np.asarray(pd.isnull(values))\n\n\ndef isnull(values):\n \"\"\" pandas.isnull for dask arrays \"\"\"\n # eagerly raise ImportError, if pandas isn't available\n import pandas as pd # noqa\n\n return elemwise(_asarray_isnull, values, dtype=\"bool\")\n\n\ndef notnull(values):\n \"\"\" pandas.notnull for dask arrays \"\"\"\n return ~isnull(values)\n\n\n@derived_from(np)\ndef isclose(arr1, arr2, rtol=1e-5, atol=1e-8, equal_nan=False):\n func = partial(np.isclose, rtol=rtol, atol=atol, equal_nan=equal_nan)\n return elemwise(func, arr1, arr2, dtype=\"bool\")\n\n\n@derived_from(np)\ndef allclose(arr1, arr2, rtol=1e-5, atol=1e-8, equal_nan=False):\n return isclose(arr1, arr2, rtol=rtol, atol=atol, equal_nan=equal_nan).all()\n\n\ndef variadic_choose(a, *choices):\n return np.choose(a, choices)\n\n\n@derived_from(np)\ndef choose(a, choices):\n return elemwise(variadic_choose, a, *choices)\n\n\ndef _isnonzero_vec(v):\n return bool(np.count_nonzero(v))\n\n\n_isnonzero_vec = np.vectorize(_isnonzero_vec, otypes=[bool])\n\n\ndef isnonzero(a):\n if a.dtype.kind in {\"U\", \"S\"}:\n # NumPy treats all-whitespace strings as falsy (like in `np.nonzero`).\n # but not in `.astype(bool)`. To match the behavior of numpy at least until\n # 1.19, we use `_isnonzero_vec`. When NumPy changes behavior, we should just\n # use the try block below.\n # https://github.com/numpy/numpy/issues/9875\n return a.map_blocks(_isnonzero_vec, dtype=bool)\n try:\n np.zeros(tuple(), dtype=a.dtype).astype(bool)\n except ValueError:\n ######################################################\n # Handle special cases where conversion to bool does #\n # not work correctly. 
#\n # #\n # xref: https://github.com/numpy/numpy/issues/9479 #\n ######################################################\n return a.map_blocks(_isnonzero_vec, dtype=bool)\n else:\n return a.astype(bool)\n\n\n@derived_from(np)\ndef argwhere(a):\n a = asarray(a)\n\n nz = isnonzero(a).flatten()\n\n ind = indices(a.shape, dtype=np.intp, chunks=a.chunks)\n if ind.ndim > 1:\n ind = stack([ind[i].ravel() for i in range(len(ind))], axis=1)\n ind = compress(nz, ind, axis=0)\n\n return ind\n\n\n@derived_from(np)\ndef where(condition, x=None, y=None):\n if (x is None) != (y is None):\n raise ValueError(\"either both or neither of x and y should be given\")\n if (x is None) and (y is None):\n return nonzero(condition)\n\n if np.isscalar(condition):\n dtype = result_type(x, y)\n x = asarray(x)\n y = asarray(y)\n\n shape = broadcast_shapes(x.shape, y.shape)\n out = x if condition else y\n\n return broadcast_to(out, shape).astype(dtype)\n else:\n return elemwise(np.where, condition, x, y)\n\n\n@derived_from(np)\ndef count_nonzero(a, axis=None):\n return isnonzero(asarray(a)).astype(np.intp).sum(axis=axis)\n\n\n@derived_from(np)\ndef flatnonzero(a):\n return argwhere(asarray(a).ravel())[:, 0]\n\n\n@derived_from(np)\ndef nonzero(a):\n ind = argwhere(a)\n if ind.ndim > 1:\n return tuple(ind[:, i] for i in range(ind.shape[1]))\n else:\n return (ind,)\n\n\ndef _unravel_index_kernel(indices, func_kwargs):\n return np.stack(np.unravel_index(indices, **func_kwargs))\n\n\n@derived_from(np)\ndef unravel_index(indices, shape, order=\"C\"):\n if shape and indices.size:\n unraveled_indices = tuple(\n indices.map_blocks(\n _unravel_index_kernel,\n dtype=np.intp,\n chunks=(((len(shape),),) + indices.chunks),\n new_axis=0,\n func_kwargs={\"shape\": shape, \"order\": order},\n )\n )\n else:\n unraveled_indices = tuple(empty((0,), dtype=np.intp, chunks=1) for i in shape)\n\n return unraveled_indices\n\n\ndef _ravel_multi_index_kernel(multi_index, func_kwargs):\n return np.ravel_multi_index(multi_index, **func_kwargs)\n\n\n@wraps(np.ravel_multi_index)\ndef ravel_multi_index(multi_index, dims, mode=\"raise\", order=\"C\"):\n return multi_index.map_blocks(\n _ravel_multi_index_kernel,\n dtype=np.intp,\n chunks=(multi_index.shape[-1],),\n drop_axis=0,\n func_kwargs=dict(dims=dims, mode=mode, order=order),\n )\n\n\ndef _int_piecewise(x, *condlist, **kwargs):\n return np.piecewise(\n x, list(condlist), kwargs[\"funclist\"], *kwargs[\"func_args\"], **kwargs[\"func_kw\"]\n )\n\n\n@derived_from(np)\ndef piecewise(x, condlist, funclist, *args, **kw):\n return map_blocks(\n _int_piecewise,\n x,\n *condlist,\n dtype=x.dtype,\n name=\"piecewise\",\n funclist=funclist,\n func_args=args,\n func_kw=kw,\n )\n\n\ndef _partition(total: int, divisor: int) -> Tuple[Tuple[int, ...], Tuple[int, ...]]:\n \"\"\"\n Given a total and a divisor, return two tuples: A tuple containing `divisor` repeated\n the number of times it divides `total`, and length-1 or empty tuple containing the remainder when\n `total` is divided by `divisor`. If `divisor` factors `total`, i.e. 
if the remainder is 0, then\n `remainder` is empty.\n \"\"\"\n multiples = (divisor,) * (total // divisor)\n remainder = ()\n if (total % divisor) > 0:\n remainder = (total % divisor,)\n return (multiples, remainder)\n\n\ndef aligned_coarsen_chunks(chunks: List[int], multiple: int) -> Tuple[int]:\n \"\"\"\n Returns a new chunking aligned with the coarsening multiple.\n Any excess is at the end of the array.\n\n Examples\n --------\n >>> aligned_coarsen_chunks(chunks=(1, 2, 3), multiple=4)\n (4, 2)\n >>> aligned_coarsen_chunks(chunks=(1, 20, 3, 4), multiple=4)\n (4, 20, 4)\n >>> aligned_coarsen_chunks(chunks=(20, 10, 15, 23, 24), multiple=10)\n (20, 10, 20, 20, 20, 2)\n \"\"\"\n overflow = np.array(chunks) % multiple\n excess = overflow.sum()\n new_chunks = np.array(chunks) - overflow\n # valid chunks are those that are already factorizable by `multiple`\n chunk_validity = new_chunks == chunks\n valid_inds, invalid_inds = np.where(chunk_validity)[0], np.where(~chunk_validity)[0]\n # sort the invalid chunks by size (ascending), then concatenate the results of\n # sorting the valid chunks by size (ascending)\n chunk_modification_order = [\n *invalid_inds[np.argsort(new_chunks[invalid_inds])],\n *valid_inds[np.argsort(new_chunks[valid_inds])],\n ]\n partitioned_excess, remainder = _partition(excess, multiple)\n # add elements the partitioned excess to the smallest invalid chunks,\n # then smallest valid chunks if needed.\n for idx, extra in enumerate(partitioned_excess):\n new_chunks[chunk_modification_order[idx]] += extra\n # create excess chunk with remainder, if any remainder exists\n new_chunks = np.array([*new_chunks, *remainder])\n # remove 0-sized chunks\n new_chunks = new_chunks[new_chunks > 0]\n return tuple(new_chunks)\n\n\n@wraps(chunk.coarsen)\ndef coarsen(reduction, x, axes, trim_excess=False, **kwargs):\n if not trim_excess and not all(x.shape[i] % div == 0 for i, div in axes.items()):\n msg = f\"Coarsening factors {axes} do not align with array shape {x.shape}.\"\n raise ValueError(msg)\n\n if \"dask\" in inspect.getfile(reduction):\n reduction = getattr(np, reduction.__name__)\n\n new_chunks = {}\n for i, div in axes.items():\n aligned = aligned_coarsen_chunks(x.chunks[i], div)\n if aligned != x.chunks[i]:\n new_chunks[i] = aligned\n if new_chunks:\n x = x.rechunk(new_chunks)\n\n name = \"coarsen-\" + tokenize(reduction, x, axes, trim_excess)\n dsk = {\n (name,)\n + key[1:]: (apply, chunk.coarsen, [reduction, key, axes, trim_excess], kwargs)\n for key in flatten(x.__dask_keys__())\n }\n chunks = tuple(\n tuple(int(bd // axes.get(i, 1)) for bd in bds) for i, bds in enumerate(x.chunks)\n )\n\n meta = reduction(np.empty((1,) * x.ndim, dtype=x.dtype), **kwargs)\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[x])\n return Array(graph, name, chunks, meta=meta)\n\n\ndef split_at_breaks(array, breaks, axis=0):\n \"\"\"Split an array into a list of arrays (using slices) at the given breaks\n\n >>> split_at_breaks(np.arange(6), [3, 5])\n [array([0, 1, 2]), array([3, 4]), array([5])]\n \"\"\"\n padded_breaks = concat([[None], breaks, [None]])\n slices = [slice(i, j) for i, j in sliding_window(2, padded_breaks)]\n preslice = (slice(None),) * axis\n split_array = [array[preslice + (s,)] for s in slices]\n return split_array\n\n\n@derived_from(np)\ndef insert(arr, obj, values, axis):\n # axis is a required argument here to avoid needing to deal with the numpy\n # default case (which reshapes the array to make it flat)\n axis = validate_axis(axis, arr.ndim)\n\n if 
isinstance(obj, slice):\n obj = np.arange(*obj.indices(arr.shape[axis]))\n obj = np.asarray(obj)\n scalar_obj = obj.ndim == 0\n if scalar_obj:\n obj = np.atleast_1d(obj)\n\n obj = np.where(obj < 0, obj + arr.shape[axis], obj)\n if (np.diff(obj) < 0).any():\n raise NotImplementedError(\n \"da.insert only implemented for monotonic ``obj`` argument\"\n )\n\n split_arr = split_at_breaks(arr, np.unique(obj), axis)\n\n if getattr(values, \"ndim\", 0) == 0:\n # we need to turn values into a dask array\n name = \"values-\" + tokenize(values)\n dtype = getattr(values, \"dtype\", type(values))\n values = Array({(name,): values}, name, chunks=(), dtype=dtype)\n\n values_shape = tuple(\n len(obj) if axis == n else s for n, s in enumerate(arr.shape)\n )\n values = broadcast_to(values, values_shape)\n elif scalar_obj:\n values = values[(slice(None),) * axis + (None,)]\n\n values_chunks = tuple(\n values_bd if axis == n else arr_bd\n for n, (arr_bd, values_bd) in enumerate(zip(arr.chunks, values.chunks))\n )\n values = values.rechunk(values_chunks)\n\n counts = np.bincount(obj)[:-1]\n values_breaks = np.cumsum(counts[counts > 0])\n split_values = split_at_breaks(values, values_breaks, axis)\n\n interleaved = list(interleave([split_arr, split_values]))\n interleaved = [i for i in interleaved if i.nbytes]\n return concatenate(interleaved, axis=axis)\n\n\n@derived_from(np)\ndef delete(arr, obj, axis):\n \"\"\"\n NOTE: If ``obj`` is a dask array it is implicitly computed when this function\n is called.\n \"\"\"\n # axis is a required argument here to avoid needing to deal with the numpy\n # default case (which reshapes the array to make it flat)\n axis = validate_axis(axis, arr.ndim)\n\n if isinstance(obj, slice):\n tmp = np.arange(*obj.indices(arr.shape[axis]))\n obj = tmp[::-1] if obj.step and obj.step < 0 else tmp\n else:\n obj = np.asarray(obj)\n obj = np.where(obj < 0, obj + arr.shape[axis], obj)\n obj = np.unique(obj)\n\n target_arr = split_at_breaks(arr, obj, axis)\n\n target_arr = [\n arr[\n tuple(slice(1, None) if axis == n else slice(None) for n in range(arr.ndim))\n ]\n if i != 0\n else arr\n for i, arr in enumerate(target_arr)\n ]\n return concatenate(target_arr, axis=axis)\n\n\n@derived_from(np)\ndef append(arr, values, axis=None):\n # based on numpy.append\n arr = asanyarray(arr)\n if axis is None:\n if arr.ndim != 1:\n arr = arr.ravel()\n values = ravel(asanyarray(values))\n axis = arr.ndim - 1\n return concatenate((arr, values), axis=axis)\n\n\ndef _average(a, axis=None, weights=None, returned=False, is_masked=False):\n # This was minimally modified from numpy.average\n # See numpy license at https://github.com/numpy/numpy/blob/master/LICENSE.txt\n # or NUMPY_LICENSE.txt within this directory\n # Wrapper used by da.average or da.ma.average.\n a = asanyarray(a)\n\n if weights is None:\n avg = a.mean(axis)\n scl = avg.dtype.type(a.size / avg.size)\n else:\n wgt = asanyarray(weights)\n\n if issubclass(a.dtype.type, (np.integer, np.bool_)):\n result_dtype = result_type(a.dtype, wgt.dtype, \"f8\")\n else:\n result_dtype = result_type(a.dtype, wgt.dtype)\n\n # Sanity checks\n if a.shape != wgt.shape:\n if axis is None:\n raise TypeError(\n \"Axis must be specified when shapes of a and weights differ.\"\n )\n if wgt.ndim != 1:\n raise TypeError(\n \"1D weights expected when shapes of a and weights differ.\"\n )\n if wgt.shape[0] != a.shape[axis]:\n raise ValueError(\n \"Length of weights not compatible with specified axis.\"\n )\n\n # setup wgt to broadcast along axis\n wgt = broadcast_to(wgt, 
(a.ndim - 1) * (1,) + wgt.shape)\n wgt = wgt.swapaxes(-1, axis)\n if is_masked:\n from .ma import getmaskarray\n\n wgt = wgt * (~getmaskarray(a))\n scl = wgt.sum(axis=axis, dtype=result_dtype)\n avg = multiply(a, wgt, dtype=result_dtype).sum(axis) / scl\n\n if returned:\n if scl.shape != avg.shape:\n scl = broadcast_to(scl, avg.shape).copy()\n return avg, scl\n else:\n return avg\n\n\n@derived_from(np)\ndef average(a, axis=None, weights=None, returned=False):\n return _average(a, axis, weights, returned, is_masked=False)\n\n\n@derived_from(np)\ndef tril(m, k=0):\n m = asarray_safe(m, like=m)\n mask = tri(\n *m.shape[-2:], k=k, dtype=bool, chunks=m.chunks[-2:], like=meta_from_array(m)\n )\n\n return where(mask, m, zeros_like_safe(m, shape=(1,)))\n\n\n@derived_from(np)\ndef triu(m, k=0):\n m = asarray_safe(m, like=m)\n mask = tri(\n *m.shape[-2:],\n k=k - 1,\n dtype=bool,\n chunks=m.chunks[-2:],\n like=meta_from_array(m),\n )\n\n return where(mask, zeros_like_safe(m, shape=(1,)), m)\n\n\n@derived_from(np)\ndef tril_indices(n, k=0, m=None, chunks=\"auto\"):\n return nonzero(tri(n, m, k=k, dtype=bool, chunks=chunks))\n\n\n@derived_from(np)\ndef tril_indices_from(arr, k=0):\n if arr.ndim != 2:\n raise ValueError(\"input array must be 2-d\")\n return tril_indices(arr.shape[-2], k=k, m=arr.shape[-1], chunks=arr.chunks)\n\n\n@derived_from(np)\ndef triu_indices(n, k=0, m=None, chunks=\"auto\"):\n return nonzero(~tri(n, m, k=k - 1, dtype=bool, chunks=chunks))\n\n\n@derived_from(np)\ndef triu_indices_from(arr, k=0):\n if arr.ndim != 2:\n raise ValueError(\"input array must be 2-d\")\n return triu_indices(arr.shape[-2], k=k, m=arr.shape[-1], chunks=arr.chunks)\n" ]
[ [ "numpy.promote_types", "numpy.min", "numpy.histogramdd", "numpy.where", "numpy.apply_along_axis", "numpy.cumsum", "numpy.gradient", "numpy.bincount", "numpy.histogram", "numpy.count_nonzero", "numpy.empty", "numpy.vectorize", "numpy.take", "numpy.unravel_index", "numpy.ndim", "numpy.array", "numpy.diff", "numpy.isscalar", "numpy.argsort", "numpy.absolute", "numpy.ravel_multi_index", "pandas.isnull", "numpy.result_type", "numpy.asarray", "numpy.ones", "numpy.choose", "numpy.atleast_1d", "numpy.linspace", "numpy.unique" ] ]
flyaway1217/FeVER
[ "cd4a82cb2f2405671e53cecafcf59cda30eff8eb" ]
[ "FeVER/test/memory_test.py" ]
[ "# !/usr/bin/env python3\n# -*- coding:utf-8 -*-\n#\n# Author: Yichu Zhou - [email protected]\n# Blog: zhouyichu.com\n#\n# Python release: 3.6.0\n#\n# Date: 2018-10-02 15:03:12\n# Last modified: 2018-10-03 10:28:08\n\n\"\"\"\nMemory test.\n\"\"\"\n\nimport numpy as np\nimport torch\n\n# M = 500000\n# \n# x = [1, 2, 3, 4, 5] * 100\n# data = []\n# \n# for i in range(M):\n# t = np.int32(x[:])\n# data.append(torch.from_numpy(t).long())\n# \n# \n# print('I am done !')\n# while True:\n# pass\n\nx = [1, 2, 34]\nx = np.int32(x)\ny = torch.from_numpy(x).long()\nprint(y.type())\n \n" ]
[ [ "numpy.int32", "torch.from_numpy" ] ]
andreas-h/ledger-quotes
[ "6b300a7535f0bc3c9421bb29015ae6906a5aecad" ]
[ "aggregate_estateguru.py" ]
[ "#!/usr/bin/env python3\n\nfrom sys import argv\nimport pandas as pd\n\nSTRINGS_DE = {\n 'payment_date': 'Zahlungsdatum',\n 'cash_flow_status': 'Cashflow-Status',\n 'cash_flow_type': 'Cashflow-Typ',\n 'approved': 'Genehmigt',\n 'amount': 'Betrag',\n 'investment_auto': 'Investition(Auto Invest)',\n 'referral': 'Empfehlungsbonus',\n 'interest': 'Zins',\n}\n\nSTRINGS_EN = {\n 'payment_date': 'Payment Date',\n 'confirmation_date': 'Confirmation Date',\n 'cash_flow_status': 'Cash Flow Status',\n 'cash_flow_type': 'Cash Flow Type',\n 'approved': 'Approved',\n 'amount': 'Amount',\n 'investment_auto': 'Investment(Auto Invest)',\n 'referral': 'Referral',\n 'interest': 'Interest',\n 'principal': 'Principal',\n}\n\ndef aggregate(filename, string_translation):\n df = pd.read_csv(filename)\n df.set_index(pd.DatetimeIndex(df[string_translation['confirmation_date']], dayfirst=True), inplace=True)\n df = df[~df.index.isnull()]\n df = df[df[string_translation['cash_flow_status']] == string_translation['approved']]\n df1 = df.groupby([pd.Grouper(freq='M'), string_translation['cash_flow_type']])[string_translation['amount']].sum()\n return df1\n\n\ndef format(df, string_translation):\n print(df.index.levels[0][0].strftime('%Y-%m-%d Monatsabrechnung Estateguru'))\n try:\n print(' Einnahmen:Zinsen:p2p:Estateguru € {:>8.2f} ; Interest'.format(- df.loc[(slice(None), string_translation['interest'])].iloc[0]))\n except (pd.core.indexing.IndexingError, KeyError):\n pass\n try:\n print(' Einnahmen:Finanzen:Boni:p2p:EstateGuru € {:>8.2f} ; Referral'.format(- df.loc[(slice(None), string_translation['referral'])].iloc[0]))\n except (pd.core.indexing.IndexingError, KeyError):\n pass\n try:\n print(' Aktiva:Darlehen:p2p:Estateguru € {:>8.2f} ; Investment(Auto Invest)'.format(- df.loc[(slice(None), string_translation['investment_auto'])].iloc[0]))\n except (pd.core.indexing.IndexingError, KeyError):\n pass\n try:\n print(' Aktiva:Darlehen:p2p:Estateguru € {:>8.2f} ; Principal'.format(- df.loc[(slice(None), string_translation['principal'])].iloc[0]))\n except (pd.core.indexing.IndexingError, KeyError):\n pass\n print(' Aktiva:Sparkonten:p2p:Estateguru')\n print()\n\n\nif __name__ == '__main__':\n if len(argv) != 2:\n raise ValueError\n for lang in [STRINGS_DE, STRINGS_EN]:\n try:\n df = aggregate(argv[1], lang)\n format(df, lang)\n break\n except (KeyError, AttributeError):\n pass\n" ]
[ [ "pandas.Grouper", "pandas.read_csv", "pandas.DatetimeIndex" ] ]
zhhongsh/StablePose
[ "bf0b8bd98b9f514776474f1b368608e13ec4a84d" ]
[ "train_lmo.py" ]
[ "import torch.utils as utils\nimport argparse\nimport os\nimport random\nimport time\nimport numpy as np\nimport torch\nimport sys\nimport torch.nn.parallel\nimport torch.optim as optim\nimport torch.utils.data\nfrom torch.autograd import Variable\nfrom datasets.tless.dataset_triplet import PoseDataset as PoseDataset_lmo\nfrom datasets.linemod.dataset_lmo import PoseDataset as PoseDataset_linemod\nfrom lib.network_lmo import PatchNet, PoseRefineNet\nfrom lib.loss_tless import Loss\nfrom lib.loss_refiner import Loss_refine\nfrom lib.utils import setup_logger\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--dataset', type=str, default='lmo')\nparser.add_argument('--dataset_root', type=str, default='/home/dell/yifeis/pose/bop_datasets/linemod/lmo/',\n help='dataset root dir')\nparser.add_argument('--batch_size', type=int, default=8, help='batch size')\nparser.add_argument('--workers', type=int, default=64, help='number of data loading workers')\nparser.add_argument('--lr', default=0.0001, help='learning rate')\nparser.add_argument('--lr_rate', default=0.3, help='learning rate decay rate')\nparser.add_argument('--w', default=0.015, help='learning rate')\nparser.add_argument('--w_rate', default=0.3, help='learning rate decay rate')\nparser.add_argument('--decay_margin', default=0.01, help='margin to decay lr & w')\nparser.add_argument('--refine_margin', default=0.001, help='margin to start the training of iterative refinement')\nparser.add_argument('--noise_trans', default=0.03,\n help='range of the random noise of translation added to the training data')\nparser.add_argument('--iteration', type=int, default=2, help='number of refinement iterations')\nparser.add_argument('--nepoch', type=int, default=500, help='max numbesr of epochs to train')\nparser.add_argument('--resume_posenet', type=str, default='', help='resume PoseNet model')#pose_model_2_193909.25539978288.pth\nparser.add_argument('--resume_refinenet', type=str, default='', help='resume PoseRefineNet model')\nparser.add_argument('--start_epoch', type=int, default=1, help='which epoch to start')\nopt = parser.parse_args()\nos.environ['CUDA_VISIBLE_DEVICES'] = '2'\n\nproj_dir = os.getcwd()+'/'\ntorch.set_num_threads(64)\n\ndef main():\n opt.manualSeed = random.randint(1, 10000)\n random.seed(opt.manualSeed)\n torch.manual_seed(opt.manualSeed)\n\n if opt.dataset == 'lmo':\n opt.num_objects = 8\n opt.num_points = 2000\n opt.outf = proj_dir +'trained_models/lmo/'\n opt.log_dir = proj_dir +'experiments/logs/lmo/'\n opt.repeat_epoch = 2\n else:\n print('Unknown dataset')\n return\n # torch.distributed.init_process_group(backend='nccl', init_method='tcp://localhost:23456', rank=0, world_size=1)\n estimator = PatchNet(num_obj=opt.num_objects)\n # estimator = torch.nn.DataParallel(estimator)\n estimator = estimator.cuda()\n # estimator = torch.nn.parallel.DistributedDataParallel(estimator,find_unused_parameters=True)\n\n total_params = sum(p.numel() for p in estimator.parameters())\n print(f'{total_params:,} total parameters.')\n total_trainable_params = sum(\n p.numel() for p in estimator.parameters() if p.requires_grad)\n print(f'{total_trainable_params:,} training parameters.')\n # print(estimator)\n refiner = PoseRefineNet(num_points=opt.num_points, num_obj=opt.num_objects)\n refiner.cuda()\n # utils.print_network(estimator)\n if opt.resume_posenet != '':\n estimator.load_state_dict(torch.load('{0}/{1}'.format(opt.outf, opt.resume_posenet)))\n\n if opt.resume_refinenet != '':\n 
refiner.load_state_dict(torch.load('{0}/{1}'.format(opt.outf, opt.resume_refinenet)))\n opt.refine_start = False # True\n opt.decay_start = True\n opt.lr *= opt.lr_rate\n opt.w *= opt.w_rate\n opt.batch_size = int(opt.batch_size / opt.iteration)\n optimizer = optim.Adam(refiner.parameters(), lr=opt.lr)\n else:\n opt.refine_start = False\n opt.decay_start = False\n optimizer = optim.Adam(estimator.parameters(), lr=opt.lr, weight_decay=0.01)\n\n\n dataset = PoseDataset_linemod('train', opt.num_points, False, opt.dataset_root, opt.noise_trans,\n opt.refine_start)\n dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=True, num_workers=opt.workers,\n pin_memory=True)\n\n test_dataset = PoseDataset_linemod('test', opt.num_points, False, opt.dataset_root, 0.0, opt.refine_start)\n testdataloader = torch.utils.data.DataLoader(test_dataset, batch_size=1, shuffle=False, num_workers=opt.workers,\n pin_memory=True)\n\n opt.sym_list = dataset.get_sym_list()\n nosym_list = dataset.get_nosym_list()\n rot_list = dataset.get_rot_list()\n ref_list = dataset.get_ref_list()\n opt.num_points_mesh = dataset.get_num_points_mesh()\n\n print(\n '>>>>>>>>----------Dataset loaded!---------<<<<<<<<\\nlength of the training set: {0}\\nlength of the testing set: {1}\\nnumber of sample points on mesh: {2}\\nsymmetry object list: {3}'.format(\n len(dataset), len(test_dataset), opt.num_points_mesh, opt.sym_list))\n\n criterion = Loss(opt.num_points_mesh, opt.sym_list,rot_list,ref_list,nosym_list)\n criterion_refine = Loss_refine(opt.num_points_mesh, opt.sym_list)\n\n best_test = np.Inf\n st_time = time.time()\n\n for epoch in range(opt.start_epoch, opt.nepoch):\n logger = setup_logger('epoch%d' % epoch, os.path.join(opt.log_dir, 'epoch_%d_log.txt' % epoch))\n logger.info('Train time {0}'.format(\n time.strftime(\"%Hh %Mm %Ss\", time.gmtime(time.time() - st_time)) + ', ' + 'Training started'))\n train_count = 0\n train_dis_avg = 0.0\n train_patch_avg = 0.0\n train_norm_avg = 0.0\n\n if opt.refine_start:\n estimator.eval()\n refiner.train()\n else:\n estimator.train()\n optimizer.zero_grad()\n\n for rep in range(opt.repeat_epoch):\n for i, data in enumerate(dataloader, 0):\n points, choose, img, target_rt,target_trans, idx, \\\n choose_patchs,target_pt,model_points,normals,model_info,model_axis,_,_ = data\n\n points, choose, img, target_rt, target_trans,idx,\\\n target_pt, model_points,normals,model_axis = Variable(points).cuda(), \\\n Variable(choose).cuda(), \\\n Variable(img).cuda(), \\\n Variable(target_rt).cuda(), \\\n Variable(target_trans).cuda(),\\\n Variable(idx).cuda(), \\\n Variable(target_pt).cuda(),\\\n Variable(model_points).cuda(),\\\n Variable(normals).cuda(),\\\n Variable(model_axis).cuda()\n\n normal_ls = []\n for patch_id in range(len(choose_patchs)):\n normal_ls.append(normals[0][choose_patchs[patch_id][0]])\n\n pred_r, pred_t, pred_choose = estimator(img, points, choose, choose_patchs, idx)\n\n loss, dis, norm_loss, patch_loss, r_pred, t_pred, _ = criterion(pred_r, pred_t, pred_choose, target_rt,\n target_trans, idx, points,opt.w,\n target_pt,model_points,\n model_info)\n\n if opt.refine_start:\n dis.backward()\n else:\n loss.backward()\n\n torch.cuda.empty_cache()\n\n train_dis_avg += dis.item()\n train_patch_avg += patch_loss.item()\n train_norm_avg += norm_loss.item()\n train_count += 1\n\n if train_count % opt.batch_size == 0:\n logger.info(\n 'Train time {0} Epoch {1} Batch {2} Frame {3} idx:{7} Avg_dis:{4} Avg_norm:{5} Avg_patch:{6}'.format(\n time.strftime(\"%Hh %Mm %Ss\", 
time.gmtime(time.time() - st_time)), epoch,\n int(train_count / opt.batch_size), train_count, train_dis_avg / opt.batch_size,\n train_norm_avg / opt.batch_size,\n train_patch_avg / opt.batch_size,\n idx))\n optimizer.step()\n optimizer.zero_grad()\n train_dis_avg = 0\n train_norm_avg = 0\n train_patch_avg = 0\n\n if train_count != 0 and train_count % 1000 == 0:\n if opt.refine_start:\n torch.save(refiner.state_dict(), '{0}/pose_refine_model_current.pth'.format(opt.outf))\n else:\n torch.save(estimator.state_dict(), '{0}/pose_model_current.pth'.format(opt.outf))\n\n print('>>>>>>>>----------epoch {0} train finish---------<<<<<<<<'.format(epoch))\n\n logger = setup_logger('epoch%d_test' % epoch, os.path.join(opt.log_dir, 'epoch_%d_test_log.txt' % epoch))\n logger.info('Test time {0}'.format(\n time.strftime(\"%Hh %Mm %Ss\", time.gmtime(time.time() - st_time)) + ', ' + 'Testing started'))\n test_dis = 0.0\n test_patch = 0.0\n test_norm = 0.0\n test_count = 0\n estimator.eval()\n refiner.eval()\n\n for j, data in enumerate(testdataloader, 0):\n points, choose, img, target_rt, target_trans, idx, \\\n choose_patchs, target_pt, model_points, normals, model_info, model_axis, _, _ = data\n\n points, choose, img, target_rt, target_trans, idx, \\\n target_pt, model_points, normals, model_axis = Variable(points).cuda(), \\\n Variable(choose).cuda(), \\\n Variable(img).cuda(), \\\n Variable(target_rt).cuda(), \\\n Variable(target_trans).cuda(), \\\n Variable(idx).cuda(), \\\n Variable(target_pt).cuda(), \\\n Variable(model_points).cuda(), \\\n Variable(normals).cuda(), \\\n Variable(model_axis).cuda()\n\n normal_ls = []\n for patch_id in range(len(choose_patchs)):\n normal_ls.append(normals[0][choose_patchs[patch_id][0]])\n\n pred_r, pred_t, pred_choose = estimator(img, points, choose, choose_patchs, idx)\n\n loss, dis, norm_loss, patch_loss, r_pred, t_pred, _ = criterion(pred_r, pred_t, pred_choose, target_rt,\n target_trans, idx, points, opt.w,\n target_pt, model_points,\n model_info)\n\n test_dis += dis.item()\n test_norm += norm_loss.item()\n test_patch += patch_loss.item()\n logger.info('Test time {0} Test Frame No.{1} idx:{5} dis:{2} norm_loss:{3} patch_loss:{4}'.format(\n time.strftime(\"%Hh %Mm %Ss\", time.gmtime(time.time() - st_time)), test_count, dis, norm_loss,\n patch_loss,idx))\n\n test_count += 1\n\n test_dis = test_dis / test_count\n test_norm = test_norm / test_count\n test_patch = test_patch / test_count\n logger.info('Test time {0} Epoch {1} TEST FINISH Avg dis: {2} avg norm: {3} avg tless: {4}'.format(\n time.strftime(\"%Hh %Mm %Ss\", time.gmtime(time.time() - st_time)), epoch, test_dis, test_norm, test_patch))\n if test_dis <= best_test:\n best_test = test_dis\n if opt.refine_start:\n torch.save(refiner.state_dict(), '{0}/pose_refine_model_{1}_{2}.pth'.format(opt.outf, epoch, test_dis))\n else:\n torch.save(estimator.state_dict(), '{0}/pose_model_{1}_{2}.pth'.format(opt.outf, epoch, test_dis))\n print(epoch, '>>>>>>>>----------BEST TEST MODEL SAVED---------<<<<<<<<')\n\n if best_test < opt.decay_margin and not opt.decay_start:\n opt.decay_start = True\n opt.lr *= opt.lr_rate\n opt.w *= opt.w_rate\n optimizer = optim.Adam(estimator.parameters(), lr=opt.lr)\n\n if best_test < opt.refine_margin and not opt.refine_start:\n opt.refine_start = True\n opt.batch_size = int(opt.batch_size / opt.iteration)\n optimizer = optim.Adam(refiner.parameters(), lr=opt.lr)\n\n dataset = PoseDataset_linemod('train', opt.num_points, False, opt.dataset_root, opt.noise_trans,\n opt.refine_start)\n 
dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=True, num_workers=opt.workers,\n pin_memory=True)\n\n test_dataset = PoseDataset_linemod('test', opt.num_points, False, opt.dataset_root, 0.0, opt.refine_start)\n testdataloader = torch.utils.data.DataLoader(test_dataset, batch_size=1, shuffle=False,\n num_workers=opt.workers,\n pin_memory=True)\n\n opt.sym_list = dataset.get_sym_list()\n opt.num_points_mesh = dataset.get_num_points_mesh()\n\n print(\n '>>>>>>>>----------Dataset loaded!---------<<<<<<<<\\nlength of the training set: {0}\\nlength of the testing set: {1}\\nnumber of sample points on mesh: {2}\\nsymmetry object list: {3}'.format(\n len(dataset), len(test_dataset), opt.num_points_mesh, opt.sym_list))\n\n criterion = Loss(opt.num_points_mesh, opt.sym_list)\n criterion_refine = Loss_refine(opt.num_points_mesh, opt.sym_list)\n\ndef displayPoint(data,target,view,title):\n\n import matplotlib.pyplot as plt\n from mpl_toolkits.mplot3d import Axes3D\n plt.rcParams['axes.unicode_minus'] = False\n\n while len(data[0]) > 20000:\n print(\"too much point\")\n exit()\n\n fig = plt.figure()\n ax = Axes3D(fig)\n ax.set_title(title)\n ax.scatter3D(data[:,0], data[:,1], data[:,2], c='r', marker='.')\n ax.scatter3D(target[:, 0], target[:, 1], target[:, 2], c='b', marker='.')\n ax.scatter3D(view[:, 0], view[:, 1], view[:, 2], c='g', marker='.')\n ax.set_xlabel('x')\n ax.set_ylabel('y')\n ax.set_zlabel('z')\n plt.show()\n plt.close()\nif __name__ == '__main__':\n main()\n" ]
[ [ "torch.autograd.Variable", "matplotlib.pyplot.close", "matplotlib.pyplot.figure", "torch.manual_seed", "torch.cuda.empty_cache", "torch.utils.data.DataLoader", "matplotlib.pyplot.show", "torch.set_num_threads" ] ]
franksam007/incubator-superset
[ "a0f572eb3ea4b89cb435a8af20436f8e1d34814e" ]
[ "superset/data/unicode_test_data.py" ]
[ "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\nimport datetime\nimport json\nimport random\n\nimport pandas as pd\nfrom sqlalchemy import Date, Float, String\n\nfrom superset import db\nfrom superset.utils import core as utils\nfrom .helpers import (\n config,\n Dash,\n get_example_data,\n get_slice_json,\n merge_slice,\n Slice,\n TBL,\n update_slice_ids,\n)\n\n\ndef load_unicode_test_data():\n \"\"\"Loading unicode test dataset from a csv file in the repo\"\"\"\n data = get_example_data(\n 'unicode_utf8_unixnl_test.csv', is_gzip=False, make_bytes=True)\n df = pd.read_csv(data, encoding='utf-8')\n # generate date/numeric data\n df['dttm'] = datetime.datetime.now().date()\n df['value'] = [random.randint(1, 100) for _ in range(len(df))]\n df.to_sql( # pylint: disable=no-member\n 'unicode_test',\n db.engine,\n if_exists='replace',\n chunksize=500,\n dtype={\n 'phrase': String(500),\n 'short_phrase': String(10),\n 'with_missing': String(100),\n 'dttm': Date(),\n 'value': Float(),\n },\n index=False)\n print('Done loading table!')\n print('-' * 80)\n\n print('Creating table [unicode_test] reference')\n obj = db.session.query(TBL).filter_by(table_name='unicode_test').first()\n if not obj:\n obj = TBL(table_name='unicode_test')\n obj.main_dttm_col = 'dttm'\n obj.database = utils.get_or_create_main_db()\n db.session.merge(obj)\n db.session.commit()\n obj.fetch_metadata()\n tbl = obj\n\n slice_data = {\n 'granularity_sqla': 'dttm',\n 'groupby': [],\n 'metric': 'sum__value',\n 'row_limit': config.get('ROW_LIMIT'),\n 'since': '100 years ago',\n 'until': 'now',\n 'where': '',\n 'viz_type': 'word_cloud',\n 'size_from': '10',\n 'series': 'short_phrase',\n 'size_to': '70',\n 'rotation': 'square',\n 'limit': '100',\n }\n\n print('Creating a slice')\n slc = Slice(\n slice_name='Unicode Cloud',\n viz_type='word_cloud',\n datasource_type='table',\n datasource_id=tbl.id,\n params=get_slice_json(slice_data),\n )\n merge_slice(slc)\n\n print('Creating a dashboard')\n dash = (\n db.session.query(Dash)\n .filter_by(dashboard_title='Unicode Test')\n .first()\n )\n\n if not dash:\n dash = Dash()\n js = \"\"\"\\\n{\n \"CHART-Hkx6154FEm\": {\n \"children\": [],\n \"id\": \"CHART-Hkx6154FEm\",\n \"meta\": {\n \"chartId\": 2225,\n \"height\": 30,\n \"sliceName\": \"slice 1\",\n \"width\": 4\n },\n \"type\": \"CHART\"\n },\n \"GRID_ID\": {\n \"children\": [\n \"ROW-SyT19EFEQ\"\n ],\n \"id\": \"GRID_ID\",\n \"type\": \"GRID\"\n },\n \"ROOT_ID\": {\n \"children\": [\n \"GRID_ID\"\n ],\n \"id\": \"ROOT_ID\",\n \"type\": \"ROOT\"\n },\n \"ROW-SyT19EFEQ\": {\n \"children\": [\n \"CHART-Hkx6154FEm\"\n ],\n \"id\": \"ROW-SyT19EFEQ\",\n \"meta\": {\n \"background\": \"BACKGROUND_TRANSPARENT\"\n },\n \"type\": \"ROW\"\n },\n \"DASHBOARD_VERSION_KEY\": \"v2\"\n}\n \"\"\"\n 
dash.dashboard_title = 'Unicode Test'\n pos = json.loads(js)\n update_slice_ids(pos, [slc])\n dash.position_json = json.dumps(pos, indent=4)\n dash.slug = 'unicode-test'\n dash.slices = [slc]\n db.session.merge(dash)\n db.session.commit()\n" ]
[ [ "pandas.read_csv" ] ]
liquidpeachy/advertools2
[ "86a7294c68f966e16ff1a021020645693770d91e" ]
[ "advertools2/knowledge_graph.py" ]
[ "\"\"\"\n.. _knowledge_graph:\n\nImport and Analyze Knowledge Graph Results on a Large Scale\n===========================================================\n\nIf :ref:`analyzing SERPs <serp>` is the first step in understanding your\nrankings on search engines, then analyzing the knowledge graph can be thought\nof as step zero.\n\nSERP positions for a certain keyword show how each page is ranked in comparison\nto all other eligible pages. Knowledge graph scores on the other hand, show the\nranks of the different meanings that a word can take for Google (a person, a\ncity, a brand, etc.).\n\n.. WARNING:: From `Google's documentation <https://developers.google.com/knowledge-graph>`_:\n This API is not suitable for use as a production-critical service. Your\n product should not form a critical dependence on this API.\n\nIt's not clear whether this is from a technical reliability or a content\ncorrectness point of view, but it is what the docs mention. So please keep this\nin mind when using it.\n\nAccount Setup\n*************\n\nIn order to be able to send requests, you will need to `create a project\n<https://console.developers.google.com/>`_, `set up billing\n<https://console.developers.google.com/billing>`_, and `activate the knowledge\ngraph API <https://console.developers.google.com/apis/library>`_ for your\nproject. You will then need to `create credentials\n<https://console.developers.google.com/apis/credentials>`_ (API Key).\nOnce you have that, you can use it as your ``key`` parameter when running\nrequests, as shown below.\n\nHow to use Google's Knowledge Graph API\n***************************************\n\n\nWhat is \"google\"? Is it a search engine, a company, a brand, a very large\nnumber? What else is it?\n\nAnd if it is all of those things, what is the relative ranking of each? What\nis the source of the information, its URL, images (if any)?\n\n.. code-block:: python\n\n >>> key = 'YOUR_GOOGLE_DEVELOPER_KEY'\n >>> google = knowledge_graph(key=key, query='google')\n >>> google\n query\tresultScore\t result.@type result.description result.name\n 0\tgoogle\t 203191\t ['Corporation', 'Organization', 'Thing'] Technology company\t Google\n 1\tgoogle\t 49462\t ['WebSite', 'Thing'] Google Search\n 2\tgoogle\t 19142\t ['WebSite', 'Thing'] nan Gmail\n 3\tgoogle\t 13251\t ['Brand', 'WebSite', 'Thing'] Website\t Google Maps\n 4\tgoogle\t 7549\t['WebSite', 'SoftwareApplication', 'Thing'] Website Google Drive\n 5\tgoogle\t 6853\t ['WebSite', 'Thing'] Website\t Google Play\n 6\tgoogle\t 6543\t ['SoftwareApplication', 'Thing'] Web browser Google Chrome\n 7\tgoogle\t 4312\t ['Corporation', 'Organization', 'Thing'] Multinational conglomerate company Alphabet Inc.\n 8\tgoogle\t 3395\t ['SoftwareApplication', 'Thing'] nan Google Account\n 9\tgoogle\t 1306\t ['Thing'] nan Google\n\n >>> google.columns\n Index(['query', 'resultScore', '@type', 'result.@type', 'result.description',\n 'result.image.contentUrl', 'result.image.url',\n 'result.detailedDescription.articleBody',\n 'result.detailedDescription.url', 'result.detailedDescription.license',\n 'result.url', 'result.name', 'result.@id', 'query_time'],\n dtype='object')\n\n\n\nThe above table is a sample response from the :func:`knowledge_graph` function.\nMany more columns are available as you can see in the second line above.\nWe can see that \"google\" is a company, with a result score of 203,191 and it is\na search engine/website with a result score of 49,462. 
It is then understood as\nan email application, a mapping application, and so on, as you can see in the\n`result.name` column.\n\nYou can also see that we get the types under which this result falls, in the\n`result.@type` column. Multiple types show the type inheritance, and as you can\nalso see, everything is a \"Thing\". This is the top element in the type\nhierarchy under which everything belongs.\n\nLike the :ref:`Google SERP <serp>` and :ref:`YouTube SERP <serp>` functions,\nthis function works in the same manner, creating, sending, and aggregating the\nproduct of the arguments passed\nto it.\n\nFor example, if you run\n\n>>> knowledge_graph(key=key, query=['google', 'bing'], languages=['en', 'fr', 'de'])\n\nThe function will send 2 (queries) x 3 (languages) = 6 requests.\n\n(google, en), (google, fr), (google, de), (bing, en), (bing, fr), (bing, de)\n\nThis is actually the main value of having this function, because you usually\nwant a large sample to evaluate certain keywords across languages or types.\n\nLet's check what \"seo\" and \"search engine optimization\" mean in different\nlanguages.\n\n>>> seo = knowledge_graph(key=key, query=['seo', 'search engine optimization'], languages=['en', 'es', 'de'])\n>>> seo\n\tquery\t languages\tresultScore\t result.name\t result.@type\t result.description\n0\tsearch engine optimization\tde \t 3587\t Suchmaschinenoptimierung\t ['Thing'] nan\n1\tsearch engine optimization\tde \t 321\t Lokale Suchmaschinenoptimierung\t ['Thing'] nan\n2\tsearch engine optimization\tde \t 252\t Suchmaschinenmarketing\t ['Thing'] nan\n4\tsearch engine optimization\ten \t 71756\t Search engine optimization\t ['Thing'] nan\n5\tsearch engine optimization\ten \t 5056\t Search engine marketing\t ['Thing'] nan\n6\tsearch engine optimization\ten \t 576\t SEOP, Inc.\t ['Organization', 'Corporation', 'Thing']\t Company\n13\tseo\t de \t 3313\t Seoul\t ['AdministrativeArea', 'Thing', 'City', 'Place'] Hauptstadt von Südkorea\n14\tseo\t de \t 1509\t Seo Yea-ji\t ['Thing', 'Person']\t Schauspielerin\n15\tseo\t de \t 584\t Suchmaschinenoptimierung\t ['Thing']\t nan\n33\tseo\t es \t 1509\t Seo Ye-ji\t ['Person', 'Thing']\t Actriz\n34\tseo\t es \t 584\t Posicionamiento en buscadores\t ['Thing']\t nan\n35\tseo\t es \t 316\t Jin\t ['Person', 'Thing']\t Cantante\n53\tseo\t en \t 8760\t Search engine optimization\t ['Thing']\t nan\n54\tseo\t en \t 3313\t Seoul\t ['AdministrativeArea', 'Thing', 'City', 'Place'] Capital of South Korea\n55\tseo\t en \t 1435\t Sulli\t ['Thing', 'Person']\t South Korean actress\n\n>>> seo.columns\nIndex(['query', 'languages', 'resultScore', '@type', 'result.name',\n 'result.@type', 'result.@id', 'result.image.contentUrl',\n 'result.image.url', 'result.detailedDescription.license',\n 'result.detailedDescription.url',\n 'result.detailedDescription.articleBody', 'result.description',\n 'result.url', 'query_time'],\n dtype='object')\n\nIt's interesting to see how the same word can mean different things in\ndifferent contexts.\n\n\"\"\"\n\nimport logging\nfrom concurrent import futures\n\nimport pandas as pd\nimport requests\n\nfrom advertools2.serp import _dict_product\n\nparam_regex = '^query$|^ids$|^languages$|^types$|^prefix$|^limit$'\n\ndef knowledge_graph(key, query=None, ids=None, languages=None, types=None,\n prefix=None, limit=None):\n \"\"\"Query Google's Knowledge Graph with any combination of parameters.\n\n Note that Google's documentation states that \"This API is not suitable for\n use as a production-critical service.\" So please keep this in 
mind.\n\n :param string key: Your Google developer key.\n :param string query: A literal string to search for in the Knowledge Graph.\n :param string ids: A list of entity IDs to search for in the Knowledge\n Graph.\n :param string languages: The list of language codes (defined in ISO 639) to\n run the query with, for instance `en`.\n :param string types: Restricts returned entities to those of the specified\n types. For example, you can specify `Person` (as\n defined in http://schema.org/Person) to restrict the\n results to entities representing people. If multiple\n types are specified, returned entities will contain\n one or more of these types.\n :param boolean prefix: Enables prefix (initial substring) match against\n names and aliases of entities. For example, a\n prefix `Jung` will match entities and aliases such\n as `Jung`, `Jungle`, and `Jung-ho Kang`.\n :param number limit: Limits the number of entities to be returned. Maximum\n is 500. Default is 20. Requests with high limits have\n a higher chance of timing out.\n\n https://developers.google.com/knowledge-graph/reference/rest/v1\n \"\"\"\n params = locals()\n base_url = 'https://kgsearch.googleapis.com/v1/entities:search?'\n supplied_params = {k: v for k, v in params.items()\n if params[k] is not None}\n for p in supplied_params:\n if isinstance(supplied_params[p], (str, int)):\n supplied_params[p] = [supplied_params[p]]\n\n params_list = _dict_product(supplied_params)\n result_df = pd.DataFrame()\n\n def single_request(param):\n nonlocal result_df\n resp = requests.get(base_url, params=param)\n param_log = ', '.join([k + '=' + str(v) for k, v in param.items()])\n logging.info(msg='Requesting: ' + param_log)\n df = pd.json_normalize(resp.json(), record_path='itemListElement')\n del param['key']\n param_columns = {k: [v] if df.empty else v\n for k, v in param.items()}\n df = df.assign(**param_columns)\n result_df = result_df.append(df, ignore_index=True)\n\n with futures.ThreadPoolExecutor(max_workers=16) as executor:\n executor.map(single_request, params_list)\n\n reordered_df = pd.concat([result_df.filter(regex=param_regex),\n result_df.filter(regex=f'^(?!{param_regex})')],\n axis=1)\n reordered_df['query_time'] = pd.Timestamp.utcnow()\n return reordered_df\n" ]
[ [ "pandas.DataFrame", "pandas.Timestamp.utcnow" ] ]
aleksanderujek/knapsack-problem
[ "42ab0887e84979ebcc18d80a1f9269ce46088346" ]
[ "models/individual.py" ]
[ "import numpy as np\n\nclass Individual:\n knapsack = np.array([])\n fitness = 0\n weight = 0\n\n def __init__(self, n):\n if (n is not None):\n self.knapsack = np.random.choice([False, True], size=n)\n \n def num_of_elements(self):\n return np.sum(self.knapsack)\n\n def calculate_fitness(self, cost_list, weight_list, max_weight):\n fitness = np.dot(self.knapsack, cost_list)\n num_of_elements = self.num_of_elements()\n weight = self.calculate_weight(weight_list)\n if (num_of_elements % 2 != 0 or num_of_elements == 0 or weight > max_weight):\n self.fitness = -1\n else:\n self.fitness =fitness\n return self.fitness\n\n def calculate_weight(self, weight_list):\n self.weight = np.dot(self.knapsack, weight_list)\n return self.weight" ]
[ [ "numpy.sum", "numpy.array", "numpy.dot", "numpy.random.choice" ] ]
sheelabhadra/Learning2Drive
[ "f93cb5651c08b87b66b3f2ffc8a3512a9af73db4" ]
[ "environment/env.py" ]
[ "import random\nimport time\nimport os\nimport warnings\n\nimport gym\nfrom gym import spaces\nfrom gym.utils import seeding\nfrom PIL import Image\nimport numpy as np\n\nfrom config import INPUT_DIM, MIN_STEERING, MAX_STEERING, JERK_REWARD_WEIGHT, MAX_STEERING_DIFF\nfrom config import ROI, THROTTLE_REWARD_WEIGHT, MAX_THROTTLE, MIN_THROTTLE, REWARD_CRASH, CRASH_SPEED_WEIGHT\nfrom environment.carla.client import make_carla_client, CarlaClient \nfrom environment.carla.tcp import TCPConnectionError\nfrom environment.carla.settings import CarlaSettings\nfrom environment.carla.sensor import Camera\nfrom environment.carla.carla_server_pb2 import Control\n\nclass Env(gym.Env):\n def __init__(self, client, vae=None, min_throttle=0.4, max_throttle=0.6, n_command_history=20, frame_skip=1, n_stack=1, action_lambda=0.5):\n self.client = client\n # save last n commands\n self.n_commands = 2\n self.n_command_history = n_command_history\n self.command_history = np.zeros((1, self.n_commands * n_command_history))\n self.n_stack = n_stack\n self.stacked_obs = None\n\n # assumes that we are using VAE input\n self.vae = vae\n self.z_size = None\n if vae is not None:\n self.z_size = vae.z_size\n \n self.observation_space = spaces.Box(low=np.finfo(np.float32).min,\n high=np.finfo(np.float32).max,\n shape=(1, self.z_size + self.n_commands * n_command_history),\n dtype=np.float32)\n self.action_space = spaces.Box(low=np.array([-MAX_STEERING, -1]),\n high=np.array([MAX_STEERING, 1]),\n dtype=np.float32)\n\n self.min_throttle = min_throttle\n self.max_throttle = max_throttle\n self.frame_skip = frame_skip\n self.action_lambda = action_lambda\n self.last_throttle = 0.0\n self.seed()\n\n def jerk_penalty(self):\n \"\"\"\n Add a continuity penalty to limit jerk.\n :return: (float)\n \"\"\"\n jerk_penalty = 0\n if self.n_command_history > 1:\n # Take only last command into account\n for i in range(1):\n steering = self.command_history[0, -2 * (i + 1)]\n prev_steering = self.command_history[0, -2 * (i + 2)]\n steering_diff = (prev_steering - steering) / (MAX_STEERING - MIN_STEERING)\n\n if abs(steering_diff) > MAX_STEERING_DIFF:\n error = abs(steering_diff) - MAX_STEERING_DIFF\n jerk_penalty += JERK_REWARD_WEIGHT * (error ** 2)\n else:\n jerk_penalty += 0\n return jerk_penalty\n\n def postprocessing_step(self, action, observation, reward, done, info):\n \"\"\"\n Update the reward (add jerk_penalty if needed), the command history\n and stack new observation (when using frame-stacking).\n\n :param action: ([float])\n :param observation: (np.ndarray)\n :param reward: (float)\n :param done: (bool)\n :param info: (dict)\n :return: (np.ndarray, float, bool, dict)\n \"\"\"\n # Update command history\n if self.n_command_history > 0:\n self.command_history = np.roll(self.command_history, shift=-self.n_commands, axis=-1)\n self.command_history[..., -self.n_commands:] = action\n observation = np.concatenate((observation, self.command_history), axis=-1)\n\n jerk_penalty = self.jerk_penalty()\n # Cancel reward if the continuity constrain is violated\n if jerk_penalty > 0 and reward > 0:\n reward = 0\n reward -= jerk_penalty\n\n if self.n_stack > 1:\n self.stacked_obs = np.roll(self.stacked_obs, shift=-observation.shape[-1], axis=-1)\n if done:\n self.stacked_obs[...] 
= 0\n self.stacked_obs[..., -observation.shape[-1]:] = observation\n return self.stacked_obs, reward, done, info\n\n return observation, reward, done, info\n\n def step(self, action):\n # Convert from [-1, 1] to [0, 1]\n t = (action[1] + 1) / 2\n # Convert from [0, 1] to [min, max]\n action[1] = (1 - t) * self.min_throttle + self.max_throttle * t\n\n # Clip steering angle rate to enforce continuity\n if self.n_command_history > 0:\n prev_steering = self.command_history[0, -2]\n max_diff = (MAX_STEERING_DIFF - 1e-5) * (MAX_STEERING - MIN_STEERING)\n diff = np.clip(action[0] - prev_steering, -max_diff, max_diff)\n action[0] = prev_steering + diff\n\n control = Control()\n control.throttle = action[1]\n control.steer = action[0]\n control.brake = 0\n control.hand_brake = 0\n control.reverse = 0\n\n # Repeat action if using frame_skip\n for _ in range(self.frame_skip):\n self.client.send_control(control)\n measurements, sensor_data = self.client.read_data()\n im = sensor_data['CameraRGB'].data\n im = np.array(im)\n im = im[:, :, ::-1] # convert to BGR\n observation = self.vae.encode_from_raw_image(im)\n reward, done = self.reward(measurements, action)\n\n self.last_throttle = action[1]\n\n return self.postprocessing_step(action, observation, reward, done, {})\n\n def reset(self):\n print(\"Start to reset env\")\n settings = CarlaSettings()\n settings.set(\n SynchronousMode=True,\n SendNonPlayerAgentsInfo=False,\n NumberOfVehicles=0,\n NumberOfPedestrians=0,\n WeatherId=random.choice([1]),\n QualityLevel='Epic'\n )\n settings.randomize_seeds()\n camera = Camera('CameraRGB')\n camera.set(FOV=100)\n camera.set_image_size(160, 120)\n camera.set_position(2.0, 0.0, 1.4)\n camera.set_rotation(-15.0, 0, 0)\n settings.add_sensor(camera)\n observation = None\n\n scene = self.client.load_settings(settings)\n number_of_player_starts = len(scene.player_start_spots)\n player_start = random.randint(0, max(0, number_of_player_starts - 1))\n self.client.start_episode(player_start)\n\n measurements, sensor_data = self.client.read_data()\n im = sensor_data['CameraRGB'].data\n im = np.array(im)\n im = im[:, :, ::-1] # convert to BGR\n observation = self.vae.encode_from_raw_image(im)\n\n self.command_history = np.zeros((1, self.n_commands * self.n_command_history))\n\n if self.n_command_history > 0:\n observation = np.concatenate((observation, self.command_history), axis=-1)\n\n if self.n_stack > 1:\n self.stacked_obs[...] = 0\n self.stacked_obs[..., -observation.shape[-1]:] = observation\n return self.stacked_obs\n\n print('reset finished')\n return observation\n\n\n def reward(self, measurements, action):\n \"\"\"\n :param measurements:\n :return: reward, done\n \"\"\"\n done = False\n\n \"\"\"distance\"\"\"\n\n \"\"\"speed\"\"\"\n # # In the wayve.ai paper, speed has been used as reward\n # SPEED_REWARD_WEIGHT = 0.1\n # speed_reward = SPEED_REWARD_WEIGHT*measurements.player_measurements.forward_speed\n\n \"\"\"road\"\"\"\n if measurements.player_measurements.intersection_offroad > 0.2 or measurements.player_measurements.intersection_otherlane > 0.2:\n norm_throttle = (self.last_throttle - MIN_THROTTLE) / (MAX_THROTTLE - MIN_THROTTLE)\n done = True\n return REWARD_CRASH - CRASH_SPEED_WEIGHT * norm_throttle, done\n\n # 1 per timesteps + throttle\n throttle_reward = THROTTLE_REWARD_WEIGHT * (self.last_throttle / MAX_THROTTLE)\n return 1 + throttle_reward, done\n" ]
[ [ "numpy.concatenate", "numpy.array", "numpy.zeros", "numpy.roll", "numpy.finfo", "numpy.clip" ] ]
sjshtura/thesis_code
[ "1358f504cf3cfd0741e65723736dca7f039779d4" ]
[ "electricity_capacity.py" ]
[ "import pandas as pd\nimport matplotlib.pyplot as plt \nimport datetime\n\ndf_data = pd.read_excel(\"Wholesale_Electricity_Prices_2020.xlsx\")\n# print(df_data)\n\n# df_data = df_data.set_index(\"Gewerbe allgemein\")\ndf_data\n#df.index = pd.to_datetime(df.index, errors='ignore')\n#df\ndf_data\ndf_data = df_data[['dtDate', 'intHour','dblPrice']]\ndf_data\n\n\n#df_data[\"intHour\"] = pd.to_datetime(df_data[\"intHour\"])\n# df_data.intHour = df_data.intHour.astype('time64[h]')\ndf_data.intHour = pd.to_timedelta(df_data.intHour, unit='h')\n\n# df_data[\"intHour\"] = str(df_data[\"intHour\"].timedelta(seconds=666))\n\n# df_data['intHour'] = pd.to_datetime(df_data['intHour'], format='%H:%M:%S')\ndf_data\n\ndf_data.intHour = df_data.intHour.astype(str)\ndf_data[\"intHour\"]= df_data[\"intHour\"].str.slice(start = 7)\ndf_data.intHour = pd.to_datetime(df_data.intHour, errors='ignore', format=\"%H:%M:%S\").dt.time\ndf_data.dblPrice = df_data.dblPrice * 0.1\ndf_data\n# df_data.dtypes\n\ndf_data.dtDate.duplicated(keep = 'last')\ndf_data.dtypes\n\nElectricity_price_pivot1 = df_data.pivot(index = 'dtDate', columns = 'intHour', values = 'dblPrice')\n\nelec_capacity = Electricity_price_pivot1\nelec_capacity.loc[:] = 400\n# elec_capacity\nprint(\"electrical maximum capacity\\n\")\nprint(elec_capacity.head())" ]
[ [ "pandas.to_datetime", "pandas.read_excel", "pandas.to_timedelta" ] ]
lorentzj/garden
[ "6f946a441c02a89c30bd8e2498ff81d7c6275d3e" ]
[ "gan_model_definitions.py" ]
[ "import torch\nimport torch.nn as nn\n\n# Size of feature maps in generator\nngf = 64\n# Size of z latent vector (i.e. size of generator input)\nnz = 200\n# Size of inner layer for image type classification\nntc = 50\n\nclass Generator(nn.Module):\n def __init__(self, image_content_types):\n super(Generator, self).__init__()\n self.n_img_cont_types = len(image_content_types)\n self.image_conv = nn.Sequential(\n # input is Z, going into a convolution\n nn.utils.parametrizations.spectral_norm(\n nn.ConvTranspose2d(nz, ngf * 32, 4, 1, 0, bias = False)\n ),\n nn.BatchNorm2d(ngf * 32),\n nn.ReLU(True),\n nn.utils.parametrizations.spectral_norm(\n nn.ConvTranspose2d(ngf * 32, ngf * 16, 4, 2, 1, bias = False)\n ),\n nn.BatchNorm2d(ngf * 16),\n nn.ReLU(True),\n nn.utils.parametrizations.spectral_norm(\n nn.ConvTranspose2d(ngf * 16, ngf * 8, 4, 2, 1, bias = False)\n ),\n nn.BatchNorm2d(ngf * 8),\n nn.ReLU(True),\n nn.utils.parametrizations.spectral_norm(\n nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias = False)\n ),\n nn.BatchNorm2d(ngf * 4),\n nn.ReLU(True),\n nn.utils.parametrizations.spectral_norm(\n nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias = False)\n ),\n nn.BatchNorm2d(ngf * 2),\n nn.ReLU(True),\n nn.utils.parametrizations.spectral_norm(\n nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias = False)\n ),\n nn.BatchNorm2d(ngf),\n nn.ReLU(True),\n nn.utils.parametrizations.spectral_norm(\n nn.ConvTranspose2d(ngf, 3, 4, 2, 1, bias = False)\n ),\n nn.Tanh()\n # state size (nc) x 256 x 256\n )\n \n def forward(self, input):\n generated_image = self.image_conv(input)\n return generated_image, input.view((-1, nz))[:,-self.n_img_cont_types:]\n\n# Size of feature maps in discriminator\nndf = 64\n# Size of final linear layer to take image class into account\nncl = 50\n\nclass Discriminator(nn.Module):\n def __init__(self, image_content_types):\n super(Discriminator, self).__init__()\n self.image_conv = nn.Sequential(\n # input is (nc) x 256 x 256\n nn.utils.parametrizations.spectral_norm(\n nn.Conv2d(3, ndf, 4, 2, 1, bias = False)\n ),\n nn.LeakyReLU(0.2, inplace = True),\n nn.utils.parametrizations.spectral_norm(\n nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias = False)\n ),\n nn.BatchNorm2d(ndf * 2),\n nn.LeakyReLU(0.2, inplace = True),\n nn.utils.parametrizations.spectral_norm(\n nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias = False)\n ),\n nn.BatchNorm2d(ndf * 4),\n nn.LeakyReLU(0.2, inplace = True),\n nn.utils.parametrizations.spectral_norm(\n nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias = False)\n ),\n nn.BatchNorm2d(ndf * 8),\n nn.LeakyReLU(0.2, inplace = True),\n nn.utils.parametrizations.spectral_norm( \n nn.Conv2d(ndf * 8, ndf * 16, 4, 2, 1, bias = False)\n ),\n nn.BatchNorm2d(ndf * 16),\n nn.LeakyReLU(0.2, inplace = True),\n nn.utils.parametrizations.spectral_norm( \n nn.Conv2d(ndf * 16, ndf * 32, 4, 2, 1, bias = False)\n ),\n nn.BatchNorm2d(ndf * 32),\n nn.LeakyReLU(0.2, inplace = True),\n nn.utils.parametrizations.spectral_norm(\n nn.Conv2d(ndf * 32, ncl, 4, 1, 0, bias = False)\n ),\n nn.Sigmoid()\n )\n self.image_classify = nn.Sequential(\n nn.Linear(ncl + len(image_content_types), 1),\n nn.Sigmoid(),\n )\n\n def forward(self, image_input, class_input):\n conv = self.image_conv(image_input)\n return self.image_classify(torch.cat([conv.view((-1, ncl)), class_input], axis = 1))" ]
[ [ "torch.nn.Sigmoid", "torch.nn.Tanh", "torch.nn.BatchNorm2d", "torch.nn.LeakyReLU", "torch.nn.ConvTranspose2d", "torch.nn.ReLU", "torch.nn.Conv2d" ] ]
OGalOz/omreegalozMediaPermutations
[ "a3480092967dee8f6ce6c911f10e4a731a410a78" ]
[ "lib/myothermodule/comp_fba_m.py" ]
[ "#!/bin/python3\nimport pandas as pd\nfrom optparse import OptionParser\nimport sys\nimport numpy as np\nimport logging\nimport os\n\ndef compare_two_files(filename1, filename2, output_dir_name, rxns_dir):\n f1_fullpath = os.path.join(rxns_dir, filename1)\n f2_fullpath = os.path.join(rxns_dir, filename2)\n try:\n data1 = pd.read_csv(f1_fullpath,sep=\"\\t\");\n data2 = pd.read_csv(f2_fullpath,sep=\"\\t\");\n except IOError:\n logging.critical(\"files not found\")\n sys.exit(2)\n data = data1.append(data2)\n grouped = data.groupby(['id'])\n # for the reactions that appear in both FBA solutions\n flux1 = []\n flux2 = []\n comi = []\n com_name = []\n diffi = []\n for i in grouped:\n content = i[1]\n fluxes = content['flux'].values\n if( len(fluxes) == 2 ):\n comi.append(i[0])\n com_name.append(content['name'].values[0])\n flux1.append(fluxes[0])\n flux2.append(fluxes[1])\n elif ( len(fluxes) == 1):\n diffi.append(i[0])\n else:\n logging.critical(\"Could not recognize length of flux\")\n logging.info(np.corrcoef(flux1,flux2))\n df = pd.DataFrame(list(zip(comi,com_name,flux1,flux2,np.abs(np.subtract(flux1,flux2)))),columns=['id','name','flux1','flux2','diff'])\n df = df.sort_values(by=['diff'],ascending=False)\n df.to_csv(os.path.join(output_dir_name,filename1[:-4] + \"VS\" + filename2[:-4] + \"COMPARED.csv\"))\n logging.info(diffi)\n\n\n\n\n# The original functions:\ndef main():\n parser = OptionParser()\n parser.add_option(\"-m\",\"--file1\",dest=\"filename1\",help=\"FBA input file 1 in tsv format\",metavar=\"FILE\")\n parser.add_option(\"-n\",\"--file2\",dest=\"filename2\",help=\"FBA input file 2 in tsv format\",metavar=\"FILE\")\n (options, args) = parser.parse_args()\n try:\n data1 = pd.read_csv(options.filename1,sep=\"\\t\");\n data2 = pd.read_csv(options.filename2,sep=\"\\t\");\n except IOError:\n print(\"files not found\")\n sys.exit(2)\n data = data1.append(data2)\n grouped = data.groupby(['id'])\n # for the reactions that appear in both FBA solutions\n flux1 = []\n flux2 = []\n comi = []\n com_name = []\n diffi = []\n for i in grouped:\n content = i[1]\n fluxes = content['flux'].values\n if( len(fluxes) == 2 ):\n comi.append(i[0])\n com_name.append(content['name'].values[0])\n flux1.append(fluxes[0])\n flux2.append(fluxes[1])\n elif ( len(fluxes) == 1):\n diffi.append(i[0])\n else:\n logging.critical(\"Could not recognize length of flux - comp_fba_m.py\")\n print(np.corrcoef(flux1,flux2))\n df = pd.DataFrame(list(zip(comi,com_name,flux1,flux2,np.abs(np.subtract(flux1,flux2)))),columns=['id','name','flux1','flux2','diff'])\n df = df.sort_values(by=['diff'],ascending=False)\n df.to_csv('output.csv')\n print(diffi)\n\n#if __name__ == \"__main__\":\n# main()\n" ]
[ [ "numpy.corrcoef", "pandas.read_csv", "numpy.subtract" ] ]
yashasvimisra2798/numpy
[ "b892ed2c7fa27b2e0d73c12d12ace4b4d4e12897" ]
[ "numpy/typing/tests/data/reveal/arraypad.py" ]
[ "from typing import List, Any, Mapping, Tuple, SupportsIndex\n\nimport numpy as np\nimport numpy.typing as npt\n\ndef mode_func(\n ar: npt.NDArray[np.number[Any]],\n width: Tuple[int, int],\n iaxis: SupportsIndex,\n kwargs: Mapping[str, Any],\n) -> None: ...\n\nAR_i8: npt.NDArray[np.int64]\nAR_f8: npt.NDArray[np.float64]\nAR_LIKE: List[int]\n\nreveal_type(np.pad(AR_i8, (2, 3), \"constant\")) # E: numpy.ndarray[Any, numpy.dtype[{int64}]]\nreveal_type(np.pad(AR_LIKE, (2, 3), \"constant\")) # E: numpy.ndarray[Any, numpy.dtype[Any]]\n\nreveal_type(np.pad(AR_f8, (2, 3), mode_func)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]\nreveal_type(np.pad(AR_f8, (2, 3), mode_func, a=1, b=2)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]\n" ]
[ [ "numpy.pad" ] ]
Thevenkster/Sweepstakes
[ "78cf1b95c4bb174c6e57394a5f7b9ddd1f79941a" ]
[ "Sweepstakes.py" ]
[ "'''\nThis python script can be used to randomly assign teams to players using an\nexcel spreadsheet as input. Please ensure that the input files/sheets/columns\nfollow the naming conventions of the code to avoid errors/exceptions.\n\nWhat this script does:\n1. Opens an xlsx spreadsheet X\n2. Reads the column with participants' & teams 'names & writes them to sets\n3. Randomly picks a participant and assign both teams\n4. Pops them from their sets\n5. Saves the popped values into lists\n6. Repeats until all sets are empty\n7. Writes results to a new xlsx spreadsheet\n'''\n#importing libraries\nimport random\nimport pandas as pd\nfrom pandas import ExcelWriter\nfrom pandas import ExcelFile\n\n#main function of the program\ndef main():\n\n #Read first sheet of the input file\n xl = pd.read_excel('inputdoc.xlsx', sheetname='Sheet1')\n\n #initialize sets for the players, pot1 teams & pot2 teams\n playerset = set()\n pot1set = set()\n pot2set = set()\n\n #Add all players, pot1 teams & pot2 teams to their respective set structures\n #We use sets instead of lists as they are unordered; hence more random\n for i in xl.index:\n playerset.add(xl['Player List'][i])\n pot1set.add(xl['Pot 1 List'][i])\n pot2set.add(xl['Pot 2 List'][i])\n\n #Create lists for assigning the teams to the players\n #We use lists here since the indicies will indicate who was assigned which teams\n players = []\n pot1 = []\n pot2 = []\n\n #Dataframe for output spreadsheet\n df = pd.DataFrame()\n\n #Writing into a new document instead of the old one for transparency\n writer = pd.ExcelWriter('AssignedTeams.xlsx', engine = 'xlsxwriter')\n\n #Loop through the index count\n for i in xl.index:\n\n #Choose a random element from each set\n chosenpl = random.choice(tuple(playerset))\n chosenpot1 = random.choice(tuple(pot1set))\n chosenpot2 = random.choice(tuple(pot2set))\n\n #Remove those elements from the set to avoid repetitions\n playerset.remove(chosenpl)\n pot1set.remove(chosenpot1)\n pot2set.remove(chosenpot2)\n\n #Write the selected values to their respective lists\n players.append(chosenpl)\n pot1.append(chosenpot1)\n pot2.append(chosenpot2)\n\n #Assign the list of values to their respective columns in the dataframe\n df['Player Name'] = players\n df['Team 1'] = pot1\n df['Team 2'] = pot2\n\n #Convert the dataframe to the excel format\n df.to_excel(writer, sheet_name = 'Sheet1')\n\n #Save the spreadsheet\n writer.save()\n\n#Without this the code would execute even if the script was imported as a module\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "pandas.DataFrame", "pandas.read_excel", "pandas.ExcelWriter" ] ]
lgalmant/pyannote-audio
[ "d58a9d2e18fb2fddaab99dbc6f93fdbdcfc5f290" ]
[ "pyannote/audio/labeling/extraction.py" ]
[ "#!/usr/bin/env python\n# encoding: utf-8\n\n# The MIT License (MIT)\n\n# Copyright (c) 2016-2018 CNRS\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n# AUTHORS\n# Hervé BREDIN - http://herve.niderb.fr\n\nimport numpy as np\nfrom cachetools import LRUCache\nCACHE_MAXSIZE = 12\n\nimport torch\nimport torch.nn as nn\nfrom pyannote.core import SlidingWindow, SlidingWindowFeature\nfrom pyannote.generators.batch import FileBasedBatchGenerator\nfrom pyannote.generators.fragment import SlidingSegments\nfrom pyannote.database import get_unique_identifier\nfrom pyannote.audio.features import Precomputed\nfrom pyannote.audio.features import RawAudio\n\n\nclass SequenceLabeling(FileBasedBatchGenerator):\n \"\"\"Sequence labeling\n\n Parameters\n ----------\n model : `nn.Module` or `str`\n Model (or path to model). When a path, the directory structure created\n by pyannote command line tools (e.g. pyannote-speech-detection) should\n be kept unchanged so that one can find the corresponding configuration\n file automatically.\n return_intermediate : `int`, optional\n Index of intermediate layer. Returns intermediate hidden state.\n Defaults to returning the final output.\n feature_extraction : callable, optional\n Feature extractor. When not provided and `model` is a path, it is\n inferred directly from the configuration file.\n duration : float, optional\n Subsequence duration, in seconds. When `model` is a path and `duration`\n is not provided, it is inferred directly from the configuration file.\n step : float, optional\n Subsequence step, in seconds. 
Defaults to 50% of `duration`.\n batch_size : int, optional\n Defaults to 32.\n device : torch.device, optional\n Defaults to CPU.\n \"\"\"\n\n def __init__(self, model=None, feature_extraction=None, duration=1,\n min_duration=None, step=None, batch_size=32, device=None,\n return_intermediate=None):\n\n if not isinstance(model, nn.Module):\n\n from pyannote.audio.applications.base_labeling import BaseLabeling\n app = BaseLabeling.from_model_pt(model, training=False)\n\n model = app.model_\n if feature_extraction is None:\n feature_extraction = app.feature_extraction_\n\n if duration is None:\n duration = app.task_.duration\n\n self.device = torch.device('cpu') if device is None \\\n else torch.device(device)\n self.model = model.eval().to(self.device)\n\n if feature_extraction.augmentation is not None:\n msg = (\n 'Data augmentation should not be used '\n 'when applying a pre-trained model.'\n )\n raise ValueError(msg)\n self.feature_extraction = feature_extraction\n\n if hasattr(self.model, 'frame_info_'):\n self.frame_info_ = self.model.frame_info_\n else:\n self.frame_info_ = self.feature_extraction.sliding_window\n\n if hasattr(self.model, 'frame_crop'):\n self.frame_crop_ = self.model.frame_crop\n else:\n self.frame_crop_ = 'center'\n\n self.duration = duration\n self.min_duration = min_duration\n\n generator = SlidingSegments(duration=duration, step=step,\n min_duration=min_duration, source='audio')\n self.step = generator.step if step is None else step\n\n self.return_intermediate = return_intermediate\n\n super(SequenceLabeling, self).__init__(\n generator, {'@': (self._process, self.forward)},\n batch_size=batch_size, incomplete=False)\n\n @property\n def dimension(self):\n if hasattr(self.model, 'n_classes'):\n return self.model.n_classes\n elif hasattr(self.model, 'dimension'):\n return self.model.dimension\n else:\n msg = 'Model has no \"n_classes\" nor \"dimension\" attribute.'\n raise ValueError(msg)\n\n @property\n def sliding_window(self):\n return self.frame_info_\n\n def preprocess(self, current_file):\n \"\"\"On-demand feature extraction\n\n Parameters\n ----------\n current_file : dict\n Generated by a pyannote.database.Protocol\n\n Returns\n -------\n current_file : dict\n Current file with additional \"features\" entry\n\n Notes\n -----\n Does nothing when self.feature_extraction is a\n pyannote.audio.features.Precomputed instance.\n \"\"\"\n\n # if \"features\" are precomputed on disk, do nothing\n # as \"process_segment\" will load just the part we need\n if isinstance(self.feature_extraction, (Precomputed, RawAudio)):\n return current_file\n\n # if (by chance) current_file already contains \"features\"\n # do nothing.\n if 'features' in current_file:\n return current_file\n\n # if we get there, it means that we need to extract features\n # for current_file. 
let's create a cache to store them...\n if not hasattr(self, 'preprocessed_'):\n self.preprocessed_ = LRUCache(maxsize=CACHE_MAXSIZE)\n\n # this is the key that will be used to know if \"features\"\n # already exist in cache\n uri = get_unique_identifier(current_file)\n\n # if \"features\" are not cached for current file\n # compute and cache them...\n if uri not in self.preprocessed_:\n features = self.feature_extraction(current_file)\n self.preprocessed_[uri] = features\n\n # create copy of current_file to prevent \"features\"\n # from consuming increasing memory...\n preprocessed = dict(current_file)\n\n # add \"features\" key\n preprocessed['features'] = self.preprocessed_[uri]\n\n return preprocessed\n\n def _process(self, segment, current_file=None):\n \"\"\"Extract features for current segment\n\n Parameters\n ----------\n segment : pyannote.core.Segment\n current_file : dict\n Generated by a pyannote.database.Protocol\n \"\"\"\n\n # use in-memory \"features\" whenever they are available\n if 'features' in current_file:\n features = current_file['features']\n return features.crop(segment, mode='center', fixed=self.duration)\n\n # this line will only happen when self.feature_extraction is a\n # pyannote.audio.features.{Precomputed | RawAudio} instance\n return self.feature_extraction.crop(current_file, segment,\n mode='center', fixed=self.duration)\n\n def forward(self, X):\n \"\"\"Process (variable-length) sequences\n\n Parameters\n ----------\n X : `list`\n List of input sequences\n\n Returns\n -------\n fX : `numpy.ndarray`\n Batch of sequence embeddings.\n \"\"\"\n\n lengths = [len(x) for x in X]\n variable_lengths = len(set(lengths)) > 1\n\n if variable_lengths:\n _, sort = torch.sort(torch.tensor(lengths), descending=True)\n _, unsort = torch.sort(sort)\n sequences = [torch.tensor(X[i],\n dtype=torch.float32,\n device=self.device) for i in sort]\n packed = pack_sequence(sequences)\n else:\n packed = torch.tensor(np.stack(X),\n dtype=torch.float32,\n device=self.device)\n\n if self.return_intermediate is None:\n fX = self.model(packed)\n else:\n _, fX = self.model(packed,\n return_intermediate=self.return_intermediate)\n\n fX = fX.detach().to('cpu').numpy()\n\n if variable_lengths:\n return fX[unsort]\n\n return fX\n\n def __call__(self, current_file):\n \"\"\"Compute predictions on a sliding window\n\n Parameters\n ----------\n current_file : `dict`\n File (from pyannote.database protocol)\n\n Returns\n -------\n predictions : `SlidingWindowFeature`\n Predictions.\n \"\"\"\n\n # frame and sub-sequence sliding windows\n frames = self.frame_info_\n batches = [batch for batch in self.from_file(current_file,\n incomplete=True)]\n if not batches:\n data = np.zeros((0, self.dimension), dtype=np.float32)\n return SlidingWindowFeature(data, frames)\n\n fX = np.vstack(batches)\n subsequences = SlidingWindow(duration=self.duration, step=self.step)\n\n # this happens for tasks that expects just one label per sequence\n # (rather than one label per frame)\n if fX.ndim == 2:\n return SlidingWindowFeature(fX, subsequences)\n # else: fX.ndim == 3\n\n # get total number of frames (based on last window end time)\n n_subsequences = len(fX)\n n_frames = frames.samples(subsequences[n_subsequences].end,\n mode='center')\n\n # data[i] is the sum of all predictions for frame #i\n data = np.zeros((n_frames, self.dimension), dtype=np.float32)\n\n # k[i] is the number of sequences that overlap with frame #i\n k = np.zeros((n_frames, 1), dtype=np.int8)\n\n for subsequence, fX_ in zip(subsequences, 
fX):\n\n # indices of frames overlapped by subsequence\n indices = frames.crop(subsequence,\n mode=self.frame_crop_,\n fixed=self.duration)\n\n # accumulate the outputs\n data[indices] += fX_\n\n # keep track of the number of overlapping sequence\n # TODO - use smarter weights (e.g. Hamming window)\n k[indices] += 1\n\n # compute average embedding of each frame\n data = data / np.maximum(k, 1)\n\n return SlidingWindowFeature(data, frames)\n" ]
[ [ "torch.device", "numpy.zeros", "numpy.stack", "torch.tensor", "torch.sort", "numpy.vstack", "numpy.maximum" ] ]
Shashank-Holla/motleyNet
[ "05a8c758f650a90f5f53e51bb89909fdc1b735f4" ]
[ "models/mnist_model.py" ]
[ "import torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\n\r\n# Object Recognition\r\ndropout_value = 0.05\r\nclass Net(nn.Module):\r\n def __init__(self, norm_type):\r\n super(Net, self).__init__()\r\n\r\n self.norm_type = norm_type\r\n assert self.norm_type in ('BatchNorm', 'GroupNorm', 'LayerNorm'), \"Incorrect normalization applied\"\r\n\r\n self.convblock1 = nn.Sequential(\r\n nn.Conv2d(in_channels=1, out_channels=8, kernel_size=(3, 3), padding=1, bias=False),\r\n nn.ReLU(),\r\n self.norm_layer(self.norm_type, 8),\r\n nn.Dropout(dropout_value) \r\n )\r\n self.convblock2 = nn.Sequential(\r\n nn.Conv2d(in_channels=8, out_channels=16, kernel_size=(3, 3), padding=1, bias=False),\r\n nn.ReLU(),\r\n self.norm_layer(self.norm_type, 16),\r\n nn.Dropout(dropout_value) \r\n )\r\n # Maxpooling\r\n self.pool1 = nn.MaxPool2d(2, 2) \r\n # TRANSITION BLOCK 1\r\n self.transitionblock1 = nn.Sequential(\r\n nn.Conv2d(in_channels=16, out_channels=8, kernel_size=(1, 1), padding=0, bias=False),\r\n )\r\n\r\n self.convblock3 = nn.Sequential(\r\n nn.Conv2d(in_channels=8, out_channels=12, kernel_size=(3, 3), padding=0, bias=False),\r\n nn.ReLU(),\r\n self.norm_layer(self.norm_type, 12),\r\n nn.Dropout(dropout_value) \r\n )\r\n self.convblock4 = nn.Sequential(\r\n nn.Conv2d(in_channels=12, out_channels=12, kernel_size=(3, 3), padding=0, bias=False),\r\n nn.ReLU(),\r\n self.norm_layer(self.norm_type, 12),\r\n nn.Dropout(dropout_value) \r\n )\r\n \r\n self.convblock5 = nn.Sequential(\r\n nn.Conv2d(in_channels=12, out_channels=16, kernel_size=(3, 3), padding=0, bias=False),\r\n nn.ReLU(),\r\n self.norm_layer(self.norm_type, 16),\r\n nn.Dropout(dropout_value) \r\n )\r\n self.convblock6 = nn.Sequential(\r\n nn.Conv2d(in_channels=16, out_channels=16, kernel_size=(3, 3), padding=0, bias=False),\r\n nn.ReLU(),\r\n self.norm_layer(self.norm_type, 16),\r\n nn.Dropout(dropout_value) \r\n )\r\n\r\n self.gap = nn.Sequential(\r\n nn.AvgPool2d(kernel_size=6)\r\n )\r\n\r\n self.translinear = nn.Sequential(\r\n nn.Conv2d(in_channels=16, out_channels=10, kernel_size=1, padding=0, bias=False),\r\n )\r\n \r\n def norm_layer(self, norm_type, channels):\r\n if norm_type == 'BatchNorm':\r\n return nn.BatchNorm2d(channels)\r\n elif norm_type == 'GroupNorm':\r\n return nn.GroupNorm(num_groups=int(channels/2), num_channels=channels)\r\n elif norm_type == 'LayerNorm':\r\n return nn.GroupNorm(num_groups=1, num_channels=channels)\r\n\r\n\r\n def forward(self, x):\r\n x = self.convblock1(x)\r\n x = self.convblock2(x)\r\n x = self.pool1(x)\r\n x = self.transitionblock1(x)\r\n x = self.convblock3(x)\r\n x = self.convblock4(x)\r\n x = self.convblock5(x)\r\n x = self.convblock6(x)\r\n x = self.gap(x)\r\n x = self.translinear(x)\r\n x = x.view(-1, 10)\r\n return x " ]
[ [ "torch.nn.Dropout", "torch.nn.MaxPool2d", "torch.nn.AvgPool2d", "torch.nn.BatchNorm2d", "torch.nn.GroupNorm", "torch.nn.ReLU", "torch.nn.Conv2d" ] ]